diff --git a/.gcloudignore b/.gcloudignore new file mode 100644 index 00000000000..863d57bfd64 --- /dev/null +++ b/.gcloudignore @@ -0,0 +1,7 @@ +# By default, everything inside .gitignore (as well as the .git directory and +# the .gitignore file itself) is not uploaded to Google Cloud Build. But the +# bintray publishing task requires the .git directory to be uploaded. +# +# Adding this file overrides the default, so everything gets uploaded, but we +# still want to exclude the large .gradle cache directory. +.gradle diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c2f1cf0970b..eee1bb58d6e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -4,7 +4,7 @@ Please refer to [Contributing to Spinnaker](https://spinnaker.io/community/contr When filling out a pull request, please consider the following: -* Follow the commit message conventions [found here](http://www.spinnaker.io/v1.0/docs/how-to-submit-a-patch). +* Follow the commit message conventions [found here](https://spinnaker.io/community/contributing/submitting/). * Provide a descriptive summary for your changes. * If it fixes a bug or resolves a feature request, be sure to link to that issue. * Add inline code comments to changes that might not be obvious. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000000..f9ecf576e17 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000000..0dd8ac5430c --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,90 @@ +name: Branch Build + +on: + push: + branches: + - master + - release-* + +env: + GRADLE_OPTS: -Dorg.gradle.daemon=false -Xmx12g -Xms12g + CONTAINER_REGISTRY: us-docker.pkg.dev/spinnaker-community/docker + +jobs: + branch-build: + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + runs-on: ubuntu-latest + steps: + - name: Create more disk space + run: sudo rm -rf /usr/share/dotnet && sudo rm -rf /opt/ghc && sudo rm -rf "/usr/local/share/boost" && sudo rm -rf "$AGENT_TOOLSDIRECTORY" + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - uses: actions/setup-java@v4 + with: + java-version: | + 17 + distribution: 'zulu' + cache: 'gradle' + - name: Prepare build variables + id: build_variables + run: | + echo REPO="${GITHUB_REPOSITORY##*/}" >> $GITHUB_OUTPUT + echo VERSION="$(git describe --tags --abbrev=0 --match='v[0-9]*' | cut -c2-)-dev-${GITHUB_REF_NAME}-$(git rev-parse --short HEAD)-$(date --utc +'%Y%m%d%H%M')" >> $GITHUB_OUTPUT + - name: Build + env: + ORG_GRADLE_PROJECT_version: ${{ steps.build_variables.outputs.VERSION }} + run: ./gradlew build --stacktrace ${{ steps.build_variables.outputs.REPO }}-web:installDist + - name: Build local slim container image for testing + uses: docker/build-push-action@v6 + with: + context: .
+ file: Dockerfile.slim + load: true + platforms: local + tags: | + "${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-unvalidated" + - name: Test local slim container image + env: + FULL_DOCKER_IMAGE_NAME: "${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-unvalidated" + run: ./gradlew ${{ steps.build_variables.outputs.REPO }}-integration:test + - name: Login to GAR + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: docker/login-action@v3 + # use service account flow defined at: https://github.com/docker/login-action#service-account-based-authentication-1 + with: + registry: us-docker.pkg.dev + username: _json_key + password: ${{ secrets.GAR_JSON_KEY }} + - name: Build and publish slim container image + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.slim + platforms: linux/amd64,linux/arm64 + push: true + tags: | + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ github.ref_name }}-latest-unvalidated" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-unvalidated" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ github.ref_name }}-latest-unvalidated-slim" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-unvalidated-slim" + - name: Build and publish ubuntu container image + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.ubuntu + platforms: linux/amd64,linux/arm64 + push: true + tags: | + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ github.ref_name }}-latest-unvalidated-ubuntu" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-unvalidated-ubuntu" diff --git a/.github/workflows/bump_dependencies.yml b/.github/workflows/bump_dependencies.yml new file mode 100644 index 00000000000..9a3e287d4a7 --- /dev/null +++ b/.github/workflows/bump_dependencies.yml @@ -0,0 +1,17 @@ +name: Bump Dependencies + +on: + repository_dispatch: + types: [bump-dependencies] + +jobs: + bump-dependencies: + runs-on: ubuntu-latest + steps: + - uses: spinnaker/bumpdeps@master + with: + ref: ${{ github.event.client_payload.ref }} + key: clouddriverVersion + repositories: halyard + env: + GITHUB_OAUTH: ${{ secrets.SPINNAKER_GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000000..538a03382d3 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,66 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + +on: + schedule: + - cron: '22 20 * * *' + +jobs: + analyze: + if: startsWith(github.repository, 'spinnaker/') + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'java' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://git.io/codeql-language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/gradle-wrapper-validation.yml b/.github/workflows/gradle-wrapper-validation.yml new file mode 100644 index 00000000000..eb00f93cd42 --- /dev/null +++ b/.github/workflows/gradle-wrapper-validation.yml @@ -0,0 +1,10 @@ +name: "Validate Gradle Wrapper" +on: [push, pull_request] + +jobs: + validation: + name: "Gradle wrapper validation" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: gradle/wrapper-validation-action@v3 diff --git a/.github/workflows/integration-tests-kubernetes.yml b/.github/workflows/integration-tests-kubernetes.yml new file mode 100644 index 00000000000..7ae041ee770 --- /dev/null +++ b/.github/workflows/integration-tests-kubernetes.yml @@ -0,0 +1,69 @@ +name: Kubernetes Integration Tests + +on: workflow_call + +env: + GRADLE_OPTS: -Dorg.gradle.daemon=false -Xmx12g -Xms12g + +jobs: + it-test-kubernetes: + strategy: + matrix: + kubectl-version: + - 1.22.17 + kubernetes-image: + - "kindest/node:v1.29.0@sha256:eaa1450915475849a73a9227b8f201df25e55e268e5d619312131292e324d570" + - "kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31" + - "kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72" + - "kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb" + - "kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8" + - "kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab" + - "kindest/node:v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb" + - "kindest/node:v1.22.17@sha256:f5b2e5698c6c9d6d0adc419c0deae21a425c07d81bbf3b6a6834042f25d4fba2" + - "kindest/node:v1.21.14@sha256:8a4e9bb3f415d2bb81629ce33ef9c76ba514c14d707f9797a01e3216376ba093" + include: + - kubectl-version: 1.29.1 + 
kubernetes-image: "kindest/node:v1.29.0@sha256:eaa1450915475849a73a9227b8f201df25e55e268e5d619312131292e324d570" + - kubectl-version: 1.28.6 + kubernetes-image: "kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31" + - kubectl-version: 1.27.10 + kubernetes-image: "kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72" + - kubectl-version: 1.26.13 + kubernetes-image: "kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb" + - kubectl-version: 1.25.16 + kubernetes-image: "kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8" + - kubectl-version: 1.24.17 + kubernetes-image: "kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab" + - kubectl-version: 1.23.17 + kubernetes-image: "kindest/node:v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb" + - kubectl-version: 1.21.14 + kubernetes-image: "kindest/node:v1.21.14@sha256:8a4e9bb3f415d2bb81629ce33ef9c76ba514c14d707f9797a01e3216376ba093" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + java-version: | + 17 + distribution: 'zulu' + - name: Cache on push + if: github.event_name == 'push' + uses: actions/cache@v4 + with: + path: ~/.gradle + key: ${{ runner.os }}-cd-it-${{ github.sha }} + # Restore build outputs from the previous commit (if successful), if current commit hasn't run successfully yet + restore-keys: | + ${{ runner.os }}-cd-it-${{ github.event.before }} + - name: Cache on pull_request + if: github.event_name == 'pull_request' + uses: actions/cache@v4 + with: + path: ~/.gradle + key: ${{ runner.os }}-cd-it-${{ github.event.pull_request.head.sha }} + restore-keys: | + ${{ runner.os }}-cd-it-${{ github.event.before }} + - name: Kubernetes Provider Integration Tests + run: | + version=$(echo '${{ matrix.kubernetes-image }}' | grep -o 'v[0-9]*\.[0-9]*') + ./gradlew --build-cache :clouddriver-kubernetes:integrationTest -Pkubernetes-image=${{ matrix.kubernetes-image }} -Pkubectl-version=${{ matrix.kubectl-version }} -Pkubernetes-version=${version} diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml new file mode 100644 index 00000000000..c3e763d3738 --- /dev/null +++ b/.github/workflows/integration_tests.yml @@ -0,0 +1,47 @@ +name: Integration Tests + +on: + push: + branches: + - master + pull_request: + +env: + GRADLE_OPTS: -Dorg.gradle.daemon=false -Xmx4g -Xms4g + +jobs: + it-test-kubernetes: + uses: ./.github/workflows/integration-tests-kubernetes.yml + it-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + java-version: | + 17 + distribution: 'zulu' + - name: Cache on push + if: github.event_name == 'push' + uses: actions/cache@v4 + with: + path: ~/.gradle + key: ${{ runner.os }}-cd-it-${{ github.sha }} + # Restore build outputs from the previous commit (if successful), if current commit hasn't run successfully yet + restore-keys: | + ${{ runner.os }}-cd-it-${{ github.event.before }} + - name: Cache on pull_request + if: github.event_name == 'pull_request' + uses: actions/cache@v4 + with: + path: ~/.gradle + key: ${{ runner.os }}-cd-it-${{ github.event.pull_request.head.sha }} + restore-keys: | + ${{ runner.os }}-cd-it-${{ github.event.before }} + # Separating integration tests by provider allows us to have separate logs + - name: Amazon ECS Provider
Integration Tests + run: ./gradlew --build-cache --no-daemon :clouddriver-ecs:integrationTest + - name: Artifacts Integration Tests + run: ./gradlew --build-cache :clouddriver-artifacts:integrationTest + - name: AWS EC2 Provider Integration Tests + run: ./gradlew --build-cache :clouddriver-aws:integrationTest diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 00000000000..7d97a23f395 --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,69 @@ +name: PR Build + +on: [ pull_request ] + +env: + GRADLE_OPTS: -Dorg.gradle.daemon=false -Xmx12g -Xms12g + CONTAINER_REGISTRY: us-docker.pkg.dev/spinnaker-community/docker + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Create more disk space + run: sudo rm -rf /usr/share/dotnet && sudo rm -rf /opt/ghc && sudo rm -rf "/usr/local/share/boost" && sudo rm -rf "$AGENT_TOOLSDIRECTORY" + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - uses: actions/setup-java@v4 + with: + java-version: | + 17 + distribution: 'zulu' + cache: 'gradle' + - name: Prepare build variables + id: build_variables + run: | + echo REPO="${GITHUB_REPOSITORY##*/}" >> $GITHUB_OUTPUT + echo VERSION="$(git describe --tags --abbrev=0 --match='v[0-9]*' | cut -c2-)-dev-pr-$(git rev-parse --short HEAD)-$(date --utc +'%Y%m%d%H%M')" >> $GITHUB_OUTPUT + - name: Build + env: + ORG_GRADLE_PROJECT_version: ${{ steps.build_variables.outputs.VERSION }} + run: ./gradlew build ${{ steps.build_variables.outputs.REPO }}-web:installDist + - name: Build slim container image + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.slim + platforms: linux/amd64,linux/arm64 + tags: | + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:latest" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:latest-slim" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-slim" + - name: Build ubuntu container image + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.ubuntu + platforms: linux/amd64,linux/arm64 + tags: | + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:latest-ubuntu" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}-ubuntu" + - name: Build local slim container image for testing + uses: docker/build-push-action@v6 + with: + context: . 
+ file: Dockerfile.slim + load: true + platforms: local + tags: | + "${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}" + - name: Test local slim container image + env: + FULL_DOCKER_IMAGE_NAME: "${{ steps.build_variables.outputs.REPO }}:${{ steps.build_variables.outputs.VERSION }}" + run: ./gradlew ${{ steps.build_variables.outputs.REPO }}-integration:test diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..30c65c53c7d --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,158 @@ +name: Release + +on: + push: + tags: + - "v[0-9]+.[0-9]+.[0-9]+" + - "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+" + +env: + GRADLE_OPTS: -Dorg.gradle.daemon=false -Xmx12g -Xms12g + CONTAINER_REGISTRY: us-docker.pkg.dev/spinnaker-community/docker + +jobs: + release: + runs-on: ubuntu-latest + steps: + #https://github.com/NASA-IMPACT/hls-base/pull/17/files borrowed from this.... + - name: Create more disk space + run: sudo rm -rf /usr/share/dotnet && sudo rm -rf /opt/ghc && sudo rm -rf "/usr/local/share/boost" && sudo rm -rf "$AGENT_TOOLSDIRECTORY" + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + # Given a tag, determine what branch we are on, so we can bump dependencies (or not) + - name: Get Branch + run: | + BRANCHES=$(git branch -r --contains ${{ github.ref }}) + echo "BRANCHES is '${BRANCHES}'" + # Check for no branches explicitly...Otherwise echo adds a newline so wc thinks there's + # one branch. And echo -n makes it appear that there's one less branch than there + # actually is. + if [ -z "$BRANCHES" ]; then + echo "exactly one branch required to release clouddriver, but there are none" + exit 1 + fi + NUM_BRANCHES=$(($(echo "$BRANCHES" | wc -l))) + echo "NUM_BRANCHES is '${NUM_BRANCHES}'" + if [ $NUM_BRANCHES -ne 1 ]; then + echo "exactly one branch required to release clouddriver, but there are $NUM_BRANCHES ($BRANCHES)" + exit 1 + fi + BRANCH=$(echo $BRANCHES | xargs) + echo "exactly one branch ($BRANCH)" + echo BRANCH="$BRANCH" >> $GITHUB_ENV + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - uses: actions/setup-java@v4 + with: + java-version: | + 17 + distribution: 'zulu' + cache: 'gradle' + - name: Assemble release info + id: release_info + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + .
.github/workflows/release_info.sh ${{ github.event.repository.full_name }} + echo CHANGELOG=$(echo -e "${CHANGELOG}") >> $GITHUB_OUTPUT + echo SKIP_RELEASE="${SKIP_RELEASE}" >> $GITHUB_OUTPUT + echo IS_CANDIDATE="${IS_CANDIDATE}" >> $GITHUB_OUTPUT + echo RELEASE_VERSION="${RELEASE_VERSION}" >> $GITHUB_OUTPUT + - name: Prepare build variables + id: build_variables + run: | + echo REPO="${GITHUB_REPOSITORY##*/}" >> $GITHUB_OUTPUT + echo VERSION="$(git rev-parse --short HEAD)-$(date --utc +'%Y%m%d%H%M')" >> $GITHUB_OUTPUT + - name: Release build + env: + ORG_GRADLE_PROJECT_version: ${{ steps.release_info.outputs.RELEASE_VERSION }} + ORG_GRADLE_PROJECT_nexusPublishEnabled: true + ORG_GRADLE_PROJECT_nexusUsername: ${{ secrets.NEXUS_USERNAME }} + ORG_GRADLE_PROJECT_nexusPassword: ${{ secrets.NEXUS_PASSWORD }} + ORG_GRADLE_PROJECT_nexusPgpSigningKey: ${{ secrets.NEXUS_PGP_SIGNING_KEY }} + ORG_GRADLE_PROJECT_nexusPgpSigningPassword: ${{ secrets.NEXUS_PGP_SIGNING_PASSWORD }} + run: | + ./gradlew --info build ${{ steps.build_variables.outputs.REPO }}-web:installDist publishToNexus closeAndReleaseNexusStagingRepository + - name: Publish apt packages to Google Artifact Registry + env: + ORG_GRADLE_PROJECT_version: ${{ steps.release_info.outputs.RELEASE_VERSION }} + ORG_GRADLE_PROJECT_artifactRegistryPublishEnabled: true + GAR_JSON_KEY: ${{ secrets.GAR_JSON_KEY }} + run: | + ./gradlew --info publish + - name: Login to Google Cloud + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: 'google-github-actions/auth@v2' + # use service account flow defined at: https://github.com/google-github-actions/upload-cloud-storage#authenticating-via-service-account-key-json + with: + credentials_json: '${{ secrets.GAR_JSON_KEY }}' + - name: Upload halconfig profiles to GCS + # https://console.cloud.google.com/storage/browser/halconfig + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: 'google-github-actions/upload-cloud-storage@v2' + with: + path: 'halconfig/' + destination: 'halconfig/${{ steps.build_variables.outputs.REPO }}/${{ steps.release_info.outputs.RELEASE_VERSION }}' + parent: false + - name: Login to GAR + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: docker/login-action@v3 + # use service account flow defined at: https://github.com/docker/login-action#service-account-based-authentication-1 + with: + registry: us-docker.pkg.dev + username: _json_key + password: ${{ secrets.GAR_JSON_KEY }} + - name: Build and publish slim container image + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: docker/build-push-action@v6 + with: + context: . 
+ file: Dockerfile.slim + platforms: linux/amd64,linux/arm64 + push: true + tags: | + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.release_info.outputs.RELEASE_VERSION }}-unvalidated" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.release_info.outputs.RELEASE_VERSION }}-unvalidated-slim" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.release_info.outputs.RELEASE_VERSION }}-${{ steps.build_variables.outputs.VERSION }}-unvalidated-slim" + - name: Build and publish ubuntu container image + # Only run this on repositories in the 'spinnaker' org, not on forks. + if: startsWith(github.repository, 'spinnaker/') + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.ubuntu + platforms: linux/amd64,linux/arm64 + push: true + tags: | + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.release_info.outputs.RELEASE_VERSION }}-unvalidated-ubuntu" + "${{ env.CONTAINER_REGISTRY }}/${{ steps.build_variables.outputs.REPO }}:${{ steps.release_info.outputs.RELEASE_VERSION }}-${{ steps.build_variables.outputs.VERSION }}-unvalidated-ubuntu" + - name: Create release + if: steps.release_info.outputs.SKIP_RELEASE == 'false' + uses: softprops/action-gh-release@v2 + with: + body: | + ${{ steps.release_info.outputs.CHANGELOG }} + draft: false + name: ${{ github.event.repository.name }} ${{ github.ref_name }} + prerelease: ${{ steps.release_info.outputs.IS_CANDIDATE }} + tag_name: ${{ github.ref }} + token: ${{ secrets.GITHUB_TOKEN }} + - name: Pause before dependency bump + # The only dependency to bump is halyard, which only consumes from + # master since it has a different versioning scheme. + if: env.BRANCH == 'origin/master' + run: sleep 900 + - name: Trigger dependency bump workflow + if: env.BRANCH == 'origin/master' + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.SPINNAKER_GITHUB_TOKEN }} + event-type: bump-dependencies + client-payload: '{"ref": "${{ github.ref }}"}' diff --git a/.github/workflows/release_info.sh b/.github/workflows/release_info.sh new file mode 100755 index 00000000000..3c3a158aa59 --- /dev/null +++ b/.github/workflows/release_info.sh @@ -0,0 +1,41 @@ +#!/bin/bash -x + +NEW_TAG=${GITHUB_REF/refs\/tags\//} +export NEW_TAG +echo "NEW_TAG=$NEW_TAG" +# Glob match previous tags which should be in the format v1.2.3. Avoids Deck's npm tagging.
+PREVIOUS_TAG=$(git describe --abbrev=0 --tags "${NEW_TAG}"^ --match 'v[0-9]*') +export PREVIOUS_TAG +echo "PREVIOUS_TAG=$PREVIOUS_TAG" +CHANGELOG=$(git log "$NEW_TAG"..."$PREVIOUS_TAG" --oneline) +export CHANGELOG +echo "CHANGELOG=$CHANGELOG" + +# Format the changelog so it's markdown compatible +CHANGELOG="${CHANGELOG//$'%'/%25}" +CHANGELOG="${CHANGELOG//$'\n'/%0A}" +CHANGELOG="${CHANGELOG//$'\r'/%0D}" + +# If the previous release tag is the same as this tag the user likely cut a release (and in the process created a tag), which means we can skip the need to create a release +SKIP_RELEASE=$([[ "$PREVIOUS_TAG" = "$NEW_TAG" ]] && echo "true" || echo "false") +export SKIP_RELEASE + +# https://github.com/fsaintjacques/semver-tool/blob/master/src/semver#L5-L14 +NAT='0|[1-9][0-9]*' +ALPHANUM='[0-9]*[A-Za-z-][0-9A-Za-z-]*' +IDENT="$NAT|$ALPHANUM" +FIELD='[0-9A-Za-z-]+' +SEMVER_REGEX="\ +^[vV]?\ +($NAT)\\.($NAT)\\.($NAT)\ +(\\-(${IDENT})(\\.(${IDENT}))*)?\ +(\\+${FIELD}(\\.${FIELD})*)?$" + +# Used in downstream steps to determine if the release should be marked as a "prerelease" and if the build should build candidate release artifacts +IS_CANDIDATE=$([[ $NEW_TAG =~ $SEMVER_REGEX && -n ${BASH_REMATCH[4]} ]] && echo "true" || echo "false") +export IS_CANDIDATE + +# This is the version string we will pass to the build, trim off leading 'v' if present +RELEASE_VERSION=$([[ $NEW_TAG =~ $SEMVER_REGEX ]] && echo "${NEW_TAG:1}" || echo "${NEW_TAG}") +export RELEASE_VERSION +echo "RELEASE_VERSION=$RELEASE_VERSION" diff --git a/.gitignore b/.gitignore index 0d7617240f5..bf71aba9d5b 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,5 @@ es-tmp/ clouddriver-oracle-bmcs/bmcs-sdk gatling.conf .DS_Store +/plugins/ +*/test-tar/* diff --git a/.idea/README.md b/.idea/README.md new file mode 100644 index 00000000000..791237770d0 --- /dev/null +++ b/.idea/README.md @@ -0,0 +1,10 @@ +# Spinnaker IntelliJ IDEA files + +IntelliJ IDEA will modify some of these files from their checked-in versions when the project is +opened. To work around this, the Spinnaker Gradle plugin will mark these files in Git as "assume +unchanged", telling Git to ignore any local changes. If you want to commit changes to these files, +you will need to undo that. 
+ +```bash +$ git update-index --no-assume-unchanged $FILENAME +``` diff --git a/.idea/compiler.xml b/.idea/compiler.xml new file mode 100644 index 00000000000..a1757ae52c7 --- /dev/null +++ b/.idea/compiler.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/copyright/ALS2.xml b/.idea/copyright/ALS2.xml new file mode 100644 index 00000000000..2f6849bb338 --- /dev/null +++ b/.idea/copyright/ALS2.xml @@ -0,0 +1,6 @@ + + + \ No newline at end of file diff --git a/.idea/copyright/profiles_settings.xml b/.idea/copyright/profiles_settings.xml new file mode 100644 index 00000000000..5c6994bea20 --- /dev/null +++ b/.idea/copyright/profiles_settings.xml @@ -0,0 +1,3 @@ + + \ No newline at end of file diff --git a/.idea/google-java-format.xml b/.idea/google-java-format.xml new file mode 100644 index 00000000000..4a0e553c729 --- /dev/null +++ b/.idea/google-java-format.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/gradle.xml b/.idea/gradle.xml new file mode 100644 index 00000000000..4aa6c33c739 --- /dev/null +++ b/.idea/gradle.xml @@ -0,0 +1,12 @@ + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 00000000000..35eb1ddfbbc --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + \ No newline at end of file diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 00000000000..501cf50af92 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,74 @@ +queue_rules: + - name: default + merge_method: squash + queue_conditions: + - check-success=build + - check-success=it-test + +pull_request_rules: + - name: Make sure PRs are up to date before merging + description: This automatically updates PRs when they are out-of-date with the + base branch to avoid semantic conflicts (next step is using a merge queue).
conditions: [] + actions: + update: + - name: Automatically merge backports to releases on successful build + conditions: + - base~=^(release-) + - head~=^mergify\/bp\/ + - "author=mergify[bot]" + actions: + queue: + name: default + label: + add: ["auto merged"] + - name: Automatically merge on CI success and review + conditions: + - base=master + - "label=ready to merge" + - "approved-reviews-by=@oss-approvers" + - "#approved-reviews-by>=1" + actions: + queue: + name: default + label: + add: ["auto merged"] + - name: Automatically merge release branch changes on CI success and release manager review + conditions: + - base~=^release- + - "label=ready to merge" + - "approved-reviews-by=@release-managers" + actions: + queue: + name: default + label: + add: ["auto merged"] + - name: Automatically merge PRs from maintainers on CI success and review + conditions: + - base=master + - "label=ready to merge" + - "author=@oss-approvers" + - "#approved-reviews-by>=1" + actions: + queue: + name: default + label: + add: ["auto merged"] + - name: Automatically merge autobump PRs on CI success + conditions: + - base~=^(master|release-) + - "label~=autobump-*" + - "author:spinnakerbot" + actions: + queue: + name: default + label: + add: ["auto merged"] + - name: Request reviews for autobump PRs on CI failure + conditions: + - base~=^(master|release-) + - "label~=autobump-*" + - base=master + actions: + request_reviews: + teams: ["oss-approvers"] diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b99bc1b055e..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: java -jdk: -- oraclejdk8 -sudo: required -dist: trusty -install: gradle/installViaTravis.sh -script: gradle/buildViaTravis.sh -before_cache: gradle/prepCaches.sh -cache: - directories: - - $HOME/.gradle -notifications: - slack: - secure: JtMdRjbeLkUjKe/IjiB7UEvEcOcKHT6lAVT31Hn5Kkw/U28JuscxOYTSD79b66Kxnq8kDFiWyhyUP5PfLp4+maX1zrYdieX0WhUkknsEvfP3Evcfcp6wYNexmxuTn3cmFLcSxKOQSr0KJ93YmBCHaGk/d2zt9NxhpXabtKptMfPrHFf9YvQMAtTLjU6U4pDvBOJGcW6KSD7ZwPYJXvyDCabm3EhCLG09IpJNj9P1XtpEBRA8FSWqLbAmmSx2yLsWfGvuf94niJ7C4xnAt2IRUTtYPwLv0H/ezptKLOhN1QOlkl784f7KHMimETyEFAnE94sjCJUQt32oB/nvowUjWtVajUKYYR0BmjtcD1jwS+nOwy+5sfBb8DN0JkEVijWoxXdlxY1jUZrGy0oF51UFxHvZYMxr20ScDqM+4SOq+TvRHpygRT3nRlcP8HugJErF1La8+sboDuTGy8Z6CI7or55/xrrXAhXolimc7s2TKNTpSqA9tGLehTBFcc/HDyG2MdvNxzCXNb4NHGKtLFvULI5I3nsr0eWpbmeWOGIeqG5dX9zr/YvyMfAXQf8qScQBQlwdS671lV2roJYZooZf5WI1wE/Bb3qApPXqSlW6EkyCUxRgJ4lCAgpbpU0B/ChyoKbFokRviinQFoVPQpkLiPGZMmrqRUnxh/NKN2OZymw= -env: - global: - - secure: rItFfaK85MWIa0Dp9nsOwK5670d2JFA0bqfTCSuJNKfaFi4+45nQFUcVOccR2zY7oKCyylXxiDZKCz2P1j4gY+cYcxc0gC5VZzrGaP3Zde9EFdd3x9L4rxFxmjW09LNxFn1ilH8+KUE56W5Ak8o+9x9oYdEqP/B9N5omSf31mJtOveYCm6I8YHyEDI9/UznwzZMYNMTWYZyaOQZl8TVZVJS+oWBLgw4QlsllWc4J9RoPizFe51kCFgB2E8Ze0e05Kr76oZJ+64kBov36sUaOcSOls6eJ6VPN8lVRG9EQi75NnGlbmZvg8MT19v6uByuk14QvRbvr/B5lRC6GleH+kQ9ZrSFnLhYUMsq3tDrhdnsUtPZOYe93Tt/CsDhxuGpFsRYG8Itc8bDnyMbdhi0Eg5tTY/hlTG+e3/aUmIN5Ea/eqPmzGOcAzbLsC+zVx7FcC0C88EBQ/L/UDmH2htVs3L0MvBTquzm3suJ433eT8XoDV0YyfQ3mzf2PitJOVf+QNJxpLC1NwLmgca73HRgbsZ1pqkX/P6roOSZWdrMy+jwLupqxGwOuDR2yc7YILVEkFm2qfY67xs+r+ZMqVM2iB3mqwRIxLop8M0OvWnxS7VhfUD3ti5LGlGebMXLQXPJIsDNTHCTwephTFutvAlkr175VOyODAk32PsT5NWUkZXk= - - secure: 
WGxAwuZM9idIQLUm69HfHZ1p+tQjTYVCXFNbIojrgcdO62z9UCIzpc+bPqYNI9KsxkWqtRK61JFRVh+OTN3Ua5g36dVIRG9Qs1N4RqkkIXpXlL6XU89J1BnLCHUEp3/JeD76I0rFVJSpVKea1clB45+VahgW8FkWKeNhJNHgDpQfifdVU0db/Hr4/423n5KHeOyek8MevO9EiQts8Fd4XQrYIcAfF+FIdQROzfEMEMDh8tvKCDzDkYqgtm2XjIPnBK2PzMbas5l8wzknMFmPaRnp3LC/etFyGsY2qdaEp5YrLVPtQMd4pZLA7x3xUXn7k1wRMAswvo2AuRny2CMSgf7xhU1Rf2HG5qUr2r38os9a2729WBslVkoeMW5tKDfQko425fUdhkeSpQbqVa1DPgMohdg9Of+c4jvQLE2rjSFev1RbKDAWRtw+/KSF3Ghz9O18k9glZYtNjmBNZzrrRGwXnoI+jldZAyEN7smpfeqdR3wa3jIwea3O7C+xmsI75vDI7R+0XGJkGs+2RjS/xISgXqD+MoiZnXVqsDlUCXNDEnWIGZMMg41FjYzmkWK2YzgZepuk4x5qQpUBZNe8QmHee5/pBHigtmC474nie4ciijtAQNiLoBKBlHnLUlbgPo1zODUKGa0K5ewoE8gASHZStVzAWtG4YFbqNVNdAEk= -git: - depth: 250 \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000000..80e4dcae7da --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,11 @@ +clouddriver-kubernetes/ @clanesf @german-muzquiz +clouddriver-google/ @Nirmalyasen @plumpy @rebala @skandragon +clouddriver-cloudfoundry/ @zachsmith1 +clouddriver-appengine/ @zachsmith1 +clouddriver-lambda/ @zachsmith1 + +clouddriver-saga/ @cfieber +clouddriver-core/ @cfieber + +clouddriver-aws/ @jeyrschabu @aravindmd @ajordens +clouddriver-titus/ @jeyrschabu @aravindmd @ajordens diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index f8abbaad0ca..00000000000 --- a/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -FROM openjdk:8 - -MAINTAINER delivery-engineering@netflix.com - -COPY . workdir/ - -WORKDIR workdir - -RUN GRADLE_USER_HOME=cache ./gradlew buildDeb -x test && \ - dpkg -i ./clouddriver-web/build/distributions/*.deb && \ - cd .. && \ - rm -rf workdir && \ - apt-get -y update && \ - apt-get -y install apt-transport-https && \ - echo "deb https://packages.cloud.google.com/apt cloud-sdk-trusty main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ - wget https://packages.cloud.google.com/apt/doc/apt-key.gpg && \ - apt-key add apt-key.gpg && \ - apt-get -y update && \ - apt-get -y install python2.7 unzip ca-certificates google-cloud-sdk && \ - apt-get clean - -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ - chmod +x kubectl && \ - mv ./kubectl /usr/local/bin/kubectl - -RUN curl -o heptio-authenticator-aws https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/bin/linux/amd64/heptio-authenticator-aws && \ - chmod +x ./heptio-authenticator-aws && \ - mv ./heptio-authenticator-aws /usr/local/bin/heptio-authenticator-aws - -ENV PATH "$PATH:/usr/local/bin/heptio-authenticator-aws" - -CMD ["/opt/clouddriver/bin/clouddriver"] diff --git a/Dockerfile.compile b/Dockerfile.compile new file mode 100644 index 00000000000..3298b97566b --- /dev/null +++ b/Dockerfile.compile @@ -0,0 +1,8 @@ +FROM ubuntu:jammy +RUN apt-get update && apt-get install -y \ + openjdk-17-jdk \ + && rm -rf /var/lib/apt/lists/* +LABEL maintainer="sig-platform@spinnaker.io" +ENV GRADLE_USER_HOME /workspace/.gradle +ENV GRADLE_OPTS "-Xmx12g -Xms12g" +CMD ./gradlew --no-daemon clouddriver-web:installDist -x test diff --git a/Dockerfile.slim b/Dockerfile.slim index 3f779305819..17b9fcc862d 100644 --- a/Dockerfile.slim +++ b/Dockerfile.slim @@ -1,32 +1,62 @@ -FROM openjdk:8-jdk-alpine - -MAINTAINER delivery-engineering@netflix.com - -COPY ./clouddriver-web/build/install/clouddriver /opt/clouddriver - -RUN apk --no-cache add --update bash wget unzip 'python2>2.7.9' && \ - wget -nv 
https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && \ - unzip -qq google-cloud-sdk.zip -d /opt && \ - rm google-cloud-sdk.zip && \ - CLOUDSDK_PYTHON="python2.7" /opt/google-cloud-sdk/install.sh --usage-reporting=false --bash-completion=false --additional-components app-engine-java && \ - rm -rf ~/.config/gcloud - -RUN wget https://storage.googleapis.com/kubernetes-release/release/stable.txt && wget https://storage.googleapis.com/kubernetes-release/release/$(cat stable.txt)/bin/linux/amd64/kubectl && \ - rm stable.txt && \ - chmod +x kubectl && \ - mv ./kubectl /usr/local/bin/kubectl - -RUN wget https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/bin/linux/amd64/heptio-authenticator-aws && \ - chmod +x ./heptio-authenticator-aws && \ - mv ./heptio-authenticator-aws /usr/local/bin/heptio-authenticator-aws - -ENV PATH "$PATH:/usr/local/bin/heptio-authenticator-aws" - -ENV PATH=$PATH:/opt/google-cloud-sdk/bin/ - -RUN adduser -D -S spinnaker +FROM python:3.12-alpine3.20 +LABEL maintainer="sig-platform@spinnaker.io" +ARG TARGETARCH + +ENV KUBECTL_DEFAULT_RELEASE=1.22.17 +ENV KUBECTL_RELEASES="${KUBECTL_DEFAULT_RELEASE} 1.26.12 1.27.9 1.28.5 1.29.0" +ENV AWS_CLI_VERSION=2.15.57 +ENV AWS_AIM_AUTHENTICATOR_VERSION=0.6.14 +ENV GOOGLE_CLOUD_SDK_VERSION=476.0.0 +ENV ECR_TOKEN_VERSION=v1.0.2 + +ENV PATH="$PATH:/usr/local/bin/:/opt/google-cloud-sdk/bin/:/usr/local/bin/aws-iam-authenticator" + +RUN apk update \ + && apk upgrade \ + && apk --no-cache add --update \ + bash \ + ca-certificates \ + curl \ + wget \ + openjdk17 \ + git \ + openssh-client \ + unzip + +# AWS CLI 2 +RUN apk add aws-cli=${AWS_CLI_VERSION}-r0 + +# Google cloud SDK +RUN [ $TARGETARCH == 'amd64' ] && export GCP_ARCH="x86_64" || export GCP_ARCH="arm" \ + && wget -nv https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-${GCP_ARCH}.tar.gz \ + && mkdir -p /opt && cd /opt \ + && tar -xzf /google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-${GCP_ARCH}.tar.gz \ + && rm /google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-${GCP_ARCH}.tar.gz \ + && CLOUDSDK_PYTHON="python3" /opt/google-cloud-sdk/install.sh --usage-reporting=false --bash-completion=false \ + --additional-components app-engine-java app-engine-go gke-gcloud-auth-plugin \ + && rm -rf ~/.config/gcloud \ + && rm -rf /opt/google-cloud-sdk/.install/.backup + +# kubectl + AWS IAM authenticator +RUN for version in $KUBECTL_RELEASES; do \ + release_version=$(echo ${version} | cut -d. -f1,2); \ + wget -nv https://cdn.dl.k8s.io/release/v${version}/bin/linux/${TARGETARCH}/kubectl -O /usr/local/bin/kubectl-${release_version}; \ + chmod +x /usr/local/bin/kubectl-${release_version}; \ + done \ + && ln -sf "/usr/local/bin/kubectl-$(echo ${KUBECTL_DEFAULT_RELEASE} | cut -d. 
-f1,2)" /usr/local/bin/kubectl \ + && wget -nv -O aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v${AWS_AIM_AUTHENTICATOR_VERSION}/aws-iam-authenticator_${AWS_AIM_AUTHENTICATOR_VERSION}_linux_${TARGETARCH} \ + && chmod +x ./aws-iam-authenticator \ + && mv ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator\ + && ln -sf /usr/local/bin/aws-iam-authenticator /usr/local/bin/heptio-authenticator-aws + +RUN rm /var/cache/apk/* + +RUN addgroup -S -g 10111 spinnaker +RUN adduser -S -G spinnaker -u 10111 spinnaker + +COPY clouddriver-web/build/install/clouddriver /opt/clouddriver +RUN mkdir -p /opt/clouddriver/plugins && chown -R spinnaker:nogroup /opt/clouddriver/plugins USER spinnaker - -WORKDIR /home/spinnaker +HEALTHCHECK CMD curl --fail http://localhost:7002/health CMD ["/opt/clouddriver/bin/clouddriver"] diff --git a/Dockerfile.ubuntu b/Dockerfile.ubuntu new file mode 100644 index 00000000000..27f33038b2a --- /dev/null +++ b/Dockerfile.ubuntu @@ -0,0 +1,66 @@ +FROM ubuntu:jammy +LABEL maintainer="sig-platform@spinnaker.io" +ARG TARGETARCH +ENV GOOGLE_CLOUD_SDK_VERSION=476.0.0 +ENV PATH="$PATH:/opt/google-cloud-sdk/bin/" +ENV KUBECTL_DEFAULT_RELEASE=1.22.17 +ENV KUBECTL_RELEASES="${KUBECTL_DEFAULT_RELEASE} 1.26.12 1.27.9 1.28.5 1.29.0" +ENV AWS_CLI_VERSION=2.15.57 +ENV AWS_AIM_AUTHENTICATOR_VERSION=0.6.14 + +RUN apt-get update && apt-get install -y curl gnupg && \ + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + echo "deb https://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/cloud-sdk.list && \ + apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y \ + curl \ + openjdk-17-jre-headless \ + wget \ + python3-pip \ + python3 \ + git \ + openssh-client \ + unzip && \ + rm -rf ~/.config/gcloud + +# AWS CLI 2 +RUN if [ "${TARGETARCH}" = "arm64" ]; then \ + wget -nv -O "awscliv2.zip" "https://awscli.amazonaws.com/awscli-exe-linux-aarch64-${AWS_CLI_VERSION}.zip"; \ + else \ + wget -nv -O "awscliv2.zip" "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${AWS_CLI_VERSION}.zip"; \ + fi && \ + unzip awscliv2.zip && \ + ./aws/install && \ + rm -rf ./awscliv2.zip ./aws + +# kubectl + AWS IAM authenticator +RUN for version in $KUBECTL_RELEASES; do \ + release_version=$(echo ${version} | cut -d. -f1,2); \ + wget -nv https://cdn.dl.k8s.io/release/v${version}/bin/linux/${TARGETARCH}/kubectl -O /usr/local/bin/kubectl-${release_version}; \ + chmod +x /usr/local/bin/kubectl-${release_version}; \ + done \ + && ln -sf "/usr/local/bin/kubectl-$(echo ${KUBECTL_DEFAULT_RELEASE} | cut -d. 
-f1,2)" /usr/local/bin/kubectl \ + && wget -nv -O aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v${AWS_AIM_AUTHENTICATOR_VERSION}/aws-iam-authenticator_${AWS_AIM_AUTHENTICATOR_VERSION}_linux_${TARGETARCH} \ + && chmod +x ./aws-iam-authenticator \ + && mv ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator\ + && ln -sf /usr/local/bin/aws-iam-authenticator /usr/local/bin/heptio-authenticator-aws + +# Google cloud SDK +RUN [ $TARGETARCH = 'amd64' ] && export GCP_ARCH="x86_64" || export GCP_ARCH="arm" \ + && wget -nv https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-${GCP_ARCH}.tar.gz \ + && mkdir -p /opt && cd /opt \ + && tar -xzf /google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-${GCP_ARCH}.tar.gz \ + && rm /google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-${GCP_ARCH}.tar.gz \ + && CLOUDSDK_PYTHON="python3" /opt/google-cloud-sdk/install.sh --usage-reporting=false --bash-completion=false \ + --additional-components app-engine-java app-engine-go gke-gcloud-auth-plugin \ + && rm -rf ~/.config/gcloud \ + && rm -rf /opt/google-cloud-sdk/.install/.backup + + +RUN adduser --system --uid 10111 --group spinnaker +COPY clouddriver-web/build/install/clouddriver /opt/clouddriver +RUN mkdir -p /opt/clouddriver/plugins && chown -R spinnaker:nogroup /opt/clouddriver/plugins +USER spinnaker +HEALTHCHECK CMD curl --fail http://localhost:7002/health +CMD ["/opt/clouddriver/bin/clouddriver"] diff --git a/OWNERS.md b/OWNERS.md new file mode 100644 index 00000000000..7ebe9a0557a --- /dev/null +++ b/OWNERS.md @@ -0,0 +1,4 @@ +ajordens +asher +cfieber +robzienert diff --git a/README.md b/README.md index be5d841bc2c..242d0732d0c 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ Spinnaker Cloud Provider Service ------------------------------------ -[![Build Status](https://api.travis-ci.org/spinnaker/clouddriver.svg?branch=master)](https://travis-ci.org/spinnaker/clouddriver) +[![Build Status](https://github.com/spinnaker/clouddriver/workflows/Branch%20Build/badge.svg)](https://github.com/spinnaker/clouddriver/actions) -This service is the main integration point for Spinnaker cloud providers like AWS, GCE, CloudFoundry, Azure etc. +This service is the main integration point for Spinnaker cloud providers like AWS, GCE, CloudFoundry, Azure etc. ### Developing with Intellij -To configure this repo as an Intellij project, run `./gradlew idea` in the root directory. +To configure this repo as an Intellij project, run `./gradlew idea` in the root directory. Some of the modules make use of [Lombok](https://projectlombok.org/), which will compile correctly on its own. However, for Intellij to make sense of the Lombok annotations, you'll need to install the [Lombok plugin](https://plugins.jetbrains.com/plugin/6317-lombok-plugin) as well as [check 'enable' under annotation processing](https://www.jetbrains.com/help/idea/configuring-annotation-processing.html#3). diff --git a/build.gradle b/build.gradle index d1215a81362..7d03bbf9ee9 100644 --- a/build.gradle +++ b/build.gradle @@ -1,5 +1,6 @@ /* * Copyright 2014 Netflix, Inc. + * Copyright (c) 2018, salesforce.com, inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,107 +15,88 @@ * limitations under the License. 
*/ -buildscript { - ext { - springBootVersion = "1.5.10.RELEASE" - kotlinVersion = "1.2.41" - junitPlatformVersion = "1.0.2" - } - repositories { - mavenCentral() - jcenter() - maven { url "http://spinnaker.bintray.com/gradle" } - maven { url "https://plugins.gradle.org/m2/" } - } - dependencies { - classpath 'com.netflix.spinnaker.gradle:spinnaker-gradle-project:4.2.0' - classpath "org.springframework.boot:spring-boot-gradle-plugin:${springBootVersion}" - classpath "org.junit.platform:junit-platform-gradle-plugin:${junitPlatformVersion}" - classpath "com.netflix.nebula:nebula-kotlin-plugin:${kotlinVersion}" - } +plugins { + id 'io.spinnaker.project' version "$spinnakerGradleVersion" apply false + id 'org.jetbrains.kotlin.jvm' version "$kotlinVersion" + id 'org.jetbrains.kotlin.plugin.allopen' version "$kotlinVersion" apply false + id "com.google.protobuf" version "0.8.12" apply false } allprojects { - group = "com.netflix.spinnaker.clouddriver" - apply plugin: 'spinnaker.project' - apply plugin: 'groovy' + apply plugin: 'io.spinnaker.project' +} - ext { - spinnakerDependenciesVersion = project.hasProperty('spinnakerDependenciesVersion') ? project.property('spinnakerDependenciesVersion') : '1.0.10' - } +subprojects { + group = "io.spinnaker.clouddriver" - def checkLocalVersions = [spinnakerDependenciesVersion: spinnakerDependenciesVersion] - if (ext.has('versions')) { - def extVers = ext.get('versions') - if (extVers instanceof Map) { - checkLocalVersions.putAll(extVers) - } - } + if (name != "clouddriver-bom" && name != "clouddriver-api") { + apply plugin: 'java-library' + apply plugin: 'groovy' + apply plugin: 'kotlin' + apply plugin: "kotlin-allopen" + apply plugin: "jacoco" - def localVersions = checkLocalVersions.findAll { it.value.endsWith('-SNAPSHOT') } - if (localVersions) { - logger.info("Enabling mavenLocal repo for $localVersions") - repositories { - mavenLocal() - } - } + sourceSets.main.java.srcDirs = [] + sourceSets.main.groovy.srcDirs += ["src/main/java"] - spinnaker { - dependenciesVersion = spinnakerDependenciesVersion - } + dependencies { + api enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") - test { - testLogging { - exceptionFormat = 'full' - } - if (project.hasProperty('slowTest')) { - long slow = 250 - try { - slow = Long.parseLong(project.property('slowTest')) - } catch (Exception ex) { - } - afterTest { desc, result -> - long duration = result.getEndTime() - result.getStartTime() - if (duration > slow) { - logger.warn("test exceeded $slow ms: $desc.className :: $desc.name ($duration milliseconds)") - } - } + compileOnly enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + compileOnly "org.projectlombok:lombok" + + annotationProcessor enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + annotationProcessor "org.projectlombok:lombok" + annotationProcessor("org.springframework.boot:spring-boot-configuration-processor") + + testAnnotationProcessor enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + testAnnotationProcessor "org.projectlombok:lombok" + + testCompileOnly enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + testCompileOnly "org.projectlombok:lombok" + + testRuntimeOnly enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine" } - minHeapSize = "512m" - maxHeapSize = "512m" - } -} -subprojects { project -> - - configurations { - all { - exclude group: 'javax.servlet', module: 'servlet-api' - exclude group: 'javax.ws.rs', module: 'jsr311-api' - 
resolutionStrategy { - force 'org.antlr:antlr-runtime:3.5.2' - eachDependency { - if (it.requested.group == 'asm' || it.requested.group == 'org.ow2.asm') { - it.useTarget group: 'org.ow2.asm', name: 'asm-all', version: '5.0.3' - } - if (it.requested.group == 'junit') { - it.useTarget group: 'junit', name: 'junit', version: '4.12' - } - if (it.requested.group == 'cglib' || it.requested.name == 'cglib') { - it.useTarget group: 'cglib', name: 'cglib', version: '3.2.0' + test { + useJUnitPlatform() + testLogging { + exceptionFormat = 'full' + if (project.hasProperty('slowTest')) { + long slow = 250 + try { + slow = Long.parseLong(project.property('slowTest')) + } catch (Exception ex) { } - if (it.requested.group == 'com.google.guava') { - it.useTarget group: 'com.google.guava', name: 'guava', version: '18.0' - } - if (it.requested.group == 'antlr') { - it.useTarget group: 'org.antlr', name: it.requested.name, version: '3.5.2' - } - if (it.requested.group == 'org.apache.xbean') { - it.useVersion '4.3' + afterTest { desc, result -> + long duration = result.getEndTime() - result.getStartTime() + if (duration > slow) { + logger.warn("test exceeded $slow ms: $desc.className :: $desc.name ($duration milliseconds)") + } } } + + } + minHeapSize = "512m" + maxHeapSize = "1g" + maxParallelForks = 4 + jacoco { + enabled = project.hasProperty('testCoverage') } } + + // The test report requires tests to have run first + jacocoTestReport { + dependsOn test + } + } + + if ([korkVersion, fiatVersion].any { it.endsWith("-SNAPSHOT") }) { + logger.info("Enabling mavenLocal") + repositories { + mavenLocal() + } } tasks.withType(JavaExec) { @@ -122,11 +104,6 @@ subprojects { project -> jvmArgs '-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=7102' } } - - dependencies { - compile spinnaker.dependency('groovy') - spinnaker.group('test') - } } -defaultTasks ':clouddriver-web:bootRun' +defaultTasks ':clouddriver-web:run' diff --git a/cats/cats-core/cats-core.gradle b/cats/cats-core/cats-core.gradle index afb2f5657c1..5d2e23405b7 100644 --- a/cats/cats-core/cats-core.gradle +++ b/cats/cats-core/cats-core.gradle @@ -1,6 +1,13 @@ dependencies { - compile spinnaker.dependency('slf4jApi') - compile spinnaker.dependency('jacksonAnnotations') + implementation project(":clouddriver-api") - testCompile project(":cats:cats-test") + implementation "org.slf4j:slf4j-api" + implementation "com.fasterxml.jackson.core:jackson-annotations" + implementation "org.apache.groovy:groovy" + implementation "com.google.guava:guava" + + testImplementation project(":cats:cats-test") + + testImplementation "org.spockframework:spock-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentController.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentController.java index cff3049eba2..952ee714d9f 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentController.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentController.java @@ -20,22 +20,26 @@ import com.netflix.spinnaker.cats.provider.ProviderRegistry; /** - * AgentController schedules an AgentExecution for each Agent in each Provider in the ProviderRegistry. - * - * When the AgentControllers AgentExecution is invoked, it will trigger a load and cache cycle for that agent. + * AgentController schedules an AgentExecution for each Agent in each Provider in the + * ProviderRegistry. + * + *

<p>When the AgentController's AgentExecution is invoked, it will trigger a load and cache cycle + * for that agent. */ public class AgentController { - public AgentController(ProviderRegistry providerRegistry, - AgentScheduler agentScheduler, - ExecutionInstrumentation executionInstrumentation) { - for (Provider provider : providerRegistry.getProviders()) { - if (provider instanceof AgentSchedulerAware) { - ((AgentSchedulerAware)provider).setAgentScheduler(agentScheduler); - } + public AgentController( + ProviderRegistry providerRegistry, + AgentScheduler agentScheduler, + ExecutionInstrumentation executionInstrumentation) { + for (Provider provider : providerRegistry.getProviders()) { + if (provider instanceof AgentSchedulerAware) { + ((AgentSchedulerAware) provider).setAgentScheduler(agentScheduler); + } - for (Agent agent : provider.getAgents()) { - agentScheduler.schedule(agent, agent.getAgentExecution(providerRegistry), executionInstrumentation); - } - } + for (Agent agent : provider.getAgents()) { + agentScheduler.schedule( + agent, agent.getAgentExecution(providerRegistry), executionInstrumentation); + } } + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentDataType.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentDataType.java deleted file mode 100644 index 813826f850f..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentDataType.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.agent; - -/** - * Describes both the type name as well as authority for an Agent's provided data. - * - * If an agent is an Authoritative source of data, then it's resulting data set will be - * considered the current complete set for that data source. If an agent is an Informative - * source of data, its results will contribute to the data set for that type, but is never - * considered the complete set of data, so will not result in deletions when elements are - * no longer present.
- */ -public class AgentDataType { - public static enum Authority { - AUTHORITATIVE, - INFORMATIVE; - - public AgentDataType forType(String typeName) { - return new AgentDataType(typeName, this); - } - } - - private final String typeName; - private final Authority authority; - - public AgentDataType(String typeName, Authority authority) { - this.typeName = typeName; - this.authority = authority; - } - - public String getTypeName() { - return typeName; - } - - public Authority getAuthority() { - return authority; - } -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentIntervalAware.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentIntervalAware.java index 4a41fd611dd..125dd99bcc5 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentIntervalAware.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentIntervalAware.java @@ -15,19 +15,16 @@ */ /** - * Identifies an entity (usually an Agent) that can report what interval it wants to be scheduled at. + * Identifies an entity (usually an Agent) that can report what interval it wants to be scheduled + * at. */ package com.netflix.spinnaker.cats.agent; public interface AgentIntervalAware { - /** - * @return Agent's interval to be scheduled at in milliseconds. - */ + /** @return Agent's interval to be scheduled at in milliseconds. */ Long getAgentInterval(); - /** - * @return Agent's error interval to be scheduled at in milliseconds. - */ + /** @return Agent's error interval to be scheduled at in milliseconds. */ default Long getAgentErrorInterval() { return getAgentInterval(); } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentScheduler.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentScheduler.java deleted file mode 100644 index 7d4ac7a0d8f..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentScheduler.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.agent; - -/** - * An AgentScheduler manages the execution of a CachingAgent. - */ -public interface AgentScheduler<T extends AgentLock> { - void schedule(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation); - default void unschedule(Agent agent) {}; - - /** - * @return True iff this scheduler supports synchronization between LoadData and OnDemand cache updates. - */ - default boolean isAtomic() { return false; }; - - /** - * @param agent The agent being locked. - * - * @return A "Lock" that will allow exclusive access to updating this agent's cache data. null iff isAtomic == false. - */ - default T tryLock(Agent agent) { return null; }; - - /** - * @param lock The lock being released. - * - * @return True iff the lock was still in our possession when the release call was made.
- */ - default boolean tryRelease(T lock) { return false; }; - - /** - * @param lock The lock being checked for validity. - * - * @return True iff the lock is still in our possession. - */ - default boolean lockValid(T lock) { return false; }; -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CacheResult.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CacheResult.java deleted file mode 100644 index 3df1e21233f..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CacheResult.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.agent; - -import com.netflix.spinnaker.cats.cache.CacheData; - -import java.util.Collection; -import java.util.Collections; -import java.util.Map; - -/** - * The result of a CachingAgent run. - */ -public interface CacheResult { - /** - * @return The CacheDatas to cache, keyed by item type. - */ - Map> getCacheResults(); - - /** - * Provides a means to explicitly evict items as a result of a CachingAgent execution. - * - * Note: Eviction will already occur based on the values in getCacheResults for all the types - * that the CachingAgent authoritatively caches - this collection is for additional items - * that were potentially cached out of band of a complete caching run. - * @return The ids of items that should be explicitly evicted. - */ - default Map> getEvictions() { return Collections.emptyMap(); } -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CachingAgent.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CachingAgent.java deleted file mode 100644 index 33119467321..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CachingAgent.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.agent; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.cats.provider.ProviderRegistry; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * A CachingAgent loads one or more types of data. - *
<p/>
- * The data set for a caching agent is scoped to the provider and agent type. For example - * an agent might load clusters for the AWS provider, and be scoped to a particular account - * and region. - */ -public interface CachingAgent extends Agent { - /** - * @return the data types this Agent returns - * @see com.netflix.spinnaker.cats.agent.AgentDataType.Authority - */ - Collection getProvidedDataTypes(); - - /** - * Triggered by an AgentScheduler to tell this Agent to load its data. - * - * @param providerCache Cache associated with this Agent's provider - * @return the complete set of data for this Agent. - */ - CacheResult loadData(ProviderCache providerCache); - - default Optional> getCacheKeyPatterns() { - return Optional.empty(); - } - - default AgentExecution getAgentExecution(ProviderRegistry providerRegistry) { - return new CacheExecution(providerRegistry); - } - - class CacheExecution implements AgentExecution { - private final Logger log = LoggerFactory.getLogger(CacheExecution.class); - private final ProviderRegistry providerRegistry; - - public CacheExecution(ProviderRegistry providerRegistry) { - this.providerRegistry = providerRegistry; - } - - @Override - public void executeAgent(Agent agent) { - storeAgentResult(agent, executeAgentWithoutStore(agent)); - } - - public CacheResult executeAgentWithoutStore(Agent agent) { - CachingAgent cachingAgent = (CachingAgent) agent; - ProviderCache cache = providerRegistry.getProviderCache(cachingAgent.getProviderName()); - - return cachingAgent.loadData(cache); - } - - public void storeAgentResult(Agent agent, CacheResult result) { - CachingAgent cachingAgent = (CachingAgent) agent; - ProviderCache cache = providerRegistry.getProviderCache(cachingAgent.getProviderName()); - Collection providedTypes = cachingAgent.getProvidedDataTypes(); - Collection authoritative = new HashSet<>(providedTypes.size()); - for (AgentDataType type : providedTypes) { - if (type.getAuthority() == AgentDataType.Authority.AUTHORITATIVE) { - authoritative.add(type.getTypeName()); - } - } - - - Optional> cacheKeyPatterns = cachingAgent.getCacheKeyPatterns(); - if (cacheKeyPatterns.isPresent()) { - for (String type : authoritative) { - String cacheKeyPatternForType = cacheKeyPatterns.get().get(type); - if (cacheKeyPatternForType != null) { - try { - Set cachedIdentifiersForType = result.getCacheResults().get(type) - .stream() - .map(CacheData::getId) - .collect(Collectors.toSet()); - - Collection evictableIdentifiers = cache.filterIdentifiers(type, cacheKeyPatternForType) - .stream() - .filter(i -> !cachedIdentifiersForType.contains(i)) - .collect(Collectors.toSet()); - - // any key that existed previously but was not re-cached by this agent is considered evictable - if (!evictableIdentifiers.isEmpty()) { - Collection evictionsForType = result.getEvictions().computeIfAbsent(type, evictableKeys -> new ArrayList<>()); - evictionsForType.addAll(evictableIdentifiers); - - log.debug("Evicting stale identifiers: {}", evictableIdentifiers); - } - } catch (Exception e) { - log.error("Failed to check for stale identifiers (type: {}, pattern: {}, agent: {})", type, cacheKeyPatternForType, agent, e); - } - } - } - } - - cache.putCacheResult(agent.getAgentType(), authoritative, result); - } - } -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentation.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentation.java index d8a0d638722..2de310ad7ca 100644 --- 
a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentation.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentation.java @@ -19,30 +19,30 @@ import java.util.Collection; public class CompositeExecutionInstrumentation implements ExecutionInstrumentation { - private final Collection instrumentations; + private final Collection instrumentations; - public CompositeExecutionInstrumentation(Collection instrumentations) { - this.instrumentations = instrumentations; - } + public CompositeExecutionInstrumentation(Collection instrumentations) { + this.instrumentations = instrumentations; + } - @Override - public void executionStarted(Agent agent) { - for (ExecutionInstrumentation exec : instrumentations) { - exec.executionStarted(agent); - } + @Override + public void executionStarted(Agent agent) { + for (ExecutionInstrumentation exec : instrumentations) { + exec.executionStarted(agent); } + } - @Override - public void executionCompleted(Agent agent, long elapsedMs) { - for (ExecutionInstrumentation exec : instrumentations) { - exec.executionCompleted(agent, elapsedMs); - } + @Override + public void executionCompleted(Agent agent, long elapsedMs) { + for (ExecutionInstrumentation exec : instrumentations) { + exec.executionCompleted(agent, elapsedMs); } + } - @Override - public void executionFailed(Agent agent, Throwable cause) { - for (ExecutionInstrumentation exec : instrumentations) { - exec.executionFailed(agent, cause); - } + @Override + public void executionFailed(Agent agent, Throwable cause, long elapsedMs) { + for (ExecutionInstrumentation exec : instrumentations) { + exec.executionFailed(agent, cause, elapsedMs); } + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultAgentScheduler.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultAgentScheduler.java index 746a51e6443..a01193f86f9 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultAgentScheduler.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultAgentScheduler.java @@ -16,9 +16,10 @@ package com.netflix.spinnaker.cats.agent; -import com.netflix.spinnaker.cats.module.CatsModuleAware; -import com.netflix.spinnaker.cats.thread.NamedThreadFactory; +import static com.netflix.spinnaker.cats.agent.ExecutionInstrumentation.elapsedTimeMs; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spinnaker.cats.module.CatsModuleAware; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; @@ -29,96 +30,112 @@ /** * An AgentScheduler that executes on a fixed interval. * - * This AgentScheduler will capture any exceptions thrown by the AgentExecution and - * report them to the provided ExecutionInstrumentation. + *
<p>
This AgentScheduler will capture any exceptions thrown by the AgentExecution and report them + * to the provided ExecutionInstrumentation. * - * An exception thrown while reporting executionFailure will abort the schedule for - * the CachingAgent. + *
<p>
An exception thrown while reporting executionFailure will abort the schedule for the + * CachingAgent. */ public class DefaultAgentScheduler extends CatsModuleAware implements AgentScheduler { - private static final long DEFAULT_INTERVAL = 60000; - - private final ScheduledExecutorService scheduledExecutorService; - private final long interval; - private final TimeUnit timeUnit; - private final Map agentFutures = new ConcurrentHashMap(); - - public DefaultAgentScheduler() { - this(DEFAULT_INTERVAL); - } - - public DefaultAgentScheduler(long interval) { - this(interval, TimeUnit.MILLISECONDS); - } - - public DefaultAgentScheduler(long interval, TimeUnit unit) { - this(Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors(), new NamedThreadFactory(DefaultAgentScheduler.class.getSimpleName())), interval, unit); + private static final long DEFAULT_INTERVAL = 60000; + + private final ScheduledExecutorService scheduledExecutorService; + private final long interval; + private final TimeUnit timeUnit; + private final Map agentFutures = new ConcurrentHashMap(); + + public DefaultAgentScheduler() { + this(DEFAULT_INTERVAL); + } + + public DefaultAgentScheduler(long interval) { + this(interval, TimeUnit.MILLISECONDS); + } + + public DefaultAgentScheduler(long interval, TimeUnit unit) { + this( + Executors.newScheduledThreadPool( + Runtime.getRuntime().availableProcessors(), + new ThreadFactoryBuilder() + .setNameFormat(DefaultAgentScheduler.class.getSimpleName() + "-%d") + .build()), + interval, + unit); + } + + public DefaultAgentScheduler( + ScheduledExecutorService scheduledExecutorService, long interval, TimeUnit timeUnit) { + this.scheduledExecutorService = scheduledExecutorService; + this.interval = interval; + this.timeUnit = timeUnit; + } + + @Override + public void schedule( + Agent agent, + AgentExecution agentExecution, + ExecutionInstrumentation executionInstrumentation) { + Long agentInterval = interval; + TimeUnit agentTimeUnit = timeUnit; + if (agent instanceof AgentIntervalAware) { + agentInterval = ((AgentIntervalAware) agent).getAgentInterval(); + agentTimeUnit = TimeUnit.MILLISECONDS; } - public DefaultAgentScheduler(ScheduledExecutorService scheduledExecutorService, long interval, TimeUnit timeUnit) { - this.scheduledExecutorService = scheduledExecutorService; - this.interval = interval; - this.timeUnit = timeUnit; + Future agentFuture = + scheduledExecutorService.scheduleAtFixedRate( + new AgentExecutionRunnable(agent, agentExecution, executionInstrumentation), + 0, + agentInterval, + agentTimeUnit); + + agentFutures.put(agent, agentFuture); + } + + @Override + public void unschedule(Agent agent) { + if (agentFutures.containsKey(agent)) { + agentFutures.get(agent).cancel(false); + agentFutures.remove(agent); } - - @Override - public void schedule(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { - Long agentInterval = interval; - TimeUnit agentTimeUnit = timeUnit; - if (agent instanceof AgentIntervalAware) { - agentInterval = ((AgentIntervalAware) agent).getAgentInterval(); - agentTimeUnit = TimeUnit.MILLISECONDS; - } - - Future agentFuture = - scheduledExecutorService.scheduleAtFixedRate(new AgentExecutionRunnable(agent, agentExecution, executionInstrumentation), 0, agentInterval, agentTimeUnit); - - agentFutures.put(agent, agentFuture); - } - - @Override - public void unschedule(Agent agent) { - if (agentFutures.containsKey(agent)) { - agentFutures.get(agent).cancel(false); - agentFutures.remove(agent); - } - } 
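executionFailed now receives the elapsed time as well (the executionFailed(Agent, Throwable, long) signature threaded through this diff), so an instrumentation can time every outcome uniformly. A hedged sketch of a custom implementation (the class name is invented):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TimingExecutionInstrumentation implements ExecutionInstrumentation {
  private static final Logger log =
      LoggerFactory.getLogger(TimingExecutionInstrumentation.class);

  @Override
  public void executionStarted(Agent agent) {
    log.debug("{} started", agent.getAgentType());
  }

  @Override
  public void executionCompleted(Agent agent, long elapsedMs) {
    log.debug("{} completed in {}ms", agent.getAgentType(), elapsedMs);
  }

  @Override
  public void executionFailed(Agent agent, Throwable cause, long elapsedMs) {
    // SLF4J treats the trailing throwable as the stack trace to log.
    log.warn("{} failed after {}ms", agent.getAgentType(), elapsedMs, cause);
  }
}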
- - @Override - public AgentLock tryLock(Agent agent) { - return null; - } - - @Override - public boolean tryRelease(AgentLock lock) { - return false; - } - - @Override - public boolean isAtomic() { - return false; + } + + @Override + public AgentLock tryLock(Agent agent) { + return null; + } + + @Override + public boolean tryRelease(AgentLock lock) { + return false; + } + + @Override + public boolean isAtomic() { + return false; + } + + private static class AgentExecutionRunnable implements Runnable { + private final Agent agent; + private final AgentExecution execution; + private final ExecutionInstrumentation executionInstrumentation; + + public AgentExecutionRunnable( + Agent agent, AgentExecution execution, ExecutionInstrumentation executionInstrumentation) { + this.agent = agent; + this.execution = execution; + this.executionInstrumentation = executionInstrumentation; } - private static class AgentExecutionRunnable implements Runnable { - private final Agent agent; - private final AgentExecution execution; - private final ExecutionInstrumentation executionInstrumentation; - - public AgentExecutionRunnable(Agent agent, AgentExecution execution, ExecutionInstrumentation executionInstrumentation) { - this.agent = agent; - this.execution = execution; - this.executionInstrumentation = executionInstrumentation; - } - - public void run() { - try { - executionInstrumentation.executionStarted(agent); - long startTime = System.nanoTime(); - execution.executeAgent(agent); - executionInstrumentation.executionCompleted(agent, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); - } catch (Throwable t) { - executionInstrumentation.executionFailed(agent, t); - } - } + public void run() { + long startTimeMs = System.currentTimeMillis(); + try { + executionInstrumentation.executionStarted(agent); + execution.executeAgent(agent); + executionInstrumentation.executionCompleted(agent, elapsedTimeMs(startTimeMs)); + } catch (Throwable t) { + executionInstrumentation.executionFailed(agent, t, elapsedTimeMs(startTimeMs)); + } } + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultCacheResult.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultCacheResult.java deleted file mode 100644 index ad8bd78a451..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/DefaultCacheResult.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.agent; - -import com.netflix.spinnaker.cats.cache.CacheData; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -/** - * An immutable CacheResult. 
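A hedged sketch of returning one from a hypothetical CachingAgent.loadData (type names, keys, and attributes are invented for illustration):

@Override
public CacheResult loadData(ProviderCache providerCache) {
  Map<String, Collection<CacheData>> results = new HashMap<>();
  results.put(
      "serverGroups",
      List.of(
          new DefaultCacheData(
              "aws:serverGroups:demo-v001",
              Map.of("capacity", 3),
              Map.of("instances", List.of("aws:instances:i-123")))));

  // Out-of-band eviction of a key this run knows is gone; authoritative types
  // are otherwise evicted automatically from getCacheResults alone.
  Map<String, Collection<String>> evictions = new HashMap<>();
  evictions.put("serverGroups", List.of("aws:serverGroups:demo-v000"));

  return new DefaultCacheResult(results, evictions);
}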
- */ -public class DefaultCacheResult implements CacheResult { - private final Map> cacheResults; - private final Map> evictions; - - public DefaultCacheResult(Map> cacheResults) { - this(cacheResults, new HashMap<>()); - } - public DefaultCacheResult(Map> cacheResults, Map> evictions) { - this.cacheResults = cacheResults; - this.evictions = evictions; - } - - @Override - public Map> getCacheResults() { - return cacheResults; - } - - @Override - public Map> getEvictions() { - return evictions; - } -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/ExecutionInstrumentation.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/ExecutionInstrumentation.java deleted file mode 100644 index 30bd4ea8488..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/ExecutionInstrumentation.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.agent; - -public interface ExecutionInstrumentation { - void executionStarted(Agent agent); - - void executionCompleted(Agent agent, long elapsedMs); - - void executionFailed(Agent agent, Throwable cause); -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/NoopExecutionInstrumentation.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/NoopExecutionInstrumentation.java index 33324e0fe6c..422356bc2fe 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/NoopExecutionInstrumentation.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/NoopExecutionInstrumentation.java @@ -17,18 +17,18 @@ package com.netflix.spinnaker.cats.agent; public class NoopExecutionInstrumentation implements ExecutionInstrumentation { - @Override - public void executionStarted(Agent agent) { - //noop - } + @Override + public void executionStarted(Agent agent) { + // noop + } - @Override - public void executionCompleted(Agent agent, long elapsedMs) { - //noop - } + @Override + public void executionCompleted(Agent agent, long elapsedMs) { + // noop + } - @Override - public void executionFailed(Agent agent, Throwable cause) { - //noop - } + @Override + public void executionFailed(Agent agent, Throwable cause, long elapsedMs) { + // noop + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/Cache.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/Cache.java deleted file mode 100644 index e7c6291c36d..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/Cache.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.cache; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; - -/** - * Cache provides view access to data keyed by type and identifier. - */ -public interface Cache { - /** - * Gets a single item from the cache by type and id - * - * @param type the type of the item - * @param id the id of the item - * @return the item matching the type and id - */ - CacheData get(String type, String id); - - CacheData get(String type, String id, CacheFilter cacheFilter); - - /** - * Determines if a specified id exists in the cache without loading the data. - * - * @param type the type of the item - * @param identifier the id of the item - * @return true iff the item is present in the cache - */ - default boolean exists(String type, String identifier) { - return !existingIdentifiers(type, Collections.singleton(identifier)).isEmpty(); - } - - /** - * Filters the supplied list of identifiers to only those that exist in the cache. - * - * @param type the type of the item - * @param identifiers the identifiers for the items - * @return the list of identifiers that are present in the cache from the provided identifiers - */ - default Collection existingIdentifiers(String type, String... identifiers) { - if (identifiers.length == 0) { - return Collections.emptySet(); - } - return existingIdentifiers(type, Arrays.asList(identifiers)); - } - - /** - * Filters the supplied list of identifiers to only those that exist in the cache. - * - * @param type the type of the item - * @param identifiers the identifiers for the items - * @return the list of identifiers that are present in the cache from the provided identifiers - */ - Collection existingIdentifiers(String type, Collection identifiers); - - /** - * Retrieves all the identifiers for a type - * - * @param type the type for which to retrieve identifiers - * @return the identifiers for the type - */ - Collection getIdentifiers(String type); - - /** - * Returns the identifiers for the specified type that match the provided glob. 
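A hedged usage sketch of this glob lookup combined with a relationship filter (the type name, account, and pattern are invented):

// Find every load balancer key for one account, then fetch those items
// without hydrating any relationships.
Collection<String> ids =
    cache.filterIdentifiers("loadBalancers", "aws:loadBalancers:my-account:*");
Collection<CacheData> items =
    cache.getAll("loadBalancers", ids, RelationshipCacheFilter.none());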
- * - * @param type The type for which to retrieve identifiers - * @param glob The glob to match against the identifiers - * @return the identifiers for the type that match the glob - */ - Collection filterIdentifiers(String type, String glob); - - /** - * Retrieves all the items for the specified type - * - * @param type the type for which to retrieve items - * @return all the items for the type - */ - Collection getAll(String type); - - Collection getAll(String type, CacheFilter cacheFilter); - - /** - * Retrieves the items for the specified type matching the provided identifiers - * - * @param type the type for which to retrieve items - * @param identifiers the identifiers - * @return the items matching the type and identifiers - */ - Collection getAll(String type, Collection identifiers); - - Collection getAll(String type, Collection identifiers, CacheFilter cacheFilter); - - /** - * Retrieves the items for the specified type matching the provided identifiers - * - * @param type the type for which to retrieve items - * @param identifiers the identifiers - * @return the items matching the type and identifiers - */ - Collection getAll(String type, String... identifiers); -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CacheData.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CacheData.java deleted file mode 100644 index c22cbcadb5b..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CacheData.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.cache; - -import java.util.Collection; -import java.util.Map; - -/** - * CacheData is stored in a Cache. - * Attributes are facts about the CacheData that can be updated by CachingAgents. - * Relationships are links to other CacheData. - * - * Note: Not all caches may support a per record ttl - */ -public interface CacheData { - String getId(); - - /** - * @return The ttl (in seconds) for this CacheData - */ - int getTtlSeconds(); - - Map getAttributes(); - - /** - * @return relationships for this CacheData, keyed by type returning a collection of ids for that type - */ - Map> getRelationships(); -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CompositeCache.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CompositeCache.java index 231e88f2cfc..fcf6e100ead 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CompositeCache.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CompositeCache.java @@ -16,147 +16,176 @@ package com.netflix.spinnaker.cats.cache; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; - -/** - * A cache that provides a unified view of multiples, merging items from each - * cache together. 
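A sketch of that unified view, assuming two InMemoryCache instances that each hold different facts about the same id:

import java.util.List;
import java.util.Map;

InMemoryCache ownerView = new InMemoryCache();
InMemoryCache contactView = new InMemoryCache();
ownerView.merge(
    "applications", new DefaultCacheData("app1", Map.of("owner", "team-a"), Map.of()));
contactView.merge(
    "applications", new DefaultCacheData("app1", Map.of("email", "a@example.com"), Map.of()));

Cache composite = new CompositeCache(List.of(ownerView, contactView));
// The merged item carries the union of attributes and relationships from
// every member cache that knows this id.
CacheData merged = composite.get("applications", "app1");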
- */ -public class CompositeCache implements Cache { +import java.util.*; - private final Collection caches; +/** A cache that provides a unified view of multiples, merging items from each cache together. */ +public class CompositeCache implements Cache { - public CompositeCache(Collection caches) { - this.caches = caches; + private final Collection caches; + + public CompositeCache(Collection caches) { + this.caches = caches; + } + + @Override + public boolean supportsGetAllByApplication() { + return caches.stream().allMatch(Cache::supportsGetAllByApplication); + } + + @Override + public CacheData get(String type, String id) { + return get(type, id, null); + } + + @Override + public CacheData get(String type, String id, CacheFilter cacheFilter) { + Collection elements = new ArrayList<>(caches.size()); + for (Cache cache : caches) { + CacheData element = cache.get(type, id, cacheFilter); + if (element != null) { + elements.add(element); + } } - - @Override - public CacheData get(String type, String id) { - return get(type, id, null); + if (elements.isEmpty()) { + return null; } - - @Override - public CacheData get(String type, String id, CacheFilter cacheFilter) { - Collection elements = new ArrayList<>(caches.size()); - for (Cache cache : caches) { - CacheData element = cache.get(type, id, cacheFilter); - if (element != null) { - elements.add(element); - } - } - if (elements.isEmpty()) { - return null; - } - return merge(id, elements); + return merge(id, elements); + } + + @Override + public Collection getAll(String type) { + return getAll(type, (CacheFilter) null); + } + + @Override + public Collection getAll(String type, CacheFilter cacheFilter) { + Map allItems = new HashMap<>(); + for (Cache cache : caches) { + allItems = merge(allItems, cache.getAll(type, cacheFilter)); } - - @Override - public Collection getAll(String type) { - return getAll(type, (CacheFilter) null); - + return allItems.values(); + } + + @Override + public Collection existingIdentifiers(String type, Collection ids) { + HashSet identifiers = new HashSet<>(ids.size()); + HashSet remainingIds = new HashSet<>(ids); + for (Cache cache : caches) { + Collection existing = cache.existingIdentifiers(type, remainingIds); + identifiers.addAll(existing); + + // minimize redis exists calls - if we've seen the identifier in at least + // one cache, then we know it exists + remainingIds.removeAll(existing); + if (remainingIds.isEmpty()) { + break; + } } - - @Override - public Collection getAll(String type, CacheFilter cacheFilter) { - Map allItems = new HashMap<>(); - for (Cache cache : caches) { - allItems = merge(allItems, cache.getAll(type, cacheFilter)); - } - return allItems.values(); + return identifiers; + } + + @Override + public Collection getIdentifiers(String type) { + HashSet identifiers = new HashSet<>(); + for (Cache cache : caches) { + identifiers.addAll(cache.getIdentifiers(type)); } - - @Override - public Collection existingIdentifiers(String type, Collection ids) { - HashSet identifiers = new HashSet<>(ids.size()); - HashSet remainingIds = new HashSet<>(ids); - for (Cache cache : caches) { - Collection existing = cache.existingIdentifiers(type, remainingIds); - identifiers.addAll(existing); - - //minimize redis exists calls - if we've seen the identifier in at least - // one cache, then we know it exists - remainingIds.removeAll(existing); - if (remainingIds.isEmpty()) { - break; - } - } - return identifiers; + return identifiers; + } + + @Override + public Collection filterIdentifiers(String type, String glob) 
{ + HashSet identifiers = new HashSet<>(); + for (Cache cache : caches) { + identifiers.addAll(cache.filterIdentifiers(type, glob)); } - - @Override - public Collection getIdentifiers(String type) { - HashSet identifiers = new HashSet<>(); - for (Cache cache : caches) { - identifiers.addAll(cache.getIdentifiers(type)); - } - return identifiers; + return identifiers; + } + + @Override + public Collection getAll(String type, Collection identifiers) { + return getAll(type, identifiers, null); + } + + @Override + public Collection getAll( + String type, Collection identifiers, CacheFilter cacheFilter) { + Map allItems = new HashMap<>(); + for (Cache cache : caches) { + allItems = merge(allItems, cache.getAll(type, identifiers, cacheFilter)); } - - @Override - public Collection filterIdentifiers(String type, String glob) { - HashSet identifiers = new HashSet<>(); - for (Cache cache : caches) { - identifiers.addAll(cache.filterIdentifiers(type, glob)); - } - return identifiers; + return allItems.values(); + } + + @Override + public Collection getAll(String type, String... identifiers) { + return getAll(type, Arrays.asList(identifiers)); + } + + @Override + public Map> getAllByApplication(String type, String application) { + Map> allItems = new HashMap<>(); + for (Cache cache : caches) { + allItems.putAll(cache.getAllByApplication(type, application)); } - - @Override - public Collection getAll(String type, Collection identifiers) { - return getAll(type, identifiers, null); + return allItems; + } + + @Override + public Map> getAllByApplication( + String type, String application, CacheFilter filter) { + Map> allItems = new HashMap<>(); + for (Cache cache : caches) { + allItems.putAll(cache.getAllByApplication(type, application, filter)); } - - @Override - public Collection getAll(String type, Collection identifiers, CacheFilter cacheFilter) { - Map allItems = new HashMap<>(); - for (Cache cache : caches) { - allItems = merge(allItems, cache.getAll(type, identifiers, cacheFilter)); - } - return allItems.values(); + return allItems; + } + + @Override + public Map> getAllByApplication( + Collection types, String application, Map cacheFilters) { + Map> allItems = new HashMap<>(); + for (Cache cache : caches) { + allItems.putAll(cache.getAllByApplication(types, application, cacheFilters)); } - - @Override - public Collection getAll(String type, String... identifiers) { - return getAll(type, Arrays.asList(identifiers)); - } - - Map merge(Map existingItems, Collection results) { - final Map allItems = existingItems == null ? new HashMap() : existingItems; - for (CacheData item : results) { - CacheData existing = allItems.get(item.getId()); - if (existing == null) { - allItems.put(item.getId(), item); - } else { - allItems.put(item.getId(), merge(item.getId(), existing, item)); - } - } - - return allItems; - } - - CacheData merge(String id, CacheData... elements) { - return merge(id, Arrays.asList(elements)); + return allItems; + } + + Map merge( + Map existingItems, Collection results) { + final Map allItems = + existingItems == null ? 
new HashMap() : existingItems; + for (CacheData item : results) { + CacheData existing = allItems.get(item.getId()); + if (existing == null) { + allItems.put(item.getId(), item); + } else { + allItems.put(item.getId(), merge(item.getId(), existing, item)); + } } - CacheData merge(String id, Collection elements) { - Map attributes = new HashMap<>(); - Map> relationships = new HashMap<>(); - for (CacheData data : elements) { - attributes.putAll(data.getAttributes()); - for (Map.Entry> relationship : data.getRelationships().entrySet()) { - Collection existing = relationships.get(relationship.getKey()); - if (existing == null) { - existing = new HashSet<>(); - relationships.put(relationship.getKey(), existing); - } - existing.addAll(relationship.getValue()); - } + return allItems; + } + + CacheData merge(String id, CacheData... elements) { + return merge(id, Arrays.asList(elements)); + } + + CacheData merge(String id, Collection elements) { + Map attributes = new HashMap<>(); + Map> relationships = new HashMap<>(); + for (CacheData data : elements) { + attributes.putAll(data.getAttributes()); + for (Map.Entry> relationship : + data.getRelationships().entrySet()) { + Collection existing = relationships.get(relationship.getKey()); + if (existing == null) { + existing = new HashSet<>(); + relationships.put(relationship.getKey(), existing); } - return new DefaultCacheData(id, attributes, relationships); + existing.addAll(relationship.getValue()); + } } + return new DefaultCacheData(id, attributes, relationships); + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/DefaultCacheData.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/DefaultCacheData.java deleted file mode 100644 index 2399cf3ca29..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/DefaultCacheData.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.cache; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.time.Clock; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -/** - * An immutable CacheData. 
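The constructor below round-trips a positive ttl through a cacheExpiry attribute; a hedged sketch of that behavior (ids and values invented):

import java.util.Map;

// ttlSeconds > 0 writes a wall-clock "cacheExpiry" attribute alongside the data...
CacheData fresh =
    new DefaultCacheData("aws:instances:i-123", 300, Map.of("state", "up"), Map.of());

// ...and a copy reconstructed without a ttl (ttlSeconds == -1) derives its
// remaining ttl back from that attribute.
CacheData restored =
    new DefaultCacheData("aws:instances:i-123", -1, fresh.getAttributes(), Map.of());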
- */ -public class DefaultCacheData implements CacheData { - private final String id; - private final int ttlSeconds; - private final Map attributes; - private final Map> relationships; - - public DefaultCacheData(String id, Map attributes, Map> relationships) { - this(id, -1, attributes, relationships); - } - - @JsonCreator - public DefaultCacheData(@JsonProperty("id") String id, - @JsonProperty("ttlSeconds") int ttlSeconds, - @JsonProperty("attributes") Map attributes, - @JsonProperty("relationships") Map> relationships) { - this(id, ttlSeconds, attributes, relationships, Clock.systemDefaultZone()); - } - - public DefaultCacheData(String id, int ttlSeconds, Map attributes, Map> relationships, Clock clock) { - // ensure attributes is non-null and mutable given that `cacheExpiry` will be added - attributes = attributes == null ? new HashMap<>() : new HashMap<>(attributes); - - this.id = id; - this.attributes = attributes; - this.relationships = relationships; - - if (ttlSeconds > 0) { - Long cacheExpiry = clock.millis() + ttlSeconds * 1000; - this.attributes.put("cacheExpiry", cacheExpiry); - } - - if (ttlSeconds < 0 && attributes.containsKey("cacheExpiry")) { - ttlSeconds = (int) (clock.millis() - (long) attributes.get("cacheExpiry")) * -1 / 1000; - } - - this.ttlSeconds = ttlSeconds; - } - - @Override - public String getId() { - return id; - } - - @Override - public int getTtlSeconds() { - return ttlSeconds; - } - - @Override - public Map getAttributes() { - return attributes; - } - - @Override - public Map> getRelationships() { - return relationships; - } -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/DefaultJsonCacheData.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/DefaultJsonCacheData.java new file mode 100644 index 00000000000..4673880019b --- /dev/null +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/DefaultJsonCacheData.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.cats.cache; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.time.Clock; +import java.util.Collection; +import java.util.Map; + +public class DefaultJsonCacheData extends DefaultCacheData { + + @JsonCreator + public DefaultJsonCacheData( + @JsonProperty("id") String id, + @JsonProperty("ttlSeconds") int ttlSeconds, + @JsonProperty("attributes") Map attributes, + @JsonProperty("relationships") Map> relationships) { + super(id, ttlSeconds, attributes, relationships, Clock.systemDefaultZone()); + } +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/NamedCacheFactory.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/NamedCacheFactory.java index b95e98b501c..b282ea5d257 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/NamedCacheFactory.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/NamedCacheFactory.java @@ -16,9 +16,15 @@ package com.netflix.spinnaker.cats.cache; -/** - * Produces writeable caches by name. - */ +import com.netflix.spinnaker.cats.provider.ProviderCacheConfiguration; + +/** Produces writeable caches by name. */ public interface NamedCacheFactory { - WriteableCache getCache(String name); + WriteableCache getCache(String name); + + default WriteableCache getCache( + String name, ProviderCacheConfiguration providerCacheConfiguration) { + // not all caches support per-provider configuration + return getCache(name); + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/RelationshipCacheFilter.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/RelationshipCacheFilter.java index 39a7693d4eb..a5093973a5d 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/RelationshipCacheFilter.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/RelationshipCacheFilter.java @@ -25,16 +25,15 @@ private RelationshipCacheFilter(List allowableRelationshipPrefixes) { this.allowableRelationshipPrefixes = allowableRelationshipPrefixes; } - /** - * @return CacheFilter that will filter out all relationships - */ + /** @return CacheFilter that will filter out all relationships */ public static RelationshipCacheFilter none() { return new RelationshipCacheFilter(Collections.emptyList()); } /** * @param relationshipPrefixes Allowable relationship prefixes - * @return CacheFilter that will filter out all relationships not prefixed with one of the relationshipPrefixes + * @return CacheFilter that will filter out all relationships not prefixed with one of the + * relationshipPrefixes */ public static RelationshipCacheFilter include(String... 
relationshipPrefixes) { return new RelationshipCacheFilter(Arrays.asList(relationshipPrefixes)); @@ -58,4 +57,8 @@ public Collection filter(Type type, Collection identifiers) { return filteredIdentifiers; } + + public List getAllowableRelationshipPrefixes() { + return allowableRelationshipPrefixes; + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/WriteableCache.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/WriteableCache.java index 1e32d430805..acbf21a21ee 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/WriteableCache.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/WriteableCache.java @@ -18,15 +18,13 @@ import java.util.Collection; -/** - * A WriteableCache is a Cache that is updatable. - */ +/** A WriteableCache is a Cache that is updatable. */ public interface WriteableCache extends Cache { - void merge(String type, CacheData cacheData); + void merge(String type, CacheData cacheData); - void mergeAll(String type, Collection items); + void mergeAll(String type, Collection items); - void evict(String type, String id); + void evict(String type, String id); - void evictAll(String type, Collection ids); + void evictAll(String type, Collection ids); } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/AgentIntervalProvider.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/AgentIntervalProvider.java new file mode 100644 index 00000000000..85e61567bac --- /dev/null +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/AgentIntervalProvider.java @@ -0,0 +1,58 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cluster; + +import com.netflix.spinnaker.cats.agent.Agent; + +/** Provides a poll interval and timeout for an Agent. 
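A hedged sketch of configuring the DefaultAgentIntervalProvider defined a little further below (durations invented; agent is assumed to be in scope, and AgentIntervalAware agents override these defaults):

import java.util.concurrent.TimeUnit;

// Poll every 60s, back off to 120s after a failed run, and let the agent's
// lock be revoked if a run exceeds 10 minutes.
AgentIntervalProvider provider =
    new DefaultAgentIntervalProvider(
        TimeUnit.SECONDS.toMillis(60),
        TimeUnit.SECONDS.toMillis(120),
        TimeUnit.MINUTES.toMillis(10));

AgentIntervalProvider.Interval interval = provider.getInterval(agent);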
*/ +public interface AgentIntervalProvider { + public static class Interval { + final long interval; + final long errorInterval; + final long timeout; + + public Interval(long interval, long timeout) { + this(interval, interval, timeout); + } + + public Interval(long interval, long errorInterval, long timeout) { + this.interval = interval; + this.errorInterval = errorInterval; + this.timeout = timeout; + } + + /** @return how frequently the Agent should run in milliseconds */ + public long getInterval() { + return interval; + } + + /** @return how frequently after an error the Agent should run in milliseconds */ + public long getErrorInterval() { + return errorInterval; + } + + /** + * @return the maximum amount of time in milliseconds for an Agent to complete its run before + * the run is rescheduled + */ + public long getTimeout() { + return timeout; + } + } + + Interval getInterval(Agent agent); +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultAgentIntervalProvider.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultAgentIntervalProvider.java new file mode 100644 index 00000000000..abeb61da109 --- /dev/null +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultAgentIntervalProvider.java @@ -0,0 +1,68 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cluster; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentIntervalAware; + +public class DefaultAgentIntervalProvider implements AgentIntervalProvider { + private final long interval; + private final long errorInterval; + private final long timeout; + + public DefaultAgentIntervalProvider(long interval) { + this(interval, interval * 2); + } + + public DefaultAgentIntervalProvider(long interval, long timeout) { + this(interval, interval, timeout); + } + + public DefaultAgentIntervalProvider(long interval, long errorInterval, long timeout) { + this.interval = interval; + this.errorInterval = errorInterval; + this.timeout = timeout; + } + + @Override + public Interval getInterval(Agent agent) { + if (agent instanceof AgentIntervalAware) { + Long agentInterval = ((AgentIntervalAware) agent).getAgentInterval(); + Long agentErrorInterval = ((AgentIntervalAware) agent).getAgentErrorInterval(); + if (agentInterval != null && agentInterval > 0) { + // Specify the caching agent timeout as twice the interval. This gives a high upper bound + // on the time it should take the agent to complete its work. The agent's lock is revoked + // after the timeout. 
+ return new Interval(agentInterval, agentErrorInterval, 2 * agentInterval); + } + } + + return new Interval(interval, errorInterval, timeout); + } + + public long getInterval() { + return interval; + } + + public long getErrorInterval() { + return errorInterval; + } + + public long getTimeout() { + return timeout; + } +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultNodeIdentity.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultNodeIdentity.java new file mode 100644 index 00000000000..232523a3389 --- /dev/null +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultNodeIdentity.java @@ -0,0 +1,140 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cluster; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.NetworkInterface; +import java.net.Socket; +import java.net.SocketException; +import java.util.Collections; +import java.util.Enumeration; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +public class DefaultNodeIdentity implements NodeIdentity { + + public static final String UNKNOWN_HOST = "UnknownHost"; + private static final long REFRESH_INTERVAL = TimeUnit.SECONDS.toMillis(30); + + private static String getHostName(String validationHost, int validationPort) { + final Enumeration interfaces; + try { + interfaces = NetworkInterface.getNetworkInterfaces(); + } catch (SocketException ignored) { + return UNKNOWN_HOST; + } + if (interfaces == null || validationHost == null) { + return UNKNOWN_HOST; + } + + for (NetworkInterface networkInterface : Collections.list(interfaces)) { + try { + if (networkInterface.isLoopback() + && !validationHost.equals("localhost") + && !validationHost.startsWith("127.")) { + continue; + } + + if (!networkInterface.isUp()) { + continue; + } + } catch (SocketException ignored) { + continue; + } + + for (InetAddress address : Collections.list(networkInterface.getInetAddresses())) { + Socket socket = null; + try { + socket = new Socket(); + socket.bind(new InetSocketAddress(address, 0)); + socket.connect(new InetSocketAddress(validationHost, validationPort), 125); + return address.getHostName(); + } catch (IOException ignored) { + // ignored + } finally { + if (socket != null) { + try { + socket.close(); + } catch (IOException ignored) { + // ignored + } + } + } + } + } + + return UNKNOWN_HOST; + } + + private final String validationAddress; + private final int validationPort; + private final String runtimeName; + private final AtomicReference identity = new AtomicReference<>(null); + private final AtomicBoolean validIdentity = new 
AtomicBoolean(false); + private final AtomicLong refreshTime = new AtomicLong(0); + private final Lock refreshLock = new ReentrantLock(); + private final long refreshInterval; + + public DefaultNodeIdentity() { + this("www.google.com", 80); + } + + public DefaultNodeIdentity(String validationAddress, int validationPort) { + this(validationAddress, validationPort, REFRESH_INTERVAL); + } + + public DefaultNodeIdentity(String validationAddress, int validationPort, long refreshInterval) { + this.validationAddress = validationAddress; + this.validationPort = validationPort; + this.runtimeName = ManagementFactory.getRuntimeMXBean().getName(); + this.refreshInterval = refreshInterval; + loadIdentity(); + } + + @Override + public String getNodeIdentity() { + if (!validIdentity.get() && shouldRefresh()) { + refreshLock.lock(); + try { + if (!validIdentity.get() && shouldRefresh()) { + loadIdentity(); + } + } finally { + refreshLock.unlock(); + } + } + return identity.get(); + } + + private boolean shouldRefresh() { + return System.currentTimeMillis() - refreshTime.get() > refreshInterval; + } + + private void loadIdentity() { + identity.set( + String.format("%s:%s", getHostName(validationAddress, validationPort), runtimeName)); + validIdentity.set(!identity.get().contains(UNKNOWN_HOST)); + refreshTime.set(System.currentTimeMillis()); + } +} diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeStatusProvider.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultNodeStatusProvider.java similarity index 93% rename from cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeStatusProvider.java rename to cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultNodeStatusProvider.java index 86d84826183..21ea9d455ca 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeStatusProvider.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/DefaultNodeStatusProvider.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.netflix.spinnaker.cats.redis.cluster; +package com.netflix.spinnaker.cats.cluster; public class DefaultNodeStatusProvider implements NodeStatusProvider { @Override diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/NodeIdentity.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NodeIdentity.java similarity index 88% rename from cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/NodeIdentity.java rename to cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NodeIdentity.java index ef034a6fdcf..de6a2f74fac 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/NodeIdentity.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NodeIdentity.java @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -package com.netflix.spinnaker.cats.redis.cluster; +package com.netflix.spinnaker.cats.cluster; public interface NodeIdentity { - String getNodeIdentity(); + String getNodeIdentity(); } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/NodeStatusProvider.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NodeStatusProvider.java similarity index 92% rename from cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/NodeStatusProvider.java rename to cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NodeStatusProvider.java index b3fd06d86ac..078848aa4c7 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/NodeStatusProvider.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NodeStatusProvider.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.netflix.spinnaker.cats.redis.cluster; +package com.netflix.spinnaker.cats.cluster; public interface NodeStatusProvider { boolean isNodeEnabled(); diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NoopShardingFilter.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NoopShardingFilter.java new file mode 100644 index 00000000000..b71ea0f295d --- /dev/null +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/NoopShardingFilter.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021 OpsMx + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.cats.cluster; + +import com.netflix.spinnaker.cats.agent.Agent; + +public class NoopShardingFilter implements ShardingFilter { + @Override + public boolean filter(Agent agent) { + return true; + } +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/ShardingFilter.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/ShardingFilter.java new file mode 100644 index 00000000000..50f8054d59c --- /dev/null +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cluster/ShardingFilter.java @@ -0,0 +1,24 @@ +/* + * Copyright 2021 OpsMx + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.cats.cluster; + +import com.netflix.spinnaker.cats.agent.Agent; + +public interface ShardingFilter { + boolean filter(Agent agent); +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/CompressionStrategy.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/CompressionStrategy.java index eb8790bec04..dd170a813af 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/CompressionStrategy.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/CompressionStrategy.java @@ -17,5 +17,6 @@ public interface CompressionStrategy { String compress(final String str); + String decompress(final String compressed); } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/GZipCompression.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/GZipCompression.java index f58e87d3b0b..aa01c6ffb6f 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/GZipCompression.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/compression/GZipCompression.java @@ -15,9 +15,6 @@ */ package com.netflix.spinnaker.cats.compression; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -27,18 +24,23 @@ import java.util.Base64; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class GZipCompression implements CompressionStrategy { - private final static Logger log = LoggerFactory.getLogger(GZipCompression.class); + private static final Logger log = LoggerFactory.getLogger(GZipCompression.class); - private final static String CHARSET = "UTF-8"; + private static final String CHARSET = "UTF-8"; private final long thresholdBytesSize; private final boolean enabled; public GZipCompression(long thresholdBytesSize, boolean enabled) { - log.info("Cats using gzip compression: {} bytes threshold, compress enabled: {}", thresholdBytesSize, enabled); + log.info( + "Cats using gzip compression: {} bytes threshold, compress enabled: {}", + thresholdBytesSize, + enabled); this.thresholdBytesSize = thresholdBytesSize; this.enabled = enabled; } @@ -89,7 +91,7 @@ public String decompress(final String compressed) { byte[] bytes; try { bytes = Base64.getDecoder().decode(compressed.getBytes(CHARSET)); - } catch (IllegalArgumentException|UnsupportedEncodingException e) { + } catch (IllegalArgumentException | UnsupportedEncodingException e) { return compressed; } @@ -115,6 +117,7 @@ public String decompress(final String compressed) { } private static boolean isCompressed(final byte[] compressed) { - return compressed[0] == (byte) (GZIPInputStream.GZIP_MAGIC) && compressed[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8); + return compressed[0] == (byte) (GZIPInputStream.GZIP_MAGIC) + && compressed[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8); } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryCache.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryCache.java index 3d64297e5f3..290a55fa4d1 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryCache.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryCache.java @@ -20,7 +20,6 @@ import com.netflix.spinnaker.cats.cache.CacheFilter; import 
com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.cats.cache.WriteableCache; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -35,342 +34,347 @@ import java.util.concurrent.ConcurrentMap; import java.util.regex.Pattern; -/** - * A WriteableCache that stores objects in an in-memory map. - */ +/** A WriteableCache that stores objects in an in-memory map. */ public class InMemoryCache implements WriteableCache { - private ConcurrentMap> typeMap = new ConcurrentHashMap<>(); - - @Override - public void merge(String type, CacheData cacheData) { - merge(getOrCreate(type, cacheData.getId()), cacheData); + private ConcurrentMap> typeMap = + new ConcurrentHashMap<>(); + + @Override + public void merge(String type, CacheData cacheData) { + merge(getOrCreate(type, cacheData.getId()), cacheData); + } + + @Override + public void mergeAll(String type, Collection items) { + for (CacheData item : items) { + merge(type, item); } - - @Override - public void mergeAll(String type, Collection items) { - for (CacheData item : items) { - merge(type, item); - } + } + + @Override + public void evict(String type, String id) { + getTypeMap(type).remove(id); + } + + @Override + public void evictAll(String type, Collection ids) { + ConcurrentMap map = getTypeMap(type); + for (String id : ids) { + map.remove(id); + } + } + + @Override + public CacheData get(String type, String id) { + return get(type, id, null); + } + + @Override + public CacheData get(String type, String id, CacheFilter cacheFilter) { + CacheData existing = getTypeMap(type).get(id); + if (existing != null) { + return wrap(existing, cacheFilter); + } + return null; + } + + @Override + public Collection getAll(String type) { + return getAll(type, (CacheFilter) null); + } + + @Override + public Collection getAll(String type, CacheFilter cacheFilter) { + ConcurrentMap map = getTypeMap(type); + Collection values = new LinkedList<>(); + for (CacheData data : map.values()) { + CacheData toReturn = wrap(data, cacheFilter); + if (toReturn != null) { + values.add(wrap(data, cacheFilter)); + } + } + return values; + } + + @Override + public Collection getAll(String type, Collection identifiers) { + return getAll(type, identifiers, null); + } + + @Override + public Collection getAll( + String type, Collection identifiers, CacheFilter cacheFilter) { + ConcurrentMap map = getTypeMap(type); + Collection values = new ArrayList<>(identifiers.size()); + for (String id : identifiers) { + CacheData toReturn = wrap(map.get(id), cacheFilter); + if (toReturn != null) { + values.add(toReturn); + } + } + return values; + } + + @Override + public Collection getAll(String type, String... 
identifiers) { + return getAll(type, Arrays.asList(identifiers)); + } + + @Override + public Collection existingIdentifiers(String type, Collection ids) { + Set existing = new HashSet<>(ids); + existing.retainAll(getTypeMap(type).keySet()); + return existing; + } + + public Collection getIdentifiers(String type) { + return new HashSet<>(getTypeMap(type).keySet()); + } + + public Collection filterIdentifiers(String type, String glob) { + final Pattern pattern = new Glob(glob).toPattern(); + final HashSet matches = new HashSet<>(); + for (String key : getTypeMap(type).keySet()) { + if (pattern.matcher(key).matches()) { + matches.add(key); + } + } + return matches; + } + + private CacheData getOrCreate(String type, String id) { + return getCacheData(getTypeMap(type), id); + } + + private ConcurrentMap getTypeMap(String type) { + ConcurrentMap newValue = new ConcurrentHashMap<>(); + ConcurrentMap existing = typeMap.putIfAbsent(type, newValue); + if (existing == null) { + return newValue; } - @Override - public void evict(String type, String id) { - getTypeMap(type).remove(id); + return existing; + } + + private CacheData wrap(CacheData data, CacheFilter cacheFilter) { + if (data == null || data.getAttributes().isEmpty()) { + return null; } - @Override - public void evictAll(String type, Collection ids) { - ConcurrentMap map = getTypeMap(type); - for (String id : ids) { - map.remove(id); - } + Map> relationships = data.getRelationships(); + if (cacheFilter != null) { + relationships = new HashMap<>(); + for (String relationship : + cacheFilter.filter(CacheFilter.Type.RELATIONSHIP, data.getRelationships().keySet())) { + relationships.put(relationship, data.getRelationships().get(relationship)); + } } - @Override - public CacheData get(String type, String id) { - return get(type, id, null); + return new DefaultCacheData(data.getId(), data.getAttributes(), relationships); + } + + private CacheData getCacheData(ConcurrentMap map, String id) { + CacheData newValue = new BackingData(id); + CacheData existing = map.putIfAbsent(id, newValue); + if (existing == null) { + return newValue; } - @Override - public CacheData get(String type, String id, CacheFilter cacheFilter) { - CacheData existing = getTypeMap(type).get(id); - if (existing != null) { - return wrap(existing, cacheFilter); + return existing; + } + + private void merge(CacheData existing, CacheData update) { + MapMutation attributes = new MapMutation<>(update.getAttributes()); + MapMutation> relationships = + new MapMutation<>(update.getRelationships()); + + Set missingAttributes = new HashSet<>(existing.getAttributes().keySet()); + missingAttributes.removeAll(update.getAttributes().keySet()); + attributes.apply(existing.getAttributes()); + existing.getAttributes().keySet().removeAll(missingAttributes); + relationships.apply(existing.getRelationships()); + } + + /** + * ConcurrentHashMap doesn't support null values, this translates a sourceMap into a combination + * of non-null update values and a set of keys to remove + * + * @param the key type + * @param the value type + */ + private static class MapMutation { + private final Map updateData; + private final Set removalSet; + + public MapMutation(Map source) { + Map toPut = new HashMap<>(); + Set toRemove = new HashSet<>(); + for (Map.Entry entry : source.entrySet()) { + if (entry.getValue() == null) { + toRemove.add(entry.getKey()); + } else { + toPut.put(entry.getKey(), entry.getValue()); } - return null; + } + updateData = Collections.unmodifiableMap(toPut); + removalSet = 
Collections.unmodifiableSet(toRemove); } - @Override - public Collection getAll(String type) { - return getAll(type, (CacheFilter) null); + public void apply(Map target) { + target.putAll(updateData); + target.keySet().removeAll(removalSet); } + } - @Override - public Collection getAll(String type, CacheFilter cacheFilter) { - ConcurrentMap map = getTypeMap(type); - Collection values = new LinkedList<>(); - for (CacheData data : map.values()) { - CacheData toReturn = wrap(data, cacheFilter); - if (toReturn != null) { - values.add(wrap(data, cacheFilter)); - } - } - return values; - } + private static class BackingData implements CacheData { + private final ConcurrentMap attributes = new ConcurrentHashMap<>(); + private final ConcurrentMap> relationships = + new ConcurrentHashMap<>(); + private final String id; - @Override - public Collection getAll(String type, Collection identifiers) { - return getAll(type, identifiers, null); + public BackingData(String id) { + this.id = id; } @Override - public Collection getAll(String type, Collection identifiers, CacheFilter cacheFilter) { - ConcurrentMap map = getTypeMap(type); - Collection values = new ArrayList<>(identifiers.size()); - for (String id : identifiers) { - CacheData toReturn = wrap(map.get(id), cacheFilter); - if (toReturn != null) { - values.add(toReturn); - } - } - return values; + public String getId() { + return id; } @Override - public Collection getAll(String type, String... identifiers) { - return getAll(type, Arrays.asList(identifiers)); + public int getTtlSeconds() { + return -1; } @Override - public Collection existingIdentifiers(String type, Collection ids) { - Set existing = new HashSet<>(ids); - existing.retainAll(getTypeMap(type).keySet()); - return existing; + public Map getAttributes() { + return attributes; } - public Collection getIdentifiers(String type) { - return new HashSet<>(getTypeMap(type).keySet()); + @Override + public Map> getRelationships() { + return relationships; } + } - public Collection filterIdentifiers(String type, String glob) { - final Pattern pattern = new Glob(glob).toPattern(); - final HashSet matches = new HashSet<>(); - for (String key : getTypeMap(type).keySet()) { - if (pattern.matcher(key).matches()) { - matches.add(key); - } - } - return matches; - } + public static class Glob { + private static final String TOKENS = "*?[]\\"; - private CacheData getOrCreate(String type, String id) { - return getCacheData(getTypeMap(type), id); + private static enum State { + INIT, + ESCAPING, + CAPTURING, + CAPTURING_ESCAPE } - private ConcurrentMap getTypeMap(String type) { - ConcurrentMap newValue = new ConcurrentHashMap<>(); - ConcurrentMap existing = typeMap.putIfAbsent(type, newValue); - if (existing == null) { - return newValue; - } + private final StringTokenizer globTokenizer; + private final StringBuilder regex = new StringBuilder(); + private final StringBuilder capture = new StringBuilder(); - return existing; - } + private State state = State.INIT; - private CacheData wrap(CacheData data, CacheFilter cacheFilter) { - if (data == null || data.getAttributes().isEmpty()) { - return null; - } + private final Pattern pattern; - Map> relationships = data.getRelationships(); - if (cacheFilter != null) { - relationships = new HashMap<>(); - for (String relationship : cacheFilter.filter(CacheFilter.Type.RELATIONSHIP, data.getRelationships().keySet())) { - relationships.put(relationship, data.getRelationships().get(relationship)); - } - } - - return new DefaultCacheData(data.getId(), 
data.getAttributes(), relationships); + public Glob(String globString) { + globTokenizer = new StringTokenizer(globString, TOKENS, true); + toInit(); + pattern = buildPattern(); } - private CacheData getCacheData(ConcurrentMap map, String id) { - CacheData newValue = new BackingData(id); - CacheData existing = map.putIfAbsent(id, newValue); - if (existing == null) { - return newValue; - } - - return existing; + public Pattern toPattern() { + return pattern; } - private void merge(CacheData existing, CacheData update) { - MapMutation attributes = new MapMutation<>(update.getAttributes()); - MapMutation> relationships = new MapMutation<>(update.getRelationships()); - - Set missingAttributes = new HashSet<>(existing.getAttributes().keySet()); - missingAttributes.removeAll(update.getAttributes().keySet()); - attributes.apply(existing.getAttributes()); - existing.getAttributes().keySet().removeAll(missingAttributes); - relationships.apply(existing.getRelationships()); + private void toInit() { + state = State.INIT; + capture.setLength(0); } - /** - * ConcurrentHashMap doesn't support null values, this translates a sourceMap into - * a combination of non-null update values and a set of keys to remove - * - * @param the key type - * @param the value type - */ - private static class MapMutation { - private final Map updateData; - private final Set removalSet; - - public MapMutation(Map source) { - Map toPut = new HashMap<>(); - Set toRemove = new HashSet<>(); - for (Map.Entry entry : source.entrySet()) { - if (entry.getValue() == null) { - toRemove.add(entry.getKey()); - } else { - toPut.put(entry.getKey(), entry.getValue()); - } - } - updateData = Collections.unmodifiableMap(toPut); - removalSet = Collections.unmodifiableSet(toRemove); - } - - public void apply(Map target) { - target.putAll(updateData); - target.keySet().removeAll(removalSet); - } + private void toEscaping() { + state = State.ESCAPING; } - private static class BackingData implements CacheData { - private final ConcurrentMap attributes = new ConcurrentHashMap<>(); - private final ConcurrentMap> relationships = new ConcurrentHashMap<>(); - private final String id; - - public BackingData(String id) { - this.id = id; - } - - @Override - public String getId() { - return id; - } - - @Override - public int getTtlSeconds() { - return -1; - } - - @Override - public Map getAttributes() { - return attributes; - } - - @Override - public Map> getRelationships() { - return relationships; - } + private void toCapturing() { + state = State.CAPTURING; } - public static class Glob { - private static final String TOKENS = "*?[]\\"; - - private static enum State { - INIT, ESCAPING, CAPTURING, CAPTURING_ESCAPE - } - - private final StringTokenizer globTokenizer; - private final StringBuilder regex = new StringBuilder(); - private final StringBuilder capture = new StringBuilder(); - - private State state = State.INIT; - - private final Pattern pattern; + private void toCapturingEscape() { + state = State.CAPTURING_ESCAPE; + } - public Glob(String globString) { - globTokenizer = new StringTokenizer(globString, TOKENS, true); + private void handleDelim(String s) { + switch (state) { + case ESCAPING: + regex.append(Pattern.quote(s)); + toInit(); + break; + case CAPTURING_ESCAPE: + capture.append(Pattern.quote(s)); + toCapturing(); + break; + case CAPTURING: + if ("\\".equals(s)) { + toCapturingEscape(); + } else if ("]".equals(s)) { + regex.append("[").append(capture).append("]"); toInit(); - pattern = buildPattern(); - } - - public Pattern toPattern() { 
- return pattern; - } - - private void toInit() { - state = State.INIT; - capture.setLength(0); - } - - private void toEscaping() { - state = State.ESCAPING; - } - - private void toCapturing() { - state = State.CAPTURING; - } - - private void toCapturingEscape() { - state = State.CAPTURING_ESCAPE; - } - - private void handleDelim(String s) { - switch (state) { - case ESCAPING: - regex.append(Pattern.quote(s)); - toInit(); - break; - case CAPTURING_ESCAPE: - capture.append(Pattern.quote(s)); - toCapturing(); - break; - case CAPTURING: - if ("\\".equals(s)) { - toCapturingEscape(); - } else if ("]".equals(s)) { - regex.append("[").append(capture).append("]"); - toInit(); - } else { - capture.append(Pattern.quote(s)); - toCapturing(); - } - break; - default: - switch (s) { - case "\\": - toEscaping(); - break; - case "*": - regex.append(".*"); - toInit(); - break; - case "?": - regex.append("."); - toInit(); - break; - case "[": - toCapturing(); - break; - case "]": - regex.append(Pattern.quote("]")); - toInit(); - break; - default: - throw new IllegalStateException("Unhandled delimiter in init state: " + s); - } - } - } + } else { + capture.append(Pattern.quote(s)); + toCapturing(); + } + break; + default: + switch (s) { + case "\\": + toEscaping(); + break; + case "*": + regex.append(".*"); + toInit(); + break; + case "?": + regex.append("."); + toInit(); + break; + case "[": + toCapturing(); + break; + case "]": + regex.append(Pattern.quote("]")); + toInit(); + break; + default: + throw new IllegalStateException("Unhandled delimiter in init state: " + s); + } + } + } - private void handleStr(String s) { - switch (state) { - case CAPTURING: - capture.append(Pattern.quote(s)); - toCapturing(); - break; - default: - regex.append(Pattern.quote(s)); - toInit(); - } - } + private void handleStr(String s) { + switch (state) { + case CAPTURING: + capture.append(Pattern.quote(s)); + toCapturing(); + break; + default: + regex.append(Pattern.quote(s)); + toInit(); + } + } - private Pattern buildPattern() { - while (globTokenizer.hasMoreTokens()) { - String token = globTokenizer.nextToken(); - if (token.length() == 1 && TOKENS.indexOf(token.charAt(0)) != -1) { - handleDelim(token); - } else { - handleStr(token); - } - } - if (state == State.CAPTURING || state == State.CAPTURING_ESCAPE) { - regex.append(Pattern.quote("[")); - regex.append(capture); - } - return Pattern.compile(regex.toString()); + private Pattern buildPattern() { + while (globTokenizer.hasMoreTokens()) { + String token = globTokenizer.nextToken(); + if (token.length() == 1 && TOKENS.indexOf(token.charAt(0)) != -1) { + handleDelim(token); + } else { + handleStr(token); } - + } + if (state == State.CAPTURING || state == State.CAPTURING_ESCAPE) { + regex.append(Pattern.quote("[")); + regex.append(capture); + } + return Pattern.compile(regex.toString()); } + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryNamedCacheFactory.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryNamedCacheFactory.java index b268c47b5c2..084c67f8a60 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryNamedCacheFactory.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/mem/InMemoryNamedCacheFactory.java @@ -18,23 +18,20 @@ import com.netflix.spinnaker.cats.cache.NamedCacheFactory; import com.netflix.spinnaker.cats.cache.WriteableCache; - import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -/** - * Produces InMemoryCaches. 
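The Glob class above compiles a glob into a java.util.regex.Pattern: '*' becomes '.*', '?' becomes '.', '[...]' becomes a character class, and all other text is Pattern.quote()d. A quick sketch of the intended behavior, with key shapes borrowed from the caching-agent tests elsewhere in this diff:

import com.netflix.spinnaker.cats.mem.InMemoryCache.Glob;
import java.util.regex.Pattern;

public class GlobDemo {
  public static void main(String[] args) {
    Pattern p = new Glob("securityGroups:*:test:us-west-?").toPattern();
    // '*' spans the group id, '?' matches exactly one trailing character.
    System.out.println(p.matcher("securityGroups:sg-123:test:us-west-1").matches());  // true
    System.out.println(p.matcher("securityGroups:sg-123:prod:us-west-1").matches());  // false
  }
}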
- */ +/** Produces InMemoryCaches. */ public class InMemoryNamedCacheFactory implements NamedCacheFactory { - private final ConcurrentMap caches = new ConcurrentHashMap<>(); + private final ConcurrentMap caches = new ConcurrentHashMap<>(); - @Override - public WriteableCache getCache(String name) { - WriteableCache cache = new InMemoryCache(); - WriteableCache existing = caches.putIfAbsent(name, cache); - if (existing == null) { - return cache; - } - return existing; + @Override + public WriteableCache getCache(String name) { + WriteableCache cache = new InMemoryCache(); + WriteableCache existing = caches.putIfAbsent(name, cache); + if (existing == null) { + return cache; } + return existing; + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModule.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModule.java index 2f9284d484a..b4aba48bcce 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModule.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModule.java @@ -26,88 +26,92 @@ import com.netflix.spinnaker.cats.mem.InMemoryNamedCacheFactory; import com.netflix.spinnaker.cats.provider.Provider; import com.netflix.spinnaker.cats.provider.ProviderRegistry; - import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.concurrent.TimeUnit; /** - * A CatsModule should provide the component configuration for caching a - * collection of Providers, and return a readable view Cache for access to - * the cached data. + * A CatsModule should provide the component configuration for caching a collection of Providers, + * and return a readable view Cache for access to the cached data. */ public interface CatsModule { - NamedCacheFactory getNamedCacheFactory(); - - ProviderRegistry getProviderRegistry(); - - AgentScheduler getAgentScheduler(); - - Cache getView(); - - ExecutionInstrumentation getExecutionInstrumentation(); - - public static class Builder { - private NamedCacheFactory cacheFactory; - private AgentScheduler scheduler; - private Collection instrumentations = new LinkedList<>(); - - public Builder scheduler(AgentScheduler agentScheduler) { - if (this.scheduler != null) { - throw new IllegalStateException("AgentScheduler already configured"); - } - this.scheduler = agentScheduler; - return this; - } - - public Builder intervalScheduler(long interval) { - return scheduler(new DefaultAgentScheduler(interval, TimeUnit.MILLISECONDS)); - } - - public Builder intervalScheduler(long interval, TimeUnit unit) { - return intervalScheduler(unit.toMillis(interval)); - } - - public Builder instrumentation(Collection instrumentation) { - instrumentations.addAll(instrumentation); - return this; - } - - public Builder instrumentation(ExecutionInstrumentation... instrumentation) { - return instrumentation(Arrays.asList(instrumentation)); - } - - public Builder cacheFactory(NamedCacheFactory namedCacheFactory) { - if (this.cacheFactory != null) { - throw new IllegalStateException("NamedCacheFactory already configured"); - } - this.cacheFactory = namedCacheFactory; - return this; - } - - public CatsModule build(Provider... 
providers) { - return build(Arrays.asList(providers)); - } - - public CatsModule build(Collection providers) { - final ExecutionInstrumentation instrumentation; - if (instrumentations.isEmpty()) { - instrumentation = new NoopExecutionInstrumentation(); - } else { - instrumentation = new CompositeExecutionInstrumentation(instrumentations); - } - - if (scheduler == null) { - scheduler = new DefaultAgentScheduler(); - } - - if (cacheFactory == null) { - cacheFactory = new InMemoryNamedCacheFactory(); - } - return new DefaultCatsModule(providers, cacheFactory, scheduler, instrumentation); - } + NamedCacheFactory getNamedCacheFactory(); + + ProviderRegistry getProviderRegistry(); + + AgentScheduler getAgentScheduler(); + + Cache getView(); + + ExecutionInstrumentation getExecutionInstrumentation(); + + public static class Builder { + private NamedCacheFactory cacheFactory; + private AgentScheduler scheduler; + private ProviderRegistry providerRegistry; + private Collection instrumentations = new LinkedList<>(); + + public Builder scheduler(AgentScheduler agentScheduler) { + if (this.scheduler != null) { + throw new IllegalStateException("AgentScheduler already configured"); + } + this.scheduler = agentScheduler; + return this; + } + + public Builder intervalScheduler(long interval) { + return scheduler(new DefaultAgentScheduler(interval, TimeUnit.MILLISECONDS)); + } + + public Builder intervalScheduler(long interval, TimeUnit unit) { + return intervalScheduler(unit.toMillis(interval)); } + public Builder instrumentation(Collection instrumentation) { + instrumentations.addAll(instrumentation); + return this; + } + + public Builder instrumentation(ExecutionInstrumentation... instrumentation) { + return instrumentation(Arrays.asList(instrumentation)); + } + + public Builder cacheFactory(NamedCacheFactory namedCacheFactory) { + if (this.cacheFactory != null) { + throw new IllegalStateException("NamedCacheFactory already configured"); + } + this.cacheFactory = namedCacheFactory; + return this; + } + + public Builder providerRegistry(ProviderRegistry providerRegistry) { + this.providerRegistry = providerRegistry; + return this; + } + + public CatsModule build(Provider... providers) { + return build(Arrays.asList(providers)); + } + + public CatsModule build(Collection providers) { + final ExecutionInstrumentation instrumentation; + if (instrumentations.isEmpty()) { + instrumentation = new NoopExecutionInstrumentation(); + } else { + instrumentation = new CompositeExecutionInstrumentation(instrumentations); + } + + if (scheduler == null) { + scheduler = new DefaultAgentScheduler(); + } + + if (cacheFactory == null) { + cacheFactory = new InMemoryNamedCacheFactory(); + } + return new DefaultCatsModule( + providerRegistry, providers, cacheFactory, scheduler, instrumentation); + } + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModuleAware.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModuleAware.java index 0348ad9db9f..f8722db84df 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModuleAware.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/CatsModuleAware.java @@ -17,22 +17,18 @@ package com.netflix.spinnaker.cats.module; /** - * This class is used to identify classes (typically Schedulers) that are capable of returning the cats module they are - * associated with. 
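CatsModule.Builder now exposes a providerRegistry(...) hook, which DefaultCatsModule (below) uses to skip constructing a DefaultProviderRegistry when one is supplied. A hedged wiring sketch; the provider collection is assumed to come from elsewhere:

import com.netflix.spinnaker.cats.module.CatsModule;
import com.netflix.spinnaker.cats.provider.Provider;
import java.util.Collection;
import java.util.concurrent.TimeUnit;

class CatsWiring {
  static CatsModule wire(Collection<Provider> providers) {
    return new CatsModule.Builder()
        .intervalScheduler(30, TimeUnit.SECONDS) // omit to fall back to DefaultAgentScheduler defaults
        // .providerRegistry(existingRegistry)   // new optional hook; when omitted,
        //                                       // build() creates a DefaultProviderRegistry
        .build(providers);
  }
}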
+ * This class is used to identify classes (typically Schedulers) that are capable of returning the + * cats module they are associated with. */ public abstract class CatsModuleAware { private CatsModule catsModule; - /** - * Set this object's cats module. - */ + /** Set this object's cats module. */ public void setCatsModule(CatsModule catsModule) { this.catsModule = catsModule; } - /** - * Get this object's cats module. - */ + /** Get this object's cats module. */ public CatsModule getCatsModule() { return catsModule; } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/DefaultCatsModule.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/DefaultCatsModule.java index b6b07c29093..daa9483d29e 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/DefaultCatsModule.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/module/DefaultCatsModule.java @@ -25,49 +25,59 @@ import com.netflix.spinnaker.cats.provider.DefaultProviderRegistry; import com.netflix.spinnaker.cats.provider.Provider; import com.netflix.spinnaker.cats.provider.ProviderRegistry; - import java.util.Collection; public class DefaultCatsModule implements CatsModule { - private final NamedCacheFactory namedCacheFactory; - private final ProviderRegistry providerRegistry; - private final AgentScheduler agentScheduler; - private final Cache view; - private final ExecutionInstrumentation executionInstrumentation; + private final NamedCacheFactory namedCacheFactory; + private final ProviderRegistry providerRegistry; + private final AgentScheduler agentScheduler; + private final Cache view; + private final ExecutionInstrumentation executionInstrumentation; - public DefaultCatsModule(Collection providers, NamedCacheFactory namedCacheFactory, AgentScheduler agentScheduler, ExecutionInstrumentation executionInstrumentation) { - this.namedCacheFactory = namedCacheFactory; - providerRegistry = new DefaultProviderRegistry(providers, namedCacheFactory); - this.agentScheduler = agentScheduler; + public DefaultCatsModule( + ProviderRegistry registry, + Collection providers, + NamedCacheFactory namedCacheFactory, + AgentScheduler agentScheduler, + ExecutionInstrumentation executionInstrumentation) { + if (registry == null) { + this.providerRegistry = new DefaultProviderRegistry(providers, namedCacheFactory); + } else { + this.providerRegistry = registry; + } - if (agentScheduler instanceof CatsModuleAware) { - ((CatsModuleAware)agentScheduler).setCatsModule(this); - } + this.namedCacheFactory = namedCacheFactory; - view = new CompositeCache(providerRegistry.getProviderCaches()); - this.executionInstrumentation = executionInstrumentation; - new AgentController(providerRegistry, agentScheduler, executionInstrumentation); - } + this.agentScheduler = agentScheduler; - public NamedCacheFactory getNamedCacheFactory() { - return namedCacheFactory; + if (agentScheduler instanceof CatsModuleAware) { + ((CatsModuleAware) agentScheduler).setCatsModule(this); } - public ProviderRegistry getProviderRegistry() { - return providerRegistry; - } + view = new CompositeCache(providerRegistry.getProviderCaches()); + this.executionInstrumentation = executionInstrumentation; + new AgentController(providerRegistry, agentScheduler, executionInstrumentation); + } - public AgentScheduler getAgentScheduler() { - return agentScheduler; - } + public NamedCacheFactory getNamedCacheFactory() { + return namedCacheFactory; + } - @Override - public Cache getView() { - return view; - } + public 
ProviderRegistry getProviderRegistry() { + return providerRegistry; + } - @Override - public ExecutionInstrumentation getExecutionInstrumentation() { - return executionInstrumentation; - } + public AgentScheduler getAgentScheduler() { + return agentScheduler; + } + + @Override + public Cache getView() { + return view; + } + + @Override + public ExecutionInstrumentation getExecutionInstrumentation() { + return executionInstrumentation; + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderCache.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderCache.java index 729fe52eeae..7f6d6b145c4 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderCache.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderCache.java @@ -20,8 +20,8 @@ import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.cats.cache.CacheFilter; import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; import com.netflix.spinnaker.cats.cache.WriteableCache; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -32,227 +32,246 @@ import java.util.Set; /** - * An implementation of ProviderCache that writes through to a provided backing - * WriteableCache. + * An implementation of ProviderCache that writes through to a provided backing WriteableCache. * - * This implementation will handle aggregating results from multiple sources, and - * the view methods will merge relationships from all sources into a single relationship. + *
<p>
This implementation will handle aggregating results from multiple sources, and the view + * methods will merge relationships from all sources into a single relationship. */ public class DefaultProviderCache implements ProviderCache { - private static final String ALL_ID = "_ALL_"; //dirty = true - private static final Map ALL_ATTRIBUTE = Collections.unmodifiableMap(new HashMap(1) {{ - put("id", ALL_ID); - }}); - - private final WriteableCache backingStore; - - public DefaultProviderCache(WriteableCache backingStore) { - this.backingStore = backingStore; - } - - @Override - public CacheData get(String type, String id) { - return get(type, id, null); - } - - @Override - public CacheData get(String type, String id, CacheFilter cacheFilter) { - validateTypes(type); - if (ALL_ID.equals(id)) { - return null; - } - CacheData item = backingStore.get(type, id, cacheFilter); - if (item == null) { - return null; - } - - return mergeRelationships(item); - } - - @Override - public Collection getAll(String type) { - return getAll(type, (CacheFilter) null); - } - - @Override - public Collection getAll(String type, CacheFilter cacheFilter) { - validateTypes(type); - Collection all = backingStore.getAll(type, cacheFilter); - return buildResponse(all); - } + private static final String ALL_ID = "_ALL_"; // dirty = true + private static final Map ALL_ATTRIBUTE = + Collections.unmodifiableMap( + new HashMap(1) { + { + put("id", ALL_ID); + } + }); - @Override - public Collection getAll(String type, Collection identifiers) { - return getAll(type, identifiers, null); - } + private final WriteableCache backingStore; - @Override - public Collection getAll(String type, Collection identifiers, CacheFilter cacheFilter) { - validateTypes(type); - Collection byId = backingStore.getAll(type, identifiers, cacheFilter); - return buildResponse(byId); - } + public DefaultProviderCache(WriteableCache backingStore) { + this.backingStore = backingStore; + } - @Override - public Collection getAll(String type, String... 
identifiers) { - return getAll(type, Arrays.asList(identifiers)); - } + @Override + public CacheData get(String type, String id) { + return get(type, id, null); + } - @Override - public Collection existingIdentifiers(String type, Collection identifiers) { - Set existing = new HashSet<>(backingStore.existingIdentifiers(type, identifiers)); - existing.remove(ALL_ID); - return existing; + @Override + public CacheData get(String type, String id, CacheFilter cacheFilter) { + validateTypes(type); + if (ALL_ID.equals(id)) { + return null; } - - @Override - public Collection getIdentifiers(String type) { - validateTypes(type); - Set identifiers = new HashSet<>(backingStore.getIdentifiers(type)); - identifiers.remove(ALL_ID); - return identifiers; + CacheData item = backingStore.get(type, id, cacheFilter); + if (item == null) { + return null; } - @Override - public Collection filterIdentifiers(String type, String glob) { - validateTypes(type); - Set identifiers = new HashSet<>(backingStore.filterIdentifiers(type, glob)); - identifiers.remove(ALL_ID); - - return identifiers; - } - - @Override - public void putCacheResult(String sourceAgentType, Collection authoritativeTypes, CacheResult cacheResult) { - Set allTypes = new HashSet<>(cacheResult.getCacheResults().keySet()); - allTypes.addAll(authoritativeTypes); - allTypes.addAll(cacheResult.getEvictions().keySet()); - validateTypes(allTypes); - - Map> evictions = new HashMap<>(); - - for (String type : allTypes) { - final Collection previousSet; - if (authoritativeTypes.contains(type)) { - previousSet = getExistingSourceIdentifiers(type, sourceAgentType); - } else { - previousSet = new HashSet<>(); - } - if (cacheResult.getCacheResults().containsKey(type)) { - cacheDataType(type, sourceAgentType, cacheResult.getCacheResults().get(type)); - for (CacheData data : cacheResult.getCacheResults().get(type)) { - previousSet.remove(data.getId()); - } - } - if (cacheResult.getEvictions().containsKey(type)) { - previousSet.addAll(cacheResult.getEvictions().get(type)); - } - if (!previousSet.isEmpty()) { - evictions.put(type, previousSet); - } - } - - for (Map.Entry> eviction : evictions.entrySet()) { - evictDeletedItems(eviction.getKey(), eviction.getValue()); + return mergeRelationships(item); + } + + @Override + public Collection getAll(String type) { + return getAll(type, (CacheFilter) null); + } + + @Override + public Collection getAll(String type, CacheFilter cacheFilter) { + validateTypes(type); + Collection all = backingStore.getAll(type, cacheFilter); + return buildResponse(all); + } + + @Override + public Collection getAll(String type, Collection identifiers) { + return getAll(type, identifiers, null); + } + + @Override + public Collection getAll( + String type, Collection identifiers, CacheFilter cacheFilter) { + validateTypes(type); + Collection byId = backingStore.getAll(type, identifiers, cacheFilter); + return buildResponse(byId); + } + + @Override + public Collection getAll(String type, String... 
identifiers) { + return getAll(type, Arrays.asList(identifiers)); + } + + @Override + public Collection existingIdentifiers(String type, Collection identifiers) { + Set existing = new HashSet<>(backingStore.existingIdentifiers(type, identifiers)); + existing.remove(ALL_ID); + return existing; + } + + @Override + public Collection getIdentifiers(String type) { + validateTypes(type); + Set identifiers = new HashSet<>(backingStore.getIdentifiers(type)); + identifiers.remove(ALL_ID); + return identifiers; + } + + @Override + public Collection filterIdentifiers(String type, String glob) { + validateTypes(type); + Set identifiers = new HashSet<>(backingStore.filterIdentifiers(type, glob)); + identifiers.remove(ALL_ID); + + return identifiers; + } + + @Override + public void putCacheResult( + String sourceAgentType, Collection authoritativeTypes, CacheResult cacheResult) { + Set allTypes = new HashSet<>(cacheResult.getCacheResults().keySet()); + allTypes.addAll(authoritativeTypes); + allTypes.addAll(cacheResult.getEvictions().keySet()); + validateTypes(allTypes); + + Map> evictions = new HashMap<>(); + + for (String type : allTypes) { + final Collection previousSet; + if (authoritativeTypes.contains(type)) { + previousSet = getExistingSourceIdentifiers(type, sourceAgentType); + } else { + previousSet = new HashSet<>(); + } + if (cacheResult.getCacheResults().containsKey(type)) { + cacheDataType(type, sourceAgentType, cacheResult.getCacheResults().get(type)); + for (CacheData data : cacheResult.getCacheResults().get(type)) { + previousSet.remove(data.getId()); } + } + if (cacheResult.getEvictions().containsKey(type)) { + previousSet.addAll(cacheResult.getEvictions().get(type)); + } + if (!previousSet.isEmpty()) { + evictions.put(type, previousSet); + } } - @Override - public void putCacheData(String sourceAgentType, CacheData cacheData) { - backingStore.merge(sourceAgentType, cacheData); + for (Map.Entry> eviction : evictions.entrySet()) { + evictDeletedItems(eviction.getKey(), eviction.getValue()); } - - private void validateTypes(String... types) { - validateTypes(Arrays.asList(types)); + } + + @Override + public void addCacheResult( + String sourceAgentType, Collection authoritativeTypes, CacheResult cacheResult) { + Set allTypes = new HashSet<>(cacheResult.getCacheResults().keySet()); + validateTypes(allTypes); + + allTypes.forEach( + type -> { + cacheDataType(type, sourceAgentType, cacheResult.getCacheResults().get(type)); + }); + } + + @Override + public void putCacheData(String sourceAgentType, CacheData cacheData) { + backingStore.merge(sourceAgentType, cacheData); + } + + private void validateTypes(String... 
types) { + validateTypes(Arrays.asList(types)); + } + + private void validateTypes(Collection types) { + Set invalid = new HashSet<>(); + for (String type : types) { + if (!validType(type)) { + invalid.add(type); + } } - - private void validateTypes(Collection types) { - Set invalid = new HashSet<>(); - for (String type : types) { - if (!validType(type)) { - invalid.add(type); - } - } - if (!invalid.isEmpty()) { - throw new IllegalArgumentException("Types contain unsupported characters: " + invalid); - } + if (!invalid.isEmpty()) { + throw new IllegalArgumentException("Types contain unsupported characters: " + invalid); } - - private boolean validType(String type) { - return type.indexOf(':') == -1; + } + + private boolean validType(String type) { + return type.indexOf(':') == -1; + } + + private Collection buildResponse(Collection source) { + Collection response = new ArrayList<>(source.size()); + for (CacheData item : source) { + if (!ALL_ID.equals(item.getId())) { + response.add(mergeRelationships(item)); + } } - - private Collection buildResponse(Collection source) { - Collection response = new ArrayList<>(source.size()); - for (CacheData item : source) { - if (!ALL_ID.equals(item.getId())) { - response.add(mergeRelationships(item)); - } - } - return Collections.unmodifiableCollection(response); + return Collections.unmodifiableCollection(response); + } + + private Collection getExistingSourceIdentifiers(String type, String sourceAgentType) { + CacheData all = + backingStore.get(type, ALL_ID, RelationshipCacheFilter.include(sourceAgentType)); + if (all == null) { + return new HashSet<>(); } - - private Collection getExistingSourceIdentifiers(String type, String sourceAgentType) { - CacheData all = backingStore.get(type, ALL_ID); - if (all == null) { - return new HashSet<>(); - } - Collection relationship = all.getRelationships().get(sourceAgentType); - if (relationship == null) { - return new HashSet<>(); - } - return relationship; + Collection relationship = all.getRelationships().get(sourceAgentType); + if (relationship == null) { + return new HashSet<>(); } + return relationship; + } - private void cacheDataType(String type, String sourceAgentType, Collection items) { - Collection idSet = new HashSet<>(); - - int ttlSeconds = -1; - Collection toStore = new ArrayList<>(items.size() + 1); - for (CacheData item : items) { - idSet.add(item.getId()); - toStore.add(uniqueifyRelationships(item, sourceAgentType)); + private void cacheDataType(String type, String sourceAgentType, Collection items) { + Collection idSet = new HashSet<>(); - if (item.getTtlSeconds() > ttlSeconds) { - ttlSeconds = item.getTtlSeconds(); - } - } - Map> allRelationship = new HashMap<>(); - allRelationship.put(sourceAgentType, idSet); + int ttlSeconds = -1; + Collection toStore = new ArrayList<>(items.size() + 1); + for (CacheData item : items) { + idSet.add(item.getId()); + toStore.add(uniqueifyRelationships(item, sourceAgentType)); - toStore.add(new DefaultCacheData(ALL_ID, ttlSeconds, ALL_ATTRIBUTE, allRelationship)); - backingStore.mergeAll(type, toStore); + if (item.getTtlSeconds() > ttlSeconds) { + ttlSeconds = item.getTtlSeconds(); + } } + Map> allRelationship = new HashMap<>(); + allRelationship.put(sourceAgentType, idSet); - private CacheData uniqueifyRelationships(CacheData source, String sourceAgentType) { - Map> relationships = new HashMap<>(source.getRelationships().size()); - for (Map.Entry> entry : source.getRelationships().entrySet()) { - relationships.put(entry.getKey() + ':' + sourceAgentType, 
entry.getValue()); - } - return new DefaultCacheData(source.getId(), source.getTtlSeconds(), source.getAttributes(), relationships); - } + toStore.add(new DefaultCacheData(ALL_ID, ttlSeconds, ALL_ATTRIBUTE, allRelationship)); + backingStore.mergeAll(type, toStore); + } - private CacheData mergeRelationships(CacheData source) { - Map> relationships = new HashMap<>(source.getRelationships().size()); - for (Map.Entry> entry : source.getRelationships().entrySet()) { - int idx = entry.getKey().indexOf(':'); - if (idx == -1) { - throw new IllegalStateException("Expected delimiter in relationship key"); - } - String type = entry.getKey().substring(0, idx); - Collection values = relationships.get(type); - if (values == null) { - values = new HashSet<>(); - relationships.put(type, values); - } - values.addAll(entry.getValue()); - } - return new DefaultCacheData(source.getId(), source.getAttributes(), relationships); + private CacheData uniqueifyRelationships(CacheData source, String sourceAgentType) { + Map> relationships = new HashMap<>(source.getRelationships().size()); + for (Map.Entry> entry : source.getRelationships().entrySet()) { + relationships.put(entry.getKey() + ':' + sourceAgentType, entry.getValue()); } - - @Override - public void evictDeletedItems(String type, Collection ids) { - backingStore.evictAll(type, ids); + return new DefaultCacheData( + source.getId(), source.getTtlSeconds(), source.getAttributes(), relationships); + } + + private CacheData mergeRelationships(CacheData source) { + Map> relationships = new HashMap<>(source.getRelationships().size()); + for (Map.Entry> entry : source.getRelationships().entrySet()) { + int idx = entry.getKey().indexOf(':'); + if (idx == -1) { + throw new IllegalStateException("Expected delimiter in relationship key"); + } + String type = entry.getKey().substring(0, idx); + Collection values = relationships.get(type); + if (values == null) { + values = new HashSet<>(); + relationships.put(type, values); + } + values.addAll(entry.getValue()); } + return new DefaultCacheData(source.getId(), source.getAttributes(), relationships); + } + + @Override + public void evictDeletedItems(String type, Collection ids) { + backingStore.evictAll(type, ids); + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderRegistry.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderRegistry.java index 00f0207386d..b5de347a9d8 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderRegistry.java +++ b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/DefaultProviderRegistry.java @@ -18,35 +18,36 @@ import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.NamedCacheFactory; - import java.util.Collection; import java.util.Collections; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; public class DefaultProviderRegistry implements ProviderRegistry { - private final ConcurrentMap providerCaches = new ConcurrentHashMap<>(); - private final Collection providers; - - public DefaultProviderRegistry(Collection providers, NamedCacheFactory cacheFactory) { - this.providers = Collections.unmodifiableCollection(providers); - for (Provider provider : providers) { - providerCaches.put(provider.getProviderName(), new DefaultProviderCache(cacheFactory.getCache(provider.getProviderName()))); - } - } - - @Override - public Collection getProviders() { - return providers; - } - - @Override - 
public Collection getProviderCaches() { - return Collections.unmodifiableCollection(providerCaches.values()); - } - - @Override - public ProviderCache getProviderCache(String providerName) { - return providerCaches.get(providerName); + private final ConcurrentMap providerCaches = new ConcurrentHashMap<>(); + private final Collection providers; + + public DefaultProviderRegistry(Collection providers, NamedCacheFactory cacheFactory) { + this.providers = Collections.unmodifiableCollection(providers); + for (Provider provider : providers) { + providerCaches.put( + provider.getProviderName(), + new DefaultProviderCache(cacheFactory.getCache(provider.getProviderName()))); } + } + + @Override + public Collection getProviders() { + return providers; + } + + @Override + public Collection getProviderCaches() { + return Collections.unmodifiableCollection(providerCaches.values()); + } + + @Override + public ProviderCache getProviderCache(String providerName) { + return providerCaches.get(providerName); + } } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCache.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCache.java deleted file mode 100644 index 325c37eeb08..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCache.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.provider; - -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.cache.Cache; -import com.netflix.spinnaker.cats.cache.CacheData; - -import java.util.Collection; - -public interface ProviderCache extends Cache { - void putCacheResult(String source, Collection authoritativeTypes, CacheResult cacheResult); - void putCacheData(String type, CacheData cacheData); - - Collection getAll(String type, Collection identifiers); - - void evictDeletedItems(String type, Collection ids); -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderSynchronizerTypeWrapper.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderSynchronizerTypeWrapper.java deleted file mode 100644 index 84fcf382543..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderSynchronizerTypeWrapper.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
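The uniqueifyRelationships/mergeRelationships pair above is what lets several agents cache the same item without overwriting each other's relationships: keys are written suffixed with the contributing agent and re-merged on read. A worked sketch using the ids from the cluster-caching tests later in this diff:

// On write, uniqueifyRelationships() suffixes each relationship key with ':' + sourceAgentType:
//   serverGroup:AwsProvider:test/us-east-1/ClusterCachingAgent -> [test/us-east-1/testapp-test-v001]
//   serverGroup:AwsProvider:test/us-west-2/ClusterCachingAgent -> [test/us-west-2/testapp-test-v001]
//
// On read, mergeRelationships() splits each stored key at its first ':' and unions the values,
// so a get of application 'testapp' sees one merged relationship:
//   serverGroup -> [test/us-east-1/testapp-test-v001, test/us-west-2/testapp-test-v001]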
- */ - -package com.netflix.spinnaker.cats.provider; - -/** - * This interface is used by the credentials refresh controller to identify providers that should be re-created when the - * credentials have changed. - */ -public interface ProviderSynchronizerTypeWrapper { - /** - * Get the type of the bean to request from Spring's application context. It is expected that the Accounts and Agents - * managed by the provider will be synchronized with the latest configured accounts as a result of requesting this - * bean. - */ - Class getSynchronizerType(); -} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/thread/NamedThreadFactory.java b/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/thread/NamedThreadFactory.java deleted file mode 100644 index 0ccde00aeb3..00000000000 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/thread/NamedThreadFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.thread; - -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicLong; - -public class NamedThreadFactory implements ThreadFactory { - private final AtomicLong threadNumber = new AtomicLong(); - private final String baseName; - - public NamedThreadFactory(String baseName) { - this.baseName = baseName; - } - - @Override - public Thread newThread(Runnable r) { - Thread t = Executors.defaultThreadFactory().newThread(r); - t.setName(baseName + "-" + threadNumber.incrementAndGet()); - return t; - } -} diff --git a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CacheExecutionSpec.groovy b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CacheExecutionSpec.groovy index c88d2e31611..3d7d19f67b3 100644 --- a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CacheExecutionSpec.groovy +++ b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CacheExecutionSpec.groovy @@ -16,7 +16,6 @@ package com.netflix.spinnaker.cats.agent -import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.cats.provider.ProviderRegistry @@ -46,9 +45,9 @@ class CacheExecutionSpec extends Specification { ] } 1 * cachingAgent.getCacheKeyPatterns() >> { - return [ + return Optional.of([ "securityGroups": "securityGroups:*:test:us-west-1" - ] + ]) } 1 * providerCache.filterIdentifiers("securityGroups", "securityGroups:*:test:us-west-1") >> { return [ diff --git a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentationSpec.groovy b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentationSpec.groovy index 3d7db88af76..582bff4dd82 100644 --- 
a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentationSpec.groovy +++ b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/CompositeExecutionInstrumentationSpec.groovy @@ -43,10 +43,10 @@ class CompositeExecutionInstrumentationSpec extends Specification { 1 * e2.executionCompleted(agent, 100) when: - subj.executionFailed(agent, cause) + subj.executionFailed(agent, cause, 100) then: - 1 * e1.executionFailed(agent, cause) - 1 * e2.executionFailed(agent, cause) + 1 * e1.executionFailed(agent, cause, 100) + 1 * e2.executionFailed(agent, cause, 100) } } diff --git a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/DefaultAgentSchedulerSpec.groovy b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/DefaultAgentSchedulerSpec.groovy index b3788531036..5fbcd555b7e 100644 --- a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/DefaultAgentSchedulerSpec.groovy +++ b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/agent/DefaultAgentSchedulerSpec.groovy @@ -63,7 +63,7 @@ class DefaultAgentSchedulerSpec extends Specification { then: 1 * instr.executionStarted(agent) 1 * exec.executeAgent(agent) >> { throw cause } - 1 * instr.executionFailed(agent, cause) + 1 * instr.executionFailed(agent, cause, _) 0 * _ } diff --git a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeIdentitySpec.groovy b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/cluster/DefaultNodeIdentitySpec.groovy similarity index 94% rename from cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeIdentitySpec.groovy rename to cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/cluster/DefaultNodeIdentitySpec.groovy index 2cc1dee5175..25e4571f483 100644 --- a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeIdentitySpec.groovy +++ b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/cluster/DefaultNodeIdentitySpec.groovy @@ -14,8 +14,9 @@ * limitations under the License. 
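The spec updates above reflect that ExecutionInstrumentation.executionFailed now receives the elapsed time alongside the agent and cause, matching executionCompleted. A minimal sketch of an implementation against the widened signature (the parameter name elapsedMs is an assumption):

import com.netflix.spinnaker.cats.agent.Agent;
import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingInstrumentation implements ExecutionInstrumentation {
  private static final Logger log = LoggerFactory.getLogger(LoggingInstrumentation.class);

  @Override
  public void executionStarted(Agent agent) {
    log.debug("{} started", agent.getAgentType());
  }

  @Override
  public void executionCompleted(Agent agent, long elapsedMs) {
    log.debug("{} completed in {}ms", agent.getAgentType(), elapsedMs);
  }

  @Override
  public void executionFailed(Agent agent, Throwable cause, long elapsedMs) {
    // Failures now report timing too, as exercised by the updated specs above.
    log.warn("{} failed after {}ms", agent.getAgentType(), elapsedMs, cause);
  }
}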
*/ -package com.netflix.spinnaker.cats.redis.cluster +package com.netflix.spinnaker.cats.cluster +import com.netflix.spinnaker.cats.cluster.DefaultNodeIdentity import com.netflix.spinnaker.cats.redis.test.NetworkUnavailableCheck import spock.lang.IgnoreIf import spock.lang.Specification diff --git a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/mem/InMemoryCacheSpec.groovy b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/mem/InMemoryCacheSpec.groovy index dbba455311f..627f9fc3a0c 100644 --- a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/mem/InMemoryCacheSpec.groovy +++ b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/mem/InMemoryCacheSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.cats.mem +import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.WriteableCacheSpec class InMemoryCacheSpec extends WriteableCacheSpec { @@ -24,4 +25,28 @@ class InMemoryCacheSpec extends WriteableCacheSpec { InMemoryCache getSubject() { new InMemoryCache() } + + def 'mergeAll with two items that have the same id uses the second item'() { + given: 'one item in the cache' + String id = 'bar' + def itemOneAttributes = [att1: 'val1'] + CacheData itemOne = createData(id, itemOneAttributes) + def itemTwoAttributes = [att2: 'val2'] + CacheData itemTwo = createData(id, itemTwoAttributes) + String type = 'foo' + cache.mergeAll(type, [ itemOne ]) + assert itemOneAttributes.equals(cache.get(type, id).attributes) + + when: 'adding both items' + cache.mergeAll(type, [ itemOne, itemTwo ]) + + then: 'itemTwo is in the cache' + itemTwoAttributes.equals(cache.get(type, id).attributes) + + when: 'storing the items again' + cache.mergeAll(type, [ itemOne, itemTwo ]) + + then: 'itemTwo is still in the cache' + itemTwoAttributes.equals(cache.get(type, id).attributes) + } } diff --git a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/provider/DefaultProviderCacheSpec.groovy b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/provider/DefaultProviderCacheSpec.groovy index 606a0b29ad8..0ee3e58a9cc 100644 --- a/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/provider/DefaultProviderCacheSpec.groovy +++ b/cats/cats-core/src/test/groovy/com/netflix/spinnaker/cats/provider/DefaultProviderCacheSpec.groovy @@ -16,129 +16,5 @@ package com.netflix.spinnaker.cats.provider -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.* -import com.netflix.spinnaker.cats.mem.InMemoryCache - -class DefaultProviderCacheSpec extends CacheSpec { - - WriteableCache backingStore - - @Override - Cache getSubject() { - backingStore = new InMemoryCache() - new DefaultProviderCache(backingStore) - } - - void populateOne(String type, String id, CacheData cacheData = createData(id)) { - defaultProviderCache.putCacheResult('testAgent', [], new DefaultCacheResult((type): [cacheData])) - } - - DefaultProviderCache getDefaultProviderCache() { - getCache() as DefaultProviderCache - } - - def 'explicit evictions are removed from the cache'() { - setup: - String agent = 'agent' - CacheResult result = new DefaultCacheResult(test: [new DefaultCacheData('id', [id: 'id'], [:])]) - defaultProviderCache.putCacheResult(agent, [], result) - - when: - def data = defaultProviderCache.get('test', 'id') - - then: - data != null - data.id == 'id' - - when: - defaultProviderCache.putCacheResult(agent, [], new DefaultCacheResult([:], [test: ['id']])) - data = 
defaultProviderCache.get('test', 'id') - - then: - data == null - } - - def 'multiple agents can cache the same data type'() { - setup: - String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent' - CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1') - String usWest2Agent = 'AwsProvider:test/us-west-2/ClusterCachingAgent' - CacheResult testUsWest2 = buildCacheResult('test', 'us-west-2') - defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup'], testUsEast1) - defaultProviderCache.putCacheResult(usWest2Agent, ['serverGroup'], testUsWest2) - - when: - def app = defaultProviderCache.get('application', 'testapp') - - then: - app.attributes.accountName == 'test' - app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v001', 'test/us-west-2/testapp-test-v001'] - } - - def "an agents deletions don't affect another agent"() { - setup: - String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent' - CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1') - String usWest2Agent = 'AwsProvider:test/us-west-2/ClusterCachingAgent' - CacheResult testUsWest2 = buildCacheResult('test', 'us-west-2') - defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup'], testUsEast1) - defaultProviderCache.putCacheResult(usWest2Agent, ['serverGroup'], testUsWest2) - - when: - def app = defaultProviderCache.get('application', 'testapp') - - then: - app.attributes.accountName == 'test' - app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v001', 'test/us-west-2/testapp-test-v001'] - - when: - testUsEast1 = buildCacheResult('test', 'us-east-1', 'v002') - defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup'], testUsEast1) - app = defaultProviderCache.get('application', 'testapp') - - then: - app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v002', 'test/us-west-2/testapp-test-v001'] - - } - - def "items can be evicted by type and id"() { - setup: - String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent' - CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1') - defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup'], testUsEast1) - - when: - def sg = defaultProviderCache.get('serverGroup', 'test/us-east-1/testapp-test-v001') - - then: - sg != null - - when: - defaultProviderCache.evictDeletedItems('serverGroup', ['test/us-east-1/testapp-test-v001']) - sg = defaultProviderCache.get('serverGroup', 'test/us-east-1/testapp-test-v001') - - then: - sg == null - } - - private CacheResult buildCacheResult(String account, String region, String sgVersion = 'v001') { - String serverGroup = "$account/$region/testapp-test-$sgVersion" - String cluster = "$account/testapp-test" - String application = 'testapp' - String loadbalancer = "$account/$region/testapp--frontend" - Map serverGroupAtts = [ - name : 'testapp-test-v001', - account: account, - region : region - ] - - CacheData app = new DefaultCacheData(application, [accountName: account], [serverGroup: [serverGroup], cluster: [cluster]]) - CacheData sg = new DefaultCacheData(serverGroup, serverGroupAtts, [application: [application], cluster: [cluster], loadBalancer: [loadbalancer]]) - CacheData clu = new DefaultCacheData(cluster, [:], [application: [application], serverGroup: [serverGroup]]) - CacheData lb = new DefaultCacheData(loadbalancer, [:], [serverGroup: [serverGroup]]) - - new DefaultCacheResult([application: [app], serverGroup: [sg], cluster: [clu], loadBalancer: [lb]]) - } +class DefaultProvierCacheSpec extends 
ProviderCacheSpec { } diff --git a/cats/cats-dynomite/cats-dynomite.gradle b/cats/cats-dynomite/cats-dynomite.gradle deleted file mode 100644 index 47bda644e1c..00000000000 --- a/cats/cats-dynomite/cats-dynomite.gradle +++ /dev/null @@ -1,8 +0,0 @@ -dependencies { - compile project(':cats:cats-redis') - compile("com.netflix.spinnaker.kork:kork-dynomite:${spinnaker.version("kork")}") - compile('net.jodah:failsafe:1.0.4') - - testCompile project(':cats:cats-test') - testCompile spinnaker.dependency('korkJedisTest') -} diff --git a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/DynomiteUtils.java b/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/DynomiteUtils.java deleted file mode 100644 index 2397276b3e9..00000000000 --- a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/DynomiteUtils.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.cats.dynomite; - -import com.netflix.dyno.connectionpool.exception.DynoException; -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate; -import net.jodah.failsafe.RetryPolicy; -import redis.clients.jedis.exceptions.JedisException; - -import java.util.Arrays; -import java.util.concurrent.TimeUnit; - -public class DynomiteUtils { - - private DynomiteUtils() {} - - public static RetryPolicy greedyRetryPolicy(long delayMs) { - return new RetryPolicy() - .retryOn(Arrays.asList( - JedisException.class, - DynoException.class, - DynomiteClientDelegate.ClientDelegateException.class - )) - .withDelay(delayMs, TimeUnit.MILLISECONDS) - .withMaxRetries(3); - } -} diff --git a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/ExcessiveDynoFailureRetries.java b/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/ExcessiveDynoFailureRetries.java deleted file mode 100644 index d035e5617fa..00000000000 --- a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/ExcessiveDynoFailureRetries.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.cats.dynomite; - -public class ExcessiveDynoFailureRetries extends RuntimeException { - public ExcessiveDynoFailureRetries(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cache/DynomiteCache.java b/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cache/DynomiteCache.java deleted file mode 100644 index e91dd40ab9c..00000000000 --- a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cache/DynomiteCache.java +++ /dev/null @@ -1,453 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.cats.dynomite.cache; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Lists; -import com.google.common.hash.Hashing; -import com.netflix.dyno.connectionpool.exception.DynoException; -import com.netflix.dyno.jedis.DynoJedisPipeline; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.DefaultCacheData; -import com.netflix.spinnaker.cats.compression.CompressionStrategy; -import com.netflix.spinnaker.cats.compression.NoopCompression; -import com.netflix.spinnaker.cats.dynomite.DynomiteUtils; -import com.netflix.spinnaker.cats.dynomite.ExcessiveDynoFailureRetries; -import com.netflix.spinnaker.cats.redis.cache.AbstractRedisCache; -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions; -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate; -import net.jodah.failsafe.Failsafe; -import net.jodah.failsafe.RetryPolicy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import redis.clients.jedis.Response; -import redis.clients.jedis.exceptions.JedisException; - -import java.io.IOException; -import java.time.Duration; -import java.util.*; -import java.util.Map.Entry; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - -import static java.lang.String.format; -import static java.nio.charset.StandardCharsets.UTF_8; - -public class DynomiteCache extends AbstractRedisCache { - - public interface CacheMetrics { - default void merge(String prefix, - String type, - int itemCount, - int relationshipCount, - int hashMatches, - int hashUpdates, - int saddOperations, - int hmsetOperations, - int expireOperations, - int delOperations) { - // noop - } - - default void evict(String prefix, - String type, - int itemCount, - int delOperations, - int sremOperations) { - // noop - } - - default void get(String prefix, - String type, - int itemCount, - int requestedSize, - int relationshipsRequested, - int hmgetAllOperations) { - // noop - } - - class NOOP implements CacheMetrics {} - } - - private final Logger log = LoggerFactory.getLogger(getClass()); - - // TODO rz - Make retry policy configurable - private static final RetryPolicy 
REDIS_RETRY_POLICY = DynomiteUtils.greedyRetryPolicy(500); - - private final CacheMetrics cacheMetrics; - - private final CompressionStrategy compressionStrategy; - - public DynomiteCache(String prefix, - DynomiteClientDelegate dynomiteClientDelegate, - ObjectMapper objectMapper, - RedisCacheOptions options, - CacheMetrics cacheMetrics, - CompressionStrategy compressionStrategy) { - super(prefix, dynomiteClientDelegate, objectMapper, options); - this.cacheMetrics = cacheMetrics == null ? new CacheMetrics.NOOP() : cacheMetrics; - this.compressionStrategy = compressionStrategy == null ? new NoopCompression() : compressionStrategy; - } - - @Override - public void mergeItems(String type, Collection items) { - if (items.isEmpty()){ - return; - } - - AtomicInteger relationships = new AtomicInteger(); - AtomicInteger hmsetOperations = new AtomicInteger(); - AtomicInteger saddOperations = new AtomicInteger(); - AtomicInteger expireOperations = new AtomicInteger(); - AtomicInteger delOperations = new AtomicInteger(); - AtomicInteger skippedWrites = new AtomicInteger(); - AtomicInteger hashesUpdated = new AtomicInteger(); - - Map> allHashes = getAllHashes(type, items); - Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(failure -> { - log.error("Encountered repeated failures while caching {}:{}, attempting cleanup", prefix, type, failure); - try { - redisClientDelegate.withPipeline(pipeline -> { - DynoJedisPipeline p = (DynoJedisPipeline) pipeline; - for (CacheData item : items) { - p.del(itemHashesId(type, item.getId())); - delOperations.incrementAndGet(); - } - p.sync(); - }); - } catch (JedisException|DynoException e) { - log.error("Failed cleaning up hashes in failure handler in {}:{}", prefix, type, e); - } - throw new ExcessiveDynoFailureRetries(format("Running cache agent %s:%s", prefix, type), failure); - }) - .run(() -> redisClientDelegate.withPipeline(pipeline -> { - DynoJedisPipeline p = (DynoJedisPipeline) pipeline; - - // https://github.com/xetorthio/jedis/issues/758 - boolean pipelineHasOps = false; - for (CacheData item : items) { - MergeOp op = buildHashedMergeOp(type, item, allHashes.get(item)); - skippedWrites.addAndGet(op.skippedWrites); - - if (op.valuesToSet.isEmpty()) { - continue; - } - - pipelineHasOps = true; - - p.hmset(itemId(type, item.getId()), op.valuesToSet); - hmsetOperations.incrementAndGet(); - - if (!op.relNames.isEmpty()) { - p.sadd(allRelationshipsId(type), op.relNames.toArray(new String[op.relNames.size()])); - saddOperations.incrementAndGet(); - relationships.addAndGet(op.relNames.size()); - } - - if (item.getTtlSeconds() > 0) { - p.expire(itemId(type, item.getId()), item.getTtlSeconds()); - expireOperations.incrementAndGet(); - } - - p.sadd(allOfTypeId(type), item.getId()); - saddOperations.incrementAndGet(); - - if (!op.hashesToSet.isEmpty()) { - p.hmset(itemHashesId(type, item.getId()), op.hashesToSet); - hmsetOperations.incrementAndGet(); - p.expire(itemHashesId(type, item.getId()), getHashExpiry()); - expireOperations.incrementAndGet(); - hashesUpdated.addAndGet(op.hashesToSet.size()); - } - } - if (pipelineHasOps) { - p.sync(); - } - })); - - cacheMetrics.merge( - prefix, - type, - items.size(), - relationships.get(), - skippedWrites.get(), - hashesUpdated.get(), - saddOperations.get(), - hmsetOperations.get(), - expireOperations.get(), - delOperations.get() - ); - } - - @Override - protected void evictItems(String type, List identifiers, Collection allRelationships) { - AtomicInteger delOperations = new AtomicInteger(); - AtomicInteger 
sremOperations = new AtomicInteger(); - - Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(failure -> { - throw new ExcessiveDynoFailureRetries(format("Evicting items for %s:%s", prefix, type), failure); - }) - .run(() -> redisClientDelegate.withPipeline(pipeline -> { - DynoJedisPipeline p = (DynoJedisPipeline) pipeline; - - for (List idPartition : Lists.partition(identifiers, options.getMaxDelSize())) { - String[] ids = idPartition.toArray(new String[idPartition.size()]); - pipeline.srem(allOfTypeId(type), ids); - sremOperations.incrementAndGet(); - } - - for (String id : identifiers) { - pipeline.del(itemId(type, id)); - delOperations.incrementAndGet(); - pipeline.del(itemHashesId(type, id)); - delOperations.incrementAndGet(); - } - - if (!identifiers.isEmpty()) { - p.sync(); - } - })); - - cacheMetrics.evict( - prefix, - type, - identifiers.size(), - delOperations.get(), - sremOperations.get() - ); - } - - @Override - protected Collection getItems(String type, List ids, List knownRels) { - if (ids.isEmpty()) { - return new ArrayList<>(); - } - - AtomicInteger hmgetAllOperations = new AtomicInteger(); - Map> rawItems = Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(failure -> { - throw new ExcessiveDynoFailureRetries(format("Getting items for %s:%s", prefix, type), failure); - }) - .get(() -> redisClientDelegate.withPipeline(pipeline -> { - DynoJedisPipeline p = (DynoJedisPipeline) pipeline; - - Map>> responses = new HashMap<>(); - for (String id : ids) { - responses.put(id, pipeline.hgetAll(itemId(type, id))); - hmgetAllOperations.incrementAndGet(); - } - p.sync(); - - return responses.entrySet().stream() - .filter(e -> !e.getValue().get().isEmpty()) - .collect(Collectors.toMap(Entry::getKey, it -> it.getValue().get())); - })); - - Collection results = new ArrayList<>(ids.size()); - for (Map.Entry> rawItem : rawItems.entrySet()) { - CacheData item = extractHashedItem(type, rawItem.getKey(), rawItem.getValue(), knownRels); - if (item != null) { - results.add(item); - } - } - - cacheMetrics.get(prefix, type, results.size(), ids.size(), knownRels.size(), hmgetAllOperations.get()); - return results; - } - - private CacheData extractHashedItem(String type, String id, Map values, List knownRels) { - if (values == null) { - return null; - } - - try { - final Map attributes; - if (values.get("attributes") != null) { - attributes = objectMapper.readValue(compressionStrategy.decompress(values.get("attributes")), ATTRIBUTES); - } else { - attributes = null; - } - final Map> relationships = new HashMap<>(); - for (Map.Entry value : values.entrySet()) { - if (value.getKey().equals("attributes") || value.getKey().equals("id") || !knownRels.contains(value.getKey())) { - continue; - } - - Collection deserializedRel; - try { - deserializedRel = objectMapper.readValue( - compressionStrategy.decompress(value.getValue()), - getRelationshipsTypeReference() - ); - } catch (JsonProcessingException e) { - log.warn("Failed processing property '{}' on item '{}'", value.getKey(), itemId(type, id)); - continue; - } - relationships.put(value.getKey(), deserializedRel); - } - return new DefaultCacheData(id, attributes, relationships); - } catch (IOException deserializationException) { - throw new RuntimeException("Deserialization failed", deserializationException); - } - } - - @Override - protected Set scanMembers(String setKey, Optional glob) { - return Failsafe - .with(REDIS_RETRY_POLICY) - .get(() -> super.scanMembers(setKey, glob)); - } - - private static class MergeOp { - final Set relNames; 
- final Map valuesToSet; - final Map hashesToSet; - final int skippedWrites; - - public MergeOp(Set relNames, Map valuesToSet, Map hashesToSet, int skippedWrites) { - this.relNames = relNames; - this.valuesToSet = valuesToSet; - this.hashesToSet = hashesToSet; - this.skippedWrites = skippedWrites; - } - } - - private boolean hashCheck(Map hashes, String id, String serializedValue, Map updatedHashes, boolean hasTtl) { - if (options.isHashingEnabled() && !hasTtl) { - final String hash = Hashing.sha1().newHasher().putString(serializedValue, UTF_8).hash().toString(); - final String existingHash = hashes.get(id); - if (hash.equals(existingHash)) { - return true; - } - updatedHashes.put(id, hash); - } - return false; - } - - private MergeOp buildHashedMergeOp(String type, CacheData cacheData, Map hashes) { - int skippedWrites = 0; - final boolean hasTtl = cacheData.getTtlSeconds() > 0; - final String serializedAttributes; - try { - if (cacheData.getAttributes().isEmpty()) { - serializedAttributes = null; - } else { - serializedAttributes = objectMapper.writeValueAsString(cacheData.getAttributes()); - } - } catch (JsonProcessingException serializationException) { - throw new RuntimeException("Attribute serialization failed", serializationException); - } - - final Map hashesToSet = new HashMap<>(); - final Map valuesToSet = new HashMap<>(); - if (serializedAttributes != null && hashCheck(hashes, attributesId(type, cacheData.getId()), serializedAttributes, hashesToSet, hasTtl)) { - skippedWrites++; - } else if (serializedAttributes != null) { - valuesToSet.put("attributes", compressionStrategy.compress(serializedAttributes)); - } - - if (!cacheData.getRelationships().isEmpty()) { - for (Map.Entry> relationship : cacheData.getRelationships().entrySet()) { - final String relationshipValue; - try { - relationshipValue = objectMapper.writeValueAsString(new LinkedHashSet<>(relationship.getValue())); - } catch (JsonProcessingException serializationException) { - throw new RuntimeException("Relationship serialization failed", serializationException); - } - if (hashCheck(hashes, relationshipId(type, cacheData.getId(), relationship.getKey()), relationshipValue, hashesToSet, hasTtl)) { - skippedWrites++; - } else { - valuesToSet.put(relationship.getKey(), compressionStrategy.compress(relationshipValue)); - } - } - } - - return new MergeOp(cacheData.getRelationships().keySet(), valuesToSet, hashesToSet, skippedWrites); - } - - private Map> getAllHashes(String type, Collection items) { - if (isHashingDisabled(type)) { - return new HashMap<>(); - } - - return Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(failure -> { - throw new ExcessiveDynoFailureRetries(format("Getting all requested hashes for %s:%s", prefix, type), failure); - }) - .get(() -> redisClientDelegate.withPipeline(pipeline -> { - DynoJedisPipeline p = (DynoJedisPipeline) pipeline; - - Map>> responses = new HashMap<>(); - for (CacheData item : items) { - responses.put(item, p.hgetAll(itemHashesId(type, item.getId()))); - } - p.sync(); - - return responses.entrySet().stream().collect(Collectors.toMap(Entry::getKey, it -> it.getValue().get())); - })); - } - - @Override - protected boolean isHashingDisabled(String type) { - return Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(failure -> { - throw new ExcessiveDynoFailureRetries(format("Getting hashing flag for %s:%s", prefix, type), failure); - }) - .get(() -> super.isHashingDisabled(type)); - } - - private int getHashExpiry() { - // between 1 and 3 hours; boundary is 
exclusive - return (int) Duration.ofMinutes(ThreadLocalRandom.current().nextInt(60, 4 * 60)).getSeconds(); - } - - private String itemId(String type, String id) { - return format("{%s:%s}:%s", prefix, type, id); - } - - private String itemHashesId(String type, String id) { - return format("{%s:%s}:hashes:%s", prefix, type, id); - } - - @Override - protected String attributesId(String type, String id) { - return format("{%s:%s}:attributes:%s", prefix, type, id); - } - - @Override - protected String relationshipId(String type, String id, String relationship) { - return format("{%s:%s}:relationships:%s:%s", prefix, type, id, relationship); - } - - @Override - protected String allRelationshipsId(String type) { - return format("{%s:%s}:relationships", prefix, type); - } - - @Override - protected String allOfTypeId(String type) { - return format("{%s:%s}:members", prefix, type); - } -} diff --git a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cache/DynomiteNamedCacheFactory.java b/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cache/DynomiteNamedCacheFactory.java deleted file mode 100644 index 67ba5b7a741..00000000000 --- a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cache/DynomiteNamedCacheFactory.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.cats.dynomite.cache; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.cache.NamedCacheFactory; -import com.netflix.spinnaker.cats.cache.WriteableCache; -import com.netflix.spinnaker.cats.compression.CompressionStrategy; -import com.netflix.spinnaker.cats.dynomite.cache.DynomiteCache.CacheMetrics; -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions; -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate; - -import java.util.Optional; - -public class DynomiteNamedCacheFactory implements NamedCacheFactory { - - private final Optional keyspace; - private final DynomiteClientDelegate dynomiteClientDelegate; - private final ObjectMapper objectMapper; - private final RedisCacheOptions options; - private final CacheMetrics cacheMetrics; - private final CompressionStrategy compressionStrategy; - - public DynomiteNamedCacheFactory(Optional keyspace, - DynomiteClientDelegate dynomiteClientDelegate, - ObjectMapper objectMapper, - RedisCacheOptions options, - CacheMetrics cacheMetrics, - CompressionStrategy compressionStrategy) { - this.keyspace = keyspace; - this.dynomiteClientDelegate = dynomiteClientDelegate; - this.objectMapper = objectMapper; - this.options = options; - this.cacheMetrics = cacheMetrics; - this.compressionStrategy = compressionStrategy; - } - - @Override - public WriteableCache getCache(String name) { - return new DynomiteCache(getPrefix(name), dynomiteClientDelegate, objectMapper, options, cacheMetrics, compressionStrategy); - } - - private String getPrefix(String name) { - return keyspace.map(k -> name + "-" + k).orElse(name); - } -} diff --git a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cluster/DynoClusteredAgentScheduler.java b/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cluster/DynoClusteredAgentScheduler.java deleted file mode 100644 index 7832dce3f11..00000000000 --- a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cluster/DynoClusteredAgentScheduler.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.cats.dynomite.cluster; - -import com.netflix.dyno.connectionpool.exception.DynoException; -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.AgentExecution; -import com.netflix.spinnaker.cats.agent.AgentLock; -import com.netflix.spinnaker.cats.agent.AgentScheduler; -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; -import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; -import com.netflix.spinnaker.cats.module.CatsModuleAware; -import com.netflix.spinnaker.cats.redis.cluster.AgentIntervalProvider; -import com.netflix.spinnaker.cats.redis.cluster.NodeIdentity; -import com.netflix.spinnaker.cats.redis.cluster.NodeStatusProvider; -import com.netflix.spinnaker.cats.thread.NamedThreadFactory; -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate; -import net.jodah.failsafe.Failsafe; -import net.jodah.failsafe.RetryPolicy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import redis.clients.jedis.JedisCommands; -import redis.clients.jedis.exceptions.JedisException; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * Temporary clustered agent scheduler while we're waiting for Dyno client support of evalsha and loadscript. - * - * Shares a similar strategy as ClusteredAgentScheduler, but doesn't use Lua, is slower and less safe. Dynomite - * support for Lua is in-progress, so this class is rather temporary, then we can move to ClusteredSortAgentScheduler. 
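 *
 * <p>Editorial sketch, not part of the original source: the "slower and less safe" part is
 * that without Lua the lock acquire is a get/setnx/pexpireAt sequence of plain commands, and
 * another node can interleave between any two of them. Names below are illustrative only,
 * assuming a raw Jedis client.
 *
 * <pre>{@code
 * boolean tryAcquire(redis.clients.jedis.Jedis client, String agentType, String identity, long timeoutMs) {
 *   if (client.get(agentType) == null && client.setnx(agentType, identity) == 1L) {
 *     // A crash right here leaves a lock key with no expiry -- the "deadlocked agent"
 *     // case this scheduler later detects (ttl == -1) and deletes by hand.
 *     client.pexpireAt(agentType, System.currentTimeMillis() + timeoutMs);
 *     return true;
 *   }
 *   return false;
 * }
 * }</pre>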
- */ -public class DynoClusteredAgentScheduler extends CatsModuleAware implements AgentScheduler, Runnable { - - private final static Logger log = LoggerFactory.getLogger(DynoClusteredAgentScheduler.class); - - private final static RetryPolicy ACQUIRE_LOCK_RETRY_POLICY = new RetryPolicy() - .retryOn(Arrays.asList(DynoException.class, JedisException.class)) - .withMaxRetries(3) - .withDelay(25, TimeUnit.MILLISECONDS); - - private static enum Status { - SUCCESS, - FAILURE - } - - private final DynomiteClientDelegate redisClientDelegate; - private final NodeIdentity nodeIdentity; - private final AgentIntervalProvider intervalProvider; - private final ExecutorService agentExecutionPool; - private final Map agents = new ConcurrentHashMap<>(); - private final Map activeAgents = new ConcurrentHashMap<>(); - private final NodeStatusProvider nodeStatusProvider; - - public DynoClusteredAgentScheduler(DynomiteClientDelegate redisClientDelegate, NodeIdentity nodeIdentity, AgentIntervalProvider intervalProvider, NodeStatusProvider nodeStatusProvider) { - this(redisClientDelegate, nodeIdentity, intervalProvider, nodeStatusProvider, Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(DynoClusteredAgentScheduler.class.getSimpleName())), Executors.newCachedThreadPool(new NamedThreadFactory(AgentExecutionAction.class.getSimpleName()))); - } - - public DynoClusteredAgentScheduler(DynomiteClientDelegate redisClientDelegate, NodeIdentity nodeIdentity, AgentIntervalProvider intervalProvider, NodeStatusProvider nodeStatusProvider, ScheduledExecutorService lockPollingScheduler, ExecutorService agentExecutionPool) { - this.redisClientDelegate = redisClientDelegate; - this.nodeIdentity = nodeIdentity; - this.intervalProvider = intervalProvider; - this.nodeStatusProvider = nodeStatusProvider; - this.agentExecutionPool = agentExecutionPool; - lockPollingScheduler.scheduleAtFixedRate(this, 0, 5, TimeUnit.SECONDS); - } - - @Override - public void schedule(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { - if (agent instanceof AgentSchedulerAware) { - ((AgentSchedulerAware) agent).setAgentScheduler(this); - } - - final AgentExecutionAction agentExecutionAction = new AgentExecutionAction(agent, agentExecution, executionInstrumentation); - agents.put(agent.getAgentType(), agentExecutionAction); - } - - @Override - public void unschedule(Agent agent) { - releaseRunKey(agent.getAgentType(), 0); - agents.remove(agent.getAgentType()); - } - - @Override - public void run() { - if (!nodeStatusProvider.isNodeEnabled()) { - return; - } - try { - runAgents(); - } catch (Throwable t) { - log.error("Failed running cache agents", t); - } - } - - private Map acquire() { - Map acquired = new HashMap<>(agents.size()); - Set skip = new HashSet<>(activeAgents.keySet()); - agents.entrySet().stream() - .filter(a -> !skip.contains(a.getKey())) - .forEach(a -> { - final String agentType = a.getKey(); - AgentIntervalProvider.Interval interval = intervalProvider.getInterval(a.getValue().getAgent()); - if (acquireRunKey(agentType, interval.getTimeout())) { - acquired.put(agentType, new NextAttempt(System.currentTimeMillis(), interval.getInterval(), interval.getErrorInterval())); - } - }); - return acquired; - } - - private boolean acquireRunKey(String agentType, long timeout) { - // This isn't as safe as the vanilla Redis impl because the call isn't atomic, but it's the best we can do until - // dynomite adds support for `String set(String key, String value, String nxxx, String expx, 
long time)` (which - // they are working on). - String identity = nodeIdentity.getNodeIdentity(); - return redisClientDelegate.withCommandsClient(client -> { - return Failsafe - .with(ACQUIRE_LOCK_RETRY_POLICY) - .get(() -> { - String response = client.get(agentType); - if (response == null && client.setnx(agentType, identity) == 1) { - client.pexpireAt(agentType, System.currentTimeMillis() + timeout); - return true; - } - - if (client.ttl(agentType) == -1) { - log.warn("Detected potential deadlocked agent, removing lock key: " + agentType); - client.del(agentType); - } - return false; - }); - }); - } - - private void runAgents() { - Map thisRun = acquire(); - activeAgents.putAll(thisRun); - for (final Map.Entry toRun : thisRun.entrySet()) { - final AgentExecutionAction exec = agents.get(toRun.getKey()); - agentExecutionPool.submit(new AgentJob(toRun.getValue(), exec, this)); - } - } - - private void agentCompleted(String agentType, long nextExecutionTime) { - try { - releaseRunKey(agentType, nextExecutionTime); - } finally { - activeAgents.remove(agentType); - } - } - - private void releaseRunKey(String agentType, long when) { - final long newTtl = when - System.currentTimeMillis(); - final boolean delete = newTtl < 2500L; - redisClientDelegate.withCommandsClient(client -> { - if (delete) { - deleteLock(client, agentType); - } else { - ttlLock(client, agentType, newTtl); - } - }); - } - - private void deleteLock(JedisCommands client, String agentType) { - client.del(agentType); - } - - private void ttlLock(JedisCommands client, String agentType, long newTtl) { - String response = client.get(agentType); - if (nodeIdentity.getNodeIdentity().equals(response)) { - client.pexpireAt(agentType, System.currentTimeMillis() + newTtl); - } - } - - private static class NextAttempt { - private final long currentTime; - private final long successInterval; - private final long errorInterval; - - public NextAttempt(long currentTime, long successInterval, long errorInterval) { - this.currentTime = currentTime; - this.successInterval = successInterval; - this.errorInterval = errorInterval; - } - - public long getNextTime(Status status) { - if (status == Status.SUCCESS) { - return currentTime + successInterval; - } - - return currentTime + errorInterval; - } - } - - private static class AgentJob implements Runnable { - private final NextAttempt lockReleaseTime; - private final AgentExecutionAction action; - private final DynoClusteredAgentScheduler scheduler; - - public AgentJob(NextAttempt lockReleaseTime, AgentExecutionAction action, DynoClusteredAgentScheduler scheduler) { - this.lockReleaseTime = lockReleaseTime; - this.action = action; - this.scheduler = scheduler; - } - - @Override - public void run() { - Status status = Status.FAILURE; - try { - status = action.execute(); - } finally { - scheduler.agentCompleted(action.getAgent().getAgentType(), lockReleaseTime.getNextTime(status)); - } - } - } - - private static class AgentExecutionAction { - private final Agent agent; - private final AgentExecution agentExecution; - private final ExecutionInstrumentation executionInstrumentation; - - public AgentExecutionAction(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { - this.agent = agent; - this.agentExecution = agentExecution; - this.executionInstrumentation = executionInstrumentation; - } - - public Agent getAgent() { - return agent; - } - - public Status execute() { - try { - executionInstrumentation.executionStarted(agent); - long startTime = 
System.nanoTime(); - agentExecution.executeAgent(agent); - executionInstrumentation.executionCompleted(agent, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); - return Status.SUCCESS; - } catch (Throwable cause) { - executionInstrumentation.executionFailed(agent, cause); - return Status.FAILURE; - } - } - - } -} diff --git a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cluster/DynoClusteredSortAgentScheduler.java b/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cluster/DynoClusteredSortAgentScheduler.java deleted file mode 100644 index 0492a3483a3..00000000000 --- a/cats/cats-dynomite/src/main/java/com/netflix/spinnaker/cats/dynomite/cluster/DynoClusteredSortAgentScheduler.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.cats.dynomite.cluster; - - -import com.netflix.spinnaker.cats.agent.*; -import com.netflix.spinnaker.cats.dynomite.DynomiteUtils; -import com.netflix.spinnaker.cats.dynomite.ExcessiveDynoFailureRetries; -import com.netflix.spinnaker.cats.module.CatsModuleAware; -import com.netflix.spinnaker.cats.redis.cluster.AgentIntervalProvider; -import com.netflix.spinnaker.cats.redis.cluster.ClusteredSortAgentLock; -import com.netflix.spinnaker.cats.redis.cluster.NodeStatusProvider; -import com.netflix.spinnaker.cats.thread.NamedThreadFactory; -import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; -import net.jodah.failsafe.Failsafe; -import net.jodah.failsafe.RetryPolicy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.time.Clock; -import java.time.temporal.ChronoUnit; -import java.util.*; -import java.util.concurrent.*; - -import static java.lang.String.format; - -/** - * rz NOTE: This is functionally the same (changes listed below) as ClusteredSortAgentScheduler, but with early support - * for Dynomite. Has been tested more, it'll be rolled up into the original class. - * - * 1. Dynomite does not yet support scriptLoad/evalsha. This class just uses eval. - * 1. We need to ensure the WAITING and WORKING sets are on the same shard, so all keys use hashtags. - * 1. RedisClientDelgate over JedisPool. - * 1. Instead of retrieving the time from Redis, clouddriver is responsible for generating scores from Clock. Prefer - * how the Jedis-based class does it, but time is not supported in Dynomite. - * 1. Supports scheduling non-caching agent executions. Mandatory for AWS provider. - * 1. Retries on connection errors. 
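 *
 * <p>Editorial sketch, not part of the original source: the clock-based scoring mentioned
 * above boils down to epoch-second deadlines computed locally, mirroring this class's
 * score() helper; a member is "due" once its score is at or below the current time.
 *
 * <pre>{@code
 * // Deadline score for (now + offsetSeconds); score(clock, 0) is "now".
 * static String score(java.time.Clock clock, long offsetSeconds) {
 *   return String.valueOf(clock.instant().plusSeconds(offsetSeconds).getEpochSecond());
 * }
 * }</pre>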
- */ -public class DynoClusteredSortAgentScheduler extends CatsModuleAware implements AgentScheduler, Runnable { - private enum Status { - SUCCESS, - FAILURE - } - - private final Clock clock; - private final RedisClientDelegate redisClientDelegate; - private final NodeStatusProvider nodeStatusProvider; - private final AgentIntervalProvider intervalProvider; - private final ExecutorService agentWorkPool; - - private static final int NOW = 0; - private static final int REDIS_REFRESH_PERIOD = 30; - private int runCount = 0; - - private final Logger log; - - private Map agents; - private Optional runningAgents; - - // This code assumes that every agent being run is in exactly either the WAITING or WORKING set. - private static final String WAITING_SET = "{scheduler}:WAITZ"; - private static final String WORKING_SET = "{scheduler}:WORKZ"; - private static final String ADD_AGENT_SCRIPT = "addAgentScript"; - private static final String VALID_SCORE_SCRIPT = "validScoreScript"; - private static final String SWAP_SET_SCRIPT = "swapSetScript"; - private static final String REMOVE_AGENT_SCRIPT = "removeAgentScript"; - private static final String CONDITIONAL_SWAP_SET_SCRIPT = "conditionalSwapSetScript"; - - private static final RetryPolicy RETRY_POLICY = DynomiteUtils.greedyRetryPolicy(3000); - - private ConcurrentHashMap scripts; - - public DynoClusteredSortAgentScheduler(Clock clock, RedisClientDelegate redisClientDelegate, NodeStatusProvider nodeStatusProvider, AgentIntervalProvider intervalProvider, Integer parallelism) { - this.clock = clock; - this.redisClientDelegate = redisClientDelegate; - this.nodeStatusProvider = nodeStatusProvider; - this.agents = new ConcurrentHashMap<>(); - this.intervalProvider = intervalProvider; - this.log = LoggerFactory.getLogger(getClass()); - - if (parallelism == 0 || parallelism < -1) { - throw new IllegalArgumentException("Argument 'parallelism' must be positive, or -1 (for unlimited parallelism)."); - } else if (parallelism > 0) { - this.runningAgents = Optional.of(new Semaphore(parallelism)); - } else { - this.runningAgents = Optional.empty(); - } - - scripts = new ConcurrentHashMap<>(); - storeScripts(); - - this.agentWorkPool = Executors.newCachedThreadPool(new NamedThreadFactory(AgentWorker.class.getSimpleName())); - Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(DynoClusteredSortAgentScheduler.class.getSimpleName())) - .scheduleAtFixedRate(this, 0, 1, TimeUnit.SECONDS); - } - - private void storeScripts() { - // When we switch an agent from one set to another, we first make sure it exists in the set we are removing it - // from, and then we perform the swap. If this check fails, the thread performing the swap does not get ownership - // of the agent. - // Swap happens from KEYS[1] -> KEYS[2] with the agent type being ARGV[1], and the score being ARGV[2]. 
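    // Editorial aside, not in the original source: concretely, the swap script below gets
    // invoked through the scripting client roughly as
    //
    //   Object released = client.eval(swapSetScript,
    //       Arrays.asList("{scheduler}:WAITZ", "{scheduler}:WORKZ"), // KEYS[1] -> KEYS[2]
    //       Arrays.asList(agentType, acquireScore));                 // ARGV[1],  ARGV[2]
    //
    // A non-nil return means the zrem/zadd pair ran and the caller now owns the agent in
    // the destination set; the shared {scheduler} hashtag keeps both sorted sets on one
    // shard so the script can touch them atomically.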
- scripts.put(SWAP_SET_SCRIPT, - "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + - "if score ~= nil then\n" + - " redis.call('zrem', KEYS[1], ARGV[1])\n" + - " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n" + - " return score\n" + - "else return nil end\n" - ); - - scripts.put(CONDITIONAL_SWAP_SET_SCRIPT, - "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + - "if score == ARGV[3] then\n" + - " redis.call('zrem', KEYS[1], ARGV[1])\n" + - " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n" + - " return score\n" + - "else return nil end\n" - ); - - scripts.put(VALID_SCORE_SCRIPT, - "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + - "if score == ARGV[2] then\n" + - " return score\n" + - "else return nil end\n" - ); - - // If the agent isn't present in either the WAITING or WORKING sets, it's safe to add. If it's present in either, - // it's being worked on or was recently run, so leave it be. - // KEYS[1] and KEYS[2] are checked for inclusion. If the agent is in neither ARGV[1] is added to KEYS[1] with score - // ARGV[2]. - scripts.put(ADD_AGENT_SCRIPT, - "if redis.call('zrank', KEYS[1], ARGV[1]) ~= nil then\n" + - " if redis.call('zrank', KEYS[2], ARGV[1]) ~= nil then\n" + - " return redis.call('zadd', KEYS[1], ARGV[2], ARGV[1])\n" + - " else return nil end\n" + - "else return nil end\n" - ); - - scripts.put(REMOVE_AGENT_SCRIPT, - "redis.call('zrem', KEYS[1], ARGV[1])\n" + - "redis.call('zrem', KEYS[2], ARGV[1])\n" - ); - } - - private String getScript(String scriptName) { - String scriptSha = scripts.get(scriptName); - if (scriptSha == null) { - storeScripts(); - scriptSha = scripts.get(scriptName); - if (scriptSha == null) { - throw new RuntimeException("Failed to load caching scripts."); - } - } - return scripts.get(scriptName); - } - - @Override - public void schedule(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { - if (agent instanceof AgentSchedulerAware) { - ((AgentSchedulerAware)agent).setAgentScheduler(this); - } - - withRetry( - format("Scheduling %s", agent.getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> { - client.eval(getScript(ADD_AGENT_SCRIPT), 2, WAITING_SET, WORKING_SET, agent.getAgentType(), score(NOW)); - }) - ); - agents.put(agent.getAgentType(), new AgentWorker(agent, agentExecution, executionInstrumentation, this)); - } - - @Override - public ClusteredSortAgentLock tryLock(Agent agent) { - ScoreTuple scores = acquireAgent(agent); - if (scores != null) { - return new ClusteredSortAgentLock(agent, scores.acquireScore, scores.releaseScore); - } else { - return null; - } - } - - @Override - public boolean tryRelease(ClusteredSortAgentLock lock) { - return conditionalReleaseAgent(lock.getAgent(), lock.getAcquireScore(), lock.getReleaseScore()) != null; - } - - @Override - public boolean lockValid(ClusteredSortAgentLock lock) { - return withRetry( - format("Checking if lock is valid for %s", lock.getAgent().getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> client.eval( - getScript(VALID_SCORE_SCRIPT), 1, WORKING_SET, - lock.getAgent().getAgentType(), lock.getAcquireScore()) != null - ) - ); - } - - public void unschedule(Agent agent) { - agents.remove(agent.getAgentType()); - withRetry( - format("Unscheduling %s", agent.getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> { - client.eval(getScript(REMOVE_AGENT_SCRIPT), 2, WAITING_SET, WORKING_SET, agent.getAgentType()); - }) - ); - } - - @Override - public boolean isAtomic() 
{ - return true; - } - - @Override - public void run() { - if (!nodeStatusProvider.isNodeEnabled()) { - return; - } - try { - saturatePool(); - } catch (Throwable t) { - log.error("Failed to run caching agents", t); - } finally { - runCount++; - } - } - - private String score(long offsetSeconds) { - return format("%d", clock.instant().plus(offsetSeconds, ChronoUnit.SECONDS).getEpochSecond()); - } - - private ScoreTuple acquireAgent(Agent agent) { - String acquireScore = score(intervalProvider.getInterval(agent).getTimeout()); - Object releaseScore = withRetry( - format("Acquiring lock on %s", agent.getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> { - return client.eval(getScript(SWAP_SET_SCRIPT), - Arrays.asList(WAITING_SET, WORKING_SET), - Arrays.asList(agent.getAgentType(), acquireScore)); - }) - ); - return releaseScore != null ? new ScoreTuple(acquireScore, releaseScore.toString()) : null; - } - - private ScoreTuple conditionalReleaseAgent(Agent agent, String acquireScore, Status status) { - long newInterval = status == Status.SUCCESS - ? intervalProvider.getInterval(agent).getInterval() - : intervalProvider.getInterval(agent).getErrorInterval(); - String newAcquireScore = score(newInterval); - - Object releaseScore = withRetry( - format("Conditionally releasing %s", agent.getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> { - return client.eval(getScript(CONDITIONAL_SWAP_SET_SCRIPT), - Arrays.asList(WORKING_SET, WAITING_SET), - Arrays.asList(agent.getAgentType(), newAcquireScore, - acquireScore)); - }) - ); - - return releaseScore != null ? new ScoreTuple(newAcquireScore, releaseScore.toString()) : null; - } - - private ScoreTuple conditionalReleaseAgent(Agent agent, String acquireScore, String newAcquireScore) { - Object releaseScore = withRetry( - format("Conditionally releasing %s", agent.getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> { - return client.eval(getScript(CONDITIONAL_SWAP_SET_SCRIPT), - Arrays.asList(WORKING_SET, WAITING_SET), - Arrays.asList(agent.getAgentType(), newAcquireScore, - acquireScore)).toString(); - }) - ); - return releaseScore != null ? new ScoreTuple(newAcquireScore, releaseScore.toString()) : null; - } - - private ScoreTuple releaseAgent(Agent agent) { - String acquireScore = score(intervalProvider.getInterval(agent).getInterval()); - Object releaseScore = withRetry( - format("Releasing %s", agent.getAgentType()), - () -> redisClientDelegate.withScriptingClient(client -> { - return client.eval(getScript(SWAP_SET_SCRIPT), - Arrays.asList(WORKING_SET, WAITING_SET), - Arrays.asList(agent.getAgentType(), acquireScore)).toString(); - }) - ); - return releaseScore != null ? new ScoreTuple(acquireScore, releaseScore.toString()) : null; - } - - private void saturatePool() { - withRetry( - "Repopulating agents into waiting set", - () -> redisClientDelegate.withScriptingClient(client -> { - // Occasionally repopulate the agents in case redis went down. If they already exist, this is a NOOP - if (runCount % REDIS_REFRESH_PERIOD == 0) { - for (String agent : agents.keySet()) { - client.eval(getScript(ADD_AGENT_SCRIPT), 2, WAITING_SET, WORKING_SET, agent, score(NOW)); - } - } - }) - ); - - List keys = withRetry( - "Getting available agents", - () -> redisClientDelegate.withCommandsClient(client -> { - // First cull threads in the WORKING set that have been there too long (TIMEOUT time). 
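                // Editorial aside, not in the original source: scores are epoch-second
                // deadlines, so "too long" means the member's score is already in the past.
                // The sweep below is equivalent to the redis-cli query
                //
                //   ZRANGEBYSCORE {scheduler}:WORKZ -inf <now-epoch-seconds>
                //
                // and each agent it returns is handed back to the WAITING set via releaseAgent.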
- Set oldKeys = client.zrangeByScore(WORKING_SET, "-inf", score(NOW)); - for (String key : oldKeys) { - // Ignore result, since if this agent was released between now and the above jedis call, our work was done - // for us. - AgentWorker worker = agents.get(key); - if (worker != null) { - releaseAgent(worker.agent); - } - } - - // Now look for agents that have been in the queue for at least INTERVAL time. - return new ArrayList<>(client.zrangeByScore(WAITING_SET, "-inf", score(NOW))); - }) - ); - - Set workers = new HashSet<>(); - - // Loop until we either run out of threads to use, or agents (which are keys) to run. - while (!keys.isEmpty() && runningAgents.map(Semaphore::tryAcquire).orElse(true)) { - String agent = keys.remove(0); - - AgentWorker worker = agents.get(agent); - ScoreTuple score; - if (worker != null && (score = acquireAgent(worker.agent)) != null) { - // This score is used to determine if the worker thread running the agent is allowed to store its results. - // If on release of this agent, the scores don't match, this agent was rescheduled by a separate thread. - worker.setScore(score.acquireScore); - workers.add(worker); - } - } - - for (AgentWorker worker : workers) { - agentWorkPool.submit(worker); - } - } - - private T withRetry(String description, Callable callback) { - return Failsafe - .with(RETRY_POLICY) - .onRetriesExceeded(failure -> { - throw new ExcessiveDynoFailureRetries(description, failure); - }) - .get(callback); - } - - private void withRetry(String description, Runnable callback) { - Failsafe - .with(RETRY_POLICY) - .onRetriesExceeded(failure -> { - throw new ExcessiveDynoFailureRetries(description, failure); - }) - .run(callback::run); - } - - private static class AgentWorker implements Runnable { - private final Agent agent; - private final AgentExecution agentExecution; - private final ExecutionInstrumentation executionInstrumentation; - private final DynoClusteredSortAgentScheduler scheduler; - private String acquireScore; - - AgentWorker(Agent agent, - AgentExecution agentExecution, - ExecutionInstrumentation executionInstrumentation, - DynoClusteredSortAgentScheduler scheduler) { - this.agent = agent; - this.agentExecution = agentExecution; - this.executionInstrumentation = executionInstrumentation; - this.scheduler = scheduler; - } - - public void setScore(String score) { - acquireScore = score; - } - - @Override - public void run() { - assert acquireScore != null; - - if (agentExecution instanceof CachingAgent.CacheExecution) { - runAsCache(); - } else { - runAsSideEffect(); - } - } - - private void runAsCache() { - if (!(agentExecution instanceof CachingAgent.CacheExecution)) { - // If this exception is hit, there's a bug in the main run() method. Definitely shouldn't happen. - throw new IllegalStateException("Agent execution must be a CacheExecution to runAsCache"); - } - CachingAgent.CacheExecution agentExecution = (CachingAgent.CacheExecution) this.agentExecution; - - CacheResult result = null; - Status status = Status.FAILURE; - try { - executionInstrumentation.executionStarted(agent); - long startTime = System.nanoTime(); - result = agentExecution.executeAgentWithoutStore(agent); - executionInstrumentation.executionCompleted(agent, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); - status = Status.SUCCESS; - } catch (Throwable cause) { - executionInstrumentation.executionFailed(agent, cause); - } finally { - // Regardless of success or failure, we need to try and release this agent. 
If the release is successful (we - // own this agent), and a result was created, we can store it. - scheduler.runningAgents.ifPresent(Semaphore::release); - if (scheduler.conditionalReleaseAgent(agent, acquireScore, status) != null && result != null) { - agentExecution.storeAgentResult(agent, result); - } - } - } - - private void runAsSideEffect() { - Status status = Status.FAILURE; - try { - executionInstrumentation.executionStarted(agent); - long startTime = System.nanoTime(); - agentExecution.executeAgent(agent); - executionInstrumentation.executionCompleted(agent, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); - status = Status.SUCCESS; - } catch (Throwable cause) { - executionInstrumentation.executionFailed(agent, cause); - } finally { - scheduler.runningAgents.ifPresent(Semaphore::release); - scheduler.conditionalReleaseAgent(agent, acquireScore, status); - } - } - } - - private static class ScoreTuple { - private final String acquireScore; - private final String releaseScore; - - public ScoreTuple(String acquireScore, String releaseScore) { - this.acquireScore = acquireScore; - this.releaseScore = releaseScore; - } - } - -} diff --git a/cats/cats-dynomite/src/test/groovy/com/netflix/spinnaker/cat/dynomite/cache/DynomiteCacheSpec.groovy b/cats/cats-dynomite/src/test/groovy/com/netflix/spinnaker/cat/dynomite/cache/DynomiteCacheSpec.groovy deleted file mode 100644 index 4ee371bcc29..00000000000 --- a/cats/cats-dynomite/src/test/groovy/com/netflix/spinnaker/cat/dynomite/cache/DynomiteCacheSpec.groovy +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.cat.dynomite.cache - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration.LoadBalancingStrategy -import com.netflix.dyno.connectionpool.Host -import com.netflix.dyno.connectionpool.Host.Status -import com.netflix.dyno.connectionpool.HostSupplier -import com.netflix.dyno.connectionpool.TokenMapSupplier -import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl -import com.netflix.dyno.connectionpool.impl.lb.HostToken -import com.netflix.dyno.jedis.DynoJedisClient -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.cache.WriteableCache -import com.netflix.spinnaker.cats.cache.WriteableCacheSpec -import com.netflix.spinnaker.cats.compression.NoopCompression -import com.netflix.spinnaker.cats.dynomite.cache.DynomiteCache -import com.netflix.spinnaker.cats.dynomite.cache.DynomiteCache.CacheMetrics -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate -import com.netflix.spinnaker.kork.jedis.EmbeddedRedis -import redis.clients.jedis.Jedis -import spock.lang.AutoCleanup -import spock.lang.IgnoreIf -import spock.lang.Shared -import spock.lang.Unroll - -// TODO rz - Figure out how to get dyno client to connect w/ an embedded redis -@IgnoreIf({ System.getProperty("dyno.address") == null }) -class DynomiteCacheSpec extends WriteableCacheSpec { - - static int MAX_MSET_SIZE = 2 - static int MAX_MERGE_COUNT = 1 - - @Shared - @AutoCleanup("destroy") - EmbeddedRedis embeddedRedis - - DynoJedisClient client - - Jedis directClient - - CacheMetrics cacheMetrics = Mock(CacheMetrics) - - @Override - Cache getSubject() { - initLocalDynoClusterClient() - - def delegate = new DynomiteClientDelegate(client) - - return new DynomiteCache( - 'test', - delegate, - new ObjectMapper(), - RedisCacheOptions.builder().maxMset(MAX_MSET_SIZE).maxMergeBatch(MAX_MERGE_COUNT).build(), - cacheMetrics, - new NoopCompression() - ) - } - - @Unroll - def 'attribute datatype handling #description'() { - setup: - def mergeData = createData('foo', [test: value], [:]) - cache.merge('test', mergeData) - - when: - def cacheData = cache.get('test', 'foo') - - then: - cacheData != null - cacheData.attributes.test == expected - - where: - value | expected | description - null | null | "null" - 1 | 1 | "Integer" - 2.0f | 2.0f | "Float" - "Bacon" | "Bacon" | "String" - true | true | "Boolean" - ['one', 'two'] | ['one', 'two'] | "Primitive list" - [key: 'value', key2: 10] | [key: 'value', key2: 10] | "Map" - new Bean('value', 10) | [key: 'value', key2: 10] | "Java object" - [key: 'value', key2: null] | [key: 'value'] | "Map with null" - new Bean('value', null) | [key: 'value', key2: null] | "Java object with null" - } - - @Unroll - def 'cache data will expire if ttl specified'() { - setup: - def mergeData = new DefaultCacheData('ttlTest', ttl, [test: 'test'], [:]) - cache.merge('test', mergeData); - - when: - def cacheData = cache.get('test', 'ttlTest') - - then: - cacheData.id == mergeData.id - - when: - Thread.sleep(Math.abs(ttl) * 1500) - cacheData = cache.get('test', 'ttlTest') - - then: - cacheData?.id == (ttl > 0 ? 
null : mergeData.id) - - where: - ttl || _ - -1 || _ - 1 || _ - - } - - def 'verify MSET chunking behavior (> MAX_MSET_SIZE)'() { - setup: - ((WriteableCache) cache).mergeAll('foo', [createData('bar'), createData('baz'), createData('bam')]) - - expect: - cache.getIdentifiers('foo').sort() == ['bam', 'bar', 'baz'] - } - - def 'should fail if maxMsetSize is not even'() { - when: - RedisCacheOptions.builder().maxMset(7).build() - - then: - thrown(IllegalArgumentException) - } - - def 'should not write an item if it is unchanged'() { - setup: - def data = createData('blerp', [a: 'b']) - - when: - ((WriteableCache) cache).merge('foo', data) - - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 0, 0, 1, 2, 2, 1, 0) - - when: - ((WriteableCache) cache).merge('foo', data) - - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 0, 1, 0, 0, 0, 0, 0) - } - - private static class Bean { - String key - Integer key2 - - Bean(String key, Integer key2) { - this.key = key - this.key2 = key2 - } - } - - private void initLocalDynoClusterClient() { - directClient = new Jedis('192.168.99.100', 22122) - directClient.flushAll() - directClient.close() - - // This setup assumes that you're running a single-node Dynomite cluster via the Docker image. - def localHost = new Host(Optional.ofNullable(System.getProperty('dyno.address')).orElse('192.168.99.100'), 8102, 'localrack', Status.Up) - - def localHostSupplier = new HostSupplier() { - @Override - List getHosts() { - return [localHost] - } - } - - def tokenSupplier = new TokenMapSupplier() { - final HostToken localHostToken = new HostToken(437425602L, localHost) - @Override - List getTokens(Set activeHosts) { - return [localHostToken] - } - - @Override - HostToken getTokenForHost(Host host, Set activeHosts) { - return localHostToken - } - } - - client = new DynoJedisClient.Builder() - .withApplicationName('catsTest') - .withDynomiteClusterName('dyn_o_mite') - .withHostSupplier(localHostSupplier) - .withCPConfig( - new ConnectionPoolConfigurationImpl('catsTest') - .withTokenSupplier(tokenSupplier) - .setLocalRack('localrack') - .setLocalDataCenter('localrac') - .setPoolShutdownDelay(2) - ) - .build() - - } - - private void initEmbeddedRedisClient() { - if (!embeddedRedis) { - embeddedRedis = EmbeddedRedis.embed() - } - Jedis jedis = embeddedRedis.jedis - try { - jedis.flushAll() - } finally { - jedis?.close() - } - - def localHost = new Host('localhost', embeddedRedis.port, 'localrack', Status.Up) - - def localHostSupplier = new HostSupplier() { - @Override - List getHosts() { - return [localHost] - } - } - - def tokenSupplier = new TokenMapSupplier() { - final HostToken localHostToken = new HostToken(437425602L, localHost) - @Override - List getTokens(Set activeHosts) { - return [localHostToken] - } - - @Override - HostToken getTokenForHost(Host host, Set activeHosts) { - return localHostToken - } - } - - client = new DynoJedisClient.Builder() - .withApplicationName('catsTest') - .withDynomiteClusterName('dyn_o_mite') - .withHostSupplier(localHostSupplier) - .withCPConfig( - new ConnectionPoolConfigurationImpl('catsTest') - .setLoadBalancingStrategy(LoadBalancingStrategy.RoundRobin) - .withTokenSupplier(tokenSupplier) - .setLocalRack('localrack') - .setLocalDataCenter('localrac') - ) - .build() - } -} - diff --git a/cats/cats-redis/cats-redis.gradle b/cats/cats-redis/cats-redis.gradle index 5f98a620dc3..92e2ab9a0d0 100644 --- a/cats/cats-redis/cats-redis.gradle +++ b/cats/cats-redis/cats-redis.gradle @@ -1,9 +1,19 @@ dependencies { - compile project(':cats:cats-core') 
- compile spinnaker.dependency('eurekaClient') - compile spinnaker.dependency('guava') - compile "com.netflix.spinnaker.kork:kork-jedis:${spinnaker.version("kork")}" - compile "com.fasterxml.jackson.core:jackson-databind:${spinnaker.version('jackson')}" - testCompile project(':cats:cats-test') - testCompile spinnaker.dependency('korkJedisTest') + implementation project(":cats:cats-core") + + compileOnly "org.projectlombok:lombok" + + implementation "org.apache.groovy:groovy" + implementation "com.fasterxml.jackson.core:jackson-databind" + implementation "io.spinnaker.kork:kork-jedis" + implementation "com.github.ben-manes.caffeine:guava" + + testImplementation project(":cats:cats-test") + testImplementation "io.spinnaker.kork:kork-jedis-test" + + testImplementation "org.apache.commons:commons-lang3" + testImplementation "org.assertj:assertj-core" + testImplementation "org.mockito:mockito-core" + testImplementation "org.spockframework:spock-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/AbstractRedisCache.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/AbstractRedisCache.java index ee38b1e48d0..0eb5090bb5d 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/AbstractRedisCache.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/AbstractRedisCache.java @@ -17,32 +17,28 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.collect.Iterables; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.cats.cache.CacheFilter; import com.netflix.spinnaker.cats.cache.WriteableCache; import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import java.util.*; +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import redis.clients.jedis.Pipeline; import redis.clients.jedis.Response; import redis.clients.jedis.ScanParams; import redis.clients.jedis.ScanResult; -import java.util.*; -import java.util.stream.Collectors; - public abstract class AbstractRedisCache implements WriteableCache { - private static final TypeReference> RELATIONSHIPS_LIST = new TypeReference>() { - }; - private static final TypeReference> RELATIONSHIPS_SET = new TypeReference>() { - }; - - protected static final TypeReference> ATTRIBUTES = new TypeReference>() { - }; + private static final TypeReference> RELATIONSHIPS_LIST = + new TypeReference>() {}; + private static final TypeReference> RELATIONSHIPS_SET = + new TypeReference>() {}; + protected static final TypeReference> ATTRIBUTES = + new TypeReference>() {}; private final Logger log = LoggerFactory.getLogger(getClass()); @@ -51,21 +47,24 @@ public abstract class AbstractRedisCache implements WriteableCache { protected final ObjectMapper objectMapper; protected final RedisCacheOptions options; - protected AbstractRedisCache(String prefix, - RedisClientDelegate redisClientDelegate, - ObjectMapper objectMapper, - RedisCacheOptions options) { + protected AbstractRedisCache( + String prefix, + RedisClientDelegate redisClientDelegate, + ObjectMapper objectMapper, + RedisCacheOptions options) { this.prefix = prefix; this.redisClientDelegate = redisClientDelegate; - this.objectMapper = objectMapper.disable(SerializationFeature.WRITE_NULL_MAP_VALUES); + this.objectMapper = objectMapper; 
this.options = options; } - abstract protected void mergeItems(String type, Collection items); + protected abstract void mergeItems(String type, Collection items); - abstract protected void evictItems(String type, List identifiers, Collection allRelationships); + protected abstract void evictItems( + String type, List identifiers, Collection allRelationships); - abstract protected Collection getItems(String type, List ids, List knownRels); + protected abstract Collection getItems( + String type, List ids, List knownRels); @Override public void merge(String type, CacheData item) { @@ -90,7 +89,8 @@ public void evictAll(String type, Collection identifiers) { return; } final Collection allRelationships = scanMembers(allRelationshipsId(type)); - for (List items : Iterables.partition(new HashSet<>(identifiers), options.getMaxEvictBatchSize())) { + for (List items : + Iterables.partition(new HashSet<>(identifiers), options.getMaxEvictBatchSize())) { evictItems(type, items, allRelationships); } } @@ -112,18 +112,18 @@ public CacheData get(String type, String id, CacheFilter cacheFilter) { @Override public Collection existingIdentifiers(String type, Collection identifiers) { final Map> responses = new LinkedHashMap<>(); - redisClientDelegate.withPipeline(p -> { - for (String id : identifiers) { - responses.put(id, p.exists(attributesId(type, id))); - } - redisClientDelegate.syncPipeline(p); - }); + redisClientDelegate.withPipeline( + p -> { + for (String id : identifiers) { + responses.put(id, p.exists(attributesId(type, id))); + } + redisClientDelegate.syncPipeline(p); + }); - return responses.entrySet() - .stream() - .filter(e -> e.getValue().get()) - .map(Map.Entry::getKey) - .collect(Collectors.toList()); + return responses.entrySet().stream() + .filter(e -> e.getValue().get()) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); } @Override @@ -148,9 +148,8 @@ public Collection getAll(String type, Collection identifiers) } @Override - public Collection getAll(String type, - Collection identifiers, - CacheFilter cacheFilter) { + public Collection getAll( + String type, Collection identifiers, CacheFilter cacheFilter) { if (identifiers.isEmpty()) { return new ArrayList<>(); } @@ -160,7 +159,8 @@ public Collection getAll(String type, if (cacheFilter == null) { knownRels = new ArrayList<>(allRelationships); } else { - knownRels = new ArrayList<>(cacheFilter.filter(CacheFilter.Type.RELATIONSHIP, allRelationships)); + knownRels = + new ArrayList<>(cacheFilter.filter(CacheFilter.Type.RELATIONSHIP, allRelationships)); } Collection result = new ArrayList<>(ids.size()); @@ -187,52 +187,54 @@ private Set scanMembers(String setKey) { } protected Set scanMembers(String setKey, Optional glob) { - return redisClientDelegate.withCommandsClient(client -> { - final Set matches = new HashSet<>(); - final ScanParams scanParams = new ScanParams().count(options.getScanSize()); - glob.ifPresent(scanParams::match); - String cursor = "0"; - while (true) { - final ScanResult scanResult = client.sscan(setKey, cursor, scanParams); - matches.addAll(scanResult.getResult()); - cursor = scanResult.getStringCursor(); - if ("0".equals(cursor)) { - return matches; - } - } - }); + return redisClientDelegate.withCommandsClient( + client -> { + final Set matches = new HashSet<>(); + final ScanParams scanParams = new ScanParams().count(options.getScanSize()); + glob.ifPresent(scanParams::match); + String cursor = "0"; + while (true) { + final ScanResult scanResult = client.sscan(setKey, cursor, scanParams); + 
matches.addAll(scanResult.getResult()); + cursor = scanResult.getCursor(); + if ("0".equals(cursor)) { + return matches; + } + } + }); } protected boolean isHashingDisabled(String type) { if (!options.isHashingEnabled()) { return true; } - return redisClientDelegate.withCommandsClient(client -> { - return client.exists(hashesDisabled(type)); - }); + return redisClientDelegate.withCommandsClient( + client -> { + return client.exists(hashesDisabled(type)); + }); } protected String attributesId(String type, String id) { - return String.format("%s:%s:attributes:%s", prefix, type, id); + return String.join(":", prefix, type, "attributes", id); } protected String relationshipId(String type, String id, String relationship) { - return String.format("%s:%s:relationships:%s:%s", prefix, type, id, relationship); + return String.join(":", prefix, type, "relationships", id, relationship); } private String hashesDisabled(String type) { - return String.format("%s:%s:hashes.disabled", prefix, type); + return String.join(":", prefix, type, "hashes.disabled"); } protected String allRelationshipsId(String type) { - return String.format("%s:%s:relationships", prefix, type); + return String.join(":", prefix, type, "relationships"); } protected String allOfTypeId(String type) { - return String.format("%s:%s:members", prefix, type); + return String.join(":", prefix, type, "members"); } - protected TypeReference getRelationshipsTypeReference() { + protected TypeReference> getRelationshipsTypeReference() { return options.isTreatRelationshipsAsSet() ? RELATIONSHIPS_SET : RELATIONSHIPS_LIST; } } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCache.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCache.java index 3a23bfce1f1..34d86e6d31b 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCache.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCache.java @@ -23,7 +23,6 @@ import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; - import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -41,54 +40,59 @@ import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; -import static java.nio.charset.StandardCharsets.UTF_8; - public class RedisCache extends AbstractRedisCache { public interface CacheMetrics { - default void merge(String prefix, - String type, - int itemCount, - int keysWritten, - int relationshipCount, - int hashMatches, - int hashUpdates, - int saddOperations, - int msetOperations, - int hmsetOperations, - int pipelineOperations, - int expireOperations) { - //noop + default void merge( + String prefix, + String type, + int itemCount, + int keysWritten, + int relationshipCount, + int hashMatches, + int hashUpdates, + int saddOperations, + int msetOperations, + int hmsetOperations, + int pipelineOperations, + int expireOperations) { + // noop } - default void evict(String prefix, - String type, - int itemCount, - int keysDeleted, - int hashesDeleted, - int delOperations, - int hdelOperations, - int sremOperations) { - //noop + default void evict( + String prefix, + String type, + int itemCount, + int keysDeleted, + int hashesDeleted, + int delOperations, + int hdelOperations, + int sremOperations) { + // noop } - default void get(String prefix, - String type, - int itemCount, - int requestedSize, - int keysRequested, 
- int relationshipsRequested, - int mgetOperations) { - //noop + default void get( + String prefix, + String type, + int itemCount, + int requestedSize, + int keysRequested, + int relationshipsRequested, + int mgetOperations) { + // noop } - class NOOP implements CacheMetrics { - } + class NOOP implements CacheMetrics {} } private final CacheMetrics cacheMetrics; - public RedisCache(String prefix, RedisClientDelegate redisClientDelegate, ObjectMapper objectMapper, RedisCacheOptions options, CacheMetrics cacheMetrics) { + public RedisCache( + String prefix, + RedisClientDelegate redisClientDelegate, + ObjectMapper objectMapper, + RedisCacheOptions options, + CacheMetrics cacheMetrics) { super(prefix, redisClientDelegate, objectMapper, options); this.cacheMetrics = cacheMetrics == null ? new CacheMetrics.NOOP() : cacheMetrics; } @@ -130,65 +134,75 @@ protected void mergeItems(String type, Collection items) { AtomicInteger pipelineOperations = new AtomicInteger(); AtomicInteger expireOperations = new AtomicInteger(); if (keysToSet.size() > 0) { - redisClientDelegate.withMultiKeyPipeline(pipeline -> { - for (List idPart : Iterables.partition(idSet, options.getMaxSaddSize())) { - final String[] ids = idPart.toArray(new String[idPart.size()]); - pipeline.sadd(allOfTypeId(type), ids); - saddOperations.incrementAndGet(); - } - - for (List keys : Lists.partition(keysToSet, options.getMaxMsetSize())) { - pipeline.mset(keys.toArray(new String[keys.size()])); - msetOperations.incrementAndGet(); - } - - if (!relationshipNames.isEmpty()) { - for (List relNamesPart : Iterables.partition(relationshipNames, options.getMaxSaddSize())) { - pipeline.sadd(allRelationshipsId(type), relNamesPart.toArray(new String[relNamesPart.size()])); - saddOperations.incrementAndGet(); - } - } - - if (!updatedHashes.isEmpty()) { - for (List hashPart : Iterables.partition(updatedHashes.keySet(), options.getMaxHmsetSize())) { - pipeline.hmset(hashesId(type), updatedHashes.subMap(hashPart.get(0), true, hashPart.get(hashPart.size() - 1), true)); - hmsetOperations.incrementAndGet(); - } - } - pipeline.sync(); - pipelineOperations.incrementAndGet(); - }); - - redisClientDelegate.withMultiKeyPipeline(pipeline -> { - for (List> ttlPart : Iterables.partition(ttlSecondsByKey.entrySet(), options.getMaxPipelineSize())) { - for (Map.Entry ttlEntry : ttlPart) { - pipeline.expire(ttlEntry.getKey(), ttlEntry.getValue()); - } - expireOperations.addAndGet(ttlPart.size()); - pipeline.sync(); - pipelineOperations.incrementAndGet(); - } - }); + redisClientDelegate.withMultiKeyPipeline( + pipeline -> { + for (List idPart : Iterables.partition(idSet, options.getMaxSaddSize())) { + final String[] ids = idPart.toArray(new String[idPart.size()]); + pipeline.sadd(allOfTypeId(type), ids); + saddOperations.incrementAndGet(); + } + + for (List keys : Lists.partition(keysToSet, options.getMaxMsetSize())) { + pipeline.mset(keys.toArray(new String[keys.size()])); + msetOperations.incrementAndGet(); + } + + if (!relationshipNames.isEmpty()) { + for (List relNamesPart : + Iterables.partition(relationshipNames, options.getMaxSaddSize())) { + pipeline.sadd( + allRelationshipsId(type), + relNamesPart.toArray(new String[relNamesPart.size()])); + saddOperations.incrementAndGet(); + } + } + + if (!updatedHashes.isEmpty()) { + for (List hashPart : + Iterables.partition(updatedHashes.keySet(), options.getMaxHmsetSize())) { + pipeline.hmset( + hashesId(type), + updatedHashes.subMap( + hashPart.get(0), true, hashPart.get(hashPart.size() - 1), true)); + 
hmsetOperations.incrementAndGet(); + } + } + pipeline.sync(); + pipelineOperations.incrementAndGet(); + }); + + redisClientDelegate.withMultiKeyPipeline( + pipeline -> { + for (List> ttlPart : + Iterables.partition(ttlSecondsByKey.entrySet(), options.getMaxPipelineSize())) { + for (Map.Entry ttlEntry : ttlPart) { + pipeline.expire(ttlEntry.getKey(), ttlEntry.getValue()); + } + expireOperations.addAndGet(ttlPart.size()); + pipeline.sync(); + pipelineOperations.incrementAndGet(); + } + }); } cacheMetrics.merge( - prefix, - type, - items.size(), - keysToSet.size() / 2, - relationshipNames.size(), - skippedWrites, - updatedHashes.size(), - saddOperations.get(), - msetOperations.get(), - hmsetOperations.get(), - pipelineOperations.get(), - expireOperations.get() - ); + prefix, + type, + items.size(), + keysToSet.size() / 2, + relationshipNames.size(), + skippedWrites, + updatedHashes.size(), + saddOperations.get(), + msetOperations.get(), + hmsetOperations.get(), + pipelineOperations.get(), + expireOperations.get()); } @Override - protected void evictItems(String type, List identifiers, Collection allRelationships) { + protected void evictItems( + String type, List identifiers, Collection allRelationships) { List delKeys = new ArrayList<>((allRelationships.size() + 1) * identifiers.size()); for (String id : identifiers) { for (String relationship : allRelationships) { @@ -200,33 +214,33 @@ protected void evictItems(String type, List identifiers, Collection { - for (List delPartition : Lists.partition(delKeys, options.getMaxDelSize())) { - pipeline.del(delPartition.toArray(new String[delPartition.size()])); - delOperations.incrementAndGet(); - pipeline.hdel(hashesId(type), delPartition.toArray(new String[delPartition.size()])); - hdelOperations.incrementAndGet(); - } + redisClientDelegate.withMultiKeyPipeline( + pipeline -> { + for (List delPartition : Lists.partition(delKeys, options.getMaxDelSize())) { + pipeline.del(delPartition.toArray(new String[delPartition.size()])); + delOperations.incrementAndGet(); + pipeline.hdel(hashesId(type), delPartition.toArray(new String[delPartition.size()])); + hdelOperations.incrementAndGet(); + } - for (List idPartition : Lists.partition(identifiers, options.getMaxDelSize())) { - String[] ids = idPartition.toArray(new String[idPartition.size()]); - pipeline.srem(allOfTypeId(type), ids); - sremOperations.incrementAndGet(); - } + for (List idPartition : Lists.partition(identifiers, options.getMaxDelSize())) { + String[] ids = idPartition.toArray(new String[idPartition.size()]); + pipeline.srem(allOfTypeId(type), ids); + sremOperations.incrementAndGet(); + } - pipeline.sync(); - }); + pipeline.sync(); + }); cacheMetrics.evict( - prefix, - type, - identifiers.size(), - delKeys.size(), - delKeys.size(), - delOperations.get(), - hdelOperations.get(), - sremOperations.get() - ); + prefix, + type, + identifiers.size(), + delKeys.size(), + delKeys.size(), + delOperations.get(), + hdelOperations.get(), + sremOperations.get()); } @Override @@ -243,14 +257,16 @@ protected Collection getItems(String type, List ids, List keyResult = new ArrayList<>(keysToGet.size()); - int mgetOperations = redisClientDelegate.withMultiClient(c -> { - int ops = 0; - for (List part : Lists.partition(keysToGet, options.getMaxMgetSize())) { - ops++; - keyResult.addAll(c.mget(part.toArray(new String[part.size()]))); - } - return ops; - }); + int mgetOperations = + redisClientDelegate.withMultiClient( + c -> { + int ops = 0; + for (List part : Lists.partition(keysToGet, 
options.getMaxMgetSize())) { + ops++; + keyResult.addAll(c.mget(part.toArray(new String[part.size()]))); + } + return ops; + }); if (keyResult.size() != keysToGet.size()) { throw new RuntimeException("Expected same size result as request"); @@ -259,13 +275,21 @@ protected Collection getItems(String type, List ids, List results = new ArrayList<>(ids.size()); Iterator idIterator = ids.iterator(); for (int ofs = 0; ofs < keyResult.size(); ofs += singleResultSize) { - CacheData item = extractItem(idIterator.next(), keyResult.subList(ofs, ofs + singleResultSize), knownRels); + CacheData item = + extractItem(idIterator.next(), keyResult.subList(ofs, ofs + singleResultSize), knownRels); if (item != null) { results.add(item); } } - cacheMetrics.get(prefix, type, results.size(), ids.size(), keysToGet.size(), knownRels.size(), mgetOperations); + cacheMetrics.get( + prefix, + type, + results.size(), + ids.size(), + keysToGet.size(), + knownRels.size(), + mgetOperations); return results; } @@ -281,10 +305,8 @@ private CacheData extractItem(String id, List keyResult, List kn String rel = keyResult.get(relIdx); if (rel != null) { String relType = knownRels.get(relIdx - 1); - Collection deserializedRel = objectMapper.readValue( - rel, - getRelationshipsTypeReference() - ); + Collection deserializedRel = + objectMapper.readValue(rel, getRelationshipsTypeReference()); relationships.put(relType, deserializedRel); } } @@ -302,7 +324,11 @@ private static class MergeOp { public final Map hashesToSet; public final int skippedWrites; - MergeOp(Set relNames, List keysToSet, Map hashesToSet, int skippedWrites) { + MergeOp( + Set relNames, + List keysToSet, + Map hashesToSet, + int skippedWrites) { this.relNames = relNames; this.keysToSet = keysToSet; this.hashesToSet = hashesToSet; @@ -326,26 +352,41 @@ private MergeOp buildMergeOp(String type, CacheData cacheData, Map hashesToSet = new HashMap<>(); final List keysToSet = new ArrayList<>((cacheData.getRelationships().size() + 1) * 2); - if (serializedAttributes != null && - hashCheck(hashes, attributesId(type, cacheData.getId()), serializedAttributes, keysToSet, hashesToSet, hasTtl)) { + if (serializedAttributes != null + && hashCheck( + hashes, + attributesId(type, cacheData.getId()), + serializedAttributes, + keysToSet, + hashesToSet, + hasTtl)) { skippedWrites++; } if (!cacheData.getRelationships().isEmpty()) { - for (Map.Entry> relationship : cacheData.getRelationships().entrySet()) { + for (Map.Entry> relationship : + cacheData.getRelationships().entrySet()) { final String relationshipValue; try { - relationshipValue = objectMapper.writeValueAsString(new LinkedHashSet<>(relationship.getValue())); + relationshipValue = + objectMapper.writeValueAsString(new LinkedHashSet<>(relationship.getValue())); } catch (JsonProcessingException serializationException) { throw new RuntimeException("Relationship serialization failed", serializationException); } - if (hashCheck(hashes, relationshipId(type, cacheData.getId(), relationship.getKey()), relationshipValue, keysToSet, hashesToSet, hasTtl)) { + if (hashCheck( + hashes, + relationshipId(type, cacheData.getId(), relationship.getKey()), + relationshipValue, + keysToSet, + hashesToSet, + hasTtl)) { skippedWrites++; } } } - return new MergeOp(cacheData.getRelationships().keySet(), keysToSet, hashesToSet, skippedWrites); + return new MergeOp( + cacheData.getRelationships().keySet(), keysToSet, hashesToSet, skippedWrites); } private List getKeys(String type, Collection cacheDatas) { @@ -365,11 +406,14 @@ private List 
getKeys(String type, Collection cacheDatas) { private List getHashValues(List hashKeys, String hashesId) { final List hashValues = new ArrayList<>(hashKeys.size()); - redisClientDelegate.withCommandsClient(c -> { - for (List hashPart : Lists.partition(hashKeys, options.getMaxHmgetSize())) { - hashValues.addAll(c.hmget(hashesId, Arrays.copyOf(hashPart.toArray(), hashPart.size(), String[].class))); - } - }); + redisClientDelegate.withCommandsClient( + c -> { + for (List hashPart : Lists.partition(hashKeys, options.getMaxHmgetSize())) { + hashValues.addAll( + c.hmget( + hashesId, Arrays.copyOf(hashPart.toArray(), hashPart.size(), String[].class))); + } + }); return hashValues; } @@ -377,17 +421,26 @@ private List getHashValues(List hashKeys, String hashesId) { * Compares the hash of serializedValue against an existing hash, if they do not match adds * serializedValue to keys and the new hash to updatedHashes. * - * @param hashes the existing hash values - * @param id the id of the item + * @param hashes the existing hash values + * @param id the id of the item * @param serializedValue the serialized value - * @param keys values to persist - if the hash does not match id and serializedValue are appended - * @param updatedHashes hashes to persist - if the hash does not match adds an entry of id -> computed hash - * @param hasTtl if the key has a ttl - generally this means the key should not be hashed due to consistency issues between the hash key, and the key itself + * @param keys values to persist - if the hash does not match id and serializedValue are appended + * @param updatedHashes hashes to persist - if the hash does not match adds an entry of id -> + * computed hash + * @param hasTtl if the key has a ttl - generally this means the key should not be hashed due to + * consistency issues between the hash key, and the key itself * @return true if the hash matched, false otherwise */ - private boolean hashCheck(Map hashes, String id, String serializedValue, List keys, Map updatedHashes, boolean hasTtl) { + private boolean hashCheck( + Map hashes, + String id, + String serializedValue, + List keys, + Map updatedHashes, + boolean hasTtl) { if (options.isHashingEnabled() && !hasTtl) { - final String hash = Hashing.sha1().newHasher().putString(serializedValue, UTF_8).hash().toString(); + final String hash = + Hashing.sha1().newHasher().putUnencodedChars(serializedValue).hash().toString(); final String existingHash = hashes.get(id); if (hash.equals(existingHash)) { return true; diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCacheOptions.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCacheOptions.java index 373b7740d18..4ff59f1e528 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCacheOptions.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisCacheOptions.java @@ -19,352 +19,355 @@ import com.google.common.base.Preconditions; public class RedisCacheOptions { - public static Builder builder() { - return new Builder(); - } - private final int maxMsetSize; - private final int maxMgetSize; - private final int maxHmgetSize; - private final int maxHmsetSize; - private final int maxSaddSize; - private final int maxDelSize; - private final int maxPipelineSize; - private final int scanSize; - private final int maxMergeBatchSize; - private final int maxEvictBatchSize; - private final int maxGetBatchSize; - private final boolean hashingEnabled; - private final 
boolean treatRelationshipsAsSet; - - private static int posInt(String name, int value) { - Preconditions.checkArgument(value > 0, "%s must be a positive integer (%s)", name, value); - return value; - } - - private static int posEven(String name, int value) { - Preconditions.checkArgument(value > 0 && value % 2 == 0, "%s must be a positive even integer (%s)", name, value); - return value; - } - - public RedisCacheOptions(int maxMsetSize, - int maxMgetSize, - int maxHmgetSize, - int maxHmsetSize, - int maxSaddSize, - int maxDelSize, - int maxPipelineSize, - int scanSize, - int maxMergeBatchSize, - int maxEvictBatchSize, - int maxGetBatchSize, - boolean hashingEnabled, - boolean treatRelationshipsAsSet) { - this.maxMsetSize = posEven("maxMsetSize", maxMsetSize); - this.maxMgetSize = posInt("maxMgetSize", maxMgetSize); - this.maxHmgetSize = posInt("maxHmgetSize", maxHmgetSize); - this.maxHmsetSize = posInt("maxHmsetSize", maxHmsetSize); - this.maxSaddSize = posInt("maxSaddSize", maxSaddSize); - this.maxDelSize = posInt("maxDelSize", maxDelSize); - this.maxPipelineSize = posInt("maxPipelineSize", maxPipelineSize); - this.scanSize = posInt("scanSize", scanSize); - this.maxMergeBatchSize = posInt("maxMergeBatchSize", maxMergeBatchSize); - this.maxEvictBatchSize = posInt("maxEvictBatchSize", maxEvictBatchSize); - this.maxGetBatchSize = posInt("maxGetBatchSize", maxGetBatchSize); - this.hashingEnabled = hashingEnabled; - this.treatRelationshipsAsSet = treatRelationshipsAsSet; + public static Builder builder() { + return new Builder(); + } + + private final int maxMsetSize; + private final int maxMgetSize; + private final int maxHmgetSize; + private final int maxHmsetSize; + private final int maxSaddSize; + private final int maxDelSize; + private final int maxPipelineSize; + private final int scanSize; + private final int maxMergeBatchSize; + private final int maxEvictBatchSize; + private final int maxGetBatchSize; + private final boolean hashingEnabled; + private final boolean treatRelationshipsAsSet; + + private static int posInt(String name, int value) { + Preconditions.checkArgument(value > 0, "%s must be a positive integer (%s)", name, value); + return value; + } + + private static int posEven(String name, int value) { + Preconditions.checkArgument( + value > 0 && value % 2 == 0, "%s must be a positive even integer (%s)", name, value); + return value; + } + + public RedisCacheOptions( + int maxMsetSize, + int maxMgetSize, + int maxHmgetSize, + int maxHmsetSize, + int maxSaddSize, + int maxDelSize, + int maxPipelineSize, + int scanSize, + int maxMergeBatchSize, + int maxEvictBatchSize, + int maxGetBatchSize, + boolean hashingEnabled, + boolean treatRelationshipsAsSet) { + this.maxMsetSize = posEven("maxMsetSize", maxMsetSize); + this.maxMgetSize = posInt("maxMgetSize", maxMgetSize); + this.maxHmgetSize = posInt("maxHmgetSize", maxHmgetSize); + this.maxHmsetSize = posInt("maxHmsetSize", maxHmsetSize); + this.maxSaddSize = posInt("maxSaddSize", maxSaddSize); + this.maxDelSize = posInt("maxDelSize", maxDelSize); + this.maxPipelineSize = posInt("maxPipelineSize", maxPipelineSize); + this.scanSize = posInt("scanSize", scanSize); + this.maxMergeBatchSize = posInt("maxMergeBatchSize", maxMergeBatchSize); + this.maxEvictBatchSize = posInt("maxEvictBatchSize", maxEvictBatchSize); + this.maxGetBatchSize = posInt("maxGetBatchSize", maxGetBatchSize); + this.hashingEnabled = hashingEnabled; + this.treatRelationshipsAsSet = treatRelationshipsAsSet; + } + + public int getMaxMsetSize() { + return maxMsetSize; + 
} + + public int getMaxMgetSize() { + return maxMgetSize; + } + + public int getMaxHmgetSize() { + return maxHmgetSize; + } + + public int getMaxHmsetSize() { + return maxHmsetSize; + } + + public int getMaxSaddSize() { + return maxSaddSize; + } + + public int getMaxDelSize() { + return maxDelSize; + } + + public int getMaxPipelineSize() { + return maxPipelineSize; + } + + public int getScanSize() { + return scanSize; + } + + public int getMaxMergeBatchSize() { + return maxMergeBatchSize; + } + + public int getMaxEvictBatchSize() { + return maxEvictBatchSize; + } + + public int getMaxGetBatchSize() { + return maxGetBatchSize; + } + + public boolean isHashingEnabled() { + return hashingEnabled; + } + + public boolean isTreatRelationshipsAsSet() { + return treatRelationshipsAsSet; + } + + public static class Builder { + public static final int DEFAULT_MULTI_OP_SIZE = 200; + public static final int DEFAULT_BATCH_SIZE = 200; + public static final int DEFAULT_SCAN_SIZE = 200; + public static final int DEFAULT_MAX_PIPELINE_SIZE = 200; + public static final boolean DEFAULT_HASHING_ENABLED = true; + public static final boolean DEFAULT_TREAT_RELATIONSHIPS_AS_SET_DISABLED = false; + + int maxMsetSize; + int maxMgetSize; + int maxHmgetSize; + int maxHmsetSize; + int maxSaddSize; + int maxDelSize; + int maxPipelineSize; + int scanSize; + int maxMergeBatchSize; + int maxEvictBatchSize; + int maxGetBatchSize; + boolean hashingEnabled; + boolean treatRelationshipsAsSet; + + public Builder() { + batchSize(DEFAULT_BATCH_SIZE); + scan(DEFAULT_SCAN_SIZE); + multiOp(DEFAULT_MULTI_OP_SIZE); + maxPipeline(DEFAULT_MAX_PIPELINE_SIZE); + hashing(DEFAULT_HASHING_ENABLED); + treatRelationshipsAsSet(DEFAULT_TREAT_RELATIONSHIPS_AS_SET_DISABLED); + } + + public Builder maxMergeBatch(int maxMergeBatch) { + this.maxMergeBatchSize = maxMergeBatch; + return this; + } + + public Builder maxEvictBatch(int maxEvictBatch) { + this.maxEvictBatchSize = maxEvictBatch; + return this; + } + + public Builder maxGetBatch(int maxGetBatch) { + this.maxGetBatchSize = maxGetBatch; + return this; + } + + public Builder batchSize(int batchSize) { + return maxMergeBatch(batchSize).maxEvictBatch(batchSize).maxGetBatch(batchSize); + } + + public Builder scan(int scanSize) { + this.scanSize = scanSize; + return this; + } + + public Builder multiOp(int multiOpSize) { + return maxMkeyOp(multiOpSize).maxHmOp(multiOpSize).maxSadd(multiOpSize).maxDel(multiOpSize); + } + + public Builder maxMset(int maxMset) { + this.maxMsetSize = maxMset; + return this; + } + + public Builder maxMget(int maxMget) { + this.maxMgetSize = maxMget; + return this; + } + + public Builder maxMkeyOp(int maxMkey) { + return maxMset(maxMkey).maxMget(maxMkey); + } + + public Builder maxHmget(int maxHmget) { + this.maxHmgetSize = maxHmget; + return this; + } + + public Builder maxHmset(int maxHmset) { + this.maxHmsetSize = maxHmset; + return this; + } + + public Builder maxHmOp(int maxHmOp) { + return maxHmget(maxHmOp).maxHmset(maxHmOp); + } + + public Builder maxSadd(int maxSadd) { + this.maxSaddSize = maxSadd; + return this; + } + + public Builder maxDel(int maxDel) { + this.maxDelSize = maxDel; + return this; + } + + public Builder maxPipeline(int maxPipeline) { + this.maxPipelineSize = maxPipeline; + return this; + } + + public Builder hashing(boolean hashingEnabled) { + this.hashingEnabled = hashingEnabled; + return this; + } + + public Builder treatRelationshipsAsSet(boolean treatRelationshipsAsSet) { + this.treatRelationshipsAsSet = treatRelationshipsAsSet; + return 
this; + } + + public RedisCacheOptions build() { + return new RedisCacheOptions( + maxMsetSize, + maxMgetSize, + maxHmgetSize, + maxHmsetSize, + maxSaddSize, + maxDelSize, + maxPipelineSize, + scanSize, + maxMergeBatchSize, + maxEvictBatchSize, + maxGetBatchSize, + hashingEnabled, + treatRelationshipsAsSet); + } + + public void setBatchSize(int batchSize) { + batchSize(batchSize); + } + + public void setMultiOpSize(int multiOpSize) { + multiOp(multiOpSize); } public int getMaxMsetSize() { - return maxMsetSize; + return maxMsetSize; + } + + public void setMaxMsetSize(int maxMsetSize) { + this.maxMsetSize = maxMsetSize; } public int getMaxMgetSize() { - return maxMgetSize; + return maxMgetSize; + } + + public void setMaxMgetSize(int maxMgetSize) { + this.maxMgetSize = maxMgetSize; } public int getMaxHmgetSize() { - return maxHmgetSize; + return maxHmgetSize; + } + + public void setMaxHmgetSize(int maxHmgetSize) { + this.maxHmgetSize = maxHmgetSize; } public int getMaxHmsetSize() { - return maxHmsetSize; + return maxHmsetSize; + } + + public void setMaxHmsetSize(int maxHmsetSize) { + this.maxHmsetSize = maxHmsetSize; } public int getMaxSaddSize() { - return maxSaddSize; + return maxSaddSize; + } + + public void setMaxSaddSize(int maxSaddSize) { + this.maxSaddSize = maxSaddSize; } public int getMaxDelSize() { - return maxDelSize; + return maxDelSize; + } + + public void setMaxDelSize(int maxDelSize) { + this.maxDelSize = maxDelSize; } public int getMaxPipelineSize() { - return maxPipelineSize; + return maxPipelineSize; + } + + public void setMaxPipelineSize(int maxPipelineSize) { + this.maxPipelineSize = maxPipelineSize; } public int getScanSize() { - return scanSize; + return scanSize; + } + + public void setScanSize(int scanSize) { + this.scanSize = scanSize; } public int getMaxMergeBatchSize() { - return maxMergeBatchSize; + return maxMergeBatchSize; + } + + public void setMaxMergeBatchSize(int maxMergeBatchSize) { + this.maxMergeBatchSize = maxMergeBatchSize; } public int getMaxEvictBatchSize() { - return maxEvictBatchSize; + return maxEvictBatchSize; } - public int getMaxGetBatchSize() { - return maxGetBatchSize; + public void setMaxEvictBatchSize(int maxEvictBatchSize) { + this.maxEvictBatchSize = maxEvictBatchSize; } - public boolean isHashingEnabled() { - return hashingEnabled; + public int getMaxGetBatchSize() { + return maxGetBatchSize; } - public boolean isTreatRelationshipsAsSet() { - return treatRelationshipsAsSet; + public void setMaxGetBatchSize(int maxGetBatchSize) { + this.maxGetBatchSize = maxGetBatchSize; } - public static class Builder { - public static final int DEFAULT_MULTI_OP_SIZE = 200; - public static final int DEFAULT_BATCH_SIZE = 200; - public static final int DEFAULT_SCAN_SIZE = 200; - public static final int DEFAULT_MAX_PIPELINE_SIZE = 200; - public static final boolean DEFAULT_HASHING_ENABLED = true; - public static final boolean DEFAULT_TREAT_RELATIONSHIPS_AS_SET_DISABLED = false; - - int maxMsetSize; - int maxMgetSize; - int maxHmgetSize; - int maxHmsetSize; - int maxSaddSize; - int maxDelSize; - int maxPipelineSize; - int scanSize; - int maxMergeBatchSize; - int maxEvictBatchSize; - int maxGetBatchSize; - boolean hashingEnabled; - boolean treatRelationshipsAsSet; - - public Builder() { - batchSize(DEFAULT_BATCH_SIZE); - scan(DEFAULT_SCAN_SIZE); - multiOp(DEFAULT_MULTI_OP_SIZE); - maxPipeline(DEFAULT_MAX_PIPELINE_SIZE); - hashing(DEFAULT_HASHING_ENABLED); - treatRelationshipsAsSet(DEFAULT_TREAT_RELATIONSHIPS_AS_SET_DISABLED); - } - - public Builder 
maxMergeBatch(int maxMergeBatch) { - this.maxMergeBatchSize = maxMergeBatch; - return this; - } - - public Builder maxEvictBatch(int maxEvictBatch) { - this.maxEvictBatchSize = maxEvictBatch; - return this; - } - - public Builder maxGetBatch(int maxGetBatch) { - this.maxGetBatchSize = maxGetBatch; - return this; - } - - public Builder batchSize(int batchSize) { - return maxMergeBatch(batchSize).maxEvictBatch(batchSize).maxGetBatch(batchSize); - } - - public Builder scan(int scanSize) { - this.scanSize = scanSize; - return this; - } - - public Builder multiOp(int multiOpSize) { - return maxMkeyOp(multiOpSize).maxHmOp(multiOpSize).maxSadd(multiOpSize).maxDel(multiOpSize); - } - - public Builder maxMset(int maxMset) { - this.maxMsetSize = maxMset; - return this; - } - - public Builder maxMget(int maxMget) { - this.maxMgetSize = maxMget; - return this; - } - - public Builder maxMkeyOp(int maxMkey) { - return maxMset(maxMkey).maxMget(maxMkey); - } - - public Builder maxHmget(int maxHmget) { - this.maxHmgetSize = maxHmget; - return this; - } - - public Builder maxHmset(int maxHmset) { - this.maxHmsetSize = maxHmset; - return this; - } - - public Builder maxHmOp(int maxHmOp) { - return maxHmget(maxHmOp).maxHmset(maxHmOp); - } - - public Builder maxSadd(int maxSadd) { - this.maxSaddSize = maxSadd; - return this; - } - - public Builder maxDel(int maxDel) { - this.maxDelSize = maxDel; - return this; - } - - public Builder maxPipeline(int maxPipeline) { - this.maxPipelineSize = maxPipeline; - return this; - } - - public Builder hashing(boolean hashingEnabled) { - this.hashingEnabled = hashingEnabled; - return this; - } - - public Builder treatRelationshipsAsSet(boolean treatRelationshipsAsSet) { - this.treatRelationshipsAsSet = treatRelationshipsAsSet; - return this; - } - - public RedisCacheOptions build() { - return new RedisCacheOptions( - maxMsetSize, - maxMgetSize, - maxHmgetSize, - maxHmsetSize, - maxSaddSize, - maxDelSize, - maxPipelineSize, - scanSize, - maxMergeBatchSize, - maxEvictBatchSize, - maxGetBatchSize, - hashingEnabled, - treatRelationshipsAsSet); - } - - public void setBatchSize(int batchSize) { - batchSize(batchSize); - } - - public void setMultiOpSize(int multiOpSize) { - multiOp(multiOpSize); - } - - public int getMaxMsetSize() { - return maxMsetSize; - } - - public void setMaxMsetSize(int maxMsetSize) { - this.maxMsetSize = maxMsetSize; - } - - public int getMaxMgetSize() { - return maxMgetSize; - } - - public void setMaxMgetSize(int maxMgetSize) { - this.maxMgetSize = maxMgetSize; - } - - public int getMaxHmgetSize() { - return maxHmgetSize; - } - - public void setMaxHmgetSize(int maxHmgetSize) { - this.maxHmgetSize = maxHmgetSize; - } - - public int getMaxHmsetSize() { - return maxHmsetSize; - } - - public void setMaxHmsetSize(int maxHmsetSize) { - this.maxHmsetSize = maxHmsetSize; - } - - public int getMaxSaddSize() { - return maxSaddSize; - } - - public void setMaxSaddSize(int maxSaddSize) { - this.maxSaddSize = maxSaddSize; - } - - public int getMaxDelSize() { - return maxDelSize; - } - - public void setMaxDelSize(int maxDelSize) { - this.maxDelSize = maxDelSize; - } - - public int getMaxPipelineSize() { - return maxPipelineSize; - } - - public void setMaxPipelineSize(int maxPipelineSize) { - this.maxPipelineSize = maxPipelineSize; - } - - public int getScanSize() { - return scanSize; - } - - public void setScanSize(int scanSize) { - this.scanSize = scanSize; - } - - public int getMaxMergeBatchSize() { - return maxMergeBatchSize; - } - - public void 
setMaxMergeBatchSize(int maxMergeBatchSize) { - this.maxMergeBatchSize = maxMergeBatchSize; - } - - public int getMaxEvictBatchSize() { - return maxEvictBatchSize; - } - - public void setMaxEvictBatchSize(int maxEvictBatchSize) { - this.maxEvictBatchSize = maxEvictBatchSize; - } - - public int getMaxGetBatchSize() { - return maxGetBatchSize; - } - - public void setMaxGetBatchSize(int maxGetBatchSize) { - this.maxGetBatchSize = maxGetBatchSize; - } - - public boolean isHashingEnabled() { - return hashingEnabled; - } + public boolean isHashingEnabled() { + return hashingEnabled; + } - public void setHashingEnabled(boolean hashingEnabled) { - this.hashingEnabled = hashingEnabled; - } + public void setHashingEnabled(boolean hashingEnabled) { + this.hashingEnabled = hashingEnabled; + } - public boolean isTreatRelationshipsAsSet() { - return treatRelationshipsAsSet; - } + public boolean isTreatRelationshipsAsSet() { + return treatRelationshipsAsSet; + } - public void setTreatRelationshipsAsSet(boolean treatRelationshipsAsSet) { - this.treatRelationshipsAsSet = treatRelationshipsAsSet; - } + public void setTreatRelationshipsAsSet(boolean treatRelationshipsAsSet) { + this.treatRelationshipsAsSet = treatRelationshipsAsSet; + } } } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactory.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactory.java index 1306bbf9383..5fbd6083291 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactory.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactory.java @@ -24,20 +24,24 @@ public class RedisNamedCacheFactory implements NamedCacheFactory { - private final RedisClientDelegate redisClientDelegate; - private final ObjectMapper objectMapper; - private final RedisCacheOptions options; - private final CacheMetrics cacheMetrics; + private final RedisClientDelegate redisClientDelegate; + private final ObjectMapper objectMapper; + private final RedisCacheOptions options; + private final CacheMetrics cacheMetrics; - public RedisNamedCacheFactory(RedisClientDelegate redisClientDelegate, ObjectMapper objectMapper, RedisCacheOptions options, CacheMetrics cacheMetrics) { - this.redisClientDelegate = redisClientDelegate; - this.objectMapper = objectMapper; - this.options = options; - this.cacheMetrics = cacheMetrics; - } + public RedisNamedCacheFactory( + RedisClientDelegate redisClientDelegate, + ObjectMapper objectMapper, + RedisCacheOptions options, + CacheMetrics cacheMetrics) { + this.redisClientDelegate = redisClientDelegate; + this.objectMapper = objectMapper; + this.options = options; + this.cacheMetrics = cacheMetrics; + } - @Override - public WriteableCache getCache(String name) { - return new RedisCache(name, redisClientDelegate, objectMapper, options, cacheMetrics); - } + @Override + public WriteableCache getCache(String name) { + return new RedisCache(name, redisClientDelegate, objectMapper, options, cacheMetrics); + } } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/AgentIntervalProvider.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/AgentIntervalProvider.java deleted file mode 100644 index f6d8f44ecf2..00000000000 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/AgentIntervalProvider.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.cats.redis.cluster; - -import com.netflix.spinnaker.cats.agent.Agent; - -/** - * Provides a poll interval and timeout for an Agent. - */ -public interface AgentIntervalProvider { - public static class Interval { - final long interval; - final long errorInterval; - final long timeout; - - public Interval(long interval, long timeout) { - this(interval, interval, timeout); - } - - public Interval(long interval, long errorInterval, long timeout) { - this.interval = interval; - this.errorInterval = errorInterval; - this.timeout = timeout; - } - - /** - * @return how frequently the Agent should run in milliseconds - */ - public long getInterval() { - return interval; - } - - /** - * @return how frequently after an error the Agent should run in milliseconds - */ - public long getErrorInterval() { - return errorInterval; - } - - /** - * @return the maximum amount of time in milliseconds for an Agent to complete its run before the run is rescheduled - */ - public long getTimeout() { - return timeout; - } - } - - Interval getInterval(Agent agent); -} diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/CachingPodsObserver.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/CachingPodsObserver.java new file mode 100644 index 00000000000..63e93ca837f --- /dev/null +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/CachingPodsObserver.java @@ -0,0 +1,132 @@ +/* + * Copyright 2021 OpsMx. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.cats.redis.cluster; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.cluster.NodeIdentity; +import com.netflix.spinnaker.cats.cluster.ShardingFilter; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class CachingPodsObserver implements ShardingFilter, Runnable { + + private static final Logger logger = LoggerFactory.getLogger(CachingPodsObserver.class); + private static final String REPLICA_SSET_KEY = "clouddriver:caching:replicas"; + private static final String CORE_PROVIDER = + "com.netflix.spinnaker.clouddriver.core.provider.CoreProvider"; + private final RedisClientDelegate redisClientDelegate; + private final NodeIdentity nodeIdentity; + private final long replicaKeyTtl; + private int podCount = 0; + private int podIndex = -1; + // this script adds or updates a unique id as a member of a sorted set with score equal to current + // time plus sharding.replica-key-ttl-seconds, deletes the members having scores less than current + // time(ms) and finally fetches list of all members of the sorted set which represent the live + // caching pods + private static final String HEARTBEAT_REFRESH_SCRIPT = + "redis.call('zadd', KEYS[1], ARGV[1], ARGV[2])" + + " redis.call('zremrangebyscore', KEYS[1], '-inf', ARGV[3])" + + " return redis.call('zrange', KEYS[1], '0', '-1')"; + + public CachingPodsObserver( + RedisClientDelegate redisClientDelegate, + NodeIdentity nodeIdentity, + DynamicConfigService dynamicConfigService) { + this.redisClientDelegate = redisClientDelegate; + this.nodeIdentity = nodeIdentity; + long observerIntervalSeconds = + dynamicConfigService.getConfig( + Integer.class, "cache-sharding.heartbeat-interval-seconds", 30); + replicaKeyTtl = + dynamicConfigService.getConfig(Integer.class, "cache-sharding.replica-ttl-seconds", 60); + ScheduledExecutorService podsObserverExecutorService = + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setNameFormat(CachingPodsObserver.class.getSimpleName() + "-%d") + .build()); + podsObserverExecutorService.scheduleAtFixedRate( + this, 0, observerIntervalSeconds, TimeUnit.SECONDS); + refreshHeartbeat(); + logger.info("Account based sharding is enabled for all caching pods."); + } + + @Override + public void run() { + try { + refreshHeartbeat(); + } catch (Throwable t) { + logger.error("Failed to manage replicas heartbeat", t); + } + } + + private void refreshHeartbeat() { + String now = String.valueOf(System.currentTimeMillis()); + String expiry = + String.valueOf(System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(replicaKeyTtl)); + Object evalResponse = + redisClientDelegate.withScriptingClient( + client -> { + return client.eval( + HEARTBEAT_REFRESH_SCRIPT, + Collections.singletonList(REPLICA_SSET_KEY), + Arrays.asList(expiry, nodeIdentity.getNodeIdentity(), now)); + }); + if (evalResponse instanceof List) { + List replicaList = (List) evalResponse; + podCount = replicaList.size(); + podIndex = + replicaList.stream() + .sorted() + .collect(Collectors.toList()) + 
.indexOf(nodeIdentity.getNodeIdentity()); + logger.debug("caching pods = {} and this pod's index = {}", podCount, podIndex); + } else { + logger.error("Something is wrong, please check if the eval script and params are valid"); + } + + if (podCount == 0 || podIndex == -1) { + logger.error( + "No caching pod heartbeat records detected. Sharding logic can't be applied!!!!"); + } + } + + @Override + public boolean filter(Agent agent) { + if (agent.getProviderName().equals(CORE_PROVIDER)) { + return true; + } + return podCount == 1 + || Math.abs(getAccountName(agent.getAgentType()).hashCode() % podCount) == podIndex; + } + + private String getAccountName(String agentType) { + if (agentType.contains("/")) { + return agentType.substring(0, agentType.indexOf('/')); + } + return agentType; + } +} diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentScheduler.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentScheduler.java index 58542a205f8..4fc8c04fa5e 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentScheduler.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentScheduler.java @@ -16,19 +16,22 @@ package com.netflix.spinnaker.cats.redis.cluster; +import static com.netflix.spinnaker.cats.agent.ExecutionInstrumentation.elapsedTimeMs; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.netflix.spinnaker.cats.agent.Agent; import com.netflix.spinnaker.cats.agent.AgentExecution; import com.netflix.spinnaker.cats.agent.AgentLock; import com.netflix.spinnaker.cats.agent.AgentScheduler; import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider; +import com.netflix.spinnaker.cats.cluster.NodeIdentity; +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider; +import com.netflix.spinnaker.cats.cluster.ShardingFilter; import com.netflix.spinnaker.cats.module.CatsModuleAware; -import com.netflix.spinnaker.cats.thread.NamedThreadFactory; import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -37,9 +40,14 @@ import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import java.util.stream.Collectors; +import lombok.Getter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import redis.clients.jedis.params.SetParams; -public class ClusteredAgentScheduler extends CatsModuleAware implements AgentScheduler, Runnable { - private static enum Status { +public class ClusteredAgentScheduler extends CatsModuleAware + implements AgentScheduler, Runnable { + private enum Status { SUCCESS, FAILURE } @@ -52,40 +60,73 @@ private static enum Status { private final ExecutorService agentExecutionPool; private final Pattern enabledAgentPattern; + /** + * this contains all the known agents (from all cloud providers) that are candidates for execution + */ + @Getter // visible for tests private final Map agents = new ConcurrentHashMap<>(); + + /** This contains all the agents that are currently scheduled for execution */ + @Getter // visible for tests private final Map activeAgents = new ConcurrentHashMap<>(); + private final 
NodeStatusProvider nodeStatusProvider; private final DynamicConfigService dynamicConfigService; + private final ShardingFilter shardingFilter; - public ClusteredAgentScheduler(RedisClientDelegate redisClientDelegate, - NodeIdentity nodeIdentity, - AgentIntervalProvider intervalProvider, - NodeStatusProvider nodeStatusProvider, - String enabledAgentPattern, - Integer agentLockAcquisitionIntervalSeconds, - DynamicConfigService dynamicConfigService) { + private static final long MIN_TTL_THRESHOLD = 500L; + private static final String SET_IF_NOT_EXIST = "NX"; + private static final String SET_EXPIRE_TIME_MILLIS = "PX"; + private static final String SUCCESS_RESPONSE = "OK"; + private static final Long DEL_SUCCESS = 1L; + + @Getter // visible for tests + private static final String DELETE_LOCK_KEY = + "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end"; + + @Getter // visible for tests + private static final String TTL_LOCK_KEY = + "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('set', KEYS[1], ARGV[1], 'PX', ARGV[2], 'XX') else return nil end"; + + public ClusteredAgentScheduler( + RedisClientDelegate redisClientDelegate, + NodeIdentity nodeIdentity, + AgentIntervalProvider intervalProvider, + NodeStatusProvider nodeStatusProvider, + String enabledAgentPattern, + Integer agentLockAcquisitionIntervalSeconds, + DynamicConfigService dynamicConfigService, + ShardingFilter shardingFilter) { this( - redisClientDelegate, - nodeIdentity, - intervalProvider, - nodeStatusProvider, - Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(ClusteredAgentScheduler.class.getSimpleName())), - Executors.newCachedThreadPool(new NamedThreadFactory(AgentExecutionAction.class.getSimpleName())), - enabledAgentPattern, - agentLockAcquisitionIntervalSeconds, - dynamicConfigService - ); + redisClientDelegate, + nodeIdentity, + intervalProvider, + nodeStatusProvider, + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setNameFormat(ClusteredAgentScheduler.class.getSimpleName() + "-%d") + .build()), + Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setNameFormat(AgentExecutionAction.class.getSimpleName() + "-%d") + .build()), + enabledAgentPattern, + agentLockAcquisitionIntervalSeconds, + dynamicConfigService, + shardingFilter); } - public ClusteredAgentScheduler(RedisClientDelegate redisClientDelegate, - NodeIdentity nodeIdentity, - AgentIntervalProvider intervalProvider, - NodeStatusProvider nodeStatusProvider, - ScheduledExecutorService lockPollingScheduler, - ExecutorService agentExecutionPool, - String enabledAgentPattern, - Integer agentLockAcquisitionIntervalSeconds, - DynamicConfigService dynamicConfigService) { + public ClusteredAgentScheduler( + RedisClientDelegate redisClientDelegate, + NodeIdentity nodeIdentity, + AgentIntervalProvider intervalProvider, + NodeStatusProvider nodeStatusProvider, + ScheduledExecutorService lockPollingScheduler, + ExecutorService agentExecutionPool, + String enabledAgentPattern, + Integer agentLockAcquisitionIntervalSeconds, + DynamicConfigService dynamicConfigService, + ShardingFilter shardingFilter) { this.redisClientDelegate = redisClientDelegate; this.nodeIdentity = nodeIdentity; this.intervalProvider = intervalProvider; @@ -93,33 +134,44 @@ public ClusteredAgentScheduler(RedisClientDelegate redisClientDelegate, this.agentExecutionPool = agentExecutionPool; this.enabledAgentPattern = Pattern.compile(enabledAgentPattern); this.dynamicConfigService = 
dynamicConfigService; - Integer lockInterval = agentLockAcquisitionIntervalSeconds == null ? 1 : agentLockAcquisitionIntervalSeconds; + this.shardingFilter = shardingFilter; + Integer lockInterval = + agentLockAcquisitionIntervalSeconds == null ? 1 : agentLockAcquisitionIntervalSeconds; lockPollingScheduler.scheduleAtFixedRate(this, 0, lockInterval, TimeUnit.SECONDS); } private Map acquire() { Set skip = new HashSet<>(activeAgents.keySet()); - Integer maxConcurrentAgents = dynamicConfigService.getConfig(Integer.class, "redis.agent.maxConcurrentAgents", 1000); + Integer maxConcurrentAgents = + dynamicConfigService.getConfig(Integer.class, "redis.agent.max-concurrent-agents", 1000); Integer availableAgents = maxConcurrentAgents - skip.size(); if (availableAgents <= 0) { - logger.debug("Not acquiring more locks (maxConcurrentAgents: {} activeAgents: {}, runningAgents: {})", - maxConcurrentAgents, - skip.size(), - skip.stream().sorted().collect(Collectors.joining(",")) - ); + logger.debug( + "Not acquiring more locks (maxConcurrentAgents: {} activeAgents: {}, runningAgents: {})", + maxConcurrentAgents, + skip.size(), + skip.stream().sorted().collect(Collectors.joining(","))); return Collections.emptyMap(); } Map acquired = new HashMap<>(agents.size()); // Shuffle the list before grabbing so that we don't favor some agents accidentally - List> agentsEntrySet = new ArrayList<>(agents.entrySet()); + List> agentsEntrySet = + new ArrayList<>(agents.entrySet()); Collections.shuffle(agentsEntrySet); for (Map.Entry agent : agentsEntrySet) { - if (!skip.contains(agent.getKey())) { + if (shardingFilter.filter(agent.getValue().getAgent()) && !skip.contains(agent.getKey())) { final String agentType = agent.getKey(); - AgentIntervalProvider.Interval interval = intervalProvider.getInterval(agent.getValue().getAgent()); + AgentIntervalProvider.Interval interval = + intervalProvider.getInterval(agent.getValue().getAgent()); if (acquireRunKey(agentType, interval.getTimeout())) { - acquired.put(agentType, new NextAttempt(System.currentTimeMillis(), interval.getInterval(), interval.getErrorInterval())); + acquired.put( + agentType, + new NextAttempt( + System.currentTimeMillis(), + interval.getInterval(), + interval.getErrorInterval(), + interval.getTimeout())); } } if (acquired.size() >= availableAgents) { @@ -135,49 +187,103 @@ public void run() { return; } try { + pruneActiveAgents(); runAgents(); } catch (Throwable t) { logger.error("Unable to run agents", t); } } + /** + * this method removes agents from the {@link #activeAgents} map based on the following criteria: + * + *

* <p>- each agent has a max timeout interval associated with it. If it is present in the {@link + * #activeAgents} map for longer than this timeout value, then it is removed from this map. + * + * <p>NOTE: This same timeout interval is used when {@link #acquireRunKey(String, long)} is + * invoked from {@link #acquire()}. + * + * <p>
The motivation for actively cleaning such entries from the map is to ensure that no agent is + * in such a bad state that it can't be rescheduled again. In a normal workflow, the agent is + * removed from the map when {@link #agentCompleted(String, long)} is called from the {@link #run()} + * method after its execution. But if, for some reason, that thread is killed and {@link + * #agentCompleted(String, long)} is not called, then this agent stays in the {@link + * #activeAgents} map, which means it won't be rescheduled again. So by actively doing something + * like this, we enable it to be rescheduled. + */ + private void pruneActiveAgents() { + final long currentTime = System.currentTimeMillis(); + int count = 0; + for (final Map.Entry activeAgent : activeAgents.entrySet()) { + // max time up to which an agent can remain active + long removalTime = activeAgent.getValue().currentTime + activeAgent.getValue().timeout; + + // at least allow an agent to be active for MIN_TTL_THRESHOLD ms. + // this is the same threshold used in releaseRunKey() as well + if (removalTime + MIN_TTL_THRESHOLD < currentTime) { + logger.info( + "removing agent: {} from the active agents map as its max execution time" + + " has elapsed", + activeAgent.getKey()); + activeAgents.remove(activeAgent.getKey()); + count++; + } + } + + if (count > 0) { + logger.info( + "removed {} accounts from the active agents map as their max execution times have elapsed", + count); + } + } + private void runAgents() { Map thisRun = acquire(); activeAgents.putAll(thisRun); + logger.debug( + "scheduling {} new agents, total number of active agents: {}", + thisRun.size(), + activeAgents.size()); for (final Map.Entry toRun : thisRun.entrySet()) { final AgentExecutionAction exec = agents.get(toRun.getKey()); agentExecutionPool.submit(new AgentJob(toRun.getValue(), exec, this)); } } - private static final long MIN_TTL_THRESHOLD = 500L; - private static final String SET_IF_NOT_EXIST = "NX"; - private static final String SET_EXPIRE_TIME_MILLIS = "PX"; - private static final String SUCCESS_RESPONSE = "OK"; - private static final Long DEL_SUCCESS = 1L; - - private static final String DELETE_LOCK_KEY = "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end"; - private static final String TTL_LOCK_KEY = "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('set', KEYS[1], ARGV[1], 'PX', ARGV[2], 'XX') else return nil end"; - private boolean acquireRunKey(String agentType, long timeout) { - return redisClientDelegate.withCommandsClient(client -> { - String response = client.set(agentType, nodeIdentity.getNodeIdentity(), SET_IF_NOT_EXIST, SET_EXPIRE_TIME_MILLIS, timeout); - return SUCCESS_RESPONSE.equals(response); - }); + return redisClientDelegate.withCommandsClient( + client -> { + String response = + client.set( + agentType, + nodeIdentity.getNodeIdentity(), + SetParams.setParams().nx().px(timeout)); + return SUCCESS_RESPONSE.equals(response); + }); } private boolean deleteLock(String agentType) { - return redisClientDelegate.withScriptingClient(client -> { - Object response = client.eval(DELETE_LOCK_KEY, Arrays.asList(agentType), Arrays.asList(nodeIdentity.getNodeIdentity())); - return DEL_SUCCESS.equals(response); - }); + return redisClientDelegate.withScriptingClient( + client -> { + Object response = + client.eval( + DELETE_LOCK_KEY, + Arrays.asList(agentType), + Arrays.asList(nodeIdentity.getNodeIdentity())); + return DEL_SUCCESS.equals(response); + }); } private boolean 
ttlLock(String agentType, long newTtl) { - return redisClientDelegate.withScriptingClient(client -> { - Object response = client.eval(TTL_LOCK_KEY, Arrays.asList(agentType), Arrays.asList(nodeIdentity.getNodeIdentity(), Long.toString(newTtl))); - return SUCCESS_RESPONSE.equals(response); - }); + return redisClientDelegate.withScriptingClient( + client -> { + Object response = + client.eval( + TTL_LOCK_KEY, + Arrays.asList(agentType), + Arrays.asList(nodeIdentity.getNodeIdentity(), Long.toString(newTtl))); + return SUCCESS_RESPONSE.equals(response); + }); } private void releaseRunKey(String agentType, long when) { @@ -206,44 +312,69 @@ private void agentCompleted(String agentType, long nextExecutionTime) { } @Override - public void schedule(Agent agent, - AgentExecution agentExecution, - ExecutionInstrumentation executionInstrumentation) { + public void schedule( + Agent agent, + AgentExecution agentExecution, + ExecutionInstrumentation executionInstrumentation) { if (!enabledAgentPattern.matcher(agent.getAgentType().toLowerCase()).matches()) { logger.debug( - "Agent is not enabled (agent: {}, agentType: {}, pattern: {})", - agent.getClass().getSimpleName(), - agent.getAgentType(), - enabledAgentPattern.pattern() - ); + "Agent is not enabled (agent: {}, agentType: {}, pattern: {})", + agent.getClass().getSimpleName(), + agent.getAgentType(), + enabledAgentPattern.pattern()); return; } if (agent instanceof AgentSchedulerAware) { - ((AgentSchedulerAware)agent).setAgentScheduler(this); + ((AgentSchedulerAware) agent).setAgentScheduler(this); } - AgentExecutionAction agentExecutionAction = new AgentExecutionAction( - agent, agentExecution, executionInstrumentation - ); + AgentExecutionAction agentExecutionAction = + new AgentExecutionAction(agent, agentExecution, executionInstrumentation); agents.put(agent.getAgentType(), agentExecutionAction); } + /** + * + * + *
<pre>
+   * Removes an agent from Redis and from the {@link #agents} and {@link #activeAgents} maps.
+   *
+   * NOTE: We are explicitly removing the agent from the {@link #activeAgents} map. Normally, the agent is
+   * removed from it when {@link #agentCompleted(String, long)} is called after it executes via
+   * {@link AgentJob#run()}. But if for some reason that thread is killed before
+   * {@link #agentCompleted(String, long)} is executed, then this agent is not removed from the
+   * {@link #activeAgents} map, which means it won't be executed again if this agent is scheduled
+   * again in the future.
+   *
+   * PS: If accounts are not updated/deleted dynamically, this method will not be invoked, so the
+   *     agent can still remain in the {@link #activeAgents} map.
+   * </pre>
+ * + * @param agent agent under consideration + */ @Override public void unschedule(Agent agent) { - releaseRunKey(agent.getAgentType(), 0); // Delete lock key now. - agents.remove(agent.getAgentType()); + try { + releaseRunKey(agent.getAgentType(), 0); // Delete lock key now. + } finally { + agents.remove(agent.getAgentType()); + // explicitly remove it from the active agents map + activeAgents.remove(agent.getAgentType()); + } } private static class NextAttempt { private final long currentTime; private final long successInterval; private final long errorInterval; + private final long timeout; - public NextAttempt(long currentTime, long successInterval, long errorInterval) { + public NextAttempt(long currentTime, long successInterval, long errorInterval, long timeout) { this.currentTime = currentTime; this.successInterval = successInterval; this.errorInterval = errorInterval; + this.timeout = timeout; } public long getNextTime(Status status) { @@ -260,7 +391,8 @@ private static class AgentJob implements Runnable { private final AgentExecutionAction action; private final ClusteredAgentScheduler scheduler; - public AgentJob(NextAttempt times, AgentExecutionAction action, ClusteredAgentScheduler scheduler) { + public AgentJob( + NextAttempt times, AgentExecutionAction action, ClusteredAgentScheduler scheduler) { this.lockReleaseTime = times; this.action = action; this.scheduler = scheduler; @@ -272,7 +404,8 @@ public void run() { try { status = action.execute(); } finally { - scheduler.agentCompleted(action.getAgent().getAgentType(), lockReleaseTime.getNextTime(status)); + scheduler.agentCompleted( + action.getAgent().getAgentType(), lockReleaseTime.getNextTime(status)); } } } @@ -282,7 +415,10 @@ private static class AgentExecutionAction { private final AgentExecution agentExecution; private final ExecutionInstrumentation executionInstrumentation; - public AgentExecutionAction(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { + public AgentExecutionAction( + Agent agent, + AgentExecution agentExecution, + ExecutionInstrumentation executionInstrumentation) { this.agent = agent; this.agentExecution = agentExecution; this.executionInstrumentation = executionInstrumentation; @@ -293,17 +429,16 @@ public Agent getAgent() { } Status execute() { + long startTimeMs = System.currentTimeMillis(); try { executionInstrumentation.executionStarted(agent); - long startTime = System.nanoTime(); agentExecution.executeAgent(agent); - executionInstrumentation.executionCompleted(agent, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); + executionInstrumentation.executionCompleted(agent, elapsedTimeMs(startTimeMs)); return Status.SUCCESS; } catch (Throwable cause) { - executionInstrumentation.executionFailed(agent, cause); + executionInstrumentation.executionFailed(agent, cause, elapsedTimeMs(startTimeMs)); return Status.FAILURE; } } - } } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentLock.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentLock.java index 70d6e2c50ac..24493f8c413 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentLock.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentLock.java @@ -22,7 +22,8 @@ public class ClusteredSortAgentLock extends AgentLock { // The score the agent was acquired with (Used to ensure we own this agent on release). 
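The three Redis operations shown above make up the complete run-key lifecycle in ClusteredAgentScheduler: acquireRunKey() takes the lock with SET NX PX, ttlLock() extends it only while this node still owns it, and deleteLock() releases it with a compare-and-delete script. As a minimal, self-contained sketch of the same pattern against a plain Jedis client (the Redis host, lock key, and node id below are illustrative, not taken from the patch; only the two Lua scripts are copied from it):

import java.util.Arrays;
import java.util.Collections;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.params.SetParams;

public class RunKeyLifecycleSketch {
  // Scripts copied from the patch: both act only if the key still holds our identity.
  static final String DELETE_LOCK_KEY =
      "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end";
  static final String TTL_LOCK_KEY =
      "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('set', KEYS[1], ARGV[1], 'PX', ARGV[2], 'XX') else return nil end";

  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) { // assumes a local Redis
      String agentType = "exampleAgent"; // illustrative lock key
      String nodeId = "node-1";          // illustrative owner identity

      // Acquire: create the key only if absent (NX), with a millisecond TTL (PX).
      boolean acquired =
          "OK".equals(jedis.set(agentType, nodeId, SetParams.setParams().nx().px(5_000L)));
      if (!acquired) {
        return; // another node holds the run key
      }

      // Extend: bump the TTL, but only while the stored value is still our identity.
      jedis.eval(TTL_LOCK_KEY, Collections.singletonList(agentType),
          Arrays.asList(nodeId, Long.toString(10_000L)));

      // Release: compare-and-delete, so we never delete a lock another node re-acquired.
      Object deleted = jedis.eval(DELETE_LOCK_KEY, Collections.singletonList(agentType),
          Collections.singletonList(nodeId));
      System.out.println("released: " + Long.valueOf(1L).equals(deleted));
    }
  }
}

The compare step matters because a lock that expired mid-run may already have been re-acquired by another node; an unconditional DEL would release someone else's lock.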
private final String acquireScore; - // The score the agent was release from the WAITING set with (Used to ensure it is readded to the WAITING set with the right score). + // The score the agent was released from the WAITING set with (Used to ensure it is re-added to the + // WAITING set with the right score). private final String releaseScore; public ClusteredSortAgentLock(Agent agent, String acquireScore, String releaseScore) { diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentScheduler.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentScheduler.java index aabd57bfe16..2d11007190d 100644 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentScheduler.java +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentScheduler.java @@ -16,6 +16,10 @@ package com.netflix.spinnaker.cats.redis.cluster; +import static com.netflix.spinnaker.cats.agent.ExecutionInstrumentation.elapsedTimeMs; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.netflix.spinnaker.cats.agent.Agent; import com.netflix.spinnaker.cats.agent.AgentExecution; import com.netflix.spinnaker.cats.agent.AgentScheduler; @@ -23,13 +27,9 @@ import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.agent.CachingAgent; import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider; +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider; import com.netflix.spinnaker.cats.module.CatsModuleAware; -import com.netflix.spinnaker.cats.thread.NamedThreadFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import redis.clients.jedis.Jedis; -import redis.clients.jedis.JedisPool; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -42,6 +42,10 @@ import java.util.concurrent.Executors; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPool; /* * The idea behind this scheduler is simple. Every agent it owns is always in one of two sorted sets, * cache interval every key will only be removed from Redis once. If the interval is 60s, and the agent polls every 1s, * we already have a (30s / 1) * (# of clouddrivers) factor of improvement. */ -public class ClusteredSortAgentScheduler extends CatsModuleAware implements AgentScheduler<ClusteredSortAgentLock>, Runnable { +public class ClusteredSortAgentScheduler extends CatsModuleAware + implements AgentScheduler<ClusteredSortAgentLock>, Runnable { private static enum Status { SUCCESS, FAILURE @@ -75,8 +80,8 @@ private static enum Status { private Optional<Semaphore> runningAgents; // This code assumes that every agent being run is in exactly either the WAITING or WORKING set.
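The class comment and the WAITZ/WORKZ constants that follow describe a data model worth spelling out: an agent lives in exactly one of two sorted sets, and its score is always a time, either the moment it becomes due (WAITING) or the moment its lease expires (WORKING). Below is a minimal sketch of that model with a plain Jedis client, assuming a local Redis; the real scheduler performs the move atomically with a Lua script (see the sketch after the swap-script comment below), this two-step version is only for readability:

import java.util.Set;
import redis.clients.jedis.Jedis;

public class TwoSortedSetModelSketch {
  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) { // assumes a local Redis
      long now = System.currentTimeMillis();

      // A waiting agent is scored with the time it becomes runnable.
      jedis.zadd("WAITZ", now, "exampleAgent");

      // One scheduler pass claims everything whose score has come due...
      Set<String> due = jedis.zrangeByScore("WAITZ", "-inf", String.valueOf(now));
      for (String agent : due) {
        // ...and moves it to the working set, scored with its timeout deadline,
        // so a later pass can cull workers that have overstayed their lease.
        jedis.zrem("WAITZ", agent);
        jedis.zadd("WORKZ", now + 60_000L, agent);
      }
    }
  }
}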
- private static final String WAITING_SET = "WAITZ"; - private static final String WORKING_SET = "WORKZ"; + @VisibleForTesting static final String WAITING_SET = "WAITZ"; + @VisibleForTesting static final String WORKING_SET = "WORKZ"; private static final String ADD_AGENT_SCRIPT = "addAgentScript"; private static final String VALID_SCORE_SCRIPT = "validScoreScript"; private static final String SWAP_SET_SCRIPT = "swapSetScript"; @@ -85,7 +90,11 @@ private static enum Status { private ConcurrentHashMap scriptShas; - public ClusteredSortAgentScheduler(JedisPool jedisPool, NodeStatusProvider nodeStatusProvider, AgentIntervalProvider intervalProvider, Integer parallelism) { + public ClusteredSortAgentScheduler( + JedisPool jedisPool, + NodeStatusProvider nodeStatusProvider, + AgentIntervalProvider intervalProvider, + Integer parallelism) { this.jedisPool = jedisPool; this.nodeStatusProvider = nodeStatusProvider; this.agents = new ConcurrentHashMap<>(); @@ -93,7 +102,8 @@ public ClusteredSortAgentScheduler(JedisPool jedisPool, NodeStatusProvider nodeS this.log = LoggerFactory.getLogger(getClass()); if (parallelism == 0 || parallelism < -1) { - throw new IllegalArgumentException("Argument 'parallelism' must be positive, or -1 (for unlimited parallelism)."); + throw new IllegalArgumentException( + "Argument 'parallelism' must be positive, or -1 (for unlimited parallelism)."); } else if (parallelism > 0) { this.runningAgents = Optional.of(new Semaphore(parallelism)); } else { @@ -103,56 +113,74 @@ public ClusteredSortAgentScheduler(JedisPool jedisPool, NodeStatusProvider nodeS scriptShas = new ConcurrentHashMap<>(); storeScripts(); - this.agentWorkPool = Executors.newCachedThreadPool(new NamedThreadFactory(AgentWorker.class.getSimpleName())); - Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(ClusteredSortAgentScheduler.class.getSimpleName())) - .scheduleAtFixedRate(this, 0, 1, TimeUnit.SECONDS); + this.agentWorkPool = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setNameFormat(AgentWorker.class.getSimpleName() + "-%d") + .build()); + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setNameFormat(ClusteredSortAgentScheduler.class.getSimpleName() + "-%d") + .build()) + .scheduleAtFixedRate(this, 0, 1, TimeUnit.SECONDS); } private void storeScripts() { try (Jedis jedis = jedisPool.getResource()) { - // When we switch an agent from one set to another, we first make sure it exists in the set we are removing it - // from, and then we perform the swap. If this check fails, the thread performing the swap does not get ownership + // When we switch an agent from one set to another, we first make sure it exists in the set we + // are removing it + // from, and then we perform the swap. If this check fails, the thread performing the swap + // does not get ownership // of the agent. - // Swap happens from KEYS[1] -> KEYS[2] with the agent type being ARGV[1], and the score being ARGV[2]. 
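Each of the scriptShas.put(...) calls that follow uses the same two-phase pattern: scriptLoad() sends the Lua source to Redis once and gets back a SHA1 digest, and every later invocation goes through evalsha() with just that digest, so the script body never travels over the wire again. A sketch of the mechanics using the patch's own swap script (Redis host and set/agent names are illustrative):

import java.util.Arrays;
import redis.clients.jedis.Jedis;

public class EvalshaSketch {
  // The swap script from the patch: move ARGV[1] from KEYS[1] to KEYS[2]
  // with score ARGV[2], returning the old score, or nil if it was absent.
  static final String SWAP_SET_SCRIPT =
      "local score = redis.call('zscore', KEYS[1], ARGV[1])\n"
          + "if score ~= nil then\n"
          + " redis.call('zrem', KEYS[1], ARGV[1])\n"
          + " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n"
          + " return score\n"
          + "else return nil end\n";

  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) { // assumes a local Redis
      String sha = jedis.scriptLoad(SWAP_SET_SCRIPT); // load once, keep the digest

      jedis.zadd("WAITZ", 1000, "exampleAgent");
      Object oldScore = jedis.evalsha(sha,
          Arrays.asList("WAITZ", "WORKZ"),        // KEYS
          Arrays.asList("exampleAgent", "2000")); // ARGV
      System.out.println("previous score: " + oldScore);
    }
  }
}

Note that Redis's script cache is emptied by a restart, which is presumably why the scheduler keeps the SHAs in a map and re-resolves them through getScriptSha() rather than assuming a load-once world.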
- scriptShas.put(SWAP_SET_SCRIPT, jedis.scriptLoad( - "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + - "if score ~= nil then\n" + - " redis.call('zrem', KEYS[1], ARGV[1])\n" + - " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n" + - " return score\n" + - "else return nil end\n" - )); - - scriptShas.put(CONDITIONAL_SWAP_SET_SCRIPT, jedis.scriptLoad( - "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + - "if score == ARGV[3] then\n" + - " redis.call('zrem', KEYS[1], ARGV[1])\n" + - " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n" + - " return score\n" + - "else return nil end\n" - )); - - scriptShas.put(VALID_SCORE_SCRIPT, jedis.scriptLoad( - "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + - "if score == ARGV[2] then\n" + - " return score\n" + - "else return nil end\n" - )); - - // If the agent isn't present in either the WAITING or WORKING sets, it's safe to add. If it's present in either, + // Swap happens from KEYS[1] -> KEYS[2] with the agent type being ARGV[1], and the score being + // ARGV[2]. + scriptShas.put( + SWAP_SET_SCRIPT, + jedis.scriptLoad( + "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + + "if score ~= nil then\n" + + " redis.call('zrem', KEYS[1], ARGV[1])\n" + + " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n" + + " return score\n" + + "else return nil end\n")); + + scriptShas.put( + CONDITIONAL_SWAP_SET_SCRIPT, + jedis.scriptLoad( + "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + + "if score == ARGV[3] then\n" + + " redis.call('zrem', KEYS[1], ARGV[1])\n" + + " redis.call('zadd', KEYS[2], ARGV[2], ARGV[1])\n" + + " return score\n" + + "else return nil end\n")); + + scriptShas.put( + VALID_SCORE_SCRIPT, + jedis.scriptLoad( + "local score = redis.call('zscore', KEYS[1], ARGV[1])\n" + + "if score == ARGV[2] then\n" + + " return score\n" + + "else return nil end\n")); + + // If the agent isn't present in either the WAITING or WORKING sets, it's safe to add. If it's + // present in either, // it's being worked on or was recently run, so leave it be. - // KEYS[1] and KEYS[2] are checked for inclusion. If the agent is in neither ARGV[1] is added to KEYS[1] with score + // KEYS[1] and KEYS[2] are checked for inclusion. If the agent is in neither ARGV[1] is added + // to KEYS[1] with score // ARGV[2]. 
- scriptShas.put(ADD_AGENT_SCRIPT, jedis.scriptLoad( - "if redis.call('zrank', KEYS[1], ARGV[1]) ~= nil then\n" + - " if redis.call('zrank', KEYS[2], ARGV[1]) ~= nil then\n" + - " return redis.call('zadd', KEYS[1], ARGV[2], ARGV[1])\n" + - " else return nil end\n" + - "else return nil end\n")); - - scriptShas.put(REMOVE_AGENT_SCRIPT, jedis.scriptLoad( - "redis.call('zrem', KEYS[1], ARGV[1])\n" + - "redis.call('zrem', KEYS[2], ARGV[1])\n")); + scriptShas.put( + ADD_AGENT_SCRIPT, + jedis.scriptLoad( + "if redis.call('zrank', KEYS[1], ARGV[1]) ~= nil then\n" + + " if redis.call('zrank', KEYS[2], ARGV[1]) ~= nil then\n" + + " return redis.call('zadd', KEYS[1], ARGV[2], ARGV[1])\n" + + " else return nil end\n" + + "else return nil end\n")); + + scriptShas.put( + REMOVE_AGENT_SCRIPT, + jedis.scriptLoad( + "redis.call('zrem', KEYS[1], ARGV[1])\n" + "redis.call('zrem', KEYS[2], ARGV[1])\n")); } } @@ -174,18 +202,31 @@ private String getScriptSha(String scriptName, Jedis jedis) { } @Override - public void schedule(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { + public void schedule( + Agent agent, + AgentExecution agentExecution, + ExecutionInstrumentation executionInstrumentation) { if (agent instanceof AgentSchedulerAware) { - ((AgentSchedulerAware)agent).setAgentScheduler(this); + ((AgentSchedulerAware) agent).setAgentScheduler(this); } if (!(agentExecution instanceof CachingAgent.CacheExecution)) { - throw new IllegalArgumentException("Sort scheduler requires agent executions to be of type CacheExecution"); + throw new IllegalArgumentException( + "Sort scheduler requires agent executions to be of type CacheExecution"); } - agents.put(agent.getAgentType(), new AgentWorker(agent, (CachingAgent.CacheExecution)agentExecution, executionInstrumentation, this)); + agents.put( + agent.getAgentType(), + new AgentWorker( + agent, (CachingAgent.CacheExecution) agentExecution, executionInstrumentation, this)); try (Jedis jedis = jedisPool.getResource()) { - jedis.evalsha(getScriptSha(ADD_AGENT_SCRIPT, jedis), 2, WAITING_SET, WORKING_SET, agent.getAgentType(), score(jedis, NOW)); + jedis.evalsha( + getScriptSha(ADD_AGENT_SCRIPT, jedis), + 2, + WAITING_SET, + WORKING_SET, + agent.getAgentType(), + score(jedis, NOW)); } } @@ -201,22 +242,32 @@ public ClusteredSortAgentLock tryLock(Agent agent) { @Override public boolean tryRelease(ClusteredSortAgentLock lock) { - return conditionalReleaseAgent(lock.getAgent(), lock.getAcquireScore(), lock.getReleaseScore()) != null; + return conditionalReleaseAgent(lock.getAgent(), lock.getAcquireScore(), lock.getReleaseScore()) + != null; } @Override public boolean lockValid(ClusteredSortAgentLock lock) { try (Jedis jedis = jedisPool.getResource()) { - return jedis.evalsha(getScriptSha(VALID_SCORE_SCRIPT, jedis), 1, WORKING_SET, - lock.getAgent().getAgentType(), - lock.getAcquireScore()) != null; + return jedis.evalsha( + getScriptSha(VALID_SCORE_SCRIPT, jedis), + 1, + WORKING_SET, + lock.getAgent().getAgentType(), + lock.getAcquireScore()) + != null; } } public void unschedule(Agent agent) { agents.remove(agent.getAgentType()); try (Jedis jedis = jedisPool.getResource()) { - jedis.evalsha(getScriptSha(REMOVE_AGENT_SCRIPT, jedis), 2, WAITING_SET, WORKING_SET, agent.getAgentType()); + jedis.evalsha( + getScriptSha(REMOVE_AGENT_SCRIPT, jedis), + 2, + WAITING_SET, + WORKING_SET, + agent.getAgentType()); } } @@ -267,9 +318,11 @@ private String agentScore(Agent agent) { private ScoreTuple acquireAgent(Agent agent) { try (Jedis 
jedis = jedisPool.getResource()) { String acquireScore = score(jedis, intervalProvider.getInterval(agent).getTimeout()); - Object releaseScore = jedis.evalsha(getScriptSha(SWAP_SET_SCRIPT, jedis), - Arrays.asList(WAITING_SET, WORKING_SET), - Arrays.asList(agent.getAgentType(), acquireScore)); + Object releaseScore = + jedis.evalsha( + getScriptSha(SWAP_SET_SCRIPT, jedis), + Arrays.asList(WAITING_SET, WORKING_SET), + Arrays.asList(agent.getAgentType(), acquireScore)); return releaseScore != null ? new ScoreTuple(acquireScore, releaseScore.toString()) : null; } @@ -277,25 +330,31 @@ private ScoreTuple acquireAgent(Agent agent) { private ScoreTuple conditionalReleaseAgent(Agent agent, String acquireScore, Status status) { try (Jedis jedis = jedisPool.getResource()) { - long newInterval = status == Status.SUCCESS - ? intervalProvider.getInterval(agent).getInterval() - : intervalProvider.getInterval(agent).getErrorInterval(); + long newInterval = + status == Status.SUCCESS + ? intervalProvider.getInterval(agent).getInterval() + : intervalProvider.getInterval(agent).getErrorInterval(); String newAcquireScore = score(jedis, newInterval); - Object releaseScore = jedis.evalsha(getScriptSha(CONDITIONAL_SWAP_SET_SCRIPT, jedis), - Arrays.asList(WORKING_SET, WAITING_SET), - Arrays.asList(agent.getAgentType(), newAcquireScore, - acquireScore)); + Object releaseScore = + jedis.evalsha( + getScriptSha(CONDITIONAL_SWAP_SET_SCRIPT, jedis), + Arrays.asList(WORKING_SET, WAITING_SET), + Arrays.asList(agent.getAgentType(), newAcquireScore, acquireScore)); return releaseScore != null ? new ScoreTuple(newAcquireScore, releaseScore.toString()) : null; } } - private ScoreTuple conditionalReleaseAgent(Agent agent, String acquireScore, String newAcquireScore) { + private ScoreTuple conditionalReleaseAgent( + Agent agent, String acquireScore, String newAcquireScore) { try (Jedis jedis = jedisPool.getResource()) { - Object releaseScore = jedis.evalsha(getScriptSha(CONDITIONAL_SWAP_SET_SCRIPT, jedis), - Arrays.asList(WORKING_SET, WAITING_SET), - Arrays.asList(agent.getAgentType(), newAcquireScore, - acquireScore)).toString(); + Object releaseScore = + jedis + .evalsha( + getScriptSha(CONDITIONAL_SWAP_SET_SCRIPT, jedis), + Arrays.asList(WORKING_SET, WAITING_SET), + Arrays.asList(agent.getAgentType(), newAcquireScore, acquireScore)) + .toString(); return releaseScore != null ? new ScoreTuple(newAcquireScore, releaseScore.toString()) : null; } @@ -304,27 +363,40 @@ private ScoreTuple conditionalReleaseAgent(Agent agent, String acquireScore, Str private ScoreTuple releaseAgent(Agent agent) { try (Jedis jedis = jedisPool.getResource()) { String acquireScore = score(jedis, intervalProvider.getInterval(agent).getInterval()); - Object releaseScore = jedis.evalsha(getScriptSha(SWAP_SET_SCRIPT, jedis), - Arrays.asList(WORKING_SET, WAITING_SET), - Arrays.asList(agent.getAgentType(), acquireScore)).toString(); + Object releaseScore = + jedis + .evalsha( + getScriptSha(SWAP_SET_SCRIPT, jedis), + Arrays.asList(WORKING_SET, WAITING_SET), + Arrays.asList(agent.getAgentType(), acquireScore)) + .toString(); return releaseScore != null ? new ScoreTuple(acquireScore, releaseScore.toString()) : null; } } - private void saturatePool() { + @VisibleForTesting + void saturatePool() { try (Jedis jedis = jedisPool.getResource()) { - // Occasionally repopulate the agents in case redis went down. If they already exist, this is a NOOP + // Occasionally repopulate the agents in case redis went down. 
If they already exist, this is + // a NOOP if (runCount % REDIS_REFRESH_PERIOD == 0) { for (String agent : agents.keySet()) { - jedis.evalsha(getScriptSha(ADD_AGENT_SCRIPT, jedis), 2, WAITING_SET, WORKING_SET, agent, score(jedis, NOW)); + jedis.evalsha( + getScriptSha(ADD_AGENT_SCRIPT, jedis), + 2, + WAITING_SET, + WORKING_SET, + agent, + score(jedis, NOW)); } } // First cull threads in the WORKING set that have been there too long (TIMEOUT time). Set<String> oldKeys = jedis.zrangeByScore(WORKING_SET, "-inf", score(jedis, NOW)); for (String key : oldKeys) { - // Ignore result, since if this agent was released between now and the above jedis call, our work was done + // Ignore result, since if this agent was released between now and the above jedis call, our + // work was done // for us. AgentWorker worker = agents.get(key); if (worker != null) { @@ -339,21 +411,32 @@ private void saturatePool() { // Loop until we either run out of threads to use, or agents (which are keys) to run. while (!keys.isEmpty() && runningAgents.map(Semaphore::tryAcquire).orElse(true)) { - String agent = keys.remove(0); - - AgentWorker worker = agents.get(agent); - ScoreTuple score; - if (worker != null && (score = acquireAgent(worker.agent)) != null) { - // This score is used to determine if the worker thread running the agent is allowed to store its results. - // If on release of this agent, the scores don't match, this agent was rescheduled by a separate thread. - worker.setScore(score.acquireScore); - workers.add(worker); + try { + String agent = keys.remove(0); + + AgentWorker worker = agents.get(agent); + ScoreTuple score; + if (worker != null && (score = acquireAgent(worker.agent)) != null) { + // This score is used to determine if the worker thread running the agent is allowed to + // store its results. + // If on release of this agent, the scores don't match, this agent was rescheduled by a + // separate thread. + worker.setScore(score.acquireScore); + if (workers.add(worker)) { + agentWorkPool.submit(worker); + continue; + } + } + // This agent worker has not been submitted to the agentWorkPool, so the acquired permit + // must be released back to the semaphore. + runningAgents.ifPresent(Semaphore::release); + } catch (Throwable t) { + log.error("Failed to submit AgentWorker to agentWorkPool", t); + runningAgents.ifPresent(Semaphore::release); + // Better to ignore (not re-throw), so that the remaining agent workers can still be + // submitted to the agentWorkPool as much as possible.
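The try/catch added above fixes a permit leak: a permit is taken by tryAcquire() in the loop condition, but it is only ever returned by the worker's finally block, so a failed submit() (or any throw between the acquire and the submit) would shrink the effective parallelism forever. The shape of the fix, as a standalone sketch in plain JDK terms (pool size and task names are illustrative):

import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class PermitSafeSubmitSketch {
  public static void main(String[] args) {
    // Optional.empty() would mean "unlimited parallelism", mirroring the scheduler's -1 case.
    Optional<Semaphore> runningAgents = Optional.of(new Semaphore(2));
    ExecutorService pool = Executors.newCachedThreadPool();

    for (String agent : List.of("a", "b", "c")) {
      // Stop when no permit is available; with no semaphore, always proceed.
      if (!runningAgents.map(Semaphore::tryAcquire).orElse(true)) {
        break;
      }
      try {
        pool.submit(() -> {
          try {
            System.out.println("running " + agent);
          } finally {
            runningAgents.ifPresent(Semaphore::release); // normal return path
          }
        });
      } catch (Throwable t) {
        // Submission failed, so the worker's finally block will never run;
        // the permit must be returned here or capacity leaks permanently.
        runningAgents.ifPresent(Semaphore::release);
      }
    }
    pool.shutdown();
  }
}

The scheduler's version also releases the permit on the "acquired but never submitted" path (worker missing or lock not won), which this sketch folds into the same idea: every tryAcquire() must be paired with exactly one release(), on whichever path the iteration takes.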
} } - - for (AgentWorker worker : workers) { - agentWorkPool.submit(worker); - } } } @@ -364,7 +447,11 @@ private static class AgentWorker implements Runnable { private final ClusteredSortAgentScheduler scheduler; private String acquireScore; - AgentWorker(Agent agent, CachingAgent.CacheExecution agentExecution, ExecutionInstrumentation executionInstrumentation, ClusteredSortAgentScheduler scheduler) { + AgentWorker( + Agent agent, + CachingAgent.CacheExecution agentExecution, + ExecutionInstrumentation executionInstrumentation, + ClusteredSortAgentScheduler scheduler) { this.agent = agent; this.agentExecution = agentExecution; this.executionInstrumentation = executionInstrumentation; @@ -380,19 +467,21 @@ public void run() { assert acquireScore != null; CacheResult result = null; Status status = Status.FAILURE; + long startTimeMs = System.currentTimeMillis(); try { executionInstrumentation.executionStarted(agent); - long startTime = System.nanoTime(); result = agentExecution.executeAgentWithoutStore(agent); - executionInstrumentation.executionCompleted(agent, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)); + executionInstrumentation.executionCompleted(agent, elapsedTimeMs(startTimeMs)); status = Status.SUCCESS; } catch (Throwable cause) { - executionInstrumentation.executionFailed(agent, cause); + executionInstrumentation.executionFailed(agent, cause, elapsedTimeMs(startTimeMs)); } finally { - // Regardless of success or failure, we need to try and release this agent. If the release is successful (we + // Regardless of success or failure, we need to try and release this agent. If the release + // is successful (we // own this agent), and a result was created, we can store it. scheduler.runningAgents.ifPresent(Semaphore::release); - if (scheduler.conditionalReleaseAgent(agent, acquireScore, status) != null && result != null) { + if (scheduler.conditionalReleaseAgent(agent, acquireScore, status) != null + && result != null) { agentExecution.storeAgentResult(agent, result); } } @@ -408,5 +497,4 @@ public ScoreTuple(String acquireScore, String releaseScore) { this.releaseScore = releaseScore; } } - } diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultAgentIntervalProvider.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultAgentIntervalProvider.java deleted file mode 100644 index 48dc4fc8aa1..00000000000 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultAgentIntervalProvider.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.cats.redis.cluster; - -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.AgentIntervalAware; - -public class DefaultAgentIntervalProvider implements AgentIntervalProvider { - private final long interval; - private final long errorInterval; - private final long timeout; - - public DefaultAgentIntervalProvider(long interval) { - this(interval, interval * 2); - } - - public DefaultAgentIntervalProvider(long interval, long timeout) { - this(interval, interval, timeout); - } - - public DefaultAgentIntervalProvider(long interval, long errorInterval, long timeout) { - this.interval = interval; - this.errorInterval = errorInterval; - this.timeout = timeout; - } - - @Override - public Interval getInterval(Agent agent) { - if (agent instanceof AgentIntervalAware) { - Long agentInterval = ((AgentIntervalAware) agent).getAgentInterval(); - Long agentErrorInterval = ((AgentIntervalAware) agent).getAgentErrorInterval(); - if (agentInterval != null && agentInterval > 0) { - // Specify the caching agent timeout as twice the interval. This gives a high upper bound - // on the time it should take the agent to complete its work. The agent's lock is revoked - // after the timeout. - return new Interval(agentInterval, agentErrorInterval, 2 * agentInterval); - } - } - - return new Interval(interval, errorInterval, timeout); - } - - public long getInterval() { - return interval; - } - - public long getErrorInterval() { - return errorInterval; - } - - public long getTimeout() { - return timeout; - } -} diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeIdentity.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeIdentity.java deleted file mode 100644 index e2a917fb6f3..00000000000 --- a/cats/cats-redis/src/main/java/com/netflix/spinnaker/cats/redis/cluster/DefaultNodeIdentity.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.cats.redis.cluster; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.NetworkInterface; -import java.net.Socket; -import java.net.SocketException; -import java.util.Collections; -import java.util.Enumeration; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -public class DefaultNodeIdentity implements NodeIdentity { - - public static final String UNKNOWN_HOST = "UnknownHost"; - private static final long REFRESH_INTERVAL = TimeUnit.SECONDS.toMillis(30); - - @SuppressWarnings("PMD.EmptyCatchBlock") - private static String getHostName(String validationHost, int validationPort) { - final Enumeration interfaces; - try { - interfaces = NetworkInterface.getNetworkInterfaces(); - } catch (SocketException ignored) { - return UNKNOWN_HOST; - } - if (interfaces == null || validationHost == null) { - return UNKNOWN_HOST; - } - - for (NetworkInterface networkInterface : Collections.list(interfaces)) { - try { - if (networkInterface.isLoopback() && - !validationHost.equals("localhost") && - !validationHost.startsWith("127.")) { - continue; - } - - if (!networkInterface.isUp()) { - continue; - } - } catch (SocketException ignored) { - continue; - } - - for (InetAddress address : Collections.list(networkInterface.getInetAddresses())) { - Socket socket = null; - try { - socket = new Socket(); - socket.bind(new InetSocketAddress(address, 0)); - socket.connect(new InetSocketAddress(validationHost, validationPort), 125); - return address.getHostName(); - } catch (IOException ignored) { - //ignored - } finally { - if (socket != null) { - try { - socket.close(); - } catch (IOException ignored) { - //ignored - } - } - } - } - } - - return UNKNOWN_HOST; - } - - private final String validationAddress; - private final int validationPort; - private final String runtimeName; - private final AtomicReference identity = new AtomicReference<>(null); - private final AtomicBoolean validIdentity = new AtomicBoolean(false); - private final AtomicLong refreshTime = new AtomicLong(0); - private final Lock refreshLock = new ReentrantLock(); - private final long refreshInterval; - - public DefaultNodeIdentity() { - this("www.google.com", 80); - } - - public DefaultNodeIdentity(String validationAddress, int validationPort) { - this(validationAddress, validationPort, REFRESH_INTERVAL); - } - - public DefaultNodeIdentity(String validationAddress, int validationPort, long refreshInterval) { - this.validationAddress = validationAddress; - this.validationPort = validationPort; - this.runtimeName = ManagementFactory.getRuntimeMXBean().getName(); - this.refreshInterval = refreshInterval; - loadIdentity(); - - } - - @Override - public String getNodeIdentity() { - if (!validIdentity.get() && shouldRefresh()) { - refreshLock.lock(); - try { - if (!validIdentity.get() && shouldRefresh()) { - loadIdentity(); - } - } finally { - refreshLock.unlock(); - } - } - return identity.get(); - } - - private boolean shouldRefresh() { - return System.currentTimeMillis() - refreshTime.get() > refreshInterval; - } - - private void loadIdentity() { - identity.set(String.format("%s:%s", getHostName(validationAddress, validationPort), runtimeName)); - 
validIdentity.set(!identity.get().contains(UNKNOWN_HOST)); - refreshTime.set(System.currentTimeMillis()); - } -} diff --git a/cats/cats-redis/src/main/java/com/netflix/spinnaker/config/RedisShardingFilterConfiguration.java b/cats/cats-redis/src/main/java/com/netflix/spinnaker/config/RedisShardingFilterConfiguration.java new file mode 100644 index 00000000000..0e36afc1183 --- /dev/null +++ b/cats/cats-redis/src/main/java/com/netflix/spinnaker/config/RedisShardingFilterConfiguration.java @@ -0,0 +1,41 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.config; + +import com.netflix.spinnaker.cats.cluster.DefaultNodeIdentity; +import com.netflix.spinnaker.cats.cluster.ShardingFilter; +import com.netflix.spinnaker.cats.redis.cluster.CachingPodsObserver; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty(value = "caching.write-enabled", matchIfMissing = true) +public class RedisShardingFilterConfiguration { + + @Bean + @ConditionalOnExpression( + "${redis.enabled:true} && ${redis.scheduler.enabled:true} && ${cache-sharding.enabled:false}") + ShardingFilter shardingFilter( + RedisClientDelegate redisClientDelegate, DynamicConfigService dynamicConfigService) { + return new CachingPodsObserver( + redisClientDelegate, new DefaultNodeIdentity(), dynamicConfigService); + } +} diff --git a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisCacheSpec.groovy b/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisCacheSpec.groovy index 124b4209d98..40ff3523fe4 100644 --- a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisCacheSpec.groovy +++ b/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisCacheSpec.groovy @@ -16,8 +16,10 @@ package com.netflix.spinnaker.cats.redis.cache +import com.fasterxml.jackson.annotation.JsonInclude import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.cache.WriteableCache import com.netflix.spinnaker.cats.cache.WriteableCacheSpec @@ -31,176 +33,213 @@ import spock.lang.Shared import spock.lang.Unroll class RedisCacheSpec extends WriteableCacheSpec { - static int MAX_MSET_SIZE = 2 - static int MAX_MERGE_COUNT = 1 - - CacheMetrics cacheMetrics = Mock() - JedisPool pool - - @Shared - @AutoCleanup("destroy") - EmbeddedRedis embeddedRedis - - @Override - Cache getSubject() { - if (!embeddedRedis) { - embeddedRedis = 
EmbeddedRedis.embed() - } - pool = embeddedRedis.pool as JedisPool - Jedis jedis - try { - jedis = pool.resource - jedis.flushAll() - } finally { - jedis?.close() - } - - def mapper = new ObjectMapper(); - return new RedisCache('test', new JedisClientDelegate(pool), mapper, RedisCacheOptions.builder().maxMset(MAX_MSET_SIZE).maxMergeBatch(MAX_MERGE_COUNT).build(), cacheMetrics) - } - - @Unroll - def 'attribute datatype handling #description'() { - setup: - def mergeData = createData('foo', [test: value], [:]) - cache.merge('test', mergeData) - - when: - def cacheData = cache.get('test', 'foo') - - then: - cacheData != null - cacheData.attributes.test == expected - - where: - value | expected | description - null | null | "null" - 1 | 1 | "Integer" - 2.0f | 2.0f | "Float" - "Bacon" | "Bacon" | "String" - true | true | "Boolean" - ['one', 'two'] | ['one', 'two'] | "Primitive list" - [key: 'value', key2: 10] | [key: 'value', key2: 10] | "Map" - new Bean('value', 10) | [key: 'value', key2: 10] | "Java object" - [key: 'value', key2: null] | [key: 'value'] | "Map with null" - new Bean('value', null) | [key: 'value', key2: null] | "Java object with null" - } - - @Unroll - def 'cache data will expire if ttl specified'() { - setup: - def mergeData = new DefaultCacheData('ttlTest', ttl, [test: 'test'], [:]) - cache.merge('test', mergeData); - - when: - def cacheData = cache.get('test', 'ttlTest') - - then: - cacheData.id == mergeData.id - - when: - Thread.sleep(Math.abs(ttl) * 1500) - cacheData = cache.get('test', 'ttlTest') - - then: - cacheData?.id == (ttl > 0 ? null : mergeData.id) + static int MAX_MSET_SIZE = 2 + static int MAX_MERGE_COUNT = 1 - where: - ttl || _ - -1 || _ - 1 || _ + CacheMetrics cacheMetrics = Mock() + JedisPool pool - } - - def 'verify MSET chunking behavior (> MAX_MSET_SIZE)'() { - setup: - ((WriteableCache) cache).mergeAll('foo', [createData('bar'), createData('baz'), createData('bam')]) + @Shared + @AutoCleanup("destroy") + EmbeddedRedis embeddedRedis - expect: - cache.getIdentifiers('foo').sort() == ['bam', 'bar', 'baz'] + @Override + Cache getSubject() { + if (!embeddedRedis) { + embeddedRedis = EmbeddedRedis.embed() } - - def 'should fail if maxMsetSize is not even'() { - when: - RedisCacheOptions.builder().maxMset(7).build() - - then: - thrown(IllegalArgumentException) + pool = embeddedRedis.pool as JedisPool + Jedis jedis + try { + jedis = pool.resource + jedis.flushAll() + } finally { + jedis?.close() } - def 'should ignore hashes if hashes disabled'() { - setup: - def data = createData('blerp', [a: 'b']) + def mapper = new ObjectMapper(); + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL) - when: //initial write - ((WriteableCache) cache).merge('foo', data) + return new RedisCache('test', new JedisClientDelegate(pool), mapper, RedisCacheOptions.builder().maxMset(MAX_MSET_SIZE).maxMergeBatch(MAX_MERGE_COUNT).build(), cacheMetrics) + } - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, ) + def 'a cached value does not exist until it has attributes'() { + setup: + populateOne('foo', 'bar', createData('bar', [:])) - when: //second write, hash matches - ((WriteableCache) cache).merge('foo', data) + expect: + cache.get('foo', 'bar') == null + } - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0) - when: //third write, disable hashing - pool.resource.withCloseable { Jedis j -> j.set('test:foo:hashes.disabled', 'true')} - ((WriteableCache) cache).merge('foo', data) + @Unroll + def 'attribute datatype handling 
#description'() { + setup: + def mergeData = createData('foo', [test: value], [:]) + cache.merge('test', mergeData) - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 1, 1, 1, 0) - } - - def 'should not write an item if it is unchanged'() { - setup: - def data = createData('blerp', [a: 'b']) + when: + def cacheData = cache.get('test', 'foo') - when: - ((WriteableCache) cache).merge('foo', data) + then: + cacheData != null + cacheData.attributes.test == expected - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 1, 1, 1, 0) + where: + value | expected | description + null | null | "null" + 1 | 1 | "Integer" + 2.0f | 2.0f | "Float" + "Bacon" | "Bacon" | "String" + true | true | "Boolean" + ['one', 'two'] | ['one', 'two'] | "Primitive list" + [key: 'value', key2: 10] | [key: 'value', key2: 10] | "Map" + new Bean('value', 10) | [key: 'value', key2: 10] | "Java object" + [key: 'value', key2: null] | [key: 'value'] | "Map with null" + new Bean('value', null) | [key: 'value'] | "Java object with null" + } - when: - ((WriteableCache) cache).merge('foo', data) + @Unroll + def 'cache data will expire if ttl specified'() { + setup: + def mergeData = new DefaultCacheData('ttlTest', ttl, [test: 'test'], [:]) + cache.merge('test', mergeData); - then: - 1 * cacheMetrics.merge('test', 'foo', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0) - } + when: + def cacheData = cache.get('test', 'ttlTest') + + then: + cacheData.id == mergeData.id - def 'should merge #mergeCount items at a time'() { - setup: - def cache = new RedisCache( - 'test', - new JedisClientDelegate(pool), - new ObjectMapper(), - RedisCacheOptions.builder().maxMergeBatch(mergeCount).maxMset(MAX_MSET_SIZE).hashing(false).build(), - cacheMetrics) - - when: - cache.mergeAll('foo', items) - - then: - - fullMerges * cacheMetrics.merge('test', 'foo', mergeCount, mergeCount, 0, 0, 0, 1, mergeCount, 0, 1, 0) - finalMergeCount * cacheMetrics.merge('test', 'foo', finalMerge, finalMerge, 0, 0, 0, 1, finalMerge, 0, 1, 0) - - where: - mergeCount << [ 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 100, 101, 131 ] - items = (0..100).collect { createData("blerp-$it") } - fullMerges = items.size() / mergeCount - finalMerge = items.size() % mergeCount - finalMergeCount = finalMerge > 0 ? 1 : 0 - } + when: + Thread.sleep(Math.abs(ttl) * 1500) + cacheData = cache.get('test', 'ttlTest') + + then: + cacheData?.id == (ttl > 0 ? 
null : mergeData.id) + + where: + ttl || _ + -1 || _ + 1 || _ + + } + + def 'verify MSET chunking behavior (> MAX_MSET_SIZE)'() { + setup: + ((WriteableCache) cache).mergeAll('foo', [createData('bar'), createData('baz'), createData('bam')]) + + expect: + cache.getIdentifiers('foo').sort() == ['bam', 'bar', 'baz'] + } + + def 'should fail if maxMsetSize is not even'() { + when: + RedisCacheOptions.builder().maxMset(7).build() + + then: + thrown(IllegalArgumentException) + } + + def 'should ignore hashes if hashes disabled'() { + setup: + def data = createData('blerp', [a: 'b']) + + when: //initial write + ((WriteableCache) cache).merge('foo', data) + + then: + 1 * cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,) + + when: //second write, hash matches + ((WriteableCache) cache).merge('foo', data) + + then: + 1 * cacheMetrics.merge('test', 'foo', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0) + + when: //third write, disable hashing + pool.resource.withCloseable { Jedis j -> j.set('test:foo:hashes.disabled', 'true') } + ((WriteableCache) cache).merge('foo', data) + + then: + 1 * cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 1, 1, 1, 0) + } + + def 'should not write an item if it is unchanged'() { + setup: + def data = createData('blerp', [a: 'b']) + + when: + ((WriteableCache) cache).merge('foo', data) + + then: + 1 * cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 1, 1, 1, 0) + + when: + ((WriteableCache) cache).merge('foo', data) + + then: + 1 * cacheMetrics.merge('test', 'foo', 1, 0, 0, 1, 0, 0, 0, 0, 0, 0) + } + + def 'should merge #mergeCount items at a time'() { + setup: + def mapper = new ObjectMapper(); + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL) + def cache = new RedisCache( + 'test', + new JedisClientDelegate(pool), + mapper, + RedisCacheOptions.builder().maxMergeBatch(mergeCount).maxMset(MAX_MSET_SIZE).hashing(false).build(), + cacheMetrics) + + when: + cache.mergeAll('foo', items) + + then: - private static class Bean { - String key - Integer key2 + fullMerges * cacheMetrics.merge('test', 'foo', mergeCount, mergeCount, 0, 0, 0, 1, mergeCount, 0, 1, 0) + finalMergeCount * cacheMetrics.merge('test', 'foo', finalMerge, finalMerge, 0, 0, 0, 1, finalMerge, 0, 1, 0) + + where: + mergeCount << [1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 100, 101, 131] + items = (0..100).collect { createData("blerp-$it") } + fullMerges = items.size() / mergeCount + finalMerge = items.size() % mergeCount + finalMergeCount = finalMerge > 0 ? 
1 : 0 + } + + def 'mergeAll with two items that have the same id uses the second item'() { + given: 'one item in the cache' + String id = 'bar' + def itemOneAttributes = [att1: 'val1'] + CacheData itemOne = createData(id, itemOneAttributes) + def itemTwoAttributes = [att2: 'val2'] + CacheData itemTwo = createData(id, itemTwoAttributes) + String type = 'foo' + cache.mergeAll(type, [ itemOne ]) + assert itemOneAttributes.equals(cache.get(type, id).attributes) + + when: 'adding both items' + cache.mergeAll(type, [ itemOne, itemTwo ]) - Bean(String key, Integer key2) { - this.key = key - this.key2 = key2 - } + then: 'itemTwo is in the cache' + itemTwoAttributes.equals(cache.get(type, id).attributes) + + when: 'storing the items again' + cache.mergeAll(type, [ itemOne, itemTwo ]) + + then: 'itemTwo is still in the cache' + itemTwoAttributes.equals(cache.get(type, id).attributes) + } + + private static class Bean { + String key + Integer key2 + + Bean(String key, Integer key2) { + this.key = key + this.key2 = key2 } + } } diff --git a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactorySpec.groovy b/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactorySpec.groovy index 60d622fcbed..bf813e38757 100644 --- a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactorySpec.groovy +++ b/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cache/RedisNamedCacheFactorySpec.groovy @@ -23,7 +23,6 @@ import com.netflix.spinnaker.kork.jedis.JedisClientDelegate import redis.clients.jedis.Jedis import redis.clients.jedis.JedisPool import spock.lang.AutoCleanup -import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -32,7 +31,6 @@ class RedisNamedCacheFactorySpec extends Specification { @Subject RedisNamedCacheFactory factory - @Shared @AutoCleanup("destroy") EmbeddedRedis embeddedRedis diff --git a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentSchedulerSpec.groovy b/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentSchedulerSpec.groovy index 66d176f262a..4976a26f274 100644 --- a/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentSchedulerSpec.groovy +++ b/cats/cats-redis/src/test/groovy/com/netflix/spinnaker/cats/redis/cluster/ClusteredAgentSchedulerSpec.groovy @@ -16,17 +16,25 @@ package com.netflix.spinnaker.cats.redis.cluster +import com.google.common.util.concurrent.ThreadFactoryBuilder +import com.netflix.spinnaker.cats.agent.Agent import com.netflix.spinnaker.cats.agent.AgentExecution import com.netflix.spinnaker.cats.agent.CachingAgent import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation +import com.netflix.spinnaker.cats.cluster.DefaultAgentIntervalProvider +import com.netflix.spinnaker.cats.cluster.DefaultNodeIdentity +import com.netflix.spinnaker.cats.cluster.DefaultNodeStatusProvider +import com.netflix.spinnaker.cats.cluster.NoopShardingFilter import com.netflix.spinnaker.cats.test.ManualRunnableScheduler import com.netflix.spinnaker.cats.test.TestAgent import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService import com.netflix.spinnaker.kork.jedis.JedisClientDelegate import redis.clients.jedis.Jedis import redis.clients.jedis.JedisPool +import redis.clients.jedis.params.SetParams import spock.lang.Specification import spock.lang.Subject +import java.util.concurrent.Executors class 
ClusteredAgentSchedulerSpec extends Specification { @@ -34,6 +42,7 @@ class ClusteredAgentSchedulerSpec extends Specification { ClusteredAgentScheduler scheduler Jedis jedis + JedisPool jedisPool CachingAgent agent ManualRunnableScheduler lockPollingScheduler ManualRunnableScheduler agentExecutionScheduler @@ -47,7 +56,7 @@ class ClusteredAgentSchedulerSpec extends Specification { def interval = new DefaultAgentIntervalProvider(6000000) agent = new TestAgent() jedis = Mock(Jedis) - def jedisPool = Stub(JedisPool) { + jedisPool = Stub(JedisPool) { getResource() >> jedis } lockPollingScheduler = new ManualRunnableScheduler() @@ -61,7 +70,8 @@ class ClusteredAgentSchedulerSpec extends Specification { agentExecutionScheduler, ".*", null, - dcs + dcs, + new NoopShardingFilter() ) } @@ -73,7 +83,7 @@ class ClusteredAgentSchedulerSpec extends Specification { agentExecutionScheduler.runAll() then: - 1 * jedis.set(_ as String, _ as String, 'NX', 'PX', _ as Long) >> 'definitely not ok' + 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'definitely not ok' 1 * jedis.close() 0 * _ } @@ -85,7 +95,7 @@ class ClusteredAgentSchedulerSpec extends Specification { agentExecutionScheduler.runAll() then: - 1 * jedis.set(_ as String, _ as String, 'NX', 'PX', _ as Long) >> 'OK' + 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK' 1 * inst.executionStarted(agent) 1 * exec.executeAgent(agent) 1 * inst.executionCompleted(agent, _) @@ -104,12 +114,219 @@ class ClusteredAgentSchedulerSpec extends Specification { agentExecutionScheduler.runAll() then: - 1 * jedis.set(_ as String, _ as String, 'NX', 'PX', _ as Long) >> 'OK' + 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK' 1 * inst.executionStarted(agent) 1 * exec.executeAgent(agent) >> { throw cause } - 1 * inst.executionFailed(agent, cause) + 1 * inst.executionFailed(agent, cause, _) 1 * jedis.eval(_ as String, _ as List, _ as List) 2 * jedis.close() 0 * _ } + + def 'test agent addition and removal from the agents and activeAgents maps in the schedule() -> run -> unschedule() flow'() { + when: + scheduler.schedule(agent, exec, inst) + then: + // scheduling an agent should add it to the agents map + scheduler.agents.containsKey(agent.agentType) + // unless we run this agent, it won't show up in the active agents map + !scheduler.activeAgents.containsKey(agent.agentType) + + when: + lockPollingScheduler.runAll() + agentExecutionScheduler.runAll() + + then: + // after running the agent, agents map should still contain it as we haven't explicitly + // removed it + scheduler.agents.containsKey(agent.agentType) + 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK' + 1 * inst.executionStarted(agent) + 1 * exec.executeAgent(agent) + 1 * inst.executionCompleted(agent, _) + + // normal execution of the agent will call agentCompleted() which calls releaseRunKey() which makes + // the following jedis call, and then it removes it from the activeAgents map + 1 * jedis.eval(scheduler.TTL_LOCK_KEY, List.of(agent.agentType), _ as List) + !scheduler.activeAgents.containsKey(agent.agentType) + + 2 * jedis.close() + 0 * _ + + when: + scheduler.unschedule(agent) + + then: + // unschedule() should make this following jedis call + 1 * jedis.eval(scheduler.DELETE_LOCK_KEY, List.of(agent.agentType), _ as List) + + // unschedule() should remove the agent from both agents and activeAgents map + !scheduler.agents.containsKey(agent.agentType) + // in the context of this test, the agent was already removed from active agents before unschedule() + 
// was called + !scheduler.activeAgents.containsKey(agent.agentType) + } + + def 'test that a long-running/stuck agent is removed from the active agents map after sufficient time has elapsed'() { + given: + def arbitraryAgentInterval = 500l + // agent is configured with an interval of 500ms (so this agent is supposed to timeout after 2 * 500 = 1s) + agent = new TestAgent(arbitraryAgentInterval) + + def agentExecutionScheduler = Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setNameFormat(TestStuckAgentExecution.class.getSimpleName() + "-%d") + .build()) + + // this interval is only used if an agent doesn't provide its own interval... this value is not + // used in the tests since our test agent implements AgentIntervalAware + def interval = new DefaultAgentIntervalProvider(1) + + def realScheduler = new ClusteredAgentScheduler( + new JedisClientDelegate(jedisPool), + new DefaultNodeIdentity(), + interval, + new DefaultNodeStatusProvider(), + lockPollingScheduler, + agentExecutionScheduler, + ".*", + null, + dcs, + new NoopShardingFilter() + ) + + // sleep for 5s + def agentExecution = new TestStuckAgentExecution(10*arbitraryAgentInterval) + + when: + realScheduler.schedule(agent, agentExecution, inst) + + then: + // scheduling an agent should add it to the agents map + realScheduler.agents.containsKey(agent.agentType) + // unless we run this agent, it won't show up in the active agents map + !realScheduler.activeAgents.containsKey(agent.agentType) + + when: + // run the agent + lockPollingScheduler.runAll() + + then: + // since this is a long running agent execution, after running it, the agents map should still contain it as + // it hasn't completed just yet + realScheduler.agents.containsKey(agent.agentType) + 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK' + + // verify that the agent hasn't completed its work + // normal execution of the agent will call agentCompleted() which calls releaseRunKey() which makes + // the following jedis call, and then it removes it from the activeAgents map. But since it hasn't + // completed its work, the above wouldn't be true + 0 * jedis.eval(realScheduler.TTL_LOCK_KEY, List.of(agent.agentType), _ as List) + // it should still be in the active agents map + realScheduler.activeAgents.containsKey(agent.agentType) + + 1 * jedis.close() + + when: + // arbitrary sleep interval > agent timeout + Thread.sleep(4 * arbitraryAgentInterval) + lockPollingScheduler.runAll() + + then: + // since enough time has elapsed, it should be removed from the active agents map. + !realScheduler.activeAgents.containsKey(agent.agentType) + + // verify that ttl lock key hasn't been updated just yet + // (i.e. it was automatically removed from active agents map) + 0 * jedis.eval(realScheduler.TTL_LOCK_KEY, List.of(agent.agentType), _ as List) + } + + def 'test that an agent is not removed from the active agents map if sufficient time has not elapsed'() { + given: + def arbitraryAgentInterval = 500l + // agent is configured with an interval of 500ms (so this agent is supposed to timeout after 2 * 500 = 1s) + agent = new TestAgent(arbitraryAgentInterval) + + def agentExecutionScheduler = Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setNameFormat(TestStuckAgentExecution.class.getSimpleName() + "-%d") + .build()) + + // this interval is only used if an agent doesn't provide its own interval... 
this value is not + // used in the tests since our test agent implements AgentIntervalAware + def interval = new DefaultAgentIntervalProvider(1) + + def realScheduler = new ClusteredAgentScheduler( + new JedisClientDelegate(jedisPool), + new DefaultNodeIdentity(), + interval, + new DefaultNodeStatusProvider(), + lockPollingScheduler, + agentExecutionScheduler, + ".*", + null, + dcs, + new NoopShardingFilter() + ) + + // sleep for 5s + def agentExecution = new TestStuckAgentExecution(10*arbitraryAgentInterval) + when: + realScheduler.schedule(agent, agentExecution, inst) + + then: + // scheduling an agent should add it to the agents map + realScheduler.agents.containsKey(agent.agentType) + // unless we run this agent, it won't show up in the active agents map + !realScheduler.activeAgents.containsKey(agent.agentType) + + when: + // run the agent + lockPollingScheduler.runAll() + + then: + // after running the agent, agents map should still contain it as we haven't explicitly + // removed it + realScheduler.agents.containsKey(agent.agentType) + 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK' + + // verify that the agent hasn't completed its work + // normal execution of the agent will call agentCompleted() which calls releaseRunKey() which makes + // the following jedis call, and then it removes it from the activeAgents map. But since it hasn't + // completed its work, the above wouldn't be true + 0 * jedis.eval(realScheduler.TTL_LOCK_KEY, List.of(agent.agentType), _ as List) + // it should still be in the active agents map + realScheduler.activeAgents.containsKey(agent.agentType) + + 1 * jedis.close() + + when: + // initiate the next attempt to schedule new agents + lockPollingScheduler.runAll() + + then: + // since the agent is long running, and enough time hasn't elapsed, it will still be in the active agents map + realScheduler.activeAgents.containsKey(agent.agentType) + } + + /** + * a test {@link AgentExecution} class that simulates a long-running/stuck agent execution + */ + private class TestStuckAgentExecution implements AgentExecution { + private long sleepTime + + TestStuckAgentExecution(long sleepTime) { + this.sleepTime = sleepTime + } + + @Override + void executeAgent(Agent agent) { + try { + // an arbitrary long enough sleep value + Thread.sleep(sleepTime) + } catch(Exception ignored) { + // ignore + } + } + } } diff --git a/cats/cats-redis/src/test/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentSchedulerTest.java b/cats/cats-redis/src/test/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentSchedulerTest.java new file mode 100644 index 00000000000..4255df217ca --- /dev/null +++ b/cats/cats-redis/src/test/java/com/netflix/spinnaker/cats/redis/cluster/ClusteredSortAgentSchedulerTest.java @@ -0,0 +1,122 @@ +/* + * Copyright 2023 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.cats.redis.cluster; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import com.netflix.spinnaker.cats.cluster.DefaultAgentIntervalProvider; +import com.netflix.spinnaker.cats.test.TestAgent; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPool; + +public class ClusteredSortAgentSchedulerTest { + + private ClusteredSortAgentScheduler clusteredSortAgentScheduler; + + private Jedis jedis = mock(Jedis.class); + private JedisPool jedisPool = mock(JedisPool.class); + private Integer parallelism = 2; + private CachingAgent.CacheExecution agentExecution = mock(CachingAgent.CacheExecution.class); + private ExecutionInstrumentation executionInstrumentation = mock(ExecutionInstrumentation.class); + + private Optional runningAgents; + + @BeforeEach + public void setUp() throws IllegalAccessException { + when(jedisPool.getResource()).thenReturn(jedis); + when(jedis.scriptLoad(anyString())).thenReturn("testScriptSha"); + when(jedis.time()).thenReturn(List.of("1678784468", "374338")); + DefaultAgentIntervalProvider intervalProvider = new DefaultAgentIntervalProvider(6000000); + clusteredSortAgentScheduler = + new ClusteredSortAgentScheduler(jedisPool, () -> false, intervalProvider, parallelism); + + runningAgents = + (Optional) + FieldUtils.getDeclaredField(ClusteredSortAgentScheduler.class, "runningAgents", true) + .get(clusteredSortAgentScheduler); + } + + @Test + public void testRunningAgentsSemaphore() { + when(jedis.zrangeByScore(eq(ClusteredSortAgentScheduler.WORKING_SET), anyString(), anyString())) + .thenReturn(new HashSet<>()); + when(jedis.zrangeByScore(eq(ClusteredSortAgentScheduler.WAITING_SET), anyString(), anyString())) + .thenReturn(Set.of("testAgentType")); + + clusteredSortAgentScheduler.saturatePool(); + + assertThat(runningAgents) + .isPresent() + .hasValueSatisfying(s -> assertThat(s.availablePermits()).isEqualTo(parallelism)); + } + + @Test + public void testRunningAgentsSemaphoreWithException() throws InterruptedException { + TestAgent agent1 = new TestAgent(); + TestAgent agent2 = new TestAgent(); + CountDownLatch latch = new CountDownLatch(1); + + when(jedis.zrangeByScore(eq(ClusteredSortAgentScheduler.WORKING_SET), anyString(), anyString())) + .thenReturn(new HashSet<>()); + when(jedis.zrangeByScore(eq(ClusteredSortAgentScheduler.WAITING_SET), anyString(), anyString())) + .thenReturn(Set.of(agent1.getAgentType(), agent2.getAgentType())); + when(jedis.scriptExists(anyString())).thenReturn(true); + clusteredSortAgentScheduler.schedule(agent1, agentExecution, executionInstrumentation); + 
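// agent2 is scheduled next, so the WAITING_SET stub above yields two runnable agent types.
+ // The evalsha stub below is primed to succeed, throw, and succeed again; the final
+ // assertion then verifies that the run that failed mid-flight still returned its permit,
+ // leaving the runningAgents semaphore back at `parallelism` available permits. +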
clusteredSortAgentScheduler.schedule(agent2, agentExecution, executionInstrumentation); + when(jedis.evalsha(anyString(), anyList(), anyList())) + .thenReturn("testReleaseScore") + .thenThrow(new RuntimeException("fail")) + .thenReturn("testReleaseScore"); + when(agentExecution.executeAgentWithoutStore(any())) + .thenReturn(new DefaultCacheResult(new HashMap<>())); + doAnswer( + (invocation) -> { + latch.countDown(); + return null; + }) + .when(agentExecution) + .storeAgentResult(any(), any()); + + clusteredSortAgentScheduler.saturatePool(); + latch.await(10, TimeUnit.SECONDS); + + assertThat(runningAgents) + .isPresent() + .hasValueSatisfying(s -> assertThat(s.availablePermits()).isEqualTo(parallelism)); + } +} diff --git a/cats/cats-sql/cats-sql.gradle b/cats/cats-sql/cats-sql.gradle new file mode 100644 index 00000000000..4966140211e --- /dev/null +++ b/cats/cats-sql/cats-sql.gradle @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +apply from: "$rootDir/gradle/kotlin.gradle" +apply from: "$rootDir/gradle/kotlin-test.gradle" +apply plugin: "groovy" + +tasks.compileGroovy.enabled = false + +dependencies { + implementation project(":cats:cats-core") + implementation project(":cats:cats-redis") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + implementation project(":clouddriver-sql") + + implementation "com.fasterxml.jackson.core:jackson-databind" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-sql" + implementation "de.huxhorn.sulky:de.huxhorn.sulky.ulid" + implementation "io.github.resilience4j:resilience4j-retry" + implementation "io.strikt:strikt-core" + implementation "io.vavr:vavr:0.10.0" + implementation "org.assertj:assertj-core" + implementation "org.jetbrains.kotlinx:kotlinx-coroutines-core-common:1.1.1" + implementation "org.jetbrains.kotlinx:kotlinx-coroutines-core:1.1.1" + implementation "org.jetbrains.kotlinx:kotlinx-coroutines-slf4j:1.1.1" + implementation("org.jooq:jooq") + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "com.google.guava:guava" + + testImplementation project(":cats:cats-test") + + testImplementation "cglib:cglib-nodep" + testImplementation "io.spinnaker.kork:kork-sql-test" + testImplementation "com.nhaarman:mockito-kotlin" + testImplementation "junit:junit" + testImplementation "org.hamcrest:hamcrest-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.testcontainers:mysql" + testImplementation "org.testcontainers:postgresql" + testImplementation 
"com.mysql:mysql-connector-j" + testImplementation "org.postgresql:postgresql" +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlProviderCache.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlProviderCache.kt new file mode 100644 index 00000000000..0af76150fca --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlProviderCache.kt @@ -0,0 +1,344 @@ +package com.netflix.spinnaker.cats.sql + +import com.netflix.spinnaker.cats.agent.CacheResult +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.CacheFilter +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.cats.sql.cache.SqlCache +import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* +import org.slf4j.LoggerFactory +import org.slf4j.MDC +import kotlin.contracts.ExperimentalContracts + +@ExperimentalContracts +class SqlProviderCache(private val backingStore: WriteableCache) : ProviderCache { + + private val log = LoggerFactory.getLogger(javaClass) + + companion object { + private const val ALL_ID = "_ALL_" // this implementation ignores this entirely + } + + init { + if (backingStore !is SqlCache) { + throw IllegalStateException("SqlProviderCache must be wired with a SqlCache backingStore") + } + } + + /** + * Filters the supplied list of identifiers to only those that exist in the cache. + * + * @param type the type of the item + * @param identifiers the identifiers for the items + * @return the list of identifiers that are present in the cache from the provided identifiers + */ + override fun existingIdentifiers(type: String, identifiers: MutableCollection): MutableCollection { + return backingStore.existingIdentifiers(type, identifiers) + } + + /** + * Returns the identifiers for the specified type that match the provided glob. + * + * @param type The type for which to retrieve identifiers + * @param glob The glob to match against the identifiers + * @return the identifiers for the type that match the glob + */ + override fun filterIdentifiers(type: String?, glob: String?): MutableCollection { + return backingStore.filterIdentifiers(type, glob) + } + + /** + * Retrieves all the items for the specified type + * + * @param type the type for which to retrieve items + * @return all the items for the type + */ + override fun getAll(type: String): MutableCollection { + return getAll(type, null as CacheFilter?) + } + + override fun getAll(type: String, identifiers: MutableCollection?): MutableCollection { + return getAll(type, identifiers, null) + } + + override fun getAll(type: String, cacheFilter: CacheFilter?): MutableCollection { + validateTypes(type) + return backingStore.getAll(type, cacheFilter) + } + + override fun getAll( + type: String, + identifiers: MutableCollection?, + cacheFilter: CacheFilter? + ): MutableCollection { + validateTypes(type) + return backingStore.getAll(type, identifiers, cacheFilter) + } + + override fun supportsGetAllByApplication(): Boolean { + return true + } + + override fun getAllByApplication(type: String, application: String): Map> { + return getAllByApplication(type, application, null) + } + + override fun getAllByApplication( + type: String, + application: String, + cacheFilter: CacheFilter? 
+ ): Map> { + validateTypes(type) + return backingStore.getAllByApplication(type, application, cacheFilter) + } + + override fun getAllByApplication( + types: Collection, + application: String, + filters: Map + ): Map> { + validateTypes(types) + return backingStore.getAllByApplication(types, application, filters) + } + + /** + * Retrieves the items for the specified type matching the provided identifiers + * + * @param type the type for which to retrieve items + * @param identifiers the identifiers + * @return the items matching the type and identifiers + */ + override fun getAll(type: String, vararg identifiers: String): MutableCollection { + return getAll(type, identifiers.toMutableList()) + } + + /** + * Gets a single item from the cache by type and id + * + * @param type the type of the item + * @param id the id of the item + * @return the item matching the type and id + */ + override fun get(type: String, id: String?): CacheData? { + return get(type, id, null) + } + + override fun get(type: String, id: String?, cacheFilter: CacheFilter?): CacheData? { + if (ALL_ID == id) { + log.warn("Unexpected request for $ALL_ID for type: $type, cacheFilter: $cacheFilter") + return null + } + validateTypes(type) + + return backingStore.get(type, id, cacheFilter) ?: return null + } + + override fun evictDeletedItems(type: String, ids: Collection) { + try { + MDC.put("agentClass", "evictDeletedItems") + + backingStore.evictAll(type, ids) + } finally { + MDC.remove("agentClass") + } + } + + /** + * Retrieves all the identifiers for a type + * + * @param type the type for which to retrieve identifiers + * @return the identifiers for the type + */ + override fun getIdentifiers(type: String): MutableCollection { + validateTypes(type) + return backingStore.getIdentifiers(type) + } + + override fun putCacheResult( + source: String, + authoritativeTypes: MutableCollection, + cacheResult: CacheResult + ) { + try { + MDC.put("agentClass", "$source putCacheResult") + + // This is a hack because some types are global and a single agent + // can't be authoritative for cleanup but can supply enough + // information to create the entity. For those types we need an out + // of band cleanup so they should be cached as authoritative but + // without cleanup + // + // TODO Consider adding a GLOBAL type for supported data types to + // allow caching agents to explicitly opt into this rather than + // encoding them in here.. 
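+ // getGlobalTypes (bottom of this file) currently flags only clustercaching/titusstreaming
+ // sources that emit CLUSTERS keys without being authoritative for them, returning
+ // setOf(CLUSTERS.ns, APPLICATIONS.ns); those types are then stored authoritatively but
+ // with cleanup disabled (note cleanup = !globalTypes.contains(it.key) below).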
+ val globalTypes = getGlobalTypes(source, authoritativeTypes, cacheResult) + + cacheResult.cacheResults + .filter { + it.key.contains(ON_DEMAND.ns, ignoreCase = true) + } + .forEach { + authoritativeTypes.add(it.key) + } + + val cachedTypes = mutableSetOf() + // Update resource table from Authoritative sources only + when { + // OnDemand agents should only be treated as authoritative and don't use standard eviction logic + source.contains(ON_DEMAND.ns, ignoreCase = true) -> + cacheResult.cacheResults + // And OnDemand agents shouldn't update other resource type tables + .filter { + it.key.contains(ON_DEMAND.ns, ignoreCase = true) + } + .forEach { + cacheDataType(it.key, source, it.value, authoritative = true, cleanup = false) + } + authoritativeTypes.isNotEmpty() -> + cacheResult.cacheResults + .filter { + authoritativeTypes.contains(it.key) || globalTypes.contains(it.key) + } + .forEach { + cacheDataType(it.key, source, it.value, authoritative = true, cleanup = !globalTypes.contains(it.key)) + cachedTypes.add(it.key) + } + else -> // If there are no authoritative types in cacheResult, override all as authoritative without cleanup + cacheResult.cacheResults + .forEach { + cacheDataType(it.key, source, it.value, authoritative = true, cleanup = false) + cachedTypes.add(it.key) + } + } + + // Update relationships for non-authoritative types + if (!source.contains(ON_DEMAND.ns, ignoreCase = true)) { + cacheResult.cacheResults + .filter { + !cachedTypes.contains(it.key) + } + .forEach { + cacheDataType(it.key, source, it.value, authoritative = false) + } + } + + if (cacheResult.evictions.isNotEmpty()) { + cacheResult.evictions.forEach { + evictDeletedItems(it.key, it.value) + } + } + } finally { + MDC.remove("agentClass") + } + } + + override fun addCacheResult( + source: String, + authoritativeTypes: MutableCollection, + cacheResult: CacheResult + ) { + try { + MDC.put("agentClass", "$source putCacheResult") + + authoritativeTypes.addAll(getGlobalTypes(source, authoritativeTypes, cacheResult)); + + val cachedTypes = mutableSetOf() + + if (authoritativeTypes.isNotEmpty()) { + cacheResult.cacheResults + .filter { + authoritativeTypes.contains(it.key) + } + .forEach { + cacheDataType(it.key, source, it.value, authoritative = true, cleanup = false) + cachedTypes.add(it.key) + } + } + + cacheResult.cacheResults + .filter { !cachedTypes.contains(it.key) } + .forEach { + cacheDataType(it.key, source, it.value, authoritative = false, cleanup = false) + } + } finally { + MDC.remove("agentClass") + } + } + + override fun putCacheData(type: String, cacheData: CacheData) { + try { + MDC.put("agentClass", "putCacheData") + backingStore.merge(type, cacheData) + } finally { + MDC.remove("agentClass") + } + } + + fun cleanOnDemand(maxAgeMs: Long): Int { + return (backingStore as SqlCache).cleanOnDemand(maxAgeMs) + } + + private fun validateTypes(type: String) { + validateTypes(listOf(type)) + } + + private fun validateTypes(types: Collection) { + val invalid = types + .asSequence() + .filter { it.contains(":") } + .toSet() + + if (invalid.isNotEmpty()) { + throw IllegalArgumentException("Invalid types: $invalid") + } + } + + private fun cacheDataType(type: String, agent: String, items: Collection, authoritative: Boolean) { + cacheDataType(type, agent, items, authoritative, cleanup = true) + } + + private fun cacheDataType( + type: String, + agent: String, + items: Collection, + authoritative: Boolean, + cleanup: Boolean + ) { + val toStore = ArrayList(items.size + 1) + items.forEach { + 
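// uniqueifyRelationships (below) rewrites each relationship key as "$key:$sourceAgentType",
+ // e.g. a "serverGroups" relationship written by a hypothetical agent "TestAgent" is stored
+ // under "serverGroups:TestAgent", so relationship rows from different agents never collide. +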
toStore.add(uniqueifyRelationships(it, agent)) + } + + // OnDemand agents are always updated incrementally and should not trigger auto-cleanup at the WriteableCache layer + val cleanupOverride = + if (agent.contains(ON_DEMAND.ns, ignoreCase = true) || type.contains(ON_DEMAND.ns, ignoreCase = true)) { + false + } else { + cleanup + } + + (backingStore as SqlCache).mergeAll(type, agent, toStore, authoritative, cleanupOverride) + } + + private fun uniqueifyRelationships(source: CacheData, sourceAgentType: String): CacheData { + val relationships = HashMap>(source.relationships.size) + for ((key, value) in source.relationships) { + relationships["$key:$sourceAgentType"] = value + } + return DefaultCacheData(source.id, source.ttlSeconds, source.attributes, relationships) + } + + private fun getGlobalTypes(source: String, authoritativeTypes: Collection, cacheResult: CacheResult): Set = when { + (source.contains("clustercaching", ignoreCase = true) || + source.contains("titusstreaming", ignoreCase = true)) && + !authoritativeTypes.contains(CLUSTERS.ns) && + cacheResult.cacheResults + .any { + it.key.startsWith(CLUSTERS.ns) + } -> setOf(CLUSTERS.ns, APPLICATIONS.ns) + + else -> emptySet() + } +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlProviderRegistry.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlProviderRegistry.kt new file mode 100644 index 00000000000..3396ebe1998 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlProviderRegistry.kt @@ -0,0 +1,41 @@ +package com.netflix.spinnaker.cats.sql + +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.NamedCacheFactory +import com.netflix.spinnaker.cats.provider.Provider +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.cats.provider.ProviderCacheConfiguration +import com.netflix.spinnaker.cats.provider.ProviderRegistry +import java.util.concurrent.ConcurrentHashMap +import kotlin.contracts.ExperimentalContracts + +@ExperimentalContracts +class SqlProviderRegistry( + private val providerList: Collection, + private val cacheFactory: NamedCacheFactory +) : ProviderRegistry { + private val providerCaches = ConcurrentHashMap() + + init { + providerList.forEach { + if (it is ProviderCacheConfiguration) { + providerCaches[it.providerName] = SqlProviderCache(cacheFactory.getCache(it.providerName, it)) + } else { + providerCaches[it.providerName] = SqlProviderCache(cacheFactory.getCache(it.providerName)) + } + } + } + + override fun getProviderCache(providerName: String?): ProviderCache? { + return providerCaches[providerName] + } + + override fun getProviderCaches(): Collection { + // TODO unwind CompositeCache - there is only one sql cache + return listOf(providerCaches.values.first()) + } + + override fun getProviders(): Collection { + return providerList + } +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlUtil.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlUtil.kt new file mode 100644 index 00000000000..413d1a57fa4 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/SqlUtil.kt @@ -0,0 +1,54 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.sql + +import org.jooq.DSLContext +import org.jooq.Field +import org.jooq.SQLDialect +import org.jooq.impl.DSL +import java.sql.ResultSet + +object SqlUtil { + + fun createTableLike(jooq: DSLContext, baseName: String, template: String) { + when (jooq.dialect()) { + SQLDialect.POSTGRES -> + jooq.execute("CREATE TABLE IF NOT EXISTS $baseName (LIKE $template INCLUDING ALL)") + else -> + jooq.execute( + "CREATE TABLE IF NOT EXISTS $baseName LIKE $template" + ) + } + } + + fun getTablesLike(jooq: DSLContext, baseName: String): ResultSet { + return when (jooq.dialect()) { + SQLDialect.POSTGRES -> + jooq.select(DSL.field("tablename")) + .from(DSL.table("pg_catalog.pg_tables")) + .where(DSL.field("tablename").like("$baseName%")) + .fetch() + .intoResultSet() + else -> + jooq.fetch("show tables like '$baseName%'").intoResultSet() + } + } + + fun excluded(values: Field): Field { + return DSL.field("excluded.{0}", values.dataType, values) + } +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SpectatorSqlCacheMetrics.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SpectatorSqlCacheMetrics.kt new file mode 100644 index 00000000000..10933caa5e0 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SpectatorSqlCacheMetrics.kt @@ -0,0 +1,84 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.cats.sql.cache + +import com.netflix.spectator.api.BasicTag +import com.netflix.spectator.api.Registry +import com.netflix.spectator.api.Tag + +class SpectatorSqlCacheMetrics( + private val registry: Registry +) : SqlCacheMetrics { + + override fun merge( + prefix: String, + type: String, + itemCount: Int, + itemsStored: Int, + relationshipCount: Int, + relationshipsStored: Int, + selectOperations: Int, + writeOperations: Int, + deleteOperations: Int, + duplicates: Int + ) { + val tags = tags(prefix, type) + registry.counter(id("cats.sqlCache.merge", "itemCount", tags)).increment(itemCount.toLong()) + registry.counter(id("cats.sqlCache.merge", "itemsStored", tags)).increment(itemsStored.toLong()) + registry.counter(id("cats.sqlCache.merge", "relationshipCount", tags)).increment(relationshipCount.toLong()) + registry.counter(id("cats.sqlCache.merge", "relationshipsStored", tags)).increment(relationshipsStored.toLong()) + registry.counter(id("cats.sqlCache.merge", "selectOperations", tags)).increment(selectOperations.toLong()) + registry.counter(id("cats.sqlCache.merge", "writeOperations", tags)).increment(writeOperations.toLong()) + registry.counter(id("cats.sqlCache.merge", "deleteOperations", tags)).increment(deleteOperations.toLong()) + registry.counter(id("cats.sqlCache.merge", "duplicates", tags)).increment(duplicates.toLong()) + } + + override fun evict( + prefix: String, + type: String, + itemCount: Int, + itemsDeleted: Int, + deleteOperations: Int + ) { + val tags = tags(prefix, type) + registry.counter(id("cats.sqlCache.evict", "itemCount", tags)).increment(itemCount.toLong()) + registry.counter(id("cats.sqlCache.evict", "itemsDeleted", tags)).increment(itemsDeleted.toLong()) + registry.counter(id("cats.sqlCache.evict", "deleteOperations", tags)).increment(deleteOperations.toLong()) + super.evict(prefix, type, itemCount, itemsDeleted, deleteOperations) + } + + override fun get( + prefix: String, + type: String, + itemCount: Int, + requestedSize: Int, + relationshipsRequested: Int, + selectOperations: Int, + async: Boolean + ) { + val tags = tags(prefix, type, async) + registry.counter(id("cats.sqlCache.get", "itemCount", tags)).increment(itemCount.toLong()) + registry.counter(id("cats.sqlCache.get", "requestedSize", tags)).increment(requestedSize.toLong()) + registry.counter(id("cats.sqlCache.get", "relationshipsRequested", tags)).increment(relationshipsRequested.toLong()) + registry.counter(id("cats.sqlCache.get", "selectOperations", tags)).increment(selectOperations.toLong()) + } + + private fun id(metricGroup: String, metric: String, tags: Iterable) = + registry.createId("$metricGroup.$metric", tags) + + private fun tags(prefix: String, type: String, async: Boolean = false) = + listOf(BasicTag("prefix", prefix), BasicTag("type", type), BasicTag("async", async.toString())) +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCache.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCache.kt new file mode 100644 index 00000000000..e2f448af5b9 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCache.kt @@ -0,0 +1,1629 @@ +package com.netflix.spinnaker.cats.sql.cache + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.CacheFilter +import com.netflix.spinnaker.cats.cache.DefaultJsonCacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter +import 
com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.provider.ProviderCacheConfiguration +import com.netflix.spinnaker.cats.sql.SqlUtil +import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND +import com.netflix.spinnaker.config.SqlConstraints +import com.netflix.spinnaker.config.coroutineThreadPrefix +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties +import de.huxhorn.sulky.ulid.ULID +import io.github.resilience4j.retry.Retry +import io.github.resilience4j.retry.RetryConfig +import io.vavr.control.Try +import java.security.MessageDigest +import java.sql.ResultSet +import java.sql.SQLException +import java.sql.SQLSyntaxErrorException +import java.time.Clock +import java.time.Duration +import java.util.Arrays +import java.util.concurrent.ConcurrentSkipListSet +import java.util.concurrent.atomic.AtomicInteger +import javax.annotation.PreDestroy +import kotlin.contracts.ExperimentalContracts +import kotlin.contracts.contract +import kotlin.coroutines.CoroutineContext +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Job +import kotlinx.coroutines.async +import kotlinx.coroutines.awaitAll +import kotlinx.coroutines.runBlocking +import org.jooq.Condition +import org.jooq.DSLContext +import org.jooq.SQLDialect +import org.jooq.exception.DataAccessException +import org.jooq.exception.SQLDialectNotSupportedException +import org.jooq.impl.DSL.field +import org.jooq.impl.DSL.noCondition +import org.jooq.impl.DSL.sql +import org.jooq.impl.DSL.table +import org.jooq.util.mysql.MySQLDSL +import org.slf4j.LoggerFactory +import org.springframework.jdbc.BadSqlGrammarException + +@ExperimentalContracts +class SqlCache( + private val name: String, + private val jooq: DSLContext, + private val mapper: ObjectMapper, + private val coroutineContext: CoroutineContext?, + private val clock: Clock, + private val sqlRetryProperties: SqlRetryProperties, + tableNamespace: String?, + private val cacheMetrics: SqlCacheMetrics, + private val dynamicConfigService: DynamicConfigService, + private val sqlConstraints: SqlConstraints, + private val providerCacheConfiguration: ProviderCacheConfiguration +) : WriteableCache { + + companion object { + private const val onDemandType = "onDemand" + + private val schemaVersion = SqlSchemaVersion.current() + private val useRegexp = + """.*[\?\[].*""".toRegex() + private val cleanRegexp = + """\.+\*""".toRegex() + + private val log = LoggerFactory.getLogger(SqlCache::class.java) + } + + private val sqlNames = SqlNames(tableNamespace, sqlConstraints) + + private var createdTables = ConcurrentSkipListSet() + + private val hexStrings: List + + init { + log.info("Configured for $name") + + // Pre-generating string representations for all possible byte values + // to avoid using `String#format` in frequently invoked methods like `getHash`. + // Invoking `String#format` in these hot methods creates excessive garbage. 
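+ // The table ends up with 256 entries: index 0 holds "80" (formatted -128) and index 255
+ // holds "7f" (formatted 127); getHash below indexes it with (byte - Byte.MIN_VALUE).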
+ this.hexStrings = arrayListOf() + for (byte in Byte.MIN_VALUE..Byte.MAX_VALUE) { + val str = "%02x".format(byte.toByte()) + this.hexStrings.add(str) + } + } + + /** + * Only evicts cache records but not relationship rows + */ + override fun evictAll(type: String, ids: Collection) { + if (ids.isEmpty()) { + return + } + + log.info("evicting ${ids.size} $type records") + + var deletedCount = 0 + var opCount = 0 + try { + ids.chunked(dynamicConfigService.getConfig(Int::class.java, "sql.cache.read-batch-size", 500)) { chunk -> + withRetry(RetryCategory.WRITE) { + jooq.deleteFrom(table(sqlNames.resourceTableName(type))) + .where(field("id").`in`(*chunk.toTypedArray())) + .execute() + } + deletedCount += chunk.size + opCount += 1 + } + } catch (e: Exception) { + log.error("error evicting records", e) + } + + cacheMetrics.evict( + prefix = name, + type = type, + itemCount = ids.size, + itemsDeleted = deletedCount, + deleteOperations = opCount + ) + } + + fun mergeAll( + type: String, + agentHint: String?, + items: MutableCollection?, + authoritative: Boolean, + cleanup: Boolean + ) { + if (type.isEmpty()) { + return + } + + createTables(type) + + if (!providerCacheConfiguration.supportsFullEviction()) { + if (items.isNullOrEmpty() || items.none { it.id != "_ALL_" }) { + return + } + } + + if (items.isNullOrEmpty()) { + log.warn("No cacheable items supplied, collection will be cleared (type: {}, agent: {})", type, agentHint) + } + + var agent: String? = agentHint + + val first: String? = items + ?.firstOrNull { it.relationships.isNotEmpty() } + ?.relationships + ?.keys + ?.firstOrNull() + + if (first != null && agent == null) { + agent = first.substringAfter(":", first) + } + + if (agent == null) { + log.debug("warning: null agent for type $type") + } + + val storeResult = if (authoritative) { + storeAuthoritative(type, agent, items ?: mutableListOf(), cleanup) + } else { + storeInformative(type, items ?: mutableListOf(), cleanup) + } + + cacheMetrics.merge( + prefix = name, + type = type, + itemCount = storeResult.itemCount.get(), + itemsStored = storeResult.itemsStored.get(), + relationshipCount = storeResult.relationshipCount.get(), + relationshipsStored = storeResult.relationshipsStored.get(), + selectOperations = storeResult.selectQueries.get(), + writeOperations = storeResult.writeQueries.get(), + deleteOperations = storeResult.deleteQueries.get(), + duplicates = storeResult.duplicates.get() + ) + } + + override fun mergeAll(type: String, items: MutableCollection?) { + mergeAll(type, null, items, true, true) + } + + /** + * Retrieves all the items for the specified type + * + * @param type the type for which to retrieve items + * @return all the items for the type + */ + override fun getAll(type: String): MutableCollection { + return getAll(type, null as CacheFilter?) 
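+ // A null CacheFilter is mapped by getRelationshipFilterPrefixes to listOf("ALL"), i.e.
+ // bodies plus effectively every relationship row are fetched rather than a filtered subset.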
+ } + + override fun getAll(type: String, cacheFilter: CacheFilter?): MutableCollection { + val relationshipPrefixes = getRelationshipFilterPrefixes(cacheFilter) + + val result = if (relationshipPrefixes.isEmpty()) { + getDataWithoutRelationships(type) + } else { + getDataWithRelationships(type, relationshipPrefixes) + } + + if (result.selectQueries > -1) { + cacheMetrics.get( + prefix = name, + type = type, + itemCount = result.data.size, + requestedSize = result.data.size, + relationshipsRequested = result.relPointers.size, + selectOperations = result.selectQueries, + async = result.withAsync + ) + } + + return mergeDataAndRelationships(result.data, result.relPointers, relationshipPrefixes) + } + + /** + * Retrieves the items for the specified type matching the provided ids + * + * @param type the type for which to retrieve items + * @param ids the ids + * @return the items matching the type and ids + */ + override fun getAll(type: String, ids: MutableCollection?): MutableCollection { + return getAll(type, ids, null as CacheFilter?) + } + + override fun getAll( + type: String, + ids: MutableCollection?, + cacheFilter: CacheFilter? + ): MutableCollection { + if (ids.isNullOrEmpty()) { + cacheMetrics.get( + prefix = name, + type = type, + itemCount = 0, + requestedSize = 0, + relationshipsRequested = 0, + selectOperations = 0 + ) + return mutableListOf() + } + + val relationshipPrefixes = getRelationshipFilterPrefixes(cacheFilter) + + val result = if (relationshipPrefixes.isEmpty()) { + getDataWithoutRelationships(type, ids) + } else { + getDataWithRelationships(type, ids, relationshipPrefixes) + } + + if (result.selectQueries > -1) { + cacheMetrics.get( + prefix = name, + type = type, + itemCount = result.data.size, + requestedSize = ids.size, + relationshipsRequested = result.relPointers.size, + selectOperations = result.selectQueries, + async = result.withAsync + ) + } + + return mergeDataAndRelationships(result.data, result.relPointers, relationshipPrefixes) + } + + /** + * Retrieves the items for the specified type matching the provided identifiers + * + * @param type the type for which to retrieve items + * @param identifiers the identifiers + * @return the items matching the type and identifiers + */ + override fun getAll(type: String, vararg identifiers: String?): MutableCollection { + val ids = mutableListOf() + identifiers.forEach { ids.add(it!!) } + return getAll(type, ids) + } + + override fun supportsGetAllByApplication(): Boolean { + return true + } + + override fun getAllByApplication( + type: String, + application: String, + cacheFilter: CacheFilter? 
+ ): Map> { + val relationshipPrefixes = getRelationshipFilterPrefixes(cacheFilter) + + val result = if (relationshipPrefixes.isEmpty()) { + getDataWithoutRelationshipsByApp(type, application) + } else { + getDataWithRelationshipsByApp(type, application, relationshipPrefixes) + } + + if (result.selectQueries > -1) { + cacheMetrics.get( + prefix = name, + type = type, + itemCount = result.data.size, + requestedSize = result.data.size, + relationshipsRequested = result.relPointers.size, + selectOperations = result.selectQueries, + async = wasAsync() + ) + } + + return mapOf(type to mergeDataAndRelationships(result.data, result.relPointers, relationshipPrefixes)) + } + + override fun getAllByApplication( + types: Collection, + application: String, + cacheFilters: Map + ): Map> { + val result = mutableMapOf>() + + if (coroutineContext.useAsync(this::asyncEnabled)) { + val scope = CatsCoroutineScope(coroutineContext) + + types.chunked(dynamicConfigService.getConfig(Int::class.java, "sql.cache.max-query-concurrency", 4)) { batch -> + val deferred = batch.map { type -> + scope.async { getAllByApplication(type, application, cacheFilters[type]) } + } + + runBlocking { + deferred.awaitAll().forEach { result.putAll(it) } + } + } + } else { + types.forEach { type -> + result.putAll(getAllByApplication(type, application, cacheFilters[type])) + } + } + + return result + } + + override fun merge(type: String, cacheData: CacheData) { + mergeAll(type, null, mutableListOf(cacheData), true, false) + } + + /** + * Retrieves all the identifiers for a type + * + * @param type the type for which to retrieve identifiers + * @return the identifiers for the type + */ + override fun getIdentifiers(type: String): MutableCollection { + val ids = try { + withRetry(RetryCategory.READ) { + jooq.select(field("id")) + .from(table(sqlNames.resourceTableName(type))) + .fetch() + .intoSet(field("id"), String::class.java) + } + } catch (e: BadSqlGrammarException) { + suppressedLog("Failed getting ids for type $type", e) + return mutableListOf() + } + + cacheMetrics.get( + prefix = name, + type = type, + itemCount = ids.size, + requestedSize = ids.size, + relationshipsRequested = 0, + selectOperations = 1 + ) + + return ids + } + + /** + * Filters the supplied list of identifiers to only those that exist in the cache. 
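+ * Lookups run in chunks of sql.cache.read-batch-size (default 500); when the useAsync
+ * predicate decides the id count is large enough, chunks are dispatched concurrently,
+ * with at most sql.cache.max-query-concurrency (default 4) queries in flight at a time.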
+ * + * @param type the type of the item + * @param identifiers the identifiers for the items + * @return the list of identifiers that are present in the cache from the provided identifiers + */ + override fun existingIdentifiers(type: String, identifiers: MutableCollection): MutableCollection { + var selects = 0 + var withAsync = false + val existing = mutableListOf() + val batchSize = dynamicConfigService.getConfig(Int::class.java, "sql.cache.read-batch-size", 500) + + if (coroutineContext.useAsync(identifiers.size, this::useAsync)) { + withAsync = true + val scope = CatsCoroutineScope(coroutineContext) + + identifiers.chunked(batchSize).chunked( + dynamicConfigService.getConfig(Int::class.java, "sql.cache.max-query-concurrency", 4) + ) { batch -> + val deferred = batch.map { ids -> + scope.async { + selectIdentifiers(type, ids) + } + } + runBlocking { + existing.addAll(deferred.awaitAll().flatten()) + } + selects += deferred.size + } + } else { + identifiers.chunked(batchSize) { chunk -> + existing.addAll(selectIdentifiers(type, chunk)) + selects += 1 + } + } + + cacheMetrics.get( + prefix = name, + type = type, + itemCount = 0, + requestedSize = 0, + relationshipsRequested = 0, + selectOperations = selects, + async = withAsync + ) + + return existing + } + + /** + * Returns the identifiers for the specified type that match the provided glob. + * + * @param type The type for which to retrieve identifiers + * @param glob The glob to match against the identifiers + * @return the identifiers for the type that match the glob + */ + override fun filterIdentifiers(type: String, glob: String?): MutableCollection { + if (glob == null) { + return mutableSetOf() + } + + val sql = if (glob.matches(useRegexp)) { + val filter = glob.replace("?", ".", true).replace("*", ".*").replace(cleanRegexp, ".*") + jooq + .select(field("id")) + .from(table(sqlNames.resourceTableName(type))) + .where(field("id").likeRegex("^$filter$")) + } else { + jooq + .select(field("id")) + .from(table(sqlNames.resourceTableName(type))) + // The underscore is treated as a single character wildcard in currently supported sql backends (mysql/psql) + // leading to inconsistencies in current usages of `filterIdentifiers()`. + // + // If single character wildcard is desired, use '?' rather than '_'. + .where(field("id").like(glob.replace('*', '%').replace("_", """\_"""))) + } + + val ids = try { + withRetry(RetryCategory.READ) { + sql + .fetch(field("id"), String::class.java) + } + } catch (e: Exception) { + suppressedLog("Failed searching for identifiers type: $type glob: $glob reason: ${e.message}", e) + mutableSetOf() + } + + cacheMetrics.get( + prefix = name, + type = type, + itemCount = ids.size, + requestedSize = ids.size, + relationshipsRequested = 0, + selectOperations = 1 + ) + + return ids + } + + /** + * Gets a single item from the cache by type and id + * + * @param type the type of the item + * @param id the id of the item + * @return the item matching the type and id + */ + override fun get(type: String, id: String?): CacheData? { + return get(type, id, null) + } + + override fun get(type: String, id: String?, cacheFilter: CacheFilter?): CacheData? 
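+ // delegates to getAll(type, [id], cacheFilter) and unwraps the lone result, if present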
{ + val result = getAll(type, Arrays.asList(id), cacheFilter) + return if (result.isEmpty()) { + null + } else result.iterator().next() + } + + override fun evict(type: String, id: String) { + evictAll(type, listOf(id)) + } + + fun cleanOnDemand(maxAgeMs: Long): Int { + val toClean = withRetry(RetryCategory.READ) { + jooq.select(field("id")) + .from(table(sqlNames.resourceTableName(onDemandType))) + .where(field("last_updated").lt(clock.millis() - maxAgeMs)) + .fetch() + .into(String::class.java) + } + + evictAll(onDemandType, toClean) + + return toClean.size + } + + private fun storeAuthoritative( + type: String, + agentHint: String?, + items: MutableCollection, + cleanup: Boolean + ): StoreResult { + val result = StoreResult() + result.itemCount.addAndGet(items.size) + + val agent = if (type == ON_DEMAND.ns) { + // onDemand keys aren't initially written by the agents that update and expire them. since agent is + // part of the primary key, we need to ensure a consistent value across initial and subsequent writes + ON_DEMAND.ns + } else { + agentHint ?: "unknown" + } + + val existingHashIds = getHashIds(type, agent) + result.selectQueries.incrementAndGet() + + val existingHashes = existingHashIds // ids previously store by the calling caching agent + .asSequence() + .map { it.body_hash } + .toSet() + val existingIds = existingHashIds + .asSequence() + .map { it.id } + .toSet() + val currentIds = mutableSetOf() // current ids from the caching agent + val toStore = mutableListOf() // ids that are new or changed + val bodies = mutableMapOf() // id to body + val hashes = mutableMapOf() // id to sha256(body) + val apps = mutableMapOf() + + items.filter { it.id.length > sqlConstraints.maxIdLength } + .forEach { + log.error("Dropping ${it.id} - character length exceeds MAX_ID_LENGTH ($sqlConstraints.maxIdLength)") + } + + items + .filter { it.id != "_ALL_" && it.id.length <= sqlConstraints.maxIdLength } + .forEach { + if (!currentIds.add(it.id)) { + log.warn("agent: '${agent}': type: '$type': only one item with id '${it.id}' allowed") + result.duplicates.incrementAndGet() + // Skip the rest of this iteration + return@forEach + } + val nullKeys = it.attributes + .filter { e -> e.value == null } + .keys + nullKeys.forEach { na -> it.attributes.remove(na) } + + if (it.attributes.containsKey("application")) { + apps[it.id] = it.attributes["application"] as String + } + + val keysToNormalize = it.relationships.keys.filter { k -> k.contains(':') } + if (keysToNormalize.isNotEmpty()) { + val normalized = normalizeRelationships(it.relationships, emptyList()) + keysToNormalize.forEach { k -> it.relationships.remove(k) } + it.relationships.putAll(normalized) + } + + val body: String? 
= mapper.writeValueAsString(it) + val bodyHash = getHash(body) + + if (body != null && bodyHash != null && !existingHashes.contains(bodyHash)) { + toStore.add(it.id) + bodies[it.id] = body + hashes[it.id] = bodyHash + } + } + + val now = clock.millis() + + toStore.chunked(dynamicConfigService.getConfig(Int::class.java, "sql.cache.write-batch-size", 100)) { chunk -> + try { + val insert = jooq.insertInto( + table(sqlNames.resourceTableName(type)), + field("id"), + field("agent"), + field("application"), + field("body_hash"), + field("body"), + field("last_updated") + ) + + insert.apply { + chunk.forEach { + values(it, sqlNames.checkAgentName(agent), apps[it], hashes[it], bodies[it], now) + when (jooq.dialect()) { + SQLDialect.POSTGRES -> + onConflict(field("id"), field("agent")) + .doUpdate() + .set(field("application"), SqlUtil.excluded(field("application")) as Any) + .set(field("body_hash"), SqlUtil.excluded(field("body_hash")) as Any) + .set(field("body"), SqlUtil.excluded(field("body")) as Any) + .set(field("last_updated"), SqlUtil.excluded(field("last_updated")) as Any) + else -> + onDuplicateKeyUpdate() + .set(field("application"), MySQLDSL.values(field("application")) as Any) + .set(field("body_hash"), MySQLDSL.values(field("body_hash")) as Any) + .set(field("body"), MySQLDSL.values(field("body")) as Any) + .set(field("last_updated"), MySQLDSL.values(field("last_updated")) as Any) + } + } + } + + withRetry(RetryCategory.WRITE) { + insert.execute() + } + result.itemsStored.addAndGet(chunk.size) + result.writeQueries.incrementAndGet() + } catch (e: DataAccessException) { + log.error("Error inserting ids: $chunk", e) + } catch (e: SQLDialectNotSupportedException) { + chunk.forEach { + val exists = withRetry(RetryCategory.READ) { + jooq.fetchExists( + jooq.select() + .from(sqlNames.resourceTableName(type)) + .where(field("id").eq(it), field("agent").eq(sqlNames.checkAgentName(agent))) + .forUpdate() + ) + } + result.selectQueries.incrementAndGet() + if (exists) { + withRetry(RetryCategory.WRITE) { + jooq.update(table(sqlNames.resourceTableName(type))) + .set(field("application"), apps[it]) + .set(field("body_hash"), hashes[it]) + .set(field("body"), bodies[it]) + .set(field("last_updated"), clock.millis()) + .where(field("id").eq(it), field("agent").eq(sqlNames.checkAgentName(agent))) + .execute() + } + result.writeQueries.incrementAndGet() + result.itemsStored.incrementAndGet() + } else { + withRetry(RetryCategory.WRITE) { + jooq.insertInto( + table(sqlNames.resourceTableName(type)), + field("id"), + field("agent"), + field("application"), + field("body_hash"), + field("body"), + field("last_updated") + ).values( + it, + sqlNames.checkAgentName(agent), + apps[it], + hashes[it], + bodies[it], + clock.millis() + ).execute() + } + result.writeQueries.incrementAndGet() + result.itemsStored.incrementAndGet() + } + } + } + } + + if (!cleanup) { + return result + } + + val toDelete = existingIds + .asSequence() + .filter { !currentIds.contains(it) } + .toSet() + + evictAll(type, toDelete) + + return result + } + + private fun storeInformative(type: String, items: MutableCollection, cleanup: Boolean): StoreResult { + val result = StoreResult() + + val sourceAgents = items.filter { it.relationships.isNotEmpty() } + .map { it.relationships.keys } + .flatten() + .toSet() + + if (sourceAgents.isEmpty()) { + log.info("no relationships found for type $type") + return result + } + + val existingFwdRelIds = sourceAgents + .map { + result.selectQueries.incrementAndGet() + getRelationshipKeys(type, it) + 
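// ^ one SELECT per source agent: every row this agent previously wrote to this type's
+ // rel table, keyed by uuid so the cleanup pass further down can diff and delete stale rows +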
} + .flatten() + + val existingRevRelTypes = mutableSetOf() + items + .filter { it.id != "_ALL_" } + .forEach { cacheData -> + cacheData.relationships.entries.forEach { rels -> + val relType = rels.key.substringBefore(delimiter = ":", missingDelimiterValue = "") + existingRevRelTypes.add(relType) + } + } + + existingRevRelTypes.filter { !createdTables.contains(it) } + .forEach { createTables(it) } + + val existingRevRelIds = existingRevRelTypes + .map { relType -> + sourceAgents + .map { agent -> + result.selectQueries.incrementAndGet() + getRelationshipKeys(relType, type, agent) + } + .flatten() + } + .flatten() + + val oldFwdIds: Map = existingFwdRelIds + .asSequence() + .map { it.key() to it.uuid } + .toMap() + + val oldRevIds = mutableMapOf() + val oldRevIdsToType = mutableMapOf() + + existingRevRelIds + .forEach { + oldRevIds[it.key()] = it.uuid + oldRevIdsToType[it.key()] = it.rel_agent.substringBefore(delimiter = ":", missingDelimiterValue = "") + } + + val currentIds = mutableSetOf() + val newFwdRelPointers = mutableMapOf>() + val newRevRelIds = mutableSetOf() + + items + .filter { it.id != "_ALL_" && it.id.length <= sqlConstraints.maxIdLength } + .forEach { cacheData -> + cacheData.relationships.entries.forEach { rels -> + val relType = rels.key.substringBefore(delimiter = ":", missingDelimiterValue = "") + rels.value.filter { it.length <= sqlConstraints.maxIdLength } + .forEach { r -> + val fwdKey = "${cacheData.id}|$r" + val revKey = "$r|${cacheData.id}" + currentIds.add(fwdKey) + currentIds.add(revKey) + + result.relationshipCount.incrementAndGet() + + if (!oldFwdIds.contains(fwdKey)) { + newFwdRelPointers.getOrPut(relType) { mutableListOf() } + .add(RelPointer(cacheData.id, r, rels.key)) + } + + if (!oldRevIds.containsKey(revKey)) { + newRevRelIds.add(revKey) + } + } + } + } + + newFwdRelPointers.forEach { (relType, pointers) -> + val now = clock.millis() + var ulid = ULID().nextValue() + + pointers.chunked(dynamicConfigService.getConfig(Int::class.java, "sql.cache.write-batch-size", 100)) { chunk -> + try { + val insert = jooq.insertInto( + table(sqlNames.relTableName(type)), + field("uuid"), + field("id"), + field("rel_id"), + field("rel_agent"), + field("rel_type"), + field("last_updated") + ) + + insert.apply { + chunk.forEach { + values(ulid.toString(), it.id, it.rel_id, sqlNames.checkAgentName(it.rel_type), relType, now) + ulid = ULID().nextMonotonicValue(ulid) + } + } + + withRetry(RetryCategory.WRITE) { + insert.execute() + } + result.writeQueries.incrementAndGet() + result.relationshipsStored.addAndGet(chunk.size) + } catch (e: Exception) { + log.error("Error inserting forward relationships for $type -> $relType", e) + } + } + + pointers.asSequence().filter { newRevRelIds.contains("${it.rel_id}|${it.id}") } + .chunked(dynamicConfigService.getConfig(Int::class.java, "sql.cache.write-batch-size", 100)) { chunk -> + try { + val insert = jooq.insertInto( + table(sqlNames.relTableName(relType)), + field("uuid"), + field("id"), + field("rel_id"), + field("rel_agent"), + field("rel_type"), + field("last_updated") + ) + + insert.apply { + chunk.forEach { + values(ulid.toString(), it.rel_id, it.id, sqlNames.checkAgentName(it.rel_type), type, now) + ulid = ULID().nextMonotonicValue(ulid) + } + } + + withRetry(RetryCategory.WRITE) { + insert.execute() + } + result.writeQueries.incrementAndGet() + result.relationshipsStored.addAndGet(chunk.size) + } catch (e: Exception) { + log.error("Error inserting reverse relationships for $relType -> $type", e) + } + }.toList() + } + + if 
(!cleanup) { + return result + } + + val fwdToDelete = oldFwdIds.filter { !currentIds.contains(it.key) } + val revToDelete = oldRevIds.filter { !currentIds.contains(it.key) } + + if (fwdToDelete.isNotEmpty() || revToDelete.isNotEmpty()) { + try { + fwdToDelete.forEach { + withRetry(RetryCategory.WRITE) { + jooq.deleteFrom(table(sqlNames.relTableName(type))) + .where(field("uuid").eq(it.value)) + .execute() + } + result.deleteQueries.incrementAndGet() + } + revToDelete.forEach { + if (oldRevIdsToType.getOrDefault(it.key, "").isNotBlank()) { + withRetry(RetryCategory.WRITE) { + jooq.deleteFrom(table(sqlNames.relTableName(oldRevIdsToType[it.key]!!))) + .where(field("uuid").eq(it.value)) + .execute() + } + result.deleteQueries.incrementAndGet() + } else { + log.warn("Couldn't delete ${it.key}, no mapping to type") + } + } + } catch (e: Exception) { + log.error("Error deleting stale relationships", e) + } + } + + return result + } + + private fun createTables(type: String) { + if (!createdTables.contains(type)) { + try { + withRetry(RetryCategory.WRITE) { + SqlUtil.createTableLike(jooq, sqlNames.resourceTableName(type), "cats_v${schemaVersion}_resource_template") + SqlUtil.createTableLike(jooq, sqlNames.relTableName(type), "cats_v${schemaVersion}_rel_template") + } + + createdTables.add(type) + } catch (e: Exception) { + log.error("Error creating tables for type $type", e) + } + } + if (!createdTables.contains(onDemandType)) { + // TODO not sure if best schema for onDemand + try { + withRetry(RetryCategory.WRITE) { + SqlUtil.createTableLike(jooq, sqlNames.resourceTableName(onDemandType), "cats_v${schemaVersion}_resource_template") + SqlUtil.createTableLike(jooq, sqlNames.relTableName(onDemandType), "cats_v${schemaVersion}_rel_template") + } + + createdTables.add(onDemandType) + } catch (e: Exception) { + log.error("Error creating $onDemandType table", e) + } + } + } + + private fun getRelationshipFilterPrefixes(cacheFilter: CacheFilter?): List { + return if (cacheFilter == null) { + listOf("ALL") + } else { + try { + (cacheFilter as RelationshipCacheFilter).allowableRelationshipPrefixes + } catch (e: Exception) { + log.warn("Failed reading cacheFilter allowableRelationshipPrefixes", e) + emptyList() + } + } + } + + private fun getHash(body: String?): String? { + if (body.isNullOrBlank()) { + return null + } + return try { + val digest = MessageDigest.getInstance("SHA-256") + .digest(body.toByteArray()) + // The hash length is known, so a `StringBuilder` with a predefined capacity is used + // to prevent unnecessary array allocations inside the StringBuilder. + val builder = StringBuilder(64) + for (byte in digest) { + // Uses pre-generated string representations for each byte to optimize performance. 
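+ // (byte - Byte.MIN_VALUE) maps the signed range -128..127 onto indices 0..255,
+ // e.g. byte 0 -> index 128 -> "00", byte -1 -> index 127 -> "ff".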
+ builder.append(this.hexStrings[byte - Byte.MIN_VALUE]) + } + builder.toString() + } catch (e: Exception) { + log.error("error calculating hash for body: $body", e) + null + } + } + + private fun getHashIds(type: String, agent: String?): List { + return withRetry(RetryCategory.READ) { + jooq + .select(field("body_hash"), field("id")) + .from(table(sqlNames.resourceTableName(type))) + .where( + field("agent").eq(sqlNames.checkAgentName(agent)) + ) + .fetch() + .into(HashId::class.java) + } + } + + private fun getRelationshipKeys(type: String, sourceAgent: String): MutableList { + return withRetry(RetryCategory.READ) { + jooq + .select(field("uuid"), field("id"), field("rel_id"), field("rel_agent")) + .from(table(sqlNames.relTableName(type))) + .where(field("rel_agent").eq(sqlNames.checkAgentName(sourceAgent))) + .fetch() + .into(RelId::class.java) + } + } + + private fun getRelationshipKeys(type: String, origType: String, sourceAgent: String): MutableList { + return withRetry(RetryCategory.READ) { + jooq + .select(field("uuid"), field("id"), field("rel_id"), field("rel_agent")) + .from(table(sqlNames.relTableName(type))) + .where( + field("rel_agent").eq(sqlNames.checkAgentName(sourceAgent)), + field("rel_type").eq(origType) + ) + .fetch() + .into(RelId::class.java) + } + } + + private fun getDataWithoutRelationships(type: String): DataWithRelationshipPointersResult { + return getDataWithoutRelationships(type, emptyList()) + } + + private fun getDataWithoutRelationships( + type: String, + ids: Collection + ): DataWithRelationshipPointersResult { + val cacheData = mutableListOf() + val relPointers = mutableSetOf() + val batchSize = dynamicConfigService.getConfig(Int::class.java, "sql.cache.read-batch-size", 500) + var selectQueries = 0 + var withAsync = false + + try { + if (ids.isEmpty()) { + withRetry(RetryCategory.READ) { + cacheData.addAll( + jooq.select(field("body")) + .from(table(sqlNames.resourceTableName(type))) + .fetch() + .getValues(0) + .asSequence() + .map { mapper.readValue(it as String, DefaultJsonCacheData::class.java) } + .toList() + ) + } + selectQueries += 1 + } else { + if (coroutineContext.useAsync(ids.size, this::useAsync)) { + withAsync = true + val scope = CatsCoroutineScope(coroutineContext) + + ids.chunked(batchSize).chunked( + dynamicConfigService.getConfig(Int::class.java, "sql.cache.max-query-concurrency", 4) + ) { batch -> + val deferred = batch.map { ids -> + scope.async { selectBodies(type, ids) } + } + runBlocking { + cacheData.addAll(deferred.awaitAll().flatten()) + } + selectQueries += deferred.size + } + } else { + ids.chunked(batchSize) { chunk -> + cacheData.addAll(selectBodies(type, chunk)) + selectQueries += 1 + } + } + } + + return DataWithRelationshipPointersResult(cacheData, relPointers, selectQueries, withAsync) + } catch (e: Exception) { + suppressedLog("Failed selecting ids for type $type", e) + + cacheMetrics.get( + prefix = name, + type = type, + itemCount = 0, + requestedSize = -1, + relationshipsRequested = -1, + selectOperations = selectQueries, + async = withAsync + ) + + selectQueries = -1 + + return DataWithRelationshipPointersResult(mutableListOf(), mutableSetOf(), selectQueries, withAsync) + } + } + + private fun getDataWithoutRelationshipsByApp(type: String, application: String): DataWithRelationshipPointersResult { + val cacheData = mutableListOf() + val relPointers = mutableSetOf() + var selectQueries = 0 + + try { + withRetry(RetryCategory.READ) { + cacheData.addAll( + jooq.select(field("body")) + 
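// the application column is populated by storeAuthoritative from each item's
+ // "application" attribute, so per-application reads stay a simple WHERE on the
+ // resource table instead of deserializing and scanning bodies +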
.from(table(sqlNames.resourceTableName(type))) + .where(field("application").eq(application)) + .fetch() + .getValues(0) + .asSequence() + .map { mapper.readValue(it as String, DefaultJsonCacheData::class.java) } + .toList() + ) + } + selectQueries += 1 + return DataWithRelationshipPointersResult(cacheData, relPointers, selectQueries, false) + } catch (e: Exception) { + suppressedLog("Failed selecting resources of type $type for application $application", e) + + cacheMetrics.get( + prefix = name, + type = type, + itemCount = 0, + requestedSize = -1, + relationshipsRequested = -1, + selectOperations = selectQueries, + async = wasAsync() + ) + + selectQueries = -1 + + return DataWithRelationshipPointersResult(mutableListOf(), mutableSetOf(), selectQueries, false) + } + } + + private fun getDataWithRelationshipsByApp( + type: String, + application: String, + relationshipPrefixes: List + ): DataWithRelationshipPointersResult { + + /* + select body, null as id, null as rel_id, null as rel_type from cats_v1_b_instances + where application = 'titusagent' + UNION ALL + select null as body, rel.id, rel.rel_id, rel.rel_type from cats_v1_b_instances as r + left join cats_v1_b_instances_rel as rel on rel.id=r.id where r.application = "titusagent" + group by rel.rel_id, rel.id, rel.rel_type; + */ + val cacheData = mutableListOf() + val relPointers = mutableSetOf() + var selectQueries = 0 + + val relWhere = getRelWhere(relationshipPrefixes, field("r.application").eq(application)) + + try { + val resultSet = withRetry(RetryCategory.READ) { + jooq + .select( + field("body").`as`("body"), + field(sql("null")).`as`("id"), + field(sql("null")).`as`("rel_id"), + field(sql("null")).`as`("rel_type") + ) + .from(table(sqlNames.resourceTableName(type))) + .where(field("application").eq(application)) + .unionAll( + jooq.select( + field(sql("null")).`as`("body"), + field("rel.id").`as`("id"), + field("rel.rel_id").`as`("rel_id"), + field("rel.rel_type").`as`("rel_type") + ) + .from(table(sqlNames.resourceTableName(type)).`as`("r")) + .innerJoin(table(sqlNames.relTableName(type)).`as`("rel")) + .on(sql("rel.id=r.id")) + .where(relWhere) + .groupBy( + field("rel_id"), + field("id"), + field("rel_type") + ) + ) + .fetch() + .intoResultSet() + } + parseCacheRelResultSet(type, resultSet, cacheData, relPointers) + selectQueries += 1 + return DataWithRelationshipPointersResult(cacheData, relPointers, selectQueries, false) + } catch (e: Exception) { + suppressedLog("Failed selecting resources of type $type for application $application", e) + + cacheMetrics.get( + prefix = name, + type = type, + itemCount = 0, + requestedSize = -1, + relationshipsRequested = -1, + selectOperations = selectQueries, + async = wasAsync() + ) + + selectQueries = -1 + + return DataWithRelationshipPointersResult(mutableListOf(), mutableSetOf(), selectQueries, false) + } + } + + private fun getDataWithRelationships( + type: String, + relationshipPrefixes: List + ): + DataWithRelationshipPointersResult { + return getDataWithRelationships(type, emptyList(), relationshipPrefixes) + } + + private fun getDataWithRelationships( + type: String, + ids: Collection, + relationshipPrefixes: List + ): DataWithRelationshipPointersResult { + val cacheData = mutableListOf() + val relPointers = mutableSetOf() + var selectQueries = 0 + var withAsync = false + val batchSize = dynamicConfigService.getConfig(Int::class.java, "sql.cache.read-batch-size", 500) + + /* + Approximating the following query pattern in jooq: + + (select body, null as r_id, null as rel, null 
+      (select body, null as r_id, null as rel, null as rel_type from `cats_v1_a_applications` where id IN
+      ('aws:applications:spintest', 'aws:applications:spindemo')) UNION
+      (select null as body, id as r_id, rel_id as rel, rel_type from `cats_v1_a_applications_rel` where id IN
+      ('aws:applications:spintest', 'aws:applications:spindemo') and rel_type like 'load%');
+    */
+
+    try {
+      if (ids.isEmpty()) {
+
+        val relWhere = getRelWhere(relationshipPrefixes)
+
+        val resultSet = withRetry(RetryCategory.READ) {
+          jooq
+            .select(
+              field("body").`as`("body"),
+              field(sql("null")).`as`("id"),
+              field(sql("null")).`as`("rel_id"),
+              field(sql("null")).`as`("rel_type")
+            )
+            .from(table(sqlNames.resourceTableName(type)))
+            .unionAll(
+              jooq.select(
+                field(sql("null")).`as`("body"),
+                field("id").`as`("id"),
+                field("rel_id").`as`("rel_id"),
+                field("rel_type").`as`("rel_type")
+              )
+                .from(table(sqlNames.relTableName(type)))
+                .where(relWhere)
+            )
+            .fetch()
+            .intoResultSet()
+        }
+
+        parseCacheRelResultSet(type, resultSet, cacheData, relPointers)
+        selectQueries += 1
+      } else {
+        if (coroutineContext.useAsync(ids.size, this::useAsync)) {
+          withAsync = true
+
+          ids.chunked(batchSize).chunked(
+            dynamicConfigService.getConfig(Int::class.java, "sql.cache.max-query-concurrency", 4)
+          ) { batch ->
+            val scope = CatsCoroutineScope(coroutineContext)
+
+            val deferred = batch.map { chunk ->
+              scope.async {
+                selectBodiesWithRelationships(type, relationshipPrefixes, chunk)
+              }
+            }
+
+            runBlocking {
+              deferred.awaitAll()
+            }.forEach { resultSet ->
+              parseCacheRelResultSet(type, resultSet, cacheData, relPointers)
+              selectQueries += 1
+            }
+          }
+        } else {
+          ids.chunked(batchSize) { chunk ->
+            val resultSet = selectBodiesWithRelationships(type, relationshipPrefixes, chunk)
+
+            parseCacheRelResultSet(type, resultSet, cacheData, relPointers)
+            selectQueries += 1
+          }
+        }
+      }
+      return DataWithRelationshipPointersResult(cacheData, relPointers, selectQueries, withAsync)
+    } catch (e: Exception) {
+      suppressedLog("Failed selecting ids for type $type", e)
+
+      cacheMetrics.get(
+        prefix = name,
+        type = type,
+        itemCount = 0,
+        requestedSize = -1,
+        relationshipsRequested = -1,
+        selectOperations = selectQueries,
+        async = withAsync
+      )
+
+      selectQueries = -1
+
+      return DataWithRelationshipPointersResult(mutableListOf(), mutableSetOf(), selectQueries, withAsync)
+    }
+  }
+
+  private fun selectBodies(type: String, ids: List<String>): Collection<CacheData> {
+    return withRetry(RetryCategory.READ) {
+      jooq.select(field("body"))
+        .from(table(sqlNames.resourceTableName(type)))
+        .where(field("ID").`in`(*ids.toTypedArray()))
+        .fetch()
+        .getValues(0)
+        .map { mapper.readValue(it as String, DefaultJsonCacheData::class.java) }
+        .toList()
+    }
+  }
+
+  private fun selectBodiesWithRelationships(
+    type: String,
+    relationshipPrefixes: List<String>,
+    ids: List<String>
+  ): ResultSet {
+    val where = field("ID").`in`(*ids.toTypedArray())
+
+    val relWhere = getRelWhere(relationshipPrefixes, where)
+
+    return withRetry(RetryCategory.READ) {
+      jooq
+        .select(
+          field("body").`as`("body"),
+          field(sql("null")).`as`("id"),
+          field(sql("null")).`as`("rel_id"),
+          field(sql("null")).`as`("rel_type")
+        )
+        .from(table(sqlNames.resourceTableName(type)))
+        .where(where)
+        .unionAll(
+          jooq.select(
+            field(sql("null")).`as`("body"),
+            field("id").`as`("id"),
+            field("rel_id").`as`("rel_id"),
+            field("rel_type").`as`("rel_type")
+          )
+            .from(table(sqlNames.relTableName(type)))
+            .where(relWhere)
+        )
+        .fetch()
+        .intoResultSet()
+    }
+  }
+
+  private fun selectIdentifiers(type: String, ids: List<String>): MutableCollection<String> {
+    return withRetry(RetryCategory.READ) {
+      jooq.select(field("id"))
+        .from(table(sqlNames.resourceTableName(type)))
+        .where(field("id").`in`(*ids.toTypedArray()))
+        .fetch()
+        .intoSet(field("id"), String::class.java)
+    }
+  }
+
+  private fun parseCacheRelResultSet(
+    type: String,
+    resultSet: ResultSet,
+    cacheData: MutableList<CacheData>,
+    relPointers: MutableSet<RelPointer>
+  ) {
+    while (resultSet.next()) {
+      if (!resultSet.getString(1).isNullOrBlank()) {
+        try {
+          cacheData.add(mapper.readValue(resultSet.getString(1), DefaultJsonCacheData::class.java))
+        } catch (e: Exception) {
+          log.error("Failed to deserialize cached value: type $type, body ${resultSet.getString(1)}", e)
+        }
+      } else {
+        try {
+          relPointers.add(RelPointer(resultSet.getString(2), resultSet.getString(3), resultSet.getString(4)))
+        } catch (e: SQLException) {
+          log.error("Error reading relationship of type $type", e)
+        }
+      }
+    }
+  }
+
+  private fun mergeDataAndRelationships(
+    cacheData: Collection<CacheData>,
+    relationshipPointers: Collection<RelPointer>,
+    relationshipPrefixes: List<String>
+  ): MutableCollection<CacheData> {
+    val data = mutableMapOf<String, CacheData>()
+    val relKeysToRemove = mutableMapOf<String, MutableSet<String>>()
+    val filter = relationshipPrefixes.any { it != "ALL" } || relationshipPrefixes.isEmpty()
+
+    // First merge any duplicate ids in cacheData
+    cacheData.forEach { item ->
+      if (!data.containsKey(item.id)) {
+        data[item.id] = item
+        // TODO: a CacheSpec unit test verifies that an empty cache filter returns no relationships;
+        // however, we should arguably leave relationships stored in a key's body and only use the
+        // filter to prevent fetching more. TODO: update the test?
+        if (relationshipPrefixes.isNotEmpty()) {
+          if (item.relationships.any { it.key.contains(':') }) {
+            data[item.id]!!.relationships.putAll(normalizeRelationships(item.relationships, relationshipPrefixes))
+          }
+        } else {
+          relKeysToRemove.getOrPut(item.id) { mutableSetOf() }
+            .addAll(data[item.id]!!.relationships.keys)
+        }
+      } else {
+        // TODO: get rid of the need for !!s
+        val rel = data[item.id]!!.relationships
+        val alt = data[item.id]!!.attributes
+
+        if (relationshipPrefixes.isNotEmpty()) {
+          normalizeRelationships(item.relationships, relationshipPrefixes).forEach {
+            if (rel.contains(it.key)) {
+              it.value.forEach { rv ->
+                if (!rel[it.key]!!.contains(rv)) {
+                  rel[it.key]!!.add(rv)
+                }
+              }
+            } else {
+              rel[it.key] = it.value
+            }
+          }
+        } else {
+          relKeysToRemove.getOrPut(item.id) { mutableSetOf() }
+            .addAll(rel.keys)
+        }
+
+        item.attributes.forEach {
+          if (!alt.contains(it.key)) {
+            alt[it.key] = it.value
+          }
+        }
+      }
+    }
+
+    // Then merge in additional relationships
+    if (relationshipPrefixes.isNotEmpty()) {
+      relationshipPointers
+        .filter { data.containsKey(it.id) }
+        .forEach { r ->
+          val existingRels = data[r.id]!!.relationships
+            .getOrPut(r.rel_type) { mutableListOf() }
+
+          if (!existingRels.contains(r.rel_id)) {
+            existingRels.add(r.rel_id)
+          }
+        }
+    }
+
+    // TODO: this would be unnecessary if we only applied cacheFilters when fetching
+    // additional relationships
+    if (relKeysToRemove.isEmpty() && filter) {
+      data.values.forEach { cd ->
+        cd.relationships.keys.forEach { r ->
+          if (relationshipPrefixes.none { r.startsWith(it) }) {
+            relKeysToRemove.getOrPut(cd.id) { mutableSetOf() }.add(r)
+          }
+        }
+      }
+    }
+
+    // TODO: same as above
+    if (relKeysToRemove.isNotEmpty()) {
+      relKeysToRemove.forEach { k, v ->
+        v.forEach {
+          data[k]?.relationships?.remove(it)
+        }
+      }
+    }
+
+    return data.values
+  }
+
+  private fun normalizeRelationships(
+    rels: Map<String, Collection<String>>,
+    filterPrefixes: List<String>
+  ): Map<String, Collection<String>> {
+    val filter = filterPrefixes.any { it != "ALL" }
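+    // Group each relationship key by its type prefix (the segment before the first ':'),
+    // keeping only the types that match one of the requested cache filter prefixes.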
+    val relationships = mutableMapOf<String, MutableList<String>>()
+    rels.entries.forEach {
+      val type = it.key.substringBefore(":", missingDelimiterValue = it.key)
+      if (!filter || filterPrefixes.any { prefix -> type.startsWith(prefix) }) {
+        relationships.getOrPut(type) { mutableListOf() }.addAll(it.value)
+      }
+    }
+
+    return relationships
+  }
+
+  private fun getRelWhere(relationshipPrefixes: List<String>, prefix: Condition? = null): Condition {
+    var relWhere: Condition = noCondition()
+
+    if (relationshipPrefixes.isNotEmpty() && !relationshipPrefixes.contains("ALL")) {
+      relWhere = field("rel_type").like("${relationshipPrefixes[0]}%")
+
+      for (i in 1 until relationshipPrefixes.size) {
+        relWhere = relWhere.or(field("rel_type").like("${relationshipPrefixes[i]}%"))
+      }
+    }
+
+    if (prefix != null) {
+      return prefix.and(relWhere)
+    }
+
+    return relWhere
+  }
+
+  private enum class RetryCategory {
+    WRITE, READ
+  }
+
+  private fun <T> withRetry(category: RetryCategory, action: () -> T): T {
+    return if (category == RetryCategory.WRITE) {
+      val retry = Retry.of(
+        "sqlWrite",
+        RetryConfig.custom<T>()
+          .maxAttempts(sqlRetryProperties.transactions.maxRetries)
+          .waitDuration(Duration.ofMillis(sqlRetryProperties.transactions.backoffMs))
+          .ignoreExceptions(SQLDialectNotSupportedException::class.java)
+          .build()
+      )
+
+      Try.ofSupplier(Retry.decorateSupplier(retry, action)).get()
+    } else {
+      val retry = Retry.of(
+        "sqlRead",
+        RetryConfig.custom<T>()
+          .maxAttempts(sqlRetryProperties.reads.maxRetries)
+          .waitDuration(Duration.ofMillis(sqlRetryProperties.reads.backoffMs))
+          .ignoreExceptions(SQLDialectNotSupportedException::class.java)
+          .build()
+      )
+
+      Try.ofSupplier(Retry.decorateSupplier(retry, action)).get()
+    }
+  }
+
+  @ExperimentalContracts
+  private fun useAsync(items: Int): Boolean {
+    return dynamicConfigService.getConfig(Int::class.java, "sql.cache.max-query-concurrency", 4) > 1 &&
+      items > dynamicConfigService.getConfig(Int::class.java, "sql.cache.read-batch-size", 500) * 2
+  }
+
+  @ExperimentalContracts
+  private fun asyncEnabled(): Boolean {
+    return dynamicConfigService.getConfig(Int::class.java, "sql.cache.max-query-concurrency", 4) > 1
+  }
+
+  /**
+   * Provides best-effort suppression of "table doesn't exist" exceptions which come up in large volume during initial
+   * setup of a new SQL database. This isn't really an error we care to report, as the tables are created on-demand by
+   * the cache writer, but may be read against by readers before the caching agents have been run.
+   */
+  private fun suppressTableNotExistsException(e: Exception): Exception? {
+    if (e is BadSqlGrammarException && e.sqlException is SQLSyntaxErrorException) {
+      // Best effort suppression of "table doesn't exist" exceptions.
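+      // SQLState 42S02 is the standard ODBC code for "base table or view not found"; the message
+      // match is a fallback for drivers that don't populate sqlState consistently.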
+      return if (
+        e.sqlException.sqlState.toLowerCase() == "42s02" ||
+        e.sqlException.message?.matches(Regex("Table.*doesn't exist")) == true
+      ) null else e
+    }
+    return e
+  }
+
+  private fun suppressedLog(message: String, e: Exception) {
+    if (suppressTableNotExistsException(e) != null) {
+      log.error(message, e)
+    }
+  }
+
+  private fun wasAsync(): Boolean {
+    return Thread.currentThread().name.startsWith(coroutineThreadPrefix)
+  }
+
+  // Assists with unit testing
+  fun clearCreatedTables() {
+    val tables = createdTables.toList()
+    createdTables.removeAll(tables)
+  }
+
+  data class HashId(
+    val body_hash: String,
+    val id: String
+  )
+
+  data class RelId(
+    val uuid: String,
+    val id: String,
+    val rel_id: String,
+    val rel_agent: String
+  ) {
+    fun key(): String {
+      return "$id|$rel_id"
+    }
+  }
+
+  data class RelPointer(
+    val id: String,
+    val rel_id: String,
+    val rel_type: String
+  )
+
+  private data class DataWithRelationshipPointersResult(
+    val data: MutableList<CacheData>,
+    val relPointers: MutableSet<RelPointer>,
+    val selectQueries: Int,
+    val withAsync: Boolean = false
+  )
+
+  private inner class StoreResult {
+    val itemCount = AtomicInteger(0)
+    val itemsStored = AtomicInteger(0)
+    val relationshipCount = AtomicInteger(0)
+    val relationshipsStored = AtomicInteger(0)
+    val selectQueries = AtomicInteger(0)
+    val writeQueries = AtomicInteger(0)
+    val deleteQueries = AtomicInteger(0)
+    val duplicates = AtomicInteger(0)
+  }
+}
+
+@ExperimentalContracts
+fun CoroutineContext?.useAsync(size: Int, useAsync: (size: Int) -> Boolean): Boolean {
+  contract {
+    returns(true) implies (this@useAsync is CoroutineContext)
+  }
+
+  return this != null && useAsync.invoke(size)
+}
+
+@ExperimentalContracts
+fun CoroutineContext?.useAsync(useAsync: () -> Boolean): Boolean {
+  contract {
+    returns(true) implies (this@useAsync is CoroutineContext)
+  }
+
+  return this != null && useAsync.invoke()
+}
+
+class CatsCoroutineScope(context: CoroutineContext) : CoroutineScope {
+  override val coroutineContext = context
+  private val jobs = Job()
+
+  @PreDestroy
+  fun killChildJobs() = jobs.cancel()
+}
diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCacheMetrics.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCacheMetrics.kt
new file mode 100644
index 00000000000..13f4d565351
--- /dev/null
+++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCacheMetrics.kt
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.spinnaker.cats.sql.cache + +interface SqlCacheMetrics { + fun merge( + prefix: String, + type: String, + itemCount: Int, + itemsStored: Int, + relationshipCount: Int, + relationshipsStored: Int, + selectOperations: Int, + writeOperations: Int, + deleteOperations: Int, + duplicates: Int + ) {} + + fun evict( + prefix: String, + type: String, + itemCount: Int, + itemsDeleted: Int, + deleteOperations: Int + ) {} + + fun get( + prefix: String, + type: String, + itemCount: Int, + requestedSize: Int, + relationshipsRequested: Int, + selectOperations: Int, + async: Boolean = false + ) {} +} + +class NoopCacheMetrics : SqlCacheMetrics diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCleanupStaleOnDemandCachesAgent.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCleanupStaleOnDemandCachesAgent.kt new file mode 100644 index 00000000000..1a0b7f7ea28 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlCleanupStaleOnDemandCachesAgent.kt @@ -0,0 +1,54 @@ +package com.netflix.spinnaker.cats.sql.cache + +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.cats.agent.RunnableAgent +import com.netflix.spinnaker.cats.module.CatsModule +import com.netflix.spinnaker.cats.sql.SqlProviderCache +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent +import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider +import com.netflix.spinnaker.clouddriver.sql.SqlAgent +import java.time.Clock +import java.util.concurrent.TimeUnit +import kotlin.contracts.ExperimentalContracts +import org.slf4j.LoggerFactory +import org.springframework.context.ApplicationContext + +@ExperimentalContracts +class SqlCleanupStaleOnDemandCachesAgent( + private val applicationContext: ApplicationContext, + private val registry: Registry, + private val clock: Clock +) : RunnableAgent, CustomScheduledAgent, SqlAgent { + + companion object { + private val DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(20) + private val DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(3) + private val MAX_ONDEMAND_AGE_MILLIS = TimeUnit.MINUTES.toMillis(30) + + private val log = LoggerFactory.getLogger(SqlCleanupStaleOnDemandCachesAgent::class.java) + } + + private val countId = registry.createId("cats.sqlCache.cleanedStaleOnDemandKeys.count") + private val timeId = registry.createId("cats.sqlCache.cleanedStaleOnDemandKeys.time") + + override fun run() { + val start = clock.millis() + + val deleted = getCache().cleanOnDemand(MAX_ONDEMAND_AGE_MILLIS) + + registry.gauge(countId).set(deleted.toDouble()) + registry.gauge(timeId).set((clock.millis() - start).toDouble()) + } + + private fun getCache(): SqlProviderCache { + return applicationContext.getBean(CatsModule::class.java) + .providerRegistry + .providerCaches + .first() as SqlProviderCache + } + + override fun getAgentType(): String = javaClass.simpleName + override fun getProviderName(): String = CoreProvider.PROVIDER_NAME + override fun getPollIntervalMillis(): Long = DEFAULT_POLL_INTERVAL_MILLIS + override fun getTimeoutMillis(): Long = DEFAULT_TIMEOUT_MILLIS +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNamedCacheFactory.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNamedCacheFactory.kt new file mode 100644 index 00000000000..186b34ea489 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNamedCacheFactory.kt @@ -0,0 +1,50 @@ 
+package com.netflix.spinnaker.cats.sql.cache + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.NamedCacheFactory +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.provider.ProviderCacheConfiguration +import com.netflix.spinnaker.config.SqlConstraints +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties +import java.time.Clock +import kotlin.contracts.ExperimentalContracts +import kotlin.coroutines.CoroutineContext +import org.jooq.DSLContext + +class SqlNamedCacheFactory( + private val jooq: DSLContext, + private val mapper: ObjectMapper, + private val dispatcher: CoroutineContext?, + private val clock: Clock, + private val sqlRetryProperties: SqlRetryProperties, + private val prefix: String?, + private val cacheMetrics: SqlCacheMetrics, + private val dynamicConfigService: DynamicConfigService, + private val sqlConstraints: SqlConstraints +) : NamedCacheFactory { + + @ExperimentalContracts + override fun getCache(name: String): WriteableCache { + return getCache(name, DefaultProviderCacheConfiguration()) + } + + @ExperimentalContracts + override fun getCache(name: String, providerCacheConfiguration: ProviderCacheConfiguration): WriteableCache { + return SqlCache( + name, + jooq, + mapper, + dispatcher, + clock, + sqlRetryProperties, + prefix, + cacheMetrics, + dynamicConfigService, + sqlConstraints, + providerCacheConfiguration + ) + } + + class DefaultProviderCacheConfiguration : ProviderCacheConfiguration +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNames.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNames.kt new file mode 100644 index 00000000000..bc755df9990 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNames.kt @@ -0,0 +1,111 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.cats.sql.cache + +import com.google.common.hash.Hashing +import com.netflix.spinnaker.config.SqlConstraints +import com.netflix.spinnaker.kork.annotations.VisibleForTesting + +/** + * Provides utility methods for clouddriver's SQL naming conventions. + */ +class SqlNames( + private val tableNamespace: String? = null, + private val sqlConstraints: SqlConstraints +) { + + /** + * Get the resource table name for a given agent type. + */ + fun resourceTableName(type: String): String = + checkTableName("cats_v${schemaVersion}_", sanitizeType(type), "") + + /** + * Get the relationship table name for a given agent type. + */ + fun relTableName(type: String): String = + checkTableName("cats_v${schemaVersion}_", sanitizeType(type), "_rel") + + private fun sanitizeType(type: String): String { + return type.replace(typeSanitization, "_") + } + + /** + * Computes the actual name of the table less than MAX_TABLE_NAME_LENGTH characters long. 
+   * It always keeps the prefix (including the tableNamespace) but may shorten the name and then
+   * the suffix, in that order.
+   * @return computed table name
+   */
+  @VisibleForTesting
+  internal fun checkTableName(prefix: String, name: String, suffix: String): String {
+    var base = prefix
+    if (tableNamespace != null) {
+      base = "${prefix + tableNamespace}_"
+    }
+
+    // Optimistic and most frequent case
+    val tableName = base + name + suffix
+    if (tableName.length < sqlConstraints.maxTableNameLength) {
+      return tableName
+    }
+
+    // Hash the name and keep the suffix
+    val hash = Hashing.murmur3_128().hashBytes((name + suffix).toByteArray()).toString().substring(0..15)
+    val available = sqlConstraints.maxTableNameLength - base.length - suffix.length - hash.length - 1
+    if (available >= 0) {
+      return base + name.substring(0..available) + hash + suffix
+    }
+
+    // Remove suffix
+    if (available + suffix.length >= 0) {
+      return base + name.substring(0..(available + suffix.length)) + hash
+    }
+    throw IllegalArgumentException("property sql.table-namespace $tableNamespace is too long")
+  }
+
+  /**
+   * Truncates the agent string if it is too long to fit in [SqlConstraints.maxAgentLength].
+   * @return agent string to store
+   */
+  @VisibleForTesting
+  internal fun checkAgentName(agent: String?): String? {
+    if (agent == null) {
+      return null
+    }
+    if (agent.length <= sqlConstraints.maxAgentLength) {
+      return agent
+    }
+
+    val hash = Hashing.murmur3_128().hashBytes((agent).toByteArray()).toString()
+    val colIdx = agent.indexOf(':')
+
+    // We want to store at least "<type>:"
+    if (colIdx > sqlConstraints.maxAgentLength - 2) {
+      throw IllegalArgumentException("Type ${agent.substring(0, colIdx)} is too long, record cannot be stored")
+    }
+
+    // How much we can keep of the agent string; we need to preserve the colon
+    val available = Math.max(sqlConstraints.maxAgentLength - hash.length - 1, colIdx)
+    // How much of the hash will fit if we want to preserve the colon
+    val hashLength = Math.min(hash.length, sqlConstraints.maxAgentLength - colIdx - 1)
+    return agent.substring(0..available) + hash.substring(0, hashLength)
+  }
+
+  companion object {
+    private val schemaVersion = SqlSchemaVersion.current()
+    private val typeSanitization =
+      """[^A-Za-z0-9_]""".toRegex()
+  }
+}
diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlSchemaVersion.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlSchemaVersion.kt
new file mode 100644
index 00000000000..de6721e8a42
--- /dev/null
+++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlSchemaVersion.kt
@@ -0,0 +1,9 @@
+package com.netflix.spinnaker.cats.sql.cache
+
+enum class SqlSchemaVersion(val version: Int) {
+  V1(1);
+
+  companion object {
+    fun current(): Int = V1.version
+  }
+}
diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlTableMetricsAgent.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlTableMetricsAgent.kt
new file mode 100644
index 00000000000..240ec42d967
--- /dev/null
+++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlTableMetricsAgent.kt
@@ -0,0 +1,70 @@
+package com.netflix.spinnaker.cats.sql.cache
+
+import com.netflix.spectator.api.Registry
+import com.netflix.spinnaker.cats.agent.RunnableAgent
+import com.netflix.spinnaker.cats.sql.SqlUtil
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent
+import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider
+import com.netflix.spinnaker.clouddriver.sql.SqlAgent
+import 
java.time.Clock +import java.util.concurrent.TimeUnit +import org.jooq.DSLContext +import org.jooq.SQLDialect +import org.jooq.impl.DSL.field +import org.jooq.impl.DSL.table +import org.slf4j.LoggerFactory + +class SqlTableMetricsAgent( + private val jooq: DSLContext, + private val registry: Registry, + private val clock: Clock, + private val namespace: String? +) : RunnableAgent, CustomScheduledAgent, SqlAgent { + + companion object { + private val DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(1) + private val DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(2) + + private val log = LoggerFactory.getLogger(SqlTableMetricsAgent::class.java) + } + + private val countId = registry.createId("cats.sqlCache.tableMetricsAgent.count") + .withTag("namespace", namespace ?: "none") + + private val timingId = registry.createId("cats.sqlCache.tableMetricsAgent.timing") + .withTag("namespace", namespace ?: "none") + + override fun run() { + val start = clock.millis() + var tableCount = 0 + + val baseName = if (namespace == null) { + "cats_v${SqlSchemaVersion.current()}_" + } else { + "cats_v${SqlSchemaVersion.current()}_${namespace}_" + } + + val rs = SqlUtil.getTablesLike(jooq, baseName) + while (rs.next()) { + val tableName = rs.getString(1) + val type = tableName.replace(baseName, "") + + val count = jooq.selectCount() + .from(table(tableName)) + .fetchSingle() + .value1() + + registry.gauge(countId.withTag("type", type)).set(count.toDouble()) + tableCount++ + } + + val runTime = clock.millis() - start + registry.gauge(timingId).set(runTime.toDouble()) + log.info("Read counts for $tableCount tables in ${runTime}ms") + } + + override fun getAgentType(): String = javaClass.simpleName + override fun getProviderName(): String = CoreProvider.PROVIDER_NAME + override fun getPollIntervalMillis(): Long = DEFAULT_POLL_INTERVAL_MILLIS + override fun getTimeoutMillis(): Long = DEFAULT_TIMEOUT_MILLIS +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlUnknownAgentCleanupAgent.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlUnknownAgentCleanupAgent.kt new file mode 100644 index 00000000000..8efb9c14433 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlUnknownAgentCleanupAgent.kt @@ -0,0 +1,224 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.cats.sql.cache
+
+import com.netflix.spectator.api.Registry
+import com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE
+import com.netflix.spinnaker.cats.agent.CachingAgent
+import com.netflix.spinnaker.cats.agent.RunnableAgent
+import com.netflix.spinnaker.cats.provider.ProviderRegistry
+import com.netflix.spinnaker.cats.sql.SqlUtil
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent
+import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider
+import com.netflix.spinnaker.clouddriver.sql.SqlAgent
+import com.netflix.spinnaker.config.ConnectionPools
+import com.netflix.spinnaker.kork.sql.routing.withPool
+import java.sql.SQLException
+import java.util.concurrent.TimeUnit
+import org.jooq.DSLContext
+import org.jooq.Field
+import org.jooq.impl.DSL.field
+import org.jooq.impl.DSL.table
+import org.slf4j.LoggerFactory
+import org.springframework.beans.factory.ObjectProvider
+
+/**
+ * Intermittently scans the entire database looking for records created by caching agents that
+ * are no longer configured.
+ */
+class SqlUnknownAgentCleanupAgent(
+  private val providerRegistry: ObjectProvider<ProviderRegistry>,
+  private val jooq: DSLContext,
+  private val registry: Registry,
+  private val sqlNames: SqlNames
+) : RunnableAgent, CustomScheduledAgent, SqlAgent {
+
+  private val log by lazy { LoggerFactory.getLogger(javaClass) }
+
+  private val deletedId = registry.createId("sql.cacheCleanupAgent.dataTypeRecordsDeleted")
+  private val timingId = registry.createId("sql.cacheCleanupAgent.dataTypeCleanupDuration")
+
+  override fun run() {
+    log.info("Scanning for cache records to cleanup")
+
+    val (agentTypes, agentDataTypes) = findAgentDataTypes()
+    val runState = RunState(agentTypes)
+
+    val numDataTypes = agentDataTypes.size
+    log.info("Found {} cache data types generated from {} agent types", numDataTypes, agentTypes.size)
+
+    var failures = 0
+    withPool(ConnectionPools.CACHE_WRITER.value) {
+      agentDataTypes.forEachIndexed { i, dataType ->
+        log.info("Scanning '$dataType' (${i + 1}/$numDataTypes) cache records to cleanup")
+        try {
+          registry.timer(timingId.withTag("dataType", dataType)).record {
+            cleanTable(CacheTable.RELATIONSHIP, dataType, runState)
+            cleanTable(CacheTable.RESOURCE, dataType, runState)
+          }
+        } catch (e: SQLException) {
+          log.error("Failed to cleanup '$dataType'", e)
+          failures++
+        }
+      }
+    }
+
+    log.info("Finished cleanup ($failures failures)")
+  }
+
+  /**
+   * If the table for [dataType] has not been touched yet, scan through each record it contains,
+   * deleting all records that do not correlate to a currently configured agent.
+   */
+  private fun cleanTable(cacheTable: CacheTable, dataType: String, state: RunState) {
+    val tableName = cacheTable.getName(sqlNames, dataType)
+
+    if (state.touchedTables.contains(tableName)) {
+      // Nothing to do here, we've already processed this table.
+      return
+    }
+    log.debug("Checking table '$tableName' for '$dataType' data cleanup")
+
+    val tableExists = SqlUtil.getTablesLike(jooq, tableName)
+
+    if (!tableExists.next()) {
+      log.debug("Table '$tableName' not found")
+      state.touchedTables.add(tableName)
+      return
+    }
+
+    val rs = jooq.select(*cacheTable.fields)
+      .from(table(tableName))
+      .fetch()
+      .intoResultSet()
+
+    val cleanedAgentTypes = mutableSetOf<String>()
+    val idsToClean = mutableListOf<String>()
+    while (rs.next()) {
+      val agentType = processRelAgentTypeValue(rs.getString(2))
+      if (!state.agentTypes.contains(agentType)) {
+        idsToClean.add(rs.getString(1))
+        cleanedAgentTypes.add(agentType)
+      }
+    }
+
+    if (idsToClean.isNotEmpty()) {
+      log.info(
+        "Found ${idsToClean.size} records to cleanup from '$tableName' for data type '$dataType'. " +
+          "Reason: Data generated by unknown caching agents ($cleanedAgentTypes)"
+      )
+      idsToClean.chunked(100) { chunk ->
+        jooq.deleteFrom(table(tableName))
+          .where(field(cacheTable.idColumn()).`in`(*chunk.toTypedArray()))
+          .execute()
+      }
+    }
+
+    state.touchedTables.add(tableName)
+
+    registry
+      .counter(deletedId.withTags("dataType", dataType, "table", cacheTable.name))
+      .increment(idsToClean.size.toLong())
+  }
+
+  /**
+   * The "rel_agent" column value is a little wonky. It uses a format of `{dataType}:{agentName}`, but we only want the
+   * agent name, so we'll split on the colon value, removing the first element.
+   *
+   * TODO(rz): The Eureka health agents are particularly annoying, since they're just named after the HTTP endpoint
+   * they hit. This case is handled specifically, but we should just change the agent name to have better consistency
+   * with other agent names.
+   */
+  private fun processRelAgentTypeValue(agentType: String): String =
+    agentType.split(":").let {
+      if (it.size == 1) {
+        agentType
+      } else {
+        // Gross little hack here for Eureka agents.
+        if (agentType.startsWith("http://") || agentType.startsWith("https://")) {
+          agentType
+        } else {
+          it.subList(1, it.size).joinToString(":")
+        }
+      }
+    }
+
+  /**
+   * Returns a set of all known caching agent names and another set of all known authoritative
+   * data types from those caching agents.
+   *
+   * Agent names will be used to identify what records in the database are no longer attached
+   * to existing caching agents, whereas the data types themselves are needed to create the
+   * SQL table names, as the tables are derived from the data types, not the agents.
+   */
+  private fun findAgentDataTypes(): Pair<Set<String>, Set<String>> {
+    var result: Pair<Set<String>, Set<String>> = Pair(setOf(), setOf())
+    providerRegistry.ifAvailable { registry ->
+      val agents = registry.providers
+        .flatMap { it.agents }
+        .filterIsInstance<CachingAgent>()
+
+      val dataTypes = agents
+        .flatMap { it.providedDataTypes }
+        .filter { it.authority == AUTHORITATIVE }
+        .map { it.typeName }
+        .toSet()
+
+      result = Pair(agents.mapNotNull { sqlNames.checkAgentName(it.agentType) }.toSet(), dataTypes)
+    }
+    return result
+  }
+
+  /**
+   * Contains per-run state of this cleanup agent.
+   */
+  private data class RunState(
+    val agentTypes: Set<String>,
+    val touchedTables: MutableList<String> = mutableListOf()
+  )
+
+  /**
+   * Abstracts the logical differences--as far as this agent is concerned--between the two
+   * varieties of cache tables: the table names and the associated fields we need to read
+   * from the database.
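+   *
+   * For an (illustrative) data type "instances": RESOURCE reads (id, agent) from the resource
+   * table built by [SqlNames.resourceTableName], while RELATIONSHIP reads (uuid, rel_agent)
+   * from the companion "_rel" table built by [SqlNames.relTableName].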
+   */
+  private enum class CacheTable(val fields: Array<Field<*>>) {
+    RESOURCE(arrayOf(field("id"), field("agent"))),
+    RELATIONSHIP(arrayOf(field("uuid"), field("rel_agent")));
+
+    fun idColumn(): String =
+      when (this) {
+        RESOURCE -> "id"
+        RELATIONSHIP -> "uuid"
+      }
+
+    fun getName(sqlNames: SqlNames, dataType: String): String =
+      when (this) {
+        RESOURCE -> sqlNames.resourceTableName(dataType)
+        RELATIONSHIP -> sqlNames.relTableName(dataType)
+      }
+  }
+
+  override fun getProviderName(): String = CoreProvider.PROVIDER_NAME
+  override fun getPollIntervalMillis(): Long = DEFAULT_POLL_INTERVAL
+  override fun getTimeoutMillis(): Long = DEFAULT_TIMEOUT
+  override fun getAgentType(): String = javaClass.simpleName
+
+  companion object {
+    private val DEFAULT_POLL_INTERVAL = TimeUnit.MINUTES.toMillis(2)
+    private val DEFAULT_TIMEOUT = TimeUnit.MINUTES.toMillis(1)
+  }
+}
diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cluster/SqlCachingPodsObserver.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cluster/SqlCachingPodsObserver.kt
new file mode 100644
index 00000000000..a6855941f85
--- /dev/null
+++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cluster/SqlCachingPodsObserver.kt
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2021 OpsMx
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.cats.sql.cluster
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder
+import com.netflix.spinnaker.cats.agent.Agent
+import com.netflix.spinnaker.cats.cluster.NodeIdentity
+import com.netflix.spinnaker.cats.cluster.ShardingFilter
+import com.netflix.spinnaker.cats.sql.SqlUtil
+import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider
+import com.netflix.spinnaker.config.ConnectionPools
+import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService
+import com.netflix.spinnaker.kork.sql.routing.withPool
+import org.jooq.DSLContext
+import org.jooq.impl.DSL
+import org.jooq.impl.DSL.table
+import org.slf4j.LoggerFactory
+import org.springframework.dao.DataIntegrityViolationException
+import java.sql.SQLException
+import java.util.concurrent.Executors
+import java.util.concurrent.ScheduledExecutorService
+import java.util.concurrent.TimeUnit
+import kotlin.math.abs
+
+class SqlCachingPodsObserver(
+  private val jooq: DSLContext,
+  private val nodeIdentity: NodeIdentity,
+  private val tableNamespace: String?
+    = null,
+  private val dynamicConfigService: DynamicConfigService,
+  private val liveReplicasScheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor(
+    ThreadFactoryBuilder().setNameFormat(SqlCachingPodsObserver::class.java.simpleName + "-%d").build()
+  )
+) : ShardingFilter, Runnable {
+  private val log = LoggerFactory.getLogger(javaClass)
+  private var podCount: Int = 0
+  private var podIndex: Int = -1
+  private var ttlSeconds = dynamicConfigService.getConfig(Long::class.java, "cache-sharding.replica-ttl-seconds", 60)
+
+  companion object {
+    private val POOL_NAME = ConnectionPools.CACHE_WRITER.value
+    const val LAST_HEARTBEAT_TIME = "last_heartbeat_time"
+    const val POD_ID = "pod_id"
+  }
+  private val replicasReferenceTable = "caching_replicas"
+  private val replicasTable = if (tableNamespace.isNullOrBlank()) {
+    replicasReferenceTable
+  } else {
+    "${replicasReferenceTable}_$tableNamespace"
+  }
+
+  init {
+    if (!tableNamespace.isNullOrBlank()) {
+      withPool(POOL_NAME) {
+        SqlUtil.createTableLike(jooq, replicasTable, replicasReferenceTable)
+      }
+    }
+    refreshHeartbeat(TimeUnit.SECONDS.toMillis(ttlSeconds))
+    val recheckInterval =
+      dynamicConfigService.getConfig(Long::class.java, "cache-sharding.heartbeat-interval-seconds", 30)
+    liveReplicasScheduler.scheduleAtFixedRate(this, 0, recheckInterval, TimeUnit.SECONDS)
+    log.info("Account based sharding across caching pods is enabled.")
+  }
+
+  override fun run() {
+    try {
+      refreshHeartbeat(TimeUnit.SECONDS.toMillis(60))
+    } catch (t: Throwable) {
+      log.error("Failed to manage replicas heartbeat", t)
+    }
+  }
+
+  private fun refreshHeartbeat(newTtl: Long) {
+    recordHeartbeat(newTtl)
+    deleteExpiredReplicas()
+    preFilter()
+  }
+
+  private fun recordHeartbeat(newTtl: Long) {
+    try {
+      withPool(POOL_NAME) {
+        val currentPodRecord = jooq.select()
+          .from(table(replicasTable))
+          .where(
+            DSL.field(POD_ID).eq(nodeIdentity.nodeIdentity)
+          )
+          .fetch()
+          .intoResultSet()
+        // Insert a heartbeat row if this pod has no record yet...
+        if (!currentPodRecord.next()) {
+          jooq.insertInto(table(replicasTable))
+            .columns(
+              DSL.field(POD_ID),
+              DSL.field(LAST_HEARTBEAT_TIME)
+            )
+            .values(
+              nodeIdentity.nodeIdentity,
+              System.currentTimeMillis() + newTtl
+            )
+            .execute()
+        } else {
+          // ...otherwise refresh the existing heartbeat.
+          jooq.update(table(replicasTable))
+            .set(DSL.field(LAST_HEARTBEAT_TIME), System.currentTimeMillis() + newTtl)
+            .where(DSL.field(POD_ID).eq(nodeIdentity.nodeIdentity))
+            .execute()
+        }
+      }
+    } catch (e: DataIntegrityViolationException) {
+      log.error("Unexpected DataIntegrityViolationException", e)
+    } catch (e: SQLException) {
+      log.error("Unexpected sql exception while trying to acquire agent lock", e)
+    }
+  }
+
+  private fun deleteExpiredReplicas() {
+    try {
+      withPool(POOL_NAME) {
+        val existingReplicas = jooq.select()
+          .from(table(replicasTable))
+          .fetch()
+          .intoResultSet()
+        val now = System.currentTimeMillis()
+        while (existingReplicas.next()) {
+          val expiry = existingReplicas.getLong(LAST_HEARTBEAT_TIME)
+          val podId = existingReplicas.getString(POD_ID)
+          if (now > expiry) {
+            try {
+              jooq.deleteFrom(table(replicasTable))
+                .where(
+                  DSL.field(POD_ID).eq(podId)
+                    .and(DSL.field(LAST_HEARTBEAT_TIME).eq(expiry))
+                )
+                .execute()
+              log.info("Deleted expired entry having id : {} and expiry millis : {}", podId, expiry)
+            } catch (e: SQLException) {
+              // This exception can be safely ignored, as another pod might have succeeded.
+              log.info(
+                "Unable to delete replica entry ${existingReplicas.getString(POD_ID)} with expiry " +
+                  "$expiry, at the moment.",
+                e
+              )
+            }
+          }
+        }
+      }
+    } catch (e: SQLException) {
+      log.error("Unexpected sql exception while trying to get replica records", e)
+    }
+  }
+
+  private fun getAccountName(agentType: String): String {
+    return if (agentType.contains("/")) agentType.substring(0, agentType.indexOf('/')) else agentType
+  }
+
+  private fun preFilter() {
+    var counter = 0
+    var index = -1
+    try {
+      withPool(POOL_NAME) {
+        val cachingPods = jooq.select()
+          .from(table(replicasTable))
+          .orderBy(DSL.field(POD_ID))
+          .fetch()
+          .intoResultSet()
+
+        while (cachingPods.next()) {
+          if (cachingPods.getString(POD_ID) == nodeIdentity.nodeIdentity) {
+            index = counter
+          }
+          counter++
+        }
+      }
+    } catch (e: SQLException) {
+      log.error("Failed to fetch live pods count: ${e.message}")
+    }
+    if (counter == 0 || index == -1) {
+      throw RuntimeException("No caching pod heartbeat records detected. Sharding logic can't be applied!")
+    }
+    podCount = counter
+    podIndex = index
+    log.debug("Pod count : {} and current pod's index : {}", podCount, podIndex)
+  }
+
+  override fun filter(agent: Agent): Boolean {
+    if (agent.providerName == CoreProvider.PROVIDER_NAME) {
+      return true
+    }
+    if (podCount == 1 || abs(getAccountName(agent.agentType).hashCode() % podCount) == podIndex) {
+      return true
+    }
+    return false
+  }
+}
diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cluster/SqlClusteredAgentScheduler.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cluster/SqlClusteredAgentScheduler.kt
new file mode 100644
index 00000000000..b02e0cfb681
--- /dev/null
+++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/cluster/SqlClusteredAgentScheduler.kt
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.cats.sql.cluster
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder
+import com.netflix.spinnaker.cats.agent.Agent
+import com.netflix.spinnaker.cats.agent.AgentExecution
+import com.netflix.spinnaker.cats.agent.AgentLock
+import com.netflix.spinnaker.cats.agent.AgentScheduler
+import com.netflix.spinnaker.cats.agent.AgentSchedulerAware
+import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation
+import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation.elapsedTimeMs
+import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider
+import com.netflix.spinnaker.cats.cluster.NodeIdentity
+import com.netflix.spinnaker.cats.cluster.NodeStatusProvider
+import com.netflix.spinnaker.cats.cluster.ShardingFilter
+import com.netflix.spinnaker.cats.module.CatsModuleAware
+import com.netflix.spinnaker.cats.sql.SqlUtil
+import com.netflix.spinnaker.config.ConnectionPools
+import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService
+import com.netflix.spinnaker.kork.sql.routing.withPool
+import java.sql.SQLException
+import java.util.regex.Pattern
+import java.util.regex.Pattern.CASE_INSENSITIVE
+import org.jooq.DSLContext
+import org.jooq.impl.DSL.field
+import org.jooq.impl.DSL.table
+import org.slf4j.LoggerFactory
+import org.springframework.dao.DataIntegrityViolationException
+import java.util.concurrent.*
+
+/**
+ * IMPORTANT: Using SQL for locking isn't a good idea. By enabling this scheduler, you'll be adding a fair amount of
+ * unnecessary load to your database. This implementation is offered for operational topology simplicity, but is not
+ * recommended for real workloads. Instead, use the Redis scheduler (`redis.scheduler.enabled=true` and
+ * `sql.scheduler.enabled=false`) or implement a scheduler based on ZooKeeper, etcd, consul, and so on.
+ */
+class SqlClusteredAgentScheduler(
+  private val jooq: DSLContext,
+  private val nodeIdentity: NodeIdentity,
+  private val intervalProvider: AgentIntervalProvider,
+  private val nodeStatusProvider: NodeStatusProvider,
+  private val dynamicConfigService: DynamicConfigService,
+  enabledAgentPattern: String,
+  private val disabledAgentsConfig: List<String>,
+  agentLockAcquisitionIntervalSeconds: Long? = null,
+  private val tableNamespace: String?
+    = null,
+  private val agentExecutionPool: ExecutorService = Executors.newCachedThreadPool(
+    ThreadFactoryBuilder().setNameFormat(AgentExecutionAction::class.java.simpleName + "-%d").build()
+  ),
+  lockPollingScheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor(
+    ThreadFactoryBuilder().setNameFormat(SqlClusteredAgentScheduler::class.java.simpleName + "-%d").build()
+  ),
+  private val shardingFilter: ShardingFilter
+) : CatsModuleAware(), AgentScheduler<AgentLock>, Runnable {
+
+  private val log = LoggerFactory.getLogger(javaClass)
+
+  private val agents: MutableMap<String, AgentExecutionAction> = ConcurrentHashMap()
+  private val activeAgents: MutableMap<String, NextAttempt> = ConcurrentHashMap()
+  private val activeAgentsFutures: MutableMap<String, Future<*>> = ConcurrentHashMap()
+  private val enabledAgents: Pattern
+
+  private val referenceTable = "cats_agent_locks"
+  private val lockTable = if (tableNamespace.isNullOrBlank()) {
+    referenceTable
+  } else {
+    "${referenceTable}_$tableNamespace"
+  }
+
+  init {
+    if (!tableNamespace.isNullOrBlank()) {
+      withPool(POOL_NAME) {
+        SqlUtil.createTableLike(jooq, lockTable, referenceTable)
+      }
+    }
+
+    val lockInterval = agentLockAcquisitionIntervalSeconds ?: 1L
+    lockPollingScheduler.scheduleAtFixedRate(this, 0, lockInterval, TimeUnit.SECONDS)
+    enabledAgents = Pattern.compile(enabledAgentPattern, CASE_INSENSITIVE)
+  }
+
+  override fun schedule(
+    agent: Agent,
+    agentExecution: AgentExecution,
+    executionInstrumentation: ExecutionInstrumentation
+  ) {
+    if (agent is AgentSchedulerAware) {
+      agent.agentScheduler = this
+    }
+    agents[agent.agentType] = AgentExecutionAction(agent, agentExecution, executionInstrumentation)
+  }
+
+  override fun unschedule(agent: Agent) {
+    releaseLock(agent.agentType, 0) // Release the lock immediately
+    agents.remove(agent.agentType)
+  }
+
+  override fun run() {
+    if (nodeStatusProvider.isNodeEnabled) {
+      try {
+        runAgents()
+      } catch (t: Throwable) {
+        log.error("Failed running cache agents", t)
+      }
+    }
+  }
+
+  private fun runAgents() {
+    val acquiredAgents = tryAcquire()
+    activeAgents.putAll(acquiredAgents)
+    acquiredAgents.forEach { agentType, nextAttempt ->
+      val exec = agents[agentType]
+      if (exec != null) {
+        activeAgentsFutures[agentType] = agentExecutionPool.submit(AgentJob(nextAttempt, exec, this::agentCompleted))
+      }
+    }
+  }
+
+  private fun tryAcquire(): Map<String, NextAttempt> {
+    return findCandidateAgentLocks()
+      .map {
+        val agentType = it.key
+        val agentExecution = it.value
+        val interval = intervalProvider.getInterval(agentExecution.agent)
+
+        val currentTime = System.currentTimeMillis()
+        if (tryAcquireSingle(agentType, currentTime, interval.timeout)) {
+          Pair(agentType, NextAttempt(currentTime, interval.interval, interval.errorInterval))
+        } else {
+          null
+        }
+      }
+      .filterNotNull()
+      .toMap()
+  }
+
+  private fun findCandidateAgentLocks(): Map<String, AgentExecutionAction> {
+    cleanupZombieAgents()
+    val skip = HashMap(activeAgents).entries
+    val maxConcurrentAgents = dynamicConfigService.getConfig(Int::class.java, "sql.agent.max-concurrent-agents", 100)
+    val availableAgents = maxConcurrentAgents - skip.size
+    if (availableAgents <= 0) {
+      log.debug(
+        "Not acquiring more locks (maxConcurrentAgents: {}, activeAgents: {}, runningAgents: {})",
+        maxConcurrentAgents,
+        skip.size,
+        skip.joinToString(",")
+      )
+      return emptyMap()
+    }
+
+    val disabledAgents = dynamicConfigService.getConfig(
+      String::class.java,
+      "sql.agent.disabled-agents",
+      disabledAgentsConfig.joinToString(",")
+    ).split(",").map { it.trim() }
+
+    val candidateAgentLocks = agents
+      .filter { shardingFilter.filter(it.value.agent) }
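+      // Candidate agents: owned by this pod per the sharding filter, not already running,
+      // matching the enabled pattern, and not explicitly disabled.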
+      .filter { !activeAgents.containsKey(it.key) }
+      .filter { enabledAgents.matcher(it.key).matches() }
+      .filterNot { disabledAgents.contains(it.key) }
+      .toMutableMap()
+
+    log.debug(
+      "Agents running: {}, agents disabled: {}. Picking next agents to run from: {}",
+      activeAgents.keys, disabledAgents, candidateAgentLocks.keys
+    )
+
+    withPool(POOL_NAME) {
+      val existingLocks = jooq.select(field("agent_name"), field("lock_expiry"))
+        .from(table(lockTable))
+        .fetch()
+        .intoResultSet()
+
+      val now = System.currentTimeMillis()
+      while (existingLocks.next()) {
+        val lockExpiry = existingLocks.getLong("lock_expiry")
+        if (now > lockExpiry) {
+          try {
+            jooq.deleteFrom(table(lockTable))
+              .where(
+                field("agent_name").eq(existingLocks.getString("agent_name"))
+                  .and(field("lock_expiry").eq(lockExpiry))
+              )
+              .execute()
+          } catch (e: SQLException) {
+            log.error(
+              "Failed deleting agent lock ${existingLocks.getString("agent_name")} with expiry " +
+                lockExpiry,
+              e
+            )
+
+            candidateAgentLocks.remove(existingLocks.getString("agent_name"))
+          }
+        } else {
+          candidateAgentLocks.remove(existingLocks.getString("agent_name"))
+        }
+      }
+    }
+
+    log.debug("Next agents to run: {}, max: {}", candidateAgentLocks.keys, availableAgents)
+
+    val trimmedCandidates = mutableMapOf<String, AgentExecutionAction>()
+    candidateAgentLocks.entries
+      .shuffled()
+      .forEach {
+        if (trimmedCandidates.size >= availableAgents) {
+          log.warn(
+            "Dropping caching agent: {}. Wanted to run {} agents, but a max of {} was configured and there are " +
+              "already {} currently running. Consider increasing sql.agent.max-concurrent-agents",
+            it.key, candidateAgentLocks.size, maxConcurrentAgents, skip
+          )
+          return@forEach
+        }
+        trimmedCandidates[it.key] = it.value
+      }
+
+    return trimmedCandidates
+  }
+
+  private fun cleanupZombieAgents() {
+    val zombieAgentThreshold = dynamicConfigService.getConfig(Long::class.java, "sql.agent.zombie-threshold-ms", 3600000)
+    activeAgents
+      .filter { it.value.currentTime < System.currentTimeMillis() - zombieAgentThreshold }
+      .forEach {
+        log.warn("Found zombie agent {}, removing it", it.key)
+        activeAgents.remove(it.key, it.value)
+        // Cancel zombie futures, interrupting their AgentExecutionAction threads
+        val f = activeAgentsFutures.remove(it.key)
+        if (f == null) {
+          log.warn("Agent execution without future for cancelling it: {}", it.key)
+        } else {
+          if (!f.cancel(true) && !f.isCancelled) {
+            log.error(
+              "Unable to cancel execution for agent {} after {}ms (Future.cancel returned false). This may leak resources!",
+              it.key, zombieAgentThreshold
+            )
+          }
+        }
+      }
+  }
+
+  private fun tryAcquireSingle(agentType: String, now: Long, timeout: Long): Boolean {
+    try {
+      withPool(POOL_NAME) {
+        jooq.insertInto(table(lockTable))
+          .columns(
+            field("agent_name"),
+            field("owner_id"),
+            field("lock_acquired"),
+            field("lock_expiry")
+          )
+          .values(
+            agentType,
+            nodeIdentity.nodeIdentity,
+            now,
+            now + timeout
+          )
+          .execute()
+      }
+    } catch (e: DataIntegrityViolationException) {
+      // Integrity constraint exceptions are ok: it means another clouddriver grabbed the lock before us.
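+      // The insert doubles as a cheap distributed lock: the constraint violation above implies a
+      // unique key on agent_name, so at most one clouddriver wins; losing the race is the normal path.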
+ return false + } catch (e: SQLException) { + log.error("Unexpected sql exception while trying to acquire agent lock", e) + return false + } + return true + } + + private fun releaseLock(agentType: String, nextExecutionTime: Long) { + val newTtl = nextExecutionTime - System.currentTimeMillis() + + withPool(POOL_NAME) { + if (newTtl < dynamicConfigService.getConfig(Long::class.java, "sql.agent.release-threshold-ms", 500)) { + try { + jooq.delete(table(lockTable)).where(field("agent_name").eq(agentType)).execute() + } catch (e: SQLException) { + log.error("Failed to immediately release lock for agent: $agentType", e) + } + } else { + try { + jooq.update(table(lockTable)) + .set(field("lock_expiry"), System.currentTimeMillis() + newTtl) + .where(field("agent_name").eq(agentType)) + .execute() + } catch (e: SQLException) { + log.error("Failed to update lock TTL for agent: $agentType", e) + } + } + } + } + + private fun agentCompleted(agentType: String, nextExecutionTime: Long) { + try { + releaseLock(agentType, nextExecutionTime) + } finally { + activeAgents.remove(agentType) + activeAgentsFutures.remove(agentType) + } + } + + companion object { + private val POOL_NAME = ConnectionPools.CACHE_WRITER.value + } +} + +private enum class Status { + SUCCESS, FAILURE +} + +private class AgentExecutionAction( + val agent: Agent, + val agentExecution: AgentExecution, + val executionInstrumentation: ExecutionInstrumentation +) { + + fun execute(): Status { + val startTimeMs = System.currentTimeMillis() + return try { + executionInstrumentation.executionStarted(agent) + agentExecution.executeAgent(agent) + executionInstrumentation.executionCompleted(agent, elapsedTimeMs(startTimeMs)) + Status.SUCCESS + } catch (t: Throwable) { + executionInstrumentation.executionFailed(agent, t, elapsedTimeMs(startTimeMs)) + Status.FAILURE + } + } +} + +private class AgentJob( + private val lockReleaseTime: NextAttempt, + private val action: AgentExecutionAction, + private val schedulerCallback: (agentType: String, nextExecutionTime: Long) -> Unit +) : Runnable { + + override fun run() { + var status = Status.FAILURE + try { + status = action.execute() + } finally { + schedulerCallback(action.agent.agentType, lockReleaseTime.getNextTime(status)) + } + } +} + +private data class NextAttempt( + val currentTime: Long, + val successInterval: Long, + val errorInterval: Long +) { + fun getNextTime(status: Status): Long = + if (status == Status.SUCCESS) { + currentTime + successInterval + } else { + currentTime + errorInterval + } +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/controllers/CatsSqlAdminController.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/controllers/CatsSqlAdminController.kt new file mode 100644 index 00000000000..2a58de334d2 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/cats/sql/controllers/CatsSqlAdminController.kt @@ -0,0 +1,138 @@ +package com.netflix.spinnaker.cats.sql.controllers + +import com.netflix.spinnaker.cats.sql.SqlUtil +import com.netflix.spinnaker.cats.sql.cache.SqlSchemaVersion +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator +import com.netflix.spinnaker.kork.sql.config.SqlProperties +import com.netflix.spinnaker.security.AuthenticatedRequest +import java.sql.DriverManager +import org.jooq.impl.DSL +import org.slf4j.LoggerFactory +import org.springframework.beans.factory.annotation.Value +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import 
org.springframework.boot.context.properties.EnableConfigurationProperties
+import org.springframework.security.authentication.BadCredentialsException
+import org.springframework.web.bind.annotation.PathVariable
+import org.springframework.web.bind.annotation.PutMapping
+import org.springframework.web.bind.annotation.RequestMapping
+import org.springframework.web.bind.annotation.RestController
+
+// TODO: Replace validatePermissions() with a to-be-implemented fiat isAdmin() decorator
+
+@ConditionalOnProperty("sql.cache.enabled")
+@EnableConfigurationProperties(SqlProperties::class)
+@RestController
+@RequestMapping("/admin/db")
+class CatsSqlAdminController(
+  private val fiat: FiatPermissionEvaluator,
+  private val properties: SqlProperties
+) {
+
+  companion object {
+    private val log by lazy { LoggerFactory.getLogger(CatsSqlAdminController::class.java) }
+  }
+
+  @PutMapping(path = ["/truncate/{namespace}"])
+  fun truncateTables(
+    @PathVariable("namespace") truncateNamespace: String,
+    @Value("\${sql.table-namespace:#{null}}") currentNamespace: String?
+  ): CleanTablesResult {
+
+    validatePermissions()
+    validateParams(currentNamespace, truncateNamespace)
+
+    val conn = DriverManager.getConnection(
+      properties.migration.jdbcUrl,
+      properties.migration.user,
+      properties.migration.password
+    )
+
+    val tablesTruncated = mutableListOf<String>()
+
+    conn.use { c ->
+      val jooq = DSL.using(c, properties.getDefaultConnectionPoolProperties().dialect)
+      val rs = SqlUtil.getTablesLike(jooq, "cats_v${SqlSchemaVersion.current()}_${truncateNamespace}_")
+
+      while (rs.next()) {
+        val table = rs.getString(1)
+        val truncateSql = "truncate table `$table`"
+        log.info("Truncating $table")
+
+        jooq.query(truncateSql).execute()
+        tablesTruncated.add(table)
+      }
+    }
+
+    return CleanTablesResult(tableCount = tablesTruncated.size, tables = tablesTruncated)
+  }
+
+  @PutMapping(path = ["/drop/{namespace}"])
+  fun dropTables(
+    @PathVariable("namespace") dropNamespace: String,
+    @Value("\${sql.table-namespace:#{null}}") currentNamespace: String?
+  ): CleanTablesResult {
+
+    validatePermissions()
+    validateParams(currentNamespace, dropNamespace)
+
+    val conn = DriverManager.getConnection(
+      properties.migration.jdbcUrl,
+      properties.migration.user,
+      properties.migration.password
+    )
+
+    val tablesDropped = mutableListOf<String>()
+
+    conn.use { c ->
+      val jooq = DSL.using(c, properties.getDefaultConnectionPoolProperties().dialect)
+      val rs = SqlUtil.getTablesLike(jooq, "cats_v${SqlSchemaVersion.current()}_${dropNamespace}_")
+
+      while (rs.next()) {
+        val table = rs.getString(1)
+        val dropSql = "drop table `$table`"
+        log.info("Dropping $table")
+
+        jooq.query(dropSql).execute()
+        tablesDropped.add(table)
+      }
+    }
+
+    return CleanTablesResult(tableCount = tablesDropped.size, tables = tablesDropped)
+  }
+
+  private fun validateParams(currentNamespace: String?, targetNamespace: String) {
+    if (currentNamespace == null) {
+      throw IllegalStateException("truncate can only be called when sql.tableNamespace is set")
+    }
+
+    if (!targetNamespace.matches("""^\w+$""".toRegex())) {
+      throw IllegalArgumentException("tableNamespace can only contain characters [a-z, A-Z, 0-9, _]")
+    }
+
+    if (currentNamespace.toLowerCase() == targetNamespace.toLowerCase()) {
+      throw IllegalArgumentException("truncate cannot be called for the currently active namespace")
+    }
+  }
+
+  private fun validatePermissions() {
+    val user = AuthenticatedRequest.getSpinnakerUser()
+    if (!user.isPresent) {
+      throw BadCredentialsException("Unauthorized")
+    }
+
+    try {
+      val perms = fiat.getPermission(user.get())
+      if (!perms.isAdmin) {
+        throw BadCredentialsException("Unauthorized")
+      }
+    } catch (e: Exception) {
+      log.error("Failed looking up fiat permissions for user ${user.get()}")
+      throw BadCredentialsException("Unauthorized", e)
+    }
+  }
+}
+
+data class CleanTablesResult(
+  val tableCount: Int,
+  val tables: Collection<String>
+)
diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlAgentProperties.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlAgentProperties.kt
new file mode 100644
index 00000000000..b04dd6899f2
--- /dev/null
+++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlAgentProperties.kt
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.spinnaker.config + +import org.springframework.boot.context.properties.ConfigurationProperties + +@ConfigurationProperties("sql.agent") +class SqlAgentProperties { + var enabledPattern: String = ".*" + + /** + * Optionally disable specific agents based on their fully qualified name. + */ + var disabledAgents: List<String> = emptyList() + + var maxConcurrentAgents: Int = 100 + var agentLockAcquisitionIntervalSeconds: Long = 1 + var poll: SqlPollProperties = SqlPollProperties() +} + +class SqlPollProperties { + var intervalSeconds: Long = 30 + var errorIntervalSeconds: Long = 30 + var timeoutSeconds: Long = 300 +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlAgentSchedulerConfiguration.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlAgentSchedulerConfiguration.kt new file mode 100644 index 00000000000..5445e708c14 --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlAgentSchedulerConfiguration.kt @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.config + +import com.netflix.spinnaker.cats.agent.AgentScheduler +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider +import com.netflix.spinnaker.cats.cluster.DefaultNodeIdentity +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider +import com.netflix.spinnaker.cats.cluster.ShardingFilter +import com.netflix.spinnaker.cats.sql.cluster.SqlClusteredAgentScheduler +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import org.jooq.DSLContext +import org.springframework.beans.factory.annotation.Value +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration + +@Configuration +@ConditionalOnProperty(value = ["caching.write-enabled"], matchIfMissing = true) +class SqlAgentSchedulerConfiguration { + + @Bean + @ConditionalOnProperty( + value = [ + "sql.enabled", + "sql.scheduler.enabled" + ] + ) + fun sqlAgentScheduler( + jooq: DSLContext, + agentIntervalProvider: AgentIntervalProvider, + nodeStatusProvider: NodeStatusProvider, + dynamicConfigService: DynamicConfigService, + @Value("\${sql.table-namespace:#{null}}") tableNamespace: String?, + sqlAgentProperties: SqlAgentProperties, + shardingFilter: ShardingFilter + ): AgentScheduler<*> { + return SqlClusteredAgentScheduler( + jooq = jooq, + nodeIdentity = DefaultNodeIdentity(), + intervalProvider = agentIntervalProvider, + nodeStatusProvider = nodeStatusProvider, + dynamicConfigService = dynamicConfigService, + enabledAgentPattern = sqlAgentProperties.enabledPattern, + disabledAgentsConfig = sqlAgentProperties.disabledAgents, + tableNamespace = tableNamespace, + agentLockAcquisitionIntervalSeconds = sqlAgentProperties.agentLockAcquisitionIntervalSeconds, + shardingFilter = shardingFilter + ) + } +} diff --git
a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlCacheConfiguration.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlCacheConfiguration.kt new file mode 100644 index 00000000000..205bd2657bc --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlCacheConfiguration.kt @@ -0,0 +1,194 @@ +package com.netflix.spinnaker.config + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.cats.agent.AgentScheduler +import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation +import com.netflix.spinnaker.cats.cache.NamedCacheFactory +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider +import com.netflix.spinnaker.cats.module.CatsModule +import com.netflix.spinnaker.cats.provider.Provider +import com.netflix.spinnaker.cats.provider.ProviderRegistry +import com.netflix.spinnaker.cats.sql.SqlProviderRegistry +import com.netflix.spinnaker.cats.sql.cache.SpectatorSqlCacheMetrics +import com.netflix.spinnaker.cats.sql.cache.SqlCacheMetrics +import com.netflix.spinnaker.cats.sql.cache.SqlCleanupStaleOnDemandCachesAgent +import com.netflix.spinnaker.cats.sql.cache.SqlNamedCacheFactory +import com.netflix.spinnaker.cats.sql.cache.SqlNames +import com.netflix.spinnaker.cats.sql.cache.SqlTableMetricsAgent +import com.netflix.spinnaker.cats.sql.cache.SqlUnknownAgentCleanupAgent +import com.netflix.spinnaker.cats.cluster.NoopShardingFilter +import com.netflix.spinnaker.cats.cluster.ShardingFilter +import com.netflix.spinnaker.clouddriver.cache.CustomSchedulableAgentIntervalProvider +import com.netflix.spinnaker.clouddriver.cache.DiscoveryStatusNodeStatusProvider +import com.netflix.spinnaker.clouddriver.sql.SqlAgent +import com.netflix.spinnaker.clouddriver.sql.SqlProvider +import com.netflix.spinnaker.kork.discovery.DiscoveryStatusListener +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.config.DefaultSqlConfiguration +import com.netflix.spinnaker.kork.sql.config.SqlProperties +import java.time.Clock +import java.time.Duration +import kotlin.contracts.ExperimentalContracts +import kotlinx.coroutines.ObsoleteCoroutinesApi +import kotlinx.coroutines.newFixedThreadPoolContext +import kotlinx.coroutines.slf4j.MDCContext +import org.jooq.DSLContext +import org.slf4j.LoggerFactory +import org.springframework.beans.factory.ObjectProvider +import org.springframework.beans.factory.annotation.Value +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.context.ApplicationContext +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.ComponentScan +import org.springframework.context.annotation.Configuration +import org.springframework.context.annotation.Import + +const val coroutineThreadPrefix = "catsSql" + +@ExperimentalContracts +@Configuration +@ConditionalOnProperty("sql.cache.enabled") +@Import(DefaultSqlConfiguration::class) +@EnableConfigurationProperties(SqlAgentProperties::class, SqlConstraintsProperties::class) +@ComponentScan("com.netflix.spinnaker.cats.sql.controllers") +class SqlCacheConfiguration { + + 
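// Wires the SQL-backed cats cache: cache metrics, the named cache factory, agent intervals and periodic cleanup agents. +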
companion object { + private val log = LoggerFactory.getLogger(SqlCacheConfiguration::class.java) + } + + @Bean + fun sqlCacheMetrics(registry: Registry): SqlCacheMetrics { + return SpectatorSqlCacheMetrics(registry) + } + + @Bean + fun catsModule( + providers: List<Provider>, + executionInstrumentation: List<ExecutionInstrumentation>, + cacheFactory: NamedCacheFactory, + agentScheduler: AgentScheduler<*> + ): CatsModule { + return CatsModule.Builder() + .providerRegistry(SqlProviderRegistry(providers, cacheFactory)) + .cacheFactory(cacheFactory) + .scheduler(agentScheduler) + .instrumentation(executionInstrumentation) + .build(providers) + } + + @Bean + fun sqlConstraints(jooq: DSLContext, sqlConstraintsProperties: SqlConstraintsProperties): SqlConstraints = + SqlConstraints(SqlConstraintsInitializer.getDefaultSqlConstraints(jooq.dialect()), sqlConstraintsProperties) + + /** + * sql.cache.async.poolSize: If set to a positive integer, a fixed thread pool of this size is created + * as part of a coroutineContext. If sql.cache.maxQueryConcurrency is also >1 (default value: 4), + * sql queries to fetch > 2 * sql.cache.readBatchSize cache keys will be made asynchronously in batches of + * maxQueryConcurrency size. + * + * sql.tableNamespace: Namespaces the data tables, as well as the agent lock table if using the SqlAgentScheduler. + * Table namespacing allows flipping to new/empty data tables within the same master if necessary to rebuild + * the cache from scratch, such as after disabling caching agents for an account/region. + */ + @ObsoleteCoroutinesApi + @Bean + fun cacheFactory( + jooq: DSLContext, + clock: Clock, + sqlProperties: SqlProperties, + cacheMetrics: SqlCacheMetrics, + dynamicConfigService: DynamicConfigService, + sqlConstraints: SqlConstraints, + mapper: ObjectMapper, + @Value("\${sql.cache.async-pool-size:0}") poolSize: Int, + @Value("\${sql.table-namespace:#{null}}") tableNamespace: String? + ): NamedCacheFactory { + if (tableNamespace != null && !tableNamespace.matches("""^\w+$""".toRegex())) { + throw IllegalArgumentException("tableNamespace can only contain characters [a-z, A-Z, 0-9, _]") + } + + /** + * newFixedThreadPoolContext was marked obsolete in Oct 2018, to be reimplemented as a new + * concurrency limiting threaded context factory with reduced context switch overhead. As of + * Feb 2019, the new implementation is unreleased.
See: https://github.com/Kotlin/kotlinx.coroutines/issues/261 + * + * TODO: switch to newFixedThreadPoolContext's replacement when ready + */ + val dispatcher = if (poolSize < 1) { + null + } else { + newFixedThreadPoolContext(nThreads = poolSize, name = coroutineThreadPrefix) + MDCContext() + } + + if (dispatcher != null) { + log.info("Configured coroutine context with newFixedThreadPoolContext of $poolSize threads") + } + + return SqlNamedCacheFactory( + jooq, + mapper, + dispatcher, + clock, + sqlProperties.retries, + tableNamespace, + cacheMetrics, + dynamicConfigService, + sqlConstraints + ) + } + + @Bean + fun agentIntervalProvider(sqlAgentProperties: SqlAgentProperties): AgentIntervalProvider { + return CustomSchedulableAgentIntervalProvider( + Duration.ofSeconds(sqlAgentProperties.poll.intervalSeconds).toMillis(), + Duration.ofSeconds(sqlAgentProperties.poll.errorIntervalSeconds).toMillis(), + Duration.ofSeconds(sqlAgentProperties.poll.timeoutSeconds).toMillis() + ) + } + + @Bean + @ConditionalOnExpression("\${sql.read-only:false} == false") + fun sqlTableMetricsAgent( + jooq: DSLContext, + registry: Registry, + clock: Clock, + @Value("\${sql.table-namespace:#{null}}") namespace: String? + ): SqlTableMetricsAgent = + SqlTableMetricsAgent(jooq, registry, clock, namespace) + + @Bean + @ConditionalOnExpression("\${sql.read-only:false} == false") + fun sqlCleanupStaleOnDemandCachesAgent( + applicationContext: ApplicationContext, + registry: Registry, + clock: Clock + ): SqlCleanupStaleOnDemandCachesAgent = + SqlCleanupStaleOnDemandCachesAgent(applicationContext, registry, clock) + + @Bean + @ConditionalOnExpression("!\${sql.read-only:false} && \${sql.unknown-agent-cleanup-agent.enabled:false}") + fun sqlUnknownAgentCleanupAgent( + providerRegistry: ObjectProvider<ProviderRegistry>, + jooq: DSLContext, + registry: Registry, + sqlConstraints: SqlConstraints, + @Value("\${sql.table-namespace:#{null}}") tableNamespace: String? + ): SqlUnknownAgentCleanupAgent = + SqlUnknownAgentCleanupAgent(providerRegistry, jooq, registry, SqlNames(tableNamespace, sqlConstraints)) + + @Bean + @ConditionalOnExpression("\${sql.read-only:false} == false") + fun sqlAgentProvider(agents: List<SqlAgent>): SqlProvider = + SqlProvider(agents.toMutableList()) + + @Bean + fun nodeStatusProvider(discoveryStatusListener: DiscoveryStatusListener): NodeStatusProvider { + return DiscoveryStatusNodeStatusProvider(discoveryStatusListener) + } +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConstraints.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConstraints.kt new file mode 100644 index 00000000000..eb490000fbe --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConstraints.kt @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.config + +import org.jooq.SQLDialect +import org.springframework.boot.context.properties.ConfigurationProperties + +class SqlConstraints( + val maxTableNameLength: Int, + val maxIdLength: Int, + val maxAgentLength: Int, +) { + + constructor(defaultConstraints: SqlConstraints, constraintsProperties: SqlConstraintsProperties) : this( + constraintsProperties.maxTableNameLength ?: defaultConstraints.maxTableNameLength, + constraintsProperties.maxIdLength ?: defaultConstraints.maxIdLength, + constraintsProperties.maxAgentLength ?: defaultConstraints.maxAgentLength + ) +} + +@ConfigurationProperties("sql.constraints") +class SqlConstraintsProperties { + var maxTableNameLength: Int? = null + var maxIdLength: Int? = null + var maxAgentLength: Int? = null +} + +object SqlConstraintsInitializer { + + fun getDefaultSqlConstraints(dialect: SQLDialect): SqlConstraints = + when(dialect) { + SQLDialect.POSTGRES -> + // https://www.postgresql.org/docs/current/limits.html + SqlConstraints(63, Int.MAX_VALUE, Int.MAX_VALUE) + else -> + // 352 * 2 + 64 (max rel_type length) == 768; 768 * 4 (utf8mb4) == 3072 == Aurora's max index length + SqlConstraints(64, 352, 127) + } +} diff --git a/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlShardingFilterConfiguration.kt b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlShardingFilterConfiguration.kt new file mode 100644 index 00000000000..8452e3ca40a --- /dev/null +++ b/cats/cats-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlShardingFilterConfiguration.kt @@ -0,0 +1,55 @@ +/* + * Copyright 2021 OpsMx + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.config + +import com.netflix.spinnaker.cats.cluster.DefaultNodeIdentity +import com.netflix.spinnaker.cats.sql.cluster.SqlCachingPodsObserver +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import org.jooq.DSLContext +import org.springframework.beans.factory.annotation.Value +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration + +@Configuration +@ConditionalOnProperty(value = ["caching.write-enabled"], matchIfMissing = true) +class SqlShardingFilterConfiguration { + + @Bean + @ConditionalOnProperty( + value = [ + "sql.enabled", + "sql.scheduler.enabled", + "cache-sharding.enabled" + ] + ) + fun shardingFilter( + jooq: DSLContext, + @Value("\${sql.table-namespace:#{null}}") tableNamespace: String?, + dynamicConfigService: DynamicConfigService + ): SqlCachingPodsObserver { + return SqlCachingPodsObserver( + jooq = jooq, + nodeIdentity = DefaultNodeIdentity(), + tableNamespace = tableNamespace, + dynamicConfigService = dynamicConfigService + ) + } + + +} diff --git a/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/MySqlCacheSpec.groovy b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/MySqlCacheSpec.groovy new file mode 100644 index 00000000000..8a689ee1612 --- /dev/null +++ b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/MySqlCacheSpec.groovy @@ -0,0 +1,101 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.cats.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.sql.cache.SqlCache +import com.netflix.spinnaker.cats.sql.cache.SqlCacheMetrics +import com.netflix.spinnaker.cats.sql.cache.SqlNamedCacheFactory +import com.netflix.spinnaker.config.SqlConstraintsInitializer +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.config.RetryProperties +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import org.jooq.SQLDialect +import org.testcontainers.DockerClientFactory +import spock.lang.Requires + +import java.time.Clock +import java.time.Instant +import java.time.ZoneId + +@Requires({ DockerClientFactory.instance().isDockerAvailable() }) +class MySqlCacheSpec extends SqlCacheSpec { + def providerCacheConfiguration = new StaticProviderCacheConfiguration(supportsFullEviction: false) + + @Override + Cache getSubject() { + def mapper = new ObjectMapper() + def clock = new Clock.FixedClock(Instant.EPOCH, ZoneId.of("UTC")) + def sqlRetryProperties = new SqlRetryProperties(new RetryProperties(1, 10), new RetryProperties(1, 10)) + + def dynamicConfigService = Mock(DynamicConfigService) { + getConfig(_ as Class, _ as String, _) >> 2 + } + + SqlTestUtil.TestDatabase testDatabase = SqlTestUtil.initTcMysqlDatabase() + context = testDatabase.context + dataSource = testDatabase.dataSource + + return new SqlCache( + "test", + context, + mapper, + null, + clock, + sqlRetryProperties, + "test", + Mock(SqlCacheMetrics), + dynamicConfigService, + new SqlConstraintsInitializer().getDefaultSqlConstraints(SQLDialect.MYSQL), + providerCacheConfiguration + ) + } + + def cleanup() { + providerCacheConfiguration.supportsFullEviction = false + } + + def "mergeAll with full eviction support"() { + when: + providerCacheConfiguration.supportsFullEviction = false + ((WriteableCache) cache).mergeAll("keys", [ + createData("keys-1"), + createData("keys-2") + ]) + ((WriteableCache) cache).mergeAll("keys", []) + def retrieved = ((SqlCache) cache).getAll("keys") + + then: + retrieved.size() == 2 + + when: + providerCacheConfiguration.supportsFullEviction = true + ((WriteableCache) cache).mergeAll("keys", [ + createData("keys-1"), + createData("keys-2") + ]) + ((WriteableCache) cache).mergeAll("keys", []) + + retrieved = ((SqlCache) cache).getAll("keys") + + then: + retrieved.isEmpty() + } +} diff --git a/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/PostgreSqlCacheSpec.groovy b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/PostgreSqlCacheSpec.groovy new file mode 100644 index 00000000000..43ee687e44d --- /dev/null +++ b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/PostgreSqlCacheSpec.groovy @@ -0,0 +1,68 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.sql.cache.SqlCache +import com.netflix.spinnaker.cats.sql.cache.SqlCacheMetrics +import com.netflix.spinnaker.config.SqlConstraintsInitializer +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.config.RetryProperties +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import org.jooq.SQLDialect +import org.testcontainers.DockerClientFactory +import spock.lang.Requires + +import java.time.Clock +import java.time.Instant +import java.time.ZoneId + +@Requires({ DockerClientFactory.instance().isDockerAvailable() }) +class PostgreSqlCacheSpec extends SqlCacheSpec { + + @Override + Cache getSubject() { + def mapper = new ObjectMapper() + def clock = new Clock.FixedClock(Instant.EPOCH, ZoneId.of("UTC")) + def sqlRetryProperties = new SqlRetryProperties(new RetryProperties(1, 10), new RetryProperties(1, 10)) + + def dynamicConfigService = Mock(DynamicConfigService) { + getConfig(_ as Class, _ as String, _) >> 2 + } + + SqlTestUtil.TestDatabase testDatabase = SqlTestUtil.initTcPostgresDatabase() + context = testDatabase.context + dataSource = testDatabase.dataSource + + return new SqlCache( + "test", + context, + mapper, + null, + clock, + sqlRetryProperties, + "test", + Mock(SqlCacheMetrics), + dynamicConfigService, + new SqlConstraintsInitializer().getDefaultSqlConstraints(SQLDialect.POSTGRES), + new StaticProviderCacheConfiguration(supportsFullEviction: false) + ) + } + +} diff --git a/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/SqlCacheSpec.groovy b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/SqlCacheSpec.groovy new file mode 100644 index 00000000000..52a2a051315 --- /dev/null +++ b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/SqlCacheSpec.groovy @@ -0,0 +1,122 @@ +package com.netflix.spinnaker.cats.sql + +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter +import com.netflix.spinnaker.cats.cache.WriteableCacheSpec +import com.netflix.spinnaker.cats.sql.cache.SqlCache +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import com.zaxxer.hikari.HikariDataSource +import org.jooq.DSLContext +import org.jooq.impl.DSL +import spock.lang.AutoCleanup +import spock.lang.Shared +import spock.lang.Unroll + +abstract class SqlCacheSpec extends WriteableCacheSpec { + + @Shared + DSLContext context + + @AutoCleanup("close") + HikariDataSource dataSource + + def cleanup() { + if (context != null) { + SqlTestUtil.cleanupDb(context) + } + } + + def 'should handle invalid type'() { + given: + def data = createData('blerp', [a: 'b']) + ((SqlCache) cache).merge('foo.bar', data) + + when: + def retrieved = ((SqlCache) cache).getAll('foo.bar') + + then: + retrieved.size() == 1 + retrieved.findAll { it.id == "blerp" }.size() == 1 + } + + def 'should not write an item if it is unchanged'() { + setup: + def data = createData('blerp', [a: 'b']) + + when: + ((SqlCache) cache).merge('foo', data) + + then: + 1 * ((SqlCache) cache).cacheMetrics.merge('test', 'foo', 1, 1, 0, 0, 1, 1, 0, 0) + + when: + ((SqlCache) cache).merge('foo', data) + + then: + // SqlCacheMetrics currently sets items to # of 
items stored. The redis impl + // sets this to # of items passed to merge, regardless of how many are actually stored + // after deduplication. TODO: Having both metrics would be nice. + 1 * ((SqlCache) cache).cacheMetrics.merge('test', 'foo', 1, 0, 0, 0, 1, 0, 0, 0) + } + + def 'mergeAll with two items that have the same id preserves the existing item'() { + given: 'one item in the cache' + String id = 'bar' + def itemOneAttributes = [att1: 'val1'] + CacheData itemOne = createData(id, itemOneAttributes) + def itemTwoAttributes = [att2: 'val2'] + CacheData itemTwo = createData(id, itemTwoAttributes) + String type = 'foo' + cache.mergeAll(type, [ itemOne ]) + assert itemOneAttributes.equals(cache.get(type, id).attributes) + + when: 'adding both items' + cache.mergeAll(type, [ itemOne, itemTwo ]) + + then: 'itemOne is in the cache' + itemOneAttributes.equals(cache.get(type, id).attributes) + + and: 'the metrics report a duplicate' + 1 * ((SqlCache) cache).cacheMetrics.merge('test', type, 2, 0, 0, 0, 1, 0, 0, 1) + + when: 'storing the items again' + cache.mergeAll(type, [ itemOne, itemTwo ]) + + then: 'itemOne is in the cache' + itemOneAttributes.equals(cache.get(type, id).attributes) + + and: 'the metrics report a duplicate' + 1 * ((SqlCache) cache).cacheMetrics.merge('test', type, 2, 0, 0, 0, 1, 0, 0, 1) + } + + def 'all items are stored and retrieved when larger than sql chunk sizes'() { + given: + def data = (1..10).collect { createData("fnord-$it") } + ((SqlCache) cache).mergeAll('foo', data) + + when: + def retrieved = ((SqlCache) cache).getAll('foo') + + then: + retrieved.size() == 10 + retrieved.findAll { it.id == "fnord-5" }.size() == 1 + } + + @Unroll + def 'generates where clause based on cacheFilters'() { + when: + def relPrefixes = ((SqlCache) cache).getRelationshipFilterPrefixes(filter) + def where = ((SqlCache) cache).getRelWhere(relPrefixes, queryPrefix) + + then: + where.toString() == expected + + where: + filter || queryPrefix || expected + RelationshipCacheFilter.none() || DSL.field("meowdy").eq("partner") || "meowdy = 'partner'" + null || DSL.field("meowdy").eq("partner") || "meowdy = 'partner'" + RelationshipCacheFilter.include("instances", "images") || null || "(\n rel_type like 'instances%'\n or rel_type like 'images%'\n)" + RelationshipCacheFilter.include("images") || DSL.field("meowdy").eq("partner") || "(\n meowdy = 'partner'\n and rel_type like 'images%'\n)" + null || null || "true" + } +} diff --git a/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/SqlProviderCacheSpec.groovy b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/SqlProviderCacheSpec.groovy new file mode 100644 index 00000000000..6424996cf2d --- /dev/null +++ b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/SqlProviderCacheSpec.groovy @@ -0,0 +1,180 @@ +package com.netflix.spinnaker.cats.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.cats.agent.DefaultCacheResult +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.provider.ProviderCacheSpec +import com.netflix.spinnaker.cats.sql.cache.SpectatorSqlCacheMetrics +import com.netflix.spinnaker.cats.sql.cache.SqlCache +import com.netflix.spinnaker.cats.sql.cache.SqlNamedCacheFactory +import 
com.netflix.spinnaker.config.SqlConstraintsInitializer +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.config.RetryProperties +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import com.zaxxer.hikari.HikariDataSource +import org.jooq.DSLContext +import org.jooq.SQLDialect +import org.testcontainers.DockerClientFactory +import spock.lang.AutoCleanup +import spock.lang.Requires +import spock.lang.Shared +import spock.lang.Unroll + +import java.time.Clock +import java.time.Instant +import java.time.ZoneId + + +@Requires({ DockerClientFactory.instance().isDockerAvailable() }) +class SqlProviderCacheSpec extends ProviderCacheSpec { + + @Shared + DSLContext context + + @AutoCleanup("close") + HikariDataSource dataSource + + WriteableCache backingStore + + def cleanup() { + SqlTestUtil.cleanupDb(context) + } + + @Override + SqlProviderCache getDefaultProviderCache() { + getCache() as SqlProviderCache + } + + @Override + Cache getSubject() { + def mapper = new ObjectMapper() + def clock = new Clock.FixedClock(Instant.EPOCH, ZoneId.of("UTC")) + def sqlRetryProperties = new SqlRetryProperties(new RetryProperties(1, 10), new RetryProperties(1, 10)) + def sqlMetrics = new SpectatorSqlCacheMetrics(new NoopRegistry()) + def dynamicConfigService = Mock(DynamicConfigService) { + getConfig(_ as Class, _ as String, _) >> 10 + } + + SqlTestUtil.TestDatabase testDatabase = SqlTestUtil.initTcMysqlDatabase() + context = testDatabase.context + dataSource = testDatabase.dataSource + + backingStore = new SqlCache( + "test", + context, + mapper, + null, + clock, + sqlRetryProperties, + "test", + sqlMetrics, + dynamicConfigService, + new SqlConstraintsInitializer().getDefaultSqlConstraints(SQLDialect.MYSQL), + new SqlNamedCacheFactory.DefaultProviderCacheConfiguration() + ) + + return new SqlProviderCache(backingStore) + } + + @Unroll + def 'informative relationship filtering behaviour'() { + setup: + populateOne( + 'serverGroup', + 'foo', + createData('foo', [canhaz: "attributes"], [rel1: ["rel1"]]) + ) + + addInformative( + 'loadBalancer', + 'bar', + createData('bar', [canhaz: "attributes"], [serverGroup: ["foo"]]) + ) + + addInformative( + 'instances', + 'baz', + createData('baz', [canhaz: "attributes"], [serverGroup: ["foo"]]) + ) + + expect: + cache.get('serverGroup', 'foo').relationships.keySet() == ["instances", "loadBalancer", "rel1"] as Set + cache.get('serverGroup', 'foo', filter).relationships.keySet() == expectedRelationships as Set + + cache.getAll('serverGroup').iterator().next().relationships.keySet() == ["instances", "loadBalancer", "rel1"] as Set + cache.getAll('serverGroup', filter).iterator().next().relationships.keySet() == expectedRelationships as Set + + where: + filter || expectedRelationships + RelationshipCacheFilter.include("loadBalancer") || ["loadBalancer"] + RelationshipCacheFilter.include("instances", "loadBalancer") || ["instances", "loadBalancer"] + RelationshipCacheFilter.include("rel3") || [] + RelationshipCacheFilter.none() || [] + } + + def 'can index and retrieve by application'() { + setup: + def sgIdsForAppFoo = 'fooSg1'..'fooSg9' + def sgIdsForAppBar = 'barSg1'..'barSg9' + + sgIdsForAppFoo.each { + populateOne('serverGroup', it, createData(it, [application: "foo"], [:])) + } + + sgIdsForAppBar.each { + populateOne('serverGroup', it, createData(it, [application: "bar"], [:])) + } + + when: + def fooData = cache.getAllByApplication("serverGroup", 
"foo", RelationshipCacheFilter.none()) + def barData = cache.getAllByApplication("serverGroup", "bar", RelationshipCacheFilter.none()) + + then: + fooData["serverGroup"].findAll { it.attributes.application != "foo" } == [] + barData["serverGroup"].findAll { it.attributes.application != "bar" } == [] + fooData["serverGroup"].collect { it.id }.sort() == sgIdsForAppFoo + barData["serverGroup"].collect { it.id }.sort() == sgIdsForAppBar + } + + def 'can retrieve multiple types by application'() { + setup: + def sgIdsForAppFoo = 'fooSg1'..'fooSg3' + def sgIdsForAppBar = 'barSg1'..'barSg3' + def instanceIdsForAppFoo = 'fooInst1'..'fooInst3' + def instanceIdsForAppBar = 'barInst1'..'barInst3' + def filters = [serverGroup: RelationshipCacheFilter.none(), instances: RelationshipCacheFilter.none()] + + sgIdsForAppFoo.each { + populateOne('serverGroup', it, createData(it, [application: "foo"], [:])) + } + + sgIdsForAppBar.each { + populateOne('serverGroup', it, createData(it, [application: "bar"], [:])) + } + + instanceIdsForAppFoo.each { + populateOne('instances', it, createData(it, [application: "foo"], [:])) + } + + instanceIdsForAppBar.each { + populateOne('instances', it, createData(it, [application: "bar"], [:])) + } + + when: + def fooData = cache.getAllByApplication(["instances", "serverGroup"], "foo", filters) + + then: + fooData["instances"].collect { it.id }.sort() == instanceIdsForAppFoo + fooData["serverGroup"].collect { it.id }.sort() == sgIdsForAppFoo + } + + void addInformative(String type, String id, CacheData cacheData = createData(id)) { + defaultProviderCache.putCacheResult('testAgent', ['informative'], new DefaultCacheResult((type): [cacheData])) + } + +} diff --git a/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/StaticProviderCacheConfiguration.groovy b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/StaticProviderCacheConfiguration.groovy new file mode 100644 index 00000000000..5f576bfe41c --- /dev/null +++ b/cats/cats-sql/src/test/groovy/com/netflix/spinnaker/cats/sql/StaticProviderCacheConfiguration.groovy @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.sql + +import com.netflix.spinnaker.cats.provider.ProviderCacheConfiguration; + +class StaticProviderCacheConfiguration implements ProviderCacheConfiguration { + boolean supportsFullEviction = false + + @Override + boolean supportsFullEviction() { + return supportsFullEviction + } +} diff --git a/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNamesTest.kt b/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNamesTest.kt new file mode 100644 index 00000000000..e53fadbf838 --- /dev/null +++ b/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlNamesTest.kt @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.cats.sql.cache + +import com.netflix.spinnaker.config.SqlConstraints +import com.netflix.spinnaker.config.SqlConstraintsInitializer +import com.netflix.spinnaker.config.SqlConstraintsProperties +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import org.jooq.SQLDialect +import java.lang.IllegalArgumentException +import strikt.api.expect +import strikt.api.expectThat +import strikt.assertions.isA +import strikt.assertions.isEqualTo + +class SqlNamesTest : JUnit5Minutests { + + fun tests() = rootContext<SqlNames> { + fixture { + SqlNames(sqlConstraints = SqlConstraintsInitializer.getDefaultSqlConstraints(SQLDialect.MYSQL)) + } + + listOf( + TableName("hello", "", "cats_v1_hello"), + TableName("hello", "world", "cats_v1_helloworld"), + TableName("abcdefghij".repeat(10), "", "cats_v1_abcdefghijabcdefghijabcdefghijabcdefghijaa7d0fee7e891a66"), + TableName("abcdefghij".repeat(10), "_rel", "cats_v1_abcdefghijabcdefghijabcdefghijabcdef9246690b33571ecc_rel"), + TableName("abcdefghij".repeat(10), "suffix".repeat(10), "cats_v1_abcdefghijabcdefghijabcdefghijabcdefghijfe546a736182e553") + ).forEach { table -> + test("max length of table name is checked: $table") { + expectThat(checkTableName("cats_v1_", table.name, table.suffix)) + .isEqualTo(table.expected) + } + } + } + + fun agentTests() = rootContext<SqlNames> { + fixture { + SqlNames(sqlConstraints = SqlConstraintsInitializer.getDefaultSqlConstraints(SQLDialect.MYSQL)) + } + listOf( + Pair(null, null), + Pair("myagent", "myagent"), + Pair( + "abcdefghij".repeat(20), + "abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdebb43b982e477772faa2e899f65d0a86b" + ), + Pair( + "abcdefghij".repeat(10) + ":" + "abcdefghij".repeat(10), + "abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij:20f5a9d8d3f4f18cfec8a40eda" + ), + Pair( + "abcdefghij:" + "abcdefghij".repeat(20), + "abcdefghij:abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcd5bfa5c163877f247769cd6b488dff339" + ) + ).forEach { test -> + test("max length of agent name is checked: ${test.first}") { + expectThat(checkAgentName(test.first)) + .isEqualTo(test.second) + } + } + + test("do not accept types that are too long") { + expect { + that( + kotlin.runCatching { checkAgentName("abcdefghij".repeat(20) + ":abcdefghij") } + .exceptionOrNull() + ).isA<IllegalArgumentException>() + } + } + } + + private inner class TableName( + val name: String, + val suffix: String, + val expected: String + ) +} diff --git a/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlUnknownAgentCleanupAgentTest.kt b/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlUnknownAgentCleanupAgentTest.kt new file mode 100644 index 00000000000..f883eace733 --- /dev/null +++ b/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cats/sql/cache/SqlUnknownAgentCleanupAgentTest.kt @@ -0,0 +1,286 @@ +/* + *
Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.cats.sql.cache + +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.cats.agent.CachingAgent +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.mem.InMemoryNamedCacheFactory +import com.netflix.spinnaker.cats.provider.DefaultProviderRegistry +import com.netflix.spinnaker.cats.provider.ProviderRegistry +import com.netflix.spinnaker.cats.test.TestAgent +import com.netflix.spinnaker.cats.test.TestProvider +import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES +import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS +import com.netflix.spinnaker.config.SqlConstraints +import com.netflix.spinnaker.config.SqlConstraintsInitializer +import com.netflix.spinnaker.config.SqlConstraintsProperties +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import de.huxhorn.sulky.ulid.ULID +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import org.jooq.SQLDialect +import org.jooq.impl.DSL.field +import org.jooq.impl.DSL.table +import org.junit.jupiter.api.Assumptions.assumeTrue +import org.junit.jupiter.api.assertDoesNotThrow +import org.springframework.beans.factory.ObjectProvider +import org.testcontainers.DockerClientFactory +import strikt.api.expect +import strikt.api.expectThat +import strikt.assertions.get +import strikt.assertions.hasSize +import strikt.assertions.isEqualTo + +class SqlUnknownAgentCleanupAgentTest : JUnit5Minutests { + + fun tests() = rootContext<Fixture> { + fixture { Fixture() } + + beforeAll { + assumeTrue(DockerClientFactory.instance().isDockerAvailable) + } + + after { + SqlTestUtil.cleanupDb(dslContext) + } + + context("test and prod accounts exist") { + deriveFixture { + fixture.providerAgents.addAll( + listOf( + testCachingAgent(), + prodCachingAgent() + ) + ) + seedDatabase(includeTestAccount = true, includeProdAccount = true) + fixture + } + + test("nothing happens") { + expect { + that(selectAllResources()).describedAs("initial resources").hasSize(2) + that(selectAllRels()).describedAs("initial relationships").hasSize(2) + } + + subject.run() + + expect { + that(selectAllResources()).describedAs("modified resources").hasSize(2) + that(selectAllRels()).describedAs("modified relationships").hasSize(2) + } + } + + context("test account is removed") { + modifyFixture { + fixture.providerAgents.removeIf { it.scope == "test" } + } + + before { subject.run() } + + test("resources referencing old data are deleted") { + expectThat(selectAllResources()) + .hasSize(1)[0].isEqualTo("aws:instances:prod:us-east-1:i-abcd1234") + } + + test("relationships referencing old data are deleted") { + expectThat(selectAllRels()) + .hasSize(1)[0].isEqualTo("aws:serverGroups:myapp-prod:prod:us-east-1:myapp-prod-v000") + } + } + } + context("add caching agent for uninstantiated custom resource definition") { + deriveFixture { +
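// Authoritative for a type whose cats table was never created; the cleanup agent must not throw on it. +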
fixture.providerAgents.add(unregisteredCustomResourceCachingAgent()) + fixture + } + + test("error is not thrown when table does not exist for type for which agent is authoritative") { + assertDoesNotThrow { + subject.run() + } + } + } + } + + fun defaultSqlNames(): SqlNames = + SqlNames(sqlConstraints = SqlConstraintsInitializer.getDefaultSqlConstraints(SQLDialect.MYSQL)) + + + private inner class Fixture { + val testDatabase = SqlTestUtil.initTcMysqlDatabase() + val dslContext = testDatabase.context + + val providerAgents: MutableList<TestAgent> = mutableListOf() + val providerRegistry: ProviderRegistry = DefaultProviderRegistry( + listOf(TestProvider(providerAgents as Collection<CachingAgent>)), + InMemoryNamedCacheFactory() + ) + val registry = NoopRegistry() + + val subject = SqlUnknownAgentCleanupAgent(StaticObjectProvider(providerRegistry), dslContext, registry, defaultSqlNames()) + + fun seedDatabase(includeTestAccount: Boolean, includeProdAccount: Boolean) { + defaultSqlNames().run { + val resource = resourceTableName("instances") + val rel = relTableName("instances") + dslContext.execute("CREATE TABLE IF NOT EXISTS $resource LIKE cats_v1_resource_template") + dslContext.execute("CREATE TABLE IF NOT EXISTS $rel LIKE cats_v1_rel_template") + } + + dslContext.insertInto(table("cats_v1_instances")) + .columns( + field("id"), field("agent"), field("application"), field("body_hash"), field("body"), field("last_updated") + ) + .let { + if (includeProdAccount) { + it + .values( + "aws:instances:prod:us-east-1:i-abcd1234", + "prod/TestAgent", + "myapp", + "", + "", + System.currentTimeMillis() + ) + } else { + it + } + } + .let { + if (includeTestAccount) { + it + .values( + "aws:instances:test:us-east-1:i-abcd1234", + "test/TestAgent", + "myapp", + "", + "", + System.currentTimeMillis() + ) + } else { + it + } + } + .execute() + + dslContext.insertInto(table("cats_v1_instances_rel")) + .columns( + field("uuid"), + field("id"), + field("rel_id"), + field("rel_agent"), + field("rel_type"), + field("last_updated") + ) + .let { + if (includeProdAccount) { + it + .values( + ULID().nextULID(), + "aws:instances:prod:us-east-1:i-abcd1234", + "aws:serverGroups:myapp-prod:prod:us-east-1:myapp-prod-v000", + "serverGroups:prod/TestAgent", + "serverGroups", + System.currentTimeMillis() + ) + } else { + it + } + } + .let { + if (includeTestAccount) { + it + .values( + ULID().nextULID(), + "aws:instances:test:us-east-1:i-abcd1234", + "aws:serverGroups:myapp-test:test:us-east-1:myapp-test-v000", + "serverGroups:test/TestAgent", + "serverGroups", + System.currentTimeMillis() + ) + } else { + it + } + } + .execute() + } + + fun testCachingAgent(): TestAgent = + TestAgent().also { + it.scope = "test" + it.types = setOf(INSTANCES.ns, SERVER_GROUPS.ns) + it.authoritative = setOf(INSTANCES.ns) + it.results = mapOf( + INSTANCES.ns to listOf( + DefaultCacheData( + "aws:instances:test:us-east-1:i-abcd1234", + mapOf(), + mapOf( + SERVER_GROUPS.ns to listOf( + "aws:serverGroups:myapp-test:test:us-east-1:myapp-test-v000" + ) + ) + ) + ) + ) + } + + fun prodCachingAgent(): TestAgent = + TestAgent().also { + it.scope = "prod" + it.types = setOf(INSTANCES.ns, SERVER_GROUPS.ns) + it.authoritative = setOf(INSTANCES.ns) + it.results = mapOf( + INSTANCES.ns to listOf( + DefaultCacheData( + "aws:instances:prod:us-east-1:i-abcd1234", + mapOf(), + mapOf( + SERVER_GROUPS.ns to listOf( + "aws:serverGroups:myapp-prod:prod:us-east-1:myapp-prod-v000" + ) + ) + ) + ) + ) + } + + fun unregisteredCustomResourceCachingAgent(): TestAgent = + TestAgent().also
{ + it.scope = "unregisteredCustomResources" + it.types = setOf("cloud.google.com.BackendConfig") + it.authoritative = setOf("cloud.google.com.BackendConfig") + } + + fun selectAllResources(): List<String> = + dslContext.select(field("id")) + .from(table(defaultSqlNames().resourceTableName("instances"))) + .fetch(0, String::class.java) + + fun selectAllRels(): List<String> = + dslContext.select(field("rel_id")) + .from(table(defaultSqlNames().relTableName("instances"))) + .fetch(0, String::class.java) + } + + private inner class StaticObjectProvider(val obj: ProviderRegistry) : ObjectProvider<ProviderRegistry> { + override fun getIfUnique(): ProviderRegistry = obj + override fun getObject(vararg args: Any?): ProviderRegistry = obj + override fun getObject(): ProviderRegistry = obj + override fun getIfAvailable(): ProviderRegistry = obj + } +} diff --git a/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cluster/SqlClusteredAgentSchedulerTest.kt b/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cluster/SqlClusteredAgentSchedulerTest.kt new file mode 100644 index 00000000000..c10d9b43607 --- /dev/null +++ b/cats/cats-sql/src/test/kotlin/com/netflix/spinnaker/cluster/SqlClusteredAgentSchedulerTest.kt @@ -0,0 +1,159 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.cluster + +import com.netflix.spinnaker.cats.agent.Agent +import com.netflix.spinnaker.cats.agent.AgentExecution +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider +import com.netflix.spinnaker.cats.cluster.NodeIdentity +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider +import com.netflix.spinnaker.cats.cluster.NoopShardingFilter +import com.netflix.spinnaker.cats.sql.cluster.SqlClusteredAgentScheduler +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.nhaarman.mockito_kotlin.any +import com.nhaarman.mockito_kotlin.eq +import com.nhaarman.mockito_kotlin.mock +import com.nhaarman.mockito_kotlin.whenever +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import org.jooq.* +import org.junit.jupiter.api.Assertions.assertFalse +import org.mockito.stubbing.Answer +import java.sql.ResultSet +import java.util.concurrent.ExecutorService +import java.util.concurrent.FutureTask +import java.util.concurrent.ScheduledExecutorService + +class SqlClusteredAgentSchedulerTest : JUnit5Minutests { + + fun tests() = rootContext<Fixture> { + fixture { + Fixture() + } + + test("should shuffle agents") { + whenever(dynamicConfigService.getConfig(eq(Int::class.java), eq("sql.agent.max-concurrent-agents"), + any())).thenReturn(2) + val invocations = mutableListOf<String>() + val agentExec = AgentExecution { + invocations.add(it.agentType) + } + scheduleAgent("account1/KubernetesCoreCachingAgent[1/4]", agentExec) + scheduleAgent("account1/KubernetesCoreCachingAgent[2/4]", agentExec) + scheduleAgent("account1/KubernetesCoreCachingAgent[3/4]", agentExec) + scheduleAgent("account1/KubernetesCoreCachingAgent[4/4]", agentExec) + + this.sqlClusteredAgentScheduler.run() + val actual1 = invocations.toList() + invocations.clear() + this.sqlClusteredAgentScheduler.run() + val actual2 = invocations.toList() + invocations.clear() + this.sqlClusteredAgentScheduler.run() + val actual3 = invocations.toList() + invocations.clear() + this.sqlClusteredAgentScheduler.run() + val actual4 = invocations.toList() + invocations.clear() + + println("Agent invocations:\n$actual1\n$actual2\n$actual3\n$actual4") + assertFalse( + actual1 == actual2 && + actual2 == actual3 && + actual3 == actual4, + "Expected variation in agent order of execution, " + + "but the same agents ran in the same order: " + actual1) + } + } + + private inner class Fixture { + val jooq: DSLContext = mock() + val nodeIdentity: NodeIdentity = mock() + val intervalProvider: AgentIntervalProvider = mock() + val nodeStatusProvider: NodeStatusProvider = mock() + val dynamicConfigService: DynamicConfigService = mock() + val enabledAgentPattern = ".*" + val disabledAgentsConfig = emptyList<String>() + val agentLockInterval = 1L + val tableNamespace = "" + val agentExecutionPool: ExecutorService = mock() + val lockPollingScheduler: ScheduledExecutorService = mock() + val interval = AgentIntervalProvider.Interval(30L, 30L) + val shardingFilter = NoopShardingFilter() + val sqlClusteredAgentScheduler = SqlClusteredAgentScheduler( + jooq, + nodeIdentity, + intervalProvider, + nodeStatusProvider, + dynamicConfigService, + enabledAgentPattern, + disabledAgentsConfig, + agentLockInterval, + tableNamespace, + agentExecutionPool, + lockPollingScheduler, + shardingFilter + ) + + init { + whenever(nodeStatusProvider.isNodeEnabled).thenReturn(true) + whenever(nodeIdentity.nodeIdentity).thenReturn("node1") + whenever(dynamicConfigService.getConfig(eq(String::class.java),
eq("sql.agent.disabled-agents"), + any())).thenReturn("") + whenever(dynamicConfigService.getConfig(eq(Long::class.java), eq("sql.agent.release-threshold-ms"), + any())).thenReturn(50000L) + + // empty agent locks in db + val sss: SelectSelectStep> = mock() + val sjs: SelectJoinStep> = mock() + val result: Result> = mock() + val resultSet: ResultSet = mock() + whenever(jooq.select(any>(), any>())).thenReturn(sss) + whenever(sss.from(any>())).thenReturn(sjs) + whenever(sjs.fetch()).thenReturn(result) + whenever(result.intoResultSet()).thenReturn(resultSet) + whenever(resultSet.next()).thenReturn(false) + + val iss: InsertSetStep = mock() + val ivsColumns: InsertValuesStep4 = mock() + val ivsValues: InsertValuesStep4 = mock() + whenever(jooq.insertInto(any>())).thenReturn(iss) + whenever(iss.columns(any>(), any>(), any>(), any>())).thenReturn(ivsColumns) + whenever(ivsColumns.values(any(), eq("node1"), any(), any())).thenReturn(ivsValues) + whenever(ivsValues.execute()).thenReturn(0) + + val dus: DeleteUsingStep = mock() + val dcs: DeleteConditionStep = mock() + whenever(jooq.delete(any>())).thenReturn(dus) + whenever(dus.where(any())).thenReturn(dcs) + + whenever(intervalProvider.getInterval(any())).thenReturn(interval) + whenever(agentExecutionPool.submit(any())).thenAnswer(Answer { + val r: Runnable = it.getArgument(0) + r.run() + object: FutureTask({ r }) { } + }) + } + + fun scheduleAgent(name: String, agentExec: AgentExecution) { + val agent: Agent = mock() + whenever(agent.agentType).thenReturn(name) + sqlClusteredAgentScheduler.schedule(agent, agentExec, mock()) + } + } +} diff --git a/cats/cats-test/cats-test.gradle b/cats/cats-test/cats-test.gradle index 443d5418792..7491388bec1 100644 --- a/cats/cats-test/cats-test.gradle +++ b/cats/cats-test/cats-test.gradle @@ -1,7 +1,12 @@ tasks.compileGroovy.enabled = true dependencies { - compile project(":cats:cats-core") - compile spinnaker.dependency('groovy') - compile spinnaker.dependency('spock') + implementation project(":cats:cats-core") + implementation "org.apache.groovy:groovy" + + implementation "org.springframework.boot:spring-boot-starter-test" + implementation "org.spockframework:spock-core" + implementation "org.spockframework:spock-spring" + implementation "cglib:cglib-nodep" + implementation "org.objenesis:objenesis" } diff --git a/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/cache/CacheSpec.groovy b/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/cache/CacheSpec.groovy index dda835d2525..a17241e60ff 100644 --- a/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/cache/CacheSpec.groovy +++ b/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/cache/CacheSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.cats.cache +import org.junit.jupiter.api.BeforeEach import spock.lang.Specification import spock.lang.Subject import spock.lang.Unroll @@ -25,7 +26,9 @@ abstract class CacheSpec extends Specification { @Subject Cache cache + @BeforeEach def setup() { + println "--------------- Test " + specificationContext.currentIteration.name cache = getSubject() } @@ -65,6 +68,22 @@ abstract class CacheSpec extends Specification { cache.getIdentifiers('foo').sort() == ['bar', 'baz'] } + def 'existingIdentifiers behavior'() { + given: + def writtenIds = ['this', 'is', 'not', 'a', 'test'] + def idsToCheck = ['foo', 'bar', 'test', 'baz', 'this'] + + for (String id : writtenIds) { + populateOne('foo', id) + } + + when: + def existingIds = cache.existingIdentifiers('foo', idsToCheck) + + then: + 
existingIds.sort() == ['test', 'this'] + } + def 'filterIdentifiers behaviour'() { setup: for (String id : identifiers) { @@ -79,20 +98,13 @@ abstract class CacheSpec extends Specification { '*TEST*' | ['blaTEST', 'blaTESTbla', 'TESTbla'] 'bla*' | ['blaTEST', 'blaTESTbla', 'blaPest', 'blaFEST'] 'bla[TF]EST' | ['blaTEST', 'blaFEST'] - 'bla?EST' | ['blaTEST', 'blaFEST'] + 'bla????' | ['blaTEST', 'blaPest', 'blaFEST'] + 'bla____' | [] '??a[FTP][Ee][Ss][Tt]*' | ['blaTEST', 'blaTESTbla', 'blaPest', 'blaFEST'] identifiers = ['blaTEST', 'TESTbla', 'blaTESTbla', 'blaPest', 'blaFEST'] } - def 'a cached value does not exist until it has attributes'() { - setup: - populateOne('foo', 'bar', createData('bar', [:])) - - expect: - cache.get('foo', 'bar') == null - } - def 'can getAll empty id collection'() { when: def results = cache.getAll('foo', []) diff --git a/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/provider/ProviderCacheSpec.groovy b/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/provider/ProviderCacheSpec.groovy new file mode 100644 index 00000000000..6ff7d08a86c --- /dev/null +++ b/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/provider/ProviderCacheSpec.groovy @@ -0,0 +1,144 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.cats.provider + +import com.netflix.spinnaker.cats.agent.CacheResult +import com.netflix.spinnaker.cats.agent.DefaultCacheResult +import com.netflix.spinnaker.cats.cache.* +import com.netflix.spinnaker.cats.mem.InMemoryCache + +abstract class ProviderCacheSpec extends CacheSpec { + + WriteableCache backingStore + + @Override + Cache getSubject() { + backingStore = new InMemoryCache() + new DefaultProviderCache(backingStore) + } + + void populateOne(String type, String id, CacheData cacheData = createData(id)) { + defaultProviderCache.putCacheResult('testAgent', [], new DefaultCacheResult((type): [cacheData])) + } + + ProviderCache getDefaultProviderCache() { + getCache() as DefaultProviderCache + } + + def 'explicit evictions are removed from the cache'() { + setup: + String agent = 'agent' + CacheResult result = new DefaultCacheResult(test: [new DefaultCacheData('id', [id: 'id'], [:])]) + defaultProviderCache.putCacheResult(agent, [], result) + + when: + def data = defaultProviderCache.get('test', 'id') + + then: + data != null + data.id == 'id' + + when: + defaultProviderCache.putCacheResult(agent, [], new DefaultCacheResult([:], [test: ['id']])) + data = defaultProviderCache.get('test', 'id') + + then: + data == null + } + + def 'multiple agents can cache the same data type'() { + setup: + String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent' + CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1') + String usWest2Agent = 'AwsProvider:test/us-west-2/ClusterCachingAgent' + CacheResult testUsWest2 = buildCacheResult('test', 'us-west-2') + defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup', 'cluster', 'application'], testUsEast1) + defaultProviderCache.putCacheResult(usWest2Agent, ['serverGroup', 'cluster', 'application'], testUsWest2) + + when: + def app = defaultProviderCache.get('application', 'testapp') + + then: + app.attributes.accountName == 'test' + app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v001', 'test/us-west-2/testapp-test-v001'] + } + + def "an agents deletions don't affect another agent"() { + setup: + String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent' + CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1') + String usWest2Agent = 'AwsProvider:test/us-west-2/ClusterCachingAgent' + CacheResult testUsWest2 = buildCacheResult('test', 'us-west-2') + defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup', 'cluster', 'application'], testUsEast1) + defaultProviderCache.putCacheResult(usWest2Agent, ['serverGroup', 'cluster', 'application'], testUsWest2) + + when: + def app = defaultProviderCache.get('application', 'testapp') + + then: + app.attributes.accountName == 'test' + app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v001', 'test/us-west-2/testapp-test-v001'] + + when: + testUsEast1 = buildCacheResult('test', 'us-east-1', 'v002') + defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup', 'cluster', 'application'], testUsEast1) + app = defaultProviderCache.get('application', 'testapp') + + then: + app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v002', 'test/us-west-2/testapp-test-v001'] + + } + + def "items can be evicted by type and id"() { + setup: + String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent' + CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1') + defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup'], testUsEast1) + + when: + def sg 
+
+  def "an agent's deletions don't affect another agent"() {
+    setup:
+    String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent'
+    CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1')
+    String usWest2Agent = 'AwsProvider:test/us-west-2/ClusterCachingAgent'
+    CacheResult testUsWest2 = buildCacheResult('test', 'us-west-2')
+    defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup', 'cluster', 'application'], testUsEast1)
+    defaultProviderCache.putCacheResult(usWest2Agent, ['serverGroup', 'cluster', 'application'], testUsWest2)
+
+    when:
+    def app = defaultProviderCache.get('application', 'testapp')
+
+    then:
+    app.attributes.accountName == 'test'
+    app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v001', 'test/us-west-2/testapp-test-v001']
+
+    when:
+    testUsEast1 = buildCacheResult('test', 'us-east-1', 'v002')
+    defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup', 'cluster', 'application'], testUsEast1)
+    app = defaultProviderCache.get('application', 'testapp')
+
+    then:
+    app.relationships.serverGroup.sort() == ['test/us-east-1/testapp-test-v002', 'test/us-west-2/testapp-test-v001']
+  }
+
+  def "items can be evicted by type and id"() {
+    setup:
+    String usEast1Agent = 'AwsProvider:test/us-east-1/ClusterCachingAgent'
+    CacheResult testUsEast1 = buildCacheResult('test', 'us-east-1')
+    defaultProviderCache.putCacheResult(usEast1Agent, ['serverGroup'], testUsEast1)
+
+    when:
+    def sg = defaultProviderCache.get('serverGroup', 'test/us-east-1/testapp-test-v001')
+
+    then:
+    sg != null
+
+    when:
+    defaultProviderCache.evictDeletedItems('serverGroup', ['test/us-east-1/testapp-test-v001'])
+    sg = defaultProviderCache.get('serverGroup', 'test/us-east-1/testapp-test-v001')
+
+    then:
+    sg == null
+  }
+
+  private CacheResult buildCacheResult(String account, String region, String sgVersion = 'v001') {
+    String serverGroup = "$account/$region/testapp-test-$sgVersion"
+    String cluster = "$account/testapp-test"
+    String application = 'testapp'
+    String loadbalancer = "$account/$region/testapp--frontend"
+    Map serverGroupAtts = [
+      name   : "testapp-test-$sgVersion".toString(),
+      account: account,
+      region : region
+    ]
+
+    CacheData app = new DefaultCacheData(application, [accountName: account], [serverGroup: [serverGroup], cluster: [cluster]])
+    CacheData sg = new DefaultCacheData(serverGroup, serverGroupAtts, [application: [application], cluster: [cluster], loadBalancer: [loadbalancer]])
+    CacheData clu = new DefaultCacheData(cluster, [:], [application: [application], serverGroup: [serverGroup]])
+    CacheData lb = new DefaultCacheData(loadbalancer, [:], [serverGroup: [serverGroup]])
+
+    new DefaultCacheResult([application: [app], serverGroup: [sg], cluster: [clu], loadBalancer: [lb]])
+  }
+}
diff --git a/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/test/TestAgent.groovy b/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/test/TestAgent.groovy
index 142a978e7d3..b17b8690250 100644
--- a/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/test/TestAgent.groovy
+++ b/cats/cats-test/src/main/groovy/com/netflix/spinnaker/cats/test/TestAgent.groovy
@@ -17,6 +17,7 @@
 package com.netflix.spinnaker.cats.test
 
 import com.netflix.spinnaker.cats.agent.AgentDataType
+import com.netflix.spinnaker.cats.agent.AgentIntervalAware
 import com.netflix.spinnaker.cats.agent.CacheResult
 import com.netflix.spinnaker.cats.agent.CachingAgent
 import com.netflix.spinnaker.cats.agent.DefaultCacheResult
@@ -26,8 +27,17 @@ import com.netflix.spinnaker.cats.provider.ProviderCache
 import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE
 import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE
 
-class TestAgent implements CachingAgent {
+class TestAgent implements CachingAgent, AgentIntervalAware {
+  private long agentInterval
 
+  TestAgent() {
+    // default agent interval of one minute
+    this(60000L)
+  }
+
+  TestAgent(long agentInterval) {
+    this.agentInterval = agentInterval
+  }
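+  // AgentIntervalAware exposes the poll interval (in milliseconds) to the
+  // agent scheduler; tests pass a custom interval to exercise interval-based
+  // scheduling.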
project(":clouddriver-api") - findbugs { - ignoreFailures = true + implementation "org.apache.groovy:groovy" } } diff --git a/cloudbuild-tagged.yaml b/cloudbuild-tagged.yaml deleted file mode 100644 index 218742ceb68..00000000000 --- a/cloudbuild-tagged.yaml +++ /dev/null @@ -1,14 +0,0 @@ -steps: -- name: 'spinnakerrelease/gradle_cache' - env: ["GRADLE_USER_HOME=/gradle_cache/.gradle"] - entrypoint: "bash" - args: [ "-c", "./gradlew clouddriver-web:installDist -x test -Prelease.version=$TAG_NAME"] -- name: 'gcr.io/cloud-builders/docker' - args: ["build", - "-t", "gcr.io/$PROJECT_ID/$REPO_NAME:$COMMIT_SHA", - "-t", "gcr.io/$PROJECT_ID/$REPO_NAME:$TAG_NAME", - "-f", "Dockerfile.slim", - "."] -images: -- 'gcr.io/$PROJECT_ID/$REPO_NAME:$COMMIT_SHA' -- 'gcr.io/$PROJECT_ID/$REPO_NAME:$TAG_NAME' diff --git a/cloudbuild.yaml b/cloudbuild.yaml deleted file mode 100644 index 5e50fafc4a0..00000000000 --- a/cloudbuild.yaml +++ /dev/null @@ -1,11 +0,0 @@ -steps: -- name: 'spinnakerrelease/gradle_cache' - env: ["GRADLE_USER_HOME=/gradle_cache/.gradle"] - entrypoint: "bash" - args: [ "-c", "./gradlew clouddriver-web:installDist -x test"] -- name: 'gcr.io/cloud-builders/docker' - args: ["build", "-t", "gcr.io/$PROJECT_ID/$REPO_NAME:$COMMIT_SHA", "-t", "gcr.io/$PROJECT_ID/$REPO_NAME:latest", "-f", "Dockerfile.slim", "."] -images: -- 'gcr.io/$PROJECT_ID/$REPO_NAME:$COMMIT_SHA' -- 'gcr.io/$PROJECT_ID/$REPO_NAME:latest' -timeout: 15m diff --git a/clouddriver-alicloud/README.md b/clouddriver-alicloud/README.md new file mode 100644 index 00000000000..40e68ca8eb9 --- /dev/null +++ b/clouddriver-alicloud/README.md @@ -0,0 +1,5 @@ +## Alibaba Cloud Clouddriver + +The clouddriver-alicloud module aims to deploy an application on Alibaba Cloud. + +It is a work in progress diff --git a/clouddriver-alicloud/clouddriver-alicloud.gradle b/clouddriver-alicloud/clouddriver-alicloud.gradle new file mode 100644 index 00000000000..0ac9a0ae6dd --- /dev/null +++ b/clouddriver-alicloud/clouddriver-alicloud.gradle @@ -0,0 +1,35 @@ +dependencies { + implementation project(":cats:cats-core") + implementation project(":clouddriver-core") + implementation project(":clouddriver-eureka") + implementation project(":clouddriver-security") + + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-security" + implementation "com.netflix.frigga:frigga" + implementation "org.apache.httpcomponents:httpclient" + implementation "org.apache.httpcomponents:httpcore" + implementation "com.github.ben-manes.caffeine:guava" + implementation "io.spinnaker.kork:kork-moniker" + implementation "javax.servlet:javax.servlet-api" + + implementation 'com.aestasit.infrastructure.sshoogr:sshoogr:0.9.25' + implementation 'com.jcraft:jsch.agentproxy.jsch:0.0.9' + implementation 'com.jcraft:jsch.agentproxy.connector-factory:0.0.9' + implementation 'com.aliyun:aliyun-java-sdk-core:4.4.2' + implementation 'com.aliyun:aliyun-java-sdk-slb:3.2.9' + implementation 'com.aliyun:aliyun-java-sdk-vpc:3.0.6' + implementation 'com.aliyun:aliyun-java-sdk-ecs:4.16.10' + implementation 'com.aliyun:aliyun-java-sdk-ess:2.3.2' + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.apache.groovy:groovy" + implementation "org.apache.commons:commons-lang3" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.mockito:mockito-core" + testImplementation "org.objenesis:objenesis" + 
testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/AliCloudOperation.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/AliCloudOperation.java new file mode 100644 index 00000000000..e8f1518b034 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/AliCloudOperation.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface AliCloudOperation { + String value(); +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/AliCloudProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/AliCloudProvider.java new file mode 100644 index 00000000000..b526b8b323c --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/AliCloudProvider.java @@ -0,0 +1,47 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud;
+
+import com.netflix.spinnaker.clouddriver.core.CloudProvider;
+import java.lang.annotation.Annotation;
+import org.springframework.stereotype.Component;
+
+@Component
+public class AliCloudProvider implements CloudProvider {
+  public static final String ID = "alicloud";
+
+  final String id = ID;
+
+  final String displayName = "Alicloud";
+
+  final Class<? extends Annotation> operationAnnotationType = AliCloudOperation.class;
+
+  @Override
+  public String getId() {
+    return id;
+  }
+
+  @Override
+  public String getDisplayName() {
+    return displayName;
+  }
+
+  @Override
+  public Class<? extends Annotation> getOperationAnnotationType() {
+    return operationAnnotationType;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/cache/Keys.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/cache/Keys.java
new file mode 100644
index 00000000000..4ce9de2136e
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/cache/Keys.java
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.cache;
+
+import static com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider.ID;
+
+import com.google.common.base.CaseFormat;
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.alicloud.model.AliCloudServerGroup;
+import com.netflix.spinnaker.clouddriver.cache.KeyParser;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.stereotype.Component;
+
+@Component("AliCloudKeys")
+public class Keys implements KeyParser {
+
+  public enum Namespace {
+    LOAD_BALANCERS,
+    SUBNETS,
+    INSTANCE_TYPES,
+    SECURITY_GROUPS,
+    ALI_CLOUD_KEY_PAIRS;
+
+    public final String ns;
+
+    Namespace() {
+      ns = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, this.name());
+    }
+
+    @Override
+    public String toString() {
+      return ns;
+    }
+  }
+
+  public static final String SEPARATOR = ":";
+
+  public static String getLoadBalancerKey(
+      String loadBalancerName, String account, String region, String vpcId) {
+    String key =
+        ID
+            + SEPARATOR
+            + Namespace.LOAD_BALANCERS
+            + SEPARATOR
+            + account
+            + SEPARATOR
+            + region
+            + SEPARATOR
+            + loadBalancerName;
+    if (!StringUtils.isEmpty(vpcId)) {
+      key = key + SEPARATOR + vpcId;
+    }
+    return key;
+  }
+
+  public static String getSubnetKey(String vSwitchId, String region, String account) {
+    String key =
+        ID
+            + SEPARATOR
+            + Namespace.SUBNETS
+            + SEPARATOR
+            + account
+            + SEPARATOR
+            + region
+            + SEPARATOR
+            + vSwitchId;
+    return key;
+  }
+
+  public static String getImageKey(String imageId, String account, String region) {
+    String key =
+        ID
+            + SEPARATOR
+            + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES
+            + SEPARATOR
+            + account
+            + SEPARATOR
+            + region
+            + SEPARATOR
+            + imageId;
+    return key;
+  }
+
+  public static String 
getNamedImageKey(String account, String imageName) { + String key = + ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.NAMED_IMAGES + + SEPARATOR + + account + + SEPARATOR + + imageName; + return key; + } + + public static String getInstanceTypeKey(String account, String region, String zoneId) { + String key = + ID + + SEPARATOR + + Namespace.INSTANCE_TYPES + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + zoneId; + return key; + } + + public static String getSecurityGroupKey( + String securityGroupName, + String securityGroupId, + String region, + String account, + String vpcId) { + String key = + ID + + SEPARATOR + + Namespace.SECURITY_GROUPS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + securityGroupName + + SEPARATOR + + securityGroupId; + if (!StringUtils.isEmpty(vpcId)) { + key = key + SEPARATOR + vpcId; + } + return key; + } + + public static String getKeyPairKey(String keyPairName, String region, String account) { + String key = + ID + + SEPARATOR + + Namespace.ALI_CLOUD_KEY_PAIRS + + SEPARATOR + + keyPairName + + SEPARATOR + + account + + SEPARATOR + + region; + return key; + } + + public static String getServerGroupKey( + String autoScalingGroupName, String account, String region) { + AliCloudServerGroup serverGroup = new AliCloudServerGroup(); + serverGroup.setName(autoScalingGroupName); + // Names names = Names.parseName(autoScalingGroupName); + return getServerGroupKey( + serverGroup.getMoniker().getCluster(), autoScalingGroupName, account, region); + } + + public static String getServerGroupKey( + String cluster, String autoScalingGroupName, String account, String region) { + return ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS + + SEPARATOR + + cluster + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + autoScalingGroupName; + } + + public static String getApplicationKey(String application) { + String key = + ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.APPLICATIONS + + SEPARATOR + + application.toLowerCase(); + return key; + } + + public static String getClusterKey(String clusterName, String application, String account) { + String key = + ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.CLUSTERS + + SEPARATOR + + application.toLowerCase() + + SEPARATOR + + account + + SEPARATOR + + clusterName; + return key; + } + + public static String getLaunchConfigKey(String launchConfigName, String account, String region) { + String key = + ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LAUNCH_CONFIGS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + launchConfigName; + return key; + } + + public static String getInstanceKey(String instanceId, String account, String region) { + String key = + ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + instanceId; + return key; + } + + public static String getInstanceHealthKey( + String loadBalancerId, + String instanceId, + String port, + String account, + String region, + String provider) { + String key = + ID + + SEPARATOR + + com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH + + SEPARATOR + + loadBalancerId + + SEPARATOR + + instanceId + + SEPARATOR + + port + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + 
provider;
+    return key;
+  }
+
+  @Override
+  public Map<String, String> parseKey(String key) {
+    return parse(key);
+  }
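+  // Example: "alicloud:securityGroups:my-account:cn-hangzhou:app-elb:sg-123:vpc-1"
+  // parses into provider/type/account/region/name/id/vpcId entries. Only
+  // securityGroups keys are currently parsed; anything else returns null.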
+  public static Map<String, String> parse(String key) {
+    Map<String, String> result = new HashMap<>();
+    String[] parts = key.split(SEPARATOR);
+    if (parts.length < 2) {
+      return null;
+    }
+
+    result.put("provider", parts[0]);
+    result.put("type", parts[1]);
+
+    if (!ID.equals(result.get("provider"))) {
+      return null;
+    }
+
+    switch (result.get("type")) {
+      case "securityGroups":
+        if (parts.length >= 7 && !"null".equals(parts[6])) {
+          Names names = Names.parseName(parts[4]);
+          result.put("application", names.getApp());
+          result.put("name", parts[4]);
+          result.put("id", parts[5]);
+          result.put("region", parts[3]);
+          result.put("account", parts[2]);
+          result.put("vpcId", parts[6]);
+        } else {
+          return null;
+        }
+        break;
+      default:
+        return null;
+    }
+
+    return result;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return ID;
+  }
+
+  @Override
+  public Boolean canParseType(String type) {
+    for (Namespace key : Namespace.values()) {
+      if (key.toString().equals(type)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public Boolean canParseField(String field) {
+    return false;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/common/ClientFactory.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/common/ClientFactory.java
new file mode 100644
index 00000000000..238d5c8a541
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/common/ClientFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.common;
+
+import com.aliyuncs.DefaultAcsClient;
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.profile.DefaultProfile;
+import org.springframework.stereotype.Component;
+
+@Component
+public class ClientFactory {
+
+  public IAcsClient createClient(String region, String accessKeyId, String accessSecretKey) {
+    DefaultProfile profile = DefaultProfile.getProfile(region, accessKeyId, accessSecretKey);
+    DefaultAcsClient defaultAcsClient = new DefaultAcsClient(profile);
+    defaultAcsClient.appendUserAgent("Spinnaker", "Clouddriver v1.0");
+    return defaultAcsClient;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/common/HealthHelper.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/common/HealthHelper.java
new file mode 100644
index 00000000000..afe2cd30ef0
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/common/HealthHelper.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.common;
+
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH;
+
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import java.util.*;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.util.CollectionUtils;
+
+public class HealthHelper {
+
+  private static boolean healthyStateMatcher(String key, String loadBalancerId, String instanceId) {
+    String regex;
+    if (StringUtils.isNotBlank(loadBalancerId)) {
+      regex = AliCloudProvider.ID + ":.*:" + loadBalancerId + ":" + instanceId + ":.*";
+    } else {
+      regex = AliCloudProvider.ID + ":.*:" + instanceId + ":.*";
+    }
+    return Pattern.matches(regex, key);
+  }
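+  // Aggregation rule: only "normal" records -> Up; only "abnormal" records,
+  // or no records at all -> Down; a mix of both -> Unknown. A load balancer
+  // with no cached health data short-circuits the whole check to Unknown.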
+  public static HealthState judgeInstanceHealthyState(
+      Collection<String> allHealthyKeys,
+      List<String> loadBalancerIds,
+      String instanceId,
+      Cache cacheView) {
+    Set<String> healthyKeys = new HashSet<>();
+    if (loadBalancerIds != null) {
+      for (String loadBalancerId : loadBalancerIds) {
+        List<String> collect =
+            allHealthyKeys.stream()
+                .filter(tab -> HealthHelper.healthyStateMatcher(tab, loadBalancerId, instanceId))
+                .collect(Collectors.toList());
+        Collection<CacheData> healthData = cacheView.getAll(HEALTH.ns, collect, null);
+        if (CollectionUtils.isEmpty(healthData)) {
+          return HealthState.Unknown;
+        }
+        healthyKeys.addAll(collect);
+      }
+    } else {
+      List<String> collect =
+          allHealthyKeys.stream()
+              .filter(tab -> HealthHelper.healthyStateMatcher(tab, null, instanceId))
+              .collect(Collectors.toList());
+      healthyKeys.addAll(collect);
+    }
+    Collection<CacheData> healthData = cacheView.getAll(HEALTH.ns, healthyKeys, null);
+    Map<String, Integer> healthMap = new HashMap<>(16);
+    for (CacheData cacheData : healthData) {
+      String serverHealthStatus = cacheData.getAttributes().get("serverHealthStatus").toString();
+      healthMap.put(serverHealthStatus, healthMap.getOrDefault(serverHealthStatus, 0) + 1);
+    }
+    Integer normal = healthMap.get("normal");
+    Integer abnormal = healthMap.get("abnormal");
+    if (normal != null && normal > 0 && abnormal == null) {
+      return HealthState.Up;
+    } else if (abnormal != null && abnormal > 0 && normal == null) {
+      return HealthState.Down;
+    } else if (abnormal == null && normal == null) {
+      return HealthState.Down;
+    } else {
+      return HealthState.Unknown;
+    }
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudImageController.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudImageController.java
new file mode 100644
index 00000000000..41959022de5
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudImageController.java
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.controllers;
+
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.NAMED_IMAGES;
+
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys;
+import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException;
+import java.util.*;
+import javax.servlet.http.HttpServletRequest;
+import lombok.Data;
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RestController;
+
+@RestController
+@RequestMapping("/alicloud/images")
+public class AliCloudImageController {
+
+  private final Cache cacheView;
+
+  @Autowired
+  public AliCloudImageController(Cache cacheView) {
+    this.cacheView = cacheView;
+  }
+
+  @RequestMapping(value = "/find", method = RequestMethod.GET)
+  List<Image> list(LookupOptions lookupOptions, HttpServletRequest request) {
+    String glob = lookupOptions.getQ();
+    if (StringUtils.isBlank(glob) || glob.length() < 3) {
+      throw new InvalidRequestException("Missing search query, or query is shorter than 3 characters");
+    }
+    glob = "*" + glob + "*";
+    String imageSearchKey =
+        Keys.getImageKey(
+            glob,
+            StringUtils.isAllBlank(lookupOptions.account) ? "*" : lookupOptions.account,
+            StringUtils.isAllBlank(lookupOptions.region) ? "*" : lookupOptions.region);
+    Collection<String> imageIdentifiers = cacheView.filterIdentifiers(IMAGES.ns, imageSearchKey);
+    Collection<CacheData> images = cacheView.getAll(IMAGES.ns, imageIdentifiers, null);
+    String nameKey =
+        Keys.getNamedImageKey(
+            StringUtils.isAllBlank(lookupOptions.account) ? "*" : lookupOptions.account, glob);
+    Collection<String> nameImageIdentifiers = cacheView.filterIdentifiers(NAMED_IMAGES.ns, nameKey);
+    Collection<CacheData> nameImages =
+        cacheView.getAll(NAMED_IMAGES.ns, nameImageIdentifiers, null);
+    return filter(render(nameImages, images), extractTagFilters(request));
+  }
+
+  private static List<Image> filter(List<Image> namedImages, Map<String, String> tagFilters) {
+    if (tagFilters.isEmpty()) {
+      return namedImages;
+    }
+    List<Image> filter = new ArrayList<>();
+    for (Image namedImage : namedImages) {
+      if (checkInclude(namedImage, tagFilters)) {
+        filter.add(namedImage);
+      }
+    }
+    return filter;
+  }
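+  // An image matches only if it carries every requested tag with a
+  // case-insensitively equal value; an image without tags never matches.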
"*" : lookupOptions.account, glob); + Collection nameImageIdentifiers = cacheView.filterIdentifiers(NAMED_IMAGES.ns, nameKey); + Collection nameImages = + cacheView.getAll(NAMED_IMAGES.ns, nameImageIdentifiers, null); + return filter(render(nameImages, images), extractTagFilters(request)); + } + + private static List filter(List namedImages, Map tagFilters) { + if (tagFilters.isEmpty()) { + return namedImages; + } + List filter = new ArrayList<>(); + for (Image namedImage : namedImages) { + if (checkInclude(namedImage, tagFilters)) { + filter.add(namedImage); + } + } + return filter; + } + + /* private static boolean checkInclude(Image image, Map tagFilters) { + boolean flag = false; + List tags = (List) image.getAttributes().get("tags"); + if (tags != null) { + for (Map tag : tags) { + String tagKey = tag.get("tagKey").toString(); + String tagValue = tag.get("tagValue").toString(); + if (StringUtils.isNotEmpty(tagFilters.get(tagKey)) + && tagFilters.get(tagKey).equalsIgnoreCase(tagValue)) { + flag = true; + } else { + flag = false; + break; + } + } + } + return flag; + }*/ + + private static boolean checkInclude(Image image, Map tagFilters) { + boolean flag = false; + List tags = (List) image.getAttributes().get("tags"); + Map imageMap = new HashMap<>(tags.size()); + for (Map tag : tags) { + imageMap.put(tag.get("tagKey").toString(), tag.get("tagValue").toString()); + } + for (Map.Entry entry : tagFilters.entrySet()) { + String tagKey = entry.getKey(); + String tagValue = entry.getValue(); + if (StringUtils.isNotEmpty(imageMap.get(tagKey)) + && imageMap.get(tagKey).equalsIgnoreCase(tagValue)) { + flag = true; + } else { + flag = false; + break; + } + } + return flag; + } + + private List render(Collection namedImages, Collection images) { + List list = new ArrayList<>(); + for (CacheData image : images) { + Map attributes = image.getAttributes(); + list.add(new Image(String.valueOf(attributes.get("imageName")), attributes)); + } + + for (CacheData nameImage : namedImages) { + Map attributes = nameImage.getAttributes(); + list.add(new Image(String.valueOf(attributes.get("imageName")), attributes)); + } + return list; + } + + private static Map extractTagFilters(HttpServletRequest request) { + Map parameters = new HashMap<>(16); + Enumeration parameterNames = request.getParameterNames(); + while (parameterNames.hasMoreElements()) { + String parameterName = parameterNames.nextElement(); + if (parameterName.toLowerCase().startsWith("tag:")) { + parameters.put( + parameterName.replaceAll("tag:", "").toLowerCase(), + request.getParameter(parameterName)); + } + } + return parameters; + } + + @Data + public static class Image { + public Image(String imageName, Map attributes) { + this.imageName = imageName; + this.attributes = attributes; + } + + String imageName; + Map attributes; + } + + @Data + static class LookupOptions { + String q; + String account; + String region; + } + + @Data + static class Tag { + String tagKey; + String tagValue; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesController.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesController.java new file mode 100644 index 00000000000..f305f43934b --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesController.java @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Alibaba Group. 
+  private static Map<String, String> extractTagFilters(HttpServletRequest request) {
+    Map<String, String> parameters = new HashMap<>(16);
+    Enumeration<String> parameterNames = request.getParameterNames();
+    while (parameterNames.hasMoreElements()) {
+      String parameterName = parameterNames.nextElement();
+      if (parameterName.toLowerCase().startsWith("tag:")) {
+        parameters.put(
+            parameterName.replaceAll("tag:", "").toLowerCase(),
+            request.getParameter(parameterName));
+      }
+    }
+    return parameters;
+  }
+
+  @Data
+  public static class Image {
+    public Image(String imageName, Map<String, Object> attributes) {
+      this.imageName = imageName;
+      this.attributes = attributes;
+    }
+
+    String imageName;
+    Map<String, Object> attributes;
+  }
+
+  @Data
+  static class LookupOptions {
+    String q;
+    String account;
+    String region;
+  }
+
+  @Data
+  static class Tag {
+    String tagKey;
+    String tagValue;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesController.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesController.java
new file mode 100644
index 00000000000..f305f43934b
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesController.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.controllers;
+
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingActivitiesRequest;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingActivitiesResponse;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingActivitiesResponse.ScalingActivity;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingGroupsRequest;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingGroupsResponse;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingGroupsResponse.ScalingGroup;
+import com.aliyuncs.exceptions.ClientException;
+import com.aliyuncs.exceptions.ServerException;
+import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory;
+import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.*;
+
+@RestController
+@RequestMapping(
+    "/applications/{application}/clusters/{account}/{clusterName}/alicloud/serverGroups/{serverGroupName}")
+public class AliCloudScalingActivitiesController {
+
+  private final AccountCredentialsProvider accountCredentialsProvider;
+
+  private final ClientFactory clientFactory;
+
+  @Autowired
+  public AliCloudScalingActivitiesController(
+      AccountCredentialsProvider accountCredentialsProvider, ClientFactory clientFactory) {
+    this.accountCredentialsProvider = accountCredentialsProvider;
+    this.clientFactory = clientFactory;
+  }
+
+  @RequestMapping(value = "/scalingActivities", method = RequestMethod.GET)
+  ResponseEntity getScalingActivities(
+      @PathVariable String account,
+      @PathVariable String serverGroupName,
+      @RequestParam(value = "region", required = true) String region) {
+    List<ScalingActivity> resultList = new ArrayList<>();
+    AccountCredentials credentials = accountCredentialsProvider.getCredentials(account);
+    if (!(credentials instanceof AliCloudCredentials)) {
+      Map<String, String> messageMap = new HashMap<>();
+      messageMap.put("message", "bad credentials");
+      return new ResponseEntity(messageMap, HttpStatus.BAD_REQUEST);
+    }
+    AliCloudCredentials aliCloudCredentials = (AliCloudCredentials) credentials;
+    IAcsClient client =
+        clientFactory.createClient(
+            region, aliCloudCredentials.getAccessKeyId(), aliCloudCredentials.getAccessSecretKey());
+    DescribeScalingGroupsRequest describeScalingGroupsRequest = new DescribeScalingGroupsRequest();
+    describeScalingGroupsRequest.setScalingGroupName(serverGroupName);
+    describeScalingGroupsRequest.setPageSize(50);
+    DescribeScalingGroupsResponse describeScalingGroupsResponse;
+    try {
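+      // Resolve the scaling group by name first; its id then drives the
+      // activities query (both requests page at most 50 items).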
+      describeScalingGroupsResponse = client.getAcsResponse(describeScalingGroupsRequest);
+      if (describeScalingGroupsResponse.getScalingGroups().size() > 0) {
+        ScalingGroup scalingGroup = describeScalingGroupsResponse.getScalingGroups().get(0);
+        DescribeScalingActivitiesRequest activitiesRequest = new DescribeScalingActivitiesRequest();
+        activitiesRequest.setScalingGroupId(scalingGroup.getScalingGroupId());
+        activitiesRequest.setPageSize(50);
+        DescribeScalingActivitiesResponse activitiesResponse =
+            client.getAcsResponse(activitiesRequest);
+        resultList.addAll(activitiesResponse.getScalingActivities());
+      }
+
+    } catch (ServerException e) {
+      throw new IllegalStateException(e.getMessage(), e);
+    } catch (ClientException e) {
+      throw new IllegalStateException(e.getMessage(), e);
+    }
+    return new ResponseEntity(resultList, HttpStatus.OK);
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudLoadBalancerAtomicOperationConverter.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudLoadBalancerAtomicOperationConverter.java
new file mode 100644
index 00000000000..0506c29f54c
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudLoadBalancerAtomicOperationConverter.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudOperation; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.DeleteAliCloudLoadBalancerClassicAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AliCloudOperation(AtomicOperations.DELETE_LOAD_BALANCER) +@Component("deleteAliCloudLoadBalancerDescription") +public class DeleteAliCloudLoadBalancerAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + private final ClientFactory clientFactory; + + @Autowired + public DeleteAliCloudLoadBalancerAtomicOperationConverter(ClientFactory clientFactory) { + this.clientFactory = clientFactory; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteAliCloudLoadBalancerClassicAtomicOperation( + convertDescription(input), clientFactory); + } + + @Override + public UpsertAliCloudLoadBalancerDescription convertDescription(Map input) { + UpsertAliCloudLoadBalancerDescription converted = + getObjectMapper().convertValue(input, UpsertAliCloudLoadBalancerDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudSecurityGroupAtomicOperationConverter.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudSecurityGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..54d05a754f9 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudSecurityGroupAtomicOperationConverter.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudOperation; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.DeleteAliCloudSecurityGroupDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.DeleteAliCloudSecurityGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AliCloudOperation(AtomicOperations.DELETE_SECURITY_GROUP) +@Component("deleteAliCloudSecurityGroupDescription") +public class DeleteAliCloudSecurityGroupAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + private final ClientFactory clientFactory; + + @Autowired + public DeleteAliCloudSecurityGroupAtomicOperationConverter(ClientFactory clientFactory) { + this.clientFactory = clientFactory; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteAliCloudSecurityGroupAtomicOperation(convertDescription(input), clientFactory); + } + + @Override + public DeleteAliCloudSecurityGroupDescription convertDescription(Map input) { + DeleteAliCloudSecurityGroupDescription description = + getObjectMapper().convertValue(input, DeleteAliCloudSecurityGroupDescription.class); + description.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return description; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudLoadBalancerAtomicOperationConverter.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudLoadBalancerAtomicOperationConverter.java new file mode 100644 index 00000000000..407ee2011d3 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudLoadBalancerAtomicOperationConverter.java @@ -0,0 +1,55 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudOperation; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.UpsertAliCloudLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AliCloudOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +@Component("upsertAliCloudLoadBalancerDescription") +public class UpsertAliCloudLoadBalancerAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + private final ClientFactory clientFactory; + + @Autowired + public UpsertAliCloudLoadBalancerAtomicOperationConverter(ClientFactory clientFactory) { + this.clientFactory = clientFactory; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new UpsertAliCloudLoadBalancerAtomicOperation( + convertDescription(input), getObjectMapper(), clientFactory); + } + + @Override + public UpsertAliCloudLoadBalancerDescription convertDescription(Map input) { + UpsertAliCloudLoadBalancerDescription converted = + getObjectMapper().convertValue(input, UpsertAliCloudLoadBalancerDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudSecurityGroupAtomicOperationConverter.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudSecurityGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..42d55be8b5f --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudSecurityGroupAtomicOperationConverter.java @@ -0,0 +1,55 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudOperation; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudSecurityGroupDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.UpsertAliCloudSecurityGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AliCloudOperation(AtomicOperations.UPSERT_SECURITY_GROUP) +@Component("upsertAliCloudSecurityGroupDescription") +public class UpsertAliCloudSecurityGroupAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + private final ClientFactory clientFactory; + + @Autowired + public UpsertAliCloudSecurityGroupAtomicOperationConverter(ClientFactory clientFactory) { + this.clientFactory = clientFactory; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new UpsertAliCloudSecurityGroupAtomicOperation( + convertDescription(input), clientFactory, getObjectMapper()); + } + + @Override + public UpsertAliCloudSecurityGroupDescription convertDescription(Map input) { + UpsertAliCloudSecurityGroupDescription description = + getObjectMapper().convertValue(input, UpsertAliCloudSecurityGroupDescription.class); + description.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return description; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/BaseAliCloudDescription.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/BaseAliCloudDescription.java new file mode 100644 index 00000000000..d2c06683948 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/BaseAliCloudDescription.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials;
+import lombok.Data;
+
+@Data
+public class BaseAliCloudDescription {
+
+  @JsonIgnore private AliCloudCredentials credentials;
+
+  private String region;
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/DeleteAliCloudSecurityGroupDescription.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/DeleteAliCloudSecurityGroupDescription.java
new file mode 100644
index 00000000000..2d4661418cf
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/DeleteAliCloudSecurityGroupDescription.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.description;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class DeleteAliCloudSecurityGroupDescription extends BaseAliCloudDescription {
+
+  private String securityGroupName;
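+  // The named security group is looked up and deleted in each listed region.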
+  private List<String> regions;
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/UpsertAliCloudLoadBalancerDescription.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/UpsertAliCloudLoadBalancerDescription.java
new file mode 100644
index 00000000000..6bac76d1da9
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/UpsertAliCloudLoadBalancerDescription.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.netflix.spinnaker.clouddriver.alicloud.model.Listener;
+import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials;
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class UpsertAliCloudLoadBalancerDescription {
+
+  @JsonIgnore private AliCloudCredentials credentials;
+
+  private String region;
+
+  private List<Listener> listeners;
+
+  private String loadBalancerId;
+
+  private Long resourceOwnerId;
+
+  private String clientToken;
+
+  private String addressIPVersion;
+
+  private String masterZoneId;
+
+  private Integer duration;
+
+  private String resourceGroupId;
+
+  private String loadBalancerName;
+
+  private String addressType;
+
+  private String slaveZoneId;
+
+  private String loadBalancerSpec;
+
+  private Boolean autoPay;
+
+  private String address;
+
+  private String resourceOwnerAccount;
+
+  private Integer bandwidth;
+
+  private String ownerAccount;
+
+  private Long ownerId;
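+  // Explicit @JsonProperty keeps Alibaba Cloud's exact "vSwitchId" casing,
+  // which bean-style property-name derivation would otherwise alter.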
+  @JsonProperty("vSwitchId")
+  private String vSwitchId;
+
+  private String internetChargeType;
+
+  private String vpcId;
+
+  private String payType;
+
+  private String pricingCycle;
+
+  private String deleteProtection;
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/UpsertAliCloudSecurityGroupDescription.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/UpsertAliCloudSecurityGroupDescription.java
new file mode 100644
index 00000000000..1369291413f
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/description/UpsertAliCloudSecurityGroupDescription.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.description;
+
+import com.aliyuncs.ecs.model.v20140526.AuthorizeSecurityGroupEgressRequest;
+import com.aliyuncs.ecs.model.v20140526.AuthorizeSecurityGroupRequest;
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class UpsertAliCloudSecurityGroupDescription extends BaseAliCloudDescription {
+
+  private Long resourceOwnerId;
+
+  private String resourceOwnerAccount;
+
+  private String clientToken;
+
+  private String ownerAccount;
+
+  private String description;
+
+  private Long ownerId;
+
+  private String securityGroupName;
+
+  private String securityGroupType;
+
+  private String resourceGroupId;
+
+  private String vpcId;
+
+  private List<Tag> tags;
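+  // Ingress and egress rules reuse the Aliyun SDK request types directly, so
+  // each rule's fields map 1:1 onto AuthorizeSecurityGroup(Egress)Request.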
+  private List<AuthorizeSecurityGroupRequest> securityGroupIngress;
+
+  private List<AuthorizeSecurityGroupEgressRequest> securityGroupEgress;
+
+  @Data
+  public static class Tag {
+
+    private String value;
+
+    private String key;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudLoadBalancerClassicAtomicOperation.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudLoadBalancerClassicAtomicOperation.java
new file mode 100644
index 00000000000..5171db4cc92
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudLoadBalancerClassicAtomicOperation.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops;
+
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.exceptions.ClientException;
+import com.aliyuncs.exceptions.ServerException;
+import com.aliyuncs.slb.model.v20140515.DeleteLoadBalancerRequest;
+import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersRequest;
+import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse;
+import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory;
+import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.alicloud.exception.AliCloudException;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DeleteAliCloudLoadBalancerClassicAtomicOperation implements AtomicOperation<Void> {
+
+  private final Logger log =
+      LoggerFactory.getLogger(DeleteAliCloudLoadBalancerClassicAtomicOperation.class);
+
+  private final UpsertAliCloudLoadBalancerDescription description;
+
+  private final ClientFactory clientFactory;
+
+  public DeleteAliCloudLoadBalancerClassicAtomicOperation(
+      UpsertAliCloudLoadBalancerDescription description, ClientFactory clientFactory) {
+    this.description = description;
+    this.clientFactory = clientFactory;
+  }
+
+  @Override
+  public Void operate(List priorOutputs) {
+
+    IAcsClient client =
+        clientFactory.createClient(
+            description.getRegion(),
+            description.getCredentials().getAccessKeyId(),
+            description.getCredentials().getAccessSecretKey());
+    DescribeLoadBalancersResponse.LoadBalancer loadBalancer = null;
+    DescribeLoadBalancersRequest queryRequest = new DescribeLoadBalancersRequest();
+    queryRequest.setLoadBalancerName(description.getLoadBalancerName());
+    DescribeLoadBalancersResponse queryResponse;
+    try {
+      queryResponse = client.getAcsResponse(queryRequest);
+
+      if (queryResponse.getLoadBalancers().size() > 0) {
+        loadBalancer = queryResponse.getLoadBalancers().get(0);
+        // The id has to come from the lookup response: the query request was
+        // built with only a name and never carried a load balancer id.
+        description.setLoadBalancerId(loadBalancer.getLoadBalancerId());
+      }
+
+    } catch (ServerException e) {
+      log.error(e.getMessage(), e);
+      throw new AliCloudException(e.getMessage());
+    } catch (ClientException e) {
+      log.error(e.getMessage(), e);
+      throw new AliCloudException(e.getMessage());
+    }
+
+    if (loadBalancer != null) {
+      DeleteLoadBalancerRequest request = new DeleteLoadBalancerRequest();
+      request.setLoadBalancerId(loadBalancer.getLoadBalancerId());
+      try {
+        client.getAcsResponse(request);
+      } catch (ServerException e) {
+        log.error(e.getMessage(), e);
+        throw new AliCloudException(e.getMessage());
+      } catch (ClientException e) {
+        log.error(e.getMessage(), e);
+        throw new AliCloudException(e.getMessage());
+      }
+    }
+
+    return null;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudSecurityGroupAtomicOperation.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudSecurityGroupAtomicOperation.java
new file mode 100644
index 00000000000..3341324876b
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudSecurityGroupAtomicOperation.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops;
+
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.ecs.model.v20140526.DeleteSecurityGroupRequest;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsRequest;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse.SecurityGroup;
+import com.aliyuncs.exceptions.ClientException;
+import com.aliyuncs.exceptions.ServerException;
+import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory;
+import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.DeleteAliCloudSecurityGroupDescription;
+import com.netflix.spinnaker.clouddriver.alicloud.exception.AliCloudException;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import groovy.util.logging.Slf4j;
+import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Slf4j
+public class DeleteAliCloudSecurityGroupAtomicOperation implements AtomicOperation<Void> {
+
+  private final Logger log =
+      LoggerFactory.getLogger(DeleteAliCloudSecurityGroupAtomicOperation.class);
+
+  private final DeleteAliCloudSecurityGroupDescription description;
+
+  private final ClientFactory clientFactory;
+
+  public DeleteAliCloudSecurityGroupAtomicOperation(
+      DeleteAliCloudSecurityGroupDescription description, ClientFactory clientFactory) {
+    this.description = description;
+    this.clientFactory = clientFactory;
+  }
+
+  @Override
+  public Void operate(List priorOutputs) {
+    for (String region : description.getRegions()) {
+      IAcsClient client =
+          clientFactory.createClient(
+              region,
+              description.getCredentials().getAccessKeyId(),
+              description.getCredentials().getAccessSecretKey());
+      DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
+          new DescribeSecurityGroupsRequest();
+      describeSecurityGroupsRequest.setSecurityGroupName(description.getSecurityGroupName());
+      describeSecurityGroupsRequest.setPageSize(50);
+      DescribeSecurityGroupsResponse describeSecurityGroupsResponse;
+      try {
+        describeSecurityGroupsResponse = client.getAcsResponse(describeSecurityGroupsRequest);
+        List<SecurityGroup> securityGroups = describeSecurityGroupsResponse.getSecurityGroups();
+        for (SecurityGroup securityGroup : securityGroups) {
+          DeleteSecurityGroupRequest request = new DeleteSecurityGroupRequest();
+          request.setSecurityGroupId(securityGroup.getSecurityGroupId());
+          client.getAcsResponse(request);
+        }
+
+      } catch (ServerException e) {
+        log.info(e.getMessage());
+        throw new AliCloudException(e);
+      } catch (ClientException e) {
+        log.info(e.getMessage());
+        throw new AliCloudException(e);
+      }
+    }
+    return null;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerAtomicOperation.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerAtomicOperation.java
new file mode 100644
index 00000000000..92676627949
--- /dev/null
+++
b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerAtomicOperation.java @@ -0,0 +1,340 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import com.aliyuncs.IAcsClient; +import com.aliyuncs.exceptions.ClientException; +import com.aliyuncs.exceptions.ServerException; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerHTTPListenerRequest; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerHTTPSListenerRequest; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerRequest; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerResponse; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerTCPListenerRequest; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerUDPListenerRequest; +import com.aliyuncs.slb.model.v20140515.DeleteLoadBalancerListenerRequest; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerAttributeRequest; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerAttributeResponse; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerAttributeResponse.ListenerPortAndProtocal; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersRequest; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse; +import com.aliyuncs.slb.model.v20140515.SetLoadBalancerHTTPListenerAttributeRequest; +import com.aliyuncs.slb.model.v20140515.SetLoadBalancerHTTPSListenerAttributeRequest; +import com.aliyuncs.slb.model.v20140515.SetLoadBalancerStatusRequest; +import com.aliyuncs.slb.model.v20140515.SetLoadBalancerTCPListenerAttributeRequest; +import com.aliyuncs.slb.model.v20140515.SetLoadBalancerUDPListenerAttributeRequest; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.alicloud.exception.AliCloudException; +import com.netflix.spinnaker.clouddriver.alicloud.model.Listener; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import groovy.util.logging.Slf4j; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Slf4j +public class UpsertAliCloudLoadBalancerAtomicOperation implements AtomicOperation { + + private final Logger logger = + LoggerFactory.getLogger(UpsertAliCloudLoadBalancerAtomicOperation.class); + + private final ObjectMapper objectMapper; + + private final UpsertAliCloudLoadBalancerDescription description; + + private final ClientFactory 
clientFactory;
+
+  public UpsertAliCloudLoadBalancerAtomicOperation(
+      UpsertAliCloudLoadBalancerDescription description,
+      ObjectMapper objectMapper,
+      ClientFactory clientFactory) {
+    this.description = description;
+    this.objectMapper = objectMapper;
+    this.clientFactory = clientFactory;
+  }
+
+  private static final String STATUS = "active";
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  /**
+   * Create the load balancer if it does not already exist, then reconcile its listeners.
+   *
+   * @param priorOutputs outputs of earlier operations in this task (unused)
+   * @return a map of load balancer name to load balancer id
+   */
+  @Override
+  public Map<String, String> operate(List priorOutputs) {
+    Map<String, String> resultMap = new HashMap<>(30);
+
+    IAcsClient client =
+        clientFactory.createClient(
+            description.getRegion(),
+            description.getCredentials().getAccessKeyId(),
+            description.getCredentials().getAccessSecretKey());
+    DescribeLoadBalancersResponse.LoadBalancer loadBalancerT = null;
+    // Create or update the load balancing instance.
+    // First, look the instance up by name under this account.
+    DescribeLoadBalancersRequest queryRequest = new DescribeLoadBalancersRequest();
+    queryRequest.setLoadBalancerName(description.getLoadBalancerName());
+    DescribeLoadBalancersResponse queryResponse;
+    try {
+      queryResponse = client.getAcsResponse(queryRequest);
+      // The first load balancer matching the name (if any) is reused below.
+
+      if (queryResponse.getLoadBalancers().size() > 0) {
+        loadBalancerT = queryResponse.getLoadBalancers().get(0);
+      }
+
+    } catch (ServerException e) {
+      logger.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    } catch (ClientException e) {
+      logger.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    }
+
+    if (loadBalancerT != null) {
+      description.setLoadBalancerId(loadBalancerT.getLoadBalancerId());
+    } else {
+      CreateLoadBalancerRequest loadBalancerRequest =
+          objectMapper.convertValue(description, CreateLoadBalancerRequest.class);
+      loadBalancerRequest.setLoadBalancerName(description.getLoadBalancerName());
+      if (!StringUtils.isEmpty(description.getVSwitchId())) {
+        loadBalancerRequest.setVSwitchId(description.getVSwitchId());
+      }
+      if ("internet".equalsIgnoreCase(loadBalancerRequest.getAddressType())) {
+        loadBalancerRequest.setVSwitchId("");
+      }
+
+      // Instance delete protection off
+      loadBalancerRequest.setDeleteProtection("off");
+      CreateLoadBalancerResponse loadBalancerResponse;
+      try {
+        loadBalancerResponse = client.getAcsResponse(loadBalancerRequest);
+        description.setLoadBalancerId(loadBalancerResponse.getLoadBalancerId());
+
+      } catch (ServerException e) {
+        logger.info(e.getMessage());
+        throw new AliCloudException(e.getMessage());
+      } catch (ClientException e) {
+        logger.info(e.getMessage());
+        throw new AliCloudException(e.getMessage());
+      }
+    }
+
+    if (StringUtils.isEmpty(description.getLoadBalancerId())) {
+      return null;
+    }
+
+    try {
+      createListener(loadBalancerT != null, client);
+    } catch (ServerException e) {
+      logger.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    } catch (ClientException e) {
+      logger.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    }
+
+    // Ensure the (possibly newly created) instance is in the active state.
+    SetLoadBalancerStatusRequest statusRequest = new SetLoadBalancerStatusRequest();
+    statusRequest.setLoadBalancerId(description.getLoadBalancerId());
+    statusRequest.setLoadBalancerStatus(STATUS);
+    try {
+      client.getAcsResponse(statusRequest);
+    } catch (ServerException e) {
+      logger.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    } catch (ClientException e) {
+      logger.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    }
+
+    resultMap.put(description.getLoadBalancerName(), description.getLoadBalancerId());
+
+    return resultMap;
+  }
+
+  private void createListener(boolean loadBalancerExists, IAcsClient client)
+      throws ClientException {
+
+    if (!loadBalancerExists) {
+      addListener(description.getListeners(), client);
+    } else {
+      // Query the load balancer's current listeners (protocol and port).
+      DescribeLoadBalancerAttributeRequest describeLoadBalancerAttributeRequest =
+          new DescribeLoadBalancerAttributeRequest();
+      describeLoadBalancerAttributeRequest.setLoadBalancerId(description.getLoadBalancerId());
+      DescribeLoadBalancerAttributeResponse describeLoadBalancerAttributeResponse =
+          client.getAcsResponse(describeLoadBalancerAttributeRequest);
+      List<ListenerPortAndProtocal> listenerPortsAndProtocal =
+          describeLoadBalancerAttributeResponse.getListenerPortsAndProtocal();
+      Set<String> deleteListenerList = new HashSet<>();
+      Set<Listener> updateListenerList = new HashSet<>();
+      Set<Listener> createListenerList = new HashSet<>();
+
+      // Requested listeners whose protocol+port signature matches an existing
+      // listener are updated in place.
+      for (ListenerPortAndProtocal listenerPortAndProtocal : listenerPortsAndProtocal) {
+        String sign =
+            listenerPortAndProtocal.getListenerProtocal().toUpperCase()
+                + listenerPortAndProtocal.getListenerPort();
+        for (Listener listener : description.getListeners()) {
+          String sign2 =
+              listener.getListenerProtocal() + String.valueOf(listener.getListenerPort());
+          if (sign.equals(sign2)) {
+            updateListenerList.add(listener);
+          }
+        }
+      }
+
+      // Existing listeners that match no requested listener are deleted.
+      for (ListenerPortAndProtocal listenerPortAndProtocal : listenerPortsAndProtocal) {
+        String sign =
+            listenerPortAndProtocal.getListenerProtocal().toUpperCase()
+                + listenerPortAndProtocal.getListenerPort();
+        boolean matched = false;
+        for (Listener listener : updateListenerList) {
+          String sign2 =
+              listener.getListenerProtocal() + String.valueOf(listener.getListenerPort());
+          if (sign.equals(sign2)) {
+            matched = true;
+            break;
+          }
+        }
+        if (!matched) {
+          deleteListenerList.add(listenerPortAndProtocal.getListenerPort() + "");
+        }
+      }
+
+      // Requested listeners that matched nothing above still need to be created.
+      for (Listener listener : description.getListeners()) {
+        if (!updateListenerList.contains(listener)) {
+          createListenerList.add(listener);
+        }
+      }
+
+      // Delete listeners
+      for (String port : deleteListenerList) {
+        DeleteLoadBalancerListenerRequest deleteLoadBalancerListenerRequest =
+            new DeleteLoadBalancerListenerRequest();
+        deleteLoadBalancerListenerRequest.setListenerPort(Integer.valueOf(port));
+        deleteLoadBalancerListenerRequest.setLoadBalancerId(description.getLoadBalancerId());
+
client.getAcsResponse(deleteLoadBalancerListenerRequest); + } + + // Modify listeners + for (Listener listener : updateListenerList) { + switch (listener.getListenerProtocal()) { + case HTTPS: + SetLoadBalancerHTTPSListenerAttributeRequest setCreateHTTPSListenerRequest = + objectMapper.convertValue( + listener, SetLoadBalancerHTTPSListenerAttributeRequest.class); + setCreateHTTPSListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(setCreateHTTPSListenerRequest); + break; + case TCP: + SetLoadBalancerTCPListenerAttributeRequest setCreateTCPListenerRequest = + objectMapper.convertValue( + listener, SetLoadBalancerTCPListenerAttributeRequest.class); + setCreateTCPListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(setCreateTCPListenerRequest); + break; + case UDP: + SetLoadBalancerUDPListenerAttributeRequest setCreateUDPListenerRequest = + objectMapper.convertValue( + listener, SetLoadBalancerUDPListenerAttributeRequest.class); + setCreateUDPListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(setCreateUDPListenerRequest); + break; + default: + SetLoadBalancerHTTPListenerAttributeRequest setHttpListenerRequest = + objectMapper.convertValue( + listener, SetLoadBalancerHTTPListenerAttributeRequest.class); + setHttpListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(setHttpListenerRequest); + break; + } + } + // Create listeners + addListener(new ArrayList<>(createListenerList), client); + } + } + + private void addListener(List createListenerList, IAcsClient client) + throws ClientException { + for (Listener listener : createListenerList) { + switch (listener.getListenerProtocal()) { + case HTTPS: + createHTTPSListener(client, listener); + break; + case TCP: + createTCPListener(client, listener); + break; + case UDP: + createUDPListener(client, listener); + break; + default: + createHTTPListener(client, listener); + break; + } + } + } + + private void createHTTPListener(IAcsClient client, Listener listener) throws ClientException { + CreateLoadBalancerHTTPListenerRequest httpListenerRequest = + objectMapper.convertValue(listener, CreateLoadBalancerHTTPListenerRequest.class); + httpListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(httpListenerRequest); + } + + private void createHTTPSListener(IAcsClient client, Listener listener) throws ClientException { + CreateLoadBalancerHTTPSListenerRequest createHTTPSListenerRequest = + objectMapper.convertValue(listener, CreateLoadBalancerHTTPSListenerRequest.class); + createHTTPSListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(createHTTPSListenerRequest); + } + + private void createTCPListener(IAcsClient client, Listener listener) throws ClientException { + CreateLoadBalancerTCPListenerRequest createTCPListenerRequest = + objectMapper.convertValue(listener, CreateLoadBalancerTCPListenerRequest.class); + createTCPListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(createTCPListenerRequest); + } + + private void createUDPListener(IAcsClient client, Listener listener) throws ClientException { + CreateLoadBalancerUDPListenerRequest createUDPListenerRequest = + objectMapper.convertValue(listener, CreateLoadBalancerUDPListenerRequest.class); + createUDPListenerRequest.setLoadBalancerId(description.getLoadBalancerId()); + client.getAcsResponse(createUDPListenerRequest); + } +} diff 
--git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerResult.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerResult.java new file mode 100644 index 00000000000..09a2a4b7a64 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerResult.java @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import groovy.transform.Immutable; +import java.util.HashMap; +import java.util.Map; + +public class UpsertAliCloudLoadBalancerResult { + /** Association of region -> loadBalancer */ + Map loadBalancers = new HashMap<>(); + + @Immutable + static class LoadBalancer { + String name; + String dnsName; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudSecurityGroupAtomicOperation.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudSecurityGroupAtomicOperation.java new file mode 100644 index 00000000000..77202ac83f4 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudSecurityGroupAtomicOperation.java @@ -0,0 +1,448 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
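The upsert operation earlier in this patch classifies listeners by a protocol-plus-port signature: an existing listener whose signature matches a requested one is updated in place, while unmatched existing listeners are deleted and unmatched requested listeners are created. A minimal illustration of the matching rule:

```java
class ListenerSignatureExample {
  // Mirrors the comparison key built in UpsertAliCloudLoadBalancerAtomicOperation.
  static String sign(String protocol, int port) {
    return protocol.toUpperCase() + port;
  }

  public static void main(String[] args) {
    String existing = sign("http", 80); // from DescribeLoadBalancerAttribute
    String desired = sign("HTTP", 80);  // from the requested Listener list
    System.out.println(existing.equals(desired)); // true -> update in place
  }
}
```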
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops;
+
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.ecs.model.v20140526.*;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupAttributeResponse.Permission;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse.SecurityGroup;
+import com.aliyuncs.exceptions.ClientException;
+import com.aliyuncs.exceptions.ServerException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory;
+import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudSecurityGroupDescription;
+import com.netflix.spinnaker.clouddriver.alicloud.exception.AliCloudException;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import groovy.util.logging.Slf4j;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Slf4j
+public class UpsertAliCloudSecurityGroupAtomicOperation implements AtomicOperation<Void> {
+
+  private final Logger log =
+      LoggerFactory.getLogger(UpsertAliCloudSecurityGroupAtomicOperation.class);
+
+  private final UpsertAliCloudSecurityGroupDescription description;
+
+  private final ClientFactory clientFactory;
+
+  private final ObjectMapper objectMapper;
+
+  public UpsertAliCloudSecurityGroupAtomicOperation(
+      UpsertAliCloudSecurityGroupDescription description,
+      ClientFactory clientFactory,
+      ObjectMapper objectMapper) {
+    this.description = description;
+    this.clientFactory = clientFactory;
+    this.objectMapper = objectMapper;
+  }
+
+  @Override
+  public Void operate(List priorOutputs) {
+    IAcsClient client =
+        clientFactory.createClient(
+            description.getRegion(),
+            description.getCredentials().getAccessKeyId(),
+            description.getCredentials().getAccessSecretKey());
+    DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
+        new DescribeSecurityGroupsRequest();
+    describeSecurityGroupsRequest.setSecurityGroupName(description.getSecurityGroupName());
+    describeSecurityGroupsRequest.setPageSize(50);
+    DescribeSecurityGroupsResponse describeSecurityGroupsResponse;
+    try {
+      describeSecurityGroupsResponse = client.getAcsResponse(describeSecurityGroupsRequest);
+      List<SecurityGroup> securityGroups = describeSecurityGroupsResponse.getSecurityGroups();
+      if (securityGroups.size() == 0) {
+        CreateSecurityGroupRequest createSecurityGroupRequest =
+            objectMapper.convertValue(description, CreateSecurityGroupRequest.class);
+        CreateSecurityGroupResponse createSecurityGroupResponse =
+            client.getAcsResponse(createSecurityGroupRequest);
+        SecurityGroup securityGroup = new SecurityGroup();
+        securityGroup.setSecurityGroupId(createSecurityGroupResponse.getSecurityGroupId());
+        securityGroups.add(securityGroup);
+      }
+
+      buildIngressRule(client, securityGroups.get(0).getSecurityGroupId());
+
+    } catch (ServerException e) {
+      log.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    } catch (ClientException e) {
+      log.info(e.getMessage());
+      throw new AliCloudException(e.getMessage());
+    }
+
+    return null;
+  }
+
+  private void buildIngressRule(IAcsClient client, String securityGroupId)
+      throws ClientException, ServerException {
+    DescribeSecurityGroupAttributeRequest securityGroupAttributeRequest =
+        new DescribeSecurityGroupAttributeRequest();
+    securityGroupAttributeRequest.setSecurityGroupId(securityGroupId);
+    securityGroupAttributeRequest.setDirection("ingress");
+    DescribeSecurityGroupAttributeResponse securityGroupAttribute =
+        client.getAcsResponse(securityGroupAttributeRequest);
+
+    // If the request carries no ingress rules, revoke every rule currently on the group.
+    if (description.getSecurityGroupIngress() == null
+        || description.getSecurityGroupIngress().size() == 0) {
+      for (Permission permission : securityGroupAttribute.getPermissions()) {
+        RevokeSecurityGroupRequest revokeSecurityGroupRequest =
+            objectMapper.convertValue(permission, RevokeSecurityGroupRequest.class);
+        revokeSecurityGroupRequest.setSecurityGroupId(securityGroupId);
+        client.getAcsResponse(revokeSecurityGroupRequest);
+      }
+      return;
+    }
+
+    // If the group has no rules yet, simply authorize every requested ingress rule.
+    if (securityGroupAttribute.getPermissions().size() == 0) {
+      if (description.getSecurityGroupIngress() != null) {
+        for (AuthorizeSecurityGroupRequest securityGroupIngress :
+            description.getSecurityGroupIngress()) {
+          securityGroupIngress.setSecurityGroupId(securityGroupId);
+          client.getAcsResponse(securityGroupIngress);
+        }
+      }
+      return;
+    }
+
+    // Rules present on both sides; these are modified in place below.
+    List<AuthorizeSecurityGroupRequest> updateList = new ArrayList<>();
+
+    // Cross-compare existing and requested rules. Whatever remains in
+    // securityGroupAttribute.getPermissions() afterwards must be revoked, and
+    // whatever remains in description.getSecurityGroupIngress() must be authorized.
+    Iterator<Permission> permissions = securityGroupAttribute.getPermissions().iterator();
+
+    while (permissions.hasNext()) {
+      Permission permission = permissions.next();
+      Iterator<AuthorizeSecurityGroupRequest> securityGroupIngress =
+          description.getSecurityGroupIngress().iterator();
+      while (securityGroupIngress.hasNext()) {
+        AuthorizeSecurityGroupRequest ingress = securityGroupIngress.next();
+        if (compareMethod(permission, ingress)) {
+          updateList.add(ingress);
+          securityGroupIngress.remove();
+          permissions.remove();
+          break;
+        }
+      }
+    }
+
+    for (Permission permission : securityGroupAttribute.getPermissions()) {
+      RevokeSecurityGroupRequest revokeSecurityGroupRequest =
+          objectMapper.convertValue(permission, RevokeSecurityGroupRequest.class);
+      revokeSecurityGroupRequest.setSecurityGroupId(securityGroupId);
+      client.getAcsResponse(revokeSecurityGroupRequest);
+    }
+
+    for (AuthorizeSecurityGroupRequest securityGroupIngress :
+        description.getSecurityGroupIngress()) {
+      securityGroupIngress.setSecurityGroupId(securityGroupId);
+      client.getAcsResponse(securityGroupIngress);
+    }
+
+    for (AuthorizeSecurityGroupRequest authorizeSecurityGroupRequest : updateList) {
+      ModifySecurityGroupRuleRequest modifySecurityGroupRuleRequest =
+          objectMapper.convertValue(
+              authorizeSecurityGroupRequest, ModifySecurityGroupRuleRequest.class);
+      modifySecurityGroupRuleRequest.setSecurityGroupId(securityGroupId);
+      client.getAcsResponse(modifySecurityGroupRuleRequest);
+    }
+  }
+
+  private boolean compareMethod(Permission permission, AuthorizeSecurityGroupRequest ingress) {
+    if (StringUtils.isNotEmpty(permission.getSourceCidrIp())
+        && StringUtils.isNotEmpty(ingress.getSourceCidrIp())) {
+      CidrIp cidrIp1 = new CidrIp();
+      cidrIp1.setIpProtocol(
+          StringUtils.isNotEmpty(permission.getIpProtocol())
+              ? permission.getIpProtocol().toLowerCase()
+              : permission.getIpProtocol());
+      cidrIp1.setPortRange(permission.getPortRange());
+      cidrIp1.setSourcePortRange(permission.getSourcePortRange());
+      cidrIp1.setNicType(permission.getNicType());
+      cidrIp1.setPolicy(
+          StringUtils.isNotEmpty(permission.getPolicy())
+              ? permission.getPolicy().toLowerCase()
+              : permission.getPolicy());
+      cidrIp1.setDestCidrIp(permission.getDestCidrIp());
+      cidrIp1.setSourceCidrIp(permission.getSourceCidrIp());
+
+      CidrIp cidrIp2 = new CidrIp();
+      cidrIp2.setIpProtocol(
+          StringUtils.isNotEmpty(ingress.getIpProtocol())
+              ? ingress.getIpProtocol().toLowerCase()
+              : ingress.getIpProtocol());
+      cidrIp2.setPortRange(ingress.getPortRange());
+      cidrIp2.setSourcePortRange(ingress.getSourcePortRange());
+      cidrIp2.setNicType(ingress.getNicType());
+      cidrIp2.setPolicy(
+          StringUtils.isNotEmpty(ingress.getPolicy())
+              ? ingress.getPolicy().toLowerCase()
+              : ingress.getPolicy());
+      cidrIp2.setDestCidrIp(ingress.getDestCidrIp());
+      cidrIp2.setSourceCidrIp(ingress.getSourceCidrIp());
+      return cidrIp1.equals(cidrIp2);
+    }
+
+    // Group-sourced rules are compared by their source group id.
+    if (StringUtils.isNotEmpty(permission.getSourceGroupId())
+        && StringUtils.isNotEmpty(ingress.getSourceGroupId())) {
+      GroupId groupId1 = new GroupId();
+      groupId1.setIpProtocol(
+          StringUtils.isNotEmpty(permission.getIpProtocol())
+              ? permission.getIpProtocol().toLowerCase()
+              : permission.getIpProtocol());
+      groupId1.setPortRange(permission.getPortRange());
+      groupId1.setSourcePortRange(permission.getSourcePortRange());
+      groupId1.setNicType(permission.getNicType());
+      groupId1.setPolicy(
+          StringUtils.isNotEmpty(permission.getPolicy())
+              ? permission.getPolicy().toLowerCase()
+              : permission.getPolicy());
+      groupId1.setDestCidrIp(permission.getDestCidrIp());
+      groupId1.setSourceGroupOwnerAccount(permission.getSourceGroupOwnerAccount());
+      groupId1.setSourceGroupId(permission.getSourceGroupId());
+
+      GroupId groupId2 = new GroupId();
+      groupId2.setIpProtocol(
+          StringUtils.isNotEmpty(ingress.getIpProtocol())
+              ? ingress.getIpProtocol().toLowerCase()
+              : ingress.getIpProtocol());
+      groupId2.setPortRange(ingress.getPortRange());
+      groupId2.setSourcePortRange(ingress.getSourcePortRange());
+      groupId2.setNicType(ingress.getNicType());
+      groupId2.setPolicy(
+          StringUtils.isNotEmpty(ingress.getPolicy())
+              ? ingress.getPolicy().toLowerCase()
+              : ingress.getPolicy());
+      groupId2.setDestCidrIp(ingress.getDestCidrIp());
+      groupId2.setSourceGroupOwnerAccount(ingress.getSourceGroupOwnerAccount());
+      groupId2.setSourceGroupId(ingress.getSourceGroupId());
+      return groupId1.equals(groupId2);
+    }
+
+    return false;
+  }
+
+  public static class CidrIp {
+    private String ipProtocol;
+    private String portRange;
+    private String sourcePortRange;
+    private String nicType;
+    private String policy;
+    private String destCidrIp;
+    private String sourceCidrIp;
+
+    public String getIpProtocol() {
+      return ipProtocol;
+    }
+
+    public void setIpProtocol(String ipProtocol) {
+      this.ipProtocol = ipProtocol;
+    }
+
+    public String getPortRange() {
+      return portRange;
+    }
+
+    public void setPortRange(String portRange) {
+      this.portRange = portRange;
+    }
+
+    public String getSourcePortRange() {
+      return sourcePortRange;
+    }
+
+    public void setSourcePortRange(String sourcePortRange) {
+      this.sourcePortRange = sourcePortRange;
+    }
+
+    public String getNicType() {
+      return nicType;
+    }
+
+    public void setNicType(String nicType) {
+      this.nicType = nicType;
+    }
+
+    public String getPolicy() {
+      return policy;
+    }
+
+    public void setPolicy(String policy) {
+      this.policy = policy;
+    }
+
+    public String getDestCidrIp() {
+      return destCidrIp;
+    }
+
+    public void setDestCidrIp(String destCidrIp) {
+      this.destCidrIp = destCidrIp;
+    }
+
+    public String getSourceCidrIp() {
+      return sourceCidrIp;
+    }
+
+    public void setSourceCidrIp(String sourceCidrIp) {
+      this.sourceCidrIp = sourceCidrIp;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      CidrIp cidrIp = (CidrIp) o;
+      return Objects.equals(ipProtocol, cidrIp.ipProtocol)
+          && Objects.equals(portRange, cidrIp.portRange)
+          && Objects.equals(sourcePortRange, cidrIp.sourcePortRange)
+          && Objects.equals(nicType, cidrIp.nicType)
+          && Objects.equals(policy, cidrIp.policy)
+          && Objects.equals(destCidrIp, cidrIp.destCidrIp)
+          && Objects.equals(sourceCidrIp, cidrIp.sourceCidrIp);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(
+          ipProtocol, portRange, sourcePortRange, nicType, policy, destCidrIp, sourceCidrIp);
+    }
+  }
+
+  public static class GroupId {
+    private String ipProtocol;
+    private String portRange;
+    private String sourcePortRange;
+    private String nicType;
+    private String policy;
+    private String destCidrIp;
+    private String sourceGroupOwnerAccount;
+    private String sourceGroupId;
+
+    public String getIpProtocol() {
+      return ipProtocol;
+    }
+
+    public void setIpProtocol(String ipProtocol) {
+      this.ipProtocol = ipProtocol;
+    }
+
+    public String getPortRange() {
+      return portRange;
+    }
+
+    public void setPortRange(String portRange) {
+      this.portRange = portRange;
+    }
+
+    public String getSourcePortRange() {
+      return sourcePortRange;
+    }
+
+    public void setSourcePortRange(String sourcePortRange) {
+      this.sourcePortRange = sourcePortRange;
+    }
+
+    public String getNicType() {
+      return nicType;
+    }
+
+    public void setNicType(String nicType) {
+      this.nicType = nicType;
+    }
+
+    public String getPolicy() {
+      return policy;
+    }
+
+    public void setPolicy(String policy) {
+      this.policy = policy;
+    }
+
+    public String getDestCidrIp() {
+      return destCidrIp;
+    }
+
+    public void setDestCidrIp(String destCidrIp) {
+      this.destCidrIp = destCidrIp;
+    }
+
+    public String getSourceGroupOwnerAccount() {
+      return sourceGroupOwnerAccount;
+    }
+
+    public void setSourceGroupOwnerAccount(String sourceGroupOwnerAccount)
{ + this.sourceGroupOwnerAccount = sourceGroupOwnerAccount; + } + + public String getSourceGroupId() { + return sourceGroupId; + } + + public void setSourceGroupId(String sourceGroupId) { + this.sourceGroupId = sourceGroupId; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GroupId groupId = (GroupId) o; + return Objects.equals(ipProtocol, groupId.ipProtocol) + && Objects.equals(portRange, groupId.portRange) + && Objects.equals(sourcePortRange, groupId.sourcePortRange) + && Objects.equals(nicType, groupId.nicType) + && Objects.equals(policy, groupId.policy) + && Objects.equals(destCidrIp, groupId.destCidrIp) + && Objects.equals(sourceGroupOwnerAccount, groupId.sourceGroupOwnerAccount) + && Objects.equals(sourceGroupId, groupId.sourceGroupId); + } + + @Override + public int hashCode() { + return Objects.hash( + ipProtocol, + portRange, + sourcePortRange, + nicType, + policy, + destCidrIp, + sourceGroupOwnerAccount, + sourceGroupId); + } + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/exception/AliCloudException.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/exception/AliCloudException.java new file mode 100644 index 00000000000..1ddd247a2a8 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/exception/AliCloudException.java @@ -0,0 +1,14 @@ +package com.netflix.spinnaker.clouddriver.alicloud.exception; + +import com.netflix.spinnaker.kork.exceptions.IntegrationException; + +public class AliCloudException extends IntegrationException { + + public AliCloudException(String message) { + super(message); + } + + public AliCloudException(Exception e) { + super(e); + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudCluster.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudCluster.java new file mode 100644 index 00000000000..f5f91d0b015 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudCluster.java @@ -0,0 +1,68 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
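compareMethod above normalizes protocol and policy to lower case before the field-by-field comparison, so values like "TCP"/"Accept" returned by the API match "tcp"/"accept" in a request. A small demonstration using the nested CidrIp type:

```java
import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.UpsertAliCloudSecurityGroupAtomicOperation.CidrIp;

class RuleComparisonExample {
  public static void main(String[] args) {
    CidrIp existing = new CidrIp();
    existing.setIpProtocol("tcp"); // compareMethod lower-cases before setting
    existing.setPortRange("80/80");
    existing.setSourceCidrIp("10.0.0.0/16");

    CidrIp requested = new CidrIp();
    requested.setIpProtocol("tcp");
    requested.setPortRange("80/80");
    requested.setSourceCidrIp("10.0.0.0/16");

    // Equal rules are treated as in-place updates rather than revoke-and-authorize.
    System.out.println(existing.equals(requested)); // true
  }
}
```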
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.model.Cluster; +import java.io.Serializable; +import java.util.Set; + +public class AliCloudCluster implements Cluster, Serializable { + + private String name; + private String type; + private String accountName; + private Set serverGroups; + private Set loadBalancers; + + public AliCloudCluster( + String name, + String type, + String accountName, + Set serverGroups, + Set loadBalancers) { + this.name = name; + this.type = type; + this.accountName = accountName; + this.serverGroups = serverGroups; + this.loadBalancers = loadBalancers; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getAccountName() { + return accountName; + } + + @Override + public Set getServerGroups() { + return serverGroups; + } + + @Override + public Set getLoadBalancers() { + return loadBalancers; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudInstance.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudInstance.java new file mode 100644 index 00000000000..ce09d370951 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudInstance.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.alicloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import com.netflix.spinnaker.clouddriver.model.Instance;
+import java.util.List;
+import java.util.Map;
+
+public class AliCloudInstance implements Instance {
+
+  private String name;
+  private Long launchTime;
+  private String zone;
+  private String providerType;
+  private String cloudProvider;
+  private HealthState healthState;
+  private List<Map<String, Object>> health;
+
+  public AliCloudInstance(
+      String name,
+      Long launchTime,
+      String zone,
+      String providerType,
+      String cloudProvider,
+      HealthState healthState,
+      List<Map<String, Object>> health) {
+    this.name = name;
+    this.launchTime = launchTime;
+    this.zone = zone;
+    this.providerType = providerType;
+    this.cloudProvider = cloudProvider;
+    this.healthState = healthState;
+    this.health = health;
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public Long getLaunchTime() {
+    return launchTime;
+  }
+
+  @Override
+  public String getZone() {
+    return zone;
+  }
+
+  @Override
+  public String getProviderType() {
+    return providerType;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return cloudProvider;
+  }
+
+  @Override
+  public HealthState getHealthState() {
+    return healthState;
+  }
+
+  @Override
+  public List<Map<String, Object>> getHealth() {
+    return health;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudKeyPair.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudKeyPair.java
new file mode 100644
index 00000000000..1dae3571fa6
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudKeyPair.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
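For context, the health list above carries one map per health source. The exact keys are a convention of Spinnaker health providers rather than a contract, so the "type" and "state" entries in this sketch are illustrative, as are the IDs.

```java
import com.netflix.spinnaker.clouddriver.model.HealthState;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class AliCloudInstanceExample {
  static AliCloudInstance example() {
    Map<String, Object> health = new HashMap<>();
    health.put("type", "AliCloud");                  // illustrative key
    health.put("state", HealthState.Up.toString());  // illustrative key
    return new AliCloudInstance(
        "i-placeholder",            // instance name/ID
        System.currentTimeMillis(), // launch time
        "cn-hangzhou-b",            // zone
        "alicloud",                 // provider type
        "alicloud",                 // cloud provider
        HealthState.Up,
        Collections.singletonList(health));
  }
}
```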
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.model.KeyPair; + +public class AliCloudKeyPair implements KeyPair { + + String account; + String region; + String keyName; + String keyFingerprint; + String cloudProvider = AliCloudProvider.ID; + + public AliCloudKeyPair(String account, String region, String keyName, String keyFingerprint) { + this.account = account; + this.region = region; + this.keyName = keyName; + this.keyFingerprint = keyFingerprint; + } + + @Override + public String getKeyName() { + return keyName; + } + + @Override + public String getKeyFingerprint() { + return keyFingerprint; + } + + public String getAccount() { + return account; + } + + public String getRegion() { + return region; + } + + public String getCloudProvider() { + return cloudProvider; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudLoadBalancer.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudLoadBalancer.java new file mode 100644 index 00000000000..d6f41acf653 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudLoadBalancer.java @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class AliCloudLoadBalancer implements LoadBalancer { + + String account; + + String region; + + String name; + + String loadBalancerId; + + String type = AliCloudProvider.ID; + + String cloudProvider = AliCloudProvider.ID; + + String vpcId; + + Set serverGroups = new HashSet<>(); + + Map labels = new HashMap<>(); + + public AliCloudLoadBalancer( + String account, String region, String name, String vpcId, String loadBalancerId) { + this.account = account; + this.region = region; + this.name = name; + this.vpcId = vpcId; + this.loadBalancerId = loadBalancerId; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getCloudProvider() { + return cloudProvider; + } + + @Override + public String getAccount() { + return account; + } + + @Override + public Set getServerGroups() { + return serverGroups; + } + + @Override + public Map getLabels() { + return labels; + } + + public String getRegion() { + return region; + } + + public String getVpcId() { + return vpcId; + } + + public String getLoadBalancerId() { + return loadBalancerId; + } + + public void setServerGroups(Set serverGroups) { + this.serverGroups = serverGroups; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudLoadBalancerType.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudLoadBalancerType.java new file mode 100644 index 00000000000..ffab4f64c57 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudLoadBalancerType.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +public enum AliCloudLoadBalancerType { + CLASSIC; + + public static AliCloudLoadBalancerType getByValue(String value) { + for (AliCloudLoadBalancerType lbt : values()) { + if (lbt.toString().equals(value)) { + return lbt; + } + } + return null; + } + + @Override + public String toString() { + return this.name().toLowerCase(); + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroup.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroup.java new file mode 100644 index 00000000000..4f687684f1f --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroup.java @@ -0,0 +1,104 @@ +/* + * Copyright 2019 Alibaba Group. 
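The load balancer type enum just defined round-trips through its lower-case string form and returns null for unknown values:

```java
class LoadBalancerTypeExample {
  public static void main(String[] args) {
    // toString() is overridden to the lower-case name, and getByValue matches on it.
    System.out.println(AliCloudLoadBalancerType.CLASSIC);                       // classic
    System.out.println(AliCloudLoadBalancerType.getByValue("classic") != null); // true
    System.out.println(AliCloudLoadBalancerType.getByValue("network"));         // null
  }
}
```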
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.model.SecurityGroup; +import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary; +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; +import java.util.Set; + +public class AliCloudSecurityGroup implements SecurityGroup { + + String type = AliCloudProvider.ID; + String cloudProvider = AliCloudProvider.ID; + String id; + String name; + String application; + String accountName; + String region; + Set inboundRules; + Set outboundRules; + String vpcId; + + public AliCloudSecurityGroup( + String id, + String name, + String application, + String accountName, + String region, + String vpcId, + Set inboundRules) { + this.id = id; + this.name = name; + this.application = application; + this.accountName = accountName; + this.region = region; + this.vpcId = vpcId; + this.inboundRules = inboundRules; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getCloudProvider() { + return cloudProvider; + } + + @Override + public String getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getApplication() { + return application; + } + + @Override + public String getAccountName() { + return accountName; + } + + @Override + public String getRegion() { + return region; + } + + @Override + public Set getInboundRules() { + return inboundRules; + } + + @Override + public Set getOutboundRules() { + return outboundRules; + } + + @Override + public SecurityGroupSummary getSummary() { + return new AliCloudSecurityGroupSummary(name, id, vpcId); + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroupRule.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroupRule.java new file mode 100644 index 00000000000..e3fd9ab72db --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroupRule.java @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
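The constructor argument order of the security group model above is easy to misread (id before name), so here is a small construction example with placeholder identifiers:

```java
import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary;
import java.util.Collections;

class AliCloudSecurityGroupExample {
  static SecurityGroupSummary summarize() {
    AliCloudSecurityGroup group =
        new AliCloudSecurityGroup(
            "sg-placeholder",  // id
            "spinnaker-demo",  // name
            "demo",            // application
            "my-account",      // accountName
            "cn-hangzhou",     // region
            "vpc-placeholder", // vpcId
            Collections.emptySet());
    return group.getSummary(); // carries the name, id and vpcId
  }
}
```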
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; +import java.util.Map; +import java.util.SortedSet; + +public class AliCloudSecurityGroupRule implements Rule { + + private String protocol; + + private SortedSet portRanges; + + private Map permissions; + + public AliCloudSecurityGroupRule( + String protocol, SortedSet portRanges, Map permissions) { + this.protocol = protocol; + this.portRanges = portRanges; + this.permissions = permissions; + } + + @Override + public SortedSet getPortRanges() { + return portRanges; + } + + @Override + public String getProtocol() { + return protocol; + } + + public Map getPermissions() { + return permissions; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroupSummary.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroupSummary.java new file mode 100644 index 00000000000..95b8bd00581 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSecurityGroupSummary.java @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary; + +public class AliCloudSecurityGroupSummary implements SecurityGroupSummary { + + String name; + String id; + String vpcId; + + public AliCloudSecurityGroupSummary(String name, String id, String vpcId) { + this.name = name; + this.id = id; + this.vpcId = vpcId; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getId() { + return id; + } + + public String getVpcId() { + return vpcId; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudServerGroup.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudServerGroup.java new file mode 100644 index 00000000000..d0b2fc53690 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudServerGroup.java @@ -0,0 +1,184 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
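A sketch of assembling a rule for the model above. Rule.PortRange is the standard clouddriver model type and is assumed here to be Comparable and to expose start/end port setters; the permissions map is free-form, so the key below is illustrative.

```java
import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule;
import java.util.Collections;
import java.util.SortedSet;
import java.util.TreeSet;

class SecurityGroupRuleExample {
  static AliCloudSecurityGroupRule httpFromAnywhere() {
    Rule.PortRange range = new Rule.PortRange();
    range.setStartPort(80);
    range.setEndPort(80);
    SortedSet<Rule.PortRange> ranges = new TreeSet<>(Collections.singleton(range));
    return new AliCloudSecurityGroupRule(
        "tcp", ranges, Collections.singletonMap("sourceCidrIp", "0.0.0.0/0"));
  }
}
```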
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import lombok.Data; + +@Data +public class AliCloudServerGroup implements ServerGroup, Serializable { + + private String name; + private String type; + private String cloudProvider; + private String region; + private boolean disabled; + private Long createdTime; + private Set zones; + private Set instances; + private Set loadBalancers; + private Set securityGroups; + private Map launchConfig; + private InstanceCounts instanceCounts; + private Capacity capacity; + private String creationTime; + private Map result; + private Map image; + private Map buildInfo; + + @Override + public String getName() { + return name; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getCloudProvider() { + return cloudProvider; + } + + @Override + public String getRegion() { + return region; + } + + @Override + public Boolean isDisabled() { + return disabled; + } + + @Override + public Long getCreatedTime() { + return createdTime; + } + + @Override + public Set getZones() { + return zones; + } + + @Override + public Set getInstances() { + return instances; + } + + @Override + public Set getLoadBalancers() { + return loadBalancers; + } + + @Override + public Set getSecurityGroups() { + return securityGroups; + } + + @Override + public Map getLaunchConfig() { + return launchConfig; + } + + @Override + public InstanceCounts getInstanceCounts() { + return instanceCounts; + } + + @Override + public Capacity getCapacity() { + return capacity; + } + + public Map getImage() { + return image; + } + + public void setImage(Map image) { + this.image = image; + } + + public Map getBuildInfo() { + return buildInfo; + } + + public void setBuildInfo(Map buildInfo) { + this.buildInfo = buildInfo; + } + + @Override + public ImageSummary getImageSummary() { + return getImagesSummary().getSummaries().get(0); + } + + @Override + public ImagesSummary getImagesSummary() { + return new ImagesSummary() { + @Override + public List getSummaries() { + List list = new ArrayList<>(); + InnSum innSum = new InnSum(image, buildInfo, name); + list.add(innSum); + return list; + } + }; + } + + public class InnSum implements ImageSummary { + + private Map i; + private Map bi; + private String serverGroupName; + + public InnSum(Map i, Map bi, String serverGroupName) { + this.i = i; + this.bi = bi; + this.serverGroupName = serverGroupName; + } + + @Override + public String getServerGroupName() { + return serverGroupName; + } + + @Override + public String getImageId() { + return String.valueOf(i.get("imageId")); + } + + @Override + public String getImageName() { + return String.valueOf(i.get("name")); + } + + @Override + public Map getImage() { + return i; + } + + @Override + public Map getBuildInfo() { + return bi; + } + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSubnet.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSubnet.java new file mode 100644 index 00000000000..017f2e0171c --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/AliCloudSubnet.java @@ -0,0 +1,76 @@ +/* + * Copyright 2019 Alibaba Group. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.netflix.spinnaker.clouddriver.model.Subnet; +import lombok.Data; + +@Data +public class AliCloudSubnet implements Subnet { + + private String account; + + private String region; + + private String status; + + private String vSwitchId; + + private String vSwitchName; + + private String vpcId; + + private String zoneId; + + private String type; + + public AliCloudSubnet() {} + + public AliCloudSubnet( + String account, + String region, + String status, + String vSwitchId, + String vSwitchName, + String vpcId, + String zoneId, + String type) { + this.account = account; + this.region = region; + this.status = status; + this.vSwitchId = vSwitchId; + this.vSwitchName = vSwitchName; + this.vpcId = vpcId; + this.zoneId = zoneId; + this.type = type; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getId() { + return vpcId; + } + + @Override + public String getPurpose() { + return vSwitchId; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/Listener.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/Listener.java new file mode 100644 index 00000000000..9bf168c68f1 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/Listener.java @@ -0,0 +1,128 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
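Note that for the subnet model above, getId() maps to the VPC ID and getPurpose() to the VSwitch ID. An illustrative construction with placeholder values:

```java
class AliCloudSubnetExample {
  static AliCloudSubnet example() {
    return new AliCloudSubnet(
        "my-account",      // account
        "cn-hangzhou",     // region
        "Available",       // status
        "vsw-placeholder", // vSwitchId, surfaced via getPurpose()
        "demo-vswitch",    // vSwitchName
        "vpc-placeholder", // vpcId, surfaced via getId()
        "cn-hangzhou-b",   // zoneId
        "alicloud");       // type
  }
}
```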
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.clouddriver.alicloud.model.alienum.ListenerType; +import lombok.Data; + +@Data +public class Listener { + + ListenerType listenerProtocal; + + private String healthCheck = "on"; + + private Integer healthCheckTimeout = 5; + + private Integer unhealthyThreshold = 3; + + private Integer healthyThreshold = 3; + + private Integer healthCheckInterval = 2; + + private Long resourceOwnerId; + + private String listenerForward; + + @JsonProperty("xForwardedFor") + private String xForwardedFor; + + private String healthCheckURI; + + private String description; + + private String aclStatus; + + private String scheduler; + + private String aclType; + + private Integer forwardPort; + + private Integer cookieTimeout; + + private String stickySessionType; + + @JsonProperty("vServerGroupId") + private String vServerGroupId; + + private String aclId; + + private Integer listenerPort; + + private String cookie; + + private String resourceOwnerAccount; + + private Integer bandwidth; + + private String stickySession; + + private String healthCheckDomain; + + private Integer requestTimeout; + + private String ownerAccount; + + private String gzip; + + private Long ownerId; + + private Integer idleTimeout; + + private String loadBalancerId; + + @JsonProperty("xForwardedFor_SLBIP") + private String xForwardedFor_SLBIP; + + private Integer backendServerPort; + + @JsonProperty("xForwardedFor_proto") + private String xForwardedFor_proto; + + @JsonProperty("xForwardedFor_SLBID") + private String xForwardedFor_SLBID; + + private Integer healthCheckConnectPort; + + private String healthCheckHttpCode; + + private String enableHttp2; + + @JsonProperty("tLSCipherPolicy") + private String tLSCipherPolicy; + + private String serverCertificateId; + + @JsonProperty("cACertificateId") + private String cACertificateId; + + private Integer healthCheckConnectTimeout; + + private Integer establishedTimeout; + + private Integer persistenceTimeout; + + private String healthCheckType; + + private String masterSlaveServerGroupId; + + private String healthCheckReq; + + private String healthCheckExp; +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/alienum/ListenerType.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/alienum/ListenerType.java new file mode 100644 index 00000000000..d61d3f85aa5 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/model/alienum/ListenerType.java @@ -0,0 +1,24 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.alicloud.model.alienum; + +public enum ListenerType { + HTTP, + HTTPS, + TCP, + UDP +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/AliProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/AliProvider.java new file mode 100644 index 00000000000..b43af1ec508 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/AliProvider.java @@ -0,0 +1,97 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider; + +import static com.netflix.spinnaker.clouddriver.alicloud.cache.Keys.Namespace.SECURITY_GROUPS; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; +import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.cache.SearchableProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import java.util.*; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; + +@ConditionalOnProperty("alicloud.enabled") +public class AliProvider extends AgentSchedulerAware implements SearchableProvider { + public static final String PROVIDER_NAME = AliProvider.class.getName(); + + private final AccountCredentialsRepository accountCredentialsRepository; + private final Collection agents; + + AliProvider(AccountCredentialsRepository accountCredentialsRepository, Collection agents) { + this.accountCredentialsRepository = accountCredentialsRepository; + this.agents = agents; + } + + @Override + public String getProviderName() { + return PROVIDER_NAME; + } + + @Override + public Collection getAgents() { + return agents; + } + + final Set defaultCaches = + new HashSet() { + { + add(LOAD_BALANCERS.ns); + add(CLUSTERS.ns); + add(SERVER_GROUPS.ns); + add(TARGET_GROUPS.ns); + add(INSTANCES.ns); + add(SECURITY_GROUPS.ns); + } + }; + + final Map urlMappingTemplates = + new HashMap() { + { + put( + SERVER_GROUPS.ns, + "/applications/${application.toLowerCase()}/clusters/$account/$cluster/$provider/serverGroups/$serverGroup?region=$region"); + put(LOAD_BALANCERS.ns, "/$provider/loadBalancers/$loadBalancer"); + put(CLUSTERS.ns, "/applications/${application.toLowerCase()}/clusters/$account/$cluster"); + put(SECURITY_GROUPS.ns, "/securityGroups/$account/$provider/$name?region=$region"); + } + }; + + final Map searchResultHydrators = + Collections.emptyMap(); + + @Override + public Set getDefaultCaches() { + return defaultCaches; + } + + @Override + public Map getUrlMappingTemplates() { + return urlMappingTemplates; + } + + @Override + public Map getSearchResultHydrators() { + return searchResultHydrators; + } + + @Override + public Map parseKey(String key) { + return Keys.parse(key); 
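+ // Keys.parse reverses the key layout defined in Keys (e.g. a key built by
+ // Keys.getLoadBalancerKey comes back as a map with account, region, name and
+ // vpcId entries), which feeds the urlMappingTemplates above. Field names here
+ // are illustrative; see Keys for the authoritative layout.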
+ } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/AliProviderConfig.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/AliProviderConfig.java new file mode 100644 index 00000000000..afd6358668f --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/AliProviderConfig.java @@ -0,0 +1,133 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.provider.agent.*; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudClientProvider; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.ProviderUtils; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import org.springframework.context.ApplicationContext; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.DependsOn; +import org.springframework.context.annotation.Scope; + +@Configuration +public class AliProviderConfig { + + @Bean + @DependsOn("synchronizeAliCloudAccounts") + public AliProvider aliProvider( + AccountCredentialsRepository accountCredentialsRepository, + AliCloudClientProvider aliCloudClientProvider, + AliCloudCredentialsProvider aliCloudCredentialsProvider, + Registry registry, + ObjectMapper objectMapper, + AliCloudProvider aliCloudProvider, + ApplicationContext ctx, + ClientFactory clientFactory) { + AliProvider provider = + new AliProvider( + accountCredentialsRepository, + Collections.newSetFromMap(new ConcurrentHashMap())); + synchronizeAliProvider( + provider, + accountCredentialsRepository, + aliCloudClientProvider, + aliCloudCredentialsProvider, + registry, + objectMapper, + aliCloudProvider, + ctx, + clientFactory); + return provider; + } + + @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) + @Bean + public AliProviderSynchronizer synchronizeAliProvider( + AliProvider aliProvider, + AccountCredentialsRepository accountCredentialsRepository, + AliCloudClientProvider aliCloudClientProvider, + AliCloudCredentialsProvider aliCloudCredentialsProvider, + Registry registry, + ObjectMapper 
objectMapper,
+ AliCloudProvider aliCloudProvider,
+ ApplicationContext ctx,
+ ClientFactory clientFactory) {
+
+ Set<String> scheduledAccounts = ProviderUtils.getScheduledAccounts(aliProvider);
+ Set<AliCloudCredentials> allAccounts =
+ ProviderUtils.buildThreadSafeSetOfAccounts(
+ accountCredentialsRepository, AliCloudCredentials.class);
+ List<Agent> newAgents = new LinkedList<>();
+
+ for (AliCloudCredentials credentials : allAccounts) {
+ if (credentials.getCloudProvider().equals(AliCloudProvider.ID)) {
+
+ for (String region : credentials.getRegions()) {
+ if (!scheduledAccounts.contains(credentials.getName())) {
+ newAgents.add(
+ new AliCloudLoadBalancerCachingAgent(
+ aliProvider,
+ region,
+ aliCloudClientProvider,
+ aliCloudCredentialsProvider,
+ aliCloudProvider,
+ objectMapper,
+ registry,
+ credentials,
+ clientFactory.createClient(
+ region, credentials.getAccessKeyId(), credentials.getAccessSecretKey())));
+ newAgents.add(
+ new AliCloudLoadBalancerInstanceStateCachingAgent(
+ ctx,
+ credentials,
+ region,
+ objectMapper,
+ clientFactory.createClient(
+ region, credentials.getAccessKeyId(), credentials.getAccessSecretKey())));
+ newAgents.add(
+ new AliCloudSecurityGroupCachingAgent(
+ credentials,
+ region,
+ objectMapper,
+ clientFactory.createClient(
+ region, credentials.getAccessKeyId(), credentials.getAccessSecretKey())));
+ }
+ }
+ }
+ }
+
+ aliProvider.getAgents().addAll(newAgents);
+ return new AliProviderSynchronizer();
+ }
+
+ class AliProviderSynchronizer {}
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/Config.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/Config.java
new file mode 100644
index 00000000000..4a010ec04dc
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/Config.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.provider;
+
+public class Config {}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerCachingAgent.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerCachingAgent.java
new file mode 100644
index 00000000000..0a6dd0f950b
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerCachingAgent.java
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*; + +import com.aliyuncs.IAcsClient; +import com.aliyuncs.exceptions.ClientException; +import com.aliyuncs.exceptions.ServerException; +import com.aliyuncs.slb.model.v20140515.*; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerAttributeResponse.ListenerPortAndProtocal; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse.LoadBalancer; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.*; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.alicloud.provider.AliProvider; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudClientProvider; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentialsProvider; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import java.util.*; +import org.apache.commons.lang3.StringUtils; + +public class AliCloudLoadBalancerCachingAgent implements CachingAgent, AccountAware, OnDemandAgent { + + private AliProvider aliProvider; + private AliCloudClientProvider aliCloudClientProvider; + private AliCloudCredentials account; + private String region; + private AliCloudCredentialsProvider aliCloudCredentialsProvider; + ObjectMapper objectMapper; + OnDemandMetricsSupport metricsSupport; + IAcsClient client; + + public AliCloudLoadBalancerCachingAgent( + AliProvider aliProvider, + String region, + AliCloudClientProvider aliCloudClientProvider, + AliCloudCredentialsProvider aliCloudCredentialsProvider, + AliCloudProvider aliCloudProvider, + ObjectMapper objectMapper, + Registry registry, + AliCloudCredentials credentials, + IAcsClient client) { + this.account = credentials; + this.aliProvider = aliProvider; + this.region = region; + this.aliCloudClientProvider = aliCloudClientProvider; + this.aliCloudCredentialsProvider = aliCloudCredentialsProvider; + this.objectMapper = objectMapper; + this.metricsSupport = + new OnDemandMetricsSupport( + registry, + this, + aliCloudProvider.getId() + + ":" + + aliCloudProvider.getId() + + ":" + + OnDemandAgent.OnDemandType.LoadBalancer); + this.client = client; + } + + static final Collection types = + Collections.unmodifiableCollection( + new ArrayList() { 
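+ // The agent is AUTHORITATIVE for load balancers (it owns those cache entries
+ // and their eviction) and only INFORMATIVE for instances (it annotates them,
+ // never evicts them).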
+ {
+ add(AUTHORITATIVE.forType(LOAD_BALANCERS.ns));
+ add(INFORMATIVE.forType(INSTANCES.ns));
+ }
+ });
+
+ @Override
+ public String getAccountName() {
+ return account.getName();
+ }
+
+ @Override
+ public Collection<AgentDataType> getProvidedDataTypes() {
+ return types;
+ }
+
+ @Override
+ public CacheResult loadData(ProviderCache providerCache) {
+ List<LoadBalancer> loadBalancers = new ArrayList<>();
+ Map<String, DescribeLoadBalancerAttributeResponse> loadBalancerAttributes = new HashMap<>(16);
+
+ DescribeLoadBalancersRequest queryRequest = new DescribeLoadBalancersRequest();
+ DescribeLoadBalancersResponse queryResponse;
+ try {
+ queryResponse = client.getAcsResponse(queryRequest);
+ if (queryResponse.getLoadBalancers().isEmpty()) {
+ return new DefaultCacheResult(new HashMap<>(16));
+ }
+
+ loadBalancers.addAll(queryResponse.getLoadBalancers());
+
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+
+ for (LoadBalancer loadBalancer : loadBalancers) {
+
+ DescribeLoadBalancerAttributeRequest describeLoadBalancerAttributeRequest =
+ new DescribeLoadBalancerAttributeRequest();
+ describeLoadBalancerAttributeRequest.setLoadBalancerId(loadBalancer.getLoadBalancerId());
+ DescribeLoadBalancerAttributeResponse describeLoadBalancerAttributeResponse;
+ try {
+ describeLoadBalancerAttributeResponse =
+ client.getAcsResponse(describeLoadBalancerAttributeRequest);
+ loadBalancerAttributes.put(
+ loadBalancer.getLoadBalancerName(), describeLoadBalancerAttributeResponse);
+
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+ }
+
+ return buildCacheResult(loadBalancers, loadBalancerAttributes, client);
+ }
+
+ @Override
+ public OnDemandResult handle(ProviderCache providerCache, Map data) {
+ List<LoadBalancer> loadBalancers = new ArrayList<>();
+ Map<String, DescribeLoadBalancerAttributeResponse> loadBalancerAttributes = new HashMap<>(16);
+
+ DescribeLoadBalancersRequest queryRequest = new DescribeLoadBalancersRequest();
+ queryRequest.setLoadBalancerName((String) data.get("loadBalancerName"));
+ DescribeLoadBalancersResponse queryResponse;
+ String loadBalancerId;
+
+ queryResponse =
+ metricsSupport.readData(
+ () -> {
+ try {
+ return client.getAcsResponse(queryRequest);
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+ return null;
+ });
+
+ // The query can fail (readData returns null) or legitimately match nothing;
+ // in either case there is nothing to refresh on demand.
+ if (queryResponse == null || queryResponse.getLoadBalancers().isEmpty()) {
+ return null;
+ }
+
+ loadBalancers.addAll(queryResponse.getLoadBalancers());
+ loadBalancerId = queryResponse.getLoadBalancers().get(0).getLoadBalancerId();
+
+ if (StringUtils.isEmpty(loadBalancerId)) {
+ return null;
+ }
+
+ DescribeLoadBalancerAttributeRequest describeLoadBalancerAttributeRequest =
+ new DescribeLoadBalancerAttributeRequest();
+ describeLoadBalancerAttributeRequest.setLoadBalancerId(loadBalancerId);
+ DescribeLoadBalancerAttributeResponse describeLoadBalancerAttributeResponse;
+ describeLoadBalancerAttributeResponse =
+ metricsSupport.readData(
+ () -> {
+ try {
+ return client.getAcsResponse(describeLoadBalancerAttributeRequest);
+
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+ return null;
+ });
+
+ loadBalancerAttributes.put(
+ describeLoadBalancerAttributeResponse.getLoadBalancerName(),
+ describeLoadBalancerAttributeResponse);
+
+ CacheResult cacheResult = buildCacheResult(loadBalancers, loadBalancerAttributes, client);
+
+ if (cacheResult.getCacheResults().values().isEmpty()) {
+ providerCache.evictDeletedItems(
+ ON_DEMAND.ns,
+ Lists.newArrayList(
+ Keys.getLoadBalancerKey(
+ (String) data.get("loadBalancerName"),
+ account.getName(),
+ region,
+ (String) data.get("vpcId"))));
+ } else {
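+ // Cache hit path: store the freshly built result in the ON_DEMAND namespace so
+ // the next full caching cycle can merge it rather than serve stale data.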
metricsSupport.onDemandStore(
+ () -> {
+ Map<String, Object> map = Maps.newHashMap();
+ map.put("cacheTime", new Date());
+ try {
+ map.put(
+ "cacheResults", objectMapper.writeValueAsString(cacheResult.getCacheResults()));
+ } catch (JsonProcessingException exception) {
+ exception.printStackTrace();
+ }
+
+ CacheData cacheData =
+ new DefaultCacheData(
+ Keys.getLoadBalancerKey(
+ (String) data.get("loadBalancerName"),
+ account.getName(),
+ region,
+ (String) data.get("vpcId")),
+ map,
+ Maps.newHashMap());
+
+ providerCache.putCacheData(ON_DEMAND.ns, cacheData);
+ return null;
+ });
+ }
+
+ OnDemandResult result = new OnDemandResult(getAgentType(), cacheResult, null);
+
+ return result;
+ }
+
+ private CacheResult buildCacheResult(
+ Collection<LoadBalancer> loadBalancers,
+ Map<String, DescribeLoadBalancerAttributeResponse> loadBalancerAttributes,
+ IAcsClient client) {
+
+ Map<String, Collection<CacheData>> cacheResults = new HashMap<>(16);
+ List<CacheData> list = new ArrayList<>();
+
+ for (LoadBalancer loadBalancer : loadBalancers) {
+ String loadBalancerName = loadBalancer.getLoadBalancerName();
+ Map<String, Object> map = objectMapper.convertValue(loadBalancer, Map.class);
+ map.put("account", account.getName());
+
+ DescribeLoadBalancerAttributeResponse describeLoadBalancerAttributeResponse =
+ loadBalancerAttributes.get(loadBalancerName);
+ Map<String, Object> attributeMap =
+ objectMapper.convertValue(describeLoadBalancerAttributeResponse, Map.class);
+
+ List<Map<String, Object>> listenerPortsAndProtocal = new ArrayList<>();
+ for (ListenerPortAndProtocal listenerPortAndProtocal :
+ describeLoadBalancerAttributeResponse.getListenerPortsAndProtocal()) {
+ Integer listenerPort = listenerPortAndProtocal.getListenerPort();
+ String listenerProtocal = listenerPortAndProtocal.getListenerProtocal().toUpperCase();
+ Map<String, Object> portAndProtocalMap =
+ objectMapper.convertValue(listenerPortAndProtocal, Map.class);
+
+ Map<String, Object> listenerMap = new HashMap<>(16);
+
+ switch (listenerProtocal) {
+ case "HTTPS":
+ DescribeLoadBalancerHTTPSListenerAttributeRequest httpsListenerAttributeRequest =
+ new DescribeLoadBalancerHTTPSListenerAttributeRequest();
+ httpsListenerAttributeRequest.setListenerPort(listenerPort);
+ httpsListenerAttributeRequest.setLoadBalancerId(loadBalancer.getLoadBalancerId());
+ DescribeLoadBalancerHTTPSListenerAttributeResponse httpsListenerAttributeResponse;
+ try {
+ httpsListenerAttributeResponse = client.getAcsResponse(httpsListenerAttributeRequest);
+ listenerMap = objectMapper.convertValue(httpsListenerAttributeResponse, Map.class);
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+
+ break;
+ case "TCP":
+ DescribeLoadBalancerTCPListenerAttributeRequest tcpListenerAttributeRequest =
+ new DescribeLoadBalancerTCPListenerAttributeRequest();
+ tcpListenerAttributeRequest.setListenerPort(listenerPort);
+ tcpListenerAttributeRequest.setLoadBalancerId(loadBalancer.getLoadBalancerId());
+ DescribeLoadBalancerTCPListenerAttributeResponse tcpListenerAttributeResponse;
+ try {
+ tcpListenerAttributeResponse = client.getAcsResponse(tcpListenerAttributeRequest);
+ listenerMap = objectMapper.convertValue(tcpListenerAttributeResponse, Map.class);
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+
+ break;
+ case "UDP":
+ DescribeLoadBalancerUDPListenerAttributeRequest udpListenerAttributeRequest =
+ new DescribeLoadBalancerUDPListenerAttributeRequest();
+ udpListenerAttributeRequest.setListenerPort(listenerPort);
+ udpListenerAttributeRequest.setLoadBalancerId(loadBalancer.getLoadBalancerId());
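+ // Each listener protocol has a dedicated SLB describe-listener-attribute API;
+ // the UDP variant is queried here, and plain HTTP is the default branch below.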
DescribeLoadBalancerUDPListenerAttributeResponse udpListenerAttributeResponse; + try { + udpListenerAttributeResponse = client.getAcsResponse(udpListenerAttributeRequest); + listenerMap = objectMapper.convertValue(udpListenerAttributeResponse, Map.class); + } catch (ServerException e) { + e.printStackTrace(); + } catch (ClientException e) { + e.printStackTrace(); + } + + break; + default: + DescribeLoadBalancerHTTPListenerAttributeRequest httpListenerAttributeRequest = + new DescribeLoadBalancerHTTPListenerAttributeRequest(); + httpListenerAttributeRequest.setListenerPort(listenerPort); + httpListenerAttributeRequest.setLoadBalancerId(loadBalancer.getLoadBalancerId()); + DescribeLoadBalancerHTTPListenerAttributeResponse httpListenerAttributeResponse; + try { + httpListenerAttributeResponse = client.getAcsResponse(httpListenerAttributeRequest); + listenerMap = objectMapper.convertValue(httpListenerAttributeResponse, Map.class); + } catch (ServerException e) { + e.printStackTrace(); + } catch (ClientException e) { + e.printStackTrace(); + } + break; + } + listenerMap.putAll(portAndProtocalMap); + listenerPortsAndProtocal.add(listenerMap); + } + + attributeMap.put("listenerPortsAndProtocal", listenerPortsAndProtocal); + map.put("attributes", attributeMap); + DescribeVServerGroupsRequest describeVServerGroupsRequest = + new DescribeVServerGroupsRequest(); + describeVServerGroupsRequest.setLoadBalancerId(loadBalancer.getLoadBalancerId()); + try { + DescribeVServerGroupsResponse describeVServerGroupsResponse = + client.getAcsResponse(describeVServerGroupsRequest); + List vServerGroups = + describeVServerGroupsResponse.getVServerGroups(); + map.put("vServerGroups", vServerGroups); + } catch (ServerException e) { + e.printStackTrace(); + } catch (ClientException e) { + e.printStackTrace(); + } + list.add( + new DefaultCacheData( + Keys.getLoadBalancerKey( + loadBalancerName, account.getName(), region, loadBalancer.getVpcId()), + map, + Maps.newHashMap())); + } + cacheResults.put(LOAD_BALANCERS.ns, list); + + CacheResult cacheResult = new DefaultCacheResult(cacheResults); + + return cacheResult; + } + + @Override + public String getAgentType() { + return account.getName() + "/" + region + "/" + this.getClass().getSimpleName(); + } + + @Override + public String getProviderName() { + return AliProvider.PROVIDER_NAME; + } + + @Override + public String getOnDemandAgentType() { + return this.getAgentType() + "-OnDemand"; + } + + @Override + public OnDemandMetricsSupport getMetricsSupport() { + return null; + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return false; + } + + @Override + public Collection pendingOnDemandRequests(ProviderCache providerCache) { + return null; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerInstanceStateCachingAgent.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerInstanceStateCachingAgent.java new file mode 100644 index 00000000000..65de8a5a171 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerInstanceStateCachingAgent.java @@ -0,0 +1,148 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.provider.agent;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS;
+
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.exceptions.ClientException;
+import com.aliyuncs.exceptions.ServerException;
+import com.aliyuncs.slb.model.v20140515.DescribeHealthStatusRequest;
+import com.aliyuncs.slb.model.v20140515.DescribeHealthStatusResponse;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.agent.CachingAgent;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys;
+import com.netflix.spinnaker.clouddriver.alicloud.provider.AliProvider;
+import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials;
+import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCachingAgent;
+import java.util.*;
+import org.springframework.context.ApplicationContext;
+
+public class AliCloudLoadBalancerInstanceStateCachingAgent
+ implements CachingAgent, HealthProvidingCachingAgent {
+ AliCloudCredentials account;
+ String region;
+ ObjectMapper objectMapper;
+ IAcsClient client;
+ Cache cacheView;
+ ApplicationContext ctx;
+ static final String healthId = "alicloud-load-balancer-instance-health";
+
+ public AliCloudLoadBalancerInstanceStateCachingAgent(
+ ApplicationContext ctx,
+ AliCloudCredentials account,
+ String region,
+ ObjectMapper objectMapper,
+ IAcsClient client) {
+ this.ctx = ctx;
+ this.account = account;
+ this.region = region;
+ this.objectMapper = objectMapper;
+ this.client = client;
+ }
+
+ static final Collection<AgentDataType> types =
+ Collections.unmodifiableCollection(
+ new ArrayList<AgentDataType>() {
+ {
+ add(AUTHORITATIVE.forType(HEALTH.ns));
+ }
+ });
+
+ @Override
+ public CacheResult loadData(ProviderCache providerCache) {
+ Map<String, Collection<CacheData>> resultMap = new HashMap<>(16);
+ List<CacheData> instanceDatas = new ArrayList<>();
+
+ Collection<String> allLoadBalancerKeys = getCacheView().getIdentifiers(LOAD_BALANCERS.ns);
+ Collection<CacheData> loadBalancerData =
+ getCacheView().getAll(LOAD_BALANCERS.ns, allLoadBalancerKeys, null);
+ DescribeHealthStatusRequest describeHealthStatusRequest = new DescribeHealthStatusRequest();
+ DescribeHealthStatusResponse describeHealthStatusResponse;
+ for (CacheData cacheData : loadBalancerData) {
+ Map<String, Object> loadBalancerAttributes =
+ objectMapper.convertValue(cacheData.getAttributes(), Map.class);
+ String loadBalancerId = String.valueOf(loadBalancerAttributes.get("loadBalancerId"));
+ describeHealthStatusRequest.setLoadBalancerId(loadBalancerId);
+ String regionId =
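+ // (the region recorded on the cached entry, which may differ from this agent's region)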
String.valueOf(loadBalancerAttributes.get("regionId")); + describeHealthStatusRequest.setSysRegionId(regionId); + try { + describeHealthStatusResponse = client.getAcsResponse(describeHealthStatusRequest); + for (DescribeHealthStatusResponse.BackendServer backendServer : + describeHealthStatusResponse.getBackendServers()) { + Map attributes = objectMapper.convertValue(backendServer, Map.class); + attributes.put("loadBalancerId", loadBalancerId); + CacheData data = + new DefaultCacheData( + Keys.getInstanceHealthKey( + loadBalancerId, + backendServer.getServerId(), + backendServer.getListenerPort().toString(), + account.getName(), + regionId, + healthId), + attributes, + new HashMap<>(16)); + instanceDatas.add(data); + } + + } catch (ServerException e) { + e.printStackTrace(); + } catch (ClientException e) { + e.printStackTrace(); + } + } + resultMap.put(HEALTH.ns, instanceDatas); + + return new DefaultCacheResult(resultMap); + } + + @Override + public Collection getProvidedDataTypes() { + return types; + } + + @Override + public String getAgentType() { + return account.getName() + "/" + region + "/" + this.getClass().getSimpleName(); + } + + @Override + public String getProviderName() { + return AliProvider.PROVIDER_NAME; + } + + @Override + public String getHealthId() { + return healthId; + } + + private Cache getCacheView() { + if (this.cacheView == null) { + this.cacheView = ctx.getBean(Cache.class); + } + return this.cacheView; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudSecurityGroupCachingAgent.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudSecurityGroupCachingAgent.java new file mode 100644 index 00000000000..701c9782236 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudSecurityGroupCachingAgent.java @@ -0,0 +1,185 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.provider.agent;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+
+import com.aliyuncs.IAcsClient;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupAttributeRequest;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupAttributeResponse;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsRequest;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse;
+import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse.SecurityGroup;
+import com.aliyuncs.exceptions.ClientException;
+import com.aliyuncs.exceptions.ServerException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.*;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider;
+import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys;
+import com.netflix.spinnaker.clouddriver.alicloud.provider.AliProvider;
+import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport;
+import java.util.*;
+
+public class AliCloudSecurityGroupCachingAgent
+ implements CachingAgent, OnDemandAgent, AccountAware {
+
+ AliCloudCredentials account;
+ String region;
+ ObjectMapper objectMapper;
+ IAcsClient client;
+
+ public AliCloudSecurityGroupCachingAgent(
+ AliCloudCredentials account, String region, ObjectMapper objectMapper, IAcsClient client) {
+ this.account = account;
+ this.region = region;
+ this.objectMapper = objectMapper;
+ this.client = client;
+ }
+
+ static final Collection<AgentDataType> types =
+ Collections.unmodifiableCollection(
+ new ArrayList<AgentDataType>() {
+ {
+ add(AUTHORITATIVE.forType(Keys.Namespace.SECURITY_GROUPS.ns));
+ }
+ });
+
+ @Override
+ public CacheResult loadData(ProviderCache providerCache) {
+ Map<String, Collection<CacheData>> resultMap = new HashMap<>(16);
+ List<CacheData> securityGroupDatas = new ArrayList<>();
+
+ DescribeSecurityGroupsRequest securityGroupsRequest = new DescribeSecurityGroupsRequest();
+ securityGroupsRequest.setPageSize(50);
+ DescribeSecurityGroupsResponse securityGroupsResponse;
+
+ try {
+ securityGroupsResponse = client.getAcsResponse(securityGroupsRequest);
+ for (SecurityGroup securityGroup : securityGroupsResponse.getSecurityGroups()) {
+ securityGroupDatas.add(buildCatchData(securityGroup));
+ }
+
+ } catch (ServerException e) {
+ e.printStackTrace();
+ } catch (ClientException e) {
+ e.printStackTrace();
+ }
+
+ resultMap.put(Keys.Namespace.SECURITY_GROUPS.ns, securityGroupDatas);
+
+ return new DefaultCacheResult(resultMap);
+ }
+
+ @Override
+ public Collection<AgentDataType> getProvidedDataTypes() {
+ return types;
+ }
+
+ @Override
+ public String getAgentType() {
+ return account.getName() + "/" + region + "/" + this.getClass().getSimpleName();
+ }
+
+ @Override
+ public String getProviderName() {
+ return AliProvider.PROVIDER_NAME;
+ }
+
+ @Override
+ public String getOnDemandAgentType() {
+ return getAgentType();
+ }
+
+ @Override
+ public OnDemandMetricsSupport getMetricsSupport() {
+ return null;
+ }
+
+ @Override
+ public boolean handles(OnDemandType type, String cloudProvider) {
+ return OnDemandAgent.OnDemandType.SecurityGroup.equals(type)
+ && AliCloudProvider.ID.equals(cloudProvider);
+ }
+
+ @Override
+ public
OnDemandResult handle(ProviderCache providerCache, Map data) { + if (data.get("securityGroupName") == null) { + return null; + } + String securityGroupName = (String) data.get("securityGroupName"); + + DescribeSecurityGroupsRequest securityGroupsRequest = new DescribeSecurityGroupsRequest(); + securityGroupsRequest.setPageSize(50); + securityGroupsRequest.setSecurityGroupName(securityGroupName); + DescribeSecurityGroupsResponse securityGroupsResponse; + + try { + securityGroupsResponse = client.getAcsResponse(securityGroupsRequest); + if (securityGroupsResponse.getSecurityGroups().size() > 0) { + SecurityGroup securityGroup = securityGroupsResponse.getSecurityGroups().get(0); + CacheData cacheData = buildCatchData(securityGroup); + providerCache.putCacheData(Keys.Namespace.SECURITY_GROUPS.ns, cacheData); + } + } catch (ServerException e) { + e.printStackTrace(); + } catch (ClientException e) { + e.printStackTrace(); + } + return null; + } + + CacheData buildCatchData(SecurityGroup securityGroup) throws ClientException, ServerException { + Map attributes = objectMapper.convertValue(securityGroup, Map.class); + attributes.put("provider", AliCloudProvider.ID); + attributes.put("account", account.getName()); + attributes.put("regionId", region); + + DescribeSecurityGroupAttributeRequest securityGroupAttributeRequest = + new DescribeSecurityGroupAttributeRequest(); + securityGroupAttributeRequest.setSecurityGroupId(securityGroup.getSecurityGroupId()); + securityGroupAttributeRequest.setDirection("ingress"); + DescribeSecurityGroupAttributeResponse securityGroupAttribute = + client.getAcsResponse(securityGroupAttributeRequest); + attributes.put("permissions", securityGroupAttribute.getPermissions()); + return new DefaultCacheData( + Keys.getSecurityGroupKey( + securityGroup.getSecurityGroupName(), + securityGroup.getSecurityGroupId(), + region, + account.getName(), + securityGroup.getVpcId()), + attributes, + new HashMap<>(16)); + } + + @Override + public Collection pendingOnDemandRequests(ProviderCache providerCache) { + List resultList = new ArrayList<>(); + Map map = new HashMap<>(); + resultList.add(map); + return resultList; + } + + @Override + public String getAccountName() { + return account.getName(); + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudLoadBalancerProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudLoadBalancerProvider.java new file mode 100644 index 00000000000..75d57597f9d --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudLoadBalancerProvider.java @@ -0,0 +1,274 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.provider.view;
+
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.CacheFilter;
+import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter;
+import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider;
+import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys;
+import com.netflix.spinnaker.clouddriver.alicloud.common.HealthHelper;
+import com.netflix.spinnaker.clouddriver.alicloud.model.AliCloudLoadBalancer;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance;
+import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider;
+import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup;
+import java.util.*;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class AliCloudLoadBalancerProvider implements LoadBalancerProvider {
+
+ private final ObjectMapper objectMapper;
+
+ private final Cache cacheView;
+
+ private final AliCloudProvider provider;
+
+ @Autowired
+ public AliCloudLoadBalancerProvider(
+ ObjectMapper objectMapper, Cache cacheView, AliCloudProvider provider) {
+ this.objectMapper = objectMapper;
+ this.cacheView = cacheView;
+ this.provider = provider;
+ }
+
+ private static final String SURVIVE_STATUS = "Active";
+
+ @Override
+ public Set<AliCloudLoadBalancer> getApplicationLoadBalancers(String applicationName) {
+ Set<String> loadBalancerKeys = new HashSet<>();
+ Set<AliCloudLoadBalancer> loadBalances = new HashSet<>();
+
+ Collection<CacheData> applicationServerGroups =
+ getServerGroupCacheDataByApplication(applicationName);
+ Collection<String> allHealthyKeys = cacheView.getIdentifiers(HEALTH.ns);
+ Collection<String> allLoadBalancerKeys = cacheView.getIdentifiers(LOAD_BALANCERS.ns);
+ Collection<String> loadBalancerKeyMatches =
+ allLoadBalancerKeys.stream()
+ .filter(tab -> applicationMatcher(tab, applicationName))
+ .collect(Collectors.toList());
+ loadBalancerKeys.addAll(loadBalancerKeyMatches);
+ Collection<CacheData> loadBalancerData =
+ cacheView.getAll(LOAD_BALANCERS.ns, loadBalancerKeys, null);
+ for (CacheData cacheData : loadBalancerData) {
+ Map<String, Object> attributes =
+ objectMapper.convertValue(cacheData.getAttributes(), Map.class);
+ String id = cacheData.getId();
+ AliCloudLoadBalancer loadBalancer =
+ new AliCloudLoadBalancer(
+ String.valueOf(attributes.get("account")),
+ String.valueOf(attributes.get("regionIdAlias")),
+ String.valueOf(attributes.get("loadBalancerName")),
+ String.valueOf(attributes.get("vpcId")),
+ String.valueOf(attributes.get("loadBalancerId")));
+ for (CacheData applicationServerGroup : applicationServerGroups) {
+ Collection<String> loadBalancers =
+ applicationServerGroup.getRelationships().get("loadBalancers");
+ for (String balancer : loadBalancers) {
+ if (id.startsWith(balancer)) {
+ addServerGroupToLoadBalancer(allHealthyKeys, loadBalancer, applicationServerGroup);
+ break;
+ }
+ }
+ }
+ loadBalances.add(loadBalancer);
+ }
+ return loadBalances;
+ }
+
+ @Override
+ public List byAccountAndRegionAndName(String account, String region, String name) {
+ List<ResultDetails> results = new ArrayList<>();
+ String searchKey = Keys.getLoadBalancerKey(name, account, region, null) + "*";
+ Collection<String> allLoadBalancerKeys =
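+ // getLoadBalancerKey(..., null) leaves the vpcId slot empty; the trailing "*"
+ // turns the key into a glob so identifiers for any vpcId variant are matched.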
cacheView.filterIdentifiers(LOAD_BALANCERS.ns, searchKey);
+ Collection<CacheData> loadBalancers =
+ cacheView.getAll(LOAD_BALANCERS.ns, allLoadBalancerKeys, null);
+ Collection<String> allHealthyKeys = cacheView.getIdentifiers(HEALTH.ns);
+ for (CacheData loadBalancer : loadBalancers) {
+ ResultDetails resultDetails = new ResultDetails();
+ Set<LoadBalancerServerGroup> serverGroups = new HashSet<>();
+ String id = loadBalancer.getId();
+ String applicationName = getApplicationByName(name);
+ Collection<CacheData> applicationServerGroups =
+ getServerGroupCacheDataByApplication(applicationName);
+ for (CacheData applicationServerGroup : applicationServerGroups) {
+ Collection<String> relationships =
+ applicationServerGroup.getRelationships().get("loadBalancers");
+ for (String loadBalancerId : relationships) {
+ if (id.startsWith(loadBalancerId)) {
+ LoadBalancerServerGroup loadBalancerServerGroup =
+ createLoadBalancerServerGroup(
+ allHealthyKeys, loadBalancerId, applicationServerGroup);
+ serverGroups.add(loadBalancerServerGroup);
+ break;
+ }
+ }
+ }
+ Map<String, Object> attributes = loadBalancer.getAttributes();
+ attributes.put("serverGroups", serverGroups);
+ resultDetails.setResults(attributes);
+ results.add(resultDetails);
+ }
+ return results;
+ }
+
+ @Override
+ public String getCloudProvider() {
+ return AliCloudProvider.ID;
+ }
+
+ @Override
+ public List list() {
+ return null;
+ }
+
+ @Override
+ public Item get(String name) {
+ return null;
+ }
+
+ private static boolean applicationMatcher(String key, String applicationName) {
+ String regex1 = AliCloudProvider.ID + ":.*:" + applicationName + "-.*";
+ String regex2 = AliCloudProvider.ID + ":.*:" + applicationName;
+ String regex3 = AliCloudProvider.ID + ":.*:" + applicationName + ":.*";
+ return Pattern.matches(regex1, key)
+ || Pattern.matches(regex2, key)
+ || Pattern.matches(regex3, key);
+ }
+
+ Collection<CacheData> resolveRelationshipData(
+ CacheData source, String relationship, CacheFilter cacheFilter) {
+ Map<String, Collection<String>> relationships = source.getRelationships();
+ Collection<String> keys = relationships.get(relationship);
+ if (!keys.isEmpty()) {
+ return cacheView.getAll(relationship, keys, null);
+ } else {
+ return new ArrayList<>();
+ }
+ }
+
+ private LoadBalancerServerGroup createLoadBalancerServerGroup(
+ Collection<String> allHealthyKeys, String loadBalancerId, CacheData applicationServerGroup) {
+ LoadBalancerServerGroup loadBalancerServerGroup = new LoadBalancerServerGroup();
+ Map<String, Object> attributes = applicationServerGroup.getAttributes();
+ loadBalancerServerGroup.setName(String.valueOf(attributes.get("name")));
+ loadBalancerServerGroup.setCloudProvider(AliCloudProvider.ID);
+ loadBalancerServerGroup.setRegion(String.valueOf(attributes.get("region")));
+ loadBalancerServerGroup.setAccount(String.valueOf(attributes.get("account")));
+ Map scalingGroup = (Map) attributes.get("scalingGroup");
+ String lifecycleState = (String) scalingGroup.get("lifecycleState");
+ if (SURVIVE_STATUS.equals(lifecycleState)) {
+ loadBalancerServerGroup.setIsDisabled(false);
+ } else {
+ loadBalancerServerGroup.setIsDisabled(true);
+ }
+ Set<String> detachedInstances = new HashSet<>();
+ Set<LoadBalancerInstance> loadBalancerInstances = new HashSet<>();
+ List<Map> instances = (List) attributes.get("instances");
+ for (Map instance : instances) {
+ Object id = instance.get("instanceId");
+ if (id != null) {
+ String instanceId = String.valueOf(id);
+ String healthStatus = (String) instance.get("healthStatus");
+ boolean flag = "Healthy".equals(healthStatus);
+ Map<String, Object> health = new HashMap<>();
+ health.put("type", provider.getDisplayName());
+ health.put("healthClass", "platform");
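+ // This ad-hoc health map mirrors the platform-health shape the UI expects:
+ // a provider type, a health class, and a coarse state filled in below.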
+ List loadBalancerIds = + new ArrayList() { + { + add(loadBalancerId); + } + }; + HealthState healthState = + HealthHelper.judgeInstanceHealthyState( + allHealthyKeys, loadBalancerIds, instanceId, cacheView); + health.put( + "state", + !"Active".equals(lifecycleState) + ? "unhealthy" + : !flag + ? "unhealthy" + : healthState.equals(HealthState.Up) + ? "healthy" + : healthState.equals(HealthState.Unknown) ? "unknown" : "unhealthy"); + String zone = (String) instance.get("creationType"); + LoadBalancerInstance loadBalancerInstance = new LoadBalancerInstance(); + loadBalancerInstance.setId(instanceId); + loadBalancerInstance.setName(instanceId); + loadBalancerInstance.setZone(zone); + loadBalancerInstance.setHealth(health); + loadBalancerInstances.add(loadBalancerInstance); + detachedInstances.add(instanceId); + } + } + // loadBalancerServerGroup.setDetachedInstances(detachedInstances); + loadBalancerServerGroup.setInstances(loadBalancerInstances); + return loadBalancerServerGroup; + } + + private void addServerGroupToLoadBalancer( + Collection allHealthyKeys, + AliCloudLoadBalancer loadBalancer, + CacheData applicationServerGroup) { + Set serverGroups = + loadBalancer.getServerGroups() != null ? loadBalancer.getServerGroups() : new HashSet<>(); + LoadBalancerServerGroup serverGroup = + createLoadBalancerServerGroup( + allHealthyKeys, loadBalancer.getLoadBalancerId(), applicationServerGroup); + serverGroups.add(serverGroup); + loadBalancer.setServerGroups(serverGroups); + } + + class ResultDetails implements Details { + Map results; + + public Map getResults() { + return results; + } + + public void setResults(Map results) { + this.results = results; + } + } + + private String getApplicationByName(String name) { + AliCloudLoadBalancer loadBalancer = new AliCloudLoadBalancer(null, null, name, null, null); + return loadBalancer.getMoniker().getApp(); + } + + private Collection getServerGroupCacheDataByApplication(String applicationName) { + CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName)); + Collection applicationServerGroups = new ArrayList<>(); + if (application != null) { + applicationServerGroups = + resolveRelationshipData( + application, + SERVER_GROUPS.ns, + RelationshipCacheFilter.include(INSTANCES.ns, LOAD_BALANCERS.ns)); + } + return applicationServerGroups; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudSecurityGroupProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudSecurityGroupProvider.java new file mode 100644 index 00000000000..401969812bb --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudSecurityGroupProvider.java @@ -0,0 +1,165 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys.Namespace; +import com.netflix.spinnaker.clouddriver.alicloud.model.AliCloudSecurityGroup; +import com.netflix.spinnaker.clouddriver.alicloud.model.AliCloudSecurityGroupRule; +import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider; +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule.PortRange; +import java.util.*; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class AliCloudSecurityGroupProvider implements SecurityGroupProvider { + + private final ObjectMapper objectMapper; + + private final Cache cacheView; + + @Autowired + public AliCloudSecurityGroupProvider(ObjectMapper objectMapper, Cache cacheView) { + this.objectMapper = objectMapper; + this.cacheView = cacheView; + } + + @Override + public Collection getAllByAccount(boolean includeRules, String account) { + return buildSecurityGroup(Keys.getSecurityGroupKey("*", "*", "*", account, "*")); + } + + @Override + public AliCloudSecurityGroup get(String account, String region, String name, String vpcId) { + String key = Keys.getSecurityGroupKey(name, "*", region, account, vpcId); + return getByKey(key); + } + + @Override + public AliCloudSecurityGroup getById(String account, String region, String id, String vpcId) { + String key = Keys.getSecurityGroupKey("*", id, region, account, vpcId); + return getByKey(key); + } + + private AliCloudSecurityGroup getByKey(String key) { + Collection allSecurityGroupKeys = + cacheView.filterIdentifiers(Namespace.SECURITY_GROUPS.ns, key); + Collection allData = + cacheView.getAll( + Namespace.SECURITY_GROUPS.ns, allSecurityGroupKeys, RelationshipCacheFilter.none()); + if (allData.size() == 0) { + return null; + } + return buildSecurityGroup(allData.iterator().next()); + } + + @Override + public Collection getAll(boolean includeRules) { + Set results = new HashSet<>(); + String globalKey = Keys.getSecurityGroupKey("*", "*", "*", "*", "*"); + Collection keys = cacheView.filterIdentifiers(Namespace.SECURITY_GROUPS.ns, globalKey); + for (String key : keys) { + if (!includeRules) { + Map parse = Keys.parse(key); + results.add( + new AliCloudSecurityGroup( + parse.get("id"), + parse.get("name"), + null, + parse.get("account"), + parse.get("region"), + parse.get("vpcId"), + null)); + } else { + CacheData cacheData = cacheView.get(Namespace.SECURITY_GROUPS.ns, key); + results.add(buildSecurityGroup(cacheData)); + } + } + return results; + } + + @Override + public Collection getAllByRegion(boolean includeRules, String region) { + return buildSecurityGroup(Keys.getSecurityGroupKey("*", "*", region, "*", "*")); + } + + @Override + public Collection getAllByAccountAndName( + boolean includeRules, String account, String name) { + return buildSecurityGroup(Keys.getSecurityGroupKey(name, "*", "*", account, "*")); + } + + @Override + public Collection getAllByAccountAndRegion( + boolean includeRule, String account, String region) { + return 
buildSecurityGroup(Keys.getSecurityGroupKey("*", "*", region, account, "*"));
+ }
+
+ private Collection<AliCloudSecurityGroup> buildSecurityGroup(String globalKey) {
+ Set<AliCloudSecurityGroup> results = new HashSet<>();
+ Collection<String> allSecurityGroupKeys =
+ cacheView.filterIdentifiers(Namespace.SECURITY_GROUPS.ns, globalKey);
+ Collection<CacheData> allData =
+ cacheView.getAll(
+ Namespace.SECURITY_GROUPS.ns, allSecurityGroupKeys, RelationshipCacheFilter.none());
+ for (CacheData data : allData) {
+ results.add(buildSecurityGroup(data));
+ }
+ return results;
+ }
+
+ private AliCloudSecurityGroup buildSecurityGroup(CacheData data) {
+ Map<String, Object> attributes = data.getAttributes();
+ Set<Rule> rules = new HashSet<>();
+ List<Map<String, Object>> permissions =
+ objectMapper.convertValue(attributes.get("permissions"), List.class);
+ for (Map<String, Object> permission : permissions) {
+ String protocol = (String) permission.get("ipProtocol");
+ String range = (String) permission.get("portRange");
+ String[] split = range.split("/");
+ SortedSet<Rule.PortRange> portRanges = new TreeSet<>();
+ Rule.PortRange portRange = new PortRange();
+ portRange.setStartPort(Integer.valueOf(split[0]));
+ portRange.setEndPort(Integer.valueOf(split[1]));
+ portRanges.add(portRange);
+ AliCloudSecurityGroupRule rule =
+ new AliCloudSecurityGroupRule(protocol, portRanges, permission);
+ rules.add(rule);
+ }
+ AliCloudSecurityGroup securityGroup =
+ new AliCloudSecurityGroup(
+ String.valueOf(attributes.get("securityGroupId")),
+ String.valueOf(attributes.get("securityGroupName")),
+ String.valueOf(attributes.get("description")),
+ String.valueOf(attributes.get("account")),
+ String.valueOf(attributes.get("regionId")),
+ String.valueOf(attributes.get("vpcId")),
+ rules);
+ return securityGroup;
+ }
+
+ @Override
+ public String getCloudProvider() {
+ return AliCloudProvider.ID;
+ }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AccountCredentials.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AccountCredentials.java
new file mode 100644
index 00000000000..34e6dd8b8a9
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AccountCredentials.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.security;
+
+public class AccountCredentials {}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudClientProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudClientProvider.java
new file mode 100644
index 00000000000..7e44a17ea83
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudClientProvider.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AccountCredentials.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AccountCredentials.java
new file mode 100644
index 00000000000..34e6dd8b8a9
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AccountCredentials.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.security;
+
+public class AccountCredentials {}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudClientProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudClientProvider.java
new file mode 100644
index 00000000000..7e44a17ea83
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudClientProvider.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.security;
+
+import org.springframework.stereotype.Component;
+
+@Component
+public class AliCloudClientProvider {}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentials.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentials.java
new file mode 100644
index 00000000000..437100d32b1
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentials.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.security;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
+import java.util.List;
+
+public class AliCloudCredentials extends AbstractAccountCredentials {
+
+  private static final String CLOUD_PROVIDER = "alicloud";
+
+  private String name;
+
+  private String accessKeyId;
+
+  private String accessSecretKey;
+
+  private List<String> regions;
+
+  private List<String> requiredGroupMembership;
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public String getAccessKeyId() {
+    return accessKeyId;
+  }
+
+  public void setAccessKeyId(String accessKeyId) {
+    this.accessKeyId = accessKeyId;
+  }
+
+  public String getAccessSecretKey() {
+    return accessSecretKey;
+  }
+
+  public void setAccessSecretKey(String accessSecretKey) {
+    this.accessSecretKey = accessSecretKey;
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public String getEnvironment() {
+    return null;
+  }
+
+  @Override
+  public String getAccountType() {
+    return null;
+  }
+
+  @Override
+  @JsonIgnore
+  public AccountCredentials getCredentials() {
+    return null;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return CLOUD_PROVIDER;
+  }
+
+  @Override
+  public List<String> getRequiredGroupMembership() {
+    return requiredGroupMembership;
+  }
+
+  public List<String> getRegions() {
+    return regions;
+  }
+
+  public void setRegions(List<String> regions) {
+    this.regions = regions;
+  }
+}
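A hedged sketch (not from this change, assuming the same package) of what @JsonIgnore buys here: the self-typed credentials property stays out of JSON renderings of the account. Note that accessSecretKey still has a public getter, so it would serialize too unless filtered elsewhere:

```java
import com.fasterxml.jackson.databind.ObjectMapper;

public class CredentialsJsonDemo {
  public static void main(String[] args) throws Exception {
    AliCloudCredentials creds = new AliCloudCredentials();
    creds.setName("my-account");
    creds.setAccessKeyId("ak-xxx");
    creds.setAccessSecretKey("sk-xxx");
    // The "credentials" property is suppressed by @JsonIgnore; the plain
    // bean getters (name, accessKeyId, accessSecretKey, regions) still appear.
    System.out.println(new ObjectMapper().writeValueAsString(creds));
  }
}
```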
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsConfig.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsConfig.java
new file mode 100644
index 00000000000..28e95885d11
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsConfig.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.security;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class AliCloudCredentialsConfig {
+
+  List<Account> accounts;
+
+  @Data
+  public static class Account {
+    private String name;
+    private String aliAccount;
+  }
+}
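A hedged sketch (illustrative only, assuming the same package; the values are placeholders) of how Spring Boot's relaxed binding would populate this class from flat properties, with accounts[0].ali-account mapping onto the aliAccount field:

```java
import java.util.Map;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.MapConfigurationPropertySource;

public class CredentialsConfigBindingDemo {
  public static void main(String[] args) {
    // Flat properties as they might appear in clouddriver's config.
    MapConfigurationPropertySource source =
        new MapConfigurationPropertySource(
            Map.of(
                "alicloud.accounts[0].name", "my-account",
                "alicloud.accounts[0].ali-account", "1234567890"));
    AliCloudCredentialsConfig config =
        new Binder(source).bind("alicloud", AliCloudCredentialsConfig.class).get();
    System.out.println(config.getAccounts().get(0).getName()); // my-account
  }
}
```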
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsInitializer.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsInitializer.java
new file mode 100644
index 00000000000..38c953ac465
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsInitializer.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.security;
+
+import com.netflix.spinnaker.clouddriver.alicloud.security.config.AliCloudAccountConfig;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
+import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable;
+import java.util.ArrayList;
+import java.util.List;
+import org.springframework.beans.factory.config.ConfigurableBeanFactory;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Scope;
+
+@Configuration
+public class AliCloudCredentialsInitializer implements CredentialsInitializerSynchronizable {
+
+  @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
+  @Bean
+  @ConfigurationProperties("alicloud")
+  AliCloudAccountConfig aliCloudAccountConfig() {
+    return new AliCloudAccountConfig();
+  }
+
+  @Bean
+  List<AliCloudCredentials> synchronizeAliCloudAccounts(
+      AliCloudAccountConfig aliCloudAccountConfig,
+      AccountCredentialsRepository accountCredentialsRepository,
+      ApplicationContext applicationContext) {
+    List<AliCloudCredentials> aliCloudCredentialsList = new ArrayList<>();
+    aliCloudAccountConfig
+        .getAccounts()
+        .forEach(
+            account -> {
+              AliCloudCredentials aliCloudCredentials = new AliCloudCredentials();
+              aliCloudCredentials.setName(account.getName());
+              aliCloudCredentials.setAccessSecretKey(account.getAccessSecretKey());
+              aliCloudCredentials.setAccessKeyId(account.getAccessKeyId());
+              aliCloudCredentials.setRegions(account.getRegions());
+              accountCredentialsRepository.save(account.getName(), aliCloudCredentials);
+              aliCloudCredentialsList.add(aliCloudCredentials);
+            });
+    return aliCloudCredentialsList;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsProvider.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsProvider.java
new file mode 100644
index 00000000000..3f564e7fbf4
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudCredentialsProvider.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.security; + +import org.springframework.stereotype.Component; + +@Component +public class AliCloudCredentialsProvider {} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudProxy.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudProxy.java new file mode 100644 index 00000000000..5631766d9a6 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/AliCloudProxy.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.security; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.stereotype.Component; + +@ConfigurationProperties(prefix = "alicloud.accounts") +@Component +public class AliCloudProxy { + + private String ak; + + private String sk; + + public String getAk() { + return ak; + } + + public void setAk(String ak) { + this.ak = ak; + } + + public String getSk() { + return sk; + } + + public void setSk(String sk) { + this.sk = sk; + } +} diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/config/AliCloudAccountConfig.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/config/AliCloudAccountConfig.java new file mode 100644 index 00000000000..08c77b692a9 --- /dev/null +++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/clouddriver/alicloud/security/config/AliCloudAccountConfig.java @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.security.config; + +import java.util.List; +import lombok.Data; + +/** + * A mutable credentials configurations structure suitable for transformation into concrete + * credentials implementations. 
+ */
+@Data
+public class AliCloudAccountConfig {
+
+  private List<Account> accounts;
+
+  @Data
+  public static class Account {
+
+    private String name;
+
+    private String accessKeyId;
+
+    private String accessSecretKey;
+
+    private List<String> regions;
+  }
+}
diff --git a/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/config/AliCloudConfiguration.java b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/config/AliCloudConfiguration.java
new file mode 100644
index 00000000000..c6fd46da06b
--- /dev/null
+++ b/clouddriver-alicloud/src/main/java/com/netflix/spinnaker/config/AliCloudConfiguration.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.config;
+
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@ConditionalOnProperty("alicloud.enabled")
+@ComponentScan("com.netflix.spinnaker.clouddriver.alicloud")
+public class AliCloudConfiguration {}
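A hedged sketch (illustrative; a real clouddriver boot wires far more, and the same package is assumed) of the effect of the @ConditionalOnProperty gate: nothing under com.netflix.spinnaker.clouddriver.alicloud is scanned unless alicloud.enabled is set:

```java
import org.springframework.boot.builder.SpringApplicationBuilder;

public class EnableAliCloudDemo {
  public static void main(String[] args) {
    // Without "alicloud.enabled=true" the @ComponentScan above never runs
    // and no alicloud beans are registered.
    new SpringApplicationBuilder(AliCloudConfiguration.class)
        .properties("alicloud.enabled=true")
        .run(args);
  }
}
```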
diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/cache/KeysTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/cache/KeysTest.java
new file mode 100644
index 00000000000..0ff7dab080d
--- /dev/null
+++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/cache/KeysTest.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.cache;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class KeysTest {
+
+  static final String ACCOUNT = "test-account";
+  static final String REGION = "cn-test";
+
+  @Test
+  public void testGetLoadBalancerKey() {
+    String key = "alicloud:loadBalancers:test-account:cn-test:test-loadBalancer";
+    String loadBalancerKey = Keys.getLoadBalancerKey("test-loadBalancer", ACCOUNT, REGION, null);
+    assertEquals(key, loadBalancerKey);
+  }
+
+  @Test
+  public void testGetSubnetKey() {
+    String key = "alicloud:subnets:test-account:cn-test:test-vswitchId";
+    String subnetKey = Keys.getSubnetKey("test-vswitchId", REGION, ACCOUNT);
+    assertEquals(key, subnetKey);
+  }
+
+  @Test
+  public void testGetImageKey() {
+    String key = "alicloud:images:test-account:cn-test:test-imageId";
+    String imageKey = Keys.getImageKey("test-imageId", ACCOUNT, REGION);
+    assertEquals(key, imageKey);
+  }
+
+  @Test
+  public void testGetNamedImageKey() {
+    String key = "alicloud:namedImages:test-account:test-imageName";
+    String namedImageKey = Keys.getNamedImageKey(ACCOUNT, "test-imageName");
+    assertEquals(key, namedImageKey);
+  }
+
+  @Test
+  public void testGetInstanceTypeKey() {
+    String key = "alicloud:instanceTypes:test-account:cn-test:test-zoneId";
+    String instanceTypeKey = Keys.getInstanceTypeKey(ACCOUNT, REGION, "test-zoneId");
+    assertEquals(key, instanceTypeKey);
+  }
+
+  @Test
+  public void testGetSecurityGroupKey() {
+    String key =
+        "alicloud:securityGroups:test-account:cn-test:test-SecurityGroupName:test-SecurityGroupId";
+    String securityGroupKey =
+        Keys.getSecurityGroupKey(
+            "test-SecurityGroupName", "test-SecurityGroupId", REGION, ACCOUNT, null);
+    assertEquals(key, securityGroupKey);
+  }
+
+  @Test
+  public void testGetKeyPairKey() {
+    String key = "alicloud:aliCloudKeyPairs:test-KeyPair:test-account:cn-test";
+    String keyPairKey = Keys.getKeyPairKey("test-KeyPair", REGION, ACCOUNT);
+    assertEquals(key, keyPairKey);
+  }
+
+  @Test
+  public void testGetServerGroupKey() {
+    String key = "alicloud:serverGroups:Spin63-test-ali:test-account:cn-test:Spin63-test-ali";
+    String serverGroupKey = Keys.getServerGroupKey("Spin63-test-ali", ACCOUNT, REGION);
+    assertEquals(key, serverGroupKey);
+  }
+
+  @Test
+  public void testGetApplicationKey() {
+    String key = "alicloud:applications:test-application";
+    String applicationKey = Keys.getApplicationKey("test-Application");
+    assertEquals(key, applicationKey);
+  }
+
+  @Test
+  public void testGetClusterKey() {
+    String key = "alicloud:clusters:test-application:test-account:test-Cluster";
+    String clusterKey = Keys.getClusterKey("test-Cluster", "test-Application", ACCOUNT);
+    assertEquals(key, clusterKey);
+  }
+
+  @Test
+  public void testGetLaunchConfigKey() {
+    String key = "alicloud:launchConfigs:test-account:cn-test:test-LaunchConfigName";
+    String launchConfigKey = Keys.getLaunchConfigKey("test-LaunchConfigName", ACCOUNT, REGION);
+    assertEquals(key, launchConfigKey);
+  }
+
+  @Test
+  public void testGetInstanceKey() {
+    String key = "alicloud:instances:test-account:cn-test:test-instanceId";
+    String instanceKey = Keys.getInstanceKey("test-instanceId", ACCOUNT, REGION);
+    assertEquals(key, instanceKey);
+  }
+}
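The security-group provider earlier in this diff assumes Keys.parse(...) inverts these builders; a hedged round-trip sketch, with field names taken from the provider's parse.get(...) calls:

```java
import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys;
import java.util.Map;

public class KeysRoundTripDemo {
  public static void main(String[] args) {
    // Build a key the way the caching agents do, then recover its fields the
    // way AliCloudSecurityGroupProvider.getAll(false) does.
    String key =
        Keys.getSecurityGroupKey("web", "sg-123", "cn-test", "test-account", "vpc-1");
    Map<String, String> parsed = Keys.parse(key);
    System.out.println(parsed.get("account")); // expected: test-account
    System.out.println(parsed.get("id")); // expected: sg-123
  }
}
```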
diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudImageControllerTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudImageControllerTest.java
new file mode 100644
index 00000000000..cdb264d9cfa
--- /dev/null
+++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudImageControllerTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.controllers;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.clouddriver.alicloud.controllers.AliCloudImageController.Image;
+import com.netflix.spinnaker.clouddriver.alicloud.controllers.AliCloudImageController.LookupOptions;
+import java.util.*;
+import javax.servlet.http.HttpServletRequest;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class AliCloudImageControllerTest {
+
+  private final String ACCOUNT = "test-account";
+  private final String REGION = "cn-test";
+
+  final Cache cacheView = mock(Cache.class);
+  final LookupOptions lookupOptions = mock(LookupOptions.class);
+  final HttpServletRequest request = mock(HttpServletRequest.class);
+
+  @Before
+  public void testBefore() {
+    when(cacheView.filterIdentifiers(anyString(), anyString())).thenAnswer(new FilterAnswer());
+    when(cacheView.getAll(anyString(), any(), any())).thenAnswer(new CacheDataAnswer());
+    when(lookupOptions.getQ()).thenReturn("test");
+    when(request.getParameterNames()).thenAnswer(new RequestAnswer());
+  }
+
+  @Test
+  public void testList() {
+    AliCloudImageController controller = new AliCloudImageController(cacheView);
+    List<Image> list = controller.list(lookupOptions, request);
+    assertEquals(2, list.size());
+  }
+
+  private class FilterAnswer implements Answer<List<String>> {
+    @Override
+    public List<String> answer(InvocationOnMock invocation) throws Throwable {
+      List<String> list = new ArrayList<>();
+      list.add("alicloud:images:ali-account:cn-hangzhou:win_xxx_xxx_xxx.vhd");
+      return list;
+    }
+  }
+
+  private class CacheDataAnswer implements Answer<List<CacheData>> {
+    @Override
+    public List<CacheData> answer(InvocationOnMock invocation) throws Throwable {
+      List<CacheData> cacheDatas = new ArrayList<>();
+      Map<String, Object> attributes = new HashMap<>();
+      attributes.put("account", ACCOUNT);
+      attributes.put("regionId", REGION);
+      attributes.put("imageName", "win_xxx_xxx_xxx.vhd");
+      CacheData cacheData1 =
+          new DefaultCacheData(
+              "alicloud:images:ali-account:cn-hangzhou:win_xxx_xxx_xxx.vhd", attributes, null);
+      cacheDatas.add(cacheData1);
+      return cacheDatas;
+    }
+  }
+
+  private class RequestAnswer implements Answer<Enumeration<String>> {
+    @Override
+    public Enumeration<String> answer(InvocationOnMock invocation) throws Throwable {
+      List<String> list = new ArrayList<>();
+      Enumeration<String> enumeration = Collections.enumeration(list);
+      return enumeration;
+    }
+  }
+}
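This test and the ones that follow lean on Mockito's consecutive stubbing: each chained thenAnswer/thenReturn services the next matching call in order, and the last stub repeats thereafter. A standalone illustration (not part of the diff):

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

public class ConsecutiveStubbingDemo {
  public static void main(String[] args) {
    @SuppressWarnings("unchecked")
    List<String> fake = (List<String>) mock(List.class);
    when(fake.get(0)).thenReturn("first").thenReturn("second");
    System.out.println(fake.get(0)); // first
    System.out.println(fake.get(0)); // second
    System.out.println(fake.get(0)); // second (last stub keeps answering)
  }
}
```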
diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesControllerTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesControllerTest.java
new file mode 100644
index 00000000000..010e94736b0
--- /dev/null
+++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/controllers/AliCloudScalingActivitiesControllerTest.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.alicloud.controllers;
+
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.aliyuncs.ess.model.v20140828.DescribeScalingActivitiesResponse;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingActivitiesResponse.ScalingActivity;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingGroupsResponse;
+import com.aliyuncs.ess.model.v20140828.DescribeScalingGroupsResponse.ScalingGroup;
+import com.aliyuncs.exceptions.ClientException;
+import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.CommonAtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.springframework.http.ResponseEntity;
+
+public class AliCloudScalingActivitiesControllerTest extends CommonAtomicOperation {
+
+  private final String SERVERGROUPNAME = "test-serverGroupName";
+
+  static AccountCredentialsProvider accountCredentialsProvider =
+      mock(AccountCredentialsProvider.class);
+
+  @Before
+  public void testBefore() throws ClientException {
+    when(accountCredentialsProvider.getCredentials(anyString())).thenReturn(credentials);
+    when(client.getAcsResponse(any()))
+        .thenAnswer(new DescribeScalingGroupsAnswer())
+        .thenAnswer(new DescribeScalingActivitiesAnswer());
+  }
+
+  @Test
+  public void testGetScalingActivities() {
+    AliCloudScalingActivitiesController controller =
+        new AliCloudScalingActivitiesController(accountCredentialsProvider, clientFactory);
+    ResponseEntity scalingActivities =
+        controller.getScalingActivities(ACCOUNT, SERVERGROUPNAME, REGION);
+    assertNotNull(scalingActivities);
+  }
+
+  private class DescribeScalingGroupsAnswer implements Answer<DescribeScalingGroupsResponse> {
+    @Override
+    public DescribeScalingGroupsResponse answer(InvocationOnMock invocation) throws Throwable {
+      DescribeScalingGroupsResponse response = new DescribeScalingGroupsResponse();
+      List<ScalingGroup> scalingGroups = new ArrayList<>();
+      ScalingGroup scalingGroup = new ScalingGroup();
+      scalingGroup.setScalingGroupId("test-ID");
+      scalingGroups.add(scalingGroup);
+      response.setScalingGroups(scalingGroups);
+      return response;
+    }
+  }
+
+  private class DescribeScalingActivitiesAnswer
+      implements Answer<DescribeScalingActivitiesResponse> {
+    @Override
+    public DescribeScalingActivitiesResponse answer(InvocationOnMock invocation) throws Throwable {
+      DescribeScalingActivitiesResponse response = new DescribeScalingActivitiesResponse();
+      List<ScalingActivity> scalingActivities = new ArrayList<>();
+      ScalingActivity scalingActivity = new ScalingActivity();
+      scalingActivity.setStatusCode("test-status");
+      scalingActivities.add(scalingActivity);
+      response.setScalingActivities(scalingActivities);
+      return response;
+    }
+  }
+}
diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/CommonConverter.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/CommonConverter.java
new file mode 100644
index 00000000000..6a4171a2b53
--- /dev/null
+++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/CommonConverter.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 Alibaba Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.aliyuncs.IAcsClient; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; + +public class CommonConverter { + + public static final String ACCOUNT = "test-account"; + public static final String REGION = "cn-test"; + + static AliCloudCredentials credentials = mock(AliCloudCredentials.class); + static IAcsClient client = mock(IAcsClient.class); + static ClientFactory clientFactory = mock(ClientFactory.class); + static AccountCredentialsProvider accountCredentialsProvider = + mock(AccountCredentialsProvider.class); + static List clusterProviders = mock(List.class); + + static { + when(credentials.getName()).thenReturn(ACCOUNT); + when(credentials.getAccessKeyId()).thenReturn("test-ak"); + when(credentials.getAccessSecretKey()).thenReturn("test-sk"); + when(clientFactory.createClient(anyString(), anyString(), anyString())).thenReturn(client); + when(accountCredentialsProvider.getCredentials(anyString())).thenReturn(credentials); + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudLoadBalancerAtomicOperationConverterTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudLoadBalancerAtomicOperationConverterTest.java new file mode 100644 index 00000000000..e1fa96468fa --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudLoadBalancerAtomicOperationConverterTest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.DeleteAliCloudLoadBalancerClassicAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; + +public class DeleteAliCloudLoadBalancerAtomicOperationConverterTest extends CommonConverter { + + DeleteAliCloudLoadBalancerAtomicOperationConverter converter = + new DeleteAliCloudLoadBalancerAtomicOperationConverter(clientFactory); + + @Before + public void testBefore() { + converter.setObjectMapper(new ObjectMapper()); + converter.setAccountCredentialsProvider(accountCredentialsProvider); + } + + @Test + public void testConvertOperation() { + AtomicOperation atomicOperation = converter.convertOperation(buildDescription()); + assertTrue(atomicOperation instanceof DeleteAliCloudLoadBalancerClassicAtomicOperation); + } + + @Test + public void testConvertDescription() { + UpsertAliCloudLoadBalancerDescription upsertAliCloudLoadBalancerDescription = + converter.convertDescription(buildDescription()); + assertTrue( + upsertAliCloudLoadBalancerDescription instanceof UpsertAliCloudLoadBalancerDescription); + } + + private Map buildDescription() { + Map description = new HashMap<>(); + description.put("region", REGION); + description.put("credentials", ACCOUNT); + return description; + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudSecurityGroupAtomicOperationConverterTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudSecurityGroupAtomicOperationConverterTest.java new file mode 100644 index 00000000000..6a77918bdf8 --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/DeleteAliCloudSecurityGroupAtomicOperationConverterTest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.DeleteAliCloudSecurityGroupDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.DeleteAliCloudSecurityGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; + +public class DeleteAliCloudSecurityGroupAtomicOperationConverterTest extends CommonConverter { + + DeleteAliCloudSecurityGroupAtomicOperationConverter converter = + new DeleteAliCloudSecurityGroupAtomicOperationConverter(clientFactory); + + @Before + public void testBefore() { + converter.setObjectMapper(new ObjectMapper()); + converter.setAccountCredentialsProvider(accountCredentialsProvider); + } + + @Test + public void testConvertOperation() { + AtomicOperation atomicOperation = converter.convertOperation(buildDescription()); + assertTrue(atomicOperation instanceof DeleteAliCloudSecurityGroupAtomicOperation); + } + + @Test + public void testConvertDescription() { + DeleteAliCloudSecurityGroupDescription deleteAliCloudSecurityGroupDescription = + converter.convertDescription(buildDescription()); + assertTrue( + deleteAliCloudSecurityGroupDescription instanceof DeleteAliCloudSecurityGroupDescription); + } + + private Map buildDescription() { + Map description = new HashMap<>(); + description.put("region", REGION); + description.put("credentials", ACCOUNT); + return description; + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudLoadBalancerAtomicOperationConverterTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudLoadBalancerAtomicOperationConverterTest.java new file mode 100644 index 00000000000..36320d0294f --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudLoadBalancerAtomicOperationConverterTest.java @@ -0,0 +1,81 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.UpsertAliCloudLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.alicloud.model.alienum.ListenerType; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; + +public class UpsertAliCloudLoadBalancerAtomicOperationConverterTest extends CommonConverter { + + UpsertAliCloudLoadBalancerAtomicOperationConverter converter = + new UpsertAliCloudLoadBalancerAtomicOperationConverter(clientFactory); + + @Before + public void testBefore() { + converter.setObjectMapper(new ObjectMapper()); + converter.setAccountCredentialsProvider(accountCredentialsProvider); + } + + @Test + public void testConvertDescription() { + AtomicOperation atomicOperation = converter.convertOperation(buildDescription()); + assertTrue(atomicOperation instanceof UpsertAliCloudLoadBalancerAtomicOperation); + } + + @Test + public void testConvertOperation() { + UpsertAliCloudLoadBalancerDescription upsertAliCloudLoadBalancerDescription = + converter.convertDescription(buildDescription()); + assertTrue( + upsertAliCloudLoadBalancerDescription instanceof UpsertAliCloudLoadBalancerDescription); + } + + private Map buildDescription() { + Map description = new HashMap<>(); + description.put("region", REGION); + description.put("credentials", ACCOUNT); + description.put("loadBalancerName", "test-loadBalancerName"); + List listeners = new ArrayList<>(); + Map listener = new HashMap<>(); + listener.put("listenerProtocal", ListenerType.HTTP); + listener.put("healthCheckURI", "/test/index.html"); + listener.put("healthCheck", "on"); + listener.put("healthCheckTimeout", 5); + listener.put("unhealthyThreshold", 3); + listener.put("healthyThreshold", 3); + listener.put("healthCheckInterval", 2); + listener.put("listenerPort", 80); + listener.put("bandwidth", 112); + listener.put("stickySession", "off"); + listener.put("backendServerPort", 90); + listeners.add(listener); + description.put("listeners", listeners); + description.put("vpcId", "vpc-test"); + description.put("vSwitchId", "111111"); + return description; + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudSecurityGroupAtomicOperationConverterTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudSecurityGroupAtomicOperationConverterTest.java new file mode 100644 index 00000000000..950097eb72b --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/converters/UpsertAliCloudSecurityGroupAtomicOperationConverterTest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.converters; + +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudSecurityGroupDescription; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.ops.UpsertAliCloudSecurityGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; + +public class UpsertAliCloudSecurityGroupAtomicOperationConverterTest extends CommonConverter { + + UpsertAliCloudSecurityGroupAtomicOperationConverter converter = + new UpsertAliCloudSecurityGroupAtomicOperationConverter(clientFactory); + + @Before + public void testBefore() { + converter.setObjectMapper(new ObjectMapper()); + converter.setAccountCredentialsProvider(accountCredentialsProvider); + } + + @Test + public void testConvertOperation() { + AtomicOperation atomicOperation = converter.convertOperation(buildDescription()); + assertTrue(atomicOperation instanceof UpsertAliCloudSecurityGroupAtomicOperation); + } + + @Test + public void testConvertDescription() { + UpsertAliCloudSecurityGroupDescription upsertAliCloudSecurityGroupDescription = + converter.convertDescription(buildDescription()); + assertTrue( + upsertAliCloudSecurityGroupDescription instanceof UpsertAliCloudSecurityGroupDescription); + } + + private Map buildDescription() { + Map description = new HashMap<>(); + description.put("region", REGION); + description.put("credentials", ACCOUNT); + return description; + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/CommonAtomicOperation.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/CommonAtomicOperation.java new file mode 100644 index 00000000000..0b343e8d292 --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/CommonAtomicOperation.java @@ -0,0 +1,56 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.aliyuncs.IAcsClient; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.netflix.spinnaker.clouddriver.alicloud.common.ClientFactory; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import java.util.List; +import spock.lang.Subject; + +public class CommonAtomicOperation { + + public static final String ACCOUNT = "test-account"; + public static final String REGION = "cn-test"; + + public final List priorOutputs = mock(List.class); + + public static AliCloudCredentials credentials = mock(AliCloudCredentials.class); + public static IAcsClient client = mock(IAcsClient.class); + public static ClientFactory clientFactory = mock(ClientFactory.class); + static List clusterProviders = mock(List.class); + + static { + when(credentials.getName()).thenReturn(ACCOUNT); + when(credentials.getAccessKeyId()).thenReturn("test-ak"); + when(credentials.getAccessSecretKey()).thenReturn("test-sk"); + when(clientFactory.createClient(anyString(), anyString(), anyString())).thenReturn(client); + } + + @Subject + public ObjectMapper objectMapper = + new ObjectMapper() + .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudLoadBalancerClassicAtomicOperationTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudLoadBalancerClassicAtomicOperationTest.java new file mode 100644 index 00000000000..aeb696e85bb --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudLoadBalancerClassicAtomicOperationTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.aliyuncs.exceptions.ClientException; +import com.aliyuncs.slb.model.v20140515.DeleteLoadBalancerResponse; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import java.util.ArrayList; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class DeleteAliCloudLoadBalancerClassicAtomicOperationTest extends CommonAtomicOperation { + + @Before + public void testBefore() throws ClientException { + when(client.getAcsResponse(any())) + .thenAnswer(new DescribeLoadBalancersAnswer()) + .thenAnswer(new DeleteLoadBalancerAnswer()); + } + + @Test + public void testOperate() { + DeleteAliCloudLoadBalancerClassicAtomicOperation operation = + new DeleteAliCloudLoadBalancerClassicAtomicOperation(buildDescription(), clientFactory); + operation.operate(priorOutputs); + } + + private UpsertAliCloudLoadBalancerDescription buildDescription() { + UpsertAliCloudLoadBalancerDescription description = new UpsertAliCloudLoadBalancerDescription(); + description.setRegion(REGION); + description.setCredentials(credentials); + description.setLoadBalancerName("test-lbName"); + return description; + } + + private class DescribeLoadBalancersAnswer implements Answer { + @Override + public DescribeLoadBalancersResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeLoadBalancersResponse response = new DescribeLoadBalancersResponse(); + response.setLoadBalancers(new ArrayList<>()); + return response; + } + } + + private class DeleteLoadBalancerAnswer implements Answer { + @Override + public DeleteLoadBalancerResponse answer(InvocationOnMock invocation) throws Throwable { + return null; + } + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudSecurityGroupAtomicOperationTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudSecurityGroupAtomicOperationTest.java new file mode 100644 index 00000000000..3436e2395ae --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/DeleteAliCloudSecurityGroupAtomicOperationTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.aliyuncs.ecs.model.v20140526.DeleteSecurityGroupResponse; +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse; +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse.SecurityGroup; +import com.aliyuncs.exceptions.ClientException; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.DeleteAliCloudSecurityGroupDescription; +import java.util.ArrayList; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class DeleteAliCloudSecurityGroupAtomicOperationTest extends CommonAtomicOperation { + + @Before + public void testBefore() throws ClientException { + when(client.getAcsResponse(any())) + .thenAnswer(new DescribeSecurityGroupsAnswer()) + .thenAnswer(new DeleteSecurityGroupAnswer()); + } + + @Test + public void testOperate() { + DeleteAliCloudSecurityGroupAtomicOperation operation = + new DeleteAliCloudSecurityGroupAtomicOperation(buildDescription(), clientFactory); + operation.operate(priorOutputs); + } + + private DeleteAliCloudSecurityGroupDescription buildDescription() { + DeleteAliCloudSecurityGroupDescription description = + new DeleteAliCloudSecurityGroupDescription(); + description.setCredentials(credentials); + description.setSecurityGroupName("test-SecurityGroupName"); + List regions = new ArrayList<>(); + regions.add(REGION); + description.setRegions(regions); + return description; + } + + private class DescribeSecurityGroupsAnswer implements Answer { + @Override + public DescribeSecurityGroupsResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeSecurityGroupsResponse response = new DescribeSecurityGroupsResponse(); + List securityGroups = new ArrayList<>(); + response.setSecurityGroups(securityGroups); + return response; + } + } + + private class DeleteSecurityGroupAnswer implements Answer { + @Override + public DeleteSecurityGroupResponse answer(InvocationOnMock invocation) throws Throwable { + DeleteSecurityGroupResponse response = new DeleteSecurityGroupResponse(); + return response; + } + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerAtomicOperationTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerAtomicOperationTest.java new file mode 100644 index 00000000000..e25e136d3ea --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudLoadBalancerAtomicOperationTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.aliyuncs.exceptions.ClientException; +import com.aliyuncs.slb.model.v20140515.CreateLoadBalancerResponse; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.alicloud.model.alienum.ListenerType; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class UpsertAliCloudLoadBalancerAtomicOperationTest extends CommonAtomicOperation { + + @Before + public void testBefore() throws ClientException { + when(client.getAcsResponse(any())) + .thenAnswer(new DescribeLoadBalancersAnswer()) + .thenAnswer(new CreateLoadBalancerAnswer()); + } + + @Test + public void testOperate() { + Map description = buildDescription(); + UpsertAliCloudLoadBalancerDescription upsertAliCloudLoadBalancerDescription = + objectMapper.convertValue(description, UpsertAliCloudLoadBalancerDescription.class); + upsertAliCloudLoadBalancerDescription.setCredentials(credentials); + UpsertAliCloudLoadBalancerAtomicOperation operation = + new UpsertAliCloudLoadBalancerAtomicOperation( + upsertAliCloudLoadBalancerDescription, objectMapper, clientFactory); + Map operate = operation.operate(priorOutputs); + assertTrue(operate != null); + } + + private Map buildDescription() { + Map description = new HashMap<>(); + description.put("region", REGION); + description.put("credentials", ACCOUNT); + description.put("loadBalancerName", "test-loadBalancerName"); + List listeners = new ArrayList<>(); + Map listener = new HashMap<>(); + listener.put("listenerProtocal", ListenerType.HTTP); + listener.put("healthCheckURI", "/test/index.html"); + listener.put("healthCheck", "on"); + listener.put("healthCheckTimeout", 5); + listener.put("unhealthyThreshold", 3); + listener.put("healthyThreshold", 3); + listener.put("healthCheckInterval", 2); + listener.put("listenerPort", 80); + listener.put("bandwidth", 112); + listener.put("stickySession", "off"); + listener.put("backendServerPort", 90); + listeners.add(listener); + description.put("listeners", listeners); + description.put("vpcId", "vpc-test"); + description.put("vSwitchId", "111111"); + return description; + } + + private class CreateLoadBalancerAnswer implements Answer { + @Override + public CreateLoadBalancerResponse answer(InvocationOnMock invocation) throws Throwable { + CreateLoadBalancerResponse response = new CreateLoadBalancerResponse(); + response.setLoadBalancerId("test-lbId"); + return response; + } + } + + private class DescribeLoadBalancersAnswer implements Answer { + @Override + public DescribeLoadBalancersResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeLoadBalancersResponse response = new DescribeLoadBalancersResponse(); + response.setLoadBalancers(new ArrayList<>()); + return response; + } + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudSecurityGroupAtomicOperationTest.java 
b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudSecurityGroupAtomicOperationTest.java new file mode 100644 index 00000000000..b90b39a27ed --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/deploy/ops/UpsertAliCloudSecurityGroupAtomicOperationTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.deploy.ops; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.aliyuncs.ecs.model.v20140526.*; +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupAttributeResponse.Permission; +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse.SecurityGroup; +import com.aliyuncs.exceptions.ClientException; +import com.netflix.spinnaker.clouddriver.alicloud.deploy.description.UpsertAliCloudSecurityGroupDescription; +import java.util.ArrayList; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class UpsertAliCloudSecurityGroupAtomicOperationTest extends CommonAtomicOperation { + + @Before + public void testBefore() throws ClientException { + when(client.getAcsResponse(any())) + .thenAnswer(new DescribeSecurityGroupsAnswer()) + .thenAnswer(new CreateSecurityGroupAnswer()) + .thenAnswer(new DescribeSecurityGroupAttributeAnswer()) + .thenAnswer(new AuthorizeSecurityGroupAnswer()); + } + + @Test + public void testOperate() { + UpsertAliCloudSecurityGroupAtomicOperation operation = + new UpsertAliCloudSecurityGroupAtomicOperation( + buildDescription(), clientFactory, objectMapper); + operation.operate(priorOutputs); + } + + private UpsertAliCloudSecurityGroupDescription buildDescription() { + UpsertAliCloudSecurityGroupDescription description = + new UpsertAliCloudSecurityGroupDescription(); + description.setRegion(REGION); + description.setCredentials(credentials); + description.setSecurityGroupName("test-SecurityGroupName"); + List securityGroupIngress = new ArrayList<>(); + AuthorizeSecurityGroupRequest request = new AuthorizeSecurityGroupRequest(); + request.setIpProtocol("tcp"); + request.setPortRange("1/200"); + request.setSourceCidrIp("10.0.0.0/8"); + securityGroupIngress.add(request); + description.setSecurityGroupIngress(securityGroupIngress); + return description; + } + + private class DescribeSecurityGroupsAnswer implements Answer { + @Override + public DescribeSecurityGroupsResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeSecurityGroupsResponse response = new DescribeSecurityGroupsResponse(); + List securityGroups = new ArrayList<>(); + response.setSecurityGroups(securityGroups); + return response; + } + } + + private class CreateSecurityGroupAnswer implements Answer { + @Override + public CreateSecurityGroupResponse answer(InvocationOnMock invocation) throws 
Throwable { + CreateSecurityGroupResponse response = new CreateSecurityGroupResponse(); + response.setSecurityGroupId("test-SecurityGroupId"); + return response; + } + } + + private class DescribeSecurityGroupAttributeAnswer + implements Answer<DescribeSecurityGroupAttributeResponse> { + @Override + public DescribeSecurityGroupAttributeResponse answer(InvocationOnMock invocation) + throws Throwable { + DescribeSecurityGroupAttributeResponse response = + new DescribeSecurityGroupAttributeResponse(); + List<Permission> permissions = new ArrayList<>(); + response.setPermissions(permissions); + return response; + } + } + + private class AuthorizeSecurityGroupAnswer implements Answer<AuthorizeSecurityGroupResponse> { + @Override + public AuthorizeSecurityGroupResponse answer(InvocationOnMock invocation) throws Throwable { + AuthorizeSecurityGroupResponse response = new AuthorizeSecurityGroupResponse(); + return response; + } + } +}
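These AliCloud tests lean on Mockito's sequential stubbing: each chained thenAnswer services exactly one getAcsResponse invocation, so the stub order must mirror the order in which the operation under test calls the Alibaba Cloud API (describe, create, describe attributes, authorize). A minimal, self-contained sketch of the pattern; the response values are hypothetical, not taken from the test above.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.aliyuncs.IAcsClient;
import com.aliyuncs.ecs.model.v20140526.CreateSecurityGroupResponse;
import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse;
import com.aliyuncs.exceptions.ClientException;

class SequentialStubbingSketch {
  static IAcsClient stubbedClient() throws ClientException {
    IAcsClient client = mock(IAcsClient.class);
    when(client.getAcsResponse(any()))
        // first call: an empty describe result, i.e. no existing security group
        .thenAnswer(invocation -> new DescribeSecurityGroupsResponse())
        // second call: the create result carrying a hypothetical group id
        .thenAnswer(
            invocation -> {
              CreateSecurityGroupResponse created = new CreateSecurityGroupResponse();
              created.setSecurityGroupId("sg-hypothetical");
              return created;
            });
    return client;
  }
}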
diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerCachingAgentTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerCachingAgentTest.java new file mode 100644 index 00000000000..0c236e4c8dc --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudLoadBalancerCachingAgentTest.java @@ -0,0 +1,147 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.agent; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.aliyuncs.exceptions.ClientException; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerAttributeResponse; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerAttributeResponse.ListenerPortAndProtocal; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancerHTTPListenerAttributeResponse; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse; +import com.aliyuncs.slb.model.v20140515.DescribeLoadBalancersResponse.LoadBalancer; +import com.aliyuncs.slb.model.v20140515.DescribeVServerGroupsResponse; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.alicloud.provider.AliProvider; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudClientProvider; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentialsProvider; +import java.util.ArrayList; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import spock.lang.Subject; + +public class AliCloudLoadBalancerCachingAgentTest extends CommonCachingAgentTest { + + private final String NAME = "lbName"; + private final String ID = "lbId"; + + @Subject AliProvider aliProvider = mock(AliProvider.class); + + @Subject AliCloudClientProvider aliCloudClientProvider = mock(AliCloudClientProvider.class); + + @Subject + AliCloudCredentialsProvider aliCloudCredentialsProvider = mock(AliCloudCredentialsProvider.class); + + @Subject AliCloudProvider aliCloudProvider = mock(AliCloudProvider.class); + + @Subject Registry registry = mock(Registry.class); + + @Before + public void testBefore() throws ClientException { + when(client.getAcsResponse(any())) + .thenAnswer(new LoadBalancersAnswer()) + .thenAnswer(new LoadBalancerAttributeAnswer()) + .thenAnswer(new HTTPSListenerAnswer()) + .thenAnswer(new DescribeVServerGroupsResponseAnswer()); + } + + @Test + public void testLoadData() { + AliCloudLoadBalancerCachingAgent agent = + new AliCloudLoadBalancerCachingAgent( + aliProvider, + REGION, + aliCloudClientProvider, + aliCloudCredentialsProvider, + aliCloudProvider, + objectMapper, + registry, + account, + client); + CacheResult result = agent.loadData(providerCache); + String key = Keys.getLoadBalancerKey(NAME, ACCOUNT, REGION, null); + List<CacheData> LoadBalancers = (List) result.getCacheResults().get(LOAD_BALANCERS.ns); + assertTrue(LoadBalancers.size() == 1); + assertTrue(key.equals(LoadBalancers.get(0).getId())); + } + + private class LoadBalancersAnswer implements Answer<DescribeLoadBalancersResponse> { + @Override + public DescribeLoadBalancersResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeLoadBalancersResponse response = new DescribeLoadBalancersResponse(); + List<LoadBalancer> loadBalancers = new ArrayList<>(); + LoadBalancer loadBalancer = new LoadBalancer(); + loadBalancer.setLoadBalancerId(ID); + loadBalancer.setLoadBalancerName(NAME); + loadBalancers.add(loadBalancer); +
response.setLoadBalancers(loadBalancers); + return response; + } + } + + private class LoadBalancerAttributeAnswer + implements Answer<DescribeLoadBalancerAttributeResponse> { + + @Override + public DescribeLoadBalancerAttributeResponse answer(InvocationOnMock invocation) + throws Throwable { + DescribeLoadBalancerAttributeResponse response = new DescribeLoadBalancerAttributeResponse(); + response.setLoadBalancerName(NAME); + response.setLoadBalancerId(ID); + List<ListenerPortAndProtocal> listenerPortsAndProtocal = new ArrayList<>(); + ListenerPortAndProtocal listenerPortAndProtocal = new ListenerPortAndProtocal(); + listenerPortAndProtocal.setListenerPort(80); + listenerPortAndProtocal.setListenerProtocal("http"); + listenerPortsAndProtocal.add(listenerPortAndProtocal); + response.setListenerPortsAndProtocal(listenerPortsAndProtocal); + return response; + } + } + + private class HTTPSListenerAnswer + implements Answer<DescribeLoadBalancerHTTPListenerAttributeResponse> { + @Override + public DescribeLoadBalancerHTTPListenerAttributeResponse answer(InvocationOnMock invocation) + throws Throwable { + DescribeLoadBalancerHTTPListenerAttributeResponse response = + new DescribeLoadBalancerHTTPListenerAttributeResponse(); + response.setListenerPort(80); + return response; + } + } + + private class DescribeVServerGroupsResponseAnswer + implements Answer<DescribeVServerGroupsResponse> { + + @Override + public DescribeVServerGroupsResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeVServerGroupsResponse describeVServerGroupsResponse = + new DescribeVServerGroupsResponse(); + return describeVServerGroupsResponse; + } + } +}
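Both caching-agent tests above reduce to the same assertion shape: run loadData, pull one namespace out of the CacheResult, and compare ids against a key built by the Keys helper. A hedged, self-contained sketch of that shape; the namespace and expected key are supplied by the caller, since the real values are provider-specific.

import static org.junit.Assert.assertEquals;

import com.netflix.spinnaker.cats.agent.CacheResult;
import com.netflix.spinnaker.cats.cache.CacheData;
import java.util.Collection;

class CacheResultAssertionSketch {
  // expectedKey would come from a Keys helper; namespace from the provider's namespace constants
  static void assertSingleEntry(CacheResult result, String namespace, String expectedKey) {
    Collection<CacheData> cached = result.getCacheResults().get(namespace);
    assertEquals(1, cached.size());
    assertEquals(expectedKey, cached.iterator().next().getId());
  }
}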
diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudSecurityGroupCachingAgentTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudSecurityGroupCachingAgentTest.java new file mode 100644 index 00000000000..ad3c0bed1d6 --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/AliCloudSecurityGroupCachingAgentTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.agent; + +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupAttributeResponse; +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse; +import com.aliyuncs.ecs.model.v20140526.DescribeSecurityGroupsResponse.SecurityGroup; +import com.aliyuncs.exceptions.ClientException; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.alicloud.cache.Keys; +import java.util.ArrayList; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class AliCloudSecurityGroupCachingAgentTest extends CommonCachingAgentTest { + + private final String NAME = "sgName"; + private final String ID = "sgId"; + private final String VPCID = "vpcId"; + + @Before + public void testBefore() throws ClientException { + when(client.getAcsResponse(any())) + .thenAnswer(new SecurityGroupsAnswer()) + .thenAnswer(new SecurityGroupAttributeAnswer()); + } + + @Test + public void testLoadData() { + AliCloudSecurityGroupCachingAgent agent = + new AliCloudSecurityGroupCachingAgent(account, REGION, objectMapper, client); + CacheResult result = agent.loadData(providerCache); + String key = Keys.getSecurityGroupKey(NAME, ID, REGION, ACCOUNT, VPCID); + List<CacheData> sg = (List) result.getCacheResults().get(Keys.Namespace.SECURITY_GROUPS.ns); + assertTrue(sg.size() == 1); + assertTrue(key.equals(sg.get(0).getId())); + } + + private class SecurityGroupsAnswer implements Answer<DescribeSecurityGroupsResponse> { + @Override + public DescribeSecurityGroupsResponse answer(InvocationOnMock invocation) throws Throwable { + DescribeSecurityGroupsResponse response = new DescribeSecurityGroupsResponse(); + List<SecurityGroup> securityGroups = new ArrayList<>(); + SecurityGroup securityGroup = new SecurityGroup(); + securityGroup.setSecurityGroupName(NAME); + securityGroup.setSecurityGroupId(ID); + securityGroup.setVpcId(VPCID); + securityGroups.add(securityGroup); + response.setSecurityGroups(securityGroups); + return response; + } + } + + private class SecurityGroupAttributeAnswer + implements Answer<DescribeSecurityGroupAttributeResponse> { + @Override + public DescribeSecurityGroupAttributeResponse answer(InvocationOnMock invocation) + throws Throwable { + DescribeSecurityGroupAttributeResponse response = + new DescribeSecurityGroupAttributeResponse(); + return response; + } + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/CommonCachingAgentTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/CommonCachingAgentTest.java new file mode 100644 index 00000000000..be551c8f7eb --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/agent/CommonCachingAgentTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.agent; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.aliyuncs.IAcsClient; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.alicloud.security.AliCloudCredentials; +import spock.lang.Subject; + +public class CommonCachingAgentTest { + + static final String ACCOUNT = "test-account"; + static final String REGION = "cn-test"; + + @Subject ObjectMapper objectMapper = new ObjectMapper(); + + final IAcsClient client = mock(IAcsClient.class); + + static final AliCloudCredentials account; + + static { + account = mock(AliCloudCredentials.class); + when(account.getName()).thenReturn(ACCOUNT); + } + + final ProviderCache providerCache = mock(ProviderCache.class); +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudLoadBalancerProviderTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudLoadBalancerProviderTest.java new file mode 100644 index 00000000000..2a9aa5495e3 --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudLoadBalancerProviderTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.view; + +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.alicloud.model.AliCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.alicloud.provider.view.AliCloudLoadBalancerProvider.ResultDetails; +import java.util.*; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class AliCloudLoadBalancerProviderTest extends CommonProvider { + + @Before + public void testBefore() { + when(cacheView.getIdentifiers(anyString())).thenAnswer(new FilterAnswer()); + when(cacheView.getAll(anyString(), any(), any())).thenAnswer(new CacheDataAnswer()); + } + + @Test + public void testGetApplicationLoadBalancers() { + AliCloudLoadBalancerProvider provider = + new AliCloudLoadBalancerProvider(objectMapper, cacheView, oldProvider); + Set<AliCloudLoadBalancer> applicationLoadBalancers = + provider.getApplicationLoadBalancers("test-application"); + assertTrue(applicationLoadBalancers.size() == 1); + } + + @Test + public void testByAccountAndRegionAndName() { + AliCloudLoadBalancerProvider provider = + new AliCloudLoadBalancerProvider(objectMapper, cacheView, oldProvider); + List<ResultDetails> lbName = provider.byAccountAndRegionAndName(ACCOUNT, REGION, "lbName"); + assertTrue(lbName.size() == 1); + } + + private class FilterAnswer implements Answer<List<String>> { + @Override + public List<String> answer(InvocationOnMock invocation) throws Throwable { + List<String> list = new ArrayList<>(); + list.add("alicloud:loadBalancers:test-account:cn-hangzhou:test-application"); + return list; + } + } + + private class CacheDataAnswer implements Answer<List<CacheData>> { + @Override + public List<CacheData> answer(InvocationOnMock invocation) throws Throwable { + List<CacheData> cacheDatas = new ArrayList<>(); + Map<String, Object> attributes = new HashMap<>(); + attributes.put("account", ACCOUNT); + attributes.put("regionId", REGION); + attributes.put("regionIdAlias", "test-alias"); + attributes.put("loadBalancerName", "lbName"); + attributes.put("vpcId", "test-vpcId"); + attributes.put("loadBalancerId", "lbId"); + CacheData cacheData1 = + new DefaultCacheData( + "alicloud:loadBalancers:test-account:cn-hangzhou:test-application", attributes, null); + cacheDatas.add(cacheData1); + return cacheDatas; + } + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudSecurityGroupProviderTest.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudSecurityGroupProviderTest.java new file mode 100644 index 00000000000..b6a9e4fe2f0 --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/AliCloudSecurityGroupProviderTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.view; + +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.alicloud.model.AliCloudSecurityGroup; +import java.util.*; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class AliCloudSecurityGroupProviderTest extends CommonProvider { + + @Before + public void testBefore() { + when(cacheView.filterIdentifiers(anyString(), anyString())).thenAnswer(new FilterAnswer()); + when(cacheView.getAll(anyString(), any(), any())).thenAnswer(new CacheDataAnswer()); + } + + @Test + public void testGetAllByAccount() { + AliCloudSecurityGroupProvider provider = + new AliCloudSecurityGroupProvider(objectMapper, cacheView); + Collection<AliCloudSecurityGroup> allByAccounts = provider.getAllByAccount(true, ACCOUNT); + assertTrue(allByAccounts.size() == 1); + } + + private class FilterAnswer implements Answer<List<String>> { + @Override + public List<String> answer(InvocationOnMock invocation) throws Throwable { + List<String> list = new ArrayList<>(); + list.add("alicloud:securityGroups:sg-test:sg-test:cn-hangzhou:test-account:vpc-test"); + return list; + } + } + + private class CacheDataAnswer implements Answer<List<CacheData>> { + @Override + public List<CacheData> answer(InvocationOnMock invocation) throws Throwable { + List<CacheData> cacheDatas = new ArrayList<>(); + Map<String, Object> attributes = new HashMap<>(); + attributes.put("account", ACCOUNT); + attributes.put("regionId", REGION); + attributes.put("securityGroupId", "sg-test"); + attributes.put("securityGroupName", "sg-test"); + attributes.put("description", "des"); + attributes.put("vpcId", "vpc-test"); + List<Map<String, Object>> permissions = new ArrayList<>(); + Map<String, Object> permission1 = new HashMap<>(); + permission1.put("ipProtocol", "tcp"); + permission1.put("portRange", "1/200"); + permissions.add(permission1); + attributes.put("permissions", permissions); + CacheData cacheData1 = + new DefaultCacheData( + "alicloud:securityGroups:sg-test:sg-test:cn-hangzhou:test-account:vpc-test", + attributes, + null); + cacheDatas.add(cacheData1); + return cacheDatas; + } + } +} diff --git a/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/CommonProvider.java b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/CommonProvider.java new file mode 100644 index 00000000000..257c5b1f05c --- /dev/null +++ b/clouddriver-alicloud/src/test/java/com/netflix/spinnaker/clouddriver/alicloud/provider/view/CommonProvider.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Alibaba Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.alicloud.provider.view; + +import static org.mockito.Mockito.mock; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.alicloud.AliCloudProvider; +import spock.lang.Subject; + +public class CommonProvider { + + static final String ACCOUNT = "test-account"; + static final String REGION = "cn-test"; + + @Subject ObjectMapper objectMapper = new ObjectMapper(); + + @Subject AliCloudProvider oldProvider = new AliCloudProvider(); + + final Cache cacheView = mock(Cache.class); +} diff --git a/clouddriver-api-tck/README.md b/clouddriver-api-tck/README.md new file mode 100644 index 00000000000..75328a49ba7 --- /dev/null +++ b/clouddriver-api-tck/README.md @@ -0,0 +1,4 @@ +# clouddriver-api-tck + +Test harnesses and utilities for clouddriver-api consumers (plugin developers). + diff --git a/clouddriver-api-tck/clouddriver-api-tck.gradle b/clouddriver-api-tck/clouddriver-api-tck.gradle new file mode 100644 index 00000000000..7169aadad0f --- /dev/null +++ b/clouddriver-api-tck/clouddriver-api-tck.gradle @@ -0,0 +1,12 @@ +apply from: "${project.rootDir}/gradle/kotlin.gradle" +apply from: "${project.rootDir}/gradle/kotlin-test.gradle" + +dependencies { + implementation(project(":clouddriver-web")) + implementation(project(":clouddriver-core")) + implementation(project(":clouddriver-event")) + implementation(project(":cats:cats-core")) + + api("org.springframework.boot:spring-boot-starter-test") + api("dev.minutest:minutest") +} diff --git a/clouddriver-api-tck/src/main/kotlin/com/netflix/spinnaker/clouddriver/api/test/ClouddriverFixture.kt b/clouddriver-api-tck/src/main/kotlin/com/netflix/spinnaker/clouddriver/api/test/ClouddriverFixture.kt new file mode 100644 index 00000000000..276d970835f --- /dev/null +++ b/clouddriver-api-tck/src/main/kotlin/com/netflix/spinnaker/clouddriver/api/test/ClouddriverFixture.kt @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.api.test + +import com.netflix.spinnaker.clouddriver.Main +import dev.minutest.TestContextBuilder +import dev.minutest.TestDescriptor +import org.springframework.boot.test.context.SpringBootTest +import org.springframework.test.context.TestContextManager +import org.springframework.test.context.TestPropertySource + +/** + * ClouddriverFixture is the base configuration for a Clouddriver integration test. + */ +@SpringBootTest(classes = [Main::class]) +@TestPropertySource(properties = ["spring.config.location=classpath:clouddriver-test-app.yml"]) +abstract class ClouddriverFixture + +/** + * DSL for constructing a ClouddriverFixture within a Minutest suite. 
+ */ +inline fun <PF, reified F : ClouddriverFixture> TestContextBuilder<PF, F>.clouddriverFixture( + crossinline factory: (Unit).(testDescriptor: TestDescriptor) -> F +) { + fixture { testDescriptor -> + factory(testDescriptor).also { + TestContextManager(F::class.java).prepareTestInstance(it) + } + } +} diff --git a/clouddriver-api-tck/src/main/resources/clouddriver-test-app.yml b/clouddriver-api-tck/src/main/resources/clouddriver-test-app.yml new file mode 100644 index 00000000000..ca1404ac823 --- /dev/null +++ b/clouddriver-api-tck/src/main/resources/clouddriver-test-app.yml @@ -0,0 +1,14 @@ +spring: + application: + name: clouddriver + +services: + fiat: + enabled: false + baseUrl: http://localhost:7003 + front50: + enabled: true + baseUrl: http://localhost:8080 + +redis: + enabled: false diff --git a/clouddriver-api-tck/src/test/kotlin/com/netflix/spinnaker/clouddriver/api/test/ClouddriverFixtureTest.kt b/clouddriver-api-tck/src/test/kotlin/com/netflix/spinnaker/clouddriver/api/test/ClouddriverFixtureTest.kt new file mode 100644 index 00000000000..ad8c9e7def6 --- /dev/null +++ b/clouddriver-api-tck/src/test/kotlin/com/netflix/spinnaker/clouddriver/api/test/ClouddriverFixtureTest.kt @@ -0,0 +1,63 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.api.test + +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.data.task.InMemoryTaskRepository +import com.netflix.spinnaker.clouddriver.event.persistence.InMemoryEventRepository +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryNamedCacheFactory +import com.netflix.spinnaker.cats.cache.NamedCacheFactory +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import org.springframework.beans.factory.annotation.Autowired +import strikt.api.expect +import strikt.assertions.isA + +class ClouddriverFixtureTest : JUnit5Minutests { + + fun tests() = rootContext<Fixture> { + context("a clouddriver integration test environment") { + clouddriverFixture { + Fixture() + } + + test("the application starts with expected in-memory beans") { + expect { + that(taskRepository).isA<InMemoryTaskRepository>() + that(eventRepository).isA<InMemoryEventRepository>() + that(namedCacheFactory).isA<InMemoryNamedCacheFactory>() + } + } + } + } + + inner class Fixture : ClouddriverFixture() { + + @Autowired + lateinit var taskRepository: TaskRepository + + @Autowired + lateinit var eventRepository: EventRepository + + @Autowired + lateinit var namedCacheFactory: NamedCacheFactory + } +} diff --git a/clouddriver-api/api.md b/clouddriver-api/api.md new file mode 100644 index 00000000000..1f916830402 --- /dev/null +++ b/clouddriver-api/api.md @@ -0,0 +1,4 @@ +# Module clouddriver-api + +Contains all public Java APIs for Clouddriver.
+ diff --git a/clouddriver-api/clouddriver-api.gradle b/clouddriver-api/clouddriver-api.gradle new file mode 100644 index 00000000000..7c0fb81df29 --- /dev/null +++ b/clouddriver-api/clouddriver-api.gradle @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +apply plugin: 'java-library' + +dependencies { + implementation enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + annotationProcessor enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion") + + api("io.spinnaker.kork:kork-plugins-api") + api("io.spinnaker.kork:kork-credentials-api") + api("io.spinnaker.fiat:fiat-core:$fiatVersion") + + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.kork:kork-moniker" + + compileOnly("org.projectlombok:lombok") + annotationProcessor("org.projectlombok:lombok") +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AccountAware.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AccountAware.java similarity index 78% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AccountAware.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AccountAware.java index 310d28a0fff..ee505a748fb 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AccountAware.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AccountAware.java @@ -16,13 +16,14 @@ package com.netflix.spinnaker.cats.agent; +import com.netflix.spinnaker.kork.annotations.Beta; + /** - * This interface is used to identify classes (typically Agents) that are capable of returning the name of the account - * they are associated with. + * This interface is used to identify classes (typically Agents) that are capable of returning the + * name of the account they are associated with. */ +@Beta public interface AccountAware { - /** - * Get the name of the account this object is associated with. - */ + /** Get the name of the account this object is associated with. */ String getAccountName(); } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/Agent.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/Agent.java similarity index 81% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/Agent.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/Agent.java index dc8cdd21613..2d85402fab2 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/Agent.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/Agent.java @@ -17,11 +17,11 @@ package com.netflix.spinnaker.cats.agent; import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.kork.annotations.Beta; +@Beta public interface Agent { - /** - * @return the type of this agent. - */ + /** @return the type of this agent. 
*/ String getAgentType(); + + /** @@ -32,9 +32,9 @@ public interface Agent { AgentExecution getAgentExecution(ProviderRegistry providerRegistry); - default public boolean handlesAccount(String accountName) { + public default boolean handlesAccount(String accountName) { if (this instanceof AccountAware) { - return accountName.equals(((AccountAware)this).getAccountName()); + return accountName.equals(((AccountAware) this).getAccountName()); } return false; diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentDataType.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentDataType.java new file mode 100644 index 00000000000..4d5b20633fe --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentDataType.java @@ -0,0 +1,55 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.agent; + +import com.netflix.spinnaker.kork.annotations.Beta; + +/** + * Describes both the type name as well as authority for an Agent's provided data. + * + * <p>If an agent is an Authoritative source of data, then its resulting data set will be + * considered the current complete set for that data source. If an agent is an Informative source of + * data, its results will contribute to the data set for that type, but is never considered the + * complete set of data, so will not result in deletions when elements are no longer present. + */ +@Beta +public class AgentDataType { + public enum Authority { + AUTHORITATIVE, + INFORMATIVE; + + public AgentDataType forType(String typeName) { + return new AgentDataType(typeName, this); + } + } + + private final String typeName; + private final Authority authority; + + private AgentDataType(String typeName, Authority authority) { + this.typeName = typeName; + this.authority = authority; + } + + public String getTypeName() { + return typeName; + } + + public Authority getAuthority() { + return authority; + } +}
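The Authority enum doubles as a factory via forType, which keeps an agent's type declarations compact. A small sketch of how an agent might declare its provided types; the type names here are hypothetical placeholders.

import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE;

import com.netflix.spinnaker.cats.agent.AgentDataType;
import java.util.Arrays;
import java.util.Collection;

class ProvidedTypesSketch {
  // "loadBalancers" is owned by this agent: keys missing from a run are evicted.
  // "instances" is only contributed to: this agent never causes evictions there.
  static final Collection<AgentDataType> TYPES =
      Arrays.asList(AUTHORITATIVE.forType("loadBalancers"), INFORMATIVE.forType("instances"));
}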
diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentExecution.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentExecution.java similarity index 87% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentExecution.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentExecution.java index 22991c8dd0b..8ab4b830411 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentExecution.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentExecution.java @@ -16,6 +16,9 @@ package com.netflix.spinnaker.cats.agent; +import com.netflix.spinnaker.kork.annotations.Beta; + +@Beta public interface AgentExecution { - void executeAgent(Agent agent); + void executeAgent(Agent agent); } diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentLock.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentLock.java similarity index 93% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentLock.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentLock.java index 1e8c9a7f98d..c42f2d5cb8a 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentLock.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentLock.java @@ -16,6 +16,9 @@ package com.netflix.spinnaker.cats.agent; +import com.netflix.spinnaker.kork.annotations.Beta; + +@Beta public class AgentLock { private final Agent agent; diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentProvider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentProvider.java similarity index 79% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentProvider.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentProvider.java index 3f19018326d..f13c30038fa 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentProvider.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentProvider.java @@ -16,9 +16,15 @@ package com.netflix.spinnaker.cats.agent; +import com.netflix.spinnaker.credentials.Credentials; +import com.netflix.spinnaker.kork.annotations.Beta; import java.util.Collection; +@Beta public interface AgentProvider { boolean supports(String providerName); - Collection<Agent> agents(); + + default Collection<Agent> agents(Credentials credentials) { + return null; + } } diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentScheduler.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentScheduler.java new file mode 100644 index 00000000000..dbbba80eb32 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentScheduler.java @@ -0,0 +1,63 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.agent; + +import com.netflix.spinnaker.kork.annotations.Beta; + +/** An AgentScheduler manages the execution of a CachingAgent. */ +@Beta +public interface AgentScheduler<T extends AgentLock> { + void schedule( + Agent agent, + AgentExecution agentExecution, + ExecutionInstrumentation executionInstrumentation); + + default void unschedule(Agent agent) {} + + /** + * @return True if this scheduler supports synchronization between LoadData and OnDemand cache + * updates. + */ + default boolean isAtomic() { + return false; + } + + /** + * @param agent The agent being locked. + * @return A "Lock" that will allow exclusive access to updating this agent's cache data. null iff + * isAtomic == false. + */ + default T tryLock(Agent agent) { + return null; + } + + /** + * @param lock The lock being released. + * @return True iff the lock was still in our possession when the release call was made. + */ + default boolean tryRelease(T lock) { + return false; + } + + /** + * @param lock The lock being checked for validity. + * @return True iff the lock is still in our possession. + */ + default boolean lockValid(T lock) { + return false; + } +}
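For schedulers where isAtomic() is true, the javadoc above implies a lock protocol: a caller owns the agent only while holding a non-null lock from tryLock, and releases it with tryRelease. A hedged sketch of a caller honoring that contract; the work callback is a placeholder for the actual cache update.

import com.netflix.spinnaker.cats.agent.Agent;
import com.netflix.spinnaker.cats.agent.AgentLock;
import com.netflix.spinnaker.cats.agent.AgentScheduler;

class LockProtocolSketch {
  static void runExclusively(AgentScheduler<AgentLock> scheduler, Agent agent, Runnable work) {
    if (!scheduler.isAtomic()) {
      work.run(); // non-atomic schedulers offer no lock to take
      return;
    }
    AgentLock lock = scheduler.tryLock(agent);
    if (lock == null) {
      return; // defensively treat null as "not acquired" and skip this cycle
    }
    try {
      work.run();
    } finally {
      scheduler.tryRelease(lock);
    }
  }
}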
diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentSchedulerAware.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentSchedulerAware.java similarity index 80% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentSchedulerAware.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentSchedulerAware.java index 863645d3338..e02461a154d 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/agent/AgentSchedulerAware.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/AgentSchedulerAware.java @@ -16,24 +16,23 @@ package com.netflix.spinnaker.cats.agent; +import com.netflix.spinnaker.kork.annotations.Beta; + /** - * This class is used to identify classes (typically Providers or Agents) that are capable of returning the agent - * scheduler they are associated with. + * This class is used to identify classes (typically Providers or Agents) that are capable of + * returning the agent scheduler they are associated with. */ +@Beta public abstract class AgentSchedulerAware { private AgentScheduler agentScheduler; - /** - * Set this object's agent scheduler. - */ + /** Set this object's agent scheduler. */ public void setAgentScheduler(AgentScheduler agentScheduler) { this.agentScheduler = agentScheduler; - }; + } - /** - * Get this object's agent scheduler. - */ + /** Get this object's agent scheduler. */ public AgentScheduler getAgentScheduler() { return agentScheduler; - }; + } } diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/CacheResult.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/CacheResult.java new file mode 100644 index 00000000000..9043f2785a1 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/CacheResult.java @@ -0,0 +1,56 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.agent; + +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +/** The result of a CachingAgent run. */ +@Beta +public interface CacheResult { + /** @return The CacheDatas to cache, keyed by item type. */ + Map<String, Collection<CacheData>> getCacheResults(); + + /** + * Provides a means to explicitly evict items as a result of a CachingAgent execution. + * + *
<p>Note: Eviction will already occur based on the values in getCacheResults for all the types + * that the CachingAgent authoritatively caches - this collection is for additional items that + * were potentially cached out of band of a complete caching run. + * + * @return The ids of items that should be explicitly evicted. + */ + default Map<String, Collection<String>> getEvictions() { + return Collections.emptyMap(); + } + + default Map<String, Object> getIntrospectionDetails() { + return Collections.emptyMap(); + } + + /** + * If true, no evictions of existing keys are done unless specified by the getEvictions() method. + * + * @return true if evictions should be limited to the keys returned by getEvictions() + */ + default boolean isPartialResult() { + return false; + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/CachingAgent.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/CachingAgent.java new file mode 100644 index 00000000000..2725d935372 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/CachingAgent.java @@ -0,0 +1,145 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.agent; + +import com.netflix.spinnaker.cats.cache.AgentIntrospection; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.CacheIntrospectionStore; +import com.netflix.spinnaker.cats.cache.DefaultAgentIntrospection; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A CachingAgent loads one or more types of data. + * + * <p>The data set for a caching agent is scoped to the provider and agent type. For example an + * agent might load clusters for the AWS provider, and be scoped to a particular account and region. + */ +@Beta +public interface CachingAgent extends Agent { + /** + * @return the data types this Agent returns + * @see com.netflix.spinnaker.cats.agent.AgentDataType.Authority + */ + Collection<AgentDataType> getProvidedDataTypes(); + + /** + * Triggered by an AgentScheduler to tell this Agent to load its data. + * + * @param providerCache Cache associated with this Agent's provider + * @return the complete set of data for this Agent. + */ + CacheResult loadData(ProviderCache providerCache); + + default Optional<Map<String, String>> getCacheKeyPatterns() { + return Optional.empty(); + } + + default AgentExecution getAgentExecution(ProviderRegistry providerRegistry) { + return new CacheExecution(providerRegistry); + } + + class CacheExecution implements AgentExecution { + private final Logger log = LoggerFactory.getLogger(CacheExecution.class); + private final ProviderRegistry providerRegistry; + + public CacheExecution(ProviderRegistry providerRegistry) { + this.providerRegistry = providerRegistry; + } + + @Override + public void executeAgent(Agent agent) { + AgentIntrospection introspection = new DefaultAgentIntrospection(agent); + CacheResult result = executeAgentWithoutStore(agent); + introspection.finish(result); + CacheIntrospectionStore.getStore().recordAgent(introspection); + storeAgentResult(agent, result); + } + + public CacheResult executeAgentWithoutStore(Agent agent) { + CachingAgent cachingAgent = (CachingAgent) agent; + ProviderCache cache = providerRegistry.getProviderCache(cachingAgent.getProviderName()); + + return cachingAgent.loadData(cache); + } + + public void storeAgentResult(Agent agent, CacheResult result) { + CachingAgent cachingAgent = (CachingAgent) agent; + ProviderCache cache = providerRegistry.getProviderCache(cachingAgent.getProviderName()); + Collection<AgentDataType> providedTypes = cachingAgent.getProvidedDataTypes(); + Collection<String> authoritative = new HashSet<>(providedTypes.size()); + for (AgentDataType type : providedTypes) { + if (type.getAuthority() == AgentDataType.Authority.AUTHORITATIVE) { + authoritative.add(type.getTypeName()); + } + } + + Optional<Map<String, String>> cacheKeyPatterns = cachingAgent.getCacheKeyPatterns(); + if (cacheKeyPatterns.isPresent()) { + for (String type : authoritative) { + String cacheKeyPatternForType = cacheKeyPatterns.get().get(type); + if (cacheKeyPatternForType != null) { + try { + Set<String> cachedIdentifiersForType = + result.getCacheResults().get(type).stream() + .map(CacheData::getId) + .collect(Collectors.toSet()); + + Collection<String> evictableIdentifiers = + cache.filterIdentifiers(type, cacheKeyPatternForType).stream() + .filter(i -> !cachedIdentifiersForType.contains(i)) + .collect(Collectors.toSet()); + + // any key that existed previously but was not re-cached by this agent is considered + // evictable + if (!evictableIdentifiers.isEmpty()) { + Collection<String> evictionsForType = + result.getEvictions().computeIfAbsent(type, evictableKeys -> new ArrayList<>()); + evictionsForType.addAll(evictableIdentifiers); + + log.debug("Evicting stale identifiers: {}", evictableIdentifiers); + } + } catch (Exception e) { + log.error( + "Failed to check for stale identifiers (type: {}, pattern: {}, agent: {})", + type, + cacheKeyPatternForType, + agent, + e); + } + } + } + } + + if (result.isPartialResult()) { + cache.addCacheResult(agent.getAgentType(), authoritative, result); + } else { + cache.putCacheResult(agent.getAgentType(), authoritative, result); + } + } + } +}
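Read together with AgentDataType, a minimal caching agent only needs a type, a provider name, its provided data types, and a loadData implementation; CacheExecution takes care of storing results and computing evictions. A hedged sketch under those assumptions; the provider name, agent id, and cache key format are hypothetical.

import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;

import com.netflix.spinnaker.cats.agent.AgentDataType;
import com.netflix.spinnaker.cats.agent.CacheResult;
import com.netflix.spinnaker.cats.agent.CachingAgent;
import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
import com.netflix.spinnaker.cats.cache.CacheData;
import com.netflix.spinnaker.cats.cache.DefaultCacheData;
import com.netflix.spinnaker.cats.provider.ProviderCache;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class SketchCachingAgent implements CachingAgent {
  private static final String TYPE = "securityGroups"; // hypothetical type name

  @Override
  public String getAgentType() {
    return "sketch-account/cn-test/SketchCachingAgent"; // hypothetical agent id
  }

  @Override
  public String getProviderName() {
    return "sketchProvider"; // hypothetical provider name
  }

  @Override
  public Collection<AgentDataType> getProvidedDataTypes() {
    return Collections.singletonList(AUTHORITATIVE.forType(TYPE));
  }

  @Override
  public CacheResult loadData(ProviderCache providerCache) {
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("name", "sg-sketch");
    // relationships left null, mirroring the provider tests above
    CacheData item =
        new DefaultCacheData("sketchProvider:securityGroups:sg-sketch", attributes, null);

    Map<String, Collection<CacheData>> cacheResults = new HashMap<>();
    cacheResults.put(TYPE, Collections.singletonList(item));
    // not a partial result, so stale securityGroups keys get evicted by CacheExecution
    return new DefaultCacheResult(cacheResults);
  }
}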
diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/DefaultCacheResult.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/DefaultCacheResult.java new file mode 100644 index 00000000000..14d57de2b2d --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/DefaultCacheResult.java @@ -0,0 +1,75 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.agent; + +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import lombok.Getter; + +/** An immutable CacheResult. */ +@Beta +public class DefaultCacheResult implements CacheResult { + private final Map<String, Collection<CacheData>> cacheResults; + private final Map<String, Collection<String>> evictions; + @Getter private final Map<String, Object> introspectionDetails; + @Getter private final boolean partialResult; + + public DefaultCacheResult(Map<String, Collection<CacheData>> cacheResults) { + this(cacheResults, new HashMap<>()); + } + + public DefaultCacheResult( + Map<String, Collection<CacheData>> cacheResults, boolean partialResult) { + this(cacheResults, new HashMap<>(), new HashMap<>(), partialResult); + } + + public DefaultCacheResult( + Map<String, Collection<CacheData>> cacheResults, Map<String, Collection<String>> evictions) { + this(cacheResults, evictions, new HashMap<>()); + } + + public DefaultCacheResult( + Map<String, Collection<CacheData>> cacheResults, + Map<String, Collection<String>> evictions, + Map<String, Object> introspectionDetails) { + this(cacheResults, evictions, introspectionDetails, false); + } + + public DefaultCacheResult( + Map<String, Collection<CacheData>> cacheResults, + Map<String, Collection<String>> evictions, + Map<String, Object> introspectionDetails, + boolean partialResult) { + this.cacheResults = cacheResults; + this.evictions = evictions; + this.introspectionDetails = introspectionDetails; + this.partialResult = partialResult; + } + + @Override + public Map<String, Collection<CacheData>> getCacheResults() { + return cacheResults; + } + + @Override + public Map<String, Collection<String>> getEvictions() { + return evictions; + } +}
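The constructor overloads map one-to-one onto the CacheResult defaults above; the two-map form is the one to reach for when an agent needs explicit out-of-band evictions. A hedged sketch with a hypothetical namespace and key.

import com.netflix.spinnaker.cats.agent.CacheResult;
import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
import com.netflix.spinnaker.cats.cache.CacheData;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class EvictionResultSketch {
  static CacheResult resultWithEviction() {
    Map<String, Collection<CacheData>> cacheResults = new HashMap<>(); // nothing new to cache
    Map<String, Collection<String>> evictions = new HashMap<>();
    // explicitly drop a key that was cached out of band (e.g. by an on-demand update)
    evictions.put(
        "securityGroups", Collections.singletonList("sketchProvider:securityGroups:sg-stale"));
    return new DefaultCacheResult(cacheResults, evictions);
  }
}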
diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/ExecutionInstrumentation.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/ExecutionInstrumentation.java new file mode 100644 index 00000000000..6440b05724b --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/agent/ExecutionInstrumentation.java @@ -0,0 +1,32 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.agent; + +import com.netflix.spinnaker.kork.annotations.Beta; + +@Beta +public interface ExecutionInstrumentation { + void executionStarted(Agent agent); + + void executionCompleted(Agent agent, long elapsedMs); + + void executionFailed(Agent agent, Throwable cause, long elapsedMs); + + static long elapsedTimeMs(long startTimeMs) { + return System.currentTimeMillis() - startTimeMs; + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/AgentIntrospection.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/AgentIntrospection.java new file mode 100644 index 00000000000..ebb1428f83d --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/AgentIntrospection.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.kork.annotations.Beta; + +/** + * This is meant to store data about a single agent execution that _doesn't_ make sense to + * reasonably report to a monitoring system. Inspecting a single clouddriver node's use of these + * caching agents, having them report provider-specific details in the `details` field (e.g. which + * namespaces/kinds are cached), and correlating that with how long the caching agents execute + * allows users both to diagnose faulty/underprovisioned nodes and to tune their caching + * configuration by adjusting provider-specific fields. + */ +@Beta +public interface AgentIntrospection { + String getId(); + + String getProvider(); + + int getTotalAdditions(); + + int getTotalEvictions(); + + Long getLastExecutionDurationMs(); + + Long getLastExecutionStartMs(); + + Throwable getLastError(); + + String getLastExecutionStartDate(); + + void finishWithError(Throwable error, CacheResult result); + + void finish(CacheResult result); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/Cache.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/Cache.java new file mode 100644 index 00000000000..18b7554dd11 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/Cache.java @@ -0,0 +1,175 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +/** Cache provides view access to data keyed by type and identifier. */ +@Beta +public interface Cache { + + /** + * Gets a single item from the cache by type and id + * + * @param type the type of the item + * @param id the id of the item + * @return the item matching the type and id + */ + CacheData get(String type, String id); + + CacheData get(String type, String id, CacheFilter cacheFilter); + + /** + * Determines if a specified id exists in the cache without loading the data. + * + * @param type the type of the item + * @param identifier the id of the item + * @return true iff the item is present in the cache + */ + default boolean exists(String type, String identifier) { + return !existingIdentifiers(type, Collections.singleton(identifier)).isEmpty(); + } + + /** + * Filters the supplied list of identifiers to only those that exist in the cache. + * + * @param type the type of the item + * @param identifiers the identifiers for the items + * @return the list of identifiers that are present in the cache from the provided identifiers + */ + default Collection existingIdentifiers(String type, String... identifiers) { + if (identifiers.length == 0) { + return Collections.emptySet(); + } + return existingIdentifiers(type, Arrays.asList(identifiers)); + } + + /** + * Filters the supplied list of identifiers to only those that exist in the cache. + * + * @param type the type of the item + * @param identifiers the identifiers for the items + * @return the list of identifiers that are present in the cache from the provided identifiers + */ + Collection existingIdentifiers(String type, Collection identifiers); + + /** + * Retrieves all the identifiers for a type + * + * @param type the type for which to retrieve identifiers + * @return the identifiers for the type + */ + Collection getIdentifiers(String type); + + /** + * Returns the identifiers for the specified type that match the provided glob. + * + * @param type The type for which to retrieve identifiers + * @param glob The glob to match against the identifiers + * @return the identifiers for the type that match the glob + */ + Collection filterIdentifiers(String type, String glob); + + /** + * Retrieves all the items for the specified type + * + * @param type the type for which to retrieve items + * @return all the items for the type + */ + Collection getAll(String type); + + Collection getAll(String type, CacheFilter cacheFilter); + + /** + * Retrieves the items for the specified type matching the provided identifiers + * + * @param type the type for which to retrieve items + * @param identifiers the identifiers + * @return the items matching the type and identifiers + */ + Collection getAll(String type, Collection identifiers); + + Collection getAll( + String type, Collection identifiers, CacheFilter cacheFilter); + + /** + * Retrieves the items for the specified type matching the provided identifiers + * + * @param type the type for which to retrieve items + * @param identifiers the identifiers + * @return the items matching the type and identifiers + */ + Collection getAll(String type, String... 
identifiers); + + /** Returns whether or not the three {@code getAllByApplication} methods are supported */ + default boolean supportsGetAllByApplication() { + return false; + } + + /** + * Retrieves all items for the specified type associated with the provided application. Requires a + * storeType with secondary indexes and support in the type's caching agent. + * + *
<p>
Clients should check {@link #supportsGetAllByApplication()} to check if this method is + * supported before calling it. + * + * @param type the type for which to retrieve items + * @param application the application name + * @return the matching items, keyed by type + */ + default Map> getAllByApplication(String type, String application) { + throw new UnsupportedCacheMethodException("Method only implemented for StoreType.SQL"); + } + + /** + * Retrieves all items for the specified type associated with the provided application. Requires a + * storeType with secondary indexes and support in the type's caching agent. + * + *
<p>
Clients should check {@link #supportsGetAllByApplication()} to check if this method is + * supported before calling it. + * + * @param type the type for which to retrieve items + * @param application the application name + * @param cacheFilter the cacheFilter to govern which relationships to fetch + * @return the matching items, keyed by type + */ + default Map> getAllByApplication( + String type, String application, CacheFilter cacheFilter) { + throw new UnsupportedCacheMethodException("Method only implemented for StoreType.SQL"); + } + + /** + * Retrieves all items for the specified type associated with the provided application. Requires a + * storeType with secondary indexes and support in the type's caching agent. + * + *
<p>
Clients should check {@link #supportsGetAllByApplication()} to check if this method is + * supported before calling it. + * + * @param types collection of types for which to retrieve items + * @param application the application name + * @param cacheFilters cacheFilters to govern which relationships to fetch, as type to filter + * @return the matching items, keyed by type + */ + default Map> getAllByApplication( + Collection types, String application, Map cacheFilters) { + throw new UnsupportedCacheMethodException("Method only implemented for StoreType.SQL"); + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheData.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheData.java new file mode 100644 index 00000000000..38ade97be54 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheData.java @@ -0,0 +1,43 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; +import java.util.Map; + +/** + * CacheData is stored in a Cache. Attributes are facts about the CacheData that can be updated by + * CachingAgents. Relationships are links to other CacheData. + * + *
<p>
Note: Not all caches may support a per record ttl + */ +@Beta +public interface CacheData { + String getId(); + + /** @return The ttl (in seconds) for this CacheData */ + int getTtlSeconds(); + + Map getAttributes(); + + /** + * @return relationships for this CacheData, keyed by type returning a collection of ids for that + * type + */ + Map> getRelationships(); +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CacheFilter.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheFilter.java similarity index 93% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CacheFilter.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheFilter.java index 791b94f30a0..074c9641f2a 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/cache/CacheFilter.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheFilter.java @@ -16,8 +16,10 @@ package com.netflix.spinnaker.cats.cache; +import com.netflix.spinnaker.kork.annotations.Beta; import java.util.Collection; +@Beta public interface CacheFilter { enum Type { RELATIONSHIP diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheIntrospectionStore.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheIntrospectionStore.java new file mode 100644 index 00000000000..cf0c365fb27 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/CacheIntrospectionStore.java @@ -0,0 +1,41 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +@Beta +public class CacheIntrospectionStore { + private static final CacheIntrospectionStore store = new CacheIntrospectionStore(); + + private Map agents = new ConcurrentHashMap<>(); + + public static CacheIntrospectionStore getStore() { + return store; + } + + public Collection listAgentIntrospections() { + return agents.values(); + } + + public void recordAgent(AgentIntrospection agentIntrospection) { + agents.put(agentIntrospection.getId(), agentIntrospection); + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/DefaultAgentIntrospection.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/DefaultAgentIntrospection.java new file mode 100644 index 00000000000..a9a11d52273 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/DefaultAgentIntrospection.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.text.SimpleDateFormat; +import java.util.Map; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Beta +@Data +@NoArgsConstructor +public class DefaultAgentIntrospection implements AgentIntrospection { + public DefaultAgentIntrospection(Agent agent) { + this.lastExecutionStartMs = System.currentTimeMillis(); + this.id = agent.getAgentType(); + this.provider = agent.getProviderName(); + } + + public void finishWithError(Throwable error, CacheResult result) { + lastError = error; + finish(result); + } + + public void finish(CacheResult result) { + lastExecutionDurationMs = System.currentTimeMillis() - lastExecutionStartMs; + details = result.getIntrospectionDetails(); + totalAdditions = + result.getCacheResults().values().stream() + .reduce(0, (a, b) -> a + b.size(), (a, b) -> a + b); + totalEvictions = + result.getEvictions().values().stream().reduce(0, (a, b) -> a + b.size(), (a, b) -> a + b); + } + + private String id; + private String provider; + private int totalAdditions; + private int totalEvictions; + private Map details; + private Throwable lastError; + private Long lastExecutionStartMs; + private Long lastExecutionDurationMs; + + public String getLastExecutionStartDate() { + return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").format(lastExecutionStartMs); + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/DefaultCacheData.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/DefaultCacheData.java new file mode 100644 index 00000000000..7c58e19c790 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/DefaultCacheData.java @@ -0,0 +1,91 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.time.Clock; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +/** An immutable CacheData. 
*/ +@Beta +public class DefaultCacheData implements CacheData { + private final String id; + private final int ttlSeconds; + private final Map attributes; + private final Map> relationships; + + public DefaultCacheData( + String id, Map attributes, Map> relationships) { + this(id, -1, attributes, relationships); + } + + public DefaultCacheData( + String id, + int ttlSeconds, + Map attributes, + Map> relationships) { + this(id, ttlSeconds, attributes, relationships, Clock.systemDefaultZone()); + } + + public DefaultCacheData( + String id, + int ttlSeconds, + Map attributes, + Map> relationships, + Clock clock) { + // ensure attributes is non-null and mutable given that `cacheExpiry` will be added + attributes = attributes == null ? new HashMap<>() : new HashMap<>(attributes); + + this.id = id; + this.attributes = attributes; + this.relationships = relationships; + + if (ttlSeconds > 0) { + Long cacheExpiry = clock.millis() + ttlSeconds * 1000; + this.attributes.put("cacheExpiry", cacheExpiry); + } + + if (ttlSeconds < 0 && attributes.containsKey("cacheExpiry")) { + ttlSeconds = (int) (clock.millis() - (long) attributes.get("cacheExpiry")) * -1 / 1000; + } + + this.ttlSeconds = ttlSeconds; + } + + @Override + public String getId() { + return id; + } + + @Override + public int getTtlSeconds() { + return ttlSeconds; + } + + @Override + public Map getAttributes() { + return attributes; + } + + @Override + public Map> getRelationships() { + return relationships; + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/UnsupportedCacheMethodException.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/UnsupportedCacheMethodException.java new file mode 100644 index 00000000000..b96a2f5a4d8 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/cache/UnsupportedCacheMethodException.java @@ -0,0 +1,10 @@ +package com.netflix.spinnaker.cats.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; + +@Beta +public class UnsupportedCacheMethodException extends RuntimeException { + public UnsupportedCacheMethodException(String message) { + super(message); + } +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/Provider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/Provider.java similarity index 82% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/Provider.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/Provider.java index 97999e4f7bd..7a6610b97b3 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/Provider.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/Provider.java @@ -17,15 +17,13 @@ package com.netflix.spinnaker.cats.provider; import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.CachingAgent; - +import com.netflix.spinnaker.kork.annotations.Beta; import java.util.Collection; -/** - * A Provider has many Agents. - */ +/** A Provider has many Agents. 
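A short usage sketch for DefaultCacheData above. The key and values are hypothetical, and the generic signatures (Map<String, Object> attributes, Map<String, Collection<String>> relationships) are assumed where this rendering of the diff has dropped them.

import com.netflix.spinnaker.cats.cache.CacheData;
import com.netflix.spinnaker.cats.cache.DefaultCacheData;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public class DefaultCacheDataExample {
  public static void main(String[] args) {
    Map<String, Object> attributes = Map.of("name", "myapp-v001", "region", "us-west-2");
    // Relationships point at other cache keys, grouped by type.
    Map<String, Collection<String>> relationships = Map.of("applications", List.of("myapp"));

    // A positive ttl makes the constructor record a "cacheExpiry" attribute.
    CacheData data =
        new DefaultCacheData("serverGroups:myapp-v001", 120, attributes, relationships);

    System.out.println(data.getTtlSeconds()); // 120
    System.out.println(data.getAttributes().containsKey("cacheExpiry")); // true
  }
}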
*/ +@Beta public interface Provider { - String getProviderName(); + String getProviderName(); - Collection getAgents(); + Collection getAgents(); } diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCache.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCache.java new file mode 100644 index 00000000000..90b9e23a2c6 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCache.java @@ -0,0 +1,76 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.provider; + +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; + +@Beta +public interface ProviderCache extends Cache { + /** + * * Used for storing the complete result set generated by a caching agent. Stored items not + * contained in cacheResult are evicted. + * + * @param source The calling caching agent. + * @param authoritativeTypes Authoritative cache results define graph vertices, informative + * results create edges. + * @param cacheResult The definitive set of results from source. + */ + void putCacheResult( + String source, Collection authoritativeTypes, CacheResult cacheResult); + + /** + * * Equivalent to putCacheResult but no evictions are processed, even if explicitly within + * cacheResult + * + * @param source The calling caching agent + * @param authoritativeTypes Authoritative cache results define graph vertices, informative + * results create edges. May only be of use with some ProviderCache implementations. + * @param cacheResult Results to store. Since evictions are bypassed, partial authoritative + * results are supported. + */ + void addCacheResult( + String source, Collection authoritativeTypes, CacheResult cacheResult); + + /** + * * Add or update a single authoritative resource. + * + * @param type The calling caching agent. + * @param cacheData Item to store. + */ + void putCacheData(String type, CacheData cacheData); + + /** + * * Multi-get all identifiers of the given type. + * + * @param type All identifiers must be of this resource type. + * @param identifiers The identifiers to fetch. + * @return + */ + Collection getAll(String type, Collection identifiers); + + /** + * * Delete stored identifiers of the given type. + * + * @param type All identifiers must be of this resource type. + * @param ids The identifiers to delete. 
+ */ + void evictDeletedItems(String type, Collection ids); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCacheConfiguration.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCacheConfiguration.java new file mode 100644 index 00000000000..a2c041debec --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderCacheConfiguration.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.cats.provider; + +/** Allows cache configuration to be overridden per-provider. */ +public interface ProviderCacheConfiguration { + /** + * By default a safe guard exists preventing removal of the last cached resource, even if it no + * longer exists in the underlying provider. + * + *
<p>
This allows a provider to opt out of this safe guard and allow full cache eviction. + * + * @return true if provider cache should support full eviction, false otherwise. + */ + default boolean supportsFullEviction() { + return false; + } +} diff --git a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderRegistry.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderRegistry.java similarity index 77% rename from cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderRegistry.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderRegistry.java index 6958076e6d1..90098afa8ed 100644 --- a/cats/cats-core/src/main/java/com/netflix/spinnaker/cats/provider/ProviderRegistry.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/cats/provider/ProviderRegistry.java @@ -17,17 +17,18 @@ package com.netflix.spinnaker.cats.provider; import com.netflix.spinnaker.cats.cache.Cache; - +import com.netflix.spinnaker.kork.annotations.Beta; import java.util.Collection; /** - * A ProviderRegistry has multiple providers, and provides access to the - * ProviderCaches for each provider. + * A ProviderRegistry has multiple providers, and provides access to the ProviderCaches for each + * provider. */ +@Beta public interface ProviderRegistry { - Collection getProviders(); + Collection getProviders(); - Collection getProviderCaches(); + Collection getProviderCaches(); - ProviderCache getProviderCache(String providerName); + ProviderCache getProviderCache(String providerName); } diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataInput.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataInput.java new file mode 100644 index 00000000000..8e0363b628b --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataInput.java @@ -0,0 +1,22 @@ +package com.netflix.spinnaker.clouddriver.aws.userdata; + +import lombok.Builder; +import lombok.Value; + +/** Input object when providing or tokenizing user data. */ +@Builder +@Value +public class UserDataInput { + String asgName; + String launchSettingName; + String region; + String account; + String environment; + String accountType; + Boolean launchTemplate; + Boolean legacyUdf; + UserDataOverride userDataOverride; + String base64UserData; + String iamRole; + String imageId; +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataOverride.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataOverride.java new file mode 100644 index 00000000000..1a1840f72c5 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataOverride.java @@ -0,0 +1,18 @@ +package com.netflix.spinnaker.clouddriver.aws.userdata; + +import javax.annotation.Nonnull; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * If enabled, overrides the default Spinnaker user data (represented via implementations of {@link + * UserDataProvider}. + */ +@Data +@NoArgsConstructor +public class UserDataOverride { + private boolean enabled; + + /** Identifies the implementation of {@link UserDataTokenizer} to use. 
*/ + @Nonnull private String tokenizerName = "default"; +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataProvider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataProvider.java new file mode 100644 index 00000000000..0126e9aed62 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataProvider.java @@ -0,0 +1,18 @@ +package com.netflix.spinnaker.clouddriver.aws.userdata; + +/** + * Implementations of this interface will provide user data to instances during the deployment + * process. + */ +public interface UserDataProvider { + + /** + * Provide user data from the specified request. + * + * @param userDataInput {@link UserDataInput} + * @return String + */ + default String getUserData(UserDataInput userDataInput) { + return ""; + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataTokenizer.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataTokenizer.java new file mode 100644 index 00000000000..f4308997b42 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/aws/userdata/UserDataTokenizer.java @@ -0,0 +1,31 @@ +package com.netflix.spinnaker.clouddriver.aws.userdata; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.kork.plugins.api.internal.SpinnakerExtensionPoint; + +/** Utility interface to replace tokens in user data templates. */ +public interface UserDataTokenizer extends SpinnakerExtensionPoint { + + /** + * If this instance supports the specified tokenizer. + * + * @param tokenizerName - the tokenizer the instance supports. The default tokenizer is "default" + * and is found first if multiple "default" supporting user data tokenizers are found. + * @return boolean + */ + default boolean supports(String tokenizerName) { + return tokenizerName.equals("default"); + } + + /** + * Replaces the tokens that are present in the supplied user data. + * + * @param names {@link Names} + * @param userDataInput {@link UserDataInput} + * @param rawUserData The user data to replace tokens in + * @param legacyUdf + * @return String + */ + String replaceTokens( + Names names, UserDataInput userDataInput, String rawUserData, Boolean legacyUdf); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/KeyParser.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/KeyParser.java new file mode 100644 index 00000000000..cd9fadec298 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/KeyParser.java @@ -0,0 +1,78 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
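A sketch of a custom user data provider together with the builder that Lombok's @Builder generates on UserDataInput above. The class and the exported variables are hypothetical; the getters follow from the @Value annotation.

import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput;
import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider;

// Hypothetical provider that contributes a couple of shell exports to instance user data.
public class EnvironmentExportsUserDataProvider implements UserDataProvider {
  @Override
  public String getUserData(UserDataInput input) {
    return "export SPINNAKER_ACCOUNT=" + input.getAccount() + "\n"
        + "export SPINNAKER_REGION=" + input.getRegion() + "\n";
  }

  public static void main(String[] args) {
    UserDataInput input =
        UserDataInput.builder().asgName("myapp-v001").account("prod").region("us-west-2").build();
    System.out.print(new EnvironmentExportsUserDataProvider().getUserData(input));
  }
}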
+ */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Map; + +@Beta +public interface KeyParser { + + /** + * Returns the parsed property name for the specified cache that represents the + * "name" of the item being parsed. + * + *
<p>
For example, + * + *
<p>
Given the AWS key + * serverGroups:mycluster-stack-detail:some-account:some-region:myservergroup-stack-detail-v000 + * , we might store the server group name (the last part of the key) under a different + * property than name, e.g., serverGroup, in which case the mapping of + * Namespace.SERVER_GROUPS.ns to "serverGroup" would be needed. + * + * @param cache the name of the cache (key type) being parsed + * @return the mapping of the key name to the actual key property name for the specified + * cache or null if no mapping exists or is required (e.g., if the parsed + * key already contains a name property and it maps correctly). + */ + default String getNameMapping(String cache) { + return null; + } + + /** + * Indicates which provider this particular parser handles + * + * @return the cloud provider ID + */ + String getCloudProvider(); + + /** + * Parses the supplied key to an arbitrary Map of attributes + * + * @param key the full key + * @return a Map of the key attributes + */ + Map parseKey(String key); + + /** + * indicates whether this parser can parse the supplied type + * + * @param type the entity type, typically corresponding to a value in the implementing class's + * Namespace + * @return true if it can parse this type, false otherwise + */ + Boolean canParseType(String type); + + /** + * indicates whether this parser can parse the supplied field + * + * @param field the entity type field, typically corresponding to a value in the implementing + * class's parsed Namespace field + * @return true if it can parse this field, false otherwise + */ + Boolean canParseField(String field); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandAgent.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandAgent.java new file mode 100644 index 00000000000..86aeec76c9d --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandAgent.java @@ -0,0 +1,102 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.Nullable; +import lombok.Data; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Beta +public interface OnDemandAgent { + Logger logger = LoggerFactory.getLogger(OnDemandAgent.class); + + String getProviderName(); + + String getOnDemandAgentType(); + + // TODO(ttomsu): This seems like it should go in a different interface. 
+ OnDemandMetricsSupportable getMetricsSupport(); + + boolean handles(OnDemandType type, String cloudProvider); + + @Data + class OnDemandResult { + String sourceAgentType; + Collection authoritativeTypes = new ArrayList<>(); + CacheResult cacheResult; + Map> evictions = new HashMap<>(); + + public OnDemandResult() {} + + public OnDemandResult( + String sourceAgentType, + CacheResult cacheResult, + Map> evictions) { + this.sourceAgentType = sourceAgentType; + this.cacheResult = cacheResult; + this.evictions = evictions; + } + } + + /* + * WARNING: this is an interim solution while cloud providers write their own ways to derive monikers. + */ + default Moniker convertOnDemandDetails(Map details) { + if (details == null || details.isEmpty()) { + return null; + } + + try { + String sequence = details.get("sequence"); + + return Moniker.builder() + .app(details.get("application")) + .stack(details.get("stack")) + .detail(details.get("detail")) + .cluster(details.get("cluster")) + .sequence(sequence != null ? Integer.valueOf(sequence) : null) + .build(); + } catch (Exception e) { + logger.warn("Unable to build moniker", e); + return null; + } + } + + @Nullable + OnDemandResult handle(ProviderCache providerCache, Map data); + + Collection> pendingOnDemandRequests(ProviderCache providerCache); + + default Map pendingOnDemandRequest(ProviderCache providerCache, String id) { + Collection> pendingOnDemandRequests = + pendingOnDemandRequests(providerCache); + return pendingOnDemandRequests.stream() + .filter(m -> id.equals(m.get("id"))) + .findFirst() + .orElse(null); + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupportable.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupportable.java new file mode 100644 index 00000000000..fd50a2b7838 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupportable.java @@ -0,0 +1,65 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
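The default convertOnDemandDetails above assembles a Moniker from a map of string details. A standalone sketch of that conversion follows, with hypothetical values; Moniker's builder is the one already used by the interface, and the getApp/getSequence accessors are assumed from the moniker library.

import com.netflix.spinnaker.moniker.Moniker;
import java.util.Map;

public class MonikerDetailsExample {
  public static void main(String[] args) {
    Map<String, String> details =
        Map.of("application", "myapp", "stack", "prod", "detail", "canary", "sequence", "3");

    // Mirrors the default convertOnDemandDetails: null-safe parse of "sequence".
    String sequence = details.get("sequence");
    Moniker moniker =
        Moniker.builder()
            .app(details.get("application"))
            .stack(details.get("stack"))
            .detail(details.get("detail"))
            .cluster(details.get("cluster"))
            .sequence(sequence != null ? Integer.valueOf(sequence) : null)
            .build();

    System.out.println(moniker.getApp() + " sequence=" + moniker.getSequence());
  }
}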
+ * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.function.Supplier; + +@Beta +public interface OnDemandMetricsSupportable { + String ON_DEMAND_TOTAL_TIME = "onDemand_total"; + String DATA_READ = "onDemand_read"; + String DATA_TRANSFORM = "onDemand_transform"; + String ON_DEMAND_STORE = "onDemand_store"; + String CACHE_WRITE = "onDemand_cache"; + String CACHE_EVICT = "onDemand_evict"; + String ON_DEMAND_ERROR = "onDemand_error"; + String ON_DEMAND_COUNT = "onDemand_count"; + + T readData(Supplier closure); + + T transformData(Supplier closure); + + T onDemandStore(Supplier closure); + + T cacheWrite(Supplier closure); + + default void cacheWrite(Runnable closure) { + cacheWrite( + () -> { + closure.run(); + return null; + }); + } + + T cacheEvict(Supplier closure); + + default void cacheEvict(Runnable closure) { + cacheEvict( + () -> { + closure.run(); + return null; + }); + } + + void countError(); + + void countOnDemand(); + + void recordTotalRunTimeNanos(long nanos); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandType.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandType.java new file mode 100644 index 00000000000..ef8bc540b05 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandType.java @@ -0,0 +1,59 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Objects; +import lombok.Data; + +@Beta +@Data +public class OnDemandType { + private final String value; + + public static OnDemandType fromString(String value) { + return new OnDemandType(value); + } + + public static final OnDemandType ServerGroup = new OnDemandType("ServerGroup"); + public static final OnDemandType SecurityGroup = new OnDemandType("SecurityGroup"); + public static final OnDemandType LoadBalancer = new OnDemandType("LoadBalancer"); + public static final OnDemandType Job = new OnDemandType("Job"); + public static final OnDemandType TargetGroup = new OnDemandType("TargetGroup"); + public static final OnDemandType CloudFormation = new OnDemandType("CloudFormation"); + public static final OnDemandType Manifest = new OnDemandType("Manifest"); + public static final OnDemandType Function = new OnDemandType("Function"); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OnDemandType that = (OnDemandType) o; + return Objects.equals(value.toLowerCase(), that.value.toLowerCase()); + } + + @Override + public int hashCode() { + return Objects.hash(value.toLowerCase()); + } + + @Override + public String toString() { + return value; + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/SearchableProvider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/SearchableProvider.java new file mode 100644 index 00000000000..16e17bc47eb --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/cache/SearchableProvider.java @@ -0,0 +1,113 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.provider.Provider; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Beta +public interface SearchableProvider extends Provider { + + /** Names of caches to search by default */ + Set getDefaultCaches(); + + /** + * Map keyed by named cache to a template that produces a url for a search result. + * + *
<p>
The template will be supplied the result from calling parseKey on the search key + */ + Map getUrlMappingTemplates(); + + /** SearchResultHydrators for cache types */ + Map getSearchResultHydrators(); + + /** The parts of the key, if this Provider supports keys of this type, otherwise null. */ + Map parseKey(String key); + + default Optional getKeyParser() { + return Optional.empty(); + } + + /** + * Build a search term for querying. + * + *
<p>
If this SearchableProvider supplies a KeyParser then the search term is scoped to that + * KeyParsers cloudProvider, otherwise injects a wildcard glob at the start. + * + *
<p>
Supplying a KeyParser to provide a CloudProviderId to scope the search more narrowly results + * in improved search performance. + */ + default String buildSearchTerm(String type, String queryTerm) { + String prefix = getKeyParser().map(KeyParser::getCloudProvider).orElse("*"); + return prefix + ":" + type + ":*" + queryTerm + "*"; + } + + default boolean supportsSearch(String type, Map filters) { + final boolean filterMatch; + if (filters == null || !filters.containsKey("cloudProvider")) { + filterMatch = true; + } else { + filterMatch = + getKeyParser() + .map( + kp -> + kp.canParseType(type) + && kp.getCloudProvider().equals(filters.get("cloudProvider"))) + .orElse(true); + } + + return filterMatch && hasAgentForType(type, getAgents()); + } + + static boolean hasAgentForType(String type, Collection agents) { + return agents.stream() + .filter(CachingAgent.class::isInstance) + .map(CachingAgent.class::cast) + .anyMatch( + ca -> + ca.getProvidedDataTypes().stream().anyMatch(pdt -> pdt.getTypeName().equals(type))); + } + + /** + * A SearchResultHydrator provides a custom strategy for enhancing result data for a particular + * cache type. + */ + public static interface SearchResultHydrator { + Map hydrateResult(Cache cacheView, Map result, String id); + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class SearchableResource { + /** Lowercase name of a resource type. e.g. 'instances', 'load_balancers' */ + String resourceType; + + /** Lowercase name of the platform. e.g. 'aws', 'gce' */ + String platform; + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/core/CloudProvider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/core/CloudProvider.java new file mode 100644 index 00000000000..5ea2c2a489c --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/core/CloudProvider.java @@ -0,0 +1,53 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.core; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.lang.annotation.Annotation; + +/** + * Different cloud providers (AWS, GCE, Titus, etc.) should implement this interface and annotate + * different implementations with annotation class indicated by {@code getAnnotation} method to + * identify the cloud provider specific implementations + */ +@Beta +public interface CloudProvider { + /** + * A unique string that identifies the cloud provider implementation + * + * @return + */ + String getId(); + + /** + * Display name or simply the name for the cloud provider. Use {@code getID()} for uniqueness + * constraints instead of this method + * + * @return + */ + String getDisplayName(); + + /** + * Annotation type that can be assigned to the implementations for operations, converters, + * validators, etc. 
to enable lookup based on the operation description name and cloud provider + * type + * + * @return + */ + Class getOperationAnnotationType(); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidator.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidator.java new file mode 100644 index 00000000000..7ae7bec16a5 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidator.java @@ -0,0 +1,43 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.clouddriver.orchestration.VersionedCloudProviderOperation; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.List; + +@Beta +public abstract class DescriptionValidator implements VersionedCloudProviderOperation { + private static final String VALIDATOR_SUFFIX = "Validator"; + + public static String getValidatorName(String description) { + return description + VALIDATOR_SUFFIX; + } + + public static String getOperationName(String validator) { + if (validator == null || validator.length() == 0) { + return validator; + } + if (validator.endsWith(VALIDATOR_SUFFIX)) { + return validator.substring(0, validator.length() - VALIDATOR_SUFFIX.length()); + } + return validator; + } + + public abstract void validate(List priorDescriptions, T description, ValidationErrors errors); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/GlobalDescriptionValidator.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/GlobalDescriptionValidator.java new file mode 100644 index 00000000000..87789fb2dd6 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/GlobalDescriptionValidator.java @@ -0,0 +1,50 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.kork.plugins.api.internal.SpinnakerExtensionPoint; +import java.util.List; + +/** + * An extension point for adding additional validators for AtomicOperations. Can be used globally to + * add the same validation to all operations or can be narrowed to a subset of operations. + * + *
<p>
{@code OperationsService} calls out to {@code AnnotationsBasedAtomicOperationsRegistry} to + * fetch the validator for each atomic operation description. {@code + * AnnotationsBasedAtomicOperationsRegistry} contains an autowired List of {@code + * GlobalDescriptionValidator}s, which will pull in any bean that implements this interface. It then + * returns a {@code CompositeDescriptionValidator} to that will call {@code validate()} on the + * original atomic operation description validator and all {@code GlobalDescriptionValidator}s. + */ +@Beta +public interface GlobalDescriptionValidator extends SpinnakerExtensionPoint { + /** + * Whether or not the validator should be applied to the passed-in {@code OperationDescription}. + * This should return true if the validator should be applied to all operations. + * + * @return true if this validator should be applied, false otherwise + */ + boolean handles(T description); + + /** + * Validates the {@code description} and adds any validation errors to the {@code errors} + * parameter. + */ + void validate( + String operationName, List priorDescriptions, T description, ValidationErrors errors); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/ValidationErrors.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/ValidationErrors.java new file mode 100644 index 00000000000..b82e6c91b48 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/deploy/ValidationErrors.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.kork.annotations.Beta; + +@Beta +public interface ValidationErrors { + + void reject(String errorCode); + + void reject(String errorCode, String defaultMessage); + + void reject(String errorCode, Object[] errorArgs, String defaultMessage); + + void rejectValue(String field, String errorCode); + + void rejectValue(String field, String errorCode, String defaultMessage); + + void rejectValue(String field, String errorCode, Object[] errorArgs, String defaultMessage); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperation.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperation.java new file mode 100644 index 00000000000..9f8dbef9407 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperation.java @@ -0,0 +1,45 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * An AtomicOperation is the most fundamental, low-level unit of work in a workflow. Implementations + * of this interface should perform the simplest form of work possible, often described by a + * description object. + */ +@Beta +public interface AtomicOperation { + /** + * This method will initiate the operation's work. In this, operation's can get a handle on prior + * output results from the required method argument. + * + * @param priorOutputs + * @return parameterized type + */ + R operate(List priorOutputs); + + default Collection getEvents() { + return Collections.emptyList(); + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverter.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverter.java new file mode 100644 index 00000000000..50256e6a29a --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.orchestration.OperationDescription; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * Implementations of this trait will provide an object capable of converting a Map of input + * parameters to an operation's description object and an {@link AtomicOperation} instance. + */ +@Beta +public interface AtomicOperationConverter extends VersionedCloudProviderOperation { + /** + * This method takes a Map input and converts it to an {@link AtomicOperation} instance. + * + * @param input + * @return atomic operation + */ + @Nullable + AtomicOperation convertOperation(Map input); + + /** + * This method takes a Map input and creates a description object, that will often be used by an + * {@link AtomicOperation}. 
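A minimal sketch of the AtomicOperation contract above. Both classes are hypothetical; operate takes a raw List, matching the signature as shown.

import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
import java.util.List;

// Hypothetical description object, normally produced by an AtomicOperationConverter.
class EchoDescription {
  String message;
}

// The simplest possible operation: its "work" is returning the description's message.
class EchoAtomicOperation implements AtomicOperation<String> {
  private final EchoDescription description;

  EchoAtomicOperation(EchoDescription description) {
    this.description = description;
  }

  @Override
  public String operate(List priorOutputs) {
    // Real operations call cloud provider APIs here; outputs of earlier
    // operations in the same task arrive via priorOutputs.
    return description.message;
  }
}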
+ * + * @param input + * @return instance of an operation description object + */ + OperationDescription convertDescription(Map input); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/VersionedCloudProviderOperation.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/VersionedCloudProviderOperation.java new file mode 100644 index 00000000000..9feeaf51e86 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/VersionedCloudProviderOperation.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.netflix.spinnaker.kork.annotations.Beta; +import javax.annotation.Nullable; + +@Beta +public interface VersionedCloudProviderOperation { + /** + * Allows individual operations to be versioned. + * + *
<p>
A {@code version} may be null if no specific version is requested, which is the common case. + * + * @param version The operation version requested by the client + * @return true if the operation works with the provided version + */ + default boolean acceptsVersion(@Nullable String version) { + return true; + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEvent.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEvent.java similarity index 93% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEvent.java rename to clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEvent.java index 54cde0e9626..94325436c86 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEvent.java +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEvent.java @@ -16,8 +16,12 @@ package com.netflix.spinnaker.clouddriver.orchestration.events; +import com.netflix.spinnaker.kork.annotations.Beta; + +@Beta public interface OperationEvent { Type getType(); + Action getAction(); String getCloudProvider(); diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccessControlledAccountDefinition.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccessControlledAccountDefinition.java new file mode 100644 index 00000000000..22f1bb3f633 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccessControlledAccountDefinition.java @@ -0,0 +1,31 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nonnull; + +/** Defines permissions on account credential definitions. */ +@Beta +public interface AccessControlledAccountDefinition extends CredentialsDefinition { + @Nonnull + Map> getPermissions(); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentials.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentials.java new file mode 100644 index 00000000000..d9e10b80e09 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentials.java @@ -0,0 +1,108 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.Credentials; +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Collections; +import java.util.List; + +/** + * Implementations of this interface will provide properties specific to a named account object, + * with capability to retrieve a type of credential object (such as AWSCredentials or + * GoogleCredentials). + * + * @param - type of credential object to be returned + */ +@Beta +public interface AccountCredentials extends Credentials { + /** + * Provides the name of the account to be returned. + * + *
<p>
Uniquely identifies the account. + * + * @return the name of the account + */ + String getName(); + + /** + * Provides the environment name for the account. + * + *
<p>
Many accounts can share the same environment (e.g. dev, test, prod) + * + * @return the Environment name + */ + String getEnvironment(); + + /** + * Provides the type for the account. + * + *
<p>
Account type is typically consistent among the set of credentials that represent a related + * set of environments. + * + *
<p>
e.g.: + * + *

    + *
+   * <ul>
+   *   <li>account name: maindev, environment: dev, accountType: main
+   *   <li>account name: maintest, environment: test, accountType: main
+   *   <li>account name: mainprod, environment: prod, accountType: main
+   * </ul>
+ * + * @return the type for the account. + */ + String getAccountType(); + + /** @return the id for the account (may be null if not supported by underlying cloud provider) */ + default String getAccountId() { + return null; + } + + /** + * Returns an associated credentials object, which may be lazily initialized based off of some + * detail encapsulated within the implementation (like environment or keys, etc) + * + * @return typed credentials object + */ + T getCredentials(); + + /** + * Provides the name of the cloud provider. Typically something like 'aws', 'gce' or 'docker'. + * + * @return the name of the cloud provider + */ + String getCloudProvider(); + + @Override + default String getType() { + return getCloudProvider(); + } + + default boolean isEnabled() { + return true; + } + + /** + * A user in ANY required group should be allowed access to this account. + * + * @return the group names that govern access to this account, empty indicates a public account + * accessible by all. + */ + @Deprecated + default List getRequiredGroupMembership() { + return Collections.emptyList(); + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentialsProvider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentialsProvider.java new file mode 100644 index 00000000000..a05803f1903 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentialsProvider.java @@ -0,0 +1,44 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Set; + +/** + * Implementations of this interface will provide a mechanism to store and retrieve {@link + * AccountCredentials} objects. For manipulating the backing of this provider, consumers of this API + * should get access to its corresponding {@link AccountCredentialsRepository} + */ +@Beta +public interface AccountCredentialsProvider { + + /** + * Returns all of the accounts known to the repository of this provider. + * + * @return a set of account names + */ + Set getAll(); + + /** + * Returns a specific {@link AccountCredentials} object a specified name + * + * @param name the name of the account + * @return account credentials object + */ + AccountCredentials getCredentials(String name); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentialsRepository.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentialsRepository.java new file mode 100644 index 00000000000..d3c30280e27 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountCredentialsRepository.java @@ -0,0 +1,70 @@ +/* + * Copyright 2015 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.kork.annotations.Beta; +import java.util.Set; + +/** + * Represents a repository for CRUD operations pertaining to {@link AccountCredentials}. May be + * required by the {@link AccountCredentialsProvider} to get a handle on credentials objects. + * Consumers should use this repository interface for manipulating the backing of the provider. + */ +@Beta +public interface AccountCredentialsRepository { + + /** + * Returns a single {@link AccountCredentials} object, referenced by the specified name + * + * @param key the key to retrieve from the repository + * @return account credentials + */ + AccountCredentials getOne(String key); + + /** + * Returns all {@link AccountCredentials} objects known to this repository + * + * @return a set of account credentials + */ + Set<? extends AccountCredentials> getAll(); + + /** + * Stores an {@link AccountCredentials} object in this repository. This is an identity function. + * + * @param key the key to associate with this account credentials object + * @param credentials account credentials object to save + * @return input + */ + AccountCredentials save(String key, AccountCredentials credentials); + + /** + * Indicates that the keyed reference should be updated with the provided {@link + * AccountCredentials} object. This is an identity function. + * + * @param key the key to associate with this account credentials object + * @param credentials account credentials object to associate with the provided key + * @return input + */ + AccountCredentials update(String key, AccountCredentials credentials); + + /** + * Removes the keyed reference from the repository + * + * @param key ref to be removed + */ + void delete(String key); +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionRepository.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionRepository.java new file mode 100644 index 00000000000..0a025fdd9c5 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionRepository.java @@ -0,0 +1,151 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
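To make the repository contract concrete, a minimal in-memory sketch (the class name and backing map are illustrative, not part of this change):

```groovy
import java.util.concurrent.ConcurrentHashMap

class MapBackedAccountCredentialsRepository implements AccountCredentialsRepository {
  private final Map<String, AccountCredentials> backing = new ConcurrentHashMap<>()

  AccountCredentials getOne(String key) { backing[key] }

  Set<? extends AccountCredentials> getAll() { backing.values() as Set }

  // save and update are identity functions: both return their input.
  AccountCredentials save(String key, AccountCredentials credentials) {
    backing[key] = credentials
    credentials
  }

  AccountCredentials update(String key, AccountCredentials credentials) {
    backing[key] = credentials
    credentials
  }

  void delete(String key) { backing.remove(key) }
}
```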
+ */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.List; +import javax.annotation.Nullable; + +/** Provides CRUD persistence operations for account {@link CredentialsDefinition} instances. */ +@Beta +@NonnullByDefault +public interface AccountDefinitionRepository { + /** + * Looks up an account definition using the account name. + * + * @param name account name to look up + * @return the configured account definition or null if none is found + */ + @Nullable + CredentialsDefinition getByName(String name); + + /** + * Lists account definitions for a given account type. This API allows for infinite-scrolling + * style pagination using the {@code limit} and {@code startingAccountName} parameters. + * + * @param typeName account type to search for (the value of the @JsonTypeName annotation on the + * corresponding CredentialsDefinition class) + * @param limit max number of entries to return + * @param startingAccountName where to begin the results list if specified, or start at the beginning + * if null + * @return list of stored account definitions matching the given account type name (sorted + * alphabetically) + */ + List<? extends CredentialsDefinition> listByType( + String typeName, int limit, @Nullable String startingAccountName); + + /** + * Lists account definitions for a given account type. Account types correspond to the value in + * the {@code @JsonTypeName} annotation present on the corresponding {@code CredentialsDefinition} + * class. + * + * @param typeName account type to search for + * @return list of all stored account definitions matching the given account type name + */ + List<? extends CredentialsDefinition> listByType(String typeName); + + /** + * Creates a new account definition using the provided data. Secrets should use {@code + * UserSecretReference} encrypted URIs (e.g., {@code + * secret://secrets-manager?r=us-west-2&s=my-account-credentials}) when the underlying storage + * provider does not support row-level encryption or equivalent security features. Encrypted URIs + * will only be decrypted when loading account definitions, not when storing them. Note that + * account definitions correspond to the JSON representation of the underlying {@link + * CredentialsDefinition} object along with a JSON type discriminator field with the key {@code + * type} and value of the corresponding {@code @JsonTypeName} annotation. In addition to + * user secret URIs, traditional {@code EncryptedSecret} URIs (like {@code + * encrypted:secrets-manager!r:us-west-2!s:my-account-credentials}) are also supported when + * Clouddriver is configured with global secret engines. + * + * @param definition account definition to store as a new account + */ + void create(CredentialsDefinition definition); + + /** + * Creates or updates an account definition using the provided data. This is also known as an + * upsert operation. See {@link #create(CredentialsDefinition)} for more details. + * + * @param definition account definition to save + */ + void save(CredentialsDefinition definition); + + /** + * Updates an existing account definition using the provided data. See {@link + * #create(CredentialsDefinition)} for details on the format.
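A sketch of the infinite-scrolling read pattern the paginated `listByType` enables; the repository wiring, account type, page size, and the `name` accessor on `CredentialsDefinition` are assumptions for illustration:

```groovy
def pageSize = 100
String cursor = null // null means start at the beginning
while (true) {
  List<? extends CredentialsDefinition> page =
      accountDefinitionRepository.listByType('kubernetes', pageSize, cursor)
  page.each { definition -> println definition.name } // name accessor assumed, see lead-in
  if (page.size() < pageSize) {
    break // short page: no more results
  }
  cursor = page.last().name // continue after the last account seen
}
```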
+ * + * @param definition updated account definition to replace an existing account + * @see #create(CredentialsDefinition) + */ + void update(CredentialsDefinition definition); + + /** + * Deletes an account by name. + * + * @param name name of account to delete + */ + void delete(String name); + + /** + * Looks up the revision history of an account given its name. Revisions are sorted by latest + * version first. + * + * @param name account name to look up history for + * @return history of account updates for the given account name + */ + List<Revision> revisionHistory(String name); + + /** + * Provides metadata for an account definition revision when making updates to an account via + * {@link AccountDefinitionRepository} APIs. + */ + class Revision { + private final int version; + private final long timestamp; + private final @Nullable CredentialsDefinition account; + + /** Constructs a revision entry with a version and account definition. */ + public Revision(int version, long timestamp, @Nullable CredentialsDefinition account) { + this.version = version; + this.timestamp = timestamp; + this.account = account; + } + + /** Returns the version number of this revision. Versions start at 1 and increase from there. */ + public int getVersion() { + return version; + } + + /** + * Returns the timestamp (in millis since the epoch) corresponding to when this revision was + * made. + */ + public long getTimestamp() { + return timestamp; + } + + /** + * Returns the account definition used in this revision. Returns {@code null} when this revision + * corresponds to a deletion. + */ + @Nullable + public CredentialsDefinition getAccount() { + return account; + } + } +} diff --git a/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionTypeProvider.java b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionTypeProvider.java new file mode 100644 index 00000000000..5dd3f77f081 --- /dev/null +++ b/clouddriver-api/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionTypeProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Map; + +/** + * Provides subtypes of {@link CredentialsDefinition} for registration as account definition types. + * All beans of this type contribute zero or more account classes.
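For example, the `Revision` metadata could drive a simple audit listing; the repository wiring and account name here are hypothetical:

```groovy
accountDefinitionRepository.revisionHistory('maintest').each { rev ->
  // A null account marks a deletion revision, per the Javadoc above.
  def action = rev.account == null ? 'deleted' : 'saved'
  println "v${rev.version} @ ${rev.timestamp}ms: account was ${action}"
}
```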
+ */ +@Beta +@NonnullByDefault +public interface AccountDefinitionTypeProvider { + Map<String, Class<? extends CredentialsDefinition>> getCredentialsTypes(); +} diff --git a/clouddriver-appengine/clouddriver-appengine.gradle b/clouddriver-appengine/clouddriver-appengine.gradle index 17a1d2a291b..993614097ea 100644 --- a/clouddriver-appengine/clouddriver-appengine.gradle +++ b/clouddriver-appengine/clouddriver-appengine.gradle @@ -1,16 +1,47 @@ dependencies { - compile project(":clouddriver-artifacts") - compile project(":clouddriver-core") - compile project(":clouddriver-google-common") - compile spinnaker.dependency("frigga") - compile spinnaker.dependency("bootActuator") - compile spinnaker.dependency("bootWeb") - compile spinnaker.dependency("korkArtifacts") + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-core") + implementation project(":clouddriver-google-common") + implementation project(":clouddriver-security") - // TODO(dpeach): move to spinnaker/spinnaker-dependencies. - compile "com.google.apis:google-api-services-appengine:v1-rev32-1.23.0" - compile "org.eclipse.jgit:org.eclipse.jgit:4.5.0.201609210915-r" + implementation "com.google.apis:google-api-services-appengine" + implementation "com.google.apis:google-api-services-storage" + implementation 'com.google.auth:google-auth-library-oauth2-http' + implementation "com.jcraft:jsch" + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-config" + implementation "io.spinnaker.kork:kork-cloud-config-server" + implementation "io.spinnaker.kork:kork-moniker" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "com.jakewharton.retrofit:retrofit1-okhttp3-client" + implementation "com.netflix.spectator:spectator-api" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "commons-io:commons-io" + implementation "org.apache.commons:commons-compress:1.21" + implementation "org.apache.groovy:groovy" + implementation "org.apache.groovy:groovy-json" + implementation "org.eclipse.jgit:org.eclipse.jgit.ssh.jsch:5.13.1.202206130422-r" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.springframework.cloud:spring-cloud-context" + implementation "org.springframework.cloud:spring-cloud-config-server" + + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.assertj:assertj-core" + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.mockito:mockito-core" - compile spinnaker.dependency("googleStorage") - compile "org.apache.commons:commons-compress:1.14" } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/AppengineJobExecutor.groovy
b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/AppengineJobExecutor.groovy index 3c47929aa39..a4e6f385c26 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/AppengineJobExecutor.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/AppengineJobExecutor.groovy @@ -18,38 +18,25 @@ package com.netflix.spinnaker.clouddriver.appengine import com.netflix.spinnaker.clouddriver.jobs.JobExecutor import com.netflix.spinnaker.clouddriver.jobs.JobRequest -import com.netflix.spinnaker.clouddriver.jobs.JobStatus +import com.netflix.spinnaker.clouddriver.jobs.JobResult import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value import org.springframework.stereotype.Component @Component class AppengineJobExecutor { - @Value('${appengine.jobSleepMs:1000}') + @Value('${appengine.job-sleep-ms:1000}') Long sleepMs @Autowired JobExecutor jobExecutor void runCommand(List<String> command) { - String jobId = jobExecutor.startJob(new JobRequest(tokenizedCommand: command), - System.getenv(), - new ByteArrayInputStream()) - waitForJobCompletion(jobId) - } - - void waitForJobCompletion(String jobId) { - sleep(sleepMs) - JobStatus jobStatus = jobExecutor.updateJob(jobId) - while (jobStatus != null && jobStatus.state == JobStatus.State.RUNNING) { - sleep(sleepMs) - jobStatus = jobExecutor.updateJob(jobId) - } - if (jobStatus == null) { - throw new RuntimeException("job timed out or was cancelled") - } - if (jobStatus.result == JobStatus.Result.FAILURE && jobStatus.stdOut) { - throw new IllegalArgumentException("$jobStatus.stdOut + $jobStatus.stdErr") + JobResult<String> jobStatus = jobExecutor.runJob(new JobRequest(command)) + if (jobStatus.getResult() == JobResult.Result.FAILURE) { + String stdOut = jobStatus.getOutput() + String stdErr = jobStatus.getError() + throw new IllegalArgumentException("stdout: $stdOut, stderr: $stdErr") } } } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/config/AppengineConfigurationProperties.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/config/AppengineConfigurationProperties.groovy deleted file mode 100644 index 808fdadd830..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/config/AppengineConfigurationProperties.groovy +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
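The rewritten `AppengineJobExecutor` reflects the new synchronous `JobExecutor` contract: `runJob` blocks and returns a `JobResult`, replacing the old start/poll loop. A usage sketch (the command and the `JobResult<String>` typing are assumptions based on the change above):

```groovy
JobResult<String> jobStatus = jobExecutor.runJob(new JobRequest(['gcloud', '--version']))
if (jobStatus.result == JobResult.Result.FAILURE) {
  // Surface both streams, mirroring AppengineJobExecutor above.
  throw new IllegalArgumentException("stdout: ${jobStatus.output}, stderr: ${jobStatus.error}")
}
```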
- */ - -package com.netflix.spinnaker.clouddriver.appengine.config - -import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor -import com.netflix.spinnaker.clouddriver.googlecommon.config.GoogleCommonManagedAccount -import com.squareup.okhttp.OkHttpClient -import groovy.json.JsonSlurper -import groovy.transform.ToString -import retrofit.RestAdapter -import retrofit.client.OkClient -import retrofit.client.Response -import retrofit.http.GET -import retrofit.http.Headers -import retrofit.mime.TypedByteArray - -class AppengineConfigurationProperties { - @ToString(includeNames = true) - static class ManagedAccount extends GoogleCommonManagedAccount { - static final String metadataUrl = "http://metadata.google.internal/computeMetadata/v1" - - String serviceAccountEmail - String localRepositoryDirectory - String gitHttpsUsername - String gitHttpsPassword - String githubOAuthAccessToken - String sshPrivateKeyFilePath - String sshPrivateKeyPassphrase - String sshKnownHostsFilePath - boolean sshTrustUnknownHosts - GcloudReleaseTrack gcloudReleaseTrack - - void initialize(AppengineJobExecutor jobExecutor) { - if (this.jsonPath) { - jobExecutor.runCommand(["gcloud", "auth", "activate-service-account", "--key-file", this.jsonPath]) - - def accountJson = new JsonSlurper().parse(new File(this.jsonPath)) - this.project = this.project ?: accountJson["project_id"] - this.serviceAccountEmail = this.serviceAccountEmail ?: accountJson["client_email"] - } else { - def metadataService = createMetadataService() - - try { - this.project = responseToString(metadataService.getProject()) - this.serviceAccountEmail = responseToString(metadataService.getApplicationDefaultServiceAccountEmail()) - } catch (e) { - throw new RuntimeException("Could not find application default credentials for App Engine.", e) - } - } - } - - static MetadataService createMetadataService() { - RestAdapter restAdapter = new RestAdapter.Builder() - .setEndpoint(metadataUrl) - .setClient(new OkClient(new OkHttpClient(retryOnConnectionFailure: true))) - .build() - - return restAdapter.create(MetadataService.class) - } - - static interface MetadataService { - @Headers("Metadata-Flavor: Google") - @GET("/project/project-id") - Response getProject() - - @Headers("Metadata-Flavor: Google") - @GET("/instance/service-accounts/default/email") - Response getApplicationDefaultServiceAccountEmail() - } - - static String responseToString(Response response) { - new String(((TypedByteArray) response.body).bytes) - } - - static enum GcloudReleaseTrack { - ALPHA, - BETA, - STABLE, - } - } - - List accounts = [] -} diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineMutexRepository.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineMutexRepository.groovy deleted file mode 100644 index 1e2f7284c2c..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineMutexRepository.groovy +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.appengine.deploy - -import java.util.concurrent.Semaphore - -class AppengineMutexRepository { - static HashMap mutexRepository = new HashMap<>() - - static T atomicWrapper(String mutexKey, Closure doOperation) { - if (!mutexRepository.containsKey(mutexKey)) { - mutexRepository.put(mutexKey, new Mutex()) - } - Mutex mutex = mutexRepository.get(mutexKey) - - // Outside the try {} block, because in the case of an exception being thrown here, we don't want to try unlocking - // the mutex in the finally block. - mutex.lock() - T result = null - Exception failure - try { - result = doOperation() - } catch (Exception e) { - failure = e - } finally { - mutex.unlock() - if (failure) { - throw failure - } else { - return result - } - } - } - - static class Mutex { - Semaphore sem - - void lock() { - if (sem == null) { - sem = new Semaphore(1) - } - sem.acquire() - } - - void unlock() { - if (sem == null) { - throw new IllegalStateException("Attempt made to unlock mutex that was never locked") - } - sem.release() - } - } -} - diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineSafeRetry.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineSafeRetry.groovy deleted file mode 100644 index 6f244182c89..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineSafeRetry.groovy +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.appengine.deploy - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.appengine.deploy.exception.AppengineOperationException -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleCommonSafeRetry -import org.springframework.beans.factory.annotation.Value -import org.springframework.stereotype.Component - -@Component -class AppengineSafeRetry extends GoogleCommonSafeRetry { - @Value('${appengine.safeRetryMaxWaitIntervalMs:60000}') - Long maxWaitInterval - - @Value('${appengine.safeRetryRetryIntervalBaseSec:2}') - Long retryIntervalBase - - @Value('${appengine.safeRetryJitterMultiplier:1000}') - Long jitterMultiplier - - @Value('${appengine.safeRetryMaxRetries:10}') - Long maxRetries - - public Object doRetry(Closure operation, - String resource, - Task task, - List retryCodes, - List successfulErrorCodes, - Map tags, - Registry registry) { - return super.doRetry( - operation, - resource, - task, - retryCodes, - successfulErrorCodes, - maxWaitInterval, - retryIntervalBase, - jitterMultiplier, - maxRetries, - tags, - registry - ) - } - - @Override - AppengineOperationException providerOperationException(String message) { - new AppengineOperationException(message) - } -} diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineUtils.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineUtils.groovy index a9f4d2b504e..7508f2dacf1 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineUtils.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineUtils.groovy @@ -33,6 +33,8 @@ class AppengineUtils { task.updateStatus phase, "Querying all versions for project $project..." def services = queryAllServices(project, credentials, task, phase) + // TODO(jacobkiefer): Consider limiting batch sizes. + // https://github.com/spinnaker/spinnaker/issues/3564. 
BatchRequest batch = credentials.appengine.batch() def allVersions = [] diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/AppengineAtomicOperationConverterHelper.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/AppengineAtomicOperationConverterHelper.groovy index 783025a6245..db166abb29a 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/AppengineAtomicOperationConverterHelper.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/AppengineAtomicOperationConverterHelper.groovy @@ -17,13 +17,14 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.fasterxml.jackson.databind.DeserializationFeature +import com.netflix.spinnaker.clouddriver.appengine.deploy.description.AbstractAppengineCredentialsDescription import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter class AppengineAtomicOperationConverterHelper { - static Object convertDescription(Map input, - AbstractAtomicOperationsCredentialsSupport credentialsSupport, - Class targetDescriptionType) { + static <T extends AbstractAppengineCredentialsDescription> T convertDescription(Map input, + AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> credentialsSupport, + Class<T> targetDescriptionType) { input.accountName = input.accountName ?: input.account ?: input.credentials if (input.accountName) { @@ -40,7 +41,7 @@ class AppengineAtomicOperationConverterHelper { .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) .convertValue(input, targetDescriptionType) - converted.credentials = credentials in AppengineNamedAccountCredentials ?
credentials : null + converted.credentials = credentials as AppengineNamedAccountCredentials converted } } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverter.groovy index 8f3d7717fd4..ebfd6a761fb 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeleteAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DeleteAppengineLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.DELETE_LOAD_BALANCER) @Component -class DeleteAppengineLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DeleteAppengineLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new DeleteAppengineLoadBalancerAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverter.groovy index 759253602a0..283482412b1 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverter.groovy @@ -21,8 +21,10 @@ import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.exception.AppengineDescriptionConversionException import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DeployAppengineAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport import
com.netflix.spinnaker.kork.artifacts.model.Artifact import org.springframework.beans.factory.annotation.Autowired @@ -32,7 +34,7 @@ import groovy.util.logging.Slf4j @AppengineOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component @Slf4j -class DeployAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DeployAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { @Autowired ObjectMapper objectMapper @@ -63,6 +65,10 @@ class DeployAppengineAtomicOperationConverter extends AbstractAtomicOperationsCr throw new AppengineDescriptionConversionException("Invalid artifact type for Appengine deploy: ${description.artifact.type}") } } + if (input.configArtifacts) { + def configArtifacts = input.configArtifacts + description.configArtifacts = configArtifacts.collect({ objectMapper.convertValue(it, Artifact) }) + } return description } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverter.groovy index 79936a51cf5..8decdf27ca9 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DestroyAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DestroyAppengineAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.DESTROY_SERVER_GROUP) @Component -class DestroyAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DestroyAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new DestroyAppengineAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverter.groovy index 81d2fd7edbb..21788837bb6 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import
com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.EnableDisableAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DisableAppengineAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.DISABLE_SERVER_GROUP) @Component -class DisableAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DisableAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new DisableAppengineAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverter.groovy index af3f8bd36e3..f8bde778b98 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.EnableDisableAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.EnableAppengineAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.ENABLE_SERVER_GROUP) @Component -class EnableAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class EnableAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new EnableAppengineAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverter.groovy index fa9046c74a9..be3d57ffc82 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverter.groovy +++ 
b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.StartStopAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.StartAppengineAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.START_SERVER_GROUP) @Component -class StartAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class StartAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new StartAppengineAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StopAppengineAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StopAppengineAtomicOperationConverter.groovy index 059ec351911..83eaa64f5e0 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StopAppengineAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StopAppengineAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.StartStopAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.StopAppengineAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.STOP_SERVER_GROUP) @Component -class StopAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class StopAppengineAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new StopAppengineAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverter.groovy index 
fa24edb6e0c..da21322e47d 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.TerminateAppengineInstancesDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.TerminateAppengineInstancesAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.TERMINATE_INSTANCES) @Component -class TerminateAppengineInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class TerminateAppengineInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new TerminateAppengineInstancesAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverter.groovy index 89a647f47dd..f2e351dc7d4 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.UpsertAppengineAutoscalingPolicyAtomicOperation +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.UPSERT_SCALING_POLICY) @Component -class UpsertAppengineAutoscalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertAppengineAutoscalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map 
input) { new UpsertAppengineAutoscalingPolicyAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverter.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverter.groovy index 09e8656bf0c..c28ac1a48d1 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverter.groovy @@ -20,14 +20,15 @@ import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.UpsertAppengineLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.model.AppengineModelUtil +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @AppengineOperation(AtomicOperations.UPSERT_LOAD_BALANCER) @Component -class UpsertAppengineLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertAppengineLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { AtomicOperation convertOperation(Map input) { new UpsertAppengineLoadBalancerAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineDescription.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineDescription.groovy index 707c06355fe..a2141ae3c58 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineDescription.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineDescription.groovy @@ -37,7 +37,9 @@ class DeployAppengineDescription extends AbstractAppengineCredentialsDescription String applicationDirectoryRoot List<String> configFilepaths List<String> configFiles + List<Artifact> configArtifacts Boolean promote Boolean stopPreviousVersion + Boolean suppressVersionString String containerImageUrl // app engine flex only } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/exception/AppengineOperationException.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/exception/AppengineOperationException.groovy deleted file mode 100644 index 3608e17dac3..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/exception/AppengineOperationException.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Google, Inc.
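All of these converters follow the same shape: extend the typed `AbstractAtomicOperationsCredentialsConverter` and delegate to the shared helper. A hypothetical converter for some new operation might look like the following; the `Example*` class names are invented for illustration, and `ExampleAppengineDescription` would extend `AbstractAppengineCredentialsDescription`:

```groovy
@AppengineOperation(AtomicOperations.RESIZE_SERVER_GROUP) // operation choice is illustrative
@Component
class ExampleAppengineAtomicOperationConverter
  extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> {

  AtomicOperation convertOperation(Map input) {
    new ExampleAppengineAtomicOperation(convertDescription(input)) // hypothetical operation class
  }

  ExampleAppengineDescription convertDescription(Map input) {
    // The helper resolves the account name, binds typed credentials, and maps the input.
    AppengineAtomicOperationConverterHelper.convertDescription(input, this, ExampleAppengineDescription)
  }
}
```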
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.appengine.deploy.exception - -import groovy.transform.InheritConstructors - -@InheritConstructors -class AppengineOperationException extends RuntimeException { } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/AbstractStartStopAppengineAtomicOperation.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/AbstractStartStopAppengineAtomicOperation.groovy index cbc3b7f4485..95eea35191b 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/AbstractStartStopAppengineAtomicOperation.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/AbstractStartStopAppengineAtomicOperation.groovy @@ -24,7 +24,6 @@ import com.netflix.spinnaker.clouddriver.appengine.model.AppengineServerGroup import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import org.springframework.beans.factory.annotation.Autowired @@ -76,7 +75,6 @@ abstract class AbstractStartStopAppengineAtomicOperation extends AppengineAtomic "version", task, [409], - [], [action: verb, phase: basePhase], registry ) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineAtomicOperation.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineAtomicOperation.groovy index 4190a0ca8d1..3dac7815105 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineAtomicOperation.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineAtomicOperation.groovy @@ -24,13 +24,22 @@ import com.netflix.spinnaker.clouddriver.appengine.deploy.AppengineServerGroupNa import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.exception.AppengineOperationException import com.netflix.spinnaker.clouddriver.appengine.gcsClient.AppengineGcsRepositoryClient +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.kork.artifacts.model.Artifact + +import com.netflix.spectator.api.Registry +import groovy.util.logging.Slf4j + +import java.nio.file.Path import java.nio.file.Paths + import 
org.springframework.beans.factory.annotation.Autowired import static com.netflix.spinnaker.clouddriver.appengine.config.AppengineConfigurationProperties.ManagedAccount.GcloudReleaseTrack +import java.util.concurrent.TimeUnit class DeployAppengineAtomicOperation implements AtomicOperation<DeploymentResult> { private static final String BASE_PHASE = "DEPLOY" @@ -39,6 +48,9 @@ class DeployAppengineAtomicOperation implements AtomicOperation<DeploymentResult> - def deployCommand = ["gcloud"] + def configArtifactPaths = fetchConfigArtifacts(description.configArtifacts, repositoryPath, applicationDirectoryRoot) + + // runCommand expects a List<String> and will fail if some of the arguments are GStrings (as that is not a subclass + // of String). It is thus important to only add Strings to deployCommand. For example, adding a flag "--test=$testvalue" + // below will cause deployments to fail unless you explicitly convert it to a String via "--test=$testvalue".toString() + def deployCommand = [description.credentials.gcloudPath] if (gcloudReleaseTrack != null && gcloudReleaseTrack != GcloudReleaseTrack.STABLE) { deployCommand << gcloudReleaseTrack.toString().toLowerCase() } - deployCommand += ["app", "deploy", *(repositoryFullConfigFilePaths + writtenFullConfigFilePaths)] - deployCommand << "--version=$versionName" + deployCommand += ["app", "deploy", *(repositoryFullConfigFilePaths + writtenFullConfigFilePaths + configArtifactPaths)] + deployCommand << "--version=" + versionName deployCommand << (description.promote ? "--promote" : "--no-promote") deployCommand << (description.stopPreviousVersion ? "--stop-previous-version": "--no-stop-previous-version") - deployCommand << "--project=$project" - deployCommand << "--account=$accountEmail" + deployCommand << "--project=" + project + deployCommand << "--account=" + accountEmail if (containerDeployment) { - deployCommand << "--image-url=$imageUrl" + deployCommand << "--image-url=" + imageUrl } task.updateStatus BASE_PHASE, "Deploying version $versionName..." + def startTime = registry.clock().monotonicTime() + def success = "false" try { jobExecutor.runCommand(deployCommand) + success = "true" } catch (e) { throw new AppengineOperationException("Failed to deploy to App Engine with command ${deployCommand.join(' ')}: ${e.getMessage()}") } finally { + def duration = registry.clock().monotonicTime() - startTime + def id = registry.createId("appengine.deploy", + "account", description.credentials.serviceAccountEmail, + "region", description.credentials.region, + "success", success) + registry.timer(id).record(duration, TimeUnit.NANOSECONDS); deleteFiles(writtenFullConfigFilePaths) } task.updateStatus BASE_PHASE, "Done deploying version $versionName..." 
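The GString caveat in the comment above is easy to trip over; here it is in isolation (a standalone sketch, not part of the diff):

```groovy
List<String> command = ['gcloud', 'app', 'deploy']
def versionName = 'v001'

command << "--version=$versionName"             // compiles, but appends a GString, not a String
command << ('--version=' + versionName)         // OK: concatenation yields a java.lang.String
command << "--version=$versionName".toString()  // OK: explicit conversion

command.each { println it.getClass().simpleName } // ..., GStringImpl, String, String
```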
return versionName } + List<String> fetchConfigArtifacts(List<Artifact> configArtifacts, String repositoryPath, String applicationDirectoryRoot) { + if (!configArtifacts) { + return []; + } else { + return configArtifacts.collect { artifact -> + def path = generateRandomRepositoryFilePath(repositoryPath, applicationDirectoryRoot) + try { + path.toFile() << artifactDownloader.download(artifact) + } catch(e) { + throw new AppengineOperationException("Could not download artifact as config file: ${e.getMessage()}") + } + return path.toString() + } + } + } + static List<String> writeConfigFiles(List<String> configFiles, String repositoryPath, String applicationDirectoryRoot) { if (!configFiles) { return [] } else { return configFiles.collect { configFile -> - def name = UUID.randomUUID().toString() - def path = Paths.get(repositoryPath, applicationDirectoryRoot ?: ".", "${name}.yaml") + def path = generateRandomRepositoryFilePath(repositoryPath, applicationDirectoryRoot) try { path.toFile() << configFile } catch(e) { @@ -252,4 +340,9 @@ class DeployAppengineAtomicOperation implements AtomicOperation<DeploymentResult> { } safeRetry.doRetry( - { appengine.apps().services().versions().delete(project, loadBalancerName, serverGroupName).execute() }, + { + appengine.apps().services().versions().delete(project, loadBalancerName, serverGroupName).execute() + }, "version", task, [409], - [], [action: "Destroy", phase: BASE_PHASE], registry ) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperation.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperation.groovy index 2acc8dfee86..921620de5b5 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperation.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperation.groovy @@ -28,7 +28,6 @@ import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineCluste import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineLoadBalancerProvider import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import org.springframework.beans.factory.annotation.Autowired import java.math.RoundingMode @@ -70,11 +69,12 @@ class DisableAppengineAtomicOperation extends AppengineAtomicOperation { def loadBalancerName = serverGroup?.loadBalancers?.first() safeRetry.doRetry( - { buildNewLoadBalancerAndCallApi(credentials.project, loadBalancerName, serverGroupName, priorOutputs) }, + { + buildNewLoadBalancerAndCallApi(credentials.project, loadBalancerName, serverGroupName, priorOutputs) + }, "version", task, [409], - [], [action: "Disable", phase: BASE_PHASE], registry ) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperation.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperation.groovy index 0823fd044a1..c8b1682e888 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperation.groovy +++ 
b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperation.groovy @@ -79,7 +79,6 @@ class UpsertAppengineAutoscalingPolicyAtomicOperation extends AppengineAtomicOpe "version", task, [409], - [], [action: "upsertAutoscalingPolicy", phase: BASE_PHASE], registry) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperation.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperation.groovy index ebed6434c39..d91792b7acc 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperation.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperation.groovy @@ -20,13 +20,11 @@ import com.google.api.services.appengine.v1.model.Operation import com.google.api.services.appengine.v1.model.Service import com.google.api.services.appengine.v1.model.TrafficSplit import com.netflix.spinnaker.clouddriver.appengine.deploy.AppengineSafeRetry -import com.netflix.spinnaker.clouddriver.appengine.deploy.AppengineUtils import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.model.AppengineTrafficSplit import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineLoadBalancerProvider import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired @@ -86,7 +84,6 @@ class UpsertAppengineLoadBalancerAtomicOperation extends AppengineAtomicOperatio "service", task, [409], - [], [action: "Upsert", phase: BASE_PHASE], registry ) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/AbstractStartStopAppengineDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/AbstractStartStopAppengineDescriptionValidator.groovy index 4dadf285f43..33240df189b 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/AbstractStartStopAppengineDescriptionValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/AbstractStartStopAppengineDescriptionValidator.groovy @@ -18,15 +18,16 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.deploy.description.StartStopAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired -import org.springframework.validation.Errors abstract class 
AbstractStartStopAppengineDescriptionValidator extends DescriptionValidator<StartStopAppengineDescription> { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository<AppengineNamedAccountCredentials> credentialsRepository @Autowired AppengineClusterProvider appengineClusterProvider @@ -34,10 +35,10 @@ abstract class AbstractStartStopAppengineDescriptionValidator extends Descriptio abstract String getDescriptionName() @Override - void validate(List priorDescriptions, StartStopAppengineDescription description, Errors errors) { + void validate(List priorDescriptions, StartStopAppengineDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator(descriptionName, errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) def nameNotEmpty = helper.validateNotEmpty(description.serverGroupName, "serverGroupName") if (nameNotEmpty) { diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidator.groovy index 7d171a9a555..bbbd2dc633f 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidator.groovy @@ -18,24 +18,25 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeleteAppengineLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AppengineOperation(AtomicOperations.DELETE_LOAD_BALANCER) @Component("deleteAppengineLoadBalancerDescriptionValidator") class DeleteAppengineLoadBalancerDescriptionValidator extends DescriptionValidator<DeleteAppengineLoadBalancerDescription> { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository<AppengineNamedAccountCredentials> credentialsRepository @Override - void validate(List priorDescriptions, DeleteAppengineLoadBalancerDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteAppengineLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("deleteAppengineLoadBalancerAtomicOperationDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateNotEmpty(description.loadBalancerName, "loadBalancerName") helper.validateLoadBalancerCanBeDeleted(description.loadBalancerName, "loadBalancerName") } diff --git 
a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidator.groovy index a5d64a14176..56d6734ebef 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidator.groovy @@ -18,24 +18,25 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineDescription +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AppengineOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("deployAppengineDescriptionValidator") class DeployAppengineDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, DeployAppengineDescription description, Errors errors) { + void validate(List priorDescriptions, DeployAppengineDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("deployAppengineAtomicOperationDescription", errors) - if (!helper.validateCredentials(description.accountName, accountCredentialsProvider)) { + if (!helper.validateCredentials(description.accountName, credentialsRepository)) { return } @@ -69,7 +70,7 @@ class DeployAppengineDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, DestroyAppengineDescription description, Errors errors) { + void validate(List priorDescriptions, DestroyAppengineDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("destroyAppengineAtomicOperationDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateNotEmpty(description.serverGroupName, "serverGroupName") } } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidator.groovy index d5570e68eb9..ecf11ee6bf9 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidator.groovy +++ 
b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidator.groovy @@ -20,18 +20,19 @@ import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.EnableDisableAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineLoadBalancerProvider +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AppengineOperation(AtomicOperations.DISABLE_SERVER_GROUP) @Component("disableAppengineDescriptionValidator") class DisableAppengineDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired AppengineLoadBalancerProvider appengineLoadBalancerProvider @@ -40,10 +41,10 @@ class DisableAppengineDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired AppengineClusterProvider appengineClusterProvider @Override - void validate(List priorDescriptions, EnableDisableAppengineDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableAppengineDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("enableAppengineAtomicOperationDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateNotEmpty(description.serverGroupName, "serverGroupName") helper.validateServerGroupsCanBeEnabled([description.serverGroupName], null, diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidator.groovy index 15177cbd742..1b682bd9da8 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidator.groovy @@ -27,28 +27,27 @@ import com.netflix.spinnaker.clouddriver.appengine.model.ShardBy import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineInstanceProvider import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineLoadBalancerProvider -import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import 
com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository class StandardAppengineAttributeValidator { static final namePattern = /^[a-z0-9]+([-a-z0-9]*[a-z0-9])?$/ static final prefixPattern = /^[a-z0-9]+$/ String context - Errors errors + ValidationErrors errors - StandardAppengineAttributeValidator(String context, Errors errors) { + StandardAppengineAttributeValidator(String context, ValidationErrors errors) { this.context = context this.errors = errors } - def validateCredentials(String credentials, AccountCredentialsProvider accountCredentialsProvider) { + def validateCredentials(String credentials, CredentialsRepository<AppengineNamedAccountCredentials> credentialsRepository) { def result = validateNotEmpty(credentials, "account") if (result) { - def appengineCredentials = accountCredentialsProvider.getCredentials(credentials) - if (!(appengineCredentials?.credentials instanceof AppengineCredentials)) { + def appengineCredentials = credentialsRepository.getOne(credentials) + if (!appengineCredentials) { errors.rejectValue("${context}.account", "${context}.account.notFound") result = false } @@ -188,7 +187,7 @@ class StandardAppengineAttributeValidator { if (!serverGroup) { reject.notFound << serverGroupName return reject - } else if (loadBalancerName && serverGroup?.loadBalancers[0] != loadBalancerName) { + } else if (loadBalancerName && serverGroup?.loadBalancers.contains(loadBalancerName) != true) { reject.notRegisteredWithLoadBalancer << serverGroupName return reject } else { diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidator.groovy index 270e4e80e85..f9f9a2b8e8d 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidator.groovy @@ -19,27 +19,28 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.TerminateAppengineInstancesDescription import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineInstanceProvider +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AppengineOperation(AtomicOperations.TERMINATE_INSTANCES) @Component("terminateAppengineInstancesDescriptionValidator") class 
TerminateAppengineInstancesDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired AppengineInstanceProvider appengineInstanceProvider @Override - void validate(List priorDescriptions, TerminateAppengineInstancesDescription description, Errors errors) { + void validate(List priorDescriptions, TerminateAppengineInstancesDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("terminateAppengineInstancesAtomicOperationDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateNotEmpty(description.instanceIds, "instanceIds") helper.validateInstances(description.instanceIds, description.credentials, appengineInstanceProvider, "instanceIds") } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidator.groovy index d98304aff59..90f83727b8c 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidator.groovy @@ -19,27 +19,28 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AppengineOperation(AtomicOperations.UPSERT_SCALING_POLICY) @Component class UpsertAppengineAutoscalingPolicyDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired AppengineClusterProvider appengineClusterProvider @Override - void validate(List priorDescriptions, UpsertAppengineAutoscalingPolicyDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertAppengineAutoscalingPolicyDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("upsertAppengineAutoscalingPolicyAtomicOperationDescription", errors) - if (!helper.validateCredentials(description.accountName, accountCredentialsProvider)) { + if (!helper.validateCredentials(description.accountName, credentialsRepository)) { return } diff --git 
a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidator.groovy index a6531f8c96e..d3bafc3f68c 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidator.groovy @@ -19,27 +19,28 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AppengineOperation(AtomicOperations.UPSERT_LOAD_BALANCER) @Component("upsertAppengineLoadBalancerDescriptionValidator") class UpsertAppengineLoadBalancerDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired AppengineClusterProvider appengineClusterProvider @Override - void validate(List priorDescriptions, UpsertAppengineLoadBalancerDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertAppengineLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardAppengineAttributeValidator("upsertAppengineLoadBalancerAtomicOperationDescription", errors) - if (!helper.validateCredentials(description.accountName, accountCredentialsProvider)) { + if (!helper.validateCredentials(description.accountName, credentialsRepository)) { return } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/gcsClient/AppengineGcsRepositoryClient.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/gcsClient/AppengineGcsRepositoryClient.groovy index 1e8e04c4b1c..e781f6b84a1 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/gcsClient/AppengineGcsRepositoryClient.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/gcsClient/AppengineGcsRepositoryClient.groovy @@ -19,12 +19,14 @@ package com.netflix.spinnaker.clouddriver.appengine.gcsClient import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor import com.netflix.spinnaker.clouddriver.appengine.artifacts.GcsStorageService import com.netflix.spinnaker.clouddriver.appengine.model.AppengineRepositoryClient -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactUtils +import 
com.netflix.spinnaker.clouddriver.appengine.artifacts.ArtifactUtils +import groovy.transform.CompileStatic import groovy.transform.TupleConstructor import groovy.util.logging.Slf4j import org.apache.commons.io.FileUtils import org.apache.commons.io.IOUtils +@CompileStatic @Slf4j @TupleConstructor class AppengineGcsRepositoryClient implements AppengineRepositoryClient { @@ -57,6 +59,16 @@ class AppengineGcsRepositoryClient implements AppengineRepositoryClient { def slash = fullPath.indexOf("/") def bucketName = fullPath.substring(0, slash) def bucketPath = fullPath.substring(slash + 1) + Long version = null + + def versionSeparator = bucketPath.indexOf("#") + if (versionSeparator >= 0) { + String versionString = bucketPath.substring(versionSeparator + 1) + if (!versionString.isEmpty()) { + version = Long.parseLong(versionString) + } + bucketPath = bucketPath.substring(0, versionSeparator) + } // Start with a clean directory for each deployment. File targetDirectory = new File(targetDirectoryPath) @@ -67,8 +79,8 @@ class AppengineGcsRepositoryClient implements AppengineRepositoryClient { throw new IllegalArgumentException("GAE staging directory resolved to a file: ${targetDirectoryPath}, failing...") } - if (fullPath.endsWith(".tar")) { - InputStream tas = storage.openObjectStream(bucketName, bucketPath) + if (bucketPath.endsWith(".tar")) { + InputStream tas = storage.openObjectStream(bucketName, bucketPath, version) // NOTE: We write the tar file out to an intermediate temp file because the tar input stream // directly from openObjectStream() closes unexpectedly when accessed from untarStreamToPath() diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/health/AppengineHealthIndicator.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/health/AppengineHealthIndicator.groovy index 3576b4c4496..2fa7c3da389 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/health/AppengineHealthIndicator.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/health/AppengineHealthIndicator.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.appengine.health import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration import groovy.transform.InheritConstructors import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -36,7 +36,7 @@ class AppengineHealthIndicator implements HealthIndicator { private static final Logger LOG = LoggerFactory.getLogger(AppengineHealthIndicator) @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsTypeBaseConfiguration credentialsTypeBaseConfiguration private final AtomicReference<Exception> lastException = new AtomicReference<>(null) @@ -54,23 +54,18 @@ class AppengineHealthIndicator implements HealthIndicator { @Scheduled(fixedDelay = 300000L) void checkHealth() { try { - Set appengineCredentialsSet = accountCredentialsProvider.all.findAll { - it instanceof AppengineNamedAccountCredentials - } as Set - - for (AppengineNamedAccountCredentials accountCredentials in appengineCredentialsSet) { + credentialsTypeBaseConfiguration.credentialsRepository?.all?.forEach({ try { /* Location is the only App Engine resource guaranteed to exist. The API only accepts '-' here, rather than project name. 
To paraphrase the provided error, the list of locations is static and not a property of an individual project. */ - accountCredentials.appengine.apps().locations().list('-').execute() + it.appengine.apps().locations().list('-').execute() } catch (IOException e) { throw new AppengineIOException(e) } - } - + }) lastException.set(null) } catch (Exception ex) { LOG.warn "Unhealthy", ex diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineLoadBalancer.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineLoadBalancer.groovy index a8341fd113d..df684e17352 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineLoadBalancer.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineLoadBalancer.groovy @@ -75,7 +75,8 @@ class AppengineLoadBalancer implements LoadBalancer, Serializable { isDisabled: serverGroup.isDisabled(), allowsGradualTrafficMigration: serverGroup.allowsGradualTrafficMigration, instances: instances as Set, - detachedInstances: detachedInstances as Set + detachedInstances: detachedInstances as Set, + cloudProvider: AppengineCloudProvider.ID ) } as Set null @@ -100,4 +101,5 @@ enum ShardBy { UNSPECIFIED, COOKIE, IP, + RANDOM, } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineServerGroup.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineServerGroup.groovy index e09742f3f44..ea763d35b5a 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineServerGroup.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/model/AppengineServerGroup.groovy @@ -66,6 +66,9 @@ class AppengineServerGroup implements ServerGroup, Serializable { this.allowsGradualTrafficMigration = versionAllowsGradualTrafficMigration(version) } + Boolean isDisabled() { + disabled + } @Override ServerGroup.InstanceCounts getInstanceCounts() { new ServerGroup.InstanceCounts( @@ -78,6 +81,24 @@ class AppengineServerGroup implements ServerGroup, Serializable { ) } + def update(Version version, String account, String region, String loadBalancerName, Boolean isDisabled) { + this.account = account + this.region = region + this.name = version.getId() + this.loadBalancers.add(loadBalancerName) + this.createdTime = AppengineModelUtil.translateTime(version.getCreateTime()) + this.disabled = isDisabled + this.scalingPolicy = AppengineModelUtil.getScalingPolicy(version) + this.servingStatus = version.getServingStatus() ? ServingStatus.valueOf(version.getServingStatus()) : null + this.env = version.getEnv() ? 
Environment.valueOf(version.getEnv().toUpperCase()) : null + this.httpUrl = AppengineModelUtil.getHttpUrl(version.getName()) + this.httpsUrl = AppengineModelUtil.getHttpsUrl(version.getName()) + this.instanceClass = version.getInstanceClass() + this.launchConfig.instanceType = this.instanceClass + this.zones = [region] as Set + this.allowsGradualTrafficMigration = versionAllowsGradualTrafficMigration(version) + } + @Override ServerGroup.Capacity getCapacity() { Integer instanceCount = instances?.size() ?: 0 @@ -150,11 +171,6 @@ class AppengineServerGroup implements ServerGroup, Serializable { null } - @Override - Boolean isDisabled() { - disabled - } - enum ServingStatus { SERVING, STOPPED, diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/AppengineProvider.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/AppengineProvider.groovy index b570348e0b3..2958712cc76 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/AppengineProvider.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/AppengineProvider.groovy @@ -16,18 +16,16 @@ package com.netflix.spinnaker.clouddriver.appengine.provider -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware import com.netflix.spinnaker.clouddriver.appengine.AppengineCloudProvider import com.netflix.spinnaker.clouddriver.appengine.cache.Keys import com.netflix.spinnaker.clouddriver.cache.SearchableProvider +import com.netflix.spinnaker.clouddriver.security.BaseProvider -class AppengineProvider extends AgentSchedulerAware implements SearchableProvider { +class AppengineProvider extends BaseProvider implements SearchableProvider { public static final String PROVIDER_NAME = AppengineProvider.name final Map urlMappingTemplates = Collections.emptyMap() final Map searchResultHydrators = Collections.emptyMap() - final Collection agents final AppengineCloudProvider cloudProvider final Set defaultCaches = [ Keys.Namespace.APPLICATIONS.ns, @@ -37,9 +35,8 @@ class AppengineProvider extends AgentSchedulerAware implements SearchableProvide Keys.Namespace.LOAD_BALANCERS.ns, ].asImmutable() - AppengineProvider(AppengineCloudProvider cloudProvider, Collection agents) { + AppengineProvider(AppengineCloudProvider cloudProvider) { this.cloudProvider = cloudProvider - this.agents = agents } @Override diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AbstractAppengineCachingAgent.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AbstractAppengineCachingAgent.groovy index 4074a6b5be4..5133cfe9de7 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AbstractAppengineCachingAgent.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AbstractAppengineCachingAgent.groovy @@ -19,13 +19,16 @@ package com.netflix.spinnaker.clouddriver.appengine.provider.agent import com.fasterxml.jackson.databind.ObjectMapper import com.google.api.client.googleapis.batch.BatchRequest import com.netflix.spinnaker.cats.agent.AccountAware +import com.netflix.spinnaker.cats.agent.AgentIntervalAware import com.netflix.spinnaker.cats.agent.CachingAgent import com.netflix.spinnaker.cats.cache.CacheData import 
com.netflix.spinnaker.clouddriver.appengine.AppengineCloudProvider import com.netflix.spinnaker.clouddriver.appengine.provider.AppengineProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -abstract class AbstractAppengineCachingAgent implements CachingAgent, AccountAware { +import java.util.concurrent.TimeUnit + +abstract class AbstractAppengineCachingAgent implements CachingAgent, AccountAware, AgentIntervalAware { final String accountName final String providerName = AppengineProvider.PROVIDER_NAME final AppengineCloudProvider appengineCloudProvider = new AppengineCloudProvider() @@ -40,6 +43,30 @@ abstract class AbstractAppengineCachingAgent implements CachingAgent, AccountAwa this.credentials = credentials } + boolean shouldIgnoreLoadBalancer(String loadBalancerName) { + if (credentials.services != null && !credentials.services.isEmpty() && + credentials.services.every { !loadBalancerName.matches(it) }) { + return true + } + if (credentials.omitServices != null && !credentials.omitServices.isEmpty() && + credentials.omitServices.any { loadBalancerName.matches(it) }) { + return true + } + return false + } + + boolean shouldIgnoreServerGroup(String serverGroupName) { + if (credentials.versions != null && !credentials.versions.isEmpty() && + credentials.versions.every { !serverGroupName.matches(it) }) { + return true + } + if (credentials.omitVersions != null && !credentials.omitVersions.isEmpty() + && credentials.omitVersions.any { serverGroupName.matches(it) }) { + return true + } + return false + } + + static void cache(Map<String, List<CacheData>> cacheResults, String cacheNamespace, Map<String, CacheData> cacheDataById) { @@ -62,5 +89,12 @@ abstract class AbstractAppengineCachingAgent implements CachingAgent, AccountAwa } } + Long getAgentInterval() { + if (this.credentials.cachingIntervalSeconds == null) { + return TimeUnit.SECONDS.toMillis(60) + } + return TimeUnit.SECONDS.toMillis(this.credentials.cachingIntervalSeconds) + } + abstract String getSimpleName() } diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineLoadBalancerCachingAgent.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineLoadBalancerCachingAgent.groovy index 0c3999a46e9..e280df5b5ec 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineLoadBalancerCachingAgent.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineLoadBalancerCachingAgent.groovy @@ -29,14 +29,15 @@ import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.appengine.AppengineCloudProvider import com.netflix.spinnaker.clouddriver.appengine.cache.Keys import com.netflix.spinnaker.clouddriver.appengine.model.AppengineLoadBalancer +import com.netflix.spinnaker.clouddriver.appengine.provider.view.MutableCacheData import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandResult import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import groovy.util.logging.Slf4j import java.util.concurrent.TimeUnit +import java.util.stream.Collectors 
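The services/omitServices and versions/omitVersions account settings consulted by the shouldIgnoreLoadBalancer and shouldIgnoreServerGroup helpers added above are lists of regular expressions: when the allow list is non-empty the name must match at least one entry, and any match in the omit list excludes the name. A standalone Groovy sketch of that matching rule (the method and argument names here are illustrative, not part of the patch):

```groovy
// Allow/deny matching as used by the caching-agent filters: 'allow' acts as an
// allowlist when non-empty, 'omit' as a denylist; entries are regexes matched
// with String.matches().
static boolean shouldIgnore(String name, List<String> allow, List<String> omit) {
  if (allow && allow.every { !name.matches(it) }) {
    return true  // an allowlist exists, but nothing in it matched
  }
  if (omit && omit.any { name.matches(it) }) {
    return true  // explicitly omitted
  }
  return false
}

assert !shouldIgnore("frontend", ["front.*"], [])
assert shouldIgnore("backend", ["front.*"], [])           // not on the allowlist
assert shouldIgnore("frontend-canary", [], [".*-canary"]) // matched an omit pattern
```
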
import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.clouddriver.appengine.cache.Keys.Namespace.LOAD_BALANCERS @@ -77,12 +78,12 @@ class AppengineLoadBalancerCachingAgent extends AbstractAppengineCachingAgent im metricsSupport = new OnDemandMetricsSupport( registry, this, - "$AppengineCloudProvider.ID:$OnDemandAgent.OnDemandType.LoadBalancer") + "$AppengineCloudProvider.ID:$OnDemandType.LoadBalancer") } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.LoadBalancer && cloudProvider == AppengineCloudProvider.ID + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.LoadBalancer && cloudProvider == AppengineCloudProvider.ID } @Override @@ -93,6 +94,10 @@ class AppengineLoadBalancerCachingAgent extends AbstractAppengineCachingAgent im def loadBalancerName = data.loadBalancerName.toString() + if (shouldIgnoreLoadBalancer(loadBalancerName)) { + return null + } + Service service = metricsSupport.readData { loadService(loadBalancerName) } @@ -137,7 +142,7 @@ class AppengineLoadBalancerCachingAgent extends AbstractAppengineCachingAgent im @Override CacheResult loadData(ProviderCache providerCache) { Long start = System.currentTimeMillis() - List services = loadServices() + List services = loadServices().stream().filter { !shouldIgnoreLoadBalancer(it.getId()) }.collect(Collectors.toList()) List evictFromOnDemand = [] List keepInOnDemand = [] @@ -205,7 +210,7 @@ class AppengineLoadBalancerCachingAgent extends AbstractAppengineCachingAgent im } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { def keys = providerCache.getIdentifiers(ON_DEMAND.ns) keys = keys.findResults { def parse = Keys.parse(it) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppenginePlatformApplicationCachingAgent.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppenginePlatformApplicationCachingAgent.groovy index 782e3708e5b..5b694022c72 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppenginePlatformApplicationCachingAgent.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppenginePlatformApplicationCachingAgent.groovy @@ -24,8 +24,8 @@ import com.netflix.spinnaker.cats.agent.DefaultCacheResult import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.appengine.cache.Keys import com.netflix.spinnaker.clouddriver.appengine.model.AppenginePlatformApplication +import com.netflix.spinnaker.clouddriver.appengine.provider.view.MutableCacheData import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData import groovy.util.logging.Slf4j import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineServerGroupCachingAgent.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineServerGroupCachingAgent.groovy index b4ee4473b3c..f4a42867c14 100644 --- 
a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineServerGroupCachingAgent.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/agent/AppengineServerGroupCachingAgent.groovy @@ -36,10 +36,11 @@ import com.netflix.spinnaker.clouddriver.appengine.model.AppengineInstance import com.netflix.spinnaker.clouddriver.appengine.model.AppengineLoadBalancer import com.netflix.spinnaker.clouddriver.appengine.model.AppengineServerGroup import com.netflix.spinnaker.clouddriver.appengine.provider.callbacks.AppengineCallback +import com.netflix.spinnaker.clouddriver.appengine.provider.view.MutableCacheData import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import groovy.util.logging.Slf4j import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE @@ -70,7 +71,7 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp this.metricsSupport = new OnDemandMetricsSupport( registry, this, - "$AppengineCloudProvider.ID:$OnDemandAgent.OnDemandType.ServerGroup") + "$AppengineCloudProvider.ID:$OnDemandType.ServerGroup") } @Override @@ -89,8 +90,8 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.ServerGroup && cloudProvider == AppengineCloudProvider.ID + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.ServerGroup && cloudProvider == AppengineCloudProvider.ID } @Override @@ -100,6 +101,9 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp } def serverGroupName = data.serverGroupName.toString() + if (shouldIgnoreServerGroup(serverGroupName)) { + return null + } def matchingServerGroupAndLoadBalancer = metricsSupport.readData { loadServerGroupAndLoadBalancer(serverGroupName) } @@ -263,11 +267,19 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp cachedServerGroups[serverGroupKey].with { attributes.name = serverGroupName def isDisabled = !loadBalancer.getSplit().getAllocations().containsKey(serverGroupName); - attributes.serverGroup = new AppengineServerGroup(serverGroup, - accountName, - credentials.region, - loadBalancerName, - isDisabled) + if (attributes.serverGroup == null) { + attributes.serverGroup = new AppengineServerGroup(serverGroup, + accountName, + credentials.region, + loadBalancerName, + isDisabled) + } else { + attributes.serverGroup.update(serverGroup, + accountName, + credentials.region, + loadBalancerName, + isDisabled) + } relationships[APPLICATIONS.ns].add(applicationKey) relationships[CLUSTERS.ns].add(clusterKey) relationships[INSTANCES.ns].addAll(instanceKeys) @@ -304,16 +316,22 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp Map> loadServerGroups() { def project = credentials.project def loadBalancers = credentials.appengine.apps().services().list(project).execute().getServices() ?: [] - BatchRequest batch = credentials.appengine.batch() + BatchRequest batch = credentials.appengine.batch() // TODO(jacobkiefer): Consider limiting batch 
sizes. https://github.com/spinnaker/spinnaker/issues/3564. Map> serverGroupsByLoadBalancer = [:].withDefault { [] } loadBalancers.each { loadBalancer -> def loadBalancerName = loadBalancer.getId() + if (shouldIgnoreLoadBalancer(loadBalancerName)) { + return + } def callback = new AppengineCallback() .success { ListVersionsResponse versionsResponse, HttpHeaders responseHeaders -> def versions = versionsResponse.getVersions() if (versions) { - serverGroupsByLoadBalancer[loadBalancer].addAll(versions) + versions.removeIf { shouldIgnoreServerGroup(it.getId()) } + if(versions) { + serverGroupsByLoadBalancer[loadBalancer].addAll(versions) + } } } @@ -325,12 +343,17 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp } Map loadServerGroupAndLoadBalancer(String serverGroupName) { + if (shouldIgnoreServerGroup(serverGroupName)) { + return [:] + } def project = credentials.project def loadBalancers = credentials.appengine.apps().services().list(project).execute().getServices() ?: [] BatchRequest batch = credentials.appengine.batch() Service loadBalancer Version serverGroup + loadBalancers.removeIf { shouldIgnoreLoadBalancer(it.getName()) } + // We don't know where our server group is, so we have to check all of the load balancers. loadBalancers.each { Service lb -> def loadBalancerName = lb.getId() @@ -392,7 +415,7 @@ class AppengineServerGroupCachingAgent extends AbstractAppengineCachingAgent imp } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { def keys = providerCache.getIdentifiers(ON_DEMAND.ns) keys = keys.findResults { def parse = Keys.parse(it) diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/config/AppengineProviderConfig.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/config/AppengineProviderConfig.groovy deleted file mode 100644 index 3e5c77f616b..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/config/AppengineProviderConfig.groovy +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.appengine.provider.config - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.appengine.AppengineCloudProvider -import com.netflix.spinnaker.clouddriver.appengine.provider.AppengineProvider -import com.netflix.spinnaker.clouddriver.appengine.provider.agent.AppenginePlatformApplicationCachingAgent -import com.netflix.spinnaker.clouddriver.appengine.provider.agent.AppengineLoadBalancerCachingAgent -import com.netflix.spinnaker.clouddriver.appengine.provider.agent.AppengineServerGroupCachingAgent -import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope - -import java.util.concurrent.ConcurrentHashMap - -@Configuration -class AppengineProviderConfig { - @Bean - @DependsOn('appengineNamedAccountCredentials') - AppengineProvider appengineProvider(AppengineCloudProvider appengineCloudProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { - def appengineProvider = new AppengineProvider(appengineCloudProvider, - Collections.newSetFromMap(new ConcurrentHashMap())) - - synchronizeAppengineProvider(appengineProvider, - accountCredentialsRepository, - objectMapper, - registry) - appengineProvider - } - - @Bean - AppengineProviderSynchronizerTypeWrapper appengineProviderSynchronizerTypeWrapper() { - new AppengineProviderSynchronizerTypeWrapper() - } - - class AppengineProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return AppengineProviderSynchronizer - } - } - - class AppengineProviderSynchronizer { } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - AppengineProviderSynchronizer synchronizeAppengineProvider(AppengineProvider appengineProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(appengineProvider) - def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, - AppengineNamedAccountCredentials) - - objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) - - def newlyAddedAgents = [] - allAccounts.each { AppengineNamedAccountCredentials credentials -> - if (!scheduledAccounts.contains(credentials.name)) { - newlyAddedAgents << new AppengineServerGroupCachingAgent(credentials.name, - credentials, - objectMapper, - registry) - - newlyAddedAgents << new AppengineLoadBalancerCachingAgent(credentials.name, - credentials, - objectMapper, - registry) - - newlyAddedAgents << new AppenginePlatformApplicationCachingAgent(credentials.name, - credentials, - objectMapper) - } - } - - if (!newlyAddedAgents.isEmpty()) { - appengineProvider.agents.addAll(newlyAddedAgents) - } - - new 
AppengineProviderSynchronizer() - } -} diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineApplicationProvider.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineApplicationProvider.groovy index 2f22088f7d1..7cdc80aed64 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineApplicationProvider.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineApplicationProvider.groovy @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.appengine.provider.view import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.discovery.converters.Auto import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineClusterProvider.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineClusterProvider.groovy index 6ad6b626408..a7415bf0b49 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineClusterProvider.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineClusterProvider.groovy @@ -98,12 +98,12 @@ class AppengineClusterProvider implements ClusterProvider { @Override Map> getClusterSummaries(String applicationName) { - translateClusters(getClusterData(applicationName), false)?.groupBy { it.accountName } as Map> + translateClusters(getClusterData(applicationName), false)?.groupBy { it.accountName }.collectEntries { k, v -> [k, new HashSet<>(v)] } } @Override Map> getClusterDetails(String applicationName) { - translateClusters(getClusterData(applicationName), true)?.groupBy { it.accountName } as Map> + translateClusters(getClusterData(applicationName), true)?.groupBy { it.accountName }.collectEntries { k, v -> [k, new HashSet<>(v)] } } Set getClusterData(String applicationName) { diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineInstanceProvider.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineInstanceProvider.groovy index 19dca7b49ab..21ef34cea4b 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineInstanceProvider.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/AppengineInstanceProvider.groovy @@ -32,7 +32,7 @@ import static com.netflix.spinnaker.clouddriver.appengine.cache.Keys.Namespace.L import static com.netflix.spinnaker.clouddriver.appengine.cache.Keys.Namespace.SERVER_GROUPS @Component -class AppengineInstanceProvider implements InstanceProvider { +class AppengineInstanceProvider implements InstanceProvider { @Autowired Cache cacheView diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/MutableCacheData.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/MutableCacheData.groovy index 8c731a10449..f3a49da44bb 100644 --- 
a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/MutableCacheData.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/provider/view/MutableCacheData.groovy @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view +package com.netflix.spinnaker.clouddriver.appengine.provider.view import com.fasterxml.jackson.annotation.JsonCreator import com.fasterxml.jackson.annotation.JsonProperty diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentials.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentials.groovy deleted file mode 100644 index 11e8441bd3f..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentials.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.appengine.security - -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory -import com.google.api.client.json.jackson2.JacksonFactory -import com.google.api.services.appengine.v1.Appengine -import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentials -import groovy.transform.TupleConstructor - -@TupleConstructor -public class AppengineCredentials extends GoogleCommonCredentials { - final String project - - Appengine getAppengine(String applicationName) { - JsonFactory jsonFactory = JacksonFactory.getDefaultInstance() - HttpTransport httpTransport = buildHttpTransport() - GoogleCredential credential = getCredential(httpTransport, jsonFactory) - - new Appengine.Builder(httpTransport, jsonFactory, credential) - .setApplicationName(applicationName) - .build() - } -} diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentialsInitializer.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentialsInitializer.groovy deleted file mode 100644 index 0efa0aeee99..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentialsInitializer.groovy +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.appengine.security - -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor -import com.netflix.spinnaker.clouddriver.appengine.config.AppengineConfigurationProperties -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope - -@Slf4j -@Configuration -class AppengineCredentialsInitializer implements CredentialsInitializerSynchronizable { - @Bean - List appengineNamedAccountCredentials(String clouddriverUserAgentApplicationName, - AppengineConfigurationProperties appengineConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - AppengineJobExecutor jobExecutor) { - synchronizeAppengineAccounts(clouddriverUserAgentApplicationName, - appengineConfigurationProperties, - null, - accountCredentialsRepository, - jobExecutor) - } - - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeAppengineAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - List synchronizeAppengineAccounts(String clouddriverUserAgentApplicationName, - AppengineConfigurationProperties appengineConfigurationProperties, - CatsModule catsModule, - AccountCredentialsRepository accountCredentialsRepository, - AppengineJobExecutor jobExecutor) { - def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = - ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, - AppengineNamedAccountCredentials, - appengineConfigurationProperties.accounts) - - accountsToAdd.each { AppengineConfigurationProperties.ManagedAccount managedAccount -> - try { - managedAccount.initialize(jobExecutor) - - def jsonKey = AppengineCredentialsInitializer.getJsonKey(managedAccount) - def appengineAccount = new AppengineNamedAccountCredentials.Builder() - .name(managedAccount.name) - .environment(managedAccount.environment ?: managedAccount.name) - .accountType(managedAccount.accountType ?: managedAccount.name) - .project(managedAccount.project) - .jsonKey(jsonKey) - .applicationName(clouddriverUserAgentApplicationName) - .jsonPath(managedAccount.jsonPath) - .requiredGroupMembership(managedAccount.requiredGroupMembership) - .permissions(managedAccount.permissions.build()) - .serviceAccountEmail(managedAccount.serviceAccountEmail) - .localRepositoryDirectory(managedAccount.localRepositoryDirectory) - .gitHttpsUsername(managedAccount.gitHttpsUsername) - .gitHttpsPassword(managedAccount.gitHttpsPassword) - .githubOAuthAccessToken(managedAccount.githubOAuthAccessToken) - .sshPrivateKeyFilePath(managedAccount.sshPrivateKeyFilePath) - 
.sshPrivateKeyPassphrase(managedAccount.sshPrivateKeyPassphrase) - .sshKnownHostsFilePath(managedAccount.sshKnownHostsFilePath) - .sshTrustUnknownHosts(managedAccount.sshTrustUnknownHosts) - .gcloudReleaseTrack(managedAccount.gcloudReleaseTrack) - .build() - - accountCredentialsRepository.save(managedAccount.name, appengineAccount) - } catch (e) { - log.info("Could not load account $managedAccount.name for App Engine", e) - } - } - - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - - accountCredentialsRepository.all.findAll { - it instanceof AppengineNamedAccountCredentials - } as List - } - - private static String getJsonKey(AppengineConfigurationProperties.ManagedAccount managedAccount) { - def inputStream = managedAccount.inputStream - - inputStream ? new String(inputStream.bytes) : null - } -} diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineJsonCredentials.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineJsonCredentials.groovy deleted file mode 100644 index 1453883f5de..00000000000 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineJsonCredentials.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.appengine.security - -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory -import com.google.api.services.appengine.v1.AppengineScopes -import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentialUtils - -class AppengineJsonCredentials extends AppengineCredentials { - final String jsonKey - - AppengineJsonCredentials(String project, String jsonKey) { - super(project) - this.jsonKey = jsonKey - } - - @Override - GoogleCredential getCredential(HttpTransport httpTransport, JsonFactory jsonFactory) { - GoogleCommonCredentialUtils.getCredentials(httpTransport, jsonFactory, jsonKey, AppengineScopes.CLOUD_PLATFORM); - } -} diff --git a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineNamedAccountCredentials.groovy b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineNamedAccountCredentials.groovy index 766cec4ad68..1ec5e89f97f 100644 --- a/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineNamedAccountCredentials.groovy +++ b/clouddriver-appengine/src/main/groovy/com/netflix/spinnaker/clouddriver/appengine/security/AppengineNamedAccountCredentials.groovy @@ -21,14 +21,17 @@ import com.google.api.services.appengine.v1.Appengine import com.netflix.spinnaker.clouddriver.appengine.AppengineCloudProvider import com.netflix.spinnaker.clouddriver.appengine.gitClient.AppengineGitCredentialType import com.netflix.spinnaker.clouddriver.appengine.gitClient.AppengineGitCredentials +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentials + import com.netflix.spinnaker.fiat.model.resources.Permissions import groovy.transform.TupleConstructor import static com.netflix.spinnaker.clouddriver.appengine.config.AppengineConfigurationProperties.ManagedAccount.GcloudReleaseTrack @TupleConstructor -class AppengineNamedAccountCredentials implements AccountCredentials { +class AppengineNamedAccountCredentials extends AbstractAccountCredentials { + public final static String CREDENTIALS_TYPE = "appengine"; final String name final String environment final String accountType @@ -41,6 +44,8 @@ class AppengineNamedAccountCredentials implements AccountCredentials supportedGitCredentialTypes + final List services + final List versions + final List omitServices + final List omitVersions + + final Long cachingIntervalSeconds + static class Builder { String name String environment @@ -65,6 +77,7 @@ class AppengineNamedAccountCredentials implements AccountCredentials services + List versions + List omitServices + List omitVersions + Long cachingIntervalSeconds /* * If true, the builder will overwrite region with a value from the platform. 
@@ -129,6 +147,11 @@ class AppengineNamedAccountCredentials implements AccountCredentials serviceNames) { + this.services = serviceNames + return this + } + + Builder versions(List versionNames) { + this.versions = versionNames + return this + } + + Builder omitServices(List serviceNames) { + this.omitServices = serviceNames + return this + } + + Builder omitVersions(List versionNames) { + this.omitVersions = versionNames + return this + } + + Builder cachingIntervalSeconds(Long interval) { + this.cachingIntervalSeconds = interval + return this + } + AppengineNamedAccountCredentials build() { credentials = credentials ?: jsonKey ? @@ -236,6 +284,7 @@ class AppengineNamedAccountCredentials implements AccountCredentials directoryStack = new Stack<>(); + + File baseDirectory = new File(basePath); + baseDirectory.mkdir(); + + TarArchiveInputStream tarStream = new TarArchiveInputStream(inputStream); + for (TarArchiveEntry entry = tarStream.getNextTarEntry(); + entry != null; + entry = tarStream.getNextTarEntry()) { + File target = new File(baseDirectory, entry.getName()); + + String canonicalTargetPath = target.getCanonicalPath(); + String canonicalBaseDirPath = baseDirectory.getCanonicalPath(); + + if (!canonicalTargetPath.startsWith(canonicalBaseDirPath)) { + throw new RuntimeException( + "Entry is outside of the target directory (" + entry.getName() + ")"); + } + + if (entry.isDirectory()) { + directoryStack.push(new DirectoryTimestamp(target, entry.getModTime().getTime())); + continue; + } + writeStreamToFile(tarStream, target); + target.setLastModified(entry.getModTime().getTime()); + } + + while (!directoryStack.empty()) { + DirectoryTimestamp info = directoryStack.pop(); + info.directory.setLastModified(info.millis); + } + tarStream.close(); + } + + public static void writeStreamToFile(InputStream sourceStream, File target) throws IOException { + File parent = target.getParentFile(); + if (!parent.exists()) { + parent.mkdirs(); + } + OutputStream targetStream = new FileOutputStream(target); + IOUtils.copy(sourceStream, targetStream); + targetStream.close(); + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/GcsStorageService.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/GcsStorageService.java index efc384e0938..b8075a69087 100644 --- a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/GcsStorageService.java +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/GcsStorageService.java @@ -16,19 +16,18 @@ package com.netflix.spinnaker.clouddriver.appengine.artifacts; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; +import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.json.JsonFactory; -import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.json.gson.GsonFactory; import com.google.api.services.storage.Storage; import com.google.api.services.storage.StorageScopes; import com.google.api.services.storage.model.Objects; import com.google.api.services.storage.model.StorageObject; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.google.auth.http.HttpCredentialsAdapter; +import 
com.google.auth.oauth2.GoogleCredentials; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -39,13 +38,15 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class GcsStorageService { private static final Logger log = LoggerFactory.getLogger(GcsStorageService.class); public static interface VisitorOperation { void visit(StorageObject storageObj) throws IOException; - }; + } public static class Factory { private String applicationName_; @@ -55,7 +56,7 @@ public static class Factory { public Factory(String applicationName) throws IOException, GeneralSecurityException { applicationName_ = applicationName; transport_ = GoogleNetHttpTransport.newTrustedTransport(); - jsonFactory_ = JacksonFactory.getDefaultInstance(); + jsonFactory_ = GsonFactory.getDefaultInstance(); } public Factory(String applicationName, HttpTransport transport, JsonFactory jsonFactory) { @@ -65,29 +66,33 @@ public Factory(String applicationName, HttpTransport transport, JsonFactory json } public GcsStorageService newForCredentials(String credentialsPath) throws IOException { - GoogleCredential credential = loadCredential(credentialsPath); - Storage storage = new Storage.Builder(transport_, jsonFactory_, credential) - .setApplicationName(applicationName_) - .build(); + GoogleCredentials credentials = loadCredentials(credentialsPath); + HttpRequestInitializer requestInitializer = new HttpCredentialsAdapter(credentials); + Storage storage = + new Storage.Builder(transport_, jsonFactory_, requestInitializer) + .setApplicationName(applicationName_) + .build(); return new GcsStorageService(storage); } - private GoogleCredential loadCredential(String credentialsPath) throws IOException { - GoogleCredential credential; - if (!credentialsPath.isEmpty()) { + private GoogleCredentials loadCredentials(String credentialsPath) throws IOException { + GoogleCredentials credentials; + if (credentialsPath != null && !credentialsPath.isEmpty()) { FileInputStream stream = new FileInputStream(credentialsPath); - credential = GoogleCredential.fromStream(stream, transport_, jsonFactory_) - .createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_READ_ONLY)); + credentials = + GoogleCredentials.fromStream(stream) + .createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_READ_ONLY)); log.info("Loaded credentials from " + credentialsPath); } else { - log.info("spinnaker.gcs.enabled without spinnaker.gcs.jsonPath. " + - "Using default application credentials. Using default credentials."); - credential = GoogleCredential.getApplicationDefault(); + log.info( + "spinnaker.gcs.enabled without spinnaker.gcs.jsonPath. " + + "Using default application credentials. 
Using default credentials."); + credentials = GoogleCredentials.getApplicationDefault(); } - return credential; + return credentials; } - }; + } private Storage storage_; @@ -95,8 +100,12 @@ public GcsStorageService(Storage storage) { storage_ = storage; } - public InputStream openObjectStream(String bucketName, String path) throws IOException { + public InputStream openObjectStream(String bucketName, String path, Long generation) + throws IOException { Storage.Objects.Get get = storage_.objects().get(bucketName, path); + if (generation != null) { + get.setGeneration(generation); + } return get.executeMediaAsInputStream(); } @@ -105,20 +114,26 @@ public void visitObjects(String bucketName, String pathPrefix, VisitorOperation Storage.Objects.List listMethod = storage_.objects().list(bucketName); listMethod.setPrefix(pathPrefix); Objects objects; - ExecutorService executor = Executors.newFixedThreadPool(8); + ExecutorService executor = + Executors.newFixedThreadPool( + 8, + new ThreadFactoryBuilder() + .setNameFormat(GcsStorageService.class.getSimpleName() + "-%d") + .build()); do { objects = listMethod.execute(); List items = objects.getItems(); if (items != null) { for (StorageObject obj : items) { - executor.submit(() -> { - try { - op.visit(obj); - } catch (IOException ioex) { - throw new IllegalStateException(ioex); - } - }); + executor.submit( + () -> { + try { + op.visit(obj); + } catch (IOException ioex) { + throw new IllegalStateException(ioex); + } + }); } } listMethod.setPageToken(objects.getNextPageToken()); @@ -130,7 +145,7 @@ public void visitObjects(String bucketName, String pathPrefix, VisitorOperation throw new IllegalStateException("Timed out waiting to process StorageObjects."); } } catch (InterruptedException intex) { - throw new IllegalStateException(intex); + throw new IllegalStateException(intex); } } @@ -138,22 +153,27 @@ public void visitObjects(String bucketName, VisitorOperation op) throws IOExcept visitObjects(bucketName, "", op); } - public void downloadStorageObjectRelative(StorageObject obj, String ignorePrefix, String baseDirectory) - throws IOException { - InputStream stream = openObjectStream(obj.getBucket(), obj.getName()); + public void downloadStorageObjectRelative( + StorageObject obj, String ignorePrefix, String baseDirectory) throws IOException { String objPath = obj.getName(); if (!ignorePrefix.isEmpty()) { ignorePrefix += File.separator; if (!objPath.startsWith(ignorePrefix)) { - throw new IllegalArgumentException(objPath + " does not start with '" + ignorePrefix + "'"); + throw new IllegalArgumentException(objPath + " does not start with '" + ignorePrefix + "'"); } objPath = objPath.substring(ignorePrefix.length()); } + // Ignore folder placeholder objects created by Google Console UI + if (objPath.endsWith("/")) { + return; + } File target = new File(baseDirectory, objPath); - ArtifactUtils.writeStreamToFile(stream, target); + try (InputStream stream = + openObjectStream(obj.getBucket(), obj.getName(), obj.getGeneration())) { + ArtifactUtils.writeStreamToFile(stream, target); + } target.setLastModified(obj.getUpdated().getValue()); - stream.close(); } public void downloadStorageObject(StorageObject obj, String baseDirectory) throws IOException { diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/config/StorageConfigurationProperties.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/config/StorageConfigurationProperties.java index 
4cb8dc31aba..fe8c8322309 100644 --- a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/config/StorageConfigurationProperties.java +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/config/StorageConfigurationProperties.java @@ -17,15 +17,14 @@ package com.netflix.spinnaker.clouddriver.appengine.artifacts.config; import groovy.transform.ToString; +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; import lombok.Data; import org.springframework.boot.context.properties.ConfigurationProperties; import retrofit.client.Response; import retrofit.mime.TypedByteArray; -import java.util.ArrayList; -import java.util.List; -import java.util.NoSuchElementException; - @Data @ConfigurationProperties("artifacts.gcs") public class StorageConfigurationProperties { diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/controllers/AppengineStorageController.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/controllers/AppengineStorageController.java index 9a003839132..a94c2b100f3 100644 --- a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/controllers/AppengineStorageController.java +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/controllers/AppengineStorageController.java @@ -17,17 +17,16 @@ package com.netflix.spinnaker.clouddriver.appengine.artifacts.controllers; import com.netflix.spinnaker.clouddriver.appengine.artifacts.config.StorageConfigurationProperties; - import groovy.util.logging.Slf4j; +import java.util.ArrayList; +import java.util.List; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RestController; -import java.util.ArrayList; -import java.util.List; - -// TODO(jacobkiefer): Refactor this Controller into a common controller with injected StorageService(s) when we +// TODO(jacobkiefer): Refactor this Controller into a common controller with injected +// StorageService(s) when we // add another storage account service. Leaving this in Appengine's scope for now. @Slf4j @RestController @@ -40,11 +39,11 @@ class AppengineStorageController { @RequestMapping(method = RequestMethod.GET) List list() { if (storageAccountInfo == null) { - return new ArrayList(); + return new ArrayList<>(); } List results = new ArrayList(storageAccountInfo.getAccounts().size()); for (StorageConfigurationProperties.ManagedAccount account : storageAccountInfo.getAccounts()) { - results.add(account.getName()); + results.add(account.getName()); } return results; } diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/config/AppengineConfigurationProperties.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/config/AppengineConfigurationProperties.java new file mode 100644 index 00000000000..899e97b84dd --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/config/AppengineConfigurationProperties.java @@ -0,0 +1,133 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.appengine.config; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.jakewharton.retrofit.Ok3Client; +import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor; +import com.netflix.spinnaker.clouddriver.googlecommon.config.GoogleCommonManagedAccount; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler; +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import lombok.Data; +import lombok.EqualsAndHashCode; +import okhttp3.OkHttpClient; +import org.springframework.util.StringUtils; +import retrofit.RestAdapter; +import retrofit.client.Response; +import retrofit.converter.JacksonConverter; +import retrofit.http.GET; +import retrofit.http.Headers; +import retrofit.mime.TypedByteArray; + +@Data +public class AppengineConfigurationProperties { + private List<ManagedAccount> accounts = new ArrayList<>(); + private String gcloudPath; + + @Data + @EqualsAndHashCode(callSuper = true) + public static class ManagedAccount extends GoogleCommonManagedAccount { + public static final String metadataUrl = "http://metadata.google.internal/computeMetadata/v1"; + + private String serviceAccountEmail; + @EqualsAndHashCode.Exclude private String computedServiceAccountEmail; + private String localRepositoryDirectory = "/var/tmp/clouddriver"; + private String gitHttpsUsername; + private String gitHttpsPassword; + private String githubOAuthAccessToken; + private String sshPrivateKeyFilePath; + private String sshPrivateKeyPassphrase; + private String sshKnownHostsFilePath; + private boolean sshTrustUnknownHosts; + private GcloudReleaseTrack gcloudReleaseTrack; + private List<String> services; + private List<String> versions; + private List<String> omitServices; + private List<String> omitVersions; + private Long cachingIntervalSeconds; + + public void initialize(AppengineJobExecutor jobExecutor, String gcloudPath) { + if (!StringUtils.isEmpty(getJsonPath())) { + jobExecutor.runCommand( + List.of(gcloudPath, "auth", "activate-service-account", "--key-file", getJsonPath())); + ObjectMapper mapper = new ObjectMapper(); + try { + JsonNode node = mapper.readTree(new File(getJsonPath())); + if (StringUtils.isEmpty(getProject())) { + setProject(node.get("project_id").asText()); + } + if (StringUtils.isEmpty(serviceAccountEmail)) { + this.computedServiceAccountEmail = node.get("client_email").asText(); + } else { + this.computedServiceAccountEmail = serviceAccountEmail; + } + + } catch (Exception e) { + throw new RuntimeException("Could not read JSON configuration file.", e); + } + } else { + MetadataService metadataService = createMetadataService(); + + try { + if (StringUtils.isEmpty(getProject())) { + setProject(responseToString(metadataService.getProject())); + } + this.computedServiceAccountEmail = + responseToString(metadataService.getApplicationDefaultServiceAccountEmail()); + } catch (Exception e) { + throw new RuntimeException( + "Could not find application default credentials for App Engine.", e); + } + } + } + + static MetadataService
createMetadataService() { + OkHttpClient okHttpClient = new OkHttpClient.Builder().retryOnConnectionFailure(true).build(); + RestAdapter restAdapter = + new RestAdapter.Builder() + .setEndpoint(metadataUrl) + .setConverter(new JacksonConverter()) + .setClient(new Ok3Client(okHttpClient)) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) + .build(); + return restAdapter.create(MetadataService.class); + } + + interface MetadataService { + @Headers("Metadata-Flavor: Google") + @GET("/project/project-id") + Response getProject(); + + @Headers("Metadata-Flavor: Google") + @GET("/instance/service-accounts/default/email") + Response getApplicationDefaultServiceAccountEmail(); + } + + static String responseToString(Response response) { + return new String(((TypedByteArray) response.getBody()).getBytes()); + } + + public enum GcloudReleaseTrack { + ALPHA, + BETA, + STABLE, + } + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/config/AppengineCredentialsConfiguration.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/config/AppengineCredentialsConfiguration.java new file mode 100644 index 00000000000..d3ccaf99632 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/config/AppengineCredentialsConfiguration.java @@ -0,0 +1,134 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.appengine.config; + +import com.netflix.spinnaker.clouddriver.appengine.AppengineCloudProvider; +import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor; +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader; +import com.netflix.spinnaker.credentials.poller.Poller; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.context.ApplicationContext; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class AppengineCredentialsConfiguration { + private static final Logger log = + LoggerFactory.getLogger(AppengineCredentialsConfiguration.class); + + @Bean + @ConditionalOnMissingBean( + value = AppengineNamedAccountCredentials.class, + parameterizedContainer = CredentialsRepository.class) + public CredentialsRepository<AppengineNamedAccountCredentials> appengineCredentialsRepository( + CredentialsLifecycleHandler<AppengineNamedAccountCredentials> eventHandler) { + return new MapBackedCredentialsRepository<>(AppengineCloudProvider.getID(), eventHandler); + } + + @Bean + public CredentialsTypeBaseConfiguration< + AppengineNamedAccountCredentials, AppengineConfigurationProperties.ManagedAccount> + appengineCredentialsProperties( + ApplicationContext applicationContext, + AppengineConfigurationProperties configurationProperties, + AppengineJobExecutor jobExecutor, + ConfigFileService configFileService, + String clouddriverUserAgentApplicationName) { + return new CredentialsTypeBaseConfiguration( + applicationContext, + CredentialsTypeProperties + .<AppengineNamedAccountCredentials, AppengineConfigurationProperties.ManagedAccount> + builder() + .type(AppengineNamedAccountCredentials.CREDENTIALS_TYPE) + .credentialsDefinitionClass(AppengineConfigurationProperties.ManagedAccount.class) + .credentialsClass(AppengineNamedAccountCredentials.class) + .credentialsParser( + a -> { + try { + String gcloudPath = configurationProperties.getGcloudPath(); + if (StringUtils.isEmpty(gcloudPath)) { + gcloudPath = "gcloud"; + } + a.initialize(jobExecutor, gcloudPath); + + String jsonKey = configFileService.getContents(a.getJsonPath()); + return new AppengineNamedAccountCredentials.Builder() + .name(a.getName()) + .environment( + StringUtils.isEmpty(a.getEnvironment()) + ? a.getName() + : a.getEnvironment()) + .accountType( + StringUtils.isEmpty(a.getAccountType()) + ?
a.getName() + : a.getAccountType()) + .project(a.getProject()) + .jsonKey(jsonKey) + .applicationName(clouddriverUserAgentApplicationName) + .gcloudPath(gcloudPath) + .jsonPath(a.getJsonPath()) + .requiredGroupMembership(a.getRequiredGroupMembership()) + .permissions(a.getPermissions().build()) + .serviceAccountEmail(a.getComputedServiceAccountEmail()) + .localRepositoryDirectory(a.getLocalRepositoryDirectory()) + .gitHttpsUsername(a.getGitHttpsUsername()) + .gitHttpsPassword(a.getGitHttpsPassword()) + .githubOAuthAccessToken(a.getGithubOAuthAccessToken()) + .sshPrivateKeyFilePath(a.getSshPrivateKeyFilePath()) + .sshPrivateKeyPassphrase(a.getSshPrivateKeyPassphrase()) + .sshKnownHostsFilePath(a.getSshKnownHostsFilePath()) + .sshTrustUnknownHosts(a.isSshTrustUnknownHosts()) + .gcloudReleaseTrack(a.getGcloudReleaseTrack()) + .services(a.getServices()) + .versions(a.getVersions()) + .omitServices(a.getOmitServices()) + .omitVersions(a.getOmitVersions()) + .cachingIntervalSeconds(a.getCachingIntervalSeconds()) + .build(); + } catch (Exception e) { + log.info( + String.format("Could not load account %s for App Engine", a.getName()), e); + return null; + } + }) + .defaultCredentialsSource(configurationProperties::getAccounts) + .build()); + } + + @Bean + public CredentialsInitializerSynchronizable appengineCredentialsInitializerSynchronizable( + AbstractCredentialsLoader<AppengineNamedAccountCredentials> loader) { + final Poller<AppengineNamedAccountCredentials> poller = new Poller<>(loader); + return new CredentialsInitializerSynchronizable() { + @Override + public void synchronize() { + poller.run(); + } + }; + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineMutexRepository.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineMutexRepository.java new file mode 100644 index 00000000000..a483b732997 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineMutexRepository.java @@ -0,0 +1,37 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.appengine.deploy; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; + +public class AppengineMutexRepository { + private static final ConcurrentHashMap<String, Lock> mutexRepository = new ConcurrentHashMap<>(); + + public static <T> T atomicWrapper(String mutexKey, Supplier<T> doOperation) + throws InterruptedException { + final Lock lock = mutexRepository.computeIfAbsent(mutexKey, k -> new ReentrantLock()); + + lock.lockInterruptibly(); + try { + return doOperation.get(); + } finally { + lock.unlock(); + } + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineSafeRetry.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineSafeRetry.java new file mode 100644 index 00000000000..14cbc6cab98 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/AppengineSafeRetry.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy; + +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.appengine.deploy.exception.AppengineOperationException; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException; +import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleCommonSafeRetry; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import groovy.lang.Closure; +import java.util.List; +import java.util.Map; +import javax.annotation.Nullable; +import javax.annotation.ParametersAreNullableByDefault; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +public final class AppengineSafeRetry { + private final GoogleCommonSafeRetry googleCommonSafeRetry; + + @Autowired + @ParametersAreNullableByDefault + public AppengineSafeRetry( + @Value("${appengine.safe-retry-max-wait-interval-ms:60000}") Integer maxWaitInterval, + @Value("${appengine.safe-retry-retry-interval-base-sec:2}") Integer retryIntervalBase, + @Value("${appengine.safe-retry-jitter-multiplier:1000}") Integer jitterMultiplier, + @Value("${appengine.safe-retry-max-retries:10}") Integer maxRetries) { + googleCommonSafeRetry = + new GoogleCommonSafeRetry(maxWaitInterval, retryIntervalBase, jitterMultiplier, maxRetries); + } + + private AppengineSafeRetry(GoogleCommonSafeRetry googleCommonSafeRetry) { + this.googleCommonSafeRetry = googleCommonSafeRetry; + } + + /** + * Returns an instance of this class that never waits between retries, suitable for testing.
+ * + * @return An instance of {@link AppengineSafeRetry} + */ + public static AppengineSafeRetry withoutDelay() { + return new AppengineSafeRetry(GoogleCommonSafeRetry.withoutDelay()); + } + + @Nullable + public <V> V doRetry( + Closure<V> operation, + String resource, + @Nullable Task task, + List<Integer> retryCodes, + Map<String, String> tags, + Registry registry) { + String action = tags.get("action"); + String description = String.format("%s of %s", action, resource); + if (task != null) { + task.updateStatus(tags.get("phase"), String.format("Attempting %s...", description)); + } + + try { + return googleCommonSafeRetry.doRetry( + operation, description, retryCodes, ImmutableList.of(), tags, registry); + } catch (GoogleApiException e) { + throw new AppengineOperationException("Failed to " + description, e); + } + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineConfigAtomicOperationConverter.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineConfigAtomicOperationConverter.java new file mode 100644 index 00000000000..5721244d383 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineConfigAtomicOperationConverter.java @@ -0,0 +1,50 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy.converters; + +import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation; +import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineConfigDescription; +import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DeployAppengineConfigAtomicOperation; +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter; +import groovy.util.logging.Slf4j; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AppengineOperation(AtomicOperations.DEPLOY_APPENGINE_CONFIG) +@Component +@Slf4j +public class DeployAppengineConfigAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsConverter<AppengineNamedAccountCredentials> { + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeployAppengineConfigAtomicOperation(convertDescription(input)); + } + + @Override + public DeployAppengineConfigDescription convertDescription(Map input) { + DeployAppengineConfigDescription description = + (DeployAppengineConfigDescription) + AppengineAtomicOperationConverterHelper.convertDescription( + input, this, DeployAppengineConfigDescription.class); + return description; + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineConfigDescription.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineConfigDescription.java new file mode 100644 index 00000000000..7824dc3176e --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/description/DeployAppengineConfigDescription.java @@ -0,0 +1,32 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy.description; + +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DeployAppengineConfigDescription extends AbstractAppengineCredentialsDescription { + private String accountName; + private Artifact cronArtifact; + private Artifact dispatchArtifact; + private Artifact indexArtifact; + private Artifact queueArtifact; +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/exception/AppengineOperationException.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/exception/AppengineOperationException.java new file mode 100644 index 00000000000..2f4d31ca1f6 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/exception/AppengineOperationException.java @@ -0,0 +1,26 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.appengine.deploy.exception; + +public class AppengineOperationException extends RuntimeException { + public AppengineOperationException(String message) { + super(message); + } + + public AppengineOperationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineConfigAtomicOperation.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineConfigAtomicOperation.java new file mode 100644 index 00000000000..0db521e8e7b --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineConfigAtomicOperation.java @@ -0,0 +1,179 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy.ops; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor; +import com.netflix.spinnaker.clouddriver.appengine.config.AppengineConfigurationProperties; +import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineConfigDescription; +import com.netflix.spinnaker.clouddriver.appengine.deploy.exception.AppengineOperationException; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.apache.commons.compress.utils.IOUtils; +import org.apache.commons.io.FileUtils; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeployAppengineConfigAtomicOperation implements AtomicOperation<Void> { + + private static final String BASE_PHASE = "DEPLOY_APPENGINE_CONFIG"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private DeployAppengineConfigDescription description; + + @Autowired private ArtifactDownloader artifactDownloader; + + @Autowired private Registry registry; + + @Autowired private AppengineJobExecutor jobExecutor; + + public DeployAppengineConfigAtomicOperation(DeployAppengineConfigDescription description) { + this.description = description; + } + + @Override + public Void operate(List priorOutputs) { + String serviceAccount = description.getCredentials().getServiceAccountEmail(); + String region = description.getCredentials().getRegion(); + + registry + .counter( + registry.createId( + "appengine.deployConfigStart", "account", serviceAccount, "region", region)) + .increment(); + long startTime = registry.clock().monotonicTime(); + + AppengineConfigurationProperties.ManagedAccount.GcloudReleaseTrack gCloudReleaseTrack = + description.getCredentials().getGcloudReleaseTrack(); + List<String> deployCommand = new ArrayList<>(); + deployCommand.add(description.getCredentials().getGcloudPath()); + if (gCloudReleaseTrack != null + && gCloudReleaseTrack + != AppengineConfigurationProperties.ManagedAccount.GcloudReleaseTrack.STABLE) { + deployCommand.add(gCloudReleaseTrack.toString().toLowerCase()); + } + deployCommand.add("app"); + deployCommand.add("deploy"); + + Path directory = createEmptyDirectory(); + String success = "false"; + + try { + if (description.getCronArtifact() != null) { + getTask().updateStatus(BASE_PHASE, "Downloading cron configuration..."); + File cronFile = + downloadFileToDirectory( + description.getCronArtifact(), directory, SupportedConfigTypes.CRON); + deployCommand.add(cronFile.getPath()); + } + + if (description.getDispatchArtifact() != null) { + getTask().updateStatus(BASE_PHASE, "Downloading dispatch configuration..."); + File dispatchFile = + downloadFileToDirectory( + description.getDispatchArtifact(), directory, SupportedConfigTypes.DISPATCH); + deployCommand.add(dispatchFile.getPath()); + } + + if (description.getIndexArtifact() != null) { + getTask().updateStatus(BASE_PHASE, "Downloading index configuration..."); + File indexFile = +
downloadFileToDirectory( + description.getIndexArtifact(), directory, SupportedConfigTypes.INDEX); + deployCommand.add(indexFile.getPath()); + } + + if (description.getQueueArtifact() != null) { + getTask().updateStatus(BASE_PHASE, "Downloading queue configuration..."); + File queueFile = + downloadFileToDirectory( + description.getQueueArtifact(), directory, SupportedConfigTypes.QUEUE); + deployCommand.add(queueFile.getPath()); + } + + deployCommand.add("--project=" + description.getCredentials().getProject()); + deployCommand.add("--account=" + description.getCredentials().getServiceAccountEmail()); + getTask().updateStatus(BASE_PHASE, "Deploying configuration..."); + jobExecutor.runCommand(deployCommand); + success = "true"; + getTask().updateStatus(BASE_PHASE, "Done deploying configuration"); + } catch (Exception e) { + throw new AppengineOperationException( + "Failed to deploy to App Engine with command: " + deployCommand); + } finally { + try { + long duration = registry.clock().monotonicTime() - startTime; + registry + .timer( + registry.createId( + "appengine.deployConfig", "account", serviceAccount, "success", success)) + .record(duration, TimeUnit.NANOSECONDS); + FileUtils.cleanDirectory(directory.toFile()); + FileUtils.forceDelete(directory.toFile()); + } catch (Exception e) { + throw new AppengineOperationException( + "Failed to clean up and delete directory: " + directory); + } + } + return null; + } + + Path createEmptyDirectory() { + Path path; + try { + path = Files.createTempDirectory("appengineconfig-"); + FileUtils.cleanDirectory(path.toFile()); + } catch (IOException ex) { + throw new AppengineOperationException("Failed to create directory"); + } + return path; + } + + File downloadFileToDirectory(Artifact artifact, Path directory, SupportedConfigTypes type) { + File targetFile; + try { + InputStream inStream = artifactDownloader.download(artifact); + targetFile = new File(directory + "/" + type.toString().toLowerCase() + ".yaml"); + FileUtils.copyInputStreamToFile(inStream, targetFile); + IOUtils.closeQuietly(inStream); + } catch (IOException e) { + throw new AppengineOperationException("Failed to download cron configuration"); + } + return targetFile; + } + + enum SupportedConfigTypes { + CRON, + QUEUE, + DISPATCH, + INDEX + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineConfigDescriptionValidator.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineConfigDescriptionValidator.java new file mode 100644 index 00000000000..2c999eb7e06 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineConfigDescriptionValidator.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy.validators; + +import com.netflix.spinnaker.clouddriver.appengine.AppengineOperation; +import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineConfigDescription; +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AppengineOperation(AtomicOperations.DEPLOY_APPENGINE_CONFIG) +@Component("deployAppengineConfigDescriptionValidator") +public class DeployAppengineConfigDescriptionValidator + extends DescriptionValidator<DeployAppengineConfigDescription> { + + @Autowired private CredentialsRepository<AppengineNamedAccountCredentials> credentialsRepository; + + @Override + public void validate( + List priorDescriptions, + DeployAppengineConfigDescription description, + ValidationErrors errors) { + StandardAppengineAttributeValidator helper = + new StandardAppengineAttributeValidator( + "deployAppengineConfigAtomicOperationDescription", errors); + helper.validateCredentials(description.getAccountName(), credentialsRepository); + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentials.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentials.java new file mode 100644 index 00000000000..accc70eee11 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentials.java @@ -0,0 +1,46 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.appengine.security; + +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.JsonFactory; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.appengine.v1.Appengine; +import com.google.auth.http.HttpCredentialsAdapter; +import com.google.auth.oauth2.GoogleCredentials; +import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentials; + +public class AppengineCredentials extends GoogleCommonCredentials { + + private final String project; + + public AppengineCredentials(String project) { + this.project = project; + } + + public Appengine getAppengine(String applicationName) { + HttpTransport httpTransport = buildHttpTransport(); + JsonFactory jsonFactory = GsonFactory.getDefaultInstance(); + GoogleCredentials credentials = getCredentials(); + HttpRequestInitializer requestInitializer = new HttpCredentialsAdapter(credentials); + + return new Appengine.Builder(httpTransport, jsonFactory, requestInitializer) + .setApplicationName(applicationName) + .build(); + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentialsLifecycleHandler.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentialsLifecycleHandler.java new file mode 100644 index 00000000000..9123debf487 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineCredentialsLifecycleHandler.java @@ -0,0 +1,65 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.appengine.security; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.appengine.provider.AppengineProvider; +import com.netflix.spinnaker.clouddriver.appengine.provider.agent.AppengineLoadBalancerCachingAgent; +import com.netflix.spinnaker.clouddriver.appengine.provider.agent.AppenginePlatformApplicationCachingAgent; +import com.netflix.spinnaker.clouddriver.appengine.provider.agent.AppengineServerGroupCachingAgent; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import java.util.Collections; +import java.util.List; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@Component +@RequiredArgsConstructor +public class AppengineCredentialsLifecycleHandler + implements CredentialsLifecycleHandler<AppengineNamedAccountCredentials> { + private final AppengineProvider appengineProvider; + private final ObjectMapper objectMapper; + private final Registry registry; + + @Override + public void credentialsAdded(AppengineNamedAccountCredentials credentials) { + addAgentFor(credentials); + } + + @Override + public void credentialsUpdated(AppengineNamedAccountCredentials credentials) { + appengineProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + addAgentFor(credentials); + } + + @Override + public void credentialsDeleted(AppengineNamedAccountCredentials credentials) { + appengineProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + } + + private void addAgentFor(AppengineNamedAccountCredentials credentials) { + appengineProvider.addAgents( + List.of( + new AppengineServerGroupCachingAgent( + credentials.getName(), credentials, objectMapper, registry), + new AppengineLoadBalancerCachingAgent( + credentials.getName(), credentials, objectMapper, registry), + new AppenginePlatformApplicationCachingAgent( + credentials.getName(), credentials, objectMapper))); + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineJsonCredentials.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineJsonCredentials.java new file mode 100644 index 00000000000..2fc6c9ba787 --- /dev/null +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/clouddriver/appengine/security/AppengineJsonCredentials.java @@ -0,0 +1,40 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.appengine.security; + +import com.google.api.services.appengine.v1.AppengineScopes; +import com.google.auth.oauth2.GoogleCredentials; +import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentialUtils; + +public class AppengineJsonCredentials extends AppengineCredentials { + + private final String jsonKey; + + public AppengineJsonCredentials(String project, String jsonKey) { + super(project); + this.jsonKey = jsonKey; + } + + @Override + public GoogleCredentials getCredentials() { + return GoogleCommonCredentialUtils.getCredentials(jsonKey, AppengineScopes.CLOUD_PLATFORM); + } + + public final String getJsonKey() { + return jsonKey; + } +} diff --git a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/config/AppengineStorageConfiguration.java b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/config/AppengineStorageConfiguration.java index ada22b4d485..5da48c202ff 100644 --- a/clouddriver-appengine/src/main/java/com/netflix/spinnaker/config/AppengineStorageConfiguration.java +++ b/clouddriver-appengine/src/main/java/com/netflix/spinnaker/config/AppengineStorageConfiguration.java @@ -18,6 +18,8 @@ import com.netflix.spinnaker.clouddriver.appengine.artifacts.GcsStorageService.Factory; import com.netflix.spinnaker.clouddriver.appengine.artifacts.config.StorageConfigurationProperties; +import java.io.IOException; +import java.security.GeneralSecurityException; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.boot.context.properties.EnableConfigurationProperties; @@ -25,17 +27,12 @@ import org.springframework.context.annotation.Configuration; import org.springframework.scheduling.annotation.EnableScheduling; -import java.io.IOException; -import java.security.GeneralSecurityException; - - @Configuration @ConditionalOnProperty("artifacts.gcs.enabled") @EnableConfigurationProperties(StorageConfigurationProperties.class) @EnableScheduling public class AppengineStorageConfiguration { - @Autowired - StorageConfigurationProperties storageAccountInfo; + @Autowired StorageConfigurationProperties storageAccountInfo; @Bean Factory storageServiceFactory(String clouddriverUserAgentApplicationName) { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverterSpec.groovy index 2f39bd2f340..ae97b98b1fd 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeleteAppengineLoadBalancerAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeleteAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DeleteAppengineLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import 
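
Taken together with AppengineCredentials above, the JSON variant supplies scoped GoogleCredentials from a service-account key. A small usage sketch (the project ID, key path, and application name are placeholders, not values from this change):

    // Hypothetical: build an Appengine API client from a JSON key.
    // Requires java.nio.file.{Files, Path} and the Appengine imports shown above.
    String jsonKey = Files.readString(Path.of("/path/to/service-account.json"));
    AppengineCredentials credentials = new AppengineJsonCredentials("my-project", jsonKey);
    // getAppengine() adapts the GoogleCredentials via HttpCredentialsAdapter
    // and returns a configured client, as defined in AppengineCredentials.
    Appengine appengine = credentials.getAppengine("clouddriver");
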
com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -28,18 +27,15 @@ class DeleteAppengineLoadBalancerAtomicOperationConverterSpec extends Specificat private static final ACCOUNT_NAME = "my-appengine-account" private static final LOAD_BALANCER_NAME = "mobile" - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared DeleteAppengineLoadBalancerAtomicOperationConverter converter def setupSpec() { - converter = new DeleteAppengineLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new DeleteAppengineLoadBalancerAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "deleteAppengineLoadBalancerDescription type returns DeleteAppengineLoadBalancerDescription and DeleteAppengineLoadBalancerAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverterSpec.groovy index 197613b9730..c421801a77f 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineAtomicOperationConverterSpec.groovy @@ -20,8 +20,8 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DeployAppengineAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.appengine.deploy.exception.AppengineDescriptionConversionException +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -40,10 +40,10 @@ class DeployAppengineAtomicOperationConverterSpec extends Specification { def setupSpec() { this.converter = new DeployAppengineAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "deployAppengineDescription type returns DeployAppengineDescription and DeployAppengineAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverterSpec.groovy 
b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverterSpec.groovy index c8bf368d775..bbe6b8ee911 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DestroyAppengineAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DestroyAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DestroyAppengineAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -28,18 +27,15 @@ class DestroyAppengineAtomicOperationConverterSpec extends Specification { private static final ACCOUNT_NAME = "my-appengine-account" private static final SERVER_GROUP_NAME = 'app-stack-detail-v000' - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared DestroyAppengineAtomicOperationConverter converter def setupSpec() { - converter = new DestroyAppengineAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new DestroyAppengineAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "destroyAppengineDescription type returns DestroyAppengineDescription and DestroyAppengineAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverterSpec.groovy index 1bd05bbd1bd..c294b69c0e0 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DisableAppengineAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.EnableDisableAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.DisableAppengineAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -28,18 +27,15 @@ class DisableAppengineAtomicOperationConverterSpec 
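
The converter specs all follow the same mechanical migration: drop the hand-built ObjectMapper, mock CredentialsRepository instead of AccountCredentialsProvider, and stub getOne() in place of getCredentials(). The lookup the converters now rely on reduces to a single call (sketched with an assumed accountName variable):

    // The repository lookup that replaces accountCredentialsProvider.getCredentials(name).
    AppengineNamedAccountCredentials credentials = credentialsRepository.getOne(accountName);
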
extends Specification { private static final ACCOUNT_NAME = "my-appengine-account" private static final SERVER_GROUP_NAME = 'app-stack-detail-v000' - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared DisableAppengineAtomicOperationConverter converter def setupSpec() { - converter = new DisableAppengineAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new DisableAppengineAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "disableAppengineDescription type returns EnableDisableAppengineDescription and DisableAppengineAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverterSpec.groovy index 00c9d3a9ec8..4155ff83a5a 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/EnableAppengineAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.EnableDisableAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.EnableAppengineAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -28,18 +27,15 @@ class EnableAppengineAtomicOperationConverterSpec extends Specification { private static final ACCOUNT_NAME = "my-appengine-account" private static final SERVER_GROUP_NAME = 'app-stack-detail-v000' - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared EnableAppengineAtomicOperationConverter converter def setupSpec() { - converter = new EnableAppengineAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new EnableAppengineAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "enableAppengineDescription type returns EnableDisableAppengineDescription and EnableAppengineAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverterSpec.groovy 
b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverterSpec.groovy index 88816384496..295abf7a7ff 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/StartAppengineAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.StartStopAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.StartAppengineAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -28,18 +27,15 @@ class StartAppengineAtomicOperationConverterSpec extends Specification { private static final ACCOUNT_NAME = "my-appengine-account" private static final SERVER_GROUP_NAME = 'app-stack-detail-v000' - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared StartAppengineAtomicOperationConverter converter def setupSpec() { - converter = new StartAppengineAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new StartAppengineAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "startAppengineDescription type returns StartStopAppengineDescription and StartAppengineAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverterSpec.groovy index ac6401dbca4..0aae449a514 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/TerminateAppengineInstancesAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.TerminateAppengineInstancesDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.TerminateAppengineInstancesAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -28,18 +27,15 @@ class 
TerminateAppengineInstancesAtomicOperationConverterSpec extends Specificat private static final ACCOUNT_NAME = "my-appengine-account" private static final INSTANCE_IDS = ["instance-1", "instance-2"] - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared TerminateAppengineInstancesAtomicOperationConverter converter def setupSpec() { - converter = new TerminateAppengineInstancesAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new TerminateAppengineInstancesAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "terminateAppengineInstancesDescription type returns TerminateAppengineInstancesDescription and TerminateAppengineInstancesAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverterSpec.groovy index deb598030bf..b2d06bd3237 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineAutoscalingPolicyAtomicOperationConverterSpec.groovy @@ -16,11 +16,10 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.UpsertAppengineAutoscalingPolicyAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -30,18 +29,15 @@ class UpsertAppengineAutoscalingPolicyAtomicOperationConverterSpec extends Speci private static final MIN_IDLE_INSTANCES = 10 private static final MAX_IDLE_INSTANCES = 20 - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared UpsertAppengineAutoscalingPolicyAtomicOperationConverter converter def setupSpec() { - converter = new UpsertAppengineAutoscalingPolicyAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new UpsertAppengineAutoscalingPolicyAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "upsertAppengineAutoscalingPolicyDescription type returns UpsertAppengineAutoscalingPolicyDescription and 
UpsertAppengineAutoscalingPolicyAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverterSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverterSpec.groovy index 3d299d0fb06..05667daeae2 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverterSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/UpsertAppengineLoadBalancerAtomicOperationConverterSpec.groovy @@ -16,13 +16,12 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.converters -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.appengine.deploy.description.UpsertAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.deploy.ops.UpsertAppengineLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.appengine.model.AppengineTrafficSplit import com.netflix.spinnaker.clouddriver.appengine.model.ShardBy import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -41,18 +40,15 @@ class UpsertAppengineLoadBalancerAtomicOperationConverterSpec extends Specificat ] private static final MIGRATE_TRAFFIC = false - @Shared - ObjectMapper mapper = new ObjectMapper() - @Shared UpsertAppengineLoadBalancerAtomicOperationConverter converter def setupSpec() { - converter = new UpsertAppengineLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter = new UpsertAppengineLoadBalancerAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(AppengineNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "upsertAppengineLoadBalancerDescription type returns UpsertAppengineLoadBalancerDescription and UpsertAppengineLoadBalancerAtomicOperation"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DestroyAppengineAtomicOperationSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DestroyAppengineAtomicOperationSpec.groovy index 6f2cf9ad925..cce48616dd4 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DestroyAppengineAtomicOperationSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DestroyAppengineAtomicOperationSpec.groovy @@ -43,7 +43,7 @@ class DestroyAppengineAtomicOperationSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = AppengineSafeRetry.withoutDelay() } void "can delete an 
Appengine server group"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperationSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperationSpec.groovy index 6a890e57049..e2de5165b61 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperationSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DisableAppengineAtomicOperationSpec.groovy @@ -50,7 +50,7 @@ class DisableAppengineAtomicOperationSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = AppengineSafeRetry.withoutDelay() } @Unroll diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/EnableAppengineAtomicOperationSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/EnableAppengineAtomicOperationSpec.groovy index a6f1c935cb8..da2f991fe87 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/EnableAppengineAtomicOperationSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/EnableAppengineAtomicOperationSpec.groovy @@ -48,7 +48,7 @@ class EnableAppengineAtomicOperationSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = AppengineSafeRetry.withoutDelay() } void "enable operation should set a server group's allocation to 1"() { diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/StartStopAppengineAtomicOperationSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/StartStopAppengineAtomicOperationSpec.groovy index 8f8bca8876c..70ae2b6e7cc 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/StartStopAppengineAtomicOperationSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/StartStopAppengineAtomicOperationSpec.groovy @@ -74,7 +74,7 @@ class StartStopAppengineAtomicOperationSpec extends Specification { new StopAppengineAtomicOperation(description) operation.appengineClusterProvider = clusterProviderMock operation.registry = new DefaultRegistry() - operation.safeRetry = new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + operation.safeRetry = AppengineSafeRetry.withoutDelay() when: operation.operate([]) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperationSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperationSpec.groovy index ebb8417c1c4..bc7a097ce7c 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperationSpec.groovy +++ 
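
These operation specs swap the explicit retry construction for a factory call. Presumably withoutDelay() yields a retry policy that does not sleep between attempts, matching the zero-interval settings it replaces; the factory body is not shown in this diff:

    // Before: new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000,
    //                                retryIntervalBase: 0, jitterMultiplier: 0)
    // After, presumed equivalent for test purposes:
    AppengineSafeRetry safeRetry = AppengineSafeRetry.withoutDelay();
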
b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineAutoscalingPolicyAtomicOperationSpec.groovy @@ -81,7 +81,7 @@ class UpsertAppengineAutoscalingPolicyAtomicOperationSpec extends Specification @Subject def operation = new UpsertAppengineAutoscalingPolicyAtomicOperation(description) operation.appengineClusterProvider = clusterProviderMock operation.registry = new DefaultRegistry() - operation.safeRetry = new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + operation.safeRetry = AppengineSafeRetry.withoutDelay() when: operation.operate([]) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperationSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperationSpec.groovy index 5427c779d47..1f7111052a9 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperationSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/UpsertAppengineLoadBalancerAtomicOperationSpec.groovy @@ -54,7 +54,7 @@ class UpsertAppengineLoadBalancerAtomicOperationSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new AppengineSafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = AppengineSafeRetry.withoutDelay() } @Unroll diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidatorSpec.groovy index 7a93b73aade..3c1d3cf9e6f 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeleteAppengineLoadBalancerDescriptionValidatorSpec.groovy @@ -19,9 +19,9 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeleteAppengineLoadBalancerDescription import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -37,7 +37,8 @@ class DeleteAppengineLoadBalancerDescriptionValidatorSpec extends Specification void setupSpec() { validator = new DeleteAppengineLoadBalancerDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsRepo = new 
MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) def namedAccountCredentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -45,15 +46,15 @@ class DeleteAppengineLoadBalancerDescriptionValidatorSpec extends Specification .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, namedAccountCredentials) + credentialsRepo.save(namedAccountCredentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { setup: def description = new DeleteAppengineLoadBalancerDescription(accountName: ACCOUNT_NAME, loadBalancerName: LOAD_BALANCER_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -65,7 +66,7 @@ class DeleteAppengineLoadBalancerDescriptionValidatorSpec extends Specification void "description with loadBalancerName == \"default\" fails validation"() { setup: def description = new DeleteAppengineLoadBalancerDescription(accountName: ACCOUNT_NAME, loadBalancerName: "default") - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -78,7 +79,7 @@ class DeleteAppengineLoadBalancerDescriptionValidatorSpec extends Specification void "null input fails validation"() { setup: def description = new DeleteAppengineLoadBalancerDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidatorSpec.groovy index 7ee10f9562d..8125a84f27e 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DeployAppengineDescriptionValidatorSpec.groovy @@ -21,9 +21,9 @@ import com.netflix.spinnaker.clouddriver.appengine.gitClient.AppengineGitCredent import com.netflix.spinnaker.clouddriver.appengine.gitClient.AppengineGitCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -46,7 +46,8 @@ class DeployAppengineDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new DeployAppengineDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsRepo = new 
MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -55,9 +56,9 @@ class DeployAppengineDescriptionValidatorSpec extends Specification { .credentials(mockCredentials) .gitCredentials(new AppengineGitCredentials()) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -74,7 +75,7 @@ class DeployAppengineDescriptionValidatorSpec extends Specification { stopPreviousVersion: true, credentials: credentials, gitCredentialType: AppengineGitCredentialType.NONE) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -93,7 +94,7 @@ class DeployAppengineDescriptionValidatorSpec extends Specification { configFilepaths: CONFIG_FILEPATHS, credentials: credentials, gitCredentialType: AppengineGitCredentialType.NONE) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -105,7 +106,7 @@ class DeployAppengineDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new DeployAppengineDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DestroyAppengineDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DestroyAppengineDescriptionValidatorSpec.groovy index 6b81d77e4c1..38e84a959fd 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DestroyAppengineDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DestroyAppengineDescriptionValidatorSpec.groovy @@ -19,9 +19,9 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DestroyAppengineDescription import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -37,7 +37,6 @@ class DestroyAppengineDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new DestroyAppengineDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() def mockCredentials = Mock(AppengineCredentials) def namedAccountCredentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -45,15 +44,18 
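
The validator specs share one new setup shape: build a MapBackedCredentialsRepository keyed by the App Engine credentials type with a no-op lifecycle handler, save the account by value rather than by name, and inject the repository into the validator. Condensed below; variable names follow the specs, and the final assignment mirrors the Groovy property injection used there:

    // Hypothetical condensed form of the repeated spec setup.
    MapBackedCredentialsRepository<AppengineNamedAccountCredentials> repo =
        new MapBackedCredentialsRepository<>(
            AppengineNamedAccountCredentials.CREDENTIALS_TYPE,
            new NoopCredentialsLifecycleHandler<>());
    repo.save(namedAccountCredentials);     // was: credentialsRepo.save(ACCOUNT_NAME, ...)
    validator.credentialsRepository = repo; // was: validator.accountCredentialsProvider = ...
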
@@ class DestroyAppengineDescriptionValidatorSpec extends Specification { .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, namedAccountCredentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository<>(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) + credentialsRepo.save(namedAccountCredentials) + validator.credentialsRepository = credentialsRepo + } void "pass validation with proper description inputs"() { setup: def description = new DestroyAppengineDescription(accountName: ACCOUNT_NAME, serverGroupName: SERVER_GROUP_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -65,7 +67,7 @@ class DestroyAppengineDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new DestroyAppengineDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidatorSpec.groovy index 77ba2cc9b90..03cb2c99e9c 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/DisableAppengineDescriptionValidatorSpec.groovy @@ -24,9 +24,10 @@ import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineCluste import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineLoadBalancerProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -45,10 +46,11 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { AppengineNamedAccountCredentials credentials @Shared - DefaultAccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository void setupSpec() { - def credentialsRepo = new MapBackedAccountCredentialsRepository() + credentialsRepository = new MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -56,14 +58,13 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - 
credentialsRepo.save(ACCOUNT_NAME, credentials) - accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + credentialsRepository.save(credentials) } void "passes validation if server group to be disabled does not have allocation of 1"() { setup: def validator = new DisableAppengineDescriptionValidator() - validator.accountCredentialsProvider = accountCredentialsProvider + validator.credentialsRepository = credentialsRepository validator.appengineLoadBalancerProvider = Mock(AppengineLoadBalancerProvider) validator.appengineClusterProvider = Mock(AppengineClusterProvider) @@ -73,7 +74,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def loadBalancerWithValidAllocationsForDescription = new AppengineLoadBalancer( split: new AppengineTrafficSplit(allocations: [(SERVER_GROUP_NAME): 0.5, "another-server-group": 0.5]) @@ -92,7 +93,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { void "fails validation if server group to be disabled has allocation of 1"() { setup: def validator = new DisableAppengineDescriptionValidator() - validator.accountCredentialsProvider = accountCredentialsProvider + validator.credentialsRepository = credentialsRepository validator.appengineLoadBalancerProvider = Mock(AppengineLoadBalancerProvider) validator.appengineClusterProvider = Mock(AppengineClusterProvider) @@ -102,7 +103,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def loadBalancerWithInvalidAllocationsForDescription = new AppengineLoadBalancer( split: new AppengineTrafficSplit(allocations: [(SERVER_GROUP_NAME): 1]) @@ -124,7 +125,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { void "fails validation if server group cannot be found"() { setup: def validator = new DisableAppengineDescriptionValidator() - validator.accountCredentialsProvider = accountCredentialsProvider + validator.credentialsRepository = credentialsRepository validator.appengineLoadBalancerProvider = Mock(AppengineLoadBalancerProvider) validator.appengineClusterProvider = Mock(AppengineClusterProvider) @@ -134,7 +135,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -150,7 +151,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { void "fails validation if parent load balancer cannot be found"() { setup: def validator = new DisableAppengineDescriptionValidator() - validator.accountCredentialsProvider = accountCredentialsProvider + validator.credentialsRepository = credentialsRepository validator.appengineLoadBalancerProvider = Mock(AppengineLoadBalancerProvider) validator.appengineClusterProvider = Mock(AppengineClusterProvider) @@ -160,7 +161,7 @@ class DisableAppengineDescriptionValidatorSpec extends Specification { credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/EnableAppengineDescriptionValidatorSpec.groovy 
b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/EnableAppengineDescriptionValidatorSpec.groovy index a1e6d9ecf4b..4b6b0cfd029 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/EnableAppengineDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/EnableAppengineDescriptionValidatorSpec.groovy @@ -21,9 +21,10 @@ import com.netflix.spinnaker.clouddriver.appengine.model.AppengineServerGroup import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -42,10 +43,12 @@ class EnableAppengineDescriptionValidatorSpec extends Specification { AppengineNamedAccountCredentials credentials @Shared - DefaultAccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository void setupSpec() { - def credentialsRepo = new MapBackedAccountCredentialsRepository() + credentialsRepository = new MapBackedCredentialsRepository<>( + AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()); def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -53,14 +56,13 @@ class EnableAppengineDescriptionValidatorSpec extends Specification { .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + credentialsRepository.save(credentials) } void "fails validation if server group cannot be found"() { setup: def validator = new EnableAppengineDescriptionValidator() - validator.accountCredentialsProvider = accountCredentialsProvider + validator.credentialsRepository = credentialsRepository validator.appengineClusterProvider = Mock(AppengineClusterProvider) def description = new EnableDisableAppengineDescription( @@ -69,7 +71,7 @@ class EnableAppengineDescriptionValidatorSpec extends Specification { credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -85,7 +87,7 @@ class EnableAppengineDescriptionValidatorSpec extends Specification { void "passes validation if server group found in cache"() { setup: def validator = new EnableAppengineDescriptionValidator() - validator.accountCredentialsProvider = accountCredentialsProvider + validator.credentialsRepository = credentialsRepository validator.appengineClusterProvider = Mock(AppengineClusterProvider) def description = new EnableDisableAppengineDescription( @@ -94,7 +96,7 @@ class EnableAppengineDescriptionValidatorSpec extends Specification { credentials: 
credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidatorSpec.groovy index cdf432c660b..ec68eb176b0 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StandardAppengineAttributeValidatorSpec.groovy @@ -18,14 +18,17 @@ package com.netflix.spinnaker.clouddriver.appengine.deploy.validators import com.netflix.spinnaker.clouddriver.appengine.gitClient.AppengineGitCredentialType import com.netflix.spinnaker.clouddriver.appengine.gitClient.AppengineGitCredentials +import com.netflix.spinnaker.clouddriver.appengine.model.AppengineServerGroup import com.netflix.spinnaker.clouddriver.appengine.model.AppengineTrafficSplit +import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.model.ShardBy import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -35,26 +38,52 @@ class StandardAppengineAttributeValidatorSpec extends Specification { private static final ACCOUNT_NAME = "my-appengine-account" private static final APPLICATION_NAME = "test-app" private static final REGION = "us-central" + private static final DEFAULT_LOAD_BALANCER_NAME = "default" + private static final BACKEND_LOAD_BALANCER_NAME = "backend" + private static final LATENCY_LOAD_BALANCER_NAME = "latency_sensitive" + + + private static final SERVER_GROUP_NAME_1 = "app-stack-detail-v000" + private static final SERVER_GROUP_1 = new AppengineServerGroup( + name: SERVER_GROUP_NAME_1, + loadBalancers: [DEFAULT_LOAD_BALANCER_NAME] + ) + + private static final SERVER_GROUP_NAME_2 = "app-stack-detail-v001" + private static final SERVER_GROUP_2 = new AppengineServerGroup( + name: SERVER_GROUP_NAME_2, + loadBalancers: [DEFAULT_LOAD_BALANCER_NAME, BACKEND_LOAD_BALANCER_NAME, LATENCY_LOAD_BALANCER_NAME] + ) + + private static final SERVER_GROUP_NAME_3 = "allows-gradual-migration" + private static final SERVER_GROUP_3 = new AppengineServerGroup( + name: SERVER_GROUP_NAME_3, + loadBalancers: [DEFAULT_LOAD_BALANCER_NAME], + allowsGradualTrafficMigration: true + ) @Shared - DefaultAccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Shared AppengineGitCredentials gitCredentials + @Shared + 
AppengineNamedAccountCredentials namedAccountCredentials + void setupSpec() { - def credentialsRepo = new MapBackedAccountCredentialsRepository() - accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + credentialsRepository = new MapBackedCredentialsRepository<>(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) - def namedAccountCredentials = new AppengineNamedAccountCredentials.Builder() + namedAccountCredentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) .region(REGION) .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, namedAccountCredentials) + credentialsRepository.save(namedAccountCredentials) gitCredentials = new AppengineGitCredentials( httpsUsernamePasswordCredentialsProvider: Mock(UsernamePasswordCredentialsProvider) @@ -63,7 +92,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "validate non-empty valid"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errors) def label = "attribute" @@ -90,7 +119,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "validate non-empty invalid"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errors) def label = "attribute" @@ -112,7 +141,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "validate by regex valid"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errors) def label = "attribute" @@ -124,7 +153,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "validate by regex invalid"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errors) def label = "attribute" def regex = /\w{3}-\w{6}/ @@ -137,17 +166,17 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "credentials reject (empty)"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) when: - validator.validateCredentials(null, accountCredentialsProvider) + validator.validateCredentials(null, credentialsRepository) then: 1 * errorsMock.rejectValue("${DECORATOR}.account", "${DECORATOR}.account.empty") 0 * errorsMock._ when: - validator.validateCredentials("", accountCredentialsProvider) + validator.validateCredentials("", credentialsRepository) then: 1 * errorsMock.rejectValue("${DECORATOR}.account", "${DECORATOR}.account.empty") 0 * errorsMock._ @@ -155,11 +184,11 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "credentials reject (unknown)"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) when: - validator.validateCredentials("You-don't-know-me", accountCredentialsProvider) + validator.validateCredentials("You-don't-know-me", credentialsRepository) then: 1 * errorsMock.rejectValue("${DECORATOR}.account", "${DECORATOR}.account.notFound") 0 * errorsMock._ @@ -167,18 +196,18 @@ class StandardAppengineAttributeValidatorSpec 
extends Specification { void "credentials accept"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) when: - validator.validateCredentials(ACCOUNT_NAME, accountCredentialsProvider) + validator.validateCredentials(ACCOUNT_NAME, credentialsRepository) then: 0 * errorsMock._ } void "git credentials reject"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) when: @@ -192,7 +221,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "git credentials reject (empty)"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) when: @@ -204,7 +233,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "git credentials accept"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) when: @@ -216,7 +245,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "details accept"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "label" @@ -243,7 +272,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "details reject"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "label" @@ -274,7 +303,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "application accept"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "label" @@ -296,7 +325,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "application reject"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "label" @@ -321,7 +350,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "stack accept"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "label" @@ -343,7 +372,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "stack reject"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "label" @@ -363,7 +392,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { @Unroll void "allocations accept"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "allocations" @@ -382,7 +411,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { @Unroll void "allocations reject (wrong number of decimal places)"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new 
StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "allocations" @@ -400,7 +429,7 @@ class StandardAppengineAttributeValidatorSpec extends Specification { void "allocations reject (does not sum to 1)"() { setup: - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) def label = "allocations" @@ -409,5 +438,74 @@ class StandardAppengineAttributeValidatorSpec extends Specification { then: 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Allocations must sum to 1)") +} + +void "serverGroup reject not found"() { + setup: + def errorsMock = Mock(ValidationErrors) + def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) + def label = "allocations" + def serverGroupName = "not_exists" + def mockCluster = Mock(AppengineClusterProvider) + + when: + validator.validateServerGroupsCanBeEnabled([serverGroupName], DEFAULT_LOAD_BALANCER_NAME, namedAccountCredentials, mockCluster, "split.allocations") + + then: + 1 * errorsMock.rejectValue("${DECORATOR}.split.${label}", "${DECORATOR}.split.${label}.invalid (Server group ${serverGroupName} not found).") +} + + void "serverGroup valid"() { + setup: + def errorsMock = Mock(ValidationErrors) + def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) + def label = "allocations" + def mockCluster = Mock(AppengineClusterProvider) + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_1) >> SERVER_GROUP_1 + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_2) >> SERVER_GROUP_2 + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_3) >> SERVER_GROUP_3 + + when: + validator.validateServerGroupsCanBeEnabled([SERVER_GROUP_NAME_1], DEFAULT_LOAD_BALANCER_NAME, namedAccountCredentials, mockCluster, "split.allocations") + + then: + 0 * errorsMock._ + } + + void "same name serverGroup valid"() { + setup: + def errorsMock = Mock(ValidationErrors) + def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) + def label = "allocations" + def mockCluster = Mock(AppengineClusterProvider) + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_1) >> SERVER_GROUP_1 + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_2) >> SERVER_GROUP_2 + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_3) >> SERVER_GROUP_3 + + when: + validator.validateServerGroupsCanBeEnabled([SERVER_GROUP_NAME_2], DEFAULT_LOAD_BALANCER_NAME, namedAccountCredentials, mockCluster, "split.allocations") + validator.validateServerGroupsCanBeEnabled([SERVER_GROUP_NAME_2], BACKEND_LOAD_BALANCER_NAME, namedAccountCredentials, mockCluster, "split.allocations") + validator.validateServerGroupsCanBeEnabled([SERVER_GROUP_NAME_2], LATENCY_LOAD_BALANCER_NAME, namedAccountCredentials, mockCluster, "split.allocations") + + then: + 0 * errorsMock._ } + +void "serverGroup reject not registered with load balancer"() { + setup: + def errorsMock = Mock(ValidationErrors) + def validator = new StandardAppengineAttributeValidator(DECORATOR, errorsMock) + def label = "allocations" + def loadBalancer = "not_exists_loadBalancer" + def mockCluster = Mock(AppengineClusterProvider) + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_1) >> SERVER_GROUP_1 + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_2) >> SERVER_GROUP_2 + mockCluster.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_3) >> 
SERVER_GROUP_3 + + when: + validator.validateServerGroupsCanBeEnabled([SERVER_GROUP_NAME_2], loadBalancer, namedAccountCredentials, mockCluster, "split.allocations") + + then: + 1 * errorsMock.rejectValue("${DECORATOR}.split.${label}", "${DECORATOR}.split.${label}.invalid (Server group ${SERVER_GROUP_NAME_2} not registered with load balancer ${loadBalancer}).") +} } diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StartAppengineDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StartAppengineDescriptionValidatorSpec.groovy index 46450a0c370..18ed46f455b 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StartAppengineDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/StartAppengineDescriptionValidatorSpec.groovy @@ -23,9 +23,9 @@ import com.netflix.spinnaker.clouddriver.appengine.model.ScalingPolicyType import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -45,7 +45,8 @@ class StartAppengineDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new StartAppengineDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsRepo = new MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -53,9 +54,9 @@ class StartAppengineDescriptionValidatorSpec extends Specification { .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + validator.credentialsRepository = credentialsRepo } @Unroll @@ -66,7 +67,7 @@ class StartAppengineDescriptionValidatorSpec extends Specification { serverGroupName: SERVER_GROUP_NAME, credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup @@ -92,7 +93,7 @@ class StartAppengineDescriptionValidatorSpec extends Specification { serverGroupName: SERVER_GROUP_NAME, credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, 
SERVER_GROUP_NAME) >> serverGroup @@ -119,7 +120,7 @@ class StartAppengineDescriptionValidatorSpec extends Specification { serverGroupName: SERVER_GROUP_NAME, credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> null @@ -135,7 +136,7 @@ class StartAppengineDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new StartStopAppengineDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidatorSpec.groovy index b6e7e190a80..68c79f5fc1f 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/TerminateAppengineInstancesDescriptionValidatorSpec.groovy @@ -21,9 +21,9 @@ import com.netflix.spinnaker.clouddriver.appengine.model.AppengineInstance import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineInstanceProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -50,7 +50,8 @@ class TerminateAppengineInstancesDescriptionValidatorSpec extends Specification void setupSpec() { validator = new TerminateAppengineInstancesDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsRepo = new MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -58,9 +59,9 @@ class TerminateAppengineInstancesDescriptionValidatorSpec extends Specification .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + validator.credentialsRepository = credentialsRepo validator.appengineInstanceProvider = Mock(AppengineInstanceProvider) validator.appengineInstanceProvider.getInstance(ACCOUNT_NAME, REGION, "instance-1") >> INSTANCE @@ -74,7 +75,7 @@ class TerminateAppengineInstancesDescriptionValidatorSpec extends Specification instanceIds: INSTANCE_IDS, credentials: credentials ) - def errors = 
Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -90,7 +91,7 @@ class TerminateAppengineInstancesDescriptionValidatorSpec extends Specification instanceIds: ["instance-does-not-exist"], credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -108,7 +109,7 @@ class TerminateAppengineInstancesDescriptionValidatorSpec extends Specification instanceIds: ["instance-missing-fields"], credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -122,7 +123,7 @@ class TerminateAppengineInstancesDescriptionValidatorSpec extends Specification void "null input fails validation"() { setup: def description = new TerminateAppengineInstancesDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec.groovy index 4ffa05bbf0e..d44125fc462 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec.groovy @@ -23,9 +23,9 @@ import com.netflix.spinnaker.clouddriver.appengine.model.ScalingPolicyType import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -45,7 +45,8 @@ class UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec extends Specifica void setupSpec() { validator = new UpsertAppengineAutoscalingPolicyDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsRepo = new MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -53,9 +54,9 @@ class UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec extends Specifica .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + validator.credentialsRepository = credentialsRepo } void "pass 
validation with proper description inputs"() { @@ -70,7 +71,7 @@ class UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec extends Specifica env: AppengineServerGroup.Environment.STANDARD, scalingPolicy: new AppengineScalingPolicy(type: ScalingPolicyType.AUTOMATIC)) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup @@ -92,7 +93,7 @@ class UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec extends Specifica maxIdleInstances: 20) def serverGroup = new AppengineServerGroup(env: env, scalingPolicy: new AppengineScalingPolicy(type: type)) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup @@ -120,7 +121,7 @@ class UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec extends Specifica credentials: credentials, minIdleInstances: 10, maxIdleInstances: 20) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> null @@ -144,7 +145,7 @@ class UpsertAppengineAutoscalingPolicyDescriptionValidatorSpec extends Specifica def serverGroup = new AppengineServerGroup(env: AppengineServerGroup.Environment.STANDARD, scalingPolicy: new AppengineScalingPolicy(type: ScalingPolicyType.AUTOMATIC)) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup diff --git a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidatorSpec.groovy index 5ac8dbccb64..6e7df0f981c 100644 --- a/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-appengine/src/test/groovy/com/netflix/spinnaker/clouddriver/appengine/deploy/validators/UpsertAppengineLoadBalancerDescriptionValidatorSpec.groovy @@ -23,9 +23,9 @@ import com.netflix.spinnaker.clouddriver.appengine.model.ShardBy import com.netflix.spinnaker.clouddriver.appengine.provider.view.AppengineClusterProvider import com.netflix.spinnaker.clouddriver.appengine.security.AppengineCredentials import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -65,7 +65,8 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends 
Specification void setupSpec() { validator = new UpsertAppengineLoadBalancerDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsRepo = new MapBackedCredentialsRepository(AppengineNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def mockCredentials = Mock(AppengineCredentials) credentials = new AppengineNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) @@ -73,9 +74,9 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification .applicationName(APPLICATION_NAME) .credentials(mockCredentials) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) - validator.accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + validator.credentialsRepository = credentialsRepo validator.appengineClusterProvider = Mock(AppengineClusterProvider) validator.appengineClusterProvider.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME_1) >> SERVER_GROUP_1 @@ -94,7 +95,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification split: validSplit, migrateTraffic: MIGRATE_TRAFFIC, credentials: credentials) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -114,7 +115,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification split: validSplit, migrateTraffic: MIGRATE_TRAFFIC, credentials: credentials) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -134,7 +135,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification migrateTraffic: true, credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -173,7 +174,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification migrateTraffic: migrateTraffic, credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -209,7 +210,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification migrateTraffic: MIGRATE_TRAFFIC, credentials: credentials ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -229,7 +230,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification split: invalidSplit, migrateTraffic: MIGRATE_TRAFFIC, credentials: credentials) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -243,7 +244,7 @@ class UpsertAppengineLoadBalancerDescriptionValidatorSpec extends Specification void "null input fails validation"() { setup: def description = new UpsertAppengineLoadBalancerDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/ArtifactUtilsTest.java b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/ArtifactUtilsTest.java new file mode 100644 index 00000000000..afc251542fa --- /dev/null +++ b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/artifacts/ArtifactUtilsTest.java @@ -0,0 +1,50 @@ +package 
com.netflix.spinnaker.clouddriver.appengine.artifacts; + +import static org.junit.jupiter.api.Assertions.*; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +class ArtifactUtilsTest { + + @Test + void testUntarStreamToPathWithEntryOutsideDestDirThrowsException() throws IOException { + + Exception ex = null; + String s = "target/zip-unarchiver-slip-tests"; + File testZip = new File(new File("").getAbsolutePath(), "src/test/zip-slip/zip-slip.tar"); + File outputDirectory = new File(new File("test-tar").getAbsolutePath(), s); + + outputDirectory.delete(); + + try { + ArtifactUtils.untarStreamToPath(new FileInputStream(testZip), outputDirectory.getPath()); + } catch (Exception e) { + ex = e; + } + + assertNotNull(ex); + assertTrue(ex.getMessage().startsWith("Entry is outside of the target directory")); + } + + @Test + void testUntarStreamDirDoesNotThrowException() throws IOException { + + Exception ex = null; + String s = "target/zip-unarchiver-slip-tests"; + File testZip = new File(new File("").getAbsolutePath(), "src/test/zip-slip/normal-tar.tar"); + File outputDirectory = new File(new File("test-tar").getAbsolutePath(), s); + + outputDirectory.delete(); + + try { + ArtifactUtils.untarStreamToPath(new FileInputStream(testZip), outputDirectory.getPath()); + } catch (Exception e) { + ex = e; + } + + assertNull(ex); + } +} diff --git a/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/config/AccountDefinitionTest.java b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/config/AccountDefinitionTest.java new file mode 100644 index 00000000000..3a27d07d49a --- /dev/null +++ b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/config/AccountDefinitionTest.java @@ -0,0 +1,53 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.appengine.config; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.List; +import org.junit.jupiter.api.Test; + +public class AccountDefinitionTest { + + @Test + public void testCredentialsEquality() { + AppengineConfigurationProperties.ManagedAccount account1 = + new AppengineConfigurationProperties.ManagedAccount() + .setServiceAccountEmail("email@example.com") + .setServices(List.of("a")); + account1.setName("appengine-1"); + AppengineConfigurationProperties.ManagedAccount account2 = + new AppengineConfigurationProperties.ManagedAccount() + .setServiceAccountEmail("email@example.com") + .setServices(List.of("a")); + account2.setName("appengine-2"); + + assertThat(account1).isNotEqualTo(account2); + + // Check name is part of the comparison + account2.setName("appengine-1"); + assertThat(account1).isEqualTo(account2); + + // Check computedServiceAccountEmail is not part of the comparison + account2.setComputedServiceAccountEmail("other@example.com"); + assertThat(account1).isEqualTo(account2); + + // Check services are part of the comparison + account2.setServices(List.of("b")); + assertThat(account1).isNotEqualTo(account2); + } +} diff --git a/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/config/AppEngineAccountCredentialsRepoTest.java b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/config/AppEngineAccountCredentialsRepoTest.java new file mode 100644 index 00000000000..fd9ebede2fc --- /dev/null +++ b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/config/AppEngineAccountCredentialsRepoTest.java @@ -0,0 +1,79 @@ +package com.netflix.spinnaker.clouddriver.appengine.config; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.appengine.AppengineJobExecutor; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import org.junit.jupiter.api.Test; +import org.springframework.boot.context.annotation.UserConfigurations; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.boot.test.context.runner.ApplicationContextRunner; +import org.springframework.context.annotation.Bean; + +public class AppEngineAccountCredentialsRepoTest { + + private final ApplicationContextRunner runner = + new ApplicationContextRunner() + .withConfiguration( + UserConfigurations.of( + AppengineCredentialsConfiguration.class, TestConfiguration.class)); + + @Test + void testCredentialsRepositoryBeanIsPresent() { + runner.run(ctx -> assertThat(ctx).hasSingleBean(CredentialsRepository.class)); + } + + static class TestConfiguration { + @Bean + ObjectMapper getObjectMapper() { + return new ObjectMapper(); + } + + @Bean + CredentialsLifecycleHandler getCredentialsLifecycleHandler() { + return mock(CredentialsLifecycleHandler.class); + } + + @Bean + NamerRegistry getNamerRegistry() { + return mock(NamerRegistry.class); + } + + @Bean + AppengineConfigurationProperties getAppengineConfigurationProperties() { + return mock(AppengineConfigurationProperties.class); + } + + @Bean +
ConfigFileService getConfigFileService() { + return mock(ConfigFileService.class); + } + + @Bean + AppengineJobExecutor getAppengineExecutor() { + return mock(AppengineJobExecutor.class); + } + + @Bean + JobExecutor getJobExecutor() { + return mock(JobExecutor.class); + } + + @Bean + Registry getRegistry() { + return mock(Registry.class); + } + + @Bean + String getClouddriverUserAgentApplicationName() { + return "clouddriverUserAgentApplicationName"; + } + } +} diff --git a/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineConfigAtomicOperationConverterTest.java b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineConfigAtomicOperationConverterTest.java new file mode 100644 index 00000000000..5fd5401eaea --- /dev/null +++ b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/deploy/converters/DeployAppengineConfigAtomicOperationConverterTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineConfigDescription; +import com.netflix.spinnaker.clouddriver.appengine.security.AppengineNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DeployAppengineConfigAtomicOperationConverterTest { + + DeployAppengineConfigAtomicOperationConverter converter; + CredentialsRepository credentialsRepository; + AppengineNamedAccountCredentials mockCredentials; + + @BeforeEach + public void init() { + converter = new DeployAppengineConfigAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + mockCredentials = mock(AppengineNamedAccountCredentials.class); + converter.setCredentialsRepository(credentialsRepository); + } + + @Test + public void convertDescriptionShouldSucceed() { + + Map stage = new HashMap<>(); + stage.put( + "cronArtifact", + ImmutableMap.of( + "type", "http/file", + "reference", "url.com", + "artifactAccount", "httpacc")); + stage.put("account", "appengineacc"); + + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + DeployAppengineConfigDescription description = converter.convertDescription(stage); + assertTrue(description.getAccountName().equals("appengineacc")); + assertTrue(description.getCronArtifact() != null); + } +} diff --git 
a/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineConfigAtomicOperationTest.java b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineConfigAtomicOperationTest.java new file mode 100644 index 00000000000..dc1594cac5d --- /dev/null +++ b/clouddriver-appengine/src/test/java/com/netflix/spinnaker/clouddriver/appengine/deploy/ops/DeployAppengineConfigAtomicOperationTest.java @@ -0,0 +1,94 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.appengine.deploy.ops; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.appengine.deploy.description.DeployAppengineConfigDescription; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; +import com.netflix.spinnaker.kork.artifacts.ArtifactTypes; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import org.apache.commons.io.FileUtils; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class DeployAppengineConfigAtomicOperationTest { + + DeployAppengineConfigDescription description = new DeployAppengineConfigDescription(); + DeployAppengineConfigAtomicOperation deployAppengineConfigAtomicOperation; + ObjectMapper mapper; + ArtifactDownloader artifactDownloader = mock(ArtifactDownloader.class); + + @BeforeEach + public void init() { + deployAppengineConfigAtomicOperation = new DeployAppengineConfigAtomicOperation(description); + mapper = new ObjectMapper(); + ReflectionTestUtils.setField( + deployAppengineConfigAtomicOperation, "artifactDownloader", artifactDownloader); + } + + @Test + public void shouldCreateEmptyDirectory() throws IOException { + Path path = null; + try { + path = deployAppengineConfigAtomicOperation.createEmptyDirectory(); + assertTrue(path.isAbsolute()); + } finally { + FileUtils.cleanDirectory(path.toFile()); + FileUtils.forceDelete(path.toFile()); + } + } + + @Test + public void shouldDownloadFileToDirectory() throws IOException { + InputStream is = new ByteArrayInputStream("dosomething".getBytes(StandardCharsets.UTF_8)); + Map artifactMap = new HashMap<>(); + artifactMap.put("artifactAccount", "embedded-artifact"); + artifactMap.put("id", "123abc"); + artifactMap.put("reference", "ZG9zb21ldGhpbmc="); + artifactMap.put("type", ArtifactTypes.EMBEDDED_BASE64.getMimeType()); + Artifact artifact = mapper.convertValue(artifactMap,
Artifact.class); + + Path path = null; + try { + path = deployAppengineConfigAtomicOperation.createEmptyDirectory(); + when(artifactDownloader.download(any())).thenReturn(is); + File file = + deployAppengineConfigAtomicOperation.downloadFileToDirectory( + artifact, path, DeployAppengineConfigAtomicOperation.SupportedConfigTypes.CRON); + assertTrue(file.exists()); + assertTrue(file.canRead()); + } finally { + FileUtils.cleanDirectory(path.toFile()); + FileUtils.forceDelete(path.toFile()); + } + } +} diff --git a/clouddriver-appengine/src/test/zip-slip/normal-tar.tar b/clouddriver-appengine/src/test/zip-slip/normal-tar.tar new file mode 100644 index 00000000000..76eb0ee9e28 Binary files /dev/null and b/clouddriver-appengine/src/test/zip-slip/normal-tar.tar differ diff --git a/clouddriver-appengine/src/test/zip-slip/zip-slip.tar b/clouddriver-appengine/src/test/zip-slip/zip-slip.tar new file mode 100644 index 00000000000..264b25064d1 Binary files /dev/null and b/clouddriver-appengine/src/test/zip-slip/zip-slip.tar differ diff --git a/clouddriver-artifacts/clouddriver-artifacts.gradle b/clouddriver-artifacts/clouddriver-artifacts.gradle index bdb9b273102..d9855482aa2 100644 --- a/clouddriver-artifacts/clouddriver-artifacts.gradle +++ b/clouddriver-artifacts/clouddriver-artifacts.gradle @@ -1,12 +1,102 @@ +plugins { + id 'com.adarshr.test-logger' version '2.1.0' +} + +tasks.compileGroovy.enabled = false +sourceSets { + main { + java.srcDirs = ['src/main/java'] + } + integration { + java.srcDirs = ["src/integration/java"] + resources.srcDirs = ["src/integration/resources"] + } +} + +configurations { + integrationImplementation.extendsFrom testImplementation + integrationRuntime.extendsFrom testRuntime +} + dependencies { - compile project(":clouddriver-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + + implementation "com.amazonaws:aws-java-sdk-s3" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml" + implementation "com.google.apis:google-api-services-storage" + implementation 'com.google.auth:google-auth-library-oauth2-http' + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-security" + implementation "com.netflix.spectator:spectator-api" + implementation("com.netflix.spectator:spectator-ext-aws") { + // exclude transitives to keep spectator's version of the aws sdk from + // overriding what we specify elsewhere. It's not so much the aws sdk that + // causes problems, but its transitive dependencies -- jackson and then + // kotlin. 
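+ // (With transitive = false, Gradle resolves only the spectator-ext-aws artifact itself, so the + // aws-sdk and jackson versions used at runtime stay the ones this project declares elsewhere.)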
+ transitive = false + } + implementation "com.oracle.oci.sdk:oci-java-sdk-core" + implementation "com.sun.jersey:jersey-client:1.9.1" + implementation 'commons-io:commons-io:2.15.1' + implementation "com.squareup.okhttp3:okhttp" + implementation "org.apache.commons:commons-lang3" + implementation "org.apache.ivy:ivy:2.4.0" + implementation "org.apache.maven:maven-resolver-provider:3.5.4" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + + testImplementation "com.github.tomakehurst:wiremock-jre8-standalone" + testImplementation "io.spinnaker.kork:kork-aws" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit-pioneer:junit-pioneer:0.3.0" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation "org.springframework.boot:spring-boot-starter-test" + testImplementation "org.testcontainers:testcontainers" + testImplementation "org.testcontainers:localstack" + testImplementation "ru.lanwen.wiremock:wiremock-junit5:1.2.0" + + integrationImplementation project(":clouddriver-web") + integrationImplementation project(":clouddriver-artifacts") + integrationImplementation "org.springframework.boot:spring-boot-starter-test" + integrationImplementation "org.testcontainers:testcontainers" + integrationImplementation "org.testcontainers:mysql" + integrationImplementation "org.testcontainers:junit-jupiter" + integrationImplementation "com.mysql:mysql-connector-j" + integrationImplementation "io.rest-assured:rest-assured" +} + +task integrationTest(type: Test) { + description = 'Runs artifacts integration tests.' 
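+ // The integration source set boots a full clouddriver (see GitRepoTest) against a gitea + // container; the BUILD_DIR and GIT_WRAPPER variables below are read by the tests and by git-wrapper.sh.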
+ group = 'verification' + + useJUnitPlatform() + + environment "BUILD_DIR", buildDir + environment "GIT_WRAPPER", "${project.rootDir.toString()}/clouddriver-artifacts/src/integration/resources/git-wrapper.sh" + + testClassesDirs = sourceSets.integration.output.classesDirs + classpath = sourceSets.integration.runtimeClasspath + shouldRunAfter test + - compile spinnaker.dependency("frigga") - compile spinnaker.dependency("bootActuator") - compile spinnaker.dependency("bootWeb") - compile spinnaker.dependency("korkArtifacts") + minHeapSize = "512m" + maxHeapSize = "${testJvmMaxMemory}" + maxParallelForks = 4 - compile spinnaker.dependency("googleStorage") - compile spinnaker.dependency("awsS3") - compile "org.apache.commons:commons-compress:1.14" + testlogger { + theme 'standard' + showStandardStreams true + showPassedStandardStreams false + showFailedStandardStreams true + showPassed false + } } diff --git a/clouddriver-artifacts/src/integration/README.md b/clouddriver-artifacts/src/integration/README.md new file mode 100644 index 00000000000..657178c8d85 --- /dev/null +++ b/clouddriver-artifacts/src/integration/README.md @@ -0,0 +1,13 @@ +To start a Gitea container manually and explore how it works, you can use this `docker-compose.yml` file: + +```yaml +version: '2' +services: + web: + image: gitea/gitea:1.12.6 + ports: + - "3000:3000" + - "22:22" +``` + +Then run `docker-compose up`. diff --git a/clouddriver-artifacts/src/integration/java/com/netflix/spinnaker/clouddriver/artifacts/GitRepoTest.java b/clouddriver-artifacts/src/integration/java/com/netflix/spinnaker/clouddriver/artifacts/GitRepoTest.java new file mode 100644 index 00000000000..8c441a3efbd --- /dev/null +++ b/clouddriver-artifacts/src/integration/java/com/netflix/spinnaker/clouddriver/artifacts/GitRepoTest.java @@ -0,0 +1,412 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts; + +import static io.restassured.RestAssured.given; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.artifacts.gitRepo.GitRepoArtifactProviderProperties; +import com.netflix.spinnaker.clouddriver.artifacts.gitRepo.GitRepoFileSystem; +import com.netflix.spinnaker.clouddriver.artifacts.utils.GiteaContainer; +import io.restassured.RestAssured; +import io.restassured.response.Response; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.web.server.LocalServerPort; +import org.springframework.test.context.TestPropertySource; + +@SpringBootTest( + classes = {Main.class}, + webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) +@TestPropertySource(properties = {"spring.config.location = classpath:clouddriver.yml"}) +public class GitRepoTest { + + private static final GiteaContainer giteaContainer = new GiteaContainer(); + @LocalServerPort int port; + + static { + RestAssured.enableLoggingOfRequestAndResponseIfValidationFails(); + giteaContainer.start(); + } + + public String baseUrl() { + return "http://localhost:" + port; + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo account with user/pass credentials\n" + + "When sending download artifact request\n" + + "Then the repo is downloaded\n===") + @Test + public void shouldDownloadGitRepoWithUserPass() throws IOException, InterruptedException { + // given + Map body = + ImmutableMap.of( + "artifactAccount", "basic-auth", + "reference", giteaContainer.httpUrl(), + "type", "git/repo", + "version", "master"); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo account with access token\n" + + "When sending download artifact request\n" + + "Then the repo is downloaded\n===") + @Test + public void shouldDownloadGitRepoWithToken() throws IOException, InterruptedException { + // given + Map body = + ImmutableMap.of( + "artifactAccount", "token-auth", + "reference", giteaContainer.httpUrl(), + "type", "git/repo", + "version", "master"); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a git repo url without '.git' suffix\n" + + "When sending download artifact request\n" + + "Then the repo is downloaded\n===") + @Test + public void shouldDownloadGitRepoWithoutUrlSuffix() throws IOException, InterruptedException { + // 
given + Map body = + ImmutableMap.of( + "artifactAccount", "token-auth", + "reference", giteaContainer.httpUrl().replaceAll(".git$", ""), + "type", "git/repo", + "version", "master"); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo account with ssh keys\n" + + "When sending download artifact request\n" + + "Then the repo is downloaded\n===") + @Test + public void shouldDownloadGitRepoWithSsh() throws IOException, InterruptedException { + // given + Map body = + ImmutableMap.of( + "artifactAccount", "ssh-auth", + "reference", giteaContainer.sshUrl(), + "type", "git/repo", + "version", "master"); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo account with ssh keys\n" + + " And a known_hosts file\n" + + "When sending download artifact request\n" + + "Then the repo is downloaded\n===") + @Test + public void shouldDownloadGitRepoWithSshAndKnownHosts() throws IOException, InterruptedException { + // given + Map body = + ImmutableMap.of( + "artifactAccount", "ssh-auth-known-hosts", + "reference", giteaContainer.sshUrl(), + "type", "git/repo", + "version", "master"); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a previously downloaded gitrepo artifact\n" + + " And a new file added to the repo\n" + + "When sending a second download artifact request\n" + + "Then the new file is included\n===") + @Test + public void shouldDownloadGitRepoUpdates() throws IOException, InterruptedException { + // given + Map body = + ImmutableMap.of( + "artifactAccount", "token-auth", + "reference", giteaContainer.httpUrl(), + "type", "git/repo", + "version", "master"); + deleteTmpClone(body); + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + giteaContainer.addFileToRepo("newfile"); + + // when + response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md", "newfile"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo account\n" + + "When sending download artifact request including a subdirectory\n" + + "Only the subdirectory is downloaded\n===") + @Test + public void 
shouldDownloadGitRepoSubdir() throws IOException, InterruptedException { + // given + Map body = + ImmutableMap.of( + "artifactAccount", "token-auth", + "reference", giteaContainer.httpUrl(), + "type", "git/repo", + "version", "master", + "location", "subdir"); + deleteTmpClone(body); + giteaContainer.addFileToRepo("subdir/subfile"); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "subdir/subfile"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo token account\n" + + "When sending download artifact request including a specific SHA version\n" + + "Then the specified version is downloaded\n===") + @Test + public void shouldDownloadGitRepoShaWithToken() throws IOException, InterruptedException { + // given + giteaContainer.addFileToRepo("newfile"); + Map body = + ImmutableMap.of( + "artifactAccount", + "token-auth", + "reference", + giteaContainer.httpUrl(), + "type", + "git/repo", + "version", + giteaContainer.getFirstCommitSha()); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo ssh account\n" + + "When sending download artifact request including a specific SHA version\n" + + "Then the specified version is downloaded\n===") + @Test + public void shouldDownloadGitRepoShaWithSsh() throws IOException, InterruptedException { + // given + giteaContainer.addFileToRepo("newfile"); + Map body = + ImmutableMap.of( + "artifactAccount", + "ssh-auth", + "reference", + giteaContainer.sshUrl(), + "type", + "git/repo", + "version", + giteaContainer.getFirstCommitSha()); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + @DisplayName( + ".\n===\n" + + "Given a gitrepo account\n" + + "When sending download artifact request including a short SHA version\n" + + "Then the specified version is downloaded\n===") + @Test + public void shouldDownloadGitRepoShortSha() throws IOException, InterruptedException { + // given + giteaContainer.addFileToRepo("newfile"); + Map body = + ImmutableMap.of( + "artifactAccount", + "token-auth", + "reference", + giteaContainer.httpUrl(), + "type", + "git/repo", + "version", + giteaContainer.getFirstCommitSha().substring(0, 7)); + deleteTmpClone(body); + + // when + Response response = + given().body(body).contentType("application/json").put(baseUrl() + "/artifacts/fetch"); + if (response.statusCode() != 200) { + response.prettyPrint(); + } + assertEquals(200, response.statusCode()); + + // then + byte[] bytes = response.getBody().asByteArray(); + assertBytesHaveFile(bytes, "README.md"); + } + + private void assertBytesHaveFile(byte[] bytes, String... 
files) + throws IOException, InterruptedException { + Path archive = Paths.get(System.getenv("BUILD_DIR"), "downloads", "repo.tgz"); + if (archive.toFile().getParentFile().exists()) { + FileUtils.forceDelete(archive.toFile().getParentFile()); + FileUtils.forceMkdir(archive.toFile().getParentFile()); + } + FileUtils.writeByteArrayToFile(archive.toFile(), bytes); + Process process = + new ProcessBuilder("tar", "-zxvf", "repo.tgz") + .directory(archive.toFile().getParentFile()) + .redirectErrorStream(true) + .start(); + process.waitFor(); + assertEquals( + 0, + process.exitValue(), + IOUtils.toString(process.getInputStream(), Charset.defaultCharset())); + for (String file : files) { + assertTrue( + Paths.get(archive.toFile().getParent(), file).toFile().exists(), + "File " + file + " not found in response"); + } + } + + private static void deleteTmpClone(Map artifact) throws IOException { + Path localClone = + new GitRepoFileSystem(new GitRepoArtifactProviderProperties()) + .getLocalClonePath( + (String) artifact.get("reference"), (String) artifact.get("version")); + if (localClone.toFile().exists()) { + FileUtils.forceDelete(localClone.toFile()); + } + FileUtils.forceMkdir(localClone.toFile()); + } +} diff --git a/clouddriver-artifacts/src/integration/java/com/netflix/spinnaker/clouddriver/artifacts/utils/GiteaContainer.java b/clouddriver-artifacts/src/integration/java/com/netflix/spinnaker/clouddriver/artifacts/utils/GiteaContainer.java new file mode 100644 index 00000000000..e903dbd38aa --- /dev/null +++ b/clouddriver-artifacts/src/integration/java/com/netflix/spinnaker/clouddriver/artifacts/utils/GiteaContainer.java @@ -0,0 +1,217 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.utils; + +import static io.restassured.RestAssured.given; +import static org.junit.jupiter.api.Assertions.*; + +import com.google.common.collect.ImmutableMap; +import io.restassured.response.Response; +import java.io.FileReader; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.Map; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.jetbrains.annotations.NotNull; +import org.testcontainers.containers.GenericContainer; + +public class GiteaContainer extends GenericContainer { + + private static final String DOCKER_IMAGE = "gitea/gitea:1.12.6"; + private static final String REPO_NAME = "test"; + private static final String USER = "test"; + private static final String PASS = "test"; + private static final String SSH_KEY_PASS = "@!JrU/+j3e6HyL#"; + private static final Path SSH_KEY_FILE = + Paths.get(System.getenv("BUILD_DIR"), "ssh", "id_rsa_test"); + + public GiteaContainer() { + super(DOCKER_IMAGE); + withExposedPorts(3000, 22); + } + + @Override + public void start() { + super.start(); + String baseUrl = getExternalBaseUrl(); + try { + if (SSH_KEY_FILE.toFile().getParentFile().exists()) { + FileUtils.forceDelete(SSH_KEY_FILE.toFile().getParentFile()); + } + FileUtils.forceMkdir(SSH_KEY_FILE.toFile().getParentFile()); + createUser(baseUrl); + String token = createToken(baseUrl); + System.setProperty("gitea_token", token); + createSshKey(baseUrl); + System.setProperty("ssh_key_file", SSH_KEY_FILE.toString()); + System.setProperty("ssh_key_pass", SSH_KEY_PASS); + createRepo(baseUrl); + Path knownHosts = generateKnownHosts(); + System.setProperty("known_hosts", knownHosts.toString()); + } catch (IOException | InterruptedException e) { + fail("Exception initializing gitea: " + e.getMessage()); + } + } + + @NotNull + private String getExternalBaseUrl() { + return "http://" + this.getContainerIpAddress() + ":" + this.getMappedPort(3000); + } + + public String httpUrl() { + return "http://localhost:3000/" + USER + "/" + REPO_NAME + ".git"; + } + + public String sshUrl() { + return "git@localhost:" + USER + "/" + REPO_NAME + ".git"; + } + + private void createUser(String baseUrl) { + String formBody = + "db_type=SQLite3&db_host=localhost%3A3306&db_user=root&db_passwd=&db_name=gitea&ssl_mode=disable&db_schema=&" + + "charset=utf8&db_path=%2Fdata%2Fgitea%2Fgitea.db&app_name=Gitea%3A+Git+with+a+cup+of+tea&" + + "repo_root_path=%2Fdata%2Fgit%2Frepositories&lfs_root_path=%2Fdata%2Fgit%2Flfs&" + + "run_user=git&domain=localhost&ssh_port=22&http_port=3000&app_url=http%3A%2F%2Flocalhost%3A3000%2F&" + + "log_root_path=%2Fdata%2Fgitea%2Flog&smtp_host=&smtp_from=&smtp_user=&smtp_passwd=&" + + "enable_federated_avatar=on&enable_open_id_sign_in=on&enable_open_id_sign_up=on&" + + "default_allow_create_organization=on&default_enable_timetracking=on&" + + "no_reply_address=noreply.localhost" + + "&admin_name=" + + USER + + "&admin_passwd=" + + PASS + + "&admin_confirm_passwd=" + + PASS + + "&admin_email=test@test.com"; + + given() + .contentType("application/x-www-form-urlencoded") + .body(formBody) + .post(baseUrl + "/install") + .then() + .statusCode(302); + } + + private String createToken(String baseUrl) { + Map body = ImmutableMap.of("name", "test_token"); + + Response resp = + given() + .auth() + .preemptive() + .basic(USER, PASS) + .contentType("application/json") + .body(body) + .post(baseUrl + 
"/api/v1/users/" + USER + "/tokens"); + resp.prettyPrint(); + resp.then().statusCode(201); + return resp.jsonPath().getString("sha1"); + } + + private void createSshKey(String baseUrl) throws IOException, InterruptedException { + ExecResult execResult = + execInContainer( + "ssh-keygen", + "-t", + "rsa", + "-C", + "test@test.com", + "-f", + "/tmp/id_rsa_test", + "-N", + SSH_KEY_PASS); + assertEquals( + 0, + execResult.getExitCode(), + String.format("Stdout: %s\nStderr: %s", execResult.getStdout(), execResult.getStderr())); + copyFileFromContainer("/tmp/id_rsa_test", SSH_KEY_FILE.toString()); + copyFileFromContainer("/tmp/id_rsa_test.pub", SSH_KEY_FILE.toString() + ".pub"); + + Map body = + ImmutableMap.of( + "key", + IOUtils.toString(new FileReader(SSH_KEY_FILE.toString() + ".pub")), + "read_only", + true, + "title", + "test_key"); + + Response resp = + given() + .auth() + .preemptive() + .basic(USER, PASS) + .contentType("application/json") + .body(body) + .post(baseUrl + "/api/v1/admin/users/" + USER + "/keys"); + resp.then().statusCode(201); + } + + private void createRepo(String baseUrl) { + Map body = + ImmutableMap.of("auto_init", true, "name", REPO_NAME, "private", true); + Response resp = + given() + .auth() + .preemptive() + .basic(USER, PASS) + .contentType("application/json") + .body(body) + .post(baseUrl + "/api/v1/user/repos"); + resp.then().statusCode(201); + } + + private Path generateKnownHosts() throws IOException, InterruptedException { + ExecResult execResult = + execInContainer("ssh-keygen -y -f /data/ssh/ssh_host_rsa_key".split(" ")); + String publicKey = execResult.getStdout(); + Path knownHosts = Paths.get(System.getenv("BUILD_DIR"), "ssh", "known_hosts"); + FileUtils.writeStringToFile( + knownHosts.toFile(), "localhost " + publicKey, Charset.defaultCharset()); + return knownHosts; + } + + public void addFileToRepo(String fileName) { + Map body = ImmutableMap.of("someKey", "someValue"); + Response resp = + given() + .auth() + .preemptive() + .basic(USER, PASS) + .contentType("application/json") + .body(body) + .post(getExternalBaseUrl() + "/api/v1/repos/test/test/contents/" + fileName); + int sc = resp.statusCode(); + assertTrue(sc == 201 || sc == 422); // 422 is returned when the file was already added + } + + public String getFirstCommitSha() { + Response resp = + given() + .auth() + .preemptive() + .basic(USER, PASS) + .get(getExternalBaseUrl() + "/api/v1/repos/test/test/commits"); + resp.then().statusCode(200); + List shas = resp.jsonPath().getList("sha"); + return shas.get(shas.size() - 1); + } +} diff --git a/clouddriver-artifacts/src/integration/resources/clouddriver.yml b/clouddriver-artifacts/src/integration/resources/clouddriver.yml new file mode 100644 index 00000000000..1ada447b319 --- /dev/null +++ b/clouddriver-artifacts/src/integration/resources/clouddriver.yml @@ -0,0 +1,61 @@ +artifacts: + git-repo: + enabled: true + git-executable: ${GIT_WRAPPER} + clone-retention-minutes: 60 + clone-retention-max-bytes: 104857600 + accounts: + - name: basic-auth + username: test + password: test + - name: token-auth + token: ${gitea_token} + - name: ssh-auth + sshPrivateKeyFilePath: ${ssh_key_file} + sshPrivateKeyPassphrase: ${ssh_key_pass} + sshTrustUnknownHosts: true + - name: ssh-auth-known-hosts + sshPrivateKeyFilePath: ${ssh_key_file} + sshPrivateKeyPassphrase: ${ssh_key_pass} + sshTrustUnknownHosts: false + sshKnownHostsFilePath: ${known_hosts} + +logging.level.com.netflix.spinnaker.clouddriver.artifacts.gitRepo: DEBUG + +spring: + application: + name: 
clouddriver + +sql: + enabled: true + taskRepository: + enabled: true + cache: + enabled: true + readBatchSize: 500 + writeBatchSize: 300 + scheduler: + enabled: true + connectionPools: + default: + default: true + jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=& + tasks: + jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=& + migration: + jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=& + +redis: + enabled: false + cache: + enabled: false + scheduler: + enabled: false + taskRepository: + enabled: false + +services: + fiat: + baseUrl: http://fiat.net + front50: + baseUrl: http://front50.net diff --git a/clouddriver-artifacts/src/integration/resources/git-wrapper.sh b/clouddriver-artifacts/src/integration/resources/git-wrapper.sh new file mode 100755 index 00000000000..bc3a7de8af8 --- /dev/null +++ b/clouddriver-artifacts/src/integration/resources/git-wrapper.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +#------------------------------------------------------------------------------------- +# Wraps all git binary calls to the git binary inside gitea container, +# transferring any needed files and adjusting file paths between host and container. +#------------------------------------------------------------------------------------- + +set -e + +git_args=$@ + +# Change local paths for paths inside the container, and copy any needed files +function pre_process() { + container_name=$(docker ps | grep "gitea/gitea" | awk '{print $1}') + docker exec "$container_name" mkdir -p "$(pwd)" + if [[ $1 == "clone" || $1 == "init" ]]; then + docker exec -w "$(pwd)" "$container_name" rm -rf test + elif [[ $1 == "archive" ]]; then + { + while [[ "$#" -gt 0 ]]; do + case $1 in + --output) + dst_path=$2 + docker exec "$container_name" mkdir -p "$(dirname $dst_path)" + shift + ;; + esac + shift + done + } >/dev/null 2>&1 + fi + + docker exec -i "$container_name" mkdir -p "$BUILD_DIR/ssh" + docker cp "$BUILD_DIR/ssh/id_rsa_test" "$container_name":"$BUILD_DIR/ssh/id_rsa_test" + docker exec -i "$container_name" chmod 600 "$BUILD_DIR/ssh/id_rsa_test" + + docker cp "${BUILD_DIR}/ssh/known_hosts" "$container_name":"${BUILD_DIR}/ssh/known_hosts" + + if [[ -n "${SSH_ASKPASS}" ]] ; then + docker exec -i "$container_name" mkdir -p "$(dirname "$SSH_ASKPASS")" + docker cp "${SSH_ASKPASS}" "$container_name":"${SSH_ASKPASS}" + docker exec -i "$container_name" chmod +x "${SSH_ASKPASS}" + fi +} + +function execute() { + docker exec -w "$(pwd)" -i -e GIT_SSH_COMMAND -e SSH_ASKPASS -e SSH_KEY_PWD -e DISPLAY -e GIT_CURL_VERBOSE -e GIT_TRACE "$container_name" git $git_args +} + +# Make any changes locally reflecting the changes done inside the container +function post_process() { + if [[ $git_args =~ .*clone.* ]]; then + docker cp "$container_name":"$(pwd)/test" . + elif [[ $git_args =~ .*pull.* || $git_args =~ .*reset.* ]]; then + docker cp "$container_name":"$(pwd)" .. 
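+ # Mirror of pre_process: a pull or reset mutated the clone inside the
+ # container, so the whole working directory is copied back over the host
+ # copy; for archive (next branch) only the --output file captured in
+ # $dst_path during pre_process needs to come back out.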
+ elif [[ $git_args =~ .*archive.* ]]; then + docker cp "$container_name":"$dst_path" "$dst_path" + fi +} + +pre_process $@ +execute +post_process diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactCredentialsRepository.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactCredentialsRepository.java index c25c028b65c..b745540f025 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactCredentialsRepository.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactCredentialsRepository.java @@ -17,31 +17,40 @@ package com.netflix.spinnaker.clouddriver.artifacts; +import com.google.common.base.Strings; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.exceptions.MissingCredentialsException; import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; -@Component -public class ArtifactCredentialsRepository { - private Map> credentialsMap = new ConcurrentHashMap<>(); +@Slf4j +public class ArtifactCredentialsRepository + extends CompositeCredentialsRepository { - public void save(ArtifactCredentials credentials) { - String name = credentials.getName(); - List stored = credentialsMap.getOrDefault(name, new ArrayList<>()); - stored.add(credentials); - credentialsMap.put(credentials.getName(), stored); + public ArtifactCredentialsRepository( + List> repositories) { + super(repositories); } - public List getAllCredentials() { - return credentialsMap.values() - .stream() - .flatMap(Collection::stream) - .collect(Collectors.toList()); + public ArtifactCredentials getCredentialsForType(String name, String artifactType) { + if (Strings.isNullOrEmpty(name)) { + String message = "An artifact account must be supplied to download this artifact: " + name; + log.debug(message); + throw new IllegalArgumentException(message); + } + + return getAllCredentials().stream() + .filter(a -> a.getName().equals(name) && a.handlesType(artifactType)) + .findFirst() + .orElseThrow( + () -> + new MissingCredentialsException( + "Credentials '" + + name + + "' supporting artifact type '" + + artifactType + + "' cannot be found")); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloader.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloader.java index e2db3ff2da4..e32c7068dfe 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloader.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloader.java @@ -17,55 +17,26 @@ package com.netflix.spinnaker.clouddriver.artifacts; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import org.apache.commons.lang3.StringUtils; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import 
org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.constructor.SafeConstructor; - +import com.netflix.spinnaker.kork.exceptions.MissingCredentialsException; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; import java.io.IOException; import java.io.InputStream; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; @Component +@RequiredArgsConstructor public class ArtifactDownloader { - final private ArtifactCredentialsRepository artifactCredentialsRepository; - - final private ObjectMapper objectMapper; - - final private Yaml yamlParser; - - @Autowired - public ArtifactDownloader(ArtifactCredentialsRepository artifactCredentialsRepository, ObjectMapper objectMapper, Yaml yaml) { - this.artifactCredentialsRepository = artifactCredentialsRepository; - this.objectMapper = objectMapper; - this.yamlParser = yaml; - } + private final ArtifactCredentialsRepository artifactCredentialsRepository; public InputStream download(Artifact artifact) throws IOException { - String artifactAccount = artifact.getArtifactAccount(); - if (StringUtils.isEmpty(artifactAccount)) { - throw new IllegalArgumentException("An artifact account must be supplied to download this artifact: " + artifactAccount); + try { + return artifactCredentialsRepository + .getCredentialsForType(artifact.getArtifactAccount(), artifact.getType()) + .download(artifact); + } catch (MissingCredentialsException e) { + throw new NotFoundException(e); } - - ArtifactCredentials credentials = artifactCredentialsRepository.getAllCredentials() - .stream() - .filter(c -> c.getName().equals(artifactAccount)) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("No credentials with name '" + artifactAccount + "' could be found.")); - - if (!credentials.handlesType(artifact.getType())) { - throw new IllegalArgumentException("Artifact credentials '" + artifactAccount + "' cannot handle artifacts of type '" + artifact.getType() + "'"); - } - - return credentials.download(artifact); - } - - public T downloadAsYaml(Artifact artifact, Class clazz) throws IOException { - InputStream is = download(artifact); - Object parsed = yamlParser.load(is); - return objectMapper.convertValue(parsed, clazz); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactUtils.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactUtils.java deleted file mode 100644 index 8cbfb0c344c..00000000000 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactUtils.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.artifacts; -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.utils.IOUtils; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.IOException; -import java.util.Stack; - -public class ArtifactUtils { - public static final String GCE_IMAGE_TYPE = "gce/image"; - - public static void untarStreamToPath(InputStream inputStream, String basePath) throws IOException { - class DirectoryTimestamp { - public DirectoryTimestamp(File d, long m) { - directory = d; - millis = m; - } - public File directory; - public long millis; - }; - // Directories come in hierarchical order within the stream, but - // we need to set their timestamps after their children have been written. - Stack directoryStack = new Stack(); - - File baseDirectory = new File(basePath); - baseDirectory.mkdir(); - - TarArchiveInputStream tarStream = new TarArchiveInputStream(inputStream); - for (TarArchiveEntry entry = tarStream.getNextTarEntry(); - entry != null; - entry = tarStream.getNextTarEntry()) { - File target = new File(baseDirectory, entry.getName()); - if (entry.isDirectory()) { - directoryStack.push(new DirectoryTimestamp(target, entry.getModTime().getTime())); - continue; - } - writeStreamToFile(tarStream, target); - target.setLastModified(entry.getModTime().getTime()); - } - - while (!directoryStack.empty()) { - DirectoryTimestamp info = directoryStack.pop(); - info.directory.setLastModified(info.millis); - } - tarStream.close(); - } - - public static void writeStreamToFile(InputStream sourceStream, File target) throws IOException { - File parent = target.getParentFile(); - if (!parent.exists()) { - parent.mkdirs(); - } - OutputStream targetStream = new FileOutputStream(target); - IOUtils.copy(sourceStream, targetStream); - targetStream.close(); - } -} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/CredentialReader.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/CredentialReader.java new file mode 100644 index 00000000000..69b00b300d7 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/CredentialReader.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; + +@Slf4j +public class CredentialReader { + public static String credentialsFromFile(String filename) { + try { + String credentials = FileUtils.readFileToString(new File(filename), Charset.defaultCharset()); + return credentials.replace("\n", ""); + } catch (IOException e) { + throw new IllegalStateException("Could not read credentials file: " + filename, e); + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactAccount.java index aa796008b89..63dacb229a2 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactAccount.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactAccount.java @@ -17,16 +17,48 @@ package com.netflix.spinnaker.clouddriver.artifacts.bitbucket; - +import com.google.common.base.Strings; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; -import lombok.Data; -import lombok.EqualsAndHashCode; +import com.netflix.spinnaker.clouddriver.artifacts.config.BasicAuth; +import com.netflix.spinnaker.clouddriver.artifacts.config.TokenAuth; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; + +@NonnullByDefault +@Value +public class BitbucketArtifactAccount implements ArtifactAccount, BasicAuth, TokenAuth { + String name; + Optional username; + Optional password; + Optional usernamePasswordFile; + Optional token; + Optional tokenFile; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + BitbucketArtifactAccount( + String name, + String username, + String password, + String usernamePasswordFile, + String token, + String tokenFile) { + this.name = Strings.nullToEmpty(name); + this.username = Optional.ofNullable(Strings.emptyToNull(username)); + this.password = Optional.ofNullable(Strings.emptyToNull(password)); + this.usernamePasswordFile = Optional.ofNullable(Strings.emptyToNull(usernamePasswordFile)); + this.token = Optional.ofNullable(Strings.emptyToNull(token)); + this.tokenFile = Optional.ofNullable(Strings.emptyToNull(tokenFile)); + } -@EqualsAndHashCode(callSuper = true) -@Data -public class BitbucketArtifactAccount extends ArtifactAccount { - private String name; - private String username; - private String password; - private String usernamePasswordFile; + @ParametersAreNullableByDefault + BitbucketArtifactAccount( + String name, String username, String password, String usernamePasswordFile) { + this(name, username, password, usernamePasswordFile, null, null); + } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactConfiguration.java index d15c2f5e817..d4ff9c5e6fe 100644 --- 
a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactConfiguration.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactConfiguration.java @@ -17,65 +17,33 @@ package com.netflix.spinnaker.clouddriver.artifacts.bitbucket; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; -import com.squareup.okhttp.OkHttpClient; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import okhttp3.OkHttpClient; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Lazy; -import org.springframework.context.annotation.Scope; -import org.springframework.scheduling.annotation.EnableScheduling; - -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; @Configuration @ConditionalOnProperty("artifacts.bitbucket.enabled") -@EnableScheduling +@EnableConfigurationProperties(BitbucketArtifactProviderProperties.class) +@RequiredArgsConstructor @Slf4j -public class BitbucketArtifactConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("artifacts.bitbucket") - BitbucketArtifactProviderProperties bitbucketArtifactProviderProperties() { - return new BitbucketArtifactProviderProperties(); - } - - @Autowired - BitbucketArtifactProviderProperties bitbucketArtifactProviderProperties; - - @Autowired - ArtifactCredentialsRepository artifactCredentialsRepository; - - @Autowired - ObjectMapper objectMapper; - - @Bean - OkHttpClient bitbucketOkHttpClient() { - return new OkHttpClient(); - } +class BitbucketArtifactConfiguration { + private final BitbucketArtifactProviderProperties bitbucketArtifactProviderProperties; @Bean - List bitbucketArtifactCredentials(OkHttpClient bitbucketOkHttpClient) { - return bitbucketArtifactProviderProperties.getAccounts() - .stream() - .map(a -> { - try { - BitbucketArtifactCredentials c = new BitbucketArtifactCredentials(a, bitbucketOkHttpClient, objectMapper); - artifactCredentialsRepository.save(c); - return c; - } catch (Exception e) { - log.warn("Failure instantiating Bitbucket artifact account {}: ", a, e); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); + public CredentialsTypeProperties + bitbucketCredentialsProperties(OkHttpClient okHttpClient) { + return CredentialsTypeProperties + .builder() + .type(BitbucketArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(BitbucketArtifactCredentials.class) + .credentialsDefinitionClass(BitbucketArtifactAccount.class) + .defaultCredentialsSource(bitbucketArtifactProviderProperties::getAccounts) + .credentialsParser(bc -> new BitbucketArtifactCredentials(bc, okHttpClient)) + .build(); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentials.java 
b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentials.java index 1f28a21ee0c..4f24b035d8d 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentials.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentials.java @@ -17,103 +17,48 @@ package com.netflix.spinnaker.clouddriver.artifacts.bitbucket; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.ObjectMapper; +import static org.springframework.http.HttpHeaders.ACCEPT; +import static org.springframework.http.HttpHeaders.AUTHORIZATION; + +import com.google.common.collect.ImmutableList; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.squareup.okhttp.HttpUrl; -import com.squareup.okhttp.OkHttpClient; -import com.squareup.okhttp.Request; -import com.squareup.okhttp.Request.Builder; -import com.squareup.okhttp.Response; -import java.util.Arrays; -import java.util.List; -import lombok.Data; +import com.netflix.spinnaker.clouddriver.artifacts.config.SimpleHttpArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import lombok.Getter; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; +import okhttp3.Headers; +import okhttp3.OkHttpClient; +import org.springframework.http.MediaType; +@NonnullByDefault @Slf4j -@Data -public class BitbucketArtifactCredentials implements ArtifactCredentials { - private final String name; - private final List types = Arrays.asList("bitbucket/file"); - - @JsonIgnore - private final Builder requestBuilder; - - @JsonIgnore - OkHttpClient okHttpClient; +public class BitbucketArtifactCredentials + extends SimpleHttpArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-bitbucket"; + @Getter private final String name; + @Getter private final ImmutableList types = ImmutableList.of("bitbucket/file"); - @JsonIgnore - ObjectMapper objectMapper; - - public BitbucketArtifactCredentials(BitbucketArtifactAccount account, OkHttpClient okHttpClient, ObjectMapper objectMapper) { + BitbucketArtifactCredentials(BitbucketArtifactAccount account, OkHttpClient okHttpClient) { + super(okHttpClient, account); this.name = account.getName(); - this.okHttpClient = okHttpClient; - this.objectMapper = objectMapper; - Builder builder = new Request.Builder(); - boolean useLogin = !StringUtils.isEmpty(account.getUsername()) && !StringUtils.isEmpty(account.getPassword()); - boolean useUsernamePasswordFile = !StringUtils.isEmpty(account.getUsernamePasswordFile()); - boolean useAuth = useLogin || useUsernamePasswordFile; - if (useAuth) { - String authHeader = ""; - if (useUsernamePasswordFile) { - authHeader = Base64.encodeBase64String((credentialsFromFile(account.getUsernamePasswordFile())).getBytes()); - } else if (useLogin) { - authHeader = Base64.encodeBase64String((account.getUsername() + ":" + account.getPassword()).getBytes()); - } - builder.header("Authorization: Basic ", authHeader); - log.info("Loaded credentials for Bitbucket 
artifact account {}", account.getName()); - } else { - log.info("No credentials included with Bitbucket artifact account {}", account.getName()); - } - requestBuilder = builder; } - private String credentialsFromFile(String filename) { - try { - String credentials = FileUtils.readFileToString(new File(filename)); - return credentials.replace("\n", ""); - } catch (IOException e) { - log.error("Could not read Bitbucket credentials file {}", filename, e); - return null; + @Override + protected Headers getHeaders(BitbucketArtifactAccount account) { + Headers.Builder headers = new Headers.Builder(); + Optional token = account.getTokenAsString(); + if (token.isPresent()) { + headers.set(AUTHORIZATION, "Bearer " + token.get()); + headers.set(ACCEPT, MediaType.APPLICATION_JSON_VALUE); + log.info("Loaded credentials for Bitbucket Artifact Account {}", account.getName()); + return headers.build(); } + return super.getHeaders(account); } - public InputStream download(Artifact artifact) throws IOException { - HttpUrl.Builder fileUrl; - try { - fileUrl = HttpUrl.parse(artifact.getReference()).newBuilder(); - } catch (Exception e) { - throw new IllegalArgumentException("Malformed Bitbucket content URL in 'reference'. Read more here https://www.spinnaker.io/reference/artifacts/types/bitbucket-file/: " + e.getMessage(), e); - } - - Request downloadRequest = requestBuilder - .url(artifact.getReference()) - .build(); - - try { - Response downloadResponse = okHttpClient.newCall(downloadRequest).execute(); - return downloadResponse.body().byteStream(); - } catch (IOException e) { - throw new FailedDownloadException("Unable to download the contents of artifact " + artifact + ": " + e.getMessage(), e); - } - } - - public class FailedDownloadException extends IOException { - public FailedDownloadException(String message) { - super(message); - } - - public FailedDownloadException(String message, Throwable cause) { - super(message, cause); - } + @Override + public String getType() { + return CREDENTIALS_TYPE; } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactProviderProperties.java index b9deca95c1c..27724205796 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactProviderProperties.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactProviderProperties.java @@ -18,15 +18,15 @@ package com.netflix.spinnaker.clouddriver.artifacts.bitbucket; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider; -import lombok.Data; -import lombok.EqualsAndHashCode; - import java.util.ArrayList; import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; @Data -@EqualsAndHashCode(callSuper = false) -public class BitbucketArtifactProviderProperties extends ArtifactProvider { +@ConfigurationProperties("artifacts.bitbucket") +final class BitbucketArtifactProviderProperties + implements ArtifactProvider { private boolean enabled; private List accounts = new ArrayList<>(); } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactAccount.java index 
30195e2f0ef..05f6ce951a9 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactAccount.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactAccount.java @@ -17,6 +17,8 @@ package com.netflix.spinnaker.clouddriver.artifacts.config; -public abstract class ArtifactAccount { - public abstract String getName(); +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; + +public interface ArtifactAccount extends CredentialsDefinition { + String getName(); } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactCredentials.java index 701aed34f02..380e75bd950 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactCredentials.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactCredentials.java @@ -17,18 +17,60 @@ package com.netflix.spinnaker.clouddriver.artifacts.config; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.credentials.Credentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; import com.netflix.spinnaker.kork.artifacts.model.Artifact; - import java.io.IOException; import java.io.InputStream; import java.util.List; +import java.util.Optional; +import org.apache.commons.lang3.NotImplementedException; -public interface ArtifactCredentials { +@NonnullByDefault +public interface ArtifactCredentials extends Credentials { String getName(); + + /** + * Returns the artifact types that are handled by these credentials. + * + * @return A list of artifact types, which should be immutable. + */ List getTypes(); + + /** + * Downloads the specified artifact. + * + * @return a stream containing the contents of the artifact. It is the caller's responsibility to + *     close this stream as soon as possible.
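+ *     <p>For the HTTP-backed credentials in this change the stream typically wraps an open
+ *     OkHttp response body (see {@code SimpleHttpArtifactCredentials#download}), so an
+ *     unclosed stream can pin a pooled connection.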
+ */ InputStream download(Artifact artifact) throws IOException; + default Optional resolveArtifactName(Artifact artifact) { + return Optional.ofNullable(artifact.getName()); + } + + default Optional resolveArtifactVersion(Artifact artifact) { + return Optional.ofNullable(artifact.getVersion()); + } + + @JsonIgnore + default List getArtifactNames() { + throw new NotImplementedException( + "Artifact names are not supported for artifact types that the '" + + getName() + + "' account handles"); + } + + @JsonIgnore + default List getArtifactVersions(String artifactName) { + throw new NotImplementedException( + "Artifact versions are not supported for artifact types that the '" + + getName() + + "' account handles"); + } + default boolean handlesType(String type) { - return getTypes().stream().anyMatch(it -> it.equals(type)); + return getTypes().contains(type); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactProvider.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactProvider.java index 203351dd24d..2e57cb841fe 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactProvider.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/ArtifactProvider.java @@ -19,7 +19,8 @@ import java.util.List; -public abstract class ArtifactProvider { - public abstract boolean isEnabled(); - public abstract List getAccounts(); +public interface ArtifactProvider { + boolean isEnabled(); + + List getAccounts(); } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/BaseHttpArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/BaseHttpArtifactCredentials.java new file mode 100644 index 00000000000..768f9688f83 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/BaseHttpArtifactCredentials.java @@ -0,0 +1,89 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.config; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.io.IOException; +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import okhttp3.Headers; +import okhttp3.HttpUrl; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.Response; +import okhttp3.ResponseBody; + +@Slf4j +public abstract class BaseHttpArtifactCredentials { + @JsonIgnore private final OkHttpClient okHttpClient; + @JsonIgnore private final T account; + + protected BaseHttpArtifactCredentials(OkHttpClient okHttpClient, T account) { + this.okHttpClient = okHttpClient; + this.account = account; + } + + private Optional getAuthHeader(ArtifactAccount account) { + Optional authHeader = Optional.empty(); + + if (account instanceof TokenAuth) { + TokenAuth tokenAuth = (TokenAuth) account; + authHeader = tokenAuth.getTokenAuthHeader(); + } + + if (!authHeader.isPresent() && account instanceof BasicAuth) { + BasicAuth basicAuth = (BasicAuth) account; + authHeader = basicAuth.getBasicAuthHeader(); + } + return authHeader; + } + + protected Headers getHeaders(T account) { + Headers.Builder headers = new Headers.Builder(); + Optional authHeader = getAuthHeader(account); + if (authHeader.isPresent()) { + headers.set("Authorization", authHeader.get()); + log.info("Loaded credentials for artifact account {}", account.getName()); + } else { + log.info("No credentials included for artifact account {}", account.getName()); + } + return headers.build(); + } + + protected HttpUrl parseUrl(String stringUrl) { + HttpUrl httpUrl = HttpUrl.parse(stringUrl); + if (httpUrl == null) { + throw new IllegalArgumentException("Malformed URL: " + stringUrl); + } + return httpUrl; + } + + protected ResponseBody fetchUrl(String url) throws IOException { + return fetchUrl(parseUrl(url)); + } + + protected ResponseBody fetchUrl(HttpUrl url) throws IOException { + Request request = new Request.Builder().headers(getHeaders(account)).url(url).build(); + Response downloadResponse = okHttpClient.newCall(request).execute(); + if (!downloadResponse.isSuccessful()) { + downloadResponse.body().close(); + throw new IOException( + String.format("Received %d status code from %s", downloadResponse.code(), url.host())); + } + return downloadResponse.body(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/BasicAuth.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/BasicAuth.java new file mode 100644 index 00000000000..bb2e78d5a44 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/BasicAuth.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.config; + +import com.netflix.spinnaker.clouddriver.artifacts.CredentialReader; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import org.apache.commons.codec.binary.Base64; + +@NonnullByDefault +public interface BasicAuth { + Optional getUsername(); + + Optional getPassword(); + + Optional getUsernamePasswordFile(); + + default Optional getBasicAuthHeader() { + String usernamePassword = null; + if (getUsernamePasswordFile().isPresent()) { + usernamePassword = CredentialReader.credentialsFromFile(getUsernamePasswordFile().get()); + } else if (getUsername().isPresent() && getPassword().isPresent()) { + usernamePassword = getUsername().get() + ":" + getPassword().get(); + } + + return Optional.ofNullable(usernamePassword) + .map(s -> "Basic " + Base64.encodeBase64String(s.getBytes())); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/SimpleHttpArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/SimpleHttpArtifactCredentials.java new file mode 100644 index 00000000000..671ef42ff30 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/SimpleHttpArtifactCredentials.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.config; + +import com.netflix.spinnaker.clouddriver.artifacts.exceptions.FailedDownloadException; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.IOException; +import java.io.InputStream; +import okhttp3.HttpUrl; +import okhttp3.OkHttpClient; + +@NonnullByDefault +public abstract class SimpleHttpArtifactCredentials + extends BaseHttpArtifactCredentials { + protected SimpleHttpArtifactCredentials(OkHttpClient okHttpClient, T account) { + super(okHttpClient, account); + } + + protected HttpUrl getDownloadUrl(Artifact artifact) throws IOException { + HttpUrl url = HttpUrl.parse(artifact.getReference()); + if (url == null) { + throw new IllegalArgumentException( + "Malformed content URL in reference: " + + artifact.getReference() + + ". 
Read more here https://www.spinnaker.io/reference/artifacts/types/"); + } + return url; + } + + public final InputStream download(Artifact artifact) throws IOException { + HttpUrl downloadUrl = getDownloadUrl(artifact); + try { + return fetchUrl(downloadUrl).byteStream(); + } catch (IOException e) { + throw new FailedDownloadException( + "Unable to download the contents of artifact " + artifact + ": " + e.getMessage(), e); + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/TokenAuth.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/TokenAuth.java new file mode 100644 index 00000000000..88792d05da6 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/config/TokenAuth.java @@ -0,0 +1,40 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.config; + +import com.netflix.spinnaker.clouddriver.artifacts.CredentialReader; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; + +@NonnullByDefault +public interface TokenAuth { + Optional getToken(); + + Optional getTokenFile(); + + default Optional getTokenAuthHeader() { + return getTokenAsString().map(t -> "token " + t); + } + + default Optional getTokenAsString() { + Optional result = getTokenFile().map(CredentialReader::credentialsFromFile); + if (result.isPresent()) { + return result; + } + return getToken(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactAccount.java new file mode 100644 index 00000000000..8e34eb5aa30 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactAccount.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.custom; + +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import lombok.Value; + +@NonnullByDefault +@Value +final class CustomArtifactAccount implements ArtifactAccount { + private final String name = "custom-artifact"; +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactConfiguration.java new file mode 100644 index 00000000000..3102d4d3a2d --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactConfiguration.java @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.custom; + +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@RequiredArgsConstructor +@Slf4j +class CustomArtifactConfiguration { + + @Bean + public CredentialsRepository customArtifactCredentialsRepository() { + CredentialsRepository repository = + new MapBackedCredentialsRepository<>( + CustomArtifactCredentials.CREDENTIALS_TYPE, new NoopCredentialsLifecycleHandler<>()); + repository.save(new CustomArtifactCredentials(new CustomArtifactAccount())); + return repository; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactCredentials.java new file mode 100644 index 00000000000..99ceaf2ee2d --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/custom/CustomArtifactCredentials.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.custom; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.InputStream; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@NonnullByDefault +@Slf4j +final class CustomArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-custom"; + @Getter private final String name; + @Getter private final ImmutableList types = ImmutableList.of("custom/object"); + + CustomArtifactCredentials(CustomArtifactAccount account) { + name = account.getName(); + } + + public InputStream download(Artifact artifact) { + throw new UnsupportedOperationException( + "Custom references are passed on to cloud platforms to handle or process"); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactAccount.java new file mode 100644 index 00000000000..e6105588fae --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactAccount.java @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.docker; + +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import lombok.Value; + +@NonnullByDefault +@Value +final class DockerArtifactAccount implements ArtifactAccount { + @Override + public String getName() { + return "docker-registry"; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactConfiguration.java new file mode 100644 index 00000000000..35172f10be9 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactConfiguration.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.docker; + +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnExpression( + "${kubernetes.enabled:false} || ${cloudrun.enabled:false} || ${dockerRegistry.enabled:false}") +@RequiredArgsConstructor +@Slf4j +class DockerArtifactConfiguration { + + @Bean + public CredentialsRepository dockerArtifactCredentialsRepository() { + CredentialsRepository repository = + new MapBackedCredentialsRepository<>( + DockerArtifactCredentials.CREDENTIALS_TYPE, new NoopCredentialsLifecycleHandler<>()); + repository.save(new DockerArtifactCredentials(new DockerArtifactAccount())); + return repository; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactCredentials.java new file mode 100644 index 00000000000..542c4324c25 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/docker/DockerArtifactCredentials.java @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.docker;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import java.io.InputStream;
+import lombok.Value;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@NonnullByDefault
+@Value
+final class DockerArtifactCredentials implements ArtifactCredentials {
+  public static final String CREDENTIALS_TYPE = "artifacts-docker";
+  public static final String TYPE = "docker/image";
+
+  private final String name;
+  private final ImmutableList<String> types = ImmutableList.of(TYPE);
+
+  DockerArtifactCredentials(DockerArtifactAccount account) {
+    this.name = account.getName();
+  }
+
+  public InputStream download(Artifact artifact) {
+    throw new UnsupportedOperationException(
+        "Docker references are passed on to cloud platforms, which retrieve images directly");
+  }
+
+  @Override
+  public String getType() {
+    return CREDENTIALS_TYPE;
+  }
+}
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactAccount.java
index ccd8ef6d201..7b8256cb096 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactAccount.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactAccount.java
@@ -18,11 +18,11 @@
 package com.netflix.spinnaker.clouddriver.artifacts.embedded;
 
 import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import lombok.Value;
 
-@EqualsAndHashCode(callSuper = true)
-@Data
-public class EmbeddedArtifactAccount extends ArtifactAccount {
-  private String name = "embedded-artifact";
+@NonnullByDefault
+@Value
+final class EmbeddedArtifactAccount implements ArtifactAccount {
+  private final String name = "embedded-artifact";
 }
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactConfiguration.java
index 0b8871c00c1..fd95b06c6b2 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactConfiguration.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactConfiguration.java
@@ -17,33 +17,25 @@
 
 package com.netflix.spinnaker.clouddriver.artifacts.embedded;
 
-import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository;
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler;
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.scheduling.annotation.EnableScheduling;
-
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
 
 @Configuration
-@EnableScheduling
+@RequiredArgsConstructor
 @Slf4j
-public class EmbeddedArtifactConfiguration {
-  @Autowired
-  ArtifactCredentialsRepository artifactCredentialsRepository;
-
+class EmbeddedArtifactConfiguration {
   @Bean
-  List<EmbeddedArtifactAccount> embeddedArtifactAccounts() {
-    EmbeddedArtifactAccount account = new EmbeddedArtifactAccount();
-    EmbeddedArtifactCredentials credentials = new EmbeddedArtifactCredentials(account);
-    artifactCredentialsRepository.save(credentials);
-
-    return Collections.singletonList(account);
+  public CredentialsRepository<EmbeddedArtifactCredentials>
+      embeddedArtifactCredentialsRepository() {
+    CredentialsRepository<EmbeddedArtifactCredentials> repository =
+        new MapBackedCredentialsRepository<>(
+            EmbeddedArtifactCredentials.CREDENTIALS_TYPE, new NoopCredentialsLifecycleHandler<>());
+    repository.save(new EmbeddedArtifactCredentials(new EmbeddedArtifactAccount()));
+    return repository;
   }
 }
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactCredentials.java
index ca26beb306c..1ddd38ee774 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactCredentials.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/embedded/EmbeddedArtifactCredentials.java
@@ -18,36 +18,41 @@
 package com.netflix.spinnaker.clouddriver.artifacts.embedded;
 
 import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.collect.ImmutableList;
 import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import com.netflix.spinnaker.kork.artifacts.ArtifactTypes;
 import com.netflix.spinnaker.kork.artifacts.model.Artifact;
-import java.util.Arrays;
-import java.util.List;
-import lombok.Data;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.NotImplementedException;
-
 import java.io.ByteArrayInputStream;
-import java.io.IOException;
 import java.io.InputStream;
 import java.util.Base64;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.NotImplementedException;
 
+@NonnullByDefault
 @Slf4j
-@Data
-public class EmbeddedArtifactCredentials implements ArtifactCredentials {
-  private final String name;
-  private final List<String> types = Arrays.asList("embedded/base64");
+final class EmbeddedArtifactCredentials implements ArtifactCredentials {
+  public static final String CREDENTIALS_TYPE = "artifacts-embedded";
+  @Getter private final String name;
 
-  @JsonIgnore
-  private final Base64.Decoder base64Decoder;
+  @Getter
+  private final ImmutableList<String> types =
+      ImmutableList.of(
+          ArtifactTypes.EMBEDDED_BASE64.getMimeType(), ArtifactTypes.REMOTE_BASE64.getMimeType());
 
-  public EmbeddedArtifactCredentials(EmbeddedArtifactAccount account) {
+  @JsonIgnore private final Base64.Decoder base64Decoder;
+
+  EmbeddedArtifactCredentials(EmbeddedArtifactAccount account) {
     name = account.getName();
     base64Decoder = Base64.getDecoder();
   }
 
-  public InputStream download(Artifact artifact) throws IOException {
+  public InputStream download(Artifact artifact) {
     String type = artifact.getType();
-    if (type.equals("embedded/base64")) {
+    if (ArtifactTypes.EMBEDDED_BASE64.getMimeType().equals(type))
{ + return fromBase64(artifact); + } else if (ArtifactTypes.REMOTE_BASE64.getMimeType().equals(type)) { return fromBase64(artifact); } else { throw new NotImplementedException("Embedded type '" + type + "' is not handled."); @@ -61,6 +66,11 @@ private InputStream fromBase64(Artifact artifact) { @Override public boolean handlesType(String type) { - return type.startsWith("embedded/"); + return type.startsWith("embedded/") || type.startsWith("remote/"); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/exceptions/FailedDownloadException.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/exceptions/FailedDownloadException.java new file mode 100644 index 00000000000..ebd3ffb909b --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/exceptions/FailedDownloadException.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.exceptions; + +import java.io.IOException; + +public class FailedDownloadException extends IOException { + public FailedDownloadException(String message) { + super(message); + } + + public FailedDownloadException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/front50/Front50ArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/front50/Front50ArtifactConfiguration.java new file mode 100644 index 00000000000..4e07690f03d --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/front50/Front50ArtifactConfiguration.java @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.front50;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.core.services.Front50Service;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository;
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@RequiredArgsConstructor
+@Slf4j
+class Front50ArtifactConfiguration {
+  @Bean
+  public CredentialsRepository<Front50ArtifactCredentials> front50ArtifactCredentialsRepository(
+      ObjectMapper objectMapper, Front50Service front50Service) {
+    CredentialsRepository<Front50ArtifactCredentials> repository =
+        new MapBackedCredentialsRepository<>(
+            Front50ArtifactCredentials.CREDENTIALS_TYPE, new NoopCredentialsLifecycleHandler<>());
+    repository.save(new Front50ArtifactCredentials(objectMapper, front50Service));
+    return repository;
+  }
+}
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/front50/Front50ArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/front50/Front50ArtifactCredentials.java
new file mode 100644
index 00000000000..718af5c4905
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/front50/Front50ArtifactCredentials.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.front50;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials;
+import com.netflix.spinnaker.clouddriver.core.services.Front50Service;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import com.netflix.spinnaker.security.AuthenticatedRequest;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+@NonnullByDefault
+@Slf4j
+final class Front50ArtifactCredentials implements ArtifactCredentials {
+  public static final String CREDENTIALS_TYPE = "artifacts-front50";
+  private static final String ACCOUNT_NAME = "front50ArtifactCredentials";
+  private static final String URL_PREFIX = "spinnaker://";
+
+  @Getter private final String name = ACCOUNT_NAME;
+  @Getter private final ImmutableList<String> types = ImmutableList.of("front50/pipelineTemplate");
+
+  @JsonIgnore private final Front50Service front50Service;
+  @JsonIgnore private final ObjectMapper objectMapper;
+
+  Front50ArtifactCredentials(ObjectMapper objectMapper, Front50Service front50Service) {
+    this.objectMapper = objectMapper;
+    this.front50Service = front50Service;
+  }
+
+  @Override
+  public InputStream download(Artifact artifact) throws IOException {
+    String reference = Strings.nullToEmpty(artifact.getReference());
+    if (!reference.startsWith(URL_PREFIX)) {
+      throw new IllegalArgumentException(
+          String.format(
+              "'front50/pipelineTemplate' artifacts must be specified with a "
+                  + "'reference' starting with %s, got: %s",
+              URL_PREFIX, artifact));
+    }
+
+    Map<String, Object> pipelineTemplate;
+    String artifactId = reference.substring(URL_PREFIX.length());
+    if (artifactId.contains("@sha256:")) {
+      SplitResult result = splitReferenceOnToken(artifactId, "@sha256:");
+      pipelineTemplate =
+          AuthenticatedRequest.allowAnonymous(
+              () ->
+                  front50Service.getV2PipelineTemplate(
+                      result.pipelineTemplateId, "", result.version));
+    } else if (artifactId.contains(":")) {
+      SplitResult result = splitReferenceOnToken(artifactId, ":");
+      pipelineTemplate =
+          AuthenticatedRequest.allowAnonymous(
+              () ->
+                  front50Service.getV2PipelineTemplate(
+                      result.pipelineTemplateId, result.version, ""));
+    } else {
+      pipelineTemplate =
+          AuthenticatedRequest.allowAnonymous(
+              () -> front50Service.getV2PipelineTemplate(artifactId, "", ""));
+    }
+
+    return new ByteArrayInputStream(objectMapper.writeValueAsBytes(pipelineTemplate));
+  }
+
+  @Override
+  public List<String> getArtifactNames() {
+    return front50Service.listV2PipelineTemplates(Collections.singletonList("global")).stream()
+        .map(t -> (String) t.get("id"))
+        .distinct()
+        .collect(Collectors.toList());
+  }
+
+  private SplitResult splitReferenceOnToken(String reference, String token) {
+    String[] refParts = reference.split(token);
+    if (refParts.length != 2) {
+      throw new IllegalArgumentException("Malformed Front50 artifact reference: " + reference);
+    }
+    return new SplitResult(refParts[0], refParts[1]);
+  }
+
+  @Override
+  public String getType() {
+    return CREDENTIALS_TYPE;
+  }
+
+  @Data
@AllArgsConstructor + private static class SplitResult { + private String pipelineTemplateId; + private String version; + } + + // TODO(jacobkiefer): Implement getArtifactVersions() +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactAccount.java index da4cdb3d060..ed8154dd287 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactAccount.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactAccount.java @@ -17,13 +17,26 @@ package com.netflix.spinnaker.clouddriver.artifacts.gcs; +import com.google.common.base.Strings; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; -import lombok.Data; -import lombok.EqualsAndHashCode; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; -@EqualsAndHashCode(callSuper = true) -@Data -public class GcsArtifactAccount extends ArtifactAccount { - private String name; - private String jsonPath; +@NonnullByDefault +@Value +public class GcsArtifactAccount implements ArtifactAccount { + private final String name; + private final Optional jsonPath; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + GcsArtifactAccount(String name, String jsonPath) { + this.name = Strings.nullToEmpty(name); + this.jsonPath = Optional.ofNullable(Strings.emptyToNull(jsonPath)); + } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactConfiguration.java index a9e01035ba3..f53287cb21f 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactConfiguration.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactConfiguration.java @@ -17,56 +17,41 @@ package com.netflix.spinnaker.clouddriver.artifacts.gcs; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import java.io.IOException; +import java.security.GeneralSecurityException; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Scope; -import org.springframework.scheduling.annotation.EnableScheduling; - -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; @Configuration @ConditionalOnProperty("artifacts.gcs.enabled") -@EnableScheduling 
+@EnableConfigurationProperties(GcsArtifactProviderProperties.class) +@RequiredArgsConstructor @Slf4j -public class GcsArtifactConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("artifacts.gcs") - GcsArtifactProviderProperties gcsArtifactProviderProperties() { - return new GcsArtifactProviderProperties(); - } - - @Autowired - GcsArtifactProviderProperties gcsArtifactProviderProperties; - - @Autowired - ArtifactCredentialsRepository artifactCredentialsRepository; +class GcsArtifactConfiguration { + private final GcsArtifactProviderProperties gcsArtifactProviderProperties; @Bean - List gcsArtifactCredentials(String clouddriverUserAgentApplicationName) { - return gcsArtifactProviderProperties.getAccounts() - .stream() - .map(a -> { - try { - GcsArtifactCredentials c = new GcsArtifactCredentials(clouddriverUserAgentApplicationName, a); - artifactCredentialsRepository.save(c); - return c; - } catch (IOException | GeneralSecurityException e) { - log.warn("Failure instantiating gcs artifact account {}: ", a, e); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); + public CredentialsTypeProperties + gcsCredentialsProperties(String clouddriverUserAgentApplicationName) { + return CredentialsTypeProperties.builder() + .type(GcsArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(GcsArtifactCredentials.class) + .credentialsDefinitionClass(GcsArtifactAccount.class) + .defaultCredentialsSource(gcsArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new GcsArtifactCredentials(clouddriverUserAgentApplicationName, a); + } catch (IOException | GeneralSecurityException e) { + log.warn("Failure instantiating gcs artifact account {}: ", a, e); + return null; + } + }) + .build(); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactCredentials.java index da6d6bc1e17..9c938110be3 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactCredentials.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactCredentials.java @@ -18,58 +18,65 @@ package com.netflix.spinnaker.clouddriver.artifacts.gcs; import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; +import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.json.JsonFactory; -import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.json.gson.GsonFactory; import com.google.api.services.storage.Storage; import com.google.api.services.storage.StorageScopes; +import com.google.auth.http.HttpCredentialsAdapter; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.common.collect.ImmutableList; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import java.util.Arrays; -import java.util.List; -import lombok.Data; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; - import java.io.FileInputStream; import java.io.IOException; import 
java.io.InputStream;
 import java.security.GeneralSecurityException;
 import java.util.Collections;
+import java.util.Optional;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
+@NonnullByDefault
 @Slf4j
-@Data
 public class GcsArtifactCredentials implements ArtifactCredentials {
-  @JsonIgnore
-  private final Storage storage;
-  private final String name;
-  private final List<String> types = Arrays.asList("gcs/object");
+  public static final String CREDENTIALS_TYPE = "artifacts-gcs";
+  @Getter private final String name;
+  @Getter private final ImmutableList<String> types = ImmutableList.of("gcs/object");
 
-  public GcsArtifactCredentials(String applicationName, GcsArtifactAccount account) throws IOException, GeneralSecurityException {
-    HttpTransport transport = GoogleNetHttpTransport.newTrustedTransport();
-    JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
-    String credentialsPath = account.getJsonPath();
+  @JsonIgnore private final Storage storage;
 
-    GoogleCredential credential;
+  GcsArtifactCredentials(String applicationName, GcsArtifactAccount account)
+      throws IOException, GeneralSecurityException {
+    HttpTransport transport = GoogleNetHttpTransport.newTrustedTransport();
+    JsonFactory jsonFactory = GsonFactory.getDefaultInstance();
+    Optional<String> credentialsPath = account.getJsonPath();
 
-    if (!StringUtils.isEmpty(credentialsPath)) {
-      FileInputStream stream = new FileInputStream(credentialsPath);
-      credential = GoogleCredential.fromStream(stream, transport, jsonFactory)
-          .createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_READ_ONLY));
+    GoogleCredentials credentials;
+    if (credentialsPath.isPresent()) {
+      FileInputStream stream = new FileInputStream(credentialsPath.get());
+      credentials =
+          GoogleCredentials.fromStream(stream)
+              .createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_READ_ONLY));
       log.info("Loaded credentials from {}", credentialsPath);
     } else {
-      log.info("artifacts.gcs.enabled without artifacts.gcs.[].jsonPath. Using default application credentials.");
+      log.info(
+          "artifacts.gcs.enabled without artifacts.gcs.[].jsonPath. Using default application credentials.");
 
-      credential = GoogleCredential.getApplicationDefault();
+      credentials = GoogleCredentials.getApplicationDefault();
     }
 
+    HttpRequestInitializer requestInitializer = new HttpCredentialsAdapter(credentials);
+
     name = account.getName();
-    storage = new Storage.Builder(transport, jsonFactory, credential)
-        .setApplicationName(applicationName)
-        .build();
+    storage =
+        new Storage.Builder(transport, jsonFactory, requestInitializer)
+            .setApplicationName(applicationName)
+            .build();
   }
 
   public InputStream download(Artifact artifact) throws IOException {
@@ -81,22 +88,26 @@ public InputStream download(Artifact artifact) throws IOException {
     int slash = reference.indexOf("/");
 
     if (slash <= 0) {
-      throw new IllegalArgumentException("GCS references must be of the format gs://<bucket>/<file-path>, got: " + artifact);
+      throw new IllegalArgumentException(
+          "GCS references must be of the format gs://<bucket>/<file-path>, got: " + artifact);
     }
 
     String bucketName = reference.substring(0, slash);
     String path = reference.substring(slash + 1);
-    int pound = reference.lastIndexOf("#");
+    int pound = path.lastIndexOf("#");
     if (pound >= 0) {
       generation = Long.valueOf(path.substring(pound + 1));
       path = path.substring(0, pound);
     }
 
-    Storage.Objects.Get get = storage.objects()
-        .get(bucketName, path)
-        .setGeneration(generation);
+    Storage.Objects.Get get = storage.objects().get(bucketName, path).setGeneration(generation);
 
     return get.executeMediaAsInputStream();
   }
+
+  @Override
+  public String getType() {
+    return CREDENTIALS_TYPE;
+  }
 }
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactProviderProperties.java
index 66a76690761..5bf08d3b79a 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactProviderProperties.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gcs/GcsArtifactProviderProperties.java
@@ -18,13 +18,14 @@
 package com.netflix.spinnaker.clouddriver.artifacts.gcs;
 
 import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider;
-import lombok.Data;
-
 import java.util.ArrayList;
 import java.util.List;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
 
 @Data
-public class GcsArtifactProviderProperties extends ArtifactProvider {
+@ConfigurationProperties("artifacts.gcs")
+final class GcsArtifactProviderProperties implements ArtifactProvider {
   private boolean enabled;
   private List<GcsArtifactAccount> accounts = new ArrayList<>();
 }
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitJobExecutor.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitJobExecutor.java
new file mode 100644
index 00000000000..1f0107aceb4
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitJobExecutor.java
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.gitRepo; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import com.netflix.spinnaker.clouddriver.jobs.JobRequest; +import com.netflix.spinnaker.clouddriver.jobs.JobResult; +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.nio.charset.Charset; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.regex.Pattern; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.jetbrains.annotations.NotNull; +import org.springframework.util.StringUtils; + +@Slf4j +public class GitJobExecutor { + + private static final String SSH_KEY_PWD_ENV_VAR = "SSH_KEY_PWD"; + private static final Pattern FULL_SHA_PATTERN = Pattern.compile("[0-9a-f]{40}"); + private static final Pattern SHORT_SHA_PATTERN = Pattern.compile("[0-9a-f]{7}"); + private static Path genericAskPassBinary; + + @Getter private final GitRepoArtifactAccount account; + private final JobExecutor jobExecutor; + private final String gitExecutable; + private final AuthType authType; + private final Path askPassBinary; + + private enum AuthType { + USER_PASS, + USER_TOKEN, + TOKEN, + SSH, + NONE + } + + public GitJobExecutor( + GitRepoArtifactAccount account, JobExecutor jobExecutor, String gitExecutable) + throws IOException { + this.account = account; + this.jobExecutor = jobExecutor; + this.gitExecutable = gitExecutable; + if (!StringUtils.isEmpty(account.getUsername()) + && !StringUtils.isEmpty(account.getPassword())) { + authType = AuthType.USER_PASS; + } else if (!StringUtils.isEmpty(account.getUsername()) + && account.getTokenAsString().filter(t -> !StringUtils.isEmpty(t)).isPresent()) { + authType = AuthType.USER_TOKEN; + } else if (account.getTokenAsString().filter(t -> !StringUtils.isEmpty(t)).isPresent()) { + authType = AuthType.TOKEN; + } else if (!StringUtils.isEmpty(account.getSshPrivateKeyFilePath())) { + authType = AuthType.SSH; + } else { + authType = AuthType.NONE; + } + askPassBinary = initAskPass(); + } + + public void cloneOrPull(String repoUrl, String branch, Path localPath, String repoBasename) + throws IOException { + File localPathFile = localPath.toFile(); + if (!localPathFile.exists()) { + clone(repoUrl, branch, localPath, repoBasename); + return; + } + // localPath exists + + if (!localPathFile.isDirectory()) { + throw new IllegalArgumentException( + "Local path " + localPath.toString() + " is not a directory"); + } + // localPath exists and is a directory + + File[] localPathFiles = localPathFile.listFiles(); + if (localPathFiles == null || localPathFiles.length == 0) { + clone(repoUrl, branch, localPath, repoBasename); + return; + } + // localPath exists, is a directory and has files in it + + Path dotGitPath = Paths.get(localPath.toString(), repoBasename, ".git"); + if (!dotGitPath.toFile().exists()) { + log.warn( + "Directory {} for git/repo {}, branch {} has files or 
directories but {} was not found. The directory will be recreated to start with a new clone.", + localPath.toString(), + repoUrl, + branch, + dotGitPath.toString()); + clone(repoUrl, branch, localPath, repoBasename); + return; + } + // localPath has "/.git" directory + + pull(repoUrl, branch, dotGitPath.getParent()); + } + + private void clone(String repoUrl, String branch, Path destination, String repoBasename) + throws IOException { + if (!isValidReference(repoUrl)) { + throw new IllegalArgumentException( + "Git reference \"" + + repoUrl + + "\" is invalid for credentials with auth type " + + authType); + } + + File destinationFile = destination.toFile(); + if (destinationFile.exists()) { + FileUtils.deleteDirectory(destinationFile); + } + FileUtils.forceMkdir(destinationFile); + + if (FULL_SHA_PATTERN.matcher(branch).matches()) { + fetchFullSha(repoUrl, branch, destination, repoBasename); + } else { + cloneBranchOrTag(repoUrl, branch, destination, repoBasename); + } + } + + private void cloneBranchOrTag( + String repoUrl, String branch, Path destination, String repoBasename) throws IOException { + log.info("Cloning git/repo {} into {}", repoUrl, destination.toString()); + + String command = + gitExecutable + " clone --branch " + branch + " --depth 1 " + repoUrlWithAuth(repoUrl); + JobResult result = new CommandChain(destination).addCommand(command).runAll(); + if (result.getResult() == JobResult.Result.SUCCESS) { + return; + } + + String errorMsg = + command + " failed. Error: " + result.getError() + " Output: " + result.getOutput(); + if (!SHORT_SHA_PATTERN.matcher(branch).matches()) { + throw new IOException(errorMsg); + } + + log.warn(errorMsg + ". Trying a full clone and checkout " + branch); + File destFile = destination.toFile(); + FileUtils.deleteDirectory(destFile); + FileUtils.forceMkdir(destFile); + cloneAndCheckoutSha(repoUrl, branch, destination, repoBasename); + } + + private void fetchFullSha(String repoUrl, String sha, Path destination, String repoBasename) + throws IOException { + log.info("Fetching git/repo {} sha {} into {}", repoUrl, sha, destination.toString()); + + Path repoPath = Paths.get(destination.toString(), repoBasename); + if (!repoPath.toFile().mkdirs()) { + throw new IOException("Unable to create directory " + repoPath.toString()); + } + + JobResult result = + new CommandChain(repoPath) + .addCommand(gitExecutable + " init") + .addCommand(gitExecutable + " remote add origin " + repoUrlWithAuth(repoUrl)) + .addCommand(gitExecutable + " fetch --depth 1 origin " + sha) + .addCommand(gitExecutable + " reset --hard FETCH_HEAD") + .runAll(); + if (result.getResult() == JobResult.Result.SUCCESS) { + return; + } + + // Some git servers don't allow to directly fetch specific commits + // (error: Server does not allow request for unadvertised object), + // fallback to full clone and checkout SHA + log.warn( + "Unable to directly fetch specific sha, trying full clone. 
Error: " + result.getError()); + + FileUtils.forceDelete(repoPath.toFile()); + + cloneAndCheckoutSha(repoUrl, sha, destination, repoBasename); + } + + private void cloneAndCheckoutSha( + String repoUrl, String sha, Path destination, String repoBasename) throws IOException { + Path repoPath = Paths.get(destination.toString(), repoBasename); + new CommandChain(destination) + .addCommand(gitExecutable + " clone " + repoUrlWithAuth(repoUrl)) + .runAllOrFail(); + new CommandChain(repoPath).addCommand(gitExecutable + " checkout " + sha).runAllOrFail(); + } + + private void pull(String repoUrl, String branch, Path localPath) throws IOException { + if (FULL_SHA_PATTERN.matcher(branch).matches()) { + log.info( + "Contents of git/repo {} for sha {} already downloaded, no \"git pull\" needed.", + repoUrl, + branch); + return; + } + + JobResult result = + new CommandChain(localPath).addCommand(gitExecutable + " symbolic-ref HEAD").runAll(); + if (result.getResult() != JobResult.Result.SUCCESS) { + // detached HEAD state happens when "branch" is actually a short commit SHA + log.info( + "git/repo {} is in detached HEAD state for version {}, skipping \"git pull\"", + repoUrl, + branch); + return; + } + + log.info("Pulling git/repo {} into {}", repoUrl, localPath.toString()); + + new CommandChain(localPath).addCommand(gitExecutable + " pull").runAllOrFail(); + + if (!localPath.getParent().toFile().setLastModified(System.currentTimeMillis())) { + log.warn("Unable to set last modified time on {}", localPath.getParent().toString()); + } + } + + public void archive(Path localClone, String branch, String subDir, Path outputFile) + throws IOException { + String cmd = + gitExecutable + " archive --format tgz --output " + outputFile.toString() + " " + branch; + + if (!StringUtils.isEmpty(subDir)) { + cmd += " " + subDir; + } + + new CommandChain(localClone).addCommand(cmd).runAllOrFail(); + } + + /** + * For SSH authentication if the private key is password protected, SSH_ASKPASS binary is used to + * supply the password. https://git-scm.com/docs/gitcredentials#_requesting_credentials + */ + private Path initAskPass() throws IOException { + if (authType != AuthType.SSH) { + return null; + } + + if (!StringUtils.isEmpty(account.getSshPrivateKeyPassphraseCmd())) { + File pwdCmd = new File(account.getSshPrivateKeyPassphraseCmd()); + if (!pwdCmd.exists() || !pwdCmd.isFile()) { + throw new IOException( + "SshPrivateKeyPassphraseCmd doesn't exist or is not a file: " + + account.getSshPrivateKeyPassphraseCmd()); + } + return Paths.get(account.getSshPrivateKeyPassphraseCmd()); + } + + if (genericAskPassBinary == null) { + File askpass = File.createTempFile("askpass", null); + if (!askpass.setExecutable(true)) { + throw new IOException( + "Unable to make executable askpass script at " + askpass.toPath().toString()); + } + + // Default way for supplying the password of a private ssh key is to echo an env var with the + // password. + // This env var is set at runtime when executing git commands that need it. 
+ FileUtils.writeStringToFile( + askpass, + "#!/bin/sh\n" + "echo \"$" + SSH_KEY_PWD_ENV_VAR + "\"", + Charset.defaultCharset()); + genericAskPassBinary = askpass.toPath(); + } + + return genericAskPassBinary; + } + + private boolean isValidReference(String reference) { + if (authType == AuthType.USER_PASS + || authType == AuthType.USER_TOKEN + || authType == AuthType.TOKEN) { + return reference.startsWith("http"); + } + if (authType == AuthType.SSH) { + return reference.startsWith("ssh://") || reference.startsWith("git@"); + } + return true; + } + + private List cmdToList(String cmd) { + List cmdList = new ArrayList<>(); + switch (authType) { + case USER_PASS: + case USER_TOKEN: + case TOKEN: + // "sh" subshell is used so that environment variables can be used as part of the command + cmdList.add("sh"); + cmdList.add("-c"); + cmdList.add(cmd); + break; + case SSH: + default: + cmdList.addAll(Arrays.asList(cmd.split(" "))); + break; + } + return cmdList; + } + + private String repoUrlWithAuth(String repoUrl) { + if (authType != AuthType.USER_PASS + && authType != AuthType.USER_TOKEN + && authType != AuthType.TOKEN) { + return repoUrl; + } + + String authPart; + if (authType == AuthType.USER_PASS) { + authPart = "$GIT_USER:$GIT_PASS"; + } else if (authType == AuthType.USER_TOKEN) { + authPart = "$GIT_USER:$GIT_TOKEN"; + } else { + authPart = "token:$GIT_TOKEN"; + } + + try { + URI uri = new URI(repoUrl); + return String.format( + "%s://%s@%s%s%s", + uri.getScheme(), + authPart, + uri.getHost(), + (uri.getPort() > 0 ? ":" + uri.getPort() : ""), + uri.getRawPath()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Malformed git repo url " + repoUrl, e); + } + } + + private Map addEnvVars(Map env) { + Map result = new HashMap<>(env); + + switch (authType) { + case USER_PASS: + result.put("GIT_USER", encodeURIComponent(account.getUsername())); + result.put("GIT_PASS", encodeURIComponent(account.getPassword())); + break; + case USER_TOKEN: + result.put("GIT_USER", encodeURIComponent(account.getUsername())); + result.put( + "GIT_TOKEN", + encodeURIComponent( + account + .getTokenAsString() + .orElseThrow( + () -> + new IllegalArgumentException( + "Token or TokenFile must be present if using token auth.")))); + break; + case TOKEN: + result.put( + "GIT_TOKEN", + encodeURIComponent( + account + .getTokenAsString() + .orElseThrow( + () -> + new IllegalArgumentException( + "Token or TokenFile must be present if using token auth.")))); + break; + case SSH: + result.put("GIT_SSH_COMMAND", buildSshCommand()); + result.put("SSH_ASKPASS", askPassBinary.toString()); + result.put("DISPLAY", ":0"); + if (!StringUtils.isEmpty(account.getSshPrivateKeyPassphrase())) { + result.put(SSH_KEY_PWD_ENV_VAR, account.getSshPrivateKeyPassphrase()); + } + break; + } + + if (log.isDebugEnabled()) { + result.put("GIT_CURL_VERBOSE", "1"); + result.put("GIT_TRACE", "1"); + } + return result; + } + + @NotNull + private String buildSshCommand() { + String gitSshCmd = "setsid ssh"; + if (account.isSshTrustUnknownHosts()) { + gitSshCmd += " -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"; + } else if (!StringUtils.isEmpty(account.getSshKnownHostsFilePath())) { + gitSshCmd += " -o UserKnownHostsFile=" + account.getSshKnownHostsFilePath(); + } + if (!StringUtils.isEmpty(account.getSshPrivateKeyFilePath())) { + gitSshCmd += " -i " + account.getSshPrivateKeyFilePath(); + } + return gitSshCmd; + } + + private static String encodeURIComponent(String s) { + if (StringUtils.isEmpty(s)) { + return s; 
+ } + String result; + result = + URLEncoder.encode(s, UTF_8) + .replaceAll("\\+", "%20") + .replaceAll("\\*", "%2A") + .replaceAll("%21", "!") + .replaceAll("%27", "'") + .replaceAll("%28", "(") + .replaceAll("%29", ")") + .replaceAll("%7E", "~"); + return result; + } + + private class CommandChain { + private final Collection commands = new ArrayList<>(); + private final Path workingDir; + + CommandChain(Path workingDir) { + this.workingDir = workingDir; + } + + CommandChain addCommand(String command) { + commands.add( + new JobRequest( + cmdToList(command), addEnvVars(System.getenv()), this.workingDir.toFile())); + return this; + } + + void runAllOrFail() throws IOException { + for (JobRequest command : commands) { + log.debug("Executing command: \"{}\"", String.join(" ", command.getTokenizedCommand())); + JobResult result = jobExecutor.runJob(command); + if (result.getResult() != JobResult.Result.SUCCESS) { + throw new IOException( + String.format( + "%s failed. Error: %s Output: %s", + command.getTokenizedCommand(), result.getError(), result.getOutput())); + } + } + } + + JobResult runAll() { + JobResult result = null; + for (JobRequest command : commands) { + log.debug("Executing command: \"{}\"", String.join(" ", command.getTokenizedCommand())); + result = jobExecutor.runJob(command); + if (result.getResult() != JobResult.Result.SUCCESS) { + break; + } + } + return result; + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactAccount.java new file mode 100644 index 00000000000..b55643b57e4 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactAccount.java @@ -0,0 +1,68 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.gitRepo;
+
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount;
+import com.netflix.spinnaker.clouddriver.artifacts.config.TokenAuth;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Optional;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Builder;
+import lombok.Value;
+import org.springframework.boot.context.properties.ConstructorBinding;
+
+@NonnullByDefault
+@Value
+public class GitRepoArtifactAccount implements ArtifactAccount, TokenAuth {
+  private final String name;
+  private final String username;
+  private final String password;
+  private final Optional<String> token;
+  private final Optional<String> tokenFile;
+  private final String sshPrivateKeyFilePath;
+  private final String sshPrivateKeyPassphrase;
+  private final String sshPrivateKeyPassphraseCmd;
+  private final String sshKnownHostsFilePath;
+  private final boolean sshTrustUnknownHosts;
+
+  @Builder
+  @ConstructorBinding
+  @ParametersAreNullableByDefault
+  public GitRepoArtifactAccount(
+      String name,
+      String username,
+      String password,
+      String token,
+      String tokenFile,
+      String sshPrivateKeyFilePath,
+      String sshPrivateKeyPassphrase,
+      String sshPrivateKeyPassphraseCmd,
+      String sshKnownHostsFilePath,
+      boolean sshTrustUnknownHosts) {
+    this.name = Strings.nullToEmpty(name);
+    this.username = Strings.nullToEmpty(username);
+    this.password = Strings.nullToEmpty(password);
+    this.token = Optional.ofNullable(Strings.emptyToNull(token));
+    this.tokenFile = Optional.ofNullable(Strings.emptyToNull(tokenFile));
+    this.sshPrivateKeyFilePath = Strings.nullToEmpty(sshPrivateKeyFilePath);
+    this.sshPrivateKeyPassphrase = Strings.nullToEmpty(sshPrivateKeyPassphrase);
+    this.sshPrivateKeyPassphraseCmd = Strings.nullToEmpty(sshPrivateKeyPassphraseCmd);
+    this.sshKnownHostsFilePath = Strings.nullToEmpty(sshKnownHostsFilePath);
+    this.sshTrustUnknownHosts = sshTrustUnknownHosts;
+  }
+}
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactConfiguration.java
new file mode 100644
index 00000000000..1c0898dac0a
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactConfiguration.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.gitRepo; + +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import java.io.IOException; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("artifacts.git-repo.enabled") +@EnableConfigurationProperties(GitRepoArtifactProviderProperties.class) +@RequiredArgsConstructor +@Slf4j +class GitRepoArtifactConfiguration { + private final GitRepoArtifactProviderProperties gitRepoArtifactProviderProperties; + + @Bean + public CredentialsTypeProperties + gitCredentialsProperties( + @Value("${artifacts.git-repo.git-executable:git}") String gitExecutable, + JobExecutor jobExecutor, + GitRepoFileSystem gitRepoFileSystem) { + return CredentialsTypeProperties.builder() + .type(GitRepoArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(GitRepoArtifactCredentials.class) + .credentialsDefinitionClass(GitRepoArtifactAccount.class) + .defaultCredentialsSource(gitRepoArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new GitRepoArtifactCredentials( + new GitJobExecutor(a, jobExecutor, gitExecutable), gitRepoFileSystem); + } catch (IOException e) { + log.warn("Failure instantiating git artifact account {}: ", a, e); + return null; + } + }) + .build(); + } + + @Bean + public GitRepoFileSystem gitRepoFileSystem() { + return new GitRepoFileSystem(gitRepoArtifactProviderProperties); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactCredentials.java new file mode 100644 index 00000000000..589575a1b92 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactCredentials.java @@ -0,0 +1,151 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.gitRepo; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.jetbrains.annotations.NotNull; + +@NonnullByDefault +@Slf4j +public class GitRepoArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "git/repo"; + private static final Pattern GENERIC_URL_PATTERN = Pattern.compile("^.*/(.*)$"); + + @Getter private final ImmutableList types = ImmutableList.of("git/repo"); + @Getter private final String name; + + private final GitJobExecutor executor; + private final GitRepoFileSystem gitRepoFileSystem; + + public GitRepoArtifactCredentials(GitJobExecutor executor, GitRepoFileSystem gitRepoFileSystem) { + this.executor = executor; + this.gitRepoFileSystem = gitRepoFileSystem; + this.name = this.executor.getAccount().getName(); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } + + @Override + public InputStream download(Artifact artifact) throws IOException { + String repoUrl = artifact.getReference(); + String subPath = artifactSubPath(artifact); + String branch = artifactVersion(artifact); + Path stagingPath = gitRepoFileSystem.getLocalClonePath(repoUrl, branch); + String repoBasename = getRepoBasename(repoUrl); + Path outputFile = Paths.get(stagingPath.toString(), repoBasename + ".tgz"); + + try { + return getLockedInputStream(repoUrl, subPath, branch, stagingPath, repoBasename, outputFile); + } catch (InterruptedException e) { + throw new IOException( + "Interrupted while waiting to acquire file system lock for " + + repoUrl + + " (branch " + + branch + + ").", + e); + } + } + + @NotNull + private FileInputStream getLockedInputStream( + String repoUrl, + String subPath, + String branch, + Path stagingPath, + String repoBasename, + Path outputFile) + throws InterruptedException, IOException { + + if (gitRepoFileSystem.tryTimedLock(repoUrl, branch)) { + try { + return getInputStream(repoUrl, subPath, branch, stagingPath, repoBasename, outputFile); + + } finally { + // if not deleted explicitly, clones are deleted by + // gitRepoFileSystem depending on retention period + if (!gitRepoFileSystem.canRetainClone()) { + log.debug("Deleting clone for {} (branch {})", repoUrl, branch); + FileUtils.deleteDirectory(stagingPath.toFile()); + } + gitRepoFileSystem.unlock(repoUrl, branch); + } + + } else { + throw new IOException( + "Timeout waiting to acquire file system lock for " + + repoUrl + + " (branch " + + branch + + "). 
Waited " + + gitRepoFileSystem.getCloneWaitLockTimeoutSec() + + " seconds."); + } + } + + @NotNull + private FileInputStream getInputStream( + String repoUrl, + String subPath, + String branch, + Path stagingPath, + String repoBasename, + Path outputFile) + throws IOException { + executor.cloneOrPull(repoUrl, branch, stagingPath, repoBasename); + log.info("Creating archive for git/repo {}", repoUrl); + executor.archive(Paths.get(stagingPath.toString(), repoBasename), branch, subPath, outputFile); + return new FileInputStream(outputFile.toFile()); + } + + private String getRepoBasename(String url) { + Matcher matcher = GENERIC_URL_PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException( + "Git repo url " + url + " doesn't match regex " + GENERIC_URL_PATTERN); + } + return matcher.group(1).replaceAll("\\.git$", ""); + } + + private String artifactSubPath(Artifact artifact) { + if (!Strings.nullToEmpty(artifact.getLocation()).isEmpty()) { + return artifact.getLocation(); + } + return Strings.nullToEmpty((String) artifact.getMetadata("subPath")); + } + + private String artifactVersion(Artifact artifact) { + return !Strings.isNullOrEmpty(artifact.getVersion()) ? artifact.getVersion() : "master"; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactProviderProperties.java new file mode 100644 index 00000000000..623a69b095f --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactProviderProperties.java @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.gitRepo;
+
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@Data
+@ConfigurationProperties("artifacts.git-repo")
+public class GitRepoArtifactProviderProperties implements ArtifactProvider {
+  public static final int DEFAULT_CLONE_RETENTION_CHECK_MS = 60000;
+
+  private boolean enabled;
+  private int cloneRetentionMinutes = 0;
+  private int cloneRetentionCheckMs = DEFAULT_CLONE_RETENTION_CHECK_MS;
+  private long cloneRetentionMaxBytes = 1024 * 1024 * 100; // 100 MB
+  private int cloneWaitLockTimeoutSec = 60;
+  private List<GitRepoArtifactAccount> accounts = new ArrayList<>();
+}
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoFileSystem.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoFileSystem.java
new file mode 100644
index 00000000000..7e28d500c3d
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoFileSystem.java
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2021 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.gitRepo; + +import com.google.common.hash.Hashing; +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.springframework.scheduling.annotation.Scheduled; + +@Slf4j +public class GitRepoFileSystem { + private static final Path CLONES_HOME = + Paths.get(System.getProperty("java.io.tmpdir"), "gitrepos"); + + private final GitRepoArtifactProviderProperties config; + private final Map pathLocks = new ConcurrentHashMap<>(); + + public GitRepoFileSystem(GitRepoArtifactProviderProperties properties) { + this.config = properties; + } + + public Path getLocalClonePath(String repoUrl, String branch) { + return Paths.get(CLONES_HOME.toString(), hashCoordinates(repoUrl, branch)); + } + + public int getCloneWaitLockTimeoutSec() { + return config.getCloneWaitLockTimeoutSec(); + } + + public boolean tryTimedLock(String repoUrl, String branch) throws InterruptedException { + String hash = hashCoordinates(repoUrl, branch); + + log.debug( + "Trying filesystem timed lock for {} (branch {}), hash: {} for {} seconds", + repoUrl, + branch, + hash, + config.getCloneWaitLockTimeoutSec()); + + Lock lock = createOrGetLock(hash); + boolean locked = lock.tryLock(config.getCloneWaitLockTimeoutSec(), TimeUnit.SECONDS); + log.debug( + "Lock {} acquired for {} (branch {}), hash {}, lock instance: {}", + (locked ? "" : "NOT"), + repoUrl, + branch, + hash, + lock); + return locked; + } + + private synchronized Lock createOrGetLock(String hash) { + if (!pathLocks.containsKey(hash)) { + log.debug("Creating new lock instance for hash: {}", hash); + pathLocks.put(hash, new ReentrantLock()); + } + return pathLocks.get(hash); + } + + public boolean tryLock(String cloneHashDir) { + log.debug("Trying filesystem lock for hash: {}", cloneHashDir); + Lock lock = createOrGetLock(cloneHashDir); + boolean locked = lock.tryLock(); + log.debug("Lock {} acquired for hash {}", (locked ? 
"" : "NOT"), cloneHashDir); + return locked; + } + + public void unlock(String repoUrl, String branch) { + String hash = hashCoordinates(repoUrl, branch); + log.debug("Unlocking filesystem for {} (branch {}), hash: {}", repoUrl, branch, hash); + unlock(hash); + } + + public synchronized void unlock(String cloneHashDir) { + if (!pathLocks.containsKey(cloneHashDir)) { + log.warn( + "Attempting to unlock filesystem with hash {} that doesn't have a lock", cloneHashDir); + return; + } + Lock lock = pathLocks.get(cloneHashDir); + log.debug("Unlocking filesystem for hash {}, lock instance: {}", cloneHashDir, lock); + lock.unlock(); + } + + public boolean canRetainClone() { + return config.getCloneRetentionMinutes() != 0 && hasFreeDisk(); + } + + private boolean hasFreeDisk() { + long currentSize = 0; + if (CLONES_HOME.toFile().exists()) { + currentSize = FileUtils.sizeOfDirectory(CLONES_HOME.toFile()); + } + return currentSize >= 0 && currentSize < config.getCloneRetentionMaxBytes(); + } + + private String hashCoordinates(String repoUrl, String branch) { + String coordinates = + String.format( + "%s-%s", + Optional.ofNullable(repoUrl).orElse("unknownUrl"), + Optional.ofNullable(branch).orElse("defaultBranch")); + return Hashing.sha256().hashString(coordinates, Charset.defaultCharset()).toString(); + } + + @Scheduled( + fixedDelayString = + "${artifacts.git-repo.clone-retention-check-ms:" + + GitRepoArtifactProviderProperties.DEFAULT_CLONE_RETENTION_CHECK_MS + + "}") + private void deleteExpiredRepos() { + try { + if (!CLONES_HOME.toFile().exists() || config.getCloneRetentionMinutes() < 0) { + return; + } + File[] repos = CLONES_HOME.toFile().listFiles(); + if (repos == null) { + return; + } + for (File r : repos) { + long ageMin = ((System.currentTimeMillis() - r.lastModified()) / 1000) / 60; + if (ageMin < config.getCloneRetentionMinutes()) { + continue; + } + if (!tryLock(r.getName())) { + // move on if the directory is locked by another thread, just wait for the next cycle + continue; + } + try { + log.info("Deleting expired git clone {}", r.getName()); + FileUtils.forceDelete(r); + } finally { + unlock(r.getName()); + } + } + } catch (IOException e) { + log.error("Error deleting expired git clones, ignoring", e); + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactAccount.java index f93e5bd9272..9cc464c7866 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactAccount.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactAccount.java @@ -17,25 +17,56 @@ package com.netflix.spinnaker.clouddriver.artifacts.github; - +import com.google.common.base.Strings; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; -import lombok.Data; -import lombok.EqualsAndHashCode; +import com.netflix.spinnaker.clouddriver.artifacts.config.BasicAuth; +import com.netflix.spinnaker.clouddriver.artifacts.config.TokenAuth; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.apache.commons.lang3.StringUtils; +import org.springframework.boot.context.properties.ConstructorBinding; -@EqualsAndHashCode(callSuper = true) -@Data -public class 
GitHubArtifactAccount extends ArtifactAccount { +@NonnullByDefault +@Value +public class GitHubArtifactAccount implements ArtifactAccount, BasicAuth, TokenAuth { private String name; /* - One of the following are required for auth: - - username and password - - usernamePasswordFile : path to file containing "username:password" - - token - - tokenFile : path to file containing token - */ - private String username; - private String password; - private String usernamePasswordFile; - private String token; - private String tokenFile; + One of the following are required for auth: + - username and password + - usernamePasswordFile : path to file containing "username:password" + - token + - tokenFile : path to file containing token + */ + private final Optional username; + private final Optional password; + private final Optional usernamePasswordFile; + private final Optional token; + private final Optional tokenFile; + private final String githubAPIVersion; + private final boolean useContentAPI; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + GitHubArtifactAccount( + String name, + String username, + String password, + String usernamePasswordFile, + String token, + String tokenFile, + String githubAPIVersion, + boolean useContentAPI) { + this.name = Strings.nullToEmpty(name); + this.username = Optional.ofNullable(Strings.emptyToNull(username)); + this.password = Optional.ofNullable(Strings.emptyToNull(password)); + this.usernamePasswordFile = Optional.ofNullable(Strings.emptyToNull(usernamePasswordFile)); + this.token = Optional.ofNullable(Strings.emptyToNull(token)); + this.tokenFile = Optional.ofNullable(Strings.emptyToNull(tokenFile)); + this.githubAPIVersion = StringUtils.defaultString(githubAPIVersion, "v3"); + this.useContentAPI = useContentAPI; + } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactConfiguration.java index 0bcb7da083d..fb9c155f13f 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactConfiguration.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactConfiguration.java @@ -18,64 +18,40 @@ package com.netflix.spinnaker.clouddriver.artifacts.github; import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; -import com.squareup.okhttp.OkHttpClient; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import okhttp3.OkHttpClient; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Lazy; -import org.springframework.context.annotation.Scope; -import org.springframework.scheduling.annotation.EnableScheduling; - -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; @Configuration 
@ConditionalOnProperty("artifacts.github.enabled") -@EnableScheduling +@EnableConfigurationProperties(GitHubArtifactProviderProperties.class) +@RequiredArgsConstructor @Slf4j -public class GitHubArtifactConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("artifacts.github") - GitHubArtifactProviderProperties githubArtifactProviderProperties() { - return new GitHubArtifactProviderProperties(); - } - - @Autowired - GitHubArtifactProviderProperties gitHubArtifactProviderProperties; - - @Autowired - ArtifactCredentialsRepository artifactCredentialsRepository; - - @Autowired - ObjectMapper objectMapper; - - @Bean - OkHttpClient gitHubOkHttpClient() { - return new OkHttpClient(); - } +class GitHubArtifactConfiguration { + private final GitHubArtifactProviderProperties gitHubArtifactProviderProperties; @Bean - List gitHubArtifactCredentials(OkHttpClient gitHubOkHttpClient) { - return gitHubArtifactProviderProperties.getAccounts() - .stream() - .map(a -> { - try { - GitHubArtifactCredentials c = new GitHubArtifactCredentials(a, gitHubOkHttpClient, objectMapper); - artifactCredentialsRepository.save(c); - return c; - } catch (Exception e) { - log.warn("Failure instantiating GitHub artifact account {}: ", a, e); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); + public CredentialsTypeProperties + githubCredentialsProperties(OkHttpClient okHttpClient, ObjectMapper objectMapper) { + return CredentialsTypeProperties.builder() + .type(GitHubArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(GitHubArtifactCredentials.class) + .credentialsDefinitionClass(GitHubArtifactAccount.class) + .defaultCredentialsSource(gitHubArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new GitHubArtifactCredentials(a, okHttpClient, objectMapper); + } catch (Exception e) { + log.warn("Failure instantiating GitHub artifact account {}: ", a, e); + return null; + } + }) + .build(); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactCredentials.java index f50d7ab2637..3acc8dc0588 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactCredentials.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactCredentials.java @@ -20,135 +20,99 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.artifacts.config.SimpleHttpArtifactCredentials; +import com.netflix.spinnaker.clouddriver.artifacts.exceptions.FailedDownloadException; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.squareup.okhttp.HttpUrl; -import com.squareup.okhttp.OkHttpClient; -import com.squareup.okhttp.Request; -import com.squareup.okhttp.Request.Builder; -import com.squareup.okhttp.Response; -import java.util.Arrays; -import java.util.List; +import java.io.IOException; +import javax.annotation.Nullable; import lombok.Data; +import lombok.Getter; 
import lombok.extern.slf4j.Slf4j; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; +import okhttp3.Headers; +import okhttp3.HttpUrl; +import okhttp3.OkHttpClient; +import okhttp3.ResponseBody; +@NonnullByDefault @Slf4j -@Data -public class GitHubArtifactCredentials implements ArtifactCredentials { - private final String name; - private final List types = Arrays.asList("github/file"); - - @JsonIgnore - private final Builder requestBuilder; - - @JsonIgnore - OkHttpClient okHttpClient; - - @JsonIgnore - ObjectMapper objectMapper; - - public GitHubArtifactCredentials(GitHubArtifactAccount account, OkHttpClient okHttpClient, ObjectMapper objectMapper) { +public class GitHubArtifactCredentials extends SimpleHttpArtifactCredentials + implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-github"; + @Getter private final String name; + @Getter private final ImmutableList types = ImmutableList.of("github/file"); + + @JsonIgnore private final ObjectMapper objectMapper; + private final boolean useContentAPI; + + GitHubArtifactCredentials( + GitHubArtifactAccount account, OkHttpClient okHttpClient, ObjectMapper objectMapper) { + super(okHttpClient, account); this.name = account.getName(); - this.okHttpClient = okHttpClient; this.objectMapper = objectMapper; - Builder builder = new Request.Builder(); - boolean useLogin = !StringUtils.isEmpty(account.getUsername()) && !StringUtils.isEmpty(account.getPassword()); - boolean useUsernamePasswordFile = !StringUtils.isEmpty(account.getUsernamePasswordFile()); - boolean useToken = !StringUtils.isEmpty(account.getToken()); - boolean useTokenFile = !StringUtils.isEmpty(account.getTokenFile()); - boolean useAuth = useLogin || useToken || useUsernamePasswordFile || useTokenFile; - if (useAuth) { - String authHeader = ""; - if (useTokenFile) { - authHeader = "token " + credentialsFromFile(account.getTokenFile()); - } else if (useUsernamePasswordFile) { - authHeader = "Basic " + Base64.encodeBase64String((credentialsFromFile(account.getUsernamePasswordFile())).getBytes()); - } else if (useToken) { - authHeader = "token " + account.getToken(); - } else if (useLogin) { - authHeader = "Basic " + Base64.encodeBase64String((account.getUsername() + ":" + account.getPassword()).getBytes()); - } - builder.header("Authorization", authHeader); - log.info("Loaded credentials for GitHub Artifact Account {}", account.getName()); - } else { - log.info("No credentials included with GitHub Artifact Account {}", account.getName()); - } - requestBuilder = builder; + this.useContentAPI = account.isUseContentAPI(); } - private String credentialsFromFile(String filename) { - try { - String credentials = FileUtils.readFileToString(new File(filename)); - return credentials.replace("\n", ""); - } catch (IOException e) { - log.error("Could not read GitHub credentials file {}", filename, e); - return null; + @Override + protected Headers getHeaders(GitHubArtifactAccount account) { + Headers headers = super.getHeaders(account); + if (account.isUseContentAPI()) { + return headers + .newBuilder() + .add( + "Accept", + String.format("application/vnd.github.%s.raw", account.getGithubAPIVersion())) + .build(); } + return headers; } - public InputStream download(Artifact artifact) throws IOException { - HttpUrl.Builder metadataUrlBuilder; - try { - metadataUrlBuilder = 
HttpUrl.parse(artifact.getReference()).newBuilder(); - } catch (Exception e) { - throw new IllegalArgumentException("Malformed github content URL in 'reference'. Read more here https://www.spinnaker.io/reference/artifacts/types/github-file/: " + e.getMessage(), e); - } - String version = artifact.getVersion(); - if (StringUtils.isEmpty(version)) { + private HttpUrl getMetadataUrl(Artifact artifact) { + String version = Strings.nullToEmpty(artifact.getVersion()); + if (version.isEmpty()) { log.info("No version specified for artifact {}, using 'master'.", version); version = "master"; } - metadataUrlBuilder.addQueryParameter("ref", version); - Request metadataRequest = requestBuilder - .url(metadataUrlBuilder.build().toString()) - .build(); + return parseUrl(artifact.getReference()).newBuilder().addQueryParameter("ref", version).build(); + } - Response metadataResponse; + @Override + protected HttpUrl getDownloadUrl(Artifact artifact) throws IOException { + if (this.useContentAPI) { + return getMetadataUrl(artifact); + } + ResponseBody metadataResponse; try { - metadataResponse = okHttpClient.newCall(metadataRequest).execute(); + metadataResponse = fetchUrl(getMetadataUrl(artifact)); } catch (IOException e) { - throw new FailedDownloadException("Unable to determine the download URL of artifact " + artifact + ": " + e.getMessage(), e); + throw new FailedDownloadException( + "Unable to determine the download URL of artifact " + artifact + ": " + e.getMessage(), + e); } - String body = metadataResponse.body().string(); - ContentMetadata metadata = objectMapper.readValue(body, ContentMetadata.class); - if (StringUtils.isEmpty(metadata.downloadUrl)) { - throw new FailedDownloadException("Failed to retrieve your github artifact's download URL. This is likely due to incorrect auth setup. Artifact: " + artifact); + ContentMetadata metadata = + objectMapper.readValue(metadataResponse.string(), ContentMetadata.class); + if (Strings.isNullOrEmpty(metadata.downloadUrl)) { + throw new FailedDownloadException( + "Failed to retrieve your github artifact's download URL. This is likely due to incorrect auth setup. 
Artifact: " + + artifact); } + return parseUrl(metadata.getDownloadUrl()); + } - Request downloadRequest = requestBuilder - .url(metadata.getDownloadUrl()) - .build(); - - try { - Response downloadResponse = okHttpClient.newCall(downloadRequest).execute(); - return downloadResponse.body().byteStream(); - } catch (IOException e) { - throw new FailedDownloadException("Unable to download the contents of artifact " + artifact + ": " + e.getMessage(), e); - } + @Override + public String getType() { + return CREDENTIALS_TYPE; } @Data - public static class ContentMetadata { + static class ContentMetadata { @JsonProperty("download_url") + @Nullable private String downloadUrl; } - - public class FailedDownloadException extends IOException { - public FailedDownloadException(String message) { - super(message); - } - - public FailedDownloadException(String message, Throwable cause) { - super(message, cause); - } - } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactProviderProperties.java index 87a05d1cd73..bc6ef4a3ac6 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactProviderProperties.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/github/GitHubArtifactProviderProperties.java @@ -18,15 +18,14 @@ package com.netflix.spinnaker.clouddriver.artifacts.github; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider; -import lombok.Data; -import lombok.EqualsAndHashCode; - import java.util.ArrayList; import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; @Data -@EqualsAndHashCode(callSuper = false) -public class GitHubArtifactProviderProperties extends ArtifactProvider { +@ConfigurationProperties("artifacts.github") +final class GitHubArtifactProviderProperties implements ArtifactProvider { private boolean enabled; private List accounts = new ArrayList<>(); } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactAccount.java index a0949c4ba0a..ef457cc0ca3 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactAccount.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactAccount.java @@ -16,14 +16,29 @@ package com.netflix.spinnaker.clouddriver.artifacts.gitlab; +import com.google.common.base.Strings; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; -import lombok.Data; -import lombok.EqualsAndHashCode; +import com.netflix.spinnaker.clouddriver.artifacts.config.TokenAuth; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; -@EqualsAndHashCode(callSuper = true) -@Data -public class GitlabArtifactAccount extends ArtifactAccount { - private String name; - private String token; - private String tokenFile; +@NonnullByDefault +@Value +public class GitlabArtifactAccount implements ArtifactAccount, TokenAuth { + 
private final String name; + private final Optional token; + private final Optional tokenFile; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + GitlabArtifactAccount(String name, String token, String tokenFile) { + this.name = Strings.nullToEmpty(name); + this.token = Optional.ofNullable(Strings.emptyToNull(token)); + this.tokenFile = Optional.ofNullable(Strings.emptyToNull(tokenFile)); + } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactConfiguration.java index f92deb61e93..a794e44f208 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactConfiguration.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactConfiguration.java @@ -16,65 +16,40 @@ package com.netflix.spinnaker.clouddriver.artifacts.gitlab; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; -import com.squareup.okhttp.OkHttpClient; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import okhttp3.OkHttpClient; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Lazy; -import org.springframework.context.annotation.Scope; -import org.springframework.scheduling.annotation.EnableScheduling; - -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; @Configuration @ConditionalOnProperty("artifacts.gitlab.enabled") -@EnableScheduling +@EnableConfigurationProperties(GitlabArtifactProviderProperties.class) +@RequiredArgsConstructor @Slf4j -public class GitlabArtifactConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("artifacts.gitlab") - GitlabArtifactProviderProperties gitlabArtifactProviderProperties() { - return new GitlabArtifactProviderProperties(); - } - - @Autowired - GitlabArtifactProviderProperties gitlabArtifactProviderProperties; - - @Autowired - ArtifactCredentialsRepository artifactCredentialsRepository; - - @Autowired - ObjectMapper objectMapper; - - @Bean - OkHttpClient gitlabOkHttpClient() { - return new OkHttpClient(); - } +class GitlabArtifactConfiguration { + private final GitlabArtifactProviderProperties gitlabArtifactProviderProperties; @Bean - List gitlabArtifactCredentials(OkHttpClient gitlabOkHttpClient) { - return gitlabArtifactProviderProperties.getAccounts() - .stream() - .map(a -> { - try { - GitlabArtifactCredentials c = new GitlabArtifactCredentials(a, gitlabOkHttpClient, objectMapper); - artifactCredentialsRepository.save(c); - return c; - } catch (Exception e) { - log.warn("Failure instantiating Gitlab artifact account {}: ", a, e); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); + public CredentialsTypeProperties 
+ gitlabCredentialsProperties(OkHttpClient okHttpClient) { + return CredentialsTypeProperties.builder() + .type(GitlabArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(GitlabArtifactCredentials.class) + .credentialsDefinitionClass(GitlabArtifactAccount.class) + .defaultCredentialsSource(gitlabArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new GitlabArtifactCredentials(a, okHttpClient); + } catch (Exception e) { + log.warn("Failure instantiating Gitlab artifact account {}: ", a, e); + return null; + } + }) + .build(); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentials.java index 15c51f92924..d74d6d6899a 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentials.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentials.java @@ -16,107 +16,57 @@ package com.netflix.spinnaker.clouddriver.artifacts.gitlab; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.artifacts.config.SimpleHttpArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.squareup.okhttp.HttpUrl; -import com.squareup.okhttp.OkHttpClient; -import com.squareup.okhttp.Request; -import com.squareup.okhttp.Request.Builder; -import com.squareup.okhttp.Response; -import java.util.Arrays; -import java.util.List; -import lombok.Data; +import java.util.Optional; +import lombok.Getter; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; +import okhttp3.Headers; +import okhttp3.HttpUrl; +import okhttp3.OkHttpClient; +@NonnullByDefault @Slf4j -@Data -public class GitlabArtifactCredentials implements ArtifactCredentials { - private final String name; - private final List types = Arrays.asList("gitlab/file"); - - @JsonIgnore - private final Builder requestBuilder; - - @JsonIgnore - OkHttpClient okHttpClient; - - @JsonIgnore - ObjectMapper objectMapper; - - public GitlabArtifactCredentials(GitlabArtifactAccount account, OkHttpClient okHttpClient, ObjectMapper objectMapper) { +public class GitlabArtifactCredentials extends SimpleHttpArtifactCredentials + implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-gitlab"; + @Getter private final String name; + @Getter private final ImmutableList types = ImmutableList.of("gitlab/file"); + + GitlabArtifactCredentials(GitlabArtifactAccount account, OkHttpClient okHttpClient) { + super(okHttpClient, account); this.name = account.getName(); - this.okHttpClient = okHttpClient; - this.objectMapper = objectMapper; - Builder builder = new Request.Builder(); - boolean useToken = !StringUtils.isEmpty(account.getToken()); - boolean useTokenFile = !StringUtils.isEmpty(account.getTokenFile()); - boolean useAuth = useToken || useTokenFile; - if (useAuth) { - String authHeader = ""; - if 
(useTokenFile) { - authHeader = credentialsFromFile(account.getTokenFile()); - } else if (useToken) { - authHeader = account.getToken(); - } - - builder.header("Private-Token", authHeader); - log.info("Loaded credentials for Gitlab Artifact Account {}", account.getName()); - } else { - log.info("No credentials included with Gitlab Artifact Account {}", account.getName()); - } - requestBuilder = builder; } - private String credentialsFromFile(String filename) { - try { - String credentials = FileUtils.readFileToString(new File(filename)); - return credentials.replace("\n", ""); - } catch (IOException e) { - log.error("Could not read Gitlab credentials file {}", filename, e); - return null; + @Override + protected Headers getHeaders(GitlabArtifactAccount account) { + Headers.Builder headers = new Headers.Builder(); + Optional token = account.getTokenAsString(); + if (token.isPresent()) { + headers.set("Private-Token", token.get()); + log.info("Loaded credentials for GitLab Artifact Account {}", account.getName()); + } else { + log.info("No credentials included with GitLab Artifact Account {}", account.getName()); } + return headers.build(); } - public InputStream download(Artifact artifact) throws IOException { - HttpUrl.Builder fileUrl; - try { - // reference should use the Gitlab raw file download url: https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository - fileUrl = HttpUrl.parse(artifact.getReference()).newBuilder(); - } catch (Exception e) { - throw new IllegalArgumentException("Malformed gitlab content URL in 'reference'. Read more here https://www.spinnaker.io/reference/artifacts/types/gitlab-file/: " + e.getMessage(), e); - } - - String version = artifact.getVersion(); - if (StringUtils.isEmpty(version)) { + @Override + protected HttpUrl getDownloadUrl(Artifact artifact) { + String version = Strings.nullToEmpty(artifact.getVersion()); + if (version.isEmpty()) { log.info("No version specified for artifact {}, using 'master'.", version); version = "master"; } - - fileUrl.addQueryParameter("ref", version); - Request fileRequest = requestBuilder - .url(fileUrl.build().toString()) - .build(); - - try { - Response downloadResponse = okHttpClient.newCall(fileRequest).execute(); - return downloadResponse.body().byteStream(); - } catch (IOException e) { - throw new com.netflix.spinnaker.clouddriver.artifacts.gitlab.GitlabArtifactCredentials.FailedDownloadException("Unable to download the contents of artifact " + artifact + ": " + e.getMessage(), e); - } + return parseUrl(artifact.getReference()).newBuilder().addQueryParameter("ref", version).build(); } - public class FailedDownloadException extends IOException { - - public FailedDownloadException(String message, Throwable cause) { - super(message, cause); - } + @Override + public String getType() { + return CREDENTIALS_TYPE; } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactProviderProperties.java index 83eed8744c1..89aea0db71f 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactProviderProperties.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactProviderProperties.java @@ -17,15 +17,14 @@ package com.netflix.spinnaker.clouddriver.artifacts.gitlab; import 
com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider; -import lombok.Data; -import lombok.EqualsAndHashCode; - import java.util.ArrayList; import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; @Data -@EqualsAndHashCode(callSuper = false) -public class GitlabArtifactProviderProperties extends ArtifactProvider { +@ConfigurationProperties("artifacts.gitlab") +final class GitlabArtifactProviderProperties implements ArtifactProvider { private boolean enabled; private List accounts = new ArrayList<>(); } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactAccount.java new file mode 100644 index 00000000000..9fc3cc8ae33 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactAccount.java @@ -0,0 +1,59 @@ +/* + * Copyright 2018 Mirantis, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.helm; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.clouddriver.artifacts.config.BasicAuth; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; + +@NonnullByDefault +@Value +public class HelmArtifactAccount implements ArtifactAccount, BasicAuth { + private final String name; + /* + One of the following are required for auth: + - username and password + - usernamePasswordFile : path to file containing "username:password" + */ + private final Optional username; + private final Optional password; + private final Optional usernamePasswordFile; + private final String repository; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + public HelmArtifactAccount( + String name, + String username, + String password, + String usernamePasswordFile, + String repository) { + this.name = Strings.nullToEmpty(name); + this.username = Optional.ofNullable(Strings.emptyToNull(username)); + this.password = Optional.ofNullable(Strings.emptyToNull(password)); + this.usernamePasswordFile = Optional.ofNullable(Strings.emptyToNull(usernamePasswordFile)); + this.repository = Strings.nullToEmpty(repository); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactConfiguration.java new file mode 100644 index 00000000000..de13ac50756 --- /dev/null +++ 
b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactConfiguration.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Mirantis, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.helm;
+
+import com.netflix.spinnaker.credentials.CredentialsTypeProperties;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import okhttp3.OkHttpClient;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@ConditionalOnProperty("artifacts.helm.enabled")
+@EnableConfigurationProperties(HelmArtifactProviderProperties.class)
+@RequiredArgsConstructor
+@Slf4j
+class HelmArtifactConfiguration {
+  private final HelmArtifactProviderProperties helmArtifactProviderProperties;
+
+  @Bean
+  public CredentialsTypeProperties<HelmArtifactCredentials, HelmArtifactAccount>
+      helmCredentialsProperties(OkHttpClient okHttpClient) {
+    return CredentialsTypeProperties.<HelmArtifactCredentials, HelmArtifactAccount>builder()
+        .type(HelmArtifactCredentials.CREDENTIALS_TYPE)
+        .credentialsClass(HelmArtifactCredentials.class)
+        .credentialsDefinitionClass(HelmArtifactAccount.class)
+        .defaultCredentialsSource(helmArtifactProviderProperties::getAccounts)
+        .credentialsParser(
+            a -> {
+              try {
+                return new HelmArtifactCredentials(a, okHttpClient);
+              } catch (Exception e) {
+                log.warn("Failure instantiating Helm artifact account {}: ", a, e);
+                return null;
+              }
+            })
+        .build();
+  }
+}
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactCredentials.java
new file mode 100644
index 00000000000..28e32a95112
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactCredentials.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2018 Mirantis, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.helm;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials;
+import com.netflix.spinnaker.clouddriver.artifacts.config.BaseHttpArtifactCredentials;
+import com.netflix.spinnaker.clouddriver.artifacts.exceptions.FailedDownloadException;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import com.netflix.spinnaker.kork.web.exceptions.NotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+import okhttp3.OkHttpClient;
+import okhttp3.ResponseBody;
+
+@NonnullByDefault
+@Slf4j
+public class HelmArtifactCredentials extends BaseHttpArtifactCredentials<HelmArtifactAccount>
+    implements ArtifactCredentials {
+  public static final String CREDENTIALS_TYPE = "artifacts-helm";
+  @Getter private final String name;
+  @Getter private final ImmutableList<String> types = ImmutableList.of("helm/chart", "helm/index");
+
+  @JsonIgnore private final IndexParser indexParser;
+
+  @Override
+  public boolean handlesType(String type) {
+    return types.contains(type);
+  }
+
+  public HelmArtifactCredentials(HelmArtifactAccount account, OkHttpClient okHttpClient) {
+    super(okHttpClient, account);
+    this.name = account.getName();
+    this.indexParser = new IndexParser(account.getRepository());
+  }
+
+  @Override
+  public InputStream download(Artifact artifact) throws IOException {
+    InputStream index = downloadIndex();
+
+    if ("helm/index".equals(artifact.getType())) {
+      return index;
+    }
+
+    List<String> urls = indexParser.findUrls(index, artifact.getName(), artifact.getVersion());
+    ResponseBody downloadResponse;
+    for (String url : urls) {
+      try {
+        downloadResponse = fetchUrl(url);
+        return downloadResponse.byteStream();
+      } catch (IllegalArgumentException e) {
+        log.warn("Invalid url: {}", url);
+      }
+    }
+    throw new FailedDownloadException("Unable to download the contents of artifact");
+  }
+
+  public List<String> getArtifactNames() {
+    InputStream index;
+    List<String> names;
+    try {
+      index = downloadIndex();
+      names = indexParser.findNames(index);
+    } catch (IOException e) {
+      throw new NotFoundException("Failed to download chart names for '" + name + "' account", e);
+    }
+    return names;
+  }
+
+  public List<String> getArtifactVersions(String artifactName) {
+    InputStream index;
+    List<String> versions;
+    try {
+      index = downloadIndex();
+      versions = indexParser.findVersions(index, artifactName);
+    } catch (IOException e) {
+      throw new NotFoundException(
+          "Failed to download chart versions for '" + name + "' account", e);
+    }
+    return versions;
+  }
+
+  private InputStream downloadIndex() throws IOException {
+    try {
+      ResponseBody indexDownloadResponse = fetchUrl(indexParser.indexPath());
+      return indexDownloadResponse.byteStream();
+    } catch (IOException e) {
+      throw new FailedDownloadException(
+          "Failed to download index.yaml file in '" + indexParser.getRepository() + "' repository",
+          e);
+    }
+  }
+
+  @Override
+  public String getType() {
+    return CREDENTIALS_TYPE;
+  }
+}
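Note on the download flow above: HelmArtifactCredentials first fetches the repository's index.yaml, asks IndexParser for the candidate URLs of the requested chart (skipping malformed entries), and only then streams the chart archive. A minimal usage sketch, assuming a hypothetical account; the account name, repository URL, and chart coordinates below are illustrative placeholders, not part of this change:

    import com.netflix.spinnaker.kork.artifacts.model.Artifact;
    import java.io.InputStream;
    import okhttp3.OkHttpClient;

    class HelmDownloadExample {
      static void fetchChart() throws Exception {
        // Hypothetical account; "charts.example.com" is a placeholder repository.
        HelmArtifactAccount account =
            HelmArtifactAccount.builder()
                .name("example-helm-account")
                .repository("https://charts.example.com")
                .build();
        HelmArtifactCredentials credentials =
            new HelmArtifactCredentials(account, new OkHttpClient());

        Artifact chart =
            Artifact.builder().type("helm/chart").name("nginx").version("1.2.3").build();
        try (InputStream chartStream = credentials.download(chart)) {
          // chartStream yields the packaged chart (.tgz); leaving the version
          // blank would resolve the newest entry in index.yaml instead.
        }
      }
    }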
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactProviderProperties.java
new file mode 100644
index 00000000000..9a089760bb4
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactProviderProperties.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Mirantis, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.helm;
+
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@Data
+@ConfigurationProperties("artifacts.helm")
+final class HelmArtifactProviderProperties implements ArtifactProvider {
+  private boolean enabled;
+  private List<HelmArtifactAccount> accounts = new ArrayList<>();
+}
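The @ConstructorBinding constructors used by these account classes normalize blank configuration: empty strings and nulls both collapse to Optional.empty(), and a missing name becomes the empty string. A small illustration of that behavior, with made-up values (not from this change):

    class HelmAccountBindingExample {
      static void illustrate() {
        // Mirrors what Spring's constructor binding would produce for an
        // account configured without credentials (hypothetical values).
        HelmArtifactAccount account =
            new HelmArtifactAccount(
                "helm-stable", "", null, null, "https://charts.example.com");
        assert !account.getUsername().isPresent(); // "" collapses to Optional.empty()
        assert !account.getPassword().isPresent(); // null collapses the same way
        assert account.getRepository().equals("https://charts.example.com");
      }
    }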
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/IndexParser.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/IndexParser.java
new file mode 100644
index 00000000000..ddf1f960517
--- /dev/null
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/helm/IndexParser.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2018 Mirantis, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.helm;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.*;
+import java.util.stream.Collectors;
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.maven.artifact.versioning.ComparableVersion;
+
+@Slf4j
+@Data
+public class IndexParser {
+  private String repository;
+
+  public IndexParser(String repository) {
+    this.repository = repository;
+  }
+
+  public String indexPath() {
+    return repository + "/index.yaml";
+  }
+
+  public List<String> findNames(InputStream in) throws IOException {
+    IndexConfig indexConfig = buildIndexConfig(in);
+    return new ArrayList<>(indexConfig.getEntries().keySet());
+  }
+
+  public List<String> findVersions(InputStream in, String name) throws IOException {
+    if (name == null || name.isEmpty()) {
+      throw new IllegalArgumentException("Artifact name field should not be empty");
+    }
+    List<EntryConfig> configs = buildEntryConfigsByName(buildIndexConfig(in), name);
+    List<String> versions = new ArrayList<>();
+    configs.forEach(e -> versions.add(e.getVersion()));
+    return versions;
+  }
+
+  public List<String> findUrls(InputStream in, String name, String version) throws IOException {
+    if (name == null || name.isEmpty()) {
+      throw new IllegalArgumentException("Artifact name field should not be empty");
+    }
+    List<EntryConfig> configs = buildEntryConfigsByName(buildIndexConfig(in), name);
+    String validVersion = StringUtils.isBlank(version) ? findLatestVersion(configs) : version;
+    return resolveReferenceUrls(findUrlsByVersion(configs, validVersion));
+  }
+
+  private List<String> resolveReferenceUrls(List<String> urls) {
+    return urls.stream().map(this::resolveReferenceUrl).collect(Collectors.toList());
+  }
+
+  private String resolveReferenceUrl(String ref) {
+    String resolvedRef = ref;
+    String base = repository;
+    if (!base.endsWith("/")) {
+      base = base.concat("/");
+    }
+    try {
+      URL baseUrl = new URL(base);
+      URL resolvedUrl = new URL(baseUrl, ref);
+      resolvedRef = resolvedUrl.toExternalForm();
+    } catch (MalformedURLException e) {
+      log.error("Failed to resolve reference url: " + ref, e);
+    }
+    return resolvedRef;
+  }
+
+  private List<String> findUrlsByVersion(List<EntryConfig> configs, String version) {
+    List<String> urls = new ArrayList<>();
+    configs.forEach(
+        e -> {
+          if (e.getVersion().equals(version)) {
+            urls.addAll(e.getUrls());
+          }
+        });
+    if (urls.isEmpty()) {
+      throw new IllegalArgumentException(
+          "Could not find correct entry with artifact version " + version);
+    }
+    return urls;
+  }
+
+  private String findLatestVersion(List<EntryConfig> configs) {
+    return configs.stream()
+        .map(c -> new ComparableVersion(c.getVersion()))
+        .max(ComparableVersion::compareTo)
+        .orElseGet(() -> new ComparableVersion(""))
+        .toString();
+  }
+
+  private IndexConfig buildIndexConfig(InputStream in) throws IOException {
+    ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
+    IndexConfig indexConfig;
+    try {
+      indexConfig = mapper.readValue(in, IndexConfig.class);
+    } catch (IOException e) {
+      throw new IOException("Invalid index.yaml file in repository " + repository);
+    }
+    return indexConfig;
+  }
+
+  private List<EntryConfig> buildEntryConfigsByName(IndexConfig indexConfig, String name) {
+    List<EntryConfig> configs = indexConfig.getEntries().get(name);
+    if (configs == null || configs.isEmpty()) {
+      throw new IllegalArgumentException("Could not find correct entry with artifact name " + name);
+    }
+    return configs;
+  }
+}
+
+@Data
+@JsonIgnoreProperties(ignoreUnknown = true)
+class IndexConfig {
+  private Map<String, List<EntryConfig>> entries;
+}
+
+@Data
+@JsonIgnoreProperties(ignoreUnknown = true)
+class EntryConfig {
+  private String name;
+  private String version;
+  private List<String> urls;
+}
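To make the parsing contract concrete: entries in index.yaml map a chart name to a list of versioned entries, a blank version selects the highest ComparableVersion, and relative URLs are resolved against the repository base. A hedged sketch under those assumptions (same package assumed so the package-private config classes are visible; the repository URL and entries are invented):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.List;

    class IndexParserExample {
      static void resolveLatest() throws Exception {
        // A tiny, invented index.yaml with two versions of one chart.
        String indexYaml =
            "entries:\n"
                + "  nginx:\n"
                + "    - name: nginx\n"
                + "      version: 1.2.3\n"
                + "      urls: [charts/nginx-1.2.3.tgz]\n"
                + "    - name: nginx\n"
                + "      version: 1.10.0\n"
                + "      urls: [charts/nginx-1.10.0.tgz]\n";
        InputStream in = new ByteArrayInputStream(indexYaml.getBytes(StandardCharsets.UTF_8));

        IndexParser parser = new IndexParser("https://charts.example.com");
        // A blank version picks 1.10.0 (ComparableVersion orders it above 1.2.3),
        // and the relative url is resolved against the repository base:
        List<String> urls = parser.findUrls(in, "nginx", null);
        // urls -> [https://charts.example.com/charts/nginx-1.10.0.tgz]
      }
    }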
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactAccount.java
index 860cca79f3d..e87ff41ae51 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactAccount.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactAccount.java
@@ -17,28 +17,43 @@
 package com.netflix.spinnaker.clouddriver.artifacts.http;
 
-
 import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.base.Strings;
 import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import org.apache.commons.lang3.StringUtils;
+import com.netflix.spinnaker.clouddriver.artifacts.config.BasicAuth;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Optional;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Builder;
+import lombok.Value;
+import org.springframework.boot.context.properties.ConstructorBinding;
 
-@EqualsAndHashCode(callSuper = true)
-@Data
-public class HttpArtifactAccount extends ArtifactAccount {
-  private String name;
+@NonnullByDefault
+@Value
+public class HttpArtifactAccount implements ArtifactAccount, BasicAuth {
+  private final String name;
   /*
-   One of the following are required for auth:
-   - username and password
-   - usernamePasswordFile : path to file containing "username:password"
-  */
-  private String username;
-  private String password;
-  private String usernamePasswordFile;
+    One of the following are required for auth:
+    - username and password
+    - usernamePasswordFile : path to file containing "username:password"
+  */
+  private final Optional<String> username;
+  private final Optional<String> password;
+  private final Optional<String> usernamePasswordFile;
+
+  @Builder
+  @ConstructorBinding
+  @ParametersAreNullableByDefault
+  public HttpArtifactAccount(
+      String name, String username, String password, String usernamePasswordFile) {
+    this.name = Strings.nullToEmpty(name);
+    this.username = Optional.ofNullable(Strings.emptyToNull(username));
+    this.password = Optional.ofNullable(Strings.emptyToNull(password));
+    this.usernamePasswordFile = Optional.ofNullable(Strings.emptyToNull(usernamePasswordFile));
+  }
 
   @JsonIgnore
-  public boolean usesAuth() {
-    return !(StringUtils.isEmpty(username) && StringUtils.isEmpty(password) && StringUtils.isEmpty(usernamePasswordFile));
+  boolean usesAuth() {
+    return username.isPresent() || password.isPresent() || usernamePasswordFile.isPresent();
+  }
 }
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactConfiguration.java
index d5b6e89f038..12eb2ad05a1 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactConfiguration.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactConfiguration.java
@@ -17,70 +17,51 @@
 package com.netflix.spinnaker.clouddriver.artifacts.http;
 
-import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository;
-import com.squareup.okhttp.OkHttpClient;
+import com.netflix.spinnaker.credentials.CredentialsTypeProperties;
+import java.util.List;
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.beans.factory.config.ConfigurableBeanFactory;
+import okhttp3.OkHttpClient;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.Scope;
-import org.springframework.scheduling.annotation.EnableScheduling;
-
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
 
 @Configuration
 @ConditionalOnProperty("artifacts.http.enabled")
-@EnableScheduling
+@EnableConfigurationProperties(HttpArtifactProviderProperties.class)
+@RequiredArgsConstructor
 @Slf4j
-public class HttpArtifactConfiguration {
-  @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
-  @Bean
-  @ConfigurationProperties("artifacts.http")
-  HttpArtifactProviderProperties HttpArtifactProviderProperties() {
-    return new HttpArtifactProviderProperties();
-  }
-
-  @Autowired
-  HttpArtifactProviderProperties httpArtifactProviderProperties;
-
-  @Autowired
-  ArtifactCredentialsRepository artifactCredentialsRepository;
+class HttpArtifactConfiguration {
+  private final HttpArtifactProviderProperties httpArtifactProviderProperties;
+  private final HttpArtifactAccount noAuthAccount =
+      HttpArtifactAccount.builder().name("no-auth-http-account").build();
 
   @Bean
-  OkHttpClient httpOkHttpClient() {
-    return new OkHttpClient();
+  public CredentialsTypeProperties<HttpArtifactCredentials, HttpArtifactAccount>
+      httpCredentialsProperties(OkHttpClient okHttpClient) {
+    return CredentialsTypeProperties.<HttpArtifactCredentials, HttpArtifactAccount>builder()
+        .type(HttpArtifactCredentials.CREDENTIALS_TYPE)
+        .credentialsClass(HttpArtifactCredentials.class)
+        .credentialsDefinitionClass(HttpArtifactAccount.class)
+        .defaultCredentialsSource(this::getHttpAccounts)
+        .credentialsParser(
+            a -> {
+              try {
+                return new HttpArtifactCredentials(a, okHttpClient);
+              } catch (Exception e) {
+                log.warn("Failure instantiating Http artifact account {}: ", a, e);
+                return null;
+              }
+            })
+        .build();
   }
 
-  @Bean
-  List<HttpArtifactCredentials> httpArtifactCredentials(OkHttpClient httpOkHttpClient) {
-    List<HttpArtifactCredentials> result = httpArtifactProviderProperties.getAccounts()
-      .stream()
-      .map(a -> {
-        try {
-          HttpArtifactCredentials c = new HttpArtifactCredentials(a, httpOkHttpClient);
-          artifactCredentialsRepository.save(c);
-          return c;
-        } catch (Exception e) {
-          log.warn("Failure instantiating Http artifact account {}: ", a, e);
-          return null;
-        }
-      })
-      .filter(Objects::nonNull)
-      .collect(Collectors.toList());
-
-    if (httpArtifactProviderProperties.getAccounts().stream().noneMatch(HttpArtifactAccount::usesAuth)) {
-      HttpArtifactAccount noAuthAccount = new HttpArtifactAccount()
-        .setName("no-auth-http-account");
-      HttpArtifactCredentials noAuthCredentials = new HttpArtifactCredentials(noAuthAccount, httpOkHttpClient);
-
-      result.add(noAuthCredentials);
+  private List<HttpArtifactAccount> getHttpAccounts() {
+    List<HttpArtifactAccount> accounts = httpArtifactProviderProperties.getAccounts();
+    if (accounts.stream().noneMatch(HttpArtifactAccount::usesAuth)) {
+      accounts.add(noAuthAccount);
    }
-
-    return result;
+    return accounts;
   }
 }
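Worth noting in the configuration above: when no configured artifacts.http account defines credentials, a synthetic no-auth-http-account is appended, so artifacts referencing plain public URLs remain downloadable. A minimal illustration of the predicate driving that fallback (same package assumed for access to the package-private usesAuth(); the account name is hypothetical):

    class HttpNoAuthFallbackExample {
      static void illustrate() {
        // An account with neither username/password nor a credentials file.
        HttpArtifactAccount anonymous =
            HttpArtifactAccount.builder().name("public-mirror").build();
        assert !anonymous.usesAuth();
        // getHttpAccounts() would therefore append the synthetic
        // "no-auth-http-account" defined in HttpArtifactConfiguration.
      }
    }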
diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentials.java
index d46af03c989..718c823b0a5 100644
--- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentials.java
+++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentials.java
@@ -17,83 +17,29 @@
 package com.netflix.spinnaker.clouddriver.artifacts.http;
 
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
 import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials;
-import com.netflix.spinnaker.kork.artifacts.model.Artifact;
-import com.squareup.okhttp.HttpUrl;
-import com.squareup.okhttp.OkHttpClient;
-import com.squareup.okhttp.Request;
-import com.squareup.okhttp.Request.Builder;
-import com.squareup.okhttp.Response;
-import java.util.Arrays;
-import java.util.List;
-import lombok.Data;
+import com.netflix.spinnaker.clouddriver.artifacts.config.SimpleHttpArtifactCredentials;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.StringUtils;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
+import okhttp3.OkHttpClient;
 
+@NonnullByDefault
 @Slf4j
-@Data
-public class HttpArtifactCredentials implements ArtifactCredentials {
-  private final String name;
-  private final List<String> types = Arrays.asList("http/file");
-
-  @JsonIgnore
-  private final Builder requestBuilder;
-
-  @JsonIgnore
-  OkHttpClient okHttpClient;
-
-  public HttpArtifactCredentials(HttpArtifactAccount account, OkHttpClient okHttpClient) {
+public class HttpArtifactCredentials extends SimpleHttpArtifactCredentials<HttpArtifactAccount>
+    implements ArtifactCredentials {
+  public static final String CREDENTIALS_TYPE = "artifacts-http";
+  @Getter private final String name;
+  @Getter private final ImmutableList<String> types = ImmutableList.of("http/file");
+
+  HttpArtifactCredentials(HttpArtifactAccount account, OkHttpClient okHttpClient) {
+    super(okHttpClient, account);
     this.name = account.getName();
-    this.okHttpClient = okHttpClient;
-    Builder builder = new Request.Builder();
-    boolean useLogin = !StringUtils.isEmpty(account.getUsername()) && !StringUtils.isEmpty(account.getPassword());
-    boolean useUsernamePasswordFile = !StringUtils.isEmpty(account.getUsernamePasswordFile());
-    boolean useAuth = useLogin || useUsernamePasswordFile;
-    if (useAuth) {
-      String authHeader = "";
-      if (useUsernamePasswordFile) {
-        authHeader = "Basic " + Base64.encodeBase64String((credentialsFromFile(account.getUsernamePasswordFile())).getBytes());
-      } else if (useLogin) {
-        authHeader = "Basic " + Base64.encodeBase64String((account.getUsername() + ":" + account.getPassword()).getBytes());
-      }
-      builder.header("Authorization", authHeader);
-      log.info("Loaded credentials for http artifact account {}", account.getName());
-    } else
{ - log.info("No credentials included with http artifact account {}", account.getName()); - } - requestBuilder = builder; - } - - private String credentialsFromFile(String filename) { - try { - String credentials = FileUtils.readFileToString(new File(filename)); - return credentials.replace("\n", ""); - } catch (IOException e) { - log.error("Could not read http credentials file {}", filename, e); - return null; - } - } - - public InputStream download(Artifact artifact) throws IOException { - Request downloadRequest = requestBuilder - .url(artifact.getReference()) - .build(); - - Response downloadResponse = okHttpClient.newCall(downloadRequest).execute(); - return downloadResponse.body().byteStream(); } @Override - public boolean handlesType(String type) { - return type.equals("http/file"); + public String getType() { + return CREDENTIALS_TYPE; } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactProviderProperties.java index 0b215273ab4..1ae76be0dcc 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactProviderProperties.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactProviderProperties.java @@ -18,15 +18,14 @@ package com.netflix.spinnaker.clouddriver.artifacts.http; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider; -import lombok.Data; -import lombok.EqualsAndHashCode; - import java.util.ArrayList; import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; @Data -@EqualsAndHashCode(callSuper = false) -public class HttpArtifactProviderProperties extends ArtifactProvider { +@ConfigurationProperties("artifacts.http") +final class HttpArtifactProviderProperties implements ArtifactProvider { private boolean enabled; private List accounts = new ArrayList<>(); } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/DiskFreeingInputStream.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/DiskFreeingInputStream.java new file mode 100644 index 00000000000..968b3348eec --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/DiskFreeingInputStream.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Comparator; + +/** An {@link java.io.InputStream} that frees local disk resources when closed. 
*/ +final class DiskFreeingInputStream extends InputStream { + private final InputStream delegate; + private final Path deleteOnClose; + + public DiskFreeingInputStream(InputStream delegate, Path deleteOnClose) { + this.delegate = delegate; + this.deleteOnClose = deleteOnClose; + } + + @Override + public int read() throws IOException { + return delegate.read(); + } + + @Override + public void close() throws IOException { + super.close(); + if (Files.exists(deleteOnClose)) { + Files.walk(deleteOnClose) + .map(Path::toFile) + .sorted(Comparator.reverseOrder()) + .forEach(File::delete); + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactAccount.java new file mode 100644 index 00000000000..9fd3885f1e9 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactAccount.java @@ -0,0 +1,47 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.clouddriver.artifacts.ivy.settings.IvySettings; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import javax.annotation.Nullable; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; + +@NonnullByDefault +@Value +public class IvyArtifactAccount implements ArtifactAccount { + private final String name; + @Nullable private final IvySettings settings; + + @JsonIgnore + private final ImmutableList resolveConfigurations = ImmutableList.of("master"); + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + public IvyArtifactAccount(String name, IvySettings settings) { + this.name = Strings.nullToEmpty(name); + this.settings = settings; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactConfiguration.java new file mode 100644 index 00000000000..8783336386c --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactConfiguration.java @@ -0,0 +1,54 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("artifacts.ivy.enabled") +@EnableConfigurationProperties(IvyArtifactProviderProperties.class) +@RequiredArgsConstructor +@Slf4j +class IvyArtifactConfiguration { + private final IvyArtifactProviderProperties ivyArtifactProviderProperties; + + @Bean + public CredentialsTypeProperties + ivyCredentialsProperties() { + return CredentialsTypeProperties.builder() + .type(IvyArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(IvyArtifactCredentials.class) + .credentialsDefinitionClass(IvyArtifactAccount.class) + .defaultCredentialsSource(ivyArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new IvyArtifactCredentials(a); + } catch (Exception e) { + log.warn("Failure instantiating ivy artifact account {}: ", a, e); + return null; + } + }) + .build(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactCredentials.java new file mode 100644 index 00000000000..2a88e2e116e --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactCredentials.java @@ -0,0 +1,157 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.*; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.text.ParseException; +import java.util.Arrays; +import java.util.UUID; +import java.util.function.Supplier; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.apache.ivy.Ivy; +import org.apache.ivy.core.module.id.ModuleId; +import org.apache.ivy.core.module.id.ModuleRevisionId; +import org.apache.ivy.core.report.ResolveReport; +import org.apache.ivy.core.resolve.ResolveOptions; +import org.apache.ivy.util.AbstractMessageLogger; +import org.apache.ivy.util.Message; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@NonnullByDefault +@Slf4j +public class IvyArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-ivy"; + @Getter private final ImmutableList types = ImmutableList.of("ivy/file"); + private final IvyArtifactAccount account; + private final Supplier cacheBuilder; + + public IvyArtifactCredentials(IvyArtifactAccount account) { + this( + account, + () -> Paths.get(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString())); + } + + public IvyArtifactCredentials(IvyArtifactAccount account, Supplier cacheBuilder) { + this.cacheBuilder = cacheBuilder; + redirectIvyLogsToSlf4j(); + this.account = account; + } + + private static void redirectIvyLogsToSlf4j() { + Message.setDefaultLogger( + new AbstractMessageLogger() { + private final Logger logger = LoggerFactory.getLogger("org.apache.ivy"); + + @Override + protected void doProgress() {} + + @Override + protected void doEndProgress(String msg) { + log(msg, Message.MSG_INFO); + } + + @Override + public void log(String msg, int level) { + switch (level) { + case Message.MSG_ERR: + logger.error(msg); + break; + case Message.MSG_WARN: + logger.warn(msg); + break; + case Message.MSG_INFO: + logger.info(msg); + break; + case Message.MSG_DEBUG: + logger.debug(msg); + break; + case Message.MSG_VERBOSE: + logger.trace(msg); + default: + // do nothing + } + } + + @Override + public void rawlog(String msg, int level) { + log(msg, level); + } + }); + } + + public InputStream download(Artifact artifact) { + Path cacheDir = cacheBuilder.get(); + Ivy ivy = account.getSettings().toIvy(cacheDir); + + String[] parts = artifact.getReference().split(":"); + if (parts.length < 3) { + throw new IllegalArgumentException( + "Ivy artifact reference must have a group, artifact, and version separated by ':'"); + } + + ModuleRevisionId mrid = new ModuleRevisionId(new ModuleId(parts[0], parts[1]), parts[2]); + + try { + ResolveReport report = + ivy.resolve( + mrid, + (ResolveOptions) + new ResolveOptions() + .setTransitive(false) + .setConfs(account.getResolveConfigurations().toArray(new String[0])) + .setLog("download-only"), + true); + return Arrays.stream(report.getAllArtifactsReports()) + .findFirst() + .map( + rep -> { + try { + return new DiskFreeingInputStream( + new FileInputStream(rep.getLocalFile()), cacheDir); + } catch (FileNotFoundException e) { + throw new UncheckedIOException(e); + } + }) + .orElseThrow( + () -> + new IllegalArgumentException( + "Unable to resolve artifact for reference '" + + artifact.getReference() + + "'")); + } catch (ParseException 
| IOException e) { + throw new IllegalArgumentException(e); + } + } + + @Override + public String getName() { + return account.getName(); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactProviderProperties.java new file mode 100644 index 00000000000..ccee9f7f70c --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactProviderProperties.java @@ -0,0 +1,29 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import java.util.ArrayList; +import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; + +@Data +@ConfigurationProperties("artifacts.ivy") +final class IvyArtifactProviderProperties { + private boolean enabled; + private List accounts = new ArrayList<>(); +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/BintrayResolver.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/BintrayResolver.java new file mode 100644 index 00000000000..c787b3f8c20 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/BintrayResolver.java @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +final class BintrayResolver extends Resolver { + /** Bintray username of a repository owner. */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private String subject; + + /** User's repository name. 
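+ * <p>Illustrative example (names are hypothetical): for a repository browsed at {@code
+ * https://dl.bintray.com/someuser/maven}, the {@code subject} attribute would be {@code someuser}
+ * and this {@code repo} attribute would be {@code maven}.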
*/ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private String repo; + + @Override + public org.apache.ivy.plugins.resolver.BintrayResolver toIvyModel() { + org.apache.ivy.plugins.resolver.BintrayResolver bintrayResolver = + new org.apache.ivy.plugins.resolver.BintrayResolver(); + bintrayResolver.setRepo(repo); + bintrayResolver.setSubject(subject); + return super.toIvyModel(bintrayResolver); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/ChainResolver.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/ChainResolver.java new file mode 100644 index 00000000000..432df5fe8ec --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/ChainResolver.java @@ -0,0 +1,73 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import java.util.List; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +final class ChainResolver extends Resolver<org.apache.ivy.plugins.resolver.ChainResolver> { + @JsonIgnore private final Resolvers resolvers = new Resolvers(); + /** If the first module found should be returned. */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Boolean returnFirst; + /** If the chain should behave like a dual chain. 
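+ * In a dual chain, even after one resolver locates the module descriptor, the artifacts may still
+ * be served by any other resolver in the chain (this mirrors the {@code dual} attribute of Ivy's
+ * chain resolver).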
*/ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Boolean dual; + + @JacksonXmlElementWrapper(useWrapping = false) + public void setBintray(@Nullable List<BintrayResolver> bintray) { + this.resolvers.setBintray(bintray); + } + + @JacksonXmlElementWrapper(useWrapping = false) + public void setUrl(@Nullable List<UrlResolver> url) { + this.resolvers.setUrl(url); + } + + @JacksonXmlElementWrapper(useWrapping = false) + public void setIbiblio(@Nullable List<IBiblioResolver> ibiblio) { + this.resolvers.setIbiblio(ibiblio); + } + + @JacksonXmlElementWrapper(useWrapping = false) + public void setSsh(@Nullable List<SshResolver> ssh) { + this.resolvers.setSsh(ssh); + } + + @Override + public org.apache.ivy.plugins.resolver.ChainResolver toIvyModel() { + org.apache.ivy.plugins.resolver.ChainResolver chainResolver = + new org.apache.ivy.plugins.resolver.ChainResolver(); + if (returnFirst != null) { + chainResolver.setReturnFirst(returnFirst); + } + if (dual != null) { + chainResolver.setDual(dual); + } + resolvers.toDependencyResolvers().forEach(chainResolver::add); + return super.toIvyModel(chainResolver); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Credentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Credentials.java new file mode 100644 index 00000000000..6751f0932e6 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Credentials.java @@ -0,0 +1,37 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import javax.annotation.Nullable; +import lombok.Data; + +@Data +final class Credentials { + @JacksonXmlProperty(isAttribute = true) + private String host; + + @Nullable + @JacksonXmlProperty(isAttribute = true) + private String realm; + + @JacksonXmlProperty(isAttribute = true) + private String username; + + @JacksonXmlProperty(isAttribute = true) + private String password; +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IBiblioResolver.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IBiblioResolver.java new file mode 100644 index 00000000000..451716038c9 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IBiblioResolver.java @@ -0,0 +1,78 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +final class IBiblioResolver extends Resolver<org.apache.ivy.plugins.resolver.IBiblioResolver> { + /** The root of the artifact repository. */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private String root; + + /** + * A pattern describing the layout of the artifact repository. For example: {@code + * https://repo1.maven.org/maven2/[organisation]/[module]/[revision]/[artifact]-[revision].[ext]} + */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private String pattern; + + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Boolean m2compatible; + + /** + * If this resolver should use maven-metadata.xml files to list available revisions, otherwise use + * directory listing. + */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Boolean useMavenMetadata; + + /** If this resolver should use Maven POMs when it is already in {@link #m2compatible} mode. */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Boolean usepoms; + + @Override + public org.apache.ivy.plugins.resolver.IBiblioResolver toIvyModel() { + org.apache.ivy.plugins.resolver.IBiblioResolver biblioResolver = + new org.apache.ivy.plugins.resolver.IBiblioResolver(); + if (pattern != null) { + biblioResolver.setPattern(pattern); + } + if (root != null) { + biblioResolver.setRoot(root); + } + if (m2compatible != null) { + biblioResolver.setM2compatible(m2compatible); + } + if (useMavenMetadata != null) { + biblioResolver.setUseMavenMetadata(useMavenMetadata); + } + if (usepoms != null) { + biblioResolver.setUsepoms(usepoms); + } + return super.toIvyModel(biblioResolver); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IvySettings.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IvySettings.java new file mode 100644 index 00000000000..4ade01d2201 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IvySettings.java @@ -0,0 +1,79 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.List; +import javax.annotation.Nullable; +import lombok.Data; +import org.apache.ivy.Ivy; +import org.apache.ivy.plugins.resolver.DependencyResolver; +import org.apache.ivy.util.url.CredentialsStore; + +@JacksonXmlRootElement(localName = "ivysettings") +@Data +public final class IvySettings { + private Resolvers resolvers = new Resolvers(); + private Settings settings = new Settings(); + + @Nullable private Credentials credentials; + + public static IvySettings parse(String xml) { + try { + return new XmlMapper() + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + .readValue(xml, IvySettings.class); + } catch (IOException e) { + throw new UncheckedIOException("Unable to read Ivy settings", e); + } + } + + public Ivy toIvy(Path cache) { + return Ivy.newInstance(toIvySettings(cache)); + } + + org.apache.ivy.core.settings.IvySettings toIvySettings(Path cache) { + org.apache.ivy.core.settings.IvySettings ivySettings = + new org.apache.ivy.core.settings.IvySettings(); + List<DependencyResolver> dependencyResolvers = resolvers.toDependencyResolvers(); + if (dependencyResolvers.isEmpty()) { + throw new IllegalArgumentException("At least one ivy resolver is required"); + } + + dependencyResolvers.forEach(ivySettings::addResolver); + String defaultResolver = settings.getDefaultResolver(); + ivySettings.setDefaultResolver( + defaultResolver == null + ? dependencyResolvers.iterator().next().getName() + : defaultResolver); + if (credentials != null) { + CredentialsStore.INSTANCE.addCredentials( + credentials.getRealm(), + credentials.getHost(), + credentials.getUsername(), + credentials.getPassword()); + } + ivySettings.setDefaultCache(cache.toFile()); + ivySettings.validate(); + return ivySettings; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Pattern.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Pattern.java new file mode 100644 index 00000000000..815b68ccf84 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Pattern.java @@ -0,0 +1,24 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import lombok.Data; + +@Data +final class Pattern { + private String pattern; +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Resolver.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Resolver.java new file mode 100644 index 00000000000..ee3fb61f65f --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Resolver.java @@ -0,0 +1,35 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import lombok.Data; +import org.apache.ivy.plugins.resolver.DependencyResolver; + +@Data +abstract class Resolver<M extends DependencyResolver> { + /** The name which identifies the resolver. */ + @JacksonXmlProperty(isAttribute = true) + private String name; + + public abstract M toIvyModel(); + + protected M toIvyModel(M partial) { + partial.setName(name); + return partial; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Resolvers.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Resolvers.java new file mode 100644 index 00000000000..cc767d6cf78 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Resolvers.java @@ -0,0 +1,67 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nullable; +import lombok.Data; +import org.apache.ivy.plugins.resolver.DependencyResolver; + +@Data +final class Resolvers { + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List<BintrayResolver> bintray; + + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List<UrlResolver> url; + + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List<IBiblioResolver> ibiblio; + + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List<SshResolver> ssh; + + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List<ChainResolver> chain; + + public List<DependencyResolver> toDependencyResolvers() { + List<DependencyResolver> resolvers = new ArrayList<>(); + if (bintray != null) { + bintray.forEach(r -> resolvers.add(r.toIvyModel())); + } + if (url != null) { + url.forEach(r -> resolvers.add(r.toIvyModel())); + } + if (ibiblio != null) { + ibiblio.forEach(r -> resolvers.add(r.toIvyModel())); + } + if (ssh != null) { + ssh.forEach(r -> resolvers.add(r.toIvyModel())); + } + if (chain != null) { + chain.forEach(r -> resolvers.add(r.toIvyModel())); + } + return resolvers; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Settings.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Settings.java new file mode 100644 index 00000000000..10dcc278efc --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/Settings.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import javax.annotation.Nullable; +import lombok.Data; + +@Data +final class Settings { + @Nullable + @JacksonXmlProperty(isAttribute = true) + private String defaultResolver; +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/SshResolver.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/SshResolver.java new file mode 100644 index 00000000000..4a2dba62cc5 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/SshResolver.java @@ -0,0 +1,72 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import java.util.List; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +final class SshResolver extends Resolver<org.apache.ivy.plugins.resolver.SshResolver> { + /** The username to provide as a credential. */ + @JacksonXmlProperty(isAttribute = true) + private String user; + + /** The password to provide as a credential. */ + @JacksonXmlProperty(isAttribute = true) + private String password; + + /** The host to connect to. Defaults to the host given in the patterns; fails if none is set. */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private String host; + + /** The port to connect to. */ + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Integer port; + + /** Defines a pattern for Ivy files, using the pattern attribute. */ + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List<Pattern> ivy; + + /** Defines a pattern for artifacts, using the pattern attribute. */ + @JacksonXmlElementWrapper(useWrapping = false) + private List<Pattern> artifact; + + @Override + public org.apache.ivy.plugins.resolver.SshResolver toIvyModel() { + org.apache.ivy.plugins.resolver.SshResolver sshResolver = + new org.apache.ivy.plugins.resolver.SshResolver(); + sshResolver.setHost(host); + if (port != null) { + sshResolver.setPort(port); + } + sshResolver.setUser(user); + sshResolver.setUserPassword(password); + if (ivy != null) { + ivy.forEach(pattern -> sshResolver.addIvyPattern(pattern.getPattern())); + } + artifact.forEach(pattern -> sshResolver.addArtifactPattern(pattern.getPattern())); + return super.toIvyModel(sshResolver); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/UrlResolver.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/UrlResolver.java new file mode 100644 index 00000000000..14f6c808643 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/UrlResolver.java @@ -0,0 +1,55 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import java.util.List; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; +import org.apache.ivy.plugins.resolver.URLResolver; + +@EqualsAndHashCode(callSuper = true) +@Data +final class UrlResolver extends Resolver { + @JacksonXmlProperty(isAttribute = true) + @Nullable + private Boolean m2compatible; + + /** Defines a pattern for Ivy files, using the pattern attribute. */ + @JacksonXmlElementWrapper(useWrapping = false) + @Nullable + private List ivy; + + /** Defines a pattern for artifacts, using the pattern attribute */ + @JacksonXmlElementWrapper(useWrapping = false) + private List artifact; + + @Override + public URLResolver toIvyModel() { + URLResolver urlResolver = new URLResolver(); + if (m2compatible != null) { + urlResolver.setM2compatible(m2compatible); + } + if (ivy != null) { + ivy.forEach(pattern -> urlResolver.addIvyPattern(pattern.getPattern())); + } + artifact.forEach(pattern -> urlResolver.addArtifactPattern(pattern.getPattern())); + return super.toIvyModel(urlResolver); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactAccount.java new file mode 100644 index 00000000000..fef3cac442b --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactAccount.java @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.jenkins; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.clouddriver.artifacts.config.BasicAuth; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; + +@NonnullByDefault +@Value +public class JenkinsArtifactAccount implements ArtifactAccount, BasicAuth { + private final String name; + private final Optional username; + private final Optional password; + private final String address; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + JenkinsArtifactAccount(String name, String username, String password, String address) { + this.name = Strings.nullToEmpty(name); + this.username = Optional.ofNullable(Strings.emptyToNull(username)); + this.password = Optional.ofNullable(Strings.emptyToNull(password)); + this.address = Strings.nullToEmpty(address); + } + + @Override + public Optional getUsernamePasswordFile() { + return Optional.empty(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactConfiguration.java new file mode 100644 index 00000000000..fb642a01bba --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactConfiguration.java @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.jenkins; + +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import okhttp3.OkHttpClient; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("jenkins.enabled") +@EnableConfigurationProperties(JenkinsProperties.class) +@RequiredArgsConstructor +@Slf4j +class JenkinsArtifactConfiguration { + private final JenkinsProperties jenkinsProperties; + + @Bean + public CredentialsTypeProperties + jenkinsCredentialsProperties(OkHttpClient okHttpClient) { + return CredentialsTypeProperties.builder() + .type(JenkinsArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(JenkinsArtifactCredentials.class) + .credentialsDefinitionClass(JenkinsArtifactAccount.class) + .defaultCredentialsSource( + () -> + jenkinsProperties.getMasters().stream() + .map( + m -> + new JenkinsArtifactAccount( + m.getName(), m.getUsername(), m.getPassword(), m.getAddress())) + .collect(Collectors.toList())) + .credentialsParser( + a -> { + try { + return new JenkinsArtifactCredentials(a, okHttpClient); + } catch (Exception e) { + log.warn("Failure instantiating jenkins artifact account {}: ", a, e); + return null; + } + }) + .build(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactCredentials.java new file mode 100644 index 00000000000..2a4690efde4 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsArtifactCredentials.java @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.jenkins; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.artifacts.config.SimpleHttpArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import okhttp3.HttpUrl; +import okhttp3.OkHttpClient; + +@NonnullByDefault +@Slf4j +public class JenkinsArtifactCredentials + extends SimpleHttpArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-jenkins"; + private static final String TYPE = "jenkins/file"; + + @Getter private final String name; + + @Getter private final ImmutableList types = ImmutableList.of(TYPE); + + private final JenkinsArtifactAccount jenkinsArtifactAccount; + + JenkinsArtifactCredentials(JenkinsArtifactAccount account, OkHttpClient okHttpClient) { + super(okHttpClient, account); + this.jenkinsArtifactAccount = account; + this.name = account.getName(); + } + + @Override + protected HttpUrl getDownloadUrl(Artifact artifact) { + String formattedJenkinsAddress = + jenkinsArtifactAccount.getAddress().endsWith("/") + ? jenkinsArtifactAccount.getAddress() + : jenkinsArtifactAccount.getAddress() + "/"; + String formattedReference = + artifact.getReference().startsWith("/") + ? artifact.getReference() + : "/" + artifact.getReference(); + String buildUrl = + formattedJenkinsAddress + + "job/" + + artifact.getName() + + "/" + + artifact.getVersion() + + "/artifact" + + formattedReference; + HttpUrl url = HttpUrl.parse(buildUrl); + if (url == null) { + throw new IllegalArgumentException( + "Malformed content URL in reference: " + + buildUrl + + ". Read more here https://www.spinnaker.io/reference/artifacts/types/"); + } + return url; + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsProperties.java new file mode 100644 index 00000000000..e6d2784eb45 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/jenkins/JenkinsProperties.java @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.jenkins; + +import java.util.ArrayList; +import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; + +@Data +@ConfigurationProperties("jenkins") +final class JenkinsProperties { + private boolean enabled; + private List masters = new ArrayList<>(); + + @Data + static final class Master { + private String name; + private String address; + private String username; + private String password; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactAccount.java new file mode 100644 index 00000000000..df9242d7812 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactAccount.java @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.kubernetes; + +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import lombok.Value; + +@NonnullByDefault +@Value +final class KubernetesArtifactAccount implements ArtifactAccount { + @Override + public String getName() { + return "kubernetes"; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactConfiguration.java new file mode 100644 index 00000000000..71ec4f9ff82 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactConfiguration.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.kubernetes; + +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("kubernetes.enabled") +@RequiredArgsConstructor +@Slf4j +class KubernetesArtifactConfiguration { + @Bean + public CredentialsRepository + kubernetesArtifactCredentialsRepository() { + CredentialsRepository repository = + new MapBackedCredentialsRepository<>( + KubernetesArtifactCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()); + repository.save(new KubernetesArtifactCredentials(new KubernetesArtifactAccount())); + return repository; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactCredentials.java new file mode 100644 index 00000000000..221ad427cbc --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactCredentials.java @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.kubernetes; + +import static com.google.common.collect.ImmutableList.toImmutableList; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.InputStream; +import java.util.Arrays; +import lombok.Value; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Value +@NonnullByDefault +final class KubernetesArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-kubernetes"; + private final String name; + private final ImmutableList types; + + KubernetesArtifactCredentials(KubernetesArtifactAccount account) { + this.name = account.getName(); + this.types = + Arrays.stream(KubernetesArtifactType.values()) + .filter(t -> t != KubernetesArtifactType.DockerImage) + .map(KubernetesArtifactType::getType) + .collect(toImmutableList()); + } + + public InputStream download(Artifact artifact) { + throw new UnsupportedOperationException( + "Kubernetes artifacts are retrieved by kubernetes directly"); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactType.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactType.java new file mode 100644 index 00000000000..06f1dda633d --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/kubernetes/KubernetesArtifactType.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.artifacts.kubernetes; + +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; + +@NonnullByDefault +public enum KubernetesArtifactType { + DockerImage("docker/image"), + ConfigMap("kubernetes/configMap"), + Deployment("kubernetes/deployment"), + ReplicaSet("kubernetes/replicaSet"), + Secret("kubernetes/secret"); + + private final String type; + + KubernetesArtifactType(String type) { + this.type = type; + } + + public String getType() { + return type; + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactAccount.java new file mode 100644 index 00000000000..73574185326 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactAccount.java @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.maven; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; + +@NonnullByDefault +@Value +public class MavenArtifactAccount implements ArtifactAccount { + private final String name; + private final String repositoryUrl; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + public MavenArtifactAccount(String name, String repositoryUrl) { + this.name = Strings.nullToEmpty(name); + this.repositoryUrl = Strings.nullToEmpty(repositoryUrl); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactConfiguration.java new file mode 100644 index 00000000000..6c6829092e5 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactConfiguration.java @@ -0,0 +1,55 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.maven; + +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import okhttp3.OkHttpClient; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("artifacts.maven.enabled") +@EnableConfigurationProperties(MavenArtifactProviderProperties.class) +@RequiredArgsConstructor +@Slf4j +class MavenArtifactConfiguration { + private final MavenArtifactProviderProperties mavenArtifactProviderProperties; + + @Bean + public CredentialsTypeProperties + mavenCredentialsProperties(OkHttpClient okHttpClient) { + return CredentialsTypeProperties.builder() + .type(MavenArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(MavenArtifactCredentials.class) + .credentialsDefinitionClass(MavenArtifactAccount.class) + .defaultCredentialsSource(mavenArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new MavenArtifactCredentials(a, okHttpClient); + } catch (Exception e) { + log.warn("Failure instantiating maven artifact account {}: ", a, e); + return null; + } + }) + .build(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactCredentials.java new file mode 100644 index 00000000000..14fe0dca790 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactCredentials.java @@ -0,0 +1,257 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.maven; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.util.Optional; +import lombok.Getter; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.Response; +import org.apache.maven.artifact.repository.metadata.SnapshotVersion; +import org.apache.maven.artifact.repository.metadata.Versioning; +import org.apache.maven.artifact.repository.metadata.io.xpp3.MetadataXpp3Reader; +import org.apache.maven.repository.internal.MavenRepositorySystemUtils; +import org.codehaus.plexus.util.xml.pull.XmlPullParserException; +import org.eclipse.aether.artifact.DefaultArtifact; +import org.eclipse.aether.internal.impl.DefaultRepositoryLayoutProvider; +import org.eclipse.aether.metadata.DefaultMetadata; +import org.eclipse.aether.metadata.Metadata; +import org.eclipse.aether.repository.RemoteRepository; +import org.eclipse.aether.spi.connector.layout.RepositoryLayout; +import org.eclipse.aether.spi.connector.layout.RepositoryLayoutProvider; +import org.eclipse.aether.transfer.NoRepositoryLayoutException; +import org.eclipse.aether.util.version.GenericVersionScheme; +import org.eclipse.aether.version.InvalidVersionSpecificationException; +import org.eclipse.aether.version.Version; +import org.eclipse.aether.version.VersionConstraint; +import org.eclipse.aether.version.VersionScheme; + +@NonnullByDefault +public final class MavenArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-maven"; + private static final String RELEASE = "RELEASE"; + private static final String SNAPSHOT = "SNAPSHOT"; + private static final String LATEST = "LATEST"; + private static final String MAVEN_METADATA_XML = "maven-metadata.xml"; + + public static final ImmutableList<String> TYPES = ImmutableList.of("maven/file"); + + private final MavenArtifactAccount account; + private final OkHttpClient okHttpClient; + private final RepositoryLayout repositoryLayout; + + @Getter private final ImmutableList<String> types = TYPES; + + public MavenArtifactCredentials(MavenArtifactAccount account, OkHttpClient okHttpClient) { + this.account = account; + this.okHttpClient = okHttpClient; + + try { + RemoteRepository remoteRepository = + new RemoteRepository.Builder(account.getName(), "default", account.getRepositoryUrl()) + .build(); + this.repositoryLayout = + MavenRepositorySystemUtils.newServiceLocator() + .addService(RepositoryLayoutProvider.class, DefaultRepositoryLayoutProvider.class) + .getService(RepositoryLayoutProvider.class) + .newRepositoryLayout(MavenRepositorySystemUtils.newSession(), remoteRepository); + } catch (NoRepositoryLayoutException e) { + throw new IllegalStateException(e); + } + } + + @Override + public String getName() { + return account.getName(); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } + + @Override + public InputStream download(Artifact artifact) { + try { + DefaultArtifact requestedArtifact = new DefaultArtifact(artifact.getReference()); + String artifactPath = + resolveVersion(requestedArtifact) + .map( + version -> + repositoryLayout.getLocation(withVersion(requestedArtifact, version), false)) + .map(URI::getPath) + .orElseThrow( + () -> + new IllegalStateException(
+ "No versions matching constraint '" + + artifact.getVersion() + + "' for '" + + artifact.getReference() + + "'")); + + Request artifactRequest = + new Request.Builder().url(account.getRepositoryUrl() + "/" + artifactPath).get().build(); + + Response artifactResponse = okHttpClient.newCall(artifactRequest).execute(); + if (artifactResponse.isSuccessful()) { + return artifactResponse.body().byteStream(); + } + throw new IllegalStateException( + "Unable to download artifact with reference '" + + artifact.getReference() + + "'. HTTP " + + artifactResponse.code()); + } catch (IOException | ArtifactDownloadException e) { + throw new IllegalStateException( + "Unable to download artifact with reference '" + artifact.getReference() + "'", e); + } + } + + public Optional resolveArtifactName(Artifact artifact) { + try { + final DefaultArtifact aetherArtifact = new DefaultArtifact(artifact.getReference()); + return Optional.of(aetherArtifact.getGroupId() + ":" + aetherArtifact.getArtifactId()); + } catch (Exception e) { + return Optional.empty(); + } + } + + public Optional resolveArtifactVersion(Artifact artifact) { + try { + return resolveVersion(new DefaultArtifact(artifact.getReference())); + } catch (Exception e) { + return Optional.empty(); + } + } + + private Optional resolveVersion(org.eclipse.aether.artifact.Artifact artifact) { + try { + String metadataPath = metadataUri(artifact).getPath(); + Request metadataRequest = + new Request.Builder().url(account.getRepositoryUrl() + "/" + metadataPath).get().build(); + Response response = okHttpClient.newCall(metadataRequest).execute(); + + if (response.isSuccessful()) { + VersionScheme versionScheme = new GenericVersionScheme(); + VersionConstraint versionConstraint = + versionScheme.parseVersionConstraint(artifact.getVersion()); + Versioning versioning = + new MetadataXpp3Reader().read(response.body().byteStream(), false).getVersioning(); + + if (isRelease(artifact)) { + return Optional.ofNullable(versioning.getRelease()); + } else if (isLatestSnapshot(artifact)) { + return resolveVersion(withVersion(artifact, versioning.getLatest())); + } else if (isLatest(artifact)) { + String latestVersion = versioning.getLatest(); + return latestVersion != null && latestVersion.endsWith("-SNAPSHOT") + ? resolveVersion(withVersion(artifact, latestVersion)) + : Optional.ofNullable(latestVersion); + } else if (artifact.getVersion().endsWith("-SNAPSHOT")) { + String requestedClassifier = + artifact.getClassifier() == null ? 
"" : artifact.getClassifier(); + return versioning.getSnapshotVersions().stream() + .filter(v -> v.getClassifier().equals(requestedClassifier)) + .map(SnapshotVersion::getVersion) + .findFirst(); + } else { + return versioning.getVersions().stream() + .map( + v -> { + try { + return versionScheme.parseVersion(v); + } catch (InvalidVersionSpecificationException e) { + throw new ArtifactDownloadException(e); + } + }) + .filter(versionConstraint::containsVersion) + .max(Version::compareTo) + .map(Version::toString); + } + } else { + throw new IOException( + "Unsuccessful response retrieving maven-metadata.xml " + response.code()); + } + } catch (IOException | XmlPullParserException | InvalidVersionSpecificationException e) { + throw new ArtifactDownloadException(e); + } + } + + private DefaultArtifact withVersion( + org.eclipse.aether.artifact.Artifact artifact, String version) { + return new DefaultArtifact( + artifact.getGroupId(), + artifact.getArtifactId(), + artifact.getClassifier(), + artifact.getExtension(), + version); + } + + private URI metadataUri(org.eclipse.aether.artifact.Artifact artifact) { + String group = artifact.getGroupId(); + String artifactId = artifact.getArtifactId(); + String version = artifact.getVersion(); + + Metadata metadata; + if (artifact.getVersion().endsWith("-SNAPSHOT")) { + metadata = + new DefaultMetadata( + group, artifactId, version, MAVEN_METADATA_XML, Metadata.Nature.SNAPSHOT); + } else if (isRelease(artifact)) { + metadata = + new DefaultMetadata(group, artifactId, MAVEN_METADATA_XML, Metadata.Nature.RELEASE); + } else if (isLatestSnapshot(artifact)) { + metadata = + new DefaultMetadata(group, artifactId, MAVEN_METADATA_XML, Metadata.Nature.SNAPSHOT); + } else if (isLatest(artifact) || version.startsWith("[") || version.startsWith("(")) { + metadata = + new DefaultMetadata( + group, artifactId, MAVEN_METADATA_XML, Metadata.Nature.RELEASE_OR_SNAPSHOT); + } else { + metadata = + new DefaultMetadata(group, artifactId, MAVEN_METADATA_XML, Metadata.Nature.RELEASE); + } + + return repositoryLayout.getLocation(metadata, false); + } + + private boolean isRelease(org.eclipse.aether.artifact.Artifact artifact) { + return RELEASE.equals(artifact.getVersion()) || "latest.release".equals(artifact.getVersion()); + } + + private boolean isLatestSnapshot(org.eclipse.aether.artifact.Artifact artifact) { + return SNAPSHOT.equals(artifact.getVersion()) + || "latest.integration".equals(artifact.getVersion()); + } + + private boolean isLatest(org.eclipse.aether.artifact.Artifact artifact) { + return LATEST.equals(artifact.getVersion()); + } + + private static class ArtifactDownloadException extends RuntimeException { + ArtifactDownloadException(Throwable cause) { + super(cause); + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactProviderProperties.java new file mode 100644 index 00000000000..2a42698bd6d --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactProviderProperties.java @@ -0,0 +1,29 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.maven; + +import java.util.ArrayList; +import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; + +@Data +@ConfigurationProperties("artifacts.maven") +final class MavenArtifactProviderProperties { + private boolean enabled; + private List<MavenArtifactAccount> accounts = new ArrayList<>(); +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactAccount.java new file mode 100644 index 00000000000..e77433d1851 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactAccount.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2017, 2018, Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ + +package com.netflix.spinnaker.clouddriver.artifacts.oracle; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; + +@NonnullByDefault +@Value +public class OracleArtifactAccount implements ArtifactAccount { + private final String name; + private final String namespace; + private final String region; + private final String userId; + private final String fingerprint; + private final String sshPrivateKeyFilePath; + private final String privateKeyPassphrase; + private final String tenancyId; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + OracleArtifactAccount( + String name, + String namespace, + String region, + String userId, + String fingerprint, + String sshPrivateKeyFilePath, + String privateKeyPassphrase, + String tenancyId) { + this.name = Strings.nullToEmpty(name); + this.namespace = Strings.nullToEmpty(namespace); + this.region = Strings.nullToEmpty(region); + this.userId = Strings.nullToEmpty(userId); + this.fingerprint = Strings.nullToEmpty(fingerprint); + this.sshPrivateKeyFilePath = Strings.nullToEmpty(sshPrivateKeyFilePath); + this.privateKeyPassphrase = Strings.nullToEmpty(privateKeyPassphrase); + this.tenancyId = Strings.nullToEmpty(tenancyId); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactClient.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactClient.java new file mode 100644 index 00000000000..e3c5ba23724 --- /dev/null +++
b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactClient.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2017, 2018, Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ + +package com.netflix.spinnaker.clouddriver.artifacts.oracle; + +import com.oracle.bmc.auth.AuthenticationDetailsProvider; +import com.oracle.bmc.auth.SimpleAuthenticationDetailsProvider; +import com.oracle.bmc.auth.SimplePrivateKeySupplier; +import com.oracle.bmc.http.signing.DefaultRequestSigner; +import com.oracle.bmc.http.signing.RequestSigner; +import com.sun.jersey.api.client.*; +import com.sun.jersey.api.client.config.ClientConfig; +import com.sun.jersey.api.client.config.DefaultClientConfig; +import com.sun.jersey.api.client.filter.ClientFilter; +import com.sun.jersey.client.urlconnection.URLConnectionClientHandler; +import java.io.InputStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import javax.ws.rs.core.MediaType; + +class OracleArtifactClient { + + private final Client client; + + OracleArtifactClient( + String userId, + String sshPrivateKeyFilePath, + String privateKeyPassphrase, + String fingerprint, + String tenancyId) { + Supplier<InputStream> privateKeySupplier = new SimplePrivateKeySupplier(sshPrivateKeyFilePath); + AuthenticationDetailsProvider provider = + SimpleAuthenticationDetailsProvider.builder() + .userId(userId) + .fingerprint(fingerprint) + .privateKeySupplier(privateKeySupplier) + .passPhrase(privateKeyPassphrase) + .tenantId(tenancyId) + .build(); + + RequestSigner requestSigner = DefaultRequestSigner.createRequestSigner(provider); + + ClientConfig clientConfig = new DefaultClientConfig(); + client = new Client(new URLConnectionClientHandler(), clientConfig); + client.addFilter(new RequestSigningFilter(requestSigner)); + } + + InputStream readObject(URI uri) { + WebResource wr = client.resource(uri); + wr.accept(MediaType.APPLICATION_OCTET_STREAM_TYPE); + return wr.get(InputStream.class); + } + + private class RequestSigningFilter extends ClientFilter { + private final RequestSigner signer; + + private RequestSigningFilter(RequestSigner requestSigner) { + this.signer = requestSigner; + } + + @Override + public ClientResponse handle(ClientRequest cr) throws ClientHandlerException { + Map<String, List<String>> stringHeaders = new HashMap<>(); + for (String key : cr.getHeaders().keySet()) { + List<String> vals = new ArrayList<>(); + for (Object val : cr.getHeaders().get(key)) { + vals.add((String) val); + } + stringHeaders.put(key, vals); + } + + Map<String, String> signedHeaders = + signer.signRequest(cr.getURI(), cr.getMethod(), stringHeaders, cr.getEntity()); + for (String key : signedHeaders.keySet()) { + cr.getHeaders().putSingle(key, signedHeaders.get(key)); + } + + return getNext().handle(cr); + } + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactConfiguration.java new file mode 100644 index 00000000000..3d6c05e8f4f --- /dev/null +++
b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactConfiguration.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017, 2018, Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ + +package com.netflix.spinnaker.clouddriver.artifacts.oracle; + +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("artifacts.oracle.enabled") +@EnableConfigurationProperties(OracleArtifactProviderProperties.class) +@RequiredArgsConstructor +@Slf4j +class OracleArtifactConfiguration { + private final OracleArtifactProviderProperties oracleArtifactProviderProperties; + + @Bean + public CredentialsTypeProperties<OracleArtifactCredentials, OracleArtifactAccount> + oracleCredentialsProperties(String clouddriverUserAgentApplicationName) { + return CredentialsTypeProperties.<OracleArtifactCredentials, OracleArtifactAccount>builder() + .type(OracleArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(OracleArtifactCredentials.class) + .credentialsDefinitionClass(OracleArtifactAccount.class) + .defaultCredentialsSource(oracleArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new OracleArtifactCredentials(clouddriverUserAgentApplicationName, a); + } catch (Exception e) { + log.warn("Failure instantiating oracle artifact account {}: ", a, e); + return null; + } + }) + .build(); + } +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactCredentials.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactCredentials.java new file mode 100644 index 00000000000..ec03c4bfca9 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactCredentials.java @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2017, 2018, Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
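[Aside — illustrative, not part of this diff: OracleArtifactCredentials below parses references of the form oci://<bucket>/<path>, where an optional #<versionId> suffix becomes the versionId query parameter on the Object Storage request. An invented example reference:]

// Illustrative sketch only -- bucket, path and version id are made up.
Artifact ociObject =
    Artifact.builder()
        .type("oracle/object")
        .reference("oci://my-bucket/manifests/app.yml#previousVersionId")
        .build();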
+ * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ + +package com.netflix.spinnaker.clouddriver.artifacts.oracle; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.sun.jersey.api.client.UniformInterfaceException; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import javax.ws.rs.core.UriBuilder; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@NonnullByDefault +@Slf4j +public class OracleArtifactCredentials implements ArtifactCredentials { + public static final String CREDENTIALS_TYPE = "artifacts-oracle"; + private static final String ARTIFACT_REFERENCE_PREFIX = "oci://"; + private static final String ARTIFACT_VERSION_QUERY_PARAM = "versionId"; + private static final String ARTIFACT_URI = + "https://objectstorage.{arg0}.oraclecloud.com/n/{arg1}/b/{arg2}/o/{arg3}"; + + @Getter private final String name; + @Getter private final ImmutableList<String> types = ImmutableList.of("oracle/object"); + + private final String namespace; + private final String region; + private final String userId; + private final String fingerprint; + private final String sshPrivateKeyFilePath; + private final String privateKeyPassphrase; + private final String tenancyId; + + @JsonIgnore private final OracleArtifactClient client; + + OracleArtifactCredentials(String applicationName, OracleArtifactAccount account) { + this.name = account.getName(); + this.namespace = account.getNamespace(); + this.region = account.getRegion(); + this.userId = account.getUserId(); + this.fingerprint = account.getFingerprint(); + this.sshPrivateKeyFilePath = account.getSshPrivateKeyFilePath(); + this.privateKeyPassphrase = account.getPrivateKeyPassphrase(); + this.tenancyId = account.getTenancyId(); + + this.client = + new OracleArtifactClient( + userId, sshPrivateKeyFilePath, privateKeyPassphrase, fingerprint, tenancyId); + } + + public InputStream download(Artifact artifact) throws IOException { + String reference = artifact.getReference(); + if (reference.startsWith(ARTIFACT_REFERENCE_PREFIX)) { + reference = reference.substring(ARTIFACT_REFERENCE_PREFIX.length()); + } + + int slash = reference.indexOf("/"); + if (slash <= 0) { + throw new IllegalArgumentException( + "Oracle references must be of the format oci://<bucket>/<file path>, got: " + artifact); + } + + String bucketName = reference.substring(0, slash); + String fullPath = reference.substring(slash + 1); + String path = fullPath; + UriBuilder uriBuilder = UriBuilder.fromPath(ARTIFACT_URI); + int versionIndex = fullPath.indexOf("#"); + if (versionIndex > 0) { + path = fullPath.substring(0, versionIndex); + uriBuilder = + uriBuilder.queryParam(ARTIFACT_VERSION_QUERY_PARAM, fullPath.substring(versionIndex + 1)); + } + + URI uri = uriBuilder.build(region, namespace, bucketName, path); + + try { + return client.readObject(uri); + } catch (UniformInterfaceException e) { + if (e.getResponse().getStatus() == 404) { + throw new IOException("Object not found (key: " + path + ")"); + } + throw e; + } + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } +} diff --git
a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactProviderProperties.java new file mode 100644 index 00000000000..9d42c164660 --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/oracle/OracleArtifactProviderProperties.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2017, 2018, Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ + +package com.netflix.spinnaker.clouddriver.artifacts.oracle; + +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider; +import java.util.ArrayList; +import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; + +@Data +@ConfigurationProperties("artifacts.oracle") +final class OracleArtifactProviderProperties implements ArtifactProvider { + private boolean enabled; + private List<OracleArtifactAccount> accounts = new ArrayList<>(); +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactAccount.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactAccount.java index df687e92cc0..08d7193cccf 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactAccount.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactAccount.java @@ -16,16 +16,42 @@ package com.netflix.spinnaker.clouddriver.artifacts.s3; +import com.google.common.base.Strings; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; -import lombok.Data; -import lombok.EqualsAndHashCode; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; +import org.springframework.boot.context.properties.ConstructorBinding; -@EqualsAndHashCode(callSuper = true) -@Data -public class S3ArtifactAccount extends ArtifactAccount -{ - private String name; - private String apiEndpoint; - private String apiRegion; - private String region; +@NonnullByDefault +@Value +public class S3ArtifactAccount implements ArtifactAccount { + private final String name; + private final String apiEndpoint; + private final String apiRegion; + private final String region; + private final String awsAccessKeyId; + private final String awsSecretAccessKey; + private final String signerOverride; + + @Builder + @ConstructorBinding + @ParametersAreNullableByDefault + public S3ArtifactAccount( + String name, + String apiEndpoint, + String apiRegion, + String region, + String awsAccessKeyId, + String awsSecretAccessKey, + String signerOverride) { + this.name = Strings.nullToEmpty(name); + this.apiEndpoint = Strings.nullToEmpty(apiEndpoint); + this.apiRegion = Strings.nullToEmpty(apiRegion); + this.region = Strings.nullToEmpty(region); + this.awsAccessKeyId = Strings.nullToEmpty(awsAccessKeyId); + this.awsSecretAccessKey = Strings.nullToEmpty(awsSecretAccessKey); + this.signerOverride = Strings.nullToEmpty(signerOverride); + } } diff --git
a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactConfiguration.java index ef7796f7c56..393b1045b67 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactConfiguration.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactConfiguration.java @@ -16,52 +16,43 @@ package com.netflix.spinnaker.clouddriver.artifacts.s3; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import java.util.Optional; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Scope; -import org.springframework.scheduling.annotation.EnableScheduling; - -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; @Configuration @ConditionalOnProperty("artifacts.s3.enabled") -@EnableScheduling +@EnableConfigurationProperties(S3ArtifactProviderProperties.class) +@RequiredArgsConstructor @Slf4j -public class S3ArtifactConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("artifacts.s3") - S3ArtifactProviderProperties s3ArtifactProviderProperties() { return new S3ArtifactProviderProperties(); } - - @Autowired - S3ArtifactProviderProperties s3ArtifactProviderProperties; - - @Autowired - ArtifactCredentialsRepository artifactCredentialsRepository; +class S3ArtifactConfiguration { + private final S3ArtifactProviderProperties s3ArtifactProviderProperties; @Bean - List<S3ArtifactCredentials> s3ArtifactCredentials() { - return s3ArtifactProviderProperties.getAccounts() - .stream() - .map(a -> { - try { - S3ArtifactCredentials c = new S3ArtifactCredentials(a); - artifactCredentialsRepository.save(c); - return c; - } catch (IllegalArgumentException e) { - log.warn("Failure instantiating s3 artifact account {}: ", a, e); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); + public CredentialsTypeProperties<S3ArtifactCredentials, S3ArtifactAccount> + s3CredentialsProperties( + Optional<S3ArtifactValidator> s3ArtifactValidator, + S3ArtifactProviderProperties s3ArtifactProviderProperties) { + return CredentialsTypeProperties.<S3ArtifactCredentials, S3ArtifactAccount>builder() + .type(S3ArtifactCredentials.CREDENTIALS_TYPE) + .credentialsClass(S3ArtifactCredentials.class) + .credentialsDefinitionClass(S3ArtifactAccount.class) + .defaultCredentialsSource(s3ArtifactProviderProperties::getAccounts) + .credentialsParser( + a -> { + try { + return new S3ArtifactCredentials( + a, s3ArtifactValidator, s3ArtifactProviderProperties); + } catch (Exception e) { + log.warn("Failure instantiating s3 artifact account {}: ", a, e); + return null; + } + }) + .build(); } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentials.java
b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentials.java index cd709d378ab..c365f4693d6 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentials.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentials.java @@ -16,49 +16,107 @@ package com.netflix.spinnaker.clouddriver.artifacts.s3; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.PredefinedClientConfigurations; +import com.amazonaws.Request; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.handlers.RequestHandler2; import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.S3Object; -import java.util.Arrays; -import java.util.List; -import org.apache.commons.lang3.StringUtils; - +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.aws.SpectatorRequestMetricCollector; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import groovy.util.logging.Slf4j; -import lombok.Data; - +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; import java.io.InputStream; +import java.util.Optional; +import javax.annotation.Nullable; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; @Slf4j -@Data +@NonnullByDefault public class S3ArtifactCredentials implements ArtifactCredentials { - private final String name; + public static final String CREDENTIALS_TYPE = "artifacts-s3"; + @Getter private final String name; + @Getter private final ImmutableList<String> types = ImmutableList.of("s3/object"); + private final String apiEndpoint; private final String apiRegion; private final String region; - private final List<String> types = Arrays.asList("s3/object"); + private final String awsAccessKeyId; + private final String awsSecretAccessKey; + private final String signerOverride; + private final Optional<S3ArtifactValidator> s3ArtifactValidator; + private final S3ArtifactProviderProperties s3ArtifactProviderProperties; + private final S3ArtifactRequestHandler s3ArtifactRequestHandler; + + private AmazonS3 amazonS3; + + S3ArtifactCredentials( + S3ArtifactAccount account, + Optional<S3ArtifactValidator> s3ArtifactValidator, + S3ArtifactProviderProperties s3ArtifactProviderProperties) { + this(account, s3ArtifactValidator, null, s3ArtifactProviderProperties); + } + + S3ArtifactCredentials( + S3ArtifactAccount account, + @Nullable AmazonS3 amazonS3, + S3ArtifactProviderProperties s3ArtifactProviderProperties) { + this(account, Optional.empty(), amazonS3, s3ArtifactProviderProperties); + } - public S3ArtifactCredentials(S3ArtifactAccount account) throws IllegalArgumentException { + S3ArtifactCredentials( + S3ArtifactAccount account, + Optional<S3ArtifactValidator> s3ArtifactValidator, + @Nullable AmazonS3 amazonS3, + S3ArtifactProviderProperties s3ArtifactProviderProperties) + throws IllegalArgumentException { name = account.getName(); apiEndpoint = account.getApiEndpoint(); apiRegion = account.getApiRegion(); region = account.getRegion(); + awsAccessKeyId = account.getAwsAccessKeyId(); + awsSecretAccessKey = account.getAwsSecretAccessKey();
+ signerOverride = account.getSignerOverride(); + this.s3ArtifactValidator = s3ArtifactValidator; + this.amazonS3 = amazonS3; + this.s3ArtifactProviderProperties = s3ArtifactProviderProperties; + s3ArtifactRequestHandler = new S3ArtifactRequestHandler(name); } - protected AmazonS3 getS3Client() { + private AmazonS3 getS3Client() { + if (amazonS3 != null) { + return amazonS3; + } + AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); + builder.setClientConfiguration(getClientConfiguration()); + builder.setRequestHandlers(s3ArtifactRequestHandler); - if (!StringUtils.isEmpty(apiEndpoint)) { - AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(apiEndpoint, apiRegion); + if (!apiEndpoint.isEmpty()) { + AwsClientBuilder.EndpointConfiguration endpoint = + new AwsClientBuilder.EndpointConfiguration(apiEndpoint, apiRegion); builder.setEndpointConfiguration(endpoint); builder.setPathStyleAccessEnabled(true); - } else if (!StringUtils.isEmpty(region)) { + } else if (!region.isEmpty()) { builder.setRegion(region); } - return builder.build(); + if (!awsAccessKeyId.isEmpty() && !awsSecretAccessKey.isEmpty()) { + BasicAWSCredentials awsStaticCreds = + new BasicAWSCredentials(awsAccessKeyId, awsSecretAccessKey); + builder.withCredentials(new AWSStaticCredentialsProvider(awsStaticCreds)); + } + + amazonS3 = builder.build(); + return amazonS3; } @Override @@ -70,11 +128,99 @@ public InputStream download(Artifact artifact) throws IllegalArgumentException { int slash = reference.indexOf("/"); if (slash <= 0) { - throw new IllegalArgumentException("S3 references must be of the format s3://<bucket>/<file path>, got: " + artifact); + throw new IllegalArgumentException( + "S3 references must be of the format s3://<bucket>/<file path>, got: " + artifact); } String bucketName = reference.substring(0, slash); String path = reference.substring(slash + 1); - S3Object s3obj = getS3Client().getObject(bucketName, path); - return s3obj.getObjectContent(); + S3Object s3obj; + try { + s3obj = getS3Client().getObject(bucketName, path); + } catch (AmazonS3Exception e) { + // An out-of-the-box AmazonS3Exception doesn't include the bucket/key + // name so it's hard to know what's actually failing. + log.error("exception getting object: s3://{}/{}: '{}'", bucketName, path, e.getMessage()); + + // In case this is a "file not found" error, throw a more specific + // exception, to get more info to the caller, and such that the resulting + // http response code isn't 500, since this isn't a server error, nor is + // it retryable. + // + // See + // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + // for the list of error codes.
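// Aside (illustrative, not part of this diff): the effect of the NoSuchKey
// mapping below is that a missing object reaches the caller as kork's
// NotFoundException (an HTTP 404) instead of a generic 500. A sketch of how a
// test might assert this; the bucket and key are made up:
//   assertThatExceptionOfType(NotFoundException.class)
//       .isThrownBy(() -> credentials.download(
//           Artifact.builder().reference("s3://my-bucket/missing.yml").build()));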
+ if ("NoSuchKey".equals(e.getErrorCode())) { + throw new NotFoundException("s3://" + bucketName + "/" + path + " not found", e); + } + throw e; + } + if (s3ArtifactValidator.isEmpty()) { + return s3obj.getObjectContent(); + } + return s3ArtifactValidator.get().validate(getS3Client(), s3obj); + } + + @Override + public String getType() { + return CREDENTIALS_TYPE; + } + + private ClientConfiguration getClientConfiguration() { + ClientConfiguration configuration = + configuration = PredefinedClientConfigurations.defaultConfig(); + + if (!signerOverride.isEmpty()) { + configuration.setSignerOverride(signerOverride); + } + + if (s3ArtifactProviderProperties.getClientExecutionTimeout() != null) { + configuration.setClientExecutionTimeout( + s3ArtifactProviderProperties.getClientExecutionTimeout()); + } + if (s3ArtifactProviderProperties.getConnectionMaxIdleMillis() != null) { + configuration.setConnectionMaxIdleMillis( + s3ArtifactProviderProperties.getConnectionMaxIdleMillis()); + } + if (s3ArtifactProviderProperties.getConnectionTimeout() != null) { + configuration.setConnectionTimeout(s3ArtifactProviderProperties.getConnectionTimeout()); + } + if (s3ArtifactProviderProperties.getConnectionTTL() != null) { + configuration.setConnectionTTL(s3ArtifactProviderProperties.getConnectionTTL()); + } + if (s3ArtifactProviderProperties.getMaxConnections() != null) { + configuration.setMaxConnections(s3ArtifactProviderProperties.getMaxConnections()); + } + if (s3ArtifactProviderProperties.getRequestTimeout() != null) { + configuration.setRequestTimeout(s3ArtifactProviderProperties.getRequestTimeout()); + } + if (s3ArtifactProviderProperties.getSocketTimeout() != null) { + configuration.setSocketTimeout(s3ArtifactProviderProperties.getSocketTimeout()); + } + if (s3ArtifactProviderProperties.getValidateAfterInactivityMillis() != null) { + configuration.setValidateAfterInactivityMillis( + s3ArtifactProviderProperties.getValidateAfterInactivityMillis()); + } + return configuration; + } + + @NonnullByDefault + static class S3ArtifactRequestHandler extends RequestHandler2 { + + private final String value; + + S3ArtifactRequestHandler(String value) { + this.value = value; + } + + @Override + public void beforeRequest(Request request) { + // To get connection pool metrics, differentiate our client from others. + // Use SpectatorRequestMetricCollector.DEFAULT_HANDLER_CONTEXT_KEY to + // match what SpectatorRequestMetricCollector uses. See + // https://github.com/Netflix/spectator/blob/v1.0.6/spectator-ext-aws/src/main/java/com/netflix/spectator/aws/SpectatorRequestMetricCollector.java#L108 + // and + // https://github.com/Netflix/spectator/blob/v1.0.6/spectator-ext-aws/src/main/java/com/netflix/spectator/aws/SpectatorRequestMetricCollector.java#L177-L186. 
+ request.addHandlerContext(SpectatorRequestMetricCollector.DEFAULT_HANDLER_CONTEXT_KEY, value); + } } } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactProviderProperties.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactProviderProperties.java index 6eaac7f72bd..3a294cd9dbb 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactProviderProperties.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactProviderProperties.java @@ -17,15 +17,65 @@ package com.netflix.spinnaker.clouddriver.artifacts.s3; import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactProvider; -import lombok.Data; -import lombok.EqualsAndHashCode; - import java.util.ArrayList; import java.util.List; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; -@EqualsAndHashCode(callSuper = true) @Data -public class S3ArtifactProviderProperties extends ArtifactProvider { +@ConfigurationProperties("artifacts.s3") +final class S3ArtifactProviderProperties implements ArtifactProvider { private boolean enabled; private List<S3ArtifactAccount> accounts = new ArrayList<>(); + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setClientExecutionTimeout-int- + */ + private Integer clientExecutionTimeout; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setConnectionMaxIdleMillis-long- + */ + private Long connectionMaxIdleMillis; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setConnectionTimeout-int- + */ + private Integer connectionTimeout; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setConnectionTTL-long- + * The units are milliseconds. + */ + private Long connectionTTL; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setMaxConnections-int- + */ + private Integer maxConnections; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setRequestTimeout-int- + * The units are milliseconds. + */ + private Integer requestTimeout; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setSocketTimeout-int- + * The units are milliseconds. + */ + private Integer socketTimeout; + + /** + * See + * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setValidateAfterInactivityMillis-int- + */ + private Integer validateAfterInactivityMillis; } diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactValidator.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactValidator.java new file mode 100644 index 00000000000..11bba39f92d --- /dev/null +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactValidator.java @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License.
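[Aside — illustrative, not part of this diff: the tuning fields on S3ArtifactProviderProperties above bind under the artifacts.s3 prefix and fall back to the AWS SDK defaults when unset. A sketch of the YAML, with invented values:]

//   artifacts:
//     s3:
//       enabled: true
//       maxConnections: 100
//       connectionTimeout: 10000   # milliseconds
//       requestTimeout: 30000      # milliseconds
//       accounts:
//         - name: my-s3-account
//           region: us-west-2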
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.s3; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.S3Object; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.io.InputStream; + +@NonnullByDefault +public interface S3ArtifactValidator { + /** + * Validate an S3 artifact. Throw an exception if invalid. + * + * @param amazonS3 the S3 client used to retrieve the artifact to validate + * @param s3obj the artifact to validate. It is the implementation's responsibility to either + * return the input stream from this object to the caller, or close it. + * @return the validated S3 artifact (e.g. s3obj.getObjectContent()). It is the caller's + * responsibility to close this stream as soon as possible. + */ + InputStream validate(AmazonS3 amazonS3, S3Object s3obj); +} diff --git a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/config/ArtifactConfiguration.java b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/config/ArtifactConfiguration.java index 4c1acebdcb2..eb8ea460f38 100644 --- a/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/config/ArtifactConfiguration.java +++ b/clouddriver-artifacts/src/main/java/com/netflix/spinnaker/config/ArtifactConfiguration.java @@ -17,41 +17,47 @@ package com.netflix.spinnaker.config; import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; -import com.netflix.spinnaker.clouddriver.artifacts.embedded.EmbeddedArtifactConfiguration; -import com.netflix.spinnaker.clouddriver.artifacts.gcs.GcsArtifactConfiguration; -import com.netflix.spinnaker.clouddriver.artifacts.github.GitHubArtifactConfiguration; -import com.netflix.spinnaker.clouddriver.artifacts.http.HttpArtifactConfiguration; -import com.netflix.spinnaker.clouddriver.artifacts.s3.S3ArtifactConfiguration; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactAccount; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import java.util.List; +import java.util.stream.Collectors; +import okhttp3.OkHttpClient; import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.ApplicationContext; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Import; import org.springframework.scheduling.annotation.EnableScheduling; import org.springframework.stereotype.Component; -import org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.constructor.SafeConstructor; @Configuration @EnableConfigurationProperties @EnableScheduling @Component -@ComponentScan({"com.netflix.spinnaker.clouddriver.artifacts"}) -@Import({ - EmbeddedArtifactConfiguration.class, - GcsArtifactConfiguration.class, -
GitHubArtifactConfiguration.class, - HttpArtifactConfiguration.class, - S3ArtifactConfiguration.class -}) +@ComponentScan("com.netflix.spinnaker.clouddriver.artifacts") public class ArtifactConfiguration { @Bean - ArtifactCredentialsRepository artifactCredentialsRepository() { - return new ArtifactCredentialsRepository(); + OkHttpClient okHttpClient() { + return new OkHttpClient(); } @Bean - Yaml yaml() { - return new Yaml(new SafeConstructor()); + public ArtifactCredentialsRepository artifactCredentialsRepository( + ApplicationContext applicationContext, + List<CredentialsTypeProperties<? extends ArtifactCredentials, ? extends ArtifactAccount>> + credentialsTypes, + List<CredentialsRepository<? extends ArtifactCredentials>> defaultRepositories) { + List<CredentialsRepository<? extends ArtifactCredentials>> repositories = + credentialsTypes.stream() + .map(c -> new CredentialsTypeBaseConfiguration<>(applicationContext, c)) + .peek(CredentialsTypeBaseConfiguration::afterPropertiesSet) + .map(CredentialsTypeBaseConfiguration::getCredentialsRepository) + .collect(Collectors.toList()); + + repositories.addAll(defaultRepositories); + return new ArtifactCredentialsRepository(repositories); } } diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloaderTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloaderTest.java new file mode 100644 index 00000000000..f74ccf30e2b --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ArtifactDownloaderTest.java @@ -0,0 +1,86 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
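[Aside — illustrative, not part of this diff: a minimal sketch of an implementation of the S3ArtifactValidator interface introduced earlier, honoring its documented contract (either return the object's stream to the caller or close it). The class name and size policy are invented; exposing such a class as a @Bean is what would populate the Optional<S3ArtifactValidator> parameter in S3ArtifactConfiguration above.]

// Illustrative sketch only -- class name and size limit are made up.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.S3Object;
import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
import java.io.IOException;
import java.io.InputStream;

@NonnullByDefault
class MaxSizeS3ArtifactValidator implements S3ArtifactValidator {
  private static final long MAX_BYTES = 10L * 1024 * 1024;

  @Override
  public InputStream validate(AmazonS3 amazonS3, S3Object s3obj) {
    if (s3obj.getObjectMetadata().getContentLength() > MAX_BYTES) {
      try {
        s3obj.close(); // per the contract: close the stream when not returning it
      } catch (IOException e) {
        // already rejecting the artifact; nothing further to do with the stream
      }
      throw new IllegalArgumentException(
          "S3 artifact " + s3obj.getKey() + " exceeds " + MAX_BYTES + " bytes");
    }
    return s3obj.getObjectContent(); // caller closes this as soon as possible
  }
}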
+ * + */ + +package com.netflix.spinnaker.clouddriver.artifacts; + +import static org.assertj.core.api.Assertions.*; + +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.exceptions.MissingCredentialsException; +import java.io.InputStream; +import java.util.List; +import org.junit.jupiter.api.Test; + +public class ArtifactDownloaderTest { + + @Test + public void testNoSupportedArtifactType() { + CredentialsRepository<TestArtifactCredentials> repository = + new MapBackedCredentialsRepository<>( + TestArtifactCredentials.artifactType, new NoopCredentialsLifecycleHandler<>()); + TestArtifactCredentials credentials = new TestArtifactCredentials("test"); + repository.save(credentials); + + ArtifactCredentialsRepository artifactsCredentials = + new ArtifactCredentialsRepository(List.of(repository)); + + assertThat(artifactsCredentials.getCredentialsForType("test", "type1")).isEqualTo(credentials); + assertThatExceptionOfType(MissingCredentialsException.class) + .isThrownBy(() -> artifactsCredentials.getCredentialsForType("unknown", "type1")); + assertThatExceptionOfType(MissingCredentialsException.class) + .isThrownBy(() -> artifactsCredentials.getCredentialsForType("test", "unsupportedType")); + } + + private static final class TestArtifactCredentials implements ArtifactCredentials { + static final String artifactType = "artifactType"; + private static final List<String> types = List.of("type1", "type2"); + private String name; + + public TestArtifactCredentials(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getType() { + return artifactType; + } + + @Override + public List<String> getTypes() { + return types; + } + + @Override + public InputStream download(Artifact artifact) { + return null; + } + + @Override + public boolean handlesType(String type) { + return types.contains(type); + } + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentialsTest.java new file mode 100644 index 00000000000..c64fad0913f --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/bitbucket/BitbucketArtifactCredentialsTest.java @@ -0,0 +1,175 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.bitbucket; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.springframework.http.HttpHeaders.ACCEPT; +import static org.springframework.http.HttpHeaders.AUTHORIZATION; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.MappingBuilder; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.function.Function; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; +import org.springframework.http.MediaType; +import ru.lanwen.wiremock.ext.WiremockResolver; + +@ExtendWith({WiremockResolver.class, TempDirectory.class}) +class BitbucketArtifactCredentialsTest { + private final OkHttpClient okHttpClient = new OkHttpClient(); + + private final String DOWNLOAD_PATH = "/repos/spinnaker/testing/manifest.yml"; + private final String FILE_CONTENTS = "file contents"; + + @Test + void downloadWithToken(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + BitbucketArtifactAccount account = + BitbucketArtifactAccount.builder().name("my-bitbucket-account").token("abc").build(); + + runTestCase( + server, + account, + m -> + m.withHeader(AUTHORIZATION, equalTo("Bearer abc")) + .withHeader(ACCEPT, equalTo(MediaType.APPLICATION_JSON_VALUE))); + } + + @Test + void downloadWithTokenFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "zzz".getBytes()); + + BitbucketArtifactAccount account = + BitbucketArtifactAccount.builder() + .name("my-bitbucket-account") + .tokenFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase( + server, + account, + m -> + m.withHeader(AUTHORIZATION, equalTo("Bearer zzz")) + .withHeader(ACCEPT, equalTo(MediaType.APPLICATION_JSON_VALUE))); + } + + @Test + void downloadWithTokenFromFileWithReloadHeaders( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "zzz".getBytes()); + + BitbucketArtifactAccount account = + BitbucketArtifactAccount.builder() + .name("my-bitbucket-account") + .tokenFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase( + server, + account, + m -> + m.withHeader(AUTHORIZATION, equalTo("Bearer zzz")) + .withHeader(ACCEPT, equalTo(MediaType.APPLICATION_JSON_VALUE))); + + Files.write(authFile, "aaa".getBytes()); + + runTestCase( + server, + account, + m -> + m.withHeader(AUTHORIZATION, equalTo("Bearer aaa")) + .withHeader(ACCEPT, equalTo(MediaType.APPLICATION_JSON_VALUE))); + } + + @Test + void downloadWithBasicAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + BitbucketArtifactAccount account = + BitbucketArtifactAccount.builder() + .name("my-bitbucket-account") + .username("user") + .password("passw0rd") + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("user", "passw0rd")); + } + + @Test + void downloadWithBasicAuthFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock 
WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "someuser:somepassw0rd!".getBytes()); + + BitbucketArtifactAccount account = + BitbucketArtifactAccount.builder() + .name("my-bitbucket-account") + .usernamePasswordFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("someuser", "somepassw0rd!")); + } + + @Test + void downloadWithNoAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + BitbucketArtifactAccount account = + BitbucketArtifactAccount.builder().name("my-bitbucket-account").build(); + + runTestCase(server, account, m -> m.withHeader(AUTHORIZATION, absent())); + } + + private void runTestCase( + WireMockServer server, + BitbucketArtifactAccount account, + Function<MappingBuilder, MappingBuilder> expectedAuth) + throws IOException { + BitbucketArtifactCredentials credentials = + new BitbucketArtifactCredentials(account, okHttpClient); + + Artifact artifact = + Artifact.builder() + .reference(server.baseUrl() + DOWNLOAD_PATH) + .version("master") + .type("bitbucket/file") + .build(); + + prepareServer(server, expectedAuth); + + assertThat(credentials.download(artifact)) + .hasSameContentAs(new ByteArrayInputStream(FILE_CONTENTS.getBytes(StandardCharsets.UTF_8))); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void prepareServer( + WireMockServer server, Function<MappingBuilder, MappingBuilder> withAuth) { + server.stubFor( + withAuth.apply( + any(urlPathEqualTo(DOWNLOAD_PATH)).willReturn(aResponse().withBody(FILE_CONTENTS)))); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactAccountTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactAccountTest.java new file mode 100644 index 00000000000..1e081d48dba --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/gitRepo/GitRepoArtifactAccountTest.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.gitRepo; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; + +@ExtendWith({TempDirectory.class}) +public class GitRepoArtifactAccountTest { + + @Test + void shouldGetTokenFromFile(@TempDirectory.TempDir Path tempDir) throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "zzz".getBytes()); + + GitRepoArtifactAccount account = + GitRepoArtifactAccount.builder() + .name("gitRepo-account") + .tokenFile(authFile.toAbsolutePath().toString()) + .build(); + + assertThat(account.getTokenAsString().get()).isEqualTo("zzz"); + } + + @Test + void shouldGetTokenFromProperty() { + GitRepoArtifactAccount account = + GitRepoArtifactAccount.builder().name("gitRepo-account").token("tokentoken").build(); + + assertThat(account.getTokenAsString().get()).isEqualTo("tokentoken"); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/github/GithubArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/github/GithubArtifactCredentialsTest.java new file mode 100644 index 00000000000..37f57025fa0 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/github/GithubArtifactCredentialsTest.java @@ -0,0 +1,212 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.github; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.MappingBuilder; +import com.github.tomakehurst.wiremock.matching.RegexPattern; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.function.Function; +import okhttp3.OkHttpClient; +import org.apache.commons.io.Charsets; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; +import ru.lanwen.wiremock.ext.WiremockResolver; + +@ExtendWith({WiremockResolver.class, TempDirectory.class}) +class GithubArtifactCredentialsTest { + private final ObjectMapper objectMapper = new ObjectMapper(); + private final OkHttpClient okHttpClient = new OkHttpClient(); + + private final String METADATA_PATH = "/repos/spinnaker/testing/manifest.yml"; + private final String FILE_CONTENTS = "file contents"; + + @Test + void downloadWithToken(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + GitHubArtifactAccount account = + GitHubArtifactAccount.builder().name("my-github-account").token("abc").build(); + + runTestCase(server, account, m -> m.withHeader("Authorization", equalTo("token abc"))); + } + + @Test + void downloadWithTokenFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "zzz".getBytes()); + + GitHubArtifactAccount account = + GitHubArtifactAccount.builder() + .name("my-github-account") + .tokenFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase(server, account, m -> m.withHeader("Authorization", equalTo("token zzz"))); + } + + @Test + void downloadWithTokenFromFileWithReloadHeaders( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "zzz".getBytes()); + + GitHubArtifactAccount account = + GitHubArtifactAccount.builder() + .name("my-github-account") + .tokenFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase(server, account, m -> m.withHeader("Authorization", equalTo("token zzz"))); + + Files.write(authFile, "aaa".getBytes()); + + runTestCase(server, account, m -> m.withHeader("Authorization", equalTo("token aaa"))); + } + + @Test + void downloadWithBasicAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + GitHubArtifactAccount account = + GitHubArtifactAccount.builder() + .name("my-github-account") + .username("user") + .password("passw0rd") + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("user", "passw0rd")); + } + + @Test + void downloadWithBasicAuthFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "someuser:somepassw0rd!".getBytes()); + + GitHubArtifactAccount account = + GitHubArtifactAccount.builder() + .name("my-github-account") + .usernamePasswordFile(authFile.toAbsolutePath().toString()) + .build(); + + 
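    // The username/password pair read from usernamePasswordFile should be
+    // applied as an HTTP basic auth header.
+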
runTestCase(server, account, m -> m.withBasicAuth("someuser", "somepassw0rd!"));
+  }
+
+  @Test
+  void downloadWithNoAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException {
+    GitHubArtifactAccount account =
+        GitHubArtifactAccount.builder().name("my-github-account").build();
+
+    runTestCase(server, account, m -> m.withHeader("Authorization", absent()));
+  }
+
+  @Test
+  void useGitHubAPIs(@WiremockResolver.Wiremock WireMockServer server) throws IOException {
+    GitHubArtifactAccount account =
+        GitHubArtifactAccount.builder()
+            .name("my-github-account")
+            .token("zzz")
+            .useContentAPI(true)
+            .build();
+
+    runTestCase(
+        server,
+        account,
+        m ->
+            m.withHeader("Authorization", equalTo("token zzz"))
+                .withHeader("Accept", equalTo("application/vnd.github.v3.raw")));
+  }
+
+  @Test
+  void useGitHubAPIsSpecificVersion(@WiremockResolver.Wiremock WireMockServer server)
+      throws IOException {
+    GitHubArtifactAccount account =
+        GitHubArtifactAccount.builder()
+            .name("my-github-account")
+            .token("zzz")
+            .useContentAPI(true)
+            .githubAPIVersion("v10")
+            .build();
+
+    runTestCase(
+        server,
+        account,
+        m ->
+            m.withHeader("Authorization", equalTo("token zzz"))
+                .withHeader("Accept", equalTo("application/vnd.github.v10.raw")));
+  }
+
+  private void runTestCase(
+      WireMockServer server,
+      GitHubArtifactAccount account,
+      Function<MappingBuilder, MappingBuilder> expectedAuth)
+      throws IOException {
+    GitHubArtifactCredentials credentials =
+        new GitHubArtifactCredentials(account, okHttpClient, objectMapper);
+
+    Artifact artifact =
+        Artifact.builder()
+            .reference(server.baseUrl() + METADATA_PATH)
+            .version("master")
+            .type("github/file")
+            .build();
+
+    prepareServer(server, expectedAuth);
+
+    assertThat(credentials.download(artifact))
+        .hasSameContentAs(new ByteArrayInputStream(FILE_CONTENTS.getBytes(Charsets.UTF_8)));
+    assertThat(server.findUnmatchedRequests().getRequests()).isEmpty();
+  }
+
+  private void prepareServer(
+      WireMockServer server, Function<MappingBuilder, MappingBuilder> withAuth)
+      throws IOException {
+    final String downloadPath = "/download/spinnaker/testing/master/manifest.yml";
+
+    GitHubArtifactCredentials.ContentMetadata contentMetadata =
+        new GitHubArtifactCredentials.ContentMetadata()
+            .setDownloadUrl(server.baseUrl() + downloadPath);
+
+    server.stubFor(
+        withAuth.apply(
+            any(urlPathEqualTo(METADATA_PATH))
+                .withQueryParam("ref", equalTo("master"))
+                .willReturn(
+                    aResponse().withBody(objectMapper.writeValueAsString(contentMetadata)))));
+
+    server.stubFor(
+        withAuth.apply(
+            any(urlPathEqualTo(METADATA_PATH))
+                .withQueryParam("ref", equalTo("master"))
+                .withHeader(
+                    "Accept", new RegexPattern("application\\/vnd\\.github\\.v(\\d+)\\.raw"))
+                .willReturn(aResponse().withBody(FILE_CONTENTS)))));
+
+    server.stubFor(
+        withAuth.apply(
+            any(urlPathEqualTo(downloadPath)).willReturn(aResponse().withBody(FILE_CONTENTS)))));
+  }
+}
diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentialsTest.java
new file mode 100644
index 00000000000..e8db60dc508
--- /dev/null
+++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/gitlab/GitlabArtifactCredentialsTest.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.gitlab; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.MappingBuilder; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.function.Function; +import okhttp3.OkHttpClient; +import org.apache.commons.io.Charsets; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; +import ru.lanwen.wiremock.ext.WiremockResolver; + +@ExtendWith({WiremockResolver.class, TempDirectory.class}) +class GitlabArtifactCredentialsTest { + private final ObjectMapper objectMapper = new ObjectMapper(); + private final OkHttpClient okHttpClient = new OkHttpClient(); + + private final String DOWNLOAD_PATH = "/repos/spinnaker/testing/manifest.yml"; + private final String FILE_CONTENTS = "file contents"; + + @Test + void downloadWithToken(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + GitlabArtifactAccount account = + GitlabArtifactAccount.builder().name("my-gitlab-account").token("abc").build(); + + runTestCase(server, account, m -> m.withHeader("Private-Token", equalTo("abc"))); + } + + @Test + void downloadWithTokenFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "zzz".getBytes()); + + GitlabArtifactAccount account = + GitlabArtifactAccount.builder() + .name("my-gitlab-account") + .tokenFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase(server, account, m -> m.withHeader("Private-Token", equalTo("zzz"))); + } + + @Test + void downloadWithNoAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + GitlabArtifactAccount account = + GitlabArtifactAccount.builder().name("my-gitlab-account").build(); + + runTestCase(server, account, m -> m.withHeader("Authorization", absent())); + } + + private void runTestCase( + WireMockServer server, + GitlabArtifactAccount account, + Function expectedAuth) + throws IOException { + GitlabArtifactCredentials credentials = new GitlabArtifactCredentials(account, okHttpClient); + + Artifact artifact = + Artifact.builder() + .reference(server.baseUrl() + DOWNLOAD_PATH) + .version("master") + .type("gitlab/file") + .build(); + + prepareServer(server, expectedAuth); + + assertThat(credentials.download(artifact)) + .hasSameContentAs(new ByteArrayInputStream(FILE_CONTENTS.getBytes(Charsets.UTF_8))); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void prepareServer( + WireMockServer server, Function withAuth) { + server.stubFor( + withAuth.apply( + 
any(urlPathEqualTo(DOWNLOAD_PATH)).willReturn(aResponse().withBody(FILE_CONTENTS)))); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactCredentialsTest.java new file mode 100644 index 00000000000..812a42ebe73 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/helm/HelmArtifactCredentialsTest.java @@ -0,0 +1,173 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.helm; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.MappingBuilder; +import com.github.tomakehurst.wiremock.http.Fault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.function.Function; +import okhttp3.OkHttpClient; +import org.apache.commons.io.Charsets; +import org.assertj.core.api.Condition; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; +import ru.lanwen.wiremock.ext.WiremockResolver; + +@ExtendWith({WiremockResolver.class, TempDirectory.class}) +class HelmArtifactCredentialsTest { + private final ObjectMapper objectMapper = new ObjectMapper(); + private final OkHttpClient okHttpClient = new OkHttpClient(); + + private final String REPOSITORY = "my-repository"; + private final String CHART_PATH = "/my-chart/data.tgz"; + private final String CHART_NAME = "my-chart"; + private final String CHART_VERSION = "1.0.0"; + private final String FILE_CONTENTS = "file contents"; + + @Test + void downloadWithBasicAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + HelmArtifactAccount account = + HelmArtifactAccount.builder() + .repository(server.baseUrl() + "/" + REPOSITORY) + .name("my-helm-account") + .username("user") + .password("passw0rd") + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("user", "passw0rd")); + } + + @Test + void downloadWithBasicAuthFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "someuser:somepassw0rd!".getBytes()); + + HelmArtifactAccount account = + HelmArtifactAccount.builder() + .repository(server.baseUrl() + "/" + REPOSITORY) + 
.name("my-helm-account") + .usernamePasswordFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("someuser", "somepassw0rd!")); + } + + @Test + void downloadWithNoAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + HelmArtifactAccount account = + HelmArtifactAccount.builder() + .repository(server.baseUrl() + "/" + REPOSITORY) + .name("my-helm-account") + .build(); + + runTestCase(server, account, m -> m.withHeader("Authorization", absent())); + } + + @Test + void getArtifactNamesWithFailure(@WiremockResolver.Wiremock WireMockServer server) + throws IOException { + HelmArtifactAccount account = + HelmArtifactAccount.builder() + .repository(server.baseUrl() + "/" + REPOSITORY) + .name("my-helm-account") + .build(); + + runGetArtifactNamesWithFailureTestCase( + server, account, m -> m.withHeader("Authorization", absent())); + } + + private void runGetArtifactNamesWithFailureTestCase( + WireMockServer server, + HelmArtifactAccount account, + Function expectedAuth) { + HelmArtifactCredentials credentials = new HelmArtifactCredentials(account, okHttpClient); + + final String indexPath = "/" + REPOSITORY + "/index.yaml"; + + server.stubFor( + expectedAuth.apply( + any(urlPathEqualTo(indexPath)) + .willReturn(aResponse().withFault(Fault.CONNECTION_RESET_BY_PEER)))); + + assertThatExceptionOfType(NotFoundException.class) + .isThrownBy(credentials::getArtifactNames) + .has( + new Condition<>( + e -> e.getCause() != null && e.getCause().getCause() != null, "innerException")); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void runTestCase( + WireMockServer server, + HelmArtifactAccount account, + Function expectedAuth) + throws IOException { + HelmArtifactCredentials credentials = new HelmArtifactCredentials(account, okHttpClient); + + Artifact artifact = + Artifact.builder().name(CHART_NAME).version(CHART_VERSION).type("helm/chart").build(); + + prepareServer(server, expectedAuth); + + assertThat(credentials.download(artifact)) + .hasSameContentAs(new ByteArrayInputStream(FILE_CONTENTS.getBytes(Charsets.UTF_8))); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void prepareServer( + WireMockServer server, Function withAuth) throws IOException { + final String indexPath = "/" + REPOSITORY + "/index.yaml"; + IndexConfig indexConfig = getIndexConfig(server.baseUrl()); + + server.stubFor( + withAuth.apply( + any(urlPathEqualTo(indexPath)) + .willReturn(aResponse().withBody(objectMapper.writeValueAsString(indexConfig))))); + + server.stubFor( + withAuth.apply( + any(urlPathEqualTo(CHART_PATH)).willReturn(aResponse().withBody(FILE_CONTENTS)))); + } + + private IndexConfig getIndexConfig(String baseUrl) { + EntryConfig entryConfig = new EntryConfig(); + entryConfig.setName(CHART_NAME); + entryConfig.setVersion(CHART_VERSION); + entryConfig.setUrls(Collections.singletonList(baseUrl + CHART_PATH)); + + IndexConfig indexConfig = new IndexConfig(); + indexConfig.setEntries( + Collections.singletonMap("my-chart", Collections.singletonList(entryConfig))); + + return indexConfig; + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/helm/IndexParserTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/helm/IndexParserTest.java new file mode 100644 index 00000000000..ca706a8e2ca --- /dev/null +++ 
b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/helm/IndexParserTest.java @@ -0,0 +1,142 @@ +package com.netflix.spinnaker.clouddriver.artifacts.helm; + +import static org.assertj.core.api.Assertions.*; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +public class IndexParserTest { + + private String buildIndexYaml(String chartName, String version, List urls) { + StringBuilder indexYamlBuilder = new StringBuilder(); + indexYamlBuilder + .append("---\n") + .append("apiVersion: '1.0'\n") + .append("entries:\n") + .append(" ") + .append(chartName) + .append(":\n") + .append(" - name: ") + .append(chartName) + .append("\n") + .append(" version: ") + .append(version) + .append("\n") + .append(" urls:\n"); + urls.forEach(url -> indexYamlBuilder.append(" - ").append(url).append("\n")); + return indexYamlBuilder.toString(); + } + + private String addIndexEntry( + String index, String chartName, String newVersion, List urls) { + StringBuilder indexYamlBuilder = new StringBuilder(index); + indexYamlBuilder + .append(" - name: ") + .append(chartName) + .append("\n") + .append(" version: ") + .append(newVersion) + .append("\n") + .append(" urls:\n"); + urls.forEach(url -> indexYamlBuilder.append(" - ").append(url).append("\n")); + return indexYamlBuilder.toString(); + } + + @Test + public void findUrlsShouldResolveRelativeChartUrls() throws IOException { + IndexParser parser = new IndexParser("http://localhost/test/"); + + String indexYaml = + buildIndexYaml("test-chart1", "0.0.1", Arrays.asList("test-chart1-0.0.1.tgz")); + try (InputStream is = new ByteArrayInputStream(indexYaml.getBytes())) { + List actualUrls = parser.findUrls(is, "test-chart1", "0.0.1"); + assertThat(actualUrls).containsOnly("http://localhost/test/test-chart1-0.0.1.tgz"); + } + } + + @Test + public void findUrlsShouldResolveRelativeChartUrlsIfTrailingSlashMissingFromRepositoryUrl() + throws IOException { + IndexParser parser = new IndexParser("http://localhost/test"); + + String indexYaml = + buildIndexYaml("test-chart1", "0.0.1", Arrays.asList("test-chart1-0.0.1.tgz")); + try (InputStream is = new ByteArrayInputStream(indexYaml.getBytes())) { + List actualUrls = parser.findUrls(is, "test-chart1", "0.0.1"); + assertThat(actualUrls).containsOnly("http://localhost/test/test-chart1-0.0.1.tgz"); + } + } + + @Test + public void findUrlsShouldHandleAbsoluteChartUrls() throws IOException { + IndexParser parser = new IndexParser("http://localhost/test/"); + + String indexYaml = + buildIndexYaml( + "test-chart1", + "0.0.1", + Arrays.asList("https://absolute.url/test/test-chart1-0.0.1.tgz")); + try (InputStream is = new ByteArrayInputStream(indexYaml.getBytes())) { + List actualUrls = parser.findUrls(is, "test-chart1", "0.0.1"); + assertThat(actualUrls).containsOnly("https://absolute.url/test/test-chart1-0.0.1.tgz"); + } + } + + @Test + public void findUrlsShouldHandleMixedChartUrls() throws IOException { + IndexParser parser = new IndexParser("http://localhost/test/"); + + String indexYaml = + buildIndexYaml( + "test-chart1", + "0.0.1", + Arrays.asList( + "https://absolute.url/test/test-chart1-0.0.1.tgz", "test-chart1-0.0.1.tgz")); + try (InputStream is = new ByteArrayInputStream(indexYaml.getBytes())) { + List actualUrls = parser.findUrls(is, "test-chart1", 
"0.0.1"); + assertThat(actualUrls) + .containsExactlyInAnyOrder( + "https://absolute.url/test/test-chart1-0.0.1.tgz", + "http://localhost/test/test-chart1-0.0.1.tgz"); + } + } + + @ParameterizedTest + @ValueSource(strings = {"1.0.10", "1.0.90"}) + public void findUrlsShouldFindLatestNumericVersion(String maxVersion) throws IOException { + IndexParser parser = new IndexParser("http://localhost/test/"); + + String indexYaml = + buildIndexYaml("test-chart1", "1.0.9", Arrays.asList("test-chart1-1.0.9.tgz")); + indexYaml = + addIndexEntry( + indexYaml, + "test-chart1", + maxVersion, + Arrays.asList("test-chart1-" + maxVersion + ".tgz")); + try (InputStream is = new ByteArrayInputStream(indexYaml.getBytes())) { + List actualUrls = parser.findUrls(is, "test-chart1", null); + assertThat(actualUrls) + .containsOnly("http://localhost/test/test-chart1-" + maxVersion + ".tgz"); + } + } + + @Test + public void findUrlsShouldFindLatestNonNumericVersion() throws IOException { + IndexParser parser = new IndexParser("http://localhost/test/"); + + String indexYaml = buildIndexYaml("test-chart1", "abc", Arrays.asList("test-chart1-abc.tgz")); + indexYaml = + addIndexEntry(indexYaml, "test-chart1", "def", Arrays.asList("test-chart1-def.tgz")); + try (InputStream is = new ByteArrayInputStream(indexYaml.getBytes())) { + List actualUrls = parser.findUrls(is, "test-chart1", null); + assertThat(actualUrls).containsOnly("http://localhost/test/test-chart1-def.tgz"); + } + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentialsTest.java new file mode 100644 index 00000000000..f6d70b59669 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/http/HttpArtifactCredentialsTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.http; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.MappingBuilder; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.function.Function; +import okhttp3.OkHttpClient; +import org.apache.commons.io.Charsets; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; +import ru.lanwen.wiremock.ext.WiremockResolver; + +@ExtendWith({WiremockResolver.class, TempDirectory.class}) +class HttpArtifactCredentialsTest { + private final OkHttpClient okHttpClient = new OkHttpClient(); + + private final String URL = "/my/file.yaml"; + private final String FILE_CONTENTS = "file contents"; + + @Test + void downloadWithBasicAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + HttpArtifactAccount account = + HttpArtifactAccount.builder() + .name("my-http-account") + .username("user") + .password("passw0rd") + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("user", "passw0rd")); + } + + @Test + void downloadWithBasicAuthFromFile( + @TempDirectory.TempDir Path tempDir, @WiremockResolver.Wiremock WireMockServer server) + throws IOException { + Path authFile = tempDir.resolve("auth-file"); + Files.write(authFile, "someuser:somepassw0rd!".getBytes()); + + HttpArtifactAccount account = + HttpArtifactAccount.builder() + .name("my-http-account") + .usernamePasswordFile(authFile.toAbsolutePath().toString()) + .build(); + + runTestCase(server, account, m -> m.withBasicAuth("someuser", "somepassw0rd!")); + } + + @Test + void downloadWithNoAuth(@WiremockResolver.Wiremock WireMockServer server) throws IOException { + HttpArtifactAccount account = HttpArtifactAccount.builder().name("my-http-account").build(); + + runTestCase(server, account, m -> m.withHeader("Authorization", absent())); + } + + @Test + void throwExceptionOnNonSuccessfulResponse(@WiremockResolver.Wiremock WireMockServer server) { + HttpArtifactAccount account = HttpArtifactAccount.builder().name("my-http-account").build(); + HttpArtifactCredentials credentials = new HttpArtifactCredentials(account, okHttpClient); + Artifact artifact = + Artifact.builder().reference(server.baseUrl() + URL).type("http/file").build(); + server.stubFor(any(urlPathEqualTo(URL)).willReturn(aResponse().withStatus(404))); + + Throwable thrown = catchThrowable(() -> credentials.download(artifact)); + + assertThat(thrown) + .isInstanceOf(IOException.class) + .hasMessageContaining("404") + .hasMessageContaining(server.baseUrl()); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void runTestCase( + WireMockServer server, + HttpArtifactAccount account, + Function expectedAuth) + throws IOException { + HttpArtifactCredentials credentials = new HttpArtifactCredentials(account, okHttpClient); + + Artifact artifact = + Artifact.builder().reference(server.baseUrl() + URL).type("http/file").build(); + + prepareServer(server, expectedAuth); + + assertThat(credentials.download(artifact)) + .hasSameContentAs(new ByteArrayInputStream(FILE_CONTENTS.getBytes(Charsets.UTF_8))); + 
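    // Each stub above carries the expected auth matcher; a request sent without
+    // the right credentials would not match any stub and be reported as unmatched.
+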
assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void prepareServer( + WireMockServer server, Function withAuth) { + server.stubFor( + withAuth.apply(any(urlPathEqualTo(URL)).willReturn(aResponse().withBody(FILE_CONTENTS)))); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/DiskFreeingInputStreamTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/DiskFreeingInputStreamTest.java new file mode 100644 index 00000000000..0a19ba01088 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/DiskFreeingInputStreamTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.ByteArrayInputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; + +@ExtendWith(TempDirectory.class) +class DiskFreeingInputStreamTest { + @Test + void onlyFreeResourcesOnce(@TempDirectory.TempDir Path tempDir) throws IOException { + Path temp = tempDir.resolve("temp"); + Files.createDirectories(temp); + Path test = temp.resolve("test.txt"); + Files.write(test, "hello world".getBytes()); + FileInputStream fis = new FileInputStream(test.toFile()); + DiskFreeingInputStream dfis = new DiskFreeingInputStream(fis, temp); + assertThat(dfis) + .hasSameContentAs(new ByteArrayInputStream("hello world".getBytes())); // closes once + dfis.close(); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactCredentialsTest.java new file mode 100644 index 00000000000..0a0c374e50a --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/IvyArtifactCredentialsTest.java @@ -0,0 +1,137 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy; + +import static com.github.tomakehurst.wiremock.client.WireMock.*; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.netflix.spinnaker.clouddriver.artifacts.ivy.settings.IvySettings; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.apache.commons.io.Charsets; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; +import ru.lanwen.wiremock.ext.WiremockResolver; + +@ExtendWith({WiremockResolver.class, TempDirectory.class}) +class IvyArtifactCredentialsTest { + @Test + void downloadIvyBasedJar( + @WiremockResolver.Wiremock WireMockServer server, @TempDirectory.TempDir Path tempDir) + throws IOException { + server.stubFor( + any(urlEqualTo("/com/test/app/1.0/app-1.0.jar")) + .willReturn(aResponse().withBody("contents"))); + server.stubFor( + any(urlEqualTo("/com/test/app/1.0/app-1.0.xml")) + .willReturn( + aResponse() + .withBody( + "\n" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + ""))); + + String ivySettingsXml = + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + ""; + + assertDownloadArtifact(tempDir, ivySettingsXml); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + @Test + void downloadMavenBasedJarFromIvySettings( + @WiremockResolver.Wiremock WireMockServer server, @TempDirectory.TempDir Path tempDir) + throws IOException { + server.stubFor( + any(urlEqualTo("/com/test/app/1.0/app-1.0.jar")) + .willReturn(aResponse().withBody("contents"))); + + // only HEAD requests, should not be downloaded + server.stubFor( + head(urlEqualTo("/com/test/app/1.0/app-1.0-sources.jar")) + .willReturn(aResponse().withBody("contents"))); + server.stubFor( + head(urlEqualTo("/com/test/app/1.0/app-1.0-javadoc.jar")) + .willReturn(aResponse().withBody("contents"))); + + server.stubFor( + any(urlEqualTo("/com/test/app/1.0/app-1.0.pom")) + .willReturn( + aResponse() + .withBody( + "\n" + + " 4.0.0\n" + + " com.test\n" + + " app\n" + + " 1.0\n" + + ""))); + + String ivySettingsXml = + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + ""; + + assertDownloadArtifact(tempDir, ivySettingsXml); + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } + + private void assertDownloadArtifact(@TempDirectory.TempDir Path tempDir, String ivySettingsXml) + throws IOException { + IvyArtifactAccount account = + IvyArtifactAccount.builder().settings(IvySettings.parse(ivySettingsXml)).build(); + + Path cache = tempDir.resolve("cache"); + Files.createDirectories(cache); + + Artifact artifact = Artifact.builder().reference("com.test:app:1.0").build(); + + assertThat(new IvyArtifactCredentials(account, () -> cache).download(artifact)) + .hasSameContentAs(new ByteArrayInputStream("contents".getBytes(Charsets.UTF_8))); + assertThat(cache).doesNotExist(); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/ChainResolverTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/ChainResolverTest.java new file mode 100644 index 00000000000..b1983b5adb4 --- /dev/null +++ 
b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/ChainResolverTest.java @@ -0,0 +1,38 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import static org.assertj.core.api.Java6Assertions.assertThat; + +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +class ChainResolverTest { + @Test + void resolversAreUnwrapped() throws IOException { + ChainResolver chainResolver = + new XmlMapper() + .readValue( + "\n" + + " \n" + + "", + ChainResolver.class); + + assertThat(chainResolver.getResolvers().getIbiblio()).hasSize(1); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IvySettingsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IvySettingsTest.java new file mode 100644 index 00000000000..6c593ffe0a9 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/IvySettingsTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junitpioneer.jupiter.TempDirectory; + +@ExtendWith(TempDirectory.class) +class IvySettingsTest { + @Test + void minimalJCenterSettings(@TempDirectory.TempDir Path tempDir) { + BintrayResolver bintray = new BintrayResolver(); + bintray.setName("jcenter"); + + Resolvers resolvers = new Resolvers(); + resolvers.setBintray(Collections.singletonList(bintray)); + + IvySettings settings = new IvySettings(); + settings.setResolvers(resolvers); + + settings.toIvy(tempDir); + } + + @Test + void parseIvySettingsGeneratedForMavenRepositoryInArtifactory( + @TempDirectory.TempDir Path tempDir) { + String ivySettingsXml = + "\n" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + ""; + + IvySettings settings = IvySettings.parse(ivySettingsXml); + settings.toIvy(tempDir); + + assertThat(settings.getSettings().getDefaultResolver()).isEqualTo("main"); + } + + @Test + void atLeastOneResolverIsRequired() { + IvySettings settings = new IvySettings(); + assertThatThrownBy(() -> settings.toIvySettings(Paths.get("./"))) + .isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/SshResolverTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/SshResolverTest.java new file mode 100644 index 00000000000..96075c976a4 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/SshResolverTest.java @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +class SshResolverTest { + @Test + void patternsAreDeserialized() throws IOException { + SshResolver sshResolver = + new XmlMapper() + .readValue( + "\n" + + " \n" + + " \n" + + "", + SshResolver.class); + + assertThat(sshResolver.getIvy()).hasSize(1); + assertThat(sshResolver.getArtifact()).hasSize(1); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/UrlResolverTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/UrlResolverTest.java new file mode 100644 index 00000000000..365b09c953a --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/ivy/settings/UrlResolverTest.java @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.ivy.settings; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.dataformat.xml.XmlMapper; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +class UrlResolverTest { + @Test + void patternsAreDeserialized() throws IOException { + UrlResolver urlResolver = + new XmlMapper() + .readValue( + "\n" + + " \n" + + " \n" + + "", + UrlResolver.class); + + assertThat(urlResolver.getIvy()).hasSize(1); + assertThat(urlResolver.getArtifact()).hasSize(1); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactCredentialsTest.java new file mode 100644 index 00000000000..ae32edf9a52 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/maven/MavenArtifactCredentialsTest.java @@ -0,0 +1,187 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.artifacts.maven;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import java.io.ByteArrayInputStream;
+import java.nio.charset.StandardCharsets;
+import javax.annotation.Nullable;
+import okhttp3.OkHttpClient;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import ru.lanwen.wiremock.ext.WiremockResolver;
+
+@ExtendWith(WiremockResolver.class)
+class MavenArtifactCredentialsTest {
+  @Test
+  void release(@WiremockResolver.Wiremock WireMockServer server) {
+    server.stubFor(
+        any(urlPathMatching("/com/test/app/maven-metadata.xml"))
+            .willReturn(
+                aResponse()
+                    .withBody(
+                        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+                            + "<metadata>\n"
+                            + "  <groupId>com.test</groupId>\n"
+                            + "  <artifactId>app</artifactId>\n"
+                            + "  <version>1.1</version>\n"
+                            + "  <versioning>\n"
+                            + "    <latest>1.1</latest>\n"
+                            + "    <release>1.1</release>\n"
+                            + "    <versions>\n"
+                            + "      <version>1.0</version>\n"
+                            + "      <version>1.1</version>\n"
+                            + "    </versions>\n"
+                            + "    <lastUpdated>20190322061505</lastUpdated>\n"
+                            + "  </versioning>\n"
+                            + "</metadata>")));
+
+    assertResolvable(server, "latest.release", "1.1");
+    assertResolvable(server, "RELEASE", "1.1");
+    assertResolvable(server, "LATEST", "1.1");
+    assertResolvable(server, "1.1", "1.1");
+    assertResolvable(server, "1.0", "1.0");
+  }
+
+  @Test
+  void snapshot(@WiremockResolver.Wiremock WireMockServer server) {
+    server.stubFor(
+        any(urlPathMatching("/com/test/app/maven-metadata.xml"))
+            .willReturn(
+                aResponse()
+                    .withBody(
+                        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+                            + "<metadata>\n"
+                            + "  <groupId>com.test</groupId>\n"
+                            + "  <artifactId>app</artifactId>\n"
+                            + "  <version>1.1-SNAPSHOT</version>\n"
+                            + "  <versioning>\n"
+                            + "    <latest>1.1-SNAPSHOT</latest>\n"
+                            + "    <versions>\n"
+                            + "      <version>1.0-SNAPSHOT</version>\n"
+                            + "      <version>1.1-SNAPSHOT</version>\n"
+                            + "    </versions>\n"
+                            + "    <lastUpdated>20190322061505</lastUpdated>\n"
+                            + "  </versioning>\n"
+                            + "</metadata>")));
+
+    server.stubFor(
+        any(urlPathMatching("/com/test/app/1.1-SNAPSHOT/maven-metadata.xml"))
+            .willReturn(
+                aResponse()
+                    .withBody(
+                        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+                            + "<metadata>\n"
+                            + "  <groupId>com.test</groupId>\n"
+                            + "  <artifactId>app</artifactId>\n"
+                            + "  <version>1.1-SNAPSHOT</version>\n"
+                            + "  <versioning>\n"
+                            + "    <snapshot>\n"
+                            + "      <timestamp>20190322.061344</timestamp>\n"
+                            + "      <buildNumber>90</buildNumber>\n"
+                            + "    </snapshot>\n"
+                            + "    <lastUpdated>20190322061504</lastUpdated>\n"
+                            + "    <snapshotVersions>\n"
+                            + "      <snapshotVersion>\n"
+                            + "        <classifier>sources</classifier>\n"
+                            + "        <extension>jar</extension>\n"
+                            + "        <value>1.1-20190322.061344-90</value>\n"
+                            + "        <updated>20190322061344</updated>\n"
+                            + "      </snapshotVersion>\n"
+                            + "      <snapshotVersion>\n"
+                            + "        <extension>jar</extension>\n"
+                            + "        <value>1.1-20190322.061344-90</value>\n"
+                            + "        <updated>20190322061344</updated>\n"
+                            + "      </snapshotVersion>\n"
+                            + "      <snapshotVersion>\n"
+                            + "        <extension>pom</extension>\n"
+                            + "        <value>1.1-20190322.061344-90</value>\n"
+                            + "        <updated>20190322061344</updated>\n"
+                            + "      </snapshotVersion>\n"
+                            + "    </snapshotVersions>\n"
+                            + "  </versioning>\n"
+                            + "</metadata>")));
+
+    assertResolvable(server, "latest.integration", "1.1-20190322.061344-90", "1.1-SNAPSHOT");
+    assertResolvable(server, "LATEST", "1.1-20190322.061344-90", "1.1-SNAPSHOT");
+    assertResolvable(server, "SNAPSHOT", "1.1-20190322.061344-90", "1.1-SNAPSHOT");
+    assertResolvable(server, "1.1-SNAPSHOT", "1.1-20190322.061344-90", "1.1-SNAPSHOT");
+  }
+
+  @Test
+  void rangeVersion(@WiremockResolver.Wiremock WireMockServer server) {
+    server.stubFor(
+        any(urlPathMatching("/com/test/app/maven-metadata.xml"))
+            .willReturn(
+                aResponse()
+                    .withBody(
+                        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+                            + "<metadata>\n"
+                            + "  <groupId>com.test</groupId>\n"
+                            + "  <artifactId>app</artifactId>\n"
+                            + "  <version>2.0</version>\n"
+                            + "  <versioning>\n"
+                            + "    <latest>2.0</latest>\n"
+                            + "    <release>2.0</release>\n"
+                            + "    <versions>\n"
+                            + "      <version>1.0</version>\n"
+                            + "      <version>1.1</version>\n"
+                            + "      <version>2.0</version>\n"
+                            + "    </versions>\n"
+                            + "    <lastUpdated>20190322061505</lastUpdated>\n"
+                            + "  </versioning>\n"
+                            + "</metadata>")));
+
+    assertResolvable(server, "[1.0,)", "2.0");
+    assertResolvable(server, "[1.0,2.0)", "1.1");
+    assertResolvable(server, "(,2.0]", "2.0");
+  }
+
+  private void assertResolvable(WireMockServer server, String version, String expectedVersion) {
+    assertResolvable(server, version, expectedVersion, null);
+  }
+
+  private void assertResolvable(
+      WireMockServer server,
+      String version,
+      String expectedVersion,
+      @Nullable String expectedSnapshotVersion) {
+    String jarUrl =
+        "/com/test/app/"
+ + (expectedSnapshotVersion == null ? expectedVersion : expectedSnapshotVersion) + + "/app-" + + expectedVersion + + ".jar"; + + server.stubFor(any(urlEqualTo(jarUrl)).willReturn(aResponse().withBody(expectedVersion))); + + MavenArtifactAccount account = + MavenArtifactAccount.builder().repositoryUrl(server.baseUrl()).build(); + + Artifact artifact = Artifact.builder().reference("com.test:app:" + version).build(); + + assertThat(new MavenArtifactCredentials(account, new OkHttpClient()).download(artifact)) + .hasSameContentAs( + new ByteArrayInputStream(expectedVersion.getBytes(StandardCharsets.UTF_8))); + + assertThat(server.findUnmatchedRequests().getRequests()).isEmpty(); + } +} diff --git a/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentialsTest.java b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentialsTest.java new file mode 100644 index 00000000000..ed7270c72e1 --- /dev/null +++ b/clouddriver-artifacts/src/test/java/com/netflix/spinnaker/clouddriver/artifacts/s3/S3ArtifactCredentialsTest.java @@ -0,0 +1,220 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.artifacts.s3; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.testcontainers.containers.localstack.LocalStackContainer.Service.S3; + +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.AmazonS3Exception; +import com.amazonaws.services.s3.model.S3Object; +import com.netflix.spectator.api.Counter; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.Functions; +import com.netflix.spectator.api.Gauge; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.aws.SpectatorRequestMetricCollector; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.aws.AwsComponents; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Optional; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.springframework.beans.factory.annotation.Autowired; +import 
org.springframework.context.annotation.Bean; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit.jupiter.SpringExtension; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.localstack.LocalStackContainer; +import org.testcontainers.utility.DockerImageName; + +@ExtendWith(SpringExtension.class) +// Include AwsComponents for the SpectatorMetricCollector bean +@ContextConfiguration( + classes = {AwsComponents.class, S3ArtifactCredentialsTest.TestConfiguration.class}) +class S3ArtifactCredentialsTest { + + private static DockerImageName localstackImage = + DockerImageName.parse( + "localstack/localstack:0.12.18"); // 0.12.18 is the latest as of 1-oct-21 + + private static LocalStackContainer localstack = + DockerClientFactory.instance().isDockerAvailable() + ? new LocalStackContainer(localstackImage).withServices(S3) + : null; + + private static AmazonS3 amazonS3; + + private static final S3ArtifactAccount account = + S3ArtifactAccount.builder().name("my-s3-account").build(); + + private static final String BUCKET_NAME = "my-bucket"; + + private static final String KEY_NAME = "my-file"; + + private static final String CONTENTS = "arbitrary file contents"; + + private final Artifact artifact = + Artifact.builder() + .name("my-s3-artifact") + .reference("s3://" + BUCKET_NAME + "/" + KEY_NAME) + .build(); + + private final S3ArtifactProviderProperties s3ArtifactProviderProperties = + new S3ArtifactProviderProperties(); + + @Autowired Registry registry; + + @BeforeAll + static void setupOnce() { + assumeTrue(DockerClientFactory.instance().isDockerAvailable()); + localstack.start(); + amazonS3 = + AmazonS3ClientBuilder.standard() + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration( + localstack.getEndpoint().toString(), localstack.getRegion())) + .withCredentials( + new AWSStaticCredentialsProvider( + new BasicAWSCredentials(localstack.getAccessKey(), localstack.getSecretKey()))) + .withRequestHandlers( + new S3ArtifactCredentials.S3ArtifactRequestHandler(account.getName())) + .build(); + + // Create a bucket so there's a place to retrieve from + amazonS3.createBucket(BUCKET_NAME); + + // Create a file so there's something to download + amazonS3.putObject(BUCKET_NAME, KEY_NAME, CONTENTS); + } + + @Test + void normalDownload() throws IOException { + S3ArtifactCredentials s3ArtifactCredentials = + new S3ArtifactCredentials(account, amazonS3, s3ArtifactProviderProperties); + try (InputStream artifactStream = s3ArtifactCredentials.download(artifact)) { + String actual = new String(artifactStream.readAllBytes(), StandardCharsets.UTF_8); + assertEquals(CONTENTS, actual); + } + + // Verify that metrics were reported + assertThat(registry.counters()).hasSize(1); + Counter counter = registry.counters().findFirst().orElseThrow(AssertionError::new); + assertThat(counter.id().name()).isEqualTo("aws.request.requestCount"); + assertThat(counter.id().tags()).contains(Tag.of("serviceName", "Amazon S3")); + assertThat(counter.actualCount()).isEqualTo(1); + assertThat(registry.gauges()).hasSize(3); + + // Verify that the tag that comes from the request handler is present on an + // arbitrary gauge + Gauge gauge = + registry + .gauges() + .filter(Functions.nameEquals("aws.request.httpClientPoolAvailableCount")) + .findFirst() + .orElseThrow(AssertionError::new); + assertThat(gauge.id().tags()) + .contains( + Tag.of( + SpectatorRequestMetricCollector.DEFAULT_HANDLER_CONTEXT_KEY.getName(), + 
account.getName())); + } + + @Test + void normalDownloadWithValidation() throws IOException { + S3ArtifactValidator s3ArtifactValidator = spy(DummyS3ArtifactValidator.class); + S3ArtifactCredentials s3ArtifactCredentials = + new S3ArtifactCredentials( + account, Optional.of(s3ArtifactValidator), amazonS3, s3ArtifactProviderProperties); + try (InputStream artifactStream = s3ArtifactCredentials.download(artifact)) { + String actual = new String(artifactStream.readAllBytes(), StandardCharsets.UTF_8); + assertEquals(CONTENTS, actual); + } + + ArgumentCaptor s3ObjectCaptor = ArgumentCaptor.forClass(S3Object.class); + verify(s3ArtifactValidator).validate(eq(amazonS3), s3ObjectCaptor.capture()); + + assertEquals(BUCKET_NAME, s3ObjectCaptor.getValue().getBucketName()); + assertEquals(KEY_NAME, s3ObjectCaptor.getValue().getKey()); + } + + @Test + void invalidReference() { + Artifact otherArtifact = + Artifact.builder().name("invalid-reference").reference("no-s3-prefix").build(); + S3ArtifactCredentials s3ArtifactCredentials = + new S3ArtifactCredentials(account, amazonS3, s3ArtifactProviderProperties); + assertThatThrownBy(() -> s3ArtifactCredentials.download(otherArtifact)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + void bucketDoesNotExist() { + Artifact otherArtifact = + Artifact.builder() + .name("does-not-exist-artifact") + .reference("s3://does-not-exist/foo") + .build(); + S3ArtifactCredentials s3ArtifactCredentials = + new S3ArtifactCredentials(account, amazonS3, s3ArtifactProviderProperties); + assertThatThrownBy(() -> s3ArtifactCredentials.download(otherArtifact)) + .isInstanceOf(AmazonS3Exception.class) + .hasMessageContaining("The specified bucket does not exist"); + } + + @Test + void fileNotFound() { + String bucketName = "s3://" + BUCKET_NAME + "/does-not-exist"; + Artifact otherArtifact = + Artifact.builder().name("file-not-found-artifact").reference(bucketName).build(); + S3ArtifactCredentials s3ArtifactCredentials = + new S3ArtifactCredentials(account, amazonS3, s3ArtifactProviderProperties); + assertThatThrownBy(() -> s3ArtifactCredentials.download(otherArtifact)) + .isInstanceOf(NotFoundException.class) + .hasMessageContaining(bucketName + " not found"); + } + + static class DummyS3ArtifactValidator implements S3ArtifactValidator { + @Override + public InputStream validate(AmazonS3 amazonS3, S3Object s3obj) { + return s3obj.getObjectContent(); + } + } + + static class TestConfiguration { + @Bean + Registry registry() { + return new DefaultRegistry(); + } + } +} diff --git a/clouddriver-artifacts/src/test/resources/logback.xml b/clouddriver-artifacts/src/test/resources/logback.xml new file mode 100644 index 00000000000..60077bbaf0d --- /dev/null +++ b/clouddriver-artifacts/src/test/resources/logback.xml @@ -0,0 +1,37 @@ + + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + + + + \ No newline at end of file diff --git a/clouddriver-aws/README.md b/clouddriver-aws/README.md index f90cf619e90..04798189e2c 100644 --- a/clouddriver-aws/README.md +++ b/clouddriver-aws/README.md @@ -8,8 +8,8 @@ account. In the managing account, the AWS credentials need to have an IAM policy that grants the ability to STS assumeRole to the target accounts. In each managed account there needs to exist an IAM role that provides access to -all the AWS operations that Spinnaker perform against that account. 
diff --git a/clouddriver-aws/UserData.md b/clouddriver-aws/UserData.md index 8d818114730..0fcc465ad6f 100644 --- a/clouddriver-aws/UserData.md +++ b/clouddriver-aws/UserData.md @@ -13,10 +13,10 @@ The location of the template file is controlled by the `udf.udfRoot` property an ````yaml udf: udfRoot: /apps/nflx-udf - defaultLegacyUdf: true + defaultLegacyUdf: false ```` -You almost certainly want to change `udf.defaultLegacyUdf=false`, and possibly want to change the location on the filesystem where the template file lives to suit your deployment. +As of Spinnaker 1.15, `udf.defaultLegacyUdf` defaults to false. You almost certainly want to change the location on the filesystem where the template file lives to suit your deployment. ## Template file @@ -68,4 +68,3 @@ EC2_REGION="us-east-1" # Customizing user data per deploy The AWS create/clone server group operations support a `base64UserData` attribute which is appended to any existing template to allow any custom user data to be injected during a deployment. -
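The UserData.md hunk above says the per-deploy `base64UserData` attribute is appended to the rendered template. A hedged sketch of that merge, assuming the custom payload is decoded before being appended (the method and variable names are mine, not clouddriver internals):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class UserDataMergeSketch {
  /** Appends base64-encoded custom user data to an already-rendered template. */
  static String merge(String renderedTemplate, String base64UserData) {
    String custom =
        new String(Base64.getDecoder().decode(base64UserData), StandardCharsets.UTF_8);
    // The custom payload lands after the template-derived user data.
    return renderedTemplate + "\n" + custom;
  }
}
```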
"io.spinnaker.kork:kork-aws" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-secrets" + implementation "io.spinnaker.kork:kork-security" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-moniker" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "io.reactivex:rxjava" + implementation "org.apache.httpcomponents:httpclient" + implementation "org.apache.httpcomponents:httpcore" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation 'com.aestasit.infrastructure.sshoogr:sshoogr:0.9.25' + implementation 'com.jcraft:jsch.agentproxy.connector-factory:0.0.9' + implementation 'com.jcraft:jsch.agentproxy.jsch:0.0.9' + implementation "com.github.wnameless.json:json-flattener:0.11.1" + + testImplementation "io.spinnaker.kork:kork-exceptions" + testImplementation "cglib:cglib-nodep" + testImplementation "com.natpryce:hamkrest" + testImplementation "com.google.guava:guava" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework.boot:spring-boot-starter-test" + testImplementation "org.springframework:spring-test" + + integrationImplementation project(":clouddriver-web") + integrationImplementation "org.springframework:spring-test" + integrationImplementation sourceSets.test.output + integrationImplementation sourceSets.main.output + integrationImplementation "io.rest-assured:rest-assured" +} + +task integrationTest(type: Test) { + description = "Runs AWS EC2 provider integration tests." + group = 'verification' + + environment "PROJECT_ROOT", project.rootDir.toString() + useJUnitPlatform() + + testClassesDirs = sourceSets.integration.output.classesDirs + classpath = sourceSets.integration.runtimeClasspath + shouldRunAfter test + maxParallelForks = 4 + + minHeapSize = "512m" + maxHeapSize = "${testJvmMaxMemory}" + + testlogger { + theme 'mocha' + showFailedStandardStreams true + } } diff --git a/clouddriver-aws/src/integration/README.md b/clouddriver-aws/src/integration/README.md new file mode 100644 index 00000000000..7886d6ff5a3 --- /dev/null +++ b/clouddriver-aws/src/integration/README.md @@ -0,0 +1,29 @@ +# AWS EC2 Integration tests + +## Running the tests + +To manually run the gradle task: +```bash +$> ./gradlew :clouddriver-aws:integrationTest +``` + +## Guidance for modifying this package + +### When to add a new test + +Existing or new AWS EC2 provider features of significant scope should include integration tests. + +Examples of qualifying changes include (but are not limited to): +* Changes to validations +* New or significant changes to an existing atomic operation +* Addition of new AWS launch templates features +* Addition of new AWS Autoscaling features +* Complex refactoring of code areas not included in integration tests + +### Changing existing tests + +In general, existing test cases should function as-is after new contributions to ensure existing features continue to function as expected. +Possible exceptions to this guidance may include: + +* Updates to internal implementation details (required `@Beans`, etc.) 
diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsBaseSpec.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsBaseSpec.java new file mode 100644 index 00000000000..1c948161f12 --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsBaseSpec.java @@ -0,0 +1,129 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws; + +import static io.restassured.RestAssured.get; +import static org.junit.jupiter.api.Assertions.fail; + +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import io.restassured.http.ContentType; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BooleanSupplier; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import org.springframework.boot.web.server.LocalServerPort; +import org.springframework.context.annotation.Import; +import org.springframework.test.context.TestPropertySource; + +/** Base class with common config and helper methods for test classes */ +@Import(AwsTestConfiguration.class) +@SpringBootTest( + classes = {Main.class}, + webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) +@TestPropertySource(properties = {"spring.config.location = classpath:clouddriver.yml"}) +public abstract class AwsBaseSpec { + + @Value("${aws.primaryAccount}") + protected String AWS_ACCOUNT_NAME; + + @Value("${aws.enabled}") + protected Boolean AWS_ENABLED; + + @MockBean protected AmazonClientProvider mockAwsClientProvider; + @MockBean protected RegionScopedProviderFactory.RegionScopedProvider mockRegionScopedProvider; + @MockBean protected Front50Service mockFront50Service; + + @LocalServerPort int port; + + protected final int TASK_RETRY_SECONDS = 3; + + protected static final String PATH_PREFIX = "classpath:testinputs/"; + protected static final String GET_TASK_PATH = "/task/"; + protected static final String EXPECTED_DEPLOY_SUCCESS_MSG = "Deployed EC2 server group"; + + protected static final String CREATE_SERVER_GROUP_OP_PATH = "/aws/ops/createServerGroup"; + protected static final String UPDATE_LAUNCH_TEMPLATE_OP_PATH
= "/aws/ops/updateLaunchTemplate"; + + protected String getBaseUrl() { + return "http://localhost:" + port; + } + + protected void retryUntilTrue(BooleanSupplier func, String failMsg, int retrySeconds) + throws InterruptedException { + for (int i = 0; i < retrySeconds; i++) { + if (!func.getAsBoolean()) { + Thread.sleep(1000); + } else { + return; + } + } + fail(failMsg); + } + + protected String getTaskUpdatesAfterCompletion(String taskId) throws InterruptedException { + AtomicReference taskHistoryToRet = new AtomicReference<>(); + retryUntilTrue( + () -> { + List taskHistory = + get(getBaseUrl() + GET_TASK_PATH + taskId) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + + // try until the response indicates that orchestration has completed or failed + if (!taskHistory.toString().contains("Orchestration completed") + && !taskHistory.toString().contains("Orchestration failed")) { + return false; + } + + taskHistoryToRet.set(taskHistory.toString()); + return true; + }, + String.format( + "Failed to retrieve all task updates from task response in %s seconds.", + TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + return taskHistoryToRet.get(); + } + + protected DefaultCacheResult buildCacheResult( + Map attributes, String namespace, String key) { + Collection dataPoints = new LinkedList<>(); + dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap())); + + Map> dataMap = new HashMap<>(); + dataMap.put(namespace, dataPoints); + + return new DefaultCacheResult(dataMap); + } +} diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsTestConfiguration.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsTestConfiguration.java new file mode 100644 index 00000000000..385b58f4f7c --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsTestConfiguration.java @@ -0,0 +1,70 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsTestConfiguration.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsTestConfiguration.java new file mode 100644 index 00000000000..385b58f4f7c --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/AwsTestConfiguration.java @@ -0,0 +1,70 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; + +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.CredentialsParser; +import org.mockito.stubbing.Answer; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Primary; +import org.springframework.test.context.TestPropertySource; + +@TestConfiguration +@TestPropertySource(properties = {"spring.config.location = classpath:clouddriver.yml"}) +public class AwsTestConfiguration { + + @Value("${aws.primaryAccount}") + private String AWS_ACCOUNT_NAME; + + @Bean + @Primary + public CompositeCredentialsRepository<AccountCredentials> compositeCredentialsRepository() { + NetflixAmazonCredentials awsCreds = TestCredential.named(AWS_ACCOUNT_NAME); + CompositeCredentialsRepository<AccountCredentials> repo = + mock(CompositeCredentialsRepository.class); + when(repo.getCredentials(eq(AWS_ACCOUNT_NAME), eq("aws"))).thenReturn(awsCreds); + when(repo.getFirstCredentialsWithName(AWS_ACCOUNT_NAME)).thenReturn(awsCreds); + return repo; + } + + @Bean + @Primary + public CredentialsParser<AccountsConfiguration.Account, NetflixAmazonCredentials> amazonCredentialsParser() { + CredentialsParser<AccountsConfiguration.Account, NetflixAmazonCredentials> parser = mock(CredentialsParser.class, withSettings().verboseLogging()); + when(parser.parse(any())) + .thenAnswer( + (Answer<NetflixAmazonCredentials>) + invocation -> { + AccountsConfiguration.Account account = + invocation.getArgument(0, AccountsConfiguration.Account.class); + return TestCredential.named(account.getName()); + }); + return parser; + } +} diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupLaunchConfigEnabledSpec.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupLaunchConfigEnabledSpec.java new file mode 100644 index 00000000000..0057622bc6a --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupLaunchConfigEnabledSpec.java @@ -0,0 +1,288 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.test; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult; +import com.amazonaws.services.autoscaling.model.DescribeLaunchConfigurationsRequest; +import com.amazonaws.services.autoscaling.model.DescribeLaunchConfigurationsResult; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.DescribeAddressesResult; +import com.amazonaws.services.ec2.model.DescribeImagesRequest; +import com.amazonaws.services.ec2.model.DescribeImagesResult; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult; +import com.amazonaws.services.ec2.model.DescribeInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeInstancesResult; +import com.amazonaws.services.ec2.model.DescribeKeyPairsResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeSubnetsResult; +import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult; +import com.amazonaws.services.ec2.model.DescribeVpcsResult; +import com.amazonaws.services.ec2.model.Image; +import com.amazonaws.services.ec2.model.InstanceTypeInfo; +import com.amazonaws.services.ec2.model.ProcessorInfo; +import com.amazonaws.services.ec2.model.SecurityGroup; +import com.amazonaws.services.ec2.model.Subnet; +import com.amazonaws.services.ec2.model.Tag; +import com.netflix.spinnaker.clouddriver.aws.AwsBaseSpec; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.utils.TestUtils; +import io.restassured.http.ContentType; +import java.util.Arrays; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.springframework.beans.factory.annotation.Value; + +/** + * Test class with launch configuration settings enabled in clouddriver.yml, for CreateServerGroup + * operation. 
+ */ +public class CreateServerGroupLaunchConfigEnabledSpec extends AwsBaseSpec { + + @Value("${aws.features.launch-templates:#{null}}") + Boolean AWS_LAUNCH_TEMPLATES; + + private final String EXPECTED_SERVER_GROUP_NAME = "myAwsApp-myStack-v000"; + private final String EXPECTED_LAUNCH_CONFIG_NAME = + EXPECTED_SERVER_GROUP_NAME + "-"; // partial name without the timestamp part + private final String EXPECTED_DEPLOY_WITH_LC_MSG_FMT = + "Deploying ASG %s with launch configuration %s"; + + private AmazonAutoScaling mockAutoScaling = mock(AmazonAutoScaling.class); + private AmazonEC2 mockEc2 = mock(AmazonEC2.class); + + @BeforeEach + public void setup() { + + // mock EC2 responses + when(mockRegionScopedProvider.getAmazonEC2()).thenReturn(mockEc2); + when(mockAwsClientProvider.getAmazonEC2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockEc2); + + when(mockEc2.describeSecurityGroups(any(DescribeSecurityGroupsRequest.class))) + .thenReturn( + new DescribeSecurityGroupsResult() + .withSecurityGroups( + new SecurityGroup().withGroupId("sg-123").withGroupName("myAwsApp"))); + when(mockEc2.describeVpcClassicLink()).thenReturn(new DescribeVpcClassicLinkResult()); + when(mockEc2.describeAddresses()).thenReturn(new DescribeAddressesResult()); + when(mockEc2.describeVpcs()).thenReturn(new DescribeVpcsResult()); + when(mockEc2.describeKeyPairs()).thenReturn(new DescribeKeyPairsResult()); + when(mockEc2.describeInstances(any(DescribeInstancesRequest.class))) + .thenReturn(new DescribeInstancesResult()); + when(mockEc2.describeImages(any(DescribeImagesRequest.class))) + .thenReturn( + new DescribeImagesResult() + .withImages( + new Image() + .withImageId("ami-12345") + .withVirtualizationType("hvm") + .withArchitecture("x86_64"))); + when(mockEc2.describeInstanceTypes(any(DescribeInstanceTypesRequest.class))) + .thenReturn( + new DescribeInstanceTypesResult() + .withInstanceTypes( + new InstanceTypeInfo() + .withInstanceType("c3.large") + .withProcessorInfo( + new ProcessorInfo().withSupportedArchitectures("i386", "x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm", "paravirtual")))); + when(mockEc2.describeSubnets()) + .thenReturn( + new DescribeSubnetsResult() + .withSubnets( + Arrays.asList( + new Subnet() + .withSubnetId("subnetId1") + .withAvailabilityZone("us-west-1a") + .withTags( + new Tag() + .withKey("immutable_metadata") + .withValue( + "{\"purpose\": \"internal\", \"target\": \"ec2\" }")), + new Subnet() + .withSubnetId("subnetId2") + .withAvailabilityZone("us-west-2a")))); + + // mock autoscaling response + when(mockAwsClientProvider.getAutoScaling(any(NetflixAmazonCredentials.class), anyString())) + .thenReturn(mockAutoScaling); + when(mockAwsClientProvider.getAutoScaling( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockAutoScaling); + + when(mockAutoScaling.describeAutoScalingGroups(any(DescribeAutoScalingGroupsRequest.class))) + .thenReturn(new DescribeAutoScalingGroupsResult()); + when(mockAutoScaling.describeLaunchConfigurations( + any(DescribeLaunchConfigurationsRequest.class))) + .thenReturn(new DescribeLaunchConfigurationsResult()); + } + + @DisplayName("Assert AWS is enabled and launch template features are disabled") + @Test + public void configTest() { + assertTrue(AWS_ENABLED); + assertEquals("aws-account1", AWS_ACCOUNT_NAME); + assertNull(AWS_LAUNCH_TEMPLATES); // assert that launch template config is absent / disabled + } + + @DisplayName( + "Given request with launch configuration and 
default settings with EC2 on-demand, " + "successfully submit createServerGroup operation with requested configuration") + @Test + public void createServerGroup_defaultSettings_expect_launchConfiguration() + throws InterruptedException { + // given + Map<String, Object> requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("tags", Map.of("testPurpose", "testing default settings")) + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + String.format( + EXPECTED_DEPLOY_WITH_LC_MSG_FMT, + EXPECTED_SERVER_GROUP_NAME, + EXPECTED_LAUNCH_CONFIG_NAME))); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + + // capture and assert arguments + ArgumentCaptor<CreateLaunchConfigurationRequest> createLaunchConfigArgs = + ArgumentCaptor.forClass(CreateLaunchConfigurationRequest.class); + verify(mockAutoScaling).createLaunchConfiguration(createLaunchConfigArgs.capture()); + CreateLaunchConfigurationRequest createLcReq = createLaunchConfigArgs.getValue(); + + assertTrue(createLcReq.getLaunchConfigurationName().contains(EXPECTED_LAUNCH_CONFIG_NAME)); + assertEquals("ami-12345", createLcReq.getImageId()); + assertEquals("c3.large", createLcReq.getInstanceType()); + assertEquals(1, createLcReq.getSecurityGroups().size()); + assertEquals("sg-123", createLcReq.getSecurityGroups().get(0)); + + ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertTrue(createAsgReq.getLaunchConfigurationName().contains(EXPECTED_LAUNCH_CONFIG_NAME)); + assertEquals(1, createAsgReq.getTags().size()); + assertEquals("testPurpose", createAsgReq.getTags().get(0).getKey()); + assertEquals("testing default settings", createAsgReq.getTags().get(0).getValue()); + } + + @DisplayName( + "Given request with launch configuration and default settings with Ec2 Spot, " + "successfully submit createServerGroup operation with requested configuration") + @Test + public void createServerGroup_lcAndSpot_expect_launchConfiguration() throws InterruptedException { + // given + Map<String, Object> requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("spotPrice", "1.5") + .withValue("securityGroup", new String[] {"myAwsApp"}) + .withValue("setLaunchTemplate", false) + .withValue("tags", Map.of("testPurpose", "testing default settings for spot")) + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + String.format( + EXPECTED_DEPLOY_WITH_LC_MSG_FMT, + EXPECTED_SERVER_GROUP_NAME, + EXPECTED_LAUNCH_CONFIG_NAME))); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + + // capture and assert arguments + ArgumentCaptor<CreateLaunchConfigurationRequest>
createLaunchConfigArgs = + ArgumentCaptor.forClass(CreateLaunchConfigurationRequest.class); + verify(mockAutoScaling).createLaunchConfiguration(createLaunchConfigArgs.capture()); + CreateLaunchConfigurationRequest createLcReq = createLaunchConfigArgs.getValue(); + + assertTrue(createLcReq.getLaunchConfigurationName().contains(EXPECTED_LAUNCH_CONFIG_NAME)); + assertEquals("ami-12345", createLcReq.getImageId()); + assertEquals("c3.large", createLcReq.getInstanceType()); + assertEquals("1.5", createLcReq.getSpotPrice()); + assertEquals(1, createLcReq.getSecurityGroups().size()); + assertEquals("sg-123", createLcReq.getSecurityGroups().get(0)); + + ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertTrue(createAsgReq.getLaunchConfigurationName().contains(EXPECTED_LAUNCH_CONFIG_NAME)); + assertEquals(1, createAsgReq.getTags().size()); + assertEquals("testPurpose", createAsgReq.getTags().get(0).getKey()); + assertEquals("testing default settings for spot", createAsgReq.getTags().get(0).getValue()); + } +} diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupLaunchTemplatesEnabledSpec.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupLaunchTemplatesEnabledSpec.java new file mode 100644 index 00000000000..aa93d5eaa26 --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupLaunchTemplatesEnabledSpec.java @@ -0,0 +1,525 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.test; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult; +import com.amazonaws.services.ec2.model.DescribeAddressesResult; +import com.amazonaws.services.ec2.model.DescribeImagesRequest; +import com.amazonaws.services.ec2.model.DescribeImagesResult; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult; +import com.amazonaws.services.ec2.model.DescribeInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeInstancesResult; +import com.amazonaws.services.ec2.model.DescribeKeyPairsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeSubnetsResult; +import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult; +import com.amazonaws.services.ec2.model.DescribeVpcsResult; +import com.amazonaws.services.ec2.model.Image; +import com.amazonaws.services.ec2.model.InstanceTypeInfo; +import com.amazonaws.services.ec2.model.LaunchTemplate; +import com.amazonaws.services.ec2.model.ProcessorInfo; +import com.amazonaws.services.ec2.model.SecurityGroup; +import com.amazonaws.services.ec2.model.Subnet; +import com.amazonaws.services.ec2.model.Tag; +import com.amazonaws.services.ec2.model.VirtualizationType; +import com.netflix.spinnaker.clouddriver.aws.AwsBaseSpec; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.utils.TestUtils; +import io.restassured.http.ContentType; +import java.util.Arrays; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.test.context.ActiveProfiles; + +/** + * Test class with launch template settings enabled in clouddriver.yml, for CreateServerGroup + * operation. 
+ */ +@ActiveProfiles("launch-templates") +public class CreateServerGroupLaunchTemplatesEnabledSpec extends AwsBaseSpec { + @Value("${aws.features.launch-templates.enabled}") + private Boolean AWS_LAUNCH_TEMPLATES_ENABLED; + + @Value("${aws.features.launch-templates.allowed-applications}") + private String AWS_LAUNCH_TEMPLATES_ALLOWED_APPS; + + @Value("${aws.features.launch-templates.excluded-applications}") + private String AWS_LAUNCH_TEMPLATES_EXCLUDED_APPS; + + private final String EXPECTED_SERVER_GROUP_NAME = "myAwsApp-myStack-v000"; + private final String EXPECTED_LAUNCH_TEMPLATE_ID = "lt-1"; + private final LaunchTemplateSpecification EXPECTED_LAUNCH_TEMPLATE_SPEC = + new LaunchTemplateSpecification().withLaunchTemplateId("lt-1").withVersion("1"); + private final String EXPECTED_DEPLOY_WITH_LT_MSG_FMT = "Deploying ASG %s with launch template %s"; + + private final String EXPECTED_DEPLOY_WITH_LC_MSG_FMT = + "Deploying ASG %s with launch configuration %s"; + private final String EXPECTED_LAUNCH_CONFIG_NAME = + "myAwsApp-myStack-v000-"; // partial name without the timestamp part + + private AmazonAutoScaling mockAutoScaling = mock(AmazonAutoScaling.class); + private AmazonEC2 mockEc2 = mock(AmazonEC2.class); + + @BeforeEach + public void setup() { + // mock EC2 responses + when(mockRegionScopedProvider.getAmazonEC2()).thenReturn(mockEc2); + when(mockAwsClientProvider.getAmazonEC2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockEc2); + + when(mockEc2.describeSecurityGroups(any(DescribeSecurityGroupsRequest.class))) + .thenReturn( + new DescribeSecurityGroupsResult() + .withSecurityGroups( + new SecurityGroup().withGroupId("sg-123").withGroupName("myAwsApp"))); + when(mockEc2.describeVpcClassicLink()).thenReturn(new DescribeVpcClassicLinkResult()); + when(mockEc2.describeAddresses()).thenReturn(new DescribeAddressesResult()); + when(mockEc2.describeVpcs()).thenReturn(new DescribeVpcsResult()); + when(mockEc2.describeKeyPairs()).thenReturn(new DescribeKeyPairsResult()); + when(mockEc2.describeInstances(any(DescribeInstancesRequest.class))) + .thenReturn(new DescribeInstancesResult()); + when(mockEc2.describeImages(any(DescribeImagesRequest.class))) + .thenReturn( + new DescribeImagesResult() + .withImages( + new Image() + .withImageId("ami-12345") + .withVirtualizationType("hvm") + .withArchitecture("x86_64"))); + when(mockEc2.describeInstanceTypes(any(DescribeInstanceTypesRequest.class))) + .thenReturn( + new DescribeInstanceTypesResult() + .withInstanceTypes( + new InstanceTypeInfo() + .withInstanceType("t3.medium") + .withProcessorInfo(new ProcessorInfo().withSupportedArchitectures("x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm")), + new InstanceTypeInfo() + .withInstanceType("c3.large") + .withProcessorInfo( + new ProcessorInfo().withSupportedArchitectures("i386", "x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm", "paravirtual")))); + when(mockEc2.describeSubnets()) + .thenReturn( + new DescribeSubnetsResult() + .withSubnets( + Arrays.asList( + new Subnet() + .withSubnetId("subnetId1") + .withAvailabilityZone("us-west-1a") + .withTags( + new Tag() + .withKey("immutable_metadata") + .withValue( + "{\"purpose\": \"internal\", \"target\": \"ec2\" }")), + new Subnet() + .withSubnetId("subnetId2") + .withAvailabilityZone("us-west-2a")))); + + when(mockEc2.describeLaunchTemplates(any(DescribeLaunchTemplatesRequest.class))) + .thenReturn( + new DescribeLaunchTemplatesResult() + .withLaunchTemplates( + Arrays.asList( + 
new LaunchTemplate() + .withLaunchTemplateId("lt-1") + .withLatestVersionNumber(1L) + .withDefaultVersionNumber(0L), + new LaunchTemplate() + .withLaunchTemplateId("lt-2") + .withLatestVersionNumber(1L) + .withDefaultVersionNumber(0L)))); + + when(mockEc2.createLaunchTemplate(any(CreateLaunchTemplateRequest.class))) + .thenReturn( + new CreateLaunchTemplateResult() + .withLaunchTemplate( + new LaunchTemplate().withLaunchTemplateId("lt-1").withLatestVersionNumber(1L))); + + // mock autoscaling response + when(mockAwsClientProvider.getAutoScaling(any(NetflixAmazonCredentials.class), anyString())) + .thenReturn(mockAutoScaling); + when(mockAwsClientProvider.getAutoScaling( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockAutoScaling); + when(mockAutoScaling.describeAutoScalingGroups(any(DescribeAutoScalingGroupsRequest.class))) + .thenReturn(new DescribeAutoScalingGroupsResult()); + } + + @DisplayName("Assert AWS and launch template features are enabled") + @Test + public void configTest() { + assertTrue(AWS_ENABLED); + assertEquals("aws-account1", AWS_ACCOUNT_NAME); + + assertTrue(AWS_LAUNCH_TEMPLATES_ENABLED); + assertEquals("myAwsApp:aws-account1:us-west-1", AWS_LAUNCH_TEMPLATES_ALLOWED_APPS); + assertEquals("myExcludedApp:aws-account1:us-west-1", AWS_LAUNCH_TEMPLATES_EXCLUDED_APPS); + } + + @DisplayName( + "Given request for server group with launch template features, " + "successfully submit createServerGroup operation with launch template") + @Test + public void createServerGroup_ltFeatures_used_expect_launchTemplate() + throws InterruptedException { + // given + Map<String, Object> requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("setLaunchTemplate", true) + .withValue("requireIMDSv2", true) + .withValue("associateIPv6Address", true) + .withValue("unlimitedCpuCredits", true) + .withValue("instanceType", "t3.medium") + .withValue("securityGroup", new String[] {"myAwsApp"}) + .withValue( + "tags", Map.of("testPurpose", "testing server group with launch template features")) + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + String.format( + EXPECTED_DEPLOY_WITH_LT_MSG_FMT, + EXPECTED_SERVER_GROUP_NAME, + EXPECTED_LAUNCH_TEMPLATE_ID))); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + + // capture and assert arguments + ArgumentCaptor<CreateLaunchTemplateRequest> createLaunchTemplateArgs = + ArgumentCaptor.forClass(CreateLaunchTemplateRequest.class); + verify(mockEc2).createLaunchTemplate(createLaunchTemplateArgs.capture()); + CreateLaunchTemplateRequest createLtReq = createLaunchTemplateArgs.getValue(); + + assertTrue(createLtReq.getLaunchTemplateName().contains("myAwsApp-myStack-v000-")); + assertEquals("ami-12345", createLtReq.getLaunchTemplateData().getImageId()); + assertEquals("t3.medium", createLtReq.getLaunchTemplateData().getInstanceType()); + assertEquals( + 1, createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getGroups().size()); + assertEquals( + "sg-123", + createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getGroups().get(0)); + + assertEquals( + "unlimited",
createLtReq.getLaunchTemplateData().getCreditSpecification().getCpuCredits()); + assertEquals( + "required", createLtReq.getLaunchTemplateData().getMetadataOptions().getHttpTokens()); + assertEquals( + 1, createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getIpv6AddressCount()); + + assertNull(createLtReq.getLaunchTemplateData().getInstanceMarketOptions()); + assertNull(createLtReq.getLaunchTemplateData().getPlacement()); + assertTrue(createLtReq.getLaunchTemplateData().getLicenseSpecifications().isEmpty()); + + ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertEquals(EXPECTED_LAUNCH_TEMPLATE_SPEC, createAsgReq.getLaunchTemplate()); + assertEquals(1, createAsgReq.getTags().size()); + assertEquals("testPurpose", createAsgReq.getTags().get(0).getKey()); + assertEquals( + "testing server group with launch template features", + createAsgReq.getTags().get(0).getValue()); + } + + @DisplayName( + "Given request for server group with launch template, and EC2 Spot, " + "successfully submit createServerGroup operation with launch template") + @Test + public void createServerGroup_ltAndSpot_expect_launchTemplate() throws InterruptedException { + // given + Map<String, Object> requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("setLaunchTemplate", true) + .withValue("spotPrice", "1.5") + .withValue("securityGroup", new String[] {"myAwsApp"}) + .withValue("instanceType", "t3.medium") + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + String.format( + EXPECTED_DEPLOY_WITH_LT_MSG_FMT, + EXPECTED_SERVER_GROUP_NAME, + EXPECTED_LAUNCH_TEMPLATE_ID))); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + + // capture and assert arguments + ArgumentCaptor<CreateLaunchTemplateRequest> createLaunchTemplateArgs = + ArgumentCaptor.forClass(CreateLaunchTemplateRequest.class); + verify(mockEc2).createLaunchTemplate(createLaunchTemplateArgs.capture()); + CreateLaunchTemplateRequest createLtReq = createLaunchTemplateArgs.getValue(); + + assertTrue(createLtReq.getLaunchTemplateName().contains("myAwsApp-myStack-v000-")); + assertEquals("ami-12345", createLtReq.getLaunchTemplateData().getImageId()); + assertEquals("t3.medium", createLtReq.getLaunchTemplateData().getInstanceType()); + assertEquals( + "spot", createLtReq.getLaunchTemplateData().getInstanceMarketOptions().getMarketType()); + assertEquals( + "1.5", + createLtReq + .getLaunchTemplateData() + .getInstanceMarketOptions() + .getSpotOptions() + .getMaxPrice()); + assertEquals( + "standard", + createLtReq + .getLaunchTemplateData() + .getCreditSpecification() + .getCpuCredits()); // default for t3 type + + ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertEquals(EXPECTED_LAUNCH_TEMPLATE_SPEC, createAsgReq.getLaunchTemplate()); + }
+ @DisplayName("Given request with incompatible AMI and instance type, fail with accurate message") + @Test + public void createServerGroup_incompatible_ami_and_instanceType_expect_exception() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("setLaunchTemplate", true) + .withValue("spotPrice", "1.5") + .withValue("securityGroup", new String[] {"myAwsApp"}) + .withValue("instanceType", "t3.medium") + .asMap(); + when(mockEc2.describeImages(any(DescribeImagesRequest.class))) + .thenReturn( + new DescribeImagesResult() + .withImages( + new Image() + .withImageId("img-1") + .withName("test-image") + .withVirtualizationType(VirtualizationType.Paravirtual))); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + "Orchestration failed: DeployAtomicOperation | IllegalArgumentException: [Instance type t3.medium does not support virtualization type paravirtual. Please select a different image or instance type.]")); + } + + @DisplayName( + "Given request with setLaunchTemplate disabled, " + + "successfully submit createServerGroup operation with launch configuration") + @Test + public void createServerGroup_setLaunchTemplateDisabled_expect_launchConfiguration() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("setLaunchTemplate", false) + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + String.format( + EXPECTED_DEPLOY_WITH_LC_MSG_FMT, + EXPECTED_SERVER_GROUP_NAME, + EXPECTED_LAUNCH_CONFIG_NAME))); + assertTrue( + taskHistory.contains( + String.format(EXPECTED_SERVER_GROUP_NAME, EXPECTED_SERVER_GROUP_NAME))); + + // capture and assert arguments + ArgumentCaptor createLaunchConfigArgs = + ArgumentCaptor.forClass(CreateLaunchConfigurationRequest.class); + verify(mockAutoScaling).createLaunchConfiguration(createLaunchConfigArgs.capture()); + CreateLaunchConfigurationRequest createLcReq = createLaunchConfigArgs.getValue(); + + assertTrue(createLcReq.getLaunchConfigurationName().contains(EXPECTED_LAUNCH_CONFIG_NAME)); + assertEquals(1, createLcReq.getSecurityGroups().size()); + assertEquals("sg-123", createLcReq.getSecurityGroups().get(0)); + assertEquals("ami-12345", createLcReq.getImageId()); + assertEquals("c3.large", createLcReq.getInstanceType()); + + ArgumentCaptor createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertTrue(createAsgReq.getLaunchConfigurationName().contains(EXPECTED_LAUNCH_CONFIG_NAME)); + } + + @DisplayName( + "Given request 
with setLaunchTemplate enabled for an excluded application, " + "successfully submit createServerGroup operation with launch configuration") + @Test + public void createServerGroup_ltEnabled_and_excludedApp_expect_launchConfiguration() + throws InterruptedException { + // given + Map<String, Object> requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("application", "myExcludedApp") + .withValue("instanceType", "c3.large") + .withValue("setLaunchTemplate", false) + .asMap(); + when(mockEc2.describeSecurityGroups(any(DescribeSecurityGroupsRequest.class))) + .thenReturn( + new DescribeSecurityGroupsResult() + .withSecurityGroups( + new SecurityGroup().withGroupId("sg-123").withGroupName("myExcludedApp"))); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String expectedSgName = "myExcludedApp-myStack-v000"; + final String expectedLcName = + expectedSgName + "-"; // partial launch config name without the timestamp + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + String.format(EXPECTED_DEPLOY_WITH_LC_MSG_FMT, expectedSgName, expectedLcName))); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + + // capture and assert arguments + ArgumentCaptor<CreateLaunchConfigurationRequest> createLaunchConfigArgs = + ArgumentCaptor.forClass(CreateLaunchConfigurationRequest.class); + verify(mockAutoScaling).createLaunchConfiguration(createLaunchConfigArgs.capture()); + CreateLaunchConfigurationRequest createLcReq = createLaunchConfigArgs.getValue(); + + assertTrue(createLcReq.getLaunchConfigurationName().contains(expectedLcName)); + assertEquals(1, createLcReq.getSecurityGroups().size()); + assertEquals("sg-123", createLcReq.getSecurityGroups().get(0)); + assertEquals("ami-12345", createLcReq.getImageId()); + assertEquals("c3.large", createLcReq.getInstanceType()); + + ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertTrue(createAsgReq.getLaunchConfigurationName().contains(expectedLcName)); + } +}
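The next file tests ASG creation with a mixed instances policy. For orientation, a hedged sketch of the AWS SDK request shape those tests assert on (the IDs and distribution values are illustrative; the model classes are the `com.amazonaws.services.autoscaling.model` ones the tests themselves exercise):

```java
import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest;
import com.amazonaws.services.autoscaling.model.InstancesDistribution;
import com.amazonaws.services.autoscaling.model.LaunchTemplate;
import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification;
import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy;

class MixedInstancesPolicySketch {
  static CreateAutoScalingGroupRequest build() {
    // Instead of a top-level launch template, the ASG request carries a
    // MixedInstancesPolicy that wraps the launch template specification
    // together with on-demand/spot distribution settings.
    return new CreateAutoScalingGroupRequest()
        .withAutoScalingGroupName("myAwsApp-myStack-v000")
        .withMixedInstancesPolicy(
            new MixedInstancesPolicy()
                .withLaunchTemplate(
                    new LaunchTemplate()
                        .withLaunchTemplateSpecification(
                            new LaunchTemplateSpecification()
                                .withLaunchTemplateId("lt-1")
                                .withVersion("$Latest")))
                .withInstancesDistribution(
                    new InstancesDistribution()
                        .withOnDemandBaseCapacity(1)
                        .withOnDemandPercentageAboveBaseCapacity(50)
                        .withSpotAllocationStrategy("capacity-optimized")
                        .withSpotMaxPrice("1.5")));
  }
}
```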
diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupMixedInstancesPolicySpec.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupMixedInstancesPolicySpec.java new file mode 100644 index 00000000000..80bb1a50b1a --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupMixedInstancesPolicySpec.java @@ -0,0 +1,341 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.test; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult; +import com.amazonaws.services.ec2.model.DescribeAddressesResult; +import com.amazonaws.services.ec2.model.DescribeImagesRequest; +import com.amazonaws.services.ec2.model.DescribeImagesResult; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult; +import com.amazonaws.services.ec2.model.DescribeInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeInstancesResult; +import com.amazonaws.services.ec2.model.DescribeKeyPairsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeSubnetsResult; +import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult; +import com.amazonaws.services.ec2.model.DescribeVpcsResult; +import com.amazonaws.services.ec2.model.Image; +import com.amazonaws.services.ec2.model.InstanceTypeInfo; +import com.amazonaws.services.ec2.model.LaunchTemplate; +import com.amazonaws.services.ec2.model.ProcessorInfo; +import com.amazonaws.services.ec2.model.SecurityGroup; +import com.amazonaws.services.ec2.model.Subnet; +import com.amazonaws.services.ec2.model.Tag; +import com.netflix.spinnaker.clouddriver.aws.AwsBaseSpec; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.utils.TestUtils; +import io.restassured.http.ContentType; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.test.context.ActiveProfiles; + +/** + * Test class with launch template settings enabled in clouddriver.yml, for CreateServerGroup + * operation.
+ */ +@ActiveProfiles("launch-templates") +public class CreateServerGroupMixedInstancesPolicySpec extends AwsBaseSpec { + @Value("${aws.features.launch-templates.enabled}") + private Boolean AWS_LAUNCH_TEMPLATES_ENABLED; + + @Value("${aws.features.launch-templates.allowed-applications}") + private String AWS_LAUNCH_TEMPLATES_ALLOWED_APPS; + + private final String EXPECTED_SERVER_GROUP_NAME = "myAwsApp-myStack-v000"; + private final LaunchTemplateSpecification EXPECTED_LAUNCH_TEMPLATE_SPEC = + new LaunchTemplateSpecification().withLaunchTemplateId("lt-1").withVersion("$Latest"); + private final String EXPECTED_DEPLOY_WITH_MIP_MSG_FMT = + "Deploying ASG %s with mixed instances policy"; + + private AmazonAutoScaling mockAutoScaling = mock(AmazonAutoScaling.class); + private AmazonEC2 mockEc2 = mock(AmazonEC2.class); + + @BeforeEach + public void setup() { + // mock EC2 responses + when(mockRegionScopedProvider.getAmazonEC2()).thenReturn(mockEc2); + when(mockAwsClientProvider.getAmazonEC2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockEc2); + + when(mockEc2.describeSecurityGroups(any(DescribeSecurityGroupsRequest.class))) + .thenReturn( + new DescribeSecurityGroupsResult() + .withSecurityGroups( + new SecurityGroup().withGroupId("sg-123").withGroupName("myAwsApp"))); + when(mockEc2.describeVpcClassicLink()).thenReturn(new DescribeVpcClassicLinkResult()); + when(mockEc2.describeAddresses()).thenReturn(new DescribeAddressesResult()); + when(mockEc2.describeVpcs()).thenReturn(new DescribeVpcsResult()); + when(mockEc2.describeKeyPairs()).thenReturn(new DescribeKeyPairsResult()); + when(mockEc2.describeInstances(any(DescribeInstancesRequest.class))) + .thenReturn(new DescribeInstancesResult()); + when(mockEc2.describeImages(any(DescribeImagesRequest.class))) + .thenReturn( + new DescribeImagesResult() + .withImages( + new Image() + .withImageId("ami-12345") + .withVirtualizationType("hvm") + .withArchitecture("x86_64"))); + when(mockEc2.describeInstanceTypes(any(DescribeInstanceTypesRequest.class))) + .thenReturn( + new DescribeInstanceTypesResult() + .withInstanceTypes( + new InstanceTypeInfo() + .withInstanceType("t3.medium") + .withProcessorInfo(new ProcessorInfo().withSupportedArchitectures("x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm")), + new InstanceTypeInfo() + .withInstanceType("c3.large") + .withProcessorInfo( + new ProcessorInfo().withSupportedArchitectures("i386", "x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm", "paravirtual")), + new InstanceTypeInfo() + .withInstanceType("c3.xlarge") + .withProcessorInfo(new ProcessorInfo().withSupportedArchitectures("x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm", "paravirtual")))); + when(mockEc2.describeSubnets()) + .thenReturn( + new DescribeSubnetsResult() + .withSubnets( + Arrays.asList( + new Subnet() + .withSubnetId("subnetId1") + .withAvailabilityZone("us-west-1a") + .withTags( + new Tag() + .withKey("immutable_metadata") + .withValue( + "{\"purpose\": \"internal\", \"target\": \"ec2\" }")), + new Subnet() + .withSubnetId("subnetId2") + .withAvailabilityZone("us-west-2a")))); + + when(mockEc2.describeLaunchTemplates(any(DescribeLaunchTemplatesRequest.class))) + .thenReturn( + new DescribeLaunchTemplatesResult() + .withLaunchTemplates( + Arrays.asList( + new LaunchTemplate() + .withLaunchTemplateId("lt-1") + .withLatestVersionNumber(1L) + .withDefaultVersionNumber(0L), + new LaunchTemplate() + .withLaunchTemplateId("lt-2") + 
                            .withLatestVersionNumber(1L)
+                            .withDefaultVersionNumber(0L))));
+
+    when(mockEc2.createLaunchTemplate(any(CreateLaunchTemplateRequest.class)))
+        .thenReturn(
+            new CreateLaunchTemplateResult()
+                .withLaunchTemplate(
+                    new LaunchTemplate().withLaunchTemplateId("lt-1").withLatestVersionNumber(1L)));
+
+    // mock autoscaling response
+    when(mockAwsClientProvider.getAutoScaling(any(NetflixAmazonCredentials.class), anyString()))
+        .thenReturn(mockAutoScaling);
+    when(mockAwsClientProvider.getAutoScaling(
+            any(NetflixAmazonCredentials.class), anyString(), anyBoolean()))
+        .thenReturn(mockAutoScaling);
+    when(mockAutoScaling.describeAutoScalingGroups(any(DescribeAutoScalingGroupsRequest.class)))
+        .thenReturn(new DescribeAutoScalingGroupsResult());
+  }
+
+  @DisplayName("Assert AWS and launch template features are enabled")
+  @Test
+  public void configTest() {
+    assertTrue(AWS_ENABLED);
+    assertEquals("aws-account1", AWS_ACCOUNT_NAME);
+
+    // launch templates need to be enabled to use AWS ASG MixedInstancesPolicy
+    assertTrue(AWS_LAUNCH_TEMPLATES_ENABLED);
+    assertEquals("myAwsApp:aws-account1:us-west-1", AWS_LAUNCH_TEMPLATES_ALLOWED_APPS);
+  }
+
+  @DisplayName(
+      "Given request for server group with instances distribution features, "
+          + "successfully submit createServerGroup operation with mixed instances policy")
+  @Test
+  public void createServerGroup_instancesDistribution_used_expect_mixedInstancesPolicy()
+      throws InterruptedException {
+    // given
+    Map<String, Object> requestBody =
+        TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json")
+            .withValue("setLaunchTemplate", true)
+            .withValue("onDemandBaseCapacity", 1)
+            .withValue("onDemandPercentageAboveBaseCapacity", 50)
+            .withValue("spotAllocationStrategy", "capacity-optimized")
+            .withValue("spotPrice", "1.5")
+            .withValue("instanceType", "c3.large")
+            .withValue("securityGroup", new String[] {"myAwsApp"})
+            .asMap();
+
+    // when, then
+    String taskId =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    // then
+    final String taskHistory = getTaskUpdatesAfterCompletion(taskId);
+    assertTrue(
+        taskHistory.contains(
+            String.format(EXPECTED_DEPLOY_WITH_MIP_MSG_FMT, EXPECTED_SERVER_GROUP_NAME)));
+    assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG));
+
+    // capture and assert arguments
+    ArgumentCaptor<CreateLaunchTemplateRequest> createLaunchTemplateArgs =
+        ArgumentCaptor.forClass(CreateLaunchTemplateRequest.class);
+    verify(mockEc2).createLaunchTemplate(createLaunchTemplateArgs.capture());
+    CreateLaunchTemplateRequest createLtReq = createLaunchTemplateArgs.getValue();
+    assertTrue(createLtReq.getLaunchTemplateName().contains("myAwsApp-myStack-v000-"));
+    assertEquals("c3.large", createLtReq.getLaunchTemplateData().getInstanceType());
+    assertEquals(
+        1, createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getGroups().size());
+    assertEquals(
+        "sg-123",
+        createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getGroups().get(0));
+
+    ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs =
+        ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class);
+    verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture());
+    CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue();
+    assertNull(createAsgReq.getLaunchTemplate());
+    assertEquals(
+        EXPECTED_LAUNCH_TEMPLATE_SPEC,
+        createAsgReq
+            .getMixedInstancesPolicy()
+            .getLaunchTemplate()
+            .getLaunchTemplateSpecification());
+    assertEquals(
+        "{OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 1.5}",
+        createAsgReq.getMixedInstancesPolicy().getInstancesDistribution().toString());
+  }
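+
+  // Note: the instances-distribution style fields in the request above map onto the two halves
+  // of an AWS MixedInstancesPolicy: onDemandBaseCapacity, onDemandPercentageAboveBaseCapacity,
+  // spotAllocationStrategy and spotPrice become the InstancesDistribution, while the launch
+  // template (plus any launchTemplateOverridesForInstanceType entries) becomes the LaunchTemplate
+  // half. A minimal, illustrative sketch of the expected shape using the AWS SDK autoscaling
+  // model types (not an exact reproduction of what clouddriver builds internally):
+  //
+  //   new MixedInstancesPolicy()
+  //       .withLaunchTemplate(
+  //           new LaunchTemplate()
+  //               .withLaunchTemplateSpecification(
+  //                   new LaunchTemplateSpecification().withLaunchTemplateId("lt-1")))
+  //       .withInstancesDistribution(
+  //           new InstancesDistribution()
+  //               .withOnDemandBaseCapacity(1)
+  //               .withOnDemandPercentageAboveBaseCapacity(50)
+  //               .withSpotAllocationStrategy("capacity-optimized")
+  //               .withSpotMaxPrice("1.5"));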
+
+  @DisplayName(
+      "Given request for server group with multiple instance types / launch template overrides, "
+          + "successfully submit createServerGroup operation with mixed instances policy")
+  @Test
+  public void createServerGroup_multiInstanceTypes_used_expect_mixedInstancesPolicy()
+      throws InterruptedException {
+    // given
+    Map<String, Object> requestBody =
+        TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json")
+            .withValue("setLaunchTemplate", true)
+            .withValue(
+                "launchTemplateOverridesForInstanceType",
+                List.of(
+                    Map.of("instanceType", "t3.large", "weightedCapacity", "1"),
+                    Map.of("instanceType", "c3.large", "weightedCapacity", "1"),
+                    Map.of("instanceType", "c3.xlarge", "weightedCapacity", "2")))
+            .asMap();
+
+    // when, then
+    String taskId =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    // then
+    final String taskHistory = getTaskUpdatesAfterCompletion(taskId);
+    assertTrue(
+        taskHistory.contains(
+            String.format(EXPECTED_DEPLOY_WITH_MIP_MSG_FMT, EXPECTED_SERVER_GROUP_NAME)));
+    assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG));
+
+    // capture and assert arguments
+    ArgumentCaptor<CreateLaunchTemplateRequest> createLaunchTemplateArgs =
+        ArgumentCaptor.forClass(CreateLaunchTemplateRequest.class);
+    verify(mockEc2).createLaunchTemplate(createLaunchTemplateArgs.capture());
+    CreateLaunchTemplateRequest createLtReq = createLaunchTemplateArgs.getValue();
+    assertTrue(createLtReq.getLaunchTemplateName().contains("myAwsApp-myStack-v000-"));
+    assertEquals("c3.large", createLtReq.getLaunchTemplateData().getInstanceType());
+    assertEquals(
+        1, createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getGroups().size());
+    assertEquals(
+        "sg-123",
+        createLtReq.getLaunchTemplateData().getNetworkInterfaces().get(0).getGroups().get(0));
+
+    ArgumentCaptor<CreateAutoScalingGroupRequest> createAsgArgs =
+        ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class);
+    verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture());
+    CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue();
+    assertNull(createAsgReq.getLaunchTemplate());
+    assertEquals(
+        EXPECTED_LAUNCH_TEMPLATE_SPEC,
+        createAsgReq
+            .getMixedInstancesPolicy()
+            .getLaunchTemplate()
+            .getLaunchTemplateSpecification());
+    assertEquals(
+        "[{InstanceType: t3.large,WeightedCapacity: 1,}, {InstanceType: c3.large,WeightedCapacity: 1,}, {InstanceType: c3.xlarge,WeightedCapacity: 2,}]",
+        createAsgReq.getMixedInstancesPolicy().getLaunchTemplate().getOverrides().toString());
+    assertEquals(
+        "{}", createAsgReq.getMixedInstancesPolicy().getInstancesDistribution().toString());
+  }
+}
diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupSpec.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupSpec.java
new file mode 100644
index 00000000000..3aac1f1c980
--- /dev/null
+++
b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/CreateServerGroupSpec.java @@ -0,0 +1,494 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.test; + +import static io.restassured.RestAssured.given; +import static org.assertj.core.api.Assertions.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.AlreadyExistsException; +import com.amazonaws.services.autoscaling.model.AutoScalingGroup; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult; +import com.amazonaws.services.ec2.model.DescribeAddressesResult; +import com.amazonaws.services.ec2.model.DescribeImagesRequest; +import com.amazonaws.services.ec2.model.DescribeImagesResult; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult; +import com.amazonaws.services.ec2.model.DescribeInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeInstancesResult; +import com.amazonaws.services.ec2.model.DescribeKeyPairsResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeSubnetsResult; +import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult; +import com.amazonaws.services.ec2.model.DescribeVpcsResult; +import com.amazonaws.services.ec2.model.Image; +import com.amazonaws.services.ec2.model.InstanceTypeInfo; +import com.amazonaws.services.ec2.model.LaunchTemplate; +import com.amazonaws.services.ec2.model.ProcessorInfo; +import com.amazonaws.services.ec2.model.SecurityGroup; +import com.amazonaws.services.ec2.model.Subnet; +import com.amazonaws.services.ec2.model.Tag; +import com.netflix.spinnaker.clouddriver.aws.AwsBaseSpec; +import 
com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.utils.TestUtils; +import io.restassured.http.ContentType; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.Date; +import java.util.Map; +import org.apache.commons.lang3.StringUtils; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import org.mockito.ArgumentCaptor; +import org.springframework.test.context.ActiveProfiles; + +/** + * Test class for general test cases related to CreateServerGroup operation. Note: launch template + * settings are enabled in clouddriver.yml + */ +@ActiveProfiles("launch-templates") +public class CreateServerGroupSpec extends AwsBaseSpec { + private AmazonAutoScaling mockAutoScaling = mock(AmazonAutoScaling.class); + private AmazonEC2 mockEc2 = mock(AmazonEC2.class); + + @BeforeEach + void init(TestInfo testInfo) { + System.out.println("--------------- Test " + testInfo.getDisplayName()); + + // mock EC2 responses + when(mockRegionScopedProvider.getAmazonEC2()).thenReturn(mockEc2); + when(mockAwsClientProvider.getAmazonEC2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockEc2); + + when(mockEc2.describeSecurityGroups(any(DescribeSecurityGroupsRequest.class))) + .thenReturn( + new DescribeSecurityGroupsResult() + .withSecurityGroups( + new SecurityGroup().withGroupId("sg-123").withGroupName("myAwsApp"))); + when(mockEc2.describeVpcClassicLink()).thenReturn(new DescribeVpcClassicLinkResult()); + when(mockEc2.describeAddresses()).thenReturn(new DescribeAddressesResult()); + when(mockEc2.describeVpcs()).thenReturn(new DescribeVpcsResult()); + when(mockEc2.describeKeyPairs()).thenReturn(new DescribeKeyPairsResult()); + when(mockEc2.describeInstances(any(DescribeInstancesRequest.class))) + .thenReturn(new DescribeInstancesResult()); + when(mockEc2.describeImages(any(DescribeImagesRequest.class))) + .thenReturn( + new DescribeImagesResult() + .withImages( + new Image() + .withImageId("ami-12345") + .withVirtualizationType("hvm") + .withArchitecture("x86_64"))); + when(mockEc2.describeInstanceTypes(any(DescribeInstanceTypesRequest.class))) + .thenReturn( + new DescribeInstanceTypesResult() + .withInstanceTypes( + new InstanceTypeInfo() + .withInstanceType("c3.large") + .withProcessorInfo( + new ProcessorInfo().withSupportedArchitectures("i386", "x86_64")) + .withSupportedVirtualizationTypes(Arrays.asList("hvm", "paravirtual")))); + when(mockEc2.describeSubnets()) + .thenReturn( + new DescribeSubnetsResult() + .withSubnets( + Arrays.asList( + new Subnet() + .withSubnetId("subnetId1") + .withAvailabilityZone("us-west-1a") + .withTags( + new Tag() + .withKey("immutable_metadata") + .withValue( + "{\"purpose\": \"internal\", \"target\": \"ec2\" }")), + new Subnet() + .withSubnetId("subnetId2") + .withAvailabilityZone("us-west-2a")))); + + when(mockEc2.createLaunchTemplate(any(CreateLaunchTemplateRequest.class))) + .thenReturn( + new CreateLaunchTemplateResult() + .withLaunchTemplate( + new LaunchTemplate().withLaunchTemplateId("lt-1").withLatestVersionNumber(1L))); + + // mock autoscaling response + when(mockAwsClientProvider.getAutoScaling(any(NetflixAmazonCredentials.class), anyString())) + .thenReturn(mockAutoScaling); + when(mockAwsClientProvider.getAutoScaling( + any(NetflixAmazonCredentials.class), anyString(), 
anyBoolean())) + .thenReturn(mockAutoScaling); + when(mockAutoScaling.describeAutoScalingGroups(any(DescribeAutoScalingGroupsRequest.class))) + .thenReturn(new DescribeAutoScalingGroupsResult()); + } + + @DisplayName("Given invalid requests, successfully validate with error messages") + @Test + public void createServerGroup_invalidRequests_expect_validationFailure() { + final String invalidReqDir = "/createServerGroup_invalid_requests/"; + final String pattern = PATH_PREFIX + invalidReqDir + "*.json"; + TestUtils.loadResourcesFromDir(pattern).stream() + .forEach( + ti -> { + final String testFileName = ti.getFilename(); + System.out.println("\nRunning tests for " + invalidReqDir + testFileName); + + // given + Map requestBody = TestUtils.loadJson(ti).asMap(); + + // when, then + final String expectedValidationError = + (testFileName.contains("-") + ? StringUtils.substringAfterLast(testFileName, "-") + : testFileName) + .split(".json")[0]; + + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(400) + .contentType(ContentType.JSON) + .assertThat() + .body("message", Matchers.equalTo("Validation Failed")) + .body("errors.size()", Matchers.equalTo(1)) + .body("errors[0]", Matchers.endsWith(expectedValidationError)); + }); + } + + @DisplayName("Given request with subnet type, successfully submit deployment to subnet IDs") + @Test + public void createServerGroup_subnetType_expect_deploymentToSubnetIds() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("setLaunchTemplate", false) + .withValue("subnetType", "internal") + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + assertTrue(taskHistory.contains("Deploying to subnetIds: subnetId1")); + + // capture and assert arguments + ArgumentCaptor createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertEquals("subnetId1", createAsgReq.getVPCZoneIdentifier()); + assertTrue(createAsgReq.getAvailabilityZones().isEmpty()); + } + + @DisplayName("Given request with invalid subnet type, fail with accurate message") + @Test + public void createServerGroup_invalid_subnetType_expect_error() throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("subnetType", "unknown") + .asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + "Orchestration failed: DeployAtomicOperation | 
RuntimeException: [No suitable subnet was found for internal subnet purpose 'unknown'!]")); + } + + @DisplayName( + "Given request without subnet type, successfully submit deployment to availability zones") + @Test + public void createServerGroup_noSubnetType_expect_deploymentToAZs() throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json").asMap(); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue(taskHistory.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + assertTrue(taskHistory.contains("Deploying to availabilityZones: [us-west-1a, us-west-1c]")); + + // capture and assert arguments + ArgumentCaptor createAsgArgs = + ArgumentCaptor.forClass(CreateAutoScalingGroupRequest.class); + verify(mockAutoScaling).createAutoScalingGroup(createAsgArgs.capture()); + CreateAutoScalingGroupRequest createAsgReq = createAsgArgs.getValue(); + + assertEquals(Arrays.asList("us-west-1a", "us-west-1c"), createAsgReq.getAvailabilityZones()); + assertNull(createAsgReq.getVPCZoneIdentifier()); + } + + @DisplayName( + "Given request to create server group that already exists " + + "and creation time not in safety window, fail with accurate message") + @Test + public void createServerGroup_alreadyExists_notInSafetyWindow_expect_exception() + throws InterruptedException { + // given + final String expectedServerGroupName = "myAwsApp-myStack-v100"; + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json") + .withValue("sequence", "100") + .withValue("setLaunchTemplate", true) + .asMap(); + + // when - create myAwsApp-myStack-v100 first and verify + String taskId1 = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + String taskHistory1 = getTaskUpdatesAfterCompletion(taskId1); + assertTrue(taskHistory1.contains(EXPECTED_DEPLOY_SUCCESS_MSG)); + + // when + final Date notWithinOneHour = Date.from(Instant.now().minus(2, ChronoUnit.HOURS)); + when(mockAutoScaling.createAutoScalingGroup(any(CreateAutoScalingGroupRequest.class))) + .thenThrow(AlreadyExistsException.class); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(expectedServerGroupName))) + .thenReturn( + new DescribeAutoScalingGroupsResult() + .withAutoScalingGroups( + Arrays.asList( + new AutoScalingGroup() + .withAutoScalingGroupName(expectedServerGroupName) + .withHealthCheckType("EC2") + .withLaunchTemplate( + new LaunchTemplateSpecification() + .withLaunchTemplateId("lt-1") + .withVersion("1")) + .withAvailabilityZones(Arrays.asList("us-west-1a", "us-west-1c")) + .withCreatedTime(notWithinOneHour)))); + + // then, try to create myAwsApp-myStack-v100 again + String taskId2 = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", 
notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    // then
+    String taskHistory2 = getTaskUpdatesAfterCompletion(taskId2);
+    assertThat(taskHistory2)
+        .contains(
+            expectedServerGroupName
+                + " already exists and appears to be valid, but falls outside of safety window for idempotent deploy (1 hour)");
+    assertThat(taskHistory2)
+        .contains("Orchestration failed: DeployAtomicOperation | AlreadyExistsException");
+  }
+
+  @DisplayName(
+      "Given request to create server group that already exists "
+          + "and creation time in safety window, succeed as an idempotent deploy")
+  @Test
+  public void createServerGroup_alreadyExists_inSafetyWindow_expect_success()
+      throws InterruptedException {
+    // given
+    final String expectedServerGroupName = "myAwsApp-myStack-v200";
+    Map<String, Object> requestBody =
+        TestUtils.loadJson(PATH_PREFIX + "createServerGroup-basic.json")
+            .withValue("sequence", "200")
+            .withValue("setLaunchTemplate", true)
+            .asMap();
+
+    // when - create myAwsApp-myStack-v200 first and verify
+    String taskId1 =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    // then
+    String taskHistory1 = getTaskUpdatesAfterCompletion(taskId1);
+    assertTrue(taskHistory1.contains(EXPECTED_DEPLOY_SUCCESS_MSG));
+
+    // when
+    final Date withinOneHour = Date.from(Instant.now().minus(2, ChronoUnit.MINUTES));
+    when(mockAutoScaling.createAutoScalingGroup(any(CreateAutoScalingGroupRequest.class)))
+        .thenThrow(AlreadyExistsException.class);
+    when(mockAutoScaling.describeAutoScalingGroups(
+            new DescribeAutoScalingGroupsRequest()
+                .withAutoScalingGroupNames(expectedServerGroupName)))
+        .thenReturn(
+            new DescribeAutoScalingGroupsResult()
+                .withAutoScalingGroups(
+                    Arrays.asList(
+                        new AutoScalingGroup()
+                            .withAutoScalingGroupName(expectedServerGroupName)
+                            .withHealthCheckType("EC2")
+                            .withLaunchTemplate(
+                                new LaunchTemplateSpecification()
+                                    .withLaunchTemplateId("lt-1")
+                                    .withVersion("1"))
+                            .withAvailabilityZones(Arrays.asList("us-west-1a", "us-west-1c"))
+                            .withCreatedTime(withinOneHour))));
+
+    // then, try to create myAwsApp-myStack-v200 again
+    String taskId2 =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(getBaseUrl() + CREATE_SERVER_GROUP_OP_PATH)
+            .then()
+            .statusCode(200)
+
.contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue(taskHistory.contains("Enabling metrics collection for:")); + } +} diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/ModifyServerGroupLaunchTemplateSpec.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/ModifyServerGroupLaunchTemplateSpec.java new file mode 100644 index 00000000000..c4a7238e137 --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/test/ModifyServerGroupLaunchTemplateSpec.java @@ -0,0 +1,929 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.test; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.AmazonAutoScalingException; +import com.amazonaws.services.autoscaling.model.AutoScalingGroup; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest; +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult; +import com.amazonaws.services.autoscaling.model.InstancesDistribution; +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy; +import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionResult; +import com.amazonaws.services.ec2.model.CreditSpecification; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsRequest; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResponseErrorItem; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResponseSuccessItem; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResult; 
+import com.amazonaws.services.ec2.model.DescribeImagesRequest;
+import com.amazonaws.services.ec2.model.DescribeImagesResult;
+import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsRequest;
+import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsResult;
+import com.amazonaws.services.ec2.model.Image;
+import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMarketOptions;
+import com.amazonaws.services.ec2.model.LaunchTemplateSpotMarketOptions;
+import com.amazonaws.services.ec2.model.LaunchTemplateVersion;
+import com.amazonaws.services.ec2.model.ResponseError;
+import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData;
+import com.netflix.spinnaker.clouddriver.aws.AwsBaseSpec;
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription;
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
+import com.netflix.spinnaker.clouddriver.aws.services.AsgService;
+import com.netflix.spinnaker.clouddriver.aws.utils.TestUtils;
+import io.restassured.http.ContentType;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.commons.lang3.StringUtils;
+import org.hamcrest.Matchers;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.mockito.ArgumentCaptor;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.ApplicationContext;
+import org.springframework.test.context.ActiveProfiles;
+
+/**
+ * Test class for general test cases related to ModifyServerGroupLaunchTemplate operation. Note:
+ * launch template settings are enabled in clouddriver.yml
+ */
+@ActiveProfiles("launch-templates")
+public class ModifyServerGroupLaunchTemplateSpec extends AwsBaseSpec {
+  @Autowired ApplicationContext context;
+
+  private AsgService mockAsgService = mock(AsgService.class);
+  private AmazonEC2 mockEc2 = mock(AmazonEC2.class);
+  private AmazonAutoScaling mockAutoScaling = mock(AmazonAutoScaling.class);
+
+  private static final String ASG_NAME = "myasg";
+
+  // ASG with Launch Template
+  private final LaunchTemplateVersion ltVersionOld =
+      new LaunchTemplateVersion()
+          .withLaunchTemplateId("lt-1")
+          .withLaunchTemplateName("lt-1")
+          .withVersionNumber(1L)
+          .withLaunchTemplateData(
+              new ResponseLaunchTemplateData()
+                  .withImageId("ami-12345")
+                  .withInstanceType("t3.large"));
+
+  private final LaunchTemplateVersion ltVersionNew =
+      new LaunchTemplateVersion()
+          .withLaunchTemplateId("lt-1")
+          .withLaunchTemplateName("lt-1")
+          .withVersionNumber(2L)
+          .withLaunchTemplateData(
+              new ResponseLaunchTemplateData()
+                  .withImageId("ami-12345")
+                  .withInstanceType("t3.large"));
+
+  private final AutoScalingGroup asgWithLt =
+      new AutoScalingGroup()
+          .withAutoScalingGroupName(ASG_NAME)
+          .withLaunchTemplate(
+              new LaunchTemplateSpecification()
+                  .withLaunchTemplateId(ltVersionOld.getLaunchTemplateId())
+                  .withVersion(String.valueOf(ltVersionOld.getVersionNumber())));
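+
+  // Note: asgWithLt pins the server group to a numbered launch template version, while asgWithMip
+  // below uses "$Latest", which AWS resolves to the newest version each time instances launch.
+  // The two pinning styles side by side (values illustrative):
+  //
+  //   new LaunchTemplateSpecification().withLaunchTemplateId("lt-1").withVersion("1")        // fixed
+  //   new LaunchTemplateSpecification().withLaunchTemplateId("lt-1").withVersion("$Latest")  // floating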
.instanceType("some.type.xlarge") + .weightedCapacity("4") + .build(); + List ltOverrides = + Arrays.asList( + new LaunchTemplateOverrides() + .withInstanceType(override1.getInstanceType()) + .withWeightedCapacity(override1.getWeightedCapacity()), + new LaunchTemplateOverrides() + .withInstanceType(override2.getInstanceType()) + .withWeightedCapacity(override2.getWeightedCapacity())); + InstancesDistribution instancesDist = + new InstancesDistribution() + .withOnDemandBaseCapacity(1) + .withOnDemandPercentageAboveBaseCapacity(50) + .withSpotInstancePools(5) + .withSpotAllocationStrategy("lowest-price") + .withSpotMaxPrice("1.5"); + private final AutoScalingGroup asgWithMip = + new AutoScalingGroup() + .withAutoScalingGroupName(ASG_NAME) + .withMixedInstancesPolicy( + new MixedInstancesPolicy() + .withLaunchTemplate( + new com.amazonaws.services.autoscaling.model.LaunchTemplate() + .withOverrides(ltOverrides) + .withLaunchTemplateSpecification( + new LaunchTemplateSpecification() + .withLaunchTemplateId(ltVersionOld.getLaunchTemplateId()) + .withVersion("$Latest"))) + .withInstancesDistribution(instancesDist)); + + @BeforeEach + public void setup() { + + // mock autoscaling responses + when(mockAwsClientProvider.getAutoScaling( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockAutoScaling); + when(mockRegionScopedProvider.getAsgService()).thenReturn(mockAsgService); + + // mock Front50 service responses + Map applicationMap = new HashMap(); + applicationMap.put("application", "myAwsApp"); + applicationMap.put("legacyUdf", null); + when(mockFront50Service.getApplication(ASG_NAME)).thenReturn(applicationMap); + + // mock EC2 responses + when(mockRegionScopedProvider.getAmazonEC2()).thenReturn(mockEc2); + when(mockAwsClientProvider.getAmazonEC2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockEc2); + when(mockEc2.describeLaunchTemplateVersions(any(DescribeLaunchTemplateVersionsRequest.class))) + .thenReturn( + new DescribeLaunchTemplateVersionsResult().withLaunchTemplateVersions(ltVersionOld)); + when(mockEc2.describeImages(any(DescribeImagesRequest.class))) + .thenReturn(new DescribeImagesResult().withImages(new Image().withImageId("ami-12345"))); + when(mockEc2.createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class))) + .thenReturn( + new CreateLaunchTemplateVersionResult().withLaunchTemplateVersion(ltVersionNew)); + } + + @DisplayName("Given invalid requests, successfully validate with error messages") + @Test + public void modifyServerGroupLaunchTemplate_invalidRequests_expect_validationFailure() { + final String invalidReqDir = "/modifySgLaunchTemplate_invalid_requests/"; + final String pattern = PATH_PREFIX + invalidReqDir + "*.json"; + TestUtils.loadResourcesFromDir(pattern).stream() + .forEach( + ti -> { + final String testFileName = ti.getFilename(); + System.out.println("\nRunning tests for " + invalidReqDir + testFileName); + + // given + Map requestBody = TestUtils.loadJson(ti).asMap(); + + // when, then + final String expectedValidationMsg = + (testFileName.contains("-") + ? 
StringUtils.substringAfterLast(testFileName, "-") + : testFileName) + .split(".json")[0]; + + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(400) + .contentType(ContentType.JSON) + .assertThat() + .body("message", Matchers.equalTo("Validation Failed")) + .body("errors.size()", Matchers.equalTo(1)) + .body("errors[0]", Matchers.endsWith(expectedValidationMsg)); + }); + } + + @DisplayName( + "Given request to update launch template for a server group NOT backed by launch template, " + + "throws exception") + @Test + public void modifyServerGroupLaunchTemplate_sgWithLaunchConfig_expect_exception() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json") + .withValue("instanceType", "c4.large") + .asMap(); + AutoScalingGroup asgWithLc = + new AutoScalingGroup() + .withAutoScalingGroupName(ASG_NAME) + .withLaunchConfigurationName("some-launch-config"); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLc)); + + // when, then + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .prettyPrint(); + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + "Orchestration failed: ModifyServerGroupLaunchTemplateAtomicOperation | IllegalArgumentException: " + + "[Server group is not backed by a launch template.\n" + + asgWithLc + + "]")); + } + + @DisplayName( + "Given request to update launch template, " + + "successfully submit update auto scaling group request with expected configuration.") + @Test + public void modifyServerGroupLaunchTemplate_sgWithLaunchTemplate_success() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json") + .withValue("spotPrice", "0.5") + .withValue("instanceType", "t3.large") + .asMap(); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLt)); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + assertNotNull(getTaskUpdatesAfterCompletion(taskId)); + + // capture and assert arguments + ArgumentCaptor createLtVersionArgs = + ArgumentCaptor.forClass(CreateLaunchTemplateVersionRequest.class); + verify(mockEc2).createLaunchTemplateVersion(createLtVersionArgs.capture()); + CreateLaunchTemplateVersionRequest createLtVersionReq = createLtVersionArgs.getValue(); + + assertEquals("lt-1", 
createLtVersionReq.getLaunchTemplateId()); + assertEquals("ami-12345", createLtVersionReq.getLaunchTemplateData().getImageId()); + assertEquals("t3.large", createLtVersionReq.getLaunchTemplateData().getInstanceType()); + assertEquals( + "spot", + createLtVersionReq.getLaunchTemplateData().getInstanceMarketOptions().getMarketType()); + assertEquals( + "0.5", + createLtVersionReq + .getLaunchTemplateData() + .getInstanceMarketOptions() + .getSpotOptions() + .getMaxPrice()); + + ArgumentCaptor updateAsgArgs = + ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class); + verify(mockAutoScaling).updateAutoScalingGroup(updateAsgArgs.capture()); + UpdateAutoScalingGroupRequest updateAsgReq = updateAsgArgs.getValue(); + + assertEquals(ASG_NAME, updateAsgReq.getAutoScalingGroupName()); + assertEquals("2", updateAsgReq.getLaunchTemplate().getVersion()); + + assertNull(updateAsgReq.getMixedInstancesPolicy()); + } + + @DisplayName( + "Given request to update launch template along with mixed instances policy properties, for a server group with launch template, " + + "creates new launch template version and submits update auto scaling group request with mixed instances policy.") + @Test + public void modifyServerGroupLaunchTemplate_ltAndMipFields_createsNewLaunchTemplateVersion() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json") + .withValue("unlimitedCpuCredits", true) + .withValue("spotAllocationStrategy", "capacity-optimized") + .withValue( + "launchTemplateOverridesForInstanceType", + List.of( + Map.of("instanceType", "t3.large", "weightedCapacity", "2"), + Map.of("instanceType", "t3.xlarge", "weightedCapacity", "4"))) + .asMap(); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLt)); + + ResponseLaunchTemplateData ltData = + ltVersionNew + .getLaunchTemplateData() + .withCreditSpecification(new CreditSpecification().withCpuCredits("unlimited")); + when(mockEc2.createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class))) + .thenReturn( + new CreateLaunchTemplateVersionResult() + .withLaunchTemplateVersion( + new LaunchTemplateVersion() + .withLaunchTemplateData(ltData) + .withLaunchTemplateId("lt-1") + .withLaunchTemplateName("lt-1") + .withVersionNumber(2L))); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + assertNotNull(getTaskUpdatesAfterCompletion(taskId)); + + // capture and assert arguments + ArgumentCaptor createLtVersionArgs = + ArgumentCaptor.forClass(CreateLaunchTemplateVersionRequest.class); + verify(mockEc2).createLaunchTemplateVersion(createLtVersionArgs.capture()); + CreateLaunchTemplateVersionRequest createLtVersionReq = createLtVersionArgs.getValue(); + + assertEquals("lt-1", createLtVersionReq.getLaunchTemplateId()); + assertEquals( + "unlimited", + createLtVersionReq.getLaunchTemplateData().getCreditSpecification().getCpuCredits()); + + ArgumentCaptor updateAsgArgs = + ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class); + 
        ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class);
+    verify(mockAutoScaling).updateAutoScalingGroup(updateAsgArgs.capture());
+    UpdateAutoScalingGroupRequest updateAsgReq = updateAsgArgs.getValue();
+
+    assertEquals(ASG_NAME, updateAsgReq.getAutoScalingGroupName());
+    assertNull(updateAsgReq.getLaunchTemplate());
+
+    MixedInstancesPolicy mipInUpdateReq = updateAsgReq.getMixedInstancesPolicy();
+    assertNotNull(mipInUpdateReq);
+    assertEquals(
+        "lt-1",
+        mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getLaunchTemplateId());
+    assertEquals(
+        "2", mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getVersion());
+    assertEquals(
+        "capacity-optimized",
+        mipInUpdateReq.getInstancesDistribution().getSpotAllocationStrategy());
+    assertEquals(
+        "[{InstanceType: t3.large,WeightedCapacity: 2,}, {InstanceType: t3.xlarge,WeightedCapacity: 4,}]",
+        mipInUpdateReq.getLaunchTemplate().getOverrides().toString());
+  }
+
+  @DisplayName(
+      "Given request to update mixed instances policy properties only, for a server group with launch template and spotMaxPrice set, "
+          + "creates new launch template version and submits update auto scaling group request with mixed instances policy.")
+  @Test
+  public void
+      modifyServerGroupLaunchTemplate_convert_SgWithLtSpot_To_SgWithMip_createsNewLaunchTemplateVersion()
+          throws InterruptedException {
+    // given
+    Map<String, Object> requestBody =
+        TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json")
+            .withValue("asgName", ASG_NAME)
+            .withValue("spotAllocationStrategy", "capacity-optimized")
+            .withValue(
+                "launchTemplateOverridesForInstanceType",
+                List.of(
+                    Map.of("instanceType", "t3.large", "weightedCapacity", "2"),
+                    Map.of("instanceType", "t3.xlarge", "weightedCapacity", "4")))
+            .asMap();
+
+    LaunchTemplateVersion ltVersionOldLocal =
+        new LaunchTemplateVersion()
+            .withLaunchTemplateId("lt-1")
+            .withLaunchTemplateName("lt-spot-1")
+            .withVersionNumber(1L)
+            .withLaunchTemplateData(
+                new ResponseLaunchTemplateData()
+                    .withImageId("ami-12345")
+                    .withInstanceType("c3.large")
+                    .withInstanceMarketOptions(
+                        new LaunchTemplateInstanceMarketOptions()
+                            .withMarketType("spot")
+                            .withSpotOptions(
+                                new LaunchTemplateSpotMarketOptions().withMaxPrice("0.5"))));
+
+    LaunchTemplateVersion ltVersionNewLocal =
+        new LaunchTemplateVersion()
+            .withLaunchTemplateId(ltVersionOldLocal.getLaunchTemplateId())
+            .withLaunchTemplateName(ltVersionOldLocal.getLaunchTemplateName())
+            .withVersionNumber(2L)
+            .withLaunchTemplateData(
+                new ResponseLaunchTemplateData()
+                    .withImageId("ami-12345")
+                    .withInstanceType("c3.large"));
+
+    AutoScalingGroup asgWithLtSpot =
+        new AutoScalingGroup()
+            .withAutoScalingGroupName(ASG_NAME)
+            .withLaunchTemplate(
+                new LaunchTemplateSpecification()
+                    .withLaunchTemplateId(ltVersionOldLocal.getLaunchTemplateId())
+                    .withVersion(String.valueOf(ltVersionOldLocal.getVersionNumber())));
+
+    when(mockAutoScaling.describeAutoScalingGroups(
+            new DescribeAutoScalingGroupsRequest()
+                .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME))))
+        .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLtSpot));
+    when(mockEc2.describeLaunchTemplateVersions(any(DescribeLaunchTemplateVersionsRequest.class)))
+        .thenReturn(
+            new DescribeLaunchTemplateVersionsResult()
+                .withLaunchTemplateVersions(ltVersionOldLocal));
+    when(mockEc2.createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class)))
+        .thenReturn(
+            new CreateLaunchTemplateVersionResult().withLaunchTemplateVersion(ltVersionNewLocal));
+
+    // when, then
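+    // (The POST below returns a task id immediately; getTaskUpdatesAfterCompletion, inherited
+    // from AwsBaseSpec, waits for the async orchestration to finish and returns its accumulated
+    // status history.)
+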
String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + assertNotNull(getTaskUpdatesAfterCompletion(taskId)); + + // capture and assert arguments + ArgumentCaptor createLtVersionArgs = + ArgumentCaptor.forClass(CreateLaunchTemplateVersionRequest.class); + verify(mockEc2).createLaunchTemplateVersion(createLtVersionArgs.capture()); + CreateLaunchTemplateVersionRequest createLtVersionReq = createLtVersionArgs.getValue(); + + assertEquals("lt-1", createLtVersionReq.getLaunchTemplateId()); + assertNull( + createLtVersionReq + .getLaunchTemplateData() + .getInstanceMarketOptions()); // spotMaxPrice was removed + + ArgumentCaptor updateAsgArgs = + ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class); + verify(mockAutoScaling).updateAutoScalingGroup(updateAsgArgs.capture()); + UpdateAutoScalingGroupRequest updateAsgReq = updateAsgArgs.getValue(); + + assertEquals(ASG_NAME, updateAsgReq.getAutoScalingGroupName()); + assertNull( + updateAsgReq + .getLaunchTemplate()); // assert updated ASG uses mixed instances policy instead of + // launch template + + MixedInstancesPolicy mipInUpdateReq = updateAsgReq.getMixedInstancesPolicy(); + assertNotNull(mipInUpdateReq); + assertEquals( + "lt-1", + mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getLaunchTemplateId()); + assertEquals( + "2", mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getVersion()); + assertEquals( + "capacity-optimized", + mipInUpdateReq.getInstancesDistribution().getSpotAllocationStrategy()); + assertEquals( + "0.5", + mipInUpdateReq + .getInstancesDistribution() + .getSpotMaxPrice()); // spot max price was moved from LTData to MIP + assertEquals( + "[{InstanceType: t3.large,WeightedCapacity: 2,}, {InstanceType: t3.xlarge,WeightedCapacity: 4,}]", + mipInUpdateReq.getLaunchTemplate().getOverrides().toString()); + } + + @DisplayName( + "Given request to modify mixed instances policy fields, for a server group with mixed instances policy, " + + "successfully skips creating new launch template version and submits update auto scaling group request.") + @Test + public void + modifyMipOnlyFields_sgWithMixedInstancesPolicy_skips_newLaunchTemplateVersionCreation() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json") + .withValue("onDemandBaseCapacity", 2) + .withValue("onDemandPercentageAboveBaseCapacity", 25) + .withValue("spotAllocationStrategy", "capacity-optimized") + .withValue( + "launchTemplateOverridesForInstanceType", + List.of( + Map.of("instanceType", "c3.large", "weightedCapacity", "2"), + Map.of("instanceType", "c3.xlarge", "weightedCapacity", "4"))) + .asMap(); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithMip)); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + 
.path("id"); + + // then + assertNotNull(getTaskUpdatesAfterCompletion(taskId)); + + // verify new launch template version was NOT created + verify(mockEc2, never()) + .createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class)); + + // capture and assert arguments + ArgumentCaptor updateAsgArgs = + ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class); + verify(mockAutoScaling).updateAutoScalingGroup(updateAsgArgs.capture()); + UpdateAutoScalingGroupRequest updateAsgReq = updateAsgArgs.getValue(); + + assertEquals(ASG_NAME, updateAsgReq.getAutoScalingGroupName()); + assertNull(updateAsgReq.getLaunchTemplate()); + + MixedInstancesPolicy mipInUpdateReq = updateAsgReq.getMixedInstancesPolicy(); + assertNotNull(mipInUpdateReq); + assertEquals( + "lt-1", + mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getLaunchTemplateId()); + assertEquals( + "1", mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getVersion()); + assertEquals(2, mipInUpdateReq.getInstancesDistribution().getOnDemandBaseCapacity()); + assertEquals( + 25, mipInUpdateReq.getInstancesDistribution().getOnDemandPercentageAboveBaseCapacity()); + assertEquals( + "capacity-optimized", + mipInUpdateReq.getInstancesDistribution().getSpotAllocationStrategy()); + assertEquals(null, mipInUpdateReq.getInstancesDistribution().getSpotInstancePools()); + assertEquals( + "1.5", + mipInUpdateReq + .getInstancesDistribution() + .getSpotMaxPrice()); // spot max price in MIP wasn't modified + assertEquals( + "[{InstanceType: c3.large,WeightedCapacity: 2,}, {InstanceType: c3.xlarge,WeightedCapacity: 4,}]", + mipInUpdateReq.getLaunchTemplate().getOverrides().toString()); + } + + @DisplayName( + "Given request update mixed instances policy fields only, for a server group with launch template and NO spot options set, " + + "successfully skips creating new launch template version and updates auto scaling group request with mixed instances policy.") + @Test + public void modifyMipOnlyFields_sgWithLtOnDemand_skips_newLaunchTemplateVersionCreation() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json") + .withValue("spotPrice", "0.5") + .withValue("spotAllocationStrategy", "lowest-price") + .withValue("spotInstancePools", "6") + .withValue( + "launchTemplateOverridesForInstanceType", + List.of( + Map.of("instanceType", "c3.large", "weightedCapacity", "2"), + Map.of("instanceType", "c4.large", "weightedCapacity", "2"), + Map.of("instanceType", "c4.xlarge", "weightedCapacity", "4"), + Map.of("instanceType", "c3.xlarge", "weightedCapacity", "4"))) + .asMap(); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLt)); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + assertNotNull(getTaskUpdatesAfterCompletion(taskId)); + + // verify new launch template version was NOT created + verify(mockEc2, never()) + .createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class)); + + // capture and assert arguments + ArgumentCaptor 
updateAsgArgs = + ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class); + verify(mockAutoScaling).updateAutoScalingGroup(updateAsgArgs.capture()); + UpdateAutoScalingGroupRequest updateAsgReq = updateAsgArgs.getValue(); + + assertEquals(ASG_NAME, updateAsgReq.getAutoScalingGroupName()); + assertNull(updateAsgReq.getLaunchTemplate()); + + MixedInstancesPolicy mipInUpdateReq = updateAsgReq.getMixedInstancesPolicy(); + assertNotNull(mipInUpdateReq); + assertEquals( + "lt-1", + mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getLaunchTemplateId()); + assertEquals( + "1", mipInUpdateReq.getLaunchTemplate().getLaunchTemplateSpecification().getVersion()); + assertEquals( + "lowest-price", mipInUpdateReq.getInstancesDistribution().getSpotAllocationStrategy()); + assertEquals(6, mipInUpdateReq.getInstancesDistribution().getSpotInstancePools()); + assertEquals("0.5", mipInUpdateReq.getInstancesDistribution().getSpotMaxPrice()); + assertEquals( + "[{InstanceType: c3.large,WeightedCapacity: 2,}, {InstanceType: c4.large,WeightedCapacity: 2,}, {InstanceType: c4.xlarge,WeightedCapacity: 4,}, {InstanceType: c3.xlarge,WeightedCapacity: 4,}]", + mipInUpdateReq.getLaunchTemplate().getOverrides().toString()); + } + + @DisplayName( + "Given request to modify spot max price, for a server group with mixed instances policy, " + + "successfully skips creating new launch template version and submits update auto scaling group request.") + @Test + public void modifySpotPrice_sgWithMixedInstancesPolicy_skips_newLaunchTemplateVersionCreation() + throws InterruptedException { + // given + Map requestBody = + TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json") + .withValue("spotPrice", "2") + .asMap(); + when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithMip)); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + assertNotNull(getTaskUpdatesAfterCompletion(taskId)); + + // verify new launch template version creation was SKIPPED + verify(mockEc2, never()) + .createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class)); + + // capture and assert arguments + ArgumentCaptor updateAsgArgs = + ArgumentCaptor.forClass(UpdateAutoScalingGroupRequest.class); + verify(mockAutoScaling).updateAutoScalingGroup(updateAsgArgs.capture()); + UpdateAutoScalingGroupRequest updateAsgReq = updateAsgArgs.getValue(); + + assertEquals(ASG_NAME, updateAsgReq.getAutoScalingGroupName()); + assertNull(updateAsgReq.getLaunchTemplate()); + + MixedInstancesPolicy mipInUpdateReq = updateAsgReq.getMixedInstancesPolicy(); + assertNotNull(mipInUpdateReq); + assertEquals("2", mipInUpdateReq.getInstancesDistribution().getSpotMaxPrice()); + } + + @DisplayName( + "Given request to modify launch template, and new launch template version is created successfully, but update AutoScalingGroup fails, " + + "successfully deletes newly created launch template version to maintain atomicity.") + @Test + public void modifyLaunchTemplate_newLaunchTemplateVersionCreated_andDeleted_onUpdateFailure() + throws InterruptedException { + 
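+    // This test exercises the rollback path: the operation first creates a new launch template
+    // version, updateAutoScalingGroup is stubbed to fail, and the operation is then expected to
+    // delete the just-created version so the launch template is left unchanged (best-effort
+    // atomicity).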
    // given
+    Map<String, Object> requestBody =
+        TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json")
+            .withValue("instanceType", "t3.large")
+            .asMap();
+    when(mockAutoScaling.describeAutoScalingGroups(
+            new DescribeAutoScalingGroupsRequest()
+                .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME))))
+        .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLt));
+
+    UpdateAutoScalingGroupRequest updateAsgReq =
+        new UpdateAutoScalingGroupRequest()
+            .withAutoScalingGroupName(ASG_NAME)
+            .withLaunchTemplate(
+                new LaunchTemplateSpecification()
+                    .withLaunchTemplateId(ltVersionNew.getLaunchTemplateId())
+                    .withVersion(String.valueOf(ltVersionNew.getVersionNumber())));
+    when(mockAutoScaling.updateAutoScalingGroup(updateAsgReq))
+        .thenThrow(new AmazonAutoScalingException("Something went wrong."));
+
+    when(mockEc2.deleteLaunchTemplateVersions(
+            new DeleteLaunchTemplateVersionsRequest()
+                .withLaunchTemplateId(ltVersionNew.getLaunchTemplateId())
+                .withVersions(String.valueOf(ltVersionNew.getVersionNumber()))))
+        .thenReturn(
+            new DeleteLaunchTemplateVersionsResult()
+                .withSuccessfullyDeletedLaunchTemplateVersions(
+                    new DeleteLaunchTemplateVersionsResponseSuccessItem()
+                        .withLaunchTemplateId(ltVersionNew.getLaunchTemplateId())
+                        .withVersionNumber(ltVersionNew.getVersionNumber())));
+
+    // when, then
+    String taskId =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    // then
+    final String taskHistory = getTaskUpdatesAfterCompletion(taskId);
+    assertNotNull(taskHistory);
+    assertTrue(
+        taskHistory.contains(
+            "Orchestration failed: ModifyServerGroupLaunchTemplateAtomicOperation | LaunchTemplateException: [Failed to update server group myasg.Error: Something went wrong."));
+
+    // verify new launch template version was created
+    verify(mockEc2).createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class));
+
+    // verify updateAutoScalingGroup throws exception
+    assertThrows(
+        AmazonAutoScalingException.class,
+        () -> mockAutoScaling.updateAutoScalingGroup(updateAsgReq));
+
+    // verify newly created launch template version was deleted
+    ArgumentCaptor<DeleteLaunchTemplateVersionsRequest> deleteLtVersionArgs =
+        ArgumentCaptor.forClass(DeleteLaunchTemplateVersionsRequest.class);
+    verify(mockEc2).deleteLaunchTemplateVersions(deleteLtVersionArgs.capture());
+    DeleteLaunchTemplateVersionsRequest deleteLtVersionReq = deleteLtVersionArgs.getValue();
+
+    assertEquals(ltVersionNew.getLaunchTemplateId(), deleteLtVersionReq.getLaunchTemplateId());
+    assertEquals(1, deleteLtVersionReq.getVersions().size());
+    assertEquals(
+        String.valueOf(ltVersionNew.getVersionNumber()), deleteLtVersionReq.getVersions().get(0));
+  }
+
+  @DisplayName(
+      "Given request to modify launch template, and new launch template version is created successfully, but update AutoScalingGroup fails, "
+          + "and delete of newly created launch template version fails, exception is reported correctly.")
+  @Test
+  public void
+      modifyLaunchTemplate_onUpdateFailure_andDeletionOfLtVersionFailure_exceptionReportedCorrectly()
+          throws InterruptedException {
+    // given
+    Map<String, Object> requestBody =
+        TestUtils.loadJson(PATH_PREFIX + "modifyServerGroupLaunchTemplate-basic.json")
+            .withValue("instanceType", "t3.large")
+            .asMap();
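+
+    // The stubs below mirror the previous test, except deleteLaunchTemplateVersions reports the
+    // new version as unsuccessfully deleted, so the task history should surface both the update
+    // failure and the cleanup failure.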
when(mockAutoScaling.describeAutoScalingGroups( + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(Collections.singletonList(ASG_NAME)))) + .thenReturn(new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asgWithLt)); + + UpdateAutoScalingGroupRequest updateAsgReq = + new UpdateAutoScalingGroupRequest() + .withAutoScalingGroupName(ASG_NAME) + .withLaunchTemplate( + new LaunchTemplateSpecification() + .withLaunchTemplateId(ltVersionNew.getLaunchTemplateId()) + .withVersion(String.valueOf(ltVersionNew.getVersionNumber()))); + when(mockAutoScaling.updateAutoScalingGroup(updateAsgReq)) + .thenThrow(AmazonAutoScalingException.class); + + when(mockEc2.deleteLaunchTemplateVersions( + new DeleteLaunchTemplateVersionsRequest() + .withLaunchTemplateId(ltVersionNew.getLaunchTemplateId()) + .withVersions(String.valueOf(ltVersionNew.getVersionNumber())))) + .thenReturn( + new DeleteLaunchTemplateVersionsResult() + .withUnsuccessfullyDeletedLaunchTemplateVersions( + new DeleteLaunchTemplateVersionsResponseErrorItem() + .withLaunchTemplateId(ltVersionNew.getLaunchTemplateId()) + .withVersionNumber(ltVersionNew.getVersionNumber()) + .withResponseError(new ResponseError().withCode("unexpectedError")))); + + // when, then + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(getBaseUrl() + UPDATE_LAUNCH_TEMPLATE_OP_PATH) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + final String taskHistory = getTaskUpdatesAfterCompletion(taskId); + assertTrue( + taskHistory.contains( + "Orchestration failed: ModifyServerGroupLaunchTemplateAtomicOperation | LaunchTemplateException: " + + "[Failed to update server group myasg.Error: null")); + assertTrue( + taskHistory.contains( + "Failed to clean up launch template version! Error: Failed to delete launch template version 2 for launch template ID lt-1 because of error 'unexpectedError'")); + + // verify new launch template version was created + verify(mockEc2).createLaunchTemplateVersion(any(CreateLaunchTemplateVersionRequest.class)); + + // verify updateAutoScalingGroup throws exception + assertThrows( + AmazonAutoScalingException.class, + () -> mockAutoScaling.updateAutoScalingGroup(updateAsgReq)); + + // verify deletion of the newly created launch template version was attempted + ArgumentCaptor<DeleteLaunchTemplateVersionsRequest> deleteLtVersionArgs = + ArgumentCaptor.forClass(DeleteLaunchTemplateVersionsRequest.class); + verify(mockEc2).deleteLaunchTemplateVersions(deleteLtVersionArgs.capture()); + DeleteLaunchTemplateVersionsRequest deleteLtVersionReq = deleteLtVersionArgs.getValue(); + + assertEquals(ltVersionNew.getLaunchTemplateId(), deleteLtVersionReq.getLaunchTemplateId()); + assertEquals(1, deleteLtVersionReq.getVersions().size()); + assertEquals( + String.valueOf(ltVersionNew.getVersionNumber()), deleteLtVersionReq.getVersions().get(0)); + } +} diff --git a/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/utils/TestUtils.java b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/utils/TestUtils.java new file mode 100644 index 00000000000..331427f4c28 --- /dev/null +++ b/clouddriver-aws/src/integration/java/com/netflix/spinnaker/clouddriver/aws/utils/TestUtils.java @@ -0,0 +1,122 @@ +/* + * Copyright 2021 Netflix, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.utils; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Splitter; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.springframework.core.io.DefaultResourceLoader; +import org.springframework.core.io.Resource; +import org.springframework.core.io.ResourceLoader; +import org.springframework.core.io.support.ResourcePatternUtils; + +public class TestUtils { + + public static List<Resource> loadResourcesFromDir(String path) { + try { + return Arrays.asList( + ResourcePatternUtils.getResourcePatternResolver(new DefaultResourceLoader()) + .getResources(path)); + } catch (IOException ex) { + throw new RuntimeException("Failed to load resources from directory " + path, ex); + } + } + + public static TestResourceFile loadJson(Resource resource) { + try (InputStream is = resource.getInputStream()) { + ObjectMapper objectMapper = new ObjectMapper(); + JsonNode jsonNode = objectMapper.readTree(is); + List<Map<String, Object>> content; + + if (jsonNode.isArray()) { + content = objectMapper.convertValue(jsonNode, new TypeReference<>() {}); + } else { + content = + Collections.singletonList( + objectMapper.convertValue(jsonNode, new TypeReference<>() {})); + } + return new TestResourceFile(content); + } catch (IOException ex) { + throw new RuntimeException( + "Failed to load test input from file " + resource.getFilename(), ex); + } + } + + public static TestResourceFile loadJson(String path) { + ResourceLoader resourceLoader = new DefaultResourceLoader(); + return loadJson(resourceLoader.getResource(path)); + } + + public static class TestResourceFile { + private final List<Map<String, Object>> content; + + public TestResourceFile(List<Map<String, Object>> content) { + this.content = content; + } + + public List<Map<String, Object>> asList() { + return content; + } + + public Map<String, Object> asMap() { + return content.get(0); + } + + @SuppressWarnings("unchecked") + public TestResourceFile withValue(String path, Object value) { + List<String> parts = Splitter.on('.').splitToList(path); + + for (Map<String, Object> entry : content) { + for (int i = 0; i < parts.size(); i++) { + if (parts.get(i).matches("^.*\\[[0-9]*]$")) { + String key = parts.get(i).substring(0, parts.get(i).indexOf('[')); + int index = + Integer.parseInt( + parts + .get(i) + .substring(parts.get(i).indexOf('[') + 1, parts.get(i).indexOf(']'))); + List<Map<String, Object>> list = (List<Map<String, Object>>) entry.get(key); + if (i == parts.size() - 1) { + list.add(index, (Map<String, Object>) value); + break; + } + entry = list.get(index); + } else if (i == parts.size() - 1) { + entry.put(parts.get(i), value); + break; + } else if (!entry.containsKey(parts.get(i))) { + entry.put(parts.get(i), new HashMap<>()); + entry = (Map<String, Object>) entry.get(parts.get(i)); + } else { + entry = (Map<String, Object>) entry.get(parts.get(i)); + } + } + } + +
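+ // Path syntax, for example: withValue("capacity.desired", 2) walks nested maps, while + // withValue("overrides[0]", value) targets a list index; missing intermediate maps are + // created on demand. (Illustrative paths; the tests call e.g. withValue("spotPrice", "2").)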
return this; + } + } +} diff --git a/clouddriver-aws/src/integration/resources/clouddriver.yml b/clouddriver-aws/src/integration/resources/clouddriver.yml new file mode 100644 index 00000000000..ca4208b7392 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/clouddriver.yml @@ -0,0 +1,51 @@ +spring: + application: + name: clouddriver + +aws: + enabled: true + primaryAccount: aws-account1 + accounts: + - name: aws-account1 + requiredGroupMembership: [] + providerVersion: V1 + permissions: {} + accountId: '123456789012' + regions: + - name: us-west-1 + assumeRole: role/SpinnakerManaged + bakeryDefaults: + baseImages: [] + defaultKeyPairTemplate: '{{name}}-keypair' + defaultRegions: + - name: us-west-1 + defaults: + iamRole: BaseIAMRole + +redis: + enabled: false + cache: + enabled: false + scheduler: + enabled: false + taskRepository: + enabled: false + +services: + fiat: + baseUrl: http://fiat.net + front50: + baseUrl: http://front50.net + +--- + +spring: + profiles: launch-templates + +aws: + enabled: true + features: + launch-templates: + enabled: true + allowed-applications: "myAwsApp:aws-account1:us-west-1" + excluded-applications: "myExcludedApp:aws-account1:us-west-1" diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup-basic.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup-basic.json new file mode 100644 index 00000000000..2901608dda2 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup-basic.json @@ -0,0 +1,24 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "instanceType":"c3.large", + "legacyUdf": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/Region not configured.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/Region not configured.json new file mode 100644 index 00000000000..b5c4963f2b8 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/Region not configured.json @@ -0,0 +1,22 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-2": ["us-west-2a"] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "setLaunchTemplate": true, + "instanceType":"c3.small" +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/amiName.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/amiName.empty.json new file mode 100644 index 00000000000..82768f6fac5 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/amiName.empty.json @@ -0,0 +1,23 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + 
"desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "instanceType": "m5.medium" +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/application.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/application.empty.json new file mode 100644 index 00000000000..abfb9927edc --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/application.empty.json @@ -0,0 +1,23 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "instanceType":"c3.small", + "legacyUdf": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/availabilityZones.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/availabilityZones.empty.json new file mode 100644 index 00000000000..884edf68087 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/availabilityZones.empty.json @@ -0,0 +1,19 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "instanceType":"t2.small", + "legacyUdf": false, + "subnetType": "internal" +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/burstingOff-bursting.not.supported.by.instanceType.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/burstingOff-bursting.not.supported.by.instanceType.json new file mode 100644 index 00000000000..435de416650 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/burstingOff-bursting.not.supported.by.instanceType.json @@ -0,0 +1,26 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "setLaunchTemplate": true, + "instanceType":"c3.small", + "unlimitedCpuCredits": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/burstingOn-bursting.not.supported.by.instanceType.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/burstingOn-bursting.not.supported.by.instanceType.json new file mode 100644 index 00000000000..99ef4982385 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/burstingOn-bursting.not.supported.by.instanceType.json @@ -0,0 +1,26 @@ +{ + "type":"createServerGroup", + 
"account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "setLaunchTemplate": true, + "instanceType":"c3.small", + "unlimitedCpuCredits": true +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-amiName.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-amiName.empty.json new file mode 100644 index 00000000000..82768f6fac5 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-amiName.empty.json @@ -0,0 +1,23 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "instanceType": "m5.medium" +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-application.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-application.empty.json new file mode 100644 index 00000000000..abfb9927edc --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-application.empty.json @@ -0,0 +1,23 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "instanceType":"c3.small", + "legacyUdf": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-availabilityZones.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-availabilityZones.empty.json new file mode 100644 index 00000000000..884edf68087 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-availabilityZones.empty.json @@ -0,0 +1,19 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "instanceType":"t2.small", + "legacyUdf": false, + "subnetType": "internal" +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-burstingOff-bursting.not.supported.by.instanceType.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-burstingOff-bursting.not.supported.by.instanceType.json 
new file mode 100644 index 00000000000..435de416650 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-burstingOff-bursting.not.supported.by.instanceType.json @@ -0,0 +1,26 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "setLaunchTemplate": true, + "instanceType":"c3.small", + "unlimitedCpuCredits": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-burstingOn-bursting.not.supported.by.instanceType.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-burstingOn-bursting.not.supported.by.instanceType.json new file mode 100644 index 00000000000..99ef4982385 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-burstingOn-bursting.not.supported.by.instanceType.json @@ -0,0 +1,26 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "setLaunchTemplate": true, + "instanceType":"c3.small", + "unlimitedCpuCredits": true +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-instanceDistribution-spotInstancePools.not.supported.for.spotAllocationStrategy.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-instanceDistribution-spotInstancePools.not.supported.for.spotAllocationStrategy.json new file mode 100644 index 00000000000..27a75618f3d --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-instanceDistribution-spotInstancePools.not.supported.for.spotAllocationStrategy.json @@ -0,0 +1,27 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "stack": "myStack", + "application": "myAwsApp", + "amiName": "ami-12345", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "instanceType": "m5.medium", + "setLaunchTemplate": true, + "spotAllocationStrategy":"capacity-optimized", + "spotInstancePools": 3 +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-instanceType.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-instanceType.empty.json new file mode 100644 index 00000000000..f95734195b8 --- /dev/null +++ 
b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-instanceType.empty.json @@ -0,0 +1,23 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-multiTypes-bursting.not.supported.by.instanceType.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-multiTypes-bursting.not.supported.by.instanceType.json new file mode 100644 index 00000000000..158fae85201 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/createServerGroup-multiTypes-bursting.not.supported.by.instanceType.json @@ -0,0 +1,45 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "stack": "myStack", + "application": "myAwsApp", + "amiName": "ami-12345", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false, + "instanceType": "t2.large", + "setLaunchTemplate": true, + "unlimitedCpuCredits": true, + "onDemandBaseCapacity":1, + "onDemandPercentageAboveBaseCapacity":50, + "onDemandAllocationStrategy": "prioritized", + "spotAllocationStrategy":"capacity-optimized", + "spotPrice": "0.5", + "launchTemplateOverridesForInstanceType":[ + { + "instanceType":"t3.large", + "weightedCapacity":"1" + }, + { + "instanceType":"c3.large", + "weightedCapacity":"1" + }, + { + "instanceType":"c3.xlarge", + "weightedCapacity":"2" + } + ] +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/instanceType.empty.json b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/instanceType.empty.json new file mode 100644 index 00000000000..f95734195b8 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/createServerGroup_invalid_requests/instanceType.empty.json @@ -0,0 +1,23 @@ +{ + "type":"createServerGroup", + "account": "aws_account", + "amiName": "ami-12345", + "stack": "myStack", + "application": "myAwsApp", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "credentials": "aws-account1", + "healthCheckType":"EC2", + "iamRole":"BaseInstanceProfile", + "legacyUdf": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/modifyServerGroupLaunchTemplate-basic.json b/clouddriver-aws/src/integration/resources/testinputs/modifyServerGroupLaunchTemplate-basic.json new file mode 100644 index 00000000000..02cf511bb20 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/modifyServerGroupLaunchTemplate-basic.json @@ -0,0 +1,9 @@ +{ + "type":"updateLaunchTemplate", + "cloudProvider": "aws", + "account": "aws_account", + "application": "myAwsApp", + "credentials": "aws-account1", + "region": "us-west-1", + "asgName": 
"myasg" +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/No changes requested to launch template or related server group fields for modifyServerGroupLaunchTemplate operation..json b/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/No changes requested to launch template or related server group fields for modifyServerGroupLaunchTemplate operation..json new file mode 100644 index 00000000000..442ba3f3f95 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/No changes requested to launch template or related server group fields for modifyServerGroupLaunchTemplate operation..json @@ -0,0 +1,11 @@ +{ + "type":"modifyServerGroupLaunchTemplate", + "cloudProvider": "aws", + "account": "aws_account", + "application": "myAwsApp", + "credentials": "aws-account1", + "region": "us-west-1", + "asgName": "asg-test-v000", + "securityGroupsAppendOnly": true, + "copySourceCustomBlockDeviceMappings": true +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/bursting.not.supported.by.instanceType.json b/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/bursting.not.supported.by.instanceType.json new file mode 100644 index 00000000000..4b4ecabca50 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/bursting.not.supported.by.instanceType.json @@ -0,0 +1,14 @@ +{ + "type":"modifyServerGroupLaunchTemplate", + "cloudProvider": "aws", + "account": "aws_account", + "amiName": "ami-12345", + "application": "myAwsApp", + "credentials": "aws-account1", + "iamRole":"BaseInstanceProfile", + "region": "us-west-1", + "legacyUdf": false, + "asgName": "asg-test-v000", + "instanceType":"c3.large", + "unlimitedCpuCredits": false +} diff --git a/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/spotInstancePools.not.supported.for.spotAllocationStrategy.json b/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/spotInstancePools.not.supported.for.spotAllocationStrategy.json new file mode 100644 index 00000000000..9e1bfb42393 --- /dev/null +++ b/clouddriver-aws/src/integration/resources/testinputs/modifySgLaunchTemplate_invalid_requests/spotInstancePools.not.supported.for.spotAllocationStrategy.json @@ -0,0 +1,15 @@ +{ + "type":"modifyServerGroupLaunchTemplate", + "cloudProvider": "aws", + "account": "aws_account", + "amiName": "ami-12345", + "application": "myAwsApp", + "credentials": "aws-account1", + "iamRole":"BaseInstanceProfile", + "region": "us-west-1", + "legacyUdf": false, + "asgName": "asg-test-v000", + "instanceType":"c3.large", + "spotAllocationStrategy": "capacity-optimized", + "spotInstancePools": 3 +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/AwsConfigurationProperties.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/AwsConfigurationProperties.groovy index 9504f680b3a..88bff09f858 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/AwsConfigurationProperties.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/AwsConfigurationProperties.groovy @@ -39,6 +39,7 @@ class AwsConfigurationProperties { static class AlarmsConfig { boolean enabled = false int daysToKeep = 90 + String 
alarmsNamePattern = ".+-v[0-9]{3}-alarm-.+" } @NestedConfigurationProperty @@ -46,8 +47,19 @@ class AwsConfigurationProperties } @Canonical - static class MigrationConfig { - List<String> infrastructureApplications = [] + static class CloudFormationConfig { + boolean changeSetsIncludeNestedStacks = false + } + + /** + * Health check related config settings. + */ + @Canonical + static class HealthConfig { + /** + * Flag to toggle verifying the account health check. By default, the account health check is enabled. + */ + boolean verifyAccountHealth = true } @NestedConfigurationProperty @@ -55,5 +67,7 @@ class AwsConfigurationProperties @NestedConfigurationProperty final CleanupConfig cleanup = new CleanupConfig() @NestedConfigurationProperty - final MigrationConfig migration = new MigrationConfig() + final CloudFormationConfig cloudformation = new CloudFormationConfig() + @NestedConfigurationProperty + final HealthConfig health = new HealthConfig() } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgent.groovy index ea6c12b5edc..947f37ef868 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgent.groovy @@ -28,8 +28,7 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.util.logging.Slf4j import org.joda.time.DateTime @@ -41,31 +40,35 @@ class CleanupAlarmsAgent implements RunnableAgent, CustomScheduledAgent { public static final long POLL_INTERVAL_MILLIS = TimeUnit.HOURS.toMillis(24) public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(20) - public static final Pattern ALARM_NAME_PATTERN = Pattern.compile(".+-v[0-9]{3}-alarm-.+") + public final Pattern ALARM_NAME_PATTERN = Pattern.compile(alarmsNamePattern) final AmazonClientProvider amazonClientProvider - final AccountCredentialsRepository accountCredentialsRepository + final CredentialsRepository<NetflixAmazonCredentials> credentialsRepository final long pollIntervalMillis final long timeoutMillis final int daysToLeave + final String alarmsNamePattern; CleanupAlarmsAgent(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, - int daysToLeave) { - this(amazonClientProvider, accountCredentialsRepository, POLL_INTERVAL_MILLIS, DEFAULT_TIMEOUT_MILLIS, daysToLeave) + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository, + int daysToLeave, + String alarmsNamePattern) { + this(amazonClientProvider, credentialsRepository, POLL_INTERVAL_MILLIS, DEFAULT_TIMEOUT_MILLIS, daysToLeave, alarmsNamePattern) } CleanupAlarmsAgent(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository, long pollIntervalMillis, long timeoutMills, - int daysToLeave) { + int daysToLeave, + String alarmsNamePattern) { this.amazonClientProvider = amazonClientProvider -
this.accountCredentialsRepository = accountCredentialsRepository + this.credentialsRepository = credentialsRepository this.pollIntervalMillis = pollIntervalMillis this.timeoutMillis = timeoutMills this.daysToLeave = daysToLeave + this.alarmsNamePattern = alarmsNamePattern } @Override @@ -83,43 +86,45 @@ class CleanupAlarmsAgent implements RunnableAgent, CustomScheduledAgent { getAccounts().each { NetflixAmazonCredentials credentials -> credentials.regions.each { AmazonCredentials.AWSRegion region -> log.info("Looking for alarms to delete") - - def cloudWatch = amazonClientProvider.getCloudWatch(credentials, region.name) - Set<String> attachedAlarms = getAttachedAlarms(amazonClientProvider.getAutoScaling(credentials, region.name)) - def describeAlarmsRequest = new DescribeAlarmsRequest().withStateValue(StateValue.INSUFFICIENT_DATA) - - while (true) { - def result = cloudWatch.describeAlarms(describeAlarmsRequest) - - List<MetricAlarm> alarmsToDelete = result.metricAlarms.findAll { - it.stateUpdatedTimestamp.before(DateTime.now().minusDays(daysToLeave).toDate()) && - !attachedAlarms.contains(it.alarmName) && - ALARM_NAME_PATTERN.matcher(it.alarmName).matches() - } - - if (alarmsToDelete) { - // terminate up to 20 alarms at a time (avoids any AWS limits on # of concurrent deletes) - alarmsToDelete.collate(20).each { - log.info("Deleting ${it.size()} alarms in ${credentials.name}/${region.name} " + - "(alarms: ${it.alarmName.join(", ")})") - cloudWatch.deleteAlarms(new DeleteAlarmsRequest().withAlarmNames(it.alarmName)) - Thread.sleep(500) + try { + def cloudWatch = amazonClientProvider.getCloudWatch(credentials, region.name) + Set<String> attachedAlarms = getAttachedAlarms(amazonClientProvider.getAutoScaling(credentials, region.name)) + def describeAlarmsRequest = new DescribeAlarmsRequest().withStateValue(StateValue.INSUFFICIENT_DATA) + + while (true) { + def result = cloudWatch.describeAlarms(describeAlarmsRequest) + + List<MetricAlarm> alarmsToDelete = result.metricAlarms.findAll { + it.stateUpdatedTimestamp.before(DateTime.now().minusDays(daysToLeave).toDate()) && + !attachedAlarms.contains(it.alarmName) && + ALARM_NAME_PATTERN.matcher(it.alarmName).matches() } - - if (alarmsToDelete) { + // terminate up to 20 alarms at a time (avoids any AWS limits on # of concurrent deletes) + alarmsToDelete.collate(20).each { + log.info("Deleting ${it.size()} alarms in ${credentials.name}/${region.name} " + + "(alarms: ${it.alarmName.join(", ")})") + cloudWatch.deleteAlarms(new DeleteAlarmsRequest().withAlarmNames(it.alarmName)) + Thread.sleep(500) + } + } - if (result.nextToken) { - describeAlarmsRequest.withNextToken(result.nextToken) - } else { - break + if (result.nextToken) { + describeAlarmsRequest.withNextToken(result.nextToken) + } else { + break + } } + } catch (Exception e) { + log.error("Error occurred while processing alarms for ${credentials.name}/${region.name}: ${e.message}", e) } } } } private Set<NetflixAmazonCredentials> getAccounts() { - ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, NetflixAmazonCredentials) + return credentialsRepository.getAll() } private static Set<String> getAttachedAlarms(AmazonAutoScaling autoScaling) { diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgent.groovy index 533d7546337..f19623d7b05 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgent.groovy +++
b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgent.groovy @@ -21,14 +21,15 @@ import com.amazonaws.services.ec2.model.Filter import com.amazonaws.services.ec2.model.Instance import com.amazonaws.services.ec2.model.TerminateInstancesRequest import com.netflix.spinnaker.cats.agent.RunnableAgent +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DetachInstancesAtomicOperation import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import com.netflix.spinnaker.clouddriver.security.ProviderUtils +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.util.logging.Slf4j import java.util.concurrent.TimeUnit @@ -39,17 +40,17 @@ class CleanupDetachedInstancesAgent implements RunnableAgent, CustomScheduledAge public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(20) final AmazonClientProvider amazonClientProvider - final AccountCredentialsRepository accountCredentialsRepository + final CredentialsRepository<NetflixAmazonCredentials> accountCredentialsRepository final long pollIntervalMillis final long timeoutMillis CleanupDetachedInstancesAgent(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository) { + CredentialsRepository<NetflixAmazonCredentials> accountCredentialsRepository) { this(amazonClientProvider, accountCredentialsRepository, DEFAULT_POLL_INTERVAL_MILLIS, DEFAULT_TIMEOUT_MILLIS) } CleanupDetachedInstancesAgent(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, + CredentialsRepository<NetflixAmazonCredentials> accountCredentialsRepository, long pollIntervalMillis, long timeoutMills) { this.amazonClientProvider = amazonClientProvider @@ -73,7 +74,7 @@ class CleanupDetachedInstancesAgent implements RunnableAgent, CustomScheduledAge getAccounts().each { NetflixAmazonCredentials credentials -> credentials.regions.each { AmazonCredentials.AWSRegion region -> log.info("Looking for instances pending termination in ${credentials.name}:${region.name}") - + try { def amazonEC2 = amazonClientProvider.getAmazonEC2(credentials, region.name, true) def describeInstancesRequest = new DescribeInstancesRequest().withFilters( new Filter("tag-key", [DetachInstancesAtomicOperation.TAG_PENDING_TERMINATION]) @@ -102,12 +103,15 @@ class CleanupDetachedInstancesAgent implements RunnableAgent, CustomScheduledAge break } } + } catch (Exception e) { + log.error("Error occurred while processing instances pending termination for ${credentials.name}/${region.name}: ${e.message}", e) + } } } } private Set<NetflixAmazonCredentials> getAccounts() { - ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, NetflixAmazonCredentials) + return accountCredentialsRepository.getAll() } /** diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgent.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgent.java deleted file mode 100644 index 03b8a6291f5..00000000000 ---
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgent.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.agent; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; -import com.amazonaws.services.ec2.model.ClassicLinkInstance; -import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesResult; -import com.amazonaws.services.ec2.model.DescribeInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeInstancesResult; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.Filter; -import com.amazonaws.services.ec2.model.GroupIdentifier; -import com.amazonaws.services.ec2.model.Instance; -import com.amazonaws.services.ec2.model.SecurityGroup; -import com.amazonaws.services.ec2.model.Tag; -import com.amazonaws.services.ec2.model.VpcClassicLink; -import com.google.common.base.Strings; -import com.netflix.frigga.Names; -import com.netflix.spinnaker.cats.agent.AccountAware; -import com.netflix.spinnaker.cats.agent.RunnableAgent; -import com.netflix.spinnaker.config.AwsConfiguration; -import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.time.Clock; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -public class ReconcileClassicLinkSecurityGroupsAgent implements RunnableAgent, CustomScheduledAgent, AccountAware { - - static final String AUTOSCALING_TAG = "aws:autoscaling:groupName"; - static final int RUNNING_STATE = 16; - - private final Logger log = LoggerFactory.getLogger(getClass()); - public static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.SECONDS.toMillis(30); - public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5); - public static final long DEFAULT_REQUIRED_INSTANCE_LIFETIME = TimeUnit.MINUTES.toMillis(5); - - private final AmazonClientProvider amazonClientProvider; - private final NetflixAmazonCredentials account; - private final String region; - private final AwsConfiguration.DeployDefaults deployDefaults; - private final long pollIntervalMillis; - private final long timeoutMillis; - private final 
long requiredInstanceLifetime; - private final Clock clock; - - - @Override - public String getAccountName() { - return account.getName(); - } - - public ReconcileClassicLinkSecurityGroupsAgent(AmazonClientProvider amazonClientProvider, - NetflixAmazonCredentials account, - String region, - AwsConfiguration.DeployDefaults deployDefaults) { - this(amazonClientProvider, account, region, deployDefaults, DEFAULT_POLL_INTERVAL_MILLIS, DEFAULT_TIMEOUT_MILLIS, DEFAULT_REQUIRED_INSTANCE_LIFETIME, Clock.systemUTC()); - } - - public ReconcileClassicLinkSecurityGroupsAgent(AmazonClientProvider amazonClientProvider, - NetflixAmazonCredentials account, - String region, - AwsConfiguration.DeployDefaults deployDefaults, - long pollIntervalMillis, - long timeoutMillis, - long requiredInstanceLifetime, - Clock clock) { - this.amazonClientProvider = amazonClientProvider; - this.account = account; - this.region = region; - this.deployDefaults = deployDefaults; - this.pollIntervalMillis = pollIntervalMillis; - this.timeoutMillis = timeoutMillis; - this.requiredInstanceLifetime = requiredInstanceLifetime; - this.clock = clock; - } - - - @Override - public void run() { - if (!deployDefaults.isReconcileClassicLinkAccount(account)) { - return; - } - log.info("Checking classic link security groups in {}/{}", account.getName(), region); - AmazonEC2 ec2 = amazonClientProvider.getAmazonEC2(account, region, true); - List<String> classicLinkVpcIds = ec2.describeVpcClassicLink().getVpcs().stream().filter(VpcClassicLink::getClassicLinkEnabled).map(VpcClassicLink::getVpcId).collect(Collectors.toList()); - if (classicLinkVpcIds.size() > 1) { - log.warn("Multiple classicLinkVpcs found: {}", classicLinkVpcIds); - throw new IllegalStateException("More than 1 classicLinkVpc found: " + classicLinkVpcIds); - } - - if (classicLinkVpcIds.isEmpty()) { - return; - } - String classicLinkVpcId = classicLinkVpcIds.get(0); - - final Map<String, ClassicLinkInstance> classicLinkInstances = new HashMap<>(); - DescribeInstancesRequest describeInstances = new DescribeInstancesRequest().withMaxResults(500); - while (true) { - DescribeInstancesResult instanceResult = ec2.describeInstances(describeInstances); - instanceResult.getReservations().stream() - .flatMap(r -> r.getInstances().stream()) - .filter(i -> i.getVpcId() == null) - .filter(i -> Optional.ofNullable(i.getState()).filter(is -> is.getCode() == RUNNING_STATE).isPresent()) - .filter(this::isInstanceOldEnough) - .map(i -> new ClassicLinkInstance().withInstanceId(i.getInstanceId()).withVpcId(classicLinkVpcId).withTags(i.getTags())) - .forEach(cli -> classicLinkInstances.put(cli.getInstanceId(), cli)); - - if (instanceResult.getNextToken() == null) { - break; - } - describeInstances.setNextToken(instanceResult.getNextToken()); - } - - DescribeClassicLinkInstancesRequest request = new DescribeClassicLinkInstancesRequest().withMaxResults(1000); - while (true) { - DescribeClassicLinkInstancesResult result = ec2.describeClassicLinkInstances(request); - result.getInstances().forEach(i -> classicLinkInstances.put(i.getInstanceId(), i)); - if (result.getNextToken() == null) { - break; - } - request.setNextToken(result.getNextToken()); - } - - log.info("{} existing classic instances in {}/{}", classicLinkInstances.size(), account.getName(), region); - - Map<String, String> groupNamesToIds = ec2.describeSecurityGroups( - new DescribeSecurityGroupsRequest() - .withFilters( - new Filter("vpc-id").withValues(classicLinkVpcId))) - .getSecurityGroups() - .stream() - .collect(Collectors.toMap( - SecurityGroup::getGroupName, -
SecurityGroup::getGroupId)); - - reconcileInstances(ec2, groupNamesToIds, classicLinkInstances.values()); - } - - boolean isInstanceOldEnough(Instance instance) { - return Optional.ofNullable(instance.getLaunchTime()) - .map(Date::getTime) - .map(Instant::ofEpochMilli) - .map(i -> i.plusMillis(requiredInstanceLifetime)) - .map(i -> clock.instant().isAfter(i)) - .orElse(false); - } - - void reconcileInstances(AmazonEC2 ec2, Map<String, String> groupNamesToIds, Collection<ClassicLinkInstance> instances) { - StringBuilder report = new StringBuilder(); - for (ClassicLinkInstance i : instances) { - List<String> existingClassicLinkGroups = i.getGroups().stream() - .map(GroupIdentifier::getGroupId) - .collect(Collectors.toList()); - - int maxNewGroups = deployDefaults.getMaxClassicLinkSecurityGroups() - existingClassicLinkGroups.size(); - if (maxNewGroups > 0) { - String asgName = i.getTags() - .stream() - .filter(t -> AUTOSCALING_TAG.equals(t.getKey())) - .map(Tag::getValue) - .findFirst() - .orElse(null); - - List<String> candidateGroupNames = getSecurityGroupNames(asgName); - - List<String> missingGroupIds = candidateGroupNames - .stream() - .map(groupNamesToIds::get) - .filter(name -> name != null && !existingClassicLinkGroups.contains(name)) - .limit(maxNewGroups) - .collect(Collectors.toList()); - - if (!missingGroupIds.isEmpty()) { - List<String> groupIds = new ArrayList<>(existingClassicLinkGroups); - groupIds.addAll(missingGroupIds); - if (deployDefaults.getReconcileClassicLinkSecurityGroups() == AwsConfiguration.DeployDefaults.ReconcileMode.MODIFY) { - try { - ec2.attachClassicLinkVpc(new AttachClassicLinkVpcRequest() - .withVpcId(i.getVpcId()) - .withGroups(groupIds) - .withInstanceId(i.getInstanceId())); - } catch (AmazonServiceException ase) { - log.warn("Failed calling attachClassicLinkVpc", ase); - } - } - report.append("\n\t").append(Strings.padStart(i.getInstanceId(), 24, ' ')).append(missingGroupIds); - } - } - } - if (report.length() > 0) { - log.info("Attach to classicLinkVpc: account: " + account.getName() + ", region: " + region + report); - } - } - - private List<String> getSecurityGroupNames(String asgName) { - Set<String> groups = new LinkedHashSet<>(); - Optional.ofNullable(deployDefaults.getClassicLinkSecurityGroupName()).ifPresent(groups::add); - if (deployDefaults.isAddAppGroupsToClassicLink()) { - Optional.ofNullable(asgName).map(Names::parseName).ifPresent(names -> - Optional.ofNullable(names.getApp()).ifPresent(appGroup -> { - groups.add(appGroup); - Optional<String> stackGroup = Optional.ofNullable(names.getStack()).map(stack -> appGroup + "-" + stack); - stackGroup.ifPresent(groups::add); - Optional<String> detailGroup = Optional.ofNullable(names.getDetail()).map(detail -> stackGroup.orElse(appGroup + "-") + "-" + detail); - detailGroup.ifPresent(groups::add); - })); - } - return groups.stream().collect(Collectors.toList()); - } - - @Override - public long getPollIntervalMillis() { - return pollIntervalMillis; - } - - @Override - public long getTimeoutMillis() { - return timeoutMillis; - } - - @Override - public String getAgentType() { - return account.getName() + "/" + region + "/" + getClass().getSimpleName(); - } - - @Override - public String getProviderName() { - return AwsCleanupProvider.PROVIDER_NAME; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionConfig.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionConfig.groovy deleted file mode 100644 index 459ccce3d99..00000000000 ---
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionConfig.groovy +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.bastion - -import com.amazonaws.auth.AWSCredentialsProvider -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration - -@Configuration -@ConditionalOnProperty('bastion.enabled') -@EnableConfigurationProperties(BastionProperties) -class BastionConfig { - @Bean - AWSCredentialsProvider bastionCredentialsProvider(BastionProperties bastionConfiguration) { - def provider = new BastionCredentialsProvider(bastionConfiguration.user, bastionConfiguration.host, bastionConfiguration.port, bastionConfiguration.proxyCluster, - bastionConfiguration.proxyRegion, bastionConfiguration.accountIamRole) - - provider.refresh() - - provider - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionCredentialsProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionCredentialsProvider.groovy deleted file mode 100644 index aa271380184..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionCredentialsProvider.groovy +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.bastion - -import com.aestasit.infrastructure.ssh.SshOptions -import com.aestasit.infrastructure.ssh.dsl.CommandOutput -import com.aestasit.infrastructure.ssh.dsl.SshDslEngine -import com.amazonaws.auth.AWSCredentials -import com.amazonaws.auth.AWSCredentialsProvider -import com.amazonaws.auth.BasicSessionCredentials -import com.jcraft.jsch.IdentityRepository -import com.jcraft.jsch.agentproxy.ConnectorFactory -import com.jcraft.jsch.agentproxy.RemoteIdentityRepository -import groovy.json.JsonSlurper -import groovy.util.logging.Slf4j - -import java.text.SimpleDateFormat - -@Slf4j -class BastionCredentialsProvider implements AWSCredentialsProvider { - private static final JsonSlurper slurper = new JsonSlurper() - - private final String user - private final String host - private final Integer port - private final String proxyCluster - private final String proxyRegion - private final String iamRole - - private Date expiration - private AWSCredentials credentials - private final IdentityRepository identityRepository - - BastionCredentialsProvider(String user, String host, Integer port, String proxyCluster, String proxyRegion, String iamRole) { - this.user = user ?: System.properties["user.name"] - this.host = host - this.port = port - this.proxyCluster = proxyCluster - this.proxyRegion = proxyRegion - this.iamRole = iamRole - this.identityRepository = new RemoteIdentityRepository(ConnectorFactory.default.createConnector()) - } - - @Override - AWSCredentials getCredentials() { - if (!expiration || expiration.before(new Date())) { - this.credentials = getRemoteCredentials() - } - this.credentials - } - - @Override - void refresh() { - this.credentials = getRemoteCredentials() - } - - private AWSCredentials getRemoteCredentials() { - SimpleDateFormat format = new SimpleDateFormat( - "yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.US); - def engine = new SshDslEngine(new SshOptions(defaultPassword: '', trustUnknownHosts: true, jschProperties: [(SshDslEngine.SSH_PREFERRED_AUTHENTICATIONS): 'publickey'])) - engine.jsch.setIdentityRepository(identityRepository) - def command = "oq-ssh -r ${proxyRegion} ${proxyCluster},0 'curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/${iamRole}'".toString() - CommandOutput output - engine.remoteSession("${user}@${host}:${port}") { - output = exec command: command - } - def jsonText = output.output.substring(output.output.indexOf('{')) - def json = slurper.parseText(jsonText) as Map - expiration = format.parse(json.Expiration as String) - new BasicSessionCredentials(json.AccessKeyId as String, json.SecretAccessKey as String, json.Token as String) - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionProperties.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionProperties.groovy deleted file mode 100644 index 83d39c8b10f..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/bastion/BastionProperties.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.bastion - -import org.springframework.boot.context.properties.ConfigurationProperties - -@ConfigurationProperties("bastion") -class BastionProperties { - Boolean enabled - String host - String user - Integer port - String proxyCluster - String proxyRegion - String accountIamRole -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/cache/Keys.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/cache/Keys.groovy index 8623da9cbc2..f228b02e52f 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/cache/Keys.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/cache/Keys.groovy @@ -35,6 +35,7 @@ class Keys implements KeyParser { KEY_PAIRS, INSTANCE_TYPES, ELASTIC_IPS, + STACKS, ON_DEMAND final String ns @@ -87,8 +88,13 @@ class Keys implements KeyParser { switch (result.type) { case Namespace.SECURITY_GROUPS.ns: - def names = Names.parseName(parts[2]) - result << [application: names.app, name: parts[2], id: parts[3], region: parts[4], account: parts[5], vpcId: parts[6] == "null" ? null : parts[6]] + // AWS Security Group names can contain `:` which breaks simple splitting + // However, security group IDs must always start with `sg-` so we can use + // that to find the end of the group name as the ID is always after the name + def idIndex = parts.findIndexOf { it.startsWith('sg-') } + def name = parts[2..<idIndex].join(':') + def names = Names.parseName(name) + result << [application: names.app, name: name, id: parts[idIndex], region: parts[idIndex + 1], account: parts[idIndex + 2], vpcId: parts[idIndex + 3] == "null" ? null : parts[idIndex + 3]] diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonClusterController.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonClusterController.groovy - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository @Autowired AmazonClientProvider amazonClientProvider @@ -40,8 +40,8 @@ class AmazonClusterController
cacheView.filterIdentifiers(IMAGES.ns, imageSearch) : [] - namedImageIdentifiers = (namedImageIdentifiers as List).subList(0, Math.min(MAX_SEARCH_RESULTS, namedImageIdentifiers.size())) Collection matchesByName = cacheView.getAll(NAMED_IMAGES.ns, namedImageIdentifiers, RelationshipCacheFilter.include(IMAGES.ns)) Collection matchesByImageId = cacheView.getAll(IMAGES.ns, imageIdentifiers) - return filter( + List allFilteredImages = filter( render(matchesByName, matchesByImageId, lookupOptions.q, lookupOptions.region), extractTagFilters(request) ) + + return allFilteredImages.subList(0, Math.min(MAX_SEARCH_RESULTS, allFilteredImages.size())) } private List render(Collection namedImages, Collection images, String requestedName = null, String requiredRegion = null) { @@ -125,6 +126,7 @@ class AmazonNamedImageLookupController { Map namedImageKeyParts = Keys.parse(data.relationships[NAMED_IMAGES.ns][0]) NamedImage thisImage = byImageName[namedImageKeyParts.imageName] thisImage.attributes.virtualizationType = data.attributes.virtualizationType + thisImage.attributes.architecture = data.attributes.architecture thisImage.attributes.creationDate = data.attributes.creationDate thisImage.accounts.add(namedImageKeyParts.account) thisImage.amis[amiKeyParts.region].add(amiKeyParts.imageId) diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/data/Keys.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/data/Keys.groovy index 4233dc0bf18..0df56a4d5f9 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/data/Keys.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/data/Keys.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.data +import com.amazonaws.services.elasticloadbalancingv2.model.TargetTypeEnum import com.google.common.collect.ImmutableMap import com.google.common.collect.ImmutableSet import com.netflix.frigga.Names @@ -40,6 +41,7 @@ class Keys implements KeyParser { .put(Namespace.TARGET_GROUPS.ns, "targetGroup") .put(Namespace.CLUSTERS.ns, "cluster") .put(Namespace.APPLICATIONS.ns, "application") + .put(Namespace.STACKS.ns, "stacks") .build() private static final Set PARSEABLE_FIELDS = @@ -125,6 +127,20 @@ class Keys implements KeyParser { break case Namespace.HEALTH.ns: result << [instanceId: parts[2], account: parts[3], region: parts[4], provider: parts[5]] + break + case Namespace.STACKS.ns: + result << [stackId: parts[2], account: parts[3], region: parts[4]] + break + case Namespace.LAUNCH_TEMPLATES.ns: + def names = Names.parseName(parts[4]) + result << [ + account: parts[2], + region: parts[ 3], + launchTemplateName: parts[4], + application: names.app?.toLowerCase(), + stack: names.stack + ] + break default: return null @@ -171,7 +187,12 @@ class Keys implements KeyParser { } static String getTargetGroupKey(String targetGroupName, String account, String region, String targetGroupType, String vpcId) { - "${ID}:${Namespace.TARGET_GROUPS}:${account}:${region}:${targetGroupName}:${targetGroupType}:${vpcId}" + //Lambda targetGroup don't have the vpcId + if (TargetTypeEnum.Lambda.toString().equalsIgnoreCase(targetGroupType)) { + "${ID}:${Namespace.TARGET_GROUPS}:${account}:${region}:${targetGroupName}:${targetGroupType}" + } else { + "${ID}:${Namespace.TARGET_GROUPS}:${account}:${region}:${targetGroupName}:${targetGroupType}:${vpcId}" + } } static String getClusterKey(String clusterName, String application, String account) { @@ -189,4 +210,13 @@ class Keys 
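// Illustrative sketch of getTargetGroupKey's new branch (a literal 'aws'
// prefix stands in for the ID constant): Lambda target groups have no VPC, so
// their cache key omits the trailing vpcId segment.
def targetGroupKey = { String name, String account, String region, String type, String vpcId ->
  def base = "aws:targetGroups:${account}:${region}:${name}:${type}"
  type.equalsIgnoreCase('lambda') ? base : "${base}:${vpcId}"
}
assert targetGroupKey('tg1', 'prod', 'us-east-1', 'lambda', null) ==
  'aws:targetGroups:prod:us-east-1:tg1:lambda'
assert targetGroupKey('tg1', 'prod', 'us-east-1', 'instance', 'vpc-123') ==
  'aws:targetGroups:prod:us-east-1:tg1:instance:vpc-123'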
implements KeyParser { static String getReservedInstancesKey(String reservedInstancesId, String account, String region) { "${ID}:${Namespace.RESERVED_INSTANCES}:${account}:${region}:${reservedInstancesId}" } + + static String getCloudFormationKey(String stackId, String accountName, String region) { + "${ID}:${Namespace.STACKS}:${accountName}:${region}:${stackId}" + } + + static String getLaunchTemplateKey( + String launchTemplateName, String account, String region) { + "${ID}:${Namespace.LAUNCH_TEMPLATES}:${account}:${region}:${launchTemplateName}" + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolver.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolver.groovy index 37d33622892..0e43095a872 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolver.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolver.groovy @@ -41,7 +41,15 @@ class AmiIdResolver { } Image resolvedImage = amazonEC2.describeImages(req)?.images?.getAt(0) if (resolvedImage) { - return new ResolvedAmiResult(nameOrId, region, resolvedImage.imageId, resolvedImage.virtualizationType, resolvedImage.ownerId, resolvedImage.blockDeviceMappings, resolvedImage.public) + return new ResolvedAmiResult( + nameOrId, + region, + resolvedImage.imageId, + resolvedImage.virtualizationType, + resolvedImage.ownerId, + resolvedImage.blockDeviceMappings, + resolvedImage.public, + resolvedImage.architecture) } return null diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AutoScalingWorker.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AutoScalingWorker.groovy deleted file mode 100644 index beca9de9b50..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AutoScalingWorker.groovy +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.amazonaws.services.autoscaling.AmazonAutoScaling -import com.amazonaws.services.autoscaling.model.AlreadyExistsException -import com.amazonaws.services.autoscaling.model.AutoScalingGroup -import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest -import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest -import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult -import com.amazonaws.services.autoscaling.model.EnableMetricsCollectionRequest -import com.amazonaws.services.autoscaling.model.SuspendProcessesRequest -import com.amazonaws.services.autoscaling.model.Tag -import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest -import com.amazonaws.services.ec2.model.DescribeSubnetsResult -import com.amazonaws.services.ec2.model.Subnet -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType -import com.netflix.spinnaker.clouddriver.aws.model.SubnetData -import com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.kork.core.RetrySupport -import groovy.util.logging.Slf4j - -import java.time.Instant -import java.time.temporal.ChronoUnit -import java.util.function.Supplier - -/** - * A worker class dedicated to the deployment of "applications", following many of Netflix's common AWS conventions. - * - * - */ -@Slf4j -class AutoScalingWorker { - private static final String AWS_PHASE = "AWS_DEPLOY" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - private final RetrySupport retrySupport = new RetrySupport() - - private String application - private String region - private NetflixAmazonCredentials credentials - private String stack - private String freeFormDetails - private String ami - private String classicLinkVpcId - private List classicLinkVpcSecurityGroups - private String instanceType - private String iamRole - private String keyPair - private String base64UserData - private Boolean legacyUdf - private Integer sequence - private Boolean ignoreSequence - private Boolean startDisabled - private Boolean associatePublicIpAddress - private String subnetType - private List subnetIds - private Integer cooldown - private Collection enabledMetrics - private Integer healthCheckGracePeriod - private String healthCheckType - private String spotPrice - private Set suspendedProcesses - private Collection terminationPolicies - private String kernelId - private String ramdiskId - private Boolean instanceMonitoring - private Boolean ebsOptimized - private Collection classicLoadBalancers - private Collection targetGroupArns - private List securityGroups - private List availabilityZones - private List blockDevices - private Map tags - - private int minInstances - private int maxInstances - private int desiredInstances - - private RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider - - AutoScalingWorker() { - } - - /** - * Initiates the activity of deployment. This will involve: - *
- *   <ol>
- *     <li>Lookup or create if not found, a security group with a name that matches the supplied "application";</li>
- *     <li>Looking up security group ids for the names provided as "securityGroups";</li>
- *     <li>Look up an ancestor ASG based on Netflix naming conventions, and bring its security groups to the new ASG;</li>
- *     <li>Retrieve user data from all available {@link com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProvider}s;</li>
- *     <li>Create the ASG's Launch Configuration with User Data and Security Groups;</li>
- *     <li>Create a new ASG in the subnets found from the optionally supplied subnetType.</li>
- *   </ol>
- * - * @return the name of the newly deployed ASG - */ - String deploy() { - task.updateStatus AWS_PHASE, "Beginning Amazon deployment." - - if (startDisabled) { - suspendedProcesses.addAll(AutoScalingProcessType.getDisableProcesses()*.name()) - } - - task.updateStatus AWS_PHASE, "Beginning ASG deployment." - - AWSServerGroupNameResolver awsServerGroupNameResolver = regionScopedProvider.AWSServerGroupNameResolver - String asgName - if (sequence != null) { - asgName = awsServerGroupNameResolver.generateServerGroupName(application, stack, freeFormDetails, sequence, false) - } else { - asgName = awsServerGroupNameResolver.resolveNextServerGroupName(application, stack, freeFormDetails, ignoreSequence) - } - - def settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: credentials.name, - environment: credentials.environment, - accountType: credentials.accountType, - region: region, - baseName: asgName, - suffix: null, - ami: ami, - iamRole: iamRole, - classicLinkVpcId: classicLinkVpcId, - classicLinkVpcSecurityGroups: classicLinkVpcSecurityGroups, - instanceType: instanceType, - keyPair: keyPair, - base64UserData: base64UserData?.trim(), - associatePublicIpAddress: associatePublicIpAddress, - kernelId: kernelId, - ramdiskId: ramdiskId, - ebsOptimized: ebsOptimized, - spotPrice: spotPrice, - instanceMonitoring: instanceMonitoring, - blockDevices: blockDevices, - securityGroups: securityGroups) - - String launchConfigName = regionScopedProvider.getLaunchConfigurationBuilder().buildLaunchConfiguration(application, subnetType, settings, legacyUdf) - - task.updateStatus AWS_PHASE, "Deploying ASG: $asgName" - - createAutoScalingGroup(asgName, launchConfigName) - } - - /** - * This is an obscure rule that Subnets are tagged at Amazon with a data structure, which defines their purpose and - * what type of resources (elb or ec2) are able to make use of them. We also need to ensure that the Subnet IDs that - * we provide back are able to be deployed to based off of the supplied availability zones. - * - * @return list of subnet ids applicable to this deployment. - */ - List getSubnetIds(List allSubnetsForTypeAndAvailabilityZone) { - def subnetIds = allSubnetsForTypeAndAvailabilityZone*.subnetId - - def invalidSubnetIds = (this.subnetIds ?: []).findAll { !subnetIds.contains(it) } - if (invalidSubnetIds) { - throw new IllegalStateException( - "One or more subnet ids are not valid (invalidSubnetIds: ${invalidSubnetIds.join(", ")}, availabilityZones: ${availabilityZones})" - ) - } - - return this.subnetIds ?: subnetIds - } - - private List getSubnets(boolean filterForSubnetPurposeTags = true) { - if (!subnetType) { - return [] - } - - DescribeSubnetsResult result = regionScopedProvider.amazonEC2.describeSubnets() - List mySubnets = [] - for (subnet in result.subnets) { - if (availabilityZones && !availabilityZones.contains(subnet.availabilityZone)) { - continue - } - if (filterForSubnetPurposeTags) { - SubnetData sd = SubnetData.from(subnet) - if (sd.purpose == subnetType && (sd.target == null || sd.target == SubnetTarget.EC2)) { - mySubnets << subnet - } - } else { - mySubnets << subnet - } - } - mySubnets - } - - /** - * Deploys a new ASG with as much data collected as possible. 
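// Illustrative sketch (hypothetical subnet ids) of getSubnetIds' contract
// above: explicitly requested subnets must be a subset of those resolved for
// the subnet purpose and availability zones, and they take precedence over
// the resolved list when present.
def pickSubnets = { List requested, List resolved ->
  def invalid = (requested ?: []).findAll { !resolved.contains(it) }
  if (invalid) {
    throw new IllegalStateException("One or more subnet ids are not valid (invalidSubnetIds: ${invalid.join(', ')})")
  }
  requested ?: resolved
}
assert pickSubnets(['subnet-a'], ['subnet-a', 'subnet-b']) == ['subnet-a']
assert pickSubnets(null, ['subnet-a', 'subnet-b']) == ['subnet-a', 'subnet-b']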
- * - * @param asgName - * @param launchConfigurationName - * @return - */ - String createAutoScalingGroup(String asgName, String launchConfigurationName) { - CreateAutoScalingGroupRequest request = new CreateAutoScalingGroupRequest() - .withAutoScalingGroupName(asgName) - .withLaunchConfigurationName(launchConfigurationName) - .withMinSize(0) - .withMaxSize(0) - .withDesiredCapacity(0) - .withLoadBalancerNames(classicLoadBalancers) - .withTargetGroupARNs(targetGroupArns) - .withDefaultCooldown(cooldown) - .withHealthCheckGracePeriod(healthCheckGracePeriod) - .withHealthCheckType(healthCheckType) - .withTerminationPolicies(terminationPolicies) - - tags?.each { key, value -> - request.withTags(new Tag() - .withKey(key) - .withValue(value) - .withPropagateAtLaunch(true)) - } - - // if we have explicitly specified subnetIds, don't require that they are tagged with a subnetType/purpose - boolean filterForSubnetPurposeTags = !this.subnetIds - // Favor subnetIds over availability zones - def subnetIds = getSubnetIds(getSubnets(filterForSubnetPurposeTags))?.join(',') - if (subnetIds) { - task.updateStatus AWS_PHASE, " > Deploying to subnetIds: $subnetIds" - request.withVPCZoneIdentifier(subnetIds) - } else if (subnetType && !getSubnets()) { - throw new RuntimeException("No suitable subnet was found for internal subnet purpose '${subnetType}'!") - } else { - task.updateStatus AWS_PHASE, "Deploying to availabilityZones: $availabilityZones" - request.withAvailabilityZones(availabilityZones) - } - - def autoScaling = regionScopedProvider.autoScaling - Exception ex = retrySupport.retry({ -> - try { - autoScaling.createAutoScalingGroup(request) - return null - } catch (AlreadyExistsException e) { - if (!shouldProceedWithExistingState(autoScaling, asgName, request)) { - return e - } - log.debug("Determined pre-existing ASG is desired state, continuing...", e) - return null - } - }, 10, 1000, false) - if (ex != null) { - throw ex - } - - if (suspendedProcesses) { - retrySupport.retry({ -> - autoScaling.suspendProcesses(new SuspendProcessesRequest(autoScalingGroupName: asgName, scalingProcesses: suspendedProcesses)) - }, 10, 1000, false) - } - if (enabledMetrics && instanceMonitoring) { - task.updateStatus AWS_PHASE, "Enabling metrics collection for: $asgName" - retrySupport.retry({ -> - autoScaling.enableMetricsCollection(new EnableMetricsCollectionRequest() - .withAutoScalingGroupName(asgName) - .withGranularity('1Minute') - .withMetrics(enabledMetrics)) - }, 10, 1000, false) - } - - retrySupport.retry({ -> - autoScaling.updateAutoScalingGroup( - new UpdateAutoScalingGroupRequest( - autoScalingGroupName: asgName, - minSize: minInstances, - maxSize: maxInstances, - desiredCapacity: desiredInstances - ) - ) - }, 10, 1000, false) - - asgName - } - - private boolean shouldProceedWithExistingState(AmazonAutoScaling autoScaling, String asgName, CreateAutoScalingGroupRequest request) { - DescribeAutoScalingGroupsResult result = autoScaling.describeAutoScalingGroups( - new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(asgName) - ) - if (result.autoScalingGroups.isEmpty()) { - // This will only happen if we get an AlreadyExistsException from AWS, then immediately after describing it, we - // don't get a result back. We'll continue with trying to create because who knows... may as well try. 
- log.error("Attempted to find pre-existing ASG but none was found: $asgName") - return true - } - AutoScalingGroup existingAsg = result.autoScalingGroups.first() - - Set failedPredicates = [ - "launch configuration": { return existingAsg.launchConfigurationName == request.launchConfigurationName }, - "availability zones": { return existingAsg.availabilityZones.sort() == request.availabilityZones.sort() }, - "subnets": { return existingAsg.getVPCZoneIdentifier()?.split(",")?.sort()?.toList() == request.getVPCZoneIdentifier()?.split(",")?.sort()?.toList() }, - "load balancers": { return existingAsg.loadBalancerNames.sort() == request.loadBalancerNames.sort() }, - "target groups": { return existingAsg.targetGroupARNs.sort() == request.targetGroupARNs.sort() }, - "cooldown": { return existingAsg.defaultCooldown == request.defaultCooldown }, - "health check grace period": { return existingAsg.healthCheckGracePeriod == request.healthCheckGracePeriod }, - "health check type": { return existingAsg.healthCheckType == request.healthCheckType }, - "termination policies": { return existingAsg.terminationPolicies.sort() == request.terminationPolicies.sort() } - ].findAll { !((Supplier) it.value).get() }.keySet() - - if (!failedPredicates.isEmpty()) { - task.updateStatus AWS_PHASE, "$asgName already exists and does not seem to match desired state on: ${failedPredicates.join(", ")}" - return false - } - if (existingAsg.createdTime.toInstant().isBefore(Instant.now().minus(1, ChronoUnit.HOURS))) { - task.updateStatus AWS_PHASE, "$asgName already exists and appears to be valid, but falls outside of safety window for idempotent deploy (1 hour)" - return false - } - - return true - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/BlockDeviceConfig.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/BlockDeviceConfig.groovy deleted file mode 100644 index d19b5d5b90b..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/BlockDeviceConfig.groovy +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice - -class BlockDeviceConfig { - - private final DeployDefaults deployDefaults - private final Map> blockDevicesByInstanceType - - BlockDeviceConfig(DeployDefaults deployDefaults) { - this.deployDefaults = deployDefaults - blockDevicesByInstanceType = [ - "c1.medium" : enumeratedBlockDevicesWithVirtualName(1), - "c1.xlarge" : enumeratedBlockDevicesWithVirtualName(4), - - "c3.large" : enumeratedBlockDevicesWithVirtualName(2), - "c3.xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "c3.2xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "c3.4xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "c3.8xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "c4.large" : defaultBlockDevicesForEbsOnly(), - "c4.xlarge" : defaultBlockDevicesForEbsOnly(), - "c4.2xlarge" : defaultBlockDevicesForEbsOnly(), - "c4.4xlarge" : defaultBlockDevicesForEbsOnly(), - "c4.8xlarge" : defaultBlockDevicesForEbsOnly(), - - "c5.large" : defaultBlockDevicesForEbsOnly(), - "c5.xlarge" : defaultBlockDevicesForEbsOnly(), - "c5.2xlarge" : defaultBlockDevicesForEbsOnly(), - "c5.4xlarge" : defaultBlockDevicesForEbsOnly(), - "c5.9xlarge" : defaultBlockDevicesForEbsOnly(), - "c5.18xlarge" : defaultBlockDevicesForEbsOnly(), - - "c5d.large" : enumeratedBlockDevicesWithVirtualName(1), - "c5d.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "c5d.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "c5d.4xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "c5d.9xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "c5d.18xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "cc2.8xlarge" : enumeratedBlockDevicesWithVirtualName(4), - - "cg1.4xlarge" : sizedBlockDevicesForEbs(120), - - "cr1.8xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "d2.xlarge" : enumeratedBlockDevicesWithVirtualName(3), - "d2.2xlarge" : enumeratedBlockDevicesWithVirtualName(6), - "d2.4xlarge" : enumeratedBlockDevicesWithVirtualName(12), - "d2.8xlarge" : enumeratedBlockDevicesWithVirtualName(24), - - "f1.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "f1.16xlarge" : enumeratedBlockDevicesWithVirtualName(4), - - "g2.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "g2.8xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "g3.4xlarge" : sizedBlockDevicesForEbs(120), - "g3.8xlarge" : sizedBlockDevicesForEbs(120), - "g3.16xlarge" : sizedBlockDevicesForEbs(120), - - "h1.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "h1.4xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "h1.8xlarge" : enumeratedBlockDevicesWithVirtualName(4), - "h1.16xlarge" : enumeratedBlockDevicesWithVirtualName(8), - - "hs1.8xlarge" : enumeratedBlockDevicesWithVirtualName(24), - - "i2.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "i2.2xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "i2.4xlarge" : enumeratedBlockDevicesWithVirtualName(4), - "i2.8xlarge" : enumeratedBlockDevicesWithVirtualName(8), - - "i3.large" : enumeratedBlockDevicesWithVirtualName(1), - "i3.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "i3.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "i3.4xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "i3.8xlarge" : enumeratedBlockDevicesWithVirtualName(4), - "i3.16xlarge" : enumeratedBlockDevicesWithVirtualName(8), - "i3.metal" : enumeratedBlockDevicesWithVirtualName(8), - - "m1.small" : 
enumeratedBlockDevicesWithVirtualName(1), - "m1.medium" : enumeratedBlockDevicesWithVirtualName(1), - "m1.large" : enumeratedBlockDevicesWithVirtualName(2), - "m1.xlarge" : enumeratedBlockDevicesWithVirtualName(4), - - "m2.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "m2.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "m2.4xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "m3.medium" : enumeratedBlockDevicesWithVirtualName(1), - "m3.large" : enumeratedBlockDevicesWithVirtualName(1), - "m3.xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "m3.2xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "m4.large" : sizedBlockDevicesForEbs(40), - "m4.xlarge" : sizedBlockDevicesForEbs(80), - "m4.2xlarge" : sizedBlockDevicesForEbs(80), - "m4.4xlarge" : sizedBlockDevicesForEbs(120), - "m4.10xlarge" : sizedBlockDevicesForEbs(120), - "m4.16xlarge" : sizedBlockDevicesForEbs(120), - - "m5.large" : sizedBlockDevicesForEbs(40), - "m5.xlarge" : sizedBlockDevicesForEbs(80), - "m5.2xlarge" : sizedBlockDevicesForEbs(80), - "m5.4xlarge" : sizedBlockDevicesForEbs(120), - "m5.12xlarge" : sizedBlockDevicesForEbs(120), - "m5.24xlarge" : sizedBlockDevicesForEbs(120), - - "m5d.large" : enumeratedBlockDevicesWithVirtualName(1), - "m5d.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "m5d.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "m5d.4xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "m5d.12xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "m5d.24xlarge" : enumeratedBlockDevicesWithVirtualName(4), - - "r3.large" : enumeratedBlockDevicesWithVirtualName(1), - "r3.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "r3.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "r3.4xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "r3.8xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "r4.large" : sizedBlockDevicesForEbs(40), - "r4.xlarge" : sizedBlockDevicesForEbs(80), - "r4.2xlarge" : sizedBlockDevicesForEbs(80), - "r4.4xlarge" : sizedBlockDevicesForEbs(120), - "r4.8xlarge" : sizedBlockDevicesForEbs(120), - "r4.16xlarge" : sizedBlockDevicesForEbs(120), - - "r5.large" : sizedBlockDevicesForEbs(40), - "r5.xlarge" : sizedBlockDevicesForEbs(80), - "r5.2xlarge" : sizedBlockDevicesForEbs(80), - "r5.4xlarge" : sizedBlockDevicesForEbs(120), - "r5.12xlarge" : sizedBlockDevicesForEbs(120), - "r5.24xlarge" : sizedBlockDevicesForEbs(120), - - "r5d.large" : enumeratedBlockDevicesWithVirtualName(1), - "r5d.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "r5d.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "r5d.4xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "r5d.12xlarge" : enumeratedBlockDevicesWithVirtualName(2), - "r5d.24xlarge" : enumeratedBlockDevicesWithVirtualName(4), - - "p2.xlarge" : sizedBlockDevicesForEbs(80), - "p2.8xlarge" : sizedBlockDevicesForEbs(120), - "p2.16xlarge" : sizedBlockDevicesForEbs(120), - - "p3.2xlarge" : sizedBlockDevicesForEbs(80), - "p3.8xlarge" : sizedBlockDevicesForEbs(120), - "p3.16xlarge" : sizedBlockDevicesForEbs(120), - - "t1.micro" : [], - - "t2.nano" : [], - "t2.micro" : [], - "t2.small" : [], - "t2.medium" : [], - "t2.large" : [], - "t2.xlarge" : [], - "t2.2xlarge" : [], - - "t3.nano" : [], - "t3.micro" : [], - "t3.small" : [], - "t3.medium" : [], - "t3.large" : [], - "t3.xlarge" : [], - "t3.2xlarge" : [], - - "x1.16xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "x1.32xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - "x1e.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "x1e.2xlarge" : 
enumeratedBlockDevicesWithVirtualName(1), - "x1e.4xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "x1e.8xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "x1e.16xlarge": enumeratedBlockDevicesWithVirtualName(1), - "x1e.32xlarge": enumeratedBlockDevicesWithVirtualName(2), - - "z1d.large" : enumeratedBlockDevicesWithVirtualName(1), - "z1d.xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "z1d.2xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "z1d.3xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "z1d.6xlarge" : enumeratedBlockDevicesWithVirtualName(1), - "z1d.12xlarge" : enumeratedBlockDevicesWithVirtualName(2), - - ].asImmutable() - } - - List enumeratedBlockDevicesWithVirtualName(int size) { - def letters = ('a'..'z').collect { it } - (0.. getBlockDevicesForInstanceType(String instanceType) { - def blockDevices = blockDevicesByInstanceType[instanceType] - if (blockDevices == null && deployDefaults.unknownInstanceTypeBlockDevice) { - // return a default block device mapping if no instance-specific default exists - return [deployDefaults.unknownInstanceTypeBlockDevice] - } - - return blockDevices - } - - Set getInstanceTypesWithBlockDeviceMappings() { - return blockDevicesByInstanceType.keySet() - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultLaunchConfigurationBuilder.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultLaunchConfigurationBuilder.groovy deleted file mode 100644 index 84e0ca0e3a6..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultLaunchConfigurationBuilder.groovy +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
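// Illustrative sketch (placeholder strings in place of AmazonBlockDevice) of
// the deleted getBlockDevicesForInstanceType fallback: unknown instance types
// get the configured unknownInstanceTypeBlockDevice default, if one exists.
def byInstanceType = ['m5.large': ['ebs-40'], 't3.micro': []]
def unknownDefault = 'ebs-default'
def devicesFor = { String type ->
  def devices = byInstanceType[type]
  (devices == null && unknownDefault) ? [unknownDefault] : devices
}
assert devicesFor('m5.large') == ['ebs-40']
assert devicesFor('t3.micro') == []               // known type, no ephemeral devices
assert devicesFor('c9.huge')  == ['ebs-default']  // hypothetical unknown type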
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.amazonaws.services.autoscaling.AmazonAutoScaling -import com.amazonaws.services.autoscaling.model.AlreadyExistsException -import com.amazonaws.services.autoscaling.model.BlockDeviceMapping -import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest -import com.amazonaws.services.autoscaling.model.Ebs -import com.amazonaws.services.autoscaling.model.InstanceMonitoring -import com.amazonaws.services.autoscaling.model.LaunchConfiguration -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties -import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProvider -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import com.netflix.spinnaker.clouddriver.aws.services.AsgService -import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService -import com.netflix.spinnaker.clouddriver.helpers.OperationPoller -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import groovy.util.logging.Slf4j -import org.apache.commons.codec.binary.Base64 -import org.joda.time.LocalDateTime - -import java.nio.charset.Charset -import java.util.regex.Pattern - -@Slf4j -class DefaultLaunchConfigurationBuilder implements LaunchConfigurationBuilder { - private static final Pattern SG_PATTERN = Pattern.compile(/^sg-[0-9a-f]+$/) - - final AmazonAutoScaling autoScaling - final AsgService asgService - final SecurityGroupService securityGroupService - final List userDataProviders - final LocalFileUserDataProperties localFileUserDataProperties - final DeployDefaults deployDefaults - - DefaultLaunchConfigurationBuilder(AmazonAutoScaling autoScaling, AsgService asgService, - SecurityGroupService securityGroupService, List userDataProviders, - LocalFileUserDataProperties localFileUserDataProperties, - DeployDefaults deployDefaults) { - this.autoScaling = autoScaling - this.asgService = asgService - this.securityGroupService = securityGroupService - this.userDataProviders = (userDataProviders ?: Collections.emptyList()) as List - this.localFileUserDataProperties = localFileUserDataProperties - this.deployDefaults = deployDefaults - } - - /** - * Extracts the LaunchConfigurationSettings from an existing LaunchConfiguration. 
- * @param account the account in which to find the launch configuration - * @param region the region in which to find the launch configuration - * @param launchConfigurationName the name of the launch configuration - * @return LaunchConfigurationSettings for the launch configuration - */ - @Override - LaunchConfigurationSettings buildSettingsFromLaunchConfiguration(AccountCredentials account, String region, String launchConfigurationName) { - LaunchConfiguration lc = asgService.getLaunchConfiguration(launchConfigurationName) - - String baseName = lc.launchConfigurationName - String suffix = null - int suffixLoc = lc.launchConfigurationName.lastIndexOf('-') - if (suffixLoc != -1) { - baseName = lc.launchConfigurationName.substring(0, suffixLoc) - suffix = lc.launchConfigurationName.substring(suffixLoc + 1) - } - - List blockDevices = lc.blockDeviceMappings.collect { BlockDeviceMapping mapping -> - if (mapping.ebs) { - new AmazonBlockDevice(deviceName: mapping.deviceName, - size: mapping.ebs.volumeSize, - volumeType: mapping.ebs.volumeType, - deleteOnTermination: mapping.ebs.deleteOnTermination, - iops: mapping.ebs.iops, - snapshotId: mapping.ebs.snapshotId, - encrypted: mapping.ebs.encrypted) - } else { - new AmazonBlockDevice(deviceName: mapping.deviceName, virtualName: mapping.virtualName) - } - } - - /* - Copy over the original user data only if the UserDataProviders behavior is disabled. - This is to avoid having duplicate user data. - */ - String base64UserData = (localFileUserDataProperties && !localFileUserDataProperties.enabled) ? lc.userData : null - - new LaunchConfigurationSettings( - account: account.name, - environment: account.environment, - accountType: account.accountType, - region: region, - baseName: baseName, - suffix: suffix, - ami: lc.imageId, - iamRole: lc.iamInstanceProfile, - classicLinkVpcId: lc.classicLinkVPCId, - classicLinkVpcSecurityGroups: lc.classicLinkVPCSecurityGroups, - instanceType: lc.instanceType, - keyPair: lc.keyName, - associatePublicIpAddress: lc.associatePublicIpAddress, - kernelId: lc.kernelId ?: null, - ramdiskId: lc.ramdiskId ?: null, - ebsOptimized: lc.ebsOptimized, - spotPrice: lc.spotPrice, - instanceMonitoring: lc.instanceMonitoring == null ? 
false : lc.instanceMonitoring.enabled, - blockDevices: blockDevices, - securityGroups: lc.securityGroups, - base64UserData: base64UserData - ) - } - - /** - * Constructs a LaunchConfiguration with the provided settings - * @param application the name of the application - used to construct a default security group if none are present - * @param subnetType the subnet type for security groups in the launch configuration - * @param settings the settings for the launch configuration - * @param whether to explicitly use or not use legacyUdf mode - can be null which will fall through to application default - * @return the name of the new launch configuration - */ - @Override - String buildLaunchConfiguration(String application, String subnetType, LaunchConfigurationSettings settings, Boolean legacyUdf) { - if (settings.suffix == null) { - settings = settings.copyWith(suffix: createDefaultSuffix()) - } - - Set securityGroupIds = resolveSecurityGroupIds(settings.securityGroups, subnetType).toSet() - if (!securityGroupIds || (deployDefaults.addAppGroupToServerGroup && securityGroupIds.size() < deployDefaults.maxSecurityGroups)) { - def names = securityGroupService.getSecurityGroupNamesFromIds(securityGroupIds) - - String existingAppGroup = names.keySet().find { it.contains(application) } - if (!existingAppGroup) { - OperationPoller.retryWithBackoff({o -> - String applicationSecurityGroup = securityGroupService.getSecurityGroupForApplication(application, subnetType) - if (!applicationSecurityGroup) { - applicationSecurityGroup = securityGroupService.createSecurityGroup(application, subnetType) - } - - securityGroupIds << applicationSecurityGroup - }, 500, 3); - } - } - settings = settings.copyWith(securityGroups: securityGroupIds.toList()) - - if (settings.classicLinkVpcSecurityGroups) { - if (!settings.classicLinkVpcId) { - throw new IllegalStateException("Can't provide classic link security groups without classiclink vpc Id") - } - List classicLinkIds = resolveSecurityGroupIdsInVpc(settings.classicLinkVpcSecurityGroups, settings.classicLinkVpcId) - settings = settings.copyWith(classicLinkVpcSecurityGroups: classicLinkIds) - } - - String name = createName(settings) - String userData = getUserData(name, settings, legacyUdf) - createLaunchConfiguration(name, userData, settings) - } - - private String createDefaultSuffix() { - new LocalDateTime().toString("MMddYYYYHHmmss") - } - - private String createName(LaunchConfigurationSettings settings) { - createName(settings.baseName, settings.suffix) - } - - private String createName(String baseName, String suffix) { - StringBuilder name = new StringBuilder(baseName) - if (suffix) { - name.append('-').append(suffix) - } - name.toString() - } - - private List resolveSecurityGroupIdsByStrategy(List securityGroupNamesAndIds, Closure> nameResolver) { - if (securityGroupNamesAndIds) { - Collection names = securityGroupNamesAndIds.toSet() - Collection ids = names.findAll { SG_PATTERN.matcher(it).matches() } as Set - names.removeAll(ids) - if (names) { - def resolvedIds = nameResolver.call(names.toList()) - ids.addAll(resolvedIds.values()) - } - return ids.toList() - } else { - return [] - } - } - - private List resolveSecurityGroupIds(List securityGroupNamesAndIds, String subnetType) { - return resolveSecurityGroupIdsByStrategy(securityGroupNamesAndIds) { List names -> - securityGroupService.getSecurityGroupIdsWithSubnetPurpose(names, subnetType) - } - } - - private List resolveSecurityGroupIdsInVpc(List securityGroupNamesAndIds, String vpcId) { - return 
resolveSecurityGroupIdsByStrategy(securityGroupNamesAndIds) { List names -> - securityGroupService.getSecurityGroupIds(names, vpcId) - } - } - - private String getUserData(String launchConfigName, LaunchConfigurationSettings settings, Boolean legacyUdf) { - String data = userDataProviders?.collect { udp -> - udp.getUserData(launchConfigName, settings, legacyUdf) - }?.join("\n") - String userDataDecoded = new String((settings.base64UserData ?: '').decodeBase64(), Charset.forName("UTF-8")) - data = [data, userDataDecoded].findResults { it }.join("\n") - if (data && data.startsWith("\n")) { - data = data.trim() - } - data ? new String(Base64.encodeBase64(data.bytes), Charset.forName("UTF-8")) : null - } - - private String createLaunchConfiguration(String name, String userData, LaunchConfigurationSettings settings) { - - CreateLaunchConfigurationRequest request = new CreateLaunchConfigurationRequest() - .withImageId(settings.ami) - .withIamInstanceProfile(settings.iamRole) - .withLaunchConfigurationName(name) - .withUserData(userData) - .withInstanceType(settings.instanceType) - .withSecurityGroups(settings.securityGroups) - .withKeyName(settings.keyPair) - .withAssociatePublicIpAddress(settings.associatePublicIpAddress) - .withKernelId(settings.kernelId ?: null) - .withRamdiskId(settings.ramdiskId ?: null) - .withEbsOptimized(settings.ebsOptimized) - .withSpotPrice(settings.spotPrice) - .withClassicLinkVPCId(settings.classicLinkVpcId) - .withClassicLinkVPCSecurityGroups(settings.classicLinkVpcSecurityGroups) - .withInstanceMonitoring(new InstanceMonitoring(enabled: settings.instanceMonitoring)) - - if (settings.blockDevices) { - def mappings = [] - for (blockDevice in settings.blockDevices) { - def mapping = new BlockDeviceMapping(deviceName: blockDevice.deviceName) - if (blockDevice.virtualName) { - mapping.withVirtualName(blockDevice.virtualName) - } else { - def ebs = new Ebs() - blockDevice.with { - ebs.withVolumeSize(size) - if (deleteOnTermination != null) { - ebs.withDeleteOnTermination(deleteOnTermination) - } - if (volumeType) { - ebs.withVolumeType(volumeType) - } - if (iops) { - ebs.withIops(iops) - } - if (snapshotId) { - ebs.withSnapshotId(snapshotId) - } - if (encrypted) { - ebs.withEncrypted(encrypted) - } - } - mapping.withEbs(ebs) - } - mappings << mapping - } - request.withBlockDeviceMappings(mappings) - } - - try { - OperationPoller.retryWithBackoff({ o -> - CreateLaunchConfigurationRequest debugRequest = request.clone() - debugRequest.setUserData(null); - log.debug("Creating launch configuration (${name}): ${debugRequest}") - - autoScaling.createLaunchConfiguration(request) - }, 1500, 3); - } catch (AlreadyExistsException e) { - log.debug("Launch configuration already exists, continuing... (${e.message})") - } - - name - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/LaunchConfigurationBuilder.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/LaunchConfigurationBuilder.groovy deleted file mode 100644 index c6635dd810f..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/LaunchConfigurationBuilder.groovy +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
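// Illustrative sketch of the deleted getUserData's merge-and-reencode step
// above: provider output is joined with the caller's decoded base64 payload,
// then the combined script is base64-encoded again (the original also trims a
// leading newline when no provider data is present).
import java.nio.charset.StandardCharsets

String providerData   = 'echo from-providers'
String base64UserData = 'echo from-caller'.getBytes(StandardCharsets.UTF_8).encodeBase64().toString()

String decoded = new String(base64UserData.decodeBase64(), StandardCharsets.UTF_8)
String merged  = [providerData, decoded].findResults { it }.join('\n')
String result  = merged.getBytes(StandardCharsets.UTF_8).encodeBase64().toString()

assert new String(result.decodeBase64(), StandardCharsets.UTF_8) ==
  'echo from-providers\necho from-caller'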
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import groovy.transform.Immutable - -interface LaunchConfigurationBuilder { - - @Immutable(copyWith = true, knownImmutableClasses = [AmazonBlockDevice]) - static class LaunchConfigurationSettings { - String account - String environment - String accountType - String region - String baseName - String suffix - - String ami - String iamRole - String classicLinkVpcId - List classicLinkVpcSecurityGroups - String instanceType - String keyPair - String base64UserData - Boolean associatePublicIpAddress - String kernelId - String ramdiskId - boolean ebsOptimized - String spotPrice - boolean instanceMonitoring - List blockDevices - List securityGroups - } - - /** - * Extracts the LaunchConfigurationSettings from an existing LaunchConfiguration. - * @param account the account in which to find the launch configuration - * @param region the region in which to find the launch configuration - * @param launchConfigurationName the name of the launch configuration - * @return LaunchConfigurationSettings for the launch configuration - */ - LaunchConfigurationSettings buildSettingsFromLaunchConfiguration(AccountCredentials credentials, String region, String launchConfigurationName) - - /** - * Constructs an LaunchConfiguration with the provided settings - * @param application the name of the application - used to construct a default security group if none are present - * @param subnetType the subnet type for security groups in the launch configuration - * @param settings the settings for the launch configuration - * @param whether to explicitly use or not use legacyUdf mode - can be null which will fall through to application default - * @return the name of the new launch configuration - */ - String buildLaunchConfiguration(String application, String subnetType, LaunchConfigurationSettings settings, Boolean legacyUdf) -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ResolvedAmiResult.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ResolvedAmiResult.groovy index fb730124828..03f717ffb22 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ResolvedAmiResult.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ResolvedAmiResult.groovy @@ -31,4 +31,5 @@ class ResolvedAmiResult { String ownerId List blockDeviceMappings Boolean isPublic + String architecture } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AWSServerGroupNameResolver.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AWSServerGroupNameResolver.groovy similarity index 97% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AWSServerGroupNameResolver.groovy rename to 
clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AWSServerGroupNameResolver.groovy index 97ca45a1a15..d8fb1c60b64 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AWSServerGroupNameResolver.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AWSServerGroupNameResolver.groovy @@ -1,20 +1,21 @@ /* - * Copyright 2015 Netflix, Inc. + * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * */ -package com.netflix.spinnaker.clouddriver.aws.deploy +package com.netflix.spinnaker.clouddriver.aws.deploy.asg import com.netflix.frigga.NameConstants import com.netflix.frigga.NameValidation diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgLifecycleHookWorker.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgLifecycleHookWorker.groovy similarity index 98% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgLifecycleHookWorker.groovy rename to clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgLifecycleHookWorker.groovy index e216b1a950d..335a7a4f699 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgLifecycleHookWorker.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgLifecycleHookWorker.groovy @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.netflix.spinnaker.clouddriver.aws.deploy +package com.netflix.spinnaker.clouddriver.aws.deploy.asg import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.autoscaling.model.PutLifecycleHookRequest diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgReferenceCopier.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgReferenceCopier.groovy similarity index 98% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgReferenceCopier.groovy rename to clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgReferenceCopier.groovy index 787dfb6275b..d4b64e2bec4 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgReferenceCopier.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgReferenceCopier.groovy @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.netflix.spinnaker.clouddriver.aws.deploy +package com.netflix.spinnaker.clouddriver.aws.deploy.asg import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.autoscaling.model.AlreadyExistsException diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/DefaultLaunchConfigurationBuilder.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/DefaultLaunchConfigurationBuilder.groovy new file mode 100644 index 00000000000..55e52b404c3 --- /dev/null +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/DefaultLaunchConfigurationBuilder.groovy @@ -0,0 +1,291 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg + +import com.amazonaws.services.autoscaling.AmazonAutoScaling +import com.amazonaws.services.autoscaling.model.AlreadyExistsException +import com.amazonaws.services.autoscaling.model.BlockDeviceMapping +import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest +import com.amazonaws.services.autoscaling.model.Ebs +import com.amazonaws.services.autoscaling.model.InstanceMonitoring +import com.amazonaws.services.autoscaling.model.LaunchConfiguration +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProviderAggregator +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.services.AsgService +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller + +import groovy.util.logging.Slf4j +import org.joda.time.LocalDateTime + +@Slf4j +class DefaultLaunchConfigurationBuilder implements LaunchConfigurationBuilder { + + final AmazonAutoScaling autoScaling + final AsgService asgService + final SecurityGroupService securityGroupService + final UserDataProviderAggregator userDataProviderAggregator + final LocalFileUserDataProperties localFileUserDataProperties + final DeployDefaults deployDefaults + + DefaultLaunchConfigurationBuilder(AmazonAutoScaling autoScaling, AsgService asgService, + SecurityGroupService securityGroupService, + UserDataProviderAggregator userDataProviderAggregator, + LocalFileUserDataProperties localFileUserDataProperties, + DeployDefaults deployDefaults) { + this.autoScaling = autoScaling + this.asgService = asgService + this.securityGroupService = securityGroupService + this.userDataProviderAggregator = userDataProviderAggregator + this.localFileUserDataProperties = 
localFileUserDataProperties + this.deployDefaults = deployDefaults + } + + /** + * Extracts the LaunchConfigurationSettings from an existing LaunchConfiguration. + * @param account the account in which to find the launch configuration + * @param region the region in which to find the launch configuration + * @param launchConfigurationName the name of the launch configuration + * @return LaunchConfigurationSettings for the launch configuration + */ + @Override + LaunchConfigurationSettings buildSettingsFromLaunchConfiguration(AccountCredentials account, String region, String launchConfigurationName) { + LaunchConfiguration lc = asgService.getLaunchConfiguration(launchConfigurationName) + + String baseName = lc.launchConfigurationName + String suffix = null + int suffixLoc = lc.launchConfigurationName.lastIndexOf('-') + if (suffixLoc != -1) { + baseName = lc.launchConfigurationName.substring(0, suffixLoc) + suffix = lc.launchConfigurationName.substring(suffixLoc + 1) + } + + List blockDevices = lc.blockDeviceMappings.collect { BlockDeviceMapping mapping -> + if (mapping.ebs) { + new AmazonBlockDevice(deviceName: mapping.deviceName, + size: mapping.ebs.volumeSize, + volumeType: mapping.ebs.volumeType, + deleteOnTermination: mapping.ebs.deleteOnTermination, + iops: mapping.ebs.iops, + throughput: mapping.ebs.throughput, + snapshotId: mapping.ebs.snapshotId, + encrypted: mapping.ebs.encrypted) + } else { + new AmazonBlockDevice(deviceName: mapping.deviceName, virtualName: mapping.virtualName) + } + } + + /* + Copy over the original user data only if the UserDataProviders behavior is disabled. + This is to avoid having duplicate user data. + */ + String base64UserData = (localFileUserDataProperties && !localFileUserDataProperties.enabled) ? lc.userData : null + + LaunchConfigurationSettings.builder() + .account(account.name) + .environment(account.environment) + .accountType(account.accountType) + .region(region) + .baseName(baseName) + .suffix(suffix) + .ami(lc.imageId) + .iamRole(lc.iamInstanceProfile) + .classicLinkVpcId(lc.classicLinkVPCId) + .classicLinkVpcSecurityGroups(lc.classicLinkVPCSecurityGroups) + .instanceType(lc.instanceType) + .keyPair(lc.keyName) + .associatePublicIpAddress(lc.associatePublicIpAddress) + .kernelId(lc.kernelId ?: null) + .ramdiskId(lc.ramdiskId ?: null) + .ebsOptimized(lc.ebsOptimized) + .spotMaxPrice(lc.spotPrice) + .instanceMonitoring(lc.instanceMonitoring == null ? 
false : lc.instanceMonitoring.enabled) + .blockDevices(blockDevices) + .securityGroups(lc.securityGroups) + .base64UserData(base64UserData) + .build() + } + + /** + * Constructs a LaunchConfiguration with the provided settings + * @param application the name of the application - used to construct a default security group if none are present + * @param subnetType the subnet type for security groups in the launch configuration + * @param settings the settings for the launch configuration + * @param whether to explicitly use or not use legacyUdf mode - can be null which will fall through to application default + * @return the name of the new launch configuration + */ + @Override + String buildLaunchConfiguration(String application, String subnetType, LaunchConfigurationSettings settings, Boolean legacyUdf, UserDataOverride userDataOverride) { + settings = setAppSecurityGroup(application, subnetType, deployDefaults, securityGroupService, settings) + + String name = createName(settings) + String userData = getUserData(name, settings, legacyUdf, userDataOverride) + createLaunchConfiguration(name, userData, settings) + } + + private static String createDefaultSuffix() { + new LocalDateTime().toString("MMddYYYYHHmmss") + } + + static String createName(LaunchConfigurationSettings settings) { + createName0(settings.baseName, settings.suffix) + } + + private static String createName0(String baseName, String suffix) { + StringBuilder name = new StringBuilder(baseName) + if (suffix) { + name.append('-').append(suffix) + } + name.toString() + } + + private String getUserData(String launchConfigName, LaunchConfigurationSettings settings, Boolean legacyUdf, UserDataOverride userDataOverride) { + UserDataInput userDataRequest = + UserDataInput.builder() + .launchTemplate(false) + .asgName(settings.baseName) + .launchSettingName(launchConfigName) + .region(settings.region) + .account(settings.account) + .accountType(settings.accountType) + .environment(settings.environment) + .iamRole(settings.iamRole) + .imageId(settings.ami) + .legacyUdf(legacyUdf) + .userDataOverride(userDataOverride) + .base64UserData(settings.base64UserData) + .build() + + return userDataProviderAggregator.aggregate(userDataRequest) + } + + static LaunchConfigurationSettings setAppSecurityGroup( + String application, + String subnetType, + DeployDefaults deployDefaults, + SecurityGroupService securityGroupService, + LaunchConfigurationSettings settings + ) { + if (settings.suffix == null) { + settings = settings.toBuilder().suffix(createDefaultSuffix()).build() + } + + Set securityGroupIds = securityGroupService.resolveSecurityGroupIdsWithSubnetType(settings.securityGroups, subnetType).toSet() + + if (!securityGroupIds || (deployDefaults.addAppGroupToServerGroup && securityGroupIds.size() < deployDefaults.maxSecurityGroups)) { + def names = securityGroupService.getSecurityGroupNamesFromIds(securityGroupIds) + + String existingAppGroup = names.keySet().find { it.contains(application) } + if (!existingAppGroup) { + OperationPoller.retryWithBackoff({o -> + String applicationSecurityGroup = securityGroupService.getSecurityGroupForApplication(application, subnetType) + if (!applicationSecurityGroup) { + applicationSecurityGroup = securityGroupService.createSecurityGroup(application, subnetType) + } + securityGroupIds << applicationSecurityGroup + }, 500, 3) + } + } + settings = settings.toBuilder().securityGroups(securityGroupIds.toList()).build() + + if (settings.classicLinkVpcSecurityGroups) { + if (!settings.classicLinkVpcId) { + throw 
new IllegalStateException("Can't provide classic link security groups without classiclink vpc Id") + } + List classicLinkIds = securityGroupService.resolveSecurityGroupIdsInVpc(settings.classicLinkVpcSecurityGroups, settings.classicLinkVpcId) + settings = settings.toBuilder().classicLinkVpcSecurityGroups(classicLinkIds).build() + } + + log.info("Configured resolved security groups {} for application {}.", securityGroupIds, application) + return settings + } + + private String createLaunchConfiguration(String name, String userData, LaunchConfigurationSettings settings) { + + CreateLaunchConfigurationRequest request = new CreateLaunchConfigurationRequest() + .withImageId(settings.ami) + .withIamInstanceProfile(settings.iamRole) + .withLaunchConfigurationName(name) + .withUserData(userData) + .withInstanceType(settings.instanceType) + .withSecurityGroups(settings.securityGroups) + .withKeyName(settings.keyPair) + .withAssociatePublicIpAddress(settings.associatePublicIpAddress) + .withKernelId(settings.kernelId ?: null) + .withRamdiskId(settings.ramdiskId ?: null) + .withEbsOptimized(settings.ebsOptimized) + .withSpotPrice(settings.spotMaxPrice) + .withClassicLinkVPCId(settings.classicLinkVpcId) + .withClassicLinkVPCSecurityGroups(settings.classicLinkVpcSecurityGroups) + .withInstanceMonitoring(new InstanceMonitoring(enabled: settings.instanceMonitoring)) + + if (settings.blockDevices) { + def mappings = [] + for (blockDevice in settings.blockDevices) { + def mapping = new BlockDeviceMapping(deviceName: blockDevice.deviceName) + if (blockDevice.virtualName) { + mapping.withVirtualName(blockDevice.virtualName) + } else { + def ebs = new Ebs() + blockDevice.with { + ebs.withVolumeSize(size) + if (deleteOnTermination != null) { + ebs.withDeleteOnTermination(deleteOnTermination) + } + if (volumeType) { + ebs.withVolumeType(volumeType) + } + if (iops) { + ebs.withIops(iops) + } + if (throughput) { + ebs.withThroughput(throughput) + } + if (snapshotId) { + ebs.withSnapshotId(snapshotId) + } + if (encrypted) { + ebs.withEncrypted(encrypted) + } + } + mapping.withEbs(ebs) + } + mappings << mapping + } + request.withBlockDeviceMappings(mappings) + } + + try { + OperationPoller.retryWithBackoff({ o -> + CreateLaunchConfigurationRequest debugRequest = request.clone() + debugRequest.setUserData(null); + log.debug("Creating launch configuration (${name}): ${debugRequest}") + + autoScaling.createLaunchConfiguration(request) + }, 1500, 3); + } catch (AlreadyExistsException e) { + log.debug("Launch configuration already exists, continuing... 
(${e.message})") + } + + name + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverter.groovy index 306796ff563..bd215eeb86e 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverter.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverter.groovy @@ -17,16 +17,23 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.converters import com.netflix.spinnaker.clouddriver.aws.AmazonOperation +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @AmazonOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("basicAmazonDeployDescription") class BasicAmazonDeployAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { + + @Autowired + RegionScopedProviderFactory regionScopedProviderFactory + AtomicOperation convertOperation(Map input) { new DeployAtomicOperation(convertDescription(input)) } @@ -34,6 +41,24 @@ class BasicAmazonDeployAtomicOperationConverter extends AbstractAtomicOperations BasicAmazonDeployDescription convertDescription(Map input) { def converted = objectMapper.convertValue(input, BasicAmazonDeployDescription) converted.credentials = getCredentialsObject(input.credentials as String) - converted + + if (converted.securityGroups != null && !converted.securityGroups.isEmpty()) { + for (Map.Entry<String, List<String>> entry : converted.availabilityZones) { + String region = entry.key + + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider = + regionScopedProviderFactory.forRegion(converted.credentials, region) + + SecurityGroupService securityGroupService = regionScopedProvider.getSecurityGroupService() + + converted.securityGroupNames.addAll( + securityGroupService.resolveSecurityGroupNamesByStrategy(converted.securityGroups) { List<String> ids -> + securityGroupService.getSecurityGroupNamesFromIds(ids) + } + ) + } + } + + return converted } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateClusterConfigurationsAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateClusterConfigurationsAtomicOperationConverter.groovy deleted file mode 100644 index 94ed8f9229a..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateClusterConfigurationsAtomicOperationConverter.groovy +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.converters - -import com.netflix.spinnaker.clouddriver.aws.AmazonOperation -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateClusterConfigurationsDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateClusterConfigurationsAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@AmazonOperation(AtomicOperations.MIGRATE_CLUSTER_CONFIGURATIONS) -@Component("migrateClusterConfigurationsDescription") -class MigrateClusterConfigurationsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new MigrateClusterConfigurationsAtomicOperation(convertDescription(input)) - } - - @Override - MigrateClusterConfigurationsDescription convertDescription(Map input) { - if (input.regionMapping) { - ((Map) input.regionMapping).keySet().each { i -> - if (input.regionMapping[i] instanceof String) { - input.regionMapping[i] = [ (input.regionMapping[i]) : []] - } - }; - } - def converted = objectMapper.convertValue(input, MigrateClusterConfigurationsDescription) - converted.sources.each { - it.credentials = getCredentialsObject(it.cluster.account as String) - converted.credentials.add(it.credentials) - } - converted.accountMapping.values().each { converted.credentials.add(getCredentialsObject(it)) } - converted - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateLoadBalancerAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateLoadBalancerAtomicOperationConverter.groovy deleted file mode 100644 index e6ba388f1a0..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateLoadBalancerAtomicOperationConverter.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.converters - -import com.netflix.spinnaker.clouddriver.aws.AmazonOperation -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@AmazonOperation(AtomicOperations.MIGRATE_LOAD_BALANCER) -@Component("migrateLoadBalancerDescription") -class MigrateLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new MigrateLoadBalancerAtomicOperation(convertDescription(input)) - } - - @Override - MigrateLoadBalancerDescription convertDescription(Map input) { - def converted = objectMapper.convertValue(input, MigrateLoadBalancerDescription) - converted.source.credentials = getCredentialsObject(input.source.credentials as String) - converted.target.credentials = getCredentialsObject(input.target.credentials as String) - converted.credentials.add(converted.source.credentials) - converted.credentials.add(converted.target.credentials) - converted - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateSecurityGroupAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateSecurityGroupAtomicOperationConverter.groovy deleted file mode 100644 index 2e3b12aaeae..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateSecurityGroupAtomicOperationConverter.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.converters - -import com.netflix.spinnaker.clouddriver.aws.AmazonOperation -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@AmazonOperation(AtomicOperations.MIGRATE_SECURITY_GROUP) -@Component("migrateSecurityGroupDescription") -class MigrateSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new MigrateSecurityGroupAtomicOperation(convertDescription(input)) - } - - @Override - MigrateSecurityGroupDescription convertDescription(Map input) { - def converted = objectMapper.convertValue(input, MigrateSecurityGroupDescription) - converted.source.credentials = getCredentialsObject(input.source.credentials as String) - converted.target.credentials = getCredentialsObject(input.target.credentials as String) - converted.credentials.add(converted.source.credentials) - converted.credentials.add(converted.target.credentials) - converted - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateServerGroupAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateServerGroupAtomicOperationConverter.groovy deleted file mode 100644 index b8a78feded8..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateServerGroupAtomicOperationConverter.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.converters - -import com.netflix.spinnaker.clouddriver.aws.AmazonOperation -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateServerGroupDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateServerGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@AmazonOperation(AtomicOperations.MIGRATE_SERVER_GROUP) -@Component("migrateServerGroupDescription") -class MigrateServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new MigrateServerGroupAtomicOperation(convertDescription(input)) - } - - @Override - MigrateServerGroupDescription convertDescription(Map input) { - def converted = objectMapper.convertValue(input, MigrateServerGroupDescription) - converted.source.credentials = getCredentialsObject(input.source.credentials as String) - converted.target.credentials = getCredentialsObject(input.target.credentials as String) - converted.credentials.add(converted.source.credentials) - converted.credentials.add(converted.target.credentials) - converted - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RebootInstancesAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RebootInstancesAtomicOperationConverter.groovy index 4d50e1e6e88..94a0071ce35 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RebootInstancesAtomicOperationConverter.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RebootInstancesAtomicOperationConverter.groovy @@ -17,16 +17,23 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.converters import com.netflix.spinnaker.clouddriver.aws.AmazonOperation +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonInstanceProvider import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport import com.netflix.spinnaker.clouddriver.aws.deploy.description.RebootInstancesDescription import com.netflix.spinnaker.clouddriver.aws.deploy.ops.RebootInstancesAtomicOperation +import groovy.util.logging.Slf4j +import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component +@Slf4j @AmazonOperation(AtomicOperations.REBOOT_INSTANCES) @Component("rebootInstancesDescription") class RebootInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { + @Autowired + AmazonInstanceProvider amazonInstanceProvider + @Override AtomicOperation convertOperation(Map input) { new RebootInstancesAtomicOperation(convertDescription(input)) @@ -36,6 +43,24 @@ class RebootInstancesAtomicOperationConverter extends AbstractAtomicOperationsCr RebootInstancesDescription convertDescription(Map input) { def converted = objectMapper.convertValue(input, RebootInstancesDescription) converted.credentials = getCredentialsObject(input.credentials as String) + + try { + def serverGroups = 
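/* Resolve each instance id to its parent server group via the instance provider; findResults drops nulls, so instances without a resolvable server group are skipped. */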
converted.instanceIds.findResults { + def instance = amazonInstanceProvider.getInstance(converted.credentials.name, converted.region, it) + return instance?.extraAttributes?.serverGroup + } as Set + converted.serverGroups = serverGroups + } catch (Exception e) { + converted.serverGroups = [] + log.error( + "Unable to determine server groups for instances (instanceIds: {}, account: {}, region: {})", + converted.instanceIds, + converted.credentials.name, + converted.region, + e + ) + } + converted } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/TerminateInstancesAtomicOperationConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/TerminateInstancesAtomicOperationConverter.groovy index ce76c352d52..d42a7d32364 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/TerminateInstancesAtomicOperationConverter.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/TerminateInstancesAtomicOperationConverter.groovy @@ -16,16 +16,23 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.converters import com.netflix.spinnaker.clouddriver.aws.AmazonOperation +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonInstanceProvider import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport import com.netflix.spinnaker.clouddriver.aws.deploy.description.TerminateInstancesDescription import com.netflix.spinnaker.clouddriver.aws.deploy.ops.TerminateInstancesAtomicOperation +import groovy.util.logging.Slf4j +import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -@AmazonOperation(AtomicOperations.TERMINATE_INSTANCES) +@Slf4j @Component("terminateInstancesDescription") +@AmazonOperation(AtomicOperations.TERMINATE_INSTANCES) class TerminateInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { + @Autowired + AmazonInstanceProvider amazonInstanceProvider + @Override AtomicOperation convertOperation(Map input) { new TerminateInstancesAtomicOperation(convertDescription(input)) @@ -35,6 +42,24 @@ class TerminateInstancesAtomicOperationConverter extends AbstractAtomicOperation TerminateInstancesDescription convertDescription(Map input) { def converted = objectMapper.convertValue(input, TerminateInstancesDescription) converted.credentials = getCredentialsObject(input.credentials as String) + + try { + def serverGroups = converted.instanceIds.findResults { + def instance = amazonInstanceProvider.getInstance(converted.credentials.name, converted.region, it) + return instance?.extraAttributes?.get("serverGroup") + } as Set + converted.serverGroups = serverGroups + } catch (Exception e) { + converted.serverGroups = [] + log.error( + "Unable to determine server groups for instances (instanceIds: {}, account: {}, region: {})", + converted.instanceIds, + converted.credentials.name, + converted.region, + e + ) + } + converted } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractAmazonCredentialsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractAmazonCredentialsDescription.groovy index 8040af769d9..583a9d23ab8 100644 --- 
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractAmazonCredentialsDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractAmazonCredentialsDescription.groovy @@ -24,9 +24,11 @@ import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable abstract class AbstractAmazonCredentialsDescription implements CredentialsNameable { @JsonIgnore NetflixAmazonCredentials credentials + String account - @JsonProperty("credentials") - String getCredentialAccount() { - this.credentials?.name + @JsonProperty + @Override + String getAccount() { + return credentials?.name ?: account } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy index 4b5a2682bfe..d02ba026ad4 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy @@ -16,9 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -abstract class AbstractRegionAsgInstanceIdsDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +abstract class AbstractRegionAsgInstanceIdsDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String region String asgName List instanceIds Integer targetHealthyDeployPercentage + + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AllowLaunchDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AllowLaunchDescription.groovy index 36836db2eec..12ddd910ae5 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AllowLaunchDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/AllowLaunchDescription.groovy @@ -16,8 +16,15 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description +import com.netflix.spinnaker.orchestration.OperationDescription + class AllowLaunchDescription extends AbstractAmazonCredentialsDescription { - String account + String targetAccount String amiName String region + + @Override + boolean requiresApplicationRestriction() { + return false + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/BasicAmazonDeployDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/BasicAmazonDeployDescription.groovy index 1684280011e..59d012944a7 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/BasicAmazonDeployDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/BasicAmazonDeployDescription.groovy @@ -18,14 +18,17 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook import 
com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride import com.netflix.spinnaker.clouddriver.deploy.DeployDescription import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable import groovy.transform.AutoClone import groovy.transform.Canonical @AutoClone @Canonical -class BasicAmazonDeployDescription extends AbstractAmazonCredentialsDescription implements DeployDescription { +class BasicAmazonDeployDescription extends AbstractAmazonCredentialsDescription implements + DeployDescription, ApplicationNameable { String application String amiName String stack @@ -40,7 +43,6 @@ class BasicAmazonDeployDescription extends AbstractAmazonCredentialsDescription Collection enabledMetrics Integer healthCheckGracePeriod String healthCheckType - String spotPrice Set suspendedProcesses = [] Collection terminationPolicies String kernelId @@ -49,10 +51,10 @@ class BasicAmazonDeployDescription extends AbstractAmazonCredentialsDescription Boolean ebsOptimized String base64UserData Boolean legacyUdf + UserDataOverride userDataOverride = new UserDataOverride() Collection events = [] - //Default behaviour (legacy reasons) is to carry forward some settings (even on a deploy vs a cloneServerGroup) from an ancestor ASG // the following flags disable copying of those attributes: @@ -90,11 +92,180 @@ class BasicAmazonDeployDescription extends AbstractAmazonCredentialsDescription List loadBalancers List targetGroups List securityGroups + List securityGroupNames = [] List lifecycleHooks = [] Map<String, List<String>> availabilityZones = [:] Capacity capacity = new Capacity() Source source = new Source() Map tags + Map blockDeviceTags + + /** + * Amazon EC2 Auto Scaling attempts to proactively replace Spot Instances in the group + * that have received a rebalance recommendation, before they are interrupted by AWS EC2. + * Note: Enabling this feature could exceed the server group's max capacity for a brief period of time, leading to higher costs. + * + * https://docs.aws.amazon.com/autoscaling/ec2/userguide/capacity-rebalance.html + * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/rebalance-recommendations.html + */ + Boolean capacityRebalance + + // Launch Template features:start + /** + * When set to true, the created server group will use a launch template instead of a launch configuration. + */ + Boolean setLaunchTemplate = true + + /** + * When set to true, the created server group will be configured with IMDSv2. + * This is a Launch Template only feature + * * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + */ + Boolean requireIMDSv2 = false + + /** + * Associate an IPv6 address + * This is a Launch Template only feature + */ + Boolean associateIPv6Address + + /** + * Applicable only for burstable performance instance types like t2/t3. + * * set to null when not applicable / by default. + * + * The burstable performance instances in the created server group will have: + * * unlimited CPU credits, when set to true + * * standard CPU credits, when set to false + * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode.html + * + * This is a Launch Template only feature.
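+ * e.g. (illustrative) a description with instanceType "t3.large" and unlimitedCpuCredits: true is expected to
+ * surface in the launch template as a CPU credit specification of "unlimited"; false as "standard".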
+ */ + Boolean unlimitedCpuCredits + + /** + * When set to true, the created server group will be configured with Nitro Enclaves enabled + * This is a Launch Template only feature + * * https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html + */ + Boolean enableEnclave + + /** + * Launch template placement details, see {@link com.amazonaws.services.ec2.model.LaunchTemplatePlacementRequest}. + */ + LaunchTemplatePlacement placement + + /** + * Launch template license specifications, see {@link com.amazonaws.services.ec2.model.LaunchTemplateLicenseConfigurationRequest}. + */ + List licenseSpecifications + + /** + * Indicates how to allocate instance types to fulfill On-Demand capacity. + * https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html#asg-allocation-strategies + * default: prioritized, use LaunchTemplateOverridesForInstanceType#priority to define the launch priority of each instance type. + */ + String onDemandAllocationStrategy + + /** The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. + * If weights are specified for the instance types in the overrides, + * set the value of OnDemandBaseCapacity in terms of the number of capacity units, and not number of instances. + * default: 0 + */ + Integer onDemandBaseCapacity + + /** + * The percentages of On-Demand Instances and Spot Instances for additional capacity beyond OnDemandBaseCapacity + * default: 100, i.e. only On-Demand instances + */ + Integer onDemandPercentageAboveBaseCapacity + + /** + * Indicates how to allocate instances across Spot Instance pools. + * https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html#asg-allocation-strategies + * For strategies like '*prioritized', use LaunchTemplateOverridesForInstanceType#priority to define the launch priority of each instance type. + * default: lowest-price + */ + String spotAllocationStrategy + + /** + * The number of Spot Instance pools across which to allocate Spot Instances. The Spot pools are determined from the different instance types in the overrides. + * default: 2, only applicable with 'lowest-price' spotAllocationStrategy + * limits: 1 to 20 + */ + Integer spotInstancePools + + /** + * The maximum price per unit hour that the user is willing to pay for a Spot Instance. + * default: On-Demand price for the configuration + */ + String spotPrice + + /** + * A list of parameters to override corresponding parameters in the launch template. + * limits: + * * instance types that can be associated with an ASG: 40 + * * distinct launch templates that can be associated with an ASG: 20 + */ + List launchTemplateOverridesForInstanceType + + static Set getLaunchTemplateOnlyFieldNames() { + return ["requireIMDSv2", "associateIPv6Address", "unlimitedCpuCredits", + "placement", "licenseSpecifications", "onDemandAllocationStrategy", + "onDemandBaseCapacity", "onDemandPercentageAboveBaseCapacity", "spotAllocationStrategy", + "spotInstancePools", "launchTemplateOverridesForInstanceType", "enableEnclave"].toSet() + } + + static Set getMixedInstancesPolicyFieldNames() { + return ["onDemandAllocationStrategy", "onDemandBaseCapacity", "onDemandPercentageAboveBaseCapacity", + "spotAllocationStrategy", "spotInstancePools", "launchTemplateOverridesForInstanceType"].toSet() + } + + /** + * Get all instance types in the description. + * + * Why does this method exist?
+ * When launchTemplateOverrides are specified, either the overrides or instanceType is used, + * but all instance type inputs are returned by this method. + * When is this method used? + * Used primarily for validation purposes, to ensure all instance types in the request are compatible with + * other validated configuration parameters (to prevent ambiguity). + * + * @return all instance type(s) + */ + Set getAllInstanceTypes() { + Set instanceTypes = [instanceType] + if (launchTemplateOverridesForInstanceType) { + launchTemplateOverridesForInstanceType.each { + instanceTypes << it.instanceType + } + } + return instanceTypes + } + + /** + * Get allowed instance types in the description. These are the instance types that an ASG can realistically launch. + * + * Why does this method exist? + * If launchTemplateOverrides are specified, they will override the same properties in the launch template, e.g. instanceType + * https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplate.html. + * When is this method used? + * Used for functional purposes, where the result drives further actions like deriving certain defaults or deciding whether to allow modifying the CPU credit specification. + * + * @return allowed instance type(s) + */ + Set getAllowedInstanceTypes() { + if (launchTemplateOverridesForInstanceType) { + launchTemplateOverridesForInstanceType.collect{ it.instanceType }.toSet() + } else { + Collections.singleton(instanceType) + } + } + // Launch Template features:end + + @Override + Collection getApplications() { + return [application] + } @Canonical static class Capacity { @@ -110,4 +281,105 @@ class BasicAmazonDeployDescription extends AbstractAmazonCredentialsDescription String asgName Boolean useSourceCapacity } + + @Canonical + static class LaunchTemplatePlacement { + String affinity + String availabilityZone + String groupName + String hostId + String tenancy + String spreadDomain + String hostResourceGroupArn + Integer partitionNumber + } + + @Canonical + static class LaunchTemplateLicenseSpecification { + String arn + } + + /** + * Support for multiple instance types. + * This class encapsulates configuration mapped to a particular instance type. + */ + @Canonical + static class LaunchTemplateOverridesForInstanceType { + /** + * An instance type that is supported in the requested region and availability zone. + * Required field when instanceTypeConfigOverrides is used. + */ + String instanceType + + /** + * The number of capacity units provided by {@link #instanceType} in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. + * When an instance of type {@link #instanceType} is provisioned, its capacity units count toward the desired capacity. + */ + String weightedCapacity + + /** + * Optional priority for instance type. + * Valid values: integers > 0. The lower the number, the higher the priority. If unset, the launch template override has the lowest priority. + * The order of instance types in the list of launch template overrides sent to AWS is set from highest to lowest priority. + * + * When to use? + * Use when the order of instance types matters with '*prioritized' allocation strategies. + * With OnDemandAllocationStrategy "prioritized", priority is used to determine which launch template override to use first in fulfilling On-Demand capacity.
+ * With SpotAllocationStrategy "capacity-optimized-prioritized", priority is used on a best-effort basis to determine which launch template override to use first in fulfilling Spot capacity, but AWS optimizes for capacity first. + * + * Example: + * In the example below, the bigger instance type is prioritized over the smaller type. The integer priority is used to transform the unordered list into an ordered list.
+ *
+ * LaunchTemplateOverridesForInstanceType[
+ *   { "instanceType": "c5.large",  "weightedCapacity": 2, "priority": 2 },
+ *   { "instanceType": "c5.XLARGE", "weightedCapacity": 4, "priority": 1 },
+ *   { "instanceType": "c4.large",  "weightedCapacity": 2 }
+ * ]
+ * ------> is sent to AWS as LaunchTemplateOverrides[
+ *   { "instanceType": "c5.XLARGE", "weightedCapacity": 4 },
+ *   { "instanceType": "c5.large",  "weightedCapacity": 2 },
+ *   { "instanceType": "c4.large",  "weightedCapacity": 2 }
+ * ]
+ */ + Integer priority + + LaunchTemplateOverridesForInstanceType() {} + + private LaunchTemplateOverridesForInstanceType(Builder builder) { + instanceType = builder.instanceType + weightedCapacity = builder.weightedCapacity + priority = builder.priority + } + + static class Builder { + String instanceType + String weightedCapacity + Integer priority + + Builder instanceType(String instanceType) { + this.instanceType = instanceType + return this + } + + Builder weightedCapacity(String weightedCapacity) { + this.weightedCapacity = weightedCapacity + return this + } + + Builder priority(Integer priority) { + this.priority = priority + return this + } + + LaunchTemplateOverridesForInstanceType build() { + return new LaunchTemplateOverridesForInstanceType(this) + } + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLoadBalancerDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLoadBalancerDescription.groovy index 7e148fa0ad1..82489e03589 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLoadBalancerDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLoadBalancerDescription.groovy @@ -17,9 +17,15 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancerType +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable -class DeleteAmazonLoadBalancerDescription extends AbstractAmazonCredentialsDescription { +class DeleteAmazonLoadBalancerDescription extends AbstractAmazonCredentialsDescription implements ResourcesNameable { AmazonLoadBalancerType loadBalancerType String loadBalancerName Set regions + + @Override + Collection getNames() { + return [loadBalancerName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAsgTagsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAsgTagsDescription.groovy index 70252935016..a4e4a4fcf86 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAsgTagsDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAsgTagsDescription.groovy @@ -15,7 +15,9 @@ */ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class
DeleteAsgTagsDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class DeleteAsgTagsDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String serverGroupName String region @@ -28,4 +30,9 @@ class DeleteAsgTagsDescription extends AbstractAmazonCredentialsDescription { @Deprecated List regions = [] + + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteScalingPolicyDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteScalingPolicyDescription.groovy index c1e42504167..7755f82f9bd 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteScalingPolicyDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteScalingPolicyDescription.groovy @@ -16,8 +16,15 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class DeleteScalingPolicyDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class DeleteScalingPolicyDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String policyName String serverGroupName String region + + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteSecurityGroupDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteSecurityGroupDescription.groovy index 57a5bc3b15d..e9e3f9a7e9a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteSecurityGroupDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteSecurityGroupDescription.groovy @@ -15,8 +15,15 @@ */ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class DeleteSecurityGroupDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable + +class DeleteSecurityGroupDescription extends AbstractAmazonCredentialsDescription implements ResourcesNameable { String securityGroupName String vpcId Set regions + + @Override + Collection getNames() { + return [securityGroupName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DestroyAsgDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DestroyAsgDescription.groovy index 9c615e1abce..7a50d0b7d8c 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DestroyAsgDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DestroyAsgDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class DestroyAsgDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class DestroyAsgDescription extends AbstractAmazonCredentialsDescription implements 
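/* ServerGroupsNameable: the description declares the server groups it acts on via getServerGroupNames() below */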
ServerGroupsNameable { String serverGroupName String region @@ -28,4 +30,9 @@ class DestroyAsgDescription extends AbstractAmazonCredentialsDescription { @Deprecated List regions = [] + + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DetachInstancesDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DetachInstancesDescription.groovy index 962a00b6a99..b47584aa18c 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DetachInstancesDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DetachInstancesDescription.groovy @@ -16,11 +16,18 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class DetachInstancesDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class DetachInstancesDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String region String asgName List instanceIds boolean decrementDesiredCapacity boolean terminateDetachedInstances boolean adjustMinIfNecessary + + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/EnableDisableAsgDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/EnableDisableAsgDescription.groovy index b2904d6481e..f71e661e820 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/EnableDisableAsgDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/EnableDisableAsgDescription.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable /** * Description for "enabling" a supplied ASG. "Enabling" means Resuming "AddToLoadBalancer", "Launch", and "Terminate" processes on an ASG. If Eureka/Discovery is available, setting a status @@ -24,7 +25,7 @@ import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescrip * Description for "disabling" a supplied ASG. "Disabling" means Suspending "AddToLoadBalancer", "Launch", and "Terminate" processes on an ASG. If Eureka/Discovery is available, setting a status * override will also be achieved. 
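 *
 * e.g. with asgs = [[serverGroupName: "myapp-v001", region: "us-west-2"]] (illustrative values), the
 * getServerGroupNames() override added below returns ["myapp-v001"].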
*/ -class EnableDisableAsgDescription extends AbstractAmazonCredentialsDescription implements EnableDisableDescriptionTrait { +class EnableDisableAsgDescription extends AbstractAmazonCredentialsDescription implements EnableDisableDescriptionTrait, ServerGroupsNameable { String region List asgs = [] @@ -38,4 +39,9 @@ class EnableDisableAsgDescription extends AbstractAmazonCredentialsDescription i Integer desiredPercentage Integer targetHealthyDeployPercentage + + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceLoadBalancerRegistrationDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceLoadBalancerRegistrationDescription.groovy index ea5d3f2f363..6f23305e380 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceLoadBalancerRegistrationDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceLoadBalancerRegistrationDescription.groovy @@ -16,6 +16,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class InstanceLoadBalancerRegistrationDescription extends AbstractRegionAsgInstanceIdsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable + +class InstanceLoadBalancerRegistrationDescription extends AbstractRegionAsgInstanceIdsDescription implements ResourcesNameable { List loadBalancerNames + + @Override + Collection getNames() { + return loadBalancerNames ?: [] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceTargetGroupRegistrationDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceTargetGroupRegistrationDescription.groovy index afb600bbf3f..57f9a3ce885 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceTargetGroupRegistrationDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/InstanceTargetGroupRegistrationDescription.groovy @@ -16,6 +16,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class InstanceTargetGroupRegistrationDescription extends AbstractRegionAsgInstanceIdsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable + +class InstanceTargetGroupRegistrationDescription extends AbstractRegionAsgInstanceIdsDescription implements ResourcesNameable { List targetGroupNames + + @Override + Collection getNames() { + return targetGroupNames ?: [] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateClusterConfigurationsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateClusterConfigurationsDescription.groovy deleted file mode 100644 index 8b1638849b9..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateClusterConfigurationsDescription.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.description - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfiguration -import com.netflix.spinnaker.clouddriver.security.AccountCredentials - -class MigrateClusterConfigurationsDescription { - List sources = [] - Map<String, Map<String, List<String>>> regionMapping = [:] - Map subnetTypeMapping = [:] - Map elbSubnetTypeMapping = [:] - Map accountMapping = [:] - Map iamRoleMapping = [:] - Map keyPairMapping = [:] - Map loadBalancerNameMapping = [:] - boolean allowIngressFromClassic - boolean dryRun - - @JsonIgnore - Set credentials = []; - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateLoadBalancerDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateLoadBalancerDescription.groovy deleted file mode 100644 index a84bbf8ee28..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateLoadBalancerDescription.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.description - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.LoadBalancerLocation -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.TargetLoadBalancerLocation -import com.netflix.spinnaker.clouddriver.security.AccountCredentials - - -class MigrateLoadBalancerDescription { - - LoadBalancerLocation source - TargetLoadBalancerLocation target - String subnetType - String application - boolean allowIngressFromClassic - boolean dryRun - - @JsonIgnore - Set credentials = []; - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateSecurityGroupDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateSecurityGroupDescription.groovy deleted file mode 100644 index 316f5bcf29f..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateSecurityGroupDescription.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.description - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator.SecurityGroupLocation -import com.netflix.spinnaker.clouddriver.security.AccountCredentials - -class MigrateSecurityGroupDescription { - SecurityGroupLocation source - SecurityGroupLocation target - boolean dryRun - - @JsonIgnore - Set credentials = []; -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateServerGroupDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateServerGroupDescription.groovy deleted file mode 100644 index 4de2cdc23f7..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/MigrateServerGroupDescription.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.description - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentials - -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ServerGroupMigrator.ServerGroupLocation - -class MigrateServerGroupDescription { - - ServerGroupLocation source - ServerGroupLocation target - String subnetType - String elbSubnetType - String iamRole - String keyPair - String targetAmi - Map loadBalancerNameMapping = [:] - boolean allowIngressFromClassic - boolean dryRun - - @JsonIgnore - Set credentials = []; - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgDescription.groovy index f6e2d3685a3..8d1ff14443e 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class ModifyAsgDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class ModifyAsgDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { List asgs = [] Integer cooldown @@ -24,5 +26,10 @@ class ModifyAsgDescription extends AbstractAmazonCredentialsDescription { String healthCheckType List enabledMetrics List terminationPolicies + Boolean capacityRebalance + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgLaunchConfigurationDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgLaunchConfigurationDescription.groovy index bb6522702db..57d84df4560 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgLaunchConfigurationDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyAsgLaunchConfigurationDescription.groovy @@ -17,12 +17,14 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable import groovy.transform.AutoClone import groovy.transform.Canonical @AutoClone @Canonical -class ModifyAsgLaunchConfigurationDescription extends AbstractAmazonCredentialsDescription { +class ModifyAsgLaunchConfigurationDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String region String asgName String amiName @@ -38,6 +40,8 @@ class ModifyAsgLaunchConfigurationDescription extends AbstractAmazonCredentialsD String classicLinkVpcId List classicLinkVpcSecurityGroups Boolean legacyUdf + String base64UserData + UserDataOverride userDataOverride = new UserDataOverride() List blockDevices List securityGroups @@ -48,4 +52,8 @@ class 
ModifyAsgLaunchConfigurationDescription extends AbstractAmazonCredentialsD */ boolean copySourceCustomBlockDeviceMappings = true + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/RebootInstancesDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/RebootInstancesDescription.groovy index 7ca6353a103..8cada838166 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/RebootInstancesDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/RebootInstancesDescription.groovy @@ -16,7 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class RebootInstancesDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class RebootInstancesDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String region List instanceIds + + Set serverGroups + + @Override + Collection getServerGroupNames() { + return serverGroups + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResizeAsgDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResizeAsgDescription.groovy index 1130b5967a1..d4e995309f0 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResizeAsgDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResizeAsgDescription.groovy @@ -17,18 +17,16 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable -class ResizeAsgDescription extends AbstractAmazonCredentialsDescription { - String serverGroupName - String region +class ResizeAsgDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { ServerGroup.Capacity capacity List asgs = [] - @Deprecated - String asgName - - @Deprecated - List regions = [] + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } static class Constraints { ServerGroup.Capacity capacity diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResumeAsgProcessesDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResumeAsgProcessesDescription.groovy index 15e215df1cb..0c027fc390b 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResumeAsgProcessesDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/ResumeAsgProcessesDescription.groovy @@ -15,7 +15,9 @@ */ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class ResumeAsgProcessesDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class ResumeAsgProcessesDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String serverGroupName String region @@ -28,4 +30,9 @@ class ResumeAsgProcessesDescription extends 
AbstractAmazonCredentialsDescription @Deprecated List regions = [] + + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/SuspendAsgProcessesDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/SuspendAsgProcessesDescription.groovy index 46d1831f6ab..74f24f4f141 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/SuspendAsgProcessesDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/SuspendAsgProcessesDescription.groovy @@ -15,7 +15,9 @@ */ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class SuspendAsgProcessesDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class SuspendAsgProcessesDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String serverGroupName String region @@ -28,4 +30,9 @@ class SuspendAsgProcessesDescription extends AbstractAmazonCredentialsDescriptio @Deprecated List regions = [] + + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstanceAndDecrementAsgDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstanceAndDecrementAsgDescription.groovy index faf7f2cbe12..e0756431924 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstanceAndDecrementAsgDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstanceAndDecrementAsgDescription.groovy @@ -16,10 +16,17 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class TerminateInstanceAndDecrementAsgDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class TerminateInstanceAndDecrementAsgDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String asgName String instance String region Boolean adjustMinIfNecessary Boolean setMaxToNewDesired + + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstancesDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstancesDescription.groovy index f01de3cb2b6..db3644e35e9 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstancesDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/TerminateInstancesDescription.groovy @@ -15,7 +15,25 @@ */ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class TerminateInstancesDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class TerminateInstancesDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String region List instanceIds + 
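+ /** Server groups that own the instances being terminated; exposed via getServerGroupNames() so the operation can be authorized against the affected server groups. */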
+ Set serverGroups + + @Override + Collection getServerGroupNames() { + return serverGroups + } + + @Override + boolean requiresApplicationRestriction() { + if (instanceIds == null || instanceIds.isEmpty()) { + return false + } + + return true + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpdateInstancesDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpdateInstancesDescription.groovy index df5729fe5d7..f8a69f2935b 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpdateInstancesDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpdateInstancesDescription.groovy @@ -16,9 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class UpdateInstancesDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class UpdateInstancesDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String serverGroupName String region List securityGroups Boolean securityGroupsAppendOnly + + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAlarmDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAlarmDescription.groovy index 33b2c7567e1..ad74fe984fa 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAlarmDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAlarmDescription.groovy @@ -21,8 +21,9 @@ import com.amazonaws.services.cloudwatch.model.Dimension import com.amazonaws.services.cloudwatch.model.PutMetricAlarmRequest import com.amazonaws.services.cloudwatch.model.StandardUnit import com.amazonaws.services.cloudwatch.model.Statistic +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable -class UpsertAlarmDescription extends AbstractAmazonCredentialsDescription { +class UpsertAlarmDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String name String asgName String region @@ -68,4 +69,9 @@ class UpsertAlarmDescription extends AbstractAmazonCredentialsDescription { oKActions: okActionArns ) } + + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java deleted file mode 100644 index 84bfd7e592e..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.description; - -import com.amazonaws.services.elasticloadbalancingv2.model.ActionTypeEnum; -import com.amazonaws.services.elasticloadbalancingv2.model.AuthenticateOidcActionConfig; -import com.amazonaws.services.elasticloadbalancingv2.model.Certificate; -import com.amazonaws.services.elasticloadbalancingv2.model.ProtocolEnum; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -public class UpsertAmazonLoadBalancerV2Description extends UpsertAmazonLoadBalancerDescription { - public List listeners = new ArrayList<>(); - public List targetGroups = new ArrayList<>(); - - public static class TargetGroup { - private String name; - private ProtocolEnum protocol; - private Integer port; - private Attributes attributes; // TODO: Support target group attributes - private String targetType = "instance"; - - private ProtocolEnum healthCheckProtocol; - private String healthCheckPath; - private String healthCheckPort; - private Integer healthCheckInterval = 10; - private Integer healthCheckTimeout = 5; - private Integer unhealthyThreshold = 2; - private Integer healthyThreshold = 10; - private String healthCheckMatcher = "200-299"; // string of ranges or individual http status codes, separated by commas - - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public ProtocolEnum getProtocol() { - return protocol; - } - - public void setProtocol(ProtocolEnum protocol) { - this.protocol = protocol; - } - - public Integer getPort() { - return port; - } - - public void setPort(Integer port) { - this.port = port; - } - - public Attributes getAttributes() { - return attributes; - } - - public void setAttributes(Attributes attributes) { - this.attributes = attributes; - } - - public String getTargetType() { return targetType; } - - public void setTargetType(String targetType) { this.targetType = targetType; } - - public ProtocolEnum getHealthCheckProtocol() { - return healthCheckProtocol; - } - - public void setHealthCheckProtocol(ProtocolEnum healthCheckProtocol) { - this.healthCheckProtocol = healthCheckProtocol; - } - - public String getHealthCheckPath() { - return healthCheckPath; - } - - public void setHealthCheckPath(String healthCheckPath) { - this.healthCheckPath = healthCheckPath; - } - - public String getHealthCheckPort() { - return healthCheckPort; - } - - public void setHealthCheckPort(String healthCheckPort) { - this.healthCheckPort = healthCheckPort; - } - - public Integer getHealthCheckInterval() { - return healthCheckInterval; - } - - public void setHealthCheckInterval(Integer healthCheckInterval) { - this.healthCheckInterval = healthCheckInterval; - } - - public Integer getHealthCheckTimeout() { - return healthCheckTimeout; - } - - public void setHealthCheckTimeout(Integer healthCheckTimeout) { - this.healthCheckTimeout = healthCheckTimeout; - } - - public Integer getUnhealthyThreshold() { - return unhealthyThreshold; - } - - public void setUnhealthyThreshold(Integer unhealthyThreshold) { - 
this.unhealthyThreshold = unhealthyThreshold; - } - - public Integer getHealthyThreshold() { - return healthyThreshold; - } - - public void setHealthyThreshold(Integer healthyThreshold) { - this.healthyThreshold = healthyThreshold; - } - - public String getHealthCheckMatcher() { - return healthCheckMatcher; - } - - public void setHealthCheckMatcher(String healthCheckMatcher) { - this.healthCheckMatcher = healthCheckMatcher; - } - - public Boolean compare(com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup awsTargetGroup) { - return this.name.equals(awsTargetGroup.getTargetGroupName()) && - this.protocol.toString().equals(awsTargetGroup.getProtocol()) && - this.port.equals(awsTargetGroup.getPort()) && - this.healthCheckProtocol.toString().equals(awsTargetGroup.getHealthCheckProtocol()) && - this.healthCheckPath.equals(awsTargetGroup.getHealthCheckPath()) && - this.healthCheckPort.equals(awsTargetGroup.getHealthCheckPort()) && - this.healthCheckInterval.equals(awsTargetGroup.getHealthCheckIntervalSeconds()) && - this.healthCheckTimeout.equals(awsTargetGroup.getHealthCheckTimeoutSeconds()) && - this.healthyThreshold.equals(awsTargetGroup.getHealthyThresholdCount()) && - this.unhealthyThreshold.equals(awsTargetGroup.getUnhealthyThresholdCount()) && - this.healthCheckMatcher.equals(awsTargetGroup.getMatcher().getHttpCode()); - - } - } - - public static class Listener { - private List certificates; - private ProtocolEnum protocol; - private Integer port; - private String sslPolicy; - private List defaultActions; - private List rules = new ArrayList<>(); - - public List getCertificates() { - return certificates; - } - - public void setCertificates(List certificates) { - this.certificates = certificates; - } - - public ProtocolEnum getProtocol() { - return protocol; - } - - public void setProtocol(ProtocolEnum protocol) { - this.protocol = protocol; - } - - public Integer getPort() { - return port; - } - - public void setPort(Integer port) { - this.port = port; - } - - public String getSslPolicy() { - return sslPolicy; - } - - public void setSslPolicy(String sslPolicy) { - this.sslPolicy = sslPolicy; - } - - public List getDefaultActions() { - return defaultActions; - } - - public void setDefaultActions(List defaultActions) { - this.defaultActions = defaultActions; - } - - public List getRules() { - return rules; - } - - public void setRules(List rules) { - this.rules = rules; - } - - public Boolean compare(com.amazonaws.services.elasticloadbalancingv2.model.Listener awsListener, - List actions, - List existingRules, - List rules) { - if (existingRules == null) { - existingRules = new ArrayList<>(); - } - if (rules == null) { - rules = new ArrayList<>(); - } - - int awsCertificateCount = awsListener.getCertificates() != null ? awsListener.getCertificates().size() : 0; - int certificateCount = certificates != null ? 
certificates.size() : 0; - Boolean certificatesSame = awsCertificateCount == certificateCount; - if (certificatesSame) { - Set awsListenerArns = new HashSet<>(); - Set thisListenerArns = new HashSet<>(); - if (awsListener.getCertificates() != null) { - awsListener.getCertificates().forEach(cert -> awsListenerArns.add(cert.getCertificateArn())); - } - if (certificates != null) { - certificates.forEach(cert -> thisListenerArns.add(cert.getCertificateArn())); - } - certificatesSame = awsListenerArns.equals(thisListenerArns); - } - - Boolean rulesSame = existingRules.size() == rules.size() + 1; // existing rules has the default rule, rules does not - if (rulesSame) { - for (com.amazonaws.services.elasticloadbalancingv2.model.Rule existingRule : existingRules) { - boolean match = true; - if (!existingRule.isDefault()) { - match = false; - for (com.amazonaws.services.elasticloadbalancingv2.model.Rule rule : rules) { - if (existingRule.getActions().equals(rule.getActions()) - && existingRule.getConditions().equals(rule.getConditions()) - && existingRule.getPriority().equals(rule.getPriority())) { - match = true; - break; - } - } - } - rulesSame = match; - if (!rulesSame) { - break; - } - } - } - - Boolean actionsSame = awsListener.getDefaultActions().containsAll(actions) && - actions.containsAll(awsListener.getDefaultActions()); - - return (this.protocol != null && this.protocol.toString().equals(awsListener.getProtocol())) && - (this.port != null && this.port.equals(awsListener.getPort())) && - actionsSame && - rulesSame && - certificatesSame; - } - } - - public static class Action { - private String type = ActionTypeEnum.Forward.toString(); - private String targetGroupName; - private AuthenticateOidcActionConfig authenticateOidcActionConfig; - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String getTargetGroupName() { - return targetGroupName; - } - - public void setTargetGroupName(String targetGroupName) { - this.targetGroupName = targetGroupName; - } - - public AuthenticateOidcActionConfig getAuthenticateOidcActionConfig() { - return authenticateOidcActionConfig; - } - - public void setAuthenticateOidcActionConfig(AuthenticateOidcActionConfig authenticateOidcActionConfig) { - this.authenticateOidcActionConfig = authenticateOidcActionConfig; - } - } - - public static class Attributes { - private Integer deregistrationDelay = 300; - private Boolean stickinessEnabled = false; - private String stickinessType = "lb_cookie"; - private Integer stickinessDuration = 86400; - private Boolean proxyProtocolV2 = false; - - public Integer getDeregistrationDelay() { - return deregistrationDelay; - } - - public void setDeregistrationDelay(Integer deregistrationDelay) { - this.deregistrationDelay = deregistrationDelay; - } - - public Boolean getStickinessEnabled() { - return stickinessEnabled; - } - - public void setStickinessEnabled(Boolean stickinessEnabled) { - this.stickinessEnabled = stickinessEnabled; - } - - public String getStickinessType() { - return stickinessType; - } - - public void setStickinessType(String stickinessType) { - this.stickinessType = stickinessType; - } - - public Integer getStickinessDuration() { - return stickinessDuration; - } - - public void setStickinessDuration(Integer stickinessDuration) { - this.stickinessDuration = stickinessDuration; - } - - public Boolean getProxyProtocolV2() { return proxyProtocolV2; } - - public void setProxyProtocolV2(Boolean proxyProtocolV2) { this.proxyProtocolV2 = 
proxyProtocolV2; } - - } - - public static class RuleCondition { - private String field; - private List values; - - public String getField() { - return field; - } - - public void setField(String field) { - this.field = field; - } - - public List getValues() { - return values; - } - - public void setValues(List values) { - this.values = values; - } - } - - public static class Rule { - private Integer priority; - private List actions; - private List conditions; - - public Integer getPriority() { - return priority; - } - - public void setPriority(Integer priority) { - this.priority = priority; - } - - public List getActions() { - return actions; - } - - public void setActions(List actions) { - this.actions = actions; - } - - public List getConditions() { - return conditions; - } - - public void setConditions(List conditions) { - this.conditions = conditions; - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmiTagsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmiTagsDescription.groovy index 1e52d52a416..7343c3878b1 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmiTagsDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmiTagsDescription.groovy @@ -17,8 +17,20 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig + class UpsertAmiTagsDescription extends AbstractAmazonCredentialsDescription { String amiName Collection regions Map tags + + @Override + boolean requiresApplicationRestriction() { + return false + } + + @Override + boolean requiresAuthorization(SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps) { + return !opsSecurityConfigProps.allowUnauthenticatedImageTaggingInAccounts.contains(account) + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgLifecycleHookDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgLifecycleHookDescription.groovy index 774cff3ab72..1db9ebb287c 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgLifecycleHookDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgLifecycleHookDescription.groovy @@ -18,8 +18,9 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.DefaultResult import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.Transition +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable -class UpsertAsgLifecycleHookDescription extends AbstractAmazonCredentialsDescription { +class UpsertAsgLifecycleHookDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { // required String serverGroupName @@ -33,5 +34,10 @@ class UpsertAsgLifecycleHookDescription extends AbstractAmazonCredentialsDescrip String notificationMetadata Integer heartbeatTimeout = 3600 DefaultResult defaultResult = DefaultResult.ABANDON + + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } } diff --git 
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgScheduledActionsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgScheduledActionsDescription.groovy index 585ae85ca99..e19ec48c00a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgScheduledActionsDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgScheduledActionsDescription.groovy @@ -16,9 +16,10 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable import groovy.transform.ToString -class UpsertAsgScheduledActionsDescription extends AbstractAmazonCredentialsDescription { +class UpsertAsgScheduledActionsDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { List asgs = [] List scheduledActions @@ -31,4 +32,8 @@ class UpsertAsgScheduledActionsDescription extends AbstractAmazonCredentialsDesc Integer desiredCapacity } + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgTagsDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgTagsDescription.groovy index dc6c0916916..816f1452b97 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgTagsDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAsgTagsDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description -class UpsertAsgTagsDescription extends AbstractAmazonCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class UpsertAsgTagsDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { String serverGroupName String region @@ -29,4 +31,9 @@ class UpsertAsgTagsDescription extends AbstractAmazonCredentialsDescription { @Deprecated List regions = [] + + @Override + Collection getServerGroupNames() { + return asgs.collect { it.serverGroupName } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertScalingPolicyDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertScalingPolicyDescription.groovy index 990885d19e6..eda74b87ab3 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertScalingPolicyDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertScalingPolicyDescription.groovy @@ -18,8 +18,9 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description import com.amazonaws.services.autoscaling.model.StepAdjustment import com.amazonaws.services.autoscaling.model.TargetTrackingConfiguration +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable -class UpsertScalingPolicyDescription extends AbstractAmazonCredentialsDescription { +class UpsertScalingPolicyDescription extends AbstractAmazonCredentialsDescription implements ServerGroupsNameable { // required String region @@ -39,9 +40,14 
@@ class UpsertScalingPolicyDescription extends AbstractAmazonCredentialsDescriptio UpsertAlarmDescription alarm + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } + static class Simple { Integer cooldown = 600 - Integer scalingAdjustment + Integer scalingAdjustment } static class Step { diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertSecurityGroupDescription.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertSecurityGroupDescription.groovy index 0436d82ed47..2fa0f6ae721 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertSecurityGroupDescription.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertSecurityGroupDescription.groovy @@ -16,9 +16,11 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description +import com.amazonaws.services.ec2.model.Tag +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable import groovy.transform.Canonical -class UpsertSecurityGroupDescription extends AbstractAmazonCredentialsDescription { +class UpsertSecurityGroupDescription extends AbstractAmazonCredentialsDescription implements ResourcesNameable { String name String description String vpcId @@ -29,10 +31,18 @@ class UpsertSecurityGroupDescription extends AbstractAmazonCredentialsDescriptio boolean ingressAppendOnly = false + Map tags + + @Override + Collection getNames() { + return [name] + } + static abstract class Ingress { Integer startPort Integer endPort String ipProtocol + String description @Deprecated void setType(String ipProtocol) { this.ipProtocol = ipProtocol diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandler.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandler.groovy index 3ed0806be1f..d6418983daf 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandler.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandler.groovy @@ -16,18 +16,20 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.handlers -import com.amazonaws.services.autoscaling.model.BlockDeviceMapping +import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest -import com.amazonaws.services.autoscaling.model.LaunchConfiguration import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest import com.google.common.annotations.VisibleForTesting import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchTemplateRollOutConfig import com.netflix.spinnaker.config.AwsConfiguration import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults import com.netflix.spinnaker.clouddriver.aws.deploy.AmiIdResolver -import com.netflix.spinnaker.clouddriver.aws.deploy.AutoScalingWorker -import com.netflix.spinnaker.clouddriver.aws.deploy.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils +import 
com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig import com.netflix.spinnaker.clouddriver.aws.deploy.ResolvedAmiResult import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerLookupHelper @@ -44,7 +46,7 @@ import com.netflix.spinnaker.clouddriver.deploy.DeployDescription import com.netflix.spinnaker.clouddriver.deploy.DeployHandler import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.clouddriver.orchestration.events.CreateServerGroupEvent -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.transform.PackageScope import groovy.util.logging.Slf4j @@ -55,42 +57,34 @@ class BasicAmazonDeployHandler implements DeployHandler accountCredentialsRepository private final AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider private final AwsConfiguration.DeployDefaults deployDefaults private final ScalingPolicyCopier scalingPolicyCopier private final BlockDeviceConfig blockDeviceConfig + private final LaunchTemplateRollOutConfig launchTemplateRollOutConfig private List deployEvents = [] BasicAmazonDeployHandler(RegionScopedProviderFactory regionScopedProviderFactory, - AccountCredentialsRepository accountCredentialsRepository, + CredentialsRepository accountCredentialsRepository, AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider, AwsConfiguration.DeployDefaults deployDefaults, ScalingPolicyCopier scalingPolicyCopier, - BlockDeviceConfig blockDeviceConfig) { + BlockDeviceConfig blockDeviceConfig, + LaunchTemplateRollOutConfig launchTemplateRollOutConfig) { this.regionScopedProviderFactory = regionScopedProviderFactory this.accountCredentialsRepository = accountCredentialsRepository this.amazonServerGroupProvider = amazonServerGroupProvider this.deployDefaults = deployDefaults this.scalingPolicyCopier = scalingPolicyCopier this.blockDeviceConfig = blockDeviceConfig + this.launchTemplateRollOutConfig = launchTemplateRollOutConfig } @Override @@ -100,9 +94,10 @@ class BasicAmazonDeployHandler implements DeployHandler> entry : description.availabilityZones) { String region = entry.key @@ -226,95 +221,135 @@ class BasicAmazonDeployHandler implements DeployHandler lifecycleHooks = getLifecycleHooks(targetCredentials, description) - if (lifecycleHooks.size() > 0) { - targetRegionScopedProvider.asgLifecycleHookWorker.attach(task, lifecycleHooks, targetAsgName) - } - } catch (Exception e) { - task.updateStatus(BASE_PHASE, "Unable to attach lifecycle hooks to ASG ($targetAsgName): ${e.message}") - } - } - @VisibleForTesting @PackageScope static List getLifecycleHooks(NetflixAmazonCredentials credentials, BasicAmazonDeployDescription description) { @@ -472,110 +485,30 @@ class BasicAmazonDeployHandler implements DeployHandler convertBlockDevices(List blockDeviceMappings) { - blockDeviceMappings.collect { - def device = new AmazonBlockDevice(deviceName: it.deviceName, virtualName: it.virtualName) - it.ebs?.with { - device.iops = iops - device.deleteOnTermination = deleteOnTermination - device.size = volumeSize - device.volumeType = volumeType - device.snapshotId = snapshotId - if (snapshotId == null) { - // only set encryption if snapshotId isn't provided. 
AWS will error out otherwise - device.encrypted = encrypted - } - } - device - } - } - - static String iamRole(BasicAmazonDeployDescription description, DeployDefaults deployDefaults) { - def iamRole = description.iamRole ?: deployDefaults.iamRole - return description.application ? iamRole.replaceAll(Pattern.quote('{{application}}'), description.application) : iamRole - } - - private RegionScopedProviderFactory.RegionScopedProvider buildSourceRegionScopedProvider(Task task, - BasicAmazonDeployDescription.Source source) { - if (source.account && source.region && source.asgName) { - def sourceRegion = source.region - def sourceAsgCredentials = accountCredentialsRepository.getOne(source.account) as NetflixAmazonCredentials - def regionScopedProvider = regionScopedProviderFactory.forRegion(sourceAsgCredentials, sourceRegion) - - def sourceAsgs = regionScopedProvider.autoScaling.describeAutoScalingGroups( - new DescribeAutoScalingGroupsRequest(autoScalingGroupNames: [source.asgName]) - ) - - if (!sourceAsgs.autoScalingGroups) { - task.updateStatus BASE_PHASE, "Unable to locate source asg (${source.account}:${source.region}:${source.asgName})" - return null - } - - return regionScopedProvider - } - - return null - } - - private static void validateInstanceType(ResolvedAmiResult ami, String instanceType) { - String family = instanceType?.contains('.') ? instanceType.split("\\.")[0] : '' - boolean familyIsKnown = KNOWN_VIRTUALIZATION_FAMILIES.containsKey(ami.virtualizationType) && - KNOWN_VIRTUALIZATION_FAMILIES.any { it.value.contains(family) } - if (familyIsKnown && !KNOWN_VIRTUALIZATION_FAMILIES[ami.virtualizationType].contains(family)) { - throw new IllegalArgumentException("Instance type ${instanceType} does not support " + - "virtualization type ${ami.virtualizationType}. Please select a different image or instance type.") - } - } - - private static boolean getDefaultEbsOptimizedFlag(String instanceType) { - String family = instanceType?.contains('.') ? instanceType.split("\\.")[0] : '' - return DEFAULT_EBS_OPTIMIZED_FAMILIES.contains(family) - } - /** - * Determine block devices + * Default unlimitedCpuCredits to false if applicable (i.e. burstable performance instance type), and not specified. * - * If: - * - The source launch configuration is using default block device mappings - * - The instance type has changed + * For the multiple instance types case, the Spinnaker default false is used ONLY if all types support bursting to ensure compatibility with ALL instance types. + * In such cases, the AWS default comes into play. 
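+ * e.g. (illustrative) [t2.small, t3.large] are all burstable, so false is returned; [t2.small, m5.large] yields null, deferring to the AWS default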
* - * Then: - * - Re-generate block device mappings based on the new instance type + * Reasoning: + * 1) consistent default cpu credits value for burstable performance instance families + * AWS default mode if cpu credits is not specified depends on the instance family: + * * t2: standard + * * t3/t3a: unlimited * - * Otherwise: - * - Continue to use any custom block device mappings (if set) + * 2) let users explicitly choose 'unlimited' bursting which could translate to higher instance costs, depending on usage */ @VisibleForTesting - @PackageScope - Collection buildBlockDeviceMappings( - BasicAmazonDeployDescription description, - LaunchConfiguration sourceLaunchConfiguration - ) { - if (description.blockDevices != null) { - // block device mappings have been explicitly specified and should be used regardless of instance type - return description.blockDevices - } + static Boolean getDefaultUnlimitedCpuCredits(final Set instanceTypes) { - if (sourceLaunchConfiguration.instanceType != description.instanceType) { - // instance type has changed, verify that the block device mappings are still legitimate (ebs vs. ephemeral) - def blockDevicesForSourceAsg = sourceLaunchConfiguration.blockDeviceMappings.collect { - [deviceName: it.deviceName, virtualName: it.virtualName, size: it.ebs?.volumeSize] - }.sort { it.deviceName } - def blockDevicesForSourceInstanceType = blockDeviceConfig.getBlockDevicesForInstanceType( - sourceLaunchConfiguration.instanceType - ).collect { - [deviceName: it.deviceName, virtualName: it.virtualName, size: it.size] - }.sort { it.deviceName } - - if (blockDevicesForSourceAsg == blockDevicesForSourceInstanceType) { - // use default block mappings for the new instance type (since default block mappings were used on the previous instance type) - return blockDeviceConfig.getBlockDevicesForInstanceType(description.instanceType) - } - } + // return the default, false only if all instance types support bursting + return InstanceTypeUtils.isBurstingSupportedByAllTypes(instanceTypes) ? false : null + } - return convertBlockDevices(sourceLaunchConfiguration.blockDeviceMappings) + static String iamRole(BasicAmazonDeployDescription description, DeployDefaults deployDefaults) { + def iamRole = description.iamRole ?: deployDefaults.iamRole + return description.application ? iamRole.replaceAll(Pattern.quote('{{application}}'), description.application) : iamRole } @VisibleForTesting @@ -619,4 +552,68 @@ class BasicAmazonDeployHandler implements DeployHandler buildBlockDeviceMappingsFromSourceAsg( + RegionScopedProviderFactory.RegionScopedProvider sourceAsgRegionScopedProvider, + AutoScalingGroup sourceAsg, + BasicAmazonDeployDescription newAsgDescription) { + + // if block device mappings are explicitly specified, they should be used regardless of source ASG settings + if (newAsgDescription.blockDevices != null) { + return newAsgDescription.blockDevices + } + + if (newAsgDescription.getInstanceType() != AsgConfigHelper.getTopLevelInstanceTypeForAsg(sourceAsg, sourceAsgRegionScopedProvider)) { + // If instance type(s) being requested is NOT the same as those in source ASG, + // get default mapping for the new type ONLY IF that same logic was applied for source ASG. + // For the case of multiple instance types in request, top-level instance type is used to derive defaults. 
+ // Top-level instance type is nothing but the description.instanceType + def blockDevicesForSourceAsg = AsgConfigHelper.getBlockDeviceMappingForAsg(sourceAsg, sourceAsgRegionScopedProvider) + .collect { [deviceName: it.deviceName, virtualName: it.virtualName, size: it.size] } + .sort { it.deviceName } + + def defaultBlockDevicesForSourceInsType = + blockDeviceConfig.getBlockDevicesForInstanceType(AsgConfigHelper.getTopLevelInstanceTypeForAsg(sourceAsg, sourceAsgRegionScopedProvider)) + .collect { [deviceName: it.deviceName, virtualName: it.virtualName, size: it.size] } + .sort { it.deviceName } + + boolean isDefaultMappingUsedInSourceAsg = blockDevicesForSourceAsg == defaultBlockDevicesForSourceInsType + if (isDefaultMappingUsedInSourceAsg) { + return blockDeviceConfig.getBlockDevicesForInstanceType(newAsgDescription.getInstanceType()) + } + } + + return AsgConfigHelper.getBlockDeviceMappingForAsg(sourceAsg, sourceAsgRegionScopedProvider) + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateClusterConfigurationStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateClusterConfigurationStrategy.java deleted file mode 100644 index fd3ac289339..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateClusterConfigurationStrategy.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.netflix.spinnaker.config.AwsConfiguration; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; - -public class DefaultMigrateClusterConfigurationStrategy extends MigrateClusterConfigurationStrategy { - - private AmazonClientProvider amazonClientProvider; - private RegionScopedProviderFactory regionScopedProviderFactory; - private AwsConfiguration.DeployDefaults deployDefaults; - - public DefaultMigrateClusterConfigurationStrategy(AmazonClientProvider amazonClientProvider, - RegionScopedProviderFactory regionScopedProviderFactory, - AwsConfiguration.DeployDefaults deployDefaults) { - this.amazonClientProvider = amazonClientProvider; - this.regionScopedProviderFactory = regionScopedProviderFactory; - this.deployDefaults = deployDefaults; - } - - @Override - public AmazonClientProvider getAmazonClientProvider() { - return amazonClientProvider; - } - - @Override - public RegionScopedProviderFactory getRegionScopedProviderFactory() { - return regionScopedProviderFactory; - } - - @Override - public AwsConfiguration.DeployDefaults getDeployDefaults() { - return deployDefaults; - } - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateLoadBalancerStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateLoadBalancerStrategy.java deleted file mode 100644 index 7bf26912f38..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateLoadBalancerStrategy.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; - -public class DefaultMigrateLoadBalancerStrategy extends MigrateLoadBalancerStrategy { - - private AmazonClientProvider amazonClientProvider; - private RegionScopedProviderFactory regionScopedProviderFactory; - private DeployDefaults deployDefaults; - - @Override - public AmazonClientProvider getAmazonClientProvider() { - return amazonClientProvider; - } - - @Override - public RegionScopedProviderFactory getRegionScopedProviderFactory() { - return regionScopedProviderFactory; - } - - @Override - public DeployDefaults getDeployDefaults() { - return deployDefaults; - } - - public DefaultMigrateLoadBalancerStrategy(AmazonClientProvider amazonClientProvider, - RegionScopedProviderFactory regionScopedProviderFactory, - DeployDefaults deployDefaults) { - this.amazonClientProvider = amazonClientProvider; - this.regionScopedProviderFactory = regionScopedProviderFactory; - this.deployDefaults = deployDefaults; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateSecurityGroupStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateSecurityGroupStrategy.java deleted file mode 100644 index a0316a1c04b..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateSecurityGroupStrategy.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; - -import java.util.ArrayList; -import java.util.List; - -public class DefaultMigrateSecurityGroupStrategy extends MigrateSecurityGroupStrategy { - - private AmazonClientProvider amazonClientProvider; - - private List infrastructureApplications; - - public AmazonClientProvider getAmazonClientProvider() { - return amazonClientProvider; - } - - @Override - public List getInfrastructureApplications() { - return infrastructureApplications != null ? 
infrastructureApplications : new ArrayList<>(); - } - - public DefaultMigrateSecurityGroupStrategy(AmazonClientProvider amazonClientProvider, List infrastructureApplications) { - this.amazonClientProvider = amazonClientProvider; - this.infrastructureApplications = infrastructureApplications; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateServerGroupStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateServerGroupStrategy.java deleted file mode 100644 index c67b8af12e9..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/DefaultMigrateServerGroupStrategy.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; -import com.netflix.spinnaker.clouddriver.aws.deploy.converters.AllowLaunchAtomicOperationConverter; -import com.netflix.spinnaker.clouddriver.aws.deploy.validators.BasicAmazonDeployDescriptionValidator; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; - -public class DefaultMigrateServerGroupStrategy extends MigrateServerGroupStrategy { - - private AmazonClientProvider amazonClientProvider; - private RegionScopedProviderFactory regionScopedProviderFactory; - private DeployDefaults deployDefaults; - private BasicAmazonDeployHandler basicAmazonDeployHandler; - private BasicAmazonDeployDescriptionValidator basicAmazonDeployDescriptionValidator; - private AllowLaunchAtomicOperationConverter allowLaunchAtomicOperationConverter; - - public DefaultMigrateServerGroupStrategy(AmazonClientProvider amazonClientProvider, - BasicAmazonDeployHandler basicAmazonDeployHandler, - RegionScopedProviderFactory regionScopedProviderFactory, - BasicAmazonDeployDescriptionValidator basicAmazonDeployDescriptionValidator, - AllowLaunchAtomicOperationConverter allowLaunchAtomicOperationConverter, - DeployDefaults deployDefaults) { - - this.amazonClientProvider = amazonClientProvider; - this.basicAmazonDeployHandler = basicAmazonDeployHandler; - this.regionScopedProviderFactory = regionScopedProviderFactory; - this.basicAmazonDeployDescriptionValidator = basicAmazonDeployDescriptionValidator; - this.allowLaunchAtomicOperationConverter = allowLaunchAtomicOperationConverter; - this.deployDefaults = deployDefaults; - } - - @Override - public AmazonClientProvider getAmazonClientProvider() { - return amazonClientProvider; - } - - @Override - public RegionScopedProviderFactory getRegionScopedProviderFactory() { - return regionScopedProviderFactory; - } - - @Override - public DeployDefaults getDeployDefaults() { - return deployDefaults; - } - - @Override - public BasicAmazonDeployHandler 
getBasicAmazonDeployHandler() { - return basicAmazonDeployHandler; - } - - @Override - public BasicAmazonDeployDescriptionValidator getBasicAmazonDeployDescriptionValidator() { - return basicAmazonDeployDescriptionValidator; - } - - @Override - AllowLaunchAtomicOperationConverter getAllowLaunchAtomicOperationConverter() { - return allowLaunchAtomicOperationConverter; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerUpsertHandler.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerUpsertHandler.groovy index d45ec15895d..012682bcc4e 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerUpsertHandler.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerUpsertHandler.groovy @@ -23,13 +23,17 @@ import com.amazonaws.services.elasticloadbalancing.model.CreateLoadBalancerListe import com.amazonaws.services.elasticloadbalancing.model.CreateLoadBalancerRequest import com.amazonaws.services.elasticloadbalancing.model.DeleteLoadBalancerListenersRequest import com.amazonaws.services.elasticloadbalancing.model.Listener +import com.amazonaws.services.elasticloadbalancing.model.ListenerDescription import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription import com.amazonaws.services.elasticloadbalancing.model.ModifyLoadBalancerAttributesRequest +import com.amazonaws.services.elasticloadbalancing.model.SetLoadBalancerPoliciesOfListenerRequest import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException +import groovy.util.logging.Slf4j +@Slf4j class LoadBalancerUpsertHandler { private static final String BASE_PHASE = "UPSERT_ELB" @@ -67,20 +71,27 @@ class LoadBalancerUpsertHandler { // no need to recreate existing listeners listeners.removeAll(existingListeners) + final List listenerDescriptionsToRemove = loadBalancer + .listenerDescriptions + .findAll { + it.listener in listenersToRemove + } - listenersToRemove.each { - loadBalancing.deleteLoadBalancerListeners( - new DeleteLoadBalancerListenersRequest(loadBalancerName, [it.loadBalancerPort]) - ) - task.updateStatus BASE_PHASE, "Listener removed from ${loadBalancerName} (${it.loadBalancerPort}:${it.protocol}:${it.instancePort})." - } - - def createListener = { Listener listener, boolean isRollback -> + def createListener = { ListenerDescription listenerDescription, boolean isRollback -> try { - loadBalancing.createLoadBalancerListeners(new CreateLoadBalancerListenersRequest(loadBalancerName, [listener])) - task.updateStatus BASE_PHASE, "Listener ${isRollback ? 'rolled back on' : 'added to'} ${loadBalancerName} (${listener.loadBalancerPort}:${listener.protocol}:${listener.instancePort})." + loadBalancing.createLoadBalancerListeners(new CreateLoadBalancerListenersRequest(loadBalancerName, [listenerDescription.listener])) + if (!listenerDescription.policyNames.isEmpty()) { + ensureSetLoadBalancerListenerPolicies(loadBalancerName, listenerDescription, loadBalancing) + } + + task.updateStatus BASE_PHASE, + "Listener ${isRollback ? 
'rolled back on' : 'added to'} ${loadBalancerName} " + + "(${listenerDescription.listener.loadBalancerPort}:${listenerDescription.listener.protocol}:${listenerDescription.listener.instancePort})." } catch (AmazonServiceException e) { - def exceptionMessage = "Failed to ${isRollback ? 'roll back' : 'add'} listener to ${loadBalancerName} (${listener.loadBalancerPort}:${listener.protocol}:${listener.instancePort}) - reason: ${e.errorMessage}." + def exceptionMessage = "Failed to ${isRollback ? 'roll back' : 'add'} listener to ${loadBalancerName} " + + "(${listenerDescription.listener.loadBalancerPort}:${listenerDescription.listener.protocol}:${listenerDescription.listener.instancePort}) " + + "- reason: ${e.errorMessage}." + task.updateStatus BASE_PHASE, exceptionMessage amazonErrors << exceptionMessage return false @@ -89,16 +100,36 @@ class LoadBalancerUpsertHandler { } boolean rollback = false - listeners - .each { Listener listener -> - if (!createListener(listener, false)) { - rollback = true - } + listenerDescriptionsToRemove.each { + try { + loadBalancing.deleteLoadBalancerListeners( + new DeleteLoadBalancerListenersRequest(loadBalancerName, [it.listener.loadBalancerPort]) + ) + + task.updateStatus BASE_PHASE, + "Listener removed from ${loadBalancerName} (${it.listener.loadBalancerPort}:${it.listener.protocol}:${it.listener.instancePort})." + } catch(AmazonServiceException e) { + // Rollback as this failure will result in an exception when creating listeners. + task.updateStatus BASE_PHASE, "Failed to remove listener $it: $e.errorMessage." + amazonErrors << e.errorMessage } + } + + listeners.each { listener -> + final List policyNames = loadBalancer + .listenerDescriptions.find { + it.listener.loadBalancerPort == listener.loadBalancerPort && it.listener.protocol == listener.protocol + }?.policyNames + + final ListenerDescription description = new ListenerDescription(listener: listener, policyNames: policyNames) + if (!createListener(description, false)) { + rollback = true + } + } - if (rollback) { - listenersToRemove.each { Listener listener -> - createListener(listener, true) + if (amazonErrors || rollback) { + listenerDescriptionsToRemove.each { + createListener(it, true) } } } @@ -149,4 +180,21 @@ class LoadBalancerUpsertHandler { listener.instancePort != 0 && listener.loadBalancerPort != 0 && listener.protocol } + /** + * Ensures policies set in the request are applied to the load balancer + */ + private static void ensureSetLoadBalancerListenerPolicies( + String loadBalancerName, ListenerDescription listenerDescription, AmazonElasticLoadBalancing loadBalancing) { + final SetLoadBalancerPoliciesOfListenerRequest policyRequest = new SetLoadBalancerPoliciesOfListenerRequest() + .withLoadBalancerName(loadBalancerName) + .withLoadBalancerPort(listenerDescription.listener.loadBalancerPort) + + try { + loadBalancing.setLoadBalancerPoliciesOfListener( + policyRequest.withPolicyNames(listenerDescription.policyNames) + ) + } catch(AmazonServiceException e) { + log.error("Failed to set listener policies on loadbalancer $loadBalancerName: $e.errorMessage") + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerV2UpsertHandler.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerV2UpsertHandler.groovy index 866de0878fe..f46c9816dfb 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerV2UpsertHandler.groovy +++ 
b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/LoadBalancerV2UpsertHandler.groovy @@ -18,85 +18,126 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.handlers import com.amazonaws.AmazonServiceException import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing -import com.amazonaws.services.elasticloadbalancingv2.model.Action -import com.amazonaws.services.elasticloadbalancingv2.model.CreateListenerRequest -import com.amazonaws.services.elasticloadbalancingv2.model.CreateListenerResult -import com.amazonaws.services.elasticloadbalancingv2.model.CreateLoadBalancerRequest -import com.amazonaws.services.elasticloadbalancingv2.model.CreateRuleRequest -import com.amazonaws.services.elasticloadbalancingv2.model.CreateTargetGroupRequest -import com.amazonaws.services.elasticloadbalancingv2.model.CreateTargetGroupResult -import com.amazonaws.services.elasticloadbalancingv2.model.DeleteListenerRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DeleteRuleRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DeleteTargetGroupRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeListenersRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeRulesRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest -import com.amazonaws.services.elasticloadbalancingv2.model.Listener -import com.amazonaws.services.elasticloadbalancingv2.model.ListenerNotFoundException -import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancer -import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancerTypeEnum -import com.amazonaws.services.elasticloadbalancingv2.model.Matcher -import com.amazonaws.services.elasticloadbalancingv2.model.ModifyListenerRequest -import com.amazonaws.services.elasticloadbalancingv2.model.ModifyTargetGroupAttributesRequest -import com.amazonaws.services.elasticloadbalancingv2.model.ModifyTargetGroupRequest -import com.amazonaws.services.elasticloadbalancingv2.model.ProtocolEnum -import com.amazonaws.services.elasticloadbalancingv2.model.ResourceInUseException -import com.amazonaws.services.elasticloadbalancingv2.model.Rule -import com.amazonaws.services.elasticloadbalancingv2.model.RuleCondition -import com.amazonaws.services.elasticloadbalancingv2.model.SetSecurityGroupsRequest -import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup -import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupAttribute -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults +import com.amazonaws.services.elasticloadbalancingv2.model.* import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerV2Description import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults +import groovy.util.logging.Slf4j +@Slf4j class LoadBalancerV2UpsertHandler { private static final String BASE_PHASE = "UPSERT_ELB_V2" + private static final String ATTRIBUTE_IDLE_TIMEOUT = "idle_timeout.timeout_seconds" + private static final String ATTRIBUTE_DELETION_PROTECTION = "deletion_protection.enabled" + private static final String ATTRIBUTE_LOAD_BALANCING_CROSS_ZONE = "load_balancing.cross_zone.enabled" + + //Defaults for Target Group Attributes + private static final 
String DEREGISTRATION_DELAY = "300" + private static final Boolean STICKINESS_ENABLED = false + private static final String STICKINESS_TYPE = "lb_cookie" + private static final String STICKINESS_DURATION = "86400" + private static final Boolean PROXY_PROTOCOL_V2 = false + private static final Boolean CONNECTION_TERMINATION = false + /** The following attribute is supported only if the target is a Lambda function. */ + private static final Boolean MULTI_VALUE_HEADERS_ENABLED = false + private static Task getTask() { TaskRepository.threadLocalTask.get() } - private static String modifyTargetGroupAttributes(AmazonElasticLoadBalancing loadBalancing, LoadBalancer loadBalancer, TargetGroup targetGroup, UpsertAmazonLoadBalancerV2Description.Attributes attributes) { - return modifyTargetGroupAttributes(loadBalancing, loadBalancer, targetGroup, attributes, null) - } - private static String modifyTargetGroupAttributes(AmazonElasticLoadBalancing loadBalancing, LoadBalancer loadBalancer, TargetGroup targetGroup, UpsertAmazonLoadBalancerV2Description.Attributes attributes, DeployDefaults deployDefaults) { + // Create target group attributes from the values provided in the description, falling back to the defaults above otherwise + static String createTargetGroupAttributes(AmazonElasticLoadBalancing loadBalancing, LoadBalancer loadBalancer, TargetGroup targetGroup, UpsertAmazonLoadBalancerV2Description.Attributes attributes, DeployDefaults deployDefaults) { def targetGroupAttributes = [] + log.info("Creating target group attributes for targetGroup {}", targetGroup.targetGroupName) if (attributes) { - Integer deregistrationDelay = [attributes.deregistrationDelay, deployDefaults?.loadBalancing?.deregistrationDelayDefault].findResult(Closure.IDENTITY) - if (deregistrationDelay != null) { - targetGroupAttributes.add(new TargetGroupAttribute(key: "deregistration_delay.timeout_seconds", value: deregistrationDelay.toString())) + if (TargetTypeEnum.Lambda.toString().equalsIgnoreCase(targetGroup.getTargetType())) { + def multiValueHeaderAttribute = attributes.multiValueHeadersEnabled ?: MULTI_VALUE_HEADERS_ENABLED + targetGroupAttributes.add(new TargetGroupAttribute(key: "lambda.multi_value_headers.enabled", value: multiValueHeaderAttribute)) + + } else { + Integer deregistrationDelay = [attributes.deregistrationDelay, deployDefaults?.loadBalancing?.deregistrationDelayDefault].findResult(Closure.IDENTITY) + + def deregistrationDelayAttribute = deregistrationDelay?.toString() ?: DEREGISTRATION_DELAY + targetGroupAttributes.add(new TargetGroupAttribute(key: "deregistration_delay.timeout_seconds", value: deregistrationDelayAttribute)) } if (loadBalancer.type == 'application') { - if (attributes.stickinessEnabled != null) { - targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.enabled", value: attributes.stickinessEnabled.toString())) + def stickinessEnabledAttribute = attributes.stickinessEnabled?.toString() ?: STICKINESS_ENABLED + targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.enabled", value: stickinessEnabledAttribute)) + + def stickinessTypeAttribute = attributes.stickinessType ?: STICKINESS_TYPE + targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.type", value: stickinessTypeAttribute)) + + def stickinessDurationAttribute = attributes.stickinessDuration?.toString() ?: STICKINESS_DURATION + targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.lb_cookie.duration_seconds", value: stickinessDurationAttribute)) + + } + if (loadBalancer.type == 'network') { + def 
proxyProtocolV2Attribute = attributes.proxyProtocolV2 ?: PROXY_PROTOCOL_V2 + targetGroupAttributes.add(new TargetGroupAttribute(key: "proxy_protocol_v2.enabled", value: proxyProtocolV2Attribute)) + + def enableConnectionTermination = attributes.deregistrationDelayConnectionTermination ?: CONNECTION_TERMINATION + targetGroupAttributes.add(new TargetGroupAttribute(key: "deregistration_delay.connection_termination.enabled", value: enableConnectionTermination)) + + } + } + return updateTargetGroupAttributes(loadBalancing, targetGroup, targetGroupAttributes) + } + + // Modify only the target group attributes that are set in the description; leave unset attributes unchanged + private static String modifyTargetGroupAttributes(AmazonElasticLoadBalancing loadBalancing, LoadBalancer loadBalancer, TargetGroup targetGroup, UpsertAmazonLoadBalancerV2Description.Attributes attributes, DeployDefaults deployDefaults) { + + log.info("Update target group attributes for targetGroup {}", targetGroup.targetGroupName) + def targetGroupAttributes = [] + if (attributes) { + if (TargetTypeEnum.Lambda.toString().equalsIgnoreCase(targetGroup.getTargetType())) { + if (attributes.multiValueHeadersEnabled != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "lambda.multi_value_headers.enabled", value: attributes.multiValueHeadersEnabled)) } - if (attributes.stickinessType != null) { - targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.type", value: attributes.stickinessType)) + } else { + Integer deregistrationDelay = [attributes.deregistrationDelay, deployDefaults?.loadBalancing?.deregistrationDelayDefault].findResult(Closure.IDENTITY) + if (deregistrationDelay != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "deregistration_delay.timeout_seconds", value: deregistrationDelay.toString())) } - if (attributes.stickinessDuration != null) { - targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.lb_cookie.duration_seconds", value: attributes.stickinessDuration.toString())) + if (loadBalancer.type == 'application') { + if (attributes.stickinessEnabled != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.enabled", value: attributes.stickinessEnabled.toString())) + } + if (attributes.stickinessType != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.type", value: attributes.stickinessType)) + } + if (attributes.stickinessDuration != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "stickiness.lb_cookie.duration_seconds", value: attributes.stickinessDuration.toString())) + } } - } - if(loadBalancer.type == 'network' ){ - if(attributes.proxyProtocolV2 != null){ - targetGroupAttributes.add(new TargetGroupAttribute(key: "proxy_protocol_v2.enabled", value: attributes.proxyProtocolV2)) + if (loadBalancer.type == 'network') { + if (attributes.proxyProtocolV2 != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "proxy_protocol_v2.enabled", value: attributes.proxyProtocolV2)) + } + + if (attributes.deregistrationDelayConnectionTermination != null) { + targetGroupAttributes.add(new TargetGroupAttribute(key: "deregistration_delay.connection_termination.enabled", value: attributes.deregistrationDelayConnectionTermination)) + } + } } } + return updateTargetGroupAttributes(loadBalancing, targetGroup, targetGroupAttributes) + } - try { - loadBalancing.modifyTargetGroupAttributes(new ModifyTargetGroupAttributesRequest() - .withTargetGroupArn(targetGroup.targetGroupArn) - 
.withAttributes(targetGroupAttributes)) - task.updateStatus BASE_PHASE, "Modified target group ${targetGroup.targetGroupName} attributes." - } catch (AmazonServiceException e) { - def exceptionMessage = "Failed to modify attributes for target group ${targetGroup.targetGroupName} - reason: ${e.errorMessage}." - task.updateStatus BASE_PHASE, exceptionMessage - return exceptionMessage + static String updateTargetGroupAttributes(AmazonElasticLoadBalancing loadBalancing, TargetGroup targetGroup, List targetGroupAttributes) { + if (!targetGroupAttributes.isEmpty()) { + try { + loadBalancing.modifyTargetGroupAttributes(new ModifyTargetGroupAttributesRequest() + .withTargetGroupArn(targetGroup.targetGroupArn) + .withAttributes(targetGroupAttributes)) + task.updateStatus BASE_PHASE, "Modified target group ${targetGroup.targetGroupName} attributes." + } catch (AmazonServiceException e) { + return handleError("Failed to modify attributes for target group ${targetGroup.targetGroupName} - reason: ${e.toString()}.", e) + } } return null } @@ -108,32 +149,52 @@ class LoadBalancerV2UpsertHandler { targetGroupsToCreate.each { targetGroup -> TargetGroup createdTargetGroup try { + String status = "Target group created in ${loadBalancerName} (${targetGroup.name}:${targetGroup.port}:${targetGroup.protocol})." + CreateTargetGroupRequest createTargetGroupRequest = new CreateTargetGroupRequest(); + if (TargetTypeEnum.Lambda.toString().equalsIgnoreCase(targetGroup.targetType)) { - CreateTargetGroupRequest createTargetGroupRequest = new CreateTargetGroupRequest() - .withProtocol(targetGroup.protocol) - .withPort(targetGroup.port) - .withName(targetGroup.name) - .withVpcId(loadBalancer.vpcId) - .withHealthCheckIntervalSeconds(targetGroup.healthCheckInterval) - .withHealthCheckPort(targetGroup.healthCheckPort) - .withHealthCheckProtocol(targetGroup.healthCheckProtocol) - .withHealthyThresholdCount(targetGroup.healthyThreshold) - .withUnhealthyThresholdCount(targetGroup.unhealthyThreshold) - .withTargetType(targetGroup.targetType) - - if (targetGroup.healthCheckProtocol in [ProtocolEnum.HTTP, ProtocolEnum.HTTPS]) { - createTargetGroupRequest.withMatcher(new Matcher().withHttpCode(targetGroup.healthCheckMatcher)) - .withHealthCheckPath(targetGroup.healthCheckPath) + createTargetGroupRequest.withName(targetGroup.name) + .withHealthCheckIntervalSeconds(targetGroup.healthCheckInterval) .withHealthCheckTimeoutSeconds(targetGroup.healthCheckTimeout) - } + .withHealthyThresholdCount(targetGroup.healthyThreshold) + .withUnhealthyThresholdCount(targetGroup.unhealthyThreshold) + .withTargetType(targetGroup.targetType) + .withMatcher(new Matcher().withHttpCode(targetGroup.healthCheckMatcher)) + .withHealthCheckPath(targetGroup.healthCheckPath) + status = "Lambda Target group created in ${loadBalancerName} (${targetGroup.name})." 
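+ // Illustrative note: protocol, port and vpcId do not apply to Lambda target groups, so the request above carries only the name, target type and health-check settings; the instance/ip branch below sets the full network configuration.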
+ + } else { + createTargetGroupRequest.withProtocol(targetGroup.protocol) + .withPort(targetGroup.port) + .withName(targetGroup.name) + .withVpcId(loadBalancer.vpcId) + .withHealthCheckIntervalSeconds(targetGroup.healthCheckInterval) + .withHealthCheckPort(targetGroup.healthCheckPort) + .withHealthCheckProtocol(targetGroup.healthCheckProtocol) + .withHealthyThresholdCount(targetGroup.healthyThreshold) + .withUnhealthyThresholdCount(targetGroup.unhealthyThreshold) + .withTargetType(targetGroup.targetType) + + if (targetGroup.healthCheckProtocol in [ProtocolEnum.HTTP, ProtocolEnum.HTTPS]) { + createTargetGroupRequest + .withHealthCheckPath(targetGroup.healthCheckPath) + + // HTTP(S) health checks on TCP target groups do not support custom matchers or timeouts, and the healthy/unhealthy thresholds must be equal. + if (targetGroup.protocol == ProtocolEnum.TCP) { + createTargetGroupRequest.withUnhealthyThresholdCount(createTargetGroupRequest.getHealthyThresholdCount()) + } else { + createTargetGroupRequest.withMatcher(new Matcher().withHttpCode(targetGroup.healthCheckMatcher)) + .withHealthCheckTimeoutSeconds(targetGroup.healthCheckTimeout) + } + } + } CreateTargetGroupResult createTargetGroupResult = loadBalancing.createTargetGroup( createTargetGroupRequest ) - task.updateStatus BASE_PHASE, "Target group created in ${loadBalancerName} (${targetGroup.name}:${targetGroup.port}:${targetGroup.protocol})." + task.updateStatus BASE_PHASE, status createdTargetGroup = createTargetGroupResult.getTargetGroups().get(0) + } catch (AmazonServiceException e) { - String exceptionMessage = "Failed to create target group ${targetGroup.name} for ${loadBalancerName} - reason: ${e.errorMessage}." - task.updateStatus BASE_PHASE, exceptionMessage - amazonErrors << exceptionMessage + amazonErrors << handleError("Failed to create target group ${targetGroup.name} for ${loadBalancerName} - reason: ${e.toString()}.", e) } if (createdTargetGroup != null) { @@ -141,7 +202,7 @@ class LoadBalancerV2UpsertHandler { createdTargetGroups.add(createdTargetGroup) // Add attributes - String exceptionMessage = modifyTargetGroupAttributes(loadBalancing, loadBalancer, createdTargetGroup, targetGroup.attributes, deployDefaults) + String exceptionMessage = createTargetGroupAttributes(loadBalancing, loadBalancer, createdTargetGroup, targetGroup.attributes, deployDefaults) if (exceptionMessage) { amazonErrors << exceptionMessage } @@ -159,9 +220,7 @@ class LoadBalancerV2UpsertHandler { removedTargetGroups.push(it) task.updateStatus BASE_PHASE, "Target group removed from ${loadBalancer.loadBalancerName} (${it.targetGroupName}:${it.port}:${it.protocol})." } catch (ResourceInUseException e) { - String exceptionMessage = "Failed to delete target group ${it.targetGroupName} from ${loadBalancer.loadBalancerName} - reason: ${e.errorMessage}." 
- task.updateStatus BASE_PHASE, exceptionMessage - amazonErrors << exceptionMessage + amazonErrors << handleError("Failed to delete target group ${it.targetGroupName} from ${loadBalancer.loadBalancerName} - reason: ${e.toString()}.", e) } } return removedTargetGroups @@ -180,16 +239,23 @@ class LoadBalancerV2UpsertHandler { .withUnhealthyThresholdCount(targetGroup.unhealthyThreshold) if (targetGroup.healthCheckProtocol in [ProtocolEnum.HTTP, ProtocolEnum.HTTPS]) { - modifyTargetGroupRequest.withMatcher(new Matcher().withHttpCode(targetGroup.healthCheckMatcher)) + modifyTargetGroupRequest .withHealthCheckPath(targetGroup.healthCheckPath) - .withHealthCheckTimeoutSeconds(targetGroup.healthCheckTimeout) + + // HTTP(S) health checks on TCP target groups do not support custom matchers or timeouts, and the healthy/unhealthy thresholds must be equal. + if (targetGroup.protocol == ProtocolEnum.TCP) { + modifyTargetGroupRequest.withUnhealthyThresholdCount(modifyTargetGroupRequest.getHealthyThresholdCount()) + } else { + modifyTargetGroupRequest.withMatcher(new Matcher().withHttpCode(targetGroup.healthCheckMatcher)) + .withHealthCheckTimeoutSeconds(targetGroup.healthCheckTimeout) + } } loadBalancing.modifyTargetGroup(modifyTargetGroupRequest) task.updateStatus BASE_PHASE, "Target group updated in ${loadBalancer.loadBalancerName} (${awsTargetGroup.targetGroupName}:${awsTargetGroup.port}:${awsTargetGroup.protocol})." // Update attributes - String exceptionMessage = modifyTargetGroupAttributes(loadBalancing, loadBalancer, awsTargetGroup, targetGroup.attributes) + String exceptionMessage = modifyTargetGroupAttributes(loadBalancing, loadBalancer, awsTargetGroup, targetGroup.attributes, null) if (exceptionMessage) { amazonErrors << exceptionMessage } @@ -208,9 +274,7 @@ class LoadBalancerV2UpsertHandler { .withDefaultActions(defaultActions)) task.updateStatus BASE_PHASE, "Listener added to ${loadBalancer.loadBalancerName} (${listener.port}:${listener.protocol})." } catch (AmazonServiceException e) { - String exceptionMessage = "Failed to add listener to ${loadBalancer.loadBalancerName} (${listener.port}:${listener.protocol}) - reason: ${e.errorMessage}." - task.updateStatus BASE_PHASE, exceptionMessage - amazonErrors << exceptionMessage + amazonErrors << handleError("Failed to add listener to ${loadBalancer.loadBalancerName} (${listener.port}:${listener.protocol}) - reason: ${e.toString()}.", e) return false } @@ -221,9 +285,7 @@ class LoadBalancerV2UpsertHandler { loadBalancing.createRule(new CreateRuleRequest(listenerArn: listenerArn, conditions: rule.conditions, actions: rule.actions, priority: Integer.valueOf(rule.priority))) } } catch (AmazonServiceException e) { - String exceptionMessage = "Failed to add rule to listener ${loadBalancer.loadBalancerName} (${listener.port}:${listener.protocol}) reason: ${e.errorMessage}." - task.updateStatus BASE_PHASE, exceptionMessage - amazonErrors << exceptionMessage + amazonErrors << handleError("Failed to add rule to listener ${loadBalancer.loadBalancerName} (${listener.port}:${listener.protocol}) reason: ${e.toString()}.", e) return false } } @@ -257,9 +319,7 @@ class LoadBalancerV2UpsertHandler { .withDefaultActions(defaultActions)) task.updateStatus BASE_PHASE, "Listener ${listenerArn} updated (${listener.port}:${listener.protocol})." } catch (AmazonServiceException e) { - String exceptionMessage = "Failed to modify listener ${listenerArn} (${listener.port}:${listener.protocol}) - reason: ${e.errorMessage}." 
- task.updateStatus BASE_PHASE, exceptionMessage - amazonErrors << exceptionMessage + amazonErrors << handleError("Failed to modify listener ${listenerArn} (${listener.port}:${listener.protocol}) - reason: ${e.toString()}.", e) } // Compare the old rules; if any are different, just replace them all. @@ -279,9 +339,7 @@ class LoadBalancerV2UpsertHandler { try { loadBalancing.createRule(new CreateRuleRequest(listenerArn: listenerArn, conditions: rule.conditions, actions: rule.actions, priority: Integer.valueOf(rule.priority))) } catch (AmazonServiceException e) { - String exceptionMessage = "Failed to add rule to listener ${listenerArn} (${listener.port}:${listener.protocol}) reason: ${e.errorMessage}." - task.updateStatus BASE_PHASE, exceptionMessage - amazonErrors << exceptionMessage + amazonErrors << handleError("Failed to add rule to listener ${listenerArn} (${listener.port}:${listener.protocol}) reason: ${e.toString()}.", e) } } } @@ -294,7 +352,7 @@ class LoadBalancerV2UpsertHandler { task.updateStatus BASE_PHASE, "Listener removed from ${loadBalancer.loadBalancerName} (${it.port}:${it.protocol})." existingListeners.remove(it) } catch (ListenerNotFoundException e) { - task.updateStatus BASE_PHASE, "Failed to delete listener ${it.listenerArn}. Listener could not be found. ${e.errorMessage}" + handleError("Failed to delete listener ${it.listenerArn}. Listener could not be found. ${e.toString()}", e) } } } @@ -315,6 +373,9 @@ class LoadBalancerV2UpsertHandler { } else if (action.type == "authenticate-oidc") { Action awsAction = new Action().withType(action.type).withAuthenticateOidcConfig(action.authenticateOidcActionConfig).withOrder(index + 1) awsActions.add(awsAction) + } else if (action.type == "redirect") { + Action awsAction = new Action().withType(action.type).withRedirectConfig(action.redirectActionConfig).withOrder(index + 1) + awsActions.add(awsAction) } } awsActions @@ -325,7 +386,12 @@ class LoadBalancerV2UpsertHandler { Collection securityGroups, List targetGroups, List listeners, - DeployDefaults deployDefaults) { + DeployDefaults deployDefaults, + Integer idleTimeout, + Boolean deletionProtection, + Boolean loadBalancingCrossZone, + String ipAddressType + ) { def amazonErrors = [] def loadBalancerName = loadBalancer.loadBalancerName def loadBalancerArn = loadBalancer.loadBalancerArn @@ -344,6 +410,59 @@ class LoadBalancerV2UpsertHandler { } } + def currentIpAddressType = loadBalancer.ipAddressType + if (ipAddressType && ipAddressType != currentIpAddressType && (loadBalancer.type == 'application' || loadBalancer.type == 'network')) { + def newIpAddressType = loadBalancer.scheme == 'internal' ? 'ipv4' : ipAddressType + loadBalancing.setIpAddressType(new SetIpAddressTypeRequest( + loadBalancerArn: loadBalancerArn, + ipAddressType: newIpAddressType + )) + task.updateStatus BASE_PHASE, "IP Address type updated ${loadBalancerName}." 
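+ // For example (illustrative): an internet-facing ALB or NLB can be switched from 'ipv4' to 'dualstack' here, while an 'internal' scheme is always pinned back to 'ipv4' by the ternary above.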
+ } + + // Update load balancer attributes + def currentAttributes = loadBalancing.describeLoadBalancerAttributes( + new DescribeLoadBalancerAttributesRequest() + .withLoadBalancerArn(loadBalancerArn) + ).attributes + + List attributes = [] + + // idle timeout is only supported in application load balancers + if (loadBalancer.type == 'application') { + String currentIdleTimeout = currentAttributes.find { it.key == ATTRIBUTE_IDLE_TIMEOUT }?.getValue() + String newIdleTimeout = [idleTimeout, deployDefaults.loadBalancing.idleTimeout].findResult(Closure.IDENTITY).toString() + if (currentIdleTimeout != newIdleTimeout) { + task.updateStatus BASE_PHASE, "Setting idle timeout on ${loadBalancerName} to ${newIdleTimeout}." + attributes.add(new LoadBalancerAttribute().withKey(ATTRIBUTE_IDLE_TIMEOUT).withValue(newIdleTimeout)) + } + } + + String currentDeletionProtections = currentAttributes.find { it.key == ATTRIBUTE_DELETION_PROTECTION }?.getValue() + String newDeletionProtection = [deletionProtection, deployDefaults.loadBalancing.deletionProtection].findResult(Boolean.FALSE, Closure.IDENTITY).toString() + if (currentDeletionProtections != newDeletionProtection) { + task.updateStatus BASE_PHASE, "Setting deletion protection on ${loadBalancerName} to ${newDeletionProtection}." + attributes.add(new LoadBalancerAttribute().withKey(ATTRIBUTE_DELETION_PROTECTION).withValue(newDeletionProtection)) + } + + // Cross-Zone Load Balancing is only supported in network load balancers + if (loadBalancer.type == 'network' && loadBalancingCrossZone != null) { + String currentLoadBalancingCrossZone = currentAttributes.find { it.key == ATTRIBUTE_LOAD_BALANCING_CROSS_ZONE }?.getValue() + String newLoadBalancingCrossZone = [loadBalancingCrossZone, deployDefaults.loadBalancing.crossZoneBalancingDefault].findResult(Boolean.TRUE, Closure.IDENTITY).toString() + if (currentLoadBalancingCrossZone != newLoadBalancingCrossZone) { + task.updateStatus BASE_PHASE, "Setting Cross-Zone Load Balancing on ${loadBalancerName} to ${newLoadBalancingCrossZone}." 
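+ // Illustrative: with loadBalancingCrossZone = true this adds LoadBalancerAttribute(key: 'load_balancing.cross_zone.enabled', value: 'true') to the ModifyLoadBalancerAttributes call below.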
+ attributes.add(new LoadBalancerAttribute().withKey(ATTRIBUTE_LOAD_BALANCING_CROSS_ZONE).withValue(newLoadBalancingCrossZone)) + } + } + + if (!attributes.isEmpty()) { + loadBalancing.modifyLoadBalancerAttributes( + new ModifyLoadBalancerAttributesRequest() + .withLoadBalancerArn(loadBalancerArn) + .withAttributes(attributes) + ) + } + // Get the state of this load balancer from aws List existingTargetGroups = [] existingTargetGroups = loadBalancing.describeTargetGroups( @@ -359,8 +478,8 @@ class LoadBalancerV2UpsertHandler { // Can't modify the port or protocol of a target group, so if changed, have to delete/recreate List> targetGroupsSplit = existingTargetGroups.split { awsTargetGroup -> (targetGroups.find { it.name == awsTargetGroup.targetGroupName && - it.port == awsTargetGroup.port && - it.protocol.toString() == awsTargetGroup.protocol }) == null + it.port == awsTargetGroup.port && + it.protocol.toString() == awsTargetGroup.protocol }) == null } List targetGroupsToRemove = targetGroupsSplit[0] List targetGroupsToUpdate = targetGroupsSplit[1] @@ -402,10 +521,15 @@ class LoadBalancerV2UpsertHandler { List actions = getAmazonActionsFromDescription(rule.actions, existingTargetGroups, amazonErrors) List conditions = rule.conditions.collect { condition -> - new RuleCondition().withField(condition.field).withValues(condition.values) + if (condition.field == 'http-request-method') { + HttpRequestMethodConditionConfig httpRequestMethodConditionConfig = new HttpRequestMethodConditionConfig().withValues(condition.values) + new RuleCondition().withField(condition.field).withHttpRequestMethodConfig(httpRequestMethodConditionConfig) + } else { + new RuleCondition().withField(condition.field).withValues(condition.values) + } } - rules.add(new Rule().withActions(actions).withConditions(conditions).withPriority(Integer.toString(rule.priority))) + rules.add(new Rule().withActions(actions).withConditions(conditions).withPriority(rule.priority)) } listenerToRules.put(listener, rules) } @@ -446,13 +570,24 @@ class LoadBalancerV2UpsertHandler { } } - static LoadBalancer createLoadBalancer(AmazonElasticLoadBalancing loadBalancing, String loadBalancerName, boolean isInternal, + static LoadBalancer createLoadBalancer(AmazonElasticLoadBalancing loadBalancing, String loadBalancerName, + boolean isInternal, Collection subnetIds, Collection securityGroups, List targetGroups, List listeners, DeployDefaults deployDefaults, - String type) { - def request = new CreateLoadBalancerRequest().withName(loadBalancerName) + String type, + Integer idleTimeout, + boolean deletionProtection, + boolean loadBalancingCrossZone, + String ipAddressType + ) { + def request = new CreateLoadBalancerRequest().withName(loadBalancerName); + + if (ipAddressType && (type == 'application' || type == 'network')) { + def addressType = isInternal ? 
'ipv4' : ipAddressType + request.withIpAddressType(addressType) + } // Networking Related if (subnetIds) { @@ -477,17 +612,23 @@ class LoadBalancerV2UpsertHandler { try { result = loadBalancing.createLoadBalancer(request) } catch (AmazonServiceException e) { - def errors = [] - errors << e.errorMessage - throw new AtomicOperationException("Failed to create load balancer.", errors) + log.error("Failed to create load balancer", e) + throw new AtomicOperationException("Failed to create load balancer.", [e.toString()]) } LoadBalancer createdLoadBalancer = null List loadBalancers = result.getLoadBalancers() if (loadBalancers != null && loadBalancers.size() > 0) { createdLoadBalancer = loadBalancers.get(0) - updateLoadBalancer(loadBalancing, createdLoadBalancer, securityGroups, targetGroups, listeners, deployDefaults) + updateLoadBalancer(loadBalancing, createdLoadBalancer, securityGroups, targetGroups, listeners, deployDefaults, idleTimeout, deletionProtection, loadBalancingCrossZone, ipAddressType) } + createdLoadBalancer } + + private static String handleError(String message, Exception e) { + log.error(message, e) + task.updateStatus BASE_PHASE, message + return message + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateClusterConfigurationStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateClusterConfigurationStrategy.java deleted file mode 100644 index fd3b7882c82..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateClusterConfigurationStrategy.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.netflix.spinnaker.config.AwsConfiguration; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator.SecurityGroupLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfiguration; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfigurationTarget; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateClusterConfigurationResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateClusterConfigurationsAtomicOperation; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; - -public abstract class MigrateClusterConfigurationStrategy implements MigrateStrategySupport { - - protected SecurityGroupLookup sourceLookup; - protected SecurityGroupLookup targetLookup; - protected ClusterConfiguration source; - protected ClusterConfigurationTarget target; - protected String subnetType; - protected String elbSubnetType; - protected String iamRole; - protected String keyPair; - protected Map loadBalancerNameMapping; - protected boolean allowIngressFromClassic; - protected boolean dryRun; - - protected MigrateSecurityGroupStrategy migrateSecurityGroupStrategy; - protected MigrateLoadBalancerStrategy getMigrateLoadBalancerStrategy; - - abstract AmazonClientProvider getAmazonClientProvider(); - - abstract RegionScopedProviderFactory getRegionScopedProviderFactory(); - - abstract AwsConfiguration.DeployDefaults getDeployDefaults(); - - /** - * Migrates load balancers and security groups in a cluster configuration, returning the mutations and the cluster - * configuration, with load balancers and security group collections updated, as well as subnetType, iamRole, - * and keyPair - * - * @param source the source configuration - * @param target the target location - * @param sourceLookup a security group lookup cache for the source region - * @param targetLookup a security group lookup cache for the target region (may be the same object as - * the sourceLookup) - * @param migrateLoadBalancerStrategy the load balancer migration strategy - * @param migrateSecurityGroupStrategy the security group migration strategy - * @param subnetType the subnetType in which to migrate the dependencies (should be null for EC - * Classic migrations) - * @param elbSubnetType the subnetType in which to migrate dependent load balancers (should be null - * for EC Classic migrations) - * @param iamRole the iamRole to apply when migrating (optional) - * @param keyPair the keyPair to apply when migrating (optional) - * @param allowIngressFromClassic whether app security groups should granted ingress to classic link - * 
@param dryRun whether to perform the migration or simply calculate the migration - * @return a result set with the new cluster configuration and a collection of load balancers and security groups - * required to perform the migration - */ - public synchronized MigrateClusterConfigurationResult generateResults(ClusterConfiguration source, ClusterConfigurationTarget target, - SecurityGroupLookup sourceLookup, SecurityGroupLookup targetLookup, - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy, - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy, - String subnetType, String elbSubnetType, String iamRole, String keyPair, - Map loadBalancerNameMapping, - boolean allowIngressFromClassic, boolean dryRun) { - this.sourceLookup = sourceLookup; - this.targetLookup = targetLookup; - this.source = source; - this.target = target; - this.subnetType = subnetType; - this.elbSubnetType = elbSubnetType; - this.iamRole = iamRole; - this.keyPair = keyPair; - this.loadBalancerNameMapping = loadBalancerNameMapping; - this.allowIngressFromClassic = allowIngressFromClassic; - this.dryRun = dryRun; - - this.migrateSecurityGroupStrategy = migrateSecurityGroupStrategy; - this.getMigrateLoadBalancerStrategy = migrateLoadBalancerStrategy; - - MigrateClusterConfigurationResult result = new MigrateClusterConfigurationResult(); - - List targetLoadBalancers = generateTargetLoadBalancers(); - List targetSecurityGroups = generateTargetSecurityGroups(result); - - result.setLoadBalancerMigrations(targetLoadBalancers); - result.setSecurityGroupMigrations(targetSecurityGroups); - - Map cluster = source.getCluster(); - - Map> zones = new HashMap<>(); - zones.put(target.getRegion(), target.getAvailabilityZones()); - cluster.put("availabilityZones", zones); - - cluster.put("loadBalancers", targetLoadBalancers.stream().map(MigrateLoadBalancerResult::getTargetName).collect(Collectors.toList())); - cluster.put("securityGroups", targetSecurityGroups.stream() - .filter(g -> !g.getSkipped().contains(g.getTarget())) - .map(s -> s.getTarget().getTargetId()).collect(Collectors.toList())); - cluster.put("account", target.getCredentialAccount()); - if (MigrateClusterConfigurationsAtomicOperation.CLASSIC_SUBNET_KEY.equals(subnetType)) { - cluster.remove("subnetType"); - } else { - cluster.put("subnetType", subnetType); - } - if (iamRole != null) { - cluster.put("iamRole", iamRole); - } - if (keyPair != null) { - cluster.put("keyPair", keyPair); - } - result.setCluster(cluster); - - return result; - } - - protected List generateTargetSecurityGroups(MigrateClusterConfigurationResult result) { - - source.getSecurityGroupIds().stream() - .filter(g -> !sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).isPresent()) - .forEach(m -> result.getWarnings().add("Skipping creation of security group: " + m - + " (could not be found in source location)")); - - List securityGroupNames = source.getSecurityGroupIds().stream() - .filter(g -> sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).isPresent()) - .map(g -> sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).get()) - .map(g -> g.getSecurityGroup().getGroupName()) - .collect(Collectors.toList()); - - List targetSecurityGroups = securityGroupNames.stream() - .map(this::getMigrateSecurityGroupResult) - .collect(Collectors.toList()); - - if (getDeployDefaults().getAddAppGroupToServerGroup()) { - // if the app security group is already present, don't include it twice - Optional appGroup 
= targetSecurityGroups.stream() - .filter(r -> source.getApplication().equals(r.getTarget().getTargetName())).findFirst(); - if (!appGroup.isPresent()) { - appGroup = Optional.of(generateAppSecurityGroup()); - targetSecurityGroups.add(appGroup.get()); - } - handleClassicLinkIngress(appGroup.get().getTarget().getTargetId()); - } - - return targetSecurityGroups; - } - - protected List generateTargetLoadBalancers() { - return source.getLoadBalancerNames().stream() - .map(this::getMigrateLoadBalancerResult) - .collect(Collectors.toList()); - } - - protected MigrateSecurityGroupResult generateAppSecurityGroup() { - SecurityGroupMigrator.SecurityGroupLocation appGroupLocation = new SecurityGroupMigrator.SecurityGroupLocation(); - appGroupLocation.setName(source.getApplication()); - appGroupLocation.setRegion(source.getRegion()); - appGroupLocation.setCredentials(source.getCredentials()); - appGroupLocation.setVpcId(source.getVpcId()); - SecurityGroupMigrator migrator = new SecurityGroupMigrator(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - appGroupLocation, new SecurityGroupLocation(target)); - migrator.setCreateIfSourceMissing(true); - return migrator.migrate(dryRun); - } - - protected void handleClassicLinkIngress(String securityGroupId) { - if (!dryRun && allowIngressFromClassic) { - addClassicLinkIngress(targetLookup, getDeployDefaults().getClassicLinkSecurityGroupName(), - securityGroupId, target.getCredentials(), target.getVpcId()); - } - } - - private MigrateSecurityGroupResult getMigrateSecurityGroupResult(String group) { - SecurityGroupMigrator.SecurityGroupLocation sourceLocation = new SecurityGroupMigrator.SecurityGroupLocation(); - sourceLocation.setName(group); - sourceLocation.setRegion(source.getRegion()); - sourceLocation.setCredentials(source.getCredentials()); - sourceLocation.setVpcId(source.getVpcId()); - return new SecurityGroupMigrator(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - sourceLocation, new SecurityGroupMigrator.SecurityGroupLocation(target)).migrate(dryRun); - } - - private MigrateLoadBalancerResult getMigrateLoadBalancerResult(String lbName) { - LoadBalancerMigrator.LoadBalancerLocation sourceLocation = new LoadBalancerMigrator.LoadBalancerLocation(); - sourceLocation.setName(lbName); - sourceLocation.setRegion(source.getRegion()); - sourceLocation.setVpcId(source.getVpcId()); - sourceLocation.setCredentials(source.getCredentials()); - LoadBalancerMigrator.TargetLoadBalancerLocation targetLocation = new LoadBalancerMigrator.TargetLoadBalancerLocation(sourceLocation, target); - if (loadBalancerNameMapping.containsKey(lbName)) { - targetLocation.setName(loadBalancerNameMapping.get(lbName)); - } - return new LoadBalancerMigrator(sourceLookup, targetLookup, getAmazonClientProvider(), getRegionScopedProviderFactory(), - migrateSecurityGroupStrategy, getDeployDefaults(), getMigrateLoadBalancerStrategy, sourceLocation, - targetLocation, elbSubnetType, source.getApplication(), allowIngressFromClassic).migrate(dryRun); - } - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateLoadBalancerStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateLoadBalancerStrategy.java deleted file mode 100644 index 63bf9757f03..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateLoadBalancerStrategy.java +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.*; -import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing; -import com.amazonaws.services.elasticloadbalancing.model.*; -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.LoadBalancerLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.TargetLoadBalancerLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupReference; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator.SecurityGroupLocation; -import com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget; -import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonVpcProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; - -import java.util.*; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -public abstract class MigrateLoadBalancerStrategy implements MigrateStrategySupport { - - protected SecurityGroupLookup sourceLookup; - protected SecurityGroupLookup targetLookup; - protected MigrateSecurityGroupStrategy migrateSecurityGroupStrategy; - - protected LoadBalancerLocation source; - protected TargetLoadBalancerLocation target; - protected String subnetType; - protected String applicationName; - protected boolean allowIngressFromClassic; - protected boolean dryRun; - - abstract AmazonClientProvider getAmazonClientProvider(); - - abstract RegionScopedProviderFactory getRegionScopedProviderFactory(); - - abstract DeployDefaults getDeployDefaults(); - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - /** - * Generates a result set describing the actions required to migrate the source load balancer to the target. 
- * - * @param sourceLookup a security group lookup cache for the source region - * @param targetLookup a security group lookup cache for the target region (may be the same object as the sourceLookup) - * @param source the source load balancer - * @param target the target location - * @param subnetType the subnetType in which to migrate the load balancer (should be null for EC Classic migrations) - * @param applicationName the name of the source application - * @param allowIngressFromClassic whether ingress should be granted from classic link - * @param dryRun whether to actually perform the migration - * @return the result set - */ - public synchronized MigrateLoadBalancerResult generateResults(SecurityGroupLookup sourceLookup, SecurityGroupLookup targetLookup, - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy, - LoadBalancerLocation source, TargetLoadBalancerLocation target, - String subnetType, String applicationName, - boolean allowIngressFromClassic, boolean dryRun) { - - this.migrateSecurityGroupStrategy = migrateSecurityGroupStrategy; - this.sourceLookup = sourceLookup; - this.targetLookup = targetLookup; - this.source = source; - this.target = target; - this.subnetType = subnetType; - this.applicationName = applicationName; - this.allowIngressFromClassic = allowIngressFromClassic; - this.dryRun = dryRun; - - if (!target.isUseZonesFromSource() && (target.getAvailabilityZones() == null || target.getAvailabilityZones().isEmpty())) { - throw new IllegalStateException("No availability zones specified for load balancer migration"); - } - - final MigrateLoadBalancerResult result = new MigrateLoadBalancerResult(); - - LoadBalancerDescription sourceLoadBalancer = getLoadBalancer(source.getCredentials(), source.getRegion(), source.getName()); - if (sourceLoadBalancer == null) { - throw new IllegalStateException("Source load balancer not found: " + source); - } - - if (target.isUseZonesFromSource()) { - target.setAvailabilityZones(sourceLoadBalancer.getAvailabilityZones()); - if (target.getAvailabilityZones() == null || target.getAvailabilityZones().isEmpty()) { - throw new IllegalStateException("No availability zones specified for load balancer migration"); - } - } - - Vpc sourceVpc = getVpc(source); - Vpc targetVpc = getVpc(target); - - String targetName = target.getName() != null ? 
target.getName() : generateLoadBalancerName(source.getName(), sourceVpc, targetVpc); - LoadBalancerDescription targetLoadBalancer = getLoadBalancer(target.getCredentials(), target.getRegion(), targetName); - verifyLoadBalancerName(result, targetName, targetLoadBalancer); - - List targetSecurityGroups = getTargetSecurityGroups(sourceLoadBalancer, result); - - List securityGroups = targetSecurityGroups.stream() - .filter(g -> !g.getSkipped().contains(g.getTarget())) - .map(g -> g.getTarget().getTargetId()).distinct().collect(Collectors.toList()); - securityGroups.addAll(buildExtraSecurityGroups(sourceLoadBalancer, result)); - - result.getSecurityGroups().addAll(targetSecurityGroups); - - result.setTargetName(targetName); - result.setTargetExists(targetLoadBalancer != null); - if (!dryRun) { - updateTargetLoadBalancer(sourceLoadBalancer, targetLoadBalancer, targetName, securityGroups, result); - } - - return result; - } - - /* - If migrating from Classic to VPC and the name cannot be updated, require a new name - If this is not a dry run, we'll throw an exception to halt the migration - */ - private void verifyLoadBalancerName(MigrateLoadBalancerResult result, String targetName, LoadBalancerDescription targetLoadBalancer) { - boolean invalid = target.getVpcId() != null && targetLoadBalancer != null && targetLoadBalancer.getVPCId() == null; - if (targetName.equals(source.getName()) && target.getCredentialAccount().equals(source.getCredentialAccount()) && target.getRegion().equals(source.getRegion())) { - invalid = true; - } - if (invalid) { - if (dryRun) { - result.setNewNameRequired(true); - } else { - throw new IllegalStateException("A load balancer named '" + targetName + "' already exists in EC2 Classic and cannot be reused when migrating to VPC"); - } - } - } - - /** - * Performs the actual upsert operation against the target load balancer - * - * @param sourceLoadBalancer the Amazon load balancer description of the source load balancer - * @param targetLoadBalancer the Amazon load balancer description of the target load balancer (may be null) - * @param targetName the name of the target load balancer - * @param securityGroups a list of security group names to attach to the load balancer - */ - protected void updateTargetLoadBalancer(LoadBalancerDescription sourceLoadBalancer, - LoadBalancerDescription targetLoadBalancer, - String targetName, Collection securityGroups, - MigrateLoadBalancerResult result) { - - List listeners = getListeners(sourceLoadBalancer, result); - - List subnetIds = subnetType != null ? - getRegionScopedProviderFactory().forRegion(target.getCredentials(), target.getRegion()) - .getSubnetAnalyzer().getSubnetIdsForZones(target.getAvailabilityZones(), subnetType, SubnetTarget.ELB, 1) : - new ArrayList(); - if (subnetIds != null && subnetIds.isEmpty() && subnetType != null) { - throw new IllegalStateException("Cannot " + targetLoadBalancer == null ? "create" : "update" + targetName + ". 
No subnets found for subnet type: " + subnetType); - } - AmazonElasticLoadBalancing sourceClient = getAmazonClientProvider() - .getAmazonElasticLoadBalancing(source.getCredentials(), source.getRegion(), true); - AmazonElasticLoadBalancing targetClient = getAmazonClientProvider() - .getAmazonElasticLoadBalancing(target.getCredentials(), target.getRegion(), true); - if (targetLoadBalancer == null) { - boolean isInternal = subnetType == null || subnetType.contains("internal"); - LoadBalancerAttributes sourceAttributes = getLoadBalancerAttributes(sourceLoadBalancer, sourceClient); - LoadBalancerUpsertHandler.createLoadBalancer( - targetClient, targetName, isInternal, target.getAvailabilityZones(), subnetIds, listeners, securityGroups, sourceAttributes); - configureHealthCheck(targetClient, sourceLoadBalancer, targetName); - } else { - LoadBalancerUpsertHandler.updateLoadBalancer(targetClient, targetLoadBalancer, listeners, securityGroups); - } - applyListenerPolicies(sourceClient, targetClient, sourceLoadBalancer, targetName); - } - - public List getListeners(LoadBalancerDescription sourceLoadBalancer, MigrateLoadBalancerResult result) { - List unmigratableListeners = sourceLoadBalancer.getListenerDescriptions().stream() - .map(ListenerDescription::getListener) - .filter(listenerCannotBeMigrated(source, target)).collect(Collectors.toList()); - - unmigratableListeners.forEach(l -> result.getWarnings().add( - "The following listeners could not be created: " + - l.getProtocol() + ":" + l.getLoadBalancerPort() + " => " + - l.getInstanceProtocol() + ":" + l.getInstancePort() + " (certificate: " + l.getSSLCertificateId() + ")." - )); - - List listeners = sourceLoadBalancer.getListenerDescriptions().stream() - .map(ListenerDescription::getListener) - .filter(l -> l.getInstancePort() > 0) // strip out invalid load balancer listeners from legacy ELBs - .collect(Collectors.toList()); - - listeners.removeAll(unmigratableListeners); - return listeners; - } - - public LoadBalancerAttributes getLoadBalancerAttributes(LoadBalancerDescription sourceLoadBalancer, AmazonElasticLoadBalancing sourceClient) { - LoadBalancerAttributes sourceAttributes = sourceClient.describeLoadBalancerAttributes( - new DescribeLoadBalancerAttributesRequest().withLoadBalancerName(sourceLoadBalancer.getLoadBalancerName())).getLoadBalancerAttributes(); - if (sourceLoadBalancer.getListenerDescriptions().stream().anyMatch(l -> l.getListener().getInstancePort() == 0)) { - sourceAttributes.setCrossZoneLoadBalancing(new CrossZoneLoadBalancing().withEnabled(true)); - } - return sourceAttributes; - } - - - /** - * Applies any listener policies from the source load balancer to the target load balancer. - * - * Since policy names are unique to each load balancer, two policies with the same name in different load balancers - * may contain different policy attributes. For the sake of simplicity, we assume that policies with the same name - * are structurally the same, and do not attempt to reconcile any differences between attributes. - * - * We will, however, attempt to override the policies applied to a given listener if it's different, e.g., if the - * source load balancer has policy "a" on port 7000, and the target load balancer has policy "b" on port 7000, we - * will: - * 1. create policy "a" if it doesn't exist on the target load balancer, then - * 2. 
update the target load balancer so port 7000 will have only policy "a" - */ - public void applyListenerPolicies(AmazonElasticLoadBalancing sourceClient, AmazonElasticLoadBalancing targetClient, - LoadBalancerDescription source, String loadBalancerName) { - Set policiesToRetrieve = new HashSet<>(); - Map policyNameMap = new HashMap<>(); - source.getListenerDescriptions().forEach(d -> policiesToRetrieve.addAll(d.getPolicyNames())); - List sourcePolicies = sourceClient.describeLoadBalancerPolicies( - new DescribeLoadBalancerPoliciesRequest() - .withLoadBalancerName(source.getLoadBalancerName()) - .withPolicyNames(policiesToRetrieve)).getPolicyDescriptions(); - List targetPolicies = targetClient.describeLoadBalancerPolicies( - new DescribeLoadBalancerPoliciesRequest() - .withLoadBalancerName(loadBalancerName) - ).getPolicyDescriptions(); - - sourcePolicies.forEach(p -> { - Optional match = targetPolicies.stream().filter(tp -> - tp.getPolicyAttributeDescriptions().size() == p.getPolicyAttributeDescriptions().size() - && tp.getPolicyAttributeDescriptions().containsAll(p.getPolicyAttributeDescriptions())) - .findFirst(); - - if (match.isPresent()) { - policyNameMap.put(p.getPolicyName(), match.get().getPolicyName()); - } else { - String policyName = p.getPolicyName(); - if (policyName.startsWith("ELBSample-") || policyName.startsWith("ELBSecurityPolicy-")) { - policyName = "migrated-" + policyName; - } - policyNameMap.put(p.getPolicyName(), policyName); - CreateLoadBalancerPolicyRequest request = new CreateLoadBalancerPolicyRequest() - .withPolicyName(policyName) - .withLoadBalancerName(loadBalancerName) - .withPolicyTypeName(p.getPolicyTypeName()); - // only copy policy attributes if this is not a pre-defined policy - // (as defined by the presence of 'Reference-Security-Policy' - Optional referencePolicy = p.getPolicyAttributeDescriptions().stream() - .filter(d -> d.getAttributeName().equals("Reference-Security-Policy")).findFirst(); - if (referencePolicy.isPresent()) { - request.withPolicyAttributes( - new PolicyAttribute(referencePolicy.get().getAttributeName(), referencePolicy.get().getAttributeValue())); - } else { - request.withPolicyAttributes(p.getPolicyAttributeDescriptions().stream().map(d -> - new PolicyAttribute(d.getAttributeName(), d.getAttributeValue())).collect(Collectors.toList())); - } - targetClient.createLoadBalancerPolicy(request); - } - }); - source.getListenerDescriptions().forEach(l -> targetClient.setLoadBalancerPoliciesOfListener( - new SetLoadBalancerPoliciesOfListenerRequest() - .withLoadBalancerName(loadBalancerName) - .withLoadBalancerPort(l.getListener().getLoadBalancerPort()) - .withPolicyNames(l.getPolicyNames().stream().map(policyNameMap::get).collect(Collectors.toList())) - ) - ); - } - - - private void configureHealthCheck(AmazonElasticLoadBalancing loadBalancing, - LoadBalancerDescription source, String loadBalancerName) { - HealthCheck healthCheck = new HealthCheck() - .withTarget(source.getHealthCheck().getTarget()) - .withInterval(source.getHealthCheck().getInterval()) - .withTimeout(source.getHealthCheck().getTimeout()) - .withUnhealthyThreshold(source.getHealthCheck().getUnhealthyThreshold()) - .withHealthyThreshold(source.getHealthCheck().getHealthyThreshold()); - - loadBalancing.configureHealthCheck(new ConfigureHealthCheckRequest(loadBalancerName, healthCheck)); - } - - private Predicate listenerCannotBeMigrated(LoadBalancerLocation source, LoadBalancerLocation target) { - return l -> l.getSSLCertificateId() != null && 
!source.getCredentialAccount().equals(target.getCredentialAccount()); - } - - private LoadBalancerDescription getLoadBalancer(NetflixAmazonCredentials credentials, String region, String name) { - try { - AmazonElasticLoadBalancing client = getAmazonClientProvider() - .getAmazonElasticLoadBalancing(credentials, region, true); - DescribeLoadBalancersResult targetLookup = client.describeLoadBalancers( - new DescribeLoadBalancersRequest().withLoadBalancerNames(name)); - return targetLookup.getLoadBalancerDescriptions().get(0); - } catch (Exception ignored) { - return null; - } - } - - /** - * Generates a list of security groups to add to the load balancer in addition to those on the source load balancer - * - * @param sourceDescription the AWS description of the source load balancer - * @param result the result set for the load balancer migration - this will potentially be mutated as a side effect - * @return a list security group ids that should be added to the load balancer - */ - protected List buildExtraSecurityGroups(LoadBalancerDescription sourceDescription, - MigrateLoadBalancerResult result) { - ArrayList newGroups = new ArrayList<>(); - if (target.getVpcId() != null) { - AmazonEC2 targetAmazonEC2 = getAmazonClientProvider().getAmazonEC2(target.getCredentials(), target.getRegion(), true); - List appGroups = new ArrayList<>(); - try { - List groupNames = Arrays.asList(applicationName, applicationName + "-elb"); - appGroups = targetAmazonEC2.describeSecurityGroups(new DescribeSecurityGroupsRequest().withFilters( - new Filter("group-name", groupNames))).getSecurityGroups(); - } catch (Exception ignored) { - } - - String elbGroupId = buildElbSecurityGroup(sourceDescription, appGroups, result); - newGroups.add(elbGroupId); - } - return newGroups; - } - - /** - * Creates an elb specific security group, or returns the ID of one if it already exists - * - * @param sourceDescription the AWS description of the source load balancer - * @param appGroups list of existing security groups in which to look for existing elb security group - * @param result the result set for the load balancer migration - this will potentially be mutated as a side effect - * @return the groupId of the elb security group - */ - protected String buildElbSecurityGroup(LoadBalancerDescription sourceDescription, List appGroups, - MigrateLoadBalancerResult result) { - String elbGroupId = null; - Optional existingGroup = appGroups.stream() - .filter(g -> g.getVpcId() != null && g.getVpcId().equals(target.getVpcId()) && g.getGroupName().equals(applicationName + "-elb")) - .findFirst(); - if (existingGroup.isPresent()) { - if (!dryRun && allowIngressFromClassic) { - addClassicLinkIngress(targetLookup, getDeployDefaults().getClassicLinkSecurityGroupName(), - existingGroup.get().getGroupId(), target.getCredentials(), target.getVpcId()); - } - return existingGroup.get().getGroupId(); - } - MigrateSecurityGroupReference elbGroup = new MigrateSecurityGroupReference(); - elbGroup.setAccountId(target.getCredentials().getAccountId()); - elbGroup.setVpcId(target.getVpcId()); - elbGroup.setTargetName(applicationName + "-elb"); - MigrateSecurityGroupResult addedGroup = new MigrateSecurityGroupResult(); - addedGroup.setTarget(elbGroup); - addedGroup.getCreated().add(elbGroup); - result.getSecurityGroups().add(addedGroup); - if (!dryRun) { - UpsertSecurityGroupDescription upsertDescription = new UpsertSecurityGroupDescription(); - upsertDescription.setDescription("Application load balancer security group for " + applicationName); - 
upsertDescription.setName(applicationName + "-elb"); - upsertDescription.setVpcId(target.getVpcId()); - upsertDescription.setRegion(target.getRegion()); - upsertDescription.setCredentials(target.getCredentials()); - getTask().updateStatus(LoadBalancerMigrator.BASE_PHASE, "Creating load balancer security group " + - upsertDescription.getName() + " in " + target.getCredentialAccount() + "/" + target.getRegion() + "/" + target.getVpcId()); - elbGroupId = targetLookup.createSecurityGroup(upsertDescription).getSecurityGroup().getGroupId(); - AmazonEC2 targetAmazonEC2 = getAmazonClientProvider().getAmazonEC2(target.getCredentials(), target.getRegion(), true); - elbGroup.setTargetId(elbGroupId); - if (source.getVpcId() == null && allowIngressFromClassic) { - addClassicLinkIngress(targetLookup, getDeployDefaults().getClassicLinkSecurityGroupName(), - elbGroupId, target.getCredentials(), target.getVpcId()); - addPublicIngress(targetAmazonEC2, elbGroupId, sourceDescription); - } - } - if (getDeployDefaults().getAddAppGroupToServerGroup()) { - buildApplicationSecurityGroup(sourceDescription, appGroups, addedGroup); - } - - return elbGroupId; - } - - /** - * Creates the app specific security group, or returns the ID of one if it already exists - * - * @param appGroups list of existing security groups in which to look for existing app security group - * @param elbGroup the elb specific security group, which will allow ingress permission from the - * app specific security group - */ - protected void buildApplicationSecurityGroup(LoadBalancerDescription sourceDescription, List appGroups, - MigrateSecurityGroupResult elbGroup) { - if (getDeployDefaults().getAddAppGroupToServerGroup()) { - AmazonEC2 targetAmazonEC2 = getAmazonClientProvider().getAmazonEC2(target.getCredentials(), target.getRegion(), true); - Optional existing = appGroups.stream().filter(isAppSecurityGroup()).findFirst(); - MigrateSecurityGroupReference appGroupReference = new MigrateSecurityGroupReference(); - appGroupReference.setAccountId(target.getCredentials().getAccountId()); - appGroupReference.setVpcId(target.getVpcId()); - appGroupReference.setTargetName(applicationName); - if (existing.isPresent()) { - elbGroup.getReused().add(appGroupReference); - } else { - elbGroup.getCreated().add(appGroupReference); - if (!dryRun) { - UpsertSecurityGroupDescription upsertDescription = new UpsertSecurityGroupDescription(); - upsertDescription.setDescription("Application security group for " + applicationName); - upsertDescription.setName(applicationName); - upsertDescription.setVpcId(target.getVpcId()); - upsertDescription.setRegion(target.getRegion()); - upsertDescription.setCredentials(target.getCredentials()); - getTask().updateStatus(LoadBalancerMigrator.BASE_PHASE, "Creating security group " + - upsertDescription.getName() + " in " + target.getCredentialAccount() + "/" + target.getRegion() + "/" + target.getVpcId()); - String newGroupId = targetLookup.createSecurityGroup(upsertDescription).getSecurityGroup().getGroupId(); - // After the create request completes, there is a brief period where the security group might not be - // available and subsequent operations on it will fail, so make sure it's there - OperationPoller.retryWithBackoff( - o -> appGroups.addAll(targetAmazonEC2.describeSecurityGroups( - new DescribeSecurityGroupsRequest().withGroupIds(newGroupId)).getSecurityGroups()), - 200, 5); - } - } - if (!dryRun) { - String elbGroupId = elbGroup.getTarget().getTargetId(); - SecurityGroup appGroup = 
appGroups.stream().filter(isAppSecurityGroup()).findFirst().get(); - if (allowIngressFromClassic) { - addClassicLinkIngress(targetLookup, getDeployDefaults().getClassicLinkSecurityGroupName(), - appGroup.getGroupId(), target.getCredentials(), target.getVpcId()); - } - boolean hasElbIngressPermission = appGroup.getIpPermissions().stream() - .anyMatch(p -> p.getUserIdGroupPairs().stream().anyMatch(u -> u.getGroupId().equals(elbGroupId))); - if (!hasElbIngressPermission) { - sourceDescription.getListenerDescriptions().forEach(l -> { - Listener listener = l.getListener(); - IpPermission newPermission = new IpPermission().withIpProtocol("tcp") - .withFromPort(listener.getInstancePort()).withToPort(listener.getInstancePort()) - .withUserIdGroupPairs(new UserIdGroupPair().withGroupId(elbGroupId).withVpcId(target.getVpcId())); - targetAmazonEC2.authorizeSecurityGroupIngress(new AuthorizeSecurityGroupIngressRequest() - .withGroupId(appGroup.getGroupId()) - .withIpPermissions(newPermission) - ); - }); - } - } - } - - private Predicate isAppSecurityGroup() { - return g -> { - if (!g.getGroupName().equals(applicationName)) { - return false; - } - if (g.getVpcId() == null) { - return target.getVpcId() == null; - } - return g.getVpcId().equals(target.getVpcId()); - }; - } - - // Adds a default public ingress for the load balancer. Called when migrating from Classic to VPC - private void addPublicIngress(AmazonEC2 targetAmazonEC2, String elbGroupId, LoadBalancerDescription sourceDescription) { - List permissions = sourceDescription.getListenerDescriptions().stream().map(l -> new IpPermission() - .withIpProtocol("tcp") - .withFromPort(l.getListener().getLoadBalancerPort()) - .withToPort(l.getListener().getLoadBalancerPort()) - .withIpv4Ranges(new IpRange().withCidrIp("0.0.0.0/0")) - //TODO(cfieber)-ipv6 - ).collect(Collectors.toList()); - - targetAmazonEC2.authorizeSecurityGroupIngress(new AuthorizeSecurityGroupIngressRequest() - .withGroupId(elbGroupId) - .withIpPermissions(permissions) - ); - } - - /** - * Generates a list of security groups that should be applied to the target load balancer - * - * @param sourceDescription AWS descriptor of source load balancer - * @param result result object of the calling migrate operation - * @return the list of security groups that will be created or added, excluding the elb-specific security group - */ - protected List getTargetSecurityGroups(LoadBalancerDescription sourceDescription, - MigrateLoadBalancerResult result) { - sourceDescription.getSecurityGroups().stream() - .filter(g -> !sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).isPresent()) - .forEach(m -> result.getWarnings().add("Skipping creation of security group: " + m + " (could not be found in source location)")); - List currentGroups = sourceDescription.getSecurityGroups().stream() - .filter(g -> sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).isPresent()) - .map(g -> sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()) - .get().getSecurityGroup()).collect(Collectors.toList()); - - return sourceDescription.getSecurityGroups().stream() - .filter(g -> currentGroups.stream().anyMatch(g2 -> g2.getGroupId().equals(g))) - .map(g -> { - SecurityGroup match = currentGroups.stream().filter(g3 -> g3.getGroupId().equals(g)).findFirst().get(); - SecurityGroupLocation sourceLocation = new SecurityGroupLocation(); - sourceLocation.setName(match.getGroupName()); -
sourceLocation.setRegion(source.getRegion()); - sourceLocation.setCredentials(source.getCredentials()); - sourceLocation.setVpcId(source.getVpcId()); - return new SecurityGroupMigrator(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - sourceLocation, new SecurityGroupLocation(target)).migrate(dryRun); - }) - .collect(Collectors.toList()); - } - - private Vpc getVpc(LoadBalancerLocation source) { - if (source.getVpcId() != null) { - DescribeVpcsResult vpcLookup = getAmazonClientProvider().getAmazonEC2(source.getCredentials(), source.getRegion()) - .describeVpcs(new DescribeVpcsRequest().withVpcIds(source.getVpcId())); - if (vpcLookup.getVpcs().isEmpty()) { - throw new IllegalStateException(String.format("Could not find VPC %s in %s/%s", - source.getVpcId(), source.getCredentialAccount(), source.getRegion())); - } - - return vpcLookup.getVpcs().get(0); - } - - return null; - } - - /** - * Generates the name of the new load balancer. By default, removes a number of suffixes, then adds the name - * of the VPC (if any), and shrinks the load balancer name to 32 characters if necessary - * - * @param sourceName the base name - * @param sourceVpc the source VPC - * @param targetVpc the target VPC - * @return the final name of the load balancer - */ - protected String generateLoadBalancerName(String sourceName, Vpc sourceVpc, Vpc targetVpc) { - String targetName = sourceName; - targetName = removeSuffix(targetName, AmazonVpcProvider.getVpcName(sourceVpc)); - targetName = removeSuffix(targetName, "classic"); - targetName = removeSuffix(targetName, "frontend"); - targetName = removeSuffix(targetName, "vpc"); - if (targetVpc != null) { - targetName += "-" + AmazonVpcProvider.getVpcName(targetVpc); - } - - return shrinkName(targetName); - } - - private String removeSuffix(String name, String suffix) { - if (name.endsWith("-" + suffix)) { - name = name.substring(0, name.length() - suffix.length() - 1); - } - return name; - } - - /** - * Reduces name to 32 characters - * - * @param name the name - * @return the short version of the name - */ - protected String shrinkName(String name) { - final int MAX_LENGTH = 32; - - if (name.length() > MAX_LENGTH) { - name = name - .replace("-internal", "-int") - .replace("-external", "-ext") - .replace("-elb", ""); - } - - - if (name.length() > MAX_LENGTH) { - name = name - .replace("-dev", "-d") - .replace("-test", "-t") - .replace("-prod", "-p") - .replace("-main", "-m") - .replace("-legacy", "-l") - .replace("-backend", "-b") - .replace("-front", "-f") - .replace("-release", "-r") - .replace("-private", "-p") - .replace("-edge", "-e") - .replace("-global", "-g"); - } - - - if (name.length() > MAX_LENGTH) { - name = name - .replace("internal", "int") - .replace("external", "ext") - .replace("backend", "b") - .replace("frontend", "f") - .replace("east", "e") - .replace("west", "w") - .replace("north", "n") - .replace("south", "s"); - } - - - if (name.length() > MAX_LENGTH) { - name = name.substring(0, MAX_LENGTH); - } - - return name; - } - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateSecurityGroupStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateSecurityGroupStrategy.java deleted file mode 100644 index 552daf325d5..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateSecurityGroupStrategy.java +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. 
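Note on the generateLoadBalancerName/shrinkName pair above: suffixes ("-classic", "-frontend", "-vpc", and the source VPC's name) are stripped, the target VPC name is appended, and only names still over 32 characters go through the abbreviation passes before a final truncation. For example, a Classic balancer named "myapp-frontend" migrating into a VPC named "vpc0" becomes "myapp-vpc0". A minimal standalone sketch of the shrinking step (illustrative only; the deleted code runs three passes with a larger replacement table):

class LoadBalancerNameExample {
  private static final int MAX_LENGTH = 32;

  // One abbreviation pass plus the last-resort truncation, mirroring the
  // structure of the deleted shrinkName above.
  static String shrink(String name) {
    if (name.length() > MAX_LENGTH) {
      name = name.replace("-internal", "-int").replace("-external", "-ext").replace("-elb", "");
    }
    return name.length() > MAX_LENGTH ? name.substring(0, MAX_LENGTH) : name;
  }
}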
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.IpPermission; -import com.amazonaws.services.ec2.model.SecurityGroup; -import com.amazonaws.services.ec2.model.UserIdGroupPair; -import com.amazonaws.services.ec2.model.Vpc; -import com.netflix.frigga.Names; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupReference; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupIngressConverter; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator.SecurityGroupLocation; -import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonVpcProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; - -import java.util.*; -import java.util.stream.Collectors; - -public abstract class MigrateSecurityGroupStrategy { - - protected SecurityGroupLookup sourceLookup; - protected SecurityGroupLookup targetLookup; - protected SecurityGroupLocation source; - protected SecurityGroupLocation target; - protected boolean dryRun; - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - abstract AmazonClientProvider getAmazonClientProvider(); - - /** - * Infrastructure applications are treated as optional, non-managed resources when performing migrations - */ - abstract List getInfrastructureApplications(); - - /** - * Generates a result set, describing the actions required to migrate the source security group to the target. 
- * - * @param source the source security group - * @param target the target location - * @param sourceLookup a lookup cache for the source region - * @param targetLookup a lookup cache for the target region (may be the same as the sourceLookup) - * @param createIfSourceMissing indicates whether the operation should proceed if the source does not already exist; - * if false and the source group cannot be found, an IllegalStateException will be thrown - * @return the result set - */ - public synchronized MigrateSecurityGroupResult generateResults(SecurityGroupLocation source, SecurityGroupLocation target, - SecurityGroupLookup sourceLookup, SecurityGroupLookup targetLookup, - boolean createIfSourceMissing, boolean dryRun) { - - this.sourceLookup = sourceLookup; - this.targetLookup = targetLookup; - this.source = source; - this.target = target; - this.dryRun = dryRun; - - final MigrateSecurityGroupResult result = new MigrateSecurityGroupResult(); - final Optional sourceUpdater = sourceLookup.getSecurityGroupByName( - source.getCredentialAccount(), - source.getName(), - source.getVpcId() - ); - if (!createIfSourceMissing && !sourceUpdater.isPresent()) { - throw new IllegalStateException("Security group does not exist: " + source.toString()); - } - - if (shouldSkipSource()) { - MigrateSecurityGroupReference skipped = createTargetSecurityGroupReference(null); - result.getSkipped().add(skipped); - result.setTarget(skipped); - return result; - } - - Set targetReferences = new HashSet<>(); - String sourceGroupId = null; - if (sourceUpdater.isPresent()) { - sourceGroupId = sourceUpdater.get().getSecurityGroup().getGroupId(); - targetReferences = getTargetReferences(sourceUpdater.get()); - - // convert names before determining if rules are important - targetReferences.forEach(reference -> reference.setTargetName(getTargetName(reference.getSourceName()))); - - result.setErrors(shouldError(targetReferences)); - if (!result.getErrors().isEmpty()) { - return result; - } - - result.setSkipped(shouldSkipWithoutWarning(targetReferences)); - Set toCheck = new HashSet<>(targetReferences); - toCheck.removeAll(result.getSkipped()); - - result.setWarnings(shouldWarn(toCheck)); - toCheck.removeAll(result.getWarnings()); - } - result.setTarget(createTargetSecurityGroupReference(sourceGroupId)); - - Set toVerify = new HashSet<>(targetReferences); - toVerify.add(result.getTarget()); - toVerify.removeAll(result.getWarnings()); - toVerify.removeAll(result.getSkipped()); - - result.setCreated(shouldCreate(toVerify)); - - List reused = new ArrayList<>(toVerify); - reused.removeAll(result.getCreated()); - result.setReused(reused); - - if (!dryRun) { - performMigration(result); - } - - return result; - } - - private void performMigration(MigrateSecurityGroupResult results) { - final Optional sourceGroupUpdater = sourceLookup.getSecurityGroupByName( - source.getCredentialAccount(), - source.getName(), - source.getVpcId()); - - final SecurityGroup securityGroup = sourceGroupUpdater.isPresent() ? 
sourceGroupUpdater.get().getSecurityGroup() : null; - - Set targetGroups = new HashSet<>(results.getCreated()); - targetGroups.addAll(results.getReused()); - if (!results.targetExists()) { - targetGroups.add(results.getTarget()); - } - results.getCreated().forEach(r -> r.setTargetId( - createDependentSecurityGroup(r).getSecurityGroup().getGroupId())); - - Optional targetGroup = targetLookup.getSecurityGroupByName( - target.getCredentialAccount(), - results.getTarget().getTargetName(), - target.getVpcId() - ); - - if (!targetGroup.isPresent()) { - throw new IllegalStateException("Target group cannot be found: " + results.getTarget().getTargetName()); - } - - if (sourceGroupUpdater.isPresent() && shouldCreateTargetPermissions(sourceGroupUpdater.get().getSecurityGroup())) { - createTargetPermissions(securityGroup, targetGroup.get(), targetGroups, results); - } - results.getTarget().setTargetId(targetGroup.get().getSecurityGroup().getGroupId()); - } - - /** - * Returns references to all security groups that should be created for the target - * - * @param references the collection of potential security groups to select from; implementations can choose to provide - * additional security groups that are *not* members of this set - * @return a list of security groups that need to be created in order to migrate the target security group - */ - protected Set shouldCreate(Set references) { - - List credentials = references.stream() - .map(AbstractAmazonCredentialsDescription::getCredentials).distinct().collect(Collectors.toList()); - Map vpcMappings = getVpcMappings(credentials); - - return references.stream().distinct().filter(reference -> { - String targetVpc = vpcMappings.get(reference.getAccountId()); - reference.setVpcId(targetVpc); - - Optional targetMatch = - targetLookup.getSecurityGroupByName(reference.getCredentialAccount(), reference.getTargetName(), targetVpc); - if (targetMatch.isPresent()) { - reference.setTargetId(targetMatch.get().getSecurityGroup().getGroupId()); - return false; - } - return true; - }).collect(Collectors.toSet()); - } - - /** - * Determines whether this security group should be skipped without consideration of ingress rules - * - * @return true if it should be skipped, false otherwise - */ - protected boolean shouldSkipSource() { - if (getInfrastructureApplications().contains(Names.parseName(source.getName()).getApp())) { - Optional targetGroup = targetLookup.getSecurityGroupByName(target.getCredentialAccount(), getTargetName(source.getName()), target.getVpcId()); - return !targetGroup.isPresent(); - } - return false; - } - - /** - * Returns references to all security groups that should halt the migration - * - * @param references the collection of potential security groups to select from; implementations can choose to provide - * additional security groups that are *not* members of this set - * @return a list of security groups that will fail the migration; if this call returns anything, additional checks - * will not run - */ - protected Set shouldError(Set references) { - return new HashSet<>(); - } - - /** - * Returns references to all security groups that cannot be created for the target and should be presented to the user - * - * @param references the collection of potential security groups to select from; implementations can choose to provide - * additional security groups that are *not* members of this set - * @return a list of security groups that cannot be created but should prompt the user - */ - protected Set shouldWarn(Set references) { - return 
references.stream().filter(reference -> { - if (!targetLookup.accountIdExists(reference.getAccountId())) { - reference.setExplanation("Spinnaker does not manage the account " + reference.getAccountId()); - return true; - } - return false; - }).collect(Collectors.toSet()); - } - - /** - * Returns references to all security groups that will be skipped - but not visually reported to the user - when - * migrating the target. This includes amazon-elb groups when the target is a VPC, and infrastructure application groups that do not exist in the destination. - * - * @param references the collection of potential security groups to select from; implementations can choose to provide - * additional security groups that are *not* members of this set - * @return a list of security groups that will be skipped when migrating the target security group - */ - protected Set shouldSkipWithoutWarning(Set references) { - - return references.stream().filter(reference -> { - if (reference.getAccountId().equals("amazon-elb") && target.getVpcId() != null) { - reference.setExplanation("amazon-elb groups are not required in VPC environments"); - return true; - } - String targetName = reference.getTargetName(); - String targetApp = Names.parseName(targetName).getApp(); - String account = targetLookup.getAccountNameForId(reference.getAccountId()); - if (getInfrastructureApplications().contains(targetApp) && - !targetLookup.getSecurityGroupByName(account, targetName, reference.getVpcId()).isPresent()) { - reference.setExplanation(targetName + " does not exist in destination"); - return true; - } - return false; - }).collect(Collectors.toSet()); - } - - /** - * Determines whether ingress rules should be updated when migrating the security group - for example, you may - * not want to touch security groups that are managed by a different team, or security groups in a specific service - * - * @param securityGroup the security group - * @return true if ingress rules should be updated, false otherwise - */ - protected boolean shouldCreateTargetPermissions(SecurityGroup securityGroup) { - return !getInfrastructureApplications().contains(Names.parseName(securityGroup.getGroupName()).getApp()); - } - - /** - * Returns the name of the target security group based on the name of the source security group.
Useful (at least at - * Netflix) when migrating to remove legacy naming conventions - * - * @param sourceName the source name - * @return the target name - */ - protected String getTargetName(String sourceName) { - String targetName = sourceName; - if (targetName.endsWith("-vpc")) { - targetName = targetName.substring(0, targetName.length() - 4); - } - return targetName; - } - - private Set getTargetReferences(SecurityGroupUpdater source) { - SecurityGroup group = source.getSecurityGroup(); - if (getInfrastructureApplications().contains(Names.parseName(group.getGroupName()).getApp())) { - return new HashSet<>(); - } - return group.getIpPermissions() - .stream() - .map(IpPermission::getUserIdGroupPairs) - .flatMap(List::stream) - .filter(pair -> !pair.getGroupId().equals(group.getGroupId()) || !pair.getUserId().equals(group.getOwnerId())) - .map(pair -> { - NetflixAmazonCredentials account = sourceLookup.getCredentialsForId(pair.getUserId()); - if (pair.getGroupName() == null) { - if (account == null) { - pair.setGroupName(pair.getGroupId()); - } else { - sourceLookup.getSecurityGroupById(account.getName(), pair.getGroupId(), pair.getVpcId()) - .ifPresent(u -> pair.setGroupName(u.getSecurityGroup().getGroupName())); - } - } - return new MigrateSecurityGroupReference(pair, account); - }) - .collect(Collectors.toSet()); - } - - private MigrateSecurityGroupReference createTargetSecurityGroupReference(String sourceGroupId) { - MigrateSecurityGroupReference ref = new MigrateSecurityGroupReference(); - ref.setSourceName(source.getName()); - ref.setAccountId(target.getCredentials().getAccountId()); - ref.setVpcId(target.getVpcId()); - ref.setSourceId(sourceGroupId); - ref.setCredentials(target.getCredentials()); - ref.setTargetName(getTargetName(source.getName())); - return ref; - } - - private Map getVpcMappings(List accounts) { - Map mappings = new HashMap<>(); - if (target.getVpcId() == null) { - return mappings; - } - AmazonEC2 baseTarget = getAmazonClientProvider().getAmazonEC2(target.getCredentials(), target.getRegion()); - Vpc targetVpc = baseTarget.describeVpcs().getVpcs().stream() - .filter(vpc -> vpc.getVpcId().equals(target.getVpcId())) - .findFirst().orElse(null); - - String targetName = AmazonVpcProvider.getVpcName(targetVpc); - - accounts.forEach(account -> { - List vpcs = getAmazonClientProvider().getAmazonEC2(account, target.getRegion()).describeVpcs().getVpcs(); - Vpc match = vpcs.stream() - .filter(vpc -> AmazonVpcProvider.getVpcName(vpc).equals(targetName)) - .findFirst().orElse(null); - mappings.put(account.getAccountId(), match.getVpcId()); - }); - return mappings; - } - - // Creates a security group in the target location with no ingress rules - private SecurityGroupUpdater createDependentSecurityGroup(MigrateSecurityGroupReference reference) { - String sourceAccount = sourceLookup.getAccountNameForId(reference.getAccountId()); - Optional sourceGroup = sourceLookup.getSecurityGroupByName(sourceAccount, reference.getTargetName(), reference.getVpcId()); - String description = "Security group " + reference.getTargetName(); - if (sourceGroup.isPresent()) { - description = sourceGroup.get().getSecurityGroup().getDescription(); - } - UpsertSecurityGroupDescription upsertDescription = new UpsertSecurityGroupDescription(); - upsertDescription.setName(reference.getTargetName()); - upsertDescription.setCredentials(reference.getCredentials()); - upsertDescription.setDescription(description); - upsertDescription.setVpcId(reference.getVpcId()); - - 
getTask().updateStatus(SecurityGroupMigrator.BASE_PHASE, "Creating dependent security group " + - reference.getTargetName() + " in " + reference.getCredentialAccount() + "/" + target.getRegion() + "/" + target.getVpcId()); - return targetLookup.createSecurityGroup(upsertDescription); - } - - private void createTargetPermissions(SecurityGroup sourceGroup, - SecurityGroupUpdater targetGroup, - Set targetGroups, - MigrateSecurityGroupResult results) { - - List targetPermissions = SecurityGroupIngressConverter - .flattenPermissions(sourceGroup) - .stream() - .map(p -> { - p.setUserIdGroupPairs(p.getUserIdGroupPairs().stream().map(UserIdGroupPair::clone).collect(Collectors.toList())); - return p; - }) - .filter(p -> p.getUserIdGroupPairs().isEmpty() || - p.getUserIdGroupPairs().stream().allMatch(pair -> targetGroups.stream() - .anyMatch(g -> g.getSourceId().equals(pair.getGroupId())))) - .collect(Collectors.toList()); - - targetPermissions.forEach(permission -> - permission.getUserIdGroupPairs().forEach(pair -> { - MigrateSecurityGroupReference targetReference = targetGroups.stream().filter(group -> - group.getSourceId().equals(pair.getGroupId()) - ).findFirst().get(); - pair.setGroupId(targetReference.getTargetId()); - pair.setGroupName(null); - if (!targetGroup.getSecurityGroup().getOwnerId().equals(targetReference.getAccountId())) { - pair.setVpcId(targetReference.getVpcId()); - } - }) - ); - - filterOutExistingRules(targetPermissions, targetGroup.getSecurityGroup()); - results.setIngressUpdates(targetPermissions.stream().filter(p -> !p.getUserIdGroupPairs().isEmpty() || !p.getIpRanges().isEmpty()).collect(Collectors.toList())); - - if (!results.getIngressUpdates().isEmpty()) { - targetGroup.addIngress(targetPermissions); - } - } - - private void filterOutExistingRules(List permissionsToApply, SecurityGroup targetGroup) { - permissionsToApply.forEach(permission -> { - permission.getUserIdGroupPairs().removeIf(pair -> - targetGroup.getIpPermissions().stream().anyMatch(targetPermission -> - targetPermission.getFromPort().equals(permission.getFromPort()) - && targetPermission.getToPort().equals(permission.getToPort()) - && targetPermission.getUserIdGroupPairs().stream().anyMatch(t -> t.getGroupId().equals(pair.getGroupId())) - ) - ); - permission.getIpv4Ranges().removeIf(range -> - targetGroup.getIpPermissions().stream().anyMatch(targetPermission -> - targetPermission.getFromPort().equals(permission.getFromPort()) - && targetPermission.getToPort().equals(permission.getToPort()) - && targetPermission.getIpv4Ranges().contains(range) - ) - ); - permission.getIpv6Ranges().removeIf(range -> - targetGroup.getIpPermissions().stream().anyMatch(targetPermission -> - targetPermission.getFromPort().equals(permission.getFromPort()) - && targetPermission.getToPort().equals(permission.getToPort()) - && targetPermission.getIpv6Ranges().contains(range) - ) - ); - }); - } - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateServerGroupStrategy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateServerGroupStrategy.java deleted file mode 100644 index a41d500507b..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateServerGroupStrategy.java +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
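filterOutExistingRules above is what makes repeated migrations idempotent: a group pair or CIDR range is dropped when the target group already grants a permission with the same port range that references it. The same predicate reduced to a simple record (illustrative types, not the AWS SDK's):

import java.util.List;

class RuleFilter {
  // source is either a security group id or a CIDR range.
  record Rule(Integer fromPort, Integer toPort, String source) {}

  // Keeps only rules the target group does not already grant for the same port range.
  static List<Rule> withoutExisting(List<Rule> toApply, List<Rule> existing) {
    return toApply.stream()
        .filter(r -> existing.stream().noneMatch(e ->
            e.fromPort().equals(r.fromPort())
                && e.toPort().equals(r.toPort())
                && e.source().equals(r.source())))
        .toList();
  }
}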
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.amazonaws.services.autoscaling.model.AutoScalingGroup; -import com.amazonaws.services.autoscaling.model.LaunchConfiguration; -import com.amazonaws.services.autoscaling.model.SuspendedProcess; -import com.netflix.frigga.Names; -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; -import com.netflix.spinnaker.clouddriver.aws.deploy.converters.AllowLaunchAtomicOperationConverter; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription.Capacity; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription.Source; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.AllowLaunchAtomicOperation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.LoadBalancerLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.TargetLoadBalancerLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator.SecurityGroupLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateServerGroupResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ServerGroupMigrator.ServerGroupLocation; -import com.netflix.spinnaker.clouddriver.aws.deploy.validators.BasicAmazonDeployDescriptionValidator; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.services.AsgService; -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationErrors; -import org.springframework.validation.Errors; - -import java.util.*; -import java.util.stream.Collectors; - -public abstract class MigrateServerGroupStrategy implements MigrateStrategySupport { - - protected SecurityGroupLookup sourceLookup; - protected SecurityGroupLookup targetLookup; - protected ServerGroupLocation source; - protected ServerGroupLocation target; - protected boolean allowIngressFromClassic; - protected boolean dryRun; - protected String subnetType; - protected String elbSubnetType; - protected Map loadBalancerNameMapping; - - protected MigrateSecurityGroupStrategy migrateSecurityGroupStrategy; - protected MigrateLoadBalancerStrategy getMigrateLoadBalancerStrategy; - - abstract AmazonClientProvider 
getAmazonClientProvider(); - - abstract RegionScopedProviderFactory getRegionScopedProviderFactory(); - - abstract DeployDefaults getDeployDefaults(); - - abstract BasicAmazonDeployHandler getBasicAmazonDeployHandler(); - - abstract BasicAmazonDeployDescriptionValidator getBasicAmazonDeployDescriptionValidator(); - - abstract AllowLaunchAtomicOperationConverter getAllowLaunchAtomicOperationConverter(); - - - /** - * Migrates a server group and its associated load balancers and security groups from one location to another - * - * @param source the source server group - * @param target the target location in which to migrate - * @param sourceLookup a security group lookup cache for the source region - * @param targetLookup a security group lookup cache for the target region (may be the same object as the sourceLookup) - * @param migrateLoadBalancerStrategy the load balancer migration strategy - * @param migrateSecurityGroupStrategy the security group migration strategy - * @param subnetType the subnetType in which to migrate the server group (should be null for EC2 Classic migrations) - * @param elbSubnetType the subnetType in which to migrate load balancers - * @param iamRole the iamRole to use when migrating (optional) - * @param keyPair the keyPair to use when migrating (optional) - * @param targetAmi the target imageId to use when migrating (optional) - * @param loadBalancerNameMapping a mapping of source-to-target load balancer names - * @param allowIngressFromClassic if subnetType is present, and this is true, and app security groups are created - * via the deployDefaults, will add broad (80-65535) ingress from the classic link - * security group - * @param dryRun whether to perform the migration or simply calculate the migration - * @return a result set indicating the components required to perform the migration (if a dry run), or the objects - * updated by the migration (if not a dry run) - */ - public synchronized MigrateServerGroupResult generateResults(ServerGroupLocation source, ServerGroupLocation target, - SecurityGroupLookup sourceLookup, SecurityGroupLookup targetLookup, - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy, - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy, - String subnetType, String elbSubnetType, String iamRole, String keyPair, - String targetAmi, Map loadBalancerNameMapping, - boolean allowIngressFromClassic, boolean dryRun) { - - this.sourceLookup = sourceLookup; - this.targetLookup = targetLookup; - this.source = source; - this.target = target; - this.subnetType = subnetType; - this.elbSubnetType = elbSubnetType; - this.allowIngressFromClassic = allowIngressFromClassic; - this.loadBalancerNameMapping = loadBalancerNameMapping; - this.dryRun = dryRun; - this.migrateSecurityGroupStrategy = migrateSecurityGroupStrategy; - this.getMigrateLoadBalancerStrategy = migrateLoadBalancerStrategy; - - AsgService asgService = getRegionScopedProviderFactory().forRegion(source.getCredentials(), source.getRegion()) - .getAsgService(); - - AutoScalingGroup sourceGroup = asgService.getAutoScalingGroup(source.getName()); - - if (sourceGroup == null) { - throw new IllegalStateException("Error retrieving source server group: " + source.getName()); - } - - LaunchConfiguration launchConfig = asgService.getLaunchConfiguration(sourceGroup.getLaunchConfigurationName()); - - if (launchConfig == null) { - throw new IllegalStateException("Could not find launch config: " + sourceGroup.getLaunchConfigurationName()); - } - - Names names =
Names.parseName(source.getName()); - - List targetLoadBalancers = generateTargetLoadBalancers(sourceGroup); - - MigrateServerGroupResult migrateResult = new MigrateServerGroupResult(); - - List targetSecurityGroups = generateTargetSecurityGroups(launchConfig, migrateResult); - - Map<String, List<String>> zones = new HashMap<>(); - zones.put(target.getRegion(), target.getAvailabilityZones()); - - DeploymentResult result; - if (!dryRun) { - Capacity capacity = getCapacity(); - BasicAmazonDeployDescription deployDescription = new BasicAmazonDeployDescription(); - deployDescription.setSource(getSource(source)); - deployDescription.setCredentials(target.getCredentials()); - deployDescription.setAmiName(targetAmi != null ? targetAmi : launchConfig.getImageId()); - deployDescription.setApplication(names.getApp()); - deployDescription.setStack(names.getStack()); - deployDescription.setFreeFormDetails(names.getDetail()); - deployDescription.setInstanceMonitoring(launchConfig.getInstanceMonitoring().getEnabled()); - deployDescription.setInstanceType(launchConfig.getInstanceType()); - deployDescription.setIamRole(iamRole != null ? iamRole : launchConfig.getIamInstanceProfile()); - deployDescription.setKeyPair(keyPair != null ? keyPair : launchConfig.getKeyName()); - deployDescription.setAssociatePublicIpAddress(launchConfig.getAssociatePublicIpAddress()); - deployDescription.setCooldown(sourceGroup.getDefaultCooldown()); - deployDescription.setHealthCheckGracePeriod(sourceGroup.getHealthCheckGracePeriod()); - deployDescription.setHealthCheckType(sourceGroup.getHealthCheckType()); - deployDescription.setSuspendedProcesses(sourceGroup.getSuspendedProcesses().stream() - .map(SuspendedProcess::getProcessName).collect(Collectors.toSet())); - deployDescription.setTerminationPolicies(sourceGroup.getTerminationPolicies()); - deployDescription.setKernelId(launchConfig.getKernelId()); - deployDescription.setEbsOptimized(launchConfig.getEbsOptimized()); - deployDescription.setLoadBalancers(targetLoadBalancers.stream() - .map(MigrateLoadBalancerResult::getTargetName).collect(Collectors.toList())); - deployDescription.setSecurityGroups(targetSecurityGroups.stream() - .filter(sg -> !sg.getSkipped().contains(sg.getTarget())) - .map(sg -> sg.getTarget().getTargetName()).collect(Collectors.toList())); - deployDescription.setAvailabilityZones(zones); - deployDescription.setStartDisabled(true); - deployDescription.setCapacity(capacity); - deployDescription.setSubnetType(subnetType); - - BasicAmazonDeployDescription description = generateDescription(deployDescription); - - if (!source.getCredentialAccount().equals(target.getCredentialAccount())) { - Map allowLaunchMap = new HashMap<>(); - allowLaunchMap.put("credentials", source.getCredentialAccount()); - allowLaunchMap.put("account", target.getCredentialAccount()); - allowLaunchMap.put("region", target.getRegion()); - allowLaunchMap.put("amiName", deployDescription.getAmiName()); - AllowLaunchAtomicOperation operation = getAllowLaunchAtomicOperationConverter().convertOperation(allowLaunchMap); - - operation.operate(null); - } - - result = getBasicAmazonDeployHandler().handle(description, new ArrayList()); - } else { - result = new DeploymentResult(); - String targetName = getRegionScopedProviderFactory().forRegion(target.getCredentials(), target.getRegion()) - .getAWSServerGroupNameResolver() - .resolveNextServerGroupName(names.getApp(), names.getStack(), names.getDetail(), false); - - result.setServerGroupNames(Collections.singletonList(targetName)); - } -
migrateResult.setServerGroupNames(result.getServerGroupNames()); - migrateResult.setLoadBalancers(targetLoadBalancers); - migrateResult.setSecurityGroups(targetSecurityGroups); - return migrateResult; - } - - private BasicAmazonDeployDescription generateDescription(BasicAmazonDeployDescription deployDescription) { - BasicAmazonDeployDescription description = getBasicAmazonDeployHandler().copySourceAttributes( - getRegionScopedProviderFactory().forRegion(source.getCredentials(), source.getRegion()), source.getName(), - false, deployDescription); - - validateDeployDescription(description); - - return description; - } - - private void validateDeployDescription(BasicAmazonDeployDescription description) { - Errors errors = new DescriptionValidationErrors(description); - - getBasicAmazonDeployDescriptionValidator().validate(new ArrayList(), description, errors); - - if (errors.hasErrors()) { - throw new IllegalStateException("Invalid deployment configuration. Errors: " - + errors.getAllErrors().stream().flatMap(s -> Arrays.stream(s.getCodes())).collect(Collectors.toList())); - } - } - - private static Source getSource(ServerGroupLocation source) { - Source deploySource = new Source(); - deploySource.setAccount(source.getCredentialAccount()); - deploySource.setRegion(source.getRegion()); - deploySource.setAsgName(source.getName()); - return deploySource; - } - - private static Capacity getCapacity() { - Capacity capacity = new Capacity(); - capacity.setMin(0); - capacity.setMax(0); - capacity.setDesired(0); - return capacity; - } - - protected List generateTargetSecurityGroups(LaunchConfiguration sourceLaunchConfig, - MigrateServerGroupResult result) { - - sourceLaunchConfig.getSecurityGroups().stream() - .filter(g -> !sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).isPresent()) - .forEach(m -> result.getWarnings().add("Skipping creation of security group: " + m - + " (could not be found in source location)")); - - List securityGroupNames = sourceLaunchConfig.getSecurityGroups().stream() - .filter(g -> sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).isPresent()) - .map(g -> sourceLookup.getSecurityGroupById(source.getCredentialAccount(), g, source.getVpcId()).get()) - .map(g -> g.getSecurityGroup().getGroupName()) - .collect(Collectors.toList()); - - List targetSecurityGroups = securityGroupNames.stream().map(group -> - getMigrateSecurityGroupResult(group) - ).collect(Collectors.toList()); - - if (getDeployDefaults().getAddAppGroupToServerGroup()) { - Names names = Names.parseName(source.getName()); - // if the app security group is already present, don't include it twice - Optional appGroup = targetSecurityGroups.stream() - .filter(r -> names.getApp().equals(r.getTarget().getTargetName())).findFirst(); - if (!appGroup.isPresent()) { - appGroup = Optional.of(generateAppSecurityGroup()); - targetSecurityGroups.add(appGroup.get()); - } - handleClassicLinkIngress(appGroup.get().getTarget().getTargetId()); - } - - return targetSecurityGroups; - } - - protected List generateTargetLoadBalancers(AutoScalingGroup sourceGroup) { - return sourceGroup.getLoadBalancerNames().stream() - .map(this::getMigrateLoadBalancerResult) - .collect(Collectors.toList()); - } - - protected MigrateSecurityGroupResult generateAppSecurityGroup() { - Names names = Names.parseName(source.getName()); - SecurityGroupLocation appGroupLocation = new SecurityGroupLocation(); - appGroupLocation.setName(names.getApp()); - 
appGroupLocation.setRegion(source.getRegion()); - appGroupLocation.setCredentials(source.getCredentials()); - appGroupLocation.setVpcId(source.getVpcId()); - SecurityGroupMigrator migrator = new SecurityGroupMigrator(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - appGroupLocation, new SecurityGroupLocation(target)); - migrator.setCreateIfSourceMissing(true); - MigrateSecurityGroupResult result = migrator.migrate(dryRun); - handleClassicLinkIngress(result.getTarget().getTargetId()); - return result; - } - - protected void handleClassicLinkIngress(String securityGroupId) { - if (!dryRun && allowIngressFromClassic) { - addClassicLinkIngress(targetLookup, getDeployDefaults().getClassicLinkSecurityGroupName(), - securityGroupId, target.getCredentials(), target.getVpcId()); - } - } - - private MigrateSecurityGroupResult getMigrateSecurityGroupResult(String group) { - SecurityGroupLocation sourceLocation = new SecurityGroupLocation(); - sourceLocation.setName(group); - sourceLocation.setRegion(source.getRegion()); - sourceLocation.setCredentials(source.getCredentials()); - sourceLocation.setVpcId(source.getVpcId()); - return new SecurityGroupMigrator(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - sourceLocation, new SecurityGroupLocation(target)).migrate(dryRun); - } - - private MigrateLoadBalancerResult getMigrateLoadBalancerResult(String lbName) { - Names names = Names.parseName(source.getName()); - LoadBalancerLocation sourceLocation = new LoadBalancerLocation(); - sourceLocation.setName(lbName); - sourceLocation.setRegion(source.getRegion()); - sourceLocation.setVpcId(source.getVpcId()); - sourceLocation.setCredentials(source.getCredentials()); - TargetLoadBalancerLocation loadBalancerTarget = new TargetLoadBalancerLocation(sourceLocation, target); - if (loadBalancerNameMapping.containsKey(lbName)) { - loadBalancerTarget.setName(loadBalancerNameMapping.get(lbName)); - } - return new LoadBalancerMigrator(sourceLookup, targetLookup, getAmazonClientProvider(), getRegionScopedProviderFactory(), - migrateSecurityGroupStrategy, getDeployDefaults(), getMigrateLoadBalancerStrategy, sourceLocation, - loadBalancerTarget, elbSubnetType, names.getApp(), allowIngressFromClassic).migrate(dryRun); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateStrategySupport.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateStrategySupport.java deleted file mode 100644 index 2bcf82a1d34..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateStrategySupport.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
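Both deleted strategies funnel through a single generateResults(...) entry point whose dryRun flag selects between computing a preview and actually migrating, and the deploy path above deliberately creates an inert copy (capacity 0/0/0, startDisabled). A hypothetical driver showing the two-phase use of the signature documented above (the strategy, lookup, and location instances are assumed to exist; java.util.Collections is assumed imported):

// Preview first: nothing is created, the result describes what would happen.
MigrateServerGroupResult preview = strategy.generateResults(
    source, target, sourceLookup, targetLookup,
    lbStrategy, sgStrategy,
    "internal" /* subnetType */, "internal" /* elbSubnetType */,
    null /* iamRole: inherit from source */, null /* keyPair: inherit */,
    null /* targetAmi: inherit */, Collections.emptyMap() /* loadBalancerNameMapping */,
    false /* allowIngressFromClassic */, true /* dryRun */);

// Inspect preview.getSecurityGroups() / preview.getLoadBalancers(), then
// re-invoke with dryRun = false to perform the migration.
MigrateServerGroupResult applied = strategy.generateResults(
    source, target, sourceLookup, targetLookup,
    lbStrategy, sgStrategy, "internal", "internal",
    null, null, null, Collections.emptyMap(), false, false);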
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers; - -import com.amazonaws.services.ec2.model.IpPermission; -import com.amazonaws.services.ec2.model.SecurityGroup; -import com.amazonaws.services.ec2.model.UserIdGroupPair; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; - -import java.util.Collections; - -public interface MigrateStrategySupport { - - default void addClassicLinkIngress(SecurityGroupLookup lookup, String classicLinkGroupName, String groupId, NetflixAmazonCredentials credentials, String vpcId) { - if (classicLinkGroupName == null) { - return; - } - lookup.getSecurityGroupById(credentials.getName(), groupId, vpcId).ifPresent(targetGroupUpdater -> { - SecurityGroup targetGroup = targetGroupUpdater.getSecurityGroup(); - lookup.getSecurityGroupByName(credentials.getName(), classicLinkGroupName, vpcId) - .map(updater -> updater.getSecurityGroup().getGroupId()) - .ifPresent(classicLinkGroupId -> { - // don't attach if there's already some rule already configured - if (targetGroup.getIpPermissions().stream() - .anyMatch(p -> p.getUserIdGroupPairs().stream() - .anyMatch(p2 -> p2.getGroupId().equals(classicLinkGroupId)))) { - return; - } - targetGroupUpdater.addIngress(Collections.singletonList( - new IpPermission() - .withIpProtocol("tcp").withFromPort(80).withToPort(65535) - .withUserIdGroupPairs( - new UserIdGroupPair() - .withUserId(credentials.getAccountId()) - .withGroupId(classicLinkGroupId) - .withVpcId(vpcId) - ) - )); - }); - }); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AbstractEnableDisableAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AbstractEnableDisableAtomicOperation.groovy index fc1b2e4490c..2abb0aa6463 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AbstractEnableDisableAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AbstractEnableDisableAtomicOperation.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops import com.amazonaws.services.ec2.model.DescribeInstancesRequest import com.amazonaws.services.ec2.model.DescribeInstancesResult +import com.amazonaws.services.ec2.model.Filter import com.amazonaws.services.ec2.model.InstanceStateName import com.amazonaws.services.ec2.model.Reservation import com.amazonaws.services.elasticloadbalancing.model.DeregisterInstancesFromLoadBalancerRequest @@ -126,26 +127,40 @@ abstract class AbstractEnableDisableAtomicOperation implements AtomicOperation= 10) { task.updateStatus phaseName, "Failed to describe instances 10 times, aborting. This may happen if the server group has been disabled for a long period of time." 
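In EC2 API terms, the addClassicLinkIngress default method above amounts to a single broad authorization on the target group (TCP 80-65535 from the account's classic-link security group), skipped when a rule referencing that group already exists. The equivalent standalone request, with all identifiers as caller-supplied assumptions:

import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest;
import com.amazonaws.services.ec2.model.IpPermission;
import com.amazonaws.services.ec2.model.UserIdGroupPair;

class ClassicLinkIngress {
  // Grants TCP 80-65535 on targetGroupId from the classic-link group.
  static void authorize(AmazonEC2 ec2, String targetGroupId, String accountId,
                        String classicLinkGroupId, String vpcId) {
    ec2.authorizeSecurityGroupIngress(new AuthorizeSecurityGroupIngressRequest()
        .withGroupId(targetGroupId)
        .withIpPermissions(new IpPermission()
            .withIpProtocol("tcp")
            .withFromPort(80)
            .withToPort(65535)
            .withUserIdGroupPairs(new UserIdGroupPair()
                .withUserId(accountId)
                .withGroupId(classicLinkGroupId)
                .withVpcId(vpcId))));
  }
}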
- return false + + try { + // fallback to a tag-based instance lookup (will be slower in large region/accounts) + task.updateStatus phaseName, "Falling back to tag-based instance lookup" + reservations.addAll(fetchInstancesTaggedWithServerGroup(regionScopedProvider, serverGroupName)) + break + } catch (Exception e2) { + task.updateStatus phaseName, "Failed to lookup instances server group tag" + log.error( + "Failed to describe instances using server group name filter (serverGroup: {})", + serverGroupName, + e2 + ) + return false + } } } } - List filteredInstanceIds = [] + Set filteredInstanceIds = [] for (Reservation reservation : reservations) { filteredInstanceIds += reservation.getInstances().findAll { [ InstanceStateName.Running, InstanceStateName.Pending ].contains(InstanceStateName.fromValue(it.getState().getName())) }*.instanceId } - instanceIds = filteredInstanceIds + instanceIds = filteredInstanceIds as List } - if (instanceIds && credentials.discoveryEnabled && description.desiredPercentage && disable) { + if (instanceIds && description.desiredPercentage && disable) { instanceIds = discoverySupport.getInstanceToModify(credentials.name, region, serverGroupName, instanceIds, description.desiredPercentage) task.updateStatus phaseName, "Only disabling instances $instanceIds on ASG $serverGroupName with percentage ${description.desiredPercentage}" @@ -185,7 +200,7 @@ abstract class AbstractEnableDisableAtomicOperation implements AtomicOperation fetchInstancesTaggedWithServerGroup( + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider, + String serverGroupName + ) { + DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest().withFilters( + new Filter().withName(INSTANCE_ASG_TAG_NAME).withValues(serverGroupName) + ) + + DescribeInstancesResult describeInstancesResult = regionScopedProvider.amazonEC2.describeInstances( + describeInstancesRequest + ) + List reservations = describeInstancesResult.getReservations() + + while (describeInstancesResult.getNextToken()) { + describeInstancesRequest.setNextToken(describeInstancesResult.getNextToken()) + describeInstancesResult = regionScopedProvider.amazonEC2.describeInstances(describeInstancesRequest) + reservations += describeInstancesResult.getReservations() + } + + return reservations + } + private static void changeRegistrationOfInstancesWithTargetGroups(Collection targetGroupArns, Collection instanceIds, Closure actOnInstancesAndTargetGroup) { handleInstancesWithLoadBalancing(targetGroupArns, instanceIds, { new TargetDescription().withId(it) }, actOnInstancesAndTargetGroup) } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperation.groovy index c766e48baf6..e120a823ceb 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperation.groovy @@ -18,6 +18,10 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops import com.amazonaws.services.ec2.AmazonEC2 import com.amazonaws.services.ec2.model.* +import com.netflix.spinnaker.clouddriver.aws.deploy.AmiIdResolver +import com.netflix.spinnaker.clouddriver.aws.deploy.ResolvedAmiResult +import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription +import 
com.netflix.spinnaker.clouddriver.aws.model.AwsResultsRetriever import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials @@ -25,11 +29,8 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.helpers.OperationPoller import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.aws.deploy.AmiIdResolver -import com.netflix.spinnaker.clouddriver.aws.deploy.ResolvedAmiResult -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription -import com.netflix.spinnaker.clouddriver.aws.model.AwsResultsRetriever +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.kork.core.RetrySupport import groovy.transform.Canonical import org.springframework.beans.factory.annotation.Autowired @@ -50,39 +51,24 @@ class AllowLaunchAtomicOperation implements AtomicOperation { AmazonClientProvider amazonClientProvider @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override ResolvedAmiResult operate(List priorOutputs) { task.updateStatus BASE_PHASE, "Initializing Allow Launch Operation..." def sourceCredentials = description.credentials - def targetCredentials = accountCredentialsProvider.getCredentials(description.account) as NetflixAmazonCredentials + def targetCredentials = credentialsRepository.getOne(description.targetAccount) def sourceAmazonEC2 = amazonClientProvider.getAmazonEC2(description.credentials, description.region, true) def targetAmazonEC2 = amazonClientProvider.getAmazonEC2(targetCredentials, description.region, true) + def (ResolvedAmiResult resolvedAmi, ResolvedAmiLocation amiLocation) = new RetrySupport().retry({ -> + resolveAmi(targetCredentials, sourceAmazonEC2, targetAmazonEC2) + }, 5, 1000, false) - task.updateStatus BASE_PHASE, "Looking up AMI imageId '$description.amiName' in target accountId='$targetCredentials.accountId'" - ResolvedAmiResult resolvedAmi = AmiIdResolver.resolveAmiIdFromAllSources(targetAmazonEC2, description.region, description.amiName, targetCredentials.accountId) - - boolean existsInTarget = false - if (!resolvedAmi) { - task.updateStatus BASE_PHASE, "Looking up AMI imageId '$description.amiName' in source accountId='$description.credentials.accountId'" - resolvedAmi = AmiIdResolver.resolveAmiIdFromAllSources(sourceAmazonEC2, description.region, description.amiName, description.credentials.accountId) - } else { - existsInTarget = true - } - - if (!resolvedAmi && targetCredentials.allowPrivateThirdPartyImages) { - resolvedAmi = AmiIdResolver.resolveAmiId(targetAmazonEC2, description.region, description.amiName) - if (resolvedAmi) { - task.updateStatus BASE_PHASE, "AMI appears to be from a private third-party, which is permitted on this target account: skipping allow launch" - return resolvedAmi - } - } - - if (!resolvedAmi) { - throw new IllegalArgumentException("unable to resolve AMI imageId from '$description.amiName': If this is a private AMI owned by a third-party, you will need to contact them to share the AMI to your desired account(s)") + if (amiLocation == ResolvedAmiLocation.THIRD_PARTY) { + task.updateStatus BASE_PHASE, "AMI 
appears to be from a private third-party, which is permitted on this target account: skipping allow launch" + return resolvedAmi } // If the AMI is public, this is a no-op @@ -91,41 +77,53 @@ class AllowLaunchAtomicOperation implements AtomicOperation { return resolvedAmi } - // If the AMI was created/owned by a different account, switch to using that for modifying the image + def ownerCredentials = sourceCredentials + def ownerAmazonEC2 = sourceAmazonEC2 + // If the AMI was created/owned by a different account that is also managed by + // Spinnaker, switch to using that for modifying the image if (resolvedAmi.ownerId != sourceCredentials.accountId) { - if (resolvedAmi.getRegion()) - sourceCredentials = accountCredentialsProvider.all.find { accountCredentials -> - accountCredentials instanceof NetflixAmazonCredentials && - ((AmazonCredentials) accountCredentials).accountId == resolvedAmi.ownerId - } as NetflixAmazonCredentials - if (!sourceCredentials) { - throw new IllegalArgumentException("Unable to find owner of resolved AMI $resolvedAmi") + if (resolvedAmi.getRegion()) { + ownerCredentials = credentialsRepository.getAll().find { accountCredentials -> + ((AmazonCredentials) accountCredentials).accountId == resolvedAmi.ownerId + } } - sourceAmazonEC2 = amazonClientProvider.getAmazonEC2(sourceCredentials, description.region, true) + if (ownerCredentials) { + ownerAmazonEC2 = amazonClientProvider.getAmazonEC2(ownerCredentials, description.region, true) + } + } + + if (amiLocation == ResolvedAmiLocation.TARGET && !ownerCredentials) { + task.updateStatus BASE_PHASE, "AMI found in target account, but the AMI owner account is unmanaged: skipping allow launch and tag syncing" + return resolvedAmi + } + + if (amiLocation == ResolvedAmiLocation.SOURCE && !ownerCredentials) { + task.updateStatus BASE_PHASE, "AMI found in source account, but the owner account is unmanaged: unable to share AMI" + throw new IllegalArgumentException("Unable to find owner of resolved AMI $resolvedAmi") } - if (existsInTarget) { + if (amiLocation == ResolvedAmiLocation.TARGET) { task.updateStatus BASE_PHASE, "AMI found in target account: skipping allow launch" } else { - task.updateStatus BASE_PHASE, "Allowing launch of $description.amiName from $description.account" + task.updateStatus BASE_PHASE, "Allowing launch of $description.amiName from $description.account/$description.region to $description.targetAccount" OperationPoller.retryWithBackoff({ o -> - sourceAmazonEC2.modifyImageAttribute(new ModifyImageAttributeRequest().withImageId(resolvedAmi.amiId).withLaunchPermission( - new LaunchPermissionModifications().withAdd(new LaunchPermission().withUserId(targetCredentials.accountId)))) - }, 500, 3) + ownerAmazonEC2.modifyImageAttribute(new ModifyImageAttributeRequest().withImageId(resolvedAmi.amiId).withLaunchPermission( + new LaunchPermissionModifications().withAdd(new LaunchPermission().withUserId(targetCredentials.accountId)))) + }, 500, 3) } - if (sourceCredentials == targetCredentials) { + if (ownerCredentials == targetCredentials) { task.updateStatus BASE_PHASE, "Tag replication not required" } else { def request = new DescribeTagsRequest().withFilters(new Filter("resource-id").withValues(resolvedAmi.amiId)) Closure<Set<Tag>> getTags = { DescribeTagsRequest req, TagsRetriever ret -> new HashSet<Tag>(ret.retrieve(req).collect { new Tag(it.key, it.value) }) }.curry(request) - Set<Tag> sourceTags = getTags(new TagsRetriever(sourceAmazonEC2)) + Set<Tag> sourceTags = getTags(new TagsRetriever(ownerAmazonEC2)) if (sourceTags.isEmpty()) {
Thread.sleep(200) - sourceTags = getTags(new TagsRetriever(sourceAmazonEC2)) + sourceTags = getTags(new TagsRetriever(ownerAmazonEC2)) } if (sourceTags.isEmpty()) { task.updateStatus BASE_PHASE, "WARNING: empty tag set returned from DescribeTags, skipping tag sync" @@ -149,10 +147,41 @@ class AllowLaunchAtomicOperation implements AtomicOperation { } } - task.updateStatus BASE_PHASE, "Done allowing launch of $description.amiName from $description.account." + task.updateStatus BASE_PHASE, "Done allowing launch of $description.amiName from $description.account/$description.region to $description.targetAccount." resolvedAmi } + private static enum ResolvedAmiLocation { + TARGET, SOURCE, THIRD_PARTY + } + + private Tuple2 resolveAmi( + NetflixAmazonCredentials targetCredentials, + AmazonEC2 sourceAmazonEC2, + AmazonEC2 targetAmazonEC2 + ) { + task.updateStatus BASE_PHASE, "Looking up AMI imageId '$description.amiName' in target accountId='$targetCredentials.accountId'" + ResolvedAmiResult resolvedAmi = AmiIdResolver.resolveAmiIdFromAllSources(targetAmazonEC2, description.region, description.amiName, targetCredentials.accountId) + if (resolvedAmi) { + return new Tuple2(resolvedAmi, ResolvedAmiLocation.TARGET) + } + + task.updateStatus BASE_PHASE, "Looking up AMI imageId '$description.amiName' in source accountId='$description.credentials.accountId'" + resolvedAmi = AmiIdResolver.resolveAmiIdFromAllSources(sourceAmazonEC2, description.region, description.amiName, description.credentials.accountId) + if (resolvedAmi) { + return new Tuple2(resolvedAmi, ResolvedAmiLocation.SOURCE) + } + + if (targetCredentials.allowPrivateThirdPartyImages) { + resolvedAmi = AmiIdResolver.resolveAmiId(targetAmazonEC2, description.region, description.amiName) + if (resolvedAmi) { + return new Tuple2(resolvedAmi, ResolvedAmiLocation.THIRD_PARTY) + } + } + + throw new IllegalArgumentException("unable to resolve AMI imageId from '$description.amiName': If this is a private AMI owned by a third-party, you will need to contact them to share the AMI to your desired account(s)") + } + @Canonical static class TagsRetriever extends AwsResultsRetriever { final AmazonEC2 amazonEC2 diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperation.groovy index ab5ff3c2e89..44ad2834bf7 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperation.groovy @@ -15,27 +15,35 @@ */ package com.netflix.spinnaker.clouddriver.aws.deploy.ops + import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest +import com.amazonaws.services.autoscaling.model.DescribeLifecycleHooksRequest +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification import com.amazonaws.services.ec2.model.DescribeSubnetsRequest +import com.amazonaws.services.ec2.model.LaunchTemplateVersion +import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest import com.netflix.frigga.Names import com.netflix.frigga.autoscaling.AutoScalingGroupNameBuilder +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper +import 
com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.BasicAmazonDeployHandler +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties import com.netflix.spinnaker.clouddriver.aws.deploy.validators.BasicAmazonDeployDescriptionValidator +import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook +import com.netflix.spinnaker.clouddriver.aws.model.SubnetData import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationErrors import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationException import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.BasicAmazonDeployHandler -import com.netflix.spinnaker.clouddriver.aws.model.SubnetData -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired class CopyLastAsgAtomicOperation implements AtomicOperation { @@ -55,7 +63,7 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { AmazonClientProvider amazonClientProvider @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired RegionScopedProviderFactory regionScopedProviderFactory @@ -89,7 +97,7 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { def sourceAsgCredentials if (description.source.account && description.source.region && description.source.asgName) { sourceRegion = description.source.region - sourceAsgCredentials = accountCredentialsProvider.getCredentials(description.source.account) as NetflixAmazonCredentials + sourceAsgCredentials = credentialsRepository.getOne(description.source.account) def sourceAutoScaling = amazonClientProvider.getAutoScaling(sourceAsgCredentials, sourceRegion, true) def request = new DescribeAutoScalingGroupsRequest(autoScalingGroupNames: [description.source.asgName]) List ancestorAsgs = sourceAutoScaling.describeAutoScalingGroups(request).autoScalingGroups @@ -130,8 +138,108 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { } if (ancestorAsg) { + String iamInstanceProfile + String imageId + String instanceType + String spotMaxPrice + String keyName + String kernelId + String ramdiskId + String userData + String classicLinkVPCId = null + + Boolean ebsOptimized + Boolean instanceMonitoring + Boolean associatePublicIpAddress + + List securityGroups + List classicLinkVPCSecurityGroups = null + if (ancestorAsg.launchTemplate != null || ancestorAsg.mixedInstancesPolicy != null) { + final boolean isMip = ancestorAsg.mixedInstancesPolicy != null + + LaunchTemplateSpecification ancestorLtSpec + if (isMip) { + ancestorLtSpec = 
ancestorAsg.mixedInstancesPolicy.launchTemplate.launchTemplateSpecification + } else { + ancestorLtSpec = ancestorAsg.launchTemplate + } - def ancestorLaunchConfiguration = sourceRegionScopedProvider.asgService.getLaunchConfiguration(ancestorAsg.launchConfigurationName) + LaunchTemplateVersion launchTemplateVersion = sourceRegionScopedProvider + .launchTemplateService.getLaunchTemplateVersion(ancestorLtSpec) + .orElseThrow({ + new IllegalStateException("Requested launch template $ancestorLtSpec was not found") + }) + final ResponseLaunchTemplateData ancestorLtData = launchTemplateVersion.getLaunchTemplateData() + + imageId = ancestorLtData.imageId + keyName = ancestorLtData.keyName + kernelId = ancestorLtData.kernelId + userData = ancestorLtData.userData + ramdiskId = ancestorLtData.ramDiskId + instanceType = ancestorLtData.instanceType + securityGroups = ancestorLtData.securityGroups + ebsOptimized = ancestorLtData.ebsOptimized + iamInstanceProfile = ancestorLtData.iamInstanceProfile?.name + instanceMonitoring = ancestorLtData.monitoring?.enabled + spotMaxPrice = isMip + ? ancestorAsg.mixedInstancesPolicy.instancesDistribution.spotMaxPrice + : ancestorLtData.instanceMarketOptions?.spotOptions?.maxPrice + + newDescription.setLaunchTemplate = true + newDescription.enableEnclave = description.enableEnclave != null ? description.enableEnclave : ancestorLtData.enclaveOptions?.getEnabled() + newDescription.requireIMDSv2 = description.requireIMDSv2 != null ? description.requireIMDSv2 : ancestorLtData.metadataOptions?.httpTokens == "required" + newDescription.associateIPv6Address = description.associateIPv6Address + newDescription.unlimitedCpuCredits = description.unlimitedCpuCredits != null + ? description.unlimitedCpuCredits + : AsgConfigHelper.getUnlimitedCpuCreditsFromAncestorLt(ancestorLtData.creditSpecification, InstanceTypeUtils.isBurstingSupportedByAllTypes(description.getAllInstanceTypes())) + + if (!ancestorLtData.networkInterfaces?.empty && ancestorLtData.networkInterfaces*.associatePublicIpAddress?.any()) { + associatePublicIpAddress = true + } + if (!ancestorLtData.networkInterfaces?.empty) { + // Network interfaces are the source of truth for launch template security groups + def networkInterface = ancestorLtData.networkInterfaces.find({it.deviceIndex == 0 }) + if (networkInterface != null) { + securityGroups = networkInterface.groups + if (description.associateIPv6Address == null) { + newDescription.associateIPv6Address = networkInterface.getIpv6AddressCount() > 0 ? true : false + } + } + } + + if (isMip) { + def ancestorInstancesDistribution = ancestorAsg.mixedInstancesPolicy.instancesDistribution + newDescription.onDemandAllocationStrategy = description.onDemandAllocationStrategy != null ? description.onDemandAllocationStrategy : ancestorInstancesDistribution.onDemandAllocationStrategy + newDescription.onDemandBaseCapacity = description.onDemandBaseCapacity != null ? description.onDemandBaseCapacity : ancestorInstancesDistribution.onDemandBaseCapacity + newDescription.onDemandPercentageAboveBaseCapacity = description.onDemandPercentageAboveBaseCapacity != null ? description.onDemandPercentageAboveBaseCapacity : ancestorInstancesDistribution.onDemandPercentageAboveBaseCapacity + newDescription.spotAllocationStrategy = description.spotAllocationStrategy != null ? description.spotAllocationStrategy : ancestorInstancesDistribution.spotAllocationStrategy + newDescription.spotInstancePools = description.spotInstancePools != null + ? 
description.spotInstancePools + : newDescription.spotAllocationStrategy == "lowest-price" ? ancestorInstancesDistribution.spotInstancePools : null // return the spotInstancePools in ASG iff it is compatible with the spotAllocationStrategy + + newDescription.launchTemplateOverridesForInstanceType = description.launchTemplateOverridesForInstanceType != null && !description.launchTemplateOverridesForInstanceType.isEmpty() + ? description.launchTemplateOverridesForInstanceType + : AsgConfigHelper.getDescriptionOverrides(ancestorAsg.mixedInstancesPolicy.launchTemplate.overrides) + } + } else { + def ancestorLaunchConfiguration = sourceRegionScopedProvider + .asgService.getLaunchConfiguration(ancestorAsg.launchConfigurationName) + + keyName = ancestorLaunchConfiguration.keyName + imageId = ancestorLaunchConfiguration.imageId + kernelId = ancestorLaunchConfiguration.kernelId + userData = ancestorLaunchConfiguration.userData + ramdiskId = ancestorLaunchConfiguration.ramdiskId + spotMaxPrice = ancestorLaunchConfiguration.spotPrice + ebsOptimized = ancestorLaunchConfiguration.ebsOptimized + instanceType = ancestorLaunchConfiguration.instanceType + securityGroups = ancestorLaunchConfiguration.securityGroups + classicLinkVPCId = ancestorLaunchConfiguration.classicLinkVPCId + iamInstanceProfile = ancestorLaunchConfiguration.iamInstanceProfile + instanceMonitoring = ancestorLaunchConfiguration.instanceMonitoring?.enabled + associatePublicIpAddress = ancestorLaunchConfiguration.associatePublicIpAddress + classicLinkVPCSecurityGroups = ancestorLaunchConfiguration.classicLinkVPCSecurityGroups + } if (ancestorAsg.VPCZoneIdentifier) { task.updateStatus BASE_PHASE, "Looking up subnet type..." @@ -139,10 +247,10 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { task.updateStatus BASE_PHASE, "Found: ${newDescription.subnetType}." } - newDescription.iamRole = description.iamRole ?: ancestorLaunchConfiguration.iamInstanceProfile - newDescription.amiName = description.amiName ?: ancestorLaunchConfiguration.imageId + newDescription.iamRole = description.iamRole ?: iamInstanceProfile + newDescription.amiName = description.amiName ?: imageId newDescription.availabilityZones = [(targetRegion): description.availabilityZones[targetRegion] ?: ancestorAsg.availabilityZones] - newDescription.instanceType = description.instanceType ?: ancestorLaunchConfiguration.instanceType + newDescription.instanceType = description.instanceType ?: instanceType newDescription.loadBalancers = description.loadBalancers != null ? description.loadBalancers : ancestorAsg.loadBalancerNames newDescription.targetGroups = description.targetGroups if (newDescription.targetGroups == null && ancestorAsg.targetGroupARNs && ancestorAsg.targetGroupARNs.size() > 0) { @@ -151,27 +259,32 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { newDescription.targetGroups = targetGroupNames } - newDescription.securityGroups = description.securityGroups != null ? description.securityGroups : translateSecurityGroupIds(ancestorLaunchConfiguration.securityGroups) + newDescription.securityGroups = description.securityGroups != null ? description.securityGroups : translateSecurityGroupIds(securityGroups) newDescription.capacity.min = description.capacity?.min != null ? description.capacity.min : ancestorAsg.minSize newDescription.capacity.max = description.capacity?.max != null ? description.capacity.max : ancestorAsg.maxSize newDescription.capacity.desired = description.capacity?.desired != null ? 
description.capacity.desired : ancestorAsg.desiredCapacity - newDescription.keyPair = description.keyPair ?: (sourceIsTarget ? ancestorLaunchConfiguration.keyName : description.credentials.defaultKeyPair) - newDescription.associatePublicIpAddress = description.associatePublicIpAddress != null ? description.associatePublicIpAddress : ancestorLaunchConfiguration.associatePublicIpAddress + newDescription.keyPair = description.keyPair ?: (sourceIsTarget ? keyName : description.credentials.defaultKeyPair) + newDescription.associatePublicIpAddress = description.associatePublicIpAddress != null ? description.associatePublicIpAddress : associatePublicIpAddress newDescription.cooldown = description.cooldown != null ? description.cooldown : ancestorAsg.defaultCooldown newDescription.enabledMetrics = description.enabledMetrics != null ? description.enabledMetrics : ancestorAsg.enabledMetrics*.metric newDescription.healthCheckGracePeriod = description.healthCheckGracePeriod != null ? description.healthCheckGracePeriod : ancestorAsg.healthCheckGracePeriod newDescription.healthCheckType = description.healthCheckType ?: ancestorAsg.healthCheckType newDescription.suspendedProcesses = description.suspendedProcesses != null ? description.suspendedProcesses : ancestorAsg.suspendedProcesses*.processName newDescription.terminationPolicies = description.terminationPolicies != null ? description.terminationPolicies : ancestorAsg.terminationPolicies - newDescription.kernelId = description.kernelId ?: (ancestorLaunchConfiguration.kernelId ?: null) - newDescription.ramdiskId = description.ramdiskId ?: (ancestorLaunchConfiguration.ramdiskId ?: null) - newDescription.instanceMonitoring = description.instanceMonitoring != null ? description.instanceMonitoring : ancestorLaunchConfiguration.instanceMonitoring?.enabled - newDescription.ebsOptimized = description.ebsOptimized != null ? description.ebsOptimized : ancestorLaunchConfiguration.ebsOptimized - newDescription.classicLinkVpcId = description.classicLinkVpcId != null ? description.classicLinkVpcId : ancestorLaunchConfiguration.classicLinkVPCId - newDescription.classicLinkVpcSecurityGroups = description.classicLinkVpcSecurityGroups != null ? description.classicLinkVpcSecurityGroups : translateSecurityGroupIds(ancestorLaunchConfiguration.classicLinkVPCSecurityGroups) - newDescription.tags = description.tags != null ? description.tags : ancestorAsg.tags.collectEntries { + newDescription.kernelId = description.kernelId ?: (kernelId ?: null) + newDescription.ramdiskId = description.ramdiskId ?: (ramdiskId ?: null) + newDescription.instanceMonitoring = description.instanceMonitoring != null ? description.instanceMonitoring : instanceMonitoring + newDescription.ebsOptimized = description.ebsOptimized != null ? description.ebsOptimized : ebsOptimized + newDescription.classicLinkVpcId = description.classicLinkVpcId != null ? description.classicLinkVpcId : classicLinkVPCId + newDescription.classicLinkVpcSecurityGroups = description.classicLinkVpcSecurityGroups != null ? description.classicLinkVpcSecurityGroups : translateSecurityGroupIds(classicLinkVPCSecurityGroups) + newDescription.tags = description.tags != null ? description.tags : ancestorAsg.tags?.collectEntries { [(it.getKey()): it.getValue()] } + newDescription.blockDevices = description.blockDevices != null ? 
description.blockDevices : basicAmazonDeployHandler.buildBlockDeviceMappingsFromSourceAsg(sourceRegionScopedProvider, ancestorAsg, description) + newDescription.capacityRebalance = description.capacityRebalance != null ? description.capacityRebalance : ancestorAsg.capacityRebalance + newDescription.lifecycleHooks = description.lifecycleHooks != null && !description.lifecycleHooks.isEmpty() + ? description.lifecycleHooks + : getLifecycleHooksFromAncestor(sourceRegion, ancestorAsg.autoScalingGroupName, description) /* Copy over the ancestor user data only if the UserDataProviders behavior is disabled and no user data is provided @@ -179,11 +292,11 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { This is to avoid having duplicate user data. */ if (localFileUserDataProperties && !localFileUserDataProperties.enabled) { - newDescription.base64UserData = description.base64UserData != null ? description.base64UserData : ancestorLaunchConfiguration.userData + newDescription.base64UserData = description.base64UserData != null ? description.base64UserData : userData } if (description.spotPrice == null) { - newDescription.spotPrice = ancestorLaunchConfiguration.spotPrice + newDescription.spotPrice = spotMaxPrice } else if (description.spotPrice) { newDescription.spotPrice = description.spotPrice } else { // "" @@ -208,6 +321,8 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { result.serverGroupNames.addAll(thisResult.serverGroupNames) result.deployedNames.addAll(thisResult.deployedNames) + result.deployments.addAll(thisResult.deployments) + result.createdArtifacts.addAll(thisResult.createdArtifacts) result.messages.addAll(thisResult.messages) thisResult.serverGroupNameByRegion.entrySet().each { result.serverGroupNameByRegion[it.key] = it.value } thisResult.deployedNamesByLocation.entrySet().each { result.deployedNamesByLocation[it.key] = it.value } @@ -228,4 +343,23 @@ class CopyLastAsgAtomicOperation implements AtomicOperation { } return null } + + private List getLifecycleHooksFromAncestor(String region, String ancestorAsgName, BasicAmazonDeployDescription description) { + def autoscaling = amazonClientProvider.getAutoScaling(description.credentials, region, true) + def result = autoscaling.describeLifecycleHooks(new DescribeLifecycleHooksRequest().withAutoScalingGroupName(ancestorAsgName)) + if (result && result.lifecycleHooks) { + return result.lifecycleHooks + .stream() + .collect { new AmazonAsgLifecycleHook( + roleARN: it.roleARN, + notificationTargetARN: it.notificationTargetARN, + notificationMetadata: it.notificationMetadata, + lifecycleTransition: AmazonAsgLifecycleHook.Transition.valueOfName(it.lifecycleTransition), + heartbeatTimeout: it.heartbeatTimeout, + defaultResult: it.defaultResult) + } + } + + return [] + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperation.groovy index 3089cc8992a..9cb4b311fb9 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperation.groovy @@ -17,11 +17,15 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops import com.amazonaws.AmazonClientException +import com.amazonaws.services.autoscaling.AmazonAutoScaling import 
com.amazonaws.services.autoscaling.model.AmazonAutoScalingException import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.amazonaws.services.autoscaling.model.DeleteAutoScalingGroupRequest import com.amazonaws.services.autoscaling.model.DeleteLaunchConfigurationRequest import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.AmazonEC2Exception +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateRequest import com.amazonaws.services.ec2.model.TerminateInstancesRequest import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.deploy.description.DestroyAsgDescription @@ -31,8 +35,11 @@ import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.events.DeleteServerGroupEvent import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent +import com.netflix.spinnaker.kork.core.RetrySupport import org.springframework.beans.factory.annotation.Autowired +import java.time.Duration + class DestroyAsgAtomicOperation implements AtomicOperation { protected static final MAX_SIMULTANEOUS_TERMINATIONS = 100 private static final String BASE_PHASE = "DESTROY_ASG" @@ -46,6 +53,7 @@ class DestroyAsgAtomicOperation implements AtomicOperation { private final DestroyAsgDescription description private final Collection events = [] + private final RetrySupport retrySupport = new RetrySupport() DestroyAsgAtomicOperation(DestroyAsgDescription description) { this.description = description @@ -92,20 +100,8 @@ class DestroyAsgAtomicOperation implements AtomicOperation { autoScaling.deleteAutoScalingGroup(new DeleteAutoScalingGroupRequest( autoScalingGroupName: asgName, forceDelete: true)) - if (autoScalingGroup.launchConfigurationName) { - task.updateStatus BASE_PHASE, "Deleting launch config ${autoScalingGroup.launchConfigurationName} in $region." - try { - autoScaling.deleteLaunchConfiguration( - new DeleteLaunchConfigurationRequest(launchConfigurationName: autoScalingGroup.launchConfigurationName) - ) - } catch (AmazonAutoScalingException e) { - // Ignore not found exception - if (!e.message.toLowerCase().contains("launch configuration name not found")) { - throw e - } - } - } def ec2 = amazonClientProvider.getAmazonEC2(credentials, region, true) + deleteLaunchSetting(autoScalingGroup, autoScaling, ec2, region) for (int i = 0; i < instanceIds.size(); i += MAX_SIMULTANEOUS_TERMINATIONS) { int end = Math.min(instanceIds.size(), i + MAX_SIMULTANEOUS_TERMINATIONS) @@ -118,4 +114,37 @@ class DestroyAsgAtomicOperation implements AtomicOperation { } } + private void deleteLaunchSetting( + AutoScalingGroup autoScalingGroup, AmazonAutoScaling autoScaling, AmazonEC2 amazonEC2, String region) { + retrySupport.retry({ + if (autoScalingGroup.launchConfigurationName) { + String lcName = autoScalingGroup.launchConfigurationName + + getTask().updateStatus BASE_PHASE, "Deleting launch config $lcName in $region." 
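+          // a "not found" response below is swallowed so that each RetrySupport attempt stays idempotent when an earlier try already deleted the config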
+          try { +            autoScaling.deleteLaunchConfiguration( +              new DeleteLaunchConfigurationRequest(launchConfigurationName: lcName)) +          } catch (AmazonAutoScalingException e) { +            if (!e.message.toLowerCase().contains("launch configuration name not found")) { +              throw e +            } +          } +        } else if (autoScalingGroup.launchTemplate || autoScalingGroup.mixedInstancesPolicy) { +          String launchTemplateId = autoScalingGroup.launchTemplate +            ? autoScalingGroup.launchTemplate.launchTemplateId +            : autoScalingGroup.mixedInstancesPolicy?.launchTemplate?.launchTemplateSpecification.launchTemplateId + +          getTask().updateStatus BASE_PHASE, "Deleting launch template $launchTemplateId in $region." +          try { +            amazonEC2.deleteLaunchTemplate( +              new DeleteLaunchTemplateRequest(launchTemplateId: launchTemplateId)) +          } catch (AmazonEC2Exception e) { +            // Ignore not found exception +            if (!e.message.toLowerCase().contains("does not exist")) { +              throw e +            } +          } +        } +      }, 5, Duration.ofSeconds(1), true) +  } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperation.groovy index dde790dfa4d..b4b435a9888 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperation.groovy @@ -78,9 +78,9 @@ class DetachInstancesAtomicOperation implements AtomicOperation { return false } - if ((autoScalingGroup.desiredCapacity - validInstanceIds.size()) < autoScalingGroup.minSize) { + int newMin = autoScalingGroup.desiredCapacity - validInstanceIds.size() + if (description.decrementDesiredCapacity && newMin < autoScalingGroup.minSize) { if (description.adjustMinIfNecessary) { - int newMin = autoScalingGroup.desiredCapacity - validInstanceIds.size() if (newMin < 0) { task.updateStatus BASE_PHASE, "Cannot adjust min size below 0" } else { diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgAtomicOperation.groovy index db8283b3ee3..24a9ff656f0 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgAtomicOperation.groovy @@ -80,7 +80,9 @@ class ModifyAsgAtomicOperation implements AtomicOperation { .withHealthCheckGracePeriod(description.healthCheckGracePeriod) .withHealthCheckType(description.healthCheckType) .withTerminationPolicies(description.terminationPolicies) + .withCapacityRebalance(description.capacityRebalance) + // enable / disable metrics def desiredMetrics = description.enabledMetrics ?: [] def metricsToDisable = [] asg.enabledMetrics.each { @@ -109,6 +111,7 @@ class ModifyAsgAtomicOperation implements AtomicOperation { .withMetrics(metricsToEnable)) } + // update server group autoScaling.updateAutoScalingGroup(updateRequest) task.updateStatus BASE_PHASE, "Updated $asgName in $region..."
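Reviewer note on the retry pattern: the AllowLaunch and DestroyAsg hunks above both delegate their retry loops to kork's RetrySupport instead of hand-rolling them. A minimal Groovy sketch of the two overloads this diff exercises (fetchAmi and cleanUpLaunchSetting are hypothetical placeholders, not part of this change):

import com.netflix.spinnaker.kork.core.RetrySupport
import java.time.Duration

def retrySupport = new RetrySupport()

// fixed back-off: up to 5 attempts, pausing 1000ms between tries (the AllowLaunchAtomicOperation style)
def ami = retrySupport.retry({ -> fetchAmi() }, 5, 1000, false)

// exponential back-off: up to 5 attempts, starting from a 1s delay (the DestroyAsgAtomicOperation style)
retrySupport.retry({ -> cleanUpLaunchSetting() }, 5, Duration.ofSeconds(1), true)

Once the attempt budget is exhausted the last exception propagates to the caller, which is why both call sites keep the retried closures idempotent.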
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperation.groovy index 283b904a872..e200d04d8b5 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperation.groovy @@ -21,9 +21,10 @@ import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest import com.netflix.frigga.Names import com.netflix.spinnaker.config.AwsConfiguration import com.netflix.spinnaker.clouddriver.aws.deploy.AmiIdResolver -import com.netflix.spinnaker.clouddriver.aws.deploy.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig import com.netflix.spinnaker.clouddriver.aws.deploy.ResolvedAmiResult import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyAsgLaunchConfigurationDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchConfigurationBuilder.LaunchConfigurationSettings import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository @@ -63,43 +64,32 @@ class ModifyAsgLaunchConfigurationOperation implements AtomicOperation { def settings = lcBuilder.buildSettingsFromLaunchConfiguration(description.credentials, description.region, existingLc) - def props = [:] + LaunchConfigurationSettings.LaunchConfigurationSettingsBuilder newSettingsBuilder = settings.toBuilder() if (!asg.getVPCZoneIdentifier() && !settings.classicLinkVpcId) { def classicLinkVpc = regionScopedProvider.amazonEC2.describeVpcClassicLink().vpcs.find { it.classicLinkEnabled } if (classicLinkVpc) { - props.classicLinkVpcId = classicLinkVpc.vpcId + newSettingsBuilder.classicLinkVpcId(classicLinkVpc.vpcId) if (deployDefaults.classicLinkSecurityGroupName) { - props.classicLinkVpcSecurityGroups = [ deployDefaults.classicLinkSecurityGroupName ] + newSettingsBuilder.classicLinkVpcSecurityGroups([deployDefaults.classicLinkSecurityGroupName]) } } } - def settingsKeys = settings.properties.keySet() - props = props + description.properties.findResults { k, v -> (v != null && settingsKeys.contains(k)) ? 
[k, v] : null }.collectEntries() - props.remove('class') - - if (props.spotPrice == "") { - // a spotPrice of "" indicates that it should be removed regardless of value on source launch configuration - props.spotPrice = null - } + newSettingsBuilder = copyFromDescription(description, newSettingsBuilder, settings) if (description.amiName) { def amazonEC2 = regionScopedProvider.amazonEC2 ResolvedAmiResult ami = priorOutputs.find({ - it instanceof ResolvedAmiResult && it.region == description.region && it.amiName == description.amiName + it instanceof ResolvedAmiResult && it.region == description.region && (it.amiName == description.amiName || it.amiId == description.amiName) }) ?: AmiIdResolver.resolveAmiIdFromAllSources(amazonEC2, description.region, description.amiName, description.credentials.accountId) - props.ami = ami.amiId - } - - if (description.securityGroupsAppendOnly) { - props.securityGroups = settings.securityGroups + description.securityGroups + newSettingsBuilder.ami(ami.amiId) } //if we are changing instance types and don't have explicitly supplied block device mappings if (!description.blockDevices && description.instanceType != null && description.instanceType != settings.instanceType) { if (!description.copySourceCustomBlockDeviceMappings) { - props.blockDevices = blockDeviceConfig.getBlockDevicesForInstanceType(description.instanceType) + newSettingsBuilder.blockDevices(blockDeviceConfig.getBlockDevicesForInstanceType(description.instanceType)) } else { def blockDevicesForSourceLaunchConfig = settings.blockDevices.collect { [deviceName: it.deviceName, virtualName: it.virtualName, size: it.size] @@ -112,20 +102,21 @@ class ModifyAsgLaunchConfigurationOperation implements AtomicOperation { if (blockDevicesForSourceLaunchConfig == blockDevicesForSourceInstanceType) { // use default block mappings for the new instance type (since default block mappings were used on the previous instance type) - props.blockDevices = blockDeviceConfig.getBlockDevicesForInstanceType(description.instanceType) + newSettingsBuilder.blockDevices(blockDeviceConfig.getBlockDevicesForInstanceType(description.instanceType)) } } } - def newSettings = settings.copyWith(props) - + def newSettings = newSettingsBuilder.build() + String resultLaunchConfigName = existingLc if (newSettings == settings && description.legacyUdf == null) { task.updateStatus BASE_PHASE, "No changes required for launch configuration on $description.asgName in $description.region" } else { - newSettings = newSettings.copyWith(suffix: null) + newSettings = newSettings.toBuilder().suffix(null).build() def name = Names.parseName(description.asgName) - def newLc = lcBuilder.buildLaunchConfiguration(name.app, description.subnetType, newSettings, description.legacyUdf) + def newLc = lcBuilder.buildLaunchConfiguration(name.app, description.subnetType, newSettings, description.legacyUdf, description.getUserDataOverride()) + resultLaunchConfigName = newLc def autoScaling = regionScopedProvider.autoScaling if (!newSettings.instanceMonitoring && settings.instanceMonitoring) { @@ -140,8 +131,38 @@ class ModifyAsgLaunchConfigurationOperation implements AtomicOperation { .withLaunchConfigurationName(newLc)) } + task.addResultObjects([[launchConfigurationName: resultLaunchConfigName]]) task.updateStatus BASE_PHASE, "completed for $description.asgName in $description.region." 
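+    // results are surfaced via task.addResultObjects above; operate() itself intentionally returns null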
null } + private LaunchConfigurationSettings.LaunchConfigurationSettingsBuilder copyFromDescription( + ModifyAsgLaunchConfigurationDescription description, + LaunchConfigurationSettings.LaunchConfigurationSettingsBuilder settingsBuilder, + LaunchConfigurationSettings srcSettings) { + + Optional.ofNullable(description.region).ifPresent { settingsBuilder.region(description.region) } + Optional.ofNullable(description.instanceType).ifPresent { settingsBuilder.instanceType(description.instanceType) } + Optional.ofNullable(description.iamRole).ifPresent { settingsBuilder.iamRole(description.iamRole) } + Optional.ofNullable(description.keyPair).ifPresent { settingsBuilder.keyPair(description.keyPair) } + Optional.ofNullable(description.associatePublicIpAddress).ifPresent { + settingsBuilder.associatePublicIpAddress(description.associatePublicIpAddress) } + Optional.ofNullable(description.spotPrice).ifPresent { + settingsBuilder.spotMaxPrice(description.spotPrice == "" ? null : description.spotPrice)} // a spotMaxPrice of "" indicates that it should be removed regardless of value on source launch configuration + Optional.ofNullable(description.ramdiskId).ifPresent { settingsBuilder.ramdiskId(description.ramdiskId) } + Optional.ofNullable(description.instanceMonitoring).ifPresent { + settingsBuilder.instanceMonitoring(Boolean.valueOf(description.instanceMonitoring)) } + Optional.ofNullable(description.ebsOptimized).ifPresent { settingsBuilder.ebsOptimized(Boolean.valueOf(description.ebsOptimized)) } + Optional.ofNullable(description.classicLinkVpcId).ifPresent { settingsBuilder.classicLinkVpcId(description.classicLinkVpcId) } + Optional.ofNullable(description.classicLinkVpcSecurityGroups).ifPresent { settingsBuilder.classicLinkVpcSecurityGroups(description.classicLinkVpcSecurityGroups) } + Optional.ofNullable(description.base64UserData).ifPresent { settingsBuilder.base64UserData(description.base64UserData) } + Optional.ofNullable(description.blockDevices).ifPresent { settingsBuilder.blockDevices(description.blockDevices) } + Optional.ofNullable(description.securityGroups).ifPresent { + settingsBuilder.securityGroups(description.securityGroupsAppendOnly + ?
srcSettings.securityGroups + description.securityGroups + : description.securityGroups)} + + return settingsBuilder + } + } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperation.groovy index 7a203614c16..e9a19df02d5 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperation.groovy @@ -52,6 +52,11 @@ class TerminateInstanceAndDecrementAsgAtomicOperation implements AtomicOperation task.updateStatus BASE_PHASE, "Initializing termination of $description.instance in $description.asgName" def autoScaling = amazonClientProvider.getAutoScaling(description.credentials, description.region, true) def asg = getAsg(autoScaling, description.asgName) + if (!asg.instances*.instanceId.contains(description.instance)) { + task.updateStatus BASE_PHASE, "Cannot terminate invalid instance $description.instance in server group $asg.autoScalingGroupName" + throw new IllegalArgumentException("Invalid instance $description.instance for $asg.autoScalingGroupName") + } + if (asg.minSize == asg.desiredCapacity) { if (description.adjustMinIfNecessary) { int newMin = asg.minSize - 1 diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperation.groovy index a1890ee3319..622b5a5dc57 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperation.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops import com.amazonaws.services.autoscaling.model.PutLifecycleHookRequest -import com.netflix.spinnaker.clouddriver.aws.deploy.AsgLifecycleHookWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAsgLifecycleHookDescription import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.services.IdGenerator diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AbstractEnableDisableInstanceDiscoveryAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AbstractEnableDisableInstanceDiscoveryAtomicOperation.groovy index 97ab765652e..6bf1db1620a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AbstractEnableDisableInstanceDiscoveryAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AbstractEnableDisableInstanceDiscoveryAtomicOperation.groovy @@ -68,9 +68,9 @@ abstract class AbstractEnableDisableInstanceDiscoveryAtomicOperation implements return } - def status = isEnable() ? 
AbstractEurekaSupport.DiscoveryStatus.Enable : AbstractEurekaSupport.DiscoveryStatus.Disable + def status = isEnable() ? AbstractEurekaSupport.DiscoveryStatus.UP : AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE discoverySupport.updateDiscoveryStatusForInstances( - description, task, phaseName, status, instancesInAsg*.instanceId + description, task, phaseName, status, instancesInAsg*.instanceId, true ) } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AwsEurekaSupport.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AwsEurekaSupport.groovy index e8aac1ff012..29ee150b742 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AwsEurekaSupport.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/AwsEurekaSupport.groovy @@ -45,8 +45,6 @@ class AwsEurekaSupport extends AbstractEurekaSupport { return eureka } - @VisibleForTesting - @PackageScope boolean verifyInstanceAndAsgExist(def credentials, String region, String instanceId, diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperation.groovy index 177ec93abb1..3e91f986c66 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperation.groovy @@ -17,23 +17,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer import com.amazonaws.AmazonServiceException -import com.amazonaws.services.elasticloadbalancingv2.model.DeleteListenerRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DeleteLoadBalancerRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DeleteTargetGroupRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeListenersRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeListenersResult -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult -import com.amazonaws.services.elasticloadbalancingv2.model.Listener -import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancer -import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup +import com.amazonaws.services.elasticloadbalancingv2.model.* import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLoadBalancerDescription import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import groovy.transform.InheritConstructors import org.springframework.beans.factory.annotation.Autowired class DeleteAmazonLoadBalancerV2AtomicOperation implements AtomicOperation { @@ -62,7 +52,7 @@ class 
DeleteAmazonLoadBalancerV2AtomicOperation implements AtomicOperation // Make sure load balancer exists LoadBalancer loadBalancer try { - DescribeLoadBalancersResult result = loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: [description.loadBalancerName] )) + DescribeLoadBalancersResult result = loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: [description.loadBalancerName])) loadBalancer = result.loadBalancers.size() > 0 ? result.loadBalancers.get(0) : null } catch (AmazonServiceException ignore) { } @@ -70,6 +60,13 @@ class DeleteAmazonLoadBalancerV2AtomicOperation implements AtomicOperation if (loadBalancer) { task.updateStatus BASE_PHASE, "Deleting ${description.loadBalancerName} in ${region} for ${description.credentials.name}." + // fail if deletion protection is enabled for the load balancer + def attributes = loadBalancing.describeLoadBalancerAttributes(new DescribeLoadBalancerAttributesRequest().withLoadBalancerArn(loadBalancer.loadBalancerArn))?.attributes ?: [] + LoadBalancerAttribute deleteProtectionAttribute = attributes.find { it.key == "deletion_protection.enabled" } + if (deleteProtectionAttribute != null && deleteProtectionAttribute.getValue().toString().equals("true")) { + throw new DeletionProtectionEnabledException("Load Balancer ${loadBalancer.loadBalancerName} has deletion protection enabled. Aborting delete operation.") + } + // Describe target groups and listeners for the load balancer. // We have to describe them both first because you can't delete a target group that has a listener associated with it // and if you delete the listener, it loses its association with the load balancer @@ -79,7 +76,7 @@ class DeleteAmazonLoadBalancerV2AtomicOperation implements AtomicOperation List listeners = listenersResult.listeners // Delete listeners - for(Listener listener : listeners) { + for (Listener listener : listeners) { DeleteListenerRequest deleteListenerRequest = new DeleteListenerRequest(listenerArn: listener.listenerArn) try { loadBalancing.deleteListener(deleteListenerRequest) @@ -90,7 +87,7 @@ class DeleteAmazonLoadBalancerV2AtomicOperation implements AtomicOperation } // Delete target groups - for(TargetGroup targetGroup: targetGroups) { + for (TargetGroup targetGroup : targetGroups) { DeleteTargetGroupRequest deleteTargetGroupRequest = new DeleteTargetGroupRequest(targetGroupArn: targetGroup.targetGroupArn) try { loadBalancing.deleteTargetGroup(deleteTargetGroupRequest) @@ -110,4 +107,8 @@ class DeleteAmazonLoadBalancerV2AtomicOperation implements AtomicOperation task.updateStatus BASE_PHASE, "Done deleting ${description.loadBalancerName} in ${description.regions} for ${description.credentials.name}." null } + + @InheritConstructors + static class DeletionProtectionEnabledException extends Exception {} + } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/LoadBalancerMigrator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/LoadBalancerMigrator.groovy deleted file mode 100644 index 9b1ebcaf672..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/LoadBalancerMigrator.groovy +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer - -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfigurationTarget -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ServerGroupMigrator.ServerGroupLocation -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository - -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup - -class LoadBalancerMigrator { - - public static final String BASE_PHASE = "MIGRATE_LOAD_BALANCER" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - AmazonClientProvider amazonClientProvider - RegionScopedProviderFactory regionScopedProviderFactory - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy - DeployDefaults deployDefaults - - MigrateLoadBalancerStrategy migrationStrategy - LoadBalancerLocation source - TargetLoadBalancerLocation target - SecurityGroupLookup sourceLookup - SecurityGroupLookup targetLookup - String applicationName - String subnetType - boolean allowIngressFromClassic - - LoadBalancerMigrator(SecurityGroupLookup sourceLookup, - SecurityGroupLookup targetLookup, - AmazonClientProvider amazonClientProvider, - RegionScopedProviderFactory regionScopedProviderFactory, - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy, - DeployDefaults deployDefaults, - MigrateLoadBalancerStrategy migrationStrategy, - LoadBalancerLocation source, - TargetLoadBalancerLocation target, - String subnetType, - String applicationName, - boolean allowIngressFromClassic) { - - this.sourceLookup = sourceLookup - this.targetLookup = targetLookup - this.amazonClientProvider = amazonClientProvider - this.regionScopedProviderFactory = regionScopedProviderFactory - this.migrateSecurityGroupStrategy = migrateSecurityGroupStrategy - this.deployDefaults = deployDefaults - this.migrationStrategy = migrationStrategy - this.source = source - this.target = target - this.subnetType = subnetType - this.applicationName = applicationName - this.allowIngressFromClassic = allowIngressFromClassic - } - - public MigrateLoadBalancerResult migrate(boolean dryRun) { - task.updateStatus BASE_PHASE, (dryRun ? 
"Calculating" : "Beginning") + " migration of load balancer " + source.toString() - def results = migrationStrategy.generateResults(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - source, target, subnetType, applicationName, allowIngressFromClassic, dryRun) - task.updateStatus BASE_PHASE, "Migration of load balancer " + source.toString() + - (dryRun ? " calculated" : " completed") + ". Migrated load balancer name: " + results.targetName + - (results.targetExists ? " (already exists)": "") - results - } - - public static class LoadBalancerLocation extends AbstractAmazonCredentialsDescription { - String name - String region - String vpcId - - @Override - String toString() { - "$name in $credentialAccount/$region" + (vpcId ? "/$vpcId" : "") - } - } - - public static class TargetLoadBalancerLocation extends LoadBalancerLocation { - List availabilityZones - boolean useZonesFromSource - - TargetLoadBalancerLocation() {} - - TargetLoadBalancerLocation(LoadBalancerLocation sourceLocation, ServerGroupLocation serverGroupLocation) { - this.credentials = serverGroupLocation.credentials - this.region = serverGroupLocation.region - this.vpcId = serverGroupLocation.vpcId - this.useZonesFromSource = isSameRegion(sourceLocation) - this.availabilityZones = useZonesFromSource ? [] : serverGroupLocation.availabilityZones - } - - TargetLoadBalancerLocation(LoadBalancerLocation sourceLocation, ClusterConfigurationTarget clusterConfigurationTarget) { - this.credentials = clusterConfigurationTarget.credentials - this.region = clusterConfigurationTarget.region - this.vpcId = clusterConfigurationTarget.vpcId - this.useZonesFromSource = isSameRegion(sourceLocation) - this.availabilityZones = useZonesFromSource ? [] : clusterConfigurationTarget.availabilityZones - } - - - private boolean isSameRegion(LoadBalancerLocation sourceLocation) { - return credentialAccount == sourceLocation?.credentialAccount && region == sourceLocation?.region - } - - - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerAtomicOperation.groovy deleted file mode 100644 index 21ce7e6d79d..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerAtomicOperation.groovy +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer - -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.springframework.beans.factory.annotation.Autowired - -import javax.inject.Provider - -class MigrateLoadBalancerAtomicOperation implements AtomicOperation { - - final MigrateLoadBalancerDescription description - - MigrateLoadBalancerAtomicOperation(MigrateLoadBalancerDescription description) { - this.description = description - } - - @Autowired - Provider migrationStrategy - - @Autowired - SecurityGroupLookupFactory securityGroupLookupFactory - - @Autowired - AmazonClientProvider amazonClientProvider - - @Autowired - RegionScopedProviderFactory regionScopedProviderFactory - - @Autowired - Provider migrateSecurityGroupStrategy - - @Autowired - DeployDefaults deployDefaults - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - Void operate(List priorOutputs) { - SecurityGroupLookup sourceLookup = securityGroupLookupFactory.getInstance(description.source.region, false) - SecurityGroupLookup targetLookup = description.source.region == description.target.region ? - sourceLookup : - securityGroupLookupFactory.getInstance(description.target.region, false) - - def migrator = new LoadBalancerMigrator(sourceLookup, targetLookup, amazonClientProvider, - regionScopedProviderFactory, migrateSecurityGroupStrategy.get(), deployDefaults, migrationStrategy.get(), - description.source, description.target, description.subnetType, description.application, - description.allowIngressFromClassic) - - task.addResultObjects([migrator.migrate(description.dryRun)]) - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerResult.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerResult.groovy deleted file mode 100644 index 5eacd57e4c7..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerResult.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult - -class MigrateLoadBalancerResult { - List securityGroups = [] - boolean targetExists - boolean newNameRequired - String targetName - List warnings = [] -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/TargetGroupLookupHelper.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/TargetGroupLookupHelper.groovy index 6944c371430..300015174b7 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/TargetGroupLookupHelper.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/TargetGroupLookupHelper.groovy @@ -16,12 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer +import com.amazonaws.AmazonServiceException import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancerNotFoundException import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupNotFoundException import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import java.lang.reflect.InvocationTargetException +import java.lang.reflect.UndeclaredThrowableException + class TargetGroupLookupHelper { static class TargetGroupLookupResult { @@ -55,8 +59,32 @@ class TargetGroupLookupHelper { // ignore } catch (TargetGroupNotFoundException ignore) { // ignore + } catch (UndeclaredThrowableException e) { + // There are Edda calls hidden behind an .invoke from the AWS SDK.
Exceptions from + // those methods show up wrapped in UndeclaredThrowableException and/or InvocationTargetException. + // If it is a legitimate failure, it makes sense to throw the actual error. + boolean rethrow = true + Throwable toRethrow = e.undeclaredThrowable + + if (e.undeclaredThrowable instanceof InvocationTargetException) { + InvocationTargetException ite = (InvocationTargetException)e.undeclaredThrowable + + toRethrow = ite.targetException + if (ite.targetException instanceof AmazonServiceException) { + AmazonServiceException ase = (AmazonServiceException)ite.targetException + + if (ase.statusCode == 404) { + rethrow = false + } + } + } + + if (rethrow) { + throw toRethrow + } } } + allTargetGroups.removeAll(targetGroups) result.unknownTargetGroups.addAll(allTargetGroups) diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerAtomicOperation.groovy index 2be53b8e84b..0d2afb2a68c 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerAtomicOperation.groovy @@ -17,44 +17,26 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer import com.amazonaws.AmazonServiceException -import com.amazonaws.services.ec2.model.IpPermission -import com.amazonaws.services.ec2.model.SecurityGroup -import com.amazonaws.services.ec2.model.UserIdGroupPair -import com.amazonaws.services.elasticloadbalancing.model.ConfigureHealthCheckRequest -import com.amazonaws.services.elasticloadbalancing.model.ConnectionDraining -import com.amazonaws.services.elasticloadbalancing.model.CrossZoneLoadBalancing -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancerAttributesRequest -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersRequest -import com.amazonaws.services.elasticloadbalancing.model.HealthCheck -import com.amazonaws.services.elasticloadbalancing.model.Listener -import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes -import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription -import com.amazonaws.services.elasticloadbalancing.model.ModifyLoadBalancerAttributesRequest +import com.amazonaws.services.elasticloadbalancing.model.* import com.amazonaws.services.shield.AWSShield import com.amazonaws.services.shield.model.CreateProtectionRequest import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.frigga.Names -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerClassicDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.LoadBalancerUpsertHandler import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.UpsertAmazonLoadBalancerResult.LoadBalancer -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupIngressConverter import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory import
com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.helpers.OperationPoller import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import groovy.transform.InheritConstructors +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater /** * An AtomicOperation for creating an Elastic Load Balancer from the description of {@link UpsertAmazonLoadBalancerClassicDescription}. * @@ -77,6 +59,9 @@ class UpsertAmazonLoadBalancerAtomicOperation implements AtomicOperation listeners = [] description.listeners .each { UpsertAmazonLoadBalancerClassicDescription.Listener listener -> def awsListener = new Listener() @@ -135,8 +120,8 @@ class UpsertAmazonLoadBalancerAtomicOperation implements AtomicOperation securityGroups = regionScopedProvider.securityGroupService.getSecurityGroupIds(description.securityGroups, description.vpcId) + .collect { it.value } log.info("security groups on {} {}", description.name, securityGroups) String dnsName if (!loadBalancer) { @@ -147,23 +132,29 @@ class UpsertAmazonLoadBalancerAtomicOperation implements AtomicOperation ports = listeners.collect { l -> l.getInstancePort() } + if (description.healthCheckPort) { + ports.add(description.healthCheckPort) + } + IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult ingressLoadBalancerResult = ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup( application, region, - listeners, + description.account, + description.credentials, + description.vpcId, + ports, securityGroupLookupFactory ) - securityGroupNamesToIds.put(ingressLoadBalancerResult.groupName, ingressLoadBalancerResult.groupId) task.updateStatus BASE_PHASE, "Authorized app ELB Security Group ${ingressLoadBalancerResult}" + securityGroups.add(ingressLoadBalancerResult.groupId) } catch (Exception e) { log.error("Failed to authorize app ELB security group {}-elb on application security group", application, e) task.updateStatus BASE_PHASE, "Failed to authorize app ELB security group ${application}-elb on application security group" @@ -207,6 +198,7 @@ class UpsertAmazonLoadBalancerAtomicOperation implements AtomicOperation loadBalancerListeners, - SecurityGroupLookupFactory securityGroupLookupFactory) throws FailedSecurityGroupIngressException { - SecurityGroupLookup securityGroupLookup = securityGroupLookupFactory.getInstance(region) - - // 1. get app load balancer security group & app security group. 
create if doesn't exist - SecurityGroupUpdater applicationLoadBalancerSecurityGroupUpdater = getOrCreateSecurityGroup( - application + "-elb", - region, - "Application ELB Security Group for $application", - description, - securityGroupLookup - ) - - SecurityGroupUpdater applicationSecurityGroupUpdater = getOrCreateSecurityGroup( - application, - region, - "Application Security Group for $application", - description, - securityGroupLookup - ) - - def source = applicationLoadBalancerSecurityGroupUpdater.securityGroup - def target = applicationSecurityGroupUpdater.securityGroup - List currentPermissions = SecurityGroupIngressConverter.flattenPermissions(target) - List targetPermissions = loadBalancerListeners.collect { - newIpPermissionWithSourceAndPort(source.groupId, it.getInstancePort()) - } - - if (!includesRulesWithHealthCheckPort(targetPermissions, description, source) && description.healthCheckPort) { - targetPermissions.add( - newIpPermissionWithSourceAndPort(source.groupId, description.healthCheckPort) - ) - } - - filterOutExistingPermissions(targetPermissions, currentPermissions) - if (targetPermissions) { - try { - applicationSecurityGroupUpdater.addIngress(targetPermissions) - } catch (Exception e) { - throw new FailedSecurityGroupIngressException(e) - } - } - - return new IngressLoadBalancerGroupResult(source.groupId, source.groupName) - } - - private static class IngressLoadBalancerGroupResult { - private final String groupId - private final String groupName - - IngressLoadBalancerGroupResult(String groupId, String groupName) { - this.groupId = groupId - this.groupName = groupName - } - - @Override - String toString() { - return "IngressLoadBalancerGroupResult{" + - "groupId='" + groupId + '\'' + - ", groupName='" + groupName + '\'' + - '}' - } - } - - private static IpPermission newIpPermissionWithSourceAndPort(String sourceGroupId, int port) { - return new IpPermission( - ipProtocol: "tcp", - fromPort: port, - toPort: port, - userIdGroupPairs: [ - new UserIdGroupPair().withGroupId(sourceGroupId) - ] - ) - } - - private static boolean includesRulesWithHealthCheckPort(List targetPermissions, - UpsertAmazonLoadBalancerClassicDescription description, - SecurityGroup source) { - return targetPermissions.find { - description.healthCheckPort && it.fromPort == description.healthCheckPort && - it.toPort == description.healthCheckPort && source.groupId in it.userIdGroupPairs*.groupId - } != null - } - - private static SecurityGroupUpdater getOrCreateSecurityGroup(String groupName, - String region, - String descriptionText, - UpsertAmazonLoadBalancerClassicDescription description, - SecurityGroupLookup securityGroupLookup) { - SecurityGroupUpdater securityGroupUpdater = null - OperationPoller.retryWithBackoff({ - securityGroupUpdater = securityGroupLookup.getSecurityGroupByName( - description.credentialAccount, - groupName, - description.vpcId - ).orElse(null) - - if (!securityGroupUpdater) { - securityGroupUpdater = securityGroupLookup.createSecurityGroup( - new UpsertSecurityGroupDescription( - name: groupName, - description: descriptionText, - vpcId: description.vpcId, - region: region, - credentials: description.credentials - ) - ) - } - }, 500, 3) - - return securityGroupUpdater - } - - private static void filterOutExistingPermissions(List permissionsToAdd, - List existingPermissions) { - permissionsToAdd.each { permission -> - permission.getUserIdGroupPairs().removeIf { pair -> - existingPermissions.find { p -> - p.getFromPort() == permission.getFromPort() && - p.getToPort() == 
permission.getToPort() && - pair.groupId && pair.groupId in p.userIdGroupPairs*.groupId - } != null - } - - permission.getIpv4Ranges().removeIf { range -> - existingPermissions.find { p -> - p.getFromPort() == permission.getFromPort() && - p.getToPort() == permission.getToPort() && - range in p.ipv4Ranges - } != null - } - - permission.getIpv6Ranges().removeIf { range -> - existingPermissions.find { p -> - p.getFromPort() == permission.getFromPort() && - p.getToPort() == permission.getToPort() && - range in p.ipv6Ranges - } != null - } - } - - permissionsToAdd.removeIf { permission -> !permission.userIdGroupPairs } - } - private static String loadBalancerArn(String accountId, String region, String name) { return "arn:aws:elasticloadbalancing:$accountId:$region:loadbalancer/$name" } - - @InheritConstructors - static class FailedSecurityGroupIngressException extends Exception {} } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperation.groovy index 31671dcf42d..70d92618b93 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperation.groovy @@ -23,16 +23,19 @@ import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancer import com.amazonaws.services.shield.AWSShield import com.amazonaws.services.shield.model.CreateProtectionRequest import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults +import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerV2Description import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.LoadBalancerV2UpsertHandler +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory import com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired /** @@ -40,6 +43,7 @@ import org.springframework.beans.factory.annotation.Autowired * * */ +@Slf4j class UpsertAmazonLoadBalancerV2AtomicOperation implements AtomicOperation { private static final String BASE_PHASE = "CREATE_ELB_V2" @@ -53,6 +57,12 @@ class UpsertAmazonLoadBalancerV2AtomicOperation implements AtomicOperation 0 ? 
result.loadBalancers.get(0) : null } catch (AmazonServiceException ignore) { } @@ -100,9 +110,10 @@ class UpsertAmazonLoadBalancerV2AtomicOperation implements AtomicOperation securityGroups) { + // require that we have addAppGroupToServerGroup as well as createLoadBalancerIngressPermissions + // set since the load balancer ingress assumes that application group is the target of those + // permissions + if (deployDefaults.createLoadBalancerIngressPermissions && deployDefaults.addAppGroupToServerGroup) { + String application = null + try { + application = Names.parseName(description.name).getApp() ?: Names.parseName(description.clusterName).getApp() + Set ports = [] + description.targetGroups.each { tg -> + ports.add(tg.port) + if (tg.healthCheckPort && tg.healthCheckPort != "traffic-port") { + ports.add(Integer.parseInt(tg.healthCheckPort, 10)) + } + } + IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult ingressLoadBalancerResult = ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup( + application, + region, + description.account, + description.credentials, + description.vpcId, + ports, + securityGroupLookupFactory + ) + if (!securityGroups.any { it == ingressLoadBalancerResult.groupId }) { + securityGroups.add(ingressLoadBalancerResult.groupId) + } + + task.updateStatus BASE_PHASE, "Authorized app ELB Security Group ${ingressLoadBalancerResult}" + } catch (Exception e) { + log.error("Failed to authorize app LB security group {}-elb on application security group", application, e) + task.updateStatus BASE_PHASE, "Failed to authorize app ELB security group ${application}-elb on application security group" + } + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupAtomicOperation.groovy deleted file mode 100644 index 20994606206..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupAtomicOperation.groovy +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.springframework.beans.factory.annotation.Autowired - -import javax.inject.Provider - -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup - -class MigrateSecurityGroupAtomicOperation implements AtomicOperation { - - final MigrateSecurityGroupDescription description - - MigrateSecurityGroupAtomicOperation(MigrateSecurityGroupDescription description) { - this.description = description - } - - @Autowired - SecurityGroupLookupFactory securityGroupLookupFactory - - @Autowired - Provider migrationStrategy - - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - Void operate(List priorOutputs) { - SecurityGroupLookup sourceLookup = securityGroupLookupFactory.getInstance(description.source.region, false) - SecurityGroupLookup targetLookup = description.source.region == description.target.region ? - sourceLookup : - securityGroupLookupFactory.getInstance(description.target.region, false) - - task.addResultObjects( [new SecurityGroupMigrator(sourceLookup, targetLookup, migrationStrategy.get(), description.source, description.target) - .migrate(description.dryRun)]) - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupReference.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupReference.groovy deleted file mode 100644 index b4346219845..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupReference.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup - -import com.amazonaws.services.ec2.model.UserIdGroupPair -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import groovy.transform.EqualsAndHashCode - -@EqualsAndHashCode -class MigrateSecurityGroupReference extends AbstractAmazonCredentialsDescription { - String accountId - String vpcId - String targetName - String sourceName - String explanation - String sourceId - String targetId - - MigrateSecurityGroupReference() {} - - MigrateSecurityGroupReference(UserIdGroupPair pair, NetflixAmazonCredentials credentials) { - this.accountId = pair.userId - this.vpcId = pair.vpcId - this.sourceName = pair.groupName - this.sourceId = pair.groupId - this.credentials = credentials - } - - @Override - String getAccount() { - getCredentialAccount() - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupResult.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupResult.groovy deleted file mode 100644 index 64bdf2c69fc..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/MigrateSecurityGroupResult.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup - -import com.amazonaws.services.ec2.model.IpPermission -import com.fasterxml.jackson.annotation.JsonProperty - -class MigrateSecurityGroupResult { - Collection skipped = [] - Collection warnings = [] - Collection created = [] - Collection reused = [] - Collection errors = [] - MigrateSecurityGroupReference target - Collection ingressUpdates = [] - - @JsonProperty("targetExists") - boolean targetExists() { - reused.contains(target) - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupIngressConverter.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupIngressConverter.groovy index e960669fb64..a282a34c45f 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupIngressConverter.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupIngressConverter.groovy @@ -61,31 +61,31 @@ class SecurityGroupIngressConverter { List ipPermissions = description.ipIngress.collect { ingress -> IpPermission permission = new IpPermission(ipProtocol: ingress.ipProtocol, fromPort: ingress.startPort, toPort: ingress.endPort) if (ingress.cidr?.contains(':')) { - permission.ipv6Ranges = [new Ipv6Range().withCidrIpv6(ingress.cidr)] + permission.ipv6Ranges = [new Ipv6Range().withCidrIpv6(ingress.cidr).withDescription(ingress.description)] } else { - permission.ipv4Ranges = [new IpRange().withCidrIp(ingress.cidr)] + permission.ipv4Ranges = [new IpRange().withCidrIp(ingress.cidr).withDescription(ingress.description)] } permission } description.securityGroupIngress.each { ingress -> - final accountName = ingress.accountName ?: description.credentialAccount + final accountName = ingress.accountName ?: description.account final accountId = ingress.accountId ?: securityGroupLookup.getAccountIdForName(accountName) final vpcId = ingress.vpcId ?: description.vpcId def newUserIdGroupPair = null if (ingress.id) { newUserIdGroupPair = new UserIdGroupPair(userId: accountId, groupId: ingress.id, vpcId: ingress.vpcId) } else { - final ingressSecurityGroup = securityGroupLookup.getSecurityGroupByName(accountName, ingress.name, vpcId) - if (ingressSecurityGroup.present) { - final groupId = ingressSecurityGroup.get().getSecurityGroup().groupId - newUserIdGroupPair = new UserIdGroupPair(userId: accountId, groupId: groupId, vpcId: ingress.vpcId) + final ingressSecurityGroup = securityGroupLookup.getSecurityGroupByName(accountName, ingress.name, vpcId) + if (ingressSecurityGroup.present) { + final groupId = ingressSecurityGroup.get().getSecurityGroup().groupId + newUserIdGroupPair = new UserIdGroupPair(userId: accountId, groupId: groupId, vpcId: ingress.vpcId) + } else { + if (description.vpcId) { + missing.add(ingress) } else { - if (description.vpcId) { - missing.add(ingress) - } else { - newUserIdGroupPair = new UserIdGroupPair(userId: accountId, groupName: ingress.name) - } + newUserIdGroupPair = new UserIdGroupPair(userId: accountId, groupName: ingress.name) } + } } if (newUserIdGroupPair) { @@ -96,7 +96,7 @@ class SecurityGroupIngressConverter { } new ConvertedIngress(ipPermissions, new MissingSecurityGroups( all: missing, - selfReferencing: missing.findAll { it.name == description.name && it.accountName == description.credentialAccount } + selfReferencing: missing.findAll { it.name == 
description.name && it.accountName == description.account } )) } @@ -107,21 +107,18 @@ class SecurityGroupIngressConverter { it.groupName = null it.peeringStatus = null it.vpcPeeringConnectionId = null - it.description = null // not passed in via the UI new IpPermission() .withFromPort(ipPermission.fromPort) .withToPort(ipPermission.toPort) .withIpProtocol(ipPermission.ipProtocol) .withUserIdGroupPairs(it) } + ipPermission.ipv4Ranges.collect { - it.description = null // not passed in via the UI new IpPermission() .withFromPort(ipPermission.fromPort) .withToPort(ipPermission.toPort) .withIpProtocol(ipPermission.ipProtocol) .withIpv4Ranges(it) } + ipPermission.ipv6Ranges.collect { - it.description = null // not passed in via the UI new IpPermission() .withFromPort(ipPermission.fromPort) .withToPort(ipPermission.toPort) @@ -130,4 +127,109 @@ } }.flatten().unique() } + + /** + * Computes the delta between the existing rules and the new rules, considering only CIDR-based rules. + * @param newList the rules from the description + * @param existingRules the rules currently on the security group + * @return the rules that need to be added, removed, and updated. + * Any rule present in the description but not in the existing rules gets added to the add list. + * Any rule not present in the description but present in the existing rules gets added to the remove list. + * A rule that matches an existing rule is added to the update list as follows: + * - If the new rule has a description value, add it to the update list to keep descriptions consistent. + * - If the new rule has no description value set, ignore it. + */ + static IpRuleDelta computeIpRuleDelta(List newList, List existingRules) { + List tobeAdded = new ArrayList<>() + List tobeRemoved = new ArrayList<>() + List tobeUpdated = new ArrayList<>() + List filteredNewList = newList.findAll { ipPermission -> ipPermission.userIdGroupPairs.isEmpty() } + List filteredExistingRuleList = existingRules.findAll { existingRule -> existingRule.userIdGroupPairs.isEmpty()} + filteredNewList.forEach({ newListEntry -> + IpPermission match = findIpPermission(filteredExistingRuleList, newListEntry) + if (match) { + if (newListEntry.ipv4Ranges.collect { it.description }.any() + || newListEntry.ipv6Ranges.collect { it.description }.any()) { + tobeUpdated.add(newListEntry) // matches an old rule, needs a description update + } + filteredExistingRuleList.remove(match) // remove from future processing + } else { + tobeAdded.add(newListEntry) // no match among old rules, so it must be added + } + }) + tobeRemoved = filteredExistingRuleList // rules that need to be removed + return new IpRuleDelta(tobeAdded, tobeRemoved, tobeUpdated) + } + + static IpPermission findIpPermission(List existingList, IpPermission ipPermission) { + existingList.find { it -> + (((it.ipv4Ranges.collect { it.cidrIp }.sort() == ipPermission.ipv4Ranges.collect { it.cidrIp }.sort() + && it.fromPort == ipPermission.fromPort + && it.toPort == ipPermission.toPort + && it.ipProtocol == ipPermission.ipProtocol) && !ipPermission.ipv4Ranges.isEmpty()) + || ((it.ipv6Ranges.collect { it.cidrIpv6 }.sort() == ipPermission.ipv6Ranges.collect { it.cidrIpv6 }.sort() + && it.fromPort == ipPermission.fromPort + && it.toPort == ipPermission.toPort + && it.ipProtocol == ipPermission.ipProtocol) && !ipPermission.ipv6Ranges.isEmpty())) + } + }
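Before the second, group-pair flavor of the delta below, a dependency-free restatement of the bucketing idea implemented by computeIpRuleDelta above may help. This toy uses plain Groovy maps instead of the SDK's IpPermission, and all rule values and helper names are invented for the example:

// Two rules are "the same" when protocol, port range and CIDR set agree;
// the description field deliberately does not participate in matching.
boolean sameRule(Map a, Map b) {
  a.ipProtocol == b.ipProtocol && a.fromPort == b.fromPort &&
    a.toPort == b.toPort && a.cidrs.sort() == b.cidrs.sort()
}

// Bucket desired vs existing rules into add/remove/update: a matched rule
// carrying a description becomes an update, an unmatched desired rule an
// add, and leftover existing rules become removes.
Map computeDelta(List desired, List existing) {
  def toAdd = [], toUpdate = [], remaining = new ArrayList(existing)
  desired.each { rule ->
    def match = remaining.find { sameRule(it, rule) }
    if (match) {
      if (rule.description) { toUpdate << rule }
      remaining.remove(match)
    } else {
      toAdd << rule
    }
  }
  [toAdd: toAdd, toRemove: remaining, toUpdate: toUpdate]
}

def desired  = [[ipProtocol: 'tcp', fromPort: 443, toPort: 443, cidrs: ['10.0.0.0/8'], description: 'intranet']]
def existing = [[ipProtocol: 'tcp', fromPort: 443, toPort: 443, cidrs: ['10.0.0.0/8']],
                [ipProtocol: 'tcp', fromPort: 80, toPort: 80, cidrs: ['0.0.0.0/0']]]
def delta = computeDelta(desired, existing)
assert delta.toUpdate.size() == 1 && delta.toRemove.size() == 1 && delta.toAdd.isEmpty()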
+ /** + * Computes the delta between the existing rules and the new rules, considering only rules that reference other security groups. + * @param newList the rules from the description + * @param existingRules the rules currently on the security group + * @return the rules that need to be added, removed, and updated. + * Any rule present in the description but not in the existing rules gets added to the add list. + * Any rule not present in the description but present in the existing rules gets added to the remove list. + * A rule that matches an existing rule is added to the update list as follows: + * - If the new rule has a description value, add it to the update list to keep descriptions consistent. + * - If the new rule has no description value set, ignore it. + */ + static UserIdGroupPairsDelta computeUserIdGroupPairsDelta(List newList, List existingRules) { + List tobeAdded = new ArrayList<>() + List tobeRemoved = new ArrayList<>() + List tobeUpdated = new ArrayList<>() + List filteredNewList = newList.findAll { ipPermission -> ipPermission.userIdGroupPairs.size() != 0 } + List filteredExistingRuleList = existingRules.findAll { existingRule -> existingRule.userIdGroupPairs.size() != 0 } + filteredNewList.forEach({ newListEntry -> + IpPermission match = findUserIdGroupPermission(filteredExistingRuleList, newListEntry) + if (match) { + if (newListEntry.userIdGroupPairs.collect { it.description }.any()) { + tobeUpdated.add(newListEntry) // matches an old rule, needs a description update + } + filteredExistingRuleList.remove(match) // remove from future processing + } else { + tobeAdded.add(newListEntry) // no match among old rules, so it must be added + } + }) + tobeRemoved = filteredExistingRuleList // rules that need to be removed + return new UserIdGroupPairsDelta(tobeAdded, tobeRemoved, tobeUpdated) + } + + static IpPermission findUserIdGroupPermission(List existingList, IpPermission ipPermission) { + existingList.find { it -> + (it.userIdGroupPairs.collect { it.groupId }.sort() == ipPermission.userIdGroupPairs.collect { it.groupId }.sort() + && it.userIdGroupPairs.collect { it.userId }.sort() == ipPermission.userIdGroupPairs.collect { it.userId }.sort() + && it.fromPort == ipPermission.fromPort + && it.toPort == ipPermission.toPort + && it.ipProtocol == ipPermission.ipProtocol) + } + } + + @Canonical + static class IpRuleDelta { + List toAdd + List toRemove + List toUpdate + } + + @Canonical + static class UserIdGroupPairsDelta { + List toAdd + List toRemove + List toUpdate + } + } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupFactory.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupFactory.groovy index f2060c357e8..64c1fdeb97d 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupFactory.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupFactory.groovy @@ -17,30 +17,27 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest -import com.amazonaws.services.ec2.model.CreateSecurityGroupRequest -import com.amazonaws.services.ec2.model.CreateTagsRequest -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest -import com.amazonaws.services.ec2.model.Filter -import com.amazonaws.services.ec2.model.IpPermission -import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest -import com.amazonaws.services.ec2.model.SecurityGroup -import com.amazonaws.services.ec2.model.Tag +import com.amazonaws.services.ec2.model.* import com.google.common.collect.ImmutableSet import
com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.kork.core.RetrySupport +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.exceptions.IntegrationException +import org.slf4j.Logger +import org.slf4j.LoggerFactory class SecurityGroupLookupFactory { private final AmazonClientProvider amazonClientProvider - private final AccountCredentialsRepository accountCredentialsRepository + private final CredentialsRepository credentialsRepository SecurityGroupLookupFactory(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository) { + CredentialsRepository credentialsRepository) { this.amazonClientProvider = amazonClientProvider - this.accountCredentialsRepository = accountCredentialsRepository + this.credentialsRepository = credentialsRepository } SecurityGroupLookup getInstance(String region) { @@ -48,9 +45,7 @@ class SecurityGroupLookupFactory { } SecurityGroupLookup getInstance(String region, boolean skipEdda) { - final allNetflixAmazonCredentials = (Set) accountCredentialsRepository.all.findAll { - it instanceof NetflixAmazonCredentials - } + final allNetflixAmazonCredentials = credentialsRepository.getAll() final accounts = ImmutableSet.copyOf(allNetflixAmazonCredentials) new SecurityGroupLookup(amazonClientProvider, region, accounts, skipEdda) } @@ -60,6 +55,9 @@ class SecurityGroupLookupFactory { * Can also be used to create a security group from a description. 
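As context for the class below: the lookup appears to memoize groups it has already resolved or created (see the securityGroupById/securityGroupByName puts further down), so repeated lookups within one operation only hit AWS once. A dependency-free toy of that resolve-once behavior, with invented names:

// The fetch closure runs only on the first request for a given group name;
// later requests are served from the cache.
class ToyLookup {
  private final Map<String, String> cache = [:]

  String groupId(String name, Closure<String> fetch) {
    cache.computeIfAbsent(name) { fetch(it) }
  }
}

def calls = 0
def lookup = new ToyLookup()
2.times { lookup.groupId('myapp-elb') { calls++; 'sg-123' } }
assert calls == 1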
*/ static class SecurityGroupLookup { + private final Logger log = LoggerFactory.getLogger(getClass()); + private final RetrySupport retrySupport = new RetrySupport() + private final AmazonClientProvider amazonClientProvider private final String region private final ImmutableSet accounts @@ -106,26 +104,73 @@ class SecurityGroupLookupFactory { } SecurityGroupUpdater createSecurityGroup(UpsertSecurityGroupDescription description) { - final credentials = getCredentialsForName(description.credentialAccount) + final credentials = getCredentialsForName(description.account) final request = new CreateSecurityGroupRequest(description.name, description.description) if (description.vpcId) { request.withVpcId(description.vpcId) } final amazonEC2 = amazonClientProvider.getAmazonEC2(credentials, region, true) final result = amazonEC2.createSecurityGroup(request) - final newSecurityGroup = new SecurityGroup(ownerId: credentials.accountId, groupId: result.groupId, - groupName: description.name, description: description.description, vpcId: description.vpcId) + final newSecurityGroup = new SecurityGroup( + ownerId: credentials.accountId, + groupId: result.groupId, + groupName: description.name, + description: description.description, + vpcId: description.vpcId + ) securityGroupById.put(result.groupId, newSecurityGroup) securityGroupByName.put(description.name, newSecurityGroup) - List tags = new ArrayList() - tags.add(new Tag("Name", description.name)) - CreateTagsRequest createTagRequest = new CreateTagsRequest() - createTagRequest.withResources(result.groupId).withTags(tags) - amazonEC2.createTags(createTagRequest) + try { + /* + * `createSecurityGroup` is eventually consistent hence the need for retries in the event that the newly + * created security group is not immediately taggable. 
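The retry wrapper that follows exists because createSecurityGroup is eventually consistent: the fresh group id may not yet be visible to createTags or describeSecurityGroups. Here is a dependency-free, simplified stand-in for kork's RetrySupport.retry (the real method takes the callable first, then attempt count, backoff millis, and an exponential flag; the failure simulation below is invented):

// Retry an action up to maxAttempts times with a fixed (linear) backoff,
// rethrowing the last failure once the attempts are exhausted.
def retry(int maxAttempts, long backoffMillis, Closure action) {
  for (int attempt = 1; ; attempt++) {
    try {
      return action()
    } catch (Exception e) {
      if (attempt >= maxAttempts) throw e
      sleep backoffMillis
    }
  }
}

// Simulate a group that only becomes describable on the third attempt.
int calls = 0
retry(15, 1) {
  calls++
  if (calls < 3) throw new IllegalStateException('Not Found!')
}
assert calls == 3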
+ */ + retrySupport.retry({ + CreateTagsRequest createTagRequest = new CreateTagsRequest() + Collection tags = new HashSet() + tags.add(new Tag("Name", description.name)) + description.tags.each { + entry -> tags.add(new Tag(entry.key, entry.value)) + } + createTagRequest.withResources(result.groupId).withTags(tags) + + try { + amazonEC2.createTags(createTagRequest) + } catch (Exception e) { + log.warn("Unable to tag newly created security group '${description.name}', reason: ${e.getMessage()}") + throw e + } + + log.info("Successfully tagged newly created security group '${description.name}'") + + try { + def describeSecurityGroupsRequest = new DescribeSecurityGroupsRequest().withFilters( + new Filter("group-name", [description.name]) + ) + def securityGroups = amazonEC2.describeSecurityGroups(describeSecurityGroupsRequest).securityGroups + if (!securityGroups) { + throw new IntegrationException("Not Found!").setRetryable(true) + } + } catch (Exception e) { + log.warn("Unable to describe newly created security group '${description.name}', reason: ${e.getMessage()}") + throw e + } + + log.info("Successfully described newly created security group '${description.name}'") + }, 15, 3000, false); + } catch (Exception e) { + log.error( + "Unable to tag or describe newly created security group (groupName: {}, groupId: {}, accountId: {})", + description.name, + result.groupId, + credentials.accountId, + e + ) + } if (!skipEdda) { - getEddaSecurityGroups(amazonEC2, description.credentialAccount, region).add(newSecurityGroup) + getEddaSecurityGroups(amazonEC2, description.account, region).add(newSecurityGroup) } new SecurityGroupUpdater(newSecurityGroup, amazonEC2) } @@ -204,6 +249,7 @@ class SecurityGroupLookupFactory { static class SecurityGroupUpdater { final SecurityGroup securityGroup private final AmazonEC2 amazonEC2 + private final Logger log = LoggerFactory.getLogger(getClass()) SecurityGroup getSecurityGroup() { securityGroup @@ -214,6 +260,13 @@ class SecurityGroupLookupFactory { this.amazonEC2 = amazonEC2 } + void updateIngress(List ipPermissionsToUpdate) { + amazonEC2.updateSecurityGroupRuleDescriptionsIngress(new UpdateSecurityGroupRuleDescriptionsIngressRequest( + groupId: securityGroup.groupId, + ipPermissions: ipPermissionsToUpdate + )) + } + void addIngress(List ipPermissionsToAdd) { amazonEC2.authorizeSecurityGroupIngress(new AuthorizeSecurityGroupIngressRequest( groupId: securityGroup.groupId, @@ -230,6 +283,50 @@ securityGroup.ipPermissions.removeAll(ipPermissionsToRemove) } + void updateTags(UpsertSecurityGroupDescription description, DynamicConfigService dynamicConfigService) { + String groupId = securityGroup.groupId + try { + + // fetch -> delete -> create new tags to ensure they are consistent + DescribeTagsRequest describeTagsRequest = new DescribeTagsRequest().withFilters( + new Filter("resource-id", [groupId]) + ) + DescribeTagsResult tagsResult = amazonEC2.describeTags(describeTagsRequest) + List currentTags = tagsResult.getTags() + Collection oldTags = new HashSet() + // Only touch Spinnaker-specific tags; updating other tags might result in permission errors + def additionalTags = dynamicConfigService.getConfig(String.class, "aws.features.security-group.additional-tags","") + currentTags.each { + it -> + if (it.key.equals("Name") || description.tags?.keySet()?.contains(it.key) || additionalTags.contains(it.key)) { + oldTags.add(new Tag(it.key, it.value)) + } + } + + DeleteTagsRequest deleteTagsRequest = new DeleteTagsRequest() +
.withResources(groupId) + .withTags(oldTags) + amazonEC2.deleteTags(deleteTagsRequest) + + CreateTagsRequest createTagRequest = new CreateTagsRequest() + Collection tags = new HashSet() + tags.add(new Tag("Name", description.name)) + description.tags.each { + entry -> tags.add(new Tag(entry.key, entry.value)) + } + createTagRequest.withResources(groupId).withTags(tags) + amazonEC2.createTags(createTagRequest) + + } catch (Exception e) { + log.error( + "Unable to update tags for security group (groupName: {}, groupId: {})", + description.name, + groupId, + e + ) + } + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupMigrator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupMigrator.groovy deleted file mode 100644 index 7dfd2762e4e..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupMigrator.groovy +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.LoadBalancerLocation -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfigurationTarget -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ServerGroupMigrator.ServerGroupLocation -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository - -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup - -class SecurityGroupMigrator { - - public static final String BASE_PHASE = "MIGRATE_SECURITY_GROUP" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - MigrateSecurityGroupStrategy migrationStrategy - SecurityGroupLocation source - SecurityGroupLocation target - SecurityGroupLookup sourceLookup - SecurityGroupLookup targetLookup - boolean createIfSourceMissing - - SecurityGroupMigrator(SecurityGroupLookup sourceLookup, - SecurityGroupLookup targetLookup, - MigrateSecurityGroupStrategy migrationStrategy, - SecurityGroupLocation source, - SecurityGroupLocation target) { - - this.sourceLookup = sourceLookup - this.targetLookup = targetLookup - this.migrationStrategy = migrationStrategy - this.source = source - this.target = target - } - - public MigrateSecurityGroupResult migrate(boolean dryRun) { - task.updateStatus BASE_PHASE, "Calculating security group migration requirements for ${source.name}" - def results = migrationStrategy.generateResults(source, target, 
sourceLookup, targetLookup, createIfSourceMissing, dryRun) - task.updateStatus BASE_PHASE, "Migration of security group " + source.toString() + - (dryRun ? " calculated" : " completed") + ". Migrated security group name: " + results.target.targetName + - (results.targetExists() ? " (already exists)": "") - results - } - - public static class SecurityGroupLocation extends AbstractAmazonCredentialsDescription { - String name - String region - String vpcId - - @Override - String toString() { - "${name ?: '(no name)'} in $credentialAccount/$region" + (vpcId ? "/$vpcId" : "") - } - SecurityGroupLocation() {} - - SecurityGroupLocation(ClusterConfigurationTarget clusterConfigurationTarget) { - this.credentials = clusterConfigurationTarget.credentials - this.region = clusterConfigurationTarget.region - this.vpcId = clusterConfigurationTarget.vpcId - } - - SecurityGroupLocation(LoadBalancerLocation loadBalancerLocation) { - this.credentials = loadBalancerLocation.credentials - this.region = loadBalancerLocation.region - this.vpcId = loadBalancerLocation.vpcId - } - - SecurityGroupLocation(ServerGroupLocation serverGroupLocation) { - this.credentials = serverGroupLocation.credentials - this.region = serverGroupLocation.region - this.vpcId = serverGroupLocation.vpcId - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperation.groovy index 3101c6dfc2b..9a46eca5279 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperation.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperation.groovy @@ -24,6 +24,7 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGr import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired @@ -31,6 +32,14 @@ import org.springframework.beans.factory.annotation.Autowired class UpsertSecurityGroupAtomicOperation implements AtomicOperation { private static final String BASE_PHASE = "UPSERT_SG" + /** + * An arbitrary limit on the number of rules we will enumerate in the operation "status" logs. + * + * If the number of rules is greater than this number, we should just render the number of changes, rather than + * each of the differences. 
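A quick illustration of the rendering this constant gates; the threshold and message shapes mirror the status updates further below, while the group and rule names are placeholders:

def maxRulesForStatus = 50  // stands in for MAX_RULES_FOR_STATUS

// Small change sets are listed verbatim; large ones collapse to a count.
def render = { String verb, List rules ->
  def status = "Permissions ${verb} to 'myapp-sg'"
  rules.size() > maxRulesForStatus ?
    "$status (${rules.size()} rules ${verb})." :
    "$status ($rules)."
}

assert render('added', ['rule-1']) == "Permissions added to 'myapp-sg' ([rule-1])."
assert render('added', (1..51).collect { "rule-$it" }) == "Permissions added to 'myapp-sg' (51 rules added)."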
+ */ + private static final int MAX_RULES_FOR_STATUS = 50 + final UpsertSecurityGroupDescription description UpsertSecurityGroupAtomicOperation(UpsertSecurityGroupDescription description) { @@ -40,6 +49,9 @@ class UpsertSecurityGroupAtomicOperation implements AtomicOperation { @Autowired SecurityGroupLookupFactory securityGroupLookupFactory + @Autowired + DynamicConfigService dynamicConfigService; + private static Task getTask() { TaskRepository.threadLocalTask.get() } @@ -55,7 +67,7 @@ class UpsertSecurityGroupAtomicOperation implements AtomicOperation { ConvertedIngress ipPermissionsFromDescription = convertDescriptionToIngress(securityGroupLookup, description, true) def securityGroupUpdater = securityGroupLookup.getSecurityGroupByName( - description.credentialAccount, + description.account, description.name, description.vpcId ) @@ -68,20 +80,21 @@ class UpsertSecurityGroupAtomicOperation implements AtomicOperation { flattenPermissions(securityGroupUpdater.securityGroup) } else { try { + task.updateStatus BASE_PHASE, "Creating Security Group ${description.name}" securityGroupUpdater = securityGroupLookup.createSecurityGroup(description) - task.updateStatus BASE_PHASE, "Security group created: ${securityGroupUpdater.securityGroup}." + task.updateStatus BASE_PHASE, "Created Security Group ${securityGroupUpdater.securityGroup}" existingIpPermissions = [] } catch (AmazonServiceException e) { if (e.errorCode == "InvalidGroup.Duplicate") { securityGroupUpdater = securityGroupLookup.getSecurityGroupByName( - description.credentialAccount, + description.account, description.name, description.vpcId ).get() existingIpPermissions = SecurityGroupIngressConverter. flattenPermissions(securityGroupUpdater.securityGroup) } else { - task.updateStatus BASE_PHASE, "Failed to create security group '${description.name}' in ${description.credentialAccount}: ${e.errorMessage}" + task.updateStatus BASE_PHASE, "Failed to create security group '${description.name}' in ${description.account}: ${e.errorMessage}" throw e } } @@ -90,26 +103,79 @@ class UpsertSecurityGroupAtomicOperation implements AtomicOperation { // Second conversion of desired security group rules. If any upstream groups (including self-referential) are // missing, the operation will fail. 
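The two-pass scheme described in the comment above is easier to see with a toy resolver standing in for convertDescriptionToIngress: the first pass may legitimately fail to resolve a self-referencing group, because that group is only created between the passes. All names here are invented:

// Pass 1 runs before the group exists, so a self-reference is 'missing';
// the group is then created, and pass 2 must resolve every reference.
def resolve = { Map knownGroups, List referenced ->
  [converted: referenced.findAll { knownGroups.containsKey(it) },
   missing:   referenced.findAll { !knownGroups.containsKey(it) }]
}

def known = ['other-sg': 'sg-1']
def wanted = ['other-sg', 'myapp']   // 'myapp' references itself

def first = resolve(known, wanted)
assert first.missing == ['myapp']    // tolerated: the group is about to be created

known['myapp'] = 'sg-2'              // the operation creates the group here

def second = resolve(known, wanted)
assert second.missing.isEmpty()      // after this, any remaining gap fails the upsert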
if (!ipPermissionsFromDescription.missingSecurityGroups.selfReferencing.isEmpty()) { + task.updateStatus BASE_PHASE, "Extracting ip permissions" ipPermissionsFromDescription = convertDescriptionToIngress(securityGroupLookup, description, false) + task.updateStatus BASE_PHASE, "Extracted ip permissions (${ipPermissionsFromDescription})" } - List ipPermissionsToAdd = ipPermissionsFromDescription.converted - existingIpPermissions - List ipPermissionsToRemove = existingIpPermissions - ipPermissionsFromDescription.converted + SecurityGroupIngressConverter.IpRuleDelta ipRuleDelta = SecurityGroupIngressConverter.computeIpRuleDelta(ipPermissionsFromDescription.converted, existingIpPermissions) + List ipPermissionsToAdd = ipRuleDelta.toAdd + + SecurityGroupIngressConverter.UserIdGroupPairsDelta userIdGroupPairsDelta = SecurityGroupIngressConverter.computeUserIdGroupPairsDelta(ipPermissionsFromDescription.converted,existingIpPermissions) + + ipPermissionsToAdd = ipPermissionsToAdd + userIdGroupPairsDelta.toAdd + + List ipPermissionsToRemove = ipRuleDelta.toRemove + + ipPermissionsToRemove = ipPermissionsToRemove + userIdGroupPairsDelta.toRemove + + List tobeUpdated = ipRuleDelta.toUpdate + userIdGroupPairsDelta.toUpdate + + //Update rules that are already present on the security group + if(tobeUpdated) { + String status = "Permissions updated to '${description.name}'" + if (tobeUpdated.size() > MAX_RULES_FOR_STATUS) { + status = "$status (${tobeUpdated.size()} rules updated)." + } else { + status = "$status ($tobeUpdated)." + } + try { + task.updateStatus BASE_PHASE, "Updating Ingress (${tobeUpdated})" + securityGroupUpdater.updateIngress(tobeUpdated) + //Update tags to ensure they are consistent with rule changes + securityGroupUpdater.updateTags(description, dynamicConfigService) + task.updateStatus BASE_PHASE, status + } catch (AmazonServiceException e) { + task.updateStatus BASE_PHASE, "Error updating ingress to '${description.name}' - ${e.errorMessage}" + throw e + } + } // Converge on the desired final set of security group rules if (ipPermissionsToAdd) { + String status = "Permissions added to '${description.name}'" + if (ipPermissionsToAdd.size() > MAX_RULES_FOR_STATUS) { + status = "$status (${ipPermissionsToAdd.size()} rules added)." + } else { + status = "$status ($ipPermissionsToAdd)." + } + try { + task.updateStatus BASE_PHASE, "Adding Ingress (${ipPermissionsToAdd})" securityGroupUpdater.addIngress(ipPermissionsToAdd) - task.updateStatus BASE_PHASE, "Permissions added to '${description.name}' (${ipPermissionsToAdd})." + //Update tags to ensure they are consistent with rule changes + securityGroupUpdater.updateTags(description, dynamicConfigService) + task.updateStatus BASE_PHASE, status } catch (AmazonServiceException e) { task.updateStatus BASE_PHASE, "Error adding ingress to '${description.name}' - ${e.errorMessage}" throw e } } + if (ipPermissionsToRemove && !description.ingressAppendOnly) { + String status = "Permissions removed from '${description.name}'" + if (ipPermissionsToRemove.size() > MAX_RULES_FOR_STATUS) { + status = "$status (${ipPermissionsToRemove.size()} rules removed)." + } else { + status = "$status ($ipPermissionsToRemove)." + } + try { + task.updateStatus BASE_PHASE, "Removing Ingress (${ipPermissionsToRemove})" securityGroupUpdater.removeIngress(ipPermissionsToRemove) - task.updateStatus BASE_PHASE, "Permissions removed from ${description.name} (${ipPermissionsToRemove})." 
+ //Update tags to ensure they are consistent with rule changes + securityGroupUpdater.updateTags(description, dynamicConfigService) + task.updateStatus BASE_PHASE, status } catch (AmazonServiceException e) { task.updateStatus BASE_PHASE, "Error removing ingress from ${description.name}: ${e.errorMessage}" throw e @@ -124,9 +190,9 @@ class UpsertSecurityGroupAtomicOperation implements AtomicOperation { if (ipPermissionsFromDescription.missingSecurityGroups.anyMissing(ignoreSelfReferencingRules)) { def missingSecurityGroupDescriptions = ipPermissionsFromDescription.missingSecurityGroups.all.collect { - "'${it.name ?: it.id}' in '${it.accountName ?: description.credentialAccount}' ${it.vpcId ?: description.vpcId ?: 'EC2-classic'}" + "'${it.name ?: it.id}' in '${it.accountName ?: description.account}' ${it.vpcId ?: description.vpcId ?: 'EC2-classic'}" } - def securityGroupsDoNotExistErrorMessage = "The following security groups do not exist: ${missingSecurityGroupDescriptions.join(", ")}" + def securityGroupsDoNotExistErrorMessage = "The following security groups do not exist: ${missingSecurityGroupDescriptions.join(", ")} (ignoreSelfReferencingRules: ${ignoreSelfReferencingRules})" task.updateStatus BASE_PHASE, securityGroupsDoNotExistErrorMessage throw new IllegalStateException(securityGroupsDoNotExistErrorMessage) } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/ClusterConfigurationMigrator.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/ClusterConfigurationMigrator.java deleted file mode 100644 index 0f69c5d0c13..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/ClusterConfigurationMigrator.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup; - -import com.netflix.frigga.autoscaling.AutoScalingGroupNameBuilder; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateClusterConfigurationStrategy; -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy; -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup; -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; - -import java.util.List; -import java.util.Map; -import static java.util.Collections.emptyList; - -public class ClusterConfigurationMigrator { - - private static final String BASE_PHASE = "MIGRATE_CLUSTER_CONFIG"; - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - private ClusterConfiguration source; - private ClusterConfigurationTarget target; - private MigrateClusterConfigurationStrategy migrationStrategy; - private MigrateLoadBalancerStrategy migrateLoadBalancerStrategy; - private MigrateSecurityGroupStrategy migrateSecurityGroupStrategy; - private SecurityGroupLookup sourceLookup; - private SecurityGroupLookup targetLookup; - private String iamRole; - private String keyPair; - private String subnetType; - private String elbSubnetType; - private Map loadBalancerNameMapping; - private boolean allowIngressFromClassic; - - public ClusterConfigurationMigrator(MigrateClusterConfigurationStrategy migrationStrategy, - ClusterConfiguration source, ClusterConfigurationTarget target, - SecurityGroupLookup sourceLookup, SecurityGroupLookup targetLookup, - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy, - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy, - String iamRole, String keyPair, String subnetType, String elbSubnetType, - Map loadBalancerNameMapping, - boolean allowIngressFromClassic) { - this.migrationStrategy = migrationStrategy; - this.source = source; - this.target = target; - this.sourceLookup = sourceLookup; - this.targetLookup = targetLookup; - this.migrateLoadBalancerStrategy = migrateLoadBalancerStrategy; - this.migrateSecurityGroupStrategy = migrateSecurityGroupStrategy; - this.iamRole = iamRole; - this.keyPair = keyPair; - this.subnetType = subnetType; - this.elbSubnetType = elbSubnetType; - this.loadBalancerNameMapping = loadBalancerNameMapping; - this.allowIngressFromClassic = allowIngressFromClassic; - } - - public MigrateClusterConfigurationResult migrate(boolean dryRun) { - getTask().updateStatus(BASE_PHASE, (dryRun ? "Calculating" : "Beginning") + " migration of cluster config " + source.toString()); - MigrateClusterConfigurationResult result = migrationStrategy.generateResults(source, target, sourceLookup, targetLookup, migrateLoadBalancerStrategy, - migrateSecurityGroupStrategy, subnetType, elbSubnetType, iamRole, keyPair, loadBalancerNameMapping, allowIngressFromClassic, dryRun); - getTask().updateStatus(BASE_PHASE, "Migration of cluster configuration " + source.toString() + - (dryRun ? 
" calculated" : " completed") + "."); - return result; - } - - public static class ClusterConfiguration extends AbstractAmazonCredentialsDescription { - private Map cluster; - - public Map getCluster() { - return cluster; - } - - public void setCluster(Map cluster) { - this.cluster = cluster; - } - - public List getSecurityGroupIds() { - return (List) cluster.get("securityGroups"); - } - - public String getVpcId() { - return (String) cluster.get("vpcId"); - } - - public String getApplication() { - return (String) cluster.get("application"); - } - - public List getLoadBalancerNames() { - return (List) cluster.getOrDefault("loadBalancers", emptyList()); - } - - public String getRegion() { - return ((Map>) cluster.get("availabilityZones")).keySet().iterator().next(); - } - - @Override - public String toString() { - AutoScalingGroupNameBuilder nameBuilder = new AutoScalingGroupNameBuilder(); - nameBuilder.setAppName(getApplication()); - nameBuilder.setStack((String) cluster.get("stack")); - nameBuilder.setDetail((String) cluster.get("freeFormDetails")); - return nameBuilder.buildGroupName() + " in " + cluster.get("account") + "/" + getRegion() + - (getVpcId() != null ? "/" + getVpcId() : ""); - } - } - - public static class ClusterConfigurationTarget extends AbstractAmazonCredentialsDescription { - private String region; - private String vpcId; - private List availabilityZones; - - public String getRegion() { - return region; - } - - public void setRegion(String region) { - this.region = region; - } - - public String getVpcId() { - return vpcId; - } - - public void setVpcId(String vpcId) { - this.vpcId = vpcId; - } - - public List getAvailabilityZones() { - return availabilityZones; - } - - public void setAvailabilityZones(List availabilityZones) { - this.availabilityZones = availabilityZones; - } - - @Override - public String toString() { - return getCredentialAccount() + "/" + region + (vpcId != null ? "/" + vpcId : ""); - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateClusterConfigurationResult.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateClusterConfigurationResult.java deleted file mode 100644 index e618665cc0e..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateClusterConfigurationResult.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup; - -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -public class MigrateClusterConfigurationResult { - - private Map cluster; - private List securityGroupMigrations; - private List loadBalancerMigrations; - private List warnings = new ArrayList<>(); - - public Map getCluster() { - return cluster; - } - - public void setCluster(Map cluster) { - this.cluster = cluster; - } - - public List getSecurityGroupMigrations() { - return securityGroupMigrations; - } - - public void setSecurityGroupMigrations(List securityGroupMigrations) { - this.securityGroupMigrations = securityGroupMigrations; - } - - public List getLoadBalancerMigrations() { - return loadBalancerMigrations; - } - - public void setLoadBalancerMigrations(List loadBalancerMigrations) { - this.loadBalancerMigrations = loadBalancerMigrations; - } - - public List getWarnings() { - return warnings; - } - - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateClusterConfigurationsAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateClusterConfigurationsAtomicOperation.groovy deleted file mode 100644 index e3fc5de2413..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateClusterConfigurationsAtomicOperation.groovy +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateClusterConfigurationsDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateClusterConfigurationStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfigurationTarget -import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.springframework.beans.factory.annotation.Autowired - -import javax.inject.Provider - -class MigrateClusterConfigurationsAtomicOperation implements AtomicOperation { - - final MigrateClusterConfigurationsDescription description - - // When migrating from EC2-Classic to VPC, this is the key that should be passed in with the subnetTypeMappings - public final static String CLASSIC_SUBNET_KEY = 'EC2-CLASSIC' - - MigrateClusterConfigurationsAtomicOperation(MigrateClusterConfigurationsDescription description) { - this.description = description - } - - @Autowired - Provider migrationStrategy - - @Autowired - SecurityGroupLookupFactory securityGroupLookupFactory - - @Autowired - Provider migrateSecurityGroupStrategy - - @Autowired - Provider migrateLoadBalancerStrategy - - @Autowired - RegionScopedProviderFactory regionScopedProviderFactory - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - Void operate(List priorOutputs) { - Map lookups = [:] - Map subnetAnalyzers = [:] - List results = [] - - description.sources.each { source -> - String targetRegion = source.region - List targetZones = ((Map) source.cluster.availabilityZones).get(source.region) as List - if (description.regionMapping.containsKey(source.region)) { - def targetRegionZone = description.regionMapping.get(source.region) - targetRegion = targetRegionZone.keySet().first() - targetZones = targetRegionZone.values().first() - } - String sourceAccount = (String) source.cluster.account - String sourceIamRole = (String) source.cluster.iamRole - String sourceSubnetType = (String) source.cluster.subnetType ?: CLASSIC_SUBNET_KEY - String sourceKeyPair = (String) source.cluster.keyPair - String account = description.accountMapping.getOrDefault(sourceAccount, sourceAccount) - String iamRole = description.iamRoleMapping.getOrDefault(sourceIamRole, sourceIamRole) - String subnetType = description.subnetTypeMapping.getOrDefault(sourceSubnetType, sourceSubnetType) - String keyPair = description.keyPairMapping.getOrDefault(sourceKeyPair, sourceKeyPair) - String elbSubnetType = description.elbSubnetTypeMapping.getOrDefault(sourceSubnetType, sourceSubnetType) - - // nothing changed? 
don't calculate anything for this cluster - if (sourceAccount == account && source.region == targetRegion && sourceSubnetType == subnetType) { - MigrateClusterConfigurationResult result = new MigrateClusterConfigurationResult(cluster: source.cluster) - results.add(result) - } else { - SecurityGroupLookup sourceLookup = lookups.get(source.region) - if (!sourceLookup) { - sourceLookup = securityGroupLookupFactory.getInstance(source.region, false) - lookups.put(source.region, sourceLookup) - } - SecurityGroupLookup targetLookup = lookups.get(targetRegion) - if (!targetLookup) { - targetLookup = securityGroupLookupFactory.getInstance(targetRegion, false) - lookups.put(targetRegion, targetLookup) - } - def credentials = targetLookup.getCredentialsForName(account) - - if (targetZones.empty) { - targetZones = credentials.getRegions().find { it.name == targetRegion }.preferredZones - } - SubnetAnalyzer subnetAnalyzer = subnetAnalyzers.get(targetRegion + ':' + credentials.name) - if (!subnetAnalyzer) { - subnetAnalyzer = regionScopedProviderFactory.forRegion(credentials, targetRegion).subnetAnalyzer - subnetAnalyzers.put(targetRegion + ':' + credentials.name, subnetAnalyzer) - } - - ClusterConfigurationTarget target = new ClusterConfigurationTarget(region: targetRegion, credentials: credentials, - availabilityZones: targetZones, vpcId: subnetAnalyzer.getVpcIdForSubnetPurpose(subnetType)) - - def migrator = new ClusterConfigurationMigrator(migrationStrategy.get(), source, target, - sourceLookup, targetLookup, - migrateLoadBalancerStrategy.get(), migrateSecurityGroupStrategy.get(), iamRole, keyPair, subnetType, - elbSubnetType, description.loadBalancerNameMapping, description.allowIngressFromClassic) - - results.add(migrator.migrate(description.dryRun)) - } - } - task.addResultObjects(results) - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateServerGroupAtomicOperation.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateServerGroupAtomicOperation.groovy deleted file mode 100644 index ab024cad68c..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateServerGroupAtomicOperation.groovy +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateServerGroupDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateServerGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.springframework.beans.factory.annotation.Autowired - -import javax.inject.Provider - -class MigrateServerGroupAtomicOperation implements AtomicOperation { - - final MigrateServerGroupDescription description - - MigrateServerGroupAtomicOperation(MigrateServerGroupDescription description) { - this.description = description - } - - @Autowired - Provider migrationStrategy - - @Autowired - SecurityGroupLookupFactory securityGroupLookupFactory - - @Autowired - Provider migrateSecurityGroupStrategy - - @Autowired - Provider migrateLoadBalancerStrategy - - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - Void operate(List priorOutputs) { - SecurityGroupLookup sourceLookup = securityGroupLookupFactory.getInstance(description.source.region, false) - SecurityGroupLookup targetLookup = description.source.region == description.target.region ? - sourceLookup : - securityGroupLookupFactory.getInstance(description.target.region, false) - - def migrator = new ServerGroupMigrator(migrationStrategy.get(), description.source, description.target, - sourceLookup, targetLookup, migrateLoadBalancerStrategy.get(), migrateSecurityGroupStrategy.get(), - description.subnetType, description.elbSubnetType, description.iamRole, description.keyPair, - description.targetAmi, description.loadBalancerNameMapping, description.allowIngressFromClassic) - - task.addResultObjects([migrator.migrate(description.dryRun)]) - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateServerGroupResult.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateServerGroupResult.groovy deleted file mode 100644 index 4e9d8eed27a..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/MigrateServerGroupResult.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult - -class MigrateServerGroupResult { - List serverGroupNames - List warnings = [] - List loadBalancers - List securityGroups -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/ServerGroupMigrator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/ServerGroupMigrator.groovy deleted file mode 100644 index da68fd9b694..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/servergroup/ServerGroupMigrator.groovy +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateServerGroupStrategy -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository - -import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup - -class ServerGroupMigrator { - - private static final String BASE_PHASE = "MIGRATE_SERVER_GROUP" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - MigrateServerGroupStrategy migrationStrategy - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy - ServerGroupLocation source - ServerGroupLocation target - SecurityGroupLookup sourceLookup - SecurityGroupLookup targetLookup - String iamRole - String keyPair - String subnetType - String elbSubnetType - String targetAmi - Map loadBalancerNameMapping - boolean allowIngressFromClassic - - ServerGroupMigrator(MigrateServerGroupStrategy migrationStrategy, - ServerGroupLocation source, - ServerGroupLocation target, - SecurityGroupLookup sourceLookup, - SecurityGroupLookup targetLookup, - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy, - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy, - String subnetType, - String elbSubnetType, - String iamRole, - String keyPair, - String targetAmi, - Map loadBalancerNameMapping, - boolean allowIngressFromClassic) { - - this.migrationStrategy = migrationStrategy - this.migrateLoadBalancerStrategy = migrateLoadBalancerStrategy - this.migrateSecurityGroupStrategy = migrateSecurityGroupStrategy - this.source = source - this.target = target - this.sourceLookup = 
sourceLookup - this.targetLookup = targetLookup - this.iamRole = iamRole - this.keyPair = keyPair - this.subnetType = subnetType - this.elbSubnetType = elbSubnetType - this.targetAmi = targetAmi - this.loadBalancerNameMapping = loadBalancerNameMapping - this.allowIngressFromClassic = allowIngressFromClassic - } - - public MigrateServerGroupResult migrate(boolean dryRun) { - task.updateStatus BASE_PHASE, (dryRun ? "Calculating" : "Beginning") + " migration of server group " + source.toString() - MigrateServerGroupResult results = migrationStrategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, subnetType, elbSubnetType, iamRole, keyPair, targetAmi, - loadBalancerNameMapping, allowIngressFromClassic, dryRun) - task.updateStatus BASE_PHASE, "Migration of server group " + source.toString() + - (dryRun ? " calculated" : " completed") + ". Migrated server group name: " + results.serverGroupNames.get(0) - results - } - - - public static class ServerGroupLocation extends AbstractAmazonCredentialsDescription { - String name - String region - String vpcId - List availabilityZones - - @Override - String toString() { - "$name in $credentialAccount/$region" + (vpcId ? "/$vpcId" : "") - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopier.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopier.groovy index 5b50ee5618a..49aa8ee8079 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopier.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopier.groovy @@ -42,12 +42,25 @@ class DefaultScalingPolicyCopier implements ScalingPolicyCopier { public static final DIMENSION_NAME_FOR_ASG = 'AutoScalingGroupName' - @Autowired AmazonClientProvider amazonClientProvider - @Autowired IdGenerator idGenerator + PolicyNameGenerator policyNameGenerator + + @Autowired + DefaultScalingPolicyCopier(AmazonClientProvider amazonClientProvider, IdGenerator idGenerator) { + this.amazonClientProvider = amazonClientProvider + this.idGenerator = idGenerator + this.policyNameGenerator = new PolicyNameGenerator(idGenerator, amazonClientProvider) + } + + DefaultScalingPolicyCopier(AmazonClientProvider amazonClientProvider, IdGenerator idGenerator, PolicyNameGenerator policyNameGenerator) { + this.amazonClientProvider = amazonClientProvider + this.idGenerator = idGenerator + this.policyNameGenerator = policyNameGenerator + } + @Override void copyScalingPolicies(Task task, String sourceAsgName, @@ -64,7 +77,7 @@ class DefaultScalingPolicyCopier implements ScalingPolicyCopier { Map sourcePolicyArnToTargetPolicyArn = [:] sourceAsgScalingPolicies.each { sourceAsgScalingPolicy -> - String newPolicyName = [targetAsgName, 'policy', idGenerator.nextId()].join('-') + String newPolicyName = policyNameGenerator.generateScalingPolicyName(sourceCredentials, sourceRegion, sourceAsgName, targetAsgName, sourceAsgScalingPolicy) def policyRequest = buildNewPolicyRequest(newPolicyName, sourceAsgScalingPolicy, targetAsgName) task.updateStatus "AWS_DEPLOY", "Creating scaling policy (${policyRequest}) on ${targetRegion}/${targetAsgName} from ${sourceRegion}/${sourceAsgName}..." 
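// The hunk above replaces field-level @Autowired injection in DefaultScalingPolicyCopier with
// constructor injection, and delegates policy naming to a PolicyNameGenerator collaborator.
// A minimal, self-contained Groovy sketch of that pattern -- the shape mirrors the diff, but the
// types below are simplified stand-ins, not Spinnaker's actual classes:
class NameGenerator {
  String generate(String targetAsgName) { "${targetAsgName}-policy-${UUID.randomUUID()}" }
}
class Copier {
  private final NameGenerator nameGenerator
  // the secondary constructor in the diff exists for exactly this: tests can hand in a stub generator
  Copier(NameGenerator nameGenerator) { this.nameGenerator = nameGenerator }
  String copyPolicy(String targetAsgName) { nameGenerator.generate(targetAsgName) }
}
// a test can substitute a deterministic generator instead of reaching into @Autowired fields:
assert new Copier(new NameGenerator() {
  String generate(String t) { "$t-policy-fixed" }
}).copyPolicy('app-v002') == 'app-v002-policy-fixed'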
@@ -87,11 +100,11 @@ class DefaultScalingPolicyCopier implements ScalingPolicyCopier {
         // the same thing with simple and step policies and have not had any issues thus far
         sourceAsgScalingPolicy.targetTrackingConfiguration.customizedMetricSpecification.dimensions
           .findAll { d ->
-          d.name == DIMENSION_NAME_FOR_ASG
-        }
-        .each { d ->
-          d.value = targetAsgName
-        }
+            d.name == DIMENSION_NAME_FOR_ASG
+          }
+          .each { d ->
+            d.value = targetAsgName
+          }
       }
     }
     return new PutScalingPolicyRequest(
@@ -143,8 +156,8 @@ class DefaultScalingPolicyCopier implements ScalingPolicyCopier {
     List<MetricAlarm> sourceAlarms = new AlarmRetriever(sourceCloudWatch).retrieve(new DescribeAlarmsRequest(alarmNames: sourceAlarmNames))
     log.info("Copying scaling policy alarms for $newAutoScalingGroupName: $sourceAlarms")
-
-    sourceAlarms.findAll{ shouldCopySourceAlarm(it) }.each { alarm ->
+
+    sourceAlarms.findAll { shouldCopySourceAlarm(it) }.each { alarm ->
       List newDimensions = Lists.newArrayList(alarm.dimensions)
       Dimension asgDimension = newDimensions.find { it.name == DIMENSION_NAME_FOR_ASG }
       if (asgDimension) {
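// Both hunks above re-point the AutoScalingGroupName dimension at the new ASG while scaling
// policies and their CloudWatch alarms are copied. A self-contained sketch of that retargeting
// step, using plain maps in place of the AWS SDK's Dimension type (stand-ins, not the real API):
def retargetAsgDimension = { List<Map> dimensions, String targetAsgName ->
  dimensions.findAll { it.name == 'AutoScalingGroupName' } // DIMENSION_NAME_FOR_ASG in the diff
            .each { it.value = targetAsgName }             // mutate in place, as the copier does
  dimensions
}
assert retargetAsgDimension([[name: 'AutoScalingGroupName', value: 'app-v001']], 'app-v002')[0].value == 'app-v002'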
@@ -208,4 +221,42 @@ class DefaultScalingPolicyCopier implements ScalingPolicyCopier {
       result.metricAlarms
     }
   }
+
+  static class PolicyNameGenerator {
+    private IdGenerator idGenerator
+
+    private AmazonClientProvider amazonClientProvider
+
+    PolicyNameGenerator(IdGenerator idGenerator, AmazonClientProvider amazonClientProvider) {
+      this.idGenerator = idGenerator
+      this.amazonClientProvider = amazonClientProvider
+    }
+
+    String generateScalingPolicyName(NetflixAmazonCredentials sourceCredentials, String sourceRegion, String sourceAsgName, String targetAsgName, ScalingPolicy policy) {
+      String fallback = policy.policyName.contains(sourceAsgName) ?
+        policy.policyName.replaceAll(sourceAsgName, targetAsgName) :
+        [policy.policyName, 'no-alarm', idGenerator.nextId()].join('-')
+
+      if (policy.alarms.isEmpty()) {
+        return fallback
+      }
+      AmazonCloudWatch sourceCloudWatch = amazonClientProvider.getCloudWatch(sourceCredentials, sourceRegion, true)
+      List<MetricAlarm> sourceAlarms = new AlarmRetriever(sourceCloudWatch).retrieve(new DescribeAlarmsRequest(alarmNames: [policy.alarms[0].alarmName]))
+      if (sourceAlarms.isEmpty()) {
+        return fallback
+      }
+      MetricAlarm alarm = sourceAlarms[0]
+      // 'PolicyName' cannot contain a ':' character but it is a valid character in Cloudwatch Namespace and Metric names.
+      return [
+        targetAsgName,
+        alarm.namespace,
+        alarm.metricName,
+        alarm.comparisonOperator,
+        alarm.threshold,
+        alarm.evaluationPeriods,
+        alarm.period,
+        new Date().getTime()
+      ].join('-').replace(':', '-')
+    }
+  }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProperties.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProperties.groovy
index 46c3e0b5ed3..0cc3b50f59a 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProperties.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProperties.groovy
@@ -28,5 +28,5 @@ class LocalFileUserDataProperties {
    */
   boolean enabled = true
   String udfRoot = '/apps/nflx-udf'
-  boolean defaultLegacyUdf = true
+  boolean defaultLegacyUdf = false
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProvider.groovy
index fc94eeca55d..c97dd0de408 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProvider.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProvider.groovy
@@ -16,19 +16,38 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.userdata
 
 import com.netflix.frigga.Names
-import com.netflix.spinnaker.clouddriver.aws.deploy.LaunchConfigurationBuilder
+import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput
+import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider
 import com.netflix.spinnaker.clouddriver.core.services.Front50Service
-import org.springframework.beans.factory.annotation.Autowired
-import retrofit.RetrofitError
+import com.netflix.spinnaker.kork.annotations.VisibleForTesting
+import com.netflix.spinnaker.kork.core.RetrySupport
+import com.netflix.spinnaker.kork.exceptions.SpinnakerException
+import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException
+import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException
+import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException
+import org.springframework.http.HttpStatus
+
+import java.time.Duration
 
 class LocalFileUserDataProvider implements UserDataProvider {
   private static final INSERTION_MARKER = '\nexport EC2_REGION='
 
-  @Autowired
-  LocalFileUserDataProperties localFileUserDataProperties
+  @VisibleForTesting LocalFileUserDataProperties localFileUserDataProperties
+  @VisibleForTesting Front50Service front50Service
+  @VisibleForTesting DefaultUserDataTokenizer defaultUserDataTokenizer
+
+  private final RetrySupport retrySupport = new RetrySupport()
 
-  @Autowired
-  Front50Service front50Service
+  @VisibleForTesting
+  LocalFileUserDataProvider(){}
+
+  LocalFileUserDataProvider(LocalFileUserDataProperties localFileUserDataProperties,
+                            Front50Service front50Service,
+                            DefaultUserDataTokenizer defaultUserDataTokenizer) {
+    this.localFileUserDataProperties = localFileUserDataProperties
+    this.front50Service = front50Service
+    this.defaultUserDataTokenizer = defaultUserDataTokenizer
+  }
 
   boolean isLegacyUdf(String account, String applicationName) {
     Closure result = {
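// The next hunk swaps the hand-rolled retry loop in isLegacyUdf for kork's RetrySupport
// (5 attempts, 500 ms backoff, exponential). A self-contained Groovy sketch of that retry
// shape -- an illustrative stand-in only, not the kork implementation:
def retryWithBackoff = { Closure fn, int maxRetries, long backoffMillis, boolean exponential ->
  long delay = backoffMillis
  for (int attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return fn.call()
    } catch (Exception e) {
      if (attempt == maxRetries) throw e // out of attempts: surface the last failure
      Thread.sleep(delay)
      if (exponential) delay *= 2        // double the wait between attempts
    }
  }
}
assert retryWithBackoff({ 'ok' }, 5, 1, true) == 'ok'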
@@ -38,35 +57,30 @@ class LocalFileUserDataProvider implements UserDataProvider {
           return localFileUserDataProperties.defaultLegacyUdf
         }
         return Boolean.valueOf(application.legacyUdf)
-      } catch (RetrofitError re) {
-        if (re.kind == RetrofitError.Kind.HTTP && re.response.status == 404) {
+      } catch (SpinnakerHttpException e) {
+        if (e.getResponseCode() == HttpStatus.NOT_FOUND.value()) {
           return localFileUserDataProperties.defaultLegacyUdf
         }
-        throw re
+        throw e
+      } catch (SpinnakerServerException e) {
+        throw e
       }
     }
-    final int maxRetry = 5
-    final int retryBackoff = 500
-    final Set retryStatus = [429, 500]
-    for (int i = 0; i < maxRetry; i++) {
-      try {
-        return result.call()
-      } catch (RetrofitError re) {
-        if (re.kind == RetrofitError.Kind.NETWORK || (re.kind == RetrofitError.Kind.HTTP && retryStatus.contains(re.response.status))) {
-          Thread.sleep(retryBackoff)
-        }
-      }
+    try {
+      return retrySupport.retry(result, 5, Duration.ofMillis(500), true)
+    } catch (SpinnakerException e) {
+      throw new IllegalStateException("Failed to read legacyUdf preference from front50 for $account/$applicationName", e)
     }
-    throw new IllegalStateException("Failed to read legacyUdf preference from front50 for $account/$applicationName")
   }
 
   @Override
-  String getUserData(String launchConfigName, LaunchConfigurationBuilder.LaunchConfigurationSettings settings, Boolean legacyUdf) {
-    def names = Names.parseName(settings.baseName)
-    boolean useLegacyUdf = legacyUdf != null ? legacyUdf : isLegacyUdf(settings.account, names.app)
-    def rawUserData = assembleUserData(useLegacyUdf, names, settings.region, settings.account)
-    replaceUserDataTokens useLegacyUdf, names, launchConfigName, settings.region, settings.account, settings.environment, settings.accountType, rawUserData
+  String getUserData(UserDataInput userDataInput) {
+    def names = Names.parseName(userDataInput.asgName)
+    boolean useLegacyUdf = userDataInput.legacyUdf != null ? userDataInput.legacyUdf : isLegacyUdf(userDataInput.account, names.app)
+    def rawUserData = assembleUserData(useLegacyUdf, names, userDataInput.region, userDataInput.account)
+    String userData = defaultUserDataTokenizer.replaceTokens(names, userDataInput, rawUserData, useLegacyUdf)
+    return addAdditionalEnvVars(names, userData)
   }
 
   String assembleUserData(boolean legacyUdf, Names names, String region, String account) {
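// Token substitution has moved out to DefaultUserDataTokenizer; what remains in this class
// (next hunk) is appending NETFLIX_* exports just before the EC2_REGION export line. A
// standalone sketch of that splice; INSERTION_MARKER and the variable names come from the
// diff, and explicit guards are used here because the original's 'list << value ? a : b'
// context lines are easy to misread under Groovy precedence (<< binds tighter than ?:):
String MARKER = '\nexport EC2_REGION='
def addEnvVars = { Map names, String userData ->
  List<String> vars = []
  if (names.countries) { vars << "NETFLIX_COUNTRIES=${names.countries}".toString() }
  if (names.devPhase) { vars << "NETFLIX_DEV_PHASE=${names.devPhase}".toString() }
  vars ? userData.replace(MARKER, "\n${vars.join('\n')}${MARKER}".toString()) : userData
}
assert addEnvVars([countries: 'us'], 'setup\nexport EC2_REGION=us-east-1').contains('NETFLIX_COUNTRIES=us')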
@@ -99,36 +113,7 @@ class LocalFileUserDataProvider implements UserDataProvider {
     udfPaths.collect { String path -> getContents(path) }.join('')
   }
 
-  static String replaceUserDataTokens(boolean useAccountNameAsEnvironment, Names names, String launchConfigName, String region, String account, String environment, String accountType, String rawUserData) {
-    String stack = names.stack ?: ''
-    String cluster = names.cluster ?: ''
-    String revision = names.revision ?: ''
-    String countries = names.countries ?: ''
-    String devPhase = names.devPhase ?: ''
-    String hardware = names.hardware ?: ''
-    String zone = names.zone ?: ''
-    String detail = names.detail ?: ''
-
-    // Replace the tokens & return the result
-    String result = rawUserData
-      .replace('%%account%%', account)
-      .replace('%%accounttype%%', accountType)
-      .replace('%%env%%', useAccountNameAsEnvironment ? account : environment)
-      .replace('%%app%%', names.app)
-      .replace('%%region%%', region)
-      .replace('%%group%%', names.group)
-      .replace('%%autogrp%%', names.group)
-      .replace('%%revision%%', revision)
-      .replace('%%countries%%', countries)
-      .replace('%%devPhase%%', devPhase)
-      .replace('%%hardware%%', hardware)
-      .replace('%%zone%%', zone)
-      .replace('%%cluster%%', cluster)
-      .replace('%%stack%%', stack)
-      .replace('%%detail%%', detail)
-      .replace('%%launchconfig%%', launchConfigName)
-      .replace('%%tier%%', '')
-
+  private static String addAdditionalEnvVars(Names names, String userData) {
     List additionalEnvVars = []
     additionalEnvVars << names.countries ? "NETFLIX_COUNTRIES=${names.countries}" : null
     additionalEnvVars << names.devPhase ? "NETFLIX_DEV_PHASE=${names.devPhase}" : null
@@ -142,9 +127,9 @@ class LocalFileUserDataProvider implements UserDataProvider {
 
     if (additionalEnvVars) {
       String insertion = "\n${additionalEnvVars.join('\n')}"
-      result = result.replace(INSERTION_MARKER, "\n${insertion}${INSERTION_MARKER}")
+      userData = userData.replace(INSERTION_MARKER, "\n${insertion}${INSERTION_MARKER}")
     }
-    result
+    return userData
   }
 
   private String getContents(String filePath) {
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/NullOpUserDataProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/NullOpUserDataProvider.groovy
index b1a1db7e33a..f26bd28c0df 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/NullOpUserDataProvider.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/NullOpUserDataProvider.groovy
@@ -16,5 +16,7 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.userdata
 
+import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider
+
 class NullOpUserDataProvider implements UserDataProvider {
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProvider.java
deleted file mode 100644
index 61befe38f6a..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProvider.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.aws.deploy.userdata;
-
-import com.netflix.spinnaker.clouddriver.aws.deploy.LaunchConfigurationBuilder.LaunchConfigurationSettings;
-
-/**
- * Implementations of this interface will provide user data to instances during the deployment process
- */
-public interface UserDataProvider {
-  /**
-   * Returns user data that will be applied to a new instance. The launch configuration will not have been created at
-   * this point in the workflow, but the name is provided, as it may be needed when building user data detail.
- * - * @deprecated use getUserData(launchConfigName, settings, legacyUdf) instead - */ - @Deprecated - default String getUserData(String asgName, String launchConfigName, String region, String account, String environment, String accountType, Boolean legacyUdf) { - return ""; - } - - default String getUserData(String launchConfigName, LaunchConfigurationSettings settings, Boolean legacyUdf) { - return getUserData(settings.getBaseName(), launchConfigName, settings.getRegion(), settings.getAccount(), settings.getEnvironment(), settings.getAccountType(), legacyUdf); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractEnableDisableAsgDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractEnableDisableAsgDescriptionValidator.groovy index 67c906ff06f..42ce5317a08 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractEnableDisableAsgDescriptionValidator.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractEnableDisableAsgDescriptionValidator.groovy @@ -16,11 +16,11 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableAsgDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors abstract class AbstractEnableDisableAsgDescriptionValidator extends AmazonDescriptionValidationSupport { @Override - void validate(List priorDescriptions, EnableDisableAsgDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableAsgDescription description, ValidationErrors errors) { validateAsgs description, errors } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidator.groovy index f8c230a6bde..4d2c7030305 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidator.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidator.groovy @@ -16,30 +16,31 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("allowLaunchDescriptionValidator") class AllowLaunchDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, AllowLaunchDescription description, Errors errors) { + void validate(List priorDescriptions, 
AllowLaunchDescription description, ValidationErrors errors) { if (!description.amiName) { errors.rejectValue("amiName", "allowLaunchDescription.amiName.empty") } if (!description.region) { errors.rejectValue("region", "allowLaunchDescription.region.empty") } - if (!description.account) { - errors.rejectValue("account", "allowLaunchDescription.account.empty") - } else if (!accountCredentialsProvider.all.collect { it.name }.contains(description.account)) { - errors.rejectValue("account", "allowLaunchDescription.account.not.configured") + if (!description.targetAccount) { + errors.rejectValue("targetAccount", "allowLaunchDescription.targetAccount.empty") + } else if (credentialsRepository.getOne(description.targetAccount) == null) { + errors.rejectValue("targetAccount", "allowLaunchDescription.targetAccount.not.configured") } } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AmazonDescriptionValidationSupport.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AmazonDescriptionValidationSupport.groovy index 5717f99086a..3d6b8905196 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AmazonDescriptionValidationSupport.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AmazonDescriptionValidationSupport.groovy @@ -20,13 +20,13 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCr import com.netflix.spinnaker.clouddriver.aws.deploy.description.AsgDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.ResizeAsgDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors public abstract class AmazonDescriptionValidationSupport extends DescriptionValidator { - abstract void validate(List priorDescriptions, T description, Errors errors) + abstract void validate(List priorDescriptions, T description, ValidationErrors errors) - void validateAsgs(T description, Errors errors) { + void validateAsgs(T description, ValidationErrors errors) { if (!description.asgs) { errors.rejectValue("asgs", "${description.getClass().simpleName}.empty") } else { @@ -36,7 +36,7 @@ public abstract class AmazonDescriptionValidationSupport regionNames, String errorKey, Errors errors, String attributeName = "regions") { + void validateRegions(T description, Collection regionNames, String errorKey, ValidationErrors errors, String attributeName = "regions") { if (!regionNames) { errors.rejectValue(attributeName, "${errorKey}.${attributeName}.empty") } else { @@ -86,7 +86,7 @@ public abstract class AmazonDescriptionValidationSupport { @Override - void validate(List priorDescriptions, AttachClassicLinkVpcDescription description, Errors errors) { + void validate(List priorDescriptions, AttachClassicLinkVpcDescription description, ValidationErrors errors) { def key = AttachClassicLinkVpcDescription.class.simpleName if (!description.instanceId) { errors.rejectValue("instanceId", "${key}.instanceId.invalid") diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidator.groovy index 8ce2adcaf3f..6625df5ee8f 100644 --- 
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidator.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidator.groovy @@ -16,33 +16,38 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators +import com.google.common.annotations.VisibleForTesting import com.netflix.spinnaker.clouddriver.aws.AmazonOperation +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.transform.stc.ClosureParams import groovy.transform.stc.SimpleType +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors +@Slf4j @Component("basicAmazonDeployDescriptionValidator") @AmazonOperation(AtomicOperations.CREATE_SERVER_GROUP) class BasicAmazonDeployDescriptionValidator extends AmazonDescriptionValidationSupport { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, BasicAmazonDeployDescription description, Errors errors) { + void validate(List priorDescriptions, BasicAmazonDeployDescription description, ValidationErrors errors) { def credentials = null if (!description.credentials) { errors.rejectValue "credentials", "basicAmazonDeployDescription.credentials.empty" } else { - credentials = accountCredentialsProvider.getCredentials(description?.credentials?.name) - if (!(credentials instanceof AmazonCredentials)) { + credentials = credentialsRepository.getOne(description?.credentials?.name) + if (credentials == null) { errors.rejectValue("credentials", "basicAmazonDeployDescription.credentials.invalid") } } @@ -73,22 +78,73 @@ class BasicAmazonDeployDescriptionValidator extends AmazonDescriptionValidationS if (!description.source?.useSourceCapacity) { validateCapacity description, errors } + + // unlimitedCpuCredits (set to true / false) is valid only with supported instance types + if (description.unlimitedCpuCredits != null + && !InstanceTypeUtils.isBurstingSupportedByAllTypes(description.getAllInstanceTypes())) { + errors.rejectValue "unlimitedCpuCredits", "basicAmazonDeployDescription.bursting.not.supported.by.instanceType" + } + + // spotInstancePools is applicable only for 'lowest-price' spotAllocationStrategy + if (description.spotInstancePools && description.spotInstancePools > 0 && description.spotAllocationStrategy != "lowest-price") { + errors.rejectValue "spotInstancePools", "basicAmazonDeployDescription.spotInstancePools.not.supported.for.spotAllocationStrategy" + } + + // log warnings + final String warnings = 
getWarnings(description) + if (!warnings.isEmpty()) { + log.warn(warnings) + } + } + + /** + * Log warnings to indicate potential user error, invalid configurations that could result in unexpected outcome, etc. + */ + @VisibleForTesting + private String getWarnings(BasicAmazonDeployDescription description) { + List warnings = [] + + // certain features work as expected only when AWS EC2 Launch Template feature is enabled and used + if (!description.setLaunchTemplate) { + def ltFeaturesEnabled = getLtFeaturesEnabled(description) + + if (ltFeaturesEnabled) { + warnings.add("WARNING: The following fields ${ltFeaturesEnabled} work as expected only with AWS EC2 Launch Template, " + + "but 'setLaunchTemplate' is set to false in request with account: ${description.account}, " + + "application: ${description.application}, stack: ${description.stack})") + } + } + return warnings.join("\n") + } + + private List getLtFeaturesEnabled(final BasicAmazonDeployDescription descToValidate) { + def allLtFeatures = BasicAmazonDeployDescription.getLaunchTemplateOnlyFieldNames() + def descWithDefaults = new BasicAmazonDeployDescription() + def ltFeaturesEnabled = [] + + allLtFeatures.each({ + if (descToValidate."$it" != descWithDefaults."$it") { + ltFeaturesEnabled.add(it) + } + }) + + return ltFeaturesEnabled.sort() } enum BlockDeviceRules { - deviceNameNotNull({ AmazonBlockDevice device, Errors errors -> + deviceNameNotNull({ AmazonBlockDevice device, ValidationErrors errors -> if (!device.deviceName) { errors.rejectValue "blockDevices", "basicAmazonDeployDescription.block.device.not.named", [] as String[], "Device name is required for block device" } }), - ephemeralConfigWrong({ AmazonBlockDevice device, Errors errors -> + ephemeralConfigWrong({ AmazonBlockDevice device, ValidationErrors errors -> if (device.virtualName && (device.deleteOnTermination != null || device.iops || device.size || device.snapshotId || device.volumeType)) { errors.rejectValue "blockDevices", "basicAmazonDeployDescription.block.device.ephemeral.config", [device.virtualName] as String[], "Ephemeral block device $device.deviceName with EBS configuration parameters" } }), - ebsConfigWrong({ AmazonBlockDevice device, Errors errors -> + ebsConfigWrong({ AmazonBlockDevice device, ValidationErrors errors -> if (!device.virtualName && !device.size) { errors.rejectValue "blockDevices", "basicAmazonDeployDescription.block.device.ebs.config", [device.deviceName] as String[], "EBS device $device.deviceName missing required value size" } @@ -103,11 +159,11 @@ class BasicAmazonDeployDescriptionValidator extends AmazonDescriptionValidationS this.validationRule = validationRule } - void validateDevice(AmazonBlockDevice device, Errors errors) { + void validateDevice(AmazonBlockDevice device, ValidationErrors errors) { validationRule(device, errors) } - static void validate(AmazonBlockDevice device, Errors errors) { + static void validate(AmazonBlockDevice device, ValidationErrors errors) { for (rule in values()) { rule.validateDevice device, errors } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateAmazonLoadBalancerDescriptionValidator.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateAmazonLoadBalancerDescriptionValidator.java deleted file mode 100644 index e42d4c1cfda..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateAmazonLoadBalancerDescriptionValidator.java +++ /dev/null @@ 
-1,139 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.validators; - -import com.amazonaws.services.elasticloadbalancingv2.model.AuthenticateOidcActionConfig; -import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerClassicDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerV2Description; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -@AmazonOperation(AtomicOperations.UPSERT_LOAD_BALANCER) -@Component("createAmazonLoadBalancerDescriptionValidator") -class CreateAmazonLoadBalancerDescriptionValidator extends AmazonDescriptionValidationSupport { - private void validateActions(List actions, Set allTargetGroupNames, Set unusedTargetGroupNames, Errors errors) { - for (UpsertAmazonLoadBalancerV2Description.Action action : actions) { - if (action.getType().equals("forward")) { - String targetGroupName = action.getTargetGroupName(); - if (!allTargetGroupNames.contains(targetGroupName)) { - errors.rejectValue("listeners", "createAmazonLoadBalancerDescription.listeners.invalid.targetGroup"); - } - unusedTargetGroupNames.remove(action.getTargetGroupName()); - } - - if (action.getType().equals("authenticate-oidc")) { - AuthenticateOidcActionConfig config = action.getAuthenticateOidcActionConfig(); - if (config.getClientId() == null) { - errors.rejectValue("listeners", "createAmazonLoadBalancerDescription.listeners.invalid.oidcConfig"); - } - } - } - } - - @Override - public void validate(List priorDescriptions, UpsertAmazonLoadBalancerDescription description, Errors errors) { - // Common fields to validate - if (description.getName() == null && description.getClusterName() == null) { - errors.rejectValue("clusterName", "createAmazonLoadBalancerDescription.missing.name.or.clusterName"); - } - if (description.getSubnetType() == null && description.getAvailabilityZones() == null) { - errors.rejectValue("availabilityZones", "createAmazonLoadBalancerDescription.missing.subnetType.or.availabilityZones"); - } - - if (description.getAvailabilityZones() != null) { - for (Map.Entry> entry : description.getAvailabilityZones().entrySet()) { - String region = entry.getKey(); - List azs = entry.getValue(); - - AmazonCredentials.AWSRegion acctRegion = description.getCredentials().getRegions().stream().filter(r -> r.getName().equals(region)).findFirst().orElse(null); - if (acctRegion == null) { - errors.rejectValue("availabilityZones", 
"createAmazonLoadBalancerDescription.region.not.configured"); - } - if (description.getSubnetType() == null && azs == null) { - errors.rejectValue("availabilityZones", "createAmazonLoadBalancerDescription.missing.subnetType.or.availabilityZones"); - break; - } - if (description.getSubnetType() == null && acctRegion != null && !acctRegion.getAvailabilityZones().containsAll(azs)) { - errors.rejectValue("availabilityZones", "createAmazonLoadBalancerDescription.zone.not.configured"); - } - } - } - - switch (description.getLoadBalancerType()) { - case CLASSIC: - UpsertAmazonLoadBalancerClassicDescription classicDescription = (UpsertAmazonLoadBalancerClassicDescription) description; - if (classicDescription.getListeners() == null || classicDescription.getListeners().size() == 0) { - errors.rejectValue("listeners", "createAmazonLoadBalancerDescription.listeners.empty"); - } - - if (classicDescription.getDeregistrationDelay() != null) { - if (classicDescription.getDeregistrationDelay() < 1 || classicDescription.getDeregistrationDelay() > 3600) { - errors.rejectValue("deregistrationDelay", "createAmazonLoadBalancerDescription.deregistrationDelay.invalid"); - } - } - break; - case APPLICATION: - case NETWORK: - UpsertAmazonLoadBalancerV2Description albDescription = (UpsertAmazonLoadBalancerV2Description) description; - if (albDescription.targetGroups == null || albDescription.targetGroups.size() == 0) { - errors.rejectValue("targetGroups", "createAmazonLoadBalancerDescription.targetGroups.empty"); - } - - Set allTargetGroupNames = new HashSet<>(); - for (UpsertAmazonLoadBalancerV2Description.TargetGroup targetGroup : albDescription.targetGroups) { - allTargetGroupNames.add(targetGroup.getName()); - if (targetGroup.getName() == null || targetGroup.getName().isEmpty()) { - errors.rejectValue("targetGroups", "createAmazonLoadBalancerDescription.targetGroups.name.missing"); - } - if (targetGroup.getProtocol() == null) { - errors.rejectValue("targetGroups", "createAmazonLoadBalancerDescription.targetGroups.protocol.missing"); - } - if (targetGroup.getPort() == null) { - errors.rejectValue("targetGroups", "createAmazonLoadBalancerDescription.targetGroups.port.missing"); - } - } - Set unusedTargetGroupNames = new HashSet<>(); - unusedTargetGroupNames.addAll(allTargetGroupNames); - - for (UpsertAmazonLoadBalancerV2Description.Listener listener : albDescription.listeners) { - if (listener.getDefaultActions().size() == 0) { - errors.rejectValue("listeners", "createAmazonLoadBalancerDescription.listeners.missing.defaultAction"); - } - this.validateActions(listener.getDefaultActions(), allTargetGroupNames, unusedTargetGroupNames, errors); - for (UpsertAmazonLoadBalancerV2Description.Rule rule : listener.getRules()) { - this.validateActions(rule.getActions(), allTargetGroupNames, unusedTargetGroupNames, errors); - } - } - if (unusedTargetGroupNames.size() > 0) { - errors.rejectValue("targetGroups", "createAmazonLoadBalancerDescription.targetGroups.unused"); - } - break; - default: - errors.rejectValue("loadBalancerType", "createAmazonLoadBalancerDescription.loadBalancerType.invalid"); - break; - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidator.groovy index f22e4c57e6e..abb2f568349 100644 --- 
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidator.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidator.groovy @@ -16,13 +16,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.deploy.description.CreateNetworkInterfaceDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("createNetworkInterfaceDescriptionValidator") class CreateNetworkInterfaceDescriptionValidator extends AmazonDescriptionValidationSupport { @Override - void validate(List priorDescriptions, CreateNetworkInterfaceDescription description, Errors errors) { + void validate(List priorDescriptions, CreateNetworkInterfaceDescription description, ValidationErrors errors) { Set regions = description.availabilityZonesGroupedByRegion?.keySet() if (!regions) { errors.rejectValue "regions", "createNetworkInterfaceDescription.regions.not.supplied" diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAlarmDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAlarmDescriptionValidator.groovy index 8d5e776a7ac..6fb92e1b442 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAlarmDescriptionValidator.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAlarmDescriptionValidator.groovy @@ -17,13 +17,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAlarmDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("deleteAlarmDescriptionValidator") class DeleteAlarmDescriptionValidator extends AmazonDescriptionValidationSupport { @Override - void validate(List priorDescriptions, DeleteAlarmDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteAlarmDescription description, ValidationErrors errors) { validateRegions(description, [description.region], "deleteAlarmDescription", errors) if (!description.names) { @@ -32,7 +32,7 @@ class DeleteAlarmDescriptionValidator extends AmazonDescriptionValidationSupport } - static void rejectNull(String field, Errors errors) { + static void rejectNull(String field, ValidationErrors errors) { errors.rejectValue(field, "deleteAlarmDescription.${field}.not.nullable") } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidator.groovy index 641b8495fd1..8f00ade0e46 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidator.groovy @@ -17,17 +17,17 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import 
com.netflix.spinnaker.clouddriver.aws.AmazonOperation
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLoadBalancerDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.DELETE_LOAD_BALANCER)
 @Component("deleteAmazonLoadBalancerDescriptionValidator")
 class DeleteAmazonLoadBalancerDescriptionValidator extends AmazonDescriptionValidationSupport<DeleteAmazonLoadBalancerDescription> {
   @Override
-  void validate(List priorDescriptions, DeleteAmazonLoadBalancerDescription description, Errors errors) {
+  void validate(List priorDescriptions, DeleteAmazonLoadBalancerDescription description, ValidationErrors errors) {
     validateRegions(description, description.regions, "deleteAmazonLoadBalancerDescription", errors)
     if (!description.loadBalancerName) {
       errors.rejectValue "loadBalancerName", "deleteAmazonLoadBalancerDescription.loadBalancerName.empty"
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidator.groovy
index 929bf95757a..a704561d566 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidator.groovy
@@ -16,13 +16,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAsgTagsDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("deleteAsgTagsDescriptionValidator")
 class DeleteAsgTagsDescriptionValidator extends AmazonDescriptionValidationSupport<DeleteAsgTagsDescription> {
   @Override
-  void validate(List priorDescriptions, DeleteAsgTagsDescription description, Errors errors) {
+  void validate(List priorDescriptions, DeleteAsgTagsDescription description, ValidationErrors errors) {
     validateAsgs description, errors
     description.tagKeys.each {
       if (!it) {
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteScalingPolicyDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteScalingPolicyDescriptionValidator.groovy
index 02d28d72d83..e8866d7804d 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteScalingPolicyDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteScalingPolicyDescriptionValidator.groovy
@@ -17,13 +17,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteScalingPolicyDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("deleteScalingPolicyDescriptionValidator")
 class DeleteScalingPolicyDescriptionValidator extends AmazonDescriptionValidationSupport<DeleteScalingPolicyDescription> {
   @Override
-  void validate(List priorDescriptions, DeleteScalingPolicyDescription description, Errors errors) {
+  void validate(List priorDescriptions, DeleteScalingPolicyDescription description, ValidationErrors errors) {
     validateRegions(description, [description.region], "deleteScalingPolicyDescription", errors)
 
     if (!description.serverGroupName && !description.asgName) {
@@ -36,7 +36,7 @@ class DeleteScalingPolicyDescriptionValidatio
   }
 
-  static void rejectNull(String field, Errors errors) {
+  static void rejectNull(String field, ValidationErrors errors) {
     errors.rejectValue(field, "deleteScalingPolicyDescription.${field}.not.nullable")
   }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidator.groovy
index 065d0d93ef2..2e48570635e 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidator.groovy
@@ -17,12 +17,12 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteSecurityGroupDescription
 import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
-import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteSecurityGroupDescription
 import org.springframework.beans.factory.annotation.Autowired
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.DELETE_SECURITY_GROUP)
 @Component("deleteSecurityGroupDescriptionValidator")
@@ -32,7 +32,7 @@ class DeleteSecurityGroupDescriptionValidatio
   AmazonClientProvider amazonClientProvider
 
   @Override
-  void validate(List priorDescriptions, DeleteSecurityGroupDescription description, Errors errors) {
+  void validate(List priorDescriptions, DeleteSecurityGroupDescription description, ValidationErrors errors) {
     validateRegions(description, description.regions, "deleteSecurityGroupDescription", errors)
     if (!description.securityGroupName) {
       errors.rejectValue "securityGroupName", "deleteSecurityGroupDescription.securityGroupName.empty"
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeregisterInstancesFromLoadBalancerDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeregisterInstancesFromLoadBalancerDescriptionValidator.groovy
index f5c52a9bfc3..9947f4adeb1 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeregisterInstancesFromLoadBalancerDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeregisterInstancesFromLoadBalancerDescriptionValidator.groovy
@@ -18,15 +18,15 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER)
 @Component("deregisterInstancesFromLoadBalancerDescriptionValidator")
 class DeregisterInstancesFromLoadBalancerDescriptionValidator extends AmazonDescriptionValidationSupport<AbstractRegionAsgInstanceIdsDescription> {
   @Override
-  void validate(List priorDescriptions, AbstractRegionAsgInstanceIdsDescription description, Errors errors) {
+  void validate(List priorDescriptions, AbstractRegionAsgInstanceIdsDescription description, ValidationErrors errors) {
     validateAsgNameAndRegionAndInstanceIds(description, errors)
   }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DestroyAsgDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DestroyAsgDescriptionValidator.groovy
index a1cd99e52a1..e4600192b6a 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DestroyAsgDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DestroyAsgDescriptionValidator.groovy
@@ -18,15 +18,15 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.DestroyAsgDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.DESTROY_SERVER_GROUP)
 @Component("destroyAsgDescriptionValidator")
 class DestroyAsgDescriptionValidator extends AmazonDescriptionValidationSupport<DestroyAsgDescription> {
   @Override
-  void validate(List priorDescriptions, DestroyAsgDescription description, Errors errors) {
+  void validate(List priorDescriptions, DestroyAsgDescription description, ValidationErrors errors) {
     validateAsgs description, errors
   }
 }
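The change repeated across these validator files is mechanical: Spring's `org.springframework.validation.Errors` is replaced by clouddriver's own `com.netflix.spinnaker.clouddriver.deploy.ValidationErrors` in every `validate` signature. As a rough sketch of the contract the validators rely on (an illustrative stand-in, not the actual clouddriver class), the only behavior they need is `rejectValue(field, code)`:

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative stand-in for clouddriver's ValidationErrors: the only
// behavior the validators above depend on is rejectValue(field, code).
class DemoValidationErrors {
  private final List<String> errors = new ArrayList<>();

  void rejectValue(String field, String code) {
    errors.add(field + ": " + code);
  }

  List<String> getAllErrors() {
    return errors;
  }
}

public class ValidationErrorsDemo {
  public static void main(String[] args) {
    DemoValidationErrors errors = new DemoValidationErrors();
    String loadBalancerName = null; // simulate a missing required field
    if (loadBalancerName == null || loadBalancerName.isEmpty()) {
      errors.rejectValue("loadBalancerName",
          "deleteAmazonLoadBalancerDescription.loadBalancerName.empty");
    }
    errors.getAllErrors().forEach(System.out::println);
  }
}
```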
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DetachInstancesDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DetachInstancesDescriptionValidator.groovy
index 016fd9ac705..1a4c21b3595 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DetachInstancesDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DetachInstancesDescriptionValidator.groovy
@@ -17,13 +17,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.DetachInstancesDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("detachInstancesDescriptionValidator")
 class DetachInstancesDescriptionValidator extends AmazonDescriptionValidationSupport<DetachInstancesDescription> {
   @Override
-  void validate(List priorDescriptions, DetachInstancesDescription description, Errors errors) {
+  void validate(List priorDescriptions, DetachInstancesDescription description, ValidationErrors errors) {
     def key = DetachInstancesDescription.class.simpleName
     description.instanceIds.each {
       if (!it) {
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryDescriptionValidator.groovy
index 24958d0c9a0..393f77f05ba 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryDescriptionValidator.groovy
@@ -17,14 +17,14 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableInstanceDiscoveryDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("disableInstancesInDiscoveryDescriptionValidator")
 class DisableInstancesInDiscoveryDescriptionValidator extends AmazonDescriptionValidationSupport<EnableDisableInstanceDiscoveryDescription> {
   @Override
-  void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, Errors errors) {
+  void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, ValidationErrors errors) {
     def key = description.class.simpleName
 
     validateAsgNameAndRegionAndInstanceIds(description, errors)
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryDescriptionValidator.groovy
index 0962fafeb7f..70aef5ec280 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryDescriptionValidator.groovy
@@ -17,13 +17,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableInstanceDiscoveryDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("enableInstancesInDiscoveryDescriptionValidator")
 class EnableInstancesInDiscoveryDescriptionValidator extends AmazonDescriptionValidationSupport<EnableDisableInstanceDiscoveryDescription> {
-  void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, Errors errors) {
+  void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, ValidationErrors errors) {
     def key = description.class.simpleName
 
     validateAsgNameAndRegionAndInstanceIds(description, errors)
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyAsgLaunchConfigurationDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyAsgLaunchConfigurationDescriptionValidator.groovy
index 49ca2c8e988..aebf29f2da1 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyAsgLaunchConfigurationDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyAsgLaunchConfigurationDescriptionValidator.groovy
@@ -17,31 +17,32 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
-import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyAsgLaunchConfigurationDescription
 import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
+import com.netflix.spinnaker.credentials.CredentialsRepository
 import org.springframework.beans.factory.annotation.Autowired
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.UPDATE_LAUNCH_CONFIG)
 @Component("modifyAsgLaunchConfigurationDescriptionValidator")
 class ModifyAsgLaunchConfigurationDescriptionValidator extends AmazonDescriptionValidationSupport<ModifyAsgLaunchConfigurationDescription> {
   @Autowired
-  AccountCredentialsProvider accountCredentialsProvider
+  CredentialsRepository<NetflixAmazonCredentials> credentialsRepository
 
   @Override
-  void validate(List priorDescriptions, ModifyAsgLaunchConfigurationDescription description, Errors errors) {
+  void validate(List priorDescriptions, ModifyAsgLaunchConfigurationDescription description, ValidationErrors errors) {
     def key = ModifyAsgLaunchConfigurationDescription.class.simpleName
     validateRegion(description, description.region, key, errors)
 
     if (!description.credentials) {
       errors.rejectValue "credentials", "modifyAsgLaunchConfigurationDescription.credentials.empty"
     } else {
-      def credentials = accountCredentialsProvider.getCredentials(description?.credentials?.name)
-      if (!(credentials instanceof AmazonCredentials)) {
+      def credentials = credentialsRepository.getOne(description?.credentials?.name)
+      if (credentials == null) {
         errors.rejectValue("credentials", "modifyAsgLaunchConfigurationDescription.credentials.invalid")
       }
     }
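The ModifyAsgLaunchConfiguration validator above also swaps `AccountCredentialsProvider` for a typed `CredentialsRepository<NetflixAmazonCredentials>`. That makes the old `instanceof AmazonCredentials` check redundant: the repository can only return the credential type it is parameterized with, so presence is the only thing left to validate. A minimal sketch of that shape (the repository interface here is an illustrative stand-in, not the real `com.netflix.spinnaker.credentials.CredentialsRepository`):

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative stand-in for a typed credentials repository. The real
// interface is generic in its credential type, so getOne() can only
// return that type (or null when no account matches).
interface DemoCredentialsRepository<T> {
  T getOne(String name);
}

public class CredentialsLookupDemo {
  static final class AmazonCreds {
    final String name;
    AmazonCreds(String name) { this.name = name; }
  }

  public static void main(String[] args) {
    Map<String, AmazonCreds> backing = new HashMap<>();
    backing.put("prod", new AmazonCreds("prod"));
    DemoCredentialsRepository<AmazonCreds> repo = backing::get;

    // As in the validator above: the generic parameter already guarantees
    // the type, so only a null check is needed instead of instanceof.
    AmazonCreds creds = repo.getOne("staging"); // not registered -> null
    if (creds == null) {
      System.out.println("modifyAsgLaunchConfigurationDescription.credentials.invalid");
    }
  }
}
```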
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RebootInstancesDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RebootInstancesDescriptionValidator.groovy
index 29f10441093..f1e7c187568 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RebootInstancesDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RebootInstancesDescriptionValidator.groovy
@@ -17,16 +17,16 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.RebootInstancesDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.REBOOT_INSTANCES)
 @Component("rebootInstancesDescriptionValidator")
 class RebootInstancesDescriptionValidator extends AmazonDescriptionValidationSupport<RebootInstancesDescription> {
   @Override
-  void validate(List priorDescriptions, RebootInstancesDescription description, Errors errors) {
+  void validate(List priorDescriptions, RebootInstancesDescription description, ValidationErrors errors) {
     def key = RebootInstancesDescription.class.simpleName
     if (!description.instanceIds) {
       errors.rejectValue("instanceIds", "${key}.instanceIds.empty")
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RegisterInstancesWithLoadBalancerDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RegisterInstancesWithLoadBalancerDescriptionValidator.groovy
index 89ce0ca0fd9..2d0d5fd65c8 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RegisterInstancesWithLoadBalancerDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/RegisterInstancesWithLoadBalancerDescriptionValidator.groovy
@@ -18,15 +18,15 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER)
 @Component("registerInstancesWithLoadBalancerDescriptionValidator")
 class RegisterInstancesWithLoadBalancerDescriptionValidator extends AmazonDescriptionValidationSupport<AbstractRegionAsgInstanceIdsDescription> {
   @Override
-  void validate(List priorDescriptions, AbstractRegionAsgInstanceIdsDescription description, Errors errors) {
+  void validate(List priorDescriptions, AbstractRegionAsgInstanceIdsDescription description, ValidationErrors errors) {
     validateAsgNameAndRegionAndInstanceIds(description, errors)
   }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidator.groovy
index 6c3c510b3bf..2b1ac114d28 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidator.groovy
@@ -18,15 +18,15 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.ResizeAsgDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("resizeAsgDescriptionValidator")
 @AmazonOperation(AtomicOperations.RESIZE_SERVER_GROUP)
 class ResizeAsgDescriptionValidator extends AmazonDescriptionValidationSupport<ResizeAsgDescription> {
   @Override
-  void validate(List priorDescriptions, ResizeAsgDescription description, Errors errors) {
+  void validate(List priorDescriptions, ResizeAsgDescription description, ValidationErrors errors) {
     validateAsgsWithCapacity description, errors
   }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidator.groovy
index a110e06f677..6e4a827b784 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidator.groovy
@@ -17,13 +17,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.ResumeAsgProcessesDescription
 import com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component
 class ResumeAsgProcessesDescriptionValidator extends AmazonDescriptionValidationSupport<ResumeAsgProcessesDescription> {
   @Override
-  void validate(List priorDescriptions, ResumeAsgProcessesDescription description, Errors errors) {
+  void validate(List priorDescriptions, ResumeAsgProcessesDescription description, ValidationErrors errors) {
     validateAsgs description, errors
     def invalidProcessTypes = description.processes.findAll { !AutoScalingProcessType.parse(it) }
     if (invalidProcessTypes) {
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidator.groovy
index 7d75569547a..77a6cb7e8ef 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidator.groovy
@@ -17,13 +17,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.SuspendAsgProcessesDescription
 import com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component
 class SuspendAsgProcessesDescriptionValidator extends AmazonDescriptionValidationSupport<SuspendAsgProcessesDescription> {
   @Override
-  void validate(List priorDescriptions, SuspendAsgProcessesDescription description, Errors errors) {
+  void validate(List priorDescriptions, SuspendAsgProcessesDescription description, ValidationErrors errors) {
     validateAsgs description, errors
     def invalidProcessTypes = description.processes.findAll { !AutoScalingProcessType.parse(it) }
     if (invalidProcessTypes) {
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidator.groovy
index d9b12294702..11f3b64e931 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidator.groovy
@@ -17,16 +17,16 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.TerminateInstanceAndDecrementAsgDescription
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.TERMINATE_INSTANCE_AND_DECREMENT)
 @Component("terminateInstanceAndDecrementAsgDescriptionValidator")
 class TerminateInstanceAndDecrementAsgDescriptionValidator extends AmazonDescriptionValidationSupport<TerminateInstanceAndDecrementAsgDescription> {
   @Override
-  void validate(List priorDescriptions, TerminateInstanceAndDecrementAsgDescription description, Errors errors) {
+  void validate(List priorDescriptions, TerminateInstanceAndDecrementAsgDescription description, ValidationErrors errors) {
     def key = TerminateInstanceAndDecrementAsgDescription.class.simpleName
     validateAsgName description, errors
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidator.groovy
index 57cff26f424..064c7b6bcdf 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidator.groovy
@@ -16,16 +16,16 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.TerminateInstancesDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.TERMINATE_INSTANCES)
 @Component("terminateInstancesDescriptionValidator")
 class TerminateInstancesDescriptionValidator extends AmazonDescriptionValidationSupport<TerminateInstancesDescription> {
   @Override
-  void validate(List priorDescriptions, TerminateInstancesDescription description, Errors errors) {
+  void validate(List priorDescriptions, TerminateInstancesDescription description, ValidationErrors errors) {
     def key = TerminateInstancesDescription.class.simpleName
     description.instanceIds.each {
       if (!it) {
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpdateInstancesDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpdateInstancesDescriptionValidator.groovy
index eab180f74cd..8d66f471d47 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpdateInstancesDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpdateInstancesDescriptionValidator.groovy
@@ -18,16 +18,16 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpdateInstancesDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.UPDATE_INSTANCES)
 @Component("updateInstancesDescriptionValidator")
 class UpdateInstancesDescriptionValidator extends AmazonDescriptionValidationSupport<UpdateInstancesDescription> {
   @Override
-  void validate(List priorDescriptions, UpdateInstancesDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpdateInstancesDescription description, ValidationErrors errors) {
     if (!description.serverGroupName) {
       errors.rejectValue("name", "updateSecurityGroupsDescription.name.not.nullable")
     }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAlarmDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAlarmDescriptionValidator.groovy
index 304f2c04ce1..d05eb4fb44d 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAlarmDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAlarmDescriptionValidator.groovy
@@ -17,13 +17,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAlarmDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("upsertAlarmDescriptionValidator")
 class UpsertAlarmDescriptionValidator extends AmazonDescriptionValidationSupport<UpsertAlarmDescription> {
   @Override
-  void validate(List priorDescriptions, UpsertAlarmDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpsertAlarmDescription description, ValidationErrors errors) {
     validateRegions(description, [description.region], "upsertAlarmDescription", errors)
 
     if (!description.metricName) {
@@ -52,7 +52,7 @@ class UpsertAlarmDescriptionValidator extends AmazonDescriptionValidationSupport
   }
 
-  static void rejectNull(String field, Errors errors) {
+  static void rejectNull(String field, ValidationErrors errors) {
     errors.rejectValue(field, "upsertAlarmDescription.${field}.not.nullable")
   }
 }
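Several validators in this change (DeleteScalingPolicy, UpsertAlarm, UpsertAsgLifecycleHook, UpsertScalingPolicy) carry the same static `rejectNull` helper, and the rejection codes all follow a `<descriptionName>.<field>.<reason>` convention. A compact, self-contained sketch of that convention (the field name and the plain string list are illustrative, standing in for the real error object):

```java
import java.util.ArrayList;
import java.util.List;

public class RejectNullDemo {
  // Same idea as the validators' static helper: centralize construction of
  // "<descriptionName>.<field>.not.nullable" rejection codes.
  static void rejectNull(String descriptionName, String field, List<String> errors) {
    errors.add(field + ": " + descriptionName + "." + field + ".not.nullable");
  }

  public static void main(String[] args) {
    List<String> errors = new ArrayList<>();
    Integer threshold = null; // simulate a required field left unset
    if (threshold == null) {
      rejectNull("upsertAlarmDescription", "threshold", errors);
    }
    errors.forEach(System.out::println);
  }
}
```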
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidator.groovy
index cc336f1bf5a..81e9b7b40ef 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidator.groovy
@@ -16,12 +16,12 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
-import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonDNSDescription
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.beans.factory.annotation.Autowired
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("upsertAmazonDNSDescriptionValidator")
 class UpsertAmazonDNSDescriptionValidator extends AmazonDescriptionValidationSupport<UpsertAmazonDNSDescription> {
@@ -31,7 +31,7 @@ class UpsertAmazonDNSDescriptionValidator extends AmazonDescriptionValidationSup
   AmazonClientProvider amazonClientProvider
 
   @Override
-  void validate(List priorDescriptions, UpsertAmazonDNSDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpsertAmazonDNSDescription description, ValidationErrors errors) {
     def upstreamElb = priorDescriptions.find { it instanceof UpsertAmazonLoadBalancerDescription }
     if (!upstreamElb && !description.target) {
       errors.rejectValue("target", "upsertAmazonDNSDescription.target.empty")
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgLifecycleHookDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgLifecycleHookDescriptionValidator.groovy
index 9359079b9eb..198d6811b41 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgLifecycleHookDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgLifecycleHookDescriptionValidator.groovy
@@ -17,14 +17,14 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAsgLifecycleHookDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("upsertAsgLifecycleHookDescriptionValidator")
 class UpsertAsgLifecycleHookDescriptionValidator extends AmazonDescriptionValidationSupport<UpsertAsgLifecycleHookDescription> {
 
   @Override
-  void validate(List priorDescriptions, UpsertAsgLifecycleHookDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpsertAsgLifecycleHookDescription description, ValidationErrors errors) {
     validateRegions(description, [description.region], "upsertAsgLifecycleHookDescription", errors)
 
     if (!description.serverGroupName) {
@@ -48,7 +48,7 @@ class UpsertAsgLifecycleHookDescriptionValida
     }
   }
 
-  static void rejectNull(String field, Errors errors) {
+  static void rejectNull(String field, ValidationErrors errors) {
     errors.rejectValue(field, "upsertAsgLifecycleHookDescription.${field}.not.nullable")
   }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidator.groovy
index a2c42c9d2ee..c558eec6854 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidator.groovy
@@ -18,15 +18,15 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAsgTagsDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.UPSERT_SERVER_GROUP_TAGS)
 @Component("upsertAsgTagsDescriptionValidator")
 class UpsertAsgTagsDescriptionValidator extends AmazonDescriptionValidationSupport<UpsertAsgTagsDescription> {
   @Override
-  void validate(List priorDescriptions, UpsertAsgTagsDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpsertAsgTagsDescription description, ValidationErrors errors) {
     validateAsgs description, errors
     if (!description.tags) {
       errors.rejectValue("tags", "upsertAsgTagsDescription.tags.empty")
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertScalingPolicyDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertScalingPolicyDescriptionValidator.groovy
index 4f95f78f0af..69ada29c481 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertScalingPolicyDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertScalingPolicyDescriptionValidator.groovy
@@ -18,13 +18,13 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.AdjustmentType
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertScalingPolicyDescription
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @Component("upsertScalingPolicyDescriptionValidator")
 class UpsertScalingPolicyDescriptionValidator extends AmazonDescriptionValidationSupport<UpsertScalingPolicyDescription> {
   @Override
-  void validate(List priorDescriptions, UpsertScalingPolicyDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpsertScalingPolicyDescription description, ValidationErrors errors) {
     validateRegions(description, [description.region], "upsertScalingPolicyDescription", errors)
 
     if (!description.serverGroupName && !description.asgName) {
@@ -48,7 +48,7 @@ class UpsertScalingPolicyDescriptionValidatio
     }
   }
 
-  static void rejectNull(String field, Errors errors) {
+  static void rejectNull(String field, ValidationErrors errors) {
     errors.rejectValue(field, "upsertScalingPolicyDescription.${field}.not.nullable")
   }
 }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidator.groovy
index 2af75e71395..cf9814687e4 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidator.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidator.groovy
@@ -17,13 +17,12 @@
 package com.netflix.spinnaker.clouddriver.aws.deploy.validators
 
 import com.netflix.spinnaker.clouddriver.aws.AmazonOperation
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription
-import com.netflix.spinnaker.clouddriver.aws.model.SecurityGroupNotFoundException
 import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
 import org.springframework.beans.factory.annotation.Autowired
 import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
 
 @AmazonOperation(AtomicOperations.UPSERT_SECURITY_GROUP)
 @Component("upsertSecurityGroupDescriptionValidator")
@@ -32,7 +31,7 @@ class UpsertSecurityGroupDescriptionValidatio
   RegionScopedProviderFactory regionScopedProviderFactory
 
   @Override
-  void validate(List priorDescriptions, UpsertSecurityGroupDescription description, Errors errors) {
+  void validate(List priorDescriptions, UpsertSecurityGroupDescription description, ValidationErrors errors) {
     if (!description.name) {
       errors.rejectValue("name", "upsertSecurityGroupDescription.name.not.nullable")
     }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApi.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApi.groovy
index cbf7554b585..9bc3e57e153 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApi.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApi.groovy
@@ -17,6 +17,8 @@
 package com.netflix.spinnaker.clouddriver.aws.edda
 
 import com.amazonaws.services.elasticloadbalancingv2.model.Listener
+import com.netflix.spinnaker.clouddriver.aws.model.edda.ApplicationLoadBalancerAttributes
+import com.netflix.spinnaker.clouddriver.aws.model.edda.ClassicLoadBalancerAttributes
 import com.netflix.spinnaker.clouddriver.aws.model.edda.EddaRule
 import com.netflix.spinnaker.clouddriver.aws.model.edda.LoadBalancerInstanceState
 import com.netflix.spinnaker.clouddriver.aws.model.edda.TargetGroupAttributes
@@ -45,4 +47,10 @@ interface EddaApi {
 
   @GET('/REST/v2/view/appLoadBalancerRules;_expand')
   List<List<EddaRule>> allRules()
+
+  @GET('/REST/v2/view/appLoadBalancerAttributes;_expand')
+  List<ApplicationLoadBalancerAttributes> applicationLoadBalancerAttributes()
+
+  @GET('/REST/v2/view/loadBalancerAttributes;_expand')
+  List<ClassicLoadBalancerAttributes> classicLoadBalancerAttributes()
 }
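The two endpoints added to `EddaApi` follow the same Retrofit 1.x pattern as the existing view calls. A rough usage sketch, with a stand-in interface and a placeholder endpoint URL (the real client is built by `EddaApiFactory`, whose change follows):

```java
import java.util.List;
import retrofit.RestAdapter;
import retrofit.http.GET;

// Minimal sketch of the Retrofit 1.x pattern EddaApi uses; DemoEddaApi
// and the endpoint URL are illustrative stand-ins, not the real types.
interface DemoEddaApi {
  @GET("/REST/v2/view/appLoadBalancerAttributes;_expand")
  List<Object> applicationLoadBalancerAttributes();
}

public class EddaClientSketch {
  public static void main(String[] args) {
    DemoEddaApi edda = new RestAdapter.Builder()
        .setEndpoint("http://edda.example.com") // placeholder endpoint
        .build()
        .create(DemoEddaApi.class);

    // One HTTP GET per call; the default converter maps the JSON array.
    System.out.println(edda.applicationLoadBalancerAttributes().size());
  }
}
```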
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApiFactory.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApiFactory.groovy
index 19d4bd7f0f4..3335dc3925d 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApiFactory.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaApiFactory.groovy
@@ -16,6 +16,7 @@
 
 package com.netflix.spinnaker.clouddriver.aws.edda
 
+import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler
 import retrofit.RestAdapter
 import retrofit.converter.Converter
 
@@ -33,6 +34,7 @@ class EddaApiFactory {
       return new RestAdapter.Builder()
         .setConverter(eddaConverter)
         .setEndpoint(endpointTemplate.replaceAll(Pattern.quote('{{region}}'), region))
+        .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance())
         .build()
         .create(EddaApi)
   }
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaConfiguration.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaConfiguration.groovy
index e2142a23214..2b08bc715cf 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaConfiguration.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/edda/EddaConfiguration.groovy
@@ -18,6 +18,8 @@
 package com.netflix.spinnaker.clouddriver.aws.edda
 
 import com.fasterxml.jackson.databind.DeserializationFeature
 import com.fasterxml.jackson.databind.ObjectMapper
+import com.netflix.awsobjectmapper.AmazonObjectMapper
+import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer
 import org.springframework.context.annotation.Bean
 import org.springframework.context.annotation.Configuration
 import retrofit.converter.Converter
@@ -27,7 +29,7 @@ import retrofit.converter.JacksonConverter
 class EddaConfiguration {
   @Bean
   Converter eddaConverter() {
-    new JacksonConverter(new ObjectMapper()
+    new JacksonConverter(AmazonObjectMapperConfigurer.createConfigured()
       .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES))
   }
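The `EddaConfiguration` change swaps a bare Jackson `ObjectMapper` for one produced by `AmazonObjectMapperConfigurer.createConfigured()`, which registers the awsobjectmapper mix-ins so Edda payloads can deserialize into AWS SDK model types. A minimal sketch of the same construction (the sample JSON is illustrative):

```java
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer;

public class EddaMapperSketch {
  public static void main(String[] args) throws Exception {
    // createConfigured() returns a Jackson ObjectMapper pre-configured with
    // mix-ins for AWS SDK model classes; unknown fields are then ignored,
    // mirroring the eddaConverter bean above.
    ObjectMapper mapper = AmazonObjectMapperConfigurer.createConfigured()
        .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
    System.out.println(mapper.readTree("{\"state\":\"active\"}"));
  }
}
```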
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEventHandler.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEventHandler.java
deleted file mode 100644
index 755b4cd1114..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEventHandler.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2018 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.aws.event;
-
-import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
-import com.amazonaws.services.autoscaling.model.Instance;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.TerminateInstancesRequest;
-import com.google.common.collect.Lists;
-import com.netflix.spinnaker.clouddriver.data.task.Task;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-public interface AfterResizeEventHandler {
-  int MAX_SIMULTANEOUS_TERMINATIONS = 100;
-  String PHASE = "RESIZE";
-
-  void handle(AfterResizeEvent event);
-
-  default void terminateInstancesInAutoScalingGroup(Task task, AmazonEC2 amazonEC2, AutoScalingGroup autoScalingGroup) {
-    String serverGroupName = autoScalingGroup.getAutoScalingGroupName();
-
-    List<String> instanceIds = autoScalingGroup
-      .getInstances()
-      .stream()
-      .map(Instance::getInstanceId)
-      .collect(Collectors.toList());
-
-    int terminatedCount = 0;
-    for (List<String> partition : Lists.partition(instanceIds, MAX_SIMULTANEOUS_TERMINATIONS)) {
-      try {
-        terminatedCount += partition.size();
-        task.updateStatus(
-          PHASE,
-          String.format("Terminating %d of %d instances in %s", terminatedCount, instanceIds.size(), serverGroupName)
-        );
-        amazonEC2.terminateInstances(new TerminateInstancesRequest().withInstanceIds(partition));
-      } catch (Exception e) {
-        task.updateStatus(
-          PHASE,
-          String.format("Unable to terminate instances, reason: '%s'", e.getMessage())
-        );
-      }
-    }
-  }
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/DefaultAfterResizeEventHandler.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/DefaultAfterResizeEventHandler.java
deleted file mode 100644
index 0d413d51378..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/DefaultAfterResizeEventHandler.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2018 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.aws.event;
-
-import com.amazonaws.services.autoscaling.AmazonAutoScaling;
-import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
-import com.amazonaws.services.autoscaling.model.DescribeLifecycleHooksRequest;
-import com.amazonaws.services.autoscaling.model.DescribeLifecycleHooksResult;
-import com.amazonaws.services.autoscaling.model.LifecycleHook;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-public class DefaultAfterResizeEventHandler implements AfterResizeEventHandler {
-  private final Logger log = LoggerFactory.getLogger(getClass());
-
-  /**
-   * There is an opportunity to expedite a resize to zero by explicitly terminating instances
-   * (server group _must not_ be attached to a load balancer nor have any life cycle hooks)
-   */
-  @Override
-  public void handle(AfterResizeEvent event) {
-    AutoScalingGroup autoScalingGroup = event.getAutoScalingGroup();
-
-    if (event.getCapacity() == null || event.getCapacity().getDesired() == null) {
-      return;
-    }
-
-    if (event.getCapacity().getDesired() > 0) {
-      return;
-    }
-
-    if (!autoScalingGroup.getLoadBalancerNames().isEmpty() || !autoScalingGroup.getTargetGroupARNs().isEmpty()) {
-      event.getTask().updateStatus(
-        PHASE,
-        "Skipping explicit instance termination, server group is attached to one or more load balancers"
-      );
-      return;
-    }
-
-    try {
-      List<LifecycleHook> existingLifecycleHooks = fetchTerminatingLifecycleHooks(
-        event.getAmazonAutoScaling(),
-        autoScalingGroup.getAutoScalingGroupName()
-      );
-      if (!existingLifecycleHooks.isEmpty()) {
-        event.getTask().updateStatus(
-          PHASE,
-          "Skipping explicit instance termination, server group has one or more lifecycle hooks"
-        );
-        return;
-      }
-    } catch (Exception e) {
-      log.error(
-        "Unable to fetch lifecycle hooks (serverGroupName: {}, arn: {})",
-        autoScalingGroup.getAutoScalingGroupName(),
-        autoScalingGroup.getAutoScalingGroupARN(),
-        e
-      );
-
-      event.getTask().updateStatus(
-        PHASE,
-        String.format(
-          "Skipping explicit instance termination, unable to fetch lifecycle hooks (reason: '%s')",
-          e.getMessage()
-        )
-      );
-      return;
-    }
-
-    terminateInstancesInAutoScalingGroup(
-      event.getTask(), event.getAmazonEC2(), event.getAutoScalingGroup()
-    );
-  }
-
-  private static List<LifecycleHook> fetchTerminatingLifecycleHooks(AmazonAutoScaling amazonAutoScaling,
-                                                                    String serverGroupName) {
-    DescribeLifecycleHooksRequest request = new DescribeLifecycleHooksRequest()
-      .withAutoScalingGroupName(serverGroupName);
-
-    return amazonAutoScaling.describeLifecycleHooks(request)
-      .getLifecycleHooks()
-      .stream()
-      .filter(h -> "autoscaling:EC2_INSTANCE_TERMINATING".equalsIgnoreCase(h.getLifecycleTransition()))
-      .collect(Collectors.toList());
-  }
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicator.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicator.groovy
deleted file mode 100644
index 45f10119cd9..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicator.groovy
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2015 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.aws.health
-
-import com.amazonaws.AmazonClientException
-import com.amazonaws.AmazonServiceException
-import com.amazonaws.services.ec2.model.AmazonEC2Exception
-import com.netflix.spectator.api.Counter
-import com.netflix.spectator.api.Registry
-import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
-import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
-import groovy.transform.InheritConstructors
-import org.slf4j.Logger
-import org.slf4j.LoggerFactory
-import org.springframework.beans.factory.annotation.Autowired
-import org.springframework.boot.actuate.health.Health
-import org.springframework.boot.actuate.health.HealthIndicator
-import org.springframework.http.HttpStatus
-import org.springframework.scheduling.annotation.Scheduled
-import org.springframework.stereotype.Component
-import org.springframework.web.bind.annotation.ResponseStatus
-
-import java.util.concurrent.atomic.AtomicLong
-import java.util.concurrent.atomic.AtomicReference
-
-@Component
-class AmazonHealthIndicator implements HealthIndicator {
-
-  private static final Logger LOG = LoggerFactory.getLogger(AmazonHealthIndicator)
-
-  private final AccountCredentialsProvider accountCredentialsProvider
-  private final AmazonClientProvider amazonClientProvider
-
-  private final AtomicReference<Exception> lastException = new AtomicReference<>(null)
-  private final AtomicReference<Boolean> hasInitialized = new AtomicReference<>(null)
-
-  private final AtomicLong errors;
-
-  @Autowired
-  AmazonHealthIndicator(AccountCredentialsProvider accountCredentialsProvider,
-                        AmazonClientProvider amazonClientProvider,
-                        Registry registry) {
-    this.accountCredentialsProvider = accountCredentialsProvider
-    this.amazonClientProvider = amazonClientProvider
-
-    this.errors = registry.gauge("health.amazon.errors", new AtomicLong(0))
-  }
-
-  @Override
-  Health health() {
-    if (hasInitialized.get() == Boolean.TRUE) {
-      // avoid being marked unhealthy once connectivity to all accounts has been verified at least once
-      return new Health.Builder().up().build()
-    }
-
-    def ex = lastException.get()
-    if (ex) {
-      throw ex
-    }
-
-    return new Health.Builder().unknown().build()
-  }
-
-  @Scheduled(fixedDelay = 120000L)
-  void checkHealth() {
-    try {
-      Set<NetflixAmazonCredentials> amazonCredentials = accountCredentialsProvider.all.findAll {
-        it instanceof NetflixAmazonCredentials
-      } as Set<NetflixAmazonCredentials>
-      for (NetflixAmazonCredentials credentials in amazonCredentials) {
-        try {
-          def ec2 = amazonClientProvider.getAmazonEC2(credentials, AmazonClientProvider.DEFAULT_REGION, true)
-          if (!ec2) {
-            throw new AmazonClientException("Could not create Amazon client for ${credentials.name}")
-          }
-          ec2.describeAccountAttributes()
-        } catch (AmazonServiceException e) {
-          throw new AmazonUnreachableException("Failed to describe account attributes for '${credentials.name}'", e)
-        }
-      }
-      hasInitialized.set(Boolean.TRUE)
-      lastException.set(null)
-      errors.set(0)
-    } catch (Exception ex) {
-      LOG.error "Unhealthy", ex
-      lastException.set(ex)
-      errors.set(1)
-    }
-  }
-
-  @ResponseStatus(value = HttpStatus.SERVICE_UNAVAILABLE, reason = 'Could not reach Amazon.')
-  @InheritConstructors
-  static class AmazonUnreachableException extends RuntimeException {}
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/ARN.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/ARN.java
deleted file mode 100644
index ac9c85a3524..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/ARN.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.spinnaker.clouddriver.aws.lifecycle;
-
-import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
-import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
-
-import java.util.Collection;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-class ARN {
-
-  static final Pattern PATTERN = Pattern.compile("arn:aws(?:-cn|-us-gov)?:.*:(.*):(\\d+):(.*)");
-
-  String arn;
-  String region;
-  String name;
-
-  NetflixAmazonCredentials account;
-
-  ARN(Collection<? extends AccountCredentials> accountCredentials, String arn) {
-    this.arn = arn;
-
-    Matcher sqsMatcher = PATTERN.matcher(arn);
-    if (!sqsMatcher.matches()) {
-      throw new IllegalArgumentException(arn + " is not a valid SNS or SQS ARN");
-    }
-
-    this.region = sqsMatcher.group(1);
-    this.name = sqsMatcher.group(3);
-
-    String accountId = sqsMatcher.group(2);
-    this.account = (NetflixAmazonCredentials) accountCredentials.stream()
-      .filter(c -> accountId.equals(c.getAccountId()))
-      .findFirst()
-      .orElseThrow(() -> new IllegalArgumentException("No account credentials found for " + accountId));
-
-  }
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationConfigurationProperties.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationConfigurationProperties.groovy
index 4edb620516a..891d054d854 100644
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationConfigurationProperties.groovy
+++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationConfigurationProperties.groovy
@@ -18,7 +18,7 @@
 package com.netflix.spinnaker.clouddriver.aws.lifecycle
 
 import org.springframework.boot.context.properties.ConfigurationProperties
 
-@ConfigurationProperties("aws.lifecycleSubscribers.instanceTermination")
+@ConfigurationProperties("aws.lifecycle-subscribers.instance-termination")
 class InstanceTerminationConfigurationProperties {
   String accountName
   String queueARN
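The `@ConfigurationProperties` prefix above moves from camelCase to its canonical kebab-case form, which newer Spring Boot versions require in the annotation itself; relaxed binding still maps existing camelCase keys in config files onto it. An equivalent Java sketch (the class name is illustrative):

```java
import org.springframework.boot.context.properties.ConfigurationProperties;

// Illustrative equivalent of the change above: the prefix in the annotation
// must use the canonical kebab-case form; Spring Boot's relaxed binding
// still maps camelCase keys from application.yml onto these fields.
@ConfigurationProperties("aws.lifecycle-subscribers.instance-termination")
public class DemoInstanceTerminationProperties {
  private String accountName;
  private String queueARN;

  public String getAccountName() { return accountName; }
  public void setAccountName(String accountName) { this.accountName = accountName; }
  public String getQueueARN() { return queueARN; }
  public void setQueueARN(String queueARN) { this.queueARN = queueARN; }
}
```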
100644 index cb957692d1b..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorker.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.clouddriver.aws.lifecycle; - -import com.amazonaws.auth.policy.*; -import com.amazonaws.auth.policy.Statement.Effect; -import com.amazonaws.auth.policy.actions.SNSActions; -import com.amazonaws.auth.policy.actions.SQSActions; -import com.amazonaws.services.sns.AmazonSNS; -import com.amazonaws.services.sns.model.SetTopicAttributesRequest; -import com.amazonaws.services.sqs.AmazonSQS; -import com.amazonaws.services.sqs.model.Message; -import com.amazonaws.services.sqs.model.ReceiptHandleIsInvalidException; -import com.amazonaws.services.sqs.model.ReceiveMessageRequest; -import com.amazonaws.services.sqs.model.ReceiveMessageResult; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.frigga.Names; -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.discovery.AwsEurekaSupport; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials.LifecycleHook; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.eureka.api.Eureka; -import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.http.HttpStatus; -import retrofit.RetrofitError; - -import javax.inject.Provider; -import java.io.IOException; -import java.time.Duration; -import java.util.*; -import java.util.stream.Collectors; - -public class InstanceTerminationLifecycleWorker implements Runnable { - - private static final Logger log = LoggerFactory.getLogger(InstanceTerminationLifecycleWorker.class); - - private static final int AWS_MAX_NUMBER_OF_MESSAGES = 10; - private static final String SUPPORTED_LIFECYCLE_TRANSITION = "autoscaling:EC2_INSTANCE_TERMINATING"; - - ObjectMapper objectMapper; - AmazonClientProvider amazonClientProvider; - AccountCredentialsProvider accountCredentialsProvider; - InstanceTerminationConfigurationProperties properties; - Provider discoverySupport; - Registry registry; - - private final ARN queueARN; - private final ARN topicARN; - - private String queueId = null; - - public InstanceTerminationLifecycleWorker(ObjectMapper objectMapper, - AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, - InstanceTerminationConfigurationProperties properties, - Provider discoverySupport, - Registry registry) { - this.objectMapper = 
objectMapper; - this.amazonClientProvider = amazonClientProvider; - this.accountCredentialsProvider = accountCredentialsProvider; - this.properties = properties; - this.discoverySupport = discoverySupport; - this.registry = registry; - - Set accountCredentials = accountCredentialsProvider.getAll(); - this.queueARN = new ARN(accountCredentials, properties.getQueueARN()); - this.topicARN = new ARN(accountCredentials, properties.getTopicARN()); - } - - public String getWorkerName() { - return queueARN.account.getName() + "/" + queueARN.region + "/" + InstanceTerminationLifecycleWorker.class.getSimpleName(); - } - - @Override - public void run() { - log.info("Starting " + getWorkerName()); - - while (true) { - try { - listenForMessages(); - } catch (Throwable e) { - log.error("Unexpected error running " + getWorkerName() + ", restarting", e); - } - } - } - - private void listenForMessages() { - AmazonSQS amazonSQS = amazonClientProvider.getAmazonSQS(queueARN.account, queueARN.region); - AmazonSNS amazonSNS = amazonClientProvider.getAmazonSNS(topicARN.account, topicARN.region); - - Set accountCredentials = accountCredentialsProvider.getAll(); - List allAccountIds = getAllAccountIds(accountCredentials); - - this.queueId = ensureQueueExists( - amazonSQS, queueARN, topicARN, getSourceRoleArns(accountCredentials), properties.getSqsMessageRetentionPeriodSeconds() - ); - ensureTopicExists(amazonSNS, topicARN, allAccountIds, queueARN); - - while (true) { - ReceiveMessageResult receiveMessageResult = amazonSQS.receiveMessage( - new ReceiveMessageRequest(queueId) - .withMaxNumberOfMessages(AWS_MAX_NUMBER_OF_MESSAGES) - .withVisibilityTimeout(properties.getVisibilityTimeout()) - .withWaitTimeSeconds(properties.getWaitTimeSeconds()) - ); - - if (receiveMessageResult.getMessages().isEmpty()) { - // No messages - continue; - } - - receiveMessageResult.getMessages().forEach(message -> { - LifecycleMessage lifecycleMessage = unmarshalLifecycleMessage(message.getBody()); - - if (lifecycleMessage != null) { - if (!SUPPORTED_LIFECYCLE_TRANSITION.equalsIgnoreCase(lifecycleMessage.lifecycleTransition)) { - log.info("Ignoring unsupported lifecycle transition: " + lifecycleMessage.lifecycleTransition); - deleteMessage(amazonSQS, queueId, message); - return; - } - handleMessage(lifecycleMessage); - } - - deleteMessage(amazonSQS, queueId, message); - registry.counter(getProcessedMetricId(queueARN.region)).increment(); - }); - } - } - - private LifecycleMessage unmarshalLifecycleMessage(String messageBody) { - String body = messageBody; - try { - NotificationMessageWrapper wrapper = objectMapper.readValue(messageBody, NotificationMessageWrapper.class); - if (wrapper != null && wrapper.message != null) { - body = wrapper.message; - } - } catch (IOException e) { - // Try to unwrap a notification message; if that doesn't work, - // assume that we're dealing with a message directly from SQS. - log.debug("Unable unmarshal NotificationMessageWrapper. Assuming SQS message. 
(body: {})", messageBody, e); - } - - LifecycleMessage lifecycleMessage = null; - try { - lifecycleMessage = objectMapper.readValue(body, LifecycleMessage.class); - } catch (IOException e) { - log.error("Unable to unmarshal LifecycleMessage (body: {})", body, e); - } - - return lifecycleMessage; - } - - private void handleMessage(LifecycleMessage message) { - NetflixAmazonCredentials credentials = getAccountCredentialsById(message.accountId); - if (credentials == null) { - log.error("Unable to find credentials for account id: {}", message.accountId); - return; - } - - Names names = Names.parseName(message.autoScalingGroupName); - Eureka eureka = discoverySupport.get().getEureka(credentials, queueARN.region); - - if (!updateInstanceStatus(eureka, names.getApp(), message.ec2InstanceId)) { - registry.counter(getFailedMetricId(queueARN.region)).increment(); - } - recordLag(message.time, queueARN.region, message.accountId, message.autoScalingGroupName, message.ec2InstanceId); - } - - private boolean updateInstanceStatus(Eureka eureka, String app, String instanceId) { - int retry = 0; - while (retry < properties.getEurekaUpdateStatusRetryMax()) { - retry++; - try { - eureka.updateInstanceStatus(app, instanceId, DiscoveryStatus.Disable.getValue()); - return true; - } catch (RetrofitError e) { - final String recoverableMessage = "Failed marking app out of service (status: {}, app: {}, instance: {}, retry: {})"; - if (HttpStatus.NOT_FOUND.value() == e.getResponse().getStatus()) { - log.warn(recoverableMessage, e.getResponse().getStatus(), app, instanceId, retry); - } else if (e.getKind() == RetrofitError.Kind.NETWORK) { - log.error(recoverableMessage, e.getResponse().getStatus(), app, instanceId, retry, e); - } else { - log.error("Irrecoverable error while marking app out of service (app: {}, instance: {}, retry: {})", app, instanceId, retry, e); - break; - } - } - } - return false; - } - - private static void deleteMessage(AmazonSQS amazonSQS, String queueUrl, Message message) { - try { - amazonSQS.deleteMessage(queueUrl, message.getReceiptHandle()); - } catch (ReceiptHandleIsInvalidException e) { - log.warn("Error deleting lifecycle message, reason: {} (receiptHandle: {})", e.getMessage(), message.getReceiptHandle()); - } - } - - private NetflixAmazonCredentials getAccountCredentialsById(String accountId) { - for (AccountCredentials credentials : accountCredentialsProvider.getAll()) { - if (credentials.getAccountId() != null && credentials.getAccountId().equals(accountId)) { - return (NetflixAmazonCredentials) credentials; - } - } - return null; - } - - private static String ensureTopicExists(AmazonSNS amazonSNS, - ARN topicARN, - List allAccountIds, - ARN queueARN) { - topicARN.arn = amazonSNS.createTopic(topicARN.name).getTopicArn(); - - amazonSNS.setTopicAttributes( - new SetTopicAttributesRequest() - .withTopicArn(topicARN.arn) - .withAttributeName("Policy") - .withAttributeValue(buildSNSPolicy(topicARN, allAccountIds).toJson()) - ); - - amazonSNS.subscribe(topicARN.arn, "sqs", queueARN.arn); - - return topicARN.arn; - } - - private static Policy buildSNSPolicy(ARN topicARN, List allAccountIds) { - Statement statement = new Statement(Statement.Effect.Allow).withActions(SNSActions.Publish); - statement.setPrincipals(allAccountIds.stream().map(Principal::new).collect(Collectors.toList())); - statement.setResources(Collections.singletonList(new Resource(topicARN.arn))); - - return new Policy("allow-remote-account-send", Collections.singletonList(statement)); - } - - private static String 
ensureQueueExists(AmazonSQS amazonSQS, - ARN queueARN, - ARN topicARN, - Set terminatingRoleArns, - int sqsMessageRetentionPeriodSeconds) { - String queueUrl = amazonSQS.createQueue(queueARN.name).getQueueUrl(); - - HashMap attributes = new HashMap<>(); - attributes.put("Policy", buildSQSPolicy(queueARN, topicARN, terminatingRoleArns).toJson()); - attributes.put("MessageRetentionPeriod", Integer.toString(sqsMessageRetentionPeriodSeconds)); - amazonSQS.setQueueAttributes( - queueUrl, - attributes - ); - - return queueUrl; - } - - /** - * This policy allows operators to choose whether or not to have lifecycle hooks to be sent via SNS for fanout, or - * be sent directly to an SQS queue from the autoscaling group. - */ - private static Policy buildSQSPolicy(ARN queue, ARN topic, Set terminatingRoleArns) { - Statement snsStatement = new Statement(Effect.Allow).withActions(SQSActions.SendMessage); - snsStatement.setPrincipals(Principal.All); - snsStatement.setResources(Collections.singletonList(new Resource(queue.arn))); - snsStatement.setConditions(Collections.singletonList( - new Condition().withType("ArnEquals").withConditionKey("aws:SourceArn").withValues(topic.arn) - )); - - Statement sqsStatement = new Statement(Effect.Allow).withActions(SQSActions.SendMessage, SQSActions.GetQueueUrl); - sqsStatement.setPrincipals(terminatingRoleArns.stream().map(Principal::new).collect(Collectors.toList())); - sqsStatement.setResources(Collections.singletonList(new Resource(queue.arn))); - - return new Policy("allow-sns-or-sqs-send", Arrays.asList(snsStatement, sqsStatement)); - } - - Id getLagMetricId(String region) { - return registry.createId("terminationLifecycle.lag", "region", region); - } - - void recordLag(Date start, String region, String account, String serverGroup, String instanceId) { - if (start != null) { - Long lag = registry.clock().wallTime() - start.getTime(); - log.info("Lifecycle message processed (account: {}, serverGroup: {}, instance: {}, lagSeconds: {})", account, serverGroup, instanceId, Duration.ofMillis(lag).getSeconds()); - registry.gauge(getLagMetricId(region), lag); - } - } - - Id getProcessedMetricId(String region) { - return registry.createId("terminationLifecycle.totalProcessed", "region", region); - } - - Id getFailedMetricId(String region) { - return registry.createId("terminationLifecycle.totalFailed", "region", region); - } - - private static List getAllAccountIds(Set accountCredentials) { - return accountCredentials - .stream() - .map(AccountCredentials::getAccountId) - .filter(a -> a != null) - .collect(Collectors.toList()); - } - - private static Set getSourceRoleArns(Set allCredentials) { - Set sourceRoleArns = new HashSet<>(); - for (T credentials : allCredentials) { - if (credentials instanceof NetflixAmazonCredentials) { - NetflixAmazonCredentials c = (NetflixAmazonCredentials) credentials; - if (c.getLifecycleHooks() != null) { - sourceRoleArns.addAll(c.getLifecycleHooks() - .stream() - .filter(h -> "autoscaling:EC2_INSTANCE_TERMINATING".equals(h.getLifecycleTransition())) - .map(LifecycleHook::getRoleARN) - .collect(Collectors.toSet())); - } - } - } - return sourceRoleArns; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerProvider.java deleted file mode 100644 index 13a111c3a63..00000000000 --- 
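For readers unfamiliar with the com.amazonaws.auth.policy builders used by the deleted buildSQSPolicy above: the ArnEquals condition on aws:SourceArn is what narrows the wide-open Principal.All down to a single SNS topic. A sketch of just that statement, with illustrative ARNs; the JSON in the comment is approximate, not the SDK's exact output:

import com.amazonaws.auth.policy.Condition;
import com.amazonaws.auth.policy.Policy;
import com.amazonaws.auth.policy.Principal;
import com.amazonaws.auth.policy.Resource;
import com.amazonaws.auth.policy.Statement;
import com.amazonaws.auth.policy.actions.SQSActions;
import java.util.Collections;

class QueuePolicySketch {
  public static void main(String[] args) {
    String queueArn = "arn:aws:sqs:us-west-2:123456789012:term-queue"; // illustrative
    String topicArn = "arn:aws:sns:us-west-2:123456789012:term-topic"; // illustrative

    Statement fromTopic = new Statement(Statement.Effect.Allow).withActions(SQSActions.SendMessage);
    fromTopic.setPrincipals(Principal.All);
    fromTopic.setResources(Collections.singletonList(new Resource(queueArn)));
    // The ArnEquals condition narrows Principal.All down to the one source topic.
    fromTopic.setConditions(Collections.singletonList(
        new Condition().withType("ArnEquals").withConditionKey("aws:SourceArn").withValues(topicArn)));

    // Serializes to roughly:
    // {"Statement":[{"Effect":"Allow","Principal":"*","Action":"sqs:SendMessage",
    //   "Resource":"arn:aws:sqs:...:term-queue",
    //   "Condition":{"ArnEquals":{"aws:SourceArn":"arn:aws:sns:...:term-topic"}}}]}
    System.out.println(new Policy("allow-sns-or-sqs-send", Collections.singletonList(fromTopic)).toJson());
  }
}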
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerProvider.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.clouddriver.aws.lifecycle; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.discovery.AwsEurekaSupport; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; -import org.springframework.stereotype.Component; - -import javax.annotation.PostConstruct; -import javax.inject.Provider; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.regex.Pattern; - -@Component -@ConditionalOnExpression("${aws.lifecycleSubscribers.instanceTermination.enabled:false} && ${caching.writeEnabled:true}") -public class InstanceTerminationLifecycleWorkerProvider { - private final static String REGION_TEMPLATE_PATTERN = Pattern.quote("{{region}}"); - private final static String ACCOUNT_ID_TEMPLATE_PATTERN = Pattern.quote("{{accountId}}"); - - private static final Logger log = LoggerFactory.getLogger(InstanceTerminationLifecycleWorkerProvider.class); - - private final ObjectMapper objectMapper; - private final AmazonClientProvider amazonClientProvider; - private final AccountCredentialsProvider accountCredentialsProvider; - private final InstanceTerminationConfigurationProperties properties; - private final Provider discoverySupport; - private final Registry registry; - - @Autowired - InstanceTerminationLifecycleWorkerProvider(ObjectMapper objectMapper, - AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, - InstanceTerminationConfigurationProperties properties, - Provider discoverySupport, - Registry registry) { - this.objectMapper = objectMapper; - this.amazonClientProvider = amazonClientProvider; - this.accountCredentialsProvider = accountCredentialsProvider; - this.properties = properties; - this.discoverySupport = discoverySupport; - this.registry = registry; - } - - @PostConstruct - public void start() { - NetflixAmazonCredentials credentials = (NetflixAmazonCredentials) accountCredentialsProvider.getCredentials( - properties.getAccountName() - ); - ExecutorService executorService = Executors.newFixedThreadPool(credentials.getRegions().size()); - - credentials.getRegions().forEach(region -> { - InstanceTerminationLifecycleWorker worker = new InstanceTerminationLifecycleWorker( - 
objectMapper, - amazonClientProvider, - accountCredentialsProvider, - new InstanceTerminationConfigurationProperties( - properties.getAccountName(), - properties - .getQueueARN() - .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) - .replaceAll(ACCOUNT_ID_TEMPLATE_PATTERN, credentials.getAccountId()), - properties.getTopicARN() - .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) - .replaceAll(ACCOUNT_ID_TEMPLATE_PATTERN, credentials.getAccountId()), - properties.getVisibilityTimeout(), - properties.getWaitTimeSeconds(), - properties.getSqsMessageRetentionPeriodSeconds(), - properties.getEurekaUpdateStatusRetryMax() - ), - discoverySupport, - registry - ); - try { - executorService.submit(worker); - } catch (RejectedExecutionException e) { - log.error("Could not start " + worker.getWorkerName(), e); - } - }); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureConfigurationProperties.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureConfigurationProperties.groovy index 9d883741d0b..253f818439a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureConfigurationProperties.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureConfigurationProperties.groovy @@ -18,7 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.lifecycle import org.springframework.boot.context.properties.ConfigurationProperties -@ConfigurationProperties("aws.lifecycleSubscribers.launchFailure") +@ConfigurationProperties("aws.lifecycle-subscribers.launch-failure") class LaunchFailureConfigurationProperties { String accountName String topicARN diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgent.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgent.java deleted file mode 100644 index 118979a84f3..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgent.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
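Both the worker provider above and the launch-failure agent provider later in this diff expand {{region}} and {{accountId}} placeholders in the configured queue/topic ARNs via String.replaceAll. A small self-contained sketch of why Pattern.quote is involved; the template and values are illustrative:

import java.util.regex.Pattern;

class ArnTemplateSketch {
  // Pattern.quote is needed because "{{region}}" contains regex metacharacters
  // ({ and }) and replaceAll treats its first argument as a regular expression.
  private static final String REGION = Pattern.quote("{{region}}");
  private static final String ACCOUNT_ID = Pattern.quote("{{accountId}}");

  static String expand(String template, String region, String accountId) {
    return template.replaceAll(REGION, region).replaceAll(ACCOUNT_ID, accountId);
  }

  public static void main(String[] args) {
    System.out.println(expand("arn:aws:sqs:{{region}}:{{accountId}}:term-queue",
        "us-west-2", "123456789012"));
    // -> arn:aws:sqs:us-west-2:123456789012:term-queue
  }
}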
- */ - -package com.netflix.spinnaker.clouddriver.aws.lifecycle; - -import com.amazonaws.auth.policy.Condition; -import com.amazonaws.auth.policy.Policy; -import com.amazonaws.auth.policy.Principal; -import com.amazonaws.auth.policy.Resource; -import com.amazonaws.auth.policy.Statement; -import com.amazonaws.auth.policy.actions.SNSActions; -import com.amazonaws.auth.policy.actions.SQSActions; -import com.amazonaws.services.sns.AmazonSNS; -import com.amazonaws.services.sns.model.SetTopicAttributesRequest; -import com.amazonaws.services.sqs.AmazonSQS; -import com.amazonaws.services.sqs.model.Message; -import com.amazonaws.services.sqs.model.ReceiptHandleIsInvalidException; -import com.amazonaws.services.sqs.model.ReceiveMessageRequest; -import com.amazonaws.services.sqs.model.ReceiveMessageResult; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.agent.RunnableAgent; -import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; -import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.tags.EntityTagger; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.regex.Matcher; -import java.util.stream.Collectors; - -/** - * An Agent that subscribes to a particular SQS queue and tags any server groups that had launch errors. 
- */ -class LaunchFailureNotificationAgent implements RunnableAgent, CustomScheduledAgent { - private static final Logger log = LoggerFactory.getLogger(LaunchFailureNotificationAgent.class); - - private static final String SUPPORTED_LIFECYCLE_TRANSITION = "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"; - private static final int AWS_MAX_NUMBER_OF_MESSAGES = 10; - - private final ObjectMapper objectMapper; - private final AmazonClientProvider amazonClientProvider; - private final AccountCredentialsProvider accountCredentialsProvider; - private final LaunchFailureConfigurationProperties properties; - private final EntityTagger serverGroupTagger; - - private final ARN topicARN; - private final ARN queueARN; - - private String topicId = null; // the ARN for the topic - private String queueId = null; // the URL for the queue - - LaunchFailureNotificationAgent(ObjectMapper objectMapper, - AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, - LaunchFailureConfigurationProperties properties, - EntityTagger serverGroupTagger) { - this.objectMapper = objectMapper; - this.amazonClientProvider = amazonClientProvider; - this.accountCredentialsProvider = accountCredentialsProvider; - this.properties = properties; - this.serverGroupTagger = serverGroupTagger; - - Set accountCredentials = accountCredentialsProvider.getAll(); - this.topicARN = new ARN(accountCredentials, properties.getTopicARN()); - this.queueARN = new ARN(accountCredentials, properties.getQueueARN()); - } - - @Override - public String getAgentType() { - return queueARN.account.getName() + "/" + queueARN.region + "/" + LaunchFailureNotificationAgent.class.getSimpleName(); - } - - @Override - public String getProviderName() { - return AwsProvider.PROVIDER_NAME; - } - - @Override - public long getPollIntervalMillis() { - return TimeUnit.MINUTES.toMillis(1); - } - - @Override - public long getTimeoutMillis() { - return -1; - } - - @Override - public void run() { - List allAccountIds = accountCredentialsProvider.getAll().stream() - .filter(c -> c instanceof NetflixAmazonCredentials) - .map(AccountCredentials::getAccountId) - .collect(Collectors.toList()); - - AmazonSQS amazonSQS = amazonClientProvider.getAmazonSQS(queueARN.account, queueARN.region); - this.queueId = ensureQueueExists(amazonSQS, queueARN, topicARN); - - AmazonSNS amazonSNS = amazonClientProvider.getAmazonSNS(topicARN.account, topicARN.region); - this.topicId = ensureTopicExists(amazonSNS, topicARN, allAccountIds, queueARN); - - AtomicInteger messagesProcessed = new AtomicInteger(0); - while (messagesProcessed.get() < properties.getMaxMessagesPerCycle()) { - ReceiveMessageResult receiveMessageResult = amazonSQS.receiveMessage( - new ReceiveMessageRequest(queueId) - .withMaxNumberOfMessages(AWS_MAX_NUMBER_OF_MESSAGES) - .withVisibilityTimeout(properties.getVisibilityTimeout()) - .withWaitTimeSeconds(properties.getWaitTimeSeconds()) - ); - - receiveMessageResult.getMessages().forEach(message -> { - try { - NotificationMessageWrapper notificationMessageWrapper = objectMapper.readValue( - message.getBody(), NotificationMessageWrapper.class - ); - - NotificationMessage notificationMessage = objectMapper.readValue( - notificationMessageWrapper.message, NotificationMessage.class - ); - - if (SUPPORTED_LIFECYCLE_TRANSITION.equalsIgnoreCase(notificationMessage.event)) { - handleMessage(serverGroupTagger, notificationMessage); - } - } catch (IOException e) { - log.error("Unable to convert NotificationMessage (body: {})", message.getBody(), e); - } 
- - deleteMessage(amazonSQS, queueId, message); - messagesProcessed.incrementAndGet(); - }); - - if (receiveMessageResult.getMessages().isEmpty()) { - // no messages received, stop polling. - break; - } - } - - log.info("Processed {} messages (queueARN: {})", messagesProcessed.get(), queueARN.arn); - } - - private static void handleMessage(EntityTagger serverGroupTagger, NotificationMessage notificationMessage) { - log.info( - "Failed to launch instance (asgName: {}, reason: {})", - notificationMessage.autoScalingGroupName, - notificationMessage.statusMessage - ); - - Matcher sqsMatcher = ARN.PATTERN.matcher(notificationMessage.autoScalingGroupARN); - if (!sqsMatcher.matches()) { - throw new IllegalArgumentException(notificationMessage.autoScalingGroupARN + " is not a valid ARN"); - } - - String region = sqsMatcher.group(1); - String accountId = sqsMatcher.group(2); - - serverGroupTagger.alert( - AmazonCloudProvider.ID, - accountId, - region, - null, // no category - EntityTagger.ENTITY_TYPE_SERVER_GROUP, - notificationMessage.autoScalingGroupName, - notificationMessage.event, - notificationMessage.statusMessage, - null // no last modified timestamp - ); - } - - /** - * Ensure that the topic exists and has a policy granting all accounts permission to publish messages to it - */ - private static String ensureTopicExists(AmazonSNS amazonSNS, - ARN topicARN, - List allAccountIds, - ARN queueARN) { - topicARN.arn = amazonSNS.createTopic(topicARN.name).getTopicArn(); - - amazonSNS.setTopicAttributes( - new SetTopicAttributesRequest() - .withTopicArn(topicARN.arn) - .withAttributeName("Policy") - .withAttributeValue(buildSNSPolicy(topicARN, allAccountIds).toJson()) - ); - - amazonSNS.subscribe(topicARN.arn, "sqs", queueARN.arn); - - return topicARN.arn; - } - - /** - * Ensure that the queue exists and has a policy granting the source topic permission to send messages to it - */ - private static String ensureQueueExists(AmazonSQS amazonSQS, ARN queueARN, ARN topicARN) { - String queueUrl; - - try { - queueUrl = amazonSQS.getQueueUrl(queueARN.name).getQueueUrl(); - } catch (Exception e) { - queueUrl = amazonSQS.createQueue(queueARN.name).getQueueUrl(); - } - - amazonSQS.setQueueAttributes( - queueUrl, Collections.singletonMap("Policy", buildSQSPolicy(queueARN, topicARN).toJson()) - ); - - return queueUrl; - } - - private static Policy buildSNSPolicy(ARN topicARN, List allAccountIds) { - Statement statement = new Statement(Statement.Effect.Allow).withActions(SNSActions.Publish); - statement.setPrincipals(allAccountIds.stream().map(Principal::new).collect(Collectors.toList())); - statement.setResources(Collections.singletonList(new Resource(topicARN.arn))); - - return new Policy("allow-remote-account-send", Collections.singletonList(statement)); - } - - private static Policy buildSQSPolicy(ARN queue, ARN topic) { - Statement statement = new Statement(Statement.Effect.Allow).withActions(SQSActions.SendMessage); - statement.setPrincipals(Principal.All); - statement.setResources(Collections.singletonList(new Resource(queue.arn))); - statement.setConditions(Collections.singletonList( - new Condition().withType("ArnEquals").withConditionKey("aws:SourceArn").withValues(topic.arn) - )); - - return new Policy("allow-sns-topic-send", Collections.singletonList(statement)); - } - - private static void deleteMessage(AmazonSQS amazonSQS, String queueUrl, Message message) { - try { - amazonSQS.deleteMessage(queueUrl, message.getReceiptHandle()); - } catch (ReceiptHandleIsInvalidException e) { - log.warn("Error 
deleting lifecycle message, reason: {} (receiptHandle: {})", e.getMessage(), message.getReceiptHandle()); - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProvider.java deleted file mode 100644 index 29c2d859d26..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProvider.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.lifecycle; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.AgentProvider; -import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.tags.EntityTagger; - -import java.util.Collection; -import java.util.List; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -public class LaunchFailureNotificationAgentProvider implements AgentProvider { - private final static String REGION_TEMPLATE_PATTERN = Pattern.quote("{{region}}"); - private final static String ACCOUNT_ID_TEMPLATE_PATTERN = Pattern.quote("{{accountId}}"); - - private final ObjectMapper objectMapper; - private final AmazonClientProvider amazonClientProvider; - private final AccountCredentialsProvider accountCredentialsProvider; - private final LaunchFailureConfigurationProperties properties; - private final EntityTagger entityTagger; - - LaunchFailureNotificationAgentProvider(ObjectMapper objectMapper, - AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, - LaunchFailureConfigurationProperties properties, - EntityTagger entityTagger) { - this.objectMapper = objectMapper; - this.amazonClientProvider = amazonClientProvider; - this.accountCredentialsProvider = accountCredentialsProvider; - this.properties = properties; - this.entityTagger = entityTagger; - } - - @Override - public boolean supports(String providerName) { - return providerName.equalsIgnoreCase(AwsProvider.PROVIDER_NAME); - } - - @Override - public Collection agents() { - NetflixAmazonCredentials credentials = (NetflixAmazonCredentials) accountCredentialsProvider.getCredentials( - properties.getAccountName() - ); - - // an agent for each region in the specified account - List agents = credentials.getRegions().stream() - .map(region -> new LaunchFailureNotificationAgent( - objectMapper, - amazonClientProvider, - accountCredentialsProvider, - new LaunchFailureConfigurationProperties( - 
properties.getAccountName(), - properties - .getTopicARN() - .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) - .replaceAll(ACCOUNT_ID_TEMPLATE_PATTERN, credentials.getAccountId()), - properties - .getQueueARN() - .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) - .replaceAll(ACCOUNT_ID_TEMPLATE_PATTERN, credentials.getAccountId()), - properties.getMaxMessagesPerCycle(), - properties.getVisibilityTimeout(), - properties.getWaitTimeSeconds() - ), - entityTagger - )) - .collect(Collectors.toList()); - - // an agent that will cleanup stale notifications across all accounts + region - agents.add(new LaunchFailureNotificationCleanupAgent( - amazonClientProvider, accountCredentialsProvider, entityTagger - )); - - return agents; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgent.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgent.java deleted file mode 100644 index 4da68d66d89..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgent.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
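Shape of the deleted agents() factory above, for orientation: one polling agent is created per region of the configured account (each with region- and account-substituted ARNs), plus a single cross-account cleanup agent. A toy sketch of that fan-out, with hypothetical stand-in types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class PerRegionAgentsSketch {
  interface Agent { String name(); } // stand-in for the real Agent interface

  static List<Agent> agents(List<String> regions) {
    List<Agent> agents = new ArrayList<>();
    for (String region : regions) {
      agents.add(() -> "LaunchFailureNotificationAgent/" + region); // one poller per region
    }
    agents.add(() -> "LaunchFailureNotificationCleanupAgent"); // one global cleanup agent
    return agents;
  }

  public static void main(String[] args) {
    agents(Arrays.asList("us-east-1", "us-west-2")).forEach(a -> System.out.println(a.name()));
  }
}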
- */ - -package com.netflix.spinnaker.clouddriver.aws.lifecycle; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.autoscaling.AmazonAutoScaling; -import com.amazonaws.services.autoscaling.model.Activity; -import com.amazonaws.services.autoscaling.model.DescribeScalingActivitiesRequest; -import com.amazonaws.services.autoscaling.model.DescribeScalingActivitiesResult; -import com.amazonaws.services.autoscaling.model.ScalingActivityStatusCode; -import com.netflix.spinnaker.cats.agent.RunnableAgent; -import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; -import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; -import com.netflix.spinnaker.clouddriver.model.EntityTags; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.tags.EntityTagger; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.UndeclaredThrowableException; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.TimeUnit; - -public class LaunchFailureNotificationCleanupAgent implements RunnableAgent, CustomScheduledAgent { - private static final Logger log = LoggerFactory.getLogger(LaunchFailureNotificationAgent.class); - - private static final String TAG_NAME = "spinnaker_ui_alert:autoscaling:ec2_instance_launch_error"; - private static final int MAX_RESULTS = 10000; - - private final AmazonClientProvider amazonClientProvider; - private final AccountCredentialsProvider accountCredentialsProvider; - private final EntityTagger serverGroupTagger; - - LaunchFailureNotificationCleanupAgent(AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, - EntityTagger serverGroupTagger) { - this.amazonClientProvider = amazonClientProvider; - this.accountCredentialsProvider = accountCredentialsProvider; - this.serverGroupTagger = serverGroupTagger; - } - - @Override - public String getAgentType() { - return LaunchFailureNotificationCleanupAgent.class.getSimpleName(); - } - - @Override - public String getProviderName() { - return AwsProvider.PROVIDER_NAME; - } - - @Override - public long getPollIntervalMillis() { - return TimeUnit.MINUTES.toMillis(5); - } - - @Override - public long getTimeoutMillis() { - return -1; - } - - @Override - public void run() { - Collection taggedEntities = serverGroupTagger.taggedEntities( - AmazonCloudProvider.ID, - null, // all accounts - EntityTagger.ENTITY_TYPE_SERVER_GROUP, - TAG_NAME, - MAX_RESULTS - ); - - taggedEntities.forEach(entityTags -> { - EntityTags.EntityRef entityRef = entityTags.getEntityRef(); - Optional credentials = Optional.ofNullable( - accountCredentialsProvider.getCredentials(entityRef.getAccount())) - .filter((c) -> c instanceof NetflixAmazonCredentials) - .map(NetflixAmazonCredentials.class::cast); - - if (!credentials.isPresent()) { - log.warn("No account configuration for {}. 
Unable to determine if '{}' has launch failures", entityRef.getAccount(), entityTags.getId()); - return; - } - - AmazonAutoScaling amazonAutoScaling = amazonClientProvider.getAutoScaling( - credentials.get(), - entityRef.getRegion() - ); - - try { - if (hasLaunchFailures(amazonAutoScaling, entityTags)) { - return; - } - - serverGroupTagger.delete( - AmazonCloudProvider.ID, - entityRef.getAccountId(), - entityRef.getRegion(), - EntityTagger.ENTITY_TYPE_SERVER_GROUP, - entityRef.getEntityId(), - TAG_NAME - ); - } catch (Exception e) { - log.error("Unable to determine if '{}' has launch failures", entityTags.getId(), e); - } - }); - } - - /** - * Fetch scaling activities and determine if the most recent activity was successful. - * - * A successful scaling activity is sufficient to indicate that a server group is no longer having launch failures. - */ - protected boolean hasLaunchFailures(AmazonAutoScaling amazonAutoScaling, EntityTags entityTags) { - EntityTags.EntityRef entityRef = entityTags.getEntityRef(); - - try { - DescribeScalingActivitiesResult describeScalingActivitiesResult = amazonAutoScaling.describeScalingActivities( - new DescribeScalingActivitiesRequest().withAutoScalingGroupName(entityRef.getEntityId()) - ); - - List activities = describeScalingActivitiesResult.getActivities(); - return !activities.isEmpty() && !activities.get(0).getStatusCode().equals(ScalingActivityStatusCode.Successful.toString()); - } catch (Exception e) { - AmazonServiceException amazonServiceException = amazonServiceException(e); - if (amazonServiceException != null) { - if (amazonServiceException.getErrorMessage().toLowerCase().contains("name not found")) { - return false; - } - } - - throw e; - } - } - - private static AmazonServiceException amazonServiceException(Exception e) { - if (e instanceof AmazonServiceException) { - return (AmazonServiceException) e; - } - - if (!(e instanceof UndeclaredThrowableException)) { - return null; - } - - UndeclaredThrowableException ute = (UndeclaredThrowableException) e; - - if (!(ute.getUndeclaredThrowable() instanceof InvocationTargetException)) { - return null; - } - - InvocationTargetException ite = (InvocationTargetException) ute.getUndeclaredThrowable(); - if (!(ite.getTargetException() instanceof AmazonServiceException)) { - return null; - } - - return (AmazonServiceException) ite.getTargetException(); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleSubscriberConfiguration.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleSubscriberConfiguration.java deleted file mode 100644 index 0d06674c672..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleSubscriberConfiguration.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
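The amazonServiceException helper in the deleted cleanup agent above exists because these AWS clients may be invoked through dynamic proxies (for instrumentation, presumably), in which case a service error can surface as an UndeclaredThrowableException wrapping an InvocationTargetException. A compact sketch of the same peeling, using a stand-in exception type so it is self-contained:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.UndeclaredThrowableException;

class UnwrapAwsExceptionSketch {
  // Stand-in for com.amazonaws.AmazonServiceException, to keep the sketch dependency-free.
  static class AmazonServiceException extends RuntimeException {}

  static AmazonServiceException unwrap(Exception e) {
    if (e instanceof AmazonServiceException) {
      return (AmazonServiceException) e;
    }
    if (e instanceof UndeclaredThrowableException) {
      Throwable inner = ((UndeclaredThrowableException) e).getUndeclaredThrowable();
      if (inner instanceof InvocationTargetException) {
        Throwable target = ((InvocationTargetException) inner).getTargetException();
        if (target instanceof AmazonServiceException) {
          return (AmazonServiceException) target;
        }
      }
    }
    return null; // not an AWS service failure; the caller rethrows
  }
}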
- */ - -package com.netflix.spinnaker.clouddriver.aws.lifecycle; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.tags.EntityTagger; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -@Configuration -@EnableConfigurationProperties({LaunchFailureConfigurationProperties.class, InstanceTerminationConfigurationProperties.class}) -class LifecycleSubscriberConfiguration { - - @Bean - @ConditionalOnProperty("aws.lifecycleSubscribers.launchFailure.enabled") - LaunchFailureNotificationAgentProvider launchFailureNotificationAgentProvider(ObjectMapper objectMapper, - AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, - LaunchFailureConfigurationProperties properties, - EntityTagger entityTagger) { - return new LaunchFailureNotificationAgentProvider( - objectMapper, amazonClientProvider, accountCredentialsProvider, properties, entityTagger - ); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonAsgLifecycleHook.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonAsgLifecycleHook.groovy index 3bb8236bcd5..6d71af14efe 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonAsgLifecycleHook.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonAsgLifecycleHook.groovy @@ -16,9 +16,11 @@ package com.netflix.spinnaker.clouddriver.aws.model +import groovy.transform.Canonical import groovy.transform.CompileStatic @CompileStatic +@Canonical class AmazonAsgLifecycleHook { String name diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonBlockDevice.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonBlockDevice.groovy index 363b6d1b6eb..46f08881d93 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonBlockDevice.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonBlockDevice.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.aws.model -import groovy.transform.Canonical +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString +import groovy.transform.TupleConstructor /** * Model for a block device mapping @@ -25,10 +27,10 @@ import groovy.transform.Canonical * (deviceName + virtualName) * or an EBS device: * (deviceName + size + (optionally) volumeType, deleteOnTermination, iops, snapshotId) - * - * */ -@Canonical +@ToString +@EqualsAndHashCode +@TupleConstructor(force = true) class AmazonBlockDevice { // Required for all: @@ -65,6 +67,11 @@ class AmazonBlockDevice { */ Integer iops + /** + * The throughput for the volume + */ + Integer throughput + /** * The snapshot id to mount as the EBS volume */ @@ -75,4 +82,88 @@ class AmazonBlockDevice { */ Boolean encrypted + /** + * The KMS key id to encrypt EBS + * This is available only for LaunchTemplate + */ + String kmsKeyId + + private AmazonBlockDevice(Builder builder) { + deviceName = builder.deviceName + virtualName = 
builder.virtualName + size = builder.size + volumeType = builder.volumeType + throughput = builder.throughput + deleteOnTermination = builder.deleteOnTermination + iops = builder.iops + snapshotId = builder.snapshotId + encrypted = builder.encrypted + kmsKeyId = builder.kmsKeyId + } + + static class Builder { + String deviceName + String virtualName + Integer size + String volumeType + Boolean deleteOnTermination + Integer iops + Integer throughput + String snapshotId + Boolean encrypted + String kmsKeyId + + Builder deviceName(String deviceName) { + this.deviceName = deviceName + return this + } + + Builder virtualName(String virtualName) { + this.virtualName = virtualName + return this + } + + Builder size(Integer size) { + this.size = size + return this + } + + Builder volumeType(String volumeType) { + this.volumeType = volumeType + return this + } + + Builder deleteOnTermination(Boolean deleteOnTermination) { + this.deleteOnTermination = deleteOnTermination + return this + } + + Builder iops(Integer iops) { + this.iops = iops + return this + } + + Builder throughput(Integer throughput) { + this.throughput = throughput + return this + } + Builder snapshotId(String snapshotId) { + this.snapshotId = snapshotId + return this + } + + Builder encrypted(Boolean encrypted) { + this.encrypted = encrypted + return this + } + + Builder kmsKeyId(String kmsKeyId) { + this.kmsKeyId = kmsKeyId + return this + } + + AmazonBlockDevice build() { + return new AmazonBlockDevice(this) + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCluster.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCluster.groovy index e27b9ba8d0f..1f361223d03 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCluster.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCluster.groovy @@ -16,6 +16,9 @@ package com.netflix.spinnaker.clouddriver.aws.model +import com.fasterxml.jackson.annotation.JsonAnyGetter +import com.fasterxml.jackson.annotation.JsonAnySetter +import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.model.Cluster import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode @@ -29,4 +32,27 @@ class AmazonCluster implements Cluster, Serializable { Set serverGroups = Collections.synchronizedSet(new HashSet()) Set targetGroups = Collections.synchronizedSet(new HashSet()) Set loadBalancers = Collections.synchronizedSet(new HashSet()) + + @JsonIgnore + private Map extraAttributes = new LinkedHashMap() + + @JsonAnyGetter + @Override + Map getExtraAttributes() { + return extraAttributes + } + + /** + * Setter for non explicitly defined values. 
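Usage note for the new AmazonBlockDevice.Builder above, which also carries the new gp3-oriented throughput field and the launch-template-only kmsKeyId. A hypothetical call site, with illustrative values; it assumes only the class defined above:

// Hypothetical usage, not part of this diff.
AmazonBlockDevice ebsDevice = new AmazonBlockDevice.Builder()
    .deviceName("/dev/xvdb")
    .size(100)                          // GiB
    .volumeType("gp3")
    .iops(3000)
    .throughput(125)                    // MiB/s; the new gp3 throughput field
    .encrypted(true)
    .kmsKeyId("alias/illustrative-key") // launch-template-only, per the comment above
    .deleteOnTermination(true)
    .build();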
+ * + * Used both for Jackson mapping ({@code @JsonAnySetter}) and + * for Groovy's implicit Map constructor (this is the reason the + * method is named {@code set(String name, Object value)}) + * @param name The property name + * @param value The property value + */ + @JsonAnySetter + void set(String name, Object value) { + extraAttributes.put(name, value) + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstance.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstance.groovy index b7f5cd0c872..0cde4850d15 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstance.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstance.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.model import com.fasterxml.jackson.annotation.JsonAnyGetter import com.fasterxml.jackson.annotation.JsonAnySetter +import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance @@ -32,16 +33,26 @@ class AmazonInstance implements Instance, Serializable { final String providerType = AmazonCloudProvider.ID final String cloudProvider = AmazonCloudProvider.ID - private Map dynamicProperties = new HashMap() + @JsonIgnore + private Map extraAttributes = new LinkedHashMap() @JsonAnyGetter - public Map any() { - return dynamicProperties; + Map getExtraAttributes() { + return extraAttributes } + /** + * Setter for non explicitly defined values. + * + * Used both for Jackson mapping ({@code @JsonAnySetter}) and + * for Groovy's implicit Map constructor (this is the reason the + * method is named {@code set(String name, Object value)}) + * @param name The property name + * @param value The property value + */ @JsonAnySetter - public void set(String name, Object value) { - dynamicProperties.put(name, value); + void set(String name, Object value) { + extraAttributes.put(name, value) } @Override @@ -74,7 +85,11 @@ class AmazonInstance implements Instance, Serializable { @Override String getZone() { - any().get("placement")?.availabilityZone + getExtraAttributes().get("placement")?.availabilityZone + } + + String getAvailabilityZone() { + return this.getZone() } @Override diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceType.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceType.groovy index f63d5379549..8ebe2bd4446 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceType.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceType.groovy @@ -14,15 +14,54 @@ * limitations under the License. 
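The extraAttributes pattern introduced here (and mirrored in AmazonCluster and AmazonServerGroup in this same diff) leans on a Jackson pair: @JsonAnySetter funnels unmodeled JSON properties into a map, and @JsonAnyGetter flattens them back into the top-level object on write, so unknown cache attributes survive a round trip. A minimal self-contained sketch, with illustrative field names and data:

import com.fasterxml.jackson.annotation.JsonAnyGetter;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.LinkedHashMap;
import java.util.Map;

class AnyAttributesSketch {
  public static class Node {
    public String name; // a modeled property
    private final Map<String, Object> extra = new LinkedHashMap<>();

    @JsonAnyGetter
    public Map<String, Object> getExtra() { return extra; } // flattened into the top-level JSON

    @JsonAnySetter
    public void set(String key, Object value) { extra.put(key, value); } // catches unmodeled keys
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Node node = mapper.readValue(
        "{\"name\":\"app-v001\",\"placement\":{\"availabilityZone\":\"us-west-2a\"}}", Node.class);
    System.out.println(mapper.writeValueAsString(node));
    // -> {"name":"app-v001","placement":{"availabilityZone":"us-west-2a"}}
  }
}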
*/ - - package com.netflix.spinnaker.clouddriver.aws.model import com.netflix.spinnaker.clouddriver.model.InstanceType import groovy.transform.Canonical @Canonical class AmazonInstanceType implements InstanceType { - String account - String region - String name + String account + String region + String name + Integer defaultVCpus + Long memoryInGiB + String hypervisor + AmazonInstanceStorageInfo instanceStorageInfo + AmazonInstanceEbsInfo ebsInfo + AmazonInstanceGpuInfo gpuInfo + + Boolean instanceStorageSupported + Boolean currentGeneration + Boolean bareMetal + Boolean ipv6Supported + Boolean burstablePerformanceSupported + + List supportedArchitectures + List supportedUsageClasses + List supportedRootDeviceTypes + List supportedVirtualizationTypes +} + +class AmazonInstanceStorageInfo { + String storageTypes + Long totalSizeInGB + String nvmeSupport +} + +class AmazonInstanceEbsInfo { + String ebsOptimizedSupport + String nvmeSupport + String encryptionSupport +} + +class AmazonInstanceGpuInfo { + Integer totalGpuMemoryInMiB + List gpus +} + +class AmazonInstanceGpuDeviceInfo { + String name + String manufacturer + Integer count + Integer gpuSizeInMiB } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonMetricDescriptor.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonMetricDescriptor.groovy index 25accbe25ba..e61a6b45bf6 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonMetricDescriptor.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonMetricDescriptor.groovy @@ -20,9 +20,7 @@ import com.amazonaws.services.cloudwatch.model.Dimension import com.amazonaws.services.cloudwatch.model.Metric import com.fasterxml.jackson.annotation.JsonInclude import com.netflix.spinnaker.clouddriver.model.CloudMetricDescriptor -import groovy.transform.Immutable -@Immutable @JsonInclude(JsonInclude.Include.NON_EMPTY) class AmazonMetricDescriptor implements CloudMetricDescriptor { final String cloudProvider = 'aws' @@ -30,6 +28,12 @@ class AmazonMetricDescriptor implements CloudMetricDescriptor { final String name final List dimensions + AmazonMetricDescriptor (String cloudProvider, String namespace, String name, List dimensions) { + this.cloudProvider = cloudProvider + this.namespace = namespace + this.name = name + this.dimensions = dimensions + } static AmazonMetricDescriptor from(Metric metric) { new AmazonMetricDescriptor('aws', metric.namespace, metric.metricName, metric.dimensions) } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReport.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReport.groovy index 23324d28016..6910b614047 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReport.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReport.groovy @@ -260,6 +260,7 @@ class AmazonReservationReport implements ReservationReport { */ static String normalizeInstanceType(String instanceType) { def instanceClassRankings = [ + 'metal' : 9, // highest priority 'xlarge': 6, 'large' : 5, 'medium': 4, diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReportBuilder.groovy 
b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReportBuilder.groovy index c84294fe8dc..b46aa9bc2ce 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReportBuilder.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonReservationReportBuilder.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.model +import com.netflix.spectator.api.Registry import com.netflix.spinnaker.clouddriver.aws.model.AmazonReservationReport.OverallReservationDetail import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3DataProvider import com.netflix.spinnaker.clouddriver.model.DataProvider @@ -44,19 +45,30 @@ interface AmazonReservationReportBuilder { "m3.large" : 0.5, "m4.large" : 0.5, "m5.large" : 0.5, + "m5.metal" : 24, + + "m5d.large": 0.5, + "m5d.metal": 24, "c1.medium": 0.25, "c3.large" : 0.5, "c4.large" : 0.5, "c5.large" : 0.5, + "c5.metal" : 24, "r3.large" : 0.5, "r4.large" : 0.5, + "r5.large" : 0.5, + "r5.metal" : 24, + + "r5d.metal": 24, - "i3.large" : 0.5 + "i3.large" : 0.5, + "i3.metal" : 16 ] - List aggregateRegionalReservations(List reservations) { + List aggregateRegionalReservations(Registry registry, + List reservations) { def regionalReservations = filterRegionalReservations(reservations) regionalReservations.groupBy { "${it.region}-${it.instanceFamily()}-${it.os}" }.collect { @@ -72,6 +84,8 @@ interface AmazonReservationReportBuilder { double multiplier = getMultiplier(o.instanceType) if (!multiplier) { log.warn("Unable to determine multiplier for instance type '${o.instanceType}'") + registry.counter("reservedInstances.unsupportedType").increment() + return } @@ -156,16 +170,16 @@ interface AmazonReservationReportBuilder { class V3 implements AmazonReservationReportBuilder { private final Support support = new Support() - AmazonReservationReport build(AmazonReservationReport source) { + AmazonReservationReport build(Registry registry, AmazonReservationReport source) { def reservations = source.reservations.sort( false, new AmazonReservationReport.DescendingOverallReservationDetailComparator() ) // aggregate all regional reservations for each instance family - def regionalReservations = support.aggregateRegionalReservations(reservations) + def regionalReservations = support.aggregateRegionalReservations(registry, reservations) // remove any regional reservations that have been fully utilized (ie. 
they've all been converted to xlarge) - reservations.removeAll { it.availabilityZone == "*" && it.totalSurplus() == 0 } + reservations.removeAll { it.availabilityZone == "*" } // add the aggregated regional reservations reservations.addAll(regionalReservations) diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSecurityGroup.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSecurityGroup.groovy index b6a707f29eb..93c3d53f698 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSecurityGroup.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSecurityGroup.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.model +import com.amazonaws.services.ec2.model.Tag import com.fasterxml.jackson.annotation.JsonInclude import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.model.SecurityGroup @@ -39,6 +40,8 @@ class AmazonSecurityGroup implements SecurityGroup { final String region final Set inboundRules final Set outboundRules + final List tags + void setMoniker(Moniker _ignored) {} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroup.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroup.groovy index 5553a959fb4..f822aaf2e5a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroup.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroup.groovy @@ -16,12 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.model +import com.amazonaws.services.ec2.model.RequestLaunchTemplateData import com.fasterxml.jackson.annotation.JsonAnyGetter import com.fasterxml.jackson.annotation.JsonAnySetter +import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.documentation.Empty import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.kork.annotations.Alpha import groovy.transform.CompileStatic @CompileStatic @@ -33,7 +37,18 @@ class AmazonServerGroup implements ServerGroup, Serializable { Set instances Set health Map image + + /** + * An ASG can be configured with a launchConfig or launchTemplate or mixedInstancesPolicy. + */ Map launchConfig + Map launchTemplate + /** + * The fields marked @Alpha are subject to change in the future due to addition of newer ASG features like multiple launch templates. + */ + @Alpha + MixedInstancesPolicySettings mixedInstancesPolicy + Map asg List scalingPolicies List scheduledActions @@ -44,16 +59,27 @@ class AmazonServerGroup implements ServerGroup, Serializable { Set targetGroups - private Map dynamicProperties = new HashMap() + @JsonIgnore + private Map extraAttributes = new LinkedHashMap() + @Override @JsonAnyGetter - Map any() { - return dynamicProperties + Map getExtraAttributes() { + return extraAttributes } + /** + * Setter for non explicitly defined values. 
+ * + * Used both for Jackson mapping ({@code @JsonAnySetter}) and + * for Groovy's implicit Map constructor (this is the reason the + * method is named {@code set(String name, Object value)}) + * @param name The property name + * @param value The property value + */ @JsonAnySetter void set(String name, Object value) { - dynamicProperties.put(name, value) + extraAttributes.put(name, value) } @Override @@ -99,7 +125,52 @@ class AmazonServerGroup implements ServerGroup, Serializable { if (launchConfig && launchConfig.containsKey("securityGroups")) { securityGroups = (Set) launchConfig.securityGroups } - securityGroups + + RequestLaunchTemplateData launchTemplateData = null + if (launchTemplate) { + launchTemplateData = (RequestLaunchTemplateData) launchTemplate["launchTemplateData"] + } else if (mixedInstancesPolicy) { + launchTemplateData = (RequestLaunchTemplateData) mixedInstancesPolicy.launchTemplates[0]["launchTemplateData"] + } + if (launchTemplateData) { + def securityGroupIds = (Set) launchTemplateData?.securityGroupIds ?: [] + + if (securityGroupIds?.size()) { + securityGroups = (Set) securityGroupIds + } + + if (!securityGroupIds?.size()) { + def networkInterface = launchTemplateData?.networkInterfaces?.find({ it["deviceIndex"] == 0 }) + def groups = networkInterface?.groups ?: [] + securityGroups = (Set) groups + } + } + + return securityGroups + } + + /** + * Get a single instance type for a server group. + * Used for the case of single instance type i.e. ASG with launch config, launch template, mixed instances policy without overrides + * + * For the case of multiple instance types i.e. ASG MixedInstancesPolicy with overrides, use mixedInstancesPolicy.allowedInstanceTypes. + * @return a single instance type for ASG + */ + @Override + String getInstanceType() { + if (launchConfig) { + return launchConfig.instanceType + } else if (launchTemplate) { + return ((Map)launchTemplate.launchTemplateData).instanceType + } else if (mixedInstancesPolicy) { + if (!mixedInstancesPolicy.launchTemplateOverridesForInstanceType) { + def mipLt = (Map) mixedInstancesPolicy.launchTemplates.get(0) + return mipLt["launchTemplateData"]["instanceType"] + } + return null + } else { + return null + } } @Override @@ -152,13 +223,54 @@ class AmazonServerGroup implements ServerGroup, Serializable { } } - @Override + @Override ServerGroup.ImageSummary getImageSummary() { imagesSummary?.summaries?.get(0) } + @Empty + @JsonIgnore + @Alpha + Map getLaunchTemplateSpecification() { + if (this.asg?.launchTemplate) { + return this.asg.launchTemplate as Map + } else if (this.asg?.mixedInstancesPolicy) { + return this.asg.mixedInstancesPolicy["launchTemplate"]["launchTemplateSpecification"] as Map + } else { + return null + } + } + static Collection filterInstancesByHealthState(Collection instances, HealthState healthState) { instances.findAll { Instance it -> it.getHealthState() == healthState } } + /** + * MixedInstancesPolicySettings represents the configuration parameters for an ASG with mixed instances policy + */ + static class MixedInstancesPolicySettings { + /** + * Instance types that the Amazon Server Group can realistically launch. + * + * If launchTemplateOverrides are specified, they will override the same properties in launch template e.g. instanceType + * https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplate.html + */ + List allowedInstanceTypes + + /** + * Instances distribution configuration. 
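The getInstanceType precedence above, restated: a launch config wins, then a plain launch template, then a mixed instances policy, and the MIP case only yields a single answer when no per-type overrides exist (otherwise callers are expected to consult mixedInstancesPolicy.allowedInstanceTypes instead). A simplified, self-contained sketch of the decision, with the three sources reduced to plain values:

import java.util.List;

class InstanceTypePrecedenceSketch {
  // Each argument stands in for the corresponding source on AmazonServerGroup;
  // null means "that source is not configured on the ASG".
  static String resolve(String launchConfigType, String launchTemplateType,
                        String mipTemplateType, List<String> mipOverrideTypes) {
    if (launchConfigType != null) return launchConfigType;     // 1. launch config
    if (launchTemplateType != null) return launchTemplateType; // 2. launch template
    if (mipTemplateType != null) {                             // 3. mixed instances policy...
      // ...but only without overrides; with overrides there is no single type.
      return (mipOverrideTypes == null || mipOverrideTypes.isEmpty()) ? mipTemplateType : null;
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(resolve(null, "m5.large", null, null));                             // m5.large
    System.out.println(resolve(null, null, "m5.large", List.of("m5.large", "m5.xlarge"))); // null
  }
}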
+ */ + Map instancesDistribution + + /** + * A list of launch template objects configured in the ASG. + */ + List launchTemplates + + /** + * A list of overrides, one for each instance type configured in the ASG. + * Each override includes the information in LaunchTemplateOverrides https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplateOverrides.html + */ + List launchTemplateOverridesForInstanceType + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSubnet.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSubnet.groovy index 74ee2d82af3..b03bf2aafd8 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSubnet.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonSubnet.groovy @@ -26,6 +26,7 @@ class AmazonSubnet implements Subnet { String cidrBlock Integer availableIpAddressCount String account + String accountId String region String availabilityZone String purpose diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroupState.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroupState.groovy index c9fd6060c7a..ba7c5213f2c 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroupState.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroupState.groovy @@ -40,16 +40,17 @@ class InstanceTargetGroupState { private HealthState deriveHealthState() { //ELBv2 has concrete states: unused -> initial -> healthy -> draining // \-> unhealthy -/ - if (state == 'healthy') { - return HealthState.Up + switch (state) { + case 'healthy': + return HealthState.Up + case 'initial': + return HealthState.Starting + case 'unused': + return HealthState.OutOfService + case 'draining': + return HealthState.Draining + default: + return HealthState.Down } - - if (state == 'initial') { - return HealthState.Starting - } - if (state == 'unused' || state == 'draining') { - return HealthState.OutOfService - } - return HealthState.Down } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroups.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroups.groovy index ea5839f90f1..a73864e49a0 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroups.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/InstanceTargetGroups.groovy @@ -38,6 +38,7 @@ class InstanceTargetGroups { instanceTargetGroupStates.any { it.healthState == HealthState.Starting } ? HealthState.Starting : instanceTargetGroupStates.any { it.healthState == HealthState.Down } ? HealthState.Down : instanceTargetGroupStates.any { it.healthState == HealthState.OutOfService } ? HealthState.OutOfService : + instanceTargetGroupStates.any { it.healthState == HealthState.Draining } ? 
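+      // (Illustrative note: the precedence here runs Starting > Down > OutOfService > Draining > Up,
+      // so an instance that is Draining in one target group and Up in another resolves to Draining.)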
HealthState.Draining : HealthState.Up } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsCleanupProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsCleanupProvider.groovy index 14220624f76..a52dd2e6f92 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsCleanupProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsCleanupProvider.groovy @@ -16,26 +16,14 @@ package com.netflix.spinnaker.clouddriver.aws.provider -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware -import com.netflix.spinnaker.cats.provider.Provider -class AwsCleanupProvider extends AgentSchedulerAware implements Provider { - public static final String PROVIDER_NAME = AwsCleanupProvider.name - - private final Collection agents +import com.netflix.spinnaker.clouddriver.security.BaseProvider - AwsCleanupProvider(Collection agents) { - this.agents = agents - } +class AwsCleanupProvider extends BaseProvider { + public static final String PROVIDER_NAME = AwsCleanupProvider.name @Override String getProviderName() { return PROVIDER_NAME } - - @Override - Collection getAgents() { - return agents - } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsInfrastructureProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsInfrastructureProvider.groovy index d205c2250aa..2f6fc7ff968 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsInfrastructureProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsInfrastructureProvider.groovy @@ -17,46 +17,32 @@ package com.netflix.spinnaker.clouddriver.aws.provider import com.fasterxml.jackson.core.type.TypeReference -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware -import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.cache.KeyParser import com.netflix.spinnaker.clouddriver.cache.SearchableProvider -import com.netflix.spinnaker.clouddriver.aws.cache.Keys +import com.netflix.spinnaker.clouddriver.security.BaseProvider import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.SECURITY_GROUPS -import static com.netflix.spinnaker.clouddriver.cache.SearchableProvider.SearchableResource -class AwsInfrastructureProvider extends AgentSchedulerAware implements SearchableProvider { +class AwsInfrastructureProvider extends BaseProvider implements SearchableProvider { public static final TypeReference> ATTRIBUTES = new TypeReference>() {} public static final String PROVIDER_NAME = AwsInfrastructureProvider.name - private final Collection agents - private final KeyParser keyParser = new Keys() - AwsInfrastructureProvider(Collection agents) { - this.agents = agents - } - @Override String getProviderName() { return PROVIDER_NAME } - @Override - Collection getAgents() { - agents - } - final Set defaultCaches = [SECURITY_GROUPS.ns].asImmutable() final Map urlMappingTemplates = [ (SECURITY_GROUPS.ns): '/securityGroups/$account/$provider/$name?region=$region' ] - final Map searchResultHydrators = Collections.emptyMap() + final Map searchResultHydrators = Collections.emptyMap() @Override Map parseKey(String key) { diff 
--git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProvider.groovy index c790c7a9446..ac3765c7828 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProvider.groovy @@ -16,26 +16,26 @@ package com.netflix.spinnaker.clouddriver.aws.provider -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware + import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.clouddriver.aws.data.Keys import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.KeyParser import com.netflix.spinnaker.clouddriver.cache.SearchableProvider -import com.netflix.spinnaker.clouddriver.eureka.provider.agent.EurekaAwareProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.aws.data.Keys import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCachingAgent +import com.netflix.spinnaker.clouddriver.eureka.provider.agent.EurekaAwareProvider +import com.netflix.spinnaker.clouddriver.security.BaseProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* -class AwsProvider extends AgentSchedulerAware implements SearchableProvider, EurekaAwareProvider { +class AwsProvider extends BaseProvider implements SearchableProvider, EurekaAwareProvider { public static final String PROVIDER_NAME = AwsProvider.name final KeyParser keyParser = new Keys() - final AccountCredentialsRepository accountCredentialsRepository + final CredentialsRepository credentialsRepository final Set defaultCaches = [ LOAD_BALANCERS.ns, @@ -55,12 +55,11 @@ class AwsProvider extends AgentSchedulerAware implements SearchableProvider, Eur (new AmazonSearchableResource(INSTANCES.ns)): new InstanceSearchResultHydrator(), ] - final Collection agents private Collection healthAgents - AwsProvider(AccountCredentialsRepository accountCredentialsRepository, Collection agents) { - this.agents = agents - this.accountCredentialsRepository = accountCredentialsRepository + AwsProvider(CredentialsRepository credentialsRepository) { + super() + this.credentialsRepository = credentialsRepository synchronizeHealthAgents() } @@ -76,9 +75,7 @@ class AwsProvider extends AgentSchedulerAware implements SearchableProvider, Eur } Collection getHealthAgents() { - def allHealthAgents = [] - allHealthAgents.addAll(this.healthAgents) - Collections.unmodifiableCollection(allHealthAgents) + Collections.unmodifiableCollection(this.healthAgents) } @Override @@ -91,7 +88,7 @@ class AwsProvider extends AgentSchedulerAware implements SearchableProvider, Eur return Optional.of(keyParser) } - private static class InstanceSearchResultHydrator implements SearchableProvider.SearchResultHydrator { + private static class InstanceSearchResultHydrator implements SearchResultHydrator { @Override Map hydrateResult(Cache cacheView, Map result, String id) { def item = cacheView.get(INSTANCES.ns, id) @@ -133,15 +130,13 @@ class AwsProvider extends AgentSchedulerAware implements SearchableProvider, Eur private String getCredentialName(String accountId, boolean allowMultipleEurekaPerAccount, String 
eurekaAccountName) { if (allowMultipleEurekaPerAccount) { - def credentialName = accountCredentialsRepository.all.find { - it instanceof NetflixAmazonCredentials && it.accountId == accountId && it.name == eurekaAccountName - }?.name - if (credentialName) { - return credentialName + def credentials = credentialsRepository.getOne(eurekaAccountName) + if (credentials && credentials.accountId == accountId) { + return credentials.getName() } } - return accountCredentialsRepository.all.find { - it instanceof NetflixAmazonCredentials && it.accountId == accountId + return credentialsRepository.all.find { + it.accountId == accountId }?.name } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractAmazonLoadBalancerCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractAmazonLoadBalancerCachingAgent.groovy index 9627fe78ec3..97800e940a9 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractAmazonLoadBalancerCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractAmazonLoadBalancerCachingAgent.groovy @@ -34,6 +34,7 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -103,6 +104,7 @@ abstract class AbstractAmazonLoadBalancerCachingAgent implements CachingAgent, O final String region final ObjectMapper objectMapper final Registry registry + final AmazonCachingAgentFilter amazonCachingAgentFilter final OnDemandMetricsSupport metricsSupport AbstractAmazonLoadBalancerCachingAgent(AmazonCloudProvider amazonCloudProvider, @@ -110,25 +112,27 @@ abstract class AbstractAmazonLoadBalancerCachingAgent implements CachingAgent, O NetflixAmazonCredentials account, String region, ObjectMapper objectMapper, - Registry registry) { + Registry registry, + AmazonCachingAgentFilter amazonCachingAgentFilter) { this.amazonCloudProvider = amazonCloudProvider this.amazonClientProvider = amazonClientProvider this.account = account this.region = region - this.objectMapper = objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) + this.objectMapper = objectMapper.copy().enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) this.registry = registry - this.metricsSupport = new OnDemandMetricsSupport(registry, this, amazonCloudProvider.id + ":" + "${amazonCloudProvider.id}:${OnDemandAgent.OnDemandType.LoadBalancer}") + this.amazonCachingAgentFilter = amazonCachingAgentFilter + this.metricsSupport = new OnDemandMetricsSupport(registry, this, amazonCloudProvider.id + ":" + "${amazonCloudProvider.id}:${OnDemandType.LoadBalancer}") } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.LoadBalancer && cloudProvider == amazonCloudProvider.id + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.LoadBalancer && cloudProvider == amazonCloudProvider.id } abstract CacheResult loadDataInternal(ProviderCache providerCache) @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache 
providerCache) { return [] } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgent.groovy index bd2931be54d..23c9231ad79 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgent.groovy @@ -17,22 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeListenersRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeListenersResult -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeRulesRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeRulesResult -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupAttributesRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthRequest -import com.amazonaws.services.elasticloadbalancingv2.model.Listener -import com.amazonaws.services.elasticloadbalancingv2.model.ListenerNotFoundException -import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancer -import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancerNotFoundException -import com.amazonaws.services.elasticloadbalancingv2.model.Rule -import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup -import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupAttribute -import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription +import com.amazonaws.services.elasticloadbalancingv2.model.* import com.fasterxml.jackson.core.type.TypeReference import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spectator.api.Registry @@ -41,6 +26,7 @@ import com.netflix.spinnaker.cats.agent.CacheResult import com.netflix.spinnaker.cats.agent.DefaultCacheResult import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.data.ArnUtils @@ -56,15 +42,10 @@ import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCachingAgent -import retrofit.RetrofitError import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES -import static 
com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAgent implements HealthProvidingCachingAgent { final EddaApi eddaApi @@ -78,6 +59,7 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc ]) private static final String HEALTH_ID = "aws-load-balancer-v2-target-group-instance-health" + private static final int DESCRIBE_TAG_LIMIT = 20 AmazonApplicationLoadBalancerCachingAgent(AmazonCloudProvider amazonCloudProvider, AmazonClientProvider amazonClientProvider, @@ -86,8 +68,9 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc EddaApi eddaApi, ObjectMapper objectMapper, Registry registry, - EddaTimeoutConfig eddaTimeoutConfig) { - super(amazonCloudProvider, amazonClientProvider, account, region, objectMapper, registry) + EddaTimeoutConfig eddaTimeoutConfig, + AmazonCachingAgentFilter amazonCachingAgentFilter) { + super(amazonCloudProvider, amazonClientProvider, account, region, objectMapper, registry, amazonCachingAgentFilter) this.eddaApi = eddaApi this.eddaTimeoutConfig = eddaTimeoutConfig } @@ -99,9 +82,9 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc @Override Optional> getCacheKeyPatterns() { - return [ + return Optional.of([ (LOAD_BALANCERS.ns): Keys.getLoadBalancerKey('*', account.name, region, 'vpc-????????', '*') - ] + ]) } @Override @@ -151,10 +134,13 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc TargetGroupAssociations targetGroupAssociations = this.buildTargetGroupAssociations(loadBalancing, targetGroups, false) ListenerAssociations listenerAssociations = this.buildListenerAssociations(loadBalancing, [loadBalancer], false) + Map> loadBalancerAttributes = this.buildLoadBalancerAttributes(loadBalancing, [loadBalancer], false) def cacheResult = metricsSupport.transformData { buildCacheResult( + providerCache, [loadBalancer], + loadBalancerAttributes, targetGroups, targetGroupAssociations, listenerAssociations, @@ -166,13 +152,13 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc if (cacheResult.cacheResults.values().flatten().isEmpty()) { // avoid writing an empty onDemand cache record (instead delete any that may have previously existed) providerCache.evictDeletedItems(ON_DEMAND.ns, [ - Keys.getLoadBalancerKey( - data.loadBalancerName as String, - account.name, - region, - data.vpcId as String, - data.loadBalancerType as String - ) + Keys.getLoadBalancerKey( + data.loadBalancerName as String, + account.name, + region, + data.vpcId as String, + data.loadBalancerType as String + ) ]) } else { metricsSupport.onDemandStore { @@ -185,7 +171,7 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc ), 10 * 60, [ - cacheTime : new Date(), + cacheTime: new Date(), cacheResults: objectMapper.writeValueAsString(cacheResult.cacheResults) ], [:] @@ -219,7 +205,7 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc List targetGroupAttributesList = eddaApi.targetGroupAttributes() List targetGroupHealthList = eddaApi.targetGroupHealth() targetGroupArnToAttributes = 
targetGroupAttributesList.collectEntries { [(it.targetGroupArn): it.attributes] } - targetGroupArnToHealths = targetGroupHealthList.collectEntries { [(it.targetGroupArn): it.health]} + targetGroupArnToHealths = targetGroupHealthList.collectEntries { [(it.targetGroupArn): it.health] } } else { targetGroupArnToHealths = new HashMap>() targetGroupArnToAttributes = new HashMap>() @@ -237,15 +223,33 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc return [ targetGroupArnToAttributes: targetGroupArnToAttributes, - targetGroupArnToHealths: targetGroupArnToHealths + targetGroupArnToHealths : targetGroupArnToHealths ] } + Map> buildLoadBalancerAttributes(AmazonElasticLoadBalancing loadBalancing, + List allLoadBalancers, + boolean useEdda) { + Map> loadBalancerArnToAttributes + if (useEdda) { + loadBalancerArnToAttributes = eddaApi.applicationLoadBalancerAttributes().collectEntries { + [(it.loadBalancerArn): it.attributes] + } + } else { + loadBalancerArnToAttributes = new HashMap>() + for (LoadBalancer loadBalancer : allLoadBalancers) { + loadBalancerArnToAttributes.put(loadBalancer.loadBalancerArn, loadBalancing.describeLoadBalancerAttributes( + new DescribeLoadBalancerAttributesRequest().withLoadBalancerArn(loadBalancer.loadBalancerArn)).attributes) + } + } + return loadBalancerArnToAttributes + } + ListenerAssociations buildListenerAssociations(AmazonElasticLoadBalancing loadBalancing, List allLoadBalancers, boolean useEdda) { Map> loadBalancerArnToListeners = allLoadBalancers.collectEntries { - [(it.loadBalancerArn) : []] + [(it.loadBalancerArn): []] } Map> listenerToRules = [:].withDefault { [] } @@ -315,7 +319,6 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc if (useEdda) { start = amazonClientProvider.lastModified ?: 0 } - allLoadBalancers.addAll(resp.loadBalancers) if (resp.nextMarker) { describeLoadBalancerRequest.withMarker(resp.nextMarker) @@ -324,12 +327,43 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc } } + // filter load balancers if there is any filter configuration established + if (amazonCachingAgentFilter.hasTagFilter()) { + def loadBalancerPartitions = allLoadBalancers*.loadBalancerArn.collate(DESCRIBE_TAG_LIMIT) + Map> loadBalancerTags = [:] + loadBalancerPartitions.each {loadBalancerPartition -> + def tagsRequest = new DescribeTagsRequest().withResourceArns(loadBalancerPartition) + def tagsResponse = loadBalancing.describeTags(tagsRequest) + loadBalancerTags.putAll(tagsResponse.tagDescriptions?.collectEntries { + [(it.resourceArn): it.tags?.collect {new AmazonCachingAgentFilter.ResourceTag(it.key, it.value)} ] + }) + } + + allLoadBalancers = allLoadBalancers.findAll { lb -> + return amazonCachingAgentFilter.shouldRetainResource(loadBalancerTags?.get(lb.loadBalancerArn)) + } + } + + def loadBalancerAttributes = this.buildLoadBalancerAttributes(loadBalancing, allLoadBalancers, useEdda) + // Get all the target groups List allTargetGroups = [] DescribeTargetGroupsRequest describeTargetGroupsRequest = new DescribeTargetGroupsRequest() + HashSet allLoadBalancerArns = new HashSet(allLoadBalancers*.loadBalancerArn) while (true) { def resp = loadBalancing.describeTargetGroups(describeTargetGroupsRequest) - allTargetGroups.addAll(resp.targetGroups) + + // only keep target groups which are for the set of filtered load balancers + def targetGroups = resp.targetGroups + if (amazonCachingAgentFilter.hasTagFilter()) { + targetGroups?.retainAll{ tg -> + tg.loadBalancerArns?.find { tgLB 
-> + allLoadBalancerArns.contains(tgLB) + } != null + } + } + + allTargetGroups.addAll(targetGroups) if (resp.nextMarker) { describeTargetGroupsRequest.withMarker(resp.nextMarker) } else { @@ -360,13 +394,15 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc def pendingOnDemandRequestsForLoadBalancers = providerCache.getAll(ON_DEMAND.ns, pendingOnDemandRequestKeys) pendingOnDemandRequestsForLoadBalancers.each { if (it.attributes.cacheTime < start) { -// evictableOnDemandCacheDatas << it + evictableOnDemandCacheDatas << it } else { usableOnDemandCacheDatas << it } } - return buildCacheResult(allLoadBalancers, + return buildCacheResult(providerCache, + allLoadBalancers, + loadBalancerAttributes, allTargetGroups, targetGroupAssociations, listenerAssociations, @@ -376,7 +412,33 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc ) } - private CacheResult buildCacheResult(Collection allLoadBalancers, + @Override + Collection> pendingOnDemandRequests(ProviderCache providerCache) { + Collection keys = providerCache.filterIdentifiers( + ON_DEMAND.ns, + Keys.getLoadBalancerKey("*", "*", "*", "*", "*") + ) + + if (keys.isEmpty()) { + return [] + } else { + return providerCache.getAll(ON_DEMAND.ns, keys, RelationshipCacheFilter.none()).collect { + def details = Keys.parse(it.id) + + return [ + id: it.id, + details: details, + cacheTime : it.attributes.cacheTime, + cacheExpiry : it.attributes.cacheExpiry, + processedTime : it.attributes.processedTime + ] + } + } + } + + private CacheResult buildCacheResult(ProviderCache providerCache, + Collection allLoadBalancers, + Map loadBalancerAttributes, Collection allTargetGroups, TargetGroupAssociations targetGroupAssociations, ListenerAssociations listenerAssociations, @@ -397,7 +459,8 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc if (onDemandCacheData) { log.info("Using onDemand cache value (${onDemandCacheData.id})") - Map> cacheResults = objectMapper.readValue(onDemandCacheData.attributes.cacheResults as String, new TypeReference>>() {}) + Map> cacheResults = objectMapper.readValue(onDemandCacheData.attributes.cacheResults as String, new TypeReference>>() { + }) CacheHelpers.cache(cacheResults["loadBalancers"], loadBalancers) CacheHelpers.cache(cacheResults["targetGroups"], targetGroups) } else { @@ -410,7 +473,7 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc // Translate availabilityZones to the format we expect List availabilityZones = new ArrayList() List subnets = new ArrayList() - ((List>)lbAttributes.availabilityZones).each { az -> + ((List>) lbAttributes.availabilityZones).each { az -> availabilityZones.push(az.zoneName) subnets.push(az.subnetId) } @@ -425,9 +488,9 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc for (Listener listener : listenerData) { Map listenerAttributes = objectMapper.convertValue(listener, ATTRIBUTES) - listenerAttributes.loadBalancerName = ArnUtils.extractLoadBalancerName((String)listenerAttributes.loadBalancerArn).get() + listenerAttributes.loadBalancerName = ArnUtils.extractLoadBalancerName((String) listenerAttributes.loadBalancerArn).get() listenerAttributes.remove('loadBalancerArn') - for (Map action : (List>)listenerAttributes.defaultActions) { + for (Map action : (List>) listenerAttributes.defaultActions) { if (!action.targetGroupArn) { continue } @@ -444,7 +507,7 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc 
List rules = new ArrayList<>() for (Rule rule : listenerAssociations.listenerToRules.get(listener)) { Map ruleAttributes = objectMapper.convertValue(rule, ATTRIBUTES) - for (Map action : (List>)ruleAttributes.actions) { + for (Map action : (List>) ruleAttributes.actions) { if (!action.targetGroupArn) { continue } @@ -466,6 +529,28 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc lbAttributes.listeners = listeners + if (loadBalancerAttributes.containsKey(lb.loadBalancerArn)) { + lbAttributes.attributes = loadBalancerAttributes.get(lb.loadBalancerArn) + LoadBalancerAttribute deletionProtectionAttribute = lbAttributes.attributes?.find { + it.key == 'deletion_protection.enabled' + } + if (deletionProtectionAttribute != null) { + lbAttributes.deletionProtection = Boolean.parseBoolean(deletionProtectionAttribute.getValue()) + } + LoadBalancerAttribute loadBalancingCrossZoneAttribute = lbAttributes.attributes?.find { + it.key == 'load_balancing.cross_zone.enabled' + } + if (loadBalancingCrossZoneAttribute != null) { + lbAttributes.loadBalancingCrossZone = Boolean.parseBoolean(loadBalancingCrossZoneAttribute.getValue()) + } + LoadBalancerAttribute idleTimeoutAttribute = lbAttributes.attributes?.find { + it.key == 'idle_timeout.timeout_seconds' + } + if (idleTimeoutAttribute != null) { + lbAttributes.idleTimeout = Integer.parseInt(idleTimeoutAttribute.getValue()) + } + } + loadBalancers[loadBalancerKey].with { attributes.putAll(lbAttributes) relationships[TARGET_GROUPS.ns].addAll(allTargetGroupKeys) @@ -526,28 +611,45 @@ class AmazonApplicationLoadBalancerCachingAgent extends AbstractAmazonLoadBalanc // Have to do this separately because an instance can be in multiple target groups List itgs = InstanceTargetGroups.fromInstanceTargetGroupStates(instanceTargetGroupStates) Collection tgHealths = [] - Collection instances = [] + Collection instanceRels = [] + + Collection instanceIds = itgs.collect { + Keys.getInstanceKey(it.instanceId, account.name, region) + } + Map instances = providerCache + .getAll(INSTANCES.ns, instanceIds, RelationshipCacheFilter.none()) + .collectEntries { [(it.id): it] } for (InstanceTargetGroups itg in itgs) { String instanceId = Keys.getInstanceKey(itg.instanceId, account.name, region) String healthId = Keys.getInstanceHealthKey(itg.instanceId, account.name, region, healthId) Map attributes = objectMapper.convertValue(itg, ATTRIBUTES) Map> relationships = [(INSTANCES.ns): [instanceId]] + + // An ALB can potentially have target groups spanning logical applications, + // so we cannot derive instance -> application mappings from load balancer data + if (instances[instanceId] != null) { + String application = instances[instanceId].attributes.get("application") + if (application != null) { + attributes.put("application", application) + } + } + tgHealths.add(new DefaultCacheData(healthId, attributes, relationships)) - instances.add(new DefaultCacheData(instanceId, [:], [(HEALTH.ns): [healthId]])) + instanceRels.add(new DefaultCacheData(instanceId, [:], [(HEALTH.ns): [healthId]])) } log.info("Caching ${loadBalancers.size()} load balancers in ${agentType}") if (evictableOnDemandCacheDatas) { - log.info("Evicting onDemand cache keys (${evictableOnDemandCacheDatas.collect { "${it.id}/${start - (long)it.attributes.cacheTime}ms"}.join(", ")})") + log.info("Evicting onDemand cache keys (${evictableOnDemandCacheDatas.collect { "${it.id}/${start - (long) it.attributes.cacheTime}ms" }.join(", ")})") } new DefaultCacheResult([ - (LOAD_BALANCERS.ns): 
loadBalancers.values(), - (TARGET_GROUPS.ns): targetGroups.values(), - (HEALTH.ns): tgHealths, - (INSTANCES.ns): instances - ],[ + (LOAD_BALANCERS.ns): loadBalancers.values(), + (TARGET_GROUPS.ns) : targetGroups.values(), + (HEALTH.ns) : tgHealths, + (INSTANCES.ns) : instanceRels + ], [ (ON_DEMAND.ns): evictableOnDemandCacheDatas*.id]) } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonElasticIpCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonElasticIpCachingAgent.groovy index 6cb72aacac2..feb243fca05 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonElasticIpCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonElasticIpCachingAgent.groovy @@ -29,6 +29,9 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent + +import java.util.concurrent.TimeUnit import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.ELASTIC_IPS @@ -36,7 +39,9 @@ import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.ELASTIC import groovy.util.logging.Slf4j @Slf4j -class AmazonElasticIpCachingAgent implements CachingAgent, AccountAware { +class AmazonElasticIpCachingAgent implements CachingAgent, AccountAware, CustomScheduledAgent { + private static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(5) + private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5) final AmazonClientProvider amazonClientProvider final NetflixAmazonCredentials account @@ -92,4 +97,14 @@ class AmazonElasticIpCachingAgent implements CachingAgent, AccountAware { log.info("Caching ${data.size()} items in ${agentType}") new DefaultCacheResult([(ELASTIC_IPS.ns): data]) } + + @Override + long getPollIntervalMillis() { + return DEFAULT_POLL_INTERVAL_MILLIS + } + + @Override + long getTimeoutMillis() { + return DEFAULT_TIMEOUT_MILLIS + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgent.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgent.java deleted file mode 100644 index fec70634bd8..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgent.java +++ /dev/null @@ -1,223 +0,0 @@ -package com.netflix.spinnaker.clouddriver.aws.provider.agent; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.agent.CachingAgent; -import com.netflix.spinnaker.cats.agent.DefaultCacheResult; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.DefaultCacheData; -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; -import 
com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.aws.cache.Keys; -import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; -import org.apache.http.*; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.impl.client.HttpClients; -import org.apache.http.util.EntityUtils; - -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.util.*; -import java.util.stream.Collectors; - -public class AmazonInstanceTypeCachingAgent implements CachingAgent { - - private static final TypeReference> ATTRIBUTES - = new TypeReference>() {}; - - // https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/us-west-2/index.json - private final String region; - private final AccountCredentialsRepository accountCredentialsRepository; - private final URI pricingUri; - private final HttpHost pricingHost; - private final HttpClient httpClient; - private final ObjectMapper objectMapper = - new ObjectMapper().disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); - - - public AmazonInstanceTypeCachingAgent(String region, - AccountCredentialsRepository accountCredentialsRepository) { - this(region, accountCredentialsRepository, HttpClients.createDefault()); - } - - //VisibleForTesting - AmazonInstanceTypeCachingAgent(String region, - AccountCredentialsRepository accountCredentialsRepository, - HttpClient httpClient) { - this.region = region; - this.accountCredentialsRepository = accountCredentialsRepository; - pricingHost = HttpHost.create("https://pricing.us-east-1.amazonaws.com"); - pricingUri = URI.create("/offers/v1.0/aws/AmazonEC2/current/" + region + "/index.json"); - this.httpClient = httpClient; - } - - @Override - public Collection getProvidedDataTypes() { - return Collections.unmodifiableList( - Arrays.asList( - new AgentDataType( - Keys.Namespace.INSTANCE_TYPES.getNs(), AgentDataType.Authority.AUTHORITATIVE), - new AgentDataType( - getAgentType(), AgentDataType.Authority.AUTHORITATIVE))); - } - - @Override - public CacheResult loadData(ProviderCache providerCache) { - try { - Set matchingAccounts = accountCredentialsRepository.getAll() - .stream() - .filter(AmazonCredentials.class::isInstance) - .map(AmazonCredentials.class::cast) - .filter(ac -> ac.getRegions().stream().anyMatch(r -> region.equals(r.getName()))) - .map(AccountCredentials::getName) - .collect(Collectors.toSet()); - - if (matchingAccounts.isEmpty()) { - return new DefaultCacheResult(Collections.emptyMap()); - } - - CacheData metadata = providerCache.get( - getAgentType(), - "metadata", - RelationshipCacheFilter.none()); - MetadataAttributes metadataAttributes = null; - if (metadata != null) { - metadataAttributes = objectMapper.convertValue(metadata.getAttributes(), MetadataAttributes.class); - } - - Set instanceTypes = null; - if (metadataAttributes != null - && metadataAttributes.etag != null - && metadataAttributes.cachedInstanceTypes != null) { - - //we have enough from a previous request to not re-request if the etag is unchanged.. 
- HttpResponse headResponse = httpClient.execute(pricingHost, new HttpHead(pricingUri)); - EntityUtils.consumeQuietly(headResponse.getEntity()); - if (headResponse.getStatusLine().getStatusCode() != 200) { - throw new Exception("failed to read instance type metadata for " + region + ": " - + headResponse.getStatusLine().toString()); - } - - Optional etag = getEtagHeader(headResponse); - - if (etag.filter(metadataAttributes.etag::equals).isPresent()) { - instanceTypes = metadataAttributes.cachedInstanceTypes; - } - } - if (instanceTypes == null) { - HttpResponse getResponse = httpClient.execute(pricingHost, new HttpGet(pricingUri)); - if (getResponse.getStatusLine().getStatusCode() != 200) { - EntityUtils.consumeQuietly(getResponse.getEntity()); - throw new Exception("failed to read instance type data for " + region + ": " - + getResponse.getStatusLine().toString()); - } - Optional etag = getEtagHeader(getResponse); - HttpEntity entity = getResponse.getEntity(); - instanceTypes = fromStream(entity.getContent()); - EntityUtils.consumeQuietly(entity); - if (etag.isPresent()) { - metadataAttributes = new MetadataAttributes(); - metadataAttributes.etag = etag.get(); - metadataAttributes.cachedInstanceTypes = new HashSet<>(instanceTypes); - metadata = new DefaultCacheData( - "metadata", - objectMapper.convertValue(metadataAttributes, ATTRIBUTES), - Collections.emptyMap()); - } else { - metadata = null; - } - } - Map> evictions = new HashMap<>(); - Map> cacheResults = new HashMap<>(); - List instanceTypeData = new ArrayList<>(); - cacheResults.put(Keys.Namespace.INSTANCE_TYPES.getNs(), instanceTypeData); - if (metadata != null) { - cacheResults.put(getAgentType(), Collections.singleton(metadata)); - } else { - evictions.put(getAgentType(), Collections.singleton("metadata")); - } - - for (String instanceType : instanceTypes) { - for (String account : matchingAccounts) { - Map instanceTypeAttributes = new HashMap<>(); - instanceTypeAttributes.put("account", account); - instanceTypeAttributes.put("region", region); - instanceTypeAttributes.put("name", instanceType); - instanceTypeData.add( - new DefaultCacheData( - Keys.getInstanceTypeKey(instanceType, region, account), - instanceTypeAttributes, - Collections.emptyMap())); - } - } - - return new DefaultCacheResult(cacheResults, evictions); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - } - - Optional getEtagHeader(HttpResponse response) { - return Optional.ofNullable(response) - .map(r -> r.getFirstHeader("ETag")) - .map(Header::getElements) - .filter(e -> e.length > 0) - .map(e -> e[0].getName()); - } - - @Override - public String getAgentType() { - return getClass().getSimpleName() + "/" + region; - } - - @Override - public String getProviderName() { - return AwsInfrastructureProvider.PROVIDER_NAME; - } - - static class Offering { - public String productFamily; - public ComputeInstanceAttributes attributes; - } - - static class ComputeInstanceAttributes { - public String instanceType; - - @Override - public String toString() { - return instanceType; - } - } - - static class Offerings { - public Map products; - } - - static class MetadataAttributes { - public String etag; - public Set cachedInstanceTypes; - } - - - //visible for testing - Set fromStream(InputStream is) throws IOException { - Offerings offerings = objectMapper.readValue(is, Offerings.class); - Set instanceTypes = offerings.products.values() - .stream() - .filter(o -> o.productFamily != null && o.productFamily.startsWith("Compute Instance")) - .map(o -> 
o.attributes.instanceType) - .filter(it -> it != null && !it.isEmpty()) - .collect(Collectors.toSet()); - - return instanceTypes; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonKeyPairCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonKeyPairCachingAgent.groovy index 1df37e41ed9..2edc7834f6c 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonKeyPairCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonKeyPairCachingAgent.groovy @@ -29,6 +29,9 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent + +import java.util.concurrent.TimeUnit import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.KEY_PAIRS @@ -36,7 +39,9 @@ import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.KEY_PAI import groovy.util.logging.Slf4j @Slf4j -class AmazonKeyPairCachingAgent implements CachingAgent, AccountAware { +class AmazonKeyPairCachingAgent implements CachingAgent, AccountAware, CustomScheduledAgent { + private static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(5) + private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5) final AmazonClientProvider amazonClientProvider final NetflixAmazonCredentials account @@ -89,4 +94,14 @@ class AmazonKeyPairCachingAgent implements CachingAgent, AccountAware { log.info("Caching ${data.size()} items in ${agentType}") new DefaultCacheResult([(KEY_PAIRS.ns): data]) } + + @Override + long getPollIntervalMillis() { + return DEFAULT_POLL_INTERVAL_MILLIS + } + + @Override + long getTimeoutMillis() { + return DEFAULT_TIMEOUT_MILLIS + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgent.groovy index 3e9cfd62b76..49a26f7dfa7 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgent.groovy @@ -16,7 +16,11 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent +import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing +import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancerAttributesRequest import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersRequest +import com.amazonaws.services.elasticloadbalancing.model.DescribeTagsRequest +import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerNotFoundException import com.fasterxml.jackson.core.type.TypeReference @@ -29,6 +33,7 @@ import com.netflix.spinnaker.cats.cache.DefaultCacheData import 
com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.data.Keys +import com.netflix.spinnaker.clouddriver.aws.edda.EddaApi import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent @@ -38,20 +43,27 @@ import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LO import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAgent { + + private EddaApi eddaApi + private static final int DESCRIBE_TAG_LIMIT = 20 + AmazonLoadBalancerCachingAgent(AmazonCloudProvider amazonCloudProvider, AmazonClientProvider amazonClientProvider, NetflixAmazonCredentials account, String region, + EddaApi eddaApi, ObjectMapper objectMapper, - Registry registry) { - super(amazonCloudProvider, amazonClientProvider, account, region, objectMapper, registry) + Registry registry, + AmazonCachingAgentFilter amazonCachingAgentFilter) { + super(amazonCloudProvider, amazonClientProvider, account, region, objectMapper, registry, amazonCachingAgentFilter) + this.eddaApi = eddaApi } @Override Optional> getCacheKeyPatterns() { - return [ + return Optional.of([ (LOAD_BALANCERS.ns): Keys.getLoadBalancerKey('*', account.name, region, 'vpc-????????', null) - ] + ]) } @Override @@ -59,6 +71,9 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg if (!data.containsKey("loadBalancerName")) { return null } + if ("application".equals(data.loadBalancerType)) { + return null + } if (!data.containsKey("account")) { return null } @@ -74,8 +89,9 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg return null } + def loadBalancing = amazonClientProvider.getAmazonElasticLoadBalancing(account, region, false) + List loadBalancers = metricsSupport.readData { - def loadBalancing = amazonClientProvider.getAmazonElasticLoadBalancing(account, region, true) try { return loadBalancing.describeLoadBalancers( new DescribeLoadBalancersRequest().withLoadBalancerNames(data.loadBalancerName as String) @@ -85,7 +101,15 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg } } - def cacheResult = metricsSupport.transformData { buildCacheResult(loadBalancers, [:], System.currentTimeMillis(), []) } + Map loadBalancerAttributes = metricsSupport.readData() { + Map lbA = [:] + for (LoadBalancerDescription lb : loadBalancers) { + lbA.put( lb.loadBalancerName, loadBalancing.describeLoadBalancerAttributes(new DescribeLoadBalancerAttributesRequest().withLoadBalancerName(lb.loadBalancerName))) + } + return lbA + } + + def cacheResult = metricsSupport.transformData { buildCacheResult(loadBalancers, loadBalancerAttributes, [:], System.currentTimeMillis(), []) } if (cacheResult.cacheResults.values().flatten().isEmpty()) { // avoid writing an empty onDemand cache record (instead delete any that may have previously existed) providerCache.evictDeletedItems(ON_DEMAND.ns, [Keys.getLoadBalancerKey(data.loadBalancerName as String, account.name, region, data.vpcId as String, null)]) @@ -138,6 +162,26 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg } } + // filter load balancers if there is any filter configuration established + if (amazonCachingAgentFilter.hasTagFilter()) { + 
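+      // The classic ELB DescribeTags API accepts only a bounded batch of load balancer
+      // names per call (hence DESCRIBE_TAG_LIMIT = 20 above), so e.g. 45 load balancers
+      // are queried in partitions of 20/20/5 via collate() below.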
+ def loadBalancerPartitions = allLoadBalancers*.loadBalancerName.collate(DESCRIBE_TAG_LIMIT) + Map> loadBalancerTags = [:] + loadBalancerPartitions.each {loadBalancerPartition -> + def tagsRequest = new DescribeTagsRequest().withLoadBalancerNames(loadBalancerPartition) + def tagsResponse = loadBalancing.describeTags(tagsRequest) + loadBalancerTags.putAll(tagsResponse.tagDescriptions?.collectEntries { + [(it.loadBalancerName): it.tags?.collect {new AmazonCachingAgentFilter.ResourceTag(it.key, it.value)} ] + }) + } + + allLoadBalancers = allLoadBalancers.findAll { lb -> + return amazonCachingAgentFilter.shouldRetainResource(loadBalancerTags?.get(lb.loadBalancerName)) + } + } + + Map loadBalancerAttributes = buildLoadBalancerAttributes(loadBalancing, allLoadBalancers, account.eddaEnabled) + if (!start) { if (account.eddaEnabled && allLoadBalancers) { log.warn("${agentType} did not receive lastModified value in response metadata") @@ -164,10 +208,28 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg } } - buildCacheResult(allLoadBalancers, usableOnDemandCacheDatas.collectEntries { [it.id, it] }, start, evictableOnDemandCacheDatas) + buildCacheResult(allLoadBalancers, loadBalancerAttributes, usableOnDemandCacheDatas.collectEntries { [it.id, it] }, start, evictableOnDemandCacheDatas) } - private CacheResult buildCacheResult(Collection allLoadBalancers, Map onDemandCacheDataByLb, long start, Collection evictableOnDemandCacheDatas) { + Map buildLoadBalancerAttributes(AmazonElasticLoadBalancing loadBalancing, + List allLoadBalancers, + boolean useEdda) { + Map loadBalancerNameToAttributes + if (useEdda) { + loadBalancerNameToAttributes = eddaApi.classicLoadBalancerAttributes().collectEntries { + [(it.name): it.attributes] + } + } else { + loadBalancerNameToAttributes = allLoadBalancers.collectEntries { + [(it.loadBalancerName): loadBalancing.describeLoadBalancerAttributes( + new DescribeLoadBalancerAttributesRequest().withLoadBalancerName(it.loadBalancerName) + ).loadBalancerAttributes] + } + } + return loadBalancerNameToAttributes + } + + private CacheResult buildCacheResult(Collection allLoadBalancers, Map loadBalancerAttributes, Map onDemandCacheDataByLb, long start, Collection evictableOnDemandCacheDatas) { Map instances = CacheHelpers.cache() Map loadBalancers = CacheHelpers.cache() @@ -183,6 +245,12 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg Collection instanceIds = lb.instances.collect { Keys.getInstanceKey(it.instanceId, account.name, region) } Map lbAttributes = objectMapper.convertValue(lb, ATTRIBUTES) String loadBalancerId = Keys.getLoadBalancerKey(lb.loadBalancerName, account.name, region, lb.getVPCId(), null) + if (loadBalancerAttributes.containsKey(lb.loadBalancerName)) { + lbAttributes.put('attributes', loadBalancerAttributes.get(lb.loadBalancerName)) + if (loadBalancerAttributes.get(lb.loadBalancerName).connectionSettings != null) { + lbAttributes.put('idleTimeout', loadBalancerAttributes.get(lb.loadBalancerName).connectionSettings.idleTimeout) + } + } loadBalancers[loadBalancerId].with { attributes.putAll(lbAttributes) relationships[INSTANCES.ns].addAll(instanceIds) @@ -208,4 +276,3 @@ class AmazonLoadBalancerCachingAgent extends AbstractAmazonLoadBalancerCachingAg ]) } } - diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgent.groovy 
b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgent.groovy index b03ab087aaf..147e2c4a4e6 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgent.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent import com.amazonaws.services.elasticloadbalancing.model.DescribeInstanceHealthRequest +import com.amazonaws.services.elasticloadbalancing.model.DescribeInstanceHealthResult import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerNotFoundException import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature @@ -28,6 +29,7 @@ import com.netflix.spinnaker.cats.agent.DefaultCacheResult import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials @@ -46,6 +48,10 @@ import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LO @Slf4j class AmazonLoadBalancerInstanceStateCachingAgent implements CachingAgent, HealthProvidingCachingAgent, AccountAware { + + final static String healthId = "aws-load-balancer-instance-health" + private final static String STILL_REGISTERING_DESCRIPTION = "Instance registration is still in progress." 
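+  // (Note: the description above is compared by exact string equality against what
+  // DescribeInstanceHealth returns while an instance is still registering.)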
+ final AmazonClientProvider amazonClientProvider final NetflixAmazonCredentials account final String region @@ -53,8 +59,6 @@ class AmazonLoadBalancerInstanceStateCachingAgent implements CachingAgent, Healt final ApplicationContext ctx private Cache cacheView - final static String healthId = "aws-load-balancer-instance-health" - AmazonLoadBalancerInstanceStateCachingAgent(AmazonClientProvider amazonClientProvider, NetflixAmazonCredentials account, String region, @@ -98,31 +102,91 @@ class AmazonLoadBalancerInstanceStateCachingAgent implements CachingAgent, Healt def loadBalancing = amazonClientProvider.getAmazonElasticLoadBalancing(account, region) def allVpcsGlob = Keys.getLoadBalancerKey('*', account.name, region, '*', null) def nonVpcGlob = Keys.getLoadBalancerKey('*', account.name, region, null, null) - def loadBalancerKeys = - getCacheView().filterIdentifiers(LOAD_BALANCERS.ns, allVpcsGlob) + getCacheView().filterIdentifiers(LOAD_BALANCERS.ns, nonVpcGlob) + def loadBalancerKeys = getCacheView() + .filterIdentifiers(LOAD_BALANCERS.ns, allVpcsGlob) + + getCacheView().filterIdentifiers(LOAD_BALANCERS.ns, nonVpcGlob) + + Map lbHealths = new HashMap<>() + Collection instanceRels = new ArrayList<>() - Collection lbHealths = [] - Collection instances = [] for (loadBalancerKey in loadBalancerKeys) { try { Map idObj = Keys.parse(loadBalancerKey) - def lbName = idObj.loadBalancer - def result = loadBalancing.describeInstanceHealth(new DescribeInstanceHealthRequest(lbName)) - def loadBalancerInstances = [] + String lbName = idObj.loadBalancer + if (idObj.loadBalancerType && idObj.loadBalancerType != 'classic') + continue + + List loadBalancerInstances = new ArrayList<>() + DescribeInstanceHealthResult result = loadBalancing + .describeInstanceHealth(new DescribeInstanceHealthRequest(lbName)) + for (instanceState in result.instanceStates) { - def loadBalancerInstance = new LoadBalancerInstance(instanceState.instanceId, instanceState.state, instanceState.reasonCode, instanceState.description) + LoadBalancerInstance loadBalancerInstance = new LoadBalancerInstance( + instanceState.instanceId, + instanceState.state, + instanceState.reasonCode, + instanceState.description) loadBalancerInstances << loadBalancerInstance + + // We want to track how long instances remain in a "still registering" state. Logging any time we + // see an instance with this description is a poor man's way of getting the metrics we need, without + // having to do expensive lookups - we can defer this to our logging platform to do the maths. + // TODO(rz): This kind of metric may be easier to create if we had a method of emitting events when + // cache state changes. 
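+          // A sketch of the explicit-metric alternative, assuming this agent were handed a
+          // Spectator Registry (hypothetical; no such field is wired in by this change):
+          //   registry.counter("loadBalancer.instanceRegistration.inProgress",
+          //       "loadBalancer", lbName).increment()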
+ if (instanceState.description == STILL_REGISTERING_DESCRIPTION) { + log.info("Instance '${instanceState.instanceId}' is still registering with load balancer '$lbName'") + } } - def loadBalancerInstanceState = new LoadBalancerInstanceState(name: lbName, instances: loadBalancerInstances) - def ilbs = InstanceLoadBalancers.fromLoadBalancerInstanceState([loadBalancerInstanceState]) + + LoadBalancerInstanceState loadBalancerInstanceState = new LoadBalancerInstanceState( + name: lbName, + instances: loadBalancerInstances) + List<InstanceLoadBalancers> ilbs = InstanceLoadBalancers + .fromLoadBalancerInstanceState([loadBalancerInstanceState]) + Collection<String> instanceIds = ilbs.collect { + Keys.getInstanceKey(it.instanceId, account.name, region) + } + Map<String, CacheData> instances = providerCache + .getAll(INSTANCES.ns, instanceIds, RelationshipCacheFilter.none()) + .collectEntries { [(it.id): it] } for (InstanceLoadBalancers ilb in ilbs) { String instanceId = Keys.getInstanceKey(ilb.instanceId, account.name, region) String healthId = Keys.getInstanceHealthKey(ilb.instanceId, account.name, region, healthId) Map<String, Object> attributes = objectMapper.convertValue(ilb, ATTRIBUTES) Map<String, Collection<String>> relationships = [(INSTANCES.ns): [instanceId]] - lbHealths.add(new DefaultCacheData(healthId, attributes, relationships)) - instances.add(new DefaultCacheData(instanceId, [:], [(HEALTH.ns): [healthId]])) + + if (instances[instanceId] != null) { + String application = instances[instanceId].attributes.get("application") + if (application != null) { + attributes.put("application", application) + } + } + + CacheData lbHealth = new DefaultCacheData(healthId, attributes, relationships); + CacheData previousLbHealth = lbHealths.put(healthId, lbHealth); + if (previousLbHealth != null) { + // We already had health information about this instance from one + // load balancer. It would be nice to add this health information to + // what we already had, and + // com.netflix.spinnaker.clouddriver.aws.model.edda.InstanceLoadBalancers + // does have a List that we could in + // theory add to, but it's only got one HealthState and multiple + // load balancers could have different opinions about that. + // + // So for now at least, drop the instance state information from + // this previous load balancer on the floor. Log it, but at debug + // since this can happen frequently. + // + // This effectively retains instance health information from the + // last load balancer that supports it, which is consistent with the + // way the redis cache behaves when presented with multiple pieces + // of information.
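The dedupe above leans on java.util.Map#put returning the previous mapping. A self-contained illustration of that last-writer-wins behavior, using plain strings in place of CacheData:

```groovy
// Map.put returns the value previously associated with the key (or null),
// which is exactly the signal used above to spot a second load balancer
// reporting health for the same instance. The newer entry wins.
Map<String, String> lbHealths = new HashMap<>()

def first  = lbHealths.put('health:i-0123', 'lb-a: InService')
def second = lbHealths.put('health:i-0123', 'lb-b: OutOfService')

assert first == null                  // first report for this instance
assert second == 'lb-a: InService'    // lb-a's view was replaced by lb-b's
assert lbHealths.size() == 1          // still one health record per instance
```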
+ log.debug("replaced instance health information for {}: was {}, is now {}", + instanceId, previousLbHealth.attributes, attributes) + continue + } + instanceRels.add(new DefaultCacheData(instanceId, [:], [(HEALTH.ns): [healthId]])) } } catch (LoadBalancerNotFoundException e) { // this is acceptable since we may be waiting for the caches to catch up @@ -130,8 +194,8 @@ class AmazonLoadBalancerInstanceStateCachingAgent implements CachingAgent, Healt } log.info("Caching ${lbHealths.size()} items in ${agentType}") new DefaultCacheResult( - (HEALTH.ns): lbHealths, - (INSTANCES.ns): instances) + (HEALTH.ns): lbHealths.values(), + (INSTANCES.ns): instanceRels) } private Cache getCacheView() { diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgent.groovy index 051fc2ce99b..09c7a0a4cac 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgent.groovy @@ -36,6 +36,7 @@ import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import groovy.util.logging.Slf4j import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE @@ -71,7 +72,7 @@ class AmazonSecurityGroupCachingAgent implements CachingAgent, OnDemandAgent, Ac this.objectMapper = objectMapper this.registry = registry this.eddaTimeoutConfig = eddaTimeoutConfig - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${AmazonCloudProvider.ID}:${OnDemandAgent.OnDemandType.SecurityGroup}") + this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${AmazonCloudProvider.ID}:${OnDemandType.SecurityGroup}") this.lastModifiedKey = Keys.getSecurityGroupKey('LAST_MODIFIED', 'LAST_MODIFIED', region, account.name, null) } @@ -125,8 +126,8 @@ class AmazonSecurityGroupCachingAgent implements CachingAgent, OnDemandAgent, Ac } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.SecurityGroup && cloudProvider == AmazonCloudProvider.ID + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.SecurityGroup && cloudProvider == AmazonCloudProvider.ID } @Override @@ -164,7 +165,7 @@ class AmazonSecurityGroupCachingAgent implements CachingAgent, OnDemandAgent, Ac } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { return [] } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgent.groovy index 9fd27594fb4..6f6809b7f3e 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgent.groovy @@ -16,33 +16,28 @@ 
package com.netflix.spinnaker.clouddriver.aws.provider.agent -import com.netflix.spinnaker.cats.agent.AccountAware -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.SUBNETS - import com.amazonaws.services.ec2.model.Subnet -import com.netflix.awsobjectmapper.AmazonObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.CachingAgent -import com.netflix.spinnaker.cats.agent.DefaultCacheResult +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.agent.* import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import groovy.util.logging.Slf4j +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE +import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.SUBNETS + @Slf4j class AmazonSubnetCachingAgent implements CachingAgent, AccountAware { final AmazonClientProvider amazonClientProvider final NetflixAmazonCredentials account final String region - final AmazonObjectMapper objectMapper + final ObjectMapper amazonObjectMapper static final Set types = Collections.unmodifiableSet([ AUTHORITATIVE.forType(SUBNETS.ns) @@ -51,11 +46,11 @@ class AmazonSubnetCachingAgent implements CachingAgent, AccountAware { AmazonSubnetCachingAgent(AmazonClientProvider amazonClientProvider, NetflixAmazonCredentials account, String region, - AmazonObjectMapper objectMapper) { + ObjectMapper amazonObjectMapper) { this.amazonClientProvider = amazonClientProvider this.account = account this.region = region - this.objectMapper = objectMapper + this.amazonObjectMapper = amazonObjectMapper } @Override @@ -85,7 +80,8 @@ class AmazonSubnetCachingAgent implements CachingAgent, AccountAware { def subnets = ec2.describeSubnets().subnets List data = subnets.collect { Subnet subnet -> - Map attributes = objectMapper.convertValue(subnet, AwsInfrastructureProvider.ATTRIBUTES) + Map attributes = amazonObjectMapper.convertValue(subnet, AwsInfrastructureProvider.ATTRIBUTES) + attributes.putIfAbsent("accountId", account.accountId) new DefaultCacheData(Keys.getSubnetKey(subnet.subnetId, region, account.name), attributes, [:]) diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgent.groovy index d9479a8e695..b57f13b7347 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgent.groovy @@ -52,6 +52,7 @@ import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import 
com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport import com.netflix.spinnaker.clouddriver.aws.data.Keys +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -69,12 +70,18 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, static final Set types = Collections.unmodifiableSet([ AUTHORITATIVE.forType(SERVER_GROUPS.ns), - AUTHORITATIVE.forType(APPLICATIONS.ns), + // clusters exist globally and the caching agent only + // caches regionally so we can't authoritatively evict + // clusters. There is a ClusterCleanupAgent that handles + // eviction of clusters that no longer contain + // server groups. INFORMATIVE.forType(CLUSTERS.ns), + INFORMATIVE.forType(APPLICATIONS.ns), INFORMATIVE.forType(LOAD_BALANCERS.ns), INFORMATIVE.forType(TARGET_GROUPS.ns), INFORMATIVE.forType(LAUNCH_CONFIGS.ns), - INFORMATIVE.forType(INSTANCES.ns) + INFORMATIVE.forType(INSTANCES.ns), + INFORMATIVE.forType(LAUNCH_TEMPLATES.ns) ] as Set) final AmazonCloudProvider amazonCloudProvider @@ -84,6 +91,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, final ObjectMapper objectMapper final Registry registry final EddaTimeoutConfig eddaTimeoutConfig + final AmazonCachingAgentFilter amazonCachingAgentFilter final OnDemandMetricsSupport metricsSupport @@ -93,7 +101,8 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, String region, ObjectMapper objectMapper, Registry registry, - EddaTimeoutConfig eddaTimeoutConfig) { + EddaTimeoutConfig eddaTimeoutConfig, + AmazonCachingAgentFilter amazonCachingAgentFilter) { this.amazonCloudProvider = amazonCloudProvider this.amazonClientProvider = amazonClientProvider this.account = account @@ -101,7 +110,8 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, this.objectMapper = objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) this.registry = registry this.eddaTimeoutConfig = eddaTimeoutConfig - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${amazonCloudProvider.id}:${OnDemandAgent.OnDemandType.ServerGroup}") + this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${amazonCloudProvider.id}:${OnDemandType.ServerGroup}") + this.amazonCachingAgentFilter = amazonCachingAgentFilter } @Override @@ -131,9 +141,9 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, @Override Optional> getCacheKeyPatterns() { - return [ + return Optional.of([ (SERVER_GROUPS.ns): Keys.getServerGroupKey('*', '*', account.name, region) - ] + ]) } static class AmazonClients { @@ -169,8 +179,8 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.ServerGroup && cloudProvider == amazonCloudProvider.id + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.ServerGroup && cloudProvider == amazonCloudProvider.id } @Override @@ -247,7 +257,10 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, log.info("onDemand cache refresh (data: ${data}, evictions: ${evictions}, cacheResult: ${cacheResultAsJson})") return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), cacheResult: cacheResult, evictions: evictions + sourceAgentType: getOnDemandAgentType(), + cacheResult: cacheResult, + evictions: evictions, + 
authoritativeTypes: types.findAll { it.authority == AgentDataType.Authority.AUTHORITATIVE }.collect { it.typeName } ) } @@ -301,6 +314,17 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, // A non-null status indicates that the ASG is in the process of being destroyed (no sense indexing) asgs = asgs.findAll { it.status == null } + // filter asg if there is any filter configuration established + if (amazonCachingAgentFilter.hasTagFilter()) { + asgs = asgs.findAll { asg -> + def asgTags = asg.tags?.collect { + new AmazonCachingAgentFilter.ResourceTag(it.key, it.value) + } + + return amazonCachingAgentFilter.shouldRetainResource(asgTags) + } + } + new AutoScalingGroupsResults(start: start, asgs: asgs) } @@ -443,6 +467,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, log.debug("Caching ${cacheResults[TARGET_GROUPS.ns]?.size()} target groups in ${agentType}") log.debug("Caching ${cacheResults[LAUNCH_CONFIGS.ns]?.size()} launch configs in ${agentType}") log.debug("Caching ${cacheResults[INSTANCES.ns]?.size()} instances in ${agentType}") + log.debug("Caching ${cacheResults[LAUNCH_TEMPLATES.ns]?.size()} launch templates in ${agentType}") if (evictableOnDemandCacheDatas) { log.info("Evicting onDemand cache keys (${evictableOnDemandCacheDatas.collect { "${it.id}/${start - it.attributes.cacheTime}ms" }.join(", ")})") } @@ -456,7 +481,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { def keys = providerCache.filterIdentifiers(ON_DEMAND.ns, Keys.getServerGroupKey("*", "*", account.name, region)) return fetchPendingOnDemandRequests(providerCache, keys) } @@ -496,6 +521,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, Map targetGroups = cache() Map launchConfigs = cache() Map instances = cache() + Map launchTemplates = cache() for (AutoScalingGroup asg : asgs) { def onDemandCacheData = onDemandCacheDataByAsg ? 
onDemandCacheDataByAsg[Keys.getServerGroupKey(asg.autoScalingGroupName, account.name, region)] : null @@ -511,6 +537,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, cache(cacheResults["targetGroups"], targetGroups) cache(cacheResults["launchConfigs"], launchConfigs) cache(cacheResults["instances"], instances) + cache(cacheResults["launchTemplates"], launchTemplates) } else { try { AsgData data = new AsgData(asg, scalingPolicies[asg.autoScalingGroupName], scheduledActions[asg.autoScalingGroupName], account.name, region, subnetMap) @@ -521,6 +548,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, cacheInstances(data, instances) cacheLoadBalancers(data, loadBalancers) cacheTargetGroups(data, targetGroups) + cacheLaunchTemplate(data, launchTemplates) } catch (Exception ex) { log.warn("Failed to cache ${asg.autoScalingGroupName} in ${account.name}/${region}", ex) } @@ -535,6 +563,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, (TARGET_GROUPS.ns): targetGroups.values(), (LAUNCH_CONFIGS.ns): launchConfigs.values(), (INSTANCES.ns) : instances.values(), + (LAUNCH_TEMPLATES.ns): launchTemplates.values(), (ON_DEMAND.ns) : onDemandCacheDataByAsg.values() ], [ (ON_DEMAND.ns) : evictableOnDemandCacheDataIdentifiers @@ -548,6 +577,10 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, cacheDataById[it.id] = it } else { existingCacheData.attributes.putAll(it.attributes) + def application = getApplicationForKey(it) + if (application) { + existingCacheData.attributes.put("application", application) + } it.relationships.each { String relationshipName, Collection relationships -> existingCacheData.relationships[relationshipName].addAll(relationships) } @@ -555,6 +588,20 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, } } + private String getApplicationForKey(CacheData item) { + String application + try { + application = Keys.parse(item.id).get("application") + if (application == null && item.relationships.containsKey("serverGroups")) { + application = Keys.parse(item.relationships.serverGroups[0]).get("application") + } + return application + } catch(Exception e) { + log.error("Failed determining application for {}", item.id, e) + return null + } + } + private void cacheApplication(AsgData data, Map applications) { applications[data.appName].with { attributes.name = data.name.app @@ -568,6 +615,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, private void cacheCluster(AsgData data, Map clusters) { clusters[data.cluster].with { attributes.name = data.name.cluster + attributes.application = data.name.app relationships[APPLICATIONS.ns].add(data.appName) relationships[SERVER_GROUPS.ns].add(data.serverGroup) relationships[LOAD_BALANCERS.ns].addAll(data.loadBalancerNames) @@ -577,6 +625,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, private void cacheServerGroup(AsgData data, Map serverGroups) { serverGroups[data.serverGroup].with { + attributes.application = data.name.app attributes.asg = objectMapper.convertValue(data.asg, ATTRIBUTES) attributes.region = region attributes.name = data.asg.autoScalingGroupName @@ -593,13 +642,16 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, relationships[LOAD_BALANCERS.ns].addAll(data.loadBalancerNames) relationships[TARGET_GROUPS.ns].addAll(data.targetGroupKeys) 
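The getApplicationForKey helper above prefers the application embedded in the item's own cache key and falls back to its first serverGroups relationship. A toy version, with an invented key layout standing in for Keys.parse:

```groovy
// Invented key layout '<provider>:<namespace>:<name>'; the real parsing is
// done by Keys.parse, which understands frigga-style server group names.
def parseApp = { String key ->
  def parts = key.tokenize(':')
  parts[1] in ['serverGroups', 'clusters'] ? parts[2].tokenize('-').first() : null
}

def applicationFor = { String id, Map relationships ->
  parseApp(id) ?: (relationships.serverGroups ? parseApp(relationships.serverGroups[0]) : null)
}

assert applicationFor('aws:serverGroups:myapp-main-v001', [:]) == 'myapp'
assert applicationFor('aws:instances:i-0abc123',
                      [serverGroups: ['aws:serverGroups:myapp-main-v001']]) == 'myapp'
```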
relationships[LAUNCH_CONFIGS.ns].add(data.launchConfig) + relationships[LAUNCH_TEMPLATES.ns].add(data.launchTemplate) relationships[INSTANCES.ns].addAll(data.instanceIds) } } private void cacheLaunchConfig(AsgData data, Map launchConfigs) { - launchConfigs[data.launchConfig].with { - relationships[SERVER_GROUPS.ns].add(data.serverGroup) + if (data.launchConfig) { + launchConfigs[data.launchConfig].with { + relationships[SERVER_GROUPS.ns].add(data.serverGroup) + } } } @@ -629,6 +681,14 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, } } + private void cacheLaunchTemplate(AsgData data, Map launchTemplates) { + if (data.launchTemplate) { + launchTemplates[data.launchTemplate].with { + relationships[SERVER_GROUPS.ns].add(data.serverGroup) + } + } + } + private AutoScalingGroup loadAutoScalingGroup(String autoScalingGroupName, boolean skipEdda) { def autoScaling = amazonClientProvider.getAutoScaling(account, region, skipEdda) def result = autoScaling.describeAutoScalingGroups( @@ -683,6 +743,7 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, final String serverGroup final String vpcId final String launchConfig + final String launchTemplate final Set loadBalancerNames final Set targetGroupKeys final Set targetGroupNames @@ -714,7 +775,14 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware, vpcId = vpcIds.first() } this.vpcId = vpcId - launchConfig = Keys.getLaunchConfigKey(asg.launchConfigurationName, account, region) + if (asg.launchTemplate) { + launchTemplate = Keys.getLaunchTemplateKey(asg.launchTemplate.launchTemplateName, account, region) + } else if (asg.mixedInstancesPolicy) { + launchTemplate = Keys.getLaunchTemplateKey(asg.mixedInstancesPolicy.launchTemplate.launchTemplateSpecification.launchTemplateName, account, region) + } else { + launchConfig = Keys.getLaunchConfigKey(asg.launchConfigurationName, account, region) + } + loadBalancerNames = (asg.loadBalancerNames.collect { Keys.getLoadBalancerKey(it, account, region, vpcId, null) } as Set).asImmutable() diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/EddaLoadBalancerCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/EddaLoadBalancerCachingAgent.groovy index 2d900326a39..f9529fc5252 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/EddaLoadBalancerCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/EddaLoadBalancerCachingAgent.groovy @@ -25,6 +25,7 @@ import com.netflix.spinnaker.cats.agent.CachingAgent import com.netflix.spinnaker.cats.agent.DefaultCacheResult import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.edda.EddaApi @@ -81,20 +82,35 @@ class EddaLoadBalancerCachingAgent implements CachingAgent, HealthProvidingCachi List ilbs = InstanceLoadBalancers.fromLoadBalancerInstanceState(balancerInstances) Collection lbHealths = new ArrayList(ilbs.size()) - Collection instances = new ArrayList(ilbs.size()) + Collection instanceRels = new ArrayList(ilbs.size()) + + Collection instanceIds = ilbs.collect { + 
Keys.getInstanceKey(it.instanceId, account.name, region) + } + Map instances = providerCache + .getAll(INSTANCES.ns, instanceIds, RelationshipCacheFilter.none()) + .collectEntries { [(it.id): it] } for (InstanceLoadBalancers ilb : ilbs) { String instanceId = Keys.getInstanceKey(ilb.instanceId, account.name, region) String healthId = Keys.getInstanceHealthKey(ilb.instanceId, account.name, region, healthId) Map attributes = objectMapper.convertValue(ilb, ATTRIBUTES) Map> relationships = [(INSTANCES.ns): [instanceId]] + + if (instances[instanceId] != null) { + String application = instances[instanceId].attributes.get("application") + if (application != null) { + attributes.put("application", application) + } + } + lbHealths.add(new DefaultCacheData(healthId, attributes, relationships)) - instances.add(new DefaultCacheData(instanceId, [:], [(HEALTH.ns): [healthId]])) + instanceRels.add(new DefaultCacheData(instanceId, [:], [(HEALTH.ns): [healthId]])) } - log.info("Caching ${instances.size()} items in ${agentType}") + log.info("Caching ${instanceRels.size()} items in ${agentType}") new DefaultCacheResult( (HEALTH.ns): lbHealths, - (INSTANCES.ns): instances) + (INSTANCES.ns): instanceRels) } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgent.groovy index ea667afcf29..4daa47140bc 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgent.groovy @@ -37,11 +37,12 @@ import com.netflix.spinnaker.clouddriver.aws.data.Keys import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import lombok.Getter +import lombok.Setter import org.slf4j.Logger import org.slf4j.LoggerFactory import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.NAMED_IMAGES @@ -53,7 +54,7 @@ class ImageCachingAgent implements CachingAgent, AccountAware, DriftMetric, Cust final Set types = Collections.unmodifiableSet([ AUTHORITATIVE.forType(IMAGES.ns), - INFORMATIVE.forType(NAMED_IMAGES.ns) + AUTHORITATIVE.forType(NAMED_IMAGES.ns) ] as Set) final AmazonClientProvider amazonClientProvider @@ -61,7 +62,9 @@ class ImageCachingAgent implements CachingAgent, AccountAware, DriftMetric, Cust final String region final ObjectMapper objectMapper final Registry registry - final boolean includePublicImages + @Getter + @Setter + boolean includePublicImages final long pollIntervalMillis final DynamicConfigService dynamicConfigService @@ -116,7 +119,7 @@ class ImageCachingAgent implements CachingAgent, AccountAware, DriftMetric, Cust @Override CacheResult loadData(ProviderCache providerCache) { - if (includePublicImages && !dynamicConfigService.isEnabled("aws.defaults.publicImages", true)) { + if (includePublicImages && !dynamicConfigService.isEnabled("aws.defaults.public-images", true)) { log.info("short-circuiting with empty result set for public images in ${agentType}") return new 
DefaultCacheResult((IMAGES.ns): [], (NAMED_IMAGES.ns): []) } @@ -130,6 +133,12 @@ class ImageCachingAgent implements CachingAgent, AccountAware, DriftMetric, Cust request.withFilters(new Filter('is-public', ['false'])) } + List<String> imageStates = dynamicConfigService.getConfig(List, "aws.defaults.image-states", List.of()); + if (!imageStates.isEmpty()) { + log.debug("using image state filter '${imageStates}'") + request.withFilters(new Filter('state', imageStates)) + } + List<Image> images = amazonEC2.describeImages(request).images Long start = null if (account.eddaEnabled) { @@ -143,23 +152,35 @@ } Collection<CacheData> imageCacheData = new ArrayList<>(images.size()) - Collection<CacheData> namedImageCacheData = new ArrayList<>(images.size()) + Map<String, CacheData> namedImageCacheDataMap = new HashMap<>(images.size()) for (Image image : images) { Map<String, Object> attributes = objectMapper.convertValue(image, ATTRIBUTES) def imageId = Keys.getImageKey(image.imageId, account.name, region) def namedImageId = Keys.getNamedImageKey(account.name, image.name) imageCacheData.add(new DefaultCacheData(imageId, attributes, [(NAMED_IMAGES.ns): [namedImageId]])) + + CacheData namedImageCacheData = namedImageCacheDataMap.get(namedImageId); + if (namedImageCacheData == null) { + namedImageCacheDataMap.put(namedImageId, new DefaultCacheData(namedImageId, [ + name : image.name, + virtualizationType: image.virtualizationType, + architecture : image.architecture, + creationDate : image.creationDate + ], [(IMAGES.ns): [imageId]])) + } else { + // There's already a named image with this name, so add the imageId to + // the IMAGES.ns relationship. Note though that there is only one + // virtualizationType and one creationDate per named image... so maybe + // those attributes don't really belong with named images?
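The else branch above folds a duplicate image name into the existing NAMED_IMAGES entry instead of emitting a second one. A minimal sketch of that merge, with toy maps in place of DefaultCacheData:

```groovy
// Several AMIs can surface with the same name (public images from different
// owners, for example), so keep one named-image entry per name and
// accumulate every imageId into its relationship set.
Map<String, Map> namedImages = new HashMap<>()

[['ami-1', 'base-image-2024'], ['ami-2', 'base-image-2024']].each { imageId, name ->
  def entry = namedImages[name]
  if (entry == null) {
    namedImages[name] = [name: name, images: [imageId] as Set]
  } else {
    entry.images << imageId   // merge into the existing relationship set
  }
}

assert namedImages.size() == 1
assert namedImages['base-image-2024'].images == ['ami-1', 'ami-2'] as Set
```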
+ Map> relationships = namedImageCacheData.getRelationships(); + Collection imageRelationships = relationships.get(IMAGES.ns); + imageRelationships.add(imageId); + } } recordDrift(start) log.info("Caching ${imageCacheData.size()} items in ${agentType}") - new DefaultCacheResult((IMAGES.ns): imageCacheData, (NAMED_IMAGES.ns): namedImageCacheData) + new DefaultCacheResult((IMAGES.ns): imageCacheData, (NAMED_IMAGES.ns): namedImageCacheDataMap.values()) } - } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/InstanceCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/InstanceCachingAgent.groovy index b4374d70786..0c40532c19a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/InstanceCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/InstanceCachingAgent.groovy @@ -196,9 +196,18 @@ class InstanceCachingAgent implements CachingAgent, AccountAware, DriftMetric { relationships[IMAGES.ns].add(data.imageId) if (data.serverGroup) { relationships[SERVER_GROUPS.ns].add(data.serverGroup) + + def application = Keys.parse(data.serverGroup).get("application") + if (application != null) { + attributes.put("application", application) + } } else { relationships[SERVER_GROUPS.ns].clear() } + def capacityType = getCapacityType(data.instance) + if (capacityType) { + attributes.put("capacityType", capacityType) + } } } @@ -217,6 +226,18 @@ class InstanceCachingAgent implements CachingAgent, AccountAware, DriftMetric { awsInstanceHealth } + private String getCapacityType(Instance instance) { + if (instance.instanceLifecycle == null) { + return "on-demand" + } + + if (instance.instanceLifecycle.toString().equalsIgnoreCase("spot")) { + return "spot" + } + + return null + } + private static class InstanceData { static final String ASG_TAG_NAME = "aws:autoscaling:groupName" static final String SHUTTING_DOWN = InstanceStateName.ShuttingDown.toString() diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/LaunchConfigCachingAgent.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/LaunchConfigCachingAgent.groovy index ca2992f2bd7..fdb2be8a752 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/LaunchConfigCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/LaunchConfigCachingAgent.groovy @@ -105,9 +105,16 @@ class LaunchConfigCachingAgent implements CachingAgent, AccountAware, DriftMetri } Collection launchConfigData = launchConfigs.collect { LaunchConfiguration lc -> + String key = Keys.getLaunchConfigKey(lc.launchConfigurationName, account.name, region) + String application = Keys.parse(key).get("application") Map attributes = objectMapper.convertValue(lc, ATTRIBUTES); + + if (application != null) { + attributes.put("application", application) + } + Map> relationships = [(IMAGES.ns):[Keys.getImageKey(lc.imageId, account.name, region)]] - new DefaultCacheData(Keys.getLaunchConfigKey(lc.launchConfigurationName, account.name, region), attributes, relationships) + new DefaultCacheData(key, attributes, relationships) } recordDrift(start) diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ReservationReportCachingAgent.groovy 
b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ReservationReportCachingAgent.groovy index f9a31155aba..f2e06d7b4e2 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ReservationReportCachingAgent.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ReservationReportCachingAgent.groovy @@ -16,7 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent -import com.amazonaws.services.ec2.model.DescribeAccountAttributesRequest + import com.amazonaws.services.ec2.model.DescribeInstancesRequest import com.fasterxml.jackson.annotation.JsonCreator import com.fasterxml.jackson.annotation.JsonProperty @@ -50,6 +50,7 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.util.logging.Slf4j import org.springframework.context.ApplicationContext @@ -79,10 +80,9 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen final AmazonClientProvider amazonClientProvider final AmazonS3DataProvider amazonS3DataProvider - final Collection accounts + final CredentialsRepository credentialsRepository; final ObjectMapper objectMapper final AccountReservationDetailSerializer accountReservationDetailSerializer - final Set vpcOnlyAccounts final MetricsSupport metricsSupport final Registry registry @@ -90,13 +90,13 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen ReservationReportCachingAgent(Registry registry, AmazonClientProvider amazonClientProvider, AmazonS3DataProvider amazonS3DataProvider, - Collection accounts, + CredentialsRepository credentialsRepository, ObjectMapper objectMapper, ExecutorService reservationReportPool, ApplicationContext ctx) { this.amazonClientProvider = amazonClientProvider this.amazonS3DataProvider = amazonS3DataProvider - this.accounts = accounts + this.credentialsRepository = credentialsRepository def module = new SimpleModule() accountReservationDetailSerializer = new AccountReservationDetailSerializer() @@ -105,28 +105,10 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen this.objectMapper = objectMapper.copy().enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS).registerModule(module) this.reservationReportPool = reservationReportPool this.ctx = ctx - this.vpcOnlyAccounts = determineVpcOnlyAccounts() this.metricsSupport = new MetricsSupport(objectMapper, registry, { getCacheView() }) this.registry = registry } - private Set determineVpcOnlyAccounts() { - def vpcOnlyAccounts = [] - - accounts.each { credentials -> - def amazonEC2 = amazonClientProvider.getAmazonEC2(credentials, credentials.regions[0].name) - def describeAccountAttributesResult = amazonEC2.describeAccountAttributes( - new DescribeAccountAttributesRequest().withAttributeNames("supported-platforms") - ) - if (describeAccountAttributesResult.accountAttributes[0].attributeValues*.attributeValue == ["VPC"]) { - vpcOnlyAccounts << credentials.name - } - } - - log.info("VPC Only Accounts: ${vpcOnlyAccounts.join(", ")}") - return vpcOnlyAccounts - } - @Override long getPollIntervalMillis() { return DEFAULT_POLL_INTERVAL_MILLIS @@ -173,14 +155,13 @@ class 
ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen } public Collection getAccounts() { - return accounts; + return credentialsRepository.getAll(); } @Override CacheResult loadData(ProviderCache providerCache) { long startTime = System.currentTimeMillis() log.info("Describing items in ${agentType}") - ConcurrentHashMap reservations = new ConcurrentHashMap<>() ConcurrentHashMap> errorsByRegion = new ConcurrentHashMap<>() @@ -211,15 +192,6 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen !errorsByRegion.containsKey(it.region()) } - // v1 is a legacy report that does not differentiate between vpc and non-vpc reserved instances - accountReservationDetailSerializer.mergeVpcReservations = true - def v1 = objectMapper.readValue( - objectMapper - .writerWithView(AmazonReservationReport.Views.V1.class) - .writeValueAsString(amazonReservationReport), - Map - ) - // v2 differentiates reservations between vpc and non-vpc accountReservationDetailSerializer.mergeVpcReservations = false def v2 = objectMapper.readValue( @@ -234,7 +206,7 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen objectMapper .writerWithView(AmazonReservationReport.Views.V3.class) .writeValueAsString( - new AmazonReservationReportBuilder.V3().build(objectMapper.convertValue(v2, AmazonReservationReport)) + new AmazonReservationReportBuilder.V3().build(registry, objectMapper.convertValue(v2, AmazonReservationReport)) ), Map ) @@ -262,7 +234,6 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen return new DefaultCacheResult( (RESERVATION_REPORTS.ns): [ - new MutableCacheData("v1", ["report": v1], [:]), new MutableCacheData("v2", ["report": v2], [:]), // temporarily backport the changes from v4 to v3 (leaving v2_5 to be what 'v3' used to be) @@ -319,12 +290,7 @@ class ReservationReportCachingAgent implements CachingAgent, CustomScheduledAgen def osType = operatingSystemType(it.productDescription) def reservation = getReservation(region.name, it.availabilityZone, osType.name, it.instanceType) reservation.totalReserved.addAndGet(it.instanceCount) - - if (osType.isVpc || vpcOnlyAccounts.contains(credentials.name)) { - reservation.getAccount(credentials.name).reservedVpc.addAndGet(it.instanceCount) - } else { - reservation.getAccount(credentials.name).reserved.addAndGet(it.instanceCount) - } + reservation.getAccount(credentials.name).reservedVpc.addAndGet(it.instanceCount) } startTime = System.currentTimeMillis() diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsInfrastructureProviderConfig.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsInfrastructureProviderConfig.groovy index fc3fdf2f552..318087b9883 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsInfrastructureProviderConfig.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsInfrastructureProviderConfig.groovy @@ -16,105 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.provider.config -import com.netflix.awsobjectmapper.AmazonObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper + import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider -import 
com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonElasticIpCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonInstanceTypeCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonKeyPairCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonSecurityGroupCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonSubnetCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonVpcCachingAgent -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope - -import java.util.concurrent.ConcurrentHashMap @Configuration class AwsInfrastructureProviderConfig { @Bean - @DependsOn('netflixAmazonCredentials') - AwsInfrastructureProvider awsInfrastructureProvider(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, - AmazonObjectMapper amazonObjectMapper, - Registry registry, - EddaTimeoutConfig eddaTimeoutConfig) { - def awsInfrastructureProvider = - new AwsInfrastructureProvider(Collections.newSetFromMap(new ConcurrentHashMap())) - - synchronizeAwsInfrastructureProvider(awsInfrastructureProvider, - amazonClientProvider, - accountCredentialsRepository, - amazonObjectMapper, - registry, - eddaTimeoutConfig) - - awsInfrastructureProvider - } - - @Bean - AwsInfrastructureProviderSynchronizerTypeWrapper awsInfrastructureProviderSynchronizerTypeWrapper() { - new AwsInfrastructureProviderSynchronizerTypeWrapper() - } - - class AwsInfrastructureProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return AwsInfrastructureProviderSynchronizer - } - } - - class AwsInfrastructureProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - AwsInfrastructureProviderSynchronizer synchronizeAwsInfrastructureProvider(AwsInfrastructureProvider awsInfrastructureProvider, - AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, - AmazonObjectMapper amazonObjectMapper, - Registry registry, - EddaTimeoutConfig eddaTimeoutConfig) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(awsInfrastructureProvider) - def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, NetflixAmazonCredentials) - - Set regions = new HashSet<>(); - allAccounts.each { NetflixAmazonCredentials credentials -> - for (AmazonCredentials.AWSRegion region : credentials.regions) { - if (!scheduledAccounts.contains(credentials.name)) { - def newlyAddedAgents = [] - - if (regions.add(region.name)) { - newlyAddedAgents << new AmazonInstanceTypeCachingAgent(region.name, accountCredentialsRepository) - } - - newlyAddedAgents << new AmazonElasticIpCachingAgent(amazonClientProvider, credentials, region.name) - newlyAddedAgents << 
new AmazonKeyPairCachingAgent(amazonClientProvider, credentials, region.name) - newlyAddedAgents << new AmazonSecurityGroupCachingAgent(amazonClientProvider, credentials, region.name, amazonObjectMapper, registry, eddaTimeoutConfig) - newlyAddedAgents << new AmazonSubnetCachingAgent(amazonClientProvider, credentials, region.name, amazonObjectMapper) - newlyAddedAgents << new AmazonVpcCachingAgent(amazonClientProvider, credentials, region.name, amazonObjectMapper) - - // If there is an agent scheduler, then this provider has been through the AgentController in the past. - // In that case, we need to do the scheduling here (because accounts have been added to a running system). - if (awsInfrastructureProvider.agentScheduler) { - ProviderUtils.rescheduleAgents(awsInfrastructureProvider, newlyAddedAgents) - } - - awsInfrastructureProvider.agents.addAll(newlyAddedAgents) - } - } - } - - new AwsInfrastructureProviderSynchronizer() + AwsInfrastructureProvider awsInfrastructureProvider() { + return new AwsInfrastructureProvider() } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsProviderConfig.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsProviderConfig.groovy index 192768a6d03..d9834050993 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsProviderConfig.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/config/AwsProviderConfig.groovy @@ -16,43 +16,16 @@ package com.netflix.spinnaker.clouddriver.aws.provider.config -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentProvider -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonApplicationLoadBalancerCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonCertificateCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonLoadBalancerCachingAgent - -import com.netflix.spinnaker.clouddriver.aws.provider.agent.ReservedInstancesCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3DataProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import com.netflix.spinnaker.clouddriver.aws.edda.EddaApiFactory +import com.google.common.util.concurrent.ThreadFactoryBuilder import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider -import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonLoadBalancerInstanceStateCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.ClusterCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.EddaLoadBalancerCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.ImageCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.InstanceCachingAgent -import 
com.netflix.spinnaker.clouddriver.aws.provider.agent.LaunchConfigCachingAgent -import com.netflix.spinnaker.clouddriver.aws.provider.agent.ReservationReportCachingAgent -import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService -import org.springframework.beans.factory.config.ConfigurableBeanFactory +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.model.ReservationReport +import com.netflix.spinnaker.credentials.CredentialsRepository +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.ApplicationContext import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope -import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.ExecutorService import java.util.concurrent.Executors @@ -60,156 +33,17 @@ import java.util.concurrent.Executors @EnableConfigurationProperties(ReservationReportConfigurationProperties) class AwsProviderConfig { @Bean - @DependsOn('netflixAmazonCredentials') - AwsProvider awsProvider(AmazonCloudProvider amazonCloudProvider, - AmazonClientProvider amazonClientProvider, - AmazonS3DataProvider amazonS3DataProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - EddaApiFactory eddaApiFactory, - ApplicationContext ctx, - Registry registry, - ExecutorService reservationReportPool, - Optional> agentProviders, - EddaTimeoutConfig eddaTimeoutConfig, - DynamicConfigService dynamicConfigService) { - def awsProvider = - new AwsProvider(accountCredentialsRepository, Collections.newSetFromMap(new ConcurrentHashMap())) - - synchronizeAwsProvider(awsProvider, - amazonCloudProvider, - amazonClientProvider, - amazonS3DataProvider, - accountCredentialsRepository, - objectMapper, - eddaApiFactory, - ctx, - registry, - reservationReportPool, - agentProviders.orElse(Collections.emptyList()), - eddaTimeoutConfig, - dynamicConfigService) - - awsProvider + AwsProvider awsProvider(CredentialsRepository accountCredentialsRepository) { + return new AwsProvider(accountCredentialsRepository) } @Bean + @ConditionalOnProperty("reports.reservation.enabled") ExecutorService reservationReportPool(ReservationReportConfigurationProperties reservationReportConfigurationProperties) { - return Executors.newFixedThreadPool(reservationReportConfigurationProperties.threadPoolSize) - } - - @Bean - AwsProviderSynchronizerTypeWrapper awsProviderSynchronizerTypeWrapper() { - new AwsProviderSynchronizerTypeWrapper() - } - - class AwsProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return AwsProviderSynchronizer - } - } - - class AwsProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - AwsProviderSynchronizer synchronizeAwsProvider(AwsProvider awsProvider, - AmazonCloudProvider amazonCloudProvider, - AmazonClientProvider amazonClientProvider, - AmazonS3DataProvider amazonS3DataProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - EddaApiFactory eddaApiFactory, - ApplicationContext ctx, - Registry registry, - ExecutorService reservationReportPool, - Collection agentProviders, - EddaTimeoutConfig eddaTimeoutConfig, - 
DynamicConfigService dynamicConfigService) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(awsProvider) - Set allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, NetflixAmazonCredentials) - - List newlyAddedAgents = [] - - //only index public images once per region - Set publicRegions = [] - - //sort the accounts in case of a reconfigure, we are more likely to re-index the public images in the same caching agent - //TODO(cfieber)-rework this is after rework of AWS Image/NamedImage keys - allAccounts.sort { it.name }.each { NetflixAmazonCredentials credentials -> - for (AmazonCredentials.AWSRegion region : credentials.regions) { - if (!scheduledAccounts.contains(credentials.name)) { - newlyAddedAgents << new ClusterCachingAgent(amazonCloudProvider, amazonClientProvider, credentials, region.name, objectMapper, registry, eddaTimeoutConfig) - newlyAddedAgents << new LaunchConfigCachingAgent(amazonClientProvider, credentials, region.name, objectMapper, registry) - newlyAddedAgents << new ImageCachingAgent(amazonClientProvider, credentials, region.name, objectMapper, registry, false, dynamicConfigService) - if (!publicRegions.contains(region.name)) { - newlyAddedAgents << new ImageCachingAgent(amazonClientProvider, credentials, region.name, objectMapper, registry, true, dynamicConfigService) - publicRegions.add(region.name) - } - newlyAddedAgents << new InstanceCachingAgent(amazonClientProvider, credentials, region.name, objectMapper, registry) - newlyAddedAgents << new AmazonLoadBalancerCachingAgent(amazonCloudProvider, amazonClientProvider, credentials, region.name, objectMapper, registry) - newlyAddedAgents << new AmazonApplicationLoadBalancerCachingAgent(amazonCloudProvider, amazonClientProvider, credentials, region.name, eddaApiFactory.createApi(credentials.edda, region.name), objectMapper, registry, eddaTimeoutConfig) - newlyAddedAgents << new ReservedInstancesCachingAgent(amazonClientProvider, credentials, region.name, objectMapper, registry) - newlyAddedAgents << new AmazonCertificateCachingAgent(amazonClientProvider, credentials, region.name, objectMapper, registry) - if (credentials.eddaEnabled && !eddaTimeoutConfig.disabledRegions.contains(region.name)) { - newlyAddedAgents << new EddaLoadBalancerCachingAgent(eddaApiFactory.createApi(credentials.edda, region.name), credentials, region.name, objectMapper) - } else { - newlyAddedAgents << new AmazonLoadBalancerInstanceStateCachingAgent( - amazonClientProvider, credentials, region.name, objectMapper, ctx - ) - } - } - } - } - - // If there is an agent scheduler, then this provider has been through the AgentController in the past. - if (awsProvider.agentScheduler) { - synchronizeReservationReportCachingAgentAccounts(awsProvider, allAccounts) - } else { - // This caching agent runs across all accounts in one iteration (to maintain consistency). 
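In place of the synchronizer plumbing deleted here, the new reservationReportPool bean later in this diff builds its executor with a named thread factory. A small sketch of the same pattern; the pool size is illustrative:

```groovy
import com.google.common.util.concurrent.ThreadFactoryBuilder

import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors

// Guava's ThreadFactoryBuilder names the workers, so they are easy to pick
// out in thread dumps and log lines (ReservationReport-0, -1, ...).
ExecutorService pool = Executors.newFixedThreadPool(
    4,
    new ThreadFactoryBuilder().setNameFormat('ReservationReport-%d').build())

pool.submit { println Thread.currentThread().name }   // e.g. ReservationReport-0
pool.shutdown()
```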
- newlyAddedAgents << new ReservationReportCachingAgent( - registry, amazonClientProvider, amazonS3DataProvider, allAccounts, objectMapper, reservationReportPool, ctx - ) - } - - agentProviders.findAll { it.supports(AwsProvider.PROVIDER_NAME) }.each { - newlyAddedAgents.addAll(it.agents()) - } - - awsProvider.agents.addAll(newlyAddedAgents) - awsProvider.synchronizeHealthAgents() - - new AwsProviderSynchronizer() - } - - private void synchronizeReservationReportCachingAgentAccounts(AwsProvider awsProvider, Collection allAccounts) { - ReservationReportCachingAgent reservationReportCachingAgent = awsProvider.agents.find { agent -> - agent instanceof ReservationReportCachingAgent - } - - if (reservationReportCachingAgent) { - def reservationReportAccounts = reservationReportCachingAgent.accounts - def oldAccountNames = reservationReportAccounts.collect { it.name } - def newAccountNames = allAccounts.collect { it.name } - def accountNamesToDelete = oldAccountNames - newAccountNames - def accountNamesToAdd = newAccountNames - oldAccountNames - - accountNamesToDelete.each { accountNameToDelete -> - def accountToDelete = reservationReportAccounts.find { it.name == accountNameToDelete } - - if (accountToDelete) { - reservationReportAccounts.remove(accountToDelete) - } - } - - accountNamesToAdd.each { accountNameToAdd -> - def accountToAdd = allAccounts.find { it.name == accountNameToAdd } - - if (accountToAdd) { - reservationReportAccounts.add(accountToAdd) - } - } - } + return Executors.newFixedThreadPool( + reservationReportConfigurationProperties.threadPoolSize, + new ThreadFactoryBuilder() + .setNameFormat(ReservationReport.class.getSimpleName() + "-%d") + .build()); } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonApplicationProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonApplicationProvider.groovy index 7ca01afe4e1..f05c8b7dddd 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonApplicationProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonApplicationProvider.groovy @@ -16,12 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider.view -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.model.Application import com.netflix.spinnaker.clouddriver.model.ApplicationProvider @@ -35,48 +30,53 @@ import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* class AmazonApplicationProvider implements ApplicationProvider { private final AmazonCloudProvider amazonCloudProvider private final Cache cacheView - private final ObjectMapper objectMapper @Autowired - AmazonApplicationProvider(AmazonCloudProvider amazonCloudProvider, Cache cacheView, ObjectMapper objectMapper) { + AmazonApplicationProvider(AmazonCloudProvider amazonCloudProvider, Cache cacheView) { this.amazonCloudProvider = amazonCloudProvider this.cacheView = cacheView - this.objectMapper = objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) } @Override Set getApplications(boolean expand) { - def 
relationships = expand ? RelationshipCacheFilter.include(CLUSTERS.ns) : RelationshipCacheFilter.none() - Collection applications = cacheView.getAll( - APPLICATIONS.ns, cacheView.filterIdentifiers(APPLICATIONS.ns, "${amazonCloudProvider.id}:*"), relationships - ) - applications.collect this.&translate + String allAwsGlob = "${amazonCloudProvider.id}:*" + + // ignoring expand since we are deriving existence of the app by presence of server groups + // rather than the application cacheData which is not reliably updated or evicted + Map>> appClusters = getAppClustersByAccount(allAwsGlob) + return appClusters.findResults {translate(it.key, appClusters) } } @Override Application getApplication(String name) { - translate(cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(name))) + name = name.toLowerCase() + String glob = Keys.getServerGroupKey("${name}*", "*", "*", "*") + return translate(name, getAppClustersByAccount(glob)) } - Application translate(CacheData cacheData) { - if (cacheData == null) { - return null + private Map>> getAppClustersByAccount(String glob) { + // app -> account -> [clusterName..] + Map>> appClustersByAccount = [:].withDefault { [:].withDefault { [] as Set } } + Collection serverGroupKeys = cacheView.filterIdentifiers(SERVER_GROUPS.ns, glob) + for (String key : serverGroupKeys) { + Map sg = Keys.parse(key) + if (sg && sg.application && sg.cluster && sg.account) { + appClustersByAccount.get(sg.application).get(sg.account).add(sg.cluster) + } } + return appClustersByAccount + } - String name = Keys.parse(cacheData.id).application - Map attributes = objectMapper.convertValue(cacheData.attributes, CatsApplication.ATTRIBUTES) - Map> clusterNames = [:].withDefault { new HashSet() } - for (String clusterId : cacheData.relationships[CLUSTERS.ns]) { - Map cluster = Keys.parse(clusterId) - if (cluster.account && cluster.cluster) { - clusterNames[cluster.account].add(cluster.cluster) - } + Application translate(String name, Map>> appClustersByAccount) { + Map> clusterNames = appClustersByAccount.get(name) + if (!clusterNames) { + return null } - new CatsApplication(name, attributes, clusterNames) + Map attributes = Map.of("name", name) + return new CatsApplication(name, attributes, clusterNames) } private static class CatsApplication implements Application { - public static final TypeReference> ATTRIBUTES = new TypeReference>() {} final String name final Map attributes final Map> clusterNames diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProvider.groovy index 0f82abbe445..a7962b01487 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProvider.groovy @@ -17,19 +17,14 @@ package com.netflix.spinnaker.clouddriver.aws.provider.view import com.amazonaws.services.cloudwatch.AmazonCloudWatch -import com.amazonaws.services.cloudwatch.model.Dimension -import com.amazonaws.services.cloudwatch.model.DimensionFilter -import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsRequest -import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsResult -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest +import com.amazonaws.services.cloudwatch.model.* import 
com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider -import com.netflix.spinnaker.clouddriver.aws.model.AmazonMetricDatapoint import com.netflix.spinnaker.clouddriver.aws.model.AmazonMetricDescriptor import com.netflix.spinnaker.clouddriver.aws.model.AmazonMetricStatistics import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.model.CloudMetricProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @@ -37,15 +32,15 @@ import org.springframework.stereotype.Component class AmazonCloudMetricProvider implements CloudMetricProvider { final AmazonClientProvider amazonClientProvider - final AccountCredentialsProvider accountCredentialsProvider + final CredentialsRepository credentialsRepository final AmazonCloudProvider amazonCloudProvider @Autowired AmazonCloudMetricProvider(AmazonClientProvider amazonClientProvider, - AccountCredentialsProvider accountCredentialsProvider, + CredentialsRepository credentialsRepository, AmazonCloudProvider amazonCloudProvider) { this.amazonClientProvider = amazonClientProvider - this.accountCredentialsProvider = accountCredentialsProvider + this.credentialsRepository = credentialsRepository this.amazonCloudProvider = amazonCloudProvider } @@ -120,7 +115,7 @@ class AmazonCloudMetricProvider implements CloudMetricProvider, ServerGroupProvider { @@ -48,6 +50,9 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro @Value('${default.build.host:http://builds.netflix.com/}') String defaultBuildHost + @Value('${sql.cache.enabled:false}') + Boolean sqlEnabled + @Autowired AmazonClusterProvider(AmazonCloudProvider amazonCloudProvider, Cache cacheView, AwsProvider awsProvider) { this.amazonCloudProvider = amazonCloudProvider @@ -81,19 +86,31 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro } def asg = serverGroupData.attributes["asg"] + def serverGroupById = [(serverGroupData.id): new AmazonServerGroup(serverGroupData.attributes)] + def serverGroup = serverGroupById.values().first() + + String imageId + Map ltSpec = serverGroup.getLaunchTemplateSpecification() + if (ltSpec) { + String launchTemplateName = ltSpec.get('launchTemplateName') + String launchTemplateKey = Keys.getLaunchTemplateKey(launchTemplateName, account, region) + CacheData launchTemplate = cacheView.get(LAUNCH_TEMPLATES.ns, launchTemplateKey) + updateServerGroupLaunchSettings(serverGroupById, [launchTemplate]) + def launchTemplateData = (launchTemplate?.attributes?.get("latestVersion") as Map)?.get("launchTemplateData") + imageId = (launchTemplateData as Map)?.get("imageId") + } else { + String launchConfigKey = Keys.getLaunchConfigKey(serverGroupData?.attributes['launchConfigName'] as String, account, region) + CacheData launchConfigs = cacheView.get(LAUNCH_CONFIGS.ns, launchConfigKey) + updateServerGroupLaunchSettings(serverGroupById, [launchConfigs]) + imageId = launchConfigs?.attributes?.get('imageId') + } - String launchConfigKey = Keys.getLaunchConfigKey(serverGroupData?.attributes['launchConfigName'] as String, account, region ) - CacheData launchConfigs = cacheView.get(LAUNCH_CONFIGS.ns, launchConfigKey) - - String imageId = launchConfigs?.attributes?.get('imageId') CacheData imageConfigs = imageId ? 
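The cluster provider hunk above resolves a server group's image id from its launch template when one is present, falling back to the launch config otherwise. A small sketch of that resolution order, using plain nested maps as stand-ins for CacheData attributes:

import java.util.Map;

public class ImageIdSketch {
  // Mirrors the lookup above: latestVersion.launchTemplateData.imageId for templates,
  // a flat imageId attribute for launch configs. Shapes are illustrative.
  @SuppressWarnings("unchecked")
  static String resolveImageId(Map<String, Object> launchTemplateAttrs, Map<String, Object> launchConfigAttrs) {
    if (launchTemplateAttrs != null) {
      Map<String, Object> latest = (Map<String, Object>) launchTemplateAttrs.get("latestVersion");
      Map<String, Object> data = latest == null ? null : (Map<String, Object>) latest.get("launchTemplateData");
      return data == null ? null : (String) data.get("imageId");
    }
    return launchConfigAttrs == null ? null : (String) launchConfigAttrs.get("imageId");
  }

  public static void main(String[] args) {
    Map<String, Object> lt = Map.of("latestVersion", Map.of("launchTemplateData", Map.of("imageId", "ami-12345678")));
    System.out.println(resolveImageId(lt, null)); // ami-12345678
  }
}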
cacheView.get(IMAGES.ns, Keys.getImageKey(imageId, account, region)) : null - - def serverGroup = new AmazonServerGroup(serverGroupData.attributes) - serverGroup.accountName = account - serverGroup.launchConfig = launchConfigs ? launchConfigs.attributes : null serverGroup.image = imageConfigs ? imageConfigs.attributes : null serverGroup.buildInfo = imageConfigs ? getBuildInfoFromImage(imageConfigs) : null + serverGroup.accountName = account + if (includeDetails) { Set asgInstances = getAsgInstanceKeys(asg, account, region) Closure instanceFilter = { rel -> @@ -136,15 +153,75 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro private static Map translateLoadBalancers(Collection loadBalancerData) { loadBalancerData.collectEntries { loadBalancerEntry -> Map lbKey = Keys.parse(loadBalancerEntry.id) - [(loadBalancerEntry.id) : new AmazonLoadBalancer(name: lbKey.loadBalancer, account: lbKey.account, region: lbKey.region)] + [(loadBalancerEntry.id): new AmazonLoadBalancer(name: lbKey.loadBalancer, account: lbKey.account, region: lbKey.region)] } } private static Map translateTargetGroups(Collection targetGroupData) { targetGroupData.collectEntries { targetGroupEntry -> Map tgKey = Keys.parse(targetGroupEntry.id) - [(targetGroupEntry.id) : new AmazonTargetGroup(name: tgKey.loadBalancer, account: tgKey.account, region: tgKey.region)] + [(targetGroupEntry.id): new AmazonTargetGroup(name: tgKey.loadBalancer, account: tgKey.account, region: tgKey.region)] + } + } + + private Collection allClustersByApplication(String application) { + // TODO: only supports the equiv of includeDetails=true, consider adding support for the inverse + + List toFetch = [CLUSTERS.ns, SERVER_GROUPS.ns, LAUNCH_CONFIGS.ns, INSTANCES.ns, LAUNCH_TEMPLATES.ns] + Map filters = [:] + filters[SERVER_GROUPS.ns] = RelationshipCacheFilter.include(INSTANCES.ns, LAUNCH_CONFIGS.ns, LAUNCH_TEMPLATES.ns) + filters[LAUNCH_CONFIGS.ns] = RelationshipCacheFilter.include(IMAGES.ns, SERVER_GROUPS.ns) + filters[LAUNCH_TEMPLATES.ns] = RelationshipCacheFilter.include(IMAGES.ns, SERVER_GROUPS.ns) + filters[INSTANCES.ns] = RelationshipCacheFilter.include(SERVER_GROUPS.ns) + + def cacheResults = cacheView.getAllByApplication(toFetch, application, filters) + + // lbs and images can span applications and can't currently be indexed by app + Collection allLoadBalancers = resolveRelationshipDataForCollection( + cacheResults[CLUSTERS.ns], + LOAD_BALANCERS.ns, + RelationshipCacheFilter.none() + ) + Collection allTargetGroups = resolveRelationshipDataForCollection( + cacheResults[CLUSTERS.ns], + TARGET_GROUPS.ns, + RelationshipCacheFilter.none() + ) + + Collection allImages = [] + allImages.addAll( + resolveRelationshipDataForCollection(cacheResults[LAUNCH_CONFIGS.ns], IMAGES.ns, RelationshipCacheFilter.none()) + ) + + allImages.addAll( + resolveRelationshipDataForCollection(cacheResults[LAUNCH_TEMPLATES.ns], IMAGES.ns, RelationshipCacheFilter.none()) + ) + + Map loadBalancers = translateLoadBalancers(allLoadBalancers) + Map targetGroups = translateTargetGroups(allTargetGroups) + Map serverGroups = translateServerGroups( + cacheResults[SERVER_GROUPS.ns], + cacheResults[INSTANCES.ns], + cacheResults[LAUNCH_CONFIGS.ns], + cacheResults[LAUNCH_TEMPLATES.ns], + allImages + ) + + Collection clusters = cacheResults[CLUSTERS.ns].collect { clusterData -> + Map clusterKey = Keys.parse(clusterData.id) + + AmazonCluster cluster = new AmazonCluster() + cluster.accountName = clusterKey.account + cluster.name = clusterKey.cluster + + cluster.serverGroups = 
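allClustersByApplication above joins cached relationship ids against prebuilt id-to-object maps, and Groovy's findResults silently drops ids with no cached counterpart. The same null-tolerant join in plain Java, with illustrative data:

import java.util.*;
import java.util.stream.Collectors;

public class ClusterJoinSketch {
  public static void main(String[] args) {
    Map<String, String> serverGroupsById = Map.of("sg-1", "app1-main-v001", "sg-2", "app1-main-v002");
    List<String> relationshipIds = List.of("sg-1", "sg-missing", "sg-2");

    List<String> resolved = relationshipIds.stream()
        .map(serverGroupsById::get)
        .filter(Objects::nonNull) // drop ids with no cached counterpart, like findResults
        .collect(Collectors.toList());

    System.out.println(resolved); // [app1-main-v001, app1-main-v002]
  }
}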
clusterData.relationships[SERVER_GROUPS.ns]?.findResults { serverGroups.get(it) } + cluster.loadBalancers = clusterData.relationships[LOAD_BALANCERS.ns]?.findResults { loadBalancers.get(it) } + cluster.targetGroups = clusterData.relationships[TARGET_GROUPS.ns]?.findResults { targetGroups.get(it) } + + cluster } + + return clusters } private Collection translateClusters(Collection clusterData, boolean includeDetails) { @@ -156,11 +233,13 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro if (includeDetails) { Collection allLoadBalancers = resolveRelationshipDataForCollection(clusterData, LOAD_BALANCERS.ns) Collection allTargetGroups = resolveRelationshipDataForCollection(clusterData, TARGET_GROUPS.ns) - Collection allServerGroups = resolveRelationshipDataForCollection(clusterData, SERVER_GROUPS.ns, RelationshipCacheFilter.include(INSTANCES.ns, LAUNCH_CONFIGS.ns)) + Collection allServerGroups = resolveRelationshipDataForCollection( + clusterData, SERVER_GROUPS.ns, RelationshipCacheFilter.include(INSTANCES.ns, LAUNCH_CONFIGS.ns, LAUNCH_TEMPLATES.ns)) loadBalancers = translateLoadBalancers(allLoadBalancers) targetGroups = translateTargetGroups(allTargetGroups) - serverGroups = translateServerGroups(allServerGroups, false) // instance relationships were expanded so no need to consider partial instances + serverGroups = translateServerGroups(allServerGroups, false) + // instance relationships were expanded so no need to consider partial instances } else { Collection allServerGroups = resolveRelationshipDataForCollection(clusterData, SERVER_GROUPS.ns, RelationshipCacheFilter.none()) serverGroups = translateServerGroups(allServerGroups, true) @@ -194,14 +273,58 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro } private Map> getClusters0(String applicationName, boolean includeDetails) { - CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName)) - if (application == null) { + Collection clusters + + if (includeDetails && cacheView.supportsGetAllByApplication()) { + clusters = allClustersByApplication(applicationName) + } else { + Collection clusterKeys = cacheView.filterIdentifiers(CLUSTERS.ns, Keys.getClusterKey("*", applicationName, "*")) + Collection clusterData = cacheView.getAll(CLUSTERS.ns, clusterKeys) + clusters = translateClusters(clusterData, includeDetails) + } + if (!clusters) { return null } - Collection clusters = translateClusters(resolveRelationshipData(application, CLUSTERS.ns), includeDetails) + mapResponse(clusters) } + private Map translateServerGroups( + Collection serverGroupData, + Collection instanceData, + Collection launchConfigData, + Collection launchTemplateData, + Collection imageData + ) { + Map instances = translateInstances(instanceData) + + Map serverGroups = serverGroupData?.collectEntries { sg -> + Map parsed = Keys.parse(sg.id) + AmazonServerGroup serverGroup = new AmazonServerGroup(sg.attributes) + Set asgInstanceSet = getAsgInstanceKeys(serverGroup.asg, parsed.account, parsed.region) + + serverGroup.instances = asgInstanceSet + .findAll { instances.containsKey(it) } + .collect { instances.get(it) } + + [(sg.id): serverGroup] + } + + // expand and set launch templates or mixed instances policy + updateServerGroupLaunchSettings(serverGroups, launchTemplateData) + + // expand and set launch configs + updateServerGroupLaunchSettings(serverGroups, launchConfigData) + + // update build info for launch templates + updateServerGroupBuildInfo(serverGroups, launchTemplateData, imageData) + + // 
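getClusters0 above now branches on cacheView.supportsGetAllByApplication(), preferring one indexed bulk query when the backing cache (for example the SQL cache) advertises it and falling back to a glob-filtered key scan otherwise. A cut-down sketch of that capability branch; MiniCache is a hypothetical reduction of the cats Cache API:

import java.util.Collection;
import java.util.List;

public class CapabilityBranchSketch {
  interface MiniCache {
    boolean supportsGetAllByApplication();
    Collection<String> getAllByApplication(String application);
    Collection<String> filterIdentifiers(String glob);
  }

  static Collection<String> clusterKeysFor(MiniCache cache, String application) {
    if (cache.supportsGetAllByApplication()) {
      return cache.getAllByApplication(application); // one indexed round trip
    }
    return cache.filterIdentifiers("*:" + application + ":*"); // key-pattern fallback
  }

  public static void main(String[] args) {
    MiniCache cache = new MiniCache() {
      public boolean supportsGetAllByApplication() { return false; }
      public Collection<String> getAllByApplication(String app) { return List.of(); }
      public Collection<String> filterIdentifiers(String glob) { return List.of("aws:clusters:app1:prod:app1-main"); }
    };
    System.out.println(clusterKeysFor(cache, "app1")); // [aws:clusters:app1:prod:app1-main]
  }
}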
update build info for launch configs + updateServerGroupBuildInfo(serverGroups, launchConfigData, imageData) + + serverGroups + } + private Map translateServerGroups(Collection serverGroupData, boolean includePartialInstances) { Collection allInstances = resolveRelationshipDataForCollection(serverGroupData, INSTANCES.ns, RelationshipCacheFilter.none()) @@ -237,12 +360,21 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro } } - [(serverGroupEntry.id) : serverGroup] + [(serverGroupEntry.id): serverGroup] } - Map launchConfigurations = serverGroupData.findAll { it.relationships[LAUNCH_CONFIGS.ns] }.collectEntries { - [(it.relationships[LAUNCH_CONFIGS.ns].first()) : it.id] + Map launchConfigurations = serverGroupData.findAll { + it.relationships[LAUNCH_CONFIGS.ns] + }.collectEntries { + [(it.relationships[LAUNCH_CONFIGS.ns].first()): it.id] } + + Map templates = serverGroupData.findAll { + it.relationships[LAUNCH_TEMPLATES.ns] + }.collectEntries { + [(it.relationships[LAUNCH_TEMPLATES.ns].first()): it.id] + } + Collection launchConfigs = cacheView.getAll(LAUNCH_CONFIGS.ns, launchConfigurations.keySet()) Map> allImages = [:] launchConfigs.each { launchConfig -> @@ -256,6 +388,20 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro allImages[imageId] << serverGroupId } } + + Collection launchTemplates = cacheView.getAll(LAUNCH_TEMPLATES.ns, templates.keySet()) + launchTemplates.each { launchTemplate -> + def serverGroupId = templates[launchTemplate.id] + populateServerGroupWithLtOrMip(serverGroups[serverGroupId], launchTemplate) + String imageId = launchTemplate.relationships[IMAGES.ns]?.first() + if (imageId) { + if (!allImages.containsKey(imageId)) { + allImages.put(imageId, []) + } + allImages[imageId] << serverGroupId + } + } + Collection images = cacheView.getAll(IMAGES.ns, allImages.keySet()) images.each { image -> def serverGroupIds = allImages[image.id] @@ -304,19 +450,17 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro } instances.values().each { instance -> - instance.isHealthy = instance.health.any { it.state == 'Up' } && instance.health.every { it.state == 'Up' || it.state == 'Unknown' } + instance.isHealthy = instance.health.any { it.state == 'Up' } && instance.health.every { + it.state == 'Up' || it.state == 'Unknown' + } } } private Collection resolveRelationshipDataForCollection(Collection sources, String relationship, CacheFilter cacheFilter = null) { - Collection relationships = sources?.findResults { it.relationships[relationship]?: [] }?.flatten() ?: [] + Collection relationships = sources?.findResults { it.relationships[relationship] ?: [] }?.flatten() ?: [] relationships ? cacheView.getAll(relationship, relationships, cacheFilter) : [] } - private Collection resolveRelationshipData(CacheData source, String relationship) { - resolveRelationshipData(source, relationship) { true } - } - private Collection resolveRelationshipData(CacheData source, String relationship, Closure relFilter, CacheFilter cacheFilter = null) { Collection filteredRelationships = source.relationships[relationship]?.findAll(relFilter) filteredRelationships ? 
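The two-argument translateServerGroups above builds a reverse index from image id to the server groups referencing it, via launch configs and launch templates, so each image is fetched from the cache once and fanned out. A sketch of that accumulation with illustrative ids:

import java.util.*;

public class ImageIndexSketch {
  public static void main(String[] args) {
    // launch-data id -> owning server group, plus the image each launch config/template references
    Map<String, String> serverGroupByLaunchData = Map.of("lc-1", "sg-1", "lt-1", "sg-2", "lt-2", "sg-3");
    Map<String, String> imageByLaunchData = Map.of("lc-1", "ami-aaa", "lt-1", "ami-aaa", "lt-2", "ami-bbb");

    Map<String, List<String>> allImages = new HashMap<>();
    imageByLaunchData.forEach((launchDataId, imageId) ->
        allImages.computeIfAbsent(imageId, id -> new ArrayList<>())
            .add(serverGroupByLaunchData.get(launchDataId)));

    System.out.println(allImages); // e.g. {ami-aaa=[sg-1, sg-2], ami-bbb=[sg-3]}
  }
}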
cacheView.getAll(relationship, filteredRelationships, cacheFilter) : [] @@ -349,11 +493,7 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro @Override Set getClusters(String applicationName, String account) { - CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName), RelationshipCacheFilter.include(CLUSTERS.ns)) - if (application == null) { - return [] as Set - } - Collection clusterKeys = application.relationships[CLUSTERS.ns].findAll { Keys.parse(it).account == account } + Collection clusterKeys = cacheView.filterIdentifiers(CLUSTERS.ns, Keys.getClusterKey("*", applicationName, account)) Collection clusters = cacheView.getAll(CLUSTERS.ns, clusterKeys) translateClusters(clusters, true) as Set } @@ -385,4 +525,118 @@ class AmazonClusterProvider implements ClusterProvider, ServerGro String buildServerGroupIdentifier(String account, String region, String serverGroupName) { return Keys.getServerGroupKey(serverGroupName, account, region) } + + /** + * Gets a launch template by version + */ + private static Map getLaunchTemplateForVersion(CacheData launchTemplate, String version) { + if (!launchTemplate) { + return null + } + + Map launchTemplateAttrs = launchTemplate.attributes + def versions = launchTemplateAttrs["versions"] as List + + // Handle special version placeholders: $Latest and $Default + if (version == '$Latest') { + return launchTemplateAttrs["latestVersion"] as Map + } else if (version == '$Default') { + return versions.find { + it["defaultVersion"] as Boolean + } + } else { + return versions.find { + it["versionNumber"] == version.toInteger() + } + } + } + + /** + * Updates server groups launch config or launch template or mixed instances policy + */ + private static void updateServerGroupLaunchSettings(Map serverGroups, Collection launchData) { + for (ld in launchData) { + if (ld?.relationships?.containsKey(SERVER_GROUPS.ns)) { + ld.relationships[SERVER_GROUPS.ns].each { + def serverGroup = serverGroups[it] + if (serverGroup != null) { + if (serverGroup.getLaunchTemplateSpecification()) { + populateServerGroupWithLtOrMip(serverGroup, ld) + } else { + serverGroup.launchConfig = ld.attributes + } + } + } + } + } + } + + /** + * Populate server group launch template or mixed instances policy to surface launch settings with launch template. 
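getLaunchTemplateForVersion above treats $Latest and $Default as AWS placeholder versions and anything else as a concrete version number. The same resolution as a standalone sketch, with cached versions modelled as plain maps:

import java.util.List;
import java.util.Map;

public class LtVersionSketch {
  static Map<String, Object> forVersion(Map<String, Object> latest, List<Map<String, Object>> versions, String version) {
    if ("$Latest".equals(version)) {
      return latest;
    }
    if ("$Default".equals(version)) {
      return versions.stream().filter(v -> Boolean.TRUE.equals(v.get("defaultVersion"))).findFirst().orElse(null);
    }
    int wanted = Integer.parseInt(version); // concrete version number
    return versions.stream().filter(v -> Integer.valueOf(wanted).equals(v.get("versionNumber"))).findFirst().orElse(null);
  }

  public static void main(String[] args) {
    Map<String, Object> v1 = Map.of("versionNumber", 1, "defaultVersion", true);
    Map<String, Object> v2 = Map.of("versionNumber", 2, "defaultVersion", false);
    System.out.println(forVersion(v2, List.of(v1, v2), "$Default").get("versionNumber")); // 1
  }
}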
+ */ + private static void populateServerGroupWithLtOrMip(AmazonServerGroup serverGroup, CacheData launchData) { + + // get launch template for version specified + def ltSpec = serverGroup.getLaunchTemplateSpecification() + log.debug("Attempting to populate server group $serverGroup.name with launch template $ltSpec.") + Map ec2Lt = getLaunchTemplateForVersion(launchData, ltSpec["version"] as String) + + if (!ec2Lt) { + return + } + + if (serverGroup.asg?.launchTemplate) { + serverGroup.launchTemplate = ec2Lt + } else if (serverGroup.asg?.mixedInstancesPolicy) { + def mip = serverGroup.asg.mixedInstancesPolicy + + // single instance type case + if (!mip["launchTemplate"]["overrides"]) { + serverGroup.mixedInstancesPolicy = new AmazonServerGroup.MixedInstancesPolicySettings().tap { + allowedInstanceTypes = [ec2Lt["launchTemplateData"]["instanceType"]] + instancesDistribution = serverGroup.asg.mixedInstancesPolicy["instancesDistribution"] + launchTemplates = [ec2Lt] + } + } else { + // multiple instance types case + def overrides = new ArrayList<>(mip["launchTemplate"]["overrides"]) + def types = [] + overrides.each { + types.add(it["instanceType"]) + } + + // launchTemplate#instanceType is ignored when it is overridden. So, remove it to prevent accidental misuse / ambiguity. + Map ec2LtDataMinusType = ec2Lt?."launchTemplateData".findAll {it.key != "instanceType"} + ec2Lt?.replace("launchTemplateData", ec2LtDataMinusType) + + serverGroup.mixedInstancesPolicy = new AmazonServerGroup.MixedInstancesPolicySettings().tap { + allowedInstanceTypes = types.sort() + instancesDistribution = serverGroup.asg.mixedInstancesPolicy["instancesDistribution"] + launchTemplates = [ ec2Lt ] + launchTemplateOverridesForInstanceType = overrides + } + } + } + } + + /** + * Updates server groups build info + */ + private void updateServerGroupBuildInfo( + Map serverGroups, Collection launchData, Collection imageData) { + Map images = imageData?.collectEntries { image -> + [(image.id): image] + } + + launchData.each { ld -> + if (ld?.relationships?.containsKey(SERVER_GROUPS.ns)) { + def serverGroup = serverGroups[ld.relationships[SERVER_GROUPS.ns].first()] + def imageId = ld.relationships[IMAGES.ns]?.first() + if (serverGroup && imageId && images.containsKey(imageId)) { + serverGroup.image = images[imageId].attributes + serverGroup.buildInfo = getBuildInfoFromImage(images[imageId]) + } + } + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonElasticIpProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonElasticIpProvider.groovy index cec372107dc..20be3350402 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonElasticIpProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonElasticIpProvider.groovy @@ -24,6 +24,7 @@ import com.netflix.spinnaker.clouddriver.model.ElasticIpProvider import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.model.AmazonElasticIp import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Qualifier import org.springframework.stereotype.Component import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.ELASTIC_IPS @@ -35,7 +36,7 @@ class AmazonElasticIpProvider implements ElasticIpProvider { private final ObjectMapper objectMapper @Autowired - 
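When a mixed instances policy carries overrides, populateServerGroupWithLtOrMip above collects the override instance types and strips the launch template's own instanceType, since an overridden value would only mislead. A sketch of that handling with illustrative map shapes:

import java.util.*;
import java.util.stream.Collectors;

public class MipOverrideSketch {
  public static void main(String[] args) {
    Map<String, Object> launchTemplateData = new HashMap<>(Map.of("imageId", "ami-aaa", "instanceType", "m5.large"));
    List<Map<String, String>> overrides = List.of(Map.of("instanceType", "m5.xlarge"), Map.of("instanceType", "r5.large"));

    List<String> allowedTypes = overrides.stream()
        .map(o -> o.get("instanceType"))
        .sorted()
        .collect(Collectors.toList());

    launchTemplateData.remove("instanceType"); // ignored when overridden, so drop it to avoid ambiguity

    System.out.println(allowedTypes);       // [m5.xlarge, r5.large]
    System.out.println(launchTemplateData); // {imageId=ami-aaa}
  }
}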
AmazonElasticIpProvider(Cache cacheView, ObjectMapper objectMapper) { + AmazonElasticIpProvider(Cache cacheView, @Qualifier("amazonObjectMapper") ObjectMapper objectMapper) { this.cacheView = cacheView this.objectMapper = objectMapper } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonImageProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonImageProvider.java deleted file mode 100644 index 8ec088ccf1e..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonImageProvider.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2018 Schibsted ASA. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.provider.view; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.cache.Cache; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; -import com.netflix.spinnaker.config.AwsConfiguration; -import com.netflix.spinnaker.clouddriver.aws.data.Keys; -import com.netflix.spinnaker.clouddriver.aws.model.AmazonImage; -import com.netflix.spinnaker.clouddriver.aws.model.AmazonServerGroup; -import com.netflix.spinnaker.clouddriver.model.Image; -import com.netflix.spinnaker.clouddriver.model.ImageProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES; -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS; - - -@Component -public class AmazonImageProvider implements ImageProvider { - - private final Cache cacheView; - private final AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider; - private final ObjectMapper objectMapper; - - @Autowired - AmazonImageProvider(Cache cacheView, AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider, ObjectMapper objectMapper) { - this.cacheView = cacheView; - this.amazonServerGroupProvider = amazonServerGroupProvider; - this.objectMapper = objectMapper; - } - - @Override - public Optional getImageById(String imageId) { - - if (!imageId.startsWith("ami-")) { - throw new RuntimeException("Image Id provided (" + imageId + ") is not a valid id for the provider " + getCloudProvider()); - } - - List imageIdList = new ArrayList<>(cacheView.filterIdentifiers(IMAGES.toString(), "*" + imageId)); - - if (imageIdList.isEmpty()) { - return Optional.empty(); - } - - List imageCacheList = new ArrayList<>(cacheView.getAll(IMAGES.toString(), imageIdList)); - - AmazonImage image = objectMapper.convertValue(imageCacheList.get(0).getAttributes(), AmazonImage.class); - - 
image.setRegion(Keys.parse(imageCacheList.get(0).getId()).get("region")); - - List serverGroupList = imageCacheList.stream() - .filter(imageCache -> imageCache.getRelationships().get(SERVER_GROUPS.toString()) != null) - .map(imageCache -> imageCache.getRelationships().get(SERVER_GROUPS.toString())) - .flatMap(Collection::stream) - .map(this::getServerGroupData) - .collect(Collectors.toList()); - - image.setServerGroups(serverGroupList); - return Optional.of(image); - } - - @Override - public String getCloudProvider() { - return AmazonCloudProvider.ID; - } - - private AmazonServerGroup getServerGroupData(String serverGroupCacheKey) { - Map parsedServerGroupKey = Keys.parse(serverGroupCacheKey); - return amazonServerGroupProvider.getServerGroup(parsedServerGroupKey.get("account"), parsedServerGroupKey.get("region"), parsedServerGroupKey.get("serverGroup")); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceProvider.groovy index c35167e3b1b..e72038719b2 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceProvider.groovy @@ -20,22 +20,20 @@ import com.amazonaws.services.ec2.model.GetConsoleOutputRequest import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.data.Keys +import com.netflix.spinnaker.clouddriver.aws.model.AmazonInstance import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.core.provider.agent.ExternalHealthProvider import com.netflix.spinnaker.clouddriver.model.InstanceProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.aws.data.Keys -import com.netflix.spinnaker.clouddriver.aws.model.AmazonInstance +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* @Component -class AmazonInstanceProvider implements InstanceProvider { +class AmazonInstanceProvider implements InstanceProvider { final String cloudProvider = AmazonCloudProvider.ID private final Cache cacheView @@ -52,7 +50,7 @@ class AmazonInstanceProvider implements InstanceProvider { AmazonClientProvider amazonClientProvider @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override AmazonInstance getInstance(String account, String region, String id) { @@ -67,26 +65,26 @@ class AmazonInstanceProvider implements InstanceProvider { instance.serverGroup = serverGroup.serverGroup instance.cluster = serverGroup.cluster } + def healthKeys = [] if (instanceEntry.relationships[HEALTH.ns]) { - 
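The deleted AmazonImageProvider validated the provider-specific id format before touching the cache and treated an empty scan as Optional.empty() rather than an error. A sketch of that contract, with the cache scan faked by a fixed key list:

import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

public class ImageLookupSketch {
  // Illustrative stand-in for cacheView.filterIdentifiers(IMAGES.toString(), "*" + imageId)
  static final List<String> CACHED_IMAGE_KEYS = List.of("aws:images:prod:us-east-1:ami-12345678");

  static Optional<String> getImageById(String imageId) {
    if (!imageId.startsWith("ami-")) {
      throw new IllegalArgumentException("Image Id provided (" + imageId + ") is not a valid id for this provider");
    }
    List<String> matches = CACHED_IMAGE_KEYS.stream()
        .filter(key -> key.endsWith(imageId))
        .collect(Collectors.toList());
    return matches.isEmpty() ? Optional.empty() : Optional.of(matches.get(0));
  }

  public static void main(String[] args) {
    System.out.println(getImageById("ami-12345678")); // Optional[aws:images:prod:us-east-1:ami-12345678]
    System.out.println(getImageById("ami-00000000")); // Optional.empty
  }
}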
instance.health.addAll(cacheView.getAll(HEALTH.ns, instanceEntry.relationships[HEALTH.ns])*.attributes) + healthKeys.addAll(instanceEntry.relationships[HEALTH.ns]) } externalHealthProviders.each { externalHealthProvider -> - def healthKeys = [] externalHealthProvider.agents.each { externalHealthAgent -> healthKeys << Keys.getInstanceHealthKey(instance.name, account, region, externalHealthAgent.healthId) } - healthKeys.unique().each { key -> - def externalHealth = cacheView.getAll(HEALTH.ns, key) - if (externalHealth) { - instance.health.addAll(externalHealth*.attributes) - } + } + healthKeys.unique().each { key -> + def instanceHealth = cacheView.getAll(HEALTH.ns, key) + if (instanceHealth) { + instance.health.addAll(instanceHealth*.attributes) } } instance } String getConsoleOutput(String account, String region, String id) { - def credentials = accountCredentialsProvider.getCredentials(account) + def credentials = credentialsRepository.getOne(account) if (!(credentials instanceof NetflixAmazonCredentials)) { throw new IllegalArgumentException("Invalid credentials: ${account}:${region}") } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProvider.groovy index fcbc61483d6..0639c68bc07 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProvider.groovy @@ -22,6 +22,7 @@ import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.model.InstanceTypeProvider import com.netflix.spinnaker.clouddriver.aws.model.AmazonInstanceType import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Qualifier import org.springframework.stereotype.Component import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.INSTANCE_TYPES @@ -34,7 +35,7 @@ class AmazonInstanceTypeProvider implements InstanceTypeProvider targetGroupServerGroupProviders = Collections.EMPTY_LIST; @Autowired @@ -54,8 +56,8 @@ class AmazonLoadBalancerProvider implements LoadBalancerProvider resolveRelationshipData(CacheData source, String relationship) { - source.relationships[relationship] ? cacheView.getAll(relationship, source.relationships[relationship]) : [] + Collection resolveRelationshipData(CacheData source, String relationship, CacheFilter cacheFilter = null) { + source.relationships[relationship] ? cacheView.getAll(relationship, source.relationships[relationship], cacheFilter) : [] } private Collection resolveRelationshipDataForCollection(Collection sources, String relationship, CacheFilter cacheFilter = null) { @@ -64,7 +66,8 @@ class AmazonLoadBalancerProvider implements LoadBalancerProvider applicationServerGroups = application ? 
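The instance provider change above pools health keys from the instance's cached relationships and from every external health provider, de-duplicates them, and only then fetches, rather than issuing one cache call per provider. A sketch of the aggregation with illustrative keys:

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class HealthKeysSketch {
  public static void main(String[] args) {
    List<String> healthKeys = new ArrayList<>();
    healthKeys.add("aws:health:i-123:prod:us-east-1:discovery");     // from cached relationships
    healthKeys.add("aws:health:i-123:prod:us-east-1:loadBalancers"); // from an external health provider
    healthKeys.add("aws:health:i-123:prod:us-east-1:discovery");     // duplicate on purpose

    Set<String> unique = new LinkedHashSet<>(healthKeys); // one fetch per distinct key
    System.out.println(unique.size()); // 2
  }
}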
resolveRelationshipData(application, SERVER_GROUPS.ns) : [] + Collection applicationServerGroups = [] + if (application) { + applicationServerGroups = resolveRelationshipData( + application, + SERVER_GROUPS.ns, + RelationshipCacheFilter.include(INSTANCES.ns, LOAD_BALANCERS.ns, TARGET_GROUPS.ns) + ) + } + Collection allLoadBalancerKeys = cacheView.getIdentifiers(LOAD_BALANCERS.ns) Collection allTargetGroupKeys = cacheView.getIdentifiers(TARGET_GROUPS.ns) @@ -105,7 +116,12 @@ class AmazonLoadBalancerProvider implements LoadBalancerProvider tgd = cacheView.getAll(TARGET_GROUPS.ns, targetGroupKeys) + Collection tgd = cacheView.getAll( + TARGET_GROUPS.ns, + targetGroupKeys, + RelationshipCacheFilter.include(LOAD_BALANCERS.ns) + ) + tgd.each { targetGroup -> Collection targetGroupLoadBalancers = targetGroup.relationships[LOAD_BALANCERS.ns] ?: [] targetGroupLoadBalancers.each { @@ -116,16 +132,42 @@ class AmazonLoadBalancerProvider implements LoadBalancerProvider loadBalancerData = cacheView.getAll(LOAD_BALANCERS.ns, loadBalancerKeys) - Collection allLoadBalancerServerGroups = resolveRelationshipDataForCollection(loadBalancerData, SERVER_GROUPS.ns) - Collection allLoadBalancerInstances = resolveRelationshipDataForCollection(allLoadBalancerServerGroups, INSTANCES.ns, RelationshipCacheFilter.none()) + Collection loadBalancerData = cacheView.getAll( + LOAD_BALANCERS.ns, + loadBalancerKeys, + RelationshipCacheFilter.include(TARGET_GROUPS.ns, SERVER_GROUPS.ns, INSTANCES.ns) + ) + Collection allLoadBalancerServerGroups = resolveRelationshipDataForCollection( + loadBalancerData, + SERVER_GROUPS.ns, + RelationshipCacheFilter.include(INSTANCES.ns) + ) + Collection allLoadBalancerInstances = resolveRelationshipDataForCollection( + allLoadBalancerServerGroups, + INSTANCES.ns, + RelationshipCacheFilter.none() + ) + Map loadBalancerInstances = translateInstances(allLoadBalancerInstances) Map loadBalancerServerGroups = translateServerGroups(allLoadBalancerServerGroups, loadBalancerInstances) // Get all target groups - Collection targetGroupData = resolveRelationshipDataForCollection(loadBalancerData, TARGET_GROUPS.ns) - Collection allTargetGroupServerGroups = resolveRelationshipDataForCollection(targetGroupData, SERVER_GROUPS.ns) - Collection allTargetGroupInstances = resolveRelationshipDataForCollection(allTargetGroupServerGroups, INSTANCES.ns, RelationshipCacheFilter.none()) + Collection targetGroupData = resolveRelationshipDataForCollection( + loadBalancerData, + TARGET_GROUPS.ns, + RelationshipCacheFilter.include(SERVER_GROUPS.ns, INSTANCES.ns) + ) + Collection allTargetGroupServerGroups = resolveRelationshipDataForCollection( + targetGroupData, + SERVER_GROUPS.ns, + RelationshipCacheFilter.include(INSTANCES.ns) + ) + Collection allTargetGroupInstances = resolveRelationshipDataForCollection( + allTargetGroupServerGroups, + INSTANCES.ns, + RelationshipCacheFilter.none() + ) + Map targetGroupInstances = translateInstances(allTargetGroupInstances) Map targetGroupServerGroups = translateServerGroups(allTargetGroupServerGroups, targetGroupInstances) Map allTargetGroups = translateTargetGroups(targetGroupData, targetGroupServerGroups) @@ -159,7 +201,8 @@ class AmazonLoadBalancerProvider implements LoadBalancerProvider supportedIdentifiers; - - private final LoadingCache staticCache = CacheBuilder.newBuilder() - .expireAfterWrite(1, TimeUnit.MINUTES) - .recordStats() - .build( - new CacheLoader() { - public Object load(String id) throws IOException { - StaticRecord record = configuration.getStaticRecord(id); - 
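The load balancer provider changes above consistently add RelationshipCacheFilter.include(...) so only the relationship namespaces the caller will actually read get hydrated. A loose sketch of that whitelist idea, with a plain map standing in for the cats API:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class RelationshipFilterSketch {
  static Map<String, List<String>> filterRelationships(Map<String, List<String>> relationships, List<String> include) {
    return relationships.entrySet().stream()
        .filter(e -> include.contains(e.getKey())) // keep only whitelisted namespaces
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
  }

  public static void main(String[] args) {
    Map<String, List<String>> rels = Map.of(
        "serverGroups", List.of("sg-1"),
        "instances", List.of("i-1", "i-2"),
        "images", List.of("ami-aaa"));
    // mirrors RelationshipCacheFilter.include(SERVER_GROUPS.ns, INSTANCES.ns)
    System.out.println(filterRelationships(rels, List.of("serverGroups", "instances")));
  }
}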
S3Object s3Object = fetchObject( - record.getBucketAccount(), record.getBucketRegion(), record.getBucketName(), record.getBucketKey() - ); - - switch (record.getType()) { - case list: - return objectMapper.readValue(s3Object.getObjectContent(), List.class); - case object: - return objectMapper.readValue(s3Object.getObjectContent(), Map.class); - } - - return IOUtils.toString(s3Object.getObjectContent()); - } - }); - - @Autowired - public AmazonS3DataProvider(ObjectMapper objectMapper, - AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, - AmazonS3StaticDataProviderConfiguration configuration) { - this.objectMapper = objectMapper; - this.amazonClientProvider = amazonClientProvider; - this.accountCredentialsRepository = accountCredentialsRepository; - this.configuration = configuration; - - this.supportedIdentifiers = configuration.getStaticRecords() - .stream() - .map(r -> r.getId().toLowerCase()) - .collect(Collectors.toSet()); - } - - @Override - public Object getStaticData(String id, Map filters) { - try { - Object contents = staticCache.get(id); - if (filters.isEmpty() || !(contents instanceof List)) { - return contents; - } - - return ((List) contents) - .stream() - .filter(r -> { - // currently only support filtering against first level attributes (TBD whether this is even necessary) - return filters.entrySet() - .stream() - .anyMatch(f -> r.get(f.getKey()).equals(f.getValue())); - }) - .collect(Collectors.toList()); - } catch (ExecutionException e) { - throw new IllegalStateException(e); - } - } - - @Override - public void getAdhocData(String groupId, String bucketId, String objectId, OutputStream outputStream) { - String[] bucketCoordinates = bucketId.split(":"); - if (bucketCoordinates.length != 3) { - throw new IllegalArgumentException("'bucketId' must be of the form {account}:{region}:{name}"); - } - - String bucketAccount = getAccountName(bucketCoordinates[0]); - String bucketRegion = bucketCoordinates[1]; - String bucketName = bucketCoordinates[2]; - - AdhocRecord record = configuration.getAdhocRecord(groupId); - Matcher bucketNameMatcher = record.getBucketNamePattern().matcher(bucketName); - Matcher objectKeyMatcher = record.getObjectKeyPattern().matcher(objectId); - - if (!bucketNameMatcher.matches() || !objectKeyMatcher.matches()) { - throw new AccessDeniedException("Access denied (bucket: " + bucketName + ", object: " + objectId + ")"); - } - - try { - S3Object s3Object = fetchObject(bucketAccount, bucketRegion, bucketName, objectId); - IOUtils.copy(s3Object.getObjectContent(), outputStream); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - - @Override - public String getAccountForIdentifier(IdentifierType identifierType, String id) { - switch (identifierType) { - case Static: - return configuration.getStaticRecord(id).getBucketAccount(); - case Adhoc: - return getAccountName(id.split(":")[0]); - } - - throw new IllegalArgumentException("Unsupported identifierType (" + identifierType + ")"); - } - - @Override - public boolean supportsIdentifier(IdentifierType identifierType, String id) { - switch (identifierType) { - case Static: - return supportedIdentifiers.contains(id.toLowerCase()); - case Adhoc: - return configuration.getAdhocRecords() - .stream() - .anyMatch(r -> r.getId().equalsIgnoreCase(id)); - } - - throw new IllegalArgumentException("Unsupported identifierType (" + identifierType + ")"); - } - - CacheStats getStaticCacheStats() { - return staticCache.stats(); - } - - protected 
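The deleted AmazonS3DataProvider fronted its S3 reads with a Guava LoadingCache that expires entries after a minute and records hit/miss stats. The same caching pattern in isolation, assuming Guava on the classpath and with the S3 fetch faked by a string:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

import java.util.concurrent.TimeUnit;

public class StaticCacheSketch {
  private static final LoadingCache<String, Object> staticCache = CacheBuilder.newBuilder()
      .expireAfterWrite(1, TimeUnit.MINUTES)
      .recordStats()
      .build(new CacheLoader<String, Object>() {
        @Override
        public Object load(String id) {
          return "contents-of-" + id; // stand-in for fetchObject(...) plus JSON parsing
        }
      });

  public static void main(String[] args) throws Exception {
    staticCache.get("record-1");
    staticCache.get("record-1"); // served from the cache within the expiry window
    System.out.println(staticCache.stats().hitCount()); // 1
  }
}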
S3Object fetchObject(String bucketAccount, String bucketRegion, String bucketName, String objectId) { - NetflixAmazonCredentials account = (NetflixAmazonCredentials) accountCredentialsRepository.getOne(bucketAccount); - - AmazonS3 amazonS3 = amazonClientProvider.getAmazonS3(account, bucketRegion); - return amazonS3.getObject(bucketName, objectId); - } - - private String getAccountName(String accountIdOrName) { - return accountCredentialsRepository.getAll() - .stream() - .filter(c -> accountIdOrName.equalsIgnoreCase(c.getAccountId()) || accountIdOrName.equalsIgnoreCase(c.getName())) - .map(AccountCredentials::getName) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("Unsupported account identifier (accountId: " + accountIdOrName + ")")); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProvider.groovy index 4c37a3ca216..a357a01f485 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProvider.groovy @@ -20,22 +20,22 @@ import com.amazonaws.services.ec2.model.IpPermission import com.amazonaws.services.ec2.model.SecurityGroup import com.amazonaws.services.ec2.model.UserIdGroupPair import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.ImmutableSet import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.model.AmazonSecurityGroup -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.model.AddressableRange import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule import com.netflix.spinnaker.clouddriver.model.securitygroups.SecurityGroupRule -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.transform.Canonical import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Qualifier import org.springframework.stereotype.Component import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.SECURITY_GROUPS @@ -44,23 +44,17 @@ import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.SECURIT class AmazonSecurityGroupProvider implements SecurityGroupProvider { final String cloudProvider = AmazonCloudProvider.ID - final AccountCredentialsProvider accountCredentialsProvider + final CredentialsRepository credentialsRepository final Cache cacheView final ObjectMapper objectMapper - final Set accounts @Autowired - AmazonSecurityGroupProvider(AccountCredentialsProvider accountCredentialsProvider, + AmazonSecurityGroupProvider(CredentialsRepository credentialsRepository, Cache cacheView, - ObjectMapper objectMapper) { - this.accountCredentialsProvider = 
accountCredentialsProvider + @Qualifier("amazonObjectMapper") ObjectMapper objectMapper) { + this.credentialsRepository = credentialsRepository this.cacheView = cacheView this.objectMapper = objectMapper - - final allAmazonCredentials = (Set) accountCredentialsProvider.all.findAll { - it instanceof AmazonCredentials - } - accounts = ImmutableSet.copyOf(allAmazonCredentials) } @Override @@ -108,10 +102,46 @@ class AmazonSecurityGroupProvider implements SecurityGroupProvider getAllMatchingKeyPattern(String pattern, boolean includeRules) { loadResults(includeRules, cacheView.filterIdentifiers(SECURITY_GROUPS.ns, pattern)) } @@ -151,7 +181,8 @@ class AmazonSecurityGroupProvider implements SecurityGroupProvider def groupAndProtocol = new GroupAndProtocol(sg.groupId, permission.ipProtocol) if (!rules.containsKey(groupAndProtocol)) { - final ingressAccount = accounts.find { it.accountId == sg.userId } + final ingressAccount = credentialsRepository.getAll().find {it.accountId == sg.userId} Map ingressGroupSummary = getIngressGroupNameAndVpcId(sg, account, ingressAccount?.name, region, vpcId) rules.put(groupAndProtocol, [ protocol : permission.ipProtocol, @@ -231,7 +263,8 @@ class AmazonSecurityGroupProvider implements SecurityGroupProvider { private static final String DEPRECATED_TAG_KEY = 'is_deprecated' private final Cache cacheView - private final AmazonObjectMapper objectMapper + private final ObjectMapper amazonObjectMapper final String cloudProvider = AmazonCloudProvider.ID @Autowired - AmazonSubnetProvider(Cache cacheView, AmazonObjectMapper objectMapper) { + AmazonSubnetProvider(Cache cacheView, @Qualifier("amazonObjectMapper") ObjectMapper amazonObjectMapper) { this.cacheView = cacheView - this.objectMapper = objectMapper + this.amazonObjectMapper = amazonObjectMapper } @Override @@ -69,7 +69,7 @@ class AmazonSubnetProvider implements SubnetProvider { AmazonSubnet fromCacheData(CacheData cacheData) { def parts = Keys.parse(cacheData.id) - def subnet = objectMapper.convertValue(cacheData.attributes, Subnet) + def subnet = amazonObjectMapper.convertValue(cacheData.attributes, Subnet) def tag = subnet.tags.find { it.key == METADATA_TAG_KEY } def isDeprecated = subnet.tags.find { it.key == DEPRECATED_TAG_KEY }?.value String json = tag?.value @@ -105,6 +105,7 @@ class AmazonSubnetProvider implements SubnetProvider { cidrBlock: subnet.cidrBlock, availableIpAddressCount: subnet.availableIpAddressCount, account: parts.account, + accountId: cacheData.attributes.accountId, region: parts.region, availabilityZone: subnet.availabilityZone, purpose: purpose, diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonVpcProvider.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonVpcProvider.groovy index d4d706f0d0f..2333e3f6eed 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonVpcProvider.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonVpcProvider.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider.view import com.amazonaws.services.ec2.model.Vpc -import com.netflix.awsobjectmapper.AmazonObjectMapper +import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter @@ -26,6 +26,7 @@ import 
com.netflix.spinnaker.clouddriver.model.NetworkProvider import com.netflix.spinnaker.clouddriver.aws.cache.Keys import com.netflix.spinnaker.clouddriver.aws.model.AmazonVpc import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Qualifier import org.springframework.stereotype.Component import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.VPCS @@ -37,12 +38,12 @@ class AmazonVpcProvider implements NetworkProvider { private static final String DEPRECATED_TAG_KEY = 'is_deprecated' private final Cache cacheView - private final AmazonObjectMapper objectMapper + private final ObjectMapper amazonObjectMapper @Autowired - AmazonVpcProvider(Cache cacheView, AmazonObjectMapper amazonObjectMapper) { + AmazonVpcProvider(Cache cacheView, @Qualifier("amazonObjectMapper") ObjectMapper amazonObjectMapper) { this.cacheView = cacheView - this.objectMapper = amazonObjectMapper + this.amazonObjectMapper = amazonObjectMapper } @Override @@ -57,7 +58,7 @@ class AmazonVpcProvider implements NetworkProvider { AmazonVpc fromCacheData(CacheData cacheData) { def parts = Keys.parse(cacheData.id) - def vpc = objectMapper.convertValue(cacheData.attributes, Vpc) + def vpc = amazonObjectMapper.convertValue(cacheData.attributes, Vpc) def isDeprecated = vpc.tags.find { it.key == DEPRECATED_TAG_KEY }?.value new AmazonVpc( cloudProvider: AmazonCloudProvider.ID, diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AWSProxy.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AWSProxy.java deleted file mode 100644 index f59b09c79c2..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AWSProxy.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.stereotype.Component; - -/** - * AWS Proxy Configuration - */ -@ConfigurationProperties(prefix = "aws.proxy") -@Component -public class AWSProxy { - - - private String proxyHost; - private String proxyPort; - private String proxyUsername; - private String proxyPassword; - private String proxyDomain; - private String proxyWorkstation; - private String protocol; - - public AWSProxy() { - this(null, null, null, null, null, null, null); - } - - public AWSProxy(String proxyHost, String proxyPort, String proxyUsername, String proxyPassword, String protocol) { - this(proxyHost, proxyPort, proxyUsername, proxyPassword, null, null, protocol); - } - - public AWSProxy(String proxyHost, - String proxyPort, - String proxyUsername, - String proxyPassword, - String proxyDomain, - String proxyWorkstation, - String protocol - ) { - this.proxyHost = proxyHost; - this.proxyPort = proxyPort; - this.proxyUsername = proxyUsername; - this.proxyDomain = proxyDomain; - this.proxyWorkstation = proxyWorkstation; - this.proxyPassword = proxyPassword; - this.protocol = protocol; - - - } - - public String getProxyHost() { - return proxyHost; - } - - public void setProxyHost(String proxyHost) { - this.proxyHost = proxyHost; - } - - public String getProxyUsername() { - return proxyUsername; - } - - public void setProxyUsername(String proxyUsername) { - this.proxyUsername = proxyUsername; - } - - public String getProxyPort() { - return proxyPort; - } - - public void setProxyPort(String proxyPort) { - this.proxyPort = proxyPort; - } - - public String getProxyPassword() { - return proxyPassword; - } - - public void setProxyPassword(String proxyPassword) { - this.proxyPassword = proxyPassword; - } - - public String getProxyDomain() { - return proxyDomain; - } - - public void setProxyDomain(String proxyDomain) { - this.proxyDomain = proxyDomain; - } - - public String getProxyWorkstation() { - return proxyWorkstation; - } - - public void setProxyWorkstation(String proxyWorkstation) { - this.proxyWorkstation = proxyWorkstation; - } - - public String getProtocol() { - return protocol; - } - - public void setProxyProtocol(String protocol) { - this.protocol = protocol; - } - - public void apply(ClientConfiguration clientConfiguration) { - - clientConfiguration.setProxyHost(proxyHost); - clientConfiguration.setProxyPort(Integer.parseInt(proxyPort)); - clientConfiguration.setProxyUsername(proxyUsername); - clientConfiguration.setProxyPassword(proxyPassword); - - Protocol awsProtocol = Protocol.HTTP; - - if ("HTTPS".equalsIgnoreCase(protocol)) { - awsProtocol = Protocol.HTTPS; - } - - clientConfiguration.setProtocol(awsProtocol); - - if (isNTLMProxy()) { - clientConfiguration.setProxyDomain(proxyDomain); - clientConfiguration.setProxyWorkstation(proxyWorkstation); - } - } - - public boolean isNTLMProxy() { - - boolean isNTLMProxy = false; - - if (getProxyHost() != null && getProxyPort() != null && getProxyDomain() != null && getProxyWorkstation() != null) { - isNTLMProxy = true; - } - - - return isNTLMProxy; - } - - public boolean isProxyConfigMode() { - - boolean isProxy = false; - - if (getProxyHost() != null && getProxyPort() != null) { - isProxy = true; - - try { - Integer.parseInt(getProxyPort()); - } catch (NumberFormatException nfe) { - isProxy = false; - } - } - - return isProxy; - } 
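The deleted AWSProxy.apply(...) copied proxy settings onto the SDK's ClientConfiguration, switched the protocol when HTTPS was requested, and set the NTLM fields only when both a domain and a workstation were configured. A sketch with illustrative values:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

public class ProxyApplySketch {
  public static void main(String[] args) {
    // Illustrative settings; in the deleted class these came from aws.proxy.* properties.
    String protocol = "HTTPS";
    String proxyDomain = null;
    String proxyWorkstation = null;

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.setProxyHost("proxy.example.com");
    clientConfiguration.setProxyPort(Integer.parseInt("8080")); // ports arrive as strings from config
    clientConfiguration.setProxyUsername("user");
    clientConfiguration.setProxyPassword("secret");
    clientConfiguration.setProtocol("HTTPS".equalsIgnoreCase(protocol) ? Protocol.HTTPS : Protocol.HTTP);

    if (proxyDomain != null && proxyWorkstation != null) { // NTLM only when both are present
      clientConfiguration.setProxyDomain(proxyDomain);
      clientConfiguration.setProxyWorkstation(proxyWorkstation);
    }
    System.out.println(clientConfiguration.getProxyHost() + ":" + clientConfiguration.getProxyPort());
  }
}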
- - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - AWSProxy awsProxy = (AWSProxy) o; - - return proxyHost.equals(awsProxy.proxyHost) && - proxyPort.equals(awsProxy.proxyPort) && - protocol.equals(awsProxy.protocol); - } - - @Override - public int hashCode() { - int result = proxyHost.hashCode(); - result = 31 - * result - + proxyPort.hashCode() - + protocol.hashCode(); - - return result; - } - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonClientProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonClientProvider.java deleted file mode 100644 index cda89e3362c..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonClientProvider.java +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.handlers.RequestHandler2; -import com.amazonaws.retry.PredefinedRetryPolicies; -import com.amazonaws.retry.RetryPolicy; -import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; -import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingClientBuilder; -import com.amazonaws.services.autoscaling.AmazonAutoScaling; -import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder; -import com.amazonaws.services.cloudwatch.AmazonCloudWatch; -import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; -import com.amazonaws.services.ecr.AmazonECR; -import com.amazonaws.services.ecr.AmazonECRClientBuilder; -import com.amazonaws.services.ecs.AmazonECS; -import com.amazonaws.services.ecs.AmazonECSClientBuilder; -import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing; -import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancingClientBuilder; -import com.amazonaws.services.identitymanagement.AmazonIdentityManagement; -import com.amazonaws.services.identitymanagement.AmazonIdentityManagementClientBuilder; -import com.amazonaws.services.lambda.AWSLambda; -import com.amazonaws.services.lambda.AWSLambdaAsync; -import com.amazonaws.services.lambda.AWSLambdaAsyncClientBuilder; -import com.amazonaws.services.lambda.AWSLambdaClientBuilder; -import com.amazonaws.services.route53.AmazonRoute53; -import com.amazonaws.services.route53.AmazonRoute53ClientBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.shield.AWSShield; -import com.amazonaws.services.shield.AWSShieldClientBuilder; -import com.amazonaws.services.simpleworkflow.AmazonSimpleWorkflow; -import 
com.amazonaws.services.simpleworkflow.AmazonSimpleWorkflowClientBuilder; -import com.amazonaws.services.sns.AmazonSNS; -import com.amazonaws.services.sns.AmazonSNSClientBuilder; -import com.amazonaws.services.sqs.AmazonSQS; -import com.amazonaws.services.sqs.AmazonSQSClientBuilder; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.NoopRegistry; -import com.netflix.spectator.api.Registry; -import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer; -import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.AmazonClientInvocationHandler; -import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.AwsSdkClientSupplier; -import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.ProxyHandlerBuilder; -import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.RateLimiterSupplier; -import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.SpinnakerAwsRegionProvider; -import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; -import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfigurationBuilder; -import org.apache.http.client.HttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.client.HttpClients; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * Provider of Amazon SDK Clients that can read through Edda. - */ -public class AmazonClientProvider { - - /** - * This constant (as null) indicates that whatever the current region from the - * AWS SDKs perspective should be used. - * - * The region to use will be resolved dynamically by {@link SpinnakerAwsRegionProvider} - * which supports all the standard SDK means of explicitly specifying the current region, - * (environment variable, instance profile, instance metadata). 
- */ - public static final String DEFAULT_REGION = null; - - private final AwsSdkClientSupplier awsSdkClientSupplier; - private final ProxyHandlerBuilder proxyHandlerBuilder; - - public static class Builder { - private HttpClient httpClient; - private ObjectMapper objectMapper; - private EddaTemplater eddaTemplater; - private RetryPolicy.RetryCondition retryCondition; - private RetryPolicy.BackoffStrategy backoffStrategy; - private Integer maxErrorRetry; - private List requestHandlers = new ArrayList<>(); - private AWSProxy proxy; - private EddaTimeoutConfig eddaTimeoutConfig; - private int maxConnections = 200; - private int maxConnectionsPerRoute = 20; - private boolean uzeGzip = true; - private boolean addSpinnakerUserToUserAgent = false; - private ServiceLimitConfiguration serviceLimitConfiguration = new ServiceLimitConfigurationBuilder().build(); - private Registry registry = new NoopRegistry(); - - public Builder httpClient(HttpClient httpClient) { - this.httpClient = httpClient; - return this; - } - - public Builder proxy(AWSProxy proxy) { - this.proxy = proxy; - return this; - } - - public Builder objectMapper(ObjectMapper objectMapper) { - this.objectMapper = objectMapper; - return this; - } - - public Builder eddaTemplater(EddaTemplater eddaTemplater) { - this.eddaTemplater = eddaTemplater; - return this; - } - - public Builder retryCondition(RetryPolicy.RetryCondition retryCondition) { - this.retryCondition = retryCondition; - return this; - } - - public Builder backoffStrategy(RetryPolicy.BackoffStrategy backoffStrategy) { - this.backoffStrategy = backoffStrategy; - return this; - } - - public Builder maxErrorRetry(Integer maxErrorRetry) { - this.maxErrorRetry = maxErrorRetry; - return this; - } - - public Builder requestHandler(RequestHandler2 requestHandler) { - this.requestHandlers.add(requestHandler); - return this; - } - - public Builder eddaTimeoutConfig(EddaTimeoutConfig eddaTimeoutConfig) { - this.eddaTimeoutConfig = eddaTimeoutConfig; - return this; - } - - public Builder maxConnections(int maxConnections) { - this.maxConnections = maxConnections; - return this; - } - - public Builder maxConnectionsPerRoute(int maxConnectionsPerRoute) { - this.maxConnectionsPerRoute = maxConnectionsPerRoute; - return this; - } - - public Builder useGzip(boolean useGzip) { - this.uzeGzip = useGzip; - return this; - } - - public Builder serviceLimitConfiguration(ServiceLimitConfiguration serviceLimitConfiguration) { - this.serviceLimitConfiguration = serviceLimitConfiguration; - return this; - } - - public Builder registry(Registry registry) { - this.registry = registry; - return this; - } - - public Builder addSpinnakerUserToUserAgent(boolean addSpinnakerUserToUserAgent) { - this.addSpinnakerUserToUserAgent = addSpinnakerUserToUserAgent; - return this; - } - - public AmazonClientProvider build() { - HttpClient client = this.httpClient; - if (client == null) { - HttpClientBuilder builder = HttpClientBuilder.create(); - builder.setMaxConnTotal(this.maxConnections); - builder.setMaxConnPerRoute(this.maxConnectionsPerRoute); - client = builder.build(); - } - - ObjectMapper mapper = this.objectMapper == null ? AmazonObjectMapperConfigurer.createConfigured() : this.objectMapper; - EddaTemplater templater = this.eddaTemplater == null ? EddaTemplater.defaultTemplater() : this.eddaTemplater; - RetryPolicy policy = buildPolicy(); - AWSProxy proxy = this.proxy; - EddaTimeoutConfig eddaTimeoutConfig = this.eddaTimeoutConfig == null ? 
EddaTimeoutConfig.DEFAULT : this.eddaTimeoutConfig; - - final List<RequestHandler2> requestHandlers; - if (addSpinnakerUserToUserAgent) { - requestHandlers = new ArrayList<>(this.requestHandlers.size() + 1); - requestHandlers.addAll(this.requestHandlers); - requestHandlers.add(new AddSpinnakerUserToUserAgentRequestHandler()); - } else { - requestHandlers = this.requestHandlers; - } - - return new AmazonClientProvider(client, mapper, templater, policy, requestHandlers, proxy, eddaTimeoutConfig, useGzip, serviceLimitConfiguration, registry); - } - - private RetryPolicy buildPolicy() { - if (retryCondition == null && backoffStrategy == null) { - if (maxErrorRetry == null) { - return PredefinedRetryPolicies.getDefaultRetryPolicy(); - } - return PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(maxErrorRetry); - } - RetryPolicy.RetryCondition condition = this.retryCondition == null ? PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION : this.retryCondition; - RetryPolicy.BackoffStrategy strategy = this.backoffStrategy == null ? PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY : this.backoffStrategy; - int retry = this.maxErrorRetry == null ? PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY : this.maxErrorRetry; - - return new RetryPolicy(condition, strategy, retry, true); - } - } - - public AmazonClientProvider() { - this((HttpClient) null); - } - - public AmazonClientProvider(HttpClient httpClient) { - this(httpClient, AmazonObjectMapperConfigurer.createConfigured()); - } - - public AmazonClientProvider(ObjectMapper objectMapper) { - this(null, objectMapper); - } - - public AmazonClientProvider(HttpClient httpClient, ObjectMapper objectMapper) { - this(httpClient == null ? HttpClients.createDefault() : httpClient, - objectMapper == null ? AmazonObjectMapperConfigurer.createConfigured() : objectMapper, - EddaTemplater.defaultTemplater(), - PredefinedRetryPolicies.getDefaultRetryPolicy(), - Collections.emptyList(), - null, - EddaTimeoutConfig.DEFAULT, - true, - new ServiceLimitConfigurationBuilder().build(), - new NoopRegistry()); - } - - public AmazonClientProvider(HttpClient httpClient, - ObjectMapper objectMapper, - EddaTemplater eddaTemplater, - RetryPolicy retryPolicy, - List<RequestHandler2> requestHandlers, - AWSProxy proxy, - EddaTimeoutConfig eddaTimeoutConfig, - boolean useGzip, - ServiceLimitConfiguration serviceLimitConfiguration, - Registry registry) { - RateLimiterSupplier rateLimiterSupplier = new RateLimiterSupplier(serviceLimitConfiguration, registry); - this.awsSdkClientSupplier = new AwsSdkClientSupplier(rateLimiterSupplier, registry, retryPolicy, requestHandlers, proxy, useGzip); - this.proxyHandlerBuilder = new ProxyHandlerBuilder(awsSdkClientSupplier, httpClient, objectMapper, eddaTemplater, eddaTimeoutConfig, registry); - } - - /** - * When Edda serves the request, the last-modified time is captured from the response metadata. - * @return the last-modified timestamp, if available.
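As a usage note, a minimal sketch of how a caller might assemble a provider through the Builder above; every collaborator left unset (HttpClient, ObjectMapper, EddaTemplater, retry policy) falls back to the defaults computed in build(). The specific values are illustrative, not recommendations.

```java
AmazonClientProvider provider = new AmazonClientProvider.Builder()
    .maxConnections(200)               // total HTTP connection pool size
    .maxConnectionsPerRoute(20)        // per-host connection limit
    .maxErrorRetry(3)                  // yields the SDK default policy with 3 retries
    .useGzip(true)
    .addSpinnakerUserToUserAgent(true) // appends the extra request handler in build()
    .build();
```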
- */ - public Long getLastModified() { - return AmazonClientInvocationHandler.lastModified.get(); - } - - public AmazonEC2 getAmazonEC2(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonEC2(amazonCredentials, region, false); - } - - public AmazonEC2 getAmazonEC2(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonEC2.class, AmazonEC2ClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonEC2 getAmazonEC2(AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonEC2ClientBuilder.class, AmazonEC2.class, "UNSPECIFIED_ACCOUNT", awsCredentialsProvider, region); - } - - public AmazonEC2 getAmazonEC2(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonEC2ClientBuilder.class, AmazonEC2.class, accountName, awsCredentialsProvider, region); - } - - public AmazonECS getAmazonEcs(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonECS.class, AmazonECSClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonIdentityManagement getIam(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonIdentityManagement.class, AmazonIdentityManagementClientBuilder.class, amazonCredentials, region, skipEdda); - // return awsSdkClientSupplier.getClient(AmazonIdentityManagementClientBuilder.class, AmazonIdentityManagement.class, accountName, awsCredentialsProvider, region); - } - - public AWSLambda getAmazonLambda(NetflixAmazonCredentials amazonCredentials, String region) { - return proxyHandlerBuilder.getProxyHandler(AWSLambda.class, AWSLambdaClientBuilder.class, amazonCredentials, region); - } - - public AWSLambda getAmazonLambda(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AWSLambdaClientBuilder.class, AWSLambda.class, accountName, awsCredentialsProvider, region); - } - - public AWSLambdaAsync getAmazonLambdaAsync(NetflixAmazonCredentials amazonCredentials, String region) { - return proxyHandlerBuilder.getProxyHandler(AWSLambdaAsync.class, AWSLambdaAsyncClientBuilder.class, amazonCredentials, region); - } - - public AWSLambdaAsync getAmazonLambdaAsync(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AWSLambdaAsyncClientBuilder.class, AWSLambdaAsync.class, accountName, awsCredentialsProvider, region); - } - - public AmazonS3 getAmazonS3(NetflixAmazonCredentials amazonCredentials, String region) { - return proxyHandlerBuilder.getProxyHandler(AmazonS3.class, AmazonS3ClientBuilder.class, amazonCredentials, region, true); - } - - public AmazonAutoScaling getAutoScaling(NetflixAmazonCredentials amazonCredentials, String region) { - return getAutoScaling(amazonCredentials, region, false); - } - - public AmazonAutoScaling getAutoScaling(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonAutoScaling.class, AmazonAutoScalingClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonAutoScaling getAutoScaling(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return 
awsSdkClientSupplier.getClient(AmazonAutoScalingClientBuilder.class, AmazonAutoScaling.class, accountName, awsCredentialsProvider, region); - } - - public AmazonRoute53 getAmazonRoute53(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonRoute53(amazonCredentials, region, false); - } - - public AmazonRoute53 getAmazonRoute53(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonRoute53.class, AmazonRoute53ClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonRoute53 getAmazonRoute53(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonRoute53ClientBuilder.class, AmazonRoute53.class, accountName, awsCredentialsProvider, region); - } - - public AmazonElasticLoadBalancing getAmazonElasticLoadBalancing(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonElasticLoadBalancing(amazonCredentials, region, false); - } - - public AmazonElasticLoadBalancing getAmazonElasticLoadBalancing(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonElasticLoadBalancing.class, AmazonElasticLoadBalancingClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonElasticLoadBalancing getAmazonElasticLoadBalancing(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonElasticLoadBalancingClientBuilder.class, AmazonElasticLoadBalancing.class, accountName, awsCredentialsProvider, region); - } - - public com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing getAmazonElasticLoadBalancingV2(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonElasticLoadBalancingV2(amazonCredentials, region, false); - } - - public com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing getAmazonElasticLoadBalancingV2(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing.class, com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing getAmazonElasticLoadBalancingV2(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient( - com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingClientBuilder.class, - com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing.class, - accountName, - awsCredentialsProvider, - region); - } - - public AmazonSimpleWorkflow getAmazonSimpleWorkflow(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonSimpleWorkflow(amazonCredentials, region, false); - } - - public AmazonSimpleWorkflow getAmazonSimpleWorkflow(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonSimpleWorkflow.class, AmazonSimpleWorkflowClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonSimpleWorkflow getAmazonSimpleWorkflow(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return 
awsSdkClientSupplier.getClient(AmazonSimpleWorkflowClientBuilder.class, AmazonSimpleWorkflow.class, accountName, awsCredentialsProvider, region); - } - - public AmazonCloudWatch getAmazonCloudWatch(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonCloudWatch(amazonCredentials, region, false); - } - - public AmazonCloudWatch getAmazonCloudWatch(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonCloudWatch.class, AmazonCloudWatchClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonCloudWatch getAmazonCloudWatch(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonCloudWatchClientBuilder.class, AmazonCloudWatch.class, accountName, awsCredentialsProvider, region); - } - - public AmazonCloudWatch getCloudWatch(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonCloudWatch(amazonCredentials, region); - } - - public AmazonCloudWatch getCloudWatch(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return getAmazonCloudWatch(amazonCredentials, region, skipEdda); - } - - public AmazonSNS getAmazonSNS(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonSNS(amazonCredentials, region, false); - } - - public AmazonSNS getAmazonSNS(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonSNS.class, AmazonSNSClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonSNS getAmazonSNS(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonSNSClientBuilder.class, AmazonSNS.class, accountName, awsCredentialsProvider, region); - } - - public AmazonSQS getAmazonSQS(NetflixAmazonCredentials amazonCredentials, String region) { - return proxyHandlerBuilder.getProxyHandler(AmazonSQS.class, AmazonSQSClientBuilder.class, amazonCredentials, region, false); - } - - public AmazonIdentityManagement getAmazonIdentityManagement(NetflixAmazonCredentials amazonCredentials, String region) { - return getAmazonIdentityManagement(amazonCredentials, region, false); - } - - public AmazonIdentityManagement getAmazonIdentityManagement(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonIdentityManagement.class, AmazonIdentityManagementClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonIdentityManagement getAmazonIdentityManagement(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AmazonIdentityManagementClientBuilder.class, AmazonIdentityManagement.class, accountName, awsCredentialsProvider, region); - } - - public AWSShield getAmazonShield(NetflixAmazonCredentials amazonCredentials, String region) { - return proxyHandlerBuilder.getProxyHandler(AWSShield.class, AWSShieldClientBuilder.class, amazonCredentials, region, true); - } - - public AWSShield getAmazonShield(String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { - return awsSdkClientSupplier.getClient(AWSShieldClientBuilder.class, AWSShield.class, accountName, awsCredentialsProvider, region); - } - - public AWSApplicationAutoScaling getAmazonApplicationAutoScaling(NetflixAmazonCredentials amazonCredentials, String 
region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AWSApplicationAutoScaling.class, AWSApplicationAutoScalingClientBuilder.class, amazonCredentials, region, skipEdda); - } - - public AmazonECR getAmazonEcr(NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - return proxyHandlerBuilder.getProxyHandler(AmazonECR.class, AmazonECRClientBuilder.class, amazonCredentials, region, skipEdda); - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentials.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentials.java deleted file mode 100644 index dc61b42ddfa..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentials.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.fiat.model.resources.Permissions; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -import static java.util.Objects.requireNonNull; - -/** - * Basic set of Amazon credentials that will use a provided {@link com.amazonaws.auth.AWSCredentialsProvider} to resolve account credentials. - * If none is provided, the {@link com.amazonaws.auth.DefaultAWSCredentialsProviderChain} will be used. The account's active - * regions and availability zones can be specified as well.
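To make the contract above concrete, a minimal sketch of constructing credentials via the static factory, assuming the default SDK chain: the factory looks up the account id and active regions itself, so the caller supplies only naming metadata. The account name, environment, and account type below are hypothetical.

```java
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;

AWSCredentialsProvider chain = DefaultAWSCredentialsProviderChain.getInstance();
AmazonClientProvider clientProvider = new AmazonClientProvider.Builder().build();

// accountId and regions are discovered through the account info lookup inside
// the factory, so they do not appear in this call.
AmazonCredentials credentials = AmazonCredentials.fromAWSCredentials(
    "my-aws-account",  // name (hypothetical)
    "prod",            // environment (hypothetical)
    "main",            // accountType (hypothetical)
    chain,
    clientProvider);
```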
- * - * - */ -public class AmazonCredentials implements AccountCredentials { - private static final String CLOUD_PROVIDER = "aws"; - - private final String name; - private final String environment; - private final String accountType; - private final String accountId; - private final String defaultKeyPair; - private final List requiredGroupMembership; - private final Permissions permissions; - private final List regions; - private final List defaultSecurityGroups; - private final List lifecycleHooks; - private final boolean allowPrivateThirdPartyImages; - private final AWSCredentialsProvider credentialsProvider; - - public static AmazonCredentials fromAWSCredentials(String name, - String environment, - String accountType, - AWSCredentialsProvider credentialsProvider, - AmazonClientProvider amazonClientProvider) { - return fromAWSCredentials(name, - environment, - accountType, - null, - credentialsProvider, - amazonClientProvider); - } - - public static AmazonCredentials fromAWSCredentials( - String name, - String environment, - String accountType, - String defaultKeyPair, - AWSCredentialsProvider credentialsProvider, - AmazonClientProvider amazonClientProvider) { - AWSAccountInfoLookup lookup = new DefaultAWSAccountInfoLookup(credentialsProvider, - amazonClientProvider); - final String accountId = lookup.findAccountId(); - final List regions = lookup.listRegions(); - return new AmazonCredentials(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - null, - null, - null, - null, - false, - credentialsProvider); - } - - public AmazonCredentials(@JsonProperty("name") String name, - @JsonProperty("environment") String environment, - @JsonProperty("accountType") String accountType, - @JsonProperty("accountId") String accountId, - @JsonProperty("defaultKeyPair") String defaultKeyPair, - @JsonProperty("regions") List regions, - @JsonProperty("defaultSecurityGroups") List defaultSecurityGroups, - @JsonProperty("requiredGroupMembership") List requiredGroupMembership, - @JsonProperty("permissions") Permissions permissions, - @JsonProperty("lifecycleHooks") List lifecycleHooks, - @JsonProperty("allowPrivateThirdPartyImages") Boolean allowPrivateThirdPartyImages) { - this(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - defaultSecurityGroups, - requiredGroupMembership, - permissions, - lifecycleHooks, - allowPrivateThirdPartyImages, - null); - } - - public AmazonCredentials(AmazonCredentials source, AWSCredentialsProvider credentialsProvider) { - this( - source.getName(), - source.getEnvironment(), - source.getAccountType(), - source.getAccountId(), - source.getDefaultKeyPair(), - source.getRegions(), - source.getDefaultSecurityGroups(), - source.getRequiredGroupMembership(), - source.getPermissions(), - source.getLifecycleHooks(), - source.getAllowPrivateThirdPartyImages(), - credentialsProvider - ); - } - - AmazonCredentials(String name, - String environment, - String accountType, - String accountId, - String defaultKeyPair, - List regions, - List defaultSecurityGroups, - List requiredGroupMembership, - Permissions permissions, - List lifecycleHooks, - boolean allowPrivateThirdPartyImages, - AWSCredentialsProvider credentialsProvider) { - this.name = requireNonNull(name, "name"); - this.environment = requireNonNull(environment, "environment"); - this.accountType = requireNonNull(accountType, "accountType"); - this.accountId = requireNonNull(accountId, "accountId"); - this.defaultKeyPair = defaultKeyPair; - this.regions = regions == null ? 
Collections.emptyList() : Collections.unmodifiableList(regions); - this.defaultSecurityGroups = defaultSecurityGroups == null ? null : Collections.unmodifiableList(defaultSecurityGroups); - this.requiredGroupMembership = requiredGroupMembership == null ? Collections.emptyList() : Collections.unmodifiableList(requiredGroupMembership); - this.permissions = permissions == null ? Permissions.EMPTY : permissions; - this.lifecycleHooks = lifecycleHooks == null ? Collections.emptyList() : Collections.unmodifiableList(lifecycleHooks); - this.allowPrivateThirdPartyImages = allowPrivateThirdPartyImages; - this.credentialsProvider = credentialsProvider; - } - - @Override - public String getName() { - return name; - } - - @Override - public String getEnvironment() { - return environment; - } - - @Override - public String getAccountType() { - return accountType; - } - - @Override - public String getAccountId() { - return accountId; - } - - public String getDefaultKeyPair() { - return defaultKeyPair; - } - - public List getRegions() { - return regions; - } - - public List getDefaultSecurityGroups() { - return defaultSecurityGroups; - } - - public List getLifecycleHooks() { - return lifecycleHooks; - } - - public boolean getAllowPrivateThirdPartyImages() { - return allowPrivateThirdPartyImages; - } - - @JsonIgnore - public AWSCredentialsProvider getCredentialsProvider() { - return credentialsProvider; - } - - @Override - @JsonIgnore - public AWSCredentials getCredentials() { - return credentialsProvider.getCredentials(); - } - - @Override - public String getCloudProvider() { - return CLOUD_PROVIDER; - } - - @Override - public List getRequiredGroupMembership() { - return requiredGroupMembership; - } - - public Permissions getPermissions() { - return this.permissions; - } - - public static class AWSRegion { - - private final String name; - private final Boolean deprecated; - private final List availabilityZones; - private final List preferredZones; - - public AWSRegion(@JsonProperty("name") String name, - @JsonProperty("availabilityZones") List availabilityZones, - @JsonProperty("preferredZones") List preferredZones, - @JsonProperty("deprecated") Boolean deprecated) { - this.name = Objects.requireNonNull(name, "name"); - this.availabilityZones = availabilityZones == null ? Collections.emptyList() : Collections.unmodifiableList(availabilityZones); - List preferred = (preferredZones == null || preferredZones.isEmpty()) ? 
new ArrayList<>(this.availabilityZones) : new ArrayList<>(preferredZones); - preferred.retainAll(this.availabilityZones); - this.preferredZones = Collections.unmodifiableList(preferred); - - if (deprecated == null) { - deprecated = Boolean.FALSE; - } - this.deprecated = deprecated; - } - - public AWSRegion(String name, List availabilityZones) { - this(name, availabilityZones, Collections.emptyList(), null); - } - - public String getName() { - return name; - } - - public Collection getAvailabilityZones() { - return availabilityZones; - } - - public Collection getPreferredZones() { - return preferredZones; - } - - public Boolean getDeprecated() { - return deprecated; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - AWSRegion awsRegion = (AWSRegion) o; - - return name.equals(awsRegion.name) && - availabilityZones.equals(awsRegion.availabilityZones) && - preferredZones.equals(awsRegion.preferredZones); - } - - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 - * result - + availabilityZones.hashCode() - + preferredZones.hashCode(); - return result; - } - } - - public static class LifecycleHook { - - private final String roleARN; - private final String notificationTargetARN; - private final String lifecycleTransition; - private final Integer heartbeatTimeout; - private final String defaultResult; - - public LifecycleHook(@JsonProperty("roleARN") String roleARN, - @JsonProperty("notificationTargetARN") String notificationTargetARN, - @JsonProperty("lifecycleTransition") String lifecycleTransition, - @JsonProperty("heartbeatTimeout") Integer heartbeatTimeout, - @JsonProperty("defaultResult") String defaultResult) { - this.roleARN = roleARN; - this.notificationTargetARN = notificationTargetARN; - this.lifecycleTransition = lifecycleTransition; - this.heartbeatTimeout = heartbeatTimeout; - this.defaultResult = defaultResult; - } - - public String getRoleARN() { - return roleARN; - } - - public String getNotificationTargetARN() { - return notificationTargetARN; - } - - public String getLifecycleTransition() { - return lifecycleTransition; - } - - public Integer getHeartbeatTimeout() { - return heartbeatTimeout; - } - - public String getDefaultResult() { - return defaultResult; - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsInitializer.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsInitializer.groovy index c28bb1a33f7..d2b6906f531 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsInitializer.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsInitializer.groovy @@ -17,107 +17,127 @@ package com.netflix.spinnaker.clouddriver.aws.security import com.amazonaws.auth.AWSCredentialsProvider -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration.Account +import com.netflix.spinnaker.clouddriver.aws.security.config.AmazonCredentialsParser import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig -import 
com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsLoader -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.config.ConfigurableBeanFactory +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource +import com.netflix.spinnaker.credentials.definition.CredentialsParser +import com.netflix.spinnaker.credentials.poller.Poller +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.ApplicationContext import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope +import org.springframework.context.annotation.Lazy +import org.springframework.context.annotation.Primary -import static com.amazonaws.regions.Regions.EU_WEST_1 -import static com.amazonaws.regions.Regions.US_EAST_1 -import static com.amazonaws.regions.Regions.US_WEST_1 -import static com.amazonaws.regions.Regions.US_WEST_2 +import javax.annotation.Nullable @Configuration @EnableConfigurationProperties(DefaultAccountConfigurationProperties) -class AmazonCredentialsInitializer implements CredentialsInitializerSynchronizable { +class AmazonCredentialsInitializer { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean + @ConditionalOnMissingBean(CredentialsConfig.class) @ConfigurationProperties('aws') CredentialsConfig credentialsConfig() { new CredentialsConfig() } - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean - Class credentialsType(CredentialsConfig credentialsConfig) { - if (!credentialsConfig.accounts && !credentialsConfig.defaultAssumeRole) { + @ConditionalOnMissingBean(AccountsConfiguration.class) + @ConfigurationProperties('aws') + AccountsConfiguration accountsConfiguration() { + new AccountsConfiguration() + } + + @Bean + Class credentialsType( + CredentialsConfig credentialsConfig, + AccountsConfiguration accountsConfig + ) { + if (!accountsConfig.accounts && !credentialsConfig.defaultAssumeRole) { NetflixAmazonCredentials } else { NetflixAssumeRoleAmazonCredentials } } - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean - CredentialsLoader credentialsLoader(AWSCredentialsProvider awsCredentialsProvider, - AmazonClientProvider amazonClientProvider, - Class credentialsType) { - new CredentialsLoader(awsCredentialsProvider, amazonClientProvider, credentialsType) + @ConditionalOnMissingBean( + name = "amazonCredentialsParser" + ) + CredentialsParser amazonCredentialsParser( + AWSCredentialsProvider awsCredentialsProvider, + AmazonClientProvider amazonClientProvider, + Class credentialsType, + CredentialsConfig credentialsConfig, + AccountsConfiguration accountsConfig + ) { + new AmazonCredentialsParser<>( + awsCredentialsProvider, + amazonClientProvider, + credentialsType, + credentialsConfig, + accountsConfig) } @Bean - List 
netflixAmazonCredentials(CredentialsLoader credentialsLoader, - CredentialsConfig credentialsConfig, - AccountCredentialsRepository accountCredentialsRepository, - DefaultAccountConfigurationProperties defaultAccountConfigurationProperties, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - synchronizeAmazonAccounts(credentialsLoader, credentialsConfig, accountCredentialsRepository, defaultAccountConfigurationProperties, null, applicationContext, providerSynchronizerTypeWrappers) + @Primary + @ConditionalOnMissingBean( + name = "amazonCredentialsRepository" + ) + CredentialsRepository amazonCredentialsRepository( + @Lazy CredentialsLifecycleHandler eventHandler + ) { + return new MapBackedCredentialsRepository(AmazonCloudProvider.ID, eventHandler) } - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean - List synchronizeAmazonAccounts(CredentialsLoader credentialsLoader, - CredentialsConfig credentialsConfig, - AccountCredentialsRepository accountCredentialsRepository, - DefaultAccountConfigurationProperties defaultAccountConfigurationProperties, - CatsModule catsModule, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - if (!credentialsConfig.accounts && !credentialsConfig.defaultAssumeRole) { - def defaultEnvironment = defaultAccountConfigurationProperties.environment ?: defaultAccountConfigurationProperties.env - def defaultAccountType = defaultAccountConfigurationProperties.accountType ?: defaultAccountConfigurationProperties.env - credentialsConfig.accounts = [new CredentialsConfig.Account(name: defaultAccountConfigurationProperties.env, environment: defaultEnvironment, accountType: defaultAccountType)] - if (!credentialsConfig.defaultRegions) { - credentialsConfig.defaultRegions = [US_EAST_1, US_WEST_1, US_WEST_2, EU_WEST_1].collect { - new CredentialsConfig.Region(name: it.name) - } - } - } - - List accounts = credentialsLoader.load(credentialsConfig) - - def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = - ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, NetflixAmazonCredentials, accounts) - - accountsToAdd.each { NetflixAmazonCredentials account -> - accountCredentialsRepository.save(account.name, account) - } - - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - - if ((namesOfDeletedAccounts || accountsToAdd) && catsModule) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers) + @ConditionalOnMissingBean( + name = "amazonCredentialsLoader" + ) + AbstractCredentialsLoader amazonCredentialsLoader( + CredentialsParser amazonCredentialsParser, + @Nullable CredentialsDefinitionSource amazonCredentialsSource, + CredentialsConfig credentialsConfig, + AccountsConfiguration accountsConfig, + CredentialsRepository repository, + DefaultAccountConfigurationProperties defaultAccountConfigurationProperties + ) { + if (amazonCredentialsSource == null) { + amazonCredentialsSource = { -> accountsConfig.getAccounts() } as CredentialsDefinitionSource } - - accountCredentialsRepository.all.findAll { - it instanceof NetflixAmazonCredentials - } as List + return new AmazonBasicCredentialsLoader( + amazonCredentialsSource, + amazonCredentialsParser, + repository, + credentialsConfig, + accountsConfig, + defaultAccountConfigurationProperties + ) } - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeAmazonAccounts" + @Bean + @ConditionalOnMissingBean( + name = 
"amazonCredentialsInitializerSynchronizable" + ) + CredentialsInitializerSynchronizable amazonCredentialsInitializerSynchronizable( + AbstractCredentialsLoader amazonCredentialsLoader + ) { + final Poller poller = new Poller<>(amazonCredentialsLoader); + return new CredentialsInitializerSynchronizable() { + @Override + void synchronize() { + poller.run() + } + } } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCustomBinderConfiguration.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCustomBinderConfiguration.java new file mode 100644 index 00000000000..e931a67297d --- /dev/null +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCustomBinderConfiguration.java @@ -0,0 +1,43 @@ +/* + * Copyright 2021 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService; +import com.netflix.spinnaker.kork.secrets.SecretManager; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty({"aws.enabled", "aws.custom-property-binding-enabled"}) +public class AmazonCustomBinderConfiguration { + @Bean + CustomAccountsConfigurationProvider customAccountsConfigurationProvider( + ConfigurableApplicationContext context, + CloudConfigResourceService configResourceService, + SecretManager secretManager) { + return new CustomAccountsConfigurationProvider(context, configResourceService, secretManager); + } + + @Bean + AccountsConfiguration accountsConfiguration( + CustomAccountsConfigurationProvider bootstrapCredentialsConfigurationProvider) { + return bootstrapCredentialsConfigurationProvider.getConfigurationProperties(); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AssumeRoleAmazonCredentials.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AssumeRoleAmazonCredentials.java deleted file mode 100644 index 10bfa20a1f4..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AssumeRoleAmazonCredentials.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.auth.AWSCredentialsProvider; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.netflix.spinnaker.fiat.model.resources.Permissions; - -import java.util.List; -import java.util.Objects; - -/** - * Provides an Amazon credential pack that uses Assume Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-assume-role.html) to provide API access to the account. - * This class allows you to use your credentials, provided via the supplied {@link com.amazonaws.auth.AWSCredentialsProvider}, to act as the target account ID with the privileges described through the assumeRole role. - * - * - */ -public class AssumeRoleAmazonCredentials extends AmazonCredentials { - static final String DEFAULT_SESSION_NAME = "Spinnaker"; - - static AWSCredentialsProvider createSTSCredentialsProvider(AWSCredentialsProvider credentialsProvider, String accountId, String assumeRole, String sessionName) { - String assumeRoleValue = Objects.requireNonNull(assumeRole, "assumeRole"); - if (!assumeRoleValue.startsWith("arn:")) { - assumeRoleValue = String.format("arn:aws:iam::%s:%s", Objects.requireNonNull(accountId, "accountId"), assumeRoleValue); - } - return credentialsProvider == null ? null : new NetflixSTSAssumeRoleSessionCredentialsProvider( - credentialsProvider, - assumeRoleValue, - Objects.requireNonNull(sessionName, "sessionName"), - accountId - ); - } - - /** - * The role to assume on the target account.
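The ARN handling in createSTSCredentialsProvider above is worth restating on its own: a bare role name is expanded against the target account id, while anything already in ARN form passes through untouched. A self-contained restatement, with illustrative account ids:

```java
import java.util.Objects;

class RoleArnSketch {
  // Mirrors the normalization in createSTSCredentialsProvider above.
  static String toRoleArn(String accountId, String assumeRole) {
    String value = Objects.requireNonNull(assumeRole, "assumeRole");
    return value.startsWith("arn:")
        ? value
        : String.format(
            "arn:aws:iam::%s:%s", Objects.requireNonNull(accountId, "accountId"), value);
  }

  public static void main(String[] args) {
    // Bare role name -> expanded against the account id (values are examples).
    System.out.println(toRoleArn("123456789012", "role/spinnakerManaged"));
    // -> arn:aws:iam::123456789012:role/spinnakerManaged

    // Already an ARN -> returned unchanged.
    System.out.println(toRoleArn("123456789012", "arn:aws:iam::999999999999:role/other"));
  }
}
```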
- */ - private final String assumeRole; - private final String sessionName; - - public AssumeRoleAmazonCredentials(@JsonProperty("name") String name, - @JsonProperty("environment") String environment, - @JsonProperty("accountType") String accountType, - @JsonProperty("accountId") String accountId, - @JsonProperty("defaultKeyPair") String defaultKeyPair, - @JsonProperty("regions") List regions, - @JsonProperty("defaultSecurityGroups") List defaultSecurityGroups, - @JsonProperty("requiredGroupMembership") List requiredGroupMembership, - @JsonProperty("permissions") Permissions permissions, - @JsonProperty("lifecycleHooks") List lifecycleHooks, - @JsonProperty("allowPrivateThirdPartyImages") boolean allowPrivateThirdPartyImages, - @JsonProperty("assumeRole") String assumeRole, - @JsonProperty("sessionName") String sessionName) { - this(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - defaultSecurityGroups, - requiredGroupMembership, - permissions, - lifecycleHooks, - allowPrivateThirdPartyImages, - null, - assumeRole, - sessionName); - } - - public AssumeRoleAmazonCredentials(AssumeRoleAmazonCredentials copy, AWSCredentialsProvider credentialsProvider) { - this(copy.getName(), - copy.getEnvironment(), - copy.getAccountType(), - copy.getAccountId(), - copy.getDefaultKeyPair(), - copy.getRegions(), - copy.getDefaultSecurityGroups(), - copy.getRequiredGroupMembership(), - copy.getPermissions(), - copy.getLifecycleHooks(), - copy.getAllowPrivateThirdPartyImages(), - credentialsProvider, - copy.getAssumeRole(), - copy.getSessionName()); - } - - AssumeRoleAmazonCredentials(String name, - String environment, - String accountType, - String accountId, - String defaultKeyPair, - List regions, - List defaultSecurityGroups, - List requiredGroupMembership, - Permissions permissions, - List lifecycleHooks, - boolean allowPrivateThirdPartyImages, - AWSCredentialsProvider credentialsProvider, - String assumeRole, - String sessionName) { - super(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - defaultSecurityGroups, - requiredGroupMembership, - permissions, - lifecycleHooks, - allowPrivateThirdPartyImages, - createSTSCredentialsProvider(credentialsProvider, - accountId, - assumeRole, - sessionName == null ? DEFAULT_SESSION_NAME : sessionName)); - this.assumeRole = assumeRole; - this.sessionName = sessionName == null ? DEFAULT_SESSION_NAME : sessionName; - } - - public String getAssumeRole() { - return assumeRole; - } - - public String getSessionName() { - return sessionName; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/CustomAccountsConfigurationProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/CustomAccountsConfigurationProvider.java new file mode 100644 index 00000000000..5ba3b9e2213 --- /dev/null +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/CustomAccountsConfigurationProvider.java @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.github.wnameless.json.unflattener.JsonUnflattener; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.clouddriver.config.AbstractBootstrapCredentialsConfigurationProvider; +import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService; +import com.netflix.spinnaker.kork.secrets.SecretManager; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.context.properties.bind.BindResult; +import org.springframework.context.ConfigurableApplicationContext; + +/** + * If a configuration properties file has a large number of AWS accounts, the as-is Spring Boot + * implementation of properties binding is inefficient. Hence, custom logic for binding just the + * {@link AccountsConfiguration} is provided, though it still uses Spring Boot's Binder class. The {@link + * CustomAccountsConfigurationProvider} class fetches the flattened aws account properties from + * Spring Cloud Config's BootstrapPropertySource and creates an {@link AccountsConfiguration} + * object. + */ +@Slf4j +public class CustomAccountsConfigurationProvider + extends AbstractBootstrapCredentialsConfigurationProvider { + private final String FIRST_ACCOUNT_NAME_KEY = "aws.accounts[0].name"; + + public CustomAccountsConfigurationProvider( + ConfigurableApplicationContext applicationContext, + CloudConfigResourceService configResourceService, + SecretManager secretManager) { + super(applicationContext, configResourceService, secretManager); + } + + public AccountsConfiguration getConfigurationProperties() { + return getAccounts(getPropertiesMap(FIRST_ACCOUNT_NAME_KEY)); + } + + @SuppressWarnings("unchecked") + private AccountsConfiguration getAccounts(Map<String, Object> credentialsPropertiesMap) { + log.info("Started loading aws accounts from the configuration file"); + AccountsConfiguration accountConfig = new AccountsConfiguration(); + BindResult result; + + // unflatten + Map<String, Object> propertiesMap = + (Map<String, Object>) JsonUnflattener.unflattenAsMap(credentialsPropertiesMap).get("aws"); + + List<AccountsConfiguration.Account> accounts = new ArrayList<>(); + + // loop through each account and bind + for (Map<String, Object> unflattenedAcc : + ((List<Map<String, Object>>) propertiesMap.get("accounts"))) { + result = bind(getFlatMap(unflattenedAcc), AccountsConfiguration.Account.class); + accounts.add((AccountsConfiguration.Account) result.get()); + } + accountConfig.setAccounts(accounts); + log.info("Finished loading aws accounts"); + return accountConfig; + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/DefaultAWSAccountInfoLookup.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/DefaultAWSAccountInfoLookup.java deleted file mode 100644 index 865d75d0482..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/DefaultAWSAccountInfoLookup.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
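To illustrate the unflatten-then-bind approach of CustomAccountsConfigurationProvider above: Spring Cloud Config exposes the accounts as flattened, indexed keys, and JsonUnflattener folds those back into nested maps and lists so each account can be bound to AccountsConfiguration.Account individually. A small sketch of the unflatten step, assuming json-flattener's Map overload as used above; the keys and values are invented (the real map comes from getPropertiesMap):

```java
import com.github.wnameless.json.unflattener.JsonUnflattener;
import java.util.LinkedHashMap;
import java.util.Map;

public class UnflattenSketch {
  public static void main(String[] args) {
    // Flattened properties roughly as Spring Cloud Config would expose them.
    Map<String, Object> flat = new LinkedHashMap<>();
    flat.put("aws.accounts[0].name", "prod");
    flat.put("aws.accounts[0].accountId", "123456789012");
    flat.put("aws.accounts[1].name", "test");

    // Indexed keys become a list of nested maps under "aws" -> "accounts"; each
    // entry is then small enough to bind to AccountsConfiguration.Account cheaply.
    Map<String, Object> nested = JsonUnflattener.unflattenAsMap(flat);
    System.out.println(nested);
    // expected shape: {aws={accounts=[{name=prod, accountId=123456789012}, {name=test}]}}
  }
}
```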
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.AmazonEC2Client; -import com.amazonaws.services.ec2.model.*; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials.AWSRegion; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public class DefaultAWSAccountInfoLookup implements AWSAccountInfoLookup { - private static final String DEFAULT_SECURITY_GROUP_NAME = "default"; - private static final Pattern IAM_ARN_PATTERN = Pattern.compile(".*?arn:aws(?:-cn|-us-gov)?:(?:iam|sts)::(\\d+):.*"); - - private final AWSCredentialsProvider credentialsProvider; - private final AmazonClientProvider amazonClientProvider; - - public DefaultAWSAccountInfoLookup(AWSCredentialsProvider credentialsProvider, AmazonClientProvider amazonClientProvider) { - this.credentialsProvider = credentialsProvider; - this.amazonClientProvider = amazonClientProvider; - } - - @Override - public String findAccountId() { - AmazonEC2 ec2 = amazonClientProvider.getAmazonEC2(credentialsProvider, AmazonClientProvider.DEFAULT_REGION); - try { - List vpcs = ec2.describeVpcs().getVpcs(); - boolean supportsByName = false; - if (vpcs.isEmpty()) { - supportsByName = true; - } else { - for (Vpc vpc : vpcs) { - if (vpc.getIsDefault()) { - supportsByName = true; - break; - } - } - } - - DescribeSecurityGroupsRequest request = new DescribeSecurityGroupsRequest(); - if (supportsByName) { - request.withGroupNames(DEFAULT_SECURITY_GROUP_NAME); - } - DescribeSecurityGroupsResult result = ec2.describeSecurityGroups(request); - - for (SecurityGroup sg : result.getSecurityGroups()) { - //if there is a vpcId or it is the default security group it won't be an EC2 cross account group - if ((sg.getVpcId() != null && sg.getVpcId().length() > 0) || DEFAULT_SECURITY_GROUP_NAME.equals(sg.getGroupName())) { - return sg.getOwnerId(); - } - } - - throw new IllegalArgumentException("Unable to lookup accountId with provided credentials"); - } catch (AmazonServiceException ase) { - if ("AccessDenied".equals(ase.getErrorCode())) { - String message = ase.getMessage(); - Matcher matcher = IAM_ARN_PATTERN.matcher(message); - if (matcher.matches()) { - return matcher.group(1); - } - } - throw ase; - } - } - - @Override - public List listAvailabilityZones(String regionName) { - List regions = listRegions(regionName); - if (regions.isEmpty()) { - throw new IllegalArgumentException("Unknown region: " + regionName); - } - return new ArrayList<>(regions.get(0).getAvailabilityZones()); - } - - public List listRegions(String... 
regionNames) { - return listRegions(Arrays.asList(regionNames)); - } - - @Override - public List listRegions(Collection regionNames) { - Set nameSet = new HashSet<>(regionNames); - AmazonEC2 ec2 = amazonClientProvider.getAmazonEC2(credentialsProvider, AmazonClientProvider.DEFAULT_REGION); - - DescribeRegionsRequest request = new DescribeRegionsRequest(); - if (!nameSet.isEmpty()) { - request.withRegionNames(regionNames); - } - List regions = ec2.describeRegions(request).getRegions(); - if (regions.size() != nameSet.size()) { - Set missingSet = new HashSet<>(nameSet); - for (Region region : regions) { - missingSet.remove(region.getRegionName()); - } - throw new IllegalArgumentException("Unknown region" + (missingSet.size() > 1 ? "s: " : ": ") + missingSet); - } - List awsRegions = new ArrayList<>(regions.size()); - for (Region region : regions) { - AmazonEC2 regionalEc2 = amazonClientProvider.getAmazonEC2(credentialsProvider, region.getRegionName()); - List azs = regionalEc2.describeAvailabilityZones().getAvailabilityZones(); - List availabilityZoneNames = new ArrayList<>(azs.size()); - for (AvailabilityZone az : azs) { - availabilityZoneNames.add(az.getZoneName()); - } - - awsRegions.add(new AWSRegion(region.getRegionName(), availabilityZoneNames)); - } - return awsRegions; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixAmazonCredentials.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixAmazonCredentials.java deleted file mode 100644 index f6af2de84ab..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixAmazonCredentials.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
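One non-obvious detail of DefaultAWSAccountInfoLookup above deserves a standalone restatement: when the security-group describe call is denied, findAccountId() recovers the account id by scraping the caller's IAM/STS ARN out of the AccessDenied message. The pattern below is copied from the deleted class; the error message is invented for illustration.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AccountIdFromArnSketch {
  private static final Pattern IAM_ARN_PATTERN =
      Pattern.compile(".*?arn:aws(?:-cn|-us-gov)?:(?:iam|sts)::(\\d+):.*");

  public static void main(String[] args) {
    // A typical AccessDenied message shape (hypothetical content).
    String message =
        "User: arn:aws:sts::123456789012:assumed-role/spinnaker/session"
            + " is not authorized to perform: ec2:DescribeSecurityGroups";
    Matcher matcher = IAM_ARN_PATTERN.matcher(message);
    if (matcher.matches()) {
      System.out.println(matcher.group(1)); // 123456789012
    }
  }
}
```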
- */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.auth.AWSCredentialsProvider; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.netflix.spinnaker.fiat.model.resources.Permissions; - -import java.util.List; - -/** - * An implementation of {@link AmazonCredentials} that is decorated with Netflix concepts like Edda, Discovery, Front50, - */ -public class NetflixAmazonCredentials extends AmazonCredentials { - private final String edda; - private final boolean eddaEnabled; - private final String discovery; - private final boolean discoveryEnabled; - private final String front50; - private final boolean front50Enabled; - private final String bastionHost; - private final boolean bastionEnabled; - private final boolean shieldEnabled; - - public NetflixAmazonCredentials(@JsonProperty("name") String name, - @JsonProperty("environment") String environment, - @JsonProperty("accountType") String accountType, - @JsonProperty("accountId") String accountId, - @JsonProperty("defaultKeyPair") String defaultKeyPair, - @JsonProperty("regions") List regions, - @JsonProperty("defaultSecurityGroups") List defaultSecurityGroups, - @JsonProperty("requiredGroupMembership") List requiredGroupMembership, - @JsonProperty("permissions") Permissions permissions, - @JsonProperty("lifecycleHooks") List lifecycleHooks, - @JsonProperty("allowPrivateThirdPartyImages") boolean allowPrivateThirdPartyImages, - @JsonProperty("edda") String edda, - @JsonProperty("eddaEnabled") Boolean eddaEnabled, - @JsonProperty("discovery") String discovery, - @JsonProperty("discoveryEnabled") Boolean discoveryEnabled, - @JsonProperty("front50") String front50, - @JsonProperty("front50Enabled") Boolean front50Enabled, - @JsonProperty("bastionHost") String bastionHost, - @JsonProperty("bastionEnabled") Boolean bastionEnabled, - @JsonProperty("shieldEnabled") Boolean shieldEnabled) { - this(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - defaultSecurityGroups, - requiredGroupMembership, - permissions, - lifecycleHooks, - allowPrivateThirdPartyImages, - null, - edda, - eddaEnabled, - discovery, - discoveryEnabled, - front50, - front50Enabled, - bastionHost, - bastionEnabled, - shieldEnabled); - } - - private static boolean flagValue(String serviceUrl, Boolean flag) { - return (!(serviceUrl == null || serviceUrl.trim().length() == 0) && (flag != null ? 
flag : true)); - } - - public NetflixAmazonCredentials(NetflixAmazonCredentials copy, AWSCredentialsProvider credentialsProvider) { - this(copy.getName(), - copy.getEnvironment(), - copy.getAccountType(), - copy.getAccountId(), - copy.getDefaultKeyPair(), - copy.getRegions(), - copy.getDefaultSecurityGroups(), - copy.getRequiredGroupMembership(), - copy.getPermissions(), - copy.getLifecycleHooks(), - copy.getAllowPrivateThirdPartyImages(), - credentialsProvider, - copy.getEdda(), - copy.getEddaEnabled(), - copy.getDiscovery(), - copy.getDiscoveryEnabled(), - copy.getFront50(), - copy.getFront50Enabled(), - copy.getBastionHost(), - copy.getBastionEnabled(), - copy.getShieldEnabled()); - } - - NetflixAmazonCredentials(String name, - String environment, - String accountType, - String accountId, - String defaultKeyPair, - List regions, - List defaultSecurityGroups, - List requiredGroupMembership, - Permissions permissions, - List lifecycleHooks, - boolean allowPrivateThirdPartyImages, - AWSCredentialsProvider credentialsProvider, - String edda, - Boolean eddaEnabled, - String discovery, - Boolean discoveryEnabled, - String front50, - Boolean front50Enabled, - String bastionHost, - Boolean bastionEnabled, - Boolean shieldEnabled) { - super(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - defaultSecurityGroups, - requiredGroupMembership, - permissions, - lifecycleHooks, - allowPrivateThirdPartyImages, - credentialsProvider); - this.edda = edda; - this.eddaEnabled = flagValue(edda, eddaEnabled); - this.discovery = discovery; - this.discoveryEnabled = flagValue(discovery, discoveryEnabled); - this.front50 = front50; - this.front50Enabled = flagValue(front50, front50Enabled); - this.bastionHost = bastionHost; - this.bastionEnabled = flagValue(bastionHost, bastionEnabled); - this.shieldEnabled = (shieldEnabled == null) ? false : shieldEnabled; - } - - public String getEdda() { - return edda; - } - - public String getDiscovery() { - return discovery; - } - - public String getFront50() { - return front50; - } - - public String getBastionHost() { - return bastionHost; - } - - public boolean getEddaEnabled() { - return eddaEnabled; - } - - public boolean getDiscoveryEnabled() { - return discoveryEnabled; - } - - public boolean getFront50Enabled() { - return front50Enabled; - } - - public boolean getBastionEnabled() { - return bastionEnabled; - } - - public boolean getShieldEnabled() { - return shieldEnabled; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixAssumeRoleAmazonCredentials.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixAssumeRoleAmazonCredentials.java deleted file mode 100644 index 835257d728d..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixAssumeRoleAmazonCredentials.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
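The flagValue helper above encodes the enablement rule for each Netflix-internal service: the service is on only when its URL is non-blank and the explicit flag is not false, with a null flag defaulting to true. Restated with a few worked cases:

```java
public class FlagValueSketch {
  // Same logic as NetflixAmazonCredentials.flagValue above.
  static boolean flagValue(String serviceUrl, Boolean flag) {
    return !(serviceUrl == null || serviceUrl.trim().length() == 0)
        && (flag != null ? flag : true);
  }

  public static void main(String[] args) {
    System.out.println(flagValue(null, true));           // false: no URL at all
    System.out.println(flagValue("http://edda", null));  // true: URL set, flag defaults on
    System.out.println(flagValue("http://edda", false)); // false: explicitly disabled
    System.out.println(flagValue("   ", true));          // false: blank URL counts as unset
  }
}
```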
- */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import com.amazonaws.auth.AWSCredentialsProvider; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.netflix.spinnaker.fiat.model.resources.Permissions; - -import java.util.List; - -/** - * - * @see AssumeRoleAmazonCredentials - */ -public class NetflixAssumeRoleAmazonCredentials extends NetflixAmazonCredentials { - - /** - * The role to assume on the target account. - */ - private final String assumeRole; - private final String sessionName; - - public NetflixAssumeRoleAmazonCredentials(@JsonProperty("name") String name, - @JsonProperty("environment") String environment, - @JsonProperty("accountType") String accountType, - @JsonProperty("accountId") String accountId, - @JsonProperty("defaultKeyPair") String defaultKeyPair, - @JsonProperty("regions") List regions, - @JsonProperty("defaultSecurityGroups") List defaultSecurityGroups, - @JsonProperty("requiredGroupMembership") List requiredGroupMembership, - @JsonProperty("permissions") Permissions permissions, - @JsonProperty("lifecycleHooks") List lifecycleHooks, - @JsonProperty("allowPrivateThirdPartyImages") boolean allowPrivateThirdPartyImages, - @JsonProperty("edda") String edda, - @JsonProperty("eddaEnabled") Boolean eddaEnabled, - @JsonProperty("discovery") String discovery, - @JsonProperty("discoveryEnabled") Boolean discoveryEnabled, - @JsonProperty("front50") String front50, - @JsonProperty("front50Enabled") Boolean front50Enabled, - @JsonProperty("bastionHost") String bastionHost, - @JsonProperty("bastionEnabled") Boolean bastionEnabled, - @JsonProperty("shieldEnabled") Boolean shieldEnabled, - @JsonProperty("assumeRole") String assumeRole, - @JsonProperty("sessionName") String sessionName) { - - this(name, - environment, - accountType, - accountId, - defaultKeyPair, - regions, - defaultSecurityGroups, - requiredGroupMembership, - permissions, - lifecycleHooks, - allowPrivateThirdPartyImages, - null, - edda, - eddaEnabled, - discovery, - discoveryEnabled, - front50, - front50Enabled, - bastionHost, - bastionEnabled, - shieldEnabled, - assumeRole, - sessionName); - } - - public NetflixAssumeRoleAmazonCredentials(NetflixAssumeRoleAmazonCredentials copy, AWSCredentialsProvider credentialsProvider) { - this(copy.getName(), - copy.getEnvironment(), - copy.getAccountType(), - copy.getAccountId(), - copy.getDefaultKeyPair(), - copy.getRegions(), - copy.getDefaultSecurityGroups(), - copy.getRequiredGroupMembership(), - copy.getPermissions(), - copy.getLifecycleHooks(), - copy.getAllowPrivateThirdPartyImages(), - credentialsProvider, - copy.getEdda(), - copy.getEddaEnabled(), - copy.getDiscovery(), - copy.getDiscoveryEnabled(), - copy.getFront50(), - copy.getFront50Enabled(), - copy.getBastionHost(), - copy.getBastionEnabled(), - copy.getShieldEnabled(), - copy.getAssumeRole(), - copy.getSessionName()); - } - - NetflixAssumeRoleAmazonCredentials(String name, - String environment, - String accountType, - String accountId, - String defaultKeyPair, - List regions, - List defaultSecurityGroups, - List requiredGroupMembership, - Permissions permissions, - List lifecycleHooks, - boolean allowPrivateThirdPartyImages, - AWSCredentialsProvider credentialsProvider, - String edda, - Boolean eddaEnabled, - String discovery, - Boolean discoveryEnabled, - String front50, - Boolean front50Enabled, - String bastionHost, - Boolean bastionEnabled, - Boolean shieldEnabled, - String assumeRole, - String sessionName) { - super(name, - environment, - accountType, - accountId, - 
defaultKeyPair,
-        regions,
-        defaultSecurityGroups,
-        requiredGroupMembership,
-        permissions,
-        lifecycleHooks,
-        allowPrivateThirdPartyImages,
-        AssumeRoleAmazonCredentials.createSTSCredentialsProvider(credentialsProvider,
-            accountId,
-            assumeRole,
-            sessionName == null ? AssumeRoleAmazonCredentials.DEFAULT_SESSION_NAME : sessionName),
-        edda,
-        eddaEnabled,
-        discovery,
-        discoveryEnabled,
-        front50,
-        front50Enabled,
-        bastionHost,
-        bastionEnabled,
-        shieldEnabled);
-    this.assumeRole = assumeRole;
-    this.sessionName = sessionName == null ? AssumeRoleAmazonCredentials.DEFAULT_SESSION_NAME : sessionName;
-  }
-
-  public String getAssumeRole() {
-    return assumeRole;
-  }
-
-  public String getSessionName() {
-    return sessionName;
-  }
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixSTSAssumeRoleSessionCredentialsProvider.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixSTSAssumeRoleSessionCredentialsProvider.java
deleted file mode 100644
index 1d62d9f6bb9..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/NetflixSTSAssumeRoleSessionCredentialsProvider.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2016 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.aws.security;
-
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
-
-public class NetflixSTSAssumeRoleSessionCredentialsProvider extends STSAssumeRoleSessionCredentialsProvider {
-  private final String accountId;
-
-  public NetflixSTSAssumeRoleSessionCredentialsProvider(AWSCredentialsProvider longLivedCredentialsProvider,
-                                                        String roleArn,
-                                                        String roleSessionName,
-                                                        String accountId) {
-    super(longLivedCredentialsProvider, roleArn, roleSessionName);
-    this.accountId = accountId;
-  }
-
-  public String getAccountId() {
-    return accountId;
-  }
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/StaticAWSAccountInfoLookup.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/StaticAWSAccountInfoLookup.java
deleted file mode 100644
index 7308b1b521e..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/StaticAWSAccountInfoLookup.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2015 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
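NetflixAssumeRoleAmazonCredentials defers to AssumeRoleAmazonCredentials.createSTSCredentialsProvider(...) to wrap the account's long-lived provider in an STS session for the target account, and NetflixSTSAssumeRoleSessionCredentialsProvider (deleted just above) merely tags that session provider with the account id for diagnostics. A sketch of the wiring, assuming the standard IAM role ARN format; the default session name value here is an illustrative stand-in for AssumeRoleAmazonCredentials.DEFAULT_SESSION_NAME:

import com.amazonaws.auth.AWSCredentialsProvider;
import com.netflix.spinnaker.clouddriver.aws.security.NetflixSTSAssumeRoleSessionCredentialsProvider;

public class AssumeRoleSketch {
  public static NetflixSTSAssumeRoleSessionCredentialsProvider decorate(
      AWSCredentialsProvider longLived, String accountId, String role, String sessionName) {
    String roleArn = "arn:aws:iam::" + accountId + ":role/" + role;    // assumed ARN shape
    String session = sessionName != null ? sessionName : "Spinnaker";  // stand-in default
    return new NetflixSTSAssumeRoleSessionCredentialsProvider(longLived, roleArn, session, accountId);
  }
}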
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -public class StaticAWSAccountInfoLookup implements AWSAccountInfoLookup { - private final String accountId; - private final List knownRegions; - - public StaticAWSAccountInfoLookup(String accountId, List knownRegions) { - this.accountId = accountId; - this.knownRegions = knownRegions; - } - - @Override - public String findAccountId() { - return accountId; - } - - @Override - public List listRegions(String... regionNames) { - return listRegions(Arrays.asList(regionNames)); - } - - @Override - public List listRegions(Collection regionNames) { - Set nameSet = new HashSet<>(regionNames); - List result = new ArrayList<>(nameSet.size()); - for (AmazonCredentials.AWSRegion region : knownRegions) { - if (nameSet.isEmpty() || nameSet.contains(region.getName())) { - result.add(region); - } - } - return result; - } - - @Override - public List listAvailabilityZones(String regionName) { - for (AmazonCredentials.AWSRegion region : knownRegions) { - if (region.getName().equals(regionName)) { - return new ArrayList<>(region.getAvailabilityZones()); - } - } - return null; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsConfig.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsConfig.java deleted file mode 100644 index bb16b157919..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsConfig.java +++ /dev/null @@ -1,444 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security.config; - -import com.netflix.spinnaker.fiat.model.resources.Permissions; - -import java.util.List; - -/** - * A mutable credentials configurations structure suitable for transformation into concrete - * credentials implementations. 
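StaticAWSAccountInfoLookup (removed above) answers account and region questions purely from configuration, so no AWS call is ever made: an empty filter passed to listRegions returns every known region, and listAvailabilityZones returns null for an unknown region name. A usage sketch, assuming AWSRegion exposes a (name, availabilityZones) constructor; adjust if the real signature differs:

import java.util.Arrays;
import java.util.List;
import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials;
import com.netflix.spinnaker.clouddriver.aws.security.StaticAWSAccountInfoLookup;

public class StaticLookupSketch {
  public static void main(String[] args) {
    List<AmazonCredentials.AWSRegion> known = Arrays.asList(
        new AmazonCredentials.AWSRegion("us-east-1", Arrays.asList("us-east-1a", "us-east-1b")),
        new AmazonCredentials.AWSRegion("us-west-2", Arrays.asList("us-west-2a")));
    StaticAWSAccountInfoLookup lookup = new StaticAWSAccountInfoLookup("123456789012", known);

    lookup.listRegions();                      // empty filter: all known regions
    lookup.listRegions("us-west-2");           // filtered by name
    lookup.listAvailabilityZones("us-east-1"); // [us-east-1a, us-east-1b]
  }
}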
- */ -public class CredentialsConfig { - public static class Region { - private String name; - private List availabilityZones; - private List preferredZones; - private Boolean deprecated; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public List getAvailabilityZones() { - return availabilityZones; - } - - public void setAvailabilityZones(List availabilityZones) { - this.availabilityZones = availabilityZones; - } - - public List getPreferredZones() { - return preferredZones; - } - - public void setPreferredZones(List preferredZones) { - this.preferredZones = preferredZones; - } - - public Boolean getDeprecated() { - return deprecated; - } - - public void setDeprecated(Boolean deprecated) { - this.deprecated = deprecated; - } - - Region copyOf() { - Region clone = new Region(); - clone.setName(getName()); - clone.setAvailabilityZones(getAvailabilityZones()); - clone.setPreferredZones(getPreferredZones()); - clone.setDeprecated(getDeprecated()); - - return clone; - } - } - - public static class LifecycleHook { - private String name; - private String roleARN; - private String notificationTargetARN; - private String lifecycleTransition; - private Integer heartbeatTimeout; - private String defaultResult; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getRoleARN() { - return roleARN; - } - - public void setRoleARN(String roleARN) { - this.roleARN = roleARN; - } - - public String getNotificationTargetARN() { - return notificationTargetARN; - } - - public void setNotificationTargetARN(String notificationTargetARN) { - this.notificationTargetARN = notificationTargetARN; - } - - public String getLifecycleTransition() { - return lifecycleTransition; - } - - public void setLifecycleTransition(String lifecycleTransition) { - this.lifecycleTransition = lifecycleTransition; - } - - public Integer getHeartbeatTimeout() { - return heartbeatTimeout; - } - - public void setHeartbeatTimeout(Integer heartbeatTimeout) { - this.heartbeatTimeout = heartbeatTimeout; - } - - public String getDefaultResult() { - return defaultResult; - } - - public void setDefaultResult(String defaultResult) { - this.defaultResult = defaultResult; - } - } - - public static class Account { - private String name; - private String environment; - private String accountType; - private String accountId; - private String defaultKeyPair; - private List regions; - private List defaultSecurityGroups; - private List requiredGroupMembership; - private Permissions.Builder permissions; - private String edda; - private Boolean eddaEnabled; - private String discovery; - private Boolean discoveryEnabled; - private String front50; - private Boolean front50Enabled; - private String bastionHost; - private Boolean bastionEnabled; - private String assumeRole; - private String sessionName; - private List lifecycleHooks; - private boolean allowPrivateThirdPartyImages; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getEnvironment() { - return environment; - } - - public void setEnvironment(String environment) { - this.environment = environment; - } - - public String getAccountType() { - return accountType; - } - - public void setAccountType(String accountType) { - this.accountType = accountType; - } - - public String getAccountId() { - return accountId; - } - - public void setAccountId(String accountId) { - this.accountId = 
accountId; - } - - public String getDefaultKeyPair() { - return defaultKeyPair; - } - - public void setDefaultKeyPair(String defaultKeyPair) { - this.defaultKeyPair = defaultKeyPair; - } - - public List getRegions() { - return regions; - } - - public void setRegions(List regions) { - this.regions = regions; - } - - public List getDefaultSecurityGroups() { - return defaultSecurityGroups; - } - - public void setDefaultSecurityGroups(List defaultSecurityGroups) { - this.defaultSecurityGroups = defaultSecurityGroups; - } - - public List getRequiredGroupMembership() { - return requiredGroupMembership; - } - - public void setRequiredGroupMembership(List requiredGroupMembership) { - this.requiredGroupMembership = requiredGroupMembership; - } - - public Permissions.Builder getPermissions() { - return permissions; - } - - public void setPermissions(Permissions.Builder permissions) { - this.permissions = permissions; - } - - public String getEdda() { - return edda; - } - - public void setEdda(String edda) { - this.edda = edda; - } - - public Boolean getEddaEnabled() { - return eddaEnabled; - } - - public void setEddaEnabled(Boolean eddaEnabled) { - this.eddaEnabled = eddaEnabled; - } - - public String getDiscovery() { - return discovery; - } - - public void setDiscovery(String discovery) { - this.discovery = discovery; - } - - public Boolean getDiscoveryEnabled() { - return discoveryEnabled; - } - - public void setDiscoveryEnabled(Boolean discoveryEnabled) { - this.discoveryEnabled = discoveryEnabled; - } - - public String getFront50() { - return front50; - } - - public void setFront50(String front50) { - this.front50 = front50; - } - - public Boolean getFront50Enabled() { - return front50Enabled; - } - - public void setFront50Enabled(Boolean front50Enabled) { - this.front50Enabled = front50Enabled; - } - - public String getBastionHost() { - return bastionHost; - } - - public void setBastionHost(String bastionHost) { - this.bastionHost = bastionHost; - } - - public Boolean getBastionEnabled() { - return bastionEnabled; - } - - public void setBastionEnabled(Boolean bastionEnabled) { - this.bastionEnabled = bastionEnabled; - } - - public String getAssumeRole() { - return assumeRole; - } - - public void setAssumeRole(String assumeRole) { - this.assumeRole = assumeRole; - } - - public String getSessionName() { - return sessionName; - } - - public void setSessionName(String sessionName) { - this.sessionName = sessionName; - } - - public List getLifecycleHooks() { - return lifecycleHooks; - } - - public void setLifecycleHooks(List lifecycleHooks) { - this.lifecycleHooks = lifecycleHooks; - } - - public Boolean getAllowPrivateThirdPartyImages() { - return allowPrivateThirdPartyImages; - } - - public void setAllowPrivateThirdPartyImages(Boolean allowPrivateThirdPartyImages) { - this.allowPrivateThirdPartyImages = allowPrivateThirdPartyImages; - } - } - - private String defaultKeyPairTemplate; - private List defaultRegions; - private List defaultSecurityGroups; - private List defaultLifecycleHooks; - private String defaultEddaTemplate; - private String defaultFront50Template; - private String defaultBastionHostTemplate; - private String defaultDiscoveryTemplate; - private String defaultAssumeRole; - private String defaultSessionName; - private String defaultLifecycleHookRoleARNTemplate; - private String defaultLifecycleHookNotificationTargetARNTemplate; - - private List accounts; - - public String getDefaultKeyPairTemplate() { - return defaultKeyPairTemplate; - } - - public void 
setDefaultKeyPairTemplate(String defaultKeyPairTemplate) { - this.defaultKeyPairTemplate = defaultKeyPairTemplate; - } - - public List getDefaultRegions() { - return defaultRegions; - } - - public void setDefaultRegions(List defaultRegions) { - this.defaultRegions = defaultRegions; - } - - public List getDefaultSecurityGroups() { - return defaultSecurityGroups; - } - - public void setDefaultSecurityGroups(List defaultSecurityGroups) { - this.defaultSecurityGroups = defaultSecurityGroups; - } - - public String getDefaultEddaTemplate() { - return defaultEddaTemplate; - } - - public void setDefaultEddaTemplate(String defaultEddaTemplate) { - this.defaultEddaTemplate = defaultEddaTemplate; - } - - public String getDefaultFront50Template() { - return defaultFront50Template; - } - - public void setDefaultFront50Template(String defaultFront50Template) { - this.defaultFront50Template = defaultFront50Template; - } - - public String getDefaultBastionHostTemplate() { - return defaultBastionHostTemplate; - } - - public void setDefaultBastionHostTemplate(String defaultBastionHostTemplate) { - this.defaultBastionHostTemplate = defaultBastionHostTemplate; - } - - public String getDefaultDiscoveryTemplate() { - return defaultDiscoveryTemplate; - } - - public void setDefaultDiscoveryTemplate(String defaultDiscoveryTemplate) { - this.defaultDiscoveryTemplate = defaultDiscoveryTemplate; - } - - public String getDefaultAssumeRole() { - return defaultAssumeRole; - } - - public void setDefaultAssumeRole(String defaultAssumeRole) { - this.defaultAssumeRole = defaultAssumeRole; - } - - public String getDefaultSessionName() { - return defaultSessionName; - } - - public void setDefaultSessionName(String defaultSessionName) { - this.defaultSessionName = defaultSessionName; - } - - public List getAccounts() { - return accounts; - } - - public void setAccounts(List accounts) { - this.accounts = accounts; - } - - public List getDefaultLifecycleHooks() { - return defaultLifecycleHooks; - } - - public void setDefaultLifecycleHooks(List defaultLifecycleHooks) { - this.defaultLifecycleHooks = defaultLifecycleHooks; - } - - public String getDefaultLifecycleHookRoleARNTemplate() { - return defaultLifecycleHookRoleARNTemplate; - } - - public void setDefaultLifecycleHookRoleARNTemplate(String defaultLifecycleHookRoleARNTemplate) { - this.defaultLifecycleHookRoleARNTemplate = defaultLifecycleHookRoleARNTemplate; - } - - public String getDefaultLifecycleHookNotificationTargetARNTemplate() { - return defaultLifecycleHookNotificationTargetARNTemplate; - } - - public void setDefaultLifecycleHookNotificationTargetARNTemplate(String defaultLifecycleHookNotificationTargetARNTemplate) { - this.defaultLifecycleHookNotificationTargetARNTemplate = defaultLifecycleHookNotificationTargetARNTemplate; - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoader.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoader.java deleted file mode 100644 index 080bc0c67b9..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoader.java +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
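CredentialsConfig pairs global default templates with per-account overrides: anything an account leaves null is filled in by CredentialsLoader (the next removed file) from the default*Template values, whose {{...}} placeholders are substituted at load time. An illustrative population, with a made-up Edda endpoint shape:

import java.util.Arrays;

public class ConfigSketch {
  public static void main(String[] args) {
    CredentialsConfig config = new CredentialsConfig();
    config.setDefaultKeyPairTemplate("nf-keypair-{{name}}");             // resolved per account
    config.setDefaultEddaTemplate("http://edda.{{region}}.example.com"); // hypothetical endpoint

    CredentialsConfig.Region usEast1 = new CredentialsConfig.Region();
    usEast1.setName("us-east-1"); // availability zones left null: looked up on demand
    config.setDefaultRegions(Arrays.asList(usEast1));

    CredentialsConfig.Account prod = new CredentialsConfig.Account();
    prod.setName("prod"); // environment and accountType default to the name during loading
    config.setAccounts(Arrays.asList(prod));
  }
}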
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security.config; - -import com.amazonaws.auth.AWSCredentialsProvider; -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.aws.security.AWSAccountInfoLookup; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; -import com.netflix.spinnaker.clouddriver.aws.security.DefaultAWSAccountInfoLookup; -import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig.Account; -import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig.Region; - -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -public class CredentialsLoader { - - private final AWSCredentialsProvider credentialsProvider; - private final AWSAccountInfoLookup awsAccountInfoLookup; - private final Map templateValues; - private final CredentialTranslator credentialTranslator; - private final ObjectMapper objectMapper; - - public CredentialsLoader(AWSCredentialsProvider credentialsProvider, AmazonClientProvider amazonClientProvider, Class credentialsType) { - this(credentialsProvider, amazonClientProvider, credentialsType, Collections.emptyMap()); - } - - public CredentialsLoader(AWSCredentialsProvider credentialsProvider, AmazonClientProvider amazonClientProvider,Class credentialsType, Map templateValues) { - this(credentialsProvider, new DefaultAWSAccountInfoLookup(credentialsProvider, amazonClientProvider), credentialsType, templateValues); - } - - public CredentialsLoader(AWSCredentialsProvider credentialsProvider, AWSAccountInfoLookup awsAccountInfoLookup, Class credentialsType) { - this(credentialsProvider, awsAccountInfoLookup, credentialsType, Collections.emptyMap()); - } - - public CredentialsLoader(AWSCredentialsProvider credentialsProvider, AWSAccountInfoLookup awsAccountInfoLookup, Class credentialsType, Map templateValues) { - this.credentialsProvider = Objects.requireNonNull(credentialsProvider, "credentialsProvider"); - this.awsAccountInfoLookup = awsAccountInfoLookup; - this.templateValues = templateValues; - this.objectMapper = new ObjectMapper(); - this.credentialTranslator = findTranslator(credentialsType, this.objectMapper); - } - - private Lazy> createDefaults(final List defaults) { - return new Lazy<>(new Lazy.Loader>() { - @Override - public List get() { - if (defaults == null) { - return toRegion(awsAccountInfoLookup.listRegions()); - } else { - List result = new ArrayList<>(defaults.size()); - List toLookup = new ArrayList<>(); - for (Region def : defaults) { - if (def.getAvailabilityZones() == null 
|| def.getAvailabilityZones().isEmpty()) { - toLookup.add(def.getName()); - } else { - result.add(def); - } - } - if (!toLookup.isEmpty()) { - List resolved = toRegion(awsAccountInfoLookup.listRegions(toLookup)); - for (Region region : resolved) { - Region fromDefault = find(defaults, region.getName()); - if (fromDefault != null) { - region.setPreferredZones(fromDefault.getPreferredZones()); - region.setDeprecated(fromDefault.getDeprecated()); - } - } - result.addAll(resolved); - } - return result; - } - } - }); - } - - private List initRegions(Lazy> defaults, List toInit) { - if (toInit == null) { - return defaults.get(); - } - - Map toInitByName = toInit.stream().collect( - Collectors.toMap(Region::getName, Function.identity()) - ); - - List result = new ArrayList<>(toInit.size()); - List toLookup = new ArrayList<>(); - for (Region r : toInit) { - if (r.getAvailabilityZones() == null || r.getAvailabilityZones().isEmpty()) { - toLookup.add(r.getName()); - } else { - result.add(r); - } - } - - for (Iterator lookups = toLookup.iterator(); lookups.hasNext(); ) { - Region fromDefault = find(defaults.get(), lookups.next()); - if (fromDefault != null) { - lookups.remove(); - result.add(fromDefault); - } - } - if (!toLookup.isEmpty()) { - List resolved = toRegion(awsAccountInfoLookup.listRegions(toLookup)); - for (Region region : resolved) { - Region src = find(toInit, region.getName()); - if (src == null || src.getPreferredZones() == null) { - src = find(defaults.get(), region.getName()); - } - - if (src != null) { - region.setPreferredZones(src.getPreferredZones()); - } - } - result.addAll(resolved); - } - - // make a clone of all regions such that modifications apply only to this specific instance (and not global defaults) - result = result.stream().map(Region::copyOf).collect(Collectors.toList()); - - for (Region r : result) { - Region toInitRegion = toInitByName.get(r.getName()); - if (toInitRegion != null && toInitRegion.getDeprecated() != null) { - r.setDeprecated(toInitRegion.getDeprecated()); - } - } - - return result; - } - - private static Region find(List src, String name) { - if (src != null) { - for (Region r : src) { - if (r.getName().equals(name)) { - return r; - } - } - } - return null; - } - - private static List toRegion(List src) { - List result = new ArrayList<>(src.size()); - for (AmazonCredentials.AWSRegion r : src) { - Region region = new Region(); - region.setName(r.getName()); - region.setAvailabilityZones(new ArrayList<>(r.getAvailabilityZones())); - region.setPreferredZones(new ArrayList<>(r.getPreferredZones())); - result.add(region); - } - return result; - } - - public T load(String accountName) throws Throwable { - CredentialsConfig config = new CredentialsConfig(); - Account account = new Account(); - account.setName(accountName); - config.setAccounts(Arrays.asList(account)); - List result = load(config); - if (result.size() != 1) { - throw new IllegalStateException("failed to create account"); - } - return result.get(0); - } - - public List load(CredentialsConfig source) throws Throwable { - final CredentialsConfig config = objectMapper.convertValue(source, CredentialsConfig.class); - - if (config.getAccounts() == null || config.getAccounts().isEmpty()) { - return Collections.emptyList(); - } - - Lazy> defaultRegions = createDefaults(config.getDefaultRegions()); - List initializedAccounts = new ArrayList<>(config.getAccounts().size()); - for (Account account : config.getAccounts()) { - if (account.getAccountId() == null) { - if 
(!credentialTranslator.resolveAccountId()) { - throw new IllegalArgumentException("accountId is required and not resolvable for this credentials type"); - } - account.setAccountId(awsAccountInfoLookup.findAccountId()); - } - - if (account.getEnvironment() == null) { - account.setEnvironment(account.getName()); - } - - if (account.getAccountType() == null) { - account.setAccountType(account.getName()); - } - - account.setRegions(initRegions(defaultRegions, account.getRegions())); - account.setDefaultSecurityGroups(account.getDefaultSecurityGroups() != null ? account.getDefaultSecurityGroups() : config.getDefaultSecurityGroups()); - account.setLifecycleHooks(account.getLifecycleHooks() != null ? account.getLifecycleHooks() : config.getDefaultLifecycleHooks()); - - Map templateContext = new HashMap<>(templateValues); - templateContext.put("name", account.getName()); - templateContext.put("accountId", account.getAccountId()); - templateContext.put("environment", account.getEnvironment()); - templateContext.put("accountType", account.getAccountType()); - - account.setDefaultKeyPair(templateFirstNonNull(templateContext, account.getDefaultKeyPair(), config.getDefaultKeyPairTemplate())); - account.setEdda(templateFirstNonNull(templateContext, account.getEdda(), config.getDefaultEddaTemplate())); - account.setFront50(templateFirstNonNull(templateContext, account.getFront50(), config.getDefaultFront50Template())); - account.setDiscovery(templateFirstNonNull(templateContext, account.getDiscovery(), config.getDefaultDiscoveryTemplate())); - account.setAssumeRole(templateFirstNonNull(templateContext, account.getAssumeRole(), config.getDefaultAssumeRole())); - account.setSessionName(templateFirstNonNull(templateContext, account.getSessionName(), config.getDefaultSessionName())); - account.setBastionHost(templateFirstNonNull(templateContext, account.getBastionHost(), config.getDefaultBastionHostTemplate())); - - if (account.getLifecycleHooks() != null) { - for (CredentialsConfig.LifecycleHook lifecycleHook : account.getLifecycleHooks()) { - lifecycleHook.setRoleARN(templateFirstNonNull(templateContext, lifecycleHook.getRoleARN(), config.getDefaultLifecycleHookRoleARNTemplate())); - lifecycleHook.setNotificationTargetARN(templateFirstNonNull(templateContext, lifecycleHook.getNotificationTargetARN(), config.getDefaultLifecycleHookNotificationTargetARNTemplate())); - } - } - - initializedAccounts.add(credentialTranslator.translate(credentialsProvider, account)); - } - return initializedAccounts; - } - - private static class Lazy { - public static interface Loader { - T get(); - } - - private final Loader loader; - private final AtomicReference ref = new AtomicReference<>(); - - public Lazy(Loader loader) { - this.loader = loader; - } - - public T get() { - if (ref.get() == null) { - ref.set(loader.get()); - } - return ref.get(); - } - } - - private static String templateFirstNonNull(Map substitutions, String... 
values) { - for (String value : values) { - if (value != null) { - return StringTemplater.render(value, substitutions); - } - } - return null; - } - - static CredentialTranslator findTranslator(Class credentialsType, ObjectMapper objectMapper) { - return new CopyConstructorTranslator<>(objectMapper, credentialsType); - } - - static interface CredentialTranslator { - Class getCredentialType(); - - boolean resolveAccountId(); - - T translate(AWSCredentialsProvider credentialsProvider, Account account) throws Throwable; - } - - static class CopyConstructorTranslator implements CredentialTranslator { - - private final ObjectMapper objectMapper; - private final Class credentialType; - private final Constructor copyConstructor; - - public CopyConstructorTranslator(ObjectMapper objectMapper, Class credentialType) { - this.objectMapper = objectMapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); - this.credentialType = credentialType; - try { - copyConstructor = credentialType.getConstructor(credentialType, AWSCredentialsProvider.class); - } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException("Class " + credentialType + " must supply a constructor with " + credentialType + ", " + AWSCredentialsProvider.class + " args."); - } - } - - @Override - public Class getCredentialType() { - return credentialType; - } - - @Override - public boolean resolveAccountId() { - try { - credentialType.getMethod("getAssumeRole"); - return false; - } catch (NoSuchMethodException nsme) { - return true; - } - } - - @Override - public T translate(AWSCredentialsProvider credentialsProvider, Account account) throws Throwable { - T immutableInstance = objectMapper.convertValue(account, credentialType); - try { - return copyConstructor.newInstance(immutableInstance, credentialsProvider); - } catch (InvocationTargetException ite) { - throw ite.getTargetException(); - } - } - } - - static class StringTemplater { - public static String render(String template, Map substitutions) { - String base = template; - int iterations = 0; - boolean changed = true; - while (changed && iterations < 10) { - iterations++; - String previous = base; - for (Map.Entry substitution : substitutions.entrySet()) { - base = base.replaceAll(Pattern.quote("{{" + substitution.getKey() + "}}"), substitution.getValue()); - } - changed = !previous.equals(base); - } - if (changed) { - throw new RuntimeException("too many levels of templatery"); - } - return base; - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AmazonClientInvocationHandler.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AmazonClientInvocationHandler.java deleted file mode 100644 index df60c44b2e5..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AmazonClientInvocationHandler.java +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
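StringTemplater.render, removed just above, re-applies the whole substitution map until the string stops changing, capping at ten passes so a self-referencing value cannot loop forever (the "too many levels of templatery" error). A worked standalone equivalent, using literal replace in place of replaceAll with Pattern.quote:

import java.util.HashMap;
import java.util.Map;

public class TemplaterDemo {
  static String render(String template, Map<String, String> subs) {
    String base = template;
    for (int pass = 0; pass < 10; pass++) {
      String previous = base;
      for (Map.Entry<String, String> e : subs.entrySet()) {
        base = base.replace("{{" + e.getKey() + "}}", e.getValue());
      }
      if (previous.equals(base)) {
        return base; // fixed point: nothing left to substitute
      }
    }
    throw new RuntimeException("too many levels of templatery");
  }

  public static void main(String[] args) {
    Map<String, String> subs = new HashMap<>();
    subs.put("name", "prod");
    subs.put("host", "edda.{{name}}.example.com"); // nested: resolved on a later pass
    System.out.println(render("http://{{host}}:7001", subs)); // http://edda.prod.example.com:7001
  }
}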
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.AmazonWebServiceRequest; -import com.amazonaws.services.autoscaling.model.*; -import com.amazonaws.services.cloudwatch.model.DescribeAlarmsRequest; -import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult; -import com.amazonaws.services.cloudwatch.model.MetricAlarm; -import com.amazonaws.services.ec2.model.*; -import com.amazonaws.services.ec2.model.Instance; -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersRequest; -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersResult; -import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription; -import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; -import com.amazonaws.services.elasticloadbalancingv2.model.*; -import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.HttpStatus; -import org.apache.http.client.HttpClient; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.*; -import java.util.concurrent.TimeUnit; - -public class AmazonClientInvocationHandler implements InvocationHandler { - - private static final Logger log = LoggerFactory.getLogger(AmazonClientInvocationHandler.class); - - public static final ThreadLocal lastModified = new ThreadLocal<>(); - - private final String edda; - private final HttpClient httpClient; - private final Object delegate; - private final String serviceName; - private final ObjectMapper objectMapper; - private final EddaTimeoutConfig eddaTimeoutConfig; - private final Registry registry; - private final Map metricTags; - - public AmazonClientInvocationHandler(Object delegate, - String serviceName, - String edda, - HttpClient httpClient, - ObjectMapper objectMapper, - EddaTimeoutConfig eddaTimeoutConfig, - Registry registry, - Map metricTags) { - this.edda = edda; - this.httpClient = httpClient; - this.objectMapper = objectMapper; - this.delegate = delegate; - this.serviceName = serviceName; - this.eddaTimeoutConfig = eddaTimeoutConfig == null ? 
EddaTimeoutConfig.DEFAULT : eddaTimeoutConfig; - this.registry = registry; - this.metricTags = ImmutableMap.copyOf(metricTags); - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - final Id id = registry.createId("awsClientProxy.invoke", metricTags).withTag("method", method.getName()); - final long startTime = System.nanoTime(); - boolean wasDelegated = false; - - try { - if (!eddaTimeoutConfig.getAlbEnabled() && method.getDeclaringClass().equals(AmazonElasticLoadBalancing.class)) { - throw new NoSuchMethodException(); - } - Method thisMethod = this.getClass().getMethod(method.getName(), args != null && args.length > 0 ? - getClassArgs(args) : new Class[0]); - return thisMethod.invoke(this, args); - } catch (NoSuchMethodException e) { - wasDelegated = true; - try { - return method.invoke(delegate, args); - } catch (InvocationTargetException ite) { - throw ite.getCause(); - } - } finally { - registry.timer(id.withTag("requestMode", wasDelegated ? "sdkClient" : "edda")).record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); - } - } - - static Class[] getClassArgs(Object[] args) { - List classes = new ArrayList<>(); - for (Object object : args) { - classes.add(object.getClass()); - } - return classes.toArray(new Class[classes.size()]); - } - - //////////////////////////////////// - // - // AmazonAutoScaling - // - //////////////////////////////////// - public DescribeAutoScalingGroupsResult describeAutoScalingGroups() { - return describeAutoScalingGroups(null); - } - - public DescribeAutoScalingGroupsResult describeAutoScalingGroups(DescribeAutoScalingGroupsRequest request) { - return new DescribeAutoScalingGroupsResult() - .withAutoScalingGroups( - describe(request, "autoScalingGroupNames", "autoScalingGroups", AutoScalingGroup.class)); - } - - //////////////////////////////////// - // - // AmazonCloudWatch - // - //////////////////////////////////// - public DescribeAlarmsResult describeAlarms() { - return describeAlarms(null); - } - - public DescribeAlarmsResult describeAlarms(DescribeAlarmsRequest request) { - return new DescribeAlarmsResult() - .withMetricAlarms( - describe(request, "alarmNames", "alarms", MetricAlarm.class)); - } - - public DescribeScheduledActionsResult describeScheduledActions() { - return describeScheduledActions(null); - } - - public DescribeScheduledActionsResult describeScheduledActions(DescribeScheduledActionsRequest request) { - return new DescribeScheduledActionsResult() - .withScheduledUpdateGroupActions( - describe(request, "scheduledActionNames", "scheduledActions", ScheduledUpdateGroupAction.class)); - } - - public DescribePoliciesResult describePolicies() { - return describePolicies(null); - } - - public DescribePoliciesResult describePolicies(DescribePoliciesRequest request) { - return new DescribePoliciesResult() - .withScalingPolicies( - describe(request, "policyNames", "scalingPolicies", ScalingPolicy.class)); - } - - //////////////////////////////////// - // - // AmazonEC2 - // - //////////////////////////////////// - public DescribeImagesResult describeImages() { - return describeImages(null); - } - - public DescribeImagesResult describeImages(DescribeImagesRequest request) { - return new DescribeImagesResult() - .withImages( - describe(request, "imageIds", "images", Image.class)); - } - - public DescribeInstancesResult describeInstances() { - return describeInstances(null); - } - - public DescribeInstancesResult describeInstances(DescribeInstancesRequest request) { - return new 
DescribeInstancesResult() - .withReservations(new Reservation() - .withReservationId("1234") - .withInstances( - describe(request, "instanceIds", "../view/instances", Instance.class))); - } - - public DescribeLaunchConfigurationsResult describeLaunchConfigurations() { - return describeLaunchConfigurations(null); - } - - public DescribeLaunchConfigurationsResult describeLaunchConfigurations(DescribeLaunchConfigurationsRequest request) { - return new DescribeLaunchConfigurationsResult() - .withLaunchConfigurations( - describe(request, "launchConfigurationNames", "launchConfigurations", LaunchConfiguration.class)); - } - - public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings() { - return describeReservedInstancesOfferings(null); - } - - public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings(DescribeReservedInstancesOfferingsRequest request) { - return new DescribeReservedInstancesOfferingsResult() - .withReservedInstancesOfferings( - describe(request, "reservedInstancesOfferingIds", "reservedInstancesOfferings", ReservedInstancesOffering.class)); - } - - public DescribeSecurityGroupsResult describeSecurityGroups() { - return describeSecurityGroups(null); - } - - public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroupsRequest request) { - return new DescribeSecurityGroupsResult() - .withSecurityGroups( - describe(request, "groupIds", "securityGroups", SecurityGroup.class)); - } - - public DescribeSubnetsResult describeSubnets() { - return describeSubnets(null); - } - - public DescribeSubnetsResult describeSubnets(DescribeSubnetsRequest request) { - return new DescribeSubnetsResult() - .withSubnets( - describe(request, "subnetIds", "subnets", Subnet.class)); - } - - public DescribeVpcsResult describeVpcs() { - return describeVpcs(null); - } - - public DescribeVpcsResult describeVpcs(DescribeVpcsRequest request) { - return new DescribeVpcsResult() - .withVpcs( - describe(request, "vpcIds", "vpcs", Vpc.class)); - } - - public DescribeVpcClassicLinkResult describeVpcClassicLink() { - return describeVpcClassicLink(null); - } - - public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLinkRequest request) { - return new DescribeVpcClassicLinkResult() - .withVpcs( - describe(request, "vpcIds", "vpcClassicLinks", VpcClassicLink.class)); - } - - public DescribeClassicLinkInstancesResult describeClassicLinkInstances() { - return describeClassicLinkInstances(null); - } - - public DescribeClassicLinkInstancesResult describeClassicLinkInstances(DescribeClassicLinkInstancesRequest request) { - return new DescribeClassicLinkInstancesResult() - .withInstances( - describe(request, "instanceIds", "classicLinkInstances", ClassicLinkInstance.class)); - } - - //////////////////////////////////// - // - // AmazonElasticLoadBalancing - // - //////////////////////////////////// - public DescribeLoadBalancersResult describeLoadBalancers() { - return describeLoadBalancers((DescribeLoadBalancersRequest)null); - } - - public DescribeLoadBalancersResult describeLoadBalancers(DescribeLoadBalancersRequest request) { - return new DescribeLoadBalancersResult() - .withLoadBalancerDescriptions( - describe(request, "loadBalancerNames", "loadBalancers", LoadBalancerDescription.class)); - } - - // Cannot have overloaded method with same parameters and different return types, for now, no calls to this parameter-less function, so commenting out for now - // public 
com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult describeLoadBalancers() { - // return describeLoadBalancers((com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersRequest)null); - // } - - public com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult describeLoadBalancers(com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersRequest request) { - return new com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult() - .withLoadBalancers( - describe(request, "names", "appLoadBalancers", LoadBalancer.class)); - } - - public DescribeTargetGroupsResult describeTargetGroups() { - return describeTargetGroups(null); - } - - public DescribeTargetGroupsResult describeTargetGroups(DescribeTargetGroupsRequest request) { - return new DescribeTargetGroupsResult() - .withTargetGroups( - describe(request, "names", "targetGroups", TargetGroup.class)); - } - //////////////////////////////////// - - private List describe(AmazonWebServiceRequest request, String idKey, final String object, final Class singleType) { - lastModified.set(null); - final Map metricTags = new HashMap<>(this.metricTags); - metricTags.put("collection", object); - try { - final Collection ids = getRequestIds(request, idKey); - metricTags.put("collectionMode", ids.isEmpty() ? "full" : "byId"); - final JavaType singleMeta = objectMapper.getTypeFactory().constructParametrizedType(Metadata.class, Metadata.class, singleType); - Long mtime = null; - final List results = new ArrayList<>(); - - final Id deserializeJsonTimer = registry.createId("edda.deserializeJson", metricTags); - final Id resultSizeCounter = registry.createId("edda.resultSize", metricTags); - if (ids.isEmpty()) { - HttpEntity entity = getHttpEntity(metricTags, object, null); - try { - final JavaType listMeta = objectMapper.getTypeFactory().constructParametrizedType(List.class, List.class, singleMeta); - final List> metadataResults = registry.timer(deserializeJsonTimer).record(() -> objectMapper.readValue(entity.getContent(), listMeta)); - for (Metadata meta : metadataResults) { - mtime = mtime == null ? meta.mtime : Math.min(mtime, meta.mtime); - results.add(meta.data); - } - } finally { - EntityUtils.consume(entity); - } - } else { - for (String id : ids) { - HttpEntity entity = getHttpEntity(metricTags, object, id); - try { - final Metadata result = registry.timer(deserializeJsonTimer).record(() -> objectMapper.readValue(entity.getContent(), singleMeta)); - mtime = mtime == null ? 
result.mtime : Math.min(mtime, result.mtime); - results.add(result.data); - } finally { - EntityUtils.consume(entity); - } - } - } - registry.counter(resultSizeCounter).increment(results.size()); - lastModified.set(mtime); - return results; - } catch (Exception e) { - log.error(e.getMessage() + " (retries exhausted)"); - - registry.counter(registry.createId("edda.failures", metricTags)).increment(); - final AmazonServiceException ex = new AmazonServiceException("400 Bad Request -- Edda could not find one of the managed objects requested.", e); - ex.setStatusCode(400); - ex.setServiceName(serviceName); - ex.setErrorType(AmazonServiceException.ErrorType.Unknown); - throw ex; - } - } - - private static Collection getRequestIds(AmazonWebServiceRequest request, String idFieldName) { - if (request == null) { - return Collections.emptySet(); - } - try { - Field field = request.getClass().getDeclaredField(idFieldName); - field.setAccessible(true); - Collection collection = (Collection) field.get(request); - return collection == null ? Collections.emptySet() : collection; - } catch (NoSuchFieldException | IllegalAccessException e) { - throw new RuntimeException(e); - } - } - - private HttpEntity getHttpEntity(Map metricTags, String objectName, String key) throws IOException { - final String url = edda + "/REST/v2/aws/" + objectName + (key == null ? ";_expand" : "/" + key) + ";_meta"; - final HttpGet get = new HttpGet(url); - get.setConfig( - RequestConfig - .custom() - .setConnectTimeout(eddaTimeoutConfig.getConnectTimeout()) - .setConnectionRequestTimeout(eddaTimeoutConfig.getConnectionRequestTimeout()) - .setSocketTimeout(eddaTimeoutConfig.getSocketTimeout()) - .build() - ); - - long retryDelay = eddaTimeoutConfig.getRetryBase(); - int retryAttempts = 0; - String lastException = ""; - String lastUrl = ""; - Random r = new Random(); - Exception ex; - - final Id httpExecuteTime = registry.createId("edda.httpExecute", metricTags); - final Id httpErrors = registry.createId("edda.errors", metricTags).withTag("errorType", "http"); - final Id networkErrors = registry.createId("edda.errors", metricTags).withTag("errorType", "network"); - final Id retryDelayMillis = registry.createId("edda.retryDelayMillis", metricTags); - final Id retries = registry.createId("edda.retries", metricTags); - while (retryAttempts < eddaTimeoutConfig.getMaxAttempts()) { - ex = null; - HttpEntity entity = null; - - try { - final HttpResponse response = registry.timer(httpExecuteTime).record(() -> httpClient.execute(get)); - final int statusCode = response.getStatusLine().getStatusCode(); - entity = response.getEntity(); - if (statusCode != HttpStatus.SC_OK) { - lastException = response.getProtocolVersion().toString() + " " + response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase(); - registry.counter(httpErrors.withTag("statusCode", Integer.toString(statusCode))).increment(); - } else { - return entity; - } - } catch (Exception e) { - lastException = e.getClass().getSimpleName() + ": " + e.getMessage(); - ex = e; - registry.counter(networkErrors.withTag("exceptionType", e.getClass().getSimpleName())).increment(); - } finally { - lastUrl = url; - } - - // ensure that the content stream is closed on a non-200 response from edda - EntityUtils.consumeQuietly(entity); - - final String exceptionFormat = "Edda request {} failed with {}"; - if (ex == null) { - log.warn(exceptionFormat, url, lastException); - } else { - log.warn(exceptionFormat, url, lastException, ex); - } - try { - 
registry.counter(retryDelayMillis).increment(retryDelay); - Thread.sleep(retryDelay); - } catch (InterruptedException inter) { - break; - } - registry.counter(retries).increment(); - retryAttempts++; - retryDelay += r.nextInt(eddaTimeoutConfig.getBackoffMillis()); - } - throw new IOException("Edda request " + lastUrl + " failed with " + lastException); - } - - private static class Metadata { - final Long mtime; - final T data; - - @JsonCreator - public Metadata(@JsonProperty("mtime") Long mtime, - @JsonProperty("data") T data) { - this.mtime = mtime; - this.data = data; - } - } -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AwsSdkClientSupplier.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AwsSdkClientSupplier.java deleted file mode 100644 index b474ff112d9..00000000000 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AwsSdkClientSupplier.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.handlers.RequestHandler2; -import com.amazonaws.regions.Regions; -import com.amazonaws.retry.RetryPolicy; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.RateLimiter; -import com.netflix.spectator.api.Counter; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.aws.security.AWSProxy; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixSTSAssumeRoleSessionCredentialsProvider; - -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -import static java.util.Objects.requireNonNull; - -/** - * Factory for shared instances of AWS SDK clients. 
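The heart of the AmazonClientInvocationHandler removed above is the routing trick in invoke(): the handler reflectively looks for a method on itself with the same name and argument types (the Edda-backed describe* overrides), and only when that lookup throws NoSuchMethodException does it delegate to the real SDK client. A distilled sketch of the pattern, with metrics and the ALB kill switch omitted; here the override set is supplied by subclasses:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;

public class EddaFirstHandler implements InvocationHandler {
  private final Object sdkClient;

  public EddaFirstHandler(Object sdkClient) {
    this.sdkClient = sdkClient;
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    Class<?>[] argTypes = new Class<?>[args == null ? 0 : args.length];
    for (int i = 0; i < argTypes.length; i++) {
      argTypes[i] = args[i].getClass();
    }
    try {
      // A same-signature method on this handler serves the call from the Edda cache.
      Method override = getClass().getMethod(method.getName(), argTypes);
      return override.invoke(this, args);
    } catch (NoSuchMethodException e) {
      // No override exists: fall through to the real SDK client.
      return method.invoke(sdkClient, args);
    }
  }
}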
- */ -public class AwsSdkClientSupplier { - - private final Registry registry; - private final LoadingCache, ?> awsSdkClients; - private final RateLimiterSupplier rateLimiterSupplier; - - public AwsSdkClientSupplier(RateLimiterSupplier rateLimiterSupplier, Registry registry, RetryPolicy retryPolicy, List requestHandlers, AWSProxy proxy, boolean useGzip) { - this.rateLimiterSupplier = Objects.requireNonNull(rateLimiterSupplier); - this.registry = Objects.requireNonNull(registry); - awsSdkClients = CacheBuilder - .newBuilder() - .recordStats() - .expireAfterAccess(10, TimeUnit.MINUTES) - .build( - new SdkClientCacheLoader(retryPolicy, requestHandlers, proxy, useGzip) - ); - LoadingCacheMetrics.instrument("awsSdkClientSupplier", registry, awsSdkClients); - } - - public T getClient(Class> impl, Class iface, String account, AWSCredentialsProvider awsCredentialsProvider, String region) { - final RequestHandler2 handler = getRateLimiterHandler(iface, account, region); - final AmazonClientKey key = new AmazonClientKey<>(impl, awsCredentialsProvider, region, handler); - - try { - return iface.cast(awsSdkClients.get(key)); - } catch (ExecutionException executionException) { - if (executionException.getCause() instanceof RuntimeException) { - throw (RuntimeException) executionException.getCause(); - } - throw new RuntimeException("Failed creating amazon client", executionException.getCause()); - } - } - - private RequestHandler2 getRateLimiterHandler(Class sdkInterface, String account, String region) { - final RateLimiter limiter = rateLimiterSupplier.getRateLimiter(sdkInterface, account, region); - final Counter rateLimitCounter = registry.counter("amazonClientProvider.rateLimitDelayMillis", - "clientType", sdkInterface.getSimpleName(), - "account", account, - "region", region == null ? "UNSPECIFIED" : region); - return new RateLimitingRequestHandler(rateLimitCounter, limiter); - } - - private static class SdkClientCacheLoader extends CacheLoader, Object> { - private final RetryPolicy retryPolicy; - private final List requestHandlers; - private final AWSProxy proxy; - private final boolean useGzip; - - public SdkClientCacheLoader(RetryPolicy retryPolicy, List requestHandlers, AWSProxy proxy, boolean useGzip) { - this.retryPolicy = Objects.requireNonNull(retryPolicy); - this.requestHandlers = requestHandlers == null ? 
Collections.emptyList() : ImmutableList.copyOf(requestHandlers); - this.proxy = proxy; - this.useGzip = useGzip; - } - - @Override - public Object load(AmazonClientKey key) throws Exception { - Method m = key.implClass.getDeclaredMethod("standard"); - AwsClientBuilder builder = key.implClass.cast(m.invoke(null)); - - ClientConfiguration clientConfiguration = new ClientConfiguration(); - clientConfiguration.setRetryPolicy(getRetryPolicy(key)); - clientConfiguration.setUseGzip(useGzip); - clientConfiguration.setUserAgentSuffix("spinnaker"); - - if (proxy != null && proxy.isProxyConfigMode()) { - proxy.apply(clientConfiguration); - } - - builder.withCredentials(key.awsCredentialsProvider) - .withClientConfiguration(clientConfiguration); - getRequestHandlers(key).ifPresent(builder::withRequestHandlers); - builder.withRegion(key.getRegion().orElseGet(() -> new SpinnakerAwsRegionProvider().getRegion())); - - return builder.build(); - } - - private Optional getRequestHandlers(AmazonClientKey key) { - List handlers = new ArrayList<>(requestHandlers.size() + 1); - key.getRequestHandler().ifPresent(handlers::add); - handlers.addAll(requestHandlers); - if (handlers.isEmpty()) { - return Optional.empty(); - } - return Optional.of(handlers.toArray(new RequestHandler2[handlers.size()])); - } - - private RetryPolicy getRetryPolicy(AmazonClientKey key) { - - if (!(key.getAwsCredentialsProvider() instanceof NetflixSTSAssumeRoleSessionCredentialsProvider)) { - return retryPolicy; - } - - final RetryPolicy.RetryCondition delegatingRetryCondition = (originalRequest, exception, retriesAttempted) -> { - NetflixSTSAssumeRoleSessionCredentialsProvider stsCredentialsProvider = (NetflixSTSAssumeRoleSessionCredentialsProvider) key.getAwsCredentialsProvider(); - if (exception instanceof AmazonServiceException) { - ((AmazonServiceException) exception).getHttpHeaders().put("targetAccountId", stsCredentialsProvider.getAccountId()); - } - return retryPolicy.getRetryCondition().shouldRetry(originalRequest, exception, retriesAttempted); - }; - - return new RetryPolicy( - delegatingRetryCondition, - retryPolicy.getBackoffStrategy(), - retryPolicy.getMaxErrorRetry(), - retryPolicy.isMaxErrorRetryInClientConfigHonored() - ); - - } - } - - private static class AmazonClientKey { - private final Class> implClass; - private final AWSCredentialsProvider awsCredentialsProvider; - private final Regions region; - private final RequestHandler2 requestHandler; - - public AmazonClientKey(Class> implClass, AWSCredentialsProvider awsCredentialsProvider, String region, RequestHandler2 requestHandler) { - this.implClass = requireNonNull(implClass); - this.awsCredentialsProvider = requireNonNull(awsCredentialsProvider); - this.region = region == null ? 
null : Regions.fromName(region);
-      this.requestHandler = requestHandler;
-    }
-
-    public Class<? extends AwsClientBuilder<?, ?>> getImplClass() {
-      return implClass;
-    }
-
-    public AWSCredentialsProvider getAwsCredentialsProvider() {
-      return awsCredentialsProvider;
-    }
-
-    public Optional<String> getRegion() {
-      return Optional.ofNullable(region).map(Regions::getName);
-    }
-
-    public Optional<RequestHandler2> getRequestHandler() {
-      return Optional.ofNullable(requestHandler);
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
-
-      AmazonClientKey<?> that = (AmazonClientKey<?>) o;
-
-      if (!implClass.equals(that.implClass)) return false;
-      if (!awsCredentialsProvider.equals(that.awsCredentialsProvider)) return false;
-      if (region != that.region) return false;
-      return requestHandler != null ? requestHandler.equals(that.requestHandler) : that.requestHandler == null;
-    }
-
-    @Override
-    public int hashCode() {
-      int result = implClass.hashCode();
-      result = 31 * result + awsCredentialsProvider.hashCode();
-      result = 31 * result + (region != null ? region.hashCode() : 0);
-      result = 31 * result + (requestHandler != null ? requestHandler.hashCode() : 0);
-      return result;
-    }
-  }
-}
diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/ProxyHandlerBuilder.java b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/ProxyHandlerBuilder.java
deleted file mode 100644
index 441a6859ccb..00000000000
--- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/ProxyHandlerBuilder.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2016 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.aws.security.sdkclient;
-
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.collect.ImmutableMap;
-import com.netflix.spectator.api.Registry;
-import com.netflix.spinnaker.clouddriver.aws.security.EddaTemplater;
-import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig;
-import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
-import org.apache.http.client.HttpClient;
-
-import java.lang.reflect.Proxy;
-import java.util.Map;
-
-import static java.util.Objects.requireNonNull;
-
-/**
- * Constructs a JDK dynamic proxy for an AWS service interface that (if enabled for an account) will
- * delegate read requests to Edda and otherwise fallback to the underlying SDK client.
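AwsSdkClientSupplier, removed above, memoizes built SDK clients in a Guava LoadingCache keyed by AmazonClientKey, so requests with an equal (implementation class, credentials provider, region, request handler) tuple share one client, and entries idle for ten minutes are evicted. A reduced model of that behavior, with a String key standing in for AmazonClientKey:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

public class ClientCacheSketch {
  final LoadingCache<String, Object> clients = CacheBuilder.newBuilder()
      .recordStats()
      .expireAfterAccess(10, TimeUnit.MINUTES) // idle clients are dropped, as above
      .build(new CacheLoader<String, Object>() {
        @Override
        public Object load(String key) {
          return new Object(); // stands in for calling build() on the SDK client builder
        }
      });
}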
- */ -public class ProxyHandlerBuilder { - private final AwsSdkClientSupplier awsSdkClientSupplier; - private final HttpClient httpClient; - private final ObjectMapper objectMapper; - private final EddaTemplater eddaTemplater; - private final EddaTimeoutConfig eddaTimeoutConfig; - private final Registry registry; - - public ProxyHandlerBuilder(AwsSdkClientSupplier awsSdkClientSupplier, HttpClient httpClient, ObjectMapper objectMapper, EddaTemplater eddaTemplater, EddaTimeoutConfig eddaTimeoutConfig, Registry registry) { - this.awsSdkClientSupplier = requireNonNull(awsSdkClientSupplier); - this.httpClient = requireNonNull(httpClient); - this.objectMapper = requireNonNull(objectMapper); - this.eddaTemplater = requireNonNull(eddaTemplater); - this.eddaTimeoutConfig = eddaTimeoutConfig; - this.registry = requireNonNull(registry); - } - - public <T extends AwsClientBuilder<T, U>, U> U getProxyHandler(Class<U> interfaceKlazz, Class<T> impl, NetflixAmazonCredentials amazonCredentials, String region) { - return getProxyHandler(interfaceKlazz, impl, amazonCredentials, region, false); - } - - public <T extends AwsClientBuilder<T, U>, U> U getProxyHandler(Class<U> interfaceKlazz, Class<T> impl, NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { - requireNonNull(amazonCredentials, "Credentials cannot be null"); - try { - U delegate = awsSdkClientSupplier.getClient(impl, interfaceKlazz, amazonCredentials.getName(), amazonCredentials.getCredentialsProvider(), region); - if (skipEdda || !amazonCredentials.getEddaEnabled() || eddaTimeoutConfig.getDisabledRegions().contains(region)) { - return delegate; - } - return interfaceKlazz.cast(Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{interfaceKlazz}, - getInvocationHandler(delegate, interfaceKlazz.getSimpleName(), region, amazonCredentials))); - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new RuntimeException("Instantiation of client implementation failed!", e); - } - } - - protected AmazonClientInvocationHandler getInvocationHandler(Object client, String serviceName, String region, NetflixAmazonCredentials amazonCredentials) { - final Map<String, String> baseTags = ImmutableMap.of( - "account", amazonCredentials.getName(), - "region", region, - "serviceName", serviceName); - return new AmazonClientInvocationHandler(client, serviceName, eddaTemplater.getUrl(amazonCredentials.getEdda(), region), - this.httpClient, objectMapper, eddaTimeoutConfig, registry, baseTags); - } - -} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/RegionScopedProviderFactory.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/RegionScopedProviderFactory.groovy index 38204566163..7f00565d68d 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/RegionScopedProviderFactory.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/RegionScopedProviderFactory.groovy @@ -17,14 +17,17 @@ package com.netflix.spinnaker.clouddriver.aws.services import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.ec2.AmazonEC2 +import com.netflix.spinnaker.clouddriver.aws.deploy.AmazonResourceTagger +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProviderAggregator import com.netflix.spinnaker.config.AwsConfiguration -import com.netflix.spinnaker.clouddriver.aws.deploy.AWSServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.aws.deploy.AsgLifecycleHookWorker -import 
com.netflix.spinnaker.clouddriver.aws.deploy.AsgReferenceCopier -import com.netflix.spinnaker.clouddriver.aws.deploy.DefaultLaunchConfigurationBuilder -import com.netflix.spinnaker.clouddriver.aws.deploy.LaunchConfigurationBuilder +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders.* +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AWSServerGroupNameResolver +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgReferenceCopier +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchConfigurationBuilder +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.DefaultLaunchConfigurationBuilder + import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties -import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProvider import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials @@ -41,7 +44,7 @@ class RegionScopedProviderFactory { AmazonClientProvider amazonClientProvider @Autowired - List userDataProviders + UserDataProviderAggregator userDataProviderAggregator @Autowired LocalFileUserDataProperties localFileUserDataProperties @@ -52,6 +55,9 @@ class RegionScopedProviderFactory { @Autowired List clusterProviders + @Autowired(required = false) + Collection amazonResourceTaggers + RegionScopedProvider forRegion(NetflixAmazonCredentials amazonCredentials, String region) { new RegionScopedProvider(amazonCredentials, region) } @@ -111,7 +117,17 @@ class RegionScopedProviderFactory { } LaunchConfigurationBuilder getLaunchConfigurationBuilder() { - new DefaultLaunchConfigurationBuilder(getAutoScaling(), getAsgService(), getSecurityGroupService(), userDataProviders, localFileUserDataProperties, deployDefaults) + new DefaultLaunchConfigurationBuilder(getAutoScaling(), getAsgService(), getSecurityGroupService(), userDataProviderAggregator, localFileUserDataProperties, deployDefaults) + } + + LaunchTemplateService getLaunchTemplateService() { + return new LaunchTemplateService( + amazonEC2, userDataProviderAggregator, localFileUserDataProperties, amazonResourceTaggers + ) + } + + AwsConfiguration.DeployDefaults getDeploymentDefaults() { + return deployDefaults } Eureka getEureka() { @@ -120,5 +136,17 @@ class RegionScopedProviderFactory { } EurekaUtil.getWritableEureka(amazonCredentials.discovery, region) } + + AsgBuilder getAsgBuilderForLaunchConfiguration() { + new AsgWithLaunchConfigurationBuilder(getLaunchConfigurationBuilder(), getAutoScaling(), getAmazonEC2(), getAsgLifecycleHookWorker()) + } + + AsgBuilder getAsgBuilderForLaunchTemplate() { + new AsgWithLaunchTemplateBuilder(getLaunchTemplateService(), getSecurityGroupService(), deployDefaults, getAutoScaling(), getAmazonEC2(), getAsgLifecycleHookWorker()) + } + + AsgBuilder getAsgBuilderForMixedInstancesPolicy() { + new AsgWithMixedInstancesPolicyBuilder(getLaunchTemplateService(), getSecurityGroupService(), deployDefaults, getAutoScaling(), getAmazonEC2(), getAsgLifecycleHookWorker()) + } } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupService.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupService.groovy index 503878bb312..047cfa81b45 100644 --- 
a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupService.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupService.groovy @@ -24,7 +24,10 @@ import com.amazonaws.services.ec2.model.Filter import com.netflix.spinnaker.clouddriver.aws.model.SecurityGroupNotFoundException import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer +import java.util.regex.Pattern + class SecurityGroupService { + private static final Pattern SG_PATTERN = Pattern.compile(/^sg-[0-9a-f]+$/) private final AmazonEC2 amazonEC2 private final SubnetAnalyzer subnetAnalyzer @@ -127,4 +130,47 @@ class SecurityGroupService { } ?: [:] } + List<String> resolveSecurityGroupNamesByStrategy(List<String> securityGroupNamesAndIds, + Closure<Map<String, String>> idResolver) { + if (securityGroupNamesAndIds) { + Collection<String> ids = securityGroupNamesAndIds.toSet() + Collection<String> names = ids.findAll { !SG_PATTERN.matcher(it).matches() } as Set + ids.removeAll(names) + if (ids) { + Map<String, String> resolvedNames = idResolver.call(ids.toList()) + names.addAll(resolvedNames.keySet()) + } + return names.toList() + } else { + return [] + } + } + + List<String> resolveSecurityGroupIdsByStrategy(List<String> securityGroupNamesAndIds, + Closure<Map<String, String>> nameResolver) { + if (securityGroupNamesAndIds) { + Collection<String> names = securityGroupNamesAndIds.toSet() + Collection<String> ids = names.findAll { SG_PATTERN.matcher(it).matches() } as Set + names.removeAll(ids) + if (names) { + def resolvedIds = nameResolver.call(names.toList()) + ids.addAll(resolvedIds.values()) + } + return ids.toList() + } else { + return [] + } + } + + List<String> resolveSecurityGroupIdsWithSubnetType(List<String> securityGroupNamesAndIds, String subnetPurpose) { + return this.resolveSecurityGroupIdsByStrategy(securityGroupNamesAndIds) { List<String> names -> + this.getSecurityGroupIdsWithSubnetPurpose(names, subnetPurpose) + } + } + + List<String> resolveSecurityGroupIdsInVpc(List<String> securityGroupNamesAndIds, String vpcId) { + return this.resolveSecurityGroupIdsByStrategy(securityGroupNamesAndIds) { List<String> names -> + this.getSecurityGroupIds(names, vpcId) + } + } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/config/AwsConfiguration.groovy b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/config/AwsConfiguration.groovy index fb1315d4a10..2758050964d 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/config/AwsConfiguration.groovy +++ b/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/config/AwsConfiguration.groovy @@ -19,64 +19,51 @@ package com.netflix.spinnaker.config import com.amazonaws.retry.RetryPolicy.BackoffStrategy import com.amazonaws.retry.RetryPolicy.RetryCondition import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.awsobjectmapper.AmazonObjectMapper +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties -import com.netflix.spinnaker.clouddriver.aws.agent.CleanupAlarmsAgent -import com.netflix.spinnaker.clouddriver.aws.agent.CleanupDetachedInstancesAgent -import com.netflix.spinnaker.clouddriver.aws.agent.ReconcileClassicLinkSecurityGroupsAgent -import com.netflix.spinnaker.clouddriver.aws.bastion.BastionConfig -import com.netflix.spinnaker.clouddriver.aws.deploy.BlockDeviceConfig -import 
com.netflix.spinnaker.clouddriver.aws.deploy.converters.AllowLaunchAtomicOperationConverter +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchTemplateRollOutConfig import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.BasicAmazonDeployHandler -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.DefaultMigrateClusterConfigurationStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.DefaultMigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.DefaultMigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.DefaultMigrateServerGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateClusterConfigurationStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateServerGroupStrategy import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory import com.netflix.spinnaker.clouddriver.aws.deploy.scalingpolicy.DefaultScalingPolicyCopier import com.netflix.spinnaker.clouddriver.aws.deploy.scalingpolicy.ScalingPolicyCopier +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.DefaultUserDataTokenizer +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProvider import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.NullOpUserDataProvider -import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProvider -import com.netflix.spinnaker.clouddriver.aws.deploy.validators.BasicAmazonDeployDescriptionValidator +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProviderAggregator import com.netflix.spinnaker.clouddriver.aws.event.AfterResizeEventHandler import com.netflix.spinnaker.clouddriver.aws.event.DefaultAfterResizeEventHandler +import com.netflix.spinnaker.clouddriver.aws.health.AmazonHealthIndicator import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice import com.netflix.spinnaker.clouddriver.aws.model.AmazonServerGroup import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonClusterProvider -import com.netflix.spinnaker.clouddriver.aws.security.AWSProxy -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentialsInitializer -import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig +import com.netflix.spinnaker.clouddriver.aws.security.* import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig.Builder -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.services.IdGenerator import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataTokenizer import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import 
com.netflix.spinnaker.clouddriver.security.ProviderUtils +import com.netflix.spinnaker.clouddriver.core.services.Front50Service +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent +import com.netflix.spinnaker.clouddriver.saga.config.SagaAutoConfiguration +import com.netflix.spinnaker.credentials.CredentialsRepository import com.netflix.spinnaker.kork.aws.AwsComponents -import org.springframework.beans.factory.config.ConfigurableBeanFactory +import com.netflix.spinnaker.kork.aws.bastion.BastionConfig +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer +import org.springframework.beans.factory.annotation.Qualifier import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties import org.springframework.context.ApplicationContext -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.ComponentScan -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Import -import org.springframework.context.annotation.Scope - -import java.util.concurrent.ConcurrentHashMap +import org.springframework.context.annotation.* +import org.springframework.core.Ordered +import org.springframework.core.annotation.Order @Configuration @ConditionalOnProperty('aws.enabled') @@ -85,7 +72,8 @@ import java.util.concurrent.ConcurrentHashMap @Import([ BastionConfig, AmazonCredentialsInitializer, - AwsComponents + AwsComponents, + SagaAutoConfiguration ]) class AwsConfiguration { @@ -119,20 +107,35 @@ class AwsConfiguration { } @Bean + @Qualifier("amazonObjectMapper") ObjectMapper amazonObjectMapper() { - return new AmazonObjectMapper() + return new AmazonObjectMapperConfigurer().createConfigured() + } + + @Bean + @Order(Ordered.HIGHEST_PRECEDENCE) + static DefaultUserDataTokenizer defaultUserDataTokenizer() { + return new DefaultUserDataTokenizer() + } + + @Bean + UserDataProviderAggregator userDataProviderAggregator(List userDataProviders, + List userDataTokenizers) { + return new UserDataProviderAggregator(userDataProviders, userDataTokenizers) } @Bean @ConditionalOnProperty(value = 'udf.enabled', matchIfMissing = true) - UserDataProvider userDataProvider() { - new LocalFileUserDataProvider() + UserDataProvider localFileUserDataProvider(LocalFileUserDataProperties localFileUserDataProperties, + Front50Service front50Service, + DefaultUserDataTokenizer defaultUserDataTokenizer) { + return new LocalFileUserDataProvider(localFileUserDataProperties, front50Service, defaultUserDataTokenizer) } @Bean @ConditionalOnMissingBean(ScalingPolicyCopier) - DefaultScalingPolicyCopier defaultScalingPolicyCopier() { - new DefaultScalingPolicyCopier() + DefaultScalingPolicyCopier defaultScalingPolicyCopier(AmazonClientProvider amazonClientProvider, IdGenerator idGenerator) { + new DefaultScalingPolicyCopier(amazonClientProvider, idGenerator) } @Bean @@ -142,48 +145,32 @@ class AwsConfiguration { } @Bean - @ConditionalOnMissingBean - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy(AwsConfigurationProperties awsConfigurationProperties, AmazonClientProvider amazonClientProvider) { - 
new DefaultMigrateSecurityGroupStrategy(amazonClientProvider, awsConfigurationProperties.migration.infrastructureApplications) - } - - @Bean - @ConditionalOnMissingBean - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy(AmazonClientProvider amazonClientProvider, - RegionScopedProviderFactory regionScopedProviderFactory, - DeployDefaults deployDefaults) { - new DefaultMigrateLoadBalancerStrategy(amazonClientProvider, regionScopedProviderFactory, deployDefaults) + @ConfigurationProperties('aws.defaults') + DeployDefaults deployDefaults() { + new DeployDefaults() } @Bean - @ConditionalOnMissingBean - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - MigrateServerGroupStrategy migrateServerGroupStrategy(AmazonClientProvider amazonClientProvider, - BasicAmazonDeployHandler basicAmazonDeployHandler, - RegionScopedProviderFactory regionScopedProviderFactory, - BasicAmazonDeployDescriptionValidator basicAmazonDeployDescriptionValidator, - AllowLaunchAtomicOperationConverter allowLaunchAtomicOperationConverter, - DeployDefaults deployDefaults) { - new DefaultMigrateServerGroupStrategy(amazonClientProvider, basicAmazonDeployHandler, - regionScopedProviderFactory, basicAmazonDeployDescriptionValidator, allowLaunchAtomicOperationConverter, - deployDefaults) + ObjectMapperSubtypeConfigurer.SubtypeLocator awsEventSubtypeLocator() { + return new ObjectMapperSubtypeConfigurer.ClassSubtypeLocator( + SpinnakerEvent.class, + Collections.singletonList("com.netflix.spinnaker.clouddriver.aws") + ); } @Bean - @ConditionalOnMissingBean - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - MigrateClusterConfigurationStrategy migrateClusterConfigurationStrategy(AmazonClientProvider amazonClientProvider, - RegionScopedProviderFactory regionScopedProviderFactory, - DeployDefaults deployDefaults) { - new DefaultMigrateClusterConfigurationStrategy(amazonClientProvider, regionScopedProviderFactory, deployDefaults) + AwsConfigurationProperties awsConfigurationProperties() { + return new AwsConfigurationProperties() } @Bean - @ConfigurationProperties('aws.defaults') - DeployDefaults deployDefaults() { - new DeployDefaults() + AmazonHealthIndicator amazonHealthIndicator( + Registry registry, + CredentialsRepository credentialsRepository, + AmazonClientProvider amazonClientProvider, + AwsConfigurationProperties awsConfigurationProperties + ) { + return new AmazonHealthIndicator(registry, credentialsRepository, amazonClientProvider, awsConfigurationProperties) } public static class DeployDefaults { @@ -197,6 +184,8 @@ class AwsConfiguration { Boolean crossZoneBalancingDefault = true Boolean connectionDrainingDefault = false Integer deregistrationDelayDefault = null + Integer idleTimeout = 60 + Boolean deletionProtection = false } String iamRole String classicLinkSecurityGroupName @@ -225,96 +214,51 @@ class AwsConfiguration { } @Bean - @DependsOn('netflixAmazonCredentials') - BasicAmazonDeployHandler basicAmazonDeployHandler(RegionScopedProviderFactory regionScopedProviderFactory, - AccountCredentialsRepository accountCredentialsRepository, - DeployDefaults deployDefaults, - ScalingPolicyCopier scalingPolicyCopier, - BlockDeviceConfig blockDeviceConfig, - AmazonServerGroupProvider amazonServerGroupProvider) { + @DependsOn('amazonCredentialsRepository') + BasicAmazonDeployHandler basicAmazonDeployHandler( + RegionScopedProviderFactory regionScopedProviderFactory, + CredentialsRepository credentialsRepository, + DeployDefaults deployDefaults, + ScalingPolicyCopier 
scalingPolicyCopier, + BlockDeviceConfig blockDeviceConfig, + LaunchTemplateRollOutConfig launchTemplateRollOutConfig, + AmazonServerGroupProvider amazonServerGroupProvider + ) { new BasicAmazonDeployHandler( regionScopedProviderFactory, - accountCredentialsRepository, + credentialsRepository, amazonServerGroupProvider, deployDefaults, scalingPolicyCopier, - blockDeviceConfig + blockDeviceConfig, + launchTemplateRollOutConfig ) } @Bean - @DependsOn('deployDefaults') - BlockDeviceConfig blockDeviceConfig(DeployDefaults deployDefaults) { - new BlockDeviceConfig(deployDefaults) + LaunchTemplateRollOutConfig launchTemplateRollOutConfig( + DynamicConfigService dynamicConfigService) { + new LaunchTemplateRollOutConfig(dynamicConfigService) } @Bean - @DependsOn('netflixAmazonCredentials') - AwsCleanupProvider awsOperationProvider(AwsConfigurationProperties awsConfigurationProperties, - AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, - DeployDefaults deployDefaults) { - def awsCleanupProvider = new AwsCleanupProvider(Collections.newSetFromMap(new ConcurrentHashMap())) - - synchronizeAwsCleanupProvider(awsConfigurationProperties, awsCleanupProvider, amazonClientProvider, accountCredentialsRepository, deployDefaults) - - awsCleanupProvider + @DependsOn('deployDefaults') + BlockDeviceConfig blockDeviceConfig(DeployDefaults deployDefaults) { + new BlockDeviceConfig(deployDefaults) } @Bean - @DependsOn('netflixAmazonCredentials') - SecurityGroupLookupFactory securityGroupLookup(AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository) { - new SecurityGroupLookupFactory(amazonClientProvider, accountCredentialsRepository) + AwsCleanupProvider awsOperationProvider() { + return new AwsCleanupProvider() } @Bean - AwsCleanupProviderSynchronizerTypeWrapper awsCleanupProviderSynchronizerTypeWrapper() { - new AwsCleanupProviderSynchronizerTypeWrapper() - } - - class AwsCleanupProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return AwsCleanupProviderSynchronizer - } - } - - class AwsCleanupProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - AwsCleanupProviderSynchronizer synchronizeAwsCleanupProvider(AwsConfigurationProperties awsConfigurationProperties, - AwsCleanupProvider awsCleanupProvider, - AmazonClientProvider amazonClientProvider, - AccountCredentialsRepository accountCredentialsRepository, - DeployDefaults deployDefaults) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(awsCleanupProvider) - Set allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, NetflixAmazonCredentials) - - List newlyAddedAgents = [] - - allAccounts.each { account -> - if (!scheduledAccounts.contains(account)) { - account.regions.each { region -> - if (deployDefaults.isReconcileClassicLinkAccount(account)) { - newlyAddedAgents << new ReconcileClassicLinkSecurityGroupsAgent( - amazonClientProvider, account, region.name, deployDefaults - ) - } - } - } - } - - if (!awsCleanupProvider.agentScheduler) { - if (awsConfigurationProperties.cleanup.alarms.enabled) { - awsCleanupProvider.agents.add(new CleanupAlarmsAgent(amazonClientProvider, accountCredentialsRepository, awsConfigurationProperties.cleanup.alarms.daysToKeep)) - } - awsCleanupProvider.agents.add(new CleanupDetachedInstancesAgent(amazonClientProvider, accountCredentialsRepository)) - } - 
awsCleanupProvider.agents.addAll(newlyAddedAgents) - - new AwsCleanupProviderSynchronizer() + @DependsOn('amazonCredentialsRepository') + SecurityGroupLookupFactory securityGroupLookup( + AmazonClientProvider amazonClientProvider, + CredentialsRepository credentialsRepository + ) { + new SecurityGroupLookupFactory(amazonClientProvider, credentialsRepository) } @Bean @@ -325,7 +269,7 @@ class AwsConfiguration { @Bean @ConditionalOnMissingBean(AfterResizeEventHandler) DefaultAfterResizeEventHandler defaultAfterResizeEventHandler() { - return new DefaultAfterResizeEventHandler(); + return new DefaultAfterResizeEventHandler() } class AmazonServerGroupProvider { diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgent.java new file mode 100644 index 00000000000..6e359d4d9cd --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgent.java @@ -0,0 +1,311 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.agent; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; +import com.amazonaws.services.ec2.model.ClassicLinkInstance; +import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesResult; +import com.amazonaws.services.ec2.model.DescribeInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeInstancesResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.Filter; +import com.amazonaws.services.ec2.model.GroupIdentifier; +import com.amazonaws.services.ec2.model.Instance; +import com.amazonaws.services.ec2.model.SecurityGroup; +import com.amazonaws.services.ec2.model.Tag; +import com.amazonaws.services.ec2.model.VpcClassicLink; +import com.google.common.base.Strings; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.RunnableAgent; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; +import com.netflix.spinnaker.config.AwsConfiguration; +import java.time.Clock; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import 
java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ReconcileClassicLinkSecurityGroupsAgent + implements RunnableAgent, CustomScheduledAgent, AccountAware { + + static final String AUTOSCALING_TAG = "aws:autoscaling:groupName"; + static final int RUNNING_STATE = 16; + + private final Logger log = LoggerFactory.getLogger(getClass()); + public static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.SECONDS.toMillis(30); + public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5); + public static final long DEFAULT_REQUIRED_INSTANCE_LIFETIME = TimeUnit.MINUTES.toMillis(5); + + private final AmazonClientProvider amazonClientProvider; + private final NetflixAmazonCredentials account; + private final String region; + private final AwsConfiguration.DeployDefaults deployDefaults; + private final long pollIntervalMillis; + private final long timeoutMillis; + private final long requiredInstanceLifetime; + private final Clock clock; + + @Override + public String getAccountName() { + return account.getName(); + } + + public ReconcileClassicLinkSecurityGroupsAgent( + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + String region, + AwsConfiguration.DeployDefaults deployDefaults) { + this( + amazonClientProvider, + account, + region, + deployDefaults, + DEFAULT_POLL_INTERVAL_MILLIS, + DEFAULT_TIMEOUT_MILLIS, + DEFAULT_REQUIRED_INSTANCE_LIFETIME, + Clock.systemUTC()); + } + + public ReconcileClassicLinkSecurityGroupsAgent( + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + String region, + AwsConfiguration.DeployDefaults deployDefaults, + long pollIntervalMillis, + long timeoutMillis, + long requiredInstanceLifetime, + Clock clock) { + this.amazonClientProvider = amazonClientProvider; + this.account = account; + this.region = region; + this.deployDefaults = deployDefaults; + this.pollIntervalMillis = pollIntervalMillis; + this.timeoutMillis = timeoutMillis; + this.requiredInstanceLifetime = requiredInstanceLifetime; + this.clock = clock; + } + + @Override + public void run() { + if (!deployDefaults.isReconcileClassicLinkAccount(account)) { + return; + } + log.info("Checking classic link security groups in {}/{}", account.getName(), region); + AmazonEC2 ec2 = amazonClientProvider.getAmazonEC2(account, region, true); + List classicLinkVpcIds = + ec2.describeVpcClassicLink().getVpcs().stream() + .filter(VpcClassicLink::getClassicLinkEnabled) + .map(VpcClassicLink::getVpcId) + .collect(Collectors.toList()); + if (classicLinkVpcIds.size() > 1) { + log.warn("Multiple classicLinkVpcs found: {}", classicLinkVpcIds); + throw new IllegalStateException("More than 1 classicLinkVpc found: " + classicLinkVpcIds); + } + + if (classicLinkVpcIds.isEmpty()) { + return; + } + String classicLinkVpcId = classicLinkVpcIds.get(0); + + final Map classicLinkInstances = new HashMap<>(); + DescribeInstancesRequest describeInstances = new DescribeInstancesRequest().withMaxResults(500); + while (true) { + DescribeInstancesResult instanceResult = ec2.describeInstances(describeInstances); + instanceResult.getReservations().stream() + .flatMap(r -> r.getInstances().stream()) + .filter(i -> i.getVpcId() == null) + .filter( + i -> + Optional.ofNullable(i.getState()) + .filter(is -> is.getCode() == RUNNING_STATE) + .isPresent()) + .filter(this::isInstanceOldEnough) + .map( + i -> + new ClassicLinkInstance() + .withInstanceId(i.getInstanceId()) + 
.withVpcId(classicLinkVpcId) + .withTags(i.getTags())) + .forEach(cli -> classicLinkInstances.put(cli.getInstanceId(), cli)); + + if (instanceResult.getNextToken() == null) { + break; + } + describeInstances.setNextToken(instanceResult.getNextToken()); + } + + DescribeClassicLinkInstancesRequest request = + new DescribeClassicLinkInstancesRequest().withMaxResults(1000); + while (true) { + DescribeClassicLinkInstancesResult result = ec2.describeClassicLinkInstances(request); + result.getInstances().forEach(i -> classicLinkInstances.put(i.getInstanceId(), i)); + if (result.getNextToken() == null) { + break; + } + request.setNextToken(result.getNextToken()); + } + + log.info( + "{} existing classic instances in {}/{}", + classicLinkInstances.size(), + account.getName(), + region); + + Map groupNamesToIds = + ec2 + .describeSecurityGroups( + new DescribeSecurityGroupsRequest() + .withFilters(new Filter("vpc-id").withValues(classicLinkVpcId))) + .getSecurityGroups() + .stream() + .collect(Collectors.toMap(SecurityGroup::getGroupName, SecurityGroup::getGroupId)); + + reconcileInstances(ec2, groupNamesToIds, classicLinkInstances.values()); + } + + boolean isInstanceOldEnough(Instance instance) { + return Optional.ofNullable(instance.getLaunchTime()) + .map(Date::getTime) + .map(Instant::ofEpochMilli) + .map(i -> i.plusMillis(requiredInstanceLifetime)) + .map(i -> clock.instant().isAfter(i)) + .orElse(false); + } + + void reconcileInstances( + AmazonEC2 ec2, + Map groupNamesToIds, + Collection instances) { + StringBuilder report = new StringBuilder(); + for (ClassicLinkInstance i : instances) { + List existingClassicLinkGroups = + i.getGroups().stream().map(GroupIdentifier::getGroupId).collect(Collectors.toList()); + + int maxNewGroups = + deployDefaults.getMaxClassicLinkSecurityGroups() - existingClassicLinkGroups.size(); + if (maxNewGroups > 0) { + String asgName = + i.getTags().stream() + .filter(t -> AUTOSCALING_TAG.equals(t.getKey())) + .map(Tag::getValue) + .findFirst() + .orElse(null); + + List candidateGroupNames = getSecurityGroupNames(asgName); + + List missingGroupIds = + candidateGroupNames.stream() + .map(groupNamesToIds::get) + .filter(name -> name != null && !existingClassicLinkGroups.contains(name)) + .limit(maxNewGroups) + .collect(Collectors.toList()); + + if (!missingGroupIds.isEmpty()) { + List groupIds = new ArrayList<>(existingClassicLinkGroups); + groupIds.addAll(missingGroupIds); + if (deployDefaults.getReconcileClassicLinkSecurityGroups() + == AwsConfiguration.DeployDefaults.ReconcileMode.MODIFY) { + try { + ec2.attachClassicLinkVpc( + new AttachClassicLinkVpcRequest() + .withVpcId(i.getVpcId()) + .withGroups(groupIds) + .withInstanceId(i.getInstanceId())); + } catch (AmazonServiceException ase) { + log.warn("Failed calling attachClassicLinkVpc", ase); + } + } + report + .append("\n\t") + .append(Strings.padStart(i.getInstanceId(), 24, ' ')) + .append(missingGroupIds); + } + } + } + if (report.length() > 0) { + log.info( + "Attach to classicLinkVpc: account: " + + account.getName() + + ", region: " + + region + + report); + } + } + + private List getSecurityGroupNames(String asgName) { + Set groups = new LinkedHashSet<>(); + Optional.ofNullable(deployDefaults.getClassicLinkSecurityGroupName()).ifPresent(groups::add); + if (deployDefaults.isAddAppGroupsToClassicLink()) { + Optional.ofNullable(asgName) + .map(Names::parseName) + .ifPresent( + names -> + Optional.ofNullable(names.getApp()) + .ifPresent( + appGroup -> { + groups.add(appGroup); + Optional stackGroup = + 
Optional.ofNullable(names.getStack()) + .map(stack -> appGroup + "-" + stack); + stackGroup.ifPresent(groups::add); + Optional detailGroup = + Optional.ofNullable(names.getDetail()) + .map( + detail -> stackGroup.orElse(appGroup + "-") + "-" + detail); + detailGroup.ifPresent(groups::add); + })); + } + return groups.stream().collect(Collectors.toList()); + } + + @Override + public long getPollIntervalMillis() { + return pollIntervalMillis; + } + + @Override + public long getTimeoutMillis() { + return timeoutMillis; + } + + @Override + public String getAgentType() { + return account.getName() + "/" + region + "/" + getClass().getSimpleName(); + } + + @Override + public String getProviderName() { + return AwsCleanupProvider.PROVIDER_NAME; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/controllers/CloudFormationController.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/controllers/CloudFormationController.java new file mode 100644 index 00000000000..1f11cba6169 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/controllers/CloudFormationController.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
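
To make the getSecurityGroupNames(...) derivation above concrete: for a hypothetical server group "myapp-prod-canary-v003", the candidate classic-link groups come out as the application, application-stack, and application-stack-detail groups (plus any configured base group). A minimal sketch using the same Frigga calls:

import com.netflix.frigga.Names;
import java.util.LinkedHashSet;
import java.util.Optional;
import java.util.Set;

class ClassicLinkGroupNamesSketch {
  public static void main(String[] args) {
    Names names = Names.parseName("myapp-prod-canary-v003"); // hypothetical ASG name
    Set<String> groups = new LinkedHashSet<>();
    String app = names.getApp(); // "myapp"
    groups.add(app);
    Optional<String> stackGroup =
        Optional.ofNullable(names.getStack()).map(stack -> app + "-" + stack); // "myapp-prod"
    stackGroup.ifPresent(groups::add);
    Optional.ofNullable(names.getDetail())
        .map(detail -> stackGroup.orElse(app + "-") + "-" + detail) // "myapp-prod-canary"
        .ifPresent(groups::add);
    System.out.println(groups); // [myapp, myapp-prod, myapp-prod-canary]
  }
}

Note the orElse(app + "-") branch: when no stack is present the detail group becomes "myapp--canary", matching Frigga's cluster-name convention for an empty stack.
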
+ */ + +package com.netflix.spinnaker.clouddriver.aws.controllers; + +import com.netflix.spinnaker.clouddriver.aws.model.AmazonCloudFormationStack; +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonCloudFormationProvider; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.util.List; +import javax.servlet.http.HttpServletRequest; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.util.AntPathMatcher; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.servlet.HandlerMapping; + +@Slf4j +@RequestMapping("/aws/cloudFormation/stacks") +@RestController +class CloudFormationController { + + private AmazonCloudFormationProvider cloudFormationProvider; + + @Autowired + public CloudFormationController(AmazonCloudFormationProvider cloudFormationProvider) { + this.cloudFormationProvider = cloudFormationProvider; + } + + @RequestMapping(method = RequestMethod.GET) + List list( + @RequestParam String accountName, + @RequestParam(required = false, defaultValue = "*") String region) { + log.debug("Cloud formation list stacks for account {}", accountName); + return cloudFormationProvider.list(accountName, region); + } + + @RequestMapping(method = RequestMethod.GET, value = "/**") + AmazonCloudFormationStack get(HttpServletRequest request) { + String pattern = (String) request.getAttribute(HandlerMapping.BEST_MATCHING_PATTERN_ATTRIBUTE); + String stackId = + new AntPathMatcher().extractPathWithinPattern(pattern, request.getRequestURI()); + log.debug("Cloud formation get stack with id {}", stackId); + return cloudFormationProvider + .get(stackId) + .orElseThrow( + () -> + new NotFoundException( + String.format("Cloud Formation stackId %s not found.", stackId))); + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/RoleController.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/controllers/RoleController.java similarity index 86% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/RoleController.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/controllers/RoleController.java index c8ffde927ef..1b6e1a74ffd 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/RoleController.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/controllers/RoleController.java @@ -18,17 +18,16 @@ import com.netflix.spinnaker.clouddriver.aws.model.Role; import com.netflix.spinnaker.clouddriver.aws.model.RoleProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RestController; - import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import 
org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RestController; @RestController @RequestMapping("/roles") @@ -43,10 +42,11 @@ Collection getRoles(@PathVariable String cloudProvider) { return Collections.emptyList(); } - Set roles = roleProviders.stream() - .filter(roleProvider -> roleProvider.getCloudProvider().equals(cloudProvider)) - .flatMap(roleProvider -> roleProvider.getAll().stream()) - .collect(Collectors.toSet()); + Set roles = + roleProviders.stream() + .filter(roleProvider -> roleProvider.getCloudProvider().equals(cloudProvider)) + .flatMap(roleProvider -> roleProvider.getAll().stream()) + .collect(Collectors.toSet()); return roles; } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/AmazonResourceTagger.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/AmazonResourceTagger.java new file mode 100644 index 00000000000..69d752b2224 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/AmazonResourceTagger.java @@ -0,0 +1,42 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import lombok.Data; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Allows for custom tags to be set on resources created as a result of autoscaling activity + * (requires usage of launch templates). + */ +public interface AmazonResourceTagger { + @NotNull + default Collection volumeTags( + @Nullable Map blockDeviceTags, @NotNull String serverGroupName) { + return Collections.emptyList(); + } + + @Data(staticConstructor = "of") + class Tag { + final String key; + final String value; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultAmazonResourceTagger.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultAmazonResourceTagger.java new file mode 100644 index 00000000000..21b0686f2a3 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultAmazonResourceTagger.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
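
The /** handler in CloudFormationController above relies on AntPathMatcher to recover the full wildcard remainder of the URI, which matters because CloudFormation stack IDs are ARNs containing slashes that a plain @PathVariable could not match. A standalone sketch (pattern and ARN values are illustrative):

import org.springframework.util.AntPathMatcher;

class PathWithinPatternSketch {
  public static void main(String[] args) {
    String pattern = "/aws/cloudFormation/stacks/**";
    String uri =
        "/aws/cloudFormation/stacks/arn:aws:cloudformation:us-east-1:123456789012:stack/my-stack/abc-123";
    // extractPathWithinPattern keeps only the part matched by the pattern's
    // wildcards, so the embedded slashes of the stack ARN are preserved.
    String stackId = new AntPathMatcher().extractPathWithinPattern(pattern, uri);
    System.out.println(stackId);
    // arn:aws:cloudformation:us-east-1:123456789012:stack/my-stack/abc-123
  }
}
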
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy; + +import com.netflix.frigga.Names; +import java.util.*; +import java.util.stream.Collectors; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Component; + +/** + * Applies an application and cluster tag on ebs volumes. + * + *
<p>
By default tag names are of the form 'spinnaker:application' and 'spinnaker:cluster'. + */ +@Component +@ConditionalOnProperty( + name = "aws.defaults.resourceTagging.enabled", + havingValue = "true", + matchIfMissing = true) +public class DefaultAmazonResourceTagger implements AmazonResourceTagger { + private final String clusterTag; + private final String applicationTag; + + @Autowired + public DefaultAmazonResourceTagger( + @Value("${aws.defaults.resourceTagging.applicationTag:spinnaker:application}") + String applicationTag, + @Value("${aws.defaults.resourceTagging.clusterTag:spinnaker:cluster}") String clusterTag) { + this.clusterTag = clusterTag; + this.applicationTag = applicationTag; + } + + @NotNull + @Override + public Collection volumeTags( + @Nullable Map blockDeviceTags, @NotNull String serverGroupName) { + Names names = Names.parseName(serverGroupName); + + List tags = new ArrayList<>(); + tags.add(Tag.of(applicationTag, names.getApp())); + tags.add(Tag.of(clusterTag, names.getCluster())); + tags.addAll( + Optional.ofNullable(blockDeviceTags).orElse(Collections.emptyMap()).entrySet().stream() + .map(e -> Tag.of(e.getKey(), e.getValue())) + .collect(Collectors.toList())); + + return tags; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/InstanceTypeUtils.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/InstanceTypeUtils.java new file mode 100644 index 00000000000..9ebcc0580ac --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/InstanceTypeUtils.java @@ -0,0 +1,585 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy; + +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult; +import com.amazonaws.services.ec2.model.InstanceTypeInfo; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice; +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** Utility class for AWS EC2 instance types. 
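
A usage sketch of the DefaultAmazonResourceTagger defined above, assuming the default property values and a made-up server group name (Tag is the Lombok value type declared on the AmazonResourceTagger interface, so getKey()/getValue() and the static of(...) factory come from @Data):

import com.netflix.spinnaker.clouddriver.aws.deploy.AmazonResourceTagger;
import com.netflix.spinnaker.clouddriver.aws.deploy.DefaultAmazonResourceTagger;
import java.util.Collection;
import java.util.Map;

class VolumeTagsSketch {
  public static void main(String[] args) {
    // Constructor args mirror the @Value defaults: application tag, then cluster tag.
    AmazonResourceTagger tagger =
        new DefaultAmazonResourceTagger("spinnaker:application", "spinnaker:cluster");
    Collection<AmazonResourceTagger.Tag> tags =
        tagger.volumeTags(Map.of("team", "platform"), "myapp-prod-v001");
    // Expected: spinnaker:application=myapp, spinnaker:cluster=myapp-prod, team=platform
    tags.forEach(t -> System.out.println(t.getKey() + "=" + t.getValue()));
  }
}
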
*/ +public class InstanceTypeUtils { + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html + private static final Set DEFAULT_EBS_OPTIMIZED_FAMILIES = + ImmutableSet.of( + "a1", "c4", "c5", "d2", "f1", "g3", "i3", "m4", "m5", "p2", "p3", "r4", "r5", "x1", "t3"); + + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html + private static final Set BURSTABLE_PERFORMANCE_FAMILIES = + ImmutableSet.of("t2", "t3", "t3a", "t4g"); + + /** + * Validate compatibility of instance types and AMI. + * + *
<p>
AWS supports two types of virtualization: paravirtual (PV) and hardware virtual machine + * (HVM). All current generation instance types support HVM AMIs only. The previous generation + * instance families that support PV are listed in {@link #PARAVIRTUAL_FAMILIES}. + * + *
<p>
For the case of single / multiple instance types, all of them have to match the architecture + * supported by the AMI. + * + * @param ec2 Amazon ec2 client + * @param ami resolvedAMI + * @param instanceTypes set of one or more instance types requested + */ + public static void validateCompatibilityWithAmi( + AmazonEC2 ec2, ResolvedAmiResult ami, Set instanceTypes) { + final String amiVirtualizationType = ami.getVirtualizationType(); + final String amiArchitecture = ami.getArchitecture(); + final List instanceTypeInfos = getInstanceTypesInfo(ec2, instanceTypes); + + instanceTypeInfos.forEach( + instanceTypeInfo -> { + if (!instanceTypeInfo + .getSupportedVirtualizationTypes() + .contains(ami.getVirtualizationType())) { + throw new IllegalArgumentException( + "Instance type " + + instanceTypeInfo.getInstanceType() + + " does not support " + + "virtualization type " + + amiVirtualizationType + + ". Please select a different image or instance type."); + } + + if (!instanceTypeInfo + .getProcessorInfo() + .getSupportedArchitectures() + .contains(amiArchitecture)) { + throw new IllegalArgumentException( + "Instance type " + + instanceTypeInfo.getInstanceType() + + " does not support " + + "architecture type " + + amiArchitecture + + ". Please select a different image or instance type."); + } + }); + } + + public static boolean getDefaultEbsOptimizedFlag(String instanceType) { + return DEFAULT_EBS_OPTIMIZED_FAMILIES.contains(getInstanceFamily(instanceType)); + } + + public static boolean isBurstingSupported(String instanceType) { + return BURSTABLE_PERFORMANCE_FAMILIES.contains(getInstanceFamily(instanceType)); + } + + public static boolean isBurstingSupportedByAllTypes(Set instanceTypes) { + // return true iff all instance types support bursting + for (String type : instanceTypes) { + if (!isBurstingSupported(type)) { + return false; + } + } + return true; + } + + private static String getInstanceFamily(String instanceType) { + if (instanceType != null && instanceType.contains(".")) { + return instanceType.split("\\.")[0]; + } + + return ""; + } + + private static List getInstanceTypesInfo( + AmazonEC2 ec2, Set instanceTypesReq) { + final List allInstanceTypesInfo = new ArrayList<>(); + final DescribeInstanceTypesRequest request = new DescribeInstanceTypesRequest(); + while (true) { + final DescribeInstanceTypesResult result = ec2.describeInstanceTypes(request); + allInstanceTypesInfo.addAll(result.getInstanceTypes()); + if (result.getNextToken() != null) { + request.withNextToken(result.getNextToken()); + } else { + break; + } + } + + return allInstanceTypesInfo.stream() + .filter(info -> instanceTypesReq.contains(info.getInstanceType())) + .collect(Collectors.toList()); + } + + /** Class to handle AWS EC2 block device configuration. 
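
Before the block-device table below, a quick usage sketch of the family-based helpers above. These are pure string checks on the instance-type family, so no EC2 client is needed; the expected values in the comments follow directly from the sets shown ("m5" is in the EBS-optimized set, "t3" is burstable, "c3" is not):

import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils;
import java.util.Set;

class InstanceTypeChecksSketch {
  public static void main(String[] args) {
    System.out.println(InstanceTypeUtils.getDefaultEbsOptimizedFlag("m5.large")); // true
    System.out.println(InstanceTypeUtils.isBurstingSupported("t3.micro"));        // true
    // false: every type must be burstable, and c3 is not in the burstable set
    System.out.println(
        InstanceTypeUtils.isBurstingSupportedByAllTypes(Set.of("t3.small", "c3.large")));
  }
}
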
*/ + public static class BlockDeviceConfig { + + private final DeployDefaults deployDefaults; + private final Map<String, List<AmazonBlockDevice>> blockDevicesByInstanceType; + + public BlockDeviceConfig(DeployDefaults deployDefaults) { + this.deployDefaults = deployDefaults; + this.blockDevicesByInstanceType = + ImmutableMap.<String, List<AmazonBlockDevice>>builder() + .put("a1.medium", sizedBlockDevicesForEbs(40)) + .put("a1.large", sizedBlockDevicesForEbs(40)) + .put("a1.xlarge", sizedBlockDevicesForEbs(80)) + .put("a1.2xlarge", sizedBlockDevicesForEbs(80)) + .put("a1.4xlarge", sizedBlockDevicesForEbs(120)) + .put("a1.metal", sizedBlockDevicesForEbs(120)) + .put("c1.medium", enumeratedBlockDevicesWithVirtualName(1)) + .put("c1.xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("c3.large", enumeratedBlockDevicesWithVirtualName(2)) + .put("c3.xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c3.2xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c3.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c3.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c4.large", sizedBlockDevicesForEbs(40)) + .put("c4.xlarge", sizedBlockDevicesForEbs(80)) + .put("c4.2xlarge", sizedBlockDevicesForEbs(80)) + .put("c4.4xlarge", sizedBlockDevicesForEbs(120)) + .put("c4.8xlarge", sizedBlockDevicesForEbs(120)) + .put("c5.large", sizedBlockDevicesForEbs(40)) + .put("c5.xlarge", sizedBlockDevicesForEbs(80)) + .put("c5.2xlarge", sizedBlockDevicesForEbs(80)) + .put("c5.4xlarge", sizedBlockDevicesForEbs(120)) + .put("c5.9xlarge", sizedBlockDevicesForEbs(120)) + .put("c5.12xlarge", sizedBlockDevicesForEbs(120)) + .put("c5.18xlarge", sizedBlockDevicesForEbs(120)) + .put("c5.24xlarge", sizedBlockDevicesForEbs(120)) + .put("c5d.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5d.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5d.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5d.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5d.9xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5d.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c5d.18xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c5d.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("c5d.metal", enumeratedBlockDevicesWithVirtualName(4)) + .put("c5n.large", sizedBlockDevicesForEbs(40)) + .put("c5n.xlarge", sizedBlockDevicesForEbs(80)) + .put("c5n.2xlarge", sizedBlockDevicesForEbs(80)) + .put("c5n.4xlarge", sizedBlockDevicesForEbs(120)) + .put("c5n.9xlarge", sizedBlockDevicesForEbs(120)) + .put("c5n.18xlarge", sizedBlockDevicesForEbs(120)) + .put("c5n.metal", sizedBlockDevicesForEbs(120)) + .put("c5a.large", sizedBlockDevicesForEbs(40)) + .put("c5a.xlarge", sizedBlockDevicesForEbs(80)) + .put("c5a.2xlarge", sizedBlockDevicesForEbs(80)) + .put("c5a.4xlarge", sizedBlockDevicesForEbs(120)) + .put("c5a.8xlarge", sizedBlockDevicesForEbs(120)) + .put("c5a.12xlarge", sizedBlockDevicesForEbs(120)) + .put("c5a.16xlarge", sizedBlockDevicesForEbs(120)) + .put("c5a.24xlarge", sizedBlockDevicesForEbs(120)) + .put("c5ad.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5ad.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5ad.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c5ad.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c5ad.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c5ad.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c5ad.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c5ad.24xlarge", 
enumeratedBlockDevicesWithVirtualName(2)) + .put("c6g.medium", sizedBlockDevicesForEbs(40)) + .put("c6g.large", sizedBlockDevicesForEbs(40)) + .put("c6g.xlarge", sizedBlockDevicesForEbs(80)) + .put("c6g.2xlarge", sizedBlockDevicesForEbs(80)) + .put("c6g.4xlarge", sizedBlockDevicesForEbs(120)) + .put("c6g.8xlarge", sizedBlockDevicesForEbs(120)) + .put("c6g.12xlarge", sizedBlockDevicesForEbs(120)) + .put("c6g.16xlarge", sizedBlockDevicesForEbs(120)) + .put("c6g.metal", sizedBlockDevicesForEbs(120)) + .put("c6gd.medium", enumeratedBlockDevicesWithVirtualName(1)) + .put("c6gd.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("c6gd.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c6gd.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c6gd.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c6gd.8xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("c6gd.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c6gd.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("c6gd.metal", enumeratedBlockDevicesWithVirtualName(2)) + .put("c6gn.medium", sizedBlockDevicesForEbs(40)) + .put("c6gn.large", sizedBlockDevicesForEbs(40)) + .put("c6gn.xlarge", sizedBlockDevicesForEbs(80)) + .put("c6gn.2xlarge", sizedBlockDevicesForEbs(80)) + .put("c6gn.4xlarge", sizedBlockDevicesForEbs(120)) + .put("c6gn.8xlarge", sizedBlockDevicesForEbs(120)) + .put("c6gn.12xlarge", sizedBlockDevicesForEbs(120)) + .put("c6gn.16xlarge", sizedBlockDevicesForEbs(120)) + .put("cc2.8xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("cg1.4xlarge", sizedBlockDevicesForEbs(120)) + .put("cr1.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("d2.xlarge", enumeratedBlockDevicesWithVirtualName(3)) + .put("d2.2xlarge", enumeratedBlockDevicesWithVirtualName(6)) + .put("d2.4xlarge", enumeratedBlockDevicesWithVirtualName(12)) + .put("d2.8xlarge", enumeratedBlockDevicesWithVirtualName(24)) + .put("d3.xlarge", enumeratedBlockDevicesWithVirtualName(3)) + .put("d3.2xlarge", enumeratedBlockDevicesWithVirtualName(6)) + .put("d3.4xlarge", enumeratedBlockDevicesWithVirtualName(12)) + .put("d3.8xlarge", enumeratedBlockDevicesWithVirtualName(24)) + .put("d3en.xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("d3en.2xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("d3en.4xlarge", enumeratedBlockDevicesWithVirtualName(8)) + .put("d3en.6xlarge", enumeratedBlockDevicesWithVirtualName(12)) + .put("d3en.8xlarge", enumeratedBlockDevicesWithVirtualName(16)) + .put("d3en.12xlarge", enumeratedBlockDevicesWithVirtualName(24)) + .put("f1.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("f1.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("f1.16xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("g2.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g2.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("g3s.xlarge", sizedBlockDevicesForEbs(80)) + .put("g3.4xlarge", sizedBlockDevicesForEbs(120)) + .put("g3.8xlarge", sizedBlockDevicesForEbs(120)) + .put("g3.16xlarge", sizedBlockDevicesForEbs(120)) + .put("g4ad.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4ad.8xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4ad.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("g4dn.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4dn.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4dn.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4dn.8xlarge", enumeratedBlockDevicesWithVirtualName(1)) 
+ .put("g4dn.12xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4dn.16xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("g4dn.metal", enumeratedBlockDevicesWithVirtualName(2)) + .put("h1.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("h1.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("h1.8xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("h1.16xlarge", enumeratedBlockDevicesWithVirtualName(8)) + .put("hs1.8xlarge", enumeratedBlockDevicesWithVirtualName(24)) + .put("i2.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("i2.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("i2.2xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("i2.4xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("i2.8xlarge", enumeratedBlockDevicesWithVirtualName(8)) + .put("i3.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("i3.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("i3.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("i3.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("i3.8xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("i3.16xlarge", enumeratedBlockDevicesWithVirtualName(8)) + .put("i3.metal", enumeratedBlockDevicesWithVirtualName(8)) + .put("i3en.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("i3en.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("i3en.2xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("i3en.3xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("i3en.6xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("i3en.12xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("i3en.24xlarge", enumeratedBlockDevicesWithVirtualName(8)) + .put("inf1.xlarge", sizedBlockDevicesForEbs(80)) + .put("inf1.2xlarge", sizedBlockDevicesForEbs(80)) + .put("inf1.6xlarge", sizedBlockDevicesForEbs(120)) + .put("inf1.24xlarge", sizedBlockDevicesForEbs(120)) + .put("m1.small", enumeratedBlockDevicesWithVirtualName(1)) + .put("m1.medium", enumeratedBlockDevicesWithVirtualName(1)) + .put("m1.large", enumeratedBlockDevicesWithVirtualName(2)) + .put("m1.xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("m2.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m2.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m2.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m3.medium", enumeratedBlockDevicesWithVirtualName(1)) + .put("m3.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("m3.xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m3.2xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m4.large", sizedBlockDevicesForEbs(40)) + .put("m4.xlarge", sizedBlockDevicesForEbs(80)) + .put("m4.2xlarge", sizedBlockDevicesForEbs(80)) + .put("m4.4xlarge", sizedBlockDevicesForEbs(120)) + .put("m4.10xlarge", sizedBlockDevicesForEbs(120)) + .put("m4.16xlarge", sizedBlockDevicesForEbs(120)) + .put("m5.large", sizedBlockDevicesForEbs(40)) + .put("m5.xlarge", sizedBlockDevicesForEbs(80)) + .put("m5.2xlarge", sizedBlockDevicesForEbs(80)) + .put("m5.4xlarge", sizedBlockDevicesForEbs(120)) + .put("m5.8xlarge", sizedBlockDevicesForEbs(120)) + .put("m5.12xlarge", sizedBlockDevicesForEbs(120)) + .put("m5.16xlarge", sizedBlockDevicesForEbs(120)) + .put("m5.24xlarge", sizedBlockDevicesForEbs(120)) + .put("m5d.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5d.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5d.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5d.4xlarge", 
enumeratedBlockDevicesWithVirtualName(2)) + .put("m5d.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5d.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5d.16xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("m5d.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("m5n.large", sizedBlockDevicesForEbs(40)) + .put("m5n.xlarge", sizedBlockDevicesForEbs(80)) + .put("m5n.2xlarge", sizedBlockDevicesForEbs(80)) + .put("m5n.4xlarge", sizedBlockDevicesForEbs(120)) + .put("m5n.8xlarge", sizedBlockDevicesForEbs(120)) + .put("m5n.12xlarge", sizedBlockDevicesForEbs(120)) + .put("m5n.16xlarge", sizedBlockDevicesForEbs(120)) + .put("m5n.24xlarge", sizedBlockDevicesForEbs(120)) + .put("m5dn.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5dn.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5dn.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5dn.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5dn.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5dn.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5dn.16xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("m5dn.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("m5a.large", sizedBlockDevicesForEbs(40)) + .put("m5a.xlarge", sizedBlockDevicesForEbs(80)) + .put("m5a.2xlarge", sizedBlockDevicesForEbs(80)) + .put("m5a.4xlarge", sizedBlockDevicesForEbs(120)) + .put("m5a.8xlarge", sizedBlockDevicesForEbs(120)) + .put("m5a.12xlarge", sizedBlockDevicesForEbs(120)) + .put("m5a.16xlarge", sizedBlockDevicesForEbs(120)) + .put("m5a.24xlarge", sizedBlockDevicesForEbs(120)) + .put("m5ad.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5ad.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5ad.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m5ad.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5ad.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5ad.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5ad.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m5ad.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("m5zn.large", sizedBlockDevicesForEbs(40)) + .put("m5zn.xlarge", sizedBlockDevicesForEbs(80)) + .put("m5zn.2xlarge", sizedBlockDevicesForEbs(80)) + .put("m5zn.3xlarge", sizedBlockDevicesForEbs(120)) + .put("m5zn.6xlarge", sizedBlockDevicesForEbs(120)) + .put("m5zn.12xlarge", sizedBlockDevicesForEbs(120)) + .put("m6g.medium", sizedBlockDevicesForEbs(40)) + .put("m6g.large", sizedBlockDevicesForEbs(40)) + .put("m6g.xlarge", sizedBlockDevicesForEbs(80)) + .put("m6g.2xlarge", sizedBlockDevicesForEbs(80)) + .put("m6g.4xlarge", sizedBlockDevicesForEbs(120)) + .put("m6g.8xlarge", sizedBlockDevicesForEbs(120)) + .put("m6g.12xlarge", sizedBlockDevicesForEbs(120)) + .put("m6g.16xlarge", sizedBlockDevicesForEbs(120)) + .put("m6g.metal", sizedBlockDevicesForEbs(120)) + .put("m6gd.medium", enumeratedBlockDevicesWithVirtualName(1)) + .put("m6gd.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("m6gd.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m6gd.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m6gd.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m6gd.8xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("m6gd.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m6gd.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("m6gd.metal", enumeratedBlockDevicesWithVirtualName(2)) + .put("r3.large", 
enumeratedBlockDevicesWithVirtualName(1)) + .put("r3.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r3.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r3.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r3.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r4.large", sizedBlockDevicesForEbs(40)) + .put("r4.xlarge", sizedBlockDevicesForEbs(80)) + .put("r4.2xlarge", sizedBlockDevicesForEbs(80)) + .put("r4.4xlarge", sizedBlockDevicesForEbs(120)) + .put("r4.8xlarge", sizedBlockDevicesForEbs(120)) + .put("r4.16xlarge", sizedBlockDevicesForEbs(120)) + .put("r5.large", sizedBlockDevicesForEbs(40)) + .put("r5.xlarge", sizedBlockDevicesForEbs(80)) + .put("r5.2xlarge", sizedBlockDevicesForEbs(80)) + .put("r5.4xlarge", sizedBlockDevicesForEbs(120)) + .put("r5.8xlarge", sizedBlockDevicesForEbs(120)) + .put("r5.12xlarge", sizedBlockDevicesForEbs(120)) + .put("r5.16xlarge", sizedBlockDevicesForEbs(120)) + .put("r5.24xlarge", sizedBlockDevicesForEbs(120)) + .put("r5d.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5d.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5d.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5d.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5d.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5d.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5d.16xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("r5d.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("r5n.large", sizedBlockDevicesForEbs(40)) + .put("r5n.xlarge", sizedBlockDevicesForEbs(80)) + .put("r5n.2xlarge", sizedBlockDevicesForEbs(80)) + .put("r5n.4xlarge", sizedBlockDevicesForEbs(120)) + .put("r5n.8xlarge", sizedBlockDevicesForEbs(120)) + .put("r5n.12xlarge", sizedBlockDevicesForEbs(120)) + .put("r5n.16xlarge", sizedBlockDevicesForEbs(120)) + .put("r5n.24xlarge", sizedBlockDevicesForEbs(120)) + .put("r5dn.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5dn.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5dn.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5dn.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5dn.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5dn.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5dn.16xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("r5dn.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("r5a.large", sizedBlockDevicesForEbs(40)) + .put("r5a.xlarge", sizedBlockDevicesForEbs(80)) + .put("r5a.2xlarge", sizedBlockDevicesForEbs(80)) + .put("r5a.4xlarge", sizedBlockDevicesForEbs(120)) + .put("r5a.8xlarge", sizedBlockDevicesForEbs(120)) + .put("r5a.12xlarge", sizedBlockDevicesForEbs(120)) + .put("r5a.16xlarge", sizedBlockDevicesForEbs(120)) + .put("r5a.24xlarge", sizedBlockDevicesForEbs(120)) + .put("r5ad.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5ad.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5ad.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r5ad.4xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5ad.8xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5ad.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5ad.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r5ad.24xlarge", enumeratedBlockDevicesWithVirtualName(4)) + .put("r5b.large", sizedBlockDevicesForEbs(40)) + .put("r5b.xlarge", sizedBlockDevicesForEbs(80)) + .put("r5b.2xlarge", sizedBlockDevicesForEbs(80)) + .put("r5b.4xlarge", 
sizedBlockDevicesForEbs(120)) + .put("r5b.8xlarge", sizedBlockDevicesForEbs(120)) + .put("r5b.12xlarge", sizedBlockDevicesForEbs(120)) + .put("r5b.16xlarge", sizedBlockDevicesForEbs(120)) + .put("r5b.24xlarge", sizedBlockDevicesForEbs(120)) + .put("r6g.medium", sizedBlockDevicesForEbs(40)) + .put("r6g.large", sizedBlockDevicesForEbs(40)) + .put("r6g.xlarge", sizedBlockDevicesForEbs(80)) + .put("r6g.2xlarge", sizedBlockDevicesForEbs(80)) + .put("r6g.4xlarge", sizedBlockDevicesForEbs(120)) + .put("r6g.8xlarge", sizedBlockDevicesForEbs(120)) + .put("r6g.12xlarge", sizedBlockDevicesForEbs(120)) + .put("r6g.16xlarge", sizedBlockDevicesForEbs(120)) + .put("r6g.metal", sizedBlockDevicesForEbs(120)) + .put("r6gd.medium", enumeratedBlockDevicesWithVirtualName(1)) + .put("r6gd.large", enumeratedBlockDevicesWithVirtualName(1)) + .put("r6gd.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r6gd.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r6gd.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r6gd.8xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("r6gd.12xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r6gd.16xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("r6gd.metal", enumeratedBlockDevicesWithVirtualName(2)) + .put("p2.xlarge", sizedBlockDevicesForEbs(80)) + .put("p2.8xlarge", sizedBlockDevicesForEbs(120)) + .put("p2.16xlarge", sizedBlockDevicesForEbs(120)) + .put("p3.2xlarge", sizedBlockDevicesForEbs(80)) + .put("p3.8xlarge", sizedBlockDevicesForEbs(120)) + .put("p3.16xlarge", sizedBlockDevicesForEbs(120)) + .put("p3dn.24xlarge", sizedBlockDevicesForEbs(120)) + .put("p4d.24xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("t1.micro", Collections.emptyList()) + .put("t2.nano", Collections.emptyList()) + .put("t2.micro", Collections.emptyList()) + .put("t2.small", Collections.emptyList()) + .put("t2.medium", Collections.emptyList()) + .put("t2.large", Collections.emptyList()) + .put("t2.xlarge", Collections.emptyList()) + .put("t2.2xlarge", Collections.emptyList()) + .put("t3.nano", Collections.emptyList()) + .put("t3.micro", Collections.emptyList()) + .put("t3.small", Collections.emptyList()) + .put("t3.medium", Collections.emptyList()) + .put("t3.large", Collections.emptyList()) + .put("t3.xlarge", Collections.emptyList()) + .put("t3.2xlarge", Collections.emptyList()) + .put("t3a.nano", Collections.emptyList()) + .put("t3a.micro", Collections.emptyList()) + .put("t3a.small", Collections.emptyList()) + .put("t3a.medium", Collections.emptyList()) + .put("t3a.large", Collections.emptyList()) + .put("t3a.xlarge", Collections.emptyList()) + .put("t3a.2xlarge", Collections.emptyList()) + .put("t4g.nano", Collections.emptyList()) + .put("t4g.micro", Collections.emptyList()) + .put("t4g.small", Collections.emptyList()) + .put("t4g.medium", Collections.emptyList()) + .put("t4g.large", Collections.emptyList()) + .put("t4g.xlarge", Collections.emptyList()) + .put("t4g.2xlarge", Collections.emptyList()) + .put("x1.16xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("x1.32xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("x1e.xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("x1e.2xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("x1e.4xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("x1e.8xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("x1e.16xlarge", enumeratedBlockDevicesWithVirtualName(1)) + .put("x1e.32xlarge", enumeratedBlockDevicesWithVirtualName(2)) + .put("x2gd.medium", 
enumeratedBlockDevicesWithVirtualName(1))
+          .put("x2gd.large", enumeratedBlockDevicesWithVirtualName(1))
+          .put("x2gd.xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("x2gd.2xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("x2gd.4xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("x2gd.8xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("x2gd.12xlarge", enumeratedBlockDevicesWithVirtualName(2))
+          .put("x2gd.16xlarge", enumeratedBlockDevicesWithVirtualName(2))
+          .put("z1d.large", enumeratedBlockDevicesWithVirtualName(1))
+          .put("z1d.xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("z1d.2xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("z1d.3xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("z1d.6xlarge", enumeratedBlockDevicesWithVirtualName(1))
+          .put("z1d.12xlarge", enumeratedBlockDevicesWithVirtualName(2))
+          .build();
+    }
+
+    public List<AmazonBlockDevice> getBlockDevicesForInstanceType(String instanceType) {
+      final List<AmazonBlockDevice> blockDevices = blockDevicesByInstanceType.get(instanceType);
+      if (blockDevices == null && deployDefaults.getUnknownInstanceTypeBlockDevice() != null) {
+        // return a default block device mapping if no instance-specific default exists
+        return ImmutableList.of(deployDefaults.getUnknownInstanceTypeBlockDevice());
+      }
+
+      return blockDevices;
+    }
+
+    public Set<String> getInstanceTypesWithBlockDeviceMappings() {
+      return blockDevicesByInstanceType.keySet();
+    }
+
+    private List<AmazonBlockDevice> enumeratedBlockDevicesWithVirtualName(int size) {
+      char[] letters = "abcdefghijklmnopqrstuvwxyz".toCharArray();
+      return IntStream.range(0, size)
+          .mapToObj(
+              i ->
+                  new AmazonBlockDevice.Builder()
+                      .deviceName("/dev/sd" + letters[i + 1])
+                      .virtualName("ephemeral" + i)
+                      .build())
+          .collect(ImmutableList.toImmutableList());
+    }
+
+    private List<AmazonBlockDevice> defaultBlockDevicesForEbsOnly() {
+      return ImmutableList.of(
+          new AmazonBlockDevice.Builder()
+              .deviceName("/dev/sdb")
+              .size(125)
+              .volumeType(deployDefaults.getDefaultBlockDeviceType())
+              .build(),
+          new AmazonBlockDevice.Builder()
+              .deviceName("/dev/sdc")
+              .size(125)
+              .volumeType(deployDefaults.getDefaultBlockDeviceType())
+              .build());
+    }
+
+    private List<AmazonBlockDevice> sizedBlockDevicesForEbs(int capacity) {
+      return ImmutableList.of(
+          new AmazonBlockDevice.Builder()
+              .deviceName("/dev/sdb")
+              .size(capacity)
+              .volumeType(deployDefaults.getDefaultBlockDeviceType())
+              .build());
+    }
+  }
+}
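The helpers above give this configuration class a simple contract: instance-store types map to enumerated ephemeral devices, EBS-only types to sized EBS volumes, and anything unknown to the configured fallback. A hedged sketch of that behavior (the blockDeviceConfig instance and its deploy-defaults wiring are assumptions, not shown in this diff):

    // Sketch only; instance types taken from the table above.
    List<AmazonBlockDevice> m5d = blockDeviceConfig.getBlockDevicesForInstanceType("m5d.xlarge");
    // -> one device: /dev/sdb with virtualName "ephemeral0"
    List<AmazonBlockDevice> m5 = blockDeviceConfig.getBlockDevicesForInstanceType("m5.xlarge");
    // -> one EBS device: /dev/sdb, 80 GB, the configured default volume type
    List<AmazonBlockDevice> other = blockDeviceConfig.getBlockDevicesForInstanceType("m7i.large");
    // -> deployDefaults.getUnknownInstanceTypeBlockDevice() if configured, otherwise null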
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ModifyServerGroupUtils.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ModifyServerGroupUtils.java
new file mode 100644
index 00000000000..bc3c7ac0f05
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ModifyServerGroupUtils.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy;
+
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription;
+import java.lang.reflect.Field;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.reflect.FieldUtils;
+
+public class ModifyServerGroupUtils {
+
+  public static Set<String> getNonMetadataFieldsSetInReq(
+      final ModifyServerGroupLaunchTemplateDescription reqDescription) {
+    final ModifyServerGroupLaunchTemplateDescription descWithDefaults =
+        new ModifyServerGroupLaunchTemplateDescription();
+    final Set<String> nonMetadataFieldsSet = new HashSet<>();
+
+    // get all field names for the description type
+    final Set<String> allFieldNames =
+        FieldUtils.getAllFieldsList(reqDescription.getClass()).stream()
+            .map(Field::getName)
+            .collect(Collectors.toSet());
+
+    // get the fields set in the request
+    allFieldNames.stream()
+        .forEach(
+            fieldName -> {
+              // ignore Groovy object's special fields
+              if (fieldName.contains("$") || fieldName.equals("metaClass")) {
+                return;
+              }
+              Object defaultValue = descWithDefaults.getProperty(fieldName);
+              Object requestedValue = reqDescription.getProperty(fieldName);
+              boolean isMetadataField =
+                  ModifyServerGroupLaunchTemplateDescription.getMetadataFieldNames()
+                      .contains(fieldName);
+              if (!Objects.equals(requestedValue, defaultValue) && !isMetadataField) {
+                nonMetadataFieldsSet.add(fieldName);
+              }
+            });
+
+    return nonMetadataFieldsSet;
+  }
+}
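To make the reflection-based diffing concrete, a minimal sketch, assuming the description exposes a standard setter for instanceType (the expected result is annotated in comments):

    // Sketch: a request that changes only the instance type.
    ModifyServerGroupLaunchTemplateDescription req =
        new ModifyServerGroupLaunchTemplateDescription();
    req.setInstanceType("m5.xlarge"); // a non-metadata field
    Set<String> changed = ModifyServerGroupUtils.getNonMetadataFieldsSetInReq(req);
    // changed == {"instanceType"}; metadata fields that merely identify the target
    // server group are ignored, so an empty set means a no-op modification.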
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgConfigHelper.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgConfigHelper.java
new file mode 100644
index 00000000000..3a215cf907c
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgConfigHelper.java
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy.asg;
+
+import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
+import com.amazonaws.services.autoscaling.model.Ebs;
+import com.amazonaws.services.autoscaling.model.LaunchConfiguration;
+import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides;
+import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification;
+import com.amazonaws.services.ec2.model.CreditSpecification;
+import com.amazonaws.services.ec2.model.EbsBlockDevice;
+import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMapping;
+import com.amazonaws.services.ec2.model.LaunchTemplateEbsBlockDevice;
+import com.amazonaws.services.ec2.model.LaunchTemplateVersion;
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription;
+import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice;
+import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory;
+import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService;
+import com.netflix.spinnaker.clouddriver.helpers.OperationPoller;
+import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults;
+import com.netflix.spinnaker.kork.annotations.VisibleForTesting;
+import java.time.Clock;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeFormatterBuilder;
+import java.time.temporal.ChronoField;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+
+/**
+ * A helper class for utility methods related to {@link AutoScalingWorker.AsgConfiguration} and
+ * general ASG related configuration.
+ */
+@Slf4j
+public class AsgConfigHelper {
+
+  public static String createName(String baseName, String suffix) {
+    StringBuilder name = new StringBuilder(baseName);
+    if (StringUtils.isNotEmpty(suffix)) {
+      name.append('-').append(suffix);
+    } else {
+      name.append('-').append(createDefaultSuffix());
+    }
+    return name.toString();
+  }
+
+  /**
+   * Set resolved security groups for an application.
+   *
+   * @param asgConfig Asg Configuration to work with
+   * @param securityGroupService SecurityGroup service
+   * @param deployDefaults defaults
+   * @return asgConfig with resolved security groups and classicLinkVpcSecurityGroups
+   */
+  public static AutoScalingWorker.AsgConfiguration setAppSecurityGroups(
+      AutoScalingWorker.AsgConfiguration asgConfig,
+      SecurityGroupService securityGroupService,
+      DeployDefaults deployDefaults) {
+
+    // resolve security group ids and names in request
+    List<String> securityGroupIds =
+        securityGroupService.resolveSecurityGroupIdsWithSubnetType(
+            asgConfig.getSecurityGroups(), asgConfig.getSubnetType());
+
+    // conditionally, find or create an application security group
+    if ((securityGroupIds == null || securityGroupIds.isEmpty())
+        || (deployDefaults.isAddAppGroupToServerGroup()
+            && securityGroupIds.size() < deployDefaults.getMaxSecurityGroups())) {
+
+      // get a mapping of security group names to ids and find an existing security group for
+      // application
+      final Map<String, String> names =
+          securityGroupService.getSecurityGroupNamesFromIds(securityGroupIds);
+      String existingAppGroup =
+          (names != null && !names.isEmpty())
+              ?
names.keySet().stream() + .filter(it -> it.contains(asgConfig.getApplication())) + .findFirst() + .orElse(null) + : null; + + // if no existing security group, find by subnet type / create a new security group for + // application + if (StringUtils.isEmpty(existingAppGroup)) { + String applicationSecurityGroupId = + (String) + OperationPoller.retryWithBackoff( + o -> + createSecurityGroupForApp( + securityGroupService, + asgConfig.getApplication(), + asgConfig.getSubnetType()), + 500, + 3); + securityGroupIds.add(applicationSecurityGroupId); + } + } + asgConfig.setSecurityGroups(securityGroupIds.stream().distinct().collect(Collectors.toList())); + + if (asgConfig.getClassicLinkVpcSecurityGroups() != null + && !asgConfig.getClassicLinkVpcSecurityGroups().isEmpty()) { + if (StringUtils.isEmpty(asgConfig.getClassicLinkVpcId())) { + throw new IllegalStateException( + "Can't provide classic link security groups without classiclink vpc Id"); + } + List classicLinkIds = + securityGroupService.resolveSecurityGroupIdsInVpc( + asgConfig.getClassicLinkVpcSecurityGroups(), asgConfig.getClassicLinkVpcId()); + asgConfig.setClassicLinkVpcSecurityGroups(classicLinkIds); + } + + log.info( + "Configured resolved security groups {} for application {}.", + securityGroupIds, + asgConfig.getApplication()); + return asgConfig; + } + + /** + * Get block device mappings specified in an ASG. + * + * @param asg AWS AutoScalingGroup + * @param asgRegionScopedProvider regionScopedProvider for the asg + * @return a list of AmazonBlockDevice indicating the block device mapping specified in the asg + * @throws IllegalStateException if certain AWS entities are not found / show error conditions. + */ + public static List getBlockDeviceMappingForAsg( + final AutoScalingGroup asg, + RegionScopedProviderFactory.RegionScopedProvider asgRegionScopedProvider) { + if (asg.getLaunchConfigurationName() != null) { + final LaunchConfiguration lc = + asgRegionScopedProvider + .getAsgService() + .getLaunchConfiguration(asg.getLaunchConfigurationName()); + if (lc == null) { + throw new IllegalStateException( + "Launch configuration " + + asg.getLaunchConfigurationName() + + " was requested but was not found for ASG with launch configuration " + + asg.getAutoScalingGroupName()); + } + return transformBlockDeviceMapping(lc.getBlockDeviceMappings()); + } else if (asg.getLaunchTemplate() != null) { + final LaunchTemplateVersion ltVersion = + asgRegionScopedProvider + .getLaunchTemplateService() + .getLaunchTemplateVersion(asg.getLaunchTemplate()) + .orElseThrow( + () -> + new IllegalStateException( + "Launch template " + + asg.getLaunchTemplate() + + " was requested but was not found for ASG with launch template " + + asg.getAutoScalingGroupName())); + return transformLaunchTemplateBlockDeviceMapping( + ltVersion.getLaunchTemplateData().getBlockDeviceMappings()); + } else if (asg.getMixedInstancesPolicy() != null) { + final LaunchTemplateSpecification ltSpec = + asg.getMixedInstancesPolicy().getLaunchTemplate().getLaunchTemplateSpecification(); + final LaunchTemplateVersion ltVersion = + asgRegionScopedProvider + .getLaunchTemplateService() + .getLaunchTemplateVersion(ltSpec) + .orElseThrow( + () -> + new IllegalStateException( + "Launch template " + + ltSpec + + " was requested but was not found for ASG with mixed instances policy " + + asg.getAutoScalingGroupName())); + return transformLaunchTemplateBlockDeviceMapping( + ltVersion.getLaunchTemplateData().getBlockDeviceMappings()); + } else { + throw new IllegalStateException( + 
String.format( + "An AWS ASG %s is expected to have a launch configuration or launch template or mixed instances policy", + asg.getAutoScalingGroupName())); + } + } + + /** + * Get instance types specified in an ASG. + * + * @param asg AWS AutoScalingGroup + * @param asgRegionScopedProvider regionScopedProvider for the asg + * @return a list of instance types with a single type for launch configuration / launch template + * and multiple types for mixed instances policy backed ASG + * @throws IllegalStateException if certain AWS entities are not found / show error conditions. + */ + public static Set getAllowedInstanceTypesForAsg( + final AutoScalingGroup asg, + RegionScopedProviderFactory.RegionScopedProvider asgRegionScopedProvider) { + if (asg.getMixedInstancesPolicy() != null + && asg.getMixedInstancesPolicy().getLaunchTemplate().getOverrides() != null) { + return asg.getMixedInstancesPolicy().getLaunchTemplate().getOverrides().stream() + .map(override -> override.getInstanceType()) + .collect(Collectors.toSet()); + } else { + return Collections.singleton(getTopLevelInstanceTypeForAsg(asg, asgRegionScopedProvider)); + } + } + + /** + * Get the top-level instance type specified in an ASG. For the case of multiple instance types, + * top-level instance type is the one specified in the launch template (NOT overrides). The + * top-level instance type in an ASG is nothing but the {@link + * BasicAmazonDeployDescription#getInstanceType()} + * + * @param asg asg AWS AutoScalingGroup + * @param asgRegionScopedProvider regionScopedProvider for the asg + * @return a single instance type which corresponds to {@link + * BasicAmazonDeployDescription#getInstanceType()} + * @throws IllegalStateException if certain AWS entities are not found / show error conditions. 
+ */ + public static String getTopLevelInstanceTypeForAsg( + final AutoScalingGroup asg, + RegionScopedProviderFactory.RegionScopedProvider asgRegionScopedProvider) { + if (asg.getLaunchConfigurationName() != null) { + final LaunchConfiguration lc = + asgRegionScopedProvider + .getAsgService() + .getLaunchConfiguration(asg.getLaunchConfigurationName()); + if (lc == null) { + throw new IllegalStateException( + String.format( + "Launch configuration %s was requested but was not found for ASG with launch configuration %s.", + asg.getLaunchConfigurationName(), asg.getAutoScalingGroupName())); + } + return lc.getInstanceType(); + } else if (asg.getLaunchTemplate() != null) { + final LaunchTemplateVersion ltVersion = + asgRegionScopedProvider + .getLaunchTemplateService() + .getLaunchTemplateVersion(asg.getLaunchTemplate()) + .orElseThrow( + () -> + new IllegalStateException( + String.format( + "Launch template %s was requested but was not found for ASG with launch template %s.", + asg.getLaunchTemplate(), asg.getAutoScalingGroupName()))); + + return ltVersion.getLaunchTemplateData().getInstanceType(); + } else if (asg.getMixedInstancesPolicy() != null) { + final LaunchTemplateSpecification ltSpec = + asg.getMixedInstancesPolicy().getLaunchTemplate().getLaunchTemplateSpecification(); + final LaunchTemplateVersion ltVersion = + asgRegionScopedProvider + .getLaunchTemplateService() + .getLaunchTemplateVersion(ltSpec) + .orElseThrow( + () -> + new IllegalStateException( + String.format( + "Launch template %s was requested but was not found for ASG with mixed instances policy %s.", + ltSpec, asg.getAutoScalingGroupName()))); + + return ltVersion.getLaunchTemplateData().getInstanceType(); + } else { + throw new IllegalStateException( + "An AWS ASG is expected to include a launch configuration or launch template or mixed instances policy " + + "but neither was found in ASG " + + asg); + } + } + + private static String createSecurityGroupForApp( + SecurityGroupService securityGroupService, String application, String subnetType) { + + // find security group by subnet type + String applicationSecurityGroupId = + securityGroupService.getSecurityGroupForApplication(application, subnetType); + + // conditionally, create security group + if (StringUtils.isEmpty(applicationSecurityGroupId)) { + log.debug("Creating security group for application {}", application); + applicationSecurityGroupId = + securityGroupService.createSecurityGroup(application, subnetType); + } + + return applicationSecurityGroupId; + } + + private static final AtomicReference CLOCK_REF = + new AtomicReference<>(Clock.systemDefaultZone()); + + @VisibleForTesting + static void setClock(Clock clock) { + CLOCK_REF.setOpaque(clock); + } + + private static final DateTimeFormatter SUFFIX_DATE_FORMATTER = + new DateTimeFormatterBuilder() + .appendValue(ChronoField.MONTH_OF_YEAR, 2) + .appendValue(ChronoField.DAY_OF_MONTH, 2) + .appendValue(ChronoField.YEAR) + .appendValue(ChronoField.HOUR_OF_DAY, 2) + .appendValue(ChronoField.MINUTE_OF_HOUR, 2) + .appendValue(ChronoField.SECOND_OF_MINUTE, 2) + .toFormatter(); + + @VisibleForTesting + static String createDefaultSuffix() { + return LocalDateTime.now(CLOCK_REF.getOpaque()).format(SUFFIX_DATE_FORMATTER); + } + + /** + * Transform AWS BlockDeviceMapping to {@link AmazonBlockDevice}. Used while extracting launch + * settings from AWS AutoScalingGroup or AMI. 
+ * + * @param blockDeviceMappings AWS BlockDeviceMappings + * @return list of AmazonBlockDevice + */ + protected static List transformBlockDeviceMapping( + List blockDeviceMappings) { + return blockDeviceMappings.stream() + .map( + bdm -> { + AmazonBlockDevice amzBd = + new AmazonBlockDevice.Builder() + .deviceName(bdm.getDeviceName()) + .virtualName(bdm.getVirtualName()) + .build(); + + if (bdm.getEbs() != null) { + final Ebs ebs = bdm.getEbs(); + amzBd.setIops(ebs.getIops()); + amzBd.setThroughput(ebs.getThroughput()); + amzBd.setDeleteOnTermination(ebs.getDeleteOnTermination()); + amzBd.setSize(ebs.getVolumeSize()); + amzBd.setVolumeType(ebs.getVolumeType()); + amzBd.setSnapshotId(ebs.getSnapshotId()); + if (ebs.getSnapshotId() == null) { + // only set encryption if snapshotId isn't provided. AWS will error out otherwise + amzBd.setEncrypted(ebs.getEncrypted()); + } + } + return amzBd; + }) + .collect(Collectors.toUnmodifiableList()); + } + + /** + * Transform AWS EC2 BlockDeviceMapping to {@link AmazonBlockDevice}. Used to convert the AMI + * BlockDevices information into AmazonBlockDevice + * + * @param blockDeviceMappings AWS EC2 BlockDeviceMappings + * @return list of AmazonBlockDevice + */ + protected static List convertBlockDevices( + List blockDeviceMappings) { + return blockDeviceMappings.stream() + .map( + bdm -> { + AmazonBlockDevice amzBd = + new AmazonBlockDevice.Builder() + .deviceName(bdm.getDeviceName()) + .virtualName(bdm.getVirtualName()) + .build(); + + if (bdm.getEbs() != null) { + final EbsBlockDevice ebs = bdm.getEbs(); + amzBd.setIops(ebs.getIops()); + amzBd.setDeleteOnTermination(ebs.getDeleteOnTermination()); + amzBd.setSize(ebs.getVolumeSize()); + amzBd.setVolumeType(ebs.getVolumeType()); + amzBd.setSnapshotId(ebs.getSnapshotId()); + if (ebs.getKmsKeyId() != null) { + amzBd.setKmsKeyId(ebs.getKmsKeyId()); + } + if (ebs.getSnapshotId() == null) { + // only set encryption if snapshotId isn't provided. AWS will error out otherwise + amzBd.setEncrypted(ebs.getEncrypted()); + } + } + return amzBd; + }) + .collect(Collectors.toList()); + } + + /** + * Transform AWS BlockDeviceMapping (found in EC2 LaunchTemplate) to {@link AmazonBlockDevice}. + * Used while extracting launch settings from AWS AutoScalingGroup. + * + * @param launchTemplateBlockDeviceMappings AWS LaunchTemplate BlockDeviceMappings + * @return list of AmazonBlockDevice + */ + public static List transformLaunchTemplateBlockDeviceMapping( + List launchTemplateBlockDeviceMappings) { + return launchTemplateBlockDeviceMappings.stream() + .map( + ltBdm -> { + AmazonBlockDevice amzBd = + new AmazonBlockDevice.Builder() + .deviceName(ltBdm.getDeviceName()) + .virtualName(ltBdm.getVirtualName()) + .build(); + + if (ltBdm.getEbs() != null) { + final LaunchTemplateEbsBlockDevice ebs = ltBdm.getEbs(); + amzBd.setIops(ebs.getIops()); + amzBd.setThroughput(ebs.getThroughput()); + amzBd.setDeleteOnTermination(ebs.getDeleteOnTermination()); + amzBd.setSize(ebs.getVolumeSize()); + amzBd.setVolumeType(ebs.getVolumeType()); + amzBd.setSnapshotId(ebs.getSnapshotId()); + if (ebs.getSnapshotId() == null) { + // only set encryption if snapshotId isn't provided. AWS will error out otherwise + amzBd.setEncrypted(ebs.getEncrypted()); + } + } + return amzBd; + }) + .collect(Collectors.toUnmodifiableList()); + } + + /** + * Method to evaluate the value to be set for unlimitedCpuCredits given a value from source ASG. 
+   * Used during CloneServerGroup and ModifyServerGroupLaunchTemplate operations, when the value
+   * is set in the source ASG.
+   *
+   * @param sourceAsgCreditSpec credit specification from a source ASG.
+   * @param isBurstingSupportedByAllTypesRequested boolean, true if bursting is supported by all
+   *     instance types in the request, including changed types, if any.
+   * @return Boolean, non-null only if all instance types (description.instanceType and
+   *     description.launchTemplateOverridesForInstanceType.instanceType) support bursting. The
+   *     non-null value comes from the source credit specification.
+   */
+  public static Boolean getUnlimitedCpuCreditsFromAncestorLt(
+      final CreditSpecification sourceAsgCreditSpec,
+      boolean isBurstingSupportedByAllTypesRequested) {
+    if (sourceAsgCreditSpec == null) {
+      return null;
+    }
+
+    // return non-null unlimitedCpuCredits iff ALL requested instance types (including changed
+    // types, if any) support a CPU credits specification, to ensure compatibility
+    return isBurstingSupportedByAllTypesRequested
+        ? "unlimited".equals(sourceAsgCreditSpec.getCpuCredits())
+        : null;
+  }
+
+  /**
+   * Transform overrides of type
+   * BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType to the AWS type
+   * LaunchTemplateOverrides.
+   *
+   * @param overridesInReq overrides from the deploy description
+   * @return list of AWS LaunchTemplateOverrides, sorted by priority
+   */
+  public static List<LaunchTemplateOverrides> getLaunchTemplateOverrides(
+      List<BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType> overridesInReq) {
+    if (overridesInReq == null || overridesInReq.isEmpty()) {
+      return null;
+    }
+
+    // sort overrides by priority
+    overridesInReq.sort(
+        Comparator.comparing(
+            BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType::getPriority,
+            Comparator.nullsLast(Comparator.naturalOrder())));
+
+    // transform to LaunchTemplateOverrides
+    List<LaunchTemplateOverrides> ltOverrides =
+        overridesInReq.stream()
+            .map(
+                o ->
+                    new LaunchTemplateOverrides()
+                        .withInstanceType(o.getInstanceType())
+                        .withWeightedCapacity(o.getWeightedCapacity()))
+            .collect(Collectors.toCollection(ArrayList::new));
+
+    return ltOverrides;
+  }
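+
+  // Note: getLaunchTemplateOverrides sorts the supplied list in place, so callers should pass a
+  // mutable list; entries with a null priority sort last, and lower priority values are placed
+  // earlier in the list sent to AWS.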
+
+  /**
+   * Transform overrides of the AWS type LaunchTemplateOverrides to the type
+   * BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType. There is no way to make
+   * the priority numbers match the ones in the original description, as an AWS ASG just uses an
+   * ordered list to maintain order; priority is therefore assigned in sequential order.
+   *
+   * @param ltOverrides AWS launch template overrides
+   * @return list of BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType
+   */
+  public static List<BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType>
+      getDescriptionOverrides(List<LaunchTemplateOverrides> ltOverrides) {
+    if (ltOverrides == null || ltOverrides.isEmpty()) {
+      return null;
+    }
+
+    // transform to BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType
+    AtomicInteger priority = new AtomicInteger(1);
+    List<BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType> descOverrides =
+        ltOverrides.stream()
+            .map(
+                ltOv ->
+                    new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType
+                        .Builder()
+                        .instanceType(ltOv.getInstanceType())
+                        .weightedCapacity(ltOv.getWeightedCapacity())
+                        .priority(priority.getAndIncrement())
+                        .build())
+            .collect(Collectors.toList());
+
+    return descOverrides;
+  }
+}
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AutoScalingWorker.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AutoScalingWorker.java
new file mode 100644
index 00000000000..1ca6f9b1dbd
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AutoScalingWorker.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy.asg;
+
+import com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders.AsgBuilder;
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription;
+import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook;
+import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice;
+import com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType;
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
+import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory;
+import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * A worker class dedicated to the deployment of "applications", following many of Netflix's common
+ * AWS conventions.
+ */ +@Slf4j +public class AutoScalingWorker { + private static final String AWS_PHASE = "AWS_DEPLOY"; + private RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider; + private LaunchTemplateRollOutConfig launchTemplateRollOutConfig; + + AutoScalingWorker( + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider, + LaunchTemplateRollOutConfig launchTemplateRollOutConfig) { + this.regionScopedProvider = regionScopedProvider; + this.launchTemplateRollOutConfig = launchTemplateRollOutConfig; + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + /** + * Initiates the activity of deployment. This will involve: + * + *

+ * <ol>
+ *   <li>Look up or, if not found, create a security group with a name that matches the supplied
+ *       "application";
+ *   <li>Look up security group ids for the names provided as "securityGroups";
+ *   <li>Look up an ancestor ASG based on Netflix naming conventions, and bring its security
+ *       groups to the new ASG;
+ *   <li>Retrieve user data from all available {@link
+ *       com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProvider}s;
+ *   <li>Create the ASG's Launch Configuration or Launch Template with the user data and security
+ *       groups;
+ *   <li>Create a new ASG in the subnets found from the optionally supplied subnetType.
+ * </ol>
+ * + * @return the name of the newly deployed ASG + */ + public String deploy(AsgConfiguration asgConfig) { + getTask().updateStatus(AWS_PHASE, "Beginning Amazon deployment."); + + if (asgConfig.startDisabled != null && asgConfig.startDisabled) { + asgConfig.suspendedProcesses.addAll( + AutoScalingProcessType.getDisableProcesses().stream() + .map(AutoScalingProcessType::name) + .collect(Collectors.toList())); + } + + getTask().updateStatus(AWS_PHASE, "Beginning ASG deployment."); + + AWSServerGroupNameResolver awsServerGroupNameResolver = + regionScopedProvider.getAWSServerGroupNameResolver(); + String asgName; + if (asgConfig.sequence != null) { + asgName = + awsServerGroupNameResolver.generateServerGroupName( + asgConfig.application, + asgConfig.stack, + asgConfig.freeFormDetails, + asgConfig.sequence, + false); + } else { + asgName = + awsServerGroupNameResolver.resolveNextServerGroupName( + asgConfig.application, + asgConfig.stack, + asgConfig.freeFormDetails, + asgConfig.ignoreSequence); + } + + AsgBuilder asgBuilder; + if (shouldSetLaunchTemplate(asgConfig)) { + // process the IPv6 setting conditionally + if (asgConfig.getAssociateIPv6Address() == null) { + String asgConfigEnv = asgConfig.getCredentials().getEnvironment(); + Boolean autoEnableIPv6 = launchTemplateRollOutConfig.isIpv6EnabledForEnv(asgConfigEnv); + asgConfig.setAssociateIPv6Address(autoEnableIPv6); + } + + if (asgConfig.shouldUseMixedInstancesPolicy()) { + asgBuilder = regionScopedProvider.getAsgBuilderForMixedInstancesPolicy(); + } else { + asgBuilder = regionScopedProvider.getAsgBuilderForLaunchTemplate(); + } + } else { + asgBuilder = regionScopedProvider.getAsgBuilderForLaunchConfiguration(); + } + + return asgBuilder.build(getTask(), AWS_PHASE, asgName, asgConfig); + } + + /** This is used to gradually roll out launch template. */ + private boolean shouldSetLaunchTemplate(final AsgConfiguration asgConfig) { + // Request level flag that forces launch configurations. 
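+    // A null setLaunchTemplate is treated the same as false: the deploy falls back to a launch
+    // configuration unless the request explicitly opts in.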
+ if (asgConfig.setLaunchTemplate == null || !asgConfig.setLaunchTemplate) { + return false; + } + + return launchTemplateRollOutConfig.shouldUseLaunchTemplateForReq( + asgConfig.application, asgConfig.credentials, asgConfig.region); + } + + @Data + @Builder + @AllArgsConstructor + public static class AsgConfiguration { + private String application; + private String region; + private NetflixAmazonCredentials credentials; + private String stack; + private String freeFormDetails; + private String ami; + private String classicLinkVpcId; + private List classicLinkVpcSecurityGroups; + private String instanceType; + private String iamRole; + private String keyPair; + private String base64UserData; + private Boolean legacyUdf; + private UserDataOverride userDataOverride; + private Integer sequence; + private Boolean ignoreSequence; + private Boolean startDisabled; + private Boolean associatePublicIpAddress; + private String subnetType; + private List subnetIds; + private Integer cooldown; + private Collection enabledMetrics; + private Integer healthCheckGracePeriod; + private String healthCheckType; + private String spotMaxPrice; + private Set suspendedProcesses; + private Collection terminationPolicies; + private String kernelId; + private String ramdiskId; + private Boolean instanceMonitoring; + private Boolean ebsOptimized; + private Collection classicLoadBalancers; + private Collection targetGroupArns; + private List securityGroups; + private List availabilityZones; + private List blockDevices; + private Map tags; + private Map blockDeviceTags; + private List lifecycleHooks; + private Boolean capacityRebalance; + private int minInstances; + private int maxInstances; + private int desiredInstances; + + /** Launch Templates properties * */ + private Boolean setLaunchTemplate; + + private Boolean requireIMDSv2; + private Boolean associateIPv6Address; + private Boolean unlimitedCpuCredits; + private BasicAmazonDeployDescription.LaunchTemplatePlacement placement; + private List + licenseSpecifications; + private Boolean enableEnclave; + + /** Mixed Instances Policy properties * */ + private String onDemandAllocationStrategy; + + private Integer onDemandBaseCapacity; + private Integer onDemandPercentageAboveBaseCapacity; + private String spotAllocationStrategy; + private Integer spotInstancePools; + private List + launchTemplateOverridesForInstanceType; + + /** + * AsgConfiguration object makes sense only when created with all or some of the configuration + * fields. 
+ */ + private AsgConfiguration() {} + + /** + * Helper function to determine if the ASG should be created with mixed instances policy, when + * launch templates are enabled + * + * @return boolean true if mixed instances policy parameters are used, false otherwise + */ + public boolean shouldUseMixedInstancesPolicy() { + for (String fieldName : BasicAmazonDeployDescription.getMixedInstancesPolicyFieldNames()) { + try { + if (this.getClass().getDeclaredField(fieldName).get(this) != null) { + return true; + } + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + return false; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchConfigurationBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchConfigurationBuilder.java new file mode 100644 index 00000000000..5bb10198779 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchConfigurationBuilder.java @@ -0,0 +1,134 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg; + +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import java.util.List; +import java.util.Map; +import lombok.Builder; +import lombok.Value; + +public interface LaunchConfigurationBuilder { + + /** + * Extracts the LaunchConfigurationSettings from an existing LaunchConfiguration. 
+   *
+   * @param credentials the account in which to find the launch configuration
+   * @param region the region in which to find the launch configuration
+   * @param launchConfigurationName the name of the launch configuration
+   * @return LaunchConfigurationSettings for the launch configuration
+   */
+  LaunchConfigurationSettings buildSettingsFromLaunchConfiguration(
+      AccountCredentials credentials, String region, String launchConfigurationName);
+
+  /**
+   * Constructs a LaunchConfiguration with the provided settings.
+   *
+   * @param application the name of the application - used to construct a default security group if
+   *     none are present
+   * @param subnetType the subnet type for security groups in the launch configuration
+   * @param settings the settings for the launch configuration
+   * @param legacyUdf whether to explicitly use or not use legacyUdf mode - can be null, which will
+   *     fall through to the application default
+   * @param userDataOverride whether to allow the user supplied user data to override any default
+   *     user data
+   * @return the name of the new launch configuration
+   */
+  String buildLaunchConfiguration(
+      String application,
+      String subnetType,
+      LaunchConfigurationSettings settings,
+      Boolean legacyUdf,
+      UserDataOverride userDataOverride);
+
+  @Value
+  class LaunchConfigurationSettings {
+    String account;
+    String environment;
+    String accountType;
+    String region;
+    String baseName;
+    String suffix;
+    String ami;
+    String iamRole;
+    String classicLinkVpcId;
+    List<String> classicLinkVpcSecurityGroups;
+    String instanceType;
+    String keyPair;
+    String base64UserData;
+    Boolean associatePublicIpAddress;
+    String kernelId;
+    String ramdiskId;
+    boolean ebsOptimized;
+    String spotMaxPrice;
+    boolean instanceMonitoring;
+    List<AmazonBlockDevice> blockDevices;
+    List<String> securityGroups;
+    Map<String, String> blockDeviceTags;
+
+    @Builder(toBuilder = true)
+    private LaunchConfigurationSettings(
+        String account,
+        String environment,
+        String accountType,
+        String region,
+        String baseName,
+        String suffix,
+        String ami,
+        String iamRole,
+        String classicLinkVpcId,
+        List<String> classicLinkVpcSecurityGroups,
+        String instanceType,
+        String keyPair,
+        String base64UserData,
+        Boolean associatePublicIpAddress,
+        String kernelId,
+        String ramdiskId,
+        boolean ebsOptimized,
+        String spotMaxPrice,
+        boolean instanceMonitoring,
+        List<AmazonBlockDevice> blockDevices,
+        List<String> securityGroups,
+        Map<String, String> blockDeviceTags) {
+      this.account = account;
+      this.environment = environment;
+      this.accountType = accountType;
+      this.region = region;
+      this.baseName = baseName;
+      this.suffix = suffix;
+      this.ami = ami;
+      this.iamRole = iamRole;
+      this.classicLinkVpcId = classicLinkVpcId;
+      this.classicLinkVpcSecurityGroups = classicLinkVpcSecurityGroups;
+      this.instanceType = instanceType;
+      this.keyPair = keyPair;
+      this.base64UserData = base64UserData;
+      this.associatePublicIpAddress = associatePublicIpAddress;
+      this.kernelId = kernelId;
+      this.ramdiskId = ramdiskId;
+      this.ebsOptimized = ebsOptimized;
+      this.spotMaxPrice = spotMaxPrice;
+      this.instanceMonitoring = instanceMonitoring;
+      this.blockDevices = blockDevices;
+      this.securityGroups = securityGroups;
+      this.blockDeviceTags = blockDeviceTags;
+    }
+  }
+}
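Because @Value makes the settings immutable, the @Builder(toBuilder = true) above is what lets call sites derive a modified copy rather than mutate one. A minimal sketch, assuming an existingSettings instance:

    // Sketch: copy existing settings, overriding a single field.
    LaunchConfigurationBuilder.LaunchConfigurationSettings tweaked =
        existingSettings.toBuilder().instanceType("m5.xlarge").build();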
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchTemplateRollOutConfig.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchTemplateRollOutConfig.java
new file mode 100644
index 00000000000..73d0878f4e1
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchTemplateRollOutConfig.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy.asg;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
+import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService;
+import java.util.Arrays;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+
+@Slf4j
+public class LaunchTemplateRollOutConfig {
+  private DynamicConfigService dynamicConfigService;
+
+  public LaunchTemplateRollOutConfig(DynamicConfigService dynamicConfigService) {
+    this.dynamicConfigService = dynamicConfigService;
+  }
+
+  public boolean isIpv6EnabledForEnv(String env) {
+    return dynamicConfigService.isEnabled("aws.features.launch-templates.ipv6." + env, false);
+  }
+
+  /** This is used to gradually roll out launch templates. */
+  public boolean shouldUseLaunchTemplateForReq(
+      String applicationInReq, NetflixAmazonCredentials credentialsInReq, String regionInReq) {
+
+    // Property flag to turn off the launch template feature. The caching agent may require
+    // bouncing the Java process.
+    if (!dynamicConfigService.isEnabled("aws.features.launch-templates", false)) {
+      log.debug("Launch Template feature disabled via configuration.");
+      return false;
+    }
+
+    // This is a comma separated list of applications to exclude
+    String excludedApps =
+        dynamicConfigService.getConfig(
+            String.class, "aws.features.launch-templates.excluded-applications", "");
+    for (String excludedApp : excludedApps.split(",")) {
+      if (excludedApp.trim().equals(applicationInReq)) {
+        return false;
+      }
+    }
+
+    // This is a comma separated list of accounts to exclude
+    String excludedAccounts =
+        dynamicConfigService.getConfig(
+            String.class, "aws.features.launch-templates.excluded-accounts", "");
+    for (String excludedAccount : excludedAccounts.split(",")) {
+      if (excludedAccount.trim().equals(credentialsInReq.getName())) {
+        return false;
+      }
+    }
+
+    // Allows everything that is not excluded
+    if (dynamicConfigService.isEnabled("aws.features.launch-templates.all-applications", false)) {
+      return true;
+    }
+
+    // Application allow list with the following format:
+    // app1:account:region1,app2:account:region1
+    // This allows more control over what account and region pairs to enable for this deployment.
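+    // e.g., a hypothetical setting: "myapp:my-account:us-east-1,otherapp:my-account:eu-west-1"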
+    String allowedApps =
+        dynamicConfigService.getConfig(
+            String.class, "aws.features.launch-templates.allowed-applications", "");
+    if (matchesAppAccountAndRegion(
+        applicationInReq, credentialsInReq.getName(), regionInReq, allowedApps.split(","))) {
+      return true;
+    }
+
+    // An allow list for account/region pairs with the following format:
+    // account:region
+    String allowedAccountsAndRegions =
+        dynamicConfigService.getConfig(
+            String.class, "aws.features.launch-templates.allowed-accounts-regions", "");
+    for (String accountRegion : allowedAccountsAndRegions.split(",")) {
+      if (StringUtils.isNotBlank(accountRegion) && accountRegion.contains(":")) {
+        String[] parts = accountRegion.split(":");
+        String account = parts[0];
+        String region = parts[1];
+        if (account.trim().equals(credentialsInReq.getName())
+            && region.trim().equals(regionInReq)) {
+          return true;
+        }
+      }
+    }
+
+    // This is a comma separated list of accounts to allow
+    String allowedAccounts =
+        dynamicConfigService.getConfig(
+            String.class, "aws.features.launch-templates.allowed-accounts", "");
+    for (String allowedAccount : allowedAccounts.split(",")) {
+      if (allowedAccount.trim().equals(credentialsInReq.getName())) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  /**
+   * Helper function to parse and match an array of app1:account1:region1,...,appN:accountN:regionN
+   * entries against the specified application, account and region. Used to gate the launch
+   * template feature rollout.
+   */
+  @VisibleForTesting
+  private boolean matchesAppAccountAndRegion(
+      String application, String accountName, String region, String[] applicationAccountRegions) {
+    if (applicationAccountRegions == null || applicationAccountRegions.length == 0) {
+      return false;
+    }
+
+    for (String appAccountRegion : applicationAccountRegions) {
+      if (StringUtils.isNotBlank(appAccountRegion) && appAccountRegion.contains(":")) {
+        try {
+          String[] parts = appAccountRegion.split(":");
+          String app = parts[0];
+          String account = parts[1];
+          String regions = parts[2];
+          if (app.equals(application)
+              && account.equals(accountName)
+              && Arrays.asList(regions.split(",")).contains(region)) {
+            return true;
+          }
+        } catch (Exception e) {
+          log.error(
+              "Unable to verify if application is allowed in shouldSetLaunchTemplate: {}",
+              appAccountRegion);
+          return false;
+        }
+      }
+    }
+
+    return false;
+  }
+}
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgBuilder.java
new file mode 100644
index 00000000000..e1ae278bfd6
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgBuilder.java
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgBuilder.java
new file mode 100644
index 00000000000..e1ae278bfd6
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgBuilder.java
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders;
+
+import com.amazonaws.services.autoscaling.AmazonAutoScaling;
+import com.amazonaws.services.autoscaling.model.AlreadyExistsException;
+import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
+import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest;
+import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
+import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult;
+import com.amazonaws.services.autoscaling.model.EnableMetricsCollectionRequest;
+import com.amazonaws.services.autoscaling.model.SuspendProcessesRequest;
+import com.amazonaws.services.autoscaling.model.Tag;
+import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest;
+import com.amazonaws.services.ec2.AmazonEC2;
+import com.amazonaws.services.ec2.model.DescribeSubnetsResult;
+import com.amazonaws.services.ec2.model.Subnet;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker;
+import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration;
+import com.netflix.spinnaker.clouddriver.aws.model.SubnetData;
+import com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.kork.core.RetrySupport;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+
+/** A builder used to build an AWS Autoscaling group. */
+@Slf4j
+public abstract class AsgBuilder {
+  private final RetrySupport retrySupport = new RetrySupport();
+
+  private AmazonAutoScaling autoScaling;
+  private AmazonEC2 ec2;
+  private AsgLifecycleHookWorker asgLifecycleHookWorker;
+
+  AsgBuilder(
+      AmazonAutoScaling autoScaling, AmazonEC2 ec2, AsgLifecycleHookWorker asgLifecycleHookWorker) {
+    this.autoScaling = autoScaling;
+    this.ec2 = ec2;
+    this.asgLifecycleHookWorker = asgLifecycleHookWorker;
+  }
+
+  /**
+   * Abstract method to build a CreateAutoScalingGroupRequest given the input parameters in the
+   * form of AsgConfiguration.
+   *
+   * @return the CreateAutoScalingGroupRequest built
+   */
+  protected abstract CreateAutoScalingGroupRequest buildRequest(
+      Task task, String taskPhase, String asgName, AsgConfiguration cfg);
+
+  /**
+   * Build and launch an ASG.
+   *
+   * @return the ASG name
+   */
+  public String build(Task task, String taskPhase, String asgName, AsgConfiguration cfg) {
+    return createAsg(task, taskPhase, buildRequest(task, taskPhase, asgName, cfg), cfg);
+  }
+
+  /**
+   * Build partial CreateAutoScalingGroupRequest. All parameters except launchConfiguration /
+   * launchTemplate are configured.
+   *
+   * @return CreateAutoScalingGroupRequest with all but one parameter configured
+   */
+  protected CreateAutoScalingGroupRequest buildPartialRequest(
+      Task task, String taskPhase, String name, AsgConfiguration cfg) {
+    CreateAutoScalingGroupRequest request =
+        new CreateAutoScalingGroupRequest()
+            .withAutoScalingGroupName(name)
+            .withMinSize(cfg.getMinInstances())
+            .withMaxSize(cfg.getMaxInstances())
+            .withDesiredCapacity(cfg.getDesiredInstances())
+            .withLoadBalancerNames(cfg.getClassicLoadBalancers())
+            .withTargetGroupARNs(cfg.getTargetGroupArns())
+            .withDefaultCooldown(cfg.getCooldown())
+            .withHealthCheckGracePeriod(cfg.getHealthCheckGracePeriod())
+            .withHealthCheckType(cfg.getHealthCheckType())
+            .withTerminationPolicies(cfg.getTerminationPolicies());
+
+    if (cfg.getTags() != null && !cfg.getTags().isEmpty()) {
+      task.updateStatus(taskPhase, "Adding tags for " + name);
+      cfg.getTags().entrySet().stream()
+          .forEach(
+              e ->
+                  request.withTags(
+                      new Tag()
+                          .withKey(e.getKey())
+                          .withValue(e.getValue())
+                          .withPropagateAtLaunch(true)));
+    }
+
+    // if we have explicitly specified subnetIds, don't require that they are tagged with a
+    // subnetType/purpose
+    boolean filterForSubnetPurposeTags = cfg.getSubnetIds() == null || cfg.getSubnetIds().isEmpty();
+
+    // favor subnetIds over availability zones
+    final String subnetIds =
+        String.join(
+            ",",
+            getSubnetIds(
+                getSubnets(
+                    filterForSubnetPurposeTags, cfg.getSubnetType(), cfg.getAvailabilityZones()),
+                cfg.getSubnetIds(),
+                cfg.getAvailabilityZones()));
+
+    List<Subnet> subnets = getSubnets(true, cfg.getSubnetType(), cfg.getAvailabilityZones());
+    if (StringUtils.isNotEmpty(subnetIds)) {
+      task.updateStatus(taskPhase, " > Deploying to subnetIds: " + subnetIds);
+      request.withVPCZoneIdentifier(subnetIds);
+    } else if (StringUtils.isNotEmpty(cfg.getSubnetType())
+        && (subnets == null || subnets.isEmpty())) {
+      throw new RuntimeException(
+          String.format(
+              "No suitable subnet was found for internal subnet purpose '%s'!",
+              cfg.getSubnetType()));
+    } else {
+      task.updateStatus(taskPhase, "Deploying to availabilityZones: " + cfg.getAvailabilityZones());
+      request.withAvailabilityZones(cfg.getAvailabilityZones());
+    }
+
+    // configure capacity rebalance
+    if (cfg.getCapacityRebalance() != null) {
+      task.updateStatus(
+          taskPhase,
+          "Setting capacity rebalance to " + cfg.getCapacityRebalance() + " for " + name);
+      request.withCapacityRebalance(cfg.getCapacityRebalance());
+    }
+
+    return request;
+  }
+
+  private String createAsg(
+      Task task, String taskPhase, CreateAutoScalingGroupRequest request, AsgConfiguration cfg) {
+    final String asgName = request.getAutoScalingGroupName();
+
+    // create ASG
+    final RuntimeException ex =
+        retrySupport.retry(
+            () -> {
+              try {
+                autoScaling.createAutoScalingGroup(request);
+                return null;
+              } catch (AlreadyExistsException e) {
+                if (!shouldProceedWithExistingState(
+                    autoScaling, asgName, request, task, taskPhase)) {
+                  return e;
+                }
+                log.debug("Determined pre-existing ASG is desired state, continuing...", e);
+                return null;
+              }
+            },
+            10,
+            1000,
+            false);
+    if (ex != null) {
+      throw ex;
+    }
+
+    // configure lifecycle hooks
+    if (cfg.getLifecycleHooks() != null && !cfg.getLifecycleHooks().isEmpty()) {
+      final Exception e =
+          retrySupport.retry(
+              () -> {
+                task.updateStatus(taskPhase, "Creating lifecycle hooks for: " + asgName);
+                asgLifecycleHookWorker.attach(task, cfg.getLifecycleHooks(), asgName);
+                return null;
+              },
+              10,
+              1000,
+              false);
+      if (e != null) {
+        task.updateStatus(
+            taskPhase,
+            "Unable to attach lifecycle hooks to ASG (" + asgName + "): " + e.getMessage());
+      }
+    }
+
+    // suspend auto scaling processes
+    if (cfg.getSuspendedProcesses() != null && !cfg.getSuspendedProcesses().isEmpty()) {
+      task.updateStatus(taskPhase, "Suspending processes for: " + asgName);
+      retrySupport.retry(
+          () ->
+              autoScaling.suspendProcesses(
+                  new SuspendProcessesRequest()
+                      .withAutoScalingGroupName(asgName)
+                      .withScalingProcesses(cfg.getSuspendedProcesses())),
+          10,
+          1000,
+          false);
+    }
+
+    // enable metrics and monitoring
+    if (cfg.getEnabledMetrics() != null
+        && cfg.getInstanceMonitoring() != null
+        && cfg.getInstanceMonitoring()) {
+      task.updateStatus(taskPhase, "Enabling metrics collection for: " + asgName);
+      retrySupport.retry(
+          () ->
+              autoScaling.enableMetricsCollection(
+                  new EnableMetricsCollectionRequest()
+                      .withAutoScalingGroupName(asgName)
+                      .withGranularity("1Minute")
+                      .withMetrics(cfg.getEnabledMetrics())),
+          10,
+          1000,
+          false);
+    }
+
+    // update ASG
+    retrySupport.retry(
+        () -> {
+          task.updateStatus(
+              taskPhase,
+              String.format(
+                  "Setting size of %s in %s/%s to [min=%s, max=%s, desired=%s]",
+                  asgName,
+                  cfg.getCredentials().getName(),
+                  cfg.getRegion(),
+                  cfg.getMinInstances(),
+                  cfg.getMaxInstances(),
+                  cfg.getDesiredInstances()));
+          autoScaling.updateAutoScalingGroup(
+              new UpdateAutoScalingGroupRequest()
+                  .withAutoScalingGroupName(asgName)
+                  .withMinSize(cfg.getMinInstances())
+                  .withMaxSize(cfg.getMaxInstances())
+                  .withDesiredCapacity(cfg.getDesiredInstances()));
+          return true;
+        },
+        10,
+        1000,
+        false);
+
+    task.updateStatus(taskPhase, "Deployed EC2 server group named " + asgName);
+    return asgName;
+  }
+
+ log.error("Attempted to find pre-existing ASG but none was found: " + asgName); + return true; + } + final AutoScalingGroup existingAsg = result.getAutoScalingGroups().get(0); + + // build predicates and identify failed ones + List existingAsgSubnetIds = null; + if (StringUtils.isNotEmpty(existingAsg.getVPCZoneIdentifier())) { + existingAsgSubnetIds = sortList(Arrays.asList(existingAsg.getVPCZoneIdentifier().split(","))); + } + List requestedSubnetIds = null; + if (StringUtils.isNotEmpty(request.getVPCZoneIdentifier())) { + requestedSubnetIds = sortList(Arrays.asList(request.getVPCZoneIdentifier().split(","))); + } + Map predicates = + ImmutableMap.builder() + .put( + "launch configuration", + Objects.equals( + existingAsg.getLaunchConfigurationName(), request.getLaunchConfigurationName())) + .put( + "launch template", + Objects.equals(existingAsg.getLaunchTemplate(), request.getLaunchTemplate())) + .put( + "mixed instances policy", + Objects.equals( + existingAsg.getMixedInstancesPolicy(), request.getMixedInstancesPolicy())) + .put( + "availability zones", + Objects.equals( + sortList(existingAsg.getAvailabilityZones()), + sortList(request.getAvailabilityZones()))) + .put("subnets", Objects.equals(existingAsgSubnetIds, requestedSubnetIds)) + .put( + "load balancers", + Objects.equals( + sortList(existingAsg.getLoadBalancerNames()), + sortList(request.getLoadBalancerNames()))) + .put( + "target groups", + Objects.equals( + sortList(existingAsg.getTargetGroupARNs()), + sortList(request.getTargetGroupARNs()))) + .put("cooldown", existingAsg.getDefaultCooldown() == request.getDefaultCooldown()) + .put( + "health check grace period", + existingAsg.getHealthCheckGracePeriod() == request.getHealthCheckGracePeriod()) + .put( + "health check type", + Objects.equals(existingAsg.getHealthCheckType(), request.getHealthCheckType())) + .put( + "termination policies", + Objects.equals( + sortList(existingAsg.getTerminationPolicies()), + sortList(request.getTerminationPolicies()))) + .build(); + final Set failedPredicates = + predicates.entrySet().stream() + .filter(p -> !p.getValue()) + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); + + if (!failedPredicates.isEmpty()) { + task.updateStatus( + taskPhase, + String.format( + "%s already exists and does not seem to match desired state on: %s", + asgName, String.join(",", failedPredicates))); + + log.debug("Failed predicates: " + predicates); + return false; + } + + if (existingAsg + .getCreatedTime() + .toInstant() + .isBefore(Instant.now().minus(1, ChronoUnit.HOURS))) { + task.updateStatus( + taskPhase, + asgName + + " already exists and appears to be valid, but falls outside of safety window for idempotent deploy (1 hour)"); + return false; + } + + return true; + } + + /** + * This is an obscure rule that Subnets are tagged at Amazon with a data structure, which defines + * their purpose and what type of resources (elb or ec2) are able to make use of them. We also + * need to ensure that the Subnet IDs that we provide back are able to be deployed to based off of + * the supplied availability zones. + * + * @return list of subnet ids applicable to this deployment. 
+ */ + private List getSubnetIds( + List allSubnetsForTypeAndAvailabilityZone, + List subnetIds, + List availabilityZones) { + final List allSubnetIds = + allSubnetsForTypeAndAvailabilityZone.stream() + .map(s -> s.getSubnetId()) + .collect(Collectors.toList()); + + List invalidSubnetIds = null; + if (subnetIds != null && !subnetIds.isEmpty()) { + invalidSubnetIds = + subnetIds.stream().filter(it -> !allSubnetIds.contains(it)).collect(Collectors.toList()); + } + + if (invalidSubnetIds != null && !invalidSubnetIds.isEmpty()) { + throw new IllegalStateException( + String.format( + "One or more subnet ids are not valid (invalidSubnetIds: %s, availabilityZones: %s)", + String.join(",", invalidSubnetIds), String.join(",", availabilityZones))); + } + + return (subnetIds != null && !subnetIds.isEmpty()) ? subnetIds : allSubnetIds; + } + + private List getSubnets( + boolean filterForSubnetPurposeTags, String subnetType, List availabilityZones) { + if (StringUtils.isEmpty(subnetType)) { + return Collections.emptyList(); + } + + final DescribeSubnetsResult result = ec2.describeSubnets(); + List mySubnets = new ArrayList<>(); + for (Subnet subnet : result.getSubnets()) { + if (availabilityZones != null + && !availabilityZones.isEmpty() + && !availabilityZones.contains(subnet.getAvailabilityZone())) { + continue; + } + if (filterForSubnetPurposeTags) { + final SubnetData sd = SubnetData.from(subnet); + if ((sd.getPurpose() != null && sd.getPurpose().equals(subnetType)) + && (sd.getTarget() == null || sd.getTarget() == SubnetTarget.EC2)) { + mySubnets.add(subnet); + } + } else { + mySubnets.add(subnet); + } + } + return mySubnets; + } + + private List sortList(List list) { + return list.stream().sorted(Comparator.naturalOrder()).collect(Collectors.toList()); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchConfigurationBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchConfigurationBuilder.java new file mode 100644 index 00000000000..4119adaf21f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchConfigurationBuilder.java @@ -0,0 +1,89 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.ec2.AmazonEC2; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchConfigurationBuilder; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import lombok.extern.slf4j.Slf4j; + +/** A builder used to build an AWS Autoscaling group with launch configuration. */ +@Slf4j +public class AsgWithLaunchConfigurationBuilder extends AsgBuilder { + private LaunchConfigurationBuilder lcBuilder; + + public AsgWithLaunchConfigurationBuilder( + LaunchConfigurationBuilder lcBuilder, + AmazonAutoScaling autoScaling, + AmazonEC2 ec2, + AsgLifecycleHookWorker asgLifecycleHookWorker) { + super(autoScaling, ec2, asgLifecycleHookWorker); + + this.lcBuilder = lcBuilder; + } + + @Override + protected CreateAutoScalingGroupRequest buildRequest( + Task task, String taskPhase, String asgName, AsgConfiguration cfg) { + + // create LC settings + LaunchConfigurationBuilder.LaunchConfigurationSettings settings = + LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(cfg.getCredentials().getName()) + .environment(cfg.getCredentials().getEnvironment()) + .accountType(cfg.getCredentials().getAccountType()) + .region(cfg.getRegion()) + .baseName(asgName) + .suffix(null) + .ami(cfg.getAmi()) + .iamRole(cfg.getIamRole()) + .classicLinkVpcId(cfg.getClassicLinkVpcId()) + .classicLinkVpcSecurityGroups(cfg.getClassicLinkVpcSecurityGroups()) + .instanceType(cfg.getInstanceType()) + .keyPair(cfg.getKeyPair()) + .base64UserData(cfg.getBase64UserData()) + .associatePublicIpAddress(cfg.getAssociatePublicIpAddress()) + .kernelId(cfg.getKernelId()) + .ramdiskId(cfg.getRamdiskId()) + .ebsOptimized(cfg.getEbsOptimized() != null ? cfg.getEbsOptimized() : false) + .spotMaxPrice(cfg.getSpotMaxPrice()) + .instanceMonitoring( + cfg.getInstanceMonitoring() != null ? cfg.getInstanceMonitoring() : false) + .blockDevices(cfg.getBlockDevices()) + .securityGroups(cfg.getSecurityGroups()) + .build(); + + String launchConfigName = + lcBuilder.buildLaunchConfiguration( + cfg.getApplication(), + cfg.getSubnetType(), + settings, + cfg.getLegacyUdf(), + cfg.getUserDataOverride()); + + task.updateStatus( + taskPhase, "Deploying ASG " + asgName + " with launch configuration " + launchConfigName); + CreateAutoScalingGroupRequest request = buildPartialRequest(task, taskPhase, asgName, cfg); + + return request.withLaunchConfigurationName(launchConfigName); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchTemplateBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchTemplateBuilder.java new file mode 100644 index 00000000000..fc4018e10d8 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchTemplateBuilder.java @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.LaunchTemplate; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration; +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService; +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; +import lombok.extern.slf4j.Slf4j; + +/** A builder used to build an AWS Autoscaling group with launch template. */ +@Slf4j +public class AsgWithLaunchTemplateBuilder extends AsgBuilder { + private LaunchTemplateService ltService; + private SecurityGroupService securityGroupService; + private DeployDefaults deployDefaults; + + public AsgWithLaunchTemplateBuilder( + LaunchTemplateService ltService, + SecurityGroupService securityGroupService, + DeployDefaults deployDefaults, + AmazonAutoScaling autoScaling, + AmazonEC2 ec2, + AsgLifecycleHookWorker asgLifecycleHookWorker) { + super(autoScaling, ec2, asgLifecycleHookWorker); + + this.ltService = ltService; + this.securityGroupService = securityGroupService; + this.deployDefaults = deployDefaults; + } + + @Override + protected CreateAutoScalingGroupRequest buildRequest( + Task task, String taskPhase, String asgName, AsgConfiguration config) { + + // resolve security groups + config = AsgConfigHelper.setAppSecurityGroups(config, securityGroupService, deployDefaults); + + final LaunchTemplate lt = + ltService.createLaunchTemplate(config, asgName, AsgConfigHelper.createName(asgName, null)); + + final LaunchTemplateSpecification ltSpec = + (new LaunchTemplateSpecification() + .withLaunchTemplateId(lt.getLaunchTemplateId()) + .withVersion(lt.getLatestVersionNumber().toString())); + + task.updateStatus( + taskPhase, + "Deploying ASG " + asgName + " with launch template " + lt.getLaunchTemplateId()); + CreateAutoScalingGroupRequest request = buildPartialRequest(task, taskPhase, asgName, config); + + return request.withLaunchTemplate(ltSpec); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithMixedInstancesPolicyBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithMixedInstancesPolicyBuilder.java new file mode 100644 index 00000000000..deec6a9e33d --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithMixedInstancesPolicyBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2021 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest; +import com.amazonaws.services.autoscaling.model.InstancesDistribution; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy; +import com.amazonaws.services.ec2.AmazonEC2; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration; +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService; +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults; +import lombok.extern.slf4j.Slf4j; + +/** + * A builder used to build an AWS Autoscaling group with mixed instances policy, backed by EC2 + * launch template. https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html + */ +@Slf4j +public class AsgWithMixedInstancesPolicyBuilder extends AsgBuilder { + private LaunchTemplateService ec2LtService; + private SecurityGroupService securityGroupService; + private DeployDefaults deployDefaults; + + public AsgWithMixedInstancesPolicyBuilder( + LaunchTemplateService ec2LtService, + SecurityGroupService securityGroupService, + DeployDefaults deployDefaults, + AmazonAutoScaling autoScaling, + AmazonEC2 ec2, + AsgLifecycleHookWorker asgLifecycleHookWorker) { + super(autoScaling, ec2, asgLifecycleHookWorker); + + this.securityGroupService = securityGroupService; + this.deployDefaults = deployDefaults; + this.ec2LtService = ec2LtService; + } + + @Override + public CreateAutoScalingGroupRequest buildRequest( + Task task, String taskPhase, String asgName, AsgConfiguration config) { + + // resolve security groups + config = AsgConfigHelper.setAppSecurityGroups(config, securityGroupService, deployDefaults); + + // create EC2 LaunchTemplate + final com.amazonaws.services.ec2.model.LaunchTemplate ec2Lt = + ec2LtService.createLaunchTemplate( + config, asgName, AsgConfigHelper.createName(asgName, null)); + + // create ASG LaunchTemplate spec + LaunchTemplateSpecification asgLtSpec = + new LaunchTemplateSpecification() + .withLaunchTemplateId(ec2Lt.getLaunchTemplateId()) + .withVersion("$Latest"); + + // create ASG LaunchTemplate + com.amazonaws.services.autoscaling.model.LaunchTemplate asgLt = + new com.amazonaws.services.autoscaling.model.LaunchTemplate() + .withLaunchTemplateSpecification(asgLtSpec); + + // create and add overrides + // https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-override-options.html + asgLt.withOverrides( + 
AsgConfigHelper.getLaunchTemplateOverrides( + config.getLaunchTemplateOverridesForInstanceType())); + + // configure instance distribution + // https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html + InstancesDistribution dist = + new InstancesDistribution() + .withOnDemandBaseCapacity(config.getOnDemandBaseCapacity()) + .withOnDemandPercentageAboveBaseCapacity( + config.getOnDemandPercentageAboveBaseCapacity()) + .withSpotInstancePools(config.getSpotInstancePools()) + .withSpotMaxPrice(config.getSpotMaxPrice()) + .withSpotAllocationStrategy(config.getSpotAllocationStrategy()); + + // create mixed instances policy with overrides and instance distribution + final MixedInstancesPolicy mixedInsPolicy = + new MixedInstancesPolicy().withLaunchTemplate(asgLt).withInstancesDistribution(dist); + + task.updateStatus( + taskPhase, + "Deploying ASG " + asgName + " with mixed instances policy " + mixedInsPolicy.toString()); + CreateAutoScalingGroupRequest request = buildPartialRequest(task, taskPhase, asgName, config); + + return request.withMixedInstancesPolicy(mixedInsPolicy); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonImageAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonImageAtomicOperationConverter.java similarity index 87% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonImageAtomicOperationConverter.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonImageAtomicOperationConverter.java index 82bfe9dac91..0260e0a2e47 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonImageAtomicOperationConverter.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonImageAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @AmazonOperation(AtomicOperations.DEREGISTER_IMAGE) @Component -public class DeleteAmazonImageAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class DeleteAmazonImageAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { return new DeleteAmazonImageAtomicOperation(convertDescription(input)); @@ -36,8 +36,8 @@ public AtomicOperation convertOperation(Map input) { @Override public DeleteAmazonImageDescription convertDescription(Map input) { - DeleteAmazonImageDescription converted = getObjectMapper() - .convertValue(input, DeleteAmazonImageDescription.class); + DeleteAmazonImageDescription converted = + getObjectMapper().convertValue(input, DeleteAmazonImageDescription.class); converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); return converted; } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonLaunchConfigurationAtomicOperationConverter.java 
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonLaunchConfigurationAtomicOperationConverter.java new file mode 100644 index 00000000000..8f84454498c --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonLaunchConfigurationAtomicOperationConverter.java @@ -0,0 +1,44 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLaunchConfigurationDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteAmazonLaunchConfigurationAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DELETE_LAUNCH_CONFIGURATION) +@Component +public class DeleteAmazonLaunchConfigurationAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteAmazonLaunchConfigurationAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteAmazonLaunchConfigurationDescription convertDescription(Map input) { + DeleteAmazonLaunchConfigurationDescription converted = + getObjectMapper().convertValue(input, DeleteAmazonLaunchConfigurationDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonLaunchTemplateAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonLaunchTemplateAtomicOperationConverter.java new file mode 100644 index 00000000000..e9fe192319e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonLaunchTemplateAtomicOperationConverter.java @@ -0,0 +1,44 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteLaunchTemplateAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DELETE_LAUNCH_TEMPLATE) +@Component +public class DeleteAmazonLaunchTemplateAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteLaunchTemplateAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteAmazonLaunchTemplateDescription convertDescription(Map input) { + DeleteAmazonLaunchTemplateDescription converted = + getObjectMapper().convertValue(input, DeleteAmazonLaunchTemplateDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonSnapshotAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonSnapshotAtomicOperationConverter.java new file mode 100644 index 00000000000..1ae796e47fd --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteAmazonSnapshotAtomicOperationConverter.java @@ -0,0 +1,55 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonSnapshotDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteAmazonSnapshotAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DELETE_SNAPSHOT) +@Component +public class DeleteAmazonSnapshotAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + private final Registry registry; + + @Autowired + public DeleteAmazonSnapshotAtomicOperationConverter(Registry registry) { + this.registry = registry; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteAmazonSnapshotAtomicOperation(convertDescription(input), registry); + } + + @Override + public DeleteAmazonSnapshotDescription convertDescription(Map input) { + DeleteAmazonSnapshotDescription converted = + getObjectMapper().convertValue(input, DeleteAmazonSnapshotDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationAtomicOperationConverter.java new file mode 100644 index 00000000000..61cf30eb467 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationAtomicOperationConverter.java @@ -0,0 +1,44 @@ +/* + * Copyright 2021 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteCloudFormationAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DELETE_CLOUDFORMATION_STACK) +@Component("deleteCloudFormationDescription") +public class DeleteCloudFormationAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteCloudFormationAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteCloudFormationDescription convertDescription(Map input) { + DeleteCloudFormationDescription converted = + getObjectMapper().convertValue(input, DeleteCloudFormationDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationChangeSetAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationChangeSetAtomicOperationConverter.java new file mode 100644 index 00000000000..56759be688a --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationChangeSetAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 Adevinta + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationChangeSetDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteCloudFormationChangeSetAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DELETE_CLOUDFORMATION_CHANGESET) +@Component("deleteCloudFormationChangeSetDescription") +public class DeleteCloudFormationChangeSetAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteCloudFormationChangeSetAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteCloudFormationChangeSetDescription convertDescription(Map input) { + DeleteCloudFormationChangeSetDescription converted = + getObjectMapper().convertValue(input, DeleteCloudFormationChangeSetDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeployCloudFormationAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeployCloudFormationAtomicOperationConverter.java new file mode 100644 index 00000000000..b151ea4b100 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeployCloudFormationAtomicOperationConverter.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeployCloudFormationDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeployCloudFormationAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DEPLOY_CLOUDFORMATION_STACK) +@Component("deployCloudFormationDescription") +public class DeployCloudFormationAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new DeployCloudFormationAtomicOperation(convertDescription(input)); + } + + @Override + public DeployCloudFormationDescription convertDescription(Map input) { + input = fixTemplateBody(input); + + DeployCloudFormationDescription converted = + getObjectMapper().convertValue(input, DeployCloudFormationDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } + + /** Previous implementation processed templateBody as a Map, now it is a string */ + private Map fixTemplateBody(Map input) { + if (input.get("templateBody") != null && !(input.get("templateBody") instanceof String)) { + String template; + try { + template = getObjectMapper().writeValueAsString(input.get("templateBody")); + } catch (JsonProcessingException e) { + throw new IllegalArgumentException( + "Could not serialize CloudFormation Stack template body", e); + } + input.put("templateBody", template); + } + return input; + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeregisterInstancesFromLoadBalancerAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeregisterInstancesFromLoadBalancerAtomicOperationConverter.java similarity index 79% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeregisterInstancesFromLoadBalancerAtomicOperationConverter.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeregisterInstancesFromLoadBalancerAtomicOperationConverter.java index 63f506628b3..4d8e2d0b2e8 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeregisterInstancesFromLoadBalancerAtomicOperationConverter.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeregisterInstancesFromLoadBalancerAtomicOperationConverter.java @@ -18,22 +18,24 @@ import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceLoadBalancerRegistrationDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceTargetGroupRegistrationDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeregisterInstancesFromLoadBalancerAtomicOperation; import 
com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeregisterInstancesFromTargetGroupAtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceLoadBalancerRegistrationDescription; -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeregisterInstancesFromLoadBalancerAtomicOperation; -import org.springframework.stereotype.Component; - +import java.util.List; import java.util.Map; +import org.springframework.stereotype.Component; @AmazonOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) @Component("deregisterInstancesFromLoadBalancerDescription") -public class DeregisterInstancesFromLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - private Boolean isClassic (Map input) { - return !input.containsKey("targetGroupNames"); +public class DeregisterInstancesFromLoadBalancerAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + private Boolean isClassic(Map input) { + return !input.containsKey("targetGroupNames") + || getObjectMapper().convertValue(input.get("targetGroupNames"), List.class).isEmpty(); } @Override @@ -48,12 +50,14 @@ public AtomicOperation convertOperation(Map input) { public AbstractRegionAsgInstanceIdsDescription convertDescription(Map input) { AbstractRegionAsgInstanceIdsDescription converted; if (isClassic(input)) { - converted = getObjectMapper().convertValue(input, InstanceLoadBalancerRegistrationDescription.class); + converted = + getObjectMapper().convertValue(input, InstanceLoadBalancerRegistrationDescription.class); } else { - converted = getObjectMapper().convertValue(input, InstanceTargetGroupRegistrationDescription.class); + converted = + getObjectMapper().convertValue(input, InstanceTargetGroupRegistrationDescription.class); } - converted.setCredentials(getCredentialsObject((String)input.get("credentials"))); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); return converted; } } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ExecuteCloudFormationChangeSetAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ExecuteCloudFormationChangeSetAtomicOperationConverter.java new file mode 100644 index 00000000000..31efcc6c8d5 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ExecuteCloudFormationChangeSetAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Adevinta + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ExecuteCloudFormationChangeSetDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ExecuteCloudFormationChangeSetAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.EXECUTE_CLOUDFORMATION_CHANGESET) +@Component("executeCloudFormationChangeSetDescription") +public class ExecuteCloudFormationChangeSetAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new ExecuteCloudFormationChangeSetAtomicOperation(convertDescription(input)); + } + + @Override + public ExecuteCloudFormationChangeSetDescription convertDescription(Map input) { + ExecuteCloudFormationChangeSetDescription converted = + getObjectMapper().convertValue(input, ExecuteCloudFormationChangeSetDescription.class); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); + return converted; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ModifyServerGroupLaunchTemplateAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ModifyServerGroupLaunchTemplateAtomicOperationConverter.java new file mode 100644 index 00000000000..2fda86ca3a9 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ModifyServerGroupLaunchTemplateAtomicOperationConverter.java @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ModifyServerGroupLaunchTemplateAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.UPDATE_LAUNCH_TEMPLATE) +@Component("modifyServerGroupLaunchTemplateDescription") +public class ModifyServerGroupLaunchTemplateAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new ModifyServerGroupLaunchTemplateAtomicOperation(convertDescription(input)); + } + + @Override + public ModifyServerGroupLaunchTemplateDescription convertDescription(Map input) { + final ModifyServerGroupLaunchTemplateDescription converted = + getObjectMapper().convertValue(input, ModifyServerGroupLaunchTemplateDescription.class); + + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + return converted; + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RegisterInstancesWithLoadBalancerAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RegisterInstancesWithLoadBalancerAtomicOperationConverter.java similarity index 80% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RegisterInstancesWithLoadBalancerAtomicOperationConverter.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RegisterInstancesWithLoadBalancerAtomicOperationConverter.java index e42c1651d7a..daa715b67fe 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RegisterInstancesWithLoadBalancerAtomicOperationConverter.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/RegisterInstancesWithLoadBalancerAtomicOperationConverter.java @@ -18,22 +18,24 @@ import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceLoadBalancerRegistrationDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceTargetGroupRegistrationDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.ops.RegisterInstancesWithLoadBalancerAtomicOperation; import com.netflix.spinnaker.clouddriver.aws.deploy.ops.RegisterInstancesWithTargetGroupAtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceLoadBalancerRegistrationDescription; -import org.springframework.stereotype.Component; - +import java.util.List; import java.util.Map; +import 
org.springframework.stereotype.Component; @AmazonOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) @Component("registerInstancesFromLoadBalancerDescription") -class RegisterInstancesWithLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - private Boolean isClassic (Map input) { - return !input.containsKey("targetGroupNames"); +class RegisterInstancesWithLoadBalancerAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + private Boolean isClassic(Map input) { + return !input.containsKey("targetGroupNames") + || getObjectMapper().convertValue(input.get("targetGroupNames"), List.class).isEmpty(); } @Override @@ -48,9 +50,11 @@ public AtomicOperation convertOperation(Map input) { public AbstractRegionAsgInstanceIdsDescription convertDescription(Map input) { AbstractRegionAsgInstanceIdsDescription converted; if (isClassic(input)) { - converted = getObjectMapper().convertValue(input, InstanceLoadBalancerRegistrationDescription.class); + converted = + getObjectMapper().convertValue(input, InstanceLoadBalancerRegistrationDescription.class); } else { - converted = getObjectMapper().convertValue(input, InstanceTargetGroupRegistrationDescription.class); + converted = + getObjectMapper().convertValue(input, InstanceTargetGroupRegistrationDescription.class); } converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/UpsertAmazonLoadBalancerAtomicOperationConverter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/UpsertAmazonLoadBalancerAtomicOperationConverter.java similarity index 82% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/UpsertAmazonLoadBalancerAtomicOperationConverter.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/UpsertAmazonLoadBalancerAtomicOperationConverter.java index 1d32093a264..59b8de747fd 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/UpsertAmazonLoadBalancerAtomicOperationConverter.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/converters/UpsertAmazonLoadBalancerAtomicOperationConverter.java @@ -18,23 +18,22 @@ import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerClassicDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerV2Description; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.UpsertAmazonLoadBalancerAtomicOperation; import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.UpsertAmazonLoadBalancerV2AtomicOperation; -import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancer; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancerType; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription; -import 
com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.UpsertAmazonLoadBalancerAtomicOperation; -import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancerType; -import org.springframework.stereotype.Component; - import java.util.HashMap; import java.util.Map; +import org.springframework.stereotype.Component; @AmazonOperation(AtomicOperations.UPSERT_LOAD_BALANCER) @Component("upsertAmazonLoadBalancerDescription") -class UpsertAmazonLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertAmazonLoadBalancerAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { private void sanitizeInput(Map input) { // default to classic load balancer if no type specified if (!input.containsKey("loadBalancerType")) { @@ -60,15 +59,20 @@ public UpsertAmazonLoadBalancerDescription convertDescription(Map input) { Map description = new HashMap<>(); description.putAll(input); - description.put("loadBalancerType", AmazonLoadBalancerType.getByValue((String)description.get("loadBalancerType"))); + description.put( + "loadBalancerType", + AmazonLoadBalancerType.getByValue((String) description.get("loadBalancerType"))); if (description.get("loadBalancerType") == AmazonLoadBalancerType.CLASSIC) { - converted = getObjectMapper().convertValue(description, UpsertAmazonLoadBalancerClassicDescription.class); + converted = + getObjectMapper() + .convertValue(description, UpsertAmazonLoadBalancerClassicDescription.class); } else { - converted = getObjectMapper().convertValue(description, UpsertAmazonLoadBalancerV2Description.class); + converted = + getObjectMapper().convertValue(description, UpsertAmazonLoadBalancerV2Description.class); } - converted.setCredentials(getCredentialsObject((String)input.get("credentials"))); + converted.setCredentials(getCredentialsObject((String) input.get("credentials"))); return converted; } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonImageDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonImageDescription.java similarity index 92% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonImageDescription.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonImageDescription.java index cdbe0c81337..2eeb494a578 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonImageDescription.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonImageDescription.java @@ -35,4 +35,9 @@ public String getRegion() { public void setRegion(String region) { this.region = region; } + + @Override + public boolean requiresApplicationRestriction() { + return false; + } } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLaunchConfigurationDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLaunchConfigurationDescription.java new file mode 100644 index 00000000000..f6c9f443db4 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLaunchConfigurationDescription.java @@ -0,0 +1,31 @@ +/* + * Copyright 2020 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import lombok.Data; + +@Data +public class DeleteAmazonLaunchConfigurationDescription + extends AbstractAmazonCredentialsDescription { + private String region; + private String launchConfigurationName; + + @Override + public boolean requiresApplicationRestriction() { + return false; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLaunchTemplateDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLaunchTemplateDescription.java new file mode 100644 index 00000000000..45b3949a1b8 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonLaunchTemplateDescription.java @@ -0,0 +1,43 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +public class DeleteAmazonLaunchTemplateDescription extends AbstractAmazonCredentialsDescription { + private String launchTemplateId; + private String region; + + public String getLaunchTemplateId() { + return launchTemplateId; + } + + public void setLaunchTemplateId(String launchTemplateId) { + this.launchTemplateId = launchTemplateId; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + @Override + public boolean requiresApplicationRestriction() { + return false; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonSnapshotDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonSnapshotDescription.java new file mode 100644 index 00000000000..1bdb18f95bb --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteAmazonSnapshotDescription.java @@ -0,0 +1,45 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +public class DeleteAmazonSnapshotDescription extends AbstractAmazonCredentialsDescription { + private String snapshotId; + private String region; + + public String getSnapshotId() { + return snapshotId; + } + + public void setSnapshotId(String snapshotId) { + this.snapshotId = snapshotId; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + @Override + public boolean requiresApplicationRestriction() { + return false; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteCloudFormationChangeSetDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteCloudFormationChangeSetDescription.java new file mode 100644 index 00000000000..b28595ce2e5 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteCloudFormationChangeSetDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2019 Adevinta + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = false) +@Data +public class DeleteCloudFormationChangeSetDescription extends AbstractAmazonCredentialsDescription { + + private String stackName; + private String changeSetName; + private String region; +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteCloudFormationDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteCloudFormationDescription.java new file mode 100644 index 00000000000..27f7d52333c --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeleteCloudFormationDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = false) +@Data +public class DeleteCloudFormationDescription extends AbstractAmazonCredentialsDescription { + + private String stackName; + private String roleARN; + private String region; +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeployCloudFormationDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeployCloudFormationDescription.java new file mode 100644 index 00000000000..145e5943648 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/DeployCloudFormationDescription.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = false) +@Data +public class DeployCloudFormationDescription extends AbstractAmazonCredentialsDescription { + + private String stackName; + private String templateBody; + private String templateURL; + private String roleARN; + private Map<String, String> parameters = new HashMap<>(); + private Map<String, String> tags = new HashMap<>(); + private String region; + private List<String> capabilities = new ArrayList<>(); + private List<String> notificationARNs = new ArrayList<>(); + + @JsonProperty("isChangeSet") + private boolean isChangeSet; + + private String changeSetName; +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/ExecuteCloudFormationChangeSetDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/ExecuteCloudFormationChangeSetDescription.java new file mode 100644 index 00000000000..eecbb10551b --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/ExecuteCloudFormationChangeSetDescription.java @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Adevinta. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = false) +@Data +public class ExecuteCloudFormationChangeSetDescription + extends AbstractAmazonCredentialsDescription { + + private String stackName; + private String changeSetName; + private String region; +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyServerGroupLaunchTemplateDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyServerGroupLaunchTemplateDescription.java new file mode 100644 index 00000000000..1b527fa788d --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/ModifyServerGroupLaunchTemplateDescription.java @@ -0,0 +1,204 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import com.amazonaws.util.CollectionUtils; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride; +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import lombok.Getter; +import lombok.Setter; +import org.apache.commons.lang3.StringUtils; + +/** + * Description type that encapsulates properties associated with 1. EC2 launch template + * (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/ec2/model/ResponseLaunchTemplateData.html) + * 2. Launch template overrides + * (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/autoscaling/model/LaunchTemplate.html) + * 3. Instances distribution + * (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/autoscaling/model/InstancesDistribution.html) + * + *
<p>
Used to modify properties associated with the AWS entities listed above. Applicable to AWS + * AutoScalingGroups backed by EC2 launch template with / without mixed instances policy + * (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/autoscaling/model/MixedInstancesPolicy.html) + */ +@Getter +@Setter +public class ModifyServerGroupLaunchTemplateDescription extends AbstractAmazonCredentialsDescription + implements ServerGroupsNameable { + String region; + String asgName; + String amiName; + String instanceType; + String subnetType; + String iamRole; + String keyPair; + Boolean associatePublicIpAddress; + String spotPrice; + String ramdiskId; + Boolean instanceMonitoring; + Boolean ebsOptimized; + String classicLinkVpcId; + List<String> classicLinkVpcSecurityGroups; + Boolean legacyUdf; + String base64UserData; + UserDataOverride userDataOverride = new UserDataOverride(); + List<AmazonBlockDevice> blockDevices; + List<String> securityGroups; + Boolean securityGroupsAppendOnly; + + /** + * If false, the newly created server group will not pick up block device mapping customizations + * from an ancestor group + */ + boolean copySourceCustomBlockDeviceMappings = true; + + // Launch Template only fields + private Boolean requireIMDV2; + private String kernelId; + private String imageId; + private Boolean associateIPv6Address; + private Boolean unlimitedCpuCredits; + private Boolean enableEnclave; + + /** + * Mixed Instances Policy properties. + * + *
<p>
Why are these properties here instead of {@link + * com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyAsgDescription}? Although most + * of these properties are associated with the server group itself rather than its launch + * template, these properties are closely associated with and modify certain launch template + * properties, e.g. LaunchTemplateOverridesForInstanceType, spotPrice. + */ + private String onDemandAllocationStrategy; + + private Integer onDemandBaseCapacity; + private Integer onDemandPercentageAboveBaseCapacity; + private String spotAllocationStrategy; + private Integer spotInstancePools; + private List<LaunchTemplateOverridesForInstanceType> + launchTemplateOverridesForInstanceType; + + @Override + @JsonIgnore + public Collection<String> getServerGroupNames() { + return Collections.singletonList(asgName); + } + + public static Set<String> getMetadataFieldNames() { + return ImmutableSet.of( + // read-only fields: serverGroupNames, applications + "account", + "region", + "asgName", + "credentials", + "securityGroupsAppendOnly", + "copySourceCustomBlockDeviceMappings"); + } + + public static Set<String> getMixedInstancesPolicyOnlyFieldNames() { + return ImmutableSet.of( + "onDemandAllocationStrategy", + "onDemandBaseCapacity", + "onDemandPercentageAboveBaseCapacity", + "spotAllocationStrategy", + "spotInstancePools", + "launchTemplateOverridesForInstanceType"); + } + + public static Set<String> getMixedInstancesPolicyFieldNames() { + return ImmutableSet.of( + "onDemandAllocationStrategy", + "onDemandBaseCapacity", + "onDemandPercentageAboveBaseCapacity", + "spotAllocationStrategy", + "spotInstancePools", + "spotPrice", // spotMaxPrice + "launchTemplateOverridesForInstanceType"); + } + + /** + * Get all instance types in the description. + * + *
<p>
Why does this method exist? When launchTemplateOverrides are specified, either the overrides + * or instanceType is used, but all instance type inputs are returned by this method. When is this + * method used? Used primarily for validation purposes, to ensure all instance types in the request + * are compatible with other validated configuration parameters (to prevent ambiguity). + * + * @return all instance type(s) + */ + public Set<String> getAllInstanceTypes() { + Set<String> instanceTypes = new HashSet<>(); + if (StringUtils.isNotBlank(this.getInstanceType())) { + instanceTypes.add(this.getInstanceType()); + } + if (!CollectionUtils.isNullOrEmpty(launchTemplateOverridesForInstanceType)) { + launchTemplateOverridesForInstanceType.forEach( + override -> instanceTypes.add(override.getInstanceType())); + } + return instanceTypes; + } + + @Override + public String toString() { + return new StringBuilder("ModifyServerGroupLaunchTemplateDescription{") + .append("region=" + region) + .append(", asgName=" + asgName) + .append(", amiName=" + amiName) + .append(", instanceType=" + instanceType) + .append(", subnetType=" + subnetType) + .append(", iamRole=" + iamRole) + .append(", keyPair=" + keyPair) + .append(", associatePublicIpAddress=" + associatePublicIpAddress) + .append(", spotPrice=" + spotPrice) + .append(", ramdiskId=" + ramdiskId) + .append(", instanceMonitoring=" + instanceMonitoring) + .append(", ebsOptimized=" + ebsOptimized) + .append(", classicLinkVpcId=" + classicLinkVpcId) + .append(", classicLinkVpcSecurityGroups=" + classicLinkVpcSecurityGroups) + .append(", legacyUdf=" + legacyUdf) + .append(", base64UserData=" + base64UserData) + .append(", userDataOverride=" + userDataOverride) + .append(", blockDevices=" + blockDevices) + .append(", securityGroups=" + securityGroups) + .append(", securityGroupsAppendOnly=" + securityGroupsAppendOnly) + .append(", copySourceCustomBlockDeviceMappings=" + copySourceCustomBlockDeviceMappings) + .append(", requireIMDV2=" + requireIMDV2) + .append(", kernelId=" + kernelId) + .append(", imageId=" + imageId) + .append(", associateIPv6Address=" + associateIPv6Address) + .append(", unlimitedCpuCredits=" + unlimitedCpuCredits) + .append(", onDemandAllocationStrategy=" + onDemandAllocationStrategy) + .append(", onDemandBaseCapacity=" + onDemandBaseCapacity) + .append(", onDemandPercentageAboveBaseCapacity=" + onDemandPercentageAboveBaseCapacity) + .append(", spotAllocationStrategy=" + spotAllocationStrategy) + .append(", spotInstancePools=" + spotInstancePools) + .append( + ", launchTemplateOverridesForInstanceType=" + launchTemplateOverridesForInstanceType) + .append("}") + .toString() + .replaceAll(",\\s[a-zA-Z0-9]+=null", ""); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerClassicDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerClassicDescription.java similarity index 92% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerClassicDescription.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerClassicDescription.java index cc2d4608fa8..d62dc7d13a2 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerClassicDescription.java +++
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerClassicDescription.java @@ -18,7 +18,8 @@ import java.util.List; -public class UpsertAmazonLoadBalancerClassicDescription extends UpsertAmazonLoadBalancerDescription { +public class UpsertAmazonLoadBalancerClassicDescription + extends UpsertAmazonLoadBalancerDescription { private List<Listener> listeners; private String healthCheck; private Integer healthCheckPort; @@ -122,7 +123,10 @@ public void setDeregistrationDelay(Integer deregistrationDelay) { public static class Listener { public enum ListenerType { - HTTP, HTTPS, TCP, SSL + HTTP, + HTTPS, + TCP, + SSL } private ListenerType externalProtocol; @@ -132,7 +136,7 @@ public enum ListenerType { private Integer internalPort; private String sslCertificateId; - + private List<String> policyNames; public ListenerType getExternalProtocol() { return externalProtocol; @@ -173,5 +177,13 @@ public String getSslCertificateId() { public void setSslCertificateId(String sslCertificateId) { this.sslCertificateId = sslCertificateId; } + + public List<String> getPolicyNames() { + return policyNames; + } + + public void setPolicyNames(List<String> policyNames) { + this.policyNames = policyNames; + } } } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerDescription.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerDescription.java similarity index 84% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerDescription.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerDescription.java index 844804daadd..8f19979db93 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerDescription.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerDescription.java @@ -17,11 +17,14 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.description; import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancerType; - +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; -public class UpsertAmazonLoadBalancerDescription extends AbstractAmazonCredentialsDescription { +public class UpsertAmazonLoadBalancerDescription extends AbstractAmazonCredentialsDescription + implements ResourcesNameable { private AmazonLoadBalancerType loadBalancerType = AmazonLoadBalancerType.CLASSIC; private String clusterName; @@ -29,6 +32,7 @@ public class UpsertAmazonLoadBalancerDescription extends AbstractAmazonCredentia private String vpcId; private Boolean isInternal; private String subnetType; + private Integer idleTimeout = 60; private List<String> securityGroups; private Map<String, List<String>> availabilityZones; @@ -106,4 +110,17 @@ public boolean getShieldProtectionEnabled() { public void setShieldProtectionEnabled(boolean shieldProtectionEnabled) { this.shieldProtectionEnabled = shieldProtectionEnabled; } + + public Integer getIdleTimeout() { + return idleTimeout; + } + + public void setIdleTimeout(Integer idleTimeout) { + this.idleTimeout = idleTimeout; + } + + @Override + public Collection<String> getNames() { + return Collections.singletonList(name); + } } diff --git
a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java new file mode 100644 index 00000000000..908df1b7122 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/description/UpsertAmazonLoadBalancerV2Description.java @@ -0,0 +1,452 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.description; + +import com.amazonaws.services.elasticloadbalancingv2.model.*; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class UpsertAmazonLoadBalancerV2Description extends UpsertAmazonLoadBalancerDescription { + public List<Listener> listeners = new ArrayList<>(); + public List<TargetGroup> targetGroups = new ArrayList<>(); + public Boolean deletionProtection = false; + public Boolean loadBalancingCrossZone; + public String ipAddressType = "ipv4"; + + public static class TargetGroup { + private String name; + private ProtocolEnum protocol; + private Integer port; + private Attributes attributes; // TODO: Support target group attributes + private String targetType = "instance"; + + private ProtocolEnum healthCheckProtocol; + private String healthCheckPath; + private String healthCheckPort; + private Integer healthCheckInterval = 10; + private Integer healthCheckTimeout = 5; + private Integer unhealthyThreshold = 2; + private Integer healthyThreshold = 10; + private String healthCheckMatcher = + "200-299"; // string of ranges or individual http status codes, separated by commas + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public ProtocolEnum getProtocol() { + return protocol; + } + + public void setProtocol(ProtocolEnum protocol) { + this.protocol = protocol; + } + + public Integer getPort() { + return port; + } + + public void setPort(Integer port) { + this.port = port; + } + + public Attributes getAttributes() { + return attributes; + } + + public void setAttributes(Attributes attributes) { + this.attributes = attributes; + } + + public String getTargetType() { + return targetType; + } + + public void setTargetType(String targetType) { + this.targetType = targetType; + } + + public ProtocolEnum getHealthCheckProtocol() { + return healthCheckProtocol; + } + + public void setHealthCheckProtocol(ProtocolEnum healthCheckProtocol) { + this.healthCheckProtocol = healthCheckProtocol; + } + + public String getHealthCheckPath() { + return healthCheckPath; + } + + public void setHealthCheckPath(String healthCheckPath) { + this.healthCheckPath = healthCheckPath; + } + + public String getHealthCheckPort() { + return healthCheckPort; + } + + public void setHealthCheckPort(String healthCheckPort) { + this.healthCheckPort = healthCheckPort; + } + + public Integer
getHealthCheckInterval() { + return healthCheckInterval; + } + + public void setHealthCheckInterval(Integer healthCheckInterval) { + this.healthCheckInterval = healthCheckInterval; + } + + public Integer getHealthCheckTimeout() { + return healthCheckTimeout; + } + + public void setHealthCheckTimeout(Integer healthCheckTimeout) { + this.healthCheckTimeout = healthCheckTimeout; + } + + public Integer getUnhealthyThreshold() { + return unhealthyThreshold; + } + + public void setUnhealthyThreshold(Integer unhealthyThreshold) { + this.unhealthyThreshold = unhealthyThreshold; + } + + public Integer getHealthyThreshold() { + return healthyThreshold; + } + + public void setHealthyThreshold(Integer healthyThreshold) { + this.healthyThreshold = healthyThreshold; + } + + public String getHealthCheckMatcher() { + return healthCheckMatcher; + } + + public void setHealthCheckMatcher(String healthCheckMatcher) { + this.healthCheckMatcher = healthCheckMatcher; + } + + public Boolean compare( + com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup awsTargetGroup) { + return this.name.equals(awsTargetGroup.getTargetGroupName()) + && this.protocol.toString().equals(awsTargetGroup.getProtocol()) + && this.port.equals(awsTargetGroup.getPort()) + && this.healthCheckProtocol.toString().equals(awsTargetGroup.getHealthCheckProtocol()) + && this.healthCheckPath.equals(awsTargetGroup.getHealthCheckPath()) + && this.healthCheckPort.equals(awsTargetGroup.getHealthCheckPort()) + && this.healthCheckInterval.equals(awsTargetGroup.getHealthCheckIntervalSeconds()) + && this.healthCheckTimeout.equals(awsTargetGroup.getHealthCheckTimeoutSeconds()) + && this.healthyThreshold.equals(awsTargetGroup.getHealthyThresholdCount()) + && this.unhealthyThreshold.equals(awsTargetGroup.getUnhealthyThresholdCount()) + && this.healthCheckMatcher.equals(awsTargetGroup.getMatcher().getHttpCode()); + } + } + + public static class Listener { + private List<Certificate> certificates; + private ProtocolEnum protocol; + private Integer port; + private String sslPolicy; + private List<Action> defaultActions; + private List<Rule> rules = new ArrayList<>(); + + public List<Certificate> getCertificates() { + return certificates; + } + + public void setCertificates(List<Certificate> certificates) { + this.certificates = certificates; + } + + public ProtocolEnum getProtocol() { + return protocol; + } + + public void setProtocol(ProtocolEnum protocol) { + this.protocol = protocol; + } + + public Integer getPort() { + return port; + } + + public void setPort(Integer port) { + this.port = port; + } + + public String getSslPolicy() { + return sslPolicy; + } + + public void setSslPolicy(String sslPolicy) { + this.sslPolicy = sslPolicy; + } + + public List<Action> getDefaultActions() { + return defaultActions; + } + + public void setDefaultActions(List<Action> defaultActions) { + this.defaultActions = defaultActions; + } + + public List<Rule> getRules() { + return rules; + } + + public void setRules(List<Rule> rules) { + this.rules = rules; + } + + public Boolean compare( + com.amazonaws.services.elasticloadbalancingv2.model.Listener awsListener, + List<com.amazonaws.services.elasticloadbalancingv2.model.Action> actions, + List<com.amazonaws.services.elasticloadbalancingv2.model.Rule> existingRules, + List<com.amazonaws.services.elasticloadbalancingv2.model.Rule> rules) { + if (existingRules == null) { + existingRules = new ArrayList<>(); + } + if (rules == null) { + rules = new ArrayList<>(); + } + + int awsCertificateCount = + awsListener.getCertificates() != null ? awsListener.getCertificates().size() : 0; + int certificateCount = certificates != null ?
certificates.size() : 0; + Boolean certificatesSame = awsCertificateCount == certificateCount; + if (certificatesSame) { + Set<String> awsListenerArns = new HashSet<>(); + Set<String> thisListenerArns = new HashSet<>(); + if (awsListener.getCertificates() != null) { + awsListener + .getCertificates() + .forEach(cert -> awsListenerArns.add(cert.getCertificateArn())); + } + if (certificates != null) { + certificates.forEach(cert -> thisListenerArns.add(cert.getCertificateArn())); + } + certificatesSame = awsListenerArns.equals(thisListenerArns); + } + + Boolean rulesSame = + existingRules.size() + == rules.size() + 1; // existing rules has the default rule, rules does not + if (rulesSame) { + for (com.amazonaws.services.elasticloadbalancingv2.model.Rule existingRule : + existingRules) { + boolean match = true; + if (!existingRule.isDefault()) { + match = false; + for (com.amazonaws.services.elasticloadbalancingv2.model.Rule rule : rules) { + if (existingRule.getActions().equals(rule.getActions()) + && existingRule.getConditions().equals(rule.getConditions()) + && existingRule.getPriority().equals(rule.getPriority())) { + match = true; + break; + } + } + } + rulesSame = match; + if (!rulesSame) { + break; + } + } + } + + Boolean actionsSame = + awsListener.getDefaultActions().containsAll(actions) + && actions.containsAll(awsListener.getDefaultActions()); + + return (this.protocol != null && this.protocol.toString().equals(awsListener.getProtocol())) + && (this.port != null && this.port.equals(awsListener.getPort())) + && actionsSame + && rulesSame + && certificatesSame; + } + } + + public static class Action { + private String type = ActionTypeEnum.Forward.toString(); + private String targetGroupName; + private AuthenticateOidcActionConfig authenticateOidcActionConfig; + + private RedirectActionConfig redirectActionConfig; + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getTargetGroupName() { + return targetGroupName; + } + + public void setTargetGroupName(String targetGroupName) { + this.targetGroupName = targetGroupName; + } + + public AuthenticateOidcActionConfig getAuthenticateOidcActionConfig() { + return authenticateOidcActionConfig; + } + + public void setAuthenticateOidcActionConfig( + AuthenticateOidcActionConfig authenticateOidcActionConfig) { + this.authenticateOidcActionConfig = authenticateOidcActionConfig; + } + + public RedirectActionConfig getRedirectActionConfig() { + return redirectActionConfig; + } + + public void setRedirectActionConfig(RedirectActionConfig redirectActionConfig) { + this.redirectActionConfig = redirectActionConfig; + } + } + + public static class Attributes { + private Integer deregistrationDelay; + private Boolean stickinessEnabled; + private String stickinessType; + private Integer stickinessDuration; + private Boolean proxyProtocolV2; + private Boolean deregistrationDelayConnectionTermination; + + /** The following attribute is supported only if the target is a Lambda function.
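(It corresponds to the ELBv2 target group attribute lambda.multi_value_headers.enabled.)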
*/ + private Boolean multiValueHeadersEnabled; + + public Integer getDeregistrationDelay() { + return deregistrationDelay; + } + + public void setDeregistrationDelay(Integer deregistrationDelay) { + this.deregistrationDelay = deregistrationDelay; + } + + public Boolean getStickinessEnabled() { + return stickinessEnabled; + } + + public void setStickinessEnabled(Boolean stickinessEnabled) { + this.stickinessEnabled = stickinessEnabled; + } + + public String getStickinessType() { + return stickinessType; + } + + public void setStickinessType(String stickinessType) { + this.stickinessType = stickinessType; + } + + public Integer getStickinessDuration() { + return stickinessDuration; + } + + public void setStickinessDuration(Integer stickinessDuration) { + this.stickinessDuration = stickinessDuration; + } + + public Boolean getProxyProtocolV2() { + return proxyProtocolV2; + } + + public void setProxyProtocolV2(Boolean proxyProtocolV2) { + this.proxyProtocolV2 = proxyProtocolV2; + } + + public Boolean getMultiValueHeadersEnabled() { + return multiValueHeadersEnabled; + } + + public void setMultiValueHeadersEnabled(Boolean multiValueHeadersEnabled) { + this.multiValueHeadersEnabled = multiValueHeadersEnabled; + } + + public Boolean getDeregistrationDelayConnectionTermination() { + return deregistrationDelayConnectionTermination; + } + + public void setDeregistrationDelayConnectionTermination( + Boolean deregistrationDelayConnectionTermination) { + this.deregistrationDelayConnectionTermination = deregistrationDelayConnectionTermination; + } + } + + public static class RuleCondition { + private String field; + private List<String> values; + + public String getField() { + return field; + } + + public void setField(String field) { + this.field = field; + } + + public List<String> getValues() { + return values; + } + + public void setValues(List<String> values) { + this.values = values; + } + } + + public static class Rule { + private String priority; + private List<Action> actions; + private List<RuleCondition> conditions; + + public String getPriority() { + return priority; + } + + public void setPriority(String priority) { + this.priority = priority; + } + + public List<Action> getActions() { + return actions; + } + + public void setActions(List<Action> actions) { + this.actions = actions; + } + + public List<RuleCondition> getConditions() { + return conditions; + } + + public void setConditions(List<RuleCondition> conditions) { + this.conditions = conditions; + } + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonImageAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonImageAtomicOperation.java similarity index 75% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonImageAtomicOperation.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonImageAtomicOperation.java index 5fd940d4d37..3ff905c0b81 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonImageAtomicOperation.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonImageAtomicOperation.java @@ -22,9 +22,8 @@ import com.netflix.spinnaker.clouddriver.data.task.Task; import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import org.springframework.beans.factory.annotation.Autowired; - import java.util.List; +import
org.springframework.beans.factory.annotation.Autowired; public class DeleteAmazonImageAtomicOperation implements AtomicOperation { private static final String BASE_PHASE = "DELETE_IMAGE"; @@ -34,22 +33,27 @@ private static Task getTask() { } private final DeleteAmazonImageDescription description; + public DeleteAmazonImageAtomicOperation(DeleteAmazonImageDescription description) { this.description = description; } - @Autowired - private AmazonClientProvider amazonClientProvider; + @Autowired private AmazonClientProvider amazonClientProvider; @Override public Void operate(List priorOutputs) { - getTask().updateStatus(BASE_PHASE, String.format("Initializing Delete Image operation for %s", description)); + getTask() + .updateStatus( + BASE_PHASE, String.format("Initializing Delete Image operation for %s", description)); amazonClientProvider - .getAmazonEC2(description.getCredentials(), description.getRegion()) - .deregisterImage(new DeregisterImageRequest().withImageId(description.getImageId())); - - getTask().updateStatus(BASE_PHASE, String.format("Deleted Image %s in %s", - description.getImageId(), description.getRegion())); + .getAmazonEC2(description.getCredentials(), description.getRegion()) + .deregisterImage(new DeregisterImageRequest().withImageId(description.getImageId())); + + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Deleted Image %s in %s", description.getImageId(), description.getRegion())); return null; } } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonLaunchConfigurationAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonLaunchConfigurationAtomicOperation.java new file mode 100644 index 00000000000..faeabc47a4e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonLaunchConfigurationAtomicOperation.java @@ -0,0 +1,90 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.AmazonAutoScalingException; +import com.amazonaws.services.autoscaling.model.DeleteLaunchConfigurationRequest; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLaunchConfigurationDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.spinnaker.kork.exceptions.IntegrationException; +import java.time.Duration; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeleteAmazonLaunchConfigurationAtomicOperation implements AtomicOperation<Void> { + private static final String BASE_PHASE = "DELETE_LAUNCH_CONFIGURATION"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Autowired private AmazonClientProvider amazonClientProvider; + @Autowired private RetrySupport retrySupport; + + private final DeleteAmazonLaunchConfigurationDescription description; + + public DeleteAmazonLaunchConfigurationAtomicOperation( + DeleteAmazonLaunchConfigurationDescription description) { + this.description = description; + } + + @Override + public Void operate(List priorOutputs) { + final String region = description.getRegion(); + final NetflixAmazonCredentials credentials = description.getCredentials(); + final String launchConfigurationName = description.getLaunchConfigurationName(); + + final AmazonAutoScaling autoScaling = + amazonClientProvider.getAutoScaling(credentials, region, true); + getTask() + .updateStatus( + BASE_PHASE, "Deleting launch config " + launchConfigurationName + " in " + region); + + retrySupport.retry( + () -> deleteLaunchConfiguration(launchConfigurationName, autoScaling), + 5, + Duration.ofSeconds(1), + true); + getTask() + .updateStatus( + BASE_PHASE, + "Finished Delete Launch Config operation for " + launchConfigurationName); + return null; + } + + private Boolean deleteLaunchConfiguration( + String launchConfigurationName, AmazonAutoScaling autoScaling) { + try { + autoScaling.deleteLaunchConfiguration( + new DeleteLaunchConfigurationRequest() + .withLaunchConfigurationName(launchConfigurationName)); + return true; + } catch (AmazonAutoScalingException e) { + // A missing launch configuration means it is already deleted; any other failure is retryable. + if (!e.getMessage().toLowerCase().contains("launch configuration name not found")) { + throw new IntegrationException(e).setRetryable(true); + } + } + + return false; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonSnapshotAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonSnapshotAtomicOperation.java new file mode 100644 index 00000000000..3d10e1b4333 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonSnapshotAtomicOperation.java @@ -0,0 +1,103 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.services.ec2.model.AmazonEC2Exception; +import com.amazonaws.services.ec2.model.DeleteSnapshotRequest; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonSnapshotDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeleteAmazonSnapshotAtomicOperation implements AtomicOperation { + private static final String BASE_PHASE = "DELETE_SNAPSHOT"; + private final Logger log = LoggerFactory.getLogger(getClass()); + + private final DeleteAmazonSnapshotDescription description; + private final Registry registry; + private final Id deleteSnapshotTaskId; + + public DeleteAmazonSnapshotAtomicOperation( + DeleteAmazonSnapshotDescription description, Registry registry) { + this.description = description; + this.registry = registry; + this.deleteSnapshotTaskId = registry.createId("tasks.DeleteAmazonSnapshot"); + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Autowired private AmazonClientProvider amazonClientProvider; + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + String.format("Initializing Delete Snapshot operation for %s", description)); + try { + amazonClientProvider + .getAmazonEC2(description.getCredentials(), description.getRegion()) + .deleteSnapshot(new DeleteSnapshotRequest().withSnapshotId(description.getSnapshotId())); + } catch (AmazonEC2Exception e) { + if (e.getStatusCode() == 400 + && e.getErrorCode().equalsIgnoreCase("InvalidSnapshot.NotFound")) { + log.debug("Snapshot does not exist, ignoring."); + } else { + throw e; + } + } catch (Exception e) { + registry + .counter( + deleteSnapshotTaskId + .withTag("success", false) + .withTag("region", description.getRegion()) + .withTag("account", description.getAccount())) + .increment(); + log.error( + String.format( + "Failed to delete snapshotId: %s , region: %s , account: %s", + description.getSnapshotId(), description.getRegion(), description.getAccount()), + e); + throw e; + } + registry + .counter( + deleteSnapshotTaskId + .withTag("success", true) + .withTag("region", description.getRegion()) + .withTag("account", description.getAccount())) + .increment(); + + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Deleted Snapshot %s in %s", description.getSnapshotId(), description.getRegion())); + return null; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationAtomicOperation.java 
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationAtomicOperation.java new file mode 100644 index 00000000000..168b23c7662 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationAtomicOperation.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.cloudformation.AmazonCloudFormation; +import com.amazonaws.services.cloudformation.model.*; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.util.StringUtils; + +@Slf4j +public class DeleteCloudFormationAtomicOperation implements AtomicOperation { + + private static final String BASE_PHASE = "DELETE_CLOUDFORMATION_STACK"; + + @Autowired AmazonClientProvider amazonClientProvider; + + @Autowired + @Qualifier("amazonObjectMapper") + private ObjectMapper objectMapper; + + private DeleteCloudFormationDescription description; + + public DeleteCloudFormationAtomicOperation( + DeleteCloudFormationDescription deleteCloudFormationDescription) { + this.description = deleteCloudFormationDescription; + } + + @Override + public Map operate(List priorOutputs) { + Task task = TaskRepository.threadLocalTask.get(); + AmazonCloudFormation amazonCloudFormation = + amazonClientProvider.getAmazonCloudFormation( + description.getCredentials(), description.getRegion()); + + DeleteStackRequest deleteStackRequest = + new DeleteStackRequest().withStackName(description.getStackName()); + + if (StringUtils.hasText(description.getRoleARN())) { + deleteStackRequest.setRoleARN(description.getRoleARN()); + } + + try { + task.updateStatus(BASE_PHASE, "Deleting CloudFormation Stack"); + amazonCloudFormation.deleteStack(deleteStackRequest); + } catch (AmazonServiceException e) { + log.error("Error deleting stack {}", description.getStackName(), e); + throw e; + } + return Collections.emptyMap(); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationChangeSetAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationChangeSetAtomicOperation.java new file mode 100644 index 00000000000..5e721c5ae31 --- /dev/null 
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationChangeSetAtomicOperation.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2019 Adevinta + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.cloudformation.AmazonCloudFormation; +import com.amazonaws.services.cloudformation.model.*; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationChangeSetDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; + +@Slf4j +public class DeleteCloudFormationChangeSetAtomicOperation implements AtomicOperation { + + private static final String BASE_PHASE = "DELETE_CLOUDFORMATION_CHANGESET"; + + @Autowired AmazonClientProvider amazonClientProvider; + + private DeleteCloudFormationChangeSetDescription description; + + public DeleteCloudFormationChangeSetAtomicOperation( + DeleteCloudFormationChangeSetDescription deployCloudFormationDescription) { + this.description = deployCloudFormationDescription; + } + + @Override + public Map operate(List priorOutputs) { + Task task = TaskRepository.threadLocalTask.get(); + AmazonCloudFormation amazonCloudFormation = + amazonClientProvider.getAmazonCloudFormation( + description.getCredentials(), description.getRegion()); + + DeleteChangeSetRequest deleteChangeSetRequest = + new DeleteChangeSetRequest() + .withStackName(description.getStackName()) + .withChangeSetName(description.getChangeSetName()); + try { + task.updateStatus(BASE_PHASE, "Deleting CloudFormation ChangeSet"); + amazonCloudFormation.deleteChangeSet(deleteChangeSetRequest); + } catch (AmazonServiceException e) { + log.error( + "Error removing change set " + + description.getChangeSetName() + + " on stack " + + description.getStackName(), + e); + throw e; + } + return Collections.emptyMap(); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteLaunchTemplateAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteLaunchTemplateAtomicOperation.java new file mode 100644 index 00000000000..728afba92b2 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteLaunchTemplateAtomicOperation.java @@ -0,0 +1,84 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateRequest; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.core.RetrySupport; +import java.time.Duration; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeleteLaunchTemplateAtomicOperation implements AtomicOperation<Void> { + private static final String BASE_PHASE = "DELETE_LAUNCH_TEMPLATE"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private final DeleteAmazonLaunchTemplateDescription description; + + public DeleteLaunchTemplateAtomicOperation(DeleteAmazonLaunchTemplateDescription description) { + this.description = description; + } + + @Autowired private AmazonClientProvider amazonClientProvider; + @Autowired private RetrySupport retrySupport; + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + String.format("Initializing Delete Launch Template operation for %s", description)); + + AmazonEC2 ec2 = + amazonClientProvider.getAmazonEC2(description.getCredentials(), description.getRegion()); + retrySupport.retry( + () -> deleteLaunchTemplate(description.getLaunchTemplateId(), ec2), + 3, + Duration.ofSeconds(3), + false); + + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Deleted Launch Template %s in %s", + description.getLaunchTemplateId(), description.getRegion())); + return null; + } + + private Boolean deleteLaunchTemplate(String launchTemplateId, AmazonEC2 ec2) { + try { + ec2.deleteLaunchTemplate( + new DeleteLaunchTemplateRequest().withLaunchTemplateId(launchTemplateId)); + return true; + } catch (Exception e) { + // A launch template that no longer exists is treated as already deleted. + if (e.getMessage().toLowerCase().contains("does not exist")) { + return true; + } + + throw e; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeployCloudFormationAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeployCloudFormationAtomicOperation.java new file mode 100644 index 00000000000..2344660b3c2 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeployCloudFormationAtomicOperation.java @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.services.cloudformation.AmazonCloudFormation; +import com.amazonaws.services.cloudformation.model.*; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeployCloudFormationDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.util.StringUtils; + +@Slf4j +public class DeployCloudFormationAtomicOperation implements AtomicOperation { + + private static final String BASE_PHASE = "DEPLOY_CLOUDFORMATION_STACK"; + private static final String NO_CHANGE_STACK_ERROR_MESSAGE = "No updates"; + + @Autowired AmazonClientProvider amazonClientProvider; + @Autowired AwsConfigurationProperties awsConfigurationProperties; + + @Autowired + @Qualifier("amazonObjectMapper") + private ObjectMapper objectMapper; + + private DeployCloudFormationDescription description; + + public DeployCloudFormationAtomicOperation( + DeployCloudFormationDescription deployCloudFormationDescription) { + this.description = deployCloudFormationDescription; + } + + @Override + public Map operate(List priorOutputs) { + Task task = TaskRepository.threadLocalTask.get(); + task.updateStatus(BASE_PHASE, "Configuring CloudFormation Stack"); + AmazonCloudFormation amazonCloudFormation = + amazonClientProvider.getAmazonCloudFormation( + description.getCredentials(), description.getRegion()); + String templateURL = description.getTemplateURL(); + String templateBody = description.getTemplateBody(); + validateTemplate(amazonCloudFormation, templateURL, templateBody); + String roleARN = description.getRoleARN(); + List parameters = + description.getParameters().entrySet().stream() + .map( + entry -> + new Parameter() + .withParameterKey(entry.getKey()) + .withParameterValue(entry.getValue())) + .collect(Collectors.toList()); + List tags = + description.getTags().entrySet().stream() + .map(entry -> new Tag().withKey(entry.getKey()).withValue(entry.getValue())) + .collect(Collectors.toList()); + + List notificationARNs = + Optional.ofNullable(description.getNotificationARNs()).orElse(Collections.emptyList()); + + boolean stackExists = stackExists(amazonCloudFormation); + + String stackId; + if (description.isChangeSet()) { + ChangeSetType changeSetType = stackExists ? 
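/* an existing stack takes an UPDATE change set, a new stack a CREATE */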
ChangeSetType.UPDATE : ChangeSetType.CREATE; + log.info("{} change set for stack: {}", changeSetType, description); + stackId = + createChangeSet( + amazonCloudFormation, + templateURL, + templateBody, + roleARN, + parameters, + tags, + description.getCapabilities(), + notificationARNs, + changeSetType); + } else { + if (stackExists) { + log.info("Updating existing stack {}", description); + stackId = + updateStack( + amazonCloudFormation, + templateURL, + templateBody, + roleARN, + parameters, + tags, + description.getCapabilities(), + notificationARNs); + } else { + log.info("Creating new stack: {}", description); + stackId = + createStack( + amazonCloudFormation, + templateURL, + templateBody, + roleARN, + parameters, + tags, + description.getCapabilities(), + notificationARNs); + } + } + return Collections.singletonMap("stackId", stackId); + } + + private String createStack( + AmazonCloudFormation amazonCloudFormation, + String templateURL, + String templateBody, + String roleARN, + List parameters, + List tags, + List capabilities, + List notificationARNs) { + Task task = TaskRepository.threadLocalTask.get(); + task.updateStatus(BASE_PHASE, "Preparing CloudFormation Stack"); + CreateStackRequest createStackRequest = + new CreateStackRequest() + .withStackName(description.getStackName()) + .withParameters(parameters) + .withTags(tags) + .withCapabilities(capabilities) + .withNotificationARNs(notificationARNs); + + if (StringUtils.hasText(templateURL)) { + createStackRequest.setTemplateURL(templateURL); + } else { + createStackRequest.setTemplateBody(templateBody); + } + + if (StringUtils.hasText(roleARN)) { + createStackRequest.setRoleARN(roleARN); + } + task.updateStatus(BASE_PHASE, "Uploading CloudFormation Stack"); + CreateStackResult createStackResult = amazonCloudFormation.createStack(createStackRequest); + return createStackResult.getStackId(); + } + + private String updateStack( + AmazonCloudFormation amazonCloudFormation, + String templateURL, + String templateBody, + String roleARN, + List parameters, + List tags, + List capabilities, + List notificationARNs) { + Task task = TaskRepository.threadLocalTask.get(); + task.updateStatus(BASE_PHASE, "CloudFormation Stack exists. 
Updating it"); + UpdateStackRequest updateStackRequest = + new UpdateStackRequest() + .withStackName(description.getStackName()) + .withParameters(parameters) + .withTags(tags) + .withCapabilities(capabilities) + .withNotificationARNs(notificationARNs); + + if (StringUtils.hasText(templateURL)) { + updateStackRequest.setTemplateURL(templateURL); + } else { + updateStackRequest.setTemplateBody(templateBody); + } + + if (StringUtils.hasText(roleARN)) { + updateStackRequest.setRoleARN(roleARN); + } + task.updateStatus(BASE_PHASE, "Uploading CloudFormation Stack"); + try { + UpdateStackResult updateStackResult = amazonCloudFormation.updateStack(updateStackRequest); + return updateStackResult.getStackId(); + } catch (AmazonCloudFormationException e) { + + if (e.getMessage().contains(NO_CHANGE_STACK_ERROR_MESSAGE)) { + // No changes on the stack, ignore failure + return getStackId(amazonCloudFormation); + } + log.error("Error updating stack", e); + throw e; + } + } + + private String createChangeSet( + AmazonCloudFormation amazonCloudFormation, + String templateURL, + String templateBody, + String roleARN, + List parameters, + List tags, + List capabilities, + List notificationARNs, + ChangeSetType changeSetType) { + Task task = TaskRepository.threadLocalTask.get(); + task.updateStatus(BASE_PHASE, "CloudFormation Stack exists. Creating a change set"); + CreateChangeSetRequest createChangeSetRequest = + new CreateChangeSetRequest() + .withStackName(description.getStackName()) + .withChangeSetName(description.getChangeSetName()) + .withParameters(parameters) + .withTags(tags) + .withCapabilities(capabilities) + .withNotificationARNs(notificationARNs) + .withChangeSetType(changeSetType) + .withIncludeNestedStacks( + awsConfigurationProperties.getCloudformation().getChangeSetsIncludeNestedStacks()); + + if (StringUtils.hasText(templateURL)) { + createChangeSetRequest.setTemplateURL(templateURL); + } else { + createChangeSetRequest.setTemplateBody(templateBody); + } + + if (StringUtils.hasText(roleARN)) { + createChangeSetRequest.setRoleARN(roleARN); + } + + task.updateStatus(BASE_PHASE, "Uploading CloudFormation ChangeSet"); + try { + CreateChangeSetResult createChangeSetResult = + amazonCloudFormation.createChangeSet(createChangeSetRequest); + return createChangeSetResult.getStackId(); + } catch (AmazonCloudFormationException e) { + log.error("Error creating change set", e); + throw e; + } + } + + private boolean stackExists(AmazonCloudFormation amazonCloudFormation) { + try { + getStackId(amazonCloudFormation); + return true; + } catch (Exception e) { + return false; + } + } + + private String getStackId(AmazonCloudFormation amazonCloudFormation) { + return amazonCloudFormation + .describeStacks(new DescribeStacksRequest().withStackName(description.getStackName())) + .getStacks() + .stream() + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "No CloudFormation Stack found with stack name " + description.getStackName())) + .getStackId(); + } + + private void validateTemplate( + AmazonCloudFormation amazonCloudFormation, String templateURL, String templateBody) { + try { + ValidateTemplateRequest validateTemplateRequest = new ValidateTemplateRequest(); + + if (StringUtils.hasText(templateURL)) { + validateTemplateRequest.setTemplateURL(templateURL); + } else { + validateTemplateRequest.setTemplateBody(templateBody); + } + + amazonCloudFormation.validateTemplate(validateTemplateRequest); + } catch (AmazonCloudFormationException e) { + log.error("Error validating cloudformation 
template", e); + throw e; + } + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromLoadBalancerAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromLoadBalancerAtomicOperation.java similarity index 76% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromLoadBalancerAtomicOperation.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromLoadBalancerAtomicOperation.java index 5be8ec4763b..08189271251 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromLoadBalancerAtomicOperation.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromLoadBalancerAtomicOperation.java @@ -19,9 +19,11 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceLoadBalancerRegistrationDescription; -public class DeregisterInstancesFromLoadBalancerAtomicOperation extends AbstractInstanceLoadBalancerRegistrationAtomicOperation { - public DeregisterInstancesFromLoadBalancerAtomicOperation(AbstractRegionAsgInstanceIdsDescription description) { - super((InstanceLoadBalancerRegistrationDescription)description); +public class DeregisterInstancesFromLoadBalancerAtomicOperation + extends AbstractInstanceLoadBalancerRegistrationAtomicOperation { + public DeregisterInstancesFromLoadBalancerAtomicOperation( + AbstractRegionAsgInstanceIdsDescription description) { + super((InstanceLoadBalancerRegistrationDescription) description); } @Override diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromTargetGroupAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromTargetGroupAtomicOperation.java similarity index 77% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromTargetGroupAtomicOperation.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromTargetGroupAtomicOperation.java index 1386a4d4fd6..a6cd1d61058 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromTargetGroupAtomicOperation.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeregisterInstancesFromTargetGroupAtomicOperation.java @@ -19,9 +19,11 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceTargetGroupRegistrationDescription; -public class DeregisterInstancesFromTargetGroupAtomicOperation extends AbstractInstanceTargetGroupRegistrationAtomicOperation { - public DeregisterInstancesFromTargetGroupAtomicOperation(AbstractRegionAsgInstanceIdsDescription description) { - super((InstanceTargetGroupRegistrationDescription)description); +public class DeregisterInstancesFromTargetGroupAtomicOperation + extends AbstractInstanceTargetGroupRegistrationAtomicOperation { + public DeregisterInstancesFromTargetGroupAtomicOperation( + AbstractRegionAsgInstanceIdsDescription description) { + 
super((InstanceTargetGroupRegistrationDescription) description); } @Override diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ExecuteCloudFormationChangeSetAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ExecuteCloudFormationChangeSetAtomicOperation.java new file mode 100644 index 00000000000..07a40a23f2e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ExecuteCloudFormationChangeSetAtomicOperation.java @@ -0,0 +1,93 @@ +/* + * Copyright 2019 Adevinta. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import com.amazonaws.services.cloudformation.AmazonCloudFormation; +import com.amazonaws.services.cloudformation.model.AmazonCloudFormationException; +import com.amazonaws.services.cloudformation.model.DescribeStacksRequest; +import com.amazonaws.services.cloudformation.model.ExecuteChangeSetRequest; +import com.amazonaws.services.cloudformation.model.ExecuteChangeSetResult; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ExecuteCloudFormationChangeSetDescription; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; + +@Slf4j +public class ExecuteCloudFormationChangeSetAtomicOperation implements AtomicOperation { + + private static final String BASE_PHASE = "EXECUTE_CLOUDFORMATION_CHANGESET"; + + @Autowired AmazonClientProvider amazonClientProvider; + + @Autowired + @Qualifier("amazonObjectMapper") + private ObjectMapper objectMapper; + + private ExecuteCloudFormationChangeSetDescription description; + + public ExecuteCloudFormationChangeSetAtomicOperation( + ExecuteCloudFormationChangeSetDescription executeCloudFormationChangeSetDescription) { + this.description = executeCloudFormationChangeSetDescription; + } + + @Override + public Map operate(List priorOutputs) { + Task task = TaskRepository.threadLocalTask.get(); + task.updateStatus(BASE_PHASE, "Configuring CloudFormation Stack"); + AmazonCloudFormation amazonCloudFormation = + amazonClientProvider.getAmazonCloudFormation( + description.getCredentials(), description.getRegion()); + + String stackName = description.getStackName(); + String changeSetName = description.getChangeSetName(); + + ExecuteChangeSetRequest executeChangeSetRequest = + new ExecuteChangeSetRequest() + .withStackName(description.getStackName()) + .withChangeSetName(description.getChangeSetName()); + 
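// Note: ExecuteChangeSetResult carries no payload in the AWS SDK v1 model, so the stack id is re-read below via DescribeStacks. +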
task.updateStatus(BASE_PHASE, "Executing CloudFormation ChangeSet"); + try { + ExecuteChangeSetResult executeChangeSetResult = + amazonCloudFormation.executeChangeSet(executeChangeSetRequest); + return Collections.singletonMap("stackId", getStackId(amazonCloudFormation)); + } catch (AmazonCloudFormationException e) { + log.error("Error executing change set", e); + throw e; + } + } + + private String getStackId(AmazonCloudFormation amazonCloudFormation) { + return amazonCloudFormation + .describeStacks(new DescribeStacksRequest().withStackName(description.getStackName())) + .getStacks() + .stream() + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "No CloudFormation Stack found with stack name " + description.getStackName())) + .getStackId(); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyServerGroupLaunchTemplateAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyServerGroupLaunchTemplateAtomicOperation.java new file mode 100644 index 00000000000..db52503ca40 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyServerGroupLaunchTemplateAtomicOperation.java @@ -0,0 +1,79 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions.PrepareModifyServerGroupLaunchTemplate.PrepareModifyServerGroupLaunchTemplateCommand; + +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions.ModifyServerGroupLaunchTemplate; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions.PrepareModifyServerGroupLaunchTemplate; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions.PrepareUpdateAutoScalingGroup; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions.UpdateAutoScalingGroup; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow; +import com.netflix.spinnaker.kork.exceptions.IntegrationException; +import java.util.List; +import javax.annotation.Nonnull; + +/** + * Atomic Operation Usage: To modify properties associated with the AWS entities associated with + * {@link ModifyServerGroupLaunchTemplateDescription}. 
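Implemented as a saga of prepare/modify/update actions (see buildSagaFlow below).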
Applicable to AWS AutoScalingGroups backed by + * EC2 launch template with / without mixed instances policy + * (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/autoscaling/model/MixedInstancesPolicy.html) + */ +public class ModifyServerGroupLaunchTemplateAtomicOperation + extends AbstractSagaAtomicOperation { + public ModifyServerGroupLaunchTemplateAtomicOperation( + ModifyServerGroupLaunchTemplateDescription description) { + super(description); + } + + @Nonnull + @Override + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(PrepareModifyServerGroupLaunchTemplate.class) + .then(ModifyServerGroupLaunchTemplate.class) + .then(PrepareUpdateAutoScalingGroup.class) + .then(UpdateAutoScalingGroup.class); + } + + @Override + protected void configureSagaBridge( + @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + builder.initialCommand( + PrepareModifyServerGroupLaunchTemplateCommand.builder().description(description).build()); + } + + @Override + protected Void parseSagaResult(@Nonnull Object result) { + return null; + } + + public static class LaunchTemplateException extends IntegrationException { + public LaunchTemplateException(String message) { + super(message); + setRetryable(true); + } + + public LaunchTemplateException(String message, Throwable cause) { + super(message, cause); + setRetryable(true); + } + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithLoadBalancerAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithLoadBalancerAtomicOperation.java similarity index 82% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithLoadBalancerAtomicOperation.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithLoadBalancerAtomicOperation.java index 4afea58d03f..8919bc019cc 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithLoadBalancerAtomicOperation.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithLoadBalancerAtomicOperation.java @@ -19,8 +19,10 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceLoadBalancerRegistrationDescription; -public class RegisterInstancesWithLoadBalancerAtomicOperation extends AbstractInstanceLoadBalancerRegistrationAtomicOperation { - public RegisterInstancesWithLoadBalancerAtomicOperation(AbstractRegionAsgInstanceIdsDescription description) { +public class RegisterInstancesWithLoadBalancerAtomicOperation + extends AbstractInstanceLoadBalancerRegistrationAtomicOperation { + public RegisterInstancesWithLoadBalancerAtomicOperation( + AbstractRegionAsgInstanceIdsDescription description) { super((InstanceLoadBalancerRegistrationDescription) description); } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithTargetGroupAtomicOperation.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithTargetGroupAtomicOperation.java similarity index 82% rename from 
clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithTargetGroupAtomicOperation.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithTargetGroupAtomicOperation.java index 32d045e474a..179d0a52598 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithTargetGroupAtomicOperation.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegisterInstancesWithTargetGroupAtomicOperation.java @@ -19,8 +19,10 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractRegionAsgInstanceIdsDescription; import com.netflix.spinnaker.clouddriver.aws.deploy.description.InstanceTargetGroupRegistrationDescription; -public class RegisterInstancesWithTargetGroupAtomicOperation extends AbstractInstanceTargetGroupRegistrationAtomicOperation { - public RegisterInstancesWithTargetGroupAtomicOperation(AbstractRegionAsgInstanceIdsDescription description) { +public class RegisterInstancesWithTargetGroupAtomicOperation + extends AbstractInstanceTargetGroupRegistrationAtomicOperation { + public RegisterInstancesWithTargetGroupAtomicOperation( + AbstractRegionAsgInstanceIdsDescription description) { super((InstanceTargetGroupRegistrationDescription) description); } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegistrationAction.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegistrationAction.java similarity index 100% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegistrationAction.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/RegistrationAction.java diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/ModifyServerGroupLaunchTemplate.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/ModifyServerGroupLaunchTemplate.java new file mode 100644 index 00000000000..5b8e007a8f5 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/ModifyServerGroupLaunchTemplate.java @@ -0,0 +1,135 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions; + +import com.amazonaws.services.ec2.model.LaunchTemplateVersion; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ModifyServerGroupLaunchTemplateAtomicOperation.LaunchTemplateException; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService; +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Collections; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.stereotype.Component; + +/** + * Action to modify an EC2 launch template, i.e. create a new version of the launch template with + * the requested changes. This action may be skipped if no launch template changes are requested. + */ +@Component +public class ModifyServerGroupLaunchTemplate + implements SagaAction<ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand> { + private final CredentialsRepository<NetflixAmazonCredentials> credentialsRepository; + private final RegionScopedProviderFactory regionScopedProviderFactory; + + public ModifyServerGroupLaunchTemplate( + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository, + RegionScopedProviderFactory regionScopedProviderFactory) { + this.credentialsRepository = credentialsRepository; + this.regionScopedProviderFactory = regionScopedProviderFactory; + } + + @NotNull + @Override + public Result apply(@NotNull ModifyServerGroupLaunchTemplateCommand command, @NotNull Saga saga) { + ModifyServerGroupLaunchTemplateDescription description = command.description; + + if (!command.isReqToModifyLaunchTemplate) { + saga.log( + "[SAGA_ACTION] Skipping ModifyServerGroupLaunchTemplate as only mixed instances policy will be updated."); + + return new Result( + PrepareUpdateAutoScalingGroup.PrepareUpdateAutoScalingGroupCommand.builder() + .description(description) + .launchTemplateVersion(command.sourceVersion) + .newLaunchTemplateVersionNumber(null) + .isReqToUpgradeAsgToMixedInstancesPolicy( + command.isReqToUpgradeAsgToMixedInstancesPolicy) + .build(), + Collections.emptyList()); + } + + NetflixAmazonCredentials credentials = + (NetflixAmazonCredentials) credentialsRepository.getOne(description.getAccount()); + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider = + regionScopedProviderFactory.forRegion(credentials, description.getRegion()); + + saga.log( + "[SAGA_ACTION] Modifying launch template (i.e. 
creating a new version) for EC2 Auto Scaling Group " + + description.getAsgName()); + + LaunchTemplateService launchTemplateService = regionScopedProvider.getLaunchTemplateService(); + LaunchTemplateVersion newVersion; + try { + boolean shouldUseMixedInstancesPolicy = + command.isAsgBackedByMixedInstancesPolicy + || command.isReqToUpgradeAsgToMixedInstancesPolicy; + newVersion = + launchTemplateService.modifyLaunchTemplate( + credentials, description, command.sourceVersion, shouldUseMixedInstancesPolicy); + } catch (Exception e) { + throw new LaunchTemplateException("Failed to modify launch template", e); + } + + return new Result( + PrepareUpdateAutoScalingGroup.PrepareUpdateAutoScalingGroupCommand.builder() + .description(description) + .launchTemplateVersion(newVersion) + .newLaunchTemplateVersionNumber(newVersion.getVersionNumber()) + .isReqToUpgradeAsgToMixedInstancesPolicy( + command.isReqToUpgradeAsgToMixedInstancesPolicy) + .build(), + Collections.emptyList()); + } + + @Builder(builderClassName = "ModifyServerGroupLaunchTemplateCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + ModifyServerGroupLaunchTemplateCommand.ModifyServerGroupLaunchTemplateCommandBuilder + .class) + @JsonTypeName("modifyServerGroupLaunchTemplateCommand") + @Value + public static class ModifyServerGroupLaunchTemplateCommand implements SagaCommand { + @Nonnull private LaunchTemplateVersion sourceVersion; + @Nonnull private ModifyServerGroupLaunchTemplateDescription description; + @Nonnull private Boolean isReqToModifyLaunchTemplate; + @Nonnull private Boolean isReqToUpgradeAsgToMixedInstancesPolicy; + @Nonnull private Boolean isAsgBackedByMixedInstancesPolicy; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(@NotNull EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class ModifyServerGroupLaunchTemplateCommandBuilder {} + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareModifyServerGroupLaunchTemplate.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareModifyServerGroupLaunchTemplate.java new file mode 100644 index 00000000000..0b45ad106a3 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareModifyServerGroupLaunchTemplate.java @@ -0,0 +1,483 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions; + +import com.amazonaws.services.autoscaling.model.AutoScalingGroup; +import com.amazonaws.services.autoscaling.model.InstancesDistribution; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMapping; +import com.amazonaws.services.ec2.model.LaunchTemplateIamInstanceProfileSpecification; +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMarketOptions; +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceNetworkInterfaceSpecification; +import com.amazonaws.services.ec2.model.LaunchTemplateVersion; +import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.aws.deploy.AmiIdResolver; +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig; +import com.netflix.spinnaker.clouddriver.aws.deploy.ModifyServerGroupUtils; +import com.netflix.spinnaker.clouddriver.aws.deploy.ResolvedAmiResult; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ModifyServerGroupLaunchTemplateAtomicOperation.LaunchTemplateException; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory.RegionScopedProvider; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.stereotype.Component; + +/** + * Action to prepare the description of type ModifyServerGroupLaunchTemplateDescription for launch + * template and server group configuration changes. Steps: 1. populate description with config from + * server group's mixed instances policy 2. populate description with config from current launch + * template version, in preparation to create a new version + * + *
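Both steps only fill in fields that the request left unset, preserving values explicitly provided by the caller. + *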
<p>
Step 2 of this action may be skipped if no launch template changes are requested. + */ +@Component +public class PrepareModifyServerGroupLaunchTemplate + implements SagaAction< + PrepareModifyServerGroupLaunchTemplate.PrepareModifyServerGroupLaunchTemplateCommand> { + private final BlockDeviceConfig blockDeviceConfig; + private final CredentialsRepository credentialsRepository; + private final RegionScopedProviderFactory regionScopedProviderFactory; + + public PrepareModifyServerGroupLaunchTemplate( + BlockDeviceConfig blockDeviceConfig, + CredentialsRepository credentialsRepository, + RegionScopedProviderFactory regionScopedProviderFactory) { + this.blockDeviceConfig = blockDeviceConfig; + this.credentialsRepository = credentialsRepository; + this.regionScopedProviderFactory = regionScopedProviderFactory; + } + + @NotNull + @Override + public Result apply( + @NotNull PrepareModifyServerGroupLaunchTemplateCommand command, @NotNull Saga saga) { + ModifyServerGroupLaunchTemplateDescription description = command.description; + NetflixAmazonCredentials credentials = + (NetflixAmazonCredentials) credentialsRepository.getOne(description.getAccount()); + + saga.log( + "[SAGA_ACTION] Performing modifyServerGroupLaunchTemplate operation for server group " + + description.getAsgName()); + + RegionScopedProvider regionScopedProvider = + regionScopedProviderFactory.forRegion(credentials, description.getRegion()); + AutoScalingGroup autoScalingGroup = + getAutoScalingGroup(description.getAsgName(), regionScopedProvider); + LaunchTemplateVersion launchTemplateVersion = + getLaunchTemplateVersion(autoScalingGroup, regionScopedProvider); + ResponseLaunchTemplateData launchTemplateData = launchTemplateVersion.getLaunchTemplateData(); + boolean isAsgBackedByMip = autoScalingGroup.getMixedInstancesPolicy() != null; + + // Step #1: populate description with config from server group's mixed instances policy + if (autoScalingGroup.getMixedInstancesPolicy() != null) { + populateDescWithMipFields(description, autoScalingGroup); + } + + // Determine if step #2(populate description with config from current launch template version) + // can be skipped + boolean asgUsesSpotLt = launchTemplateData.getInstanceMarketOptions() != null; + + /** + * A valid request should include fields mapped to either launch template or AWS ASG config or + * both. ModifyServerGroupLaunchTemplateValidator rejects requests with only metadata fields + * i.e. no launch template or ASG config changes. + */ + final Set nonMetadataFieldsSet = + ModifyServerGroupUtils.getNonMetadataFieldsSetInReq(description); + boolean isReqToModifyMipFieldsOnly = + nonMetadataFieldsSet.stream() + .allMatch( + f -> + ModifyServerGroupLaunchTemplateDescription.getMixedInstancesPolicyFieldNames() + .contains(f)); + + // Selectively skip launch template modification in some cases when NO launch template changes + // are required: + // 1. ASG with MIP + isReqToModifyMixedInstancesPolicyOnlyFields (including spotMaxPrice) + // 2. ASG with OD LT + isReqToModifyMixedInstancesPolicyOnlyFields (including spotMaxPrice) + // Reason is to prevent an error like -> + // AmazonAutoScalingException: Incompatible launch template: + // You cannot use a launch template that is set to request Spot Instances + // (InstanceMarketOptions) when you configure an Auto Scaling group with a mixed + // instances policy. + // Add a different launch template to the group and try again. 
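+ // In short: skip the launch template update only when the request touches mixed-instances-policy + // fields alone AND the existing template can be reused as-is (the ASG already backs a mixed + // instances policy, or its template does not request Spot); an ASG whose launch template requests + // Spot must still get a new version with InstanceMarketOptions removed first.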
+ if (isReqToModifyMipFieldsOnly && (isAsgBackedByMip || !asgUsesSpotLt)) { + saga.log( + "[SAGA_ACTION] Skipping PrepareModifyServerGroupLaunchTemplate as only mixed instances policy will be updated."); + + return new Result( + ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand.builder() + .description(description) + .isReqToModifyLaunchTemplate(false) + .isAsgBackedByMixedInstancesPolicy(isAsgBackedByMip) + .isReqToUpgradeAsgToMixedInstancesPolicy(!isAsgBackedByMip) + .sourceVersion(launchTemplateVersion) + .build(), + Collections.emptyList()); + } + + saga.log("[SAGA_ACTION] Preparing for launch template modification"); + populateDescWithLaunchTemplateVersion( + saga, + description, + launchTemplateVersion, + credentials.getAccountId(), + regionScopedProvider.getAmazonEC2(), + autoScalingGroup); + + boolean isReqToModifyAtleastOneMipOnlyField = + nonMetadataFieldsSet.stream() + .anyMatch( + f -> + ModifyServerGroupLaunchTemplateDescription + .getMixedInstancesPolicyOnlyFieldNames() + .contains(f)); + + return new Result( + ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand.builder() + .description(description) + .isReqToModifyLaunchTemplate(true) + .isAsgBackedByMixedInstancesPolicy(isAsgBackedByMip) + .isReqToUpgradeAsgToMixedInstancesPolicy( + !isAsgBackedByMip + && isReqToModifyAtleastOneMipOnlyField) // upgrade to MIP if request includes at + // least 1 MIP field (along with 1 or + // more launch template fields) + .sourceVersion(launchTemplateVersion) + .build(), + Collections.emptyList()); + } + + private AutoScalingGroup getAutoScalingGroup( + String autoScalingGroupName, RegionScopedProvider regionScopedProvider) { + try { + return regionScopedProvider.getAsgService().getAutoScalingGroup(autoScalingGroupName); + } catch (Exception e) { + throw new LaunchTemplateException( + String.format("Failed to get server group %s.", autoScalingGroupName), e); + } + } + + private LaunchTemplateVersion getLaunchTemplateVersion( + AutoScalingGroup autoScalingGroup, RegionScopedProvider regionScopedProvider) { + LaunchTemplateSpecification launchTemplateSpec = + Optional.ofNullable( + autoScalingGroup.getMixedInstancesPolicy() != null + ? 
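/* a MIP-backed ASG nests its launch template spec inside the mixed instances policy */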
autoScalingGroup + .getMixedInstancesPolicy() + .getLaunchTemplate() + .getLaunchTemplateSpecification() + : autoScalingGroup.getLaunchTemplate()) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + "Server group is not backed by a launch template.\n%s", + autoScalingGroup))); + + return regionScopedProvider + .getLaunchTemplateService() + .getLaunchTemplateVersion(launchTemplateSpec) + .orElseThrow( + () -> + new IllegalStateException( + String.format( + "Requested launch template %s does not exist.", launchTemplateSpec))); + } + + private void populateDescWithMipFields( + ModifyServerGroupLaunchTemplateDescription modifyDesc, AutoScalingGroup autoScalingGroup) { + final InstancesDistribution distInAsg = + autoScalingGroup.getMixedInstancesPolicy().getInstancesDistribution(); + + modifyDesc.setOnDemandAllocationStrategy( + Optional.ofNullable(modifyDesc.getOnDemandAllocationStrategy()) + .orElse(distInAsg.getOnDemandAllocationStrategy())); + modifyDesc.setOnDemandBaseCapacity( + Optional.ofNullable(modifyDesc.getOnDemandBaseCapacity()) + .orElse(distInAsg.getOnDemandBaseCapacity())); + modifyDesc.setOnDemandPercentageAboveBaseCapacity( + Optional.ofNullable(modifyDesc.getOnDemandPercentageAboveBaseCapacity()) + .orElse(distInAsg.getOnDemandPercentageAboveBaseCapacity())); + modifyDesc.setSpotAllocationStrategy( + Optional.ofNullable(modifyDesc.getSpotAllocationStrategy()) + .orElse(distInAsg.getSpotAllocationStrategy())); + modifyDesc.setSpotInstancePools( + Optional.ofNullable(modifyDesc.getSpotInstancePools()) + .orElse( + // return the spotInstancePools in ASG iff it is compatible with the + // spotAllocationStrategy + modifyDesc.getSpotAllocationStrategy().equals("lowest-price") + ? distInAsg.getSpotInstancePools() + : null)); + modifyDesc.setLaunchTemplateOverridesForInstanceType( + Optional.ofNullable(modifyDesc.getLaunchTemplateOverridesForInstanceType()) + .orElse( + AsgConfigHelper.getDescriptionOverrides( + autoScalingGroup + .getMixedInstancesPolicy() + .getLaunchTemplate() + .getOverrides()))); + + modifyDesc.setSpotPrice(getSpotMaxPrice(modifyDesc.getSpotPrice(), autoScalingGroup, null)); + } + + private void populateDescWithLaunchTemplateVersion( + Saga saga, + ModifyServerGroupLaunchTemplateDescription modifyDesc, + LaunchTemplateVersion sourceLtVersion, + String accountId, + AmazonEC2 amazonEC2, + AutoScalingGroup autoScalingGroup) { + ResponseLaunchTemplateData sourceLtData = sourceLtVersion.getLaunchTemplateData(); + + modifyDesc.setSpotPrice( + getSpotMaxPrice(modifyDesc.getSpotPrice(), autoScalingGroup, sourceLtData)); + modifyDesc.setImageId( + getImageId(saga, amazonEC2, accountId, modifyDesc).orElse(sourceLtData.getImageId())); + + Set securityGroups = new HashSet<>(); + if (modifyDesc.getSecurityGroups() != null) { + securityGroups.addAll(modifyDesc.getSecurityGroups()); + } + + Boolean includePreviousGroups = + Optional.ofNullable(modifyDesc.getSecurityGroupsAppendOnly()) + .orElseGet(securityGroups::isEmpty); + if (includePreviousGroups) { + securityGroups.addAll( + sourceLtData.getNetworkInterfaces().stream() + .filter(i -> i.getDeviceIndex() == 0) + .findFirst() + .map(LaunchTemplateInstanceNetworkInterfaceSpecification::getGroups) + .orElse(Collections.emptyList())); + } + modifyDesc.setSecurityGroups(new ArrayList<>(securityGroups)); + + LaunchTemplateIamInstanceProfileSpecification iamInstanceProfileInLt = + sourceLtData.getIamInstanceProfile(); + String iamRoleInLt = null; + if (iamInstanceProfileInLt != null) { + iamRoleInLt = 
iamInstanceProfileInLt.getName(); + } + modifyDesc.setIamRole(Optional.ofNullable(modifyDesc.getIamRole()).orElse(iamRoleInLt)); + modifyDesc.setKeyPair( + Optional.ofNullable(modifyDesc.getKeyPair()).orElseGet(sourceLtData::getKeyName)); + modifyDesc.setRamdiskId( + Optional.ofNullable(modifyDesc.getRamdiskId()).orElseGet(sourceLtData::getRamDiskId)); + modifyDesc.setBlockDevices(getBlockDeviceMapping(modifyDesc, sourceLtData)); + } + + private List getBlockDeviceMapping( + ModifyServerGroupLaunchTemplateDescription modifyDesc, + ResponseLaunchTemplateData ltDataOldVersion) { + + // if block device mappings are explicitly specified, use them + if (modifyDesc.getBlockDevices() != null) { + return modifyDesc.getBlockDevices(); + } + + // modify mapping iff instance type has changed. + // for multiple instance types case, use the top-level instance type as it is used to derive + // defaults in {@link BasicAmazonDeployHandler} + if (modifyDesc.getInstanceType() != null + && !modifyDesc.getInstanceType().equals(ltDataOldVersion.getInstanceType())) { + final List defaultBdmForNewType = + blockDeviceConfig.getBlockDevicesForInstanceType(modifyDesc.getInstanceType()); + // if copy from source flag is unset, use default mapping for the modified instance type + if (!modifyDesc.isCopySourceCustomBlockDeviceMappings()) { + return defaultBdmForNewType; + } else { + // if prior version used default mapping do use default mapping on new version as well + List defaultBdmForOldType = + blockDeviceConfig.getBlockDevicesForInstanceType(ltDataOldVersion.getInstanceType()); + if (matchingBlockDevices(ltDataOldVersion.getBlockDeviceMappings(), defaultBdmForOldType)) { + return defaultBdmForNewType; + } + } + } + return null; + } + + private Optional getImageId( + Saga saga, + AmazonEC2 ec2, + String accountId, + ModifyServerGroupLaunchTemplateDescription modifyDesc) { + if (modifyDesc.getImageId() != null) { + return Optional.of(modifyDesc.getImageId()); + } + + String amiNameInReq = modifyDesc.getAmiName(); + if (amiNameInReq != null) { + saga.log("Resolving Image Id for " + amiNameInReq); + try { + ResolvedAmiResult ami = + AmiIdResolver.resolveAmiIdFromAllSources( + ec2, modifyDesc.getRegion(), amiNameInReq, accountId); + return Optional.ofNullable(ami.getAmiId()); + } catch (Exception e) { + throw new LaunchTemplateException( + String.format("Failed to resolve image id for %s", amiNameInReq), e) + .setRetryable(true); + } + } + + return Optional.empty(); + } + + private String getSpotMaxPrice( + String spotMaxPriceInReq, + AutoScalingGroup autoScalingGroup, + ResponseLaunchTemplateData ltData) { + if (spotMaxPriceInReq != null) { + return spotMaxPriceInReq.trim().equals("") ? null : spotMaxPriceInReq; + } + + Optional spotMaxPriceForAsg = Optional.empty(); + if (autoScalingGroup.getMixedInstancesPolicy() != null) { + spotMaxPriceForAsg = + Optional.ofNullable( + autoScalingGroup + .getMixedInstancesPolicy() + .getInstancesDistribution() + .getSpotMaxPrice()); + } else { + LaunchTemplateInstanceMarketOptions marketOptions = ltData.getInstanceMarketOptions(); + if (marketOptions != null && marketOptions.getSpotOptions() != null) { + spotMaxPriceForAsg = Optional.ofNullable(marketOptions.getSpotOptions().getMaxPrice()); + } + } + if (spotMaxPriceForAsg.isPresent()) { + return spotMaxPriceForAsg.get().trim().equals("") ? 
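/* treat a blank stored value as unset */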
null : spotMaxPriceForAsg.get(); + } + + return null; + } + + private boolean matchingBlockDevices( + List mappings, + List defaultBlockDevicesForInstanceType) { + for (LaunchTemplateBlockDeviceMapping mapping : mappings) { + if (defaultBlockDevicesForInstanceType.stream() + .anyMatch(deviceForType -> !matchesDevice(deviceForType, mapping))) { + return false; + } + } + + return true; + } + + private boolean matchesDevice( + AmazonBlockDevice deviceForType, LaunchTemplateBlockDeviceMapping mapping) { + BlockDevice device1 = + new BlockDevice() + .withDeviceName(deviceForType.getDeviceName()) + .withVirtualName(deviceForType.getVirtualName()) + .withSize(deviceForType.getSize()); + + BlockDevice device2 = + new BlockDevice() + .withDeviceName(mapping.getDeviceName()) + .withVirtualName(mapping.getVirtualName()) + .withSize(mapping.getEbs().getVolumeSize()); + + return device1.equals(device2); + } + + private static class BlockDevice { + private String deviceName; + private String virtualName; + private Integer size; + + public BlockDevice withDeviceName(String deviceName) { + this.deviceName = deviceName; + return this; + } + + public BlockDevice withVirtualName(String virtualName) { + this.virtualName = virtualName; + return this; + } + + public BlockDevice withSize(Integer size) { + this.size = size; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BlockDevice that = (BlockDevice) o; + return Objects.equals(deviceName, that.deviceName) + && Objects.equals(virtualName, that.virtualName) + && Objects.equals(size, that.size); + } + + @Override + public int hashCode() { + return Objects.hash(deviceName, virtualName, size); + } + } + + @Builder( + builderClassName = "PrepareModifyServerGroupLaunchTemplateCommandBuilder", + toBuilder = true) + @JsonDeserialize( + builder = + PrepareModifyServerGroupLaunchTemplateCommand + .PrepareModifyServerGroupLaunchTemplateCommandBuilder.class) + @JsonTypeName("prepareModifyServerGroupLaunchTemplateCommand") + @Value + public static class PrepareModifyServerGroupLaunchTemplateCommand implements SagaCommand { + @Nonnull private ModifyServerGroupLaunchTemplateDescription description; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(@NotNull EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class PrepareModifyServerGroupLaunchTemplateCommandBuilder {} + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareUpdateAutoScalingGroup.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareUpdateAutoScalingGroup.java new file mode 100644 index 00000000000..2888ca7724e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareUpdateAutoScalingGroup.java @@ -0,0 +1,109 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions; + +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides; +import com.amazonaws.services.ec2.model.LaunchTemplateVersion; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.validators.ModifyServerGroupLaunchTemplateValidator; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationErrors; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationException; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import java.util.Collections; +import java.util.List; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.stereotype.Component; + +/** Action to prepare for AWS EC2 Auto Scaling Group update. */ +@Component +public class PrepareUpdateAutoScalingGroup + implements SagaAction { + private final ModifyServerGroupLaunchTemplateValidator validator; + + public PrepareUpdateAutoScalingGroup(ModifyServerGroupLaunchTemplateValidator validator) { + this.validator = validator; + } + + @NotNull + @Override + public Result apply(@NotNull PrepareUpdateAutoScalingGroupCommand command, @NotNull Saga saga) { + ModifyServerGroupLaunchTemplateDescription description = command.description; + + saga.log( + "[SAGA_ACTION] Preparing to update EC2 Auto Scaling Group " + description.getAsgName()); + + // validate description before proceeding with the update + saga.log("[SAGA_ACTION] Validating configuration for modify"); + DescriptionValidationErrors validationErrors = new DescriptionValidationErrors(description); + validator.validate(Collections.emptyList(), description, validationErrors); + if (validationErrors.hasErrors()) { + saga.log("[SAGA_ACTION] Validation failed with errors: " + validationErrors.toString()); + throw new DescriptionValidationException(validationErrors); + } + + // transform overrides + List ltOverrides = + AsgConfigHelper.getLaunchTemplateOverrides( + description.getLaunchTemplateOverridesForInstanceType()); + + UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand updateCommand = + UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand.builder() + .description(description) + .launchTemplateVersion(command.launchTemplateVersion) + .launchTemplateOverrides(ltOverrides) + .isReqToUpgradeAsgToMixedInstancesPolicy( + command.isReqToUpgradeAsgToMixedInstancesPolicy) + .newLaunchTemplateVersionNumber(command.newLaunchTemplateVersionNumber) + .build(); + return new Result(updateCommand, Collections.emptyList()); + } + + @Builder(builderClassName = "PrepareUpdateAutoScalingGroupCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + PrepareUpdateAutoScalingGroupCommand.PrepareUpdateAutoScalingGroupCommandBuilder.class) + 
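// Commands are serialized with the saga's events; the Jackson builder lets the immutable @Value class round-trip. +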
@JsonTypeName("PrepareUpdateAutoScalingGroupCommand") + @Value + public static class PrepareUpdateAutoScalingGroupCommand implements SagaCommand { + @Nonnull private ModifyServerGroupLaunchTemplateDescription description; + @Nonnull private LaunchTemplateVersion launchTemplateVersion; + @Nonnull private Boolean isReqToUpgradeAsgToMixedInstancesPolicy; + @Nullable private Long newLaunchTemplateVersionNumber; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(@NotNull EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class PrepareUpdateAutoScalingGroupCommandBuilder {} + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/UpdateAutoScalingGroup.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/UpdateAutoScalingGroup.java new file mode 100644 index 00000000000..5dd4936f5b0 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/UpdateAutoScalingGroup.java @@ -0,0 +1,186 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions; + +import com.amazonaws.services.autoscaling.model.AutoScalingGroup; +import com.amazonaws.services.autoscaling.model.InstancesDistribution; +import com.amazonaws.services.autoscaling.model.LaunchTemplate; +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides; +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy; +import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest; +import com.amazonaws.services.ec2.model.LaunchTemplateVersion; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ModifyServerGroupLaunchTemplateAtomicOperation.LaunchTemplateException; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService; +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.List; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import 
lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; +import org.springframework.stereotype.Component; + +/** Action to update an AWS EC2 Auto Scaling Group. */ +@Slf4j +@Component +public class UpdateAutoScalingGroup + implements SagaAction<UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand> { + private final RegionScopedProviderFactory regionScopedProviderFactory; + private final CredentialsRepository<NetflixAmazonCredentials> credentialsRepository; + + public UpdateAutoScalingGroup( + RegionScopedProviderFactory regionScopedProviderFactory, + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository) { + this.regionScopedProviderFactory = regionScopedProviderFactory; + this.credentialsRepository = credentialsRepository; + } + + @NotNull + @Override + public Result apply( + @NotNull UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand command, @NotNull Saga saga) { + ModifyServerGroupLaunchTemplateDescription description = command.description; + NetflixAmazonCredentials credentials = + (NetflixAmazonCredentials) credentialsRepository.getOne(description.getAccount()); + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider = + regionScopedProviderFactory.forRegion(credentials, description.getRegion()); + + saga.log("[SAGA_ACTION] Updating EC2 Auto Scaling Group " + description.getAsgName()); + + // build update request + UpdateAutoScalingGroupRequest updateReq = + new UpdateAutoScalingGroupRequest().withAutoScalingGroupName(description.getAsgName()); + + AutoScalingGroup autoScalingGroup = + getAutoScalingGroup(description.getAsgName(), regionScopedProvider); + boolean isAsgBackedByMip = autoScalingGroup.getMixedInstancesPolicy() != null; + + String ltId = command.launchTemplateVersion.getLaunchTemplateId(); + String ltVersion = String.valueOf(command.launchTemplateVersion.getVersionNumber()); + + if (isAsgBackedByMip || command.isReqToUpgradeAsgToMixedInstancesPolicy) { + final MixedInstancesPolicy mip = + new MixedInstancesPolicy() + .withLaunchTemplate( + new LaunchTemplate() + .withLaunchTemplateSpecification( + new LaunchTemplateSpecification() + .withLaunchTemplateId(ltId) + .withVersion(ltVersion)) + .withOverrides(command.launchTemplateOverrides)) + .withInstancesDistribution( + new InstancesDistribution() + .withOnDemandAllocationStrategy(description.getOnDemandAllocationStrategy()) + .withOnDemandBaseCapacity(description.getOnDemandBaseCapacity()) + .withOnDemandPercentageAboveBaseCapacity( + description.getOnDemandPercentageAboveBaseCapacity()) + .withSpotAllocationStrategy(description.getSpotAllocationStrategy()) + .withSpotInstancePools(description.getSpotInstancePools()) + .withSpotMaxPrice(description.getSpotPrice())); + + updateReq.withMixedInstancesPolicy(mip); + } else { + updateReq.withLaunchTemplate( + new LaunchTemplateSpecification().withLaunchTemplateId(ltId).withVersion(ltVersion)); + } + + try { + regionScopedProvider.getAutoScaling().updateAutoScalingGroup(updateReq); + } catch (Exception e) { + StringBuilder exceptionMsg = + new StringBuilder( + String.format("Failed to update server group %s.", description.getAsgName())); + if (StringUtils.isNotBlank(e.getMessage())) { + exceptionMsg.append(String.format("Error: %s\n", e.getMessage())); + } + + try { + // Clean up the launch template version newly created by the Saga flow, in order to keep + // the latest version unaltered. + // This step is required because only the default and latest launch template versions for a + // launch template are cached.
+ // Not cleaning up will result in Internal Server Error for Clouddriver API requests and + // subsequent Deck errors. + if (command.getNewLaunchTemplateVersionNumber() != null) { + saga.log("[SAGA_ACTION] Cleaning up to keep the operation atomic."); + cleanUpOnFailure( + regionScopedProvider.getLaunchTemplateService(), + command.getLaunchTemplateVersion().getLaunchTemplateId(), + command.getNewLaunchTemplateVersionNumber()); + } + } catch (Exception ex) { + exceptionMsg.append( + "Failed to clean up launch template version! Error: " + ex.getMessage()); + } + throw new LaunchTemplateException(exceptionMsg.toString(), e); + } + return new Result(); + } + + private AutoScalingGroup getAutoScalingGroup( + String autoScalingGroupName, + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider) { + try { + return regionScopedProvider.getAsgService().getAutoScalingGroup(autoScalingGroupName); + } catch (Exception e) { + throw new LaunchTemplateException( + String.format("Failed to get server group %s.", autoScalingGroupName), e); + } + } + + private void cleanUpOnFailure( + LaunchTemplateService ltService, String launchTemplateId, Long ltVersionToDelete) { + ltService.deleteLaunchTemplateVersion(launchTemplateId, ltVersionToDelete); + } + + @Builder(builderClassName = "UpdateAutoScalingGroupCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = UpdateAutoScalingGroupCommand.UpdateAutoScalingGroupCommandBuilder.class) + @JsonTypeName("updateAutoScalingGroupCommand") + @Value + public static class UpdateAutoScalingGroupCommand implements SagaCommand { + @Nonnull private ModifyServerGroupLaunchTemplateDescription description; + @Nonnull private LaunchTemplateVersion launchTemplateVersion; + @Nullable private Long newLaunchTemplateVersionNumber; + @Nullable private List<LaunchTemplateOverrides> launchTemplateOverrides; + @Nonnull private Boolean isReqToUpgradeAsgToMixedInstancesPolicy; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(@NotNull EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class UpdateAutoScalingGroupCommandBuilder {} + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/IngressLoadBalancerBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/IngressLoadBalancerBuilder.java new file mode 100644 index 00000000000..2ee995b34a3 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/IngressLoadBalancerBuilder.java @@ -0,0 +1,167 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer; + +import com.amazonaws.services.ec2.model.IpPermission; +import com.amazonaws.services.ec2.model.SecurityGroup; +import com.amazonaws.services.ec2.model.UserIdGroupPair; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupIngressConverter; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import org.springframework.stereotype.Component; + +@Component +public class IngressLoadBalancerBuilder { + + public IngressLoadBalancerGroupResult ingressApplicationLoadBalancerGroup( + String application, + String region, + String credentialAccount, + NetflixAmazonCredentials credentials, + String vpcId, + Collection<Integer> ports, + SecurityGroupLookupFactory securityGroupLookupFactory) + throws FailedSecurityGroupIngressException { + SecurityGroupLookupFactory.SecurityGroupLookup securityGroupLookup = + securityGroupLookupFactory.getInstance(region); + + // 1. get app load balancer security group & app security group. create if doesn't exist + SecurityGroupLookupFactory.SecurityGroupUpdater applicationLoadBalancerSecurityGroupUpdater = + getOrCreateSecurityGroup( + application + "-elb", + region, + "Application ELB Security Group for " + application, + credentialAccount, + credentials, + vpcId, + securityGroupLookup); + + SecurityGroupLookupFactory.SecurityGroupUpdater applicationSecurityGroupUpdater = + getOrCreateSecurityGroup( + application, + region, + "Application Security Group for " + application, + credentialAccount, + credentials, + vpcId, + securityGroupLookup); + + SecurityGroup source = applicationLoadBalancerSecurityGroupUpdater.getSecurityGroup(); + SecurityGroup target = applicationSecurityGroupUpdater.getSecurityGroup(); + List<IpPermission> currentPermissions = + SecurityGroupIngressConverter.flattenPermissions(target); + List<IpPermission> targetPermissions = + ports.stream() + .map(port -> newIpPermissionWithSourceAndPort(source.getGroupId(), port)) + .collect(Collectors.toList()); + + filterOutExistingPermissions(targetPermissions, currentPermissions); + if (!targetPermissions.isEmpty()) { + try { + applicationSecurityGroupUpdater.addIngress(targetPermissions); + } catch (Exception e) { + throw new FailedSecurityGroupIngressException(e); + } + } + + return new IngressLoadBalancerGroupResult(source.getGroupId(), source.getGroupName()); + } + + private SecurityGroupLookupFactory.SecurityGroupUpdater getOrCreateSecurityGroup( + String groupName, + String region, + String descriptionText, + String credentialAccount, + NetflixAmazonCredentials credentials, + String vpcId, + SecurityGroupLookupFactory.SecurityGroupLookup securityGroupLookup) { + return (SecurityGroupLookupFactory.SecurityGroupUpdater) + OperationPoller.retryWithBackoff( + o -> { + SecurityGroupLookupFactory.SecurityGroupUpdater securityGroupUpdater = + securityGroupLookup + .getSecurityGroupByName(credentialAccount, groupName, vpcId) + .orElse(null); + + if (securityGroupUpdater == null) { + UpsertSecurityGroupDescription description = new UpsertSecurityGroupDescription(); + description.setName(groupName); + description.setDescription(descriptionText); +
description.setVpcId(vpcId); + description.setRegion(region); + description.setCredentials(credentials); + return securityGroupLookup.createSecurityGroup(description); + } + return securityGroupUpdater; + }, + 500, + 3); + } + + private void filterOutExistingPermissions( + List<IpPermission> permissionsToAdd, List<IpPermission> existingPermissions) { + permissionsToAdd.forEach( + permission -> + permission + .getUserIdGroupPairs() + .removeIf( + pair -> + existingPermissions.stream() + .anyMatch( + p -> + p.getFromPort().equals(permission.getFromPort()) + && p.getToPort().equals(permission.getToPort()) + && pair.getGroupId() != null + && p.getUserIdGroupPairs().stream() + .anyMatch( + gp -> + gp.getGroupId() != null + && gp.getGroupId() + .equals(pair.getGroupId()))))); + + permissionsToAdd.removeIf(permission -> permission.getUserIdGroupPairs().isEmpty()); + } + + public static class IngressLoadBalancerGroupResult { + public final String groupId; + public final String groupName; + + IngressLoadBalancerGroupResult(String groupId, String groupName) { + this.groupId = groupId; + this.groupName = groupName; + } + } + + private IpPermission newIpPermissionWithSourceAndPort(String sourceGroupId, int port) { + return new IpPermission() + .withIpProtocol("tcp") + .withFromPort(port) + .withToPort(port) + .withUserIdGroupPairs(new UserIdGroupPair().withGroupId(sourceGroupId)); + } + + static class FailedSecurityGroupIngressException extends Exception { + FailedSecurityGroupIngressException(Exception e) { + super(e); + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/preprocessors/AllowLaunchPreProcessor.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/preprocessors/AllowLaunchPreProcessor.java new file mode 100644 index 00000000000..2a953ca7156 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/preprocessors/AllowLaunchPreProcessor.java @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.preprocessors; + +import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component +public class AllowLaunchPreProcessor implements AtomicOperationDescriptionPreProcessor { + @Override + public boolean supports(Class descriptionClass) { + return AllowLaunchDescription.class.isAssignableFrom(descriptionClass); + } + + @Override + public Map process(Map description) { + // Backwards-compatibility from when AllowLaunch used to overload "account" from the abstract + // AWS description.
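+ // For example (hypothetical values, not from the original change): {account: "prod", + // credentials: "managing"} becomes {targetAccount: "prod", account: "managing", credentials: "managing"}.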
+ description.putIfAbsent("targetAccount", description.get("account")); + description.put("account", description.get("credentials")); + return description; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/preprocessors/CredentialsAccountNormalizationPreProcessor.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/preprocessors/CredentialsAccountNormalizationPreProcessor.java new file mode 100644 index 00000000000..db4c45aff20 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/preprocessors/CredentialsAccountNormalizationPreProcessor.java @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.deploy.preprocessors; + +import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +/** + * Normalizes the use of `account` vs `credentials`, ensuring that both are always set; prefers the + * value from `credentials`. 
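+ * + * <p>For example (hypothetical values, added for illustration): {account: "a", credentials: "b"} + * is normalized to {account: "b", credentials: "b"}, and {account: "a"} alone to + * {account: "a", credentials: "a"}.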
+ */ +@Slf4j +@Component +public class CredentialsAccountNormalizationPreProcessor + implements AtomicOperationDescriptionPreProcessor { + @Override + public boolean supports(Class descriptionClass) { + return !AllowLaunchDescription.class.isAssignableFrom(descriptionClass); + } + + @Override + public Map process(Map description) { + final String account = (String) description.get("account"); + final String credentials = (String) description.get("credentials"); + + if (account != null && credentials != null && !account.equals(credentials)) { + log.warn( + "Passed 'account' ({}) and 'credentials' ({}), but values are not equal", + account, + credentials); + description.put("account", credentials); + } + + if (credentials == null && account != null) { + description.put("credentials", account); + } + if (account == null && credentials != null) { + description.put("account", credentials); + } + + return description; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/DefaultUserDataTokenizer.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/DefaultUserDataTokenizer.java new file mode 100644 index 00000000000..770940b59b4 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/DefaultUserDataTokenizer.java @@ -0,0 +1,64 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.userdata; + +import com.google.common.base.Strings; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataTokenizer; + +public class DefaultUserDataTokenizer implements UserDataTokenizer { + + @Override + public String replaceTokens( + Names names, UserDataInput userDataInput, String rawUserData, Boolean legacyUdf) { + String stack = isPresent(names.getStack()) ? names.getStack() : ""; + String cluster = isPresent(names.getCluster()) ? names.getCluster() : ""; + String revision = isPresent(names.getRevision()) ? names.getRevision() : ""; + String countries = isPresent(names.getCountries()) ? names.getCountries() : ""; + String devPhase = isPresent(names.getDevPhase()) ? names.getDevPhase() : ""; + String hardware = isPresent(names.getHardware()) ? names.getHardware() : ""; + String zone = isPresent(names.getZone()) ? names.getZone() : ""; + String detail = isPresent(names.getDetail()) ? names.getDetail() : ""; + + // Replace the tokens & return the result + String result = + rawUserData + .replace("%%account%%", userDataInput.getAccount()) + .replace("%%accounttype%%", userDataInput.getAccountType()) + .replace( + "%%env%%", + (legacyUdf != null && legacyUdf) + ? 
userDataInput.getAccount() + : userDataInput.getEnvironment()) + .replace("%%app%%", names.getApp()) + .replace("%%region%%", userDataInput.getRegion()) + .replace("%%group%%", names.getGroup()) + .replace("%%autogrp%%", names.getGroup()) + .replace("%%revision%%", revision) + .replace("%%countries%%", countries) + .replace("%%devPhase%%", devPhase) + .replace("%%hardware%%", hardware) + .replace("%%zone%%", zone) + .replace("%%cluster%%", cluster) + .replace("%%stack%%", stack) + .replace("%%detail%%", detail) + .replace("%%tier%%", ""); + + if (userDataInput.getLaunchTemplate() != null && userDataInput.getLaunchTemplate()) { + result = + result + .replace("%%launchtemplate%%", userDataInput.getLaunchSettingName()) + .replace("%%launchconfig%%", ""); + } else { + result = + result + .replace("%%launchconfig%%", userDataInput.getLaunchSettingName()) + .replace("%%launchtemplate%%", ""); + } + + return result; + } + + private static boolean isPresent(String value) { + return !Strings.isNullOrEmpty(value); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProviderAggregator.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProviderAggregator.java new file mode 100644 index 00000000000..90bdd30e088 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProviderAggregator.java @@ -0,0 +1,95 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.userdata; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataTokenizer; +import com.netflix.spinnaker.kork.exceptions.UserException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Aggregates all user data from the configured list of providers (see {@link UserDataProvider}). + */ +public class UserDataProviderAggregator { + + private final List<UserDataProvider> providers; + private final List<UserDataTokenizer> tokenizers; + + public UserDataProviderAggregator( + List<UserDataProvider> providers, List<UserDataTokenizer> tokenizers) { + this.providers = providers; + this.tokenizers = tokenizers; + } + + /** + * Aggregates all user data. First iterates through all providers and joins user data with a + * newline. Then, adds the user supplied base64 encoded user data and again joins with a newline. + * The result is such that the user supplied base64 encoded user data is always appended last to + * the user data. + * + *
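<p>For example (hypothetical values, added for illustration): providers contributing "foo" and + * "bar" plus user-supplied base64-encoded "baz" aggregate to the base64 encoding of + * "foo\nbar\nbaz". + * + *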

<p>Note, if {@link UserDataOverride#isEnabled()} is true, then the user data from the providers + * is skipped and the user supplied base64 encoded user data is used as the override. If this is + * the case, user data format tokens (either a custom or default set) are replaced in the user + * data - effectively processing the user data as a UDF template. + * + * @param userDataInput {@link UserDataInput} + * @return String + */ + public String aggregate(UserDataInput userDataInput) { + byte[] bytes = + Base64.getDecoder() + .decode(Optional.ofNullable(userDataInput.getBase64UserData()).orElse("")); + String userDataDecoded = new String(bytes, StandardCharsets.UTF_8); + + // If overriding the default user data, process the tokens + if (userDataInput.getUserDataOverride().isEnabled()) { + List<UserDataTokenizer> udts = + tokenizers.stream() + .filter(it -> it.supports(userDataInput.getUserDataOverride().getTokenizerName())) + .collect(Collectors.toList()); + + if (udts.isEmpty()) { + throw new UserException( + "Unable to find supporting user data tokenizer for {}", + userDataInput.getUserDataOverride().getTokenizerName()); + } + + for (UserDataTokenizer t : udts) { + userDataDecoded = + t.replaceTokens( + Names.parseName(userDataInput.getAsgName()), userDataInput, userDataDecoded, false); + } + return result(Collections.singletonList(userDataDecoded)); + } + + List<String> allUserData = new ArrayList<>(); + if (providers != null && !userDataInput.getUserDataOverride().isEnabled()) { + allUserData = + providers.stream().map(p -> p.getUserData(userDataInput)).collect(Collectors.toList()); + } + String data = String.join("\n", allUserData); + + return result(Arrays.asList(data, userDataDecoded)); + } + + private String result(List<String> parts) { + String result = String.join("\n", parts); + if (result.startsWith("\n")) { + result = result.trim(); + } + + if (result.isEmpty()) { + return null; + } + + return Base64.getEncoder().encodeToString(result.getBytes(StandardCharsets.UTF_8)); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateAmazonLoadBalancerDescriptionValidator.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateAmazonLoadBalancerDescriptionValidator.java new file mode 100644 index 00000000000..764e1403ba3 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateAmazonLoadBalancerDescriptionValidator.java @@ -0,0 +1,211 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.validators; + +import com.amazonaws.services.elasticloadbalancingv2.model.AuthenticateOidcActionConfig; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetTypeEnum; +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerClassicDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerV2Description; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancerType; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +@Component("createAmazonLoadBalancerDescriptionValidator") +class CreateAmazonLoadBalancerDescriptionValidator + extends AmazonDescriptionValidationSupport<UpsertAmazonLoadBalancerDescription> { + private void validateActions( + List<UpsertAmazonLoadBalancerV2Description.Action> actions, + Set<String> allTargetGroupNames, + Set<String> unusedTargetGroupNames, + ValidationErrors errors) { + for (UpsertAmazonLoadBalancerV2Description.Action action : actions) { + if (action.getType().equals("forward")) { + String targetGroupName = action.getTargetGroupName(); + if (!allTargetGroupNames.contains(targetGroupName)) { + errors.rejectValue( + "listeners", "createAmazonLoadBalancerDescription.listeners.invalid.targetGroup"); + } + unusedTargetGroupNames.remove(action.getTargetGroupName()); + } + + if (action.getType().equals("authenticate-oidc")) { + AuthenticateOidcActionConfig config = action.getAuthenticateOidcActionConfig(); + if (config.getClientId() == null) { + errors.rejectValue( + "listeners", "createAmazonLoadBalancerDescription.listeners.invalid.oidcConfig"); + } + } + } + } + + @Override + public void validate( + List priorDescriptions, + UpsertAmazonLoadBalancerDescription description, + ValidationErrors errors) { + // Common fields to validate + if (description.getName() == null && description.getClusterName() == null) { + errors.rejectValue( + "clusterName", "createAmazonLoadBalancerDescription.missing.name.or.clusterName"); + } + if (description.getSubnetType() == null && description.getAvailabilityZones() == null) { + errors.rejectValue( + "availabilityZones", + "createAmazonLoadBalancerDescription.missing.subnetType.or.availabilityZones"); + } + + if (description.getAvailabilityZones() != null) { + for (Map.Entry<String, List<String>> entry : description.getAvailabilityZones().entrySet()) { + String region = entry.getKey(); + List<String> azs = entry.getValue(); + + AmazonCredentials.AWSRegion acctRegion = + description.getCredentials().getRegions().stream() + .filter(r -> r.getName().equals(region)) + .findFirst() + .orElse(null); + if (acctRegion == null) { + errors.rejectValue( + "availabilityZones", "createAmazonLoadBalancerDescription.region.not.configured"); + } + if (description.getSubnetType() == null && azs == null) { + errors.rejectValue( + "availabilityZones", + "createAmazonLoadBalancerDescription.missing.subnetType.or.availabilityZones"); + break; + } + if (description.getSubnetType() == null + && acctRegion != null + && 
!acctRegion.getAvailabilityZones().containsAll(azs)) { + errors.rejectValue( + "availabilityZones", "createAmazonLoadBalancerDescription.zone.not.configured"); + } + } + } + + switch (description.getLoadBalancerType()) { + case CLASSIC: + UpsertAmazonLoadBalancerClassicDescription classicDescription = + (UpsertAmazonLoadBalancerClassicDescription) description; + if (classicDescription.getListeners() == null + || classicDescription.getListeners().size() == 0) { + errors.rejectValue("listeners", "createAmazonLoadBalancerDescription.listeners.empty"); + } + + if (classicDescription.getDeregistrationDelay() != null) { + if (classicDescription.getDeregistrationDelay() < 1 + || classicDescription.getDeregistrationDelay() > 3600) { + errors.rejectValue( + "deregistrationDelay", + "createAmazonLoadBalancerDescription.deregistrationDelay.invalid"); + } + } + break; + case APPLICATION: + case NETWORK: + UpsertAmazonLoadBalancerV2Description albDescription = + (UpsertAmazonLoadBalancerV2Description) description; + if (albDescription.targetGroups == null || albDescription.targetGroups.size() == 0) { + errors.rejectValue( + "targetGroups", "createAmazonLoadBalancerDescription.targetGroups.empty"); + } + + Set<String> allTargetGroupNames = new HashSet<>(); + for (UpsertAmazonLoadBalancerV2Description.TargetGroup targetGroup : + albDescription.targetGroups) { + allTargetGroupNames.add(targetGroup.getName()); + if (targetGroup.getName() == null || targetGroup.getName().isEmpty()) { + errors.rejectValue( + "targetGroups", "createAmazonLoadBalancerDescription.targetGroups.name.missing"); + } + if (TargetTypeEnum.Lambda.toString().equalsIgnoreCase(targetGroup.getTargetType())) { + validateLambdaTargetGroup(albDescription, targetGroup, errors); + } else { + if (targetGroup.getProtocol() == null) { + errors.rejectValue( + "targetGroups", + "createAmazonLoadBalancerDescription.targetGroups.protocol.missing"); + } + if (targetGroup.getPort() == null) { + errors.rejectValue( + "targetGroups", "createAmazonLoadBalancerDescription.targetGroups.port.missing"); + } + } + } + Set<String> unusedTargetGroupNames = new HashSet<>(); + unusedTargetGroupNames.addAll(allTargetGroupNames); + + for (UpsertAmazonLoadBalancerV2Description.Listener listener : albDescription.listeners) { + if (listener.getDefaultActions().size() == 0) { + errors.rejectValue( + "listeners", "createAmazonLoadBalancerDescription.listeners.missing.defaultAction"); + } + this.validateActions( + listener.getDefaultActions(), allTargetGroupNames, unusedTargetGroupNames, errors); + for (UpsertAmazonLoadBalancerV2Description.Rule rule : listener.getRules()) { + this.validateActions( + rule.getActions(), allTargetGroupNames, unusedTargetGroupNames, errors); + } + } + if (unusedTargetGroupNames.size() > 0) { + errors.rejectValue( + "targetGroups", "createAmazonLoadBalancerDescription.targetGroups.unused"); + } + + // Verify that listeners on this load balancer all have a unique port; this validation + // mimics what the UI enforces.
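+ // For example (hypothetical): two listeners both configured on port 443 would be rejected here.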
+ List<Integer> allPorts = + albDescription.listeners.stream() + .map(UpsertAmazonLoadBalancerV2Description.Listener::getPort) + .collect(Collectors.toList()); + Set<Integer> uniquePorts = new HashSet<>(allPorts); + + if (uniquePorts.size() != allPorts.size()) { + errors.rejectValue("listeners", "Multiple listeners cannot use the same port"); + } + + break; + default: + errors.rejectValue( + "loadBalancerType", "createAmazonLoadBalancerDescription.loadBalancerType.invalid"); + break; + } + } + + private void validateLambdaTargetGroup( + UpsertAmazonLoadBalancerV2Description albDescription, + UpsertAmazonLoadBalancerV2Description.TargetGroup targetGroup, + ValidationErrors errors) { + // Add lambda specific validation, if required. + if (!AmazonLoadBalancerType.APPLICATION + .toString() + .equalsIgnoreCase(albDescription.getLoadBalancerType().toString())) { + errors.rejectValue( + "loadBalancerType", "createAmazonLoadBalancerDescription.loadBalancerType.invalid"); + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonSnapshotDescriptionValidator.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonSnapshotDescriptionValidator.java new file mode 100644 index 00000000000..adffabc1e1a --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonSnapshotDescriptionValidator.java @@ -0,0 +1,59 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.validators; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonSnapshotDescription; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.DELETE_SNAPSHOT) +@Component +public class DeleteAmazonSnapshotDescriptionValidator + extends AmazonDescriptionValidationSupport<DeleteAmazonSnapshotDescription> { + + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository; + + @Autowired + public DeleteAmazonSnapshotDescriptionValidator( + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository) { + this.credentialsRepository = credentialsRepository; + } + + @Override + public void validate( + List priorDescriptions, + DeleteAmazonSnapshotDescription description, + ValidationErrors errors) { + String key = DeleteAmazonSnapshotDescription.class.getSimpleName(); + validateRegion(description, description.getRegion(), key, errors); + + if (description.getRegion() == null || description.getRegion().equals("")) { + errors.rejectValue("region", "deleteAmazonSnapshotDescription.region.empty"); + } + if (description.getSnapshotId() == null || description.getSnapshotId().equals("")) { + errors.rejectValue("snapshotId", "deleteAmazonSnapshotDescription.snapshotId.empty"); + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyServerGroupLaunchTemplateValidator.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyServerGroupLaunchTemplateValidator.java new file mode 100644 index 00000000000..6eea61a7cad --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyServerGroupLaunchTemplateValidator.java @@ -0,0 +1,131 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.validators; + +import com.netflix.spinnaker.clouddriver.aws.AmazonOperation; +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils; +import com.netflix.spinnaker.clouddriver.aws.deploy.ModifyServerGroupUtils; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.List; +import org.apache.commons.lang3.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@AmazonOperation(AtomicOperations.UPDATE_LAUNCH_TEMPLATE) +@Component("modifyServerGroupLaunchTemplateDescriptionValidator") +public class ModifyServerGroupLaunchTemplateValidator + extends AmazonDescriptionValidationSupport<ModifyServerGroupLaunchTemplateDescription> { + private CredentialsRepository<NetflixAmazonCredentials> credentialsRepository; + + @Autowired + public ModifyServerGroupLaunchTemplateValidator( + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository) { + this.credentialsRepository = credentialsRepository; + } + + @Override + public void validate( + List priorDescriptions, + ModifyServerGroupLaunchTemplateDescription description, + ValidationErrors errors) { + + // if only metadata fields are set, fail fast and alert the user as there is nothing to modify + // in the server group launch template or related config + if (!areNonMetadataFieldsSet(description)) { + errors.rejectValue( + "multiple fields", + "modifyservergrouplaunchtemplatedescription.launchTemplateAndServerGroupFields.empty", + "No changes requested to launch template or related server group fields for modifyServerGroupLaunchTemplate operation."); + } + + String key = ModifyServerGroupLaunchTemplateDescription.class.getSimpleName(); + validateRegion(description, description.getRegion(), key, errors); + + if (description.getCredentials() == null) { + errors.rejectValue( + "credentials", "modifyservergrouplaunchtemplatedescription.credentials.empty"); + } else { + AccountCredentials credentials = + credentialsRepository.getOne(description.getCredentials().getName()); + if (credentials == null) { + errors.rejectValue( + "credentials", "modifyservergrouplaunchtemplatedescription.credentials.invalid"); + } + } + + if (description.getRegion() == null) { + errors.rejectValue("region", "modifyservergrouplaunchtemplatedescription.region.empty"); + } + + if (StringUtils.isBlank(description.getAsgName())) { + errors.rejectValue("asgName", "modifyservergrouplaunchtemplatedescription.asgName.empty"); + } + + if (description.getAssociatePublicIpAddress() != null + && description.getAssociatePublicIpAddress() + && description.getSubnetType() == null) { + errors.rejectValue( + "associatePublicIpAddress", + "modifyservergrouplaunchtemplatedescription.associatePublicIpAddress.subnetType.not.supplied"); + } + + if (description.getBlockDevices() != null) { + for (AmazonBlockDevice device : description.getBlockDevices()) { + BasicAmazonDeployDescriptionValidator.BlockDeviceRules.validate(device, errors); + } + } + + // unlimitedCpuCredits (set to true /
false) is valid only with supported instance types + if (description.getUnlimitedCpuCredits() != null + && !InstanceTypeUtils.isBurstingSupportedByAllTypes(description.getAllInstanceTypes())) { + errors.rejectValue( + "unlimitedCpuCredits", + "modifyservergrouplaunchtemplatedescription.bursting.not.supported.by.instanceType"); + } + + // spotInstancePools is applicable only for 'lowest-price' spotAllocationStrategy + if (description.getSpotInstancePools() != null + && description.getSpotInstancePools() > 0 + && !description.getSpotAllocationStrategy().equals("lowest-price")) { + errors.rejectValue( + "spotInstancePools", + "modifyservergrouplaunchtemplatedescription.spotInstancePools.not.supported.for.spotAllocationStrategy"); + } + } + + /** + * Method that returns a boolean indicating if the description in the request has at least one + * non-metadata field set. + * + * @param descToValidate description to validate + * @return a boolean, true if at least one non-metadata field is set, false otherwise. + */ + private boolean areNonMetadataFieldsSet( + final ModifyServerGroupLaunchTemplateDescription descToValidate) { + return !ModifyServerGroupUtils.getNonMetadataFieldsSetInReq(descToValidate).isEmpty(); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEvent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEvent.java similarity index 87% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEvent.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEvent.java index 51bccd11b3d..de794fc813e 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEvent.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEvent.java @@ -29,10 +29,12 @@ public class AfterResizeEvent { private final AutoScalingGroup autoScalingGroup; private final ServerGroup.Capacity capacity; - public AfterResizeEvent(Task task, - AmazonEC2 amazonEC2, - AmazonAutoScaling amazonAutoScaling, AutoScalingGroup autoScalingGroup, - ServerGroup.Capacity capacity) { + public AfterResizeEvent( + Task task, + AmazonEC2 amazonEC2, + AmazonAutoScaling amazonAutoScaling, + AutoScalingGroup autoScalingGroup, + ServerGroup.Capacity capacity) { this.task = task; this.amazonEC2 = amazonEC2; this.amazonAutoScaling = amazonAutoScaling; diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEventHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEventHandler.java new file mode 100644 index 00000000000..49ebf3d41aa --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/AfterResizeEventHandler.java @@ -0,0 +1,59 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.aws.event; + +import com.amazonaws.services.autoscaling.model.AutoScalingGroup; +import com.amazonaws.services.autoscaling.model.Instance; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.TerminateInstancesRequest; +import com.google.common.collect.Lists; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import java.util.List; +import java.util.stream.Collectors; + +public interface AfterResizeEventHandler { + int MAX_SIMULTANEOUS_TERMINATIONS = 100; + String PHASE = "RESIZE"; + + void handle(AfterResizeEvent event); + + default void terminateInstancesInAutoScalingGroup( + Task task, AmazonEC2 amazonEC2, AutoScalingGroup autoScalingGroup) { + String serverGroupName = autoScalingGroup.getAutoScalingGroupName(); + + List<String> instanceIds = + autoScalingGroup.getInstances().stream() + .map(Instance::getInstanceId) + .collect(Collectors.toList()); + + int terminatedCount = 0; + for (List<String> partition : Lists.partition(instanceIds, MAX_SIMULTANEOUS_TERMINATIONS)) { + try { + terminatedCount += partition.size(); + task.updateStatus( + PHASE, + String.format( + "Terminating %d of %d instances in %s", + terminatedCount, instanceIds.size(), serverGroupName)); + amazonEC2.terminateInstances(new TerminateInstancesRequest().withInstanceIds(partition)); + } catch (Exception e) { + task.updateStatus( + PHASE, String.format("Unable to terminate instances, reason: '%s'", e.getMessage())); + } + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/DefaultAfterResizeEventHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/DefaultAfterResizeEventHandler.java new file mode 100644 index 00000000000..d47ede2712a --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/event/DefaultAfterResizeEventHandler.java @@ -0,0 +1,101 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.aws.event; + +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.model.AutoScalingGroup; +import com.amazonaws.services.autoscaling.model.DescribeLifecycleHooksRequest; +import com.amazonaws.services.autoscaling.model.LifecycleHook; +import java.util.List; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DefaultAfterResizeEventHandler implements AfterResizeEventHandler { + private final Logger log = LoggerFactory.getLogger(getClass()); + + /** + * There is an opportunity to expedite a resize to zero by explicitly terminating instances + * (server group _must not_ be attached to a load balancer nor have any life cycle hooks) + */ + @Override + public void handle(AfterResizeEvent event) { + AutoScalingGroup autoScalingGroup = event.getAutoScalingGroup(); + + if (event.getCapacity() == null || event.getCapacity().getDesired() == null) { + return; + } + + if (event.getCapacity().getDesired() > 0) { + return; + } + + if (!autoScalingGroup.getLoadBalancerNames().isEmpty() + || !autoScalingGroup.getTargetGroupARNs().isEmpty()) { + event + .getTask() + .updateStatus( + PHASE, + "Skipping explicit instance termination, server group is attached to one or more load balancers"); + return; + } + + try { + List<LifecycleHook> existingLifecycleHooks = + fetchTerminatingLifecycleHooks( + event.getAmazonAutoScaling(), autoScalingGroup.getAutoScalingGroupName()); + if (!existingLifecycleHooks.isEmpty()) { + event + .getTask() + .updateStatus( + PHASE, + "Skipping explicit instance termination, server group has one or more lifecycle hooks"); + return; + } + } catch (Exception e) { + log.error( + "Unable to fetch lifecycle hooks (serverGroupName: {}, arn: {})", + autoScalingGroup.getAutoScalingGroupName(), + autoScalingGroup.getAutoScalingGroupARN(), + e); + + event + .getTask() + .updateStatus( + PHASE, + String.format( + "Skipping explicit instance termination, unable to fetch lifecycle hooks (reason: '%s')", + e.getMessage())); + return; + } + + terminateInstancesInAutoScalingGroup( + event.getTask(), event.getAmazonEC2(), event.getAutoScalingGroup()); + } + + private static List<LifecycleHook> fetchTerminatingLifecycleHooks( + AmazonAutoScaling amazonAutoScaling, String serverGroupName) { + DescribeLifecycleHooksRequest request = + new DescribeLifecycleHooksRequest().withAutoScalingGroupName(serverGroupName); + + return amazonAutoScaling.describeLifecycleHooks(request).getLifecycleHooks().stream() + .filter( + h -> + "autoscaling:EC2_INSTANCE_TERMINATING".equalsIgnoreCase(h.getLifecycleTransition())) + .collect(Collectors.toList()); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicator.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicator.java new file mode 100644 index 00000000000..43454d2c13f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicator.java @@ -0,0 +1,70 @@ +package com.netflix.spinnaker.clouddriver.aws.health; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.ec2.AmazonEC2; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import
com.netflix.spinnaker.clouddriver.core.AccountHealthIndicator; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class AmazonHealthIndicator extends AccountHealthIndicator<NetflixAmazonCredentials> { + + private static final String ID = "aws"; + private final CredentialsRepository<NetflixAmazonCredentials> credentialsRepository; + private final AmazonClientProvider amazonClientProvider; + private final AwsConfigurationProperties awsConfigurationProperties; + + public AmazonHealthIndicator( + Registry registry, + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository, + AmazonClientProvider amazonClientProvider, + AwsConfigurationProperties awsConfigurationProperties) { + super(ID, registry); + this.credentialsRepository = credentialsRepository; + this.amazonClientProvider = amazonClientProvider; + this.awsConfigurationProperties = awsConfigurationProperties; + } + + @Override + protected Iterable<NetflixAmazonCredentials> getAccounts() { + return credentialsRepository.getAll(); + } + + @Override + protected Optional<String> accountHealth(NetflixAmazonCredentials account) { + if (awsConfigurationProperties.getHealth().getVerifyAccountHealth()) { + log.info( + "aws.health.verifyAccountHealth flag is enabled - verifying connection to the EC2 accounts"); + try { + AmazonEC2 ec2 = + amazonClientProvider.getAmazonEC2(account, AmazonClientProvider.DEFAULT_REGION, true); + if (ec2 == null) { + return Optional.of( + String.format("Could not create Amazon client for '%s'", account.getName())); + } + + ec2.describeAccountAttributes(); + + } catch (AmazonServiceException e) { + String errorCode = e.getErrorCode(); + + if (!"RequestLimitExceeded".equalsIgnoreCase(errorCode)) { + return Optional.of( + String.format( + "Failed to describe account attributes for '%s'. Message: '%s'", + account.getName(), e.getMessage())); + } + } + } else { + log.info( + "aws.health.verifyAccountHealth flag is disabled - not verifying connection to the EC2 accounts"); + } + + return Optional.empty(); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/ARN.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/ARN.java new file mode 100644 index 00000000000..2dca865129f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/ARN.java @@ -0,0 +1,75 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.aws.lifecycle; + +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import java.util.Collection; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +class ARN { + + static final Pattern PATTERN = Pattern.compile("arn:aws(?:-cn|-us-gov)?:.*:(.*):(\\d+):(.*)"); + + String arn; + String region; + String name; + + NetflixAmazonCredentials account; + + ARN(Collection<? extends AccountCredentials> accountCredentials, String arn) { + this.arn = arn; + + Matcher sqsMatcher = PATTERN.matcher(arn); + if (!sqsMatcher.matches()) { + throw new IllegalArgumentException(arn + " is not a valid SNS or SQS ARN"); + } + + this.region = sqsMatcher.group(1); + this.name = sqsMatcher.group(3); + + String accountId = sqsMatcher.group(2); + this.account = + (NetflixAmazonCredentials) + accountCredentials.stream() + .filter(c -> accountId.equals(c.getAccountId())) + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "No account credentials found for " + accountId)); + } + + ARN(NetflixAmazonCredentials netflixAmazonCredentials, String arn) { + this.arn = arn; + + Matcher sqsMatcher = PATTERN.matcher(arn); + if (!sqsMatcher.matches()) { + throw new IllegalArgumentException(arn + " is not a valid SNS or SQS ARN"); + } + + this.region = sqsMatcher.group(1); + this.name = sqsMatcher.group(3); + + String accountId = sqsMatcher.group(2); + if (accountId.equals(netflixAmazonCredentials.getAccountId())) { + this.account = netflixAmazonCredentials; + } else { + throw new IllegalArgumentException("No account credentials found for " + accountId); + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorker.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorker.java new file mode 100644 index 00000000000..0e05983fcc0 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorker.java @@ -0,0 +1,391 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.aws.lifecycle; + +import com.amazonaws.auth.policy.Condition; +import com.amazonaws.auth.policy.Policy; +import com.amazonaws.auth.policy.Principal; +import com.amazonaws.auth.policy.Resource; +import com.amazonaws.auth.policy.Statement; +import com.amazonaws.auth.policy.Statement.Effect; +import com.amazonaws.auth.policy.actions.SNSActions; +import com.amazonaws.auth.policy.actions.SQSActions; +import com.amazonaws.services.sns.AmazonSNS; +import com.amazonaws.services.sns.model.SetTopicAttributesRequest; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.model.Message; +import com.amazonaws.services.sqs.model.ReceiptHandleIsInvalidException; +import com.amazonaws.services.sqs.model.ReceiveMessageRequest; +import com.amazonaws.services.sqs.model.ReceiveMessageResult; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.frigga.Names; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.discovery.AwsEurekaSupport; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials.LifecycleHook; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.eureka.api.Eureka; +import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException; +import java.io.IOException; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import javax.inject.Provider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.http.HttpStatus; + +public class InstanceTerminationLifecycleWorker implements Runnable { + + private static final Logger log = + LoggerFactory.getLogger(InstanceTerminationLifecycleWorker.class); + + private static final int AWS_MAX_NUMBER_OF_MESSAGES = 10; + private static final String SUPPORTED_LIFECYCLE_TRANSITION = + "autoscaling:EC2_INSTANCE_TERMINATING"; + + ObjectMapper objectMapper; + AmazonClientProvider amazonClientProvider; + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository; + InstanceTerminationConfigurationProperties properties; + Provider<AwsEurekaSupport> discoverySupport; + Registry registry; + + private final ARN queueARN; + private final ARN topicARN; + + private String queueId = null; + + public InstanceTerminationLifecycleWorker( + ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + CredentialsRepository<NetflixAmazonCredentials> credentialsRepository, + InstanceTerminationConfigurationProperties properties, + Provider<AwsEurekaSupport> discoverySupport, + Registry registry) { + this.objectMapper = objectMapper; + this.amazonClientProvider = amazonClientProvider; + this.credentialsRepository = credentialsRepository; + this.properties = properties; + this.discoverySupport = discoverySupport; + this.registry = registry; + + Set<NetflixAmazonCredentials> accountCredentials =
credentialsRepository.getAll();
+    this.queueARN = new ARN(accountCredentials, properties.getQueueARN());
+    this.topicARN = new ARN(accountCredentials, properties.getTopicARN());
+  }
+
+  public String getWorkerName() {
+    return queueARN.account.getName()
+        + "/"
+        + queueARN.region
+        + "/"
+        + InstanceTerminationLifecycleWorker.class.getSimpleName();
+  }
+
+  @Override
+  public void run() {
+    log.info("Starting " + getWorkerName());
+
+    while (true) {
+      try {
+        listenForMessages();
+      } catch (Throwable e) {
+        log.error("Unexpected error running " + getWorkerName() + ", restarting", e);
+      }
+    }
+  }
+
+  private void listenForMessages() {
+    AmazonSQS amazonSQS = amazonClientProvider.getAmazonSQS(queueARN.account, queueARN.region);
+    AmazonSNS amazonSNS = amazonClientProvider.getAmazonSNS(topicARN.account, topicARN.region);
+
+    Set<NetflixAmazonCredentials> accountCredentials = credentialsRepository.getAll();
+    List<String> allAccountIds = getAllAccountIds(accountCredentials);
+
+    this.queueId =
+        ensureQueueExists(
+            amazonSQS,
+            queueARN,
+            topicARN,
+            getSourceRoleArns(accountCredentials),
+            properties.getSqsMessageRetentionPeriodSeconds());
+    ensureTopicExists(amazonSNS, topicARN, allAccountIds, queueARN);
+
+    while (true) {
+      ReceiveMessageResult receiveMessageResult =
+          amazonSQS.receiveMessage(
+              new ReceiveMessageRequest(queueId)
+                  .withMaxNumberOfMessages(AWS_MAX_NUMBER_OF_MESSAGES)
+                  .withVisibilityTimeout(properties.getVisibilityTimeout())
+                  .withWaitTimeSeconds(properties.getWaitTimeSeconds()));
+
+      if (receiveMessageResult.getMessages().isEmpty()) {
+        // No messages
+        continue;
+      }
+
+      receiveMessageResult
+          .getMessages()
+          .forEach(
+              message -> {
+                LifecycleMessage lifecycleMessage = unmarshalLifecycleMessage(message.getBody());
+
+                if (lifecycleMessage != null) {
+                  if (!SUPPORTED_LIFECYCLE_TRANSITION.equalsIgnoreCase(
+                      lifecycleMessage.lifecycleTransition)) {
+                    log.info(
+                        "Ignoring unsupported lifecycle transition: "
+                            + lifecycleMessage.lifecycleTransition);
+                    deleteMessage(amazonSQS, queueId, message);
+                    return;
+                  }
+                  handleMessage(lifecycleMessage);
+                }
+
+                deleteMessage(amazonSQS, queueId, message);
+                registry.counter(getProcessedMetricId(queueARN.region)).increment();
+              });
+    }
+  }
+
+  private LifecycleMessage unmarshalLifecycleMessage(String messageBody) {
+    String body = messageBody;
+    try {
+      NotificationMessageWrapper wrapper =
+          objectMapper.readValue(messageBody, NotificationMessageWrapper.class);
+      if (wrapper != null && wrapper.message != null) {
+        body = wrapper.message;
+      }
+    } catch (IOException e) {
+      // Try to unwrap a notification message; if that doesn't work,
+      // assume that we're dealing with a message directly from SQS.
+      log.debug(
+          "Unable to unmarshal NotificationMessageWrapper. Assuming SQS message.
(body: {})", + messageBody, + e); + } + + LifecycleMessage lifecycleMessage = null; + try { + lifecycleMessage = objectMapper.readValue(body, LifecycleMessage.class); + } catch (IOException e) { + log.error("Unable to unmarshal LifecycleMessage (body: {})", body, e); + } + + return lifecycleMessage; + } + + private void handleMessage(LifecycleMessage message) { + NetflixAmazonCredentials credentials = getAccountCredentialsById(message.accountId); + if (credentials == null) { + log.error("Unable to find credentials for account id: {}", message.accountId); + return; + } + + Names names = Names.parseName(message.autoScalingGroupName); + Eureka eureka = discoverySupport.get().getEureka(credentials, queueARN.region); + + if (!updateInstanceStatus(eureka, names.getApp(), message.ec2InstanceId)) { + registry.counter(getFailedMetricId(queueARN.region)).increment(); + } + recordLag( + message.time, + queueARN.region, + message.accountId, + message.autoScalingGroupName, + message.ec2InstanceId); + } + + private boolean updateInstanceStatus(Eureka eureka, String app, String instanceId) { + int retry = 0; + while (retry < properties.getEurekaUpdateStatusRetryMax()) { + retry++; + try { + eureka.updateInstanceStatus(app, instanceId, DiscoveryStatus.OUT_OF_SERVICE.getValue()); + return true; + } catch (SpinnakerServerException e) { + final String recoverableMessage = + "Failed marking app out of service (status: {}, app: {}, instance: {}, retry: {})"; + if (e instanceof SpinnakerHttpException + && HttpStatus.NOT_FOUND.value() == ((SpinnakerHttpException) e).getResponseCode()) { + log.warn(recoverableMessage, 404, app, instanceId, retry); + } else if (e instanceof SpinnakerNetworkException) { + log.error(recoverableMessage, "none", app, instanceId, retry, e); + } else { + log.error( + "Irrecoverable error while marking app out of service (app: {}, instance: {}, retry: {})", + app, + instanceId, + retry, + e); + break; + } + } + } + + return false; + } + + private static void deleteMessage(AmazonSQS amazonSQS, String queueUrl, Message message) { + try { + amazonSQS.deleteMessage(queueUrl, message.getReceiptHandle()); + } catch (ReceiptHandleIsInvalidException e) { + log.warn( + "Error deleting lifecycle message, reason: {} (receiptHandle: {})", + e.getMessage(), + message.getReceiptHandle()); + } + } + + private NetflixAmazonCredentials getAccountCredentialsById(String accountId) { + for (NetflixAmazonCredentials credentials : credentialsRepository.getAll()) { + if (credentials.getAccountId() != null && credentials.getAccountId().equals(accountId)) { + return credentials; + } + } + return null; + } + + private static String ensureTopicExists( + AmazonSNS amazonSNS, ARN topicARN, List allAccountIds, ARN queueARN) { + topicARN.arn = amazonSNS.createTopic(topicARN.name).getTopicArn(); + + amazonSNS.setTopicAttributes( + new SetTopicAttributesRequest() + .withTopicArn(topicARN.arn) + .withAttributeName("Policy") + .withAttributeValue(buildSNSPolicy(topicARN, allAccountIds).toJson())); + + amazonSNS.subscribe(topicARN.arn, "sqs", queueARN.arn); + + return topicARN.arn; + } + + private static Policy buildSNSPolicy(ARN topicARN, List allAccountIds) { + Statement statement = new Statement(Statement.Effect.Allow).withActions(SNSActions.Publish); + statement.setPrincipals( + allAccountIds.stream().map(Principal::new).collect(Collectors.toList())); + statement.setResources(Collections.singletonList(new Resource(topicARN.arn))); + + return new Policy("allow-remote-account-send", Collections.singletonList(statement)); 
+  }
+
+  private static String ensureQueueExists(
+      AmazonSQS amazonSQS,
+      ARN queueARN,
+      ARN topicARN,
+      Set<String> terminatingRoleArns,
+      int sqsMessageRetentionPeriodSeconds) {
+    String queueUrl = amazonSQS.createQueue(queueARN.name).getQueueUrl();
+
+    HashMap<String, String> attributes = new HashMap<>();
+    attributes.put("Policy", buildSQSPolicy(queueARN, topicARN, terminatingRoleArns).toJson());
+    attributes.put("MessageRetentionPeriod", Integer.toString(sqsMessageRetentionPeriodSeconds));
+    amazonSQS.setQueueAttributes(queueUrl, attributes);
+
+    return queueUrl;
+  }
+
+  /**
+   * This policy allows operators to choose whether lifecycle hooks are sent via SNS for fanout,
+   * or directly to an SQS queue from the autoscaling group.
+   */
+  private static Policy buildSQSPolicy(ARN queue, ARN topic, Set<String> terminatingRoleArns) {
+    Statement snsStatement = new Statement(Effect.Allow).withActions(SQSActions.SendMessage);
+    snsStatement.setPrincipals(Principal.All);
+    snsStatement.setResources(Collections.singletonList(new Resource(queue.arn)));
+    snsStatement.setConditions(
+        Collections.singletonList(
+            new Condition()
+                .withType("ArnEquals")
+                .withConditionKey("aws:SourceArn")
+                .withValues(topic.arn)));
+
+    Statement sqsStatement =
+        new Statement(Effect.Allow).withActions(SQSActions.SendMessage, SQSActions.GetQueueUrl);
+    sqsStatement.setPrincipals(
+        terminatingRoleArns.stream().map(Principal::new).collect(Collectors.toList()));
+    sqsStatement.setResources(Collections.singletonList(new Resource(queue.arn)));
+
+    return new Policy("allow-sns-or-sqs-send", Arrays.asList(snsStatement, sqsStatement));
+  }
+
+  Id getLagMetricId(String region) {
+    return registry.createId("terminationLifecycle.lag", "region", region);
+  }
+
+  void recordLag(Date start, String region, String account, String serverGroup, String instanceId) {
+    if (start != null) {
+      Long lag = registry.clock().wallTime() - start.getTime();
+      log.info(
+          "Lifecycle message processed (account: {}, serverGroup: {}, instance: {}, lagSeconds: {})",
+          account,
+          serverGroup,
+          instanceId,
+          Duration.ofMillis(lag).getSeconds());
+      registry.gauge(getLagMetricId(region), lag);
+    }
+  }
+
+  Id getProcessedMetricId(String region) {
+    return registry.createId("terminationLifecycle.totalProcessed", "region", region);
+  }
+
+  Id getFailedMetricId(String region) {
+    return registry.createId("terminationLifecycle.totalFailed", "region", region);
+  }
+
+  private static List<String> getAllAccountIds(
+      Set<? extends AccountCredentials> accountCredentials) {
+    return accountCredentials.stream()
+        .map(AccountCredentials::getAccountId)
+        .filter(a -> a != null)
+        .collect(Collectors.toList());
+  }
+
+  private static <T extends AccountCredentials> Set<String> getSourceRoleArns(
+      Set<T> allCredentials) {
+    Set<String> sourceRoleArns = new HashSet<>();
+    for (T credentials : allCredentials) {
+      if (credentials instanceof NetflixAmazonCredentials) {
+        NetflixAmazonCredentials c = (NetflixAmazonCredentials) credentials;
+        if (c.getLifecycleHooks() != null) {
+          sourceRoleArns.addAll(
+              c.getLifecycleHooks().stream()
+                  .filter(
+                      h ->
+                          "autoscaling:EC2_INSTANCE_TERMINATING".equals(h.getLifecycleTransition()))
+                  .map(LifecycleHook::getRoleARN)
+                  .collect(Collectors.toSet()));
+        }
+      }
+    }
+    return sourceRoleArns;
+  }
+}
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerProvider.java
new file mode 100644
index 00000000000..ea2b63f4b4e
---
/dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerProvider.java @@ -0,0 +1,117 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.lifecycle; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.discovery.AwsEurekaSupport; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.regex.Pattern; +import javax.annotation.PostConstruct; +import javax.inject.Provider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.context.annotation.DependsOn; +import org.springframework.stereotype.Component; + +@Component +@DependsOn("amazonCredentialsLoader") +@ConditionalOnExpression( + "${aws.lifecycle-subscribers.instance-termination.enabled:false} && ${caching.write-enabled:true}") +public class InstanceTerminationLifecycleWorkerProvider { + private static final String REGION_TEMPLATE_PATTERN = Pattern.quote("{{region}}"); + private static final String ACCOUNT_ID_TEMPLATE_PATTERN = Pattern.quote("{{accountId}}"); + + private static final Logger log = + LoggerFactory.getLogger(InstanceTerminationLifecycleWorkerProvider.class); + + private final ObjectMapper objectMapper; + private final AmazonClientProvider amazonClientProvider; + private final CredentialsRepository credentialsRepository; + private final InstanceTerminationConfigurationProperties properties; + private final Provider discoverySupport; + private final Registry registry; + + @Autowired + InstanceTerminationLifecycleWorkerProvider( + @Qualifier("amazonObjectMapper") ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + CredentialsRepository credentialsRepository, + InstanceTerminationConfigurationProperties properties, + Provider discoverySupport, + Registry registry) { + this.objectMapper = objectMapper; + this.amazonClientProvider = amazonClientProvider; + this.credentialsRepository = credentialsRepository; + this.properties = properties; + this.discoverySupport = discoverySupport; + this.registry = registry; + } + + @PostConstruct + public void start() { + NetflixAmazonCredentials credentials = + credentialsRepository.getOne(properties.getAccountName()); + ExecutorService executorService = + Executors.newFixedThreadPool( 
+ credentials.getRegions().size(), + new ThreadFactoryBuilder() + .setNameFormat( + InstanceTerminationLifecycleWorkerProvider.class.getSimpleName() + "-%d") + .build()); + + credentials + .getRegions() + .forEach( + region -> { + InstanceTerminationLifecycleWorker worker = + new InstanceTerminationLifecycleWorker( + objectMapper, + amazonClientProvider, + credentialsRepository, + new InstanceTerminationConfigurationProperties( + properties.getAccountName(), + properties + .getQueueARN() + .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) + .replaceAll(ACCOUNT_ID_TEMPLATE_PATTERN, credentials.getAccountId()), + properties + .getTopicARN() + .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) + .replaceAll(ACCOUNT_ID_TEMPLATE_PATTERN, credentials.getAccountId()), + properties.getVisibilityTimeout(), + properties.getWaitTimeSeconds(), + properties.getSqsMessageRetentionPeriodSeconds(), + properties.getEurekaUpdateStatusRetryMax()), + discoverySupport, + registry); + try { + executorService.submit(worker); + } catch (RejectedExecutionException e) { + log.error("Could not start " + worker.getWorkerName(), e); + } + }); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgent.java new file mode 100644 index 00000000000..c5f3117d63e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgent.java @@ -0,0 +1,272 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.lifecycle; + +import com.amazonaws.auth.policy.Condition; +import com.amazonaws.auth.policy.Policy; +import com.amazonaws.auth.policy.Principal; +import com.amazonaws.auth.policy.Resource; +import com.amazonaws.auth.policy.Statement; +import com.amazonaws.auth.policy.actions.SNSActions; +import com.amazonaws.auth.policy.actions.SQSActions; +import com.amazonaws.services.sns.AmazonSNS; +import com.amazonaws.services.sns.model.SetTopicAttributesRequest; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.model.Message; +import com.amazonaws.services.sqs.model.ReceiptHandleIsInvalidException; +import com.amazonaws.services.sqs.model.ReceiveMessageRequest; +import com.amazonaws.services.sqs.model.ReceiveMessageResult; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.RunnableAgent; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.tags.EntityTagger; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.regex.Matcher; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An Agent that subscribes to a particular SQS queue and tags any server groups that had launch + * errors. 
+ */ +class LaunchFailureNotificationAgent implements RunnableAgent, CustomScheduledAgent { + private static final Logger log = LoggerFactory.getLogger(LaunchFailureNotificationAgent.class); + + private static final String SUPPORTED_LIFECYCLE_TRANSITION = + "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"; + private static final int AWS_MAX_NUMBER_OF_MESSAGES = 10; + + private final ObjectMapper objectMapper; + private final AmazonClientProvider amazonClientProvider; + private final CredentialsRepository credentialsRepository; + private final LaunchFailureConfigurationProperties properties; + private final EntityTagger serverGroupTagger; + + private final ARN topicARN; + private final ARN queueARN; + + private String topicId = null; // the ARN for the topic + private String queueId = null; // the URL for the queue + + LaunchFailureNotificationAgent( + ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials netflixAmazonCredentials, + CredentialsRepository credentialsRepository, + LaunchFailureConfigurationProperties properties, + EntityTagger serverGroupTagger) { + this.objectMapper = objectMapper; + this.amazonClientProvider = amazonClientProvider; + this.credentialsRepository = credentialsRepository; + this.properties = properties; + this.serverGroupTagger = serverGroupTagger; + + this.topicARN = new ARN(netflixAmazonCredentials, properties.getTopicARN()); + this.queueARN = new ARN(netflixAmazonCredentials, properties.getQueueARN()); + } + + @Override + public String getAgentType() { + return queueARN.account.getName() + + "/" + + queueARN.region + + "/" + + LaunchFailureNotificationAgent.class.getSimpleName(); + } + + @Override + public String getProviderName() { + return AwsProvider.PROVIDER_NAME; + } + + @Override + public long getPollIntervalMillis() { + return TimeUnit.MINUTES.toMillis(1); + } + + @Override + public long getTimeoutMillis() { + return -1; + } + + @Override + public void run() { + List allAccountIds = + credentialsRepository.getAll().stream() + .map(AccountCredentials::getAccountId) + .collect(Collectors.toList()); + + AmazonSQS amazonSQS = amazonClientProvider.getAmazonSQS(queueARN.account, queueARN.region); + this.queueId = ensureQueueExists(amazonSQS, queueARN, topicARN); + + AmazonSNS amazonSNS = amazonClientProvider.getAmazonSNS(topicARN.account, topicARN.region); + this.topicId = ensureTopicExists(amazonSNS, topicARN, allAccountIds, queueARN); + + AtomicInteger messagesProcessed = new AtomicInteger(0); + while (messagesProcessed.get() < properties.getMaxMessagesPerCycle()) { + ReceiveMessageResult receiveMessageResult = + amazonSQS.receiveMessage( + new ReceiveMessageRequest(queueId) + .withMaxNumberOfMessages(AWS_MAX_NUMBER_OF_MESSAGES) + .withVisibilityTimeout(properties.getVisibilityTimeout()) + .withWaitTimeSeconds(properties.getWaitTimeSeconds())); + + receiveMessageResult + .getMessages() + .forEach( + message -> { + try { + NotificationMessageWrapper notificationMessageWrapper = + objectMapper.readValue(message.getBody(), NotificationMessageWrapper.class); + + NotificationMessage notificationMessage = + objectMapper.readValue( + notificationMessageWrapper.message, NotificationMessage.class); + + if (SUPPORTED_LIFECYCLE_TRANSITION.equalsIgnoreCase(notificationMessage.event)) { + handleMessage(serverGroupTagger, notificationMessage); + } + } catch (IOException e) { + log.error( + "Unable to convert NotificationMessage (body: {})", message.getBody(), e); + } + + deleteMessage(amazonSQS, queueId, message); + 
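+                  // Every message counts toward maxMessagesPerCycle, including unsupported or
+                  // unparseable ones, so a backlog of ignored events cannot stall the cycle.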
messagesProcessed.incrementAndGet(); + }); + + if (receiveMessageResult.getMessages().isEmpty()) { + // no messages received, stop polling. + break; + } + } + + log.info("Processed {} messages (queueARN: {})", messagesProcessed.get(), queueARN.arn); + } + + private static void handleMessage( + EntityTagger serverGroupTagger, NotificationMessage notificationMessage) { + log.info( + "Failed to launch instance (asgName: {}, reason: {})", + notificationMessage.autoScalingGroupName, + notificationMessage.statusMessage); + + Matcher sqsMatcher = ARN.PATTERN.matcher(notificationMessage.autoScalingGroupARN); + if (!sqsMatcher.matches()) { + throw new IllegalArgumentException( + notificationMessage.autoScalingGroupARN + " is not a valid ARN"); + } + + String region = sqsMatcher.group(1); + String accountId = sqsMatcher.group(2); + + serverGroupTagger.alert( + AmazonCloudProvider.ID, + accountId, + region, + null, // no category + EntityTagger.ENTITY_TYPE_SERVER_GROUP, + notificationMessage.autoScalingGroupName, + notificationMessage.event, + notificationMessage.statusMessage, + null // no last modified timestamp + ); + } + + /** + * Ensure that the topic exists and has a policy granting all accounts permission to publish + * messages to it + */ + private static String ensureTopicExists( + AmazonSNS amazonSNS, ARN topicARN, List allAccountIds, ARN queueARN) { + topicARN.arn = amazonSNS.createTopic(topicARN.name).getTopicArn(); + + amazonSNS.setTopicAttributes( + new SetTopicAttributesRequest() + .withTopicArn(topicARN.arn) + .withAttributeName("Policy") + .withAttributeValue(buildSNSPolicy(topicARN, allAccountIds).toJson())); + + amazonSNS.subscribe(topicARN.arn, "sqs", queueARN.arn); + + return topicARN.arn; + } + + /** + * Ensure that the queue exists and has a policy granting the source topic permission to send + * messages to it + */ + private static String ensureQueueExists(AmazonSQS amazonSQS, ARN queueARN, ARN topicARN) { + String queueUrl; + + try { + queueUrl = amazonSQS.getQueueUrl(queueARN.name).getQueueUrl(); + } catch (Exception e) { + queueUrl = amazonSQS.createQueue(queueARN.name).getQueueUrl(); + } + + amazonSQS.setQueueAttributes( + queueUrl, Collections.singletonMap("Policy", buildSQSPolicy(queueARN, topicARN).toJson())); + + return queueUrl; + } + + private static Policy buildSNSPolicy(ARN topicARN, List allAccountIds) { + Statement statement = new Statement(Statement.Effect.Allow).withActions(SNSActions.Publish); + statement.setPrincipals( + allAccountIds.stream().map(Principal::new).collect(Collectors.toList())); + statement.setResources(Collections.singletonList(new Resource(topicARN.arn))); + + return new Policy("allow-remote-account-send", Collections.singletonList(statement)); + } + + private static Policy buildSQSPolicy(ARN queue, ARN topic) { + Statement statement = new Statement(Statement.Effect.Allow).withActions(SQSActions.SendMessage); + statement.setPrincipals(Principal.All); + statement.setResources(Collections.singletonList(new Resource(queue.arn))); + statement.setConditions( + Collections.singletonList( + new Condition() + .withType("ArnEquals") + .withConditionKey("aws:SourceArn") + .withValues(topic.arn))); + + return new Policy("allow-sns-topic-send", Collections.singletonList(statement)); + } + + private static void deleteMessage(AmazonSQS amazonSQS, String queueUrl, Message message) { + try { + amazonSQS.deleteMessage(queueUrl, message.getReceiptHandle()); + } catch (ReceiptHandleIsInvalidException e) { + log.warn( + "Error deleting lifecycle message, reason: 
{} (receiptHandle: {})", + e.getMessage(), + message.getReceiptHandle()); + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProvider.java new file mode 100644 index 00000000000..de98ad6e060 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProvider.java @@ -0,0 +1,104 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.lifecycle; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.tags.EntityTagger; +import com.netflix.spinnaker.credentials.Credentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public class LaunchFailureNotificationAgentProvider implements AgentProvider { + private static final String REGION_TEMPLATE_PATTERN = Pattern.quote("{{region}}"); + private static final String ACCOUNT_ID_TEMPLATE_PATTERN = Pattern.quote("{{accountId}}"); + + private final ObjectMapper objectMapper; + private final AmazonClientProvider amazonClientProvider; + private final CredentialsRepository credentialsRepository; + private final LaunchFailureConfigurationProperties properties; + private final EntityTagger entityTagger; + + LaunchFailureNotificationAgentProvider( + ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + CredentialsRepository credentialsRepository, + LaunchFailureConfigurationProperties properties, + EntityTagger entityTagger) { + this.objectMapper = objectMapper; + this.amazonClientProvider = amazonClientProvider; + this.credentialsRepository = credentialsRepository; + this.properties = properties; + this.entityTagger = entityTagger; + } + + @Override + public boolean supports(String providerName) { + return providerName.equalsIgnoreCase(AwsProvider.PROVIDER_NAME); + } + + @Override + public Collection agents(Credentials credentials) { + NetflixAmazonCredentials netflixAmazonCredentials = (NetflixAmazonCredentials) credentials; + + if (!credentials.getName().equals(properties.getAccountName())) { + // LaunchFailureNotificationAgent only supports the account specified in + // `properties.getAccountName()` + return Collections.emptyList(); + } + + // an agent for each region in the specified account + List agents = + 
netflixAmazonCredentials.getRegions().stream() + .map( + region -> + new LaunchFailureNotificationAgent( + objectMapper, + amazonClientProvider, + netflixAmazonCredentials, + credentialsRepository, + new LaunchFailureConfigurationProperties( + properties.getAccountName(), + properties + .getTopicARN() + .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) + .replaceAll( + ACCOUNT_ID_TEMPLATE_PATTERN, + netflixAmazonCredentials.getAccountId()), + properties + .getQueueARN() + .replaceAll(REGION_TEMPLATE_PATTERN, region.getName()) + .replaceAll( + ACCOUNT_ID_TEMPLATE_PATTERN, + netflixAmazonCredentials.getAccountId()), + properties.getMaxMessagesPerCycle(), + properties.getVisibilityTimeout(), + properties.getWaitTimeSeconds()), + entityTagger)) + .collect(Collectors.toList()); + + return agents; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgent.java new file mode 100644 index 00000000000..9f0021b674f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgent.java @@ -0,0 +1,183 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.lifecycle;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.services.autoscaling.AmazonAutoScaling;
+import com.amazonaws.services.autoscaling.model.Activity;
+import com.amazonaws.services.autoscaling.model.DescribeScalingActivitiesRequest;
+import com.amazonaws.services.autoscaling.model.DescribeScalingActivitiesResult;
+import com.amazonaws.services.autoscaling.model.ScalingActivityStatusCode;
+import com.netflix.spinnaker.cats.agent.RunnableAgent;
+import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider;
+import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider;
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent;
+import com.netflix.spinnaker.clouddriver.model.EntityTags;
+import com.netflix.spinnaker.clouddriver.tags.EntityTagger;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LaunchFailureNotificationCleanupAgent implements RunnableAgent, CustomScheduledAgent {
+  private static final Logger log =
+      LoggerFactory.getLogger(LaunchFailureNotificationCleanupAgent.class);
+
+  private static final String TAG_NAME = "spinnaker_ui_alert:autoscaling:ec2_instance_launch_error";
+  private static final int MAX_RESULTS = 10000;
+
+  private final AmazonClientProvider amazonClientProvider;
+  private final CredentialsRepository<NetflixAmazonCredentials> credentialsRepository;
+  private final EntityTagger serverGroupTagger;
+
+  LaunchFailureNotificationCleanupAgent(
+      AmazonClientProvider amazonClientProvider,
+      CredentialsRepository<NetflixAmazonCredentials> credentialsRepository,
+      EntityTagger serverGroupTagger) {
+    this.amazonClientProvider = amazonClientProvider;
+    this.credentialsRepository = credentialsRepository;
+    this.serverGroupTagger = serverGroupTagger;
+  }
+
+  @Override
+  public String getAgentType() {
+    return LaunchFailureNotificationCleanupAgent.class.getSimpleName();
+  }
+
+  @Override
+  public String getProviderName() {
+    return AwsProvider.PROVIDER_NAME;
+  }
+
+  @Override
+  public long getPollIntervalMillis() {
+    return TimeUnit.MINUTES.toMillis(5);
+  }
+
+  @Override
+  public long getTimeoutMillis() {
+    return -1;
+  }
+
+  @Override
+  public void run() {
+    Collection<EntityTags> taggedEntities =
+        serverGroupTagger.taggedEntities(
+            AmazonCloudProvider.ID,
+            null, // all accounts
+            EntityTagger.ENTITY_TYPE_SERVER_GROUP,
+            TAG_NAME,
+            MAX_RESULTS);
+
+    taggedEntities.forEach(
+        entityTags -> {
+          EntityTags.EntityRef entityRef = entityTags.getEntityRef();
+          Optional<NetflixAmazonCredentials> credentials =
+              Optional.ofNullable(credentialsRepository.getOne(entityRef.getAccount()))
+                  .map(NetflixAmazonCredentials.class::cast);
+
+          if (!credentials.isPresent()) {
+            log.warn(
+                "No account configuration for {}. Unable to determine if '{}' has launch failures",
+                entityRef.getAccount(),
+                entityTags.getId());
+            return;
+          }
+
+          AmazonAutoScaling amazonAutoScaling =
+              amazonClientProvider.getAutoScaling(credentials.get(), entityRef.getRegion());
+
+          try {
+            if (hasLaunchFailures(amazonAutoScaling, entityTags)) {
+              return;
+            }
+
+            serverGroupTagger.delete(
+                AmazonCloudProvider.ID,
+                entityRef.getAccountId(),
+                entityRef.getRegion(),
+                EntityTagger.ENTITY_TYPE_SERVER_GROUP,
+                entityRef.getEntityId(),
+                TAG_NAME);
+          } catch (Exception e) {
+            log.error("Unable to determine if '{}' has launch failures", entityTags.getId(), e);
+          }
+        });
+  }
+
+  /**
+   * Fetch scaling activities and determine if the most recent activity was successful.
+   *
+   * <p>
A successful scaling activity is sufficient to indicate that a server group is no longer + * having launch failures. + */ + protected boolean hasLaunchFailures(AmazonAutoScaling amazonAutoScaling, EntityTags entityTags) { + EntityTags.EntityRef entityRef = entityTags.getEntityRef(); + + try { + DescribeScalingActivitiesResult describeScalingActivitiesResult = + amazonAutoScaling.describeScalingActivities( + new DescribeScalingActivitiesRequest() + .withAutoScalingGroupName(entityRef.getEntityId())); + + List activities = describeScalingActivitiesResult.getActivities(); + return !activities.isEmpty() + && !activities + .get(0) + .getStatusCode() + .equals(ScalingActivityStatusCode.Successful.toString()); + } catch (Exception e) { + AmazonServiceException amazonServiceException = amazonServiceException(e); + if (amazonServiceException != null) { + if (amazonServiceException.getErrorMessage().toLowerCase().contains("name not found")) { + return false; + } + } + + throw e; + } + } + + private static AmazonServiceException amazonServiceException(Exception e) { + if (e instanceof AmazonServiceException) { + return (AmazonServiceException) e; + } + + if (!(e instanceof UndeclaredThrowableException)) { + return null; + } + + UndeclaredThrowableException ute = (UndeclaredThrowableException) e; + + if (!(ute.getUndeclaredThrowable() instanceof InvocationTargetException)) { + return null; + } + + InvocationTargetException ite = (InvocationTargetException) ute.getUndeclaredThrowable(); + if (!(ite.getTargetException() instanceof AmazonServiceException)) { + return null; + } + + return (AmazonServiceException) ite.getTargetException(); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleMessage.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleMessage.java similarity index 99% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleMessage.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleMessage.java index db2fd27a287..d86d5397345 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleMessage.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleMessage.java @@ -17,7 +17,6 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; - import java.util.Date; @JsonIgnoreProperties(ignoreUnknown = true) diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleSubscriberConfiguration.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleSubscriberConfiguration.java new file mode 100644 index 00000000000..4c56554ad8d --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/LifecycleSubscriberConfiguration.java @@ -0,0 +1,57 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.lifecycle; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.tags.EntityTagger; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.ArrayList; +import java.util.Collections; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@EnableConfigurationProperties({ + LaunchFailureConfigurationProperties.class, + InstanceTerminationConfigurationProperties.class +}) +class LifecycleSubscriberConfiguration { + + @Bean + @ConditionalOnProperty("aws.lifecycle-subscribers.launch-failure.enabled") + LaunchFailureNotificationAgentProvider launchFailureNotificationAgentProvider( + @Qualifier("amazonObjectMapper") ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + CredentialsRepository credentialsRepository, + LaunchFailureConfigurationProperties properties, + EntityTagger entityTagger, + AwsProvider awsProvider) { + awsProvider.addAgents( + new ArrayList<>( + Collections.singletonList( + new LaunchFailureNotificationCleanupAgent( + amazonClientProvider, credentialsRepository, entityTagger)))); + return new LaunchFailureNotificationAgentProvider( + objectMapper, amazonClientProvider, credentialsRepository, properties, entityTagger); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessage.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessage.java similarity index 99% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessage.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessage.java index c3dd3d157a8..83365bee8f6 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessage.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessage.java @@ -18,7 +18,6 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; - import java.util.Map; @JsonIgnoreProperties(ignoreUnknown = true) diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessageWrapper.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessageWrapper.java similarity index 100% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessageWrapper.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/lifecycle/NotificationMessageWrapper.java diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationChangeSet.java 
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationChangeSet.java new file mode 100644 index 00000000000..64da278fe7f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationChangeSet.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019 Adevinta. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.model; + +import com.amazonaws.services.cloudformation.model.Change; +import com.fasterxml.jackson.annotation.JsonInclude; +import java.util.List; + +@JsonInclude(JsonInclude.Include.NON_EMPTY) +public class AmazonCloudFormationChangeSet { + + private String name; + private String status; + private String statusReason; + private List changes; + + public String getName() { + return name; + } + + public String getStatus() { + return status; + } + + public String getStatusReason() { + return statusReason; + } + + public List getChanges() { + return changes; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationStack.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationStack.java new file mode 100644 index 00000000000..2185ade3a56 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationStack.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.aws.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; + +@JsonInclude(JsonInclude.Include.NON_EMPTY) +public class AmazonCloudFormationStack { + final String type = "aws"; + private String stackId; + private Map tags; + private Map outputs; + private String stackName; + private String region; + private String stackStatus; + private String stackStatusReason; + private String accountName; + private String accountId; + private List changeSets; + private Date creationTime; + + public String getStackId() { + return stackId; + } + + public Map getTags() { + return tags; + } + + public Map getOutputs() { + return outputs; + } + + public String getStackName() { + return stackName; + } + + public String getRegion() { + return region; + } + + public String getAccountName() { + return accountName; + } + + public String getAccountId() { + return accountId; + } + + public String getStackStatus() { + return stackStatus; + } + + public String getStackStatusReason() { + return stackStatusReason; + } + + public List getChangeSets() { + return (changeSets == null) ? null : new ArrayList<>(changeSets); + } + + public Date getCreationTime() { + return creationTime; + } + + @Override + public boolean equals(Object cf) { + if (cf instanceof AmazonCloudFormationStack) { + return stackId.equals(((AmazonCloudFormationStack) cf).stackId); + } else { + return false; + } + } + + @Override + public int hashCode() { + return stackId.hashCode(); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonImage.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonImage.java similarity index 94% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonImage.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonImage.java index ee5741dbc64..90bbf77bc08 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonImage.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonImage.java @@ -18,11 +18,10 @@ import com.fasterxml.jackson.annotation.JsonUnwrapped; import com.netflix.spinnaker.clouddriver.model.Image; -import lombok.Data; -import lombok.NoArgsConstructor; - import java.util.ArrayList; import java.util.List; +import lombok.Data; +import lombok.NoArgsConstructor; @Data @NoArgsConstructor @@ -31,8 +30,7 @@ public class AmazonImage implements Image { String region; List serverGroups = new ArrayList<>(); - @JsonUnwrapped - com.amazonaws.services.ec2.model.Image image; + @JsonUnwrapped com.amazonaws.services.ec2.model.Image image; public String getName() { return image.getName(); diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonLoadBalancerType.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonLoadBalancerType.java similarity index 95% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonLoadBalancerType.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonLoadBalancerType.java index 0b5ccbd6487..7c10da26925 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonLoadBalancerType.java +++ 
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/AmazonLoadBalancerType.java @@ -22,7 +22,7 @@ public enum AmazonLoadBalancerType { NETWORK; public static AmazonLoadBalancerType getByValue(String value) { - for(AmazonLoadBalancerType lbt: values()) { + for (AmazonLoadBalancerType lbt : values()) { if (lbt.toString().equals(value)) { return lbt; } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/Role.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/Role.java similarity index 99% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/Role.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/Role.java index 4db8c1ff782..e450b991c40 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/Role.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/Role.java @@ -21,6 +21,8 @@ public interface Role { String getName(); + String getId(); + Collection getTrustRelationships(); } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/RoleProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/RoleProvider.java similarity index 100% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/RoleProvider.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/RoleProvider.java diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/TargetGroupServerGroupProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/TargetGroupServerGroupProvider.java similarity index 81% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/TargetGroupServerGroupProvider.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/TargetGroupServerGroupProvider.java index 911fd350c08..db42a8affa3 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/TargetGroupServerGroupProvider.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/TargetGroupServerGroupProvider.java @@ -17,12 +17,13 @@ package com.netflix.spinnaker.clouddriver.aws.model; import com.netflix.spinnaker.cats.cache.CacheData; - import java.util.Collection; import java.util.Map; public interface TargetGroupServerGroupProvider { - Map getServerGroups(String applicationName, Map allTargetGroups, Collection targetGroupData); - + Map getServerGroups( + String applicationName, + Map allTargetGroups, + Collection targetGroupData); } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/TrustRelationship.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/TrustRelationship.java similarity index 91% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/TrustRelationship.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/TrustRelationship.java index a4853571934..8d05cac0649 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/TrustRelationship.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/TrustRelationship.java @@ -16,9 +16,7 @@ package com.netflix.spinnaker.clouddriver.aws.model; -/** - * A trust relationship allows a 
user or service to assume that role.
- */
+/** A trust relationship allows a user or service to assume that role. */
 public interface TrustRelationship {
   String getType();
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/ApplicationLoadBalancerAttributes.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/ApplicationLoadBalancerAttributes.java
new file mode 100644
index 00000000000..facba72f5b3
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/ApplicationLoadBalancerAttributes.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.model.edda;
+
+import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancerAttribute;
+import java.util.List;
+
+public class ApplicationLoadBalancerAttributes {
+
+  private String loadBalancerArn;
+  private List<LoadBalancerAttribute> attributes;
+
+  public String getLoadBalancerArn() {
+    return loadBalancerArn;
+  }
+
+  public void setLoadBalancerArn(String loadBalancerArn) {
+    this.loadBalancerArn = loadBalancerArn;
+  }
+
+  public List<LoadBalancerAttribute> getAttributes() {
+    return attributes;
+  }
+
+  public void setAttributes(List<LoadBalancerAttribute> attributes) {
+    this.attributes = attributes;
+  }
+}
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/ClassicLoadBalancerAttributes.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/ClassicLoadBalancerAttributes.java
new file mode 100644
index 00000000000..73cebea7176
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/ClassicLoadBalancerAttributes.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.aws.model.edda; + +import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes; + +public class ClassicLoadBalancerAttributes { + + private String name; + private LoadBalancerAttributes attributes; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public LoadBalancerAttributes getAttributes() { + return attributes; + } + + public void setAttributes(LoadBalancerAttributes attributes) { + this.attributes = attributes; + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/EddaRule.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/EddaRule.java similarity index 99% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/EddaRule.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/EddaRule.java index 89976fbf711..ed8fc0eabe9 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/EddaRule.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/EddaRule.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.aws.model.edda; import com.amazonaws.services.elasticloadbalancingv2.model.Rule; - import java.util.List; public class EddaRule { diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupAttributes.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupAttributes.java similarity index 99% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupAttributes.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupAttributes.java index 2d3b01a49ff..78f91a5e116 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupAttributes.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupAttributes.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.aws.model.edda; import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupAttribute; - import java.util.List; public class TargetGroupAttributes { diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupHealth.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupHealth.java similarity index 99% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupHealth.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupHealth.java index 67498321477..de9e61d6748 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupHealth.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/model/edda/TargetGroupHealth.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.aws.model.edda; import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription; - import java.util.List; public class TargetGroupHealth { diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractClusterCleanupAgent.java 
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractClusterCleanupAgent.java new file mode 100644 index 00000000000..4dea13aa55c --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AbstractClusterCleanupAgent.java @@ -0,0 +1,92 @@ +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*; + +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; + +/** + * A caching agent that handles eviction of clusters that no longer have server groups. + * + *

<p>Clusters exist due to the existence of server groups, but the caching agents that supply + * server groups (and can cause clusters to exist) do not have a global view of the data, so they + * cannot definitively say a cluster should be removed once there are no server groups in that region. + * + *

<p>This agent just indexes the server groups that exist to find clusters that should be removed + * and causes them to be evicted. + * + *
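(A minimal, self-contained sketch of the set-difference idea this javadoc describes. The cache keys and the name parsing below are illustrative only; in the real agent, `parseServerGroupId` and `buildClusterId` delegate to each provider's `Keys` class.)

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class ClusterCleanupSketch {
  public static void main(String[] args) {
    // Illustrative cluster keys: aws:clusters:<application>:<account>:<cluster>
    Set<String> clusters = new HashSet<>(Set.of(
        "aws:clusters:myapp:prod:myapp-main",      // still backed by a server group
        "aws:clusters:myapp:prod:myapp-canary"));  // orphaned

    // Illustrative server group keys: aws:serverGroups:<cluster>:<account>:<region>:<name>
    List<String> serverGroups =
        List.of("aws:serverGroups:myapp-main:prod:us-east-1:myapp-main-v002");

    // Every server group "claims" its owning cluster; unclaimed clusters get evicted.
    for (String sg : serverGroups) {
      String[] parts = sg.split(":");
      String cluster = parts[2];
      String account = parts[3];
      String application = cluster.split("-")[0]; // stand-in for real name parsing
      clusters.remove("aws:clusters:" + application + ":" + account + ":" + cluster);
    }

    System.out.println(clusters); // [aws:clusters:myapp:prod:myapp-canary]
  }
}
```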

This class is abstract to allow for an AWS and Titus subclass to handle the differentiation in + * cache key parsing, globbing, and construction but otherwise the logic is the same across both + * providers. + */ +@Slf4j +public abstract class AbstractClusterCleanupAgent implements CachingAgent { + + @Override + public String getAgentType() { + return getCloudProviderId() + "/" + getClass().getSimpleName(); + } + + @Override + public Collection getProvidedDataTypes() { + return Collections.singleton(INFORMATIVE.forType(CLUSTERS.ns)); + } + + protected abstract String getCloudProviderId(); + + @Override + public CacheResult loadData(ProviderCache providerCache) { + final Collection serverGroups = + providerCache.filterIdentifiers( + SERVER_GROUPS.ns, buildMatchAllGlob(getCloudProviderId(), SERVER_GROUPS.ns)); + final Collection clusters = + new HashSet<>( + providerCache.filterIdentifiers( + CLUSTERS.ns, buildMatchAllGlob(getCloudProviderId(), CLUSTERS.ns))); + + for (String sgId : serverGroups) { + final Map parts = parseServerGroupId(sgId); + if (parts != null + && parts.containsKey("cluster") + && parts.containsKey("application") + && parts.containsKey("account")) { + final String clusterId = + buildClusterId(parts.get("cluster"), parts.get("application"), parts.get("account")); + clusters.remove(clusterId); + } + } + + if (clusters.isEmpty()) { + return new DefaultCacheResult(Collections.emptyMap()); + } + + if (log.isDebugEnabled()) { + log.debug( + "Evicting {} clusters. Count: {}, keys: {}", + getCloudProviderId(), + clusters.size(), + clusters); + } else { + log.info("Evicting {} clusters. Count: {}", getCloudProviderId(), clusters.size()); + } + + return new DefaultCacheResult(Collections.emptyMap(), Map.of(CLUSTERS.ns, clusters)); + } + + protected abstract Map parseServerGroupId(String serverGroupId); + + protected abstract String buildClusterId(String cluster, String application, String account); + + protected static String buildMatchAllGlob(String cloudProviderId, String type) { + return cloudProviderId + ":" + type + ":*"; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCachingAgentFilter.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCachingAgentFilter.java new file mode 100644 index 00000000000..07507c61350 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCachingAgentFilter.java @@ -0,0 +1,128 @@ +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import java.util.List; +import org.apache.commons.lang3.StringUtils; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.stereotype.Component; + +@Component +@ConfigurationProperties("aws.caching.filter") +public class AmazonCachingAgentFilter { + + List includeTags; + List excludeTags; + + public static class TagFilterOption { + String name; + String value; + + public TagFilterOption() {} + + public TagFilterOption(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + } + + public List getIncludeTags() { + return includeTags; + } + + public void setIncludeTags(List includeTags) { + this.includeTags = includeTags; + } + + public List getExcludeTags() 
{ + return excludeTags; + } + + public void setExcludeTags(List excludeTags) { + this.excludeTags = excludeTags; + } + + public boolean hasTagFilter() { + return this.hasIncludeTagFilter() || this.hasExcludeTagFilter(); + } + + public boolean hasIncludeTagFilter() { + return this.includeTags != null && this.includeTags.size() > 0; + } + + public boolean hasExcludeTagFilter() { + return this.excludeTags != null && this.excludeTags.size() > 0; + } + + /** + * ResourceTag is a helper wrapper for AWS resources which are taggable, to convert their specific + * types from the AWS SDK into a generic type for comparison by this agent filter. + */ + public static class ResourceTag { + final String key; + final String value; + + public ResourceTag(String key, String value) { + this.key = key; + this.value = value; + } + } + + /** + * Determine if the resource with the given set of tags should be retained, that is, if the caller + * should discard or keep it based on the include/exclude filters configured. + */ + public boolean shouldRetainResource(List tags) { + + // retain the resource by default if there isn't an include filter setup + boolean retainResource = !this.hasIncludeTagFilter(); + if (tags == null || tags.size() == 0) { + return retainResource; + } + + if (this.hasIncludeTagFilter()) { + retainResource = + this.includeTags.stream() + .anyMatch( + filter -> + tags.stream() + .anyMatch(tag -> tagFilterOptionMatchesResourceTag(filter, tag))); + } + + // exclude takes precedence over include so runs second if the resource is still being retained + if (retainResource && this.hasExcludeTagFilter()) { + retainResource = + this.excludeTags.stream() + .noneMatch( + filter -> + tags.stream() + .anyMatch(tag -> tagFilterOptionMatchesResourceTag(filter, tag))); + } + + return retainResource; + } + + private boolean tagFilterOptionMatchesResourceTag( + TagFilterOption tagFilterOption, ResourceTag resourceTag) { + if (resourceTag.key == null || !resourceTag.key.matches(tagFilterOption.name)) { + return false; + } + + return StringUtils.isEmpty(tagFilterOption.getValue()) + || (resourceTag.value != null && resourceTag.value.matches(tagFilterOption.value)); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCloudFormationCachingAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCloudFormationCachingAgent.java new file mode 100644 index 00000000000..5c3a6be6d57 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCloudFormationCachingAgent.java @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
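(A hedged usage sketch for the tag filter above: properties under `aws.caching.filter` bind via `@ConfigurationProperties`, `name`/`value` are regular expressions matched with `String.matches`, and exclusion takes precedence over inclusion. The tag names and values here are hypothetical.)

```java
import java.util.List;

class CachingAgentFilterSketch {
  public static void main(String[] args) {
    AmazonCachingAgentFilter filter = new AmazonCachingAgentFilter();
    // Equivalent configuration:
    //   aws.caching.filter.includeTags[0].name=spinnaker-managed
    //   aws.caching.filter.includeTags[0].value=true
    //   aws.caching.filter.excludeTags[0].name=team
    //   aws.caching.filter.excludeTags[0].value=legacy-.*
    filter.setIncludeTags(
        List.of(new AmazonCachingAgentFilter.TagFilterOption("spinnaker-managed", "true")));
    filter.setExcludeTags(
        List.of(new AmazonCachingAgentFilter.TagFilterOption("team", "legacy-.*")));

    // Retained: matches the include rule, no exclude rule applies.
    boolean kept = filter.shouldRetainResource(
        List.of(new AmazonCachingAgentFilter.ResourceTag("spinnaker-managed", "true")));

    // Dropped: the exclude rule wins even though the include rule also matches.
    boolean dropped = filter.shouldRetainResource(
        List.of(
            new AmazonCachingAgentFilter.ResourceTag("spinnaker-managed", "true"),
            new AmazonCachingAgentFilter.ResourceTag("team", "legacy-payments")));

    System.out.println(kept + " " + dropped); // true false
  }
}
```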
+ */ +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.STACKS; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND; + +import com.amazonaws.services.cloudformation.AmazonCloudFormation; +import com.amazonaws.services.cloudformation.model.*; +import com.amazonaws.services.cloudformation.model.Stack; +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.*; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.cache.Keys; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import java.time.Duration; +import java.util.*; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.util.CollectionUtils; + +@Slf4j +public class AmazonCloudFormationCachingAgent + implements CachingAgent, OnDemandAgent, AccountAware, AgentIntervalAware { + private final AmazonClientProvider amazonClientProvider; + private final NetflixAmazonCredentials account; + private final String region; + private final OnDemandMetricsSupport metricsSupport; + protected static final String ON_DEMAND_TYPE = "onDemand"; + + static final Set types = + new HashSet<>(Collections.singletonList(AUTHORITATIVE.forType(STACKS.getNs()))); + + public AmazonCloudFormationCachingAgent( + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + String region, + Registry registry) { + this.amazonClientProvider = amazonClientProvider; + this.account = account; + this.region = region; + this.metricsSupport = + new OnDemandMetricsSupport( + registry, + this, + String.format("%s:%s", AmazonCloudProvider.ID, OnDemandType.CloudFormation)); + } + + @Override + public String getProviderName() { + return AwsInfrastructureProvider.PROVIDER_NAME; + } + + @Override + public String getOnDemandAgentType() { + return getAgentType(); + } + + @Override + public OnDemandMetricsSupport getMetricsSupport() { + return this.metricsSupport; + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return OnDemandType.CloudFormation.equals(type) && cloudProvider.equals(AmazonCloudProvider.ID); + } + + @Override + public OnDemandResult handle(ProviderCache providerCache, Map data) { + if (shouldHandle(data)) { + log.info( + "Updating CloudFormation cache for account: {} and region: {}", + account.getName(), + this.region); + + DescribeStacksRequest describeStacksRequest = + Optional.ofNullable((String) data.get("stackName")) + .map(stackName -> new DescribeStacksRequest().withStackName(stackName)) + .orElse(new DescribeStacksRequest()); + + CacheResult result = queryStacks(providerCache, describeStacksRequest, 
true); + Collection keys = + result.getCacheResults().get("stacks").stream() + .map(cachedata -> cachedata.getId()) + .collect(Collectors.toList()); + + keys.forEach( + key -> { + CacheData cacheData = + new DefaultCacheData( + key, + (int) Duration.ofMinutes(10).getSeconds(), + ImmutableMap.of( + "cacheTime", + System.currentTimeMillis(), + "cacheResults", + result, + "processedCount", + 0), + /* relationShips= */ ImmutableMap.of()); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData); + }); + return new OnDemandResult(getOnDemandAgentType(), result, Collections.emptyMap()); + } else { + return null; + } + } + + private boolean shouldHandle(Map data) { + String credentials = (String) data.get("credentials"); + List region = (List) data.get("region"); + return data.isEmpty() + || (account.getName().equals(credentials) + && region != null + && region.contains(this.region)); + } + + @Override + public Collection> pendingOnDemandRequests(ProviderCache providerCache) { + Collection ownedKeys = + providerCache.filterIdentifiers( + ON_DEMAND.getNs(), Keys.getCloudFormationKey("*", this.region, this.getAccountName())); + Collection> onDemandEntriesToReturn = + providerCache.getAll(ON_DEMAND.getNs(), ownedKeys).stream() + .map( + cacheData -> { + Map map = new HashMap<>(); + map.put("details", Keys.parse(cacheData.getId())); + map.put("moniker", cacheData.getAttributes().get("moniker")); + map.put("cacheTime", cacheData.getAttributes().get("cacheTime")); + map.put("processedCount", cacheData.getAttributes().get("processedCount")); + map.put("processedTime", cacheData.getAttributes().get("processedTime")); + return map; + }) + .collect(toImmutableList()); + + return onDemandEntriesToReturn; + } + + @Override + public String getAgentType() { + return String.format( + "%s/%s/%s", + account.getName(), region, AmazonCloudFormationCachingAgent.class.getSimpleName()); + } + + @Override + public String getAccountName() { + return account.getName(); + } + + @Override + public Collection getProvidedDataTypes() { + return types; + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + log.info(getAgentType() + ": agent is starting"); + + List keepInOnDemand = new ArrayList<>(); + List evictFromOnDemand = new ArrayList<>(); + Long start = System.currentTimeMillis(); + + CacheResult stacks = queryStacks(providerCache, new DescribeStacksRequest(), false); + Collection keys = + stacks.getCacheResults().get("stacks").stream() + .map(cachedata -> cachedata.getId()) + .collect(Collectors.toList()); + + Collection onDemandEntries = providerCache.getAll(ON_DEMAND.getNs(), keys); + if (!CollectionUtils.isEmpty(onDemandEntries)) { + onDemandEntries.forEach( + cacheData -> { + long cacheTime = (long) cacheData.getAttributes().get("cacheTime"); + if (cacheTime < start && (int) cacheData.getAttributes().get("processedCount") > 0) { + evictFromOnDemand.add(cacheData.getId()); + } else { + keepInOnDemand.add(cacheData.getId()); + } + }); + } + onDemandEntries = providerCache.getAll(ON_DEMAND.getNs(), keepInOnDemand); + if (!CollectionUtils.isEmpty(onDemandEntries)) { + providerCache + .getAll(ON_DEMAND.getNs(), keepInOnDemand) + .forEach( + cacheData -> { + cacheData.getAttributes().put("processedTime", System.currentTimeMillis()); + int processedCount = (Integer) cacheData.getAttributes().get("processedCount"); + cacheData.getAttributes().put("processedCount", processedCount + 1); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData); + }); + } + 
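+ // On-demand bookkeeping: entries cached before this cycle began and already folded
+ // into at least one loadData result (processedCount > 0) are evicted below;
+ // newer or not-yet-processed entries survive into the next caching cycle.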
providerCache.evictDeletedItems(ON_DEMAND.getNs(), evictFromOnDemand); + + return stacks; + } + + public CacheResult queryStacks( + ProviderCache providerCache, + DescribeStacksRequest describeStacksRequest, + boolean isPartialResult) { + log.info("Describing items in {}, partial result: {}", getAgentType(), isPartialResult); + AmazonCloudFormation cloudformation = + amazonClientProvider.getAmazonCloudFormation(account, region); + + ArrayList stackCacheData = new ArrayList<>(); + + try { + while (true) { + DescribeStacksResult describeStacksResult = + cloudformation.describeStacks(describeStacksRequest); + List stacks = describeStacksResult.getStacks(); + + for (Stack stack : stacks) { + Map stackAttributes = getStackAttributes(stack, cloudformation); + String stackCacheKey = + Keys.getCloudFormationKey(stack.getStackId(), region, account.getName()); + Map> relationships = new HashMap<>(); + relationships.put(STACKS.getNs(), Collections.singletonList(stackCacheKey)); + stackCacheData.add(new DefaultCacheData(stackCacheKey, stackAttributes, relationships)); + } + + if (describeStacksResult.getNextToken() != null) { + describeStacksRequest.withNextToken(describeStacksResult.getNextToken()); + } else { + break; + } + } + } catch (AmazonCloudFormationException e) { + log.error("Error retrieving stacks", e); + } + + log.info("Caching {} items in {}", stackCacheData.size(), getAgentType()); + HashMap> result = new HashMap<>(); + result.put(STACKS.getNs(), stackCacheData); + return new DefaultCacheResult(result, isPartialResult); + } + + private Map getStackAttributes(Stack stack, AmazonCloudFormation cloudformation) { + Map stackAttributes = new HashMap<>(); + stackAttributes.put("stackId", stack.getStackId()); + stackAttributes.put( + "tags", stack.getTags().stream().collect(Collectors.toMap(Tag::getKey, Tag::getValue))); + stackAttributes.put( + "outputs", + stack.getOutputs().stream() + .collect(Collectors.toMap(Output::getOutputKey, Output::getOutputValue))); + stackAttributes.put("stackName", stack.getStackName()); + stackAttributes.put("region", region); + stackAttributes.put("accountName", account.getName()); + stackAttributes.put("accountId", account.getAccountId()); + stackAttributes.put("stackStatus", stack.getStackStatus()); + stackAttributes.put("creationTime", stack.getCreationTime()); + stackAttributes.put("changeSets", getChangeSets(stack, cloudformation)); + getStackStatusReason(stack, cloudformation) + .map(statusReason -> stackAttributes.put("stackStatusReason", statusReason)); + return stackAttributes; + } + + private List> getChangeSets( + Stack stack, AmazonCloudFormation cloudformation) { + ListChangeSetsRequest listChangeSetsRequest = + new ListChangeSetsRequest().withStackName(stack.getStackName()); + + List> changeSets = new ArrayList<>(); + while (true) { + ListChangeSetsResult listChangeSetsResult = + cloudformation.listChangeSets(listChangeSetsRequest); + + changeSets.addAll( + listChangeSetsResult.getSummaries().stream() + .map( + summary -> { + Map changeSetAttributes = new HashMap<>(); + changeSetAttributes.put("name", summary.getChangeSetName()); + changeSetAttributes.put("status", summary.getStatus()); + changeSetAttributes.put("statusReason", summary.getStatusReason()); + DescribeChangeSetRequest describeChangeSetRequest = + new DescribeChangeSetRequest() + .withChangeSetName(summary.getChangeSetName()) + .withStackName(stack.getStackName()); + DescribeChangeSetResult describeChangeSetResult = + cloudformation.describeChangeSet(describeChangeSetRequest); + 
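+ // Each change set summary is enriched with its full change list, at the cost
+ // of one extra DescribeChangeSet call per change set.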
changeSetAttributes.put("changes", describeChangeSetResult.getChanges()); + log.debug( + "Adding change set attributes for stack {}: {}", + stack.getStackName(), + changeSetAttributes); + return changeSetAttributes; + }) + .collect(Collectors.toList())); + + if (listChangeSetsResult.getNextToken() != null) { + listChangeSetsRequest.withNextToken(listChangeSetsResult.getNextToken()); + } else { + break; + } + } + + return changeSets; + } + + private Optional getStackStatusReason(Stack stack, AmazonCloudFormation cloudformation) { + if (stack.getStackStatus().endsWith("ROLLBACK_COMPLETE")) { + DescribeStackEventsRequest request = + new DescribeStackEventsRequest().withStackName(stack.getStackName()); + return cloudformation.describeStackEvents(request).getStackEvents().stream() + .filter(e -> e.getResourceStatus().endsWith("FAILED")) + .findFirst() + .map(StackEvent::getResourceStatusReason); + } else { + return Optional.empty(); + } + } + + @Override + public Long getAgentInterval() { + return 60000L; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgent.java new file mode 100644 index 00000000000..405af40ee4c --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgent.java @@ -0,0 +1,228 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; + +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult; +import com.amazonaws.services.ec2.model.GpuInfo; +import com.amazonaws.services.ec2.model.InstanceStorageInfo; +import com.amazonaws.services.ec2.model.InstanceTypeInfo; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.cache.Keys; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class AmazonInstanceTypeCachingAgent implements CachingAgent, AccountAware { + + private static final TypeReference> ATTRIBUTES = + new TypeReference>() {}; + + private final String region; + private final AmazonClientProvider amazonClientProvider; + private final NetflixAmazonCredentials account; + private final ObjectMapper objectMapper; + + public AmazonInstanceTypeCachingAgent( + String region, + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + ObjectMapper objectMapper) { + this.account = account; + this.amazonClientProvider = amazonClientProvider; + this.region = region; + this.objectMapper = objectMapper; + } + + @Override + public Collection getProvidedDataTypes() { + return Collections.unmodifiableList( + Arrays.asList( + AUTHORITATIVE.forType(Keys.Namespace.INSTANCE_TYPES.getNs()), + AUTHORITATIVE.forType(getAgentType()))); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + AmazonEC2 amazonEC2 = amazonClientProvider.getAmazonEC2(this.account, this.region); + final List instanceTypesInfo = getInstanceTypes(amazonEC2); + + Map> cacheResults = new HashMap<>(); + + // cache instance types for key "metadata" for backwards compatibility + Set instanceTypes = + instanceTypesInfo.stream() + .map(InstanceTypeInfo::getInstanceType) + .collect(Collectors.toSet()); + DefaultCacheData metadata = buildCacheDataForMetadataKey(providerCache, instanceTypes); + cacheResults.put(getAgentType(), Collections.singleton(metadata)); + + // cache instance types info + if (instanceTypesInfo == null || instanceTypesInfo.isEmpty()) { + return new DefaultCacheResult(cacheResults); + } + + List instanceTypeData = + instanceTypesInfo.stream() + .map( + i -> { + Map attributes = objectMapper.convertValue(i, ATTRIBUTES); + attributes.put("account", account.getName()); + 
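+ // Promote commonly queried fields to top-level attributes; EC2 reports memory
+ // in MiB, so the cached value is converted to GiB.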
attributes.put("region", region); + attributes.put("name", i.getInstanceType()); + attributes.put("defaultVCpus", i.getVCpuInfo().getDefaultVCpus()); + attributes.put("memoryInGiB", i.getMemoryInfo().getSizeInMiB() / 1024); + attributes.put( + "supportedArchitectures", i.getProcessorInfo().getSupportedArchitectures()); + + if (i.getInstanceStorageInfo() != null) { + InstanceStorageInfo info = i.getInstanceStorageInfo(); + Map instanceStorageAttributes = new HashMap<>(); + + instanceStorageAttributes.put("totalSizeInGB", info.getTotalSizeInGB()); + if (info.getDisks() != null && info.getDisks().size() > 0) { + instanceStorageAttributes.put( + "storageTypes", + info.getDisks().stream() + .map(d -> d.getType()) + .collect(Collectors.joining(","))); + } + if (info.getNvmeSupport() != null) { + instanceStorageAttributes.put("nvmeSupport", info.getNvmeSupport()); + } + attributes.put("instanceStorageInfo", instanceStorageAttributes); + } + + if (i.getGpuInfo() != null) { + GpuInfo info = i.getGpuInfo(); + Map gpuInfoAttributes = new HashMap<>(); + + if (info.getTotalGpuMemoryInMiB() != null) { + gpuInfoAttributes.put("totalGpuMemoryInMiB", info.getTotalGpuMemoryInMiB()); + } + if (info.getGpus() != null) { + gpuInfoAttributes.put( + "gpus", + info.getGpus().stream() + .map( + g -> { + Map gpuDeviceInfo = new HashMap<>(); + gpuDeviceInfo.put("name", g.getName()); + gpuDeviceInfo.put("manufacturer", g.getManufacturer()); + gpuDeviceInfo.put("count", g.getCount()); + gpuDeviceInfo.put( + "gpuSizeInMiB", g.getMemoryInfo().getSizeInMiB()); + return gpuDeviceInfo; + }) + .collect(Collectors.toList())); + } + attributes.put("gpuInfo", gpuInfoAttributes); + } + + if (i.getNetworkInfo() != null) { + attributes.put("ipv6Supported", i.getNetworkInfo().getIpv6Supported()); + } + + return new DefaultCacheData( + Keys.getInstanceTypeKey(i.getInstanceType(), region, account.getName()), + attributes, + Collections.emptyMap()); + }) + .collect(Collectors.toList()); + cacheResults.put(Keys.Namespace.INSTANCE_TYPES.getNs(), instanceTypeData); + + return new DefaultCacheResult(cacheResults); + } + + private DefaultCacheData buildCacheDataForMetadataKey( + ProviderCache providerCache, final Set instanceTypes) { + CacheData metadata = + providerCache.get(getAgentType(), "metadata", RelationshipCacheFilter.none()); + MetadataAttributes metadataAttributes; + + if (metadata != null) { + metadataAttributes = + objectMapper.convertValue(metadata.getAttributes(), MetadataAttributes.class); + } else { + MetadataAttributes newMetadataAttributes = new MetadataAttributes(); + newMetadataAttributes.cachedInstanceTypes = instanceTypes; + metadataAttributes = newMetadataAttributes; + } + + return new DefaultCacheData( + "metadata", + objectMapper.convertValue(metadataAttributes, ATTRIBUTES), + Collections.emptyMap()); + } + + private List getInstanceTypes(AmazonEC2 ec2) { + final List instanceTypeInfoList = new ArrayList<>(); + final DescribeInstanceTypesRequest request = new DescribeInstanceTypesRequest(); + while (true) { + final DescribeInstanceTypesResult result = ec2.describeInstanceTypes(request); + instanceTypeInfoList.addAll(result.getInstanceTypes()); + if (result.getNextToken() != null) { + request.withNextToken(result.getNextToken()); + } else { + break; + } + } + + return instanceTypeInfoList; + } + + @Override + public String getAgentType() { + return getClass().getSimpleName() + "/" + region; + } + + @Override + public String getProviderName() { + return AwsInfrastructureProvider.PROVIDER_NAME; + } + + @Override + 
public String getAccountName() { + return account.getName(); + } + + static class MetadataAttributes { + public Set cachedInstanceTypes; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLaunchTemplateCachingAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLaunchTemplateCachingAgent.java new file mode 100644 index 00000000000..bdd30931b05 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLaunchTemplateCachingAgent.java @@ -0,0 +1,189 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LAUNCH_TEMPLATES; +import static java.util.stream.Collectors.toSet; + +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult; +import com.amazonaws.services.ec2.model.LaunchTemplate; +import com.amazonaws.services.ec2.model.LaunchTemplateVersion; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.data.Keys; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AmazonLaunchTemplateCachingAgent implements CachingAgent, AccountAware { + private final Logger log = LoggerFactory.getLogger(getClass()); + private final AmazonClientProvider amazonClientProvider; + private final NetflixAmazonCredentials account; + private 
final ObjectMapper objectMapper; + private final String region; + + private static final String[] DEFAULT_VERSIONS = new String[] {"$Default", "$Latest"}; + + private static final TypeReference> ATTRIBUTES = + new TypeReference>() {}; + private static final Set types = + new HashSet<>(Collections.singletonList(AUTHORITATIVE.forType(LAUNCH_TEMPLATES.getNs()))); + + public AmazonLaunchTemplateCachingAgent( + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + String region, + ObjectMapper objectMapper, + Registry registry) { + this.amazonClientProvider = amazonClientProvider; + this.account = account; + this.region = region; + this.objectMapper = objectMapper; + } + + @Override + public Collection getProvidedDataTypes() { + return types; + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + final AmazonEC2 ec2 = amazonClientProvider.getAmazonEC2(account, region); + final List launchTemplates = getLaunchTemplates(ec2); + final List launchTemplateVersions = + getLaunchTemplateVersions(ec2, DEFAULT_VERSIONS); + final List cachedData = new ArrayList<>(); + for (LaunchTemplate launchTemplate : launchTemplates) { + Set versions = + launchTemplateVersions.stream() + .filter(t -> t.getLaunchTemplateId().equals(launchTemplate.getLaunchTemplateId())) + .collect(toSet()); + + // store the latest template version info + Optional latest = + versions.stream() + .filter(v -> v.getVersionNumber().equals(launchTemplate.getLatestVersionNumber())) + .findFirst(); + + if (latest.isEmpty()) { + log.debug("No latest version found for template {}", launchTemplate); + continue; + } + + String key = + Keys.getLaunchTemplateKey( + launchTemplate.getLaunchTemplateName(), account.getName(), region); + Map attributes = objectMapper.convertValue(launchTemplate, ATTRIBUTES); + + attributes.put("application", Keys.parse(key).get("application")); + attributes.put("latestVersion", latest.get()); + + // include version info + attributes.put("versions", versions); + + Set images = + versions.stream() + .map( + i -> + Keys.getImageKey( + i.getLaunchTemplateData().getImageId(), account.getName(), region)) + .collect(Collectors.toSet()); + + Map> relationships = Collections.singletonMap(IMAGES.ns, images); + cachedData.add(new DefaultCacheData(key, attributes, relationships)); + } + + return new DefaultCacheResult(Collections.singletonMap(LAUNCH_TEMPLATES.ns, cachedData)); + } + + /** Gets a list of ec2 Launch templates */ + private List getLaunchTemplates(AmazonEC2 ec2) { + final List launchTemplates = new ArrayList<>(); + final DescribeLaunchTemplatesRequest request = new DescribeLaunchTemplatesRequest(); + while (true) { + final DescribeLaunchTemplatesResult result = ec2.describeLaunchTemplates(request); + launchTemplates.addAll(result.getLaunchTemplates()); + if (result.getNextToken() != null) { + request.withNextToken(result.getNextToken()); + } else { + break; + } + } + + return launchTemplates; + } + + /** Gets a list of ec2 Launch template versions for a Launch template */ + private List getLaunchTemplateVersions(AmazonEC2 ec2, String... 
versions) { + final List launchTemplateVersions = new ArrayList<>(); + final DescribeLaunchTemplateVersionsRequest request = + new DescribeLaunchTemplateVersionsRequest().withVersions(versions); + while (true) { + final DescribeLaunchTemplateVersionsResult result = + ec2.describeLaunchTemplateVersions(request); + launchTemplateVersions.addAll(result.getLaunchTemplateVersions()); + if (result.getNextToken() != null) { + request.withNextToken(result.getNextToken()); + } else { + break; + } + } + + return launchTemplateVersions; + } + + @Override + public String getAgentType() { + return String.format("%s/%s/%s", account.getName(), region, getClass().getSimpleName()); + } + + @Override + public String getProviderName() { + return AwsProvider.PROVIDER_NAME; + } + + @Override + public String getAccountName() { + return account.getName(); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCleanupAgent.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCleanupAgent.java new file mode 100644 index 00000000000..db91521e330 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCleanupAgent.java @@ -0,0 +1,29 @@ +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.data.Keys; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import java.util.Map; + +public class ClusterCleanupAgent extends AbstractClusterCleanupAgent { + + @Override + public String getProviderName() { + return AwsProvider.PROVIDER_NAME; + } + + @Override + protected String getCloudProviderId() { + return AmazonCloudProvider.ID; + } + + @Override + protected Map parseServerGroupId(String serverGroupId) { + return Keys.parse(serverGroupId); + } + + @Override + protected String buildClusterId(String cluster, String application, String account) { + return Keys.getClusterKey(cluster, application, account); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/DriftMetric.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/DriftMetric.java similarity index 77% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/DriftMetric.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/DriftMetric.java index 424fef0536a..3922de95b05 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/DriftMetric.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/DriftMetric.java @@ -16,27 +16,39 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent; -import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Gauge; import com.netflix.spectator.api.Registry; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import org.slf4j.Logger; public interface DriftMetric { Registry getRegistry(); + NetflixAmazonCredentials getAccount(); + String getRegion(); + Logger getLog(); + String getAgentType(); - default Id getDriftMetricId() { - return getRegistry().createId("cache.drift", "agent", getClass().getSimpleName(), "account", getAccount().getName(), "region", getRegion()); + default Gauge getDriftMetricGauge() { + return getRegistry() + .gauge( + "cache.drift", + 
"agent", + getClass().getSimpleName(), + "account", + getAccount().getName(), + "region", + getRegion()); } default void recordDrift(Long start) { if (start != null && start != 0L) { Long drift = getRegistry().clock().wallTime() - start; getLog().info("{}/drift - {} milliseconds", getAgentType(), drift); - getRegistry().gauge(getDriftMetricId(), drift); + getDriftMetricGauge().set(drift); } } } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/config/ProviderHelpers.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/config/ProviderHelpers.java new file mode 100644 index 00000000000..ace0c676a1a --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/config/ProviderHelpers.java @@ -0,0 +1,261 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.provider.config; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentProvider; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties; +import com.netflix.spinnaker.clouddriver.aws.agent.CleanupAlarmsAgent; +import com.netflix.spinnaker.clouddriver.aws.agent.CleanupDetachedInstancesAgent; +import com.netflix.spinnaker.clouddriver.aws.agent.ReconcileClassicLinkSecurityGroupsAgent; +import com.netflix.spinnaker.clouddriver.aws.edda.EddaApiFactory; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.agent.*; +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3DataProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.security.ProviderUtils; +import com.netflix.spinnaker.config.AwsConfiguration; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.springframework.context.ApplicationContext; + +public class ProviderHelpers { + + @Getter + @RequiredArgsConstructor + public static class BuildResult { + private final List agents; + private final Set regionsToAdd; + } + + public static BuildResult buildAwsInfrastructureAgents( + 
NetflixAmazonCredentials credentials, + AwsInfrastructureProvider awsInfrastructureProvider, + CredentialsRepository credentialsRepository, + AmazonClientProvider amazonClientProvider, + ObjectMapper amazonObjectMapper, + Registry registry, + EddaTimeoutConfig eddaTimeoutConfig, + Set regions) { + Set scheduledAccounts = ProviderUtils.getScheduledAccounts(awsInfrastructureProvider); + List newlyAddedAgents = new ArrayList<>(); + for (NetflixAmazonCredentials.AWSRegion region : credentials.getRegions()) { + if (!scheduledAccounts.contains(credentials.getName())) { + if (regions.add(region.getName())) { + newlyAddedAgents.add( + new AmazonInstanceTypeCachingAgent( + region.getName(), amazonClientProvider, credentials, amazonObjectMapper)); + } + newlyAddedAgents.add( + new AmazonElasticIpCachingAgent(amazonClientProvider, credentials, region.getName())); + newlyAddedAgents.add( + new AmazonKeyPairCachingAgent(amazonClientProvider, credentials, region.getName())); + newlyAddedAgents.add( + new AmazonSecurityGroupCachingAgent( + amazonClientProvider, + credentials, + region.getName(), + amazonObjectMapper, + registry, + eddaTimeoutConfig)); + newlyAddedAgents.add( + new AmazonSubnetCachingAgent( + amazonClientProvider, credentials, region.getName(), amazonObjectMapper)); + newlyAddedAgents.add( + new AmazonVpcCachingAgent( + amazonClientProvider, credentials, region.getName(), amazonObjectMapper)); + } + } + return new BuildResult(newlyAddedAgents, regions); + } + + public static BuildResult buildAwsProviderAgents( + NetflixAmazonCredentials credentials, + CredentialsRepository credentialsRepository, + AmazonClientProvider amazonClientProvider, + ObjectMapper objectMapper, + Registry registry, + EddaTimeoutConfig eddaTimeoutConfig, + AmazonCachingAgentFilter amazonCachingAgentFilter, + AwsProvider awsProvider, + AmazonCloudProvider amazonCloudProvider, + DynamicConfigService dynamicConfigService, + EddaApiFactory eddaApiFactory, + Optional reservationReportPool, + Optional> agentProviders, + ApplicationContext ctx, + AmazonS3DataProvider amazonS3DataProvider, + Set publicRegions) { + Set scheduledAccounts = ProviderUtils.getScheduledAccounts(awsProvider); + List newlyAddedAgents = new ArrayList<>(); + newlyAddedAgents.add(new ClusterCleanupAgent()); + for (NetflixAmazonCredentials.AWSRegion region : credentials.getRegions()) { + if (!scheduledAccounts.contains(credentials.getName())) { + newlyAddedAgents.add( + new ClusterCachingAgent( + amazonCloudProvider, + amazonClientProvider, + credentials, + region.getName(), + objectMapper, + registry, + eddaTimeoutConfig, + amazonCachingAgentFilter)); + newlyAddedAgents.add( + new LaunchConfigCachingAgent( + amazonClientProvider, credentials, region.getName(), objectMapper, registry)); + + // always index private images per account/region + newlyAddedAgents.add( + new ImageCachingAgent( + amazonClientProvider, + credentials, + region.getName(), + objectMapper, + registry, + false, + dynamicConfigService)); + + if (!publicRegions.contains(region.getName())) { + // only index public images once per region (regardless of account) + publicRegions.add(region.getName()); + newlyAddedAgents.add( + new ImageCachingAgent( + amazonClientProvider, + credentials, + region.getName(), + objectMapper, + registry, + true, + dynamicConfigService)); + } + + newlyAddedAgents.add( + new InstanceCachingAgent( + amazonClientProvider, credentials, region.getName(), objectMapper, registry)); + newlyAddedAgents.add( + new AmazonLoadBalancerCachingAgent( + amazonCloudProvider, 
+ amazonClientProvider, + credentials, + region.getName(), + eddaApiFactory.createApi(credentials.getEdda(), region.getName()), + objectMapper, + registry, + amazonCachingAgentFilter)); + newlyAddedAgents.add( + new AmazonApplicationLoadBalancerCachingAgent( + amazonCloudProvider, + amazonClientProvider, + credentials, + region.getName(), + eddaApiFactory.createApi(credentials.getEdda(), region.getName()), + objectMapper, + registry, + eddaTimeoutConfig, + amazonCachingAgentFilter)); + newlyAddedAgents.add( + new ReservedInstancesCachingAgent( + amazonClientProvider, credentials, region.getName(), objectMapper, registry)); + newlyAddedAgents.add( + new AmazonCertificateCachingAgent( + amazonClientProvider, credentials, region.getName(), objectMapper, registry)); + + if (dynamicConfigService.isEnabled("aws.features.cloud-formation", false)) { + newlyAddedAgents.add( + new AmazonCloudFormationCachingAgent( + amazonClientProvider, credentials, region.getName(), registry)); + } + if (credentials.getEddaEnabled() + && !eddaTimeoutConfig.getDisabledRegions().contains(region.getName())) { + newlyAddedAgents.add( + new EddaLoadBalancerCachingAgent( + eddaApiFactory.createApi(credentials.getEdda(), region.getName()), + credentials, + region.getName(), + objectMapper)); + } else { + newlyAddedAgents.add( + new AmazonLoadBalancerInstanceStateCachingAgent( + amazonClientProvider, credentials, region.getName(), objectMapper, ctx)); + } + if (dynamicConfigService.isEnabled("aws.features.launch-templates", false)) { + newlyAddedAgents.add( + new AmazonLaunchTemplateCachingAgent( + amazonClientProvider, credentials, region.getName(), objectMapper, registry)); + } + } + } + agentProviders.ifPresent( + providers -> + providers.stream() + .filter(it -> it.supports(AwsProvider.PROVIDER_NAME)) + .forEach(provider -> newlyAddedAgents.addAll(provider.agents(credentials)))); + return new BuildResult(newlyAddedAgents, publicRegions); + } + + public static List buildAwsCleanupAgents( + NetflixAmazonCredentials credentials, + CredentialsRepository credentialsRepository, + AmazonClientProvider amazonClientProvider, + AwsCleanupProvider awsCleanupProvider, + AwsConfiguration.DeployDefaults deployDefaults, + AwsConfigurationProperties awsConfigurationProperties, + boolean hasPreviouslyScheduledCleanupAgents) { + Set scheduledAccounts = ProviderUtils.getScheduledAccounts(awsCleanupProvider); + List newlyAddedAgents = new ArrayList<>(); + if (!scheduledAccounts.contains(credentials.getName())) { + for (NetflixAmazonCredentials.AWSRegion region : credentials.getRegions()) { + if (deployDefaults.isReconcileClassicLinkAccount(credentials)) { + newlyAddedAgents.add( + new ReconcileClassicLinkSecurityGroupsAgent( + amazonClientProvider, credentials, region.getName(), deployDefaults)); + } + } + } + + if (!hasPreviouslyScheduledCleanupAgents) { + if (awsConfigurationProperties.getCleanup().getAlarms().getEnabled()) { + newlyAddedAgents.add( + new CleanupAlarmsAgent( + amazonClientProvider, + credentialsRepository, + awsConfigurationProperties.getCleanup().getAlarms().getDaysToKeep(), + awsConfigurationProperties.getCleanup().getAlarms().getAlarmsNamePattern())); + } + newlyAddedAgents.add( + new CleanupDetachedInstancesAgent(amazonClientProvider, credentialsRepository)); + } + return newlyAddedAgents; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudFormationProvider.java 
b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudFormationProvider.java new file mode 100644 index 00000000000..da681f810e0 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudFormationProvider.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.provider.view; + +import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.STACKS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.aws.cache.Keys; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonCloudFormationStack; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Slf4j +@Component +public class AmazonCloudFormationProvider { + + private final Cache cacheView; + private final ObjectMapper objectMapper; + + @Autowired + public AmazonCloudFormationProvider( + Cache cacheView, @Qualifier("amazonObjectMapper") ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.objectMapper = objectMapper; + } + + public List list(String accountName, String region) { + String filter = Keys.getCloudFormationKey("*", region, accountName); + log.debug("List all stacks with filter {}", filter); + return loadResults(cacheView.filterIdentifiers(STACKS.getNs(), filter)); + } + + public Optional get(String stackId) { + String filter = Keys.getCloudFormationKey(stackId, "*", "*"); + log.debug("Get stack with filter {}", filter); + return loadResults(cacheView.filterIdentifiers(STACKS.getNs(), filter)).stream().findFirst(); + } + + List loadResults(Collection identifiers) { + return cacheView.getAll(STACKS.getNs(), identifiers, RelationshipCacheFilter.none()).stream() + .map( + data -> { + log.debug("Cloud formation cached properties {}", data.getAttributes()); + return objectMapper.convertValue( + data.getAttributes(), AmazonCloudFormationStack.class); + }) + .collect(Collectors.toList()); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonImageProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonImageProvider.java new file mode 100644 index 00000000000..099f3319696 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonImageProvider.java @@ -0,0 +1,112 @@ +/* + * Copyright 2018 Schibsted ASA. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.provider.view; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.data.Keys; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonImage; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonServerGroup; +import com.netflix.spinnaker.clouddriver.model.Image; +import com.netflix.spinnaker.clouddriver.model.ImageProvider; +import com.netflix.spinnaker.config.AwsConfiguration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Component +public class AmazonImageProvider implements ImageProvider { + + private final Cache cacheView; + private final AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider; + private final ObjectMapper objectMapper; + + @Autowired + AmazonImageProvider( + Cache cacheView, + AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider, + @Qualifier("amazonObjectMapper") ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.amazonServerGroupProvider = amazonServerGroupProvider; + this.objectMapper = objectMapper; + } + + @Override + public Optional getImageById(String imageId) { + + if (!imageId.startsWith("ami-")) { + throw new RuntimeException( + "Image Id provided (" + + imageId + + ") is not a valid id for the provider " + + getCloudProvider()); + } + + List imageIdList = + new ArrayList<>(cacheView.filterIdentifiers(IMAGES.toString(), "*" + imageId)); + + if (imageIdList.isEmpty()) { + return Optional.empty(); + } + + List imageCacheList = + new ArrayList<>(cacheView.getAll(IMAGES.toString(), imageIdList)); + + AmazonImage image = + objectMapper.convertValue(imageCacheList.get(0).getAttributes(), AmazonImage.class); + + image.setRegion(Keys.parse(imageCacheList.get(0).getId()).get("region")); + + List serverGroupList = + imageCacheList.stream() + .filter( + imageCache -> imageCache.getRelationships().get(SERVER_GROUPS.toString()) != null) + .map(imageCache -> imageCache.getRelationships().get(SERVER_GROUPS.toString())) + .flatMap(Collection::stream) + .map(this::getServerGroupData) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + image.setServerGroups(serverGroupList); + return Optional.of(image); + } + + @Override + public String getCloudProvider() { + return 
AmazonCloudProvider.ID; + } + + private AmazonServerGroup getServerGroupData(String serverGroupCacheKey) { + Map parsedServerGroupKey = Keys.parse(serverGroupCacheKey); + return amazonServerGroupProvider.getServerGroup( + parsedServerGroupKey.get("account"), + parsedServerGroupKey.get("region"), + parsedServerGroupKey.get("serverGroup")); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProvider.java new file mode 100644 index 00000000000..9466a58cf2e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProvider.java @@ -0,0 +1,205 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.provider.view; + +import static com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3StaticDataProviderConfiguration.AdhocRecord; +import static com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3StaticDataProviderConfiguration.StaticRecord; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.S3Object; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.CacheStats; +import com.google.common.cache.LoadingCache; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.model.DataProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.stream.Collectors; +import org.apache.commons.io.IOUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.security.access.AccessDeniedException; +import org.springframework.stereotype.Component; + +@Component +public class AmazonS3DataProvider implements DataProvider { + private final ObjectMapper objectMapper; + private final AmazonClientProvider amazonClientProvider; + private final CredentialsRepository accountCredentialsRepository; + private final AmazonS3StaticDataProviderConfiguration configuration; + + private final Set supportedIdentifiers; + + private final LoadingCache staticCache = + CacheBuilder.newBuilder() + .expireAfterWrite(1, TimeUnit.MINUTES) + .recordStats() + .build( + new CacheLoader() { + public Object load(String id) throws IOException { + StaticRecord record = 
configuration.getStaticRecord(id); + S3Object s3Object = + fetchObject( + record.getBucketAccount(), + record.getBucketRegion(), + record.getBucketName(), + record.getBucketKey()); + + switch (record.getType()) { + case list: + return objectMapper.readValue(s3Object.getObjectContent(), List.class); + case object: + return objectMapper.readValue(s3Object.getObjectContent(), Map.class); + } + + return IOUtils.toString(s3Object.getObjectContent()); + } + }); + + @Autowired + public AmazonS3DataProvider( + @Qualifier("amazonObjectMapper") ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + CredentialsRepository accountCredentialsRepository, + AmazonS3StaticDataProviderConfiguration configuration) { + this.objectMapper = objectMapper; + this.amazonClientProvider = amazonClientProvider; + this.accountCredentialsRepository = accountCredentialsRepository; + this.configuration = configuration; + + this.supportedIdentifiers = + configuration.getStaticRecords().stream() + .map(r -> r.getId().toLowerCase()) + .collect(Collectors.toSet()); + } + + @Override + public Object getStaticData(String id, Map filters) { + try { + Object contents = staticCache.get(id); + if (filters.isEmpty() || !(contents instanceof List)) { + return contents; + } + + return ((List) contents) + .stream() + .filter( + r -> { + // currently only support filtering against first level attributes (TBD whether + // this is even necessary) + return filters.entrySet().stream() + .anyMatch(f -> r.get(f.getKey()).equals(f.getValue())); + }) + .collect(Collectors.toList()); + } catch (ExecutionException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void getAdhocData( + String groupId, String bucketId, String objectId, OutputStream outputStream) { + String[] bucketCoordinates = bucketId.split(":"); + if (bucketCoordinates.length != 3) { + throw new IllegalArgumentException( + "'bucketId' must be of the form {account}:{region}:{name}"); + } + + String bucketAccount = getAccountName(bucketCoordinates[0]); + String bucketRegion = bucketCoordinates[1]; + String bucketName = bucketCoordinates[2]; + + AdhocRecord record = configuration.getAdhocRecord(groupId); + Matcher bucketNameMatcher = record.getBucketNamePattern().matcher(bucketName); + Matcher objectKeyMatcher = record.getObjectKeyPattern().matcher(objectId); + + if (!bucketNameMatcher.matches() || !objectKeyMatcher.matches()) { + throw new AccessDeniedException( + "Access denied (bucket: " + bucketName + ", object: " + objectId + ")"); + } + + try { + S3Object s3Object = fetchObject(bucketAccount, bucketRegion, bucketName, objectId); + IOUtils.copy(s3Object.getObjectContent(), outputStream); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public String getAccountForIdentifier(IdentifierType identifierType, String id) { + switch (identifierType) { + case Static: + return configuration.getStaticRecord(id).getBucketAccount(); + case Adhoc: + return getAccountName(id.split(":")[0]); + } + + throw new IllegalArgumentException("Unsupported identifierType (" + identifierType + ")"); + } + + @Override + public boolean supportsIdentifier(IdentifierType identifierType, String id) { + switch (identifierType) { + case Static: + return supportedIdentifiers.contains(id.toLowerCase()); + case Adhoc: + return configuration.getAdhocRecords().stream() + .anyMatch(r -> r.getId().equalsIgnoreCase(id)); + } + + throw new IllegalArgumentException("Unsupported identifierType (" + identifierType + ")"); + } + + 
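+ + // Adhoc lookups address S3 objects as "{account}:{region}:{name}"; an illustrative call + // (hypothetical names): s3DataProvider.getAdhocData("myGroup", "prod:us-east-1:my-bucket", + // "path/to/object.json", outputStream). Both the bucket name and the object key must match + // the configured record patterns, otherwise an AccessDeniedException is thrown.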
CacheStats getStaticCacheStats() { + return staticCache.stats(); + } + + protected S3Object fetchObject( + String bucketAccount, String bucketRegion, String bucketName, String objectId) { + NetflixAmazonCredentials account = + (NetflixAmazonCredentials) accountCredentialsRepository.getOne(bucketAccount); + + AmazonS3 amazonS3 = amazonClientProvider.getAmazonS3(account, bucketRegion); + return amazonS3.getObject(bucketName, objectId); + } + + private String getAccountName(String accountIdOrName) { + return accountCredentialsRepository.getAll().stream() + .filter( + c -> + accountIdOrName.equalsIgnoreCase(c.getAccountId()) + || accountIdOrName.equalsIgnoreCase(c.getName())) + .map(AccountCredentials::getName) + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "Unsupported account identifier (accountId: " + accountIdOrName + ")")); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AWSAccountInfoLookup.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AWSAccountInfoLookup.java similarity index 75% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AWSAccountInfoLookup.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AWSAccountInfoLookup.java index f5e5d5df3a1..5465455ffb4 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AWSAccountInfoLookup.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AWSAccountInfoLookup.java @@ -20,11 +20,11 @@ import java.util.List; public interface AWSAccountInfoLookup { - String findAccountId(); + String findAccountId(); - List listRegions(Collection regionNames); + List listRegions(Collection regionNames); - List listRegions(String... regionNames); + List listRegions(String... regionNames); - List listAvailabilityZones(String region); + List listAvailabilityZones(String region); } diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AWSProxy.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AWSProxy.java new file mode 100644 index 00000000000..5e6db16a5f9 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AWSProxy.java @@ -0,0 +1,194 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.stereotype.Component; + +/** AWS Proxy Configuration */ +@ConfigurationProperties(prefix = "aws.proxy") +@Component +public class AWSProxy { + + private String proxyHost; + private String proxyPort; + private String proxyUsername; + private String proxyPassword; + private String proxyDomain; + private String proxyWorkstation; + private String protocol; + + public AWSProxy() { + this(null, null, null, null, null, null, null); + } + + public AWSProxy( + String proxyHost, + String proxyPort, + String proxyUsername, + String proxyPassword, + String protocol) { + this(proxyHost, proxyPort, proxyUsername, proxyPassword, null, null, protocol); + } + + public AWSProxy( + String proxyHost, + String proxyPort, + String proxyUsername, + String proxyPassword, + String proxyDomain, + String proxyWorkstation, + String protocol) { + this.proxyHost = proxyHost; + this.proxyPort = proxyPort; + this.proxyUsername = proxyUsername; + this.proxyDomain = proxyDomain; + this.proxyWorkstation = proxyWorkstation; + this.proxyPassword = proxyPassword; + this.protocol = protocol; + } + + public String getProxyHost() { + return proxyHost; + } + + public void setProxyHost(String proxyHost) { + this.proxyHost = proxyHost; + } + + public String getProxyUsername() { + return proxyUsername; + } + + public void setProxyUsername(String proxyUsername) { + this.proxyUsername = proxyUsername; + } + + public String getProxyPort() { + return proxyPort; + } + + public void setProxyPort(String proxyPort) { + this.proxyPort = proxyPort; + } + + public String getProxyPassword() { + return proxyPassword; + } + + public void setProxyPassword(String proxyPassword) { + this.proxyPassword = proxyPassword; + } + + public String getProxyDomain() { + return proxyDomain; + } + + public void setProxyDomain(String proxyDomain) { + this.proxyDomain = proxyDomain; + } + + public String getProxyWorkstation() { + return proxyWorkstation; + } + + public void setProxyWorkstation(String proxyWorkstation) { + this.proxyWorkstation = proxyWorkstation; + } + + public String getProtocol() { + return protocol; + } + + public void setProxyProtocol(String protocol) { + this.protocol = protocol; + } + + public void apply(ClientConfiguration clientConfiguration) { + + clientConfiguration.setProxyHost(proxyHost); + clientConfiguration.setProxyPort(Integer.parseInt(proxyPort)); + clientConfiguration.setProxyUsername(proxyUsername); + clientConfiguration.setProxyPassword(proxyPassword); + + Protocol awsProtocol = Protocol.HTTP; + + if ("HTTPS".equalsIgnoreCase(protocol)) { + awsProtocol = Protocol.HTTPS; + } + + clientConfiguration.setProtocol(awsProtocol); + + if (isNTLMProxy()) { + clientConfiguration.setProxyDomain(proxyDomain); + clientConfiguration.setProxyWorkstation(proxyWorkstation); + } + } + + public boolean isNTLMProxy() { + + boolean isNTLMProxy = false; + + if (getProxyHost() != null + && getProxyPort() != null + && getProxyDomain() != null + && getProxyWorkstation() != null) { + isNTLMProxy = true; + } + + return isNTLMProxy; + } + + public boolean isProxyConfigMode() { + + boolean isProxy = false; + + if (getProxyHost() != null && getProxyPort() != null) { + isProxy = true; + + try { + Integer.parseInt(getProxyPort()); + } catch (NumberFormatException nfe) { + isProxy = false; + } + } + + return isProxy; + 
} + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AWSProxy awsProxy = (AWSProxy) o; + + return proxyHost.equals(awsProxy.proxyHost) + && proxyPort.equals(awsProxy.proxyPort) + && protocol.equals(awsProxy.protocol); + } + + @Override + public int hashCode() { + int result = proxyHost.hashCode(); + result = 31 * result + proxyPort.hashCode() + protocol.hashCode(); + + return result; + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AddSpinnakerUserToUserAgentRequestHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AddSpinnakerUserToUserAgentRequestHandler.java similarity index 81% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AddSpinnakerUserToUserAgentRequestHandler.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AddSpinnakerUserToUserAgentRequestHandler.java index 2d2c246ea45..ba3fb978c2e 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/AddSpinnakerUserToUserAgentRequestHandler.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AddSpinnakerUserToUserAgentRequestHandler.java @@ -23,9 +23,11 @@ public class AddSpinnakerUserToUserAgentRequestHandler extends RequestHandler2 { @Override public AmazonWebServiceRequest beforeMarshalling(AmazonWebServiceRequest request) { - final String userAgent = String.format("spinnaker-user/%s spinnaker-executionId/%s", - AuthenticatedRequest.getSpinnakerUser().orElse("unknown"), - AuthenticatedRequest.getSpinnakerExecutionId().orElse("unknown")); + final String userAgent = + String.format( + "spinnaker-user/%s spinnaker-executionId/%s", + AuthenticatedRequest.getSpinnakerUser().orElse("unknown"), + AuthenticatedRequest.getSpinnakerExecutionId().orElse("unknown")); final AmazonWebServiceRequest cloned = request.clone(); diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonBasicCredentialsLoader.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonBasicCredentialsLoader.java new file mode 100644 index 00000000000..e427d82a003 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonBasicCredentialsLoader.java @@ -0,0 +1,225 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.SDKGlobalConfiguration; +import com.amazonaws.regions.Regions; +import com.amazonaws.util.CollectionUtils; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.clouddriver.aws.security.config.AmazonCredentialsParser; +import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig; +import com.netflix.spinnaker.credentials.Credentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.definition.BasicCredentialsLoader; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource; +import com.netflix.spinnaker.credentials.definition.CredentialsParser; +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; + +@Slf4j +public class AmazonBasicCredentialsLoader< + T extends AccountsConfiguration.Account, U extends NetflixAmazonCredentials> + extends BasicCredentialsLoader<T, U> { + protected final CredentialsConfig credentialsConfig; + protected final AccountsConfiguration accountsConfig; + protected final DefaultAccountConfigurationProperties defaultAccountConfigurationProperties; + protected String defaultEnvironment; + protected String defaultAccountType; + + public AmazonBasicCredentialsLoader( + CredentialsDefinitionSource<T> definitionSource, + CredentialsParser<T, U> parser, + CredentialsRepository<U> credentialsRepository, + CredentialsConfig credentialsConfig, + AccountsConfiguration accountsConfig, + DefaultAccountConfigurationProperties defaultAccountConfigurationProperties) { + super(definitionSource, parser, credentialsRepository); + this.credentialsConfig = credentialsConfig; + this.accountsConfig = accountsConfig; + this.defaultAccountConfigurationProperties = defaultAccountConfigurationProperties; + this.defaultEnvironment = + defaultAccountConfigurationProperties.getEnvironment() != null + ? defaultAccountConfigurationProperties.getEnvironment() + : defaultAccountConfigurationProperties.getEnv(); + this.defaultAccountType = + defaultAccountConfigurationProperties.getAccountType() != null + ? defaultAccountConfigurationProperties.getAccountType() + : defaultAccountConfigurationProperties.getEnv(); + if (!StringUtils.isEmpty(credentialsConfig.getAccessKeyId())) { + System.setProperty( + SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY, credentialsConfig.getAccessKeyId()); + } + if (!StringUtils.isEmpty(credentialsConfig.getSecretAccessKey())) { + System.setProperty( + SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY, + credentialsConfig.getSecretAccessKey()); + } + }
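+ + // Fallback sketch: when no accounts and no default assume role are configured, load() below + // synthesizes a single default account named after the configured environment and, absent any + // configured default regions, seeds us-east-1, us-west-1, us-west-2 and eu-west-1.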
+ + @Override + public void load() { + if (CollectionUtils.isNullOrEmpty(accountsConfig.getAccounts()) + && (StringUtils.isEmpty(credentialsConfig.getDefaultAssumeRole()))) { + accountsConfig.setAccounts( + Collections.singletonList( + new AccountsConfiguration.Account() { + { + setName(defaultAccountConfigurationProperties.getEnv()); + setEnvironment(defaultEnvironment); + setAccountType(defaultAccountType); + } + })); + if (CollectionUtils.isNullOrEmpty(credentialsConfig.getDefaultRegions())) { + List<Regions> regions = + new ArrayList<>( + Arrays.asList( + Regions.US_EAST_1, Regions.US_WEST_1, Regions.US_WEST_2, Regions.EU_WEST_1)); + credentialsConfig.setDefaultRegions( + regions.stream() + .map( + it -> + new CredentialsConfig.Region() { + { + setName(it.getName()); + } + }) + .collect(Collectors.toList())); + } + } + this.parse(definitionSource.getCredentialsDefinitions()); + } + + @Override + protected void parse(Collection<T> definitions) { + log.info("attempting to parse {} amazon accounts provided as input", definitions.size()); + Set<String> definitionNames = definitions.stream().map(T::getName).collect(Collectors.toSet()); + + // TODO: make a change in BasicCredentialsLoader in kork to separate this out into a new method + log.info( + "removing all the accounts from the credentials repository that are not present in the provided input"); + credentialsRepository.getAll().stream() + .map(Credentials::getName) + .filter(name -> !definitionNames.contains(name)) + .peek(loadedDefinitions::remove) + .forEach(credentialsRepository::delete); + + // adding this after the delete from credentials repository step. This is to ensure that if the + // new input does not + // contain any accounts, then that should be reflected in the credentials repository + // appropriately + if (definitionNames.size() == 0) { + log.info("did not find any aws account definitions to parse"); + return; + } + + List<U> toApply = new ArrayList<>(); + if (credentialsConfig.getLoadAccounts().isMultiThreadingEnabled()) { + log.info( + "Multi-threading is enabled for loading aws accounts. Using {} threads, with timeout: {}s", + credentialsConfig.getLoadAccounts().getNumberOfThreads(), + credentialsConfig.getLoadAccounts().getTimeoutInSeconds()); + toApply = multiThreadedParseAccounts(definitions); + } else { + log.info("Multi-threading is disabled. AWS accounts will be loaded serially"); + for (T definition : definitions) { + if (!loadedDefinitions.containsKey(definition.getName())) { + U cred = parser.parse(definition); + if (cred != null) { + toApply.add(cred); + // Add to loaded definition now in case we trigger another parse before this one + // finishes + loadedDefinitions.put(definition.getName(), definition); + } + } else if (!loadedDefinitions.get(definition.getName()).equals(definition)) { + U cred = parser.parse(definition); + if (cred != null) { + toApply.add(cred); + loadedDefinitions.put(definition.getName(), definition); + } + } + } + } + + log.info("saving aws accounts in the credentials repository"); + Stream<U> stream = parallel ?
toApply.parallelStream() : toApply.stream(); + stream.forEach(credentialsRepository::save); + log.info("parsed and saved {} aws accounts", credentialsRepository.getAll().size()); + } + + /** + * parses aws accounts using a configurable fixed thread pool. + * + * @param definitions - the list of aws accounts to parse + * @return - a list of parsed aws accounts + */ + private List<U> multiThreadedParseAccounts(Collection<T> definitions) { + List<U> toApply = new ArrayList<>(); + final ExecutorService executorService = + Executors.newFixedThreadPool( + credentialsConfig.getLoadAccounts().getNumberOfThreads(), + new ThreadFactoryBuilder() + .setNameFormat(AmazonCredentialsParser.class.getSimpleName() + "-%d") + .build()); + + final ArrayList<Future<U>> futures = new ArrayList<>(definitions.size()); + for (T definition : definitions) { + if (!loadedDefinitions.containsKey(definition.getName()) + || !loadedDefinitions.get(definition.getName()).equals(definition)) { + futures.add(executorService.submit(() -> parser.parse(definition))); + } + } + for (Future<U> future : futures) { + try { + U cred = + future.get(credentialsConfig.getLoadAccounts().getTimeoutInSeconds(), TimeUnit.SECONDS); + if (cred != null) { + toApply.add(cred); + // Add to loaded definition now in case we trigger another parse before this one finishes + definitions.stream() + .filter(t -> t.getName().equals(cred.getName())) + .findFirst() + .ifPresentOrElse( + definition -> loadedDefinitions.put(cred.getName(), definition), + () -> + log.warn( + "could not find the parsed aws account: '{}' in the input credential definitions.", + cred.getName())); + } + } catch (Exception e) { + // failure to load an account should not prevent clouddriver from starting up. + log.error("Failed to load aws account: ", e); + } + } + try { + // attempt to shutdown the executor service + executorService.shutdownNow(); + } catch (Exception e) { + log.error("Failed to shutdown the aws account loading executor service.", e); + } + + return toApply; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonClientProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonClientProvider.java new file mode 100644 index 00000000000..87e9a68bd78 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonClientProvider.java @@ -0,0 +1,685 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.retry.PredefinedRetryPolicies; +import com.amazonaws.retry.RetryPolicy; +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingClientBuilder; +import com.amazonaws.services.autoscaling.AmazonAutoScaling; +import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder; +import com.amazonaws.services.cloudformation.AmazonCloudFormation; +import com.amazonaws.services.cloudformation.AmazonCloudFormationClientBuilder; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; +import com.amazonaws.services.ecr.AmazonECR; +import com.amazonaws.services.ecr.AmazonECRClientBuilder; +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.AmazonECSClientBuilder; +import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancingClientBuilder; +import com.amazonaws.services.identitymanagement.AmazonIdentityManagement; +import com.amazonaws.services.identitymanagement.AmazonIdentityManagementClientBuilder; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.AWSLambdaClientBuilder; +import com.amazonaws.services.route53.AmazonRoute53; +import com.amazonaws.services.route53.AmazonRoute53ClientBuilder; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; +import com.amazonaws.services.servicediscovery.AWSServiceDiscovery; +import com.amazonaws.services.servicediscovery.AWSServiceDiscoveryClientBuilder; +import com.amazonaws.services.shield.AWSShield; +import com.amazonaws.services.shield.AWSShieldClientBuilder; +import com.amazonaws.services.simpleworkflow.AmazonSimpleWorkflow; +import com.amazonaws.services.simpleworkflow.AmazonSimpleWorkflowClientBuilder; +import com.amazonaws.services.sns.AmazonSNS; +import com.amazonaws.services.sns.AmazonSNSClientBuilder; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSClientBuilder; +import com.amazonaws.services.support.AWSSupport; +import com.amazonaws.services.support.AWSSupportClientBuilder; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.*; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfigurationBuilder; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.apache.http.client.HttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; + +/** Provider of Amazon SDK Clients that can read through Edda. 
*/ +public class AmazonClientProvider { + + /** + * This constant (as null) indicates that whatever region is current from the AWS SDK's + * perspective should be used. + * + * <p>The region to use will be resolved dynamically by {@link SpinnakerAwsRegionProvider} which + * supports all the standard SDK means of explicitly specifying the current region (environment + * variable, instance profile, instance metadata). + */ + public static final String DEFAULT_REGION = null; + + private final AwsSdkClientSupplier awsSdkClientSupplier; + private final ProxyHandlerBuilder proxyHandlerBuilder; + + public static class Builder { + private HttpClient httpClient; + private ObjectMapper objectMapper; + private EddaTemplater eddaTemplater; + private RetryPolicy.RetryCondition retryCondition; + private RetryPolicy.BackoffStrategy backoffStrategy; + private Integer maxErrorRetry; + private List<RequestHandler2> requestHandlers = new ArrayList<>(); + private AWSProxy proxy; + private EddaTimeoutConfig eddaTimeoutConfig; + private int maxConnections = 200; + private int maxConnectionsPerRoute = 20; + private boolean useGzip = true; + private boolean addSpinnakerUserToUserAgent = false; + private ServiceLimitConfiguration serviceLimitConfiguration = + new ServiceLimitConfigurationBuilder().build(); + private Registry registry = new NoopRegistry(); + + public Builder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + public Builder proxy(AWSProxy proxy) { + this.proxy = proxy; + return this; + } + + public Builder objectMapper(ObjectMapper objectMapper) { + this.objectMapper = objectMapper; + return this; + } + + public Builder eddaTemplater(EddaTemplater eddaTemplater) { + this.eddaTemplater = eddaTemplater; + return this; + } + + public Builder retryCondition(RetryPolicy.RetryCondition retryCondition) { + this.retryCondition = retryCondition; + return this; + } + + public Builder backoffStrategy(RetryPolicy.BackoffStrategy backoffStrategy) { + this.backoffStrategy = backoffStrategy; + return this; + } + + public Builder maxErrorRetry(Integer maxErrorRetry) { + this.maxErrorRetry = maxErrorRetry; + return this; + } + + public Builder requestHandler(RequestHandler2 requestHandler) { + this.requestHandlers.add(requestHandler); + return this; + } + + public Builder eddaTimeoutConfig(EddaTimeoutConfig eddaTimeoutConfig) { + this.eddaTimeoutConfig = eddaTimeoutConfig; + return this; + } + + public Builder maxConnections(int maxConnections) { + this.maxConnections = maxConnections; + return this; + } + + public Builder maxConnectionsPerRoute(int maxConnectionsPerRoute) { + this.maxConnectionsPerRoute = maxConnectionsPerRoute; + return this; + } + + public Builder useGzip(boolean useGzip) { + this.useGzip = useGzip; + return this; + } + + public Builder serviceLimitConfiguration(ServiceLimitConfiguration serviceLimitConfiguration) { + this.serviceLimitConfiguration = serviceLimitConfiguration; + return this; + } + + public Builder registry(Registry registry) { + this.registry = registry; + return this; + } + + public Builder addSpinnakerUserToUserAgent(boolean addSpinnakerUserToUserAgent) { + this.addSpinnakerUserToUserAgent = addSpinnakerUserToUserAgent; + return this; + }
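+ + // Illustrative assembly (values are examples only, not mandated defaults): + // AmazonClientProvider provider = + // new AmazonClientProvider.Builder() + // .maxConnections(200) + // .useGzip(true) + // .addSpinnakerUserToUserAgent(true) + // .build();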
+ + public AmazonClientProvider build() { + HttpClient client = this.httpClient; + if (client == null) { + HttpClientBuilder builder = HttpClientBuilder.create(); + builder.setMaxConnTotal(this.maxConnections); + builder.setMaxConnPerRoute(this.maxConnectionsPerRoute); + client = builder.build(); + } + + ObjectMapper mapper = + this.objectMapper == null + ? AmazonObjectMapperConfigurer.createConfigured() + : this.objectMapper; + EddaTemplater templater = + this.eddaTemplater == null ? EddaTemplater.defaultTemplater() : this.eddaTemplater; + RetryPolicy policy = buildPolicy(); + AWSProxy proxy = this.proxy; + EddaTimeoutConfig eddaTimeoutConfig = + this.eddaTimeoutConfig == null ? EddaTimeoutConfig.DEFAULT : this.eddaTimeoutConfig; + + final List<RequestHandler2> requestHandlers; + if (addSpinnakerUserToUserAgent) { + requestHandlers = new ArrayList<>(this.requestHandlers.size() + 1); + requestHandlers.addAll(this.requestHandlers); + requestHandlers.add(new AddSpinnakerUserToUserAgentRequestHandler()); + } else { + requestHandlers = this.requestHandlers; + } + + return new AmazonClientProvider( + client, + mapper, + templater, + policy, + requestHandlers, + proxy, + eddaTimeoutConfig, + useGzip, + serviceLimitConfiguration, + registry); + } + + private RetryPolicy buildPolicy() { + if (retryCondition == null && backoffStrategy == null) { + if (maxErrorRetry == null) { + return PredefinedRetryPolicies.getDefaultRetryPolicy(); + } + return new RetryPolicy( + PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, + PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, + maxErrorRetry, + true); + } + RetryPolicy.RetryCondition condition = + this.retryCondition == null + ? PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION + : this.retryCondition; + RetryPolicy.BackoffStrategy strategy = + this.backoffStrategy == null + ? PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY + : this.backoffStrategy; + int retry = + this.maxErrorRetry == null + ? PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY + : this.maxErrorRetry; + + return new RetryPolicy(condition, strategy, retry, true); + } + } + + public AmazonClientProvider() { + this((HttpClient) null); + } + + public AmazonClientProvider(HttpClient httpClient) { + this(httpClient, AmazonObjectMapperConfigurer.createConfigured()); + } + + public AmazonClientProvider(ObjectMapper objectMapper) { + this(null, objectMapper); + } + + public AmazonClientProvider(HttpClient httpClient, ObjectMapper objectMapper) { + this( + httpClient == null ? HttpClients.createDefault() : httpClient, + objectMapper == null ? AmazonObjectMapperConfigurer.createConfigured() : objectMapper, + EddaTemplater.defaultTemplater(), + PredefinedRetryPolicies.getDefaultRetryPolicy(), + Collections.emptyList(), + null, + EddaTimeoutConfig.DEFAULT, + true, + new ServiceLimitConfigurationBuilder().build(), + new NoopRegistry()); + } + + public AmazonClientProvider( + HttpClient httpClient, + ObjectMapper objectMapper, + EddaTemplater eddaTemplater, + RetryPolicy retryPolicy, + List<RequestHandler2> requestHandlers, + AWSProxy proxy, + EddaTimeoutConfig eddaTimeoutConfig, + boolean useGzip, + ServiceLimitConfiguration serviceLimitConfiguration, + Registry registry) { + RateLimiterSupplier rateLimiterSupplier = + new RateLimiterSupplier(serviceLimitConfiguration, registry); + this.awsSdkClientSupplier = + new AwsSdkClientSupplier( + rateLimiterSupplier, registry, retryPolicy, requestHandlers, proxy, useGzip); + this.proxyHandlerBuilder = + new ProxyHandlerBuilder( + awsSdkClientSupplier, + httpClient, + objectMapper, + eddaTemplater, + eddaTimeoutConfig, + registry); + } + + /** + * When edda serves the request, the last-modified time is captured from the response metadata. + * + * @return the last-modified timestamp, if available.
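+ * + * <p>Illustrative read-through sketch (hypothetical variable names): + * + * <pre>{@code + * AmazonEC2 ec2 = provider.getAmazonEC2(credentials, "us-east-1"); + * ec2.describeInstances(); // may be served by Edda when configured + * Long lastModified = provider.getLastModified(); + * }</pre>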
+ */ + public Long getLastModified() { + return AmazonClientInvocationHandler.lastModified.get(); + } + + public AmazonEC2 getAmazonEC2(NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonEC2(amazonCredentials, region, false); + } + + public AmazonEC2 getAmazonEC2( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonEC2.class, AmazonEC2ClientBuilder.class, amazonCredentials, region, skipEdda); + } + + public AmazonEC2 getAmazonEC2(AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonEC2ClientBuilder.class, + AmazonEC2.class, + "UNSPECIFIED_ACCOUNT", + awsCredentialsProvider, + region); + } + + public AmazonEC2 getAmazonEC2( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonEC2ClientBuilder.class, AmazonEC2.class, accountName, awsCredentialsProvider, region); + } + + public AmazonECS getAmazonEcs( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonECS.class, AmazonECSClientBuilder.class, amazonCredentials, region, skipEdda); + } + + public AmazonIdentityManagement getIam( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonIdentityManagement.class, + AmazonIdentityManagementClientBuilder.class, + amazonCredentials, + region, + skipEdda); + // return awsSdkClientSupplier.getClient(AmazonIdentityManagementClientBuilder.class, + // AmazonIdentityManagement.class, accountName, awsCredentialsProvider, region); + } + + // public AWSLambda getAmazonLambda(NetflixAmazonCredentials amazonCredentials, String region) { + // return proxyHandlerBuilder.getProxyHandler( + // AWSLambda.class, AWSLambdaClientBuilder.class, amazonCredentials, region); + // } + + /** + * It's VERY important that this be called (currently) with only one standardized + * ClientConfiguration. That initial client config will be used for the cache for ALL requests + * there-after, despite any changes to that clientConfiguration. As such it's recommend to make + * use of the AbstractLambdaProvider class which loads the config parameters and timeouts. 
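+ * + * <p>Sketch of a conforming call site (hypothetical names; a single shared configuration is + * reused for every call): + * + * <pre>{@code + * ClientConfiguration sharedLambdaConfig = new ClientConfiguration(); + * AWSLambda lambda = provider.getAmazonLambda(credentials, sharedLambdaConfig, "us-west-2"); + * }</pre>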
+ * + * @param amazonCredentials + * @param clientConfiguration + * @param region + * @return + */ + public AWSLambda getAmazonLambda( + NetflixAmazonCredentials amazonCredentials, + ClientConfiguration clientConfiguration, + String region) { + return proxyHandlerBuilder.getProxyHandler( + AWSLambda.class, + AWSLambdaClientBuilder.class, + amazonCredentials, + region, + clientConfiguration); + } + + // public AWSLambda getAmazonLambda( + // String accountName, + // ClientConfiguration clientConfiguration, + // AWSCredentialsProvider awsCredentialsProvider, + // String region) { + // return awsSdkClientSupplier.getClient( + // AWSLambdaClientBuilder.class, + // AWSLambda.class, + // accountName, + // awsCredentialsProvider, + // region, + // clientConfiguration); + // } + + // public AWSLambdaAsync getAmazonLambdaAsync( + // NetflixAmazonCredentials amazonCredentials, String region) { + // return proxyHandlerBuilder.getProxyHandler( + // AWSLambdaAsync.class, AWSLambdaAsyncClientBuilder.class, amazonCredentials, region); + // } + // + // public AWSLambdaAsync getAmazonLambdaAsync( + // String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + // return awsSdkClientSupplier.getClient( + // AWSLambdaAsyncClientBuilder.class, + // AWSLambdaAsync.class, + // accountName, + // awsCredentialsProvider, + // region); + // } + + public AmazonS3 getAmazonS3(NetflixAmazonCredentials amazonCredentials, String region) { + return proxyHandlerBuilder.getProxyHandler( + AmazonS3.class, AmazonS3ClientBuilder.class, amazonCredentials, region, true); + } + + public AmazonCloudFormation getAmazonCloudFormation( + NetflixAmazonCredentials amazonCredentials, String region) { + return proxyHandlerBuilder.getProxyHandler( + AmazonCloudFormation.class, + AmazonCloudFormationClientBuilder.class, + amazonCredentials, + region, + true); + } + + public AmazonAutoScaling getAutoScaling( + NetflixAmazonCredentials amazonCredentials, String region) { + return getAutoScaling(amazonCredentials, region, false); + } + + public AmazonAutoScaling getAutoScaling( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonAutoScaling.class, + AmazonAutoScalingClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AmazonAutoScaling getAutoScaling( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonAutoScalingClientBuilder.class, + AmazonAutoScaling.class, + accountName, + awsCredentialsProvider, + region); + } + + public AmazonRoute53 getAmazonRoute53(NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonRoute53(amazonCredentials, region, false); + } + + public AmazonRoute53 getAmazonRoute53( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonRoute53.class, AmazonRoute53ClientBuilder.class, amazonCredentials, region, skipEdda); + } + + public AmazonRoute53 getAmazonRoute53( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonRoute53ClientBuilder.class, + AmazonRoute53.class, + accountName, + awsCredentialsProvider, + region); + } + + public AmazonElasticLoadBalancing getAmazonElasticLoadBalancing( + NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonElasticLoadBalancing(amazonCredentials, region, false); + 
} + + public AmazonElasticLoadBalancing getAmazonElasticLoadBalancing( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonElasticLoadBalancing.class, + AmazonElasticLoadBalancingClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AmazonElasticLoadBalancing getAmazonElasticLoadBalancing( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonElasticLoadBalancingClientBuilder.class, + AmazonElasticLoadBalancing.class, + accountName, + awsCredentialsProvider, + region); + } + + public com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing + getAmazonElasticLoadBalancingV2(NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonElasticLoadBalancingV2(amazonCredentials, region, false); + } + + public com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing + getAmazonElasticLoadBalancingV2( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing.class, + com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing + getAmazonElasticLoadBalancingV2( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancingClientBuilder.class, + com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing.class, + accountName, + awsCredentialsProvider, + region); + } + + public AmazonSimpleWorkflow getAmazonSimpleWorkflow( + NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonSimpleWorkflow(amazonCredentials, region, false); + } + + public AmazonSimpleWorkflow getAmazonSimpleWorkflow( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonSimpleWorkflow.class, + AmazonSimpleWorkflowClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AmazonSimpleWorkflow getAmazonSimpleWorkflow( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonSimpleWorkflowClientBuilder.class, + AmazonSimpleWorkflow.class, + accountName, + awsCredentialsProvider, + region); + } + + public AmazonCloudWatch getAmazonCloudWatch( + NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonCloudWatch(amazonCredentials, region, false); + } + + public AmazonCloudWatch getAmazonCloudWatch( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonCloudWatch.class, + AmazonCloudWatchClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AmazonCloudWatch getAmazonCloudWatch( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonCloudWatchClientBuilder.class, + AmazonCloudWatch.class, + accountName, + awsCredentialsProvider, + region); + } + + public AmazonCloudWatch getCloudWatch(NetflixAmazonCredentials amazonCredentials, String region) 
{ + return getAmazonCloudWatch(amazonCredentials, region); + } + + public AmazonCloudWatch getCloudWatch( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return getAmazonCloudWatch(amazonCredentials, region, skipEdda); + } + + public AmazonSNS getAmazonSNS(NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonSNS(amazonCredentials, region, false); + } + + public AmazonSNS getAmazonSNS( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonSNS.class, AmazonSNSClientBuilder.class, amazonCredentials, region, skipEdda); + } + + public AmazonSNS getAmazonSNS( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonSNSClientBuilder.class, AmazonSNS.class, accountName, awsCredentialsProvider, region); + } + + public AmazonSQS getAmazonSQS(NetflixAmazonCredentials amazonCredentials, String region) { + return proxyHandlerBuilder.getProxyHandler( + AmazonSQS.class, AmazonSQSClientBuilder.class, amazonCredentials, region, false); + } + + public AmazonIdentityManagement getAmazonIdentityManagement( + NetflixAmazonCredentials amazonCredentials, String region) { + return getAmazonIdentityManagement(amazonCredentials, region, false); + } + + public AmazonIdentityManagement getAmazonIdentityManagement( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonIdentityManagement.class, + AmazonIdentityManagementClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AmazonIdentityManagement getAmazonIdentityManagement( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AmazonIdentityManagementClientBuilder.class, + AmazonIdentityManagement.class, + accountName, + awsCredentialsProvider, + region); + } + + public AWSShield getAmazonShield(NetflixAmazonCredentials amazonCredentials, String region) { + return proxyHandlerBuilder.getProxyHandler( + AWSShield.class, AWSShieldClientBuilder.class, amazonCredentials, region, true); + } + + public AWSShield getAmazonShield( + String accountName, AWSCredentialsProvider awsCredentialsProvider, String region) { + return awsSdkClientSupplier.getClient( + AWSShieldClientBuilder.class, AWSShield.class, accountName, awsCredentialsProvider, region); + } + + public AWSApplicationAutoScaling getAmazonApplicationAutoScaling( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AWSApplicationAutoScaling.class, + AWSApplicationAutoScalingClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AmazonECR getAmazonEcr( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AmazonECR.class, AmazonECRClientBuilder.class, amazonCredentials, region, skipEdda); + } + + public AWSSecretsManager getAmazonSecretsManager( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return proxyHandlerBuilder.getProxyHandler( + AWSSecretsManager.class, + AWSSecretsManagerClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AWSServiceDiscovery getAmazonServiceDiscovery( + NetflixAmazonCredentials amazonCredentials, String region, boolean skipEdda) { + return 
proxyHandlerBuilder.getProxyHandler( + AWSServiceDiscovery.class, + AWSServiceDiscoveryClientBuilder.class, + amazonCredentials, + region, + skipEdda); + } + + public AWSSupport getAmazonSupport(NetflixAmazonCredentials amazonCredentials, String region) { + return proxyHandlerBuilder.getProxyHandler( + AWSSupport.class, AWSSupportClientBuilder.class, amazonCredentials, region, true); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentials.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentials.java new file mode 100644 index 00000000000..980156ad60b --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentials.java @@ -0,0 +1,359 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import static java.util.Objects.requireNonNull; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Basic set of Amazon credentials that will use a provided {@link + * com.amazonaws.auth.AWSCredentialsProvider} to resolve account credentials. If none is provided, + * the {@link com.amazonaws.auth.DefaultAWSCredentialsProviderChain} will be used. The account's + * active regions and availability zones can be specified as well.
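+ * + * <p>For example (hypothetical account values): + * + * <pre>{@code + * AmazonCredentials credentials = + * AmazonCredentials.fromAWSCredentials( + * "prod", "prod", "main", new DefaultAWSCredentialsProviderChain(), clientProvider); + * }</pre>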
+ */ +public class AmazonCredentials extends AbstractAccountCredentials { + private static final String CLOUD_PROVIDER = "aws"; + + private final String name; + private final String environment; + private final String accountType; + private final String accountId; + private final String defaultKeyPair; + private final Boolean enabled; + private final List requiredGroupMembership; + private final Permissions permissions; + private final List regions; + private final List defaultSecurityGroups; + private final List lifecycleHooks; + private final boolean allowPrivateThirdPartyImages; + private final AWSCredentialsProvider credentialsProvider; + + public static AmazonCredentials fromAWSCredentials( + String name, + String environment, + String accountType, + AWSCredentialsProvider credentialsProvider, + AmazonClientProvider amazonClientProvider) { + return fromAWSCredentials( + name, environment, accountType, null, credentialsProvider, amazonClientProvider); + } + + public static AmazonCredentials fromAWSCredentials( + String name, + String environment, + String accountType, + String defaultKeyPair, + AWSCredentialsProvider credentialsProvider, + AmazonClientProvider amazonClientProvider) { + AWSAccountInfoLookup lookup = + new DefaultAWSAccountInfoLookup(credentialsProvider, amazonClientProvider); + final String accountId = lookup.findAccountId(); + final List regions = lookup.listRegions(); + return new AmazonCredentials( + name, + environment, + accountType, + accountId, + defaultKeyPair, + true, + regions, + null, + null, + null, + null, + false, + credentialsProvider); + } + + public AmazonCredentials( + @JsonProperty("name") String name, + @JsonProperty("environment") String environment, + @JsonProperty("accountType") String accountType, + @JsonProperty("accountId") String accountId, + @JsonProperty("defaultKeyPair") String defaultKeyPair, + @JsonProperty("enabled") Boolean enabled, + @JsonProperty("regions") List regions, + @JsonProperty("defaultSecurityGroups") List defaultSecurityGroups, + @JsonProperty("requiredGroupMembership") List requiredGroupMembership, + @JsonProperty("permissions") Permissions permissions, + @JsonProperty("lifecycleHooks") List lifecycleHooks, + @JsonProperty("allowPrivateThirdPartyImages") Boolean allowPrivateThirdPartyImages) { + this( + name, + environment, + accountType, + accountId, + defaultKeyPair, + enabled, + regions, + defaultSecurityGroups, + requiredGroupMembership, + permissions, + lifecycleHooks, + allowPrivateThirdPartyImages, + null); + } + + public AmazonCredentials(AmazonCredentials source, AWSCredentialsProvider credentialsProvider) { + this( + source.getName(), + source.getEnvironment(), + source.getAccountType(), + source.getAccountId(), + source.getDefaultKeyPair(), + source.isEnabled(), + source.getRegions(), + source.getDefaultSecurityGroups(), + source.getRequiredGroupMembership(), + source.getPermissions(), + source.getLifecycleHooks(), + source.getAllowPrivateThirdPartyImages(), + credentialsProvider); + } + + AmazonCredentials( + String name, + String environment, + String accountType, + String accountId, + String defaultKeyPair, + Boolean enabled, + List regions, + List defaultSecurityGroups, + List requiredGroupMembership, + Permissions permissions, + List lifecycleHooks, + boolean allowPrivateThirdPartyImages, + AWSCredentialsProvider credentialsProvider) { + this.name = requireNonNull(name, "name"); + this.environment = requireNonNull(environment, "environment"); + this.accountType = requireNonNull(accountType, "accountType"); 
+ this.accountId = requireNonNull(accountId, "accountId"); + this.defaultKeyPair = defaultKeyPair; + this.enabled = enabled != null ? enabled : true; + this.regions = + regions == null + ? Collections.emptyList() + : Collections.unmodifiableList(regions); + this.defaultSecurityGroups = + defaultSecurityGroups == null ? null : Collections.unmodifiableList(defaultSecurityGroups); + this.requiredGroupMembership = + requiredGroupMembership == null + ? Collections.emptyList() + : Collections.unmodifiableList(requiredGroupMembership); + this.permissions = permissions == null ? Permissions.EMPTY : permissions; + this.lifecycleHooks = + lifecycleHooks == null + ? Collections.emptyList() + : Collections.unmodifiableList(lifecycleHooks); + this.allowPrivateThirdPartyImages = allowPrivateThirdPartyImages; + this.credentialsProvider = credentialsProvider; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getEnvironment() { + return environment; + } + + @Override + public String getAccountType() { + return accountType; + } + + @Override + public String getAccountId() { + return accountId; + } + + public String getDefaultKeyPair() { + return defaultKeyPair; + } + + public List getRegions() { + return regions; + } + + public List getDefaultSecurityGroups() { + return defaultSecurityGroups; + } + + public List getLifecycleHooks() { + return lifecycleHooks; + } + + public boolean getAllowPrivateThirdPartyImages() { + return allowPrivateThirdPartyImages; + } + + @JsonIgnore + public AWSCredentialsProvider getCredentialsProvider() { + return credentialsProvider; + } + + @Override + @JsonIgnore + public AWSCredentials getCredentials() { + return credentialsProvider.getCredentials(); + } + + @Override + public String getCloudProvider() { + return CLOUD_PROVIDER; + } + + @Override + public List getRequiredGroupMembership() { + return requiredGroupMembership; + } + + public Permissions getPermissions() { + return this.permissions; + } + + @Override + public boolean isEnabled() { + return enabled; + } + + public static class AWSRegion { + + private final String name; + private final Boolean deprecated; + private final List availabilityZones; + private final List preferredZones; + + public AWSRegion( + @JsonProperty("name") String name, + @JsonProperty("availabilityZones") List availabilityZones, + @JsonProperty("preferredZones") List preferredZones, + @JsonProperty("deprecated") Boolean deprecated) { + this.name = Objects.requireNonNull(name, "name"); + this.availabilityZones = + availabilityZones == null + ? Collections.emptyList() + : Collections.unmodifiableList(availabilityZones); + List preferred = + (preferredZones == null || preferredZones.isEmpty()) + ? 
new ArrayList<>(this.availabilityZones) + : new ArrayList<>(preferredZones); + preferred.retainAll(this.availabilityZones); + this.preferredZones = Collections.unmodifiableList(preferred); + + if (deprecated == null) { + deprecated = Boolean.FALSE; + } + this.deprecated = deprecated; + } + + public AWSRegion(String name, List availabilityZones) { + this(name, availabilityZones, Collections.emptyList(), null); + } + + public String getName() { + return name; + } + + public Collection getAvailabilityZones() { + return availabilityZones; + } + + public Collection getPreferredZones() { + return preferredZones; + } + + public Boolean getDeprecated() { + return deprecated; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AWSRegion awsRegion = (AWSRegion) o; + + return name.equals(awsRegion.name) + && availabilityZones.equals(awsRegion.availabilityZones) + && preferredZones.equals(awsRegion.preferredZones); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + availabilityZones.hashCode() + preferredZones.hashCode(); + return result; + } + } + + public static class LifecycleHook { + + private final String roleARN; + private final String notificationTargetARN; + private final String lifecycleTransition; + private final Integer heartbeatTimeout; + private final String defaultResult; + + public LifecycleHook( + @JsonProperty("roleARN") String roleARN, + @JsonProperty("notificationTargetARN") String notificationTargetARN, + @JsonProperty("lifecycleTransition") String lifecycleTransition, + @JsonProperty("heartbeatTimeout") Integer heartbeatTimeout, + @JsonProperty("defaultResult") String defaultResult) { + this.roleARN = roleARN; + this.notificationTargetARN = notificationTargetARN; + this.lifecycleTransition = lifecycleTransition; + this.heartbeatTimeout = heartbeatTimeout; + this.defaultResult = defaultResult; + } + + public String getRoleARN() { + return roleARN; + } + + public String getNotificationTargetARN() { + return notificationTargetARN; + } + + public String getLifecycleTransition() { + return lifecycleTransition; + } + + public Integer getHeartbeatTimeout() { + return heartbeatTimeout; + } + + public String getDefaultResult() { + return defaultResult; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandler.java new file mode 100644 index 00000000000..2e673a23f7e --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandler.java @@ -0,0 +1,239 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandler.java
new file mode 100644
index 00000000000..2e673a23f7e
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandler.java
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.security;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.cats.agent.Agent;
+import com.netflix.spinnaker.cats.agent.AgentProvider;
+import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider;
+import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties;
+import com.netflix.spinnaker.clouddriver.aws.edda.EddaApiFactory;
+import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider;
+import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider;
+import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider;
+import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonCachingAgentFilter;
+import com.netflix.spinnaker.clouddriver.aws.provider.agent.ImageCachingAgent;
+import com.netflix.spinnaker.clouddriver.aws.provider.agent.ReservationReportCachingAgent;
+import com.netflix.spinnaker.clouddriver.aws.provider.config.ProviderHelpers;
+import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3DataProvider;
+import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults;
+import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.jetbrains.annotations.NotNull;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Lazy;
+import org.springframework.stereotype.Component;
+
+@Component
+@Lazy
+@Slf4j
+@RequiredArgsConstructor
+public class AmazonCredentialsLifecycleHandler
+    implements CredentialsLifecycleHandler<NetflixAmazonCredentials> {
+  private final AwsCleanupProvider awsCleanupProvider;
+  private final AwsInfrastructureProvider awsInfrastructureProvider;
+  private final AwsProvider awsProvider;
+  private final AmazonCloudProvider amazonCloudProvider;
+  private final AmazonClientProvider amazonClientProvider;
+  private final AmazonS3DataProvider amazonS3DataProvider;
+
+  private final AwsConfigurationProperties awsConfigurationProperties;
+  private final ObjectMapper objectMapper;
+  private final @Qualifier("amazonObjectMapper") ObjectMapper amazonObjectMapper;
+  private final EddaApiFactory eddaApiFactory;
+  private final ApplicationContext ctx;
+  private final Registry registry;
+  private final Optional<ExecutorService> reservationReportPool;
+  private final Optional<Collection<AgentProvider>> agentProviders;
+  private final EddaTimeoutConfig eddaTimeoutConfig;
+  private final AmazonCachingAgentFilter amazonCachingAgentFilter;
+  private final DynamicConfigService dynamicConfigService;
+  private final DeployDefaults deployDefaults;
+  private final CredentialsRepository<NetflixAmazonCredentials>
+      credentialsRepository; // Circular dependency.
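For orientation, a minimal sketch of the contract this handler implements; the interface and driver names below are illustrative stand-ins for the real com.netflix.spinnaker.credentials types, which are not part of this patch:

import java.util.List;

// Illustrative stand-in for CredentialsLifecycleHandler<T>.
interface LifecycleEvents<T> {
  void credentialsAdded(T credentials);
  void credentialsUpdated(T credentials);
  void credentialsDeleted(T credentials);
}

// Illustrative driver: a credentials repository diffs its current accounts
// against the configuration source and emits one callback per changed account.
class AccountSync<T> {
  private final LifecycleEvents<T> handler;

  AccountSync(LifecycleEvents<T> handler) {
    this.handler = handler;
  }

  void apply(List<T> added, List<T> updated, List<T> deleted) {
    added.forEach(handler::credentialsAdded);     // schedule caching agents
    updated.forEach(handler::credentialsUpdated); // unschedule, then reschedule
    deleted.forEach(handler::credentialsDeleted); // hand shared agents to another account
  }
}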
+ protected Set publicRegions = new HashSet<>(); + protected Set awsInfraRegions = new HashSet<>(); + protected boolean reservationReportCachingAgentScheduled = false; + protected boolean hasPreviouslyScheduledCleanupAgents = false; + + @Override + public void credentialsAdded(@NotNull NetflixAmazonCredentials credentials) { + scheduleAgents(credentials); + scheduleReservationReportCachingAgent(); + } + + @Override + public void credentialsUpdated(@NotNull NetflixAmazonCredentials credentials) { + unscheduleAgents(credentials); + scheduleAgents(credentials); + } + + @Override + public void credentialsDeleted(@NotNull NetflixAmazonCredentials credentials) { + replaceCurrentImageCachingAgent(credentials); + unscheduleAgents(credentials); + } + + private void replaceCurrentImageCachingAgent(NetflixAmazonCredentials credentials) { + List currentImageCachingAgents = + awsProvider.getAgents().stream() + .filter( + agent -> + agent.handlesAccount(credentials.getName()) + && agent instanceof ImageCachingAgent + && ((ImageCachingAgent) agent).getIncludePublicImages()) + .map(agent -> (ImageCachingAgent) agent) + .collect(Collectors.toList()); + + for (ImageCachingAgent imageCachingAgent : currentImageCachingAgents) { + NetflixAmazonCredentials replacementCredentials = + credentialsRepository.getAll().stream() + .filter(cred -> !cred.getName().equals(credentials.getName())) + .filter( + cred -> + cred.getRegions().stream() + .map(AmazonCredentials.AWSRegion::getName) + .collect(Collectors.toSet()) + .contains(imageCachingAgent.getRegion())) + .findFirst() + .orElse(null); + if (replacementCredentials != null) { + awsProvider.addAgents( + Collections.singletonList( + new ImageCachingAgent( + amazonClientProvider, + replacementCredentials, + imageCachingAgent.getRegion(), + objectMapper, + registry, + true, + dynamicConfigService))); + continue; + } + publicRegions.remove(imageCachingAgent.getRegion()); + } + } + + private void unscheduleAgents(NetflixAmazonCredentials credentials) { + awsInfrastructureProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + awsCleanupProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + awsProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + } + + private void scheduleAgents(NetflixAmazonCredentials credentials) { + scheduleAWSProviderAgents(credentials); + scheduleAwsInfrastructureProviderAgents(credentials); + scheduleAwsCleanupAgents(credentials); + } + + private void scheduleAwsInfrastructureProviderAgents(NetflixAmazonCredentials credentials) { + ProviderHelpers.BuildResult result = + ProviderHelpers.buildAwsInfrastructureAgents( + credentials, + awsInfrastructureProvider, + credentialsRepository, + amazonClientProvider, + amazonObjectMapper, + registry, + eddaTimeoutConfig, + this.awsInfraRegions); + awsInfrastructureProvider.addAgents(result.getAgents()); + this.awsInfraRegions.addAll(result.getRegionsToAdd()); + } + + private void scheduleAWSProviderAgents(NetflixAmazonCredentials credentials) { + ProviderHelpers.BuildResult buildResult = + ProviderHelpers.buildAwsProviderAgents( + credentials, + credentialsRepository, + amazonClientProvider, + objectMapper, + registry, + eddaTimeoutConfig, + amazonCachingAgentFilter, + awsProvider, + amazonCloudProvider, + dynamicConfigService, + eddaApiFactory, + reservationReportPool, + agentProviders, + ctx, + amazonS3DataProvider, + publicRegions); + + awsProvider.addAgents(buildResult.getAgents()); + 
this.publicRegions.addAll(buildResult.getRegionsToAdd()); + awsProvider.synchronizeHealthAgents(); + } + + private void scheduleAwsCleanupAgents(NetflixAmazonCredentials credentials) { + List newlyAddedAgents = + ProviderHelpers.buildAwsCleanupAgents( + credentials, + credentialsRepository, + amazonClientProvider, + awsCleanupProvider, + deployDefaults, + awsConfigurationProperties, + hasPreviouslyScheduledCleanupAgents); + + awsCleanupProvider.addAgents(newlyAddedAgents); + + log.info( + "The following cleanup agents have been added: {} (awsCleanupProvider.getAgentScheduler: {})", + newlyAddedAgents, + awsCleanupProvider.getAgentScheduler()); + + hasPreviouslyScheduledCleanupAgents = true; + } + + private void scheduleReservationReportCachingAgent() { + if (reservationReportPool.isPresent() && !reservationReportCachingAgentScheduled) { + for (Agent agent : awsProvider.getAgents()) { + if (agent instanceof ReservationReportCachingAgent) { + reservationReportCachingAgentScheduled = true; + return; + } + } + awsProvider.addAgents( + Collections.singleton( + new ReservationReportCachingAgent( + registry, + amazonClientProvider, + amazonS3DataProvider, + credentialsRepository, + objectMapper, + reservationReportPool.get(), + ctx))); + reservationReportCachingAgentScheduled = true; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AssumeRoleAmazonCredentials.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AssumeRoleAmazonCredentials.java new file mode 100644 index 00000000000..09fa36e4b82 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/AssumeRoleAmazonCredentials.java @@ -0,0 +1,180 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.List; +import java.util.Objects; + +/** + * Provides an Amazon credential pack that uses Assume Role + * (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-assume-role.html) to provide API access to + * the account. 
This class allows you to use your credentials, provided via the supplied {@link
+ * com.amazonaws.auth.AWSCredentialsProvider}, to act as the target account ID with the privileges
+ * described by the assumeRole role.
+ */
+public class AssumeRoleAmazonCredentials extends AmazonCredentials {
+  static final String DEFAULT_SESSION_NAME = "Spinnaker";
+
+  static AWSCredentialsProvider createSTSCredentialsProvider(
+      AWSCredentialsProvider credentialsProvider,
+      String accountId,
+      String assumeRole,
+      String sessionName,
+      String externalId) {
+    String assumeRoleValue = Objects.requireNonNull(assumeRole, "assumeRole");
+    if (!assumeRoleValue.startsWith("arn:")) {
+      /*
+       * GovCloud and China regions need the full ARN passed because of their differing formats
+       * (GovCloud: arn:aws-us-gov:iam, China: arn:aws-cn:iam). A longer-term fix is to have
+       * separate providers for aws-ec2-gov and aws-ec2-cn, since their IAM realms are separate
+       * from the standard AWS cloud.
+       */
+      assumeRoleValue =
+          String.format(
+              "arn:aws:iam::%s:%s",
+              Objects.requireNonNull(accountId, "accountId"), assumeRoleValue);
+    }
+    return credentialsProvider == null
+        ? null
+        : new NetflixSTSAssumeRoleSessionCredentialsProvider(
+            credentialsProvider,
+            assumeRoleValue,
+            Objects.requireNonNull(sessionName, "sessionName"),
+            accountId,
+            externalId);
+  }
+
+  /** The role to assume on the target account. */
+  private final String assumeRole;
+
+  private final String sessionName;
+
+  private final String externalId;
+
+  public AssumeRoleAmazonCredentials(
+      @JsonProperty("name") String name,
+      @JsonProperty("environment") String environment,
+      @JsonProperty("accountType") String accountType,
+      @JsonProperty("accountId") String accountId,
+      @JsonProperty("defaultKeyPair") String defaultKeyPair,
+      @JsonProperty("enabled") Boolean enabled,
+      @JsonProperty("regions") List<AWSRegion> regions,
+      @JsonProperty("defaultSecurityGroups") List<String> defaultSecurityGroups,
+      @JsonProperty("requiredGroupMembership") List<String> requiredGroupMembership,
+      @JsonProperty("permissions") Permissions permissions,
+      @JsonProperty("lifecycleHooks") List<LifecycleHook> lifecycleHooks,
+      @JsonProperty("allowPrivateThirdPartyImages") boolean allowPrivateThirdPartyImages,
+      @JsonProperty("assumeRole") String assumeRole,
+      @JsonProperty("sessionName") String sessionName,
+      @JsonProperty("externalId") String externalId) {
+    this(
+        name,
+        environment,
+        accountType,
+        accountId,
+        defaultKeyPair,
+        enabled,
+        regions,
+        defaultSecurityGroups,
+        requiredGroupMembership,
+        permissions,
+        lifecycleHooks,
+        allowPrivateThirdPartyImages,
+        null,
+        assumeRole,
+        sessionName,
+        externalId);
+  }
+
+  public AssumeRoleAmazonCredentials(
+      AssumeRoleAmazonCredentials copy, AWSCredentialsProvider credentialsProvider) {
+    this(
+        copy.getName(),
+        copy.getEnvironment(),
+        copy.getAccountType(),
+        copy.getAccountId(),
+        copy.getDefaultKeyPair(),
+        copy.isEnabled(),
+        copy.getRegions(),
+        copy.getDefaultSecurityGroups(),
+        copy.getRequiredGroupMembership(),
+        copy.getPermissions(),
+        copy.getLifecycleHooks(),
+        copy.getAllowPrivateThirdPartyImages(),
+        credentialsProvider,
+        copy.getAssumeRole(),
+        copy.getSessionName(),
+        copy.getExternalId());
+  }
+
+  AssumeRoleAmazonCredentials(
+      String name,
+      String environment,
+      String accountType,
+      String accountId,
+      String defaultKeyPair,
+      Boolean enabled,
+      List<AWSRegion> regions,
+      List<String> defaultSecurityGroups,
+      List<String> requiredGroupMembership,
+      Permissions permissions,
+      List<LifecycleHook> lifecycleHooks,
+      boolean allowPrivateThirdPartyImages,
+      AWSCredentialsProvider
credentialsProvider, + String assumeRole, + String sessionName, + String externalId) { + super( + name, + environment, + accountType, + accountId, + defaultKeyPair, + enabled, + regions, + defaultSecurityGroups, + requiredGroupMembership, + permissions, + lifecycleHooks, + allowPrivateThirdPartyImages, + createSTSCredentialsProvider( + credentialsProvider, + accountId, + assumeRole, + sessionName == null ? DEFAULT_SESSION_NAME : sessionName, + externalId)); + this.assumeRole = assumeRole; + this.sessionName = sessionName == null ? DEFAULT_SESSION_NAME : sessionName; + this.externalId = externalId; + } + + public String getAssumeRole() { + return assumeRole; + } + + public String getSessionName() { + return sessionName; + } + + public String getExternalId() { + return externalId; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/DefaultAWSAccountInfoLookup.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/DefaultAWSAccountInfoLookup.java new file mode 100644 index 00000000000..5eb6899f361 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/DefaultAWSAccountInfoLookup.java @@ -0,0 +1,139 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
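The DefaultAWSAccountInfoLookup class that follows can recover the account ID even when the DescribeSecurityGroups call is denied, by extracting the caller's ARN from the AccessDenied message. A small sketch of that extraction using the same pattern (the error message text is illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AccountIdFromErrorDemo {
  // Same pattern as IAM_ARN_PATTERN below: captures the numeric account ID from
  // an IAM/STS ARN across the aws, aws-cn and aws-us-gov partitions.
  private static final Pattern IAM_ARN_PATTERN =
      Pattern.compile(".*?arn:aws(?:-cn|-us-gov)?:(?:iam|sts)::(\\d+):.*");

  public static void main(String[] args) {
    // Hypothetical AccessDenied message of the general shape AWS returns.
    String message =
        "User: arn:aws:sts::123456789012:assumed-role/Spinnaker/session is not "
            + "authorized to perform: ec2:DescribeSecurityGroups";
    Matcher matcher = IAM_ARN_PATTERN.matcher(message);
    if (matcher.matches()) {
      System.out.println(matcher.group(1)); // 123456789012
    }
  }
}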
+ */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.*; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials.AWSRegion; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class DefaultAWSAccountInfoLookup implements AWSAccountInfoLookup { + private static final String DEFAULT_SECURITY_GROUP_NAME = "default"; + private static final Pattern IAM_ARN_PATTERN = + Pattern.compile(".*?arn:aws(?:-cn|-us-gov)?:(?:iam|sts)::(\\d+):.*"); + + private final AWSCredentialsProvider credentialsProvider; + private final AmazonClientProvider amazonClientProvider; + + public DefaultAWSAccountInfoLookup( + AWSCredentialsProvider credentialsProvider, AmazonClientProvider amazonClientProvider) { + this.credentialsProvider = credentialsProvider; + this.amazonClientProvider = amazonClientProvider; + } + + @Override + public String findAccountId() { + AmazonEC2 ec2 = + amazonClientProvider.getAmazonEC2(credentialsProvider, AmazonClientProvider.DEFAULT_REGION); + try { + List vpcs = ec2.describeVpcs().getVpcs(); + boolean supportsByName = false; + if (vpcs.isEmpty()) { + supportsByName = true; + } else { + for (Vpc vpc : vpcs) { + if (vpc.getIsDefault()) { + supportsByName = true; + break; + } + } + } + + DescribeSecurityGroupsRequest request = new DescribeSecurityGroupsRequest(); + if (supportsByName) { + request.withGroupNames(DEFAULT_SECURITY_GROUP_NAME); + } + DescribeSecurityGroupsResult result = ec2.describeSecurityGroups(request); + + for (SecurityGroup sg : result.getSecurityGroups()) { + // if there is a vpcId or it is the default security group it won't be an EC2 cross account + // group + if ((sg.getVpcId() != null && sg.getVpcId().length() > 0) + || DEFAULT_SECURITY_GROUP_NAME.equals(sg.getGroupName())) { + return sg.getOwnerId(); + } + } + + throw new IllegalArgumentException("Unable to lookup accountId with provided credentials"); + } catch (AmazonServiceException ase) { + if ("AccessDenied".equals(ase.getErrorCode())) { + String message = ase.getMessage(); + Matcher matcher = IAM_ARN_PATTERN.matcher(message); + if (matcher.matches()) { + return matcher.group(1); + } + } + throw ase; + } + } + + @Override + public List listAvailabilityZones(String regionName) { + List regions = listRegions(regionName); + if (regions.isEmpty()) { + throw new IllegalArgumentException("Unknown region: " + regionName); + } + return new ArrayList<>(regions.get(0).getAvailabilityZones()); + } + + public List listRegions(String... 
regionNames) { + return listRegions(Arrays.asList(regionNames)); + } + + @Override + public List listRegions(Collection regionNames) { + Set nameSet = new HashSet<>(regionNames); + AmazonEC2 ec2 = + amazonClientProvider.getAmazonEC2(credentialsProvider, AmazonClientProvider.DEFAULT_REGION); + + DescribeRegionsRequest request = new DescribeRegionsRequest(); + if (!nameSet.isEmpty()) { + request.withRegionNames(regionNames); + } + List regions = ec2.describeRegions(request).getRegions(); + if (regions.size() != nameSet.size()) { + Set missingSet = new HashSet<>(nameSet); + for (Region region : regions) { + missingSet.remove(region.getRegionName()); + } + throw new IllegalArgumentException( + "Unknown region" + (missingSet.size() > 1 ? "s: " : ": ") + missingSet); + } + List awsRegions = new ArrayList<>(regions.size()); + for (Region region : regions) { + AmazonEC2 regionalEc2 = + amazonClientProvider.getAmazonEC2(credentialsProvider, region.getRegionName()); + List azs = regionalEc2.describeAvailabilityZones().getAvailabilityZones(); + List availabilityZoneNames = new ArrayList<>(azs.size()); + for (AvailabilityZone az : azs) { + availabilityZoneNames.add(az.getZoneName()); + } + + awsRegions.add(new AWSRegion(region.getRegionName(), availabilityZoneNames)); + } + return awsRegions; + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/EddaTemplater.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/EddaTemplater.java similarity index 100% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/EddaTemplater.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/EddaTemplater.java diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/EddaTimeoutConfig.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/EddaTimeoutConfig.java similarity index 85% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/EddaTimeoutConfig.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/EddaTimeoutConfig.java index 0abd1a4c4d7..d74260170bc 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/EddaTimeoutConfig.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/EddaTimeoutConfig.java @@ -40,21 +40,25 @@ public class EddaTimeoutConfig { private final Set disabledRegions; private boolean albEnabled; - public EddaTimeoutConfig(long retryBase, - int backoffMillis, - int maxAttempts, - int connectTimeout, - int connectionRequestTimeout, - int socketTimeout, - Collection disabledRegions, - boolean albEnabled) { + public EddaTimeoutConfig( + long retryBase, + int backoffMillis, + int maxAttempts, + int connectTimeout, + int connectionRequestTimeout, + int socketTimeout, + Collection disabledRegions, + boolean albEnabled) { this.retryBase = retryBase; this.backoffMillis = backoffMillis; this.maxAttempts = maxAttempts; this.connectTimeout = connectTimeout; this.connectionRequestTimeout = connectionRequestTimeout; this.socketTimeout = socketTimeout; - this.disabledRegions = disabledRegions == null || disabledRegions.isEmpty() ? Collections.emptySet() : Collections.unmodifiableSet(new LinkedHashSet<>(disabledRegions)); + this.disabledRegions = + disabledRegions == null || disabledRegions.isEmpty() + ? 
Collections.emptySet()
+            : Collections.unmodifiableSet(new LinkedHashSet<>(disabledRegions));
     this.albEnabled = albEnabled;
   }
 
@@ -113,15 +117,14 @@ public Builder() {
 
     public EddaTimeoutConfig build() {
       return new EddaTimeoutConfig(
-        retryBase,
-        backoffMillis,
-        maxAttempts,
-        connectTimeout,
-        connectionRequestTimeout,
-        socketTimeout,
-        disabledRegions,
-        albEnabled
-      );
+          retryBase,
+          backoffMillis,
+          maxAttempts,
+          connectTimeout,
+          connectionRequestTimeout,
+          socketTimeout,
+          disabledRegions,
+          albEnabled);
     }
 
     public long getRetryBase() {
diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixAmazonCredentials.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixAmazonCredentials.java
new file mode 100644
index 00000000000..edc9c9aeaa9
--- /dev/null
+++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixAmazonCredentials.java
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2015 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.security;
+
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.netflix.spinnaker.fiat.model.resources.Permissions;
+import java.util.List;
+
+/**
+ * An implementation of {@link AmazonCredentials} that is decorated with Netflix concepts like
+ * Edda, Discovery, and Front50.
+ */
+public class NetflixAmazonCredentials extends AmazonCredentials {
+  private final String edda;
+  private final boolean eddaEnabled;
+  private final String discovery;
+  private final boolean discoveryEnabled;
+  private final String front50;
+  private final boolean front50Enabled;
+  private final String bastionHost;
+  private final boolean bastionEnabled;
+  private final boolean shieldEnabled;
+  private final boolean lambdaEnabled;
+
+  public NetflixAmazonCredentials(
+      @JsonProperty("name") String name,
+      @JsonProperty("environment") String environment,
+      @JsonProperty("accountType") String accountType,
+      @JsonProperty("accountId") String accountId,
+      @JsonProperty("defaultKeyPair") String defaultKeyPair,
+      @JsonProperty("enabled") Boolean enabled,
+      @JsonProperty("regions") List<AWSRegion> regions,
+      @JsonProperty("defaultSecurityGroups") List<String> defaultSecurityGroups,
+      @JsonProperty("requiredGroupMembership") List<String> requiredGroupMembership,
+      @JsonProperty("permissions") Permissions permissions,
+      @JsonProperty("lifecycleHooks") List<LifecycleHook> lifecycleHooks,
+      @JsonProperty("allowPrivateThirdPartyImages") boolean allowPrivateThirdPartyImages,
+      @JsonProperty("edda") String edda,
+      @JsonProperty("eddaEnabled") Boolean eddaEnabled,
+      @JsonProperty("discovery") String discovery,
+      @JsonProperty("discoveryEnabled") Boolean discoveryEnabled,
+      @JsonProperty("front50") String front50,
+      @JsonProperty("front50Enabled") Boolean front50Enabled,
+      @JsonProperty("bastionHost") String bastionHost,
+      @JsonProperty("bastionEnabled") Boolean bastionEnabled,
+
@JsonProperty("shieldEnabled") Boolean shieldEnabled, + @JsonProperty("lambdaEnabled") Boolean lambdaEnabled) { + this( + name, + environment, + accountType, + accountId, + defaultKeyPair, + enabled, + regions, + defaultSecurityGroups, + requiredGroupMembership, + permissions, + lifecycleHooks, + allowPrivateThirdPartyImages, + null, + edda, + eddaEnabled, + discovery, + discoveryEnabled, + front50, + front50Enabled, + bastionHost, + bastionEnabled, + shieldEnabled, + lambdaEnabled); + } + + private static boolean flagValue(String serviceUrl, Boolean flag) { + return (!(serviceUrl == null || serviceUrl.trim().length() == 0) + && (flag != null ? flag : true)); + } + + public NetflixAmazonCredentials( + NetflixAmazonCredentials copy, AWSCredentialsProvider credentialsProvider) { + this( + copy.getName(), + copy.getEnvironment(), + copy.getAccountType(), + copy.getAccountId(), + copy.getDefaultKeyPair(), + copy.isEnabled(), + copy.getRegions(), + copy.getDefaultSecurityGroups(), + copy.getRequiredGroupMembership(), + copy.getPermissions(), + copy.getLifecycleHooks(), + copy.getAllowPrivateThirdPartyImages(), + credentialsProvider, + copy.getEdda(), + copy.getEddaEnabled(), + copy.getDiscovery(), + copy.getDiscoveryEnabled(), + copy.getFront50(), + copy.getFront50Enabled(), + copy.getBastionHost(), + copy.getBastionEnabled(), + copy.getShieldEnabled(), + copy.getLambdaEnabled()); + } + + NetflixAmazonCredentials( + String name, + String environment, + String accountType, + String accountId, + String defaultKeyPair, + Boolean enabled, + List regions, + List defaultSecurityGroups, + List requiredGroupMembership, + Permissions permissions, + List lifecycleHooks, + boolean allowPrivateThirdPartyImages, + AWSCredentialsProvider credentialsProvider, + String edda, + Boolean eddaEnabled, + String discovery, + Boolean discoveryEnabled, + String front50, + Boolean front50Enabled, + String bastionHost, + Boolean bastionEnabled, + Boolean shieldEnabled, + Boolean lambdaEnabled) { + super( + name, + environment, + accountType, + accountId, + defaultKeyPair, + enabled, + regions, + defaultSecurityGroups, + requiredGroupMembership, + permissions, + lifecycleHooks, + allowPrivateThirdPartyImages, + credentialsProvider); + this.edda = edda; + this.eddaEnabled = flagValue(edda, eddaEnabled); + this.discovery = discovery; + this.discoveryEnabled = flagValue(discovery, discoveryEnabled); + this.front50 = front50; + this.front50Enabled = flagValue(front50, front50Enabled); + this.bastionHost = bastionHost; + this.bastionEnabled = flagValue(bastionHost, bastionEnabled); + this.shieldEnabled = (shieldEnabled == null) ? false : shieldEnabled; + this.lambdaEnabled = (lambdaEnabled == null) ? 
false : lambdaEnabled; + } + + public String getEdda() { + return edda; + } + + public String getDiscovery() { + return discovery; + } + + public String getFront50() { + return front50; + } + + public String getBastionHost() { + return bastionHost; + } + + public boolean getEddaEnabled() { + return eddaEnabled; + } + + public boolean getDiscoveryEnabled() { + return discoveryEnabled; + } + + public boolean getFront50Enabled() { + return front50Enabled; + } + + public boolean getBastionEnabled() { + return bastionEnabled; + } + + public boolean getShieldEnabled() { + return shieldEnabled; + } + + public boolean getLambdaEnabled() { + return lambdaEnabled; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixAssumeRoleAmazonCredentials.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixAssumeRoleAmazonCredentials.java new file mode 100644 index 00000000000..8df211dd38f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixAssumeRoleAmazonCredentials.java @@ -0,0 +1,194 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.List; + +/** @see AssumeRoleAmazonCredentials */ +public class NetflixAssumeRoleAmazonCredentials extends NetflixAmazonCredentials { + + /** The role to assume on the target account. 
*/ + private final String assumeRole; + + private final String sessionName; + + private final String externalId; + + public NetflixAssumeRoleAmazonCredentials( + @JsonProperty("name") String name, + @JsonProperty("environment") String environment, + @JsonProperty("accountType") String accountType, + @JsonProperty("accountId") String accountId, + @JsonProperty("defaultKeyPair") String defaultKeyPair, + @JsonProperty("enabled") Boolean enabled, + @JsonProperty("regions") List regions, + @JsonProperty("defaultSecurityGroups") List defaultSecurityGroups, + @JsonProperty("requiredGroupMembership") List requiredGroupMembership, + @JsonProperty("permissions") Permissions permissions, + @JsonProperty("lifecycleHooks") List lifecycleHooks, + @JsonProperty("allowPrivateThirdPartyImages") boolean allowPrivateThirdPartyImages, + @JsonProperty("edda") String edda, + @JsonProperty("eddaEnabled") Boolean eddaEnabled, + @JsonProperty("discovery") String discovery, + @JsonProperty("discoveryEnabled") Boolean discoveryEnabled, + @JsonProperty("front50") String front50, + @JsonProperty("front50Enabled") Boolean front50Enabled, + @JsonProperty("bastionHost") String bastionHost, + @JsonProperty("bastionEnabled") Boolean bastionEnabled, + @JsonProperty("shieldEnabled") Boolean shieldEnabled, + @JsonProperty("assumeRole") String assumeRole, + @JsonProperty("sessionName") String sessionName, + @JsonProperty("lambdaEnabled") Boolean lambdaEnabled, + @JsonProperty("externalId") String externalId) { + + this( + name, + environment, + accountType, + accountId, + defaultKeyPair, + enabled, + regions, + defaultSecurityGroups, + requiredGroupMembership, + permissions, + lifecycleHooks, + allowPrivateThirdPartyImages, + null, + edda, + eddaEnabled, + discovery, + discoveryEnabled, + front50, + front50Enabled, + bastionHost, + bastionEnabled, + shieldEnabled, + assumeRole, + sessionName, + lambdaEnabled, + externalId); + } + + public NetflixAssumeRoleAmazonCredentials( + NetflixAssumeRoleAmazonCredentials copy, AWSCredentialsProvider credentialsProvider) { + this( + copy.getName(), + copy.getEnvironment(), + copy.getAccountType(), + copy.getAccountId(), + copy.getDefaultKeyPair(), + copy.isEnabled(), + copy.getRegions(), + copy.getDefaultSecurityGroups(), + copy.getRequiredGroupMembership(), + copy.getPermissions(), + copy.getLifecycleHooks(), + copy.getAllowPrivateThirdPartyImages(), + credentialsProvider, + copy.getEdda(), + copy.getEddaEnabled(), + copy.getDiscovery(), + copy.getDiscoveryEnabled(), + copy.getFront50(), + copy.getFront50Enabled(), + copy.getBastionHost(), + copy.getBastionEnabled(), + copy.getShieldEnabled(), + copy.getAssumeRole(), + copy.getSessionName(), + copy.getLambdaEnabled(), + copy.getExternalId()); + } + + NetflixAssumeRoleAmazonCredentials( + String name, + String environment, + String accountType, + String accountId, + String defaultKeyPair, + Boolean enabled, + List regions, + List defaultSecurityGroups, + List requiredGroupMembership, + Permissions permissions, + List lifecycleHooks, + boolean allowPrivateThirdPartyImages, + AWSCredentialsProvider credentialsProvider, + String edda, + Boolean eddaEnabled, + String discovery, + Boolean discoveryEnabled, + String front50, + Boolean front50Enabled, + String bastionHost, + Boolean bastionEnabled, + Boolean shieldEnabled, + String assumeRole, + String sessionName, + Boolean lambdaEnabled, + String externalId) { + super( + name, + environment, + accountType, + accountId, + defaultKeyPair, + enabled, + regions, + defaultSecurityGroups, + 
requiredGroupMembership, + permissions, + lifecycleHooks, + allowPrivateThirdPartyImages, + AssumeRoleAmazonCredentials.createSTSCredentialsProvider( + credentialsProvider, + accountId, + assumeRole, + sessionName == null ? AssumeRoleAmazonCredentials.DEFAULT_SESSION_NAME : sessionName, + externalId), + edda, + eddaEnabled, + discovery, + discoveryEnabled, + front50, + front50Enabled, + bastionHost, + bastionEnabled, + shieldEnabled, + lambdaEnabled); + this.assumeRole = assumeRole; + this.sessionName = + sessionName == null ? AssumeRoleAmazonCredentials.DEFAULT_SESSION_NAME : sessionName; + this.externalId = externalId; + } + + public String getAssumeRole() { + return assumeRole; + } + + public String getSessionName() { + return sessionName; + } + + public String getExternalId() { + return externalId; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixSTSAssumeRoleSessionCredentialsProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixSTSAssumeRoleSessionCredentialsProvider.java new file mode 100644 index 00000000000..be22543ff6f --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/NetflixSTSAssumeRoleSessionCredentialsProvider.java @@ -0,0 +1,83 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
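Both assume-role credential classes above route through AssumeRoleAmazonCredentials.createSTSCredentialsProvider, which qualifies a bare role path into a full standard-partition ARN and passes full ARNs through untouched (the GovCloud/China case). A short sketch of just that expansion, with a hypothetical account ID:

import java.util.Objects;

public class AssumeRoleArnDemo {
  static String qualify(String accountId, String assumeRole) {
    // Full ARNs (e.g. arn:aws-us-gov:iam::...) pass through unchanged;
    // bare paths like "role/SpinnakerManaged" get the standard-partition prefix.
    if (assumeRole.startsWith("arn:")) {
      return assumeRole;
    }
    return String.format(
        "arn:aws:iam::%s:%s", Objects.requireNonNull(accountId, "accountId"), assumeRole);
  }

  public static void main(String[] args) {
    System.out.println(qualify("123456789012", "role/SpinnakerManaged"));
    // -> arn:aws:iam::123456789012:role/SpinnakerManaged
    System.out.println(qualify("123456789012", "arn:aws-cn:iam::123456789012:role/SpinnakerManaged"));
    // -> unchanged
  }
}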
+ */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSSessionCredentials; +import com.amazonaws.auth.AWSSessionCredentialsProvider; +import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider; +import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; +import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient; +import com.netflix.spinnaker.clouddriver.aws.security.sdkclient.SpinnakerAwsRegionProvider; +import java.io.Closeable; + +public class NetflixSTSAssumeRoleSessionCredentialsProvider + implements AWSSessionCredentialsProvider, Closeable { + + private final String accountId; + private final STSAssumeRoleSessionCredentialsProvider delegate; + + public NetflixSTSAssumeRoleSessionCredentialsProvider( + AWSCredentialsProvider longLivedCredentialsProvider, + String roleArn, + String roleSessionName, + String accountId, + String externalId) { + this.accountId = accountId; + + var chain = new SpinnakerAwsRegionProvider(); + var region = chain.getRegion(); + + var stsClientBuilder = + AWSSecurityTokenServiceClient.builder().withCredentials(longLivedCredentialsProvider); + + if (roleArn.contains("aws-us-gov")) { + stsClientBuilder.withEndpointConfiguration( + new EndpointConfiguration("sts.us-gov-west-1.amazonaws.com", region)); + } else if (roleArn.contains("aws-cn")) { + stsClientBuilder.withEndpointConfiguration( + new EndpointConfiguration("sts.cn-north-1.amazonaws.com.cn", region)); + } else { + stsClientBuilder.withRegion(region); + } + + delegate = + new STSAssumeRoleSessionCredentialsProvider.Builder(roleArn, roleSessionName) + .withExternalId(externalId) + .withStsClient(stsClientBuilder.build()) + .build(); + } + + public String getAccountId() { + return accountId; + } + + @Override + public AWSSessionCredentials getCredentials() { + return delegate.getCredentials(); + } + + @Override + public void refresh() { + delegate.refresh(); + } + + @Override + public void close() { + delegate.close(); + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/StaticAWSAccountInfoLookup.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/StaticAWSAccountInfoLookup.java new file mode 100644 index 00000000000..52438f8bf14 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/StaticAWSAccountInfoLookup.java @@ -0,0 +1,67 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
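NetflixSTSAssumeRoleSessionCredentialsProvider above pins the STS endpoint for the GovCloud and China partitions, because those cannot be resolved from the role ARN by the default region chain; everything else defers to the caller's region. A reduced sketch of that branch logic (the fall-through endpoint string is an illustrative simplification of what withRegion resolves):

public class StsEndpointDemo {
  static String endpointFor(String roleArn, String region) {
    if (roleArn.contains("aws-us-gov")) {
      return "sts.us-gov-west-1.amazonaws.com"; // GovCloud partition
    }
    if (roleArn.contains("aws-cn")) {
      return "sts.cn-north-1.amazonaws.com.cn"; // China partition
    }
    return "sts." + region + ".amazonaws.com"; // illustrative regional default
  }

  public static void main(String[] args) {
    System.out.println(endpointFor("arn:aws-us-gov:iam::123456789012:role/x", "us-gov-west-1"));
    System.out.println(endpointFor("arn:aws:iam::123456789012:role/x", "us-west-2"));
  }
}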
+ */ + +package com.netflix.spinnaker.clouddriver.aws.security; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class StaticAWSAccountInfoLookup implements AWSAccountInfoLookup { + private final String accountId; + private final List knownRegions; + + public StaticAWSAccountInfoLookup( + String accountId, List knownRegions) { + this.accountId = accountId; + this.knownRegions = knownRegions; + } + + @Override + public String findAccountId() { + return accountId; + } + + @Override + public List listRegions(String... regionNames) { + return listRegions(Arrays.asList(regionNames)); + } + + @Override + public List listRegions(Collection regionNames) { + Set nameSet = new HashSet<>(regionNames); + List result = new ArrayList<>(nameSet.size()); + for (AmazonCredentials.AWSRegion region : knownRegions) { + if (nameSet.isEmpty() || nameSet.contains(region.getName())) { + result.add(region); + } + } + return result; + } + + @Override + public List listAvailabilityZones(String regionName) { + for (AmazonCredentials.AWSRegion region : knownRegions) { + if (region.getName().equals(regionName)) { + return new ArrayList<>(region.getAvailabilityZones()); + } + } + return null; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/AccountsConfiguration.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/AccountsConfiguration.java new file mode 100644 index 00000000000..9d6c3f111b4 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/AccountsConfiguration.java @@ -0,0 +1,285 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security.config; + +import static lombok.EqualsAndHashCode.Include; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.netflix.spinnaker.clouddriver.security.AccessControlledAccountDefinition; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import lombok.EqualsAndHashCode; + +/** + * Previously, accounts were stored in the {@link CredentialsConfig} class. If there are loads of + * accounts defined in a configuration properties file, then letting Spring boot read and bind them + * is a fairly time-consuming process. For 400 accounts, we observed that it took ~3-5m to load + * them, with the variance depending on the node configuration. + * + *

To speed this up, a feature-flagged change will be introduced in a follow-up PR that binds the
+ * properties manually instead of letting Spring Boot do it. This drops the load time to ~1-2s.
+ * The main drawback of manual binding is that every property to be bound must be declared
+ * explicitly: if accounts are defined in one configuration file and other properties in a
+ * different file, those other properties are not loaded unless they are explicitly bound to the
+ * target class as well.
+ *
+ *

By moving accounts out of the {@link CredentialsConfig} class, we don't need to do any manual + * binding for those other properties. And we do the manual binding for accounts only, which makes + * it more maintainable. + */ +public class AccountsConfiguration { + + @EqualsAndHashCode(onlyExplicitlyIncluded = true) + @JsonTypeName("aws") + public static class Account implements AccessControlledAccountDefinition { + @Include private String name; + @Include private String environment; + @Include private String accountType; + @Include private String accountId; + @Include private String defaultKeyPair; + @Include private Boolean enabled; + @Include private List regions; + @Include private List defaultSecurityGroups; + private List requiredGroupMembership; + @Include private Permissions.Builder permissions; + @Include private String edda; + @Include private Boolean eddaEnabled; + @Include private Boolean lambdaEnabled; + @Include private String discovery; + @Include private Boolean discoveryEnabled; + @Include private String front50; + @Include private Boolean front50Enabled; + @Include private String bastionHost; + @Include private Boolean bastionEnabled; + @Include private String assumeRole; + @Include private String sessionName; + @Include private String externalId; + @Include private List lifecycleHooks; + @Include private boolean allowPrivateThirdPartyImages; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getEnvironment() { + return environment; + } + + public void setEnvironment(String environment) { + this.environment = environment; + } + + public String getAccountType() { + return accountType; + } + + public void setAccountType(String accountType) { + this.accountType = accountType; + } + + public String getAccountId() { + return accountId; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } + + public String getDefaultKeyPair() { + return defaultKeyPair; + } + + public void setDefaultKeyPair(String defaultKeyPair) { + this.defaultKeyPair = defaultKeyPair; + } + + public Boolean getEnabled() { + return enabled; + } + + public void setEnabled(Boolean enabled) { + this.enabled = enabled; + } + + public List getRegions() { + return regions; + } + + public void setRegions(List regions) { + if (regions != null) { + regions.sort(Comparator.comparing(CredentialsConfig.Region::getName)); + } + this.regions = regions; + } + + public List getDefaultSecurityGroups() { + return defaultSecurityGroups; + } + + public void setDefaultSecurityGroups(List defaultSecurityGroups) { + if (defaultSecurityGroups != null) { + Collections.sort(defaultSecurityGroups); + } + this.defaultSecurityGroups = defaultSecurityGroups; + } + + public List getRequiredGroupMembership() { + return requiredGroupMembership; + } + + public void setRequiredGroupMembership(List requiredGroupMembership) { + this.requiredGroupMembership = requiredGroupMembership; + } + + public Permissions.Builder getPermissions() { + return permissions; + } + + public void setPermissions(Permissions.Builder permissions) { + this.permissions = permissions; + } + + public String getEdda() { + return edda; + } + + public void setEdda(String edda) { + this.edda = edda; + } + + public Boolean getEddaEnabled() { + return eddaEnabled; + } + + public void setEddaEnabled(Boolean eddaEnabled) { + this.eddaEnabled = eddaEnabled; + } + + public String getDiscovery() { + return discovery; + } + + public void setDiscovery(String discovery) { + 
this.discovery = discovery; + } + + public Boolean getDiscoveryEnabled() { + return discoveryEnabled; + } + + public void setDiscoveryEnabled(Boolean discoveryEnabled) { + this.discoveryEnabled = discoveryEnabled; + } + + public String getFront50() { + return front50; + } + + public void setFront50(String front50) { + this.front50 = front50; + } + + public Boolean getFront50Enabled() { + return front50Enabled; + } + + public void setFront50Enabled(Boolean front50Enabled) { + this.front50Enabled = front50Enabled; + } + + public String getBastionHost() { + return bastionHost; + } + + public void setBastionHost(String bastionHost) { + this.bastionHost = bastionHost; + } + + public Boolean getBastionEnabled() { + return bastionEnabled; + } + + public void setBastionEnabled(Boolean bastionEnabled) { + this.bastionEnabled = bastionEnabled; + } + + public String getAssumeRole() { + return assumeRole; + } + + public void setAssumeRole(String assumeRole) { + this.assumeRole = assumeRole; + } + + public String getSessionName() { + return sessionName; + } + + public void setSessionName(String sessionName) { + this.sessionName = sessionName; + } + + public String getExternalId() { + return externalId; + } + + public void setExternalId(String externalId) { + this.externalId = externalId; + } + + public List getLifecycleHooks() { + return lifecycleHooks; + } + + public void setLifecycleHooks(List lifecycleHooks) { + this.lifecycleHooks = lifecycleHooks; + } + + public Boolean getAllowPrivateThirdPartyImages() { + return allowPrivateThirdPartyImages; + } + + public void setAllowPrivateThirdPartyImages(Boolean allowPrivateThirdPartyImages) { + this.allowPrivateThirdPartyImages = allowPrivateThirdPartyImages; + } + + public Boolean getLambdaEnabled() { + return lambdaEnabled; + } + + public void setLambdaEnabled(Boolean lambdaEnabled) { + this.lambdaEnabled = lambdaEnabled; + } + } + + private List accounts; + + public List getAccounts() { + return accounts; + } + + public void setAccounts(List accounts) { + this.accounts = accounts; + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/AmazonCredentialsParser.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/AmazonCredentialsParser.java new file mode 100644 index 00000000000..05c5929e3e8 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/AmazonCredentialsParser.java @@ -0,0 +1,486 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
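The AccountsConfiguration javadoc above alludes to a feature-flagged manual binding that lands in a follow-up PR. A hypothetical sketch of what such a binding could look like with Spring Boot's Binder API, assuming the account list lives under the aws prefix (aws.accounts[0].name, ...) and that this helper sits in the same package as AccountsConfiguration:

import org.springframework.boot.context.properties.bind.Bindable;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.core.env.Environment;

class ManualAccountBinding {
  // Hypothetical: binds the aws.* subtree directly into AccountsConfiguration,
  // the approach the javadoc above describes as much faster for hundreds of
  // accounts than the generic configuration-properties pass.
  static AccountsConfiguration bind(Environment environment) {
    return Binder.get(environment)
        .bind("aws", Bindable.of(AccountsConfiguration.class))
        .orElseGet(AccountsConfiguration::new);
  }
}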
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.security.config; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.util.CollectionUtils; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Maps; +import com.netflix.spinnaker.clouddriver.aws.security.*; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration.Account; +import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig.Region; +import com.netflix.spinnaker.credentials.definition.CredentialsParser; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +@Slf4j +public class AmazonCredentialsParser< + U extends AccountsConfiguration.Account, V extends NetflixAmazonCredentials> + implements CredentialsParser { + + private final AWSCredentialsProvider credentialsProvider; + private final AWSAccountInfoLookup awsAccountInfoLookup; + private final Map templateValues; + private final CredentialTranslator credentialTranslator; + private final ObjectMapper objectMapper; + private final CredentialsConfig credentialsConfig; + private final AccountsConfiguration accountsConfig; + // this is used to cache all the regions found while parsing the accounts. This helps in + // reducing the number of API calls made since known regions are already cached. + private final ConcurrentMap regionCache; + private List defaultRegionNames; + + // this is a key used in the regions cache to indicate that default regions have been + // processed + private static final String DEFAULT_REGIONS_PROCESSED_KEY = "default_regions_processed"; + + public AmazonCredentialsParser( + AWSCredentialsProvider credentialsProvider, + AmazonClientProvider amazonClientProvider, + Class credentialsType, + CredentialsConfig credentialsConfig, + AccountsConfiguration accountsConfig) { + this( + credentialsProvider, + new DefaultAWSAccountInfoLookup(credentialsProvider, amazonClientProvider), + credentialsType, + credentialsConfig, + accountsConfig); + } + + public AmazonCredentialsParser( + AWSCredentialsProvider credentialsProvider, + AWSAccountInfoLookup awsAccountInfoLookup, + Class credentialsType, + CredentialsConfig credentialsConfig, + AccountsConfiguration accountsConfig) { + this.credentialsProvider = Objects.requireNonNull(credentialsProvider, "credentialsProvider"); + this.awsAccountInfoLookup = awsAccountInfoLookup; + this.templateValues = Collections.emptyMap(); + this.objectMapper = new ObjectMapper(); + this.credentialTranslator = findTranslator(credentialsType, this.objectMapper); + this.credentialsConfig = credentialsConfig; + this.accountsConfig = accountsConfig; + this.regionCache = Maps.newConcurrentMap(); + this.defaultRegionNames = new ArrayList<>(); + + // look in the credentials config to find default region names + if (!CollectionUtils.isNullOrEmpty(credentialsConfig.getDefaultRegions())) { + this.defaultRegionNames = + credentialsConfig.getDefaultRegions().stream() + .map(Region::getName) + .collect(Collectors.toList()); + } + } + + /** + * method to initialize the regions specified in an AWS account in the configuration. + * + *

A live call to get regions from the AWS API will be made only if: + * + *

- An account's region does not have availability zones defined and that region doesn't exist + * in the region cache. + */ + private List initRegions(List toInit) { + // initialize regions cache if it hasn't been done already. We do this here and not in + // toInit.isNullOrEmpty() because we need the default region values if a region in toInit list + // has no availability zones specified. + initializeRegionsCacheWithDefaultRegions(); + + if (CollectionUtils.isNullOrEmpty(toInit)) { + return getRegionsFromCache(this.defaultRegionNames); + } + + Map toInitByName = + toInit.stream().collect(Collectors.toMap(Region::getName, Function.identity())); + + List result = new ArrayList<>(toInit.size()); + List toLookup = new ArrayList<>(); + for (Region region : toInit) { + // only attempt to lookup regions that don't have any availability zones set in the config + if (CollectionUtils.isNullOrEmpty(region.getAvailabilityZones())) { + Region fromCache = regionCache.get(region.getName()); + // no need to lookup the region if it already exists in the cache + if (fromCache != null) { + result.add(fromCache); + } else { + toLookup.add(region.getName()); + } + } else { + result.add(region); + } + } + + // toLookup now contains the list of regions that we need to fetch from the cache and/or AWS API + if (!toLookup.isEmpty()) { + List resolved = getRegionsFromCache(toLookup); + for (Region region : resolved) { + Region src = find(toInit, region.getName()); + if (src != null) { + region.setPreferredZones(src.getPreferredZones()); + } + } + result.addAll(resolved); + } + + // make a clone of all regions such that modifications apply only to this specific instance (and + // not global defaults) + result = result.stream().map(Region::copyOf).collect(Collectors.toList()); + + for (Region r : result) { + Region toInitRegion = toInitByName.get(r.getName()); + if (toInitRegion != null && toInitRegion.getDeprecated() != null) { + r.setDeprecated(toInitRegion.getDeprecated()); + } + } + + return result; + } + + /** + * method to initialize the regions cache by processing the default regions which may have been + * specified in the configuration. + * + *

A live call to get regions from the AWS API will be made if either: + * + *

1. no default regions exist in the config, in which case all AWS regions are fetched; or + * + *

2. default regions exist in the config but they don't have availability zones defined + */ + private void initializeRegionsCacheWithDefaultRegions() { + // synchronized block is added here to handle the multi-threading case where multiple threads + // may attempt to initialize the regions cache at the same time when it is empty in the + // beginning. This block will reduce the number of api calls made to look up regions + // by only allowing one of the threads to do that. + synchronized (this) { + if (!regionCache.containsKey(DEFAULT_REGIONS_PROCESSED_KEY)) { + // if there are no default regions specified, then fetch all the AWS regions. + if (defaultRegionNames.isEmpty()) { + log.info("No default regions specified in the configuration. Retrieving all the regions"); + // save all the newly found regions in the cache + toRegion(awsAccountInfoLookup.listRegions()) + .forEach( + region -> { + log.info("adding region: {} to regions cache", region.getName()); + regionCache.putIfAbsent(region.getName(), region); + }); + } else { + List toLookup = new ArrayList<>(); + for (Region region : credentialsConfig.getDefaultRegions()) { + log.info("Found default region: {} in the configuration", region.getName()); + if (region.getAvailabilityZones() != null && !region.getAvailabilityZones().isEmpty()) { + log.info("Adding default region: {} to the regions cache", region.getName()); + regionCache.put(region.getName(), region); + } else { + toLookup.add(region.getName()); + } + } + + if (!toLookup.isEmpty()) { + log.info("Fetching default regions: {}", toLookup); + List newRegions = + awsAccountInfoLookup.listRegions(toLookup); + + // save all the newly found regions in the cache + toRegion(newRegions) + .forEach( + region -> { + log.info("adding default region: {} to the regions cache", region.getName()); + Region fromDefault = + find(credentialsConfig.getDefaultRegions(), region.getName()); + if (fromDefault != null) { + region.setPreferredZones(fromDefault.getPreferredZones()); + region.setDeprecated(fromDefault.getDeprecated()); + } + regionCache.put(region.getName(), region); + }); + } + } + // this helps us know that we have processed default regions. The value here doesn't matter. 
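The method above guards the one-time region lookup with a synchronized block and records completion via a sentinel key in the ConcurrentMap, so concurrent parsers don't issue duplicate AWS API calls. The same idiom in isolation (names illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class OnceInitializedCache {
  private static final String INITIALIZED_KEY = "__initialized__";
  private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();

  // First caller runs the expensive load (e.g. listing regions from the AWS API);
  // later callers see the sentinel and return immediately. Only the key's
  // presence matters, never its value.
  void ensureInitialized(Runnable expensiveLoad) {
    synchronized (this) {
      if (!cache.containsKey(INITIALIZED_KEY)) {
        expensiveLoad.run();
        cache.put(INITIALIZED_KEY, "done");
      }
    }
  }
}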
+ regionCache.put(DEFAULT_REGIONS_PROCESSED_KEY, new Region()); + } + } + } + + private static Region find(List src, String name) { + if (src != null) { + for (Region r : src) { + if (r.getName().equals(name)) { + return r; + } + } + } + return null; + } + + private List getRegionsFromCache(final List regionNames) { + // if no region names are provided, return everything from the cache except the + // DEFAULT_REGIONS_PROCESSED_KEY + if (regionNames.isEmpty()) { + return regionCache.entrySet().stream() + .filter(entry -> !entry.getKey().equals(DEFAULT_REGIONS_PROCESSED_KEY)) + .map(Map.Entry::getValue) + .collect(Collectors.toList()); + } + + // determine if any regions are missing from the cache + List cacheMisses = new ArrayList<>(); + for (String region : regionNames) { + if (!regionCache.containsKey(region)) { + cacheMisses.add(region); + } + } + + if (!cacheMisses.isEmpty()) { + List newRegions; + log.info("Regions: {} do not exist in the regions cache", cacheMisses); + newRegions = awsAccountInfoLookup.listRegions(cacheMisses); + // save all the newly found regions in the cache + toRegion(newRegions) + .forEach( + region -> { + log.info("adding region: {} to regions cache", region.getName()); + regionCache.putIfAbsent(region.getName(), region); + }); + } + return regionNames.stream().map(regionCache::get).collect(Collectors.toList()); + } + + private static List toRegion(List src) { + List result = new ArrayList<>(src.size()); + for (AmazonCredentials.AWSRegion r : src) { + Region region = new Region(); + region.setName(r.getName()); + region.setAvailabilityZones(new ArrayList<>(r.getAvailabilityZones())); + region.setPreferredZones(new ArrayList<>(r.getPreferredZones())); + result.add(region); + } + return result; + } + + // TODO: verify if this is safe to be removed if it is not used anywhere else apart from tests + public List load(CredentialsConfig source) throws Throwable { + final CredentialsConfig config = objectMapper.convertValue(source, CredentialsConfig.class); + if (accountsConfig.getAccounts() == null || accountsConfig.getAccounts().isEmpty()) { + return Collections.emptyList(); + } + List initializedAccounts = new ArrayList<>(accountsConfig.getAccounts().size()); + for (Account account : accountsConfig.getAccounts()) { + initializedAccounts.add(parseAccount(config, account)); + } + return initializedAccounts.stream() + .filter(AmazonCredentials::isEnabled) + .collect(Collectors.toList()); + } + + @Nullable + @Override + public V parse(@NotNull U account) { + try { + log.info("Parsing aws account: {}", account.getName()); + V a = parseAccount(credentialsConfig, account); + if (a.isEnabled()) { + log.info("AWS account: {} is enabled", account.getName()); + return a; + } else { + log.info("AWS account: {} is disabled", account.getName()); + } + } catch (Throwable t) { + log.warn("Failed to parse aws account: {}. 
Error: ", account.getName(), t); + } + return null; + } + + private V parseAccount(CredentialsConfig config, Account account) throws Throwable { + if (account.getAccountId() == null) { + if (!credentialTranslator.resolveAccountId()) { + throw new IllegalArgumentException( + "accountId is required and not resolvable for this credentials type"); + } + account.setAccountId(awsAccountInfoLookup.findAccountId()); + } + + if (account.getEnvironment() == null) { + account.setEnvironment(account.getName()); + } + + if (account.getAccountType() == null) { + account.setAccountType(account.getName()); + } + + log.info("Setting regions for aws account: {}", account.getName()); + account.setRegions(initRegions(account.getRegions())); + account.setDefaultSecurityGroups( + account.getDefaultSecurityGroups() != null + ? account.getDefaultSecurityGroups() + : config.getDefaultSecurityGroups()); + account.setLifecycleHooks( + account.getLifecycleHooks() != null + ? account.getLifecycleHooks() + : config.getDefaultLifecycleHooks()); + account.setEnabled(Optional.ofNullable(account.getEnabled()).orElse(true)); + + Map templateContext = new HashMap<>(templateValues); + templateContext.put("name", account.getName()); + templateContext.put("accountId", account.getAccountId()); + templateContext.put("environment", account.getEnvironment()); + templateContext.put("accountType", account.getAccountType()); + + account.setDefaultKeyPair( + templateFirstNonNull( + templateContext, account.getDefaultKeyPair(), config.getDefaultKeyPairTemplate())); + account.setEdda( + templateFirstNonNull(templateContext, account.getEdda(), config.getDefaultEddaTemplate())); + account.setFront50( + templateFirstNonNull( + templateContext, account.getFront50(), config.getDefaultFront50Template())); + account.setDiscovery( + templateFirstNonNull( + templateContext, account.getDiscovery(), config.getDefaultDiscoveryTemplate())); + account.setAssumeRole( + templateFirstNonNull( + templateContext, account.getAssumeRole(), config.getDefaultAssumeRole())); + account.setSessionName( + templateFirstNonNull( + templateContext, account.getSessionName(), config.getDefaultSessionName())); + account.setBastionHost( + templateFirstNonNull( + templateContext, account.getBastionHost(), config.getDefaultBastionHostTemplate())); + + if (account.getLifecycleHooks() != null) { + for (CredentialsConfig.LifecycleHook lifecycleHook : account.getLifecycleHooks()) { + lifecycleHook.setRoleARN( + templateFirstNonNull( + templateContext, + lifecycleHook.getRoleARN(), + config.getDefaultLifecycleHookRoleARNTemplate())); + lifecycleHook.setNotificationTargetARN( + templateFirstNonNull( + templateContext, + lifecycleHook.getNotificationTargetARN(), + config.getDefaultLifecycleHookNotificationTargetARNTemplate())); + } + } + return credentialTranslator.translate(credentialsProvider, account); + } + + private static String templateFirstNonNull(Map substitutions, String... 
values) { + for (String value : values) { + if (value != null) { + return StringTemplater.render(value, substitutions); + } + } + return null; + } + + static CredentialTranslator findTranslator( + Class credentialsType, ObjectMapper objectMapper) { + return new CopyConstructorTranslator<>(objectMapper, credentialsType); + } + + interface CredentialTranslator { + Class getCredentialType(); + + boolean resolveAccountId(); + + T translate(AWSCredentialsProvider credentialsProvider, Account account) throws Throwable; + } + + static class CopyConstructorTranslator + implements CredentialTranslator { + + private final ObjectMapper objectMapper; + private final Class credentialType; + private final Constructor copyConstructor; + + public CopyConstructorTranslator(ObjectMapper objectMapper, Class credentialType) { + this.objectMapper = objectMapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); + this.credentialType = credentialType; + try { + copyConstructor = + credentialType.getConstructor(credentialType, AWSCredentialsProvider.class); + } catch (NoSuchMethodException nsme) { + throw new IllegalArgumentException( + "Class " + + credentialType + + " must supply a constructor with " + + credentialType + + ", " + + AWSCredentialsProvider.class + + " args."); + } + } + + @Override + public Class getCredentialType() { + return credentialType; + } + + @Override + public boolean resolveAccountId() { + try { + credentialType.getMethod("getAssumeRole"); + return false; + } catch (NoSuchMethodException nsme) { + return true; + } + } + + @Override + public T translate(AWSCredentialsProvider credentialsProvider, Account account) + throws Throwable { + T immutableInstance = objectMapper.convertValue(account, credentialType); + try { + return copyConstructor.newInstance(immutableInstance, credentialsProvider); + } catch (InvocationTargetException ite) { + throw ite.getTargetException(); + } + } + } + + static class StringTemplater { + public static String render(String template, Map substitutions) { + String base = template; + int iterations = 0; + boolean changed = true; + while (changed && iterations < 10) { + iterations++; + String previous = base; + for (Map.Entry substitution : substitutions.entrySet()) { + base = + base.replaceAll( + Pattern.quote("{{" + substitution.getKey() + "}}"), substitution.getValue()); + } + changed = !previous.equals(base); + } + if (changed) { + throw new RuntimeException("too many levels of templatery"); + } + return base; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsConfig.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsConfig.java new file mode 100644 index 00000000000..b9097038109 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsConfig.java @@ -0,0 +1,297 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.security.config; + +import static lombok.EqualsAndHashCode.Include; + +import java.util.List; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.Setter; + +/** + * A mutable credentials configurations structure suitable for transformation into concrete + * credentials implementations. + */ +public class CredentialsConfig { + @EqualsAndHashCode(onlyExplicitlyIncluded = true) + public static class Region { + @Include private String name; + private List availabilityZones; + private List preferredZones; + private Boolean deprecated; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public List getAvailabilityZones() { + return availabilityZones; + } + + public void setAvailabilityZones(List availabilityZones) { + this.availabilityZones = availabilityZones; + } + + public List getPreferredZones() { + return preferredZones; + } + + public void setPreferredZones(List preferredZones) { + this.preferredZones = preferredZones; + } + + public Boolean getDeprecated() { + return deprecated; + } + + public void setDeprecated(Boolean deprecated) { + this.deprecated = deprecated; + } + + public Region copyOf() { + Region clone = new Region(); + clone.setName(getName()); + clone.setAvailabilityZones(getAvailabilityZones()); + clone.setPreferredZones(getPreferredZones()); + clone.setDeprecated(getDeprecated()); + + return clone; + } + } + + @EqualsAndHashCode(onlyExplicitlyIncluded = true) + public static class LifecycleHook { + @Include private String name; + @Include private String roleARN; + @Include private String notificationTargetARN; + @Include private String lifecycleTransition; + @Include private Integer heartbeatTimeout; + @Include private String defaultResult; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getRoleARN() { + return roleARN; + } + + public void setRoleARN(String roleARN) { + this.roleARN = roleARN; + } + + public String getNotificationTargetARN() { + return notificationTargetARN; + } + + public void setNotificationTargetARN(String notificationTargetARN) { + this.notificationTargetARN = notificationTargetARN; + } + + public String getLifecycleTransition() { + return lifecycleTransition; + } + + public void setLifecycleTransition(String lifecycleTransition) { + this.lifecycleTransition = lifecycleTransition; + } + + public Integer getHeartbeatTimeout() { + return heartbeatTimeout; + } + + public void setHeartbeatTimeout(Integer heartbeatTimeout) { + this.heartbeatTimeout = heartbeatTimeout; + } + + public String getDefaultResult() { + return defaultResult; + } + + public void setDefaultResult(String defaultResult) { + this.defaultResult = defaultResult; + } + } + + /** LoadAccounts class contains configuration related to loading aws accounts at start up. */ + @Data + public static class LoadAccounts { + /** + * flag to enable loading aws accounts using multiple threads. This is turned off by default. + */ + private boolean multiThreadingEnabled = false; + + /** + * Only applicable when multiThreadingEnabled: true. This specifies the number of threads that + * should be used to load the aws accounts. + * + *

<p>Adjust this number appropriately based on: + * + * <p>

- number of aws accounts + * + * <p>

- number of clouddriver pods (so that aws api calls are not rate-limited) + */ + private int numberOfThreads = 15; + + /** + * Only applicable when multiThreadingEnabled: true. This specifies the max amount of time for + * loading an aws account, after which a timeout exception will occur. + */ + private int timeoutInSeconds = 180; + } + + private String accessKeyId; + private String secretAccessKey; + private String defaultKeyPairTemplate; + private List defaultRegions; + private List defaultSecurityGroups; + private List defaultLifecycleHooks; + private String defaultEddaTemplate; + private String defaultFront50Template; + private String defaultBastionHostTemplate; + private String defaultDiscoveryTemplate; + private String defaultAssumeRole; + private String defaultSessionName; + private String defaultLifecycleHookRoleARNTemplate; + private String defaultLifecycleHookNotificationTargetARNTemplate; + + public String getDefaultKeyPairTemplate() { + return defaultKeyPairTemplate; + } + + public void setDefaultKeyPairTemplate(String defaultKeyPairTemplate) { + this.defaultKeyPairTemplate = defaultKeyPairTemplate; + } + + public List getDefaultRegions() { + return defaultRegions; + } + + public void setDefaultRegions(List defaultRegions) { + this.defaultRegions = defaultRegions; + } + + public List getDefaultSecurityGroups() { + return defaultSecurityGroups; + } + + public void setDefaultSecurityGroups(List defaultSecurityGroups) { + this.defaultSecurityGroups = defaultSecurityGroups; + } + + public String getDefaultEddaTemplate() { + return defaultEddaTemplate; + } + + public void setDefaultEddaTemplate(String defaultEddaTemplate) { + this.defaultEddaTemplate = defaultEddaTemplate; + } + + public String getDefaultFront50Template() { + return defaultFront50Template; + } + + public void setDefaultFront50Template(String defaultFront50Template) { + this.defaultFront50Template = defaultFront50Template; + } + + public String getDefaultBastionHostTemplate() { + return defaultBastionHostTemplate; + } + + public void setDefaultBastionHostTemplate(String defaultBastionHostTemplate) { + this.defaultBastionHostTemplate = defaultBastionHostTemplate; + } + + public String getDefaultDiscoveryTemplate() { + return defaultDiscoveryTemplate; + } + + public void setDefaultDiscoveryTemplate(String defaultDiscoveryTemplate) { + this.defaultDiscoveryTemplate = defaultDiscoveryTemplate; + } + + public String getDefaultAssumeRole() { + return defaultAssumeRole; + } + + public void setDefaultAssumeRole(String defaultAssumeRole) { + this.defaultAssumeRole = defaultAssumeRole; + } + + public String getDefaultSessionName() { + return defaultSessionName; + } + + public void setDefaultSessionName(String defaultSessionName) { + this.defaultSessionName = defaultSessionName; + } + + public List getDefaultLifecycleHooks() { + return defaultLifecycleHooks; + } + + public void setDefaultLifecycleHooks(List defaultLifecycleHooks) { + this.defaultLifecycleHooks = defaultLifecycleHooks; + } + + public String getDefaultLifecycleHookRoleARNTemplate() { + return defaultLifecycleHookRoleARNTemplate; + } + + public void setDefaultLifecycleHookRoleARNTemplate(String defaultLifecycleHookRoleARNTemplate) { + this.defaultLifecycleHookRoleARNTemplate = defaultLifecycleHookRoleARNTemplate; + } + + public String getDefaultLifecycleHookNotificationTargetARNTemplate() { + return defaultLifecycleHookNotificationTargetARNTemplate; + } + + public void setDefaultLifecycleHookNotificationTargetARNTemplate( + String 
defaultLifecycleHookNotificationTargetARNTemplate) { + this.defaultLifecycleHookNotificationTargetARNTemplate = + defaultLifecycleHookNotificationTargetARNTemplate; + } + + public String getAccessKeyId() { + return accessKeyId; + } + + public void setAccessKeyId(String accessKeyId) { + this.accessKeyId = accessKeyId; + } + + public String getSecretAccessKey() { + return secretAccessKey; + } + + public void setSecretAccessKey(String secretAccessKey) { + this.secretAccessKey = secretAccessKey; + } + + @Getter @Setter private LoadAccounts loadAccounts = new LoadAccounts(); +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AmazonClientInvocationHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AmazonClientInvocationHandler.java new file mode 100644 index 00000000000..568edfac4d4 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AmazonClientInvocationHandler.java @@ -0,0 +1,568 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.services.autoscaling.model.*; +import com.amazonaws.services.cloudwatch.model.DescribeAlarmsRequest; +import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult; +import com.amazonaws.services.cloudwatch.model.MetricAlarm; +import com.amazonaws.services.ec2.model.*; +import com.amazonaws.services.ec2.model.Instance; +import com.amazonaws.services.ec2.model.LaunchTemplate; +import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersRequest; +import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersResult; +import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.*; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.HttpStatus; +import 
org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.web.client.HttpClientErrorException; + +public class AmazonClientInvocationHandler implements InvocationHandler { + + private static final Logger log = LoggerFactory.getLogger(AmazonClientInvocationHandler.class); + + public static final ThreadLocal lastModified = new ThreadLocal<>(); + + private final String edda; + private final HttpClient httpClient; + private final Object delegate; + private final String serviceName; + private final ObjectMapper objectMapper; + private final EddaTimeoutConfig eddaTimeoutConfig; + private final Registry registry; + private final Map metricTags; + + public AmazonClientInvocationHandler( + Object delegate, + String serviceName, + String edda, + HttpClient httpClient, + ObjectMapper objectMapper, + EddaTimeoutConfig eddaTimeoutConfig, + Registry registry, + Map metricTags) { + this.edda = edda; + this.httpClient = httpClient; + this.objectMapper = objectMapper; + this.delegate = delegate; + this.serviceName = serviceName; + this.eddaTimeoutConfig = + eddaTimeoutConfig == null ? EddaTimeoutConfig.DEFAULT : eddaTimeoutConfig; + this.registry = registry; + this.metricTags = ImmutableMap.copyOf(metricTags); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + final Id id = + registry.createId("awsClientProxy.invoke", metricTags).withTag("method", method.getName()); + final long startTime = System.nanoTime(); + boolean wasDelegated = false; + + try { + if (!eddaTimeoutConfig.getAlbEnabled() + && method.getDeclaringClass().equals(AmazonElasticLoadBalancing.class)) { + throw new NoSuchMethodException(); + } + Method thisMethod = + this.getClass() + .getMethod( + method.getName(), + args != null && args.length > 0 ? getClassArgs(args) : new Class[0]); + return thisMethod.invoke(this, args); + } catch (NoSuchMethodException e) { + wasDelegated = true; + try { + return method.invoke(delegate, args); + } catch (InvocationTargetException ite) { + throw ite.getCause(); + } + } finally { + registry + .timer(id.withTag("requestMode", wasDelegated ? 
"sdkClient" : "edda")) + .record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); + } + } + + static Class[] getClassArgs(Object[] args) { + List classes = new ArrayList<>(); + for (Object object : args) { + classes.add(object.getClass()); + } + return classes.toArray(new Class[classes.size()]); + } + + //////////////////////////////////// + // + // AmazonAutoScaling + // + //////////////////////////////////// + public DescribeAutoScalingGroupsResult describeAutoScalingGroups() { + return describeAutoScalingGroups(null); + } + + public DescribeAutoScalingGroupsResult describeAutoScalingGroups( + DescribeAutoScalingGroupsRequest request) { + return new DescribeAutoScalingGroupsResult() + .withAutoScalingGroups( + describe( + request, "autoScalingGroupNames", "autoScalingGroups", AutoScalingGroup.class)); + } + + //////////////////////////////////// + // + // AmazonCloudWatch + // + //////////////////////////////////// + public DescribeAlarmsResult describeAlarms() { + return describeAlarms(null); + } + + public DescribeAlarmsResult describeAlarms(DescribeAlarmsRequest request) { + return new DescribeAlarmsResult() + .withMetricAlarms(describe(request, "alarmNames", "alarms", MetricAlarm.class)); + } + + public DescribeScheduledActionsResult describeScheduledActions() { + return describeScheduledActions(null); + } + + public DescribeScheduledActionsResult describeScheduledActions( + DescribeScheduledActionsRequest request) { + return new DescribeScheduledActionsResult() + .withScheduledUpdateGroupActions( + describe( + request, + "scheduledActionNames", + "scheduledActions", + ScheduledUpdateGroupAction.class)); + } + + public DescribePoliciesResult describePolicies() { + return describePolicies(null); + } + + public DescribePoliciesResult describePolicies(DescribePoliciesRequest request) { + return new DescribePoliciesResult() + .withScalingPolicies( + describe(request, "policyNames", "scalingPolicies", ScalingPolicy.class)); + } + + //////////////////////////////////// + // + // AmazonEC2 + // + //////////////////////////////////// + public DescribeLaunchTemplatesResult describeLaunchTemplates() { + return describeLaunchTemplates(null); + } + + public DescribeLaunchTemplatesResult describeLaunchTemplates( + DescribeLaunchTemplatesRequest request) { + return new DescribeLaunchTemplatesResult() + .withLaunchTemplates( + describe(request, "launchTemplateNames", "launchTemplates", LaunchTemplate.class)); + } + + private static final Function> + LAUNCH_TEMPLATE_VERSION_EXTRACTOR = + (r) -> { + if (r == null) { + return Collections.emptyList(); + } + DescribeLaunchTemplateVersionsRequest req = (DescribeLaunchTemplateVersionsRequest) r; + if (req.getLaunchTemplateId() == null) { + return Collections.emptyList(); + } + if (req.getVersions() == null || req.getVersions().isEmpty()) { + String defaultId = req.getLaunchTemplateId() + ":$Default"; + return Collections.singletonList(defaultId); + } + return req.getVersions().stream() + .map(v -> req.getLaunchTemplateId() + ":" + v) + .collect(Collectors.toCollection(ArrayList::new)); + }; + + public DescribeLaunchTemplateVersionsResult describeLaunchTemplateVersions() { + return describeLaunchTemplateVersions(null); + } + + public DescribeLaunchTemplateVersionsResult describeLaunchTemplateVersions( + DescribeLaunchTemplateVersionsRequest request) { + return new DescribeLaunchTemplateVersionsResult() + .withLaunchTemplateVersions( + describe( + request, + LAUNCH_TEMPLATE_VERSION_EXTRACTOR, + "launchTemplateVersions", + LaunchTemplateVersion.class)); 
+ } + + public DescribeImagesResult describeImages() { + return describeImages(null); + } + + public DescribeImagesResult describeImages(DescribeImagesRequest request) { + return new DescribeImagesResult() + .withImages(describe(request, "imageIds", "images", Image.class)); + } + + public DescribeInstancesResult describeInstances() { + return describeInstances(null); + } + + public DescribeInstancesResult describeInstances(DescribeInstancesRequest request) { + return new DescribeInstancesResult() + .withReservations( + new Reservation() + .withReservationId("1234") + .withInstances( + describe(request, "instanceIds", "../view/instances", Instance.class))); + } + + public DescribeLaunchConfigurationsResult describeLaunchConfigurations() { + return describeLaunchConfigurations(null); + } + + public DescribeLaunchConfigurationsResult describeLaunchConfigurations( + DescribeLaunchConfigurationsRequest request) { + return new DescribeLaunchConfigurationsResult() + .withLaunchConfigurations( + describe( + request, + "launchConfigurationNames", + "launchConfigurations", + LaunchConfiguration.class)); + } + + public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings() { + return describeReservedInstancesOfferings(null); + } + + public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings( + DescribeReservedInstancesOfferingsRequest request) { + return new DescribeReservedInstancesOfferingsResult() + .withReservedInstancesOfferings( + describe( + request, + "reservedInstancesOfferingIds", + "reservedInstancesOfferings", + ReservedInstancesOffering.class)); + } + + public DescribeSecurityGroupsResult describeSecurityGroups() { + return describeSecurityGroups(null); + } + + public DescribeSecurityGroupsResult describeSecurityGroups( + DescribeSecurityGroupsRequest request) { + return new DescribeSecurityGroupsResult() + .withSecurityGroups(describe(request, "groupIds", "securityGroups", SecurityGroup.class)); + } + + public DescribeSubnetsResult describeSubnets() { + return describeSubnets(null); + } + + public DescribeSubnetsResult describeSubnets(DescribeSubnetsRequest request) { + return new DescribeSubnetsResult() + .withSubnets(describe(request, "subnetIds", "subnets", Subnet.class)); + } + + public DescribeVpcsResult describeVpcs() { + return describeVpcs(null); + } + + public DescribeVpcsResult describeVpcs(DescribeVpcsRequest request) { + return new DescribeVpcsResult().withVpcs(describe(request, "vpcIds", "vpcs", Vpc.class)); + } + + public DescribeVpcClassicLinkResult describeVpcClassicLink() { + return describeVpcClassicLink(null); + } + + public DescribeVpcClassicLinkResult describeVpcClassicLink( + DescribeVpcClassicLinkRequest request) { + return new DescribeVpcClassicLinkResult() + .withVpcs(describe(request, "vpcIds", "vpcClassicLinks", VpcClassicLink.class)); + } + + public DescribeClassicLinkInstancesResult describeClassicLinkInstances() { + return describeClassicLinkInstances(null); + } + + public DescribeClassicLinkInstancesResult describeClassicLinkInstances( + DescribeClassicLinkInstancesRequest request) { + return new DescribeClassicLinkInstancesResult() + .withInstances( + describe(request, "instanceIds", "classicLinkInstances", ClassicLinkInstance.class)); + } + + //////////////////////////////////// + // + // AmazonElasticLoadBalancing + // + //////////////////////////////////// + public DescribeLoadBalancersResult describeLoadBalancers() { + return describeLoadBalancers((DescribeLoadBalancersRequest) null); + } + + public 
DescribeLoadBalancersResult describeLoadBalancers(DescribeLoadBalancersRequest request) { + return new DescribeLoadBalancersResult() + .withLoadBalancerDescriptions( + describe(request, "loadBalancerNames", "loadBalancers", LoadBalancerDescription.class)); + } + + // Cannot have overloaded method with same parameters and different return types, for now, no + // calls to this parameter-less function, so commenting out for now + // public com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult + // describeLoadBalancers() { + // return + // describeLoadBalancers((com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersRequest)null); + // } + + public com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult + describeLoadBalancers( + com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersRequest + request) { + return new com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult() + .withLoadBalancers(describe(request, "names", "appLoadBalancers", LoadBalancer.class)); + } + + public DescribeTargetGroupsResult describeTargetGroups() { + return describeTargetGroups(null); + } + + public DescribeTargetGroupsResult describeTargetGroups(DescribeTargetGroupsRequest request) { + return new DescribeTargetGroupsResult() + .withTargetGroups(describe(request, "names", "targetGroups", TargetGroup.class)); + } + //////////////////////////////////// + + private List describe( + AmazonWebServiceRequest request, + String idKey, + final String object, + final Class singleType) { + return describe(request, r -> getRequestIds(r, idKey), object, singleType); + } + + private List describe( + AmazonWebServiceRequest request, + Function> idExtractor, + final String object, + final Class singleType) { + lastModified.set(null); + final Map metricTags = new HashMap<>(this.metricTags); + metricTags.put("collection", object); + try { + final Collection ids = idExtractor.apply(request); + metricTags.put("collectionMode", ids.isEmpty() ? "full" : "byId"); + final JavaType singleMeta = + objectMapper + .getTypeFactory() + .constructParametrizedType(Metadata.class, Metadata.class, singleType); + Long mtime = null; + final List results = new ArrayList<>(); + + final Id deserializeJsonTimer = registry.createId("edda.deserializeJson", metricTags); + final Id resultSizeCounter = registry.createId("edda.resultSize", metricTags); + if (ids.isEmpty()) { + HttpEntity entity = getHttpEntity(metricTags, object, null); + try { + final JavaType listMeta = + objectMapper + .getTypeFactory() + .constructParametrizedType(List.class, List.class, singleMeta); + final List> metadataResults = + registry + .timer(deserializeJsonTimer) + .record(() -> objectMapper.readValue(entity.getContent(), listMeta)); + for (Metadata meta : metadataResults) { + mtime = mtime == null ? meta.mtime : Math.min(mtime, meta.mtime); + results.add(meta.data); + } + } finally { + EntityUtils.consume(entity); + } + } else { + for (String id : ids) { + HttpEntity entity = getHttpEntity(metricTags, object, id); + try { + final Metadata result = + registry + .timer(deserializeJsonTimer) + .record(() -> objectMapper.readValue(entity.getContent(), singleMeta)); + mtime = mtime == null ? 
result.mtime : Math.min(mtime, result.mtime); + results.add(result.data); + } finally { + EntityUtils.consume(entity); + } + } + } + registry.counter(resultSizeCounter).increment(results.size()); + lastModified.set(mtime); + return results; + } catch (Exception e) { + log.error(e.getMessage() + " (retries exhausted)"); + + registry.counter(registry.createId("edda.failures", metricTags)).increment(); + final AmazonServiceException ex = + new AmazonServiceException("Edda failed locating the managed objects requested.", e); + if (e.getCause() instanceof HttpClientErrorException) { + ex.setStatusCode(((HttpClientErrorException) e.getCause()).getRawStatusCode()); + } else { + ex.setStatusCode(400); + } + ex.setServiceName(serviceName); + ex.setErrorType(AmazonServiceException.ErrorType.Unknown); + throw ex; + } + } + + private static Collection getRequestIds( + AmazonWebServiceRequest request, String idFieldName) { + if (request == null) { + return Collections.emptySet(); + } + try { + Field field = request.getClass().getDeclaredField(idFieldName); + field.setAccessible(true); + Collection collection = (Collection) field.get(request); + return collection == null ? Collections.emptySet() : collection; + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private HttpEntity getHttpEntity(Map metricTags, String objectName, String key) + throws EddaException { + final String url = + edda + "/REST/v2/aws/" + objectName + (key == null ? ";_expand" : "/" + key) + ";_meta"; + final HttpGet get = new HttpGet(url); + get.setConfig( + RequestConfig.custom() + .setConnectTimeout(eddaTimeoutConfig.getConnectTimeout()) + .setConnectionRequestTimeout(eddaTimeoutConfig.getConnectionRequestTimeout()) + .setSocketTimeout(eddaTimeoutConfig.getSocketTimeout()) + .build()); + + long retryDelay = eddaTimeoutConfig.getRetryBase(); + int retryAttempts = 0; + String lastExceptionMessage = ""; + String lastUrl = ""; + Random r = new Random(); + Exception ex = null; + + final Id httpExecuteTime = registry.createId("edda.httpExecute", metricTags); + final Id httpErrors = registry.createId("edda.errors", metricTags).withTag("errorType", "http"); + final Id networkErrors = + registry.createId("edda.errors", metricTags).withTag("errorType", "network"); + final Id retryDelayMillis = registry.createId("edda.retryDelayMillis", metricTags); + final Id retries = registry.createId("edda.retries", metricTags); + while (retryAttempts < eddaTimeoutConfig.getMaxAttempts()) { + HttpEntity entity = null; + + try { + final HttpResponse response = + registry.timer(httpExecuteTime).record(() -> httpClient.execute(get)); + final int statusCode = response.getStatusLine().getStatusCode(); + entity = response.getEntity(); + if (statusCode != HttpStatus.SC_OK) { + lastExceptionMessage = + response.getProtocolVersion().toString() + + " " + + response.getStatusLine().getStatusCode() + + " " + + response.getStatusLine().getReasonPhrase(); + + registry + .counter(httpErrors.withTag("statusCode", Integer.toString(statusCode))) + .increment(); + + throw new HttpClientErrorException( + org.springframework.http.HttpStatus.valueOf(statusCode), lastExceptionMessage); + } else { + return entity; + } + } catch (Exception e) { + lastExceptionMessage = e.getClass().getSimpleName() + ": " + e.getMessage(); + ex = e; + registry + .counter(networkErrors.withTag("exceptionType", e.getClass().getSimpleName())) + .increment(); + } finally { + lastUrl = url; + } + + // ensure that the content stream is closed on 
a non-200 response from edda + EntityUtils.consumeQuietly(entity); + + final String exceptionFormat = "Edda request {} failed with {}"; + log.warn(exceptionFormat, url, lastExceptionMessage, ex); + + try { + registry.counter(retryDelayMillis).increment(retryDelay); + Thread.sleep(retryDelay); + } catch (InterruptedException inter) { + break; + } + registry.counter(retries).increment(); + retryAttempts++; + retryDelay += r.nextInt(eddaTimeoutConfig.getBackoffMillis()); + } + throw new EddaException("Edda request " + lastUrl + " failed with " + lastExceptionMessage, ex); + } + + private static class Metadata { + final Long mtime; + final T data; + + @JsonCreator + public Metadata(@JsonProperty("mtime") Long mtime, @JsonProperty("data") T data) { + this.mtime = mtime; + this.data = data; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AwsSdkClientSupplier.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AwsSdkClientSupplier.java new file mode 100644 index 00000000000..9e49bfaf6cb --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/AwsSdkClientSupplier.java @@ -0,0 +1,263 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; + +import static java.util.Objects.requireNonNull; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.regions.Region; +import com.amazonaws.regions.RegionUtils; +import com.amazonaws.retry.RetryPolicy; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.RateLimiter; +import com.netflix.spectator.api.Counter; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.security.AWSProxy; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixSTSAssumeRoleSessionCredentialsProvider; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.springframework.util.ReflectionUtils; + +/** Factory for shared instances of AWS SDK clients. 
*/ +public class AwsSdkClientSupplier { + + private final Registry registry; + private final LoadingCache, ?> awsSdkClients; + private final RateLimiterSupplier rateLimiterSupplier; + + public AwsSdkClientSupplier( + RateLimiterSupplier rateLimiterSupplier, + Registry registry, + RetryPolicy retryPolicy, + List requestHandlers, + AWSProxy proxy, + boolean useGzip) { + this.rateLimiterSupplier = Objects.requireNonNull(rateLimiterSupplier); + this.registry = Objects.requireNonNull(registry); + awsSdkClients = + CacheBuilder.newBuilder() + .recordStats() + .expireAfterAccess(10, TimeUnit.MINUTES) + .build(new SdkClientCacheLoader(retryPolicy, requestHandlers, proxy, useGzip)); + LoadingCacheMetrics.instrument("awsSdkClientSupplier", registry, awsSdkClients); + } + + public T getClient( + Class> impl, + Class iface, + String account, + AWSCredentialsProvider awsCredentialsProvider, + String region) { + return getClient(impl, iface, account, awsCredentialsProvider, region, null); + } + + public T getClient( + Class> impl, + Class iface, + String account, + AWSCredentialsProvider awsCredentialsProvider, + String region, + ClientConfiguration clientConfig) { + final RequestHandler2 handler = getRateLimiterHandler(iface, account, region); + final AmazonClientKey key = + new AmazonClientKey<>(impl, awsCredentialsProvider, region, handler, clientConfig); + + try { + return iface.cast(awsSdkClients.get(key)); + } catch (ExecutionException executionException) { + if (executionException.getCause() instanceof RuntimeException) { + throw (RuntimeException) executionException.getCause(); + } + throw new RuntimeException("Failed creating amazon client", executionException.getCause()); + } + } + + private RequestHandler2 getRateLimiterHandler( + Class sdkInterface, String account, String region) { + final RateLimiter limiter = rateLimiterSupplier.getRateLimiter(sdkInterface, account, region); + final Counter rateLimitCounter = + registry.counter( + "amazonClientProvider.rateLimitDelayMillis", + "clientType", + sdkInterface.getSimpleName(), + "account", + account, + "region", + region == null ? "UNSPECIFIED" : region); + return new RateLimitingRequestHandler(rateLimitCounter, limiter); + } + + private static class SdkClientCacheLoader extends CacheLoader, Object> { + private final RetryPolicy retryPolicy; + private final List requestHandlers; + private final AWSProxy proxy; + private final boolean useGzip; + + public SdkClientCacheLoader( + RetryPolicy retryPolicy, + List requestHandlers, + AWSProxy proxy, + boolean useGzip) { + this.retryPolicy = Objects.requireNonNull(retryPolicy); + this.requestHandlers = + requestHandlers == null ? 
Collections.emptyList() : ImmutableList.copyOf(requestHandlers); + this.proxy = proxy; + this.useGzip = useGzip; + } + + @Override + public Object load(AmazonClientKey key) throws Exception { + Method m = key.implClass.getDeclaredMethod("standard"); + AwsClientBuilder builder = key.implClass.cast(m.invoke(null)); + + ClientConfiguration clientConfiguration = new ClientConfiguration(); + if (key.getClientConfiguration() != null) { + ReflectionUtils.shallowCopyFieldState(key.getClientConfiguration(), clientConfiguration); + } + clientConfiguration.setRetryPolicy(getRetryPolicy(key)); + clientConfiguration.setUseGzip(useGzip); + clientConfiguration.setUserAgentSuffix("spinnaker"); + + if (proxy != null && proxy.isProxyConfigMode()) { + proxy.apply(clientConfiguration); + } + + builder + .withCredentials(key.awsCredentialsProvider) + .withClientConfiguration(clientConfiguration); + getRequestHandlers(key).ifPresent(builder::withRequestHandlers); + builder.withRegion( + key.getRegion().orElseGet(() -> new SpinnakerAwsRegionProvider().getRegion())); + + return builder.build(); + } + + private Optional getRequestHandlers(AmazonClientKey key) { + List handlers = new ArrayList<>(requestHandlers.size() + 1); + key.getRequestHandler().ifPresent(handlers::add); + handlers.addAll(requestHandlers); + if (handlers.isEmpty()) { + return Optional.empty(); + } + return Optional.of(handlers.toArray(new RequestHandler2[handlers.size()])); + } + + private RetryPolicy getRetryPolicy(AmazonClientKey key) { + + if (!(key.getAwsCredentialsProvider() + instanceof NetflixSTSAssumeRoleSessionCredentialsProvider)) { + return retryPolicy; + } + + final RetryPolicy.RetryCondition delegatingRetryCondition = + (originalRequest, exception, retriesAttempted) -> { + NetflixSTSAssumeRoleSessionCredentialsProvider stsCredentialsProvider = + (NetflixSTSAssumeRoleSessionCredentialsProvider) key.getAwsCredentialsProvider(); + if (exception instanceof AmazonServiceException) { + ((AmazonServiceException) exception) + .getHttpHeaders() + .put("targetAccountId", stsCredentialsProvider.getAccountId()); + } + return retryPolicy + .getRetryCondition() + .shouldRetry(originalRequest, exception, retriesAttempted); + }; + + return new RetryPolicy( + delegatingRetryCondition, + retryPolicy.getBackoffStrategy(), + retryPolicy.getMaxErrorRetry(), + retryPolicy.isMaxErrorRetryInClientConfigHonored()); + } + } + + private static class AmazonClientKey { + private final Class> implClass; + private final AWSCredentialsProvider awsCredentialsProvider; + private final Region region; + private final RequestHandler2 requestHandler; + private final ClientConfiguration clientConfiguration; + + public AmazonClientKey( + Class> implClass, + AWSCredentialsProvider awsCredentialsProvider, + String region, + RequestHandler2 requestHandler, + ClientConfiguration configuration) { + this.implClass = requireNonNull(implClass); + this.awsCredentialsProvider = requireNonNull(awsCredentialsProvider); + this.region = region == null ? 
null : RegionUtils.getRegion(region); + this.requestHandler = requestHandler; + this.clientConfiguration = configuration; + } + + public Class> getImplClass() { + return implClass; + } + + public AWSCredentialsProvider getAwsCredentialsProvider() { + return awsCredentialsProvider; + } + + public Optional getRegion() { + return Optional.ofNullable(region).map(Region::getName); + } + + public ClientConfiguration getClientConfiguration() { + return clientConfiguration; + } + + public Optional getRequestHandler() { + return Optional.ofNullable(requestHandler); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AmazonClientKey that = (AmazonClientKey) o; + + if (!implClass.equals(that.implClass)) return false; + if (!awsCredentialsProvider.equals(that.awsCredentialsProvider)) return false; + if (region != that.region) return false; + return Objects.equals(requestHandler, that.requestHandler); + } + + @Override + public int hashCode() { + int result = implClass.hashCode(); + result = 31 * result + awsCredentialsProvider.hashCode(); + result = 31 * result + (region != null ? region.hashCode() : 0); + result = 31 * result + (requestHandler != null ? requestHandler.hashCode() : 0); + return result; + } + } +} diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/EddaException.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/EddaException.java new file mode 100644 index 00000000000..1428bf0cb2c --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/EddaException.java @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; + +class EddaException extends Exception { + EddaException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/LoadingCacheMetrics.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/LoadingCacheMetrics.java similarity index 100% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/LoadingCacheMetrics.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/LoadingCacheMetrics.java diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/ProxyHandlerBuilder.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/ProxyHandlerBuilder.java new file mode 100644 index 00000000000..ce772be38e0 --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/ProxyHandlerBuilder.java @@ -0,0 +1,141 @@ +/* + * Copyright 2016 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; + +import static java.util.Objects.requireNonNull; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.aws.security.EddaTemplater; +import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import java.lang.reflect.Proxy; +import java.util.Map; +import org.apache.http.client.HttpClient; + +/** + * Constructs a JDK dynamic proxy for an AWS service interface that (if enabled for an account) will + * delegate read requests to Edda and otherwise fallback to the underlying SDK client. + */ +public class ProxyHandlerBuilder { + private final AwsSdkClientSupplier awsSdkClientSupplier; + private final HttpClient httpClient; + private final ObjectMapper objectMapper; + private final EddaTemplater eddaTemplater; + private final EddaTimeoutConfig eddaTimeoutConfig; + private final Registry registry; + + public ProxyHandlerBuilder( + AwsSdkClientSupplier awsSdkClientSupplier, + HttpClient httpClient, + ObjectMapper objectMapper, + EddaTemplater eddaTemplater, + EddaTimeoutConfig eddaTimeoutConfig, + Registry registry) { + this.awsSdkClientSupplier = requireNonNull(awsSdkClientSupplier); + this.httpClient = requireNonNull(httpClient); + this.objectMapper = requireNonNull(objectMapper); + this.eddaTemplater = requireNonNull(eddaTemplater); + this.eddaTimeoutConfig = eddaTimeoutConfig; + this.registry = requireNonNull(registry); + } + + public , U> U getProxyHandler( + Class interfaceKlazz, + Class impl, + NetflixAmazonCredentials amazonCredentials, + String region) { + return getProxyHandler(interfaceKlazz, impl, amazonCredentials, region, false); + } + + public , U> U getProxyHandler( + Class interfaceKlazz, + Class impl, + NetflixAmazonCredentials amazonCredentials, + String region, + boolean skipEdda) { + return getProxyHandler(interfaceKlazz, impl, amazonCredentials, region, skipEdda, null); + } + + public , U> U getProxyHandler( + Class interfaceKlazz, + Class impl, + NetflixAmazonCredentials amazonCredentials, + String region, + ClientConfiguration clientConfig) { + return getProxyHandler(interfaceKlazz, impl, amazonCredentials, region, false, clientConfig); + } + + public , U> U getProxyHandler( + Class interfaceKlazz, + Class impl, + NetflixAmazonCredentials amazonCredentials, + String region, + boolean skipEdda, + ClientConfiguration clientConfiguration) { + requireNonNull(amazonCredentials, "Credentials cannot be null"); + try { + U delegate = + awsSdkClientSupplier.getClient( + impl, + interfaceKlazz, + amazonCredentials.getName(), + amazonCredentials.getCredentialsProvider(), + region, + clientConfiguration); + if (skipEdda + || 
!amazonCredentials.getEddaEnabled() + || eddaTimeoutConfig.getDisabledRegions().contains(region)) { + return delegate; + } + return interfaceKlazz.cast( + Proxy.newProxyInstance( + getClass().getClassLoader(), + new Class[] {interfaceKlazz}, + getInvocationHandler( + delegate, interfaceKlazz.getSimpleName(), region, amazonCredentials))); + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new RuntimeException("Instantiation of client implementation failed!", e); + } + } + + protected AmazonClientInvocationHandler getInvocationHandler( + Object client, + String serviceName, + String region, + NetflixAmazonCredentials amazonCredentials) { + final Map baseTags = + ImmutableMap.of( + "account", amazonCredentials.getName(), + "region", region, + "serviceName", serviceName); + return new AmazonClientInvocationHandler( + client, + serviceName, + eddaTemplater.getUrl(amazonCredentials.getEdda(), region), + this.httpClient, + objectMapper, + eddaTimeoutConfig, + registry, + baseTags); + } +} diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimiterSupplier.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimiterSupplier.java similarity index 82% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimiterSupplier.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimiterSupplier.java index 8f5b6d59b80..3bc85786c2a 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimiterSupplier.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimiterSupplier.java @@ -23,20 +23,20 @@ import com.netflix.spectator.api.Registry; import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; - import java.util.Objects; import java.util.concurrent.ExecutionException; - -/** - * Factory for shared RateLimiters by SDK client interface/account/region. - */ +/** Factory for shared RateLimiters by SDK client interface/account/region. 
*/ public class RateLimiterSupplier { private final LoadingCache rateLimiters; - public RateLimiterSupplier(ServiceLimitConfiguration serviceLimitConfiguration, Registry registry) { - rateLimiters = CacheBuilder.newBuilder().recordStats().build(new RateLimitCacheLoader(serviceLimitConfiguration)); + public RateLimiterSupplier( + ServiceLimitConfiguration serviceLimitConfiguration, Registry registry) { + rateLimiters = + CacheBuilder.newBuilder() + .recordStats() + .build(new RateLimitCacheLoader(serviceLimitConfiguration)); LoadingCacheMetrics.instrument("rateLimiterSupplier", registry, rateLimiters); } @@ -61,19 +61,21 @@ public RateLimitCacheLoader(ServiceLimitConfiguration serviceLimitConfiguration) this(serviceLimitConfiguration, DEFAULT_LIMIT); } - public RateLimitCacheLoader(ServiceLimitConfiguration serviceLimitConfiguration, double defaultLimit) { + public RateLimitCacheLoader( + ServiceLimitConfiguration serviceLimitConfiguration, double defaultLimit) { this.serviceLimitConfiguration = Objects.requireNonNull(serviceLimitConfiguration); this.defaultLimit = defaultLimit; } @Override public RateLimiter load(RateLimitKey key) throws Exception { - double rateLimit = serviceLimitConfiguration.getLimit( - ServiceLimitConfiguration.API_RATE_LIMIT, - key.implementationClass.getSimpleName(), - key.account, - AmazonCloudProvider.ID, - defaultLimit); + double rateLimit = + serviceLimitConfiguration.getLimit( + ServiceLimitConfiguration.API_RATE_LIMIT, + key.implementationClass.getSimpleName(), + key.account, + AmazonCloudProvider.ID, + defaultLimit); return RateLimiter.create(rateLimit); } diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimitingRequestHandler.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimitingRequestHandler.java similarity index 95% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimitingRequestHandler.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimitingRequestHandler.java index 8d6237609f6..07c18c43689 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimitingRequestHandler.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/RateLimitingRequestHandler.java @@ -16,18 +16,15 @@ package com.netflix.spinnaker.clouddriver.aws.security.sdkclient; +import static java.util.Objects.requireNonNull; + import com.amazonaws.Request; import com.amazonaws.handlers.RequestHandler2; import com.google.common.util.concurrent.RateLimiter; import com.netflix.spectator.api.Counter; - import java.util.Objects; -import static java.util.Objects.requireNonNull; - -/** - * A RequestHandler that will throttle requests via the supplied RateLimiter. - */ +/** A RequestHandler that will throttle requests via the supplied RateLimiter. 
*/ public class RateLimitingRequestHandler extends RequestHandler2 { private final Counter counter; private final RateLimiter rateLimiter; diff --git a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/SpinnakerAwsRegionProvider.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/SpinnakerAwsRegionProvider.java similarity index 90% rename from clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/SpinnakerAwsRegionProvider.java rename to clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/SpinnakerAwsRegionProvider.java index dc7f7afd0ff..7c9ad19d030 100644 --- a/clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/SpinnakerAwsRegionProvider.java +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/security/sdkclient/SpinnakerAwsRegionProvider.java @@ -26,11 +26,10 @@ public class SpinnakerAwsRegionProvider extends AwsRegionProviderChain { public SpinnakerAwsRegionProvider() { super( - new Ec2RegionEnvVarRegionProvider(), - new DefaultAwsRegionProviderChain(), - new RegionsCurrentRegionProvider(), - new DefaultRegionProvider() - ); + new Ec2RegionEnvVarRegionProvider(), + new DefaultAwsRegionProviderChain(), + new RegionsCurrentRegionProvider(), + new DefaultRegionProvider()); } private static class Ec2RegionEnvVarRegionProvider extends AwsRegionProvider { diff --git a/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/services/LaunchTemplateService.java b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/services/LaunchTemplateService.java new file mode 100644 index 00000000000..ec36f8e43ca --- /dev/null +++ b/clouddriver-aws/src/main/java/com/netflix/spinnaker/clouddriver/aws/services/LaunchTemplateService.java @@ -0,0 +1,612 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.services; + +import static java.util.Comparator.comparing; + +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification; +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.*; +import com.netflix.spinnaker.clouddriver.aws.deploy.AmazonResourceTagger; +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper; +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration; +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription; +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties; +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProviderAggregator; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput; +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride; +import com.netflix.spinnaker.kork.core.RetrySupport; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +@Slf4j +public class LaunchTemplateService { + private final AmazonEC2 ec2; + private final UserDataProviderAggregator userDataProviderAggregator; + private final LocalFileUserDataProperties localFileUserDataProperties; + private final Collection amazonResourceTaggers; + private final RetrySupport retrySupport = new RetrySupport(); + + /** + * Traditional Amazon EC2 instance types provide fixed CPU utilization, while burstable + * performance instances provide a baseline level of CPU utilization with the ability to burst CPU + * utilization above the baseline level. The baseline utilization and ability to burst are + * governed by CPU credits. + * + *
<p>
CPU credits can be configured with 2 modes: (1) unlimited: Can sustain high CPU utilization + * for any period of time whenever required. If the average CPU usage over a rolling 24-hour + * period exceeds the baseline, charges for surplus credits will apply. (2) standard: Suited to + * workloads with an average CPU utilization that is consistently below the baseline CPU + * utilization of the instance. To burst above the baseline, the instance spends credits that it + * has accrued in its CPU credit balance. + * + *
<p>
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html
+ */
+ private static final String UNLIMITED_CPU_CREDITS = "unlimited";
+
+ private static final String STANDARD_CPU_CREDITS = "standard";
+
+ public LaunchTemplateService(
+ AmazonEC2 ec2,
+ UserDataProviderAggregator userDataProviderAggregator,
+ LocalFileUserDataProperties localFileUserDataProperties,
+ Collection<AmazonResourceTagger> amazonResourceTaggers) {
+ this.ec2 = ec2;
+ this.userDataProviderAggregator = userDataProviderAggregator;
+ this.localFileUserDataProperties = localFileUserDataProperties;
+ this.amazonResourceTaggers = amazonResourceTaggers;
+ }
+
+ public Optional<LaunchTemplateVersion> getLaunchTemplateVersion(
+ LaunchTemplateSpecification launchTemplateSpecification) {
+ final List<LaunchTemplateVersion> versions = new ArrayList<>();
+ final String version = launchTemplateSpecification.getVersion();
+ final DescribeLaunchTemplateVersionsRequest request =
+ new DescribeLaunchTemplateVersionsRequest()
+ .withLaunchTemplateId(launchTemplateSpecification.getLaunchTemplateId());
+
+ while (true) {
+ final DescribeLaunchTemplateVersionsResult result =
+ ec2.describeLaunchTemplateVersions(request);
+ versions.addAll(result.getLaunchTemplateVersions());
+ if (result.getNextToken() != null) {
+ request.withNextToken(result.getNextToken());
+ } else {
+ break;
+ }
+ }
+
+ if ("$Latest".equals(version)) {
+ return versions.stream().max(comparing(LaunchTemplateVersion::getVersionNumber));
+ } else if ("$Default".equals(version)) {
+ return versions.stream().filter(LaunchTemplateVersion::isDefaultVersion).findFirst();
+ }
+
+ return versions.stream()
+ .filter(i -> i.getVersionNumber().equals(Long.parseLong(version)))
+ .findFirst();
+ }
+
+ public LaunchTemplate createLaunchTemplate(
+ AsgConfiguration asgConfig, String asgName, String launchTemplateName) {
+ final RequestLaunchTemplateData data =
+ buildLaunchTemplateData(asgConfig, asgName, launchTemplateName);
+ log.info("Creating launch template with name {}", launchTemplateName);
+ return retrySupport.retry(
+ () -> {
+ final CreateLaunchTemplateRequest launchTemplateRequest =
+ new CreateLaunchTemplateRequest()
+ .withLaunchTemplateName(launchTemplateName)
+ .withLaunchTemplateData(data);
+ return ec2.createLaunchTemplate(launchTemplateRequest).getLaunchTemplate();
+ },
+ 3,
+ Duration.ofMillis(3000),
+ false);
+ }
+
+ public LaunchTemplateVersion modifyLaunchTemplate(
+ NetflixAmazonCredentials credentials,
+ ModifyServerGroupLaunchTemplateDescription description,
+ LaunchTemplateVersion sourceLtVersion,
+ boolean shouldUseMixedInstancesPolicy) {
+
+ RequestLaunchTemplateData data =
+ buildLaunchTemplateDataForModify(
+ credentials, description, sourceLtVersion, shouldUseMixedInstancesPolicy);
+ CreateLaunchTemplateVersionResult result =
+ ec2.createLaunchTemplateVersion(
+ new CreateLaunchTemplateVersionRequest()
+ .withLaunchTemplateId(sourceLtVersion.getLaunchTemplateId())
+ .withLaunchTemplateData(data));
+
+ log.info(
+ String.format(
+ "Created new launch template version %s for launch template ID %s",
+ result.getLaunchTemplateVersion().getVersionNumber(),
+ result.getLaunchTemplateVersion().getLaunchTemplateId()));
+
+ return result.getLaunchTemplateVersion();
+ }
+
+ /**
+ * Delete a launch template version. A new launch template version is created when a launch
+ * template is modified.
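+ * For example, a modify operation that fails after creating a new version can call this method
+ * to roll that version back; the "does not exist" error codes below are treated as success so
+ * the cleanup is safe to retry.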
+ * https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteLaunchTemplateVersions.html + * + * @param launchTemplateId launch template ID for the version to delete + * @param versionToDelete launch template version to delete + */ + public void deleteLaunchTemplateVersion(String launchTemplateId, Long versionToDelete) { + log.info( + String.format( + "Attempting to delete launch template version %s for launch template ID %s.", + versionToDelete, launchTemplateId)); + + DeleteLaunchTemplateVersionsResult result = + ec2.deleteLaunchTemplateVersions( + new DeleteLaunchTemplateVersionsRequest() + .withLaunchTemplateId(launchTemplateId) + .withVersions(String.valueOf(versionToDelete))); + + if (result.getUnsuccessfullyDeletedLaunchTemplateVersions() != null + && !result.getUnsuccessfullyDeletedLaunchTemplateVersions().isEmpty()) { + DeleteLaunchTemplateVersionsResponseErrorItem responseErrorItem = + result.getUnsuccessfullyDeletedLaunchTemplateVersions().get(0); + ResponseError failureResponseError = responseErrorItem.getResponseError(); + + // certain error codes can be considered success when they match the desired end state. + // this also acts as a safety net in retry scenarios. + List codesConsideredSuccess = + List.of("launchTemplateIdDoesNotExist", "launchTemplateVersionDoesNotExist"); + + if (failureResponseError != null + && !codesConsideredSuccess.contains(failureResponseError.getCode())) { + throw new RuntimeException( + String.format( + "Failed to delete launch template version %s for launch template ID %s because of error '%s'", + responseErrorItem.getVersionNumber(), + responseErrorItem.getLaunchTemplateId(), + failureResponseError.getCode())); + } + } + } + + /** + * Build launch template data for launch template modification i.e. new launch template version + */ + private RequestLaunchTemplateData buildLaunchTemplateDataForModify( + NetflixAmazonCredentials credentials, + ModifyServerGroupLaunchTemplateDescription modifyDesc, + LaunchTemplateVersion sourceLtVersion, + boolean shouldUseMixedInstancesPolicy) { + + ResponseLaunchTemplateData sourceLtData = sourceLtVersion.getLaunchTemplateData(); + + RequestLaunchTemplateData request = + new RequestLaunchTemplateData() + .withImageId( + modifyDesc.getImageId() != null + ? modifyDesc.getImageId() + : sourceLtData.getImageId()) + .withKernelId( + StringUtils.isNotBlank(modifyDesc.getKernelId()) + ? modifyDesc.getKernelId() + : sourceLtData.getKernelId()) + .withInstanceType( + StringUtils.isNotBlank(modifyDesc.getInstanceType()) + ? modifyDesc.getInstanceType() + : sourceLtData.getInstanceType()) + .withRamDiskId( + StringUtils.isNotBlank(modifyDesc.getRamdiskId()) + ? 
modifyDesc.getRamdiskId() + : sourceLtData.getRamDiskId()) + .withEbsOptimized( + Optional.ofNullable(modifyDesc.getEbsOptimized()) + .orElseGet(sourceLtData::getEbsOptimized)); + + // key name + if (StringUtils.isNotBlank(modifyDesc.getKeyPair())) { + request.withKeyName(modifyDesc.getKeyPair()); + } else if (StringUtils.isNotBlank(sourceLtData.getKeyName())) { + request.withKeyName(sourceLtData.getKeyName()); + } + + // iam instance profile + if (StringUtils.isNotBlank(modifyDesc.getIamRole())) { + request.withIamInstanceProfile( + new LaunchTemplateIamInstanceProfileSpecificationRequest() + .withName(modifyDesc.getIamRole())); + } else if (sourceLtData.getIamInstanceProfile() != null + && StringUtils.isNotBlank(sourceLtData.getIamInstanceProfile().getName())) { + request.withIamInstanceProfile( + new LaunchTemplateIamInstanceProfileSpecificationRequest() + .withName(sourceLtData.getIamInstanceProfile().getName())); + } + + // instance monitoring + if (modifyDesc.getInstanceMonitoring() != null) { + request.setMonitoring( + new LaunchTemplatesMonitoringRequest().withEnabled(modifyDesc.getInstanceMonitoring())); + } else if (sourceLtData.getMonitoring() != null + && sourceLtData.getMonitoring().getEnabled() != null) { + request.setMonitoring( + new LaunchTemplatesMonitoringRequest() + .withEnabled(sourceLtData.getMonitoring().getEnabled())); + } + + // block device mappings + if (modifyDesc.getBlockDevices() != null) { + request.setBlockDeviceMappings(buildDeviceMapping(modifyDesc.getBlockDevices())); + } else if (sourceLtData.getBlockDeviceMappings() != null) { + request.setBlockDeviceMappings( + buildDeviceMapping( + AsgConfigHelper.transformLaunchTemplateBlockDeviceMapping( + sourceLtData.getBlockDeviceMappings()))); + } + + // tags + Optional tagSpecification = + tagSpecification(amazonResourceTaggers, null, modifyDesc.getAsgName()); + if (tagSpecification.isPresent()) { + request = request.withTagSpecifications(tagSpecification.get()); + } + + /* + Copy over the original user data only if the UserDataProviders behavior is disabled. + This is to avoid having duplicate user data. + */ + String base64UserData = + (localFileUserDataProperties != null && !localFileUserDataProperties.isEnabled()) + ? sourceLtData.getUserData() + : null; + setUserData( + request, + modifyDesc.getAsgName(), + sourceLtVersion.getLaunchTemplateName(), + modifyDesc.getRegion(), + modifyDesc.getAccount(), + credentials.getEnvironment(), + credentials.getAccountType(), + modifyDesc.getIamRole(), + modifyDesc.getImageId(), + base64UserData, + modifyDesc.getLegacyUdf(), + modifyDesc.getUserDataOverride()); + + // metadata options + if (modifyDesc.getRequireIMDV2() != null) { + request.setMetadataOptions( + new LaunchTemplateInstanceMetadataOptionsRequest() + .withHttpTokens(modifyDesc.getRequireIMDV2() ? 
"required" : "")); + } else if (sourceLtData.getMetadataOptions() != null) { + request.setMetadataOptions( + new LaunchTemplateInstanceMetadataOptionsRequest() + .withHttpTokens(sourceLtData.getMetadataOptions().getHttpTokens())); + } + + // set instance market options only when mixed instances policy is NOT used in order to maintain + // launch template compatibility + if (!shouldUseMixedInstancesPolicy) { + setSpotInstanceMarketOptions(request, modifyDesc.getSpotPrice()); + } + + // credit specification + if (modifyDesc.getUnlimitedCpuCredits() != null) { + // compatibility is already validated by validator + setCreditSpecification(request, modifyDesc.getUnlimitedCpuCredits()); + } else if (sourceLtData.getCreditSpecification() != null) { + // The description might include changed instance types. + // Ensure compatibility before using value from sourceLtData. + Boolean unlimitedCpuCreditsFromSrcAsg = + AsgConfigHelper.getUnlimitedCpuCreditsFromAncestorLt( + sourceLtData.getCreditSpecification(), + InstanceTypeUtils.isBurstingSupportedByAllTypes(modifyDesc.getAllInstanceTypes())); + setCreditSpecification(request, unlimitedCpuCreditsFromSrcAsg); + } + + // network interfaces + LaunchTemplateInstanceNetworkInterfaceSpecification defaultInterface; + if (sourceLtData.getNetworkInterfaces() != null + && !sourceLtData.getNetworkInterfaces().isEmpty()) { + defaultInterface = + sourceLtData.getNetworkInterfaces().stream() + .filter(i -> i.getDeviceIndex() == 0) + .findFirst() + .orElseGet(LaunchTemplateInstanceNetworkInterfaceSpecification::new); + } else { + defaultInterface = new LaunchTemplateInstanceNetworkInterfaceSpecification(); + } + + request.withNetworkInterfaces( + new LaunchTemplateInstanceNetworkInterfaceSpecificationRequest() + .withAssociatePublicIpAddress( + Optional.ofNullable(modifyDesc.getAssociatePublicIpAddress()) + .orElseGet(() -> defaultInterface.getAssociatePublicIpAddress())) + .withIpv6AddressCount( + modifyDesc.getAssociateIPv6Address() != null + ? modifyDesc.getAssociateIPv6Address() ? 1 : 0 + : defaultInterface.getIpv6AddressCount() != null + && defaultInterface.getIpv6AddressCount() > 0 + ? 1 + : 0) + .withGroups( + modifyDesc.getSecurityGroups() != null && !modifyDesc.getSecurityGroups().isEmpty() + ? 
modifyDesc.getSecurityGroups() + : defaultInterface.getGroups()) + .withDeviceIndex(0)); + + // Nitro Enclave options + if (modifyDesc.getEnableEnclave() != null) { + request.setEnclaveOptions( + new LaunchTemplateEnclaveOptionsRequest().withEnabled(modifyDesc.getEnableEnclave())); + } else if (sourceLtData.getEnclaveOptions() != null) { + request.setEnclaveOptions( + new LaunchTemplateEnclaveOptionsRequest() + .withEnabled(sourceLtData.getEnclaveOptions().getEnabled())); + } + + return request; + } + + /** Build launch template data for new launch template creation */ + private RequestLaunchTemplateData buildLaunchTemplateData( + AsgConfiguration asgConfig, String asgName, String launchTemplateName) { + RequestLaunchTemplateData request = + new RequestLaunchTemplateData() + .withImageId(asgConfig.getAmi()) + .withKernelId(asgConfig.getKernelId()) + .withInstanceType(asgConfig.getInstanceType()) + .withRamDiskId(asgConfig.getRamdiskId()) + .withEbsOptimized(asgConfig.getEbsOptimized()) + .withKeyName(asgConfig.getKeyPair()) + .withIamInstanceProfile( + new LaunchTemplateIamInstanceProfileSpecificationRequest() + .withName(asgConfig.getIamRole())) + .withMonitoring( + new LaunchTemplatesMonitoringRequest() + .withEnabled(asgConfig.getInstanceMonitoring())); + + Optional tagSpecification = + tagSpecification(amazonResourceTaggers, asgConfig.getBlockDeviceTags(), asgName); + if (tagSpecification.isPresent()) { + request = request.withTagSpecifications(tagSpecification.get()); + } + + if (asgConfig.getPlacement() != null) { + request = + request.withPlacement( + new LaunchTemplatePlacementRequest() + .withAffinity(asgConfig.getPlacement().getAffinity()) + .withAvailabilityZone(asgConfig.getPlacement().getAvailabilityZone()) + .withGroupName(asgConfig.getPlacement().getGroupName()) + .withHostId(asgConfig.getPlacement().getHostId()) + .withTenancy(asgConfig.getPlacement().getTenancy()) + .withHostResourceGroupArn(asgConfig.getPlacement().getHostResourceGroupArn()) + .withPartitionNumber(asgConfig.getPlacement().getPartitionNumber()) + .withSpreadDomain(asgConfig.getPlacement().getSpreadDomain())); + } + + if (asgConfig.getLicenseSpecifications() != null) { + request = + request.withLicenseSpecifications( + asgConfig.getLicenseSpecifications().stream() + .map( + licenseSpecification -> + new LaunchTemplateLicenseConfigurationRequest() + .withLicenseConfigurationArn(licenseSpecification.getArn())) + .collect(Collectors.toList())); + } + + setUserData( + request, + asgName, + launchTemplateName, + asgConfig.getRegion(), + asgConfig.getCredentials().getName(), + asgConfig.getCredentials().getEnvironment(), + asgConfig.getCredentials().getAccountType(), + asgConfig.getIamRole(), + asgConfig.getAmi(), + asgConfig.getBase64UserData(), + asgConfig.getLegacyUdf(), + asgConfig.getUserDataOverride()); + + // metadata options + if (asgConfig.getRequireIMDSv2() != null && asgConfig.getRequireIMDSv2()) { + request.setMetadataOptions( + new LaunchTemplateInstanceMetadataOptionsRequest().withHttpTokens("required")); + } + + // set instance market options only when mixed instances policy is NOT used in order to maintain + // launch template compatibility + if (!asgConfig.shouldUseMixedInstancesPolicy()) { + setSpotInstanceMarketOptions(request, asgConfig.getSpotMaxPrice()); + } + + setCreditSpecification(request, asgConfig.getUnlimitedCpuCredits()); + + // network interfaces + request.withNetworkInterfaces( + new LaunchTemplateInstanceNetworkInterfaceSpecificationRequest() + 
.withAssociatePublicIpAddress(asgConfig.getAssociatePublicIpAddress()) + .withIpv6AddressCount(asgConfig.getAssociateIPv6Address() ? 1 : 0) + .withGroups(asgConfig.getSecurityGroups()) + .withDeviceIndex(0)); + + // Nitro Enclave options + if (asgConfig.getEnableEnclave() != null) { + request.setEnclaveOptions( + new LaunchTemplateEnclaveOptionsRequest().withEnabled(asgConfig.getEnableEnclave())); + } + + // block device mappings + if (asgConfig.getBlockDevices() != null && !asgConfig.getBlockDevices().isEmpty()) { + request.setBlockDeviceMappings(buildDeviceMapping(asgConfig.getBlockDevices())); + } + + return request; + } + + /** Set credit option for burstable performance instances to 'unlimited' only if explicitly set */ + private void setCreditSpecification( + RequestLaunchTemplateData request, Boolean unlimitedCpuCredits) { + if (unlimitedCpuCredits != null) { + request.setCreditSpecification( + new CreditSpecificationRequest() + .withCpuCredits(unlimitedCpuCredits ? UNLIMITED_CPU_CREDITS : STANDARD_CPU_CREDITS)); + } + } + + /** + * Set instance market options, required when launching spot instances + * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata-instancemarketoptions.html + */ + private void setSpotInstanceMarketOptions( + RequestLaunchTemplateData request, String maxSpotPrice) { + if (maxSpotPrice != null && StringUtils.isNotEmpty(maxSpotPrice.trim())) { + request.setInstanceMarketOptions( + new LaunchTemplateInstanceMarketOptionsRequest() + .withMarketType(MarketType.Spot) + .withSpotOptions( + new LaunchTemplateSpotMarketOptionsRequest().withMaxPrice(maxSpotPrice))); + } + } + + private void setUserData( + RequestLaunchTemplateData request, + String asgName, + String launchTemplateName, + String region, + String account, + String env, + String accType, + String iamRole, + String imageId, + String base64UserData, + Boolean legacyUdf, + UserDataOverride userDataOverride) { + final UserDataInput userDataRequest = + UserDataInput.builder() + .launchTemplate(true) + .asgName(asgName) + .launchSettingName(launchTemplateName) + .region(region) + .account(account) + .environment(env) + .accountType(accType) + .iamRole(iamRole) + .imageId(imageId) + .userDataOverride(userDataOverride) + .base64UserData(base64UserData) + .legacyUdf(legacyUdf) + .build(); + + request.setUserData(userDataProviderAggregator.aggregate(userDataRequest)); + } + + private List buildDeviceMapping( + List amazonBlockDevices) { + if (amazonBlockDevices == null || amazonBlockDevices.isEmpty()) { + return null; + } + + final List mappings = new ArrayList<>(); + for (AmazonBlockDevice blockDevice : amazonBlockDevices) { + LaunchTemplateBlockDeviceMappingRequest mapping = + new LaunchTemplateBlockDeviceMappingRequest().withDeviceName(blockDevice.getDeviceName()); + if (blockDevice.getVirtualName() != null) { + mapping.setVirtualName(blockDevice.getVirtualName()); + } else { + mapping.setEbs(getLaunchTemplateEbsBlockDeviceRequest(blockDevice)); + } + + mappings.add(mapping); + } + return mappings; + } + + private LaunchTemplateEbsBlockDeviceRequest getLaunchTemplateEbsBlockDeviceRequest( + AmazonBlockDevice blockDevice) { + final LaunchTemplateEbsBlockDeviceRequest blockDeviceRequest = + new LaunchTemplateEbsBlockDeviceRequest().withVolumeSize(blockDevice.getSize()); + + if (blockDevice.getDeleteOnTermination() != null) { + blockDeviceRequest.setDeleteOnTermination(blockDevice.getDeleteOnTermination()); + } + + if (blockDevice.getVolumeType() != 
null) { + blockDeviceRequest.setVolumeType(blockDevice.getVolumeType()); + } + + if (blockDevice.getIops() != null) { + blockDeviceRequest.setIops(blockDevice.getIops()); + } + + if (blockDevice.getThroughput() != null) { + blockDeviceRequest.setThroughput(blockDevice.getThroughput()); + } + + if (blockDevice.getSnapshotId() != null) { + blockDeviceRequest.setSnapshotId(blockDevice.getSnapshotId()); + } + + if (blockDevice.getEncrypted() != null) { + blockDeviceRequest.setEncrypted(blockDevice.getEncrypted()); + } + + if (blockDevice.getKmsKeyId() != null) { + blockDeviceRequest.setKmsKeyId(blockDevice.getKmsKeyId()); + } + return blockDeviceRequest; + } + + @NotNull + private Optional tagSpecification( + Collection amazonResourceTaggers, + @Nullable Map blockDeviceTags, + @NotNull String serverGroupName) { + if (amazonResourceTaggers != null && !amazonResourceTaggers.isEmpty()) { + List volumeTags = + amazonResourceTaggers.stream() + .flatMap(t -> t.volumeTags(blockDeviceTags, serverGroupName).stream()) + .map(t -> new Tag(t.getKey(), t.getValue())) + .collect(Collectors.toList()); + + if (!volumeTags.isEmpty()) { + return Optional.of( + new LaunchTemplateTagSpecificationRequest() + .withResourceType("volume") + .withTags(volumeTags)); + } + } + + return Optional.empty(); + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgentSpec.groovy index bc72777abf2..87e821796b4 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupAlarmsAgentSpec.groovy @@ -25,7 +25,7 @@ import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult import com.amazonaws.services.cloudwatch.model.MetricAlarm import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsRepository import org.joda.time.DateTime import spock.lang.Shared import spock.lang.Specification @@ -40,7 +40,7 @@ class CleanupAlarmsAgentSpec extends Specification { AmazonCloudWatch cloudWatchUSW AmazonCloudWatch cloudWatchUSE AmazonClientProvider amazonClientProvider - AccountCredentialsRepository accountCredentialsRepository + CredentialsRepository credentialsRepository CleanupAlarmsAgent agent String validUuid = UUID.randomUUID().toString() String deletableAlarmName = "clouddriver-test-v123-alarm-" + validUuid @@ -59,12 +59,12 @@ class CleanupAlarmsAgentSpec extends Specification { 0 * _ } - accountCredentialsRepository = Mock(AccountCredentialsRepository) { + credentialsRepository = Mock(CredentialsRepository) { 1 * getAll() >> [test] 0 * _ } - agent = new CleanupAlarmsAgent(amazonClientProvider, accountCredentialsRepository, 10L, 10L, 90) + agent = new CleanupAlarmsAgent(amazonClientProvider, credentialsRepository, 10L, 10L, 90, ".+-v[0-9]{3}-alarm-.+") } void "should run across all regions/accounts and delete in each"() { @@ -128,9 +128,55 @@ class CleanupAlarmsAgentSpec extends Specification { } + void "should delete alarms that match a user defined pattern"() { + agent = new CleanupAlarmsAgent(amazonClientProvider, credentialsRepository, 10L, 10L, 90, ".+-v[0-9]{3}-CustomAlarm-.+") + + given: + MetricAlarm alarmA = 
buildAlarm("some-other-v000-CustomAlarm-${validUuid}", 91) + MetricAlarm alarmB = buildAlarm("some-other-alarm-v000-${validUuid}", 91) // missing "-alarm-" + + when: + agent.run() + + then: + 1 * autoScalingUSE.describePolicies() >> new DescribePoliciesResult() + 1 * cloudWatchUSE.describeAlarms(_) >> new DescribeAlarmsResult() + 1 * autoScalingUSW.describePolicies() >> new DescribePoliciesResult() + 1 * cloudWatchUSW.describeAlarms(_) >> new DescribeAlarmsResult().withMetricAlarms([alarmA, alarmB]) + 1 * cloudWatchUSW.deleteAlarms({ DeleteAlarmsRequest request -> + request.alarmNames == ["some-other-v000-CustomAlarm-${validUuid}"] + }) + 0 * cloudWatchUSW.deleteAlarms({ DeleteAlarmsRequest request -> + request.alarmNames == ["some-other-alarm-v000-${validUuid}"] + }) + } + + + void "should delete alarms that match a user defined multiple pattern"() { + agent = new CleanupAlarmsAgent(amazonClientProvider, credentialsRepository, 10L, 10L, 90, ".+-v[0-9]{3}-CustomAlarm-.+|^some-other-alarm-v[0-9]{3}-.+") + + given: + MetricAlarm alarmA = buildAlarm("some-other-v000-CustomAlarm-${validUuid}", 91) + MetricAlarm alarmB = buildAlarm("some-other-alarm-v000-${validUuid}", 91) // missing "-alarm-" + + when: + agent.run() + + then: + 1 * autoScalingUSE.describePolicies() >> new DescribePoliciesResult() + 1 * cloudWatchUSE.describeAlarms(_) >> new DescribeAlarmsResult() + 1 * autoScalingUSW.describePolicies() >> new DescribePoliciesResult() + 1 * cloudWatchUSW.describeAlarms(_) >> new DescribeAlarmsResult().withMetricAlarms([alarmA, alarmB]) + 1 * cloudWatchUSW.deleteAlarms({ DeleteAlarmsRequest request -> + request.alarmNames == ["some-other-v000-CustomAlarm-${validUuid}", "some-other-alarm-v000-${validUuid}"] + }) + } private static MetricAlarm buildAlarm(String name, int dataDays) { new MetricAlarm(alarmName: name, stateUpdatedTimestamp: DateTime.now().minusDays(dataDays).toDate()) } + + + } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgentSpec.groovy index 1a9c3177ffd..50f58699e00 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/CleanupDetachedInstancesAgentSpec.groovy @@ -17,17 +17,12 @@ package com.netflix.spinnaker.clouddriver.aws.agent import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.DescribeInstancesRequest -import com.amazonaws.services.ec2.model.DescribeInstancesResult -import com.amazonaws.services.ec2.model.Instance -import com.amazonaws.services.ec2.model.InstanceState -import com.amazonaws.services.ec2.model.Reservation -import com.amazonaws.services.ec2.model.Tag -import com.amazonaws.services.ec2.model.TerminateInstancesRequest +import com.amazonaws.services.ec2.model.* import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DetachInstancesAtomicOperation -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import 
spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -46,12 +41,10 @@ class CleanupDetachedInstancesAgentSpec extends Specification { 1 * getAmazonEC2(test, "us-east-1", true) >> { amazonEC2USE } 0 * _ } - - def accountCredentialsRepository = Mock(AccountCredentialsRepository) { - 1 * getAll() >> [test] - 0 * _ + CredentialsRepository credentialsRepository = Stub(CredentialsRepository) { + getAll() >> [test] } - def agent = new CleanupDetachedInstancesAgent(amazonClientProvider, accountCredentialsRepository) + def agent = new CleanupDetachedInstancesAgent(amazonClientProvider, credentialsRepository) when: agent.run() diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgentSpec.groovy index c79892c58a6..4d646e9ecbd 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/agent/ReconcileClassicLinkSecurityGroupsAgentSpec.groovy @@ -32,6 +32,7 @@ import spock.lang.Specification import java.time.Clock import java.time.Instant import java.time.ZoneId +import java.time.temporal.ChronoUnit /** * ReconcileClassicLinkSecurityGroupsAgentSpec. @@ -53,8 +54,12 @@ class ReconcileClassicLinkSecurityGroupsAgentSpec extends Specification { def agent = buildAgent(test) + // We convert this to a Date in "should filter instances that havent been up + // long enough", but Date objects can't store nanosecond precision, meaning + // our before/after calculations are off by nanoseconds. Just truncate this + // down to something a Date can handle. 
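+ // Minimal sketch of the precision loss (illustrative only, not part of this spec):
+ //   Instant now = Instant.now();       // may carry nanoseconds, e.g. ...12:00:00.123456789Z
+ //   Date asDate = Date.from(now);      // java.util.Date stores milliseconds only
+ //   asDate.toInstant().equals(now);    // false whenever sub-millisecond digits were present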
@Shared - Instant currentTime = Instant.now() + Instant currentTime = Instant.now().truncatedTo(ChronoUnit.MILLIS) private ReconcileClassicLinkSecurityGroupsAgent buildAgent(NetflixAmazonCredentials account) { return new ReconcileClassicLinkSecurityGroupsAgent( diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/cache/KeysSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/cache/KeysSpec.groovy index 12b6062a6cb..76aac074f5d 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/cache/KeysSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/cache/KeysSpec.groovy @@ -29,7 +29,28 @@ class KeysSpec extends Specification { where: - key | namespace - "aws:securityGroups:appname:appname-stack-detail:test:us-west-1:appname-stack-detail-v000:stack:detail:000" | Keys.Namespace.SECURITY_GROUPS + key | namespace + "aws:securityGroups:app-stack-detail:sg-12345:us-west-2:0123456789:vpc-1234" | Keys.Namespace.SECURITY_GROUPS + } + + @Unroll + def 'parse security group keys with special characters'() { + given: + def parsedKey = Keys.parse(key) + + expect: + with(parsedKey) { + name == expectedName + application == expectedApp + } + + where: + + key || expectedName | expectedApp + "aws:securityGroups:app-stack-detail:sg-12345:us-west-2:0123456789:vpc-1234" || 'app-stack-detail' | 'app' + "aws:securityGroups:app:stack%detail:sg-12345:us-west-2:0123456789:vpc-1234" || 'app:stack%detail' | null + "aws:securityGroups:app:stack:detail:sg-12345:us-west-2:0123456789:vpc-1234" || 'app:stack:detail' | null + "aws:securityGroups:app%stack%detail:sg-12345:us-west-2:0123456789:vpc-1234" || 'app%stack%detail' | null + "aws:securityGroups:app%stack:detail:sg-12345:us-west-2:0123456789:vpc-1234" || 'app%stack:detail' | null } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonClusterControllerSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonClusterControllerSpec.groovy index f9a42e0a5ad..6ab4843285a 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonClusterControllerSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonClusterControllerSpec.groovy @@ -23,6 +23,7 @@ import com.amazonaws.services.autoscaling.model.DescribeScalingActivitiesResult import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.http.HttpStatus import spock.lang.Specification import spock.lang.Subject @@ -34,13 +35,14 @@ class AmazonClusterControllerSpec extends Specification { void "should perform real-time AWS call for auto-scaling activities"() { setup: def creds = Stub(NetflixAmazonCredentials) - def credsProvider = Stub(AccountCredentialsProvider) - credsProvider.getCredentials(account) >> creds + def credsProvider = Stub(CredentialsRepository) { + getOne(account) >> creds + } def autoScaling = Mock(AmazonAutoScaling) def provider = Stub(AmazonClientProvider) provider.getAutoScaling(creds, region) >> autoScaling controller.amazonClientProvider = provider - controller.accountCredentialsProvider = credsProvider + controller.credentialsRepository = 
credsProvider when: def result = controller.getScalingActivities(account, asgName, region) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonNamedImageLookupControllerSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonNamedImageLookupControllerSpec.groovy index 477637b956f..f19da8ae3fa 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonNamedImageLookupControllerSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/AmazonNamedImageLookupControllerSpec.groovy @@ -18,13 +18,20 @@ package com.netflix.spinnaker.clouddriver.aws.controllers import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.aws.controllers.AmazonNamedImageLookupController.LookupOptions +import com.netflix.spinnaker.clouddriver.aws.data.Keys import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException import spock.lang.Specification import spock.lang.Unroll import javax.servlet.http.HttpServletRequest +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.NAMED_IMAGES + class AmazonNamedImageLookupControllerSpec extends Specification { void "should extract tags from query parameters"() { @@ -93,6 +100,211 @@ class AmazonNamedImageLookupControllerSpec extends Specification { query << ["ami_", "ami-12345678", "sami", "ami_12345678", "ami-1234567890abcdef0"] } + void "find by ami id interacts with the cache as expected"() { + given: + def httpServletRequest = httpServletRequest([:]) + Cache cacheView = Mock(Cache) + def controller = new AmazonNamedImageLookupController(cacheView) + def amiId = 'ami-12345678' + def amiName = 'myAmi' + def account = 'account' + def region = 'region' + def imageTags = [att1: 'value1', att2: 'value2'] + def query = amiId + // Yes, this is insanely detailed, but it's what the render method expects + // (and what ImageCachingAgent provides). 
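+ // For orientation, the keys built below look roughly like this (values and exact
+ // segment order are illustrative; Keys owns the real format):
+ //   imageId      ~ "aws:images:account:region:ami-12345678"
+ //   namedImageId ~ "aws:namedImages:account:myAmi"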
+ def imageId = Keys.getImageKey(amiId, account, region) + def namedImageId = Keys.getNamedImageKey(account, amiName) + def tagsAsAttributes = imageTags.collect { key, value -> [key: key, value: value] } + def virtualizationType = 'virtualizationType' + def architecture = 'architecture' + def creationDate = 'creationDate' + def Collection imageCacheData = [new DefaultCacheData(imageId, + [name: amiName, + tags: tagsAsAttributes, + imageId: amiId, + virtualizationType: virtualizationType, + architecture: architecture, + creationDate: creationDate], + [(NAMED_IMAGES.ns): [namedImageId]])] + + when: + List results = controller.list(new LookupOptions(q: query), httpServletRequest) + + then: + // Expect no identifier lookup by name since we query for an AMI id + 0 * cacheView.filterIdentifiers(NAMED_IMAGES.ns, _) + + // Expect to lookup image identifiers + 1 * cacheView.filterIdentifiers(IMAGES.ns, _) >> [amiId] + + // Expect a lookup by name, but with no items to look in since we query for + // an AMI id + 1 * cacheView.getAll(NAMED_IMAGES.ns, [], _) >> [] + + // Expect a lookup by image + 1 * cacheView.getAll(IMAGES.ns, _) >> imageCacheData + + // And then in render, expect another image lookup, this time with no images + // to look in because we're not querying by name. + 1 * cacheView.getAll(IMAGES.ns, []) >> [] + + and: + results.size() == 1 + with(results[0]) { + imageName == amiName + attributes == [virtualizationType: virtualizationType, creationDate: creationDate, architecture: architecture] + tagsByImageId == [(amiId): imageTags] + accounts == [account] as Set + amis == [region: [amiId] as Set] + tags == imageTags + } + } + + void "find by name interacts with the cache as expected"() { + given: + def httpServletRequest = httpServletRequest([:]) + Cache cacheView = Mock(Cache) + def controller = new AmazonNamedImageLookupController(cacheView) + def amiId = 'ami-12345678' + def amiName = 'myAmi' + def account = 'account' + def region = 'region' + def imageTags = [att1: 'value1', att2: 'value2'] + def query = amiName + // Yes, this is insanely detailed, but it's what the render method expects + // (and what ImageCachingAgent provides). 
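+ // Flow sketch for a name query: filterIdentifiers(NAMED_IMAGES) matches the name,
+ // getAll(NAMED_IMAGES) loads the named-image entry, and render() then resolves the
+ // related IMAGES entries to populate per-AMI tags.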
+ def imageId = Keys.getImageKey(amiId, account, region) + def namedImageId = Keys.getNamedImageKey(account, amiName) + def tagsAsAttributes = imageTags.collect { key, value -> [key: key, value: value] } + def Collection imageCacheData = [new DefaultCacheData(imageId, + [name: amiName, + tags: tagsAsAttributes, + imageId: amiId], + [(NAMED_IMAGES.ns): [namedImageId]])] + def namedImageCacheAttributes = [name: amiName, + virtualizationType: 'hvm', // arbitrary + creationDate: '2021-08-03T22:27:50.000Z'] // arbitrary + + def Collection namedImageCacheData = [new DefaultCacheData(namedImageId, + namedImageCacheAttributes, + [(IMAGES.ns): [imageId]])] + + when: + List results = controller.list(new LookupOptions(q: query), httpServletRequest) + + then: + // Expect an identifier lookup by name + 1 * cacheView.filterIdentifiers(NAMED_IMAGES.ns, _) >> [amiName] + + // Expect no image identifiers since the identifier lookup by name returned + // something + 0 * cacheView.filterIdentifiers(IMAGES.ns, _) + + // Expect a lookup by name, with the one available name + 1 * cacheView.getAll(NAMED_IMAGES.ns, [amiName], _) >> namedImageCacheData + + // Expect a lookup by image, but with no items to look in since the + // identifier lookup by name returned something + 1 * cacheView.getAll(IMAGES.ns, []) >> [] + + // And then in render, expect another image lookup, this time with an image + // id because our named image is related to a "real" image. + 1 * cacheView.getAll(IMAGES.ns, [imageId]) >> imageCacheData + + and: + results.size() == 1 + with(results[0]) { + imageName == amiName + // When there's a named image that matches the given query, these are the + // attributes that render populates. + attributes == namedImageCacheAttributes - [name: amiName] + tagsByImageId == [(amiId): imageTags] + accounts == [account] as Set + amis == [region: [amiId] as Set] + // When there's a named image that matches the given query, render doesn't + // currently populate tags, only tagsByImageId, as tags is deprecated. + tags == [:] + } + } + + void "find by name when two amis in the same region have the same name"() { + given: + def httpServletRequest = httpServletRequest([:]) + Cache cacheView = Mock(Cache) + def controller = new AmazonNamedImageLookupController(cacheView) + def amiIdOne = 'ami-12345678' + def amiIdTwo = 'ami-5678abcd' + def amiName = 'myAmi' + def account = 'account' + def region = 'region' + def imageOneTags = [att1: 'value1', att2: 'value2'] + def imageTwoTags = [att3: 'value3', att3: 'value3'] + def query = amiName + // Yes, this is insanely detailed, but it's what the render method expects + // (and what ImageCachingAgent provides). 
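+ // One namedImages entry relates to two images entries here, so render() is expected
+ // to merge both AMIs into a single result, keeping tags separate per AMI in tagsByImageId.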
+ def imageIdOne = Keys.getImageKey(amiIdOne, account, region) + def imageIdTwo = Keys.getImageKey(amiIdTwo, account, region) + def namedImageId = Keys.getNamedImageKey(account, amiName) + def tagsAsAttributesOne = imageOneTags.collect { key, value -> [key: key, value: value] } + def tagsAsAttributesTwo = imageTwoTags.collect { key, value -> [key: key, value: value] } + def Collection imageCacheData = [new DefaultCacheData(imageIdOne, + [name: amiName, + tags: tagsAsAttributesOne, + imageId: amiIdOne], + [(NAMED_IMAGES.ns): [namedImageId]]), + new DefaultCacheData(imageIdTwo, + [name: amiName, + tags: tagsAsAttributesTwo, + imageId: amiIdTwo], + [(NAMED_IMAGES.ns): [namedImageId]])] + + def namedImageCacheAttributes = [name: amiName, + virtualizationType: 'hvm', // arbitrary + creationDate: '2021-08-03T22:27:50.000Z'] // arbitrary + + def Collection namedImageCacheData = [new DefaultCacheData(namedImageId, + namedImageCacheAttributes, + [(IMAGES.ns): [imageIdOne, imageIdTwo]])] + + when: + List results = controller.list(new LookupOptions(q: query), httpServletRequest) + + then: + // Expect an identifier lookup by name + 1 * cacheView.filterIdentifiers(NAMED_IMAGES.ns, _) >> [amiName] + + // Expect no image identifiers since the identifier lookup by name returned + // something + 0 * cacheView.filterIdentifiers(IMAGES.ns, _) + + // Expect a lookup by name, with the one available name + 1 * cacheView.getAll(NAMED_IMAGES.ns, [amiName], _) >> namedImageCacheData + + // Expect a lookup by image, but with no items to look in since the + // identifier lookup by name returned something + 1 * cacheView.getAll(IMAGES.ns, []) >> [] + + // And then in render, expect another image lookup, this time with an image + // id because our named image is related to at least one "real" image. + 1 * cacheView.getAll(IMAGES.ns, [imageIdOne, imageIdTwo]) >> imageCacheData + + and: + results.size() == 1 + with(results[0]) { + imageName == amiName + // When there's a named image that matches the given query, these are the + // attributes that render populates. + attributes == namedImageCacheAttributes - [name: amiName] + tagsByImageId == [(amiIdOne): imageOneTags, (amiIdTwo): imageTwoTags] + accounts == [account] as Set + amis == [region: [amiIdOne, amiIdTwo] as Set] + // When there's a named image that matches the given query, render doesn't + // currently populate tags, only tagsByImageId, as tags is deprecated. + tags == [:] + } + } + private HttpServletRequest httpServletRequest(Map tagFilters) { return Mock(HttpServletRequest) { getParameterNames() >> { diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/CloudFormationControllerSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/CloudFormationControllerSpec.groovy new file mode 100644 index 00000000000..24fe7ed43f4 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/controllers/CloudFormationControllerSpec.groovy @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.controllers + +import com.netflix.spinnaker.clouddriver.aws.model.AmazonCloudFormationStack +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonCloudFormationProvider +import org.springframework.test.web.servlet.MockMvc +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException +import org.springframework.test.web.servlet.setup.MockMvcBuilders +import spock.lang.Specification + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status + +class CloudFormationControllerSpec extends Specification { + + MockMvc mockMvc + + AmazonCloudFormationProvider cloudFormationProvider = Mock(AmazonCloudFormationProvider) + + void setup() { + mockMvc = MockMvcBuilders.standaloneSetup( + new CloudFormationController(cloudFormationProvider) + ).build() + } + + def "request a list of stacks returns all the stacks for a given account (any region)"() { + given: + def accountName = 'aws-account-name' + cloudFormationProvider.list(accountName, '*') >> [ new AmazonCloudFormationStack(accountName: accountName) ] + + when: + def results = mockMvc.perform(get("/aws/cloudFormation/stacks?accountName=$accountName")) + + then: + results.andExpect(status().is2xxSuccessful()) + results.andExpect(jsonPath('$[0].accountName').value(accountName)) + } + + def "request a list of stacks returns all the stacks for a given account filtering by region (if specified)"() { + given: + def accountName = 'aws-account-name' + def region = 'region' + cloudFormationProvider.list(accountName, region) >> [ new AmazonCloudFormationStack(accountName: accountName, region: region) ] + + when: + def results = mockMvc.perform(get("/aws/cloudFormation/stacks?accountName=$accountName®ion=$region")) + + then: + results.andExpect(status().is2xxSuccessful()) + results.andExpect(jsonPath('$[0].accountName').value(accountName)) + results.andExpect(jsonPath('$[0].region').value(region)) + } + + def "requesting a single stack by stackId"() { + given: + def stackId = "arn:cloudformation:stack/name" + cloudFormationProvider.get(stackId) >> Optional.of(new AmazonCloudFormationStack(stackId: stackId)) + + when: + def results = mockMvc.perform(get("/aws/cloudFormation/stacks/$stackId")) + + then: + results.andExpect(status().is2xxSuccessful()) + results.andExpect(jsonPath('$.stackId').value(stackId)) + } + + def "requesting a non existing stack returns a 404"() { + given: + def stackId = "arn:cloudformation:non-existing" + cloudFormationProvider.get(stackId) >> { throw new NotFoundException() } + + when: + mockMvc.perform(get("/aws/cloudFormation/stacks/$stackId")) + + then: + thrown(Exception) //loosened because we removed the dependency on spring data rest + } + +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/data/KeysSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/data/KeysSpec.groovy index cf0820fec20..e83705b78e3 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/data/KeysSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/data/KeysSpec.groovy @@ -43,6 +43,7 @@ class KeysSpec extends Specification { 
Keys.parse(Keys.getClusterKey('cluster', 'application', 'account')) == [provider: 'aws', type: Namespace.CLUSTERS.ns, cluster: 'cluster', application: 'application', account: 'account', stack: null, detail: null] Keys.parse(Keys.getClusterKey('cluster-test', 'application', 'account')) == [provider: 'aws', type: Namespace.CLUSTERS.ns, cluster: 'cluster-test', application: 'application', account: 'account', stack: 'test', detail: null] Keys.parse(Keys.getClusterKey('cluster-test-useast1', 'application', 'account')) == [provider: 'aws', type: Namespace.CLUSTERS.ns, cluster: 'cluster-test-useast1', application: 'application', account: 'account', stack: 'test', detail: 'useast1'] + Keys.parse(Keys.getNamedImageKey('account', 'imageName')) == [provider: 'aws', type: Namespace.NAMED_IMAGES.ns, account: 'account', imageName: 'imageName'] Keys.parse(Keys.getImageKey('image', 'account', 'region')) == [provider: 'aws', type: Namespace.IMAGES.ns, imageId: 'image', region: 'region', account: 'account'] Keys.parse(Keys.getInstanceHealthKey('instanceId', 'account', 'region', 'provider')) == [provider: 'aws', type: Namespace.HEALTH.ns, instanceId: 'instanceId', account: 'account', region: 'region', provider: 'provider'] Keys.parse(Keys.getLaunchConfigKey('kato-main-v056-10062014221307', 'account', 'region')) == [provider: 'aws', type: Namespace.LAUNCH_CONFIGS.ns, launchConfig: 'kato-main-v056-10062014221307', region: 'region', account: 'account', application: 'kato', stack: 'main'] @@ -50,6 +51,7 @@ class KeysSpec extends Specification { Keys.parse(Keys.getLoadBalancerKey('kato-main-frontend', 'account', 'region', null, 'classic')) == [provider: 'aws', type: Namespace.LOAD_BALANCERS.ns, loadBalancer: 'kato-main-frontend', account: 'account', region: 'region', vpcId: null, loadBalancerType: 'classic', stack: 'main', detail: 'frontend', application: 'kato'] Keys.parse(Keys.getLoadBalancerKey('kato-main-frontend', 'account', 'region', null, null)) == [provider: 'aws', type: Namespace.LOAD_BALANCERS.ns, loadBalancer: 'kato-main-frontend', account: 'account', region: 'region', vpcId: null, loadBalancerType: 'classic', stack: 'main', detail: 'frontend', application: 'kato'] Keys.parse(Keys.getLoadBalancerKey('loadBalancer', 'account', 'region', 'vpc-12345', 'application')) == [provider: 'aws', type: Namespace.LOAD_BALANCERS.ns, loadBalancer: 'loadBalancer', account: 'account', region: 'region', vpcId: 'vpc-12345', loadBalancerType: 'application', application: 'loadbalancer', stack: null, detail: null] + Keys.parse(Keys.getLaunchTemplateKey('kato-main-v056-10062014221307', 'account', 'region')) == [provider: 'aws', type: Namespace.LAUNCH_TEMPLATES.ns, launchTemplateName: 'kato-main-v056-10062014221307', region: 'region', account: 'account', application: 'kato', stack: 'main'] } def 'load balancer key backwards compatibility'() { diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolverSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolverSpec.groovy index 8564f6c5113..30ce1b061b2 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolverSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AmiIdResolverSpec.groovy @@ -27,8 +27,8 @@ import spock.lang.Specification class AmiIdResolverSpec extends Specification { - private final String amiId = 'ami-12345' - private final String accountId = '98765' + private String amiId = 'ami-12345' + 
private String accountId = '98765' def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AutoScalingWorkerUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AutoScalingWorkerUnitSpec.groovy deleted file mode 100644 index 7d403a26365..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AutoScalingWorkerUnitSpec.groovy +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.amazonaws.services.autoscaling.AmazonAutoScaling -import com.amazonaws.services.autoscaling.model.AlreadyExistsException -import com.amazonaws.services.autoscaling.model.AutoScalingGroup -import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.Subnet -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.services.AsgService -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.data.task.DefaultTask -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import org.springframework.beans.factory.annotation.Autowired -import spock.lang.Specification -import spock.lang.Unroll - -import java.time.Instant -import java.time.temporal.ChronoUnit - -class AutoScalingWorkerUnitSpec extends Specification { - - @Autowired - TaskRepository taskRepository - - def lcBuilder = Mock(LaunchConfigurationBuilder) - def asgService = Mock(AsgService) - def autoScaling = Mock(AmazonAutoScaling) - def clusterProvider = Mock(ClusterProvider) - def amazonEC2 = Mock(AmazonEC2) - def awsServerGroupNameResolver = new AWSServerGroupNameResolver('test', 'us-east-1', asgService, [clusterProvider]) - def credential = TestCredential.named('foo') - def regionScopedProvider = Stub(RegionScopedProviderFactory.RegionScopedProvider) { - getAutoScaling() >> autoScaling - getLaunchConfigurationBuilder() >> lcBuilder - getAsgService() >> asgService - getAWSServerGroupNameResolver() >> awsServerGroupNameResolver - getAmazonEC2() >> amazonEC2 - } - - def setup() { - Task task = new DefaultTask("task") - TaskRepository.threadLocalTask.set(task) - } - - @Unroll - void "deploy workflow is create launch config, create asg"() { - setup: - def launchConfigName = "launchConfig" - def mockAutoScalingWorker = Spy(AutoScalingWorker) - mockAutoScalingWorker.application = "myasg" - mockAutoScalingWorker.stack = "stack" - mockAutoScalingWorker.freeFormDetails = "details" - 
mockAutoScalingWorker.credentials = credential - mockAutoScalingWorker.regionScopedProvider = regionScopedProvider - mockAutoScalingWorker.sequence = sequence - - when: - mockAutoScalingWorker.deploy() - - then: - 1 * lcBuilder.buildLaunchConfiguration('myasg', null, _, null) >> launchConfigName - 1 * mockAutoScalingWorker.createAutoScalingGroup(expectedAsgName, launchConfigName) >> {} - (sequence == null ? 1 : 0) * clusterProvider.getCluster('myasg', 'test', 'myasg-stack-details') >> { null } - 0 * clusterProvider._ - - where: - sequence || expectedAsgName - null || "myasg-stack-details-v000" - 0 || "myasg-stack-details-v000" - 1 || "myasg-stack-details-v001" - 11 || "myasg-stack-details-v011" - 111 || "myasg-stack-details-v111" - } - - void "deploy derives name from ancestor asg and sets the ancestor asg name in the task result"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - regionScopedProvider: regionScopedProvider, - credentials: credential, - application: "myasg", - region: "us-east-1" - ) - - when: - String asgName = autoScalingWorker.deploy() - - then: - 1 * lcBuilder.buildLaunchConfiguration('myasg', null, _, null) >> 'lcName' - 1 * clusterProvider.getCluster('myasg', 'test', 'myasg') >> { - new Cluster.SimpleCluster(type: 'aws', serverGroups: [ - sG('myasg-v011', 0, 'us-east-1'), sG('myasg-v099', 1, 'us-west-1') - ]) - } - 1 * asgService.getAutoScalingGroup('myasg-v011') >> { new AutoScalingGroup() } - 1 * asgService.getAutoScalingGroup('myasg-v012') >> { new AutoScalingGroup() } - 1 * asgService.getAutoScalingGroup('myasg-v013') >> { null } - 1 * autoScaling.createAutoScalingGroup(_) - 1 * autoScaling.updateAutoScalingGroup(_) - 0 * _ - - asgName == 'myasg-v013' - awsServerGroupNameResolver.getTask().resultObjects[0].ancestorServerGroupNameByRegion.get("us-east-1") == "myasg-v011" - } - - void "does not enable metrics collection when enabledMetrics are absent"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - enabledMetrics: [], - instanceMonitoring: true, - regionScopedProvider: regionScopedProvider, - credentials: credential, - application: "myasg", - region: "us-east-1" - ) - - when: - String asgName = autoScalingWorker.deploy() - - then: - 0 * autoScaling.enableMetricsCollection(_) - } - - void "does not enable metrics collection when instanceMonitoring is set to false"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - enabledMetrics: ['GroupMinSize', 'GroupMaxSize'], - instanceMonitoring: false, - regionScopedProvider: regionScopedProvider, - credentials: credential, - application: "myasg", - region: "us-east-1" - ) - - when: - String asgName = autoScalingWorker.deploy() - - then: - 0 * autoScaling.enableMetricsCollection(_) - } - - - void "enables metrics collection for specified metrics when enabledMetrics are present"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - enabledMetrics: ['GroupMinSize', 'GroupMaxSize'], - instanceMonitoring: true, - regionScopedProvider: regionScopedProvider, - credentials: credential, - application: "myasg", - region: "us-east-1" - ) - - when: - String asgName = autoScalingWorker.deploy() - - then: - 1 * autoScaling.enableMetricsCollection({ it.metrics == ['GroupMinSize', 'GroupMaxSize'] }) - } - - void "continues if serverGroup already exists, is reasonably the same and within safety window"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - enabledMetrics: ['GroupMinSize', 'GroupMaxSize'], - instanceMonitoring: true, - regionScopedProvider: regionScopedProvider, - 
credentials: credential, - application: "myasg", - region: "us-east-1", - classicLoadBalancers: ["one", "two"] - ) - - when: - String asgName = autoScalingWorker.deploy() - - then: - noExceptionThrown() - 1 * lcBuilder.buildLaunchConfiguration('myasg', null, _, null) >> "myasg-12345" - 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } - 1 * autoScaling.describeAutoScalingGroups(_) >> { - new DescribeAutoScalingGroupsResult( - autoScalingGroups: [ - new AutoScalingGroup( - autoScalingGroupName: "myasg-v000", - launchConfigurationName: "myasg-12345", - loadBalancerNames: ["one", "two"], - createdTime: new Date() - ) - ] - ) - } - asgName == "myasg-v000" - } - - void "throws duplicate exception if existing autoscaling group was created before safety window"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - enabledMetrics: ['GroupMinSize', 'GroupMaxSize'], - instanceMonitoring: true, - regionScopedProvider: regionScopedProvider, - credentials: credential, - application: "myasg", - region: "us-east-1", - classicLoadBalancers: ["one", "two"] - ) - - when: - autoScalingWorker.deploy() - - then: - thrown(AlreadyExistsException) - 1 * lcBuilder.buildLaunchConfiguration('myasg', null, _, null) >> "myasg-12345" - _ * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } - _ * autoScaling.describeAutoScalingGroups(_) >> { - new DescribeAutoScalingGroupsResult( - autoScalingGroups: [ - new AutoScalingGroup( - autoScalingGroupName: "myasg-v000", - launchConfigurationName: "myasg-12345", - loadBalancerNames: ["one", "two"], - createdTime: new Date(Instant.now().minus(3, ChronoUnit.HOURS).toEpochMilli()) - ) - ] - ) - } - } - - void "throws duplicate exception if existing and desired autoscaling group differ settings"() { - setup: - def autoScalingWorker = new AutoScalingWorker( - enabledMetrics: ['GroupMinSize', 'GroupMaxSize'], - instanceMonitoring: true, - regionScopedProvider: regionScopedProvider, - credentials: credential, - application: "myasg", - region: "us-east-1", - classicLoadBalancers: ["one", "two"] - ) - - when: - autoScalingWorker.deploy() - - then: - thrown(AlreadyExistsException) - 1 * lcBuilder.buildLaunchConfiguration('myasg', null, _, null) >> "myasg-12345" - _ * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } - _ * autoScaling.describeAutoScalingGroups(_) >> { - new DescribeAutoScalingGroupsResult( - autoScalingGroups: [ - new AutoScalingGroup( - autoScalingGroupName: "myasg-v000", - launchConfigurationName: "different", - loadBalancerNames: ["three"], - createdTime: new Date() - ) - ] - ) - } - } - - @Unroll - void "should validate provided subnet ids against those available for subnet type"() { - given: - def autoScalingWorker = new AutoScalingWorker( - subnetIds: subnetIds - ) - - when: - def filteredSubnetIds = autoScalingWorker.getSubnetIds(allSubnets) - - then: - filteredSubnetIds == expectedSubnetIds - - when: - autoScalingWorker = new AutoScalingWorker( - subnetIds: ["invalid-subnet-id"] - ) - autoScalingWorker.getSubnetIds(allSubnets) - - then: - def e = thrown(IllegalStateException) - e.message.startsWith("One or more subnet ids are not valid (invalidSubnetIds: invalid-subnet-id") - - where: - subnetIds | allSubnets || expectedSubnetIds - ["subnet-1"] | [subnet("subnet-1"), subnet("subnet-2")] || ["subnet-1"] - null | [subnet("subnet-1"), subnet("subnet-2")] || ["subnet-1", "subnet-2"] - } - - static Subnet 
subnet(String subnetId) { - return new Subnet().withSubnetId(subnetId) - } - - static ServerGroup sG(String name, Long createdTime, String region) { - return new SimpleServerGroup(name: name, createdTime: createdTime, region: region) - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/BlockDeviceConfigSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/BlockDeviceConfigSpec.groovy deleted file mode 100644 index ba769758e6a..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/BlockDeviceConfigSpec.groovy +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class BlockDeviceConfigSpec extends Specification { - - @Shared - static def defaultBlockDevice = new AmazonBlockDevice(deviceName: "/dev/sdb", size: 40) - - @Shared - static def expectedD28xlargeBlockDevices = [ - new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0"), - new AmazonBlockDevice(deviceName: "/dev/sdc", virtualName: "ephemeral1"), - new AmazonBlockDevice(deviceName: "/dev/sdd", virtualName: "ephemeral2"), - new AmazonBlockDevice(deviceName: "/dev/sde", virtualName: "ephemeral3"), - new AmazonBlockDevice(deviceName: "/dev/sdf", virtualName: "ephemeral4"), - new AmazonBlockDevice(deviceName: "/dev/sdg", virtualName: "ephemeral5"), - new AmazonBlockDevice(deviceName: "/dev/sdh", virtualName: "ephemeral6"), - new AmazonBlockDevice(deviceName: "/dev/sdi", virtualName: "ephemeral7"), - new AmazonBlockDevice(deviceName: "/dev/sdj", virtualName: "ephemeral8"), - new AmazonBlockDevice(deviceName: "/dev/sdk", virtualName: "ephemeral9"), - new AmazonBlockDevice(deviceName: "/dev/sdl", virtualName: "ephemeral10"), - new AmazonBlockDevice(deviceName: "/dev/sdm", virtualName: "ephemeral11"), - new AmazonBlockDevice(deviceName: "/dev/sdn", virtualName: "ephemeral12"), - new AmazonBlockDevice(deviceName: "/dev/sdo", virtualName: "ephemeral13"), - new AmazonBlockDevice(deviceName: "/dev/sdp", virtualName: "ephemeral14"), - new AmazonBlockDevice(deviceName: "/dev/sdq", virtualName: "ephemeral15"), - new AmazonBlockDevice(deviceName: "/dev/sdr", virtualName: "ephemeral16"), - new AmazonBlockDevice(deviceName: "/dev/sds", virtualName: "ephemeral17"), - new AmazonBlockDevice(deviceName: "/dev/sdt", virtualName: "ephemeral18"), - new AmazonBlockDevice(deviceName: "/dev/sdu", virtualName: "ephemeral19"), - new AmazonBlockDevice(deviceName: "/dev/sdv", virtualName: "ephemeral20"), - new AmazonBlockDevice(deviceName: "/dev/sdw", virtualName: "ephemeral21"), - new AmazonBlockDevice(deviceName: "/dev/sdx", virtualName: "ephemeral22"), - new 
AmazonBlockDevice(deviceName: "/dev/sdy", virtualName: "ephemeral23"), - ] - - @Unroll - void "should return block devices for instance type"() { - - DeployDefaults deployDefaults = new DeployDefaults(unknownInstanceTypeBlockDevice: unknownInstanceTypeBlockDevice) - if (defaultVolumeType) { - deployDefaults.defaultBlockDeviceType = defaultVolumeType - } - BlockDeviceConfig blockDeviceConfig = new BlockDeviceConfig(deployDefaults) - - expect: - blockDevices == blockDeviceConfig.getBlockDevicesForInstanceType(instanceType) - - where: - unknownInstanceTypeBlockDevice | defaultVolumeType | instanceType || blockDevices - null | null | "wat" || null - defaultBlockDevice | null | "wat" || [defaultBlockDevice] - null | null | "t2.small" || [] - defaultBlockDevice | null | "t2.small" || [] - null | null | "m4.xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 80, volumeType: "standard")] - defaultBlockDevice | null | "m4.xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 80, volumeType: "standard")] - null | null | "m4.large" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 40, volumeType: "standard")] - null | null | "m4.16xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 120, volumeType: "standard")] - null | null | "c4.8xlarge" || getExpectedBlockDevicesForEbsOnly("standard") - null | null | "c5.9xlarge" || getExpectedBlockDevicesForEbsOnly("standard") - null | null | "m3.medium" || [new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0")] - null | null | "i2.2xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0"), new AmazonBlockDevice(deviceName: "/dev/sdc", virtualName: "ephemeral1")] - null | null | "d2.8xlarge" || expectedD28xlargeBlockDevices - null | "gp2" | "m4.xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 80, volumeType: defaultVolumeType)] - null | "gp2" | "c4.8xlarge" || getExpectedBlockDevicesForEbsOnly("gp2") - } - - private Collection getExpectedBlockDevicesForEbsOnly(String volumeType) { - [ - new AmazonBlockDevice(deviceName: "/dev/sdb", size: 125, volumeType: volumeType), - new AmazonBlockDevice(deviceName: "/dev/sdc", size: 125, volumeType: volumeType), - ] - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultLaunchConfigurationBuilderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultLaunchConfigurationBuilderSpec.groovy deleted file mode 100644 index 1c5a828e2a2..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/DefaultLaunchConfigurationBuilderSpec.groovy +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.amazonaws.services.autoscaling.AmazonAutoScaling -import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest -import com.netflix.spinnaker.config.AwsConfiguration -import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProvider -import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import com.netflix.spinnaker.clouddriver.aws.services.AsgService -import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService -import spock.lang.Specification -import spock.lang.Subject - -class DefaultLaunchConfigurationBuilderSpec extends Specification { - def autoScaling = Mock(AmazonAutoScaling) - def asgService = Mock(AsgService) - def securityGroupService = Mock(SecurityGroupService) - def userDataProvider = Stub(UserDataProvider) { - getUserData(_, _, _) >> 'userdata' - } - def deployDefaults = new AwsConfiguration.DeployDefaults() - - @Subject - DefaultLaunchConfigurationBuilder builder = new DefaultLaunchConfigurationBuilder(autoScaling, asgService, - securityGroupService, [userDataProvider], null, deployDefaults) - - void "should lookup security groups when provided by name"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupIdsWithSubnetPurpose(_, _) >> { groups, subnet -> - assert subnet == subnetType - groups.collectEntries { String group -> [(group): "sg-$group".toString()] } - } - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - securityGroups | expectedResolve | expectedGroups - ['named', 'sg-feef000'] | ['named'] | ['sg-feef000', 'sg-named'] - - application = 'foo' - subnetType = null - account = 'prod' - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - } - - void "should attach an existing application security group if no security groups provided"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = [] - expectedGroups = [application] - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - } - - void "should add user data to launchconfig with combination from user data provider and description"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert 
req.getUserData() == expectedUserData - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = [] - expectedGroups = [application] - expectedUserData = 'dXNlcmRhdGEKZXhwb3J0IFVTRVJEQVRBPTEK' - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - base64UserData: 'ZXhwb3J0IFVTRVJEQVRBPTEK', - securityGroups: securityGroups) - } - - void "should add user data to launchconfig with user data provider if description userdata ommitted"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.getUserData() == expectedUserData - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = [] - expectedGroups = [application] - expectedUserData = 'dXNlcmRhdGEK' - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - } - - void "should create an application security group if none exists and no security groups provided"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> null - 1 * securityGroupService.createSecurityGroup(application, subnetType) >> "sg-$application" - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = [] - expectedGroups = ["sg-$application"] - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - - } - - void "should attach classic link security group if vpc is linked"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.classicLinkVPCId == "vpc-123" - assert req.classicLinkVPCSecurityGroups == ["sg-123", "sg-456"] - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - expectedGroups = [application] - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: ["sg-000"], - classicLinkVpcId: "vpc-123", - classicLinkVpcSecurityGroups: ["sg-123", "sg-456"]) - } - - void "should try to look up classic link security group if vpc is linked"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.classicLinkVPCId == 
"vpc-123" - assert req.classicLinkVPCSecurityGroups == [] - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - expectedGroups = [application] - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: ["sg-000"], - classicLinkVpcId: "vpc-123") - } - - void "if existing requested group contains app name don't lookup/create app group"() { - given: - deployDefaults.addAppGroupToServerGroup = true - - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [(appGroup): securityGroups[0]] - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = ["sg-12345"] - appGroup = "sg-$application" - expectedGroups = securityGroups - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - } - - void "if creating an app security group would exceed the maximum number of security groups, use the provided groups"() { - given: - deployDefaults.addAppGroupToServerGroup = true - - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = ["sg-12345", "sg-23456", "sg-34567", "sg-45678", "sg-56789"] - sgResult = securityGroups.collectEntries { [(it): it] } - expectedGroups = securityGroups - appGroup = "sg-$application" - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - } - - void "should add existing app security group if configured to do so"() { - given: - deployDefaults.addAppGroupToServerGroup = true - - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [notappgroup: securityGroups[0]] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> appGroup - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = ["sg-12345"] - appGroup = "sg-$application" - expectedGroups = securityGroups + appGroup - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - } - - void "should create app security group if addAppGroupToServerGroup and no app group present"() { - given: - deployDefaults.addAppGroupToServerGroup = true - - when: - builder.buildLaunchConfiguration(application, 
subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> null - 1 * securityGroupService.createSecurityGroup(application, subnetType) >> appGroup - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = ["sg-12345"] - appGroup = "sg-$application" - expectedGroups = securityGroups + appGroup - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: securityGroups) - - } - - void "should look up and attach classic link security group if vpc is linked"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupIds(["nf-classiclink"], "vpc-123") >> ["nf-classiclink": "sg-123"] - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.classicLinkVPCId == "vpc-123" - assert req.classicLinkVPCSecurityGroups == ["sg-123"] - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - expectedGroups = [application] - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - securityGroups: ["sg-000"], - classicLinkVpcId: "vpc-123", - classicLinkVpcSecurityGroups: ["nf-classiclink"]) - } - - void "handles block device mappings"() { - when: - builder.buildLaunchConfiguration(application, subnetType, settings, null) - - then: - 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] - 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> "sg-$application" - 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> - assert req.blockDeviceMappings.size() == 2 - req.blockDeviceMappings.first().with { - assert deviceName == "/dev/sdb" - assert virtualName == 'ephemeral1' - assert ebs == null - } - req.blockDeviceMappings.last().with { - assert deviceName == '/dev/sdc' - assert virtualName == null - assert ebs.snapshotId == 's-69' - assert ebs.volumeType == 'io1' - assert ebs.deleteOnTermination == false - assert ebs.iops == 100 - assert ebs.volumeSize == 125 - assert ebs.encrypted == true - } - } - 0 * _ - - where: - application = 'foo' - subnetType = null - account = 'prod' - securityGroups = [] - expectedGroups = ["sg-$application"] - settings = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: 'prod', - region: 'us-east-1', - baseName: 'fooapp-v001', - suffix: '20150515', - blockDevices: [ - new AmazonBlockDevice(deviceName: '/dev/sdb', virtualName: 'ephemeral1'), - new AmazonBlockDevice(deviceName: "/dev/sdc", size: 125, iops: 100, deleteOnTermination: false, volumeType: 'io1', snapshotId: 's-69', encrypted: true)], - securityGroups: securityGroups) - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/InstanceTypeUtilsSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/InstanceTypeUtilsSpec.groovy new file 
mode 100644 index 00000000000..68a17a2543e --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/InstanceTypeUtilsSpec.groovy @@ -0,0 +1,220 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy + +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult +import com.amazonaws.services.ec2.model.InstanceTypeInfo +import com.amazonaws.services.ec2.model.ProcessorInfo +import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +class InstanceTypeUtilsSpec extends Specification { + + @Shared + static def defaultBlockDevice = new AmazonBlockDevice(deviceName: "/dev/sdb", size: 40) + + @Shared + static def expectedD28xlargeBlockDevices = [ + new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0"), + new AmazonBlockDevice(deviceName: "/dev/sdc", virtualName: "ephemeral1"), + new AmazonBlockDevice(deviceName: "/dev/sdd", virtualName: "ephemeral2"), + new AmazonBlockDevice(deviceName: "/dev/sde", virtualName: "ephemeral3"), + new AmazonBlockDevice(deviceName: "/dev/sdf", virtualName: "ephemeral4"), + new AmazonBlockDevice(deviceName: "/dev/sdg", virtualName: "ephemeral5"), + new AmazonBlockDevice(deviceName: "/dev/sdh", virtualName: "ephemeral6"), + new AmazonBlockDevice(deviceName: "/dev/sdi", virtualName: "ephemeral7"), + new AmazonBlockDevice(deviceName: "/dev/sdj", virtualName: "ephemeral8"), + new AmazonBlockDevice(deviceName: "/dev/sdk", virtualName: "ephemeral9"), + new AmazonBlockDevice(deviceName: "/dev/sdl", virtualName: "ephemeral10"), + new AmazonBlockDevice(deviceName: "/dev/sdm", virtualName: "ephemeral11"), + new AmazonBlockDevice(deviceName: "/dev/sdn", virtualName: "ephemeral12"), + new AmazonBlockDevice(deviceName: "/dev/sdo", virtualName: "ephemeral13"), + new AmazonBlockDevice(deviceName: "/dev/sdp", virtualName: "ephemeral14"), + new AmazonBlockDevice(deviceName: "/dev/sdq", virtualName: "ephemeral15"), + new AmazonBlockDevice(deviceName: "/dev/sdr", virtualName: "ephemeral16"), + new AmazonBlockDevice(deviceName: "/dev/sds", virtualName: "ephemeral17"), + new AmazonBlockDevice(deviceName: "/dev/sdt", virtualName: "ephemeral18"), + new AmazonBlockDevice(deviceName: "/dev/sdu", virtualName: "ephemeral19"), + new AmazonBlockDevice(deviceName: "/dev/sdv", virtualName: "ephemeral20"), + new AmazonBlockDevice(deviceName: "/dev/sdw", virtualName: "ephemeral21"), + new AmazonBlockDevice(deviceName: "/dev/sdx", virtualName: "ephemeral22"), + new AmazonBlockDevice(deviceName: "/dev/sdy", virtualName: "ephemeral23"), + ] + + @Unroll + void "should return block devices for instance type"() { + + DeployDefaults 
deployDefaults = new DeployDefaults(unknownInstanceTypeBlockDevice: unknownInstanceTypeBlockDevice) + if (defaultVolumeType) { + deployDefaults.defaultBlockDeviceType = defaultVolumeType + } + BlockDeviceConfig blockDeviceConfig = new BlockDeviceConfig(deployDefaults) + + expect: + blockDevices == blockDeviceConfig.getBlockDevicesForInstanceType(instanceType) + + where: + unknownInstanceTypeBlockDevice | defaultVolumeType | instanceType || blockDevices + null | null | "wat" || null + defaultBlockDevice | null | "wat" || [defaultBlockDevice] + null | null | "t2.small" || [] + defaultBlockDevice | null | "t2.small" || [] + null | null | "m4.xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 80, volumeType: "standard")] + defaultBlockDevice | null | "m4.xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 80, volumeType: "standard")] + null | null | "m4.large" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 40, volumeType: "standard")] + null | null | "m4.16xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 120, volumeType: "standard")] + null | null | "c4.8xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 120, volumeType: "standard")] + null | null | "c5.9xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 120, volumeType: "standard")] + null | null | "m3.medium" || [new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0")] + null | null | "i2.2xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0"), new AmazonBlockDevice(deviceName: "/dev/sdc", virtualName: "ephemeral1")] + null | null | "d2.8xlarge" || expectedD28xlargeBlockDevices + null | "gp2" | "m4.xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 80, volumeType: defaultVolumeType)] + null | "gp2" | "c4.8xlarge" || [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 120, volumeType: "gp2")] + } + + private Collection getExpectedBlockDevicesForEbsOnly(String volumeType) { + [ + new AmazonBlockDevice(deviceName: "/dev/sdb", size: 125, volumeType: volumeType), + new AmazonBlockDevice(deviceName: "/dev/sdc", size: 125, volumeType: volumeType), + ] + } + + @Unroll + def 'support for bursting is reported correctly for instance type'() { + when: + def result = InstanceTypeUtils.isBurstingSupported(instanceType) + + then: + result == expectedResult + + where: + instanceType | expectedResult + 't2.large' | true + 't3.small' | true + 't3a.micro' | true + 't4g.nano' | true + 'c3.large' | false + 'invalid' | false + null | false + '' | false + } + + @Unroll + def 'support for bursting is reported correctly for multiple instance types'() { + when: + def result = InstanceTypeUtils.isBurstingSupportedByAllTypes(instanceTypes as Set) + + then: + result == expectedResult + + where: + instanceTypes | expectedResult + ['t2.large', 't3.large'] | true + ['t3.small', 'm5.small'] | false + ['c3.large'] | false + ['t3.small', null] | false + ['t3.small', ''] | false + } + + @Unroll + def 'compatibility among ami virtualization #amiVirtualization, ami architecture #amiArchitecture and instance type is determined correctly'() { + given: + AmazonEC2 ec2 = Mock(AmazonEC2) + + when: + InstanceTypeUtils.validateCompatibilityWithAmi( + ec2, + new ResolvedAmiResult(amiId: 'ami-1', region: 'us-east-1', virtualizationType: amiVirtualization, architecture: amiArchitecture), + instanceTypes.toSet()) + + then: + 1 * ec2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [ + new InstanceTypeInfo( + instanceType: 
"test1.large", + processorInfo: new ProcessorInfo(supportedArchitectures: ["i386","x86_64"], sustainedClockSpeedInGhz: 2.8), + supportedVirtualizationTypes: ["hvm","paravirtual"], + ), + new InstanceTypeInfo( + instanceType: "test2.large", + processorInfo: new ProcessorInfo(supportedArchitectures: ["arm64", "x86_64"], sustainedClockSpeedInGhz: 2.8), + supportedVirtualizationTypes: ["hvm"], + )]) + + and: + noExceptionThrown() + + where: + amiVirtualization | amiArchitecture | instanceTypes + 'hvm' | 'x86_64' | ['test1.large', 'test2.large'] + 'paravirtual' | 'i386' | ['test1.large'] + 'hvm' | 'arm64' | ['test2.large'] + } + + @Unroll + def 'incompatible ami virtualization #amiVirtualization and instance family throws exception'() { + given: + def ec2 = Mock(AmazonEC2) + + when: + InstanceTypeUtils.validateCompatibilityWithAmi( + ec2, + new ResolvedAmiResult(amiId: 'ami-1', region: 'us-east-1', virtualizationType: amiVirtualization, architecture: amiArchitecture), + instanceTypes.toSet()) + + then: + 1 * ec2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [ + new InstanceTypeInfo( + instanceType: "test1.large", + processorInfo: new ProcessorInfo(supportedArchitectures: ["i386","x86_64"], sustainedClockSpeedInGhz: 2.8), + supportedVirtualizationTypes: ["hvm","paravirtual"], + ), + new InstanceTypeInfo( + instanceType: "test2.large", + processorInfo: new ProcessorInfo(supportedArchitectures: ["arm64", "x86_64"], sustainedClockSpeedInGhz: 2.8), + supportedVirtualizationTypes: ["hvm"], + )]) + + and: + thrown(IllegalArgumentException) + + where: + amiVirtualization | amiArchitecture | instanceTypes + 'paravirtual' | 'x86_64' | ['test1.large', 'test2.large'] + 'hvm' | 'i386' | ['test2.large'] + } + + @Unroll + def 'default ebs optimized is reported correctly for instance type'() { + expect: + InstanceTypeUtils.getDefaultEbsOptimizedFlag(instanceType) == expectedResult + + where: + instanceType | expectedResult + 'c4.small' | true + 'm4.large' | true + 'm5.large' | true + 't2.large' | false + 'c3.large' | false + 'invalid' | false + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/SimpleServerGroup.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/SimpleServerGroup.groovy deleted file mode 100644 index 12fccdf0ee9..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/SimpleServerGroup.groovy +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy - -import com.netflix.spinnaker.clouddriver.model.Instance -import com.netflix.spinnaker.clouddriver.model.ServerGroup - -class SimpleServerGroup implements ServerGroup { - String name - String type - String cloudProvider - String region - Long createdTime - Boolean disabled - Set zones - Set instances - Set loadBalancers - Set securityGroups - Map launchConfig - ServerGroup.InstanceCounts instanceCounts - ServerGroup.Capacity capacity - ServerGroup.ImageSummary imageSummary - ServerGroup.ImagesSummary imagesSummary - - @Override - Boolean isDisabled() { - return disabled - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AWSServerGroupNameResolverSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AWSServerGroupNameResolverSpec.groovy similarity index 97% rename from clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AWSServerGroupNameResolverSpec.groovy rename to clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AWSServerGroupNameResolverSpec.groovy index 76fdfc1ec2e..dde93fdc089 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AWSServerGroupNameResolverSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AWSServerGroupNameResolverSpec.groovy @@ -1,21 +1,21 @@ /* - * Copyright 2016 Netflix, Inc. + * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * */ - -package com.netflix.spinnaker.clouddriver.aws.deploy +package com.netflix.spinnaker.clouddriver.aws.deploy.asg import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.netflix.spinnaker.clouddriver.aws.services.AsgService diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgConfigHelperSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgConfigHelperSpec.groovy new file mode 100644 index 00000000000..13a347ca65f --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgConfigHelperSpec.groovy @@ -0,0 +1,353 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
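The `AsgConfigHelperSpec` that follows pins `AsgConfigHelper.clock` to `Clock.fixed(...)` in `setupSpec()` so the timestamp-derived name suffix is deterministic, then restores the system clock in `cleanupSpec()`. The underlying pattern is to route every time read through a swappable `java.time.Clock` field. A minimal sketch of that pattern; the suffix pattern below (`MMddyyyyHHmmss`, matching the shape of `10062014221307` in the fixtures) is an assumption, not the confirmed format:

```groovy
import java.time.Clock
import java.time.Instant
import java.time.ZoneId
import java.time.format.DateTimeFormatter

// Illustrative helper, not clouddriver's AsgConfigHelper: all time reads go
// through a swappable Clock field so a test can freeze it.
class NameHelper {
    static Clock clock = Clock.systemDefaultZone()

    // Assumed suffix pattern; the real one may differ.
    static String createDefaultSuffix() {
        DateTimeFormatter.ofPattern('MMddyyyyHHmmss')
                         .withZone(ZoneId.systemDefault())
                         .format(clock.instant())
    }

    static String createName(String baseName, String suffix) {
        "${baseName}-${suffix ?: createDefaultSuffix()}"
    }
}

// In a spec: pin the clock once, and name generation becomes deterministic
// even if the test takes longer than a second to run.
NameHelper.clock = Clock.fixed(Instant.now(), ZoneId.systemDefault())
assert NameHelper.createName('base', null) == NameHelper.createName('base', null)
NameHelper.clock = Clock.systemDefaultZone()   // restore, as cleanupSpec() does
```

Note the spec's comment that the pinning happens in `setupSpec` rather than `setup` because `setup` runs after the `where:` block is evaluated.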
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg + +import com.amazonaws.services.autoscaling.model.BlockDeviceMapping +import com.amazonaws.services.autoscaling.model.Ebs +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides +import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMapping +import com.amazonaws.services.ec2.model.LaunchTemplateEbsBlockDevice +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.config.AwsConfiguration +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.Clock +import java.time.Instant +import java.time.ZoneId + +class AsgConfigHelperSpec extends Specification { + def securityGroupServiceMock = Mock(SecurityGroupService) + def deployDefaults = new AwsConfiguration.DeployDefaults() + def asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .application("fooTest") + .stack("stack").build() + + void setupSpec() { + // test code shouldn't assume it will run in less than one second, so let's control the clock + // we use setupSpec rather than setup because setup is called after a where block + AsgConfigHelper.clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()) + } + + void cleanupSpec() { + AsgConfigHelper.clock = Clock.systemDefaultZone() + } + + void "should return name correctly"() { + when: + AsgConfigHelper.clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()) + def actualName = AsgConfigHelper.createName(baseName, suffix) + + //ignore the end precision for tests. + then: + actualName.contains(expectedName.substring(0, expectedName.length() - 3)) + + where: + baseName | suffix || expectedName + "base" | "suffix" || "base-suffix" + "base" | null || "base-${AsgConfigHelper.createDefaultSuffix()}" + "base" | "" || "base-${AsgConfigHelper.createDefaultSuffix()}" + } + + void "should lookup security groups when provided by name"() { + given: + asgConfig.subnetType = null + asgConfig.classicLinkVpcSecurityGroups = null + asgConfig.securityGroups = securityGroupsInput + + and: + deployDefaults.addAppGroupToServerGroup = false + + when: + def actualAsgCfg = AsgConfigHelper.setAppSecurityGroups(asgConfig, securityGroupServiceMock, deployDefaults) + + then: + 1 * securityGroupServiceMock.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroupsRet + 0 * _ + + and: + actualAsgCfg.securityGroups == expectedGroups + + where: + securityGroupsInput | securityGroupsRet || expectedGroups + ['foo'] | ['sg-12345'] || ['sg-12345'] + ['bar'] | ['sg-45678'] || ['sg-45678'] + ['foo', 'bar', 'sg-45678'] | ['sg-45678', 'sg-12345'] || ['sg-45678', 'sg-12345'] + ['bar', 'sg-45678'] | ['sg-45678'] || ['sg-45678'] + ['sg-12345'] | ['sg-12345'] || ['sg-12345'] + } + + void "should attach an existing application security group based on deploy defaults"() { + given: + asgConfig.application = "foo" + asgConfig.subnetType = null + asgConfig.classicLinkVpcSecurityGroups = null + asgConfig.securityGroups = securityGroupsInput + + and: + deployDefaults.addAppGroupToServerGroup = appGroupToServerGroup + deployDefaults.maxSecurityGroups = maxSgs + + when: + def actualAsgCfg = AsgConfigHelper.setAppSecurityGroups(asgConfig, securityGroupServiceMock, deployDefaults) + + then: + 1 * securityGroupServiceMock.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroupsRet + getNamesCount * 
securityGroupServiceMock.getSecurityGroupNamesFromIds(_) >> securityGroupNamesFromIds + 0 * _ + actualAsgCfg.securityGroups == expectedGroups + + where: + securityGroupsInput | securityGroupsRet | appGroupToServerGroup | maxSgs | securityGroupNamesFromIds | getNamesCount || expectedGroups + ['sg-12345'] | ['sg-12345'] | true | 5 | ['foo': 'sg-12345'] | 1 || ['sg-12345'] + ['foo'] | ['sg-12345'] | true | 5 | ['foo': 'sg-12345'] | 1 || ['sg-12345'] + ['sg-12345'] | ['sg-12345'] | true | 1 | _ | 0 || ['sg-12345'] + ['sg-12345'] | ['sg-12345'] | false | 5 | _ | 0 || ['sg-12345'] + } + + void "should attach application security group using subnet purpose if no security groups provided or no existing app security group found"() { + given: + asgConfig.application = "foo" + asgConfig.subnetType = null + asgConfig.classicLinkVpcSecurityGroups = null + asgConfig.securityGroups = securityGroupsInput + + and: + deployDefaults.addAppGroupToServerGroup = true + deployDefaults.maxSecurityGroups = 5 + + when: + def actualAsgCfg = AsgConfigHelper.setAppSecurityGroups(asgConfig, securityGroupServiceMock, deployDefaults) + + then: + 1 * securityGroupServiceMock.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroupsRet + 1 * securityGroupServiceMock.getSecurityGroupNamesFromIds(_) >> securityGroupNamesFromIds + 1 * securityGroupServiceMock.getSecurityGroupForApplication(_, _) >> 'sg-12345' + actualAsgCfg.securityGroups == expectedGroups + 0 * _ + + where: + securityGroupsInput | securityGroupsRet | securityGroupNamesFromIds || expectedGroups + null | [] | [:] || ['sg-12345'] + [] | [] | [:] || ['sg-12345'] + ['sg-45678'] | ['sg-45678'] | ['bar': 'sg-45678'] || ['sg-45678', 'sg-12345'] + ['bar'] | ['sg-45678'] | ['bar': 'sg-45678'] || ['sg-45678', 'sg-12345'] + } + + void "should create an application security group conditionally"() { + given: + asgConfig.application = "foo" + asgConfig.subnetType = null + asgConfig.classicLinkVpcSecurityGroups = null + asgConfig.securityGroups = securityGroupsInput + + and: + deployDefaults.addAppGroupToServerGroup = true + deployDefaults.maxSecurityGroups = 5 + + when: + def actualAsgCfg = AsgConfigHelper.setAppSecurityGroups(asgConfig, securityGroupServiceMock, deployDefaults) + + then: + 1 * securityGroupServiceMock.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroupsRet + 1 * securityGroupServiceMock.getSecurityGroupNamesFromIds(_) >> securityGroupNamesFromIds + 1 * securityGroupServiceMock.getSecurityGroupForApplication(_, _) >> null + 1 * securityGroupServiceMock.createSecurityGroup(_, _) >> 'sg-000-new' + actualAsgCfg.securityGroups == expectedGroups + 0 * _ + + where: + securityGroupsInput | securityGroupsRet | securityGroupNamesFromIds || expectedGroups + null | [] | [:] || ['sg-000-new'] + [] | [] | [:] || ['sg-000-new'] + ['sg-45678'] | ['sg-45678'] | ['bar': 'sg-45678'] || ['sg-45678', 'sg-000-new'] + ['bar'] | ['sg-45678'] | ['bar': 'sg-45678'] || ['sg-45678', 'sg-000-new'] + } + + void "throws exception if asked to attach classic link security group without providing classic link VPC id"() { + given: + asgConfig.subnetType = null + asgConfig.classicLinkVpcSecurityGroups = ["sg-12345"] + asgConfig.classicLinkVpcId = classicLinkVpcId + + when: + AsgConfigHelper.setAppSecurityGroups(asgConfig, securityGroupServiceMock, deployDefaults) + + then: + 1 * securityGroupServiceMock.resolveSecurityGroupIdsWithSubnetType(_, _) >> ["sg-12345"] + 0 * _ + + and: + def ex = thrown(IllegalStateException) + ex.message == "Can't provide classic link security groups 
without classiclink vpc Id" + + where: + classicLinkVpcId << ['', null] + } + + void "should attach classic link security group if vpc is linked"() { + given: + asgConfig.subnetType = null + asgConfig.securityGroups = ["sg-00000"] + asgConfig.classicLinkVpcId = "vpc-123" + asgConfig.classicLinkVpcSecurityGroups = classicLinkVpcSecurityGroups + + when: + def actualAsgCfg = AsgConfigHelper.setAppSecurityGroups(asgConfig, securityGroupServiceMock, deployDefaults) + + then: + 1 * securityGroupServiceMock.resolveSecurityGroupIdsWithSubnetType(_,_) >> ["sg-12345"] + callCount * securityGroupServiceMock.resolveSecurityGroupIdsInVpc(_,_) >> classicLinkVpcSGsRet + 0 * _ + actualAsgCfg.classicLinkVpcSecurityGroups == expectedClassicLinkVpcSGs + + where: + classicLinkVpcSecurityGroups | classicLinkVpcSGsRet | callCount || expectedClassicLinkVpcSGs + null | _ | 0 || null + [] | _ | 0 || [] + ['sg-45678'] | ['sg-45678'] | 1 || ['sg-45678'] + ['bar'] | ['sg-45678'] | 1 || ['sg-45678'] + ["sg-12345"] | ['sg-45678'] | 1 || ['sg-45678'] + } + + @Unroll + void "should convert launch configuration's block device mappings to AmazonBlockDevices"() { + expect: + AsgConfigHelper.transformBlockDeviceMapping([sourceDevice]) == [targetDevice] + + where: + sourceDevice || targetDevice + new BlockDeviceMapping().withDeviceName("Device1").withVirtualName("virtualName") || new AmazonBlockDevice("Device1", "virtualName", null, null, null, null, null, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withIops(500)) || new AmazonBlockDevice("Device1", null, null, null, null, 500, null, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withThroughput(250)) || new AmazonBlockDevice("Device1", null, null, null, null, null, 250, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withDeleteOnTermination(true)) || new AmazonBlockDevice("Device1", null, null, null, true, null, null, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withVolumeSize(1024)) || new AmazonBlockDevice("Device1", null, 1024, null, null, null, null, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withVolumeType("volumeType")) || new AmazonBlockDevice("Device1", null, null, "volumeType", null, null, null, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snapshotId")) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snapshotId", null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs()) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, null) + + // if snapshot is not provided, we should set encryption correctly + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withEncrypted(null)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withEncrypted(true)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, true) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withEncrypted(false)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, false) + + // if snapshot is provided, then we should use the snapshot's encryption value + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snap-123").withEncrypted(null)) || new AmazonBlockDevice("Device1", null, 
null, null, null, null, null, "snap-123", null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snap-123").withEncrypted(true)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snap-123", null) + new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snap-123").withEncrypted(false)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snap-123", null) + } + + @Unroll + void "should convert launch template block device mappings to AmazonBlockDevices"() { + expect: + AsgConfigHelper.transformLaunchTemplateBlockDeviceMapping([sourceDevice]) == [targetDevice] + + where: + sourceDevice || targetDevice + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withVirtualName("virtualName") || new AmazonBlockDevice("Device1", "virtualName", null, null, null, null, null, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withIops(500)) || new AmazonBlockDevice("Device1", null, null, null, null, 500, null, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withThroughput(250)) || new AmazonBlockDevice("Device1", null, null, null, null, null, 250, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withDeleteOnTermination(true)) || new AmazonBlockDevice("Device1", null, null, null, true, null, null, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withVolumeSize(1024)) || new AmazonBlockDevice("Device1", null, 1024, null, null, null, null, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withVolumeType("volumeType")) || new AmazonBlockDevice("Device1", null, null, "volumeType", null, null, null, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withSnapshotId("snapshotId")) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snapshotId", null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice()) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, null) + + // if snapshot is not provided, we should set encryption correctly + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withEncrypted(null)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withEncrypted(true)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, true) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withEncrypted(false)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null, false) + + // if snapshot is provided, then we should use the snapshot's encryption value + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withSnapshotId("snap-123").withEncrypted(null)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snap-123", null) + new 
LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withSnapshotId("snap-123").withEncrypted(true)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snap-123", null) + new LaunchTemplateBlockDeviceMapping().withDeviceName("Device1").withEbs(new LaunchTemplateEbsBlockDevice().withSnapshotId("snap-123").withEncrypted(false)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, "snap-123", null) + } + + @Unroll + void "should transform description overrides to launch template overrides and vice versa with correct priority"() { + + // description overrides to launch template overrides + expect: + AsgConfigHelper.getLaunchTemplateOverrides(descOverrides) == expectedLtOverrides + + // launch template overrides to description overrides + and: + AsgConfigHelper.getDescriptionOverrides(expectedLtOverrides) == descOverridesAgain + + where: + descOverrides << [ + // 1 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c4.large", weightedCapacity: "1", priority: 2)], + // 2 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 2), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.xlarge", weightedCapacity: "2", priority: 1)], + // 3 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c4.large", weightedCapacity: "1", priority: 1)], // same priority + // 4 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1"), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.xlarge", weightedCapacity: "2")], // no priority + // 5 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.2xlarge", weightedCapacity: "4"), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.xlarge", weightedCapacity: "2", priority: 2)], // mixed, some overrides have priority + ] + + expectedLtOverrides << [ + // 1 + [new LaunchTemplateOverrides().withInstanceType("c5.large").withWeightedCapacity("1"), + new LaunchTemplateOverrides().withInstanceType("c4.large").withWeightedCapacity("1")], + // 2 + [new LaunchTemplateOverrides().withInstanceType("c5.xlarge").withWeightedCapacity("2"), + new LaunchTemplateOverrides().withInstanceType("c5.large").withWeightedCapacity("1")], + // 3 + [new LaunchTemplateOverrides().withInstanceType("c5.large").withWeightedCapacity("1"), + new LaunchTemplateOverrides().withInstanceType("c4.large").withWeightedCapacity("1")], + // 4 + [new LaunchTemplateOverrides().withInstanceType("c5.large").withWeightedCapacity("1"), + new LaunchTemplateOverrides().withInstanceType("c5.xlarge").withWeightedCapacity("2")], + // 5 + [new LaunchTemplateOverrides().withInstanceType("c5.large").withWeightedCapacity("1"), + new LaunchTemplateOverrides().withInstanceType("c5.xlarge").withWeightedCapacity("2"), + new 
LaunchTemplateOverrides().withInstanceType("c5.2xlarge").withWeightedCapacity("4")], // no priority = last priority + ] + + descOverridesAgain << [ + // 1 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c4.large", weightedCapacity: "1", priority: 2)], + // 2 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.xlarge", weightedCapacity: "2", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 2)], + // 3 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c4.large", weightedCapacity: "1", priority: 2)], // same priority became sequential + // 4 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.xlarge", weightedCapacity: "2", priority: 2)], // no priority originally, now sequential + // 5 + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.xlarge", weightedCapacity: "2", priority: 2), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c5.2xlarge", weightedCapacity: "4", priority: 3)], + ] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgLifecycleHookWorkerSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgLifecycleHookWorkerSpec.groovy similarity index 97% rename from clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgLifecycleHookWorkerSpec.groovy rename to clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgLifecycleHookWorkerSpec.groovy index 6c0c771aab3..9078a2283df 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgLifecycleHookWorkerSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgLifecycleHookWorkerSpec.groovy @@ -1,19 +1,20 @@ /* - * Copyright 2016 Netflix, Inc. + * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
+ * */ -package com.netflix.spinnaker.clouddriver.aws.deploy +package com.netflix.spinnaker.clouddriver.aws.deploy.asg import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.autoscaling.model.PutLifecycleHookRequest diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgReferenceCopierSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgReferenceCopierSpec.groovy similarity index 97% rename from clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgReferenceCopierSpec.groovy rename to clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgReferenceCopierSpec.groovy index 172155859e5..d8f230ceb60 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/AsgReferenceCopierSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AsgReferenceCopierSpec.groovy @@ -1,19 +1,20 @@ /* - * Copyright 2014 Netflix, Inc. + * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * */ -package com.netflix.spinnaker.clouddriver.aws.deploy +package com.netflix.spinnaker.clouddriver.aws.deploy.asg import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.autoscaling.model.DescribeScheduledActionsRequest diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AutoScalingWorkerUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AutoScalingWorkerUnitSpec.groovy new file mode 100644 index 00000000000..01a8727e6d3 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/AutoScalingWorkerUnitSpec.groovy @@ -0,0 +1,183 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg + +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders.AsgWithLaunchConfigurationBuilder +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders.AsgWithLaunchTemplateBuilder +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders.AsgWithMixedInstancesPolicyBuilder +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Specification +import spock.lang.Unroll + +class AutoScalingWorkerUnitSpec extends Specification { + + @Autowired + TaskRepository taskRepository + + def awsServerGroupNameResolver = Mock(AWSServerGroupNameResolver) + def regionScopedProvider = Stub(RegionScopedProviderFactory.RegionScopedProvider) { + getAWSServerGroupNameResolver() >> awsServerGroupNameResolver + } + def launchTemplateRollOutConfig = Mock(LaunchTemplateRollOutConfig) + + def userDataOverride = new UserDataOverride() + def credential = TestCredential.named('foo') + def task = new DefaultTask("task") + def taskPhase = "AWS_DEPLOY" + + def setup() { + TaskRepository.threadLocalTask.set(task) + } + + @Unroll + void "deploy workflow creates asg backed by launch config"() { + setup: + def autoScalingWorker = new AutoScalingWorker(regionScopedProvider, launchTemplateRollOutConfig) + def asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .application("myasg") + .stack("stack") + .freeFormDetails("details") + .credentials(credential) + .sequence(sequence) + .userDataOverride(userDataOverride) + .ignoreSequence(ignoreSequence) + .build() + and: + def asgBuilder = Mock(AsgWithLaunchConfigurationBuilder) + regionScopedProvider.getAsgBuilderForLaunchConfiguration() >> asgBuilder + + when: + autoScalingWorker.deploy(asgConfig) + + then: + if (sequence) { + awsServerGroupNameResolver.generateServerGroupName('myasg', 'stack', 'details', sequence, ignoreSequence) >> expectedAsgName + } else { + awsServerGroupNameResolver.resolveNextServerGroupName('myasg', 'stack', 'details', ignoreSequence) >> expectedAsgName + } + 0 * awsServerGroupNameResolver._ + + and: + 1 * asgBuilder.build(task, taskPhase, expectedAsgName, asgConfig) + 0 * asgBuilder._ + + where: + sequence || expectedAsgName || ignoreSequence + null || "myasg-stack-details-v000" || true + null || "myasg-stack-details" || false + 0 || "myasg-stack-details-v000" || false + 1 || "myasg-stack-details-v001" || false + 11 || "myasg-stack-details-v011" || false + 111 || "myasg-stack-details-v111" || false + } + + @Unroll + void "deploy workflow creates asg backed by launch template if enabled"() { + setup: + def autoScalingWorker = new AutoScalingWorker(regionScopedProvider, launchTemplateRollOutConfig) + def asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .application("myasg") + .stack("stack") + .region("us-east-1") + .freeFormDetails("details") + .credentials(credential) + .sequence(sequence) + .setLaunchTemplate(true) + .ignoreSequence(ignoreSequence) + .build() + + and: + def asgBuilder = Mock(AsgWithLaunchTemplateBuilder) + regionScopedProvider.getAsgBuilderForLaunchTemplate() >> 
asgBuilder + + when: + autoScalingWorker.deploy(asgConfig) + + then: + 1 * launchTemplateRollOutConfig.shouldUseLaunchTemplateForReq("myasg", credential, "us-east-1") >> true + + and: + if (sequence) { + awsServerGroupNameResolver.generateServerGroupName('myasg', 'stack', 'details', sequence, ignoreSequence) >> expectedAsgName + } else { + awsServerGroupNameResolver.resolveNextServerGroupName('myasg', 'stack', 'details', ignoreSequence) >> expectedAsgName + } + 0 * awsServerGroupNameResolver._ + + and: + 1 * asgBuilder.build(task, taskPhase, expectedAsgName, asgConfig) + 0 * asgBuilder._ + + where: + sequence || expectedAsgName || ignoreSequence + null || "myasg-stack-details-v000" || true + null || "myasg-stack-details" || false + 0 || "myasg-stack-details-v000" || false + 1 || "myasg-stack-details-v001" || false + 11 || "myasg-stack-details-v011" || false + 111 || "myasg-stack-details-v111" || false + } + + @Unroll + void "deploy workflow creates asg backed by mixed instances policy if certain fields are set"() { + setup: + def autoScalingWorker = new AutoScalingWorker(regionScopedProvider, launchTemplateRollOutConfig) + def asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .application("myasg") + .stack("stack") + .region("us-east-1") + .freeFormDetails("details") + .credentials(credential) + .sequence(1) + .setLaunchTemplate(true) + .ignoreSequence(false) + .build() + asgConfig."$mipFieldName" = mipFieldValue + + and: + def asgBuilder = Mock(AsgWithMixedInstancesPolicyBuilder) + regionScopedProvider.getAsgBuilderForMixedInstancesPolicy() >> asgBuilder + + when: + autoScalingWorker.deploy(asgConfig) + + then: + 1 * launchTemplateRollOutConfig.shouldUseLaunchTemplateForReq('myasg', credential, 'us-east-1') >> true + + and: + awsServerGroupNameResolver.generateServerGroupName('myasg', 'stack', 'details', 1, false) >> "myasg-stack-details-v001" + + and: + 1 * asgBuilder.build(task, taskPhase, "myasg-stack-details-v001", asgConfig) + 0 * asgBuilder._ + + where: + mipFieldName | mipFieldValue + "onDemandBaseCapacity" | 1 + "onDemandPercentageAboveBaseCapacity" | 50 + "spotAllocationStrategy" |"lowest-price" + "spotAllocationStrategy" |"capacity-optimized" + "spotInstancePools" | 3 + "launchTemplateOverridesForInstanceType" |[new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "t.test", weightedCapacity: 2)] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/DefaultLaunchConfigurationBuilderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/DefaultLaunchConfigurationBuilderSpec.groovy new file mode 100644 index 00000000000..2787593e4af --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/DefaultLaunchConfigurationBuilderSpec.groovy @@ -0,0 +1,512 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg + +import com.amazonaws.services.autoscaling.AmazonAutoScaling +import com.amazonaws.services.autoscaling.model.BlockDeviceMapping +import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest +import com.amazonaws.services.autoscaling.model.Ebs +import com.amazonaws.services.autoscaling.model.LaunchConfiguration +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.DefaultUserDataTokenizer +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProviderAggregator +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider +import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.config.AwsConfiguration +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.services.AsgService +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService +import spock.lang.Specification +import spock.lang.Subject + +class DefaultLaunchConfigurationBuilderSpec extends Specification { + def autoScaling = Mock(AmazonAutoScaling) + def asgService = Mock(AsgService) + def securityGroupService = Mock(SecurityGroupService) + def userDataOverride = new UserDataOverride() + def userDataProvider = Stub(UserDataProvider) { + getUserData(_) >> 'userdata' + } + def userDataTokenizer = new DefaultUserDataTokenizer() + UserDataProviderAggregator userDataProviderAggregator = new UserDataProviderAggregator([userDataProvider], [userDataTokenizer]) + def deployDefaults = new AwsConfiguration.DeployDefaults() + + @Subject + DefaultLaunchConfigurationBuilder builder = new DefaultLaunchConfigurationBuilder(autoScaling, asgService, + securityGroupService, userDataProviderAggregator, null, deployDefaults) + + void "should lookup security groups when provided by name"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> ['sg-feef000', 'sg-named'] + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() + } + 0 * _ + + where: + securityGroups | expectedResolve | expectedGroups + ['named', 'sg-feef000'] | ['named'] | ['sg-feef000', 'sg-named'] + + application = 'foo' + subnetType = null + account = 'prod' + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "should attach an existing application security group if no security groups provided"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> [] + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' 
+ securityGroups = [] + expectedGroups = [application] + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "should add user data to launchconfig with combination from user data provider and description"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> [] + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.getUserData() == expectedUserData + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = [] + expectedGroups = [application] + expectedUserData = 'dXNlcmRhdGEKZXhwb3J0IFVTRVJEQVRBPTEK' + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .base64UserData('ZXhwb3J0IFVTRVJEQVRBPTEK') + .securityGroups(securityGroups) + .build() + } + + void "should only use base64 user data launchconfig when override is set to true"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, new UserDataOverride(enabled: true)) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> [] + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.getUserData() == expectedUserData + } + 0 * _ + + where: + override = true + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = [] + expectedGroups = [application] + expectedUserData = 'ZXhwb3J0IFVTRVJEQVRBPTEK' + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .accountType('prod') + .environment('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .base64UserData('ZXhwb3J0IFVTRVJEQVRBPTEK') + .securityGroups(securityGroups) + .build() + } + + void "should add user data to launchconfig with user data provider if description userdata omitted"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> [] + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> application + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.getUserData() == expectedUserData + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = [] + expectedGroups = [application] + expectedUserData = 'dXNlcmRhdGEK' + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + }
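+ + // For reference, the base64 fixtures exercised by the user data tests above decode as: 'dXNlcmRhdGEK' == base64("userdata\n") (the stubbed UserDataProvider output), 'ZXhwb3J0IFVTRVJEQVRBPTEK' == base64("export USERDATA=1\n") (the description-supplied user data), and 'dXNlcmRhdGEKZXhwb3J0IFVTRVJEQVRBPTEK' == base64 of the two concatenated, provider output first.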
+ + void "should create an application security group if none exists and no security groups provided"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> [] + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> null + 1 * securityGroupService.createSecurityGroup(application, subnetType) >> "sg-$application" + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = [] + expectedGroups = ["sg-$application"] + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "should attach classic link security group if vpc is linked"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> ["sg-123", "sg-456"] + 1 * securityGroupService.resolveSecurityGroupIdsInVpc(_, _) >> ["sg-123", "sg-456"] + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.classicLinkVPCId == "vpc-123" + assert req.classicLinkVPCSecurityGroups == ["sg-123", "sg-456"] + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + expectedGroups = [application] + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(["sg-000"]) + .classicLinkVpcId("vpc-123") + .classicLinkVpcSecurityGroups(["sg-123", "sg-456"]) + .build() + } + + void "should try to look up classic link security group if vpc is linked"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> ["sg-123"] + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.classicLinkVPCId == "vpc-123" + assert req.classicLinkVPCSecurityGroups == [] + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + expectedGroups = [application] + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(["sg-000"]) + .classicLinkVpcId("vpc-123") + .build() + } + + void "if existing requested group contains app name don't lookup/create app group"() { + given: + deployDefaults.addAppGroupToServerGroup = true + + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroups + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [(appGroup): securityGroups[0]] + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { 
CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = ["sg-12345"] + appGroup = "sg-$application" + expectedGroups = securityGroups + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "if creating an app security group would exceed the maximum number of security groups, use the provided groups"() { + given: + deployDefaults.addAppGroupToServerGroup = true + + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroups + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = ["sg-12345", "sg-23456", "sg-34567", "sg-45678", "sg-56789"] + sgResult = securityGroups.collectEntries { [(it): it] } + expectedGroups = securityGroups + appGroup = "sg-$application" + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "should add existing app security group if configured to do so"() { + given: + deployDefaults.addAppGroupToServerGroup = true + + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroups + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [notappgroup: securityGroups[0]] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> appGroup + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == expectedGroups.toList().sort() + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = ["sg-12345"] + appGroup = "sg-$application" + expectedGroups = securityGroups + appGroup + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "should create app security group if addAppGroupToServerGroup and no app group present"() { + given: + deployDefaults.addAppGroupToServerGroup = true + + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> securityGroups + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> null + 1 * securityGroupService.createSecurityGroup(application, subnetType) >> appGroup + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.securityGroups.toList().sort() == 
expectedGroups.toList().sort() + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = ["sg-12345"] + appGroup = "sg-$application" + expectedGroups = securityGroups + appGroup + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(securityGroups) + .build() + } + + void "should look up and attach classic link security group if vpc is linked"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> ["sg-123"] + 1 * securityGroupService.resolveSecurityGroupIdsInVpc(_, _) >> ["sg-123"] + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.classicLinkVPCId == "vpc-123" + assert req.classicLinkVPCSecurityGroups == ["sg-123"] + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + expectedGroups = [application] + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .securityGroups(["sg-000"]) + .classicLinkVpcId("vpc-123") + .classicLinkVpcSecurityGroups(["nf-classiclink"]) + .build() + } + + void "handles block device mappings"() { + when: + builder.buildLaunchConfiguration(application, subnetType, settings, null, userDataOverride) + + then: + 1 * securityGroupService.resolveSecurityGroupIdsWithSubnetType(_, _) >> [] + 1 * securityGroupService.getSecurityGroupNamesFromIds(_) >> [:] + 1 * securityGroupService.getSecurityGroupForApplication(application, subnetType) >> "sg-$application" + 1 * autoScaling.createLaunchConfiguration(_ as CreateLaunchConfigurationRequest) >> { CreateLaunchConfigurationRequest req -> + assert req.blockDeviceMappings.size() == 2 + req.blockDeviceMappings.first().with { + assert deviceName == "/dev/sdb" + assert virtualName == 'ephemeral1' + assert ebs == null + } + req.blockDeviceMappings.last().with { + assert deviceName == '/dev/sdc' + assert virtualName == null + assert ebs.snapshotId == 's-69' + assert ebs.volumeType == 'io1' + assert ebs.deleteOnTermination == false + assert ebs.iops == 100 + assert ebs.throughput == 250 + assert ebs.volumeSize == 125 + assert ebs.encrypted == true + } + } + 0 * _ + + where: + application = 'foo' + subnetType = null + account = 'prod' + securityGroups = [] + expectedGroups = ["sg-$application"] + settings = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account('prod') + .region('us-east-1') + .baseName('fooapp-v001') + .suffix('20150515') + .blockDevices([ + new AmazonBlockDevice(deviceName: '/dev/sdb', virtualName: 'ephemeral1'), + new AmazonBlockDevice(deviceName: "/dev/sdc", size: 125, iops: 100, throughput: 250, deleteOnTermination: false, volumeType: 'io1', snapshotId: 's-69', encrypted: true)]) + .securityGroups(securityGroups) + .build() + } + + void "buildSettingsFromLaunchConfiguration handles ebs throughput"() { + given: + AccountCredentials account = Mock(AccountCredentials) + String launchConfigurationName = 'source-launch-config' + LaunchConfiguration launchConfiguration = new LaunchConfiguration() + launchConfiguration.setLaunchConfigurationName(launchConfigurationName) + int throughput = 250 + Ebs ebs = new Ebs() + ebs.setThroughput(throughput) + 
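// wrap the Ebs in a BlockDeviceMapping so buildSettingsFromLaunchConfiguration can surface its throughput in the resulting settings +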
BlockDeviceMapping blockDeviceMapping = new BlockDeviceMapping() + blockDeviceMapping.setEbs(ebs) + launchConfiguration.setBlockDeviceMappings([blockDeviceMapping]) + launchConfiguration.setEbsOptimized(false) // arbitrary, must not be null + when: + LaunchConfigurationBuilder.LaunchConfigurationSettings result = builder.buildSettingsFromLaunchConfiguration(account,'region', launchConfigurationName) + + then: + 1 * asgService.getLaunchConfiguration(launchConfigurationName) >> launchConfiguration + 1 * account.name >> 'account-name' + 1 * account.environment >> 'environment' + 1 * account.accountType >> 'account-type' + result.blockDevices.size() == 1 + result.blockDevices[0].throughput == throughput + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchTemplateRollOutConfigSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchTemplateRollOutConfigSpec.groovy new file mode 100644 index 00000000000..cd5a1a7de81 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/LaunchTemplateRollOutConfigSpec.groovy @@ -0,0 +1,95 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg + +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import spock.lang.Specification +import spock.lang.Unroll + +class LaunchTemplateRollOutConfigSpec extends Specification { + def dynamicConfigService = Mock(DynamicConfigService) + def launchTemplateRollOutConfig = new LaunchTemplateRollOutConfig(dynamicConfigService) + + void 'isIpv6EnabledForEnv returns expected results'() { + when: + def res = launchTemplateRollOutConfig.isIpv6EnabledForEnv("foo") + + then: + 1 * dynamicConfigService.isEnabled('aws.features.launch-templates.ipv6.foo', false) >> enabled + + and: + res == enabled + + where: + enabled << [true, false] + } + + // callCounts mirrors the order of checks in shouldUseLaunchTemplateForReq: feature flag enabled -> excluded-applications -> excluded-accounts -> all-applications -> allowed-applications -> allowed-accounts-regions -> allowed-accounts; a 0 means evaluation short-circuited before that check. + @Unroll + void 'shouldUseLaunchTemplateForReq returns expected results'() { + given: + def excludedApp = "excludedApp" + def excludedAcc = "excludedAcc" + def allowAllApps = false + + def allowedApp = "myasg" + def allowedAcc = "foo" + def allowedReg = "us-east-1" + + when: + def res = launchTemplateRollOutConfig.shouldUseLaunchTemplateForReq(app, TestCredential.named(account), region) + + then: + callCounts[0] * dynamicConfigService.isEnabled('aws.features.launch-templates', false) >> ltEnabled + + callCounts[1] * dynamicConfigService.getConfig(String.class,"aws.features.launch-templates.excluded-applications", "") >> excludedApp + callCounts[2] * dynamicConfigService.getConfig(String.class, "aws.features.launch-templates.excluded-accounts", "") >> excludedAcc + callCounts[3] * dynamicConfigService.isEnabled('aws.features.launch-templates.all-applications', false) >> allowAllApps + callCounts[4] * dynamicConfigService.getConfig(String.class,"aws.features.launch-templates.allowed-applications", "") >> allowedApp + ":" + allowedAcc + ":" + allowedReg + callCounts[5] * dynamicConfigService.getConfig(String.class, "aws.features.launch-templates.allowed-accounts-regions", "") >> allowedAcc + ":" + allowedReg + callCounts[6] * dynamicConfigService.getConfig(String.class, "aws.features.launch-templates.allowed-accounts", "") >> allowedAcc + 0 * dynamicConfigService._ + + and: + res == result + + where: + ltEnabled | app | region | account | callCounts || result + false | "myasg" | "us-east-1" | "foo" | [1,0,0,0,0,0,0] || false + true |"excludedApp"| "us-east-1" | "foo" | [1,1,0,0,0,0,0] || false + true | "myasg" | "us-east-1" |"excludedAcc"| [1,1,1,0,0,0,0] || false + true | "myasg" | "us-east-1" | "foo" | [1,1,1,1,1,0,0] || true + true | "asg" | "us-east-1" | "foo" | [1,1,1,1,1,1,0] || true + true | "asg" | "us-west-1" | "foo" | [1,1,1,1,1,1,1] || true + true | "asg" | "us-east-1" | "acc" | [1,1,1,1,1,1,1] || false + } + + @Unroll + void "should check if current app, account and region match launch template flag"() { + when: + def result = launchTemplateRollOutConfig.matchesAppAccountAndRegion(application, accountName, region, applicationAccountRegions) + + then: + result == matches + + where: + applicationAccountRegions | application | accountName | region || matches + "foo:test:us-east-1" | "foo" | "test" | "us-east-1" || true + "foo:test:us-east-1,us-west-2" | "foo" | "test" | "eu-west-1" || false + "foo:prod:us-east-1" | "foo" | "test" | "us-east-1" || false + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/SimpleServerGroup.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/SimpleServerGroup.groovy new file mode 100644 index
00000000000..81d62bf46d5 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/SimpleServerGroup.groovy @@ -0,0 +1,43 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg + +import com.netflix.spinnaker.clouddriver.model.Instance +import com.netflix.spinnaker.clouddriver.model.ServerGroup + +class SimpleServerGroup implements ServerGroup { + String name + String type + String cloudProvider + String region + Long createdTime + Boolean disabled + Set zones + Set instances + Set loadBalancers + Set securityGroups + Map launchConfig + ServerGroup.InstanceCounts instanceCounts + ServerGroup.Capacity capacity + ServerGroup.ImageSummary imageSummary + ServerGroup.ImagesSummary imagesSummary + + Boolean isDisabled() { + disabled + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchConfigurationBuilderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchConfigurationBuilderSpec.groovy new file mode 100755 index 00000000000..ed5bfd16373 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchConfigurationBuilderSpec.groovy @@ -0,0 +1,614 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders + +import com.amazonaws.services.autoscaling.AmazonAutoScaling +import com.amazonaws.services.autoscaling.model.AlreadyExistsException +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DescribeSubnetsResult +import com.amazonaws.services.ec2.model.Subnet +import com.amazonaws.services.ec2.model.Tag +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchConfigurationBuilder +import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.data.task.Task +import org.apache.commons.lang3.StringUtils +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.Instant +import java.time.temporal.ChronoUnit + +import static com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.DefaultResult.CONTINUE +import static com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.Transition.EC2InstanceLaunching + +class AsgWithLaunchConfigurationBuilderSpec extends Specification { + + def lcBuilder = Mock(LaunchConfigurationBuilder) + def autoScaling = Mock(AmazonAutoScaling) + def amazonEC2 = Mock(AmazonEC2) + def asgLifecycleHookWorker = Mock(AsgLifecycleHookWorker) + + def credential = TestCredential.named('foo') + def userDataOverride = Mock(UserDataOverride) + def task = Mock(Task) + def taskPhase = "AWS_DEPLOY_TEST" + def asgConfig, asgName, launchConfigName + + def setup() { + asgConfig = AsgConfiguration.builder() + .credentials(credential) + .application("myasg") + .region("us-east-1") + .minInstances(1) + .maxInstances(3) + .desiredInstances(2) + .instanceType("t1.test") + .build() + asgName = "myasg-v000" + launchConfigName = "$asgName-20210119" + } + + private LaunchConfigurationBuilder.LaunchConfigurationSettings getLcSettings(String asgName, AsgConfiguration cfg) { + return LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(cfg.getCredentials().getName()) + .environment(cfg.getCredentials().getEnvironment()) + .accountType(cfg.getCredentials().getAccountType()) + .region(cfg.getRegion()) + .baseName(asgName) + .suffix(null) + .ami(cfg.getAmi()) + .iamRole(cfg.getIamRole()) + .classicLinkVpcId(cfg.getClassicLinkVpcId()) + .classicLinkVpcSecurityGroups(cfg.getClassicLinkVpcSecurityGroups()) + .instanceType(cfg.getInstanceType()) + .keyPair(cfg.getKeyPair()) + .base64UserData(cfg.getBase64UserData()) + .associatePublicIpAddress(cfg.getAssociatePublicIpAddress()) + .kernelId(cfg.getKernelId()) + .ramdiskId(cfg.getRamdiskId()) + .ebsOptimized(cfg.getEbsOptimized() != null ? cfg.getEbsOptimized() : false) + .spotMaxPrice(cfg.getSpotMaxPrice()) + .instanceMonitoring(cfg.getInstanceMonitoring() != null ? 
cfg.getInstanceMonitoring() : false) + .blockDevices(cfg.getBlockDevices()) + .securityGroups(cfg.getSecurityGroups()) + .build() + } + + private DescribeSubnetsResult getDescribeSubnetsResult() { + return new DescribeSubnetsResult(subnets: [ + new Subnet(subnetId: 'subnetId1', availabilityZone: 'us-east-1a', tags: [new Tag(key: 'immutable_metadata', value: '{"purpose": "internal", "target": "ec2" }')]), + new Subnet(subnetId: 'subnetId2', availabilityZone: 'us-west-2a'), + new Subnet(subnetId: 'subnetId3', availabilityZone: 'us-west-2a'), + ]) + } + + void "should build ASG request with launch config correctly"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetType = "internal" + asgConfig.legacyUdf = false + asgConfig.spotMaxPrice = 0.5 + asgConfig.classicLoadBalancers = ["one", "two"] + asgConfig.targetGroupArns = ["tg1", "tg2"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["Default", "OldestInstance"] + asgConfig.userDataOverride = userDataOverride + asgConfig.ebsOptimized = true + asgConfig.securityGroups = ["mysg"] + def settings = getLcSettings(asgName, asgConfig) + + when: + def request = asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", "internal", settings, false, userDataOverride) >> launchConfigName + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + + and: + request.getLaunchConfigurationName() == launchConfigName + request.getAutoScalingGroupName() == asgName + request.getMinSize() == 1 + request.getMaxSize() == 3 + request.getDesiredCapacity() == 2 + request.getLoadBalancerNames() == ["one", "two"] + request.getTargetGroupARNs() == ["tg1", "tg2"] + request.getDefaultCooldown() == 5 + request.getHealthCheckGracePeriod() == 5 + request.getHealthCheckType() == "ec2" + request.getTerminationPolicies() == ["Default", "OldestInstance"] + } + + void "should build ASG request with tags correctly"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.tags = [foo: "bar"] + def settings = getLcSettings(asgName, asgConfig) + + when: + def request = asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", null, settings, null, null) >> launchConfigName + request.getLaunchConfigurationName() == launchConfigName + def tag = request.getTags()[0] + tag.getKey() == "foo" + tag.getValue() == "bar" + tag.getPropagateAtLaunch() == true + } + + @Unroll + void "should favor subnetIds over AZ while building ASG request"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-west-2a"] + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = subnetType + def settings = getLcSettings(asgName, asgConfig) + + when: + def request = asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", subnetType, settings, null, null) >> launchConfigName + (StringUtils.isEmpty(subnetType) ? 
0 : 2) * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch configuration $launchConfigName") + 1 * task.updateStatus(taskPhase, deployMsg) + 0 * _ + + and: + request.getLaunchConfigurationName() == launchConfigName + request.getVPCZoneIdentifier() == subnetIdsForAsg + subnetIdsForAsg == null + ? request.getAvailabilityZones() == ["us-west-2a"] + : request.getAvailabilityZones() == [] + + where: + subnetType | subnetIds | subnetIdsForAsg || deployMsg + "internal" |["subnetId3"] | "subnetId3" || " > Deploying to subnetIds: subnetId3" + "internal" |["subnetId2"] | "subnetId2" || " > Deploying to subnetIds: subnetId2" + null | null | null || "Deploying to availabilityZones: [us-west-2a]" + null | [] | null || "Deploying to availabilityZones: [us-west-2a]" + } + + @Unroll + void "should filter and validate subnets by AZ while building ASG request"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = "internal" + def settings = getLcSettings(asgName, asgConfig) + + when: + def request = asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", "internal", settings, null, null) >> launchConfigName + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch configuration $launchConfigName") + 1 * task.updateStatus(taskPhase, " > Deploying to subnetIds: subnetId1") + 0 * _ + + and: + request.getLaunchConfigurationName() == launchConfigName + request.getVPCZoneIdentifier() == subnetIdsForAsg + request.getAvailabilityZones() == [] + + where: + subnetIds | subnetIdsForAsg + [] | "subnetId1" // filter subnets by AZ + ["subnetId1"] | "subnetId1" // validate subnets by AZ + } + + @Unroll + void "throws exception when invalid subnet IDs are specified while building ASG request"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = availabilityZones + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = subnetType + def settings = getLcSettings(asgName, asgConfig) + + when: + asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", subnetType, settings, null, null) >> launchConfigName + (StringUtils.isEmpty(subnetType) ? 
0 : 1) * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch configuration $launchConfigName") + 0 * _ + + and: + def ex = thrown(IllegalStateException) + ex.getMessage() == String.format( + "One or more subnet ids are not valid (invalidSubnetIds: %s, availabilityZones: %s)", + invalidSubnetIds, String.join(",", availabilityZones)) + + where: + subnetType | subnetIds | availabilityZones | invalidSubnetIds + null | ["subnetId1"] | ["us-west-2a"] | "subnetId1" + "" | ["subnetId2"] | ["us-east-1a"] | "subnetId2" + "internal" | ["subnetId3"] | ["us-east-1a"] | "subnetId3" + "internal" | ["invalidSubnetId"] | [] | "invalidSubnetId" + "internal" | ["subnetId1", "subnetId2"] | ["us-east-1a"] | "subnetId2" + "internal" | ["subnetId1", "subnetId2", "subnetId3"] | ["us-west-2a"] | "subnetId1" + } + + @Unroll + void "should filter subnets by subnet purpose conditionally while building ASG request"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = [] + asgConfig.subnetType = "internal" + asgConfig.subnetIds = subnetIds + def settings = getLcSettings(asgName, asgConfig) + + when: + def request = asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", "internal", settings, null, null) >> launchConfigName + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch configuration $launchConfigName") + 1 * task.updateStatus(taskPhase, deployMsg) + 0 * _ + + and: + request.getLaunchConfigurationName() == launchConfigName + request.getVPCZoneIdentifier() == subnetIdsForAsg + request.getAvailabilityZones() == [] + + where: + subnetIds | subnetIdsForAsg || deployMsg + [] | "subnetId1" || " > Deploying to subnetIds: subnetId1" // filtered by subnet purpose tags + ["subnetId2"] | "subnetId2" || " > Deploying to subnetIds: subnetId2" // not filtered by subnet purpose tags + ["subnetId1","subnetId2"] | "subnetId1,subnetId2" || " > Deploying to subnetIds: subnetId1,subnetId2" // not filtered by subnet purpose tags + } + + @Unroll + void "throws exception when subnetIds are not specified and no suitable subnet found for subnet purpose while building ASG request"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetIds = [] + asgConfig.availabilityZones = availabilityZones + asgConfig.subnetType = subnetType + def settings = getLcSettings(asgName, asgConfig) + + when: + asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", subnetType, settings, null, null) >> launchConfigName + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch configuration $launchConfigName") + 0 * _ + + and: + def ex = thrown(RuntimeException) + ex.getMessage() == "No suitable subnet was found for internal subnet purpose '$subnetType'!" 
+ + where: + subnetType | availabilityZones + "internal" | ["eu-central-1a"] + "internal" | ["us-west-2a"] + "unknown" | ["us-west-1b"] + } + + void "should build ASG with launch config correctly"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetType = "internal" + asgConfig.legacyUdf = false + asgConfig.spotMaxPrice = 0.5 + asgConfig.classicLoadBalancers = ["one", "two"] + asgConfig.targetGroupArns = ["tg1", "tg2"] + asgConfig.userDataOverride = userDataOverride + asgConfig.securityGroups = ["mysg"] + def settings = getLcSettings(asgName, asgConfig) + + when: + def asgNameRes = asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + asgNameRes == asgName + 1 * lcBuilder.buildLaunchConfiguration("myasg", "internal", settings, false, userDataOverride) >> launchConfigName + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * autoScaling.createAutoScalingGroup(_) + 1 * autoScaling.updateAutoScalingGroup(_) + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch configuration $launchConfigName") + 1 * task.updateStatus(taskPhase, ' > Deploying to subnetIds: subnetId1') + 1 * task.updateStatus(taskPhase, 'Setting size of myasg-v000 in foo/us-east-1 to [min=1, max=3, desired=2]') + 1 * task.updateStatus(taskPhase, "Deployed EC2 server group named $asgName") + 0 * _ + } + + @Unroll + void "does not enable metrics collection when enabledMetrics are absent or instanceMonitoring is falsy"() { + setup: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.enabledMetrics = enabledMetrics + asgConfig.instanceMonitoring = instanceMonitoring + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + count * autoScaling.enableMetricsCollection({count == 1 ? it.metrics == ['GroupMinSize', 'GroupMaxSize'] : _ }) + + where: + enabledMetrics | instanceMonitoring | count + null | null | 0 + [] | null | 0 + [] | false | 0 + ['GroupMinSize', 'GroupMaxSize'] | null | 0 + ['GroupMinSize', 'GroupMaxSize'] | [] | 0 + ['GroupMinSize', 'GroupMaxSize'] | false | 0 + ['GroupMinSize', 'GroupMaxSize'] | true | 1 + } + + void "enables metrics collection for all metrics when enabledMetrics is an empty list and instanceMonitoring is true"() { + setup: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.enabledMetrics = [] + asgConfig.instanceMonitoring = true + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + // According to + // https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html, + // specifying granularity with no metrics means all metrics. 
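+ // The interaction below therefore expects a request with granularity '1Minute' and an empty metrics list, i.e. "collect all group metrics at one-minute granularity".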
+ 1 * autoScaling.enableMetricsCollection({ (it.granularity == '1Minute') && (it.metrics == []) }) + } + + void "continues if serverGroup already exists, is reasonably the same and within safety window"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.classicLoadBalancers = ["one", "two"] + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + noExceptionThrown() + 1 * lcBuilder.buildLaunchConfiguration("myasg", null, _, null, null) >> launchConfigName + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchConfigurationName: launchConfigName, + loadBalancerNames: ["one", "two"], + createdTime: new Date() + ) + ] + ) + } + } + + @Unroll + void "continues if serverGroup already exists, and existing and desired autoscaling group have the same configuration"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetType = sbTypeReq + asgConfig.subnetIds = sbReq + asgConfig.availabilityZones = azReq + asgConfig.legacyUdf = false + asgConfig.spotMaxPrice = 0.5 + asgConfig.classicLoadBalancers = ["two", "one"] + asgConfig.targetGroupArns = ["tg2", "tg1"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["tp2", "tp1"] + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + noExceptionThrown() + 1 * lcBuilder.buildLaunchConfiguration("myasg", asgConfig.subnetType, _, false, null) >> launchConfigName + if (sbTypeReq != null) { + 2 * amazonEC2.describeSubnets() >> new DescribeSubnetsResult(subnets: [new Subnet(subnetId: 'sb1', availabilityZone: 'us-east-1a'), new Subnet(subnetId: 'sb2', availabilityZone: 'us-east-1b'),]) + } + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchConfigurationName: launchConfigName, + availabilityZones: az, + vPCZoneIdentifier: sb, + loadBalancerNames: ["one", "two"], + targetGroupARNs: ["tg1", "tg2"], + defaultCooldown: 5, + healthCheckGracePeriod: 5, + healthCheckType: "ec2", + terminationPolicies: ["tp1", "tp2"], + createdTime: new Date() + ) + ] + ) + } + 1 * task.updateStatus('AWS_DEPLOY_TEST', deployMsg) + + where: + sbTypeReq | sbReq | sb | azReq | az | deployMsg + "internal"|["sb2","sb1"]|"sb1,sb2"| null | null |' > Deploying to subnetIds: sb2,sb1' + "internal"|["sb2","sb1"]|"sb1,sb2"|["us-east-1b", "us-east-1a"]| null |' > Deploying to subnetIds: sb2,sb1' + null | null | null |["us-east-1b", "us-east-1a"]|["us-east-1a", "us-east-1b"]|'Deploying to availabilityZones: [us-east-1b, us-east-1a]' + } + + void "throws duplicate exception if existing autoscaling group was created before safety window"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.classicLoadBalancers = ["one", "two"] + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 
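// the stubbed ASG below was created 3 hours ago, outside the safety window for tolerating duplicates, so the exception propagates +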
thrown(AlreadyExistsException) + 1 * lcBuilder.buildLaunchConfiguration("myasg", null, _, null, null) >> launchConfigName + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchConfigurationName: launchConfigName, + loadBalancerNames: ["one", "two"], + createdTime: new Date(Instant.now().minus(3, ChronoUnit.HOURS).toEpochMilli()) + ) + ] + ) + } + } + + @Unroll + void "throws duplicate exception if existing and desired autoscaling group differ in configuration"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetType = sb == null ? null :"internal" + asgConfig.subnetIds = sb == null ? null : ["sb2","sb1"] + asgConfig.availabilityZones = azReq + asgConfig.legacyUdf = false + asgConfig.spotMaxPrice = 0.5 + asgConfig.classicLoadBalancers = ["two", "one"] + asgConfig.targetGroupArns = ["tg2", "tg1"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["tp2", "tp1"] + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + thrown(AlreadyExistsException) + 1 * lcBuilder.buildLaunchConfiguration("myasg", asgConfig.subnetType, _, false, null) >> launchConfigName + _ * amazonEC2.describeSubnets() >> new DescribeSubnetsResult(subnets: [new Subnet(subnetId: 'sb1', availabilityZone: 'az1'),new Subnet(subnetId: 'sb2', availabilityZone: 'az2'),]) + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchConfigurationName: lc, + availabilityZones: null, + vPCZoneIdentifier: sb, + loadBalancerNames: lb, + targetGroupARNs: tg, + defaultCooldown: cd, + healthCheckGracePeriod: hcGp, + healthCheckType: hc, + terminationPolicies: tp, + createdTime: new Date() + ) + ] + ) + } + 1 * task.updateStatus('AWS_DEPLOY_TEST', "Deploying ASG myasg-v000 with launch configuration $launchConfigName") + 1 * task.updateStatus(taskPhase, "$asgName already exists and does not seem to match desired state on: $failedPredicates") + + where: + lc | sb | azReq | lb | tg |cd | hcGp | hc | tp || failedPredicates + "blah" | "blah" | [] | ["blah"] | ["blah"] | 0 | 0 | "blah" | ["blah"] || "health check type,target groups,health check grace period,cooldown,subnets,termination policies,launch configuration,load balancers" + "blah" |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "launch configuration" + "myasg-v000-20210119" | "blah" | null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "subnets" + "myasg-v000-20210119" | null |["az3","az2","az1"]|["one", "two"]|["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "availability zones" + "myasg-v000-20210119" |"sb1,sb2"| ["az2","az1"] | ["blah"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "load balancers" + "myasg-v000-20210119" |"sb1,sb2"| ["az2","az1"] |["one", "two"]| ["blah"] | 5 | 5 | "ec2" |["tp1", "tp2"]|| "target groups" + "myasg-v000-20210119" |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 0 | 5 | "ec2" |["tp1", "tp2"]|| "cooldown" + "myasg-v000-20210119" 
|"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 5 | 0 | "ec2" |["tp1", "tp2"]|| "health check grace period" + "myasg-v000-20210119" |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 5 | 5 | "blah" |["tp1", "tp2"]|| "health check type" + "myasg-v000-20210119" |"sb1,sb2"| null |["one", "two"]|["tg1", "tg2"]| 5 | 5 | "ec2" | ["blah"] || "termination policies" + } + + void "creates lifecycle hooks before scaling out asg"() { + setup: + def hooks = [getHook(), getHook()] + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.lifecycleHooks = hooks + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * autoScaling.createAutoScalingGroup(_) + 1 * asgLifecycleHookWorker.attach(_, hooks, "myasg-v000") + 1 * autoScaling.updateAutoScalingGroup(*_) + } + + def getHook() { + new AmazonAsgLifecycleHook( + name: "hook-name-" + new Random().nextInt(), + roleARN: "role-rn", + notificationTargetARN: "target-arn", + notificationMetadata: null, + lifecycleTransition: EC2InstanceLaunching, + heartbeatTimeout: 300, + defaultResult: CONTINUE + ) + } + + void "should suspend auto scaling processes if specified"() { + setup: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.suspendedProcesses = ["Launch"] + + when: + asgWithLcBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * autoScaling.createAutoScalingGroup(_) + 1 * autoScaling.suspendProcesses(_) + 1 * autoScaling.updateAutoScalingGroup(*_) + } + + @Unroll + void "should enable capacity rebalance, if specified"() { + given: + def asgWithLcBuilder = new AsgWithLaunchConfigurationBuilder(lcBuilder, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.capacityRebalance = capacityRebalance + def settings = getLcSettings(asgName, asgConfig) + + when: + def request = asgWithLcBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * lcBuilder.buildLaunchConfiguration("myasg", null, settings, null, null) >> launchConfigName + request.capacityRebalance == capacityRebalance + + where: + capacityRebalance << [true, false, null] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchTemplateBuilderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchTemplateBuilderSpec.groovy new file mode 100644 index 00000000000..6b47593313d --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithLaunchTemplateBuilderSpec.groovy @@ -0,0 +1,590 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders + +import com.amazonaws.services.autoscaling.AmazonAutoScaling +import com.amazonaws.services.autoscaling.model.AlreadyExistsException +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DescribeSubnetsResult +import com.amazonaws.services.ec2.model.LaunchTemplate +import com.amazonaws.services.ec2.model.Subnet +import com.amazonaws.services.ec2.model.Tag +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService +import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.config.AwsConfiguration +import org.apache.commons.lang3.StringUtils +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.Instant +import java.time.temporal.ChronoUnit + +import static com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.DefaultResult.CONTINUE +import static com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.Transition.EC2InstanceLaunching + +class AsgWithLaunchTemplateBuilderSpec extends Specification { + + def ltService = Mock(LaunchTemplateService) + def securityGroupService = Mock(SecurityGroupService) + def deployDefaults = Mock(AwsConfiguration.DeployDefaults) + def autoScaling = Mock(AmazonAutoScaling) + def amazonEC2 = Mock(AmazonEC2) + def asgLifecycleHookWorker = Mock(AsgLifecycleHookWorker) + + def credential = TestCredential.named('foo') + def userDataOverride = Mock(UserDataOverride) + def task = Mock(Task) + def taskPhase = "AWS_DEPLOY_TEST" + def asgConfig, asgName + def lt, ltSpec, asgConfigHelper + + def setup() { + asgConfigHelper = Spy(AsgConfigHelper) + asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .setLaunchTemplate(true) + .credentials(credential) + .legacyUdf(false) + .application("myasg") + .region("us-east-1") + .minInstances(1) + .maxInstances(3) + .desiredInstances(2) + .instanceType("t1.test") + .securityGroups(["my-sg"]) + .build() + + asgName = "myasg-v000" + lt = new LaunchTemplate(launchTemplateName: "lt-1", launchTemplateId: "lt-1", latestVersionNumber: 1, defaultVersionNumber: 0) + ltSpec = new LaunchTemplateSpecification(launchTemplateId: "lt-1", version: "1") + securityGroupService.resolveSecurityGroupIdsWithSubnetType(_,_) >> ["sg-1"] + } + + private DescribeSubnetsResult getDescribeSubnetsResult() { + return new DescribeSubnetsResult(subnets: [ + new Subnet(subnetId: 'subnetId1', availabilityZone: 'us-east-1a', tags: [new Tag(key: 'immutable_metadata', value: '{"purpose": "internal", "target": "ec2" }')]), + new Subnet(subnetId: 'subnetId2', availabilityZone: 'us-west-2a'), + new Subnet(subnetId: 'subnetId3', availabilityZone: 'us-west-2a'), + ]) + } + + void "should build ASG request with launch template correctly"() { + 
given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetType = "internal" + asgConfig.spotMaxPrice = "0.5" + asgConfig.classicLoadBalancers = ["one", "two"] + asgConfig.targetGroupArns = ["tg1", "tg2"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["Default", "OldestInstance"] + asgConfig.userDataOverride = userDataOverride + asgConfig.ebsOptimized = true + + when: + def request = asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + + and: + request.getLaunchTemplate() == ltSpec + request.getAutoScalingGroupName() == asgName + request.getMinSize() == 1 + request.getMaxSize() == 3 + request.getDesiredCapacity() == 2 + request.getLoadBalancerNames() == ["one", "two"] + request.getTargetGroupARNs() == ["tg1", "tg2"] + request.getDefaultCooldown() == 5 + request.getHealthCheckGracePeriod() == 5 + request.getHealthCheckType() == "ec2" + request.getTerminationPolicies() == ["Default", "OldestInstance"] + } + + void "should build ASG request with tags correctly"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.tags = [foo: "bar"] + + when: + def request = asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + request.getLaunchTemplate() == ltSpec + def tag = request.getTags()[0] + tag.getKey() == "foo" + tag.getValue() == "bar" + tag.getPropagateAtLaunch() == true + } + + @Unroll + void "should favor subnetIds over AZ while building ASG request"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-west-2a"] + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = subnetType + + when: + def request = asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + (StringUtils.isEmpty(subnetType) ? 0 : 2) * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + 1 * task.updateStatus(taskPhase, deployMsg) + + and: + request.getLaunchTemplate() == ltSpec + request.getVPCZoneIdentifier() == subnetIdsForAsg + subnetIdsForAsg == null + ? 
request.getAvailabilityZones() == ["us-west-2a"] + : request.getAvailabilityZones() == [] + + where: + subnetType | subnetIds | subnetIdsForAsg || deployMsg + "internal" |["subnetId3"] | "subnetId3" || " > Deploying to subnetIds: subnetId3" + "internal" |["subnetId2"] | "subnetId2" || " > Deploying to subnetIds: subnetId2" + null | null | null || "Deploying to availabilityZones: [us-west-2a]" + null | [] | null || "Deploying to availabilityZones: [us-west-2a]" + } + + @Unroll + void "should filter and validate subnets by AZ while building ASG request"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = "internal" + + when: + def request = asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + 1 * task.updateStatus(taskPhase, " > Deploying to subnetIds: subnetId1") + + and: + request.getLaunchTemplate() == ltSpec + request.getVPCZoneIdentifier() == subnetIdsForAsg + request.getAvailabilityZones() == [] + + where: + subnetIds | subnetIdsForAsg + [] | "subnetId1" // filter subnets by AZ + ["subnetId1"] | "subnetId1" // validate subnets by AZ + } + + @Unroll + void "throws exception when invalid subnet IDs are specified while building ASG request"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = availabilityZones + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = subnetType + + + when: + asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + (StringUtils.isEmpty(subnetType) ? 
0 : 1) * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + + and: + def ex = thrown(IllegalStateException) + ex.getMessage() == String.format( + "One or more subnet ids are not valid (invalidSubnetIds: %s, availabilityZones: %s)", + invalidSubnetIds, String.join(",", availabilityZones)) + + where: + subnetType | subnetIds | availabilityZones | invalidSubnetIds + null | ["subnetId1"] | ["us-west-2a"] | "subnetId1" + "" | ["subnetId2"] | ["us-east-1a"] | "subnetId2" + "internal" | ["subnetId3"] | ["us-east-1a"] | "subnetId3" + "internal" | ["invalidSubnetId"] | [] | "invalidSubnetId" + "internal" | ["subnetId1", "subnetId2"] | ["us-east-1a"] | "subnetId2" + "internal" | ["subnetId1", "subnetId2", "subnetId3"] | ["us-west-2a"] | "subnetId1" + } + + @Unroll + void "should filter subnets by subnet purpose conditionally while building ASG request"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = [] + asgConfig.subnetType = "internal" + asgConfig.subnetIds = subnetIds + + when: + def request = asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + 1 * task.updateStatus(taskPhase, deployMsg) + + and: + request.getLaunchTemplate() == ltSpec + request.getVPCZoneIdentifier() == subnetIdsForAsg + request.getAvailabilityZones() == [] + + where: + subnetIds | subnetIdsForAsg || deployMsg + [] | "subnetId1" || " > Deploying to subnetIds: subnetId1" // filtered by subnet purpose tags + ["subnetId2"] | "subnetId2" || " > Deploying to subnetIds: subnetId2" // not filtered by subnet purpose tags + ["subnetId1","subnetId2"] | "subnetId1,subnetId2" || " > Deploying to subnetIds: subnetId1,subnetId2" // not filtered by subnet purpose tags + } + + @Unroll + void "throws exception when subnetIds are not specified and no suitable subnet found for subnet purpose while building ASG request"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetIds = [] + asgConfig.availabilityZones = availabilityZones + asgConfig.subnetType = subnetType + + when: + asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + + and: + def ex = thrown(RuntimeException) + ex.getMessage() == "No suitable subnet was found for internal subnet purpose '$subnetType'!" 
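+ // Rows below cover two failure modes: no subnets exist in the requested AZs
+ // (eu-central-1a), or the subnets in the AZ lack an immutable_metadata purpose
+ // tag matching the requested subnetType (us-west-2a, 'unknown').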
+ + where: + subnetType | availabilityZones + "internal" | ["eu-central-1a"] + "internal" | ["us-west-2a"] + "unknown" | ["us-west-1b"] + } + + void "should build ASG with launch template correctly"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetType = "internal" + asgConfig.classicLoadBalancers = ["one", "two"] + asgConfig.targetGroupArns = ["tg1", "tg2"] + asgConfig.userDataOverride = userDataOverride + asgConfig.securityGroups = ["mysg"] + + when: + def asgNameRes = asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + asgNameRes == asgName + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * autoScaling.createAutoScalingGroup(_) + 1 * autoScaling.updateAutoScalingGroup(_) + 0 * autoScaling. _ + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + 1 * task.updateStatus(taskPhase, ' > Deploying to subnetIds: subnetId1') + 1 * task.updateStatus(taskPhase, 'Setting size of myasg-v000 in foo/us-east-1 to [min=1, max=3, desired=2]') + 1 * task.updateStatus(taskPhase, "Deployed EC2 server group named $asgName") + } + + @Unroll + void "does not enable metrics collection when enabledMetrics are absent or instanceMonitoring is falsy"() { + setup: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.enabledMetrics = enabledMetrics + asgConfig.instanceMonitoring = instanceMonitoring + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + count * autoScaling.enableMetricsCollection({count == 1 ? it.metrics == ['GroupMinSize', 'GroupMaxSize'] : _ }) + + where: + enabledMetrics | instanceMonitoring | count + null | null | 0 + [] | null | 0 + [] | false | 0 + ['GroupMinSize', 'GroupMaxSize'] | null | 0 + ['GroupMinSize', 'GroupMaxSize'] | [] | 0 + ['GroupMinSize', 'GroupMaxSize'] | false | 0 + ['GroupMinSize', 'GroupMaxSize'] | true | 1 + } + + void "enables metrics collection for all metrics when enabledMetrics is an empty list and instanceMonitoring is true"() { + setup: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.enabledMetrics = [] + asgConfig.instanceMonitoring = true + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + // According to + // https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html, + // specifying granularity with no metrics means all metrics. 
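+ // Mirrors the launch configuration spec above: a granularity-only
+ // EnableMetricsCollection request is expected to turn on every group metric.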
+ 1 * autoScaling.enableMetricsCollection({ (it.granularity == '1Minute') && (it.metrics == []) }) + } + + void "continues if serverGroup already exists, is reasonably the same and within safety window"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.classicLoadBalancers = ["one", "two"] + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + noExceptionThrown() + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchTemplate: ltSpec, + loadBalancerNames: ["one", "two"], + createdTime: new Date() + ) + ] + ) + } + } + + @Unroll + void "continues if serverGroup already exists, and existing and desired autoscaling group have the same configuration"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetType = sbTypeReq + asgConfig.subnetIds = sbReq + asgConfig.availabilityZones = azReq + asgConfig.spotMaxPrice = "0.5" + asgConfig.classicLoadBalancers = ["two", "one"] + asgConfig.targetGroupArns = ["tg2", "tg1"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["tp2", "tp1"] + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + noExceptionThrown() + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + if (sbTypeReq != null) { + 2 * amazonEC2.describeSubnets() >> new DescribeSubnetsResult(subnets: [new Subnet(subnetId: 'sb1', availabilityZone: 'us-east-1a'), new Subnet(subnetId: 'sb2', availabilityZone: 'us-east-1b'),]) + } + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchTemplate: ltSpec, + availabilityZones: az, + vPCZoneIdentifier: sb, + loadBalancerNames: ["one", "two"], + targetGroupARNs: ["tg1", "tg2"], + defaultCooldown: 5, + healthCheckGracePeriod: 5, + healthCheckType: "ec2", + terminationPolicies: ["tp1", "tp2"], + createdTime: new Date() + ) + ] + ) + } + 1 * task.updateStatus('AWS_DEPLOY_TEST', deployMsg) + + where: + sbTypeReq | sbReq | sb | azReq | az | deployMsg + "internal"|["sb2","sb1"]|"sb1,sb2"| null | null |' > Deploying to subnetIds: sb2,sb1' + "internal"|["sb2","sb1"]|"sb1,sb2"|["us-east-1b", "us-east-1a"]| null |' > Deploying to subnetIds: sb2,sb1' + null | null | null |["us-east-1b", "us-east-1a"]|["us-east-1a", "us-east-1b"]|'Deploying to availabilityZones: [us-east-1b, us-east-1a]' + } + + void "throws duplicate exception if existing autoscaling group was created before safety window"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.classicLoadBalancers = ["one", "two"] + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + thrown(AlreadyExistsException) + 1 * 
ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchTemplate: ltSpec, + loadBalancerNames: ["one", "two"], + createdTime: new Date(Instant.now().minus(3, ChronoUnit.HOURS).toEpochMilli()) + ) + ] + ) + } + } + + @Unroll + void "throws duplicate exception if existing and desired autoscaling group differ in configuration"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetType = sb == null ? null :"internal" + asgConfig.subnetIds = sb == null ? null : ["sb2","sb1"] + asgConfig.availabilityZones = azReq + asgConfig.spotMaxPrice = "0.5" + asgConfig.classicLoadBalancers = ["two", "one"] + asgConfig.targetGroupArns = ["tg2", "tg1"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["tp2", "tp1"] + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + thrown(AlreadyExistsException) + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + _ * amazonEC2.describeSubnets() >> new DescribeSubnetsResult(subnets: [new Subnet(subnetId: 'sb1', availabilityZone: 'az1'),new Subnet(subnetId: 'sb2', availabilityZone: 'az2'),]) + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + launchTemplate: new LaunchTemplateSpecification(launchTemplateId: ltId, version: "1"), + availabilityZones: null, + vPCZoneIdentifier: sb, + loadBalancerNames: lb, + targetGroupARNs: tg, + defaultCooldown: cd, + healthCheckGracePeriod: hcGp, + healthCheckType: hc, + terminationPolicies: tp, + createdTime: new Date() + ) + ] + ) + } + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with launch template lt-1") + 1 * task.updateStatus(taskPhase, "$asgName already exists and does not seem to match desired state on: $failedPredicates") + + where: + ltId | sb | azReq | lb | tg |cd | hcGp | hc | tp || failedPredicates + "blah" | "blah" | [] | ["blah"] | ["blah"] | 0 | 0 | "blah" | ["blah"] || "health check type,target groups,health check grace period,launch template,cooldown,subnets,termination policies,load balancers" + "blah" |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "launch template" + "lt-1" | "blah" | null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "subnets" + "lt-1" | null |["az3","az2","az1"]|["one", "two"]|["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "availability zones" + "lt-1" |"sb1,sb2"| ["az2","az1"] | ["blah"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "load balancers" + "lt-1" |"sb1,sb2"| ["az2","az1"] |["one", "two"]| ["blah"] | 5 | 5 | "ec2" |["tp1", "tp2"]|| "target groups" + "lt-1" |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 0 | 5 | "ec2" |["tp1", "tp2"]|| "cooldown" + "lt-1" |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 5 | 0 | "ec2" |["tp1", "tp2"]|| "health check grace period" + "lt-1" |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 5 
| 5 | "blah" |["tp1", "tp2"]|| "health check type" + "lt-1" |"sb1,sb2"| null |["one", "two"]|["tg1", "tg2"]| 5 | 5 | "ec2" | ["blah"] || "termination policies" + } + + void "creates lifecycle hooks before scaling out asg"() { + setup: + def hooks = [getHook(), getHook()] + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.lifecycleHooks = hooks + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 1 * autoScaling.createAutoScalingGroup(_) + 1 * asgLifecycleHookWorker.attach(_, hooks, "myasg-v000") + 1 * autoScaling.updateAutoScalingGroup(*_) + } + + def getHook() { + new AmazonAsgLifecycleHook( + name: "hook-name-" + new Random().nextInt(), + roleARN: "role-rn", + notificationTargetARN: "target-arn", + notificationMetadata: null, + lifecycleTransition: EC2InstanceLaunching, + heartbeatTimeout: 300, + defaultResult: CONTINUE + ) + } + + void "should suspend auto scaling processes if specified"() { + setup: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.suspendedProcesses = ["Launch"] + + when: + asgWithLtBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + 1 * autoScaling.createAutoScalingGroup(_) + 1 * autoScaling.suspendProcesses(_) + 1 * autoScaling.updateAutoScalingGroup(*_) + } + + @Unroll + void "should enable capacity rebalance, if specified"() { + given: + def asgWithLtBuilder = new AsgWithLaunchTemplateBuilder(ltService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.capacityRebalance = capacityRebalance + + when: + def request = asgWithLtBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ltService.createLaunchTemplate(asgConfig, asgName, _) >> lt + request.capacityRebalance == capacityRebalance + + where: + capacityRebalance << [true, false, null] + } + +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithMixedInstancesPolicyBuilderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithMixedInstancesPolicyBuilderSpec.groovy new file mode 100644 index 00000000000..498ba880ba0 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/asg/asgbuilders/AsgWithMixedInstancesPolicyBuilderSpec.groovy @@ -0,0 +1,678 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.asg.asgbuilders + +import com.amazonaws.services.autoscaling.AmazonAutoScaling +import com.amazonaws.services.autoscaling.model.AlreadyExistsException +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult +import com.amazonaws.services.autoscaling.model.InstancesDistribution +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DescribeSubnetsResult +import com.amazonaws.services.ec2.model.LaunchTemplate +import com.amazonaws.services.ec2.model.Subnet +import com.amazonaws.services.ec2.model.Tag +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgConfigHelper +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgLifecycleHookWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.config.AwsConfiguration +import org.apache.commons.lang3.StringUtils +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.Instant +import java.time.temporal.ChronoUnit + +import static com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.DefaultResult.CONTINUE +import static com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook.Transition.EC2InstanceLaunching + +class AsgWithMixedInstancesPolicyBuilderSpec extends Specification { + + def ec2LtService = Mock(LaunchTemplateService) + def securityGroupService = Mock(SecurityGroupService) + def deployDefaults = Mock(AwsConfiguration.DeployDefaults) + def autoScaling = Mock(AmazonAutoScaling) + def amazonEC2 = Mock(AmazonEC2) + def asgLifecycleHookWorker = Mock(AsgLifecycleHookWorker) + + def credential = TestCredential.named('foo') + def userDataOverride = Mock(UserDataOverride) + def task = Mock(Task) + def taskPhase = "AWS_DEPLOY_TEST" + def asgConfig, asgName, asgConfigHelper + def ec2Lt, asgLtSpec, asgLt, overrides, instancesDist, mip, override1, override2 + + def setup() { + asgConfigHelper = Spy(AsgConfigHelper) + override1 = new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "some.type.large", + weightedCapacity: 2) + override2 = new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "some.type.xlarge", + weightedCapacity: 4) + asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .setLaunchTemplate(true) + .credentials(credential) + .legacyUdf(false) + .application("myasg") + .region("us-east-1") + .minInstances(1) + .maxInstances(3) + .desiredInstances(2) + .instanceType("some.type.medium") + .securityGroups(["my-sg"]) + .spotMaxPrice("2") + .onDemandBaseCapacity(1) + .onDemandPercentageAboveBaseCapacity(50) + .spotAllocationStrategy("capacity-optimized") + 
.launchTemplateOverridesForInstanceType([override1, override2]) + .build() + + asgName = "myasg-v000" + securityGroupService.resolveSecurityGroupIdsWithSubnetType(_,_) >> ["sg-1"] + + // general expected parameters in request + ec2Lt = new LaunchTemplate(launchTemplateName: "lt-1", launchTemplateId: "lt-1", latestVersionNumber: 1, defaultVersionNumber: 0) + asgLtSpec = new com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification(launchTemplateId: ec2Lt.getLaunchTemplateId(), version: "\$Latest") + overrides = [ + new LaunchTemplateOverrides().withInstanceType(override1.instanceType).withWeightedCapacity(override1.weightedCapacity), + new LaunchTemplateOverrides().withInstanceType(override2.instanceType).withWeightedCapacity(override2.weightedCapacity) + ] + asgLt = new com.amazonaws.services.autoscaling.model.LaunchTemplate(launchTemplateSpecification: asgLtSpec, overrides: overrides) + instancesDist = new InstancesDistribution( + onDemandBaseCapacity: 1, + onDemandPercentageAboveBaseCapacity: 50, + spotMaxPrice: "2", + spotAllocationStrategy: "capacity-optimized" + ) + mip = new MixedInstancesPolicy().withInstancesDistribution(instancesDist).withLaunchTemplate(asgLt) + } + + private DescribeSubnetsResult getDescribeSubnetsResult() { + return new DescribeSubnetsResult(subnets: [ + new Subnet(subnetId: 'subnetId1', availabilityZone: 'us-east-1a', tags: [new Tag(key: 'immutable_metadata', value: '{"purpose": "internal", "target": "ec2" }')]), + new Subnet(subnetId: 'subnetId2', availabilityZone: 'us-west-2a'), + new Subnet(subnetId: 'subnetId3', availabilityZone: 'us-west-2a'), + ]) + } + + void "should build ASG request with mixed instances policy correctly"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetType = "internal" + asgConfig.classicLoadBalancers = ["one", "two"] + asgConfig.targetGroupArns = ["tg1", "tg2"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["Default", "OldestInstance"] + asgConfig.userDataOverride = userDataOverride + asgConfig.ebsOptimized = true + + when: + def request = asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + + and: + request.getLaunchTemplate() == null + request.getMixedInstancesPolicy() == mip + + and: + request.getAutoScalingGroupName() == asgName + request.getMinSize() == 1 + request.getMaxSize() == 3 + request.getDesiredCapacity() == 2 + request.getLoadBalancerNames() == ["one", "two"] + request.getTargetGroupARNs() == ["tg1", "tg2"] + request.getDefaultCooldown() == 5 + request.getHealthCheckGracePeriod() == 5 + request.getHealthCheckType() == "ec2" + request.getTerminationPolicies() == ["Default", "OldestInstance"] + } + + void "should 
build ASG request with tags correctly"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.tags = [foo: "bar"] + + when: + def request = asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + request.getMixedInstancesPolicy() == mip + def tag = request.getTags()[0] + tag.getKey() == "foo" + tag.getValue() == "bar" + tag.getPropagateAtLaunch() == true + } + + @Unroll + void "should favor subnetIds over AZ while building ASG request"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-west-2a"] + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = subnetType + + when: + def request = asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + (StringUtils.isEmpty(subnetType) ? 0 : 2) * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + 1 * task.updateStatus(taskPhase, deployMsg) + + and: + request.getMixedInstancesPolicy() == mip + request.getVPCZoneIdentifier() == subnetIdsForAsg + subnetIdsForAsg == null + ? 
request.getAvailabilityZones() == ["us-west-2a"] + : request.getAvailabilityZones() == [] + + where: + subnetType | subnetIds | subnetIdsForAsg || deployMsg + "internal" |["subnetId3"] | "subnetId3" || " > Deploying to subnetIds: subnetId3" + "internal" |["subnetId2"] | "subnetId2" || " > Deploying to subnetIds: subnetId2" + null | null | null || "Deploying to availabilityZones: [us-west-2a]" + null | [] | null || "Deploying to availabilityZones: [us-west-2a]" + } + + @Unroll + void "should filter and validate subnets by AZ while building ASG request"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = "internal" + + when: + def request = asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + 1 * task.updateStatus(taskPhase, " > Deploying to subnetIds: subnetId1") + + and: + request.getMixedInstancesPolicy() == mip + request.getVPCZoneIdentifier() == subnetIdsForAsg + request.getAvailabilityZones() == [] + + where: + subnetIds | subnetIdsForAsg + [] | "subnetId1" // filter subnets by AZ + ["subnetId1"] | "subnetId1" // validate subnets by AZ + } + + @Unroll + void "throws exception when invalid subnet IDs are specified while building ASG request"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = availabilityZones + asgConfig.subnetIds = subnetIds + asgConfig.subnetType = subnetType + + when: + asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + (StringUtils.isEmpty(subnetType) ? 
0 : 1) * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + + and: + def ex = thrown(IllegalStateException) + ex.getMessage() == String.format( + "One or more subnet ids are not valid (invalidSubnetIds: %s, availabilityZones: %s)", + invalidSubnetIds, String.join(",", availabilityZones)) + + where: + subnetType | subnetIds | availabilityZones | invalidSubnetIds + null | ["subnetId1"] | ["us-west-2a"] | "subnetId1" + "" | ["subnetId2"] | ["us-east-1a"] | "subnetId2" + "internal" | ["subnetId3"] | ["us-east-1a"] | "subnetId3" + "internal" | ["invalidSubnetId"] | [] | "invalidSubnetId" + "internal" | ["subnetId1", "subnetId2"] | ["us-east-1a"] | "subnetId2" + "internal" | ["subnetId1", "subnetId2", "subnetId3"] | ["us-west-2a"] | "subnetId1" + } + + @Unroll + void "should filter subnets by subnet purpose conditionally while building ASG request"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = [] + asgConfig.subnetType = "internal" + asgConfig.subnetIds = subnetIds + + when: + def request = asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + 1 * task.updateStatus(taskPhase, deployMsg) + + and: + request.getMixedInstancesPolicy() == mip + request.getVPCZoneIdentifier() == subnetIdsForAsg + request.getAvailabilityZones() == [] + + where: + subnetIds | subnetIdsForAsg || deployMsg + [] | "subnetId1" || " > Deploying to subnetIds: subnetId1" // filtered by subnet purpose tags + ["subnetId2"] | "subnetId2" || " > Deploying to subnetIds: subnetId2" // not filtered by subnet purpose tags + ["subnetId1","subnetId2"] | "subnetId1,subnetId2" || " > Deploying to subnetIds: subnetId1,subnetId2" // not filtered by subnet purpose tags + } + + @Unroll + void "throws exception when subnetIds are not specified and no suitable subnet found for subnet purpose while building ASG request"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetIds = [] + asgConfig.availabilityZones = availabilityZones + asgConfig.subnetType = subnetType + + when: + asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 2 * amazonEC2.describeSubnets() >> 
getDescribeSubnetsResult() + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + + and: + def ex = thrown(RuntimeException) + ex.getMessage() == "No suitable subnet was found for internal subnet purpose '$subnetType'!" + + where: + subnetType | availabilityZones + "internal" | ["eu-central-1a"] + "internal" | ["us-west-2a"] + "unknown" | ["us-west-1b"] + } + + void "should build ASG with mixed instances policy correctly"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.availabilityZones = ["us-east-1a"] + asgConfig.subnetType = "internal" + asgConfig.classicLoadBalancers = ["one", "two"] + asgConfig.targetGroupArns = ["tg1", "tg2"] + asgConfig.userDataOverride = userDataOverride + asgConfig.securityGroups = ["mysg"] + + when: + def asgNameRes = asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + asgNameRes == asgName + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 2 * amazonEC2.describeSubnets() >> getDescribeSubnetsResult() + 1 * autoScaling.createAutoScalingGroup(_) + 1 * autoScaling.updateAutoScalingGroup(_) + 0 * autoScaling. _ + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + 1 * task.updateStatus(taskPhase, ' > Deploying to subnetIds: subnetId1') + 1 * task.updateStatus(taskPhase, 'Setting size of myasg-v000 in foo/us-east-1 to [min=1, max=3, desired=2]') + 1 * task.updateStatus(taskPhase, "Deployed EC2 server group named $asgName") + } + + @Unroll + void "does not enable metrics collection when enabledMetrics are absent or instanceMonitoring is falsy"() { + setup: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.enabledMetrics = enabledMetrics + asgConfig.instanceMonitoring = instanceMonitoring + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + count * autoScaling.enableMetricsCollection({count == 1 ? 
it.metrics == ['GroupMinSize', 'GroupMaxSize'] : _ }) + + where: + enabledMetrics | instanceMonitoring | count + null | null | 0 + [] | null | 0 + [] | false | 0 + ['GroupMinSize', 'GroupMaxSize'] | null | 0 + ['GroupMinSize', 'GroupMaxSize'] | [] | 0 + ['GroupMinSize', 'GroupMaxSize'] | false | 0 + ['GroupMinSize', 'GroupMaxSize'] | true | 1 + } + + void "enables metrics collection for all metrics when enabledMetrics is an empty list and instanceMonitoring is true"() { + setup: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.enabledMetrics = [] + asgConfig.instanceMonitoring = true + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + // According to + // https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html, + // specifying granularity with no metrics means all metrics. + 1 * autoScaling.enableMetricsCollection({ (it.granularity == '1Minute') && (it.metrics == []) }) + } + + void "continues if serverGroup already exists, is reasonably the same and within safety window"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.classicLoadBalancers = ["one", "two"] + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + noExceptionThrown() + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + mixedInstancesPolicy: mip, + loadBalancerNames: ["one", "two"], + createdTime: new Date() + ) + ] + ) + } + } + + @Unroll + void "continues if serverGroup already exists, and existing and desired autoscaling group have the same configuration"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetType = sbTypeReq + asgConfig.subnetIds = sbReq + asgConfig.availabilityZones = azReq + asgConfig.classicLoadBalancers = ["two", "one"] + asgConfig.targetGroupArns = ["tg2", "tg1"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "elb" + asgConfig.terminationPolicies = ["tp2", "tp1"] + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + noExceptionThrown() + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + if (sbTypeReq != null) { + 2 * amazonEC2.describeSubnets() >> new DescribeSubnetsResult(subnets: [new Subnet(subnetId: 'sb1', availabilityZone: 'us-east-1a'), new Subnet(subnetId: 'sb2', availabilityZone: 'us-east-1b'),]) + } + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + mixedInstancesPolicy: new MixedInstancesPolicy( + instancesDistribution: new InstancesDistribution( + onDemandBaseCapacity: 1, + 
onDemandPercentageAboveBaseCapacity: 50, + spotMaxPrice: "2", + spotAllocationStrategy: "capacity-optimized" + ), + launchTemplate: new com.amazonaws.services.autoscaling.model.LaunchTemplate( + launchTemplateSpecification: new LaunchTemplateSpecification( + launchTemplateId: ec2Lt.getLaunchTemplateId(), + version: "\$Latest" + ), + overrides:[ + new LaunchTemplateOverrides(instanceType: "some.type.large", weightedCapacity: 2), + new LaunchTemplateOverrides(instanceType: "some.type.xlarge", weightedCapacity: 4)] + ) + ), + availabilityZones: az, + vPCZoneIdentifier: sb, + loadBalancerNames: ["one", "two"], + targetGroupARNs: ["tg1", "tg2"], + defaultCooldown: 5, + healthCheckGracePeriod: 5, + healthCheckType: "elb", + terminationPolicies: ["tp1", "tp2"], + createdTime: new Date() + ) + ] + ) + } + 1 * task.updateStatus('AWS_DEPLOY_TEST', deployMsg) + + where: + sbTypeReq | sbReq | sb | azReq | az | deployMsg + "internal"|["sb2","sb1"]|"sb1,sb2"| null | null |' > Deploying to subnetIds: sb2,sb1' + "internal"|["sb2","sb1"]|"sb1,sb2"|["us-east-1b", "us-east-1a"]| null |' > Deploying to subnetIds: sb2,sb1' + null | null | null |["us-east-1b", "us-east-1a"]|["us-east-1a", "us-east-1b"]|'Deploying to availabilityZones: [us-east-1b, us-east-1a]' + } + + void "throws duplicate exception if existing autoscaling group was created before safety window"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.classicLoadBalancers = ["one", "two"] + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + thrown(AlreadyExistsException) + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + mixedInstancesPolicy: mip, + loadBalancerNames: ["one", "two"], + createdTime: new Date(Instant.now().minus(3, ChronoUnit.HOURS).toEpochMilli()) + ) + ] + ) + } + } + + @Unroll + void "throws duplicate exception if existing and desired autoscaling group differ in configuration"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.subnetType = sb == null ? null :"internal" + asgConfig.subnetIds = sb == null ? 
null : ["sb2","sb1"] + asgConfig.availabilityZones = azReq + asgConfig.classicLoadBalancers = ["two", "one"] + asgConfig.targetGroupArns = ["tg2", "tg1"] + asgConfig.cooldown = 5 + asgConfig.healthCheckGracePeriod = 5 + asgConfig.healthCheckType = "ec2" + asgConfig.terminationPolicies = ["tp2", "tp1"] + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + thrown(AlreadyExistsException) + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + _ * amazonEC2.describeSubnets() >> new DescribeSubnetsResult(subnets: [new Subnet(subnetId: 'sb1', availabilityZone: 'az1'),new Subnet(subnetId: 'sb2', availabilityZone: 'az2'),]) + 1 * autoScaling.createAutoScalingGroup(_) >> { throw new AlreadyExistsException("Already exists, man") } + 1 * autoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult( + autoScalingGroups: [ + new AutoScalingGroup( + autoScalingGroupName: "myasg-v000", + mixedInstancesPolicy: new MixedInstancesPolicy( + instancesDistribution: new InstancesDistribution( + onDemandBaseCapacity: 1, + onDemandPercentageAboveBaseCapacity: 50, + spotInstancePools: spotInstancePools, + spotMaxPrice: spotMaxPrice, + spotAllocationStrategy: spotAllocStrategy + ), + launchTemplate: new com.amazonaws.services.autoscaling.model.LaunchTemplate( + launchTemplateSpecification: new LaunchTemplateSpecification( + launchTemplateId: ec2LtId, + version: "\$Latest" + ), + overrides:[ + new LaunchTemplateOverrides(instanceType: override1InstType, weightedCapacity: override1Wgt), + new LaunchTemplateOverrides(instanceType: "some.type.xlarge", weightedCapacity: 4)] + ) + ), + availabilityZones: null, + vPCZoneIdentifier: sb, + loadBalancerNames: lb, + targetGroupARNs: tg, + defaultCooldown: cd, + healthCheckGracePeriod: hcGp, + healthCheckType: hc, + terminationPolicies: tp, + createdTime: new Date() + ) + ] + ) + } + + 1 * task.updateStatus(taskPhase, "Deploying ASG $asgName with mixed instances policy " + + "{LaunchTemplate: {LaunchTemplateSpecification: {LaunchTemplateId: lt-1,Version: \$Latest},Overrides: [{InstanceType: some.type.large,WeightedCapacity: 2,}, {InstanceType: some.type.xlarge,WeightedCapacity: 4,}]}," + + "InstancesDistribution: {OnDemandBaseCapacity: 1,OnDemandPercentageAboveBaseCapacity: 50,SpotAllocationStrategy: capacity-optimized,SpotMaxPrice: 2}}") + 1 * task.updateStatus(taskPhase, "$asgName already exists and does not seem to match desired state on: $failedPredicates") + + where: + ec2LtId | override1InstType | override1Wgt| spotAllocStrategy | spotMaxPrice | spotInstancePools | sb | azReq | lb | tg |cd | hcGp | hc | tp || failedPredicates + "blah" | "blah" | 1 | "blah" | "2" | null | "blah" | [] | ["blah"] | ["blah"] | 0 | 0 | "blah" | ["blah"] || "health check type,target groups,mixed instances policy,health check grace period,cooldown,subnets,termination policies,load balancers" + "blah" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "mixed instances policy" + "lt-1" | "blah" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "mixed instances policy" + "lt-1" | "some.type.large" | 3 |"capacity-optimized"| "1.5" | null |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "mixed instances policy" + "lt-1" | "some.type.large" | 2 | "lowest-price" | "2" | 5 |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", 
"tp2"]|| "mixed instances policy" + "lt-1" | "some.type.large" | 2 | "blah" | "2" | null |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "mixed instances policy" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "1" | null |"sb1,sb2"| null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "mixed instances policy" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null | "blah" | null |["one","two"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "subnets" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null | null |["az3","az2","az1"]|["one", "two"]|["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "availability zones" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| ["az2","az1"] | ["blah"] |["tg1", "tg2"]| 5 | 5 | "ec2" |["tp1", "tp2"]|| "load balancers" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| ["az2","az1"] |["one", "two"]| ["blah"] | 5 | 5 | "ec2" |["tp1", "tp2"]|| "target groups" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 0 | 5 | "ec2" |["tp1", "tp2"]|| "cooldown" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 5 | 0 | "ec2" |["tp1", "tp2"]|| "health check grace period" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| ["az2","az1"] |["one", "two"]|["tg1", "tg2"]| 5 | 5 | "blah" |["tp1", "tp2"]|| "health check type" + "lt-1" | "some.type.large" | 2 |"capacity-optimized"| "2" | null |"sb1,sb2"| null |["one", "two"]|["tg1", "tg2"]| 5 | 5 | "ec2" | ["blah"] || "termination policies" + } + + void "creates lifecycle hooks before scaling out asg"() { + setup: + def hooks = [getHook(), getHook()] + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.lifecycleHooks = hooks + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 1 * autoScaling.createAutoScalingGroup(_) + 1 * asgLifecycleHookWorker.attach(_, hooks, "myasg-v000") + 1 * autoScaling.updateAutoScalingGroup(*_) + } + + def getHook() { + new AmazonAsgLifecycleHook( + name: "hook-name-" + new Random().nextInt(), + roleARN: "role-rn", + notificationTargetARN: "target-arn", + notificationMetadata: null, + lifecycleTransition: EC2InstanceLaunching, + heartbeatTimeout: 300, + defaultResult: CONTINUE + ) + } + + void "should suspend auto scaling processes if specified"() { + setup: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.suspendedProcesses = ["Launch"] + + when: + asgWithMipBuilder.build(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + 1 * autoScaling.createAutoScalingGroup(_) + 1 * autoScaling.suspendProcesses(_) + 1 * autoScaling.updateAutoScalingGroup(*_) + } + + @Unroll + void "should enable capacity rebalance, if specified"() { + given: + def asgWithMipBuilder = new AsgWithMixedInstancesPolicyBuilder(ec2LtService, securityGroupService, deployDefaults, autoScaling, amazonEC2, asgLifecycleHookWorker) + asgConfig.capacityRebalance = capacityRebalance + + when: + def request = 
asgWithMipBuilder.buildRequest(task, taskPhase, asgName, asgConfig) + + then: + 1 * ec2LtService.createLaunchTemplate(asgConfig, asgName, _) >> ec2Lt + request.capacityRebalance == capacityRebalance + + where: + capacityRebalance << [true, false, null] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverterUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverterUnitSpec.groovy index 37ef680c0d3..f4b1be11ad6 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/BasicAmazonDeployAtomicOperationConverterUnitSpec.groovy @@ -16,9 +16,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.converters +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.LaunchTemplatePlacementRequest import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.aws.deploy.converters.BasicAmazonDeployAtomicOperationConverter +import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription @@ -30,14 +34,49 @@ class BasicAmazonDeployAtomicOperationConverterUnitSpec extends Specification { @Shared ObjectMapper mapper = new ObjectMapper() + @Shared + RegionScopedProviderFactory regionScopedProviderFactory + @Shared BasicAmazonDeployAtomicOperationConverter converter + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider = + Mock(RegionScopedProviderFactory.RegionScopedProvider) + + SecurityGroupService securityGroupService = new SecurityGroupService(Mock(AmazonEC2), Mock(SubnetAnalyzer)) + def setupSpec() { def accountCredentialsProvider = Stub(AccountCredentialsProvider) { getCredentials('test') >> Stub(NetflixAmazonCredentials) } - this.converter = new BasicAmazonDeployAtomicOperationConverter(objectMapper: mapper, accountCredentialsProvider: accountCredentialsProvider) + this.regionScopedProviderFactory = Stub(RegionScopedProviderFactory) + this.converter = new BasicAmazonDeployAtomicOperationConverter(objectMapper: mapper, + accountCredentialsProvider: accountCredentialsProvider, + regionScopedProviderFactory: regionScopedProviderFactory) + } + + void "converts securityGroups to securityGroupNames"() { + setup: + def securityGroups = ["sg-12345678", "sg-87654321"] + def input = [application : "asgard", amiName: "ami-000", stack: "asgard-test", instanceType: "m3.medium", + availabilityZones: ["us-west-1": ["us-west-1a"]], capacity: [min: 1, max: 2, desired: 5], + credentials : "test", securityGroups: securityGroups] + + regionScopedProviderFactory.forRegion(_ as NetflixAmazonCredentials, _ as String) >> regionScopedProvider + regionScopedProvider.getSecurityGroupService() >> securityGroupService + securityGroupService.getSecurityGroupNamesFromIds(_ as Collection) >> 
[(input.application): input.securityGroups[0]] + + when: + def description = converter.convertDescription(input) + + then: + description instanceof BasicAmazonDeployDescription + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof DeployAtomicOperation } void "basicAmazonDeployDescription type returns BasicAmazonDeployDescription and DeployAtomicOperation"() { @@ -90,4 +129,28 @@ class BasicAmazonDeployAtomicOperationConverterUnitSpec extends Specification { max = "10" desired = "8" } + + void "should serialize launch template fields correctly"() { + setup: + def input = [application: "kato", credentials: 'test', + setLaunchTemplate: true, requireIMDSv2: true, associateIPv6Address: true, unlimitedCpuCredits: true, + placement: [groupName: "test-placement"], licenseSpecifications: [[arn: "test-arn"]], + onDemandAllocationStrategy: "prioritized", onDemandBaseCapacity: 2, onDemandPercentageAboveBaseCapacity: 50, spotAllocationStrategy: "lowest-price", + spotInstancePools: 3, spotPrice: "0.5", launchTemplateOverridesForInstanceType: [[instanceType: "some.type.large", weightedCapacity: 2]]] + + when: + def description = converter.convertDescription(input) + + then: + description.application == "kato" + description.placement == new BasicAmazonDeployDescription.LaunchTemplatePlacement(groupName: "test-placement") + description.licenseSpecifications == [new BasicAmazonDeployDescription.LaunchTemplateLicenseSpecification(arn: "test-arn")] + description.onDemandAllocationStrategy == "prioritized" + description.onDemandBaseCapacity == 2 + description.onDemandPercentageAboveBaseCapacity == 50 + description.spotAllocationStrategy == "lowest-price" + description.spotInstancePools == 3 + description.spotPrice == "0.5" + description.launchTemplateOverridesForInstanceType == [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "some.type.large", weightedCapacity: 2)] + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationAtomicOperationConverterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationAtomicOperationConverterSpec.groovy new file mode 100644 index 00000000000..f046434bf8b --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationAtomicOperationConverterSpec.groovy @@ -0,0 +1,63 @@ +/* + * Copyright 2021 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteCloudFormationAtomicOperation +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import spock.lang.Shared +import spock.lang.Specification + +class DeleteCloudFormationAtomicOperationConverterSpec extends Specification { + + @Shared + ObjectMapper mapper = new ObjectMapper() + + @Shared + DeleteCloudFormationAtomicOperationConverter converter + + def setupSpec() { + this.converter = new DeleteCloudFormationAtomicOperationConverter(objectMapper: mapper) + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def mockCredentials = Mock(NetflixAmazonCredentials) + accountCredentialsProvider.getCredentials(_) >> mockCredentials + converter.accountCredentialsProvider = accountCredentialsProvider + } + + void "DeleteCloudFormationConverter returns DeleteCloudFormationDescription"() { + setup: + def input = [stackName : "stack", + region : "eu-west-1", + credentials : "credentials"] + + when: + DeleteCloudFormationDescription description = converter.convertDescription(input) + + then: + description instanceof DeleteCloudFormationDescription + description.stackName == "stack" + description.region == "eu-west-1" + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof DeleteCloudFormationAtomicOperation + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationChangeSetAtomicOperationConverterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationChangeSetAtomicOperationConverterSpec.groovy new file mode 100644 index 00000000000..c58ba5adc1c --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeleteCloudFormationChangeSetAtomicOperationConverterSpec.groovy @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 Adevinta + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationChangeSetDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeleteCloudFormationChangeSetAtomicOperation +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import spock.lang.Shared +import spock.lang.Specification + +class DeleteCloudFormationChangeSetAtomicOperationConverterSpec extends Specification { + + @Shared + ObjectMapper mapper = new ObjectMapper() + + @Shared + DeleteCloudFormationChangeSetAtomicOperationConverter converter + + def setupSpec() { + this.converter = new DeleteCloudFormationChangeSetAtomicOperationConverter(objectMapper: mapper) + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def mockCredentials = Mock(NetflixAmazonCredentials) + accountCredentialsProvider.getCredentials(_) >> mockCredentials + converter.accountCredentialsProvider = accountCredentialsProvider + } + + void "DeleteCloudFormationChangeSetConverter returns DeleteCloudFormationChangeSetDescription"() { + setup: + def input = [stackName : "stack", + changeSetName : "changeset", + region : "eu-west-1", + credentials : "credentials"] + + when: + DeleteCloudFormationChangeSetDescription description = converter.convertDescription(input) + + then: + description instanceof DeleteCloudFormationChangeSetDescription + description.stackName == "stack" + description.changeSetName == "changeset" + description.region == "eu-west-1" + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof DeleteCloudFormationChangeSetAtomicOperation + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeployCloudFormationAtomicOperationConverterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeployCloudFormationAtomicOperationConverterSpec.groovy new file mode 100644 index 00000000000..7833118aeb4 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/DeployCloudFormationAtomicOperationConverterSpec.groovy @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeployCloudFormationDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.DeployCloudFormationAtomicOperation +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import spock.lang.Shared +import spock.lang.Specification + +class DeployCloudFormationAtomicOperationConverterSpec extends Specification { + + @Shared + ObjectMapper mapper = new ObjectMapper() + + @Shared + DeployCloudFormationAtomicOperationConverter converter + + def setupSpec() { + this.converter = new DeployCloudFormationAtomicOperationConverter(objectMapper: mapper) + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def mockCredentials = Mock(NetflixAmazonCredentials) + accountCredentialsProvider.getCredentials(_) >> mockCredentials + converter.accountCredentialsProvider = accountCredentialsProvider + } + + void "DeployCloudFormationConverter returns DeployCloudFormationDescription with Map templateBody"() { + setup: + def input = [stackName : "asgard", + templateBody : [ field1: "field1" ], + parameters : [ param1: "param1" ], + tags : [ tag1: "tag1" ], + capabilities : [ "cap1", "cap2" ], + region : "eu-west-1", + credentials : "credentials", + isChangeSet : true, + changeSetName : "changeSetName"] + + when: + def description = converter.convertDescription(input) + + then: + description instanceof DeployCloudFormationDescription + ((DeployCloudFormationDescription) description).templateBody == '{"field1":"field1"}' + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof DeployCloudFormationAtomicOperation + } + + void "DeployCloudFormationConverter returns DeployCloudFormationDescription with string templateBody"() { + setup: + def input = [stackName : "asgard", + templateBody : 'field1: "field1"', + parameters : [ param1: "param1" ], + tags : [ tag1: "tag1" ], + capabilities : [ "cap1", "cap2" ], + region : "eu-west-1", + credentials : "credentials"] + + when: + def description = converter.convertDescription(input) + + then: + description instanceof DeployCloudFormationDescription + ((DeployCloudFormationDescription) description).templateBody == 'field1: "field1"' + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof DeployCloudFormationAtomicOperation + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ExecuteCloudFormationChangeSetAtomicOperationConverterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ExecuteCloudFormationChangeSetAtomicOperationConverterSpec.groovy new file mode 100644 index 00000000000..e31291c6908 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/ExecuteCloudFormationChangeSetAtomicOperationConverterSpec.groovy @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Adevinta. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.converters + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ExecuteCloudFormationChangeSetDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ExecuteCloudFormationChangeSetAtomicOperation +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import spock.lang.Shared +import spock.lang.Specification + +class ExecuteCloudFormationChangeSetAtomicOperationConverterSpec extends Specification { + + @Shared + ObjectMapper mapper = new ObjectMapper() + + @Shared + ExecuteCloudFormationChangeSetAtomicOperationConverter converter + + def setupSpec() { + this.converter = new ExecuteCloudFormationChangeSetAtomicOperationConverter(objectMapper: mapper) + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def mockCredentials = Mock(NetflixAmazonCredentials) + accountCredentialsProvider.getCredentials(_) >> mockCredentials + converter.accountCredentialsProvider = accountCredentialsProvider + } + + void "ExecuteCloudFormationChangeSetConverter returns ExecuteCloudFormationChangeSetDescription"() { + setup: + def input = [stackName : "stack", + changeSetName : "changeset", + region : "eu-west-1", + credentials : "credentials"] + + when: + ExecuteCloudFormationChangeSetDescription description = converter.convertDescription(input) + + then: + description instanceof ExecuteCloudFormationChangeSetDescription + description.stackName == "stack" + description.changeSetName == "changeset" + description.region == "eu-west-1" + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof ExecuteCloudFormationChangeSetAtomicOperation + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateClusterConfigurationsAtomicOperationConverterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateClusterConfigurationsAtomicOperationConverterSpec.groovy deleted file mode 100644 index 950fd04bede..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/converters/MigrateClusterConfigurationsAtomicOperationConverterSpec.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.converters - -import com.fasterxml.jackson.databind.ObjectMapper -import spock.lang.Shared -import spock.lang.Specification - -class MigrateClusterConfigurationsAtomicOperationConverterSpec extends Specification { - - @Shared - MigrateClusterConfigurationsAtomicOperationConverter converter - - def setupSpec() { - converter = new MigrateClusterConfigurationsAtomicOperationConverter(objectMapper: new ObjectMapper()) - } - - void 'converts regionMappings to maps if input as string:string'() { - setup: - def input = [ - regionMapping: [ - 'us-east-1': 'us-west-1', - 'us-west-2': ['eu-west-1': ['eu-west-1a']] - ] - ] - - when: - def description = converter.convertDescription(input) - - then: - description.regionMapping['us-east-1'] == ['us-west-1': []] - description.regionMapping['us-west-2'] == ['eu-west-1': ['eu-west-1a']] - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandlerUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandlerUnitSpec.groovy index ae62a7a3341..26604f8baea 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandlerUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/BasicAmazonDeployHandlerUnitSpec.groovy @@ -20,13 +20,23 @@ import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.amazonaws.services.autoscaling.model.BlockDeviceMapping import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult -import com.amazonaws.services.autoscaling.model.Ebs import com.amazonaws.services.autoscaling.model.LaunchConfiguration +import com.amazonaws.services.autoscaling.model.LaunchTemplate +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy import com.amazonaws.services.ec2.AmazonEC2 import com.amazonaws.services.ec2.model.DescribeImagesRequest import com.amazonaws.services.ec2.model.DescribeImagesResult +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult +import com.amazonaws.services.ec2.model.EbsBlockDevice import com.amazonaws.services.ec2.model.Image +import com.amazonaws.services.ec2.model.InstanceTypeInfo +import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMapping +import com.amazonaws.services.ec2.model.LaunchTemplateVersion +import com.amazonaws.services.ec2.model.ProcessorInfo +import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData import com.amazonaws.services.ec2.model.VpcClassicLink import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing as AmazonELBV1 import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersResult @@ -36,12 +46,17 @@ import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchTemplateRollOutConfig 
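An aside on the testing idiom used throughout the BasicAmazonDeployHandlerUnitSpec hunks below: the spec replaces AutoScalingWorker.deploy through Groovy metaClass interception so it can capture the AsgConfiguration the handler builds. A minimal self-contained sketch of that interception pattern, using a hypothetical Worker class rather than the production types:

class Worker {
  String deploy(Map config) { "deployed" }
}

def captured = null
Worker.metaClass.deploy = { Map config ->
  captured = config   // record the argument the caller passed in
  "foo"               // canned return value, as in the specs below
}

assert new Worker().deploy([ami: "ami-12345"]) == "foo"
assert captured.ami == "ami-12345"
Worker.metaClass = null  // reset, mirroring cleanupSpec()'s AutoScalingWorker.metaClass = null

Resetting the metaClass afterwards matters: a leaked override would bleed into unrelated specs running in the same JVM.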
+import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.config.AwsConfiguration import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.AsgReferenceCopier -import com.netflix.spinnaker.clouddriver.aws.deploy.AutoScalingWorker -import com.netflix.spinnaker.clouddriver.aws.deploy.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgReferenceCopier +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker.AsgConfiguration +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerLookupHelper import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.UpsertAmazonLoadBalancerResult @@ -56,7 +71,7 @@ import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactor import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory.RegionScopedProvider import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -88,13 +103,24 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { AmazonELBV1 elbV1 = Mock(AmazonELBV1) AwsConfiguration.AmazonServerGroupProvider amazonServerGroupProvider = Mock(AwsConfiguration.AmazonServerGroupProvider) + String instanceType List blockDevices ScalingPolicyCopier scalingPolicyCopier = Mock(ScalingPolicyCopier) def setup() { - amazonEC2.describeImages(_) >> new DescribeImagesResult().withImages(new Image().withImageId("ami-12345")) + this.instanceType = 'test.large' this.blockDevices = [new AmazonBlockDevice(deviceName: "/dev/sdb", virtualName: "ephemeral0")] + + amazonEC2.describeImages(_) >> new DescribeImagesResult() + .withImages(new Image().withImageId("ami-12345").withVirtualizationType('hvm').withArchitecture('x86_64')) + amazonEC2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [ + new InstanceTypeInfo( + instanceType: this.instanceType, + supportedVirtualizationTypes: ['hvm'], + processorInfo: new ProcessorInfo(supportedArchitectures: ['x86_64'], sustainedClockSpeedInGhz: 2.8), + )]) + def rspf = Stub(RegionScopedProviderFactory) { forRegion(_, _) >> Stub(RegionScopedProviderFactory.RegionScopedProvider) { getAutoScaling() >> Stub(AmazonAutoScaling) @@ -104,10 +130,11 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { } } def defaults = new AwsConfiguration.DeployDefaults(iamRole: 'IamRole') - def credsRepo = new MapBackedAccountCredentialsRepository() - credsRepo.save('baz', TestCredential.named('baz')) + def credsRepo = Stub(CredentialsRepository) { + getOne("baz") >> {TestCredential.named("baz")} + } this.handler = new BasicAmazonDeployHandler( - rspf, credsRepo, amazonServerGroupProvider, defaults, scalingPolicyCopier, blockDeviceConfig + rspf, 
credsRepo, amazonServerGroupProvider, defaults, scalingPolicyCopier, blockDeviceConfig, Mock(LaunchTemplateRollOutConfig) ) { @Override LoadBalancerLookupHelper loadBalancerLookupHelper() { @@ -123,6 +150,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { def cleanupSpec() { AutoScalingWorker.metaClass = null + InstanceTypeUtils.metaClass = null } void "handler supports basic deploy description type"() { @@ -136,8 +164,11 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "handler invokes a deploy feature for each specified region"() { setup: def deployCallCounts = 0 - AutoScalingWorker.metaClass.deploy = { deployCallCounts++; "foo" } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345") + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgConfig -> + deployCallCounts++ + "foo" + } + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType) description.availabilityZones = ["us-west-1": [], "us-east-1": []] description.credentials = TestCredential.named('baz') @@ -152,10 +183,14 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "classic load balancer names are derived from prior execution results"() { setup: + def classicLbs = [] def setlbCalls = 0 - AutoScalingWorker.metaClass.deploy = {} - AutoScalingWorker.metaClass.setClassicLoadBalancers = { setlbCalls++ } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345") + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + setlbCalls++ + classicLbs.addAll(asgCfg.classicLoadBalancers as Collection) + "foo" + } + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType) description.availabilityZones = ["us-east-1": []] description.credentials = TestCredential.named('baz') @@ -164,15 +199,18 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { then: setlbCalls + classicLbs == ['lb'] 1 * elbV1.describeLoadBalancers(_) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(new LoadBalancerDescription().withLoadBalancerName("lb")) 1 * amazonEC2.describeVpcClassicLink() >> new DescribeVpcClassicLinkResult() } void "handles classic load balancers"() { - def classicLbs = [] - AutoScalingWorker.metaClass.setClassicLoadBalancers = { Collection lbs -> classicLbs.addAll(lbs) } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345", loadBalancers: ["lb"]) + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + classicLbs.addAll(asgCfg.classicLoadBalancers as Collection) + "foo" + } + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType, loadBalancers: ["lb"]) description.availabilityZones = ["us-east-1": []] description.credentials = TestCredential.named('baz') @@ -186,11 +224,34 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { classicLbs == ['lb'] } + void "should store capacity on DeploymentResult"() { + given: + def description = new BasicAmazonDeployDescription( + amiName: "ami-12345", + instanceType: this.instanceType, + capacity: new BasicAmazonDeployDescription.Capacity(min: 1, max: 10, desired: 5), + availabilityZones: ["us-east-1": []], + credentials: TestCredential.named('baz') + ) + + when: + def deploymentResult = handler.handle(description, []) + + then: + 1 * amazonEC2.describeVpcClassicLink() >> new DescribeVpcClassicLinkResult() + + deploymentResult.deployments.size() == 1 + 
deploymentResult.deployments[0].capacity == new DeploymentResult.Deployment.Capacity(min: 1, max: 10, desired: 5) + } + void "handles application load balancers"() { def targetGroupARNs = [] - AutoScalingWorker.metaClass.setTargetGroupArns = { Collection arns -> targetGroupARNs.addAll(arns) } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345", targetGroups: ["tg"]) + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + targetGroupARNs.addAll(asgCfg.targetGroupArns as Collection) + "foo" + } + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType, targetGroups: ["tg"]) description.availabilityZones = ["us-east-1": []] description.credentials = TestCredential.named('baz') @@ -205,7 +266,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { } void "fails if load balancer name is not in classic load balancer"() { - def description = new BasicAmazonDeployDescription(amiName: "ami-12345", loadBalancers: ["lb"]) + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType, loadBalancers: ["lb"]) description.availabilityZones = ["us-east-1": []] description.credentials = TestCredential.named('baz') @@ -221,12 +282,13 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "should populate classic link VPC Id when classic link is enabled"() { def actualClassicLinkVpcId - AutoScalingWorker.metaClass.deploy = { - actualClassicLinkVpcId = classicLinkVpcId + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + actualClassicLinkVpcId = asgCfg.classicLinkVpcId "foo" } def description = new BasicAmazonDeployDescription( amiName: "ami-12345", + instanceType: this.instanceType, availabilityZones: ["us-west-1": []], credentials: TestCredential.named('baz') ) @@ -245,12 +307,13 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "should not populate classic link VPC Id when there is a subnetType"() { def actualClassicLinkVpcId - AutoScalingWorker.metaClass.deploy = { - actualClassicLinkVpcId = classicLinkVpcId + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + actualClassicLinkVpcId = asgCfg.classicLinkVpcId "foo" } def description = new BasicAmazonDeployDescription( amiName: "ami-12345", + instanceType: this.instanceType, availabilityZones: ["us-west-1": []], credentials: TestCredential.named('baz'), subnetType: "internal" @@ -263,15 +326,54 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { actualClassicLinkVpcId == null } - void "should send instance class block devices to AutoScalingWorker when matched and none are specified"() { + void "should not modify unlimited cpu credits if applicable, and specified"() { + setup: + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: "t2.large", subnetType: "internal") + description.availabilityZones = ["us-west-1": [], "us-east-1": []] + description.credentials = TestCredential.named('baz') + description.unlimitedCpuCredits = unlimitedCreditsInput + + and: + def unlimitedCpuCreditsPassed = null + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + unlimitedCpuCreditsPassed = asgCfg.unlimitedCpuCredits + "foo" + } + + when: + handler.handle(description, []) + + then: + unlimitedCpuCreditsPassed == unlimitedCreditsInput + + where: + unlimitedCreditsInput << [true, false] + } + + void "should set unlimited cpu credits to the default false only if applicable to all instance types"() { + + 
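// A sketch, stated as an assumption rather than the production code: the data
// table below appears to encode "default to Boolean.FALSE only when every
// requested type is burstable (t2/t3 families), otherwise return null so the
// credit setting is left unset". The hypothetical closure below illustrates
// that rule; getDefaultUnlimitedCpuCredits itself may differ in detail.
def defaultCreditsSketch = { Set<String> types ->
  types.every { it.startsWith("t2.") || it.startsWith("t3.") } ? Boolean.FALSE : null
}
assert defaultCreditsSketch(["t2.small"] as Set) == false
assert defaultCreditsSketch(["m4.large", "c3.large"] as Set) == null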
expect: + handler.getDefaultUnlimitedCpuCredits(instanceTypes as Set) == expectedDefault + + where: + instanceTypes || expectedDefault + ["t2.small"] || false + ["c3.large"] || null + ["t2.large", "t3.large"] || false + ["t2.small", "c3.large"] || null + ["m4.large", "c3.large"] || null + } + + void "should send instance class block devices to AutoScalingWorker when matched and none are specified and absence of source ASG"() { setup: def deployCallCounts = 0 - AutoScalingWorker.metaClass.deploy = { deployCallCounts++; "foo" } def setBlockDevices = [] - AutoScalingWorker.metaClass.setBlockDevices = { List blockDevices -> - setBlockDevices = blockDevices + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + deployCallCounts++ + setBlockDevices = asgCfg.blockDevices + "foo" } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345") + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType) description.instanceType = "m3.medium" description.availabilityZones = ["us-west-1": [], "us-east-1": []] description.credentials = TestCredential.named('baz') @@ -291,13 +393,13 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "should favour explicit description block devices over default config"() { setup: def deployCallCounts = 0 - AutoScalingWorker.metaClass.deploy = { deployCallCounts++; "foo" } List setBlockDevices = [] - AutoScalingWorker.metaClass.setBlockDevices = { List blockDevices -> - setBlockDevices = blockDevices + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + setBlockDevices = asgCfg.blockDevices + deployCallCounts++ + "foo" } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345") - description.instanceType = "m3.medium" + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType) description.blockDevices = [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 125)] description.availabilityZones = ["us-west-1": [], "us-east-1": []] description.credentials = TestCredential.named('baz') @@ -312,20 +414,20 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { setBlockDevices == description.blockDevices 2 * amazonEC2.describeVpcClassicLink() >> new DescribeVpcClassicLinkResult() 2 * amazonEC2.describeImages(_) >> new DescribeImagesResult().withImages(new Image().withImageId('ami-12345') - .withVirtualizationType('hvm')) + .withVirtualizationType('hvm').withArchitecture('x86_64')) } @Unroll void "should favour ami block device mappings over explicit description block devices and default config, if useAmiBlockDeviceMappings is set"() { setup: def deployCallCounts = 0 - AutoScalingWorker.metaClass.deploy = { deployCallCounts++; "foo" } List setBlockDevices = [] - AutoScalingWorker.metaClass.setBlockDevices = { List blockDevices -> - setBlockDevices = blockDevices + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + deployCallCounts++ + setBlockDevices = asgCfg.blockDevices + "foo" } - def description = new BasicAmazonDeployDescription(amiName: "ami-12345") - description.instanceType = "m3.medium" + def description = new BasicAmazonDeployDescription(amiName: "ami-12345", instanceType: this.instanceType) description.blockDevices = [new AmazonBlockDevice(deviceName: "/dev/sdb", size: 125)] description.useAmiBlockDeviceMappings = useAmiBlockDeviceMappings description.availabilityZones = ["us-west-1": [], "us-east-1": []] @@ -342,10 +444,11 @@ class BasicAmazonDeployHandlerUnitSpec 
extends Specification { new DescribeImagesResult() .withImages(new Image() .withImageId('ami-12345') - .withBlockDeviceMappings([new BlockDeviceMapping() - .withDeviceName("/dev/sdh") - .withEbs(new Ebs().withVolumeSize(500))]) - .withVirtualizationType('hvm')) + .withBlockDeviceMappings([new com.amazonaws.services.ec2.model.BlockDeviceMapping() + .withDeviceName("/dev/sdh") + .withEbs(new EbsBlockDevice().withVolumeSize(500))]) + .withVirtualizationType('hvm') + .withArchitecture('x86_64')) setBlockDevices == expectedBlockDevices where: @@ -358,11 +461,13 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "should resolve amiId from amiName"() { setup: def deployCallCounts = 0 - AutoScalingWorker.metaClass.deploy = { deployCallCounts++; "foo" } + AutoScalingWorker.metaClass.deploy = { AsgConfiguration asgCfg -> + deployCallCounts++ + "foo" + } - def description = new BasicAmazonDeployDescription(amiName: "the-greatest-ami-in-the-world", availabilityZones: ['us-west-1': []]) + def description = new BasicAmazonDeployDescription(amiName: "the-greatest-ami-in-the-world", instanceType: this.instanceType, availabilityZones: ['us-west-1': []]) description.credentials = TestCredential.named('baz') - description.instanceType = "m3.medium" when: def results = handler.handle(description, []) @@ -373,29 +478,29 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { assert req.filters.first().name == 'name' assert req.filters.first().values == ['the-greatest-ami-in-the-world'] - return new DescribeImagesResult().withImages(new Image().withImageId('ami-12345').withVirtualizationType('hvm')) + return new DescribeImagesResult().withImages(new Image().withImageId('ami-12345').withVirtualizationType('hvm').withArchitecture('x86_64')) } 1 * amazonEC2.describeVpcClassicLink() >> new DescribeVpcClassicLinkResult() deployCallCounts == 1 } @Unroll - void "should copy block devices from source provider if not specified explicitly"() { + void "should copy block devices from source provider using a launch configuration if not specified explicitly and instance types match"() { given: def asgService = Mock(AsgService) { - (launchConfig ? 1 : 0) * getLaunchConfiguration(_) >> { + expectedCallsToAws * getLaunchConfiguration(_) >> { return new LaunchConfiguration() .withBlockDeviceMappings(new BlockDeviceMapping().withDeviceName("OLD_DEVICE") ) } } def sourceRegionScopedProvider = Mock(RegionScopedProvider) { - (launchConfig ? 
1 : 0) * getAsgService() >> { return asgService } + expectedCallsToAws * getAsgService() >> { return asgService } 1 * getAutoScaling() >> { return Mock(AmazonAutoScaling) { 1 * describeAutoScalingGroups(_) >> { return new DescribeAutoScalingGroupsResult().withAutoScalingGroups( - new AutoScalingGroup().withLaunchConfigurationName(launchConfig)) + new AutoScalingGroup().withLaunchConfigurationName("launchConfig")) } } } @@ -410,10 +515,64 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { targetDescription.blockDevices*.deviceName == expectedBlockDevices where: - description | launchConfig || expectedBlockDevices - new BasicAmazonDeployDescription() | "launchConfig" || ["OLD_DEVICE"] - new BasicAmazonDeployDescription(blockDevices: []) | "launchConfig" || [] - new BasicAmazonDeployDescription(blockDevices: [new AmazonBlockDevice(deviceName: "DEVICE")]) | "launchConfig" || ["DEVICE"] + description | expectedCallsToAws || expectedBlockDevices + new BasicAmazonDeployDescription(instanceType: this.instanceType) | 2 || ["OLD_DEVICE"] + new BasicAmazonDeployDescription(blockDevices: [], instanceType: this.instanceType) | 0 || [] + new BasicAmazonDeployDescription(blockDevices: [new AmazonBlockDevice(deviceName: "DEVICE")], + instanceType: this.instanceType) | 0 || ["DEVICE"] + } + + @Unroll + void "should copy block devices from source provider using a launch template if not specified explicitly and instance types match"() { + given: + def launchTemplateVersion = new LaunchTemplateVersion( + launchTemplateName: "lt", + launchTemplateId: "id", + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData( + blockDeviceMappings: [new LaunchTemplateBlockDeviceMapping(deviceName: "OLD_DEVICE")] + ) + ) + + def launchTemplate = new LaunchTemplateSpecification( + launchTemplateName: launchTemplateVersion.launchTemplateName, + launchTemplateId: launchTemplateVersion.launchTemplateId, + version: launchTemplateVersion.versionNumber.toString(), + ) + + and: + def launchTemplateService = Mock(LaunchTemplateService) { + getLaunchTemplateVersion({it.launchTemplateId == launchTemplate.launchTemplateId} as LaunchTemplateSpecification) >> Optional.of(launchTemplateVersion) + } + + def autoScaling = Mock(AmazonAutoScaling) { + describeAutoScalingGroups(_) >> { + return new DescribeAutoScalingGroupsResult().withAutoScalingGroups( + new AutoScalingGroup().withLaunchTemplate( + launchTemplate + )) + } + } + + def sourceRegionScopedProvider = Mock(RegionScopedProvider) { + getLaunchTemplateService() >> launchTemplateService + getAutoScaling() >> autoScaling + } + + when: + def targetDescription = handler.copySourceAttributes( + sourceRegionScopedProvider, "sourceAsg", null, description + ) + + then: + targetDescription.blockDevices*.deviceName == expectedBlockDevices + + where: + description || expectedBlockDevices + new BasicAmazonDeployDescription(instanceType: this.instanceType) || ["OLD_DEVICE"] + new BasicAmazonDeployDescription(blockDevices: [], instanceType: this.instanceType) || [] + new BasicAmazonDeployDescription(blockDevices: [new AmazonBlockDevice(deviceName: "DEVICE")], + instanceType: this.instanceType) || ["DEVICE"] } @Unroll @@ -421,6 +580,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { given: def regionScopedProvider = new RegionScopedProviderFactory().forRegion(testCredentials, "us-west-2") def description = new BasicAmazonDeployDescription( + instanceType: this.instanceType, subnetIds: subnetIds, copySourceCustomBlockDeviceMappings: false, tags: [:] @@ 
-453,18 +613,18 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void "copy source block devices #copySourceBlockDevices feature flags"() { given: if (copySourceBlockDevices != null) { - description.copySourceCustomBlockDeviceMappings = copySourceBlockDevices + description.copySourceCustomBlockDeviceMappings = copySourceBlockDevices // default copySourceCustomBlockDeviceMappings is true } - int expectedCalls = description.copySourceCustomBlockDeviceMappings ? 1 : 0 + int expectedCallsToAws = description.copySourceCustomBlockDeviceMappings ? 2 : 0 def asgService = Mock(AsgService) { - (expectedCalls) * getLaunchConfiguration(_) >> { + (expectedCallsToAws) * getLaunchConfiguration(_) >> { return new LaunchConfiguration() .withBlockDeviceMappings(new BlockDeviceMapping().withDeviceName("OLD_DEVICE") ) } } def sourceRegionScopedProvider = Mock(RegionScopedProvider) { - (expectedCalls) * getAsgService() >> { return asgService } + (expectedCallsToAws) * getAsgService() >> { return asgService } 1 * getAutoScaling() >> { return Mock(AmazonAutoScaling) { 1 * describeAutoScalingGroups(_) >> { @@ -484,16 +644,15 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { targetDescription.blockDevices?.deviceName == expectedBlockDevices where: - description | copySourceBlockDevices || expectedBlockDevices - new BasicAmazonDeployDescription() | null || ["OLD_DEVICE"] - new BasicAmazonDeployDescription() | true || ["OLD_DEVICE"] - new BasicAmazonDeployDescription() | false || null + description | copySourceBlockDevices || expectedBlockDevices + new BasicAmazonDeployDescription(instanceType: this.instanceType) | null || ["OLD_DEVICE"] + new BasicAmazonDeployDescription(instanceType: this.instanceType) | true || ["OLD_DEVICE"] + new BasicAmazonDeployDescription(instanceType: this.instanceType) | false || null } - void 'should fail if useSourceCapacity requested, and source not available'() { given: - def description = new BasicAmazonDeployDescription(capacity: descriptionCapacity) + def description = new BasicAmazonDeployDescription(capacity: descriptionCapacity, instanceType: this.instanceType) def sourceRegionScopedProvider = null when: @@ -511,7 +670,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void 'should fail if ASG not found and useSourceCapacity requested'() { given: - def description = new BasicAmazonDeployDescription(capacity: descriptionCapacity) + def description = new BasicAmazonDeployDescription(capacity: descriptionCapacity, instanceType: this.instanceType) def sourceRegionScopedProvider = Stub(RegionScopedProvider) { getAutoScaling() >> Stub(AmazonAutoScaling) { describeAutoScalingGroups(_) >> new DescribeAutoScalingGroupsResult() @@ -522,7 +681,6 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { handler.copySourceAttributes( sourceRegionScopedProvider, "sourceAsg", useSource, description ) - then: thrown(IllegalStateException) @@ -533,7 +691,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { void 'should copy capacity from source if specified'() { given: - def description = new BasicAmazonDeployDescription(capacity: descriptionCapacity) + def description = new BasicAmazonDeployDescription(capacity: descriptionCapacity, instanceType: this.instanceType) def asgService = Stub(AsgService) { getLaunchConfiguration(_) >> new LaunchConfiguration() } @@ -599,7 +757,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { given: def credentials = TestCredential.named('test', [lifecycleHooks: 
accountLifecycleHooks]) - def description = new BasicAmazonDeployDescription(lifecycleHooks: lifecycleHooks, includeAccountLifecycleHooks: includeAccount) + def description = new BasicAmazonDeployDescription(instanceType: this.instanceType, lifecycleHooks: lifecycleHooks, includeAccountLifecycleHooks: includeAccount) when: def result = BasicAmazonDeployHandler.getLifecycleHooks(credentials, description) @@ -623,7 +781,7 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { ]) def description = new BasicAmazonDeployDescription( - includeAccountLifecycleHooks: true + includeAccountLifecycleHooks: true, instanceType: this.instanceType ) when: @@ -633,32 +791,6 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { thrown(IllegalArgumentException) } - @Unroll - void "should convert block device mappings to AmazonBlockDevices"() { - expect: - handler.convertBlockDevices([sourceDevice]) == [targetDevice] - - where: - sourceDevice || targetDevice - new BlockDeviceMapping().withDeviceName("Device1").withVirtualName("virtualName") || new AmazonBlockDevice("Device1", "virtualName", null, null, null, null, null, null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withIops(500)) || new AmazonBlockDevice("Device1", null, null, null, null, 500, null, null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withDeleteOnTermination(true)) || new AmazonBlockDevice("Device1", null, null, null, true, null, null, null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withVolumeSize(1024)) || new AmazonBlockDevice("Device1", null, 1024, null, null, null, null, null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withVolumeType("volumeType")) || new AmazonBlockDevice("Device1", null, null, "volumeType", null, null, null, null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snapshotId")) || new AmazonBlockDevice("Device1", null, null, null, null, null, "snapshotId", null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs()) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null) - - // if snapshot is not provided, we should set encryption correctly - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withEncrypted(null)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withEncrypted(true)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, true) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withEncrypted(false)) || new AmazonBlockDevice("Device1", null, null, null, null, null, null, false) - - // if snapshot is provided, then we should use the snapshot's encryption value - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snap-123").withEncrypted(null)) || new AmazonBlockDevice("Device1", null, null, null, null, null, "snap-123", null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snap-123").withEncrypted(true)) || new AmazonBlockDevice("Device1", null, null, null, null, null, "snap-123", null) - new BlockDeviceMapping().withDeviceName("Device1").withEbs(new Ebs().withSnapshotId("snap-123").withEncrypted(false)) || new AmazonBlockDevice("Device1", null, null, null, null, null, "snap-123", null) - } - @Unroll void "should throw exception when instance type does not match image 
virtualization type"() { setup: @@ -673,16 +805,22 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { 1 * amazonEC2.describeImages(_) >> new DescribeImagesResult().withImages(new Image().withImageId('ami-12345') .withVirtualizationType(virtualizationType)) 1 * amazonEC2.describeVpcClassicLink() >> new DescribeVpcClassicLinkResult() + 1 * amazonEC2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [ + new InstanceTypeInfo(instanceType: "r3.xlarge", supportedVirtualizationTypes: ["hvm"]), + new InstanceTypeInfo(instanceType: "t3.micro", supportedVirtualizationTypes: ["hvm"]) + ]) + + and: thrown IllegalArgumentException where: instanceType | virtualizationType - 'c1.large' | 'hvm' 'r3.xlarge' | 'paravirtual' + 't3.micro' | 'paravirtual' } @Unroll - void "should not throw exception when instance type matches image virtualization type or is unknown"() { + void "should not throw exception when instance type matches image virtualization type"() { setup: def description = new BasicAmazonDeployDescription(amiName: "a-cool-ami", availabilityZones: ['us-west-1': []]) description.credentials = TestCredential.named('baz') @@ -693,8 +831,18 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { then: 1 * amazonEC2.describeImages(_) >> new DescribeImagesResult().withImages(new Image().withImageId('ami-12345') - .withVirtualizationType(virtualizationType)) + .withVirtualizationType(virtualizationType) + .withArchitecture("x86_64")) 1 * amazonEC2.describeVpcClassicLink() >> new DescribeVpcClassicLinkResult() + 1 * amazonEC2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [ + new InstanceTypeInfo( + instanceType: this.instanceType, + supportedVirtualizationTypes: [virtualizationType], + processorInfo: new ProcessorInfo(supportedArchitectures: ["x86_64"], sustainedClockSpeedInGhz: 2.8), + )]) + + and: + noExceptionThrown() where: instanceType | virtualizationType @@ -703,7 +851,6 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { 'c3.large' | 'hvm' 'c3.xlarge' | 'paravirtual' 'mystery.big' | 'hvm' - 'mystery.big' | 'paravirtual' 'what.the' | 'heck' } @@ -715,16 +862,32 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification { blockDevices: descriptionBlockDevices ) def launchConfiguration = new LaunchConfiguration() + .withLaunchConfigurationName('lc') .withInstanceType(sourceInstanceType) .withBlockDeviceMappings(sourceBlockDevices?.collect { new BlockDeviceMapping().withVirtualName(it.virtualName).withDeviceName(it.deviceName) }) + def sourceAsg = new AutoScalingGroup() + .withLaunchConfigurationName(launchConfiguration.getLaunchConfigurationName()) + + def asgService = Mock(AsgService) { + getLaunchConfiguration(_) >> launchConfiguration + } + def sourceRegionScopedProvider = Stub(RegionScopedProvider) { + getAsgService() >> asgService + getAutoScaling() >> Stub(AmazonAutoScaling) { + describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult().withAutoScalingGroups( + sourceAsg) + } + } + } when: - def blockDeviceMappings = handler.buildBlockDeviceMappings(description, launchConfiguration) + def blockDeviceMappings = handler.buildBlockDeviceMappingsFromSourceAsg(sourceRegionScopedProvider, sourceAsg, description) then: - convertBlockDeviceMappings(blockDeviceMappings) == convertBlockDeviceMappings(expectedTargetBlockDevices) + blockDeviceMappings == expectedTargetBlockDevices where: sourceInstanceType | targetInstanceType | sourceBlockDevices | descriptionBlockDevices || 
@@ -715,16 +862,32 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification {
       blockDevices: descriptionBlockDevices
     )
     def launchConfiguration = new LaunchConfiguration()
+      .withLaunchConfigurationName('lc')
       .withInstanceType(sourceInstanceType)
       .withBlockDeviceMappings(sourceBlockDevices?.collect {
         new BlockDeviceMapping().withVirtualName(it.virtualName).withDeviceName(it.deviceName)
       })
 
+    def sourceAsg = new AutoScalingGroup()
+      .withLaunchConfigurationName(launchConfiguration.getLaunchConfigurationName())
+
+    def asgService = Mock(AsgService) {
+      getLaunchConfiguration(_) >> launchConfiguration
+    }
+    def sourceRegionScopedProvider = Stub(RegionScopedProvider) {
+      getAsgService() >> asgService
+      getAutoScaling() >> Stub(AmazonAutoScaling) {
+        describeAutoScalingGroups(_) >> {
+          new DescribeAutoScalingGroupsResult().withAutoScalingGroups(sourceAsg)
+        }
+      }
+    }
 
     when:
-    def blockDeviceMappings = handler.buildBlockDeviceMappings(description, launchConfiguration)
+    def blockDeviceMappings = handler.buildBlockDeviceMappingsFromSourceAsg(sourceRegionScopedProvider, sourceAsg, description)
 
     then:
-    convertBlockDeviceMappings(blockDeviceMappings) == convertBlockDeviceMappings(expectedTargetBlockDevices)
+    blockDeviceMappings == expectedTargetBlockDevices
 
     where:
     sourceInstanceType | targetInstanceType | sourceBlockDevices | descriptionBlockDevices || expectedTargetBlockDevices
@@ -735,10 +898,75 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification {
     "c3.xlarge" | "r4.100xlarge" | bD("c3.xlarge") | null || [deployDefaults.unknownInstanceTypeBlockDevice] // no mapping for r4.100xlarge, use the default for unknown instance types
   }
 
+  @Unroll
+  void "should regenerate block device mappings conditionally for source ASG with mixed instances policy"() {
+    setup:
+    def launchTemplateVersion = new LaunchTemplateVersion(
+      launchTemplateName: "lt",
+      launchTemplateId: "id",
+      versionNumber: 0,
+      launchTemplateData: new ResponseLaunchTemplateData(
+        instanceType: sourceInstanceType,
+        blockDeviceMappings: sourceBlockDevices?.collect {
+          new LaunchTemplateBlockDeviceMapping().withVirtualName(it.virtualName).withDeviceName(it.deviceName)
+        }))
+    def mixedInstancesPolicy = new MixedInstancesPolicy()
+      .withLaunchTemplate(new LaunchTemplate()
+        .withLaunchTemplateSpecification(new LaunchTemplateSpecification()
+          .withLaunchTemplateId(launchTemplateVersion.launchTemplateId)
+          .withLaunchTemplateName(launchTemplateVersion.launchTemplateName)
+          .withVersion(launchTemplateVersion.versionNumber.toString()))
+        .withOverrides(
+          new LaunchTemplateOverrides().withInstanceType("c3.large").withWeightedCapacity("2"),
+          new LaunchTemplateOverrides().withInstanceType("c3.xlarge").withWeightedCapacity("4")))
+
+    def sourceAsg = new AutoScalingGroup().withMixedInstancesPolicy(mixedInstancesPolicy)
+
+    and:
+    def launchTemplateService = Mock(LaunchTemplateService) {
+      getLaunchTemplateVersion({it.launchTemplateId == launchTemplateVersion.launchTemplateId} as LaunchTemplateSpecification) >> Optional.of(launchTemplateVersion)
+    }
+
+    def sourceRegionScopedProvider = Mock(RegionScopedProvider) {
+      getLaunchTemplateService() >> launchTemplateService
+      getAutoScaling() >> Stub(AmazonAutoScaling) {
+        describeAutoScalingGroups(_) >> {
+          new DescribeAutoScalingGroupsResult().withAutoScalingGroups(sourceAsg)
+        }
+      }
+    }
+
+    and:
+    def description = new BasicAmazonDeployDescription(
+      instanceType: descInstanceType,
+      launchTemplateOverridesForInstanceType: [
+        new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c4.large", weightedCapacity: "2"),
+        new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "c4.xlarge", weightedCapacity: "4")
+      ],
+      blockDevices: descBlockDevices
+    )
+
+    when:
+    def blockDeviceMappings = handler.buildBlockDeviceMappingsFromSourceAsg(sourceRegionScopedProvider, sourceAsg, description)
+
+    then:
+    blockDeviceMappings == expectedTargetBlockDevices
+
+    where:
+    sourceInstanceType | descInstanceType | sourceBlockDevices | descBlockDevices || expectedTargetBlockDevices
+    "c3.xlarge" | "c4.xlarge" | bD("c3.xlarge") | bD("c3.xlarge") || bD("c3.xlarge") // use the explicitly provided block devices even if instance type has changed
+    "c3.xlarge" | "c4.xlarge" | bD("c3.xlarge") | [] || [] // use the explicitly provided block devices even if it is an empty list
+    "c3.xlarge" | "c4.xlarge" | bD("c3.xlarge") | null || bD("c4.xlarge") // source ASG used default block devices, so use default block devices for the top-level instance type in the description, i.e. descInstanceType
+    "c3.xlarge" | "c4.xlarge" | [new AmazonBlockDevice(deviceName: "/dev/xxx")] | null || [new AmazonBlockDevice(deviceName: "/dev/xxx")] // custom block devices should be preserved
+    "c3.xlarge" | "c4.100xlarge" | bD("c3.xlarge") | null || [deployDefaults.unknownInstanceTypeBlockDevice] // source ASG used default block devices, but there is no mapping for c4.100xlarge, so use the default for unknown instance types
+    "c3.xlarge" | "c3.xlarge" | bD("c3.xlarge") | null || bD("c3.xlarge") // top-level instance types match, use the source ASG's block devices
+  }
+
   @Unroll
   void "should substitute {{application}} in iamRole"() {
     given:
-    def description = new BasicAmazonDeployDescription(application: application, iamRole: iamRole)
+    def description = new BasicAmazonDeployDescription(application: application, iamRole: iamRole, instanceType: this.instanceType)
     def deployDefaults = new AwsConfiguration.DeployDefaults(iamRole: defaultIamRole)
 
     expect:
@@ -753,24 +981,13 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification {
     null | null | "{{application}}IamRole" || "{{application}}IamRole"
   }
 
-  @Unroll
-  void "should assign default EBS optimized flag if unset"() {
-    expect:
-    BasicAmazonDeployHandler.getDefaultEbsOptimizedFlag(instanceType) == expectedFlag
-
-    where:
-    instanceType || expectedFlag
-    'invalid'    || false
-    'm3.medium'  || false
-    'm4.large'   || true
-  }
-
   @Unroll
   void "should apply app/stack/detail tags when `addAppStackDetailTags` is enabled"() {
     given:
     def deployDefaults = new DeployDefaults(addAppStackDetailTags: addAppStackDetailTags)
     def description = new BasicAmazonDeployDescription(
       application: application,
+      instanceType: this.instanceType,
       stack: stack,
       freeFormDetails: details,
       tags: initialTags
@@ -829,10 +1046,4 @@ class BasicAmazonDeployHandlerUnitSpec extends Specification {
   private Collection bD(String instanceType) {
     return blockDeviceConfig.getBlockDevicesForInstanceType(instanceType)
   }
-
-  private Collection convertBlockDeviceMappings(Collection blockDevices) {
-    return blockDevices.collect {
-      [deviceName: it.deviceName, virtualName: it.virtualName]
-    }.sort { it.deviceName }
-  }
 }
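The where: tables above pin down how buildBlockDeviceMappingsFromSourceAsg chooses block devices when cloning from a source ASG, whether it uses a launch configuration or a mixed instances policy. A rough Groovy sketch of the decision order is below; it is a free-standing illustration, and names like blockDeviceConfig and deployDefaults mirror the spec fixtures rather than the handler's actual fields.

// Sketch of the selection rules the rows above exercise; assumes the spec's
// imports and fixtures (blockDeviceConfig, deployDefaults) are in scope.
Collection selectBlockDevices(BasicAmazonDeployDescription description,
                              String sourceInstanceType,
                              Collection sourceBlockDevices) {
  // 1. Explicit block devices on the description always win, even an empty list.
  if (description.blockDevices != null) {
    return description.blockDevices
  }
  // 2. A source ASG with custom (non-default) devices keeps them...
  if (sourceBlockDevices != blockDeviceConfig.getBlockDevicesForInstanceType(sourceInstanceType)) {
    return sourceBlockDevices
  }
  // 3. ...as does an unchanged top-level instance type.
  if (description.instanceType == sourceInstanceType) {
    return sourceBlockDevices
  }
  // 4. Otherwise regenerate defaults for the new instance type, falling back to
  //    the configured device for unknown types (an empty list is falsy in Groovy).
  return blockDeviceConfig.getBlockDevicesForInstanceType(description.instanceType) ?:
    [deployDefaults.unknownInstanceTypeBlockDevice]
}

diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateClusterConfigurationStrategySpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateClusterConfigurationStrategySpec.groovy
deleted file mode 100644
index 0fdb5cfcebb..00000000000
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateClusterConfigurationStrategySpec.groovy
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright 2016 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.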
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers - -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.SecurityGroup -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupReference -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfiguration -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfigurationTarget -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Subject - -class MigrateClusterConfigurationStrategySpec extends Specification { - - @Subject - MigrateClusterConfigurationStrategy strategy - - @Shared - NetflixAmazonCredentials testCredentials = TestCredential.named('test') - - @Shared - NetflixAmazonCredentials prodCredentials = TestCredential.named('prod') - - AmazonClientProvider amazonClientProvider = Mock(AmazonClientProvider) - - RegionScopedProviderFactory regionScopedProviderFactory = Mock(RegionScopedProviderFactory) - - DeployDefaults deployDefaults = Mock(DeployDefaults) - - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy = Mock(MigrateSecurityGroupStrategy) - - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy = Mock(MigrateLoadBalancerStrategy) - - SecurityGroupLookup sourceLookup = Mock(SecurityGroupLookup) - - SecurityGroupLookup targetLookup = Mock(SecurityGroupLookup) - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - - void setup() { - TaskRepository.threadLocalTask.set(Stub(Task)) - strategy = new DefaultMigrateClusterConfigurationStrategy(amazonClientProvider, - regionScopedProviderFactory, - deployDefaults) - } - - void 'sets availability zones, subnetType, iamRole, keyPair on target'() { - given: - ClusterConfigurationTarget target = new ClusterConfigurationTarget(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', availabilityZones: ['eu-west-1b']) - Map cluster = [ - loadBalancers : [], - securityGroups: [], - region: 'us-east-1', - availabilityZones: [ 'us-east-1': ['us-east-1c']] - ] - ClusterConfiguration source = new ClusterConfiguration(credentials: testCredentials, cluster: cluster) - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'external', 'external', 'newIamRole', 'newKeyPair', [:], false, true) - - then: - results.loadBalancerMigrations.empty - results.securityGroupMigrations.empty - results.cluster.subnetType == 'external' - results.cluster.iamRole == 'newIamRole' - 
results.cluster.keyPair == 'newKeyPair' - results.cluster.loadBalancers == [] - results.cluster.securityGroups == [] - results.cluster.availabilityZones == [ 'eu-west-1': ['eu-west-1b']] - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 0 * _ - } - - void 'generates load balancers from config'() { - given: - ClusterConfigurationTarget target = new ClusterConfigurationTarget(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', availabilityZones: ['eu-west-1b']) - Map cluster = [ - loadBalancers : ['lb-a', 'lb-b'], - securityGroups: [], - region: 'us-east-1', - availabilityZones: [ 'us-east-1': ['us-east-1c']] - ] - ClusterConfiguration source = new ClusterConfiguration(credentials: testCredentials, cluster: cluster) - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, null, null, null, null, [:], false, true) - - then: - results.loadBalancerMigrations.size() == 2 - results.cluster.loadBalancers == ['lb-a2', 'lb-b2'] - 1 * migrateLoadBalancerStrategy.generateResults(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - { it.name == 'lb-a' && it.region == 'us-east-1'}, { it.credentials == prodCredentials && it.region == 'eu-west-1'}, null, null, false, true) >> new MigrateLoadBalancerResult(targetName: 'lb-a2') - 1 * migrateLoadBalancerStrategy.generateResults(sourceLookup, targetLookup, migrateSecurityGroupStrategy, - { it.name == 'lb-b' && it.region == 'us-east-1'}, { it.credentials == prodCredentials && it.region == 'eu-west-1'}, null, null, false, true) >> new MigrateLoadBalancerResult(targetName: 'lb-b2') - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 0 * _ - } - - void 'handles missing loadBalancers key'() { - given: - ClusterConfigurationTarget target = new ClusterConfigurationTarget(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', availabilityZones: ['eu-west-1b']) - Map cluster = [ - securityGroups: [], - region: 'us-east-1', - availabilityZones: [ 'us-east-1': ['us-east-1c']] - ] - ClusterConfiguration source = new ClusterConfiguration(credentials: testCredentials, cluster: cluster) - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, null, null, null, null, [:], false, true) - - then: - results.loadBalancerMigrations.size() == 0 - results.cluster.loadBalancers == [] - } - - void 'generates security groups from config, omitting skipped ones'() { - given: - ClusterConfigurationTarget target = new ClusterConfigurationTarget(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', availabilityZones: ['eu-west-1b']) - Map cluster = [ - loadBalancers : [], - securityGroups: ['sg-1', 'sg-2', 'sg-3'], - region: 'us-east-1', - availabilityZones: [ 'us-east-1': ['us-east-1c']] - ] - ClusterConfiguration source = new ClusterConfiguration(credentials: testCredentials, cluster: cluster) - - SecurityGroup group1 = new SecurityGroup(groupId: 'sg-1a', groupName: 'group1', vpcId: 'vpc-1') - SecurityGroup group2 = new SecurityGroup(groupId: 'sg-2a', groupName: 'group2', vpcId: 'vpc-1') - SecurityGroup skippedGroup = new SecurityGroup(groupId: 'sg-3a', groupName: 'group3', vpcId: 'vpc-1') - SecurityGroupUpdater updater1 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> group1 - } - SecurityGroupUpdater updater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> group2 - } - SecurityGroupUpdater skipper = 
Stub(SecurityGroupUpdater) { - getSecurityGroup() >> skippedGroup - } - MigrateSecurityGroupReference skippedReference = new MigrateSecurityGroupReference() - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, null, null, null, null, [:], false, false) - - then: - results.securityGroupMigrations.size() == 3 - results.cluster.securityGroups == ['sg-1a', 'sg-2a'] - - 3 * sourceLookup.getSecurityGroupById('test', 'sg-1', null) >> Optional.of(updater1) - 3 * sourceLookup.getSecurityGroupById('test', 'sg-2', null) >> Optional.of(updater2) - 3 * sourceLookup.getSecurityGroupById('test', 'sg-3', null) >> Optional.of(skipper) - 1 * migrateSecurityGroupStrategy.generateResults({it.name == 'group1'}, { it.region == 'eu-west-1' }, sourceLookup, targetLookup, false, false) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetId: 'sg-1a')) - 1 * migrateSecurityGroupStrategy.generateResults({it.name == 'group2'}, { it.region == 'eu-west-1' }, sourceLookup, targetLookup, false, false) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetId: 'sg-2a')) - 1 * migrateSecurityGroupStrategy.generateResults({it.name == 'group3'}, { it.region == 'eu-west-1' }, sourceLookup, targetLookup, false, false) >> new MigrateSecurityGroupResult(target: skippedReference, skipped: [skippedReference]) - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 0 * _ - } - - void 'adds app security group if configured in deployDefaults'() { - given: - ClusterConfigurationTarget target = new ClusterConfigurationTarget(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - Map cluster = [ - application: 'theapp', - loadBalancers : [], - securityGroups: [], - region: 'us-east-1', - availabilityZones: [ 'us-east-1': ['us-east-1c']] - ] - ClusterConfiguration source = new ClusterConfiguration(credentials: testCredentials, cluster: cluster) - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, null, null, null, null, [:], false, false) - - then: - results.cluster.securityGroups == ['sg-1a'] - 1 * deployDefaults.getAddAppGroupToServerGroup() >> true - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'theapp' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, true, false) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetId: 'sg-1a')) - 0 * _ - } - - void 'replaces app security group if it is already there'() { - given: - ClusterConfigurationTarget target = new ClusterConfigurationTarget(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - Map cluster = [ - application: 'theapp', - loadBalancers : [], - securityGroups: ['sg-1'], - region: 'us-east-1', - availabilityZones: [ 'us-east-1': ['us-east-1c']] - ] - ClusterConfiguration source = new ClusterConfiguration(credentials: testCredentials, cluster: cluster) - - SecurityGroup appGroup = new SecurityGroup(groupId: 'sg-1', groupName: 'theapp', vpcId: 'vpc-1') - SecurityGroupUpdater updater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> appGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, null, null, null, null, 
[:], false, true) - - then: - results.cluster.securityGroups == ['sg-1a'] - 3 * sourceLookup.getSecurityGroupById('test', 'sg-1', null) >> Optional.of(updater) - 1 * deployDefaults.getAddAppGroupToServerGroup() >> true - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'theapp' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, false, true) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetId: 'sg-1a', targetName: 'theapp')) - 0 * _ - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateSecurityGroupStrategySpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateSecurityGroupStrategySpec.groovy deleted file mode 100644 index 5538742a3ec..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateSecurityGroupStrategySpec.groovy +++ /dev/null @@ -1,593 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers - -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.DescribeVpcsResult -import com.amazonaws.services.ec2.model.IpPermission -import com.amazonaws.services.ec2.model.IpRange -import com.amazonaws.services.ec2.model.SecurityGroup -import com.amazonaws.services.ec2.model.Tag -import com.amazonaws.services.ec2.model.UserIdGroupPair -import com.amazonaws.services.ec2.model.Vpc -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupReference -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupMigrator.SecurityGroupLocation -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Subject - -class MigrateSecurityGroupStrategySpec extends Specification { - - @Subject - MigrateSecurityGroupStrategy strategy - - @Shared - NetflixAmazonCredentials testCredentials = TestCredential.named('test') - - @Shared - NetflixAmazonCredentials prodCredentials = TestCredential.named('prod') - - SecurityGroupLookup sourceLookup = Mock(SecurityGroupLookup) - - SecurityGroupLookup targetLookup = Mock(SecurityGroupLookup) - - AmazonClientProvider amazonClientProvider = 
Mock(AmazonClientProvider) - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - strategy = new DefaultMigrateSecurityGroupStrategy(amazonClientProvider, ['infra']) - - sourceLookup.getCredentialsForId(testCredentials.accountId) >> testCredentials - targetLookup.getCredentialsForId(testCredentials.accountId) >> testCredentials - sourceLookup.getCredentialsForName(testCredentials.name) >> testCredentials - targetLookup.getCredentialsForName(testCredentials.name) >> testCredentials - - sourceLookup.getAccountNameForId(testCredentials.accountId) >> 'test' - targetLookup.getAccountNameForId(testCredentials.accountId) >> 'test' - - targetLookup.accountIdExists(testCredentials.accountId) >> true - - sourceLookup.getCredentialsForId(prodCredentials.accountId) >> prodCredentials - targetLookup.getCredentialsForId(prodCredentials.accountId) >> prodCredentials - sourceLookup.getCredentialsForName(prodCredentials.name) >> prodCredentials - targetLookup.getCredentialsForName(prodCredentials.name) >> prodCredentials - - sourceLookup.getAccountNameForId(prodCredentials.accountId) >> 'prod' - targetLookup.getAccountNameForId(prodCredentials.accountId) >> 'prod' - - targetLookup.accountIdExists(prodCredentials.accountId) >> true - - } - - void 'should create target group if createIfSourceMissing is true and source is not found'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'groupA') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, true, true) - - then: - results.target.targetName == 'groupA' - results.created[0] == results.target - 1 * sourceLookup.getSecurityGroupByName('test', 'groupA', null) >> Optional.empty() - 1 * targetLookup.getSecurityGroupByName('prod', 'groupA', null) >> Optional.empty() - 0 * _ - } - - void 'should throw exception if createIfSourceMissing is false and source is not found'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'groupA') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - - when: - strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - thrown IllegalStateException - 1 * sourceLookup.getSecurityGroupByName('test', 'groupA', null) >> Optional.empty() - 0 * _ - } - - void 'should generate target references, ignoring self'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'group2'), new UserIdGroupPair(userId: prodCredentials.accountId, groupId: 'sg-3', groupName: 'group3')), - new IpPermission().withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-1', groupName: 'group1')) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.created.size() == 3 - 
results.created.targetName.sort() == ['group1', 'group2', 'group3'] - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.empty() - 1 * targetLookup.getSecurityGroupByName('test', 'group2', null) >> Optional.empty() - 1 * targetLookup.getSecurityGroupByName('prod', 'group3', null) >> Optional.empty() - } - - void 'should generate target cross-account references'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', vpcId: 'vpc-1t') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs( - new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'group2'), - new UserIdGroupPair(userId: prodCredentials.accountId, groupId: 'sg-3', groupName: 'group3')), - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - AmazonEC2 testAmazonEC2 = Mock(AmazonEC2) - AmazonEC2 prodAmazonEC2 = Mock(AmazonEC2) - - def createdGroup1 = new SecurityGroup(ownerId: testCredentials.accountId) - def createdGroup2 = new SecurityGroup(ownerId: testCredentials.accountId) - def createdGroup3 = new SecurityGroup(ownerId: prodCredentials.accountId) - def createdUpdater1 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> createdGroup1 - } - def createdUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> createdGroup2 - } - def createdUpdater3 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> createdGroup3 - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, sourceLookup, false, false) - - then: - results.created.size() == 3 - results.created.targetName.sort() == ['group1', 'group2', 'group3'] - amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> testAmazonEC2 - amazonClientProvider.getAmazonEC2(prodCredentials, 'us-east-1') >> prodAmazonEC2 - testAmazonEC2.describeVpcs() >> new DescribeVpcsResult().withVpcs(new Vpc().withVpcId('vpc-1t').withTags(new Tag("Name", "vpc1"))) - prodAmazonEC2.describeVpcs() >> new DescribeVpcsResult().withVpcs(new Vpc().withVpcId('vpc-1p').withTags(new Tag("Name", "vpc1"))) - 2 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - sourceLookup.getSecurityGroupByName('test', 'group1', 'vpc-1t') >>> [Optional.empty(), Optional.empty(), Optional.of(createdUpdater1)] - sourceLookup.getSecurityGroupByName('test', 'group2', 'vpc-1t') >>> [Optional.empty(), Optional.empty(), Optional.of(createdUpdater2)] - sourceLookup.getSecurityGroupByName('prod', 'group3', 'vpc-1p') >>> [Optional.empty(), Optional.empty(), Optional.of(createdUpdater3)] - sourceLookup.accountIdExists(testCredentials.accountId) >> true - sourceLookup.accountIdExists(prodCredentials.accountId) >> true - 1 * sourceLookup.createSecurityGroup({ - it.credentials == testCredentials && it.vpcId == 'vpc-1t' - }) >> createdUpdater1 - 1 * sourceLookup.createSecurityGroup({ - it.credentials == testCredentials && it.vpcId == 'vpc-1t' - }) >> createdUpdater2 - 1 * sourceLookup.createSecurityGroup({ - it.credentials == prodCredentials && it.vpcId == 'vpc-1p' - }) >> createdUpdater3 - - } - - void 'should warn on references in unknown accounts'() { - given: - def source = new 
SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def mysteryAccount = TestCredential.named('test2') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'group2'), new UserIdGroupPair(userId: mysteryAccount.accountId, groupId: 'sg-3', groupName: 'group3')), - new IpPermission().withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-1', groupName: 'group1')) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.created.size() == 2 - results.created.targetName.sort() == ['group1', 'group2'] - results.warnings.sourceId == ['sg-3'] - results.warnings.accountId == [mysteryAccount.accountId] - results.warnings.explanation == ["Spinnaker does not manage the account $mysteryAccount.accountId".toString()] - sourceLookup.getCredentialsForId(mysteryAccount.accountId) >> null - targetLookup.accountIdExists(mysteryAccount.accountId) >> false - targetLookup.getAccountNameForId(mysteryAccount.accountId) >> mysteryAccount.accountId - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.empty() - 1 * targetLookup.getSecurityGroupByName('test', 'group2', null) >> Optional.empty() - } - - void 'should skip infrastructure app dependencies'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'infra-g1') - def target = new SecurityGroupLocation(credentials: testCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'infra-g1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def targetGroup = new SecurityGroup(groupName: 'infra-g1', groupId: 'sg-3', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs( - new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'group1')), - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - def targetUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.reused.size() == 1 - results.reused.targetName == ['infra-g1'] - results.skipped.empty - results.created.empty - 1 * sourceLookup.getSecurityGroupByName('test', 'infra-g1', null) >> Optional.of(sourceUpdater) - 2 * targetLookup.getSecurityGroupByName('test', 'infra-g1', null) >> Optional.of(targetUpdater) - } - - void 'should skip infrastructure groups if not present in target'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'infra-g1') - def target = new SecurityGroupLocation(credentials: testCredentials, region: 'us-west-1', vpcId: 'vpc-2') - def sourceGroup = new SecurityGroup(groupName: 'infra-g1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [] - def sourceUpdater = Stub(SecurityGroupUpdater) { - 
getSecurityGroup() >> sourceGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.created.size() == 0 - results.skipped.targetName == ['infra-g1'] - 1 * sourceLookup.getSecurityGroupByName('test', 'infra-g1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('test', 'infra-g1', 'vpc-2') >> Optional.empty() - } - - void 'should skip missing dependencies if they belong to infrastructure apps'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: testCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs( - new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'infra-g1')), - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.created.size() == 1 - results.created.targetName == ['group1'] - results.skipped.size() == 1 - results.skipped.targetName == ['infra-g1'] - results.reused.empty - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.empty() - 1 * targetLookup.getSecurityGroupByName('test', 'infra-g1', null) >> Optional.empty() - } - - void 'should skip amazon-elb group without warning when target is in VPC, using groupId as name if not present'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: testCredentials, region: 'us-west-1', vpcId: 'vpc-2') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs( - new UserIdGroupPair(userId: 'amazon-elb', groupId: 'sg-2'), - new UserIdGroupPair(userId: 'amazon-elb', groupId: 'sg-3', groupName: 'do-not-copy')), - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.created.size() == 1 - results.created.targetName == ['group1'] - results.skipped.targetName.sort() == ['do-not-copy', 'sg-2'] - sourceLookup.getCredentialsForId('amazon-elb') >> null - targetLookup.accountIdExists('amazon-elb') >> false - 2 * amazonClientProvider.getAmazonEC2(testCredentials, 'us-west-1') >> amazonEC2 - amazonEC2.describeVpcs() >> new DescribeVpcsResult().withVpcs(new Vpc().withVpcId('vpc-2').withTags(new Tag("Name", "vpc2"))) - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('test', 'group1', 'vpc-2') >> Optional.empty() - } - - void 'should include target reference'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 
'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.target.targetName == 'group1' - results.created.targetName == ['group1'] - !results.targetExists() - targetLookup.accountIdExists(_) >> true - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.empty() - } - - void 'should flag target as existing if it exists in target location'() { - given: - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [] - def sourceUpdater = Stub(SecurityGroupUpdater) { getSecurityGroup() >> sourceGroup } - def targetUpdater = Stub(SecurityGroupUpdater) { getSecurityGroup() >> sourceGroup } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.target.targetName == 'group1' - results.reused.targetName == ['group1'] - results.created.empty - results.targetExists() - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - 1 * targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.of(targetUpdater) - } - - void 'should halt if any errors are found'() { - given: - strategy = new ErrorfulMigrationStrategy(amazonClientProvider) - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'group2')) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { getSecurityGroup() >> sourceGroup } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, true) - - then: - results.errors.sourceName == ['group2'] - results.reused.empty - results.created.empty - !results.targetExists() - 1 * sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - } - - void 'generates ingress rules based on source'() { - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def targetGroup1 = new SecurityGroup(groupName: 'group1', groupId: 'sg-5', ownerId: prodCredentials.accountId) - def targetGroup2 = new SecurityGroup(groupName: 'group2', groupId: 'sg-6', ownerId: prodCredentials.accountId) - def targetGroup3 = new SecurityGroup(groupName: 'group3', groupId: 'sg-7', ownerId: prodCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission() - .withUserIdGroupPairs( - new UserIdGroupPair(userId: 
testCredentials.accountId, groupId: 'sg-2', groupName: 'group2'), - new UserIdGroupPair(userId: prodCredentials.accountId, groupId: 'sg-3', groupName: 'group3')) - .withFromPort(7001).withToPort(7003), - new IpPermission() - .withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-1', groupName: 'group1')) - .withFromPort(7000).withToPort(7002) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - def targetUpdater1 = Mock(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup1 - } - def targetUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup2 - } - def targetUpdater3 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup3 - } - - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, false) - - then: - results.ingressUpdates.size() == 3 - results.target.targetId == 'sg-5' - sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - targetLookup.getSecurityGroupByName('prod', 'group1', null) >>> [Optional.empty(), Optional.of(targetUpdater1)] - targetLookup.getSecurityGroupByName('test', 'group2', null) >>> [Optional.empty(), Optional.of(targetUpdater2)] - targetLookup.getSecurityGroupByName('prod', 'group3', null) >>> [Optional.empty(), Optional.of(targetUpdater3)] - sourceLookup.getSecurityGroupByName(_, _, _) >> Optional.empty() - 1 * targetLookup.createSecurityGroup({t -> t.name == 'group1'}) >> targetUpdater1 - 1 * targetLookup.createSecurityGroup({t -> t.name == 'group2'}) >> targetUpdater2 - 1 * targetLookup.createSecurityGroup({t -> t.name == 'group3'}) >> targetUpdater3 - 1 * targetUpdater1.addIngress({t -> - t.size() == 3 && - t[0].fromPort == 7001 && t[0].toPort == 7003 && t[0].userIdGroupPairs.groupId == ['sg-6'] && - t[1].fromPort == 7001 && t[1].toPort == 7003 && t[1].userIdGroupPairs.groupId == ['sg-7'] && - t[2].fromPort == 7000 && t[2].toPort == 7002 && t[2].userIdGroupPairs.groupId == ['sg-5'] - }) - } - - void 'skips existing ingress rules'() { - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def targetGroup1 = new SecurityGroup(groupName: 'group1', groupId: 'sg-5', ownerId: prodCredentials.accountId) - def targetGroup2 = new SecurityGroup(groupName: 'group2', groupId: 'sg-6', ownerId: prodCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission() - .withUserIdGroupPairs( - new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-2', groupName: 'group2')) - .withFromPort(7001).withToPort(7003), - new IpPermission() - .withUserIdGroupPairs(new UserIdGroupPair(userId: testCredentials.accountId, groupId: 'sg-1', groupName: 'group1')) - .withFromPort(7000).withToPort(7002) - ] - targetGroup1.ipPermissions = [ - new IpPermission() - .withUserIdGroupPairs(new UserIdGroupPair(userId: prodCredentials.accountId, groupId: 'sg-5', groupName: 'group1')) - .withFromPort(7000).withToPort(7002) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - def targetUpdater1 = Mock(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup1 - } - def targetUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup2 - } - - when: - def results = 
strategy.generateResults(source, target, sourceLookup, targetLookup, false, false) - - then: - results.ingressUpdates.size() == 1 - results.target.targetId == 'sg-5' - sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.of(targetUpdater1) - targetLookup.getSecurityGroupByName('test', 'group2', null) >>> [Optional.empty(), Optional.of(targetUpdater2)] - sourceLookup.getSecurityGroupByName(_, _, _) >> Optional.empty() - 0 * targetLookup.createSecurityGroup({t -> t.name == 'group1'}) >> targetUpdater1 - 1 * targetLookup.createSecurityGroup({t -> t.name == 'group2'}) >> targetUpdater2 - 1 * targetUpdater1.addIngress({t -> t.size() == 1 - t[0].fromPort == 7001 && t[0].toPort == 7003 && t[0].userIdGroupPairs.groupId == ['sg-6'] - }) - } - - void 'creates range rules that do not exist in target'() { - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def targetGroup1 = new SecurityGroup(groupName: 'group1', groupId: 'sg-5', ownerId: prodCredentials.accountId) - def targetGroup2 = new SecurityGroup(groupName: 'group2', groupId: 'sg-6', ownerId: prodCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs([]).withIpRanges("1.2.3.4").withFromPort(7001).withToPort(7003) - ] - targetGroup1.ipPermissions = [ - new IpPermission().withUserIdGroupPairs([]).withIpRanges("1.2.3.5").withFromPort(7001).withToPort(7003), - new IpPermission().withUserIdGroupPairs([]).withIpRanges("1.2.3.4").withFromPort(7004).withToPort(7004) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - def targetUpdater1 = Mock(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup1 - } - def targetUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup2 - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, false) - - then: - results.ingressUpdates.size() == 1 - results.target.targetId == 'sg-5' - sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.of(targetUpdater1) - targetLookup.getSecurityGroupByName('test', 'group2', null) >>> [Optional.empty(), Optional.of(targetUpdater2)] - 1 * targetUpdater1.addIngress({t -> t.size() == 1 - t[0].fromPort == 7001 && t[0].toPort == 7003 && t[0].ipRanges == ["1.2.3.4"] - }) - } - - void 'skips existing range rules'() { - def source = new SecurityGroupLocation(credentials: testCredentials, region: 'us-east-1', name: 'group1') - def target = new SecurityGroupLocation(credentials: prodCredentials, region: 'us-west-1') - def sourceGroup = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def targetGroup1 = new SecurityGroup(groupName: 'group1', groupId: 'sg-5', ownerId: prodCredentials.accountId) - def targetGroup2 = new SecurityGroup(groupName: 'group2', groupId: 'sg-6', ownerId: prodCredentials.accountId) - sourceGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs([]).withIpv4Ranges(new IpRange().withCidrIp("1.2.3.4")).withFromPort(7001).withToPort(7003) - ] - targetGroup1.ipPermissions = [ - new 
IpPermission().withUserIdGroupPairs([]).withIpv4Ranges(new IpRange().withCidrIp("1.2.3.4")).withFromPort(7001).withToPort(7003) - ] - def sourceUpdater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup - } - def targetUpdater1 = Mock(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup1 - } - def targetUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> targetGroup2 - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, false, false) - - then: - results.ingressUpdates.empty - results.target.targetId == 'sg-5' - sourceLookup.getSecurityGroupByName('test', 'group1', null) >> Optional.of(sourceUpdater) - targetLookup.getSecurityGroupByName('prod', 'group1', null) >> Optional.of(targetUpdater1) - targetLookup.getSecurityGroupByName('test', 'group2', null) >>> [Optional.empty(), Optional.of(targetUpdater2)] - } - - private static class ErrorfulMigrationStrategy extends MigrateSecurityGroupStrategy { - - private AmazonClientProvider amazonClientProvider - - @Override - public AmazonClientProvider getAmazonClientProvider() { - return amazonClientProvider - } - - @Override - public List getInfrastructureApplications() { - return new ArrayList<>(); - } - - public ErrorfulMigrationStrategy(AmazonClientProvider amazonClientProvider) { - this.amazonClientProvider = amazonClientProvider - } - - @Override - Set shouldError(Set references) { - return references; - } - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateServerGroupStrategySpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateServerGroupStrategySpec.groovy deleted file mode 100644 index 9ad3605eff3..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateServerGroupStrategySpec.groovy +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers - -import com.amazonaws.services.autoscaling.AmazonAutoScaling -import com.amazonaws.services.autoscaling.model.AutoScalingGroup -import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult -import com.amazonaws.services.autoscaling.model.InstanceMonitoring -import com.amazonaws.services.autoscaling.model.LaunchConfiguration -import com.amazonaws.services.autoscaling.model.SuspendedProcess -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.SecurityGroup -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.AWSServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.aws.deploy.converters.AllowLaunchAtomicOperationConverter -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.AllowLaunchAtomicOperation -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.MigrateLoadBalancerResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupReference -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ServerGroupMigrator.ServerGroupLocation -import com.netflix.spinnaker.clouddriver.aws.deploy.validators.BasicAmazonDeployDescriptionValidator -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.services.AsgService -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory.RegionScopedProvider -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Subject - -class MigrateServerGroupStrategySpec extends Specification { - - @Subject - MigrateServerGroupStrategy strategy - - @Shared - NetflixAmazonCredentials testCredentials = TestCredential.named('test') - - @Shared - NetflixAmazonCredentials prodCredentials = TestCredential.named('prod') - - AmazonClientProvider amazonClientProvider = Mock(AmazonClientProvider) - - RegionScopedProviderFactory regionScopedProviderFactory = Mock(RegionScopedProviderFactory) - - DeployDefaults deployDefaults = Mock(DeployDefaults) - - MigrateSecurityGroupStrategy migrateSecurityGroupStrategy = Mock(MigrateSecurityGroupStrategy) - - MigrateLoadBalancerStrategy migrateLoadBalancerStrategy = Mock(MigrateLoadBalancerStrategy) - - BasicAmazonDeployHandler basicAmazonDeployHandler = Mock(BasicAmazonDeployHandler) - - SecurityGroupLookup sourceLookup = Mock(SecurityGroupLookup) - - SecurityGroupLookup targetLookup = Mock(SecurityGroupLookup) - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - - AmazonAutoScaling amazonAutoScaling = Mock(AmazonAutoScaling) - - AsgService asgService = Mock(AsgService) - - BasicAmazonDeployDescriptionValidator validator = 
Stub(BasicAmazonDeployDescriptionValidator) - - AllowLaunchAtomicOperationConverter allowLaunchAtomicOperationConverter = Mock(AllowLaunchAtomicOperationConverter) - - AllowLaunchAtomicOperation allowLaunchOperation = Mock(AllowLaunchAtomicOperation) - - void setup() { - TaskRepository.threadLocalTask.set(Stub(Task)) - strategy = new DefaultMigrateServerGroupStrategy(amazonClientProvider, basicAmazonDeployHandler, - regionScopedProviderFactory, validator, allowLaunchAtomicOperationConverter, deployDefaults) - } - - void 'generates load balancers from launch config'() { - given: - ServerGroupLocation source = new ServerGroupLocation(name: 'asg-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', availabilityZones: ['eu-west-1b']) - - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'asg-v001-lc', loadBalancerNames: ['lb-1']) - LaunchConfiguration lc = new LaunchConfiguration( - instanceMonitoring: new InstanceMonitoring(enabled: false), - ) - AWSServerGroupNameResolver nameResolver = Mock(AWSServerGroupNameResolver) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - getAWSServerGroupNameResolver() >> nameResolver - } - - when: - strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, false) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - 1 * asgService.getAutoScalingGroup('asg-v001') >> asg - 1 * asgService.getLaunchConfiguration('asg-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 1 * allowLaunchAtomicOperationConverter.convertOperation(_) >> allowLaunchOperation - 1 * allowLaunchOperation.operate(null) - 1 * migrateLoadBalancerStrategy.generateResults(sourceLookup, targetLookup, - migrateSecurityGroupStrategy, - {s -> s.name == 'lb-1' && s.region == 'us-east-1' && s.credentials == testCredentials && s.vpcId == 'vpc-1'}, - {s -> !s.name && s.region == 'eu-west-1' && s.credentials == prodCredentials && s.vpcId == 'vpc-2' && s.availabilityZones == ['eu-west-1b']}, - 'external', _, false, false) >> new MigrateLoadBalancerResult() - 1 * basicAmazonDeployHandler.copySourceAttributes(regionScopedProvider, 'asg-v001', false, _) >> { a, b, c, d -> d } - 1 * basicAmazonDeployHandler.handle(_, []) >> new DeploymentResult(serverGroupNames: ['asg-v003']) - 0 * _ - } - - void 'generates security groups from launch config, filtering skipped ones'() { - ServerGroupLocation source = new ServerGroupLocation(name: 'asg-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - SecurityGroup group1 = new SecurityGroup(groupId: 'sg-1', groupName: 'group1', vpcId: 'vpc-1') - SecurityGroup group2 = new SecurityGroup(groupId: 'sg-2', groupName: 'group2', vpcId: 'vpc-1') - SecurityGroup skippedGroup = new SecurityGroup(groupId: 'sg-3', groupName: 
'group3', vpcId: 'vpc-1') - SecurityGroupUpdater updater1 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> group1 - } - SecurityGroupUpdater updater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> group2 - } - SecurityGroupUpdater skipper = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> skippedGroup - } - MigrateSecurityGroupReference skippedReference = new MigrateSecurityGroupReference() - - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'asg-v001-lc') - LaunchConfiguration lc = new LaunchConfiguration( - instanceMonitoring: new InstanceMonitoring(enabled: false), - securityGroups: [ 'sg-1', 'sg-2', 'sg-3' ] - ) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - } - - when: - strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, false) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - 3 * sourceLookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(updater1) - 3 * sourceLookup.getSecurityGroupById('test', 'sg-2', 'vpc-1') >> Optional.of(updater2) - 3 * sourceLookup.getSecurityGroupById('test', 'sg-3', 'vpc-1') >> Optional.of(skipper) - 1 * asgService.getAutoScalingGroup('asg-v001') >> asg - 1 * asgService.getLaunchConfiguration('asg-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 1 * allowLaunchAtomicOperationConverter.convertOperation(_) >> allowLaunchOperation - 1 * allowLaunchOperation.operate(null) - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'group1' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, false, false) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetName: 'group1-vpc')) - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'group2' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, false, false) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetName: 'group2-vpc')) - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'group3' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, false, false) >> new MigrateSecurityGroupResult(target: skippedReference, skipped: [skippedReference]) - 1 * basicAmazonDeployHandler.copySourceAttributes(regionScopedProvider, 'asg-v001', false, _) >> { a, b, c, d -> d } - 1 * basicAmazonDeployHandler.handle({ - {d -> d.securityGroups == ['group1-vpc', 'group2-vpc']} - }, []) >> new DeploymentResult(serverGroupNames: ['asg-v003']) - 0 * _ - } - - void 'adds app security group if configured in deployDefaults'() { - given: - ServerGroupLocation source = new ServerGroupLocation(name: 'asg-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - 
ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'asg-v001-lc') - LaunchConfiguration lc = new LaunchConfiguration( - instanceMonitoring: new InstanceMonitoring(enabled: false) - ) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - } - - when: - strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, false) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - 1 * asgService.getAutoScalingGroup('asg-v001') >> asg - 1 * asgService.getLaunchConfiguration('asg-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> true - 1 * allowLaunchAtomicOperationConverter.convertOperation(_) >> allowLaunchOperation - 1 * allowLaunchOperation.operate(null) - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'asg' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, true, false) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference()) - 1 * basicAmazonDeployHandler.copySourceAttributes(regionScopedProvider, 'asg-v001', false, _) >> { a, b, c, d -> d } - 1 * basicAmazonDeployHandler.handle(_, []) >> new DeploymentResult(serverGroupNames: ['asg-v003']) - 0 * _ - } - - void 'does not add app security group if it is already there'() { - given: - ServerGroupLocation source = new ServerGroupLocation(name: 'theapp-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'theapp-v001-lc') - LaunchConfiguration lc = new LaunchConfiguration( - instanceMonitoring: new InstanceMonitoring(enabled: false), - securityGroups: ['sg-1'] - ) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - } - SecurityGroup appGroup = new SecurityGroup(groupId: 'sg-1', groupName: 'theapp', vpcId: 'vpc-1') - SecurityGroupUpdater updater = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> appGroup - } - - when: - strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, true) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - sourceLookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(updater) - 1 * asgService.getAutoScalingGroup('theapp-v001') >> 
asg - 1 * asgService.getLaunchConfiguration('theapp-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> true - 1 * migrateSecurityGroupStrategy.generateResults( - {s -> s.name == 'theapp' && s.region == 'us-east-1' && s.credentials == testCredentials}, - {s -> s.region == 'eu-west-1' && s.credentials == prodCredentials}, - sourceLookup, targetLookup, false, true) >> new MigrateSecurityGroupResult(target: new MigrateSecurityGroupReference(targetName: 'theapp')) - 0 * _ - } - - void 'copies over suspended processes'() { - given: - ServerGroupLocation source = new ServerGroupLocation(name: 'asg-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'asg-v001-lc') - .withSuspendedProcesses([ - new SuspendedProcess().withProcessName('someProcess'), - new SuspendedProcess().withProcessName('otherProcess') - ]) - LaunchConfiguration lc = new LaunchConfiguration( - instanceMonitoring: new InstanceMonitoring(enabled: false) - ) - AWSServerGroupNameResolver nameResolver = Mock(AWSServerGroupNameResolver) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - getAWSServerGroupNameResolver() >> nameResolver - } - - when: - strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, false) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - 1 * asgService.getAutoScalingGroup('asg-v001') >> asg - 1 * asgService.getLaunchConfiguration('asg-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 1 * allowLaunchAtomicOperationConverter.convertOperation(_) >> allowLaunchOperation - 1 * allowLaunchOperation.operate(null) - 1 * basicAmazonDeployHandler.copySourceAttributes(regionScopedProvider, 'asg-v001', false, _) >> { a, b, c, d -> d } - 1 * basicAmazonDeployHandler.handle({d -> d.suspendedProcesses.sort() == ['otherProcess', 'someProcess']}, []) >> new DeploymentResult(serverGroupNames: ['asg-v003']) - 0 * _ - } - - void 'sets source on deploy handler from source parameter and server group name on results'() { - given: - ServerGroupLocation source = new ServerGroupLocation(name: 'asg-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'asg-v001-lc') - LaunchConfiguration lc = new LaunchConfiguration( - instanceMonitoring: new InstanceMonitoring(enabled: false) - ) - AWSServerGroupNameResolver nameResolver = Mock(AWSServerGroupNameResolver) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - getAWSServerGroupNameResolver() >> nameResolver - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, 
migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, false) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - 1 * asgService.getAutoScalingGroup('asg-v001') >> asg - 1 * asgService.getLaunchConfiguration('asg-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 1 * allowLaunchAtomicOperationConverter.convertOperation(_) >> allowLaunchOperation - 1 * allowLaunchOperation.operate(null) - 1 * basicAmazonDeployHandler.copySourceAttributes(regionScopedProvider, 'asg-v001', false, _) >> { a, b, c, d -> d } - 1 * basicAmazonDeployHandler.handle(_, []) >> new DeploymentResult(serverGroupNames: ['asg-v003']) - 0 * _ - results.serverGroupNames == ['asg-v003'] - } - - void 'sets name on dryRun'() { - given: - ServerGroupLocation source = new ServerGroupLocation(name: 'asg-v001', credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - ServerGroupLocation target = new ServerGroupLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - AutoScalingGroup asg = new AutoScalingGroup(launchConfigurationName: 'asg-v001-lc') - LaunchConfiguration lc = new LaunchConfiguration() - AWSServerGroupNameResolver nameResolver = Mock(AWSServerGroupNameResolver) - RegionScopedProvider regionScopedProvider = Stub(RegionScopedProvider) { - getAsgService() >> asgService - getAWSServerGroupNameResolver() >> nameResolver - } - - when: - def results = strategy.generateResults(source, target, sourceLookup, targetLookup, - migrateLoadBalancerStrategy, migrateSecurityGroupStrategy, 'internal', 'external', null, null, null, [:], false, true) - - then: - amazonClientProvider.getAutoScaling(testCredentials, 'us-east-1', true) >> amazonAutoScaling - amazonAutoScaling.describeAutoScalingGroups() >> new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionScopedProvider - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionScopedProvider - 1 * asgService.getAutoScalingGroup('asg-v001') >> asg - 1 * asgService.getLaunchConfiguration('asg-v001-lc') >> lc - 1 * deployDefaults.getAddAppGroupToServerGroup() >> false - 1 * nameResolver.resolveNextServerGroupName('asg', null, null, false) >> 'asg-v002' - 0 * _ - results.serverGroupNames == ['asg-v002'] - } - -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateStrategySupportSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateStrategySupportSpec.groovy deleted file mode 100644 index 67e2e10daf9..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/handlers/MigrateStrategySupportSpec.groovy +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.handlers - -import com.amazonaws.services.ec2.model.IpPermission -import com.amazonaws.services.ec2.model.SecurityGroup -import com.amazonaws.services.ec2.model.UserIdGroupPair -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -class MigrateStrategySupportSpec extends Specification { - - @Subject - MigrateStrategySupport strategy = new TestMigrateStrategy() - - @Shared - NetflixAmazonCredentials testCredentials = TestCredential.named('test') - - SecurityGroupLookup lookup - - void setup() { - lookup = Mock() - } - - void 'returns immediately when classicLinkGroupName is null'() { - when: - strategy.addClassicLinkIngress(lookup, null, 'sg-1', testCredentials, 'vpc-1') - - then: - 0 * _ - } - - void 'returns without adding an ingress or looking up classic link group if target not found'() { - when: - strategy.addClassicLinkIngress(lookup, 'nf-classic-link', 'sg-1', testCredentials, 'vpc-1') - - then: - 1 * lookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.empty() - 0 * _ - } - - void 'returns without adding an ingress when classic link group not found'() { - given: - SecurityGroup securityGroup = new SecurityGroup() - SecurityGroupUpdater updater = Stub() { - getSecurityGroup() >> securityGroup - } - - when: - strategy.addClassicLinkIngress(lookup, 'nf-classic-link', 'sg-1', testCredentials, 'vpc-1') - - then: - 1 * lookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(updater) - 1 * lookup.getSecurityGroupByName('test', 'nf-classic-link', 'vpc-1') >> Optional.empty() - 0 * _ - } - - void 'returns without adding an ingress when classic link group already has ingress'() { - given: - SecurityGroup securityGroup = new SecurityGroup() - SecurityGroup classicLinkGroup = new SecurityGroup(groupId: 'sg-c1') - securityGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs(new UserIdGroupPair().withGroupId('sg-c')) - ] - SecurityGroupUpdater updater = Stub() { - getSecurityGroup() >> securityGroup - } - SecurityGroupUpdater classicLinkUpdater = Stub() { - getSecurityGroup() >> classicLinkGroup - } - - when: - strategy.addClassicLinkIngress(lookup, 'nf-classic-link', 'sg-1', testCredentials, 'vpc-1') - - then: - 1 * lookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(updater) - 1 * lookup.getSecurityGroupByName('test', 'nf-classic-link', 'vpc-1') >> Optional.of(classicLinkUpdater) - 0 * _ - } - - @Unroll - void 'adds an ingress when one not already present for classic link group'() { - given: - SecurityGroup securityGroup = new SecurityGroup() - SecurityGroup classicLinkGroup = new SecurityGroup(groupId: 'sg-c1') - 
securityGroup.ipPermissions = [ - new IpPermission().withUserIdGroupPairs(userIdGroupPairs) - ] - SecurityGroupUpdater updater = Mock() - SecurityGroupUpdater classicLinkUpdater = Stub() { - getSecurityGroup() >> classicLinkGroup - } - - when: - strategy.addClassicLinkIngress(lookup, 'nf-classic-link', 'sg-1', testCredentials, 'vpc-1') - - then: - 1 * lookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(updater) - 1 * lookup.getSecurityGroupByName('test', 'nf-classic-link', 'vpc-1') >> Optional.of(classicLinkUpdater) - 1 * updater.getSecurityGroup() >> securityGroup - 1 * updater.addIngress({ rules -> - def rule = rules[0] - def pair = rule.userIdGroupPairs[0] - rule.ipProtocol == 'tcp' && rule.fromPort == 80 && rule.toPort == 65535 && - pair.groupId == 'sg-c1' && pair.vpcId == 'vpc-1' && pair.userId == testCredentials.accountId - }) - 0 * _ - - where: - userIdGroupPairs << [ - [], - [new UserIdGroupPair().withGroupId('sg-d')], - [new UserIdGroupPair().withGroupId('sg-d'), new UserIdGroupPair().withGroupId('sg-e')] - ] - } - - - static class TestMigrateStrategy implements MigrateStrategySupport {} -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperationUnitSpec.groovy index 6b5c69f2a15..c20916da87b 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/AllowLaunchAtomicOperationUnitSpec.groovy @@ -17,22 +17,14 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.CreateTagsRequest -import com.amazonaws.services.ec2.model.DeleteTagsRequest -import com.amazonaws.services.ec2.model.DescribeImagesRequest -import com.amazonaws.services.ec2.model.DescribeImagesResult -import com.amazonaws.services.ec2.model.DescribeTagsResult -import com.amazonaws.services.ec2.model.Image -import com.amazonaws.services.ec2.model.ModifyImageAttributeRequest -import com.amazonaws.services.ec2.model.Tag -import com.amazonaws.services.ec2.model.TagDescription +import com.amazonaws.services.ec2.model.* import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification class AllowLaunchAtomicOperationUnitSpec extends Specification { @@ -56,11 +48,11 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { getAccountId() >> '67890' } - def creds = Stub(AccountCredentialsProvider) { - getCredentials('target') >> target + def creds = Stub(CredentialsRepository) { + getOne('target') >> target } - def op = new AllowLaunchAtomicOperation(new AllowLaunchDescription(amiName: 'super-awesome-ami', account: 'target', credentials: source)) - op.accountCredentialsProvider = creds + def op = new AllowLaunchAtomicOperation(new 
AllowLaunchDescription(amiName: 'super-awesome-ami', targetAccount: 'target', credentials: source)) + op.credentialsRepository = creds op.amazonClientProvider = provider when: @@ -95,17 +87,17 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { describeImages(_) >> null } def provider = Mock(AmazonClientProvider) - def description = new AllowLaunchDescription(account: "prod", amiName: "ami-123456", region: "us-west-1", credentials: testCredentials) + def description = new AllowLaunchDescription(targetAccount: "prod", amiName: "ami-123456", region: "us-west-1", credentials: testCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = provider - op.accountCredentialsProvider = Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider){ - 1 * getCredentials("prod") >> prodCredentials + with(op.credentialsRepository){ + 1 * getOne("prod") >> prodCredentials } with(provider) { 1 * getAmazonEC2(testCredentials, _, true) >> sourceAmazonEc2 @@ -126,17 +118,17 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { def targetAmazonEc2 = Mock(AmazonEC2) def provider = Mock(AmazonClientProvider) - def description = new AllowLaunchDescription(account: "prod", amiName: "ami-123456", region: "us-west-1", credentials: testCredentials) + def description = new AllowLaunchDescription(targetAccount: "prod", amiName: "ami-123456", region: "us-west-1", credentials: testCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = provider - op.accountCredentialsProvider = Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider){ - 1 * getCredentials("prod") >> prodCredentials + with(op.credentialsRepository){ + 1 * getOne("prod") >> prodCredentials } with(provider) { 1 * getAmazonEC2(testCredentials, _, true) >> sourceAmazonEc2 @@ -161,17 +153,17 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { def targetAmazonEc2 = Mock(AmazonEC2) def provider = Mock(AmazonClientProvider) - def description = new AllowLaunchDescription(account: "test", amiName: "ami-123456", region: "us-west-1", credentials: testCredentials) + def description = new AllowLaunchDescription(targetAccount: "test", amiName: "ami-123456", region: "us-west-1", credentials: testCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = provider - op.accountCredentialsProvider = Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider){ - 1 * getCredentials("test") >> testCredentials + with(op.credentialsRepository){ + 1 * getOne("test") >> testCredentials } with(provider) { 1 * getAmazonEC2(testCredentials, _, true) >> sourceAmazonEc2 @@ -194,17 +186,17 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { def sourceAmazonEc2 = Mock(AmazonEC2) def targetAmazonEc2 = Mock(AmazonEC2) - def description = new AllowLaunchDescription(account: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: sourceCredentials) + def description = new AllowLaunchDescription(targetAccount: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: sourceCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = Mock(AmazonClientProvider) - op.accountCredentialsProvider = 
Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider) { - 1 * getCredentials('target') >> targetCredentials + with(op.credentialsRepository) { + 1 * getOne('target') >> targetCredentials 1 * getAll() >> [sourceCredentials, targetCredentials, ownerCredentials] } @@ -235,18 +227,18 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { def ownerAmazonEc2 = Mock(AmazonEC2) def targetAmazonEc2 = Mock(AmazonEC2) - def description = new AllowLaunchDescription(account: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: ownerCredentials) + def description = new AllowLaunchDescription(targetAccount: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: ownerCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = Mock(AmazonClientProvider) - op.accountCredentialsProvider = Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider) { - 1 * getCredentials('target') >> targetCredentials + with(op.credentialsRepository) { + 1 * getOne('target') >> targetCredentials } with(op.amazonClientProvider) { @@ -272,17 +264,17 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { def sourceAmazonEc2 = Mock(AmazonEC2) def targetAmazonEc2 = Mock(AmazonEC2) - def description = new AllowLaunchDescription(account: 'target', amiName: 'ami-123456', region: 'us-west-2', credentials: sourceCredentials) + def description = new AllowLaunchDescription(targetAccount: 'target', amiName: 'ami-123456', region: 'us-west-2', credentials: sourceCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = Mock(AmazonClientProvider) - op.accountCredentialsProvider = Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider) { - 1 * getCredentials('target') >> targetCredentials + with(op.credentialsRepository) { + 1 * getOne('target') >> targetCredentials } with(op.amazonClientProvider) { 1 * getAmazonEC2(targetCredentials, _, true) >> targetAmazonEc2 @@ -314,17 +306,17 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { def ownerAmazonEc2 = Mock(AmazonEC2) def targetAmazonEc2 = Mock(AmazonEC2) - def description = new AllowLaunchDescription(account: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: ownerCredentials) + def description = new AllowLaunchDescription(targetAccount: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: ownerCredentials) def op = new AllowLaunchAtomicOperation(description) op.amazonClientProvider = Mock(AmazonClientProvider) - op.accountCredentialsProvider = Mock(AccountCredentialsProvider) + op.credentialsRepository = Mock(CredentialsRepository) when: op.operate([]) then: - with(op.accountCredentialsProvider) { - 1 * getCredentials('target') >> targetCredentials + with(op.credentialsRepository) { + 1 * getOne('target') >> targetCredentials } with(op.amazonClientProvider) { @@ -350,4 +342,73 @@ class AllowLaunchAtomicOperationUnitSpec extends Specification { } + void "should skip allow launch and tag syncing on resolved target AMI if owner account cannot be resolved"() { + setup: + def sourceCredentials = TestCredential.named('source') + def targetCredentials = TestCredential.named('target') + def sourceAmazonEc2 = Mock(AmazonEC2) + def targetAmazonEc2 = 
Mock(AmazonEC2) + def description = new AllowLaunchDescription(targetAccount: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: sourceCredentials) + def op = new AllowLaunchAtomicOperation(description) + op.amazonClientProvider = Mock(AmazonClientProvider) + op.credentialsRepository = Mock(CredentialsRepository) + + when: + op.operate([]) + + then: + with(op.credentialsRepository) { + 1 * getOne('target') >> targetCredentials + 1 * getAll() >> [sourceCredentials, targetCredentials] + } + with(op.amazonClientProvider) { + 1 * getAmazonEC2(sourceCredentials, _, true) >> sourceAmazonEc2 + 1 * getAmazonEC2(targetCredentials, _, true) >> targetAmazonEc2 + } + with(targetAmazonEc2) { + 1 * describeImages(_) >> new DescribeImagesResult().withImages( + new Image() + .withImageId("ami-123456") + .withOwnerId('unknown') + ) + } + 0 * _ + } + + void "should throw exception when AMI is resolved in source but the owner account cannot be resolved"() { + setup: + def sourceCredentials = TestCredential.named('source') + def targetCredentials = TestCredential.named('target') + def sourceAmazonEc2 = Mock(AmazonEC2) + def targetAmazonEc2 = Mock(AmazonEC2) + def description = new AllowLaunchDescription(targetAccount: 'target', amiName: 'ami-123456', region: 'us-west-1', credentials: sourceCredentials) + def op = new AllowLaunchAtomicOperation(description) + op.amazonClientProvider = Mock(AmazonClientProvider) + op.credentialsRepository = Mock(CredentialsRepository) + + when: + op.operate([]) + + then: + thrown IllegalArgumentException + with(op.credentialsRepository) { + 1 * getOne('target') >> targetCredentials + 1 * getAll() >> [sourceCredentials, targetCredentials] + } + with(op.amazonClientProvider) { + 1 * getAmazonEC2(sourceCredentials, _, true) >> sourceAmazonEc2 + 1 * getAmazonEC2(targetCredentials, _, true) >> targetAmazonEc2 + } + with(targetAmazonEc2) { + 3 * describeImages(_) >> new DescribeImagesResult() + } + with(sourceAmazonEc2) { + 1 * describeImages(_) >> new DescribeImagesResult().withImages( + new Image() + .withImageId("ami-123456") + .withOwnerId('unknown') + ) + } + 0 * _ + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperationUnitSpec.groovy index e496ec5f60d..2dfa8622912 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/CopyLastAsgAtomicOperationUnitSpec.groovy @@ -21,73 +21,166 @@ import com.amazonaws.services.autoscaling.model.BlockDeviceMapping import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult import com.amazonaws.services.autoscaling.model.DescribeLaunchConfigurationsRequest import com.amazonaws.services.autoscaling.model.DescribeLaunchConfigurationsResult +import com.amazonaws.services.autoscaling.model.DescribeLifecycleHooksRequest +import com.amazonaws.services.autoscaling.model.DescribeLifecycleHooksResult import com.amazonaws.services.autoscaling.model.Ebs +import com.amazonaws.services.autoscaling.model.InstancesDistribution import com.amazonaws.services.autoscaling.model.LaunchConfiguration +import com.amazonaws.services.autoscaling.model.LaunchTemplate +import com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides +import 
com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.LifecycleHook +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy import com.amazonaws.services.autoscaling.model.TagDescription import com.amazonaws.services.ec2.AmazonEC2 -import com.netflix.spinnaker.clouddriver.aws.deploy.AWSServerGroupNameResolver +import com.amazonaws.services.ec2.model.CreditSpecification +import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMapping +import com.amazonaws.services.ec2.model.LaunchTemplateEbsBlockDevice +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMarketOptions +import com.amazonaws.services.ec2.model.LaunchTemplateSpotMarketOptions +import com.amazonaws.services.ec2.model.LaunchTemplateVersion +import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AWSServerGroupNameResolver +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType import com.netflix.spinnaker.clouddriver.aws.deploy.validators.BasicAmazonDeployDescriptionValidator +import com.netflix.spinnaker.clouddriver.aws.model.AmazonAsgLifecycleHook +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.services.AsgService +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.AsgReferenceCopier +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgReferenceCopier import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.BasicAmazonDeployHandler import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import spock.lang.Specification +import spock.lang.Subject import spock.lang.Unroll class CopyLastAsgAtomicOperationUnitSpec extends Specification { - def setupSpec() { + def deployHandler = Mock(BasicAmazonDeployHandler) + def mockAutoScaling = Mock(AmazonAutoScaling) + def ec2 = Mock(AmazonEC2) + def mockProvider = Mock(AmazonClientProvider) + def mockAsgReferenceCopier = Mock(AsgReferenceCopier) + def asgService = new AsgService(mockAutoScaling) + def serverGroupNameResolver = Mock(AWSServerGroupNameResolver) + def regionScopedProviderStub = Stub(RegionScopedProviderFactory.RegionScopedProvider) + + def description = new BasicAmazonDeployDescription( + application: "asgard", + stack: "stack", + availabilityZones: [ + 'us-east-1': [], + 'us-west-1': [] + ], + credentials: TestCredential.named('baz'), + securityGroups: ["someGroupName", "sg-12345a"], + capacity: new BasicAmazonDeployDescription.Capacity(min: 1, max: 3, desired: 5)) + + @Subject def op = new CopyLastAsgAtomicOperation(description) + + def setup() { TaskRepository.threadLocalTask.set(Mock(Task)) - } - @Unroll - void "operation builds description based on ancestor asg"() { - setup: - def deployHandler = Mock(BasicAmazonDeployHandler) - def description = new BasicAmazonDeployDescription(application: "asgard", stack: "stack") - description.availabilityZones = 
['us-east-1': [], 'us-west-1': []] - description.credentials = TestCredential.named('baz') - description.securityGroups = ['someGroupName', 'sg-12345a'] - description.capacity = new BasicAmazonDeployDescription.Capacity(min: 1, max: 3, desired: 5) - description.spotPrice = requestSpotPrice - def mockEC2 = Mock(AmazonEC2) - def mockAutoScaling = Mock(AmazonAutoScaling) - def mockProvider = Mock(AmazonClientProvider) - mockProvider.getAmazonEC2(_, _, true) >> mockEC2 + mockProvider.getAmazonEC2(_, _, true) >> ec2 mockProvider.getAutoScaling(_, _, true) >> mockAutoScaling - def op = new CopyLastAsgAtomicOperation(description) + + regionScopedProviderStub.getAsgReferenceCopier(_, _) >> mockAsgReferenceCopier + regionScopedProviderStub.getAsgService() >> asgService + regionScopedProviderStub.getAWSServerGroupNameResolver() >> serverGroupNameResolver + op.amazonClientProvider = mockProvider op.basicAmazonDeployHandler = deployHandler - - def mockAsgReferenceCopier = Mock(AsgReferenceCopier) - def asgService = new AsgService(mockAutoScaling) - def serverGroupNameResolver = Mock(AWSServerGroupNameResolver) op.regionScopedProviderFactory = Stub(RegionScopedProviderFactory) { - forRegion(_, _) >> Stub(RegionScopedProviderFactory.RegionScopedProvider) { - getAsgReferenceCopier(_, _) >> mockAsgReferenceCopier - getAsgService() >> asgService - getAWSServerGroupNameResolver() >> serverGroupNameResolver - } + forRegion(_, _) >> regionScopedProviderStub } op.basicAmazonDeployDescriptionValidator = Stub(BasicAmazonDeployDescriptionValidator) - def expectedDeployDescription = { region -> - new BasicAmazonDeployDescription(application: 'asgard', stack: 'stack', keyPair: 'key-pair-name', - securityGroups: ['someGroupName', 'sg-12345a'], availabilityZones: [(region): null], - capacity: new BasicAmazonDeployDescription.Capacity(min: 1, max: 3, desired: 5), - tags: [Name: 'name-tag'], - spotPrice: expectedSpotPrice, - source: new BasicAmazonDeployDescription.Source( - asgName: "asgard-stack-v000", - account: 'baz', - region: null - )) + } + + @Unroll + void "operation builds description based on ancestor asg backed by a launch template"() { + given: + description.availabilityZones = ['us-east-1': []] + description.spotPrice = reqSpotPrice + description.blockDevices = reqBlockDevices + + and: + def blockDevicesFromSrcAsg = [new AmazonBlockDevice(deviceName: "/dev/src")] + def launchTemplateVersion = new LaunchTemplateVersion( + launchTemplateName: "foo", + launchTemplateId: "foo", + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData( + keyName: "key-pair-name", + instanceMarketOptions: new LaunchTemplateInstanceMarketOptions( + spotOptions: new LaunchTemplateSpotMarketOptions( + maxPrice: ancestorSpotPrice + ) + ), + blockDeviceMappings: blockDevicesFromSrcAsg?.collect { + new LaunchTemplateBlockDeviceMapping().withVirtualName(it.virtualName).withDeviceName(it.deviceName) + } + ) + ) + + def launchTemplateSpec = new LaunchTemplateSpecification( + launchTemplateName: launchTemplateVersion.launchTemplateName, + launchTemplateId: launchTemplateVersion.launchTemplateId, + version: launchTemplateVersion.versionNumber.toString(), + ) + + and: + regionScopedProviderStub.getLaunchTemplateService() >> Mock(LaunchTemplateService) { + getLaunchTemplateVersion(launchTemplateSpec) >> Optional.of(launchTemplateVersion) } + def mockAncestorAsg = Mock(AutoScalingGroup) + mockAncestorAsg.getAutoScalingGroupName() >> "asgard-stack-v000" + mockAncestorAsg.getMinSize() >> 0 + mockAncestorAsg.getMaxSize() >> 2 + 
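// Aside on the Spock stubbing syntax used for this mock: `stub.method() >> value` fixes the return value of every matching call. A minimal standalone sketch, assuming a hypothetical `Foo` interface with a `getBar()` getter:
+ //   def foo = Mock(Foo)
+ //   foo.getBar() >> 42
+ //   assert foo.bar == 42 // Groovy property access invokes the stubbed getter
+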
mockAncestorAsg.getDesiredCapacity() >> 4 + mockAncestorAsg.getLaunchTemplate() >> launchTemplateSpec + mockAncestorAsg.getTags() >> [new TagDescription().withKey('Name').withValue('name-tag')] + deployHandler.buildBlockDeviceMappingsFromSourceAsg(regionScopedProviderStub, mockAncestorAsg, description) >> blockDevicesFromSrcAsg + + when: + def result = op.operate([]) + + then: + result.serverGroupNameByRegion['us-east-1'] == 'asgard-stack-v001' + result.serverGroupNames == ['asgard-stack-v001'] + + 1 * mockAutoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult().withAutoScalingGroups([mockAncestorAsg]) + } + + 1 * serverGroupNameResolver.resolveLatestServerGroupName("asgard-stack") >> { "asgard-stack-v000" } + 0 * serverGroupNameResolver._ + 1 * deployHandler.handle(_ as BasicAmazonDeployDescription, _) >> { arguments -> + BasicAmazonDeployDescription actualDesc = arguments[0] + def expectedDesc = expectedDescription(expectedSpotPrice, "us-east-1", null,null,null,null, expectedBlockDevices) + + assert actualDesc.blockDevices == expectedDesc.blockDevices + assert actualDesc == expectedDesc; new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-east-1': 'asgard-stack-v001']) + } + + where: + reqSpotPrice | ancestorSpotPrice || expectedSpotPrice || reqBlockDevices || expectedBlockDevices + "0.25" | null || "0.25" || null || [new AmazonBlockDevice(deviceName: "/dev/src")] + "0.25" | "0.5" || "0.25" || [] || [] + null | "0.25" || "0.25" || [new AmazonBlockDevice(deviceName: "/dev/req")] || [new AmazonBlockDevice(deviceName: "/dev/req")] + "" | "0.25" || null || [new AmazonBlockDevice(deviceName: "/dev/req")] || [new AmazonBlockDevice(deviceName: "/dev/req")] + null | null || null || [new AmazonBlockDevice(deviceName: "/dev/req")] || [new AmazonBlockDevice(deviceName: "/dev/req")] + } + + @Unroll + void "operation builds description based on ancestor asg backed by launch configuration"() { + setup: + description.spotPrice = requestSpotPrice when: def result = op.operate([]) @@ -96,6 +189,7 @@ class CopyLastAsgAtomicOperationUnitSpec extends Specification { result.serverGroupNameByRegion['us-east-1'] == 'asgard-stack-v001' result.serverGroupNameByRegion['us-west-1'] == 'asgard-stack-v001' result.serverGroupNames == ['asgard-stack-v001', 'asgard-stack-v001'] + 2 * mockAutoScaling.describeLaunchConfigurations(_) >> { DescribeLaunchConfigurationsRequest request -> assert request.launchConfigurationNames == ['foo'] def mockLaunch = Mock(LaunchConfiguration) @@ -115,17 +209,342 @@ class CopyLastAsgAtomicOperationUnitSpec extends Specification { mockAsg.getTags() >> [new TagDescription().withKey('Name').withValue('name-tag')] new DescribeAutoScalingGroupsResult().withAutoScalingGroups([mockAsg]) } + 2 * serverGroupNameResolver.resolveLatestServerGroupName("asgard-stack") >> { "asgard-stack-v000" } 0 * serverGroupNameResolver._ - 1 * deployHandler.handle(expectedDeployDescription('us-east-1'), _) >> new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-east-1': 'asgard-stack-v001']) - 1 * deployHandler.handle(expectedDeployDescription('us-west-1'), _) >> new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-west-1': 'asgard-stack-v001']) + 1 * deployHandler.handle(expectedDescription(expectedSpotPrice, 'us-east-1'), _) >> + new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-east-1': 'asgard-stack-v001']) + 1 * 
deployHandler.handle(expectedDescription(expectedSpotPrice, 'us-west-1'), _) >> + new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-west-1': 'asgard-stack-v001']) where: requestSpotPrice | ancestorSpotPrice || expectedSpotPrice - 0.25 | null || 0.25 - 0.25 | 0.5 || 0.25 - null | 0.25 || 0.25 - "" | 0.25 || null + "0.25" | null || "0.25" + "0.25" | "0.5" || "0.25" + null | "0.25" || "0.25" + "" | "0.25" || null null | null || null } + + @Unroll + void "operation builds new description with correct cpu credits based on ancestor asg and request"() { + given: + description.availabilityZones = ['us-east-1': []] + description.setLaunchTemplate = true + description.unlimitedCpuCredits = unlimitedCpuCreditsInReq + description.instanceType = instanceTypeInReq + + def overrides = null + if (instanceTypeOverride2InReq) { + overrides = [ new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "t3.large", weightedCapacity: "2"), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: instanceTypeOverride2InReq, weightedCapacity: "4")] + description.launchTemplateOverridesForInstanceType = overrides + } + + and: + def launchTemplateVersion = new LaunchTemplateVersion( + launchTemplateName: "foo", + launchTemplateId: "foo", + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData( + keyName: "key-pair-name" + ) + ) + if (ancestorUnlimitedCpuCredits != null) { + launchTemplateVersion.launchTemplateData.creditSpecification = new CreditSpecification( + cpuCredits: ancestorUnlimitedCpuCredits + ) + } + def launchTemplateSpec = new LaunchTemplateSpecification( + launchTemplateName: launchTemplateVersion.launchTemplateName, + launchTemplateId: launchTemplateVersion.launchTemplateId, + version: launchTemplateVersion.versionNumber.toString(), + ) + + and: + regionScopedProviderStub.getLaunchTemplateService() >> Mock(LaunchTemplateService) { + getLaunchTemplateVersion(launchTemplateSpec) >> Optional.of(launchTemplateVersion) + } + + and: + def mockAncestorAsg = Mock(AutoScalingGroup) + mockAncestorAsg.getAutoScalingGroupName() >> "asgard-stack-v000" + mockAncestorAsg.getMinSize() >> 0 + mockAncestorAsg.getMaxSize() >> 2 + mockAncestorAsg.getDesiredCapacity() >> 4 + mockAncestorAsg.getLaunchTemplate() >> launchTemplateSpec + mockAncestorAsg.getTags() >> [new TagDescription().withKey('Name').withValue('name-tag')] + mockAutoScaling.describeAutoScalingGroups(_) >> { + new DescribeAutoScalingGroupsResult().withAutoScalingGroups([mockAncestorAsg]) + } + serverGroupNameResolver.resolveLatestServerGroupName("asgard-stack") >> { "asgard-stack-v000" } + + when: + op.operate([]) + + then: + 1 * deployHandler.handle(expectedDescription(null, 'us-east-1', instanceTypeInReq, expectedUnlimitedCpuCredits, null, overrides), _) >> + new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-east-1': 'asgard-stack-v001']) + + where: + ancestorUnlimitedCpuCredits || unlimitedCpuCreditsInReq || instanceTypeInReq || instanceTypeOverride2InReq || expectedUnlimitedCpuCredits + "standard" || true || 't2.large' || null || true + "standard" || false || 't2.large' || null || false + "unlimited" || true || 't2.large' || null || true + "unlimited" || false || 't2.large' || null || false + "standard" || null || 'c3.large' || null || null // unsupported type, do NOT copy from ancestor + "standard" || null || 't3.large' || null || false // supported type, copy from ancestor + "unlimited" || 
null || 'c3.large' || null || null // unsupported type, do NOT copy from ancestor + "unlimited" || null || 't3.large' || null || true // supported type, copy from ancestor + "standard" || null || 't2.large' || ['c4.large'] || null // not all types supported, do NOT copy from ancestor + "unlimited" || null || 't2.large' || ['c4.large'] || null // not all types supported, do NOT copy from ancestor + } + + @Unroll + void "operation builds description based on ancestor asg backed by mixed instances policy with launch template"() { + given: + description.availabilityZones = ['us-east-1': []] + description.spotPrice = requestSpotPrice + description.spotAllocationStrategy = requestSpotAllocStrategy + description.launchTemplateOverridesForInstanceType = requestOverrides + + and: + def launchTemplateVersion = new LaunchTemplateVersion( + launchTemplateName: "foo", + launchTemplateId: "foo", + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData( + keyName: "key-pair-name", + blockDeviceMappings: [ + new LaunchTemplateBlockDeviceMapping( + deviceName: "/dev/sdb", + ebs: new LaunchTemplateEbsBlockDevice( + volumeSize: 125 + ) + ), + new LaunchTemplateBlockDeviceMapping( + deviceName: "/dev/sdc", + virtualName: "ephemeral1" + ) + ] + ) + ) + def launchTemplateSpec = new LaunchTemplateSpecification( + launchTemplateName: launchTemplateVersion.launchTemplateName, + launchTemplateId: launchTemplateVersion.launchTemplateId, + version: launchTemplateVersion.versionNumber.toString(), + ) + def ancestorMixedInstancesPolicy = new MixedInstancesPolicy( + launchTemplate: new LaunchTemplate( + launchTemplateSpecification: launchTemplateSpec, + overrides: ancestorOverrides + ), + instancesDistribution: new InstancesDistribution( + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 2, + onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: ancestorSpotAllocStrategy, // AWS default is lowest-price + spotInstancePools: ancestorSpotAllocStrategy == "lowest-price" ? 2 : null, // AWS default is 2 + spotMaxPrice: ancestorSpotPrice, + ) + ) + + and: + regionScopedProviderStub.getLaunchTemplateService() >> Mock(LaunchTemplateService) { + getLaunchTemplateVersion(launchTemplateSpec) >> Optional.of(launchTemplateVersion) + } + + when: + def result = op.operate([]) + + then: + result.serverGroupNameByRegion['us-east-1'] == 'asgard-stack-v001' + result.serverGroupNames == ['asgard-stack-v001'] + + 1 * mockAutoScaling.describeAutoScalingGroups(_) >> { + def mockAsg = Mock(AutoScalingGroup) + mockAsg.getAutoScalingGroupName() >> "asgard-stack-v000" + mockAsg.getMinSize() >> 0 + mockAsg.getMaxSize() >> 2 + mockAsg.getDesiredCapacity() >> 4 + mockAsg.getMixedInstancesPolicy() >> ancestorMixedInstancesPolicy + mockAsg.getTags() >> [new TagDescription().withKey('Name').withValue('name-tag')] + new DescribeAutoScalingGroupsResult().withAutoScalingGroups([mockAsg]) + } + + and: + 1 * serverGroupNameResolver.resolveLatestServerGroupName("asgard-stack") >> { "asgard-stack-v000" } + 0 * serverGroupNameResolver._ + 1 * deployHandler.handle(_ as BasicAmazonDeployDescription, _) >> { arguments -> + def expectedMip = ancestorMixedInstancesPolicy + expectedMip.setInstancesDistribution(ancestorMixedInstancesPolicy.getInstancesDistribution() + .withSpotAllocationStrategy(expectedSpotAllocStrategy) + .withSpotMaxPrice(expectedSpotPrice) + .withSpotInstancePools(expectedSpotAllocStrategy == "lowest-price" ?
2 : null)) + expectedMip.setLaunchTemplate(ancestorMixedInstancesPolicy.getLaunchTemplate().withOverrides(expectedOverrides)) + def expectedDesc = expectedDescription(null, "us-east-1", null, null, expectedMip) + def actualDesc = arguments[0] + + assert actualDesc == expectedDesc; new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-east-1': 'asgard-stack-v001']) + } + + where: + requestSpotPrice | ancestorSpotPrice || expectedSpotPrice | requestSpotAllocStrategy | ancestorSpotAllocStrategy || expectedSpotAllocStrategy | requestOverrides | ancestorOverrides || expectedOverrides + "0.25" | null || "0.25" | null | "lowest-price" || "lowest-price" | null | null || null + "0.25" | "0.5" || "0.25" | "capacity-optimized" | "lowest-price" || "capacity-optimized" |[new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "c5.large", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "c4.large", priority: 2)] |[new LaunchTemplateOverrides().withInstanceType("m5.large") + .withWeightedCapacity("1")] ||[new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "c5.large", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "c4.large", priority: 2)] + null | "0.25" || "0.25" | null | "lowest-price" || "lowest-price" | [] | null || null + "" | "0.25" || null | "capacity-optimized" | "lowest-price" || "capacity-optimized" | null |[new LaunchTemplateOverrides().withInstanceType("m5.large") + .withWeightedCapacity("1"), + new LaunchTemplateOverrides().withInstanceType("m5.xlarge") + .withWeightedCapacity("2")] ||[new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "m5.large", weightedCapacity: "1", priority: 1), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "m5.xlarge", weightedCapacity: "2", priority: 2)] + null | null || null | null | "lowest-price" || "lowest-price" | null | null || null + } + + @Unroll + void "operation populates ASG lifecycle hooks and capacity rebalance in description as expected"() { + given: + description.availabilityZones = ['us-east-1': []] + description.lifecycleHooks = requestLifecycleHooks + description.capacityRebalance = requestCapRebalance + + def launchTemplateVersion = new LaunchTemplateVersion( + launchTemplateName: "foo", + launchTemplateId: "foo", + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData( + keyName: "key-pair-name", + ) + ) + + def launchTemplateSpec = new LaunchTemplateSpecification( + launchTemplateName: launchTemplateVersion.launchTemplateName, + launchTemplateId: launchTemplateVersion.launchTemplateId, + version: launchTemplateVersion.versionNumber.toString(), + ) + + and: + regionScopedProviderStub.getLaunchTemplateService() >> Mock(LaunchTemplateService) { + getLaunchTemplateVersion(launchTemplateSpec) >> Optional.of(launchTemplateVersion) + } + + when: + op.operate([]) + + then: + 1 * mockAutoScaling.describeAutoScalingGroups(_) >> { + def mockAsg = Mock(AutoScalingGroup) + mockAsg.getAutoScalingGroupName() >> "asgard-stack-v000" + mockAsg.getMinSize() >> 0 + mockAsg.getMaxSize() >> 2 + mockAsg.getDesiredCapacity() >> 4 + mockAsg.getLaunchTemplate() >> launchTemplateSpec + mockAsg.getCapacityRebalance() >> ancestorCapRebalance + new DescribeAutoScalingGroupsResult().withAutoScalingGroups([mockAsg]) + } + (requestLifecycleHooks ? 
0 : 1) * mockAutoScaling.describeLifecycleHooks(_ as DescribeLifecycleHooksRequest) >> { arguments -> + DescribeLifecycleHooksRequest req = arguments[0] + assert req.getAutoScalingGroupName() == "asgard-stack-v000"; new DescribeLifecycleHooksResult().withLifecycleHooks(ancestorLifecycleHooks) + } + + and: + 1 * serverGroupNameResolver.resolveLatestServerGroupName("asgard-stack") >> { "asgard-stack-v000" } + 0 * serverGroupNameResolver._ + 1 * deployHandler.handle(_ as BasicAmazonDeployDescription, _) >> { arguments -> + BasicAmazonDeployDescription actualDesc = arguments[0] + + assert actualDesc.capacityRebalance == expectedCapRebalance + assert actualDesc.lifecycleHooks == expectedLifecycleHooks; new DeploymentResult(serverGroupNames: ['asgard-stack-v001'], serverGroupNameByRegion: ['us-east-1': 'asgard-stack-v001']) + } + + where: + requestCapRebalance | ancestorCapRebalance || expectedCapRebalance | requestLifecycleHooks | ancestorLifecycleHooks || expectedLifecycleHooks + null | false || false | null | null || [] + null | true || true | null | [new LifecycleHook( + lifecycleTransition: 'autoscaling:EC2_INSTANCE_TERMINATING', + heartbeatTimeout: 1800, + defaultResult: 'CONTINUE')] || [new AmazonAsgLifecycleHook( + lifecycleTransition: AmazonAsgLifecycleHook.Transition.EC2InstanceTerminating, + heartbeatTimeout: 1800, + defaultResult: AmazonAsgLifecycleHook.DefaultResult.CONTINUE)] + false | false || false |[] | [new LifecycleHook( + lifecycleTransition: 'autoscaling:EC2_INSTANCE_TERMINATING', + heartbeatTimeout: 1800, + defaultResult: 'CONTINUE')] || [new AmazonAsgLifecycleHook( + lifecycleTransition: AmazonAsgLifecycleHook.Transition.EC2InstanceTerminating, + heartbeatTimeout: 1800, + defaultResult: AmazonAsgLifecycleHook.DefaultResult.CONTINUE)] + true | false || true |[new AmazonAsgLifecycleHook( + roleARN: 'role-arn', + notificationTargetARN: 'target-arn', + notificationMetadata: 'metadata', + lifecycleTransition: AmazonAsgLifecycleHook.Transition.EC2InstanceTerminating, + heartbeatTimeout: 3600, + defaultResult: AmazonAsgLifecycleHook.DefaultResult.ABANDON + )] | null || [new AmazonAsgLifecycleHook( + roleARN: 'role-arn', + notificationTargetARN: 'target-arn', + notificationMetadata: 'metadata', + lifecycleTransition: AmazonAsgLifecycleHook.Transition.EC2InstanceTerminating, + heartbeatTimeout: 3600, + defaultResult: AmazonAsgLifecycleHook.DefaultResult.ABANDON)] + } + + private static BasicAmazonDeployDescription expectedDescription( + String expectedSpotPrice = null, + String region, + String instanceType = null, + Boolean unlimitedCpuCredits = null, + MixedInstancesPolicy mip = null, + List overrides = null, + List blockDevices = null + ) { + def desc = new BasicAmazonDeployDescription( + application: 'asgard', + stack: 'stack', + credentials: TestCredential.named('baz'), + keyPair: 'key-pair-name', + securityGroups: ['someGroupName', 'sg-12345a'], + availabilityZones: [(region): null], + capacity: new BasicAmazonDeployDescription.Capacity(min: 1, max: 3, desired: 5), + tags: [Name: 'name-tag'], + lifecycleHooks: [], + spotPrice: mip ? 
mip.getInstancesDistribution().getSpotMaxPrice() : expectedSpotPrice, + source: new BasicAmazonDeployDescription.Source( + asgName: "asgard-stack-v000", + account: 'baz', + region: null + ), + unlimitedCpuCredits: unlimitedCpuCredits, + instanceType: instanceType, + blockDevices: blockDevices + ) + + if (mip) { + desc.onDemandAllocationStrategy = mip.instancesDistribution.onDemandAllocationStrategy + desc.onDemandBaseCapacity = mip.instancesDistribution.onDemandBaseCapacity + desc.onDemandPercentageAboveBaseCapacity = mip.instancesDistribution.onDemandPercentageAboveBaseCapacity + desc.spotAllocationStrategy = mip.instancesDistribution.spotAllocationStrategy + desc.spotInstancePools = mip.instancesDistribution.spotInstancePools + int priority = 1 + desc.launchTemplateOverridesForInstanceType = mip.launchTemplate.overrides ? mip.launchTemplate.overrides.collect { + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: it.instanceType, weightedCapacity: it.weightedCapacity, priority: priority++) + }.toList() : null + } + if (overrides) { + desc.launchTemplateOverridesForInstanceType = overrides + } + + return desc + } } + diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAlarmOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAlarmOperationUnitSpec.groovy index 22f72e8a060..2bcef6a3ef3 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAlarmOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAlarmOperationUnitSpec.groovy @@ -43,12 +43,12 @@ class DeleteAlarmOperationUnitSpec extends Specification { credentials: credz ) - final cloudWatch = Mock(AmazonCloudWatch) - final amazonClientProvider = Stub(AmazonClientProvider) { + def cloudWatch = Mock(AmazonCloudWatch) + def amazonClientProvider = Stub(AmazonClientProvider) { getCloudWatch(credz, "us-west-1", true) >> cloudWatch } - @Subject final op = new DeleteAlarmAtomicOperation(description) + @Subject def op = new DeleteAlarmAtomicOperation(description) def setup() { op.amazonClientProvider = amazonClientProvider diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonSnapshotAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonSnapshotAtomicOperationSpec.groovy new file mode 100644 index 00000000000..97147bd135f --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteAmazonSnapshotAtomicOperationSpec.groovy @@ -0,0 +1,68 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops + +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DeleteSnapshotRequest +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonSnapshotDescription +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import spock.lang.Specification +import spock.lang.Subject + +class DeleteAmazonSnapshotAtomicOperationSpec extends Specification { + def credentials = Stub(NetflixAmazonCredentials) { + getName() >> "test" + } + + def description = new DeleteAmazonSnapshotDescription( + snapshotId: "snap-123", + region: "us-east-1", + credentials: credentials + ) + + @Subject + def deleteSnapshotOp = new DeleteAmazonSnapshotAtomicOperation(description, new NoopRegistry()) + + def setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + } + + def "should delete a snapshot"() { + given: + def ec2 = Mock(AmazonEC2) + def amazonClientProvider = Stub(AmazonClientProvider) { + getAmazonEC2(credentials, "us-east-1") >> ec2 + } + + and: + deleteSnapshotOp.amazonClientProvider = amazonClientProvider + + when: + deleteSnapshotOp.operate([]) + + then: + 1 * ec2.deleteSnapshot(_) >> { DeleteSnapshotRequest request -> + assert request.snapshotId == description.snapshotId + } + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationAtomicOperationSpec.groovy new file mode 100644 index 00000000000..af5c827d81c --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationAtomicOperationSpec.groovy @@ -0,0 +1,94 @@ +/* + * Copyright 2021 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy.ops
+
+import com.amazonaws.AmazonServiceException
+import com.amazonaws.services.cloudformation.AmazonCloudFormation
+import com.amazonaws.services.cloudformation.model.DeleteStackRequest
+import com.amazonaws.services.cloudformation.model.DeleteStackResult
+import com.netflix.spinnaker.clouddriver.aws.TestCredential
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationDescription
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
+import com.netflix.spinnaker.clouddriver.data.task.Task
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
+import spock.lang.Specification
+
+class DeleteCloudFormationAtomicOperationSpec extends Specification {
+  void setupSpec() {
+    TaskRepository.threadLocalTask.set(Mock(Task))
+  }
+
+  void "should build a DeleteStackRequest and submit through aws client"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def deleteStackResult = Mock(DeleteStackResult)
+    def op = new DeleteCloudFormationAtomicOperation(
+      new DeleteCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.deleteStack(_) >> { DeleteStackRequest request ->
+      assert request.getStackName() == "stackTest"
+      deleteStackResult
+    }
+  }
+
+  void "should propagate exceptions when deleting the stack"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def op = new DeleteCloudFormationAtomicOperation(
+      new DeleteCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    def exception = new AmazonServiceException("error")
+
+    when:
+    try {
+      op.operate([])
+    }
+    catch (Exception e) {
+      assert e instanceof AmazonServiceException
+    }
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.deleteStack(_) >> {
+      throw exception
+    }
+  }
+
+
+}
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationChangeSetAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationChangeSetAtomicOperationSpec.groovy
new file mode 100644
index 00000000000..7d94b659482
--- /dev/null
+++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteCloudFormationChangeSetAtomicOperationSpec.groovy
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2019 Adevinta, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
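
The DeleteCloudFormation spec above verifies two interactions: resolving the client via AmazonClientProvider and submitting a DeleteStackRequest; its second feature method checks that service exceptions escape the operation. In AWS SDK v1 terms the verified call is roughly the following (a sketch, not the operation's actual code; deleteStackHint is a hypothetical helper):

    import com.amazonaws.services.cloudformation.AmazonCloudFormation
    import com.amazonaws.services.cloudformation.model.DeleteStackRequest

    // Hypothetical shape of the call under test; exceptions are intentionally
    // left uncaught so they propagate, as the second feature method expects.
    static void deleteStackHint(AmazonCloudFormation cloudFormation, String stackName) {
      cloudFormation.deleteStack(new DeleteStackRequest().withStackName(stackName))
    }
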
+ */
+
+package com.netflix.spinnaker.clouddriver.aws.deploy.ops
+
+import com.amazonaws.AmazonServiceException
+import com.amazonaws.services.cloudformation.AmazonCloudFormation
+import com.amazonaws.services.cloudformation.model.*
+import com.netflix.spinnaker.clouddriver.aws.TestCredential
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteCloudFormationChangeSetDescription
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
+import com.netflix.spinnaker.clouddriver.data.task.Task
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
+import spock.lang.Specification
+
+class DeleteCloudFormationChangeSetAtomicOperationSpec extends Specification {
+  void setupSpec() {
+    TaskRepository.threadLocalTask.set(Mock(Task))
+  }
+
+  void "should build a DeleteChangeSetRequest and submit through aws client"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def deleteChangeSetResult = Mock(DeleteChangeSetResult)
+    def op = new DeleteCloudFormationChangeSetAtomicOperation(
+      new DeleteCloudFormationChangeSetDescription(
+        [
+          stackName: "stackTest",
+          changeSetName: "changeSetName",
+          region: "eu-west-1",
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.deleteChangeSet(_) >> { DeleteChangeSetRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getChangeSetName() == "changeSetName"
+      deleteChangeSetResult
+    }
+  }
+
+  void "should propagate exceptions when deleting the change set"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def op = new DeleteCloudFormationChangeSetAtomicOperation(
+      new DeleteCloudFormationChangeSetDescription(
+        [
+          stackName: "stackTest",
+          changeSetName: "changeSetName",
+          region: "eu-west-1",
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    def exception = new AmazonServiceException("error")
+
+    when:
+    try {
+      op.operate([])
+    }
+    catch (Exception e) {
+      assert e instanceof AmazonServiceException
+    }
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.deleteChangeSet(_) >> {
+      throw exception
+    }
+  }
+
+
+}
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteScalingPolicyAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteScalingPolicyAtomicOperationUnitSpec.groovy
index dcf054a34e3..27971288333 100644
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteScalingPolicyAtomicOperationUnitSpec.groovy
+++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeleteScalingPolicyAtomicOperationUnitSpec.groovy
@@ -47,21 +47,21 @@ class DeleteScalingPolicyAtomicOperationUnitSpec extends Specification {
     TaskRepository.threadLocalTask.set(Mock(Task))
   }

-  final description = new DeleteScalingPolicyDescription(
+  def description = new DeleteScalingPolicyDescription(
     serverGroupName: "kato-main-v000",
     policyName: "scalingPolicy1",
     region: "us-west-1",
     credentials: credz
   )
-  final autoScaling = Mock(AmazonAutoScaling)
-  final cloudWatch =
Mock(AmazonCloudWatch) - final amazonClientProvider = Stub(AmazonClientProvider) { + def autoScaling = Mock(AmazonAutoScaling) + def cloudWatch = Mock(AmazonCloudWatch) + def amazonClientProvider = Stub(AmazonClientProvider) { getAutoScaling(credz, "us-west-1", true) >> autoScaling getCloudWatch(credz, "us-west-1", true) >> cloudWatch } - @Subject final op = new DeleteScalingPolicyAtomicOperation(description) + @Subject def op = new DeleteScalingPolicyAtomicOperation(description) def setup() { op.amazonClientProvider = amazonClientProvider diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeployCloudFormationAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeployCloudFormationAtomicOperationSpec.groovy new file mode 100644 index 00000000000..90fe54266d3 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DeployCloudFormationAtomicOperationSpec.groovy @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops + +import com.amazonaws.services.cloudformation.AmazonCloudFormation +import com.amazonaws.services.cloudformation.model.AmazonCloudFormationException +import com.amazonaws.services.cloudformation.model.ChangeSetType +import com.amazonaws.services.cloudformation.model.CreateChangeSetRequest +import com.amazonaws.services.cloudformation.model.CreateChangeSetResult +import com.amazonaws.services.cloudformation.model.CreateStackRequest +import com.amazonaws.services.cloudformation.model.CreateStackResult +import com.amazonaws.services.cloudformation.model.DescribeStacksResult +import com.amazonaws.services.cloudformation.model.Parameter +import com.amazonaws.services.cloudformation.model.Stack +import com.amazonaws.services.cloudformation.model.Tag +import com.amazonaws.services.cloudformation.model.UpdateStackRequest +import com.amazonaws.services.cloudformation.model.UpdateStackResult +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeployCloudFormationDescription +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import spock.lang.Specification +import spock.lang.Unroll + +class DeployCloudFormationAtomicOperationSpec extends Specification { + void setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + } + + @Unroll + void "should build a CreateStackRequest if stack doesn't exist and submit through aws client"() { + given: + def amazonClientProvider = Mock(AmazonClientProvider) + def amazonCloudFormation = Mock(AmazonCloudFormation) + def 
createStackResult = Mock(CreateStackResult)
+    def stackId = "stackId"
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: '{"key":"value"}',
+          roleARN: roleARN,
+          parameters: [ key: "value"],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.describeStacks(_) >> { throw new IllegalArgumentException() }
+    1 * amazonCloudFormation.createStack(_) >> { CreateStackRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getTemplateBody() == '{"key":"value"}'
+      assert request.getRoleARN() == expectedRoleARN
+      assert request.getParameters() == [ new Parameter().withParameterKey("key").withParameterValue("value") ]
+      assert request.getTags() == [ new Tag().withKey("key").withValue("value") ]
+      assert request.getCapabilities() == ["cap1", "cap2"]
+      createStackResult
+    }
+    1 * createStackResult.getStackId() >> stackId
+
+    where:
+    roleARN                              || expectedRoleARN
+    "arn:aws:iam:123456789012:role/test" || "arn:aws:iam:123456789012:role/test"
+    ""                                   || null
+    " "                                  || null
+    null                                 || null
+  }
+
+  @Unroll
+  void "should build an UpdateStackRequest if stack exists and submit through aws client"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def updateStackResult = Mock(UpdateStackResult)
+    def stackId = "stackId"
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: '{"key":"value"}',
+          roleARN: roleARN,
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.describeStacks(_) >> {
+      new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as Collection)
+    }
+    1 * amazonCloudFormation.updateStack(_) >> { UpdateStackRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getTemplateBody() == '{"key":"value"}'
+      assert request.getRoleARN() == expectedRoleARN
+      assert request.getParameters() == [ new Parameter().withParameterKey("key").withParameterValue("value") ]
+      assert request.getTags() == [ new Tag().withKey("key").withValue("value") ]
+      assert request.getCapabilities() == ["cap1", "cap2"]
+      updateStackResult
+    }
+    1 * updateStackResult.getStackId() >> stackId
+
+    where:
+    roleARN                              || expectedRoleARN
+    "arn:aws:iam:123456789012:role/test" || "arn:aws:iam:123456789012:role/test"
+    ""                                   || null
+    " "                                  || null
+    null                                 || null
+  }
+
+  @Unroll
+  void "should build a CreateChangeSetRequest if it's a changeset and submit through aws client"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def createChangeSetResult = Mock(CreateChangeSetResult)
+
+    def awsConfigurationProperties = new AwsConfigurationProperties()
+
+    def stackId = "stackId"
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: 'key: "value"',
+          roleARN: roleARN,
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test"),
+          isChangeSet: true,
+          changeSetName: "changeSetTest"
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.awsConfigurationProperties = awsConfigurationProperties
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.describeStacks(_) >> {
+      if (existingStack) {
+        new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as Collection)
+      } else {
+        new DescribeStacksResult().withStacks([] as Collection)
+      }
+    }
+    1 * amazonCloudFormation.createChangeSet(_) >> { CreateChangeSetRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getTemplateBody() == 'key: "value"'
+      assert request.getRoleARN() == expectedRoleARN
+      assert request.getParameters() == [ new Parameter().withParameterKey("key").withParameterValue("value") ]
+      assert request.getTags() == [ new Tag().withKey("key").withValue("value") ]
+      assert request.getCapabilities() == ["cap1", "cap2"]
+      assert request.getChangeSetName() == "changeSetTest"
+      assert request.getChangeSetType() == changeSetType
+      assert request.isIncludeNestedStacks() == false
+      createChangeSetResult
+    }
+    1 * createChangeSetResult.getStackId() >> stackId
+
+    where:
+    roleARN                              | expectedRoleARN                      | existingStack || changeSetType
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | true          || ChangeSetType.UPDATE.toString()
+    ""                                   | null                                 | true          || ChangeSetType.UPDATE.toString()
+    " "                                  | null                                 | true          || ChangeSetType.UPDATE.toString()
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | true          || ChangeSetType.UPDATE.toString()
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | false         || ChangeSetType.CREATE.toString()
+  }
+
+  @Unroll
+  void "should build a CreateChangeSetRequest with templateURL if set"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def createChangeSetResult = Mock(CreateChangeSetResult)
+
+    def awsConfigurationProperties = new AwsConfigurationProperties()
+
+    def stackId = "stackId"
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateURL: 's3://my-bucket/cfn/my-template.yaml',
+          roleARN: roleARN,
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test"),
+          isChangeSet: true,
+          changeSetName: "changeSetTest"
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.awsConfigurationProperties = awsConfigurationProperties
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.describeStacks(_) >> {
+      if (existingStack) {
+        new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as Collection)
+      } else {
+        new DescribeStacksResult().withStacks([] as Collection)
+      }
+    }
+    1 * amazonCloudFormation.createChangeSet(_) >> { CreateChangeSetRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getTemplateBody() == null
+      assert request.getTemplateURL() == 's3://my-bucket/cfn/my-template.yaml'
+      assert request.getRoleARN() == expectedRoleARN
+      assert request.getParameters() == [ new Parameter().withParameterKey("key").withParameterValue("value") ]
+      assert request.getTags() == [ new Tag().withKey("key").withValue("value") ]
+      assert request.getCapabilities() == ["cap1", "cap2"]
+      assert request.getChangeSetName() == "changeSetTest"
+      assert request.getChangeSetType() == changeSetType
+      assert request.isIncludeNestedStacks() == false
+      createChangeSetResult
+    }
+    1 * createChangeSetResult.getStackId() >> stackId
+
+    where:
+    roleARN                              | expectedRoleARN                      | existingStack || changeSetType
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | true          || ChangeSetType.UPDATE.toString()
+    ""                                   | null                                 | true          || ChangeSetType.UPDATE.toString()
+    " "                                  | null                                 | true          || ChangeSetType.UPDATE.toString()
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | true          || ChangeSetType.UPDATE.toString()
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | false         || ChangeSetType.CREATE.toString()
+  }
+
+  @Unroll
+  void "should build a CreateChangeSetRequest with includeNestedStacks if set"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def createChangeSetResult = Mock(CreateChangeSetResult)
+
+    def awsConfigurationProperties = new AwsConfigurationProperties()
+    awsConfigurationProperties.cloudformation.changeSetsIncludeNestedStacks = true
+
+    def stackId = "stackId"
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: 'key: "value"',
+          roleARN: roleARN,
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test"),
+          isChangeSet: true,
+          changeSetName: "changeSetTest"
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.awsConfigurationProperties = awsConfigurationProperties
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.describeStacks(_) >> {
+      if (existingStack) {
+        new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as Collection)
+      } else {
+        new DescribeStacksResult().withStacks([] as Collection)
+      }
+    }
+    1 * amazonCloudFormation.createChangeSet(_) >> { CreateChangeSetRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getTemplateBody() == 'key: "value"'
+      assert request.getRoleARN() == expectedRoleARN
+      assert request.getParameters() == [ new Parameter().withParameterKey("key").withParameterValue("value") ]
+      assert request.getTags() == [ new Tag().withKey("key").withValue("value") ]
+      assert request.getCapabilities() == ["cap1", "cap2"]
+      assert request.getChangeSetName() == "changeSetTest"
+      assert request.getChangeSetType() == changeSetType
+      assert request.isIncludeNestedStacks() == true
+      createChangeSetResult
+    }
+    1 * createChangeSetResult.getStackId() >> stackId
+
+    where:
+    roleARN                              | expectedRoleARN                      | existingStack || changeSetType
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | true          || ChangeSetType.UPDATE.toString()
+    ""                                   | null                                 | true          || ChangeSetType.UPDATE.toString()
+    " "                                  | null                                 | true          || ChangeSetType.UPDATE.toString()
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | true          || ChangeSetType.UPDATE.toString()
+    "arn:aws:iam:123456789012:role/test" | "arn:aws:iam:123456789012:role/test" | false         || ChangeSetType.CREATE.toString()
+  }
+
+  @Unroll
+  void "should fail when AWS fails to update stack"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: '{"key":"value"}',
+          roleARN: "arn:aws:iam:123456789012:role/test",
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.describeStacks(_) >> {
+      new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as Collection)
+    }
+    1 * amazonCloudFormation.updateStack(_) >> { throw new AmazonCloudFormationException() }
+    thrown(AmazonCloudFormationException)
+  }
+
+  @Unroll
+  void "should succeed when updating stack and no change is needed"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: '{"key":"value"}',
+          roleARN: "arn:aws:iam:123456789012:role/test",
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    2 * amazonCloudFormation.describeStacks(_) >> {
+      new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as Collection)
+    }
+    1 * amazonCloudFormation.updateStack(_) >> { throw new AmazonCloudFormationException("No updates are to be performed") }
+  }
+
+  @Unroll
+  void "should fail when template is invalid"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def op = new DeployCloudFormationAtomicOperation(
+      new DeployCloudFormationDescription(
+        [
+          stackName: "stackTest",
+          region: "eu-west-1",
+          templateBody: '{"key":"value"}',
+          roleARN: "arn:aws:iam:123456789012:role/test",
+          parameters: [ key: "value" ],
+          tags: [ key: "value" ],
+          capabilities: ["cap1", "cap2"],
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    op.objectMapper = new ObjectMapper()
+
+    when:
+    op.operate([])
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.validateTemplate(_) >> { throw new AmazonCloudFormationException() }
+    thrown(AmazonCloudFormationException)
+  }
+
+}
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperationUnitSpec.groovy
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperationUnitSpec.groovy index 2596cbbd9f6..eee478c4add 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DestroyAsgAtomicOperationUnitSpec.groovy @@ -21,7 +21,11 @@ import com.amazonaws.services.autoscaling.model.DeleteAutoScalingGroupRequest import com.amazonaws.services.autoscaling.model.DeleteLaunchConfigurationRequest import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult import com.amazonaws.services.autoscaling.model.Instance +import com.amazonaws.services.autoscaling.model.LaunchTemplate +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateRequest import com.amazonaws.services.ec2.model.TerminateInstancesRequest import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.DestroyAsgDescription @@ -91,6 +95,50 @@ class DestroyAsgAtomicOperationUnitSpec extends Specification { 0 * mockAutoScaling._ } + void "should delete ASG and Launch Template and terminate instances"() { + setup: + def op = new DestroyAsgAtomicOperation( + new DestroyAsgDescription( + asgs: [[ + serverGroupName: "my-stack-v000", + region : "us-east-1" + ]], + credentials: TestCredential.named('baz'))) + op.amazonClientProvider = provider + + when: + op.operate([]) + + then: + 1 * mockAutoScaling.describeAutoScalingGroups(_) >> new DescribeAutoScalingGroupsResult(autoScalingGroups: [asg]) + 1 * mockAutoScaling.deleteAutoScalingGroup( + new DeleteAutoScalingGroupRequest(autoScalingGroupName: "my-stack-v000", forceDelete: true)) + 1 * mockEC2.deleteLaunchTemplate( + new DeleteLaunchTemplateRequest(launchTemplateId: "lt-1")) + 1 * mockEC2.terminateInstances(new TerminateInstancesRequest(instanceIds: ["i-123456"])) + 0 * mockAutoScaling._ + + where: + asg << [ + new AutoScalingGroup( + instances: [new Instance(instanceId: "i-123456")], + launchTemplate: new LaunchTemplateSpecification() + .withLaunchTemplateId("lt-1") + .withVersion("1") + ), + new AutoScalingGroup( + instances: [new Instance(instanceId: "i-123456")], + mixedInstancesPolicy: new MixedInstancesPolicy( + launchTemplate: new LaunchTemplate( + launchTemplateSpecification: new LaunchTemplateSpecification() + .withLaunchTemplateId("lt-1") + .withVersion("1") + ) + ) + ) + ] + } + void "should not delete launch config when not available"() { setup: def op = new DestroyAsgAtomicOperation( diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperationSpec.groovy index cbaea2eaa80..77741d9e49f 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperationSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DetachInstancesAtomicOperationSpec.groovy @@ -118,7 +118,7 @@ class DetachInstancesAtomicOperationSpec extends Specification { def description = new DetachInstancesDescription( instanceIds: ["i-000001", "i-000002"], 
terminateDetachedInstances: false, - decrementDesiredCapacity: false, + decrementDesiredCapacity: true, adjustMinIfNecessary: true ) @@ -146,14 +146,17 @@ class DetachInstancesAtomicOperationSpec extends Specification { request.minSize == 0 } as UpdateAutoScalingGroupRequest) 1 * amazonAutoScaling.detachInstances({ DetachInstancesRequest request -> - request.instanceIds == ["i-000001"] && !request.shouldDecrementDesiredCapacity + request.instanceIds == ["i-000001"] && request.shouldDecrementDesiredCapacity } as DetachInstancesRequest) 0 * _ } void "should fail if minSize adjustment is necessary but not allowed"() { given: - def description = new DetachInstancesDescription(instanceIds: ["i-000001", "i-000002"]) + def description = new DetachInstancesDescription( + instanceIds: ["i-000001", "i-000002"], + decrementDesiredCapacity: true + ) and: def operation = new DetachInstancesAtomicOperation(description) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DisableAsgAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DisableAsgAtomicOperationUnitSpec.groovy index c72a4e4ba32..b21f54f6ccf 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DisableAsgAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/DisableAsgAtomicOperationUnitSpec.groovy @@ -29,8 +29,8 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.ops.discovery.AwsEurekaSuppo import com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus import com.netflix.spinnaker.clouddriver.data.task.TaskState -import com.netflix.spinnaker.clouddriver.eureka.model.EurekaApplication -import com.netflix.spinnaker.clouddriver.eureka.model.EurekaInstance +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import retrofit.RetrofitError import retrofit.client.Response import spock.lang.Unroll @@ -96,7 +96,7 @@ class DisableAsgAtomicOperationUnitSpec extends EnableDisableAtomicOperationUnit ] ] 1 * eureka.updateInstanceStatus('asg1', 'i1', 'OUT_OF_SERVICE') >> new Response('http://foo', 200, 'OK', [], null) - 2 * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + 2 * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 0 * task.fail() } @@ -113,7 +113,7 @@ class DisableAsgAtomicOperationUnitSpec extends EnableDisableAtomicOperationUnit then: 1 * amazonEc2.describeInstances(_) >> describeInstanceResult - 2 * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + 2 * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 1 * asgService.getAutoScalingGroup(_) >> asg 1 * eureka.getInstanceInfo('i1') >> [ @@ -121,7 +121,37 @@ class DisableAsgAtomicOperationUnitSpec extends EnableDisableAtomicOperationUnit app: "asg1" ] ] - 1 * eureka.updateInstanceStatus('asg1', 'i1', 'OUT_OF_SERVICE') + 1 * eureka.updateInstanceStatus('asg1', 'i1', 'OUT_OF_SERVICE') >> new Response('http://foo', 200, 'OK', [], null) + } + + def 'should not fail because of discovery errors on disable'() { + given: + def asg = Mock(AutoScalingGroup) + asg.getInstances() >> [new com.amazonaws.services.autoscaling.model.Instance().withInstanceId("i1").withLifecycleState("InService")] + def instance = new Instance().withState(new InstanceState().withName("running")).withInstanceId("i1") + def describeInstanceResult = 
Mock(DescribeInstancesResult) + describeInstanceResult.getReservations() >> [new Reservation().withInstances(instance)] + + eureka.updateInstanceStatus('asg1', 'i1', 'OUT_OF_SERVICE') >> { + throw new SpinnakerHttpException(new RetrofitError("error", "url", + new Response("url", 503, "service unavailable", [], null), + null, null, null, null)) + } + + when: + op.operate([]) + + then: + _ * amazonEc2.describeInstances(_) >> describeInstanceResult + _ * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) + _ * asgService.getAutoScalingGroup(_) >> asg + _ * eureka.getInstanceInfo('i1') >> + [ + instance: [ + app: "asg1" + ] + ] + 0 * task.fail() } def 'should skip discovery if not enabled for account'() { @@ -198,7 +228,7 @@ class DisableAsgAtomicOperationUnitSpec extends EnableDisableAtomicOperationUnit null | null || 4 } - @Unroll("Should invoke supend process #invocations times when desiredPercentage is #desiredPercentage") + @Unroll("Should invoke suspend process #invocations times when desiredPercentage is #desiredPercentage") void 'should suspend processes only if desired percentage is null or 100'() { given: def asg = Mock(AutoScalingGroup) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableAsgAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableAsgAtomicOperationUnitSpec.groovy index 6fcc5562db3..2be187ac1c0 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableAsgAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableAsgAtomicOperationUnitSpec.groovy @@ -26,6 +26,7 @@ import com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus import com.netflix.spinnaker.clouddriver.data.task.TaskState import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport +import retrofit.client.Response class EnableAsgAtomicOperationUnitSpec extends EnableDisableAtomicOperationUnitSpecSupport { @@ -78,15 +79,16 @@ class EnableAsgAtomicOperationUnitSpec extends EnableDisableAtomicOperationUnitS then: 1 * amazonEc2.describeInstances(_) >> describeInstanceResult - 2 * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + 2 * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 1 * asgService.getAutoScalingGroup(_) >> asg 1 * eureka.getInstanceInfo('i1') >> [ instance: [ - app: "asg1" + app: "asg1", + status: "OUT_OF_SERVICE" ] ] - 1 * eureka.resetInstanceStatus('asg1', 'i1', AbstractEurekaSupport.DiscoveryStatus.Disable.value) + 1 * eureka.resetInstanceStatus("asg1", "i1", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> new Response('http://foo', 200, '', [], null) } def 'should skip discovery if not enabled for account'() { diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableDisableAtomicOperationUnitSpecSupport.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableDisableAtomicOperationUnitSpecSupport.groovy index 6e46681310b..05bcab04146 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableDisableAtomicOperationUnitSpecSupport.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/EnableDisableAtomicOperationUnitSpecSupport.groovy @@ -94,6 +94,8 @@ abstract 
class EnableDisableAtomicOperationUnitSpecSupport extends Specification } op.discoverySupport.regionScopedProviderFactory = regionScopedProviderFactory op.discoverySupport.eurekaSupportConfigurationProperties = new EurekaSupportConfigurationProperties() + op.discoverySupport.eurekaSupportConfigurationProperties.retryIntervalMillis = 0 + op.discoverySupport.eurekaSupportConfigurationProperties.throttleMillis = 0 op.regionScopedProviderFactory = regionScopedProviderFactory } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ExecuteCloudFormationChangeSetAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ExecuteCloudFormationChangeSetAtomicOperationSpec.groovy new file mode 100644 index 00000000000..6505fe05034 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ExecuteCloudFormationChangeSetAtomicOperationSpec.groovy @@ -0,0 +1,104 @@ +/* + * Copyright 2019 Adevinta. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops + +import com.amazonaws.AmazonServiceException +import com.amazonaws.services.cloudformation.AmazonCloudFormation +import com.amazonaws.services.cloudformation.model.ExecuteChangeSetRequest +import com.amazonaws.services.cloudformation.model.DescribeStacksResult +import com.amazonaws.services.cloudformation.model.ExecuteChangeSetResult +import com.amazonaws.services.cloudformation.model.Stack +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ExecuteCloudFormationChangeSetDescription +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import spock.lang.Specification + +class ExecuteCloudFormationChangeSetAtomicOperationSpec extends Specification { + void setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + } + + void "should build a executeChangeSetRequest and submit it through aws client"() { + given: + def amazonClientProvider = Mock(AmazonClientProvider) + def amazonCloudFormation = Mock(AmazonCloudFormation) + def executeChangeSetResult = Mock(ExecuteChangeSetResult) + + def stackId = "stackId" + def op = new ExecuteCloudFormationChangeSetAtomicOperation( + new ExecuteCloudFormationChangeSetDescription( + [ + stackName: "stackTest", + changeSetName: "changeSetName", + region: "eu-west-1", + credentials: TestCredential.named("test") + ] + ) + ) + op.amazonClientProvider = amazonClientProvider + op.objectMapper = new ObjectMapper() + + when: + op.operate([]) + + then: + 1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> { + new DescribeStacksResult().withStacks([new Stack().withStackId("stackId")] as 
Collection)
+    }
+    1 * amazonCloudFormation.executeChangeSet(_) >> { ExecuteChangeSetRequest request ->
+      assert request.getStackName() == "stackTest"
+      assert request.getChangeSetName() == "changeSetName"
+      executeChangeSetResult
+    }
+  }
+
+  void "should propagate exceptions when executing the change set"() {
+    given:
+    def amazonClientProvider = Mock(AmazonClientProvider)
+    def amazonCloudFormation = Mock(AmazonCloudFormation)
+    def op = new ExecuteCloudFormationChangeSetAtomicOperation(
+      new ExecuteCloudFormationChangeSetDescription(
+        [
+          stackName: "stackTest",
+          changeSetName: "changeSetName",
+          region: "eu-west-1",
+          credentials: TestCredential.named("test")
+        ]
+      )
+    )
+    op.amazonClientProvider = amazonClientProvider
+    def exception = new AmazonServiceException("error")
+
+    when:
+    try {
+      op.operate([])
+    }
+    catch (Exception e) {
+      assert e instanceof AmazonServiceException
+    }
+
+    then:
+    1 * amazonClientProvider.getAmazonCloudFormation(_, _) >> amazonCloudFormation
+    1 * amazonCloudFormation.executeChangeSet(_) >> {
+      throw exception
+    }
+  }
+}
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/MigrateClusterConfigurationsAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/MigrateClusterConfigurationsAtomicOperationSpec.groovy
deleted file mode 100644
index 774c73a62bc..00000000000
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/MigrateClusterConfigurationsAtomicOperationSpec.groovy
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2016 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
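
The ExecuteCloudFormationChangeSet spec above stubs describeStacks before expecting executeChangeSet, so the operation presumably resolves the stack first and then submits the change set. A sketch of that two-step flow under those assumptions (executeChangeSetHint is illustrative, not the operation's code):

    import com.amazonaws.services.cloudformation.AmazonCloudFormation
    import com.amazonaws.services.cloudformation.model.DescribeStacksRequest
    import com.amazonaws.services.cloudformation.model.ExecuteChangeSetRequest

    static void executeChangeSetHint(AmazonCloudFormation cf, String stackName, String changeSetName) {
      // Confirm the stack exists (the spec returns one stack from describeStacks).
      cf.describeStacks(new DescribeStacksRequest().withStackName(stackName))
      // Then execute the named change set against it.
      cf.executeChangeSet(new ExecuteChangeSetRequest()
        .withStackName(stackName)
        .withChangeSetName(changeSetName))
    }
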
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops - -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.description.MigrateClusterConfigurationsDescription -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateClusterConfigurationStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.ClusterConfigurationMigrator.ClusterConfiguration -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateClusterConfigurationResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.servergroup.MigrateClusterConfigurationsAtomicOperation -import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory.RegionScopedProvider -import com.netflix.spinnaker.clouddriver.data.task.DefaultTask -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import spock.lang.Shared -import spock.lang.Specification - -import javax.inject.Provider - -class MigrateClusterConfigurationsAtomicOperationSpec extends Specification { - - Task task - - @Shared - NetflixAmazonCredentials testCredentials = TestCredential.named('test') - - @Shared - NetflixAmazonCredentials prodCredentials = TestCredential.named('prod') - - def setup() { - task = new DefaultTask("taskId") - TaskRepository.threadLocalTask.set(task) - } - - void 'performs no migrations when clusters are not matched on account, region, or subnet mappings'() { - given: - def clusters = [ - [ application: 'theapp', - availabilityZones: ['us-east-1': ['us-east-1c']], - region: 'us-east-1', - account: 'test', - iamRole: 'iam', - keyPair: 'kp-1', - credentials: testCredentials - ], - [ application: 'theapp', - availabilityZones: ['us-east-1': ['us-east-1c']], - region: 'us-east-1', - account: 'prod', - iamRole: 'iam2', - keyPair: 'kp-2', - credentials: prodCredentials - ] - ] - def description = new MigrateClusterConfigurationsDescription( - sources: clusters.collect { new ClusterConfiguration(cluster: it)}, - subnetTypeMapping: [ 'old-internal': 'internal'], - accountMapping: [ 'other': 'someOther'], - regionMapping: ['us-west-1':['us-east-1':['us-east-1c']]], - iamRoleMapping: ['iam': 'iam3', 'iam2': 'iam4'] - ) - def operation = new MigrateClusterConfigurationsAtomicOperation(description) - - when: - operation.operate([]) - - then: - task.resultObjects.size() == 2 - task.resultObjects.cluster == clusters - 0 * _ - } - - void 'migrates matched clusters, reusing security group lookups and subnet analyzers when possible'() { - given: - def clusters = [ - [ application: 'theapp', - availabilityZones: ['us-east-1': ['us-east-1c']], - region: 'us-east-1', - account: 'test', - iamRole: 'iam', - keyPair: 'kp-1', - credentials: testCredentials - ], - [ application: 'theapp', - stack: 'a', - availabilityZones: ['us-east-1': ['us-east-1c']], - region: 'us-east-1', - account: 'prod', - iamRole: 'iam2', - keyPair: 'kp-2', - credentials: prodCredentials - ], - [ application: 'theapp', - stack: 'b', - availabilityZones: 
['us-east-1': ['us-east-1c']], - region: 'us-east-1', - account: 'prod', - iamRole: 'iam3', - keyPair: 'kp-3', - credentials: prodCredentials - ] - ] - def source1 = new ClusterConfiguration(cluster: clusters[0]) - def source2 = new ClusterConfiguration(cluster: clusters[1]) - def source3 = new ClusterConfiguration(cluster: clusters[2]) - def description = new MigrateClusterConfigurationsDescription( - elbSubnetTypeMapping: [ (MigrateClusterConfigurationsAtomicOperation.CLASSIC_SUBNET_KEY): 'external' ], - sources: [source1, source2, source3], - subnetTypeMapping: [ (MigrateClusterConfigurationsAtomicOperation.CLASSIC_SUBNET_KEY): 'internal']) - def operation = new MigrateClusterConfigurationsAtomicOperation(description) - - SecurityGroupLookup lookup = Mock(SecurityGroupLookup) { - 1 * getCredentialsForName('test') >> testCredentials - 2 * getCredentialsForName('prod') >> prodCredentials - } - SecurityGroupLookupFactory securityGroupLookupFactory = Mock(SecurityGroupLookupFactory) { - 1 * getInstance('us-east-1', false) >> lookup - } - SubnetAnalyzer testSubnetAnalyzer = Mock(SubnetAnalyzer) { - 1 * getVpcIdForSubnetPurpose('internal') >> 'vpc-test' - } - SubnetAnalyzer prodSubnetAnalyzer = Mock(SubnetAnalyzer) { - 2 * getVpcIdForSubnetPurpose('internal') >> 'vpc-prod' - } - RegionScopedProvider testScopedProvider = Mock(RegionScopedProvider) { - 1 * getSubnetAnalyzer() >> testSubnetAnalyzer - } - RegionScopedProvider prodScopedProvider = Mock(RegionScopedProvider) { - 1 * getSubnetAnalyzer() >> prodSubnetAnalyzer - } - RegionScopedProviderFactory regionScopedProviderFactory = Mock(RegionScopedProviderFactory) { - 1 * forRegion(testCredentials, 'us-east-1') >> testScopedProvider - 1 * forRegion(prodCredentials, 'us-east-1') >> prodScopedProvider - } - MigrateClusterConfigurationStrategy clusterMigrateStrategy = Mock(MigrateClusterConfigurationStrategy) - - operation.regionScopedProviderFactory = regionScopedProviderFactory; - operation.securityGroupLookupFactory = securityGroupLookupFactory; - operation.migrationStrategy = Stub(Provider) { - get() >> clusterMigrateStrategy - } - operation.migrateLoadBalancerStrategy = Mock(Provider) - operation.migrateSecurityGroupStrategy = Mock(Provider) - - when: - operation.operate([]) - - then: - 1 * clusterMigrateStrategy.generateResults(source1, { - it.region == 'us-east-1' && it.credentials == testCredentials && it.vpcId == 'vpc-test' - }, lookup, lookup, _, _, 'internal', 'external', 'iam', 'kp-1', [:], false, false) >> new MigrateClusterConfigurationResult() - 1 * clusterMigrateStrategy.generateResults(source2, { - it.region == 'us-east-1' && it.credentials == prodCredentials && it.vpcId == 'vpc-prod' - }, lookup, lookup, _, _, 'internal', 'external', 'iam2', 'kp-2', [:], false, false) >> new MigrateClusterConfigurationResult() - 1 * clusterMigrateStrategy.generateResults(source3, { - it.region == 'us-east-1' && it.credentials == prodCredentials && it.vpcId == 'vpc-prod' - }, lookup, lookup, _, _, 'internal', 'external', 'iam3', 'kp-3', [:], false, false) >> new MigrateClusterConfigurationResult() - task.resultObjects.size() == 3 - } -} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperationSpec.groovy index 4fe8e88bfa3..785eb7ade67 100644 --- 
a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperationSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ModifyAsgLaunchConfigurationOperationSpec.groovy @@ -25,14 +25,16 @@ import com.amazonaws.services.ec2.model.DescribeImagesRequest import com.amazonaws.services.ec2.model.DescribeImagesResult import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult import com.amazonaws.services.ec2.model.Image +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride import com.netflix.spinnaker.config.AwsConfiguration -import com.netflix.spinnaker.clouddriver.aws.deploy.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchConfigurationBuilder import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.amazonaws.services.ec2.model.VpcClassicLink import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.LaunchConfigurationBuilder + import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyAsgLaunchConfigurationDescription import com.netflix.spinnaker.clouddriver.aws.services.AsgService import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory @@ -102,9 +104,9 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing @@ -121,22 +123,22 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { newLc = "$asgName-20150516".toString() existingAmi = 'ami-f000fee' iamRole = 'BaseIAMRole' - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - classicLinkVpcId: 'vpc-456', - region: region, - baseName: asgName, - suffix: suffix, - ami: existingAmi, - iamRole: iamRole, - instanceType: 'm3.xlarge', - keyPair: 'sekret', - associatePublicIpAddress: false, - ebsOptimized: true, - securityGroups: ['sg-12345', 'sg-34567'] - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .classicLinkVpcId('vpc-456') + .region(region) + .baseName(asgName) + .suffix(suffix) + .ami(existingAmi) + .iamRole(iamRole) + .instanceType('m3.xlarge') + .keyPair('sekret') + .associatePublicIpAddress(false) + .ebsOptimized(true) + .securityGroups(['sg-12345', 'sg-34567']) + .build() } void 'should apply description fields over existing settings'() { @@ -152,21 +154,23 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act 
== credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert appName == app assert subnetType == null assert settings.ami == newAmi assert settings.iamRole == existing.iamRole assert settings.suffix == null assert legacyUdf == null + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -186,21 +190,21 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { existingLc = "$asgName-$suffix".toString() newLc = "$asgName-20150516".toString() newAmi = 'ami-f000fee' - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - ami: 'ami-f111f333', - iamRole: 'BaseIAMRole', - instanceType: 'm3.xlarge', - keyPair: 'sekret', - associatePublicIpAddress: false, - ebsOptimized: true, - securityGroups: ['sg-12345', 'sg-34567'] - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .ami('ami-f111f333') + .iamRole('BaseIAMRole') + .instanceType('m3.xlarge') + .keyPair('sekret') + .associatePublicIpAddress(false) + .ebsOptimized(true) + .securityGroups(['sg-12345', 'sg-34567']) + .build() } void 'should disable monitoring if instance monitoring goes from enabled to disabled'() { @@ -216,19 +220,21 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert appName == app assert subnetType == null assert settings.suffix == null assert settings.instanceMonitoring == false assert legacyUdf == null + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -250,15 +256,15 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { newLc = "$asgName-20150516".toString() existingAmi = 'ami-f000fee' iamRole = 'BaseIAMRole' - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - instanceMonitoring: true, - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .instanceMonitoring(true) + .build() } void 'should attach classic linked VPC'() { @@ -274,19 +280,21 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * 
asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert appName == app assert subnetType == null assert settings.suffix == null assert settings.classicLinkVpcId == "vpc-456" assert legacyUdf == null + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -301,15 +309,15 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { newLc = "$asgName-20150516".toString() existingAmi = 'ami-f000fee' iamRole = 'BaseIAMRole' - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - instanceMonitoring: true, - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .instanceMonitoring(true) + .build() } void 'should append security groups if flag is set'() { @@ -326,15 +334,17 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert settings.securityGroups == ['sg-1', 'sg-2', 'sg-3'] + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -348,16 +358,16 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { newLc = "$asgName-20150516".toString() existingAmi = 'ami-f000fee' iamRole = 'BaseIAMRole' - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - instanceMonitoring: true, - securityGroups: ['sg-1', 'sg-2'] - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .instanceMonitoring(true) + .securityGroups(['sg-1', 'sg-2']) + .build() } void 'should reset non customized block devices when changing instance type'() { @@ -373,15 +383,15 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * 
lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert appName == app assert subnetType == null assert settings.iamRole == existing.iamRole @@ -389,6 +399,8 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { assert legacyUdf == null assert settings.instanceType == 'm4.xlarge' assert settings.blockDevices == blockDeviceConfig.getBlockDevicesForInstanceType('m4.xlarge') + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -407,22 +419,22 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { suffix = '20150515' existingLc = "$asgName-$suffix".toString() newLc = "$asgName-20150516".toString() - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - ami: 'ami-f111f333', - iamRole: 'BaseIAMRole', - instanceType: 'm3.xlarge', - blockDevices: blockDeviceConfig.getBlockDevicesForInstanceType('m3.xlarge'), - keyPair: 'sekret', - associatePublicIpAddress: false, - ebsOptimized: true, - securityGroups: ['sg-12345', 'sg-34567'] - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .ami('ami-f111f333') + .iamRole('BaseIAMRole') + .instanceType('m3.xlarge') + .blockDevices(blockDeviceConfig.getBlockDevicesForInstanceType('m3.xlarge')) + .keyPair('sekret') + .associatePublicIpAddress(false) + .ebsOptimized(true) + .securityGroups(['sg-12345', 'sg-34567']) + .build() } void 'should not reset custom block devices when changing instance type'() { @@ -438,15 +450,15 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert appName == app assert subnetType == null assert settings.iamRole == existing.iamRole @@ -454,6 +466,8 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { assert legacyUdf == null assert settings.instanceType == 'm4.xlarge' assert settings.blockDevices == blockDevices + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -473,22 +487,22 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { existingLc = "$asgName-$suffix".toString() newLc = 
"$asgName-20150516".toString() blockDevices = [new AmazonBlockDevice(deviceName: '/dev/sdb', size: 500)] - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - ami: 'ami-f111f333', - iamRole: 'BaseIAMRole', - instanceType: 'm3.xlarge', - blockDevices: blockDevices, - keyPair: 'sekret', - associatePublicIpAddress: false, - ebsOptimized: true, - securityGroups: ['sg-12345', 'sg-34567'] - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .ami('ami-f111f333') + .iamRole('BaseIAMRole') + .instanceType('m3.xlarge') + .blockDevices(blockDevices) + .keyPair('sekret') + .associatePublicIpAddress(false) + .ebsOptimized(true) + .securityGroups(['sg-12345', 'sg-34567']) + .build() } void 'should reset custom block devices when changing instance type if explicitly requested'() { @@ -505,15 +519,15 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { then: 1 * asgService.getAutoScalingGroup(asgName) >> new AutoScalingGroup().withLaunchConfigurationName(existingLc) - 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region, name -> + 1 * lcBuilder.buildSettingsFromLaunchConfiguration(_, _, _) >> { act, region_local, name -> assert act == credential - assert region == region + assert region_local == region assert name == existingLc existing } - 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _) >> { appName, subnetType, settings, legacyUdf -> + 1 * lcBuilder.buildLaunchConfiguration(_, _, _, _, _) >> { appName, subnetType, settings, legacyUdf, userDataOverride -> assert appName == app assert subnetType == null assert settings.iamRole == existing.iamRole @@ -521,6 +535,8 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { assert legacyUdf == null assert settings.instanceType == 'm4.xlarge' assert settings.blockDevices == blockDeviceConfig.getBlockDevicesForInstanceType('m4.xlarge') + assert userDataOverride.isEnabled() == false + assert userDataOverride.getTokenizerName() == "default" return newLc } @@ -540,22 +556,22 @@ class ModifyAsgLaunchConfigurationOperationSpec extends Specification { existingLc = "$asgName-$suffix".toString() newLc = "$asgName-20150516".toString() blockDevices = [new AmazonBlockDevice(deviceName: '/dev/sdb', size: 500)] - existing = new LaunchConfigurationBuilder.LaunchConfigurationSettings( - account: account, - environment: 'test', - accountType: 'test', - region: region, - baseName: asgName, - suffix: suffix, - ami: 'ami-f111f333', - iamRole: 'BaseIAMRole', - instanceType: 'm3.xlarge', - blockDevices: blockDevices, - keyPair: 'sekret', - associatePublicIpAddress: false, - ebsOptimized: true, - securityGroups: ['sg-12345', 'sg-34567'] - ) + existing = LaunchConfigurationBuilder.LaunchConfigurationSettings.builder() + .account(account) + .environment('test') + .accountType('test') + .region(region) + .baseName(asgName) + .suffix(suffix) + .ami('ami-f111f333') + .iamRole('BaseIAMRole') + .instanceType('m3.xlarge') + .blockDevices(blockDevices) + .keyPair('sekret') + .associatePublicIpAddress(false) + .ebsOptimized(true) + .securityGroups(['sg-12345', 'sg-34567']) + .build() } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ResumeAsgProcessesAtomicOperationSpec.groovy 
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ResumeAsgProcessesAtomicOperationSpec.groovy index fc0ee5fdbaf..190c0a9b5a5 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ResumeAsgProcessesAtomicOperationSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/ResumeAsgProcessesAtomicOperationSpec.groovy @@ -26,6 +26,9 @@ import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactor import spock.lang.Specification import spock.lang.Subject +import static com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType.Launch +import static com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType.Terminate + class ResumeAsgProcessesAtomicOperationSpec extends Specification { def mockAsgService = Mock(AsgService) @@ -62,9 +65,9 @@ class ResumeAsgProcessesAtomicOperationSpec extends Specification { operation.operate([]) then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.resumeProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.resumeProcesses("asg1", [Launch, Terminate]) then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.resumeProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.resumeProcesses("asg1", [Launch, Terminate]) and: task.history*.status == [ @@ -99,7 +102,7 @@ class ResumeAsgProcessesAtomicOperationSpec extends Specification { then: 1 * mockAsgService.getAutoScalingGroup('asg1') then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.resumeProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.resumeProcesses("asg1", [Launch, Terminate]) and: task.history*.status == [ @@ -133,11 +136,11 @@ class ResumeAsgProcessesAtomicOperationSpec extends Specification { operation.operate([]) then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.resumeProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) >> { + then: 1 * mockAsgService.resumeProcesses("asg1", [Launch, Terminate]) >> { throw new Exception('Uh oh!') } then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.resumeProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.resumeProcesses("asg1", [Launch, Terminate]) and: task.history*.status == [ diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/SuspendAsgProcessesAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/SuspendAsgProcessesAtomicOperationSpec.groovy index 4818796afcf..564407948a9 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/SuspendAsgProcessesAtomicOperationSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/SuspendAsgProcessesAtomicOperationSpec.groovy
@@ -24,6 +24,9 @@ import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import spock.lang.Specification import spock.lang.Subject +import static com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType.Launch +import static com.netflix.spinnaker.clouddriver.aws.model.AutoScalingProcessType.Terminate + class SuspendAsgProcessesAtomicOperationSpec extends Specification { def mockAsgService = Mock(AsgService) @@ -60,9 +63,9 @@ class SuspendAsgProcessesAtomicOperationSpec extends Specification { operation.operate([]) then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.suspendProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.suspendProcesses("asg1", [Launch, Terminate]) then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.suspendProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.suspendProcesses("asg1", [Launch, Terminate]) and: task.history*.status == [ @@ -97,7 +100,7 @@ class SuspendAsgProcessesAtomicOperationSpec extends Specification { then: 1 * mockAsgService.getAutoScalingGroup('asg1') then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.suspendProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.suspendProcesses("asg1", [Launch, Terminate]) and: task.history*.status == [ @@ -131,11 +134,11 @@ class SuspendAsgProcessesAtomicOperationSpec extends Specification { operation.operate([]) then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.suspendProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) >> { + then: 1 * mockAsgService.suspendProcesses("asg1", [Launch, Terminate]) >> { throw new Exception('Uh oh!') } then: 1 * mockAsgService.getAutoScalingGroup('asg1') >> new AutoScalingGroup() - then: 1 * mockAsgService.suspendProcesses("asg1", AutoScalingProcessType.with { [Launch, Terminate] }) + then: 1 * mockAsgService.suspendProcesses("asg1", [Launch, Terminate]) and: task.history*.status == [ diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec.groovy index eb1345c120c..ca2758b0001 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec.groovy @@ -33,6 +33,7 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.TerminateInstanc import spock.lang.Specification class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specification { + def instance = new com.amazonaws.services.autoscaling.model.Instance() def setupSpec() { TaskRepository.threadLocalTask.set(Stub(Task)) } @@ -58,6 +59,7 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat getAutoScalingGroupName() >> "myasg-stack-v000" getMinSize() >> 1 getDesiredCapacity() >> 2 + getInstances()
>> [instance.withInstanceId(description.instance)] } new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) } @@ -92,6 +94,7 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat getMinSize() >> 1 getDesiredCapacity() >> 2 getLoadBalancerNames() >> ['myasg--frontend'] + getInstances() >> [instance.withInstanceId(description.instance)] } new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) } @@ -127,6 +130,7 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat getAutoScalingGroupName() >> "myasg-stack-v000" getMinSize() >> 1 getDesiredCapacity() >> 1 + getInstances() >> [instance.withInstanceId(description.instance)] } new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) } @@ -162,6 +166,7 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat getAutoScalingGroupName() >> "myasg-stack-v000" getMinSize() >> 1 getDesiredCapacity() >> 1 + getInstances() >> [instance.withInstanceId(description.instance)] } new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) } @@ -191,6 +196,7 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat getMinSize() >> 1 getDesiredCapacity() >> 2 getMaxSize() >> 2 + getInstances() >> [instance.withInstanceId(description.instance)] } new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) } >> @@ -201,6 +207,7 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat getMinSize() >> 1 getDesiredCapacity() >> 1 getMaxSize() >> 2 + getInstances() >> [instance.withInstanceId(description.instance)] } new DescribeAutoScalingGroupsResult().withAutoScalingGroups(asg) } @@ -214,4 +221,39 @@ class TerminateInstanceAndDecrementAsgAtomicOperationUnitSpec extends Specificat 0 * _ } + + void "should fail operation if the instance isn't part of the Server Group"() { + given: + def amazonAutoScaling = Mock(AmazonAutoScaling) + def amazonClientProvider = Stub(AmazonClientProvider) { + getAutoScaling(_, _, true) >> amazonAutoScaling + } + + def description = new TerminateInstanceAndDecrementAsgDescription( + asgName: "test-v001", + region: "us-east-1", + instance: "i-123", + setMaxToNewDesired: true + ) + + def serverGroup = new AutoScalingGroup( + autoScalingGroupName: description.asgName, + instances: [new com.amazonaws.services.autoscaling.model.Instance(instanceId: "i-456")], + desiredCapacity: 3, + minSize: 1 + ) + + and: + 1 * amazonAutoScaling.describeAutoScalingGroups(_) >> + new DescribeAutoScalingGroupsResult().withAutoScalingGroups(serverGroup) + + def operation = new TerminateInstanceAndDecrementAsgAtomicOperation(description) + operation.amazonClientProvider = amazonClientProvider + + when: + operation.operate([]) + + then: + thrown(IllegalArgumentException) + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAlarmOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAlarmOperationUnitSpec.groovy index 839f4118653..21072bc4c3e 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAlarmOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAlarmOperationUnitSpec.groovy @@ -56,12 +56,12 @@ class UpsertAlarmOperationUnitSpec extends Specification { unit: StandardUnit.Percent, ) - final cloudWatch = Mock(AmazonCloudWatch) - final amazonClientProvider = 
Stub(AmazonClientProvider) { + def cloudWatch = Mock(AmazonCloudWatch) + def amazonClientProvider = Stub(AmazonClientProvider) { getCloudWatch(_, _, true) >> cloudWatch } - @Subject final op = new UpsertAlarmAtomicOperation(description) + @Subject def op = new UpsertAlarmAtomicOperation(description) def setup() { op.amazonClientProvider = amazonClientProvider diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperationUnitSpec.groovy index 2aedfd2af57..e04c018cc14 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertAsgLifecycleHookAtomicOperationUnitSpec.groovy @@ -31,17 +31,17 @@ class UpsertAsgLifecycleHookAtomicOperationUnitSpec extends Specification { TaskRepository.threadLocalTask.set(Mock(Task)) } - final description = new UpsertAsgLifecycleHookDescription( + def description = new UpsertAsgLifecycleHookDescription( serverGroupName: 'asg-v000', region: 'us-west-1', roleARN: 'arn:aws:iam::123456789012:role/my-notification-role', notificationTargetARN: 'arn:aws:sns:us-west-1:123456789012:my-sns-topic' ) - @Subject final op = new UpsertAsgLifecycleHookAtomicOperation(description) + @Subject def op = new UpsertAsgLifecycleHookAtomicOperation(description) - final autoScaling = Mock(AmazonAutoScaling) - final amazonClientProvider = Stub(AmazonClientProvider) { + def autoScaling = Mock(AmazonAutoScaling) + def amazonClientProvider = Stub(AmazonClientProvider) { getAutoScaling(_, _, true) >> autoScaling } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertScalingPolicyAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertScalingPolicyAtomicOperationUnitSpec.groovy index 22c9d692416..942d524c235 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertScalingPolicyAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/UpsertScalingPolicyAtomicOperationUnitSpec.groovy @@ -37,7 +37,7 @@ class UpsertScalingPolicyAtomicOperationUnitSpec extends Specification { TaskRepository.threadLocalTask.set(Mock(Task)) } - final description = new UpsertScalingPolicyDescription( + def description = new UpsertScalingPolicyDescription( serverGroupName: "kato-main-v000", region: "us-west-1", adjustmentType: AdjustmentType.PercentChangeInCapacity, @@ -48,14 +48,14 @@ class UpsertScalingPolicyAtomicOperationUnitSpec extends Specification { ) ) - final autoScaling = Mock(AmazonAutoScaling) - final cloudWatch = Mock(AmazonCloudWatch) - final amazonClientProvider = Stub(AmazonClientProvider) { + def autoScaling = Mock(AmazonAutoScaling) + def cloudWatch = Mock(AmazonCloudWatch) + def amazonClientProvider = Stub(AmazonClientProvider) { getAutoScaling(_, _, true) >> autoScaling getCloudWatch(_, _, true) >> cloudWatch } - @Subject final op = new UpsertScalingPolicyAtomicOperation(description) + @Subject def op = new UpsertScalingPolicyAtomicOperation(description) def setup() { op.amazonClientProvider = amazonClientProvider diff --git 
a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/ModifyServerGroupLaunchTemplateSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/ModifyServerGroupLaunchTemplateSpec.groovy new file mode 100644 index 00000000000..477046d87ae --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/ModifyServerGroupLaunchTemplateSpec.groovy @@ -0,0 +1,96 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions + +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.ec2.model.* +import com.fasterxml.jackson.databind.JsonMappingException +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class ModifyServerGroupLaunchTemplateSpec extends Specification { + def credentials = TestCredential.named("test") + def ltService = Mock(LaunchTemplateService) + def credentialsRepository = Stub(MapBackedCredentialsRepository) { + getOne(_) >> credentials + } + + def autoScalingGroupWithLt = new AutoScalingGroup( + autoScalingGroupName: "test-v001", + launchTemplate: new LaunchTemplateSpecification(launchTemplateName: "lt-1", version: "1") + ) + + def regionScopedProvider = Stub(RegionScopedProviderFactory.RegionScopedProvider) { + getLaunchTemplateService() >> ltService + } + + def regionScopedProviderFactory = Mock(RegionScopedProviderFactory) { + forRegion(_, _) >> regionScopedProvider + } + + def dummyDescription = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: autoScalingGroupWithLt.autoScalingGroupName, + amiName: "ami-1", + credentials: credentials + ) + + @Subject + def modifyAction = new ModifyServerGroupLaunchTemplate(credentialsRepository, regionScopedProviderFactory) + + @Unroll + def "should modify launch template as expected"() { + given: + def dummySrcVersion = new LaunchTemplateVersion() + def modifyCommand = new ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand.ModifyServerGroupLaunchTemplateCommandBuilder() + .description(dummyDescription) + .isAsgBackedByMixedInstancesPolicy(asgBackedByMip) + .isReqToModifyLaunchTemplate(reqToModifyLaunchTemplate) + .isReqToUpgradeAsgToMixedInstancesPolicy(convertAsgToUseMip) + .sourceVersion(dummySrcVersion) + .build() + + def newDummyVersion = new LaunchTemplateVersion(launchTemplateId: "lt-1", versionNumber: 2L) + + when: + SagaAction.Result result = modifyAction.apply(modifyCommand, new Saga("test", "test")) + + then: + result.nextCommand instanceof PrepareUpdateAutoScalingGroup.PrepareUpdateAutoScalingGroupCommand + ((PrepareUpdateAutoScalingGroup.PrepareUpdateAutoScalingGroupCommand) result.nextCommand).newLaunchTemplateVersionNumber == newLtVersionNumber + 
((PrepareUpdateAutoScalingGroup.PrepareUpdateAutoScalingGroupCommand) result.nextCommand).isReqToUpgradeAsgToMixedInstancesPolicy == convertAsgToUseMip + if (reqToModifyLaunchTemplate) { + 1 * ltService.modifyLaunchTemplate(credentials, dummyDescription, dummySrcVersion, shouldUseMipInModify) >> newDummyVersion + } else { + // modify action skipped + 0 * ltService.modifyLaunchTemplate(credentials, dummyDescription, dummySrcVersion, shouldUseMipInModify) >> newDummyVersion + } + + where: + asgBackedByMip | reqToModifyLaunchTemplate | convertAsgToUseMip || shouldUseMipInModify || newLtVersionNumber + true | true | false || true || 2L // update ASG MIP with new LT version + true | false | false || true || null // update ASG MIP properties without creating a new LT version + false | true | true || true || 2L // update ASG LT with new LT version and convert ASG to use MIP + false | true | false || false || 2L // update ASG LT with new LT version, but don't use MIP + } + + def "should not throw JsonProcessingException when deserializing"() { + given: + def objectMapper = AmazonObjectMapperConfigurer.createConfigured() + def json = objectMapper.writeValueAsString(dummyDescription) + + when: + objectMapper.readValue(json, ModifyServerGroupLaunchTemplateDescription.class) + + then: + notThrown(JsonMappingException) + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareModifyServerGroupLaunchTemplateSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareModifyServerGroupLaunchTemplateSpec.groovy new file mode 100644 index 00000000000..27cd1706a7d --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareModifyServerGroupLaunchTemplateSpec.groovy @@ -0,0 +1,336 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions + +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.InstancesDistribution +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.* +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.InstanceTypeUtils.BlockDeviceConfig +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.services.AsgService +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class PrepareModifyServerGroupLaunchTemplateSpec extends Specification { + private static final String LT_ID_1 = "lt-1", LT_ID_1_V = "1" + + def credentials = TestCredential.named("test") + def ltService = Mock(LaunchTemplateService) + def asgService = Mock(AsgService) + def ec2 = Mock(AmazonEC2) + def blockDeviceConfig = Mock(BlockDeviceConfig) + def credentialsRepository = Stub(MapBackedCredentialsRepository) { + 
getOne(_) >> credentials + } + + def autoScalingGroupWithLt = new AutoScalingGroup( + autoScalingGroupName: "test-v001", + launchTemplate: new LaunchTemplateSpecification(launchTemplateName: LT_ID_1, version: LT_ID_1_V) + ) + + def regionScopedProvider = Stub(RegionScopedProviderFactory.RegionScopedProvider) { + getAmazonEC2() >> ec2 + getAsgService() >> asgService + getLaunchTemplateService() >> ltService + } + + def regionScopedProviderFactory = Mock(RegionScopedProviderFactory) { + forRegion(_, _) >> regionScopedProvider + } + + @Subject + def prepareAction = new PrepareModifyServerGroupLaunchTemplate( + blockDeviceConfig, credentialsRepository, regionScopedProviderFactory) + + ModifyServerGroupLaunchTemplateDescription modifyDescription + def prepareCommand + + def setup() { + modifyDescription = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: autoScalingGroupWithLt.autoScalingGroupName, + credentials: credentials) + + prepareCommand = new PrepareModifyServerGroupLaunchTemplate.PrepareModifyServerGroupLaunchTemplateCommand.PrepareModifyServerGroupLaunchTemplateCommandBuilder().description(modifyDescription).build() + } + + def "should prepare for launch template update"() { + given: + modifyDescription.instanceType = "c5.large" + + and: + def ltVersionBeforeModify = new LaunchTemplateVersion( + launchTemplateName: LT_ID_1, + versionNumber: LT_ID_1_V, + launchTemplateData: new ResponseLaunchTemplateData( + imageId: "ami-1", + instanceType: "c3.large" + ) + ) + + when: + prepareAction.apply(prepareCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup("test-v001") >> autoScalingGroupWithLt + 1 * ltService.getLaunchTemplateVersion(autoScalingGroupWithLt.launchTemplate) >> Optional.of(ltVersionBeforeModify) + 1 * blockDeviceConfig.getBlockDevicesForInstanceType("c5.large") + 1 * blockDeviceConfig.getBlockDevicesForInstanceType("c3.large") + } + + @Unroll + def "should prepare for launch template and ASG update for a server group backed by mixed instances policy"() { + given: + modifyDescription.spotAllocationStrategy = spotAllocationStrategy // Mixed instances policy property + modifyDescription.instanceType = instanceType // Launch template property + modifyDescription.spotPrice = spotPrice // Mixed instances policy property as ASG is backed by MIP + + and: + def mixedInstancesPolicy = new MixedInstancesPolicy( + launchTemplate: new com.amazonaws.services.autoscaling.model.LaunchTemplate( + launchTemplateSpecification: autoScalingGroupWithLt.launchTemplate, + overrides: [ + new com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides(instanceType: "c3.large", weightedCapacity: "2"), + new com.amazonaws.services.autoscaling.model.LaunchTemplateOverrides(instanceType: "c3.xlarge", weightedCapacity: "4") + ] + ), + instancesDistribution: new InstancesDistribution( + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 2, + onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: "lowest-price", + spotInstancePools: 2, + spotMaxPrice: "1" + ) + ) + def autoScalingGroup = new AutoScalingGroup( + autoScalingGroupName: "test-v001", + mixedInstancesPolicy: mixedInstancesPolicy + ) + + def ltVersionBeforeModify = new LaunchTemplateVersion( + launchTemplateName: LT_ID_1, + versionNumber: LT_ID_1_V, + launchTemplateData: new ResponseLaunchTemplateData( + imageId: "ami-1", + instanceType: "m5.large" + ) + ) + + when: + SagaAction.Result result = prepareAction.apply(prepareCommand, new Saga("test", "test")) + 
+ then: + 1 * asgService.getAutoScalingGroup("test-v001") >> autoScalingGroup + 1 * ltService.getLaunchTemplateVersion(autoScalingGroup.mixedInstancesPolicy.launchTemplate.launchTemplateSpecification) >> Optional.of(ltVersionBeforeModify) + if (!expectedToSkipStep) { + 1 * blockDeviceConfig.getBlockDevicesForInstanceType("c3.large") + 1 * blockDeviceConfig.getBlockDevicesForInstanceType("m5.large") + } + + and: + def nextCommand = ((ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand) result.nextCommand) + nextCommand instanceof ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand + nextCommand.isReqToModifyLaunchTemplate == !expectedToSkipStep + nextCommand.isAsgBackedByMixedInstancesPolicy == true + nextCommand.isReqToUpgradeAsgToMixedInstancesPolicy == false + + // assert description fields + nextCommand.description.spotAllocationStrategy == expectedSpotAllocStrategy + nextCommand.description.spotInstancePools == expectedSpotInstancePools + nextCommand.description.onDemandAllocationStrategy == mixedInstancesPolicy.getInstancesDistribution().getOnDemandAllocationStrategy() + nextCommand.description.onDemandBaseCapacity == mixedInstancesPolicy.getInstancesDistribution().getOnDemandBaseCapacity() + nextCommand.description.onDemandPercentageAboveBaseCapacity == mixedInstancesPolicy.getInstancesDistribution().getOnDemandPercentageAboveBaseCapacity() + + where: + spotAllocationStrategy | spotPrice | instanceType || expectedSpotAllocStrategy || expectedSpotInstancePools || expectedToSkipStep + "capacity-optimized" | null | "c3.large" || "capacity-optimized" || null || false // isReqToModifyMipFieldsOnly is false + null | "1" | "c3.large" || "lowest-price" || 2 || false // isReqToModifyMipFieldsOnly is false + "lowest-price" | "1" | "c3.large" || "lowest-price" || 2 || false // isReqToModifyMipFieldsOnly is false + "capacity-optimized-prioritized" | null | null || "capacity-optimized-prioritized" || null || true // isReqToModifyMipFieldsOnly is true + null | "1" | null || "lowest-price" || 2 || true // isReqToModifyMipFieldsOnly is true + } + + @Unroll + def "should prepare for launch template and ASG update for a server group backed by a launch template that is being upgraded to a mixed instances policy"() { + given: + modifyDescription.spotAllocationStrategy = spotAllocationStrategy + modifyDescription.spotPrice = newSpotPrice + + and: + def autoScalingGroup = new AutoScalingGroup( + autoScalingGroupName: "test-v001", + launchTemplate: new LaunchTemplateSpecification(launchTemplateName: LT_ID_1, version: LT_ID_1_V) + ) + + def ltVersionBeforeModify = new LaunchTemplateVersion( + launchTemplateName: LT_ID_1, + versionNumber: LT_ID_1_V, + launchTemplateData: new ResponseLaunchTemplateData( + imageId: "ami-1", + instanceMarketOptions: asgHasSpotLt + ?
new LaunchTemplateInstanceMarketOptions().withMarketType("spot").withSpotOptions(new LaunchTemplateSpotMarketOptions(maxPrice: "0.5")) + : null + ) + ) + + when: + SagaAction.Result result = prepareAction.apply(prepareCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup("test-v001") >> autoScalingGroup + 1 * ltService.getLaunchTemplateVersion(autoScalingGroup.launchTemplate) >> Optional.of(ltVersionBeforeModify) + + and: + def nextCommand = ((ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand) result.nextCommand) + nextCommand instanceof ModifyServerGroupLaunchTemplate.ModifyServerGroupLaunchTemplateCommand + nextCommand.isReqToModifyLaunchTemplate == !expectedToSkipStep + nextCommand.isAsgBackedByMixedInstancesPolicy == false + nextCommand.isReqToUpgradeAsgToMixedInstancesPolicy == true + + and: + def descPassed = nextCommand.description + descPassed.spotPrice == expectedSpotPrice + + where: + spotAllocationStrategy | newSpotPrice | asgHasSpotLt || expectedSpotPrice || expectedToSkipStep + "capacity-optimized" | "1" | true || "1" || false // modify LT, create a new LT version with new spot max price + "capacity-optimized" | "" | true || null || false // modify LT, create a new LT version withOUT spot options + "capacity-optimized" | null | true || "0.5" || false // modify LT, create a new LT version with new spot max price + null | "1" | false || "1" || true // skip new LT version, and upgrade to MIP + "capacity-optimized" | null | false || null || true // skip new LT version, and upgrade to MIP + + } + + @Unroll + def "should resolve image id correctly, with precedence given to imageId first, AMI name second"() { + given: + modifyDescription.imageId = imageIdInReq + modifyDescription.amiName = amiNameInReq + + def ltVersionBeforeModify = new LaunchTemplateVersion( + launchTemplateName: LT_ID_1, + versionNumber: 1, + launchTemplateData: new ResponseLaunchTemplateData( + imageId: imageIdInSrc + ) + ) + + when: + prepareAction.apply(prepareCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup(autoScalingGroupWithLt.autoScalingGroupName) >> autoScalingGroupWithLt + 1 * ltService.getLaunchTemplateVersion(autoScalingGroupWithLt.launchTemplate) >> Optional.of(ltVersionBeforeModify) + resolveAmiCallCount * ec2.describeImages(_) >> { DescribeImagesRequest req -> + new DescribeImagesResult().withImages(req.imageIds.collect { new Image(imageId: "img-from-ami") }) + } + + where: + imageIdInReq | amiNameInReq | resolveAmiCallCount | imageIdInSrc || expectedImageIdPassed + "img-req" | "ami-1" | 0 | "img-src" || "img-req" + null | "ami-1" | 1 | "img-src" || "img-from-ami" + null | null | 0 | "img-src" || "img-src" + + @Unroll + def "should include security groups from previous launch template: #desc"() { + given: + modifyDescription.securityGroups = securityGroups + modifyDescription.securityGroupsAppendOnly = sgAppendOnly + modifyDescription.amiName = "ami-1" + + def launchTemplateData = new ResponseLaunchTemplateData( + imageId: "ami-1", + networkInterfaces: [ + new LaunchTemplateInstanceNetworkInterfaceSpecification( + deviceIndex: 0, + groups: ["sg-1"] + ) + ], + ) + + def ltVersionBeforeModify = new LaunchTemplateVersion( + launchTemplateName: LT_ID_1, + versionNumber: 1, + launchTemplateData: launchTemplateData + ) + + when: + prepareAction.apply(prepareCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup(autoScalingGroupWithLt.autoScalingGroupName) >> autoScalingGroupWithLt + 1 *
ltService.getLaunchTemplateVersion(autoScalingGroupWithLt.launchTemplate) >> Optional.of(ltVersionBeforeModify) + 1 * ec2.describeImages(_) >> { DescribeImagesRequest req -> + new DescribeImagesResult().withImages(req.imageIds.collect { new Image(imageId: it) }) + } + modifyDescription.getSecurityGroups().sort() == expectedGroups + + where: + securityGroups | sgAppendOnly || expectedGroups || desc + null | null || ["sg-1"] || "No specified groups and no specified appendOnly includes existing groups" + null | false || [] || "With appendOnly explicitly false, clear groups if none supplied" + null | true || ["sg-1"] || "With appendOnly true, always add existing groups" + ["sg-2"] | null || ["sg-2"] || "With no specified appendOnly but provided groups, only use provided groups" + ["sg-2"] | false || ["sg-2"] || "With appendOnly false, use the specified groups only" + ["sg-2"] | true || ["sg-1", "sg-2"] || "With appendOnly true, merge provided and existing groups" + } + + def "should reset custom block devices when changing instance type"() { + given: + String newInstanceType = "m3-large" + modifyDescription.amiName = "ami-1" + modifyDescription.imageId = "ami-1" + modifyDescription.instanceType = newInstanceType + modifyDescription.blockDevices = null + + def launchTemplateData = new ResponseLaunchTemplateData( + imageId: "ami-1", + instanceType: "m3-medium", + networkInterfaces: [ + new LaunchTemplateInstanceNetworkInterfaceSpecification( + deviceIndex: 0, + groups: ["sg-1"] + ) + ], + blockDeviceMappings: [ + new LaunchTemplateBlockDeviceMapping( + deviceName: "/dev/sdb", + ebs: new LaunchTemplateEbsBlockDevice(volumeSize: 40) + ) + ] + ) + + def ltVersionBeforeModify = new LaunchTemplateVersion( + launchTemplateName: LT_ID_1, + versionNumber: 1, + launchTemplateData: launchTemplateData + ) + + when: + prepareAction.apply(prepareCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup("test-v001") >> autoScalingGroupWithLt + 1 * ltService.getLaunchTemplateVersion(autoScalingGroupWithLt.launchTemplate) >> Optional.of(ltVersionBeforeModify) + 1 * blockDeviceConfig.getBlockDevicesForInstanceType(launchTemplateData.instanceType) >> [ + new AmazonBlockDevice(deviceName: '/dev/sdb', size: 40) + ] + 1 * blockDeviceConfig.getBlockDevicesForInstanceType(newInstanceType) >> [ + new AmazonBlockDevice(deviceName: '/dev/sdb', size: 80) + ] + + modifyDescription.blockDevices.size() == 1 + modifyDescription.blockDevices[0].size == 80 + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareUpdateAutoScalingGroupSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareUpdateAutoScalingGroupSpec.groovy new file mode 100644 index 00000000000..5df992166ab --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/PrepareUpdateAutoScalingGroupSpec.groovy @@ -0,0 +1,72 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions + +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.ec2.model.* +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.validators.ModifyServerGroupLaunchTemplateValidator +import
com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class PrepareUpdateAutoScalingGroupSpec extends Specification { + @Shared + ModifyServerGroupLaunchTemplateValidator validator + + def autoScalingGroupWithLt = new AutoScalingGroup( + autoScalingGroupName: "test-v001", + launchTemplate: new LaunchTemplateSpecification(launchTemplateName: "lt-1", version: "1") + ) + + def description = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: autoScalingGroupWithLt.autoScalingGroupName, + amiName: "ami-1" + ) + void setupSpec() { + validator = Stub(ModifyServerGroupLaunchTemplateValidator) + } + + @Subject + def prepareAction = new PrepareUpdateAutoScalingGroup(validator) + + @Unroll + def "should prepare for update ASG as expected"() { + given: + description.launchTemplateOverridesForInstanceType = descOverrides + + def newDummyVersion = new LaunchTemplateVersion(launchTemplateId: "lt-1", versionNumber: 2L) + def prepareCommand = new PrepareUpdateAutoScalingGroup.PrepareUpdateAutoScalingGroupCommand.PrepareUpdateAutoScalingGroupCommandBuilder() + .description(description) + .launchTemplateVersion(newDummyVersion) + .isReqToUpgradeAsgToMixedInstancesPolicy(false) + .newLaunchTemplateVersionNumber(2L) + .build() + + when: + SagaAction.Result result = prepareAction.apply(prepareCommand, new Saga("test", "test")) + + then: + result.nextCommand instanceof UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand + ((UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand) result.nextCommand).launchTemplateVersion == prepareCommand.launchTemplateVersion + ((UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand) result.nextCommand).isReqToUpgradeAsgToMixedInstancesPolicy == prepareCommand.isReqToUpgradeAsgToMixedInstancesPolicy + ((UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand) result.nextCommand).newLaunchTemplateVersionNumber == prepareCommand.newLaunchTemplateVersionNumber + + if (descOverrides) { + ((UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand) result.nextCommand).launchTemplateOverrides == [new LaunchTemplateOverrides().withWeightedCapacity(2).withInstanceType("m5.xlarge")] + } else { + ((UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand) result.nextCommand).launchTemplateOverrides == null + } + + where: + descOverrides << [ + [new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "m5.xlarge", weightedCapacity: 2)], + null, + [] + ] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/UpdateAutoScalingGroupSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/UpdateAutoScalingGroupSpec.groovy new file mode 100644 index 00000000000..edb8abe8be5 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/actions/UpdateAutoScalingGroupSpec.groovy @@ -0,0 +1,258 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.actions + +import com.amazonaws.services.autoscaling.AmazonAutoScaling +import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.InstancesDistribution +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy +import 
com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest +import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupResult +import com.amazonaws.services.ec2.model.* +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.ModifyServerGroupLaunchTemplateAtomicOperation +import com.netflix.spinnaker.clouddriver.aws.services.AsgService +import com.netflix.spinnaker.clouddriver.aws.services.LaunchTemplateService +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class UpdateAutoScalingGroupSpec extends Specification { + def credentials = TestCredential.named("test") + def autoScaling = Mock(AmazonAutoScaling) + def ltService = Mock(LaunchTemplateService) + def asgService = Mock(AsgService) + def credentialsRepository = Stub(MapBackedCredentialsRepository) { + getOne(_) >> credentials + } + + def regionScopedProvider = Stub(RegionScopedProviderFactory.RegionScopedProvider) { + getAsgService() >> asgService + getAutoScaling() >> autoScaling + getLaunchTemplateService() >> ltService + } + + def regionScopedProviderFactory = Mock(RegionScopedProviderFactory) { + forRegion(_, _) >> regionScopedProvider + } + + @Subject + def updateAction = new UpdateAutoScalingGroup(regionScopedProviderFactory, credentialsRepository) + + def asgName = "test-v001" + def ltVersion = new LaunchTemplateVersion( + launchTemplateId: "lt-1", + launchTemplateData: new ResponseLaunchTemplateData( + imageId: "ami-1", + ), + versionNumber: 3L + ) + + def "should update ASG backed by mixed instances policy correctly"() { + given: + def modifyDesc = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: asgName, + amiName: "ami-1", + credentials: credentials, + spotAllocationStrategy: "capacity-optimized", + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 2, + onDemandPercentageAboveBaseCapacity: 50, + spotPrice: "1" + ) + + and: + def asgWithMip = new AutoScalingGroup( + autoScalingGroupName: asgName, + mixedInstancesPolicy: new MixedInstancesPolicy() // description is already populated with MIP values from existing ASG at this point, use a dummy MIP here + ) + + and: + def updateCommand = new UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand.UpdateAutoScalingGroupCommandBuilder() + .description(modifyDesc) + .launchTemplateVersion(ltVersion) + .newLaunchTemplateVersionNumber(ltVersion.getVersionNumber()) + .launchTemplateOverrides(null) + .isReqToUpgradeAsgToMixedInstancesPolicy(false) + .build() + + when: + updateAction.apply(updateCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup(asgName) >> asgWithMip + 1 * autoScaling.updateAutoScalingGroup(_ as UpdateAutoScalingGroupRequest) >> { arguments -> + // assert arguments passed and return + UpdateAutoScalingGroupRequest updateReq = arguments[0] + + assert updateReq.autoScalingGroupName == asgName + assert updateReq.launchTemplate == null + + assert updateReq.mixedInstancesPolicy.instancesDistribution == new InstancesDistribution( + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 2, + onDemandPercentageAboveBaseCapacity: 50, + 
spotAllocationStrategy: "capacity-optimized", + spotInstancePools: null, + spotMaxPrice: "1" + ) + assert updateReq.mixedInstancesPolicy.launchTemplate.launchTemplateSpecification == new LaunchTemplateSpecification( + launchTemplateId: ltVersion.launchTemplateId, + version: String.valueOf(ltVersion.getVersionNumber()) + ) + } + } + + def "should update ASG backed by launch template correctly"() { + given: + def modifyDesc = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: asgName, + amiName: "ami-1", + credentials: credentials, + instanceType: "new.type" + ) + + and: + def asgWithLt = new AutoScalingGroup( + autoScalingGroupName: asgName, + launchTemplate: new LaunchTemplateSpecification(launchTemplateName: ltVersion.launchTemplateId, version: String.valueOf(ltVersion.getVersionNumber())) + ) + + and: + def updateCommand = new UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand.UpdateAutoScalingGroupCommandBuilder() + .description(modifyDesc) + .launchTemplateVersion(ltVersion) + .newLaunchTemplateVersionNumber(ltVersion.getVersionNumber()) + .launchTemplateOverrides(null) + .isReqToUpgradeAsgToMixedInstancesPolicy(false) + .build() + + when: + updateAction.apply(updateCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup(asgName) >> asgWithLt + 1 * autoScaling.updateAutoScalingGroup(_ as UpdateAutoScalingGroupRequest) >> { arguments -> + // assert arguments passed and return + UpdateAutoScalingGroupRequest updateReq = arguments[0] + + assert updateReq.autoScalingGroupName == asgName + assert updateReq.mixedInstancesPolicy == null + + assert updateReq.launchTemplate.launchTemplateId == ltVersion.launchTemplateId + assert updateReq.launchTemplate.version == String.valueOf(ltVersion.getVersionNumber()); new UpdateAutoScalingGroupResult() + } + } + + def "should convert ASG backed by launch template to use mixed instances policy correctly"() { + given: + def modifyDesc = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: asgName, + amiName: "ami-1", + credentials: credentials, + spotAllocationStrategy: "capacity-optimized", + spotPrice: "1" + ) + + and: + def asgWithLt = new AutoScalingGroup( + autoScalingGroupName: asgName, + launchTemplate: new LaunchTemplateSpecification(launchTemplateId: ltVersion.launchTemplateId, version: String.valueOf(ltVersion.getVersionNumber())) + ) + + and: + def updateCommand = new UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand.UpdateAutoScalingGroupCommandBuilder() + .description(modifyDesc) + .launchTemplateVersion(ltVersion) + .newLaunchTemplateVersionNumber(ltVersion.getVersionNumber()) + .launchTemplateOverrides(null) + .isReqToUpgradeAsgToMixedInstancesPolicy(true) + .build() + + when: + updateAction.apply(updateCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup(asgName) >> asgWithLt + 1 * autoScaling.updateAutoScalingGroup(_ as UpdateAutoScalingGroupRequest) >> { arguments -> + // assert arguments passed and return + UpdateAutoScalingGroupRequest updateReq = arguments[0] + + assert updateReq.autoScalingGroupName == asgName + assert updateReq.launchTemplate == null + + // null values will take AWS defaults + assert updateReq.mixedInstancesPolicy.instancesDistribution == new InstancesDistribution( + onDemandAllocationStrategy: null, + onDemandBaseCapacity: null, + onDemandPercentageAboveBaseCapacity: null, + spotAllocationStrategy: "capacity-optimized", + spotInstancePools: null, + spotMaxPrice: "1" + ) + assert 
updateReq.mixedInstancesPolicy.launchTemplate.launchTemplateSpecification == new LaunchTemplateSpecification( + launchTemplateId: ltVersion.launchTemplateId, + version: String.valueOf(ltVersion.getVersionNumber()) + ) + + assert updateReq.mixedInstancesPolicy.launchTemplate.overrides == []; new UpdateAutoScalingGroupResult() + } + } + + @Unroll + def "should clean up newly created launch template version on failure"() { + given: + def modifyDesc = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: asgName, + amiName: "ami-1", + credentials: credentials, + instanceType: "new.type" + ) + + and: + def asgWithLt = new AutoScalingGroup( + autoScalingGroupName: asgName, + launchTemplate: new LaunchTemplateSpecification(launchTemplateName: ltVersion.launchTemplateId, version: String.valueOf(ltVersion.getVersionNumber())) + ) + + and: + def updateCommand = new UpdateAutoScalingGroup.UpdateAutoScalingGroupCommand.UpdateAutoScalingGroupCommandBuilder() + .description(modifyDesc) + .launchTemplateVersion(ltVersion) + .newLaunchTemplateVersionNumber(newLtVersionNum) + .launchTemplateOverrides(null) + .isReqToUpgradeAsgToMixedInstancesPolicy(false) + .build() + + when: + updateAction.apply(updateCommand, new Saga("test", "test")) + + then: + 1 * asgService.getAutoScalingGroup(asgName) >> asgWithLt + 1 * autoScaling.updateAutoScalingGroup(_) >> { throw new RuntimeException("Update ASG failed!")} + Exception ex = thrown(ModifyServerGroupLaunchTemplateAtomicOperation.LaunchTemplateException.class) + + // verify clean up and exception message + if (newLtVersionNum) { + if (deleteLtVersionFailed) { + 1 * ltService.deleteLaunchTemplateVersion(ltVersion.launchTemplateId, newLtVersionNum) >> { throw new RuntimeException("Failed to delete launch template version $newLtVersionNum for launch template ID $ltVersion.launchTemplateId because of error 'unexpectedError'") } + } else { + 1 * ltService.deleteLaunchTemplateVersion(ltVersion.launchTemplateId, newLtVersionNum) + } + } + ex.message == exceptionMsgExpected + + where: + newLtVersionNum | deleteLtVersionFailed | exceptionMsgExpected + null | _ | "Failed to update server group test-v001.Error: Update ASG failed!\n" + 3L | true | "Failed to update server group test-v001.Error: Update ASG failed!\nFailed to clean up launch template version! 
Error: Failed to delete launch template version 3 for launch template ID lt-1 because of error 'unexpectedError'" + 3L | false | "Failed to update server group test-v001.Error: Update ASG failed!\n" + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/DiscoverySupportUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/DiscoverySupportUnitSpec.groovy index 90e77afeecd..3972659057b 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/DiscoverySupportUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/DiscoverySupportUnitSpec.groovy @@ -24,7 +24,11 @@ import com.amazonaws.services.ec2.model.DescribeInstancesResult import com.amazonaws.services.ec2.model.InstanceState import com.amazonaws.services.ec2.model.InstanceStateName import com.amazonaws.services.ec2.model.Reservation +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableInstanceDiscoveryDescription import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.services.AsgService +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskState @@ -33,10 +37,7 @@ import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.EurekaSupportConfigurationProperties import com.netflix.spinnaker.clouddriver.model.ClusterProvider import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableInstanceDiscoveryDescription -import com.netflix.spinnaker.clouddriver.aws.services.AsgService -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import retrofit.RetrofitError import retrofit.client.Response import spock.lang.Specification @@ -100,12 +101,12 @@ class DiscoverySupportUnitSpec extends Specification { when: discoverySupport.updateDiscoveryStatusForInstances( - description, task, "phase", AbstractEurekaSupport.DiscoveryStatus.Disable, instances + description, task, "phase", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE, instances ) then: thrown(AbstractEurekaSupport.RetryableException) - discoverySupport.eurekaSupportConfigurationProperties.retryMax * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + discoverySupport.eurekaSupportConfigurationProperties.retryMax * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 0 * eureka.updateInstanceStatus(*_) } @@ -150,11 +151,11 @@ class DiscoverySupportUnitSpec extends Specification { when: discoverySupport.updateDiscoveryStatusForInstances( - description, task, "phase", AbstractEurekaSupport.DiscoveryStatus.Disable, instances + description, task, "phase", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE, instances ) then: - discoverySupport.eurekaSupportConfigurationProperties.retryMax * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + 
discoverySupport.eurekaSupportConfigurationProperties.retryMax * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 0 * eureka.updateInstanceStatus(*_) 1 * task.updateStatus(_, "Could not find application name in Discovery or AWS, short-circuiting (asg: myapp-test-v000, region: us-east-1)") } @@ -173,23 +174,24 @@ class DiscoverySupportUnitSpec extends Specification { ) then: - (instanceIds.size() + 1) * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + (instanceIds.size() + 1) * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 1 * eureka.getInstanceInfo(_) >> [ instance: [ - app: appName + app: appName, + status: "OUT_OF_SERVICE" ] ] 0 * task.fail() instanceIds.each { - 1 * eureka.resetInstanceStatus(appName, it, AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> response(200) + 1 * eureka.resetInstanceStatus(appName, it, AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> response(200) } where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["i-123", "i-456"] } @@ -203,27 +205,27 @@ class DiscoverySupportUnitSpec extends Specification { ) when: - discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds) + discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds, true) then: - task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 1 * task.fail() - 1 * eureka.getInstanceInfo(_) >> [ instance: [ app: appName ] ] - 1 * eureka.resetInstanceStatus(appName, "bad", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> httpError(500) - 1 * eureka.resetInstanceStatus(appName, "good", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> response(200) - 1 * eureka.resetInstanceStatus(appName, "also-bad", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> httpError(500) + 1 * eureka.getInstanceInfo(_) >> [ instance: [ app: appName, status: "OUT_OF_SERVICE" ] ] + 1 * eureka.resetInstanceStatus(appName, "bad", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {throw new SpinnakerHttpException(httpError(400))} + 1 * eureka.resetInstanceStatus(appName, "good", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {response(200)} + 1 * eureka.resetInstanceStatus(appName, "also-bad", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {throw new SpinnakerHttpException(httpError(400))} 1 * task.updateStatus("PHASE", { it.startsWith("Looking up discovery") }) 3 * task.updateStatus("PHASE", { it.startsWith("Attempting to mark") }) + 2 * task.updateStatus('PHASE', { it.startsWith("Failed updating status") }) 1 * task.updateStatus("PHASE", { it.startsWith("Failed marking instances 'UP'")}) 0 * _ where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["good", "bad", "also-bad"] - } void "should succeed despite some failures due to targetHealthyDeployPercentage"() { @@ -234,26 +236,29 @@ class DiscoverySupportUnitSpec extends Specification { credentials: TestCredential.named('test', [discovery: discoveryUrl]), targetHealthyDeployPercentage: 20 ) + 
discoverySupport.eurekaSupportConfigurationProperties.retryMax = 1 when: discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds) then: - task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) - 1 * eureka.getInstanceInfo(_) >> [ instance: [ app: appName ] ] - 1 * eureka.resetInstanceStatus(appName, "bad", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> httpError(500) - 1 * eureka.resetInstanceStatus(appName, "good", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> response(200) - 1 * eureka.resetInstanceStatus(appName, "also-bad", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> httpError(500) + task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) + 1 * eureka.getInstanceInfo(_) >> [ instance: [ app: appName, status: "OUT_OF_SERVICE" ] ] + 1 * eureka.resetInstanceStatus(appName, "bad", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {throw new SpinnakerHttpException(httpError(500))} + 1 * eureka.resetInstanceStatus(appName, "good", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> response(200) + 1 * eureka.resetInstanceStatus(appName, "also-bad", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {throw new SpinnakerHttpException(httpError(500))} 1 * task.updateStatus("PHASE", { it.startsWith("Looking up discovery") }) 3 * task.updateStatus("PHASE", { it.startsWith("Attempting to mark") }) 0 * task.updateStatus("PHASE", { it.startsWith("Failed marking instances 'UP'")}) + 2 * task.updateStatus('PHASE', { it.startsWith("Failed updating status")}) + 1 * task.addResultObjects([['discoverySkippedInstanceIds':['bad', 'also-bad']]]) 0 * task.fail() 0 * _ where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["good", "bad", "also-bad"] @@ -267,70 +272,35 @@ class DiscoverySupportUnitSpec extends Specification { credentials: TestCredential.named('test', [discovery: discoveryUrl]), targetHealthyDeployPercentage: 50 ) + discoverySupport.eurekaSupportConfigurationProperties.retryMax = 1 when: - discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds) + discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds, true) then: - task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) - 1 * eureka.getInstanceInfo(_) >> [ instance: [ app: appName ] ] - 1 * eureka.resetInstanceStatus(appName, "bad", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> httpError(500) - 1 * eureka.resetInstanceStatus(appName, "good", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> response(200) - 1 * eureka.resetInstanceStatus(appName, "also-bad", AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> httpError(500) + task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) + 1 * eureka.getInstanceInfo(_) >> [ instance: [ app: appName, status: "OUT_OF_SERVICE" ] ] + 1 * eureka.resetInstanceStatus(appName, "bad", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {throw new SpinnakerHttpException(httpError(500))} + 1 * eureka.resetInstanceStatus(appName, "good", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> response(200) + 1 * eureka.resetInstanceStatus(appName, "also-bad", AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> {throw new 
SpinnakerHttpException(httpError(500))} 1 * task.updateStatus("PHASE", { it.startsWith("Looking up discovery") }) 3 * task.updateStatus("PHASE", { it.startsWith("Attempting to mark") }) 1 * task.updateStatus("PHASE", { it.startsWith("Failed marking instances 'UP'")}) + 2 * task.updateStatus('PHASE', { it.startsWith("Failed updating status of")}) 1 * task.fail() 0 * _ where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["good", "bad", "also-bad"] - } - void "should retry on http errors from discovery"() { - given: - def task = Mock(Task) - def description = new EnableDisableInstanceDiscoveryDescription( - region: 'us-west-1', - credentials: TestCredential.named('test', [discovery: discoveryUrl]) - ) - - when: - discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds) - - then: "should retry on NOT_FOUND" - 3 * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) - 0 * task.fail() - 2 * eureka.getInstanceInfo(_) >> { - throw failure - } >> - [ - instance: [ - app: appName - ] - ] - instanceIds.each { - 1 * eureka.resetInstanceStatus(appName, it, AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> response(200) - } - - where: - failure << [httpError(404), httpError(406), httpError(503), amazonError(503)] - - discoveryUrl = "http://us-west-1.discovery.netflix.net" - region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable - appName = "kato" - instanceIds = ["i-123"] - - } - - void "should retry on NOT_FOUND from discovery up to DISCOVERY_RETRY_MAX times"() { + @Unroll + void "should retry on NOT_FOUND from getInstanceInfo up to DISCOVERY_RETRY_MAX times"() { given: def task = Mock(Task) def description = new EnableDisableInstanceDiscoveryDescription( @@ -342,19 +312,20 @@ class DiscoverySupportUnitSpec extends Specification { discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds) then: "should only retry a maximum of DISCOVERY_RETRY_MAX times on NOT_FOUND" - discoverySupport.eurekaSupportConfigurationProperties.retryMax * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + discoverySupport.eurekaSupportConfigurationProperties.retryMax * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) discoverySupport.eurekaSupportConfigurationProperties.retryMax * eureka.getInstanceInfo(_) >> { - throw httpError(404) + throw new SpinnakerHttpException(httpError(errorCode)) } 0 * task.fail() - thrown(RetrofitError) + thrown(SpinnakerHttpException) where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["i-123"] + errorCode << [404, 406, 503] } void "should retry on non 200 response from discovery"() { @@ -372,24 +343,25 @@ class DiscoverySupportUnitSpec extends Specification { 1 * eureka.getInstanceInfo('i-123') >> [ instance: [ - app: appName + app: appName, + status: "OUT_OF_SERVICE" ] ] - 3 * eureka.resetInstanceStatus(appName, 'i-123', AbstractEurekaSupport.DiscoveryStatus.Disable.value) >>> [response(302), response(201), response(200)] - 4 * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + 3 * eureka.resetInstanceStatus(appName, 
'i-123', AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >>> [response(302), response(201), response(200)] + 4 * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 0 * task.fail() where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["i-123"] } - void "should NOT fail disable operation if instance is not found"() { + @Unroll + void "should NOT fail if strict=#strict for #status operation if instance is not found"() { given: - def status = AbstractEurekaSupport.DiscoveryStatus.Disable def task = Mock(Task) def description = new EnableDisableInstanceDiscoveryDescription( region: 'us-east-1', @@ -406,12 +378,18 @@ class DiscoverySupportUnitSpec extends Specification { app: appName ] ] - eureka.updateInstanceStatus(appName, 'i-123', status.value) >> { throw httpError(404) } - task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + eureka.updateInstanceStatus(appName, 'i-123', status.value) >> { throw new SpinnakerHttpException(httpError(404)) } + eureka.resetInstanceStatus(appName, 'i-123', AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> { throw new SpinnakerHttpException(httpError(404)) } + task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 0 * task.fail() where: appName = "kato" + status | strict + AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE | false + AbstractEurekaSupport.DiscoveryStatus.UP | false + AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE | true + AbstractEurekaSupport.DiscoveryStatus.UP | true } void "should attempt to mark each instance in discovery even if some fail"() { @@ -426,16 +404,17 @@ class DiscoverySupportUnitSpec extends Specification { discoverySupport.updateDiscoveryStatusForInstances(description, task, "PHASE", discoveryStatus, instanceIds) then: "should retry on NOT_FOUND" - (instanceIds.size() + 1) * task.getStatus() >> new DefaultTaskStatus(state: TaskState.STARTED) + (instanceIds.size() + 1) * task.getStatus() >> new DefaultTaskStatus(TaskState.STARTED) 1 * eureka.getInstanceInfo(_) >> [ instance: [ - app: appName + app: appName, + status: "OUT_OF_SERVICE" ] ] 1 * task.fail() instanceIds.eachWithIndex { it, idx -> - 1 * eureka.resetInstanceStatus(appName, it, AbstractEurekaSupport.DiscoveryStatus.Disable.value) >> { + 1 * eureka.resetInstanceStatus(appName, it, AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE.value) >> { if (!result[idx]) { throw new RuntimeException("blammo") } @@ -446,7 +425,7 @@ class DiscoverySupportUnitSpec extends Specification { where: discoveryUrl = "http://us-west-1.discovery.netflix.net" region = "us-west-1" - discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.Enable + discoveryStatus = AbstractEurekaSupport.DiscoveryStatus.UP appName = "kato" instanceIds = ["i-123", "i-345", "i-456"] result = [true, false, true] @@ -591,8 +570,11 @@ class DiscoverySupportUnitSpec extends Specification { Map launchConfig ServerGroup.InstanceCounts instanceCounts ServerGroup.Capacity capacity - Boolean isDisabled() {disabled} ServerGroup.ImageSummary getImageSummary() {} ServerGroup.ImagesSummary getImagesSummary() {} + + Boolean isDisabled() { + disabled + } } }
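Taken together with the strict flag added to updateDiscoveryStatusForInstances, these cases fix the 404 semantics: whether marking instances UP or OUT_OF_SERVICE, an instance unknown to Eureka must not fail the task. A hedged sketch of that guard (names are illustrative, not the actual AbstractEurekaSupport implementation; it assumes SpinnakerHttpException exposes the status via getResponseCode()):

    import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException

    // Illustrative only: tolerate NOT_FOUND when toggling a discovery status.
    boolean updateStatusTolerantly(Closure eurekaCall) {
      try {
        eurekaCall()
        return true
      } catch (SpinnakerHttpException e) {
        if (e.responseCode == 404) {  // assumption: getResponseCode() carries the HTTP status
          return false                // instance not in Eureka: treat as a no-op, don't fail the task
        }
        throw e                       // other HTTP errors still propagate (and may be retried)
      }
    }

diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/EnableDisableInstancesInDiscoveryAtomicOperationUnitSpec.groovy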
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/EnableDisableInstancesInDiscoveryAtomicOperationUnitSpec.groovy index 30dad1a2ab8..e73a1352151 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/EnableDisableInstancesInDiscoveryAtomicOperationUnitSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/discovery/EnableDisableInstancesInDiscoveryAtomicOperationUnitSpec.groovy @@ -65,13 +65,13 @@ class EnableDisableInstancesInDiscoveryAtomicOperationUnitSpec extends Specifica then: 1 * operation.discoverySupport.updateDiscoveryStatusForInstances( - _, _, _, expectedDiscoveryStatus, description.instanceIds + _, _, _, expectedDiscoveryStatus, description.instanceIds, true ) where: operation || expectedDiscoveryStatus - new EnableInstancesInDiscoveryAtomicOperation(description) || DiscoveryStatus.Enable - new DisableInstancesInDiscoveryAtomicOperation(description) || DiscoveryStatus.Disable + new EnableInstancesInDiscoveryAtomicOperation(description) || DiscoveryStatus.UP + new DisableInstancesInDiscoveryAtomicOperation(description) || DiscoveryStatus.OUT_OF_SERVICE } @Unroll diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperationSpec.groovy index bb2009865fd..8a3dcafad43 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperationSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/DeleteAmazonLoadBalancerV2AtomicOperationSpec.groovy @@ -28,6 +28,7 @@ import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsR import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult import com.amazonaws.services.elasticloadbalancingv2.model.Listener import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancer +import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancerAttribute import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLoadBalancerDescription import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider @@ -66,14 +67,34 @@ class DeleteAmazonLoadBalancerV2AtomicOperationSpec extends Specification { op.operate([]) then: - 1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: [description.loadBalancerName])) >> new DescribeLoadBalancersResult(loadBalancers: [ new LoadBalancer(loadBalancerArn: loadBalancerArn) ]) - 1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: [ new Listener(listenerArn: listenerArn) ]) + 1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: [description.loadBalancerName])) >> new DescribeLoadBalancersResult(loadBalancers: [new LoadBalancer(loadBalancerArn: loadBalancerArn)]) + 1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: [new LoadBalancerAttribute().withKey("deletion_protection.enabled").withValue("false")]] + 1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new 
DescribeListenersResult(listeners: [new Listener(listenerArn: listenerArn)]) 1 * loadBalancing.deleteListener(new DeleteListenerRequest(listenerArn: listenerArn)) - 1 * loadBalancing.describeTargetGroups((new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn))) >> new DescribeTargetGroupsResult(targetGroups: [ new TargetGroup(targetGroupArn: targetGroupArn) ]) + 1 * loadBalancing.describeTargetGroups((new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn))) >> new DescribeTargetGroupsResult(targetGroups: [new TargetGroup(targetGroupArn: targetGroupArn)]) 1 * loadBalancing.deleteTargetGroup(new DeleteTargetGroupRequest(targetGroupArn: targetGroupArn)) 1 * loadBalancing.deleteLoadBalancer(_) >> { DeleteLoadBalancerRequest req -> assert req.loadBalancerArn == loadBalancerArn } 0 * _ } + + void "should abort if deletion protection is enabled"() { + setup: + def loadBalancerArn = "foo:test" + def loadBalancing = Mock(AmazonElasticLoadBalancing) + def amazonClientProvider = Stub(AmazonClientProvider) + amazonClientProvider.getAmazonElasticLoadBalancingV2(credz, _, true) >> loadBalancing + op.amazonClientProvider = amazonClientProvider + + when: + op.operate([]) + + then: + 1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: [description.loadBalancerName])) >> new DescribeLoadBalancersResult(loadBalancers: [new LoadBalancer(loadBalancerArn: loadBalancerArn, loadBalancerName: 'test')]) + 1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: [new LoadBalancerAttribute().withKey("deletion_protection.enabled").withValue("true")]] + 0 * _ + DeleteAmazonLoadBalancerV2AtomicOperation.DeletionProtectionEnabledException ex = thrown() + ex.message == "Load Balancer test has deletion protection enabled. Aborting delete operation." + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/IngressLoadBalancerBuilderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/IngressLoadBalancerBuilderSpec.groovy new file mode 100644 index 00000000000..8054f90a1f8 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/IngressLoadBalancerBuilderSpec.groovy @@ -0,0 +1,117 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer + +import com.amazonaws.services.ec2.model.IpPermission +import com.amazonaws.services.ec2.model.SecurityGroup +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory +import spock.lang.Specification +import spock.lang.Subject + +class IngressLoadBalancerBuilderSpec extends Specification { + + def securityGroupLookup = Mock(SecurityGroupLookupFactory.SecurityGroupLookup) + def securityGroupLookupFactory = Stub(SecurityGroupLookupFactory) { + getInstance("us-east-1") >> securityGroupLookup + } + + def elbSecurityGroup = new SecurityGroup() + .withVpcId("vpcId") + .withGroupId("sg-1234") + .withGroupName("kato-elb") + + def applicationSecurityGroup = new SecurityGroup() + .withVpcId("vpcId") + .withGroupId("sg-1111") + .withGroupName("kato") + + def elbSecurityGroupUpdater = Mock(SecurityGroupLookupFactory.SecurityGroupUpdater) + def appSecurityGroupUpdater = Mock(SecurityGroupLookupFactory.SecurityGroupUpdater) + def credentials = TestCredential.named('bar') + + @Subject IngressLoadBalancerBuilder builder = new IngressLoadBalancerBuilder() + + void "should add ingress if not already present"() { + given: + Set ports = [7001, 8501] + + when: + builder.ingressApplicationLoadBalancerGroup("kato", "us-east-1", "bar", credentials, "vpcId", ports, securityGroupLookupFactory) + + then: + 1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.of(elbSecurityGroupUpdater) + 1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater) + 1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup + 1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup + 1 * appSecurityGroupUpdater.addIngress(_) >> { + def permissions = it[0] as List + assert permissions.size() == 2 + assert 7001 in permissions*.fromPort && 8501 in permissions*.fromPort + assert 7001 in permissions*.toPort && 8501 in permissions*.toPort + assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId + assert elbSecurityGroup.groupId in permissions[1].userIdGroupPairs*.groupId + } + } + + void "should auto-create application load balancer security group"() { + given: + Set ports = [7001, 8501] + + when: + builder.ingressApplicationLoadBalancerGroup("kato", "us-east-1", "bar", credentials, "vpcId", ports, securityGroupLookupFactory) + + then: "an application elb group should be created and ingressed properly" + 1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.empty() + 1 * securityGroupLookup.createSecurityGroup(_) >> elbSecurityGroupUpdater + 1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater) + 1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup + 1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup + 1 * appSecurityGroupUpdater.addIngress(_) >> { + def permissions = it[0] as List + assert permissions.size() == 2 + assert permissions*.fromPort.sort() == [7001, 8501] && permissions*.toPort.sort() == [7001, 8501] + assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId + assert elbSecurityGroup.groupId in permissions[1].userIdGroupPairs*.groupId + } + } + + void "should auto-create application load balancer and application security groups"() { + given: + Set ports = [7001, 8501] + + 
when: + builder.ingressApplicationLoadBalancerGroup("kato", "us-east-1", "bar", credentials, "vpcId", ports, securityGroupLookupFactory) + + then: + 1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.empty() + 1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.empty() + 1 * securityGroupLookup.createSecurityGroup({ it.name == 'kato-elb' }) >> elbSecurityGroupUpdater + 1 * securityGroupLookup.createSecurityGroup({ it.name == 'kato' }) >> appSecurityGroupUpdater + 1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup + 1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup + 1 * appSecurityGroupUpdater.addIngress(_) >> { + def permissions = it[0] as List + assert permissions.size() == 2 + assert permissions*.fromPort.sort() == [7001, 8501] && permissions*.toPort.sort() == [7001, 8501] + assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId + assert elbSecurityGroup.groupId in permissions[1].userIdGroupPairs*.groupId + } + + } + +}
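For reference, the permissions these three cases assert pair each port with the ELB security group as the ingress source; roughly, using the AWS SDK model classes the spec already works with (the helper name is made up):

    import com.amazonaws.services.ec2.model.IpPermission
    import com.amazonaws.services.ec2.model.UserIdGroupPair

    // One tcp rule per port, each sourced from the ELB's security group.
    List<IpPermission> elbIngressFor(String elbGroupId, Set<Integer> ports) {
      ports.collect { port ->
        new IpPermission()
            .withIpProtocol("tcp")
            .withFromPort(port)
            .withToPort(port)
            .withUserIdGroupPairs(new UserIdGroupPair().withGroupId(elbGroupId))
      }
    }

    // elbIngressFor("sg-1234", [7001, 8501] as Set) yields the two rules asserted above.

diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerStrategySpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerStrategySpec.groovy deleted file mode 100644 index dbd833154ee..00000000000 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/MigrateLoadBalancerStrategySpec.groovy +++ /dev/null @@ -1,757 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.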
- */ - -package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer - -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult -import com.amazonaws.services.ec2.model.DescribeVpcsResult -import com.amazonaws.services.ec2.model.SecurityGroup -import com.amazonaws.services.ec2.model.Tag -import com.amazonaws.services.ec2.model.Vpc -import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing -import com.amazonaws.services.elasticloadbalancing.model.CreateLoadBalancerResult -import com.amazonaws.services.elasticloadbalancing.model.CrossZoneLoadBalancing -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancerAttributesResult -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancerPoliciesResult -import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersResult -import com.amazonaws.services.elasticloadbalancing.model.HealthCheck -import com.amazonaws.services.elasticloadbalancing.model.Listener -import com.amazonaws.services.elasticloadbalancing.model.ListenerDescription -import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes -import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription -import com.amazonaws.services.elasticloadbalancing.model.PolicyAttributeDescription -import com.amazonaws.services.elasticloadbalancing.model.PolicyDescription -import com.netflix.spinnaker.config.AwsConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.DefaultMigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateLoadBalancerStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.handlers.MigrateSecurityGroupStrategy -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.LoadBalancerLocation -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.LoadBalancerMigrator.TargetLoadBalancerLocation -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupReference -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.MigrateSecurityGroupResult -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupLookup -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater -import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer -import com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory.RegionScopedProvider -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -class MigrateLoadBalancerStrategySpec extends Specification { - - @Subject - MigrateLoadBalancerStrategy strategy - - @Shared - NetflixAmazonCredentials testCredentials = TestCredential.named('test') - - @Shared - NetflixAmazonCredentials prodCredentials = TestCredential.named('prod') - - 
MigrateSecurityGroupStrategy securityGroupStrategy = Mock(MigrateSecurityGroupStrategy) - - RegionScopedProviderFactory regionScopedProviderFactory = Mock(RegionScopedProviderFactory) - - DeployDefaults deployDefaults = Mock(DeployDefaults) - - SecurityGroupLookup sourceLookup = Mock(SecurityGroupLookup) - - SecurityGroupLookup targetLookup = Mock(SecurityGroupLookup) - - AmazonClientProvider amazonClientProvider = Mock(AmazonClientProvider) - - def setup() { - TaskRepository.threadLocalTask.set(Stub(Task)) - strategy = new DefaultMigrateLoadBalancerStrategy(amazonClientProvider, regionScopedProviderFactory, deployDefaults) - strategy.sourceLookup = sourceLookup - strategy.targetLookup = targetLookup - strategy.migrateSecurityGroupStrategy = securityGroupStrategy - - sourceLookup.getCredentialsForId(testCredentials.accountId) >> testCredentials - targetLookup.getCredentialsForId(testCredentials.accountId) >> testCredentials - - sourceLookup.getAccountNameForId(testCredentials.accountId) >> 'test' - targetLookup.getAccountNameForId(testCredentials.accountId) >> 'test' - - targetLookup.accountIdExists(testCredentials.accountId) >> true - - sourceLookup.getCredentialsForId(prodCredentials.accountId) >> prodCredentials - targetLookup.getCredentialsForId(prodCredentials.accountId) >> prodCredentials - - sourceLookup.getAccountNameForId(prodCredentials.accountId) >> 'prod' - targetLookup.getAccountNameForId(prodCredentials.accountId) >> 'prod' - - targetLookup.accountIdExists(prodCredentials.accountId) >> true - } - - void 'throws exception if no availability zones are supplied'() { - given: - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', vpcId: 'vpc-1', useZonesFromSource: false) - - when: - strategy.generateResults(sourceLookup, sourceLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - thrown(IllegalStateException) - 0 * _ - } - - void 'resolves zones from source load balancer if requested'() { - given: - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', vpcId: 'vpc-1', useZonesFromSource: true) - AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing) - - when: - strategy.generateResults(sourceLookup, sourceLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - 1 * amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - 1 * loadBalancing.describeLoadBalancers(_) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions( - new LoadBalancerDescription().withLoadBalancerName('app-elb').withAvailabilityZones([]) - ) - thrown(IllegalStateException) - 0 * _ - } - - void 'throws exception when migrating to VPC and load balancer name (not changed) already exists in Classic'() { - given: - def loadBalancerName = '12345678901234567890123456789012' - LoadBalancerDescription sourceDescription = new LoadBalancerDescription().withLoadBalancerName(loadBalancerName) - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: loadBalancerName) - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: 
testCredentials, region: 'us-east-1', vpcId: 'vpc-1', availabilityZones: ['us-east-1a']) - strategy.source = source - strategy.target = target - strategy.dryRun = false - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing) - - when: - strategy.generateResults(sourceLookup, sourceLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - thrown(IllegalStateException) - - 2 * loadBalancing.describeLoadBalancers(_) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription) - amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2 - amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - amazonEC2.describeVpcs(_) >> new DescribeVpcsResult().withVpcs(new Vpc().withTags(new Tag("Name", "vpc1"))) - } - - void 'throws exception when migrating to VPC and no subnets found for subnetType'() { - given: - def loadBalancerName = 'app-elb' - def newLoadBalancerName = 'app-elb-vpc1' - LoadBalancerDescription sourceDescription = new LoadBalancerDescription().withLoadBalancerName(loadBalancerName) - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', vpcId: 'vpc-1', availabilityZones: ['us-east-1a']) - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing) - RegionScopedProvider regionProvider = Mock(RegionScopedProvider) - SubnetAnalyzer subnetAnalyzer = Mock(SubnetAnalyzer) - - when: - strategy.generateResults(sourceLookup, sourceLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - 1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == [loadBalancerName]}) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription) - 1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == [newLoadBalancerName]}) >> new DescribeLoadBalancersResult() - - 1 * amazonEC2.describeSecurityGroups(_) >> new DescribeSecurityGroupsResult().withSecurityGroups([new SecurityGroup(vpcId: 'vpc-1', groupName: 'app-elb'), new SecurityGroup()]) - - amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2 - amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1', true) >> amazonEC2 - amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionProvider - deployDefaults.addAppGroupToServerGroup >> false - regionProvider.getSubnetAnalyzer() >> subnetAnalyzer - amazonEC2.describeVpcs(_) >> new DescribeVpcsResult().withVpcs(new Vpc().withTags(new Tag("Name", "vpc1"))) - 1 * subnetAnalyzer.getSubnetIdsForZones(['us-east-1a'], 'internal', SubnetTarget.ELB, 1) >> [] - thrown(IllegalStateException) - } - - void 'throws exception when migrating to VPC and new load balancer name already exists in Classic'() { - given: - def loadBalancerName = 'app-elb' - def newLoadBalancerName = 'app-elb-vpc1' - LoadBalancerDescription sourceDescription = new LoadBalancerDescription().withLoadBalancerName(loadBalancerName) - LoadBalancerDescription targetDescription = new LoadBalancerDescription().withLoadBalancerName(newLoadBalancerName) - LoadBalancerLocation source = new 
LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: loadBalancerName) - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', vpcId: 'vpc-1', name: newLoadBalancerName, availabilityZones: ['us-east-1a']) - strategy.source = source - strategy.target = target - strategy.dryRun = false - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing) - - when: - strategy.generateResults(sourceLookup, sourceLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - thrown(IllegalStateException) - - 1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == [loadBalancerName]}) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription) - 1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == [newLoadBalancerName]}) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(targetDescription) - amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2 - amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - amazonEC2.describeVpcs(_) >> new DescribeVpcsResult().withVpcs(new Vpc().withTags(new Tag("Name", "vpc1"))) - } - - void 'getTargetSecurityGroups maps security group IDs to actual security groups'() { - given: - LoadBalancerDescription sourceDescription = new LoadBalancerDescription().withSecurityGroups('sg-1', 'sg-2') - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1') - strategy.source = source - strategy.target = target - strategy.dryRun = true - - MigrateSecurityGroupReference targetGroup1 = new MigrateSecurityGroupReference(targetName: 'group1') - MigrateSecurityGroupReference targetGroup2 = new MigrateSecurityGroupReference(targetName: 'group2') - - def sourceGroup1 = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def sourceGroup2 = new SecurityGroup(groupName: 'group2', groupId: 'sg-2', ownerId: testCredentials.accountId) - - def sourceUpdater1 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup1 - } - def sourceUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup2 - } - - when: - def targets = strategy.getTargetSecurityGroups(sourceDescription, new MigrateLoadBalancerResult()) - - then: - targets.target == [targetGroup1, targetGroup2] - 1 * securityGroupStrategy.generateResults({s -> s.name == 'group1'}, _, _, _, _, _) >> new MigrateSecurityGroupResult(target: targetGroup1) - 1 * securityGroupStrategy.generateResults({s -> s.name == 'group2'}, _, _, _, _, _) >> new MigrateSecurityGroupResult(target: targetGroup2) - sourceLookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(sourceUpdater1) - sourceLookup.getSecurityGroupById('test', 'sg-2', 'vpc-1') >> Optional.of(sourceUpdater2) - } - - void 'warns when migrating across accounts on secured listeners'() { - given: - LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb', - healthCheck: new HealthCheck(), - listenerDescriptions: [ - new ListenerDescription().withListener( - new Listener().withLoadBalancerPort(443).withInstancePort(7000).withSSLCertificateId('does:not:matter')), - new 
ListenerDescription().withListener( - new Listener().withLoadBalancerPort(80).withInstancePort(7000)) - ] - ) - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: prodCredentials, region: 'eu-west-1', availabilityZones: ['eu-west-1a']) - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing) - AmazonElasticLoadBalancing targetLoadBalancing = Mock(AmazonElasticLoadBalancing) - RegionScopedProvider regionProvider = Mock(RegionScopedProvider) - - when: - def result = strategy.generateResults(sourceLookup, targetLookup, securityGroupStrategy, source, target, null, 'app', false, false) - - then: - result.warnings.size() == 1 - amazonClientProvider.getAmazonEC2(prodCredentials, 'eu-west-1', true) >> amazonEC2 - amazonClientProvider.getAmazonEC2(prodCredentials, 'eu-west-1') >> amazonEC2 - amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - amazonClientProvider.getAmazonElasticLoadBalancing(prodCredentials, 'eu-west-1', true) >> targetLoadBalancing - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionProvider - 1 * loadBalancing.describeLoadBalancers(_) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription) - 1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult() - 1 * targetLoadBalancing.describeLoadBalancers(_) >> null - 1 * loadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult() - 1 * targetLoadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult() - 1 * targetLoadBalancing.createLoadBalancer({ it.listeners.loadBalancerPort == [80]}) >> new CreateLoadBalancerResult().withDNSName('new-elb-dns') - 1 * targetLoadBalancing.configureHealthCheck(_) - 0 * amazonEC2.authorizeSecurityGroupIngress(_) - } - - void 'skips skipped security groups'() { - given: - LoadBalancerDescription sourceDescription = new LoadBalancerDescription(healthCheck: new HealthCheck(), - listenerDescriptions: []).withSecurityGroups('sg-1', 'sg-2') - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, vpcId: 'vpc-1', region: 'us-east-1') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', name: 'new-elb', availabilityZones: ['eu-west-1a']) - strategy.source = source - strategy.target = target - strategy.dryRun = true - - MigrateSecurityGroupReference targetGroup1 = new MigrateSecurityGroupReference(targetName: 'group1', targetId: 'sg-1b') - MigrateSecurityGroupReference targetGroup2 = new MigrateSecurityGroupReference(targetName: 'group2', targetId: 'sg-2b') - - def sourceGroup1 = new SecurityGroup(groupName: 'group1', groupId: 'sg-1', ownerId: testCredentials.accountId) - def sourceGroup2 = new SecurityGroup(groupName: 'group2', groupId: 'sg-2', ownerId: testCredentials.accountId) - def appGroup = new SecurityGroup(groupName: 'app-elb', groupId: 'sg-elb', ownerId: prodCredentials.accountId, vpcId: 'vpc-2') - - def sourceUpdater1 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup1 - } - def sourceUpdater2 = Stub(SecurityGroupUpdater) { - getSecurityGroup() >> sourceGroup2 - } - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - AmazonElasticLoadBalancing loadBalancing = 
Mock(AmazonElasticLoadBalancing) - AmazonElasticLoadBalancing targetLoadBalancing = Mock(AmazonElasticLoadBalancing) - RegionScopedProvider regionProvider = Mock(RegionScopedProvider) - SubnetAnalyzer subnetAnalyzer = Mock(SubnetAnalyzer) - - when: - def results = strategy.generateResults(sourceLookup, targetLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - results.securityGroups.size() == 2 - 1 * securityGroupStrategy.generateResults({s -> s.name == 'group1'}, _, _, _, _, _) >> new MigrateSecurityGroupResult(target: targetGroup1, reused: [targetGroup1]) - 1 * securityGroupStrategy.generateResults({s -> s.name == 'group2'}, _, _, _, _, _) >> new MigrateSecurityGroupResult(target: targetGroup2, skipped: [targetGroup2]) - sourceLookup.getSecurityGroupById('test', 'sg-1', 'vpc-1') >> Optional.of(sourceUpdater1) - sourceLookup.getSecurityGroupById('test', 'sg-2', 'vpc-1') >> Optional.of(sourceUpdater2) - - amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2 - amazonClientProvider.getAmazonEC2(prodCredentials, 'eu-west-1') >> amazonEC2 - amazonClientProvider.getAmazonEC2(prodCredentials, 'eu-west-1', true) >> amazonEC2 - 2 * amazonEC2.describeVpcs(_) >> new DescribeVpcsResult().withVpcs(new Vpc()) - - amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - 1 * loadBalancing.describeLoadBalancers(_) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription) - - 1 * amazonEC2.describeSecurityGroups(_) >> new DescribeSecurityGroupsResult().withSecurityGroups([appGroup]) - - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionProvider - regionProvider.getSubnetAnalyzer() >> subnetAnalyzer - 1 * subnetAnalyzer.getSubnetIdsForZones(['eu-west-1a'], 'internal', _, _) >> ['subnet-1'] - amazonClientProvider.getAmazonElasticLoadBalancing(prodCredentials, 'eu-west-1', true) >> targetLoadBalancing - - 1 * targetLoadBalancing.createLoadBalancer({it.securityGroups == ['sg-1b', 'sg-elb']}) >> new CreateLoadBalancerResult().withDNSName('new-elb-dns') - 1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult() - 1 * loadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult() - 1 * targetLoadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult() - } - - - void 'creates app group and elb group and adds classic link ingress when moving from non-VPC to VPC'() { - given: - LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb', - healthCheck: new HealthCheck(), - listenerDescriptions: [ new ListenerDescription().withListener( - new Listener().withLoadBalancerPort(80).withInstancePort(7000)) - ] - ) - LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb') - TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: prodCredentials, vpcId: 'vpc-2', region: 'eu-west-1', availabilityZones: ['eu-west-1a']) - - AmazonEC2 amazonEC2 = Mock(AmazonEC2) - AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing) - AmazonElasticLoadBalancing targetLoadBalancing = Mock(AmazonElasticLoadBalancing) - RegionScopedProvider regionProvider = Mock(RegionScopedProvider) - SubnetAnalyzer subnetAnalyzer = Mock(SubnetAnalyzer) - - def appGroup = new SecurityGroup(groupName: 'app', groupId: 'sg-3', ownerId: 
prodCredentials.accountId, vpcId: 'vpc-2') - def elbGroup = new SecurityGroup(groupName: 'app-elb', groupId: 'sg-4', ownerId: prodCredentials.accountId, vpcId: 'vpc-2') - def classicGroup = new SecurityGroup(groupName: 'classic-link', groupId: 'sg-5', ownerId: prodCredentials.accountId, vpcId: 'vpc-2') - - when: - def results = strategy.generateResults(sourceLookup, targetLookup, securityGroupStrategy, source, target, 'internal', 'app', true, false) - - then: - results.securityGroups[0].created.targetName.sort() == ['app', 'app-elb'] - results.securityGroups[0].target.targetName == 'app-elb' - amazonClientProvider.getAmazonEC2(prodCredentials, 'eu-west-1', true) >> amazonEC2 - amazonClientProvider.getAmazonEC2(prodCredentials, 'eu-west-1') >> amazonEC2 - amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing - amazonClientProvider.getAmazonElasticLoadBalancing(prodCredentials, 'eu-west-1', true) >> targetLoadBalancing - regionScopedProviderFactory.forRegion(prodCredentials, 'eu-west-1') >> regionProvider - regionProvider.getSubnetAnalyzer() >> subnetAnalyzer - 1 * loadBalancing.describeLoadBalancers(_) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription) - 1 * targetLoadBalancing.createLoadBalancer(_) >> new CreateLoadBalancerResult().withDNSName('new-elb-dns') - 1 * targetLoadBalancing.configureHealthCheck(_) - 1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult() - 1 * loadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult() - 1 * targetLoadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult() - 2 * targetLookup.getSecurityGroupByName(prodCredentials.name, 'classic-link', 'vpc-2') >> Optional.of(new SecurityGroupUpdater(classicGroup, amazonEC2)) - 1 * targetLookup.getSecurityGroupById(prodCredentials.name, 'sg-4', 'vpc-2') >> Optional.of(new SecurityGroupUpdater(elbGroup, amazonEC2)) - 1 * targetLookup.getSecurityGroupById(prodCredentials.name, 'sg-3', 'vpc-2') >> Optional.of(new SecurityGroupUpdater(appGroup, amazonEC2)) - 1 * targetLookup.createSecurityGroup({ it.name == 'app' && it.vpcId == 'vpc-2' && it.credentials == prodCredentials}) >> new SecurityGroupUpdater(appGroup, amazonEC2) - 1 * targetLookup.createSecurityGroup({ it.name == 'app-elb' && it.vpcId == 'vpc-2' && it.credentials == prodCredentials}) >> new SecurityGroupUpdater(elbGroup, amazonEC2) - 1 * amazonEC2.describeSecurityGroups({r -> r.groupIds == ['sg-3']}) >> new DescribeSecurityGroupsResult().withSecurityGroups([appGroup]) - 1 * amazonEC2.describeSecurityGroups({r -> r.filters[0].values == ['app', 'app-elb']}) >> new DescribeSecurityGroupsResult().withSecurityGroups([]) - 1 * amazonEC2.describeVpcs(_) >> new DescribeVpcsResult().withVpcs(new Vpc()) - 1 * amazonEC2.authorizeSecurityGroupIngress({r -> r.groupId == 'sg-4' && - !r.ipPermissions.empty && - r.ipPermissions[0].userIdGroupPairs && - r.ipPermissions[0].userIdGroupPairs[0].groupId == 'sg-5' && - r.ipPermissions[0].fromPort == 80 && - r.ipPermissions[0].toPort == 65535 && - r.ipPermissions[0].ipProtocol == 'tcp'}) - 1 * amazonEC2.authorizeSecurityGroupIngress({r -> r.groupId == 'sg-3' && - !r.ipPermissions.empty && - r.ipPermissions[0].userIdGroupPairs && - r.ipPermissions[0].userIdGroupPairs[0].groupId == 'sg-5' && - r.ipPermissions[0].fromPort == 80 && - r.ipPermissions[0].toPort == 65535 && - r.ipPermissions[0].ipProtocol == 'tcp'}) - 1 * 
amazonEC2.authorizeSecurityGroupIngress({r -> r.groupId == 'sg-4' &&
-      r.ipPermissions[0].fromPort == 80 &&
-      r.ipPermissions[0].toPort == 80 &&
-      r.ipPermissions[0].ipProtocol == 'tcp'})
-    1 * amazonEC2.authorizeSecurityGroupIngress({r -> r.groupId == 'sg-3' &&
-      r.ipPermissions[0].fromPort == 7000 &&
-      r.ipPermissions[0].toPort == 7000 &&
-      r.ipPermissions[0].ipProtocol == 'tcp'})
-    deployDefaults.getClassicLinkSecurityGroupName() >> 'classic-link'
-    deployDefaults.getAddAppGroupToServerGroup() >> true
-  }
-
-  @Unroll
-  void 'name generator converts #loadBalancerName to #result'() {
-    when:
-    Vpc sourceVpc = sourceVpcName ? new Vpc().withTags(new Tag('Name', sourceVpcName)) : null
-    Vpc targetVpc = targetVpcName ? new Vpc().withTags(new Tag('Name', targetVpcName)) : null
-
-    then:
-    strategy.generateLoadBalancerName(loadBalancerName, sourceVpc, targetVpc) == result
-
-    where:
-    loadBalancerName | sourceVpcName | targetVpcName || result
-    '12345678901234567890123456789012' | null | null || '12345678901234567890123456789012'
-    '123456789012345678901234567890123' | null | null || '12345678901234567890123456789012'
-    'abc-vpc0' | 'vpc0' | 'vpc2' || 'abc-vpc2'
-    'abc-vpc' | 'vpc0' | 'vpc2' || 'abc-vpc2'
-    'abc-vpc1' | 'vpc0' | 'vpc2' || 'abc-vpc1-vpc2'
-    '123456789012345678901234567-elb' | null | 'vpc2' || '123456789012345678901234567-vpc2'
-    '12345678901234567890123-elb' | null | 'vpc2' || '12345678901234567890123-elb-vpc2'
-  }
-
-  void 'uses supplied target name if present'() {
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: []
-    )
-    LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb')
-    TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', availabilityZones: ['us-east-1a'], name: 'newapp-elb')
-
-    AmazonEC2 amazonEC2 = Mock(AmazonEC2)
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-    RegionScopedProvider regionProvider = Mock(RegionScopedProvider)
-
-    when:
-    strategy.generateResults(sourceLookup, targetLookup, securityGroupStrategy, source, target, null, 'app', false, false)
-
-    then:
-    amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1', true) >> amazonEC2
-    amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2
-    amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing
-    regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionProvider
-    1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == ['app-elb']}) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription)
-    1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == ['newapp-elb']}) >> new DescribeLoadBalancersResult()
-    1 * loadBalancing.createLoadBalancer({ it.loadBalancerName == 'newapp-elb'}) >> new CreateLoadBalancerResult().withDNSName('new-elb-dns')
-    1 * loadBalancing.configureHealthCheck(_)
-    1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult()
-    2 * loadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult()
-    0 * amazonEC2.authorizeSecurityGroupIngress(_)
-  }
-
-  void 'generates target name, removing old suffixes'() {
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: []
-    )
-    LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb-frontend')
-    TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', availabilityZones: ['us-east-1a'])
-
-    AmazonEC2 amazonEC2 = Mock(AmazonEC2)
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-    RegionScopedProvider regionProvider = Mock(RegionScopedProvider)
-
-    when:
-    strategy.generateResults(sourceLookup, targetLookup, securityGroupStrategy, source, target, null, 'app', false, false)
-
-    then:
-    amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1', true) >> amazonEC2
-    amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2
-    amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing
-    regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionProvider
-    1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == ['app-elb-frontend']}) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription)
-    1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == ['app-elb']}) >> new DescribeLoadBalancersResult()
-    1 * loadBalancing.createLoadBalancer({ it.loadBalancerName == 'app-elb'}) >> new CreateLoadBalancerResult().withDNSName('new-elb-dns')
-    1 * loadBalancing.configureHealthCheck(_)
-    1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult()
-    2 * loadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult()
-    0 * amazonEC2.authorizeSecurityGroupIngress(_)
-  }
-
-  void 'applies health check properties to new load balancer'() {
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(target: 'the-target', healthyThreshold: 3, unhealthyThreshold: 4, timeout: 5, interval: 6, ),
-      listenerDescriptions: []
-    )
-    LoadBalancerLocation source = new LoadBalancerLocation(credentials: testCredentials, region: 'us-east-1', name: 'app-elb')
-    TargetLoadBalancerLocation target = new TargetLoadBalancerLocation(credentials: testCredentials, region: 'us-west-1', availabilityZones: ['us-east-1a'])
-
-    AmazonEC2 amazonEC2 = Mock(AmazonEC2)
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-    AmazonElasticLoadBalancing targetLoadBalancing = Mock(AmazonElasticLoadBalancing)
-    RegionScopedProvider regionProvider = Mock(RegionScopedProvider)
-
-    when:
-    strategy.generateResults(sourceLookup, targetLookup, securityGroupStrategy, source, target, null, 'app', false, false)
-
-    then:
-    amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1', true) >> amazonEC2
-    amazonClientProvider.getAmazonEC2(testCredentials, 'us-east-1') >> amazonEC2
-    amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-east-1', true) >> loadBalancing
-    amazonClientProvider.getAmazonElasticLoadBalancing(testCredentials, 'us-west-1', true) >> targetLoadBalancing
-    regionScopedProviderFactory.forRegion(testCredentials, 'us-east-1') >> regionProvider
-    1 * loadBalancing.describeLoadBalancers({ it.loadBalancerNames == ['app-elb']}) >> new DescribeLoadBalancersResult().withLoadBalancerDescriptions(sourceDescription)
-    1 * targetLoadBalancing.describeLoadBalancers({ it.loadBalancerNames == ['app-elb']}) >> new DescribeLoadBalancersResult()
-    1 * targetLoadBalancing.createLoadBalancer({ it.loadBalancerName == 'app-elb'}) >> new CreateLoadBalancerResult().withDNSName('new-elb-dns')
-    1 * targetLoadBalancing.configureHealthCheck({ it.loadBalancerName == 'app-elb' &&
-      it.healthCheck.target == 'the-target' &&
-      it.healthCheck.interval == 6 &&
-      it.healthCheck.healthyThreshold == 3 &&
-      it.healthCheck.unhealthyThreshold == 4 &&
-      it.healthCheck.timeout == 5
-    })
-    1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult()
-    1 * loadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult()
-    1 * targetLoadBalancing.describeLoadBalancerPolicies(_) >> new DescribeLoadBalancerPoliciesResult()
-  }
-
-  void 'applies load balancer policies to new load balancer'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(
-          new Listener().withLoadBalancerPort(443).withInstancePort(7000)).withPolicyNames("custom-policy")
-      ]
-    )
-
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    strategy.applyListenerPolicies(loadBalancing, loadBalancing, sourceDescription, 'app-elb-vpc1')
-
-    then:
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'custom-policy'))
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb-vpc1'}) >> new DescribeLoadBalancerPoliciesResult()
-    1 * loadBalancing.createLoadBalancerPolicy({ it.loadBalancerName == 'app-elb-vpc1' && it.policyName == 'custom-policy'})
-    1 * loadBalancing.setLoadBalancerPoliciesOfListener({
-      it.loadBalancerName == 'app-elb-vpc1' && it.loadBalancerPort == 443 && it.policyNames == ['custom-policy']})
-
-  }
-
-  void 'updates load balancer policies on existing load balancer to match source load balancer'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(
-          new Listener().withLoadBalancerPort(443).withInstancePort(7000)).withPolicyNames("custom-policy")
-      ]
-    )
-
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    strategy.applyListenerPolicies(loadBalancing, loadBalancing, sourceDescription, 'app-elb-vpc1')
-
-    then:
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'custom-policy'))
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb-vpc1'}) >> new DescribeLoadBalancerPoliciesResult()
-    1 * loadBalancing.createLoadBalancerPolicy({ it.loadBalancerName == 'app-elb-vpc1' && it.policyName == 'custom-policy'})
-    1 * loadBalancing.setLoadBalancerPoliciesOfListener({
-      it.loadBalancerName == 'app-elb-vpc1' && it.loadBalancerPort == 443 && it.policyNames == ['custom-policy']})
-
-  }
-
-  void 'does not try to recreate policies on existing load balancer if they already exist'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(
-          new Listener().withLoadBalancerPort(443).withInstancePort(7000)).withPolicyNames("custom-policy")
-      ]
-    )
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    strategy.applyListenerPolicies(loadBalancing, loadBalancing, sourceDescription, 'app-elb-vpc1')
-
-    then:
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'custom-policy'))
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb-vpc1'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'custom-policy'))
-    0 * loadBalancing.createLoadBalancerPolicy(_)
-    1 * loadBalancing.setLoadBalancerPoliciesOfListener({
-      it.loadBalancerName == 'app-elb-vpc1' && it.loadBalancerPort == 443 && it.policyNames == ['custom-policy']})
-
-  }
-
-  @Unroll
-  void 'only adds Reference-Security-Policy attribute if present when creating policy'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(
-          new Listener().withLoadBalancerPort(443).withInstancePort(7000)).withPolicyNames("ref-policy")
-      ]
-    )
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    strategy.applyListenerPolicies(loadBalancing, loadBalancing, sourceDescription, 'app-elb-vpc1')
-
-    then:
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'ref-policy')
-        .withPolicyAttributeDescriptions(
-          attributes.findResults { new PolicyAttributeDescription(attributeName: it, attributeValue: it + "-v") }
-        ))
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb-vpc1'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'other-policy'))
-    1 * loadBalancing.createLoadBalancerPolicy({ it.policyAttributes.attributeName == requestAttributes})
-    1 * loadBalancing.setLoadBalancerPoliciesOfListener({
-      it.loadBalancerName == 'app-elb-vpc1' && it.loadBalancerPort == 443 && it.policyNames == ['ref-policy']})
-
-    where:
-    attributes || requestAttributes
-    ["Reference-Security-Policy", "cipher1"] || ["Reference-Security-Policy"]
-    ["cipher1", "cipher2"] || ["cipher1", "cipher2"]
-  }
-
-  @Unroll
-  void 'prefixes policy name if reserved'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(
-          new Listener().withLoadBalancerPort(443).withInstancePort(7000)).withPolicyNames(policyName)
-      ]
-    )
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    strategy.applyListenerPolicies(loadBalancing, loadBalancing, sourceDescription, 'app-elb-vpc1')
-
-    then:
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: policyName).withPolicyAttributeDescriptions(
-        new PolicyAttributeDescription(attributeName: 'some-cipher', attributeValue: 'some-value')
-      ))
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb-vpc1'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: policyName))
-    1 * loadBalancing.createLoadBalancerPolicy({ it.policyName == newName})
-    1 * loadBalancing.setLoadBalancerPoliciesOfListener({
-      it.loadBalancerName == 'app-elb-vpc1' && it.loadBalancerPort == 443 && it.policyNames == [newName]})
-
-    where:
-    policyName || newName
-    'not-reserved' || 'not-reserved'
-    'ELBSecurityPolicy-1' || 'migrated-ELBSecurityPolicy-1'
-    'ELBSample-1' || 'migrated-ELBSample-1'
-  }
-
-  void 'reuses policies when attributes match'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(
-          new Listener().withLoadBalancerPort(443).withInstancePort(7000)).withPolicyNames("custom-policy")
-      ]
-    )
-    AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    strategy.applyListenerPolicies(loadBalancing, loadBalancing, sourceDescription, 'app-elb-vpc1')
-
-    then:
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'custom-policy').withPolicyAttributeDescriptions(
-        new PolicyAttributeDescription(attributeName: 'some-attr', attributeValue: 'some-val')
-      ))
-    1 * loadBalancing.describeLoadBalancerPolicies({ it.loadBalancerName == 'app-elb-vpc1'}) >> new DescribeLoadBalancerPoliciesResult()
-      .withPolicyDescriptions(new PolicyDescription(policyName: 'custom-policy-vpc').withPolicyAttributeDescriptions(
-        new PolicyAttributeDescription(attributeName: 'some-attr', attributeValue: 'some-val')
-      ))
-    0 * loadBalancing.createLoadBalancerPolicy(_)
-    1 * loadBalancing.setLoadBalancerPoliciesOfListener({
-      it.loadBalancerName == 'app-elb-vpc1' && it.loadBalancerPort == 443 && it.policyNames == ['custom-policy-vpc']})
-  }
-
-  void 'sets cross-zone load balancing flag when legacy listener is present'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(new Listener().withLoadBalancerPort(0).withInstancePort(0))
-      ]
-    )
-    AmazonElasticLoadBalancing client = Mock(AmazonElasticLoadBalancing)
-
-    when:
-    def attributes = strategy.getLoadBalancerAttributes(sourceDescription, client)
-
-    then:
-    1 * client.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult()
-      .withLoadBalancerAttributes(new LoadBalancerAttributes()
-        .withCrossZoneLoadBalancing(new CrossZoneLoadBalancing().withEnabled(false)))
-
-    attributes.crossZoneLoadBalancing.isEnabled()
-  }
-
-  void 'ignores legacy listeners when generating listener lists'() {
-    given:
-    LoadBalancerDescription sourceDescription = new LoadBalancerDescription(loadBalancerName: 'app-elb',
-      healthCheck: new HealthCheck(),
-      listenerDescriptions: [
-        new ListenerDescription().withListener(new Listener().withLoadBalancerPort(443).withInstancePort(7000)),
-        new ListenerDescription().withListener(new Listener().withLoadBalancerPort(0).withInstancePort(0))
-      ]
-    )
-    MigrateLoadBalancerResult result = new MigrateLoadBalancerResult()
-
-    when:
-    def listeners = strategy.getListeners(sourceDescription, result)
-
-    then:
-    listeners.size() == 1
-    listeners.instancePort == [7000]
-  }
-
-}
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerClassicAtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerClassicAtomicOperationSpec.groovy
index 542b65e8aad..6c444f3094d 100644
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerClassicAtomicOperationSpec.groovy
+++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerClassicAtomicOperationSpec.groovy
@@ -17,47 +17,25 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer
 
 import com.amazonaws.AmazonServiceException
-import com.amazonaws.services.ec2.model.IpPermission
-import com.amazonaws.services.ec2.model.SecurityGroup
 import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing
-import com.amazonaws.services.elasticloadbalancing.model.ApplySecurityGroupsToLoadBalancerRequest
-import com.amazonaws.services.elasticloadbalancing.model.ConfigureHealthCheckRequest
-import com.amazonaws.services.elasticloadbalancing.model.ConnectionDraining
-import com.amazonaws.services.elasticloadbalancing.model.CreateLoadBalancerListenersRequest
-import com.amazonaws.services.elasticloadbalancing.model.CreateLoadBalancerRequest
-import com.amazonaws.services.elasticloadbalancing.model.CreateLoadBalancerResult
-import com.amazonaws.services.elasticloadbalancing.model.CrossZoneLoadBalancing
-import com.amazonaws.services.elasticloadbalancing.model.DeleteLoadBalancerListenersRequest
-import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancerAttributesRequest
-import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancerAttributesResult
-import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersRequest
-import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersResult
-import com.amazonaws.services.elasticloadbalancing.model.HealthCheck
-import com.amazonaws.services.elasticloadbalancing.model.Listener
-import com.amazonaws.services.elasticloadbalancing.model.ListenerDescription
-import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes
-import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription
-import com.amazonaws.services.elasticloadbalancing.model.ModifyLoadBalancerAttributesRequest
+import com.amazonaws.services.elasticloadbalancing.model.*
 import com.amazonaws.services.shield.AWSShield
 import com.amazonaws.services.shield.model.CreateProtectionRequest
-import com.netflix.spinnaker.config.AwsConfiguration
-import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory
-import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
-import com.netflix.spinnaker.clouddriver.data.task.Task
-import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException
 import com.netflix.spinnaker.clouddriver.aws.TestCredential
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerClassicDescription
 import com.netflix.spinnaker.clouddriver.aws.model.SubnetAnalyzer
 import com.netflix.spinnaker.clouddriver.aws.model.SubnetTarget
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
 import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory
 import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService
+import com.netflix.spinnaker.clouddriver.data.task.Task
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException
+import com.netflix.spinnaker.config.AwsConfiguration
 import spock.lang.Specification
 import spock.lang.Subject
 import spock.lang.Unroll
 
-import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.*
-
 class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
 
   def setupSpec() {
@@ -97,23 +75,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     forRegion(_, "us-east-1") >> regionScopedProvider
   }
 
-  def securityGroupLookup = Mock(SecurityGroupLookup)
-  def securityGroupLookupFactory = Stub(SecurityGroupLookupFactory) {
-    getInstance("us-east-1") >> securityGroupLookup
-  }
-
-  def elbSecurityGroup = new SecurityGroup()
-    .withVpcId(description.vpcId)
-    .withGroupId("sg-1234")
-    .withGroupName("kato-elb")
-
-  def applicationSecurityGroup = new SecurityGroup()
-    .withVpcId(description.vpcId)
-    .withGroupId("sg-1111")
-    .withGroupName("kato")
-
-  def elbSecurityGroupUpdater = Mock(SecurityGroupUpdater)
-  def appSecurityGroupUpdater = Mock(SecurityGroupUpdater)
+  def ingressLoadBalancerBuilder = Mock(IngressLoadBalancerBuilder)
 
   @Subject operation = new UpsertAmazonLoadBalancerAtomicOperation(description)
 
@@ -121,7 +83,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     operation.deployDefaults = new AwsConfiguration.DeployDefaults(addAppGroupToServerGroup: true, createLoadBalancerIngressPermissions: true)
     operation.amazonClientProvider = mockAmazonClientProvider
    operation.regionScopedProviderFactory = regionScopedProviderFactory
-    operation.securityGroupLookupFactory = securityGroupLookupFactory
+    operation.ingressLoadBalancerBuilder = ingressLoadBalancerBuilder
   }
 
   void "should create load balancer"() {
@@ -134,18 +96,14 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     operation.operate([])
 
     then:
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.of(elbSecurityGroupUpdater)
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater)
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    1 * appSecurityGroupUpdater.addIngress(_) >> {
-      def permissions = it[0] as List
-      assert permissions.size() == 2
-      assert 7001 in permissions*.fromPort && 8501 in permissions*.fromPort
-      assert 7001 in permissions*.toPort && 8501 in permissions*.toPort
-      assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId
-      assert elbSecurityGroup.groupId in permissions[1].userIdGroupPairs*.groupId
-    }
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(
+      'kato',
+      'us-east-1',
+      'bar',
+      description.credentials,
+      "vpcId",
+      { it.toList().sort() == [7001, 8501] },
+      _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
 
     and:
     1 * mockSubnetAnalyzer.getSubnetIdsForZones(['us-east-1a'], 'internal', SubnetTarget.ELB, 1) >> ["subnet-1"]
@@ -177,7 +135,8 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
       loadBalancerAttributes: new LoadBalancerAttributes(
         crossZoneLoadBalancing: new CrossZoneLoadBalancing(enabled: true),
         connectionDraining: new ConnectionDraining(enabled: false),
-        additionalAttributes: []
+        additionalAttributes: [],
+        connectionSettings: new ConnectionSettings(idleTimeout: 60)
       )
     ))
     0 * _
@@ -199,10 +158,6 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     and: 'auto-creating groups fails'
     description.securityGroups = []
     description.vpcId = "vpcId"
-    securityGroupLookupFactory.getInstance("us-east-1") >> securityGroupLookup
-    _* securityGroupLookup.getSecurityGroupByName(_ as String, _ as String, _ as String) >> {
-      throw new Exception()
-    }
 
     when:
     operation.operate([])
@@ -246,10 +201,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     when:
     operation.operate([])
 
-    then: 'should not auto create elb sg on update'
-    0 * appSecurityGroupUpdater.addIngress(_)
-
-    and:
+    then:
     1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(loadBalancerNames: ["kato-main-frontend"])) >>
       new DescribeLoadBalancersResult(loadBalancerDescriptions: existingLoadBalancers)
     1 * loadBalancing.createLoadBalancerListeners(new CreateLoadBalancerListenersRequest(
@@ -279,6 +231,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
       loadBalancerName: "kato-main-frontend",
       loadBalancerAttributes: new LoadBalancerAttributes(
         crossZoneLoadBalancing: new CrossZoneLoadBalancing(enabled: true),
+        connectionSettings: new ConnectionSettings(idleTimeout: 60),
         additionalAttributes: []
       )
     ))
@@ -323,7 +276,8 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
       new DescribeLoadBalancerAttributesResult(loadBalancerAttributes:
        new LoadBalancerAttributes(
          crossZoneLoadBalancing: new CrossZoneLoadBalancing(enabled: existingCrossZone),
-          connectionDraining: new ConnectionDraining(enabled: existingDraining, timeout: existingTimeout)))
+          connectionDraining: new ConnectionDraining(enabled: existingDraining, timeout: existingTimeout),
+          connectionSettings: new ConnectionSettings(idleTimeout: existingIdleTimeout)))
     expectedInv * loadBalancing.modifyLoadBalancerAttributes(new ModifyLoadBalancerAttributesRequest(
       loadBalancerName: "kato-main-frontend",
       loadBalancerAttributes: expectedAttributes))
@@ -331,21 +285,25 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
 
     where:
-    desc | expectedInv | existingCrossZone | descriptionCrossZone | existingDraining | existingTimeout | descriptionDraining | descriptionTimeout
-    "make no changes" | 0 | true | null | true | 300 | null | null
-    "enable cross zone" | 1 | false | true | true | 123 | null | null
-    "enable draining" | 1 | true | null | false | 300 | true | null
-    "modify timeout" | 1 | true | null | false | 300 | null | 150
-
-    expectedAttributes = expectedAttributes(existingCrossZone, descriptionCrossZone, existingDraining, existingTimeout, descriptionDraining, descriptionTimeout)
+    desc | expectedInv | existingCrossZone | descriptionCrossZone | existingDraining | existingTimeout | descriptionDraining | descriptionTimeout | existingIdleTimeout | descriptionIdleTimeout
+    "make no changes" | 0 | true | null | true | 300 | null | null | 60 | 60
+    "enable cross zone" | 1 | false | true | true | 123 | null | null | 60 | 60
+    "enable draining" | 1 | true | null | false | 300 | true | null | 60 | 60
+    "modify timeout" | 1 | true | null | false | 300 | null | 150 | 60 | 60
+    "modify idle timeout" | 0 | true | null | true | 300 | null | null | 60 | 120
+
+    expectedAttributes = expectedAttributes(existingCrossZone, descriptionCrossZone, existingDraining, existingTimeout, descriptionDraining, descriptionTimeout, existingIdleTimeout, descriptionIdleTimeout)
   }
 
-  private LoadBalancerAttributes expectedAttributes(existingCrossZone, descriptionCrossZone, existingDraining, existingTimeout, descriptionDraining, descriptionTimeout) {
+  private LoadBalancerAttributes expectedAttributes(existingCrossZone, descriptionCrossZone, existingDraining, existingTimeout, descriptionDraining, descriptionTimeout, existingIdleTimeout, descriptionIdleTimeout) {
     CrossZoneLoadBalancing czlb = null
     if (existingCrossZone != descriptionCrossZone && descriptionCrossZone != null) {
       czlb = new CrossZoneLoadBalancing(enabled: descriptionCrossZone)
     }
-
+    ConnectionSettings cs = null
+    if (existingIdleTimeout != descriptionIdleTimeout) {
+      cs = new ConnectionSettings(idleTimeout: descriptionIdleTimeout)
+    }
     ConnectionDraining cd = null
     if ((descriptionDraining != null || descriptionTimeout != null) && (existingDraining != descriptionDraining || existingTimeout != descriptionTimeout)) {
       cd = new ConnectionDraining(enabled: [descriptionDraining, existingDraining].findResult(Closure.IDENTITY), timeout: [descriptionTimeout, existingTimeout].findResult(Closure.IDENTITY))
@@ -360,9 +318,94 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     if (czlb != null) {
       lba.setCrossZoneLoadBalancing(czlb)
    }
+    if (cs != null) {
+      lba.setConnectionSettings(cs)
+    }
     return lba
   }
 
+  void "should restore listener policies when updating an existing load balancer"() {
+    given:
+    def httpListener = new Listener(protocol: "HTTP", loadBalancerPort: 80, instanceProtocol: "HTTP", instancePort: 8502)
+    def httpsListener = new Listener(protocol: "HTTPS", loadBalancerPort: 443, instanceProtocol: "HTTP", instancePort: 7001, sSLCertificateId: "foo")
+    def policies = ["cookiePolicy"]
+
+    def existingLB = new LoadBalancerDescription(
+      loadBalancerName: "kato-main-frontend",
+      listenerDescriptions: [
+        new ListenerDescription(listener: httpListener),
+        new ListenerDescription(listener: httpsListener, policyNames: policies)
+      ]
+    )
+
+    and:
+    description.subnetType = "internal"
+    description.setIsInternal(true)
+    description.vpcId = "vpcId"
+
+    // request listeners
+    description.listeners.clear()
+    description.listeners.addAll(
+      [
+        new UpsertAmazonLoadBalancerClassicDescription.Listener(
+          externalProtocol: UpsertAmazonLoadBalancerClassicDescription.Listener.ListenerType.HTTP,
+          externalPort: httpListener.loadBalancerPort,
+          internalPort: httpListener.instancePort
+        ),
+        new UpsertAmazonLoadBalancerClassicDescription.Listener(
+          externalProtocol: UpsertAmazonLoadBalancerClassicDescription.Listener.ListenerType.HTTPS,
+          externalPort: httpsListener.loadBalancerPort,
+          internalPort: httpsListener.instancePort,
+          sslCertificateId: "bar" //updated cert on listener
+        )
+      ])
+
+    when:
+    operation.operate([])
+
+    then:
+    1 * loadBalancing.describeLoadBalancers(_) >> new DescribeLoadBalancersResult(loadBalancerDescriptions: [existingLB])
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult()
+    1 * loadBalancing.deleteLoadBalancerListeners({
+      it.loadBalancerPorts == [httpsListener.loadBalancerPort]
+    } as DeleteLoadBalancerListenersRequest)
+
+    1 * loadBalancing.createLoadBalancerListeners(*_) >> { args ->
+      def request = args[0] as CreateLoadBalancerListenersRequest
+      assert request.loadBalancerName == description.name
+      assert request.listeners.size() == 1
+      assert request.listeners*.loadBalancerPort == [ httpsListener.loadBalancerPort ]
+    }
+
+    1 * loadBalancing.configureHealthCheck(new ConfigureHealthCheckRequest(
+      loadBalancerName: "kato-main-frontend",
+      healthCheck: new HealthCheck(
+        target: "HTTP:7001/health",
+        interval: 10,
+        timeout: 5,
+        unhealthyThreshold: 2,
+        healthyThreshold: 10
+      )
+    ))
+
+    1 * loadBalancing.modifyLoadBalancerAttributes(new ModifyLoadBalancerAttributesRequest(
+      loadBalancerName: "kato-main-frontend",
+      loadBalancerAttributes: new LoadBalancerAttributes(
+        crossZoneLoadBalancing: new CrossZoneLoadBalancing(enabled: true),
+        connectionDraining: new ConnectionDraining(enabled: false),
+        additionalAttributes: [],
+        connectionSettings: new ConnectionSettings(idleTimeout: 60)
+      )
+    ))
+
+    1 * loadBalancing.setLoadBalancerPoliciesOfListener(*_) >> { args ->
+      def request = args[0] as SetLoadBalancerPoliciesOfListenerRequest
+      assert request.loadBalancerName == description.name
+      assert request.policyNames == policies
+      assert request.loadBalancerPort == httpsListener.loadBalancerPort
+    }
+  }
+
   void "should attempt to apply all listener modifications regardless of individual failures"() {
     given:
     def existingLoadBalancers = [
@@ -445,11 +488,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     operation.operate([])
 
     then:
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.of(elbSecurityGroupUpdater)
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater)
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    1 * appSecurityGroupUpdater.addIngress(_)
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(_, _, _, _, _, _, _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
 
     and:
     1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(loadBalancerNames: ["kato-main-frontend"])) >> null
@@ -476,11 +515,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     operation.operate([])
 
     then:
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.of(elbSecurityGroupUpdater)
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater)
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    1 * appSecurityGroupUpdater.addIngress(_)
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(_, _, _, _, _, _, _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
 
     and:
     1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(loadBalancerNames: ["kato-main-frontend"])) >> null
@@ -507,11 +542,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     operation.operate([])
 
     then:
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.of(elbSecurityGroupUpdater)
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater)
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    1 * appSecurityGroupUpdater.addIngress(_)
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(_, _, _, _, _, _, _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
 
     and:
     1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(loadBalancerNames: ["kato-test-frontend"])) >>
@@ -560,100 +591,6 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     0 * loadBalancing.createLoadBalancerListeners(_)
   }
 
-  void "should permit ingress from application elb security group to application security group"() {
-    given: 'an application load balancer'
-    def applicationName = "foo"
-    description.name = applicationName
-    description.application = applicationName
-    description.securityGroups = []
-    description.vpcId = "vpcId"
-    description.listeners = [
-      new UpsertAmazonLoadBalancerClassicDescription.Listener(
-        externalPort: 80,
-        externalProtocol: "HTTP",
-        internalPort: 7001,
-        internalProtocol: "HTTP"
-      )
-    ]
-
-    elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    securityGroupLookup.getSecurityGroupByName(
-      description.credentialAccount,
-      applicationName + "-elb",
-      description.vpcId
-    ) >> Optional.of(elbSecurityGroupUpdater)
-
-    securityGroupLookup.getSecurityGroupByName(
-      description.credentialAccount,
-      applicationName,
-      description.vpcId
-    ) >> Optional.of(appSecurityGroupUpdater)
-
-    when:
-    operation.operate([])
-
-    then:
-    1 * appSecurityGroupUpdater.addIngress(_) >> {
-      def permissions = it[0] as List
-      assert permissions.size() == 1
-      assert permissions[0].fromPort == 7001 && permissions[0].toPort == 7001
-      assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId
-    }
-
-    1 * loadBalancing.createLoadBalancer(_ as CreateLoadBalancerRequest) >> new CreateLoadBalancerResult(dNSName: 'dnsName1')
-  }
-
-  void "should auto-create application load balancer security group"() {
-    given: "an elb with a healthCheck port"
-    description.securityGroups = []
-    description.vpcId = "vpcId"
-
-    when:
-    operation.operate([])
-
-    then: "an application elb group should be created and ingressed properly"
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.empty()
-    1 * securityGroupLookup.createSecurityGroup(_) >> elbSecurityGroupUpdater
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater)
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    1 * appSecurityGroupUpdater.addIngress(_) >> {
-      def permissions = it[0] as List
-      assert permissions.size() == 2
-      assert permissions*.fromPort == [8501, 7001] && permissions*.toPort == [8501, 7001]
-      assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId
-      assert elbSecurityGroup.groupId in permissions[1].userIdGroupPairs*.groupId
-    }
-
-    1 * loadBalancing.createLoadBalancer(_ as CreateLoadBalancerRequest) >> new CreateLoadBalancerResult(dNSName: 'dnsName1')
-  }
-
-  void "should auto-create application load balancer and application security groups"() {
-    given:
-    description.securityGroups = []
-    description.vpcId = "vpcId"
-
-    when:
-    operation.operate([])
-
-    then:
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.empty()
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.empty()
-    1 * securityGroupLookup.createSecurityGroup( { it.name == 'kato-elb'}) >> elbSecurityGroupUpdater
-    1 * securityGroupLookup.createSecurityGroup( { it.name == 'kato'}) >> appSecurityGroupUpdater
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
-    1 * appSecurityGroupUpdater.addIngress(_) >> {
-      def permissions = it[0] as List
-      assert permissions.size() == 2
-      assert permissions*.fromPort == [8501, 7001] && permissions*.toPort == [8501, 7001]
-      assert elbSecurityGroup.groupId in permissions[0].userIdGroupPairs*.groupId
-      assert elbSecurityGroup.groupId in permissions[1].userIdGroupPairs*.groupId
-    }
-
-    1 * loadBalancing.createLoadBalancer(_ as CreateLoadBalancerRequest) >> new CreateLoadBalancerResult(dNSName: 'dnsName1')
-  }
 
   @Unroll
   void "should enable AWS Shield protection if external ELB"() {
@@ -666,10 +603,7 @@ class UpsertAmazonLoadBalancerClassicAtomicOperationSpec extends Specification {
     operation.operate([])
 
     then:
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato-elb', 'vpcId') >> Optional.of(elbSecurityGroupUpdater)
-    1 * securityGroupLookup.getSecurityGroupByName('bar', 'kato', 'vpcId') >> Optional.of(appSecurityGroupUpdater)
-    1 * elbSecurityGroupUpdater.getSecurityGroup() >> elbSecurityGroup
-    1 * appSecurityGroupUpdater.getSecurityGroup() >> applicationSecurityGroup
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(_, _, _, _, _, _, _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
     1 * loadBalancing.createLoadBalancer(_ as CreateLoadBalancerRequest) >> new CreateLoadBalancerResult(dNSName: 'dnsName1')
 
     (shouldProtect ? 1 : 0) * awsShield.createProtection(new CreateProtectionRequest(
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperationSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperationSpec.groovy
index 68b01342dd3..08bc4575ff4 100644
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperationSpec.groovy
+++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/loadbalancer/UpsertAmazonLoadBalancerV2AtomicOperationSpec.groovy
@@ -68,20 +68,125 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
         healthCheckPort: 8080,
         attributes: [
           deregistrationDelay: 300,
-          stickinessEnabled: false,
-          stickinessType: "lb_cookie",
-          stickinessDuration: 86400
+          stickinessEnabled : false,
+          stickinessType : "lb_cookie",
+          stickinessDuration : 86400
         ]
       )
     ],
     subnetType: "internal",
+    idleTimeout: 60,
+    deletionProtection: true
   )
+  UpsertAmazonLoadBalancerV2Description updateDescription = new UpsertAmazonLoadBalancerV2Description(
+    loadBalancerType: AmazonLoadBalancerType.APPLICATION,
+    name: "foo-main-frontend",
+    availabilityZones: ["us-east-1": ["us-east-1a"]],
+    listeners: [
+      new UpsertAmazonLoadBalancerV2Description.Listener(
+        port: 80,
+        protocol: ProtocolEnum.HTTP,
+        defaultActions: [
+          new UpsertAmazonLoadBalancerV2Description.Action(
+            targetGroupName: targetGroupName
+          )
+        ]
+      )
+    ],
+    securityGroups: ["foo"],
+    credentials: TestCredential.named('bar'),
+    targetGroups: [
+      new UpsertAmazonLoadBalancerV2Description.TargetGroup(
+        name: "target-group-foo",
+        protocol: ProtocolEnum.HTTP,
+        port: 80,
+        healthCheckProtocol: ProtocolEnum.HTTP,
+        healthCheckPort: 8080,
+        attributes: [
+          deregistrationDelay: 300,
+        ]
+      )
+    ],
+    subnetType: "internal",
+    idleTimeout: 60,
+    deletionProtection: true
+  )
+  UpsertAmazonLoadBalancerV2Description descriptionWithNoAttributes = new UpsertAmazonLoadBalancerV2Description(
+    loadBalancerType: AmazonLoadBalancerType.APPLICATION,
+    name: "foo-main-frontend",
+    availabilityZones: ["us-east-1": ["us-east-1a"]],
+    listeners: [
+      new UpsertAmazonLoadBalancerV2Description.Listener(
+        port: 80,
+        protocol: ProtocolEnum.HTTP,
+        defaultActions: [
+          new UpsertAmazonLoadBalancerV2Description.Action(
+            targetGroupName: targetGroupName
+          )
+        ]
+      )
+    ],
+    securityGroups: ["foo"],
+    credentials: TestCredential.named('bar'),
+    targetGroups: [
+      new UpsertAmazonLoadBalancerV2Description.TargetGroup(
+        name: "target-group-foo",
+        protocol: ProtocolEnum.HTTP,
+        port: 80,
+        healthCheckProtocol: ProtocolEnum.HTTP,
+        healthCheckPort: 8080,
+        attributes: [
+        ]
+      )
+    ],
+    subnetType: "internal",
+    idleTimeout: 60,
+    deletionProtection: true
+  )
+  UpsertAmazonLoadBalancerV2Description nlbDescription = new UpsertAmazonLoadBalancerV2Description(
+    loadBalancerType: AmazonLoadBalancerType.NETWORK,
+    name: "foo-main-frontend",
+    availabilityZones: ["us-east-1": ["us-east-1a"]],
+    listeners: [
+      new UpsertAmazonLoadBalancerV2Description.Listener(
+        port: 80,
+        protocol: ProtocolEnum.HTTP,
+        defaultActions: [
+          new UpsertAmazonLoadBalancerV2Description.Action(
+            targetGroupName: targetGroupName
+          )
+        ]
+      )
+    ],
+    securityGroups: ["foo"],
+    credentials: TestCredential.named('bar'),
+    targetGroups: [
+      new UpsertAmazonLoadBalancerV2Description.TargetGroup(
+        name: "target-group-foo",
+        protocol: ProtocolEnum.HTTP,
+        port: 80,
+        healthCheckProtocol: ProtocolEnum.HTTP,
+        healthCheckPort: 8080,
+        attributes: [
+          deregistrationDelay: 300,
+          stickinessEnabled : false,
+          stickinessType : "lb_cookie",
+          stickinessDuration : 86400
+        ]
+      )
+    ],
+    subnetType: "internal",
+    idleTimeout: 60,
+    deletionProtection: true,
+    loadBalancingCrossZone: true
+  )
+
   def loadBalancerArn = "test:arn"
   def targetGroupArn = "test:target:group:arn"
   def targetGroup = new TargetGroup(targetGroupArn: targetGroupArn, targetGroupName: targetGroupName, port: 80, protocol: ProtocolEnum.HTTP)
   def targetGroupOld = new TargetGroup(targetGroupArn: targetGroupArn, targetGroupName: "target-group-foo-existing", port: 80, protocol: ProtocolEnum.HTTP)
   def loadBalancerOld = new LoadBalancer(loadBalancerName: "foo-main-frontend", loadBalancerArn: loadBalancerArn, type: "application")
-
+  def loadBalancerAttributes = [new LoadBalancerAttribute().withKey("idle_timeout.timeout_seconds").withValue("60"), new LoadBalancerAttribute().withKey("deletion_protection.enabled").withValue("true")]
 
   AWSShield awsShield = Mock(AWSShield)
   AmazonElasticLoadBalancing loadBalancing = Mock(AmazonElasticLoadBalancing)
@@ -100,12 +205,17 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
   def regionScopedProviderFactory = Stub(RegionScopedProviderFactory) {
     forRegion(_, "us-east-1") >> regionScopedProvider
   }
-  @Subject operation = new UpsertAmazonLoadBalancerV2AtomicOperation(description)
+
+  def ingressLoadBalancerBuilder = Mock(IngressLoadBalancerBuilder)
+
+  @Subject
+  operation = new UpsertAmazonLoadBalancerV2AtomicOperation(description)
 
   def setup() {
     operation.amazonClientProvider = mockAmazonClientProvider
     operation.regionScopedProviderFactory = regionScopedProviderFactory
-    operation.deployDefaults = new AwsConfiguration.DeployDefaults()
+    operation.deployDefaults = new AwsConfiguration.DeployDefaults(addAppGroupToServerGroup: true, createLoadBalancerIngressPermissions: true)
+    operation.ingressLoadBalancerBuilder = ingressLoadBalancerBuilder
   }
 
   void "should create load balancer"() {
@@ -113,6 +223,7 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     def existingLoadBalancers = []
     def existingTargetGroups = []
     def existingListeners = []
+    description.vpcId = 'vpcId'
 
     when:
    operation.operate([])
@@ -120,14 +231,24 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     then:
     1 * mockSubnetAnalyzer.getSubnetIdsForZones(['us-east-1a'], 'internal', SubnetTarget.ELB, 1) >> ["subnet-1"]
     1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
-      new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+        new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     1 * loadBalancing.createLoadBalancer(new CreateLoadBalancerRequest(
-      name: "foo-main-frontend",
-      subnets: ["subnet-1"],
-      securityGroups: ["sg-1234"],
-      scheme: "internal",
-      type: "application"
+        ipAddressType: 'ipv4',
+        name: "foo-main-frontend",
+        subnets: ["subnet-1"],
+        securityGroups: ["sg-1234"],
+        scheme: "internal",
+        type: "application"
     )) >> new CreateLoadBalancerResult(loadBalancers: [new LoadBalancer(dNSName: "dnsName1", loadBalancerArn: loadBalancerArn, type: "application")])
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(
+      'foo',
+      'us-east-1',
+      'bar',
+      description.credentials,
+      "vpcId",
+      { it.toList().sort() == [80, 8080] },
+      _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
     1 * loadBalancing.setSecurityGroups(new SetSecurityGroupsRequest(
       loadBalancerArn: loadBalancerArn,
       securityGroups: ["sg-1234"]
@@ -137,12 +258,13 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     1 * loadBalancing.modifyTargetGroupAttributes(_ as ModifyTargetGroupAttributesRequest)
     1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
     1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
     0 * _
   }
 
   void "should create target group for existing load balancer"() {
     setup:
-    def existingLoadBalancers = [ loadBalancerOld ]
+    def existingLoadBalancers = [loadBalancerOld]
     def existingTargetGroups = []
     def existingListeners = []
 
@@ -161,13 +283,179 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     1 * loadBalancing.modifyTargetGroupAttributes(_ as ModifyTargetGroupAttributesRequest)
     1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
     1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
+    0 * _
+  }
+
+  void "should create target group attributes passed for existing load balancer"() {
+    setup:
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = []
+    def existingListeners = []
+
+    when:
+    operation.operate([])
+
+    then:
+    1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
+      new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.setSecurityGroups(new SetSecurityGroupsRequest(
+      loadBalancerArn: loadBalancerArn,
+      securityGroups: ["sg-1234"]
+    ))
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
+    1 * loadBalancing.describeTargetGroups(new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeTargetGroupsResult(targetGroups: existingTargetGroups)
+    1 * loadBalancing.createTargetGroup(_ as CreateTargetGroupRequest) >> new CreateTargetGroupResult(targetGroups: [targetGroup])
+    1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
+    1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.modifyTargetGroupAttributes(_) >> { ModifyTargetGroupAttributesRequest request ->
+      assert request.attributes.find { it.key == 'deregistration_delay.timeout_seconds' }.value == "300"
+      assert request.attributes.find { it.key == 'stickiness.enabled' }.value == "false"
+      assert request.attributes.find { it.key == 'stickiness.type' }.value == "lb_cookie"
+      assert request.attributes.find { it.key == 'stickiness.lb_cookie.duration_seconds' }.value == "86400"
+      assert request.targetGroupArn == "test:target:group:arn"
+      return new ModifyTargetGroupAttributesResult()
+    }
+    0 * _
+  }
+
+  void "should create target group attributes with defaults for existing load balancer"() {
+    @Subject createOperation = new UpsertAmazonLoadBalancerV2AtomicOperation(descriptionWithNoAttributes)
+    setup:
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = []
+    def existingListeners = []
+    createOperation.amazonClientProvider = mockAmazonClientProvider
+    createOperation.regionScopedProviderFactory = regionScopedProviderFactory
+    createOperation.deployDefaults = new AwsConfiguration.DeployDefaults(addAppGroupToServerGroup: true, createLoadBalancerIngressPermissions: true)
+    createOperation.ingressLoadBalancerBuilder = ingressLoadBalancerBuilder
+    when:
+    createOperation.operate([])
+
+    then:
+    1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
+      new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.setSecurityGroups(new SetSecurityGroupsRequest(
+      loadBalancerArn: loadBalancerArn,
+      securityGroups: ["sg-1234"]
+    ))
+    1 * loadBalancing.describeTargetGroups(new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeTargetGroupsResult(targetGroups: existingTargetGroups)
+    1 * loadBalancing.createTargetGroup(_ as CreateTargetGroupRequest) >> new CreateTargetGroupResult(targetGroups: [targetGroup])
+    1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
+    1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.modifyTargetGroupAttributes(_) >> { ModifyTargetGroupAttributesRequest request ->
+      assert request.attributes.find { it.key == 'deregistration_delay.timeout_seconds' }.value == "300"
+      assert request.attributes.find { it.key == 'stickiness.enabled' }.value == "false"
+      assert request.attributes.find { it.key == 'stickiness.type' }.value == "lb_cookie"
+      assert request.attributes.find { it.key == 'stickiness.lb_cookie.duration_seconds' }.value == "86400"
+      assert request.targetGroupArn == "test:target:group:arn"
+      return new ModifyTargetGroupAttributesResult()
+    }
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
+    0 * _
+  }
+
+  void "should create target group attributes with defaults for existing nlb"() {
+    @Subject createNlbOperation = new UpsertAmazonLoadBalancerV2AtomicOperation(nlbDescription)
+    setup:
+    createNlbOperation.amazonClientProvider = mockAmazonClientProvider
+    createNlbOperation.regionScopedProviderFactory = regionScopedProviderFactory
+    createNlbOperation.deployDefaults = new AwsConfiguration.DeployDefaults(addAppGroupToServerGroup: true, createLoadBalancerIngressPermissions: true)
+    createNlbOperation.ingressLoadBalancerBuilder = ingressLoadBalancerBuilder
+    def existingLoadBalancers = []
+    def existingTargetGroups = []
+    def existingListeners = []
+    def nlbLoadBalancerAttributes = [new LoadBalancerAttribute().withKey("idle_timeout.timeout_seconds").withValue("60"),
+                                     new LoadBalancerAttribute().withKey("deletion_protection.enabled").withValue("true"),
+                                     new LoadBalancerAttribute().withKey("load_balancing.cross_zone.enabled").withValue("false")]
+
+    nlbDescription.vpcId = 'vpcId'
+
+    when:
+    createNlbOperation.operate([])
+
+    then:
+    1 * mockSubnetAnalyzer.getSubnetIdsForZones(['us-east-1a'], 'internal', SubnetTarget.ELB, 1) >> ["subnet-1"]
+    1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
+      new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
+    1 * loadBalancing.createLoadBalancer(new CreateLoadBalancerRequest(
+      ipAddressType: 'ipv4',
+      name: "foo-main-frontend",
+      subnets: ["subnet-1"],
+      scheme: "internal",
+      type: "network"
+    )) >> new CreateLoadBalancerResult(loadBalancers: [new LoadBalancer(dNSName: "dnsName1", loadBalancerArn: loadBalancerArn, type: "network")])
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(
+      'foo',
+      'us-east-1',
+      'bar',
+      nlbDescription.credentials,
+      "vpcId",
+      { it.toList().sort() == [80, 8080] },
+      _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "kato-elb")
+    1 * loadBalancing.describeTargetGroups(new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeTargetGroupsResult(targetGroups: existingTargetGroups)
+    1 * loadBalancing.createTargetGroup(_ as CreateTargetGroupRequest) >> new CreateTargetGroupResult(targetGroups: [targetGroup])
+    1 * loadBalancing.modifyLoadBalancerAttributes(_) >> { ModifyLoadBalancerAttributesRequest request ->
+      assert request.attributes.find { it.key == 'load_balancing.cross_zone.enabled' }.withValue("true")
+      assert request.loadBalancerArn == "test:arn"
+      return new ModifyLoadBalancerAttributesResult()
+    }
+    1 * loadBalancing.modifyTargetGroupAttributes(_) >> { ModifyTargetGroupAttributesRequest request ->
+      assert request.attributes.find { it.key == 'deregistration_delay.timeout_seconds' }.value == "300"
+      assert request.attributes.find { it.key == 'proxy_protocol_v2.enabled' }.withValue("false")
+      assert request.attributes.find { it.key == 'deregistration_delay.connection_termination.enabled' }.withValue("false")
+      assert request.targetGroupArn == "test:target:group:arn"
+      return new ModifyTargetGroupAttributesResult()
+    }
+    1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
+    1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: nlbLoadBalancerAttributes]
+    0 * _
+  }
+
+  void "should create nlb and update cross_zone_enabled attribute only when it is updated"() {
+    @Subject createNlbOperation = new UpsertAmazonLoadBalancerV2AtomicOperation(nlbDescription)
+    setup:
+    createNlbOperation.amazonClientProvider = mockAmazonClientProvider
+    createNlbOperation.regionScopedProviderFactory = regionScopedProviderFactory
+    createNlbOperation.deployDefaults = new AwsConfiguration.DeployDefaults(addAppGroupToServerGroup: true, createLoadBalancerIngressPermissions: true)
+    createNlbOperation.ingressLoadBalancerBuilder = ingressLoadBalancerBuilder
+    def loadBalancerOld = new LoadBalancer(loadBalancerName: "foo-main-frontend", loadBalancerArn: loadBalancerArn, type: "network")
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = []
+    def existingListeners = []
+    def nlbLoadBalancerAttributes = [new LoadBalancerAttribute().withKey("idle_timeout.timeout_seconds").withValue("60"),
+                                     new LoadBalancerAttribute().withKey("deletion_protection.enabled").withValue("true"),
+                                     new LoadBalancerAttribute().withKey("load_balancing.cross_zone.enabled").withValue("true")]
+
+    nlbDescription.vpcId = 'vpcId'
+
+    when:
+    createNlbOperation.operate([])
+
+    then:
+
+    1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
+      new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.describeTargetGroups(new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeTargetGroupsResult(targetGroups: existingTargetGroups)
+    1 * loadBalancing.createTargetGroup(_ as CreateTargetGroupRequest) >> new CreateTargetGroupResult(targetGroups: [targetGroup])
+    0 * loadBalancing.modifyLoadBalancerAttributes(_)
+    1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
+    1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: nlbLoadBalancerAttributes]
+    1 * loadBalancing.modifyTargetGroupAttributes(_ as ModifyTargetGroupAttributesRequest)
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     0 * _
   }
 
   void "should modify target group of existing load balancer"() {
     setup:
-    def existingLoadBalancers = [ loadBalancerOld ]
-    def existingTargetGroups = [ targetGroup ]
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = [targetGroup]
     def existingListeners = []
 
     when:
@@ -185,13 +473,53 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     1 * loadBalancing.modifyTargetGroupAttributes(_ as ModifyTargetGroupAttributesRequest)
     1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
     1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     0 * _
+  }
+
+  void "should modify only target group attributes that are passed of existing load balancer"() {
+    @Subject updateOperation = new UpsertAmazonLoadBalancerV2AtomicOperation(updateDescription)
+
+    setup:
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = [targetGroup]
+    def existingListeners = []
+    updateOperation.amazonClientProvider = mockAmazonClientProvider
+    updateOperation.regionScopedProviderFactory = regionScopedProviderFactory
+    updateOperation.deployDefaults = new AwsConfiguration.DeployDefaults(addAppGroupToServerGroup: true, createLoadBalancerIngressPermissions: true)
+    updateOperation.ingressLoadBalancerBuilder = ingressLoadBalancerBuilder
+
+    when:
+    updateOperation.operate([])
+
+    then:
+    1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
+      new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.setSecurityGroups(new SetSecurityGroupsRequest(
+      loadBalancerArn: loadBalancerArn,
+      securityGroups: ["sg-1234"]
+    ))
+    1 * loadBalancing.describeTargetGroups(new DescribeTargetGroupsRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeTargetGroupsResult(targetGroups: existingTargetGroups)
+    1 * loadBalancing.modifyTargetGroup(_ as ModifyTargetGroupRequest)
+    1 * loadBalancing.modifyTargetGroupAttributes(_) >> { ModifyTargetGroupAttributesRequest request ->
+      assert request.attributes.find { it.key == 'deregistration_delay.timeout_seconds' }.value == "300"
+      assert request.attributes.find { it.key == 'stickiness.enabled' } == null
+      assert request.attributes.find { it.key == 'load_balancing.cross_zone.enabled' } == null
+      assert request.targetGroupArn == "test:target:group:arn"
+      return new ModifyTargetGroupAttributesResult()
+    }
+    1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
+    1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     0 * _
   }
 
   void "should remove missing target group of existing load balancer"() {
     setup:
-    def existingLoadBalancers = [ loadBalancerOld ]
-    def existingTargetGroups = [ targetGroupOld ]
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = [targetGroupOld]
     def existingListeners = []
 
     when:
@@ -210,13 +538,15 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     1 * loadBalancing.modifyTargetGroupAttributes(_ as ModifyTargetGroupAttributesRequest)
     1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
     1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     0 * _
   }
 
   void "should throw error updating a load balancer if listener targets a non-existent target group"() {
     setup:
-    def existingLoadBalancers = [ loadBalancerOld ]
-    def existingTargetGroups = [ targetGroupOld ]
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = [targetGroupOld]
     def existingListeners = []
 
     when:
@@ -236,6 +566,8 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     1 * loadBalancing.modifyTargetGroupAttributes(_ as ModifyTargetGroupAttributesRequest)
     1 * loadBalancing.describeListeners(new DescribeListenersRequest(loadBalancerArn: loadBalancerArn)) >> new DescribeListenersResult(listeners: existingListeners)
     1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: []))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     0 * _
     thrown AtomicOperationException
   }
@@ -243,9 +575,9 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
   void "should remove and recreate listeners that have changed on an existing load balancer"() {
     setup:
     def listenerArn = "test:listener:arn"
-    def existingLoadBalancers = [ loadBalancerOld ]
-    def existingTargetGroups = [ targetGroupOld ]
-    def existingListeners = [ new Listener(listenerArn: listenerArn, defaultActions: [])]
+    def existingLoadBalancers = [loadBalancerOld]
+    def existingTargetGroups = [targetGroupOld]
+    def existingListeners = [new Listener(listenerArn: listenerArn, defaultActions: [])]
 
     when:
     operation.operate([])
@@ -262,6 +594,8 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     1 * loadBalancing.describeRules(new DescribeRulesRequest(listenerArn: listenerArn)) >> new DescribeRulesResult(rules: [])
     1 * loadBalancing.deleteListener(new DeleteListenerRequest(listenerArn: listenerArn))
     1 * loadBalancing.createListener(new CreateListenerRequest(loadBalancerArn: loadBalancerArn, port: 80, protocol: "HTTP", defaultActions: [new Action(targetGroupArn: targetGroupArn, type: ActionTypeEnum.Forward, order: 1)]))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     0 * _
   }
 
@@ -270,6 +604,7 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     description.credentials = TestCredential.named('bar', [shieldEnabled: true])
     description.isInternal = false
     description.subnetType = 'internet-facing'
+    description.vpcId = 'vpcId'
     def existingLoadBalancers = []
     def existingTargetGroups = []
     def existingListeners = []
@@ -278,10 +613,13 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
     operation.operate([])
 
     then:
+    1 * ingressLoadBalancerBuilder.ingressApplicationLoadBalancerGroup(_, _, _, _, _, _, _) >> new IngressLoadBalancerBuilder.IngressLoadBalancerGroupResult("sg-1234", "foo-elb")
     1 * mockSubnetAnalyzer.getSubnetIdsForZones(['us-east-1a'], 'internet-facing', SubnetTarget.ELB, 1) >> ["subnet-1"]
     1 * loadBalancing.describeLoadBalancers(new DescribeLoadBalancersRequest(names: ["foo-main-frontend"])) >>
       new DescribeLoadBalancersResult(loadBalancers: existingLoadBalancers)
+    1 * loadBalancing.setIpAddressType(new SetIpAddressTypeRequest (loadBalancerArn: loadBalancerArn, ipAddressType: 'ipv4')) >> new SetIpAddressTypeResult(ipAddressType: 'ipv4')
     1 * loadBalancing.createLoadBalancer(new CreateLoadBalancerRequest(
+      ipAddressType: "ipv4",
       name: "foo-main-frontend",
       subnets: ["subnet-1"],
       securityGroups: ["sg-1234"],
@@ -300,6 +638,7 @@ class UpsertAmazonLoadBalancerV2AtomicOperationSpec extends Specification {
       name: 'foo-main-frontend',
       resourceArn: loadBalancerArn
     ))
+    1 * loadBalancing.describeLoadBalancerAttributes(_) >> [attributes: loadBalancerAttributes]
     0 * _
   }
 }
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupSpec.groovy
index 3e957e3cc33..543287de6f6 100644
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupSpec.groovy
+++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/SecurityGroupLookupSpec.groovy
@@ -17,28 +17,22 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup
 
 import com.amazonaws.services.ec2.AmazonEC2
-import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest
-import com.amazonaws.services.ec2.model.CreateSecurityGroupRequest
-import com.amazonaws.services.ec2.model.CreateSecurityGroupResult
-import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult
-import com.amazonaws.services.ec2.model.IpPermission
-import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest
-import com.amazonaws.services.ec2.model.SecurityGroup
+import com.amazonaws.services.ec2.model.*
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription
 import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGroupLookupFactory.SecurityGroupUpdater
 import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider
 import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository
+import com.netflix.spinnaker.credentials.CredentialsRepository
 import spock.lang.Specification
 import spock.lang.Subject
 
 class SecurityGroupLookupSpec extends Specification {
 
-  final amazonEC2 = Mock(AmazonEC2)
-  final amazonClientProvider = Stub(AmazonClientProvider) {
+  def amazonEC2 = Mock(AmazonEC2)
+  def amazonClientProvider = Stub(AmazonClientProvider) {
     getAmazonEC2(_, "us-east-1", _) >> amazonEC2
   }
 
-  final accountCredentialsRepository = Stub(AccountCredentialsRepository) {
+  def accountCredentialsRepository = Stub(CredentialsRepository) {
     getAll() >> [
       Stub(NetflixAmazonCredentials) {
         getName() >> "test"
@@ -51,11 +45,11 @@ class SecurityGroupLookupSpec extends Specification {
     ]
   }
 
-  final securityGroupLookupFactory = new SecurityGroupLookupFactory(amazonClientProvider,
+  def securityGroupLookupFactory = new SecurityGroupLookupFactory(amazonClientProvider,
     accountCredentialsRepository)
 
   @Subject
-  final securityGroupLookup = securityGroupLookupFactory.getInstance("us-east-1")
+  def securityGroupLookup = securityGroupLookupFactory.getInstance("us-east-1")
 
   void "should create security group"() {
     when:
@@ -102,7 +96,7 @@ class SecurityGroupLookupSpec extends Specification {
 
   void "should look up security group, but not call AWS again"() {
     when:
-    final result = securityGroupLookup.getSecurityGroupByName("test", "wideOpen", "vpc-1").get()
+    def result = securityGroupLookup.getSecurityGroupByName("test", "wideOpen", "vpc-1").get()
 
     then:
     1 * amazonEC2.describeSecurityGroups(_) >> new DescribeSecurityGroupsResult(
diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperationUnitSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperationUnitSpec.groovy
index 93e5a132f75..60af63066a5 100644
--- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperationUnitSpec.groovy
+++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/ops/securitygroup/UpsertSecurityGroupAtomicOperationUnitSpec.groovy
@@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup
 
 import com.amazonaws.AmazonServiceException
 import com.amazonaws.services.ec2.model.IpPermission
+import com.amazonaws.services.ec2.model.IpRange
 import com.amazonaws.services.ec2.model.SecurityGroup
 import com.amazonaws.services.ec2.model.UserIdGroupPair
 import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGroupDescription
@@ -27,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.ops.securitygroup.SecurityGr
 import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials
 import com.netflix.spinnaker.clouddriver.data.task.Task
 import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
+import 
com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService import spock.lang.Specification import spock.lang.Subject @@ -46,9 +48,11 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { ) - final securityGroupLookup = Mock(SecurityGroupLookupFactory.SecurityGroupLookup) + def securityGroupLookup = Mock(SecurityGroupLookupFactory.SecurityGroupLookup) - final securityGroupLookupFactory = Stub(SecurityGroupLookupFactory) { + def dynamicConfigService = Mock(DynamicConfigService) + + def securityGroupLookupFactory = Stub(SecurityGroupLookupFactory) { getInstance(_) >> securityGroupLookup } @@ -58,6 +62,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { def setup() { op.securityGroupLookupFactory = securityGroupLookupFactory + op.dynamicConfigService = dynamicConfigService } void "non-existent security group should be created"() { @@ -101,7 +106,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") ]) ]) - 0 * _ + 1 * createdSecurityGroup.updateTags(description, dynamicConfigService) } void "non-existent security group that is found on create should be updated"() { @@ -133,9 +138,9 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { } 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", "vpc-123") >> Optional.of(existingSecurityGroup) 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(ipPermissions: [ - new IpPermission(fromPort: 211, toPort: 212, ipProtocol: "tcp", userIdGroupPairs: [ - new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") - ]) + new IpPermission(fromPort: 211, toPort: 212, ipProtocol: "tcp", userIdGroupPairs: [ + new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") + ]) ]) then: @@ -144,7 +149,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") ]) ]) - 0 * _ + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) } void "existing security group should be unchanged"() { @@ -181,7 +186,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") ]) ]) - 0 * _ + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) } void "existing security group should be updated with ingress by id"() { @@ -206,7 +211,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") ]) ]) - 0 * _ + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) } void "existing security group should be updated with ingress from another account"() { @@ -233,7 +238,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId2", groupId: "id-bar") ]) ]) - 0 * _ + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) } void "existing permissions should not be re-created when a security group is modified"() { @@ -261,16 +266,16 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { then: 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", "vpc-123") >> Optional.of(existingSecurityGroup) 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: [ - new IpPermission(fromPort: 80, toPort: 81, - userIdGroupPairs: [ - new UserIdGroupPair(userId: "accountId1", groupId: 
"grp"), - new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") - ], - ipRanges: ["10.0.0.1/32"], ipProtocol: "tcp" - ), - new IpPermission(fromPort: 25, toPort: 25, - userIdGroupPairs: [new UserIdGroupPair(userId: "accountId1", groupId: "id-bar")], ipProtocol: "tcp"), - ]) + new IpPermission(fromPort: 80, toPort: 81, + userIdGroupPairs: [ + new UserIdGroupPair(userId: "accountId1", groupId: "grp"), + new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") + ], + ipRanges: ["10.0.0.1/32"], ipProtocol: "tcp" + ), + new IpPermission(fromPort: 25, toPort: 25, + userIdGroupPairs: [new UserIdGroupPair(userId: "accountId1", groupId: "id-bar")], ipProtocol: "tcp"), + ]) then: 1 * existingSecurityGroup.addIngress([ @@ -283,7 +288,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupId: "grp") ]) ]) - 0 * _ + 2 * existingSecurityGroup.updateTags(description, dynamicConfigService) } void "should only append security group ingress"() { @@ -326,7 +331,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") ]) ]) - 0 * _ + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) } void "should fail for missing ingress security group in vpc"() { @@ -344,7 +349,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { then: IllegalStateException ex = thrown() - ex.message == "The following security groups do not exist: 'bar' in 'test' vpc-123" + ex.message == "The following security groups do not exist: 'bar' in 'test' vpc-123 (ignoreSelfReferencingRules: true)" } void "should two-phase create self-referential security group in vpc"() { @@ -391,7 +396,6 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { then: 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", null) >> Optional.of(existingSecurityGroup) 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: []) - 0 * _ then: 1 * existingSecurityGroup.addIngress([ @@ -399,6 +403,7 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { new UserIdGroupPair(userId: "accountId1", groupName: "bar") ]) ]) + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) } @@ -428,4 +433,184 @@ class UpsertSecurityGroupAtomicOperationUnitSpec extends Specification { } + void "existing permissions should not be re-created when a security group with description is modified"() { + final existingSecurityGroup = Mock(SecurityGroupUpdater) + + description.securityGroupIngress = [ + new SecurityGroupIngress(name: "bar", startPort: 111, endPort: 112, ipProtocol: "tcp"), + new SecurityGroupIngress(name: "bar", startPort: 25, endPort: 25, ipProtocol: "tcp"), + new SecurityGroupIngress(name: "bar", startPort: 80, endPort: 81, ipProtocol: "tcp") + ] + description.ipIngress = [ + new IpIngress(cidr: "10.0.0.1/32", startPort: 80, endPort: 81, ipProtocol: "tcp") + ] + + when: + op.operate([]) + + then: + 3 * securityGroupLookup.getAccountIdForName("test") >> "accountId1" + 3 * securityGroupLookup.getSecurityGroupByName("test", "bar", "vpc-123") >> Optional.of(new SecurityGroupUpdater( + new SecurityGroup(groupId: "id-bar"), + null + )) + + then: + 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", "vpc-123") >> Optional.of(existingSecurityGroup) + 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", 
ipPermissions: [ + new IpPermission(fromPort: 80, toPort: 81, + userIdGroupPairs: [ + new UserIdGroupPair(userId: "accountId1", groupId: "grp", description: "sg description" ), + new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") + ], + ipRanges: ["10.0.0.1/32"], ipProtocol: "tcp" + ), + new IpPermission(fromPort: 25, toPort: 25, + userIdGroupPairs: [new UserIdGroupPair(userId: "accountId1", groupId: "id-bar")], ipProtocol: "tcp"), + ]) + + then: + 1 * existingSecurityGroup.addIngress([ + new IpPermission(ipProtocol: "tcp", fromPort: 111, toPort: 112, userIdGroupPairs: [ + new UserIdGroupPair(userId: "accountId1", groupId: "id-bar") + ]) + ]) + 1 * existingSecurityGroup.removeIngress([ + new IpPermission(ipProtocol: "tcp", fromPort: 80, toPort: 81, userIdGroupPairs: [ + new UserIdGroupPair(userId: "accountId1", groupId: "grp", description: "sg description") + ]) + ]) + 2 * existingSecurityGroup.updateTags(description, dynamicConfigService) + } + + void "should update ingress and add by name for missing ingress security group in EC2 classic"() { + final existingSecurityGroup = Mock(SecurityGroupUpdater) + final ingressSecurityGroup = Mock(SecurityGroupUpdater) + description.securityGroupIngress = [ + new SecurityGroupIngress(name: "bar", startPort: 111, endPort: 112, vpcId: "vpc-123", ipProtocol: "tcp", accountName: "test") + ] + description.vpcId = null + description.ipIngress = [ + new IpIngress(cidr: "123.23.45.6/12", startPort: 7002, endPort: 7004, description: "foo", ipProtocol: "tcp") + ] + + when: + op.operate([]) + + then: + 1 * securityGroupLookup.getAccountIdForName("test") >> "accountId1" + 1 * securityGroupLookup.getSecurityGroupByName("test", "bar", "vpc-123") >> Optional.of(ingressSecurityGroup) + + then: + 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", null) >> Optional.of(existingSecurityGroup) + 1 * ingressSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "bar", groupId: "124", vpcId: "vpc-123") + 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: [ + new IpPermission(ipProtocol: "tcp", fromPort: 7002, toPort: 7004, ipv4Ranges: [new IpRange(description: "foo", cidrIp:"123.23.45.6/12")]) + ]) + + then: + 1 * existingSecurityGroup.addIngress(_) + 1 * existingSecurityGroup.updateIngress(_) + 2 * existingSecurityGroup.updateTags(description, dynamicConfigService) + } + + void "should only update ingress of existing ingress when description is not null in the input"() { + final existingSecurityGroup = Mock(SecurityGroupUpdater) + final ingressSecurityGroup = Mock(SecurityGroupUpdater) + description.vpcId = null + description.ipIngress = [ + new IpIngress(cidr: "123.23.45.6/12", startPort: 7002, endPort: 7004, description: "foo", ipProtocol: "tcp") + ] + + when: + op.operate([]) + + then: + 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", null) >> Optional.of(existingSecurityGroup) + 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: [ + new IpPermission(ipProtocol: "tcp", fromPort: 7002, toPort: 7004, ipv4Ranges: [new IpRange(description: "foo", cidrIp:"123.23.45.6/12")]) + ]) + + then: + 1 * existingSecurityGroup.updateIngress(_) + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) + } + + void "should update existing ingress with description when description is null for existing rule"() { + final existingSecurityGroup = Mock(SecurityGroupUpdater) + final ingressSecurityGroup = 
Mock(SecurityGroupUpdater) + description.vpcId = null + description.ipIngress = [ + new IpIngress(cidr: "123.23.45.6/12", startPort: 7002, endPort: 7004, description: "foo", ipProtocol: "tcp") + ] + + when: + op.operate([]) + + then: + 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", null) >> Optional.of(existingSecurityGroup) + 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: [ + new IpPermission(ipProtocol: "tcp", fromPort: 7002, toPort: 7004, ipv4Ranges: [new IpRange(description: null, cidrIp:"123.23.45.6/12")]) + ]) + + then: + 1 * existingSecurityGroup.updateIngress(_) + 1 * existingSecurityGroup.updateTags(description, dynamicConfigService) + } + + void "should not update ingress of existing ingress with description for the same rule"() { + final existingSecurityGroup = Mock(SecurityGroupUpdater) + final ingressSecurityGroup = Mock(SecurityGroupUpdater) + description.vpcId = null + description.ipIngress = [ + new IpIngress(cidr: "123.23.45.6/12", startPort: 7002, endPort: 7004, description: null, ipProtocol: "tcp") + ] + + when: + op.operate([]) + + then: + 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", null) >> Optional.of(existingSecurityGroup) + 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: [ + new IpPermission(ipProtocol: "tcp", fromPort: 7002, toPort: 7004, ipv4Ranges: [new IpRange(description: "foo", cidrIp:"123.23.45.6/12")]) + ]) + + then: + 0 * existingSecurityGroup.updateIngress(_) + 0 * existingSecurityGroup.updateTags(description, dynamicConfigService) + } + + void "should add, remove and update security group ingress rules"() { + final existingSecurityGroup = Mock(SecurityGroupUpdater) + final ingressSecurityGroup = Mock(SecurityGroupUpdater) + description.securityGroupIngress = [ + new SecurityGroupIngress(name: "bar", startPort: 111, endPort: 112, vpcId: "vpc-123", ipProtocol: "tcp", accountName: "test") + ] + description.vpcId = null + description.ipIngress = [ + new IpIngress(cidr: "123.23.45.6/12", startPort: 7002, endPort: 7004, description: "foo", ipProtocol: "tcp") + ] + + when: + op.operate([]) + + then: + 1 * securityGroupLookup.getAccountIdForName("test") >> "accountId1" + 1 * securityGroupLookup.getSecurityGroupByName("test", "bar", "vpc-123") >> Optional.of(ingressSecurityGroup) + + then: + 1 * securityGroupLookup.getSecurityGroupByName("test", "foo", null) >> Optional.of(existingSecurityGroup) + 1 * ingressSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "bar", groupId: "124", vpcId: "vpc-123") + 1 * existingSecurityGroup.getSecurityGroup() >> new SecurityGroup(groupName: "foo", groupId: "123", ipPermissions: [ + new IpPermission(ipProtocol: "tcp", fromPort: 7002, toPort: 7004, ipv4Ranges: [new IpRange(description: "foo", cidrIp:"123.23.45.6/12")]), + new IpPermission(ipProtocol: "tcp", fromPort: 7002, toPort: 7004, ipv4Ranges: [new IpRange(description: "baz", cidrIp:"103.23.45.6/12")]) + ]) + + then: + 1 * existingSecurityGroup.addIngress(_) + 1 * existingSecurityGroup.updateIngress(_) + 3 * existingSecurityGroup.updateTags(description, dynamicConfigService) + 1 * existingSecurityGroup.removeIngress(_) + } + } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopierSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopierSpec.groovy index 
de7ee642b78..32b3edc6cd5 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopierSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/scalingpolicy/DefaultScalingPolicyCopierSpec.groovy @@ -30,7 +30,7 @@ import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult import com.amazonaws.services.cloudwatch.model.Dimension import com.amazonaws.services.cloudwatch.model.MetricAlarm import com.amazonaws.services.cloudwatch.model.PutMetricAlarmRequest -import com.netflix.spinnaker.clouddriver.aws.deploy.AsgReferenceCopier +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AsgReferenceCopier import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.services.IdGenerator @@ -56,14 +56,24 @@ class DefaultScalingPolicyCopierSpec extends Specification { getCloudWatch(_, 'us-east-1', true) >> sourceCloudWatch getCloudWatch(_, 'us-west-1', true) >> targetCloudWatch } + String newPolicyName = 'new_policy_name' + + def policyNameGenerator = Stub(DefaultScalingPolicyCopier.PolicyNameGenerator) { + generateScalingPolicyName(_, _, _, _, _) >> + 'new_policy_name' + } int count = 0 def idGenerator = Stub(IdGenerator) { nextId() >> { (++count).toString() } } + void cleanup() { + count = 0 + } + @Subject - ScalingPolicyCopier scalingPolicyCopier = new DefaultScalingPolicyCopier(amazonClientProvider: amazonClientProvider, idGenerator: idGenerator) + ScalingPolicyCopier scalingPolicyCopier = new DefaultScalingPolicyCopier(amazonClientProvider, idGenerator, policyNameGenerator) void 'should copy nothing when there are no scaling policies'() { when: @@ -89,6 +99,115 @@ class DefaultScalingPolicyCopierSpec extends Specification { replacedActions == ['ok-one', 'newPolicyARN'] } + void 'generates a semantically meaningful alarm name'() { + given: + ScalingPolicy policy = new ScalingPolicy( + policyARN: 'oldPolicyARN1', + autoScalingGroupName: 'asgard-v000', + policyName: 'policy1', + scalingAdjustment: 5, + adjustmentType: 'ChangeInCapacity', + cooldown: 100, + minAdjustmentStep: 2, + alarms: [new Alarm(alarmName: 'alarm1')] + ) + MetricAlarm alarm = new MetricAlarm( + alarmName: 'alarm1', + alarmDescription: 'alarm 1 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN1'], + insufficientDataActions: [], + metricName: 'metric1', + namespace: 'namespace1', + statistic: 'statistic1', + dimensions: [ + new Dimension(name: AsgReferenceCopier.DIMENSION_NAME_FOR_ASG, value: 'asgard-v000') + ], + period: 1, + unit: 'unit1', + evaluationPeriods: 2, + threshold: 4.2, + comparisonOperator: 'GreaterThanOrEqualToThreshold' + ) + + DefaultScalingPolicyCopier.PolicyNameGenerator generator = new DefaultScalingPolicyCopier.PolicyNameGenerator(idGenerator, amazonClientProvider) + + when: + String result = generator.generateScalingPolicyName(sourceCredentials, 'us-east-1', 'asgard-v010', 'asgard-v011', policy) + + then: + result.startsWith('asgard-v011-namespace1-metric1-GreaterThanOrEqualToThreshold-4.2-2-1-') + 1 * sourceCloudWatch.describeAlarms(new DescribeAlarmsRequest(alarmNames: ['alarm1'])) >> new DescribeAlarmsResult(metricAlarms: [ + alarm + ]) + } + + void 'generates a valid Scaling Policy Name'() { + given: + ScalingPolicy policy = new ScalingPolicy( + policyARN: 'oldPolicyARN1', + autoScalingGroupName: 'asgard-v000', + 
policyName: 'policy1', + scalingAdjustment: 5, + adjustmentType: 'ChangeInCapacity', + cooldown: 100, + minAdjustmentStep: 2, + alarms: [new Alarm(alarmName: 'alarm1')] + ) + MetricAlarm alarm = new MetricAlarm( + alarmName: 'alarm1', + alarmDescription: 'alarm 1 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN1'], + insufficientDataActions: [], + metricName: 'Metric1.with-all_acceptable/special#chars:defined', + namespace: 'Namespace1.with-all_acceptable/special#chars:defined', + statistic: 'statistic1', + dimensions: [ + new Dimension(name: AsgReferenceCopier.DIMENSION_NAME_FOR_ASG, value: 'asgard-v000') + ], + period: 1, + unit: 'unit1', + evaluationPeriods: 2, + threshold: 4.2, + comparisonOperator: 'GreaterThanOrEqualToThreshold' + ) + + DefaultScalingPolicyCopier.PolicyNameGenerator generator = new DefaultScalingPolicyCopier.PolicyNameGenerator(idGenerator, amazonClientProvider) + + when: + String result = generator.generateScalingPolicyName(sourceCredentials, 'us-east-1', 'asgard-v010', 'asgard-v011', policy) + + then: + result.startsWith('asgard-v011-Namespace1.with-all_acceptable/special#chars-defined-Metric1.with-all_acceptable/special#chars-defined-GreaterThanOrEqualToThreshold-4.2-2-1-') + 1 * sourceCloudWatch.describeAlarms(new DescribeAlarmsRequest(alarmNames: ['alarm1'])) >> new DescribeAlarmsResult(metricAlarms: [ + alarm + ]) + } + + void 'falls back to asg name replacement when no alarms found'() { + given: + ScalingPolicy policy = new ScalingPolicy( + policyARN: 'oldPolicyARN1', + autoScalingGroupName: 'asgard-v000', + policyName: 'asgard-v010-blah-blah-blah', + scalingAdjustment: 5, + adjustmentType: 'ChangeInCapacity', + cooldown: 100, + minAdjustmentStep: 2, + alarms: [] + ) + DefaultScalingPolicyCopier.PolicyNameGenerator generator = new DefaultScalingPolicyCopier.PolicyNameGenerator(idGenerator, amazonClientProvider) + + when: + String result = generator.generateScalingPolicyName(sourceCredentials, 'us-east-1', 'asgard-v010', 'asgard-v011', policy) + + then: + result == 'asgard-v011-blah-blah-blah' + } + void 'should copy scaling policies and alarms'() { when: scalingPolicyCopier.copyScalingPolicies(Mock(Task), 'asgard-v000', 'asgard-v001', sourceCredentials, targetCredentials, 'us-east-1', 'us-west-1') @@ -131,7 +250,7 @@ class DefaultScalingPolicyCopierSpec extends Specification { ) 1 * targetAutoScaling.putScalingPolicy(new PutScalingPolicyRequest( autoScalingGroupName: 'asgard-v001', - policyName: 'asgard-v001-policy-1', + policyName: newPolicyName, scalingAdjustment: 5, adjustmentType: 'ChangeInCapacity', cooldown: 100, @@ -139,7 +258,7 @@ class DefaultScalingPolicyCopierSpec extends Specification { )) >> new PutScalingPolicyResult(policyARN: 'newPolicyARN1') 1 * targetAutoScaling.putScalingPolicy(new PutScalingPolicyRequest( autoScalingGroupName: 'asgard-v001', - policyName: 'asgard-v001-policy-2', + policyName: newPolicyName, scalingAdjustment: 10, adjustmentType: 'PercentChangeInCapacity', cooldown: 200, @@ -216,7 +335,7 @@ class DefaultScalingPolicyCopierSpec extends Specification { ), ]) 1 * targetCloudWatch.putMetricAlarm(new PutMetricAlarmRequest( - alarmName: 'asgard-v001-alarm-3', + alarmName: 'asgard-v001-alarm-1', alarmDescription: 'alarm 1 description', actionsEnabled: true, oKActions: [], @@ -235,7 +354,7 @@ class DefaultScalingPolicyCopierSpec extends Specification { comparisonOperator: 'GreaterThanOrEqualToThreshold' )) 1 * targetCloudWatch.putMetricAlarm(new PutMetricAlarmRequest( - alarmName: 
'asgard-v001-alarm-4', + alarmName: 'asgard-v001-alarm-2', alarmDescription: 'alarm 2 description', actionsEnabled: true, oKActions: [], @@ -255,7 +374,7 @@ class DefaultScalingPolicyCopierSpec extends Specification { comparisonOperator: 'LessThanOrEqualToThreshold' )) 1 * targetCloudWatch.putMetricAlarm(new PutMetricAlarmRequest( - alarmName: 'asgard-v001-alarm-5', + alarmName: 'asgard-v001-alarm-3', alarmDescription: 'alarm 3 description', actionsEnabled: false, oKActions: [], diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProviderSpec.groovy index e71c5705ffe..2b4355ede93 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/LocalFileUserDataProviderSpec.groovy @@ -17,14 +17,19 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.userdata -import com.netflix.spinnaker.clouddriver.aws.deploy.LaunchConfigurationBuilder.LaunchConfigurationSettings +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.LaunchConfigurationBuilder.LaunchConfigurationSettings +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput +import com.netflix.spinnaker.clouddriver.core.services.Front50Service +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import org.springframework.http.HttpStatus +import retrofit.RetrofitError +import retrofit.client.Response import spock.lang.Specification class LocalFileUserDataProviderSpec extends Specification { static final String APP = 'app' static final String STACK = 'stack' - static final String DETAIL = 'detail' static final String COUNTRIES = 'countries' static final String DEV_PHASE = 'devPhase' static final String HARDWARE = 'hardware' @@ -35,26 +40,39 @@ class LocalFileUserDataProviderSpec extends Specification { static final String ACCOUNT = 'account' static final String ENVIRONMENT = 'environment' static final String ACCOUNT_TYPE = 'accountType' + static final String DETAIL = "detail-c0${COUNTRIES}-d0${DEV_PHASE}-h0${HARDWARE}-p0${PARTNERS}-r0${REVISION}-z0${ZONE}" - static final String ASG_NAME = "${APP}-${STACK}-${DETAIL}-c0${COUNTRIES}-d0${DEV_PHASE}-h0${HARDWARE}-p0${PARTNERS}-r0${REVISION}-z0${ZONE}" + static final String ASG_NAME = "${APP}-${STACK}-${DETAIL}" static final String LAUNCH_CONFIG_NAME = 'launchConfigName' - static final LaunchConfigurationSettings SETTINGS = new LaunchConfigurationSettings( - baseName: ASG_NAME, - region: REGION, - account: ACCOUNT, - environment: ENVIRONMENT, - accountType: ACCOUNT_TYPE) + static final LaunchConfigurationSettings SETTINGS = LaunchConfigurationSettings.builder() + .baseName(ASG_NAME) + .region(REGION) + .account(ACCOUNT) + .environment(ENVIRONMENT) + .accountType(ACCOUNT_TYPE) + .build() + + static final UserDataInput INPUT = UserDataInput + .builder() + .asgName(SETTINGS.baseName) + .launchSettingName(LAUNCH_CONFIG_NAME) + .environment(SETTINGS.environment) + .region(SETTINGS.region) + .account(SETTINGS.account) + .accountType(SETTINGS.accountType) + .build() void "replaces expected strings"() { given: LocalFileUserDataProvider localFileUserDataProvider = GroovySpy() localFileUserDataProvider.localFileUserDataProperties = new LocalFileUserDataProperties() + 
localFileUserDataProvider.defaultUserDataTokenizer = new DefaultUserDataTokenizer() localFileUserDataProvider.isLegacyUdf(_, _) >> legacyUdf localFileUserDataProvider.assembleUserData(legacyUdf, _, _, _) >> getRawUserData() when: - def userData = localFileUserDataProvider.getUserData(LAUNCH_CONFIG_NAME, SETTINGS, null) + def userData = localFileUserDataProvider.getUserData(INPUT) then: userData == getFormattedUserData(expectedEnvironment) @@ -65,6 +83,46 @@ class LocalFileUserDataProviderSpec extends Specification { false | ENVIRONMENT } + void "return defaultLegacyUdf if front50.getApplication throws SpinnakerHttpException with NOT_FOUND status"() { + given: + RetrofitError notFoundRetrofitError = RetrofitError.httpError("url", + new Response("url", HttpStatus.NOT_FOUND.value(), "Application Not Found", [], null), + null, null) + LocalFileUserDataProvider localFileUserDataProvider = new LocalFileUserDataProvider() + localFileUserDataProvider.localFileUserDataProperties = new LocalFileUserDataProperties() + localFileUserDataProvider.front50Service = Mock(Front50Service) + localFileUserDataProvider.front50Service.getApplication(_) >> {throw new SpinnakerHttpException(notFoundRetrofitError)} + + when: + def useLegacyUdf = localFileUserDataProvider.isLegacyUdf("test_account", "unknown_application") + + then: + useLegacyUdf == localFileUserDataProvider.localFileUserDataProperties.defaultLegacyUdf + } + + void "isLegacyUdf includes the exception from front50 when failing to read the legacyUdf preference"() { + given: + // anything other than a 404/not found works here. On 404, isLegacyUdf falls back to a default. + RetrofitError arbitraryRetrofitError = RetrofitError.httpError("url", + new Response("url", HttpStatus.INTERNAL_SERVER_ERROR.value(), "some error", [], null), + null, null) + SpinnakerHttpException spinnakerHttpException = new SpinnakerHttpException(arbitraryRetrofitError) + // To speed up the test by avoiding a bunch of retries, set retryable to + // false. 
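+ // Marking the exception non-retryable makes the spec fail fast, so it can assert on the cause chain directly.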
+ spinnakerHttpException.setRetryable(false) + LocalFileUserDataProvider localFileUserDataProvider = new LocalFileUserDataProvider() + localFileUserDataProvider.localFileUserDataProperties = new LocalFileUserDataProperties() + localFileUserDataProvider.front50Service = Mock(Front50Service) + localFileUserDataProvider.front50Service.getApplication(_) >> {throw spinnakerHttpException} + + when: + localFileUserDataProvider.isLegacyUdf("test_account", "unknown_application") + + then: + IllegalStateException ex = thrown() + ex.cause == spinnakerHttpException + } + + static String getRawUserData() { return [ "export ACCOUNT=%%account%%", @@ -106,5 +164,4 @@ class LocalFileUserDataProviderSpec extends Specification { "export LAUNCH_CONFIG=${LAUNCH_CONFIG_NAME}", ].join('\n') } - } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProviderAggregatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProviderAggregatorSpec.groovy new file mode 100644 index 00000000000..dbc4738ee3c --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/UserDataProviderAggregatorSpec.groovy @@ -0,0 +1,117 @@ +package com.netflix.spinnaker.clouddriver.aws.deploy.userdata + +import com.amazonaws.services.ec2.model.UserData +import com.netflix.frigga.Names +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataInput +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataOverride +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataProvider +import com.netflix.spinnaker.clouddriver.aws.userdata.UserDataTokenizer +import org.apache.commons.io.IOUtils +import spock.lang.Specification +import spock.lang.Unroll + +class UserDataProviderAggregatorSpec extends Specification { + + UserDataProviderAggregator userDataProviderAggregator = new UserDataProviderAggregator([new UserDataProviderA(), new UserDataProviderB()], [new DefaultUserDataTokenizer(), new CustomTokenizer()]) + + static final String APP = 'app' + static final String STACK = 'stack' + static final String COUNTRIES = 'countries' + static final String DEV_PHASE = 'devPhase' + static final String HARDWARE = 'hardware' + static final String PARTNERS = 'partners' + static final String REVISION = 99 + static final String ZONE = 'zone' + static final String REGION = 'region' + static final String ACCOUNT = 'account' + static final String ENVIRONMENT = 'environment' + static final String ACCOUNT_TYPE = 'accountType' + static final String DETAIL = "detail-c0${COUNTRIES}-d0${DEV_PHASE}-h0${HARDWARE}-p0${PARTNERS}-r0${REVISION}-z0${ZONE}" + static final String ASG_NAME = "${APP}-${STACK}-${DETAIL}" + static final String LAUNCH_CONFIG_NAME = 'launchConfigName' + + void "User data is aggregated correctly; a -> b -> user supplied user data"() { + given: + UserDataInput request = UserDataInput + .builder() + .asgName(ASG_NAME) + .launchSettingName(LAUNCH_CONFIG_NAME) + .environment(ENVIRONMENT) + .region(REGION) + .account(ACCOUNT) + .accountType(ACCOUNT_TYPE) + .userDataOverride(new UserDataOverride()) + .base64UserData("ZXhwb3J0IFVTRVJEQVRBPTEK") + .build() + + when: + //export USERDATA=1 + String result = userDataProviderAggregator.aggregate(request) + + then: + //a + //b + //export USERDATA=1 + result == "YQpiCmV4cG9ydCBVU0VSREFUQT0xCg==" + } + + @Unroll + void "User data is overridden with the user supplied base64 encoded user data and tokens are replaced correctly - #userDataFileName"() { + 
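// the "${userDataFileName}-tokenized.txt" resources hold the expected user data after token replacement + 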
given: + String tokenizedUserdata = IOUtils.toString(getClass().getResourceAsStream("${userDataFileName}-tokenized.txt")) + String expectedResult = Base64.getEncoder().encodeToString(tokenizedUserdata.getBytes("utf-8")) + + String userdata = IOUtils.toString(getClass().getResourceAsStream("${userDataFileName}.txt")) + String base64String = Base64.getEncoder().encodeToString(userdata.getBytes("utf-8")) + + UserDataInput request = UserDataInput + .builder() + .asgName(ASG_NAME) + .launchSettingName(LAUNCH_CONFIG_NAME) + .environment(ENVIRONMENT) + .region(REGION) + .account(ACCOUNT) + .accountType(ACCOUNT_TYPE) + .userDataOverride(userDataOverride) + .base64UserData(base64String) + .build() + + when: + String result = userDataProviderAggregator.aggregate(request) + + then: + result == expectedResult + + where: + userDataOverride | userDataFileName + new UserDataOverride(enabled: true) | "default-token-userdata" + new UserDataOverride(enabled: true, tokenizerName: "custom") | "custom-token-userdata" + } +} + +class UserDataProviderA implements UserDataProvider { + String getUserData(UserDataInput userDataRequest) { + return "a" + } +} + +class UserDataProviderB implements UserDataProvider { + String getUserData(UserDataInput userDataRequest) { + return "b" + } +} + +class CustomTokenizer implements UserDataTokenizer { + + @Override + boolean supports(String tokenizerName) { + return tokenizerName == "custom" + } + + @Override + String replaceTokens(Names names, UserDataInput userDataInput, String rawUserData, Boolean legacyUdf) { + return rawUserData + .replace("%%custom_token_a%%", "custom-a") + .replace("%%custom_token_b%%", "custom-b") + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionAndInstanceIdsValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionAndInstanceIdsValidatorSpec.groovy index 3f68f75cb39..a11c9673832 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionAndInstanceIdsValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionAndInstanceIdsValidatorSpec.groovy @@ -19,7 +19,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -36,7 +36,7 @@ abstract class AbstractConfiguredRegionAndInstanceIdsValidatorSpec extends Speci setup: def description = getDescription() description.credentials = description.credentials ?: TestCredential.named('test') - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -52,7 +52,7 @@ abstract class AbstractConfiguredRegionAndInstanceIdsValidatorSpec extends Speci def description = getDescription() description.instanceIds = [""] description.region = "us-west-1" - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -68,7 +68,7 @@ abstract class AbstractConfiguredRegionAndInstanceIdsValidatorSpec extends 
Speci description.credentials = description.credentials ?: TestCredential.named('test') description.region = "us-west-5" description.instanceIds = ["i-123456"] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionsValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionsValidatorSpec.groovy index 80560b6634d..83a85c73be3 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionsValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AbstractConfiguredRegionsValidatorSpec.groovy @@ -20,7 +20,7 @@ import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.AsgDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -37,7 +37,7 @@ abstract class AbstractConfiguredRegionsValidatorSpec extends Specification { setup: def description = getDescription() description.credentials = TestCredential.named('test') - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -53,7 +53,7 @@ abstract class AbstractConfiguredRegionsValidatorSpec extends Specification { description.asgs = [new AsgDescription( region: "us-east-5" )] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidatorSpec.groovy index 27ba1e5c925..83b917b5a11 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AllowLaunchDescriptionValidatorSpec.groovy @@ -16,10 +16,11 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.AllowLaunchDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification class AllowLaunchDescriptionValidatorSpec extends Specification { @@ -28,7 +29,7 @@ class AllowLaunchDescriptionValidatorSpec extends Specification { setup: AllowLaunchDescriptionValidator validator = new AllowLaunchDescriptionValidator() def description = new AllowLaunchDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -36,22 +37,22 @@ class AllowLaunchDescriptionValidatorSpec extends Specification { 
then: 1 * errors.rejectValue("amiName", _) 1 * errors.rejectValue("region", _) - 1 * errors.rejectValue("account", _) + 1 * errors.rejectValue("targetAccount", _) } void "unconfigured account is rejected"() { setup: AllowLaunchDescriptionValidator validator = new AllowLaunchDescriptionValidator() - def credentialsHolder = Mock(AccountCredentialsProvider) - validator.accountCredentialsProvider = credentialsHolder - def description = new AllowLaunchDescription(account: "foo") - def errors = Mock(Errors) + def credentialsHolder = Mock(CredentialsRepository) + validator.credentialsRepository = credentialsHolder + def description = new AllowLaunchDescription(targetAccount: "foo") + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) then: - 1 * credentialsHolder.getAll() >> { [TestCredential.named('prod')] } - 1 * errors.rejectValue("account", _) + 1 * credentialsHolder.getOne("foo") >> { null } + 1 * errors.rejectValue("targetAccount", _) } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AttachClassicLinkVpcDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AttachClassicLinkVpcDescriptionValidatorSpec.groovy index 2394fc38224..b577e7bf532 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AttachClassicLinkVpcDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/AttachClassicLinkVpcDescriptionValidatorSpec.groovy @@ -18,7 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.AttachClassicLinkVpcDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -34,7 +34,7 @@ class AttachClassicLinkVpcDescriptionValidatorSpec extends Specification { void "invalid instanceId fails validation"() { setup: def description = new AttachClassicLinkVpcDescription(vpcId: "vpc-123") - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -46,7 +46,7 @@ class AttachClassicLinkVpcDescriptionValidatorSpec extends Specification { void "invalid vpcId fails validation"() { setup: def description = new AttachClassicLinkVpcDescription(instanceId: "i-123") - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +59,7 @@ class AttachClassicLinkVpcDescriptionValidatorSpec extends Specification { setup: def description = new AttachClassicLinkVpcDescription(credentials: TestCredential.named('test')) description.region = "us-west-5" - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidatorSpec.groovy index 0b1cd1c52a2..5ce255efd2c 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidatorSpec.groovy +++ 
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/BasicAmazonDeployDescriptionValidatorSpec.groovy @@ -17,12 +17,13 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -36,19 +37,20 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { @Shared NetflixAmazonCredentials amazonCredentials = TestCredential.named(ACCOUNT_NAME) + @Shared + def credentialsRepository = Stub(CredentialsRepository) { + getOne("auto") >> {amazonCredentials} + } void setupSpec() { validator = new BasicAmazonDeployDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) - credentialsRepo.save(ACCOUNT_NAME, amazonCredentials) - validator.accountCredentialsProvider = credentialsProvider + validator.credentialsRepository = credentialsRepository } void "pass validation with proper description inputs"() { setup: def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, availabilityZones: ["us-east-1": []], capacity: [min: 1, max: 1, desired: 1], subnetType: "internal") - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -61,7 +63,7 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { setup: def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, availabilityZones: ["us-east-1": []], capacity: [min: 1, max: 1, desired: 1], associatePublicIpAddress: true) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -80,7 +82,7 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new BasicAmazonDeployDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -98,7 +100,7 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { setup: def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, availabilityZones: ["us-east-1": []]) description.capacity = [ min, max, desired ] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -127,7 +129,7 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { void "unconfigured region fails validation"() { setup: def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: 
amazonCredentials, availabilityZones: ["eu-west-5": []]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -139,7 +141,7 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { void "unconfigured account region fails validation"() { setup: def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, availabilityZones: ["us-west-2": []]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -153,7 +155,7 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { setup: def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, availabilityZones: ["us-east-1": []], capacity: [min: 1, max: 1, desired: 1], subnetType: "internal", blockDevices: [blockDevice]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -166,7 +168,6 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { new AmazonBlockDevice() | 'basicAmazonDeployDescription.block.device.not.named' new AmazonBlockDevice(deviceName: '/dev/sdb', virtualName: 'ephemeral0', size: 69) | 'basicAmazonDeployDescription.block.device.ephemeral.config' new AmazonBlockDevice(deviceName: '/dev/sdb', iops: 1) | 'basicAmazonDeployDescription.block.device.ebs.config' - } void "valid block devices validate"() { @@ -177,12 +178,275 @@ class BasicAmazonDeployDescriptionValidatorSpec extends Specification { ] def description = new BasicAmazonDeployDescription(application: "foo", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, availabilityZones: ["us-east-1": []], capacity: [min: 1, max: 1, desired: 1], subnetType: "internal", blockDevices: blockDevices) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors._ + } + + @Unroll + void "valid request with launch template only feature #ltOnlyProperty and launch template enabled succeeds validation"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, instanceType: instanceType, + application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + description."${ltOnlyProperty}" = propertyEnabled + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors._ + + where: + ltOnlyProperty | propertyEnabled | instanceType + 'unlimitedCpuCredits' | true | 't3.large' + 'unlimitedCpuCredits' | false | 't3.large' + 'requireIMDSv2' | true | 'c3.small' + 'associateIPv6Address' | true | 'm5.large' + 'enableEnclave' | true | 'm5.large' + } + + void "valid request with launch template disabled and all launch template only features omitted succeeds validation"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: false, instanceType: "c3.large", + application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + when: + 
validator.validate([], description, errors) + + then: + 0 * errors._ + } + + void "valid request with launch template enabled and all launch template only features omitted succeeds validation"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, instanceType: instanceType, + application: "foo", amiName: "foo",credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors._ + + where: + instanceType << ['t2.large', 'c3.small'] + } + + void "valid request with unlimited cpu credits enabled succeeds validation for supported instance types"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, unlimitedCpuCredits: true, instanceType: instanceType, + application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors._ + + where: + instanceType << ['t2.large', 't3.small', 't3a.micro'] + } + + void "valid request with unlimited cpu credits disabled succeeds validation for supported instance types"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, unlimitedCpuCredits: false, instanceType: instanceType, + application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors._ + + where: + instanceType << ['t2.large', 't3.small', 't3a.micro'] + } + + @Unroll + void "request with launch template disabled but launch template only features enabled, ignores related values and succeeds validation"() { + setup: + def description = new BasicAmazonDeployDescription( + instanceType: "t3.large", application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + + and: + description.setLaunchTemplate = false + ltOnlyPropertyAndValue.each { entry -> + description."$entry.key" = entry.value + } + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) then: 0 * errors._ + + where: + ltOnlyPropertyAndValue << [[associateIPv6Address: true, requireIMDSv2: false], + [requireIMDSv2: true, unlimitedCpuCredits: true], + [associateIPv6Address: false, unlimitedCpuCredits: false], + [requireIMDSv2: true, associateIPv6Address: true, unlimitedCpuCredits: true], + [onDemandBaseCapacity: 2, spotAllocationStrategy: "capacity-optimized"], + [onDemandBaseCapacity: 2, onDemandPercentageAboveBaseCapacity: 50, spotAllocationStrategy: "lowest-price"], + [spotAllocationStrategy: "capacity-optimized"], + [spotInstancePools: 3, spotAllocationStrategy: "lowest-price"], + [spotAllocationStrategy: "lowest-price", launchTemplateOverridesForInstanceType:[new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType(instanceType: "m5.xlarge", weightedCapacity: 2)]]] + } + + 
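// A minimal sketch (illustrative only, not part of this diff) of how the warning
// asserted in the next spec can be assembled: collect the launch-template-only fields
// that were explicitly set even though 'setLaunchTemplate' is false. The helper name is
// hypothetical, and list-valued fields are assumed here to default to null; the field
// names and message shape mirror what getWarnings() is expected to return below.
static String launchTemplateOnlyFieldsWarning(BasicAmazonDeployDescription description) {
  def fieldsSet = ['associateIPv6Address', 'launchTemplateOverridesForInstanceType',
                   'onDemandBaseCapacity', 'onDemandPercentageAboveBaseCapacity',
                   'requireIMDSv2', 'spotAllocationStrategy', 'spotInstancePools',
                   'unlimitedCpuCredits'].findAll { description."$it" != null }.sort()
  return "WARNING: The following fields ${fieldsSet} work as expected only with AWS EC2 Launch Template, " +
    "but 'setLaunchTemplate' is set to false in request with account: ${description.account}, " +
    "application: ${description.application}, stack: ${description.stack})"
}
+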
@Unroll + void "request with launch template disabled but launch template only features enabled generates warnings correctly"() { + setup: + def description = new BasicAmazonDeployDescription( + instanceType: "t3.large", application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + + and: + description.setLaunchTemplate = false + ltOnlyPropertyAndValue.each { entry -> + description."$entry.key" = entry.value + } + + when: + def actualWarning = validator.getWarnings(description) + + then: + final String expectedWarning = "WARNING: The following fields ${expectedFieldsInWarning.sort()} work as expected only with AWS EC2 Launch Template, " + + "but 'setLaunchTemplate' is set to false in request with account: ${description.account}, " + + "application: ${description.application}, stack: ${description.stack})" + expectedWarning == actualWarning + + where: + ltOnlyPropertyAndValue || expectedFieldsInWarning + [requireIMDSv2: true, associateIPv6Address: null, unlimitedCpuCredits: false] || ["requireIMDSv2", "unlimitedCpuCredits"] + [requireIMDSv2: false, associateIPv6Address: true, unlimitedCpuCredits: true] || ["associateIPv6Address", "unlimitedCpuCredits"] + [requireIMDSv2: true, associateIPv6Address: true, unlimitedCpuCredits: null] || ["associateIPv6Address", "requireIMDSv2"] + [requireIMDSv2: true, associateIPv6Address: false, unlimitedCpuCredits: false] || ["associateIPv6Address", "requireIMDSv2", "unlimitedCpuCredits"] + [requireIMDSv2: true, associateIPv6Address: true, unlimitedCpuCredits: true] || ["associateIPv6Address", "requireIMDSv2", "unlimitedCpuCredits"] + [onDemandBaseCapacity: 2, spotAllocationStrategy: "capacity-optimized"] || ["onDemandBaseCapacity", "spotAllocationStrategy"] + [onDemandBaseCapacity: 2, onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: "lowest-price"] || ["onDemandBaseCapacity", "onDemandPercentageAboveBaseCapacity", "spotAllocationStrategy"] + [spotAllocationStrategy: "capacity-optimized"] || ["spotAllocationStrategy"] + [spotInstancePools: 3, spotAllocationStrategy: "lowest-price"] || ["spotAllocationStrategy", "spotInstancePools"] + [spotAllocationStrategy: "lowest-price", + launchTemplateOverridesForInstanceType:[new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "m5.xlarge", weightedCapacity: 2)]] || ["launchTemplateOverridesForInstanceType", "spotAllocationStrategy"] + } + + void "invalid request with unlimited cpu credits and unsupported instance type fails validation"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, unlimitedCpuCredits: true, instanceType: instanceType, + application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("unlimitedCpuCredits", rejection) + + where: + instanceType | rejection + 'c3.large' | 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + 'm5.xlarge' | 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + 'r5.small' | 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + } + + void "invalid request with standard (non-unlimited) cpu 
credits and unsupported / invalid instance type fails validation"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, unlimitedCpuCredits: false, instanceType: instanceType, + application: "foo", amiName: "foo", credentials: amazonCredentials, subnetType: "private-subnet", + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("unlimitedCpuCredits", rejection) + + where: + instanceType | rejection + 'c3.large' | 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + 'm5.xlarge' | 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + 'r5.small' | 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + } + + void "invalid request with spotInstancePools and unsupported spotAllocationStrategy fails validation"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: false, spotInstancePools: 3, spotAllocationStrategy: "capacity-optimized" + ) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("spotInstancePools", "basicAmazonDeployDescription.spotInstancePools.not.supported.for.spotAllocationStrategy") } + + void "validate all instance types in a request with multiple instance types"() { + setup: + def description = new BasicAmazonDeployDescription( + setLaunchTemplate: true, + application: "foo", amiName: "foo", credentials: amazonCredentials, + availabilityZones: ["us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"]], + capacity: [min: 1, max: 1, desired: 1]) + def errors = Mock(ValidationErrors) + + and: + ltOnlyPropertyAndValue.each { entry -> + description."$entry.key" = entry.value + } + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("unlimitedCpuCredits", rejection) + + where: + ltOnlyPropertyAndValue || rejection + [instanceType: "t2.large", unlimitedCpuCredits: true, + launchTemplateOverridesForInstanceType:[ + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "m5.xlarge", weightedCapacity: 4)]] || 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + + [instanceType: "c3.large", unlimitedCpuCredits: true, + launchTemplateOverridesForInstanceType:[ + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "t2.xlarge", weightedCapacity: 2)]] || 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + + [instanceType: "t3.small", unlimitedCpuCredits: true, + spotAllocationStrategy: "lowest-price", + launchTemplateOverridesForInstanceType:[ + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "c3.large", weightedCapacity: 4), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "t3.large", weightedCapacity: 2)]] || 'basicAmazonDeployDescription.bursting.not.supported.by.instanceType' + } + } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidatorSpec.groovy index 005cbe84fd6..db5cc10d6a2 100644 --- 
a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/CreateNetworkInterfaceDescriptionValidatorSpec.groovy @@ -19,7 +19,7 @@ import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.CreateNetworkInterfaceDescription import com.netflix.spinnaker.clouddriver.aws.model.AwsNetworkInterface -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -47,7 +47,7 @@ class CreateNetworkInterfaceDescriptionValidatorSpec extends Specification { secondaryPrivateIpAddresses: ["127.0.0.2", "127.0.0.3"] ) ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +59,7 @@ class CreateNetworkInterfaceDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new CreateNetworkInterfaceDescription(credentials: credentials) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -74,7 +74,7 @@ class CreateNetworkInterfaceDescriptionValidatorSpec extends Specification { void "unconfigured region fails validation"() { setup: def description = new CreateNetworkInterfaceDescription(credentials: credentials, availabilityZonesGroupedByRegion: ["eu-west-5": []]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidatorSpec.groovy index ed1b226222a..78e76a9fc5e 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAmazonLoadBalancerDescriptionValidatorSpec.groovy @@ -19,7 +19,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAmazonLoadBalancerDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Specification import spock.lang.Subject @@ -29,7 +29,7 @@ class DeleteAmazonLoadBalancerDescriptionValidatorSpec extends Specification { void "should fail validation with invalid load balancer name"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def description = new DeleteAmazonLoadBalancerDescription(regions: ["us-east-1"], credentials: Stub(NetflixAmazonCredentials)) when: @@ -44,7 +44,7 @@ class DeleteAmazonLoadBalancerDescriptionValidatorSpec extends Specification { def creds = TestCredential.named('test') def description = new DeleteAmazonLoadBalancerDescription(loadBalancerName: "foo--frontend", credentials: creds) 
description.regions = ["us-east-5"] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidatorSpec.groovy index 50470740566..4c6d8ced92a 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteAsgTagsDescriptionValidatorSpec.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteAsgTagsDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors class DeleteAsgTagsDescriptionValidatorSpec extends AbstractConfiguredRegionsValidatorSpec { @@ -35,7 +35,7 @@ class DeleteAsgTagsDescriptionValidatorSpec extends AbstractConfiguredRegionsVal setup: def description = new DeleteAsgTagsDescription() description.tagKeys = [null] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidatorSpec.groovy index 7b69b406fc5..c29f906a225 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DeleteSecurityGroupDescriptionValidatorSpec.groovy @@ -20,7 +20,7 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.DeleteSecurityGroupDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -38,7 +38,7 @@ class DeleteSecurityGroupDescriptionValidatorSpec extends Specification { void "should fail validation with invalid security group name"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def description = new DeleteSecurityGroupDescription(regions: ["us-east-1"], credentials: Stub(NetflixAmazonCredentials)) validator.amazonClientProvider = amazonClientProvider @@ -54,7 +54,7 @@ class DeleteSecurityGroupDescriptionValidatorSpec extends Specification { def creds = TestCredential.named('test') def description = new DeleteSecurityGroupDescription(securityGroupName: "foo", credentials: creds) description.regions = ["us-east-5"] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git 
a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryValidatorSpec.groovy index 82e74369820..056e3c1b226 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/DisableInstancesInDiscoveryValidatorSpec.groovy @@ -19,7 +19,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableInstanceDiscoveryDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors class DisableInstancesInDiscoveryValidatorSpec extends AbstractConfiguredRegionAndInstanceIdsValidatorSpec { @Override @@ -42,7 +42,7 @@ class DisableInstancesInDiscoveryValidatorSpec extends AbstractConfiguredRegionA description.instanceIds = ["i-123456"] description.credentials = TestCredential.named('test') - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: getDescriptionValidator().validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryValidatorSpec.groovy index 95556c01e6d..256535c83b1 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/EnableInstancesInDiscoveryValidatorSpec.groovy @@ -19,7 +19,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.EnableDisableInstanceDiscoveryDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors class EnableInstancesInDiscoveryValidatorSpec extends AbstractConfiguredRegionAndInstanceIdsValidatorSpec { @Override @@ -42,7 +42,7 @@ class EnableInstancesInDiscoveryValidatorSpec extends AbstractConfiguredRegionAn description.instanceIds = ["i-123456"] description.credentials = TestCredential.named('test') - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: getDescriptionValidator().validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyServerGroupLaunchTemplateValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyServerGroupLaunchTemplateValidatorSpec.groovy new file mode 100644 index 00000000000..a6c8294260e --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ModifyServerGroupLaunchTemplateValidatorSpec.groovy @@ -0,0 +1,169 @@ +/* + * Copyright 2021 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.deploy.validators + +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.credentials.CredentialsRepository +import spock.lang.Shared +import spock.lang.Specification + +class ModifyServerGroupLaunchTemplateValidatorSpec extends Specification { + + private static final ACCOUNT_NAME = "auto" + + @Shared + ModifyServerGroupLaunchTemplateValidator validator + + @Shared + NetflixAmazonCredentials amazonCredentials = TestCredential.named(ACCOUNT_NAME) + + @Shared + def credentialsRepository = Stub(CredentialsRepository) { + getOne("auto") >> {amazonCredentials} + } + + void setupSpec() { + validator = new ModifyServerGroupLaunchTemplateValidator() + validator.credentialsRepository = credentialsRepository + } + + void "pass validation with proper description inputs"() { + setup: + def description = new ModifyServerGroupLaunchTemplateDescription( + asgName: "my-asg-v000", region: "us-east-1", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, subnetType: "internal") + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors._ + } + + void "should fail validation if asgName is unset"() { + setup: + def description = new ModifyServerGroupLaunchTemplateDescription( + asgName: asgName, region: "us-east-1", amiName: "foo", instanceType: "foo", credentials: amazonCredentials, subnetType: "internal") + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("asgName", "modifyservergrouplaunchtemplatedescription.asgName.empty") + + where: + asgName << [null, "", " "] + } + + void "should fail validation if only metadata fields are set"() { + setup: + def description = new ModifyServerGroupLaunchTemplateDescription( + asgName: "my-asg-v000", region: "us-east-1", credentials: amazonCredentials) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("multiple fields", + "modifyservergrouplaunchtemplatedescription.launchTemplateAndServerGroupFields.empty", + "No changes requested to launch template or related server group fields for modifyServerGroupLaunchTemplate operation.") + } + + void "valid request with unlimited cpu credits succeeds validation for supported instance types"() { + setup: + def description = new ModifyServerGroupLaunchTemplateDescription( + asgName: "my-asg-v000", region: "us-east-1", amiName: "foo", instanceType: instanceType, + credentials: amazonCredentials, subnetType: "internal", 
unlimitedCpuCredits: unlimitedCpuCredits) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + if(expectedError == null) { + 0 * errors._ + } else { + 1 * errors.rejectValue("unlimitedCpuCredits", expectedError) + } + + where: + instanceType | unlimitedCpuCredits || expectedError + 't2.large' | null || null + 't3.small' | true || null + 't3a.micro' | false || null + 'c3.large' | false || 'modifyservergrouplaunchtemplatedescription.bursting.not.supported.by.instanceType' + 'c3.large' | true || 'modifyservergrouplaunchtemplatedescription.bursting.not.supported.by.instanceType' + } + + void "validate all instance types in a request with multiple instance types"() { + setup: + def description = new ModifyServerGroupLaunchTemplateDescription( + asgName: "my-asg-v000", amiName: "foo", region: "us-east-1", credentials: amazonCredentials, subnetType: "internal") + def errors = Mock(ValidationErrors) + + and: + ltOnlyPropertyAndValue.each { entry -> + description."$entry.key" = entry.value + } + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("unlimitedCpuCredits", rejection) + + where: + ltOnlyPropertyAndValue || rejection + [instanceType: "t2.large", unlimitedCpuCredits: true, + launchTemplateOverridesForInstanceType:[ + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "m5.xlarge", weightedCapacity: 4)]] || 'modifyservergrouplaunchtemplatedescription.bursting.not.supported.by.instanceType' + + [instanceType: "c3.large", unlimitedCpuCredits: true, + launchTemplateOverridesForInstanceType:[ + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "t2.xlarge", weightedCapacity: 2)]] || 'modifyservergrouplaunchtemplatedescription.bursting.not.supported.by.instanceType' + + [instanceType: "t3.small", unlimitedCpuCredits: true, + spotAllocationStrategy: "lowest-price", + launchTemplateOverridesForInstanceType:[ + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "c3.large", weightedCapacity: 4), + new BasicAmazonDeployDescription.LaunchTemplateOverridesForInstanceType( + instanceType: "t3.large", weightedCapacity: 2)]] || 'modifyservergrouplaunchtemplatedescription.bursting.not.supported.by.instanceType' + } + + void "invalid request with spotInstancePools and unsupported spotAllocationStrategy fails validation"() { + setup: + def description = new ModifyServerGroupLaunchTemplateDescription( + asgName: "my-asg-v000", region: "us-east-1", amiName: "foo", credentials: amazonCredentials, + subnetType: "internal", spotInstancePools: 3, spotAllocationStrategy: "capacity-optimized") + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue("spotInstancePools", "modifyservergrouplaunchtemplatedescription.spotInstancePools.not.supported.for.spotAllocationStrategy") + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidatorSpec.groovy index 2667da5a9ec..da64c2f9472 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidatorSpec.groovy +++ 
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResizeAsgDescriptionValidatorSpec.groovy @@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.aws.deploy.description.ResizeAsgDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.model.ServerGroup -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification @@ -40,7 +40,7 @@ class ResizeAsgDescriptionValidatorSpec extends Specification { )], credentials: Stub(NetflixAmazonCredentials) ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -60,7 +60,7 @@ class ResizeAsgDescriptionValidatorSpec extends Specification { setup: def description = new ResizeAsgDescription() description.credentials = TestCredential.named('test') - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -76,7 +76,7 @@ class ResizeAsgDescriptionValidatorSpec extends Specification { description.asgs = [new ResizeAsgDescription.AsgTargetDescription( region: "us-east-5" )] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidatorSpec.groovy index fd8f92bae1b..5d7979f8649 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/ResumeAsgProcessesDescriptionValidatorSpec.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.deploy.description.AsgDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.ResumeAsgProcessesDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Specification class ResumeAsgProcessesDescriptionValidatorSpec extends Specification { @@ -38,7 +38,7 @@ class ResumeAsgProcessesDescriptionValidatorSpec extends Specification { ], processes: ["Launch", "Terminate"] ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -61,7 +61,7 @@ class ResumeAsgProcessesDescriptionValidatorSpec extends Specification { ], processes: ["Laugh", "Terminate"] ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidatorSpec.groovy index fdd662fda3d..1f95168c591 100644 --- 
a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/SuspendAsgProcessesDescriptionValidatorSpec.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.deploy.description.AsgDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.SuspendAsgProcessesDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Specification class SuspendAsgProcessesDescriptionValidatorSpec extends Specification { @@ -38,7 +38,7 @@ class SuspendAsgProcessesDescriptionValidatorSpec extends Specification { ], processes: ["Launch", "Terminate"] ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -61,7 +61,7 @@ class SuspendAsgProcessesDescriptionValidatorSpec extends Specification { ], processes: ["Laugh", "Terminate"] ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidatorSpec.groovy index 632168b0989..85546698f08 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstanceAndDecrementAsgDescriptionValidatorSpec.groovy @@ -18,7 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.TerminateInstanceAndDecrementAsgDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -34,7 +34,7 @@ class TerminateInstanceAndDecrementAsgDescriptionValidatorSpec extends Specifica void "empty description fails validation"() { setup: def description = new TerminateInstanceAndDecrementAsgDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -49,7 +49,7 @@ class TerminateInstanceAndDecrementAsgDescriptionValidatorSpec extends Specifica setup: def description = new TerminateInstanceAndDecrementAsgDescription(credentials: TestCredential.named('test')) description.region = "us-west-5" - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidatorSpec.groovy index 012db1bd9ba..c6a94125456 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidatorSpec.groovy +++ 
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/TerminateInstancesDescriptionValidatorSpec.groovy @@ -20,7 +20,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.TerminateInstancesDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -36,7 +36,7 @@ class TerminateInstancesDescriptionValidatorSpec extends Specification { void "invalid instanceIds fail validation"() { setup: def description = new TerminateInstancesDescription(instanceIds: [""]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -49,7 +49,7 @@ class TerminateInstancesDescriptionValidatorSpec extends Specification { setup: def description = new TerminateInstancesDescription(credentials: TestCredential.named('test')) description.region = "us-west-5" - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidatorSpec.groovy index 0bff49ed95e..35f34edc9d6 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonDNSDescriptionValidatorSpec.groovy @@ -23,7 +23,7 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.validators.UpsertAmazonDNSDe import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerDescription import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonDNSDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -35,7 +35,7 @@ class UpsertAmazonDNSDescriptionValidatorSpec extends Specification { void "empty description fails validation"() { setup: def description = new UpsertAmazonDNSDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -51,7 +51,7 @@ class UpsertAmazonDNSDescriptionValidatorSpec extends Specification { setup: def description = new UpsertAmazonDNSDescription() description.target = "foo" - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -70,7 +70,7 @@ class UpsertAmazonDNSDescriptionValidatorSpec extends Specification { setup: def elbDescription = new UpsertAmazonLoadBalancerDescription() def description = new UpsertAmazonDNSDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([elbDescription], description, errors) @@ -85,7 +85,7 @@ class UpsertAmazonDNSDescriptionValidatorSpec extends Specification { description.type = "CNAME" description.target = "foo.netflix.net." description.hostedZoneName = "netflix.net." 
- def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def route53 = Mock(AmazonRoute53) validator.amazonClientProvider = Mock(AmazonClientProvider) validator.amazonClientProvider.getAmazonRoute53(_, _, true) >> route53 diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec.groovy index 873423c2920..f3183af58e6 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec.groovy @@ -18,7 +18,7 @@ package com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAmazonLoadBalancerClassicDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification @@ -39,7 +39,7 @@ class UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec extends Specificat void "empty parameters fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -53,7 +53,7 @@ class UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec extends Specificat void "unconfigured region is rejected"() { setup: description.availabilityZones = ["us-west-5": ["us-west-5a"]] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -65,7 +65,7 @@ class UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec extends Specificat void "availability zone not configured for account is rejected"() { setup: description.availabilityZones = ["us-west-1": ["us-west-1c"]] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -78,7 +78,7 @@ class UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec extends Specificat void "subnetType supercedes availabilityZones"() { setup: description.subnetType = "internal" - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -90,7 +90,7 @@ class UpsertAmazonLoadBalancerClassicDescriptionValidatorSpec extends Specificat void "availabilityZones if not subnetType"() { setup: description.availabilityZones = ["us-west-1": ["us-west-1a"]] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidatorSpec.groovy index 801ad9923b4..92965f68bf4 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertAsgTagsDescriptionValidatorSpec.groovy @@ -18,7 +18,7 @@ package 
com.netflix.spinnaker.clouddriver.aws.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAsgTagsDescription -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors class UpsertAsgTagsDescriptionValidatorSpec extends AbstractConfiguredRegionsValidatorSpec { @@ -35,7 +35,7 @@ class UpsertAsgTagsDescriptionValidatorSpec extends AbstractConfiguredRegionsVal void "empty tags fails validation"() { setup: def description = new UpsertAsgTagsDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -48,7 +48,7 @@ class UpsertAsgTagsDescriptionValidatorSpec extends AbstractConfiguredRegionsVal setup: def description = new UpsertAsgTagsDescription() description.tags = ["tag": null] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidatorSpec.groovy index 43166bb0f35..706da4a708c 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/deploy/validators/UpsertSecurityGroupDescriptionValidatorSpec.groovy @@ -22,7 +22,7 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertSecurityGr import com.netflix.spinnaker.clouddriver.aws.model.SecurityGroupNotFoundException import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -32,9 +32,9 @@ class UpsertSecurityGroupDescriptionValidatorSpec extends Specification { @Subject validator = new UpsertSecurityGroupDescriptionValidator() SecurityGroupService securityGroupService = Mock(SecurityGroupService) - Errors errors = Mock(Errors) + ValidationErrors errors = Mock(ValidationErrors) - final description = new UpsertSecurityGroupDescription( + def description = new UpsertSecurityGroupDescription( credentials: TestCredential.named('test'), region: "us-east-1", name: "foo", @@ -50,7 +50,7 @@ class UpsertSecurityGroupDescriptionValidatorSpec extends Specification { def setup() { securityGroupService = Mock(SecurityGroupService) - errors = Mock(Errors) + errors = Mock(ValidationErrors) def regionScopedProviderFactory = Mock(RegionScopedProviderFactory) def regionScopedProvider = Mock(RegionScopedProviderFactory.RegionScopedProvider) regionScopedProvider.getSecurityGroupService() >> securityGroupService diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicatorSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicatorSpec.groovy index 5d1c745e0ef..ec06bf43ba6 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicatorSpec.groovy +++ 
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/health/AmazonHealthIndicatorSpec.groovy @@ -19,24 +19,31 @@ package com.netflix.spinnaker.clouddriver.aws.health import com.amazonaws.AmazonServiceException import com.amazonaws.services.ec2.AmazonEC2 import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult -import com.netflix.spectator.api.Counter +import com.netflix.spectator.api.NoopRegistry import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.boot.actuate.health.Status import spock.lang.Specification - -import java.util.concurrent.atomic.AtomicLong +import spock.lang.Unroll class AmazonHealthIndicatorSpec extends Specification { - def "health fails when amazon appears unreachable"() { + private static final Registry REGISTRY = new NoopRegistry() + AwsConfigurationProperties awsConfigurationProperties + + void setup(){ + awsConfigurationProperties = new AwsConfigurationProperties() + } + + @Unroll + def "health details contains warning when amazon appears unreachable"() { setup: def creds = [TestCredential.named('foo')] - def holder = Stub(AccountCredentialsProvider) { + def credentialsRepository = Stub(CredentialsRepository) { getAll() >> creds - getCredentials("foo") >> creds[0] } def mockEc2 = Stub(AmazonEC2) { describeAccountAttributes() >> { throw new AmazonServiceException("fail") } @@ -44,28 +51,38 @@ class AmazonHealthIndicatorSpec extends Specification { def mockAmazonClientProvider = Stub(AmazonClientProvider) { getAmazonEC2(*_) >> mockEc2 } - def counter = new AtomicLong(0) - def mockRegistry = Stub(Registry) { - gauge(_, _) >> counter - } - def indicator = new AmazonHealthIndicator(holder, mockAmazonClientProvider, mockRegistry) + awsConfigurationProperties.health.setVerifyAccountHealth(verifyAccountHealth) + + def indicator = new AmazonHealthIndicator( + REGISTRY, + credentialsRepository, + mockAmazonClientProvider, + awsConfigurationProperties) when: indicator.checkHealth() - indicator.health() + def health = indicator.health() then: - thrown AmazonHealthIndicator.AmazonUnreachableException - counter.get() == 1 + health.status == Status.UP + if (verifyAccountHealth) { + (health.details['foo'] as String).startsWith("Failed to describe account attributes for 'foo'.") + } else { + health.details.isEmpty() + } + + where: + verifyAccountHealth | _ + true | _ + false | _ } def "health succeeds when amazon is reachable"() { setup: def creds = [TestCredential.named('foo')] - def holder = Stub(AccountCredentialsProvider) { + def credentialsRepository = Stub(CredentialsRepository) { getAll() >> creds - getCredentials("foo") >> creds[0] } def mockEc2 = Stub(AmazonEC2) { describeAccountAttributes() >> { Mock(DescribeAccountAttributesResult) } @@ -74,12 +91,70 @@ class AmazonHealthIndicatorSpec extends Specification { getAmazonEC2(*_) >> mockEc2 } - def counter = new AtomicLong(0) - def mockRegistry = Stub(Registry) { - gauge(_, _) >> counter + def indicator = new AmazonHealthIndicator( + REGISTRY, + credentialsRepository, + mockAmazonClientProvider, + awsConfigurationProperties + ) + + when: + indicator.checkHealth() + def health = indicator.health() + + then: + health.status == Status.UP + 
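// a reachable account contributes no entry to the health details map +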
health.details.isEmpty() + } + + def "health succeeds when no amazon accounts"() { + setup: + def credentialsRepository = Stub(CredentialsRepository) { + getAll() >> [] + } + def mockEc2 = Stub(AmazonEC2) { + describeAccountAttributes() >> { Mock(DescribeAccountAttributesResult) } + } + def mockAmazonClientProvider = Stub(AmazonClientProvider) { + getAmazonEC2(*_) >> mockEc2 + } + + def indicator = new AmazonHealthIndicator( + REGISTRY, + credentialsRepository, + mockAmazonClientProvider, + awsConfigurationProperties + ) + + when: + indicator.checkHealth() + def health = indicator.health() + + then: + health.status == Status.UP + health.details.isEmpty() + } + + def "health details contains warnings when there are multiple errors"() { + setup: + def creds = [TestCredential.named('foo'), TestCredential.named('bar')] + def credentialsRepository = Stub(CredentialsRepository) { + getAll() >> creds + } + def mockEc2 = Stub(AmazonEC2) { + describeAccountAttributes() >> { throw new AmazonServiceException("fail") } + } + + def mockAmazonClientProvider = Stub(AmazonClientProvider) { + getAmazonEC2(*_) >> mockEc2 } - def indicator = new AmazonHealthIndicator(holder, mockAmazonClientProvider, mockRegistry) + def indicator = new AmazonHealthIndicator( + REGISTRY, + credentialsRepository, + mockAmazonClientProvider, + awsConfigurationProperties + ) when: indicator.checkHealth() @@ -87,6 +162,7 @@ class AmazonHealthIndicatorSpec extends Specification { then: health.status == Status.UP - counter.get() == 0 + (health.details['foo'] as String).startsWith("Failed to describe account attributes for 'foo'.") + (health.details['bar'] as String).startsWith("Failed to describe account attributes for 'bar'.") } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerSpec.groovy index 3d42324d969..1c68d2c40dd 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorkerSpec.groovy @@ -28,11 +28,9 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.eureka.api.Eureka import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException import retrofit.RetrofitError -import retrofit.RetrofitError.Kind -import retrofit.client.Response -import retrofit.converter.Converter import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -52,7 +50,7 @@ class InstanceTerminationLifecycleWorkerSpec extends Specification { AmazonSQS amazonSQS = Mock() AmazonSNS amazonSNS = Mock() - AccountCredentialsProvider accountCredentialsProvider = Mock() { + CredentialsRepository credentialsRepository = Mock(CredentialsRepository) { getAll() >>[mgmtCredentials, testCredentials] } Provider awsEurekaSupportProvider = Mock() @@ -70,7 +68,7 @@ class InstanceTerminationLifecycleWorkerSpec extends Specification { def subject = new 
InstanceTerminationLifecycleWorker( objectMapper, Mock(AmazonClientProvider), - accountCredentialsProvider, + credentialsRepository, new InstanceTerminationConfigurationProperties( 'mgmt', queueARN.arn, @@ -133,10 +131,10 @@ class InstanceTerminationLifecycleWorkerSpec extends Specification { subject.handleMessage(message) then: - 1 * accountCredentialsProvider.getAll() >> [mgmtCredentials, testCredentials] + 1 * credentialsRepository.getAll() >> [mgmtCredentials, testCredentials] 1 * awsEurekaSupportProvider.get() >> awsEurekaSupport 1 * awsEurekaSupport.getEureka(_, 'us-west-2') >> eureka - 1 * eureka.updateInstanceStatus('clouddriver', 'i-1234', DiscoveryStatus.Disable.value) + 1 * eureka.updateInstanceStatus('clouddriver', 'i-1234', DiscoveryStatus.OUT_OF_SERVICE.value) } def 'should process both sns and sqs messages'() { @@ -213,8 +211,7 @@ class InstanceTerminationLifecycleWorkerSpec extends Specification { then: 1 * eureka.updateInstanceStatus(_, _, _) >> { - throw new RetrofitError("cannot connect", "http://discovery", new Response("http://discovery", 400, "reason", [], null), Mock(Converter), String, Kind.NETWORK, Mock(Throwable)) - } + throw new SpinnakerNetworkException(RetrofitError.networkError("http://some-url", new IOException("cannot connect"))) } 1 * eureka.updateInstanceStatus(_, _, _) 0 * eureka.updateInstanceStatus(_, _, _) } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProviderSpec.groovy index a7ec855a4e2..da5ac097e70 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationAgentProviderSpec.groovy @@ -22,7 +22,7 @@ import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import com.netflix.spinnaker.clouddriver.tags.EntityTagger import spock.lang.Specification import spock.lang.Subject @@ -46,7 +46,7 @@ import spock.lang.Subject class LaunchFailureNotificationAgentProviderSpec extends Specification { def objectMapper = Mock(ObjectMapper) def amazonClientProvider = Mock(AmazonClientProvider) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def credentialsRepository = Mock(CredentialsRepository) def serverGroupTagger = Mock(EntityTagger) def launchFailureConfigurationProperties = new LaunchFailureConfigurationProperties( @@ -62,7 +62,7 @@ class LaunchFailureNotificationAgentProviderSpec extends Specification { def agentProvider = new LaunchFailureNotificationAgentProvider( objectMapper, amazonClientProvider, - accountCredentialsProvider, + credentialsRepository, launchFailureConfigurationProperties, serverGroupTagger ) @@ -85,14 +85,11 @@ class LaunchFailureNotificationAgentProviderSpec extends Specification { } when: - def agents = agentProvider.agents() + def agents = agentProvider.agents(mgmtCredentials) then: regions.each { String region -> assert agents.find { it.agentType 
== "mgmt/${region}/LaunchFailureNotificationAgent".toString() } != null } - - 1 * accountCredentialsProvider.getCredentials("mgmt") >> { return mgmtCredentials } - 4 * accountCredentialsProvider.getAll() >> { return [mgmtCredentials] } } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgentSpec.groovy index 738290efce0..15a311c8186 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/LaunchFailureNotificationCleanupAgentSpec.groovy @@ -23,21 +23,21 @@ import com.amazonaws.services.autoscaling.model.DescribeScalingActivitiesResult import com.netflix.spinnaker.clouddriver.aws.TestCredential import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.model.EntityTags -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.tags.EntityTagger +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification import spock.lang.Unroll import java.lang.reflect.InvocationTargetException -import java.lang.reflect.UndeclaredThrowableException; +import java.lang.reflect.UndeclaredThrowableException class LaunchFailureNotificationCleanupAgentSpec extends Specification { static final LAUNCH_FAILURE_TAG_NAME = "spinnaker_ui_alert:autoscaling:ec2_instance_launch_error" def serverGroupTagger = Mock(EntityTagger) def amazonAutoScaling = Mock(AmazonAutoScaling) - def accountCredentialsProvider = Stub(AccountCredentialsProvider) { - getCredentials(_) >> { String name -> + def credentialsRepository = Stub(CredentialsRepository) { + getOne(_) >> { String name -> TestCredential.named(name) } } @@ -45,7 +45,7 @@ class LaunchFailureNotificationCleanupAgentSpec extends Specification { void "should delete launch failure notification tag if server group has no launch failures"() { given: def agent = new LaunchFailureNotificationCleanupAgent( - Mock(AmazonClientProvider), accountCredentialsProvider, serverGroupTagger + Mock(AmazonClientProvider), credentialsRepository, serverGroupTagger ) { @Override protected boolean hasLaunchFailures(AmazonAutoScaling amazonAutoScaling, EntityTags entityTags) { @@ -84,7 +84,7 @@ class LaunchFailureNotificationCleanupAgentSpec extends Specification { given: def entityTags = new EntityTags(entityRef: new EntityTags.EntityRef(account: "test", entityId: "test-v002")) def agent = new LaunchFailureNotificationCleanupAgent( - Mock(AmazonClientProvider), accountCredentialsProvider, Mock(EntityTagger) + Mock(AmazonClientProvider), credentialsRepository, Mock(EntityTagger) ) when: @@ -109,7 +109,7 @@ class LaunchFailureNotificationCleanupAgentSpec extends Specification { given: def entityTags = new EntityTags(entityRef: new EntityTags.EntityRef(account: "test", entityId: "test-v002")) def agent = new LaunchFailureNotificationCleanupAgent( - Mock(AmazonClientProvider), accountCredentialsProvider, Mock(EntityTagger) + Mock(AmazonClientProvider), credentialsRepository, Mock(EntityTagger) ) and: diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationStackSpec.groovy 
b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationStackSpec.groovy new file mode 100644 index 00000000000..2e0572a763b --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonCloudFormationStackSpec.groovy @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.model + +import com.amazonaws.services.cloudformation.model.Change +import com.fasterxml.jackson.databind.ObjectMapper +import spock.lang.Specification + +class AmazonCloudFormationStackSpec extends Specification { + + def objectMapper = new ObjectMapper() + + def "should deserialize a full cached object"() { + given: + def attributes = [ + stackId: "stackId", + tags: [tag1: "tag1", tag2: "tag2"], + outputs: [out1: "out1", out2: "out2"], + stackName: "stackName", + region: "region", + accountName: "accountName", + accountId: "accountId", + stackStatus: "stackStatus", + stackStatusReason: "stackStatusReason", + changeSets: [ + [ + name: "name", + status: "status", + statusReason: "statusReason", + changes: [ + new Change().withType("type") + ] + ] + ] + ] + + when: + def cf = objectMapper.convertValue(attributes, AmazonCloudFormationStack) + + then: + assert cf instanceof AmazonCloudFormationStack + with(cf) { + stackId == "stackId" + tags == [tag1: "tag1", tag2: "tag2"] + outputs == [out1: "out1", out2: "out2"] + stackName == "stackName" + region == "region" + accountName == "accountName" + accountId == "accountId" + stackStatus == "stackStatus" + stackStatusReason == "stackStatusReason" + cf.changeSets.each { + it.name == "name" + it.status == "status" + it.statusReason == "statusReason" + it.changes.size() == 1 + it.changes.get(0).type == "type" + } + } + } + + def "should deserialize object with missing fields"() { + given: + def attributes = [stackId: "stackId"] + + when: + def cf = objectMapper.convertValue(attributes, AmazonCloudFormationStack) + + then: + assert cf instanceof AmazonCloudFormationStack + with(cf) { + stackId == "stackId" + tags == null + outputs == null + stackName == null + region == null + accountName == null + accountId == null + stackStatus == null + stackStatusReason == null + changeSets == null + } + } + +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonClusterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonClusterSpec.groovy index e737605cf24..091178f47e8 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonClusterSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonClusterSpec.groovy @@ -18,11 +18,12 @@ package com.netflix.spinnaker.clouddriver.aws.model import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.jackson.ClouddriverApiModule import spock.lang.Specification 
class AmazonClusterSpec extends Specification { void "should serialize null loadBalancers and serverGroups as empty arrays"() { - def objectMapper = new ObjectMapper() + def objectMapper = new ObjectMapper().registerModule(new ClouddriverApiModule()) when: def nullCluster = objectMapper.convertValue( diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceSpec.groovy index 19eff9ea82d..161cc428681 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonInstanceSpec.groovy @@ -26,12 +26,20 @@ import spock.lang.Unroll */ class AmazonInstanceSpec extends Specification { - Instance instance + AmazonInstance instance def setup() { instance = new AmazonInstance(name: 'foo') } + def "get availability zone from extra attributes"() { + when: + instance = new AmazonInstance([placement: [availabilityZone: "us-east-1"]]) + + then: + instance.getAvailabilityZone() == "us-east-1" + } + def "getHealthState for ALL UP health states"() { given: instance.health = [[type: "Amazon", healthClass: 'platform', state: "Unknown"], [type: "Discovery", state: 'Up'], [type: "LoadBalancer", state: "Up"]] diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroupSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroupSpec.groovy index dc957e054e8..4a69cff3f78 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroupSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/model/AmazonServerGroupSpec.groovy @@ -19,16 +19,15 @@ package com.netflix.spinnaker.clouddriver.aws.model import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance import com.netflix.spinnaker.clouddriver.model.ServerGroup +import org.junit.jupiter.api.BeforeEach import spock.lang.Specification import spock.lang.Unroll -/** - * Created by zthrash on 1/7/15. 
- */ class AmazonServerGroupSpec extends Specification { ServerGroup serverGroup + @BeforeEach def setup() { serverGroup = new AmazonServerGroup() } @@ -80,4 +79,206 @@ class AmazonServerGroupSpec extends Specification { instance } + void 'server group instance type is extracted as expected for asg with launch configuration'() { + when: + serverGroup.launchConfig = [ + application: "app", + createdTime: 1612794814579, + imageId: "ami-1", + instanceType: "some.type.1", + launchConfigurationARN: "arn:aws:autoscaling:us-east-1:00000000:launchConfiguration:000-000-000:launchConfigurationName/app-stack-v000-000", + launchConfigurationName: "app-stack-v000-000" + ] + + then: + serverGroup.getInstanceType() == "some.type.1" + } + + void 'server group instance type is extracted as expected for asg with launch template'() { + when: + serverGroup.launchTemplate = [ + createTime: 1612794814579, + launchTemplateData: [ + imageId: "ami-1", + instanceType: "some.type.1", + ], + launchTemplateId: "lt-1", + launchTemplateName: "app-stack-v000-000", + version: "1" + ] + + then: + serverGroup.getInstanceType() == "some.type.1" + } + + @Unroll + void 'server group instance type is extracted as expected for asg with mixed instances policy'() { + when: + serverGroup.mixedInstancesPolicy = new AmazonServerGroup.MixedInstancesPolicySettings().tap { + instancesDistribution = [ + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 1, + onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: "lowest-price", + spotInstancePools: 4, + spotMaxPrice: "1" + ] + launchTemplates = [ + [ + createTime: 1612794814579, + launchTemplateData: [ + imageId: "ami-1", + instanceType: "some.type.1", + ], + launchTemplateId: "lt-1", + launchTemplateName: "app-stack-v000-000", + versionNumber: 1, + ]] + launchTemplateOverridesForInstanceType = overrides + } + + then: + serverGroup.getInstanceType() == expectedInstanceType + + where: + overrides || expectedInstanceType + null || "some.type.1" + [[instanceType: "some.type.2", weightedCapacity: "2"], + [instanceType: "some.type.3", weightedCapacity: "4"]] || null + [[instanceType: "some.type.2", weightedCapacity: "2"]] || null + } + + void 'server group launch template specification is null for asg with launch configuration'() { + when: + serverGroup.asg = [launchConfigurationName: "app-stack-v000-000"] + def ltSpec = serverGroup.getLaunchTemplateSpecification() + + then: + ltSpec == null + } + + void 'server group launch template specification is identified as expected for asg with launch template'() { + when: + serverGroup.asg = [launchTemplate: [ + launchTemplateId: "lt-1", + launchTemplateName: "app-stack-v000-000", + version: "1" + ]] + def ltSpec = serverGroup.getLaunchTemplateSpecification() + + then: + ltSpec.launchTemplateId == "lt-1" + ltSpec.launchTemplateName == "app-stack-v000-000" + ltSpec.version == "1" + } + + void 'server group launch template specification is identified as expected for asg with mixed instances policy'() { + when: + serverGroup.asg = [ + mixedInstancesPolicy: [ + instancesDistribution: [ + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 1, + onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: "lowest-price", + spotInstancePools: 4, + spotMaxPrice: "1" + ], + launchTemplate: [ + launchTemplateSpecification: [ + launchTemplateId: "lt-1", + launchTemplateName: "app-stack-v000-000", + version: "1" + ] + ] + ] + ] + def ltSpec = serverGroup.getLaunchTemplateSpecification() + + then: + 
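+ // the specification is expected to be resolved from
+ // asg.mixedInstancesPolicy.launchTemplate.launchTemplateSpecification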
ltSpec.launchTemplateId == "lt-1" + ltSpec.launchTemplateName == "app-stack-v000-000" + ltSpec.version == "1" + } + + void 'security group is extracted as expected for asg with launch configuration'() { + when: + serverGroup.launchConfig = [ + application: "app", + createdTime: 1612794814579, + imageId: "ami-1", + instanceType: "some.type.1", + launchConfigurationARN: "arn:aws:autoscaling:us-east-1:00000000:launchConfiguration:000-000-000:launchConfigurationName/app-stack-v000-000", + launchConfigurationName: "app-stack-v000-000", + securityGroups: ["sg-123"] + ] + + then: + serverGroup.getSecurityGroups() == ["sg-123"].toSet() + } + + @Unroll + void 'security group is extracted as expected for asg with launch template'() { + when: + serverGroup.launchTemplate = [ + createTime: 1612794814579, + launchTemplateData: [ + imageId: "ami-1", + instanceType: "some.type.1", + securityGroupIds: secGroupIds, + networkInterfaces: networkInterfaceInput + ], + launchTemplateId: "lt-1", + launchTemplateName: "app-stack-v000-000", + version: "1" + ] + + then: + serverGroup.getSecurityGroups() == expectedSecGroupsIds.toSet() + + where: + secGroupIds | networkInterfaceInput || expectedSecGroupsIds + null | [[deviceIndex: 0, + groups: ["sg-123"]]] || ["sg-123"] + ["sg-123"] | null || ["sg-123"] + null | null || [] + } + + @Unroll + void 'security group is extracted as expected for asg with mixed instances policy'() { + when: + serverGroup.mixedInstancesPolicy = new AmazonServerGroup.MixedInstancesPolicySettings().tap { + allowedInstanceTypes = ["some.type.1"] + instancesDistribution = [ + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 1, + onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: "lowest-price", + spotInstancePools: 4, + spotMaxPrice: "1" + ] + launchTemplates = [[ + createTime: 1612794814579, + launchTemplateData: [ + imageId: "ami-1", + instanceType: "some.type.1", + securityGroupIds: secGroupIds, + networkInterfaces: networkInterfaceInput + ], + launchTemplateId: "lt-1", + launchTemplateName: "app-stack-v000-000", + versionNumber: 1, + ]] + } + + then: + serverGroup.getSecurityGroups() == expectedSecGroupsIds.toSet() + + where: + secGroupIds | networkInterfaceInput || expectedSecGroupsIds + null | [[deviceIndex: 0, + groups: ["sg-123"]]] || ["sg-123"] + ["sg-123"] | null || ["sg-123"] + null | null || [] + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProviderSpec.groovy index de348605ecf..7211c8d6358 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/AwsProviderSpec.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification import spock.lang.Subject @@ -35,6 +35,7 @@ class AwsProviderSpec extends Specification { "my-ci-account", "123", "my-ci-account-keypair", + true, null, null, null, @@ -49,12 +50,14 @@ class AwsProviderSpec extends Specification { false, null, false, + false, false) def eurekaAccount2 = new NetflixAmazonCredentials("my-qa-account", "qa", "my-qa-account", "123", 
"my-qa-account-keypair", + true, null, null, null, @@ -69,21 +72,25 @@ class AwsProviderSpec extends Specification { false, null, false, + false, false) def eurekaAccounts = [ eurekaAccount1, eurekaAccount2 ] - def eurekaRepos = Stub(AccountCredentialsRepository) { + def eurekaRepos = Stub(CredentialsRepository) { + getOne("my-ci-account") >> eurekaAccount1 + getOne("my-qa-account") >> eurekaAccount2 getAll() >> eurekaAccounts } - eurekaAwsProvider = new AwsProvider(eurekaRepos, []) + eurekaAwsProvider = new AwsProvider(eurekaRepos) def account1 = new NetflixAmazonCredentials("my-ci-account", "ci", "my-ci-account", "123", "my-ci-account-keypair", + true, null, null, null, @@ -98,12 +105,14 @@ class AwsProviderSpec extends Specification { false, null, false, + false, false) def account2 = new NetflixAmazonCredentials("my-qa-account", "qa", "my-qa-account", "456", "my-qa-account-keypair", + true, null, null, null, @@ -118,15 +127,18 @@ class AwsProviderSpec extends Specification { false, null, false, + false, false) def accounts = [ account1, account2 ] - def repos = Stub(AccountCredentialsRepository) { + def repos = Stub(CredentialsRepository) { + getOne("my-ci-account") >> account1 + getOne("my-qa-account") >> account2 getAll() >> accounts } - awsProvider = new AwsProvider(repos, []) + awsProvider = new AwsProvider(repos) } void "getInstanceKey returns CI account that matches both AWS account ID and eureka host"() { diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgentSpec.groovy new file mode 100644 index 00000000000..0303ad46d34 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonApplicationLoadBalancerCachingAgentSpec.groovy @@ -0,0 +1,176 @@ +package com.netflix.spinnaker.clouddriver.aws.provider.agent + +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeListenersResult +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeLoadBalancersResult +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTagsResult +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult +import com.amazonaws.services.elasticloadbalancingv2.model.Listener +import com.amazonaws.services.elasticloadbalancingv2.model.LoadBalancer +import com.amazonaws.services.elasticloadbalancingv2.model.Tag +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup +import com.amazonaws.services.elasticloadbalancingv2.model.TagDescription +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer +import com.netflix.spectator.api.Spectator +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.edda.EddaApi +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import spock.lang.Shared +import spock.lang.Specification + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES +import static 
com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS + +class AmazonApplicationLoadBalancerCachingAgentSpec extends Specification { + static String region = 'region' + static String accountName = 'accountName' + static String accountId = 'accountId' + + @Shared + AmazonElasticLoadBalancing elasticLoadBalancing = Mock(AmazonElasticLoadBalancing) + + @Shared + EddaApi eddaApi = Mock(EddaApi) + + @Shared + EddaTimeoutConfig eddaTimeoutConfig = Mock(EddaTimeoutConfig) + + @Shared + AmazonCachingAgentFilter filter = new AmazonCachingAgentFilter() + + def getAgent() { + def creds = Stub(NetflixAmazonCredentials) { + getName() >> accountName + it.getAccountId() >> accountId + } + def cloud = Stub(AmazonCloudProvider) + def client = Stub(AmazonClientProvider) { + getAmazonElasticLoadBalancingV2(_, _) >> Stub(AmazonElasticLoadBalancing) { + describeLoadBalancers(_) >> new DescribeLoadBalancersResult() { + List<LoadBalancer> getLoadBalancers() { + return filterableLBs().keySet() as List + } + } + + describeTags(_) >> new DescribeTagsResult() { + List<TagDescription> getTagDescriptions() { + return filterableLBs().values().flatten() + } + } + + describeTargetGroups(_) >> new DescribeTargetGroupsResult() { + List<TargetGroup> getTargetGroups() { + return filterableTargetGroups() + } + } + + describeListeners(_) >> new DescribeListenersResult() { + List<Listener> getListeners() { + return [] + } + } + } + } + + new AmazonApplicationLoadBalancerCachingAgent(cloud, client, creds, region, eddaApi, AmazonObjectMapperConfigurer.createConfigured(), Spectator.globalRegistry(), eddaTimeoutConfig, filter) + } + + void "should filter by tags"() { + given: + def agent = getAgent() + filter.includeTags = includeTags + filter.excludeTags = excludeTags + ProviderCache providerCache = Stub(ProviderCache) { + getAll(_, _) >> { + return [] + } + } + providerCache.addCacheResult(INSTANCES.ns, [], null) + + when: + def result = agent.loadDataInternal(providerCache) + + then: + result.cacheResults[LOAD_BALANCERS.ns]*.getId() == expected + result.cacheResults[TARGET_GROUPS.ns]*.relationships[LOAD_BALANCERS.ns].flatten() == expected + + where: + includeTags | excludeTags | expected + null | null | filterableLBs()*.getKey().collect { buildCacheKey(it.loadBalancerName) } + [taggify("hello")] | null | buildCacheKeys(["test-hello-tag-value", "test-hello-tag-value-different", "test-hello-tag-no-value"]) + [taggify("hello", "goodbye")] | null | buildCacheKeys(["test-hello-tag-value"]) + [taggify("hello", "goo")] | null | buildCacheKeys([]) + [taggify("hello", ".*bye")] | null | buildCacheKeys(["test-hello-tag-value"]) + [taggify(".*a.*")] | null | buildCacheKeys(["test-no-hello-tag"]) + null | [taggify("hello")] | buildCacheKeys(["test-no-hello-tag", "test-no-tags"]) + null | [taggify("hello", "goodbye")] | buildCacheKeys(["test-hello-tag-value-different", "test-hello-tag-no-value", "test-no-hello-tag", "test-no-tags"]) + [taggify("hello", "goodbye")] | [taggify("hello")] | buildCacheKeys([]) + [taggify(".*", "ciao")] | [taggify("hello", ".*")] | buildCacheKeys([]) + } + + void "should get correct cache key pattern"() { + given: + def agent = getAgent() + + when: + def cacheKeyPatterns = agent.getCacheKeyPatterns() + + then: + cacheKeyPatterns.isPresent() + cacheKeyPatterns.get() == [ + loadBalancers: buildCacheKey("*:vpc-????????:*") + ] + } + + private static final Map<LoadBalancer, List<TagDescription>> filterableLBs() { + return [ + (new 
LoadBalancer().withLoadBalancerName("test-hello-tag-value").withLoadBalancerArn(buildELBArn("test-hello-tag-value"))) : + [new TagDescription().withResourceArn(buildELBArn("test-hello-tag-value")).withTags(new Tag().withKey("hello").withValue("goodbye"))], + (new LoadBalancer().withLoadBalancerName("test-hello-tag-value-different").withLoadBalancerArn(buildELBArn("test-hello-tag-value-different"))): + [new TagDescription().withResourceArn(buildELBArn("test-hello-tag-value-different")).withTags(new Tag().withKey("hello").withValue("ciao"))], + (new LoadBalancer().withLoadBalancerName("test-hello-tag-no-value").withLoadBalancerArn(buildELBArn("test-hello-tag-no-value"))) : + [new TagDescription().withResourceArn(buildELBArn("test-hello-tag-no-value")).withTags(new Tag().withKey("hello"))], + (new LoadBalancer().withLoadBalancerName("test-no-hello-tag").withLoadBalancerArn(buildELBArn("test-no-hello-tag"))) : + [new TagDescription().withResourceArn(buildELBArn("test-no-hello-tag")).withTags(new Tag().withKey("Name"))], + (new LoadBalancer().withLoadBalancerName("test-no-tags").withLoadBalancerArn(buildELBArn("test-no-tags"))) : [] + ] as Map + } + + private static final List filterableTargetGroups() { + return [ + new TargetGroup().withTargetGroupName("tg-test-hello-tag-value").withLoadBalancerArns(buildELBArn("test-hello-tag-value")), + new TargetGroup().withTargetGroupName("tg-test-hello-tag-value-different").withLoadBalancerArns(buildELBArn("test-hello-tag-value-different")), + new TargetGroup().withTargetGroupName("tg-test-hello-tag-no-value").withLoadBalancerArns(buildELBArn("test-hello-tag-no-value")), + new TargetGroup().withTargetGroupName("tg-test-no-hello-tag").withLoadBalancerArns(buildELBArn("test-no-hello-tag")), + new TargetGroup().withTargetGroupName("tg-test-no-tags").withLoadBalancerArns(buildELBArn("test-no-tags")), + ] + } + + private static String buildCacheKey(String name) { + return "aws:loadBalancers:accountName:region:${name}" + } + + private static List buildCacheKeys(List names) { + return names.collect {"aws:loadBalancers:accountName:region:${it}" } as List + } + + private static String buildTargetGroupCacheKey(String name) { + return "aws:targetGroups:accountName:region:${name}:null:null" + } + + private static List buildTargetGroupCacheKeys(List names) { + return names.collect {"aws:targetGroups:accountName:region:${it}:null:null" } as List + } + + private static String buildELBArn(String name) { + return "arn:aws:elasticloadbalancing:${region}:${accountId}:loadbalancer/net/${name}/1234567890" + } + + private static def taggify(String name = null, String value = null) { + return new AmazonCachingAgentFilter.TagFilterOption(name, value) + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCachingAgentFilterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCachingAgentFilterSpec.groovy new file mode 100644 index 00000000000..2869f4d15e3 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCachingAgentFilterSpec.groovy @@ -0,0 +1,53 @@ +package com.netflix.spinnaker.clouddriver.aws.provider.agent + +import spock.lang.Shared +import spock.lang.Specification + +class AmazonCachingAgentFilterSpec extends Specification { + + @Shared + AmazonCachingAgentFilter filter = new AmazonCachingAgentFilter() + + void "should retain based on tag criteria"() { + given: + filter.includeTags = includeTags + 
filter.excludeTags = excludeTags + + when: + def result = filter.shouldRetainResource(resourceTags) + + then: + result == expected + + where: + resourceTags | includeTags | excludeTags | expected + [resourceTag("hello", "goodbye")] | null | null | true + [resourceTag("hello", "goodbye")] | [filterTag("hello")] | null | true + [resourceTag("hello", "goodbye")] | [filterTag("hello", "goodbye")] | null | true + [resourceTag("hello", "goodbye")] | [filterTag("hello", "goo")] | null | false + [resourceTag("hello", "goodbye")] | [filterTag("hello", ".*bye")] | null | true + [resourceTag("hello", "goodbye")] | [filterTag(".*a.*")] | null | false + [resourceTag("hello", "goodbye")] | null | [filterTag("hello")] | false + [resourceTag("hello", "goodbye")] | null | [filterTag("hello", "goodbye")] | false + [resourceTag("hello", "goodbye")] | null | [filterTag(".*a.*")] | true + [resourceTag("hello", "goodbye")] | [filterTag("hello", "goodbye")] | [filterTag("Name")] | true + [resourceTag("hello", "goodbye")] | [filterTag("hello", "goodbye")] | [filterTag("hello")] | false + [resourceTag("hello", "goodbye")] | [filterTag(".*", "ciao")] | [filterTag("hello", ".*")] | false + [resourceTag("hello", "goodbye"), + resourceTag("Name", "primary"),] | [filterTag("hello")] | null | true + [resourceTag("hello", "goodbye"), + resourceTag("Name", "primary"),] | null | [filterTag("hello")] | false + null | [filterTag("hello")] | null | false + null | [filterTag("hello")] | [] | false + null | null | [filterTag("hello")] | true + null | [] | [filterTag("hello")] | true + } + + private static def resourceTag(String name = null, String value = null) { + return new AmazonCachingAgentFilter.ResourceTag(name, value) + } + + private static def filterTag(String name = null, String value = null) { + return new AmazonCachingAgentFilter.TagFilterOption(name, value) + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCloudFormationCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCloudFormationCachingAgentSpec.groovy new file mode 100644 index 00000000000..67cbbe21fa6 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonCloudFormationCachingAgentSpec.groovy @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.provider.agent + +import com.amazonaws.services.cloudformation.AmazonCloudFormation +import com.amazonaws.services.cloudformation.model.Change +import com.amazonaws.services.cloudformation.model.ChangeSetSummary +import com.amazonaws.services.cloudformation.model.DescribeChangeSetResult +import com.amazonaws.services.cloudformation.model.DescribeStackEventsResult +import com.amazonaws.services.cloudformation.model.DescribeStacksResult +import com.amazonaws.services.cloudformation.model.ListChangeSetsResult +import com.amazonaws.services.cloudformation.model.Stack +import com.amazonaws.services.cloudformation.model.StackEvent +import com.amazonaws.services.ec2.AmazonEC2 +import com.netflix.spectator.api.Registry +import com.google.common.collect.ImmutableMap +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.cache.Keys +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.cache.OnDemandType +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll +import java.time.Instant + +class AmazonCloudFormationCachingAgentSpec extends Specification { + static String region = 'region' + static String accountName = 'accountName' + + @Subject + AmazonCloudFormationCachingAgent agent + + @Shared + ProviderCache providerCache = Mock(ProviderCache) + + @Shared + AmazonEC2 ec2 + + @Shared + AmazonClientProvider acp + + @Shared + Registry registry + + def setup() { + ec2 = Mock(AmazonEC2) + def creds = Stub(NetflixAmazonCredentials) { + getName() >> accountName + } + acp = Mock(AmazonClientProvider) + registry = Mock(Registry) + agent = new AmazonCloudFormationCachingAgent(acp, creds, region, registry) + } + + void "should add cloud formations on initial run"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def stack1 = new Stack().withStackId("stack1").withStackStatus("CREATE_SUCCESS") + def stack2 = new Stack().withStackId("stack2").withStackStatus("CREATE_SUCCESS") + def stackChangeSetsResults = Mock(ListChangeSetsResult) + + when: + def cache = agent.loadData(providerCache) + def results = cache.cacheResults[Keys.Namespace.STACKS.ns] + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ stack1, stack2 ] + 2 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResults + 2 * stackChangeSetsResults.getSummaries() >> new ArrayList() + + results.find { it.id == Keys.getCloudFormationKey("stack1", "region", "accountName") }.attributes.'stackId' == stack1.stackId + results.find { it.id == Keys.getCloudFormationKey("stack2", "region", "accountName") }.attributes.'stackId' == stack2.stackId + } + + void "should evict cloudformations when not found on subsequent runs"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def stack1 = new Stack().withStackId("stack1").withStackStatus("CREATE_SUCCESS") + def stack2 = new Stack().withStackId("stack2").withStackStatus("CREATE_SUCCESS") + def stackChangeSetsResults = Mock(ListChangeSetsResult) + + 
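+ // the first load caches both stack1 and stack2; the second load below
+ // returns only stack1, so the cached entry for stack2 should be evicted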
when: + def cache = agent.loadData(providerCache) + def results = cache.cacheResults[Keys.Namespace.STACKS.ns] + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ stack1, stack2 ] + 2 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResults + 2 * stackChangeSetsResults.getSummaries() >> new ArrayList() + + results.find { it.id == Keys.getCloudFormationKey("stack1", "region", "accountName") }.attributes.'stackId' == stack1.stackId + results.find { it.id == Keys.getCloudFormationKey("stack2", "region", "accountName") }.attributes.'stackId' == stack2.stackId + + when: + cache = agent.loadData(providerCache) + results = cache.cacheResults[Keys.Namespace.STACKS.ns] + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ stack1 ] + 1 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResults + 1 * stackChangeSetsResults.getSummaries() >> new ArrayList() + + results.find { it.id == Keys.getCloudFormationKey("stack1", "region", "accountName") }.attributes.'stackId' == stack1.stackId + results.find { it.id == Keys.getCloudFormationKey("stack2", "region", "accountName") } == null + } + + @Unroll + void "should include stack status reason when state is ROLLBACK_COMPLETE (failed)"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stack = new Stack().withStackId("stack1").withStackStatus(stackStatus) + def stackResults = Mock(DescribeStacksResult) + def stackEvent = new StackEvent().withResourceStatus(resourceStatus).withResourceStatusReason(expectedReason) + def stackEventResults = Mock(DescribeStackEventsResult) + def stackChangeSetsResults = Mock(ListChangeSetsResult) + + when: + def cache = agent.loadData(providerCache) + def results = cache.cacheResults[Keys.Namespace.STACKS.ns] + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResults + 1 * stackChangeSetsResults.getSummaries() >> new ArrayList() + 1 * stackResults.stacks >> [ stack ] + 1 * amazonCloudFormation.describeStackEvents(_) >> stackEventResults + 1 * stackEventResults.getStackEvents() >> [ stackEvent ] + + results.find { it.id == Keys.getCloudFormationKey("stack1", "region", "accountName") }.attributes.'stackStatusReason' == expectedReason + + where: + resourceStatus | stackStatus || expectedReason + 'CREATE_FAILED' | 'ROLLBACK_COMPLETE' || "create failed" + 'UPDATE_FAILED' | 'ROLLBACK_COMPLETE' || "update failed" + 'CREATE_FAILED' | 'UPDATE_ROLLBACK_COMPLETE' || "create failed" + 'UPDATE_FAILED' | 'UPDATE_ROLLBACK_COMPLETE' || "update failed" + } + + void "should include stack change sets if any available"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stack = new Stack().withStackId("stack1").withStackStatus("CREATE_COMPLETE") + def stackResults = Mock(DescribeStacksResult) + def listChangeSetsResult = Mock(ListChangeSetsResult) + def changeSet = new ChangeSetSummary() + .withChangeSetName("name") + .withStatus("status") + .withStatusReason("statusReason") + def describeChangeSetResult = Mock(DescribeChangeSetResult) + def change = new Change().withType("type") + + when: + def cache = agent.loadData(providerCache) + def results = cache.cacheResults[Keys.Namespace.STACKS.ns] + def cachedStack = 
results.find { + it.id == Keys.getCloudFormationKey("stack1", "region", "accountName") + } + def cachedChangeSets = cachedStack.attributes.'changeSets' + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * amazonCloudFormation.listChangeSets(_) >> listChangeSetsResult + 1 * listChangeSetsResult.getSummaries() >> Collections.singletonList(changeSet) + 1 * amazonCloudFormation.describeChangeSet(_) >> describeChangeSetResult + 1 * describeChangeSetResult.getChanges() >> Collections.singletonList(change) + 1 * stackResults.stacks >> [ stack ] + + cachedChangeSets.size() == 1 + with (cachedChangeSets.get(0)) { + name == "name" + status == "status" + statusReason == "statusReason" + changes.size() == 1 + with(changes.get(0)) { + type == "type" + } + } + + } + + @Unroll + void "OnDemand request should be handled for type '#onDemandType' and provider '#provider': '#expected'"() { + when: + def result = agent.handles(onDemandType, provider) + + then: + result == expected + + where: + onDemandType | provider || expected + OnDemandType.CloudFormation | AmazonCloudProvider.ID || true + OnDemandType.CloudFormation | "other" || false + OnDemandType.Job | AmazonCloudProvider.ID || false + } + + @Unroll + void "OnDemand request should be handled for the specific account and region"() { + when: + def result = agent.shouldHandle(data) + + then: + result == expected + + where: + data | expected + [:] | true // backwards compatibility + [credentials: accountName, region: [region]] | true + [credentials: null, region: null] | false + [credentials: accountName, region: null] | false + [credentials: null, region: [region]] | false + [credentials: "other", region: [region]] | false + [credentials: accountName, region: ["other"]] | false + [credentials: "other", region: ["other"]] | false + } + + void "OnDemand handle method should get the same cache data as when reloading the cache"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def stack1 = new Stack().withStackId("stack1").withStackStatus("CREATE_SUCCESS") + def stack2 = new Stack().withStackId("stack2").withStackStatus("CREATE_SUCCESS") + def stackChangeSetsResults = Mock(ListChangeSetsResult) + + when: + def cache = agent.loadData(providerCache) + def results = agent.handle(providerCache, Collections.emptyMap()) + + then: + 2 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 2 * amazonCloudFormation.describeStacks(_) >> stackResults + 2 * stackResults.stacks >> [ stack1, stack2 ] + 4 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResults + 4 * stackChangeSetsResults.getSummaries() >> new ArrayList() + + def expected = cache.cacheResults.get(Keys.Namespace.STACKS.ns).collect { it.attributes } as Set + def onDemand = results.cacheResult.cacheResults.get(Keys.Namespace.STACKS.ns).collect { it.attributes } as Set + expected == onDemand + } + + void "should evict processed onDemand entries"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def providerCache = Mock(ProviderCache) + def id = "aws:stacks:account:region:arn:aws:cloudformation:region:accountid:stackname" + def cacheData = new DefaultCacheData(id, (int) 20, + ImmutableMap.of("cacheTime", (long) 10, "processedCount", 1), ImmutableMap.of()) + + + when: + agent.loadData(providerCache) + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1
* amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ ] + 3 * providerCache.getAll(Keys.Namespace.ON_DEMAND.ns,_) >> [ cacheData ] + 1 * providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, [ id ]) + } + + void "should insert onDemand requests into onDemand NS"() { + given: + def postData = [ credentials: "accountName", stackName: "stackName", region: ["region"]] + def stack1 = new Stack().withStackId("stack1").withStackStatus("CREATE_SUCCESS") + def stack2 = new Stack().withStackId("stack1").withStackStatus("CREATE_SUCCESS") + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def providerCache = Mock(ProviderCache) + def stackChangeSetsResults = Mock(ListChangeSetsResult) + + when: + agent.handle(providerCache, postData) + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ stack1, stack2 ] + 2 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResults + 2 * stackChangeSetsResults.getSummaries() >> new ArrayList() + 2 * providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, _) + } + + + void "should keep unprocessed onDemand entries"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def providerCache = Mock(ProviderCache) + def id = "aws:stacks:account:region:arn:aws:cloudformation:region:accountid:stackname" + def cacheData = new DefaultCacheData(id, (int) 20, + ImmutableMap.of("cacheTime", (long) 1, "processedCount", 0), ImmutableMap.of()) + + when: + agent.loadData(providerCache) + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ ] + 3 * providerCache.getAll(Keys.Namespace.ON_DEMAND.ns,_) >> [ cacheData ] + 1 * providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, cacheData ) + 1 * providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, []) + } + + void "should keep newer onDemand entries"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResults = Mock(DescribeStacksResult) + def providerCache = Mock(ProviderCache) + def now = Instant.now() + def id = "aws:stacks:account:region:arn:aws:cloudformation:region:accountid:stackname" + def cacheData = new DefaultCacheData(id, (int) 20, + ImmutableMap.of("cacheTime", (long) now.plusMillis(100).toEpochMilli(), + "processedCount", 1), ImmutableMap.of()) + + when: + agent.loadData(providerCache) + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResults + 1 * stackResults.stacks >> [ ] + 3 * providerCache.getAll(Keys.Namespace.ON_DEMAND.ns,_) >> [ cacheData ] + 1 * providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, cacheData) + 1 * providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, []) + } + + void "should paginate through all stacks"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResultFirstPage = Mock(DescribeStacksResult) + def stackResultSecondPage = Mock(DescribeStacksResult) + def stack1 = new Stack().withStackId("stack1").withStackStatus("CREATE_SUCCESS") + def stack2 = new Stack().withStackId("stack2").withStackStatus("CREATE_SUCCESS") + def stackChangeSetsResult = Mock(ListChangeSetsResult) + def nextPageToken = "test pagination token" + + when: + def cache = 
agent.loadData(providerCache) + def results = cache.cacheResults[Keys.Namespace.STACKS.ns] + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + + // first page returns stack1 + 1 * amazonCloudFormation.describeStacks({ it.getNextToken() == null }) >> stackResultFirstPage + 1 * stackResultFirstPage.stacks >> [stack1] + 2 * stackResultFirstPage.getNextToken() >> nextPageToken + + // second page returns stack2 and is the last one + 1 * amazonCloudFormation.describeStacks({ it.getNextToken() == nextPageToken }) >> stackResultSecondPage + 1 * stackResultSecondPage.stacks >> [stack2] + 1 * stackResultSecondPage.getNextToken() >> null + + // there are no ChangeSets + 2 * amazonCloudFormation.listChangeSets(_) >> stackChangeSetsResult + 2 * stackChangeSetsResult.getSummaries() >> new ArrayList() + + results.size() == 2 + results.find { it.id == Keys.getCloudFormationKey("stack1", "region", "accountName") }.attributes.'stackId' == stack1.stackId + results.find { it.id == Keys.getCloudFormationKey("stack2", "region", "accountName") }.attributes.'stackId' == stack2.stackId + } + + void "should paginate through all changesets"() { + given: + def amazonCloudFormation = Mock(AmazonCloudFormation) + def stackResult = Mock(DescribeStacksResult) + def stack = new Stack().withStackId("stack").withStackStatus("CREATE_SUCCESS") + def stackChangeSetsResultFirstPage = Mock(ListChangeSetsResult) + def stackChangeSetsResultSecondPage = Mock(ListChangeSetsResult) + def changeSet1 = new ChangeSetSummary().withChangeSetName("changeSet1") + def changeSet2 = new ChangeSetSummary().withChangeSetName("changeSet2") + def describeChangeSetResult = Mock(DescribeChangeSetResult) + def change = new Change().withType("type") + def nextPageToken = "test pagination token" + + when: + def cache = agent.loadData(providerCache) + def results = cache.cacheResults[Keys.Namespace.STACKS.ns] + def cachedStack = results.find { + it.id == Keys.getCloudFormationKey("stack", "region", "accountName") + } + def cachedChangeSets = cachedStack.attributes.'changeSets' + + then: + 1 * acp.getAmazonCloudFormation(_, _) >> amazonCloudFormation + 1 * amazonCloudFormation.describeStacks(_) >> stackResult + 1 * stackResult.stacks >> [stack] + + // first page returns changeSet1 + 1 * amazonCloudFormation.listChangeSets({ it.getNextToken() == null }) >> stackChangeSetsResultFirstPage + 1 * stackChangeSetsResultFirstPage.getSummaries() >> [changeSet1] + 2 * stackChangeSetsResultFirstPage.getNextToken() >> nextPageToken + + // second page returns changeSet2 and is the last one + 1 * amazonCloudFormation.listChangeSets({ it.getNextToken() == nextPageToken }) >> stackChangeSetsResultSecondPage + 1 * stackChangeSetsResultSecondPage.getSummaries() >> [changeSet2] + 1 * stackChangeSetsResultSecondPage.getNextToken() >> null + + // return a Change for each ChangeSet + 2 * amazonCloudFormation.describeChangeSet(_) >> describeChangeSetResult + 2 * describeChangeSetResult.getChanges() >> [change] + + cachedChangeSets.size() == 2 + cachedChangeSets.any { it.name == changeSet1.getChangeSetName() } + cachedChangeSets.any { it.name == changeSet2.getChangeSetName() } + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgentSpec.groovy index 35cb2e90202..57ad9ebf1db 100644 --- 
a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonInstanceTypeCachingAgentSpec.groovy @@ -1,183 +1,193 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DescribeInstanceTypesResult +import com.amazonaws.services.ec2.model.InstanceTypeInfo +import com.amazonaws.services.ec2.model.ProcessorInfo +import com.amazonaws.services.ec2.model.VCpuInfo +import com.amazonaws.services.ec2.model.MemoryInfo +import com.amazonaws.services.ec2.model.InstanceStorageInfo +import com.amazonaws.services.ec2.model.EbsInfo +import com.amazonaws.services.ec2.model.EbsOptimizedInfo +import com.amazonaws.services.ec2.model.NetworkInfo +import com.amazonaws.services.ec2.model.GpuInfo +import com.amazonaws.services.ec2.model.GpuDeviceInfo +import com.amazonaws.services.ec2.model.GpuDeviceMemoryInfo +import com.amazonaws.services.ec2.model.DiskInfo +import com.amazonaws.services.ec2.model.NetworkCardInfo + +import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.aws.TestCredential -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import org.apache.http.HttpHost -import org.apache.http.ProtocolVersion -import org.apache.http.client.HttpClient -import org.apache.http.client.methods.HttpGet -import org.apache.http.client.methods.HttpHead -import org.apache.http.entity.BasicHttpEntity -import org.apache.http.message.BasicHttpResponse +import com.netflix.spinnaker.clouddriver.aws.cache.Keys +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import spock.lang.Shared import spock.lang.Specification +import spock.lang.Subject class AmazonInstanceTypeCachingAgentSpec extends Specification { - - static final Set TEST_DATA_SET_INSTANCE_TYPES = - ['d2.8xlarge', 'c5.xlarge', 'h1.2xlarge', 'c4.8xlarge', 'c3.large', 'i3.metal'] - - static final US_WEST_2_ACCT = TestCredential.named("test", - [regions: [ - [name: 'us-west-2', - availabilityZones: ['us-west-2a', 'us-west-2b', 'us-west-2c'] - ] - ]]) - - AccountCredentialsRepository repo = Stub(AccountCredentialsRepository) { - getAll() >> [US_WEST_2_ACCT] + def region = "us-east-1" + def objectMapper = new ObjectMapper() + def amazonClientProvider = Mock(AmazonClientProvider) + def account = "test" + def credentials = Stub(NetflixAmazonCredentials) { + getName() >> account } - def httpClient = Mock(HttpClient) - def providerCache = Mock(ProviderCache) + @Shared + ProviderCache providerCache = Mock(ProviderCache) - def "can deserialize response payload"() { - when: - def instanceTypes = getTestSubject().fromStream(cannedDataSet()) + @Subject + def agent = new AmazonInstanceTypeCachingAgent(region, amazonClientProvider, credentials, objectMapper) - then: - instanceTypes == TEST_DATA_SET_INSTANCE_TYPES - } - - def "noop if no matching accounts"() { - given: - def agent = getTestSubject('us-east-1') + @Shared + AmazonEC2 ec2 - when: - def instanceTypes = 
agent.loadData(providerCache) + @Shared + def it1, it2 - then: - instanceTypes.cacheResults.isEmpty() - instanceTypes.evictions.isEmpty() - 0 * _ + def setup() { + ec2 = Mock(AmazonEC2) + it1 = getInstanceTypeWithEbs() + it2 = getInstanceTypeWithGpu() } - def "skip data load if etags match"() { - given: - def agent = getTestSubject() - + def "should cache ec2 instance types info and metadata"() { when: - def instanceTypes = agent.loadData(providerCache) + def result = agent.loadData(providerCache) + def cache = result.cacheResults then: - 1 * providerCache.get(agent.getAgentType(), 'metadata', _ as RelationshipCacheFilter) >> - metadata('bacon', expectedTypes) - 1 * httpClient.execute(_ as HttpHost, _ as HttpHead) >> basicResponse('bacon') - instanceTypes.evictions.isEmpty() - metadataMatches(agent.agentType, instanceTypes, 'bacon', expectedTypes) - instanceTypesMatch(instanceTypes, expectedTypes) - 0 * _ - - where: - expectedTypes = ['m1.megabig', 't2.arnold'] + 1 * amazonClientProvider.getAmazonEC2(credentials, region) >> ec2 + 1 * ec2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [it1, it2]) + + and: + cache.size() == 2 + cache.keySet() == [agent.getAgentType(), Keys.Namespace.INSTANCE_TYPES.getNs()] as Set + (cache.get(agent.getAgentType())[0] as DefaultCacheData).getId() == "metadata" && cache.get(agent.getAgentType()) != null + cache.get(Keys.Namespace.INSTANCE_TYPES.getNs()) != null } - def "load data if no metadata"() { - given: - def agent = getTestSubject() - + def "should cache expected attributes for instance types"() { when: - def instanceTypes = agent.loadData(providerCache) + def result = agent.loadData(providerCache) + def cache = result.cacheResults then: - 1 * providerCache.get(agent.getAgentType(), 'metadata', _ as RelationshipCacheFilter) >> null - 1 * httpClient.execute(_ as HttpHost, _ as HttpGet) >> getResponse('baloney') - - instanceTypes.evictions.isEmpty() - metadataMatches(agent.agentType, instanceTypes, 'baloney', TEST_DATA_SET_INSTANCE_TYPES) - instanceTypesMatch(instanceTypes, TEST_DATA_SET_INSTANCE_TYPES) - 0 * _ + 1 * amazonClientProvider.getAmazonEC2(credentials, region) >> ec2 + 1 * ec2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [it1, it2]) + + and: + def instanceTypesInfo = cache.get(Keys.Namespace.INSTANCE_TYPES.getNs()) + instanceTypesInfo.size() == 2 + def it1Result = instanceTypesInfo.find{ it.attributes.name == "test.large" } + it1Result != null + def it2Result = instanceTypesInfo.find{ it.attributes.name == "test.xlarge" } + it2Result != null } - def "load data if metadata mismatch"() { - given: - def agent = getTestSubject() - + def "should cache a list of instance types under metadata"() { when: - def instanceTypes = agent.loadData(providerCache) + def result = agent.loadData(providerCache) + def cache = result.cacheResults then: - 1 * providerCache.get(agent.getAgentType(), 'metadata', _ as RelationshipCacheFilter) >> - metadata('mustard', ['t7.shouldntmatter']) - 1 * httpClient.execute(_ as HttpHost, _ as HttpHead) >> basicResponse('baloney') - 1 * httpClient.execute(_ as HttpHost, _ as HttpGet) >> getResponse('baloney') - - instanceTypes.evictions.isEmpty() - metadataMatches(agent.agentType, instanceTypes, 'baloney', TEST_DATA_SET_INSTANCE_TYPES) - instanceTypesMatch(instanceTypes, TEST_DATA_SET_INSTANCE_TYPES) - 0 * _ + 1 * amazonClientProvider.getAmazonEC2(credentials, region) >> ec2 + 1 * ec2.describeInstanceTypes(_) >> new DescribeInstanceTypesResult(instanceTypes: [it1, it2]) + + 
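+ // the single 'metadata' entry should record the names of every cached instance type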
and: + def metadata = cache.get(agent.getAgentType())?.head() + metadata != null && metadata.id == "metadata" + def cachedInstanceTypes = metadata.attributes.cachedInstanceTypes as Set + cachedInstanceTypes.size() == 2 + cachedInstanceTypes == ["test.large", "test.xlarge"] as Set } - def "evict metadata if no etag"() { - given: - def agent = getTestSubject() - - when: - def instanceTypes = agent.loadData(providerCache) - - then: - 1 * providerCache.get(agent.getAgentType(), 'metadata', _ as RelationshipCacheFilter) >> null - 1 * httpClient.execute(_ as HttpHost, _ as HttpGet) >> getResponse(null) - - instanceTypes.evictions.get(agent.agentType).head() == 'metadata' - !instanceTypes.cacheResults.get(agent.agentType) - instanceTypesMatch(instanceTypes, TEST_DATA_SET_INSTANCE_TYPES) - 0 * _ - - } - - CacheData metadata(String etag, Collection instanceTypes) { - new DefaultCacheData('metadata', [etag: etag, cachedInstanceTypes: instanceTypes], [:]) - - } - - boolean metadataMatches(String agentType, - CacheResult result, - String expectedEtag, - Collection expectedTypes) { - def meta = result?.cacheResults?.get(agentType)?.head() - if (!meta) { - return false - } - meta.id == 'metadata' && - meta.attributes.etag == expectedEtag && - meta.attributes.cachedInstanceTypes as Set == expectedTypes as Set - } - - boolean instanceTypesMatch(CacheResult result, Collection expectedTypes) { - result?.cacheResults?.instanceTypes?.collect { it.id } as Set == - expectedTypes.collect { "aws:instanceTypes:$it:test:us-west-2".toString() } as Set - } - - BasicHttpResponse basicResponse(String etag, int statusCode = 200) { - def r = new BasicHttpResponse( - new ProtocolVersion('HTTP', 1, 1), - statusCode, - 'because reasons') - if (etag) { - r.setHeader("ETag", etag) - } - return r - } - - BasicHttpResponse getResponse(String etag) { - def r = basicResponse(etag) - def e = new BasicHttpEntity() - e.setContent(cannedDataSet()) - r.setEntity(e) - return r - } - - InputStream cannedDataSet() { - getClass().getResourceAsStream("us-west-2.json") + InstanceTypeInfo getInstanceTypeWithEbs() { + return new InstanceTypeInfo( + instanceType: "test.large", + currentGeneration: false, + supportedUsageClasses: ["on-demand","spot"], + supportedRootDeviceTypes: ["ebs","instance-store"], + supportedVirtualizationTypes: ["hvm","paravirtual"], + bareMetal: false, + hypervisor: "xen", + processorInfo: new ProcessorInfo(supportedArchitectures: ["i386","x86_64"], sustainedClockSpeedInGhz: 2.8), + vCpuInfo: new VCpuInfo( + defaultVCpus: 2, + defaultCores: 1, + defaultThreadsPerCore: 2, + validCores: [1], + validThreadsPerCore: [1, 2] + ), + memoryInfo: new MemoryInfo(sizeInMiB: 3840), + instanceStorageSupported: true, + instanceStorageInfo: new InstanceStorageInfo( + totalSizeInGB: 32, + disks: [new DiskInfo(sizeInGB: 16, count: 2, type: "ssd")], + nvmeSupport: "unsupported" + ), + ebsInfo: new EbsInfo( + ebsOptimizedSupport: "unsupported", + encryptionSupport: "supported", + nvmeSupport: "unsupported" + ), + networkInfo: new NetworkInfo( + ipv6Supported: true, + ), + burstablePerformanceSupported: false) } - AmazonInstanceTypeCachingAgent getTestSubject(String region = 'us-west-2') { - return new AmazonInstanceTypeCachingAgent(region, repo, httpClient) + InstanceTypeInfo getInstanceTypeWithGpu() { + return new InstanceTypeInfo( + instanceType: "test.xlarge", + currentGeneration: true, + supportedUsageClasses: ["on-demand","spot"], + supportedRootDeviceTypes: ["ebs"], + supportedVirtualizationTypes: ["hvm"], + bareMetal: false, + 
hypervisor: "xen", + processorInfo: new ProcessorInfo( + supportedArchitectures: ["x86_64"], + sustainedClockSpeedInGhz: 2.7 + ), + vCpuInfo: new VCpuInfo( + defaultVCpus: 32, + defaultCores: 16, + defaultThreadsPerCore: 2, + validCores: [1,2,3], + validThreadsPerCore: [1,2] + ), + memoryInfo: new MemoryInfo(sizeInMiB: 249856), + instanceStorageSupported: false, + ebsInfo: new EbsInfo( + ebsOptimizedSupport: "default", + encryptionSupport: "supported", + ebsOptimizedInfo: new EbsOptimizedInfo( + baselineBandwidthInMbps: 7000, + baselineThroughputInMBps: 875.0, + baselineIops: 40000, + maximumBandwidthInMbps: 7000, + maximumThroughputInMBps: 875.0, + maximumIops: 40000 + ), + nvmeSupport: "unsupported" + ), + networkInfo: new NetworkInfo( + ipv6Supported: true, + ), + gpuInfo: new GpuInfo( + gpus: [ + new GpuDeviceInfo( + name: "V100", + manufacturer: "NVIDIA", + count: 4, + memoryInfo: new GpuDeviceMemoryInfo(sizeInMiB: 16384)) + ], + totalGpuMemoryInMiB: 65536 + ), + burstablePerformanceSupported: false, + ) } - } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLaunchTemplateCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLaunchTemplateCachingAgentSpec.groovy new file mode 100644 index 00000000000..4bd23c4a8aa --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLaunchTemplateCachingAgentSpec.groovy @@ -0,0 +1,107 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.provider.agent + +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsRequest +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsResult +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult +import com.amazonaws.services.ec2.model.LaunchTemplate +import com.amazonaws.services.ec2.model.LaunchTemplateVersion +import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.data.Keys +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LAUNCH_TEMPLATES + +class AmazonLaunchTemplateCachingAgentSpec extends Specification { + def registry = new NoopRegistry() + def objectMapper = new ObjectMapper() + def region = "us-east-1" + def account = "test" + def amazonClientProvider = Mock(AmazonClientProvider) + + def credentials = Stub(NetflixAmazonCredentials) { + getName() >> account + } + + @Shared + def providerCache = Mock(ProviderCache) + + @Subject + def agent = new AmazonLaunchTemplateCachingAgent(amazonClientProvider, credentials, region, objectMapper, registry) + + def "should load launch templates"() { + given: + def ec2 = Mock(AmazonEC2) + def lt1 = new LaunchTemplate(launchTemplateName: "lt-1", launchTemplateId: "lt-1", latestVersionNumber: 1, defaultVersionNumber: 0) + def lt2 = new LaunchTemplate(launchTemplateName: "lt-2", launchTemplateId: "lt-2", latestVersionNumber: 0, defaultVersionNumber: 0) + + def lt1Version0 = new LaunchTemplateVersion( + launchTemplateId: lt1.launchTemplateId, + defaultVersion: true, + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData(imageId: "ami-10") + ) + + def lt1Version1 = new LaunchTemplateVersion( + launchTemplateId: lt1.launchTemplateId, + defaultVersion: false, + versionNumber: 1, + launchTemplateData: new ResponseLaunchTemplateData(imageId: "ami-11") + ) + + def lt2Version0 = new LaunchTemplateVersion( + launchTemplateId: lt2.launchTemplateId, + defaultVersion: true, + versionNumber: 0, + launchTemplateData: new ResponseLaunchTemplateData(imageId: "ami-20") + ) + + and: + amazonClientProvider.getAmazonEC2(credentials, region) >> ec2 + + when: + def result = agent.loadData(providerCache).cacheResults[LAUNCH_TEMPLATES.ns] + + then: + 1 * ec2.describeLaunchTemplates(_) >> new DescribeLaunchTemplatesResult(launchTemplates: [lt1, lt2]) + 1 * ec2.describeLaunchTemplateVersions(_) >> new DescribeLaunchTemplateVersionsResult(launchTemplateVersions: [lt1Version0, lt1Version1, lt2Version0]) + + result.size() == 2 + def lt1Result = result.find { it.attributes.launchTemplateName == lt1.launchTemplateName } + def lt1v1Image = Keys.getImageKey(lt1Version1.launchTemplateData.imageId, account, region) + def lt1v0Image = Keys.getImageKey(lt1Version0.launchTemplateData.imageId, account, region) + + def lt2Result = result.find { it.attributes.launchTemplateName == lt2.launchTemplateName } + def lt2Image = Keys.getImageKey(lt2Version0.launchTemplateData.imageId, account, region) + lt1Result.attributes.versions.size() == 2 + 
lt1Result.attributes.latestVersion == lt1Version1 + lt1Result.relationships.images == [lt1v0Image, lt1v1Image] as Set + + lt2Result.attributes.versions.size() == 1 + lt2Result.attributes.latestVersion == lt2Version0 + lt2Result.relationships.images == [lt2Image] as Set + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgentSpec.groovy new file mode 100644 index 00000000000..059d9ce4c74 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerCachingAgentSpec.groovy @@ -0,0 +1,137 @@ +package com.netflix.spinnaker.clouddriver.aws.provider.agent + +import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing +import com.amazonaws.services.elasticloadbalancing.model.* +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer +import com.netflix.spectator.api.Spectator +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.edda.EddaApi +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import spock.lang.Shared +import spock.lang.Specification + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* + +class AmazonLoadBalancerCachingAgentSpec extends Specification { + static String region = 'region' + static String accountName = 'accountName' + static String accountId = 'accountId' + + @Shared + AmazonElasticLoadBalancing elasticLoadBalancing = Mock(AmazonElasticLoadBalancing) + + @Shared + EddaApi eddaApi = Mock(EddaApi) + + @Shared + EddaTimeoutConfig eddaTimeoutConfig = Mock(EddaTimeoutConfig) + + @Shared + AmazonCachingAgentFilter filter = new AmazonCachingAgentFilter() + + def getAgent() { + def creds = Stub(NetflixAmazonCredentials) { + getName() >> accountName + it.getAccountId() >> accountId + } + def cloud = Stub(AmazonCloudProvider) + def client = Stub(AmazonClientProvider) { + getAmazonElasticLoadBalancing(_, _) >> Stub(AmazonElasticLoadBalancing) { + describeLoadBalancers(_) >> new DescribeLoadBalancersResult() { + List getLoadBalancerDescriptions() { + return filterableLBs().keySet() as List + } + } + + describeTags(_) >> new DescribeTagsResult() { + List getTagDescriptions() { + return filterableLBs().values().flatten() + } + } + + describeLoadBalancerAttributes(_) >> new DescribeLoadBalancerAttributesResult() { + LoadBalancerAttributes getLoadBalancerAttributes() { + return new LoadBalancerAttributes() + } + } + } + } + + new AmazonLoadBalancerCachingAgent(cloud, client, creds, region, eddaApi, AmazonObjectMapperConfigurer.createConfigured(), Spectator.globalRegistry(), filter) + } + + void "should filter by tags"() { + given: + def agent = getAgent() + filter.includeTags = includeTags + filter.excludeTags = excludeTags + ProviderCache providerCache = Stub(ProviderCache) { + getAll(_, _) >> { + return [] + } + } + providerCache.addCacheResult(INSTANCES.ns, [], null) + + when: + def result = agent.loadDataInternal(providerCache) + + then: + result.cacheResults[LOAD_BALANCERS.ns]*.getId() == expected + + where: + includeTags | excludeTags | expected + null | null | 
filterableLBs()*.getKey().collect { buildCacheKey(it.loadBalancerName) } + [taggify("hello")] | null | buildCacheKeys(["test-hello-tag-value", "test-hello-tag-value-different", "test-hello-tag-no-value"]) + [taggify("hello", "goodbye")] | null | buildCacheKeys(["test-hello-tag-value"]) + [taggify("hello", "goo")] | null | buildCacheKeys([]) + [taggify("hello", ".*bye")] | null | buildCacheKeys(["test-hello-tag-value"]) + [taggify(".*a.*")] | null | buildCacheKeys(["test-no-hello-tag"]) + null | [taggify("hello")] | buildCacheKeys(["test-no-hello-tag", "test-no-tags"]) + null | [taggify("hello", "goodbye")] | buildCacheKeys(["test-hello-tag-value-different", "test-hello-tag-no-value", "test-no-hello-tag", "test-no-tags"]) + [taggify("hello", "goodbye")] | [taggify("hello")] | buildCacheKeys([]) + [taggify(".*", "ciao")] | [taggify("hello", ".*")] | buildCacheKeys([]) + } + + void "should get correct cache key pattern"() { + given: + def agent = getAgent() + + when: + def cacheKeyPatterns = agent.getCacheKeyPatterns() + + then: + cacheKeyPatterns.isPresent() + cacheKeyPatterns.get() == [ + loadBalancers: buildCacheKey("*:vpc-????????") + ] + } + + private static final Map<LoadBalancerDescription, List<TagDescription>> filterableLBs() { + return [ + (new LoadBalancerDescription().withLoadBalancerName("test-hello-tag-value")): + [new TagDescription().withLoadBalancerName("test-hello-tag-value").withTags(new Tag().withKey("hello").withValue("goodbye"))], + (new LoadBalancerDescription().withLoadBalancerName("test-hello-tag-value-different")): + [new TagDescription().withLoadBalancerName("test-hello-tag-value-different").withTags(new Tag().withKey("hello").withValue("ciao"))], + (new LoadBalancerDescription().withLoadBalancerName("test-hello-tag-no-value")): + [new TagDescription().withLoadBalancerName("test-hello-tag-no-value").withTags(new Tag().withKey("hello"))], + (new LoadBalancerDescription().withLoadBalancerName("test-no-hello-tag")): + [new TagDescription().withLoadBalancerName("test-no-hello-tag").withTags(new Tag().withKey("Name"))], + (new LoadBalancerDescription().withLoadBalancerName("test-no-tags")):[] + ] as Map + } + + private static String buildCacheKey(String name) { + return "aws:loadBalancers:accountName:region:${name}" + } + + private static List buildCacheKeys(List names) { + return names.collect {"aws:loadBalancers:accountName:region:${it}" } as List + } + + private static def taggify(String name = null, String value = null) { + return new AmazonCachingAgentFilter.TagFilterOption(name, value) + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgentSpec.groovy index a202cbd6378..d00050a3b18 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSecurityGroupCachingAgentSpec.groovy @@ -19,7 +19,8 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent import com.amazonaws.services.ec2.AmazonEC2 import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult import com.amazonaws.services.ec2.model.SecurityGroup -import com.netflix.awsobjectmapper.AmazonObjectMapper +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer import com.netflix.spectator.api.Spectator
import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData @@ -47,7 +48,7 @@ class AmazonSecurityGroupCachingAgentSpec extends Specification { getLastModified() >> 12345L } ProviderCache providerCache = Mock(ProviderCache) - AmazonObjectMapper mapper = new AmazonObjectMapper() + ObjectMapper mapper = new AmazonObjectMapperConfigurer().createConfigured() EddaTimeoutConfig eddaTimeoutConfig = new EddaTimeoutConfig.Builder().build() @Subject AmazonSecurityGroupCachingAgent agent = new AmazonSecurityGroupCachingAgent( @@ -97,4 +98,18 @@ class AmazonSecurityGroupCachingAgentSpec extends Specification { cache.cacheResults[SECURITY_GROUPS.ns] == existingCacheData } + + void "should get correct cache key pattern"() { + given: + def agent = getAgent() + + when: + def cacheKeyPatterns = agent.getCacheKeyPatterns() + + then: + cacheKeyPatterns.isPresent() + cacheKeyPatterns.get() == [ + (SECURITY_GROUPS.ns): "aws:securityGroups:*:*:region:account:*" + ] + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgentSpec.groovy index 3cff1b0f206..862cbd2a79d 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonSubnetCachingAgentSpec.groovy @@ -19,7 +19,9 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent import com.amazonaws.services.ec2.AmazonEC2 import com.amazonaws.services.ec2.model.DescribeSubnetsResult import com.amazonaws.services.ec2.model.Subnet +import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.awsobjectmapper.AmazonObjectMapper +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider @@ -45,7 +47,7 @@ class AmazonSubnetCachingAgentSpec extends Specification { ProviderCache providerCache = Mock(ProviderCache) - AmazonObjectMapper amazonObjectMapper = new AmazonObjectMapper() + ObjectMapper amazonObjectMapper = new AmazonObjectMapperConfigurer().createConfigured() @Subject AmazonSubnetCachingAgent agent = new AmazonSubnetCachingAgent( diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgentSpec.groovy index 6175d46988a..fbc7699c625 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ClusterCachingAgentSpec.groovy @@ -17,12 +17,33 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent +import com.amazonaws.services.autoscaling.AmazonAutoScaling import com.amazonaws.services.autoscaling.model.AutoScalingGroup +import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult +import com.amazonaws.services.autoscaling.model.LaunchTemplate +import com.amazonaws.services.autoscaling.model.LaunchTemplateSpecification +import com.amazonaws.services.autoscaling.model.MixedInstancesPolicy import 
com.amazonaws.services.autoscaling.model.SuspendedProcess +import com.amazonaws.services.autoscaling.model.TagDescription +import com.amazonaws.services.ec2.AmazonEC2 +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer +import com.netflix.spectator.api.Spectator +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.aws.security.EddaTimeoutConfig +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS + class ClusterCachingAgentSpec extends Specification { + static String region = 'region' + static String accountName = 'accountName' + static String accountId = 'accountId' + static int defaultMin = 1 static int defaultMax = 1 static int defaultDesired = 1 @@ -38,6 +59,30 @@ class ClusterCachingAgentSpec extends Specification { .withSuspendedProcesses(defaultSuspendedProcesses.collect { new SuspendedProcess().withProcessName(it) } ) + @Shared + ProviderCache providerCache = Mock(ProviderCache) + + @Shared + AmazonEC2 ec2 = Mock(AmazonEC2) + + @Shared + EddaTimeoutConfig edda = Mock(EddaTimeoutConfig) + + @Shared + AmazonCachingAgentFilter filter = new AmazonCachingAgentFilter() + + def getAgent() { + def creds = Stub(NetflixAmazonCredentials) { + getName() >> accountName + it.getAccountId() >> accountId + } + def cloud = Stub(AmazonCloudProvider) + def client = Stub(AmazonClientProvider) { + getAmazonEC2(creds, region, _) >> ec2 + } + new ClusterCachingAgent(cloud, client, creds, region, AmazonObjectMapperConfigurer.createConfigured(), Spectator.globalRegistry(), edda, filter) + } + @Unroll def "should compare capacity and suspended processes when determining if ASGs are similar"() { given: @@ -86,6 +131,128 @@ class ClusterCachingAgentSpec extends Specification { e.message.startsWith("failed to resolve only one vpc") } + @Unroll + def "should create launchTemplate/Config key correctly for all types of asg"() { + given: + AutoScalingGroup asg = new AutoScalingGroup() + .withAutoScalingGroupName("app-stack-v000") + .withDesiredCapacity(defaultDesired) + .withMinSize(defaultMin) + .withMaxSize(defaultMax) + ."$asgPropKey"(asgPropValue) + + when: + def asgData = new ClusterCachingAgent.AsgData(asg, null, null, "acc", "us-west-1", null) + + then: + asgData.launchConfig == launchConfigKey + asgData.launchTemplate == launchTemplateKey + + where: + asgPropKey | asgPropValue || launchTemplateKey || launchConfigKey + "withLaunchConfigurationName" | "launchConfig-1" || null || "aws:launchConfigs:acc:us-west-1:launchConfig-1" + "withLaunchTemplate" | new LaunchTemplateSpecification() + .withLaunchTemplateName("lt-1") + .withVersion("2") ||"aws:launchTemplates:acc:us-west-1:lt-1" || null + "withMixedInstancesPolicy" | new MixedInstancesPolicy() + .withLaunchTemplate(new LaunchTemplate() + .withLaunchTemplateSpecification( + new LaunchTemplateSpecification() + .withLaunchTemplateName("lt-1") + .withVersion("\$Latest") + ) + ) ||"aws:launchTemplates:acc:us-west-1:lt-1" || null + } + + def "on demand update result should have authoritative types correctly set"() { + given: + def agent = getAgent() + def data = [ + asgName: "asgName", + serverGroupName: "serverGroupName", + region: region, + account: accountName + 
] + + when: + def result = agent.handle(providerCache, data) + + then: + result.authoritativeTypes as Set == ["serverGroups"] as Set + } + + void "asg should filter excluded tags"() { + given: + def agent = getAgent() + def client = Stub(AmazonClientProvider) { + getAutoScaling(_, _, _) >> Stub(AmazonAutoScaling) { + describeAutoScalingGroups(_) >> new DescribeAutoScalingGroupsResult() { + List getAutoScalingGroups() { + return filterableASGs + } + } + } + } + + def clients = new ClusterCachingAgent.AmazonClients(client, agent.account, agent.region, false) + filter.includeTags = includeTags + filter.excludeTags = excludeTags + + when: + def result = agent.loadAutoScalingGroups(clients) + + then: + result.asgs*.autoScalingGroupName == expected + + where: + includeTags | excludeTags | expected + null | null | filterableASGs*.autoScalingGroupName + [taggify("hello")] | null | ["test-hello-tag-value", "test-hello-tag-value-different", "test-hello-tag-no-value"] + [taggify("hello", "goodbye")] | null | ["test-hello-tag-value"] + [taggify("hello", "goo")] | null | [] + [taggify("hello", ".*bye")] | null | ["test-hello-tag-value"] + [taggify(".*a.*")] | null | ["test-no-hello-tag"] + null | [taggify("hello")] | ["test-no-hello-tag", "test-no-tags"] + null | [taggify("hello", "goodbye")] | ["test-hello-tag-value-different", "test-hello-tag-no-value", "test-no-hello-tag", "test-no-tags"] + [taggify("hello", "goodbye")] | [taggify("hello")] | [] + [taggify(".*", "ciao")] | [taggify("hello", ".*")] | [] + } + + void "should get correct cache key pattern"() { + given: + def agent = getAgent() + + when: + def cacheKeyPatterns = agent.getCacheKeyPatterns() + + then: + cacheKeyPatterns.isPresent() + cacheKeyPatterns.get() == [ + (SERVER_GROUPS.ns): "aws:serverGroups:*:accountName:region:*" + ] + } + + private static final List filterableASGs = [ + new AutoScalingGroup() + .withAutoScalingGroupName("test-hello-tag-value") + .withTags(new TagDescription().withKey("hello").withValue("goodbye")), + new AutoScalingGroup() + .withAutoScalingGroupName("test-hello-tag-value-different") + .withTags(new TagDescription().withKey("hello").withValue("ciao")), + new AutoScalingGroup() + .withAutoScalingGroupName("test-hello-tag-no-value") + .withTags(new TagDescription().withKey("hello")), + new AutoScalingGroup() + .withAutoScalingGroupName("test-no-hello-tag") + .withTags(new TagDescription().withKey("Name")), + new AutoScalingGroup() + .withAutoScalingGroupName("test-no-tags"), + ] + + private static def taggify(String name = null, String value = null) { + return new AmazonCachingAgentFilter.TagFilterOption(name, value) + } + private SuspendedProcess sP(String processName) { return new SuspendedProcess().withProcessName(processName) } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgentSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgentSpec.groovy index 7cde45f87e5..627615a453a 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgentSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/agent/ImageCachingAgentSpec.groovy @@ -19,21 +19,17 @@ package com.netflix.spinnaker.clouddriver.aws.provider.agent import com.amazonaws.services.ec2.AmazonEC2 import com.amazonaws.services.ec2.model.DescribeImagesRequest import com.amazonaws.services.ec2.model.DescribeImagesResult -import 
com.amazonaws.services.ec2.model.DomainType import com.amazonaws.services.ec2.model.Filter import com.amazonaws.services.ec2.model.Image -import com.netflix.awsobjectmapper.AmazonObjectMapper import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer import com.netflix.spectator.api.Spectator import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.data.Keys import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService import spock.lang.Shared import spock.lang.Specification -import spock.lang.Subject import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.NAMED_IMAGES @@ -81,13 +77,18 @@ class ImageCachingAgentSpec extends Specification { } def getAgent(boolean publicImages, boolean eddaEnabled) { + getAgent(publicImages, eddaEnabled, List.of()) + } + + def getAgent(boolean publicImages, boolean eddaEnabled, List imageStates) { def creds = Stub(NetflixAmazonCredentials) { getName() >> accountName - getAccountId() >> accountId + it.getAccountId() >> accountId getEddaEnabled() >> eddaEnabled } def dcs = Stub(DynamicConfigService) { isEnabled(_ as String, true) >> true + getConfig(List, "aws.defaults.image-states", List.of()) >> imageStates } def acp = Stub(AmazonClientProvider) { getAmazonEC2(creds, region, _) >> ec2 @@ -95,6 +96,55 @@ class ImageCachingAgentSpec extends Specification { new ImageCachingAgent(acp, creds, region, AmazonObjectMapperConfigurer.createConfigured(), Spectator.globalRegistry(), publicImages, dcs) } + void "two images with the same name result in one named image"() { + given: 'two images with the same name' + // amis have unique ids, but it's possible for two amis with the same name + // to exist in the same account (and potentially the same region). 
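+ // the caching agent is expected to collapse both AMIs into a single named-image cache entry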
+ String imageName = 'foo' + Image imageOne = new Image().withImageId('ami-1').withName(imageName) + Image imageTwo = new Image().withImageId('ami-2').withName(imageName) + String imageOneKey = Keys.getNamedImageKey(accountName, imageOne.getName()) + String imageTwoKey = Keys.getNamedImageKey(accountName, imageTwo.getName()) + + and: + // arbitrary values for publicImages and eddaEnabled, but the expected + // request corresponds to them + def agent = getAgent(false, false) + def request = new DescribeImagesRequest().withFilters(new Filter('is-public', ['false'])) + + when: + def result = agent.loadData(providerCache) + + then: 'the result has one named image' + 1 * ec2.describeImages(request) >> new DescribeImagesResult(images: [imageOne, imageTwo]) + 0 * _ + + result.cacheResults[NAMED_IMAGES.ns].size() == 1 + + and: 'the named image is related to both amis' + def imageRelationships = result.cacheResults[NAMED_IMAGES.ns][0].relationships[IMAGES.ns] + imageRelationships.size() == 2 + imageRelationships.containsAll(Keys.getImageKey('ami-1', accountName, region), + Keys.getImageKey('ami-2', accountName, region)) + } + + void "include the filter corresponding to the configured image states"() { + given: + // arbitrary values for publicImages and eddaEnabled, but the expected + // request corresponds to them + def imageStates = ['available', 'failed'] + def agent = getAgent(false, false, imageStates) + def request = new DescribeImagesRequest().withFilters(new Filter('is-public', ['false']), new Filter('state', imageStates)) + + when: + def result = agent.loadData(providerCache) + + then: + // arbitrarily choose the image to return + 1 * ec2.describeImages(request) >> new DescribeImagesResult(images: [privateImage]) + 0 * _ + } + void "should include only private images"() { given: def agent = getAgent(false, false) diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudFormationProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudFormationProviderSpec.groovy new file mode 100644 index 00000000000..3975c1e01c6 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudFormationProviderSpec.groovy @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2019 Schibsted Media Group. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.provider.view + +import com.fasterxml.jackson.core.type.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.aws.cache.Keys +import com.netflix.spinnaker.clouddriver.aws.model.AmazonCloudFormationStack +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +import static com.netflix.spinnaker.clouddriver.aws.cache.Keys.Namespace.STACKS + +class AmazonCloudFormationProviderSpec extends Specification { + static final TypeReference<Map<String, Object>> ATTRIBUTES = new TypeReference<Map<String, Object>>() {} + + @Subject + AmazonCloudFormationProvider provider + + ObjectMapper objectMapper = new ObjectMapper() + + def setup() { + def cache = new InMemoryCache() + cloudFormations.each { + cache.merge(STACKS.ns, + new DefaultCacheData(makeKey(it), objectMapper.convertValue(it, ATTRIBUTES), [:])) + } + + provider = new AmazonCloudFormationProvider(cache, objectMapper) + } + + @Unroll + void "list all cloud formations by account (any region)"() { + when: + def result = provider.list(accountId, '*') as Set + + then: + result == expected + + where: + accountId || expected + "account1" || [ stack1, stack2 ] as Set + "account2" || [ stack3 ] as Set + "unknown" || [] as Set + null || [] as Set + } + + @Unroll + void "list all cloud formations by account and region"() { + when: + def result = provider.list(account, region) as Set + + then: + result == expected + + where: + account | region || expected + "account1" | "region1" || [ stack1 ] as Set + "account1" | "region2" || [ stack2 ] as Set + "account1" | "region3" || [] as Set + "account1" | null || [] as Set + "account2" | "region1" || [ stack3 ] as Set + "unknown" | "unknown" || [] as Set + } + + @Unroll + void "get a cloud formation by stackId"() { + when: + def result = provider.get(stackId) + + then: + result == expected + + where: + stackId || expected + "stack1" || Optional.of(stack1) + "stack2" || Optional.of(stack2) + "stack3" || Optional.of(stack3) + "unknown" || Optional.empty() + null || Optional.empty() + } + + @Shared + def stack1 = new AmazonCloudFormationStack(stackId: "stack1", region: "region1", accountId: "account1") + @Shared + def stack2 = new AmazonCloudFormationStack(stackId: "stack2", region: "region2", accountId: "account1") + @Shared + def stack3 = new AmazonCloudFormationStack(stackId: "stack3", region: "region1", accountId: "account2") + + @Shared + Set cloudFormations = [stack1, stack2, stack3] + + private static String makeKey(AmazonCloudFormationStack stack) { + Keys.getCloudFormationKey(stack.stackId, stack.region, stack.accountId) + } + +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProviderSpec.groovy index 03e888df947..3ce9fb46519 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonCloudMetricProviderSpec.groovy @@ -17,16 +17,12 @@ package com.netflix.spinnaker.clouddriver.aws.provider.view import com.amazonaws.services.cloudwatch.AmazonCloudWatch -import com.amazonaws.services.cloudwatch.model.Dimension -import
com.amazonaws.services.cloudwatch.model.GetMetricStatisticsRequest -import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsResult -import com.amazonaws.services.cloudwatch.model.ListMetricsResult -import com.amazonaws.services.cloudwatch.model.Metric +import com.amazonaws.services.cloudwatch.model.* import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider import com.netflix.spinnaker.clouddriver.aws.model.AmazonMetricDescriptor import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -41,14 +37,14 @@ class AmazonCloudMetricProviderSpec extends Specification { def setup() { cloudWatch = Mock(AmazonCloudWatch) - AccountCredentialsProvider accountCredentialsProvider = Stub(AccountCredentialsProvider) { - getCredentials(_) >> Stub(NetflixAmazonCredentials) + CredentialsRepository credentialsRepository = Stub(CredentialsRepository) { + getOne(_) >> Stub(NetflixAmazonCredentials) } AmazonClientProvider amazonClientProvider = Stub(AmazonClientProvider) { getCloudWatch(_, _) >> cloudWatch } AmazonCloudProvider amazonCloudProvider = Mock(AmazonCloudProvider) - provider = new AmazonCloudMetricProvider(amazonClientProvider, accountCredentialsProvider, amazonCloudProvider) + provider = new AmazonCloudMetricProvider(amazonClientProvider, credentialsRepository, amazonCloudProvider) } void "getMetric returns null when none found"() { diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonClusterProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonClusterProviderSpec.groovy new file mode 100644 index 00000000000..3648bda7785 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonClusterProviderSpec.groovy @@ -0,0 +1,406 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.provider.view + +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheFilter +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.data.Keys +import com.netflix.spinnaker.clouddriver.aws.model.AmazonServerGroup +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider +import org.junit.jupiter.api.BeforeEach +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.APPLICATIONS +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.CLUSTERS +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LAUNCH_CONFIGS +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LAUNCH_TEMPLATES +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS + +class AmazonClusterProviderSpec extends Specification { + def cacheView = Mock(Cache) + def objectMapper = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + + def amazonCloudProvider = Mock(AmazonCloudProvider) + def awsProvider = Mock(AwsProvider) + + @Subject + def provider = new AmazonClusterProvider(amazonCloudProvider, cacheView, awsProvider) + + def app = "app" + def account = "test" + def region = "us-east-1" + def clusterName = "app-main" + String clusterId = Keys.getClusterKey(clusterName, app, account) + def clusterAttributes = [name: clusterName, application: app] + + def serverGroupName = "app-main-v000" + String serverGroupId = Keys.getServerGroupKey(clusterName, serverGroupName, account, region) + def serverGroup = [ + name: serverGroupName, + instances: [], + asg: [:] + ] + + def launchTemplateName = "$serverGroupName-123" + def launchConfigName = "$serverGroupName-123" + + @BeforeEach + def setup() { + serverGroup.asg.clear() + } + + def "should get cluster details with build info"() { + given: + def imageId = "ami-1" + def imageKey = Keys.getImageKey(imageId, account, region) + def imageAttributes = [ + imageId: imageId, + tags: [appversion: "app-0.487.0-h514.f4be391/job/1"] + ] + + serverGroup.asg = [ launchConfigurationName: launchConfigName] + def launchConfiguration = new DefaultCacheData( + Keys.getLaunchConfigKey(launchConfigName, account, region), + [imageId: imageId], + [images: [imageKey]] + ) + + and: + cacheView.supportsGetAllByApplication() >> false + cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(app)) >> new DefaultCacheData( + Keys.getApplicationKey(app), [name: app], [serverGroups: [serverGroupId], clusters: [clusterId]] + ) + cacheView.getAll(LAUNCH_CONFIGS.ns, _ as Set) >> [launchConfiguration] + cacheView.filterIdentifiers(CLUSTERS.ns, _) >> [clusterId] + cacheView.getAll(CLUSTERS.ns, _ as Collection) >> [new DefaultCacheData(clusterId, clusterAttributes, [serverGroups: [serverGroupId]])] + cacheView.getAll(SERVER_GROUPS.ns, [ serverGroupId ], _ as CacheFilter) >> [ + new DefaultCacheData(serverGroupId, serverGroup, [launchConfigs: [launchConfiguration.id]]) + ] + cacheView.getAll(IMAGES.ns, _ as Set) >> [ + new DefaultCacheData(imageKey, 
imageAttributes, [:]) + ] + + when: + def result = provider.getClusterDetails(app) + + then: + def clusters = result.values() + def allServerGroups = clusters*.serverGroups.flatten() as Set + + clusters.size() == 1 + allServerGroups.size() == 1 + allServerGroups[0].launchConfig != null + allServerGroups[0].buildInfo != null + allServerGroups[0].image != null + allServerGroups[0].launchTemplate == null + allServerGroups[0].mixedInstancesPolicy == null + } + + def "should get cluster details by app with build info"() { + given: + def imageId = "ami-1" + def imageKey = Keys.getImageKey(imageId, account, region) + def imageAttributes = [ + imageId: imageId, + tags: [appversion: "app-0.487.0-h514.f4be391/job/514"] + ] + + serverGroup.asg = [ launchConfigurationName: launchConfigName] + def launchConfiguration = new DefaultCacheData( + Keys.getLaunchConfigKey(launchConfigName, account, region), + [imageId: imageId], + [images: [imageKey]] + ) + + def cluster = new DefaultCacheData(clusterId, clusterAttributes, [serverGroups: [serverGroupId]]) + def serverGroup = new DefaultCacheData(serverGroupId, serverGroup, [launchConfigs: [launchConfiguration.id]]) + def image = new DefaultCacheData(imageKey, imageAttributes, [:]) + + and: + cacheView.getAllByApplication(_, _, _) >> [ + serverGroups: [serverGroup], + clusters: [cluster], + launchConfigs: [launchConfiguration], + images: [image] + ] + + cacheView.getAll(LAUNCH_CONFIGS.ns, _ as Set) >> [launchConfiguration] + cacheView.filterIdentifiers(CLUSTERS.ns, _) >> [cluster.id] + cacheView.getAll(CLUSTERS.ns, _ as Collection) >> [cluster] + cacheView.getAll(SERVER_GROUPS.ns, [ serverGroupId ], _ as CacheFilter) >> [serverGroup] + + cacheView.getAll(IMAGES.ns, _ as Set) >> [image] + + when: + def result = provider.getClusterDetails(app) + + then: + def clusters = result.values() + def allServerGroups = clusters*.serverGroups.flatten() as Set + + clusters.size() == 1 + allServerGroups.size() == 1 + allServerGroups[0].launchConfig != null + allServerGroups[0].buildInfo != null + allServerGroups[0].image != null + allServerGroups[0].launchTemplate == null + allServerGroups[0].mixedInstancesPolicy == null + } + + def "should resolve server group launch config"() { + given: + serverGroup.asg = [ launchConfigurationName: launchConfigName] + def launchConfiguration = new DefaultCacheData( + Keys.getLaunchConfigKey(launchConfigName, account, "us-east-1"), [ imageId: "ami-1"], [:]) + + and: + cacheView.getAll(LAUNCH_CONFIGS.ns, _ as Set) >> [launchConfiguration] + cacheView.get(CLUSTERS.ns, clusterId) >> new DefaultCacheData(clusterId, clusterAttributes, [serverGroups: [serverGroupId]]) + cacheView.getAll(SERVER_GROUPS.ns, [ serverGroupId ], _ as CacheFilter) >> [ + new DefaultCacheData(serverGroupId, serverGroup, [launchConfigs: [launchConfiguration.id]]) + ] + + when: + def result = provider.getCluster(app, account, clusterName) + + then: + with(result) { + def sg = serverGroups.first() + type == "aws" + name == clusterName + accountName == account + result.serverGroups.size() == 1 + sg.launchConfig == launchConfiguration.attributes + sg.launchTemplate == null + sg.mixedInstancesPolicy == null + } + } + + @Unroll + def "should resolve server group launch template"() { + given: + serverGroup.asg = [ + launchTemplate: [ + launchTemplateName: launchTemplateName, + version: asgLaunchTemplateVersion + ] + ] + + def defaultVersion = [ + launchTemplateName: launchTemplateName, + versionNumber: 0, + defaultVersion: true, + launchTemplateData: [ + imageId: "ami-345" + ] 
+ ] + + def latestVersion = [ + launchTemplateName: launchTemplateName, + versionNumber: 1, + defaultVersion: false, + launchTemplateData: [ + imageId: "ami-123" + ] + ] + + def launchTemplate = new DefaultCacheData( + Keys.getLaunchTemplateKey(launchTemplateName, account, "us-east-1"), + [ + launchTemplateName: launchTemplateName, + latestVersion: latestVersion, + versions: [ + defaultVersion, + latestVersion + ] + ], [:]) + + and: + cacheView.getAll(LAUNCH_TEMPLATES.ns, _ as Set) >> [launchTemplate] + cacheView.get(CLUSTERS.ns, clusterId) >> new DefaultCacheData(clusterId, clusterAttributes, [serverGroups: [serverGroupId]]) + cacheView.getAll(SERVER_GROUPS.ns, [ serverGroupId ], _ as CacheFilter) >> [ + new DefaultCacheData(serverGroupId, serverGroup, [launchTemplates: [launchTemplate.id]]) + ] + + when: + def result = provider.getCluster(app, account, clusterName) + + then: + result.serverGroups.size() == 1 + result.serverGroups[0].launchConfig == null + result.serverGroups[0].mixedInstancesPolicy == null + result.serverGroups[0].launchTemplate.versionNumber == resolvedVersion + + where: + asgLaunchTemplateVersion | resolvedVersion + '1' | 1 + '$Default' | 0 + '$Latest' | 1 + } + + def "should get server group with expected properties for asg with launch configuration"() { + given: + def lcKey = Keys.getLaunchConfigKey(launchConfigName, account, region) + def lcCacheAttr = [imageId: "ami-1"] + def lcCache = new DefaultCacheData(lcKey, lcCacheAttr, [serverGroups: [serverGroupId]]) + + and: + def imageKey = Keys.getImageKey("ami-1", account, region) + def imageCacheAttr = [imageId: "ami-1", tags: [appversion: "app-0.487.0-h514.f4be391/job/1"]] + def imageCache = new DefaultCacheData(imageKey, imageCacheAttr, [:]) + + and: + serverGroup.asg = [ launchConfigName: launchConfigName] + serverGroup["launchConfigName"] = launchConfigName + def sgCache = new DefaultCacheData(serverGroupId, serverGroup, [launchConfigs: [lcCache.id]]) + + and: + cacheView.get(SERVER_GROUPS.ns, serverGroupId) >> sgCache + cacheView.get(LAUNCH_CONFIGS.ns, lcKey) >> lcCache + cacheView.get(IMAGES.ns, imageKey) >> imageCache + + when: + def actualServerGroup = provider.getServerGroup(account, region, serverGroupName, false) + + then: + actualServerGroup.image["imageId"] == imageCacheAttr["imageId"] + actualServerGroup.launchConfig == lcCacheAttr + actualServerGroup.launchTemplate == null + actualServerGroup.mixedInstancesPolicy == null + } + + def "should get server group with expected properties for asg with launch template"() { + given: + def latestVersion = [ + launchTemplateName: launchTemplateName, + versionNumber: 1, + defaultVersion: false, + launchTemplateData: [ + imageId: "ami-1" + ] + ] + def ltKey = Keys.getLaunchTemplateKey(launchTemplateName, account, region) + def ltCacheAttr = [ launchTemplateName: launchTemplateName, + latestVersion: latestVersion, + versions: [latestVersion]] + def ltCache = new DefaultCacheData(ltKey, ltCacheAttr, [serverGroups: [serverGroupId]]) + + and: + def imageKey = Keys.getImageKey("ami-1", account, region) + def imageCacheAttr = [imageId: "ami-1", tags: [appversion: "app-0.487.0-h514.f4be391/job/1"]] + def imageCache = new DefaultCacheData(imageKey, imageCacheAttr, [:]) + + and: + serverGroup.asg = [ + launchTemplate: [ + launchTemplateName: launchTemplateName, + version: '1' + ] + ] + def sgCache = new DefaultCacheData(serverGroupId, serverGroup, [launchTemplates: [ltCache.id]]) + + and: + cacheView.get(SERVER_GROUPS.ns, serverGroupId) >> sgCache + 
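// stub the cache reads so the provider can resolve the server group's launch template and backing image +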
cacheView.get(LAUNCH_TEMPLATES.ns, ltKey) >> ltCache + cacheView.get(IMAGES.ns, imageKey) >> imageCache + + when: + def actualServerGroup = provider.getServerGroup(account, region, serverGroupName, false) + + then: + actualServerGroup.image["imageId"] == imageCacheAttr["imageId"] + actualServerGroup.launchTemplate == latestVersion + actualServerGroup.launchConfig == null + actualServerGroup.mixedInstancesPolicy == null + } + + def "should get server group with expected properties for asg with mixed instances policy"() { + given: + def latestVersion = [ + launchTemplateName: launchTemplateName, + versionNumber: 1, + defaultVersion: false, + launchTemplateData: [ + imageId: "ami-1", + instanceType: "some.type.small" + ] + ] + def ltKey = Keys.getLaunchTemplateKey(launchTemplateName, account, region) + def ltCacheAttr = [ launchTemplateName: launchTemplateName, + latestVersion: latestVersion, + versions: [latestVersion]] + def ltCache = new DefaultCacheData(ltKey, ltCacheAttr, [serverGroups: [serverGroupId]]) + + and: + def imageKey = Keys.getImageKey("ami-1", account, region) + def imageCacheAttr = [imageId: "ami-1", tags: [appversion: "app-0.487.0-h514.f4be391/job/1"]] + def imageCache = new DefaultCacheData(imageKey, imageCacheAttr, [:]) + + and: + serverGroup.asg = [ + mixedInstancesPolicy: [ + instancesDistribution: [ + onDemandAllocationStrategy: "prioritized", + onDemandBaseCapacity: 1, + onDemandPercentageAboveBaseCapacity: 50, + spotAllocationStrategy: "lowest-price", + spotInstancePools: 4, + spotMaxPrice: "1" + ], + launchTemplate: [ + launchTemplateSpecification: [ + launchTemplateName: launchTemplateName, + version: "\$Latest" + ], + overrides: overrides + ] + ] + ] + def sgCache = new DefaultCacheData(serverGroupId, serverGroup, [launchTemplates: [ltCache.id]]) + + and: + cacheView.get(SERVER_GROUPS.ns, serverGroupId) >> sgCache + cacheView.get(LAUNCH_TEMPLATES.ns, ltKey) >> ltCache + cacheView.get(IMAGES.ns, imageKey) >> imageCache + + when: + def actualServerGroup = provider.getServerGroup(account, region, serverGroupName, false) + + then: + actualServerGroup.image["imageId"] == imageCacheAttr["imageId"] + actualServerGroup.launchConfig == null + actualServerGroup.launchTemplate == null + actualServerGroup.mixedInstancesPolicy.allowedInstanceTypes == expectedAllowedInstanceTypes + actualServerGroup.mixedInstancesPolicy.instancesDistribution == serverGroup.asg["mixedInstancesPolicy"]["instancesDistribution"] + actualServerGroup.mixedInstancesPolicy.launchTemplates == expectedInstanceTypeInLtData ? 
latestVersion : {latestVersion.clone(); latestVersion["launchTemplateData"].remove("instanceType")} + actualServerGroup.mixedInstancesPolicy.launchTemplateOverridesForInstanceType == overrides + + where: + overrides || expectedInstanceTypeInLtData | expectedAllowedInstanceTypes + null || "some.type.small" | ["some.type.small"] + [[instanceType: "some.type.large", weightedCapacity: 2], + [instanceType: "some.type.xlarge", weightedCapacity: 4]] || null | ["some.type.large", "some.type.xlarge"] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProviderSpec.groovy index b145d0ce149..79da42a6151 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonInstanceTypeProviderSpec.groovy @@ -69,7 +69,8 @@ class AmazonInstanceTypeProviderSpec extends Specification { given: config.setExcluded([ new AmazonInstanceTypeProviderConfiguration.InstanceTypeOption('m1.regionfiltered', ['us-east-1']), - new AmazonInstanceTypeProviderConfiguration.InstanceTypeOption('m1.allfiltered')]) + new AmazonInstanceTypeProviderConfiguration.InstanceTypeOption('m1.allfiltered'), + new AmazonInstanceTypeProviderConfiguration.InstanceTypeOption('m2.*')]) when: def result = provider.getAll() @@ -119,6 +120,10 @@ class AmazonInstanceTypeProviderSpec extends Specification { account : 'test', region : 'us-west-2', name : 'm1.allfiltered']), + itData('m2.filtered', [ + account : 'test', + region : 'us-west-2', + name : 'm2.filtered']), ] } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonLoadBalancerProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonLoadBalancerProviderSpec.groovy new file mode 100644 index 00000000000..10dfddb5851 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonLoadBalancerProviderSpec.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.aws.provider.view + +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheFilter +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.* + +class AmazonLoadBalancerProviderSpec extends Specification { + def cacheView = Mock(Cache) + def awsProvider = Mock(AwsProvider) + + @Subject + def provider = new AmazonLoadBalancerProvider(cacheView, awsProvider) + + def app = "app" + def account = "some_long_account_name" + def region = "us-east-1" + def stack = "stack" + def detail = "detail" + def vpc = "vpc-a45e72d1" + + def "should get load balancers by application"() { + given: + cacheView.getIdentifiers(LOAD_BALANCERS.ns) >> ["aws:loadBalancers:$account:$region:$app:$vpc", + "aws:loadBalancers:$account:$region:$app-$stack:$vpc:network", + "aws:loadBalancers:$account:$region:$app-$stack-$detail:$vpc:albFunction", + "aws:loadBalancers:$account:$region:wrong$app-$stack-$detail:$vpc:albFunction"] + cacheView.getIdentifiers(TARGET_GROUPS.ns) >> [] + + cacheView.getAll(_, _, _) >> { String collection, Set keys, CacheFilter filter -> + return keys.collect { + new DefaultCacheData( + it, + [:], + [:] + ) + } + } + + cacheView.getAll(TARGET_GROUPS.ns, _, _) >> [] + + when: 'Requesting LBs for our app' + def result = provider.getApplicationLoadBalancers(app) + + then: 'We get them all' + result.size() == 3 + + when: 'Requesting all network LBs' + result = provider.getApplicationLoadBalancers("network") + + then: 'We get nothing since "network" is not the name of the app' + result.size() == 0 + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProviderSpec.groovy index 6795057c1f7..a1dd2862c73 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonS3DataProviderSpec.groovy @@ -20,22 +20,24 @@ import com.amazonaws.services.s3.model.S3Object import com.amazonaws.services.s3.model.S3ObjectInputStream import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.security.access.AccessDeniedException import spock.lang.Specification import spock.lang.Subject import spock.lang.Unroll -import java.util.regex.Pattern; +import java.util.regex.Pattern -import static com.netflix.spinnaker.clouddriver.model.DataProvider.IdentifierType.* -import static com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3StaticDataProviderConfiguration.StaticRecordType.* +import static com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3StaticDataProviderConfiguration.StaticRecordType.list +import static
com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3StaticDataProviderConfiguration.StaticRecordType.string +import static com.netflix.spinnaker.clouddriver.model.DataProvider.IdentifierType.Adhoc +import static com.netflix.spinnaker.clouddriver.model.DataProvider.IdentifierType.Static class AmazonS3DataProviderSpec extends Specification { def objectMapper = new ObjectMapper() def amazonClientProvider = Mock(AmazonClientProvider) - def accountCredentialsRepository = Mock(AccountCredentialsRepository) + def accountCredentialsRepository = Stub(CredentialsRepository) def configuration = new AmazonS3StaticDataProviderConfiguration([ new AmazonS3StaticDataProviderConfiguration.StaticRecord("staticId", string, "accountName", "us-east-1", "bucket", "key"), new AmazonS3StaticDataProviderConfiguration.StaticRecord("staticListId", list, "accountName", "us-east-1", "bucket", "listKey") @@ -60,7 +62,7 @@ class AmazonS3DataProviderSpec extends Specification { void setup() { accountCredentialsRepository.getAll() >> { [ - Mock(AccountCredentials) { + Mock(NetflixAmazonCredentials) { getAccountId() >> "12345678910" getName() >> "accountName" } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProviderSpec.groovy index 65e95eb9336..7b8a8f4f74d 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSecurityGroupProviderSpec.groovy @@ -16,25 +16,20 @@ package com.netflix.spinnaker.clouddriver.aws.provider.view -import com.amazonaws.services.ec2.model.IpPermission -import com.amazonaws.services.ec2.model.IpRange -import com.amazonaws.services.ec2.model.Ipv6Range -import com.amazonaws.services.ec2.model.SecurityGroup -import com.amazonaws.services.ec2.model.UserIdGroupPair +import com.amazonaws.services.ec2.model.* import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.cache.WriteableCache import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.aws.cache.Keys +import com.netflix.spinnaker.clouddriver.aws.model.AmazonSecurityGroup +import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule import com.netflix.spinnaker.clouddriver.model.securitygroups.SecurityGroupRule -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.aws.cache.Keys -import com.netflix.spinnaker.clouddriver.aws.model.AmazonSecurityGroup -import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -47,32 +42,23 @@ class AmazonSecurityGroupProviderSpec extends Specification { WriteableCache cache = new InMemoryCache() ObjectMapper mapper = new ObjectMapper() - final 
credential1 = Stub(NetflixAmazonCredentials) { + def credential1 = Stub(NetflixAmazonCredentials) { getName() >> "accountName1" getAccountId() >> "accountId1" } - final credential2 = Stub(NetflixAmazonCredentials) { + def credential2 = Stub(NetflixAmazonCredentials) { getName() >> "accountName2" getAccountId() >> "accountId2" } - final accountCredentialsProvider = new AccountCredentialsProvider() { - - @Override - Set getAll() { - [credential1, credential2] - } - - @Override - AccountCredentials getCredentials(String name) { - return null - } + def credentialsRepository = Stub(CredentialsRepository) { + getAll() >> [credential1, credential2] } def setup() { - provider = new AmazonSecurityGroupProvider(accountCredentialsProvider, cache, mapper) + provider = new AmazonSecurityGroupProvider(credentialsRepository, cache, mapper) cache.mergeAll(Keys.Namespace.SECURITY_GROUPS.ns, getAllGroups()) } @@ -162,9 +148,26 @@ class AmazonSecurityGroupProviderSpec extends Specification { } + void "getById returns match based on account, region, and id"() { + + when: + def result = provider.getById(account, region, id, null) + + then: + result != null + result.accountName == account + result.region == region + result.id == id + + where: + account = 'prod' + region = 'us-east-1' + id = 'sg-a' + } + void "should add both ipRangeRules and securityGroup rules"() { given: - String groupId = 'id-a' + String groupId = 'sg-a' String groupName = 'name-a' String vpcId = null SecurityGroup mixedRangedGroupA = new SecurityGroup( @@ -180,7 +183,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { ipv4Ranges: [new IpRange(cidrIp: '0.0.0.0/32'), new IpRange(cidrIp: '0.0.0.1/31')], ipv6Ranges: [new Ipv6Range(cidrIpv6: '::/0')], userIdGroupPairs: [ - new UserIdGroupPair(groupId: 'id-b', groupName: 'name-b', userId: 'test') + new UserIdGroupPair(groupId: 'sg-b', groupName: 'name-b', userId: 'test') ] ) ]) @@ -210,8 +213,8 @@ class AmazonSecurityGroupProviderSpec extends Specification { void "should add security group ingress with different protocols"() { given: - SecurityGroup securityGroupA = new SecurityGroup(ownerId: "accountId1", groupId: 'id-a', groupName: 'name-a', description: 'a') - SecurityGroup securityGroupB = new SecurityGroup(ownerId: "accountId1", groupId: 'id-b', groupName: 'name-b', description: 'b') + SecurityGroup securityGroupA = new SecurityGroup(ownerId: "accountId1", groupId: 'sg-a', groupName: 'name-a', description: 'a') + SecurityGroup securityGroupB = new SecurityGroup(ownerId: "accountId1", groupId: 'sg-b', groupName: 'name-b', description: 'b') securityGroupB.ipPermissions = [ new IpPermission(ipProtocol: "TCP", fromPort: 7001, toPort: 7001, userIdGroupPairs: [ new UserIdGroupPair(userId: "accountId1", groupId: securityGroupA.groupId, groupName: securityGroupA.groupName) @@ -222,7 +225,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { ] String account = 'test' String region = 'us-east-1' - def key = Keys.getSecurityGroupKey('name-b', 'id-b', region, account, null) + def key = Keys.getSecurityGroupKey('name-b', 'sg-b', region, account, null) Map attributes = mapper.convertValue(securityGroupB, AwsInfrastructureProvider.ATTRIBUTES) def cacheData = new DefaultCacheData(key, attributes, [:]) cache.merge(Keys.Namespace.SECURITY_GROUPS.ns, cacheData) @@ -231,11 +234,11 @@ class AmazonSecurityGroupProviderSpec extends Specification { def sg = provider.get(account, region, 'name-b', null) then: - sg == new AmazonSecurityGroup(id: "id-b", name: "name-b", description: "b", + 
sg == new AmazonSecurityGroup(id: "sg-b", name: "name-b", description: "b", accountName: account, region: region, inboundRules: [ new SecurityGroupRule(protocol: "TCP", securityGroup: new AmazonSecurityGroup( - id: 'id-a', + id: 'sg-a', name: 'name-a', accountName: "accountName1", accountId: "accountId1", @@ -247,7 +250,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { ), new SecurityGroupRule(protocol: "UDP", securityGroup: new AmazonSecurityGroup( - id: 'id-a', + id: 'sg-a', name: 'name-a', accountName: "accountName1", accountId: "accountId1", @@ -257,7 +260,8 @@ class AmazonSecurityGroupProviderSpec extends Specification { new Rule.PortRange(startPort: 7001, endPort: 7001) ] as SortedSet ) - ]) + ], + tags: []) 0 * _ } @@ -266,7 +270,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { String account = 'test' String region = 'us-east-1' SecurityGroup group = new SecurityGroup( - groupId: 'id-a', + groupId: 'sg-a', groupName: 'name-a', description: 'a', ipPermissions: [ @@ -283,7 +287,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { ipRanges: ['0.0.0.0/32', '0.0.0.1/31'] ) ]) - def key = Keys.getSecurityGroupKey('name-a', 'id-a', region, account, null) + def key = Keys.getSecurityGroupKey('name-a', 'sg-a', region, account, null) Map attributes = mapper.convertValue(group, AwsInfrastructureProvider.ATTRIBUTES) def cacheData = new DefaultCacheData(key, attributes, [:]) cache.merge(Keys.Namespace.SECURITY_GROUPS.ns, cacheData) @@ -307,7 +311,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { String account = 'test' String region = 'us-east-1' SecurityGroup group = new SecurityGroup( - groupId: 'id-a', + groupId: 'sg-a', groupName: 'name-a', description: 'a', ipPermissions: [ @@ -324,7 +328,7 @@ class AmazonSecurityGroupProviderSpec extends Specification { ipRanges: ['0.0.0.0/32', '0.0.0.1/31'] ) ]) - def key = Keys.getSecurityGroupKey('name-a', 'id-a', region, account, null) + def key = Keys.getSecurityGroupKey('name-a', 'sg-a', region, account, null) Map attributes = mapper.convertValue(group, AwsInfrastructureProvider.ATTRIBUTES) def cacheData = new DefaultCacheData(key, attributes, [:]) cache.merge(Keys.Namespace.SECURITY_GROUPS.ns, cacheData) @@ -346,15 +350,15 @@ class AmazonSecurityGroupProviderSpec extends Specification { String vpcId = 'vpc-1234' String account = 'accountName1' String region = 'us-east-1' - SecurityGroup securityGroupA = new SecurityGroup(ownerId: account, groupId: 'id-a', groupName: 'name-a', description: 'a', vpcId: vpcId) - SecurityGroup securityGroupB = new SecurityGroup(ownerId: account, groupId: 'id-b', groupName: 'name-b', description: 'b', vpcId: vpcId) + SecurityGroup securityGroupA = new SecurityGroup(ownerId: account, groupId: 'sg-a', groupName: 'name-a', description: 'a', vpcId: vpcId) + SecurityGroup securityGroupB = new SecurityGroup(ownerId: account, groupId: 'sg-b', groupName: 'name-b', description: 'b', vpcId: vpcId) securityGroupA.ipPermissions = [ new IpPermission(ipProtocol: "TCP", fromPort: 7001, toPort: 7001, userIdGroupPairs: [ new UserIdGroupPair(userId: "accountId1", groupId: securityGroupB.groupId) ]) ] - def keyA = Keys.getSecurityGroupKey('name-a', 'id-a', region, account, vpcId) - def keyB = Keys.getSecurityGroupKey('name-b', 'id-b', region, account, vpcId) + def keyA = Keys.getSecurityGroupKey('name-a', 'sg-a', region, account, vpcId) + def keyB = Keys.getSecurityGroupKey('name-b', 'sg-b', region, account, vpcId) Map attributesA = mapper.convertValue(securityGroupA, 
AwsInfrastructureProvider.ATTRIBUTES) Map attributesB = mapper.convertValue(securityGroupB, AwsInfrastructureProvider.ATTRIBUTES) def cacheDataA = new DefaultCacheData(keyA, attributesA, [:]) @@ -377,15 +381,15 @@ class AmazonSecurityGroupProviderSpec extends Specification { String vpcId1 = 'vpc-1234' String vpcId2 = 'vpc-1235' String account1 = 'accountName1' String account2 = 'accountName2' String region = 'us-east-1' - SecurityGroup securityGroupA = new SecurityGroup(ownerId: account1, groupId: 'id-a', groupName: 'name-a', description: 'a', vpcId: vpcId1) - SecurityGroup securityGroupB = new SecurityGroup(ownerId: account2, groupId: 'id-b', groupName: 'name-b', description: 'b', vpcId: vpcId2) + SecurityGroup securityGroupA = new SecurityGroup(ownerId: account1, groupId: 'sg-a', groupName: 'name-a', description: 'a', vpcId: vpcId1) + SecurityGroup securityGroupB = new SecurityGroup(ownerId: account2, groupId: 'sg-b', groupName: 'name-b', description: 'b', vpcId: vpcId2) securityGroupA.ipPermissions = [ new IpPermission(ipProtocol: "TCP", fromPort: 7001, toPort: 7001, userIdGroupPairs: [ new UserIdGroupPair(userId: "accountId2", groupId: securityGroupB.groupId) ]) ] - def keyA = Keys.getSecurityGroupKey('name-a', 'id-a', region, account1, vpcId1) - def keyB = Keys.getSecurityGroupKey('name-b', 'id-b', region, account2, vpcId2) + def keyA = Keys.getSecurityGroupKey('name-a', 'sg-a', region, account1, vpcId1) + def keyB = Keys.getSecurityGroupKey('name-b', 'sg-b', region, account2, vpcId2) Map attributesA = mapper.convertValue(securityGroupA, AwsInfrastructureProvider.ATTRIBUTES) Map attributesB = mapper.convertValue(securityGroupB, AwsInfrastructureProvider.ATTRIBUTES) def cacheDataA = new DefaultCacheData(keyA, attributesA, [:]) @@ -406,22 +410,22 @@ class AmazonSecurityGroupProviderSpec extends Specification { Map<String, Map<String, List<SecurityGroup>>> securityGroupMap = [ prod: [ 'us-east-1': [ - new SecurityGroup(groupId: 'a', groupName: 'a'), - new SecurityGroup(groupId: 'b', groupName: 'b'), + new SecurityGroup(groupId: 'sg-a', groupName: 'a'), + new SecurityGroup(groupId: 'sg-b', groupName: 'b'), ], 'us-west-1': [ - new SecurityGroup(groupId: 'a', groupName: 'a'), - new SecurityGroup(groupId: 'b', groupName: 'b'), + new SecurityGroup(groupId: 'sg-a', groupName: 'a'), + new SecurityGroup(groupId: 'sg-b', groupName: 'b'), ] ], test: [ 'us-east-1': [ - new SecurityGroup(groupId: 'a', groupName: 'a'), - new SecurityGroup(groupId: 'b', groupName: 'b'), + new SecurityGroup(groupId: 'sg-a', groupName: 'a'), + new SecurityGroup(groupId: 'sg-b', groupName: 'b'), ], 'us-west-1': [ - new SecurityGroup(groupId: 'a', groupName: 'a'), - new SecurityGroup(groupId: 'b', groupName: 'b'), + new SecurityGroup(groupId: 'sg-a', groupName: 'a'), + new SecurityGroup(groupId: 'sg-b', groupName: 'b'), ] ] ] diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSubnetProviderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSubnetProviderSpec.groovy index 2ad17dc6a28..c71a34e3bda 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSubnetProviderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/provider/view/AmazonSubnetProviderSpec.groovy @@ -19,7 +19,7 @@ package com.netflix.spinnaker.clouddriver.aws.provider.view import com.amazonaws.services.ec2.model.Subnet import com.amazonaws.services.ec2.model.Tag import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.awsobjectmapper.AmazonObjectMapper
+import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.CacheFilter @@ -33,7 +33,7 @@ import spock.lang.Subject class AmazonSubnetProviderSpec extends Specification { Cache cache = Mock(Cache) - ObjectMapper mapper = new AmazonObjectMapper() + ObjectMapper mapper = new AmazonObjectMapperConfigurer().createConfigured() @Subject AmazonSubnetProvider provider = new AmazonSubnetProvider(cache, mapper) @@ -52,6 +52,7 @@ class AmazonSubnetProviderSpec extends Specification { cidrBlock: '10', availableIpAddressCount: 1, account: 'test', + accountId: '1', region: 'us-east-1', availabilityZone: 'us-east-1a', purpose: 'internal', @@ -66,6 +67,7 @@ class AmazonSubnetProviderSpec extends Specification { cidrBlock: '11', availableIpAddressCount: 2, account: 'prod', + accountId: '2', region: 'us-west-1', availabilityZone: 'us-west-1a', purpose: 'external', @@ -77,7 +79,7 @@ class AmazonSubnetProviderSpec extends Specification { and: 1 * cache.filterIdentifiers(Keys.Namespace.SUBNETS.ns, "aws:$Keys.Namespace.SUBNETS.ns:*:*:*") 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [ - snData('test', 'us-east-1', + snData('test', '1', 'us-east-1', new Subnet( subnetId: 'subnet-00000001', state: 'available', @@ -87,7 +89,7 @@ class AmazonSubnetProviderSpec extends Specification { availabilityZone: 'us-east-1a', tags: [new Tag(key: 'immutable_metadata', value: '{"purpose": "internal", "target": "EC2"}')] )), - snData('prod', 'us-west-1', new Subnet( + snData('prod', '2', 'us-west-1', new Subnet( subnetId: 'subnet-00000002', state: 'available', vpcId: 'vpc-1', @@ -112,6 +114,7 @@ class AmazonSubnetProviderSpec extends Specification { cidrBlock: '10', availableIpAddressCount: 1, account: 'test', + accountId: '1', region: 'us-east-1', availabilityZone: 'us-east-1a', purpose: 'external (vpc0)', @@ -121,7 +124,7 @@ class AmazonSubnetProviderSpec extends Specification { and: 1 * cache.filterIdentifiers(Keys.Namespace.SUBNETS.ns, "aws:$Keys.Namespace.SUBNETS.ns:*:*:*") - 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [snData('test', 'us-east-1', new Subnet( + 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [snData('test', '1', 'us-east-1', new Subnet( subnetId: 'subnet-00000001', state: 'available', vpcId: 'vpc-1', @@ -149,6 +152,7 @@ class AmazonSubnetProviderSpec extends Specification { cidrBlock: '10', availableIpAddressCount: 1, account: 'test', + accountId: '1', region: 'us-east-1', availabilityZone: 'us-east-1a', purpose: 'external (vpc0)', @@ -159,7 +163,7 @@ class AmazonSubnetProviderSpec extends Specification { and: 1 * cache.filterIdentifiers(Keys.Namespace.SUBNETS.ns, "aws:$Keys.Namespace.SUBNETS.ns:*:*:*") - 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [snData('test', 'us-east-1', new Subnet( + 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [snData('test', '1', 'us-east-1', new Subnet( subnetId: 'subnet-00000001', state: 'available', vpcId: 'vpc-1', @@ -174,8 +178,9 @@ class AmazonSubnetProviderSpec extends Specification { ))] } - CacheData snData(String account, String region, Subnet subnet) { + CacheData snData(String account, String accountId, String region, Subnet subnet) { Map attributes = mapper.convertValue(subnet, AwsInfrastructureProvider.ATTRIBUTES) + attributes.putIfAbsent("accountId", accountId) new DefaultCacheData(Keys.getSubnetKey(subnet.subnetId,
region, account), attributes, [:] @@ -196,6 +201,7 @@ class AmazonSubnetProviderSpec extends Specification { cidrBlock: '10', availableIpAddressCount: 1, account: 'test', + accountId: '1', region: 'us-east-1', availabilityZone: 'us-east-1a', purpose: 'external (vpc0)', @@ -204,7 +210,7 @@ class AmazonSubnetProviderSpec extends Specification { and: 1 * cache.filterIdentifiers(Keys.Namespace.SUBNETS.ns, "aws:$Keys.Namespace.SUBNETS.ns:*:*:*") - 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [snData('test', 'us-east-1', new Subnet( + 1 * cache.getAll(Keys.Namespace.SUBNETS.ns, _, _ as CacheFilter) >> [snData('test', '1', 'us-east-1', new Subnet( subnetId: 'subnet-00000001', state: 'available', vpcId: 'vpc-1', diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonBasicCredentialsLoaderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonBasicCredentialsLoaderSpec.groovy new file mode 100644 index 00000000000..41ad2cfbc32 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonBasicCredentialsLoaderSpec.groovy @@ -0,0 +1,266 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.security + +import com.amazonaws.SDKGlobalConfiguration +import com.amazonaws.auth.AWSCredentialsProvider +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.security.config.AmazonCredentialsParser +import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration.Account +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import java.util.stream.Collectors + +class AmazonBasicCredentialsLoaderSpec extends Specification{ + @Shared + def defaultAccountConfigurationProperties = new DefaultAccountConfigurationProperties() + + def 'should set defaults'() { + setup: + def credentialsConfig = new CredentialsConfig(){{ + setAccessKeyId("accessKey") + setSecretAccessKey("secret") + }} + def definitionSource = Mock(CredentialsDefinitionSource) { + getCredentialsDefinitions() >> [] + } + def credentialsRepository = new MapBackedCredentialsRepository(AmazonCloudProvider.ID, null) + AccountsConfiguration accountsConfig = new AccountsConfiguration() + def loader = new AmazonBasicCredentialsLoader( + definitionSource, + null, + credentialsRepository, + credentialsConfig, + accountsConfig, + defaultAccountConfigurationProperties + ) + + when: + loader.load() + + then: + accountsConfig.getAccounts().size() == 1 + with (accountsConfig.getAccounts().first()) { Account account -> + account.name == "default" + account.environment == "default" + account.accountType == "default" + } + + credentialsConfig.getDefaultRegions().size() == 4 + System.getProperty(SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY) == "accessKey" + System.getProperty(SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY) == "secret" + } + + @Unroll("should load and parse a large number of accounts having different regions when default regions: #defaultRegionsInConfig are specified in the config and with multi-threading: #multiThreadingEnabled") + def 'should load and parse a large number of accounts having different regions'() { + setup: + def credentialsRepository = new MapBackedCredentialsRepository(AmazonCloudProvider.ID, null) + + // create 500 accounts having a mix of regions. 
Some will have regions that match default regions, some will + // have regions that don't match default regions, and some will not have regions at all + List<Account> accounts = new ArrayList<>() + for (number in 0..499) { + Account account = new Account(name: 'prod' + number, accountId: number) + if (number == 0) { + // test an account having a region that matches one of the default regions + account.setRegions([ + new CredentialsConfig.Region(name: 'us-west-2') + ]) + } else if (number == 100) { + // test an account whose region should not already be in the region cache + account.setRegions([ + new CredentialsConfig.Region(name: 'ap-southeast-1') + ]) + } else if (number == 200 || number == 400) { + // test accounts with a region that is not yet in the region cache when account number 200 is processed, + // but should be by the time account number 400 is + account.setRegions([ + new CredentialsConfig.Region(name: 'ap-southeast-1'), + new CredentialsConfig.Region(name: 'ap-southeast-2'), + new CredentialsConfig.Region(name: 'us-west-2') + ]) + } + + // all other accounts end up using the default regions from the credentials config in this case. This is + // to test that with multi-threading enabled, we don't run into ConcurrentModificationException errors when + // sorting these regions per account + accounts.add(account) + } + AccountsConfiguration accountsConfig = new AccountsConfiguration(accounts: accounts) + + AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) + AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) + + CredentialsConfig credentialsConfig = new CredentialsConfig(){{ + setAccessKeyId("accessKey") + setSecretAccessKey("secret") + }} + + credentialsConfig.loadAccounts.setMultiThreadingEnabled(multiThreadingEnabled) + + credentialsConfig.setDefaultRegions( + defaultRegionsInConfig.stream() + .map( + { it -> + new CredentialsConfig.Region() { + { + setName(it) + } + } + }) + .collect(Collectors.toList())) + + CredentialsDefinitionSource<Account> amazonCredentialsSource = { -> accountsConfig.getAccounts() } as CredentialsDefinitionSource<Account> + AmazonCredentialsParser<Account, NetflixAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAmazonCredentials.class, credentialsConfig, accountsConfig) + def loader = new AmazonBasicCredentialsLoader( + amazonCredentialsSource, ci, credentialsRepository, credentialsConfig, accountsConfig, defaultAccountConfigurationProperties + ) + + when: + loader.load() + + then: + // verify invocations to list regions + if (defaultRegionsInConfig.isEmpty()) { + // just the one call to load all the regions will be made in the absence of any default regions in the config + 1 * lookup.listRegions() >> [ + new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1a', 'us-east-1b']), + new AmazonCredentials.AWSRegion('us-west-2', ['us-west-2a']), + new AmazonCredentials.AWSRegion('ap-southeast-1', ['ap-southeast-1a', 'ap-southeast-1b']), + new AmazonCredentials.AWSRegion('ap-southeast-2', ['ap-southeast-2a', 'ap-southeast-2b']) + ] + } else { + 1 * lookup.listRegions(['us-east-1', 'us-west-2']) >> [ + new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1a', 'us-east-1b']), + new AmazonCredentials.AWSRegion('us-west-2', ['us-west-2a']), + ] + if (multiThreadingEnabled) { + // in a multi-threaded setup, the thread processing account 200 may run + // before the thread processing account 100. At that point, it may not find + // 'ap-southeast-1' in the cache, so it could make a list regions call to look up both + // 'ap-southeast-1' and 'ap-southeast-2'.
This will, in turn, determine if a list regions call + // with just 'ap-southeast-1' or 'ap-southeast-2' gets invoked or not. Hence, we have the + // cardinality of these calls specified in a range. + (0..1) * lookup.listRegions(['ap-southeast-1', 'ap-southeast-2']) >> [ + new AmazonCredentials.AWSRegion('ap-southeast-1', ['ap-southeast-1a', 'ap-southeast-1b']), + new AmazonCredentials.AWSRegion('ap-southeast-2', ['ap-southeast-2a', 'ap-southeast-2b']) + ] + + (0..1) * lookup.listRegions(['ap-southeast-1']) >> [ + new AmazonCredentials.AWSRegion('ap-southeast-1', ['ap-southeast-1a', 'ap-southeast-1b']) + ] + + (0..1) * lookup.listRegions(['ap-southeast-2']) >> [ + new AmazonCredentials.AWSRegion('ap-southeast-2', ['ap-southeast-2a', 'ap-southeast-2b']) + ] + } else { + // in a non-multi-threaded setup, we are sure that these calls should only be invoked once. + 1 * lookup.listRegions(['ap-southeast-1']) >> [ + new AmazonCredentials.AWSRegion('ap-southeast-1', ['ap-southeast-1a', 'ap-southeast-1b']) + ] + + 1 * lookup.listRegions(['ap-southeast-2']) >> [ + new AmazonCredentials.AWSRegion('ap-southeast-2', ['ap-southeast-2a', 'ap-southeast-2b']) + ] + } + + } + + 0 * lookup.listRegions + + // verify accounts + accountsConfig.getAccounts().size() == 500 + + // verify we have saved 500 accounts in the credentials repository + credentialsRepository.getAll().size() == 500 + + // test an account that has 1 region which is a default region + with (accountsConfig.getAccounts().first()) { Account account -> + account.name == "prod0" + account.environment == "prod0" + account.accountType == "prod0" + account.regions.size() == 1 + account.regions.first().name == 'us-west-2' + account.regions.first().availabilityZones.toList().sort() == ['us-west-2a'] + } + + // test an account that has 1 region which is not a default region + with (accountsConfig.getAccounts().get(100)) { Account account -> + account.name == "prod100" + account.environment == "prod100" + account.accountType == "prod100" + account.regions.size() == 1 + account.regions.first().name == 'ap-southeast-1' + account.regions.first().availabilityZones.toList().sort() == ['ap-southeast-1a', 'ap-southeast-1b'] + } + + // test an account that has multiple regions + with (accountsConfig.getAccounts().get(200)) { Account account -> + account.name == "prod200" + account.environment == "prod200" + account.accountType == "prod200" + account.regions.size() == 3 + account.regions.find { it.name == 'ap-southeast-1' }.availabilityZones.size() == 2 + (!account.regions.find { it.name == 'ap-southeast-1' }.deprecated) + account.regions.find { it.name == 'ap-southeast-2' }.availabilityZones.size() == 2 + (!account.regions.find { it.name == 'ap-southeast-2' }.deprecated) + account.regions.find { it.name == 'us-west-2' }.availabilityZones.size() == 1 + (!account.regions.find { it.name == 'us-west-2' }.deprecated) + } + + // test an account that did not have any default regions specified in the account definition. 
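+      // (accounts other than 0, 100, 200 and 400 never get regions set in the loop above, so this is the common case)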
+ // It should use the defaults created for the accounts based on what was set as the default + // in the credentials config + with (accountsConfig.getAccounts().last()) { Account account -> + account.name == "prod499" + account.environment == "prod499" + account.accountType == "prod499" + if (defaultRegionsInConfig.isEmpty()) { + account.regions.size() == 4 + account.regions.find { it.name == 'us-east-1' }.availabilityZones.size() == 2 + (!account.regions.find { it.name == 'us-east-1' }.deprecated) + account.regions.find { it.name == 'ap-southeast-1' }.availabilityZones.size() == 2 + (!account.regions.find { it.name == 'ap-southeast-1' }.deprecated) + account.regions.find { it.name == 'ap-southeast-2' }.availabilityZones.size() == 2 + (!account.regions.find { it.name == 'ap-southeast-2' }.deprecated) + account.regions.find { it.name == 'us-west-2' }.availabilityZones.size() == 1 + (!account.regions.find { it.name == 'us-west-2' }.deprecated) + } else { + account.regions.size() == 2 + account.regions.find { it.name == 'us-east-1' }.availabilityZones.size() == 2 + (!account.regions.find { it.name == 'us-east-1' }.deprecated) + account.regions.find { it.name == 'us-west-2' }.availabilityZones.size() == 1 + (!account.regions.find { it.name == 'us-west-2' }.deprecated) + } + } + + where: + multiThreadingEnabled | defaultRegionsInConfig + true | ['us-east-1','us-west-2'] + false | ['us-east-1','us-west-2'] + true | [] + false | [] + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandlerSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandlerSpec.groovy new file mode 100644 index 00000000000..52a20a14fd9 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/AmazonCredentialsLifecycleHandlerSpec.groovy @@ -0,0 +1,178 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.aws.security + +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.AccountAttribute +import com.amazonaws.services.ec2.model.AccountAttributeValue +import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.agent.AgentProvider +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.edda.EddaApiFactory +import com.netflix.spinnaker.clouddriver.aws.provider.AwsCleanupProvider +import com.netflix.spinnaker.clouddriver.aws.provider.AwsInfrastructureProvider +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider +import com.netflix.spinnaker.clouddriver.aws.provider.agent.ImageCachingAgent +import com.netflix.spinnaker.clouddriver.aws.provider.agent.ReservationReportCachingAgent +import com.netflix.spinnaker.config.AwsConfiguration +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import spock.lang.Specification + +import java.util.concurrent.ExecutorService +import java.util.stream.Collectors + +class AmazonCredentialsLifecycleHandlerSpec extends Specification { + AwsCleanupProvider awsCleanupProvider + AwsInfrastructureProvider awsInfrastructureProvider + AwsProvider awsProvider + Optional<Collection<AgentProvider>> agentProviders = Optional.empty() + def amazonCloudProvider = new AmazonCloudProvider() + def registry = new DefaultRegistry() + def eddaApiFactory = new EddaApiFactory() + def dynamicConfigService = Mock(DynamicConfigService) { + isEnabled("aws.features.cloud-formation", false) >> false + isEnabled("aws.features.launch-templates", false) >> false + } + def objectMapper = new ObjectMapper() + def credOne = TestCredential.named('one') + def credTwo = TestCredential.named('two') + def credThree = TestCredential.named('three') + def credentialsRepository = Mock(CredentialsRepository) { + getAll() >> [credOne, credTwo] + } + Optional<ExecutorService> reservationReportPool = Optional.of( + Mock(ExecutorService) + ) + def deployDefaults = new AwsConfiguration.DeployDefaults() + + def awsConfigurationProperties = new AwsConfigurationProperties() + + def setup() { + awsCleanupProvider = new AwsCleanupProvider() + awsInfrastructureProvider = new AwsInfrastructureProvider() + awsProvider = new AwsProvider(credentialsRepository) + } + + + def 'it should replace current public image caching agent'() { + def imageCachingAgentOne = new ImageCachingAgent(null, credOne, "us-east-1", objectMapper, null, true, null) + def imageCachingAgentTwo = new ImageCachingAgent(null, credTwo, "us-east-1", objectMapper, null, false, null) + awsProvider.addAgents([imageCachingAgentOne, imageCachingAgentTwo]) + def handler = new AmazonCredentialsLifecycleHandler(awsCleanupProvider, awsInfrastructureProvider, awsProvider, + null, null, null, null, objectMapper, null, null, null, null, null, null, null, null, null, null, + credentialsRepository) + + when: + handler.credentialsDeleted(credOne) + + then: + awsProvider.getAgents().stream() + .filter({ agent -> agent.handlesAccount("two")}) + .filter({ agent -> ((ImageCachingAgent) agent).getIncludePublicImages() }) + .collect(Collectors.toList()).size() == 1 + } + + def 'it should remove region
not used by public image caching agent'() { + def imageCachingAgentOne = new ImageCachingAgent(null, credOne, "us-west-2", objectMapper, null, true, null) + def imageCachingAgentTwo = new ImageCachingAgent(null, credTwo, "us-east-1", objectMapper, null, false, null) + awsProvider.addAgents([imageCachingAgentOne, imageCachingAgentTwo]) + def handler = new AmazonCredentialsLifecycleHandler(awsCleanupProvider, awsInfrastructureProvider, awsProvider, + null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, + credentialsRepository) + handler.publicRegions.add("us-west-2") + + when: + handler.credentialsDeleted(credOne) + + then: + !handler.publicRegions.contains("us-west-2") + } + + def 'it should add agents'() { + def amazonEC2 = Mock(AmazonEC2) + def amazonClientProvider = Mock(AmazonClientProvider) { + getAmazonEC2(_, _) >> amazonEC2 + } + def handler = new AmazonCredentialsLifecycleHandler(awsCleanupProvider, awsInfrastructureProvider, awsProvider, + amazonCloudProvider, amazonClientProvider, null, awsConfigurationProperties, objectMapper, null, eddaApiFactory, null, registry, reservationReportPool, agentProviders, null, null, dynamicConfigService, deployDefaults, + credentialsRepository) + def credThree = TestCredential.named('three') + + when: + handler.credentialsAdded(credThree) + + then: + awsInfrastructureProvider.getAgents().size() == 12 + awsProvider.getAgents().size() == 22 + handler.publicRegions.size() == 2 + handler.awsInfraRegions.size() == 2 + handler.reservationReportCachingAgentScheduled + def reservationReportCachingAgent = awsProvider.getAgents().stream() + .filter({ agent -> agent instanceof ReservationReportCachingAgent }) + .map({ agent -> (ReservationReportCachingAgent) agent }) + .findFirst().get() + } + + def 'subsequent call should not add reservation caching agents'() { + def handler = new AmazonCredentialsLifecycleHandler(awsCleanupProvider, awsInfrastructureProvider, awsProvider, + amazonCloudProvider, null, null, awsConfigurationProperties, objectMapper, null, eddaApiFactory, null, registry, reservationReportPool, agentProviders, null, null, dynamicConfigService, deployDefaults, + credentialsRepository) + def credThree = TestCredential.named('three') + handler.reservationReportCachingAgentScheduled = true + + when: + handler.credentialsAdded(credThree) + + then: + awsProvider.getAgents().stream().filter({ agent -> agent instanceof ReservationReportCachingAgent }) + .collect(Collectors.toList()).isEmpty() + handler.reservationReportCachingAgentScheduled + } + + def 'account should be removed from reservation agent'() { + def amazonEC2 = Mock(AmazonEC2) { + describeAccountAttributes(_) >> new DescribeAccountAttributesResult().withAccountAttributes( + new AccountAttribute().withAttributeName("supported-platforms").withAttributeValues( + new AccountAttributeValue().withAttributeValue("VPC") + )) + } + def amazonClientProvider = Mock(AmazonClientProvider) { + getAmazonEC2(_, _) >> amazonEC2 + } + def handler = new AmazonCredentialsLifecycleHandler(awsCleanupProvider, awsInfrastructureProvider, awsProvider, + amazonCloudProvider, amazonClientProvider, null, awsConfigurationProperties, objectMapper, null, eddaApiFactory, null, registry, reservationReportPool, agentProviders, null, null, dynamicConfigService, deployDefaults, + credentialsRepository) + def credThree = TestCredential.named('three') + handler.credentialsAdded(credThree) + + when: + handler.credentialsDeleted(credThree) + + then: + 
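+      // note: the reservation report agent is shared across accounts, so deleting one account's credentials should leave it scheduled; presumably the deleted account is only removed from the agent's scope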
handler.reservationReportCachingAgentScheduled + def reservationReportCachingAgent = awsProvider.getAgents().stream() + .filter({ agent -> agent instanceof ReservationReportCachingAgent }) + .map({ agent -> (ReservationReportCachingAgent) agent }) + .findFirst().get() + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoaderSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoaderSpec.groovy index 26ec1b1f569..3bad723fa1f 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoaderSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/CredentialsLoaderSpec.groovy @@ -18,10 +18,11 @@ package com.netflix.spinnaker.clouddriver.aws.security.config import com.amazonaws.auth.AWSCredentialsProvider import com.netflix.spinnaker.clouddriver.aws.security.AWSAccountInfoLookup +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.security.AssumeRoleAmazonCredentials import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig.Account +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration.Account import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig.LifecycleHook import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig.Region import spock.lang.Specification @@ -37,20 +38,22 @@ class CredentialsLoaderSpec extends Specification { defaultEddaTemplate: 'http://edda-main.%s.{{name}}.netflix.net', defaultFront50Template: 'http://front50.prod.netflix.net/{{name}}', defaultDiscoveryTemplate: 'http://%s.discovery{{name}}.netflix.net', - defaultAssumeRole: 'role/asgard', - accounts: [ - new Account(name: 'test', accountId: 12345, regions: [ - new Region(name: 'us-west-2', deprecated: true) - ]), - new Account(name: 'prod', accountId: 67890) - ] + defaultAssumeRole: 'role/asgard' ) + def accountsConfig = new AccountsConfiguration( accounts: [ + new Account(name: 'test', accountId: 12345, regions: [ + new Region(name: 'us-west-2', deprecated: true) + ]), + new Account(name: 'prod', accountId: 67890) + ]) + AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) - AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - CredentialsLoader<AmazonCredentials> ci = new CredentialsLoader<>(provider, lookup, AmazonCredentials) + AmazonClientProvider amazonClientProvider = Mock(AmazonClientProvider) + AmazonCredentialsParser<Account, NetflixAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, amazonClientProvider, NetflixAmazonCredentials.class, config, accountsConfig) when: - List<AmazonCredentials> creds = ci.load(config) + List<NetflixAmazonCredentials> creds = ci.load(config) then: creds.size() == 2 @@ -77,13 +80,16 @@ class CredentialsLoaderSpec extends Specification { def 'account resolves defaults'() { setup: - def config = new CredentialsConfig(accounts: [new Account(name: 'default')]) + def config = new CredentialsConfig() + def accountsConfig = new AccountsConfiguration(accounts: [new Account(name: 'default')]) + AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - def ci = new
CredentialsLoader<AmazonCredentials>(provider, lookup, AmazonCredentials) + AmazonCredentialsParser<Account, NetflixAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAmazonCredentials.class, config, accountsConfig) when: - List<AmazonCredentials> creds = ci.load(config) + List<NetflixAmazonCredentials> creds = ci.load(config) then: 1 * lookup.findAccountId() >> 696969 @@ -100,13 +106,17 @@ class CredentialsLoaderSpec extends Specification { } 0 * _ } def 'availibilityZones are resolved in default regions only once'() { setup: - def config = new CredentialsConfig(defaultRegions: [new Region(name: 'us-east-1'), new Region(name: 'us-west-2')], accounts: [new Account(name: 'default', accountId: 1), new Account(name: 'other', accountId: 2)]) + def config = new CredentialsConfig(defaultRegions: [new Region(name: 'us-east-1'), new Region(name: 'us-west-2')]) + def accountsConfig = new AccountsConfiguration(accounts: [ + new Account(name: 'default', accountId: 1), new Account(name: 'other', accountId: 2) + ]) AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - CredentialsLoader<AmazonCredentials> ci = new CredentialsLoader<>(provider, lookup, AmazonCredentials) + AmazonCredentialsParser<Account, NetflixAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAmazonCredentials.class, config, accountsConfig) when: List creds = ci.load(config) @@ -122,16 +132,18 @@ class CredentialsLoaderSpec extends Specification { } def 'availabilityZones are resolved for account-specific region if not defined in defaults'() { - def config = new CredentialsConfig( - defaultRegions: [new Region(name: 'us-east-1')], - accounts: [ - new Account( - name: 'default', - accountId: 1, - regions: [ new Region(name: 'us-west-2')])]) + def config = new CredentialsConfig(defaultRegions: [new Region(name: 'us-east-1')]) + + def accountsConfig = new AccountsConfiguration(accounts: [ + new Account( + name: 'default', + accountId: 1, + regions: [ new Region(name: 'us-west-2')])] + ) AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - CredentialsLoader<AmazonCredentials> ci = new CredentialsLoader<>(provider, lookup, AmazonCredentials) + AmazonCredentialsParser<Account, NetflixAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAmazonCredentials.class, config, accountsConfig) when: List creds = ci.load(config) @@ -158,27 +170,28 @@ class CredentialsLoaderSpec extends Specification { defaultDiscoveryTemplate: 'http://%s.discovery{{name}}.netflix.net', defaultAssumeRole: 'role/asgard', defaultLifecycleHookRoleARNTemplate: 'arn:aws:iam::{{accountId}}:role/my-notification-role', - defaultLifecycleHookNotificationTargetARNTemplate: 'arn:aws:sns:{{region}}:{{accountId}}:my-sns-topic', - accounts: [ - new Account( - name: 'test', - accountId: 12345, - regions: [new Region(name: 'us-west-1', availabilityZones: ['us-west-1a'])], - discovery: 'us-west-1.discoveryqa.netflix.net', - eddaEnabled: false, - defaultKeyPair: 'oss-{{accountId}}-keypair', - lifecycleHooks: [ - new LifecycleHook( - lifecycleTransition: 'autoscaling:EC2_INSTANCE_TERMINATING', - heartbeatTimeout: 1800, - defaultResult: 'CONTINUE' - ) - ]) - ] + defaultLifecycleHookNotificationTargetARNTemplate: 'arn:aws:sns:{{region}}:{{accountId}}:my-sns-topic' ) + def accountsConfig = new AccountsConfiguration(accounts: [ + new Account( + name: 'test', + accountId: 12345, + regions: [new Region(name: 'us-west-1', availabilityZones: ['us-west-1a'])], + discovery: 'us-west-1.discoveryqa.netflix.net', + eddaEnabled: false, + defaultKeyPair:
'oss-{{accountId}}-keypair', + lifecycleHooks: [ + new LifecycleHook( + lifecycleTransition: 'autoscaling:EC2_INSTANCE_TERMINATING', + heartbeatTimeout: 1800, + defaultResult: 'CONTINUE' + ) + ]) + ]) AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - CredentialsLoader<NetflixAmazonCredentials> ci = new CredentialsLoader<>(provider, lookup, NetflixAmazonCredentials) + AmazonCredentialsParser<Account, NetflixAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAmazonCredentials.class, config, accountsConfig) when: List creds = ci.load(config) @@ -209,36 +222,16 @@ class CredentialsLoaderSpec extends Specification { 0 * _ } - def 'create single default account'() { - setup: - AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) - AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - CredentialsLoader<NetflixAmazonCredentials> ci = new CredentialsLoader<>(provider, lookup, NetflixAmazonCredentials) - - when: - NetflixAmazonCredentials cred = ci.load('default') - - then: - 1 * lookup.findAccountId() >> 12345 - 1 * lookup.listRegions() >> [new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1a', 'us-east-1b'])] - cred.name == 'default' - cred.regions.size() == 1 - cred.regions.first().name == 'us-east-1' - cred.regions.first().availabilityZones.toList().sort() == ['us-east-1a', 'us-east-1b'] - !cred.discoveryEnabled - !cred.eddaEnabled - !cred.front50Enabled - cred.lifecycleHooks.size() == 0 - } - def 'accountId must be provided for assumeRole account types'() { setup: def config = new CredentialsConfig( - defaultRegions: [new Region(name: 'us-east-1', availabilityZones: ['us-east-1a'])], - accounts: [new Account(name: 'gonnaFail')]) + defaultRegions: [new Region(name: 'us-east-1', availabilityZones: ['us-east-1a'])]) + + def accountsConfig = new AccountsConfiguration(accounts: [new Account(name: 'gonnaFail')]) AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) - CredentialsLoader<AssumeRoleAmazonCredentials> ci = new CredentialsLoader<>(provider, lookup, AssumeRoleAmazonCredentials) + AmazonCredentialsParser<Account, NetflixAssumeRoleAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAssumeRoleAmazonCredentials.class, config, accountsConfig) when: ci.load(config) @@ -248,4 +241,104 @@ class CredentialsLoaderSpec extends Specification { ex.getMessage().startsWith'accountId is required' 0 * _ } + + def 'assumeRole account type overrides defaults'() { + setup: + def config = new CredentialsConfig(defaultRegions: [ + new Region(name: 'us-west-2', availabilityZones: ['us-west-2a', 'us-west-2b'])], + defaultKeyPairTemplate: 'nf-{{name}}-keypair-a', + defaultAssumeRole: 'role/asgard', + defaultSessionName: 'spinnaker', + defaultLifecycleHookRoleARNTemplate: 'arn:aws:iam::{{accountId}}:role/my-notification-role', + defaultLifecycleHookNotificationTargetARNTemplate: 'arn:aws:sns:{{region}}:{{accountId}}:my-sns-topic' + ) + + def accountsConfig = new AccountsConfiguration(accounts: [ + new Account( + name: 'test', + accountId: 12345, + regions: [new Region(name: 'us-west-1', availabilityZones: ['us-west-1a'])], + defaultKeyPair: 'oss-{{accountId}}-keypair', + assumeRole: 'role/spinnakerManaged', + externalId: '56789', + sessionName: 'spinnakerManaged', + lifecycleHooks: [ + new LifecycleHook( + lifecycleTransition: 'autoscaling:EC2_INSTANCE_TERMINATING', + heartbeatTimeout: 1800, + defaultResult: 'CONTINUE' + ) + ]) + ]) + + AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) + AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) +
AmazonCredentialsParser<Account, NetflixAssumeRoleAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAssumeRoleAmazonCredentials.class, config, accountsConfig) + + when: + List<NetflixAssumeRoleAmazonCredentials> creds = ci.load(config) + + then: + creds.size() == 1 + with(creds.first()) { NetflixAssumeRoleAmazonCredentials cred -> + cred.name == 'test' + cred.accountId == "12345" + cred.defaultKeyPair == 'oss-12345-keypair' + cred.regions.size() == 1 + cred.regions.first().name == 'us-west-1' + cred.regions.first().availabilityZones == ['us-west-1a'] + cred.assumeRole == 'role/spinnakerManaged' + cred.externalId == '56789' + cred.sessionName == 'spinnakerManaged' + cred.lifecycleHooks.size() == 1 + cred.lifecycleHooks.first().roleARN == 'arn:aws:iam::12345:role/my-notification-role' + cred.lifecycleHooks.first().notificationTargetARN == 'arn:aws:sns:{{region}}:12345:my-sns-topic' + cred.lifecycleHooks.first().lifecycleTransition == 'autoscaling:EC2_INSTANCE_TERMINATING' + cred.lifecycleHooks.first().heartbeatTimeout == 1800 + cred.lifecycleHooks.first().defaultResult == 'CONTINUE' + } + 0 * _ + } + + def 'assumeRole account type test with defaults'() { + setup: + def config = new CredentialsConfig(defaultRegions: [ + new Region(name: 'us-east-1', availabilityZones: ['us-east-1c', 'us-east-1d', 'us-east-1e']), + new Region(name: 'us-west-2', availabilityZones: ['us-west-2a', 'us-west-2b'])], + defaultKeyPairTemplate: 'nf-{{name}}-keypair-a', + defaultEddaTemplate: 'http://edda-main.%s.{{name}}.netflix.net', + defaultFront50Template: 'http://front50.prod.netflix.net/{{name}}', + defaultDiscoveryTemplate: 'http://%s.discovery{{name}}.netflix.net', + defaultAssumeRole: 'role/asgard', + defaultSessionName: 'spinnaker' + ) + + def accountsConfig = new AccountsConfiguration(accounts: [ + new Account(name: 'prod', accountId: 67890) + ]) + AWSCredentialsProvider provider = Mock(AWSCredentialsProvider) + AWSAccountInfoLookup lookup = Mock(AWSAccountInfoLookup) + AmazonCredentialsParser<Account, NetflixAssumeRoleAmazonCredentials> ci = new AmazonCredentialsParser<>( + provider, lookup, NetflixAssumeRoleAmazonCredentials.class, config, accountsConfig) + + when: + List<NetflixAssumeRoleAmazonCredentials> creds = ci.load(config) + + then: + creds.size() == 1 + with(creds.find { it.name == 'prod' }) { NetflixAssumeRoleAmazonCredentials cred -> + cred.accountId == "67890" + cred.defaultKeyPair == 'nf-prod-keypair-a' + cred.regions.size() == 2 + cred.regions.find { it.name == 'us-east-1' }.availabilityZones.size() == 3 + cred.regions.find { it.name == 'us-east-1' }.deprecated == false + cred.regions.find { it.name == 'us-west-2' }.availabilityZones.size() == 2 + cred.regions.find { it.name == 'us-west-2' }.deprecated == false + cred.assumeRole == 'role/asgard' + cred.externalId == null + cred.sessionName == 'spinnaker' + } + 0 * _ + } } diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/StringTemplaterSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/StringTemplaterSpec.groovy index 1f5a62eb3bc..a894e64991c 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/StringTemplaterSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/security/config/StringTemplaterSpec.groovy @@ -22,7 +22,7 @@ class StringTemplaterSpec extends Specification { def 'it should work'(String template, Map params, String expected) { expect: - CredentialsLoader.StringTemplater.render(template, params) == + AmazonCredentialsParser.StringTemplater.render(template, params) ==
expected where: template | params || expected diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/LaunchTemplateServiceSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/LaunchTemplateServiceSpec.groovy new file mode 100644 index 00000000000..c2c4a4738c9 --- /dev/null +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/LaunchTemplateServiceSpec.groovy @@ -0,0 +1,435 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.aws.services + +import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest +import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionRequest +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionResult +import com.amazonaws.services.ec2.model.CreditSpecification +import com.amazonaws.services.ec2.model.CreditSpecificationRequest +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsRequest +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResponseErrorItem +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResponseSuccessItem +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResult +import com.amazonaws.services.ec2.model.LaunchTemplate +import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMapping +import com.amazonaws.services.ec2.model.LaunchTemplateBlockDeviceMappingRequest +import com.amazonaws.services.ec2.model.LaunchTemplateEbsBlockDevice +import com.amazonaws.services.ec2.model.LaunchTemplateEbsBlockDeviceRequest +import com.amazonaws.services.ec2.model.LaunchTemplateEnclaveOptions +import com.amazonaws.services.ec2.model.LaunchTemplateEnclaveOptionsRequest +import com.amazonaws.services.ec2.model.LaunchTemplateIamInstanceProfileSpecification +import com.amazonaws.services.ec2.model.LaunchTemplateIamInstanceProfileSpecificationRequest +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMarketOptions +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMarketOptionsRequest +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMetadataOptions +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceMetadataOptionsRequest +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceNetworkInterfaceSpecification +import com.amazonaws.services.ec2.model.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest +import com.amazonaws.services.ec2.model.LaunchTemplateSpotMarketOptions +import com.amazonaws.services.ec2.model.LaunchTemplateSpotMarketOptionsRequest +import com.amazonaws.services.ec2.model.LaunchTemplateTagSpecificationRequest +import com.amazonaws.services.ec2.model.LaunchTemplateVersion +import com.amazonaws.services.ec2.model.LaunchTemplatesMonitoring +import com.amazonaws.services.ec2.model.LaunchTemplatesMonitoringRequest +import 
com.amazonaws.services.ec2.model.RequestLaunchTemplateData +import com.amazonaws.services.ec2.model.ResponseError +import com.amazonaws.services.ec2.model.ResponseLaunchTemplateData +import com.amazonaws.services.ec2.AmazonEC2 +import com.amazonaws.services.ec2.model.Tag +import com.netflix.spinnaker.clouddriver.aws.TestCredential +import com.netflix.spinnaker.clouddriver.aws.deploy.AmazonResourceTagger +import com.netflix.spinnaker.clouddriver.aws.deploy.DefaultAmazonResourceTagger +import com.netflix.spinnaker.clouddriver.aws.deploy.asg.AutoScalingWorker +import com.netflix.spinnaker.clouddriver.aws.deploy.description.ModifyServerGroupLaunchTemplateDescription +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.LocalFileUserDataProperties +import com.netflix.spinnaker.clouddriver.aws.deploy.userdata.UserDataProviderAggregator +import com.netflix.spinnaker.clouddriver.aws.model.AmazonBlockDevice +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class LaunchTemplateServiceSpec extends Specification { + private static final String LT_ID_1 = "lt-1" + private static final String USER_DATA_STR = "my-userdata" + + def mockEc2 = Mock(AmazonEC2) + def mockUserDataAggregator = Mock(UserDataProviderAggregator) + + @Shared + NetflixAmazonCredentials testCredentials = TestCredential.named('test') + + @Subject + @Shared + def launchTemplateService + + def setup() { + mockUserDataAggregator.aggregate(_) >> USER_DATA_STR + + launchTemplateService = new LaunchTemplateService( + mockEc2, + mockUserDataAggregator, + Mock(LocalFileUserDataProperties), + null + ) + } + + @Unroll + void 'should match ebs encryption'() { + when: + def result = launchTemplateService.getLaunchTemplateEbsBlockDeviceRequest(blockDevice) + + then: + result.getEncrypted() == encrypted && result.getKmsKeyId() == kmsKeyId + + where: + blockDevice | encrypted | kmsKeyId + new AmazonBlockDevice() | null | null + new AmazonBlockDevice(encrypted: true) | true | null + new AmazonBlockDevice(encrypted: true, kmsKeyId: "xxx") | true | "xxx" + } + + @Unroll + void 'matches throughput'() { + when: + def result = launchTemplateService.getLaunchTemplateEbsBlockDeviceRequest(blockDevice) + + then: + result.getThroughput() == blockDevice.getThroughput() + + where: + blockDevice | _ + new AmazonBlockDevice(throughput: 250) | _ + } + + @Unroll + void 'should generate volume tags'() { + given: + launchTemplateService = new LaunchTemplateService( + mockEc2, + mockUserDataAggregator, + Mock(LocalFileUserDataProperties), + Collections.singletonList( + new DefaultAmazonResourceTagger("spinnaker:application", "spinnaker:cluster") + )) + + expect: + launchTemplateService.tagSpecification( + amazonResourceTaggers, + ["blockKey": "blockValue"], + "application-stack-details-v001" + ) == result + + where: + amazonResourceTaggers << [ + null, + [], + [new AmazonResourceTagger() {}], + [new DefaultAmazonResourceTagger("spinnaker:application", "spinnaker:cluster")] + ] + result << [ + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of( + new LaunchTemplateTagSpecificationRequest() + .withResourceType("volume") + .withTags([ + new Tag("spinnaker:application", "application"), + new Tag("spinnaker:cluster", "application-stack-details"), + new Tag("blockKey", "blockValue") + ]) + ) + ] + } + + @Unroll + void 'should create launch template data with expected configuration, for create 
operation'() { + given: + def asgConfig = AutoScalingWorker.AsgConfiguration.builder() + .setLaunchTemplate(true) + .credentials(testCredentials) + .legacyUdf(false) + .application("myasg-001") + .region("us-east-1") + .minInstances(1) + .maxInstances(3) + .desiredInstances(2) + .instanceType("some.type.medium") + .securityGroups(["my-sg"]) + .ami("ami-1") + .kernelId("kernel-id-1") + .ramdiskId("ramdisk-id-1") + .ebsOptimized(true) + .keyPair("my-key-name") + .iamRole("my-iam-role") + .instanceMonitoring(true) + .base64UserData(USER_DATA_STR) + .requireIMDSv2(true) + .spotMaxPrice("0.5") + .unlimitedCpuCredits(true) + .associatePublicIpAddress(true) + .associateIPv6Address(true) + .blockDevices([new AmazonBlockDevice(deviceName: "/dev/sdb", size: 40, volumeType: "standard")]) + .enableEnclave(true) + .spotAllocationStrategy(spotAllocationStrategy) + .build() + + def expectedLtDataInReq = new RequestLaunchTemplateData( + imageId: "ami-1", + kernelId: "kernel-id-1", + instanceType: "some.type.medium", + ramDiskId: "ramdisk-id-1", + ebsOptimized: true, + keyName: "my-key-name", + iamInstanceProfile: new LaunchTemplateIamInstanceProfileSpecificationRequest().withName("my-iam-role"), + monitoring: new LaunchTemplatesMonitoringRequest().withEnabled(true), + userData: USER_DATA_STR, + metadataOptions: new LaunchTemplateInstanceMetadataOptionsRequest().withHttpTokens("required"), + instanceMarketOptions: + setSpotOptions + ? new LaunchTemplateInstanceMarketOptionsRequest().withMarketType("spot").withSpotOptions(new LaunchTemplateSpotMarketOptionsRequest().withMaxPrice("0.5")) + : null, + creditSpecification: new CreditSpecificationRequest().withCpuCredits("unlimited"), + networkInterfaces: [ + new LaunchTemplateInstanceNetworkInterfaceSpecificationRequest( + deviceIndex: 0, + groups: ["my-sg"], + associatePublicIpAddress: true, + ipv6AddressCount: 1 + ) + ], + blockDeviceMappings: [ + new LaunchTemplateBlockDeviceMappingRequest( + deviceName: "/dev/sdb", + ebs: new LaunchTemplateEbsBlockDeviceRequest(volumeSize: 40, volumeType: "standard") + ) + ], + enclaveOptions: new LaunchTemplateEnclaveOptionsRequest().withEnabled(true) + ) + + when: + launchTemplateService.createLaunchTemplate(asgConfig, "myasg-001", "my-lt-001") + + then: + 1 * mockEc2.createLaunchTemplate(_ as CreateLaunchTemplateRequest) >> { arguments -> + // assert arguments passed and return dummy result + CreateLaunchTemplateRequest reqInArg = arguments[0] + assert reqInArg.launchTemplateName == "my-lt-001" && reqInArg.launchTemplateData == expectedLtDataInReq ; new CreateLaunchTemplateResult() + .withLaunchTemplate(new LaunchTemplate( + launchTemplateId: LT_ID_1, + launchTemplateName: "my-lt-001", + defaultVersionNumber: 1L, + latestVersionNumber: 1L)) + } + + where: + spotAllocationStrategy|| setSpotOptions + "capacity-optimized" || false + null || true + } + + @Unroll + void 'should generate launch template data for modify operation, with precedence given to description values first and then to source version values'() { + given: + def modifyDesc = new ModifyServerGroupLaunchTemplateDescription( + region: "us-east-1", + asgName: "myasg", + amiName: "ami-1", + credentials: testCredentials, + spotPrice: maxSpotPrice, + instanceType: instanceType, + securityGroups: secGroupsInDesc, + ) + + def srcLtVersionDataRespWithSpotOptions = new ResponseLaunchTemplateData( + imageId: "ami-1", + kernelId: "kernel-id-1", + instanceType: "t2.large", + ramDiskId: "ramdisk-id-1", + ebsOptimized: true, + keyName: "my-key-name", + iamInstanceProfile: 
new LaunchTemplateIamInstanceProfileSpecification().withName("my-iam-role"), + monitoring: new LaunchTemplatesMonitoring().withEnabled(true), + userData: USER_DATA_STR, + metadataOptions: new LaunchTemplateInstanceMetadataOptions().withHttpTokens("required"), + instanceMarketOptions: new LaunchTemplateInstanceMarketOptions().withMarketType("spot").withSpotOptions(new LaunchTemplateSpotMarketOptions().withMaxPrice("0.5")), + creditSpecification: new CreditSpecification().withCpuCredits("standard"), + networkInterfaces: [ + new LaunchTemplateInstanceNetworkInterfaceSpecification( + deviceIndex: 0, + groups: secGroupsInSrc, + associatePublicIpAddress: true, + ipv6AddressCount: 1 + ) + ], + blockDeviceMappings: [ + new LaunchTemplateBlockDeviceMapping( + deviceName: "/dev/sdb", + ebs: new LaunchTemplateEbsBlockDevice(volumeSize: 40) + ) + ], + enclaveOptions: new LaunchTemplateEnclaveOptions().withEnabled(true) + ) + + def sourceLtVersion = new LaunchTemplateVersion( + launchTemplateId: LT_ID_1, + versionNumber: 1, + launchTemplateData: srcLtVersionDataRespWithSpotOptions + ) + + // RequestLaunchTemplateData built in the class under test + def expectedNewLtVersionDataReq = new RequestLaunchTemplateData( + imageId: "ami-1", + kernelId: "kernel-id-1", + instanceType: instanceType, + ramDiskId: "ramdisk-id-1", + ebsOptimized: true, + keyName: "my-key-name", + iamInstanceProfile: new LaunchTemplateIamInstanceProfileSpecificationRequest().withName("my-iam-role"), + monitoring: new LaunchTemplatesMonitoringRequest().withEnabled(true), + userData: USER_DATA_STR, + metadataOptions: new LaunchTemplateInstanceMetadataOptionsRequest().withHttpTokens("required"), + instanceMarketOptions: + setSpotOptions + ? new LaunchTemplateInstanceMarketOptionsRequest().withMarketType("spot").withSpotOptions(new LaunchTemplateSpotMarketOptionsRequest().withMaxPrice("0.5")) + : null, + creditSpecification: + copyCpuCreditSpecFromSrc ? 
new CreditSpecificationRequest().withCpuCredits("standard") : null, + networkInterfaces: [ + new LaunchTemplateInstanceNetworkInterfaceSpecificationRequest( + deviceIndex: 0, + groups: expectedSecGroups, + associatePublicIpAddress: true, + ipv6AddressCount: 1 + ) + ], + blockDeviceMappings: [ + new LaunchTemplateBlockDeviceMappingRequest( + deviceName: "/dev/sdb", + ebs: new LaunchTemplateEbsBlockDeviceRequest(volumeSize: 40) + ) + ], + enclaveOptions: new LaunchTemplateEnclaveOptionsRequest().withEnabled(true) + ) + + when: + launchTemplateService.modifyLaunchTemplate(testCredentials, modifyDesc, sourceLtVersion, shouldUseMixedInstancesPolicy) + + then: + 1 * mockEc2.createLaunchTemplateVersion(_ as CreateLaunchTemplateVersionRequest) >> { arguments -> + // assert arguments passed and return dummy result + CreateLaunchTemplateVersionRequest reqInArg = arguments[0] + assert reqInArg.launchTemplateId == LT_ID_1 && reqInArg.launchTemplateData == expectedNewLtVersionDataReq ; new CreateLaunchTemplateVersionResult() + .withLaunchTemplateVersion(new LaunchTemplateVersion( + launchTemplateId: LT_ID_1, + versionNumber: 2L, + launchTemplateData: new ResponseLaunchTemplateData())) + } + + where: + shouldUseMixedInstancesPolicy | maxSpotPrice || setSpotOptions | instanceType || copyCpuCreditSpecFromSrc | secGroupsInDesc | secGroupsInSrc || expectedSecGroups + true | _ || false | 't3.large' || true | ["new-sg-2"] | ["src-sg-1"] || ["new-sg-2"] + false | "" || false | 'c3.large' || false | [] | ["src-sg-1"] || ["src-sg-1"] + false | null || false | 't3.large' || true | null | ["src-sg-1"] || ["src-sg-1"] + false | "0.5" || true | 'm5.large' || false | null | null || null + } + + @Unroll + void 'delete launch template version success scenarios are handled as expected'() { + given: + def versionToDelete = 2L + + DeleteLaunchTemplateVersionsResponseSuccessItem successItem = ltIdSuccess + ? new DeleteLaunchTemplateVersionsResponseSuccessItem() + .withLaunchTemplateId(ltIdSuccess) + .withVersionNumber(versionToDelete) + : null + + DeleteLaunchTemplateVersionsResponseErrorItem errorItem = ltIdFailure + ? new DeleteLaunchTemplateVersionsResponseErrorItem() + .withLaunchTemplateId(ltIdFailure) + .withVersionNumber(versionToDelete) + .withResponseError(new ResponseError().withCode(errorCode)) + : null + + DeleteLaunchTemplateVersionsResult result = new DeleteLaunchTemplateVersionsResult() + .withSuccessfullyDeletedLaunchTemplateVersions(successItem) + .withUnsuccessfullyDeletedLaunchTemplateVersions(errorItem) + + when: + launchTemplateService.deleteLaunchTemplateVersion(LT_ID_1, versionToDelete) + + then: + 1 * mockEc2.deleteLaunchTemplateVersions(new DeleteLaunchTemplateVersionsRequest() + .withLaunchTemplateId(LT_ID_1) + .withVersions(String.valueOf(versionToDelete))) >> result + + and: + noExceptionThrown() + + where: + ltIdSuccess | ltIdFailure | errorCode + LT_ID_1 | null | null // success + null | LT_ID_1 | "launchTemplateIdDoesNotExist" // failed with error code considered success + null | LT_ID_1 | "launchTemplateVersionDoesNotExist" // failed with error code considered success + } + + @Unroll + void 'delete launch template version should handle errors as expected'() { + given: + def versionToDelete = 2L + + DeleteLaunchTemplateVersionsResponseSuccessItem successItem = ltIdSuccess + ? new DeleteLaunchTemplateVersionsResponseSuccessItem() + .withLaunchTemplateId(ltIdSuccess) + .withVersionNumber(versionToDelete) + : null + + DeleteLaunchTemplateVersionsResponseErrorItem errorItem = ltIdFailure + ? 
new DeleteLaunchTemplateVersionsResponseErrorItem() + .withLaunchTemplateId(ltIdFailure) + .withVersionNumber(versionToDelete) + .withResponseError(new ResponseError().withCode(errorCode)) + : null + + DeleteLaunchTemplateVersionsResult result = new DeleteLaunchTemplateVersionsResult() + .withSuccessfullyDeletedLaunchTemplateVersions(successItem) + .withUnsuccessfullyDeletedLaunchTemplateVersions(errorItem) + + when: + launchTemplateService.deleteLaunchTemplateVersion(LT_ID_1, versionToDelete) + + then: + 1 * mockEc2.deleteLaunchTemplateVersions(new DeleteLaunchTemplateVersionsRequest() + .withLaunchTemplateId(LT_ID_1) + .withVersions(String.valueOf(versionToDelete))) >> result + + and: + def ex = thrown(RuntimeException) + errorCode + ? ex.message == "Failed to delete launch template version 2 for launch template ID lt-1 because of error '" + errorCode + "'" + : ex == null + + where: + ltIdSuccess | ltIdFailure | errorCode + null | LT_ID_1 | "unexpectedError" + null | LT_ID_1 | "launchTemplateIdMalformed" + } +} diff --git a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupServiceSpec.groovy b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupServiceSpec.groovy index 854863540d6..b0c5babe803 100644 --- a/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupServiceSpec.groovy +++ b/clouddriver-aws/src/test/groovy/com/netflix/spinnaker/clouddriver/aws/services/SecurityGroupServiceSpec.groovy @@ -132,6 +132,76 @@ class SecurityGroupServiceSpec extends Specification { result == "sg-123" } + void "Resolve security group names from list of security group IDs and names"() { + when: + def result = securityGroupService.resolveSecurityGroupNamesByStrategy(["sg-123", "name"]) { List ids -> + securityGroupService.getSecurityGroupNamesFromIds(ids) + } + + then: + 1 * securityGroupService.amazonEC2.describeSecurityGroups(_) >> new DescribeSecurityGroupsResult(securityGroups: [ + new SecurityGroup(groupId: "sg-123", groupName: "test", vpcId: "vpc1234") + ]) + result == ["name", "test"] + } + + void "Resolve security group IDs from list of security group names and IDs"() { + when: + def result = securityGroupService.resolveSecurityGroupIdsByStrategy(["test", "sg-456"]) { List names -> + securityGroupService.getSecurityGroupIds(names, "vpc1234") + } + + then: + 1 * securityGroupService.amazonEC2.describeSecurityGroups(_) >> new DescribeSecurityGroupsResult(securityGroups: [ + new SecurityGroup(groupId: "sg-123", groupName: "test", vpcId: "vpc1234") + ]) + result == ["sg-456", "sg-123"] + } + + void "should resolve Security Group for Application given security group names and subnet purpose"() { + def callCount = 0 + def sgNamesInCall = [] + def subnetPurposeInCall = "" + securityGroupService.metaClass.getSecurityGroupIdsWithSubnetPurpose = { List sgNames, String subnetPurpose -> + sgNamesInCall.addAll(sgNames) + subnetPurposeInCall = subnetPurpose + callCount++ + ["myApp": "sg-123"] + } + + when: + def result = securityGroupService.resolveSecurityGroupIdsWithSubnetType(["myApp", "sg-456"], "internal") + + then: + result == ["sg-456","sg-123"] + callCount == 1 + sgNamesInCall == ["myApp"] + subnetPurposeInCall == "internal" + 0 * _ + } + + void "should resolve Security Group for Application given security group names and vpc id"() { + def callCount = 0 + def sgNamesInCall = [] + def vpcIdInCall = "" + securityGroupService.metaClass.getSecurityGroupIds = { List sgNames, String vpcId -> 
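+        // metaClass stub: capture the arguments and return a canned name-to-id map so the spec never hits the real getSecurityGroupIds lookup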
+ sgNamesInCall.addAll(sgNames) + vpcIdInCall = vpcId + callCount++ + ["myApp": "sg-123"] + } + + when: + def result = securityGroupService.resolveSecurityGroupIdsInVpc(["myApp", "sg-456"], "vpc-1234") + + then: + result == ["sg-456","sg-123"] + callCount == 1 + sgNamesInCall == ["myApp"] + vpcIdInCall == "vpc-1234" + 0 * _ + } + private Matcher matchRequest(String... groupNames) { hasProperty("filters", contains(new Filter("group-name", groupNames.toList()))) } diff --git a/clouddriver-aws/src/test/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgentTest.java b/clouddriver-aws/src/test/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgentTest.java new file mode 100644 index 00000000000..a7142165551 --- /dev/null +++ b/clouddriver-aws/src/test/java/com/netflix/spinnaker/clouddriver/aws/provider/agent/AmazonLoadBalancerInstanceStateCachingAgentTest.java @@ -0,0 +1,138 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.aws.provider.agent; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancing.model.DescribeInstanceHealthRequest; +import com.amazonaws.services.elasticloadbalancing.model.DescribeInstanceHealthResult; +import com.amazonaws.services.elasticloadbalancing.model.InstanceState; +import com.google.common.collect.Iterables; +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.data.Keys; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.context.ApplicationContext; + +@ExtendWith(MockitoExtension.class) +class AmazonLoadBalancerInstanceStateCachingAgentTest { + private static final String region = "region"; + private static final String accountName = "accountName"; + 
private static final String accountId = "accountId"; + + @Mock private ProviderCache providerCache; + + @Mock private NetflixAmazonCredentials creds; + + @Mock private AmazonElasticLoadBalancing loadBalancing; + + @Mock private Cache cache; + + @Mock private ApplicationContext ctx; + + private AmazonLoadBalancerInstanceStateCachingAgent getAgent() { + when(creds.getName()).thenReturn(accountName); + AmazonClientProvider acp = mock(AmazonClientProvider.class); + when(acp.getAmazonElasticLoadBalancing(creds, region)).thenReturn(loadBalancing); + return new AmazonLoadBalancerInstanceStateCachingAgent( + acp, creds, region, AmazonObjectMapperConfigurer.createConfigured(), ctx); + } + + @SuppressWarnings("unchecked") + @Test + void twoLoadBalancersWithTheSameInstance() { + // given + String vpcId = "vpc-11223344556677889"; + String loadBalancerOneName = "lbOneName"; + String loadBalancerTwoName = "lbTwoName"; + + // One instance registered with two load balancers is enough for this test. + // We don't need multiple instances. We don't even need different opinions + // of instance state. Two different load balancers reporting the same state + // is enough. + String instanceId = "instanceId"; + String instanceStateString = "instanceState"; + String reasonCode = "reasonCode"; + String description = "description"; + + InstanceState instanceState = + new InstanceState() + .withInstanceId(instanceId) + .withState(instanceStateString) + .withReasonCode(reasonCode) + .withDescription(description); + + AmazonLoadBalancerInstanceStateCachingAgent agent = getAgent(); + + // and + when(ctx.getBean(Cache.class)).thenReturn(cache); + when(cache.filterIdentifiers(eq(LOAD_BALANCERS.ns), anyString())) + .thenReturn( + List.of( + Keys.getLoadBalancerKey(loadBalancerOneName, accountId, region, vpcId, "classic"), + Keys.getLoadBalancerKey(loadBalancerTwoName, accountId, region, vpcId, "classic")), + List.of()); // nonvpc + + when(loadBalancing.describeInstanceHealth(any(DescribeInstanceHealthRequest.class))) + .thenReturn(new DescribeInstanceHealthResult().withInstanceStates(instanceState)); + + // when + CacheResult result = agent.loadData(providerCache); + + // then + verify(ctx).getBean(Cache.class); + verify(cache, times(2)).filterIdentifiers(eq(LOAD_BALANCERS.ns), anyString()); + verify(loadBalancing, times(2)) + .describeInstanceHealth(any(DescribeInstanceHealthRequest.class)); + + // and: 'there's one health item in the cache result' + assertThat(result.getCacheResults().get(HEALTH.ns)).hasSize(1); + + // and: 'the health item has information from the last load balancer' + Map healthAttributes = + Iterables.getOnlyElement(result.getCacheResults().get(HEALTH.ns)).getAttributes(); + assertThat(healthAttributes.get("loadBalancers")).isInstanceOf(List.class); + List loadBalancers = (List) healthAttributes.get("loadBalancers"); + assertThat(loadBalancers).hasSize(1); + List loadBalancerNames = + loadBalancers.stream() + .map(loadBalancer -> ((Map) loadBalancer).get("loadBalancerName")) + .collect(Collectors.toList()); + + assertThat(loadBalancerNames).containsAll(List.of(loadBalancerTwoName)); + } +} diff --git a/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/custom-token-userdata-tokenized.txt b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/custom-token-userdata-tokenized.txt new file mode 100644 index 00000000000..9032a2b9dfe --- /dev/null +++ 
b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/custom-token-userdata-tokenized.txt @@ -0,0 +1,2 @@ +CUSTOM_A="custom-a" +CUSTOM_B="custom-b" diff --git a/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/custom-token-userdata.txt b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/custom-token-userdata.txt new file mode 100644 index 00000000000..fe9be8b82e1 --- /dev/null +++ b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/custom-token-userdata.txt @@ -0,0 +1,2 @@ +CUSTOM_A="%%custom_token_a%%" +CUSTOM_B="%%custom_token_b%%" diff --git a/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/default-token-userdata-tokenized.txt b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/default-token-userdata-tokenized.txt new file mode 100644 index 00000000000..49ae2e60333 --- /dev/null +++ b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/default-token-userdata-tokenized.txt @@ -0,0 +1,12 @@ +NETFLIX_ACCOUNT="account" +NETFLIX_ACCOUNT_TYPE="accountType" +NETFLIX_ENVIRONMENT="environment" +NETFLIX_APP="app" +NETFLIX_APPUSER="app" +NETFLIX_STACK="stack" +NETFLIX_CLUSTER="app-stack-detail-c0countries-d0devPhase-h0hardware-p0partners-r099-z0zone" +NETFLIX_DETAIL="detail-c0countries-d0devPhase-h0hardware-p0partners-r099-z0zone" +NETFLIX_AUTO_SCALE_GROUP="app-stack-detail-c0countries-d0devPhase-h0hardware-p0partners-r099-z0zone" +NETFLIX_LAUNCH_CONFIG="launchConfigName" +NETFLIX_LAUNCH_TEMPLATE="" +EC2_REGION="region" diff --git a/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/default-token-userdata.txt b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/default-token-userdata.txt new file mode 100644 index 00000000000..b0f2dc6d8e1 --- /dev/null +++ b/clouddriver-aws/src/test/resources/com/netflix/spinnaker/clouddriver/aws/deploy/userdata/default-token-userdata.txt @@ -0,0 +1,12 @@ +NETFLIX_ACCOUNT="%%account%%" +NETFLIX_ACCOUNT_TYPE="%%accounttype%%" +NETFLIX_ENVIRONMENT="%%env%%" +NETFLIX_APP="%%app%%" +NETFLIX_APPUSER="%%app%%" +NETFLIX_STACK="%%stack%%" +NETFLIX_CLUSTER="%%cluster%%" +NETFLIX_DETAIL="%%detail%%" +NETFLIX_AUTO_SCALE_GROUP="%%autogrp%%" +NETFLIX_LAUNCH_CONFIG="%%launchconfig%%" +NETFLIX_LAUNCH_TEMPLATE="%%launchtemplate%%" +EC2_REGION="%%region%%" diff --git a/clouddriver-azure/clouddriver-azure.gradle b/clouddriver-azure/clouddriver-azure.gradle index 19f11784d4c..09ea2e74ed1 100644 --- a/clouddriver-azure/clouddriver-azure.gradle +++ b/clouddriver-azure/clouddriver-azure.gradle @@ -1,25 +1,37 @@ -repositories { - maven { url "http://adxsnapshots.azurewebsites.net" } -} - dependencies { - compile project(":clouddriver-core") - compile spinnaker.dependency('frigga') - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') - compile 'com.microsoft.azure:adal4j:1.1.2' - compile 'com.microsoft.azure:azure:1.0.0-beta1' - compile 'com.microsoft.azure:azure-mgmt-compute:1.0.0-beta1' - compile 'com.microsoft.azure:azure-mgmt-network:1.0.0-beta1' - compile 'com.microsoft.azure:azure-mgmt-storage:1.0.0-beta1' - compile 'com.microsoft.azure:azure-mgmt-resources:1.0.0-beta1' - compile 'com.microsoft.azure:azure-client-authentication:1.0.0-beta1' - compile 'com.microsoft.azure:azure-storage:3.1.0' - compile 
'com.microsoft.rest:client-runtime:1.0.0-20160309.002843-19' + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + implementation project(":cats:cats-core") + + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-moniker" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "com.azure.resourcemanager:azure-resourcemanager:2.19.0" + implementation "com.azure:azure-identity:1.6.0" + implementation "com.azure:azure-storage-blob:12.19.1" + implementation "io.projectreactor:reactor-core" + implementation "com.google.guava:guava:31.1-jre" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation 'org.mockito:mockito-inline:4.8.0' + testImplementation 'org.assertj:assertj-core:3.23.1' + + } configurations.all { - resolutionStrategy.force 'com.microsoft.rest:client-runtime:1.0.0-20160309.002843-19' + resolutionStrategy.force 'io.projectreactor:reactor-core:3.4.23' } - - diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureBaseClient.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureBaseClient.groovy index 3bf245e3d81..df24f14e5ee 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureBaseClient.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureBaseClient.groovy @@ -16,37 +16,47 @@ package com.netflix.spinnaker.clouddriver.azure.client +import com.azure.core.credential.TokenCredential +import com.azure.core.http.policy.HttpLogDetailLevel +import com.azure.core.http.rest.Response +import com.azure.core.management.AzureEnvironment +import com.azure.core.management.exception.ManagementException +import com.azure.core.management.profile.AzureProfile +import com.azure.identity.ClientSecretCredentialBuilder +import com.azure.resourcemanager.AzureResourceManager import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature -import com.microsoft.azure.AzureServiceClient -import com.microsoft.azure.CloudException -import com.microsoft.azure.credentials.ApplicationTokenCredentials -import com.microsoft.azure.credentials.AzureEnvironment -import com.microsoft.rest.ServiceResponse + import groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import okhttp3.Interceptor -import okhttp3.Request -import okhttp3.Response @Slf4j @CompileStatic -public abstract class AzureBaseClient { +abstract class AzureBaseClient { final String subscriptionId final static long AZURE_ATOMICOPERATION_RETRY = 5 static ObjectMapper mapper - final String userAgentApplicationName + final AzureResourceManager azure /** * 
Constructor
* @param subscriptionId - the Azure subscription to use
*/
- protected AzureBaseClient(String subscriptionId, String userAgentAppName) {
+ protected AzureBaseClient(String subscriptionId, AzureProfile azureProfile, TokenCredential credentials) {
this.subscriptionId = subscriptionId
mapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true)
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
- this.userAgentApplicationName = userAgentAppName
+ this.azure = initialize(credentials, subscriptionId, azureProfile)
+ }
+
+ private AzureResourceManager initialize(TokenCredential credentials, String subscriptionId, AzureProfile azureProfile) {
+
+ AzureResourceManager
+ .configure()
+ .withLogLevel(HttpLogDetailLevel.NONE)
+ .authenticate(credentials, azureProfile)
+ .withSubscription(subscriptionId)
}
/**
@@ -54,10 +64,34 @@ public abstract class AzureBaseClient {
* @param clientId
* @param tenantId
* @param secret
+ * @param configuredAzureEnvironment
* @return
*/
- static ApplicationTokenCredentials getTokenCredentials(String clientId, String tenantId, String secret) {
- new ApplicationTokenCredentials(clientId, tenantId, secret, AzureEnvironment.AZURE)
+
+ static TokenCredential getTokenCredentials(String clientId, String tenantId, String secret, String configuredAzureEnvironment) {
+
+ def azureProfile = getAzureProfile(configuredAzureEnvironment)
+
+ return new ClientSecretCredentialBuilder()
+ .clientId(clientId)
+ .clientSecret(secret)
+ .tenantId(tenantId)
+ .authorityHost(azureProfile.getEnvironment().getActiveDirectoryEndpoint())
+ .build()
+
+ }
+
+ static AzureProfile getAzureProfile(String configuredAzureEnvironment) {
+ switch (configuredAzureEnvironment) {
+ case "AZURE_US_GOVERNMENT":
+ return new AzureProfile(AzureEnvironment.AZURE_US_GOVERNMENT)
+ case "AZURE_CHINA":
+ return new AzureProfile(AzureEnvironment.AZURE_CHINA)
+ case "AZURE_GERMANY":
+ return new AzureProfile(AzureEnvironment.AZURE_GERMANY)
+ default:
+ return new AzureProfile(AzureEnvironment.AZURE)
+ }
}
/**
@@ -66,7 +100,7 @@ public abstract class AzureBaseClient {
* @param count - number of retry attempts
* @return the result returned from the operation; if the response is a 404 then return null
*/
- static ServiceResponse<T> executeOp(Closure<ServiceResponse<T>> operation, long count = AZURE_ATOMICOPERATION_RETRY) {
+ static <T> T executeOp(Closure<T> operation, long count = AZURE_ATOMICOPERATION_RETRY) {
// Ensure that the operation will always at least try once
long retryCount = count <= 0 ?
count - 1 : 0 @@ -106,8 +140,8 @@ public abstract class AzureBaseClient { */ private static boolean canRetry(Exception e) { boolean retry = false - if (e.class == CloudException) { - def code = (e as CloudException).response.code() + if (e.class == ManagementException) { + def code = (e as ManagementException).getResponse().getStatusCode() retry = (code == HttpURLConnection.HTTP_CLIENT_TIMEOUT || (code >= HttpURLConnection.HTTP_INTERNAL_ERROR && code <= HttpURLConnection.HTTP_GATEWAY_TIMEOUT)) } else if (e.class == SocketTimeoutException) { @@ -124,9 +158,9 @@ public abstract class AzureBaseClient { * @return True if the exception encountered was a 429 Response and it was handled */ private static boolean handleTooManyRequestsResponse(Exception e) { - if (e.class == CloudException.class) { - if ((e as CloudException).response.code() == 429) { - int retryAfterIntervalSec = (e as CloudException).response.headers().get("Retry-After").toInteger() + if (e.class == ManagementException.class) { + if ((e as ManagementException).getResponse().getStatusCode() == 429) { + int retryAfterIntervalSec = (e as ManagementException).getResponse().getHeaderValue("Retry-After").toInteger() if (retryAfterIntervalSec) { log.warn("Received 'Too Many Requests' (429) response from Azure. Retrying in $retryAfterIntervalSec seconds") sleep(retryAfterIntervalSec * 1000) // convert to milliseconds @@ -137,10 +171,10 @@ public abstract class AzureBaseClient { false } - static ServiceResponse deleteAzureResource( Closure azureOps, String resourceGroup, String resourceName, String parentResourceName, String msgRetry, String msgFail, long count = AZURE_ATOMICOPERATION_RETRY) { + static Response deleteAzureResource(Closure azureOps, String resourceGroup, String resourceName, String parentResourceName, String msgRetry, String msgFail, long count = AZURE_ATOMICOPERATION_RETRY) { // The API call might return a timeout exception or some other Azure CloudException that is not the direct result of the operation // we are trying to execute; retry and if the final retry fails then throw - ServiceResponse result = null + Response result = null long operationRetry = 0 while (operationRetry < count) { try { @@ -152,7 +186,7 @@ public abstract class AzureBaseClient { } operationRetry = count } - catch (CloudException e) { + catch (ManagementException e) { if (resourceNotFound(e)) { // resource was not found; must have been deleted already operationRetry = count @@ -177,7 +211,7 @@ public abstract class AzureBaseClient { } static Boolean resourceNotFound(Exception e) { - e.class == CloudException ? (e as CloudException).response.code() == HttpURLConnection.HTTP_NOT_FOUND : false + e.class == ManagementException ? (e as ManagementException).getResponse().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND : false } /*** @@ -195,22 +229,4 @@ public abstract class AzureBaseClient { * @return namespace of the resource provider */ protected abstract String getProviderNamespace() - - protected static void setUserAgent(AzureServiceClient client, String userAgentString, boolean useUniqueID = false) { - client.getClientInterceptors().add(new Interceptor() { - @Override - Response intercept(Interceptor.Chain chain) throws IOException { - Request.Builder builder = chain.request().newBuilder() - def oldHeaderValue = chain.request().header("User-Agent") - def userAgentValue = oldHeaderValue ? 
"${userAgentString} ${oldHeaderValue}" : "${userAgentString}" - builder.header("User-Agent", userAgentValue) - // TODO: work around for SDK issue; not all the API's will accept the same client-request-id - if (useUniqueID) { - builder.header("x-ms-client-request-id", UUID.randomUUID().toString()) - } - return chain.proceed(builder.build()) - } - }) - } - } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureComputeClient.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureComputeClient.groovy index eb4266f42bd..4883e11a1ec 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureComputeClient.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureComputeClient.groovy @@ -16,103 +16,118 @@ package com.netflix.spinnaker.clouddriver.azure.client -import com.microsoft.azure.CloudException -import com.microsoft.azure.credentials.ApplicationTokenCredentials -import com.microsoft.azure.management.compute.ComputeManagementClient -import com.microsoft.azure.management.compute.ComputeManagementClientImpl -import com.microsoft.azure.management.compute.VirtualMachineImagesOperations -import com.microsoft.azure.management.compute.VirtualMachineScaleSetVMsOperations -import com.microsoft.azure.management.compute.VirtualMachineScaleSetsOperations -import com.microsoft.azure.management.compute.models.VirtualMachineImage -import com.microsoft.rest.ServiceResponse +import com.azure.core.credential.TokenCredential +import com.azure.core.http.rest.Response +import com.azure.core.management.exception.ManagementException +import com.azure.core.management.profile.AzureProfile +import com.azure.resourcemanager.compute.models.VirtualMachineCustomImage +import com.azure.resourcemanager.compute.models.VirtualMachineImage +import com.azure.resourcemanager.compute.models.VirtualMachineOffer +import com.azure.resourcemanager.compute.models.VirtualMachinePublisher +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetVM +import com.azure.resourcemanager.compute.models.VirtualMachineSizes +import com.azure.resourcemanager.compute.models.VirtualMachineSku import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureInstance import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureManagedVMImage import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureVMImage +import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.model.HealthState +import groovy.transform.Canonical +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import okhttp3.logging.HttpLoggingInterceptor +import java.util.stream.Collectors @Slf4j +@CompileStatic public class AzureComputeClient extends AzureBaseClient { - private final ComputeManagementClient client - AzureComputeClient(String subscriptionId, ComputeManagementClient client) { - super(subscriptionId, "") - this.client = client + AzureComputeClient(String subscriptionId, TokenCredential credentials, AzureProfile azureProfile) { + super(subscriptionId, azureProfile, credentials) } - AzureComputeClient(String subscriptionId, ApplicationTokenCredentials credentials, String userAgentApplicationName) { - super(subscriptionId, userAgentApplicationName) - this.client = 
this.initialize(credentials) - } - - - @Lazy - private VirtualMachineImagesOperations vmImageOps = {client.getVirtualMachineImagesOperations()}() - @Lazy - private VirtualMachineScaleSetsOperations scaleSetOps = {client.getVirtualMachineScaleSetsOperations()}() - - @Lazy - private VirtualMachineScaleSetVMsOperations scaleSetVMOps = {client.getVirtualMachineScaleSetVMsOperations()}() /** - * get the ComputeManagementClient which will be used for all interaction related to compute resources in Azure - * @param creds the credentials to use when communicating to the Azure subscription(s) - * @return an instance of the Azure ComputeManagementClient + * Return list of available Managed VM images + * @param resourceGroup - filter by resource group + * @param region - filter by region + * @return List of AzureManagedVMImage */ - private ComputeManagementClient initialize(ApplicationTokenCredentials tokenCredentials) { - ComputeManagementClient computeClient = new ComputeManagementClientImpl(tokenCredentials) - computeClient.setSubscriptionId(this.subscriptionId) - computeClient.setLogLevel(HttpLoggingInterceptor.Level.NONE) + List getAllVMCustomImages(String resourceGroup, String region) { - // Add Azure Spinnaker telemetry capturing - setUserAgent(computeClient, userAgentApplicationName) + def result = [] as List + try { + List virtualMachineCustomImages = executeOp({ + azure.virtualMachineCustomImages() + .listByResourceGroup(resourceGroup) + .asList() + .stream() + .filter({ vm -> vm.regionName().equals(region) }) + .collect(Collectors.toList()) + }) + + virtualMachineCustomImages.each {vm -> + result += new AzureManagedVMImage( + name: vm.name(), + resourceGroup: vm.resourceGroupName(), + region: vm.regionName(), + osType: vm.osDiskImage().osType().name()) + } - computeClient + } catch (Exception e) { + log.error("getAllVMCustomImages -> Unexpected exception ", e) + } + + result } + /** * Return list of available VM images * @param location - filter for images to given location * @return List of AzureVMImages */ - List getVMImagesAll(String location){ + List getVMImagesAll(String location) { def result = [] as List - try { - // Usage of local variables to ease with debugging the code; keeping the content retrieved from Azure JSDK call to help with stepping through the code and inspect the values - //def ops = client.getVirtualMachineImagesOperations() + List publishers = executeOp({ + azure.virtualMachineImages() + .publishers() + .listByRegion(location) + .asList() + }) + - List publishers = executeOp({vmImageOps?.listPublishers(location)}).body.collect { it.name } - log.info("getVMImagesAll-> Found ${publishers.size()} publisher items in azure/${location}/${ComputeManagementClient.simpleName}") + log.info("getVMImagesAll-> Found ${publishers.size()} publisher items in azure/${location}") publishers?.each { publisher -> - List offers = executeOp({vmImageOps?.listOffers(location, publisher)})?.body?.collect { - it.name - } - log.info("getVMImagesAll-> Found ${offers.size()} offer items for ${publisher} in azure/${location}/${ComputeManagementClient.simpleName}") + List offers = executeOp({ + publisher.offers().list().asList() + }) + log.info("getVMImagesAll-> Found ${offers.size()} offer items for ${publisher} in azure/${location}") offers?.each { offer -> - List skus = executeOp({ vmImageOps?.listSkus(location, publisher, offer)})?.body?.collect { - it.name - } - log.info("getVMImagesAll-> Found ${skus.size()} SKU items for ${publisher}/${offer} in 
azure/${location}/${ComputeManagementClient.simpleName}") + List skus = executeOp({ + offer.skus().list().asList() + }) + log.info("getVMImagesAll-> Found ${skus.size()} SKU items for ${publisher}/${offer} in azure/${location}") skus?.each { sku -> // Add a try/catch here in order to avoid an all-or-nothing return try { - List versions = executeOp({vmImageOps?.list(location, publisher, offer, sku, null, 100, "name")})?.body?.collect { - it.name - } - log.info("getVMImagesAll-> Found ${skus.size()} version items for ${publisher}/${offer}/${sku} in azure/${location}/${ComputeManagementClient.simpleName}") + List images = executeOp({ + sku.images().list().asList() + }) + log.info("getVMImagesAll-> Found ${skus.size()} version items for ${publisher}/${offer}/${sku} in azure/${location}") - versions?.each { version -> + images?.each { image -> result += new AzureVMImage( - publisher: publisher, - offer: offer, - sku: sku, - version: version) + publisher: publisher.name(), + offer: offer.name(), + sku: sku.name(), + version: image.version()) } } catch (Exception e) { @@ -130,7 +145,10 @@ public class AzureComputeClient extends AzureBaseClient { } VirtualMachineImage getVMImage(String location, String publisher, String offer, String skus, String version) { - executeOp({vmImageOps.get(location, publisher, offer, skus,version)})?.body + executeOp({ + azure.virtualMachineImages() + .getImage(location, publisher, offer, skus, version) + }) } /** @@ -143,17 +161,19 @@ public class AzureComputeClient extends AzureBaseClient { def lastReadTime = System.currentTimeMillis() try { - def vmssList = resourceGroup ? executeOp({ scaleSetOps?.list(resourceGroup) }).body - : executeOp({scaleSetOps?.listAll()}).body + def vmssList = executeOp({ + resourceGroup ? azure.virtualMachineScaleSets().listByResourceGroup(resourceGroup) : + azure.virtualMachineScaleSets().list() + }) vmssList?.each { scaleSet -> - if (scaleSet.location == region) { + if (scaleSet.regionName() == region) { try { - def sg = AzureServerGroupDescription.build(scaleSet) + def sg = AzureServerGroupDescription.build(scaleSet.innerModel()) sg.lastReadTime = lastReadTime serverGroups.add(sg) } catch (Exception e) { - log.warn("Unable to parse scale set ${scaleSet.name} from Azure: ${e.message}") + log.warn("Unable to parse scale set ${scaleSet.name()} from Azure: ${e.message}") } } } @@ -166,15 +186,16 @@ public class AzureComputeClient extends AzureBaseClient { AzureServerGroupDescription getServerGroup(String resourceGroupName, String serverGroupName) { try { - def vmss = executeOp({scaleSetOps?.get(resourceGroupName, serverGroupName)})?.body - def sg = AzureServerGroupDescription.build(vmss) + def vmss = executeOp({ + azure.virtualMachineScaleSets().getByResourceGroup(resourceGroupName, serverGroupName) + }) + def sg = AzureServerGroupDescription.build(vmss.innerModel()) sg.lastReadTime = System.currentTimeMillis() return sg - } catch (CloudException e) { + } catch (ManagementException e) { if (resourceNotFound(e)) { log.warn("ServerGroup: ${e.message} (${serverGroupName} was not found)") - } - else { + } else { throw e } } @@ -187,10 +208,9 @@ public class AzureComputeClient extends AzureBaseClient { * @param serverGroupName - name of the server group * @return a ServiceResponse object */ - ServiceResponse destroyServerGroup(String resourceGroupName, String serverGroupName) { - + Response destroyServerGroup(String resourceGroupName, String serverGroupName) { deleteAzureResource( - scaleSetOps.&delete, + 
azure.virtualMachineScaleSets().&deleteByResourceGroup, resourceGroupName, serverGroupName, null, @@ -208,14 +228,71 @@ public class AzureComputeClient extends AzureBaseClient { Collection getServerGroupInstances(String resourceGroupName, String serverGroupName) { def instances = new ArrayList() - executeOp({scaleSetVMOps.list(resourceGroupName, serverGroupName, null, null, "instanceView")})?.body?.each { - instances.add(AzureInstance.build(it)) - } + executeOp({ + List vms = azure.virtualMachineScaleSets().getByResourceGroup(resourceGroupName, serverGroupName)?.virtualMachines()?.list()?.asList() + vms?.each { + instances.add(AzureInstance.build(it)) + } + }) instances } + /** + * check the scale set's health status, wait for the timeout return true when healthy, false if we hit the timeout + */ + Boolean waitForScaleSetHealthy(String resourceGroupName, String serverGroupName, long timeoutMillis) { + def now = System.nanoTime() + def currentTime = System.nanoTime() + // TODO: use available health probes to determine the sleep time + def sleepTimeSeconds = 5 + + while (currentTime - now < timeoutMillis * 1000000) { + def instances = getServerGroupInstances(resourceGroupName, serverGroupName) + if (!instances.any { it.healthState != HealthState.Up }) { + return true + } + + Thread.sleep(sleepTimeSeconds * 1000) + currentTime = System.nanoTime() + } + + false + } + + Map> getVirtualMachineSizesByRegions(List regions) { + HashMap> result = new HashMap<>() + executeOp({ + VirtualMachineSizes sizes = azure.virtualMachines().sizes() + for (AzureNamedAccountCredentials.AzureRegion region : regions) { + List regionSizes = sizes.listByRegion(region.name).toList().collect { new VirtualMachineSize(name: it.name())} + result.put(region.name, regionSizes) + } + }) + result + } + + Response resizeServerGroup(String resourceGroupName, String serverGroupName, int capacity) { + try { + def vmss = executeOp({ + azure.virtualMachineScaleSets().getByResourceGroup(resourceGroupName, serverGroupName) + }) + vmss.update().withCapacity(capacity).apply() + } catch (ManagementException e) { + if (resourceNotFound(e)) { + log.warn("ServerGroup: ${e.message} (${serverGroupName} was not found)") + } else { + throw e + } + } + null + } + + @Canonical + static class VirtualMachineSize { + String name + } /*** * The namespace for the Azure Resource Provider diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureNetworkClient.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureNetworkClient.groovy index c0cb718d9b6..6bd562a7a01 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureNetworkClient.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureNetworkClient.groovy @@ -16,25 +16,19 @@ package com.netflix.spinnaker.clouddriver.azure.client -import com.microsoft.azure.CloudException -import com.microsoft.azure.credentials.ApplicationTokenCredentials -import com.microsoft.azure.management.network.ApplicationGatewaysOperations -import com.microsoft.azure.management.network.LoadBalancersOperations -import com.microsoft.azure.management.network.NetworkManagementClient -import com.microsoft.azure.management.network.NetworkManagementClientImpl -import com.microsoft.azure.management.network.NetworkSecurityGroupsOperations -import com.microsoft.azure.management.network.PublicIPAddressesOperations -import 
com.microsoft.azure.management.network.SubnetsOperations -import com.microsoft.azure.management.network.VirtualNetworksOperations -import com.microsoft.azure.management.network.models.AddressSpace -import com.microsoft.azure.management.network.models.ApplicationGatewayBackendAddressPool -import com.microsoft.azure.management.network.models.DhcpOptions -import com.microsoft.azure.management.network.models.NetworkSecurityGroup -import com.microsoft.azure.management.network.models.PublicIPAddress -import com.microsoft.azure.management.network.models.Subnet -import com.microsoft.azure.management.network.models.VirtualNetwork - -import com.microsoft.rest.ServiceResponse +import com.azure.core.credential.TokenCredential +import com.azure.core.http.rest.Response +import com.azure.core.management.exception.ManagementException +import com.azure.core.management.profile.AzureProfile +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSet +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetVMs +import com.azure.resourcemanager.network.fluent.models.NetworkSecurityGroupInner +import com.azure.resourcemanager.network.models.LoadBalancer +import com.azure.resourcemanager.network.models.LoadBalancerInboundNatPool +import com.azure.resourcemanager.network.models.LoadBalancingRule +import com.azure.resourcemanager.network.models.Network +import com.azure.resourcemanager.network.models.PublicIpAddress +import com.azure.resourcemanager.network.models.TransportProtocol import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription @@ -43,38 +37,24 @@ import com.netflix.spinnaker.clouddriver.azure.resources.network.model.AzureVirt import com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.model.AzureSecurityGroupDescription import com.netflix.spinnaker.clouddriver.azure.resources.subnet.model.AzureSubnetDescription import com.netflix.spinnaker.clouddriver.azure.templates.AzureAppGatewayResourceTemplate +import com.netflix.spinnaker.clouddriver.azure.templates.AzureLoadBalancerResourceTemplate +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import okhttp3.logging.HttpLoggingInterceptor +import java.lang.reflect.InvocationTargetException +import java.lang.reflect.Method + +@CompileStatic @Slf4j class AzureNetworkClient extends AzureBaseClient { + private final Integer NAT_POOL_PORT_START = 50000 + private final Integer NAT_POOL_PORT_END = 59999 + private final Integer NAT_POOL_PORT_NUMBER_PER_POOL = 100 - private final NetworkManagementClient client - - AzureNetworkClient(String subscriptionId, ApplicationTokenCredentials credentials, String userAgentApplicationName) { - super(subscriptionId, userAgentApplicationName) - this.client = initializeClient(credentials) + AzureNetworkClient(String subscriptionId, TokenCredential credentials, AzureProfile azureProfile) { + super(subscriptionId, azureProfile, credentials) } - @Lazy - private LoadBalancersOperations loadBalancerOps = { client.getLoadBalancersOperations() }() - - @Lazy - private ApplicationGatewaysOperations appGatewayOps = { client.getApplicationGatewaysOperations() }() - - @Lazy - private VirtualNetworksOperations virtualNetworksOperations = { client.getVirtualNetworksOperations() }() - - @Lazy - private SubnetsOperations subnetOperations = { client.getSubnetsOperations() }() - - @Lazy - private NetworkSecurityGroupsOperations 
networkSecurityGroupOperations = { client.getNetworkSecurityGroupsOperations() }()
-
- @Lazy
- private PublicIPAddressesOperations publicIPAddressOperations = {client.getPublicIPAddressesOperations() }()
-
- /**
* Retrieve a collection of all load balancers for a given set of credentials and location
* @param region the location of the virtual network
@@ -84,22 +64,26 @@ class AzureNetworkClient extends AzureBaseClient {
def result = new ArrayList()
try {
- def loadBalancers = executeOp({loadBalancerOps.listAll()})?.body
+ def loadBalancers = executeOp({
+ azure.loadBalancers().list().asList()
+ })
def currentTime = System.currentTimeMillis()
- loadBalancers?.each {item ->
- if (item.location == region) {
+ loadBalancers?.each { item ->
+ if (item.innerModel().location() == region) {
try {
- def lbItem = AzureLoadBalancerDescription.build(item)
- lbItem.dnsName = getDnsNameForPublicIp(
- AzureUtilities.getResourceGroupNameFromResourceId(item.id),
- AzureUtilities.getNameFromResourceId(item.frontendIPConfigurations?.first()?.getPublicIPAddress()?.id)
- )
+ def lbItem = AzureLoadBalancerDescription.build(item.innerModel())
+ if (item.publicIpAddressIds() && !item.publicIpAddressIds().isEmpty()) {
+ lbItem.dnsName = getDnsNameForPublicIp(
+ AzureUtilities.getResourceGroupNameFromResourceId(item.id()),
+ AzureUtilities.getNameFromResourceId(item.publicIpAddressIds()?.first())
+ )
+ }
lbItem.lastReadTime = currentTime
result += lbItem
} catch (RuntimeException re) {
// if we get a runtime exception here, log it but keep processing the rest of the
// load balancers
- log.error("Unable to process load balancer ${item.name}: ${re.message}")
+ log.error("Unable to process load balancer ${item.name()}: ${re.message}")
}
}
}
@@ -119,17 +103,19 @@ class AzureNetworkClient extends AzureBaseClient {
AzureLoadBalancerDescription getLoadBalancer(String resourceGroupName, String loadBalancerName) {
try {
def currentTime = System.currentTimeMillis()
- def item = executeOp({loadBalancerOps.get(resourceGroupName, loadBalancerName, null)})?.body
+ def item = executeOp({
+ azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
+ })
if (item) {
- def lbItem = AzureLoadBalancerDescription.build(item)
+ def lbItem = AzureLoadBalancerDescription.build(item.innerModel())
lbItem.dnsName = getDnsNameForPublicIp(
- AzureUtilities.getResourceGroupNameFromResourceId(item.id),
- AzureUtilities.getNameFromResourceId(item.frontendIPConfigurations?.first()?.getPublicIPAddress()?.id)
+ AzureUtilities.getResourceGroupNameFromResourceId(item.id()),
+ AzureUtilities.getNameFromResourceId(item.publicIpAddressIds()?.first())
)
lbItem.lastReadTime = currentTime
return lbItem
}
- } catch (CloudException e) {
+ } catch (ManagementException e) {
log.error("getLoadBalancer(${resourceGroupName},${loadBalancerName}) -> Cloud Exception ", e)
}
@@ -142,17 +128,17 @@ class AzureNetworkClient extends AzureBaseClient {
* @param loadBalancerName name of the load balancer to delete
* @return a ServiceResponse object
*/
- ServiceResponse deleteLoadBalancer(String resourceGroupName, String loadBalancerName) {
- def loadBalancer = executeOp({loadBalancerOps.get(resourceGroupName, loadBalancerName, null)})?.body
+ Response deleteLoadBalancer(String resourceGroupName, String loadBalancerName) {
+ def loadBalancer = azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
- if (loadBalancer?.frontendIPConfigurations?.size() != 1) {
+ if (loadBalancer?.publicIpAddressIds()?.size() != 1) {
throw new
RuntimeException("Unexpected number of public IP addresses associated with the load balancer (should always be only one)!") } - def publicIpAddressName = AzureUtilities.getNameFromResourceId(loadBalancer.frontendIPConfigurations.first().getPublicIPAddress().id) + def publicIpAddressName = AzureUtilities.getNameFromResourceId(loadBalancer.publicIpAddressIds()?.first()) deleteAzureResource( - loadBalancerOps.&delete, + azure.loadBalancers().&deleteByResourceGroup, resourceGroupName, loadBalancerName, null, @@ -170,10 +156,10 @@ class AzureNetworkClient extends AzureBaseClient { * @param publicIpName name of the publicIp resource to delete * @return a ServiceResponse object */ - ServiceResponse deletePublicIp(String resourceGroupName, String publicIpName) { + Response deletePublicIp(String resourceGroupName, String publicIpName) { deleteAzureResource( - publicIPAddressOperations.&delete, + azure.publicIpAddresses().&deleteByResourceGroup, resourceGroupName, publicIpName, null, @@ -191,12 +177,14 @@ class AzureNetworkClient extends AzureBaseClient { AzureAppGatewayDescription getAppGateway(String resourceGroupName, String appGatewayName) { try { def currentTime = System.currentTimeMillis() - def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body + def appGateway = executeOp({ + azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName) + }) if (appGateway) { - def agItem = AzureAppGatewayDescription.getDescriptionForAppGateway(appGateway) + def agItem = AzureAppGatewayDescription.getDescriptionForAppGateway(appGateway.innerModel()) agItem.dnsName = getDnsNameForPublicIp( - AzureUtilities.getResourceGroupNameFromResourceId(appGateway.id), - AzureUtilities.getNameFromResourceId(appGateway.frontendIPConfigurations?.first()?.getPublicIPAddress()?.id) + AzureUtilities.getResourceGroupNameFromResourceId(appGateway.id()), + AzureUtilities.getNameFromResourceId(appGateway.defaultPublicFrontend().publicIpAddressId()) ) agItem.lastReadTime = currentTime return agItem @@ -214,26 +202,28 @@ class AzureNetworkClient extends AzureBaseClient { * @return a Collection of objects which represent an Application Gateway in Azure */ Collection getAppGatewaysAll(String region) { - def result = [] + Collection result = [] try { def currentTime = System.currentTimeMillis() - def appGateways = executeOp({appGatewayOps.listAll()})?.body + def appGateways = executeOp({ + azure.applicationGateways().list() + }) - appGateways.each {item -> - if (item.location == region) { + appGateways.each { item -> + if (item.innerModel().location() == region) { try { - def agItem = AzureAppGatewayDescription.getDescriptionForAppGateway(item) + def agItem = AzureAppGatewayDescription.getDescriptionForAppGateway(item.innerModel()) agItem.dnsName = getDnsNameForPublicIp( - AzureUtilities.getResourceGroupNameFromResourceId(item.id), - AzureUtilities.getNameFromResourceId(item.frontendIPConfigurations?.first()?.getPublicIPAddress()?.id) + AzureUtilities.getResourceGroupNameFromResourceId(item.id()), + AzureUtilities.getNameFromResourceId(item.defaultPublicFrontend().publicIpAddressId()) ) agItem.lastReadTime = currentTime result << agItem } catch (RuntimeException re) { // if we get a runtime exception here, log it but keep processing the rest of the // load balancers - log.error("Unable to process application gateway ${item.name}: ${re.message}") + log.error("Unable to process application gateway ${item.name()}: ${re.message}") } } } @@ -250,23 +240,25 @@ class AzureNetworkClient 
extends AzureBaseClient {
* @param appGatewayName name of the Application Gateway resource to delete
* @return a ServiceResponse object or an Exception if we can't delete
*/
- ServiceResponse deleteAppGateway(String resourceGroupName, String appGatewayName) {
- ServiceResponse result
- def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body
+ Response deleteAppGateway(String resourceGroupName, String appGatewayName) {
+ Response result
+ def appGateway = executeOp({
+ azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName)
+ })
- if (appGateway?.tags?.cluster) {
+ if (appGateway?.tags()?.cluster) {
// The selected application gateway cannot be deleted while there are still active server groups associated with it
- def errMsg = "Failed to delete ${appGatewayName}; the application gateway is still associated with server groups in ${appGateway.tags.cluster} cluster"
+ def errMsg = "Failed to delete ${appGatewayName}; the application gateway is still associated with server groups in ${appGateway?.tags()?.cluster} cluster. Please delete associated server groups before deleting the load balancer."
log.error(errMsg)
throw new RuntimeException(errMsg)
}
// TODO: retrieve private IP address name when support for it is added
// First item in the application gateway frontend IP configurations is the public IP address we are looking for
- def publicIpAddressName = AzureUtilities.getNameFromResourceId(appGateway?.frontendIPConfigurations?.first()?.getPublicIPAddress()?.id)
+ def publicIpAddressName = AzureUtilities.getNameFromResourceId(appGateway?.defaultPublicFrontend().publicIpAddressId())
result = deleteAzureResource(
- appGatewayOps.&delete,
+ azure.applicationGateways().&deleteByResourceGroup,
resourceGroupName,
appGatewayName,
null,
@@ -289,10 +281,12 @@ class AzureNetworkClient extends AzureBaseClient {
* @return a resource id for the backend address pool that got created or null/Runtime Exception if something went wrong
*/
String createAppGatewayBAPforServerGroup(String resourceGroupName, String appGatewayName, String serverGroupName) {
- def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body
+ def appGateway = executeOp({
+ azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName)
+ })
if (appGateway) {
- def agDescription = AzureAppGatewayDescription.getDescriptionForAppGateway(appGateway)
+ def agDescription = AzureAppGatewayDescription.getDescriptionForAppGateway(appGateway.innerModel())
def parsedName = Names.parseName(serverGroupName)
if (!agDescription || (agDescription.cluster && agDescription.cluster != parsedName.cluster)) {
@@ -304,22 +298,23 @@ class AzureNetworkClient extends AzureBaseClient {
}
// the application gateway must have a backend address pool list (even if it might be empty)
- if (!appGateway.backendAddressPools.find {it.name == serverGroupName}) {
- appGateway.backendAddressPools.add(new ApplicationGatewayBackendAddressPool(name: serverGroupName))
+ if (!appGateway.backends()?.containsKey(serverGroupName)) {
if (agDescription.serverGroups) {
agDescription.serverGroups << serverGroupName
} else {
agDescription.serverGroups = [serverGroupName]
}
- appGateway.tags.cluster = parsedName.cluster
- // TODO: debug only; remove this as part of the cleanup
- appGateway.tags.serverGroups = agDescription.serverGroups.join(" ")
- log.info("Adding backend address pool to ${appGateway.name} for server group ${serverGroupName}")
- executeOp({appGatewayOps.createOrUpdate(resourceGroupName, appGatewayName, appGateway)})
- log.info("Backend address pool added")
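+ // A single fluent update below applies both changes at once: tag the gateway with its
+ // cluster and attach an empty backend address pool named after the server group.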
+
+ appGateway.update()
+ .withTag("cluster", parsedName.cluster)
+ .defineBackend(serverGroupName)
+ .attach()
+ .apply()
+
+ log.info("Adding backend address pool to ${appGateway.name()} for server group ${serverGroupName}")
}
- return "${appGateway.id}/backendAddressPools/${serverGroupName}"
+ return "${appGateway.id()}/backendAddressPools/${serverGroupName}"
}
null
@@ -333,42 +328,225 @@ class AzureNetworkClient extends AzureBaseClient {
* @return a resource id for the backend address pool that was removed or null/Runtime Exception if something went wrong
*/
String removeAppGatewayBAPforServerGroup(String resourceGroupName, String appGatewayName, String serverGroupName) {
- def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body
+ def appGateway = executeOp({
+ azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName)
+ })
if (appGateway) {
- def agDescription = AzureAppGatewayDescription.getDescriptionForAppGateway(appGateway)
+ def agDescription = AzureAppGatewayDescription.getDescriptionForAppGateway(appGateway.innerModel())
if (!agDescription) {
def errMsg = "Failed to disassociate ${serverGroupName} from ${appGatewayName}; could not find ${appGatewayName}"
log.error(errMsg)
throw new RuntimeException(errMsg)
}
- def agBAP = appGateway.backendAddressPools?.find { it.name == serverGroupName}
+ def agBAP = appGateway.backends().get(serverGroupName)
if (agBAP) {
- appGateway.backendAddressPools.remove(agBAP)
- if (appGateway.backendAddressPools.size() == 1) {
+ def chain = appGateway.update()
+ .withoutBackend(agBAP.name())
+ if (appGateway.backends().size() == 1) {
// There are no server groups assigned to this application gateway; we can make it available now
- appGateway.tags.remove("cluster")
+ chain = chain.withoutTag("cluster")
}
- // TODO: debug only; remove this as part of the cleanup
- agDescription.serverGroups?.remove(serverGroupName)
- if (!agDescription.serverGroups || agDescription.serverGroups.isEmpty()) {
- appGateway.tags.remove("serverGroups")
- } else {
- appGateway.tags.remove("serverGroups")
- appGateway.tags.serverGroups = agDescription.serverGroups.join(" ")
+ chain.apply()
+ }
+
+ return "${appGateway.id()}/backendAddressPools/${serverGroupName}"
+ }
+
+ null
+ }
+
+ /**
+ * It creates the backend address pool entry corresponding to the server group in the selected load balancer
+ * This will be later used as a parameter in the create server group deployment template
+ * @param resourceGroupName the name of the resource group to look into
+ * @param loadBalancerName the name of the load balancer
+ * @param serverGroupName the name of the server group
+ * @return a resource id for the backend address pool that got created or null/Runtime Exception if something went wrong
+ */
+ String createLoadBalancerAPforServerGroup(String resourceGroupName, String loadBalancerName, String serverGroupName) {
+ def loadBalancer = executeOp({
+ azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
+ })
+
+ if (loadBalancer) {
+ // the load balancer must have a backend address pool list (even if it might be empty)
+ if (!loadBalancer.backends()?.containsKey(serverGroupName)) {
+ loadBalancer.update()
+ .defineBackend(serverGroupName)
+ .attach()
+ .apply()
+
+ log.info("Adding backend address pool to ${loadBalancer.name()} for server group ${serverGroupName}")
+ }
+
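+ // The id returned below follows the ARM resource-id convention, e.g. (hypothetical id):
+ // .../providers/Microsoft.Network/loadBalancers/myapp-lb/backendAddressPools/myapp-v001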
"${loadBalancer.id()}/backendAddressPools/${serverGroupName}" + } else { + throw new RuntimeException("Load balancer ${loadBalancerName} not found in resource group ${resourceGroupName}") + } + + return null + } + + /** + * It removes the server group corresponding backend address pool item from the selected load balancer (see disable/destroy server group op) + * @param resourceGroupName the name of the resource group to look into + * @param loadBalancerName the name of the load balancer + * @param serverGroupName the name of the server group + * @return a resource id for the backend address pool that was removed or null/Runtime Exception if something went wrong + */ + String removeLoadBalancerAPforServerGroup(String resourceGroupName, String loadBalancerName, String serverGroupName) { + def loadBalancer = executeOp({ + azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName) + }) + + if (loadBalancer) { + def lbAP = loadBalancer.backends().get(serverGroupName) + if (lbAP) { + def chain = loadBalancer.update() + .withoutBackend(lbAP.name()) + + chain.apply() + } + + return "${loadBalancer.id()}/backendAddressPools/${serverGroupName}" + } else { + throw new RuntimeException("Load balancer ${loadBalancerName} not found in resource group ${resourceGroupName}") + } + + null + } + + /** + * It creates the server group corresponding nat pool entry in the selected load balancer + * This will be later used as a parameter in the create server group deployment template + * @param resourceGroupName the name of the resource group to look into + * @param loadBalancerName the of the application gateway + * @param serverGroupName the of the application gateway + * @return a resource id for the nat pool that got created or null/Runtime Exception if something went wrong + */ + String createLoadBalancerNatPoolPortRangeforServerGroup(String resourceGroupName, String loadBalancerName, String serverGroupName) { + def loadBalancer = executeOp({ + azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName) + }) + + if (loadBalancer) { + // Fetch the front end name, which will be used in NAT pool. 
+ // Fetch the front end name, which will be used in the NAT pool; if no front end is configured, return null
+ Set<String> frontEndSet = loadBalancer.frontends().keySet()
+ if (frontEndSet.size() == 0) {
+ return null
+ }
+ String frontEndName = frontEndSet.iterator().next()
+ // Collect the frontend port ranges already used by the existing inbound NAT pools (the list may be empty)
+ List<int[]> usedPortList = new ArrayList<>()
+ for (LoadBalancerInboundNatPool pool : loadBalancer.inboundNatPools().values()) {
+ int[] range = new int[2]
+ range[0] = pool.frontendPortRangeStart()
+ range[1] = pool.frontendPortRangeEnd()
+ usedPortList.add(range)
+ }
+ usedPortList.sort(true, new Comparator<int[]>() {
+ @Override
+ int compare(int[] o1, int[] o2) {
+ return o1[0] - o2[0] }
+ })
- executeOp({appGatewayOps.createOrUpdate(resourceGroupName, appGatewayName, appGateway)})
+ if (loadBalancer.inboundNatPools()?.containsKey(serverGroupName)) {
+ return loadBalancer.inboundNatPools().get(serverGroupName).innerModel().id()
+ }
+ int portStart = findUnusedPortsRange(usedPortList, NAT_POOL_PORT_START, NAT_POOL_PORT_END, NAT_POOL_PORT_NUMBER_PER_POOL)
+ if (portStart == -1) {
+ throw new RuntimeException("Load balancer ${loadBalancerName} does not have unused port between ${NAT_POOL_PORT_START} and ${NAT_POOL_PORT_END} with length ${NAT_POOL_PORT_NUMBER_PER_POOL}")
+ }
+
+ // The purpose of the following code is to create an inbound NAT pool in an existing Azure load balancer.
+ // The Azure Java SDK doesn't expose a public way to do this, so reflection is used to invoke
+ // the non-public fluent methods on the NAT pool definition.
+ LoadBalancerInboundNatPool.UpdateDefinitionStages.WithAttach<LoadBalancer.Update> update = loadBalancer.update()
+ .defineInboundNatPool(serverGroupName)
+ .withProtocol(TransportProtocol.TCP)
+
+ try {
+ Method setRangeMethod = update.getClass().getMethod("fromFrontendPortRange", int.class, int.class)
+ setRangeMethod.setAccessible(true)
+ setRangeMethod.invoke(update, portStart, portStart + NAT_POOL_PORT_NUMBER_PER_POOL - 1)
+
+ Method setFrontendMethod = update.getClass().getMethod("fromFrontend", String.class)
+ setFrontendMethod.setAccessible(true)
+ setFrontendMethod.invoke(update, frontEndName)
+
+ Method setBackendPortMethod = update.getClass().getMethod("toBackendPort", int.class)
+ setBackendPortMethod.setAccessible(true)
+ setBackendPortMethod.invoke(update, 22)
+ } catch (NoSuchMethodException e) {
+ log.error("Failed to use reflection to create NAT pool in Load Balancer, detail: {}", e.getMessage())
+ return null
+ } catch (IllegalAccessException e) {
+ log.error("Failed to use reflection to create NAT pool in Load Balancer, detail: {}", e.getMessage())
+ return null
+ } catch (InvocationTargetException e) {
+ log.error("Failed to use reflection to create NAT pool in Load Balancer, detail: {}", e.getMessage())
+ return null
}
- return "${appGateway.id}/backendAddressPools/${serverGroupName}"
+ update.attach().apply()
+ return loadBalancer.inboundNatPools().get(serverGroupName).innerModel().id()
+
+ } else {
+ throw new RuntimeException("Load balancer ${loadBalancerName} not found in resource group ${resourceGroupName}")
}
null
}
+ // Find an unused port range. Each element of usedList is an int[2] of {portStart, portEnd}, both inclusive.
+ // usedList must be sorted in ascending order of portStart.
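+ // Worked example (illustrative values): with start = 50000, end = 59999, targetLength = 100
+ // and usedList = [[50000, 50099]], the candidate block 50000..50099 overlaps the used range,
+ // so the scan advances past it and returns 50100 as the start of a free block.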
+  // Find an unused port range. Each element of usedList is an int[2] of {portStart, portEnd},
+  // and usedList must be sorted in ascending order of element[0]
+  private int findUnusedPortsRange(List usedList, int start, int end, int targetLength) {
+    int ret = start
+    int retEnd = ret + targetLength
+    if (retEnd > end) return -1
+    for (int[] p : usedList) {
+      if (p[0] > retEnd) return ret
+      ret = p[1] + 1
+      retEnd = ret + targetLength
+      if (retEnd > end) return -1
+    }
+    return ret
+  }
+
+  /**
+   * It removes the inbound NAT pool entry corresponding to the server group from the selected load balancer (see disable/destroy server group op)
+   * @param resourceGroupName the name of the resource group to look into
+   * @param loadBalancerName the name of the load balancer
+   * @param serverGroupName the name of the server group
+   * @return a resource id for the nat pool that was removed or null/Runtime Exception if something went wrong
+   */
+  String removeLoadBalancerNatPoolPortRangeforServerGroup(String resourceGroupName, String loadBalancerName, String serverGroupName) {
+    def loadBalancer = executeOp({
+      azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
+    })
+
+    String id
+    if (loadBalancer) {
+      if (loadBalancer.inboundNatPools()?.containsKey(serverGroupName)) {
+        id = loadBalancer.inboundNatPools().get(serverGroupName).innerModel().id()
+
+        loadBalancer.update()
+          .withoutInboundNatPool(serverGroupName)
+          .apply()
+
+      } else {
+        throw new RuntimeException("Load balancer nat pool ${serverGroupName} not found in load balancer ${loadBalancerName}")
+      }
+    }
+
+    id
+  }
+
 /**
  * It enables a server group that is attached to an Application Gateway resource in Azure
  * @param resourceGroupName name of the resource group where the Application Gateway resource was created (see application name and region/location)
@@ -376,28 +554,32 @@ class AzureNetworkClient extends AzureBaseClient {
  * @param serverGroupName name of the server group to be enabled
  * @return a ServiceResponse object
  */
-  ServiceResponse enableServerGroup(String resourceGroupName, String appGatewayName, String serverGroupName) {
-    def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body
+  void enableServerGroupWithAppGateway(String resourceGroupName, String appGatewayName, String serverGroupName) {
+    def appGateway = executeOp({
+      azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName)
+    })

     if (appGateway) {
-      def agBAP = appGateway.backendAddressPools?.find { it.name == serverGroupName}
+      def agBAP = appGateway.backends().get(serverGroupName)
       if (!agBAP) {
         def errMsg = "Backend address pool ${serverGroupName} not found in ${appGatewayName}"
         log.error(errMsg)
         throw new RuntimeException(errMsg)
       }

-      appGateway.requestRoutingRules.each {
-        it.backendAddressPool.id = agBAP.id
+      appGateway.requestRoutingRules().each { name, rule ->
+        appGateway.update()
+          .updateRequestRoutingRule(name)
+          .toBackend(agBAP.name())
+          .parent()
+          .apply()
       }

       // Store active server group in the tags map to ease debugging the operation; we could probably remove this later on
-      appGateway.tags.trafficEnabledSG = serverGroupName
-
-      return executeOp({appGatewayOps.createOrUpdate(resourceGroupName, appGatewayName, appGateway)})
+      appGateway.update()
+        .withTag("trafficEnabledSG", serverGroupName)
+        .apply()
     }
-
-    null
   }

 /**
@@ -407,18 +589,20 @@ class AzureNetworkClient extends AzureBaseClient {
  * @param serverGroupName name of the server group to be disabled
 * @return a ServiceResponse object (null if no updates were performed)
 */
-  ServiceResponse disableServerGroup(String resourceGroupName, String appGatewayName, String serverGroupName) {
-    def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body
+  void disableServerGroup(String resourceGroupName, String appGatewayName, String serverGroupName) {
+    def appGateway = executeOp({
+      azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName)
+    })

     if (appGateway) {
-      def defaultBAP = appGateway.backendAddressPools?.find { it.name == AzureAppGatewayResourceTemplate.defaultAppGatewayBeAddrPoolName }
+      def defaultBAP = appGateway.backends().get(AzureAppGatewayResourceTemplate.defaultAppGatewayBeAddrPoolName)
       if (!defaultBAP) {
         def errMsg = "Backend address pool ${AzureAppGatewayResourceTemplate.defaultAppGatewayBeAddrPoolName} not found in ${appGatewayName}"
         log.error(errMsg)
         throw new RuntimeException(errMsg)
       }

-      def agBAP = appGateway.backendAddressPools?.find { it.name == serverGroupName}
+      def agBAP = appGateway.backends().get(serverGroupName)
       if (!agBAP) {
         def errMsg = "Backend address pool ${serverGroupName} not found in ${appGatewayName}"
         log.error(errMsg)
@@ -427,23 +611,42 @@ class AzureNetworkClient extends AzureBaseClient {

       // Check if the current server group is the traffic enabled one and remove it (set default BAP as the active BAP)
       // otherwise return (no updates are needed)
-      def requestedRoutingRules = appGateway.requestRoutingRules?.findAll() {
-        it.backendAddressPool.id == agBAP.id
+      def requestedRoutingRules = appGateway.requestRoutingRules()?.findAll() { name, rule ->
+        rule.backend() == agBAP
       }

       if (requestedRoutingRules) {
-        requestedRoutingRules.each {
-          it.backendAddressPool.id = defaultBAP.id
+        requestedRoutingRules.each { name, rule ->
+          appGateway.update()
+            .updateRequestRoutingRule(name)
+            .toBackend(defaultBAP.name())
+            .parent()
+            .apply()
         }

         // Clear active server group (if any) from the tags map to ease debugging the operation; we will clean this later
-        appGateway.tags.remove("trafficEnabledSG")
-
-        return executeOp({ appGatewayOps.createOrUpdate(resourceGroupName, appGatewayName, appGateway) })
+        appGateway.update()
+          .withoutTag("trafficEnabledSG")
+          .apply()
       }
     }
+  }
-
-    null
+
+  /**
+   * Checks if a server group, not associated with a load balancer, is "enabled". Because "enabled" means "can receive traffic",
+   * and there is no concept of shifting traffic in server groups not fronted by load balancers,
+   * we treat an instance count of 0 as a proxy for it being disabled.
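+   * For example, a scale set scaled down to zero instances is reported as disabled here;
+   * once one or more instances exist again it is considered enabled.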
+   * @param resourceGroupName name of the resource group where the server group was created (see application name and region/location)
+   * @param serverGroupName name of the server group to check
+   * @return true if instance count is 0, false otherwise
+   */
+  Boolean isServerGroupWithoutLoadBalancerDisabled(String resourceGroupName, String serverGroupName) {
+    VirtualMachineScaleSet scaleSet = executeOp({
+      azure.virtualMachineScaleSets().getByResourceGroup(resourceGroupName, serverGroupName)
+    })
+
+    VirtualMachineScaleSetVMs machines = scaleSet.virtualMachines()
+    machines.list().size() == 0
   }

 /**
@@ -453,18 +656,155 @@ class AzureNetworkClient extends AzureBaseClient {
  * @param serverGroupName name of the server group to be disabled
  * @return true or false
  */
-  Boolean isServerGroupDisabled(String resourceGroupName, String appGatewayName, String serverGroupName) {
-    def appGateway = executeOp({appGatewayOps.get(resourceGroupName, appGatewayName)})?.body
+  Boolean isServerGroupWithAppGatewayDisabled(String resourceGroupName, String appGatewayName, String serverGroupName) {
+    def appGateway = executeOp({
+      azure.applicationGateways().getByResourceGroup(resourceGroupName, appGatewayName)
+    })

     if (appGateway) {
-      def agBAP = appGateway.backendAddressPools?.find { it.name == serverGroupName }
+      def agBAP = appGateway.backends().get(serverGroupName)
       if (agBAP) {
         // Check if the current server group is the traffic enabled one
-        def requestedRoutingRule = appGateway.requestRoutingRules?.find() {
-          it.backendAddressPool.id == agBAP.id
+        def requestedRoutingRules = appGateway.requestRoutingRules()?.find() { name, rule ->
+          rule.backend() == agBAP
+        }
+
+        if (requestedRoutingRules != null) {
+          return false
+        }
+      }
+    }
+
+    true
+  }
+
+  /**
+   * It enables a server group that is attached to an Azure Load Balancer in Azure
+   * @param resourceGroupName name of the resource group where the Azure Load Balancer resource was created (see application name and region/location)
+   * @param loadBalancerName the name of the Azure Load Balancer
+   * @param serverGroupName name of the server group to be enabled
+   */
+  void enableServerGroupWithLoadBalancer(String resourceGroupName, String loadBalancerName, String serverGroupName) {
+    def loadBalancer = executeOp({
+      azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
+    })
+
+    if (loadBalancer) {
+      def lbBAP = loadBalancer.backends().get(serverGroupName)
+      if (!lbBAP) {
+        def errMsg = "Backend address pool ${serverGroupName} not found in ${loadBalancerName}"
+        log.error(errMsg)
+        throw new RuntimeException(errMsg)
+      }
+      loadBalancer.loadBalancingRules().each { name, rule ->
+        // Use reflection to point the rule at the server group's backend pool because the Azure Java SDK doesn't expose this stage publicly
+        Object o = loadBalancer.update()
+          .updateLoadBalancingRule(name)
+        try {
+          Method toBackendMethod = o.getClass().getMethod("toBackend", String.class)
+          toBackendMethod.setAccessible(true)
+          toBackendMethod.invoke(o, serverGroupName)
+          LoadBalancingRule lbrule = o as LoadBalancingRule
+          lbrule.innerModel().withBackendAddressPools(List.of(lbrule.innerModel().backendAddressPool()))
+        } catch (NoSuchMethodException e) {
+          log.error("Failed to use reflection to set backend of rule in Load Balancer, detail: {}", e.getMessage())
+          return
+        } catch (IllegalAccessException e) {
+          log.error("Failed to use reflection to set backend of rule in Load Balancer, detail: {}", e.getMessage())
+          return
+        } catch (InvocationTargetException e) {
+          log.error("Failed to use reflection to set backend of rule in Load Balancer, detail: {}", e.getMessage())
+          return
+        }
+
+        ((LoadBalancingRule.UpdateDefinitionStages.WithAttach)o).attach().apply()
+      }
+    }
+  }
+
+  /**
+   * It disables a server group that is attached to an Azure Load Balancer resource in Azure
+   * @param resourceGroupName name of the resource group where the Azure Load Balancer resource was created (see application name and region/location)
+   * @param loadBalancerName the name of the Azure Load Balancer
+   * @param serverGroupName name of the server group to be disabled
+   */
+  void disableServerGroupWithLoadBalancer(String resourceGroupName, String loadBalancerName, String serverGroupName) {
+    def loadBalancer = executeOp({
+      azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
+    })
+
+    if (loadBalancer) {
+      def defaultBAP = loadBalancer.backends().get(AzureLoadBalancerResourceTemplate.DEFAULT_BACKEND_POOL)
+      if (!defaultBAP) {
+        def errMsg = "Backend address pool ${AzureLoadBalancerResourceTemplate.DEFAULT_BACKEND_POOL} not found in ${loadBalancerName}"
+        log.error(errMsg)
+        throw new RuntimeException(errMsg)
+      }
+
+      def lbBAP = loadBalancer.backends().get(serverGroupName)
+      if (!lbBAP) {
+        def errMsg = "Backend address pool ${serverGroupName} not found in ${loadBalancerName}"
+        log.error(errMsg)
+        throw new RuntimeException(errMsg)
+      }
+
+      // Check if the current server group is the traffic enabled one and remove it (set default BAP as the active BAP)
+      // otherwise return (no updates are needed)
+      def requestedRoutingRules = loadBalancer.loadBalancingRules()?.findAll() { name, rule ->
+        rule.backend() == lbBAP
+      }
+
+      if (requestedRoutingRules) {
+        requestedRoutingRules.each { name, rule ->
+          // Use reflection to point the rule back at the default backend pool because the Azure Java SDK doesn't expose this stage publicly
+          Object o = loadBalancer.update()
+            .updateLoadBalancingRule(name)
+          try {
+            Method toBackendMethod = o.getClass().getMethod("toBackend", String.class)
+            toBackendMethod.setAccessible(true)
+            toBackendMethod.invoke(o, AzureLoadBalancerResourceTemplate.DEFAULT_BACKEND_POOL)
+            LoadBalancingRule lbrule = o as LoadBalancingRule
+            lbrule.innerModel().withBackendAddressPools(List.of(lbrule.innerModel().backendAddressPool()))
+          } catch (NoSuchMethodException e) {
+            log.error("Failed to use reflection to set backend of rule in Load Balancer, detail: {}", e.getMessage())
+            return
+          } catch (IllegalAccessException e) {
+            log.error("Failed to use reflection to set backend of rule in Load Balancer, detail: {}", e.getMessage())
+            return
+          } catch (InvocationTargetException e) {
+            log.error("Failed to use reflection to set backend of rule in Load Balancer, detail: {}", e.getMessage())
+            return
+          }

+          ((LoadBalancingRule.UpdateDefinitionStages.WithAttach)o).attach().apply()
+        }
+      }
+    }
+  }
+
+  /**
+   * Checks if a server group that is attached to an Azure Load Balancer resource in Azure is set to receive traffic
+   * @param resourceGroupName name of the resource group where the Azure Load Balancer resource was created (see application name and region/location)
+   * @param loadBalancerName the name of the Azure Load Balancer
+   * @param serverGroupName name of the server group to check
+   * @return true or false
+   */
+  Boolean isServerGroupWithLoadBalancerDisabled(String resourceGroupName, String loadBalancerName, String serverGroupName) {
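+    // A server group counts as traffic-enabled only while at least one load-balancing rule
+    // points at its backend address pool; the lookup below mirrors the application gateway
+    // variant of this check above.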
+    def loadBalancer = executeOp({
+      azure.loadBalancers().getByResourceGroup(resourceGroupName, loadBalancerName)
+    })
+
+    if (loadBalancer) {
+      def lbBAP = loadBalancer.backends().get(serverGroupName)
+      if (lbBAP) {
+        // Check if the current server group is the traffic enabled one
+        def requestedRoutingRules = loadBalancer.loadBalancingRules()?.find() { name, rule ->
+          rule.backend() == lbBAP
         }
-        if (requestedRoutingRule) {
+        if (requestedRoutingRules != null) {
           return false
         }
       }
@@ -479,10 +819,22 @@ class AzureNetworkClient extends AzureBaseClient {
  * @param securityGroupName name of the Azure network security group to delete
  * @return a ServiceResponse object
  */
-  ServiceResponse deleteSecurityGroup(String resourceGroupName, String securityGroupName) {
+  Response deleteSecurityGroup(String resourceGroupName, String securityGroupName) {
+    def associatedSubnets = azure.networkSecurityGroups().getByResourceGroup(resourceGroupName, securityGroupName).listAssociatedSubnets()
+
+    associatedSubnets?.each{ associatedSubnet ->
+      def subnetName = associatedSubnet.innerModel().name()
+      associatedSubnet
+        .parent()
+        .update()
+        .updateSubnet(subnetName)
+        .withoutNetworkSecurityGroup()
+        .parent()
+        .apply()
+    }

     deleteAzureResource(
-      networkSecurityGroupOperations.&delete,
+      azure.networkSecurityGroups().&deleteByResourceGroup,
       resourceGroupName,
       securityGroupName,
       null,
@@ -499,26 +851,14 @@ class AzureNetworkClient extends AzureBaseClient {
  */
 void createVirtualNetwork(String resourceGroupName, String virtualNetworkName, String region, String addressPrefix = AzureUtilities.VNET_DEFAULT_ADDRESS_PREFIX) {
   try {
-      List subnets = []
-
-      // Define address space
-      List addressPrefixes = []
-      addressPrefixes.add(addressPrefix)
-      AddressSpace addressSpace = new AddressSpace()
-      addressSpace.setAddressPrefixes(addressPrefixes)
-
-      // Define DHCP Options
-      DhcpOptions dhcpOptions = new DhcpOptions()
-      dhcpOptions.dnsServers = []
-
-      VirtualNetwork virtualNetwork = new VirtualNetwork()
-      virtualNetwork.setLocation(region)
-      virtualNetwork.setDhcpOptions(dhcpOptions)
-      virtualNetwork.setSubnets(subnets)
-      virtualNetwork.setAddressSpace(addressSpace)

       //Create the virtual network for the resource group
-      virtualNetworksOperations.createOrUpdate(resourceGroupName, virtualNetworkName, virtualNetwork)
+      azure.networks()
+        .define(virtualNetworkName)
+        .withRegion(region)
+        .withExistingResourceGroup(resourceGroupName)
+        .withAddressSpace(addressPrefix)
+        .create()
   } catch (e) {
     throw new RuntimeException("Unable to create Virtual network ${virtualNetworkName} in Resource Group ${resourceGroupName}", e)
@@ -535,26 +875,27 @@ class AzureNetworkClient extends AzureBaseClient {
  * @returns Resource ID of subnet created
  */
 String createSubnet(String resourceGroupName, String virtualNetworkName, String subnetName, String addressPrefix, String securityGroupName) {
-    Subnet subnet = new Subnet()
-    subnet.setAddressPrefix(addressPrefix)
+    def virtualNetwork = azure.networks().getByResourceGroup(resourceGroupName, virtualNetworkName)

-    if (securityGroupName) {
-      addSecurityGroupToSubnet(resourceGroupName, securityGroupName, subnet)
+    if (virtualNetwork == null) {
+      def error = "Virtual network: ${virtualNetworkName} not found when creating subnet: ${subnetName}"
+      log.error error
+      throw new RuntimeException(error)
     }

-    //This will throw an exception if the it fails.
If it returns then the call was successful - //Log the error Let it bubble up to the caller to handle as they see fit - try { - def op = subnetOperations.createOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnet) - - // Return the resource Id - op?.body?.id + def chain = virtualNetwork.update() + .defineSubnet(subnetName) + .withAddressPrefix(addressPrefix) - } catch (Exception e) { - // Add something to the log to show that the subnet creation failed then rethrow the exception - log.error("Unable to create subnet ${subnetName} in Resource Group ${resourceGroupName}") - throw e + if (securityGroupName) { + def sg = azure.networkSecurityGroups().getByResourceGroup(resourceGroupName, securityGroupName) + chain.withExistingNetworkSecurityGroup(sg) } + + chain.attach() + .apply() + + virtualNetwork.subnets().get(subnetName).innerModel().id() } /** @@ -565,10 +906,15 @@ class AzureNetworkClient extends AzureBaseClient { * @throws RuntimeException Throws RuntimeException if operation response indicates failure * @return a ServiceResponse object */ - ServiceResponse deleteSubnet(String resourceGroupName, String virtualNetworkName, String subnetName) { + Response deleteSubnet(String resourceGroupName, String virtualNetworkName, String subnetName) { deleteAzureResource( - subnetOperations.&delete, + { + String _resourceGroupName, String _subnetName, String _virtualNetworkName -> + azure.networks().getByResourceGroup(_resourceGroupName, _virtualNetworkName).update() + .withoutSubnet(_subnetName) + .apply() + }, resourceGroupName, subnetName, virtualNetworkName, @@ -577,11 +923,6 @@ class AzureNetworkClient extends AzureBaseClient { ) } - private void addSecurityGroupToSubnet(String resourceGroupName, String securityGroupName, Subnet subnet) { - def securityGroup = executeOp({networkSecurityGroupOperations.get(resourceGroupName, securityGroupName, null)})?.body - subnet.setNetworkSecurityGroup(securityGroup) - } - /** * Retrieve a collection of all network security groups for a give set of credentials and the location * @param region the location of the network security group @@ -591,18 +932,20 @@ class AzureNetworkClient extends AzureBaseClient { def result = new ArrayList() try { - def securityGroups = executeOp({networkSecurityGroupOperations.listAll()})?.body + def securityGroups = executeOp({ + azure.networkSecurityGroups().list() + }) def currentTime = System.currentTimeMillis() securityGroups?.each { item -> - if (item.location == region) { + if (item.innerModel().location() == region) { try { - def sgItem = getAzureSecurityGroupDescription(item) + def sgItem = getAzureSecurityGroupDescription(item.innerModel()) sgItem.lastReadTime = currentTime result += sgItem } catch (RuntimeException re) { // if we get a runtime exception here, log it but keep processing the rest of the // NSGs - log.error("Unable to process network security group ${item.name}: ${re.message}") + log.error("Unable to process network security group ${item.name()}: ${re.message}") } } } @@ -621,9 +964,11 @@ class AzureNetworkClient extends AzureBaseClient { */ AzureSecurityGroupDescription getNetworkSecurityGroup(String resourceGroupName, String securityGroupName) { try { - def securityGroup = executeOp({networkSecurityGroupOperations.get(resourceGroupName, securityGroupName, null)})?.body + def securityGroup = executeOp({ + azure.networkSecurityGroups().getByResourceGroup(resourceGroupName, securityGroupName) + }) def currentTime = System.currentTimeMillis() - def sgItem = 
getAzureSecurityGroupDescription(securityGroup)
+      def sgItem = getAzureSecurityGroupDescription(securityGroup.innerModel())
       sgItem.lastReadTime = currentTime

       return sgItem
@@ -634,41 +979,47 @@ class AzureNetworkClient extends AzureBaseClient {
     null
   }

-  private static AzureSecurityGroupDescription getAzureSecurityGroupDescription(NetworkSecurityGroup item) {
+  private static AzureSecurityGroupDescription getAzureSecurityGroupDescription(NetworkSecurityGroupInner item) {
     def sgItem = new AzureSecurityGroupDescription()
-
-    sgItem.name = item.name
-    sgItem.id = item.name
-    sgItem.location = item.location
-    sgItem.region = item.location
+    sgItem.name = item.name()
+    sgItem.id = item.name()
+    sgItem.location = item.location()
+    sgItem.region = item.location()
     sgItem.cloudProvider = "azure"
-    sgItem.provisioningState = item.provisioningState
-    sgItem.resourceGuid = item.resourceGuid
-    sgItem.resourceId = item.id
-    sgItem.tags = item.tags
-    def parsedName = Names.parseName(item.name)
-    sgItem.stack = item.tags?.stack ?: parsedName.stack
-    sgItem.detail = item.tags?.detail ?: parsedName.detail
-    sgItem.appName = item.tags?.appName ?: parsedName.app
-    sgItem.createdTime = item.tags?.createdTime?.toLong()
-    sgItem.type = item.type
+    sgItem.provisioningState = item.provisioningState()
+    sgItem.resourceGuid = item.id()
+    sgItem.resourceId = item.id()
+    sgItem.tags.putAll(item.tags())
+    def parsedName = Names.parseName(item.name())
+    sgItem.stack = item.tags()?.stack ?: parsedName.stack
+    sgItem.detail = item.tags()?.detail ?: parsedName.detail
+    sgItem.appName = item.tags()?.appName ?: parsedName.app
+    sgItem.createdTime = item.tags()?.createdTime?.toLong()
+    sgItem.type = item.type()
     sgItem.securityRules = new ArrayList()
-    item.securityRules?.each {rule -> sgItem.securityRules += new AzureSecurityGroupDescription.AzureSGRule(
-      resourceId: rule.id,
-      id: rule.name,
-      name: rule.name,
-      access: rule.access,
-      priority: rule.priority,
-      protocol: rule.protocol,
-      direction: rule.direction,
-      destinationAddressPrefix: rule.destinationAddressPrefix,
-      destinationPortRange: rule.destinationPortRange,
-      sourceAddressPrefix: rule.sourceAddressPrefix,
-      sourcePortRange: rule.sourcePortRange) }
+    item.securityRules().each { rule ->
+      sgItem.securityRules += new AzureSecurityGroupDescription.AzureSGRule(
+        resourceId: rule.id(),
+        id: rule.name(),
+        name: rule.name(),
+        access: rule.access().toString(),
+        priority: rule.priority(),
+        protocol: rule.protocol().toString(),
+        direction: rule.direction().toString(),
+        destinationAddressPrefix: rule.destinationAddressPrefix(),
+        destinationPortRange: rule.destinationPortRange(),
+        destinationPortRanges: rule.destinationPortRanges(),
+        destinationPortRangeModel: rule.destinationPortRange() ? rule.destinationPortRange() : rule.destinationPortRanges()?.toString()?.replaceAll("[^(0-9),-]", ""),
+        sourceAddressPrefix: rule.sourceAddressPrefix(),
+        sourceAddressPrefixes: rule.sourceAddressPrefixes(),
+        sourceAddressPrefixModel: rule.sourceAddressPrefix() ? 
rule.sourceAddressPrefix() : rule.sourceAddressPrefixes()?.toString()?.replaceAll("[^(0-9a-zA-Z)./,:]", ""), + sourcePortRange: rule.sourcePortRange()) + } + sgItem.subnets = new ArrayList() - item.subnets?.each { sgItem.subnets += AzureUtilities.getNameFromResourceId(it.id) } + item.subnets()?.each { sgItem.subnets += AzureUtilities.getNameFromResourceId(it.id()) } sgItem.networkInterfaces = new ArrayList() - item.networkInterfaces?.each { sgItem.networkInterfaces += it.id } + item.networkInterfaces()?.each { sgItem.networkInterfaces += it.id() } sgItem } @@ -682,18 +1033,20 @@ class AzureNetworkClient extends AzureBaseClient { def result = new ArrayList() try { - def vnets = executeOp({virtualNetworksOperations.listAll()})?.body + def vnets = executeOp({ + azure.networks().list() + }) def currentTime = System.currentTimeMillis() - vnets?.each { item-> - if (item.location == region) { + vnets?.each { item -> + if (item.innerModel().location() == region) { try { - AzureSubnetDescription.getSubnetsForVirtualNetwork(item).each { AzureSubnetDescription subnet -> + AzureSubnetDescription.getSubnetsForVirtualNetwork(item.innerModel()).each { AzureSubnetDescription subnet -> subnet.lastReadTime = currentTime result += subnet } } catch (RuntimeException re) { // if we get a runtime exception here, log it but keep processing the rest of the subnets - log.error("Unable to process subnets for virtual network ${item.name}", re) + log.error("Unable to process subnets for virtual network ${item.name()}", re) } } } @@ -710,8 +1063,10 @@ class AzureNetworkClient extends AzureBaseClient { * @param virtualNetworkName name of the virtual network to get * @return virtual network instance, or null if it does not exist */ - VirtualNetwork getVirtualNetwork(String resourceGroupName, String virtualNetworkName) { - executeOp({virtualNetworksOperations.get(resourceGroupName, virtualNetworkName, null)})?.body + Network getVirtualNetwork(String resourceGroupName, String virtualNetworkName) { + executeOp({ + azure.networks().getByResourceGroup(resourceGroupName, virtualNetworkName) + }) } /** @@ -719,31 +1074,33 @@ class AzureNetworkClient extends AzureBaseClient { * @param region the location of the virtual network * @return a Collection of objects which represent a Virtual Network in Azure */ - Collection getVirtualNetworksAll(String region){ + Collection getVirtualNetworksAll(String region) { def result = new ArrayList() try { - def vnetList = executeOp({virtualNetworksOperations.listAll()})?.body + def vnetList = executeOp({ + azure.networks().list() + }) def currentTime = System.currentTimeMillis() vnetList?.each { item -> - if (item.location == region) { + if (item.innerModel().location() == region) { try { - if (item?.addressSpace?.addressPrefixes?.size() != 1) { - log.warn("Virtual Network found with ${item?.addressSpace?.addressPrefixes?.size()} address spaces; expected: 1") + if (item?.innerModel().addressSpace()?.addressPrefixes()?.size() != 1) { + log.warn("Virtual Network found with ${item?.innerModel().addressSpace()?.addressPrefixes()?.size()} address spaces; expected: 1") } - def vnet = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(item) - vnet.subnets = AzureSubnetDescription.getSubnetsForVirtualNetwork(item) + def vnet = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(item.innerModel()) + vnet.subnets = AzureSubnetDescription.getSubnetsForVirtualNetwork(item.innerModel()).toList() - def appGateways = executeOp({appGatewayOps.listAll()})?.body - 
AzureSubnetDescription.getAppGatewaysConnectedResources(vnet, appGateways.findAll {it.location == region}) +// def appGateways = executeOp({ appGatewayOps.listAll() })?.body +// AzureSubnetDescription.getAppGatewaysConnectedResources(vnet, appGateways.findAll { it.location == region }) vnet.lastReadTime = currentTime result += vnet } catch (RuntimeException re) { // if we get a runtime exception here, log it but keep processing the rest of the // virtual networks - log.error("Unable to process virtual network ${item.name}", re) + log.error("Unable to process virtual network ${item.innerModel().name()}", re) } } } @@ -764,12 +1121,11 @@ class AzureNetworkClient extends AzureBaseClient { String dnsName = "dns-not-found" try { - PublicIPAddress publicIp = publicIpName ? - executeOp({publicIPAddressOperations.get( - resourceGroupName, - publicIpName, null)} - )?.body : null - if (publicIp?.dnsSettings?.fqdn) dnsName = publicIp.dnsSettings.fqdn + PublicIpAddress publicIp = publicIpName ? + executeOp({ + azure.publicIpAddresses().getByResourceGroup(resourceGroupName, publicIpName) + }) : null + if (publicIp?.fqdn()) dnsName = publicIp.fqdn() } catch (Exception e) { log.error("getDnsNameForPublicIp -> Unexpected exception ", e) } @@ -777,14 +1133,9 @@ class AzureNetworkClient extends AzureBaseClient { dnsName } - private NetworkManagementClient initializeClient(ApplicationTokenCredentials tokenCredentials) { - NetworkManagementClient networkManagementClient = new NetworkManagementClientImpl(tokenCredentials) - networkManagementClient.setSubscriptionId(this.subscriptionId) - networkManagementClient.setLogLevel(HttpLoggingInterceptor.Level.NONE) - - setUserAgent(networkManagementClient, userAgentApplicationName, true) - - networkManagementClient + Boolean checkDnsNameAvailability(String dnsName) { + def isAvailable = azure.trafficManagerProfiles().checkDnsNameAvailability(dnsName) + isAvailable } /*** @@ -795,5 +1146,4 @@ class AzureNetworkClient extends AzureBaseClient { String getProviderNamespace() { "Microsoft.Network" } - } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureResourceManagerClient.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureResourceManagerClient.groovy index 4bc7f8d2f4f..776625db8fb 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureResourceManagerClient.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureResourceManagerClient.groovy @@ -16,58 +16,32 @@ package com.netflix.spinnaker.clouddriver.azure.client -import com.fasterxml.jackson.databind.ObjectMapper -import com.microsoft.azure.CloudException -import com.microsoft.azure.credentials.ApplicationTokenCredentials -import com.microsoft.azure.management.network.models.VirtualNetwork -import com.microsoft.azure.management.resources.DeploymentOperationsOperations -import com.microsoft.azure.management.resources.DeploymentsOperations -import com.microsoft.azure.management.resources.ProvidersOperations -import com.microsoft.azure.management.resources.ResourceGroupsOperations -import com.microsoft.azure.management.resources.ResourceManagementClientImpl -import com.microsoft.azure.management.resources.ResourcesOperations -import com.microsoft.azure.management.resources.models.Deployment -import com.microsoft.azure.management.resources.models.DeploymentExtended -import com.microsoft.azure.management.resources.models.DeploymentMode -import 
com.microsoft.azure.management.resources.models.DeploymentOperation -import com.microsoft.azure.management.resources.models.DeploymentProperties -import com.microsoft.azure.management.resources.models.ResourceGroup -import com.microsoft.azure.management.resources.ResourceManagementClient +import com.azure.core.credential.TokenCredential +import com.azure.core.management.exception.ManagementException +import com.azure.core.management.profile.AzureProfile +import com.azure.resourcemanager.network.models.Network +import com.azure.resourcemanager.resources.models.Deployment +import com.azure.resourcemanager.resources.models.DeploymentMode +import com.azure.resourcemanager.resources.models.DeploymentOperation +import com.azure.resourcemanager.resources.models.Provider +import com.azure.resourcemanager.resources.models.ResourceGroup import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities -import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials -import groovy.transform.Canonical +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import okhttp3.logging.HttpLoggingInterceptor +@CompileStatic @Slf4j class AzureResourceManagerClient extends AzureBaseClient { - private final ResourceManagementClient client - /** * Client for communication with Azure Resource Management * @param subscriptionId - Azure Subscription ID * @param credentials - Token Credentials to use for communication with Auzre */ - AzureResourceManagerClient(String subscriptionId, ApplicationTokenCredentials credentials, String userAgentApplicationName = "") { - super(subscriptionId, userAgentApplicationName) - this.client = initializeClient(credentials) + AzureResourceManagerClient(String subscriptionId, TokenCredential credentials, AzureProfile azureProfile) { + super(subscriptionId, azureProfile, credentials) } - @Lazy - ResourceGroupsOperations resourceGroupOperations = { client.getResourceGroupsOperations() }() - - @Lazy - DeploymentOperationsOperations deploymentOperationOperations = { client.getDeploymentOperationsOperations() }() - - @Lazy - DeploymentsOperations deploymentOperations = {client.getDeploymentsOperations()}() - - @Lazy - ResourcesOperations resourceOperations = {client.getResourcesOperations()}() - - @Lazy - ProvidersOperations providerOperations = {client.getProvidersOperations()}() /** * Create a given set of resources in Azure based on template provided @@ -80,26 +54,23 @@ class AzureResourceManagerClient extends AzureBaseClient { * @param templateParams - key/value list of parameters to pass to the template * @return */ - DeploymentExtended createResourceFromTemplate(String template, - String resourceGroupName, - String region, - String resourceName, - String resourceType, - Map templateParams = [:]) { + Deployment createResourceFromTemplate(String template, + String resourceGroupName, + String region, + String resourceName, + String resourceType, + Map templateParams = [:]) { String deploymentName = [resourceName, resourceType, "deployment"].join(AzureUtilities.NAME_SEPARATOR) if (!templateParams['location']) { templateParams['location'] = region } - DeploymentExtended deployment = createTemplateDeployment(client, - resourceGroupName, + createTemplateDeployment(resourceGroupName, DeploymentMode.INCREMENTAL, deploymentName, template, templateParams) - - deployment } /** @@ -112,10 +83,10 @@ class AzureResourceManagerClient extends AzureBaseClient { try { //Create an instance of the resource group to be passed as the "parameters" for the createOrUpdate method 
//Set appropriate attributes of instance to define resource group - ResourceGroup resourceGroup = new ResourceGroup() - resourceGroup.setLocation(region) - - resourceGroupOperations.createOrUpdate(resourceGroupName,resourceGroup)?.body + azure.resourceGroups() + .define(resourceGroupName) + .withRegion(region) + .create() } catch (e) { throw new RuntimeException("Unable to create Resource Group ${resourceGroupName} in region ${region}", e) @@ -130,18 +101,16 @@ class AzureResourceManagerClient extends AzureBaseClient { * @param region - Azure region * @return - instance of the Azure SDK ResourceGroup class */ - ResourceGroup initializeResourceGroupAndVNet(AzureCredentials creds, String resourceGroupName, String virtualNetworkName, String region) { - ResourceGroup resourceGroupParameters = new ResourceGroup() - resourceGroupParameters.setLocation(region) + ResourceGroup initializeResourceGroupAndVNet(String resourceGroupName, String virtualNetworkName, String region) { ResourceGroup resourceGroup if (!resourceGroupExists(resourceGroupName)) { resourceGroup = createResourceGroup(resourceGroupName, region) } else { - resourceGroup = resourceGroupOperations.get(resourceGroupName)?.body + resourceGroup = getResourceGroup(resourceGroupName) } if (virtualNetworkName) { - initializeResourceGroupVNet(creds, resourceGroupName, virtualNetworkName, region) + initializeResourceGroupVNet(resourceGroupName, virtualNetworkName, region) } resourceGroup @@ -153,7 +122,11 @@ class AzureResourceManagerClient extends AzureBaseClient { * @return True if it already exists */ boolean resourceGroupExists(String resourceGroupName) { - resourceGroupOperations.checkExistence(resourceGroupName)?.body + azure.resourceGroups().contain(resourceGroupName) + } + + private ResourceGroup getResourceGroup(String resourceGroupName) { + azure.resourceGroups().getByName(resourceGroupName) } /** @@ -164,9 +137,14 @@ class AzureResourceManagerClient extends AzureBaseClient { * @return List of Azure SDK DeploymentOperations objects */ List getDeploymentOperations(String resourceGroupName, - String deploymentName, - Integer operationCount = 10) { - executeOp({deploymentOperationOperations.list(resourceGroupName, deploymentName, operationCount)})?.body + String deploymentName) { + executeOp({ + def list = azure.deployments() + .getByResourceGroup(resourceGroupName, deploymentName) + .deploymentOperations() + .list() + list.asList() + }) } /** @@ -175,8 +153,10 @@ class AzureResourceManagerClient extends AzureBaseClient { * @param deploymentName - name of the deployment * @return Azure SDK DeploymentExtended object */ - DeploymentExtended getDeployment(String resourceGroupName, String deploymentName) { - executeOp({deploymentOperations.get(resourceGroupName, deploymentName)})?.body + Deployment getDeployment(String resourceGroupName, String deploymentName) { + executeOp({ + azure.deployments().getByResourceGroup(resourceGroupName, deploymentName) + }) } /** @@ -184,7 +164,7 @@ class AzureResourceManagerClient extends AzureBaseClient { */ void healthCheck() { try { - resourceOperations.list(null, 1) + azure.genericResources().list() } catch (Exception e) { throw new Exception("Unable to ping Azure", e) @@ -198,16 +178,22 @@ class AzureResourceManagerClient extends AzureBaseClient { * @param virtualNetworkName - name of the virtual network to lookup/create * @param region - Azure region to lookup/create virtual network resource in */ - private static void initializeResourceGroupVNet(AzureCredentials creds, String resourceGroupName, 
String virtualNetworkName, String region) { - VirtualNetwork vNet = null + private void initializeResourceGroupVNet(String resourceGroupName, String virtualNetworkName, String region) { + Network vNet = null try { - vNet = creds.networkClient.getVirtualNetwork(resourceGroupName, virtualNetworkName) - } catch (CloudException ignore) { + vNet = azure.networks().getByResourceGroup(resourceGroupName, virtualNetworkName) + } catch (ManagementException ignore) { // Assumes that a cloud exception means that the rest call failed to locate the vNet log.warn("Failed to locate Azure Virtual Network ${virtualNetworkName}") } - if (!vNet) vNet = creds.networkClient.createVirtualNetwork(resourceGroupName, virtualNetworkName, region) + if (!vNet) { + azure.networks() + .define(virtualNetworkName) + .withRegion(region) + .withExistingResourceGroup(resourceGroupName) + .create() + } } /** @@ -220,41 +206,21 @@ class AzureResourceManagerClient extends AzureBaseClient { * @param templateParameters - key/value list of parameters that will be passed to the template * @return Azure Deployment object */ - private static DeploymentExtended createTemplateDeployment( - ResourceManagementClient resourceManagementClient, + private Deployment createTemplateDeployment( String resourceGroupName, DeploymentMode deploymentMode, String deploymentName, String template, Map templateParameters) { - - DeploymentProperties deploymentProperties = new DeploymentProperties() - deploymentProperties.setMode(deploymentMode) - - // set the link to template JSON. - // Deserialize to pass it as an instance of a JSON Node object - deploymentProperties.setTemplate(mapper.readTree(template)) - - // initialize the parameters for this template. If the parameter is not a String, - // then treat it as a Reference Parameter - if (templateParameters) { - deploymentProperties.setParameters(mapper.readTree(convertParametersToTemplateJSON(mapper, templateParameters))) - } - - // kick off the deployment - Deployment deployment = new Deployment() - deployment.setProperties(deploymentProperties) - try { - return resourceManagementClient?. - getDeploymentsOperations()?. - createOrUpdate(resourceGroupName, deploymentName, deployment)?. - body - } catch (CloudException ce) { //TODO: (masm) move this error handling logic into the operation classes as part of refactoring how we monitor/report deployment operations/errors - def errorDetails = ce.body.details*.message.join('\n') - log.error("Azure Deployment Error: ${ce.body.message}. Error Details: {}", errorDetails) - throw ce - } catch (Exception e) { + String parameters = AzureUtilities.convertParametersToTemplateJSON(mapper, templateParameters) + return azure.deployments().define(deploymentName) + .withExistingResourceGroup(resourceGroupName) + .withTemplate(template) + .withParameters(parameters) + .withMode(deploymentMode) + .create() + } catch (Throwable e) { log.error("Exception occured during deployment ${e.message}") throw e } finally { @@ -266,41 +232,22 @@ class AzureResourceManagerClient extends AzureBaseClient { log.info("Template for deployment {}: {}\nTemplate Parameters: {}", deploymentName, template, parameters.toMapString()) } - static String convertParametersToTemplateJSON(ObjectMapper mapper, Map sourceParameters) { - def parameters = sourceParameters.collectEntries{[it.key, (it.value.class == String ? 
new ValueParameter(it.value) : new ReferenceParameter(it.value))]} - mapper.writeValueAsString(parameters) - } - - /** - * initialize the Azure client that will be used for interactions(s) with this provider in Azure - * @param credentials - Credentials that will be used for authentication with Azure - * @return - an initialized instance of the Azure ResourceManagementClient object - */ - private ResourceManagementClient initializeClient(ApplicationTokenCredentials credentials) { - ResourceManagementClient resourceManagementClient = new ResourceManagementClientImpl(credentials) - resourceManagementClient.setSubscriptionId(this.subscriptionId) - resourceManagementClient.setLogLevel(HttpLoggingInterceptor.Level.NONE) - - // Add Azure Spinnaker telemetry capturing - setUserAgent(resourceManagementClient, userAgentApplicationName) - - resourceManagementClient - } - /** * Register the Resource Provider in Azure * @param namespace - the namespace for the Resource Provider to register */ void registerProvider(String namespace) { try { - if (providerOperations.get(namespace)?.body?.registrationState != "Registered") { + Provider provider = azure.providers().getByName(namespace) + if (provider.registrationState() != "Registered") { log.info("Registering Azure provider: ${namespace}") - providerOperations.register(namespace) + azure.providers().register(namespace) log.info("Azure provider ${namespace} registered") } } catch (Exception e) { // Something went wrong. log the exception log.error("Unable to register Azure Provider: ${namespace}", e) + throw e } } @@ -312,15 +259,4 @@ class AzureResourceManagerClient extends AzureBaseClient { String getProviderNamespace() { "Microsoft.Resources" } - - @Canonical - private static class ValueParameter { - Object value - } - - @Canonical - private static class ReferenceParameter { - Object reference - } - } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureStorageClient.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureStorageClient.groovy index a280b5d5929..372e6fb2a14 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureStorageClient.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/client/AzureStorageClient.groovy @@ -16,44 +16,24 @@ package com.netflix.spinnaker.clouddriver.azure.client -import com.microsoft.azure.credentials.ApplicationTokenCredentials -import com.microsoft.azure.management.storage.StorageAccountsOperations -import com.microsoft.azure.management.storage.StorageManagementClient -import com.microsoft.azure.management.storage.StorageManagementClientImpl -import com.microsoft.azure.storage.blob.CloudBlobClient -import com.microsoft.azure.storage.blob.CloudBlobContainer -import com.microsoft.azure.storage.blob.CloudBlobDirectory -import com.microsoft.azure.storage.blob.ListBlobItem -import com.microsoft.azure.storage.CloudStorageAccount -import com.microsoft.rest.ServiceResponse +import com.azure.core.credential.TokenCredential +import com.azure.core.http.rest.Response +import com.azure.core.management.profile.AzureProfile +import com.azure.storage.blob.BlobContainerClient +import com.azure.storage.blob.BlobContainerClientBuilder +import com.azure.storage.blob.models.BlobItem import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomImageStorage import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomVMImage import 
groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import okhttp3.logging.HttpLoggingInterceptor @Slf4j @CompileStatic class AzureStorageClient extends AzureBaseClient { static final String AZURE_IMAGE_FILE_EXT = ".vhd" - private final StorageManagementClient client - - AzureStorageClient(String subscriptionId, ApplicationTokenCredentials credentials, String userAgentApplicationName) { - super(subscriptionId, userAgentApplicationName) - this.client = this.initialize(credentials) - } - - /** - * get the StorageManagementClient which will be used for all interaction related to compute resources in Azure - * @param creds the credentials to use when communicating to the Azure subscription(s) - * @return an instance of the Azure StorageManagementClient - */ - private StorageManagementClient initialize(ApplicationTokenCredentials tokenCredentials) { - StorageManagementClient storageClient = new StorageManagementClientImpl(tokenCredentials) - storageClient.setSubscriptionId(this.subscriptionId) - storageClient.setLogLevel(HttpLoggingInterceptor.Level.NONE) - storageClient + AzureStorageClient(String subscriptionId, TokenCredential credentials, AzureProfile azureProfile) { + super(subscriptionId, azureProfile, credentials) } /** @@ -63,11 +43,10 @@ class AzureStorageClient extends AzureBaseClient { * @throws RuntimeException Throws RuntimeException if operation response indicates failure * @return a ServiceResponse object */ - ServiceResponse deleteStorageAccount(String resourceGroupName, String storageName) { - StorageAccountsOperations ops = client.getStorageAccountsOperations() + Response deleteStorageAccount(String resourceGroupName, String storageName) { deleteAzureResource( - ops.&delete, + azure.storageAccounts().&deleteByResourceGroup, resourceGroupName, storageName, null, @@ -88,34 +67,24 @@ class AzureStorageClient extends AzureBaseClient { if (storage && storage.scs && storage.blobDir && storage.osType) { try { ArrayList blobDirectoryList = [] + blobDirectoryList.addAll(storage.blobDir.split("/")) + final BlobContainerClient blobContainerClient = new BlobContainerClientBuilder() + .connectionString(storage.scs) + .containerName(blobDirectoryList.remove(0)) - // Retrieve storage account from connection-string. - CloudStorageAccount storageAccount = CloudStorageAccount.parse(storage.scs) - - // retrieve the blob client. 
-        CloudBlobClient blobClient = storageAccount.createCloudBlobClient()
-        String dirDelimiter = blobClient.getDirectoryDelimiter()
-        blobDirectoryList.addAll(storage.blobDir.split(dirDelimiter))
-        def container = blobClient.getContainerReference(blobDirectoryList.remove(0))
-
-        if (container) {
+          .buildClient()
+        if (blobContainerClient.exists()) {
           if (blobDirectoryList.size()) {
-            def dir = blobDirectoryList.remove(0)
-            def blob = container.getDirectoryReference(dir)
+            String folderPath = blobDirectoryList.join("/")

-            while (blobDirectoryList.size()) {
-              dir = blobDirectoryList.remove(0)
-              blob = blob.getDirectoryReference(dir)
-            }
-            if (blob) {
-              getBlobsContent(blob, AZURE_IMAGE_FILE_EXT).each { String uri ->
-                vmImages.add(getAzureCustomVMImage(uri, dirDelimiter, storage.osType, storage.region))
+            getBlobsContent(blobContainerClient.listBlobsByHierarchy(folderPath).asList(), AZURE_IMAGE_FILE_EXT).each { String uri ->
+              vmImages.add(getAzureCustomVMImage(uri, '/', storage.osType, storage.region))
             }
-            }
+          } else {
-            getBlobsContent(container, AZURE_IMAGE_FILE_EXT).each { String uri ->
-              vmImages.add(getAzureCustomVMImage(uri, dirDelimiter, storage.osType, storage.region))
+            getBlobsContent(blobContainerClient, AZURE_IMAGE_FILE_EXT).each { String uri ->
+              vmImages.add(getAzureCustomVMImage(uri, "/", storage.osType, storage.region))
             }
           }
         }
@@ -133,16 +102,16 @@ class AzureStorageClient extends AzureBaseClient {

   /**
    * Return list of files in a CloudBlobDirectory matching a filter
-   * @param blobDir - CloudBlobDirectory to retrieve the content from
+   * @param blobItems - list of blob items to retrieve the content from
    * @param filter - extension of the files to be retrieved
    * @return List of URI strings corresponding to the files found
    */
-  static List getBlobsContent(CloudBlobDirectory blobDir, String filter) {
+  static List getBlobsContent(List blobItems, String filter) {
     def uriList = new ArrayList()

-    blobDir.listBlobs().each { ListBlobItem blob ->
-      if (blob.uri.toString().toLowerCase().endsWith(filter)) {
-        uriList.add(blob.uri.toString())
+    blobItems.each { BlobItem blob ->
+      if (blob.getName().toLowerCase().endsWith(filter)) {
+        uriList.add(blob.getName())
       }
     }

@@ -155,39 +124,12 @@
    * @param filter - extension of the files to be retrieved
    * @return List of URI strings corresponding to the files found
    */
-  static List getBlobsContent(CloudBlobContainer container, String filter) {
+  static List getBlobsContent(BlobContainerClient container, String filter) {
     def uriList = new ArrayList()

-    container?.listBlobs()?.each { ListBlobItem blob ->
-      if (blob.uri.toString().toLowerCase().endsWith(filter)) {
-        uriList.add(blob.uri.toString())
-      }
-    }
-
-    uriList
-  }
-
-  /**
-   * Return list of files in a CloudBlobDirectory matching a filter recursively
-   * @param blobDir - CloudBlobDirectory to retrieve the content from
-   * @param filter - extension of the files to be retrieved
-   * @return List of URI strings corresponding to the files found
-   */
-  static List getBlobsContentAll(CloudBlobDirectory blobDir, String filter) {
-    def uriList = new ArrayList()
-
-    blobDir?.listBlobs()?.each { ListBlobItem blob ->
-      try {
-        // try converting current blob item to a CloudBlobDirectory; if conversion fails an exception is thrown
-        CloudBlobDirectory blobDirectory = blob as CloudBlobDirectory
-        if (blobDirectory) {
-          uriList.addAll(getBlobsContentAll(blobDirectory, filter))
-        }
-      } catch(Exception e) {
-        // blob must be a regular item
-        if 
(blob.uri.toString().toLowerCase().endsWith(filter)) { - uriList.add(blob.uri.toString()) - } + container?.listBlobs()?.each { BlobItem blob -> + if (blob.getName().toLowerCase().endsWith(filter)) { + uriList.add(blob.getName()) } } @@ -200,21 +142,16 @@ class AzureStorageClient extends AzureBaseClient { * @param filter - extension of the files to be retrieved * @return List of URI strings corresponding to the files found */ - static List getBlobsContentAll(CloudBlobContainer container, String filter) { + static List getBlobsContentAll(BlobContainerClient container, String filter) { def uriList = new ArrayList() - container?.listBlobs()?.each { ListBlobItem blob -> - try { - CloudBlobDirectory blobDirectory = blob as CloudBlobDirectory - if (blobDirectory) { - uriList.addAll(getBlobsContentAll(blobDirectory, filter)) - } - } catch(Exception e) { - // blob must be a regular item - if (blob.uri.toString().toLowerCase().endsWith(filter)) { - uriList.add(blob.uri.toString()) + container?.listBlobs()?.each { BlobItem blob -> + + if (blob.isPrefix()) { + uriList.addAll(getBlobsContentAll(container.getBlobClient(blob.getName()).getContainerClient(), filter)) + } else if (blob.getName().toLowerCase().endsWith(filter)) { + uriList.add(blob.getName()) } - } } uriList diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureAtomicOperationConverterHelper.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureAtomicOperationConverterHelper.groovy index 303b23ac2e6..d20bcc19387 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureAtomicOperationConverterHelper.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureAtomicOperationConverterHelper.groovy @@ -37,7 +37,6 @@ class AzureAtomicOperationConverterHelper { // Save these to re-assign after ObjectMapper does its work. 
def credentials = input.remove("credentials")
-
     def converted = credentialsSupport.objectMapper
       .copy()
       .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilities.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilities.groovy
index 0d928592360..2a16ec187f7 100644
--- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilities.groovy
+++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilities.groovy
@@ -16,6 +16,7 @@
 package com.netflix.spinnaker.clouddriver.azure.common

+import com.fasterxml.jackson.databind.ObjectMapper
 import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription

 import java.util.regex.Matcher
@@ -45,6 +46,10 @@ class AzureUtilities {
   static final String AZURE_CUSTOM_SCRIPT_EXT_TYPE_WINDOWS="CustomScriptExtension"
   static final String AZURE_CUSTOM_SCRIPT_EXT_PUBLISHER_WINDOWS="Microsoft.Compute"
   static final String AZURE_CUSTOM_SCRIPT_EXT_VERSION_WINDOWS="1.8"
+  static final String AZURE_HEALTH_EXT_TYPE_WINDOWS="ApplicationHealthWindows"
+  static final String AZURE_HEALTH_EXT_TYPE_LINUX="ApplicationHealthLinux"
+  static final String AZURE_HEALTH_EXT_VERSION="1.0"
+  static final String AZURE_HEALTH_EXT_PUBLISHER="Microsoft.ManagedServices"

   static String getResourceGroupName(AzureResourceOpsDescription description) {
     if (description == null) {
@@ -290,6 +295,36 @@ class AzureUtilities {
     return resultPrefix
   }

+  static String convertParametersToTemplateJSON(ObjectMapper mapper, Map sourceParameters) {
+    Map map = new HashMap<>()
+    if (sourceParameters.size() == 0) return mapper.writeValueAsString(sourceParameters)
+    for (Map.Entry entry : sourceParameters.entrySet()) {
+      // Skip null values to avoid a null reference; the corresponding destination fields simply remain null by default.
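+      // For illustration (hypothetical values): the resulting JSON follows the ARM deployment
+      // parameter shape, e.g. {"vmName":{"value":"web1"},"keyVaultSecret":{"reference":{...}}}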
+      if (entry.value) {
+        if (entry.value.class == String) {
+          map.put(entry.key, new ValueParameter(entry.value))
+        } else {
+          map.put(entry.key, new ReferenceParameter(entry.value))
+        }
+      }
+    }
+    mapper.writeValueAsString(map)
+  }
+
+  static class ValueParameter {
+    Object value
+    ValueParameter(Object value) {
+      this.value = value
+    }
+  }
+
+  static class ReferenceParameter {
+    Object reference
+    ReferenceParameter(Object reference) {
+      this.reference = reference
+    }
+  }
+
   static class ProvisioningState {
     public static final String SUCCEEDED = "Succeeded"
     public static final String FAILED = "Failed"
diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/StandardAzureAttributeValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/StandardAzureAttributeValidator.groovy
index debc94d460d..a8e61d2c1df 100644
--- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/StandardAzureAttributeValidator.groovy
+++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/StandardAzureAttributeValidator.groovy
@@ -17,8 +17,8 @@
 package com.netflix.spinnaker.clouddriver.azure.common

 import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
-import org.springframework.validation.Errors

 class StandardAzureAttributeValidator {
   /**
@@ -29,7 +29,7 @@ class StandardAzureAttributeValidator {
   /**
    * Bound at construction, this is used to collect validation errors.
    */
-  Errors errors
+  ValidationErrors errors

   /**
    * Constructs validator for standard attributes added by GCE.
    *
    * @param context The owner of the attributes to be validated is typically a {@code *Description} class.
    * @param errors Accumulates and reports on the validation errors over the lifetime of this validator.
    */
-  StandardAzureAttributeValidator(String context, Errors errors) {
+  StandardAzureAttributeValidator(String context, ValidationErrors errors) {
     this.context = context
     this.errors = errors
   }
diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/cache/AzureCachingAgent.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/cache/AzureCachingAgent.groovy
index e9c24c34052..c8bc9536700 100644
--- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/cache/AzureCachingAgent.groovy
+++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/common/cache/AzureCachingAgent.groovy
@@ -28,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider
 import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.provider.AzureInfrastructureProvider
 import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials
 import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent
+import com.netflix.spinnaker.clouddriver.cache.OnDemandType

 abstract class AzureCachingAgent implements CachingAgent, OnDemandAgent, AccountAware {

@@ -84,7 +85,7 @@
   }

   @Override
-  boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) {
+  boolean handles(OnDemandType type, String cloudProvider) {
     type == getOnDemandType() && cloudProvider == azureCloudProvider.id
   }

@@ -101,7 +102,7 @@

   abstract Boolean validKeys(Map data)

-  abstract protected OnDemandAgent.OnDemandType getOnDemandType()
+  abstract protected OnDemandType getOnDemandType()

   def static parseOnDemandCache(Collection results, long lastReadTime) {
     List evictions = new ArrayList()
diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/AzureConfigurationProperties.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/AzureConfigurationProperties.groovy
index 282330fd3bb..6c1b8de79e9 100644
--- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/AzureConfigurationProperties.groovy
+++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/AzureConfigurationProperties.groovy
@@ -18,7 +18,10 @@ package com.netflix.spinnaker.clouddriver.azure.config

 import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomImageStorage
 import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureVMImage
+import com.netflix.spinnaker.fiat.model.resources.Permissions
+import groovy.transform.Canonical
 import groovy.transform.ToString
+import org.springframework.boot.context.properties.NestedConfigurationProperty

 class AzureConfigurationProperties {

@@ -36,7 +39,21 @@
     List customImages
     String defaultResourceGroup
     String defaultKeyVault
+    Boolean useSshPublicKey
+    Permissions.Builder permissions = new Permissions.Builder()
   }

   List accounts = []
+
+  /**
+   * Health check related config settings
+   */
+  @Canonical
+  static class HealthConfig {
+    /**
+     * Flag to toggle verifying the account health check. By default, the account health check is enabled.
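+     * Example (assuming these properties bind under the azure.health prefix in clouddriver config):
+     *   azure:
+     *     health:
+     *       verifyAccountHealth: false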
+ */ + boolean verifyAccountHealth = true + } + @NestedConfigurationProperty + final HealthConfig health = new HealthConfig() } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/ops/AzureOpsConfig.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/ops/AzureOpsConfig.groovy index ff84fd20be4..6df48d9fd1a 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/ops/AzureOpsConfig.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/ops/AzureOpsConfig.groovy @@ -30,7 +30,7 @@ import groovy.util.logging.Slf4j class AzureOpsConfig { AzureOpsConfig() { - log.info("Constructor....AzureOpsConfig") + log.trace("Constructor....AzureOpsConfig") } @Bean diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/view/AzureInfrastructureProviderConfig.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/view/AzureInfrastructureProviderConfig.groovy index ab085aceef3..86095ae45dd 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/view/AzureInfrastructureProviderConfig.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/config/view/AzureInfrastructureProviderConfig.groovy @@ -19,23 +19,22 @@ package com.netflix.spinnaker.clouddriver.azure.config.view import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.cache.AzureAppGatewayCachingAgent +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.cache.AzureLoadBalancerCachingAgent import com.netflix.spinnaker.clouddriver.azure.resources.network.cache.AzureNetworkCachingAgent import com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.cache.AzureSecurityGroupCachingAgent import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.cache.AzureServerGroupCachingAgent import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.cache.AzureCustomImageCachingAgent +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.cache.AzureManagedImageCachingAgent import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.provider.AzureInfrastructureProvider import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope import java.util.concurrent.ConcurrentHashMap @@ -59,27 +58,11 @@ class AzureInfrastructureProviderConfig { azureInfrastructureProvider } - @Bean - AzureInfrastructureProviderSynchronizerTypeWrapper azureInfrastructureProviderSynchronizerTypeWrapper() { - new AzureInfrastructureProviderSynchronizerTypeWrapper() - } - - class AzureInfrastructureProviderSynchronizerTypeWrapper 
implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return AzureInfrastructureProviderSynchronizer - } - } - - class AzureInfrastructureProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - AzureInfrastructureProviderSynchronizer synchronizeAzureInfrastructureProvider(AzureInfrastructureProvider azureInfrastructureProvider, - AzureCloudProvider azureCloudProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { + private static void synchronizeAzureInfrastructureProvider(AzureInfrastructureProvider azureInfrastructureProvider, + AzureCloudProvider azureCloudProvider, + AccountCredentialsRepository accountCredentialsRepository, + ObjectMapper objectMapper, + Registry registry) { def scheduledAccounts = ProviderUtils.getScheduledAccounts(azureInfrastructureProvider) def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, AzureNamedAccountCredentials) @@ -88,12 +71,14 @@ class AzureInfrastructureProviderConfig { if (!scheduledAccounts.contains(creds.accountName)) { def newlyAddedAgents = [] -// newlyAddedAgents << new AzureLoadBalancerCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper, registry) + newlyAddedAgents << new AzureLoadBalancerCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper, registry) newlyAddedAgents << new AzureSecurityGroupCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper, registry) newlyAddedAgents << new AzureNetworkCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper) // newlyAddedAgents << new AzureSubnetCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper) // newlyAddedAgents << new AzureVMImageCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper) newlyAddedAgents << new AzureCustomImageCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, creds.vmCustomImages, objectMapper) + newlyAddedAgents << new AzureManagedImageCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper) + newlyAddedAgents << new AzureServerGroupCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper, registry) newlyAddedAgents << new AzureAppGatewayCachingAgent(azureCloudProvider, creds.accountName, creds.credentials, region.name, objectMapper, registry) @@ -107,8 +92,6 @@ class AzureInfrastructureProviderConfig { } } } - - new AzureInfrastructureProviderSynchronizer() } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicator.groovy index 17512fdca94..9a5f613c1f8 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicator.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.azure.health +import com.netflix.spinnaker.clouddriver.azure.config.AzureConfigurationProperties import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials import 
com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import groovy.transform.CompileStatic @@ -41,6 +42,9 @@ class AzureHealthIndicator implements HealthIndicator { @Autowired AccountCredentialsProvider accountCredentialsProvider + @Autowired + AzureConfigurationProperties azureConfigurationProperties + private final AtomicReference lastException = new AtomicReference<>(null) @Override @@ -57,6 +61,8 @@ class AzureHealthIndicator implements HealthIndicator { @Scheduled(fixedDelay = 300000L) void checkHealth() { try { + if (azureConfigurationProperties.getHealth().getVerifyAccountHealth()) { + LOG.info("azure.health.verifyAccountHealth flag is enabled - verifying connection to the Azure accounts") Set azureCredentialsSet = accountCredentialsProvider.all.findAll { it instanceof AzureNamedAccountCredentials } as Set @@ -73,7 +79,9 @@ class AzureHealthIndicator implements HealthIndicator { throw new AzureIOException(e) } } - + } else { + LOG.info("azure.health.verifyAccountHealth flag is disabled - Not verifying connection to the Azure accounts"); + } lastException.set(null) } catch (Exception ex) { LOG.warn "Unhealthy", ex diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/cache/AzureAppGatewayCachingAgent.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/cache/AzureAppGatewayCachingAgent.groovy index c06d143e28c..23ca2ac90cf 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/cache/AzureAppGatewayCachingAgent.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/cache/AzureAppGatewayCachingAgent.groovy @@ -17,12 +17,9 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.cache import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AccountAware import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.CachingAgent import com.netflix.spinnaker.cats.agent.DefaultCacheResult import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData @@ -33,10 +30,10 @@ import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.common.cache.AzureCachingAgent import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys -import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.provider.AzureInfrastructureProvider import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import groovy.transform.WithWriteLock import groovy.util.logging.Slf4j @@ -59,7 +56,7 @@ class AzureAppGatewayCachingAgent extends AzureCachingAgent { Registry registry) { super(azureCloudProvider, accountName, creds, region, objectMapper) this.registry = registry - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${azureCloudProvider.id}:${OnDemandAgent.OnDemandType.LoadBalancer}") + this.metricsSupport = new 
OnDemandMetricsSupport(registry, this, "${azureCloudProvider.id}:${OnDemandType.LoadBalancer}") } @Override @@ -79,13 +76,13 @@ class AzureAppGatewayCachingAgent extends AzureCachingAgent { } @Override - OnDemandAgent.OnDemandType getOnDemandType() { - OnDemandAgent.OnDemandType.LoadBalancer + OnDemandType getOnDemandType() { + OnDemandType.LoadBalancer } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.LoadBalancer && cloudProvider == azureCloudProvider.id + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.LoadBalancer && cloudProvider == azureCloudProvider.id } @Override diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/model/AzureAppGatewayDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/model/AzureAppGatewayDescription.groovy index 9739ea7c871..6c77ff3553c 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/model/AzureAppGatewayDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/model/AzureAppGatewayDescription.groovy @@ -16,7 +16,8 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model -import com.microsoft.azure.management.network.models.ApplicationGateway +import com.azure.resourcemanager.network.fluent.models.ApplicationGatewayInner +import com.azure.resourcemanager.network.models.ApplicationGatewayProtocol import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription @@ -72,55 +73,57 @@ class AzureAppGatewayDescription extends AzureResourceOpsDescription { String sslCertificate } - static AzureAppGatewayDescription getDescriptionForAppGateway(ApplicationGateway appGateway) { - AzureAppGatewayDescription description = new AzureAppGatewayDescription(name: appGateway.name) - def parsedName = Names.parseName(appGateway.name) - description.stack = appGateway.tags?.stack ?: parsedName.stack - description.detail = appGateway.tags?.detail ?: parsedName.detail - description.appName = appGateway.tags?.appName ?: parsedName.app - description.loadBalancerName = appGateway.name + static AzureAppGatewayDescription getDescriptionForAppGateway(ApplicationGatewayInner appGateway) { + AzureAppGatewayDescription description = new AzureAppGatewayDescription(name: appGateway.name()) + def parsedName = Names.parseName(appGateway.name()) + description.stack = appGateway.tags()?.stack ?: parsedName.stack + description.detail = appGateway.tags()?.detail ?: parsedName.detail + description.appName = appGateway.tags()?.appName ?: parsedName.app + description.loadBalancerName = appGateway.name() + description.sku = appGateway.sku().name().toString() + description.tier = appGateway.sku().tier().toString() // Get current backend address pool id from the application gateway requested routing rules - def bapActiveRuleId = appGateway.requestRoutingRules?.first()?.backendAddressPool?.id + def bapActiveRuleId = appGateway.requestRoutingRules()?.first()?.backendAddressPool()?.id() if (bapActiveRuleId && AzureUtilities.getNameFromResourceId(bapActiveRuleId) != AzureAppGatewayResourceTemplate.defaultAppGatewayBeAddrPoolName) { description.trafficEnabledSG = AzureUtilities.getNameFromResourceId(bapActiveRuleId) 
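+ // Name-resolution sketch: assuming AzureUtilities.getNameFromResourceId() returns the
+ // last '/'-delimited segment of an ARM resource id, an id such as (illustrative only)
+ //   .../providers/Microsoft.Network/applicationGateways/agw/backendAddressPools/myapp-dev-v001
+ // resolves to "myapp-dev-v001", so the active backend pool's name doubles as the
+ // traffic-enabled server group name, and its Frigga-parsed cluster is derived next.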
description.cluster = Names.parseName(description.trafficEnabledSG).cluster } else { - description.trafficEnabledSG = appGateway.tags?.trafficEnabledSG - description.cluster = appGateway.tags?.cluster + description.trafficEnabledSG = appGateway.tags()?.trafficEnabledSG + description.cluster = appGateway.tags()?.cluster } // Each application gateway backend address pool corresponds to a server group (except the "defaul_BAP0") description.serverGroups = [] - appGateway.backendAddressPools?.each { bap -> - if (bap.name != AzureAppGatewayResourceTemplate.defaultAppGatewayBeAddrPoolName) description.serverGroups << bap.name + appGateway.backendAddressPools()?.each { bap -> + if (bap.name() != AzureAppGatewayResourceTemplate.defaultAppGatewayBeAddrPoolName) description.serverGroups << bap.name() } // We only support one subnet so we can just retrieve the first one - description.subnetResourceId = appGateway?.gatewayIPConfigurations?.first()?.subnet?.id + description.subnetResourceId = appGateway?.gatewayIpConfigurations()?.first()?.subnet()?.id() description.subnet = AzureUtilities.getNameFromResourceId(description.subnetResourceId) description.vnet = AzureUtilities.getResourceNameFromId(description.subnetResourceId) description.vnetResourceGroup = AzureUtilities.getResourceGroupNameFromResourceId(description.subnetResourceId) - description.hasNewSubnet = appGateway.tags?.hasNewSubnet + description.hasNewSubnet = appGateway.tags()?.hasNewSubnet - description.publicIpName = AzureUtilities.getNameFromResourceId(appGateway?.frontendIPConfigurations?.first()?.getPublicIPAddress()?.id) - description.createdTime = appGateway.tags?.createdTime?.toLong() - description.tags = appGateway.tags ?: [:] - description.region = appGateway.location + description.publicIpName = AzureUtilities.getNameFromResourceId(appGateway?.frontendIpConfigurations().first().publicIpAddress().id()) + description.createdTime = appGateway.tags()?.createdTime?.toLong() + description.tags = appGateway.tags() ?: [:] + description.region = appGateway.location() - appGateway.requestRoutingRules.each { rule -> - def httpListener = appGateway.httpListeners.find { it.id == rule.httpListener.id } + appGateway.requestRoutingRules().each { rule -> + def httpListener = appGateway.httpListeners().find { it.id() == rule.httpListener().id() } // Only HTTP protocol types are supported for now; ignore any other probes // TODO: add support for other protocols (if needed) - if (httpListener && httpListener.protocol.toUpperCase() == "HTTP") { - def frontendPort = appGateway.frontendPorts?.find { it.id == httpListener.frontendPort.id } - def backendHttpSettingsCollection = appGateway.backendHttpSettingsCollection?.find { it.id == rule.backendHttpSettings.id} + if (httpListener && httpListener.protocol() == ApplicationGatewayProtocol.HTTP) { + def frontendPort = appGateway.frontendPorts()?.find { it.id() == httpListener.frontendPort().id() } + def backendHttpSettingsCollection = appGateway.backendHttpSettingsCollection()?.find { it.id() == rule.backendHttpSettings().id()} if (frontendPort && backendHttpSettingsCollection) { description.loadBalancingRules.add( new AzureAppGatewayRule( - ruleName: rule.name, - externalPort: frontendPort.port, - backendPort: backendHttpSettingsCollection.port, + ruleName: rule.name(), + externalPort: frontendPort.port(), + backendPort: backendHttpSettingsCollection.port(), protocol: AzureAppGatewayRule.AzureLoadBalancingRulesType.HTTP )) } @@ -128,17 +131,17 @@ class AzureAppGatewayDescription extends 
AzureResourceOpsDescription { } // Add the healthcheck probes - appGateway.probes.each { probe -> + appGateway.probes().each { probe -> // Only HTTP protocol types are supported for now; ignore any other probes // TODO: add support for other protocols (if needed) - if (probe.protocol.toUpperCase() == "HTTP") { + if (probe.protocol() == ApplicationGatewayProtocol.HTTP) { def p = new AzureAppGatewayHealthcheckProbe() - p.probeName = probe.name - p.probePath = probe.path - p.probePort = probe.host - p.probeInterval = probe.interval - p.timeout = probe.timeout - p.unhealthyThreshold = probe.unhealthyThreshold + p.probeName = probe.name() + p.probePath = probe.path() + p.probePort = probe.host() + p.probeInterval = probe.interval() + p.timeout = probe.timeout() + p.unhealthyThreshold = probe.unhealthyThreshold() p.probeProtocol = AzureAppGatewayHealthcheckProbe.AzureLoadBalancerProbesType.HTTP description.probes.add(p) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/UpsertAzureAppGatewayAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/UpsertAzureAppGatewayAtomicOperation.groovy index b2bc032e3f9..aa2e44d0c45 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/UpsertAzureAppGatewayAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/UpsertAzureAppGatewayAtomicOperation.groovy @@ -16,8 +16,8 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops -import com.microsoft.azure.CloudException -import com.microsoft.azure.management.resources.models.DeploymentExtended +import com.azure.core.management.exception.ManagementException +import com.azure.resourcemanager.resources.models.Deployment import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.model.AzureDeploymentOperation import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription @@ -68,6 +68,18 @@ class UpsertAzureAppGatewayAtomicOperation implements AtomicOperation { try { task.updateStatus(BASE_PHASE, "Beginning load balancer deployment") + // Check dns name conflict + if(description.dnsName){ + if(description.dnsName.isBlank()){ + throw new RuntimeException("Specified dns name $description.dnsName cannot be blank") + } + + def isDnsNameAvailable = description.credentials.networkClient.checkDnsNameAvailability(description.dnsName) + if(!isDnsNameAvailable){ + throw new RuntimeException("Specified dns name $description.dnsName has conflict") + } + } + description.name = description.loadBalancerName resourceGroupName = AzureUtilities.getResourceGroupName(description.appName, description.region) virtualNetworkName = AzureUtilities.getVirtualNetworkName(resourceGroupName) @@ -85,22 +97,29 @@ class UpsertAzureAppGatewayAtomicOperation implements AtomicOperation { description.subnetResourceId = appGatewayDescription.subnetResourceId description.serverGroups = appGatewayDescription.serverGroups description.trafficEnabledSG = appGatewayDescription.trafficEnabledSG + description.vnetResourceGroup = appGatewayDescription.vnetResourceGroup + description.sku = appGatewayDescription.sku + description.tier = appGatewayDescription.tier - DeploymentExtended deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( + 
Deployment deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( AzureAppGatewayResourceTemplate.getTemplate(description), resourceGroupName, description.region, description.loadBalancerName, "appGateway") - errList = AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name) + errList = AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name()) } else { // We are attempting to create a new application gateway + if (description.sku == "Standard_v2") { + description.tier = "Standard_v2" + } + if (!description.useDefaultVnet) { task.updateStatus(BASE_PHASE, "Create ApplicationGateway using virtual network $description.vnet and subnet $description.subnet for server group $description.name") // Create corresponding ResourceGroup if it's not created already - description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(description.credentials, resourceGroupName, null, description.region) + description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(resourceGroupName, null, description.region) // We will try to associate the server group with the selected virtual network and subnet description.hasNewSubnet = false @@ -122,7 +141,7 @@ class UpsertAzureAppGatewayAtomicOperation implements AtomicOperation { } } else { // Create ResourceGroup and default VirtualNetwork if they are not created already - description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(description.credentials, resourceGroupName, virtualNetworkName, description.region) + description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(resourceGroupName, virtualNetworkName, description.region) task.updateStatus(BASE_PHASE, "Creating subnet for application gateway") @@ -135,10 +154,10 @@ class UpsertAzureAppGatewayAtomicOperation implements AtomicOperation { // we'll do a final check to make sure that the subnet can be created before we pass it in the deployment template def vnet = description.credentials.networkClient.getVirtualNetwork(resourceGroupName, virtualNetworkName) - if (!subnetName || vnet?.subnets?.find { it.name == subnetName }) { + if (!subnetName || vnet?.subnets()?.find { it.name == subnetName }) { // virtualNetworkName is not yet in the cache or the subnet we try to create already exists; we'll use the current vnet // we just got to re-compute the next subnet - vnetDescription = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(vnet) + vnetDescription = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(vnet.innerModel()) nextSubnetAddressPrefix = AzureVirtualNetworkDescription.getNextSubnetAddressPrefix(vnetDescription, rand.nextInt(vnetDescription?.maxSubnets ?: 1)) subnetName = AzureUtilities.getSubnetName(virtualNetworkName, nextSubnetAddressPrefix) } @@ -163,20 +182,20 @@ class UpsertAzureAppGatewayAtomicOperation implements AtomicOperation { } task.updateStatus(BASE_PHASE, "Create new application gateway ${description.loadBalancerName} in ${description.region}...") - DeploymentExtended deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( + Deployment deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( AzureAppGatewayResourceTemplate.getTemplate(description), resourceGroupName, description.region, description.loadBalancerName, "appGateway") - errList = 
AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name) + errList = AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name()) loadBalancerName = description.name } - } catch (CloudException ce) { + } catch (ManagementException ce) { task.updateStatus(BASE_PHASE, "One or more deployment operations have failed. Please see Azure portal for more information. Resource Group: ${resourceGroupName} Application Gateway: ${description.loadBalancerName}") errList.add(ce.message) - } catch (Exception e) { + } catch (Throwable e) { task.updateStatus(BASE_PHASE, "Deployment of application gateway ${description.loadBalancerName} failed: ${e.message}") errList.add(e.message) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/DeleteAzureAppGatewayAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/DeleteAzureAppGatewayAtomicOperationConverter.groovy index 55c5fdc2acc..f629b4b3e08 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/DeleteAzureAppGatewayAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/DeleteAzureAppGatewayAtomicOperationConverter.groovy @@ -20,6 +20,10 @@ import com.netflix.spinnaker.clouddriver.azure.AzureOperation import com.netflix.spinnaker.clouddriver.azure.common.AzureAtomicOperationConverterHelper import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.DeleteAzureAppGatewayAtomicOperation +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.DeleteAzureLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.DeleteAzureLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport @@ -31,10 +35,18 @@ import org.springframework.stereotype.Component @Component("deleteAzureAppGatewayDescription") class DeleteAzureAppGatewayAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { AtomicOperation convertOperation(Map input) { - new DeleteAzureAppGatewayAtomicOperation(convertDescription(input)) + if(input.get("loadBalancerType") == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + new DeleteAzureLoadBalancerAtomicOperation(convertALBDescription(input)) + } else { + new DeleteAzureAppGatewayAtomicOperation(convertDescription(input)) + } } AzureAppGatewayDescription convertDescription(Map input) { AzureAtomicOperationConverterHelper.convertDescription(input, this, AzureAppGatewayDescription) as AzureAppGatewayDescription } + + DeleteAzureLoadBalancerDescription convertALBDescription(Map input) { + AzureAtomicOperationConverterHelper.convertDescription(input, this,
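For reference, the converter dispatch above keys off the enum's formatted string; with a hypothetical delete payload:

  convertOperation([loadBalancerType: "Azure Load Balancer", loadBalancerName: "myapp-lb", region: "westus", appName: "myapp"])   // -> DeleteAzureLoadBalancerAtomicOperation
  convertOperation([loadBalancerName: "myapp-agw", region: "westus", appName: "myapp"])                                           // -> DeleteAzureAppGatewayAtomicOperation

where "Azure Load Balancer" is AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString(), defined on AzureLoadBalancer later in this change.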
DeleteAzureLoadBalancerDescription) as DeleteAzureLoadBalancerDescription + } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/UpsertAzureAppGatewayAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/UpsertAzureAppGatewayAtomicOperationConverter.groovy deleted file mode 100644 index 06bb2a34c6a..00000000000 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/converters/UpsertAzureAppGatewayAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.converters - -import com.netflix.spinnaker.clouddriver.azure.AzureOperation -import com.netflix.spinnaker.clouddriver.azure.common.AzureAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription -import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.UpsertAzureAppGatewayAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import groovy.util.logging.Slf4j -import org.springframework.stereotype.Component - -@Slf4j -@AzureOperation(AtomicOperations.UPSERT_LOAD_BALANCER) -@Component("upsertAzureAppGatewayDescription") -class UpsertAzureAppGatewayAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new UpsertAzureAppGatewayAtomicOperation(convertDescription(input)) - } - - AzureAppGatewayDescription convertDescription(Map input) { - AzureAtomicOperationConverterHelper.convertDescription(input, this, AzureAppGatewayDescription) as AzureAppGatewayDescription - } -} diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/DeleteAzureAppGatewayAtomicOperationValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/DeleteAzureAppGatewayAtomicOperationValidator.groovy index 06418880a65..8861fe1b6b1 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/DeleteAzureAppGatewayAtomicOperationValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/DeleteAzureAppGatewayAtomicOperationValidator.groovy @@ -19,10 +19,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.validat import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import 
com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("deleteAzureAppGatewayDescriptionValidator") class DeleteAzureAppGatewayAtomicOperationValidator extends @@ -32,7 +32,7 @@ class DeleteAzureAppGatewayAtomicOperationValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, AzureAppGatewayDescription description, Errors errors) { + void validate(List priorDescriptions, AzureAppGatewayDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("deletetAzureAppGatewayDescriptionValidator", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/UpsertAzureAppGatewayAtomicOperationValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/UpsertAzureAppGatewayAtomicOperationValidator.groovy index 5f5d51bc31f..b8c272345f4 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/UpsertAzureAppGatewayAtomicOperationValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/ops/validators/UpsertAzureAppGatewayAtomicOperationValidator.groovy @@ -19,10 +19,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.validat import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("upsertAzureAppGatewayDescriptionValidator") class UpsertAzureAppGatewayAtomicOperationValidator extends @@ -35,7 +35,7 @@ class UpsertAzureAppGatewayAtomicOperationValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, AzureAppGatewayDescription description, Errors errors) { + void validate(List priorDescriptions, AzureAppGatewayDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("upsertAzureAppGatewayDescriptionValidator", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/view/AzureAppGatewayProvider.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/view/AzureAppGatewayProvider.groovy index 76b30fbf098..56357793c49 100644 --- 
a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/view/AzureAppGatewayProvider.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/view/AzureAppGatewayProvider.groovy @@ -24,8 +24,10 @@ import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription +import com.netflix.spinnaker.clouddriver.azure.resources.cluster.view.AzureClusterProvider import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider @@ -44,6 +46,9 @@ class AzureAppGatewayProvider implements LoadBalancerProvider @Autowired AccountCredentialsProvider accountCredentialsProvider + @Autowired + AzureClusterProvider clusterProvider + @Autowired AzureAppGatewayProvider(AzureCloudProvider azureCloudProvider, Cache cacheView, ObjectMapper objectMapper) { this.azureCloudProvider = azureCloudProvider @@ -87,15 +92,18 @@ class AzureAppGatewayProvider implements LoadBalancerProvider region: description.region, vnet: description.vnet ?: "vnet-unassigned", subnet: description.subnet ?: "subnet-unassigned", - cluster: description.cluster ?: "unassigned" + cluster: description.cluster ?: "unassigned", + loadBalancerType: AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY ) description.serverGroups?.each { serverGroup -> - // TODO: add proper check for enable/disable server groups + AzureServerGroupDescription asg = clusterProvider.getServerGroup(loadBalancer.account, loadBalancer.region, serverGroup) + loadBalancer.serverGroups.add(new LoadBalancerServerGroup ( name: serverGroup, - isDisabled: false, + isDisabled: asg?.isDisabled(), detachedInstances: [], - instances: [] + instances: [], + cloudProvider: AzureCloudProvider.ID )) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/application/model/AzureApplication.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/application/model/AzureApplication.groovy index afb1336bbd4..d5d2a04bfc7 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/application/model/AzureApplication.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/application/model/AzureApplication.groovy @@ -32,7 +32,7 @@ class AzureApplication implements Application, Serializable { Map attributes = Collections.synchronizedMap(new HashMap()) AzureApplication(String name, Map attributes, Map> clusterNames) { - log.info("Constructor....AzureApplication") + log.trace("Constructor....AzureApplication") this.name = name this.attributes = attributes diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/AzureResourceOpsDescription.groovy 
b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/AzureResourceOpsDescription.groovy index 02383e66cec..96974789232 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/AzureResourceOpsDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/AzureResourceOpsDescription.groovy @@ -19,8 +19,9 @@ package com.netflix.spinnaker.clouddriver.azure.resources.common import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials +import com.netflix.spinnaker.orchestration.OperationDescription -class AzureResourceOpsDescription { +class AzureResourceOpsDescription implements OperationDescription { static ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) String name @@ -34,5 +35,5 @@ class AzureResourceOpsDescription { String user Long createdTime long lastReadTime - Map tags = [:] + Map tags = [:] } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/cache/Keys.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/cache/Keys.groovy index 2058982fc0a..0e8446b720b 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/cache/Keys.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/cache/Keys.groovy @@ -32,6 +32,7 @@ class Keys { AZURE_INSTANCES, AZURE_VMIMAGES, AZURE_CUSTOMVMIMAGES, + AZURE_MANAGEDIMAGES, AZURE_ON_DEMAND, AZURE_EVICTIONS @@ -103,6 +104,9 @@ class Keys { case Namespace.AZURE_CUSTOMVMIMAGES.ns: result << [account: parts[2], region: parts[3], name: parts[4]] break + case Namespace.AZURE_MANAGEDIMAGES.ns: + result << [account: parts[2], resourceGroup: parts[3], region: parts[4], name: parts[5], osType: parts[6]] + break case Namespace.AZURE_SERVER_GROUPS.ns: def names = Names.parseName(parts[2]) result << [ @@ -195,6 +199,25 @@ class Keys { "${azureCloudProviderId}:${Namespace.AZURE_NETWORKS}:${networkId}:${account}:${resourceGroup}:${region}" } + static String getManagedVMImageKey(AzureCloudProvider azureCloudProvider, + String account, + String region, + String resourceGroup, + String vmImageName, + String vmImageOsType) { + //"$azureCloudProvider.id:${Namespace.AZURE_MANAGEDIMAGES}:${account}:${resourceGroup}:${region}:${vmImageName}:${vmImageOsType}" + getManagedVMImageKey(azureCloudProvider.id, account, region, resourceGroup, vmImageName, vmImageOsType) + } + + static String getManagedVMImageKey(String azureCloudProviderId, + String account, + String region, + String resourceGroup, + String vmImageName, + String vmImageOsType) { + "${azureCloudProviderId}:${Namespace.AZURE_MANAGEDIMAGES}:${account}:${resourceGroup}:${region}:${vmImageName}:${vmImageOsType}" + } + static String getVMImageKey(AzureCloudProvider azureCloudProvider, String account, String region, diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/model/AzureDeploymentOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/model/AzureDeploymentOperation.groovy index 38a8378895a..7c339411fb3 100644 --- 
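Key layout sketch for the new AZURE_MANAGEDIMAGES namespace above: the builder and the parse() case agree on the segment order provider:namespace:account:resourceGroup:region:name:osType. With invented values:

  Keys.getManagedVMImageKey("azure", "my-account", "westus", "myapp-westus", "ubuntu-1804", "Linux")
    == "azure:${Namespace.AZURE_MANAGEDIMAGES}:my-account:myapp-westus:westus:ubuntu-1804:Linux"

and parsing that key yields [account: "my-account", resourceGroup: "myapp-westus", region: "westus", name: "ubuntu-1804", osType: "Linux"]. Note the method signature takes region before resourceGroup while the emitted key orders resourceGroup before region.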
a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/model/AzureDeploymentOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/common/model/AzureDeploymentOperation.groovy @@ -16,15 +16,17 @@ package com.netflix.spinnaker.clouddriver.azure.resources.common.model +import com.azure.resourcemanager.resources.fluent.models.DeploymentOperationInner +import com.azure.resourcemanager.resources.models.DeploymentOperation import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper -import com.microsoft.azure.management.resources.models.DeploymentOperation -import com.netflix.spinnaker.clouddriver.azure.client.AzureResourceManagerClient import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials import com.netflix.spinnaker.clouddriver.data.task.Task +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j +@CompileStatic @Slf4j class AzureDeploymentOperation { @@ -49,7 +51,7 @@ class AzureDeploymentOperation { Integer checkDeployment = 0 while (checkDeployment < AZURE_DEPLOYMENT_OPERATION_STATUS_RETRIES_MAX) { - deploymentState = creds.resourceManagerClient.getDeployment(resourceGroupName, deploymentName).properties.provisioningState + deploymentState = creds.resourceManagerClient.getDeployment(resourceGroupName, deploymentName).innerModel().properties().provisioningState() creds.resourceManagerClient.getDeploymentOperations(resourceGroupName, deploymentName).each { DeploymentOperation d -> @@ -58,25 +60,26 @@ class AzureDeploymentOperation { // acting on. The operations for all the resources created in the deployment do get returned and we can // identify which operation is for what resource. So for now, until we get clarity from the SDK, we will // ignore those operations that have a null target resource. - if (d.properties.targetResource) { - if (!resourceCompletedState.containsKey(d.id)) { - resourceCompletedState[d.id] = false + DeploymentOperationInner inner = d.innerModel() + if (inner.properties().targetResource()) { + if (!resourceCompletedState.containsKey(inner.id())) { + resourceCompletedState[inner.id()] = false } - if (d.properties.provisioningState == AzureUtilities.ProvisioningState.SUCCEEDED) { + if (inner.properties().provisioningState() == AzureUtilities.ProvisioningState.SUCCEEDED) { - if (!resourceCompletedState[d.id]) { - task.updateStatus opsName, String.format("Resource %s created", d.properties.targetResource.resourceName) - resourceCompletedState[d.id] = true + if (!resourceCompletedState[inner.id()]) { + task.updateStatus opsName, String.format("Resource %s created", inner.properties().targetResource().resourceName()) + resourceCompletedState[inner.id()] = true } - } else if (d.properties.provisioningState == AzureUtilities.ProvisioningState.FAILED) { - if (!resourceCompletedState[d.id]) { + } else if (inner.properties().provisioningState() == AzureUtilities.ProvisioningState.FAILED) { + if (!resourceCompletedState[inner.id()]) { //String statusMessage = updatedDeploymentOperation?.value?.first()?.properties?.statusMessage?.error?.message - String err = "Failed to create resource ${d.properties.targetResource.resourceName}: " - err += d.properties.statusMessage ? d.properties.statusMessage : "See Azure Portal for more information." 
+ String err = "Failed to create resource ${inner.properties().targetResource().resourceName()}: " + err += inner.properties().statusMessage() ? inner.properties().statusMessage() : "See Azure Portal for more information." task.updateStatus opsName, err - resourceCompletedState[d.id] = true + resourceCompletedState[inner.id()] = true errList.add(err) } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/cache/AzureLoadBalancerCachingAgent.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/cache/AzureLoadBalancerCachingAgent.groovy index 287a7c5dbde..799d2dac54d 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/cache/AzureLoadBalancerCachingAgent.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/cache/AzureLoadBalancerCachingAgent.groovy @@ -36,6 +36,7 @@ import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.Azur import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import groovy.transform.WithWriteLock import groovy.util.logging.Slf4j @@ -68,7 +69,7 @@ class AzureLoadBalancerCachingAgent implements CachingAgent, OnDemandAgent, Acco this.region = region this.objectMapper = objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) this.registry = registry - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${azureCloudProvider.id}:${OnDemandAgent.OnDemandType.LoadBalancer}") + this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${azureCloudProvider.id}:${OnDemandType.LoadBalancer}") } @Override @@ -97,8 +98,8 @@ class AzureLoadBalancerCachingAgent implements CachingAgent, OnDemandAgent, Acco } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.LoadBalancer && cloudProvider == azureCloudProvider.id + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.LoadBalancer && cloudProvider == azureCloudProvider.id } @Override @@ -189,6 +190,9 @@ class AzureLoadBalancerCachingAgent implements CachingAgent, OnDemandAgent, Acco loadBalancers.each { AzureLoadBalancerDescription item -> AzureLoadBalancerDescription loadBalancer = item + // Skip internal ALBs (these only serve connections to VMSS instances) + if (loadBalancer.internal) return + String lbKey = getLoadBalancerKey(loadBalancer) // Search the current OnDemand update map entries and look for a load balancer match diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancer.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancer.groovy index 3c6e3656034..133baa14b3f 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancer.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancer.groovy @@ -22,8 +22,25 @@ import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider import com.netflix.spinnaker.clouddriver.model.LoadBalancer import
com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup import com.netflix.spinnaker.moniker.Moniker +import groovy.transform.CompileStatic +@CompileStatic class AzureLoadBalancer implements LoadBalancer { + static enum AzureLoadBalancerType { + AZURE_LOAD_BALANCER("Azure Load Balancer"), + AZURE_APPLICATION_GATEWAY("Azure Application Gateway") + + private String formattedType + + private AzureLoadBalancerType(String formattedType) { + this.formattedType = formattedType + } + + @Override + String toString() { + formattedType + } + } String account String name @@ -34,6 +51,7 @@ class AzureLoadBalancer implements LoadBalancer { String cluster final String type = AzureCloudProvider.ID final String cloudProvider = AzureCloudProvider.ID + final AzureLoadBalancerType loadBalancerType void setMoniker(Moniker _ignored) {} diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancerDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancerDescription.groovy index 8c4a22a0118..a13dc1a65db 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancerDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/AzureLoadBalancerDescription.groovy @@ -16,27 +16,36 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model -import com.microsoft.azure.management.network.models.LoadBalancer +import com.azure.resourcemanager.network.fluent.models.LoadBalancerInner +import com.azure.resourcemanager.network.models.ProbeProtocol +import com.azure.resourcemanager.network.models.TransportProtocol import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription +import com.netflix.spinnaker.clouddriver.azure.templates.AzureLoadBalancerResourceTemplate +import groovy.transform.CompileStatic +@CompileStatic class AzureLoadBalancerDescription extends AzureResourceOpsDescription { String loadBalancerName String vnet String subnet String securityGroup String dnsName + String publicIpName String cluster - String serverGroup + List serverGroups + String trafficEnabledSG String appName + String sessionPersistence + boolean internal List probes = [] List loadBalancingRules = [] List inboundNATRules = [] static class AzureLoadBalancerProbe { enum AzureLoadBalancerProbesType { - HTTP, TCP + TCP, HTTP, HTTPS } String probeName @@ -75,28 +84,40 @@ class AzureLoadBalancerDescription extends AzureResourceOpsDescription { Integer port } - static AzureLoadBalancerDescription build(LoadBalancer azureLoadBalancer) { - AzureLoadBalancerDescription description = new AzureLoadBalancerDescription(loadBalancerName: azureLoadBalancer.name) - def parsedName = Names.parseName(azureLoadBalancer.name) - description.stack = azureLoadBalancer.tags?.stack ?: parsedName.stack - description.detail = azureLoadBalancer.tags?.detail ?: parsedName.detail - description.appName = azureLoadBalancer.tags?.appName ?: parsedName.app - description.cluster = azureLoadBalancer.tags?.cluster - description.serverGroup = azureLoadBalancer.tags?.serverGroup - description.vnet = azureLoadBalancer.tags?.vnet - description.createdTime = azureLoadBalancer.tags?.createdTime?.toLong() - description.tags = 
azureLoadBalancer.tags - description.region = azureLoadBalancer.location - - for (def rule : azureLoadBalancer.loadBalancingRules) { - def r = new AzureLoadBalancingRule(ruleName: rule.name) - r.externalPort = rule.frontendPort - r.backendPort = rule.backendPort - r.probeName = AzureUtilities.getNameFromResourceId(rule?.probe?.id) ?: "not-assigned" - r.persistence = rule.loadDistribution; - r.idleTimeout = rule.idleTimeoutInMinutes; - - if (rule.protocol.toLowerCase() == "udp") { + static AzureLoadBalancerDescription build(LoadBalancerInner azureLoadBalancer) { + AzureLoadBalancerDescription description = new AzureLoadBalancerDescription(loadBalancerName: azureLoadBalancer.name()) + def parsedName = Names.parseName(azureLoadBalancer.name()) + description.stack = azureLoadBalancer.tags()?.stack ?: parsedName.stack + description.detail = azureLoadBalancer.tags()?.detail ?: parsedName.detail + description.appName = azureLoadBalancer.tags()?.appName ?: parsedName.app + description.cluster = azureLoadBalancer.tags()?.cluster + description.vnet = azureLoadBalancer.tags()?.vnet + description.createdTime = azureLoadBalancer.tags()?.createdTime?.toLong() + description.tags.putAll(azureLoadBalancer.tags()) + description.region = azureLoadBalancer.location() + description.internal = azureLoadBalancer.tags()?.internal != null + + def frontendIPConfigurations = azureLoadBalancer?.frontendIpConfigurations() + if (frontendIPConfigurations != null && !frontendIPConfigurations.isEmpty()) { + description.publicIpName = AzureUtilities.getNameFromResourceId(frontendIPConfigurations?.first()?.publicIpAddress()?.id()) + } + + // Each load balancer backend address pool corresponds to a server group (except the "default_LB_BAP") + description.serverGroups = [] + azureLoadBalancer.backendAddressPools()?.each { bap -> + if (bap.name() != AzureLoadBalancerResourceTemplate.DEFAULT_BACKEND_POOL) description.serverGroups << bap.name() + } + + for (def rule : azureLoadBalancer.loadBalancingRules()) { + def r = new AzureLoadBalancingRule(ruleName: rule.name()) + r.externalPort = rule.frontendPort() + r.backendPort = rule.backendPort() + r.probeName = AzureUtilities.getNameFromResourceId(rule?.probe()?.id()) ?: "not-assigned" + r.persistence = rule.loadDistribution() + r.idleTimeout = rule.idleTimeoutInMinutes() + description.trafficEnabledSG = AzureUtilities.getNameFromResourceId(rule.backendAddressPool().id()) + + if (rule.protocol() == TransportProtocol.UDP) { r.protocol = AzureLoadBalancingRule.AzureLoadBalancingRulesType.UDP } else { r.protocol = AzureLoadBalancingRule.AzureLoadBalancingRulesType.TCP @@ -105,14 +126,14 @@ class AzureLoadBalancerDescription extends AzureResourceOpsDescription { } // Add the probes - for (def probe : azureLoadBalancer.probes) { + for (def probe : azureLoadBalancer.probes()) { def p = new AzureLoadBalancerProbe() - p.probeName = probe.name - p.probeInterval = probe.intervalInSeconds - p.probePath = probe.requestPath - p.probePort = probe.port - p.unhealthyThreshold = probe.numberOfProbes - if (probe.protocol.toLowerCase() == "tcp") { + p.probeName = probe.name() + p.probeInterval = probe.intervalInSeconds() + p.probePath = probe.requestPath() + p.probePort = probe.port() + p.unhealthyThreshold = probe.numberOfProbes() + if (probe.protocol() == ProbeProtocol.TCP) { p.probeProtocol = AzureLoadBalancerProbe.AzureLoadBalancerProbesType.TCP } else { p.probeProtocol = AzureLoadBalancerProbe.AzureLoadBalancerProbesType.HTTP @@ -120,8 +141,8 @@ class AzureLoadBalancerDescription extends 
AzureResourceOpsDescription { description.probes.add(p) } - for (def natRule : azureLoadBalancer.inboundNatRules) { - def n = new AzureLoadBalancerInboundNATRule(ruleName: natRule.name) + for (def natRule : azureLoadBalancer.inboundNatRules()) { + def n = new AzureLoadBalancerInboundNATRule(ruleName: natRule.name()) description.inboundNATRules.add(n) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/DeleteAzureLoadBalancerDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/DeleteAzureLoadBalancerDescription.groovy index 3ed21074586..a62cfa50f81 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/DeleteAzureLoadBalancerDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/model/DeleteAzureLoadBalancerDescription.groovy @@ -20,5 +20,5 @@ import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOps class DeleteAzureLoadBalancerDescription extends AzureResourceOpsDescription { String loadBalancerName - List regions + String region } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/DeleteAzureLoadBalancerAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/DeleteAzureLoadBalancerAtomicOperation.groovy index eebc71b8cea..4ff0fb62f54 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/DeleteAzureLoadBalancerAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/DeleteAzureLoadBalancerAtomicOperation.groovy @@ -42,27 +42,25 @@ class DeleteAzureLoadBalancerAtomicOperation implements AtomicOperation { @Override Void operate(List priorOutputs) { task.updateStatus(BASE_PHASE, "Initializing Delete Azure Load Balancer Operation...") - for (region in description.regions) { - task.updateStatus(BASE_PHASE, "Deleting ${description.loadBalancerName} " + "in ${region}...") + task.updateStatus(BASE_PHASE, "Deleting ${description.loadBalancerName}...") - if (!description.credentials) { - throw new IllegalArgumentException("Unable to resolve credentials for the selected Azure account.") - } + if (!description.credentials) { + throw new IllegalArgumentException("Unable to resolve credentials for the selected Azure account.") + } - try { - String resourceGroupName = AzureUtilities.getResourceGroupName(description.appName, region) + try { + String resourceGroupName = AzureUtilities.getResourceGroupName(description.appName, description.region) - description - .credentials - .networkClient - .deleteLoadBalancer(resourceGroupName, description.loadBalancerName) + description + .credentials + .networkClient + .deleteLoadBalancer(resourceGroupName, description.loadBalancerName) - // TODO: check response to ensure operation succeeded - task.updateStatus(BASE_PHASE, "Deletion of Azure load balancer ${description.loadBalancerName} in ${region} has succeeded.") - } catch (Exception e) { - task.updateStatus(BASE_PHASE, "Deletion of load balancer ${description.loadBalancerName} failed: e.message") - throw new AtomicOperationException("Failed to delete ${description.name}", [e.message]) - } + // TODO: check response to ensure operation succeeded + 
task.updateStatus(BASE_PHASE, "Deletion of Azure load balancer ${description.loadBalancerName} in ${description.region} has succeeded.") + } catch (Exception e) { + task.updateStatus(BASE_PHASE, "Deletion of load balancer ${description.loadBalancerName} failed: e.message") + throw new AtomicOperationException("Failed to delete ${description.name}", [e.message]) } null diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/UpsertAzureLoadBalancerAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/UpsertAzureLoadBalancerAtomicOperation.groovy index 2e2df80fd09..0ead1628864 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/UpsertAzureLoadBalancerAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/UpsertAzureLoadBalancerAtomicOperation.groovy @@ -16,8 +16,8 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops -import com.microsoft.azure.CloudException -import com.microsoft.azure.management.resources.models.DeploymentExtended +import com.azure.core.management.exception.ManagementException +import com.azure.resourcemanager.resources.models.Deployment import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.model.AzureDeploymentOperation import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription @@ -52,24 +52,48 @@ class UpsertAzureLoadBalancerAtomicOperation implements AtomicOperation { "in ${description.region}...") def errList = new ArrayList() - String resourceGroupName = AzureUtilities.getResourceGroupName(description.appName, description.region) + String resourceGroupName = null try { task.updateStatus(BASE_PHASE, "Beginning load balancer deployment") - DeploymentExtended deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( + resourceGroupName = AzureUtilities.getResourceGroupName(description.appName, description.region) + // Create corresponding ResourceGroup if it's not created already + description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(resourceGroupName, null, description.region) + + if(description.dnsName) { + if(description.dnsName.isBlank()){ + throw new RuntimeException("Specified dns name $description.dnsName cannot be blank") + } + + // Check dns name conflict + def isDnsNameAvailable = description.credentials.networkClient.checkDnsNameAvailability(description.dnsName) + if (!isDnsNameAvailable) { + throw new RuntimeException("Specified dns name $description.dnsName has conflict") + } + } + + description.name = description.loadBalancerName + def loadBalancerDescription = description.credentials.networkClient.getLoadBalancer(resourceGroupName, description.name) + + if(loadBalancerDescription) { + description.serverGroups = loadBalancerDescription.serverGroups + description.trafficEnabledSG = loadBalancerDescription.trafficEnabledSG + description.publicIpName = loadBalancerDescription.publicIpName + } + Deployment deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( AzureLoadBalancerResourceTemplate.getTemplate(description), resourceGroupName, description.region, description.loadBalancerName, "loadBalancer") - errList = 
AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name) - } catch (CloudException ce) { + errList = AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name()) + } catch (ManagementException ce) { task.updateStatus(BASE_PHASE, "One or more deployment operations have failed. Please see Azure portal for more information. Resource Group: ${resourceGroupName} Load Balancer: ${description.loadBalancerName}") errList.add(ce.message) - } catch (Exception e) { + } catch (Throwable e) { task.updateStatus(BASE_PHASE, "Deployment of load balancer ${description.loadBalancerName} failed: ${e.message}. Please see Azure Portal for more information") errList.add(e.message) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/DeleteAzureLoadBalancerAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/DeleteAzureLoadBalancerAtomicOperationConverter.groovy index 900bda9b75b..27211261230 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/DeleteAzureLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/DeleteAzureLoadBalancerAtomicOperationConverter.groovy @@ -32,7 +32,7 @@ import org.springframework.stereotype.Component @Component("deleteAzureLoadBalancerDescription") class DeleteAzureLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { public DeleteAzureLoadBalancerAtomicOperationConverter() { - log.info("Constructor....DeleteAzureLoadBalancerAtomicOperationConverter") + log.trace("Constructor....DeleteAzureLoadBalancerAtomicOperationConverter") } @Override diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/UpsertAzureLoadBalancerAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/UpsertAzureLoadBalancerAtomicOperationConverter.groovy index 4b989b30deb..2ef4d343611 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/UpsertAzureLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/converters/UpsertAzureLoadBalancerAtomicOperationConverter.groovy @@ -18,6 +18,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.conve import com.netflix.spinnaker.clouddriver.azure.AzureOperation import com.netflix.spinnaker.clouddriver.azure.common.AzureAtomicOperationConverterHelper +import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription +import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.UpsertAzureAppGatewayAtomicOperation +import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription import 
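The catch block above replaces com.microsoft.azure.CloudException with the new SDK's ManagementException. A sketch of the error surface, under the assumption that azure-core-management's ManagementException.getValue() carries the ARM error body:

    import com.azure.core.management.exception.ManagementException

    def reportArmFailure(Closure deploy) {
      try {
        deploy()
      } catch (ManagementException e) {
        // getValue() is assumed to expose the ARM error code and message.
        println "ARM deployment failed: ${e.value?.code} ${e.value?.message}"
      }
    }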
com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.UpsertAzureLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation @@ -27,21 +31,31 @@ import groovy.util.logging.Slf4j import org.springframework.stereotype.Component @Slf4j -@AzureOperation("upsertLoadBalancerL4") -//@AzureOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +//@AzureOperation("upsertLoadBalancerL4") +@AzureOperation(AtomicOperations.UPSERT_LOAD_BALANCER) @Component("upsertAzureLoadBalancerDescription") class UpsertAzureLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { UpsertAzureLoadBalancerAtomicOperationConverter() { - log.info("Constructor....UpsertAzureLoadBalancerAtomicOperationConverter") + log.trace("Constructor....UpsertAzureLoadBalancerAtomicOperationConverter") } AtomicOperation convertOperation(Map input) { - new UpsertAzureLoadBalancerAtomicOperation(convertDescription(input)) + String loadBalancerType = input.get("loadBalancerType") + if (loadBalancerType == null || loadBalancerType.equals(AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString())) { + return new UpsertAzureAppGatewayAtomicOperation((AzureAppGatewayDescription) convertDescription(input)) + } else { + return new UpsertAzureLoadBalancerAtomicOperation((AzureLoadBalancerDescription) convertDescription(input)) + } } - AzureLoadBalancerDescription convertDescription(Map input) { - AzureAtomicOperationConverterHelper. - convertDescription(input, this, AzureLoadBalancerDescription) as AzureLoadBalancerDescription + AzureResourceOpsDescription convertDescription(Map input) { + String loadBalancerType = input.get("loadBalancerType") + if (loadBalancerType == null || loadBalancerType.equals(AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString())) { + return AzureAtomicOperationConverterHelper. + convertDescription(input, this, AzureAppGatewayDescription) as AzureAppGatewayDescription + } else { + return AzureAtomicOperationConverterHelper. 
+ convertDescription(input, this, AzureLoadBalancerDescription) as AzureLoadBalancerDescription + } } } - diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/DeleteAzureLoadBalancerDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/DeleteAzureLoadBalancerDescriptionValidator.groovy index 22a2eb6cc26..5dad50fd394 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/DeleteAzureLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/DeleteAzureLoadBalancerDescriptionValidator.groovy @@ -19,10 +19,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.valid import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.DeleteAzureLoadBalancerDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("deleteAzureLoadBalancerDescriptionValidator") class DeleteAzureLoadBalancerDescriptionValidator extends @@ -31,7 +31,7 @@ class DeleteAzureLoadBalancerDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, DeleteAzureLoadBalancerDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteAzureLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("deleteAzureLoadBalancerDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/UpsertAzureLoadBalancerDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/UpsertAzureLoadBalancerDescriptionValidator.groovy index 033d6ba9acb..73c295dee9c 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/UpsertAzureLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/ops/validators/UpsertAzureLoadBalancerDescriptionValidator.groovy @@ -19,10 +19,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.valid import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors 
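Usage sketch for the converter dispatch just above: an input map without loadBalancerType, or one naming the Application Gateway type, keeps the legacy app-gateway path, and only an explicit Azure Load Balancer type routes to the new L4 operation. The map below is hypothetical and the enum string form is assumed:

    // Hypothetical operation input as the orchestration engine would submit it.
    Map input = [
      loadBalancerName: 'myapp-dev-lb',
      region          : 'westus',
      loadBalancerType: 'AZURE_LOAD_BALANCER'  // assumed AzureLoadBalancerType.toString() form
    ]
    // convertOperation(input) builds an UpsertAzureLoadBalancerAtomicOperation;
    // dropping the loadBalancerType key falls back to UpsertAzureAppGatewayAtomicOperation.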
@Component("upsertAzureLoadBalancerDescriptionValidator") class UpsertAzureLoadBalancerDescriptionValidator extends @@ -33,7 +33,7 @@ class UpsertAzureLoadBalancerDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, AzureLoadBalancerDescription description, Errors errors) { + void validate(List priorDescriptions, AzureLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("upsertAzureLoadBalancerDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerController.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerController.groovy deleted file mode 100644 index d99a1c68619..00000000000 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerController.groovy +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2015 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.view - -import com.fasterxml.jackson.annotation.JsonProperty -import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities -import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired - -/** - * @deprecated - Use AzureAppGatewayController instead. 
- */ -@Deprecated -class AzureLoadBalancerController { - - final String cloudProvider = "DoNotUse" - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Autowired - AzureLoadBalancerProvider azureLoadBalancerProvider - - List list() { - getSummaryForLoadBalancers().values() as List - } - - private Map getSummaryForLoadBalancers() { - Map map = [:] - def loadBalancers = azureLoadBalancerProvider.getApplicationLoadBalancers('*') - - loadBalancers?.each() { lb -> - def summary = map.get(lb.name) - - if (!summary) { - summary = new AzureLoadBalancerSummary(name: lb.name) - map.put lb.name, summary - } - - def loadBalancerDetail = new AzureLoadBalancerDetail(account: lb.account, name: lb.name, region: lb.region) - - summary.getOrCreateAccount(lb.account).getOrCreateRegion(lb.region).loadBalancers << loadBalancerDetail - } - map - } - - LoadBalancerProvider.Item get(String name) { - throw new UnsupportedOperationException("TODO: Implement single getter.") - } - - List byAccountAndRegionAndName(String account, String region, String name) { - String appName = AzureUtilities.getAppNameFromAzureResourceName(name) - AzureLoadBalancerDescription azureLoadBalancerDescription = azureLoadBalancerProvider.getLoadBalancerDescription(account, appName, region, name) - - if (azureLoadBalancerDescription) { - def lbDetail = [ - name: azureLoadBalancerDescription.loadBalancerName - ] - - lbDetail.createdTime = azureLoadBalancerDescription.createdTime - lbDetail.serverGroup = azureLoadBalancerDescription.serverGroup - lbDetail.vnet = azureLoadBalancerDescription.vnet ?: "vnet-unassigned" - lbDetail.subnet = azureLoadBalancerDescription.subnet ?: "subnet-unassigned" - lbDetail.dnsName = azureLoadBalancerDescription.dnsName ?: "dnsname-unassigned" - - lbDetail.probes = azureLoadBalancerDescription.probes - lbDetail.securityGroup = azureLoadBalancerDescription.securityGroup - lbDetail.loadBalancingRules = azureLoadBalancerDescription.loadBalancingRules - lbDetail.inboundNATRules = azureLoadBalancerDescription.inboundNATRules - lbDetail.tags = azureLoadBalancerDescription.tags - - return [lbDetail] - } - - return [] - } - - static class AzureLoadBalancerSummary implements LoadBalancerProvider.Item { - private Map mappedAccounts = [:] - String name - - AzureLoadBalancerAccount getOrCreateAccount(String name) { - if (!mappedAccounts.containsKey(name)) { - mappedAccounts.put(name, new AzureLoadBalancerAccount(name:name)) - } - - mappedAccounts[name] - } - - @JsonProperty("accounts") - List getByAccounts() { - mappedAccounts.values() as List - } - } - - static class AzureLoadBalancerAccount implements LoadBalancerProvider.ByAccount { - private Map mappedRegions = [:] - String name - - AzureLoadBalancerAccountRegion getOrCreateRegion(String name) { - if (!mappedRegions.containsKey(name)) { - mappedRegions.put(name, new AzureLoadBalancerAccountRegion(name: name, loadBalancers: [])) - } - mappedRegions[name]; - } - - @JsonProperty("regions") - List getByRegions() { - mappedRegions.values() as List - } - - } - - static class AzureLoadBalancerAccountRegion implements LoadBalancerProvider.Details { - String name - List loadBalancers - } - - static class AzureLoadBalancerDetail implements LoadBalancerProvider.Details { - String account - String region - String name - String type="azure" - } -} diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProvider.groovy 
b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProvider.groovy index 77bd95e9b22..caf34ead6e8 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProvider.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProvider.groovy @@ -16,14 +16,19 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.view +import com.fasterxml.jackson.annotation.JsonProperty import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider +import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities +import com.netflix.spinnaker.clouddriver.azure.resources.cluster.view.AzureClusterProvider import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @@ -34,12 +39,119 @@ import org.springframework.web.bind.annotation.RestController @RestController @Component -class AzureLoadBalancerProvider /*implements LoadBalancerProvider */ { +class AzureLoadBalancerProvider implements LoadBalancerProvider { + + final String cloudProvider = AzureCloudProvider.ID private final AzureCloudProvider azureCloudProvider private final Cache cacheView final ObjectMapper objectMapper + @Autowired + AzureClusterProvider clusterProvider + + List list() { + getSummaryForLoadBalancers().values() as List + } + + private Map getSummaryForLoadBalancers() { + Map map = [:] + def loadBalancers = getApplicationLoadBalancers('*') + + loadBalancers?.each() { lb -> + def summary = map.get(lb.name) + + if (!summary) { + summary = new AzureLoadBalancerSummary(name: lb.name) + map.put lb.name, summary + } + + def loadBalancerDetail = new AzureLoadBalancerDetail(account: lb.account, name: lb.name, region: lb.region) + + summary.getOrCreateAccount(lb.account).getOrCreateRegion(lb.region).loadBalancers << loadBalancerDetail + } + map + } + + LoadBalancerProvider.Item get(String name) { + throw new UnsupportedOperationException("TODO: Implement single getter.") + } + + List byAccountAndRegionAndName(String account, String region, String name) { + String appName = AzureUtilities.getAppNameFromAzureResourceName(name) + AzureLoadBalancerDescription azureLoadBalancerDescription = getLoadBalancerDescription(account, appName, region, name) + + if (azureLoadBalancerDescription) { + def lbDetail = [ + name: azureLoadBalancerDescription.loadBalancerName + ] + + lbDetail.createdTime = azureLoadBalancerDescription.createdTime + lbDetail.serverGroup = azureLoadBalancerDescription.serverGroups + lbDetail.vnet = azureLoadBalancerDescription.vnet ?: "vnet-unassigned" + lbDetail.subnet = azureLoadBalancerDescription.subnet ?: "subnet-unassigned" + 
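The provider now serves the summary tree that previously lived in the deleted AzureLoadBalancerController: one Item per load balancer name, grouped by account and then region. A construction sketch, assuming the nested classes shown below:

    def summary = new AzureLoadBalancerProvider.AzureLoadBalancerSummary(name: 'myapp-dev-lb')
    summary.getOrCreateAccount('my-azure-account')
           .getOrCreateRegion('westus')
           .loadBalancers << new AzureLoadBalancerProvider.AzureLoadBalancerDetail(
             account: 'my-azure-account', region: 'westus', name: 'myapp-dev-lb')
    // Jackson renders this as {name, accounts: [{name, regions: [{name, loadBalancers: [...]}]}]}
    // via the @JsonProperty("accounts") and @JsonProperty("regions") getters.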
lbDetail.dnsName = azureLoadBalancerDescription.dnsName ?: "dnsname-unassigned" + + lbDetail.probes = azureLoadBalancerDescription.probes + lbDetail.securityGroup = azureLoadBalancerDescription.securityGroup + lbDetail.loadBalancingRules = azureLoadBalancerDescription.loadBalancingRules + lbDetail.inboundNATRules = azureLoadBalancerDescription.inboundNATRules + lbDetail.tags = azureLoadBalancerDescription.tags + + return [lbDetail] + } + + return [] + } + + static class AzureLoadBalancerSummary implements LoadBalancerProvider.Item { + private Map mappedAccounts = [:] + String name + + AzureLoadBalancerAccount getOrCreateAccount(String name) { + if (!mappedAccounts.containsKey(name)) { + mappedAccounts.put(name, new AzureLoadBalancerAccount(name: name)) + } + + mappedAccounts[name] + } + + @JsonProperty("accounts") + List getByAccounts() { + mappedAccounts.values() as List + } + } + + static class AzureLoadBalancerAccount implements LoadBalancerProvider.ByAccount { + private Map mappedRegions = [:] + String name + + AzureLoadBalancerAccountRegion getOrCreateRegion(String name) { + if (!mappedRegions.containsKey(name)) { + mappedRegions.put(name, new AzureLoadBalancerAccountRegion(name: name, loadBalancers: [])) + } + mappedRegions[name] + } + + @JsonProperty("regions") + List getByRegions() { + mappedRegions.values() as List + } + + } + + static class AzureLoadBalancerAccountRegion implements LoadBalancerProvider.Details { + String name + List loadBalancers + } + + static class AzureLoadBalancerDetail implements LoadBalancerProvider.Details { + String account + String region + String name + String type = "azure" + } + @Autowired AzureLoadBalancerProvider(AzureCloudProvider azureCloudProvider, Cache cacheView, ObjectMapper objectMapper) { this.azureCloudProvider = azureCloudProvider @@ -78,14 +190,28 @@ class AzureLoadBalancerProvider /*implements LoadBalancerProvider + AzureServerGroupDescription asg = clusterProvider.getServerGroup(loadBalancer.account, loadBalancer.region, serverGroup) + + loadBalancer.serverGroups.add(new LoadBalancerServerGroup( + name: serverGroup, + isDisabled: asg?.isDisabled(), + detachedInstances: [], + instances: [], + cloudProvider: AzureCloudProvider.ID + )) + } + + loadBalancer } AzureLoadBalancerDescription getLoadBalancerDescription(String account, String appName, String region, String loadBalancerName) { diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescription.groovy index 15060f6de02..5d1baa4efa9 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescription.groovy @@ -16,37 +16,42 @@ package com.netflix.spinnaker.clouddriver.azure.resources.network.model -import com.microsoft.azure.management.network.models.VirtualNetwork +import com.azure.resourcemanager.network.fluent.models.VirtualNetworkInner import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription import com.netflix.spinnaker.clouddriver.azure.resources.subnet.model.AzureSubnetDescription +import groovy.transform.CompileStatic +@CompileStatic class 
AzureVirtualNetworkDescription extends AzureResourceOpsDescription { String id String type List addressSpace /* see addressPrefix */ String resourceId /* Azure resource ID */ String resourceGroup /* the Azure resource group where virtual network was created */ - Map tags + Map tags List subnets int maxSubnets int subnetAddressPrefixLength - static AzureVirtualNetworkDescription getDescriptionForVirtualNetwork(VirtualNetwork vnet) { + static AzureVirtualNetworkDescription getDescriptionForVirtualNetwork(VirtualNetworkInner vnet) { if (!vnet) { return null } AzureVirtualNetworkDescription description = new AzureVirtualNetworkDescription() - description.name = vnet.name - description.region = vnet.location + description.name = vnet.name() + description.region = vnet.location() // TODO We assume that the vnet first address space matters; we'll revise this later if we need to support more then one - description.addressSpace = vnet.addressSpace?.addressPrefixes - description.subnets = AzureSubnetDescription.getSubnetsForVirtualNetwork(vnet) - description.resourceId = vnet.id - description.resourceGroup = AzureUtilities.getResourceGroupNameFromResourceId(vnet.id) - description.id = vnet.name - description.tags = vnet.tags + description.addressSpace = vnet.addressSpace()?.addressPrefixes() + description.subnets = AzureSubnetDescription.getSubnetsForVirtualNetwork(vnet)?.toList() + description.resourceId = vnet.id() + description.resourceGroup = AzureUtilities.getResourceGroupNameFromResourceId(vnet.id()) + description.id = vnet.name() + if (vnet.tags()){ + description.tags = new HashMap() + description.tags.putAll(vnet.tags()) + } description.subnetAddressPrefixLength = description.subnets?.min {it.addressPrefixLength}?.addressPrefixLength ?: AzureUtilities.SUBNET_DEFAULT_ADDRESS_PREFIX_LENGTH description.maxSubnets = AzureUtilities.getSubnetRangeMax( description.addressSpace?.first(), diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/cache/AzureSecurityGroupCachingAgent.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/cache/AzureSecurityGroupCachingAgent.groovy index d578679bdaf..acb8c68054f 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/cache/AzureSecurityGroupCachingAgent.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/cache/AzureSecurityGroupCachingAgent.groovy @@ -36,6 +36,7 @@ import com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.model.Azu import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import groovy.transform.WithWriteLock import groovy.util.logging.Slf4j @@ -68,7 +69,7 @@ class AzureSecurityGroupCachingAgent implements CachingAgent, OnDemandAgent, Acc this.region = region this.objectMapper = objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) this.registry = registry - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${azureCloudProvider.id}:${OnDemandAgent.OnDemandType.SecurityGroup}") + this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${azureCloudProvider.id}:${OnDemandType.SecurityGroup}") } @Override @@ -97,8 +98,8 @@ class AzureSecurityGroupCachingAgent implements 
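The caching-agent changes here and in AzureServerGroupCachingAgent below track OnDemandType's move from a nested OnDemandAgent.OnDemandType enum to the top-level com.netflix.spinnaker.clouddriver.cache.OnDemandType; the handles() predicate itself is unchanged:

    import com.netflix.spinnaker.clouddriver.cache.OnDemandType

    // Same check as before the migration, minus the OnDemandAgent. prefix;
    // 'azure' stands in for azureCloudProvider.id here.
    boolean handles(OnDemandType type, String cloudProvider) {
      type == OnDemandType.SecurityGroup && cloudProvider == 'azure'
    }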
CachingAgent, OnDemandAgent, Acc } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.SecurityGroup && cloudProvider == azureCloudProvider.id + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.SecurityGroup && cloudProvider == azureCloudProvider.id } @Override @@ -134,6 +135,7 @@ class AzureSecurityGroupCachingAgent implements CachingAgent, OnDemandAgent, Acc return buildCacheResult(providerCache, null, 0, updatedSecurityGroup, null) } else { evictedSecurityGroup = new AzureSecurityGroupDescription( + id: securityGroupName, name: securityGroupName, region: region, appName: AzureUtilities.getAppNameFromAzureResourceName(securityGroupName), diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/model/AzureSecurityGroupDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/model/AzureSecurityGroupDescription.groovy index d2bff4eecb6..78d8ceda92d 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/model/AzureSecurityGroupDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/model/AzureSecurityGroupDescription.groovy @@ -24,13 +24,15 @@ class AzureSecurityGroupDescription extends AzureResourceOpsDescription { String id String location String type - Map tags = [:] + Map tags = [:] String provisioningState String resourceGuid List securityRules = [] List networkInterfaces = [] List subnets = [] String subnet + String vnet + String vnetResourceGroup static class AzureSGRule { String id @@ -39,12 +41,16 @@ class AzureSecurityGroupDescription extends AzureResourceOpsDescription { String resourceId /*Azure resource ID */ String description /* restricted to 140 chars */ String access /* gets or sets network traffic is allowed or denied; possible values are “Allow” and “Deny” */ - String destinationAddressPrefix /* CIDR or destination IP range; asterix “*” can also be used to match all source IPs; default tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used */ - String destinationPortRange /* Integer or range between 0 and 65535; asterix “*” can also be used to match all ports */ + String destinationAddressPrefix /* CIDR or destination IP range; asterisk “*” can also be used to match all source IPs; default tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used */ + String destinationPortRange /* Integer or range between 0 and 65535; asterisk “*” can also be used to match all ports */ + List destinationPortRanges /* List of integer or range between 0 and 65535 */ + String destinationPortRangeModel /* The normalized destination port, populated from either destinationPortRange or destinationPortRanges */ String direction /* InBound or Outbound */ Integer priority /* value can be between 100 and 4096 */ String protocol /* Tcp, Udp or All(*) */ - String sourceAddressPrefix /* CIDR or source IP range; asterix “*” can also be used to match all source IPs; default tags such as 
‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used */ + List sourceAddressPrefixes /* List of CIDR or source IP range */ + String sourceAddressPrefixModel /* The normalized source IP/CIDR address, populated from either sourceAddressPrefix or sourceAddressPrefixes */ + String sourcePortRange /* Integer or range between 0 and 65535; asterisk “*” can also be used to match all ports */ } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/UpsertAzureSecurityGroupAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/UpsertAzureSecurityGroupAtomicOperation.groovy index f64e291e577..fec94e7cc21 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/UpsertAzureSecurityGroupAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/UpsertAzureSecurityGroupAtomicOperation.groovy @@ -16,7 +16,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.ops -import com.microsoft.azure.management.resources.models.DeploymentExtended +import com.azure.resourcemanager.resources.models.Deployment import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.model.AzureDeploymentOperation import com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.model.UpsertAzureSecurityGroupDescription @@ -59,17 +59,27 @@ class UpsertAzureSecurityGroupAtomicOperation implements AtomicOperation { String resourceGroupName = AzureUtilities.getResourceGroupName(description.appName, description.region) // Create corresponding ResourceGroup if it's not created already - description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(description.credentials, resourceGroupName, null, description.region) + description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(resourceGroupName, null, description.region) - DeploymentExtended deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( + def templateParamMap = [ + location : description.region, + networkSecurityGroupName : description.securityGroupName, + networkSecurityGroupResourceGroupName : resourceGroupName, + virtualNetworkName : description.vnet, + virtualNetworkResourceGroupName : description.vnetResourceGroup, + subnetName : description.subnet + ] + + Deployment deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( AzureSecurityGroupResourceTemplate.getTemplate(description), resourceGroupName, description.region, description.securityGroupName, - "securityGroup") + "securityGroup", + templateParamMap) - errList = AzureDeploymentOperation.checkDeploymentOperationStatus(task,BASE_PHASE, description.credentials, resourceGroupName, deployment.name) - } catch (Exception e) { + errList = AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name()) + } catch (Throwable e) { task.updateStatus(BASE_PHASE, "Deployment of security group $description.securityGroupName failed: ${e.message}") errList.add(e.message) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/DeleteAzureSecurityGroupAtomicOperationConverter.groovy 
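Sketch of the explicit parameter map the security-group upsert now passes as the sixth argument of createResourceFromTemplate; the keys are the ones defined in the hunk above, while the values here are illustrative placeholders:

    def templateParamMap = [
      location                             : 'westus',
      networkSecurityGroupName             : 'myapp-dev-sg',
      networkSecurityGroupResourceGroupName: 'myapp-westus',        // hypothetical names
      virtualNetworkName                   : 'vnet-westus-myapp',
      virtualNetworkResourceGroupName      : 'myapp-westus',
      subnetName                           : 'myapp-dev-subnet'
    ]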
b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/DeleteAzureSecurityGroupAtomicOperationConverter.groovy index 22ac99be0ee..44169f828b7 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/DeleteAzureSecurityGroupAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/DeleteAzureSecurityGroupAtomicOperationConverter.groovy @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component @Component("deleteAzureSecurityGroupDescription") class DeleteAzureSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { DeleteAzureSecurityGroupAtomicOperationConverter() { - log.info("Constructor....DeleteAzureSecurityGroupAtomicOperationConverter") + log.trace("Constructor....DeleteAzureSecurityGroupAtomicOperationConverter") } @Override diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/UpsertAzureSecurityGroupAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/UpsertAzureSecurityGroupAtomicOperationConverter.groovy index 41ad852597f..cb0fc1b3f62 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/UpsertAzureSecurityGroupAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/converters/UpsertAzureSecurityGroupAtomicOperationConverter.groovy @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component @Component("upsertAzureSecurityGroupDescription") class UpsertAzureSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { UpsertAzureSecurityGroupAtomicOperationConverter() { - log.info("Constructor....UpsertAzureSecurityGroupAtomicOperationConverter") + log.trace("Constructor....UpsertAzureSecurityGroupAtomicOperationConverter") } AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/DeleteAzureSecurityGroupDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/DeleteAzureSecurityGroupDescriptionValidator.groovy index 752aec179a6..5c07f739f55 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/DeleteAzureSecurityGroupDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/DeleteAzureSecurityGroupDescriptionValidator.groovy @@ -19,10 +19,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.ops.vali import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.model.DeleteAzureSecurityGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import 
org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("deleteAzureSecurityGroupDescriptionValidator") class DeleteAzureSecurityGroupDescriptionValidator extends @@ -31,7 +31,7 @@ class DeleteAzureSecurityGroupDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, DeleteAzureSecurityGroupDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteAzureSecurityGroupDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("deleteAzureSecurityGroupDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/UpsertAzureSecurityGroupDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/UpsertAzureSecurityGroupDescriptionValidator.groovy index 0079181f8a2..24cd9e3df4e 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/UpsertAzureSecurityGroupDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/ops/validators/UpsertAzureSecurityGroupDescriptionValidator.groovy @@ -17,12 +17,12 @@ package com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.ops.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.azure.resources.securitygroup.model.UpsertAzureSecurityGroupDescription import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("upsertAzureSecurityGroupDescriptionValidator") class UpsertAzureSecurityGroupDescriptionValidator extends @@ -32,7 +32,7 @@ class UpsertAzureSecurityGroupDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, UpsertAzureSecurityGroupDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertAzureSecurityGroupDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("UpsertAzureSecurityGroupDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/view/AzureSecurityGroupProvider.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/view/AzureSecurityGroupProvider.groovy index 53fd29a5581..bcb6bda4650 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/view/AzureSecurityGroupProvider.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/view/AzureSecurityGroupProvider.groovy @@ -73,6 +73,11 @@ class AzureSecurityGroupProvider implements SecurityGroupProvider getAllMatchingKeyPattern(String pattern, boolean 
includeRules) { loadResults(includeRules, cacheView.filterIdentifiers(Keys.Namespace.SECURITY_GROUPS.ns, pattern)) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/cache/AzureServerGroupCachingAgent.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/cache/AzureServerGroupCachingAgent.groovy index 715750c33ff..923d37129ab 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/cache/AzureServerGroupCachingAgent.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/cache/AzureServerGroupCachingAgent.groovy @@ -31,6 +31,9 @@ import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.common.cache.AzureCachingAgent import com.netflix.spinnaker.clouddriver.azure.common.cache.MutableCacheData import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer +import com.netflix.spinnaker.clouddriver.cache.OnDemandType + import static com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys.Namespace.* import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials @@ -62,7 +65,16 @@ class AzureServerGroupCachingAgent extends AzureCachingAgent { List serverGroups = creds.computeClient.getServerGroupsAll(region) serverGroups?.each { try { - it.isDisabled = creds.networkClient.isServerGroupDisabled(AzureUtilities.getResourceGroupName(it.appName, region), it.appGatewayName, it.name) + if (it.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + it.disabled = creds.networkClient.isServerGroupWithLoadBalancerDisabled(AzureUtilities.getResourceGroupName(it.appName, region), it.loadBalancerName, it.name) + } else if (it.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString()) { + it.disabled = creds.networkClient.isServerGroupWithAppGatewayDisabled(AzureUtilities.getResourceGroupName(it.appName, region), it.appGatewayName, it.name) + } else if (it.loadBalancerType == null) { + it.disabled = creds.networkClient.isServerGroupWithoutLoadBalancerDisabled(AzureUtilities.getResourceGroupName(it.appName, region), it.name) + } else { + throw new RuntimeException("Invalid load balancer type $it.loadBalancerType") + } + } catch (Exception e) { log.warn("Exception ${e.message} while computing 'isDisable' state for server group ${it.name}") } @@ -85,15 +97,13 @@ class AzureServerGroupCachingAgent extends AzureCachingAgent { it.attributes.processedCount = (it.attributes.processedCount ?: 0) + 1 } - if (result.cacheResults[AZURE_SERVER_GROUPS.ns]) { - result - } else { - // run the cache cleanup routine on an empty server group list only for now - removeDeadCacheEntries(result, providerCache) - } + removeDeadCacheEntries(result, providerCache) + + result } CacheResult removeDeadCacheEntries(CacheResult cacheResult, ProviderCache providerCache) { + // Server Groups def sgIdentifiers = providerCache.filterIdentifiers(AZURE_SERVER_GROUPS.ns, Keys.getServerGroupKey(AzureCloudProvider.ID, "*", region, accountName)) def sgCacheResults = providerCache.getAll((AZURE_SERVER_GROUPS.ns), sgIdentifiers, RelationshipCacheFilter.none()) def 
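The disabled-state computation above now fans out on loadBalancerType. A condensed sketch of the dispatch, using the three networkClient methods introduced in this hunk; the string literals are assumed toString() forms of the enum, and rg abbreviates the resource group name:

    // type == null means the server group fronts no load balancer at all.
    boolean isServerGroupDisabled(def networkClient, String type, String rg,
                                  String lbOrGatewayName, String serverGroupName) {
      if (type == 'AZURE_LOAD_BALANCER') {
        return networkClient.isServerGroupWithLoadBalancerDisabled(rg, lbOrGatewayName, serverGroupName)
      } else if (type == 'AZURE_APPLICATION_GATEWAY') {
        return networkClient.isServerGroupWithAppGatewayDisabled(rg, lbOrGatewayName, serverGroupName)
      } else if (type == null) {
        return networkClient.isServerGroupWithoutLoadBalancerDisabled(rg, serverGroupName)
      }
      throw new RuntimeException("Invalid load balancer type $type")
    }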
evictedSGList = sgCacheResults.collect{ cached -> @@ -103,10 +113,12 @@ class AzureServerGroupCachingAgent extends AzureCachingAgent { null } } + evictedSGList.removeAll(Collections.singleton(null)) if (evictedSGList) { cacheResult.evictions[AZURE_SERVER_GROUPS.ns] = evictedSGList } + // Instances def instanceIdentifiers = providerCache.filterIdentifiers(AZURE_INSTANCES.ns, Keys.getInstanceKey(AzureCloudProvider.ID, "*", "*", region, accountName)) def instanceCacheResults = providerCache.getAll((AZURE_INSTANCES.ns), instanceIdentifiers, RelationshipCacheFilter.none()) def evictedInstanceList = instanceCacheResults.collect{ cached -> @@ -116,12 +128,34 @@ class AzureServerGroupCachingAgent extends AzureCachingAgent { null } } + evictedInstanceList.removeAll(Collections.singleton(null)) if (evictedInstanceList) { cacheResult.evictions[AZURE_INSTANCES.ns] = evictedInstanceList } - // TODO: evict dead cluster cache entries - // Since the cluster is not region base (unlike the cache agent) we need to make sure that we don't remove "live" entries + // Clusters + def clusterIdentifiers = providerCache.filterIdentifiers(AZURE_CLUSTERS.ns, Keys.getClusterKey(AzureCloudProvider.ID, "*", "*", accountName)) + def clusterCacheResults = providerCache.getAll((AZURE_CLUSTERS.ns), clusterIdentifiers, RelationshipCacheFilter.include(AZURE_SERVER_GROUPS.ns)) + + def evictedClusterList = clusterCacheResults?.collect{ cached -> + def relatedServerGroups = cached.relationships.azureServerGroups + if(relatedServerGroups) { + if (!relatedServerGroups.find { + providerCache.exists(AZURE_SERVER_GROUPS.ns, it) + }) { + cached.id + } else { + null + } + } else { + cached.id + } + } + + evictedClusterList.removeAll(Collections.singleton(null)) + if (evictedClusterList) { + cacheResult.evictions[AZURE_CLUSTERS.ns] = evictedClusterList + } cacheResult } @@ -314,8 +348,8 @@ class AzureServerGroupCachingAgent extends AzureCachingAgent { } @Override - OnDemandAgent.OnDemandType getOnDemandType() { - OnDemandAgent.OnDemandType.ServerGroup + OnDemandType getOnDemandType() { + OnDemandType.ServerGroup } private static void cache(List data, Map cacheDataById) { diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstance.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstance.groovy index 303d3ba94f2..b758da0b624 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstance.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstance.groovy @@ -16,14 +16,18 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSetVM +import com.azure.resourcemanager.compute.models.StatusLevelTypes +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetVM import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance +import groovy.transform.CompileStatic - +@CompileStatic class AzureInstance implements Instance, Serializable { + public static final String APP_HEALTH_EXT_LINUX = "Microsoft.ManagedServices.ApplicationHealthLinux" + public static final String APP_HEALTH_EXT_WINDOWS = 
"Microsoft.ManagedServices.ApplicationHealthWindows" String name String resourceId String vhd @@ -31,23 +35,23 @@ class AzureInstance implements Instance, Serializable { Long launchTime final String zone = 'N/A' String instanceType - List> health + List> health final String providerType = AzureCloudProvider.ID final String cloudProvider = AzureCloudProvider.ID static AzureInstance build(VirtualMachineScaleSetVM vm) { AzureInstance instance = new AzureInstance() - instance.name = vm.name - instance.instanceType = vm.sku.name - instance.resourceId = vm.instanceId - instance.vhd = vm.storageProfile?.osDisk?.vhd?.uri + instance.name = vm.name() + instance.instanceType = vm.sku().name() + instance.resourceId = vm.instanceId() + instance.vhd = vm.storageProfile()?.osDisk()?.vhd()?.uri() - vm.instanceView?.statuses?.each { status -> - def codes = status.code.split('/') + vm.instanceView()?.statuses()?.each { status -> + def codes = status.code().split('/') switch (codes[0]) { case "ProvisioningState": if (codes[1].toLowerCase() == AzureUtilities.ProvisioningState.SUCCEEDED.toLowerCase()) { - instance.launchTime = status.time?.millis + instance.launchTime = status.time()?.toEpochSecond() } else { instance.healthState = HealthState.Failed } @@ -61,6 +65,23 @@ class AzureInstance implements Instance, Serializable { } } + + // if health extension exists, read its status and update health state + vm?.instanceView()?.extensions()?.each { extension -> + if (extension.type() == APP_HEALTH_EXT_LINUX || + extension.type() == APP_HEALTH_EXT_WINDOWS) { + def substatuses = extension.substatuses() + if (substatuses != null) { + def statusLevel = substatuses[0]?.level() + if (statusLevel == StatusLevelTypes.ERROR) { + instance.healthState = HealthState.Down + } else { + instance.healthState = HealthState.Up + } + } + } + } + instance } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureServerGroupDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureServerGroupDescription.groovy index 352f8d1b8d9..a72eb81a9d5 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureServerGroupDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureServerGroupDescription.groovy @@ -16,11 +16,15 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSet +import com.azure.resourcemanager.compute.fluent.models.VirtualMachineScaleSetInner +import com.azure.resourcemanager.compute.models.ResourceIdentityType +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetDataDisk +import com.google.common.collect.Sets import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureNamedImage import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance @@ -36,15 +40,17 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription 
implements Set loadBalancers Set securityGroups Set zones + Map instanceTags /* custom tags specified by user */ final String type = AzureCloudProvider.ID final String cloudProvider = AzureCloudProvider.ID Map launchConfig - ServerGroup.Capacity capacity - ServerGroup.ImagesSummary imagesSummary - ServerGroup.ImageSummary imageSummary + Capacity capacity + ImagesSummary imagesSummary + ImageSummary imageSummary UpgradePolicy upgradePolicy String loadBalancerName + String loadBalancerType String appGatewayName String appGatewayBapId AzureNamedImage image @@ -56,7 +62,7 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements String securityGroupName String subnetId /*Azure resource ID*/ List storageAccountNames - Boolean isDisabled = false + Boolean disabled = false List inboundPortConfigs = [] String vnet String subnet @@ -64,6 +70,15 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements Boolean hasNewSubnet = false Boolean createNewSubnet = false AzureExtensionCustomScriptSettings customScriptsSettings + AzureExtensionHealthSettings healthSettings + Boolean enableInboundNAT = false + List dataDisks + Integer terminationNotBeforeTimeoutInMinutes + String windowsTimeZone + Boolean doNotRunExtensionsOnOverprovisionedVMs = false + Boolean useSystemManagedIdentity = false + String userAssignedIdentities + Boolean enableIpForwarding = false static class AzureScaleSetSku { String name @@ -84,7 +99,12 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements int frontEndPortRangeStart int frontEndPortRangeEnd int backendPort + } + static class AzureExtensionHealthSettings { + String protocol + String port + String requestPath } static class AzureExtensionCustomScriptSettings { @@ -93,7 +113,7 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements } Integer getStorageAccountCount() { - (sku.capacity / 20) + 1 + (int)(sku.capacity / 20) + 1 } static UpgradePolicy getPolicyFromMode(String mode) { @@ -108,19 +128,20 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements String.format("%s-%s-%s", application, stack, detail) } - @Override - Set getLoadBalancers() { - return [this.appGatewayName] + Boolean isDisabled() { + disabled } @Override - Set getSecurityGroups() { - return [this.securityGroupName] + Set getLoadBalancers() { + if(this.appGatewayName != null) return Sets.newHashSet(this.appGatewayName) + if(this.loadBalancerName != null) return Sets.newHashSet(this.loadBalancerName) + new HashSet() } @Override - Boolean isDisabled() { - this.isDisabled + Set getSecurityGroups() { + return this.securityGroupName == null ? new HashSet() : Sets.newHashSet(this.securityGroupName) } @Override @@ -136,80 +157,125 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements } @Override - ServerGroup.Capacity getCapacity() { - new ServerGroup.Capacity( + Capacity getCapacity() { + new Capacity( min: 1, max: instances ? instances.size() : 1, - desired: 1 //TODO (scotm) figure out how these should be set correctly + desired: instances ? 
instances.size() : 1 ) } - static AzureServerGroupDescription build(VirtualMachineScaleSet scaleSet) { + static AzureServerGroupDescription build(VirtualMachineScaleSetInner scaleSet) { def azureSG = new AzureServerGroupDescription() - azureSG.name = scaleSet.name - def parsedName = Names.parseName(scaleSet.name) + azureSG.name = scaleSet.name() + def parsedName = Names.parseName(scaleSet.name()) // Get the values from the tags if they exist - azureSG.tags = scaleSet.tags ? scaleSet.tags : [:] + azureSG.tags = scaleSet.tags() ? scaleSet.tags() : [:] // favor tag settings then Frigga name parser - azureSG.appName = scaleSet.tags?.appName ?: parsedName.app - azureSG.stack = scaleSet.tags?.stack ?: parsedName.stack - azureSG.detail = scaleSet.tags?.detail ?: parsedName.detail + azureSG.appName = scaleSet.tags()?.appName ?: parsedName.app + azureSG.stack = scaleSet.tags()?.stack ?: parsedName.stack + azureSG.detail = scaleSet.tags()?.detail ?: parsedName.detail azureSG.application = azureSG.appName - azureSG.clusterName = scaleSet.tags?.cluster ?: parsedName.cluster - azureSG.securityGroupName = scaleSet.tags?.securityGroupName - azureSG.loadBalancerName = scaleSet.tags?.loadBalancerName - azureSG.appGatewayName = scaleSet.tags?.appGatewayName - azureSG.appGatewayBapId = scaleSet.tags?.appGatewayBapId + azureSG.clusterName = scaleSet.tags()?.cluster ?: parsedName.cluster + azureSG.securityGroupName = scaleSet.tags()?.securityGroupName + azureSG.loadBalancerName = scaleSet.tags()?.loadBalancerName + azureSG.enableInboundNAT = scaleSet.tags()?.enableInboundNAT + azureSG.appGatewayName = scaleSet.tags()?.appGatewayName + if (azureSG.appGatewayName == null && azureSG.loadBalancerName == null) { + azureSG.loadBalancerType = null + } else if (azureSG.appGatewayName == null) { + azureSG.loadBalancerType = AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString() + } else { + azureSG.loadBalancerType = AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString() + } + azureSG.appGatewayBapId = scaleSet.tags()?.appGatewayBapId + + def networkInterfaceConfigurations = scaleSet.virtualMachineProfile()?.networkProfile()?.networkInterfaceConfigurations() + + if (networkInterfaceConfigurations && networkInterfaceConfigurations.size() > 0) { + azureSG.enableIpForwarding = networkInterfaceConfigurations[0].enableIpForwarding() + } + // scaleSet.virtualMachineProfile()?.networkProfile()?.networkInterfaceConfigurations()?[0].ipConfigurations()?[0].applicationGatewayBackendAddressPools()?[0].id() // TODO: appGatewayBapId can be retrieved via scaleSet->networkProfile->networkInterfaceConfigurations->ipConfigurations->ApplicationGatewayBackendAddressPools - azureSG.subnetId = scaleSet.tags?.subnetId + azureSG.subnetId = scaleSet.tags()?.subnetId azureSG.subnet = AzureUtilities.getNameFromResourceId(azureSG.subnetId) - azureSG.vnet = azureSG.subnetId ? AzureUtilities.getNameFromResourceId(azureSG.subnetId) : scaleSet.tags?.vnet - azureSG.vnetResourceGroup = azureSG.subnetId ? AzureUtilities.getResourceGroupNameFromResourceId(azureSG.subnetId) : scaleSet.tags?.vnetResourceGroup - azureSG.hasNewSubnet = (scaleSet.tags?.hasNewSubnet == "true") + azureSG.vnet = azureSG.subnetId ? AzureUtilities.getNameFromResourceId(azureSG.subnetId) : scaleSet.tags()?.vnet + azureSG.vnetResourceGroup = azureSG.subnetId ? 
AzureUtilities.getResourceGroupNameFromResourceId(azureSG.subnetId) : scaleSet.tags()?.vnetResourceGroup + azureSG.hasNewSubnet = (scaleSet.tags()?.hasNewSubnet == "true") - azureSG.createdTime = scaleSet.tags?.createdTime?.toLong() - azureSG.image = new AzureNamedImage(isCustom: scaleSet.tags?.customImage, imageName: scaleSet.tags?.imageName) + azureSG.createdTime = scaleSet.tags()?.createdTime?.toLong() + azureSG.image = new AzureNamedImage(isCustom: scaleSet.tags()?.customImage, imageName: scaleSet.tags()?.imageName) if (!azureSG.image.isCustom) { // Azure server group which was created using Azure Market Store images will have a number of storage accounts // that were created at the time the server group was created; these storage accounts should be saved in the - // tags map under storrageAccountNames key as a comma separated list of strings + // tags map under storageAccountNames key as a comma separated list of strings azureSG.storageAccountNames = new ArrayList() - String storageNames = scaleSet.tags?.storageAccountNames + String storageNames = scaleSet.tags()?.storageAccountNames if (storageNames) azureSG.storageAccountNames.addAll(storageNames.split(",")) } + azureSG.doNotRunExtensionsOnOverprovisionedVMs = scaleSet.doNotRunExtensionsOnOverprovisionedVMs() + + //Fetch system and user assigned identity details + if(scaleSet.identity()!=null) { + ResourceIdentityType rType = scaleSet.identity().type() + azureSG.useSystemManagedIdentity = rType == ResourceIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED || rType == ResourceIdentityType.SYSTEM_ASSIGNED + if (rType == ResourceIdentityType.USER_ASSIGNED || rType == ResourceIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED) { + StringBuilder sb = new StringBuilder() + for (String identity : scaleSet.identity().userAssignedIdentities().keySet()) { + if (sb.length() > 0) { + sb.append(",") + } + sb.append(identity) + } + azureSG.userAssignedIdentities = sb.toString() + } + } + - azureSG.region = scaleSet.location - azureSG.upgradePolicy = getPolicyFromMode(scaleSet.upgradePolicy.mode) + azureSG.region = scaleSet.location() + azureSG.upgradePolicy = getPolicyFromMode(scaleSet.upgradePolicy().mode().name()) + + def termProfile = scaleSet.virtualMachineProfile()?.scheduledEventsProfile()?.terminateNotificationProfile() + if (termProfile) + { + String[] str = termProfile.notBeforeTimeout().findAll( /\d+/ ) + if (str.size() > 0) { + azureSG.terminationNotBeforeTimeoutInMinutes = str[0].toInteger() + } + } + azureSG.windowsTimeZone = scaleSet.virtualMachineProfile()?.osProfile()?.windowsConfiguration()?.timeZone() // Get the image reference data - def imgRef = scaleSet.virtualMachineProfile?.storageProfile?.imageReference + def storageProfile = scaleSet.virtualMachineProfile()?.storageProfile() + def imgRef = storageProfile?.imageReference() if (imgRef) { - azureSG.image.offer = imgRef.offer - azureSG.image.publisher = imgRef.publisher - azureSG.image.sku = imgRef.sku - azureSG.image.version = imgRef.version + azureSG.image.offer = imgRef.offer() + azureSG.image.publisher = imgRef.publisher() + azureSG.image.sku = imgRef.sku() + azureSG.image.version = imgRef.version() } + azureSG.dataDisks = storageProfile?.dataDisks() + // get the OS configuration data def osConfig = new AzureOperatingSystemConfig() - def osProfile = scaleSet?.virtualMachineProfile?.osProfile + def osProfile = scaleSet?.virtualMachineProfile()?.osProfile() if (osProfile) { - osConfig.adminPassword = osProfile.adminPassword - osConfig.adminUserName = osProfile.adminUsername -
osConfig.computerNamePrefix = osProfile.computerNamePrefix - osConfig.customData = osProfile.customData - + osConfig.adminPassword = osProfile.adminPassword() + osConfig.adminUserName = osProfile.adminUsername() + osConfig.computerNamePrefix = osProfile.computerNamePrefix() + osConfig.customData = osProfile.customData() } azureSG.osConfig = osConfig def customScriptSettings = new AzureExtensionCustomScriptSettings() - def extensionProfile = scaleSet?.virtualMachineProfile?.extensionProfile + def extensionProfile = scaleSet?.virtualMachineProfile()?.extensionProfile() if (extensionProfile) { - def customScriptExtensionSettings = extensionProfile.extensions.find({ - it.type == AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_TYPE_LINUX || - it.type == AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_TYPE_WINDOWS - })?.settings + def customScriptExtensionSettings = extensionProfile.extensions().find({ + it.type() == AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_TYPE_LINUX || + it.type() == AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_TYPE_WINDOWS + })?.settings() //def customScriptExtensionSettings = extensionProfile.extensions.find({it.type=="CustomScript"}).settings if (customScriptExtensionSettings) { customScriptSettings = mapper.convertValue(customScriptExtensionSettings, AzureExtensionCustomScriptSettings) @@ -219,21 +285,23 @@ class AzureServerGroupDescription extends AzureResourceOpsDescription implements azureSG.customScriptsSettings = customScriptSettings def sku = new AzureScaleSetSku() - def skuData = scaleSet.sku + def skuData = scaleSet.sku() if (skuData) { - sku.capacity = skuData.capacity - sku.name = skuData.name - sku.tier = skuData.tier + sku.capacity = skuData.capacity() + sku.name = skuData.name() + sku.tier = skuData.tier() } azureSG.sku = sku + def zones = scaleSet.zones() + azureSG.zones = zones == null ? 
new HashSet<>() : zones.toSet() - azureSG.provisioningState = scaleSet.provisioningState + azureSG.provisioningState = scaleSet.provisioningState() azureSG } - static Collection filterInstancesByHealthState(Set instances, HealthState healthState) { - instances?.findAll { Instance it -> it.getHealthState() == healthState } + static Collection filterInstancesByHealthState(Set instances, HealthState healthState) { + (Collection) instances?.findAll { Instance it -> it.getHealthState() == healthState } } void addInboundPortConfig(String name, int startRange, int endRange, String protocol, int backendPort) { diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupAtomicOperation.groovy index 87c725eff58..74eb3c6aaa4 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupAtomicOperation.groovy @@ -16,7 +16,6 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops -import com.microsoft.azure.management.resources.models.DeploymentExtended import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.model.AzureDeploymentOperation import com.netflix.spinnaker.clouddriver.azure.resources.network.model.AzureVirtualNetworkDescription @@ -30,8 +29,11 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException import org.springframework.beans.factory.annotation.Autowired +import com.azure.resourcemanager.resources.models.Deployment + class CreateAzureServerGroupAtomicOperation implements AtomicOperation { private static final String BASE_PHASE = "CREATE_SERVER_GROUP" + public static final long SERVER_WAIT_TIMEOUT = 60 * 60 * 1000 private static Task getTask() { TaskRepository.threadLocalTask.get() @@ -47,7 +49,7 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { } /** - * curl -X POST -H "Content-Type: application/json" -d '[{"createServerGroup":{"name":"taz-st1-d1","cloudProvider":"azure","application":"taz","stack":"st1","detail":"d1","vnet":"vnet-select","subnet":"subnet1","account":"azure-cred1","selectedProvider":"azure","capacity":{"useSourceCapacity":false,"min":1,"max":1},"credentials":"azure-cred1","region":"westus","loadBalancerName":"taz-ag1-d1","securityGroupName":"taz-secg1","user":"[anonymous]","upgradePolicy":"Manual","image":{"account":"azure-cred1","imageName":"UbuntuServer-14.04.3-LTS(Recommended)","isCustom":false,"offer":"UbuntuServer","ostype":null,"publisher":"Canonical","region":null,"sku":"14.04.3-LTS","uri":null,"version":"14.04.201602171"},"sku":{"name":"Standard_DS1_v2","tier":"Standard","capacity":1},"osConfig":{"adminUserName":"spinnakeruser","adminPassword":"!Qnti**234"},"type":"createServerGroup"}}]' localhost:7002/ops + * curl -X POST -H "Content-Type: application/json" -d 
'[{"createServerGroup":{"name":"taz-st1-d1","cloudProvider":"azure","application":"taz","stack":"st1","detail":"d1","vnet":"vnet-select","subnet":"subnet1","account":"azure-cred1","selectedProvider":"azure","capacity":{"useSourceCapacity":false,"min":1,"max":1},"credentials":"azure-cred1","region":"westus","loadBalancerName":"taz-ag1-d1","securityGroupName":"taz-secg1","user":"[anonymous]","upgradePolicy":"Manual","image":{"account":"azure-cred1","imageName":"UbuntuServer-14.04.3-LTS(Recommended)","isCustom":false,"offer":"UbuntuServer","ostype":null,"publisher":"Canonical","region":null,"sku":"14.04.3-LTS","uri":null,"version":"14.04.201602171"},"sku":{"name":"Standard_DS1_v2","tier":"Standard","capacity":1},"osConfig":{},"type":"createServerGroup"}}]' localhost:7002/ops * * @param priorOutputs * @return @@ -62,11 +64,9 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { String virtualNetworkName = null String subnetName = null String subnetId - String serverGroupName = null String appGatewayPoolID = null try { - task.updateStatus(BASE_PHASE, "Beginning server group deployment") // if this is not a custom image, then we need to go get the OsType from Azure @@ -78,8 +78,8 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { throw new RuntimeException("Invalid published image was selected; $description.image.publisher:$description.image.offer:$description.image.sku:$description.image.version does not exist") } - description.image.imageName ?: virtualMachineImage.name - description.image.ostype = virtualMachineImage?.osDiskImage?.operatingSystem + description.image.imageName ?: virtualMachineImage.innerModel().name() + description.image.ostype = virtualMachineImage?.osDiskImage()?.operatingSystem() } resourceGroupName = AzureUtilities.getResourceGroupName(description.application, description.region) @@ -87,6 +87,7 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { // TODO: replace appGatewayName with loadBalancerName if (!description.appGatewayName) { description.appGatewayName = description.loadBalancerName + description.loadBalancerName = null } def appGatewayDescription = description.credentials.networkClient.getAppGateway(resourceGroupName, description.appGatewayName) @@ -130,10 +131,10 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { // we'll do a final check to make sure that the subnet can be created before we pass it in the deployment template def vnet = description.credentials.networkClient.getVirtualNetwork(resourceGroupName, virtualNetworkName) - if (!subnetName || vnet?.subnets?.find { it.name == subnetName }) { + if (!subnetName || vnet?.subnets()?.find { it.key == subnetName }) { // virtualNetworkName is not yet in the cache or the subnet we try to create already exists; we'll use the current vnet // we just got to re-compute the next subnet - vnetDescription = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(vnet) + vnetDescription = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(vnet.innerModel()) nextSubnetAddressPrefix = AzureVirtualNetworkDescription.getNextSubnetAddressPrefix(vnetDescription, rand.nextInt(vnetDescription?.maxSubnets ?: 1)) subnetName = AzureUtilities.getSubnetName(virtualNetworkName, nextSubnetAddressPrefix) } @@ -184,16 +185,30 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { description.credentials.subscriptionId, description.credentials.defaultResourceGroup, description.credentials.defaultKeyVault) 
- templateParameters[AzureServerGroupResourceTemplate.vmPasswordParameterName] = new KeyVaultSecret("VMPassword", - description.credentials.subscriptionId, - description.credentials.defaultResourceGroup, - description.credentials.defaultKeyVault) - templateParameters[AzureServerGroupResourceTemplate.customDataParameterName] = description.osConfig.customData ?: "" + + if(description.credentials.useSshPublicKey) { + templateParameters[AzureServerGroupResourceTemplate.vmSshPublicKeyParameterName] = new KeyVaultSecret("VMSshPublicKey", + description.credentials.subscriptionId, + description.credentials.defaultResourceGroup, + description.credentials.defaultKeyVault) + } + else { + templateParameters[AzureServerGroupResourceTemplate.vmPasswordParameterName] = new KeyVaultSecret("VMPassword", + description.credentials.subscriptionId, + description.credentials.defaultResourceGroup, + description.credentials.defaultKeyVault) + } + + // An empty "" cannot be assigned to the custom data, otherwise the Azure service fails with "custom data must be in Base64". + // So when there is no custom data, omit this template section rather than assigning "". + if(description.osConfig.customData){ + templateParameters[AzureServerGroupResourceTemplate.customDataParameterName] = description.osConfig.customData + } if (errList.isEmpty()) { description.subnetId = subnetId task.updateStatus(BASE_PHASE, "Deploying server group") - DeploymentExtended deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( + Deployment deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( AzureServerGroupResourceTemplate.getTemplate(description), resourceGroupName, description.region, @@ -201,28 +216,45 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { "serverGroup", templateParameters) - errList.addAll(AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name)) - serverGroupName = errList.isEmpty() ? description.name : null + errList.addAll(AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name())) } } catch (Exception e) { task.updateStatus(BASE_PHASE, "Unexpected exception: Deployment of server group ${description.name} failed: ${e.message}") errList.add(e.message) } if (errList.isEmpty()) { + if (description.credentials.networkClient.isServerGroupWithAppGatewayDisabled(resourceGroupName, description.appGatewayName, description.name)) { + description + .credentials + .networkClient + .enableServerGroupWithAppGateway(resourceGroupName, description.appGatewayName, description.name) + + def healthy = description.credentials.computeClient.waitForScaleSetHealthy(resourceGroupName, description.name, SERVER_WAIT_TIMEOUT) + + if (healthy) { + task.updateStatus BASE_PHASE, "Done enabling Azure server group ${description.name} in ${description.region}." + } else { + errList.add("Server group did not come up in time") + } + + } else { + task.updateStatus BASE_PHASE, "Azure server group ${description.name} in ${description.region} is already enabled."
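The "custom data must be in Base64" comment above is worth a concrete illustration. Where the encoding happens for real deployments is outside this diff; this is only a self-contained Groovy round-trip showing the contract ARM enforces:

    // Scale-set custom data must arrive as valid Base64; per the comment in
    // the hunk above, ARM rejects an empty string here, which is why the
    // parameter is omitted entirely when no custom data is supplied.
    def cloudInit = "#cloud-config\nruncmd:\n  - echo hello"
    def encoded = cloudInit.bytes.encodeBase64().toString()
    assert new String(encoded.decodeBase64()) == cloudInit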
+ } + task.updateStatus(BASE_PHASE, "Deployment for server group ${description.name} in ${description.region} has succeeded.") } - else { - // cleanup any resources that might have been created prior to server group failing to deploy + + if (!errList.isEmpty()) { task.updateStatus(BASE_PHASE, "Cleanup any resources created as part of server group upsert") try { - if (serverGroupName) { + if (description.name) { def sgDescription = description.credentials .computeClient - .getServerGroup(resourceGroupName, serverGroupName) + .getServerGroup(resourceGroupName, description.name) if (sgDescription) { description.credentials .computeClient - .destroyServerGroup(resourceGroupName, serverGroupName) + .destroyServerGroup(resourceGroupName, description.name) // If this is an Azure Market Store image, delete the storage that was created for it as well if (!sgDescription.image.isCustom) { @@ -244,7 +276,6 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { task.updateStatus(BASE_PHASE, errMessage) errList.add(errMessage) } - try { if (appGatewayPoolID) { description .credentials @@ -256,7 +287,6 @@ class CreateAzureServerGroupAtomicOperation implements AtomicOperation { task.updateStatus(BASE_PHASE, errMessage) errList.add(errMessage) } - throw new AtomicOperationException("${description.name} deployment failed", errList) } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DestroyAzureServerGroupAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DestroyAzureServerGroupAtomicOperation.groovy index bcf01137d71..e6e387b5655 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DestroyAzureServerGroupAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DestroyAzureServerGroupAtomicOperation.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository @@ -78,12 +79,26 @@ class DestroyAzureServerGroupAtomicOperation implements AtomicOperation { // Clean up the storage account, load balancer and the subnet that were attached to the server group if (errList.isEmpty()) { - // Remove association between server group and the assigned application gateway backend address pool - task.updateStatus(BASE_PHASE, "Remove backend address pool in $description.appGatewayName") - description - .credentials - .networkClient - .removeAppGatewayBAPforServerGroup(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name) + if (serverGroupDescription.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + task.updateStatus(BASE_PHASE, "Remove backend address pool in $description.loadBalancerName") + description + .credentials + .networkClient + .removeLoadBalancerAPforServerGroup(resourceGroupName, serverGroupDescription.loadBalancerName, serverGroupDescription.name) + + task.updateStatus(BASE_PHASE, "Remove NAT pool in
$description.loadBalancerName") + description + .credentials + .networkClient + .removeLoadBalancerNatPoolPortRangeforServerGroup(resourceGroupName, serverGroupDescription.loadBalancerName, serverGroupDescription.name) + } else if (serverGroupDescription.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString()) { + // Remove association between server group and the assigned application gateway backend address pool + task.updateStatus(BASE_PHASE, "Remove backend address pool in $description.appGatewayName") + description + .credentials + .networkClient + .removeAppGatewayBAPforServerGroup(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name) + } // Delete storage accounts if any serverGroupDescription.storageAccountNames?.each { def storageAccountName -> @@ -102,18 +117,19 @@ class DestroyAzureServerGroupAtomicOperation implements AtomicOperation { } // Delete load balancer attached to server group - if (serverGroupDescription.loadBalancerName) { - task.updateStatus(BASE_PHASE, "Deleting load balancer ${serverGroupDescription.loadBalancerName} " + "in ${region}...") + if (serverGroupDescription.enableInboundNAT) { + String loadBalancerName = AzureUtilities.LB_NAME_PREFIX + serverGroupDescription.name + task.updateStatus(BASE_PHASE, "Deleting load balancer ${loadBalancerName} " + "in ${region}...") try { description .credentials .networkClient - .deleteLoadBalancer(resourceGroupName, serverGroupDescription.loadBalancerName) + .deleteLoadBalancer(resourceGroupName, loadBalancerName) - task.updateStatus(BASE_PHASE, "Deletion of Azure load balancer ${serverGroupDescription.loadBalancerName} in ${region} has succeeded.") + task.updateStatus(BASE_PHASE, "Deletion of Azure load balancer ${loadBalancerName} in ${region} has succeeded.") } catch (Exception e) { - task.updateStatus(BASE_PHASE, "Deletion of Azure load balancer ${serverGroupDescription.loadBalancerName} failed: ${e.message}") - errList.add("Failed to delete ${serverGroupDescription.loadBalancerName}: ${e.message}") + task.updateStatus(BASE_PHASE, "Deletion of Azure load balancer ${loadBalancerName} failed: ${e.message}") + errList.add("Failed to delete ${loadBalancerName}: ${e.message}") } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DisableAzureServerGroupAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DisableAzureServerGroupAtomicOperation.groovy index c2a2368c36c..faecffcb19e 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DisableAzureServerGroupAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/DisableAzureServerGroupAtomicOperation.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.EnableDisableDestroyAzureServerGroupDescription import com.netflix.spinnaker.clouddriver.data.task.Task @@ -65,16 +66,16 @@ class DisableAzureServerGroupAtomicOperation implements 
AtomicOperation { errList.add("could not find server group ${description.name} in ${region}") } else { try { - if (description.credentials.networkClient.isServerGroupDisabled(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name)) { - task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already disabled." + if(serverGroupDescription.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + disableServerGroupWithLoadBalancer(resourceGroupName, serverGroupDescription, region) + } else if (serverGroupDescription.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString()) { + disableServerGroupWithApplicationGateway(resourceGroupName, serverGroupDescription, region) + } else if (serverGroupDescription.loadBalancerType == null) { + disableServerGroupWithoutLoadBalancers(resourceGroupName, serverGroupDescription, region) } else { - description - .credentials - .networkClient - .disableServerGroup(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name) - - task.updateStatus BASE_PHASE, "Done disabling Azure server group ${serverGroupDescription.name} in ${region}." + throw new IllegalArgumentException("Load balancer type $serverGroupDescription.loadBalancerType was not valid.") } + } catch (Exception e) { task.updateStatus(BASE_PHASE, "Disabling of server group ${description.name} failed: ${e.message}") errList.add("Failed to disable server group ${description.name}: ${e.message}") @@ -95,4 +96,47 @@ class DisableAzureServerGroupAtomicOperation implements AtomicOperation { null } + + private void disableServerGroupWithoutLoadBalancers(String resourceGroupName, AzureServerGroupDescription serverGroupDescription, region) { + if (description + .credentials + .networkClient + .isServerGroupWithoutLoadBalancerDisabled(resourceGroupName, serverGroupDescription.name)) { + task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already disabled." + } else { + // there is no concept of a "disabled" server group without a load balancer, so scale to 0 + description + .credentials + .computeClient + .resizeServerGroup(resourceGroupName, serverGroupDescription.name, 0) + + task.updateStatus BASE_PHASE, "Done disabling (via resize) Azure server group ${serverGroupDescription.name} in ${region}." + } + } + + private void disableServerGroupWithApplicationGateway(String resourceGroupName, AzureServerGroupDescription serverGroupDescription, region) { + if (description.credentials.networkClient.isServerGroupWithAppGatewayDisabled(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name)) { + task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already disabled." + } else { + description + .credentials + .networkClient + .disableServerGroup(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name) + + task.updateStatus BASE_PHASE, "Done disabling Azure server group ${serverGroupDescription.name} in ${region}." 
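The "without load balancers" disable path above reduces to a resize to zero, since there is no backend pool to detach. resizeServerGroup itself lives in AzureComputeClient, which this diff does not show; with the new fluent SDK it plausibly looks like the following sketch (the Update-stage method names are assumptions, not verified against this PR):

    // Hypothetical sketch: scaling the VM scale set to 0 stops all instances
    // while keeping the scale set definition, so a later enable can resize it
    // back up.
    void resizeServerGroup(String resourceGroupName, String serverGroupName, int capacity) {
      azure.virtualMachineScaleSets()
        .getByResourceGroup(resourceGroupName, serverGroupName)
        .update()
        .withCapacity(capacity)
        .apply()
    }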
+ } + } + + private void disableServerGroupWithLoadBalancer(String resourceGroupName, AzureServerGroupDescription serverGroupDescription, region) { + if (description.credentials.networkClient.isServerGroupWithLoadBalancerDisabled(resourceGroupName, serverGroupDescription.loadBalancerName, serverGroupDescription.name)) { + task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already disabled." + } else { + description + .credentials + .networkClient + .disableServerGroupWithLoadBalancer(resourceGroupName, serverGroupDescription.loadBalancerName, serverGroupDescription.name) + + task.updateStatus BASE_PHASE, "Done disabling Azure server group ${serverGroupDescription.name} in ${region}." + } + } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/EnableAzureServerGroupAtomicOperation.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/EnableAzureServerGroupAtomicOperation.groovy index 828320b7f1a..ed7826ad1f0 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/EnableAzureServerGroupAtomicOperation.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/EnableAzureServerGroupAtomicOperation.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository @@ -65,14 +66,30 @@ class EnableAzureServerGroupAtomicOperation implements AtomicOperation { errList.add("could not find server group ${description.name} in ${region}") } else { try { - if (description.credentials.networkClient.isServerGroupDisabled(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name)) { - description - .credentials - .networkClient - .enableServerGroup(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name) - task.updateStatus BASE_PHASE, "Done enabling Azure server group ${serverGroupDescription.name} in ${region}." + if (serverGroupDescription.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + if (description.credentials.networkClient.isServerGroupWithLoadBalancerDisabled(resourceGroupName, serverGroupDescription.loadBalancerName, serverGroupDescription.name)) { + description + .credentials + .networkClient + .enableServerGroupWithLoadBalancer(resourceGroupName, serverGroupDescription.loadBalancerName, serverGroupDescription.name) + + waitForHealthy(resourceGroupName, serverGroupDescription, region, errList) + } else { + task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already enabled." 
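Both enable flows in these operations gate success on computeClient.waitForScaleSetHealthy, whose implementation is not part of this diff. Conceptually it is a bounded poll, roughly as sketched here (getScaleSetInstances is an assumed stand-in helper):

    // Poll until every instance reports Up or the timeout elapses; callers
    // treat a false return as "server group did not come up in time".
    boolean waitForScaleSetHealthy(String resourceGroupName, String name, long timeoutMillis) {
      long deadline = System.currentTimeMillis() + timeoutMillis
      while (System.currentTimeMillis() < deadline) {
        def instances = getScaleSetInstances(resourceGroupName, name)
        if (instances && instances.every { it.healthState == HealthState.Up }) {
          return true
        }
        sleep(10_000)  // Groovy's sleep; re-sleeps through interruption by default
      }
      return false
    }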
+ } + } else if (serverGroupDescription.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString()) { + if (description.credentials.networkClient.isServerGroupWithAppGatewayDisabled(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name)) { + description + .credentials + .networkClient + .enableServerGroupWithAppGateway(resourceGroupName, serverGroupDescription.appGatewayName, serverGroupDescription.name) + + waitForHealthy(resourceGroupName, serverGroupDescription, region, errList) + } else { + task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already enabled." + } } else { - task.updateStatus BASE_PHASE, "Azure server group ${serverGroupDescription.name} in ${region} is already enabled." + throw new RuntimeException("Azure server group with load balancer type $serverGroupDescription.loadBalancerType cannot be enabled.") } } catch (Exception e) { task.updateStatus(BASE_PHASE, "Enabling of server group ${description.name} failed: ${e.message}") @@ -94,4 +111,14 @@ class EnableAzureServerGroupAtomicOperation implements AtomicOperation { null } + + private void waitForHealthy(String resourceGroupName, AzureServerGroupDescription serverGroupDescription, String region, ArrayList errList) { + def healthy = description.credentials.computeClient.waitForScaleSetHealthy(resourceGroupName, serverGroupDescription.name, CreateAzureServerGroupAtomicOperation.SERVER_WAIT_TIMEOUT) + + if (healthy) { + task.updateStatus BASE_PHASE, "Done enabling Azure server group ${serverGroupDescription.name} in ${region}." + } else { + errList.add("Server group ${serverGroupDescription.name} in ${region} did not come up in time.") + } + } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/CreateAzureServerGroupAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/CreateAzureServerGroupAtomicOperationConverter.groovy index 2b09c1ead25..cef0c5179ea 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/CreateAzureServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/CreateAzureServerGroupAtomicOperationConverter.groovy @@ -18,8 +18,11 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.conver import com.netflix.spinnaker.clouddriver.azure.AzureOperation import com.netflix.spinnaker.clouddriver.azure.common.AzureAtomicOperationConverterHelper +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.CreateAzureServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.CreateAzureServerGroupWithoutLoadBalancersAtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport @@ -32,7 
+35,16 @@ import org.springframework.stereotype.Component class CreateAzureServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { AtomicOperation convertOperation(Map input) { - new CreateAzureServerGroupAtomicOperation(convertDescription(input)) + AzureServerGroupDescription asgd = convertDescription(input) + if (asgd.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + new CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation(asgd) + } else if (asgd.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString()) { + new CreateAzureServerGroupAtomicOperation(asgd) + } else if (asgd.loadBalancerType == null) { + new CreateAzureServerGroupWithoutLoadBalancersAtomicOperation(asgd) + } else { + throw new RuntimeException("Cannot create Azure server group with load balancer type $asgd.loadBalancerType") + } } AzureServerGroupDescription convertDescription(Map input) { diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DestroyAzureServerGroupAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DestroyAzureServerGroupAtomicOperationConverter.groovy index 56b5c54c4c7..93d535b017d 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DestroyAzureServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DestroyAzureServerGroupAtomicOperationConverter.groovy @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component @Component("destroyAzureServerGroupDescription") class DestroyAzureServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { DestroyAzureServerGroupAtomicOperationConverter() { - log.info("Constructor....DestroyAzureServerGroupAtomicOperationConverter") + log.trace("Constructor....DestroyAzureServerGroupAtomicOperationConverter") } @Override diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DisableAzureServerGroupAtomicOperationConverter.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DisableAzureServerGroupAtomicOperationConverter.groovy index 95a06860610..481225a2ad7 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DisableAzureServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/DisableAzureServerGroupAtomicOperationConverter.groovy @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component @Component("disableAzureServerGroupDescription") class DisableAzureServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { DisableAzureServerGroupAtomicOperationConverter() { - log.info("Constructor....DestroyAzureServerGroupAtomicOperationConverter") + log.trace("Constructor....DestroyAzureServerGroupAtomicOperationConverter") } @Override diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/EnableAzureServerGroupAtomicOperationConverter.groovy 
b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/EnableAzureServerGroupAtomicOperationConverter.groovy index 28e06b7f343..417adc96426 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/EnableAzureServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/EnableAzureServerGroupAtomicOperationConverter.groovy @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component @Component("enableAzureServerGroupDescription") class EnableAzureServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport{ EnableAzureServerGroupAtomicOperationConverter() { - log.info("Constructor....EnableAzureServerGroupAtomicOperationConverter") + log.trace("Constructor....EnableAzureServerGroupAtomicOperationConverter") } @Override diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/preprocessors/RegionsToRegionDescriptionPreProcessor.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/preprocessors/RegionsToRegionDescriptionPreProcessor.groovy index 6152abcce93..cb45a89312b 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/preprocessors/RegionsToRegionDescriptionPreProcessor.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/preprocessors/RegionsToRegionDescriptionPreProcessor.groovy @@ -19,11 +19,17 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.prepro import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.EnableDisableDestroyAzureServerGroupDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor import groovy.util.logging.Slf4j +import org.slf4j.Logger import org.springframework.stereotype.Component @Slf4j @Component class RegionsToRegionDescriptionPreProcessor implements AtomicOperationDescriptionPreProcessor { + + private Logger getLog() { + return log + } + @Override boolean supports(Class descriptionClass) { return descriptionClass == EnableDisableDestroyAzureServerGroupDescription @@ -36,7 +42,7 @@ class RegionsToRegionDescriptionPreProcessor implements AtomicOperationDescripti region = regions[0] if (regions.size() > 1) { - log.warn("EnableDisableDestroyAzureServerGroupDescription has regions size greater than 1: $regions") + getLog().warn("EnableDisableDestroyAzureServerGroupDescription has regions size greater than 1: $regions") } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/AzureServerGroupDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/AzureServerGroupDescriptionValidator.groovy index f7191bb8ac1..b9eda90154d 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/AzureServerGroupDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/AzureServerGroupDescriptionValidator.groovy @@ -19,10 +19,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.valida 
import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("azureServerGroupDescriptionValidator") class AzureServerGroupDescriptionValidator extends @@ -32,7 +32,7 @@ class AzureServerGroupDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, AzureServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, AzureServerGroupDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("azureServerGroupDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DestroyAzureServerGroupDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DestroyAzureServerGroupDescriptionValidator.groovy index c8e20cce5bb..3a0a13bbf3a 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DestroyAzureServerGroupDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DestroyAzureServerGroupDescriptionValidator.groovy @@ -20,11 +20,11 @@ import com.netflix.spinnaker.clouddriver.azure.AzureOperation import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.EnableDisableDestroyAzureServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AzureOperation(AtomicOperations.DESTROY_SERVER_GROUP) @Component("DestroyAzureServerGroupDescriptionValidator") @@ -34,7 +34,7 @@ class DestroyAzureServerGroupDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, EnableDisableDestroyAzureServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableDestroyAzureServerGroupDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("EnableDisableDestroyAzureServerGroupDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DisableAzureServerGroupDescriptionValidator.groovy 
b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DisableAzureServerGroupDescriptionValidator.groovy index 2d66081d538..9c6d42c5abe 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DisableAzureServerGroupDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/DisableAzureServerGroupDescriptionValidator.groovy @@ -20,11 +20,11 @@ import com.netflix.spinnaker.clouddriver.azure.AzureOperation import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.EnableDisableDestroyAzureServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AzureOperation(AtomicOperations.DISABLE_SERVER_GROUP) @Component("disableAzureServerGroupDescriptionValidator") @@ -34,7 +34,7 @@ class DisableAzureServerGroupDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, EnableDisableDestroyAzureServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableDestroyAzureServerGroupDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("EnableDisableDestroyAzureServerGroupDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/EnableAzureServerGroupDescriptionValidator.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/EnableAzureServerGroupDescriptionValidator.groovy index 3354a18e5f6..43aa0fe5930 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/EnableAzureServerGroupDescriptionValidator.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/validators/EnableAzureServerGroupDescriptionValidator.groovy @@ -20,11 +20,11 @@ import com.netflix.spinnaker.clouddriver.azure.AzureOperation import com.netflix.spinnaker.clouddriver.azure.common.StandardAzureAttributeValidator import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.EnableDisableDestroyAzureServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @AzureOperation(AtomicOperations.ENABLE_SERVER_GROUP) @Component("enableAzureServerGroupDescriptionValidator") @@ -34,7 +34,7 @@ class 
EnableAzureServerGroupDescriptionValidator extends AccountCredentialsProvider accountCredentialsProvider @Override - void validate(List priorDescriptions, EnableDisableDestroyAzureServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableDestroyAzureServerGroupDescription description, ValidationErrors errors) { def helper = new StandardAzureAttributeValidator("EnableDisableDestroyAzureServerGroupDescription", errors) helper.validateCredentials(description.credentials, accountCredentialsProvider) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/view/AzureInstanceProvider.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/view/AzureInstanceProvider.groovy index a7d7583d14c..1fa7f26802f 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/view/AzureInstanceProvider.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/view/AzureInstanceProvider.groovy @@ -29,7 +29,7 @@ import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @Component -class AzureInstanceProvider implements InstanceProvider<AzureInstance> { +class AzureInstanceProvider implements InstanceProvider<AzureInstance, String> { final String cloudProvider = AzureCloudProvider.ID private final AzureCloudProvider azureCloudProvider private final Cache cacheView diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/subnet/model/AzureSubnetDescription.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/subnet/model/AzureSubnetDescription.groovy index 05ae8588f3b..4da4b68ec93 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/subnet/model/AzureSubnetDescription.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/subnet/model/AzureSubnetDescription.groovy @@ -16,15 +16,13 @@ package com.netflix.spinnaker.clouddriver.azure.resources.subnet.model -import com.microsoft.azure.management.network.models.ApplicationGateway -import com.microsoft.azure.management.network.models.Subnet -import com.microsoft.azure.management.network.models.VirtualNetwork +import com.azure.resourcemanager.network.fluent.models.SubnetInner +import com.azure.resourcemanager.network.fluent.models.VirtualNetworkInner import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.common.AzureResourceOpsDescription -import com.netflix.spinnaker.clouddriver.azure.resources.network.model.AzureVirtualNetworkDescription - -import java.util.regex.Pattern +import groovy.transform.CompileStatic +@CompileStatic class AzureSubnetDescription extends AzureResourceOpsDescription { String id = "unknown" String addressPrefix = "unknown" @@ -37,62 +35,40 @@ class AzureSubnetDescription extends AzureResourceOpsDescription { int ipv4 int addressPrefixLength - static AzureSubnetDescription getDescriptionForAzureSubnet(VirtualNetwork vnet, Subnet subnet) { - AzureSubnetDescription description = new AzureSubnetDescription(name: subnet.name) - description.name = subnet.name - description.region = vnet.location + static AzureSubnetDescription getDescriptionForAzureSubnet(VirtualNetworkInner vnet, SubnetInner subnet) { + AzureSubnetDescription description = new
AzureSubnetDescription(name: subnet.name()) + description.name = subnet.name() + description.region = vnet.location() description.cloudProvider = "azure" - description.vnet = vnet.name - description.resourceId = subnet.id - description.id = subnet.name - description.addressPrefix = subnet.addressPrefix - description.ipv4 = AzureUtilities.convertIpv4PrefixToInt(subnet.addressPrefix) + description.vnet = vnet.name() + description.resourceId = subnet.id() + description.id = subnet.name() + description.addressPrefix = subnet.addressPrefix() + description.ipv4 = AzureUtilities.convertIpv4PrefixToInt(subnet.addressPrefix()) - description.addressPrefixLength = AzureUtilities.getAddressPrefixLength(subnet.addressPrefix) - subnet.ipConfigurations?.each {resourceId -> - description.ipConfigurations += resourceId.id + description.addressPrefixLength = AzureUtilities.getAddressPrefixLength(subnet.addressPrefix()) + subnet.ipConfigurations()?.each {resourceId -> + description.ipConfigurations += resourceId.id() description.connectedDevices += new SubnetConnectedDevices( - name: AzureUtilities.getResourceNameFromId(resourceId.id), - resourceId: resourceId.id, - type: AzureUtilities.getResourceTypeFromId(resourceId.id) + name: AzureUtilities.getResourceNameFromId(resourceId.id()), + resourceId: resourceId.id(), + type: AzureUtilities.getResourceTypeFromId(resourceId.id()) ) // TODO: iterate through applicationGatewayIPConfigurations which contains the ApplicationGateway related associations // This property is not yet exposed in the current Azure Java SDK } - description.networkSecurityGroup = subnet.networkSecurityGroup?.id + description.networkSecurityGroup = subnet.networkSecurityGroup()?.id() description } - static Collection getSubnetsForVirtualNetwork(VirtualNetwork vnet) { + static Collection getSubnetsForVirtualNetwork(VirtualNetworkInner vnet) { // sort the list of subnets based on their ipv4 values in order to speed up the search when computing the next subnet - vnet.subnets?.collect { + vnet.subnets()?.collect { getDescriptionForAzureSubnet(vnet, it) }?.sort { a,b -> a.ipv4 <=> b.ipv4} } - // This is a temporary workaround for a missing API in Azure Java SDK which should retrieve any Azure ApplicationGateway - associations with the subnet via applicationGatewayIPConfigurations is captured in the Azure Subnet.
It should be - // later replaced by a simple iteration through applicationGatewayIPConfigurations once that property becomes available - // in the Azure JSDK - static void getAppGatewaysConnectedResources(AzureVirtualNetworkDescription vnet, List appGateways) { - if (vnet.subnets && appGateways) { - appGateways.each { appGateway -> - // Iterate through the gatewayIPConfigurations and extract the subnet id which will be compared with the subnets within the vnet - appGateway?.gatewayIPConfigurations?.each { gatewayConfig -> - def subnetDescription = vnet.subnets.find { it.resourceId == gatewayConfig?.subnet?.id } - if (subnetDescription) { - subnetDescription.connectedDevices += new SubnetConnectedDevices( - name: appGateway.name, - resourceId: gatewayConfig?.id, - type: "applicationGateways", - ) - } - } - } - } - } - static class SubnetConnectedDevices { String name String type diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureCustomImageCachingAgent.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureCustomImageCachingAgent.groovy index 4b8db74fda6..66f3b366d2a 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureCustomImageCachingAgent.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureCustomImageCachingAgent.groovy @@ -32,10 +32,7 @@ import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.provider.A import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomImageStorage import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomVMImage import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials -import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupController.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupController.groovy index 0e24ad1d44f..6fc4cd354d4 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupController.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupController.groovy @@ -23,6 +23,7 @@ import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomVMImage +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureManagedVMImage import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureNamedImage import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureVMImage import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials @@ -46,14 +47,14 @@ class AzureVMImageLookupController { private static final int MIN_NAME_FILTER = 3 
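The AzureVMImageLookupController hunks that follow add a managed-image tier to the lookup. Condensed, the precedence they implement is: custom images, then managed images, then marketplace images, returning at the first tier with a match. A rough sketch (keyPatternFor stands in for the three Keys.get*Key builders and is not a real method):

    // Assumed condensation of the by-imageId lookup path added below.
    List<AzureNamedImage> lookup(String imageId, String account, String region) {
      for (ns in [Keys.Namespace.AZURE_CUSTOMVMIMAGES,
                  Keys.Namespace.AZURE_MANAGEDIMAGES,
                  Keys.Namespace.AZURE_VMIMAGES]) {
        def matches = getAllMatchingKeyPattern(imageId, ns, keyPatternFor(ns, account, region))
        if (matches) {
          return matches
        }
      }
      throw new ImageNotFoundException("${imageId} not found in ${account}/${region}")
    }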
private final AzureCloudProvider azureCloudProvider + private final AccountCredentialsProvider accountCredentialsProvider private final Cache cacheView - final ObjectMapper objectMapper + private final ObjectMapper objectMapper - @Autowired - AccountCredentialsProvider accountCredentialsProvider @Autowired - AzureVMImageLookupController(AzureCloudProvider azureCloudProvider, Cache cacheView, ObjectMapper objectMapper) { + AzureVMImageLookupController(AccountCredentialsProvider accountCredentialsProvider, AzureCloudProvider azureCloudProvider, Cache cacheView, ObjectMapper objectMapper) { + this.accountCredentialsProvider = accountCredentialsProvider this.azureCloudProvider = azureCloudProvider this.cacheView = cacheView this.objectMapper = objectMapper @@ -67,12 +68,11 @@ class AzureVMImageLookupController { result.addAll( getAllMatchingKeyPattern( imageId, - Keys.Namespace.AZURE_CUSTOMVMIMAGES.ns, + Keys.Namespace.AZURE_CUSTOMVMIMAGES, Keys.getCustomVMImageKey(azureCloudProvider, account, region, - "*"), - true)) + "*"))) if (!result.isEmpty()) { // found at least one match @@ -102,18 +102,32 @@ class AzureVMImageLookupController { return result } + /// Search for any matches in the Managed image cache + result.addAll( + getAllMatchingKeyPattern( + imageId, + Keys.Namespace.AZURE_MANAGEDIMAGES, + Keys.getManagedVMImageKey(azureCloudProvider, + account, + region, + "*", "*", "*"))) + + if (!result.isEmpty()) { + // found at least one match + return result + } + /// Search for any matches in the market store VM image cache result.addAll( getAllMatchingKeyPattern( imageId, - Keys.Namespace.AZURE_VMIMAGES.ns, + Keys.Namespace.AZURE_VMIMAGES, Keys.getVMImageKey(azureCloudProvider, account, region, "*", - "*"), - false)) + "*"))) if (result.isEmpty()) { throw new ImageNotFoundException("${imageId} not found in ${account}/${region}") @@ -130,12 +144,23 @@ class AzureVMImageLookupController { result.addAll( getAllMatchingKeyPattern( lookupOptions.q, - Keys.Namespace.AZURE_CUSTOMVMIMAGES.ns, + Keys.Namespace.AZURE_CUSTOMVMIMAGES, Keys.getCustomVMImageKey(azureCloudProvider, lookupOptions.account ?: '*', lookupOptions.region ?: '*', - "*"), - true)) + "*"))) + + if (lookupOptions.managedImages) { + result.addAll( + getAllMatchingKeyPattern( + lookupOptions.q, + Keys.Namespace.AZURE_MANAGEDIMAGES, + Keys.getManagedVMImageKey(azureCloudProvider, + lookupOptions.account ?: '*', + lookupOptions.region ?: '*', + "*", + "*", "*"))) + } if (!lookupOptions.customOnly) { @@ -162,31 +187,42 @@ class AzureVMImageLookupController { if (!lookupOptions.configOnly && lookupOptions.q != null && lookupOptions.q.length() >= MIN_NAME_FILTER) { // retrieve the list of virtual machine images from the azure respective cache + result.addAll( getAllMatchingKeyPattern( lookupOptions.q, - Keys.Namespace.AZURE_VMIMAGES.ns, + Keys.Namespace.AZURE_VMIMAGES, Keys.getVMImageKey(azureCloudProvider, lookupOptions.account ?: '*', lookupOptions.region ?: '*', "*", - "*"), - false)) + "*"))) } } result } - List getAllMatchingKeyPattern(String vmImagePartName, String type, String pattern, Boolean customImage) { - loadResults(vmImagePartName, type, cacheView.filterIdentifiers(type, pattern), customImage) + List getAllMatchingKeyPattern(String vmImagePartName, Keys.Namespace type, String pattern) { + loadResults(vmImagePartName, type, cacheView.filterIdentifiers(type.ns, pattern)) } - List loadResults(String vmImagePartName, String type, Collection identifiers, Boolean customImage) { + List loadResults(String vmImagePartName, 
Keys.Namespace type, Collection identifiers) { def results = [] as List - def data = cacheView.getAll(type, identifiers, RelationshipCacheFilter.none()) + def data = cacheView.getAll(type.ns, identifiers, RelationshipCacheFilter.none()) data.each {cacheData -> - def item = customImage? fromCustomImageCacheData(vmImagePartName, cacheData) : fromVMImageCacheData(vmImagePartName, cacheData) + def item = null + if(type == Keys.Namespace.AZURE_CUSTOMVMIMAGES) { + item = fromCustomImageCacheData(vmImagePartName, cacheData) + } + + if( type == Keys.Namespace.AZURE_MANAGEDIMAGES) { + item = fromManagedImageCacheData(vmImagePartName, cacheData) + } + + if(type == Keys.Namespace.AZURE_VMIMAGES) { + item = fromVMImageCacheData(vmImagePartName, cacheData) + } if (item) results += item @@ -251,15 +287,42 @@ class AzureVMImageLookupController { null } + AzureNamedImage fromManagedImageCacheData(String vmImagePartName, CacheData cacheData) { + try { + AzureManagedVMImage vmImage = objectMapper.convertValue(cacheData.attributes['vmimage'], AzureManagedVMImage) + def parts = Keys.parse(azureCloudProvider, cacheData.id) + + if ((vmImage.region == parts.region) && (vmImagePartName == null || vmImage.name.toLowerCase().contains(vmImagePartName.toLowerCase()))) { + return new AzureNamedImage( + imageName: vmImage.name, + isCustom: true, + publisher: "na", + offer: "na", + sku: "na", + version: "na", + uri: "na", + ostype: vmImage.osType, + account: parts.account, + region: parts.region + ) + } + } catch (Exception e) { + log.error("fromManagedImageCacheData -> Unexpected exception", e) + } + + null + } + @ResponseStatus(value = HttpStatus.NOT_FOUND, reason = 'Image not found') @InheritConstructors - private static class ImageNotFoundException extends RuntimeException { } + static class ImageNotFoundException extends RuntimeException { } - private static class LookupOptions { + static class LookupOptions { String q String account String region Boolean configOnly = true Boolean customOnly = false + Boolean managedImages = false } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentials.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentials.groovy index 1e8c983f37e..1fd941d86fb 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentials.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentials.groovy @@ -21,7 +21,9 @@ import com.netflix.spinnaker.clouddriver.azure.client.AzureComputeClient import com.netflix.spinnaker.clouddriver.azure.client.AzureNetworkClient import com.netflix.spinnaker.clouddriver.azure.client.AzureResourceManagerClient import com.netflix.spinnaker.clouddriver.azure.client.AzureStorageClient +import groovy.util.logging.Slf4j +@Slf4j class AzureCredentials { final String tenantId @@ -32,13 +34,15 @@ class AzureCredentials { final String defaultKeyVault final String defaultResourceGroup final String userAgentApplicationName + final String configuredAzureEnvironment + final Boolean useSshPublicKey final AzureResourceManagerClient resourceManagerClient final AzureNetworkClient networkClient final AzureComputeClient computeClient final AzureStorageClient storageClient - AzureCredentials(String tenantId, String clientId, String appKey, String subscriptionId, String defaultKeyVault, String defaultResourceGroup, String userAgentApplicationName) { + AzureCredentials(String tenantId, 
String clientId, String appKey, String subscriptionId, String defaultKeyVault, String defaultResourceGroup, String userAgentApplicationName, String configuredAzureEnvironment, Boolean useSshPublicKey) { this.tenantId = tenantId this.clientId = clientId this.appKey = appKey @@ -47,17 +51,26 @@ class AzureCredentials { this.defaultKeyVault = defaultKeyVault this.defaultResourceGroup = defaultResourceGroup this.userAgentApplicationName = userAgentApplicationName + this.configuredAzureEnvironment = configuredAzureEnvironment + this.useSshPublicKey = useSshPublicKey - def token = AzureBaseClient.getTokenCredentials(this.clientId, this.tenantId, this.appKey) + def token = AzureBaseClient.getTokenCredentials(this.clientId, this.tenantId, this.appKey, this.configuredAzureEnvironment) + def azureProfile = AzureBaseClient.getAzureProfile(this.configuredAzureEnvironment) - resourceManagerClient = new AzureResourceManagerClient(this.subscriptionId, token, userAgentApplicationName) + resourceManagerClient = new AzureResourceManagerClient(this.subscriptionId, token, azureProfile) - networkClient = new AzureNetworkClient(this.subscriptionId, token, userAgentApplicationName) + networkClient = new AzureNetworkClient(this.subscriptionId, token, azureProfile) - computeClient = new AzureComputeClient(this.subscriptionId, token, userAgentApplicationName) + computeClient = new AzureComputeClient(this.subscriptionId, token, azureProfile) - storageClient = new AzureStorageClient(this.subscriptionId, token, userAgentApplicationName) - registerProviders() + storageClient = new AzureStorageClient(this.subscriptionId, token, azureProfile) + + try { + registerProviders() + } catch (Exception e) { + log.error("Failed to register providers with AzureResourceManagerClient", e) + throw e + } } /** diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentialsInitializer.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentialsInitializer.groovy index 5da2778b564..b6e72cb9e5a 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentialsInitializer.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureCredentialsInitializer.groovy @@ -33,7 +33,7 @@ class AzureCredentialsInitializer { AccountCredentialsRepository accountCredentialsRepository, String clouddriverUserAgentApplicationName) { - def azureAccounts = [] + List azureAccounts = [] azureConfigurationProperties.accounts.each { AzureConfigurationProperties.ManagedAccount managedAccount -> try { def azureAccount = new AzureNamedAccountCredentials( @@ -49,7 +49,9 @@ class AzureCredentialsInitializer { managedAccount.customImages, managedAccount.defaultResourceGroup, managedAccount.defaultKeyVault, - clouddriverUserAgentApplicationName + managedAccount.useSshPublicKey, + clouddriverUserAgentApplicationName, + managedAccount.permissions.build() ) azureAccounts << (accountCredentialsRepository.save(managedAccount.name, azureAccount) as AzureNamedAccountCredentials) diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureNamedAccountCredentials.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureNamedAccountCredentials.groovy index 833e3548ea9..fa85d30c31d 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureNamedAccountCredentials.groovy +++ 
b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/security/AzureNamedAccountCredentials.groovy @@ -16,13 +16,18 @@ package com.netflix.spinnaker.clouddriver.azure.security +import com.netflix.spinnaker.clouddriver.azure.client.AzureComputeClient import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomImageStorage import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureVMImage -import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials + +import com.netflix.spinnaker.fiat.model.resources.Permissions +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j @Slf4j -public class AzureNamedAccountCredentials implements AccountCredentials { +@CompileStatic +public class AzureNamedAccountCredentials extends AbstractAccountCredentials { private static final String CLOUD_PROVIDER = "azure" final String accountName final String environment @@ -39,7 +44,11 @@ public class AzureNamedAccountCredentials implements AccountCredentials> locationToInstanceTypesMap + final List regionsSupportZones + final List availabilityZones + final Boolean useSshPublicKey + final Permissions permissions AzureNamedAccountCredentials(String accountName, String environment, @@ -53,7 +62,9 @@ public class AzureNamedAccountCredentials implements AccountCredentials vmCustomImages, String defaultResourceGroup, String defaultKeyVault, + Boolean useSshPublicKey, String applicationName, + Permissions permissions = null, List requiredGroupMembership = null) { this.accountName = accountName this.environment = environment @@ -68,8 +79,13 @@ public class AzureNamedAccountCredentials implements AccountCredentials + this.permissions = permissions this.credentials = appKey.isEmpty() ? 
null : buildCredentials() + this.locationToInstanceTypesMap = this.credentials.computeClient.getVirtualMachineSizesByRegions(this.regions) + this.regionsSupportZones = Arrays.asList("centralus", "eastus", "eastus2", "francecentral", "northeurope", "southeastasia", "westeurope", "westus2") + this.availabilityZones = Arrays.asList("1", "2", "3") } @Override @@ -83,7 +99,7 @@ public class AzureNamedAccountCredentials implements AccountCredentials buildPreferredVMImageList(List vmImages) { @@ -113,7 +129,7 @@ public class AzureNamedAccountCredentials implements AccountCredentials buildRegions(List regions) { - regions?.collect {new AzureRegion(it)} ?: [] + regions?.collect {new AzureRegion(it)} ?: new ArrayList() } public static class AzureRegion { diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureAppGatewayResourceTemplate.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureAppGatewayResourceTemplate.groovy index 36c6b44e114..7ee8665a6c7 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureAppGatewayResourceTemplate.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureAppGatewayResourceTemplate.groovy @@ -18,9 +18,9 @@ package com.netflix.spinnaker.clouddriver.azure.templates import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature -import groovy.util.logging.Slf4j import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription +import groovy.util.logging.Slf4j @Slf4j class AzureAppGatewayResourceTemplate { @@ -44,6 +44,7 @@ class AzureAppGatewayResourceTemplate { } static class AppGatewayTemplate { + //TODO: Make this configurable for AZURE_US_GOVERNMENT String $schema = "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" String contentVersion = "1.0.0.0" @@ -58,11 +59,26 @@ class AzureAppGatewayResourceTemplate { AppGatewayTemplate(AzureAppGatewayDescription description) { parameters = new AppGatewayTemplateParameters() variables = new AppGatewayTemplateVariables(description) - if (!description.publicIpName) { - // this is not an edit operation of an existing application gateway; we must create a PublicIp resource in this case - resources.add(new PublicIpResource()) + ApplicationGatewayResource appGateway = new ApplicationGatewayResource(description) + + if(description.dnsName){ + def publicIp = new PublicIpResource(properties: new PublicIPPropertiesWithDns()) + if (description.sku == "Standard_v2") { + // publicIp sku must be Standard for Standard_v2 app gateways + publicIp.sku = new Sku("Standard") + publicIp.properties.publicIPAllocationMethod = 'Static' + } + resources.add(publicIp) + appGateway.addDependency(publicIp) + } else { + if (!description.publicIpName) { + def publicIp = new PublicIpResource() + resources.add(publicIp) + appGateway.addDependency(publicIp) + } } - resources.add(new ApplicationGatewayResource(description)) + + resources.add(appGateway) } } @@ -78,7 +94,7 @@ class AzureAppGatewayResourceTemplate { static final String defaultAppGatewayBeAddrPoolName = "default_BAP0" static class AppGatewayTemplateVariables { - final String apiVersion = "2015-06-15" + final String apiVersion = "2018-04-01" String appGwName String publicIPAddressName String dnsNameForLBIP @@ -97,7 +113,7 @@ class 
AzureAppGatewayResourceTemplate { } else { publicIPAddressName = AzureUtilities.PUBLICIP_NAME_PREFIX + description.name.toLowerCase() } - dnsNameForLBIP = DnsSettings.getUniqueDNSName(description.name) + dnsNameForLBIP = description.dnsName ?: DnsSettings.getUniqueDNSName(description.name) appGwSubnetID = description.subnetResourceId if (description.trafficEnabledSG) { // This is an edit operation; preserve the current backend address pool as the active rule @@ -124,14 +140,10 @@ class AzureAppGatewayResourceTemplate { if (description.stack) tags.stack = description.stack if (description.detail) tags.detail = description.detail if (description.cluster) tags.cluster = description.cluster - if (description.serverGroups) tags.serverGroups = description.serverGroups.join(" ") if (description.securityGroup) tags.securityGroup = description.securityGroup if (description.vnet) tags.vnet = description.vnet if (description.subnet) tags.subnet = description.subnet if (description.vnet) tags.vnetResourceGroup = description.vnetResourceGroup - if (!description.publicIpName) { - this.dependsOn.add("[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]") - } properties = new ApplicationGatewayResourceProperties(description) } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureLoadBalancerResourceTemplate.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureLoadBalancerResourceTemplate.groovy index 58a0082c34c..d32fd83ecab 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureLoadBalancerResourceTemplate.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureLoadBalancerResourceTemplate.groovy @@ -25,12 +25,15 @@ class AzureLoadBalancerResourceTemplate { static ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true) + static final String DEFAULT_BACKEND_POOL = "default_LB_BAP" + static String getTemplate(AzureLoadBalancerDescription description) { LoadBalancerTemplate template = new LoadBalancerTemplate(description) mapper.writeValueAsString(template) } static class LoadBalancerTemplate{ + //TODO: Make this configurable for AZURE_US_GOVERNMENT String $schema = "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" String contentVersion = "1.0.0.0" @@ -41,21 +44,31 @@ class AzureLoadBalancerResourceTemplate { LoadBalancerTemplate(AzureLoadBalancerDescription description){ parameters = new LoadBalancerParameters() variables = new LoadBalancerTemplateVariables(description) + LoadBalancer lb = new LoadBalancer(description) - resources.add(new PublicIpResource(properties: new PublicIPPropertiesWithDns())) + def publicIp = new PublicIpResource(properties: new PublicIPPropertiesWithDns()) + publicIp.sku = new Sku("Standard") + publicIp.properties.publicIPAllocationMethod = "Static" + + if(description.dnsName){ + resources.add(publicIp) + lb.addDependency(publicIp) + } else { + if (!description.publicIpName) { + resources.add(publicIp) + lb.addDependency(publicIp) + } + } - LoadBalancer lb = new LoadBalancer(description) - lb.addDependency(resources[0]) resources.add(lb) } } static class LoadBalancerTemplateVariables{ - String apiVersion = "2015-05-01-preview" + String apiVersion = "2018-08-01" String loadBalancerName String virtualNetworkName String publicIPAddressName - String publicIPAddressType = "Dynamic" String 
loadBalancerFrontEnd String loadBalancerBackEnd String dnsNameForLBIP @@ -71,10 +84,15 @@ class AzureLoadBalancerResourceTemplate { loadBalancerName = description.loadBalancerName.toLowerCase() virtualNetworkName = AzureUtilities.VNET_NAME_PREFIX + resourceGroupName.toLowerCase() - publicIPAddressName = AzureUtilities.PUBLICIP_NAME_PREFIX + description.loadBalancerName.toLowerCase() + if (description.publicIpName) { + // reuse the existing public IP (this is an edit operation) + publicIPAddressName = description.publicIpName + } else { + publicIPAddressName = AzureUtilities.PUBLICIP_NAME_PREFIX + description.loadBalancerName.toLowerCase() + } loadBalancerFrontEnd = AzureUtilities.LBFRONTEND_NAME_PREFIX + description.loadBalancerName.toLowerCase() - loadBalancerBackEnd = AzureUtilities.LBBACKEND_NAME_PREFIX + description.loadBalancerName.toLowerCase() - dnsNameForLBIP = DnsSettings.getUniqueDNSName(description.loadBalancerName.toLowerCase()) + loadBalancerBackEnd = description.trafficEnabledSG ? description.trafficEnabledSG : DEFAULT_BACKEND_POOL + dnsNameForLBIP = description.dnsName ?: DnsSettings.getUniqueDNSName(description.loadBalancerName.toLowerCase()) ipConfigName = AzureUtilities.IPCONFIG_NAME_PREFIX + description.loadBalancerName.toLowerCase() } } @@ -90,12 +108,14 @@ class AzureLoadBalancerResourceTemplate { static class LoadBalancer extends DependingResource{ LoadBalancerProperties properties + Sku sku LoadBalancer(AzureLoadBalancerDescription description) { apiVersion = "[variables('apiVersion')]" name = "[variables('loadBalancerName')]" type = "Microsoft.Network/loadBalancers" location = "[parameters('location')]" + sku = new Sku("Standard") def currentTime = System.currentTimeMillis() tags = [:] tags.appName = description.appName @@ -103,10 +123,6 @@ class AzureLoadBalancerResourceTemplate { tags.detail = description.detail tags.createdTime = currentTime.toString() if (description.cluster) tags.cluster = description.cluster - if (description.serverGroup) tags.serverGroup = description.serverGroup - if (description.securityGroup) tags.securityGroup = description.securityGroup - if (description.vnet) tags.vnet = description.vnet - if (description.subnet) tags.subnet = description.subnet properties = new LoadBalancerProperties(description) } @@ -147,7 +163,13 @@ class AzureLoadBalancerResourceTemplate { LoadBalancerProperties(AzureLoadBalancerDescription description){ frontEndIPConfigurations.add(new FrontEndIpConfiguration()) backendAddressPools.add(new BackEndAddressPool()) - description.loadBalancingRules?.each{loadBalancingRules.add(new LoadBalancingRule(it))} + description.serverGroups?.each { + backendAddressPools.add(new BackEndAddressPool(it)) + } + description.loadBalancingRules?.each{ + it.persistence = description.sessionPersistence + loadBalancingRules.add(new LoadBalancingRule(it)) + } description.probes?.each{ probes.add(new AzureProbe(it))} } } @@ -168,7 +190,12 @@ class AzureLoadBalancerResourceTemplate { BackEndAddressPool() { - name = "[variables('loadBalancerBackEnd')]" + name = DEFAULT_BACKEND_POOL + } + + BackEndAddressPool(String name) + { + this.name = name } } @@ -234,12 +261,20 @@ class AzureLoadBalancerResourceTemplate { } static class LoadBalancingRuleProperties{ + static enum LoadDistribution { + Default, + SourceIP, + SourceIPProtocol + } + IdRef frontendIPConfiguration IdRef backendAddressPool String protocol Integer frontendPort Integer backendPort + Integer idleTimeoutInMinutes IdRef probe + LoadDistribution loadDistribution 
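// Editor's sketch, not part of the PR: the constructor below maps Spinnaker's session
// persistence labels onto Azure's loadDistribution values. The same mapping as a
// hypothetical standalone helper; note the PR's switch leaves loadDistribution unset for
// unrecognized labels, whereas this sketch normalizes them to Default:
static LoadDistribution persistenceToLoadDistribution(String persistence) {
  switch (persistence) {
    case "Client IP":              return LoadDistribution.SourceIP         // two-tuple (source IP) affinity
    case "Client IP and protocol": return LoadDistribution.SourceIPProtocol // three-tuple (source IP plus protocol) affinity
    default:                       return LoadDistribution.Default          // "None" and anything else: five-tuple hashing
  }
}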
LoadBalancingRuleProperties(AzureLoadBalancerDescription.AzureLoadBalancingRule rule){ frontendIPConfiguration = new IdRef("[variables('frontEndIPConfig')]") @@ -247,7 +282,19 @@ protocol = rule.protocol.toString().toLowerCase() frontendPort = rule.externalPort backendPort = rule.backendPort + idleTimeoutInMinutes = rule.idleTimeout probe = new IdRef("[concat(variables('loadBalancerID'),'/probes/" + rule.probeName + "')]") + switch(rule.persistence) { + case "None": + loadDistribution = LoadDistribution.Default + break + case "Client IP": + loadDistribution = LoadDistribution.SourceIP + break + case "Client IP and protocol": + loadDistribution = LoadDistribution.SourceIPProtocol + break + } } } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureSecurityGroupResourceTemplate.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureSecurityGroupResourceTemplate.groovy index 2a3e40acbca..e7678b23409 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureSecurityGroupResourceTemplate.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureSecurityGroupResourceTemplate.groovy @@ -26,11 +26,11 @@ class AzureSecurityGroupResourceTemplate { static String getTemplate(UpsertAzureSecurityGroupDescription description) { SecurityGroupTemplate template = new SecurityGroupTemplate(description) - mapper.writeValueAsString(template) } static class SecurityGroupTemplate{ + //TODO: Make this configurable for AZURE_US_GOVERNMENT String $schema = "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" String contentVersion = "1.0.0.0" @@ -43,8 +43,13 @@ variables = new SecurityGroupTemplateVariables(description) SecurityGroup sg = new SecurityGroup(description) - resources.add(sg) + + // Apply the NSG to a subnet via a nested ARM template, but only when the user specifies a subnet + if(description.subnet) { + SecurityGroupSubnet sg_subnet = new SecurityGroupSubnet() + resources.add(sg_subnet) + } } } @@ -58,6 +63,11 @@ static class SecurityGroupParameters{ Location location = new Location() + NetworkSecurityGroupName networkSecurityGroupName = new NetworkSecurityGroupName() + NetworkSecurityGroupResourceGroupName networkSecurityGroupResourceGroupName = new NetworkSecurityGroupResourceGroupName() + VirtualNetworkName virtualNetworkName = new VirtualNetworkName() + VirtualNetworkResourceGroupName virtualNetworkResourceGroupName = new VirtualNetworkResourceGroupName() + SubnetName subnetName = new SubnetName() } static class Location{ @@ -65,12 +75,40 @@ Map metadata = ["description":"Location to deploy"] } + static class NetworkSecurityGroupName{ + String type = "string" + Map metadata = ["description":"The NSG name"] + } + + static class NetworkSecurityGroupResourceGroupName{ + String type = "string" + Map metadata = ["description":"The resource group name of NSG"] + } + + static class VirtualNetworkResourceGroupName{ + String type = "string" + String defaultValue = "" + Map metadata = ["description":"The resource group name of Virtual Network"] + } + + static class VirtualNetworkName{ + String type = "string" + String defaultValue = "" + Map metadata = ["description":"The Virtual Network name"] + } + + static class SubnetName{ + String type
= "string" + String defaultValue = "" + Map metadata = ["description":"The subnet name"] + } + static class SecurityGroup extends DependingResource{ Map tags SecurityGroupProperties properties SecurityGroup(UpsertAzureSecurityGroupDescription description) { - apiVersion = "2015-05-01-preview" + apiVersion = "2018-11-01" name = "[variables('securityGroupName')]" type = "Microsoft.Network/networkSecurityGroups" location = "[parameters('location')]" @@ -108,10 +146,12 @@ class AzureSecurityGroupResourceTemplate { String access /* gets or sets whether network traffic is allowed or denied; possible values are “Allow” and “Deny” */ String destinationAddressPrefix /* CIDR or destination IP range; asterisk “*” can also be used to match all destination IPs; default tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used */ String destinationPortRange /* Integer or range between 0 and 65535; asterisk “*” can also be used to match all ports */ + List<String> destinationPortRanges /* List of integers or ranges between 0 and 65535 */ String direction /* Inbound or Outbound */ Integer priority /* value can be between 100 and 4096 */ String protocol /* Tcp, Udp or All(*) */ String sourceAddressPrefix /* CIDR or source IP range; asterisk “*” can also be used to match all source IPs; default tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used */ + List<String> sourceAddressPrefixes /* List of CIDRs or source IP ranges */ String sourcePortRange /* Integer or range between 0 and 65535; asterisk “*” can also be used to match all ports */ AzureNSGRuleProperties(AzureSGRule rule) { @@ -119,12 +159,74 @@ access = rule.access destinationAddressPrefix = rule.destinationAddressPrefix destinationPortRange = rule.destinationPortRange + destinationPortRanges = rule.destinationPortRanges direction = rule.direction priority = rule.priority protocol = rule.protocol sourceAddressPrefix = rule.sourceAddressPrefix + sourceAddressPrefixes = rule.sourceAddressPrefixes sourcePortRange = rule.sourcePortRange } } + + /* + Use ARM nested template to apply NSG to an existing subnet + */ + static class SecurityGroupSubnet extends DependingResource{ + String resourceGroup + SecurityGroupSubnetProperties properties + + SecurityGroupSubnet() { + apiVersion = "2017-08-01" + name = "nestedTemplate_NSGSubnet" + type = "Microsoft.Resources/deployments" + resourceGroup = "[parameters('virtualNetworkResourceGroupName')]" + dependsOn.add("[parameters('networkSecurityGroupName')]") + properties = new SecurityGroupSubnetProperties() + } + } + + static class SecurityGroupSubnetProperties { + String mode + SecurityGroupSubnetPropertiesNestedTemplate template + + SecurityGroupSubnetProperties() { + mode = "Incremental" + template = new SecurityGroupSubnetPropertiesNestedTemplate() + } + } + + static class SecurityGroupSubnetPropertiesNestedTemplate { + String $schema + String contentVersion + ArrayList resources = [] + + SecurityGroupSubnetPropertiesNestedTemplate(){ + $schema = "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" + contentVersion = "1.0.0.0" + resources.add(new SecurityGroupSubnetPropertiesNestedTemplateSubnet()) + } + } + + static class SecurityGroupSubnetPropertiesNestedTemplateSubnet extends Resource { + SecurityGroupSubnetPropertiesNestedTemplateSubnetProperties properties + + SecurityGroupSubnetPropertiesNestedTemplateSubnet(){ + apiVersion = "2018-11-01" + type = "Microsoft.Network/virtualNetworks/subnets" + name =
"[concat(parameters('virtualNetworkName'), '/', parameters('subnetName'))]" + location = "[parameters('location')]" + properties = new SecurityGroupSubnetPropertiesNestedTemplateSubnetProperties() + } + } + + static class SecurityGroupSubnetPropertiesNestedTemplateSubnetProperties { + String addressPrefix = "[reference(resourceId(parameters('virtualNetworkResourceGroupName'), 'Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkName'), parameters('subnetName')), '2018-11-01').addressPrefix]" + SecurityGroupSubnetPropertiesNestedTemplateSubnetPropertiesNSG networkSecurityGroup = new SecurityGroupSubnetPropertiesNestedTemplateSubnetPropertiesNSG() + } + + static class SecurityGroupSubnetPropertiesNestedTemplateSubnetPropertiesNSG { + String id = "[resourceId(parameters('networkSecurityGroupResourceGroupName'), 'Microsoft.Network/networkSecurityGroups', parameters('networkSecurityGroupName'))]" + } } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureServerGroupResourceTemplate.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureServerGroupResourceTemplate.groovy index c23ba1697f9..a894e9a7eb4 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureServerGroupResourceTemplate.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/AzureServerGroupResourceTemplate.groovy @@ -30,17 +30,20 @@ */ package com.netflix.spinnaker.clouddriver.azure.templates +import com.azure.resourcemanager.compute.models.ResourceIdentityType +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetDataDisk +import com.fasterxml.jackson.annotation.JsonInclude import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription.AzureInboundPortConfig -import groovy.util.logging.Slf4j import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription.AzureInboundPortConfig +import groovy.util.logging.Slf4j @Slf4j class AzureServerGroupResourceTemplate { static final String STORAGE_ACCOUNT_SUFFIX = "sa" - static String LB_NAME = null protected static ObjectMapper mapper = new ObjectMapper() @@ -70,6 +73,7 @@ class AzureServerGroupResourceTemplate { * */ static class ServerGroupTemplate { + //TODO: Make this configurable for AZURE_US_GOVERNMENT String $schema = "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" String contentVersion = "1.0.0.0" @@ -82,7 +86,12 @@ class AzureServerGroupResourceTemplate { * @param description */ ServerGroupTemplate(AzureServerGroupDescription description) { - initializeCommonVariables(description) + if (description.enableInboundNAT){ + initializeCommonVariables(description) + resources.add(new PublicIpResource(properties: new PublicIPPropertiesWithDns())) + resources.add(new LoadBalancer(description)) + } + parameters = new ServerGroupTemplateParameters() //If it's custom, @@ -93,8 +102,6 @@ class AzureServerGroupResourceTemplate { resources.add(new StorageAccount(description)) } - resources.add(new 
PublicIpResource(properties: new PublicIPPropertiesWithDns())) - resources.add(new LoadBalancer(description)) resources.add(new VirtualMachineScaleSet(description)) } @@ -103,31 +110,33 @@ class AzureServerGroupResourceTemplate { interface TemplateVariables {} static class CoreServerGroupTemplateVariables implements TemplateVariables { - final String apiVersion = "2015-06-15" - String publicIPAddressName - String publicIPAddressID - String publicIPAddressType - String dnsNameForLBIP - String loadBalancerBackend - String loadBalancerFrontEnd - String loadBalancerName - String loadBalancerID - String frontEndIPConfigID - String inboundNatPoolName + final String apiVersion = "2019-03-01" + String publicIPAddressName = "" + String publicIPAddressID = "" + String publicIPAddressType = "" + String dnsNameForLBIP = "" + String loadBalancerBackend = "" + String loadBalancerFrontEnd = "" + String loadBalancerName = "" + String loadBalancerID = "" + String frontEndIPConfigID = "" + String inboundNatPoolName = "" CoreServerGroupTemplateVariables() {} CoreServerGroupTemplateVariables(AzureServerGroupDescription description) { - publicIPAddressName = AzureUtilities.PUBLICIP_NAME_PREFIX + description.name - publicIPAddressID = "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]" - publicIPAddressType = "Dynamic" - dnsNameForLBIP = AzureUtilities.DNS_NAME_PREFIX + description.name.toLowerCase() - frontEndIPConfigID = "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]" - loadBalancerFrontEnd = AzureUtilities.LBFRONTEND_NAME_PREFIX + description.name - loadBalancerBackend = AzureUtilities.LBBACKEND_NAME_PREFIX + description.name - loadBalancerName = LB_NAME - loadBalancerID = "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]" - inboundNatPoolName = AzureUtilities.INBOUND_NATPOOL_PREFIX + description.name + if(description.enableInboundNAT){ + publicIPAddressName = AzureUtilities.PUBLICIP_NAME_PREFIX + description.name + publicIPAddressID = "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]" + publicIPAddressType = "Dynamic" + dnsNameForLBIP = AzureUtilities.DNS_NAME_PREFIX + description.name.toLowerCase() + frontEndIPConfigID = "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]" + loadBalancerFrontEnd = AzureUtilities.LBFRONTEND_NAME_PREFIX + description.name + loadBalancerBackend = AzureUtilities.LBBACKEND_NAME_PREFIX + description.name + loadBalancerName = LB_NAME + loadBalancerID = "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]" + inboundNatPoolName = AzureUtilities.INBOUND_NATPOOL_PREFIX + description.name + } } } @@ -154,6 +163,7 @@ class AzureServerGroupResourceTemplate { */ ExtendedServerGroupTemplateVariables(AzureServerGroupDescription description) { super(description) + vhdContainerName = description.name.toLowerCase() osType = new OsType(description) imageReference = "[variables('osType')]" @@ -174,18 +184,23 @@ class AzureServerGroupResourceTemplate { */ static class ServerGroupTemplateParameters { LocationParameter location = new LocationParameter(["description": "Location to deploy"]) - SubnetParameter subnetId = new SubnetParameter(["description": "Subnet Resource ID"]) - AppGatewayAddressPoolParameter appGatewayAddressPoolId = new AppGatewayAddressPoolParameter(["description": "App 
Gateway backend address pool resource ID"]) - VMUserNameParameter vmuserName = new VMUserNameParameter(["description": "default VM account name"]) - VMPasswordParameter vmPassword = new VMPasswordParameter(["description": "default VM account password"]) - CustomDataParameter customData = new CustomDataParameter(["description":"custom data to pass down to the virtual machine(s)"], "") + SubnetParameter subnetId = new SubnetParameter(["description": "Subnet Resource ID"], "") + AppGatewayAddressPoolParameter appGatewayAddressPoolId = new AppGatewayAddressPoolParameter(["description": "App Gateway backend address pool resource ID"], "") + VMUserNameParameter vmUserName = new VMUserNameParameter(["description": "Admin username on all VMs"], "") + VMPasswordParameter vmPassword = new VMPasswordParameter(["description": "Admin password on all VMs"], "") + VMSshPublicKeyParameter vmSshPublicKey = new VMSshPublicKeyParameter(["description": "SSH public key on all VMs"], "") + LoadBalancerPoolParameter loadBalancerAddressPoolId = new LoadBalancerPoolParameter(["description": "Load balancer pool ID"], "") + LoadBalancerNatPoolParameter loadBalancerNatPoolId = new LoadBalancerNatPoolParameter(["description": "Load balancer NAT pool ID"], "") + + // The default value of custom data cannot be "" otherwise Azure service will run into error complaining "custom data must be in Base64". + CustomDataParameter customData = new CustomDataParameter(["description":"custom data to pass down to the virtual machine(s)"], "sample custom data") } /* Server Group Parameters */ static String subnetParameterName = "subnetId" - static class SubnetParameter extends StringParameter { - SubnetParameter(Map metadata) { - super(metadata) + static class SubnetParameter extends StringParameterWithDefault { + SubnetParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) } } @@ -197,9 +212,23 @@ class AzureServerGroupResourceTemplate { } static String appGatewayAddressPoolParameterName = "appGatewayAddressPoolId" - static class AppGatewayAddressPoolParameter extends StringParameter { - AppGatewayAddressPoolParameter(Map metadata) { - super(metadata) + static class AppGatewayAddressPoolParameter extends StringParameterWithDefault { + AppGatewayAddressPoolParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) + } + } + + static String loadBalancerAddressPoolParameterName = "loadBalancerAddressPoolId" + static class LoadBalancerPoolParameter extends StringParameterWithDefault { + LoadBalancerPoolParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) + } + } + + static String loadBalancerNatPoolParameterName = "loadBalancerNatPoolId" + static class LoadBalancerNatPoolParameter extends StringParameterWithDefault { + LoadBalancerNatPoolParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) } } @@ -210,17 +239,24 @@ class AzureServerGroupResourceTemplate { } } - static String vmUserNameParameterName = "vmUsername" + static String vmUserNameParameterName = "vmUserName" static class VMUserNameParameter extends SecureStringParameter { - VMUserNameParameter(Map metadata) { - super(metadata) + VMUserNameParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) } } static String vmPasswordParameterName = "vmPassword" static class VMPasswordParameter extends SecureStringParameter { - VMPasswordParameter(Map metadata) { - super(metadata) + VMPasswordParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) + 
} + } + + static String vmSshPublicKeyParameterName = "vmSshPublicKey" + static class VMSshPublicKeyParameter extends SecureStringParameter { + VMSshPublicKeyParameter(Map metadata, String defaultValue) { + super(metadata, defaultValue) } } @@ -317,45 +353,50 @@ class AzureServerGroupResourceTemplate { static class VirtualMachineScaleSet extends DependingResource { ScaleSetSkuProperty sku VirtualMachineScaleSetProperty properties + ManagedIdentity identity + + @JsonInclude(JsonInclude.Include.NON_NULL) + List zones VirtualMachineScaleSet(AzureServerGroupDescription description) { apiVersion = "[variables('apiVersion')]" name = description.name type = "Microsoft.Compute/virtualMachineScaleSets" location = "[parameters('${locationParameterName}')]" + + String userAssignedIdentities = description.userAssignedIdentities + if (!userAssignedIdentities?.trim()){ + // If the userAssignedIdentities is null or empty just attempt to create a system managed identity if it is enabled + identity = new ManagedIdentity(description.useSystemManagedIdentity) + }else{ + // else create an user assigned identity with optional system managed identity (if it is enabled) + identity = new UserAndOptionalSystemAssignedIdentity(description.useSystemManagedIdentity, userAssignedIdentities) + } + def currentTime = System.currentTimeMillis() tags = [:] - tags.appName = description.application - tags.stack = description.stack - tags.detail = description.detail - tags.cluster = description.clusterName tags.createdTime = currentTime.toString() - tags.loadBalancerName = LB_NAME - tags.hasNewSubnet = description.hasNewSubnet.toString() + if (description.subnetId) tags.subnetId = description.subnetId + if (description.securityGroupName) tags.securityGroupName = description.securityGroupName + + if (description.instanceTags != null) { + tags << description.instanceTags + } // debug only; can be removed as part of the tags cleanup if (description.appGatewayName) tags.appGatewayName = description.appGatewayName - if (description.appGatewayBapId) tags.appGatewayBapId = description.appGatewayBapId - - if (description.securityGroupName) tags.securityGroupName = description.securityGroupName - if (description.subnetId) tags.subnetId = description.subnetId - tags.imageIsCustom = description.image.isCustom.toString() + if (description.loadBalancerName) tags.loadBalancerName = description.loadBalancerName // will need this when cloning a server group if (description.image.imageName) tags.imageName = description.image.imageName - if (!description.image.isCustom) { - description.getStorageAccountCount().times { idx -> - this.dependsOn.add( - String.format("[concat('Microsoft.Storage/storageAccounts/', variables('%s')[%s])]", - ExtendedServerGroupTemplateVariables.uniqueStorageNamesArrayVar, - idx) - ) - String uniqueName = getUniqueStorageName(description.name, idx) - tags.storageAccountNames = tags.storageAccountNames ? "${tags.storageAccountNames},${uniqueName}" : uniqueName - } + if(description.zones != null && description.zones.size() != 0) { + zones = description.zones.asList() } - this.dependsOn.add("[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]") + if(description.enableInboundNAT){ + tags.enableInboundNAT = description.enableInboundNAT ? 
"true" : "false" + this.dependsOn.add("[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]") + } properties = new VirtualMachineScaleSetProperty(description) sku = new ScaleSetSkuProperty(description) @@ -365,14 +406,20 @@ class AzureServerGroupResourceTemplate { static class VirtualMachineScaleSetProperty { Map upgradePolicy = [:] ScaleSetVMProfile virtualMachineProfile + Boolean doNotRunExtensionsOnOverprovisionedVMs VirtualMachineScaleSetProperty(AzureServerGroupDescription description) { upgradePolicy["mode"] = description.upgradePolicy.toString() - virtualMachineProfile = description.customScriptsSettings?.fileUris ? - new ScaleSetVMProfilePropertyWithExtension(description) : - new ScaleSetVMProfileProperty(description) - + doNotRunExtensionsOnOverprovisionedVMs = description.doNotRunExtensionsOnOverprovisionedVMs + // protocol is the only required setting in both scenarios + // https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-health-extension#settings + if (description.customScriptsSettings?.commandToExecute || description.healthSettings?.protocol) { + virtualMachineProfile = new ScaleSetVMProfilePropertyWithExtension(description) + } + else { + virtualMachineProfile = new ScaleSetVMProfileProperty(description) + } } } @@ -396,6 +443,59 @@ class AzureServerGroupResourceTemplate { } } + // Scheduled Event Profiles + static class ScheduledEventsProfile { + TerminateNotificationProfile terminateNotificationProfile + + ScheduledEventsProfile(AzureServerGroupDescription description) { + terminateNotificationProfile = new TerminateNotificationProfile(description) + } + } + + static class TerminateNotificationProfile { + String notBeforeTimeout + Boolean enable + + TerminateNotificationProfile(AzureServerGroupDescription description) { + enable = true + notBeforeTimeout = "PT" + description.terminationNotBeforeTimeoutInMinutes + "M" + } + } + + // ***Scale Set None/System Managed Identity + static class ManagedIdentity { + String type + + ManagedIdentity(){} + /** + * + * @param description + */ + ManagedIdentity(Boolean enableSystemAssigned) { + type = enableSystemAssigned ? ResourceIdentityType.SYSTEM_ASSIGNED: ResourceIdentityType.NONE + } + } + + // ***Scale Set User assigned and optionaly system assigned Identity + static class UserAndOptionalSystemAssignedIdentity extends ManagedIdentity { + // user assigned identities needs to be added in the following format + // "[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/','')]" : { } + Map> userAssignedIdentities = [:] + + /** + * + * @param description + */ + UserAndOptionalSystemAssignedIdentity(Boolean enableSystemAssigned, String userAssignedIdentities) { + type = enableSystemAssigned ? 
ResourceIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.toString() : ResourceIdentityType.USER_ASSIGNED + if (userAssignedIdentities.length() > 0) { + for (String identity : userAssignedIdentities.split(",")) { + this.userAssignedIdentities.put(String.format("[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/','%s')]", identity), [:]) + } + } + } + } + interface ScaleSetOsProfile {} // ***OSProfile static class ScaleSetOsProfileProperty implements ScaleSetOsProfile { String computerNamePrefix String adminUsername String adminPassword + String customData ScaleSetOsProfileProperty(AzureServerGroupDescription description) { //Max length of 10 characters to allow for an additional postfix within a max length of 15 characters - computerNamePrefix = description.getIdentifier().substring(0, 10) - log.info("computerNamePrefix will be truncated to 10 characters to maintain Azure restrictions") + String osType = description.image.ostype + int maxLengthOsSpecific = osType==null||osType.isEmpty()||osType.isAllWhitespace()||osType.equalsIgnoreCase("linux")?10:9 + int identifierLength=description.getIdentifier().length() + //A guard to avoid nullpointer in case identifierLength is less than maxLengthOsSpecific + int identifierMaxIndex = identifierLength < maxLengthOsSpecific ? identifierLength : maxLengthOsSpecific ArrayList<ScaleSetOsProfileLinuxConfigurationSshPublicKey> publicKeys = [] + + ScaleSetOsProfileLinuxConfigurationSsh() { + publicKeys.add(new ScaleSetOsProfileLinuxConfigurationSshPublicKey()) + } + } + + static class ScaleSetOsProfileLinuxConfigurationSshPublicKey { + String path + String keyData + + ScaleSetOsProfileLinuxConfigurationSshPublicKey() { + path = "[concat('/home/', parameters('${vmUserNameParameterName}'), '/.ssh/authorized_keys')]" + keyData = "[parameters('${vmSshPublicKeyParameterName}')]" + } + } // ***Network Profile static class ScaleSetNetworkProfileProperty { @@ -447,10 +598,11 @@ } /** - * + * The NSG for the VMSS NIC is applied here */ static class NetworkInterfaceConfigurationProperty { boolean primary + boolean enableIpForwarding ArrayList<NetworkInterfaceIPConfiguration> ipConfigurations = [] /** @@ -459,10 +611,19 @@ */ NetworkInterfaceConfigurationProperty(AzureServerGroupDescription description) { primary = true + enableIpForwarding = description.enableIpForwarding ipConfigurations.add(new NetworkInterfaceIPConfiguration(description)) } } + static class NetworkSecurityGroup { + String id + + NetworkSecurityGroup(String name) { + this.id = "[resourceId('Microsoft.Network/networkSecurityGroups', '${name}')]" + } + } + /** * */ @@ -476,7 +637,7 @@ */ NetworkInterfaceIPConfiguration(AzureServerGroupDescription description) { name = AzureUtilities.IPCONFIG_NAME_PREFIX + description.getIdentifier() - properties = new NetworkInterfaceIPConfigurationsProperty() + properties = new NetworkInterfaceIPConfigurationsProperty(description) } } @@ -493,11 +654,23 @@ * * @param description */ - NetworkInterfaceIPConfigurationsProperty() { - subnet = new NetworkInterfaceIPConfigurationSubnet() - loadBalancerBackendAddressPools.add(new LoadBalancerBackendAddressPool()) - ApplicationGatewayBackendAddressPools.add(new AppGatewayBackendAddressPool()) - loadBalancerInboundNatPools.add(new LoadBalancerInboundNatPoolId()) + NetworkInterfaceIPConfigurationsProperty(AzureServerGroupDescription description) { + if(description.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString()) { + subnet = new
NetworkInterfaceIPConfigurationSubnet() + loadBalancerBackendAddressPools.add(new ExistLoadBalancerBackendAddressPool()) + loadBalancerInboundNatPools.add(new ExistLoadBalancerInboundNatPoolId()) + } else if (description.loadBalancerType == AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString()) { + subnet = new NetworkInterfaceIPConfigurationSubnet() + if(description.enableInboundNAT) { + loadBalancerBackendAddressPools.add(new LoadBalancerBackendAddressPool()) + loadBalancerInboundNatPools.add(new LoadBalancerInboundNatPoolId()) + } + ApplicationGatewayBackendAddressPools.add(new AppGatewayBackendAddressPool()) + } else if (description.loadBalancerType == null) { + subnet = new NetworkInterfaceIPConfigurationSubnet() + } else { + throw new RuntimeException("Load balancer type $description.loadBalancerType is not valid") + } } } @@ -521,12 +694,25 @@ class AzureServerGroupResourceTemplate { } static class LoadBalancerInboundNatPoolId extends IdRef { - LoadBalancerInboundNatPoolId() { id = "[resourceId('Microsoft.Network/loadBalancers/inboundNatPools', variables('loadBalancerName'), variables('inboundNatPoolName'))]" } } + static class ExistLoadBalancerBackendAddressPool { + String id + + ExistLoadBalancerBackendAddressPool() { + id = "[parameters('${loadBalancerAddressPoolParameterName}')]" + } + } + + static class ExistLoadBalancerInboundNatPoolId extends IdRef { + ExistLoadBalancerInboundNatPoolId() { + id = "[parameters('${loadBalancerNatPoolParameterName}')]" + } + } + static class AppGatewayBackendAddressPool { String id @@ -544,16 +730,28 @@ class AzureServerGroupResourceTemplate { StorageProfile storageProfile ScaleSetOsProfile osProfile ScaleSetNetworkProfileProperty networkProfile + ScheduledEventsProfile scheduledEventsProfile ScaleSetVMProfileProperty(AzureServerGroupDescription description) { storageProfile = description.image.isCustom ? - new ScaleSetCustomImageStorageProfile(description) : + new ScaleSetCustomManagedImageStorageProfile(description) : new ScaleSetStorageProfile(description) - osProfile = description.osConfig.customData ? 
- new ScaleSetOsProfileCustomDataProperty(description) : - new ScaleSetOsProfileProperty(description) - networkProfile = new ScaleSetNetworkProfileProperty(description) + if (description.credentials.useSshPublicKey) { + osProfile = new ScaleSetOsProfileLinuxConfiguration(description) + } + else if (description.windowsTimeZone) { + osProfile = new ScaleSetOsProfileWindowsConfiguration(description) + } + else { + osProfile = new ScaleSetOsProfileProperty(description) + } + + if (description.terminationNotBeforeTimeoutInMinutes != null) { + scheduledEventsProfile = new ScheduledEventsProfile(description) + } + + networkProfile = new ScaleSetNetworkProfileProperty(description) } } @@ -582,6 +780,8 @@ class AzureServerGroupResourceTemplate { OSDisk osDisk String imageReference + List dataDisks + /** * * @param serverGroupDescription @@ -589,21 +789,32 @@ class AzureServerGroupResourceTemplate { ScaleSetStorageProfile(AzureServerGroupDescription description) { osDisk = new VirtualMachineOSDisk(description) imageReference = "[variables('imageReference')]" + dataDisks = description.dataDisks + } + } + + + static class ImageReference { + String id + + ImageReference(AzureServerGroupDescription description) { + id = description.image.uri } } /** * */ - static class ScaleSetCustomImageStorageProfile implements StorageProfile { - - OSDisk osDisk + static class ScaleSetCustomManagedImageStorageProfile implements StorageProfile { + ImageReference imageReference + List dataDisks /** * * @param serverGroupDescription */ - ScaleSetCustomImageStorageProfile(AzureServerGroupDescription description) { - osDisk = new VirtualMachineCustomImageOSDisk(description) + ScaleSetCustomManagedImageStorageProfile(AzureServerGroupDescription description) { + imageReference = new ImageReference(description) + dataDisks = description.dataDisks } } @@ -627,51 +838,86 @@ class AzureServerGroupResourceTemplate { } } - static class VirtualMachineCustomImageOSDisk implements OSDisk { + /**** VMSS extensionsProfile ****/ + static class ScaleSetExtensionProfileProperty { + Collection extensions = [] - String name - String caching - String createOption - String osType - Map image = [:] + ScaleSetExtensionProfileProperty(AzureServerGroupDescription description) { + if (description.customScriptsSettings?.commandToExecute) { + Collection uriTemp = description.customScriptsSettings.fileUris + if (!uriTemp || uriTemp.isEmpty() || (uriTemp.size() == 1 && !uriTemp.first()?.trim())) { - VirtualMachineCustomImageOSDisk(AzureServerGroupDescription description) { - name = "osdisk-${description.name}" - caching = "ReadOnly" - createOption = "FromImage" - osType = description.image.ostype - image.uri = description.image.uri + // if there are no custom scripts provided, set the fileUris section as an empty array. 
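// Editor's note, not part of the PR: the guard above is deliberately broad. In Groovy,
// !uriTemp is already true for both null and an empty collection, and the final clause also
// catches a single blank entry such as [""], so every "no scripts" shape collapses to an
// empty fileUris array and the extension simply runs commandToExecute without downloading anything.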
+ description.customScriptsSettings.fileUris = [] + } + + extensions.add(new CustomScriptExtensions(description)) + } + + // protocol is the only required setting in both scenarios + // https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-health-extension#settings + if (description.healthSettings?.protocol) { + extensions.add(new HealthExtensions(description)) + } } } + interface IExtensions { + String name + IExtensionProperty properties + } - /**** VMSS extensionsProfile ****/ - static class ScaleSetExtensionProfileProperty { - Collection extensions = [] + static class HealthExtensions implements IExtensions { + String name + HealthExtensionProperty properties - ScaleSetExtensionProfileProperty(AzureServerGroupDescription description) { - extensions.add(new Extensions(description)) + HealthExtensions(AzureServerGroupDescription description) { + name = description.application + "_health_ext" + properties = new HealthExtensionProperty(description) } } - static class Extensions { + static class CustomScriptExtensions implements IExtensions { String name - ExtensionProperty properties + CustomStringExtensionProperty properties - Extensions(AzureServerGroupDescription description) { + CustomScriptExtensions(AzureServerGroupDescription description) { name = description.application + "_ext" - properties = new ExtensionProperty(description) + properties = new CustomStringExtensionProperty(description) + } + } + + interface IExtensionProperty { + String publisher + String type + String typeHandlerVersion // This will need to be updated every time the custom script extension major version is updated + Boolean autoUpgradeMinorVersion = true + IExtensionSettings settings + } + + static class HealthExtensionProperty implements IExtensionProperty { + String publisher + String type + String typeHandlerVersion // This will need to be updated every time the custom script extension major version is updated + Boolean autoUpgradeMinorVersion = true + HealthExtensionSettings settings + + HealthExtensionProperty(AzureServerGroupDescription description) { + settings = new HealthExtensionSettings(description) + publisher = AzureUtilities.AZURE_HEALTH_EXT_PUBLISHER + type = description.image?.ostype?.toLowerCase() == "linux" ? AzureUtilities.AZURE_HEALTH_EXT_TYPE_LINUX : AzureUtilities.AZURE_HEALTH_EXT_TYPE_WINDOWS + typeHandlerVersion = AzureUtilities.AZURE_HEALTH_EXT_VERSION } } - static class ExtensionProperty { + static class CustomStringExtensionProperty implements IExtensionProperty { String publisher String type String typeHandlerVersion // This will need to be updated every time the custom script extension major version is updated Boolean autoUpgradeMinorVersion = true CustomScriptExtensionSettings settings - ExtensionProperty(AzureServerGroupDescription description) { + CustomStringExtensionProperty(AzureServerGroupDescription description) { settings = new CustomScriptExtensionSettings(description) publisher = description.image?.ostype?.toLowerCase() == "linux" ? AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_PUBLISHER_LINUX : AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_PUBLISHER_WINDOWS type = description.image?.ostype?.toLowerCase() == "linux" ? 
AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_TYPE_LINUX: AzureUtilities.AZURE_CUSTOM_SCRIPT_EXT_TYPE_WINDOWS @@ -679,7 +925,26 @@ class AzureServerGroupResourceTemplate { } } - static class CustomScriptExtensionSettings { + interface IExtensionSettings {} + + static class HealthExtensionSettings implements IExtensionSettings { + String protocol + int port + String requestPath + + HealthExtensionSettings(AzureServerGroupDescription description) { + protocol = description.healthSettings.protocol + try { + port = Integer.parseInt(description.healthSettings.port) + } catch (NumberFormatException ignored) { + port = 0 + throw new IllegalArgumentException("healthSettings.port \"$description.healthSettings.port\" is not a valid integer") + } + requestPath = description.healthSettings.requestPath + } + } + + static class CustomScriptExtensionSettings implements IExtensionSettings { Collection fileUris String commandToExecute @@ -689,7 +954,6 @@ class AzureServerGroupResourceTemplate { } } - /**** Load Balancer Resource ****/ static class LoadBalancer extends DependingResource { LoadBalancerProperties properties @@ -705,6 +969,8 @@ class AzureServerGroupResourceTemplate { tags.stack = description.stack tags.detail = description.detail tags.createdTime = currentTime.toString() + // Mark self as an internal load balancer only for instance access + tags.internal = "1" if (description.clusterName) tags.cluster = description.clusterName if (description.name) tags.serverGroup = description.name if (description.securityGroupName) tags.securityGroupName = description.securityGroupName @@ -781,25 +1047,4 @@ class AzureServerGroupResourceTemplate { backendPort = inboundPortConfig.backendPort } } -/* - static class PublicIpResource extends Resource { - - PublicIpResource() { - apiVersion = '2015-06-15' - name = '''[variables('publicIpAddressName')]''' - type = '''Microsoft.Network/publicIPAddresses''' - location = "[parameters('${locationParameterName}')]" - } - PublicIPPropertiesWithDns properties = new PublicIPPropertiesWithDns() - } - - static class PublicIPProperties { - String publicIPAllocationMethod = '''[variables('publicIpAddressType')]''' - DnsSettings dnsSettings = new DnsSettings() - } - - static class DnsSettings { - String domainNameLabel = '''[variables('dnsNameForLBIP')]''' - } -*/ } diff --git a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/Resource.groovy b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/Resource.groovy index 9bb210163fd..4d528905e9c 100644 --- a/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/Resource.groovy +++ b/clouddriver-azure/src/main/groovy/com/netflix/spinnaker/clouddriver/azure/templates/Resource.groovy @@ -46,6 +46,14 @@ class Resource { Map tags } +class Sku { + String name + + Sku(String name) { + this.name = name + } +} + class IdRef{ String id @@ -59,12 +67,14 @@ class IdRef{ class PublicIpResource extends Resource{ PublicIPProperties properties = new PublicIPProperties() + Sku sku PublicIpResource() { apiVersion = "[variables('apiVersion')]" name = "[variables('publicIPAddressName')]" type = "Microsoft.Network/publicIPAddresses" location = "[parameters('location')]" + sku = new Sku("Basic") } } @@ -107,9 +117,15 @@ class StringParameterWithDefault extends StringParameter { } class SecureStringParameter extends StringParameter{ + String defaultValue SecureStringParameter(Map metadata) { super(metadata) type = "securestring" } + + SecureStringParameter(Map 
metadata, String defaultValue) { + this(metadata) + this.defaultValue = defaultValue + } } diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/ResizeAzureServerGroupDescription.java b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/ResizeAzureServerGroupDescription.java new file mode 100644 index 00000000000..d7d9c8ec6b9 --- /dev/null +++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/ResizeAzureServerGroupDescription.java @@ -0,0 +1,58 @@ +/* + * Copyright 2019 The original authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model; + +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; + +public class ResizeAzureServerGroupDescription extends AzureServerGroupDescription + implements ServerGroupsNameable { + private String serverGroupName; + private Integer targetSize; + private Capacity capacity; + + public String getServerGroupName() { + return serverGroupName; + } + + public void setServerGroupName(String serverGroupName) { + this.serverGroupName = serverGroupName; + } + + public Integer getTargetSize() { + return targetSize; + } + + public void setTargetSize(Integer targetSize) { + this.targetSize = targetSize; + } + + public Capacity getCapacity() { + return capacity; + } + + public void setCapacity(Capacity capacity) { + this.capacity = capacity; + } + + @Override + public Collection getServerGroupNames() { + return new ArrayList(Arrays.asList(serverGroupName)); + } +} diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation.java b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..0e7ea4f0887 --- /dev/null +++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation.java @@ -0,0 +1,425 @@ +/* + * Copyright 2016 The original authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
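ResizeAzureServerGroupDescription above carries both an explicit targetSize and a Capacity; the resize operation later in this diff prefers the former and falls back to the capacity's desired count. A hedged sketch of that resolution, assuming Capacity exposes getDesired() as the operation's usage implies:

```java
// Not part of the diff; mirrors the logic in ResizeAzureServerGroupAtomicOperation below.
final class TargetSizeResolution {
  static int effectiveTargetSize(ResizeAzureServerGroupDescription description) {
    return description.getTargetSize() != null
        ? description.getTargetSize()
        : description.getCapacity().getDesired();
  }
}
```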
+ */ + +package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops; + +import static com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.CreateAzureServerGroupAtomicOperation.SERVER_WAIT_TIMEOUT; + +import com.azure.resourcemanager.compute.models.VirtualMachineImage; +import com.azure.resourcemanager.resources.models.Deployment; +import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities; +import com.netflix.spinnaker.clouddriver.azure.resources.common.model.AzureDeploymentOperation; +import com.netflix.spinnaker.clouddriver.azure.resources.common.model.KeyVaultSecret; +import com.netflix.spinnaker.clouddriver.azure.resources.network.model.AzureVirtualNetworkDescription; +import com.netflix.spinnaker.clouddriver.azure.resources.network.view.AzureNetworkProvider; +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription; +import com.netflix.spinnaker.clouddriver.azure.resources.subnet.model.AzureSubnetDescription; +import com.netflix.spinnaker.clouddriver.azure.templates.AzureServerGroupResourceTemplate; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException; +import java.util.*; +import org.springframework.beans.factory.annotation.Autowired; + +public class CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation + implements AtomicOperation { + private static final String BASE_PHASE = "CREATE_SERVER_GROUP"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private final AzureServerGroupDescription description; + + @Autowired AzureNetworkProvider networkProvider; + + public CreateAzureServerGroupWithAzureLoadBalancerAtomicOperation( + AzureServerGroupDescription description) { + this.description = description; + } + + /** + * curl -X POST -H "Content-Type: application/json" -d + * '[{"createServerGroup":{"name":"taz-st1-d1","cloudProvider":"azure","application":"taz","stack":"st1","detail":"d1","vnet":"vnet-select","subnet":"subnet1","account":"azure-cred1","selectedProvider":"azure","capacity":{"useSourceCapacity":false,"min":1,"max":1},"credentials":"azure-cred1","region":"westus","loadBalancerName":"taz-ag1-d1","securityGroupName":"taz-secg1","user":"[anonymous]","upgradePolicy":"Manual","image":{"account":"azure-cred1","imageName":"UbuntuServer-14.04.3-LTS(Recommended)","isCustom":false,"offer":"UbuntuServer","ostype":null,"publisher":"Canonical","region":null,"sku":"14.04.3-LTS","uri":null,"version":"14.04.201602171"},"sku":{"name":"Standard_DS1_v2","tier":"Standard","capacity":1},"osConfig":{},"type":"createServerGroup"}}]' + * localhost:7002/ops + * + * @param priorOutputs + * @return + */ + @Override + public Map operate(List priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Initializing deployment of server group with Azure Load Balancer %s in %s", + description.getName(), description.getRegion())); + + List errList = new ArrayList<>(); + String resourceGroupName = null; + String virtualNetworkName = null; + String loadBalancerPoolID = null; + String inboundNatPoolID = null; + String subnetId = null; + + try { + + getTask().updateStatus(BASE_PHASE, "Beginning server group deployment"); + + // if this is not a custom image, then we need to go get the OsType from Azure + if 
(!description.getImage().getIsCustom()) {
+        VirtualMachineImage virtualMachineImage =
+            description
+                .getCredentials()
+                .getComputeClient()
+                .getVMImage(
+                    description.getRegion(),
+                    description.getImage().getPublisher(),
+                    description.getImage().getOffer(),
+                    description.getImage().getSku(),
+                    description.getImage().getVersion());
+
+        if (virtualMachineImage == null) {
+          throw new RuntimeException(
+              String.format(
+                  "Invalid published image was selected; %s:%s:%s:%s does not exist",
+                  description.getImage().getPublisher(),
+                  description.getImage().getOffer(),
+                  description.getImage().getSku(),
+                  description.getImage().getVersion()));
+        }
+
+        if (description.getImage().getImageName() == null) {
+          description.getImage().setImageName(virtualMachineImage.innerModel().name());
+        }
+        if (description.getImage().getOstype() == null) {
+          description
+              .getImage()
+              .setOstype(virtualMachineImage.osDiskImage().operatingSystem().name());
+        }
+      }
+
+      resourceGroupName =
+          AzureUtilities.getResourceGroupName(
+              description.getApplication(), description.getRegion());
+
+      String loadBalancerName = description.getLoadBalancerName();
+
+      virtualNetworkName = description.getVnet();
+      final String subnetName = description.getSubnet();
+
+      AzureVirtualNetworkDescription vnetDescription =
+          networkProvider.get(
+              description.getAccountName(),
+              description.getRegion(),
+              description.getVnetResourceGroup(),
+              virtualNetworkName);
+
+      if (vnetDescription == null) {
+        throw new RuntimeException(
+            "Selected virtual network " + virtualNetworkName + " does not exist");
+      }
+
+      List<AzureSubnetDescription> subnets = vnetDescription.getSubnets();
+
+      if (subnets == null || subnets.size() == 0) {
+        throw new RuntimeException(
+            "Cannot find any subnets in virtual network " + virtualNetworkName);
+      }
+
+      Optional<AzureSubnetDescription> filteredSubnet =
+          subnets.stream().filter(subnet -> subnet.getName().equals(subnetName)).findFirst();
+
+      if (!filteredSubnet.isPresent()) {
+        throw new RuntimeException(
+            "Selected subnet "
+                + subnetName
+                + " in virtual network "
+                + virtualNetworkName
+                + " is not valid");
+      }
+
+      subnetId = filteredSubnet.get().getResourceId();
+
+      getTask()
+          .updateStatus(
+              BASE_PHASE,
+              String.format(
+                  "Using virtual network %s and subnet %s for server group %s",
+                  virtualNetworkName, subnetName, description.getName()));
+
+      // we will try to associate the server group with the selected virtual network and subnet
+      description.setHasNewSubnet(false);
+
+      AzureServerGroupNameResolver nameResolver =
+          new AzureServerGroupNameResolver(
+              description.getAccountName(), description.getRegion(), description.getCredentials());
+      description.setName(
+          nameResolver.resolveNextServerGroupName(
+              description.getApplication(),
+              description.getStack(),
+              description.getDetail(),
+              false));
+      description.setClusterName(description.getClusterName());
+      description.setAppName(description.getApplication());
+
+      // Verify that it can be used for this server group/cluster.
create a backend address pool + // entry if it doesn't already exist + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Create new backend address pool in Load Balancer: %s", loadBalancerName)); + loadBalancerPoolID = + description + .getCredentials() + .getNetworkClient() + .createLoadBalancerAPforServerGroup( + resourceGroupName, description.getLoadBalancerName(), description.getName()); + + if (loadBalancerPoolID == null) { + throw new RuntimeException( + String.format( + "Selected Load Balancer %s does not exist", description.getLoadBalancerName())); + } + + // Create new inbound NAT pool for the server group + getTask() + .updateStatus( + BASE_PHASE, + String.format("Create new inbound NAT pool in Load Balancer: %s", loadBalancerName)); + inboundNatPoolID = + description + .getCredentials() + .getNetworkClient() + .createLoadBalancerNatPoolPortRangeforServerGroup( + resourceGroupName, description.getLoadBalancerName(), description.getName()); + + if (inboundNatPoolID == null) { + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Failed to create new inbound NAT pool in Load Balancer: %s, the task will continue", + loadBalancerName)); + } + + Map templateParameters = new HashMap<>(); + + templateParameters.put(AzureServerGroupResourceTemplate.getSubnetParameterName(), subnetId); + templateParameters.put( + AzureServerGroupResourceTemplate.getAppGatewayAddressPoolParameterName(), + loadBalancerPoolID); + templateParameters.put( + AzureServerGroupResourceTemplate.getVmUserNameParameterName(), + new KeyVaultSecret( + "VMUsername", + description.getCredentials().getSubscriptionId(), + description.getCredentials().getDefaultResourceGroup(), + description.getCredentials().getDefaultKeyVault())); + + if (description.getCredentials().getUseSshPublicKey() != null + && description.getCredentials().getUseSshPublicKey()) { + templateParameters.put( + AzureServerGroupResourceTemplate.getVmSshPublicKeyParameterName(), + new KeyVaultSecret( + "VMSshPublicKey", + description.getCredentials().getSubscriptionId(), + description.getCredentials().getDefaultResourceGroup(), + description.getCredentials().getDefaultKeyVault())); + } else { + templateParameters.put( + AzureServerGroupResourceTemplate.getVmPasswordParameterName(), + new KeyVaultSecret( + "VMPassword", + description.getCredentials().getSubscriptionId(), + description.getCredentials().getDefaultResourceGroup(), + description.getCredentials().getDefaultKeyVault())); + } + + templateParameters.put( + AzureServerGroupResourceTemplate.getLoadBalancerAddressPoolParameterName(), + loadBalancerPoolID); + templateParameters.put( + AzureServerGroupResourceTemplate.getLoadBalancerNatPoolParameterName(), inboundNatPoolID); + + // The empty "" cannot be assigned to the custom data otherwise Azure service will run into + // error complaining "custom data must be in Base64". + // So once there is no custom data, remove this template section rather than assigning a "". 
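A note on the KeyVaultSecret parameters assembled above: they do not inline secret values. When serialized into the ARM deployment parameters they become Key Vault references that Azure resolves at deployment time. The expected shape, taken from the AzureUtilitiesSpec expectation later in this diff (subscription, resource group, and vault names below are placeholders):

```java
final class KeyVaultParameterShape {
  // Parameter shape produced for a KeyVaultSecret, per AzureUtilitiesSpec below.
  static final String VM_PASSWORD_PARAMETER =
      """
      "vmPassword" : {
        "reference" : {
          "keyVault" : {
            "id" : "/subscriptions/<subscriptionId>/resourceGroups/<resourceGroup>/providers/Microsoft.KeyVault/vaults/<vaultName>"
          },
          "secretName" : "VMPassword"
        }
      }
      """;
}
```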
+ if (description.getOsConfig().getCustomData() != null + && description.getOsConfig().getCustomData().length() > 0) { + templateParameters.put( + AzureServerGroupResourceTemplate.getCustomDataParameterName(), + description.getOsConfig().getCustomData()); + } + + if (errList.isEmpty()) { + getTask().updateStatus(BASE_PHASE, "Deploying server group"); + Deployment deployment = + description + .getCredentials() + .getResourceManagerClient() + .createResourceFromTemplate( + AzureServerGroupResourceTemplate.getTemplate(description), + resourceGroupName, + description.getRegion(), + description.getName(), + "serverGroup", + templateParameters); + + errList.addAll( + AzureDeploymentOperation.checkDeploymentOperationStatus( + getTask(), + BASE_PHASE, + description.getCredentials(), + resourceGroupName, + deployment.name())); + } + } catch (Exception e) { + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Unexpected exception: Deployment of server group %s failed: %s", + description.getName(), e.getMessage())); + errList.add(e.getMessage()); + } + if (errList.isEmpty()) { + if (description + .getCredentials() + .getNetworkClient() + .isServerGroupWithAppGatewayDisabled( + resourceGroupName, description.getLoadBalancerName(), description.getName())) { + description + .getCredentials() + .getNetworkClient() + .enableServerGroupWithLoadBalancer( + resourceGroupName, description.getLoadBalancerName(), description.getName()); + + Boolean healthy = + description + .getCredentials() + .getComputeClient() + .waitForScaleSetHealthy( + resourceGroupName, description.getName(), SERVER_WAIT_TIMEOUT); + + if (healthy) { + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Done enabling Azure server group %s in %s.", + description.getName(), description.getRegion())); + } else { + errList.add("Server group did not come up in time"); + } + + } else { + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Azure server group %s in %s is already enabled.", + description.getName(), description.getRegion())); + } + + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Deployment for server group %s in %s has succeeded.", + description.getName(), description.getRegion())); + } + if (!errList.isEmpty()) { + // cleanup any resources that might have been created prior to server group failing to deploy + getTask() + .updateStatus(BASE_PHASE, "Cleanup any resources created as part of server group upsert"); + try { + if (description.getName() != null && description.getName().length() > 0) { + AzureServerGroupDescription sgDescription = + description + .getCredentials() + .getComputeClient() + .getServerGroup(resourceGroupName, description.getName()); + if (sgDescription != null) { + description + .getCredentials() + .getComputeClient() + .destroyServerGroup(resourceGroupName, description.getName()); + } + } + } catch (Exception e) { + String errMessage = + String.format( + "Unexpected exception: %s! 
Please log in to Azure Portal and manually delete any resource associated with the %s server group such as storage accounts, internal load balancer, public IP and subnets",
+                e.getMessage(), description.getName());
+        getTask().updateStatus(BASE_PHASE, errMessage);
+        errList.add(errMessage);
+      }
+
+      try {
+        if (loadBalancerPoolID != null) {
+          description
+              .getCredentials()
+              .getNetworkClient()
+              .removeLoadBalancerAPforServerGroup(
+                  resourceGroupName, description.getLoadBalancerName(), description.getName());
+        }
+      } catch (Exception e) {
+        String errMessage =
+            String.format(
+                "Unexpected exception: %s! Load balancer backend address pool entry %s associated with the %s server group could not be deleted",
+                e.getMessage(), loadBalancerPoolID, description.getName());
+        getTask().updateStatus(BASE_PHASE, errMessage);
+        errList.add(errMessage);
+      }
+
+      try {
+        if (inboundNatPoolID != null) {
+          description
+              .getCredentials()
+              .getNetworkClient()
+              .removeLoadBalancerNatPoolPortRangeforServerGroup(
+                  resourceGroupName, description.getLoadBalancerName(), description.getName());
+        }
+      } catch (Exception e) {
+        String errMessage =
+            String.format(
+                "Unexpected exception: %s! Load balancer inbound nat pool entry %s associated with the %s server group could not be deleted",
+                e.getMessage(), inboundNatPoolID, description.getName());
+        getTask().updateStatus(BASE_PHASE, errMessage);
+        errList.add(errMessage);
+      }
+
+      throw new AtomicOperationException(
+          String.format("%s deployment failed", description.getName()), errList);
+    }
+
+    LinkedHashMap<String, Object> map = new LinkedHashMap<>(2);
+    LinkedHashMap<String, LinkedHashMap<String, Object>> map1 =
+        new LinkedHashMap<>(1);
+    LinkedHashMap<String, Object> map2 = new LinkedHashMap<>(1);
+    map2.put("name", description.getName());
+    map1.put(description.getRegion(), map2);
+    map.put("serverGroups", map1);
+    map.put(
+        "serverGroupNames",
+        new ArrayList<>(
+            Arrays.asList(description.getRegion() + ":" + description.getName())));
+    return map;
+  }
+}
diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupWithoutLoadBalancersAtomicOperation.groovy b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupWithoutLoadBalancersAtomicOperation.groovy
new file mode 100644
index 00000000000..5eef56e0918
--- /dev/null
+++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/CreateAzureServerGroupWithoutLoadBalancersAtomicOperation.groovy
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
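Both create-server-group operations in this diff finish by returning the same result shape: a per-region map of the new server group plus a flat list of region-qualified names. For illustration, with values borrowed from the curl example earlier in this diff (region and name are runtime values, not fixed):

```java
import java.util.List;
import java.util.Map;

final class CreateResultShape {
  // Illustrative only; region and server-group name come from the description at runtime.
  static Map<String, Object> example() {
    return Map.of(
        "serverGroups", Map.of("westus", Map.of("name", "taz-st1-d1-v000")),
        "serverGroupNames", List.of("westus:taz-st1-d1-v000"));
  }
}
```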
+ */
+
+package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops
+
+import com.azure.resourcemanager.resources.models.Deployment
+import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities
+import com.netflix.spinnaker.clouddriver.azure.resources.common.model.AzureDeploymentOperation
+import com.netflix.spinnaker.clouddriver.azure.resources.common.model.KeyVaultSecret
+import com.netflix.spinnaker.clouddriver.azure.resources.network.view.AzureNetworkProvider
+import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription
+import com.netflix.spinnaker.clouddriver.azure.templates.AzureServerGroupResourceTemplate
+import com.netflix.spinnaker.clouddriver.data.task.Task
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException
+import org.springframework.beans.factory.annotation.Autowired
+
+import static com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.CreateAzureServerGroupAtomicOperation.*
+
+class CreateAzureServerGroupWithoutLoadBalancersAtomicOperation implements AtomicOperation {
+  private static final String BASE_PHASE = "CREATE_SERVER_GROUP"
+
+  private static Task getTask() {
+    TaskRepository.threadLocalTask.get()
+  }
+
+  private final AzureServerGroupDescription description
+
+  @Autowired
+  AzureNetworkProvider networkProvider
+
+  CreateAzureServerGroupWithoutLoadBalancersAtomicOperation(AzureServerGroupDescription description) {
+    this.description = description
+  }
+
+  @Override
+  Map operate(List priorOutputs) {
+    def errList = new ArrayList()
+    String resourceGroupName = null
+    String virtualNetworkName = null
+    String subnetName = null
+    String subnetId
+    String serverGroupName = null
+
+    try {
+      task.updateStatus(BASE_PHASE, "Beginning server group deployment")
+
+      // if this is not a custom image, then we need to go get the OsType from Azure
+      if (!description.image.isCustom) {
+        def virtualMachineImage = description.credentials.computeClient.getVMImage(description.region,
+          description.image.publisher, description.image.offer, description.image.sku, description.image.version)
+
+        if (!virtualMachineImage) {
+          throw new RuntimeException("Invalid published image was selected; $description.image.publisher:$description.image.offer:$description.image.sku:$description.image.version does not exist")
+        }
+
+        description.image.imageName = description.image.imageName ?: virtualMachineImage.name
+        description.image.ostype = virtualMachineImage?.osDiskImage?.operatingSystem
+      }
+
+      resourceGroupName = AzureUtilities.getResourceGroupName(description.application, description.region)
+      // Create corresponding ResourceGroup if it's not created already
+      description.credentials.resourceManagerClient.initializeResourceGroupAndVNet(resourceGroupName, null, description.region)
+
+      virtualNetworkName = description.vnet
+
+      def vnetDescription = networkProvider.get(description.accountName, description.region, description.vnetResourceGroup, virtualNetworkName)
+
+      if (!vnetDescription) {
+        throw new RuntimeException("Selected virtual network $virtualNetworkName does not exist")
+      }
+
+      task.updateStatus(BASE_PHASE, "Using virtual network $virtualNetworkName and subnet $description.subnet for server group $description.name")
+
+      // we will try to associate the server group with the selected virtual network and subnet
+      description.hasNewSubnet = false
+
+      // subnet is valid only if it
exists within the selected vnet and it's unassigned or all the associations are NOT application gateways + subnetId = vnetDescription.subnets?.find { subnet -> + (subnet.name == description.subnet) && (!subnet.connectedDevices || !subnet.connectedDevices.find {it.type == "applicationGateways"}) + }?.resourceId + + if (!subnetId) { + throw new RuntimeException("Selected subnet $description.subnet in virtual network $description.vnet is not valid") + } + + AzureServerGroupNameResolver nameResolver = new AzureServerGroupNameResolver(description.accountName, description.region, description.credentials) + description.name = nameResolver.resolveNextServerGroupName(description.application, description.stack, description.detail, false) + description.clusterName = description.getClusterName() + description.appName = description.application + description.vnet = virtualNetworkName + description.subnet = subnetName + + Map templateParameters = [:] + + templateParameters[AzureServerGroupResourceTemplate.subnetParameterName] = subnetId + templateParameters[AzureServerGroupResourceTemplate.vmUserNameParameterName] = new KeyVaultSecret("VMUsername", + description.credentials.subscriptionId, + description.credentials.defaultResourceGroup, + description.credentials.defaultKeyVault) + + if(description.credentials.useSshPublicKey) { + templateParameters[AzureServerGroupResourceTemplate.vmSshPublicKeyParameterName] = new KeyVaultSecret("VMSshPublicKey", + description.credentials.subscriptionId, + description.credentials.defaultResourceGroup, + description.credentials.defaultKeyVault) + } + else { + templateParameters[AzureServerGroupResourceTemplate.vmPasswordParameterName] = new KeyVaultSecret("VMPassword", + description.credentials.subscriptionId, + description.credentials.defaultResourceGroup, + description.credentials.defaultKeyVault) + } + + // The empty "" cannot be assigned to the custom data otherwise Azure service will run into error complaining "custom data must be in Base64". + // So once there is no custom data, remove this template section rather than assigning a "". + if(description.osConfig.customData){ + templateParameters[AzureServerGroupResourceTemplate.customDataParameterName] = description.osConfig.customData + } + + if (errList.isEmpty()) { + description.subnetId = subnetId + task.updateStatus(BASE_PHASE, "Deploying server group") + String template = AzureServerGroupResourceTemplate.getTemplate(description) + Deployment deployment = description.credentials.resourceManagerClient.createResourceFromTemplate( + template, + resourceGroupName, + description.region, + description.name, + "serverGroup", + templateParameters) + + def healthy = description.credentials.computeClient.waitForScaleSetHealthy(resourceGroupName, description.name, SERVER_WAIT_TIMEOUT) + + if (healthy) { + getTask().updateStatus(BASE_PHASE, String.format( + "Done enabling Azure server group %s in %s.", + description.getName(), description.getRegion())) + } else { + errList.add("Server group did not come up in time") + } + + errList.addAll(AzureDeploymentOperation.checkDeploymentOperationStatus(task, BASE_PHASE, description.credentials, resourceGroupName, deployment.name())) + serverGroupName = errList.isEmpty() ? 
description.name : null + } + } catch (Exception e) { + task.updateStatus(BASE_PHASE, "Unexpected exception: Deployment of server group ${description.name} failed: ${e.message}") + errList.add(e.message) + } + if (errList.isEmpty()) { + // There is no concept of "disabled" for a server group that is not fronted by a load balancer. + // Because of that, we leave it up to the user to decide how to handle it via their pipeline + // (either resize/disable or destroy) + + task.updateStatus(BASE_PHASE, "Deployment for server group ${description.name} in ${description.region} has succeeded.") + } else { + // cleanup any resources that might have been created prior to server group failing to deploy + task.updateStatus(BASE_PHASE, "Cleanup any resources created as part of server group upsert") + try { + if (description.name) { + def sgDescription = description.credentials + .computeClient + .getServerGroup(resourceGroupName, description.name) + if (sgDescription) { + description.credentials + .computeClient + .destroyServerGroup(resourceGroupName, description.name) + + // If this an Azure Market Store image, delete the storage that was created for it as well + if (!sgDescription.image.isCustom) { + sgDescription.storageAccountNames?.each { def storageAccountName -> + description.credentials + .storageClient + .deleteStorageAccount(resourceGroupName, storageAccountName) + } + } + } + } + if (description.hasNewSubnet) { + description.credentials + .networkClient + .deleteSubnet(description.vnetResourceGroup, virtualNetworkName, subnetName) + } + } catch (Exception e) { + def errMessage = "Unexpected exception: ${e.message}! Please log in into Azure Portal and manually delete any resource associated with the ${description.name} server group such as storage accounts, internal load balancer, public IP and subnets" + task.updateStatus(BASE_PHASE, errMessage) + errList.add(errMessage) + } + + throw new AtomicOperationException("${description.name} deployment failed", errList) + } + + [serverGroups: [(description.region): [name: description.name]], + serverGroupNames: ["${description.region}:${description.name}".toString()]] + } +} diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/ResizeAzureServerGroupAtomicOperation.java b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/ResizeAzureServerGroupAtomicOperation.java new file mode 100644 index 00000000000..9d830cf645c --- /dev/null +++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/ResizeAzureServerGroupAtomicOperation.java @@ -0,0 +1,158 @@ +/* + * Copyright 2019 The original authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
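The resize operation that follows, like the create operations above, uses an accumulate-then-throw failure contract: errors are collected into a list while the task keeps reporting status, and a single exception carrying the whole list is raised at the end. A stripped-down sketch of that pattern (using RuntimeException so the snippet stands alone; the diff itself throws AtomicOperationException):

```java
import java.util.List;

final class FailureContractSketch {
  // Accumulate failures, then surface them once, mirroring the operations in this diff.
  static void finish(String serverGroupName, List<String> errList) {
    if (!errList.isEmpty()) {
      errList.add(" Go to Azure Portal for more info");
      throw new RuntimeException("Failed to resize " + serverGroupName + ": " + errList);
    }
  }
}
```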
+ */ + +package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities; +import com.netflix.spinnaker.clouddriver.azure.resources.cluster.view.AzureClusterProvider; +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription; +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.ResizeAzureServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationException; +import java.util.ArrayList; +import java.util.List; +import org.codehaus.groovy.runtime.DefaultGroovyMethods; +import org.codehaus.groovy.runtime.StringGroovyMethods; +import org.springframework.beans.factory.annotation.Autowired; + +public class ResizeAzureServerGroupAtomicOperation implements AtomicOperation { + private static final String BASE_PHASE = "RESIZE_SERVER_GROUP"; + private final ResizeAzureServerGroupDescription description; + @Autowired private AzureClusterProvider azureClusterProvider; + + public AzureClusterProvider getAzureClusterProvider() { + return azureClusterProvider; + } + + public void setAzureClusterProvider(AzureClusterProvider azureClusterProvider) { + this.azureClusterProvider = azureClusterProvider; + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + public ResizeAzureServerGroupAtomicOperation(ResizeAzureServerGroupDescription description) { + this.description = description; + } + + /** + * curl -X POST -H "Content-Type: application/json" -d '[ { "resizeServerGroup": { + * "serverGroupName": "myapp-dev-v000", "targetSize": 2, "region": "us-central1", "credentials": + * "my-account-name" }} ]' localhost:7002/azure/ops + */ + @Override + public Void operate(List priorOutputs) { + getTask().updateStatus(BASE_PHASE, "Initializing Resize Azure Server Group Operation..."); + + final String region = description.getRegion(); + if (StringGroovyMethods.asBoolean(description.getServerGroupName())) + description.setName(description.getServerGroupName()); + if (!StringGroovyMethods.asBoolean(description.getApplication())) { + final String name = description.getAppName(); + description.setApplication( + StringGroovyMethods.asBoolean(name) + ? name + : Names.parseName(description.getName()).getApp()); + } + + final int targetSize = + description.getTargetSize() instanceof Number + ? 
description.getTargetSize() + : description.getCapacity().getDesired(); + getTask() + .updateStatus( + BASE_PHASE, + "Resizing server group " + + description.getName() + + " in " + + region + + " to target size " + + String.valueOf(targetSize) + + "..."); + + if (!DefaultGroovyMethods.asBoolean(description.getCredentials())) { + throw new IllegalArgumentException( + "Unable to resolve credentials for the selected Azure account."); + } + + ArrayList errList = new ArrayList(); + + try { + String resourceGroupName = + AzureUtilities.getResourceGroupName(description.getApplication(), region); + AzureServerGroupDescription serverGroupDescription = + description + .getCredentials() + .getComputeClient() + .getServerGroup(resourceGroupName, description.getName()); + + if (!DefaultGroovyMethods.asBoolean(serverGroupDescription)) { + getTask() + .updateStatus( + BASE_PHASE, + "Resize Server Group Operation failed: could not find server group " + + description.getName() + + " in " + + region); + errList.add("could not find server group " + description.getName() + " in " + region); + } else { + try { + description + .getCredentials() + .getComputeClient() + .resizeServerGroup(resourceGroupName, description.getName(), targetSize); + getTask() + .updateStatus( + BASE_PHASE, + "Done resizing Azure server group " + + description.getName() + + " in " + + region + + "."); + } catch (Exception e) { + getTask() + .updateStatus( + BASE_PHASE, + "Resizing server group " + description.getName() + " failed: " + e.getMessage()); + errList.add( + "Failed to resize server group " + description.getName() + ": " + e.getMessage()); + } + } + } catch (Exception e) { + getTask() + .updateStatus( + BASE_PHASE, + "Resizing server group " + description.getName() + " failed: " + e.getMessage()); + errList.add("Failed to resize server group " + description.getName() + ": " + e.getMessage()); + } + + if (errList.isEmpty()) { + getTask() + .updateStatus( + BASE_PHASE, + "Resize Azure Server Group Operation for " + description.getName() + " succeeded."); + } else { + errList.add(" Go to Azure Portal for more info"); + throw new AtomicOperationException("Failed to resize " + description.getName(), errList); + } + + return null; + } +} diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/ResizeAzureServerGroupAtomicOperationConverter.java b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/ResizeAzureServerGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..9492c74e50a --- /dev/null +++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/ops/converters/ResizeAzureServerGroupAtomicOperationConverter.java @@ -0,0 +1,56 @@ +/* + * Copyright 2019 The original authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
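For context on the converter that follows: the @Component bean name ("resizeAzureServerGroupDescription") is what lets a "resizeServerGroup" entry in an /ops payload (see the curl example above) reach this class. Its own surface is just two calls; the routing from payload key to bean is clouddriver plumbing outside this diff. A hedged usage sketch:

```java
import java.util.Map;

final class ConverterUsage {
  // The converter instance is normally injected by Spring; shown here explicitly.
  static void handle(
      ResizeAzureServerGroupAtomicOperationConverter converter, Map<String, Object> input) {
    ResizeAzureServerGroupDescription description = converter.convertDescription(input);
    var operation = converter.convertOperation(input); // a ResizeAzureServerGroupAtomicOperation
  }
}
```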
+ */ + +package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.converters; + +import com.netflix.spinnaker.clouddriver.azure.AzureOperation; +import com.netflix.spinnaker.clouddriver.azure.common.AzureAtomicOperationConverterHelper; +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.ResizeAzureServerGroupDescription; +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.ops.ResizeAzureServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import groovy.util.logging.Slf4j; +import java.util.Map; +import org.codehaus.groovy.runtime.DefaultGroovyMethods; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Component; + +@Slf4j +@AzureOperation(AtomicOperations.RESIZE_SERVER_GROUP) +@Component("resizeAzureServerGroupDescription") +public class ResizeAzureServerGroupAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + private final Logger log = LoggerFactory.getLogger(getClass()); + + public ResizeAzureServerGroupAtomicOperationConverter() { + log.trace("Constructor....ResizeAzureServerGroupAtomicOperationConverter"); + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new ResizeAzureServerGroupAtomicOperation(convertDescription(input)); + } + + @Override + public ResizeAzureServerGroupDescription convertDescription(Map input) { + return DefaultGroovyMethods.asType( + AzureAtomicOperationConverterHelper.convertDescription( + input, this, ResizeAzureServerGroupDescription.class), + ResizeAzureServerGroupDescription.class); + } +} diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureManagedImageCachingAgent.java b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureManagedImageCachingAgent.java new file mode 100644 index 00000000000..6091b76973f --- /dev/null +++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureManagedImageCachingAgent.java @@ -0,0 +1,155 @@ +/* + * Copyright 2022 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
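The caching agent defined next derives its identity from the account, default resource group, region, and class name, and polls on a custom schedule rather than the provider default. With assumed values ("azure-cred1", "spinnaker-rg", "westus" are illustrative, not from the diff), the exposed identifiers and defaults would be:

```java
import java.util.concurrent.TimeUnit;

final class AgentIdentityExample {
  // getAgentType() joins the four parts with "/".
  static final String AGENT_TYPE =
      "azure-cred1/spinnaker-rg/westus/AzureManagedImageCachingAgent";
  static final long POLL_INTERVAL_MILLIS = TimeUnit.HOURS.toMillis(2); // default poll interval
  static final long TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); // default timeout
}
```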
+ */
+
+package com.netflix.spinnaker.clouddriver.azure.resources.vmimage.cache;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.*;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider;
+import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys;
+import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.provider.AzureInfrastructureProvider;
+import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureManagedVMImage;
+import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials;
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class AzureManagedImageCachingAgent
+    implements CachingAgent, CustomScheduledAgent, AccountAware {
+  private final Logger log = LoggerFactory.getLogger(getClass());
+  private static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.HOURS.toMillis(2);
+  private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30);
+
+  private final AzureCloudProvider azureCloudProvider;
+  private final String accountName;
+  private final AzureCredentials creds;
+  private final String region;
+  private final ObjectMapper objectMapper;
+
+  private final long pollIntervalMillis;
+  private final long timeoutMillis;
+
+  private static final java.util.Set<AgentDataType> types =
+      Set.of(AUTHORITATIVE.forType(Keys.Namespace.AZURE_MANAGEDIMAGES.toString()));
+
+  public AzureManagedImageCachingAgent(
+      AzureCloudProvider azureCloudProvider,
+      String accountName,
+      AzureCredentials creds,
+      String region,
+      ObjectMapper objectMapper) {
+    this(
+        azureCloudProvider,
+        accountName,
+        creds,
+        region,
+        objectMapper,
+        DEFAULT_POLL_INTERVAL_MILLIS,
+        DEFAULT_TIMEOUT_MILLIS);
+  }
+
+  AzureManagedImageCachingAgent(
+      AzureCloudProvider azureCloudProvider,
+      String accountName,
+      AzureCredentials creds,
+      String region,
+      ObjectMapper objectMapper,
+      long pollIntervalMillis,
+      long timeoutMillis) {
+    this.azureCloudProvider = azureCloudProvider;
+    this.accountName = accountName;
+    this.creds = creds;
+    this.region = region;
+    this.objectMapper = objectMapper;
+    this.pollIntervalMillis = pollIntervalMillis;
+    this.timeoutMillis = timeoutMillis;
+  }
+
+  @Override
+  public String getProviderName() {
+    return AzureInfrastructureProvider.PROVIDER_NAME;
+  }
+
+  @Override
+  public String getAgentType() {
+    return new StringJoiner("/")
+        .add(accountName)
+        .add(creds.getDefaultResourceGroup())
+        .add(region)
+        .add(this.getClass().getSimpleName())
+        .toString();
+  }
+
+  @Override
+  public String getAccountName() {
+    return accountName;
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return types;
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    log.info("Describing items in {}", getAgentType());
+
+    List<AzureManagedVMImage> vmImages =
+        creds.getComputeClient().getAllVMCustomImages(creds.getDefaultResourceGroup(), region);
+    TypeReference<Map<String, Object>> typeRef = new TypeReference<>() {};
+
+    List<CacheData> data =
+        vmImages.stream()
+            .map(
+                vmImage -> {
+                  Map<String, Object> attributes =
objectMapper.convertValue(vmImage, typeRef); + return new DefaultCacheData( + Keys.getManagedVMImageKey( + azureCloudProvider, + accountName, + vmImage.getRegion(), + vmImage.getResourceGroup(), + vmImage.getName(), + vmImage.getOsType()), + Map.of("vmimage", attributes), + Map.of()); + }) + .collect(Collectors.toList()); + + log.info("Caching {} items in {}", data.size(), getAgentType()); + + return new DefaultCacheResult(Map.of(Keys.Namespace.AZURE_MANAGEDIMAGES.toString(), data)); + } + + @Override + public long getPollIntervalMillis() { + return this.pollIntervalMillis; + } + + @Override + public long getTimeoutMillis() { + return this.timeoutMillis; + } +} diff --git a/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/model/AzureManagedVMImage.java b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/model/AzureManagedVMImage.java new file mode 100644 index 00000000000..dc61f425cf8 --- /dev/null +++ b/clouddriver-azure/src/main/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/model/AzureManagedVMImage.java @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model; + +public class AzureManagedVMImage { + + private String name; + private String resourceGroup; + private String region; + private String osType; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getResourceGroup() { + return resourceGroup; + } + + public void setResourceGroup(String resourceGroup) { + this.resourceGroup = resourceGroup; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + public String getOsType() { + return osType; + } + + public void setOsType(String osType) { + this.osType = osType; + } +} diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/cache/KeysSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/cache/KeysSpec.groovy index ec629486b1c..fa3ae36127e 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/cache/KeysSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/cache/KeysSpec.groovy @@ -35,6 +35,9 @@ class KeysSpec extends Specification { static final String CLUSTER_NAME = [APP_NAME,STACK_NAME, DETAIL].join('-') static final String LOAD_BALANCER_NAME = [APP_NAME,STACK_NAME, DETAIL].join('-') static final String LOAD_BALANCER_ID = [APP_NAME,STACK_NAME, DETAIL].join('-') + 'ID' + static final String RESOURCE_GROUP = 'resourcegroup' + static final String VM_NAME = 'vmname' + static final String VM_OS_TYPE = 'Windows' @Unroll def 'namespace string generation'(Keys.Namespace ns, String expected) { @@ -50,6 +53,7 @@ class KeysSpec extends Specification { AZURE_INSTANCES | "azureInstances" 
AZURE_VMIMAGES | "azureVmimages" AZURE_CUSTOMVMIMAGES | "azureCustomvmimages" + AZURE_MANAGEDIMAGES | "azureManagedimages" AZURE_NETWORKS | "azureNetworks" AZURE_SUBNETS | "azureSubnets" SECURITY_GROUPS | "securityGroups" @@ -62,5 +66,6 @@ class KeysSpec extends Specification { Keys.parse(AzureCloudProvider.ID, Keys.getServerGroupKey(AzureCloudProvider.ID, SERVER_GROUP_NAME, REGION, ACCOUNT)) == [provider: PROVIDER, type: AZURE_SERVER_GROUPS.ns, application: APP_NAME, serverGroup: SERVER_GROUP_NAME, account: ACCOUNT, region: REGION, detail: DETAIL, stack: STACK_NAME] Keys.parse(AzureCloudProvider.ID, Keys.getLoadBalancerKey(AzureCloudProvider.ID, LOAD_BALANCER_NAME , LOAD_BALANCER_ID, APP_NAME, CLUSTER_NAME, REGION, ACCOUNT )) == [provider: PROVIDER, type: AZURE_LOAD_BALANCERS.ns, application: APP_NAME, name: LOAD_BALANCER_NAME, id: LOAD_BALANCER_ID, cluster: CLUSTER_NAME, appname: APP_NAME, account: ACCOUNT, region: REGION] Keys.parse(AzureCloudProvider.ID, Keys.getInstanceKey(AzureCloudProvider.ID, SERVER_GROUP_NAME, INSTANCE, REGION, ACCOUNT)) == [provider: PROVIDER, type: AZURE_INSTANCES.ns, application: APP_NAME, serverGroup: SERVER_GROUP_NAME, name: INSTANCE, region: REGION, account: ACCOUNT] + Keys.parse(AzureCloudProvider.ID, Keys.getManagedVMImageKey(AzureCloudProvider.ID, ACCOUNT, REGION, RESOURCE_GROUP, VM_NAME, VM_OS_TYPE)) == [provider: PROVIDER, type: AZURE_MANAGEDIMAGES.ns, account: ACCOUNT, resourceGroup: RESOURCE_GROUP, region: REGION, name:VM_NAME, osType: VM_OS_TYPE] } } diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilitiesSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilitiesSpec.groovy index 46de787c4a8..44b36186b90 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilitiesSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/AzureUtilitiesSpec.groovy @@ -16,9 +16,21 @@ package com.netflix.spinnaker.clouddriver.azure.common +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.SerializationFeature +import com.netflix.spinnaker.clouddriver.azure.resources.common.model.KeyVaultSecret +import com.netflix.spinnaker.clouddriver.azure.templates.AzureServerGroupResourceTemplate import spock.lang.Specification class AzureUtilitiesSpec extends Specification { + ObjectMapper objectMapper + + void setup() { + objectMapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true) + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + } + def "CompareIpv4AddrPrefixes == 0"() { expect: AzureUtilities.compareIpv4AddrPrefixes(left, right) == 0 @@ -194,4 +206,33 @@ class AzureUtilitiesSpec extends Specification { '/subscriptions/***-***-***/resourceGroups/***/providers/Microsoft.Compute/virtualMachineScaleSets/vmss000/virtualMachines/0/networkInterfaces/nic1/ipConfigurations/ipc1' | 'virtualMachineScaleSets' } + def 'verify parameters JSON'() { + + def parameters = [:] + parameters[AzureServerGroupResourceTemplate.subnetParameterName] = subnetId + parameters[AzureServerGroupResourceTemplate.vmPasswordParameterName] = new KeyVaultSecret(secretName, subscriptionId, defaultResourceGroup, defaultVaultName) + String parametersJSON = AzureUtilities.convertParametersToTemplateJSON(objectMapper, parameters) + + expect: parametersJSON.replace('\r', '') 
== expectedParameters + } + + private static String expectedParameters = """{ + "subnetId" : { + "value" : "$subnetId" + }, + "vmPassword" : { + "reference" : { + "keyVault" : { + "id" : "/subscriptions/$subscriptionId/resourceGroups/$defaultResourceGroup/providers/Microsoft.KeyVault/vaults/$defaultVaultName" + }, + "secretName" : "$secretName" + } + } +}""" + + private static final String subscriptionId = "testSubscriptionID" + private static final String subnetId = "SubNetTestID" + private static final String defaultResourceGroup = "defaultResourceGroup" + private static final String defaultVaultName = "defaultKeyVault" + private static final String secretName = "VMPassword" } diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/client/AzureComputeClientSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/client/AzureComputeClientSpec.groovy deleted file mode 100644 index 3ba05d95903..00000000000 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/common/client/AzureComputeClientSpec.groovy +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright 2016 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.azure.common.client - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.microsoft.azure.credentials.ApplicationTokenCredentials -import com.microsoft.azure.management.compute.ComputeManagementClient -import com.microsoft.azure.management.compute.ComputeManagementClientImpl -import com.microsoft.azure.management.compute.VirtualMachineImagesOperations -import com.microsoft.azure.management.compute.models.VirtualMachineImageResource -import com.microsoft.rest.ServiceResponse -import com.netflix.spinnaker.clouddriver.azure.client.AzureComputeClient -import spock.lang.Shared -import spock.lang.Specification - -class AzureComputeClientSpec extends Specification{ - static final String AZURE_VMIMAGE_PUBLISHER = "publisher1" - static final String AZURE_VMIMAGE_OFFER = "offer2" - static final String AZURE_VMIMAGE_SKU = "sku3" - static final String AZURE_VMIMAGE_VERSION = "version4" - - @Shared - ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true) - - @Shared - ApplicationTokenCredentials credentials = Mock(ApplicationTokenCredentials) - - @Shared - ComputeManagementClient computeManagementClient = Mock(ComputeManagementClient) - - @Shared - AzureComputeClient azureComputeClient = Mock(AzureComputeClient) - - def setupSpec() { - VirtualMachineImageResource vmImagePublisher = Mock(VirtualMachineImageResource) - VirtualMachineImageResource vmImageOffer = Mock(VirtualMachineImageResource) - VirtualMachineImageResource vmImageSKU = Mock(VirtualMachineImageResource) - VirtualMachineImageResource vmImageVersion = Mock(VirtualMachineImageResource) - ServiceResponse> srPublisher = Mock(ServiceResponse) - 
ServiceResponse> srOffer = Mock(ServiceResponse) - ServiceResponse> srSKU = Mock(ServiceResponse) - ServiceResponse> srVersion = Mock(ServiceResponse) - VirtualMachineImagesOperations ops = Mock(VirtualMachineImagesOperations) - computeManagementClient.getVirtualMachineImagesOperations() >> ops - azureComputeClient = new AzureComputeClient("subscriptionId", computeManagementClient) - - ops.listPublishers(_) >> srPublisher - srPublisher.body >> [vmImagePublisher] - vmImagePublisher.name >> AZURE_VMIMAGE_PUBLISHER - - ops.listOffers(_,_) >> srOffer - srOffer.body >> [vmImageOffer, vmImageOffer] - vmImageOffer.name >> AZURE_VMIMAGE_OFFER - - ops.listSkus(_,_,_) >> srSKU - srSKU.body >> [vmImageSKU, vmImageSKU, vmImageSKU] - vmImageSKU.name >> AZURE_VMIMAGE_SKU - - ops.list(_,_,_,_,_,_,_) >> srVersion - srVersion.body >> [vmImageVersion, vmImageVersion, vmImageVersion, vmImageVersion] - vmImageVersion.name >> AZURE_VMIMAGE_VERSION - } - - void "Get all VM images"() { - setup: - - when: - def vmImages = mapper.writeValueAsString(azureComputeClient.getVMImagesAll("westus")) - - then: - vmImages == expectedFullListVMImages - } - - void "Create an AzureComputeClient instance"() { - setup: - - when: - def azureComputeClient = new AzureComputeClient("subscriptionId", credentials, "") - - then: - azureComputeClient instanceof AzureComputeClient - //true - } - - void "List all the Azure VMImage publishers"() { - setup: - - when: - def vmImages = computeManagementClient.getVirtualMachineImagesOperations().listPublishers("westus").body.collect { it.name} - - then: - vmImages == [AZURE_VMIMAGE_PUBLISHER] - } - - void "List all the Azure VMImage offers"() { - setup: - - when: - def vmImages = computeManagementClient.getVirtualMachineImagesOperations().listOffers("westus", "publisher").body.collect { it.name} - - then: - vmImages == [AZURE_VMIMAGE_OFFER, AZURE_VMIMAGE_OFFER] - } - - void "List all the Azure VMImage SKUs"() { - setup: - - when: - def vmImages = computeManagementClient.getVirtualMachineImagesOperations().listSkus("westus", "publisher", "sku").body.collect { it.name} - - then: - vmImages == [AZURE_VMIMAGE_SKU, AZURE_VMIMAGE_SKU, AZURE_VMIMAGE_SKU] - } - - void "List all the Azure VMImage versions"() { - setup: - - when: - def vmImages = computeManagementClient.getVirtualMachineImagesOperations().list("westus", "publisher", "offer", "sku", null, 100, "name").body.collect { it.name} - - then: - vmImages == [AZURE_VMIMAGE_VERSION, AZURE_VMIMAGE_VERSION, AZURE_VMIMAGE_VERSION, AZURE_VMIMAGE_VERSION] - } - - private static String expectedFullListVMImages = '''[ { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : 
"offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -}, { - "publisher" : "publisher1", - "offer" : "offer2", - "sku" : "sku3", - "version" : "version4" -} ]''' - -} diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicatorSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicatorSpec.groovy new file mode 100644 index 00000000000..4b833f9a0b4 --- /dev/null +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/health/AzureHealthIndicatorSpec.groovy @@ -0,0 +1,145 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.azure.health + +import com.netflix.spinnaker.clouddriver.azure.config.AzureConfigurationProperties +import com.netflix.spinnaker.clouddriver.azure.client.AzureResourceManagerClient +import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials +import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import org.springframework.boot.actuate.health.Status +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +class AzureHealthIndicatorSpec extends Specification { + + @Shared + AccountCredentialsProvider accountCredentialsProvider + + private void setupMocks(def mockResourceManager, def mockCredentials, def mockAccountCredentials) { + // Inject the mocks into the private credentials fields via reflection. + def credentialsField = AzureNamedAccountCredentials.getDeclaredField("credentials") + credentialsField.accessible = true + credentialsField.set(mockAccountCredentials, mockCredentials) + + def resourceManagerField = AzureCredentials.getDeclaredField("resourceManagerClient") + resourceManagerField.accessible = true + resourceManagerField.set(mockCredentials, mockResourceManager) + + accountCredentialsProvider = Mock(AccountCredentialsProvider) + accountCredentialsProvider.all >> [mockAccountCredentials] + } + + @Unroll + def "health succeeds when azure is reachable"() { + setup: + def mockResourceManager = Mock(AzureResourceManagerClient) + def mockCredentials = Mock(AzureCredentials) + def mockAccountCredentials = Mock(AzureNamedAccountCredentials) + setupMocks(mockResourceManager, mockCredentials, mockAccountCredentials) + + def indicator = new AzureHealthIndicator(azureConfigurationProperties: new AzureConfigurationProperties()) + indicator.accountCredentialsProvider = accountCredentialsProvider + + when: + indicator.checkHealth() + def health = indicator.health() + + then: + health.status == Status.UP + health.details.isEmpty() + } + + @Unroll + def "health fails when azure is unreachable - verifyAccountHealth:true"() { + setup: + def mockResourceManager = Mock(AzureResourceManagerClient) + def mockCredentials = Mock(AzureCredentials) + def mockAccountCredentials = Mock(AzureNamedAccountCredentials) + setupMocks(mockResourceManager, mockCredentials, mockAccountCredentials) + + def indicator = new AzureHealthIndicator(azureConfigurationProperties: new AzureConfigurationProperties()) + indicator.accountCredentialsProvider = accountCredentialsProvider + + when: + mockResourceManager.healthCheck() >> { throw new IOException("Azure is unreachable") } + indicator.checkHealth() + indicator.health() + + then: + thrown(AzureHealthIndicator.AzureIOException) + } + + @Unroll + def "health fails when no azure credentials are found - verifyAccountHealth:true"() { + setup: + accountCredentialsProvider = Mock(AccountCredentialsProvider) + accountCredentialsProvider.all >> [] + + def indicator = new AzureHealthIndicator(azureConfigurationProperties: new AzureConfigurationProperties()) + indicator.accountCredentialsProvider = accountCredentialsProvider + + when: + indicator.checkHealth() + indicator.health() + + then: + thrown(AzureHealthIndicator.AzureCredentialsNotFoundException) + } + + @Unroll + def "health succeeds when verifyAccountHealth flag is disabled"() { + setup: + def azureConfigProps = new AzureConfigurationProperties() + azureConfigProps.health.verifyAccountHealth = false + + def indicator = new AzureHealthIndicator(azureConfigurationProperties: 
azureConfigProps) + indicator.accountCredentialsProvider = accountCredentialsProvider + + when: + indicator.checkHealth() + def health = indicator.health() + + then: + health.status == Status.UP + health.details.isEmpty() + } + + @Unroll + def "health succeeds when azure is unreachable - verifyAccountHealth:false"() { + setup: + def mockResourceManager = Mock(AzureResourceManagerClient) + def mockCredentials = Mock(AzureCredentials) + def mockAccountCredentials = Mock(AzureNamedAccountCredentials) + setupMocks(mockResourceManager, mockCredentials, mockAccountCredentials) + + def azureConfigProps = new AzureConfigurationProperties() + azureConfigProps.health.verifyAccountHealth = false + def indicator = new AzureHealthIndicator(azureConfigurationProperties: azureConfigProps) + indicator.accountCredentialsProvider = accountCredentialsProvider + + when: + mockResourceManager.healthCheck() >> { throw new IOException("Azure is unreachable") } + indicator.checkHealth() + def health = indicator.health() + + then: + health.status == Status.UP + health.details.isEmpty() + } +} diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/converters/UpsertAzureAppGatewayAtomicOperationConverterSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/converters/UpsertAzureAppGatewayAtomicOperationConverterSpec.groovy index a4b415dc2a6..3576d9c2d08 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/converters/UpsertAzureAppGatewayAtomicOperationConverterSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/converters/UpsertAzureAppGatewayAtomicOperationConverterSpec.groovy @@ -19,7 +19,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.deploy.conv import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription -import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.converters.UpsertAzureAppGatewayAtomicOperationConverter +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.converters.UpsertAzureLoadBalancerAtomicOperationConverter import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import spock.lang.Shared @@ -29,11 +29,10 @@ class UpsertAzureAppGatewayAtomicOperationConverterSpec extends Specification{ @Shared ObjectMapper mapper = new ObjectMapper() - - @Shared UpsertAzureAppGatewayAtomicOperationConverter converter + @Shared UpsertAzureLoadBalancerAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertAzureAppGatewayAtomicOperationConverter(objectMapper: mapper) + this.converter = new UpsertAzureLoadBalancerAtomicOperationConverter(objectMapper: mapper) def accountCredentialsProvider = Mock(AccountCredentialsProvider) def mockCredentials = Mock(AzureNamedAccountCredentials) accountCredentialsProvider.getCredentials(_) >> mockCredentials @@ -44,47 +43,30 @@ class UpsertAzureAppGatewayAtomicOperationConverterSpec extends Specification{ setup: mapper.configure(SerializationFeature.INDENT_OUTPUT, true) mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) - def input = [ - name: "testappgw-lb1-d1", - 
loadBalancerName: "testappgw-lb1-d1", - region: "westus", - accountName: "myazure-account", - cloudProvider: "azure", - appName: "testappgw", - stack: "lb1", - detail: "d1", - probes: [ - [ - probeName: "healthcheck1", - probeProtocol: "HTTP", - probePath: "/healthcheck", - probeInterval: 120, - timeout: 30, - unhealthyThreshold: 8 - ] - ], - loadBalancingRules: [ - [ - ruleName: "lbRule1", - protocol: "HTTP", - externalPort: 80, - backendPort: 8080, - ], - [ - ruleName: "lbRule2", - protocol: "HTTP", - externalPort: 8080, - backendPort: 8080, - ] - ] - ] when: - def description = converter.convertDescription(input) + def description = converter.convertDescription(basicGatewayInput) + + then: + description instanceof AzureAppGatewayDescription + mapper.writeValueAsString(description).replace('\r', '') == expectedFullDescription + } + + void "Create an AzureAppGatewayDescription from a given input with v2 sku"() { + setup: + mapper.configure(SerializationFeature.INDENT_OUTPUT, true) + mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) + + def input = basicGatewayInput + input['sku'] = 'Standard_v2' + input['tier'] = 'Standard_v2' + + when: + def description = converter.convertDescription(basicGatewayInput) then: description instanceof AzureAppGatewayDescription - mapper.writeValueAsString(description) == expectedFullDescription + mapper.writeValueAsString(description).replace('\r', '') == expectedFullDescriptionV2sku } private static String expectedFullDescription = '''{ @@ -139,4 +121,93 @@ class UpsertAzureAppGatewayAtomicOperationConverterSpec extends Specification{ "tier" : "Standard", "capacity" : 2 }''' + + private static String expectedFullDescriptionV2sku = '''{ + "name" : "testappgw-lb1-d1", + "cloudProvider" : "azure", + "accountName" : "myazure-account", + "appName" : "testappgw", + "stack" : "lb1", + "detail" : "d1", + "credentials" : null, + "region" : "westus", + "user" : null, + "createdTime" : null, + "lastReadTime" : 0, + "tags" : { }, + "loadBalancerName" : "testappgw-lb1-d1", + "vnet" : null, + "subnet" : null, + "subnetResourceId" : null, + "vnetResourceGroup" : null, + "hasNewSubnet" : null, + "useDefaultVnet" : false, + "securityGroup" : null, + "dnsName" : null, + "cluster" : null, + "serverGroups" : null, + "trafficEnabledSG" : null, + "publicIpName" : null, + "probes" : [ { + "probeName" : "healthcheck1", + "probeProtocol" : "HTTP", + "probePort" : "localhost", + "probePath" : "/healthcheck", + "probeInterval" : 120, + "timeout" : 30, + "unhealthyThreshold" : 8 + } ], + "loadBalancingRules" : [ { + "ruleName" : "lbRule1", + "protocol" : "HTTP", + "externalPort" : 80, + "backendPort" : 8080, + "sslCertificate" : null + }, { + "ruleName" : "lbRule2", + "protocol" : "HTTP", + "externalPort" : 8080, + "backendPort" : 8080, + "sslCertificate" : null + } ], + "sku" : "Standard_v2", + "tier" : "Standard_v2", + "capacity" : 2 +}''' + + private static final basicGatewayInput = [ + name : "testappgw-lb1-d1", + loadBalancerName : "testappgw-lb1-d1", + loadBalancerType : "Azure Application Gateway", + region : "westus", + accountName : "myazure-account", + cloudProvider : "azure", + appName : "testappgw", + stack : "lb1", + detail : "d1", + probes : [ + [ + probeName : "healthcheck1", + probeProtocol : "HTTP", + probePath : "/healthcheck", + probeInterval : 120, + timeout : 30, + unhealthyThreshold: 8 + ] + ], + loadBalancingRules: [ + [ + ruleName : "lbRule1", + protocol : "HTTP", + externalPort: 80, + backendPort : 8080, + ], + [ + ruleName : "lbRule2", + 
protocol : "HTTP", + externalPort: 8080, + backendPort : 8080, + ] + ] + ] } diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/DeleteAzureAppGatewayAtomicOperationSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/DeleteAzureAppGatewayAtomicOperationSpec.groovy index 56e7066bac2..9b625f77b9a 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/DeleteAzureAppGatewayAtomicOperationSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/DeleteAzureAppGatewayAtomicOperationSpec.groovy @@ -53,7 +53,7 @@ class DeleteAzureAppGatewayAtomicOperationSpec extends Specification{ then: operation - mapper.writeValueAsString(description) == expectedFullDescription + mapper.writeValueAsString(description).replace('\r', '') == expectedFullDescription } private static String expectedFullDescription = '''{ diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/UpsertAzureAppGatewayAtomicOperationSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/UpsertAzureAppGatewayAtomicOperationSpec.groovy index 317a82307b2..f6e0578c105 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/UpsertAzureAppGatewayAtomicOperationSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/ops/UpsertAzureAppGatewayAtomicOperationSpec.groovy @@ -19,8 +19,8 @@ package com.netflix.spinnaker.clouddriver.azure.resources.appgateway.deploy.ops import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.model.AzureAppGatewayDescription -import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.converters.UpsertAzureAppGatewayAtomicOperationConverter import com.netflix.spinnaker.clouddriver.azure.resources.appgateway.ops.UpsertAzureAppGatewayAtomicOperation +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.converters.UpsertAzureLoadBalancerAtomicOperationConverter import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import spock.lang.Shared @@ -30,10 +30,10 @@ class UpsertAzureAppGatewayAtomicOperationSpec extends Specification{ @Shared ObjectMapper mapper = new ObjectMapper() - @Shared UpsertAzureAppGatewayAtomicOperationConverter converter + @Shared UpsertAzureLoadBalancerAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertAzureAppGatewayAtomicOperationConverter(objectMapper: mapper) + this.converter = new UpsertAzureLoadBalancerAtomicOperationConverter(objectMapper: mapper) def accountCredentialsProvider = Mock(AccountCredentialsProvider) def mockCredentials = Mock(AzureNamedAccountCredentials) accountCredentialsProvider.getCredentials(_) >> mockCredentials @@ -44,7 +44,7 @@ class UpsertAzureAppGatewayAtomicOperationSpec extends Specification{ setup: mapper.configure(SerializationFeature.INDENT_OUTPUT, true) mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) - def input = '''{ "cloudProvider" : "azure", 
"appName" : "testappgw", "loadBalancerName" : "testappgw-lb1-d1", "stack" : "lb1", "detail" : "d1", "credentials" : "myazure-account", "region" : "westus", "probes" : [ { "probeName" : "healthcheck1", "probeProtocol" : "HTTP", "probePath" : "/healthcheck", "probeInterval" : 120, "unhealthyThreshold" : 8, "timeout" : 30 } ], "loadBalancingRules" : [ { "ruleName" : "lbRule1", "protocol" : "HTTP", "externalPort" : "80", "backendPort" : "8080" } ], "name" : "testappgw-lb1-d1", "user" : "[anonymous]" }''' + def input = '''{ "cloudProvider" : "azure", "appName" : "testappgw", "loadBalancerName" : "testappgw-lb1-d1", "loadBalancerType" : "Azure Application Gateway", "stack" : "lb1", "detail" : "d1", "credentials" : "myazure-account", "region" : "westus", "probes" : [ { "probeName" : "healthcheck1", "probeProtocol" : "HTTP", "probePath" : "/healthcheck", "probeInterval" : 120, "unhealthyThreshold" : 8, "timeout" : 30 } ], "loadBalancingRules" : [ { "ruleName" : "lbRule1", "protocol" : "HTTP", "externalPort" : "80", "backendPort" : "8080" } ], "name" : "testappgw-lb1-d1", "user" : "[anonymous]" }''' when: UpsertAzureAppGatewayAtomicOperation operation = converter.convertOperation(mapper.readValue(input, Map)) @@ -52,7 +52,7 @@ class UpsertAzureAppGatewayAtomicOperationSpec extends Specification{ then: operation - mapper.writeValueAsString(description) == expectedFullDescription + mapper.writeValueAsString(description).replace('\r', '') == expectedFullDescription } private static String expectedFullDescription = '''{ diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/template/AzureAppGatewayResourceTemplateSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/template/AzureAppGatewayResourceTemplateSpec.groovy index 1348fb7efa3..e91609ad783 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/template/AzureAppGatewayResourceTemplateSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/appgateway/deploy/template/AzureAppGatewayResourceTemplateSpec.groovy @@ -32,7 +32,7 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { def 'generate an Azure Application Gateway resource template using a description object'() { String template = AzureAppGatewayResourceTemplate.getTemplate(description) - expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedFullTemplate + expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplate } def 'generate an Azure Application Gateway resource template using a minimal description object'() { @@ -43,7 +43,20 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { String template = AzureAppGatewayResourceTemplate.getTemplate(description) - expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedMinimalTemplate + expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedMinimalTemplate + } + + def 'generate an Azure Application Gateway resource template with v2 sku'() { + description = new AzureAppGatewayDescription() + description.name = 'testappgw-lb1-d1' + description.vnet = 'vnet-testappgw-westus' + description.subnet = 'subnet-testappgw-lb1-d1' + description.sku = 'Standard_v2' + description.tier = 
'Standard_v2' + + String template = AzureAppGatewayResourceTemplate.getTemplate(description) + + expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedMinimalTemplateV2sku } def 'should fail to generate an Azure Application Gateway resource template using a description object with no name'() { @@ -138,7 +151,7 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { } }, "variables" : { - "apiVersion" : "2015-06-15", + "apiVersion" : "2018-04-01", "appGwName" : "testappgw-lb1-d1", "publicIPAddressName" : "pip-testappgw-lb1-d1", "dnsNameForLBIP" : "[concat('dns-', uniqueString(concat(resourceGroup().id, subscription().id, 'testappgwlb1d1')))]", @@ -156,6 +169,9 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { "tags" : null, "properties" : { "publicIPAllocationMethod" : "[variables('publicIPAddressType')]" + }, + "sku" : { + "name" : "Basic" } }, { "apiVersion" : "[variables('apiVersion')]", @@ -170,12 +186,11 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { "stack" : "lb1", "detail" : "d1", "cluster" : "testappgw-sg1-d1", - "serverGroups" : "testappgw-sg1-d1-v000 testappgw-sg1-d1-v001", "vnet" : "vnet-testappgw-westus", "subnet" : "subnet-testappgw-lb1-d1", "vnetResourceGroup" : null }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], + "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/',variables('publicIPAddressName'))]" ], "properties" : { "sku" : { "name" : "Standard_Small", @@ -312,7 +327,7 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { } }, "variables" : { - "apiVersion" : "2015-06-15", + "apiVersion" : "2018-04-01", "appGwName" : "testappgw-lb1-d1", "publicIPAddressName" : "pip-testappgw-lb1-d1", "dnsNameForLBIP" : "[concat('dns-', uniqueString(concat(resourceGroup().id, subscription().id, 'testappgwlb1d1')))]", @@ -330,6 +345,9 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { "tags" : null, "properties" : { "publicIPAllocationMethod" : "[variables('publicIPAddressType')]" + }, + "sku" : { + "name" : "Basic" } }, { "apiVersion" : "[variables('apiVersion')]", @@ -342,7 +360,7 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { "subnet" : "subnet-testappgw-lb1-d1", "vnetResourceGroup" : null }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], + "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/',variables('publicIPAddressName'))]" ], "properties" : { "sku" : { "name" : "Standard_Small", @@ -377,4 +395,84 @@ class AzureAppGatewayResourceTemplateSpec extends Specification { } ] }''' + private static String expectedMinimalTemplateV2sku = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + } + }, + "variables" : { + "apiVersion" : "2018-04-01", + "appGwName" : "testappgw-lb1-d1", + "publicIPAddressName" : "pip-testappgw-lb1-d1", + "dnsNameForLBIP" : "[concat('dns-', uniqueString(concat(resourceGroup().id, subscription().id, 'testappgwlb1d1')))]", + "appGwSubnetID" : null, + "publicIPAddressType" : "Dynamic", + "publicIPAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]", + "appGwID" : 
"[resourceId('Microsoft.Network/applicationGateways',variables('appGwName'))]", + "appGwBeAddrPoolName" : "default_BAP0" + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[variables('publicIPAddressName')]", + "type" : "Microsoft.Network/publicIPAddresses", + "location" : "[parameters('location')]", + "tags" : null, + "properties" : { + "publicIPAllocationMethod" : "[variables('publicIPAddressType')]" + }, + "sku" : { + "name" : "Basic" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[variables('appGwName')]", + "type" : "Microsoft.Network/applicationGateways", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "vnet" : "vnet-testappgw-westus", + "subnet" : "subnet-testappgw-lb1-d1", + "vnetResourceGroup" : null + }, + "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/',variables('publicIPAddressName'))]" ], + "properties" : { + "sku" : { + "name" : "Standard_v2", + "tier" : "Standard_v2", + "capacity" : "2" + }, + "gatewayIPConfigurations" : [ { + "name" : "appGwIpConfig", + "properties" : { + "subnet" : { + "id" : "[variables('appGwSubnetID')]" + } + } + } ], + "frontendIPConfigurations" : [ { + "name" : "appGwFrontendIP", + "properties" : { + "publicIPAddress" : { + "id" : "[variables('publicIPAddressID')]" + } + } + } ], + "frontendPorts" : [ ], + "backendAddressPools" : [ { + "name" : "default_BAP0" + } ], + "backendHttpSettingsCollection" : [ ], + "httpListeners" : [ ], + "requestRoutingRules" : [ ], + "probes" : [ ] + } + } ] +}''' + } diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/converters/UpsertAzureLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/converters/UpsertAzureLoadBalancerAtomicOperationConverterUnitSpec.groovy index 6237c2fc2b7..1f0f6ab5ea0 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/converters/UpsertAzureLoadBalancerAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/converters/UpsertAzureLoadBalancerAtomicOperationConverterUnitSpec.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.deploy.converters import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancerDescription @@ -77,6 +78,7 @@ class UpsertAzureLoadBalancerAtomicOperationConverterUnitSpec extends Specificat setup: def input = [ loadBalancerName: LOAD_BALANCER_NAME, + loadBalancerType: AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString(), region: REGION, accountName: ACCOUNT_NAME, cloudProvider: CLOUD_PROVIDER, diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/ops/UpsertAzureLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/ops/UpsertAzureLoadBalancerAtomicOperationUnitSpec.groovy index 
cbb8bffbda7..eaba1cab233 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/ops/UpsertAzureLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/ops/UpsertAzureLoadBalancerAtomicOperationUnitSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.deploy.ops import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.converters.UpsertAzureLoadBalancerAtomicOperationConverter @@ -80,6 +81,7 @@ class UpsertAzureLoadBalancerAtomicOperationUnitSpec extends Specification { setup: def input = [ loadBalancerName: LOAD_BALANCER_NAME, + loadBalancerType: AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString(), region: REGION, accountName: ACCOUNT_NAME, cloudProvider: CLOUD_PROVIDER, diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/templates/AzureLoadBalancerResourceTemplateSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/templates/AzureLoadBalancerResourceTemplateSpec.groovy index d0bb93e715f..692c99bd78b 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/templates/AzureLoadBalancerResourceTemplateSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/templates/AzureLoadBalancerResourceTemplateSpec.groovy @@ -28,8 +28,7 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { def 'should generate correct LoadBalancer create template'(){ String template = AzureLoadBalancerResourceTemplate.getTemplate(description) - - expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedFullTemplate + expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplate } AzureLoadBalancerDescription createDescription(){ @@ -94,13 +93,12 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { } }, "variables" : { - "apiVersion" : "2015-05-01-preview", + "apiVersion" : "2018-08-01", "loadBalancerName" : "azuremasm-st1-d11", "virtualNetworkName" : "vnet-azuremasm-westus", "publicIPAddressName" : "pip-azuremasm-st1-d11", - "publicIPAddressType" : "Dynamic", "loadBalancerFrontEnd" : "fe-azuremasm-st1-d11", - "loadBalancerBackEnd" : "be-azuremasm-st1-d11", + "loadBalancerBackEnd" : "default_LB_BAP", "dnsNameForLBIP" : "[concat('dns-', uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11')))]", "ipConfigName" : "ipc-azuremasm-st1-d11", "loadBalancerID" : "[resourceID('Microsoft.Network/loadBalancers',variables('loadBalancerName'))]", @@ -115,10 +113,13 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { "location" : "[parameters('location')]", "tags" : null, "properties" : { - "publicIPAllocationMethod" : "[variables('publicIPAddressType')]", + "publicIPAllocationMethod" : "Static", "dnsSettings" : { "domainNameLabel" : "[variables('dnsNameForLBIP')]" } 
+ }, + "sku" : { + "name" : "Standard" + } }, { "apiVersion" : "[variables('apiVersion')]", @@ -129,10 +130,7 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { "appName" : "azuremasm", "stack" : "st1", "detail" : "d11", - "createdTime" : "1234567890", - "securityGroup" : "azuremasm-sg1", - "vnet" : "azuremasm-vnet-westus", - "subnet" : "azuremasm-subnet-westus" + "createdTime" : "1234567890" }, "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/',variables('publicIPAddressName'))]" ], "properties" : { @@ -145,7 +143,7 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { } } ], "backendAddressPools" : [ { - "name" : "[variables('loadBalancerBackEnd')]" + "name" : "default_LB_BAP" } ], "loadBalancingRules" : [ { "name" : "lbrule1", @@ -159,9 +157,11 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { "protocol" : "tcp", "frontendPort" : 80, "backendPort" : 80, + "idleTimeoutInMinutes" : 4, "probe" : { "id" : "[concat(variables('loadBalancerID'),'/probes/healthcheck1')]" - } + }, + "loadDistribution" : null } } ], "probes" : [ { @@ -174,6 +174,9 @@ class AzureLoadBalancerResourceTemplateSpec extends Specification { }, "name" : "healthcheck1" } ] + }, + "sku" : { + "name" : "Standard" + } } ] }''' diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/validators/UpsertAzureLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/validators/UpsertAzureLoadBalancerDescriptionValidatorSpec.groovy index 830b83debe9..4f4743b4314 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/validators/UpsertAzureLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/deploy/validators/UpsertAzureLoadBalancerDescriptionValidatorSpec.groovy @@ -17,14 +17,15 @@ package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.deploy.validators import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials import com.netflix.spinnaker.clouddriver.azure.security.AzureNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.converters.UpsertAzureLoadBalancerAtomicOperationConverter import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.ops.validators.UpsertAzureLoadBalancerDescriptionValidator -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification @@ -81,7 +82,10 @@ class UpsertAzureLoadBalancerDescriptionValidatorSpec extends Specification { UpsertAzureLoadBalancerDescriptionValidator validator void setupSpec() { - azureCredentials = new AzureCredentials(ACCOUNT_CLIENTID, ACCOUNT_TENANTID, ACCOUNT_APPKEY, SUBSCRIPTION_ID, DEFAULT_KEY_VAULT, DEFAULT_RESOURCE_GROUP, "") + // GroovyMock is necessary for AzureCredentials because it's a groovy class + // See 
https://stackoverflow.com/questions/34121999/mock-final-class-in-spock + azureCredentials = GroovyMock(AzureCredentials) + azureCredentials.appKey >> ACCOUNT_APPKEY def credentialsRepo = new MapBackedAccountCredentialsRepository() def credentials = Mock(AzureNamedAccountCredentials) @@ -100,6 +104,7 @@ class UpsertAzureLoadBalancerDescriptionValidatorSpec extends Specification { setup: def input = [ loadBalancerName: LOAD_BALANCER_NAME, + loadBalancerType: AzureLoadBalancer.AzureLoadBalancerType.AZURE_LOAD_BALANCER.toString(), region: REGION, accountName: ACCOUNT_NAME, cloudProvider: CLOUD_PROVIDER, @@ -147,7 +152,7 @@ class UpsertAzureLoadBalancerDescriptionValidatorSpec extends Specification { def description = converter.convertDescription(input) description.credentials = azureCredentials - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescriptionSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescriptionSpec.groovy index a191b5410f0..affd633e054 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescriptionSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/network/model/AzureVirtualNetworkDescriptionSpec.groovy @@ -16,9 +16,9 @@ package com.netflix.spinnaker.clouddriver.azure.resources.network.model +import com.azure.resourcemanager.network.fluent.models.VirtualNetworkInner import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature -import com.microsoft.azure.management.network.models.VirtualNetwork import spock.lang.Shared import spock.lang.Specification @@ -27,7 +27,7 @@ class AzureVirtualNetworkDescriptionSpec extends Specification { @Shared ObjectMapper mapper = new ObjectMapper() - VirtualNetwork vnet + VirtualNetworkInner vnet void "Create a simple AzureVirtualNetworkDescription from a given input"() { setup: @@ -39,14 +39,14 @@ class AzureVirtualNetworkDescriptionSpec extends Specification { location: "westus", id: "vnet-test-westus-id", ] - def vnet = mapper.convertValue(input, VirtualNetworkInner) as VirtualNetworkInner + def vnet = mapper.convertValue(input, VirtualNetworkInner) as VirtualNetworkInner when: def vnetDescription = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(vnet) then: vnetDescription instanceof AzureVirtualNetworkDescription - mapper.writeValueAsString(vnetDescription) == expectedSimpleDescription + mapper.writeValueAsString(vnetDescription).replace('\r', '') == expectedSimpleDescription } void "Create a full AzureVirtualNetworkDescription from a given input and calculate next subnet address prefix"() { setup: mapper.configure(SerializationFeature.INDENT_OUTPUT, true) mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) - def input = [ - name: "vnet-test-westus", - location: "westus", - id: "vnet-test-westus-id", - "properties.addressSpace": [ - addressPrefixes: ["10.0.0.0/8"] - ], - "properties.subnets": [ - [ - name: "vnet-test-westus-subnet-10_0_1_0_24", - "properties.addressPrefix": "10.0.1.0/24" - ], - [ - name: "vnet-test-westus-subnet-10_0_2_0_24", - "properties.addressPrefix": "10.0.2.0/24" - ], - [ 
- name: "vnet-test-westus-subnet-10_0_30_0_24", - "properties.addressPrefix": "10.0.30.0/24" - ] - ], - ] - def vnet = mapper.convertValue(input, VirtualNetwork) as VirtualNetwork + String input = + ''' + { + "location":"westus", + "name":"vnet-test-westus", + "id":"vnet-test-westus-id", + "properties":{ + "addressSpace":{ + "addressPrefixes":[ + "10.0.0.0/8" + ] + }, + "subnets":[ + { + "properties":{ + "addressPrefix":"10.0.1.0/24" + }, + "name":"vnet-test-westus-subnet-10_0_1_0_24" + }, + { + "properties":{ + "addressPrefix":"10.0.2.0/24" + }, + "name":"vnet-test-westus-subnet-10_0_2_0_24" + }, + { + "properties":{ + "addressPrefix":"10.0.30.0/24" + }, + "name":"vnet-test-westus-subnet-10_0_30_0_24" + } + ] + } + } + ''' + + def vnet = mapper.readValue(input, VirtualNetworkInner.class) when: def vnetDescription = AzureVirtualNetworkDescription.getDescriptionForVirtualNetwork(vnet) diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/deploy/templates/AzureSecurityGroupResourceTemplateSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/deploy/templates/AzureSecurityGroupResourceTemplateSpec.groovy index fe48374ab62..deaa7719abc 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/deploy/templates/AzureSecurityGroupResourceTemplateSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/securitygroup/deploy/templates/AzureSecurityGroupResourceTemplateSpec.groovy @@ -32,7 +32,7 @@ class AzureSecurityGroupResourceTemplateSpec extends Specification { def 'should generate a correct Azure Security Group create template'(){ String template = AzureSecurityGroupResourceTemplate.getTemplate(description) - expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedFullTemplate + expect: template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplate } UpsertAzureSecurityGroupDescription createNoRulesDescription(){ @@ -88,13 +88,46 @@ class AzureSecurityGroupResourceTemplateSpec extends Specification { "metadata" : { "description" : "Location to deploy" } + }, + "networkSecurityGroupName" : { + "type" : "string", + "metadata" : { + "description" : "The NSG name" + } + }, + "networkSecurityGroupResourceGroupName" : { + "type" : "string", + "metadata" : { + "description" : "The resource group name of NSG" + } + }, + "virtualNetworkName" : { + "type" : "string", + "defaultValue" : "", + "metadata" : { + "description" : "The Virtual Network name" + } + }, + "virtualNetworkResourceGroupName" : { + "type" : "string", + "defaultValue" : "", + "metadata" : { + "description" : "The resource group name of Virtual Network" + } + }, + "subnetName" : { + "type" : "string", + "defaultValue" : "", + "metadata" : { + "description" : "The subnet name" + } } }, "variables" : { "securityGroupName" : "azuremasm-sg1-d11" }, "resources" : [ { - "apiVersion" : "2015-05-01-preview", + "apiVersion" : "2018-11-01", "name" : "[variables('securityGroupName')]", "type" : "Microsoft.Network/networkSecurityGroups", "location" : "[parameters('location')]", @@ -113,10 +146,12 @@ class AzureSecurityGroupResourceTemplateSpec extends Specification { "access" : "Allow", "destinationAddressPrefix" : "*", "destinationPortRange" : "433", + "destinationPortRanges" : null, "direction" : "Inbound", "priority" : 100, "protocol" : 
"TCP", "sourceAddressPrefix" : "10.0.0.0/24", + "sourceAddressPrefixes" : null, "sourcePortRange" : "*" } }, { @@ -126,10 +161,12 @@ class AzureSecurityGroupResourceTemplateSpec extends Specification { "access" : "Deny", "destinationAddressPrefix" : "*", "destinationPortRange" : "3389", + "destinationPortRanges" : null, "direction" : "Inbound", "priority" : 101, "protocol" : "TCP", "sourceAddressPrefix" : "Internet", + "sourceAddressPrefixes" : null, "sourcePortRange" : "*" } } ] diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstanceSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstanceSpec.groovy index f2663313d95..cd9c260a0b7 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstanceSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroup/model/AzureInstanceSpec.groovy @@ -16,10 +16,10 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model -import com.microsoft.azure.management.compute.models.InstanceViewStatus -import com.microsoft.azure.management.compute.models.Sku -import com.microsoft.azure.management.compute.models.VirtualMachineInstanceView -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSetVM +import com.azure.resourcemanager.compute.models.InstanceViewStatus +import com.azure.resourcemanager.compute.models.Sku +import com.azure.resourcemanager.compute.models.VirtualMachineInstanceView +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetVM import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities import com.netflix.spinnaker.clouddriver.model.HealthState import spock.lang.Specification @@ -29,20 +29,21 @@ class AzureInstanceSpec extends Specification { def 'should generate a correctly structured instance'(){ def vm = Mock(VirtualMachineScaleSetVM) def instanceView = Mock(VirtualMachineInstanceView) - def sku = Mock(Sku) + def sku = new Sku() def provisioningStatus = new InstanceViewStatus() - provisioningStatus.code = 'ProvisioningState/' + AzureUtilities.ProvisioningState.SUCCEEDED + provisioningStatus.withCode( 'ProvisioningState/' + AzureUtilities.ProvisioningState.SUCCEEDED) def powerStatus = new InstanceViewStatus() - powerStatus.code = 'PowerState/Running' + powerStatus.withCode( 'PowerState/Running') List statuses = [provisioningStatus, powerStatus] - vm.instanceView >> instanceView - instanceView.statuses >> statuses - vm.sku >> sku - sku.name >> "test" + vm.instanceView() >> instanceView + vm.instanceView().statuses() >> statuses + + vm.sku() >> sku + sku.name() >> "test" def instance = AzureInstance.build(vm) diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/AzureServerGroupResourceTemplateSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/AzureServerGroupResourceTemplateSpec.groovy new file mode 100644 index 00000000000..47138e3c910 --- /dev/null +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/AzureServerGroupResourceTemplateSpec.groovy @@ -0,0 +1,3813 @@ +/* + * Copyright 2016 The original authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.azure.resources.servergroups.deploy + +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.SerializationFeature +import com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.model.AzureLoadBalancer +import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureNamedImage +import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials +import com.netflix.spinnaker.clouddriver.azure.templates.AzureServerGroupResourceTemplate +import spock.lang.Specification + +class AzureServerGroupResourceTemplateSpec extends Specification { + ObjectMapper objectMapper + AzureServerGroupDescription description + static AzureCredentials azureCredentials + + def setupSpec() { + azureCredentials = GroovyMock(AzureCredentials) + } + void setup() { + description = createDescription(false) + objectMapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true) + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + } + + def 'should generate correct ServerGroup resource template'() { + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplate + } + + def 'should generate correct ServerGroup resource template with scheduled event profile'() { + description = createDescription(false) + description.terminationNotBeforeTimeoutInMinutes = 15 + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithScheduledEventsProfile + } + + def 'should generate correct ServerGroup resource template with windowsConfiguration timeZone'() { + description = createDescription(false) + description.windowsTimeZone = "Central America Standard Time" + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithWindowsConfigurationTimeZone + } + + def 'should generate correct ServerGroup resource template with doNotRunExtensionsOnOverprovisionedVMs'() { + description = createDescription(false) + description.doNotRunExtensionsOnOverprovisionedVMs = true + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithDoNotRunExtensionsOnOverprovisionedVMs + } + + def 'should generate correct ServerGroup resource template with enableIpForwarding'() { + 
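// createDescription(false) leaves enableIpForwarding at its default of false, so the override below + // should surface as "enableIpForwarding" : true in the generated template's networkInterfaceConfigurations. +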
description = createDescription(false) + description.enableIpForwarding = true + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithEnableIpForwarding + } + + def 'should generate correct ServerGroup resource template custom execution is blank'() { + String[] fileUris = [] + description = createCustomDescription(true, "", fileUris) + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWindows + } + + def 'should generate correct ServerGroup resource template health extension is blank'() { + description = createHealthDescription("", 0, "") + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplate + } + + def 'should generate correct ServerGroup resource template with custom image'() { + description = createDescription(true) + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithCustomImage + } + + def 'generate server group template with health extensions profile'() { + description = createHealthDescription() + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedHealthTemplate + } + + def 'generate server group template with custom script extensions profile for linux'() { + description = createCustomDescription() + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomScriptTemplateLinux + } + + def 'generate server group template with custom script extension profile for windows'() { + description = createCustomDescription(true) + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomScriptTemplateWindows + } + + def 'generate server group template with health extensions profile with custom script extension profile for windows'() { + description = createCustomHealthDescription(true) + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomScriptTemplateWindowsWithHealth + } + + def 'generate server group template with health extensions profile custom script extensions profile for linux'() { + description = createCustomHealthDescription() + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomScriptTemplateLinuxWithHealth + } + + def 'generate server group template with custom data'() { + description = createCustomDescription() + + description.osConfig.customData = "this is test custom data" + + String template = 
AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomDataTemplate + } + + def 'generate server group with custom availability zones'() { + description = createCustomDescription() + + description.zones = ["1", "3"] + + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomZonesTemplate + } + + def 'generate server group with custom tags'() { + description = createCustomDescription() + + Map tags = [:] + tags.put("key1", "value1") + tags.put("key2", "value2") + + description.instanceTags = tags + + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedCustomTagsTemplate + } + + def 'generate server group with useSystemManagedIdentity'() { + description = createDescription(false) + description.useSystemManagedIdentity = true + + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithUseSystemManagedIdentity + } + + def 'generate server group with userAssignedIdentities'() { + description = createDescription(false) + description.useSystemManagedIdentity = true + description.userAssignedIdentities = "test" + + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithUseSystemManagedIdentityAndUserAssignedIdentities + } + + def 'generate server group with userAssignedIdentities without useSystemManagedIdentity'() { + description = createDescription(false) + description.useSystemManagedIdentity = false + description.userAssignedIdentities = "test" + + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithUserAssignedIdentities + } + + def 'generates server group without a load balancer'() { + description = createDescription(false) + description.loadBalancerType = null + description.loadBalancerName = null + + String template = AzureServerGroupResourceTemplate.getTemplate(description) + + expect: + template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"').replace('\r', '') == expectedFullTemplateWithNoLoadBalancer + } + + private static AzureServerGroupDescription.AzureExtensionHealthSettings createHealthExtension(String protocol = "https", int port = 7000, String requestPath = "localhost") { + AzureServerGroupDescription.AzureExtensionHealthSettings extension = new AzureServerGroupDescription.AzureExtensionHealthSettings() + extension.protocol = protocol + extension.port = port + extension.requestPath = requestPath + + extension + } + + private static AzureServerGroupDescription.AzureExtensionCustomScriptSettings createCustomScriptExtension(String commandToExecute = "", String[] fileUris = ["storage1", "file2"]) { + AzureServerGroupDescription.AzureExtensionCustomScriptSettings extension = new AzureServerGroupDescription.AzureExtensionCustomScriptSettings() + extension.commandToExecute = 
commandToExecute + extension.fileUris = fileUris + + extension + } + + private static AzureServerGroupDescription createDescription(boolean withCustomImage = false) { + AzureServerGroupDescription description = new AzureServerGroupDescription() + description.name = 'azureMASM-st1-d11' + description.cloudProvider = 'azure' + description.application = 'azureMASM' + description.stack = 'st1' + description.detail = 'd11' + description.clusterName = description.getClusterName() + description.region = 'westus' + description.user = '[anonymous]' + + description.upgradePolicy = AzureServerGroupDescription.UpgradePolicy.Manual + + AzureNamedImage image = new AzureNamedImage() + if (withCustomImage) { + image.isCustom = true + image.ostype = 'Linux' + image.region = 'westus' + image.uri = '/subscriptions/faab228d-df7a-4086-991e-e81c4659d41a/resourceGroups/zhqqi-sntest/providers/Microsoft.Compute/images/hello-karyon-rxnetty-all-20190125054410-ubuntu-1604' + } else { + image.sku = '14.04.3-LTS' + image.offer = 'UbuntuServer' + image.publisher = 'Canonical' + image.version = 'latest' + } + description.image = image + + AzureServerGroupDescription.AzureScaleSetSku scaleSetSku = new AzureServerGroupDescription.AzureScaleSetSku() + scaleSetSku.name = 'Standard_A1' + scaleSetSku.capacity = 2 + scaleSetSku.tier = 'Standard' + description.sku = scaleSetSku + + AzureServerGroupDescription.AzureOperatingSystemConfig config = new AzureServerGroupDescription.AzureOperatingSystemConfig() + description.osConfig = config + + int backendPort = withCustomImage ? 22 : 3389 + description.addInboundPortConfig("InboundPortConfig", 50000, 50099, "tcp", backendPort) + + description.loadBalancerName = 'load-balancer-name' + description.loadBalancerType = AzureLoadBalancer.AzureLoadBalancerType.AZURE_APPLICATION_GATEWAY.toString() + + description.credentials = azureCredentials + + description + } + + private static AzureServerGroupDescription createCustomDescription(boolean targetWindows = false, String commandToExecute = "mkdir mydir", String[] fileUris = ["storage1", "file2"]) { + AzureServerGroupDescription description = createDescription() + description.customScriptsSettings = createCustomScriptExtension(commandToExecute, fileUris) + + //Set the OS type and backend port accordingly + description.image.ostype = targetWindows ? "Windows" : "Linux" + description.inboundPortConfigs[0].backendPort = targetWindows ? 
3389 : 22 + + description + } + + private static AzureServerGroupDescription createHealthDescription(String protocol = "https", int port = 7000, String requestPath = "localhost") { + AzureServerGroupDescription description = createDescription() + description.healthSettings = createHealthExtension(protocol, port, requestPath) + + description + } + + private static AzureServerGroupDescription createCustomHealthDescription(boolean targetWindows = false) { + AzureServerGroupDescription description = createCustomDescription(targetWindows) + description.healthSettings = createHealthExtension() + + description + } + + private static String expectedFullTemplate = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : 
"1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWithScheduledEventsProfile = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + 
"imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : { + "terminateNotificationProfile" : { + "notBeforeTimeout" : "PT15M", + "enable" : true + } + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWithDoNotRunExtensionsOnOverprovisionedVMs = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH 
public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : true + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String 
expectedFullTemplateWithWindowsConfigurationTimeZone = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + 
"dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]", + "windowsConfiguration" : { + "timeZone" : "Central America Standard Time" + } + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWindows = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : 
"azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWithCustomImage = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + 
"dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "" + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "imageReference" : { + "id" : "/subscriptions/faab228d-df7a-4086-991e-e81c4659d41a/resourceGroups/zhqqi-sntest/providers/Microsoft.Compute/images/hello-karyon-rxnetty-all-20190125054410-ubuntu-1604" + }, + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedCustomScriptTemplateLinux = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + 
"loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Azure.Extensions", + "type" : "CustomScript", + "typeHandlerVersion" : "2.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedCustomZonesTemplate = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + 
"appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + 
"name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Azure.Extensions", + "type" : "CustomScript", + "typeHandlerVersion" : "2.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + }, + "zones" : [ "1", "3" ] + } ] +}''' + + private static String expectedFullTemplateWithEnableIpForwarding = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + 
"accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : true, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedCustomScriptTemplateWindows = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + 
"frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Compute", + "type" : "CustomScriptExtension", + "typeHandlerVersion" : "1.8", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedCustomDataTemplate = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App 
Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : 
"[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Azure.Extensions", + "type" : "CustomScript", + "typeHandlerVersion" : "2.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWithUseSystemManagedIdentity = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : 
"azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "SystemAssigned" + } + } ] +}''' + + private static String expectedCustomTagsTemplate = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", 
+ "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "key1" : "value1", + "key2" : "value2", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Azure.Extensions", + "type" : "CustomScript", + "typeHandlerVersion" : "2.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedCustomScriptTemplateWindowsWithHealth = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + 
}, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + 
"loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Compute", + "type" : "CustomScriptExtension", + "typeHandlerVersion" : "1.8", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + }, { + "name" : "azureMASM_health_ext", + "properties" : { + "publisher" : "Microsoft.ManagedServices", + "type" : "ApplicationHealthWindows", + "typeHandlerVersion" : "1.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "protocol" : "https", + "port" : 7000, + "requestPath" : "localhost" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWithUserAssignedIdentities = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : 
"azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "UserAssigned", + "userAssignedIdentities" : { + "[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/','test')]" : { } + } + } + } ] +}''' + + private static String expectedFullTemplateWithUseSystemManagedIdentityAndUserAssignedIdentities = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual 
machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "SystemAssigned, UserAssigned", + "userAssignedIdentities" : { + "[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/','test')]" : { } + } + } + } ] +}''' + + private static String expectedCustomScriptTemplateLinuxWithHealth = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + 
"description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : 
true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_ext", + "properties" : { + "publisher" : "Microsoft.Azure.Extensions", + "type" : "CustomScript", + "typeHandlerVersion" : "2.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "fileUris" : [ "storage1", "file2" ], + "commandToExecute" : "mkdir mydir" + } + } + }, { + "name" : "azureMASM_health_ext", + "properties" : { + "publisher" : "Microsoft.ManagedServices", + "type" : "ApplicationHealthLinux", + "typeHandlerVersion" : "1.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "protocol" : "https", + "port" : 7000, + "requestPath" : "localhost" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedHealthTemplate = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : 
"Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890", + "loadBalancerName" : "load-balancer-name" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ { + "id" : "[parameters('appGatewayAddressPoolId')]" + } ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null, + "extensionProfile" : { + "extensions" : [ { + "name" : "azureMASM_health_ext", + "properties" : { + "publisher" : "Microsoft.ManagedServices", + "type" : "ApplicationHealthWindows", + "typeHandlerVersion" : "1.0", + "autoUpgradeMinorVersion" : true, + "settings" : { + "protocol" : "https", + "port" : 7000, + "requestPath" : "localhost" + } + } + } ] + } + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + + private static String expectedFullTemplateWithNoLoadBalancer = '''{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "location" : { + "type" : "string", + "metadata" : { + "description" : "Location to deploy" + } + }, + "subnetId" : { + "type" : "string", + "metadata" : { + "description" : "Subnet Resource ID" + }, + "defaultValue" : "" + }, + "appGatewayAddressPoolId" : { + "type" : "string", + "metadata" : { + "description" : "App Gateway backend address pool resource ID" + }, + "defaultValue" : "" + }, + "vmUserName" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin username on all VMs" + }, + "defaultValue" : "" + }, + "vmPassword" : { + "type" : "securestring", + "metadata" : { + "description" : "Admin password on all VMs" + }, + "defaultValue" : "" + }, + "vmSshPublicKey" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH public key on all VMs" + }, + "defaultValue" : "" + }, + "loadBalancerAddressPoolId" : { + 
"type" : "string", + "metadata" : { + "description" : "Load balancer pool ID" + }, + "defaultValue" : "" + }, + "loadBalancerNatPoolId" : { + "type" : "string", + "metadata" : { + "description" : "Load balancer NAT pool ID" + }, + "defaultValue" : "" + }, + "customData" : { + "type" : "string", + "metadata" : { + "description" : "custom data to pass down to the virtual machine(s)" + }, + "defaultValue" : "sample custom data" + } + }, + "variables" : { + "apiVersion" : "2019-03-01", + "publicIPAddressName" : "", + "publicIPAddressID" : "", + "publicIPAddressType" : "", + "dnsNameForLBIP" : "", + "loadBalancerBackend" : "", + "loadBalancerFrontEnd" : "", + "loadBalancerName" : "", + "loadBalancerID" : "", + "frontEndIPConfigID" : "", + "inboundNatPoolName" : "", + "vhdContainerName" : "azuremasm-st1-d11", + "osType" : { + "publisher" : "Canonical", + "offer" : "UbuntuServer", + "sku" : "14.04.3-LTS", + "version" : "latest" + }, + "imageReference" : "[variables('osType')]", + "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] + }, + "resources" : [ { + "apiVersion" : "[variables('apiVersion')]", + "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", + "type" : "Microsoft.Storage/storageAccounts", + "location" : "[parameters('location')]", + "tags" : { + "appName" : "azureMASM", + "stack" : "st1", + "detail" : "d11", + "cluster" : "azureMASM-st1-d11", + "serverGroupName" : "azureMASM-st1-d11", + "createdTime" : "1234567890" + }, + "copy" : { + "name" : "storageLoop", + "count" : 1 + }, + "properties" : { + "accountType" : "Premium_LRS" + } + }, { + "apiVersion" : "[variables('apiVersion')]", + "name" : "azureMASM-st1-d11", + "type" : "Microsoft.Compute/virtualMachineScaleSets", + "location" : "[parameters('location')]", + "tags" : { + "createdTime" : "1234567890" + }, + "dependsOn" : [ ], + "sku" : { + "name" : "Standard_A1", + "tier" : "Standard", + "capacity" : 2 + }, + "properties" : { + "upgradePolicy" : { + "mode" : "Manual" + }, + "virtualMachineProfile" : { + "storageProfile" : { + "osDisk" : { + "name" : "osdisk-azureMASM-st1-d11", + "caching" : "ReadOnly", + "createOption" : "FromImage", + "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] + }, + "imageReference" : "[variables('imageReference')]", + "dataDisks" : null + }, + "osProfile" : { + "computerNamePrefix" : "azureMASM-", + "adminUsername" : "[parameters('vmUserName')]", + "adminPassword" : "[parameters('vmPassword')]", + "customData" : "[base64(parameters('customData'))]" + }, + "networkProfile" : { + "networkInterfaceConfigurations" : [ { + "name" : "nic-azureMASM-st1-d11", + "properties" : { + "primary" : true, + "enableIpForwarding" : false, + "ipConfigurations" : [ { + "name" : "ipc-azureMASM-st1-d11", + "properties" : { + "subnet" : { + "id" : "[parameters('subnetId')]" + }, + "loadBalancerBackendAddressPools" : [ ], + "loadBalancerInboundNatPools" : [ ], + "applicationGatewayBackendAddressPools" : [ ] + } + } ] + } + } ] + }, + "scheduledEventsProfile" : null + }, + "doNotRunExtensionsOnOverprovisionedVMs" : false + }, + "identity" : { + "type" : "None" + } + } ] +}''' + +} + + diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/ops/DestroyAzureServerGroupAtomicOperationSpec.groovy 
b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/ops/DestroyAzureServerGroupAtomicOperationSpec.groovy index 3c47d44bca8..726dbd81aef 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/ops/DestroyAzureServerGroupAtomicOperationSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/ops/DestroyAzureServerGroupAtomicOperationSpec.groovy @@ -55,7 +55,7 @@ class DestroyAzureServerGroupAtomicOperationSpec extends Specification { def setupSpec() { converter = new DestroyAzureServerGroupAtomicOperationConverter(objectMapper: mapper) - azureCredentials = new AzureCredentials(ACCOUNT_CLIENTID, ACCOUNT_TENANTID, ACCOUNT_APPKEY, SUBSCRIPTION_ID, DEFAULT_KEY_VAULT, DEFAULT_RESOURCE_GROUP, "") + azureCredentials = GroovyMock(AzureCredentials) def credentialsRepo = new MapBackedAccountCredentialsRepository() credentials = Mock(AzureNamedAccountCredentials) diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/AzureServerGroupResourceTemplateSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/AzureServerGroupResourceTemplateSpec.groovy deleted file mode 100644 index e2b4f5cefbf..00000000000 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/AzureServerGroupResourceTemplateSpec.groovy +++ /dev/null @@ -1,1215 +0,0 @@ -/* - * Copyright 2016 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.azure.resources.servergroups.deploy.templates - -import com.fasterxml.jackson.databind.DeserializationFeature -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spinnaker.clouddriver.azure.client.AzureResourceManagerClient -import com.netflix.spinnaker.clouddriver.azure.resources.common.model.KeyVaultSecret -import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription -import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureNamedImage -import com.netflix.spinnaker.clouddriver.azure.templates.AzureServerGroupResourceTemplate -import spock.lang.Specification - -class AzureServerGroupResourceTemplateSpec extends Specification { - ObjectMapper objectMapper - AzureServerGroupDescription description - - void setup() { - description = createDescription(false) - objectMapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true) - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) - } - - def 'should generate correct ServerGroup resource template'() { - String template = AzureServerGroupResourceTemplate.getTemplate(description) - - expect: - template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedFullTemplate - } - - def 'should generate correct ServerGroup resource template with custom image'() { - description = createDescription(true) - String template = AzureServerGroupResourceTemplate.getTemplate(description) - - expect: - template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedFullTemplateWithCustomImage - } - - def 'generate server group template with extensions profile for linux'() { - description = createCustomDescription() - String template = AzureServerGroupResourceTemplate.getTemplate(description) - - expect: - template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedCustomScriptTemplateLinux - } - - def 'generate server group template with extension profile for windows'() { - description = createCustomDescription(true) - String template = AzureServerGroupResourceTemplate.getTemplate(description) - - expect: - template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedCustomScriptTemplateWindows - } - - def 'generate server group template with custom data'() { - description = createCustomDescription() - - description.osConfig.customData = "this is test custom data" - - String template = AzureServerGroupResourceTemplate.getTemplate(description) - - expect: - template.replaceAll('"createdTime" : "\\d+"', '"createdTime" : "1234567890"') == expectedCustomDataTemplate - } - - def 'verify parameters JSON'() { - - def parameters = [:] - parameters[AzureServerGroupResourceTemplate.subnetParameterName] = subnetId - parameters[AzureServerGroupResourceTemplate.vmPasswordParameterName] = new KeyVaultSecret(secretName, subscriptionId, defaultResourceGroup, defaultVaultName) - String parametersJSON = AzureResourceManagerClient.convertParametersToTemplateJSON(objectMapper, parameters) - - expect: parametersJSON == expectedParameters - } - - private static AzureServerGroupDescription createDescription(boolean withCustomImage = false) { - AzureServerGroupDescription description = new AzureServerGroupDescription() - description.name = 'azureMASM-st1-d11' - description.cloudProvider = 'azure' - description.application = 'azureMASM' - 
description.stack = 'st1' - description.detail = 'd11' - description.clusterName = description.getClusterName() - description.region = 'westus' - description.user = '[anonymous]' - - description.upgradePolicy = AzureServerGroupDescription.UpgradePolicy.Manual - - AzureNamedImage image = new AzureNamedImage() - if (withCustomImage) { - image.isCustom = true - image.ostype = 'Linux' - image.region = 'westus' - image.uri = 'https://storevm112345.blob.core.windows.net/vhds/vm1-1234520161917555.vhd' - } else { - image.sku = '14.04.3-LTS' - image.offer = 'UbuntuServer' - image.publisher = 'Canonical' - image.version = 'latest' - } - description.image = image - - AzureServerGroupDescription.AzureScaleSetSku scaleSetSku = new AzureServerGroupDescription.AzureScaleSetSku() - scaleSetSku.name = 'Standard_A1' - scaleSetSku.capacity = 2 - scaleSetSku.tier = 'Standard' - description.sku = scaleSetSku - - AzureServerGroupDescription.AzureOperatingSystemConfig config = new AzureServerGroupDescription.AzureOperatingSystemConfig() - description.osConfig = config - - int backendPort = withCustomImage ? 22 : 3389 - description.addInboundPortConfig("InboundPortConfig", 50000, 50099, "tcp", backendPort) - - description - } - - private static AzureServerGroupDescription createCustomDescription(boolean targetWindows = false) { - AzureServerGroupDescription description = createDescription() - AzureServerGroupDescription.AzureExtensionCustomScriptSettings extension = new AzureServerGroupDescription.AzureExtensionCustomScriptSettings() - extension.commandToExecute = "mkdir mydir" - extension.fileUris = ["storage1", "file2"] - description.customScriptsSettings = extension - - //Set the OS type and backend port accordingly - description.image.ostype = targetWindows ? "Windows" : "Linux" - description.inboundPortConfigs[0].backendPort = targetWindows ? 
3389 : 22 - - description - } - - private static String expectedFullTemplate = '''{ - "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion" : "1.0.0.0", - "parameters" : { - "location" : { - "type" : "string", - "metadata" : { - "description" : "Location to deploy" - } - }, - "subnetId" : { - "type" : "string", - "metadata" : { - "description" : "Subnet Resource ID" - } - }, - "appGatewayAddressPoolId" : { - "type" : "string", - "metadata" : { - "description" : "App Gateway backend address pool resource ID" - } - }, - "vmuserName" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account name" - } - }, - "vmPassword" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account password" - } - }, - "customData" : { - "type" : "string", - "metadata" : { - "description" : "custom data to pass down to the virtual machine(s)" - }, - "defaultValue" : "" - } - }, - "variables" : { - "apiVersion" : "2015-06-15", - "publicIPAddressName" : "pip-azureMASM-st1-d11", - "publicIPAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]", - "publicIPAddressType" : "Dynamic", - "dnsNameForLBIP" : "dns-azuremasm-st1-d11", - "loadBalancerBackend" : "be-azureMASM-st1-d11", - "loadBalancerFrontEnd" : "fe-azureMASM-st1-d11", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "loadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]", - "frontEndIPConfigID" : "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]", - "inboundNatPoolName" : "np-azureMASM-st1-d11", - "vhdContainerName" : "azuremasm-st1-d11", - "osType" : { - "publisher" : "Canonical", - "offer" : "UbuntuServer", - "sku" : "14.04.3-LTS", - "version" : "latest" - }, - "imageReference" : "[variables('osType')]", - "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] - }, - "resources" : [ { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", - "type" : "Microsoft.Storage/storageAccounts", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "serverGroupName" : "azureMASM-st1-d11", - "createdTime" : "1234567890" - }, - "copy" : { - "name" : "storageLoop", - "count" : 1 - }, - "properties" : { - "accountType" : "Premium_LRS" - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('publicIPAddressName')]", - "type" : "Microsoft.Network/publicIPAddresses", - "location" : "[parameters('location')]", - "tags" : null, - "properties" : { - "publicIPAllocationMethod" : "[variables('publicIPAddressType')]", - "dnsSettings" : { - "domainNameLabel" : "[variables('dnsNameForLBIP')]" - } - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('loadBalancerName')]", - "type" : "Microsoft.Network/loadBalancers", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "createdTime" : "1234567890", - "cluster" : "azureMASM-st1-d11", - "serverGroup" : "azureMASM-st1-d11" - }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], - "properties" : { - "frontendIPConfigurations" : [ 
{ - "name" : "[variables('loadBalancerFrontEnd')]", - "properties" : { - "publicIpAddress" : { - "id" : "[variables('publicIPAddressID')]" - } - } - } ], - "backendAddressPools" : [ { - "name" : "[variables('loadBalancerBackEnd')]" - } ], - "inboundNatPools" : [ { - "name" : "InboundPortConfig", - "properties" : { - "frontendIPConfiguration" : { - "id" : "[variables('frontEndIPConfigID')]" - }, - "protocol" : "tcp", - "frontendPortRangeStart" : 50000, - "frontendPortRangeEnd" : 50099, - "backendPort" : 3389 - } - } ] - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "azureMASM-st1-d11", - "type" : "Microsoft.Compute/virtualMachineScaleSets", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "createdTime" : "1234567890", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "hasNewSubnet" : "false", - "imageIsCustom" : "false", - "storageAccountNames" : "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" - }, - "dependsOn" : [ "[concat('Microsoft.Storage/storageAccounts/', variables('uniqueStorageNameArray')[0])]", "[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]" ], - "sku" : { - "name" : "Standard_A1", - "tier" : "Standard", - "capacity" : 2 - }, - "properties" : { - "upgradePolicy" : { - "mode" : "Manual" - }, - "virtualMachineProfile" : { - "storageProfile" : { - "osDisk" : { - "name" : "osdisk-azureMASM-st1-d11", - "caching" : "ReadOnly", - "createOption" : "FromImage", - "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] - }, - "imageReference" : "[variables('imageReference')]" - }, - "osProfile" : { - "computerNamePrefix" : "azureMASM-", - "adminUsername" : "[parameters('vmUsername')]", - "adminPassword" : "[parameters('vmPassword')]" - }, - "networkProfile" : { - "networkInterfaceConfigurations" : [ { - "name" : "nic-azureMASM-st1-d11", - "properties" : { - "primary" : true, - "ipConfigurations" : [ { - "name" : "ipc-azureMASM-st1-d11", - "properties" : { - "subnet" : { - "id" : "[parameters('subnetId')]" - }, - "loadBalancerBackendAddressPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), variables('loadBalancerBackend'))]" - } ], - "loadBalancerInboundNatPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/inboundNatPools', variables('loadBalancerName'), variables('inboundNatPoolName'))]" - } ], - "applicationGatewayBackendAddressPools" : [ { - "id" : "[parameters('appGatewayAddressPoolId')]" - } ] - } - } ] - } - } ] - } - } - } - } ] -}''' - - private static String expectedFullTemplateWithCustomImage = '''{ - "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion" : "1.0.0.0", - "parameters" : { - "location" : { - "type" : "string", - "metadata" : { - "description" : "Location to deploy" - } - }, - "subnetId" : { - "type" : "string", - "metadata" : { - "description" : "Subnet Resource ID" - } - }, - "appGatewayAddressPoolId" : { - "type" : "string", - "metadata" : { - "description" : "App Gateway backend address pool resource ID" - } - }, - "vmuserName" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account name" - } - }, - "vmPassword" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account 
password" - } - }, - "customData" : { - "type" : "string", - "metadata" : { - "description" : "custom data to pass down to the virtual machine(s)" - }, - "defaultValue" : "" - } - }, - "variables" : { - "apiVersion" : "2015-06-15", - "publicIPAddressName" : "pip-azureMASM-st1-d11", - "publicIPAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]", - "publicIPAddressType" : "Dynamic", - "dnsNameForLBIP" : "dns-azuremasm-st1-d11", - "loadBalancerBackend" : "be-azureMASM-st1-d11", - "loadBalancerFrontEnd" : "fe-azureMASM-st1-d11", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "loadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]", - "frontEndIPConfigID" : "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]", - "inboundNatPoolName" : "np-azureMASM-st1-d11" - }, - "resources" : [ { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('publicIPAddressName')]", - "type" : "Microsoft.Network/publicIPAddresses", - "location" : "[parameters('location')]", - "tags" : null, - "properties" : { - "publicIPAllocationMethod" : "[variables('publicIPAddressType')]", - "dnsSettings" : { - "domainNameLabel" : "[variables('dnsNameForLBIP')]" - } - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('loadBalancerName')]", - "type" : "Microsoft.Network/loadBalancers", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "createdTime" : "1234567890", - "cluster" : "azureMASM-st1-d11", - "serverGroup" : "azureMASM-st1-d11" - }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], - "properties" : { - "frontendIPConfigurations" : [ { - "name" : "[variables('loadBalancerFrontEnd')]", - "properties" : { - "publicIpAddress" : { - "id" : "[variables('publicIPAddressID')]" - } - } - } ], - "backendAddressPools" : [ { - "name" : "[variables('loadBalancerBackEnd')]" - } ], - "inboundNatPools" : [ { - "name" : "InboundPortConfig", - "properties" : { - "frontendIPConfiguration" : { - "id" : "[variables('frontEndIPConfigID')]" - }, - "protocol" : "tcp", - "frontendPortRangeStart" : 50000, - "frontendPortRangeEnd" : 50099, - "backendPort" : 22 - } - } ] - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "azureMASM-st1-d11", - "type" : "Microsoft.Compute/virtualMachineScaleSets", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "createdTime" : "1234567890", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "hasNewSubnet" : "false", - "imageIsCustom" : "true" - }, - "dependsOn" : [ "[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]" ], - "sku" : { - "name" : "Standard_A1", - "tier" : "Standard", - "capacity" : 2 - }, - "properties" : { - "upgradePolicy" : { - "mode" : "Manual" - }, - "virtualMachineProfile" : { - "storageProfile" : { - "osDisk" : { - "name" : "osdisk-azureMASM-st1-d11", - "caching" : "ReadOnly", - "createOption" : "FromImage", - "osType" : "Linux", - "image" : { - "uri" : "https://storevm112345.blob.core.windows.net/vhds/vm1-1234520161917555.vhd" - } - } - }, - "osProfile" : { - "computerNamePrefix" : "azureMASM-", - "adminUsername" : "[parameters('vmUsername')]", - "adminPassword" : "[parameters('vmPassword')]" - }, - 
"networkProfile" : { - "networkInterfaceConfigurations" : [ { - "name" : "nic-azureMASM-st1-d11", - "properties" : { - "primary" : true, - "ipConfigurations" : [ { - "name" : "ipc-azureMASM-st1-d11", - "properties" : { - "subnet" : { - "id" : "[parameters('subnetId')]" - }, - "loadBalancerBackendAddressPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), variables('loadBalancerBackend'))]" - } ], - "loadBalancerInboundNatPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/inboundNatPools', variables('loadBalancerName'), variables('inboundNatPoolName'))]" - } ], - "applicationGatewayBackendAddressPools" : [ { - "id" : "[parameters('appGatewayAddressPoolId')]" - } ] - } - } ] - } - } ] - } - } - } - } ] -}''' - - private static String expectedCustomScriptTemplateLinux = '''{ - "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion" : "1.0.0.0", - "parameters" : { - "location" : { - "type" : "string", - "metadata" : { - "description" : "Location to deploy" - } - }, - "subnetId" : { - "type" : "string", - "metadata" : { - "description" : "Subnet Resource ID" - } - }, - "appGatewayAddressPoolId" : { - "type" : "string", - "metadata" : { - "description" : "App Gateway backend address pool resource ID" - } - }, - "vmuserName" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account name" - } - }, - "vmPassword" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account password" - } - }, - "customData" : { - "type" : "string", - "metadata" : { - "description" : "custom data to pass down to the virtual machine(s)" - }, - "defaultValue" : "" - } - }, - "variables" : { - "apiVersion" : "2015-06-15", - "publicIPAddressName" : "pip-azureMASM-st1-d11", - "publicIPAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]", - "publicIPAddressType" : "Dynamic", - "dnsNameForLBIP" : "dns-azuremasm-st1-d11", - "loadBalancerBackend" : "be-azureMASM-st1-d11", - "loadBalancerFrontEnd" : "fe-azureMASM-st1-d11", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "loadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]", - "frontEndIPConfigID" : "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]", - "inboundNatPoolName" : "np-azureMASM-st1-d11", - "vhdContainerName" : "azuremasm-st1-d11", - "osType" : { - "publisher" : "Canonical", - "offer" : "UbuntuServer", - "sku" : "14.04.3-LTS", - "version" : "latest" - }, - "imageReference" : "[variables('osType')]", - "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] - }, - "resources" : [ { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", - "type" : "Microsoft.Storage/storageAccounts", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "serverGroupName" : "azureMASM-st1-d11", - "createdTime" : "1234567890" - }, - "copy" : { - "name" : "storageLoop", - "count" : 1 - }, - "properties" : { - "accountType" : "Premium_LRS" - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('publicIPAddressName')]", - "type" : 
"Microsoft.Network/publicIPAddresses", - "location" : "[parameters('location')]", - "tags" : null, - "properties" : { - "publicIPAllocationMethod" : "[variables('publicIPAddressType')]", - "dnsSettings" : { - "domainNameLabel" : "[variables('dnsNameForLBIP')]" - } - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('loadBalancerName')]", - "type" : "Microsoft.Network/loadBalancers", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "createdTime" : "1234567890", - "cluster" : "azureMASM-st1-d11", - "serverGroup" : "azureMASM-st1-d11" - }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], - "properties" : { - "frontendIPConfigurations" : [ { - "name" : "[variables('loadBalancerFrontEnd')]", - "properties" : { - "publicIpAddress" : { - "id" : "[variables('publicIPAddressID')]" - } - } - } ], - "backendAddressPools" : [ { - "name" : "[variables('loadBalancerBackEnd')]" - } ], - "inboundNatPools" : [ { - "name" : "InboundPortConfig", - "properties" : { - "frontendIPConfiguration" : { - "id" : "[variables('frontEndIPConfigID')]" - }, - "protocol" : "tcp", - "frontendPortRangeStart" : 50000, - "frontendPortRangeEnd" : 50099, - "backendPort" : 22 - } - } ] - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "azureMASM-st1-d11", - "type" : "Microsoft.Compute/virtualMachineScaleSets", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "createdTime" : "1234567890", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "hasNewSubnet" : "false", - "imageIsCustom" : "false", - "storageAccountNames" : "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" - }, - "dependsOn" : [ "[concat('Microsoft.Storage/storageAccounts/', variables('uniqueStorageNameArray')[0])]", "[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]" ], - "sku" : { - "name" : "Standard_A1", - "tier" : "Standard", - "capacity" : 2 - }, - "properties" : { - "upgradePolicy" : { - "mode" : "Manual" - }, - "virtualMachineProfile" : { - "storageProfile" : { - "osDisk" : { - "name" : "osdisk-azureMASM-st1-d11", - "caching" : "ReadOnly", - "createOption" : "FromImage", - "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] - }, - "imageReference" : "[variables('imageReference')]" - }, - "osProfile" : { - "computerNamePrefix" : "azureMASM-", - "adminUsername" : "[parameters('vmUsername')]", - "adminPassword" : "[parameters('vmPassword')]" - }, - "networkProfile" : { - "networkInterfaceConfigurations" : [ { - "name" : "nic-azureMASM-st1-d11", - "properties" : { - "primary" : true, - "ipConfigurations" : [ { - "name" : "ipc-azureMASM-st1-d11", - "properties" : { - "subnet" : { - "id" : "[parameters('subnetId')]" - }, - "loadBalancerBackendAddressPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), variables('loadBalancerBackend'))]" - } ], - "loadBalancerInboundNatPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/inboundNatPools', variables('loadBalancerName'), variables('inboundNatPoolName'))]" - } ], - "applicationGatewayBackendAddressPools" : [ { - "id" : "[parameters('appGatewayAddressPoolId')]" - } ] - } - } ] - } - } ] - }, - 
"extensionProfile" : { - "extensions" : [ { - "name" : "azureMASM_ext", - "properties" : { - "publisher" : "Microsoft.Azure.Extensions", - "type" : "CustomScript", - "typeHandlerVersion" : "2.0", - "autoUpgradeMinorVersion" : true, - "settings" : { - "fileUris" : [ "storage1", "file2" ], - "commandToExecute" : "mkdir mydir" - } - } - } ] - } - } - } - } ] -}''' - - private static String expectedCustomScriptTemplateWindows = '''{ - "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion" : "1.0.0.0", - "parameters" : { - "location" : { - "type" : "string", - "metadata" : { - "description" : "Location to deploy" - } - }, - "subnetId" : { - "type" : "string", - "metadata" : { - "description" : "Subnet Resource ID" - } - }, - "appGatewayAddressPoolId" : { - "type" : "string", - "metadata" : { - "description" : "App Gateway backend address pool resource ID" - } - }, - "vmuserName" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account name" - } - }, - "vmPassword" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account password" - } - }, - "customData" : { - "type" : "string", - "metadata" : { - "description" : "custom data to pass down to the virtual machine(s)" - }, - "defaultValue" : "" - } - }, - "variables" : { - "apiVersion" : "2015-06-15", - "publicIPAddressName" : "pip-azureMASM-st1-d11", - "publicIPAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]", - "publicIPAddressType" : "Dynamic", - "dnsNameForLBIP" : "dns-azuremasm-st1-d11", - "loadBalancerBackend" : "be-azureMASM-st1-d11", - "loadBalancerFrontEnd" : "fe-azureMASM-st1-d11", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "loadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]", - "frontEndIPConfigID" : "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]", - "inboundNatPoolName" : "np-azureMASM-st1-d11", - "vhdContainerName" : "azuremasm-st1-d11", - "osType" : { - "publisher" : "Canonical", - "offer" : "UbuntuServer", - "sku" : "14.04.3-LTS", - "version" : "latest" - }, - "imageReference" : "[variables('osType')]", - "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] - }, - "resources" : [ { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", - "type" : "Microsoft.Storage/storageAccounts", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "serverGroupName" : "azureMASM-st1-d11", - "createdTime" : "1234567890" - }, - "copy" : { - "name" : "storageLoop", - "count" : 1 - }, - "properties" : { - "accountType" : "Premium_LRS" - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('publicIPAddressName')]", - "type" : "Microsoft.Network/publicIPAddresses", - "location" : "[parameters('location')]", - "tags" : null, - "properties" : { - "publicIPAllocationMethod" : "[variables('publicIPAddressType')]", - "dnsSettings" : { - "domainNameLabel" : "[variables('dnsNameForLBIP')]" - } - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('loadBalancerName')]", - "type" : "Microsoft.Network/loadBalancers", - "location" : 
"[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "createdTime" : "1234567890", - "cluster" : "azureMASM-st1-d11", - "serverGroup" : "azureMASM-st1-d11" - }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], - "properties" : { - "frontendIPConfigurations" : [ { - "name" : "[variables('loadBalancerFrontEnd')]", - "properties" : { - "publicIpAddress" : { - "id" : "[variables('publicIPAddressID')]" - } - } - } ], - "backendAddressPools" : [ { - "name" : "[variables('loadBalancerBackEnd')]" - } ], - "inboundNatPools" : [ { - "name" : "InboundPortConfig", - "properties" : { - "frontendIPConfiguration" : { - "id" : "[variables('frontEndIPConfigID')]" - }, - "protocol" : "tcp", - "frontendPortRangeStart" : 50000, - "frontendPortRangeEnd" : 50099, - "backendPort" : 3389 - } - } ] - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "azureMASM-st1-d11", - "type" : "Microsoft.Compute/virtualMachineScaleSets", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "createdTime" : "1234567890", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "hasNewSubnet" : "false", - "imageIsCustom" : "false", - "storageAccountNames" : "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" - }, - "dependsOn" : [ "[concat('Microsoft.Storage/storageAccounts/', variables('uniqueStorageNameArray')[0])]", "[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]" ], - "sku" : { - "name" : "Standard_A1", - "tier" : "Standard", - "capacity" : 2 - }, - "properties" : { - "upgradePolicy" : { - "mode" : "Manual" - }, - "virtualMachineProfile" : { - "storageProfile" : { - "osDisk" : { - "name" : "osdisk-azureMASM-st1-d11", - "caching" : "ReadOnly", - "createOption" : "FromImage", - "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] - }, - "imageReference" : "[variables('imageReference')]" - }, - "osProfile" : { - "computerNamePrefix" : "azureMASM-", - "adminUsername" : "[parameters('vmUsername')]", - "adminPassword" : "[parameters('vmPassword')]" - }, - "networkProfile" : { - "networkInterfaceConfigurations" : [ { - "name" : "nic-azureMASM-st1-d11", - "properties" : { - "primary" : true, - "ipConfigurations" : [ { - "name" : "ipc-azureMASM-st1-d11", - "properties" : { - "subnet" : { - "id" : "[parameters('subnetId')]" - }, - "loadBalancerBackendAddressPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), variables('loadBalancerBackend'))]" - } ], - "loadBalancerInboundNatPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/inboundNatPools', variables('loadBalancerName'), variables('inboundNatPoolName'))]" - } ], - "applicationGatewayBackendAddressPools" : [ { - "id" : "[parameters('appGatewayAddressPoolId')]" - } ] - } - } ] - } - } ] - }, - "extensionProfile" : { - "extensions" : [ { - "name" : "azureMASM_ext", - "properties" : { - "publisher" : "Microsoft.Compute", - "type" : "CustomScriptExtension", - "typeHandlerVersion" : "1.8", - "autoUpgradeMinorVersion" : true, - "settings" : { - "fileUris" : [ "storage1", "file2" ], - "commandToExecute" : "mkdir mydir" - } - } - } ] - } - } - } - } ] -}''' - - private static String expectedCustomDataTemplate = '''{ - 
"$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion" : "1.0.0.0", - "parameters" : { - "location" : { - "type" : "string", - "metadata" : { - "description" : "Location to deploy" - } - }, - "subnetId" : { - "type" : "string", - "metadata" : { - "description" : "Subnet Resource ID" - } - }, - "appGatewayAddressPoolId" : { - "type" : "string", - "metadata" : { - "description" : "App Gateway backend address pool resource ID" - } - }, - "vmuserName" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account name" - } - }, - "vmPassword" : { - "type" : "securestring", - "metadata" : { - "description" : "default VM account password" - } - }, - "customData" : { - "type" : "string", - "metadata" : { - "description" : "custom data to pass down to the virtual machine(s)" - }, - "defaultValue" : "" - } - }, - "variables" : { - "apiVersion" : "2015-06-15", - "publicIPAddressName" : "pip-azureMASM-st1-d11", - "publicIPAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses', variables('publicIPAddressName'))]", - "publicIPAddressType" : "Dynamic", - "dnsNameForLBIP" : "dns-azuremasm-st1-d11", - "loadBalancerBackend" : "be-azureMASM-st1-d11", - "loadBalancerFrontEnd" : "fe-azureMASM-st1-d11", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "loadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('loadBalancerName'))]", - "frontEndIPConfigID" : "[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations/', variables('loadBalancerName'), variables('loadBalancerFrontEnd'))]", - "inboundNatPoolName" : "np-azureMASM-st1-d11", - "vhdContainerName" : "azuremasm-st1-d11", - "osType" : { - "publisher" : "Canonical", - "offer" : "UbuntuServer", - "sku" : "14.04.3-LTS", - "version" : "latest" - }, - "imageReference" : "[variables('osType')]", - "uniqueStorageNameArray" : [ "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" ] - }, - "resources" : [ { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[concat(variables('uniqueStorageNameArray')[copyIndex()])]", - "type" : "Microsoft.Storage/storageAccounts", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "serverGroupName" : "azureMASM-st1-d11", - "createdTime" : "1234567890" - }, - "copy" : { - "name" : "storageLoop", - "count" : 1 - }, - "properties" : { - "accountType" : "Premium_LRS" - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('publicIPAddressName')]", - "type" : "Microsoft.Network/publicIPAddresses", - "location" : "[parameters('location')]", - "tags" : null, - "properties" : { - "publicIPAllocationMethod" : "[variables('publicIPAddressType')]", - "dnsSettings" : { - "domainNameLabel" : "[variables('dnsNameForLBIP')]" - } - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "[variables('loadBalancerName')]", - "type" : "Microsoft.Network/loadBalancers", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "createdTime" : "1234567890", - "cluster" : "azureMASM-st1-d11", - "serverGroup" : "azureMASM-st1-d11" - }, - "dependsOn" : [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" ], - "properties" : { - "frontendIPConfigurations" : [ { - "name" : "[variables('loadBalancerFrontEnd')]", - "properties" : { - 
"publicIpAddress" : { - "id" : "[variables('publicIPAddressID')]" - } - } - } ], - "backendAddressPools" : [ { - "name" : "[variables('loadBalancerBackEnd')]" - } ], - "inboundNatPools" : [ { - "name" : "InboundPortConfig", - "properties" : { - "frontendIPConfiguration" : { - "id" : "[variables('frontEndIPConfigID')]" - }, - "protocol" : "tcp", - "frontendPortRangeStart" : 50000, - "frontendPortRangeEnd" : 50099, - "backendPort" : 22 - } - } ] - } - }, { - "apiVersion" : "[variables('apiVersion')]", - "name" : "azureMASM-st1-d11", - "type" : "Microsoft.Compute/virtualMachineScaleSets", - "location" : "[parameters('location')]", - "tags" : { - "appName" : "azureMASM", - "stack" : "st1", - "detail" : "d11", - "cluster" : "azureMASM-st1-d11", - "createdTime" : "1234567890", - "loadBalancerName" : "lb-azureMASM-st1-d11", - "hasNewSubnet" : "false", - "imageIsCustom" : "false", - "storageAccountNames" : "[concat(uniqueString(concat(resourceGroup().id, subscription().id, 'azuremasmst1d11', '0')), 'sa')]" - }, - "dependsOn" : [ "[concat('Microsoft.Storage/storageAccounts/', variables('uniqueStorageNameArray')[0])]", "[concat('Microsoft.Network/loadBalancers/', variables('loadBalancerName'))]" ], - "sku" : { - "name" : "Standard_A1", - "tier" : "Standard", - "capacity" : 2 - }, - "properties" : { - "upgradePolicy" : { - "mode" : "Manual" - }, - "virtualMachineProfile" : { - "storageProfile" : { - "osDisk" : { - "name" : "osdisk-azureMASM-st1-d11", - "caching" : "ReadOnly", - "createOption" : "FromImage", - "vhdContainers" : [ "[concat('https://', variables('uniqueStorageNameArray')[0], '.blob.core.windows.net/', variables('vhdContainerName'))]" ] - }, - "imageReference" : "[variables('imageReference')]" - }, - "osProfile" : { - "computerNamePrefix" : "azureMASM-", - "adminUsername" : "[parameters('vmUsername')]", - "adminPassword" : "[parameters('vmPassword')]", - "customData" : "[base64(parameters('customData'))]" - }, - "networkProfile" : { - "networkInterfaceConfigurations" : [ { - "name" : "nic-azureMASM-st1-d11", - "properties" : { - "primary" : true, - "ipConfigurations" : [ { - "name" : "ipc-azureMASM-st1-d11", - "properties" : { - "subnet" : { - "id" : "[parameters('subnetId')]" - }, - "loadBalancerBackendAddressPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', variables('loadBalancerName'), variables('loadBalancerBackend'))]" - } ], - "loadBalancerInboundNatPools" : [ { - "id" : "[resourceId('Microsoft.Network/loadBalancers/inboundNatPools', variables('loadBalancerName'), variables('inboundNatPoolName'))]" - } ], - "applicationGatewayBackendAddressPools" : [ { - "id" : "[parameters('appGatewayAddressPoolId')]" - } ] - } - } ] - } - } ] - }, - "extensionProfile" : { - "extensions" : [ { - "name" : "azureMASM_ext", - "properties" : { - "publisher" : "Microsoft.Azure.Extensions", - "type" : "CustomScript", - "typeHandlerVersion" : "2.0", - "autoUpgradeMinorVersion" : true, - "settings" : { - "fileUris" : [ "storage1", "file2" ], - "commandToExecute" : "mkdir mydir" - } - } - } ] - } - } - } - } ] -}''' - - private static String expectedParameters = """{ - "subnetId" : { - "value" : "$subnetId" - }, - "vmPassword" : { - "reference" : { - "keyVault" : { - "id" : "/subscriptions/$subscriptionId/resourceGroups/$defaultResourceGroup/providers/Microsoft.KeyVault/vaults/$defaultVaultName" - }, - "secretName" : "$secretName" - } - } -}""" - - private static final String subscriptionId = "testSubscriptionID" - private static final String subnetId = 
"SubNetTestID" - private static final String defaultResourceGroup = "defaultResourceGroup" - private static final String defaultVaultName = "defaultKeyVault" - private static final String secretName = "VMPassword" - -} diff --git a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/description/AzureServerGroupDescriptionUnitSpec.groovy b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/description/AzureServerGroupDescriptionUnitSpec.groovy index 0844ad5b177..433966e3e6b 100644 --- a/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/description/AzureServerGroupDescriptionUnitSpec.groovy +++ b/clouddriver-azure/src/test/groovy/com/netflix/spinnaker/clouddriver/azure/resources/servergroups/deploy/templates/description/AzureServerGroupDescriptionUnitSpec.groovy @@ -16,22 +16,20 @@ package com.netflix.spinnaker.clouddriver.azure.resources.servergroups.deploy.templates.description -import com.microsoft.azure.management.compute.models.ImageReference -import com.microsoft.azure.management.compute.models.Sku -import com.microsoft.azure.management.compute.models.UpgradePolicy -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSet -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSetOSProfile -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSetSku -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSetStorageProfile -import com.microsoft.azure.management.compute.models.VirtualMachineScaleSetVMProfile -import com.netflix.spinnaker.clouddriver.azure.common.AzureUtilities +import com.azure.resourcemanager.compute.fluent.models.VirtualMachineScaleSetInner +import com.azure.resourcemanager.compute.models.ImageReference +import com.azure.resourcemanager.compute.models.Sku +import com.azure.resourcemanager.compute.models.UpgradeMode +import com.azure.resourcemanager.compute.models.UpgradePolicy +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetOSProfile +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetStorageProfile +import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetVMProfile import com.netflix.spinnaker.clouddriver.azure.resources.servergroup.model.AzureServerGroupDescription import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureNamedImage import spock.lang.Specification - class AzureServerGroupDescriptionUnitSpec extends Specification { - VirtualMachineScaleSet scaleSet + VirtualMachineScaleSetInner scaleSet void setup() { scaleSet = createScaleSet() @@ -43,65 +41,63 @@ class AzureServerGroupDescriptionUnitSpec extends Specification { } - private static VirtualMachineScaleSet createScaleSet() { + private static VirtualMachineScaleSetInner createScaleSet() { Map tags = [ "stack": "testStack", "detail": "testDetail", "appName" : "testScaleSet", "cluster" : "testScaleSet-testStack-testDetail"] - VirtualMachineScaleSet scaleSet = new VirtualMachineScaleSet() + VirtualMachineScaleSetInner scaleSet = new VirtualMachineScaleSetInner() //name is read only //scaleSet.name = 'testScaleSet-testStack-testDetail' - scaleSet.location = 'testLocation' - scaleSet.tags = tags + scaleSet.withLocation 'testLocation' + scaleSet.withTags tags def upgradePolicy = new UpgradePolicy() - upgradePolicy.mode = "Automatic" - scaleSet.upgradePolicy = upgradePolicy + 
upgradePolicy.withMode UpgradeMode.AUTOMATIC + scaleSet.withUpgradePolicy upgradePolicy VirtualMachineScaleSetVMProfile vmProfile = new VirtualMachineScaleSetVMProfile() VirtualMachineScaleSetOSProfile osProfile = new VirtualMachineScaleSetOSProfile() - osProfile.adminUsername = "testtest" - osProfile.adminPassword = "t3stt3st" - osProfile.computerNamePrefix = "nflx" - vmProfile.osProfile = osProfile + osProfile.withAdminUsername "testtest" + osProfile.withAdminPassword "t3stt3st" + osProfile.withComputerNamePrefix "nflx" + vmProfile.withOsProfile osProfile VirtualMachineScaleSetStorageProfile storageProfile = new VirtualMachineScaleSetStorageProfile() ImageReference image = new ImageReference() - image.offer = "testOffer" - image.publisher = "testPublisher" - image.sku = "testSku" - image.version = "testVersion" - storageProfile.imageReference = image - vmProfile.storageProfile = storageProfile + image.withOffer "testOffer" + image.withPublisher "testPublisher" + image.withSku "testSku" + image.withVersion "testVersion" + storageProfile.withImageReference image + vmProfile.withStorageProfile storageProfile - scaleSet.virtualMachineProfile = vmProfile + scaleSet.withVirtualMachineProfile vmProfile Sku sku = new Sku() - sku.name = "testSku" - sku.capacity = 100 - sku.tier = "tier1" - scaleSet.sku = sku - - scaleSet.provisioningState = "Succeeded" + sku.withName "testSku" + sku.withCapacity 100 + sku.withTier "tier1" + scaleSet.withSku sku scaleSet } - private static Boolean descriptionIsValid(AzureServerGroupDescription description, VirtualMachineScaleSet scaleSet) { - (description.name == scaleSet.name - && description.appName == scaleSet.tags.appName - && description.tags == scaleSet.tags - && description.stack == scaleSet.tags.stack - && description.detail == scaleSet.tags.detail - && description.application == scaleSet.tags.appName - && description.clusterName == scaleSet.tags.cluster - && description.region == scaleSet.location - && description.upgradePolicy == getPolicy(scaleSet.upgradePolicy.mode) + private static Boolean descriptionIsValid(AzureServerGroupDescription description, VirtualMachineScaleSetInner scaleSet) { + (description.name == scaleSet.name() + && description.appName == scaleSet.tags().appName + && description.tags == scaleSet.tags() + && description.stack == scaleSet.tags().stack + && description.detail == scaleSet.tags().detail + && description.application == scaleSet.tags().appName + && description.clusterName == scaleSet.tags().cluster + && description.region == scaleSet.location() + && description.upgradePolicy.name().toLowerCase() == scaleSet.upgradePolicy().mode().name().toLowerCase() && isValidImage(description.image, scaleSet) && isValidOsConfig(description.osConfig, scaleSet) && isValidSku(description.sku, scaleSet) - && description.provisioningState == AzureUtilities.ProvisioningState.SUCCEEDED) + && description.provisioningState == null) } private static AzureServerGroupDescription.UpgradePolicy getPolicy(String scaleSetPolicyMode) @@ -109,23 +105,23 @@ class AzureServerGroupDescriptionUnitSpec extends Specification { AzureServerGroupDescription.getPolicyFromMode(scaleSetPolicyMode) } - private static Boolean isValidImage(AzureNamedImage image, VirtualMachineScaleSet scaleSet) { - (image.offer == scaleSet.virtualMachineProfile.storageProfile.imageReference.offer - && image.sku == scaleSet.virtualMachineProfile.storageProfile.imageReference.sku - && image.publisher == scaleSet.virtualMachineProfile.storageProfile.imageReference.publisher - && image.version 
== scaleSet.virtualMachineProfile.storageProfile.imageReference.version) + private static Boolean isValidImage(AzureNamedImage image, VirtualMachineScaleSetInner scaleSet) { + (image.offer == scaleSet.virtualMachineProfile().storageProfile().imageReference().offer() + && image.sku == scaleSet.virtualMachineProfile().storageProfile().imageReference().sku() + && image.publisher == scaleSet.virtualMachineProfile().storageProfile().imageReference().publisher() + && image.version == scaleSet.virtualMachineProfile().storageProfile().imageReference().version()) } - private static Boolean isValidOsConfig (AzureServerGroupDescription.AzureOperatingSystemConfig osConfig, VirtualMachineScaleSet scaleSet) { - (osConfig.adminPassword == scaleSet.virtualMachineProfile.osProfile.adminPassword - && osConfig.adminUserName == scaleSet.virtualMachineProfile.osProfile.adminUsername - && osConfig.computerNamePrefix == scaleSet.virtualMachineProfile.osProfile.computerNamePrefix) + private static Boolean isValidOsConfig (AzureServerGroupDescription.AzureOperatingSystemConfig osConfig, VirtualMachineScaleSetInner scaleSet) { + (osConfig.adminPassword == scaleSet.virtualMachineProfile().osProfile().adminPassword() + && osConfig.adminUserName == scaleSet.virtualMachineProfile().osProfile().adminUsername() + && osConfig.computerNamePrefix == scaleSet.virtualMachineProfile().osProfile().computerNamePrefix()) } - private static Boolean isValidSku (AzureServerGroupDescription.AzureScaleSetSku sku, VirtualMachineScaleSet scaleSet) { - (sku.name == scaleSet.sku.name - && sku.tier == scaleSet.sku.tier - && sku.capacity == scaleSet.sku.capacity) + private static Boolean isValidSku (AzureServerGroupDescription.AzureScaleSetSku sku, VirtualMachineScaleSetInner scaleSet) { + (sku.name == scaleSet.sku().name() + && sku.tier == scaleSet.sku().tier() + && sku.capacity == scaleSet.sku().capacity()) } } diff --git a/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProviderTest.java b/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProviderTest.java new file mode 100644 index 00000000000..a2bd146bb9f --- /dev/null +++ b/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/loadbalancer/view/AzureLoadBalancerProviderTest.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.azure.resources.loadbalancer.view; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.mockito.Mockito.mock; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider; +import com.netflix.spinnaker.clouddriver.azure.resources.application.view.AzureApplicationProvider; +import com.netflix.spinnaker.clouddriver.azure.resources.cluster.view.AzureClusterProvider; +import org.junit.jupiter.api.Test; +import org.springframework.boot.context.annotation.UserConfigurations; +import org.springframework.boot.test.context.runner.ApplicationContextRunner; +import org.springframework.context.annotation.Bean; + +public class AzureLoadBalancerProviderTest { + + private static class AzureTestConfig { + + @Bean + AzureCloudProvider azureCloudProvider() { + return mock(AzureCloudProvider.class); + } + + @Bean + AzureClusterProvider azureClusterProvider() { + return mock(AzureClusterProvider.class); + } + + @Bean + AzureApplicationProvider azureApplicationProvider() { + return mock(AzureApplicationProvider.class); + } + + @Bean + Cache cache() { + return mock(Cache.class); + } + + @Bean + ObjectMapper getObjectMapper() { + return new ObjectMapper(); + } + } + + private final ApplicationContextRunner applicationContextRunner = + new ApplicationContextRunner() + .withConfiguration(UserConfigurations.of(AzureTestConfig.class)) + .withBean(AzureLoadBalancerProvider.class); + + /** + * The AzureLoadBalancerProvider class previously had a self-reference, which resulted in a + * circular reference exception. The intention of this test is to detect that exception scenario, + * without enabling the Azure provider. + */ + @Test + public void testCircularDependenciesException() { + assertDoesNotThrow( + () -> + applicationContextRunner.run( + ctx -> assertThat(ctx).hasSingleBean(AzureLoadBalancerProvider.class))); + } +} diff --git a/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureManagedImageCachingAgentTest.java b/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureManagedImageCachingAgentTest.java new file mode 100644 index 00000000000..b404f710ee1 --- /dev/null +++ b/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/cache/AzureManagedImageCachingAgentTest.java @@ -0,0 +1,117 @@ +/* + * Copyright 2022 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.azure.resources.vmimage.cache; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider; +import com.netflix.spinnaker.clouddriver.azure.client.AzureComputeClient; +import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys; +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureManagedVMImage; +import com.netflix.spinnaker.clouddriver.azure.security.AzureCredentials; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +class AzureManagedImageCachingAgentTest { + + private static final String NAME = "name"; + private static final String RESOURCEGROUP = "resourcegroup"; + private static final String REGION = "eastus"; + private static final String OS_TYPE = "Windows"; + private static final ObjectMapper MAPPER = new ObjectMapper(); + + @Test + @DisplayName( + "Cache for Namespace.AZURE_MANAGEDIMAGES should be empty when no custom VM images are returned by the Azure Compute Client") + void shouldNotCacheWhenNoImageReturnedByAzureComputeClient() { + AzureCredentials azureCredentials = mock(AzureCredentials.class); + when(azureCredentials.getDefaultResourceGroup()).thenReturn("resource"); + AzureComputeClient azureComputeClient = mock(AzureComputeClient.class); + when(azureCredentials.getComputeClient()).thenReturn(azureComputeClient); + when(azureComputeClient.getAllVMCustomImages(anyString(), anyString())).thenReturn(List.of()); + AzureManagedImageCachingAgent region = + new AzureManagedImageCachingAgent( + new AzureCloudProvider(), "my-account", azureCredentials, REGION, MAPPER); + CacheResult cacheResult = region.loadData(mock(ProviderCache.class)); + + assertThat(cacheResult).isNotNull(); + Map<String, Collection<CacheData>> cacheResults = cacheResult.getCacheResults(); + assertThat(cacheResults) + .isNotEmpty() + .containsKey(Keys.Namespace.AZURE_MANAGEDIMAGES.toString()); + assertThat(cacheResults.get(Keys.Namespace.AZURE_MANAGEDIMAGES.toString())).isEmpty(); + } + + @Test + @DisplayName("Cache for Namespace.AZURE_MANAGEDIMAGES should return one result") + void shouldReturnOneCachedResult() { + + AzureCredentials azureCredentials = mock(AzureCredentials.class); + when(azureCredentials.getDefaultResourceGroup()).thenReturn("resource"); + AzureComputeClient azureComputeClient = mock(AzureComputeClient.class); + when(azureCredentials.getComputeClient()).thenReturn(azureComputeClient); + + AzureManagedVMImage managedVMImage = new AzureManagedVMImage(); + + managedVMImage.setName(NAME); + managedVMImage.setResourceGroup(RESOURCEGROUP); + managedVMImage.setRegion(REGION); + managedVMImage.setOsType(OS_TYPE); + + when(azureComputeClient.getAllVMCustomImages(anyString(), anyString())) + .thenReturn(List.of(managedVMImage)); + AzureManagedImageCachingAgent region = + new AzureManagedImageCachingAgent( + new AzureCloudProvider(), "my-account", azureCredentials, REGION, MAPPER); + CacheResult cacheResult = region.loadData(mock(ProviderCache.class)); + + assertThat(cacheResult).isNotNull(); + Map<String, Collection<CacheData>> cacheResults =
cacheResult.getCacheResults(); + assertThat(cacheResults) + .isNotEmpty() + .containsKey(Keys.Namespace.AZURE_MANAGEDIMAGES.toString()); + Collection<CacheData> cacheData = + cacheResults.get((Keys.Namespace.AZURE_MANAGEDIMAGES.toString())); + assertThat(cacheData).hasSize(1); + cacheData.forEach( + data -> { + AzureManagedVMImage cachedManagedImage = + MAPPER.convertValue(data.getAttributes().get("vmimage"), AzureManagedVMImage.class); + assertManagedImage(managedVMImage, cachedManagedImage); + assertThat(data.getRelationships()).isEmpty(); + }); + } + + private static void assertManagedImage( + AzureManagedVMImage actualManagedVMImage, AzureManagedVMImage cachedManagedImage) { + assertThat(cachedManagedImage.getName()).isEqualTo(actualManagedVMImage.getName()); + assertThat(cachedManagedImage.getRegion()).isEqualTo(actualManagedVMImage.getRegion()); + assertThat(cachedManagedImage.getOsType()).isEqualTo(actualManagedVMImage.getOsType()); + assertThat(cachedManagedImage.getResourceGroup()) + .isEqualTo(actualManagedVMImage.getResourceGroup()); + } +} diff --git a/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupControllerTest.java b/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupControllerTest.java new file mode 100644 index 00000000000..8fff54e6908 --- /dev/null +++ b/clouddriver-azure/src/test/java/com/netflix/spinnaker/clouddriver/azure/resources/vmimage/view/AzureVMImageLookupControllerTest.java @@ -0,0 +1,566 @@ +/* + * Copyright 2022 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.azure.resources.vmimage.view; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.*; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.CacheFilter; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.azure.AzureCloudProvider; +import com.netflix.spinnaker.clouddriver.azure.resources.common.cache.Keys; +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureCustomVMImage; +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureManagedVMImage; +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureNamedImage; +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.model.AzureVMImage; +import com.netflix.spinnaker.clouddriver.azure.resources.vmimage.view.AzureVMImageLookupController.LookupOptions; +import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; + +class AzureVMImageLookupControllerTest { + + public static final String REGION = "eastus"; + public static final String VM_IMAGE_NAME = "imagename"; + public static final String OS_TYPE = "windows"; + public static final String AZURE_ACCOUNT = "azure"; + public static final String RESOURCE_GROUP = "testgroup"; + public static final String NOT_AVAILABLE = "na"; + public static final String CUSTOM_IMAGE_PATH = "path/to/image"; + public static final String OFFER = "offer"; + public static final String SKU = "sku"; + public static final String PUBLISHER = "publisher"; + public static final String VERSION = "1"; + private Cache cache; + private DefaultAccountCredentialsProvider accountCredentialsProvider; + private static final AzureCloudProvider azureCloudProvider = new AzureCloudProvider(); + + private static final ObjectMapper objectMapper = new ObjectMapper(); + private AzureVMImageLookupController lookupController; + + @BeforeEach + public void setUp() { + this.cache = mock(Cache.class); + this.accountCredentialsProvider = mock(DefaultAccountCredentialsProvider.class); + lookupController = + new AzureVMImageLookupController( + this.accountCredentialsProvider, azureCloudProvider, this.cache, objectMapper); + } + + @AfterEach + public void tearDown() { + cache = null; + lookupController = null; + } + + @Test + @DisplayName("Should throw an exception when no image of any type is found") + void shouldThrowExceptionWhenNoImageFound() { + + // prepare + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of()); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyString())) + .willReturn(List.of()); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_VMIMAGES.getNs()), anyString())) + .willReturn(List.of()); + 
given(accountCredentialsProvider.getAll()).willReturn(Set.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_VMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + + // act and assert + assertThatExceptionOfType(AzureVMImageLookupController.ImageNotFoundException.class) + .isThrownBy(() -> lookupController.getVMImage(AZURE_ACCOUNT, REGION, VM_IMAGE_NAME)) + .withMessage(VM_IMAGE_NAME + " not found in " + AZURE_ACCOUNT + "/" + REGION); + } + + @Test + @DisplayName( + "When the custom flag is set to true and the managed image flag to false, it should only return VM custom images") + void shouldReturnCustomImage() { + + // prepare + LookupOptions lookupOptions = getLookupOptions(false, true, false); + + String key = Keys.getCustomVMImageKey(azureCloudProvider, AZURE_ACCOUNT, REGION, VM_IMAGE_NAME); + + Map<String, Object> azureImageAsJson = getVmCustomImageAsJsonMap(VM_IMAGE_NAME); + + CacheData c = new DefaultCacheData(key, Map.of("vmimage", azureImageAsJson), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of(key)); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(c)); + + // act + List<AzureNamedImage> list = lookupController.list(lookupOptions); + + // assert + ArgumentCaptor<String> namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(1); + AzureNamedImage namedImage = list.get(0); + assertThat(namedImage) + .isNotNull() + .returns(VM_IMAGE_NAME, AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(OS_TYPE, AzureNamedImage::getOstype) + .returns(true, AzureNamedImage::getIsCustom) + .returns(NOT_AVAILABLE, AzureNamedImage::getOffer) + .returns(NOT_AVAILABLE, AzureNamedImage::getSku) + .returns(NOT_AVAILABLE, AzureNamedImage::getVersion) + .returns(CUSTOM_IMAGE_PATH, AzureNamedImage::getUri); + + verify(cache, times(1)).getAll(namespace.capture(), anyList(), any(CacheFilter.class)); + List<String> keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace).containsOnly(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()); + } + + @Test + @DisplayName("Should return only custom images if found in the cache") + void shouldReturnOnlyCustomImageWhenSearchedByAccountRegionAndName() { + + // prepare + + String key = Keys.getCustomVMImageKey(azureCloudProvider, AZURE_ACCOUNT, REGION, VM_IMAGE_NAME); + + Map<String, Object> azureImageAsJson = getVmCustomImageAsJsonMap(VM_IMAGE_NAME); + + CacheData c = new DefaultCacheData(key, Map.of("vmimage", azureImageAsJson), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of(key)); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(c)); + + // act + List<AzureNamedImage> list = lookupController.getVMImage(AZURE_ACCOUNT, REGION, VM_IMAGE_NAME); + + // assert + ArgumentCaptor<String> namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(1); + AzureNamedImage namedImage = list.get(0); + assertThat(namedImage) + .isNotNull() + .returns(VM_IMAGE_NAME,
AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(OS_TYPE, AzureNamedImage::getOstype) + .returns(true, AzureNamedImage::getIsCustom) + .returns(NOT_AVAILABLE, AzureNamedImage::getOffer) + .returns(NOT_AVAILABLE, AzureNamedImage::getSku) + .returns(NOT_AVAILABLE, AzureNamedImage::getVersion) + .returns(CUSTOM_IMAGE_PATH, AzureNamedImage::getUri); + + verify(cache, times(1)).filterIdentifiers(anyString(), anyString()); + verify(cache, times(1)).getAll(namespace.capture(), anyList(), any(CacheFilter.class)); + List<String> keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace).containsOnly(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()); + } + + @Test + @DisplayName( + "When the custom flag is set to false and the managed image flag to false, it should return VM custom images and images from the yaml file, if any are defined") + void shouldReturnCustomImagesAndImagesConfiguredInYaml() { + + // prepare + LookupOptions lookupOptions = getLookupOptions(false, false, true); + + String key = Keys.getCustomVMImageKey(azureCloudProvider, AZURE_ACCOUNT, REGION, VM_IMAGE_NAME); + + Map<String, Object> azureImageAsJson = getVmCustomImageAsJsonMap(VM_IMAGE_NAME); + + CacheData c = new DefaultCacheData(key, Map.of("vmimage", azureImageAsJson), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of(key)); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(c)); + + given(accountCredentialsProvider.getAll()).willReturn(Set.of()); + // act + List<AzureNamedImage> list = lookupController.list(lookupOptions); + + // assert + ArgumentCaptor<String> namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(1); + AzureNamedImage namedImage = list.get(0); + assertThat(namedImage) + .isNotNull() + .returns(VM_IMAGE_NAME, AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(OS_TYPE, AzureNamedImage::getOstype) + .returns(true, AzureNamedImage::getIsCustom) + .returns(NOT_AVAILABLE, AzureNamedImage::getOffer) + .returns(NOT_AVAILABLE, AzureNamedImage::getSku) + .returns(NOT_AVAILABLE, AzureNamedImage::getVersion) + .returns(CUSTOM_IMAGE_PATH, AzureNamedImage::getUri); + + verify(this.accountCredentialsProvider, times(1)).getAll(); + verify(this.cache, times(1)).getAll(namespace.capture(), anyList(), any(CacheFilter.class)); + List<String> keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace).containsOnly(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()); + } + + @Test + @DisplayName( + "When the custom flag is set to false and the managed image flag to false, it should return VM custom images, images from the yaml file if any are defined, and Azure VM images that match the filter") + void shouldReturnCustomImagesAndImagesConfiguredInYamlAndAzureVMImages() { + + // prepare + LookupOptions lookupOptions = getLookupOptions(false, false, false); + lookupOptions.setQ(OFFER + "-" + SKU); + + String key = Keys.getCustomVMImageKey(azureCloudProvider, AZURE_ACCOUNT, REGION, VM_IMAGE_NAME); + + String vmImageKey = + Keys.getVMImageKey( + azureCloudProvider, + AZURE_ACCOUNT, + REGION, + OFFER + "-" + SKU, + VERSION + "-" + PUBLISHER); + + Map<String, Object> azureCustomImageAsJson = getVmCustomImageAsJsonMap(OFFER + "-" + SKU); + + Map<String, Object> azureVmImageAsJson = getAzureVMImageAsJsonMap(); + + CacheData customImage = + new DefaultCacheData(key,
Map.of("vmimage", azureCustomImageAsJson), Map.of()); + CacheData vmImage = + new DefaultCacheData(vmImageKey, Map.of("vmimage", azureVmImageAsJson), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of(key)); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_VMIMAGES.getNs()), anyString())) + .willReturn(List.of(vmImageKey)); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(customImage)); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_VMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(vmImage)); + + given(accountCredentialsProvider.getAll()).willReturn(Set.of()); + // act + List list = lookupController.list(lookupOptions); + + // assert + ArgumentCaptor namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(2); + AzureNamedImage namedImage = list.get(0); + assertThat(namedImage) + .isNotNull() + .returns(OFFER + "-" + SKU, AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(OS_TYPE, AzureNamedImage::getOstype) + .returns(true, AzureNamedImage::getIsCustom) + .returns(NOT_AVAILABLE, AzureNamedImage::getOffer) + .returns(NOT_AVAILABLE, AzureNamedImage::getSku) + .returns(NOT_AVAILABLE, AzureNamedImage::getVersion) + .returns(CUSTOM_IMAGE_PATH, AzureNamedImage::getUri); + + AzureNamedImage resultVmImage = list.get(1); + assertThat(resultVmImage) + .isNotNull() + .returns( + OFFER + "-" + SKU + "(" + PUBLISHER + "_" + VERSION + ")", + AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(NOT_AVAILABLE, AzureNamedImage::getOstype) + .returns(false, AzureNamedImage::getIsCustom) + .returns(OFFER, AzureNamedImage::getOffer) + .returns(SKU, AzureNamedImage::getSku) + .returns(VERSION, AzureNamedImage::getVersion) + .returns(NOT_AVAILABLE, AzureNamedImage::getUri); + + verify(this.accountCredentialsProvider, times(1)).getAll(); + verify(this.cache, times(2)).filterIdentifiers(anyString(), anyString()); + verify(this.cache, times(2)).getAll(namespace.capture(), anyList(), any(CacheFilter.class)); + List keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace) + .containsOnly( + Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs(), Keys.Namespace.AZURE_VMIMAGES.getNs()); + } + + @Test + @DisplayName( + "Should return only AzureVMImages when searched by account, region and name AND no custom/managed/yaml images found") + void shouldReturnOnlyAzureVMImages() { + + // prepare + + String vmImageKey = + Keys.getVMImageKey( + azureCloudProvider, + AZURE_ACCOUNT, + REGION, + OFFER + "-" + SKU, + VERSION + "-" + PUBLISHER); + + Map azureVmImageAsJson = getAzureVMImageAsJsonMap(); + + CacheData vmImage = + new DefaultCacheData(vmImageKey, Map.of("vmimage", azureVmImageAsJson), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of()); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyString())) + .willReturn(List.of()); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_VMIMAGES.getNs()), anyString())) + .willReturn(List.of(vmImageKey)); + given(accountCredentialsProvider.getAll()).willReturn(Set.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), 
anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_VMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(vmImage)); + + given(accountCredentialsProvider.getAll()).willReturn(Set.of()); + // act + List list = + lookupController.getVMImage(AZURE_ACCOUNT, REGION, OFFER + "-" + SKU); + + // assert + ArgumentCaptor namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(1); + + AzureNamedImage resultVmImage = list.get(0); + assertThat(resultVmImage) + .isNotNull() + .returns( + OFFER + "-" + SKU + "(" + PUBLISHER + "_" + VERSION + ")", + AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(NOT_AVAILABLE, AzureNamedImage::getOstype) + .returns(false, AzureNamedImage::getIsCustom) + .returns(OFFER, AzureNamedImage::getOffer) + .returns(SKU, AzureNamedImage::getSku) + .returns(VERSION, AzureNamedImage::getVersion) + .returns(NOT_AVAILABLE, AzureNamedImage::getUri); + + verify(this.accountCredentialsProvider, times(1)).getAll(); + verify(this.cache, times(3)).filterIdentifiers(anyString(), anyString()); + verify(this.cache, times(3)).getAll(namespace.capture(), anyList(), any(CacheFilter.class)); + List keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace) + .containsOnly( + Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs(), + Keys.Namespace.AZURE_VMIMAGES.getNs(), + Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()); + } + + @Test + @DisplayName( + "When managed image flag is set to true should return the AzureManagedImage if it is available in the cache") + void shouldReturnManagedImage() { + + // prepare + LookupOptions lookupOptions = getLookupOptions(true, true, false); + + String key = + Keys.getManagedVMImageKey( + azureCloudProvider, AZURE_ACCOUNT, REGION, RESOURCE_GROUP, VM_IMAGE_NAME, OS_TYPE); + + Map objectAsMap = getManagedImageAsJsonMap(); + + CacheData c = new DefaultCacheData(key, Map.of("vmimage", objectAsMap), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of()); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyString())) + .willReturn(List.of(key)); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(c)); + + // act + List list = lookupController.list(lookupOptions); + + // assert + ArgumentCaptor namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(1); + AzureNamedImage namedImage = list.get(0); + assertThat(namedImage) + .isNotNull() + .returns(VM_IMAGE_NAME, AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(OS_TYPE, AzureNamedImage::getOstype) + .returns(true, AzureNamedImage::getIsCustom) + .returns(NOT_AVAILABLE, AzureNamedImage::getOffer) + .returns(NOT_AVAILABLE, AzureNamedImage::getSku) + .returns(NOT_AVAILABLE, AzureNamedImage::getVersion) + .returns(NOT_AVAILABLE, AzureNamedImage::getUri); + + verify(cache, times(2)).getAll(namespace.capture(), anyList(), 
any(CacheFilter.class)); + List keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace) + .containsOnly( + Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs(), + Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()); + } + + @Test + @DisplayName( + "Should return managed images when searched by account, region and name AND no custom image and no yaml image configured") + void shouldReturnOnlyManagedImageWhenNoCustomAndYamlImagesConfigured() { + + // prepare + + String key = + Keys.getManagedVMImageKey( + azureCloudProvider, AZURE_ACCOUNT, REGION, RESOURCE_GROUP, VM_IMAGE_NAME, OS_TYPE); + + Map objectAsMap = getManagedImageAsJsonMap(); + + CacheData c = new DefaultCacheData(key, Map.of("vmimage", objectAsMap), Map.of()); + + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyString())) + .willReturn(List.of()); + given(cache.filterIdentifiers(eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyString())) + .willReturn(List.of(key)); + given(accountCredentialsProvider.getAll()).willReturn(Set.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of()); + given( + cache.getAll( + eq(Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()), anyList(), any(CacheFilter.class))) + .willReturn(List.of(c)); + + // act + List list = lookupController.getVMImage(AZURE_ACCOUNT, REGION, VM_IMAGE_NAME); + + // assert + ArgumentCaptor namespace = ArgumentCaptor.forClass(String.class); + assertThat(list).isNotEmpty().hasSize(1); + AzureNamedImage namedImage = list.get(0); + assertThat(namedImage) + .isNotNull() + .returns(VM_IMAGE_NAME, AzureNamedImage::getImageName) + .returns(REGION, AzureNamedImage::getRegion) + .returns(AZURE_ACCOUNT, AzureNamedImage::getAccount) + .returns(OS_TYPE, AzureNamedImage::getOstype) + .returns(true, AzureNamedImage::getIsCustom) + .returns(NOT_AVAILABLE, AzureNamedImage::getOffer) + .returns(NOT_AVAILABLE, AzureNamedImage::getSku) + .returns(NOT_AVAILABLE, AzureNamedImage::getVersion) + .returns(NOT_AVAILABLE, AzureNamedImage::getUri); + + verify(cache, times(2)).filterIdentifiers(anyString(), anyString()); + verify(cache, times(2)).getAll(namespace.capture(), anyList(), any(CacheFilter.class)); + List keyNamespace = namespace.getAllValues(); + assertThat(keyNamespace) + .containsOnly( + Keys.Namespace.AZURE_CUSTOMVMIMAGES.getNs(), + Keys.Namespace.AZURE_MANAGEDIMAGES.getNs()); + } + + private static Map getManagedImageAsJsonMap() { + var namedImage = new AzureManagedVMImage(); + namedImage.setResourceGroup(RESOURCE_GROUP); + namedImage.setRegion(REGION); + namedImage.setOsType(OS_TYPE); + namedImage.setName(VM_IMAGE_NAME); + return objectMapper.convertValue(namedImage, new TypeReference<>() {}); + } + + private static Map getVmCustomImageAsJsonMap(String name) { + var namedImage = new AzureCustomVMImage(); + namedImage.setUri(CUSTOM_IMAGE_PATH); + namedImage.setRegion(REGION); + namedImage.setOsType(OS_TYPE); + namedImage.setName(name); + return objectMapper.convertValue(namedImage, new TypeReference<>() {}); + } + + private static Map getAzureVMImageAsJsonMap() { + return objectMapper.convertValue(getAzureVmImage(), new TypeReference<>() {}); + } + + @NotNull + private static LookupOptions getLookupOptions( + boolean managedImage, boolean customOnly, boolean configOnly) { + var lookupOptions = new LookupOptions(); + lookupOptions.setAccount(AZURE_ACCOUNT); + lookupOptions.setRegion(REGION); + lookupOptions.setManagedImages(managedImage); + 
lookupOptions.setCustomOnly(customOnly); + lookupOptions.setConfigOnly(configOnly); + return lookupOptions; + } + + private static AzureVMImage getAzureVmImage() { + AzureVMImage azureVMImage = new AzureVMImage(); + azureVMImage.setOffer(OFFER); + azureVMImage.setSku(SKU); + azureVMImage.setPublisher(PUBLISHER); + azureVMImage.setVersion(VERSION); + return azureVMImage; + } +} diff --git a/clouddriver-bom/clouddriver-bom.gradle b/clouddriver-bom/clouddriver-bom.gradle new file mode 100644 index 00000000000..ff6cd417fc5 --- /dev/null +++ b/clouddriver-bom/clouddriver-bom.gradle @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +apply plugin: "java-platform" +apply plugin: "maven-publish" + +javaPlatform { + allowDependencies() +} + +dependencies { + api(enforcedPlatform("io.spinnaker.kork:kork-bom:$korkVersion")) + + constraints { + api("io.spinnaker.fiat:fiat-api:$fiatVersion") + api("io.spinnaker.fiat:fiat-core:$fiatVersion") + + rootProject + .subprojects + .findAll { it != project } + .each { api(project(it.path)) } + } +} diff --git a/clouddriver-cloudfoundry/clouddriver-cloudfoundry.gradle b/clouddriver-cloudfoundry/clouddriver-cloudfoundry.gradle index 07a35f3e588..d045a3c8a6f 100644 --- a/clouddriver-cloudfoundry/clouddriver-cloudfoundry.gradle +++ b/clouddriver-cloudfoundry/clouddriver-cloudfoundry.gradle @@ -1,22 +1,57 @@ -test { - useJUnitPlatform() -} +apply plugin: 'com.google.protobuf' -dependencies { - compile project(":clouddriver-artifacts") - compile project(":clouddriver-core") +ext { + protobufVersion = '3.21.12' +} - compile spinnaker.dependency('frigga') - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:$protobufVersion" + } +} - compile spinnaker.dependency('korkArtifacts') - compile spinnaker.dependency('lombok') +dependencies { + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + implementation project(":cats:cats-core") + implementation project(":clouddriver-docker") - spinnaker.group('retrofitDefault') + implementation "org.apache.groovy:groovy" - testCompile('org.junit.jupiter:junit-jupiter-api:5.2.0') - testRuntime('org.junit.jupiter:junit-jupiter-engine:5.2.0') + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "org.apache.commons:commons-lang3" + implementation "com.fasterxml.jackson.datatype:jackson-datatype-jsr310" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml" + implementation "com.github.ben-manes.caffeine:guava" + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-moniker" + implementation 
"com.netflix.spectator:spectator-api" + implementation "com.squareup.okhttp3:okhttp" + implementation "com.squareup.okhttp3:okhttp-urlconnection" + implementation "com.squareup.retrofit2:converter-jackson" + implementation "com.squareup.retrofit2:retrofit" + implementation "com.squareup.retrofit2:retrofit-mock" + implementation "com.squareup.retrofit2:converter-protobuf:2.8.1" + implementation "commons-io:commons-io" + implementation "io.github.resilience4j:resilience4j-retry" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.springframework.cloud:spring-cloud-context" + implementation "org.yaml:snakeyaml" + implementation "com.google.protobuf:protobuf-java" + implementation "com.google.protobuf:protobuf-java-util" + implementation "commons-fileupload:commons-fileupload:1.4" + implementation "io.micrometer:micrometer-core:1.6.4" - compile "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${spinnaker.version("jackson")}" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation "com.github.tomakehurst:wiremock-jre8-standalone" + testImplementation "ru.lanwen.wiremock:wiremock-junit5:1.2.0" } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryCloudProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryCloudProvider.java index fb77de44ee8..2e221aa5ad3 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryCloudProvider.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryCloudProvider.java @@ -17,9 +17,8 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry; import com.netflix.spinnaker.clouddriver.core.CloudProvider; -import org.springframework.stereotype.Component; - import java.lang.annotation.Annotation; +import org.springframework.stereotype.Component; @Component public class CloudFoundryCloudProvider implements CloudProvider { @@ -39,4 +38,4 @@ public String getDisplayName() { public Class getOperationAnnotationType() { return CloudFoundryOperation.class; } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryOperation.java index de4a3ebf844..0265a8af15b 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryOperation.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/CloudFoundryOperation.java @@ -23,6 +23,6 @@ @Target(ElementType.TYPE) @Retention(RetentionPolicy.RUNTIME) -@interface CloudFoundryOperation { +public @interface CloudFoundryOperation { String value(); } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/artifacts/CloudFoundryArtifactCredentials.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/artifacts/CloudFoundryArtifactCredentials.java new file mode 100644 index 00000000000..d10aeddbbe1 --- /dev/null +++ 
b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/artifacts/CloudFoundryArtifactCredentials.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import java.io.InputStream;
+import javax.annotation.Nonnull;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@AllArgsConstructor
+@Getter
+public class CloudFoundryArtifactCredentials implements ArtifactCredentials {
+  public static final String ARTIFACTS_TYPE = "artifacts/cloudfoundry";
+  public static final String TYPE = "cloudfoundry/app";
+
+  private final String name = "cloudfoundry";
+  private final CloudFoundryClient client;
+
+  @Override
+  @Nonnull
+  public ImmutableList<String> getTypes() {
+    return ImmutableList.of(TYPE);
+  }
+
+  @Override
+  @Nonnull
+  public InputStream download(@Nonnull Artifact artifact) {
+    String packageId = client.getApplications().findCurrentPackageIdByAppId(artifact.getUuid());
+    return client.getApplications().downloadPackageBits(packageId);
+  }
+
+  @Override
+  public String getType() {
+    return ARTIFACTS_TYPE;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/CacheRepository.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/CacheRepository.java
new file mode 100644
index 00000000000..95c7685fea5
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/CacheRepository.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.cache;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.*;
+import static java.util.Collections.emptySet;
+import static java.util.stream.Collectors.toSet;
+
+import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
+import com.fasterxml.jackson.databind.introspect.AnnotatedClass;
+import com.fasterxml.jackson.databind.introspect.JacksonAnnotationIntrospector;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*;
+import java.util.Collection;
+import java.util.Optional;
+import java.util.Set;
+import org.springframework.stereotype.Repository;
+
+@Repository
+public class CacheRepository {
+  private final ObjectMapper objectMapper =
+      new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION);
+
+  private final Cache cacheView;
+
+  public CacheRepository(Cache cacheView) {
+    this.cacheView = cacheView;
+    this.objectMapper
+        .setConfig(objectMapper.getSerializationConfig().withView(Views.Cache.class))
+        .setAnnotationIntrospector(
+            new JacksonAnnotationIntrospector() {
+              @Override
+              public JsonPOJOBuilder.Value findPOJOBuilderConfig(AnnotatedClass ac) {
+                return new JsonPOJOBuilder.Value("build", "");
+              }
+            });
+  }
+
+  public Set<CloudFoundrySpace> findSpacesByAccount(String account) {
+    return cacheView
+        .getAll(
+            SPACES.getNs(),
+            cacheView.filterIdentifiers(SPACES.getNs(), Keys.getAllSpacesKey(account)))
+        .stream()
+        .map(
+            spaceData ->
+                objectMapper.convertValue(
+                    spaceData.getAttributes().get("resource"), CloudFoundrySpace.class))
+        .collect(toSet());
+  }
+
+  public Set<CloudFoundryApplication> findApplicationsByKeys(
+      Collection<String> keys, Detail detail) {
+    return cacheView.getAll(APPLICATIONS.getNs(), keys, detail.appFilter()).stream()
+        .map(appData -> applicationFromCacheData(appData, detail))
+        .collect(toSet());
+  }
+
+  public Optional<CloudFoundryApplication> findApplicationByKey(String key, Detail detail) {
+    return Optional.ofNullable(cacheView.get(APPLICATIONS.getNs(), key, detail.appFilter()))
+        .map(appData -> applicationFromCacheData(appData, detail));
+  }
+
+  private CloudFoundryApplication applicationFromCacheData(CacheData appData, Detail detail) {
+    CloudFoundryApplication app =
+        objectMapper.convertValue(
+            appData.getAttributes().get("resource"), CloudFoundryApplication.class);
+    if (detail.equals(Detail.NONE)) {
+      return app.withClusters(emptySet());
+    }
+    return app.withClusters(
+        findClustersByKeys(appData.getRelationships().get(CLUSTERS.getNs()), Detail.NONE));
+  }
+
+  public Set<CloudFoundryCluster> findClustersByKeys(Collection<String> keys, Detail detail) {
+    return cacheView.getAll(CLUSTERS.getNs(), keys, detail.clusterFilter()).stream()
+        .map(clusterData -> clusterFromCacheData(clusterData, detail))
+        .collect(toSet());
+  }
+
+  public Optional<CloudFoundryCluster> findClusterByKey(String key, Detail detail) {
+    return Optional.ofNullable(cacheView.get(CLUSTERS.getNs(), key, detail.clusterFilter()))
+        .map(clusterData -> clusterFromCacheData(clusterData, detail));
+  }
+
+  private CloudFoundryCluster clusterFromCacheData(CacheData clusterData, Detail detail) {
+    CloudFoundryCluster cluster =
+        objectMapper.convertValue(
+            clusterData.getAttributes().get("resource"), CloudFoundryCluster.class);
+    if (detail.equals(Detail.NONE)) {
+      return cluster.withServerGroups(emptySet());
+    }
+
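+    // detail.deep() below keeps FULL at FULL but degrades NAMES_ONLY to NONE,
+    // so nested server groups skip their own relationship fan-out. Hypothetical
+    // usage (account/app/cluster values assumed):
+    //   repository.findClusterByKey(
+    //       Keys.getClusterKey("my-account", "myapp", "myapp-stack"), Detail.NAMES_ONLY);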
+    return cluster.withServerGroups(
+        findServerGroupsByKeys(
+            clusterData.getRelationships().get(SERVER_GROUPS.getNs()), detail.deep()));
+  }
+
+  public Set<CloudFoundryServerGroup> findServerGroupsByKeys(
+      Collection<String> keys, Detail detail) {
+    return cacheView.getAll(SERVER_GROUPS.getNs(), keys, detail.serverGroupFilter()).stream()
+        .map(serverGroupData -> serverGroupFromCacheData(serverGroupData, detail))
+        .collect(toSet());
+  }
+
+  public Optional<CloudFoundryServerGroup> findServerGroupByKey(String key, Detail detail) {
+    return Optional.ofNullable(
+            cacheView.get(SERVER_GROUPS.getNs(), key, detail.serverGroupFilter()))
+        .map(serverGroupData -> serverGroupFromCacheData(serverGroupData, detail));
+  }
+
+  private CloudFoundryServerGroup serverGroupFromCacheData(
+      CacheData serverGroupData, Detail detail) {
+    CloudFoundryServerGroup serverGroup =
+        objectMapper.convertValue(
+            serverGroupData.getAttributes().get("resource"), CloudFoundryServerGroup.class);
+    if (detail.equals(Detail.NONE)) {
+      return serverGroup
+          .withLoadBalancerNames(emptySet())
+          .withInstances(
+              findInstancesByKeys(serverGroupData.getRelationships().get(INSTANCES.getNs())));
+    }
+    return serverGroup
+        .withLoadBalancerNames(
+            findLoadBalancersByKeys(
+                    serverGroupData.getRelationships().get(LOAD_BALANCERS.getNs()), Detail.NONE)
+                .stream()
+                .map(CloudFoundryLoadBalancer::getName)
+                .collect(toSet()))
+        .withInstances(
+            findInstancesByKeys(serverGroupData.getRelationships().get(INSTANCES.getNs())));
+  }
+
+  public Set<CloudFoundryLoadBalancer> findLoadBalancersByKeys(
+      Collection<String> keys, Detail detail) {
+    return cacheView.getAll(LOAD_BALANCERS.getNs(), keys, detail.loadBalancerFilter()).stream()
+        .map(lbData -> loadBalancerFromCacheData(lbData, detail))
+        .collect(toSet());
+  }
+
+  private CloudFoundryLoadBalancer loadBalancerFromCacheData(CacheData lbData, Detail detail) {
+    CloudFoundryLoadBalancer loadBalancer =
+        objectMapper.convertValue(
+            lbData.getAttributes().get("resource"), CloudFoundryLoadBalancer.class);
+    if (detail.equals(Detail.NONE)) {
+      return loadBalancer;
+    }
+
+    // the server groups populated here carry an empty set of load balancer names to avoid a
+    // cyclic call back to findLoadBalancersByKeys
+    return loadBalancer.withMappedApps(
+        findServerGroupsByKeys(lbData.getRelationships().get(SERVER_GROUPS.getNs()), Detail.NONE));
+  }
+
+  public Set<CloudFoundryLoadBalancer> findLoadBalancersByClusterKeys(
+      Collection<String> keys, Detail detail) {
+    Set<String> serverGroupKeys =
+        cacheView.getAll(CLUSTERS.getNs(), keys).stream()
+            .flatMap(cl -> cl.getRelationships().get(SERVER_GROUPS.getNs()).stream())
+            .collect(toSet());
+
+    Set<String> loadBalancerKeys =
+        cacheView.getAll(SERVER_GROUPS.getNs(), serverGroupKeys).stream()
+            .flatMap(
+                sg ->
+                    sg.getRelationships().get(LOAD_BALANCERS.getNs()).stream()
+                        .map(
+                            lb ->
+                                Keys.getLoadBalancerKey(
+                                    objectMapper
+                                        .convertValue(
+                                            sg.getAttributes().get("resource"),
+                                            CloudFoundryServerGroup.class)
+                                        .getAccount(),
+                                    lb)))
+            .collect(toSet());
+
+    return findLoadBalancersByKeys(
+        loadBalancerKeys.stream()
+            .flatMap(lb -> cacheView.filterIdentifiers(LOAD_BALANCERS.getNs(), lb).stream())
+            .collect(toSet()),
+        detail);
+  }
+
+  public Set<CloudFoundryInstance> findInstancesByKeys(Collection<String> keys) {
+    return cacheView.getAll(INSTANCES.getNs(), keys).stream()
+        .map(
+            instanceData ->
+                objectMapper.convertValue(
+                    instanceData.getAttributes().get("resource"), CloudFoundryInstance.class))
+        .collect(toSet());
+  }
+
+  public Optional<CloudFoundryInstance> findInstanceByKey(String key) {
+    return Optional.ofNullable(cacheView.get(INSTANCES.getNs(), key))
+        .map(
+            instanceData ->
+                objectMapper.convertValue(
+                    instanceData.getAttributes().get("resource"), CloudFoundryInstance.class));
+  }
+
+  public enum Detail {
+    /** Don't deserialize any relationships. */
+    NONE,
+
+    /** Only deserialize names. */
+    NAMES_ONLY,
+
+    /** Fully rehydrate the model. */
+    FULL;
+
+    public Detail deep() {
+      switch (this) {
+        case FULL:
+          return FULL;
+        case NAMES_ONLY:
+        case NONE:
+        default:
+          return NONE;
+      }
+    }
+
+    public RelationshipCacheFilter appFilter() {
+      switch (this) {
+        case FULL:
+        case NAMES_ONLY:
+          return RelationshipCacheFilter.include(CLUSTERS.getNs());
+        case NONE:
+        default:
+          return RelationshipCacheFilter.none();
+      }
+    }
+
+    public RelationshipCacheFilter clusterFilter() {
+      switch (this) {
+        case FULL:
+        case NAMES_ONLY:
+          return RelationshipCacheFilter.include(SERVER_GROUPS.getNs());
+        case NONE:
+        default:
+          return RelationshipCacheFilter.none();
+      }
+    }
+
+    public RelationshipCacheFilter serverGroupFilter() {
+      switch (this) {
+        case FULL:
+        case NAMES_ONLY:
+          return RelationshipCacheFilter.include(INSTANCES.getNs(), LOAD_BALANCERS.getNs());
+        case NONE:
+        default:
+          // we always populate instance data on server groups, regardless of detail level
+          return RelationshipCacheFilter.include(INSTANCES.getNs());
+      }
+    }
+
+    public RelationshipCacheFilter loadBalancerFilter() {
+      switch (this) {
+        case FULL:
+        case NAMES_ONLY:
+          return RelationshipCacheFilter.include(SERVER_GROUPS.getNs());
+        case NONE:
+        default:
+          return RelationshipCacheFilter.none();
+      }
+    }
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/Keys.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/Keys.java
index 164aee99cb2..7975c51efa2
--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/Keys.java
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/Keys.java
@@ -16,27 +16,27 @@
 package com.netflix.spinnaker.clouddriver.cloudfoundry.cache;

+import static com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider.ID;
+import static java.util.Collections.emptyMap;
+
 import com.netflix.spinnaker.clouddriver.cache.KeyParser;
 import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Nullable;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-
-import static com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider.ID;
-import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.*;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import lombok.Getter;
+import org.springframework.stereotype.Component;

 @Component("CloudFoundryInfraKeys")
 public class Keys implements KeyParser {
-
-  @Nullable
-  public static Map<String, String> parse(String key) {
+  public static Optional<Map<String, String>> parse(String key) {
     String[] parts = key.split(":");
     if (parts.length < 2 || !parts[0].equals(ID)) {
-      return null;
+      return Optional.empty();
     }

     Map<String, String> result = new HashMap<>();
@@ -65,67 +65,121 @@ public static Map<String, String> parse(String key) {
       result.put("name", parts[4]);
     } else if (Namespace.SERVER_GROUPS.ns.equals(type)) {
       result.put("account", parts[2]);
-      result.put("name", parts[3]);
+      result.put("serverGroup", parts[3]);
       result.put("region", parts[4]);
     } else {
-      return null;
+      return Optional.empty();
     }
-    return result;
+    return Optional.of(result);
   }

   public
static String getApplicationKey(String app) { - return ID + ":" + APPLICATIONS + ":" + app; + return ID + ":" + Namespace.APPLICATIONS + ":" + app.toLowerCase(); + } + + public static String getSpaceKey(String account, String region) { + return ID + ":" + Namespace.SPACES + ":" + account + ":" + region; + } + + public static String getAllSpacesKey(String account) { + return ID + ":" + Namespace.SPACES + ":" + account + ":*"; } public static String getAllLoadBalancers() { - return ID + ":" + LOAD_BALANCERS + ":*"; + return ID + ":" + Namespace.LOAD_BALANCERS + ":*"; } public static String getLoadBalancerKey(String account, CloudFoundryLoadBalancer lb) { - return ID + - ":" + LOAD_BALANCERS + - ":" + account + - ":" + lb.getId() + - ":" + (lb.getHost() != null ? lb.getHost() : "") + - ":" + lb.getDomain().getName() + - ":" + (lb.getPath() != null ? lb.getPath() : "") + - ":" + (lb.getPort() != null ? lb.getPort() : -1) + - ":" + lb.getRegion(); + return ID + + ":" + + Namespace.LOAD_BALANCERS + + ":" + + account + + ":" + + lb.getId() + + ":" + + (lb.getHost() != null ? lb.getHost() : "") + + ":" + + lb.getDomain().getName() + + ":" + + (lb.getPath() != null ? lb.getPath() : "") + + ":" + + (lb.getPort() != null ? lb.getPort() : -1) + + ":" + + lb.getRegion(); + } + + public static String getLoadBalancerKey(String account, String guid) { + return ID + ":" + Namespace.LOAD_BALANCERS + ":" + account + ":" + guid + ":*"; + } + + public static String getLoadBalancerKey(String account, String uri, String region) { + Pattern VALID_ROUTE_REGEX = + Pattern.compile("^([a-zA-Z0-9_-]+)\\.([a-zA-Z0-9_.-]+)(:[0-9]+)?([/a-zA-Z0-9_-]+)?$"); + Matcher matcher = VALID_ROUTE_REGEX.matcher(uri); + if (matcher.find()) { + String host = Optional.ofNullable(matcher.group(1)).orElse("*"); + String domain = Optional.ofNullable(matcher.group(2)).orElse("*"); + String port = Optional.ofNullable(matcher.group(3)).orElse("-1"); + String path = Optional.ofNullable(matcher.group(4)).orElse(""); + return ID + + ":" + + Namespace.LOAD_BALANCERS + + ":" + + account + + ":*:" + + host + + ":" + + domain + + ":" + + path + + ":" + + port + + ":" + + region; + } else { + return null; + } } public static String getClusterKey(String account, String app, String name) { - return ID + - ":" + CLUSTERS + - ":" + account + - ":" + app + - ":" + name; + return ID + + ":" + + Namespace.CLUSTERS + + ":" + + account + + ":" + + app.toLowerCase() + + ":" + + name.toLowerCase(); } public static String getServerGroupKey(String account, String name, String region) { - return ID + - ":" + SERVER_GROUPS + - ":" + account + - ":" + name + - ":" + region; + return ID + + ":" + + Namespace.SERVER_GROUPS + + ":" + + account + + ":" + + name.toLowerCase() + + ":" + + region; } public static String getInstanceKey(String account, String instanceName) { - return ID + - ":" + INSTANCES + - ":" + account + - ":" + instanceName; + return ID + ":" + Namespace.INSTANCES + ":" + account + ":" + instanceName; } @Override public String getCloudProvider() { - // This is intentionally 'aws'. See in todos in SearchController#search for why. + // This is intentionally 'aws'. Refer to todos in SearchController#search for why. 
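+    // For reference (hypothetical values; assumes ID resolves to "cloudfoundry"
+    // and Namespace renders as its lowercase ns value): the route-based load
+    // balancer key builder above maps "myapp.example.com/api" for account
+    // "my-account" and region "my-region" to
+    // "cloudfoundry:loadBalancers:my-account:*:myapp:example.com:/api:-1:my-region".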
    return "aws";
  }

  @Override
  public Map<String, String> parseKey(String key) {
-    return parse(key);
+    return parse(key).orElse(emptyMap());
  }

  @Override
@@ -138,12 +192,15 @@ public Boolean canParseField(String field) {
    return false;
  }

-  enum Namespace {
+  @Getter
+  public enum Namespace {
    APPLICATIONS("applications"),
-    LOAD_BALANCERS("loadBalancers"),
    CLUSTERS("clusters"),
+    INSTANCES("instances"),
+    LOAD_BALANCERS("loadBalancers"),
+    ON_DEMAND("onDemand"),
    SERVER_GROUPS("serverGroups"),
-    INSTANCES("instances");
+    SPACES("spaces");

    final String ns;
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/ResourceCacheData.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/ResourceCacheData.java
index a3c3d72690b..8b236d48d7d
--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/ResourceCacheData.java
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/ResourceCacheData.java
@@ -16,26 +16,29 @@
 package com.netflix.spinnaker.clouddriver.cloudfoundry.cache;

+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import com.netflix.spinnaker.cats.cache.CacheData;
-import lombok.Getter;
-
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.Map;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;

 @Getter
-class ResourceCacheData implements CacheData {
+@EqualsAndHashCode
+public class ResourceCacheData implements CacheData {
  final String id;
  final Map<String, Collection<String>> relationships;
  final Map<String, Object> attributes;
  final int ttlSeconds = -1;

-  ResourceCacheData(String id, Object resource, Map<String, Collection<String>> relationships) {
+  @JsonCreator
+  public ResourceCacheData(
+      @JsonProperty("id") String id,
+      @JsonProperty("attributes") Map<String, Object> attributes,
+      @JsonProperty("relationships") Map<String, Collection<String>> relationships) {
    this.id = id;
-
-    this.attributes = new HashMap<>();
-    this.attributes.put("resource", resource);
-
+    this.attributes = attributes;
    this.relationships = relationships;
  }
}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Applications.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Applications.java
new file mode 100644
index 00000000000..dc672ae0423
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Applications.java
@@ -0,0 +1,754 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ErrorDescription.Code.NOT_AUTHORIZED; +import static java.util.Arrays.asList; +import static java.util.Collections.*; +import static java.util.Optional.ofNullable; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ApplicationService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ApplicationEnv; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.MapRoute; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceBinding; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Package; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Process; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*; +import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import java.io.File; +import java.io.InputStream; +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.extern.slf4j.Slf4j; +import okhttp3.MediaType; +import okhttp3.MultipartBody; +import okhttp3.RequestBody; +import okhttp3.ResponseBody; +import org.apache.commons.lang3.StringUtils; + +@Slf4j +public class Applications { + private final String account; + private final String appsManagerUri; + private final String metricsUri; + private final ApplicationService api; + private final Spaces spaces; + private final Processes processes; + private final Integer resultsPerPage; + private final boolean onlySpinnakerManaged; + private final ForkJoinPool forkJoinPool; + private final LoadingCache serverGroupCache; + + public Applications( + String account, + String appsManagerUri, + String metricsUri, + ApplicationService api, + Spaces spaces, + Processes processes, + Integer resultsPerPage, + boolean onlySpinnakerManaged, + ForkJoinPool forkJoinPool, + CloudFoundryConfigurationProperties.LocalCacheConfig localCacheConfig) { + this.account = account; + this.appsManagerUri = appsManagerUri; + this.metricsUri = metricsUri; + this.api = api; + this.spaces = spaces; + this.processes = processes; + this.resultsPerPage = resultsPerPage; + this.onlySpinnakerManaged = onlySpinnakerManaged; + this.forkJoinPool = forkJoinPool; + + CacheBuilder builder = CacheBuilder.newBuilder(); + if (localCacheConfig.getApplicationsAccessExpirySeconds() >= 0) { + builder.expireAfterAccess( + localCacheConfig.getApplicationsAccessExpirySeconds(), TimeUnit.SECONDS); + } + if (localCacheConfig.getApplicationsWriteExpirySeconds() >= 0) { + builder.expireAfterWrite( + 
localCacheConfig.getApplicationsWriteExpirySeconds(), TimeUnit.SECONDS); + } + + this.serverGroupCache = + builder.build( + new CacheLoader<>() { + @Override + public CloudFoundryServerGroup load(@Nonnull String guid) + throws ResourceNotFoundException { + return safelyCall(() -> api.findById(guid)) + .map(Applications.this::map) + .flatMap(sg -> sg) + .orElseThrow(ResourceNotFoundException::new); + } + }); + } + + @Nullable + public CloudFoundryServerGroup findById(String guid) { + try { + return serverGroupCache.get(guid); + } catch (ExecutionException e) { + if (e.getCause() instanceof ResourceNotFoundException) { + return null; + } + throw new CloudFoundryApiException(e.getCause(), "Unable to find server group by id"); + } + } + + public List all(List spaceGuids) { + log.debug("Listing all applications from account {}", this.account); + + String spaceGuidsQ = + spaceGuids == null || spaceGuids.isEmpty() ? null : String.join(",", spaceGuids); + + List newCloudFoundryAppList = + collectPages("applications", page -> api.all(page, resultsPerPage, null, spaceGuidsQ)); + + log.debug( + "Fetched {} total apps from foundation account {}", + newCloudFoundryAppList.size(), + this.account); + + List cacheableApplications = + newCloudFoundryAppList.stream() + .filter(this::shouldCacheApplication) + .collect(Collectors.toUnmodifiableList()); + + List availableAppIds = + cacheableApplications.stream().map(Application::getGuid).collect(toList()); + + long invalidatedServerGroups = + serverGroupCache.asMap().keySet().parallelStream() + .filter(appGuid -> !availableAppIds.contains(appGuid)) + .peek(appGuid -> log.trace("Evicting the following SG with id '{}'", appGuid)) + .peek(serverGroupCache::invalidate) + .count(); + + log.debug( + "Evicted {} serverGroups from the cache that aren't on the '{}' foundation anymore", + invalidatedServerGroups, + this.account); + + // if the update time doesn't match then we need to update the cache + // if the app is not found in the cache we need to process with `map` and update the cache + try { + forkJoinPool + .submit( + () -> + cacheableApplications.parallelStream() + .filter( + app -> { + CloudFoundryServerGroup cachedApp = findById(app.getGuid()); + if (cachedApp != null) { + if (!cachedApp + .getUpdatedTime() + .equals(app.getUpdatedAt().toInstant().toEpochMilli())) { + log.trace( + "App '{}' cached version is out of date on foundation '{}'", + app.getName(), + this.account); + return true; + } else { + return false; + } + } else { + log.trace( + "App '{}' not found in cache for foundation '{}'", + app.getName(), + this.account); + return true; + } + }) + .map(this::map) + .filter(Optional::isPresent) + .map(Optional::get) + .forEach(sg -> serverGroupCache.put(sg.getId(), sg))) + .get(); + + forkJoinPool + .submit( + () -> + // execute health check on instances, set number of available instances and health + // status + cacheableApplications.parallelStream() + .forEach( + a -> + serverGroupCache.put( + a.getGuid(), checkHealthStatus(findById(a.getGuid()), a)))) + .get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + Map> serverGroupsByClusters = new HashMap<>(); + Map> clustersByApps = new HashMap<>(); + + for (CloudFoundryServerGroup serverGroup : serverGroupCache.asMap().values()) { + Names names = Names.parseName(serverGroup.getName()); + + serverGroupsByClusters + .computeIfAbsent(names.getCluster(), clusterName -> new HashSet<>()) + .add(serverGroup); + + clustersByApps + .computeIfAbsent(names.getApp(), appName -> new HashSet<>()) + 
.add(names.getCluster()); + } + + return clustersByApps.entrySet().stream() + .map( + clustersByApp -> + CloudFoundryApplication.builder() + .name(clustersByApp.getKey()) + .clusters( + clustersByApp.getValue().stream() + .map( + clusterName -> + CloudFoundryCluster.builder() + .accountName(account) + .name(clusterName) + .serverGroups(serverGroupsByClusters.get(clusterName)) + .build()) + .collect(toSet())) + .build()) + .collect(toList()); + } + + @Nullable + public CloudFoundryServerGroup findServerGroupByNameAndSpaceId(String name, String spaceId) { + Optional result = + safelyCall(() -> api.all(null, 1, singletonList(name), spaceId)) + .flatMap( + page -> + page.getResources().stream() + .findFirst() + .map(this::map) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Not authorized error retrieving details for this Server Group"))); + result.ifPresent(sg -> serverGroupCache.put(sg.getId(), sg)); + return result.orElse(null); + } + + @Nullable + public String findServerGroupId(String name, String spaceId) { + return serverGroupCache.asMap().values().stream() + .filter( + serverGroup -> + serverGroup.getName().equalsIgnoreCase(name) + && serverGroup.getSpace().getId().equals(spaceId)) + .findFirst() + .map(CloudFoundryServerGroup::getId) + .orElseGet( + () -> + safelyCall(() -> api.all(null, 1, singletonList(name), spaceId)) + .flatMap( + page -> + page.getResources().stream() + .findFirst() + .map(this::map) + .filter(Optional::isPresent) + .map(Optional::get) + .map( + serverGroup -> { + serverGroupCache.put(serverGroup.getId(), serverGroup); + return serverGroup; + }) + .map(CloudFoundryServerGroup::getId)) + .orElse(null)); + } + + private boolean shouldCacheApplication(Application application) { + Names names = Names.parseName(application.getName()); + + if (names.getCluster() == null) { + log.debug( + "Skipping app '{}' from foundation '{}' because the name isn't following the frigga naming schema.", + application.getName(), + this.account); + return false; + } + + if (onlySpinnakerManaged && names.getSequence() == null) { + log.debug( + "Skipping app '{}' from foundation '{}' because onlySpinnakerManaged is true and it has no version.", + application.getName(), + this.account); + return false; + } + + return true; + } + + private Optional map(Application application) { + CloudFoundryServerGroup.State state = + CloudFoundryServerGroup.State.valueOf(application.getState()); + + CloudFoundrySpace space = spaces.findById(application.getLinks().get("space").getGuid()); + String appId = application.getGuid(); + + ApplicationEnv applicationEnv; + try { + applicationEnv = safelyCall(() -> api.findApplicationEnvById(appId)).orElse(null); + } catch (CloudFoundryApiException e) { + // this happens when an account has access to a space but only has read only permissions + // catching this here to prevent all() from completely failing and breaking caching + // agents if one space in an account has permissions issues + if (e.getErrorCode() == NOT_AUTHORIZED) { + return Optional.empty(); + } + + // null is a valid value and is handled properly in this method + applicationEnv = null; + } + + Process process = processes.findProcessById(appId).orElse(null); + + CloudFoundryDroplet droplet = null; + try { + CloudFoundryPackage cfPackage = + safelyCall(() -> api.findPackagesByAppId(appId)) + .flatMap( + packages -> + packages.getResources().stream() + .findFirst() + .map( + pkg -> + CloudFoundryPackage.builder() + .downloadUrl( + pkg.getLinks().containsKey("download") + ? 
pkg.getLinks().get("download").getHref() + : null) + .checksumType( + pkg.getData().getChecksum() == null + ? null + : pkg.getData().getChecksum().getType()) + .checksum( + pkg.getData().getChecksum() == null + ? null + : pkg.getData().getChecksum().getValue()) + .build())) + .orElse(null); + + droplet = + safelyCall(() -> api.findDropletByApplicationGuid(appId)) + .map( + apiDroplet -> + CloudFoundryDroplet.builder() + .id(apiDroplet.getGuid()) + .name(application.getName() + "-droplet") + .stack(apiDroplet.getStack()) + .buildpacks( + ofNullable(apiDroplet.getBuildpacks()).orElse(emptyList()).stream() + .map( + bp -> + CloudFoundryBuildpack.builder() + .name(bp.getName()) + .detectOutput(bp.getDetectOutput()) + .version(bp.getVersion()) + .buildpackName(bp.getBuildpackName()) + .build()) + .collect(toList())) + .space(space) + .sourcePackage(cfPackage) + .build()) + .orElse(null); + } catch (Exception ex) { + log.debug("Unable to retrieve droplet for application '" + application.getName() + "'"); + } + + List cloudFoundryServices = + applicationEnv == null + ? emptyList() + : applicationEnv.getSystemEnvJson().getVcapServices().entrySet().stream() + .flatMap( + vcap -> + vcap.getValue().stream() + .map( + instance -> { + CloudFoundryServiceInstance.CloudFoundryServiceInstanceBuilder + cloudFoundryServiceInstanceBuilder = + CloudFoundryServiceInstance.builder() + .serviceInstanceName(vcap.getKey()) + .name(instance.getName()) + .plan(instance.getPlan()) + .tags(instance.getTags()); + if (instance.getLastOperation() != null + && instance.getLastOperation().getState() != null) { + cloudFoundryServiceInstanceBuilder + .status(instance.getLastOperation().getState().toString()) + .lastOperationDescription( + instance.getLastOperation().getDescription()); + } + return cloudFoundryServiceInstanceBuilder.build(); + })) + .collect(toList()); + + Map environmentVars = + applicationEnv == null || applicationEnv.getEnvironmentJson() == null + ? 
emptyMap() + : applicationEnv.getEnvironmentJson(); + + // filter out environment variables that aren't Spinnaker metadata + // as these could contain secrets + environmentVars = + environmentVars.entrySet().stream() + .filter(e -> ServerGroupMetaDataEnvVar.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + final CloudFoundryBuildInfo buildInfo = getBuildInfoFromEnvVars(environmentVars); + final ArtifactInfo artifactInfo = getArtifactInfoFromEnvVars(environmentVars); + final String pipelineId = + getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.PipelineId); + + String healthCheckType = null; + String healthCheckHttpEndpoint = null; + if (process != null && process.getHealthCheck() != null) { + final Process.HealthCheck healthCheck = process.getHealthCheck(); + healthCheckType = healthCheck.getType(); + if (healthCheck.getData() != null) { + healthCheckHttpEndpoint = healthCheck.getData().getEndpoint(); + } + } + + String serverGroupAppManagerUri = appsManagerUri; + if (StringUtils.isNotEmpty(appsManagerUri)) { + serverGroupAppManagerUri = + Optional.ofNullable(space) + .map( + s -> + appsManagerUri + + "/organizations/" + + s.getOrganization().getId() + + "/spaces/" + + s.getId() + + "/applications/" + + appId) + .orElse(""); + } + + String serverGroupMetricsUri = metricsUri; + if (StringUtils.isNotEmpty(metricsUri)) { + serverGroupMetricsUri = metricsUri + "/apps/" + appId; + } + + CloudFoundryServerGroup cloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .account(account) + .appsManagerUri(serverGroupAppManagerUri) + .metricsUri(serverGroupMetricsUri) + .name(application.getName()) + .id(appId) + .memory(process != null ? process.getMemoryInMb() : null) + .instances(emptySet()) + .droplet(droplet) + .diskQuota(process != null ? 
process.getDiskInMb() : null)
+            .healthCheckType(healthCheckType)
+            .healthCheckHttpEndpoint(healthCheckHttpEndpoint)
+            .space(space)
+            .createdTime(application.getCreatedAt().toInstant().toEpochMilli())
+            .serviceInstances(cloudFoundryServices)
+            .state(state)
+            .env(environmentVars)
+            .ciBuild(buildInfo)
+            .appArtifact(artifactInfo)
+            .pipelineId(pipelineId)
+            .updatedTime(application.getUpdatedAt().toInstant().toEpochMilli())
+            .build();
+
+    return Optional.of(checkHealthStatus(cloudFoundryServerGroup, application));
+  }
+
+  private CloudFoundryServerGroup checkHealthStatus(
+      CloudFoundryServerGroup cloudFoundryServerGroup, Application application) {
+    CloudFoundryServerGroup.State state =
+        CloudFoundryServerGroup.State.valueOf(application.getState());
+    Set<CloudFoundryInstance> instances;
+    switch (state) {
+      case STARTED:
+        try {
+          instances =
+              safelyCall(() -> api.instances(cloudFoundryServerGroup.getId()))
+                  .orElse(emptyMap())
+                  .entrySet()
+                  .stream()
+                  .map(
+                      inst -> {
+                        HealthState healthState = HealthState.Unknown;
+                        switch (inst.getValue().getState()) {
+                          case RUNNING:
+                            healthState = HealthState.Up;
+                            break;
+                          case DOWN:
+                          case CRASHED:
+                            healthState = HealthState.Down;
+                            break;
+                          case STARTING:
+                            healthState = HealthState.Starting;
+                            break;
+                        }
+                        return CloudFoundryInstance.builder()
+                            .appGuid(cloudFoundryServerGroup.getId())
+                            .key(inst.getKey())
+                            .healthState(healthState)
+                            .details(inst.getValue().getDetails())
+                            .launchTime(
+                                System.currentTimeMillis() - (inst.getValue().getUptime() * 1000))
+                            .zone(cloudFoundryServerGroup.getRegion())
+                            .build();
+                      })
+                  .collect(toSet());
+
+          log.trace(
+              "Successfully retrieved "
+                  + instances.size()
+                  + " instances for application '"
+                  + application.getName()
+                  + "'");
+        } catch (Exception ex) {
+          log.debug("Unable to retrieve instances for application '" + application.getName() + "'");
+          instances = emptySet();
+        }
+        break;
+      case STOPPED:
+      default:
+        instances = emptySet();
+    }
+    return cloudFoundryServerGroup.toBuilder().state(state).instances(instances).build();
+  }
+
+  private String getEnvironmentVar(
+      Map<String, Object> environmentVars, ServerGroupMetaDataEnvVar var) {
+    return Optional.ofNullable(environmentVars.get(var.envVarName))
+        .map(Object::toString)
+        .orElse(null);
+  }
+
+  private CloudFoundryBuildInfo getBuildInfoFromEnvVars(Map<String, Object> environmentVars) {
+    return CloudFoundryBuildInfo.builder()
+        .jobName(getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.JobName))
+        .jobNumber(getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.JobNumber))
+        .jobUrl(getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.JobUrl))
+        .build();
+  }
+
+  private ArtifactInfo getArtifactInfoFromEnvVars(Map<String, Object> environmentVars) {
+    return ArtifactInfo.builder()
+        .name(getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.ArtifactName))
+        .version(getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.ArtifactVersion))
+        .url(getEnvironmentVar(environmentVars, ServerGroupMetaDataEnvVar.ArtifactUrl))
+        .build();
+  }
+
+  public void mapRoute(String applicationGuid, String routeGuid) throws CloudFoundryApiException {
+    safelyCall(() -> api.mapRoute(applicationGuid, routeGuid, new MapRoute()));
+  }
+
+  public void unmapRoute(String applicationGuid, String routeGuid)
+      throws CloudFoundryApiException {
+    safelyCall(() -> api.unmapRoute(applicationGuid, routeGuid));
+  }
+
+  public void startApplication(String applicationGuid) throws CloudFoundryApiException {
+    safelyCall(() -> api.startApplication(applicationGuid, new StartApplication()));
+  }
+
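+  // Hypothetical caller sketch (variable names assumed): failures inside
+  // safelyCall surface as CloudFoundryApiException, while 404 responses
+  // resolve to an empty Optional and are silently ignored by these void wrappers.
+  //   try {
+  //     applications.startApplication(serverGroup.getId());
+  //   } catch (CloudFoundryApiException e) {
+  //     log.warn("Failed to start {}: {}", serverGroup.getName(), e.getMessage());
+  //   }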
+  public void stopApplication(String applicationGuid) throws CloudFoundryApiException {
+    safelyCall(() -> api.stopApplication(applicationGuid, new StopApplication()));
+  }
+
+  public void deleteApplication(String applicationGuid) throws CloudFoundryApiException {
+    safelyCall(() -> api.deleteApplication(applicationGuid));
+  }
+
+  public void deleteAppInstance(String guid, String index) throws CloudFoundryApiException {
+    safelyCall(() -> api.deleteAppInstance(guid, index));
+  }
+
+  public CloudFoundryServerGroup createApplication(
+      String appName,
+      CloudFoundrySpace space,
+      @Nullable Map<String, String> environmentVariables,
+      Lifecycle lifecycle)
+      throws CloudFoundryApiException {
+    Map<String, ToOneRelationship> relationships = new HashMap<>();
+    relationships.put("space", new ToOneRelationship(new Relationship(space.getId())));
+    return safelyCall(
+            () ->
+                api.createApplication(
+                    new CreateApplication(appName, relationships, environmentVariables, lifecycle)))
+        .map(this::map)
+        .flatMap(sg -> sg)
+        .orElseThrow(
+            () ->
+                new CloudFoundryApiException(
+                    "Cloud Foundry signaled that application creation succeeded but failed to provide a response."));
+  }
+
+  public String createPackage(CreatePackage createPackageRequest) throws CloudFoundryApiException {
+    return safelyCall(() -> api.createPackage(createPackageRequest))
+        .map(Package::getGuid)
+        .orElseThrow(
+            () ->
+                new CloudFoundryApiException(
+                    "Cloud Foundry signaled that package creation succeeded but failed to provide a response."));
+  }
+
+  @Nullable
+  public String findCurrentPackageIdByAppId(String appGuid) throws CloudFoundryApiException {
+    return safelyCall(() -> this.api.findDropletByApplicationGuid(appGuid))
+        .map(
+            droplet ->
+                StringUtils.substringAfterLast(droplet.getLinks().get("package").getHref(), "/"))
+        .orElse(null);
+  }
+
+  @Nonnull
+  public InputStream downloadPackageBits(String packageGuid) throws CloudFoundryApiException {
+    Optional<InputStream> optionalPackageInput =
+        safelyCall(() -> api.downloadPackage(packageGuid)).map(ResponseBody::byteStream);
+    return optionalPackageInput.orElseThrow(
+        () -> new CloudFoundryApiException("Failed to retrieve input stream of package bits."));
+  }
+
+  public void uploadPackageBits(String packageGuid, File file) throws CloudFoundryApiException {
+    MultipartBody.Part filePart =
+        MultipartBody.Part.createFormData(
+            "bits",
+            file.getName(),
+            RequestBody.create(MediaType.parse("multipart/form-data"), file));
+    safelyCall(() -> api.uploadPackageBits(packageGuid, filePart))
+        .map(Package::getGuid)
+        .orElseThrow(
+            () ->
+                new CloudFoundryApiException(
+                    "Cloud Foundry signaled that package upload succeeded but failed to provide a response."));
+  }
+
+  public String createBuild(
+      String packageGuid, @Nullable Integer memoryAmount, @Nullable Integer diskSizeAmount)
+      throws CloudFoundryApiException {
+    return safelyCall(
+            () -> api.createBuild(new CreateBuild(packageGuid, memoryAmount, diskSizeAmount)))
+        .map(Build::getGuid)
+        .orElseThrow(
+            () ->
+                new CloudFoundryApiException(
+                    "Cloud Foundry signaled that build creation succeeded but failed to provide a response."));
+  }
+
+  public Boolean buildCompleted(String buildGuid) throws CloudFoundryApiException {
+    switch (safelyCall(() -> api.getBuild(buildGuid))
+        .map(Build::getState)
+        .orElse(Build.State.FAILED)) {
+      case FAILED:
+        throw new CloudFoundryApiException(
+            "Failed to build droplet or there are not enough resources available");
+      case STAGED:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  public boolean packageUploadComplete(String packageGuid) throws CloudFoundryApiException {
+    switch (safelyCall(() -> api.getPackage(packageGuid))
+        .map(Package::getState)
+        .orElse(Package.State.FAILED)) {
+      case FAILED:
+      case EXPIRED:
+        throw new CloudFoundryApiException("Upload failed");
+      case READY:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  public String findDropletGuidFromBuildId(String buildGuid) throws CloudFoundryApiException {
+    return safelyCall(() -> api.getBuild(buildGuid))
+        .map(Build::getDroplet)
+        .map(Droplet::getGuid)
+        .orElse(null);
+  }
+
+  public void setCurrentDroplet(String appGuid, String dropletGuid)
+      throws CloudFoundryApiException {
+    safelyCall(
+        () -> api.setCurrentDroplet(appGuid, new ToOneRelationship(new Relationship(dropletGuid))));
+  }
+
+  public List<Resource<Application>> getTakenSlots(String clusterName, String spaceId) {
+    String finalName = buildFinalAsgName(clusterName);
+    List<String> filter =
+        asList("name<=" + finalName, "name>=" + clusterName, "space_guid:" + spaceId);
+    return collectPageResources("applications", page -> api.listAppsFiltered(page, filter, 10))
+        .stream()
+        .filter(
+            app -> {
+              Names entityNames = Names.parseName(app.getEntity().getName());
+              return clusterName.equals(entityNames.getCluster());
+            })
+        .collect(Collectors.toList());
+  }
+
+  private String buildFinalAsgName(String clusterName) {
+    Names names = Names.parseName(clusterName);
+    return AbstractServerGroupNameResolver.generateServerGroupName(
+        names.getApp(), names.getStack(), names.getDetail(), 999, false);
+  }
+
+  public void restageApplication(String appGuid) {
+    safelyCall(() -> api.restageApplication(appGuid, ""));
+  }
+
+  public ProcessStats.State getAppState(String guid) {
+    return processes
+        .getProcessState(guid)
+        .orElseGet(
+            () ->
+                safelyCall(() -> api.findById(guid))
+                    .filter(
+                        application ->
+                            CloudFoundryServerGroup.State.STARTED.equals(
+                                CloudFoundryServerGroup.State.valueOf(application.getState())))
+                    .map(appState -> ProcessStats.State.RUNNING)
+                    .orElse(ProcessStats.State.DOWN));
+  }
+
+  public List<Resource<ServiceBinding>> getServiceBindingsByApp(String appGuid) {
+    return collectPageResources("service bindings", pg -> api.getServiceBindings(appGuid));
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryApiException.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryApiException.java
new file mode 100644
index 00000000000..2dcfb647675
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryApiException.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static java.util.Arrays.stream; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ErrorDescription; +import java.util.Arrays; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.Getter; + +@Getter +public class CloudFoundryApiException extends RuntimeException { + + @Nullable private ErrorDescription.Code errorCode; + + public CloudFoundryApiException(ErrorDescription errorCause) { + super( + Optional.ofNullable(errorCause) + .map(e -> getMessage(e.getErrors().toArray(new String[0]))) + .orElse(null)); + if (errorCause != null) { + this.errorCode = errorCause.getCode(); + } + } + + public CloudFoundryApiException(Throwable t, String... errors) { + super(getMessage(t, errors), t); + } + + public CloudFoundryApiException(String... errors) { + super(getMessage(errors)); + } + + private static String getMessage(String... errors) { + return "Cloud Foundry API returned with error(s): " + + stream(errors).filter(Objects::nonNull).collect(Collectors.joining(" and ")); + } + + private static String getMessage(Throwable t, String... errors) { + String[] allErrors = Arrays.copyOf(errors, errors.length + 1); + allErrors[errors.length] = t.getMessage(); + return getMessage(allErrors); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClient.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClient.java index 13b5c1bb79e..7d330f3d2cb 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClient.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClient.java @@ -16,12 +16,24 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.client; -import lombok.RequiredArgsConstructor; - -@RequiredArgsConstructor -public class CloudFoundryClient { - private final String account; - private final String apiHost; - private final String user; - private final String password; +public interface CloudFoundryClient { + Spaces getSpaces(); + + Organizations getOrganizations(); + + Domains getDomains(); + + Routes getRoutes(); + + Applications getApplications(); + + ServiceInstances getServiceInstances(); + + ServiceKeys getServiceKeys(); + + Tasks getTasks(); + + Logs getLogs(); + + Processes getProcesses(); } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClientUtils.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClientUtils.java new file mode 100644 index 00000000000..50e4ce9872a --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClientUtils.java @@ -0,0 +1,122 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.PropertyNamingStrategy;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ErrorDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.Response;
+
+public final class CloudFoundryClientUtils {
+  // https://github.com/cloudfoundry/cloud_controller_ng/blob/main/app/models/services/service_binding.rb#L53
+  private static final String BINDING_NAME_INVALID_CHARACTER_PATTERN = "[^\\w-]+";
+
+  private static final ObjectMapper mapper =
+      new ObjectMapper()
+          .setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE)
+          .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
+
+  public static <T> Optional<T> safelyCall(Supplier<Call<T>> r) {
+    Response<T> response = null;
+    try {
+      response = r.get().execute();
+    } catch (Exception e) {
+      throw new CloudFoundryApiException(e);
+    } finally {
+      if (response != null && !response.isSuccessful()) {
+        try (ResponseBody responseBody = response.errorBody()) {
+          if (response.code() == 401) {
+            throw new CloudFoundryApiException("Unauthorized");
+          }
+          if (response.code() == 404) {
+            return Optional.empty();
+          }
+          ErrorDescription errorDescription =
+              mapper.readValue(responseBody.string(), ErrorDescription.class);
+          throw new CloudFoundryApiException(errorDescription);
+        } catch (IOException e) {
+          throw new CloudFoundryApiException(e, "Could not parse error");
+        }
+      }
+    }
+    return Optional.ofNullable(response.body());
+  }
+
+  static <R> List<R> collectPages(
+      String resourceNamePluralized, Function<Integer, Call<Pagination<R>>> fetchPage)
+      throws CloudFoundryApiException {
+    Pagination<R> firstPage =
+        safelyCall(() -> fetchPage.apply(null))
+            .orElseThrow(
+                () -> new CloudFoundryApiException("Unable to retrieve " + resourceNamePluralized));
+
+    List<R> allResources = new ArrayList<>(firstPage.getResources());
+    for (int page = 2; page <= firstPage.getPagination().getTotalPages(); page++) {
+      final int p = page;
+      allResources.addAll(
+          safelyCall(() -> fetchPage.apply(p))
+              .orElseThrow(
+                  () ->
+                      new CloudFoundryApiException("Unable to retrieve " + resourceNamePluralized))
+              .getResources());
+    }
+
+    return allResources;
+  }
+
+  static <R> List<Resource<R>> collectPageResources(
+      String resourceNamePluralized, Function<Integer, Call<Page<R>>> fetchPage)
+      throws CloudFoundryApiException {
+    Page<R> firstPage =
+        safelyCall(() -> fetchPage.apply(null))
+            .orElseThrow(
+                () -> new CloudFoundryApiException("Unable to retrieve " + resourceNamePluralized));
+
+    List<Resource<R>> allResources = new ArrayList<>(firstPage.getResources());
+    for (int page = 2; page <= firstPage.getTotalPages(); page++) {
+      final int p = page;
+      allResources.addAll(
+          safelyCall(() -> fetchPage.apply(p))
+              .orElseThrow(
+                  () ->
+                      new CloudFoundryApiException("Unable to retrieve " + resourceNamePluralized))
+              .getResources());
+    }
+
+    return allResources;
+  }
+
+  public static ObjectMapper getMapper() {
+    return mapper;
+  }
+
+  public static String convertToValidServiceBindingName(final String name) {
+    return name.replaceAll(BINDING_NAME_INVALID_CHARACTER_PATTERN, "-");
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Domains.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Domains.java
new file mode 100644
index 00000000000..0a5c7f6e05c
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Domains.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.collectPageResources;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.DomainService;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Domain;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain;
+import groovy.util.logging.Slf4j;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+@Slf4j
+public class Domains {
+  private final DomainService api;
+  private final Organizations organizations;
+
+  private final LoadingCache<String, CloudFoundryDomain> domainCache =
+      CacheBuilder.newBuilder()
+          .expireAfterWrite(5, TimeUnit.MINUTES)
+          .build(
+              new CacheLoader<String, CloudFoundryDomain>() {
+                @Override
+                public CloudFoundryDomain load(@Nonnull String guid)
+                    throws CloudFoundryApiException, ResourceNotFoundException {
+                  Resource<Domain> domain =
+                      safelyCall(() -> api.findSharedDomainById(guid))
+                          .orElseGet(
+                              () -> safelyCall(() -> api.findPrivateDomainById(guid)).orElse(null));
+
+                  if (domain == null) throw new ResourceNotFoundException();
+
+                  return map(domain);
+                }
+              });
+
+  private CloudFoundryDomain map(Resource<Domain> res) throws CloudFoundryApiException {
+    String orgGuid = res.getEntity().getOwningOrganizationGuid();
+    return CloudFoundryDomain.builder()
+        .id(res.getMetadata().getGuid())
+        .name(res.getEntity().getName())
+        .organization(orgGuid != null ? organizations.findById(orgGuid) : null)
+        .build();
+  }
+
+  @Nullable
+  public CloudFoundryDomain findById(String guid) throws CloudFoundryApiException {
+    try {
+      return domainCache.get(guid);
+    } catch (ExecutionException e) {
+      if (e.getCause() instanceof ResourceNotFoundException) return null;
+      throw new CloudFoundryApiException(e.getCause(), "Unable to find domain by id");
+    }
+  }
+
+  public Optional<CloudFoundryDomain> findByName(String domainName)
+      throws CloudFoundryApiException {
+    return all().stream().filter(d -> d.getName().equals(domainName)).findFirst();
+  }
+
+  public List<CloudFoundryDomain> all() throws CloudFoundryApiException {
+    List<Resource<Domain>> sharedDomains = collectPageResources("shared domains", api::allShared);
+    List<Resource<Domain>> privateDomains =
+        collectPageResources("private domains", api::allPrivate);
+
+    List<CloudFoundryDomain> domains =
+        new ArrayList<>(sharedDomains.size() + privateDomains.size());
+    for (Resource<Domain> sharedDomain : sharedDomains) {
+      domains.add(map(sharedDomain));
+    }
+    for (Resource<Domain> privateDomain : privateDomains) {
+      domains.add(map(privateDomain));
+    }
+    for (CloudFoundryDomain domain : domains) {
+      domainCache.put(domain.getId(), domain);
+    }
+    return domains;
+  }
+
+  public CloudFoundryDomain getDefault() {
+    return map(
+        safelyCall(() -> api.all(null))
+            .orElseThrow(() -> new CloudFoundryApiException("Unable to retrieve default domain"))
+            .getResources()
+            .iterator()
+            .next());
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/HttpCloudFoundryClient.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/HttpCloudFoundryClient.java
new file mode 100644
index 00000000000..ee5e204bbf5
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/HttpCloudFoundryClient.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.retry.RetryInterceptor; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.tokens.AccessTokenAuthenticator; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.tokens.AccessTokenInterceptor; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.tokens.AccessTokenProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.cert.X509Certificate; +import java.util.concurrent.ForkJoinPool; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import okhttp3.OkHttpClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import retrofit2.Retrofit; +import retrofit2.converter.jackson.JacksonConverterFactory; +import retrofit2.converter.protobuf.ProtoConverterFactory; + +/** + * Waiting for this issue to be resolved before replacing this class by the CF Java Client: + * https://github.com/cloudfoundry/cf-java-client/issues/938 + */ +@Slf4j +public class HttpCloudFoundryClient implements CloudFoundryClient { + private final String apiHost; + private final String user; + private final String password; + private Logger logger = LoggerFactory.getLogger(HttpCloudFoundryClient.class); + @Getter private AuthenticationService uaaService; + @Getter private Spaces spaces; + @Getter private Organizations organizations; + @Getter private Domains domains; + @Getter private Routes routes; + @Getter private Applications applications; + @Getter private ServiceInstances serviceInstances; + @Getter private ServiceKeys serviceKeys; + @Getter private Tasks tasks; + @Getter private Logs logs; + @Getter private Processes processes; + + public HttpCloudFoundryClient( + String account, + String appsManagerUri, + String metricsUri, + String apiHost, + String user, + String password, + boolean useHttps, + boolean skipSslValidation, + boolean onlySpinnakerManaged, + Integer resultsPerPage, + ForkJoinPool forkJoinPool, + OkHttpClient.Builder okHttpClientBuilder, + CloudFoundryConfigurationProperties.ClientConfig clientConfig, + CloudFoundryConfigurationProperties.LocalCacheConfig localCacheConfig) { + + this.apiHost = apiHost; + this.user = user; + this.password = password; + + ObjectMapper mapper = new ObjectMapper(); + mapper.setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE); + mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); + mapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS); + mapper.registerModule(new JavaTimeModule()); + + // The UAA service is built first because the Authenticator interceptor needs it to get tokens + // from CF. + OkHttpClient okHttpClient = applySslValidator(okHttpClientBuilder, skipSslValidation); + this.uaaService = + new Retrofit.Builder() + .baseUrl( + (useHttps ? 
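+// The UAA base URL assembled below is derived from the API host by swapping a
+// leading "api." for "login.", e.g. (hypothetical host):
+//
+//   "api.sys.example.com".replaceAll("^api\\.", "login.")  // -> "login.sys.example.com"
+//
+// Hosts without a leading "api." pass through unchanged.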
"https://" : "http://") + this.apiHost.replaceAll("^api\\.", "login.")) + .client(okHttpClient) + .addConverterFactory(JacksonConverterFactory.create(mapper)) + .build() + .create(AuthenticationService.class); + + // The remaining services need the AccessTokenAuthenticator in order to retry for 401 responses. + AccessTokenProvider accessTokenProvider = + new AccessTokenProvider(user, password, this.uaaService); + okHttpClient = + okHttpClient + .newBuilder() + .authenticator(new AccessTokenAuthenticator(accessTokenProvider)) + .addInterceptor(new AccessTokenInterceptor(accessTokenProvider)) + .addInterceptor(new RetryInterceptor(clientConfig.getMaxRetries())) + .build(); + + // Shared retrofit targeting cf api with preconfigured okhttpclient and jackson converter + Retrofit retrofit = + new Retrofit.Builder() + .client(okHttpClient) + .baseUrl((useHttps ? "https://" : "http://") + this.apiHost) + .addConverterFactory(JacksonConverterFactory.create(mapper)) + .build(); + + this.organizations = new Organizations(retrofit.create(OrganizationService.class)); + this.spaces = new Spaces(retrofit.create(SpaceService.class), organizations); + this.processes = new Processes(retrofit.create(ProcessesService.class)); + + this.applications = + new Applications( + account, + appsManagerUri, + metricsUri, + retrofit.create(ApplicationService.class), + spaces, + processes, + resultsPerPage, + onlySpinnakerManaged, + forkJoinPool, + localCacheConfig); + this.domains = new Domains(retrofit.create(DomainService.class), organizations); + this.serviceInstances = + new ServiceInstances( + retrofit.create(ServiceInstanceService.class), + retrofit.create(ConfigService.class), + spaces); + this.routes = + new Routes( + account, + retrofit.create(RouteService.class), + applications, + domains, + spaces, + resultsPerPage, + forkJoinPool, + localCacheConfig); + this.serviceKeys = new ServiceKeys(retrofit.create(ServiceKeyService.class), spaces); + this.tasks = new Tasks(retrofit.create(TaskService.class)); + + // Logs requires retrofit with different baseUrl and converterFactory + this.logs = + new Logs( + new Retrofit.Builder() + .client(okHttpClient) + .baseUrl( + (useHttps ? 
"https://" : "http://") + apiHost.replaceAll("^api\\.", "doppler.")) + .addConverterFactory(ProtoConverterFactory.create()) + .build() + .create(DopplerService.class)); + } + + private static OkHttpClient applySslValidator( + OkHttpClient.Builder builder, boolean skipSslValidation) { + if (skipSslValidation) { + builder.hostnameVerifier((s, sslSession) -> true); + + TrustManager[] trustManagers = + new TrustManager[] { + new X509TrustManager() { + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s) {} + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s) {} + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } + } + }; + + SSLContext sslContext; + try { + sslContext = SSLContext.getInstance("SSL"); + sslContext.init(null, trustManagers, new SecureRandom()); + } catch (KeyManagementException | NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + + X509TrustManager trustManager = (X509TrustManager) trustManagers[0]; + builder.sslSocketFactory(sslContext.getSocketFactory(), trustManager); + } + return builder.build(); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Logs.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Logs.java new file mode 100644 index 00000000000..ceb4e165d4b --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Logs.java @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall;
+import static java.util.stream.Collectors.joining;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.DopplerService;
+import java.util.Comparator;
+import java.util.List;
+import lombok.RequiredArgsConstructor;
+import org.cloudfoundry.dropsonde.events.EventFactory.Envelope;
+import org.cloudfoundry.dropsonde.events.EventFactory.Envelope.EventType;
+import org.cloudfoundry.dropsonde.events.LogFactory.LogMessage;
+
+@RequiredArgsConstructor
+public class Logs {
+  private final DopplerService api;
+
+  public String recentApplicationLogs(String applicationGuid, int instanceIndex) {
+    return recentLogsFiltered(applicationGuid, "APP/PROC/WEB", instanceIndex);
+  }
+
+  public String recentTaskLogs(String applicationGuid, String taskName) {
+    return recentLogsFiltered(applicationGuid, "APP/TASK/" + taskName, 0);
+  }
+
+  public List<Envelope> recentLogs(String applicationGuid) {
+    return safelyCall(() -> api.recentLogs(applicationGuid))
+        .orElseThrow(IllegalStateException::new);
+  }
+
+  private String recentLogsFiltered(
+      String applicationGuid, String logSourceFilter, int instanceIndex) {
+    List<Envelope> envelopes = recentLogs(applicationGuid);
+
+    return envelopes.stream()
+        .filter(e -> e.getEventType().equals(EventType.LogMessage))
+        .map(Envelope::getLogMessage)
+        .filter(
+            logMessage ->
+                logSourceFilter.equals(logMessage.getSourceType())
+                    && logMessage.getSourceInstance().equals(String.valueOf(instanceIndex)))
+        .sorted(Comparator.comparingLong(LogMessage::getTimestamp))
+        .map(msg -> msg.getMessage().toStringUtf8())
+        .collect(joining("\n"));
+  }
+}
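+
+// Usage sketch (assuming a wired DopplerService): recentLogsFiltered keeps only
+// LogMessage envelopes whose source type and instance index match, so e.g.
+//
+//   new Logs(dopplerService).recentApplicationLogs("app-guid", 0)
+//
+// returns instance 0's web-process messages joined by newlines, oldest first.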
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Organizations.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Organizations.java
new file mode 100644
index 00000000000..20633192b31
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Organizations.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.collectPages;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.OrganizationService;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public class Organizations {
+  private final OrganizationService api;
+
+  private final LoadingCache<String, CloudFoundryOrganization> organizationCache =
+      CacheBuilder.newBuilder()
+          .expireAfterWrite(5, TimeUnit.MINUTES)
+          .build(
+              new CacheLoader<String, CloudFoundryOrganization>() {
+                @Override
+                public CloudFoundryOrganization load(@Nonnull String guid)
+                    throws CloudFoundryApiException, ResourceNotFoundException {
+                  return safelyCall(() -> api.findById(guid))
+                      .map(
+                          org ->
+                              CloudFoundryOrganization.builder()
+                                  .id(org.getGuid())
+                                  .name(org.getName())
+                                  .build())
+                      .orElseThrow(ResourceNotFoundException::new);
+                }
+              });
+
+  @Nullable
+  public CloudFoundryOrganization findById(String orgId) throws CloudFoundryApiException {
+    try {
+      return organizationCache.get(orgId);
+    } catch (ExecutionException e) {
+      if (e.getCause() instanceof ResourceNotFoundException) return null;
+      throw new CloudFoundryApiException(e.getCause(), "Unable to find organization by id");
+    }
+  }
+
+  public Optional<CloudFoundryOrganization> findByName(String orgName)
+      throws CloudFoundryApiException {
+    return collectPages("organizations", page -> api.all(page, orgName)).stream()
+        .findAny()
+        .map(
+            org ->
+                CloudFoundryOrganization.builder().id(org.getGuid()).name(org.getName()).build());
+  }
+
+  public List<CloudFoundryOrganization> findAllByNames(List<String> names) {
+    if (names == null || names.isEmpty())
+      throw new IllegalArgumentException("Organization names must not be empty or null");
+    return collectPages("organizations", page -> api.all(page, String.join(",", names))).stream()
+        .map(
+            org -> CloudFoundryOrganization.builder().id(org.getGuid()).name(org.getName()).build())
+        .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Processes.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Processes.java
new file mode 100644
index 00000000000..cbea8e478b8
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Processes.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.collectPages; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ProcessesService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Process; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ScaleProcess; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.UpdateProcess; +import groovy.util.logging.Slf4j; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; + +@Slf4j +@RequiredArgsConstructor +public class Processes { + + private final ProcessesService api; + + public List getAllProcessesByAppId(String appGuid) { + if (appGuid == null || appGuid.isEmpty()) { + throw new IllegalArgumentException( + "An application guid must be provided in order to return processes by app."); + } + return collectPages("processes", page -> api.getProcesses(page, appGuid)); + } + + public void scaleProcess( + String guid, + @Nullable Integer instances, + @Nullable Integer memInMb, + @Nullable Integer diskInMb) + throws CloudFoundryApiException { + if ((memInMb == null && diskInMb == null && instances == null) + || (Integer.valueOf(0).equals(memInMb) + && Integer.valueOf(0).equals(diskInMb) + && Integer.valueOf(0).equals(instances))) { + return; + } + safelyCall(() -> api.scaleProcess(guid, new ScaleProcess(instances, memInMb, diskInMb))); + } + + public Optional findProcessById(String guid) { + return safelyCall(() -> api.findProcessById(guid)); + } + + public void updateProcess( + String guid, + @Nullable String command, + @Nullable String healthCheckType, + @Nullable String healthCheckEndpoint, + @Nullable Integer healthCheckTimeout, + @Nullable Integer healthCheckInvocationTimeout) + throws CloudFoundryApiException { + + Process.HealthCheck healthCheck = + new Process.HealthCheck.HealthCheckBuilder() + .type(healthCheckType) + .data( + new Process.HealthCheckData.HealthCheckDataBuilder() + .endpoint(healthCheckEndpoint) + .timeout(healthCheckTimeout) + .invocationTimeout(healthCheckInvocationTimeout) + .build()) + .build(); + + if (command != null && command.isEmpty()) { + throw new IllegalArgumentException( + "Buildpack commands cannot be empty. 
Please specify a custom command or set it to null to use the original buildpack command."); + } + + safelyCall(() -> api.updateProcess(guid, new UpdateProcess(command, healthCheck))); + } + + @Nullable + public Optional getProcessState(String guid) throws CloudFoundryApiException { + return safelyCall(() -> api.findProcessStatsById(guid)) + .map(pr -> pr.getResources().stream().findAny().map(ProcessStats::getState)) + .orElse(Optional.empty()); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ResourceNotFoundException.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ResourceNotFoundException.java new file mode 100644 index 00000000000..63614b5f9bf --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ResourceNotFoundException.java @@ -0,0 +1,25 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +class ResourceNotFoundException extends RuntimeException { + ResourceNotFoundException(String message) { + super(message); + } + + ResourceNotFoundException() {} +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Routes.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Routes.java new file mode 100644 index 00000000000..cc2e67c2e6a --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Routes.java @@ -0,0 +1,249 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.collectPageResources; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall; +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonList; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.RouteService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Route; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.RouteMapping; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class Routes { + private static final Pattern VALID_ROUTE_REGEX = + Pattern.compile("^([a-zA-Z0-9_-]+)\\.([a-zA-Z0-9_.-]+)(:[0-9]+)?([/a-zA-Z0-9_.-]+)?$"); + + private final String account; + private final RouteService api; + private final Applications applications; + private final Domains domains; + private final Spaces spaces; + private final Integer resultsPerPage; + + private final ForkJoinPool forkJoinPool; + private final LoadingCache> routeMappings; + + public Routes( + String account, + RouteService api, + Applications applications, + Domains domains, + Spaces spaces, + Integer resultsPerPage, + ForkJoinPool forkJoinPool, + CloudFoundryConfigurationProperties.LocalCacheConfig localCacheConfig) { + this.account = account; + this.api = api; + this.applications = applications; + this.domains = domains; + this.spaces = spaces; + this.resultsPerPage = resultsPerPage; + this.forkJoinPool = forkJoinPool; + + CacheBuilder builder = CacheBuilder.newBuilder(); + if (localCacheConfig.getRoutesAccessExpirySeconds() >= 0) { + builder.expireAfterAccess(localCacheConfig.getRoutesAccessExpirySeconds(), TimeUnit.SECONDS); + } + if (localCacheConfig.getRoutesWriteExpirySeconds() >= 0) { + builder.expireAfterWrite(localCacheConfig.getRoutesWriteExpirySeconds(), TimeUnit.SECONDS); + } + + this.routeMappings = + builder.build( + new CacheLoader<>() { + @Override + public List load(@Nonnull String guid) + throws CloudFoundryApiException, ResourceNotFoundException { + return collectPageResources("route mappings", pg -> api.routeMappings(guid, pg)) + .stream() + .map(Resource::getEntity) + .collect(Collectors.toList()); + } + }); + } + + private CloudFoundryLoadBalancer map(Resource res) throws CloudFoundryApiException { + Route route = res.getEntity(); + + Set 
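+    // The routeMappings cache above is bounded only when the configured expiry is
+    // >= 0, e.g. (hypothetical setting) routesAccessExpirySeconds=30 evicts entries
+    // 30 seconds after the last read; a negative value leaves that dimension unbounded.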
mappedApps = emptySet(); + try { + mappedApps = + routeMappings.get(res.getMetadata().getGuid()).stream() + .map( + rm -> { + try { + return applications.findById(rm.getAppGuid()); + } catch (Exception e) { + return null; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toSet()); + } catch (Exception e) { + if (!(e.getCause() instanceof ResourceNotFoundException)) + throw new CloudFoundryApiException(e.getCause(), "Unable to find route mappings by id"); + } + + return CloudFoundryLoadBalancer.builder() + .account(account) + .id(res.getMetadata().getGuid()) + .host(route.getHost()) + .path(route.getPath()) + .port(route.getPort()) + .space(spaces.findById(route.getSpaceGuid())) + .domain(domains.findById(route.getDomainGuid())) + .mappedApps(mappedApps) + .build(); + } + + @Nullable + public CloudFoundryLoadBalancer find(RouteId routeId, String spaceId) + throws CloudFoundryApiException { + CloudFoundrySpace id = spaces.findById(spaceId); + String orgId = id.getOrganization().getId(); + + List queryParams = new ArrayList<>(); + queryParams.add("host:" + routeId.getHost()); + queryParams.add("organization_guid:" + orgId); + queryParams.add("domain_guid:" + routeId.getDomainGuid()); + if (routeId.getPath() != null) queryParams.add("path:" + routeId.getPath()); + if (routeId.getPort() != null) queryParams.add("port:" + routeId.getPort().toString()); + + return collectPageResources("route mappings", pg -> api.all(pg, 1, queryParams)).stream() + .filter( + routeResource -> + (routeId.getPath() != null || routeResource.getEntity().getPath().isEmpty()) + && (routeId.getPort() != null || routeResource.getEntity().getPort() == null)) + .findFirst() + .map(this::map) + .orElse(null); + } + + @Nullable + public RouteId toRouteId(String uri) throws CloudFoundryApiException { + Matcher matcher = VALID_ROUTE_REGEX.matcher(uri); + if (matcher.find()) { + CloudFoundryDomain domain = domains.findByName(matcher.group(2)).orElse(null); + if (domain == null) { + return null; + } + RouteId routeId = new RouteId(); + routeId.setHost(matcher.group(1)); + routeId.setDomainGuid(domain.getId()); + routeId.setPort( + matcher.group(3) == null ? 
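+    // Worked example (hypothetical route): "demo.apps.example.com:8080/v1" matches
+    // with group(1)="demo", group(2)="apps.example.com", group(3)=":8080" (the ':'
+    // is stripped below before parsing the port), and group(4)="/v1".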
null : Integer.parseInt(matcher.group(3).substring(1))); + routeId.setPath(matcher.group(4)); + return routeId; + } else { + return null; + } + } + + public List all(List spaces) + throws CloudFoundryApiException { + try { + if (!spaces.isEmpty()) { + List spaceGuids = + spaces.stream().map(CloudFoundrySpace::getId).collect(Collectors.toList()); + String orgFilter = + "organization_guid IN " + + spaces.stream() + .map(s -> s.getOrganization().getId()) + .collect(Collectors.joining(",")); + return forkJoinPool + .submit( + () -> + collectPageResources( + "routes", pg -> api.all(pg, resultsPerPage, singletonList(orgFilter))) + .parallelStream() + .map(this::map) + .filter(lb -> spaceGuids.contains(lb.getSpace().getId())) + .collect(Collectors.toList())) + .get(); + } else { + return forkJoinPool + .submit( + () -> + collectPageResources("routes", pg -> api.all(pg, resultsPerPage, null)) + .parallelStream() + .map(this::map) + .collect(Collectors.toList())) + .get(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public CloudFoundryLoadBalancer createRoute(RouteId routeId, String spaceId) + throws CloudFoundryApiException { + Route route = new Route(routeId, spaceId); + try { + Resource newRoute = + safelyCall(() -> api.createRoute(route)) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Cloud Foundry signaled that route creation succeeded but failed to provide a response.")); + return map(newRoute); + } catch (CloudFoundryApiException e) { + if (e.getErrorCode() == null) throw e; + + switch (e.getErrorCode()) { + case ROUTE_HOST_TAKEN: + case ROUTE_PATH_TAKEN: + case ROUTE_PORT_TAKEN: + return this.find(routeId, spaceId); + default: + throw e; + } + } + } + + public void deleteRoute(String loadBalancerGuid) throws CloudFoundryApiException { + safelyCall(() -> api.deleteRoute(loadBalancerGuid)); + } + + public static boolean isValidRouteFormat(String route) { + return VALID_ROUTE_REGEX.matcher(route).find(); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceInstances.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceInstances.java new file mode 100644 index 00000000000..181b6d28d8d --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceInstances.java @@ -0,0 +1,645 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ConfigFeatureFlag.ConfigFlag.SERVICE_INSTANCE_SHARING; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance.Type.MANAGED_SERVICE_INSTANCE; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance.Type.USER_PROVIDED_SERVICE_INSTANCE; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.Optional.ofNullable; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; +import static org.apache.commons.lang3.StringUtils.isBlank; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.annotations.VisibleForTesting; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ConfigService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ServiceInstanceService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreateSharedServiceInstances; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServicePlan; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.io.IOException; +import java.util.*; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; +import okhttp3.ResponseBody; +import org.springframework.util.StringUtils; +import retrofit2.Call; + +@RequiredArgsConstructor +public class ServiceInstances { + private final ServiceInstanceService api; + private final ConfigService configApi; + private final Spaces spaces; + + public List> findAllServiceBindingsByServiceName( + String region, String serviceName) { + CloudFoundryServiceInstance serviceInstance = getServiceInstance(region, serviceName); + if (serviceInstance == null) { + return emptyList(); + } + return findAllServiceBindingsByService(serviceInstance.getId()); + } + + public void createServiceBinding(CreateServiceBinding createServiceBinding) { + try { + safelyCall(() -> api.createServiceBinding(createServiceBinding)); + } catch (CloudFoundryApiException e) { + if (e.getErrorCode() == null) throw e; + + switch (e.getErrorCode()) { + case SERVICE_INSTANCE_ALREADY_BOUND: + return; + default: + throw e; + } + } + } + + public List> findAllServiceBindingsByApp(String appGuid) { + String bindingsQuery = "app_guid:" + appGuid; + return collectPageResources( + "service bindings", pg -> api.getAllServiceBindings(singletonList(bindingsQuery))); + } + + public List> findAllServiceBindingsByService(String serviceGuid) { + String bindingsQuery = "service_instance_guid:" + serviceGuid; + return 
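+    // These filters use the CF v2 "q" query grammar, "attribute:value", sent as
+    // repeated q= parameters, e.g. (hypothetical guid) "service_instance_guid:8d3f-...".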
collectPageResources( + "service bindings", pg -> api.getAllServiceBindings(singletonList(bindingsQuery))); + } + + public void deleteServiceBinding(String serviceBindingGuid) { + safelyCall(() -> api.deleteServiceBinding(serviceBindingGuid)); + } + + private Resource findServiceByServiceName(String serviceName) { + List> services = + collectPageResources( + "services by name", pg -> api.findService(pg, singletonList("label:" + serviceName))); + return ofNullable(services.get(0)).orElse(null); + } + + private List findAllServicePlansByServiceName(String serviceName) { + Resource service = findServiceByServiceName(serviceName); + List> services = + collectPageResources( + "service plans by id", + pg -> + api.findServicePlans( + pg, singletonList("service_guid:" + service.getMetadata().getGuid()))); + + return services.stream() + .map( + resource -> + CloudFoundryServicePlan.builder() + .name(resource.getEntity().getName()) + .id(resource.getMetadata().getGuid()) + .build()) + .collect(toList()); + } + + public List findAllServicesByRegion(String region) { + return spaces + .findSpaceByRegion(region) + .map( + space -> { + List> services = + collectPageResources( + "all service", pg -> api.findServiceBySpaceId(space.getId(), pg, null)); + return services.stream() + .map( + serviceResource -> + CloudFoundryService.builder() + .name(serviceResource.getEntity().getLabel()) + .servicePlans( + findAllServicePlansByServiceName( + serviceResource.getEntity().getLabel())) + .build()) + .collect(toList()); + }) + .orElse(Collections.emptyList()); + } + + public List> findAllServicesBySpaceAndNames( + CloudFoundrySpace space, List serviceInstanceNames) { + if (serviceInstanceNames == null || serviceInstanceNames.isEmpty()) return emptyList(); + List serviceInstanceQuery = getServiceQueryParams(serviceInstanceNames, space); + List> serviceInstances = new ArrayList<>(); + serviceInstances.addAll( + collectPageResources("service instances", pg -> api.all(pg, serviceInstanceQuery))); + serviceInstances.addAll( + collectPageResources( + "service instances", pg -> api.allUserProvided(pg, serviceInstanceQuery))); + return serviceInstances; + } + + public List> + findAllVersionedServiceInstancesBySpaceAndName( + CloudFoundrySpace space, String serviceInstanceName) { + List serviceInstanceQuery = + Arrays.asList( + "name>=" + serviceInstanceName, + "organization_guid:" + space.getOrganization().getId(), + "space_guid:" + space.getId()); + List> serviceInstances = new ArrayList<>(); + serviceInstances.addAll( + collectPageResources("service instances", pg -> api.all(pg, serviceInstanceQuery))); + serviceInstances.addAll( + collectPageResources( + "service instances", pg -> api.allUserProvided(pg, serviceInstanceQuery))); + return serviceInstances; + } + + // Visible for testing + CloudFoundryServiceInstance getOsbServiceInstanceByRegion( + String region, String serviceInstanceName) { + CloudFoundrySpace space = + spaces + .findSpaceByRegion(region) + .orElseThrow(() -> new CloudFoundryApiException("Cannot find region '" + region + "'")); + return ofNullable(getOsbServiceInstance(space, serviceInstanceName)) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Cannot find service '" + + serviceInstanceName + + "' in region '" + + space.getRegion() + + "'")); + } + + private Set vetSharingOfServicesArgumentsAndGetSharingSpaces( + String sharedFromRegion, + @Nullable String serviceInstanceName, + @Nullable Set sharingRegions, + String gerund) { + if (isBlank(serviceInstanceName)) { + throw new 
CloudFoundryApiException( + "Please specify a name for the " + gerund + " service instance"); + } + sharingRegions = ofNullable(sharingRegions).orElse(Collections.emptySet()); + if (sharingRegions.size() == 0) { + throw new CloudFoundryApiException( + "Please specify a list of regions for " + gerund + " '" + serviceInstanceName + "'"); + } + + return sharingRegions.stream() + .map( + r -> { + if (sharedFromRegion.equals(r)) { + throw new CloudFoundryApiException( + "Cannot specify 'org > space' as any of the " + gerund + " regions"); + } + return spaces + .findSpaceByRegion(r) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Cannot find region '" + r + "' for " + gerund)); + }) + .collect(toSet()); + } + + // Visible for testing + Set vetUnshareServiceArgumentsAndGetSharingSpaces( + @Nullable String serviceInstanceName, @Nullable Set sharingRegions) { + return vetSharingOfServicesArgumentsAndGetSharingSpaces( + "", serviceInstanceName, sharingRegions, "unsharing"); + } + + // Visible for testing + Set vetShareServiceArgumentsAndGetSharingSpaces( + @Nullable String sharedFromRegion, + @Nullable String serviceInstanceName, + @Nullable Set sharingRegions) { + if (isBlank(sharedFromRegion)) { + throw new CloudFoundryApiException( + "Please specify a region for the sharing service instance"); + } + return vetSharingOfServicesArgumentsAndGetSharingSpaces( + sharedFromRegion, serviceInstanceName, sharingRegions, "sharing"); + } + + // Visible for testing + Void checkServiceShareable( + String serviceInstanceName, CloudFoundryServiceInstance serviceInstance) { + ConfigFeatureFlag featureFlag = + safelyCall(configApi::getConfigFeatureFlags).orElse(Collections.emptySet()).stream() + .filter(it -> it.getName() == SERVICE_INSTANCE_SHARING) + .findFirst() + .orElseThrow( + () -> + new CloudFoundryApiException( + "'service_instance_sharing' flag must be enabled in order to share services")); + if (!featureFlag.isEnabled()) { + throw new CloudFoundryApiException( + "'service_instance_sharing' flag must be enabled in order to share services"); + } + ServicePlan plan = + safelyCall(() -> api.findServicePlanByServicePlanId(serviceInstance.getPlanId())) + .map(Resource::getEntity) + .orElseThrow( + () -> + new CloudFoundryApiException( + "The service plan for 'new-service-plan-name' was not found")); + String extraString = + safelyCall(() -> api.findServiceByServiceId(plan.getServiceGuid())) + .map(Resource::getEntity) + .map( + s -> + ofNullable(s.getExtra()) + .orElseThrow( + () -> + new CloudFoundryApiException( + "The service broker must be configured as 'shareable' in order to share services"))) + .orElseThrow( + () -> + new CloudFoundryApiException( + "The service broker for '" + serviceInstanceName + "' was not found")); + + boolean isShareable; + try { + isShareable = + !StringUtils.isEmpty(extraString) + && new ObjectMapper().readValue(extraString, Map.class).get("shareable") + == Boolean.TRUE; + } catch (IOException e) { + throw new CloudFoundryApiException(e); + } + + if (!isShareable) { + throw new CloudFoundryApiException( + "The service broker must be configured as 'shareable' in order to share services"); + } + + return null; + } + + public ServiceInstanceResponse shareServiceInstance( + @Nullable String region, + @Nullable String serviceInstanceName, + @Nullable Set shareToRegions) { + Set shareToSpaces = + vetShareServiceArgumentsAndGetSharingSpaces(region, serviceInstanceName, shareToRegions); + CloudFoundryServiceInstance serviceInstance = + getOsbServiceInstanceByRegion(region, 
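+        // checkServiceShareable above parses the broker's "extra" metadata as JSON
+        // and requires it to contain {"shareable": true}; Jackson reads that into a
+        // Map whose "shareable" entry is Boolean.TRUE (hypothetical payload shown).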
serviceInstanceName); + + if (MANAGED_SERVICE_INSTANCE.name().equalsIgnoreCase(serviceInstance.getType())) { + checkServiceShareable(serviceInstanceName, serviceInstance); + } + + String serviceInstanceId = serviceInstance.getId(); + SharedTo sharedTo = + safelyCall(() -> api.getShareServiceInstanceSpaceIdsByServiceInstanceId(serviceInstanceId)) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Could not fetch spaces to which '" + + serviceInstanceName + + "' has been shared")); + Set> shareToIdsBody = + shareToSpaces.stream() + .map(space -> Collections.singletonMap("guid", space.getId())) + .filter(idMap -> !sharedTo.getData().contains(idMap)) + .collect(toSet()); + + if (shareToIdsBody.size() > 0) { + safelyCall( + () -> + api.shareServiceInstanceToSpaceIds( + serviceInstanceId, new CreateSharedServiceInstances().setData(shareToIdsBody))); + } + + return new ServiceInstanceResponse() + .setServiceInstanceName(serviceInstanceName) + .setType(SHARE) + .setState(SUCCEEDED); + } + + public ServiceInstanceResponse unshareServiceInstance( + @Nullable String serviceInstanceName, @Nullable Set unshareFromRegions) { + Set unshareFromSpaces = + vetUnshareServiceArgumentsAndGetSharingSpaces(serviceInstanceName, unshareFromRegions); + + unshareFromSpaces.forEach( + space -> + ofNullable(spaces.getServiceInstanceByNameAndSpace(serviceInstanceName, space)) + .map( + si -> + safelyCall( + () -> + api.unshareServiceInstanceFromSpaceId(si.getId(), space.getId())))); + + return new ServiceInstanceResponse() + .setServiceInstanceName(serviceInstanceName) + .setType(UNSHARE) + .setState(SUCCEEDED); + } + + @Nullable + public CloudFoundryServiceInstance getServiceInstance(String region, String serviceInstanceName) { + CloudFoundrySpace space = + spaces + .findSpaceByRegion(region) + .orElseThrow(() -> new CloudFoundryApiException("Cannot find region '" + region + "'")); + Supplier si = + () -> ofNullable(getOsbServiceInstance(space, serviceInstanceName)).orElse(null); + + Supplier up = + () -> ofNullable(getUserProvidedServiceInstance(space, serviceInstanceName)).orElse(null); + + return ofNullable(si.get()).orElseGet(up); + } + + @Nullable + @VisibleForTesting + CloudFoundryServiceInstance getOsbServiceInstance( + CloudFoundrySpace space, @Nullable String serviceInstanceName) { + return ofNullable(getServiceInstance(api::all, space, serviceInstanceName)) + .map( + r -> + CloudFoundryServiceInstance.builder() + .serviceInstanceName(r.getEntity().getName()) + .planId(r.getEntity().getServicePlanGuid()) + .type(r.getEntity().getType().toString()) + .status(r.getEntity().getLastOperation().getState().toString()) + .lastOperationDescription(r.getEntity().getLastOperation().getDescription()) + .id(r.getMetadata().getGuid()) + .build()) + .orElse(null); + } + + @Nullable + @VisibleForTesting + CloudFoundryServiceInstance getUserProvidedServiceInstance( + CloudFoundrySpace space, @Nullable String serviceInstanceName) { + return ofNullable(getServiceInstance(api::allUserProvided, space, serviceInstanceName)) + .map( + r -> + CloudFoundryServiceInstance.builder() + .serviceInstanceName(r.getEntity().getName()) + .type(USER_PROVIDED_SERVICE_INSTANCE.toString()) + .status(SUCCEEDED.toString()) + .id(r.getMetadata().getGuid()) + .build()) + .orElse(null); + } + + @Nullable + private Resource getServiceInstance( + BiFunction, Call>> func, + CloudFoundrySpace space, + @Nullable String serviceInstanceName) { + if (isBlank(serviceInstanceName)) { + throw new CloudFoundryApiException("Please specify a name for the 
service being sought"); + } + + List> serviceInstances = + collectPageResources( + "service instances by space and name", + pg -> + func.apply( + pg, + getServiceQueryParams(Collections.singletonList(serviceInstanceName), space))); + + if (serviceInstances.isEmpty()) { + return null; + } + + if (serviceInstances.size() > 1) { + throw new CloudFoundryApiException( + serviceInstances.size() + + " service instances found with name '" + + serviceInstanceName + + "' in space '" + + space.getName() + + "', but expected only 1"); + } + + return serviceInstances.get(0); + } + + public ServiceInstanceResponse destroyServiceInstance( + CloudFoundrySpace space, String serviceInstanceName) { + CloudFoundryServiceInstance managedServiceInstance = + getOsbServiceInstance(space, serviceInstanceName); + if (managedServiceInstance != null) { + String serviceInstanceId = managedServiceInstance.getId(); + destroyServiceInstance( + pg -> api.getBindingsForServiceInstance(serviceInstanceId, pg, null), + () -> api.destroyServiceInstance(serviceInstanceId)); + return new ServiceInstanceResponse() + .setServiceInstanceName(serviceInstanceName) + .setType(DELETE) + .setState(IN_PROGRESS); + } + + CloudFoundryServiceInstance userProvidedServiceInstance = + getUserProvidedServiceInstance(space, serviceInstanceName); + if (userProvidedServiceInstance != null) { + String serviceInstanceId = userProvidedServiceInstance.getId(); + destroyServiceInstance( + pg -> api.getBindingsForUserProvidedServiceInstance(serviceInstanceId, pg, null), + () -> api.destroyUserProvidedServiceInstance(serviceInstanceId)); + return new ServiceInstanceResponse() + .setServiceInstanceName(serviceInstanceName) + .setType(DELETE) + .setState(IN_PROGRESS); + } + return new ServiceInstanceResponse() + .setServiceInstanceName(serviceInstanceName) + .setType(DELETE) + .setState(NOT_FOUND); + } + + private void destroyServiceInstance( + Function>> fetchPage, + Supplier> delete) { + List> serviceBindings = + collectPageResources("service bindings", fetchPage); + if (!serviceBindings.isEmpty()) { + throw new CloudFoundryApiException( + "Unable to destroy service instance while " + + serviceBindings.size() + + " service binding(s) exist"); + } + safelyCall(delete); + } + + public ServiceInstanceResponse createServiceInstance( + String newServiceInstanceName, + String serviceName, + String servicePlanName, + Set tags, + Map parameters, + boolean updatable, + CloudFoundrySpace space) { + List cloudFoundryServicePlans = + findAllServicePlansByServiceName(serviceName); + if (cloudFoundryServicePlans.isEmpty()) { + throw new ResourceNotFoundException( + "No plans available for service name '" + serviceName + "'"); + } + + String servicePlanId = + cloudFoundryServicePlans.stream() + .filter(plan -> plan.getName().equals(servicePlanName)) + .findAny() + .orElseThrow( + () -> + new ResourceNotFoundException( + "Service '" + + serviceName + + "' does not have a matching plan '" + + servicePlanName + + "'")) + .getId(); + + CreateServiceInstance command = new CreateServiceInstance(); + command.setName(newServiceInstanceName); + command.setSpaceGuid(space.getId()); + command.setServicePlanGuid(servicePlanId); + command.setTags(tags); + command.setParameters(parameters); + + ServiceInstanceResponse response = + createServiceInstance( + command, + api::createServiceInstance, + api::updateServiceInstance, + api::all, + c -> getOsbServiceInstance(space, c.getName()), + (createServiceInstance, r) -> { + if 
(!r.getPlanId().equals(createServiceInstance.getServicePlanGuid())) { + throw new CloudFoundryApiException( + "A service with name '" + + createServiceInstance.getName() + + "' exists but has a different plan"); + } + }, + updatable, + space); + + response.setState(updatable ? IN_PROGRESS : SUCCEEDED); + return response; + } + + public ServiceInstanceResponse createUserProvidedServiceInstance( + String newUserProvidedServiceInstanceName, + String syslogDrainUrl, + Set tags, + Map credentials, + String routeServiceUrl, + boolean updatable, + CloudFoundrySpace space) { + CreateUserProvidedServiceInstance command = new CreateUserProvidedServiceInstance(); + command.setName(newUserProvidedServiceInstanceName); + command.setSyslogDrainUrl(syslogDrainUrl); + command.setTags(tags); + command.setCredentials(credentials); + command.setRouteServiceUrl(routeServiceUrl); + command.setSpaceGuid(space.getId()); + + ServiceInstanceResponse response = + createServiceInstance( + command, + api::createUserProvidedServiceInstance, + api::updateUserProvidedServiceInstance, + api::allUserProvided, + c -> getUserProvidedServiceInstance(space, c.getName()), + (c, r) -> {}, + updatable, + space); + + response.setState(SUCCEEDED); + return response; + } + + private + ServiceInstanceResponse createServiceInstance( + T command, + Function>> create, + BiFunction>> update, + BiFunction, Call>> getAllServices, + Function getServiceInstance, + BiConsumer updateValidation, + boolean updatable, + CloudFoundrySpace space) { + LastOperation.Type operationType; + List serviceInstanceQuery = + getServiceQueryParams(Collections.singletonList(command.getName()), space); + List> serviceInstances = new ArrayList<>(); + serviceInstances.addAll( + collectPageResources( + "service instances", pg -> getAllServices.apply(pg, serviceInstanceQuery))); + + operationType = CREATE; + if (serviceInstances.size() == 0) { + safelyCall(() -> create.apply(command)) + .map(res -> res.getMetadata().getGuid()) + .orElseThrow( + () -> + new CloudFoundryApiException( + "service instance '" + command.getName() + "' could not be created")); + } else if (updatable) { + operationType = UPDATE; + serviceInstances.stream() + .findFirst() + .map(r -> r.getMetadata().getGuid()) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Service instance '" + command.getName() + "' not found")); + CloudFoundryServiceInstance serviceInstance = getServiceInstance.apply(command); + if (serviceInstance == null) { + throw new CloudFoundryApiException( + "No service instances with name '" + + command.getName() + + "' found in space " + + space.getName()); + } + updateValidation.accept(command, serviceInstance); + safelyCall(() -> update.apply(serviceInstance.getId(), command)); + } + + return new ServiceInstanceResponse() + .setServiceInstanceName(command.getName()) + .setType(operationType); + } + + private static List getServiceQueryParams( + List serviceNames, CloudFoundrySpace space) { + return Arrays.asList( + serviceNames.size() == 1 + ? 
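+        // One name yields the exact form, e.g. "name:my-service"; several yield the
+        // set form "name IN svc-a,svc-b" (hypothetical names), combined with the
+        // organization_guid and space_guid filters below.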
"name:" + serviceNames.get(0) + : "name IN " + String.join(",", serviceNames), + "organization_guid:" + space.getOrganization().getId(), + "space_guid:" + space.getId()); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceKeys.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceKeys.java new file mode 100644 index 00000000000..bf623776e74 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceKeys.java @@ -0,0 +1,120 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.collectPageResources; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ServiceKeyService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.*; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class ServiceKeys { + private final ServiceKeyService api; + private final Spaces spaces; + + public ServiceKeyResponse createServiceKey( + CloudFoundrySpace space, String serviceInstanceName, String serviceKeyName) { + return Optional.ofNullable(spaces.getServiceInstanceByNameAndSpace(serviceInstanceName, space)) + .map( + ssi -> + getServiceKey(ssi.getId(), serviceKeyName) + .map(Resource::getEntity) + .map(ServiceKey::getCredentials) + .orElseGet( + () -> + safelyCall( + () -> + api.createServiceKey( + new CreateServiceKey() + .setName(serviceKeyName) + .setServiceInstanceGuid(ssi.getId()))) + .map(Resource::getEntity) + .map(ServiceCredentials::getCredentials) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Service key '" + + serviceKeyName + + "' could not be created for service instance '" + + serviceInstanceName + + "' in region '" + + space.getRegion() + + "'")))) + .map( + serviceCredentials -> + (ServiceKeyResponse) + new ServiceKeyResponse() + .setServiceKeyName(serviceKeyName) + .setServiceKey(serviceCredentials) + .setType(LastOperation.Type.CREATE_SERVICE_KEY) + .setState(LastOperation.State.SUCCEEDED) + .setServiceInstanceName(serviceInstanceName)) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Service instance '" + + serviceInstanceName + + "' not found in region '" + + space.getRegion() + + "'")); + } + + public ServiceKeyResponse deleteServiceKey( + CloudFoundrySpace space, String serviceInstanceName, String serviceKeyName) { + return Optional.ofNullable(spaces.getServiceInstanceByNameAndSpace(serviceInstanceName, space)) + .map( + ssi -> + 
getServiceKey(ssi.getId(), serviceKeyName) + .map( + serviceKeyResource -> + safelyCall( + () -> + api.deleteServiceKey( + serviceKeyResource.getMetadata().getGuid())))) + .map( + _a -> + (ServiceKeyResponse) + new ServiceKeyResponse() + .setServiceKeyName(serviceKeyName) + .setType(LastOperation.Type.CREATE_SERVICE_KEY) + .setState(LastOperation.State.SUCCEEDED) + .setServiceInstanceName(serviceInstanceName)) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Cannot find service '" + + serviceInstanceName + + "' in region '" + + space.getRegion() + + "'")); + } + + Optional> getServiceKey(String serviceInstanceId, String serviceKeyName) { + List queryParams = + Arrays.asList("service_instance_guid:" + serviceInstanceId, "name:" + serviceKeyName); + return collectPageResources( + "service key by service instance id and service key name", + pg -> api.getServiceKey(pg, queryParams)) + .stream() + .findFirst(); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Spaces.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Spaces.java new file mode 100644 index 00000000000..dc7a0ad89b0 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Spaces.java @@ -0,0 +1,174 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.*; +import static java.util.stream.Collectors.toList; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.SpaceService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Space; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class Spaces { + private final SpaceService api; + private final Organizations organizations; + + private final LoadingCache spaceCache = + CacheBuilder.newBuilder() + .expireAfterWrite(5, TimeUnit.MINUTES) + .build( + new CacheLoader() { + @Override + public CloudFoundrySpace load(@Nonnull String guid) + throws CloudFoundryApiException, ResourceNotFoundException { + return safelyCall(() -> api.findById(guid)) + .map(Spaces.this::map) + .orElseThrow(ResourceNotFoundException::new); + } + }); + + public CloudFoundrySpace findById(String guid) throws CloudFoundryApiException { + try { + return spaceCache.get(guid); + } catch (ExecutionException e) { + if (e.getCause() instanceof ResourceNotFoundException) return null; + throw new CloudFoundryApiException(e.getCause(), "Unable to find space by id"); + } + } + + public List all() throws CloudFoundryApiException { + return collectPages("spaces", page -> api.all(page, null, null)).stream() + .map(this::map) + .collect(toList()); + } + + @Nullable + public CloudFoundryServiceInstance getServiceInstanceById( + String spaceId, String serviceInstanceName) { + return collectPageResources( + "get service instances by id", + pg -> + api.getServiceInstancesById( + spaceId, pg, Collections.singletonList("name:" + serviceInstanceName))) + .stream() + .findFirst() + .map( + e -> + CloudFoundryServiceInstance.builder() + .name(e.getEntity().getName()) + .id(e.getMetadata().getGuid()) + .build()) + .orElse(null); + } + + @Nullable + public CloudFoundrySpace findByName(String orgId, String spaceName) + throws CloudFoundryApiException { + return collectPages("spaces", page -> api.all(page, spaceName, orgId)).stream() + .findAny() + .map(this::map) + .orElse(null); + } + + @Nullable + public CloudFoundryServiceInstance getServiceInstanceByNameAndSpace( + String serviceInstanceName, CloudFoundrySpace space) { + return Optional.ofNullable(getServiceInstanceById(space.getId(), serviceInstanceName)) + .orElse(null); + } + + private CloudFoundrySpace map(Space space) throws CloudFoundryApiException { + return CloudFoundrySpace.builder() + .id(space.getGuid()) + .name(space.getName()) + .organization( + organizations.findById( + space.getRelationships().get("organization").getData().getGuid())) + .build(); + } + + public Optional findSpaceByRegion(String region) { + CloudFoundrySpace space = CloudFoundrySpace.fromRegion(region); + + CloudFoundryOrganization organization = + organizations + .findByName(space.getOrganization().getName()) + .orElseThrow( + () -> 
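+            // findSpaceByRegion expects regions of the form "org > space" (the format
+            // CloudFoundrySpace.fromRegion parses) and, just below, rejects lookups
+            // whose org or space names match only case-insensitively.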
+ new CloudFoundryApiException( + "Unable to find organization: " + space.getOrganization().getName())); + + Optional spaceOptional = + collectPages("spaces", page -> api.all(page, space.getName(), organization.getId())) + .stream() + .findAny() + .map( + s -> + CloudFoundrySpace.builder() + .id(s.getGuid()) + .name(s.getName()) + .organization(organization) + .build()); + + spaceOptional.ifPresent( + spaceCase -> { + if (!(space.getName().equals(spaceCase.getName()) + && space.getOrganization().getName().equals(spaceCase.getOrganization().getName()))) { + throw new CloudFoundryApiException("Org or Space name not in correct case"); + } + }); + + return spaceOptional; + } + + public List findAllBySpaceNamesAndOrgNames( + List spaceNames, List orgNames) { + Map allOrgsByGuids = new HashMap<>(); + organizations.findAllByNames(orgNames).stream().forEach(o -> allOrgsByGuids.put(o.getId(), o)); + + String spaceNamesQ = + spaceNames == null || spaceNames.isEmpty() ? null : String.join(",", spaceNames); + String orgGuidsQ = + allOrgsByGuids.keySet().isEmpty() ? null : String.join(",", allOrgsByGuids.keySet()); + + return collectPages("spaces", page -> api.all(page, spaceNamesQ, orgGuidsQ)).stream() + .map( + s -> + CloudFoundrySpace.builder() + .organization( + allOrgsByGuids.getOrDefault( + s.getRelationships().get("organization").getData().getGuid(), null)) + .name(s.getName()) + .id(s.getGuid()) + .build()) + .filter( + s -> s.getOrganization() != null && orgNames.contains(s.getOrganization().getName())) + .collect(toList()); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Tasks.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Tasks.java new file mode 100644 index 00000000000..4e15172fc03 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Tasks.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
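Worth noting for reviewers: findSpaceByRegion is the bridge from a Spinnaker region string to a CF space, and it deliberately rejects results whose org or space name matches only case-insensitively, since the CF list endpoints match names without regard to case while later, name-sensitive operations need the exact casing. A minimal usage sketch, assuming the `org > space` region convention implied by CloudFoundrySpace.fromRegion (the `spaces` variable and region value here are illustrative, not part of this PR):

    // Sketch: resolve a deploy-target region to a space, or fail loudly.
    CloudFoundrySpace target =
        spaces
            .findSpaceByRegion("myorg > mydev")
            .orElseThrow(() -> new IllegalStateException("No space found for region"));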
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Tasks.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Tasks.java
new file mode 100644
index 00000000000..4e15172fc03
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/Tasks.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.TaskService;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreateTask;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public class Tasks {
+  private final TaskService api;
+
+  public Task createTask(String applicationGuid, String command, String name) {
+    CreateTask createTask = CreateTask.builder().command(command).name(name).build();
+
+    return safelyCall(() -> api.createTask(applicationGuid, createTask))
+        .orElseThrow(ResourceNotFoundException::new);
+  }
+
+  public Task getTask(String id) {
+    return safelyCall(() -> api.getTask(id)).orElseThrow(ResourceNotFoundException::new);
+  }
+
+  public Task cancelTask(String id) {
+    return safelyCall(() -> api.cancelTask(id, "")).orElseThrow(ResourceNotFoundException::new);
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ApplicationService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ApplicationService.java
new file mode 100644
index 00000000000..fe1382533f6
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ApplicationService.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.*;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Application;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Package;
+import java.util.List;
+import java.util.Map;
+import okhttp3.MultipartBody;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.http.*;
+
+public interface ApplicationService {
+  @GET("/v3/apps")
+  Call<Pagination<Application>> all(
+      @Query("page") Integer page,
+      @Query("per_page") Integer perPage,
+      @Query("names") List<String> names,
+      @Query("space_guids") String spaceGuids);
+
+  @GET("/v3/apps/{guid}")
+  Call<Application> findById(@Path("guid") String guid);
+
+  @GET("/v2/apps/{guid}/env")
+  Call<ApplicationEnv> findApplicationEnvById(@Path("guid") String guid);
+
+  @GET("/v3/apps/{guid}/droplets/current")
+  Call<Droplet> findDropletByApplicationGuid(@Path("guid") String guid);
+
+  @GET("/v2/apps/{guid}/instances")
+  Call<Map<String, InstanceStatus>> instances(@Path("guid") String guid);
+
+  @GET("/v2/apps")
+  Call<Page<com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application>>
+      listAppsFiltered(
+          @Query("page") Integer page,
+          @Query("q") List<String> q,
+          @Query("results-per-page") Integer resultsPerPage);
+
+  /** Requires an empty body. */
+  @PUT("/v2/apps/{aguid}/routes/{rguid}")
+  Call<ResponseBody> mapRoute(
+      @Path("aguid") String applicationGuid, @Path("rguid") String routeGuid, @Body MapRoute body);
+
+  @DELETE("/v2/apps/{aguid}/routes/{rguid}")
+  Call<ResponseBody> unmapRoute(
+      @Path("aguid") String applicationGuid, @Path("rguid") String routeGuid);
+
+  @POST("/v3/apps/{guid}/actions/start")
+  Call<Application> startApplication(@Path("guid") String guid, @Body StartApplication body);
+
+  @POST("/v3/apps/{guid}/actions/stop")
+  Call<Application> stopApplication(@Path("guid") String guid, @Body StopApplication body);
+
+  @DELETE("/v3/apps/{guid}")
+  Call<ResponseBody> deleteApplication(@Path("guid") String guid);
+
+  @DELETE("/v2/apps/{guid}/instances/{index}")
+  Call<ResponseBody> deleteAppInstance(@Path("guid") String guid, @Path("index") String index);
+
+  @POST("/v3/apps")
+  Call<Application> createApplication(@Body CreateApplication application);
+
+  @GET("/v3/apps/{guid}/packages")
+  Call<Pagination<Package>> findPackagesByAppId(@Path("guid") String appGuid);
+
+  @POST("/v3/packages")
+  Call<Package> createPackage(@Body CreatePackage pkg);
+
+  @GET("/v3/packages/{guid}")
+  Call<Package> getPackage(@Path("guid") String packageGuid);
+
+  @GET("/v3/packages/{guid}/download")
+  Call<ResponseBody> downloadPackage(@Path("guid") String packageGuid);
+
+  @Multipart
+  @POST("/v3/packages/{guid}/upload")
+  Call<Package> uploadPackageBits(@Path("guid") String packageGuid, @Part MultipartBody.Part file);
+
+  @POST("/v3/builds")
+  Call<Build> createBuild(@Body CreateBuild build);
+
+  @GET("/v3/builds/{guid}")
+  Call<Build> getBuild(@Path("guid") String buildGuid);
+
+  @PATCH("/v3/apps/{guid}/relationships/current_droplet")
+  Call<Relationship> setCurrentDroplet(@Path("guid") String appGuid, @Body ToOneRelationship body);
+
+  @POST("/v2/apps/{guid}/restage")
+  Call<ResponseBody> restageApplication(@Path("guid") String appGuid, @Body Object dummy);
+
+  @GET("/v2/apps/{guid}/service_bindings")
+  Call<Page<ServiceBinding>> getServiceBindings(@Path("guid") String appGuid);
+}
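All of these endpoint interfaces return retrofit2.Call, so nothing touches the network until a caller executes the request. Elsewhere in this diff (Tasks, Spaces) that execution is funneled through CloudFoundryClientUtils.safelyCall, which appears to run the Call and translate empty or error responses into an Optional plus CloudFoundryApiException. A sketch of the consumption pattern, with `applicationService` standing in for a Retrofit-built instance (names illustrative):

    // Sketch only: mirrors how Tasks and Spaces above consume these interfaces.
    Application app =
        safelyCall(() -> applicationService.findById(appGuid))
            .orElseThrow(ResourceNotFoundException::new);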
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/AuthenticationService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/AuthenticationService.java
new file mode 100644
index 00000000000..bd781c9bf17
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/AuthenticationService.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.Token;
+import java.util.Map;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.Callback;
+import retrofit2.http.*;
+
+public interface AuthenticationService {
+  @FormUrlEncoded
+  @POST("/oauth/token")
+  Call<Token> passwordToken(
+      @Field("grant_type") String grantType,
+      @Field("username") String username,
+      @Field("password") String password,
+      @Field("client_id") String clientId,
+      @Field("client_secret") String clientSecret);
+
+  @DELETE("/oauth/token/revoke/client/{clientId}")
+  Call<ResponseBody> revokeToken(
+      @Path("clientId") String tokenId, Callback<ResponseBody> callback);
+
+  @FormUrlEncoded
+  @POST("/oauth/authorize")
+  Call<ResponseBody> authorize(
+      @Field("response_type") String responseType,
+      @Field("client_id") String clientId,
+      @Field("scope") String scope,
+      Callback<ResponseBody> callback);
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ConfigService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ConfigService.java
new file mode 100644
index 00000000000..e0b8751210b
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ConfigService.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ConfigFeatureFlag;
+import java.util.Set;
+import retrofit2.Call;
+import retrofit2.http.GET;
+
+public interface ConfigService {
+  @GET("/v2/config/feature_flags")
+  Call<Set<ConfigFeatureFlag>> getConfigFeatureFlags();
+}
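The password grant on AuthenticationService above is presumably what bootstraps the provider's bearer token against UAA. A hedged sketch of fetching one (credential values are placeholders, and in real code execute()'s IOException would go through the client's error handling rather than being ignored):

    // Sketch: obtain a UAA token via the resource-owner-password grant.
    Token token =
        authenticationService
            .passwordToken("password", "user@example.com", "hunter2", "cf", "")
            .execute()
            .body();
    // token.getExpiresIn() is in seconds; see the Token model later in this diff.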
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/DomainService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/DomainService.java
new file mode 100644
index 00000000000..05119cfa271
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/DomainService.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Domain;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import retrofit2.Call;
+import retrofit2.http.GET;
+import retrofit2.http.Path;
+import retrofit2.http.Query;
+
+public interface DomainService {
+  @GET("/v2/shared_domains/{guid}")
+  Call<Resource<Domain>> findSharedDomainById(@Path("guid") String guid);
+
+  @GET("/v2/private_domains/{guid}")
+  Call<Resource<Domain>> findPrivateDomainById(@Path("guid") String guid);
+
+  @GET("/v2/private_domains")
+  Call<Page<Domain>> allPrivate(@Query("page") Integer page);
+
+  @GET("/v2/shared_domains")
+  Call<Page<Domain>> allShared(@Query("page") Integer page);
+
+  @GET("/v2/domains")
+  Call<Page<Domain>> all(@Query("page") Integer page);
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/DopplerService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/DopplerService.java
new file mode 100644
index 00000000000..2f67e245341
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/DopplerService.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import java.util.List;
+import org.cloudfoundry.dropsonde.events.EventFactory.Envelope;
+import retrofit2.Call;
+import retrofit2.http.GET;
+import retrofit2.http.Path;
+
+public interface DopplerService {
+  @GET("/apps/{guid}/recentlogs")
+  Call<List<Envelope>> recentLogs(@Path("guid") String appGuid);
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/OrganizationService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/OrganizationService.java
new file mode 100644
index 00000000000..22c26c7a475
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/OrganizationService.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Organization;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination;
+import retrofit2.Call;
+import retrofit2.http.GET;
+import retrofit2.http.Path;
+import retrofit2.http.Query;
+
+public interface OrganizationService {
+  @GET("/v3/organizations")
+  Call<Pagination<Organization>> all(
+      @Query("page") Integer page, @Query("names") String orgNames);
+
+  @GET("/v3/organizations/{guid}")
+  Call<Organization> findById(@Path("guid") String guid);
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ProcessesService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ProcessesService.java
new file mode 100644
index 00000000000..08fa014e0ac
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ProcessesService.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Process;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessResources;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ScaleProcess;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.UpdateProcess;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.http.*;
+
+public interface ProcessesService {
+
+  @GET("/v3/processes")
+  Call<Pagination<Process>> getProcesses(
+      @Query("page") Integer page, @Query("app_guids") String appGuids);
+
+  @POST("/v3/processes/{guid}/actions/scale")
+  Call<ResponseBody> scaleProcess(@Path("guid") String guid, @Body ScaleProcess scaleProcess);
+
+  @PATCH("/v3/processes/{guid}")
+  Call<Process> updateProcess(@Path("guid") String guid, @Body UpdateProcess updateProcess);
+
+  @GET("/v3/processes/{guid}")
+  Call<Process> findProcessById(@Path("guid") String guid);
+
+  @GET("/v3/processes/{guid}/stats")
+  Call<ProcessResources> findProcessStatsById(@Path("guid") String guid);
+}
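ProcessesService is the v3 surface the scale operations would go through; CF v3 scales per process type rather than per app. An illustrative sketch, assuming ScaleProcess carries the instances/memory/disk triple its name suggests (its fields are defined elsewhere in this PR, so the constructor shape here is a guess):

    // Sketch: scale a process to 3 instances; the payload shape is assumed.
    safelyCall(() -> processesService.scaleProcess(processGuid, new ScaleProcess(3, 1024, 1024)));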
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/RouteService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/RouteService.java
new file mode 100644
index 00000000000..2cd7e083295
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/RouteService.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Route;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.RouteMapping;
+import java.util.List;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.http.*;
+
+public interface RouteService {
+  // Mapping to CF API style query params -
+  // https://apidocs.cloudfoundry.org/1.34.0/routes/list_all_routes.html
+  @GET("/v2/routes?results-per-page=100")
+  Call<Page<Route>> all(
+      @Query("page") Integer page,
+      @Query("per_page") Integer perPage,
+      @Query("q") List<String> queryParams);
+
+  @GET("/v2/routes/{guid}")
+  Call<Resource<Route>> findById(@Path("guid") String guid);
+
+  @GET("/v2/routes/{guid}/route_mappings")
+  Call<Page<RouteMapping>> routeMappings(@Path("guid") String guid, @Query("page") Integer page);
+
+  @POST("/v2/routes")
+  Call<Resource<Route>> createRoute(@Body Route route);
+
+  @DELETE("/v2/routes/{guid}?recursive=true")
+  Call<ResponseBody> deleteRoute(@Path("guid") String guid);
+}
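As the comment in RouteService notes, the v2 endpoints take their filters as a `q` list in which each entry is a `name:value` pair. A sketch of a host-plus-domain lookup built that way (values illustrative; safelyCall as used elsewhere in this PR):

    // Sketch: CF v2-style "q" filter for a route lookup.
    List<String> q = Arrays.asList("host:demo", "domain_guid:" + domainGuid);
    Page<Route> routes =
        safelyCall(() -> routeService.all(1, 100, q)).orElseThrow(ResourceNotFoundException::new);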
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ServiceInstanceService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ServiceInstanceService.java
new file mode 100644
index 00000000000..84851dd495c
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ServiceInstanceService.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreateSharedServiceInstances;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.SharedTo;
+import java.util.List;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.http.*;
+
+public interface ServiceInstanceService {
+  @GET("/v2/service_instances")
+  Call<Page<ServiceInstance>> all(
+      @Query("page") Integer page, @Query("q") List<String> queryParams);
+
+  @GET("/v2/user_provided_service_instances")
+  Call<Page<UserProvidedServiceInstance>> allUserProvided(
+      @Query("page") Integer page, @Query("q") List<String> queryParam);
+
+  @GET("/v2/services")
+  Call<Page<Service>> findService(
+      @Query("page") Integer page, @Query("q") List<String> queryParams);
+
+  @GET("/v2/service_plans/{guid}")
+  Call<Resource<ServicePlan>> findServicePlanByServicePlanId(@Path("guid") String servicePlanGuid);
+
+  @GET("/v2/services/{guid}")
+  Call<Resource<Service>> findServiceByServiceId(@Path("guid") String serviceGuid);
+
+  @GET("/v2/spaces/{guid}/services")
+  Call<Page<Service>> findServiceBySpaceId(
+      @Path("guid") String spaceGuid,
+      @Query("page") Integer page,
+      @Query("q") List<String> queryParams);
+
+  @GET("/v2/service_plans")
+  Call<Page<ServicePlan>> findServicePlans(
+      @Query("page") Integer page, @Query("q") List<String> queryParams);
+
+  @POST("/v2/service_instances?accepts_incomplete=true")
+  Call<Resource<ServiceInstance>> createServiceInstance(@Body CreateServiceInstance body);
+
+  @POST("/v2/user_provided_service_instances")
+  Call<Resource<UserProvidedServiceInstance>> createUserProvidedServiceInstance(
+      @Body CreateUserProvidedServiceInstance body);
+
+  @PUT("/v2/service_instances/{guid}?accepts_incomplete=true")
+  Call<Resource<ServiceInstance>> updateServiceInstance(
+      @Path("guid") String serviceInstanceGuid, @Body CreateServiceInstance body);
+
+  @PUT("/v2/user_provided_service_instances/{guid}")
+  Call<Resource<UserProvidedServiceInstance>> updateUserProvidedServiceInstance(
+      @Path("guid") String userProvidedServiceInstanceGuid,
+      @Body CreateUserProvidedServiceInstance body);
+
+  @POST("/v2/service_bindings?accepts_incomplete=true")
+  Call<Resource<ServiceBinding>> createServiceBinding(@Body CreateServiceBinding body);
+
+  @GET("/v2/service_instances/{guid}/service_bindings")
+  Call<Page<ServiceBinding>> getBindingsForServiceInstance(
+      @Path("guid") String serviceInstanceGuid,
+      @Query("page") Integer page,
+      @Query("q") List<String> queryParams);
+
+  @GET("/v2/user_provided_service_instances/{guid}/service_bindings")
+  Call<Page<ServiceBinding>> getBindingsForUserProvidedServiceInstance(
+      @Path("guid") String userProvidedServiceInstanceGuid,
+      @Query("page") Integer page,
+      @Query("q") List<String> queryParams);
+
+  @GET("/v2/service_bindings")
+  Call<Page<ServiceBinding>> getAllServiceBindings(@Query("q") List<String> queryParams);
+
+  @DELETE("/v2/service_bindings/{guid}?accepts_incomplete=true")
+  Call<ResponseBody> deleteServiceBinding(@Path("guid") String serviceBindingGuid);
+
+  @DELETE("/v2/service_instances/{guid}?accepts_incomplete=true")
+  Call<ResponseBody> destroyServiceInstance(@Path("guid") String serviceInstanceGuid);
+
+  @DELETE("/v2/user_provided_service_instances/{guid}")
+  Call<ResponseBody> destroyUserProvidedServiceInstance(@Path("guid") String serviceInstanceGuid);
+
+  @POST("/v3/service_instances/{guid}/relationships/shared_spaces")
+  Call<ResponseBody> shareServiceInstanceToSpaceIds(
+      @Path("guid") String serviceInstanceGuid, @Body CreateSharedServiceInstances body);
+
+  @GET("/v3/service_instances/{guid}/relationships/shared_spaces")
+  Call<SharedTo> getShareServiceInstanceSpaceIdsByServiceInstanceId(
+      @Path("guid") String serviceInstanceGuid);
+
+  @DELETE("/v3/service_instances/{guid}/relationships/shared_spaces/{space_guid}")
+  Call<ResponseBody> unshareServiceInstanceFromSpaceId(
+      @Path("guid") String serviceInstanceGuid, @Path("space_guid") String spaceGuid);
+}
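Note that createServiceInstance, updateServiceInstance, and destroyServiceInstance all pin accepts_incomplete=true, so a broker may answer 202 and finish the operation asynchronously; callers are then expected to poll the instance's last_operation until it leaves the in-progress state. A hedged sketch of that contract, assuming the v2 ServiceInstance model exposes the broker's last_operation (suggested by the LastOperation model later in this diff):

    // Sketch: an accepted-but-incomplete create, with the polling obligation spelled out.
    Resource<ServiceInstance> created =
        safelyCall(() -> serviceInstanceService.createServiceInstance(request))
            .orElseThrow(ResourceNotFoundException::new);
    LastOperation op = created.getEntity().getLastOperation(); // assumed accessor
    if (op != null && op.getState() == LastOperation.State.IN_PROGRESS) {
      // re-fetch via all(page, singletonList("name:" + request.getName())) after a
      // backoff, until the state leaves IN_PROGRESS
    }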
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ServiceKeyService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ServiceKeyService.java
new file mode 100644
index 00000000000..aeaf25968b3
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/ServiceKeyService.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*;
+import java.util.List;
+import okhttp3.ResponseBody;
+import retrofit2.Call;
+import retrofit2.http.*;
+
+public interface ServiceKeyService {
+  @POST("/v2/service_keys")
+  Call<Resource<ServiceKey>> createServiceKey(@Body CreateServiceKey body);
+
+  @GET("/v2/service_keys")
+  Call<Page<ServiceKey>> getServiceKey(
+      @Query("page") Integer page, @Query("q") List<String> queryParams);
+
+  @DELETE("/v2/service_keys/{guid}")
+  Call<ResponseBody> deleteServiceKey(@Path("guid") String guid);
+}
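Service keys are CF's broker-issued credential objects, and the lookup endpoint reuses the same v2 `q` filter convention as the other services. A sketch of a fetch-by-name (illustrative, and it assumes the key exists; a real caller would guard the empty-page case):

    // Sketch: find a service key by name via the v2 "q" filter style.
    ServiceKey key =
        safelyCall(() -> serviceKeyService.getServiceKey(1, singletonList("name:" + keyName)))
            .map(page -> page.getResources().get(0).getEntity())
            .orElseThrow(ResourceNotFoundException::new);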
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/SpaceService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/SpaceService.java
new file mode 100644
index 00000000000..6b6ba202499
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/SpaceService.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Space;
+import java.util.List;
+import retrofit2.Call;
+import retrofit2.http.GET;
+import retrofit2.http.Path;
+import retrofit2.http.Query;
+
+public interface SpaceService {
+  @GET("/v3/spaces")
+  Call<Pagination<Space>> all(
+      @Query("page") Integer page,
+      @Query("names") String names,
+      @Query("organization_guids") String orgGuids);
+
+  @GET("/v3/spaces/{guid}")
+  Call<Space> findById(@Path("guid") String guid);
+
+  @GET("/v2/spaces/{guid}/service_instances")
+  Call<Page<ServiceInstance>> getServiceInstancesById(
+      @Path("guid") String guid,
+      @Query("page") Integer page,
+      @Query("q") List<String> queryParams);
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/TaskService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/TaskService.java
new file mode 100644
index 00000000000..4e3987dbb2e
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/api/TaskService.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.api;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreateTask;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task;
+import retrofit2.Call;
+import retrofit2.http.Body;
+import retrofit2.http.GET;
+import retrofit2.http.POST;
+import retrofit2.http.Path;
+
+public interface TaskService {
+
+  @POST("/v3/apps/{guid}/tasks")
+  Call<Task> createTask(@Path("guid") String guid, @Body CreateTask body);
+
+  @GET("/v3/tasks/{guid}")
+  Call<Task> getTask(@Path("guid") String guid);
+
+  @POST("/v3/tasks/{guid}/actions/cancel")
+  Call<Task> cancelTask(@Path("guid") String guid, @Body Object emptyBody);
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ErrorDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ErrorDescription.java
new file mode 100644
index 00000000000..3bc60ee3b99
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ErrorDescription.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model;
+
+import static java.util.Arrays.stream;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import lombok.Getter;
+import lombok.Setter;
+
+/**
+ * This is a union type of the code/description mechanisms for Cloud Foundry API v2 and v3. V3 {
+ * "errors": [ { "code": 10008, "title": "CF-UnprocessableEntity", "detail": "something went wrong"
+ * } ] }
+ *
+ * <p>V2 { "description": "The route is invalid: host is required for shared-domains",
+ * "error_code": "CF-RouteInvalid", "code": 210001 }
+ *
+ * <p>UAA { "error_description":"Password must contain at least 1 special characters.",
+ * "error":"invalid_password", <- no logic for this right now -- not needed "message":"Password
+ * must contain at least 1 special characters." }
+ */
+@Setter
+public class ErrorDescription {
+  /** Cloud Foundry API v2. */
+  @Nullable private String description;
+
+  /** Cloud Foundry API v2. */
+  @JsonProperty("error_code")
+  @Nullable
+  private Code errorCode;
+
+  /** Cloud Foundry API v2 & v3. */
+  @Nullable private int code;
+
+  /** UAA API */
+  @JsonProperty("error_description")
+  @Nullable
+  private String errorDescription;
+
+  /** Cloud Foundry API v3. */
+  @Nullable private List<ErrorDescription> errors;
+
+  /** Cloud Foundry API v3. */
+  @Getter @Nullable private Code title;
+
+  /** Cloud Foundry API v3. */
+  @Getter @Nullable private String detail;
+
+  @Nullable
+  public Code getCode() {
+    return errors != null && !errors.isEmpty() ? errors.get(0).getTitle() : errorCode;
+  }
+
+  public List<String> getErrors() {
+    // v2 error
+    if (description != null) {
+      return singletonList(description);
+    }
+
+    // v3 error
+    if (errors != null && !errors.isEmpty()) {
+      return errors.stream().map(e -> e.getDetail()).collect(Collectors.toList());
+    }
+
+    // UAA error
+    if (errorDescription != null) {
+      return singletonList(errorDescription);
+    }
+
+    return emptyList();
+  }
+
+  public enum Code {
+    ROUTE_HOST_TAKEN("CF-RouteHostTaken"),
+    ROUTE_PATH_TAKEN("CF-RoutePathTaken"),
+    ROUTE_PORT_TAKEN("CF-RoutePortTaken"),
+    RESOURCE_NOT_FOUND("CF-ResourceNotFound"),
+    SERVICE_ALREADY_EXISTS("60002"),
+    SERVICE_INSTANCE_ALREADY_BOUND("CF-ServiceBindingAppServiceTaken"),
+    NOT_AUTHORIZED("CF-NotAuthorized");
+
+    private final String code;
+
+    Code(String code) {
+      this.code = code;
+    }
+
+    @Nullable
+    @JsonCreator
+    public static Code fromCode(String code) {
+      return stream(Code.values()).filter(st -> st.code.equals(code)).findFirst().orElse(null);
+    }
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/RouteId.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/RouteId.java
new file mode 100644
index 00000000000..64bddab942d
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/RouteId.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model; + +import javax.annotation.Nullable; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@NoArgsConstructor +@AllArgsConstructor +@Data +public class RouteId { + private String host; + private String path; + + @Nullable private Integer port; + + private String domainGuid; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ServiceInstanceResponse.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ServiceInstanceResponse.java new file mode 100644 index 00000000000..931af321835 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ServiceInstanceResponse.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation; +import lombok.Data; + +@Data +public class ServiceInstanceResponse { + private String serviceInstanceName; + private LastOperation.Type type; + private LastOperation.State state; + private String previousInstanceName; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ServiceKeyResponse.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ServiceKeyResponse.java new file mode 100644 index 00000000000..bad0e3f14cc --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ServiceKeyResponse.java @@ -0,0 +1,27 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model;
+
+import java.util.Collections;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class ServiceKeyResponse extends ServiceInstanceResponse {
+  Map<String, Object> serviceKey = Collections.emptyMap();
+  String serviceKeyName;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/Token.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/Token.java
new file mode 100644
index 00000000000..02d982a065e
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/Token.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model;
+
+import lombok.Data;
+
+@Data
+public class Token {
+  private String accessToken;
+
+  /** Dimensioned in seconds */
+  private long expiresIn;
+
+  /** A globally unique identifier for this token */
+  private String jti;
+}
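Circling back to the ErrorDescription union type defined earlier in this diff: because three wire formats funnel into one class, getErrors() is the one accessor that always yields the human-readable messages. A sketch of a v3 payload collapsing into it (plain Jackson assumed; whether the client registers extra mapper features is not shown in this diff, and readValue throws JsonProcessingException in real code):

    // Sketch: a v3 error body deserialized into the union type
    // (import assumed: com.fasterxml.jackson.databind.ObjectMapper).
    ErrorDescription err =
        new ObjectMapper()
            .readValue(
                "{\"errors\":[{\"code\":10008,\"title\":\"CF-UnprocessableEntity\","
                    + "\"detail\":\"something went wrong\"}]}",
                ErrorDescription.class);
    // err.getErrors() -> ["something went wrong"]
    // err.getCode()   -> null, since CF-UnprocessableEntity is not a declared Code constant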
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/AbstractCreateServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/AbstractCreateServiceInstance.java
new file mode 100644
index 00000000000..59793414ac0
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/AbstractCreateServiceInstance.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.util.Set;
+import javax.annotation.Nullable;
+import lombok.Data;
+
+@Data
+public abstract class AbstractCreateServiceInstance {
+  private String name;
+  private String spaceGuid;
+  private boolean updatable = true;
+
+  @Nullable private Set<String> tags;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/AbstractServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/AbstractServiceInstance.java
new file mode 100644
index 00000000000..b734ca4e9c6
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/AbstractServiceInstance.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.util.Set;
+import javax.annotation.Nullable;
+import lombok.Data;
+
+@Data
+public abstract class AbstractServiceInstance {
+  private String name;
+
+  @Nullable private Set<String> tags;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Application.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Application.java
new file mode 100644
index 00000000000..024a48d27ec
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Application.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import lombok.Data;
+
+@Data
+public class Application {
+  private String name;
+  private String spaceGuid;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ApplicationEnv.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ApplicationEnv.java
new file mode 100644
index 00000000000..5a694566496
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ApplicationEnv.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class ApplicationEnv {
+  private SystemEnv systemEnvJson;
+  private Map<String, Object> environmentJson;
+
+  @Data
+  public static class SystemEnv {
+    @JsonProperty("VCAP_SERVICES")
+    private Map<String, List<Map<String, Object>>> vcapServices;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ConfigFeatureFlag.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ConfigFeatureFlag.java
new file mode 100644
index 00000000000..bfd96f263f2
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ConfigFeatureFlag.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import static java.util.Arrays.stream;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import javax.annotation.Nullable;
+import lombok.Data;
+
+@Data
+public class ConfigFeatureFlag {
+  ConfigFlag name;
+  boolean enabled;
+
+  public enum ConfigFlag {
+    SERVICE_INSTANCE_SHARING("service_instance_sharing");
+
+    private final String type;
+
+    ConfigFlag(String type) {
+      this.type = type;
+    }
+
+    @Nullable
+    @JsonCreator
+    public static ConfigFlag fromType(String type) {
+      return stream(ConfigFlag.values())
+          .filter(st -> st.type.equalsIgnoreCase(type))
+          .findFirst()
+          .orElse(null);
+    }
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceBinding.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceBinding.java
new file mode 100644
index 00000000000..60aee5c4d48
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceBinding.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.convertToValidServiceBindingName;
+
+import java.util.Map;
+import lombok.Getter;
+
+@Getter
+public class CreateServiceBinding {
+  private final String serviceInstanceGuid;
+  private final String appGuid;
+  private final String name;
+  private Map<String, Object> parameters;
+
+  public CreateServiceBinding(
+      final String serviceInstanceGuid,
+      final String appGuid,
+      final String name,
+      final Map<String, Object> parameters) {
+    this.serviceInstanceGuid = serviceInstanceGuid;
+    this.appGuid = appGuid;
+    this.name = convertToValidServiceBindingName(name);
+    this.parameters = parameters;
+  }
+
+  public CreateServiceBinding(
+      final String serviceInstanceGuid, final String appGuid, final String name) {
+    this.serviceInstanceGuid = serviceInstanceGuid;
+    this.appGuid = appGuid;
+    this.name = convertToValidServiceBindingName(name);
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceInstance.java
new file mode 100644
index 00000000000..747f0c9fb56
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceInstance.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import java.util.Map;
+import javax.annotation.Nullable;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class CreateServiceInstance extends AbstractCreateServiceInstance {
+  private String servicePlanGuid;
+
+  @Nullable private Map<String, Object> parameters;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceKey.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceKey.java
new file mode 100644
index 00000000000..a7334b4868a
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateServiceKey.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class CreateServiceKey {
+  private String name;
+
+  private String serviceInstanceGuid;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateUserProvidedServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateUserProvidedServiceInstance.java
new file mode 100644
index 00000000000..d6d9922fd86
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/CreateUserProvidedServiceInstance.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import java.util.Map;
+import javax.annotation.Nullable;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class CreateUserProvidedServiceInstance extends AbstractCreateServiceInstance {
+  @Nullable private String syslogDrainUrl;
+
+  @Nullable private Map<String, Object> credentials;
+
+  @Nullable private String routeServiceUrl;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Domain.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Domain.java
new file mode 100644
index 00000000000..cdcfcbbdd22
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Domain.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import lombok.Data;
+
+@Data
+public class Domain {
+  private String name;
+  private String owningOrganizationGuid;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/InstanceStatus.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/InstanceStatus.java
new file mode 100644
index 00000000000..73ec6d51020
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/InstanceStatus.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import lombok.Data;
+
+@Data
+public class InstanceStatus {
+  private State state;
+  private Long uptime;
+  private String details;
+
+  public enum State {
+    RUNNING,
+    STARTING,
+    CRASHED,
+    DOWN;
+  }
+}
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import static java.util.Arrays.stream; + +import com.fasterxml.jackson.annotation.JsonCreator; +import javax.annotation.Nullable; +import lombok.Data; + +@Data +public class LastOperation { + private LastOperation.Type type; + private LastOperation.State state; + private String description; + + public enum Type { + CREATE("create"), + CREATE_SERVICE_KEY("createServiceKey"), + DELETE("delete"), + DELETE_SERVICE_KEY("deleteServiceKey"), + SHARE("share"), + UNSHARE("unshare"), + UPDATE("update"); + + private final String type; + + Type(String type) { + this.type = type; + } + + @Nullable + @JsonCreator + public static Type fromType(String type) { + return stream(Type.values()) + .filter(st -> st.type.equalsIgnoreCase(type)) + .findFirst() + .orElse(null); + } + } + + public enum State { + FAILED("failed"), + IN_PROGRESS("in progress"), + NOT_FOUND("not found"), + SUCCEEDED("succeeded"); + + private final String state; + + State(String state) { + this.state = state; + } + + @Nullable + @JsonCreator + public static State fromState(String state) { + return stream(State.values()) + .filter(st -> st.state.equalsIgnoreCase(state)) + .findFirst() + .orElse(null); + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/MapRoute.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/MapRoute.java new file mode 100644 index 00000000000..28d37283013 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/MapRoute.java @@ -0,0 +1,19 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +public class MapRoute {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Organization.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Organization.java new file mode 100644 index 00000000000..e1ce1f4360f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Organization.java @@ -0,0 +1,25 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import lombok.Data;
+
+@Data
+public class Organization {
+  private String name;
+  private String status;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Page.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Page.java
new file mode 100644
index 00000000000..58a100bcdf4
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Page.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import lombok.Data;
+
+@Data
+public class Page<R> {
+  private int totalResults;
+  private int totalPages;
+  private List<Resource<R>> resources = Collections.emptyList();
+
+  public static <R> Page<R> singleton(R data, String resourceId) {
+    Page<R> page = new Page<>();
+    page.setTotalPages(1);
+    page.setTotalResults(1);
+
+    Resource.Metadata metadata = new Resource.Metadata();
+    metadata.setGuid(resourceId);
+
+    Resource<R> resource = new Resource<>();
+    resource.setMetadata(metadata);
+    resource.setEntity(data);
+
+    page.setResources(Collections.singletonList(resource));
+
+    return page;
+  }
+
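+  /**
+   * Wraps each given entity in a {@link Resource} stamped with a freshly generated random GUID
+   * and returns them all as a single page.
+   */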
+  public static <R> Page<R> asPage(R... data) {
+    Page<R> page = new Page<>();
+    page.setTotalPages(1);
+    page.setTotalResults(data.length);
+
+    page.setResources(
+        Arrays.stream(data)
+            .map(
+                d -> {
+                  Resource.Metadata metadata = new Resource.Metadata();
+                  metadata.setGuid(UUID.randomUUID().toString());
+
+                  Resource<R> resource = new Resource<>();
+                  resource.setMetadata(metadata);
+                  resource.setEntity(d);
+                  return resource;
+                })
+            .collect(Collectors.toList()));
+
+    return page;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Resource.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Resource.java
new file mode 100644
index 00000000000..db38fbd8e10
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Resource.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.time.ZonedDateTime;
+import lombok.Data;
+
+@Data
+public class Resource<T> {
+  private Metadata metadata;
+  private T entity;
+
+  @Data
+  public static class Metadata {
+    private String guid;
+    private ZonedDateTime createdAt;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Route.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Route.java
new file mode 100644
index 00000000000..09786300fab
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Route.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import lombok.experimental.Delegate;
+
+@NoArgsConstructor
+@AllArgsConstructor
+@Data
+public class Route {
+  @JsonIgnore @Delegate private RouteId routeId = new RouteId();
+
+  private String spaceGuid;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/RouteMapping.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/RouteMapping.java
new file mode 100644
index 00000000000..2a67699eaef
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/RouteMapping.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import lombok.Data; + +@Data +public class RouteMapping { + private String appGuid; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Service.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Service.java new file mode 100644 index 00000000000..e0a0fce9d21 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Service.java @@ -0,0 +1,27 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import javax.annotation.Nullable; +import lombok.Data; + +@Data +public class Service { + private String label; + + @Nullable private String extra; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceBinding.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceBinding.java new file mode 100644 index 00000000000..4efe6b45a4e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceBinding.java @@ -0,0 +1,26 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import lombok.Data; + +@Data +public class ServiceBinding { + private String appGuid; + private String serviceInstanceGuid; + private String name; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceCredentials.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceCredentials.java new file mode 100644 index 00000000000..77ed0ade0f8 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceCredentials.java @@ -0,0 +1,25 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class ServiceCredentials {
+  Map<String, Object> credentials;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceInstance.java
new file mode 100644
index 00000000000..066c3af75f9
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceInstance.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import static java.util.Arrays.stream;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import javax.annotation.Nullable;
+import lombok.Data;
+
+@Data
+public class ServiceInstance extends AbstractServiceInstance {
+  private String plan;
+  private String servicePlanGuid;
+  private LastOperation lastOperation;
+  private Type type;
+
+  public enum Type {
+    MANAGED_SERVICE_INSTANCE("managed_service_instance"),
+    USER_PROVIDED_SERVICE_INSTANCE("user_provided_service_instance");
+
+    private final String type;
+
+    Type(String type) {
+      this.type = type;
+    }
+
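+    // Maps the CF API's type strings onto this enum during deserialization; unrecognized
+    // values deserialize to null rather than throwing.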
+    @Nullable
+    @JsonCreator
+    public static Type fromType(String type) {
+      return stream(Type.values())
+          .filter(st -> st.type.equalsIgnoreCase(type))
+          .findFirst()
+          .orElse(null);
+    }
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceKey.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceKey.java
new file mode 100644
index 00000000000..25ed3058eba
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServiceKey.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class ServiceKey {
+  private String name;
+  private String serviceInstanceGuid;
+  private Map<String, Object> credentials;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServicePlan.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServicePlan.java
new file mode 100644
index 00000000000..2017b69a6e7
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/ServicePlan.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import lombok.Data;
+
+@Data
+public class ServicePlan {
+  private String id;
+  private String name;
+  private String serviceGuid;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/SharedTo.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/SharedTo.java
new file mode 100644
index 00000000000..0e9dfd1debc
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/SharedTo.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2;
+
+import java.util.Map;
+import java.util.Set;
+import lombok.Data;
+
+@Data
+public class SharedTo {
+  private Set<Map<String, String>> data;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Space.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Space.java
new file mode 100644
index 00000000000..ea718279ed3
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/Space.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import lombok.Data; + +@Data +public class Space { + private String name; + private String organizationGuid; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/UserProvidedServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/UserProvidedServiceInstance.java new file mode 100644 index 00000000000..32d2b1823a2 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/UserProvidedServiceInstance.java @@ -0,0 +1,24 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class UserProvidedServiceInstance extends AbstractServiceInstance {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Application.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Application.java new file mode 100644 index 00000000000..2a753c09d92 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Application.java @@ -0,0 +1,31 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.time.ZonedDateTime;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class Application {
+  private String name;
+  private String guid;
+  private String state;
+  private ZonedDateTime createdAt;
+  private ZonedDateTime updatedAt;
+  private Map<String, Link> links;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Build.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Build.java
new file mode 100644
index 00000000000..983d3d74f5c
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Build.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.time.ZonedDateTime;
+import javax.annotation.Nullable;
+import lombok.Data;
+
+@Data
+public class Build {
+  private String guid;
+  private ZonedDateTime createdAt;
+  private State state;
+  @Nullable private Droplet droplet;
+
+  public enum State {
+    STAGING,
+    STAGED,
+    FAILED
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Buildpack.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Buildpack.java
new file mode 100644
index 00000000000..39a09cdffdf
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Buildpack.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import lombok.Data;
+
+@Data
+public class Buildpack {
+  private String name;
+  private String detectOutput;
+  private String version;
+  private String buildpackName;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateApplication.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateApplication.java
new file mode 100644
index 00000000000..f62ce53387c
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateApplication.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import java.util.Map;
+import javax.annotation.Nullable;
+import lombok.Getter;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@Getter
+public class CreateApplication {
+  private final String name;
+  private final Map<String, ToOneRelationship> relationships;
+
+  @Nullable private final Map<String, String> environmentVariables;
+
+  @Nullable private final Lifecycle lifecycle;
+
+  public CreateApplication(
+      String name,
+      Map<String, ToOneRelationship> relationships,
+      @Nullable Map<String, String> environmentVariables,
+      Lifecycle lifecycle) {
+    this.name = name;
+    this.relationships = relationships;
+    this.environmentVariables = environmentVariables;
+    this.lifecycle = lifecycle;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateBuild.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateBuild.java
new file mode 100644
index 00000000000..16502c332a2
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateBuild.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import javax.annotation.Nullable;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+
+@Getter
+public class CreateBuild {
+  @JsonProperty("package")
+  private PackageId packageId;
+
+  @JsonProperty("staging_memory_in_mb")
+  private final Integer memoryAmount;
+
+  @JsonProperty("staging_disk_in_mb")
+  private final Integer diskSizeAmount;
+
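+  // When no explicit sizes are given, staging memory and disk each fall back to 1024 MB.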
+  public CreateBuild(
+      String packageId, @Nullable Integer memoryAmount, @Nullable Integer diskSizeAmount) {
+    this.packageId = new PackageId(packageId);
+    this.memoryAmount = memoryAmount == null ? 1024 : memoryAmount;
+    this.diskSizeAmount = diskSizeAmount == null ? 1024 : diskSizeAmount;
+  }
+
+  @RequiredArgsConstructor
+  @Getter
+  private static class PackageId {
+    private final String guid;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreatePackage.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreatePackage.java
new file mode 100644
index 00000000000..6bf09ee9300
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreatePackage.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.util.HashMap;
+import java.util.Map;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.Getter;
+
+@Data
+public class CreatePackage {
+  private final String type;
+  private final Map<String, ToOneRelationship> relationships = new HashMap<>();
+  private Docker data;
+
+  public CreatePackage(String appId, Type type, Docker data) {
+    this.type = type.getValue();
+    relationships.put("app", new ToOneRelationship(new Relationship(appId)));
+    this.data = data;
+  }
+
+  @Getter
+  @AllArgsConstructor
+  public enum Type {
+    BITS("bits"),
+    DOCKER("docker");
+    private String value;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateSharedServiceInstances.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateSharedServiceInstances.java
new file mode 100644
index 00000000000..d27d25effe4
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateSharedServiceInstances.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.util.Map;
+import java.util.Set;
+import lombok.Data;
+
+@Data
+public class CreateSharedServiceInstances {
+  private Set<Map<String, String>> data;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateTask.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateTask.java
new file mode 100644
index 00000000000..ccc673a8774
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateTask.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
+import javax.annotation.Nullable;
+import lombok.Builder;
+import lombok.Value;
+
+@Value
+@JsonDeserialize(builder = CreateTask.CreateTaskBuilder.class)
+@Builder
+public class CreateTask {
+  @Nullable private String name;
+  private String command;
+
+  @JsonPOJOBuilder(withPrefix = "")
+  public static class CreateTaskBuilder {}
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Docker.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Docker.java
new file mode 100644
index 00000000000..cba8cf8671f
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Docker.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import lombok.Builder;
+import lombok.Data;
+
+@Data
+@Builder
+public class Docker {
+  private final String image;
+  private final String username;
+  private final String password;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Droplet.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Droplet.java
new file mode 100644
index 00000000000..ae4e35ab82b
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Droplet.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.time.ZonedDateTime;
+import java.util.Collection;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class Droplet {
+  private String guid;
+  private ZonedDateTime createdAt;
+  private String stack;
+  private String state;
+  private Map<String, Link> links;
+  private Collection<Buildpack> buildpacks;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Lifecycle.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Lifecycle.java
new file mode 100644
index 00000000000..fe81b4d4786
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Lifecycle.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServerGroupDescription;
+import java.util.Collections;
+import java.util.Map;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@Getter
+public class Lifecycle {
+  private final String type;
+  private final Map<String, Object> data;
+
+  public Lifecycle(
+      Type type,
+      DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes) {
+    this.type = type.getValue();
+    this.data =
+        type.equals(Type.BUILDPACK)
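+            // Only the buildpack lifecycle carries a data payload: buildpacks and stack are
+            // included only when set, while the docker lifecycle sends an empty map.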
+            ? new BuildpackLifecycleBuilder<String, Object>()
+                .putIfValueNotNull("buildpacks", applicationAttributes.getBuildpacks())
+                .putIfValueNotNull("stack", applicationAttributes.getStack())
+                .build()
+            : Collections.emptyMap();
+  }
+
+  @Getter
+  @AllArgsConstructor
+  public enum Type {
+    BUILDPACK("buildpack"),
+    DOCKER("docker");
+    private String value;
+  }
+
+  static class BuildpackLifecycleBuilder<K, V> extends ImmutableMap.Builder<K, V> {
+    public BuildpackLifecycleBuilder<K, V> putIfValueNotNull(K key, V value) {
+      if (value != null) super.put(key, value);
+      return this;
+    }
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Link.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Link.java
new file mode 100644
index 00000000000..e3ddfd8637f
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Link.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import lombok.Data;
+
+@Data
+public class Link {
+  private String href;
+
+  /**
+   * If this link's last path segment is a GUID (i.e. it refers to a single resource), then this
+   * extracts that GUID.
+   */
+  public String getGuid() {
+    return href.substring(href.lastIndexOf('/') + 1);
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Organization.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Organization.java
new file mode 100644
index 00000000000..5418dd12614
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Organization.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class Organization {
+  private String name;
+  private String guid;
+  private Map<String, Link> links;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Package.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Package.java
new file mode 100644
index 00000000000..05b1fa2c391
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Package.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.time.ZonedDateTime;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class Package {
+  private String guid;
+  private State state;
+  private String type;
+  private ZonedDateTime createdAt;
+  private PackageData data;
+  private Map<String, Link> links;
+
+  public enum State {
+    AWAITING_UPLOAD,
+    PROCESSING_UPLOAD,
+    READY,
+    FAILED,
+    COPYING,
+    EXPIRED
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/PackageChecksum.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/PackageChecksum.java
new file mode 100644
index 00000000000..1197ae7e64e
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/PackageChecksum.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import lombok.Data;
+
+@Data
+public class PackageChecksum {
+  private String type;
+  private String value;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/PackageData.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/PackageData.java
new file mode 100644
index 00000000000..794c84c738f
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/PackageData.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import javax.annotation.Nullable;
+import lombok.Data;
+
+@Data
+public class PackageData {
+  @Nullable private PackageChecksum checksum;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Pagination.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Pagination.java
new file mode 100644
index 00000000000..accc7f99ca6
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Pagination.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.util.List;
+import lombok.Data;
+import lombok.Getter;
+import lombok.Setter;
+
+@Data
+public class Pagination<T> {
+  private Details pagination;
+  private List<T> resources;
+
+  @Getter
+  @Setter
+  public static class Details {
+    private int totalPages;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Process.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Process.java
new file mode 100644
index 00000000000..3dc2d366102
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Process.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import com.fasterxml.jackson.annotation.JsonInclude; +import javax.annotation.Nullable; +import lombok.*; + +@Data +@JsonInclude(value = JsonInclude.Include.NON_NULL) +public class Process { + private String type; + private String guid; + private int instances; + private int memoryInMb; + private int diskInMb; + + @Nullable + @JsonInclude(value = JsonInclude.Include.CUSTOM, valueFilter = HealthCheck.class) + private HealthCheck healthCheck; + + @Data + @JsonInclude(value = JsonInclude.Include.NON_NULL) + public static class HealthCheck { + + private HealthCheck() {} + + @Nullable private String type; + + @Nullable private HealthCheckData data; + + public static class HealthCheckBuilder { + private String type; + private HealthCheckData data; + + public HealthCheckBuilder type(String type) { + this.type = type; + return this; + } + + public HealthCheckBuilder data(HealthCheckData data) { + this.data = data; + return this; + } + + public HealthCheck build() { + HealthCheck healthCheck = new HealthCheck(); + healthCheck.setType(this.type); + healthCheck.setData(this.data); + return healthCheck; + } + } + } + + @Data + @JsonInclude(value = JsonInclude.Include.NON_NULL) + public static class HealthCheckData { + + private HealthCheckData() {} + + @Nullable private String endpoint; + + @Nullable private Integer timeout; + + @Nullable private Integer invocationTimeout; + + public static class HealthCheckDataBuilder { + private String endpoint; + + private Integer timeout; + + private Integer invocationTimeout; + + public HealthCheckDataBuilder endpoint(String endpoint) { + this.endpoint = endpoint; + return this; + } + + public HealthCheckDataBuilder timeout(Integer timeout) { + this.timeout = timeout; + return this; + } + + public HealthCheckDataBuilder invocationTimeout(Integer invocationTimeout) { + this.invocationTimeout = invocationTimeout; + return this; + } + + public HealthCheckData build() { + HealthCheckData healthCheckData = new HealthCheckData(); + healthCheckData.setEndpoint(this.endpoint); + healthCheckData.setTimeout(this.timeout); + healthCheckData.setInvocationTimeout(this.invocationTimeout); + return healthCheckData; + } + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessRequest.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessRequest.java new file mode 100644 index 00000000000..8422cf7c245 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import javax.annotation.Nullable;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+public class ProcessRequest {
+  @Nullable private String type;
+  @Nullable private String command;
+  @Nullable private String diskQuota;
+  @Nullable private String healthCheckType;
+  @Nullable private String healthCheckHttpEndpoint;
+  @Nullable private Integer healthCheckInvocationTimeout;
+  @Nullable private Integer instances;
+  @Nullable private String memory;
+  @Nullable private Integer timeout;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessResources.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessResources.java
new file mode 100644
index 00000000000..55d1c3cfdf3
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessResources.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class ProcessResources {
+  private List<Process> resources;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessStats.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessStats.java
new file mode 100644
index 00000000000..ff0062ca90a
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessStats.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import lombok.Data; + +@Data +public class ProcessStats { + private State state; + + public enum State { + RUNNING, + CRASHED, + STARTING, + STOPPING, + DOWN + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Relationship.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Relationship.java new file mode 100644 index 00000000000..e5008de9fc5 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Relationship.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@NoArgsConstructor +@AllArgsConstructor +@Data +public class Relationship { + private String guid; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ScaleProcess.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ScaleProcess.java new file mode 100644 index 00000000000..5ec596b812b --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ScaleProcess.java @@ -0,0 +1,33 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import javax.annotation.Nullable;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+@Getter
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class ScaleProcess {
+  @Nullable private final Integer instances;
+
+  @Nullable private final Integer memoryInMb;
+
+  @Nullable private final Integer diskInMb;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Space.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Space.java
new file mode 100644
index 00000000000..da0ba30948b
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Space.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class Space {
+  private String guid;
+  private String name;
+  private Map<String, ToOneRelationship> relationships;
+  private Map<String, Link> links;
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/StartApplication.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/StartApplication.java
new file mode 100644
index 00000000000..7b76c11110f
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/StartApplication.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+public class StartApplication {}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/StopApplication.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/StopApplication.java
new file mode 100644
index 00000000000..aa4ef577f10
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/StopApplication.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+public class StopApplication {}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Task.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Task.java
new file mode 100644
index 00000000000..58a11fc3fd7
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/Task.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3;
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
+import java.time.ZonedDateTime;
+import java.util.Map;
+import lombok.Builder;
+import lombok.Value;
+
+@Value
+@JsonDeserialize(builder = Task.TaskBuilder.class)
+@Builder
+public class Task {
+  private String guid;
+  private String name;
+  private State state;
+  private ZonedDateTime createdAt;
+  private ZonedDateTime updatedAt;
+  private Map<String, Link> links;
+
+  public enum State {
+    SUCCEEDED,
+    RUNNING,
+    FAILED
+  }
+
+  @JsonPOJOBuilder(withPrefix = "")
+  public static class TaskBuilder {}
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ToOneRelationship.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ToOneRelationship.java
new file mode 100644
index 00000000000..4b48286ebc4
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ToOneRelationship.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@NoArgsConstructor +@AllArgsConstructor +@Data +public class ToOneRelationship { + private Relationship data; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/UpdateProcess.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/UpdateProcess.java new file mode 100644 index 00000000000..aed295a8ca7 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/UpdateProcess.java @@ -0,0 +1,29 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import com.fasterxml.jackson.annotation.JsonInclude; +import javax.annotation.Nullable; +import lombok.Data; + +@Data +@JsonInclude(JsonInclude.Include.NON_NULL) +public class UpdateProcess { + @Nullable private final String command; + + @Nullable private final Process.HealthCheck healthCheck; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/retry/RetryInterceptor.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/retry/RetryInterceptor.java new file mode 100644 index 00000000000..9291419273b --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/retry/RetryInterceptor.java @@ -0,0 +1,96 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client.retry;
+
+import io.github.resilience4j.retry.IntervalFunction;
+import io.github.resilience4j.retry.Retry;
+import io.github.resilience4j.retry.RetryConfig;
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import okhttp3.Interceptor;
+import okhttp3.Response;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RetryInterceptor implements Interceptor {
+  private final Logger logger = LoggerFactory.getLogger(RetryInterceptor.class);
+  private final int maxAttempts;
+
+  public RetryInterceptor(int maxAttempts) {
+    this.maxAttempts = maxAttempts;
+  }
+
+  @Override
+  public Response intercept(Chain chain) throws IOException {
+    final String callName = "cf.api.call";
+    AtomicInteger currentAttempts = new AtomicInteger();
+    Retry retry =
+        Retry.of(
+            callName,
+            RetryConfig.custom()
+                .maxAttempts(maxAttempts)
+                .intervalFunction(IntervalFunction.ofExponentialBackoff(Duration.ofSeconds(10), 3))
+                .retryExceptions(SocketTimeoutException.class, RetryableApiException.class)
+                .build());
+    logger.trace("cf request: " + chain.request().url());
+    AtomicReference<Response> lastResponse = new AtomicReference<>();
+    try {
+      return retry.executeCallable(
+          () -> {
+            currentAttempts.incrementAndGet();
+            Response response = chain.proceed(chain.request());
+            lastResponse.set(response);
+            switch (response.code()) {
+              case 502:
+              case 503:
+              case 504:
+                // after retries fail, the response body for these status codes will get wrapped up
+                // into a CloudFoundryApiException
+                if (currentAttempts.get() < maxAttempts) {
+                  response.close();
+                }
+                throw new RetryableApiException(
+                    "Response Code "
+                        + response.code()
+                        + ": "
+                        + chain.request().url()
+                        + " attempting retry");
+            }
+
+            return response;
+          });
+    } catch (Exception e) {
+      final Response response = lastResponse.get();
+      if (response == null) {
+        throw new IllegalStateException(e);
+      }
+      return response;
+    }
+  }
+
+  private static class RetryableApiException extends RuntimeException {
+    RetryableApiException(String message) {
+      super(message);
+    }
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenAuthenticator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenAuthenticator.java
new file mode 100644
index 00000000000..566f3382a94
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenAuthenticator.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.tokens; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.Token; +import java.io.IOException; +import lombok.Getter; +import lombok.NonNull; +import lombok.RequiredArgsConstructor; +import okhttp3.Authenticator; +import okhttp3.Request; +import okhttp3.Response; +import okhttp3.Route; +import org.jetbrains.annotations.Nullable; + +@Getter +@RequiredArgsConstructor +public class AccessTokenAuthenticator implements Authenticator { + + private final AccessTokenProvider accessTokenProvider; + + @Nullable + @Override + public Request authenticate(@Nullable Route route, Response response) throws IOException { + final Token currentToken = accessTokenProvider.getAccessToken(); + + synchronized (accessTokenProvider.getTokenLock()) { + final Token newToken = accessTokenProvider.getAccessToken(); + + // Token was refreshed before the synchronization. Use the updated token and retry. + if (!currentToken.equals(newToken)) { + return newRequestWithAccessToken(response.request(), newToken); + } + + // Refresh for new token and retry. + accessTokenProvider.refreshAccessToken(); + final Token updatedToken = accessTokenProvider.getAccessToken(); + return newRequestWithAccessToken(response.request(), updatedToken); + } + } + + @NonNull + private Request newRequestWithAccessToken(@NonNull Request request, @NonNull Token token) { + return request.newBuilder().header("Authorization", "Bearer " + token.getAccessToken()).build(); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenInterceptor.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenInterceptor.java new file mode 100644 index 00000000000..049bcd8c3fd --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenInterceptor.java @@ -0,0 +1,69 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.tokens; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.Token; +import java.io.IOException; +import lombok.NonNull; +import lombok.RequiredArgsConstructor; +import okhttp3.Interceptor; +import okhttp3.Request; +import okhttp3.Response; + +@RequiredArgsConstructor +public class AccessTokenInterceptor implements Interceptor { + + private final AccessTokenProvider accessTokenProvider; + + @Override + public Response intercept(Chain chain) throws IOException { + final Token currentToken = accessTokenProvider.getAccessToken(); + // Token is expiring soon, refresh and proceed. 
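+    // Double-checked refresh: only the thread that wins tryLock() below performs the
+    // refresh; threads that lose the race fall through and reuse the current token
+    // rather than blocking, so at most one UAA round trip is made per expiry window.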
+    if (currentToken == null
+        || System.currentTimeMillis() >= accessTokenProvider.getTokenExpiration()) {
+
+      if (accessTokenProvider.getRefreshLock().tryLock()) {
+        try {
+          final Token newToken = accessTokenProvider.getAccessToken();
+
+          // Token was refreshed before getting the lock. Use the updated token and proceed.
+          if (currentToken == null || !currentToken.equals(newToken)) {
+            return chain.proceed(newRequestWithAccessToken(chain.request(), newToken));
+          }
+
+          // Refresh for new token and proceed.
+          accessTokenProvider.refreshAccessToken();
+
+          final Token updatedToken = accessTokenProvider.getAccessToken();
+          return chain.proceed(newRequestWithAccessToken(chain.request(), updatedToken));
+        } finally {
+          if (accessTokenProvider.getRefreshLock().isHeldByCurrentThread()) {
+            accessTokenProvider.getRefreshLock().unlock();
+          }
+        }
+      }
+    }
+    // Token is still valid, or another thread is already refreshing it; proceed with it.
+    return chain.proceed(newRequestWithAccessToken(chain.request(), currentToken));
+  }
+
+  @NonNull
+  private Request newRequestWithAccessToken(@NonNull Request request, @NonNull Token token) {
+    return request.newBuilder().header("Authorization", "Bearer " + token.getAccessToken()).build();
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenProvider.java
new file mode 100644
index 00000000000..a4193f0fba0
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/tokens/AccessTokenProvider.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2021 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.tokens; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils.safelyCall; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.AuthenticationService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.Token; +import java.util.concurrent.locks.ReentrantLock; +import lombok.Getter; + +public class AccessTokenProvider { + + private final String user; + private final String password; + private final AuthenticationService uaa; + @Getter private final Object tokenLock = new Object(); + @Getter private long tokenExpiration; + private Token token; + @Getter private ReentrantLock refreshLock = new ReentrantLock(); + + public AccessTokenProvider(String user, String password, AuthenticationService uaa) { + this.user = user; + this.password = password; + this.uaa = uaa; + } + + Token getAccessToken() { + if (token == null) { + refreshAccessToken(); + } + return this.token; + } + + void refreshAccessToken() { + try { + Token token = + safelyCall(() -> uaa.passwordToken("password", user, password, "cf", "")) + .orElseThrow( + () -> + new CloudFoundryApiException( + "Unable to get authentication token from cloud foundry.")); + this.token = token; + this.tokenExpiration = System.currentTimeMillis() + ((token.getExpiresIn() - 120) * 1000); + } catch (Exception e) { + throw new CloudFoundryApiException(e, "Could not refresh token."); + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/config/CloudFoundryConfigurationProperties.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/config/CloudFoundryConfigurationProperties.java index b974ca5715b..98f37c0374e 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/config/CloudFoundryConfigurationProperties.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/config/CloudFoundryConfigurationProperties.java @@ -16,30 +16,80 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.config; -import lombok.Data; -import lombok.Getter; -import lombok.Setter; -import lombok.ToString; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.*; +import lombok.*; +import org.springframework.beans.factory.DisposableBean; import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.NestedConfigurationProperty; import org.springframework.stereotype.Component; -import java.util.ArrayList; -import java.util.List; - @Component @Data @ConfigurationProperties("cloudfoundry") -public class CloudFoundryConfigurationProperties { +public class CloudFoundryConfigurationProperties implements DisposableBean { + static final int POLLING_INTERVAL_MILLISECONDS_DEFAULT = 300 * 1000; + static final int ASYNC_OPERATION_TIMEOUT_MILLISECONDS_DEFAULT = + (int) (POLLING_INTERVAL_MILLISECONDS_DEFAULT * 1.5); + static final int ASYNC_OPERATION_MAX_POLLING_INTERVAL_MILLISECONDS = 8 * 1000; + + private int pollingIntervalMilliseconds = POLLING_INTERVAL_MILLISECONDS_DEFAULT; + private int asyncOperationTimeoutMillisecondsDefault = + ASYNC_OPERATION_TIMEOUT_MILLISECONDS_DEFAULT; + private int 
asyncOperationMaxPollingIntervalMilliseconds =
+      ASYNC_OPERATION_MAX_POLLING_INTERVAL_MILLISECONDS;
+  private List<ManagedAccount> accounts = new ArrayList<>();
+  private int apiRequestParallelism = 100;
+
+  @NestedConfigurationProperty private ClientConfig client = new ClientConfig();
+
+  @NestedConfigurationProperty private LocalCacheConfig localCacheConfig = new LocalCacheConfig();
+
+  @Override
+  public void destroy() {
+    this.accounts = new ArrayList<>();
+  }
+
   @Getter
   @Setter
   @ToString(exclude = "password")
-  public static class ManagedAccount {
+  @EqualsAndHashCode
+  public static class ManagedAccount implements CredentialsDefinition {
     private String name;
     private String api;
+    private String appsManagerUri;
+    private String metricsUri;
     private String user;
     private String password;
     private String environment;
+    private boolean skipSslValidation;
+    private boolean onlySpinnakerManaged;
+    private Integer resultsPerPage;
+
+    @Deprecated
+    private Integer
+        maxCapiConnectionsForCache; // Deprecated in favor of cloudfoundry.apiRequestParallelism
+
+    private Permissions.Builder permissions = new Permissions.Builder();
+    private Map<String, Set<String>> spaceFilter = Collections.emptyMap();
+  }
+
+  @Data
+  public static class ClientConfig {
+    private int connectionTimeout = 10000;
+    private int writeTimeout = 10000;
+    private int readTimeout = 10000;
+    private int maxRetries = 3;
+  }
+
+  @Data
+  public static class LocalCacheConfig {
+    private long applicationsAccessExpirySeconds = -1;
+    private long applicationsWriteExpirySeconds = 600;
+    private long routesAccessExpirySeconds = -1;
+    private long routesWriteExpirySeconds = 180;
+  }
 }
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/controller/CloudFoundryImageController.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/controller/CloudFoundryImageController.java
new file mode 100644
index 00000000000..cc5a911af72
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/controller/CloudFoundryImageController.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.controller;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptySet;
+import static java.util.stream.Collectors.toSet;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryCluster;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view.CloudFoundryClusterProvider;
+import java.util.Collection;
+import java.util.Set;
+import java.util.stream.Stream;
+import lombok.AllArgsConstructor;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+
+@AllArgsConstructor
+@RestController
+@RequestMapping("/cloudfoundry/images")
+public class CloudFoundryImageController {
+  private final CloudFoundryClusterProvider clusterProvider;
+
+  /**
+   * Cloud Foundry droplets aren't human readable independently of the server group to which they
+   * are attached.
+   */
+  @RequestMapping(value = "/find", method = RequestMethod.GET)
+  public Collection<CloudFoundryCluster> list(@RequestParam(required = false) String account) {
+    Stream<CloudFoundryCluster> clusters =
+        account == null
+            ? clusterProvider.getClusters().values().stream().flatMap(Set::stream)
+            : clusterProvider.getClusters().get(account).stream();
+
+    return clusters
+        .map(
+            cluster ->
+                cluster.withServerGroups(
+                    cluster.getServerGroups().stream()
+                        .filter(serverGroup -> serverGroup.getDroplet() != null)
+                        .map(
+                            serverGroup ->
+                                serverGroup
+                                    .withInstances(emptySet())
+                                    .withServiceInstances(emptyList()))
+                        .collect(toSet())))
+        .filter(cluster -> !cluster.getServerGroups().isEmpty())
+        .collect(toSet());
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/CloudFoundryServerGroupNameResolver.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/CloudFoundryServerGroupNameResolver.java
new file mode 100644
index 00000000000..6494ec7ad7b
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/CloudFoundryServerGroupNameResolver.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy;
+
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver;
+import java.util.Date;
+import java.util.List;
+import java.util.stream.Collectors;
+import lombok.AccessLevel;
+import lombok.AllArgsConstructor;
+import lombok.experimental.FieldDefaults;
+
+@AllArgsConstructor
+@FieldDefaults(makeFinal = true, level = AccessLevel.PRIVATE)
+public class CloudFoundryServerGroupNameResolver extends AbstractServerGroupNameResolver {
+  private static final String PHASE = "DEPLOY";
+
+  CloudFoundryClient client;
+  CloudFoundrySpace space;
+
+  @Override
+  public String getPhase() {
+    return PHASE;
+  }
+
+  @Override
+  public String getRegion() {
+    return space.getRegion();
+  }
+
+  @Override
+  public List<TakenSlot> getTakenSlots(String clusterName) {
+    return client.getApplications().getTakenSlots(clusterName, space.getId()).stream()
+        .map(
+            app -> {
+              Names names = Names.parseName(app.getEntity().getName());
+              return new TakenSlot(
+                  names.getCluster(),
+                  names.getSequence(),
+                  Date.from(app.getMetadata().getCreatedAt().toInstant()));
+            })
+        .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryAtomicOperationConverter.java
new file mode 100644
index 00000000000..3b4e486e050
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryAtomicOperationConverter.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import java.util.Optional;
+
+public abstract class AbstractCloudFoundryAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudFoundryCredentials> {
+
+  protected Optional<CloudFoundrySpace> findSpace(String region, CloudFoundryClient client) {
+    return client.getSpaces().findSpaceByRegion(region);
+  }
+
+  protected CloudFoundryClient getClient(Map input) {
+    CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString());
+    return credentials.getClient();
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryServerGroupAtomicOperationConverter.java
new file mode 100644
index 00000000000..f321928c5cf
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryServerGroupAtomicOperationConverter.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import javax.annotation.Nullable;
+
+public abstract class AbstractCloudFoundryServerGroupAtomicOperationConverter
+    extends AbstractCloudFoundryAtomicOperationConverter {
+  @Nullable
+  protected String getServerGroupId(
+      String serverGroupName, String region, CloudFoundryClient client) {
+    return findSpace(region, client)
+        .map(space -> client.getApplications().findServerGroupId(serverGroupName, space.getId()))
+        .orElse(null);
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractLoadBalancersAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractLoadBalancersAtomicOperationConverter.java
new file mode 100644
index 00000000000..2ac0f8c1cb4
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractLoadBalancersAtomicOperationConverter.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.LoadBalancersDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials;
+import java.util.List;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@Component
+abstract class AbstractLoadBalancersAtomicOperationConverter
+    extends AbstractCloudFoundryServerGroupAtomicOperationConverter {
+  @Override
+  public LoadBalancersDescription convertDescription(Map input) {
+    List<String> routes = (List<String>) input.get("loadBalancerNames");
+    if (routes.isEmpty()) {
+      throw new IllegalArgumentException("No routes supplied.");
+    }
+
+    LoadBalancersDescription converted =
+        getObjectMapper().convertValue(input, LoadBalancersDescription.class);
+    CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString());
+    converted.setCredentials(credentials);
+    converted.setClient(getClient(input));
+    converted.setServerGroupId(
+        getServerGroupId(
+            converted.getServerGroupName(), converted.getRegion(), converted.getClient()));
+    converted.setRoutes(routes);
+
+    return findSpace(converted.getRegion(), converted.getClient())
+        .map(converted::setSpace)
+        .orElseThrow(() -> new IllegalArgumentException("No space supplied."));
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CloneCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CloneCloudFoundryServerGroupAtomicOperationConverter.java
new file mode 100644
index 00000000000..bfd214a78f0
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CloneCloudFoundryServerGroupAtomicOperationConverter.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation;
+import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.helpers.OperationPoller;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.stereotype.Component;
+
+@CloudFoundryOperation(AtomicOperations.CLONE_SERVER_GROUP)
+@Component
+public class CloneCloudFoundryServerGroupAtomicOperationConverter
+    extends DeployCloudFoundryServerGroupAtomicOperationConverter {
+  public CloneCloudFoundryServerGroupAtomicOperationConverter(
+      @Qualifier("cloudFoundryOperationPoller") OperationPoller operationPoller,
+      ArtifactCredentialsRepository credentialsRepository,
+      CredentialsRepository<DockerRegistryNamedAccountCredentials>
+          dockerRegistryCredentialsRepository) {
+    super(operationPoller, credentialsRepository, dockerRegistryCredentialsRepository);
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CloudFoundryRunJobOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CloudFoundryRunJobOperationConverter.java
new file mode 100644
index 00000000000..a318331f865
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CloudFoundryRunJobOperationConverter.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RUN_JOB; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CloudFoundryRunJobOperationDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CloudFoundryRunJobOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(RUN_JOB) +@Component +public class CloudFoundryRunJobOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + + @Override + public AtomicOperation convertOperation(Map input) { + return new CloudFoundryRunJobOperation(convertDescription(input)); + } + + @Override + public CloudFoundryRunJobOperationDescription convertDescription(Map input) { + CloudFoundryRunJobOperationDescription converted = + getObjectMapper().convertValue(input, CloudFoundryRunJobOperationDescription.class); + + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + CloudFoundryClient client = credentials.getClient(); + converted.setClient(client); + String jobName = (String) input.get("jobName"); + String region = (String) input.get("region"); + String serverGroupName = (String) input.get("serverGroupName"); + + CloudFoundrySpace space = + findSpace(region, client) + .orElseThrow( + () -> + new IllegalArgumentException( + "Unable to find organization and space '" + region + "'.")); + + CloudFoundryServerGroup serverGroup = + client.getApplications().findServerGroupByNameAndSpaceId(serverGroupName, space.getId()); + + if (serverGroup == null) { + throw new IllegalStateException( + String.format( + "Can't run job '%s': CloudFoundry application '%s' not found in org/space '%s'", + jobName, serverGroupName, region)); + } + + converted.setServerGroup(serverGroup); + + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceBindingAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceBindingAtomicOperationConverter.java new file mode 100644 index 00000000000..c417f014426 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceBindingAtomicOperationConverter.java @@ -0,0 +1,91 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceBindingDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CreateCloudFoundryServiceBindingAtomicOperation;
+import com.netflix.spinnaker.clouddriver.helpers.OperationPoller;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Map;
+import org.jetbrains.annotations.Nullable;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.stereotype.Component;
+
+@CloudFoundryOperation(AtomicOperations.CREATE_SERVICE_BINDINGS)
+@Component
+public class CreateCloudFoundryServiceBindingAtomicOperationConverter
+    extends AbstractCloudFoundryServerGroupAtomicOperationConverter {
+
+  private final OperationPoller operationPoller;
+  private final ArtifactDownloader artifactDownloader;
+
+  public CreateCloudFoundryServiceBindingAtomicOperationConverter(
+      @Qualifier("cloudFoundryOperationPoller") OperationPoller operationPoller,
+      ArtifactDownloader artifactDownloader) {
+    this.operationPoller = operationPoller;
+    this.artifactDownloader = artifactDownloader;
+  }
+
+  @Nullable
+  @Override
+  public AtomicOperation convertOperation(Map input) {
+    return new CreateCloudFoundryServiceBindingAtomicOperation(
+        operationPoller, convertDescription(input));
+  }
+
+  @Override
+  public CreateCloudFoundryServiceBindingDescription convertDescription(Map input) {
+    List<Map<String, Object>> requests =
+        (List<Map<String, Object>>) input.get("serviceBindingRequests");
+    for (Map<String, Object> request : requests) {
+      if (request.get("artifact") != null) {
+        Artifact artifact = getObjectMapper().convertValue(request.get("artifact"), Artifact.class);
+        try (InputStream inputStream = artifactDownloader.download(artifact)) {
+          Map paramMap = getObjectMapper().readValue(inputStream, Map.class);
+          request.put("parameters", paramMap);
+        } catch (Exception e) {
+          throw new CloudFoundryApiException(
+              "Could not convert service binding request parameters to json.");
+        }
+      }
+    }
+    input.put("serviceBindingRequests", requests);
+
+    CreateCloudFoundryServiceBindingDescription description =
+        getObjectMapper().convertValue(input, CreateCloudFoundryServiceBindingDescription.class);
+    description.setCredentials(getCredentialsObject(input.get("credentials").toString()));
+    description.setClient(getClient(input));
+    description.setServerGroupId(
+        getServerGroupId(
+            description.getServerGroupName(), description.getRegion(), description.getClient()));
+    findSpace(description.getRegion(), description.getClient())
+        .ifPresentOrElse(
+            description::setSpace,
+            () -> {
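+              // findSpace() came back empty: the region could not be resolved to an
+              // existing org/space pair, so fail fast before any API calls are issued.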
throw new CloudFoundryApiException("Could not determine CloudFoundry Space."); + }); + return description; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceKeyAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceKeyAtomicOperationConverter.java new file mode 100644 index 00000000000..cc9ee67608e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceKeyAtomicOperationConverter.java @@ -0,0 +1,49 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CreateCloudFoundryServiceKeyAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.CREATE_SERVICE_KEY) +@Component +public class CreateCloudFoundryServiceKeyAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new CreateCloudFoundryServiceKeyAtomicOperation(convertDescription(input)); + } + + @Override + public CreateCloudFoundryServiceKeyDescription convertDescription(Map input) { + CreateCloudFoundryServiceKeyDescription converted = + getObjectMapper().convertValue(input, CreateCloudFoundryServiceKeyDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + CloudFoundryClient client = getClient(input); + converted.setClient(client); + findSpace(converted.getRegion(), client).ifPresent(converted::setSpace); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryLoadBalancerAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryLoadBalancerAtomicOperationConverter.java new file mode 100644 index 00000000000..37ee68d630a --- /dev/null +++ 
b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryLoadBalancerAtomicOperationConverter.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters;
+
+import static java.util.function.Function.identity;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DeleteCloudFoundryLoadBalancerAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Optional;
+import org.springframework.stereotype.Component;
+
+@CloudFoundryOperation(AtomicOperations.DELETE_LOAD_BALANCER)
+@Component
+public class DeleteCloudFoundryLoadBalancerAtomicOperationConverter
+    extends AbstractCloudFoundryAtomicOperationConverter {
+  @Override
+  public AtomicOperation convertOperation(Map input) {
+    return new DeleteCloudFoundryLoadBalancerAtomicOperation(convertDescription(input));
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public DeleteCloudFoundryLoadBalancerDescription convertDescription(Map input) {
+    DeleteCloudFoundryLoadBalancerDescription converted =
+        getObjectMapper().convertValue(input, DeleteCloudFoundryLoadBalancerDescription.class);
+    converted.setClient(getClient(input));
+    CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString());
+    converted.setCredentials(credentials);
+
+    CloudFoundryClient client = converted.getClient();
+    return ((Collection<String>) input.get("regions"))
+        .stream()
+        .map(region -> findSpace(region, client))
+        .filter(Optional::isPresent)
+        .findFirst()
+        .flatMap(identity())
+        .map(
+            space -> {
+              String routePath = input.get("loadBalancerName").toString();
+              RouteId routeId = client.getRoutes().toRouteId(routePath);
+              if (routeId == null) {
+                throw new IllegalArgumentException(
+                    "Invalid format or domain for route '" + routePath + "'");
+              }
+              converted.setRegion(space.getRegion());
+              converted.setSpace(space);
+              return converted.setLoadBalancer(client.getRoutes().find(routeId, space.getId()));
+            })
+        .orElseThrow(
+            () ->
+                new IllegalArgumentException(
+                    "Unable to find the space(s) that this load balancer was expected to be in."));
+  }
+}
diff --git
a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceBindingAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceBindingAtomicOperationConverter.java new file mode 100644 index 00000000000..207bb91e3d4 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceBindingAtomicOperationConverter.java @@ -0,0 +1,58 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceBindingDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DeleteCloudFoundryServiceBindingAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.jetbrains.annotations.Nullable; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DELETE_SERVICE_BINDINGS) +@Component +public class DeleteCloudFoundryServiceBindingAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + + @Nullable + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteCloudFoundryServiceBindingAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteCloudFoundryServiceBindingDescription convertDescription(Map input) { + + DeleteCloudFoundryServiceBindingDescription description = + getObjectMapper().convertValue(input, DeleteCloudFoundryServiceBindingDescription.class); + description.setCredentials(getCredentialsObject(input.get("credentials").toString())); + description.setClient(getClient(input)); + description.setServerGroupId( + getServerGroupId( + description.getServerGroupName(), description.getRegion(), description.getClient())); + findSpace(description.getRegion(), description.getClient()) + .ifPresentOrElse( + description::setSpace, + () -> { + throw new CloudFoundryApiException("Could not determine CloudFoundry Space."); + }); + return description; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceKeyAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceKeyAtomicOperationConverter.java new file mode 100644 index 00000000000..7e31b272ca7 --- /dev/null +++ 
b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceKeyAtomicOperationConverter.java @@ -0,0 +1,49 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DeleteCloudFoundryServiceKeyAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DELETE_SERVICE_KEY) +@Component +public class DeleteCloudFoundryServiceKeyAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteCloudFoundryServiceKeyAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteCloudFoundryServiceKeyDescription convertDescription(Map input) { + DeleteCloudFoundryServiceKeyDescription converted = + getObjectMapper().convertValue(input, DeleteCloudFoundryServiceKeyDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + CloudFoundryClient client = getClient(input); + converted.setClient(client); + findSpace(converted.getRegion(), client).ifPresent(converted::setSpace); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServerGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..b3c2e58aaf4 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServerGroupAtomicOperationConverter.java @@ -0,0 +1,263 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static io.vavr.API.*; +import static java.util.stream.Collectors.toList; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.PropertyNamingStrategies; +import com.google.common.collect.Lists; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts.CloudFoundryArtifactCredentials; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Docker; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessRequest; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DeployCloudFoundryServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.util.RandomWordGenerator; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import javax.annotation.Nullable; +import lombok.Data; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.CREATE_SERVER_GROUP) +@Component +public class DeployCloudFoundryServerGroupAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + private final OperationPoller operationPoller; + private final ArtifactCredentialsRepository credentialsRepository; + private final CredentialsRepository + dockerRegistryCredentialsRepository; + + public DeployCloudFoundryServerGroupAtomicOperationConverter( + @Qualifier("cloudFoundryOperationPoller") OperationPoller operationPoller, + ArtifactCredentialsRepository credentialsRepository, + CredentialsRepository + dockerRegistryCredentialsRepository) { + this.operationPoller = operationPoller; + this.credentialsRepository = credentialsRepository; + 
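/* used below by resolveDockerAccount to match a docker-image artifact's registry and repository to configured credentials */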
this.dockerRegistryCredentialsRepository = dockerRegistryCredentialsRepository; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeployCloudFoundryServerGroupAtomicOperation( + operationPoller, convertDescription(input)); + } + + @Override + public DeployCloudFoundryServerGroupDescription convertDescription(Map input) { + DeployCloudFoundryServerGroupDescription converted = + getObjectMapper().convertValue(input, DeployCloudFoundryServerGroupDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(credentials.getClient()); + converted.setAccountName(credentials.getName()); + + String region = converted.getRegion(); + converted.setSpace( + findSpace(region, converted.getClient()) + .orElseThrow( + () -> + new IllegalArgumentException( + "Unable to find organization and space '" + region + "'."))); + + // fail early if we're not going to be able to locate credentials to download the artifact in + // the deploy operation. + converted.setArtifactCredentials(getArtifactCredentials(converted)); + DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes = + convertManifest( + converted.getManifest().stream().findFirst().orElse(Collections.emptyMap())); + converted.setApplicationAttributes(applicationAttributes); + converted.setDocker( + converted.getArtifactCredentials().getTypes().contains("docker/image") + ? resolveDockerAccount(converted.getApplicationArtifact()) + : null); + List routes = applicationAttributes.getRoutes(); + + if ((routes == null || routes.isEmpty()) && applicationAttributes.getRandomRoute()) { + setRandomRoute(converted); + } + return converted; + } + + private void setRandomRoute(DeployCloudFoundryServerGroupDescription client) { + CloudFoundryDomain defaultDomain = client.getClient().getDomains().getDefault(); + if (defaultDomain != null) { + String routeName = null; + for (int i = 0; i < 10; i++) { + routeName = RandomWordGenerator.randomQualifiedNoun() + "." 
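/* e.g. "placid-gazelle." plus the default domain name (example noun purely illustrative); the enclosing loop retries up to 10 times until no existing route matches */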
+ defaultDomain.getName(); + RouteId routeId = client.getClient().getRoutes().toRouteId(routeName); + CloudFoundryLoadBalancer cloudFoundryLoadBalancer = + client.getClient().getRoutes().find(routeId, client.getSpace().getId()); + if (cloudFoundryLoadBalancer == null) { + break; + } + } + + client.getApplicationAttributes().setRoutes(Lists.newArrayList(routeName)); + } + } + + private Docker resolveDockerAccount(Artifact artifact) { + DockerRegistryNamedAccountCredentials dockerCreds = + dockerRegistryCredentialsRepository.getAll().stream() + .filter(reg -> reg.getRegistry().equals(artifact.getReference().split("/")[0])) + .filter(reg -> reg.getRepositories().contains(artifact.getName().split("/", 2)[1])) + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "Could not find a docker registry for the docker image: " + + artifact.getName())); + + return Docker.builder() + .image(artifact.getReference()) + .username(dockerCreds.getUsername()) + .password(dockerCreds.getPassword()) + .build(); + } + + private ArtifactCredentials getArtifactCredentials( + DeployCloudFoundryServerGroupDescription converted) { + Artifact artifact = converted.getApplicationArtifact(); + if (artifact == null) { + throw new IllegalArgumentException("No artifact definition in stage configuration"); + } + + String artifactAccount = artifact.getArtifactAccount(); + if (CloudFoundryArtifactCredentials.TYPE.equals(artifact.getType())) { + CloudFoundryCredentials credentials = getCredentialsObject(artifactAccount); + String uuid = + getServerGroupId(artifact.getName(), artifact.getLocation(), credentials.getClient()); + converted.setApplicationArtifact(artifact.toBuilder().uuid(uuid).build()); + return new CloudFoundryArtifactCredentials(credentials.getClient()); + } + + ArtifactCredentials credentials = + credentialsRepository.getFirstCredentialsWithName(artifactAccount); + if (credentials == null) { + throw new IllegalArgumentException( + "Unable to find artifact credentials '" + artifactAccount + "'"); + } + return credentials; + } + + // visible for testing + DeployCloudFoundryServerGroupDescription.ApplicationAttributes convertManifest( + Map manifestMap) { + List manifestApps = + new ObjectMapper() + .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE) + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + .convertValue(manifestMap.get("applications"), new TypeReference<>() {}); + + return manifestApps.stream() + .findFirst() + .map( + app -> { + final List buildpacks = + Match(app) + .of( + Case($(a -> a.getBuildpacks() != null), app.getBuildpacks()), + Case( + $(a -> a.getBuildpack() != null && a.getBuildpack().length() > 0), + Collections.singletonList(app.getBuildpack())), + Case($(), Collections.emptyList())); + + DeployCloudFoundryServerGroupDescription.ApplicationAttributes attrs = + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes(); + attrs.setInstances(app.getInstances() == null ? 1 : app.getInstances()); + attrs.setMemory(app.getMemory() == null ? "1024" : app.getMemory()); + attrs.setDiskQuota(app.getDiskQuota() == null ? "1024" : app.getDiskQuota()); + attrs.setHealthCheckHttpEndpoint(app.getHealthCheckHttpEndpoint()); + attrs.setHealthCheckType(app.getHealthCheckType()); + attrs.setBuildpacks(buildpacks); + attrs.setServices(app.getServices()); + attrs.setRoutes( + app.getRoutes() == null + ? 
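/* manifest declares no routes; a random route may still be assigned later when randomRoute is set */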
null + : app.getRoutes().stream() + .flatMap(route -> route.values().stream()) + .collect(toList())); + attrs.setEnv(app.getEnv()); + attrs.setStack(app.getStack()); + attrs.setCommand(app.getCommand()); + attrs.setProcesses(app.getProcesses()); + attrs.setRandomRoute(app.getRandomRoute()); + attrs.setTimeout(app.getTimeout()); + return attrs; + }) + .orElseThrow( + () -> + new IllegalArgumentException( + "No app manifest found in Cloud Foundry manifest file")); + } + + @Data + private static class CloudFoundryManifest { + @Nullable private Integer instances; + + @Nullable private String memory; + + @Nullable + @JsonProperty("disk_quota") + private String diskQuota; + + @Nullable private String healthCheckType; + + @Nullable private String healthCheckHttpEndpoint; + + @Nullable private String buildpack; + + @Nullable private List buildpacks; + + @Nullable private List services; + + @Nullable private List> routes; + + @Nullable private Map env; + + @Nullable private String stack; + + @Nullable private String command; + + @Nullable private Boolean randomRoute; + + @Nullable private Integer timeout; + + private List processes = Collections.emptyList(); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServiceAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServiceAtomicOperationConverter.java new file mode 100644 index 00000000000..e836be7efe3 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServiceAtomicOperationConverter.java @@ -0,0 +1,316 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.fasterxml.jackson.annotation.JsonAlias; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.*; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DeployCloudFoundryServiceAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.Data; +import org.apache.commons.lang3.StringUtils; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DEPLOY_SERVICE) +@Component +public class DeployCloudFoundryServiceAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + private static final ObjectMapper objectMapper = + new ObjectMapper() + .setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE) + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); + + private final Pattern r = Pattern.compile(".*?-v(\\d+)"); + + public DeployCloudFoundryServiceAtomicOperationConverter() {} + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeployCloudFoundryServiceAtomicOperation(convertDescription(input)); + } + + @Override + public DeployCloudFoundryServiceDescription convertDescription(Map input) { + DeployCloudFoundryServiceDescription converted = + getObjectMapper().convertValue(input, DeployCloudFoundryServiceDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + converted.setSpace( + findSpace(converted.getRegion(), converted.getClient()) + .orElseThrow( + () -> + new IllegalArgumentException( + "Unable to find space '" + converted.getRegion() + "'."))); + + List> manifest = converted.getManifest(); + + if (converted.isUserProvided()) { + converted.setUserProvidedServiceAttributes( + convertUserProvidedServiceManifest(manifest.stream().findFirst().orElse(null))); + if (converted.getUserProvidedServiceAttributes() != null + && converted.getUserProvidedServiceAttributes().isVersioned()) { + DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes attributes = + converted.getUserProvidedServiceAttributes(); + attributes.setPreviousInstanceName( + getLatestInstanceName( + attributes.getServiceInstanceName(), converted.getClient(), converted.getSpace())); + attributes.setServiceInstanceName( + getNextInstanceName( + 
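/* illustrative: a service named "mysvc" whose latest instance is "mysvc-v003" becomes "mysvc-v004"; with no prior instance it becomes "mysvc-v000" */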
attributes.getServiceInstanceName(), attributes.getPreviousInstanceName())); + converted.setUserProvidedServiceAttributes(attributes); + } + } else { + converted.setServiceAttributes(convertManifest(manifest.stream().findFirst().orElse(null))); + if (converted.getServiceAttributes() != null + && converted.getServiceAttributes().isVersioned()) { + DeployCloudFoundryServiceDescription.ServiceAttributes attributes = + converted.getServiceAttributes(); + attributes.setPreviousInstanceName( + getLatestInstanceName( + attributes.getServiceInstanceName(), converted.getClient(), converted.getSpace())); + attributes.setServiceInstanceName( + getNextInstanceName( + attributes.getServiceInstanceName(), attributes.getPreviousInstanceName())); + converted.setServiceAttributes(attributes); + } + } + return converted; + } + + @Nullable + private String getLatestInstanceName( + String serviceInstanceName, CloudFoundryClient client, CloudFoundrySpace space) { + if (serviceInstanceName == null || serviceInstanceName.isEmpty()) { + throw new IllegalArgumentException("Service Instance Name must not be null or empty."); + } + List serviceInstances = + client + .getServiceInstances() + .findAllVersionedServiceInstancesBySpaceAndName( + space, String.format("%s-v%03d", serviceInstanceName, 0)) + .stream() + .filter(n -> n.getEntity().getName().startsWith(serviceInstanceName)) + .map(rs -> rs.getEntity().getName()) + .filter(n -> isVersioned(n)) + .collect(Collectors.toList()); + + if (serviceInstances.isEmpty()) { + return null; + } + + Integer latestVersion = + serviceInstances.stream() + .map( + v -> { + Matcher m = r.matcher(v); + m.find(); + return Integer.parseInt(m.group(1)); + }) + .mapToInt(n -> n) + .max() + .orElseThrow( + () -> + new CloudFoundryApiException( + "Unable to determine latest version for service instance: " + + serviceInstanceName)); + + return String.format("%s-v%03d", serviceInstanceName, latestVersion); + } + + private String getNextInstanceName(String serviceInstanceName, String latestInstanceName) { + if (latestInstanceName == null) { + return String.format("%s-v%03d", serviceInstanceName, 0); + } + Matcher m = r.matcher(latestInstanceName); + m.find(); + int latestVersion = Integer.parseInt(m.group(1)); + return String.format("%s-v%03d", serviceInstanceName, latestVersion + 1); + } + + private boolean isVersioned(String name) { + Matcher m = r.matcher(name); + return m.find(); + } + + // visible for testing + DeployCloudFoundryServiceDescription.ServiceAttributes convertManifest(Object manifestMap) { + if (manifestMap == null) { + throw new IllegalArgumentException("No configurations detected"); + } + ServiceManifest manifest = objectMapper.convertValue(manifestMap, ServiceManifest.class); + if (manifest.getService() == null) { + throw new IllegalArgumentException("Manifest is missing the service"); + } else if (manifest.getServiceInstanceName() == null) { + throw new IllegalArgumentException("Manifest is missing the service instance name"); + } else if (manifest.getServicePlan() == null) { + throw new IllegalArgumentException("Manifest is missing the service plan"); + } + DeployCloudFoundryServiceDescription.ServiceAttributes attrs = + new DeployCloudFoundryServiceDescription.ServiceAttributes(); + attrs.setService(manifest.getService()); + attrs.setServiceInstanceName(manifest.getServiceInstanceName()); + attrs.setServicePlan(manifest.getServicePlan()); + attrs.setTags(manifest.getTags()); + attrs.setUpdatable(manifest.isUpdatable()); + attrs.setVersioned(manifest.isVersioned()); 
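/* service parameters may be an inline map or a string of serialized JSON/YAML; see OptionallySerializedMapDeserializer below */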
+ attrs.setParameterMap(manifest.getParameters()); + return attrs; + } + + // visible for testing + DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes + convertUserProvidedServiceManifest(Object manifestMap) { + if (manifestMap == null) { + throw new IllegalArgumentException("No configurations detected"); + } + UserProvidedServiceManifest manifest = + objectMapper.convertValue(manifestMap, UserProvidedServiceManifest.class); + if (manifest.getServiceInstanceName() == null) { + throw new IllegalArgumentException("Manifest is missing the service instance name"); + } + DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes attrs = + new DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes(); + attrs.setServiceInstanceName(manifest.getServiceInstanceName()); + attrs.setSyslogDrainUrl(manifest.getSyslogDrainUrl()); + attrs.setRouteServiceUrl(manifest.getRouteServiceUrl()); + attrs.setTags(manifest.getTags()); + attrs.setUpdatable(manifest.isUpdatable()); + attrs.setVersioned(manifest.isVersioned()); + attrs.setCredentials(manifest.getCredentials()); + return attrs; + } + + @Data + private static class ServiceManifest { + private String service; + private boolean updatable = true; + private boolean versioned = false; + + @JsonAlias({"service_instance_name", "serviceInstanceName"}) + private String serviceInstanceName; + + @JsonAlias({"service_plan", "servicePlan"}) + private String servicePlan; + + @Nullable private Set<String> tags; + + @Nullable + @JsonDeserialize(using = OptionallySerializedMapDeserializer.class) + private Map<Object, Object> parameters; + } + + @Data + private static class UserProvidedServiceManifest { + private boolean updatable = true; + private boolean versioned = false; + + @JsonAlias({"service_instance_name", "serviceInstanceName"}) + private String serviceInstanceName; + + @Nullable + @JsonAlias({"syslog_drain_url", "syslogDrainUrl"}) + private String syslogDrainUrl; + + @Nullable + @JsonAlias({"route_service_url", "routeServiceUrl"}) + private String routeServiceUrl; + + @Nullable private Set<String> tags; + + @Nullable + @JsonAlias({"credentials_map", "credentialsMap"}) + @JsonDeserialize(using = OptionallySerializedMapDeserializer.class) + private Map<Object, Object> credentials; + } + + public static class OptionallySerializedMapDeserializer + extends JsonDeserializer<Map<Object, Object>> { + + private final TypeReference<Map<Object, Object>> mapTypeReference = + new TypeReference<Map<Object, Object>>() {}; + + private final ObjectMapper yamlObjectMapper = new ObjectMapper(new YAMLFactory()); + + @Override + public Map<Object, Object> deserialize(JsonParser parser, DeserializationContext context) + throws IOException { + JsonToken currentToken = parser.currentToken(); + + Map<Object, Object> deserializedMap = null; + + if (currentToken == JsonToken.START_OBJECT) { + deserializedMap = + context.readValue(parser, context.getTypeFactory().constructType(mapTypeReference)); + } else if (currentToken == JsonToken.VALUE_STRING) { + String serializedMap = parser.getValueAsString(); + if (StringUtils.isNotBlank(serializedMap)) { + deserializedMap = + deserializeWithMappers( + serializedMap, + mapTypeReference, + yamlObjectMapper, + (ObjectMapper) parser.getCodec()); + } + } + + return deserializedMap; + } + + /** + * Deserialize a String by trying multiple {@link ObjectMapper}s in turn. + * + * @return The value returned by the first mapper successfully deserializing the input. + * @throws IOException When all ObjectMappers fail to deserialize the input. + */ + private <T> T deserializeWithMappers( + String serialized, TypeReference<T> typeReference, ObjectMapper... 
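/* mappers are tried in order; the deserialize method above passes the YAML mapper first, then the parser's own codec */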
mappers) + throws IOException { + + IOException deserializationFailed = + new IOException("Could not deserialize value using the provided objectMappers"); + + for (ObjectMapper mapper : mappers) { + try { + return mapper.readValue(serialized, typeReference); + } catch (IOException e) { + deserializationFailed.addSuppressed(e); + } + } + throw deserializationFailed; + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServerGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..0dc75d9bb60 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServerGroupAtomicOperationConverter.java @@ -0,0 +1,49 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DestroyCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DestroyCloudFoundryServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DESTROY_SERVER_GROUP) +@Component +public class DestroyCloudFoundryServerGroupAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new DestroyCloudFoundryServerGroupAtomicOperation(convertDescription(input)); + } + + @Override + public DestroyCloudFoundryServerGroupDescription convertDescription(Map input) { + DestroyCloudFoundryServerGroupDescription converted = + getObjectMapper().convertValue(input, DestroyCloudFoundryServerGroupDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + converted.setServerGroupId( + getServerGroupId( + converted.getServerGroupName(), converted.getRegion(), converted.getClient())); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServiceAtomicOperationConverter.java 
b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServiceAtomicOperationConverter.java new file mode 100644 index 00000000000..0ac13818c9f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServiceAtomicOperationConverter.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DestroyCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DestroyCloudFoundryServiceAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DESTROY_SERVICE) +@Component +public class DestroyCloudFoundryServiceAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + + @Override + public AtomicOperation convertOperation(Map input) { + return new DestroyCloudFoundryServiceAtomicOperation(convertDescription(input)); + } + + @Override + public DestroyCloudFoundryServiceDescription convertDescription(Map input) { + DestroyCloudFoundryServiceDescription converted = + getObjectMapper().convertValue(input, DestroyCloudFoundryServiceDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + converted.setSpace( + findSpace(converted.getRegion(), converted.getClient()) + .orElseThrow( + () -> + new IllegalArgumentException( + "Unable to find space '" + converted.getRegion() + "'."))); + if (converted.getApplication() == null || converted.getApplication().isEmpty()) { + throw new IllegalArgumentException( + "Application must not be null. Please re-create the destroy service stage in order to automatically add this field."); + } + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/MapLoadBalancersAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/MapLoadBalancersAtomicOperationConverter.java new file mode 100644 index 00000000000..85b6ff92da7 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/MapLoadBalancersAtomicOperationConverter.java @@ -0,0 +1,34 @@ +/* + * Copyright 2019 Pivotal, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.MapLoadBalancersAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.MAP_LOAD_BALANCERS) +@Component +public class MapLoadBalancersAtomicOperationConverter + extends AbstractLoadBalancersAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new MapLoadBalancersAtomicOperation(convertDescription(input)); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ScaleCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ScaleCloudFoundryServerGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..2aefb07936f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ScaleCloudFoundryServerGroupAtomicOperationConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ScaleCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.ScaleCloudFoundryServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.RESIZE_SERVER_GROUP) +@Component +public class ScaleCloudFoundryServerGroupAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + private final OperationPoller operationPoller; + + public ScaleCloudFoundryServerGroupAtomicOperationConverter( + @Qualifier("cloudFoundryOperationPoller") OperationPoller operationPoller) { + this.operationPoller = operationPoller; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new ScaleCloudFoundryServerGroupAtomicOperation( + operationPoller, convertDescription(input)); + } + + @Override + public ScaleCloudFoundryServerGroupDescription convertDescription(Map input) { + ScaleCloudFoundryServerGroupDescription converted = + getObjectMapper().convertValue(input, ScaleCloudFoundryServerGroupDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + converted.setServerGroupId( + getServerGroupId( + converted.getServerGroupName(), converted.getRegion(), converted.getClient())); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ShareCloudFoundryServiceAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ShareCloudFoundryServiceAtomicOperationConverter.java new file mode 100644 index 00000000000..bbbac22cf6e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ShareCloudFoundryServiceAtomicOperationConverter.java @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ShareCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.ShareCloudFoundryServiceAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.SHARE_SERVICE) +@Component +public class ShareCloudFoundryServiceAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new ShareCloudFoundryServiceAtomicOperation(convertDescription(input)); + } + + @Override + public ShareCloudFoundryServiceDescription convertDescription(Map input) { + ShareCloudFoundryServiceDescription converted = + getObjectMapper().convertValue(input, ShareCloudFoundryServiceDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/StartCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/StartCloudFoundryServerGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..403a63ab73f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/StartCloudFoundryServerGroupAtomicOperationConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StartCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.StartCloudFoundryServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.ENABLE_SERVER_GROUP) +@Component +public class StartCloudFoundryServerGroupAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + private final OperationPoller operationPoller; + + public StartCloudFoundryServerGroupAtomicOperationConverter( + @Qualifier("cloudFoundryOperationPoller") OperationPoller operationPoller) { + this.operationPoller = operationPoller; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new StartCloudFoundryServerGroupAtomicOperation( + operationPoller, convertDescription(input)); + } + + @Override + public StartCloudFoundryServerGroupDescription convertDescription(Map input) { + StartCloudFoundryServerGroupDescription converted = + getObjectMapper().convertValue(input, StartCloudFoundryServerGroupDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + converted.setServerGroupId( + getServerGroupId( + converted.getServerGroupName(), converted.getRegion(), converted.getClient())); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/StopCloudFoundryServerGroupAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/StopCloudFoundryServerGroupAtomicOperationConverter.java new file mode 100644 index 00000000000..40f483e9e99 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/StopCloudFoundryServerGroupAtomicOperationConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StopCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.StopCloudFoundryServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DISABLE_SERVER_GROUP) +@Component +public class StopCloudFoundryServerGroupAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + private final OperationPoller operationPoller; + + public StopCloudFoundryServerGroupAtomicOperationConverter( + @Qualifier("cloudFoundryOperationPoller") OperationPoller operationPoller) { + this.operationPoller = operationPoller; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new StopCloudFoundryServerGroupAtomicOperation( + operationPoller, convertDescription(input)); + } + + @Override + public StopCloudFoundryServerGroupDescription convertDescription(Map input) { + StopCloudFoundryServerGroupDescription converted = + getObjectMapper().convertValue(input, StopCloudFoundryServerGroupDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + converted.setServerGroupId( + getServerGroupId( + converted.getServerGroupName(), converted.getRegion(), converted.getClient())); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/TerminateCloudFoundryInstancesAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/TerminateCloudFoundryInstancesAtomicOperationConverter.java new file mode 100644 index 00000000000..47400b4a13f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/TerminateCloudFoundryInstancesAtomicOperationConverter.java @@ -0,0 +1,46 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.TerminateCloudFoundryInstancesDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.TerminateCloudFoundryInstancesAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.TERMINATE_INSTANCES) +@Component +public class TerminateCloudFoundryInstancesAtomicOperationConverter + extends AbstractCloudFoundryServerGroupAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new TerminateCloudFoundryInstancesAtomicOperation(convertDescription(input)); + } + + @Override + public TerminateCloudFoundryInstancesDescription convertDescription(Map input) { + TerminateCloudFoundryInstancesDescription converted = + getObjectMapper().convertValue(input, TerminateCloudFoundryInstancesDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UnmapLoadBalancersAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UnmapLoadBalancersAtomicOperationConverter.java new file mode 100644 index 00000000000..e6273e3c644 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UnmapLoadBalancersAtomicOperationConverter.java @@ -0,0 +1,34 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.UnmapLoadBalancersAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.UNMAP_LOAD_BALANCERS) +@Component +public class UnmapLoadBalancersAtomicOperationConverter + extends AbstractLoadBalancersAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new UnmapLoadBalancersAtomicOperation(convertDescription(input)); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UnshareCloudFoundryServiceAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UnshareCloudFoundryServiceAtomicOperationConverter.java new file mode 100644 index 00000000000..596146e48c6 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UnshareCloudFoundryServiceAtomicOperationConverter.java @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.UnshareCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.UnshareCloudFoundryServiceAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.UNSHARE_SERVICE) +@Component +public class UnshareCloudFoundryServiceAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new UnshareCloudFoundryServiceAtomicOperation(convertDescription(input)); + } + + @Override + public UnshareCloudFoundryServiceDescription convertDescription(Map input) { + UnshareCloudFoundryServiceDescription converted = + getObjectMapper().convertValue(input, UnshareCloudFoundryServiceDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + converted.setClient(getClient(input)); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UpsertCloudFoundryLoadBalancerAtomicOperationConverter.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UpsertCloudFoundryLoadBalancerAtomicOperationConverter.java new file mode 100644 index 00000000000..1aaf4d52241 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/UpsertCloudFoundryLoadBalancerAtomicOperationConverter.java @@ -0,0 +1,61 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.UpsertCloudFoundryLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.UpsertCloudFoundryLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.Map; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +@Component +public class UpsertCloudFoundryLoadBalancerAtomicOperationConverter + extends AbstractCloudFoundryAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return new UpsertCloudFoundryLoadBalancerAtomicOperation(convertDescription(input)); + } + + @Override + public UpsertCloudFoundryLoadBalancerDescription convertDescription(Map input) { + UpsertCloudFoundryLoadBalancerDescription converted = + getObjectMapper().convertValue(input, UpsertCloudFoundryLoadBalancerDescription.class); + CloudFoundryCredentials credentials = getCredentialsObject(input.get("credentials").toString()); + converted.setCredentials(credentials); + CloudFoundryClient client = getClient(input); + converted.setClient(client); + findSpace(converted.getRegion(), client) + .ifPresentOrElse( + converted::setSpace, + () -> { + throw new IllegalArgumentException( + "Unable to find space '" + converted.getRegion() + "'"); + }); + String domainName = input.get("domain").toString(); + converted.setDomain( + client + .getDomains() + .findByName(domainName) + .orElseThrow( + () -> new IllegalArgumentException("Unable to find domain '" + domainName + "'"))); + return converted; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryDescription.java new file mode 100644 index 00000000000..c598d306c1f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryDescription.java @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.security.resources.AccountNameable; +import lombok.Data; + +@Data +public abstract class AbstractCloudFoundryDescription implements AccountNameable { + @JsonIgnore private CloudFoundryClient client; + + private String region; + + @JsonIgnore private CloudFoundryCredentials credentials; + + @Override + public String getAccount() { + if (credentials != null) { + return credentials.getName(); + } + throw new IllegalStateException("Credentials must not be null"); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryLoadBalancerDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryLoadBalancerDescription.java new file mode 100644 index 00000000000..ce8aa360ed2 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryLoadBalancerDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public abstract class AbstractCloudFoundryLoadBalancerDescription + extends AbstractCloudFoundryDescription { + CloudFoundrySpace space; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryServerGroupDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryServerGroupDescription.java new file mode 100644 index 00000000000..842654c19b4 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryServerGroupDescription.java @@ -0,0 +1,46 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Collection; +import java.util.Collections; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public abstract class AbstractCloudFoundryServerGroupDescription + extends AbstractCloudFoundryDescription implements ApplicationNameable { + private String serverGroupId; + private String serverGroupName; + + private String cluster; + private Moniker moniker; + + @Override + public Collection getApplications() { + if (moniker != null) { + return Collections.singletonList(moniker.getApp()); + } else if (cluster != null) { + return Collections.singletonList(Names.parseName(cluster).getApp()); + } + return Collections.singletonList(Names.parseName(serverGroupName).getApp()); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryServiceDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryServiceDescription.java new file mode 100644 index 00000000000..1edef313031 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/AbstractCloudFoundryServiceDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public abstract class AbstractCloudFoundryServiceDescription + extends AbstractCloudFoundryDescription { + private CloudFoundrySpace space; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CloudFoundryRunJobOperationDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CloudFoundryRunJobOperationDescription.java new file mode 100644 index 00000000000..22d9a8969b2 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CloudFoundryRunJobOperationDescription.java @@ -0,0 +1,33 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
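The getApplications() override above resolves the owning application from a Moniker when one is present, and otherwise falls back to parsing Frigga-style names. A small, self-contained illustration of that fallback; the server-group name is made up:

```java
import com.netflix.frigga.Names;

class FriggaNameExample {
  public static void main(String[] args) {
    // With no Moniker and no cluster set, the application is recovered by
    // parsing the Frigga naming convention: <app>-<stack>-<detail>-v<sequence>.
    Names names = Names.parseName("orders-staging-v003");
    System.out.println(names.getApp());      // orders
    System.out.println(names.getStack());    // staging
    System.out.println(names.getSequence()); // 3
  }
}
```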
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class CloudFoundryRunJobOperationDescription + extends AbstractCloudFoundryServerGroupDescription { + + private CloudFoundryServerGroup serverGroup; + @Nullable private String jobName; + private String command; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CreateCloudFoundryServiceBindingDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CreateCloudFoundryServiceBindingDescription.java new file mode 100644 index 00000000000..a7974c55920 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CreateCloudFoundryServiceBindingDescription.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.List; +import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.NoArgsConstructor; + +@Data +@EqualsAndHashCode(callSuper = true) +@AllArgsConstructor +@NoArgsConstructor +public class CreateCloudFoundryServiceBindingDescription + extends AbstractCloudFoundryServerGroupDescription { + + private CloudFoundrySpace space; + private List<ServiceBindingRequest> serviceBindingRequests; + private boolean restageRequired = true; + private boolean restartRequired; + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class ServiceBindingRequest { + private String serviceInstanceName; + private Map<String, Object> parameters; + private boolean updatable; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CreateCloudFoundryServiceKeyDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CreateCloudFoundryServiceKeyDescription.java new file mode 100644 index 00000000000..e34b9244a2e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/CreateCloudFoundryServiceKeyDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class CreateCloudFoundryServiceKeyDescription + extends AbstractCloudFoundryServiceDescription { + private String serviceInstanceName; + private String serviceKeyName; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryLoadBalancerDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryLoadBalancerDescription.java new file mode 100644 index 00000000000..e1cd89a2a4e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryLoadBalancerDescription.java @@ -0,0 +1,29 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
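As a usage sketch, a single binding request for the description above might be built like this; the instance name and parameter map are hypothetical, and the constructor argument order follows the Lombok @AllArgsConstructor field order:

```java
import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceBindingDescription;
import java.util.Map;

class ServiceBindingRequestSketch {
  static CreateCloudFoundryServiceBindingDescription.ServiceBindingRequest example() {
    return new CreateCloudFoundryServiceBindingDescription.ServiceBindingRequest(
        "orders-db",                 // serviceInstanceName (invented)
        Map.of("role", "read-only"), // parameters forwarded to the service broker (assumed shape)
        true);                       // updatable: an existing binding is deleted and recreated
  }
}
```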
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DeleteCloudFoundryLoadBalancerDescription + extends AbstractCloudFoundryLoadBalancerDescription { + @Nullable private CloudFoundryLoadBalancer loadBalancer; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryServiceBindingDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryServiceBindingDescription.java new file mode 100644 index 00000000000..67a52a1aa34 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryServiceBindingDescription.java @@ -0,0 +1,42 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.List; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.NoArgsConstructor; + +@Data +@EqualsAndHashCode(callSuper = true) +@AllArgsConstructor +@NoArgsConstructor +public class DeleteCloudFoundryServiceBindingDescription + extends AbstractCloudFoundryServerGroupDescription { + + private CloudFoundrySpace space; + private List<ServiceUnbindingRequest> serviceUnbindingRequests; + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class ServiceUnbindingRequest { + private String serviceInstanceName; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryServiceKeyDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryServiceKeyDescription.java new file mode 100644 index 00000000000..f89fc63f594 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeleteCloudFoundryServiceKeyDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DeleteCloudFoundryServiceKeyDescription + extends AbstractCloudFoundryServiceDescription { + private String serviceInstanceName; + private String serviceKeyName; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeployCloudFoundryServerGroupDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeployCloudFoundryServerGroupDescription.java new file mode 100644 index 00000000000..00cde7950bb --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeployCloudFoundryServerGroupDescription.java @@ -0,0 +1,89 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Docker; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessRequest; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import javax.annotation.Nullable; +import lombok.AccessLevel; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DeployCloudFoundryServerGroupDescription + extends AbstractCloudFoundryServerGroupDescription { + private String accountName; + private String application; + private String stack; + private String freeFormDetails; + private CloudFoundrySpace space; + private boolean startApplication; + private Artifact applicationArtifact; + private List<Map<Object, Object>> manifest; + private String executionId; + private Map trigger; + + @JsonIgnore private ArtifactCredentials artifactCredentials; + + @JsonIgnore private ApplicationAttributes applicationAttributes; + + @JsonIgnore private Docker docker; + + @Data + public static class ApplicationAttributes { + private int instances; + private String memory; + private String diskQuota; + + @Nullable private String healthCheckType; + + @Nullable private String healthCheckHttpEndpoint; + + @Nullable private List<String> routes; + + @Nullable private List<String> buildpacks; + + @Nullable private Map<String, String> env; + + @Nullable private List<String> services; + + @Nullable private String stack; + + @Nullable private String command; + + private List<ProcessRequest> processes = Collections.emptyList(); + + @Nullable private Integer
timeout; + + @Getter(AccessLevel.NONE) + @Nullable + private Boolean randomRoute; + + public boolean getRandomRoute() { + return randomRoute != null && randomRoute; + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeployCloudFoundryServiceDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeployCloudFoundryServiceDescription.java new file mode 100644 index 00000000000..0bed6f0cfcf --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DeployCloudFoundryServiceDescription.java @@ -0,0 +1,69 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DeployCloudFoundryServiceDescription extends AbstractCloudFoundryServiceDescription { + private boolean userProvided = false; + + private List<Map<Object, Object>> manifest; + + @JsonIgnore private ServiceAttributes serviceAttributes; + + @JsonIgnore private UserProvidedServiceAttributes userProvidedServiceAttributes; + + @Data + public static class ServiceAttributes { + String service; + String serviceInstanceName; + String servicePlan; + boolean updatable = true; + boolean versioned = false; + + @Nullable Set<String> tags; + + @Nullable Map<String, Object> parameterMap; + + @JsonIgnore String previousInstanceName; + } + + @Data + public static class UserProvidedServiceAttributes { + String serviceInstanceName; + boolean updatable = true; + boolean versioned = false; + + @Nullable Set<String> tags; + + @Nullable String syslogDrainUrl; + + @Nullable Map<String, Object> credentials; + + @Nullable String routeServiceUrl; + + @JsonIgnore String previousInstanceName; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DestroyCloudFoundryServerGroupDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DestroyCloudFoundryServerGroupDescription.java new file mode 100644 index 00000000000..2dfa8916e2e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DestroyCloudFoundryServerGroupDescription.java @@ -0,0 +1,20 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
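The manifest field above arrives as a list of maps that is later bound onto ServiceAttributes. A rough sketch of one entry, assuming the map keys mirror the ServiceAttributes field names; the service, plan, and tags are invented:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

class ServiceManifestSketch {
  static List<Map<Object, Object>> exampleManifest() {
    Map<Object, Object> entry = new HashMap<>();
    entry.put("service", "p-mysql");              // broker offering (hypothetical)
    entry.put("serviceInstanceName", "orders-db");
    entry.put("servicePlan", "100mb");
    entry.put("tags", Set.of("orders", "mysql"));
    entry.put("updatable", true);
    return List.of(entry);
  }
}
```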
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +public class DestroyCloudFoundryServerGroupDescription + extends AbstractCloudFoundryServerGroupDescription {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DestroyCloudFoundryServiceDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DestroyCloudFoundryServiceDescription.java new file mode 100644 index 00000000000..5913d0346a1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/DestroyCloudFoundryServiceDescription.java @@ -0,0 +1,38 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.Collection; +import java.util.Collections; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DestroyCloudFoundryServiceDescription + extends AbstractCloudFoundryServerGroupDescription { + private String serviceInstanceName; + private CloudFoundrySpace space; + private boolean removeBindings; + private String application; + + @Override + public Collection getApplications() { + return Collections.singletonList(application); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/LoadBalancersDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/LoadBalancersDescription.java new file mode 100644 index 00000000000..a85a70c4802 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/LoadBalancersDescription.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.List; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class LoadBalancersDescription extends AbstractCloudFoundryServerGroupDescription { + private List routes; + private CloudFoundrySpace space; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/ScaleCloudFoundryServerGroupDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/ScaleCloudFoundryServerGroupDescription.java new file mode 100644 index 00000000000..9425ffa289c --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/ScaleCloudFoundryServerGroupDescription.java @@ -0,0 +1,35 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class ScaleCloudFoundryServerGroupDescription + extends AbstractCloudFoundryServerGroupDescription { + ServerGroup.Capacity capacity; + + @Nullable private Integer memory; + + @Nullable private Integer diskQuota; + + @Nullable private Boolean scaleStoppedServerGroup; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/ShareCloudFoundryServiceDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/ShareCloudFoundryServiceDescription.java new file mode 100644 index 00000000000..8b62270fe40 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/ShareCloudFoundryServiceDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import java.util.Set; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class ShareCloudFoundryServiceDescription extends AbstractCloudFoundryServiceDescription { + private String serviceInstanceName; + private Set shareToRegions; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/StartCloudFoundryServerGroupDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/StartCloudFoundryServerGroupDescription.java new file mode 100644 index 00000000000..d4eea59b05a --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/StartCloudFoundryServerGroupDescription.java @@ -0,0 +1,20 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +public class StartCloudFoundryServerGroupDescription + extends AbstractCloudFoundryServerGroupDescription {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/StopCloudFoundryServerGroupDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/StopCloudFoundryServerGroupDescription.java new file mode 100644 index 00000000000..2743739b25e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/StopCloudFoundryServerGroupDescription.java @@ -0,0 +1,20 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +public class StopCloudFoundryServerGroupDescription + extends AbstractCloudFoundryServerGroupDescription {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/TerminateCloudFoundryInstancesDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/TerminateCloudFoundryInstancesDescription.java new file mode 100644 index 00000000000..33c8ba24f7c --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/TerminateCloudFoundryInstancesDescription.java @@ -0,0 +1,26 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class TerminateCloudFoundryInstancesDescription extends AbstractCloudFoundryDescription { + private String[] instanceIds; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/UnshareCloudFoundryServiceDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/UnshareCloudFoundryServiceDescription.java new file mode 100644 index 00000000000..4eb656ec455 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/UnshareCloudFoundryServiceDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import java.util.Set; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class UnshareCloudFoundryServiceDescription extends AbstractCloudFoundryServiceDescription { + private String serviceInstanceName; + private Set unshareFromRegions; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/UpsertCloudFoundryLoadBalancerDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/UpsertCloudFoundryLoadBalancerDescription.java new file mode 100644 index 00000000000..d927b121c9a --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/description/UpsertCloudFoundryLoadBalancerDescription.java @@ -0,0 +1,33 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class UpsertCloudFoundryLoadBalancerDescription + extends AbstractCloudFoundryLoadBalancerDescription { + private String host; + private String path; + private int port; + private CloudFoundrySpace space; + private CloudFoundryDomain domain; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryLoadBalancerMappingOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryLoadBalancerMappingOperation.java new file mode 100644 index 00000000000..67640eee8d0 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryLoadBalancerMappingOperation.java @@ -0,0 +1,86 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static java.util.stream.Collectors.toList; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.AbstractCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import java.util.List; +import java.util.Objects; +import javax.annotation.Nullable; + +public abstract class AbstractCloudFoundryLoadBalancerMappingOperation { + protected abstract String getPhase(); + + protected static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + // VisibleForTesting + boolean mapRoutes( + AbstractCloudFoundryServerGroupDescription description, + @Nullable List routes, + CloudFoundrySpace space, + String serverGroupId) { + if (routes == null) { + getTask().updateStatus(getPhase(), "No load balancers provided to create or update"); + return true; + } + + getTask().updateStatus(getPhase(), "Creating or updating load balancers"); + + CloudFoundryClient client = description.getClient(); + List routeIds = + routes.stream() + .map( + routePath -> { + RouteId routeId = client.getRoutes().toRouteId(routePath); + if (routeId == null) { + throw new IllegalArgumentException(routePath + " is an invalid route"); + } + return routeId; + }) + .filter(Objects::nonNull) + .collect(toList()); + + for (RouteId routeId : routeIds) { + CloudFoundryLoadBalancer loadBalancer = + client.getRoutes().createRoute(routeId, space.getId()); + if (loadBalancer == null) { + throw new CloudFoundryApiException( + "Load balancer already exists in another organization and space"); + } + getTask() + .updateStatus( + getPhase(), + "Mapping load balancer '" + + loadBalancer.getName() + + "' to " + + description.getServerGroupName()); + client.getApplications().mapRoute(serverGroupId, loadBalancer.getId()); + } + + return true; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CloudFoundryOperationUtils.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CloudFoundryOperationUtils.java new file mode 100644 index 00000000000..cefc691ef47 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CloudFoundryOperationUtils.java @@ -0,0 +1,36 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
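mapRoutes above receives load balancers as plain route-path strings and hands each to client.getRoutes().toRouteId(...). Illustrative values follow; the host names and domains are invented, and the exact accepted formats are defined by the Routes client, not here:

```java
import java.util.Arrays;
import java.util.List;

class RoutePathSketch {
  // Every entry must resolve to a RouteId, otherwise mapRoutes throws
  // IllegalArgumentException("<path> is an invalid route").
  static List<String> exampleRoutes() {
    return Arrays.asList(
        "my-app.apps.example.com",      // host + domain
        "my-app.apps.example.com/api"); // host + domain + path
  }
}
```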
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; + +class CloudFoundryOperationUtils { + static String describeProcessState(ProcessStats.State state) { + switch (state) { + case STARTING: + return "is still starting"; + case CRASHED: + return "crashed"; + case STOPPING: + return "is in graceful shutdown - stopping"; + case RUNNING: + case DOWN: + default: + return "is " + state.toString().toLowerCase(); + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CloudFoundryRunJobOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CloudFoundryRunJobOperation.java new file mode 100644 index 00000000000..53fe8f3e6d1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CloudFoundryRunJobOperation.java @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CloudFoundryRunJobOperationDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Collections; +import java.util.List; +import lombok.RequiredArgsConstructor; +import org.apache.commons.lang3.StringUtils; + +@RequiredArgsConstructor +public class CloudFoundryRunJobOperation implements AtomicOperation { + private static final String PHASE = "RUN_CLOUDFOUNDRY_JOB"; + private final CloudFoundryRunJobOperationDescription description; + + @Override + public DeploymentResult operate(List priorOutputs) { + CloudFoundryClient client = description.getClient(); + CloudFoundryServerGroup serverGroup = description.getServerGroup(); + String applicationGuid = serverGroup.getId(); + String applicationName = serverGroup.getName(); + + // make the job name unique by appending to it a random string so its logs are filterable + String originalName = description.getJobName(); + String randomString = Long.toHexString(Double.doubleToLongBits(Math.random())); + String jobName = + (StringUtils.isNotEmpty(originalName) ? 
originalName + "-" : "") + randomString; + + TaskRepository.threadLocalTask + .get() + .updateStatus( + PHASE, + String.format( + "Running job '%1$s' as a CloudFoundry task '%1$s' on org/space '%2$s' with application '%3$s'", + jobName, description.getRegion(), applicationName)); + + Task cfTask = client.getTasks().createTask(applicationGuid, description.getCommand(), jobName); + + DeploymentResult deploymentResult = new DeploymentResult(); + deploymentResult + .getDeployedNamesByLocation() + .put(description.getRegion(), Collections.singletonList(cfTask.getGuid())); + return deploymentResult; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceBindingAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceBindingAtomicOperation.java new file mode 100644 index 00000000000..73bdcc8e066 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceBindingAtomicOperation.java @@ -0,0 +1,166 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CloudFoundryOperationUtils.describeProcessState; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.CreateServiceBinding; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceBindingDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class CreateCloudFoundryServiceBindingAtomicOperation implements AtomicOperation { + + private static final String PHASE = "CREATE_SERVICE_BINDINGS"; + private final OperationPoller operationPoller; + private final CreateCloudFoundryServiceBindingDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + + List serviceInstanceNames = + description.getServiceBindingRequests().stream() + .map(s -> s.getServiceInstanceName()) + .collect(Collectors.toList()); + + getTask() + .updateStatus( + PHASE, + "Creating Cloud Foundry service bindings between application '" + + description.getServerGroupName() + + "' and services: " + + 
serviceInstanceNames); + + Map<String, String> serviceInstanceGuids = new HashMap<>(); + + description + .getClient() + .getServiceInstances() + .findAllServicesBySpaceAndNames(description.getSpace(), serviceInstanceNames) + .forEach(s -> serviceInstanceGuids.put(s.getEntity().getName(), s.getMetadata().getGuid())); + + List<CreateServiceBinding> bindings = + description.getServiceBindingRequests().stream() + .map( + s -> { + String serviceGuid = serviceInstanceGuids.get(s.getServiceInstanceName()); + if (serviceGuid == null || serviceGuid.isEmpty()) { + throw new CloudFoundryApiException( + "Unable to find service with the name: '" + + s.getServiceInstanceName() + + "'"); + } + if (s.isUpdatable()) { + removeBindings(serviceGuid, description.getServerGroupId()); + } + return new CreateServiceBinding( + serviceGuid, + description.getServerGroupId(), + s.getServiceInstanceName(), + s.getParameters()); + }) + .collect(Collectors.toList()); + + bindings.forEach(b -> description.getClient().getServiceInstances().createServiceBinding(b)); + + if (description.isRestageRequired()) { + getTask().updateStatus(PHASE, "Restaging application '" + description.getServerGroupName() + "'"); + description.getClient().getApplications().restageApplication(description.getServerGroupId()); + } else { + getTask().updateStatus(PHASE, "Restarting application '" + description.getServerGroupName() + "'"); + description.getClient().getApplications().stopApplication(description.getServerGroupId()); + operationPoller.waitForOperation( + () -> + description.getClient().getApplications().getAppState(description.getServerGroupId()), + inProgressState -> + inProgressState == ProcessStats.State.DOWN + || inProgressState == ProcessStats.State.CRASHED, + null, + getTask(), + description.getServerGroupName(), + PHASE); + description.getClient().getApplications().startApplication(description.getServerGroupId()); + } + + ProcessStats.State state = + operationPoller.waitForOperation( + () -> + description + .getClient() + .getApplications() + .getAppState(description.getServerGroupId()), + inProgressState -> + inProgressState == ProcessStats.State.RUNNING + || inProgressState == ProcessStats.State.CRASHED, + null, + getTask(), + description.getServerGroupName(), + PHASE); + + if (state != ProcessStats.State.RUNNING) { + getTask() + .updateStatus( + PHASE, + "Failed to create Cloud Foundry service bindings between application '" + + description.getServerGroupName() + + "' and services: " + + serviceInstanceNames); + throw new CloudFoundryApiException( + "Failed to start '" + + description.getServerGroupName() + + "' which instead " + + describeProcessState(state)); + } + + getTask() + .updateStatus( + PHASE, + "Created Cloud Foundry service bindings between application '" + + description.getServerGroupName() + + "' and services: " + + serviceInstanceNames); + + return null; + } + + private void removeBindings(String serviceGuid, String appGuid) { + description.getClient().getApplications().getServiceBindingsByApp(appGuid).stream() + .filter(s -> serviceGuid.equalsIgnoreCase(s.getEntity().getServiceInstanceGuid())) + .findAny() + .ifPresent( + s -> + description + .getClient() + .getServiceInstances() + .deleteServiceBinding(s.getMetadata().getGuid())); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceKeyAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceKeyAtomicOperation.java new file mode
100644 index 00000000000..71368fe9d34 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceKeyAtomicOperation.java @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class CreateCloudFoundryServiceKeyAtomicOperation + implements AtomicOperation { + private static final String PHASE = "CREATE_SERVICE_KEY"; + private final CreateCloudFoundryServiceKeyDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public ServiceKeyResponse operate(List priorOutputs) { + Task task = getTask(); + + CloudFoundrySpace space = description.getSpace(); + String serviceInstanceName = description.getServiceInstanceName(); + String serviceKeyName = description.getServiceKeyName(); + task.updateStatus( + PHASE, + "Creating service key '" + + serviceKeyName + + "' for service '" + + serviceInstanceName + + "' in '" + + space.getRegion() + + "'"); + + ServiceKeyResponse results = + description + .getClient() + .getServiceKeys() + .createServiceKey(space, serviceInstanceName, serviceKeyName); + + task.updateStatus(PHASE, "Finished creating service key '" + serviceKeyName + "'"); + + return results; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryLoadBalancerAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..16cc11fbfb7 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryLoadBalancerAtomicOperation.java @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
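These atomic operations all read their progress Task from TaskRepository.threadLocalTask, so a caller (normally Spinnaker's orchestration engine) must seed it before invoking operate. A rough sketch of that contract; the DefaultTask constructor argument is an arbitrary id, and the direct invocation is purely illustrative:

```java
import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CreateCloudFoundryServiceKeyAtomicOperation;
import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
import java.util.Collections;

class TaskWiringSketch {
  static void run(CreateCloudFoundryServiceKeyAtomicOperation op) {
    // getTask() inside the operation reads this thread-local; without it,
    // the first updateStatus(...) call would fail with a NullPointerException.
    TaskRepository.threadLocalTask.set(new DefaultTask("create-service-key-demo"));
    op.operate(Collections.emptyList());
  }
}
```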
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class DeleteCloudFoundryLoadBalancerAtomicOperation implements AtomicOperation { + private static final String PHASE = "DELETE_LOAD_BALANCER"; + private final DeleteCloudFoundryLoadBalancerDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + CloudFoundryClient client = description.getClient(); + + if (description.getLoadBalancer() == null) { + throw new CloudFoundryApiException("Load balancer does not exist"); + } else { + getTask() + .updateStatus(PHASE, "Deleting load balancer " + description.getLoadBalancer().getName()); + client.getRoutes().deleteRoute(description.getLoadBalancer().getId()); + getTask() + .updateStatus(PHASE, "Deleted load balancer " + description.getLoadBalancer().getName()); + } + + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceBindingAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceBindingAtomicOperation.java new file mode 100644 index 00000000000..8d3ec46a91c --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceBindingAtomicOperation.java @@ -0,0 +1,92 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceBinding; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceBindingDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class DeleteCloudFoundryServiceBindingAtomicOperation implements AtomicOperation { + + private static final String PHASE = "DELETE_SERVICE_BINDINGS"; + private final DeleteCloudFoundryServiceBindingDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + List<String> unbindingServiceInstanceNames = + description.getServiceUnbindingRequests().stream() + .map( + DeleteCloudFoundryServiceBindingDescription.ServiceUnbindingRequest + ::getServiceInstanceName) + .collect(Collectors.toList()); + + List<String> unbindingServiceBindingNames = + unbindingServiceInstanceNames.stream() + .map(CloudFoundryClientUtils::convertToValidServiceBindingName) + .collect(Collectors.toList()); + + getTask() + .updateStatus( + PHASE, + "Unbinding Cloud Foundry application '" + + description.getServerGroupName() + + "' from services: " + + unbindingServiceInstanceNames); + + List<Resource<ServiceBinding>> bindings = + description + .getClient() + .getApplications() + .getServiceBindingsByApp(description.getServerGroupId()); + + removeBindings(bindings, unbindingServiceBindingNames); + + getTask() + .updateStatus( + PHASE, + "Successfully unbound Cloud Foundry application '" + + description.getServerGroupName() + + "' from services: " + + unbindingServiceInstanceNames); + + return null; + } + + private void removeBindings( + List<Resource<ServiceBinding>> bindings, List<String> unbindingServiceBindingNames) { + bindings.stream() + .filter(b -> unbindingServiceBindingNames.contains(b.getEntity().getName())) + .forEach( + b -> + description + .getClient() + .getServiceInstances() + .deleteServiceBinding(b.getMetadata().getGuid())); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceKeyAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceKeyAtomicOperation.java new file mode 100644 index 00000000000..bb5f152e9e7 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceKeyAtomicOperation.java @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class DeleteCloudFoundryServiceKeyAtomicOperation + implements AtomicOperation { + private static final String PHASE = "DELETE_SERVICE_KEY"; + private final DeleteCloudFoundryServiceKeyDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public ServiceKeyResponse operate(List priorOutputs) { + Task task = getTask(); + + CloudFoundrySpace space = description.getSpace(); + String serviceInstanceName = description.getServiceInstanceName(); + String serviceKeyName = description.getServiceKeyName(); + task.updateStatus( + PHASE, + "Deleting service key '" + + serviceKeyName + + "' for service '" + + serviceInstanceName + + "' in '" + + space.getRegion() + + "'"); + + ServiceKeyResponse results = + description + .getClient() + .getServiceKeys() + .deleteServiceKey(space, serviceInstanceName, serviceKeyName); + + task.updateStatus(PHASE, "Finished deleting service key '" + serviceKeyName + "'"); + + return results; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServerGroupAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServerGroupAtomicOperation.java new file mode 100644 index 00000000000..0d7fc33904d --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServerGroupAtomicOperation.java @@ -0,0 +1,590 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
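For completeness, a hypothetical usage sketch of the service-key deletion above. The setters are assumed to come from the Lombok-generated description class, and `client`/`space` stand for values resolved elsewhere:

```java
import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse;
import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceKeyDescription;
import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;

class DeleteServiceKeySketch {
  ServiceKeyResponse run(CloudFoundryClient client, CloudFoundrySpace space) {
    TaskRepository.threadLocalTask.set(new DefaultTask("demo")); // operate() logs via this task

    DeleteCloudFoundryServiceKeyDescription description =
        new DeleteCloudFoundryServiceKeyDescription();
    description.setClient(client); // setters assumed on the description
    description.setSpace(space);
    description.setServiceInstanceName("my-db");
    description.setServiceKeyName("my-db-key");

    return new DeleteCloudFoundryServiceKeyAtomicOperation(description).operate(null);
  }
}
```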
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CloudFoundryOperationUtils.describeProcessState; +import static com.netflix.spinnaker.clouddriver.deploy.DeploymentResult.Deployment; +import static com.netflix.spinnaker.clouddriver.deploy.DeploymentResult.Deployment.Capacity; +import static java.util.stream.Collectors.toList; + +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.artifacts.maven.MavenArtifactCredentials; +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts.CloudFoundryArtifactCredentials; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.CreateServiceBinding; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreatePackage; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Lifecycle; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Process; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessRequest; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.CloudFoundryServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.ServerGroupMetaDataEnvVar; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.*; +import java.util.*; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Data; +import lombok.RequiredArgsConstructor; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; + +@RequiredArgsConstructor +public class DeployCloudFoundryServerGroupAtomicOperation + extends AbstractCloudFoundryLoadBalancerMappingOperation + implements AtomicOperation { + private static final String PHASE = "DEPLOY"; + + private final OperationPoller operationPoller; + private final DeployCloudFoundryServerGroupDescription description; + + @Override + protected String getPhase() { + return PHASE; + } + + @Override + public DeploymentResult operate(List priorOutputs) { + getTask().updateStatus(PHASE, "Deploying '" + description.getApplication() + "'"); + CloudFoundryClient client = description.getClient(); + + CloudFoundryServerGroupNameResolver serverGroupNameResolver = + new CloudFoundryServerGroupNameResolver(client, description.getSpace()); + + description.setServerGroupName( + serverGroupNameResolver.resolveNextServerGroupName( + description.getApplication(), + description.getStack(), 
+                description.getFreeFormDetails(),
+                false));
+
+    CloudFoundryServerGroup serverGroup = createApplication(description);
+    String packageId;
+
+    File packageArtifact =
+        description.getDocker() == null ? downloadPackageArtifact(description) : null;
+    try {
+      packageId = buildPackage(serverGroup.getId(), description, packageArtifact);
+    } finally {
+      if (packageArtifact != null) {
+        packageArtifact.delete();
+      }
+    }
+
+    // create service bindings and configure app/processes before building droplet
+    createServiceBindings(serverGroup, description);
+
+    // build the app droplet
+    buildDroplet(packageId, serverGroup.getId(), description);
+
+    // update processes before scaling them
+    updateProcess(serverGroup.getId(), description);
+    scaleApplication(serverGroup.getId(), description);
+
+    if (!mapRoutes(
+        description,
+        description.getApplicationAttributes().getRoutes(),
+        description.getSpace(),
+        serverGroup.getId())) {
+      return deploymentResult();
+    }
+
+    final int desiredInstanceCount = description.getApplicationAttributes().getInstances();
+    if (description.isStartApplication() && desiredInstanceCount > 0) {
+      client.getApplications().startApplication(serverGroup.getId());
+      ProcessStats.State state =
+          operationPoller.waitForOperation(
+              () -> client.getApplications().getAppState(serverGroup.getId()),
+              inProgressState ->
+                  inProgressState == ProcessStats.State.RUNNING
+                      || inProgressState == ProcessStats.State.CRASHED,
+              null,
+              getTask(),
+              description.getServerGroupName(),
+              PHASE);
+
+      if (state != ProcessStats.State.RUNNING) {
+        throw new CloudFoundryApiException(
+            "Failed to start '"
+                + description.getServerGroupName()
+                + "' which instead "
+                + describeProcessState(state));
+      }
+    } else {
+      getTask()
+          .updateStatus(
+              PHASE, "Stop state requested for '" + description.getServerGroupName() + "'");
+    }
+
+    getTask().updateStatus(PHASE, "Deployed '" + description.getApplication() + "'");
+
+    return deploymentResult();
+  }
+
+  private void createServiceBindings(
+      CloudFoundryServerGroup serverGroup, DeployCloudFoundryServerGroupDescription description) {
+
+    List<String> serviceNames = description.getApplicationAttributes().getServices();
+    if (serviceNames == null || serviceNames.isEmpty()) return;
+
+    getTask()
+        .updateStatus(
+            PHASE,
+            "Creating Cloud Foundry service bindings between application '"
+                + description.getServerGroupName()
+                + "' and services: "
+                + description.getApplicationAttributes().getServices());
+
+    Map<String, String> serviceInstanceGuids = new HashMap<>();
+
+    // find guids for services
+    description
+        .getClient()
+        .getServiceInstances()
+        .findAllServicesBySpaceAndNames(serverGroup.getSpace(), serviceNames)
+        .forEach(s -> serviceInstanceGuids.put(s.getEntity().getName(), s.getMetadata().getGuid()));
+
+    // try to create a service binding request for each service
+    List<CreateServiceBinding> bindings =
+        serviceNames.stream()
+            .map(
+                name -> {
+                  String serviceGuid = serviceInstanceGuids.get(name);
+                  if (serviceGuid == null || serviceGuid.isEmpty()) {
+                    getTask()
+                        .updateStatus(
+                            PHASE,
+                            "Failed to create Cloud Foundry service bindings between application '"
+                                + description.getServerGroupName()
+                                + "' and services: "
+                                + serviceNames);
+
+                    throw new CloudFoundryApiException(
+                        "Unable to find service with the name: '"
+                            + name
+                            + "' in "
+                            + serverGroup.getSpace());
+                  }
+
+                  return new CreateServiceBinding(
+                      serviceGuid, serverGroup.getId(), name, Collections.emptyMap());
+                })
+            .collect(Collectors.toList());
+
+    bindings.forEach(b ->
description.getClient().getServiceInstances().createServiceBinding(b)); + + getTask() + .updateStatus( + PHASE, + "Created Cloud Foundry service bindings between application '" + + description.getServerGroupName() + + "' and services: " + + description.getApplicationAttributes().getServices()); + } + + private DeploymentResult deploymentResult() { + DeploymentResult deploymentResult = new DeploymentResult(); + deploymentResult.setServerGroupNames( + Collections.singletonList( + description.getRegion() + ":" + description.getServerGroupName())); + deploymentResult + .getServerGroupNameByRegion() + .put(description.getRegion(), description.getServerGroupName()); + deploymentResult.setMessages( + getTask().getHistory().stream() + .map(hist -> hist.getPhase() + ":" + hist.getStatus()) + .collect(toList())); + List routes = description.getApplicationAttributes().getRoutes(); + if (routes == null) { + routes = Collections.emptyList(); + } + final Integer desiredInstanceCount = description.getApplicationAttributes().getInstances(); + final Deployment deployment = new Deployment(); + deployment.setCloudProvider(CloudFoundryCloudProvider.ID); + deployment.setAccount(description.getAccountName()); + deployment.setServerGroupName(description.getServerGroupName()); + final Capacity capacity = new Capacity(); + capacity.setDesired(desiredInstanceCount); + deployment.setCapacity(capacity); + final Map metadata = new HashMap<>(); + metadata.put("env", description.getApplicationAttributes().getEnv()); + metadata.put("routes", routes); + deployment.setMetadata(metadata); + if (!routes.isEmpty()) { + deployment.setLocation(routes.get(0)); + } + deploymentResult.setDeployments(Collections.singleton(deployment)); + return deploymentResult; + } + + private static CloudFoundryServerGroup createApplication( + DeployCloudFoundryServerGroupDescription description) { + CloudFoundryClient client = description.getClient(); + getTask() + .updateStatus( + PHASE, "Creating Cloud Foundry application '" + description.getServerGroupName() + "'"); + + Lifecycle lifecycle = + description.getDocker() == null + ? 
new Lifecycle(Lifecycle.Type.BUILDPACK, description.getApplicationAttributes()) + : new Lifecycle(Lifecycle.Type.DOCKER, description.getApplicationAttributes()); + + CloudFoundryServerGroup serverGroup = + client + .getApplications() + .createApplication( + description.getServerGroupName(), + description.getSpace(), + getEnvironmentVars(description), + lifecycle); + getTask() + .updateStatus( + PHASE, "Created Cloud Foundry application '" + description.getServerGroupName() + "'"); + + return serverGroup; + } + + private static Map getEnvironmentVars( + DeployCloudFoundryServerGroupDescription description) { + Map environmentVars = + Optional.ofNullable(description.getApplicationAttributes().getEnv()) + .map(HashMap::new) + .orElse(new HashMap<>()); + + final Artifact applicationArtifact = description.getApplicationArtifact(); + if (CloudFoundryArtifactCredentials.TYPE.equals(applicationArtifact.getType())) { + CloudFoundryClient client = description.getClient(); + final CloudFoundrySpace orgAndSpaceName = + CloudFoundrySpace.fromRegion(applicationArtifact.getLocation()); + final Optional orgOptional = + client.getOrganizations().findByName(orgAndSpaceName.getOrganization().getName()); + orgOptional.ifPresent( + org -> { + final CloudFoundrySpace space = + client.getSpaces().findByName(org.getId(), orgAndSpaceName.getName()); + if (space != null) { + final CloudFoundryServerGroup serverGroup = + client + .getApplications() + .findServerGroupByNameAndSpaceId( + applicationArtifact.getName(), space.getId()); + if (serverGroup != null) { + serverGroup.getEnv().entrySet().stream() + .filter(e -> e.getKey().startsWith(ServerGroupMetaDataEnvVar.PREFIX)) + .forEach(i -> environmentVars.put(i.getKey(), i.getValue().toString())); + } + } + }); + } + + final ExternalReference artifactInfo = resolveArtifactInfo(description); + artifactInfo + .getName() + .map(name -> environmentVars.put(ServerGroupMetaDataEnvVar.ArtifactName.envVarName, name)); + artifactInfo + .getNumber() + .map( + number -> + environmentVars.put(ServerGroupMetaDataEnvVar.ArtifactVersion.envVarName, number)); + artifactInfo + .getUrl() + .map(url -> environmentVars.put(ServerGroupMetaDataEnvVar.ArtifactUrl.envVarName, url)); + final ExternalReference buildInfo = resolveBuildInfo(description); + buildInfo + .getName() + .map(name -> environmentVars.put(ServerGroupMetaDataEnvVar.JobName.envVarName, name)); + buildInfo + .getNumber() + .map(number -> environmentVars.put(ServerGroupMetaDataEnvVar.JobNumber.envVarName, number)); + buildInfo + .getUrl() + .map(url -> environmentVars.put(ServerGroupMetaDataEnvVar.JobUrl.envVarName, url)); + Optional.ofNullable(description.getExecutionId()) + .ifPresent( + executionId -> + environmentVars.put(ServerGroupMetaDataEnvVar.PipelineId.envVarName, executionId)); + + return environmentVars; + } + + private static ExternalReference resolveArtifactInfo( + DeployCloudFoundryServerGroupDescription description) { + return Optional.ofNullable(description.getApplicationArtifact()) + .map( + applicationArtifact -> { + final ExternalReference.ExternalReferenceBuilder artifactInfo = + ExternalReference.builder(); + if (MavenArtifactCredentials.TYPES.contains(applicationArtifact.getType())) { + final ArtifactCredentials artifactCredentials = + description.getArtifactCredentials(); + artifactInfo + .name(artifactCredentials.resolveArtifactName(applicationArtifact)) + .number(artifactCredentials.resolveArtifactVersion(applicationArtifact)) + .url(Optional.ofNullable(applicationArtifact.getLocation())); + } + 
return artifactInfo.build(); + }) + .orElseGet(() -> ExternalReference.builder().build()); + } + + private static ExternalReference resolveBuildInfo( + DeployCloudFoundryServerGroupDescription description) { + Map buildInfo = null; + final Artifact applicationArtifact = description.getApplicationArtifact(); + if (applicationArtifact != null) { + buildInfo = (Map) applicationArtifact.getMetadata("build"); + } + if (buildInfo == null) { + final Map trigger = description.getTrigger(); + if (trigger != null) { + final String triggerType = (String) trigger.get("type"); + if (triggerType.equals("jenkins") || triggerType.equals("manual")) { + final Map triggerBuildInfo = + (Map) trigger.get("buildInfo"); + if (triggerBuildInfo != null) { + buildInfo = triggerBuildInfo; + } + } + } + } + return Optional.ofNullable(buildInfo) + .map( + buildInfoMap -> + ExternalReference.builder() + .name(Optional.ofNullable(buildInfoMap.get("name")).map(Object::toString)) + .number(Optional.ofNullable(buildInfoMap.get("number")).map(Object::toString)) + .url(Optional.ofNullable(buildInfoMap.get("url")).map(Object::toString)) + .build()) + .orElse(ExternalReference.builder().build()); + } + + @Data + @Builder + private static class ExternalReference { + @Builder.Default private Optional name = Optional.empty(); + + @Builder.Default private Optional number = Optional.empty(); + + @Builder.Default private Optional url = Optional.empty(); + } + + @NotNull + private File downloadPackageArtifact(DeployCloudFoundryServerGroupDescription description) { + File file = null; + try { + file = File.createTempFile(UUID.randomUUID().toString(), null); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + if (file != null) file.delete(); + } + + try (InputStream artifactInputStream = + description.getArtifactCredentials().download(description.getApplicationArtifact()); + FileOutputStream fileOutputStream = new FileOutputStream(file)) { + IOUtils.copy(artifactInputStream, fileOutputStream); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + return file; + } + + private String buildPackage( + String serverGroupId, + DeployCloudFoundryServerGroupDescription description, + File packageArtifact) { + CloudFoundryClient client = description.getClient(); + getTask() + .updateStatus( + PHASE, "Creating package for application '" + description.getServerGroupName() + "'"); + + String packageId; + if (packageArtifact != null) { + // Bits Package + packageId = + client + .getApplications() + .createPackage(new CreatePackage(serverGroupId, CreatePackage.Type.BITS, null)); + client.getApplications().uploadPackageBits(packageId, packageArtifact); + } else { + // Docker Package + packageId = + client + .getApplications() + .createPackage( + new CreatePackage( + serverGroupId, CreatePackage.Type.DOCKER, description.getDocker())); + } + + operationPoller.waitForOperation( + () -> client.getApplications().packageUploadComplete(packageId), + Function.identity(), + null, + getTask(), + description.getServerGroupName(), + PHASE); + + getTask() + .updateStatus( + PHASE, + "Completed creating package for application '" + + description.getServerGroupName() + + "'"); + + return packageId; + } + + private void buildDroplet( + String packageId, + String serverGroupId, + DeployCloudFoundryServerGroupDescription description) { + CloudFoundryClient client = description.getClient(); + getTask().updateStatus(PHASE, "Building droplet for package '" + packageId + "'"); + + Integer memoryAmount = + convertToMb("memory", 
description.getApplicationAttributes().getMemory()); + Integer diskSizeAmount = + convertToMb("disk quota", description.getApplicationAttributes().getDiskQuota()); + + String buildId = client.getApplications().createBuild(packageId, memoryAmount, diskSizeAmount); + + operationPoller.waitForOperation( + () -> client.getApplications().buildCompleted(buildId), + Function.identity(), + null, + getTask(), + description.getServerGroupName(), + PHASE); + + String dropletGuid = client.getApplications().findDropletGuidFromBuildId(buildId); + + client.getApplications().setCurrentDroplet(serverGroupId, dropletGuid); + getTask().updateStatus(PHASE, "Droplet built for package '" + packageId + "'"); + } + + private void scaleApplication( + String serverGroupId, DeployCloudFoundryServerGroupDescription description) { + CloudFoundryClient client = description.getClient(); + getTask().updateStatus(PHASE, "Scaling application '" + description.getServerGroupName() + "'"); + + Integer memoryAmount = + convertToMb("memory", description.getApplicationAttributes().getMemory()); + Integer diskSizeAmount = + convertToMb("disk quota", description.getApplicationAttributes().getDiskQuota()); + client + .getProcesses() + .scaleProcess( + serverGroupId, + description.getApplicationAttributes().getInstances(), + memoryAmount, + diskSizeAmount); + + if (!description.getApplicationAttributes().getProcesses().isEmpty()) { + List processes = client.getProcesses().getAllProcessesByAppId(serverGroupId); + + for (ProcessRequest req : description.getApplicationAttributes().getProcesses()) { + String processGuid = getProcessGuidByType(processes, req.getType()); + + Integer pMemoryAmount = convertToMb("memory", req.getMemory()); + Integer pDiskSizeAmount = convertToMb("disk quota", req.getDiskQuota()); + client + .getProcesses() + .scaleProcess(processGuid, req.getInstances(), pMemoryAmount, pDiskSizeAmount); + } + } + getTask().updateStatus(PHASE, "Scaled application '" + description.getServerGroupName() + "'"); + } + + private void updateProcess( + String serverGroupId, DeployCloudFoundryServerGroupDescription description) { + CloudFoundryClient client = description.getClient(); + getTask().updateStatus(PHASE, "Updating process '" + description.getServerGroupName() + "'"); + + client + .getProcesses() + .updateProcess( + serverGroupId, + description.getApplicationAttributes().getCommand(), + description.getApplicationAttributes().getHealthCheckType(), + description.getApplicationAttributes().getHealthCheckHttpEndpoint(), + description.getApplicationAttributes().getTimeout(), + null); + + if (!description.getApplicationAttributes().getProcesses().isEmpty()) { + List processes = client.getProcesses().getAllProcessesByAppId(serverGroupId); + + for (ProcessRequest req : description.getApplicationAttributes().getProcesses()) { + String processGuid = getProcessGuidByType(processes, req.getType()); + + client + .getProcesses() + .updateProcess( + processGuid, + req.getCommand(), + req.getHealthCheckType(), + req.getHealthCheckHttpEndpoint(), + req.getTimeout(), + req.getHealthCheckInvocationTimeout()); + } + } + + getTask().updateStatus(PHASE, "Updated process '" + description.getServerGroupName() + "'"); + } + + // VisibleForTesting + @Nullable + static Integer convertToMb(String field, @Nullable String size) { + if (size == null) { + return null; + } else if (StringUtils.isNumeric(size)) { + return Integer.parseInt(size); + } else { + size = size.toLowerCase(); + if (size.endsWith("g") || size.endsWith("gb")) { + String value = 
size.substring(0, size.indexOf('g')); + if (StringUtils.isNumeric(value)) return Integer.parseInt(value) * 1024; + } else if (size.endsWith("m") || size.endsWith("mb")) { + String value = size.substring(0, size.indexOf('m')); + if (StringUtils.isNumeric(value)) return Integer.parseInt(value); + } + } + + throw new IllegalArgumentException( + String.format("Invalid size for application %s = '%s'", field, size)); + } + + // Helper method for filtering and returning a process guid by type + private String getProcessGuidByType(List processes, String type) { + return processes.stream() + .filter(p -> p.getType().equalsIgnoreCase(type)) + .map(Process::getGuid) + .findFirst() + .orElseThrow( + () -> new CloudFoundryApiException("Unable to find a process with type: " + type)); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServiceAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServiceAtomicOperation.java new file mode 100644 index 00000000000..49d5c6be6aa --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServiceAtomicOperation.java @@ -0,0 +1,98 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
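Since convertToMb carries the unit-handling rules for both memory and disk sizing, a few worked cases may help. Each value follows directly from the branches above; same-package access is assumed because the method is package-private:

```java
// Plain numbers are already megabytes:
Integer plain = DeployCloudFoundryServerGroupAtomicOperation.convertToMb("memory", "1024"); // 1024
// g / gb suffixes multiply by 1024, case-insensitively:
Integer gig = DeployCloudFoundryServerGroupAtomicOperation.convertToMb("memory", "2G"); // 2048
// m / mb suffixes are taken as megabytes:
Integer meg = DeployCloudFoundryServerGroupAtomicOperation.convertToMb("disk quota", "512mb"); // 512
// null sizes pass through unchanged:
Integer none = DeployCloudFoundryServerGroupAtomicOperation.convertToMb("memory", null); // null
// Fractional or unrecognized values fall through to the exception, e.g.
// convertToMb("memory", "1.5gb") throws IllegalArgumentException because "1.5" is not numeric.
```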
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.UPDATE; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class DeployCloudFoundryServiceAtomicOperation + implements AtomicOperation { + private static final String PHASE = "DEPLOY_SERVICE"; + private final DeployCloudFoundryServiceDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public ServiceInstanceResponse operate(List priorOutputs) { + Task task = getTask(); + final ServiceInstanceResponse serviceInstanceResponse; + final String serviceInstanceName; + if (!description.isUserProvided()) { + DeployCloudFoundryServiceDescription.ServiceAttributes serviceAttributes = + description.getServiceAttributes(); + serviceInstanceName = serviceAttributes.getServiceInstanceName(); + serviceInstanceResponse = + description + .getClient() + .getServiceInstances() + .createServiceInstance( + serviceInstanceName, + serviceAttributes.getService(), + serviceAttributes.getServicePlan(), + serviceAttributes.getTags(), + serviceAttributes.getParameterMap(), + serviceAttributes.isUpdatable(), + description.getSpace()); + String gerund = serviceInstanceResponse.getType() == UPDATE ? "Updating" : "Creating"; + serviceInstanceResponse.setPreviousInstanceName(serviceAttributes.getPreviousInstanceName()); + task.updateStatus( + PHASE, + gerund + + " service instance '" + + serviceInstanceName + + "' from service " + + serviceAttributes.getService() + + " and service plan " + + serviceAttributes.getServicePlan()); + } else { + DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes + userProvidedServiceAttributes = description.getUserProvidedServiceAttributes(); + serviceInstanceName = userProvidedServiceAttributes.getServiceInstanceName(); + task.updateStatus( + PHASE, "Creating user-provided service instance '" + serviceInstanceName + "'"); + serviceInstanceResponse = + description + .getClient() + .getServiceInstances() + .createUserProvidedServiceInstance( + serviceInstanceName, + userProvidedServiceAttributes.getSyslogDrainUrl(), + userProvidedServiceAttributes.getTags(), + userProvidedServiceAttributes.getCredentials(), + userProvidedServiceAttributes.getRouteServiceUrl(), + userProvidedServiceAttributes.isUpdatable(), + description.getSpace()); + String verb = serviceInstanceResponse.getType() == UPDATE ? 
"Updated" : "Created"; + serviceInstanceResponse.setPreviousInstanceName( + userProvidedServiceAttributes.getPreviousInstanceName()); + task.updateStatus( + PHASE, verb + " user-provided service instance '" + serviceInstanceName + "'"); + } + + return serviceInstanceResponse; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServerGroupAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServerGroupAtomicOperation.java new file mode 100644 index 00000000000..6f1450bab6d --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServerGroupAtomicOperation.java @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DestroyCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class DestroyCloudFoundryServerGroupAtomicOperation implements AtomicOperation { + private static final String PHASE = "DESTROY_SERVER_GROUP"; + private final DestroyCloudFoundryServerGroupDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask().updateStatus(PHASE, "Destroying '" + description.getServerGroupName() + "'"); + CloudFoundryClient client = description.getClient(); + client.getApplications().deleteApplication(description.getServerGroupId()); + getTask().updateStatus(PHASE, "Destroyed '" + description.getServerGroupName() + "'"); + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServiceAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServiceAtomicOperation.java new file mode 100644 index 00000000000..9214547b9a8 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServiceAtomicOperation.java @@ -0,0 +1,118 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.NOT_FOUND;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceBinding;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DestroyCloudFoundryServiceDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public class DestroyCloudFoundryServiceAtomicOperation
+    implements AtomicOperation<ServiceInstanceResponse> {
+  private static final String PHASE = "DELETE_SERVICE";
+  private final DestroyCloudFoundryServiceDescription description;
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  @Override
+  public ServiceInstanceResponse operate(List priorOutputs) {
+    Task task = getTask();
+
+    if (description.isRemoveBindings()) {
+      task.updateStatus(
+          PHASE,
+          "Started removing service bindings for '"
+              + description.getServiceInstanceName()
+              + "' from space "
+              + description.getSpace().getName());
+
+      // create a map of binding guid to service binding entity
+      Map<String, ServiceBinding> map = new HashMap<>();
+      description
+          .getClient()
+          .getServiceInstances()
+          .findAllServiceBindingsByServiceName(
+              description.getRegion(), description.getServiceInstanceName())
+          .forEach(r -> map.put(r.getMetadata().getGuid(), r.getEntity()));
+
+      // make sure the bindings belong only to server groups of the given Spinnaker
+      // application before deleting; otherwise throw
+      for (ServiceBinding sb : map.values()) {
+        CloudFoundryServerGroup sg =
+            description.getClient().getApplications().findById(sb.getAppGuid());
+        String appName = description.getApplications().stream().findFirst().get();
+        if (!sg.getMoniker().getApp().equals(appName)) {
+          throw new IllegalArgumentException(
+              "Unable to unbind server group '"
+                  + sg.getName()
+                  + "' from '"
+                  + description.getServiceInstanceName()
+                  + "' because it doesn't belong to the application '"
+                  + appName
+                  + "'");
+        }
+      }
+
+      // delete the service bindings
+      for (String sbKey : map.keySet()) {
+        description.getClient().getServiceInstances().deleteServiceBinding(sbKey);
+      }
+
+      task.updateStatus(
+          PHASE,
+          "Finished removing service bindings for '"
+              + description.getServiceInstanceName()
+              + "' from space "
+              + description.getSpace().getName());
+    }
+
+    ServiceInstanceResponse response =
+        description
+            .getClient()
+            .getServiceInstances()
+            .destroyServiceInstance(description.getSpace(), description.getServiceInstanceName());
+    task.updateStatus(
+        PHASE,
"Started removing service instance '" + + description.getServiceInstanceName() + + "' from space " + + description.getSpace().getName()); + LastOperation.State state = response.getState(); + if (state == NOT_FOUND) { + throw new RuntimeException( + "Service instance " + + description.getServiceInstanceName() + + " not found, in " + + description.getSpace().getRegion()); + } + return response; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/MapLoadBalancersAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/MapLoadBalancersAtomicOperation.java new file mode 100644 index 00000000000..6fbc14c5428 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/MapLoadBalancersAtomicOperation.java @@ -0,0 +1,55 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.LoadBalancersDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class MapLoadBalancersAtomicOperation + extends AbstractCloudFoundryLoadBalancerMappingOperation implements AtomicOperation { + + public static final String PHASE = "MAP_LOAD_BALANCERS"; + + @Override + protected String getPhase() { + return PHASE; + } + + private final LoadBalancersDescription description; + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + PHASE, "Mapping '" + description.getServerGroupName() + "' with loadbalancer(s)."); + + if (mapRoutes( + description, + description.getRoutes(), + description.getSpace(), + description.getServerGroupId())) { + getTask() + .updateStatus( + PHASE, "Mapped '" + description.getServerGroupName() + "' with loadbalancer(s)."); + } + + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ScaleCloudFoundryServerGroupAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ScaleCloudFoundryServerGroupAtomicOperation.java new file mode 100644 index 00000000000..668e7c12e36 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ScaleCloudFoundryServerGroupAtomicOperation.java @@ -0,0 +1,93 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CloudFoundryOperationUtils.describeProcessState; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats.State; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ScaleCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import java.util.Optional; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class ScaleCloudFoundryServerGroupAtomicOperation implements AtomicOperation { + private static final String PHASE = "RESIZE_SERVER_GROUP"; + + private final OperationPoller operationPoller; + private final ScaleCloudFoundryServerGroupDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask().updateStatus(PHASE, "Resizing '" + description.getServerGroupName() + "'"); + + final CloudFoundryClient client = description.getClient(); + + ServerGroup.Capacity capacity = description.getCapacity(); + boolean scaleStoppedInstance = + Optional.ofNullable(description.getScaleStoppedServerGroup()).orElse(false); + Integer numInstances = + Optional.ofNullable(capacity) + .map(c -> scaleStoppedInstance ? 
capacity.getMax() : capacity.getDesired()) + .orElse(null); + + client + .getProcesses() + .scaleProcess( + description.getServerGroupId(), + numInstances, + description.getMemory(), + description.getDiskQuota()); + + State state = + operationPoller.waitForOperation( + () -> client.getApplications().getAppState(description.getServerGroupId()), + inProgressState -> + (inProgressState == State.RUNNING + || inProgressState == State.CRASHED + || inProgressState == State.DOWN), + null, + getTask(), + description.getServerGroupName(), + PHASE); + + if (state == State.RUNNING + || (state == State.DOWN && description.getCapacity().getDesired() == 0) + || (state == State.DOWN && Boolean.TRUE.equals(description.getScaleStoppedServerGroup()))) { + getTask().updateStatus(PHASE, "Resized '" + description.getServerGroupName() + "'"); + } else { + throw new CloudFoundryApiException( + "Failed to start '" + + description.getServerGroupName() + + "' which instead " + + describeProcessState(state)); + } + + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ShareCloudFoundryServiceAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ShareCloudFoundryServiceAtomicOperation.java new file mode 100644 index 00000000000..d6168c8cdfb --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ShareCloudFoundryServiceAtomicOperation.java @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
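One subtlety in the scale operation above: for a stopped server group it resizes to the max rather than the desired capacity. A worked example of that selection, with a no-arg constructor and setters assumed on ServerGroup.Capacity:

```java
import com.netflix.spinnaker.clouddriver.model.ServerGroup;
import java.util.Optional;

ServerGroup.Capacity capacity = new ServerGroup.Capacity();
capacity.setDesired(2);
capacity.setMax(3);

boolean scaleStoppedInstance = true; // description.getScaleStoppedServerGroup()
Integer numInstances =
    Optional.ofNullable(capacity)
        .map(c -> scaleStoppedInstance ? c.getMax() : c.getDesired())
        .orElse(null); // 3 here; 2 for a running group; null when no capacity was given
```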
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static java.util.stream.Collectors.toSet; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ShareCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import java.util.Set; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class ShareCloudFoundryServiceAtomicOperation + implements AtomicOperation { + private static final String PHASE = "SHARE_SERVICE"; + private final ShareCloudFoundryServiceDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public ServiceInstanceResponse operate(List priorOutputs) { + Task task = getTask(); + + String serviceInstanceName = description.getServiceInstanceName(); + String region = description.getRegion(); + Set shareToRegions = description.getShareToRegions(); + task.updateStatus( + PHASE, + "Sharing service instance '" + + serviceInstanceName + + "' from '" + + region + + "' into " + + String.join(", ", shareToRegions.stream().map(s -> "'" + s + "'").collect(toSet()))); + + ServiceInstanceResponse results = + description + .getClient() + .getServiceInstances() + .shareServiceInstance(region, serviceInstanceName, shareToRegions); + + task.updateStatus(PHASE, "Finished sharing service instance '" + serviceInstanceName + "'"); + + return results; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StartCloudFoundryServerGroupAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StartCloudFoundryServerGroupAtomicOperation.java new file mode 100644 index 00000000000..64aeed7fddb --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StartCloudFoundryServerGroupAtomicOperation.java @@ -0,0 +1,72 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CloudFoundryOperationUtils.describeProcessState; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StartCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class StartCloudFoundryServerGroupAtomicOperation implements AtomicOperation { + private static final String PHASE = "START_SERVER_GROUP"; + + private final OperationPoller operationPoller; + private final StartCloudFoundryServerGroupDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask().updateStatus(PHASE, "Starting '" + description.getServerGroupName() + "'"); + + CloudFoundryClient client = description.getClient(); + + client.getApplications().startApplication(description.getServerGroupId()); + + ProcessStats.State state = + operationPoller.waitForOperation( + () -> client.getApplications().getAppState(description.getServerGroupId()), + inProgressState -> inProgressState != ProcessStats.State.STARTING, + null, + getTask(), + description.getServerGroupName(), + PHASE); + + if (state != ProcessStats.State.RUNNING) { + throw new CloudFoundryApiException( + "Failed to start '" + + description.getServerGroupName() + + "' which instead " + + describeProcessState(state)); + } + + getTask().updateStatus(PHASE, "Started '" + description.getServerGroupName() + "'"); + + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StopCloudFoundryServerGroupAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StopCloudFoundryServerGroupAtomicOperation.java new file mode 100644 index 00000000000..db723101feb --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StopCloudFoundryServerGroupAtomicOperation.java @@ -0,0 +1,73 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
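The start operation above treats any settled state other than RUNNING as a failure. A hypothetical failure-path sketch, assuming a mocked OperationPoller plus the same task and deep-stub setup as the destroy sketch earlier:

```java
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats;
import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StartCloudFoundryServerGroupDescription;
import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
import com.netflix.spinnaker.clouddriver.helpers.OperationPoller;

class StartCloudFoundryServerGroupAtomicOperationSketch {
  void surfacesNonRunningEndState() {
    TaskRepository.threadLocalTask.set(new DefaultTask("test"));

    StartCloudFoundryServerGroupDescription description =
        mock(StartCloudFoundryServerGroupDescription.class, RETURNS_DEEP_STUBS);
    OperationPoller operationPoller = mock(OperationPoller.class);
    when(operationPoller.waitForOperation(any(), any(), any(), any(), any(), any()))
        .thenReturn(ProcessStats.State.CRASHED); // polling settled on a non-RUNNING state

    assertThatThrownBy(
            () ->
                new StartCloudFoundryServerGroupAtomicOperation(operationPoller, description)
                    .operate(null))
        .isInstanceOf(CloudFoundryApiException.class)
        .hasMessageContaining("Failed to start");
  }
}
```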
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.CloudFoundryOperationUtils.describeProcessState; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StopCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class StopCloudFoundryServerGroupAtomicOperation implements AtomicOperation { + private static final String PHASE = "STOP_SERVER_GROUP"; + private final OperationPoller operationPoller; + private final StopCloudFoundryServerGroupDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask().updateStatus(PHASE, "Stopping '" + description.getServerGroupName() + "'"); + CloudFoundryClient client = description.getClient(); + + client.getApplications().stopApplication(description.getServerGroupId()); + + ProcessStats.State state = + operationPoller.waitForOperation( + () -> client.getApplications().getAppState(description.getServerGroupId()), + inProgressState -> + inProgressState != ProcessStats.State.STARTING + && inProgressState != ProcessStats.State.RUNNING + && inProgressState != ProcessStats.State.STOPPING, + null, + getTask(), + description.getServerGroupName(), + PHASE); + + if (state != ProcessStats.State.DOWN) { + throw new CloudFoundryApiException( + "Failed to stop '" + + description.getServerGroupName() + + "' which instead " + + describeProcessState(state)); + } + + getTask().updateStatus(PHASE, "Stopped '" + description.getServerGroupName() + "'"); + + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/TerminateCloudFoundryInstancesAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/TerminateCloudFoundryInstancesAtomicOperation.java new file mode 100644 index 00000000000..3072b6f8abd --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/TerminateCloudFoundryInstancesAtomicOperation.java @@ -0,0 +1,69 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static java.util.stream.Collectors.joining; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.TerminateCloudFoundryInstancesDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.Arrays; +import java.util.List; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class TerminateCloudFoundryInstancesAtomicOperation implements AtomicOperation { + private static final String PHASE = "TERMINATE_INSTANCES"; + private final TerminateCloudFoundryInstancesDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask().updateStatus(PHASE, "Terminating " + instanceDescription()); + final CloudFoundryClient client = description.getClient(); + + for (String instance : description.getInstanceIds()) { + try { + String serverGroupId = instance.substring(0, instance.lastIndexOf("-")); + String instanceIndex = instance.substring(instance.lastIndexOf("-") + 1); + client.getApplications().deleteAppInstance(serverGroupId, instanceIndex); + getTask().updateStatus(PHASE, "Terminated " + instanceDescription()); + } catch (CloudFoundryApiException e) { + throw new CloudFoundryApiException( + "Failed to terminate '" + instance + "': " + e.getMessage()); + } + } + + return null; + } + + private String instanceDescription() { + return description.getInstanceIds().length == 1 + ? "application instance '" + description.getInstanceIds()[0] + "'" + : "application instances [" + + Arrays.stream(description.getInstanceIds()) + .map(id -> "'" + id + "'") + .collect(joining(", ")) + + "]"; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnmapLoadBalancersAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnmapLoadBalancersAtomicOperation.java new file mode 100644 index 00000000000..88e5cebb444 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnmapLoadBalancersAtomicOperation.java @@ -0,0 +1,86 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
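The id handling above relies on instance ids having the shape '<serverGroupId>-<index>'; splitting at the last hyphen keeps GUIDs, which themselves contain hyphens, intact. For example:

```java
String instance = "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d-0";
String serverGroupId = instance.substring(0, instance.lastIndexOf("-"));
// "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d"
String instanceIndex = instance.substring(instance.lastIndexOf("-") + 1);
// "0"
```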
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Routes; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.LoadBalancersDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class UnmapLoadBalancersAtomicOperation implements AtomicOperation { + public static final String PHASE = "UNMAP_LOAD_BALANCERS"; + + private final LoadBalancersDescription description; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + PHASE, "Unmapping '" + description.getServerGroupName() + "' from load balancer(s)."); + + List routeList = description.getRoutes(); + if (routeList == null || routeList.size() == 0) { + throw new CloudFoundryApiException("No load balancer specified"); + } else { + Routes routes = description.getClient().getRoutes(); + Map> lbMap = + routeList.stream() + .collect( + Collectors.toMap( + uri -> uri, + uri -> + Optional.ofNullable( + routes.find(routes.toRouteId(uri), description.getSpace().getId())))); + routeList.forEach( + uri -> { + if (!Routes.isValidRouteFormat(uri)) { + throw new CloudFoundryApiException("Invalid format for load balancer '" + uri + "'"); + } else if (!lbMap.get(uri).isPresent()) { + throw new CloudFoundryApiException("Load balancer '" + uri + "' does not exist"); + } + }); + + CloudFoundryClient client = description.getClient(); + lbMap.forEach( + (uri, o) -> { + getTask().updateStatus(PHASE, "Unmapping load balancer '" + uri + "'"); + o.ifPresent( + lb -> + client + .getApplications() + .unmapRoute(description.getServerGroupId(), lb.getId())); + getTask().updateStatus(PHASE, "Unmapped load balancer '" + uri + "'"); + }); + } + + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperation.java new file mode 100644 index 00000000000..f399d73b0ba --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperation.java @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
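Since the unmap operation above fails fast when no routes are supplied, that path is cheap to pin down. A hypothetical test sketch; a thread-local task is still required because the operation logs before validating:

```java
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.LoadBalancersDescription;
import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
import java.util.Collections;

class UnmapLoadBalancersAtomicOperationSketch {
  void rejectsEmptyRouteList() {
    TaskRepository.threadLocalTask.set(new DefaultTask("test"));

    LoadBalancersDescription description =
        mock(LoadBalancersDescription.class, RETURNS_DEEP_STUBS);
    when(description.getRoutes()).thenReturn(Collections.emptyList());

    assertThatThrownBy(() -> new UnmapLoadBalancersAtomicOperation(description).operate(null))
        .isInstanceOf(CloudFoundryApiException.class)
        .hasMessageContaining("No load balancer specified");
  }
}
```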
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperation.java
new file mode 100644
index 00000000000..f399d73b0ba
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperation.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static java.util.stream.Collectors.toSet;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.UnshareCloudFoundryServiceDescription;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+import java.util.Set;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public class UnshareCloudFoundryServiceAtomicOperation
+    implements AtomicOperation<ServiceInstanceResponse> {
+  private static final String PHASE = "UNSHARE_SERVICE";
+  private final UnshareCloudFoundryServiceDescription description;
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  @Override
+  public ServiceInstanceResponse operate(List priorOutputs) {
+    Task task = getTask();
+
+    String serviceInstanceName = description.getServiceInstanceName();
+    Set<String> unshareFromRegions = description.getUnshareFromRegions();
+    task.updateStatus(
+        PHASE,
+        "Unsharing service instance '"
+            + serviceInstanceName
+            + "' from "
+            + String.join(
+                ", ", unshareFromRegions.stream().map(s -> "'" + s + "'").collect(toSet())));
+
+    ServiceInstanceResponse results =
+        description
+            .getClient()
+            .getServiceInstances()
+            .unshareServiceInstance(serviceInstanceName, unshareFromRegions);
+
+    task.updateStatus(PHASE, "Finished unsharing service instance '" + serviceInstanceName + "'");
+
+    return results;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UpsertCloudFoundryLoadBalancerAtomicOperation.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UpsertCloudFoundryLoadBalancerAtomicOperation.java
new file mode 100644
index 00000000000..2feda3366f6
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UpsertCloudFoundryLoadBalancerAtomicOperation.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.UpsertCloudFoundryLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public class UpsertCloudFoundryLoadBalancerAtomicOperation
+    implements AtomicOperation<CloudFoundryLoadBalancer> {
+  private static final String PHASE = "UPSERT_LOAD_BALANCER";
+  private final UpsertCloudFoundryLoadBalancerDescription description;
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  @Override
+  public CloudFoundryLoadBalancer operate(List priorOutputs) {
+    getTask().updateStatus(PHASE, "Creating load balancer in '" + description.getRegion() + "'");
+
+    CloudFoundryClient client = description.getClient();
+    CloudFoundryLoadBalancer loadBalancer =
+        client
+            .getRoutes()
+            .createRoute(
+                new RouteId(
+                    description.getHost(),
+                    description.getPath(),
+                    description.getPort(),
+                    description.getDomain().getId()),
+                description.getSpace().getId());
+
+    if (loadBalancer != null) {
+      getTask().updateStatus(PHASE, "Done creating load balancer");
+    } else {
+      throw new CloudFoundryApiException(
+          "Load balancer already exists in another organization and space");
+    }
+
+    return loadBalancer;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/util/RandomWordGenerator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/util/RandomWordGenerator.java
new file mode 100644
index 00000000000..b6f0c05cd27
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/util/RandomWordGenerator.java
@@ -0,0 +1,215 @@
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.util;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+// Based on
+// https://github.com/cloudfoundry/cli/blob/8c20e5118d7cb38f52d52b69060f6b77943dce6e/util/randomword/generator.go
+public class RandomWordGenerator {
+  private static final List<String> adjectives =
+      Arrays.asList(
+          "accountable",
+          "active",
+          "agile",
+          "anxious",
+          "appreciative",
+          "balanced",
+          "boisterous",
+          "bold",
+          "boring",
+          "brash",
+          "brave",
+          "bright",
+          "busy",
+          "chatty",
+          "cheerful",
+          "chipper",
+          "comedic",
+          "courteous",
+          "daring",
+          "delightful",
+          "empathic",
+          "excellent",
+          "exhausted",
+          "fantastic",
+          "fearless",
+          "fluent",
+          "forgiving",
+          "friendly",
+          "funny",
+          "generous",
+          "grateful",
+          "grouchy",
+          "grumpy",
+          "happy",
+          "hilarious",
+          "humble",
+          "impressive",
+          "insightful",
+          "intelligent",
+          "interested",
+          "kind",
+          "lean",
+          "nice",
+          "noisy",
+          "optimistic",
+          "patient",
+          "persistent",
+          "proud",
+          "quick",
+          "quiet",
+          "reflective",
+          "relaxed",
+          "reliable",
+          "responsible",
+          "responsive",
+          "rested",
+          "restless",
+          "shiny",
+          "shy",
+          "silly",
+          "sleepy",
+          "smart",
+          "spontaneous",
+ "surprised", + "sweet", + "talkative", + "terrific", + "thankful", + "timely", + "tired", + "turbulent", + "unexpected", + "wacky", + "wise", + "zany"); + + private static final List nouns = + Arrays.asList( + "ardvark", + "alligator", + "antelope", + "baboon", + "badger", + "bandicoot", + "bat", + "bear", + "bilby", + "bongo", + "bonobo", + "buffalo", + "bushbuck", + "camel", + "cassowary", + "cat", + "cheetah", + "chimpanzee", + "chipmunk", + "civet", + "crane", + "crocodile", + "dingo", + "dog", + "dugong", + "duiker", + "echidna", + "eland", + "elephant", + "emu", + "fossa", + "fox", + "gazelle", + "gecko", + "gelada", + "genet", + "gerenuk", + "giraffe", + "gnu", + "gorilla", + "grysbok", + "hartebeest", + "hedgehog", + "hippopotamus", + "hyena", + "hyrax", + "impala", + "jackal", + "jaguar", + "kangaroo", + "klipspringer", + "koala", + "kob", + "kookaburra", + "kudu", + "lemur", + "leopard", + "lion", + "lizard", + "llama", + "lynx", + "manatee", + "mandrill", + "meerkat", + "mongoose", + "mouse", + "numbat", + "nyala", + "okapi", + "oribi", + "oryx", + "ostrich", + "otter", + "panda", + "pangolin", + "panther", + "parrot", + "platypus", + "porcupine", + "possum", + "puku", + "quokka", + "quoll", + "rabbit", + "ratel", + "raven", + "reedbuck", + "rhinocerous", + "roan", + "sable", + "serval", + "shark", + "sitatunga", + "springhare", + "squirrel", + "swan", + "tasmaniandevil", + "tiger", + "topi", + "toucan", + "turtle", + "wallaby", + "warthog", + "waterbuck", + "wildebeest", + "wolf", + "wolverine", + "wombat", + "zebra"); + + public static String randomQualifiedNoun() { + return randomAdjective() + randomNoun(); + } + + public static String randomAdjective() { + return randomElement(adjectives); + } + + public static String randomNoun() { + return randomElement(nouns); + } + + public static String randomElement(List elements) { + return elements.get(ThreadLocalRandom.current().nextInt(elements.size())); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/AbstractCloudFoundryDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/AbstractCloudFoundryDescriptionValidator.java new file mode 100644 index 00000000000..42a99de87a5 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/AbstractCloudFoundryDescriptionValidator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/AbstractCloudFoundryDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/AbstractCloudFoundryDescriptionValidator.java
new file mode 100644
index 00000000000..42a99de87a5
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/AbstractCloudFoundryDescriptionValidator.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.AbstractCloudFoundryDescription;
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import java.util.List;
+
+public abstract class AbstractCloudFoundryDescriptionValidator
+    extends DescriptionValidator<AbstractCloudFoundryDescription> {
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      AbstractCloudFoundryDescription description,
+      ValidationErrors errors) {
+    StandardCloudFoundryAttributeValidator helper =
+        new StandardCloudFoundryAttributeValidator(description.getClass().getSimpleName(), errors);
+    helper.validateRegions(description.getRegion(), description.getCredentials());
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CloudFoundryRunJobOperationDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CloudFoundryRunJobOperationDescriptionValidator.java
new file mode 100644
index 00000000000..ce60fa307a7
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CloudFoundryRunJobOperationDescriptionValidator.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import org.springframework.stereotype.Component;
+
+@CloudFoundryOperation(AtomicOperations.RUN_JOB)
+@Component("cloudFoundryRunJobOperationDescriptionValidator")
+public class CloudFoundryRunJobOperationDescriptionValidator
+    extends AbstractCloudFoundryDescriptionValidator {}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CreateCloudFoundryServiceBindingDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CreateCloudFoundryServiceBindingDescriptionValidator.java
new file mode 100644
index 00000000000..3f694273bf3
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CreateCloudFoundryServiceBindingDescriptionValidator.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.CREATE_SERVICE_BINDINGS) +@Component("createCloudFoundryServiceBindingDescriptionValidator") +public class CreateCloudFoundryServiceBindingDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CreateCloudFoundryServiceKeyDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CreateCloudFoundryServiceKeyDescriptionValidator.java new file mode 100644 index 00000000000..726f8a768d5 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/CreateCloudFoundryServiceKeyDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.CREATE_SERVICE_KEY) +@Component("createCloudFoundryServiceKeyDescriptionValidator") +public class CreateCloudFoundryServiceKeyDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryLoadBalancerDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryLoadBalancerDescriptionValidator.java new file mode 100644 index 00000000000..a8f99ccd456 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryLoadBalancerDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DELETE_LOAD_BALANCER) +@Component("deleteCloudFoundryLoadBalancerDescriptionValidator") +public class DeleteCloudFoundryLoadBalancerDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryServiceBindingDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryServiceBindingDescriptionValidator.java new file mode 100644 index 00000000000..2f33e08da39 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryServiceBindingDescriptionValidator.java @@ -0,0 +1,26 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DELETE_SERVICE_BINDINGS) +@Component("deleteCloudFoundryServiceBindingDescriptionValidator") +public class DeleteCloudFoundryServiceBindingDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryServiceKeyDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryServiceKeyDescriptionValidator.java new file mode 100644 index 00000000000..a313cd194b7 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeleteCloudFoundryServiceKeyDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DELETE_SERVICE_KEY) +@Component("deleteCloudFoundryServiceKeyDescriptionValidator") +public class DeleteCloudFoundryServiceKeyDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeployCloudFoundryServerGroupDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeployCloudFoundryServerGroupDescriptionValidator.java new file mode 100644 index 00000000000..5301f39e860 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeployCloudFoundryServerGroupDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.CREATE_SERVER_GROUP) +@Component("deployCloudFoundryServerGroupDescriptionValidator") +public class DeployCloudFoundryServerGroupDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeployCloudFoundryServiceDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeployCloudFoundryServiceDescriptionValidator.java new file mode 100644 index 00000000000..83b393cfb57 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DeployCloudFoundryServiceDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DEPLOY_SERVICE) +@Component("deployCloudFoundryServiceDescriptionValidator") +public class DeployCloudFoundryServiceDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DestroyCloudFoundryServerGroupDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DestroyCloudFoundryServerGroupDescriptionValidator.java new file mode 100644 index 00000000000..7c59f4f8c33 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DestroyCloudFoundryServerGroupDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DESTROY_SERVER_GROUP) +@Component("destroyCloudFoundryServerGroupDescriptionValidator") +public class DestroyCloudFoundryServerGroupDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DestroyCloudFoundryServiceDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DestroyCloudFoundryServiceDescriptionValidator.java new file mode 100644 index 00000000000..711e56c6fc1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/DestroyCloudFoundryServiceDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.DESTROY_SERVICE) +@Component("destroyCloudFoundryServiceDescriptionValidator") +public class DestroyCloudFoundryServiceDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/MapLoadBalancersDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/MapLoadBalancersDescriptionValidator.java new file mode 100644 index 00000000000..21b4652885e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/MapLoadBalancersDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.MAP_LOAD_BALANCERS) +@Component("mapLoadBalancersDescriptionValidator") +public class MapLoadBalancersDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/ScaleCloudFoundryServerGroupDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/ScaleCloudFoundryServerGroupDescriptionValidator.java new file mode 100644 index 00000000000..58861622bb6 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/ScaleCloudFoundryServerGroupDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.RESIZE_SERVER_GROUP) +@Component("scaleCloudFoundryServerGroupDescriptionValidator") +public class ScaleCloudFoundryServerGroupDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/ShareCloudFoundryServiceDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/ShareCloudFoundryServiceDescriptionValidator.java new file mode 100644 index 00000000000..a7e17c8f974 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/ShareCloudFoundryServiceDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.SHARE_SERVICE) +@Component("shareCloudFoundryServiceDescriptionValidator") +public class ShareCloudFoundryServiceDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StandardCloudFoundryAttributeValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StandardCloudFoundryAttributeValidator.java new file mode 100644 index 00000000000..c75e6aa9600 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StandardCloudFoundryAttributeValidator.java @@ -0,0 +1,46 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class StandardCloudFoundryAttributeValidator {
+
+  private String context;
+  private ValidationErrors errors;
+
+  public StandardCloudFoundryAttributeValidator(String context, ValidationErrors errors) {
+    this.context = context;
+    this.errors = errors;
+  }
+
+  public void validateRegions(String region, CloudFoundryCredentials credentials) {
+    List<String> filteredRegions =
+        credentials.getFilteredSpaces().stream()
+            .map(s -> s.getRegion())
+            .collect(Collectors.toList());
+    if (!credentials.getFilteredSpaces().isEmpty()) {
+      if (!filteredRegions.contains(region)) {
+        errors.rejectValue(context + ".region", context + ".region.notValid");
+      }
+    }
+  }
+}
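Note: validateRegions only rejects when the account actually filters spaces — an empty filter accepts any region, otherwise the description's region must match one of the filtered spaces' regions. A self-contained restatement of that rule (not the clouddriver types; the region strings are hypothetical):

import java.util.List;

// Standalone sketch of the rule enforced above: an empty filter list means
// "no restriction"; otherwise the region must be one of the filtered regions.
public class RegionFilterRule {
  static boolean isRegionValid(String region, List<String> filteredRegions) {
    return filteredRegions.isEmpty() || filteredRegions.contains(region);
  }

  public static void main(String[] args) {
    System.out.println(isRegionValid("myorg > dev", List.of()));               // true: nothing filtered
    System.out.println(isRegionValid("myorg > dev", List.of("myorg > prod"))); // false: region filtered out
  }
}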
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StartCloudFoundryServerGroupDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StartCloudFoundryServerGroupDescriptionValidator.java
new file mode 100644
index 00000000000..feba0696084
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StartCloudFoundryServerGroupDescriptionValidator.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import org.springframework.stereotype.Component;
+
+@CloudFoundryOperation(AtomicOperations.ENABLE_SERVER_GROUP)
+@Component("startCloudFoundryServerGroupDescriptionValidator")
+public class StartCloudFoundryServerGroupDescriptionValidator
+    extends AbstractCloudFoundryDescriptionValidator {}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StopCloudFoundryServerGroupDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StopCloudFoundryServerGroupDescriptionValidator.java
new file mode 100644
index 00000000000..06c600ea05a
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/StopCloudFoundryServerGroupDescriptionValidator.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.UNMAP_LOAD_BALANCERS) +@Component("unmapLoadBalancersDescriptionValidator") +public class UnmapLoadBalancersDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/UnshareCloudFoundryServiceDescriptionValidator.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/UnshareCloudFoundryServiceDescriptionValidator.java new file mode 100644 index 00000000000..187eb2c688f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/UnshareCloudFoundryServiceDescriptionValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.UNSHARE_SERVICE) +@Component("unshareCloudFoundryServiceDescriptionValidator") +public class UnshareCloudFoundryServiceDescriptionValidator + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/UpsertCloudFoundryLoadBalancerDescription.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/UpsertCloudFoundryLoadBalancerDescription.java new file mode 100644 index 00000000000..3226fe263f2 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/validators/UpsertCloudFoundryLoadBalancerDescription.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import org.springframework.stereotype.Component; + +@CloudFoundryOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +@Component("upsertCloudFoundryLoadBalancerDescription") +public class UpsertCloudFoundryLoadBalancerDescription + extends AbstractCloudFoundryDescriptionValidator {} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/ArtifactInfo.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/ArtifactInfo.java new file mode 100644 index 00000000000..38c78d1531f --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/ArtifactInfo.java @@ -0,0 +1,40 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import lombok.Builder; +import lombok.Value; + +/** The CI build metadata for an app artifact based on the build info produced by Artifactory */ +@Value +@Builder +@JsonDeserialize(builder = ArtifactInfo.ArtifactInfoBuilder.class) +@JsonInclude(JsonInclude.Include.NON_EMPTY) +public class ArtifactInfo { + + @JsonView(Views.Cache.class) + String name; + + @JsonView(Views.Cache.class) + String version; + + @JsonView(Views.Cache.class) + String url; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplication.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplication.java index dca57ecd7dc..39678b4ecef 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplication.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplication.java @@ -16,30 +16,46 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.netflix.spinnaker.clouddriver.model.Application; -import lombok.Getter; -import lombok.RequiredArgsConstructor; +import static java.util.Collections.emptyMap; +import static java.util.stream.Collectors.*; -import java.util.List; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.netflix.spinnaker.clouddriver.model.Application; import java.util.Map; import java.util.Set; - -import static java.util.stream.Collectors.*; - -@RequiredArgsConstructor -@Getter 
+import lombok.Builder;
+import lombok.EqualsAndHashCode;
+import lombok.Value;
+import lombok.experimental.Wither;
+
+@Value
+@EqualsAndHashCode(of = "name")
+@Builder
+@JsonDeserialize(builder = CloudFoundryApplication.CloudFoundryApplicationBuilder.class)
+@JsonIgnoreProperties("clusters")
 public class CloudFoundryApplication implements Application {
-  private final String name;
-
-  @JsonIgnore
-  private final List<CloudFoundryCluster> clusters;
+  @JsonView(Views.Cache.class)
+  String name;
 
-  private final Map<String, Object> attributes;
+  @Wither
+  @JsonView(Views.Relationship.class)
+  Set<CloudFoundryCluster> clusters;
 
   @Override
   public Map<String, Set<String>> getClusterNames() {
-    return clusters.stream().collect(groupingBy(CloudFoundryCluster::getAccountName,
-      mapping(CloudFoundryCluster::getName, toSet())));
+    return clusters == null
+        ? emptyMap()
+        : clusters.stream()
+            .collect(
+                groupingBy(
+                    CloudFoundryCluster::getAccountName,
+                    mapping(CloudFoundryCluster::getName, toSet())));
+  }
+
+  @Override
+  public Map<String, Object> getAttributes() {
+    return emptyMap();
+  }
-}
\ No newline at end of file
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildInfo.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildInfo.java
new file mode 100644
index 00000000000..b7bc868cef2
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildInfo.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import lombok.Builder; +import lombok.Value; + +/** The CI build metadata for an app artifact based on the build info produced by Artifactory */ +@Value +@Builder +@JsonDeserialize(builder = CloudFoundryBuildInfo.CloudFoundryBuildInfoBuilder.class) +@JsonInclude(JsonInclude.Include.NON_EMPTY) +public class CloudFoundryBuildInfo { + + @JsonView(Views.Cache.class) + String jobName; + + @JsonView(Views.Cache.class) + String jobNumber; + + @JsonView(Views.Cache.class) + String jobUrl; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildpack.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildpack.java index 90dab83d4b5..e5e1f410c21 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildpack.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryBuildpack.java @@ -16,5 +16,24 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import lombok.Builder; +import lombok.Value; + +@Value +@Builder +@JsonDeserialize(builder = CloudFoundryBuildpack.CloudFoundryBuildpackBuilder.class) public class CloudFoundryBuildpack { + @JsonView(Views.Cache.class) + String name; + + @JsonView(Views.Cache.class) + String detectOutput; + + @JsonView(Views.Cache.class) + String version; + + @JsonView(Views.Cache.class) + String buildpackName; } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryCluster.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryCluster.java index 32c7b88a5ef..fe3a3145e69 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryCluster.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryCluster.java @@ -16,23 +16,48 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; +import static java.util.Collections.emptySet; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.netflix.frigga.Names; import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; import com.netflix.spinnaker.clouddriver.model.Cluster; -import lombok.AllArgsConstructor; -import lombok.EqualsAndHashCode; -import lombok.Getter; - +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; import java.util.Set; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.Value; +import lombok.experimental.Wither; -@AllArgsConstructor -@EqualsAndHashCode(of = {"name", "accountName"}, callSuper = false) -@Getter +@Value +@EqualsAndHashCode( + of = {"name", "accountName"}, + callSuper = false) +@Builder +@JsonDeserialize(builder = CloudFoundryCluster.CloudFoundryClusterBuilder.class) +@JsonInclude(JsonInclude.Include.NON_EMPTY) public class CloudFoundryCluster extends CloudFoundryModel 
    implements Cluster {
-  private final String accountName;
-  private final String name;
-  private final Set<CloudFoundryServerGroup> serverGroups;
-  private final Set<CloudFoundryLoadBalancer> loadBalancers;
+
+  @JsonView(Views.Cache.class)
+  String accountName;
+
+  @JsonView(Views.Cache.class)
+  String name;
+
+  @Wither
+  @JsonView(Views.Relationship.class)
+  Set<CloudFoundryServerGroup> serverGroups;
+
+  /**
+   * Load balancers are read from the server group model, and don't make sense on cluster. There is
+   * no practical impact to leaving this empty.
+   */
+  @Override
+  public Set<LoadBalancer> getLoadBalancers() {
+    return emptySet();
+  }
 
   public String getStack() {
     return Names.parseName(name).getStack();
@@ -45,4 +70,4 @@ public String getDetail() {
   public String getType() {
     return CloudFoundryCloudProvider.ID;
   }
-}
\ No newline at end of file
+}
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDomain.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDomain.java
index b24bbb816f6..2956a5fe1ed 100644
--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDomain.java
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDomain.java
@@ -16,21 +16,26 @@
 package com.netflix.spinnaker.clouddriver.cloudfoundry.model;
 
-import lombok.AllArgsConstructor;
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.ToString;
-
+import com.fasterxml.jackson.annotation.JsonView;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import javax.annotation.Nullable;
+import lombok.Builder;
+import lombok.EqualsAndHashCode;
+import lombok.Value;
 
-@AllArgsConstructor
-@ToString
+@Value
+@Builder
+@JsonDeserialize(builder = CloudFoundryDomain.CloudFoundryDomainBuilder.class)
 @EqualsAndHashCode(of = "id")
-@Getter
 public class CloudFoundryDomain {
-  private final String id;
-  private final String name;
+  @JsonView(Views.Cache.class)
+  @Nullable
+  String id;
+
+  @JsonView(Views.Cache.class)
+  String name;
 
+  @JsonView(Views.Cache.class)
   @Nullable
-  private final CloudFoundryOrganization organization;
+  CloudFoundryOrganization organization;
 }
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDroplet.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDroplet.java
index 9a54832b8c6..ed8cda30195 100644
--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDroplet.java
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryDroplet.java
@@ -17,27 +17,40 @@
 package com.netflix.spinnaker.clouddriver.cloudfoundry.model;
 
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonView;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.netflix.spinnaker.clouddriver.model.Image;
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.RequiredArgsConstructor;
-import lombok.ToString;
-
 import java.util.Collection;
+import javax.annotation.Nullable;
+import lombok.Builder;
+import lombok.EqualsAndHashCode;
+import lombok.Value;
 
-@RequiredArgsConstructor
-@ToString
+@Value
 @EqualsAndHashCode(of = "id")
-@Getter
+@Builder
+@JsonDeserialize(builder = CloudFoundryDroplet.CloudFoundryDropletBuilder.class)
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class CloudFoundryDroplet implements Image {
+  @JsonView(Views.Cache.class)
   String id;
+
+  @JsonView(Views.Cache.class)
   String name;
-  CloudFoundrySpace space;
+
+  @JsonView(Views.Cache.class)
   String stack;
+
+  @JsonView(Views.Cache.class)
   Collection<CloudFoundryBuildpack> buildpacks;
+
+  @JsonView(Views.Cache.class)
+  @Nullable
+  CloudFoundrySpace space;
+
+  @JsonView(Views.Cache.class)
+  @Nullable
   CloudFoundryPackage sourcePackage;
-  String packageChecksum;
 
   @Override
   public String getRegion() {
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryImage.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryImage.java
index e8a97d479d1..3edec3e786b 100644
--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryImage.java
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryImage.java
@@ -16,14 +16,22 @@
 package com.netflix.spinnaker.clouddriver.cloudfoundry.model;
 
+import com.fasterxml.jackson.annotation.JsonView;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.netflix.spinnaker.clouddriver.model.Image;
-import lombok.Getter;
-import lombok.RequiredArgsConstructor;
+import lombok.Builder;
+import lombok.Value;
 
-@RequiredArgsConstructor
-@Getter
+@Value
+@Builder
+@JsonDeserialize(builder = CloudFoundryImage.CloudFoundryImageBuilder.class)
 public class CloudFoundryImage implements Image {
-  private final String id;
-  private final String name;
-  private final String region;
+  @JsonView(Views.Cache.class)
+  String id;
+
+  @JsonView(Views.Cache.class)
+  String name;
+
+  @JsonView(Views.Cache.class)
+  String region;
 }
diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryInstance.java
index 00e1bd4be0d..b6f0163d2ee 100644
--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryInstance.java
+++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryInstance.java
@@ -16,43 +16,53 @@
 package com.netflix.spinnaker.clouddriver.cloudfoundry.model;
 
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonView;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider;
 import com.netflix.spinnaker.clouddriver.model.HealthState;
 import com.netflix.spinnaker.clouddriver.model.Instance;
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.RequiredArgsConstructor;
-import lombok.ToString;
-
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import lombok.Builder;
+import lombok.EqualsAndHashCode;
+import lombok.Value;
 
-@RequiredArgsConstructor
-@ToString
-@EqualsAndHashCode(of = {"appGuid", "key"}, callSuper = false)
-@Getter
-@JsonIgnoreProperties(ignoreUnknown = true)
+@Value
+@EqualsAndHashCode(
+    of = {"appGuid", "key"},
+    callSuper = false)
+@Builder
+@JsonDeserialize(builder = CloudFoundryInstance.CloudFoundryInstanceBuilder.class)
 public class CloudFoundryInstance extends CloudFoundryModel implements Instance {
-  private final String appGuid;
final String appGuid; + @JsonView(Views.Cache.class) + String appGuid; /* * A sequence number that may get recycled when instances come and go. */ - private final String key; + @JsonView(Views.Cache.class) + String key; + + @JsonView(Views.Cache.class) + HealthState healthState; + + @JsonView(Views.Cache.class) + String details; + + @JsonView(Views.Cache.class) + Long launchTime; - private final HealthState healthState; - private final String details; - private final Long launchTime; - private final String zone; + @JsonView(Views.Cache.class) + String zone; @Override public List&lt;Map&lt;String, Object&gt;&gt; getHealth() { Map&lt;String, Object&gt; health = new HashMap&lt;&gt;(); health.put("healthClass", "platform"); - health.put("state", healthState.toString()); + health.put("type", "TargetGroup"); + health.put("state", (healthState == null ? HealthState.Unknown : healthState).toString()); return Collections.singletonList(health); } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryJobStatus.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryJobStatus.java new file mode 100644 index 00000000000..95cb290b11a --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryJobStatus.java @@ -0,0 +1,86 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
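A minimal sketch of what the rewritten getHealth() above yields when an instance has no reported health state, assuming the model classes from this diff are on the classpath; the GUID and key are hypothetical values:

import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryInstance;
import java.util.Map;

public class InstanceHealthSketch {
  public static void main(String[] args) {
    CloudFoundryInstance instance =
        CloudFoundryInstance.builder() // builder generated by Lombok's @Builder above
            .appGuid("1a2b3c") // hypothetical app GUID
            .key("0")
            .build(); // healthState deliberately left unset
    Map<String, Object> health = instance.getHealth().get(0);
    // Contains healthClass=platform, type=TargetGroup, state=Unknown;
    // the null check added above prevents an NPE for instances without health data.
    System.out.println(health);
  }
}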
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task.State; +import com.netflix.spinnaker.clouddriver.model.JobState; +import com.netflix.spinnaker.clouddriver.model.JobStatus; +import java.io.Serializable; +import java.util.Collections; +import java.util.Map; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +@Builder +@JsonDeserialize(builder = CloudFoundryJobStatus.CloudFoundryJobStatusBuilder.class) +public class CloudFoundryJobStatus implements JobStatus { + @Nullable private String name; + + private String account; + + private String id; + + private String location; + + private final String provider = CloudFoundryCloudProvider.ID; + + private JobState jobState; + + private Long createdTime; + + @Nullable private Long completedTime; + + @Override + public Map getCompletionDetails() { + return Collections.emptyMap(); + } + + public static CloudFoundryJobStatus fromTask(Task task, String account, String location) { + State state = task.getState(); + CloudFoundryJobStatusBuilder builder = CloudFoundryJobStatus.builder(); + switch (state) { + case FAILED: + builder.jobState(JobState.Failed); + builder.completedTime(task.getUpdatedAt().toInstant().toEpochMilli()); + break; + case RUNNING: + builder.jobState(JobState.Running); + break; + case SUCCEEDED: + builder.jobState(JobState.Succeeded); + builder.completedTime(task.getUpdatedAt().toInstant().toEpochMilli()); + break; + default: + builder.jobState(JobState.Unknown); + } + return builder + .name(task.getName()) + .account(account) + .id(task.getGuid()) + .location(location) + .createdTime(task.getCreatedAt().toInstant().toEpochMilli()) + .build(); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancer.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancer.java index cce4264b50c..a601b7b5869 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancer.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancer.java @@ -16,50 +16,68 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import com.fasterxml.jackson.annotation.JsonIgnore; +import static java.util.Collections.emptySet; +import static java.util.stream.Collectors.toSet; +import static org.apache.commons.lang3.StringUtils.isEmpty; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; import com.netflix.spinnaker.clouddriver.model.LoadBalancer; import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; import com.netflix.spinnaker.moniker.Moniker; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import 
lombok.RequiredArgsConstructor; -import lombok.ToString; - -import javax.annotation.Nullable; import java.util.Set; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.Value; +import lombok.experimental.Wither; -import static java.util.Collections.emptySet; -import static java.util.stream.Collectors.toSet; - -@RequiredArgsConstructor -@ToString +@Value @EqualsAndHashCode(of = "id", callSuper = false) -@Getter -@JsonIgnoreProperties(ignoreUnknown = true) +@Builder +@JsonDeserialize(builder = CloudFoundryLoadBalancer.CloudFoundryLoadBalancerBuilder.class) +@JsonIgnoreProperties("mappedApps") public class CloudFoundryLoadBalancer extends CloudFoundryModel implements LoadBalancer, Cloneable { private static final Moniker EMPTY_MONIKER = new Moniker(); - private final String account; - private final String id; - @Nullable - private final String host; + @JsonView(Views.Cache.class) + String account; + + @JsonView(Views.Cache.class) + String id; + + @JsonView(Views.Cache.class) + String host; + + @JsonView(Views.Cache.class) @Nullable - private final String path; + String path; + + @JsonView(Views.Cache.class) @Nullable - private final Integer port; - private final CloudFoundrySpace space; - private final CloudFoundryDomain domain; + Integer port; + + @JsonView(Views.Cache.class) + CloudFoundrySpace space; + + @JsonView(Views.Cache.class) + CloudFoundryDomain domain; - @JsonIgnore - private final Set mappedApps; + @Wither + @JsonView(Views.Relationship.class) + Set mappedApps; @JsonProperty public String getName() { - return host + "." + domain + "." + getName() + (port == null ? "" : "-" + port) + (path == null ? "" : "/" + path); + return host + + "." + + domain.getName() + + (port == null ? "" : "-" + port) + + (isEmpty(path) ? 
"" : path); } @Override @@ -69,19 +87,23 @@ public Moniker getMoniker() { @Override public Set getServerGroups() { - return mappedApps.stream().map(app -> - new LoadBalancerServerGroup( - app.getName(), - account, - app.getRegion(), - app.getState() == CloudFoundryServerGroup.State.STOPPED, - emptySet(), - app.getInstances() - .stream() - .map(it -> new LoadBalancerInstance(it.getId(), it.getName(), null, it.getHealth().get(0))) - .collect(toSet()) - ) - ).collect(toSet()); + return mappedApps.stream() + .map( + app -> + new LoadBalancerServerGroup( + app.getName(), + account, + app.getRegion(), + app.getState() == CloudFoundryServerGroup.State.STOPPED, + emptySet(), + app.getInstances().stream() + .map( + it -> + new LoadBalancerInstance( + it.getId(), it.getName(), null, it.getHealth().get(0))) + .collect(toSet()), + CloudFoundryCloudProvider.ID)) + .collect(toSet()); } @Deprecated diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryModel.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryModel.java index 26709d51c5c..9c092f77ad3 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryModel.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryModel.java @@ -22,4 +22,4 @@ abstract class CloudFoundryModel { public String getCloudProvider() { return CloudFoundryCloudProvider.ID; } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryOrganization.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryOrganization.java index 32cd4123ede..44bfcfe8952 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryOrganization.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryOrganization.java @@ -16,16 +16,20 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import lombok.Builder; import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.RequiredArgsConstructor; -import lombok.ToString; +import lombok.Value; -@RequiredArgsConstructor -@ToString +@Value +@Builder +@JsonDeserialize(builder = CloudFoundryOrganization.CloudFoundryOrganizationBuilder.class) @EqualsAndHashCode(of = "id") -@Getter -class CloudFoundryOrganization { - private final String id; - private final String name; +public class CloudFoundryOrganization { + @JsonView(Views.Cache.class) + String id; + + @JsonView(Views.Cache.class) + String name; } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryPackage.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryPackage.java index 0b1da00372c..c1743b02e4b 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryPackage.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryPackage.java @@ -16,20 +16,27 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; +import 
com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import lombok.Builder; +import lombok.Value; + /* - * A package is an application’s ‘source code’; either raw bits for your application or a pointer to these bits. + * A package is an application's 'source code'; either raw bits for your application or a pointer to these bits. */ +@Value +@Builder +@JsonDeserialize(builder = CloudFoundryPackage.CloudFoundryPackageBuilder.class) public class CloudFoundryPackage { /* * This endpoint downloads the bits of an existing package. */ - private String downloadUrl; + @JsonView(Views.Cache.class) + String downloadUrl; - public String getDownloadUrl() { - return downloadUrl; - } + @JsonView(Views.Cache.class) + String checksumType; - public void setDownloadUrl(String downloadUrl) { - this.downloadUrl = downloadUrl; - } + @JsonView(Views.Cache.class) + String checksum; } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryRegion.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryRegion.java deleted file mode 100644 index f6c81dbcb5e..00000000000 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryRegion.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2018 Pivotal, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
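Because these models are now immutable @Value types with no setters, Jackson can only rebuild them through the Lombok-generated builders named in @JsonDeserialize. A sketch of that round trip for the CloudFoundryPackage just shown, assuming the build wires Jackson to the Lombok builder (for example via a @JsonPOJOBuilder(withPrefix = "") configuration); all field values are hypothetical:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryPackage;

public class PackageRoundTripSketch {
  public static void main(String[] args) throws Exception {
    CloudFoundryPackage pkg =
        CloudFoundryPackage.builder()
            .downloadUrl("https://example.org/v3/packages/guid/download") // hypothetical URL
            .checksumType("sha256")
            .checksum("deadbeef") // hypothetical checksum
            .build();
    ObjectMapper mapper = new ObjectMapper();
    String json = mapper.writeValueAsString(pkg);
    CloudFoundryPackage restored = mapper.readValue(json, CloudFoundryPackage.class);
    System.out.println(pkg.equals(restored)); // true: @Value supplies value equality
  }
}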
- */ - -package com.netflix.spinnaker.clouddriver.cloudfoundry.model; - -import lombok.RequiredArgsConstructor; - -@RequiredArgsConstructor -class CloudFoundryRegion { - private final String name; -} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServerGroup.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServerGroup.java index e1162a9cc3f..1e46c2341a8 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServerGroup.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServerGroup.java @@ -16,49 +16,120 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; +import static com.netflix.spinnaker.clouddriver.model.HealthState.*; +import static java.util.Collections.*; + import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonView; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.netflix.frigga.Names; import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; import com.netflix.spinnaker.clouddriver.model.Image; import com.netflix.spinnaker.clouddriver.model.ServerGroup; import com.netflix.spinnaker.clouddriver.names.NamerRegistry; import com.netflix.spinnaker.moniker.Moniker; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.RequiredArgsConstructor; -import lombok.ToString; - -import java.util.HashMap; +import io.vavr.collection.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.model.HealthState.*; -import static java.util.Collections.*; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.Value; +import lombok.With; -@RequiredArgsConstructor -@ToString -@Getter +@Value @EqualsAndHashCode(of = "id", callSuper = false) -@JsonIgnoreProperties(ignoreUnknown = true) +@Builder(toBuilder = true) +@JsonDeserialize(builder = CloudFoundryServerGroup.CloudFoundryServerGroupBuilder.class) +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonIgnoreProperties("loadBalancerNames") public class CloudFoundryServerGroup extends CloudFoundryModel implements ServerGroup { - private static final ObjectMapper mapper = new ObjectMapper(); - - private final String account; - private final String name; - private final String id; - private final Integer memory; - private final Set instances; - private final CloudFoundryDroplet droplet; - private final Integer diskQuota; - private final State state; - private final CloudFoundrySpace space; - private final Long createdTime; - private final List routes; - private final List serviceInstances; + private static final ObjectMapper IMAGE_MAPPER = new ObjectMapper(); + + @JsonView(Views.Cache.class) + String account; + + @JsonView(Views.Cache.class) + String appsManagerUri; + + @JsonView(Views.Cache.class) + String metricsUri; + + @JsonView(Views.Cache.class) + String name; + + @JsonView(Views.Cache.class) + String id; + + @JsonView(Views.Cache.class) + Integer memory; + + @JsonView(Views.Cache.class) + @Nullable + CloudFoundryDroplet droplet; + + 
@JsonView(Views.Cache.class) + Integer diskQuota; + + @JsonView(Views.Cache.class) + @Nullable + String healthCheckType; + + @JsonView(Views.Cache.class) + @Nullable + String healthCheckHttpEndpoint; + + @JsonView(Views.Cache.class) + State state; + + @JsonView(Views.Cache.class) + CloudFoundrySpace space; + + @JsonView(Views.Cache.class) + Long updatedTime; + + @JsonView(Views.Cache.class) + Long createdTime; + + @JsonView(Views.Cache.class) + Map env; + + @With + @JsonInclude(JsonInclude.Include.NON_EMPTY) + @JsonView(Views.Cache.class) + List serviceInstances; + + @JsonView(Views.Cache.class) + CloudFoundryBuildInfo ciBuild; + + @JsonView(Views.Cache.class) + ArtifactInfo appArtifact; + + @JsonView(Views.Cache.class) + String pipelineId; + + @With + @JsonView(Views.Relationship.class) + Set instances; + + @With + @JsonInclude(JsonInclude.Include.NON_EMPTY) + @JsonView(Views.Relationship.class) + Set loadBalancerNames; + + @Override + public Set getLoadBalancers() { + return loadBalancerNames == null ? emptySet() : loadBalancerNames; + } + + @Override + public Set getInstances() { + return instances == null ? emptySet() : instances; + } @Override public ImagesSummary getImagesSummary() { @@ -66,59 +137,65 @@ public ImagesSummary getImagesSummary() { @Override public List getSummaries() { return singletonList( - new ImageSummary() { - @Override - public String getServerGroupName() { - return name; - } - - @Override - public String getImageName() { - return name + "-droplet"; - } - - @Override - public String getImageId() { - return droplet == null ? "unknown" : droplet.getId(); - } - - @Override - public Map getImage() { - return mapper.convertValue(this, new TypeReference() { - }); - } - - @Override - public Map getBuildInfo() { - return emptyMap(); - } - } - ); + new ImageSummary() { + @Override + public String getServerGroupName() { + return name; + } + + @Override + public String getImageName() { + return name + "-droplet"; + } + + @Override + public String getImageId() { + return droplet == null ? "unknown" : droplet.getId(); + } + + @Override + public Map getImage() { + return IMAGE_MAPPER.convertValue(this, new TypeReference<>() {}); + } + + @Override + public Map getBuildInfo() { + return emptyMap(); + } + }); } }; } public Image getImage() { - return new CloudFoundryImage(droplet == null ? "unknown" : droplet.getId(), name + "-droplet", space.getRegion()); + return CloudFoundryImage.builder() + .id(droplet == null ? "unknown" : droplet.getId()) + .name(name + "-droplet") + .region(getRegion()) + .build(); } public Map getBuildInfo() { - Map buildInfo = new HashMap<>(); - buildInfo.put("droplet", droplet); - buildInfo.put("serviceInstances", serviceInstances); - buildInfo.put("id", id); - return buildInfo; + return HashMap.of( + "appsManagerUri", appsManagerUri, + "metricsUri", metricsUri, + "droplet", droplet, + "id", id, + "serviceInstances", serviceInstances, + "ciBuild", ciBuild, + "appArtifact", appArtifact) + .toJavaMap(); } @Deprecated @Override public ImageSummary getImageSummary() { - return getImagesSummary() != null ? getImagesSummary().getSummaries().get(0) : null; + return getImagesSummary().getSummaries().get(0); } @Override public String getRegion() { - return space.getRegion(); + return space == null ? 
"unknown" : space.getRegion(); } @Override @@ -128,12 +205,7 @@ public Boolean isDisabled() { @Override public Set getZones() { - return singleton(space.getName()); - } - - @Override - public Set getLoadBalancers() { - return routes.stream().map(CloudFoundryLoadBalancer::getName).collect(Collectors.toSet()); + return space == null ? emptySet() : singleton(space.getName()); } @Override @@ -149,13 +221,12 @@ public Map getLaunchConfig() { @Override public InstanceCounts getInstanceCounts() { return new InstanceCounts( - instances.size(), - (int) instances.stream().filter(in -> Up.equals(in.getHealthState())).count(), - (int) instances.stream().filter(in -> Down.equals(in.getHealthState())).count(), - (int) instances.stream().filter(in -> Unknown.equals(in.getHealthState())).count(), - (int) instances.stream().filter(in -> OutOfService.equals(in.getHealthState())).count(), - (int) instances.stream().filter(in -> Starting.equals(in.getHealthState())).count() - ); + instances.size(), + (int) instances.stream().filter(in -> Up.equals(in.getHealthState())).count(), + (int) instances.stream().filter(in -> Down.equals(in.getHealthState())).count(), + (int) instances.stream().filter(in -> Unknown.equals(in.getHealthState())).count(), + (int) instances.stream().filter(in -> OutOfService.equals(in.getHealthState())).count(), + (int) instances.stream().filter(in -> Starting.equals(in.getHealthState())).count()); } @Override @@ -173,8 +244,12 @@ public String getDetail() { public Moniker getMoniker() { Moniker moniker = NamerRegistry.getDefaultNamer().deriveMoniker(this); - return new Moniker(moniker.getApp(), moniker.getCluster(), moniker.getDetail(), moniker.getStack(), - moniker.getSequence() == null ? 0 : moniker.getSequence()); + return new Moniker( + moniker.getApp(), + moniker.getCluster(), + moniker.getDetail(), + moniker.getStack(), + moniker.getSequence() == null ? 0 : moniker.getSequence()); } @Deprecated @@ -182,7 +257,7 @@ public String getType() { return CloudFoundryCloudProvider.ID; } - enum State { + public enum State { STOPPED, STARTED } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryService.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryService.java new file mode 100644 index 00000000000..281a49835fc --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryService.java @@ -0,0 +1,33 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.netflix.spinnaker.clouddriver.model.Service; +import java.util.Collection; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.Value; + +@Value +@EqualsAndHashCode(callSuper = false) +@Builder +@JsonDeserialize(builder = CloudFoundryService.CloudFoundryServiceBuilder.class) +public class CloudFoundryService extends CloudFoundryModel implements Service { + String name; + Collection servicePlans; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServiceInstance.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServiceInstance.java index db7e94f096b..9d9d9109bc5 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServiceInstance.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServiceInstance.java @@ -16,13 +16,42 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import lombok.AllArgsConstructor; -import lombok.Getter; - -@AllArgsConstructor -@Getter -class CloudFoundryServiceInstance { - private final String serviceName; - private final String name; - private final String plan; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.netflix.spinnaker.clouddriver.model.ServiceInstance; +import java.util.Set; +import lombok.Builder; +import lombok.Value; + +/** "Service" in this context refers to an Open Service Broker service. */ +@Value +@Builder +@JsonDeserialize(builder = CloudFoundryServiceInstance.CloudFoundryServiceInstanceBuilder.class) +public class CloudFoundryServiceInstance implements ServiceInstance { + @JsonView(Views.Cache.class) + String serviceInstanceName; + + @JsonView(Views.Cache.class) + String name; + + @JsonView(Views.Cache.class) + String id; + + @JsonView(Views.Cache.class) + String plan; + + @JsonView(Views.Cache.class) + String planId; + + @JsonView(Views.Cache.class) + String status; + + @JsonView(Views.Cache.class) + String lastOperationDescription; + + @JsonView(Views.Cache.class) + Set tags; + + @JsonView(Views.Cache.class) + String type; } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServicePlan.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServicePlan.java new file mode 100644 index 00000000000..a376a3dca0e --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryServicePlan.java @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.netflix.spinnaker.clouddriver.model.ServicePlan; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.Value; + +@Value +@EqualsAndHashCode(callSuper = false) +@Builder +@JsonDeserialize(builder = CloudFoundryServicePlan.CloudFoundryServicePlanBuilder.class) +public class CloudFoundryServicePlan extends CloudFoundryModel implements ServicePlan { + String name; + String id; +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpace.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpace.java index 196ee92fc65..250d1520d3a 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpace.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpace.java @@ -16,17 +16,24 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import com.fasterxml.jackson.annotation.JsonIgnore; -import lombok.AllArgsConstructor; -import lombok.Getter; -import lombok.ToString; - -@AllArgsConstructor -@ToString -@Getter +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import lombok.Builder; +import lombok.Value; + +@Value +@Builder +@JsonDeserialize(builder = CloudFoundrySpace.CloudFoundrySpaceBuilder.class) +@JsonIgnoreProperties("region") public class CloudFoundrySpace { - private final String id; - private final String name; + @JsonView(Views.Cache.class) + String id; + + @JsonView(Views.Cache.class) + String name; + + @JsonView(Views.Cache.class) CloudFoundryOrganization organization; public static CloudFoundrySpace fromRegion(String region) { @@ -44,8 +51,9 @@ public boolean equals(Object o) { CloudFoundrySpace that = (CloudFoundrySpace) o; if (name != null ? !name.equals(that.name) : that.name != null) return false; - return organization != null ? organization.getName().equals(that.organization.getName()) : - that.organization == null; + return organization != null + ? organization.getName().equals(that.organization.getName()) + : that.organization == null; } @Override @@ -55,8 +63,7 @@ public int hashCode() { return result; } - @JsonIgnore - String getRegion() { + public String getRegion() { return organization.getName() + " > " + name; } } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/ServerGroupMetaDataEnvVar.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/ServerGroupMetaDataEnvVar.java new file mode 100644 index 00000000000..8b77f83d6e5 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/ServerGroupMetaDataEnvVar.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
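The getRegion() method above encodes a space's region as "org > space", and fromRegion(...) (body elided in this hunk) is presumably its inverse, used when parsing on-demand requests. A sketch with hypothetical org and space names:

import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;

public class RegionSketch {
  public static void main(String[] args) {
    CloudFoundrySpace space =
        CloudFoundrySpace.builder()
            .name("development") // hypothetical space name
            .organization(
                CloudFoundryOrganization.builder().id("org-guid").name("myorg").build())
            .build();
    System.out.println(space.getRegion()); // myorg > development
  }
}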
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +public enum ServerGroupMetaDataEnvVar { + JobName(ServerGroupMetaDataEnvVar.PREFIX + "BUILD_JOB_NAME"), + JobNumber(ServerGroupMetaDataEnvVar.PREFIX + "BUILD_JOB_NUMBER"), + JobUrl(ServerGroupMetaDataEnvVar.PREFIX + "BUILD_JOB_URL"), + ArtifactName(ServerGroupMetaDataEnvVar.PREFIX + "ARTIFACT_NAME"), + ArtifactVersion(ServerGroupMetaDataEnvVar.PREFIX + "ARTIFACT_VERSION"), + ArtifactUrl(ServerGroupMetaDataEnvVar.PREFIX + "ARTIFACT_URL"), + PipelineId(ServerGroupMetaDataEnvVar.PREFIX + "PIPELINE_ID"); + + public static final String PREFIX = "__SPINNAKER_"; + public final String envVarName; + + ServerGroupMetaDataEnvVar(String envVarName) { + this.envVarName = envVarName; + } + + public static boolean contains(String envVar) { + for (ServerGroupMetaDataEnvVar v : values()) { + if (v.envVarName.equals(envVar)) { + return true; + } + } + return false; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/Views.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/Views.java new file mode 100644 index 00000000000..a84e9e4f9d0 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/Views.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.model; + +public class Views { + /** + * A minimal set of fields to be cached. Generally fields that are expressed as relationships are + * NOT marked with this view. + */ + public static class Cache {} + + /** Populated via a cache relationship and exposed through clouddriver APIs. 
*/ + public static class Relationship {} +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/CloudFoundryProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/CloudFoundryProvider.java index a8ae06d54e5..ded632655af 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/CloudFoundryProvider.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/CloudFoundryProvider.java @@ -16,32 +16,57 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.provider; -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; -import com.netflix.spinnaker.clouddriver.cache.SearchableProvider; -import lombok.Getter; -import lombok.RequiredArgsConstructor; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.*; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toSet; -import java.util.Collection; -import java.util.Collections; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.cache.SearchableProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.security.BaseProvider; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; +import javax.annotation.Nullable; +import lombok.Getter; +import lombok.RequiredArgsConstructor; @RequiredArgsConstructor @Getter -public class CloudFoundryProvider extends AgentSchedulerAware implements SearchableProvider { - // todo(jkschneider): add default caches - final Set defaultCaches = Collections.emptySet(); - final Map urlMappingTemplates = Collections.emptyMap(); - // todo(jkschneider): add search result hydrator - final Map searchResultHydrators = Collections.emptyMap(); - private final String id = "cloudfoundry"; +public class CloudFoundryProvider extends BaseProvider implements SearchableProvider { + private final Set defaultCaches = + Stream.of( + APPLICATIONS.getNs(), + CLUSTERS.getNs(), + SERVER_GROUPS.getNs(), + INSTANCES.getNs(), + LOAD_BALANCERS.getNs(), + SPACES.getNs()) + .collect(toSet()); + + private final Map searchResultHydrators = + singletonMap( + new SearchableResource(APPLICATIONS.getNs(), "cloudfoundry"), + new ApplicationSearchResultHydrator()); + + private final Map urlMappingTemplates = emptyMap(); + public static final String PROVIDER_ID = "cloudfoundry"; private final String providerName = CloudFoundryProvider.class.getName(); - private final Collection agents; + static class ApplicationSearchResultHydrator implements SearchableProvider.SearchResultHydrator { + @Override + public Map hydrateResult( + Cache cacheView, Map result, String id) { + // needed by deck to render correctly in infrastructure search results + result.put("application", result.get("name")); + return result; + } + } + + @Nullable @Override public Map parseKey(String key) { - // todo(jkschneider): parse keys - return Collections.emptyMap(); + return Keys.parse(key).orElse(null); } } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/AbstractCloudFoundryCachingAgent.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/AbstractCloudFoundryCachingAgent.java new 
file mode 100644 index 00000000000..e5987cce508 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/AbstractCloudFoundryCachingAgent.java @@ -0,0 +1,142 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static java.util.Collections.emptyMap; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.Views; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import java.io.IOException; +import java.time.Clock; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Getter +@Slf4j +abstract class AbstractCloudFoundryCachingAgent + implements CachingAgent, OnDemandAgent, AccountAware { + private final String providerName = CloudFoundryProvider.class.getName(); + private static final ObjectMapper cacheViewMapper = + new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION); + + private final OnDemandMetricsSupport metricsSupport; + private final Clock internalClock; + private final CloudFoundryCredentials credentials; + + AbstractCloudFoundryCachingAgent(CloudFoundryCredentials credentials, Registry registry) { + this(credentials, registry, Clock.systemDefaultZone()); + } + + private AbstractCloudFoundryCachingAgent( + CloudFoundryCredentials credentials, Registry registry, Clock internalClock) { + this.credentials = credentials; + cacheViewMapper.setConfig(cacheViewMapper.getSerializationConfig().withView(Views.Cache.class)); + this.metricsSupport = + new OnDemandMetricsSupport( + registry, this, CloudFoundryProvider.PROVIDER_ID + ":" + OnDemandType.ServerGroup); + this.internalClock = internalClock; + } + + @Override + public String getAccountName() { + return credentials.getName(); + } + + @Override + public String getAgentType() { + return getAccountName() + "/" + getClass().getSimpleName(); + } + + 
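The mapper configuration above (DEFAULT_VIEW_INCLUSION disabled, serialization view set to Views.Cache) is what keeps Views.Relationship fields out of the cached blob. A self-contained sketch of that standard Jackson behavior, using stand-in view markers rather than the real Views class:

import com.fasterxml.jackson.annotation.JsonView;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ViewFilteringSketch {
  static class CacheView {}        // stands in for Views.Cache
  static class RelationshipView {} // stands in for Views.Relationship

  public static class Model {
    @JsonView(CacheView.class) public String name = "demo";
    @JsonView(RelationshipView.class) public String serverGroups = "rebuilt-from-relationships";
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION);
    String cached = mapper.writerWithView(CacheView.class).writeValueAsString(new Model());
    System.out.println(cached); // {"name":"demo"}; the relationship field is omitted
  }
}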
@Override + public String getOnDemandAgentType() { + return getAgentType() + "-OnDemand"; + } + + @Override + public OnDemandMetricsSupport getMetricsSupport() { + return metricsSupport; + } + + /** + * Serialize just enough data to be able to reconstitute the model fully if its relationships are + * also deserialized. + */ + // Visible for testing + static Map&lt;String, Object&gt; cacheView(Object o) { + return Collections.singletonMap( + "resource", cacheViewMapper.convertValue(o, new TypeReference&lt;Map&lt;String, Object&gt;&gt;() {})); + } + + protected CloudFoundryClient getClient() { + return credentials.getClient(); + } + + Map&lt;String, Collection&lt;ResourceCacheData&gt;&gt; getCacheResultsFromCacheData(CacheData cacheData) { + try { + return cacheViewMapper.readValue( + cacheData.getAttributes().get("cacheResults").toString(), + new TypeReference&lt;Map&lt;String, Collection&lt;ResourceCacheData&gt;&gt;&gt;() {}); + } catch (IOException e) { + throw new RuntimeException("Failed to deserialize cache results", e); + } + } + + void processOnDemandCacheData(CacheData cacheData) { + Map&lt;String, Object&gt; attributes = cacheData.getAttributes(); + attributes.put("processedTime", System.currentTimeMillis()); + attributes.put("processedCount", (Integer) attributes.getOrDefault("processedCount", 0) + 1); + } + + CacheData buildOnDemandCacheData(String key, Map&lt;String, Collection&lt;CacheData&gt;&gt; cacheResult) { + try { + return new DefaultCacheData( + key, + (int) TimeUnit.MINUTES.toSeconds(10), // ttl + io.vavr.collection.HashMap.of( + "cacheTime", + this.getInternalClock().instant().toEpochMilli(), + "cacheResults", + cacheViewMapper.writeValueAsString(cacheResult), + "processedCount", + 0) + .toJavaMap(), + emptyMap(), + this.getInternalClock()); + } catch (JsonProcessingException serializationException) { + throw new RuntimeException("cache results serialization failed", serializationException); + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryCachingAgent.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryCachingAgent.java deleted file mode 100644 index e69f453ad4d..00000000000 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryCachingAgent.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2018 Pivotal, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.agent.*; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; -import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; -import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace; -import lombok.Getter; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; - -@RequiredArgsConstructor -@Getter -@Slf4j -public class CloudFoundryCachingAgent implements CachingAgent, AccountAware { - final String providerName = CloudFoundryProvider.class.getName(); - final Collection providedDataTypes = Arrays.asList( - AgentDataType.Authority.AUTHORITATIVE.forType(Namespace.APPLICATIONS.ns), - AgentDataType.Authority.AUTHORITATIVE.forType(Namespace.CLUSTERS.ns), - AgentDataType.Authority.AUTHORITATIVE.forType(Namespace.SERVER_GROUPS.ns), - AgentDataType.Authority.AUTHORITATIVE.forType(Namespace.INSTANCES.ns), - AgentDataType.Authority.AUTHORITATIVE.forType(Namespace.LOAD_BALANCERS.ns) - ); - private final CloudFoundryCredentials credentials; - private final ObjectMapper objectMapper; - - @Override - public CacheResult loadData(ProviderCache providerCache) { - log.info("Caching all resources in Cloud Foundry account $accountName"); - - // todo(jkschneider): cache all Cloud Foundry resources - return new DefaultCacheResult(Collections.emptyMap()); - } - - @Override - public String getAccountName() { - return credentials.getName(); - } - - @Override - public String getAgentType() { - return getAccountName() + "/" + getClass().getSimpleName(); - } -} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryLoadBalancerCachingAgent.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryLoadBalancerCachingAgent.java new file mode 100644 index 00000000000..65e27dfb6f1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryLoadBalancerCachingAgent.java @@ -0,0 +1,258 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
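Both new caching agents below reconcile on-demand cache entries with the same rule: an entry is evicted once it predates the current load cycle and has been processed at least once; otherwise it is kept and takes precedence over freshly loaded data. Distilled into a standalone sketch:

public class OnDemandReconcileSketch {
  // Mirrors the keep/evict decision inside the agents' loadData(...) below.
  static boolean shouldEvict(long cacheTime, int processedCount, long loadDataStart) {
    return cacheTime < loadDataStart && processedCount > 0;
  }

  public static void main(String[] args) {
    long loadDataStart = 1_000L;
    System.out.println(shouldEvict(900L, 1, loadDataStart));   // true: stale and already processed
    System.out.println(shouldEvict(900L, 0, loadDataStart));   // false: not yet processed, keep
    System.out.println(shouldEvict(1_100L, 3, loadDataStart)); // false: newer than this cycle
  }
}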
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toSet; + +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import java.util.*; +import javax.annotation.Nullable; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Getter +@Slf4j +public class CloudFoundryLoadBalancerCachingAgent extends AbstractCloudFoundryCachingAgent { + private static final ObjectMapper cacheViewMapper = + new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION); + + private final Collection providedDataTypes = + Collections.singletonList(AUTHORITATIVE.forType(LOAD_BALANCERS.getNs())); + + public CloudFoundryLoadBalancerCachingAgent( + CloudFoundryCredentials credentials, Registry registry) { + super(credentials, registry); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + long loadDataStart = this.getInternalClock().millis(); + String accountName = getAccountName(); + log.info("Caching all load balancers (routes) in Cloud Foundry account " + accountName); + List spaceFilters = this.getCredentials().getFilteredSpaces(); + + // Once Routes are migrated to v3 we can take advantage of space_guids. Until then... 
+ List loadBalancers = this.getClient().getRoutes().all(spaceFilters); + + Collection onDemandCacheData = + providerCache.getAll( + ON_DEMAND.getNs(), + providerCache.filterIdentifiers(ON_DEMAND.getNs(), Keys.getAllLoadBalancers())); + + List toEvict = new ArrayList<>(); + Map toKeep = new java.util.HashMap<>(); + onDemandCacheData.forEach( + cacheData -> { + long cacheTime = (long) cacheData.getAttributes().get("cacheTime"); + if (cacheTime < loadDataStart + && (int) cacheData.getAttributes().computeIfAbsent("processedCount", s -> 0) > 0) { + toEvict.add(cacheData.getId()); + } else { + toKeep.put(cacheData.getId(), cacheData); + } + }); + + Map loadBalancersByServerGroupIds = new HashMap<>(); + loadBalancers.stream() + .forEach( + lb -> + lb.getMappedApps().stream() + .forEach( + sg -> + loadBalancersByServerGroupIds + .computeIfAbsent( + sg.getId(), + (s) -> + new ResourceCacheData( + Keys.getServerGroupKey( + sg.getAccount(), sg.getName(), sg.getRegion()), + emptyMap(), + new java.util.HashMap<>())) + .getRelationships() + .computeIfAbsent(LOAD_BALANCERS.getNs(), k -> new HashSet<>()) + .add(lb.getId()))); + + Map> results = + io.vavr.collection.HashMap.of( + LOAD_BALANCERS.getNs(), + loadBalancers.stream() + .map(lb -> setCacheData(toKeep, lb, loadDataStart)) + .collect(toSet()), + SERVER_GROUPS.getNs(), + loadBalancersByServerGroupIds.values()) + .toJavaMap(); + + onDemandCacheData.forEach(this::processOnDemandCacheData); + results.put(ON_DEMAND.getNs(), toKeep.values()); + + log.debug( + "LoadBalancer cache loaded for Cloud Foundry account {}, ({} sec)", + accountName, + (getInternalClock().millis() - loadDataStart) / 1000); + return new DefaultCacheResult(results, Collections.singletonMap(ON_DEMAND.getNs(), toEvict)); + } + + private CacheData setCacheData( + Map onDemandCacheDataToKeep, + CloudFoundryLoadBalancer cloudFoundryLoadBalancer, + long start) { + String account = this.getAccountName(); + String key = Keys.getLoadBalancerKey(account, cloudFoundryLoadBalancer); + CacheData lbCacheData = onDemandCacheDataToKeep.get(key); + if (lbCacheData != null && (long) lbCacheData.getAttributes().get("cacheTime") > start) { + Map> cacheResults = + getCacheResultsFromCacheData(lbCacheData); + onDemandCacheDataToKeep.remove(key); + return cacheResults.get(LOAD_BALANCERS.getNs()).stream().findFirst().orElse(null); + } else { + return new ResourceCacheData( + Keys.getLoadBalancerKey(account, cloudFoundryLoadBalancer), + cacheView(cloudFoundryLoadBalancer), + singletonMap( + SERVER_GROUPS.getNs(), + cloudFoundryLoadBalancer.getServerGroups().stream() + .map(sg -> Keys.getServerGroupKey(account, sg.getName(), sg.getRegion())) + .collect(toSet()))); + } + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return type.equals(OnDemandType.LoadBalancer) + && cloudProvider.equals(CloudFoundryProvider.PROVIDER_ID); + } + + @Nullable + @Override + public OnDemandResult handle(ProviderCache providerCache, Map data) { + String account = Optional.ofNullable(data.get("account")).map(Object::toString).orElse(null); + String region = Optional.ofNullable(data.get("region")).map(Object::toString).orElse(null); + String loadBalancerName = + Optional.ofNullable(data.get("loadBalancerName")).map(Object::toString).orElse(null); + if (account == null || region == null || loadBalancerName == null) { + return null; + } + + if (!this.getAccountName().equals(account)) { + return null; + } + + CloudFoundrySpace space = getClient().getSpaces().findSpaceByRegion(region).orElse(null); 
+ if (space == null) { + return null; + } + log.info("On Demand cache refresh triggered, waiting for load balancers loadData to be called"); + RouteId routeId = this.getClient().getRoutes().toRouteId(loadBalancerName); + if (routeId == null) { + return null; + } + CloudFoundryLoadBalancer cloudFoundryLoadBalancer = + this.getClient().getRoutes().find(routeId, space.getId()); + String loadBalancerKey = + Optional.ofNullable(cloudFoundryLoadBalancer) + .map(lb -> Keys.getLoadBalancerKey(account, lb)) + .orElse(Keys.getLoadBalancerKey(this.getAccountName(), loadBalancerName, region)); + Map> evictions; + + DefaultCacheResult loadBalancerCacheResults; + + if (cloudFoundryLoadBalancer != null) { + Collection loadBalancerCacheData = + Collections.singleton( + new ResourceCacheData( + loadBalancerKey, + cacheView(cloudFoundryLoadBalancer), + singletonMap( + SERVER_GROUPS.getNs(), + cloudFoundryLoadBalancer.getServerGroups().stream() + .map(sg -> Keys.getServerGroupKey(account, sg.getName(), sg.getRegion())) + .collect(toSet())))); + + loadBalancerCacheResults = + new DefaultCacheResult( + Collections.singletonMap(LOAD_BALANCERS.getNs(), loadBalancerCacheData)); + + providerCache.putCacheData( + ON_DEMAND.getNs(), + buildOnDemandCacheData(loadBalancerKey, loadBalancerCacheResults.getCacheResults())); + evictions = Collections.emptyMap(); + } else { + loadBalancerCacheResults = + new DefaultCacheResult( + Collections.singletonMap(LOAD_BALANCERS.getNs(), Collections.emptyList())); + evictions = + Collections.singletonMap( + LOAD_BALANCERS.getNs(), + providerCache.filterIdentifiers(LOAD_BALANCERS.getNs(), loadBalancerKey)); + } + + return new OnDemandResult(getOnDemandAgentType(), loadBalancerCacheResults, evictions); + } + + @Override + public Collection> pendingOnDemandRequests(ProviderCache providerCache) { + Collection keys = + providerCache.filterIdentifiers(ON_DEMAND.getNs(), Keys.getAllLoadBalancers()); + return providerCache.getAll(ON_DEMAND.getNs(), keys, RelationshipCacheFilter.none()).stream() + .map( + it -> { + String loadbalancerId = it.getId(); + Map details = Keys.parse(loadbalancerId).orElse(emptyMap()); + Map attributes = it.getAttributes(); + + return io.vavr.collection.HashMap.of( + "id", + loadbalancerId, + "details", + details, + "moniker", + convertOnDemandDetails(details), + "cacheTime", + attributes.get("cacheTime"), + "cacheExpiry", + attributes.get("cacheExpiry"), + "processedCount", + attributes.get("processedCount"), + "processedTime", + attributes.get("processedTime")) + .toJavaMap(); + }) + .collect(toSet()); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryServerGroupCachingAgent.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryServerGroupCachingAgent.java new file mode 100644 index 00000000000..dc1d2907abf --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryServerGroupCachingAgent.java @@ -0,0 +1,346 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
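The io.vavr.collection.HashMap.of(...).toJavaMap() idiom that recurs in these agents is simply a terse way to build a java.util.Map from literal key/value pairs; a sketch with hypothetical values:

import java.util.Map;

public class VavrMapSketch {
  public static void main(String[] args) {
    Map<String, Object> entry =
        io.vavr.collection.HashMap.<String, Object>of(
                "id", "lb-cache-key", // hypothetical cache key
                "cacheTime", 1_000L,
                "processedCount", 0)
            .toJavaMap();
    System.out.println(entry.get("processedCount")); // 0
  }
}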
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.*; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toSet; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.netflix.frigga.Names; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.moniker.Moniker; +import io.vavr.collection.HashMap; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Getter +@Slf4j +public class CloudFoundryServerGroupCachingAgent extends AbstractCloudFoundryCachingAgent { + private static final ObjectMapper cacheViewMapper = + new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION); + + private final Collection<AgentDataType> providedDataTypes = + Arrays.asList( + AUTHORITATIVE.forType(APPLICATIONS.getNs()), + AUTHORITATIVE.forType(CLUSTERS.getNs()), + AUTHORITATIVE.forType(SERVER_GROUPS.getNs()), + AUTHORITATIVE.forType(INSTANCES.getNs())); + + public CloudFoundryServerGroupCachingAgent( + CloudFoundryCredentials cloudFoundryCredentials, Registry registry) { + super(cloudFoundryCredentials, registry); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + long loadDataStart = this.getInternalClock().millis(); + String accountName = getAccountName(); + log.info("Caching all resources in Cloud Foundry account " + accountName); + + List<String> spaceFilters = + this.getCredentials().getFilteredSpaces().stream() + .map(s -> s.getId()) + .collect(Collectors.toList()); + + List<CloudFoundryApplication> apps = this.getClient().getApplications().all(spaceFilters); + List<CloudFoundryCluster> clusters = + apps.stream().flatMap(app -> app.getClusters().stream()).collect(Collectors.toList()); + + List<CloudFoundryServerGroup> serverGroups = + clusters.stream() + .flatMap(cluster -> cluster.getServerGroups().stream()) + .collect(Collectors.toList()); + + List<CloudFoundryInstance> instances = + serverGroups.stream() + .flatMap(serverGroup -> serverGroup.getInstances().stream()) + .collect(Collectors.toList()); + + Collection<CacheData> onDemandCacheData = + providerCache.getAll( + ON_DEMAND.getNs(), + providerCache.filterIdentifiers( + ON_DEMAND.getNs(), Keys.getServerGroupKey(accountName, "*", "*"))); + + List<String> toEvict = new ArrayList<>(); + Map<String, CacheData> toKeep = new java.util.HashMap<>(); + onDemandCacheData.forEach( + cacheData -> { + long cacheTime = (long) cacheData.getAttributes().get("cacheTime"); + if (cacheTime < loadDataStart + && (int) cacheData.getAttributes().computeIfAbsent("processedCount", s -> 0) > 0) { + toEvict.add(cacheData.getId()); + } else { + toKeep.put(cacheData.getId(), cacheData); + } + }); + + Map<String, Collection<CacheData>> results = + HashMap.<String, Collection<CacheData>>empty().toJavaMap(); + results.put( + APPLICATIONS.getNs(), + apps.stream().map(this::buildApplicationCacheData).collect(Collectors.toSet())); + results.put( + CLUSTERS.getNs(), + clusters.stream().map(this::buildClusterCacheData).collect(Collectors.toSet())); + results.put( + SERVER_GROUPS.getNs(), + serverGroups.stream() + .map(sg -> setServerGroupCacheData(toKeep, sg, loadDataStart)) + .filter(c -> c != null && c.getId() != null) + .collect(Collectors.toSet())); + results.put( + INSTANCES.getNs(), + instances.stream().map(this::buildInstanceCacheData).collect(Collectors.toSet())); + + onDemandCacheData.forEach(this::processOnDemandCacheData); + results.put(ON_DEMAND.getNs(), toKeep.values()); + + log.debug( + "Cache loaded for Cloud Foundry account {}, ({} sec)", + accountName, + (getInternalClock().millis() - loadDataStart) / 1000); + return new DefaultCacheResult(results, Collections.singletonMap(ON_DEMAND.getNs(), toEvict)); + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return type.equals(OnDemandType.ServerGroup) + && cloudProvider.equals(CloudFoundryProvider.PROVIDER_ID); + } + + @Override + public OnDemandResult handle(ProviderCache providerCache, Map<String, ?> data) { + String account = Optional.ofNullable(data.get("account")).map(Object::toString).orElse(null); + String region = Optional.ofNullable(data.get("region")).map(Object::toString).orElse(null); + if (account == null || region == null) { + return null; + } + + if (!this.getAccountName().equals(account)) { + return null; + } + + CloudFoundrySpace space = this.getClient().getSpaces().findSpaceByRegion(region).orElse(null); + if (space == null) { + return null; + } + String serverGroupName = + Optional.ofNullable(data.get("serverGroupName")).map(Object::toString).orElse(null); + if (serverGroupName == null) { + return null; + } + + log.info("On Demand cache refresh triggered, waiting for Server group loadData to be called"); + CloudFoundryServerGroup cloudFoundryServerGroup = + this.getClient() + .getApplications() + .findServerGroupByNameAndSpaceId(serverGroupName, space.getId()); + + String serverGroupKey = Keys.getServerGroupKey(this.getAccountName(), serverGroupName, region); + Map<String, Collection<String>> evictions; + DefaultCacheResult serverGroupCacheResults; + + if (cloudFoundryServerGroup != null) { + Collection<CacheData> serverGroupCacheData = + Collections.singleton(buildServerGroupCacheData(cloudFoundryServerGroup)); + + serverGroupCacheResults = + new DefaultCacheResult( + Collections.singletonMap(SERVER_GROUPS.getNs(), serverGroupCacheData)); + + providerCache.putCacheData( + ON_DEMAND.getNs(), + buildOnDemandCacheData(serverGroupKey, serverGroupCacheResults.getCacheResults())); + evictions = Collections.emptyMap(); + } else {
+ serverGroupCacheResults = + new DefaultCacheResult( + Collections.singletonMap(SERVER_GROUPS.getNs(), Collections.emptyList())); + evictions = + Collections.singletonMap( + SERVER_GROUPS.getNs(), + providerCache.filterIdentifiers(SERVER_GROUPS.getNs(), serverGroupKey)); + } + + return new OnDemandResult(getOnDemandAgentType(), serverGroupCacheResults, evictions); + } + + @Override + public Collection<Map<String, Object>> pendingOnDemandRequests(ProviderCache providerCache) { + Collection<String> keys = + providerCache.filterIdentifiers( + ON_DEMAND.getNs(), Keys.getServerGroupKey(this.getAccountName(), "*", "*")); + return providerCache.getAll(ON_DEMAND.getNs(), keys, RelationshipCacheFilter.none()).stream() + .map( + it -> { + String serverGroupId = it.getId(); + Map<String, String> details = Keys.parse(serverGroupId).orElse(emptyMap()); + Map<String, Object> attributes = it.getAttributes(); + + return HashMap.of( + "id", + serverGroupId, + "details", + details, + "moniker", + convertOnDemandDetails( + singletonMap("serverGroupName", details.get("serverGroup"))), + "cacheTime", + attributes.get("cacheTime"), + "cacheExpiry", + attributes.get("cacheExpiry"), + "processedCount", + attributes.get("processedCount"), + "processedTime", + attributes.get("processedTime")) + .toJavaMap(); + }) + .collect(toSet()); + } + + @Override + public Moniker convertOnDemandDetails(Map<String, String> monikerData) { + return Optional.ofNullable(monikerData) + .flatMap( + m -> + Optional.ofNullable(m.get("serverGroupName")) + .map( + serverGroupName -> { + Names names = Names.parseName(serverGroupName); + return Moniker.builder() + .app(names.getApp()) + .stack(names.getStack()) + .detail(names.getDetail()) + .cluster(names.getCluster()) + .sequence(names.getSequence()) + .build(); + })) + .orElse(null); + } + + private CacheData setServerGroupCacheData( + Map<String, CacheData> onDemandCacheDataToKeep, + CloudFoundryServerGroup serverGroup, + long start) { + String account = this.getAccountName(); + String key = Keys.getServerGroupKey(account, serverGroup.getName(), serverGroup.getRegion()); + CacheData sgCacheData = onDemandCacheDataToKeep.get(key); + if (sgCacheData != null && (long) sgCacheData.getAttributes().get("cacheTime") > start) { + Map<String, Collection<ResourceCacheData>> cacheResults = + getCacheResultsFromCacheData(sgCacheData); + onDemandCacheDataToKeep.remove(key); + return cacheResults.get(SERVER_GROUPS.getNs()).stream().findFirst().orElse(null); + } else { + return buildServerGroupCacheData(serverGroup); + } + } + + private CacheData buildApplicationCacheData(CloudFoundryApplication app) { + return new ResourceCacheData( + Keys.getApplicationKey(app.getName()), + cacheView(app), + singletonMap( + CLUSTERS.getNs(), + app.getClusters().stream() + .map( + cluster -> + Keys.getClusterKey(this.getAccountName(), app.getName(), cluster.getName())) + .collect(toSet()))); + } + + private CacheData buildClusterCacheData(CloudFoundryCluster cluster) { + String account = this.getAccountName(); + return new ResourceCacheData( + Keys.getClusterKey(account, cluster.getMoniker().getApp(), cluster.getName()), + cacheView(cluster), + singletonMap( + SERVER_GROUPS.getNs(), + cluster.getServerGroups().stream() + .map(sg -> Keys.getServerGroupKey(account, sg.getName(), sg.getRegion())) + .collect(toSet()))); + } + + private CacheData buildServerGroupCacheData(CloudFoundryServerGroup serverGroup) { + String account = this.getAccountName(); + return new ResourceCacheData( + Keys.getServerGroupKey(account, serverGroup.getName(), serverGroup.getRegion()), + cacheView(serverGroup), + HashMap.<String, Collection<String>>of( + INSTANCES.getNs(), + serverGroup.getInstances().stream() + .map(inst -> Keys.getInstanceKey(account, inst.getName())) + .collect(toSet()), + LOAD_BALANCERS.getNs(), + serverGroup.getLoadBalancers().stream() + .map( + lb -> + Keys.getLoadBalancerKey( + account, lb, serverGroup.getSpace().getRegion())) + .collect(toSet())) + .toJavaMap()); + } + + private CacheData buildInstanceCacheData(CloudFoundryInstance instance) { + return new ResourceCacheData( + Keys.getInstanceKey(this.getAccountName(), instance.getName()), + cacheView(instance), + emptyMap()); + } + + @Override + CacheData buildOnDemandCacheData(String key, Map<String, Collection<CacheData>> cacheResult) { + try { + return new DefaultCacheData( + key, + (int) TimeUnit.MINUTES.toSeconds(10), // ttl + ImmutableMap.of( + "cacheTime", + this.getInternalClock().instant().toEpochMilli(), + "cacheResults", + cacheViewMapper.writeValueAsString(cacheResult), + "processedCount", + 0), + emptyMap(), + this.getInternalClock()); + } catch (JsonProcessingException serializationException) { + throw new RuntimeException("cache results serialization failed", serializationException); + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundrySpaceCachingAgent.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundrySpaceCachingAgent.java new file mode 100644 index 00000000000..28011a1cb63 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundrySpaceCachingAgent.java @@ -0,0 +1,113 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.SPACES; +import static java.util.Collections.emptyMap; +import static java.util.stream.Collectors.toSet; + +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Getter +@Slf4j +public class CloudFoundrySpaceCachingAgent extends AbstractCloudFoundryCachingAgent { + + private static final ObjectMapper cacheViewMapper = + new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION); + + private final Collection<AgentDataType> providedDataTypes = + Arrays.asList(AUTHORITATIVE.forType(SPACES.getNs())); + + public CloudFoundrySpaceCachingAgent(CloudFoundryCredentials credentials, Registry registry) { + super(credentials, registry); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + long loadDataStart = this.getInternalClock().millis(); + String accountName = getAccountName(); + log.info("Caching all spaces in Cloud Foundry account " + accountName); + + List<CloudFoundrySpace> spaces = this.getClient().getSpaces().all(); + + if (!this.getCredentials().getFilteredSpaces().isEmpty()) { + List<String> filteredRegions = + this.getCredentials().getFilteredSpaces().stream() + .map(s -> s.getRegion()) + .collect(Collectors.toList()); + spaces = + spaces.stream() + .filter(s -> filteredRegions.contains(s.getRegion())) + .collect(Collectors.toList()); + } + + Map<String, Collection<CacheData>> results = + ImmutableMap.of( + SPACES.getNs(), + spaces.stream() + .map( + s -> + new ResourceCacheData( + Keys.getSpaceKey(accountName, s.getRegion()), cacheView(s), emptyMap())) + .collect(toSet())); + + log.debug( + "Space cache loaded for Cloud Foundry account {}, ({} sec)", + accountName, + (getInternalClock().millis() - loadDataStart) / 1000); + return new DefaultCacheResult(results, emptyMap()); + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return false; + } + + @Nullable + @Override + public OnDemandResult handle(ProviderCache providerCache, Map<String, ?> data) { + return null; + } + + @Override + public Collection<Map<String, Object>> pendingOnDemandRequests(ProviderCache providerCache) { + return null; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/config/CloudFoundryProviderConfig.java
b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/config/CloudFoundryProviderConfig.java index a38e31d72b3..952c194b52c 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/config/CloudFoundryProviderConfig.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/config/CloudFoundryProviderConfig.java @@ -16,72 +16,119 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.config; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; -import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundryCachingAgent; import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; -import com.netflix.spinnaker.clouddriver.security.ProviderUtils; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader; +import com.netflix.spinnaker.credentials.definition.BasicCredentialsLoader; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource; +import com.netflix.spinnaker.credentials.poller.Poller; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.binder.okhttp3.OkHttpMetricsEventListener; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import okhttp3.OkHttpClient; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.DependsOn; -import org.springframework.context.annotation.Scope; - -import java.util.Collections; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; @Configuration public class CloudFoundryProviderConfig { @Bean - @DependsOn("cloudFoundryAccountCredentials") - public CloudFoundryProvider cloudFoundryProvider(AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper) { - CloudFoundryProvider provider = new CloudFoundryProvider( - Collections.newSetFromMap(new ConcurrentHashMap<>())); - synchronizeCloudFoundryProvider(provider, accountCredentialsRepository, objectMapper); - return provider; + public CloudFoundryProvider cloudFoundryProvider() { + return new CloudFoundryProvider(); } @Bean - public CloudFoundryProviderSynchronizerTypeWrapper cloudFoundryProviderSynchronizerTypeWrapper() { - return new 
CloudFoundryProviderSynchronizerTypeWrapper(); + public ForkJoinPool cloudFoundryThreadPool( + CloudFoundryConfigurationProperties cloudFoundryConfigurationProperties) { + return new ForkJoinPool(cloudFoundryConfigurationProperties.getApiRequestParallelism()); } - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean - public CloudFoundryProviderSynchronizer synchronizeCloudFoundryProvider(CloudFoundryProvider cloudFoundryProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper) { - Set scheduledAccounts = ProviderUtils.getScheduledAccounts(cloudFoundryProvider); - Set allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, - CloudFoundryCredentials.class); - - objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); + public OkHttpClient cloudFoundryOkHttpClient( + CloudFoundryConfigurationProperties configurationProperties, MeterRegistry meterRegistry) { + return new OkHttpClient.Builder() + .connectTimeout( + configurationProperties.getClient().getConnectionTimeout(), TimeUnit.MILLISECONDS) + .readTimeout( + configurationProperties.getClient().getConnectionTimeout(), TimeUnit.MILLISECONDS) + .writeTimeout(configurationProperties.getClient().getReadTimeout(), TimeUnit.MILLISECONDS) + .eventListener( + OkHttpMetricsEventListener.builder(meterRegistry, "cf.okhttp.requests").build()) + .build(); + } - cloudFoundryProvider.getAgents().addAll(allAccounts.stream() - .map(credentials -> !scheduledAccounts.contains(credentials.getName()) ? - new CloudFoundryCachingAgent(credentials, objectMapper) : - null) - .filter(Objects::nonNull) - .collect(Collectors.toList())); + @Bean + @ConditionalOnMissingBean( + value = CloudFoundryCredentials.class, + parameterizedContainer = AbstractCredentialsLoader.class) + public AbstractCredentialsLoader cloudFoundryCredentialsLoader( + @Nullable + CredentialsDefinitionSource + cloudFoundryCredentialSource, + CloudFoundryConfigurationProperties configurationProperties, + CacheRepository cacheRepository, + CredentialsRepository cloudFoundryCredentialsRepository, + ForkJoinPool cloudFoundryThreadPool, + @Qualifier("cloudFoundryOkHttpClient") OkHttpClient okHttpClient) { - return new CloudFoundryProviderSynchronizer(); + if (cloudFoundryCredentialSource == null) { + cloudFoundryCredentialSource = configurationProperties::getAccounts; + } + return new BasicCredentialsLoader<>( + cloudFoundryCredentialSource, + a -> + new CloudFoundryCredentials( + a.getName(), + a.getAppsManagerUri(), + a.getMetricsUri(), + a.getApi(), + a.getUser(), + a.getPassword(), + a.getEnvironment(), + a.isSkipSslValidation(), + a.isOnlySpinnakerManaged(), + a.getResultsPerPage(), + cacheRepository, + a.getPermissions().build(), + cloudFoundryThreadPool, + a.getSpaceFilter(), + okHttpClient, + configurationProperties.getClient(), + configurationProperties.getLocalCacheConfig()), + cloudFoundryCredentialsRepository); } - class CloudFoundryProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - public Class getSynchronizerType() { - return CloudFoundryProviderSynchronizer.class; - } + @Bean + @ConditionalOnMissingBean( + value = CloudFoundryCredentials.class, + parameterizedContainer = CredentialsRepository.class) + public CredentialsRepository cloudFoundryCredentialsRepository( + CredentialsLifecycleHandler eventHandler) { + return new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, eventHandler); } - class CloudFoundryProviderSynchronizer { + @Bean + 
@ConditionalOnMissingBean( + value = CloudFoundryConfigurationProperties.ManagedAccount.class, + parameterizedContainer = CredentialsDefinitionSource.class) + public CredentialsInitializerSynchronizable cloudFoundryCredentialsInitializerSynchronizable( + AbstractCredentialsLoader loader) { + final Poller poller = new Poller<>(loader); + return new CredentialsInitializerSynchronizable() { + @Override + public void synchronize() { + poller.run(); + } + }; } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryApplicationProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryApplicationProvider.java new file mode 100644 index 00000000000..fe12f487e6d --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryApplicationProvider.java @@ -0,0 +1,52 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.APPLICATIONS; + +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryApplication; +import com.netflix.spinnaker.clouddriver.model.Application; +import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; +import java.util.Set; +import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@RequiredArgsConstructor +@Component +public class CloudFoundryApplicationProvider implements ApplicationProvider { + private final Cache cacheView; + private final CacheRepository repository; + + @Override + public Set getApplications(boolean expand) { + return repository.findApplicationsByKeys( + cacheView.filterIdentifiers(APPLICATIONS.getNs(), Keys.getApplicationKey("*")), + expand ? 
CacheRepository.Detail.NAMES_ONLY : CacheRepository.Detail.NONE); + } + + @Nullable + @Override + public CloudFoundryApplication getApplication(String name) { + return repository + .findApplicationByKey(Keys.getApplicationKey(name), CacheRepository.Detail.NAMES_ONLY) + .orElse(null); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryClusterProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryClusterProvider.java new file mode 100644 index 00000000000..f791088c669 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryClusterProvider.java @@ -0,0 +1,126 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.CLUSTERS; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toSet; + +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryCluster; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.model.Cluster; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import java.util.Collection; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@RequiredArgsConstructor +@Component +public class CloudFoundryClusterProvider implements ClusterProvider<CloudFoundryCluster> { + private final Cache cacheView; + private final CacheRepository repository; + + @Override + public String getCloudProviderId() { + return CloudFoundryCloudProvider.ID; + } + + private static Map<String, Set<CloudFoundryCluster>> distinctGroupByAccount( + Collection<CloudFoundryCluster> clusters) { + return clusters.stream().collect(groupingBy(Cluster::getAccountName, toSet())); + } + + @Override + public Map<String, Set<CloudFoundryCluster>> getClusters() { + return distinctGroupByAccount( + repository.findClustersByKeys( + cacheView.filterIdentifiers(CLUSTERS.getNs(), Keys.getClusterKey("*", "*", "*")), + CacheRepository.Detail.FULL)); + } + + @Override + public Map<String, Set<CloudFoundryCluster>> getClusterSummaries(String applicationName) { + return distinctGroupByAccount( + repository.findClustersByKeys( + cacheView.filterIdentifiers( + CLUSTERS.getNs(), Keys.getClusterKey("*", applicationName, "*")), + CacheRepository.Detail.NONE)); + } + + @Override + public Map<String, Set<CloudFoundryCluster>> getClusterDetails(String applicationName) { + return distinctGroupByAccount( + repository.findClustersByKeys(
cacheView.filterIdentifiers( + CLUSTERS.getNs(), Keys.getClusterKey("*", applicationName, "*")), + CacheRepository.Detail.FULL)); + } + + @Override + public Set getClusters(String applicationName, String account) { + return repository.findClustersByKeys( + cacheView.filterIdentifiers( + CLUSTERS.getNs(), Keys.getClusterKey(account, applicationName, "*")), + CacheRepository.Detail.FULL); + } + + @Nullable + @Override + public CloudFoundryCluster getCluster( + String applicationName, String account, String clusterName) { + return getCluster(applicationName, account, clusterName, true); + } + + @Nullable + @Override + public CloudFoundryCluster getCluster( + String application, String account, String name, boolean includeDetails) { + return repository + .findClusterByKey( + Keys.getClusterKey(account, application, name), + includeDetails ? CacheRepository.Detail.FULL : CacheRepository.Detail.NAMES_ONLY) + .orElse(null); + } + + @Nullable + @Override + public CloudFoundryServerGroup getServerGroup( + String account, String region, String name, boolean includeDetails) { + return repository + .findServerGroupByKey( + Keys.getServerGroupKey(account, name, region), + includeDetails ? CacheRepository.Detail.FULL : CacheRepository.Detail.NAMES_ONLY) + .orElse(null); + } + + @Override + public CloudFoundryServerGroup getServerGroup(String account, String region, String name) { + return getServerGroup(account, region, name, true); + } + + @Override + public boolean supportsMinimalClusters() { + return true; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryInstanceProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryInstanceProvider.java new file mode 100644 index 00000000000..a7793ba307d --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryInstanceProvider.java @@ -0,0 +1,109 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Logs; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.model.InstanceProvider; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@RequiredArgsConstructor +@Component +public class CloudFoundryInstanceProvider + implements InstanceProvider { + + private final CacheRepository repository; + private final CredentialsRepository credentialsRepository; + + @Nullable + @Override + public CloudFoundryInstance getInstance(String account, String region, String id) { + return repository.findInstanceByKey(Keys.getInstanceKey(account, id)).orElse(null); + } + + @Override + public String getConsoleOutput(String account, String region, String id) { + CloudFoundryCredentials credentials = credentialsRepository.getOne(account); + if (credentials == null) { + return null; + } + final CloudFoundryClient client = credentials.getClient(); + final Logs logsService = client.getLogs(); + + final CloudFoundryConsoleOutputIdParameter idParam = + CloudFoundryConsoleOutputIdParameter.fromString(id); + + final String logs; + LogsResourceType logsResourceType = idParam.logsResourceType; + switch (logsResourceType) { + case APP: + logs = logsService.recentApplicationLogs(idParam.guid, idParam.instanceIndex); + break; + case TASK: + Task task = client.getTasks().getTask(idParam.guid); + String appGuid = task.getLinks().get("app").getGuid(); + logs = logsService.recentTaskLogs(appGuid, task.getName()); + break; + default: + throw new IllegalArgumentException("Unsupported LogsResourceType: " + logsResourceType); + } + + return logs; + } + + public final String getCloudProvider() { + return CloudFoundryCloudProvider.ID; + } + + @RequiredArgsConstructor + @Data + static class CloudFoundryConsoleOutputIdParameter { + private final LogsResourceType logsResourceType; + private final String guid; + private final int instanceIndex; + + static CloudFoundryConsoleOutputIdParameter fromString(String value) { + try { + String[] parts = value.split(":"); + LogsResourceType type = LogsResourceType.valueOf(parts[0].toUpperCase()); + return new CloudFoundryConsoleOutputIdParameter( + type, parts[1], type == LogsResourceType.APP ? Integer.parseInt(parts[2]) : 0); + } catch (Exception e) { + throw new IllegalArgumentException( + String.format( + "Error parsing '%s'. 
Expected format: 'app:appGuid:instanceIndex' or 'task:taskGuid'", + value), + e); + } + } + } + + enum LogsResourceType { + APP, + TASK + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryJobProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryJobProvider.java new file mode 100644 index 00000000000..55722871445 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryJobProvider.java @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Task; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryJobStatus; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.model.JobProvider; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Map; +import lombok.Getter; +import org.springframework.stereotype.Component; + +@Component +public class CloudFoundryJobProvider implements JobProvider { + + @Getter private String platform = CloudFoundryCloudProvider.ID; + private final CredentialsRepository credentialsRepository; + + public CloudFoundryJobProvider( + CredentialsRepository credentialsRepository) { + this.credentialsRepository = credentialsRepository; + } + + @Override + public CloudFoundryJobStatus collectJob(String account, String location, String id) { + CloudFoundryCredentials credentials = credentialsRepository.getOne(account); + if (credentials == null) { + return null; + } + + Task task = credentials.getClient().getTasks().getTask(id); + return CloudFoundryJobStatus.fromTask(task, account, location); + } + + @Override + public Map getFileContents( + String account, String location, String id, String fileName) { + return null; + } + + @Override + public void cancelJob(String account, String location, String taskGuid) { + CloudFoundryCredentials credentials = credentialsRepository.getOne(account); + if (credentials == null) { + return; + } + credentials.getClient().getTasks().cancelTask(taskGuid); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryLoadBalancerProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryLoadBalancerProvider.java new file mode 100644 index 00000000000..b4a10de34e1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryLoadBalancerProvider.java @@ -0,0 +1,156 @@ +/* + * Copyright 2018 Pivotal, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository.Detail.FULL; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository.Detail.NAMES_ONLY; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.CLUSTERS; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.LOAD_BALANCERS; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.*; +import javax.annotation.Nullable; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@RequiredArgsConstructor +@Component +class CloudFoundryLoadBalancerProvider implements LoadBalancerProvider { + private final Cache cacheView; + private final CacheRepository repository; + + @Override + public String getCloudProvider() { + return CloudFoundryCloudProvider.ID; + } + + @Override + public List list() { + return new ArrayList<>( + summarizeLoadBalancers( + cacheView.filterIdentifiers(LOAD_BALANCERS.getNs(), Keys.getAllLoadBalancers())) + .values()); + } + + @Nullable + @Override + public CloudFoundryLoadBalancerSummary get(String name) { + return null; // intentionally null, unused + } + + @Nullable + @Override + public List byAccountAndRegionAndName( + String account, String region, String name) { + return null; // intentionally null, unused + } + + /** + * @return The set of CF routes that are mapped to CF apps representing server groups inside of + * this application. Once a route is unmapped from the app, it will no longer show up as a + * load balancer for the app. 
+ */ + @Override + public Set getApplicationLoadBalancers(String application) { + return repository.findLoadBalancersByClusterKeys( + cacheView.filterIdentifiers(CLUSTERS.getNs(), Keys.getClusterKey("*", application, "*")), + NAMES_ONLY); + } + + private Map summarizeLoadBalancers( + Collection loadBalancerKeys) { + Map summariesByAccount = new HashMap<>(); + + for (CloudFoundryLoadBalancer loadBalancer : + repository.findLoadBalancersByKeys(loadBalancerKeys, FULL)) { + String account = loadBalancer.getAccount(); + CloudFoundryLoadBalancerSummary summary = + summariesByAccount.computeIfAbsent(account, CloudFoundryLoadBalancerSummary::new); + + CloudFoundryLoadBalancerDetail detail = + new CloudFoundryLoadBalancerDetail( + account, loadBalancer.getName(), loadBalancer.getSpace()); + + summary + .accounts + .computeIfAbsent(account, CloudFoundryLoadBalancerAccount::new) + .regions + .computeIfAbsent( + loadBalancer.getSpace().getRegion(), CloudFoundryLoadBalancerAccountRegion::new) + .loadBalancers + .add(detail); + } + + return summariesByAccount; + } + + @RequiredArgsConstructor + @Getter + public static class CloudFoundryLoadBalancerSummary implements Item { + private final String name; + + @JsonIgnore + private final Map accounts = new HashMap<>(); + + @Override + public List getByAccounts() { + return new ArrayList<>(accounts.values()); + } + } + + @RequiredArgsConstructor + @Getter + public static class CloudFoundryLoadBalancerAccount implements ByAccount { + private final String name; + + @JsonIgnore + private final Map regions = new HashMap<>(); + + @JsonProperty("regions") + public List getByRegions() { + return new ArrayList<>(regions.values()); + } + } + + @RequiredArgsConstructor + @Getter + public static class CloudFoundryLoadBalancerAccountRegion implements ByRegion { + private final String name; + private final List loadBalancers = new ArrayList<>(); + } + + @RequiredArgsConstructor + @Getter + public static class CloudFoundryLoadBalancerDetail implements Details { + private final String account; + private final String name; + private final CloudFoundrySpace space; + + public String getType() { + return "cf"; + } + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryServiceProvider.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryServiceProvider.java new file mode 100644 index 00000000000..1c2bc86a3bb --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryServiceProvider.java @@ -0,0 +1,68 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.CloudFoundryCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.model.ServiceInstance; +import com.netflix.spinnaker.clouddriver.model.ServiceProvider; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Collection; +import java.util.Collections; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class CloudFoundryServiceProvider implements ServiceProvider { + private final CredentialsRepository credentialsRepository; + + @Autowired + public CloudFoundryServiceProvider( + CredentialsRepository credentialsRepository) { + this.credentialsRepository = credentialsRepository; + } + + @Override + public Collection getServices(String account, String region) { + CloudFoundryCredentials credentials = credentialsRepository.getOne(account); + if (credentials == null) { + return Collections.emptyList(); + } + + return credentials.getCredentials().getServiceInstances().findAllServicesByRegion(region); + } + + @Override + public ServiceInstance getServiceInstance( + String account, String region, String serviceInstanceName) { + CloudFoundryCredentials credentials = credentialsRepository.getOne(account); + if (credentials == null) { + return null; + } + + return credentials + .getCredentials() + .getServiceInstances() + .getServiceInstance(region, serviceInstanceName); + } + + @Override + public String getCloudProvider() { + return CloudFoundryCloudProvider.ID; + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentials.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentials.java index 49816563da1..44a5c922737 100644 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentials.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentials.java @@ -16,30 +16,255 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.security; -import com.fasterxml.jackson.annotation.JsonIgnore; +import static java.util.Collections.*; +import static java.util.stream.Collectors.toList; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.HttpCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import 
java.util.*; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.AccessLevel; import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import okhttp3.OkHttpClient; -import java.util.Collections; -import java.util.List; - +@Slf4j @Getter -public class CloudFoundryCredentials implements AccountCredentials { +@JsonIgnoreProperties({ + "credentials", + "client", + "password", + "spaceSupplier", + "cacheRepository", + "forkJoinPool", + "filteredSpaces", + "spacesLive" +}) +public class CloudFoundryCredentials extends AbstractAccountCredentials { + private static final int SPACE_EXPIRY_SECONDS = 30; + private final String name; - private final String environment; + private final String appsManagerUri; + private final String metricsUri; + private final String apiHost; + private final String userName; + private final String password; + private final boolean skipSslValidation; + private final boolean onlySpinnakerManaged; + + @Nullable private final String environment; + @Nullable private final Integer resultsPerPage; + private final String accountType = "cloudfoundry"; private final String cloudProvider = "cloudfoundry"; - @Deprecated - private final List requiredGroupMembership = Collections.emptyList(); + @Deprecated private final List requiredGroupMembership = Collections.emptyList(); + + private final Supplier> spaceSupplier = + Memoizer.memoizeWithExpiration(this::spaceSupplier, SPACE_EXPIRY_SECONDS, TimeUnit.SECONDS); - @JsonIgnore - private final CloudFoundryClient credentials; + private final CacheRepository cacheRepository; + private final Permissions permissions; + private final ForkJoinPool forkJoinPool; + private final List filteredSpaces; - public CloudFoundryCredentials(String name, String apiHost, String userName, String password, String environment) { + @Getter(AccessLevel.NONE) + private final CloudFoundryClient cloudFoundryClient; + + public CloudFoundryCredentials( + String name, + String appsManagerUri, + String metricsUri, + String apiHost, + String userName, + String password, + String environment, + boolean skipSslValidation, + boolean onlySpinnakerManaged, + Integer resultsPerPage, + CacheRepository cacheRepository, + Permissions permissions, + ForkJoinPool forkJoinPool, + Map> spaceFilter, + OkHttpClient okHttpClient, + CloudFoundryConfigurationProperties.ClientConfig clientConfig, + CloudFoundryConfigurationProperties.LocalCacheConfig localCacheConfig) { this.name = name; - this.environment = environment; - this.credentials = new CloudFoundryClient(name, apiHost, userName, password); + this.appsManagerUri = appsManagerUri; + this.metricsUri = metricsUri; + this.apiHost = apiHost; + this.userName = userName; + this.password = password; + this.environment = Optional.ofNullable(environment).orElse("dev"); + this.skipSslValidation = skipSslValidation; + this.onlySpinnakerManaged = onlySpinnakerManaged; + this.resultsPerPage = Optional.ofNullable(resultsPerPage).orElse(100); + this.cacheRepository = cacheRepository; + this.permissions = permissions == null ? 
Permissions.EMPTY : permissions; + this.forkJoinPool = forkJoinPool; + this.cloudFoundryClient = + new HttpCloudFoundryClient( + name, + appsManagerUri, + metricsUri, + apiHost, + userName, + password, + true, + skipSslValidation, + onlySpinnakerManaged, + resultsPerPage, + forkJoinPool, + okHttpClient.newBuilder(), + clientConfig, + localCacheConfig); + this.filteredSpaces = createFilteredSpaces(spaceFilter); + } + + public CloudFoundryClient getCredentials() { + return getClient(); + } + + public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + + public Collection<Map<String, String>> getRegions() { + return spaceSupplier.get().stream() + .filter( + s -> { + if (!filteredSpaces.isEmpty()) { + List<String> filteredRegions = + filteredSpaces.stream().map(CloudFoundrySpace::getRegion).collect(toList()); + return filteredRegions.contains(s.getRegion()); + } + return true; + }) + .map(space -> singletonMap("name", space.getRegion())) + .collect(toList()); + } + + protected List<CloudFoundrySpace> spaceSupplier() { + Set<CloudFoundrySpace> spaces = cacheRepository.findSpacesByAccount(name); + if (!spaces.isEmpty()) { + return new ArrayList<>(spaces); + } + return getSpacesLive(); + } + + private List<CloudFoundrySpace> getSpacesLive() { + try { + return getClient().getSpaces().all(); + } catch (CloudFoundryApiException e) { + log.warn("Unable to determine regions for Cloud Foundry account " + name, e); + return emptyList(); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CloudFoundryCredentials)) { + return false; + } + CloudFoundryCredentials that = (CloudFoundryCredentials) o; + return name.equals(that.name) + && Objects.equals(appsManagerUri, that.appsManagerUri) + && Objects.equals(metricsUri, that.metricsUri) + && Objects.equals(userName, that.userName) + && Objects.equals(password, that.password) + && Objects.equals(environment, that.environment) + && Objects.equals(skipSslValidation, that.skipSslValidation) + && Objects.equals(resultsPerPage, that.resultsPerPage); + } + + @Override + public int hashCode() { + return Objects.hash( + name, + appsManagerUri, + metricsUri, + userName, + password, + environment, + skipSslValidation, + resultsPerPage); + } + + protected List<CloudFoundrySpace> createFilteredSpaces(Map<String, Set<String>> spaceFilter) { + List<CloudFoundrySpace> spaces = new ArrayList<>(); + if (spaceFilter.isEmpty()) { + return emptyList(); + } + + Set<String> filteredRegions = new HashSet<>(); + // IF an Org is provided without spaces -> add all spaces for the ORG + for (String orgName : spaceFilter.keySet()) { + if (spaceFilter.get(orgName) == null || spaceFilter.get(orgName).isEmpty()) { + List<CloudFoundrySpace> allSpacesByOrg = + this.getClient() + .getSpaces() + .findAllBySpaceNamesAndOrgNames(null, singletonList(orgName)); + spaces.addAll(allSpacesByOrg); + } else { + for (String spaceName : spaceFilter.get(orgName)) { + filteredRegions.add(orgName + " > " + spaceName); + } + } + } + // IF an Org is provided with spaces -> add all spaces that are in the ORG and filteredRegions + List<CloudFoundrySpace> allSpaces = + this.getClient() + .getSpaces() + .findAllBySpaceNamesAndOrgNames( + spaceFilter.values().stream() + .flatMap(Collection::stream) + .collect(Collectors.toList()), + List.copyOf(spaceFilter.keySet())); + allSpaces.stream().filter(s -> filteredRegions.contains(s.getRegion())).forEach(spaces::add); + + if (spaces.isEmpty()) + throw new IllegalArgumentException( + "The spaceFilter had Orgs and/or Spaces but CloudFoundry returned no spaces as a result.
Spaces must not be null or empty when a spaceFilter is included."); + + return ImmutableList.copyOf(spaces); + } + + /** + * Thin wrapper around a Caffeine cache that handles memoizing a supplier function with expiration + */ + private static class Memoizer<T> implements Supplier<T> { + private static final String CACHE_KEY = "key"; + private final LoadingCache<String, T> cache; + + private Memoizer(Supplier<T> supplier, long expirySeconds, TimeUnit timeUnit) { + this.cache = + Caffeine.newBuilder() + .refreshAfterWrite(expirySeconds, timeUnit) + .build(key -> supplier.get()); + } + + public static <U> Memoizer<U> memoizeWithExpiration( + Supplier<U> supplier, long expirySeconds, TimeUnit timeUnit) { + return new Memoizer<>(supplier, expirySeconds, timeUnit); + } + + public T get() { + return cache.get(CACHE_KEY); + } } } diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsInitializer.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsInitializer.java deleted file mode 100644 index 4ea3c49975b..00000000000 --- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsInitializer.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2018 Pivotal, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.cloudfoundry.security; - -import com.netflix.spinnaker.cats.module.CatsModule; -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper; -import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; -import com.netflix.spinnaker.clouddriver.security.ProviderUtils; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; -import org.springframework.context.ApplicationContext; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Scope; - -import java.util.List; -import java.util.stream.Collectors; - -@Configuration -public class CloudFoundryCredentialsInitializer implements CredentialsInitializerSynchronizable { - - @Bean - public List cloudFoundryAccountCredentials(CloudFoundryConfigurationProperties cloudFoundryConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - return synchronizeCloudFoundryAccounts(cloudFoundryConfigurationProperties, null, accountCredentialsRepository, - applicationContext, providerSynchronizerTypeWrappers); - } - - @Override - public String getCredentialsSynchronizationBeanName() { - return "synchronizeCloudFoundryAccounts"; - } - - @SuppressWarnings("unchecked") - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - public List synchronizeCloudFoundryAccounts(CloudFoundryConfigurationProperties cloudFoundryConfigurationProperties, - CatsModule catsModule, - AccountCredentialsRepository accountCredentialsRepository, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - List deltas = ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, CloudFoundryCredentials.class, - cloudFoundryConfigurationProperties.getAccounts()); - - List accountsToAdd = (List) deltas.get(0); - List namesOfDeletedAccounts = (List) deltas.get(1); - - for (CloudFoundryConfigurationProperties.ManagedAccount managedAccount : accountsToAdd) { - CloudFoundryCredentials cloudFoundryAccountCredentials = new CloudFoundryCredentials( - managedAccount.getName(), - managedAccount.getApi(), - managedAccount.getUser(), - managedAccount.getPassword(), - managedAccount.getEnvironment() - ); - accountCredentialsRepository.save(managedAccount.getName(), cloudFoundryAccountCredentials); - } - - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule); - - if (!accountsToAdd.isEmpty() && catsModule != null) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers); - } - - return accountCredentialsRepository.getAll().stream() - .filter(CloudFoundryCredentials.class::isInstance) - .map(CloudFoundryCredentials.class::cast) - .collect(Collectors.toList()); - } -} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsLifecycleHandler.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsLifecycleHandler.java new file mode 100644 index 00000000000..6a02338f1bb --- /dev/null +++ 
b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsLifecycleHandler.java @@ -0,0 +1,68 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.security; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundryLoadBalancerCachingAgent; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundryServerGroupCachingAgent; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundrySpaceCachingAgent; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import java.util.Collections; +import java.util.List; +import lombok.RequiredArgsConstructor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Component; + +@Component +@RequiredArgsConstructor +public class CloudFoundryCredentialsLifecycleHandler + implements CredentialsLifecycleHandler<CloudFoundryCredentials> { + private static final Logger log = + LoggerFactory.getLogger(CloudFoundryCredentialsLifecycleHandler.class); + private final CloudFoundryProvider provider; + private final Registry registry; + + @Override + public void credentialsAdded(CloudFoundryCredentials credentials) { + log.info("Adding agents for new account {}", credentials.getName()); + provider.addAgents(agentsForCredentials(credentials)); + } + + @Override + public void credentialsUpdated(CloudFoundryCredentials credentials) { + log.info("Refreshing agents for updated account {}", credentials.getName()); + provider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + provider.addAgents(agentsForCredentials(credentials)); + } + + @Override + public void credentialsDeleted(CloudFoundryCredentials credentials) { + log.info("Removing agents for deleted account {}", credentials.getName()); + provider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + } + + private List<Agent> agentsForCredentials(CloudFoundryCredentials credentials) { + return List.of( + new CloudFoundryServerGroupCachingAgent(credentials, registry), + new CloudFoundryLoadBalancerCachingAgent(credentials, registry), + new CloudFoundrySpaceCachingAgent(credentials, registry)); + } +} diff --git a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/config/CloudFoundryConfiguration.java b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/config/CloudFoundryConfiguration.java index e860a25dc69..f9f9700fd7d 100--- a/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/config/CloudFoundryConfiguration.java +++ b/clouddriver-cloudfoundry/src/main/java/com/netflix/spinnaker/config/CloudFoundryConfiguration.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Pivotal, Inc. + * Copyright 2019 Pivotal, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. @@ -16,11 +16,11 @@ package com.netflix.spinnaker.config; -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper; import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; -import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentialsInitializer; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.cloud.context.config.annotation.RefreshScope; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.Configuration; @@ -34,27 +34,15 @@ public class CloudFoundryConfiguration { @Bean + @RefreshScope CloudFoundryConfigurationProperties cloudFoundryConfigurationProperties() { return new CloudFoundryConfigurationProperties(); } @Bean - CloudFoundrySynchronizerTypeWrapper cloudFoundrySynchronizerTypeWrapper() { - return new CloudFoundrySynchronizerTypeWrapper(); - } - - @Bean - CloudFoundryCredentialsInitializer cloudFoundryCredentialsInitializer() { - return new CloudFoundryCredentialsInitializer(); - } - - public static class CloudFoundryProviderSynchronizer { - } - - class CloudFoundrySynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - public Class getSynchronizerType() { - return CloudFoundryProviderSynchronizer.class; - } + OperationPoller cloudFoundryOperationPoller(CloudFoundryConfigurationProperties properties) { + return new OperationPoller( + properties.getAsyncOperationTimeoutMillisecondsDefault(), + properties.getAsyncOperationMaxPollingIntervalMilliseconds()); } } diff --git a/clouddriver-cloudfoundry/src/main/proto/README.md b/clouddriver-cloudfoundry/src/main/proto/README.md new file mode 100644 index 00000000000..2e02c219b87 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/README.md @@ -0,0 +1,318 @@ +Sourced from https://github.com/cloudfoundry/dropsonde-protocol + +# Protocol Documentation + + +## Table of Contents +* [envelope.proto](#envelope.proto) + * [Envelope](#events.Envelope) + * [Envelope.TagsEntry](#events.Envelope.TagsEntry) + * [Envelope.EventType](#events.Envelope.EventType) +* [error.proto](#error.proto) + * [Error](#events.Error) +* [http.proto](#http.proto) + * [HttpStartStop](#events.HttpStartStop) + * [Method](#events.Method) + * [PeerType](#events.PeerType) +* [log.proto](#log.proto) + * [LogMessage](#events.LogMessage) + * [LogMessage.MessageType](#events.LogMessage.MessageType) +* [metric.proto](#metric.proto) + * [ContainerMetric](#events.ContainerMetric) + * [CounterEvent](#events.CounterEvent) + * [ValueMetric](#events.ValueMetric) +* [uuid.proto](#uuid.proto) + * [UUID](#events.UUID) +* [Scalar Value Types](#scalar-value-types) + + +


+ +## envelope.proto + + +### Envelope + +Envelope wraps an Event and adds metadata. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| origin | [string](#string) | required | Unique description of the origin of this event. | +| eventType | [Envelope.EventType](#events.Envelope.EventType) | required | Type of wrapped event. Only the optional field corresponding to the value of eventType should be set. | +| timestamp | [int64](#int64) | optional | UNIX timestamp (in nanoseconds) event was wrapped in this Envelope. | +| deployment | [string](#string) | optional | Deployment name (used to uniquely identify source). | +| job | [string](#string) | optional | Job name (used to uniquely identify source). | +| index | [string](#string) | optional | Index of job (used to uniquely identify source). | +| ip | [string](#string) | optional | IP address (used to uniquely identify source). | +| tags | [Envelope.TagsEntry](#events.Envelope.TagsEntry) | repeated | key/value tags to include additional identifying information. | +| httpStartStop | [HttpStartStop](#events.HttpStartStop) | optional | | +| logMessage | [LogMessage](#events.LogMessage) | optional | | +| valueMetric | [ValueMetric](#events.ValueMetric) | optional | | +| counterEvent | [CounterEvent](#events.CounterEvent) | optional | | +| error | [Error](#events.Error) | optional | | +| containerMetric | [ContainerMetric](#events.ContainerMetric) | optional | | + + +### Envelope.TagsEntry + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | optional | | +| value | [string](#string) | optional | | + + + +### Envelope.EventType + +Type of the wrapped event. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| HttpStartStop | 4 | | +| LogMessage | 5 | | +| ValueMetric | 6 | | +| CounterEvent | 7 | | +| Error | 8 | | +| ContainerMetric | 9 | | + + +

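To make the envelope contract concrete, here is a minimal, hypothetical sketch that wraps a ValueMetric using the protoc-generated Java bindings (the package `org.cloudfoundry.dropsonde.events` and the outer classnames come from the `.proto` files added later in this change; the origin string is illustrative):

```java
import org.cloudfoundry.dropsonde.events.EventFactory.Envelope;
import org.cloudfoundry.dropsonde.events.MetricFactory.ValueMetric;

class EnvelopeSketch {
  // Wraps a single event; per the contract above, only the optional field
  // matching eventType (here, valueMetric) is set.
  static Envelope wrap(ValueMetric metric) {
    return Envelope.newBuilder()
        .setOrigin("example-origin") // required: unique description of the event source
        .setEventType(Envelope.EventType.ValueMetric)
        .setValueMetric(metric)
        .setTimestamp(System.currentTimeMillis() * 1_000_000L) // nanoseconds
        .build();
  }
}
```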

+ +## error.proto + + + + +### Error + +An Error event represents an error in the originating process. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source | [string](#string) | required | Source of the error. This may or may not be the same as the Origin in the envelope. | +| code | [int32](#int32) | required | Numeric error code. This is provided for programmatic responses to the error. | +| message | [string](#string) | required | Error description (preferably human-readable). | + + +

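As a small illustration, constructing an Error event with the generated bindings might look like the following sketch (the source, code, and message values are made up; only these three required fields exist on the message):

```java
import org.cloudfoundry.dropsonde.events.ErrorFactory.Error;

class ErrorSketch {
  static Error example() {
    return Error.newBuilder()
        .setSource("doppler") // may differ from the Envelope's origin
        .setCode(503) // numeric code intended for programmatic handling
        .setMessage("upstream connection refused") // human-readable description
        .build();
  }
}
```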

+ +## http.proto + + + + +### HttpStartStop + +An HttpStartStop event represents the whole lifecycle of an HTTP request. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| startTimestamp | [int64](#int64) | required | UNIX timestamp (in nanoseconds) when the request was sent (by a client) or received (by a server). | +| stopTimestamp | [int64](#int64) | required | UNIX timestamp (in nanoseconds) when the request was received. | +| requestId | [UUID](#events.UUID) | required | ID for tracking lifecycle of request. | +| peerType | [PeerType](#events.PeerType) | required | Role of the emitting process in the request cycle. | +| method | [Method](#events.Method) | required | Method of the request. | +| uri | [string](#string) | required | Destination of the request. | +| remoteAddress | [string](#string) | required | Remote address of the request. (For a server, this should be the origin of the request.) | +| userAgent | [string](#string) | required | Contents of the UserAgent header on the request. | +| statusCode | [int32](#int32) | required | Status code returned with the response to the request. | +| contentLength | [int64](#int64) | required | Length of response (bytes). | +| applicationId | [UUID](#events.UUID) | optional | If this request was made in relation to an application, this field should track that application's ID. | +| instanceIndex | [int32](#int32) | optional | Index of the application instance. | +| instanceId | [string](#string) | optional | ID of the application instance. | +| forwarded | [string](#string) | repeated | This contains the HTTP forwarded-for [x-forwarded-for] header from the request. | + + + +### Method + +HTTP method. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| GET | 1 | | +| POST | 2 | | +| PUT | 3 | | +| DELETE | 4 | | +| HEAD | 5 | | +| ACL | 6 | | +| BASELINE_CONTROL | 7 | | +| BIND | 8 | | +| CHECKIN | 9 | | +| CHECKOUT | 10 | | +| CONNECT | 11 | | +| COPY | 12 | | +| DEBUG | 13 | | +| LABEL | 14 | | +| LINK | 15 | | +| LOCK | 16 | | +| MERGE | 17 | | +| MKACTIVITY | 18 | | +| MKCALENDAR | 19 | | +| MKCOL | 20 | | +| MKREDIRECTREF | 21 | | +| MKWORKSPACE | 22 | | +| MOVE | 23 | | +| OPTIONS | 24 | | +| ORDERPATCH | 25 | | +| PATCH | 26 | | +| PRI | 27 | | +| PROPFIND | 28 | | +| PROPPATCH | 29 | | +| REBIND | 30 | | +| REPORT | 31 | | +| SEARCH | 32 | | +| SHOWMETHOD | 33 | | +| SPACEJUMP | 34 | | +| TEXTSEARCH | 35 | | +| TRACE | 36 | | +| TRACK | 37 | | +| UNBIND | 38 | | +| UNCHECKOUT | 39 | | +| UNLINK | 40 | | +| UNLOCK | 41 | | +| UPDATE | 42 | | +| UPDATEREDIRECTREF | 43 | | +| VERSION_CONTROL | 44 | | + +### PeerType + +Type of peer handling request. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| Client | 1 | Request is made by this process. | +| Server | 2 | Request is received by this process. | + + + + +

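Since every field except the last four is required, emitting a valid HttpStartStop means populating the whole request/response summary at once. A hypothetical server-side sketch with the generated bindings (all field values are illustrative):

```java
import org.cloudfoundry.dropsonde.events.HttpFactory.HttpStartStop;
import org.cloudfoundry.dropsonde.events.HttpFactory.Method;
import org.cloudfoundry.dropsonde.events.HttpFactory.PeerType;
import org.cloudfoundry.dropsonde.events.UuidFactory.UUID;

class HttpStartStopSketch {
  // One event summarizes the full request/response cycle as seen by this peer.
  static HttpStartStop example(UUID requestId, long startNs, long stopNs) {
    return HttpStartStop.newBuilder()
        .setStartTimestamp(startNs) // nanoseconds
        .setStopTimestamp(stopNs)
        .setRequestId(requestId)
        .setPeerType(PeerType.Server) // this process received the request
        .setMethod(Method.GET)
        .setUri("http://example.com/v2/apps")
        .setRemoteAddress("10.0.0.1")
        .setUserAgent("curl/8.0")
        .setStatusCode(200)
        .setContentLength(512)
        .build();
  }
}
```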

+ +## log.proto + + + + +### LogMessage + +A LogMessage contains a "log line" and associated metadata. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| message | [bytes](#bytes) | required | Bytes of the log message. (Note that it is not required to be a single line.) | +| message_type | [LogMessage.MessageType](#events.LogMessage.MessageType) | required | Type of the message (OUT or ERR). | +| timestamp | [int64](#int64) | required | UNIX timestamp (in nanoseconds) when the log was written. | +| app_id | [string](#string) | optional | Application that emitted the message (or to which the application is related). | +| source_type | [string](#string) | optional | Source of the message. For Cloud Foundry, this can be "APP", "RTR", "DEA", "STG", etc. | +| source_instance | [string](#string) | optional | Instance that emitted the message. | + + + +### LogMessage.MessageType + +MessageType stores the destination of the message (corresponding to STDOUT or STDERR). + +| Name | Number | Description | +| ---- | ------ | ----------- | +| OUT | 1 | | +| ERR | 2 | | + + + + +

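For example, one STDOUT line from an app instance could be encoded like this (a hedged sketch against the generated bindings; the guid and source values are invented):

```java
import com.google.protobuf.ByteString;
import org.cloudfoundry.dropsonde.events.LogFactory.LogMessage;

class LogMessageSketch {
  static LogMessage example(String appGuid, String line) {
    return LogMessage.newBuilder()
        .setMessage(ByteString.copyFromUtf8(line)) // bytes; need not be a single line
        .setMessageType(LogMessage.MessageType.OUT) // STDOUT
        .setTimestamp(System.currentTimeMillis() * 1_000_000L) // nanoseconds
        .setAppId(appGuid)
        .setSourceType("APP")
        .setSourceInstance("0")
        .build();
  }
}
```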

+ +## metric.proto + + + + +### ContainerMetric + +A ContainerMetric records resource usage of an app in a container. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| applicationId | [string](#string) | required | ID of the contained application. | +| instanceIndex | [int32](#int32) | required | Instance index of the contained application. (This, with applicationId, should uniquely identify a container.) | +| cpuPercentage | [double](#double) | required | CPU based on number of cores. | +| memoryBytes | [uint64](#uint64) | required | Bytes of memory used. | +| diskBytes | [uint64](#uint64) | required | Bytes of disk used. | +| memoryBytesQuota | [uint64](#uint64) | optional | Maximum bytes of memory allocated to container. | +| diskBytesQuota | [uint64](#uint64) | optional | Maximum bytes of disk allocated to container. | + + +### CounterEvent + +A CounterEvent represents the increment of a counter. It contains only the change in the value; it is the responsibility of downstream consumers to maintain the value of the counter. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | required | Name of the counter. Must be consistent for downstream consumers to associate events semantically. | +| delta | [uint64](#uint64) | required | Amount by which to increment the counter. | +| total | [uint64](#uint64) | optional | Total value of the counter. This will be overridden by Metron, which internally tracks the total of each named Counter it receives. | + + +### ValueMetric + +A ValueMetric indicates the value of a metric at an instant in time. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | required | Name of the metric. Must be consistent for downstream consumers to associate events semantically. | +| value | [double](#double) | required | Value at the time of event emission. | +| unit | [string](#string) | required | Unit of the metric. Please see http://metrics20.org/spec/#units for ideas; SI units/prefixes are recommended where applicable. Should be consistent for the life of the metric (consumers are expected to report, but not interpret, prefixes). | + + + + + + +

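The key distinction above is absolute readings (ValueMetric) versus increments (CounterEvent). A brief sketch of both, again assuming the generated bindings and invented metric names:

```java
import org.cloudfoundry.dropsonde.events.MetricFactory.CounterEvent;
import org.cloudfoundry.dropsonde.events.MetricFactory.ValueMetric;

class MetricSketch {
  // Absolute value at an instant; the unit travels with the metric.
  static ValueMetric gauge(double usedBytes) {
    return ValueMetric.newBuilder()
        .setName("memory.used")
        .setValue(usedBytes)
        .setUnit("bytes")
        .build();
  }

  // Only the delta is emitted; downstream consumers keep the running total.
  static CounterEvent increment(String counterName) {
    return CounterEvent.newBuilder().setName(counterName).setDelta(1L).build();
  }
}
```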

+ +## uuid.proto + + + + +### UUID + +Type representing a 128-bit UUID. + +The bytes of the UUID should be packed in little-endian **byte** (not bit) order. For example, the UUID `f47ac10b-58cc-4372-a567-0e02b2c3d479` should be encoded as `UUID{ low: 0x7243cc580bc17af4, high: 0x79d4c3b2020e67a5 }` + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| low | [uint64](#uint64) | required | | +| high | [uint64](#uint64) | required | | + + + + + + + +## Scalar Value Types + + +| .proto Type | Notes | C++ Type | Java Type | Python Type | +| ----------- | ----- | -------- | --------- | ----------- | +| double | | double | double | float | +| float | | float | float | float | +| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. | int32 | int | int | +| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | +| uint32 | Uses variable-length encoding. | uint32 | int | int/long | +| uint64 | Uses variable-length encoding. | uint64 | long | int/long | +| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | +| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | +| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | +| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | +| sfixed32 | Always four bytes. | int32 | int | int | +| sfixed64 | Always eight bytes. | int64 | long | int/long | +| bool | | bool | boolean | boolean | +| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | +| bytes | May contain any arbitrary sequence of bytes. | string | ByteString | str | + +

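The little-endian packing can be derived from java.util.UUID with Long.reverseBytes; this sketch reproduces the documented example (f47ac10b-58cc-4372-a567-0e02b2c3d479 yields low = 0x7243cc580bc17af4 and high = 0x79d4c3b2020e67a5):

```java
import org.cloudfoundry.dropsonde.events.UuidFactory;

class UuidSketch {
  // Packs a java.util.UUID into the little-endian byte order described above.
  static UuidFactory.UUID toWireFormat(java.util.UUID uuid) {
    return UuidFactory.UUID.newBuilder()
        .setLow(Long.reverseBytes(uuid.getMostSignificantBits()))
        .setHigh(Long.reverseBytes(uuid.getLeastSignificantBits()))
        .build();
  }
}
```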

diff --git a/clouddriver-cloudfoundry/src/main/proto/envelope.proto b/clouddriver-cloudfoundry/src/main/proto/envelope.proto new file mode 100644 index 00000000000..024ee387ea1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/envelope.proto @@ -0,0 +1,47 @@ +package events; +option java_package = "org.cloudfoundry.dropsonde.events"; +option java_outer_classname = "EventFactory"; + +import "http.proto"; +import "log.proto"; +import "metric.proto"; +import "error.proto"; + +/// Envelope wraps an Event and adds metadata. +message Envelope { + /// Type of the wrapped event. + enum EventType { + // Removed Heartbeat at position 1 + // Removed HttpStart at position 2 + // Removed HttpStop at position 3 + HttpStartStop = 4; + LogMessage = 5; + ValueMetric = 6; + CounterEvent = 7; + Error = 8; + ContainerMetric = 9; + } + + required string origin = 1; /// Unique description of the origin of this event. + required EventType eventType = 2; /// Type of wrapped event. Only the optional field corresponding to the value of eventType should be set. + + optional int64 timestamp = 6; /// UNIX timestamp (in nanoseconds) event was wrapped in this Envelope. + + optional string deployment = 13; /// Deployment name (used to uniquely identify source). + optional string job = 14; /// Job name (used to uniquely identify source). + optional string index = 15; /// Index of job (used to uniquely identify source). + optional string ip = 16; /// IP address (used to uniquely identify source). + + map<string, string> tags = 17; /// key/value tags to include additional identifying information. + + // Removed Heartbeat at position 3 + // Removed HttpStart at position 4 + // Removed HttpStop at position 5 + optional HttpStartStop httpStartStop = 7; + optional LogMessage logMessage = 8; + optional ValueMetric valueMetric = 9; + optional CounterEvent counterEvent = 10; + optional Error error = 11; + optional ContainerMetric containerMetric = 12; +} + diff --git a/clouddriver-cloudfoundry/src/main/proto/error.proto b/clouddriver-cloudfoundry/src/main/proto/error.proto new file mode 100644 index 00000000000..9ba2baf5964 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/error.proto @@ -0,0 +1,11 @@ +package events; + +option java_package = "org.cloudfoundry.dropsonde.events"; +option java_outer_classname = "ErrorFactory"; + +/// An Error event represents an error in the originating process. +message Error { + required string source = 1; /// Source of the error. This may or may not be the same as the Origin in the envelope. + required int32 code = 2; /// Numeric error code. This is provided for programmatic responses to the error. + required string message = 3; /// Error description (preferably human-readable). +} diff --git a/clouddriver-cloudfoundry/src/main/proto/http.proto b/clouddriver-cloudfoundry/src/main/proto/http.proto new file mode 100644 index 00000000000..2e9c9b4c020 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/http.proto @@ -0,0 +1,84 @@ +package events; + +option java_package = "org.cloudfoundry.dropsonde.events"; +option java_outer_classname = "HttpFactory"; + +import "uuid.proto"; + +/// Type of peer handling request. +enum PeerType { + Client = 1; /// Request is made by this process. + Server = 2; /// Request is received by this process. +} + +/// HTTP method. 
+enum Method { + GET = 1; + POST = 2; + PUT = 3; + DELETE = 4; + HEAD = 5; + + ACL = 6; + BASELINE_CONTROL = 7; + BIND = 8; + CHECKIN = 9; + CHECKOUT = 10; + CONNECT = 11; + COPY = 12; + DEBUG = 13; + LABEL = 14; + LINK = 15; + LOCK = 16; + MERGE = 17; + MKACTIVITY = 18; + MKCALENDAR = 19; + MKCOL = 20; + MKREDIRECTREF = 21; + MKWORKSPACE = 22; + MOVE = 23; + OPTIONS = 24; + ORDERPATCH = 25; + PATCH = 26; + PRI = 27; + PROPFIND = 28; + PROPPATCH = 29; + REBIND = 30; + REPORT = 31; + SEARCH = 32; + SHOWMETHOD = 33; + SPACEJUMP = 34; + TEXTSEARCH = 35; + TRACE = 36; + TRACK = 37; + UNBIND = 38; + UNCHECKOUT = 39; + UNLINK = 40; + UNLOCK = 41; + UPDATE = 42; + UPDATEREDIRECTREF = 43; + VERSION_CONTROL = 44; +} + +/// An HttpStartStop event represents the whole lifecycle of an HTTP request. +message HttpStartStop { + required int64 startTimestamp = 1; /// UNIX timestamp (in nanoseconds) when the request was sent (by a client) or received (by a server). + required int64 stopTimestamp = 2; /// UNIX timestamp (in nanoseconds) when the request was received. + + required UUID requestId = 3; /// ID for tracking lifecycle of request. + required PeerType peerType = 4; /// Role of the emitting process in the request cycle. + required Method method = 5; /// Method of the request. + required string uri = 6; /// Destination of the request. + required string remoteAddress = 7; /// Remote address of the request. (For a server, this should be the origin of the request.) + required string userAgent = 8; /// Contents of the UserAgent header on the request. + + required int32 statusCode = 9; /// Status code returned with the response to the request. + required int64 contentLength = 10; /// Length of response (bytes). + + /// 11 used to be ParentRequestID which has been deprecated. + + optional UUID applicationId = 12; /// If this request was made in relation to an application, this field should track that application's ID. + optional int32 instanceIndex = 13; /// Index of the application instance. + optional string instanceId = 14; /// ID of the application instance. + repeated string forwarded = 15; /// This contains the HTTP forwarded-for [x-forwarded-for] header from the request. +} diff --git a/clouddriver-cloudfoundry/src/main/proto/log.proto b/clouddriver-cloudfoundry/src/main/proto/log.proto new file mode 100644 index 00000000000..c012f87e1ef --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/log.proto @@ -0,0 +1,21 @@ +package events; + +option java_package = "org.cloudfoundry.dropsonde.events"; +option java_outer_classname = "LogFactory"; + +/// A LogMessage contains a "log line" and associated metadata. +message LogMessage { + + /// MessageType stores the destination of the message (corresponding to STDOUT or STDERR). + enum MessageType { + OUT = 1; + ERR = 2; + } + + required bytes message = 1; /// Bytes of the log message. (Note that it is not required to be a single line.) + required MessageType message_type = 2; /// Type of the message (OUT or ERR). + required int64 timestamp = 3; /// UNIX timestamp (in nanoseconds) when the log was written. + optional string app_id = 4; /// Application that emitted the message (or to which the application is related). + optional string source_type = 5; /// Source of the message. For Cloud Foundry, this can be "APP", "RTR", "DEA", "STG", etc. + optional string source_instance = 6; /// Instance that emitted the message. 
+} diff --git a/clouddriver-cloudfoundry/src/main/proto/metric.proto b/clouddriver-cloudfoundry/src/main/proto/metric.proto new file mode 100644 index 00000000000..006344d53b2 --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/metric.proto @@ -0,0 +1,32 @@ +package events; + +option java_package = "org.cloudfoundry.dropsonde.events"; +option java_outer_classname = "MetricFactory"; + +import "uuid.proto"; + +/// A ValueMetric indicates the value of a metric at an instant in time. +message ValueMetric { + required string name = 1; /// Name of the metric. Must be consistent for downstream consumers to associate events semantically. + required double value = 2; /// Value at the time of event emission. + required string unit = 3; /// Unit of the metric. Please see http://metrics20.org/spec/#units for ideas; SI units/prefixes are recommended where applicable. Should be consistent for the life of the metric (consumers are expected to report, but not interpret, prefixes). +} + +/// A CounterEvent represents the increment of a counter. It contains only the change in the value; it is the responsibility of downstream consumers to maintain the value of the counter. +message CounterEvent { + required string name = 1; /// Name of the counter. Must be consistent for downstream consumers to associate events semantically. + required uint64 delta = 2; /// Amount by which to increment the counter. + optional uint64 total = 3; /// Total value of the counter. This will be overridden by Metron, which internally tracks the total of each named Counter it receives. +} + +/// A ContainerMetric records resource usage of an app in a container. + message ContainerMetric { + required string applicationId = 1; /// ID of the contained application. + required int32 instanceIndex = 2; /// Instance index of the contained application. (This, with applicationId, should uniquely identify a container.) + + required double cpuPercentage = 3; /// CPU based on number of cores. + required uint64 memoryBytes = 4; /// Bytes of memory used. + required uint64 diskBytes = 5; /// Bytes of disk used. + optional uint64 memoryBytesQuota = 6; /// Maximum bytes of memory allocated to container. + optional uint64 diskBytesQuota = 7; /// Maximum bytes of disk allocated to container. + } diff --git a/clouddriver-cloudfoundry/src/main/proto/uuid.proto b/clouddriver-cloudfoundry/src/main/proto/uuid.proto new file mode 100644 index 00000000000..44c1c5adb0a --- /dev/null +++ b/clouddriver-cloudfoundry/src/main/proto/uuid.proto @@ -0,0 +1,12 @@ +package events; + +option java_package = "org.cloudfoundry.dropsonde.events"; +option java_outer_classname = "UuidFactory"; + +/// Type representing a 128-bit UUID. +// +// The bytes of the UUID should be packed in little-endian **byte** (not bit) order. For example, the UUID `f47ac10b-58cc-4372-a567-0e02b2c3d479` should be encoded as `UUID{ low: 0x7243cc580bc17af4, high: 0x79d4c3b2020e67a5 }` +message UUID { + required uint64 low = 1; + required uint64 high = 2; +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/artifacts/ArtifactCredentialsFromString.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/artifacts/ArtifactCredentialsFromString.java new file mode 100644 index 00000000000..50fc1c7dcbb --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/artifacts/ArtifactCredentialsFromString.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Pivotal, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.List; + +@NonnullByDefault +public class ArtifactCredentialsFromString implements ArtifactCredentials { + public static final String ARTIFACT_TYPE = "artifacts/string"; + + private final String name; + private final ImmutableList<String> types; + private final String downloadContent; + + public ArtifactCredentialsFromString(String name, List<String> types, String downloadContent) { + this.name = name; + this.types = ImmutableList.copyOf(types); + this.downloadContent = downloadContent; + } + + @Override + public InputStream download(Artifact artifact) { + return new ByteArrayInputStream(downloadContent.getBytes(StandardCharsets.UTF_8)); + } + + public String getName() { + return name; + } + + @Override + public String getType() { + return ARTIFACT_TYPE; + } + + public ImmutableList<String> getTypes() { + return types; + } + + public String getDownloadContent() { + return downloadContent; + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/CacheRepositoryTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/CacheRepositoryTest.java new file mode 100644 index 00000000000..c5fb3cf966c --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/cache/CacheRepositoryTest.java @@ -0,0 +1,241 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.cache; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository.Detail.*; +import static java.util.Collections.*; +import static java.util.stream.Collectors.toList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.cats.provider.DefaultProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Applications; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Routes; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundryServerGroupCachingAgent; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import java.util.List; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CacheRepositoryTest { + private final ProviderCache cache = new DefaultProviderCache(new InMemoryCache()); + private final CacheRepository repo = new CacheRepository(cache); + + @BeforeEach + void before() { + CloudFoundryInstance instance = + CloudFoundryInstance.builder() + .appGuid("appguid") + .key("abc123") + .healthState(HealthState.Up) + .launchTime(1L) + .zone("us-east-1") + .build(); + + CloudFoundryServerGroup serverGroup = + CloudFoundryServerGroup.builder() + .name("demo-dev-v001") + .account("devaccount") + .createdTime(1L) + .space(CloudFoundrySpace.fromRegion("myorg > dev")) + .instances(singleton(instance)) + .droplet( + CloudFoundryDroplet.builder() + .id("dropletid") + .name("dropletname") + .buildpacks( + singletonList( + CloudFoundryBuildpack.builder().buildpackName("java").build())) + .sourcePackage(CloudFoundryPackage.builder().checksum("check").build()) + .build()) + .build(); + + CloudFoundryServerGroup serverGroupWithoutInstances = + CloudFoundryServerGroup.builder() + .name("demo-staging-v001") + .account("devaccount") + .createdTime(1L) + .space(CloudFoundrySpace.fromRegion("myorg > staging")) + .droplet( + CloudFoundryDroplet.builder() + .id("dropletid") + .name("dropletname") + .buildpacks( + singletonList( + CloudFoundryBuildpack.builder().buildpackName("java").build())) + .sourcePackage(CloudFoundryPackage.builder().checksum("check").build()) + .build()) + .build(); + + CloudFoundryCluster cluster = + CloudFoundryCluster.builder() + .accountName("devaccount") + .name("demo-dev") + .serverGroups(singleton(serverGroup)) + .build(); + + CloudFoundryCluster clusterWithoutInstances = + CloudFoundryCluster.builder() + .accountName("devaccount") + .name("demo-staging") + .serverGroups(singleton(serverGroupWithoutInstances)) + .build(); + + CloudFoundryApplication app = + 
CloudFoundryApplication.builder().name("demo").clusters(singleton(cluster)).build(); + + CloudFoundryApplication appWithoutInstances = + CloudFoundryApplication.builder() + .name("demo-without-instances") + .clusters(singleton(clusterWithoutInstances)) + .build(); + + CloudFoundryClient client = mock(CloudFoundryClient.class); + Applications apps = mock(Applications.class); + Routes routes = mock(Routes.class); + ProviderCache providerCache = mock(ProviderCache.class); + CloudFoundryCredentials credentials = mock(CloudFoundryCredentials.class); + + when(client.getApplications()).thenReturn(apps); + when(client.getRoutes()).thenReturn(routes); + when(apps.all(emptyList())).thenReturn(List.of(app, appWithoutInstances)); + when(routes.all(emptyList())).thenReturn(emptyList()); + when(providerCache.filterIdentifiers(any(), any())).thenReturn(emptyList()); + when(providerCache.getAll(any(), anyCollection())).thenReturn(emptyList()); + when(credentials.getName()).thenReturn("devaccount"); + when(credentials.getClient()).thenReturn(client); + + CloudFoundryServerGroupCachingAgent agent = + new CloudFoundryServerGroupCachingAgent(credentials, mock(Registry.class)); + + CacheResult result = agent.loadData(providerCache); + List authoritativeTypes = + agent.getProvidedDataTypes().stream().map(AgentDataType::getTypeName).collect(toList()); + cache.putCacheResult(agent.getAgentType(), authoritativeTypes, result); + } + + private CloudFoundryCredentials createCredentials(String name) { + return new CloudFoundryCredentials( + name, + null, + null, + "api." + name, + "user-" + name, + "pwd-" + name, + null, + false, + false, + null, + repo, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()); + } + + @Test + void findApplication() { + assertThat(repo.findApplicationByKey(Keys.getApplicationKey("demo"), FULL)) + .hasValueSatisfying( + app -> { + assertThat(app.getName()).isEqualTo("demo"); + assertThat(app.getClusters()) + .hasOnlyOneElementSatisfying( + cluster -> { + assertThat(cluster.getName()).isEqualTo("demo-dev"); + + // rehydrated clusters are shallow, serve only the purpose of providing + // cluster names + assertThat(cluster.getServerGroups()).isEmpty(); + }); + }); + } + + @Test + void findCluster() { + String clusterKey = Keys.getClusterKey("devaccount", "demo", "demo-dev"); + + assertThat(repo.findClusterByKey(clusterKey, FULL)) + .hasValueSatisfying( + cluster -> { + assertThat(cluster.getName()).isEqualTo("demo-dev"); + assertThat(cluster.getServerGroups()) + .hasOnlyOneElementSatisfying( + serverGroup -> assertThat(serverGroup.getInstances()).hasSize(1)); + }); + + assertThat(repo.findClusterByKey(clusterKey, NAMES_ONLY)) + .hasValueSatisfying( + cluster -> + assertThat(cluster.getServerGroups()) + .hasOnlyOneElementSatisfying( + serverGroup -> { + assertThat(serverGroup.getLoadBalancers()).isEmpty(); + assertThat(serverGroup.getInstances()).isNotEmpty(); + })); + + assertThat(repo.findClusterByKey(clusterKey, NONE)) + .hasValueSatisfying(cluster -> assertThat(cluster.getServerGroups()).isEmpty()); + } + + @Test + void findServerGroup() { + assertThat( + repo.findServerGroupByKey( + Keys.getServerGroupKey("devaccount", "demo-dev-v001", "myorg > dev"), FULL)) + .hasValueSatisfying( + serverGroup -> { + assertThat(serverGroup.getName()).isEqualTo("demo-dev-v001"); + assertThat(serverGroup.getInstances()) + .hasOnlyOneElementSatisfying( + inst -> { + 
assertThat(inst.getHealthState()).isEqualTo(HealthState.Up); + assertThat(inst.getZone()).isEqualTo("us-east-1"); + assertThat(inst.getLaunchTime()).isEqualTo(1L); + }); + }); + } + + @Test + void findServerGroupWithoutInstances() { + assertThat( + repo.findServerGroupByKey( + Keys.getServerGroupKey("devaccount", "demo-staging-v001", "myorg > staging"), FULL)) + .hasValueSatisfying( + serverGroup -> { + assertThat(serverGroup.getName()).isEqualTo("demo-staging-v001"); + assertThat(serverGroup.getInstances()).isNotNull(); + assertThat(serverGroup.getInstances()).isEmpty(); + }); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ApplicationsTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ApplicationsTest.java new file mode 100644 index 00000000000..ce882d63b76 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ApplicationsTest.java @@ -0,0 +1,593 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup.State.STARTED; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.*; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ApplicationService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ProcessesService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Application; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Package; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Process; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*; +import io.vavr.collection.HashMap; +import java.time.ZonedDateTime; +import java.util.*; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import retrofit2.Response; +import retrofit2.mock.Calls; + +class ApplicationsTest { + private final ApplicationService applicationService = mock(ApplicationService.class); + private final ProcessesService processesService = mock(ProcessesService.class); + private final Processes processes = mock(Processes.class); + private final Spaces spaces = mock(Spaces.class); + private final int resultsPerPage = 500; + private final 
Applications apps = + new Applications( + "pws", + "some-apps-man-uri", + "some-metrics-uri", + applicationService, + spaces, + processes, + resultsPerPage, + true, + ForkJoinPool.commonPool(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()); + private final String spaceId = "space-guid"; + private final CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id(spaceId) + .name("space-name") + .organization(CloudFoundryOrganization.builder().id("org-id").name("org-name").build()) + .build(); + + @Test + void errorHandling() { + CloudFoundryClient client = + new HttpCloudFoundryClient( + "pws", + "some.api.uri.example.com", + "some-metrics-uri", + "api.run.pivotal.io", + "baduser", + "badpassword", + false, + false, + false, + resultsPerPage, + ForkJoinPool.commonPool(), + new OkHttpClient().newBuilder(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()); + + assertThatThrownBy(() -> client.getApplications().all(emptyList())) + .isInstanceOf(CloudFoundryApiException.class); + } + + @Test + void findByIdIfInputsAreValid() { + String serverGroupId = "some-app-guid"; + String serverGroupName = "some-app-name"; + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setUpdatedAt(ZonedDateTime.now()) + .setGuid(serverGroupId) + .setName(serverGroupName) + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance + .setPlan("service-plan") + .setServicePlanGuid("service-plan-guid") + .setTags(new HashSet<>(Arrays.asList("tag1", "tag2"))) + .setName("service-instance"); + + ApplicationEnv.SystemEnv systemEnv = + new ApplicationEnv.SystemEnv() + .setVcapServices( + HashMap.of("service-name-1", Collections.singletonList(serviceInstance)) + .toJavaMap()); + ApplicationEnv applicationEnv = new ApplicationEnv().setSystemEnvJson(systemEnv); + + Process process = + new Process().setDiskInMb(1024).setGuid("process-guid").setInstances(1).setMemoryInMb(1024); + + Package applicationPackage = + new Package() + .setData( + new PackageData() + .setChecksum( + new PackageChecksum() + .setType("package-checksum-type") + .setValue("package-check-sum-value"))) + .setLinks( + HashMap.of("download", new Link().setHref("http://capi.io/download/space-guid")) + .toJavaMap()); + Pagination packagePagination = + new Pagination() + .setPagination(new Pagination.Details().setTotalPages(1)) + .setResources(Collections.singletonList(applicationPackage)); + + Droplet droplet = + new Droplet() + .setGuid("droplet-guid") + .setStack("droplet-stack") + .setBuildpacks( + Collections.singletonList(new Buildpack().setBuildpackName("build-pack-name"))); + + CloudFoundryOrganization cloudFoundryOrganization = + CloudFoundryOrganization.builder().id("org-id").name("org-name").build(); + CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id("space-id") + .name("space-name") + .organization(cloudFoundryOrganization) + .build(); + + when(applicationService.findById(anyString())).thenReturn(Calls.response(application)); + when(applicationService.findApplicationEnvById(anyString())) + .thenReturn(Calls.response(applicationEnv)); + when(spaces.findById(any())).thenReturn(cloudFoundrySpace); + when(processes.findProcessById(any())).thenReturn(Optional.of(process)); + when(applicationService.instances(anyString())) + .thenReturn( + Calls.response( 
+ HashMap.of( + "0", + new InstanceStatus() + .setState(InstanceStatus.State.RUNNING) + .setUptime(2405L)) + .toJavaMap())); + when(applicationService.findPackagesByAppId(anyString())) + .thenReturn(Calls.response(packagePagination)); + when(applicationService.findDropletByApplicationGuid(anyString())) + .thenReturn(Calls.response(droplet)); + + CloudFoundryServerGroup cloudFoundryServerGroup = apps.findById(serverGroupId); + assertThat(cloudFoundryServerGroup).isNotNull(); + assertThat(cloudFoundryServerGroup.getId()).isEqualTo(serverGroupId); + assertThat(cloudFoundryServerGroup.getName()).isEqualTo(serverGroupName); + assertThat(cloudFoundryServerGroup.getAppsManagerUri()) + .isEqualTo( + "some-apps-man-uri/organizations/org-id/spaces/space-id/applications/some-app-guid"); + assertThat(cloudFoundryServerGroup.getMetricsUri()) + .isEqualTo("some-metrics-uri/apps/some-app-guid"); + assertThat(cloudFoundryServerGroup.getServiceInstances().size()).isEqualTo(1); + assertThat(cloudFoundryServerGroup.getServiceInstances().get(0).getTags()) + .containsExactly("tag1", "tag2"); + + verify(applicationService).findById(serverGroupId); + verify(applicationService).findApplicationEnvById(serverGroupId); + verify(applicationService).instances(serverGroupId); + verify(applicationService).findPackagesByAppId(serverGroupId); + verify(applicationService).findDropletByApplicationGuid(serverGroupId); + } + + @Test + void nonSpinnakerEnvironmentVarsAreRemoved() { + String serverGroupId = "some-app-guid"; + String serverGroupName = "some-app-name"; + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setUpdatedAt(ZonedDateTime.now()) + .setGuid(serverGroupId) + .setName(serverGroupName) + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance + .setPlan("service-plan") + .setServicePlanGuid("service-plan-guid") + .setTags(new HashSet<>(Arrays.asList("tag1", "tag2"))) + .setName("service-instance"); + + ApplicationEnv.SystemEnv systemEnv = + new ApplicationEnv.SystemEnv() + .setVcapServices( + HashMap.of("service-name-1", Collections.singletonList(serviceInstance)) + .toJavaMap()); + Map environment = + Map.of( + ServerGroupMetaDataEnvVar.PipelineId.envVarName, + "ABCDEF", + "super-secret-key", + "super-secret-value"); + ApplicationEnv applicationEnv = + new ApplicationEnv().setSystemEnvJson(systemEnv).setEnvironmentJson(environment); + + Package applicationPackage = + new Package() + .setData( + new PackageData() + .setChecksum( + new PackageChecksum() + .setType("package-checksum-type") + .setValue("package-check-sum-value"))) + .setLinks( + HashMap.of("download", new Link().setHref("http://capi.io/download/space-guid")) + .toJavaMap()); + Pagination packagePagination = + new Pagination() + .setPagination(new Pagination.Details().setTotalPages(1)) + .setResources(Collections.singletonList(applicationPackage)); + + Droplet droplet = + new Droplet() + .setGuid("droplet-guid") + .setStack("droplet-stack") + .setBuildpacks( + Collections.singletonList(new Buildpack().setBuildpackName("build-pack-name"))); + + CloudFoundryOrganization cloudFoundryOrganization = + CloudFoundryOrganization.builder().id("org-id").name("org-name").build(); + CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id("space-id") + .name("space-name") + .organization(cloudFoundryOrganization) + .build(); + + 
when(applicationService.findById(anyString())).thenReturn(Calls.response(application)); + when(applicationService.findApplicationEnvById(anyString())) + .thenReturn(Calls.response(applicationEnv)); + when(spaces.findById(any())).thenReturn(cloudFoundrySpace); + when(processes.findProcessById(any())).thenReturn(Optional.empty()); + when(applicationService.instances(anyString())) + .thenReturn( + Calls.response( + HashMap.of( + "0", + new InstanceStatus() + .setState(InstanceStatus.State.RUNNING) + .setUptime(2405L)) + .toJavaMap())); + when(applicationService.findPackagesByAppId(anyString())) + .thenReturn(Calls.response(packagePagination)); + when(applicationService.findDropletByApplicationGuid(anyString())) + .thenReturn(Calls.response(droplet)); + + CloudFoundryServerGroup cloudFoundryServerGroup = apps.findById(serverGroupId); + assertThat(cloudFoundryServerGroup).isNotNull(); + assertThat(cloudFoundryServerGroup.getEnv()).isNotNull(); + assertThat(cloudFoundryServerGroup.getEnv()) + .containsKey(ServerGroupMetaDataEnvVar.PipelineId.envVarName); + assertThat(cloudFoundryServerGroup.getEnv()).doesNotContainKey("super-secret-key"); + } + + @Test + void allDoesNotSkipVersionedAppWhenOnlySpinnakerManagedTrue() { + String guid = "guid"; + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setUpdatedAt(ZonedDateTime.now()) + .setGuid(guid) + .setName("my-app-v000") + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + + Pagination applicationPagination = + new Pagination() + .setPagination(new Pagination.Details().setTotalPages(1)) + .setResources(Collections.singletonList(application)); + + when(applicationService.all(any(), any(), any(), any())) + .thenReturn(Calls.response(Response.success(applicationPagination))); + when(applicationService.findById(anyString())).thenReturn(Calls.response(application)); + mockMap(cloudFoundrySpace, "droplet-guid"); + + List result = apps.all(List.of(spaceId)); + assertThat(result.size()).isEqualTo(1); + + verify(applicationService).all(null, resultsPerPage, null, spaceId); + } + + @Test + void allSkipsUnversionedAppWhenOnlySpinnakerManagedTrue() { + String guid = "guid"; + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setUpdatedAt(ZonedDateTime.now()) + .setGuid(guid) + .setName("my-app") + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + + Pagination applicationPagination = + new Pagination() + .setPagination(new Pagination.Details().setTotalPages(1)) + .setResources(Collections.singletonList(application)); + + when(applicationService.all(any(), any(), any(), any())) + .thenReturn(Calls.response(Response.success(applicationPagination))); + when(applicationService.findById(anyString())).thenReturn(Calls.response(application)); + + List result = apps.all(List.of(spaceId)); + assertThat(result.size()).isEqualTo(0); + + verify(applicationService).all(null, resultsPerPage, null, spaceId); + + // these methods should never be called if the app is skipped + verify(applicationService, never()).findApplicationEnvById(guid); + verify(spaces, never()).findById(guid); + verify(processesService, never()).findProcessById(guid); + verify(applicationService, never()).instances(guid); + verify(applicationService, never()).findPackagesByAppId(guid); + verify(applicationService, never()).findDropletByApplicationGuid(guid); + } + + @Test + void 
getAppStateWhenProcessStateNotFound() { + when(processes.getProcessState(anyString())).thenReturn(Optional.empty()); + Application app = new Application(); + app.setState("STARTED"); + when(applicationService.findById("some-app-guid")) + .thenReturn(Calls.response(Response.success(app))); + ProcessStats.State result = apps.getAppState("some-app-guid"); + assertThat(result).isEqualTo(ProcessStats.State.RUNNING); + } + + @Test + void getProcessStateWhenStatsIsEmptyListAndAppIsStarted() { + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setGuid("some-app-guid") + .setName("some-app") + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + ProcessResources processResources = + new ProcessResources().setResources(Collections.emptyList()); + when(processesService.findProcessStatsById(anyString())) + .thenReturn(Calls.response(Response.success(processResources))); + when(applicationService.findById(anyString())) + .thenReturn(Calls.response(Response.success(application))); + ProcessStats.State result = apps.getAppState("some-app-guid"); + assertThat(result).isEqualTo(ProcessStats.State.RUNNING); + verify(applicationService).findById("some-app-guid"); + } + + @Test + void getProcessStateWhenStatsIsEmptyListAndAppIsStopped() { + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setGuid("some-app-guid") + .setName("some-app") + .setState("STOPPED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + ProcessResources processResources = + new ProcessResources().setResources(Collections.emptyList()); + when(processesService.findProcessStatsById(anyString())) + .thenReturn(Calls.response(Response.success(processResources))); + when(applicationService.findById(anyString())) + .thenReturn(Calls.response(Response.success(application))); + ProcessStats.State result = apps.getAppState("some-app-guid"); + assertThat(result).isEqualTo(ProcessStats.State.DOWN); + verify(applicationService).findById("some-app-guid"); + } + + @ParameterizedTest + @ValueSource(strings = {"myapp-v999", "myapp"}) + void getTakenServerGroups(String existingApp) { + when(applicationService.listAppsFiltered(isNull(), any(), any())) + .thenReturn( + Calls.response(Response.success(Page.singleton(getApplication(existingApp), "123")))); + + List<Resource<com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application>> + taken = apps.getTakenSlots("myapp", "space"); + assertThat(taken).first().extracting(app -> app.getEntity().getName()).isEqualTo(existingApp); + } + + @ParameterizedTest + @ValueSource( + strings = {"myapp-v999", "myapp", "myapp-stack2", "anothername", "myapp-stack-detail"}) + void getTakenServerGroupsWhenNoPriorVersionExists(String similarAppName) { + com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application application = + getApplication(similarAppName); + + when(applicationService.listAppsFiltered(isNull(), any(), any())) + .thenReturn(Calls.response(Response.success(Page.singleton(application, "123")))); + + List<Resource<com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application>> + taken = apps.getTakenSlots("myapp-stack", "space"); + assertThat(taken).isEmpty(); + } + + @Test + void getLatestServerGroupCapiDoesntCorrectlyOrderResults() { + when(applicationService.listAppsFiltered(isNull(), any(), any())) + .thenReturn( + Calls.response( + Response.success( + Page.asPage( + getApplication("myapp-prod-v046"), + getApplication("myapp-v003"), + getApplication("myapp"))))); + + List<Resource<com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application>> + taken = apps.getTakenSlots("myapp", "space"); + + 
assertThat(taken).extracting(app -> app.getEntity().getName()).contains("myapp", "myapp-v003"); + } + + @Test + void findServerGroupId() { + String serverGroupName = "server-group"; + String spaceId = "space-guid"; + String expectedServerGroupId = "app-guid"; + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setUpdatedAt(ZonedDateTime.now()) + .setGuid(expectedServerGroupId) + .setName("app") + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + Pagination applicationPagination = + new Pagination() + .setPagination(new Pagination.Details().setTotalPages(1)) + .setResources(Collections.singletonList(application)); + when(applicationService.all(any(), any(), any(), any())) + .thenReturn(Calls.response(Response.success(applicationPagination))); + mockMap(cloudFoundrySpace, "droplet-id"); + + String serverGroupId = apps.findServerGroupId(serverGroupName, spaceId); + + assertThat(serverGroupId).isEqualTo(expectedServerGroupId); + } + + @Test + void findServerGroupByNameAndSpaceId() { + String serverGroupId = "server-group-guid"; + String serverGroupName = "server-group"; + Process process = new Process().setDiskInMb(0).setMemoryInMb(0); + Application application = + new Application() + .setCreatedAt(ZonedDateTime.now()) + .setUpdatedAt(ZonedDateTime.now()) + .setGuid(serverGroupId) + .setName(serverGroupName) + .setState("STARTED") + .setLinks( + HashMap.of("space", new Link().setHref("http://capi.io/space/space-guid")) + .toJavaMap()); + Pagination applicationPagination = + new Pagination() + .setPagination(new Pagination.Details().setTotalPages(1)) + .setResources(Collections.singletonList(application)); + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance + .setPlan("service-plan") + .setServicePlanGuid("service-plan-guid") + .setTags(Collections.emptySet()) + .setName("service-instance"); + String dropletId = "droplet-guid"; + + when(applicationService.all(any(), any(), any(), any())) + .thenReturn(Calls.response(Response.success(applicationPagination))); + when(processes.findProcessById(any())).thenReturn(Optional.of(process)); + mockMap(cloudFoundrySpace, dropletId); + + CloudFoundryDroplet expectedDroplet = CloudFoundryDroplet.builder().id(dropletId).build(); + CloudFoundryServerGroup expectedCloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .account("pws") + .state(STARTED) + .space(cloudFoundrySpace) + .droplet(expectedDroplet) + .id(serverGroupId) + .env(emptyMap()) + .instances(Collections.emptySet()) + .serviceInstances(Collections.emptyList()) + .createdTime(application.getCreatedAt().toInstant().toEpochMilli()) + .updatedTime(application.getUpdatedAt().toInstant().toEpochMilli()) + .memory(0) + .diskQuota(0) + .name(serverGroupName) + .appsManagerUri( + "some-apps-man-uri/organizations/org-id/spaces/space-guid/applications/server-group-guid") + .metricsUri("some-metrics-uri/apps/server-group-guid") + .ciBuild(CloudFoundryBuildInfo.builder().build()) + .appArtifact(ArtifactInfo.builder().build()) + .build(); + + CloudFoundryServerGroup serverGroup = + apps.findServerGroupByNameAndSpaceId(serverGroupName, spaceId); + + assertThat(serverGroup) + .usingRecursiveComparison() + .usingOverriddenEquals() + .isEqualTo(expectedCloudFoundryServerGroup); + // server group should be cached because of call to "findServerGroupId" + verify(applicationService, never()).findById(serverGroupId); + } + + private 
+  private com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application
+      getApplication(String applicationName) {
+    return new com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Application()
+        .setName(applicationName)
+        .setSpaceGuid("space-guid");
+  }
+
+  private void mockMap(CloudFoundrySpace cloudFoundrySpace, String dropletId) {
+    ApplicationEnv.SystemEnv systemEnv = new ApplicationEnv.SystemEnv().setVcapServices(emptyMap());
+    ApplicationEnv applicationEnv = new ApplicationEnv().setSystemEnvJson(systemEnv);
+    Process process = new Process().setGuid("process-guid").setInstances(1);
+    Package applicationPackage =
+        new Package()
+            .setData(
+                new PackageData()
+                    .setChecksum(
+                        new PackageChecksum()
+                            .setType("package-checksum-type")
+                            .setValue("package-check-sum-value")))
+            .setLinks(
+                HashMap.of("download", new Link().setHref("http://capi.io/download/space-guid"))
+                    .toJavaMap());
+    Pagination<Package> packagePagination =
+        new Pagination<Package>()
+            .setPagination(new Pagination.Details().setTotalPages(1))
+            .setResources(Collections.singletonList(applicationPackage));
+    Droplet droplet = new Droplet().setGuid(dropletId);
+
+    when(applicationService.findApplicationEnvById(any()))
+        .thenReturn(Calls.response(Response.success(applicationEnv)));
+    when(spaces.findById(any())).thenReturn(cloudFoundrySpace);
+    when(processesService.findProcessById(any()))
+        .thenReturn(Calls.response(Response.success(process)));
+    when(applicationService.instances(any()))
+        .thenReturn(Calls.response(Response.success(emptyMap())));
+    when(applicationService.findPackagesByAppId(any()))
+        .thenReturn(Calls.response(Response.success(packagePagination)));
+    when(applicationService.findDropletByApplicationGuid(any()))
+        .thenReturn(Calls.response(Response.success(droplet)));
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClientUtilsTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClientUtilsTest.java
new file mode 100644
index 00000000000..b8089a265d0
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/CloudFoundryClientUtilsTest.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ApplicationService;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.DomainService;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Domain;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Application;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination;
+import java.util.Collections;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+import retrofit2.Response;
+import retrofit2.mock.Calls;
+
+class CloudFoundryClientUtilsTest {
+
+  @Test
+  void collectPagesIteratesOverOnePage() {
+    ApplicationService applicationService = mock(ApplicationService.class);
+    Application applicationOne = new Application().setName("app-name-one");
+    List<Application> pageOneResources = Collections.singletonList(applicationOne);
+    Pagination<Application> pageOne = new Pagination<>();
+    pageOne.setPagination(new Pagination.Details().setTotalPages(1));
+    pageOne.setResources(pageOneResources);
+
+    when(applicationService.all(null, null, null, null))
+        .thenReturn(Calls.response(Response.success(pageOne)));
+
+    List<Application> results =
+        CloudFoundryClientUtils.collectPages(
+            "applications", page -> applicationService.all(page, null, null, null));
+
+    assertThat(results).containsExactly(applicationOne);
+  }
+
+  @Test
+  void collectPagesIteratesOverMultiplePages() {
+    ApplicationService applicationService = mock(ApplicationService.class);
+    Application applicationOne = new Application().setName("app-name-one");
+    List<Application> pageOneResources = Collections.singletonList(applicationOne);
+    Pagination<Application> pageOne = new Pagination<>();
+    pageOne.setPagination(new Pagination.Details().setTotalPages(2));
+    pageOne.setResources(pageOneResources);
+    Application applicationTwo = new Application().setName("app-name-two");
+    List<Application> pageTwoResources = Collections.singletonList(applicationTwo);
+    Pagination<Application> pageTwo = new Pagination<>();
+    pageTwo.setPagination(new Pagination.Details().setTotalPages(2));
+    pageTwo.setResources(pageTwoResources);
+
+    when(applicationService.all(null, null, null, null))
+        .thenReturn(Calls.response(Response.success(pageOne)));
+    when(applicationService.all(2, null, null, null))
+        .thenReturn(Calls.response(Response.success(pageTwo)));
+
+    List<Application> results =
+        CloudFoundryClientUtils.collectPages(
+            "applications", page -> applicationService.all(page, null, null, null));
+
+    assertThat(results).containsExactly(applicationOne, applicationTwo);
+  }
+
+  @Test
+  void collectPageResourcesIteratesOverOnePage() {
+    DomainService domainService = mock(DomainService.class);
+    Domain domainOne = new Domain().setName("domain-name-one");
+    Page<Domain> pageOne =
+        Page.singleton(domainOne, "domain-one-guid").setTotalPages(1).setTotalResults(1);
+
+    when(domainService.allShared(null)).thenReturn(Calls.response(Response.success(pageOne)));
+
+    List<Resource<Domain>> results =
+        CloudFoundryClientUtils.collectPageResources("shared domains", domainService::allShared);
+
+    assertThat(results).containsExactly(pageOne.getResources().get(0));
+  }
+
+  @Test
+  void collectPageResourcesIteratesOverMultiplePages() {
+    DomainService domainService = mock(DomainService.class);
+    Domain domainOne = new Domain().setName("domain-name-one");
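+    // Two pages with one resource each; collectPageResources must explicitly request page 2.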
+    Page<Domain> pageOne =
+        Page.singleton(domainOne, "domain-one-guid").setTotalPages(2).setTotalResults(2);
+    Domain domainTwo = new Domain().setName("domain-name-two");
+    Page<Domain> pageTwo =
+        Page.singleton(domainTwo, "domain-two-guid").setTotalPages(2).setTotalResults(2);
+
+    when(domainService.allShared(null)).thenReturn(Calls.response(Response.success(pageOne)));
+    when(domainService.allShared(2)).thenReturn(Calls.response(Response.success(pageTwo)));
+
+    List<Resource<Domain>> results =
+        CloudFoundryClientUtils.collectPageResources("shared domains", domainService::allShared);
+
+    assertThat(results)
+        .containsExactly(pageOne.getResources().get(0), pageTwo.getResources().get(0));
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/HttpCloudFoundryClientTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/HttpCloudFoundryClientTest.java
new file mode 100644
index 00000000000..775b7574b54
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/HttpCloudFoundryClientTest.java
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.any;
+import static com.github.tomakehurst.wiremock.stubbing.Scenario.STARTED;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.github.tomakehurst.wiremock.matching.UrlPattern;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.CreateServiceBinding;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import java.util.Optional;
+import java.util.UUID;
+import java.util.concurrent.ForkJoinPool;
+import okhttp3.OkHttpClient;
+import org.jetbrains.annotations.NotNull;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import ru.lanwen.wiremock.ext.WiremockResolver;
+
+@ExtendWith({WiremockResolver.class})
+class HttpCloudFoundryClientTest {
+  @Test
+  void createRetryInterceptorShouldRetryOnInternalServerErrorsThenTimeOut(
+      @WiremockResolver.Wiremock WireMockServer server) throws Exception {
+    stubServer(
+        server,
+        200,
+        STARTED,
+        "Will respond 502",
+        "{\"access_token\":\"token\",\"expires_in\":1000000}");
+    stubServer(
+        server,
+        502,
+        "Will respond 502",
+        "Will respond 503",
+        "{\"errors\":[{\"detail\":\"502 error\"}]}");
+    stubServer(
+        server,
+        503,
+        "Will respond 503",
+        "Will respond 504",
+        "{\"errors\":[{\"detail\":\"503 error\"}]}");
+    stubServer(
+        server,
+        504,
504", + "Will respond 200", + "{\"errors\":[{\"detail\":\"504 error\"}]}"); + stubServer(server, 200, "Will respond 200", "END", "{}"); + + HttpCloudFoundryClient cloudFoundryClient = createDefaultCloudFoundryClient(server); + + CloudFoundryApiException thrown = + assertThrows( + CloudFoundryApiException.class, + () -> cloudFoundryClient.getOrganizations().findByName("randomName"), + "Expected thrown 'Cloud Foundry API returned with error(s): 504 error', but it didn't"); + + // 504 means it was retried after 502 and 503 + assertTrue(thrown.getMessage().contains("Cloud Foundry API returned with error(s): 504 error")); + } + + @Test + void createRetryInterceptorShouldRetryOnTimeoutErrors( + @WiremockResolver.Wiremock WireMockServer server) throws Exception { + stubServer( + server, + 200, + STARTED, + "Will respond 200 delayed", + "{\"access_token\":\"token\",\"expires_in\":1000000}"); + stubServerWithFixedDelay( + server, + 200, + "Will respond 200 delayed", + "Will respond 200 without delay", + // successful but delayed and with diff org name + "{\"pagination\":{\"total_pages\":1},\"resources\":[{\"guid\": \"orgId\", \"name\":\"orgNameDelayed\"}]}", + 10000); + stubServer( + server, + 200, + "Will respond 200 without delay", + "END", + "{\"pagination\":{\"total_pages\":1},\"resources\":[{\"guid\": \"orgId\", \"name\":\"orgName\"}]}"); + + HttpCloudFoundryClient cloudFoundryClient = createDefaultCloudFoundryClient(server); + + Optional cloudFoundryOrganization = + cloudFoundryClient.getOrganizations().findByName("randomName"); + + assertThat(cloudFoundryOrganization.get()) + .extracting(CloudFoundryOrganization::getId, CloudFoundryOrganization::getName) + .containsExactly("orgId", "orgName"); + } + + @Test + void createRetryInterceptorShouldNotRetryOnTimeoutErrorsWhenConfigIsOverridden( + @WiremockResolver.Wiremock WireMockServer server) throws Exception { + stubServer( + server, + 200, + STARTED, + "Will respond 200 delayed", + "{\"access_token\":\"token\",\"expires_in\":1000000}"); + stubServerWithFixedDelay( + server, + 200, + "Will respond 200 delayed", + "Will respond 200 without delay", + // successful but delayed and with diff org name + "{\"pagination\":{\"total_pages\":1},\"resources\":[{\"guid\": \"orgId\", \"name\":\"orgNameDelayed\"}]}", + 10000); + + CloudFoundryConfigurationProperties.ClientConfig clientConfig = + new CloudFoundryConfigurationProperties.ClientConfig(); + clientConfig.setMaxRetries(1); + clientConfig.setConnectionTimeout(1); + clientConfig.setReadTimeout(1); + clientConfig.setWriteTimeout(1); + HttpCloudFoundryClient cloudFoundryClient = + createCloudFoundryClientWithRetryConfig(server, clientConfig); + + CloudFoundryApiException thrown = + assertThrows( + CloudFoundryApiException.class, + () -> cloudFoundryClient.getOrganizations().findByName("randomName"), + "Expected thrown 'Cloud Foundry API returned with error(s): java.net.SocketTimeoutException', but it didn't"); + } + + @Test + void createRetryInterceptorShouldNotRefreshTokenOnBadCredentials( + @WiremockResolver.Wiremock WireMockServer server) throws Exception { + stubServer(server, 401, STARTED, "Bad credentials"); + + HttpCloudFoundryClient cloudFoundryClient = createDefaultCloudFoundryClient(server); + + CloudFoundryApiException thrown = + assertThrows( + CloudFoundryApiException.class, + () -> cloudFoundryClient.getOrganizations().findByName("randomName"), + "Expected thrown 'Cloud Foundry API returned with error(s): Unauthorized', but it didn't"); + + 
+    assertTrue(thrown.getMessage().contains("Unauthorized"));
+  }
+
+  @Test
+  void createRetryInterceptorShouldReturnOnSecondAttempt(
+      @WiremockResolver.Wiremock WireMockServer server) throws Exception {
+    stubServer(
+        server,
+        200,
+        STARTED,
+        "Will respond 502",
+        "{\"access_token\":\"token\",\"expires_in\":1000000}");
+    stubServer(
+        server,
+        502,
+        "Will respond 502",
+        "Will respond 200",
+        "{\"errors\":[{\"detail\":\"502 error\"}]}");
+    stubServer(
+        server,
+        200,
+        "Will respond 200",
+        "END",
+        "{\"pagination\":{\"total_pages\":1},\"resources\":[{\"guid\": \"orgId\", \"name\":\"orgName\"}]}");
+
+    HttpCloudFoundryClient cloudFoundryClient = createDefaultCloudFoundryClient(server);
+
+    Optional<CloudFoundryOrganization> cloudFoundryOrganization =
+        cloudFoundryClient.getOrganizations().findByName("randomName");
+
+    assertThat(cloudFoundryOrganization.get())
+        .extracting(CloudFoundryOrganization::getId, CloudFoundryOrganization::getName)
+        .containsExactly("orgId", "orgName");
+  }
+
+  @Test
+  void shouldReplaceInvalidNameCharacters() {
+    String invalidBindingName = "test-service-binding~123#test";
+    String sanitisedBindingName = "test-service-binding-123-test";
+
+    CreateServiceBinding binding =
+        new CreateServiceBinding(
+            UUID.randomUUID().toString(), UUID.randomUUID().toString(), invalidBindingName);
+    assertThat(binding.getName()).isEqualTo(sanitisedBindingName);
+  }
+
+  private void stubServer(
+      WireMockServer server, int status, String currentState, String nextState) {
+    stubServer(server, status, currentState, nextState, "");
+  }
+
+  private void stubServer(
+      WireMockServer server, int status, String currentState, String nextState, String body) {
+    stubServerWithFixedDelay(server, status, currentState, nextState, body, null);
+  }
+
+  private void stubServerWithFixedDelay(
+      WireMockServer server,
+      int status,
+      String currentState,
+      String nextState,
+      String body,
+      Integer delayInMillis) {
+    server.stubFor(
+        any(UrlPattern.ANY)
+            .inScenario("Retry Scenario")
+            .whenScenarioStateIs(currentState)
+            .willReturn(
+                aResponse()
+                    .withStatus(status)
+                    .withHeader("Content-Type", "application/json")
+                    .withBody(body)
+                    .withFixedDelay(delayInMillis))
+            .willSetStateTo(nextState));
+  }
+
+  @NotNull
+  private HttpCloudFoundryClient createDefaultCloudFoundryClient(WireMockServer server) {
+    CloudFoundryConfigurationProperties.ClientConfig clientConfig =
+        new CloudFoundryConfigurationProperties.ClientConfig();
+    return createCloudFoundryClientWithRetryConfig(server, clientConfig);
+  }
+
+  @NotNull
+  private HttpCloudFoundryClient createCloudFoundryClientWithRetryConfig(
+      WireMockServer server, CloudFoundryConfigurationProperties.ClientConfig clientConfig) {
+    return new HttpCloudFoundryClient(
+        "account",
+        "appsManUri",
+        "metricsUri",
+        "localhost:" + server.port() + "/",
+        "user",
+        "password",
+        false,
+        true,
+        false,
+        500,
+        ForkJoinPool.commonPool(),
+        new OkHttpClient.Builder(),
+        clientConfig,
+        new CloudFoundryConfigurationProperties.LocalCacheConfig());
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/LogsTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/LogsTest.java
new file mode 100644
index 00000000000..8cf94ceb48b
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/LogsTest.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.protobuf.ByteString; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.DopplerService; +import java.util.Arrays; +import org.cloudfoundry.dropsonde.events.EventFactory.Envelope; +import org.cloudfoundry.dropsonde.events.EventFactory.Envelope.EventType; +import org.cloudfoundry.dropsonde.events.LogFactory.LogMessage; +import org.cloudfoundry.dropsonde.events.LogFactory.LogMessage.MessageType; +import org.junit.jupiter.api.Test; +import retrofit2.Response; +import retrofit2.mock.Calls; + +class LogsTest { + private Envelope logMessage( + long timestamp, String message, String sourceType, int sourceInstance) { + return envelope(EventType.LogMessage, timestamp, message, sourceType, sourceInstance); + } + + private Envelope envelope( + EventType eventType, long timestamp, String message, String sourceType, int sourceInstance) { + return Envelope.newBuilder(Envelope.getDefaultInstance()) + .setEventType(eventType) + .setLogMessage( + LogMessage.newBuilder() + .setMessageType(MessageType.OUT) + .setTimestamp(timestamp) + .setMessage(ByteString.copyFrom(message, UTF_8)) + .setSourceType(sourceType) + .setSourceInstance(String.valueOf(sourceInstance)) + .build()) + .setOrigin("") + .build(); + } + + private DopplerService fakeDopplerService(String forAppGuid, Envelope... 
envelopes) { + DopplerService dopplerService = mock(DopplerService.class); + when(dopplerService.recentLogs(eq(forAppGuid))) + .thenReturn(Calls.response(Response.success(Arrays.asList(envelopes)))); + return dopplerService; + } + + @Test + void recentTaskLogs_filterInLogMessagesOnly() { + Logs logs = new Logs(fakeDopplerService("12345", envelope(EventType.Error, 0, "", "", 0))); + String result = logs.recentTaskLogs("12345", "task1"); + assertThat(result).isEmpty(); + } + + @Test + void recentTaskLogs_filterInSpecifiedTaskLogMessagesOnly() { + Logs logs = + new Logs( + fakeDopplerService( + "12345", + logMessage(0, "msg1", "APP/TASK/task1", 0), + logMessage(0, "msg2", "APP/TASK/task2", 0))); + + String result = logs.recentTaskLogs("12345", "task1"); + assertThat(result).isEqualTo("msg1"); + } + + @Test + void recentTaskLogs_returnsSortedLogMessagesByTimestampsAsc() { + Logs logs = + new Logs( + fakeDopplerService( + "12345", + logMessage(10, "msg1", "APP/TASK/task1", 0), + logMessage(1, "msg2", "APP/TASK/task1", 0))); + + String[] result = logs.recentTaskLogs("12345", "task1").split("\n"); + assertThat(result.length).isEqualTo(2); + assertThat(result[0]).isEqualTo("msg2"); + assertThat(result[1]).isEqualTo("msg1"); + } + + @Test + void recentApplicationLogs_filterInAppLogMessagesOnlyForSpecifiedAppGuid() { + DopplerService dopplerService = mock(DopplerService.class); + when(dopplerService.recentLogs(eq("12345"))) + .thenReturn( + Calls.response( + Response.success(singletonList(logMessage(0, "msg1", "APP/PROC/WEB", 0))))); + when(dopplerService.recentLogs(eq("99999"))) + .thenReturn( + Calls.response( + Response.success(singletonList(logMessage(0, "msg2", "APP/PROC/WEB", 0))))); + + Logs logs = new Logs(dopplerService); + + String result = logs.recentApplicationLogs("12345", 0); + assertThat(result).isEqualTo("msg1"); + } + + @Test + void recentApplicationLogs_filterInAppLogMessagesOnlyForSpecifiedAppGuidAndSourceInstanceIndex() { + Logs logs = + new Logs( + fakeDopplerService( + "12345", + logMessage(0, "msg1", "APP/PROC/WEB", 0), + logMessage(0, "msg2", "APP/PROC/WEB", 1))); + + String result = logs.recentApplicationLogs("12345", 0); + assertThat(result).isEqualTo("msg1"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/MockCloudFoundryClient.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/MockCloudFoundryClient.java new file mode 100644 index 00000000000..03d9e75bd86 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/MockCloudFoundryClient.java @@ -0,0 +1,74 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static org.mockito.Mockito.mock;
+
+public class MockCloudFoundryClient implements CloudFoundryClient {
+
+  final Spaces spaces = mock(Spaces.class);
+  final Organizations organizations = mock(Organizations.class);
+  final Domains domains = mock(Domains.class);
+  final Routes routes = mock(Routes.class);
+  final Applications applications = mock(Applications.class);
+  final ServiceInstances serviceInstances = mock(ServiceInstances.class);
+  final ServiceKeys serviceKeys = mock(ServiceKeys.class);
+  final Tasks tasks = mock(Tasks.class);
+  final Processes processes = mock(Processes.class);
+
+  public Spaces getSpaces() {
+    return spaces;
+  }
+
+  public Organizations getOrganizations() {
+    return organizations;
+  }
+
+  public Domains getDomains() {
+    return domains;
+  }
+
+  public Routes getRoutes() {
+    return routes;
+  }
+
+  public Applications getApplications() {
+    return applications;
+  }
+
+  public ServiceInstances getServiceInstances() {
+    return serviceInstances;
+  }
+
+  public ServiceKeys getServiceKeys() {
+    return serviceKeys;
+  }
+
+  @Override
+  public Tasks getTasks() {
+    return tasks;
+  }
+
+  @Override
+  public Logs getLogs() {
+    return mock(Logs.class);
+  }
+
+  public Processes getProcesses() {
+    return processes;
+  }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/OrganizationsTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/OrganizationsTest.java
new file mode 100644
index 00000000000..37ebff18130
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/OrganizationsTest.java
@@ -0,0 +1,49 @@
+package com.netflix.spinnaker.clouddriver.cloudfoundry.client;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.OrganizationService;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Organization;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import java.util.List;
+import java.util.Optional;
+import org.junit.jupiter.api.Test;
+import retrofit2.Response;
+import retrofit2.mock.Calls;
+
+class OrganizationsTest {
+  private Organizations organizations;
+
+  {
+    OrganizationService organizationService = mock(OrganizationService.class);
+    organizations = new Organizations(organizationService);
+    when(organizationService.all(any(), any()))
+        .thenReturn(Calls.response(Response.success(generateOrganizationPage())));
+  }
+
+  @Test
+  void findByNameSucceedsWhenOrgExists() {
+    CloudFoundryOrganization expectedOrganization =
+        CloudFoundryOrganization.builder().id("org-guid").name("org").build();
+
+    Optional<CloudFoundryOrganization> result = organizations.findByName("org");
+
+    assertThat(result).isEqualTo(Optional.of(expectedOrganization));
+  }
+
+  private Pagination<Organization> generateOrganizationPage() {
+    Organization organization = new Organization();
+    organization.setGuid("org-guid");
+    organization.setName("org");
+    Pagination.Details details = new Pagination.Details();
+    details.setTotalPages(1);
+    Pagination<Organization> pagination = new Pagination<>();
+    pagination.setPagination(details);
+
pagination.setResources(List.of(organization)); + return pagination; + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ProcessesTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ProcessesTest.java new file mode 100644 index 00000000000..6e46c3b9a1f --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ProcessesTest.java @@ -0,0 +1,150 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; +import static org.mockito.Mockito.verify; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ProcessesService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Process; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessResources; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.UpdateProcess; +import java.util.Collections; +import org.junit.jupiter.api.Test; +import retrofit2.Response; +import retrofit2.mock.Calls; + +public class ProcessesTest { + private final ProcessesService processesService = mock(ProcessesService.class); + private final Processes processes = new Processes(processesService); + + @Test + void dontScaleApplicationIfInputsAreNullOrZero() { + processes.scaleProcess("id", null, null, null); + processes.scaleProcess("id", 0, 0, 0); + + verify(processesService, never()).scaleProcess(any(), any()); + } + + @Test + void scaleApplicationIfInputsAreMixOfNullAndZero() { + when(processesService.scaleProcess(any(), any())) + .thenReturn(Calls.response(Response.success(null))); + + processes.scaleProcess("id", 0, null, null); + + verify(processesService).scaleProcess(any(), any()); + } + + @Test + void updateProcess() { + when(processesService.updateProcess(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(new Process()))); + + processes.updateProcess("guid1", "command1", "http", "/endpoint", 180, 180); + verify(processesService) + .updateProcess( + "guid1", + new UpdateProcess( + "command1", + new Process.HealthCheck.HealthCheckBuilder() + .type("http") + .data( + new Process.HealthCheckData.HealthCheckDataBuilder() + .endpoint("/endpoint") + .invocationTimeout(180) + .timeout(180) + .build()) + .build())); + + processes.updateProcess("guid1", "command1", "http", null, null, null); + verify(processesService) + .updateProcess( + "guid1", + new UpdateProcess( + "command1", + new Process.HealthCheck.HealthCheckBuilder() + .type("http") + .data(new Process.HealthCheckData.HealthCheckDataBuilder().build()) + .build())); + + processes.updateProcess("guid1", "command1", "http", 
"/endpoint", 180, null); + verify(processesService) + .updateProcess( + "guid1", + new UpdateProcess( + "command1", + new Process.HealthCheck.HealthCheckBuilder() + .type("http") + .data( + new Process.HealthCheckData.HealthCheckDataBuilder() + .endpoint("/endpoint") + .timeout(180) + .build()) + .build())); + processes.updateProcess("guid1", "command1", "http", "/endpoint", null, 180); + verify(processesService) + .updateProcess( + "guid1", + new UpdateProcess( + "command1", + new Process.HealthCheck.HealthCheckBuilder() + .type("http") + .data( + new Process.HealthCheckData.HealthCheckDataBuilder() + .endpoint("/endpoint") + .invocationTimeout(180) + .build()) + .build())); + } + + @Test + void getProcessState() { + ProcessStats processStats = new ProcessStats().setState(ProcessStats.State.RUNNING); + ProcessResources processResources = + new ProcessResources().setResources(Collections.singletonList(processStats)); + when(processesService.findProcessStatsById(anyString())) + .thenReturn(Calls.response(Response.success(processResources))); + ProcessStats.State result = processes.getProcessState("some-app-guid").get(); + assertThat(result).isEqualTo(ProcessStats.State.RUNNING); + } + + @Test + void updateProcessHealthCheck1() { + when(processesService.updateProcess(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(new Process()))); + + processes.updateProcess("guid1", null, null, null, 90, null); + verify(processesService) + .updateProcess( + "guid1", + new UpdateProcess( + null, + new Process.HealthCheck.HealthCheckBuilder() + .type(null) + .data( + new Process.HealthCheckData.HealthCheckDataBuilder() + .endpoint(null) + .invocationTimeout(null) + .timeout(90) + .build()) + .build())); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/RoutesTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/RoutesTest.java new file mode 100644 index 00000000000..d92024117b6 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/RoutesTest.java @@ -0,0 +1,214 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.RouteService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Route; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.RouteMapping; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.ForkJoinPool; +import org.junit.jupiter.api.Test; +import retrofit2.Response; +import retrofit2.mock.Calls; + +class RoutesTest { + @Test + void toRouteId() { + CloudFoundryDomain domain = + CloudFoundryDomain.builder().id("domainGuid").name("apps.calabasas.cf-app.com").build(); + + Domains domains = mock(Domains.class); + when(domains.findById(eq("domainGuid"))).thenReturn(domain); + when(domains.findByName(eq("apps.calabasas.cf-app.com"))).thenReturn(Optional.of(domain)); + + Spaces spaces = mock(Spaces.class); + when(spaces.findById(any())).thenReturn(CloudFoundrySpace.fromRegion("myorg > dev")); + + Route route = new Route(); + route.setHost("demo1-prod"); + route.setDomainGuid("domainGuid"); + route.setPath("/path"); + + RouteService routeService = mock(RouteService.class); + when(routeService.all(any(), any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(Page.singleton(route, "abc123")))); + when(routeService.routeMappings(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(new Page<>()))); + + Routes routes = + new Routes( + "pws", + routeService, + null, + domains, + spaces, + 500, + ForkJoinPool.commonPool(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()); + RouteId routeId = routes.toRouteId("demo1-prod.apps.calabasas.cf-app.com/path/v1.0"); + assertThat(routeId).isNotNull(); + assertThat(routeId.getHost()).isEqualTo("demo1-prod"); + assertThat(routeId.getDomainGuid()).isEqualTo("domainGuid"); + assertThat(routeId.getPath()).isEqualTo("/path/v1.0"); + } + + @Test + void toRouteIdReturnsNullForInvalidRoute() { + Routes routes = + new Routes( + null, + null, + null, + null, + null, + 500, + ForkJoinPool.commonPool(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()); + assertNull(routes.toRouteId("demo1-pro cf-app.com/path")); + } + + @Test + void findShouldFilterCorrectlyOnMultipleResults() { + CloudFoundryDomain domain = + CloudFoundryDomain.builder().id("domain-guid").name("apps.calabasas.cf-app.com").build(); + + Domains domains = mock(Domains.class); + + when(domains.findById(eq("domain-guid"))).thenReturn(domain); + 
when(domains.findByName(eq("apps.calabasas.cf-app.com"))).thenReturn(Optional.of(domain));
+
+    Route hostOnly =
+        new Route(
+            new RouteId().setHost("somehost").setDomainGuid("domain-guid").setPath(""),
+            "space-guid");
+    Route withPath1 =
+        new Route(
+            new RouteId().setHost("somehost").setDomainGuid("domain-guid").setPath("/person"),
+            "space-guid");
+    Route withPath2 =
+        new Route(
+            new RouteId().setHost("somehost").setDomainGuid("domain-guid").setPath("/account"),
+            "space-guid");
+    Route withPathAndPort =
+        new Route(
+            new Route()
+                .setHost("somehost")
+                .setDomainGuid("domain-guid")
+                .setPath("/account")
+                .setPort(8888),
+            "space-guid");
+
+    Page<Route> routePage = new Page<>();
+    routePage.setTotalPages(1);
+    routePage.setTotalResults(4);
+    routePage.setResources(
+        Arrays.asList(
+            createRouteResource(withPath2),
+            createRouteResource(withPath1),
+            createRouteResource(hostOnly),
+            createRouteResource(withPathAndPort)));
+
+    Spaces spaces = mock(Spaces.class);
+    CloudFoundryOrganization org =
+        CloudFoundryOrganization.builder().id("org-id").name("org-name").build();
+    CloudFoundrySpace space =
+        CloudFoundrySpace.builder().organization(org).name("space-name").id("space-guid").build();
+    RouteService routeService = mock(RouteService.class);
+
+    Page<RouteMapping> routeMappingPage = new Page<>();
+    routeMappingPage.setTotalResults(0);
+    routeMappingPage.setTotalPages(1);
+
+    when(spaces.findById("space-guid")).thenReturn(space);
+    when(routeService.all(any(), any(), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(routePage)));
+    when(routeService.routeMappings(any(), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(routeMappingPage)));
+
+    Routes routes =
+        new Routes(
+            "pws",
+            routeService,
+            null,
+            domains,
+            spaces,
+            500,
+            ForkJoinPool.commonPool(),
+            new CloudFoundryConfigurationProperties.LocalCacheConfig());
+
+    CloudFoundryLoadBalancer loadBalancer =
+        routes.find(new RouteId().setHost("somehost").setDomainGuid("domain-guid"), "space-guid");
+    assertThat(loadBalancer).isNotNull();
+    assertThat(loadBalancer.getHost()).isEqualTo("somehost");
+    assertThat(loadBalancer.getDomain().getId()).isEqualTo("domain-guid");
+    assertThat(loadBalancer.getPath()).isNullOrEmpty();
+    assertThat(loadBalancer.getPort()).isNull();
+
+    routePage.setResources(
+        Arrays.asList(createRouteResource(withPathAndPort), createRouteResource(withPath2)));
+
+    loadBalancer =
+        routes.find(
+            new RouteId().setHost("somehost").setDomainGuid("domain-guid").setPath("/account"),
+            "space-guid");
+    assertThat(loadBalancer).isNotNull();
+    assertThat(loadBalancer.getHost()).isEqualTo("somehost");
+    assertThat(loadBalancer.getDomain().getId()).isEqualTo("domain-guid");
+    assertThat(loadBalancer.getPath()).isEqualTo("/account");
+    assertThat(loadBalancer.getPort()).isNull();
+  }
+
+  private Resource<Route> createRouteResource(Route route) {
+    return new Resource<Route>()
+        .setEntity(route)
+        .setMetadata(new Resource.Metadata().setGuid("route-guid"));
+  }
+
+  @Test
+  void validRouteFormatsReturnTrue() {
+    assertTrue(Routes.isValidRouteFormat("a.b"));
+    assertTrue(Routes.isValidRouteFormat("foo.bar"));
+    assertTrue(Routes.isValidRouteFormat("10_bLAh.org:3000"));
+    assertTrue(Routes.isValidRouteFormat("unbe-lievable.b_c.gov:9999/fo-o_bar"));
+  }
+
+  @Test
+  void invalidRouteFormatsReturnFalse() {
+    assertFalse(Routes.isValidRouteFormat("abc"));
+    assertFalse(Routes.isValidRouteFormat("ab.c d.com"));
+    assertFalse(Routes.isValidRouteFormat("ab.cd.com:a5b0"));
+
assertFalse(Routes.isValidRouteFormat("EBCDIC.com/DVORAK:a5b0")); + assertFalse(Routes.isValidRouteFormat("ab.cd.com/fo ba")); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceInstancesTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceInstancesTest.java new file mode 100644 index 00000000000..76e3df21f81 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceInstancesTest.java @@ -0,0 +1,1441 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ConfigFeatureFlag.ConfigFlag.SERVICE_INSTANCE_SHARING; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance.Type.MANAGED_SERVICE_INSTANCE; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance.Type.USER_PROVIDED_SERVICE_INSTANCE; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.utils.TestUtils.assertThrows; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ConfigService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ServiceInstanceService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreateSharedServiceInstances; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.*; +import okhttp3.MediaType; +import okhttp3.ResponseBody; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import retrofit2.Response; +import retrofit2.mock.Calls; + +class ServiceInstancesTest { + private 
final CloudFoundryOrganization cloudFoundryOrganization =
+      CloudFoundryOrganization.builder().id("some-org-guid").name("org").build();
+  private final CloudFoundrySpace cloudFoundrySpace =
+      CloudFoundrySpace.builder()
+          .id("some-space-guid")
+          .name("space")
+          .organization(cloudFoundryOrganization)
+          .build();
+  private final ServiceInstanceService serviceInstanceService = mock(ServiceInstanceService.class);
+  private final ConfigService configService = mock(ConfigService.class);
+  private final Organizations organizations = mock(Organizations.class);
+  private final Spaces spaces = mock(Spaces.class);
+  private final ServiceInstances serviceInstances =
+      new ServiceInstances(serviceInstanceService, configService, spaces);
+
+  {
+    when(serviceInstanceService.findService(any(), any()))
+        .thenReturn(
+            Calls.response(
+                Response.success(
+                    Page.singleton(new Service().setLabel("service1"), "service-guid"))));
+
+    when(serviceInstanceService.findServicePlans(any(), any()))
+        .thenReturn(
+            Calls.response(
+                Response.success(
+                    Page.singleton(new ServicePlan().setName("ServicePlan1"), "plan-guid"))));
+  }
+
+  @Test
+  void shouldCreateServiceBindingWhenServiceExists() {
+    CloudFoundryServerGroup cloudFoundryServerGroup =
+        CloudFoundryServerGroup.builder()
+            .account("some-account")
+            .id("servergroup-id")
+            .space(cloudFoundrySpace)
+            .build();
+
+    Page<ServiceInstance> serviceMappingPageOne = Page.singleton(null, "service-instance-guid");
+    CreateServiceBinding binding =
+        new CreateServiceBinding(
+            "service-instance-guid", cloudFoundryServerGroup.getId(), "service-name", emptyMap());
+    serviceMappingPageOne.setTotalResults(0);
+    serviceMappingPageOne.setTotalPages(0);
+    when(serviceInstanceService.all(eq(null), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceMappingPageOne)));
+    when(serviceInstanceService.all(eq(1), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceMappingPageOne)));
+
+    Page<UserProvidedServiceInstance> userProvidedServiceMappingPageOne =
+        createEmptyUserProvidedServiceInstancePage();
+    when(serviceInstanceService.allUserProvided(eq(null), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(userProvidedServiceMappingPageOne)));
+    when(serviceInstanceService.allUserProvided(eq(1), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(userProvidedServiceMappingPageOne)));
+    when(serviceInstanceService.createServiceBinding(binding))
+        .thenAnswer(invocation -> Calls.response(Response.success(createServiceBindingResource())));
+
+    serviceInstances.createServiceBinding(binding);
+    verify(serviceInstanceService, atLeastOnce()).createServiceBinding(any());
+  }
+
+  @Test
+  void shouldCreateServiceBindingWhenUserProvidedServiceExists() {
+    CloudFoundryServerGroup cloudFoundryServerGroup =
+        CloudFoundryServerGroup.builder()
+            .account("some-account")
+            .id("servergroup-id")
+            .space(cloudFoundrySpace)
+            .build();
+
+    Page<ServiceInstance> serviceMappingPageOne = createEmptyOsbServiceInstancePage();
+    CreateServiceBinding binding =
+        new CreateServiceBinding(
+            "service-instance-guid", cloudFoundryServerGroup.getId(), "service-name", emptyMap());
+    when(serviceInstanceService.all(eq(null), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceMappingPageOne)));
+    when(serviceInstanceService.all(eq(1), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceMappingPageOne)));
+
+    Page<UserProvidedServiceInstance> userProvidedServiceMappingPageOne =
+        Page.singleton(null, "service-instance-guid");
+    userProvidedServiceMappingPageOne.setTotalResults(0);
+    userProvidedServiceMappingPageOne.setTotalPages(0);
+    when(serviceInstanceService.allUserProvided(eq(null), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(userProvidedServiceMappingPageOne)));
+    when(serviceInstanceService.allUserProvided(eq(1), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(userProvidedServiceMappingPageOne)));
+    when(serviceInstanceService.createServiceBinding(binding))
+        .thenAnswer(invocation -> Calls.response(Response.success(createServiceBindingResource())));
+
+    serviceInstances.createServiceBinding(binding);
+
+    verify(serviceInstanceService, atLeastOnce()).createServiceBinding(any());
+  }
+
+  @Test
+  void shouldSucceedServiceBindingWhenServiceBindingExists() {
+    CloudFoundryServerGroup cloudFoundryServerGroup =
+        CloudFoundryServerGroup.builder()
+            .account("some-account")
+            .id("servergroup-id")
+            .space(cloudFoundrySpace)
+            .build();
+
+    Page<ServiceInstance> serviceMappingPageOne = Page.singleton(null, "service-instance-guid");
+    CreateServiceBinding binding =
+        new CreateServiceBinding(
+            "service-instance-guid", cloudFoundryServerGroup.getId(), "service-name", emptyMap());
+    serviceMappingPageOne.setTotalResults(0);
+    serviceMappingPageOne.setTotalPages(0);
+    when(serviceInstanceService.all(eq(null), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceMappingPageOne)));
+    when(serviceInstanceService.all(eq(1), any()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceMappingPageOne)));
+
+    Page<UserProvidedServiceInstance> userProvidedServiceMappingPageOne =
+        createEmptyUserProvidedServiceInstancePage();
+    when(serviceInstanceService.allUserProvided(eq(null), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(userProvidedServiceMappingPageOne)));
+    when(serviceInstanceService.allUserProvided(eq(1), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(userProvidedServiceMappingPageOne)));
+    when(serviceInstanceService.createServiceBinding(binding))
+        .thenReturn(
+            Calls.response(
+                Response.error(
+                    500,
+                    ResponseBody.create(
+                        MediaType.get("application/json"),
+                        "{\"error_code\": \"CF-ServiceBindingAppServiceTaken\", \"description\":\"already bound\"}"))));
+
+    serviceInstances.createServiceBinding(binding);
+    verify(serviceInstanceService, atLeastOnce()).createServiceBinding(any());
+  }
+
+  @Test
+  void shouldSuccessfullyCreateService() {
+    Resource<ServiceInstance> succeededServiceInstanceResource = createServiceInstanceResource();
+    succeededServiceInstanceResource
+        .getEntity()
+        .setLastOperation(new LastOperation().setType(CREATE).setState(SUCCEEDED));
+
+    when(serviceInstanceService.all(any(), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage())));
+    when(serviceInstanceService.allUserProvided(any(), anyList()))
+        .thenAnswer(
+            invocation ->
+                Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage())));
+    when(serviceInstanceService.createServiceInstance(any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(createServiceInstanceResource())));
+
+    ServiceInstanceResponse response =
+        serviceInstances.createServiceInstance(
+            "new-service-instance-name",
+            "serviceName",
+            "ServicePlan1",
+            Collections.emptySet(),
+            null,
+            true,
+            cloudFoundrySpace);
+
+    assertThat(response)
+        .isEqualTo(
+            new ServiceInstanceResponse()
+                .setServiceInstanceName("new-service-instance-name")
+                .setType(CREATE)
+                .setState(IN_PROGRESS));
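+    // A brand-new instance is created exactly once; update must never be called.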
+    verify(serviceInstanceService, times(1)).createServiceInstance(any());
+    verify(serviceInstanceService, never()).updateServiceInstance(any(), any());
+  }
+
+  @Test
+  void shouldThrowExceptionWhenCreationReturnsHttpNotFound() {
+
+    when(serviceInstanceService.all(any(), anyList()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage())));
+    when(serviceInstanceService.allUserProvided(any(), anyList()))
+        .thenAnswer(
+            invocation ->
+                Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage())));
+    when(serviceInstanceService.createServiceInstance(any()))
+        .thenReturn(
+            Calls.response(
+                Response.error(
+                    404,
+                    ResponseBody.create(
+                        MediaType.get("application/json"),
+                        "{\"error_code\": \"CF-ResourceNotFound\", \"description\":\"service instance 'new-service-instance-name' could not be created\"}"))));
+
+    assertThrows(
+        () ->
+            serviceInstances.createServiceInstance(
+                "new-service-instance-name",
+                "serviceName",
+                "ServicePlan1",
+                Collections.emptySet(),
+                null,
+                false,
+                cloudFoundrySpace),
+        CloudFoundryApiException.class,
+        "Cloud Foundry API returned with error(s): service instance 'new-service-instance-name' could not be created");
+    verify(serviceInstanceService, times(1)).createServiceInstance(any());
+    verify(serviceInstanceService, never()).updateServiceInstance(any(), any());
+  }
+
+  @Test
+  void throwExceptionWhenNoServicePlanExistsWithTheNameProvided() {
+    Page<ServicePlan> servicePlansPageOne = new Page<>();
+    servicePlansPageOne.setTotalResults(0);
+    servicePlansPageOne.setTotalPages(1);
+    servicePlansPageOne.setResources(Collections.emptyList());
+    when(serviceInstanceService.findServicePlans(any(), anyList()))
+        .thenAnswer(invocation -> Calls.response(Response.success(servicePlansPageOne)));
+
+    assertThrows(
+        () ->
+            serviceInstances.createServiceInstance(
+                "new-service-instance-name",
+                "serviceName",
+                "servicePlanName",
+                Collections.emptySet(),
+                null,
+                true,
+                cloudFoundrySpace),
+        ResourceNotFoundException.class,
+        "No plans available for service name 'serviceName'");
+  }
+
+  @Test
+  void shouldUpdateTheServiceIfAlreadyExists() {
+    when(serviceInstanceService.all(any(), anyList()))
+        .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage())));
+    when(serviceInstanceService.allUserProvided(any(), anyList()))
+        .thenAnswer(
+            invocation ->
+                Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage())));
+    when(serviceInstanceService.updateServiceInstance(any(), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(createServiceInstanceResource())));
+
+    ServiceInstanceResponse response =
+        serviceInstances.createServiceInstance(
+            "new-service-instance-name",
+            "serviceName",
+            "ServicePlan1",
+            Collections.emptySet(),
+            null,
+            true,
+            cloudFoundrySpace);
+
+    assertThat(response)
+        .isEqualTo(
+            new ServiceInstanceResponse()
+                .setServiceInstanceName("new-service-instance-name")
+                .setType(UPDATE)
+                .setState(IN_PROGRESS));
+    verify(serviceInstanceService, times(0)).createServiceInstance(any());
+    verify(serviceInstanceService, times(1)).updateServiceInstance(any(), any());
+  }
+
+  @Test
+  void shouldNotUpdateTheServiceIfAlreadyExists() {
+    when(serviceInstanceService.all(any(), anyList()))
+        .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage())));
+    when(serviceInstanceService.allUserProvided(any(), anyList()))
+        .thenAnswer(
+            invocation ->
+                Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage())));
+    when(serviceInstanceService.updateServiceInstance(any(), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(createServiceInstanceResource())));
+
+    ServiceInstanceResponse response =
+        serviceInstances.createServiceInstance(
+            "new-service-instance-name",
+            "serviceName",
+            "ServicePlan1",
+            Collections.emptySet(),
+            null,
+            false,
+            cloudFoundrySpace);
+
+    assertThat(response)
+        .isEqualTo(
+            new ServiceInstanceResponse()
+                .setServiceInstanceName("new-service-instance-name")
+                .setType(CREATE)
+                .setState(SUCCEEDED));
+    verify(serviceInstanceService, times(0)).createServiceInstance(any());
+    verify(serviceInstanceService, times(0)).updateServiceInstance(any(), any());
+  }
+
+  @Test
+  void shouldThrowExceptionIfServiceExistsAndNeedsChangingButUpdateFails() {
+    when(serviceInstanceService.all(any(), anyList()))
+        .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage())));
+    when(serviceInstanceService.allUserProvided(any(), anyList()))
+        .thenAnswer(
+            invocation ->
+                Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage())));
+    when(serviceInstanceService.updateServiceInstance(any(), any()))
+        .thenReturn(
+            Calls.response(
+                Response.error(
+                    418,
+                    ResponseBody.create(
+                        MediaType.get("application/json"),
+                        "{\"description\":\"update failed\"}"))));
+
+    assertThrows(
+        () ->
+            serviceInstances.createServiceInstance(
+                "new-service-instance-name",
+                "serviceName",
+                "ServicePlan1",
+                Collections.emptySet(),
+                null,
+                true,
+                cloudFoundrySpace),
+        CloudFoundryApiException.class,
+        "Cloud Foundry API returned with error(s): update failed");
+
+    verify(serviceInstanceService, times(1)).updateServiceInstance(any(), any());
+  }
+
+  @Test
+  void shouldThrowCloudFoundryApiErrorWhenMoreThanOneServiceInstanceWithTheSameNameExists() {
+    ServiceInstance serviceInstance = new ServiceInstance();
+    serviceInstance.setServicePlanGuid("plan-guid").setName("new-service-instance-name");
+    Page<ServiceInstance> serviceInstancePage = new Page<>();
+    Resource<ServiceInstance> serviceInstanceResource = new Resource<>();
+    Resource.Metadata serviceInstanceMetadata = new Resource.Metadata();
+    serviceInstanceMetadata.setGuid("service-instance-guid");
+    serviceInstanceResource.setMetadata(serviceInstanceMetadata);
+    serviceInstanceResource.setEntity(serviceInstance);
+    serviceInstancePage.setTotalResults(2);
+    serviceInstancePage.setTotalPages(1);
+    serviceInstancePage.setResources(
+        Arrays.asList(serviceInstanceResource, serviceInstanceResource));
+
+    when(serviceInstanceService.all(any(), anyList()))
+        .thenAnswer(invocation -> Calls.response(Response.success(serviceInstancePage)));
+    when(serviceInstanceService.allUserProvided(any(), anyList()))
+        .thenAnswer(
+            invocation ->
+                Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage())));
+
+    assertThrows(
+        () ->
+            serviceInstances.createServiceInstance(
+                "new-service-instance-name",
+                "serviceName",
+                "ServicePlan1",
+                Collections.emptySet(),
+                null,
+                true,
+                cloudFoundrySpace),
+        CloudFoundryApiException.class,
+        "Cloud Foundry API returned with error(s): 2 service instances found with name 'new-service-instance-name' in space 'space', but expected only 1");
+  }
+
+  @Test
+  void shouldSuccessfullyCreateUserProvidedService() {
+    when(serviceInstanceService.all(any(), any()))
+        .thenAnswer(
+            invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage())));
+
when(serviceInstanceService.allUserProvided(any(), anyList())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage()))); + when(serviceInstanceService.createUserProvidedServiceInstance(any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstanceResource()))); + + ServiceInstanceResponse response = + serviceInstances.createUserProvidedServiceInstance( + "new-up-service-instance-name", + "syslogDrainUrl", + Collections.emptySet(), + Collections.emptyMap(), + "routeServiceUrl", + true, + cloudFoundrySpace); + + assertThat(response) + .isEqualTo( + new ServiceInstanceResponse() + .setServiceInstanceName("new-up-service-instance-name") + .setType(CREATE) + .setState(SUCCEEDED)); + verify(serviceInstanceService, times(1)).createUserProvidedServiceInstance(any()); + verify(serviceInstanceService, never()).updateUserProvidedServiceInstance(any(), any()); + } + + @Test + void shouldUpdateUserProvidedServiceInstanceIfAlreadyExists() { + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + when(serviceInstanceService.allUserProvided(any(), anyList())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + when(serviceInstanceService.updateUserProvidedServiceInstance(any(), any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstanceResource()))); + + ServiceInstanceResponse response = + serviceInstances.createUserProvidedServiceInstance( + "new-up-service-instance-name", + "syslogDrainUrl", + Collections.emptySet(), + Collections.emptyMap(), + "routeServiceUrl", + true, + cloudFoundrySpace); + + assertThat(response) + .isEqualTo( + new ServiceInstanceResponse() + .setServiceInstanceName("new-up-service-instance-name") + .setType(UPDATE) + .setState(SUCCEEDED)); + verify(serviceInstanceService, times(0)).createUserProvidedServiceInstance(any()); + verify(serviceInstanceService, times(1)).updateUserProvidedServiceInstance(any(), any()); + } + + @Test + void shouldNotUpdateUserProvidedServiceInstanceIfAlreadyExists() { + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + when(serviceInstanceService.allUserProvided(any(), anyList())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + when(serviceInstanceService.updateUserProvidedServiceInstance(any(), any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstanceResource()))); + + ServiceInstanceResponse response = + serviceInstances.createUserProvidedServiceInstance( + "new-up-service-instance-name", + "syslogDrainUrl", + Collections.emptySet(), + Collections.emptyMap(), + "routeServiceUrl", + false, + cloudFoundrySpace); + + assertThat(response) + .isEqualTo( + new ServiceInstanceResponse() + .setServiceInstanceName("new-up-service-instance-name") + .setType(CREATE) + .setState(SUCCEEDED)); + verify(serviceInstanceService, times(0)).createUserProvidedServiceInstance(any()); + verify(serviceInstanceService, times(0)).updateUserProvidedServiceInstance(any(), any()); + } + + @Test + void vetShareServiceArgumentsAndGetSharingRegionIdsShouldThrowExceptionWhenRegionIsBlank() { + assertThrows( + () -> + 
serviceInstances.vetShareServiceArgumentsAndGetSharingSpaces( + "", "service-name", singleton("org1 > space1")), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Please specify a region for the sharing service instance"); + } + + @Test + void + vetShareServiceArgumentsAndGetSharingRegionIdsShouldThrowExceptionWhenServiceSharingShareToSpaceIsTheSourceSpace() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + when(serviceInstanceService.all(any(), any())) + .thenReturn( + Calls.response( + Response.success(createOsbServiceInstancePage(USER_PROVIDED_SERVICE_INSTANCE)))); + + assertThrows( + () -> + serviceInstances.vetShareServiceArgumentsAndGetSharingSpaces( + "org > space", "service-instance-name", singleton("org > space")), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot specify 'org > space' as any of the sharing regions"); + } + + @Test + void + getOsbCloudFoundryServiceInstanceShouldThrowExceptionWhenServiceSharingServiceInstanceDoesNotExist() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + + assertThrows( + () -> + serviceInstances.getOsbServiceInstanceByRegion("org > space", "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot find service 'service-instance-name' in region 'org > space'"); + } + + @Test + void getOsbCloudFoundryServiceInstanceShouldThrowExceptionWhenServiceSharingSpaceDoesNotExist() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.empty()); + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + + assertThrows( + () -> + serviceInstances.getOsbServiceInstanceByRegion("org > space", "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot find region 'org > space'"); + } + + @Test + void checkServiceShareableShouldThrowExceptionWhenManagedServiceSharingFlagIsNotPresent() { + when(configService.getConfigFeatureFlags()) + .thenAnswer(invocation -> Calls.response(Response.success(Collections.emptySet()))); + + assertThrows( + () -> serviceInstances.checkServiceShareable("service-instance-name", null), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): 'service_instance_sharing' flag must be enabled in order to share services"); + } + + @Test + void checkServiceShareableShouldThrowExceptionWhenManagedServiceSharingFlagIsSetToFalse() { + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton(new ConfigFeatureFlag().setName(SERVICE_INSTANCE_SHARING))))); + + assertThrows( + () -> serviceInstances.checkServiceShareable("service-instance-name", null), + CloudFoundryApiException.class, + "Cloud 
Foundry API returned with error(s): 'service_instance_sharing' flag must be enabled in order to share services"); + } + + @Test + void checkServiceShareableShouldThrowExceptionIfServicePlanNotFound() { + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + when(serviceInstanceService.findServicePlanByServicePlanId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(null))); + + assertThrows( + () -> + serviceInstances.checkServiceShareable( + "service-instance-name", + CloudFoundryServiceInstance.builder().planId("some-plan").build()), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): The service plan for 'new-service-plan-name' was not found"); + } + + @Test + void checkServiceShareableShouldThrowExceptionWhenManagedServiceDoesNotExist() { + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + Resource rsp = new Resource<>(); + rsp.setEntity(new ServicePlan().setServiceGuid("service-guid")); + when(serviceInstanceService.findServicePlanByServicePlanId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(rsp))); + when(serviceInstanceService.findServiceByServiceId(any())) + .thenReturn(Calls.response(Response.success(null))); + + assertThrows( + () -> + serviceInstances.checkServiceShareable( + "service-instance-name", + CloudFoundryServiceInstance.builder().planId("some-plan").build()), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): The service broker for 'service-instance-name' was not found"); + } + + @Test + void checkServiceShareableShouldThrowExceptionWhenManagedServiceDoesNotSupportSharing() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + Resource rsp = new Resource<>(); + rsp.setEntity(new ServicePlan().setServiceGuid("service-guid")); + when(serviceInstanceService.findServicePlanByServicePlanId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(rsp))); + Resource r = new Resource<>(); + r.setEntity(new Service().setExtra("{\"shareable\": false}")); + when(serviceInstanceService.findServiceByServiceId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(r))); + + assertThrows( + () -> + serviceInstances.checkServiceShareable( + "service-instance-name", + CloudFoundryServiceInstance.builder().planId("some-plan").build()), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): The service broker must be configured as 'shareable' in order to share services"); + } + + @Test + void shareServiceInstanceShouldSuccessfullyShareAnUnmanagedInstanceToAUniqueListOfRegions() { + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + 
.id("space-guid-1") + .name("some-space-1") + .organization(cloudFoundryOrganization) + .build(); + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space-guid-2") + .name("some-space-2") + .organization(cloudFoundryOrganization) + .build(); + + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())) + .thenReturn(Optional.of(space1)) + .thenReturn(Optional.of(space2)) + .thenReturn(Optional.of(cloudFoundrySpace)); + when(serviceInstanceService.all(any(), any())) + .thenReturn( + Calls.response( + Response.success(createOsbServiceInstancePage(USER_PROVIDED_SERVICE_INSTANCE)))); + when(serviceInstanceService.getShareServiceInstanceSpaceIdsByServiceInstanceId(any())) + .thenReturn( + Calls.response(Response.success(new SharedTo().setData(Collections.emptySet())))); + when(serviceInstanceService.shareServiceInstanceToSpaceIds(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(202, null))); + ArgumentCaptor serviceInstanceIdCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor shareToCaptor = + ArgumentCaptor.forClass(CreateSharedServiceInstances.class); + Set> s = new HashSet<>(); + s.add(Collections.singletonMap("guid", "space-guid-1")); + s.add(Collections.singletonMap("guid", "space-guid-2")); + CreateSharedServiceInstances expectedBody = new CreateSharedServiceInstances().setData(s); + Set sharedToRegions = new HashSet<>(); + sharedToRegions.add("org1 > space1"); + sharedToRegions.add("org2 > space2"); + ServiceInstanceResponse expectedResult = + new ServiceInstanceResponse() + .setServiceInstanceName("service-instance-name") + .setType(SHARE) + .setState(SUCCEEDED); + + ServiceInstanceResponse result = + serviceInstances.shareServiceInstance( + "org > space", "service-instance-name", sharedToRegions); + + verify(serviceInstanceService) + .shareServiceInstanceToSpaceIds(serviceInstanceIdCaptor.capture(), shareToCaptor.capture()); + assertThat(serviceInstanceIdCaptor.getValue()).isEqualTo("service-instance-guid"); + assertThat(shareToCaptor.getValue()).usingRecursiveComparison().isEqualTo(expectedBody); + assertThat(result).usingRecursiveComparison().isEqualTo(expectedResult); + } + + @Test + void + shareServiceInstanceShouldShareManagedServiceInstanceOnlyIntoSpacesIntoWhichServiceInstanceHasNotBeenShared() { + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + .id("space-guid-1") + .name("some-space-1") + .organization(cloudFoundryOrganization) + .build(); + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space-guid-2") + .name("some-space-2") + .organization(cloudFoundryOrganization) + .build(); + + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())) + .thenReturn(Optional.of(space1)) + .thenReturn(Optional.of(space2)) + .thenReturn(Optional.of(cloudFoundrySpace)); + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + Set> alreadySharedTo = new HashSet<>(); + alreadySharedTo.add(Collections.singletonMap("guid", "space-guid-1")); + alreadySharedTo.add(Collections.singletonMap("guid", "space-guid-3")); + 
when(serviceInstanceService.getShareServiceInstanceSpaceIdsByServiceInstanceId(any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(new SharedTo().setData(alreadySharedTo)))); + ArgumentCaptor servicePlanIdCaptor = ArgumentCaptor.forClass(String.class); + Resource rsp = new Resource<>(); + rsp.setEntity(new ServicePlan().setServiceGuid("service-guid")); + when(serviceInstanceService.findServicePlanByServicePlanId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(rsp))); + ArgumentCaptor serviceIdCaptor = ArgumentCaptor.forClass(String.class); + Resource r = new Resource<>(); + r.setEntity(new Service().setExtra("{\"shareable\": true}")); + when(serviceInstanceService.findServiceByServiceId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(r))); + when(serviceInstanceService.shareServiceInstanceToSpaceIds(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(202, null))); + ArgumentCaptor serviceInstanceIdCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor shareToCaptor = + ArgumentCaptor.forClass(CreateSharedServiceInstances.class); + Set> s = singleton(Collections.singletonMap("guid", "space-guid-2")); + CreateSharedServiceInstances expectedBody = new CreateSharedServiceInstances().setData(s); + ServiceInstanceResponse expectedResult = + new ServiceInstanceResponse() + .setServiceInstanceName("service-instance-name") + .setType(SHARE) + .setState(SUCCEEDED); + Set sharingToRegions = new HashSet<>(); + sharingToRegions.add("org1 > space1"); + sharingToRegions.add("org2 > space2"); + + ServiceInstanceResponse result = + serviceInstances.shareServiceInstance( + "org > space", "service-instance-name", sharingToRegions); + + verify(serviceInstanceService).findServicePlanByServicePlanId(servicePlanIdCaptor.capture()); + assertThat(servicePlanIdCaptor.getValue()).isEqualTo("plan-guid"); + verify(serviceInstanceService).findServiceByServiceId(serviceIdCaptor.capture()); + assertThat(serviceIdCaptor.getValue()).isEqualTo("service-guid"); + verify(serviceInstanceService) + .shareServiceInstanceToSpaceIds(serviceInstanceIdCaptor.capture(), shareToCaptor.capture()); + assertThat(serviceInstanceIdCaptor.getValue()).isEqualTo("service-instance-guid"); + assertThat(shareToCaptor.getValue()).usingRecursiveComparison().isEqualTo(expectedBody); + assertThat(result).usingRecursiveComparison().isEqualTo(expectedResult); + } + + @Test + void + shareServiceInstanceShouldNotShareManagedServiceInstanceIfThereAreNoSpacesIntoWhichItHasNotBeenShared() { + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + .id("space-guid-1") + .name("some-space-1") + .organization(cloudFoundryOrganization) + .build(); + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space-guid-2") + .name("some-space-2") + .organization(cloudFoundryOrganization) + .build(); + + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())) + .thenReturn(Optional.of(space1)) + .thenReturn(Optional.of(space2)) + .thenReturn(Optional.of(cloudFoundrySpace)); + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + Set> alreadySharedTo = new HashSet<>(); + 
alreadySharedTo.add(Collections.singletonMap("guid", "space-guid-1")); + alreadySharedTo.add(Collections.singletonMap("guid", "space-guid-2")); + when(serviceInstanceService.getShareServiceInstanceSpaceIdsByServiceInstanceId(any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(new SharedTo().setData(alreadySharedTo)))); + Resource rsp = new Resource<>(); + rsp.setEntity(new ServicePlan().setServiceGuid("service-guid")); + when(serviceInstanceService.findServicePlanByServicePlanId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(rsp))); + Resource r = new Resource<>(); + r.setEntity(new Service().setExtra("{\"shareable\": true}")); + when(serviceInstanceService.findServiceByServiceId(any())) + .thenAnswer(invocation -> Calls.response(Response.success(r))); + Set> s = singleton(Collections.singletonMap("guid", "space-guid-2")); + ServiceInstanceResponse expectedResult = + new ServiceInstanceResponse() + .setServiceInstanceName("service-instance-name") + .setType(SHARE) + .setState(SUCCEEDED); + Set sharingToRegions = new HashSet<>(); + sharingToRegions.add("org1 > space1"); + sharingToRegions.add("org2 > space2"); + + ServiceInstanceResponse result = + serviceInstances.shareServiceInstance( + "org > space", "service-instance-name", sharingToRegions); + + verify(serviceInstanceService, never()).shareServiceInstanceToSpaceIds(any(), any()); + assertThat(result).usingRecursiveComparison().isEqualTo(expectedResult); + } + + @Test + void + vetUnshareServiceArgumentsAndGetSharingRegionIdsShouldThrowExceptionWhenServiceInstanceNameIsBlank() { + assertThrows( + () -> + serviceInstances.vetUnshareServiceArgumentsAndGetSharingSpaces( + "", singleton("org1 > space1")), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Please specify a name for the unsharing service instance"); + } + + @Test + void vetUnshareServiceArgumentsAndGetSharingRegionIdsShouldThrowExceptionWhenRegionListIsEmpty() { + assertThrows( + () -> + serviceInstances.vetUnshareServiceArgumentsAndGetSharingSpaces( + "service-instance-name", null), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Please specify a list of regions for unsharing 'service-instance-name'"); + } + + @Test + void + vetUnshareServiceArgumentsAndGetSharingRegionIdsShouldThrowExceptionWhenServiceSharingRegionDoesNotExist() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.empty()); + + assertThrows( + () -> + serviceInstances.vetUnshareServiceArgumentsAndGetSharingSpaces( + "service-instance-name", singleton("org1 > space1")), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot find region 'org1 > space1' for unsharing"); + } + + @Test + void + vetUnshareServiceArgumentsAndGetSharingRegionIdsShouldThrowExceptionWhenServiceSharingShareToSpaceDoesNotExist() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.empty()); + when(configService.getConfigFeatureFlags()) + .thenReturn( + Calls.response( + Response.success( + singleton( + new ConfigFeatureFlag() + .setName(SERVICE_INSTANCE_SHARING) + .setEnabled(true))))); + when(serviceInstanceService.all(any(), any())) + .thenReturn( + Calls.response( + Response.success(createOsbServiceInstancePage(USER_PROVIDED_SERVICE_INSTANCE)))); + + assertThrows( + () -> + 
serviceInstances.vetUnshareServiceArgumentsAndGetSharingSpaces( + "service-instance-name", singleton("org1 > space1")), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot find region 'org1 > space1' for unsharing"); + } + + @Test + void + unshareServiceInstanceShouldSuccessfullyUnshareAnInstanceOnlyFromAUniqueListOfRegionIdsWhereItHadBeenShared() { + CloudFoundrySpace space0 = + CloudFoundrySpace.builder() + .id("space-guid-0") + .name("some-space-0") + .organization(cloudFoundryOrganization) + .build(); + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + .id("space-guid-1") + .name("some-space-1") + .organization(cloudFoundryOrganization) + .build(); + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space-guid-2") + .name("some-space-2") + .organization(cloudFoundryOrganization) + .build(); + + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())) + .thenReturn(Optional.of(space0)) + .thenReturn(Optional.of(space1)) + .thenReturn(Optional.of(space2)); + when(spaces.getServiceInstanceByNameAndSpace(any(), eq(space0))).thenReturn(null); + when(spaces.getServiceInstanceByNameAndSpace(any(), eq(space1))) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name("service-instance-name") + .id("service-instance-guid-1") + .build()); + when(spaces.getServiceInstanceByNameAndSpace(any(), eq(space2))) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name("service-instance-name") + .id("service-instance-guid-2") + .build()); + when(serviceInstanceService.unshareServiceInstanceFromSpaceId(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(202, null))); + Set unshareFromRegions = new HashSet<>(); + unshareFromRegions.add("org0 > some-space-0"); + unshareFromRegions.add("org1 > some-space-1"); + unshareFromRegions.add("org2 > some-space-2"); + ServiceInstanceResponse expectedResult = + new ServiceInstanceResponse() + .setServiceInstanceName("service-instance-name") + .setType(UNSHARE) + .setState(SUCCEEDED); + + ServiceInstanceResponse result = + serviceInstances.unshareServiceInstance("service-instance-name", unshareFromRegions); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedResult); + verify(serviceInstanceService) + .unshareServiceInstanceFromSpaceId("service-instance-guid-1", "space-guid-1"); + verify(serviceInstanceService) + .unshareServiceInstanceFromSpaceId("service-instance-guid-2", "space-guid-2"); + verify(spaces).getServiceInstanceByNameAndSpace(eq("service-instance-name"), eq(space0)); + verify(spaces).getServiceInstanceByNameAndSpace(eq("service-instance-name"), eq(space1)); + verify(spaces).getServiceInstanceByNameAndSpace(eq("service-instance-name"), eq(space2)); + } + + @Test + void getServiceInstanceShouldThrowAnExceptionWhenTheRegionCannotBeFound() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.empty()); + + assertThrows( + () -> serviceInstances.getServiceInstance("org > space", "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot find region 'org > space'"); + } + + @Test + void getServiceInstanceShouldReturnCloudFoundryOsbServiceInstance() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + 
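// Region 'org > space' resolves to a real organization and space; the stubs below then supply the single matching OSB instance. +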
when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + + CloudFoundryServiceInstance results = + serviceInstances.getServiceInstance("org > space", "new-service-instance-name"); + CloudFoundryServiceInstance expected = + CloudFoundryServiceInstance.builder() + .id("service-instance-guid") + .planId("plan-guid") + .type(MANAGED_SERVICE_INSTANCE.toString()) + .serviceInstanceName("new-service-instance-name") + .status(SUCCEEDED.toString()) + .build(); + + assertThat(results).usingRecursiveComparison().isEqualTo(expected); + } + + @Test + void getServiceInstanceShouldReturnCloudFoundryUserProvidedServiceInstance() { + when(organizations.findByName(any())).thenReturn(Optional.ofNullable(cloudFoundryOrganization)); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + when(serviceInstanceService.allUserProvided(any(), any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + + CloudFoundryServiceInstance results = + serviceInstances.getServiceInstance("org > space", "up-service-instance-name"); + CloudFoundryServiceInstance expected = + CloudFoundryServiceInstance.builder() + .id("up-service-instance-guid") + .type(USER_PROVIDED_SERVICE_INSTANCE.toString()) + .serviceInstanceName("up-service-instance-name") + .status(SUCCEEDED.toString()) + .build(); + + assertThat(results).usingRecursiveComparison().isEqualTo(expected); + } + + @Test + void getOsbServiceInstanceShouldReturnAServiceInstanceWhenExactlyOneIsReturnedFromApi() { + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + + CloudFoundryServiceInstance service = + serviceInstances.getOsbServiceInstance(cloudFoundrySpace, "service-instance-name"); + CloudFoundryServiceInstance expected = + CloudFoundryServiceInstance.builder() + .id("service-instance-guid") + .planId("plan-guid") + .type(MANAGED_SERVICE_INSTANCE.toString()) + .serviceInstanceName("new-service-instance-name") + .status(SUCCEEDED.toString()) + .build(); + + assertThat(service).isNotNull(); + assertThat(service).usingRecursiveComparison().isEqualTo(expected); + } + + @Test + void getUserProvidedServiceInstanceShouldReturnAServiceInstanceWhenExactlyOneIsReturnedFromApi() { + when(serviceInstanceService.allUserProvided(any(), any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + + CloudFoundryServiceInstance service = + serviceInstances.getUserProvidedServiceInstance( + cloudFoundrySpace, "up-service-instance-name"); + + assertThat(service).isNotNull(); + CloudFoundryServiceInstance expected = + CloudFoundryServiceInstance.builder() + .id("up-service-instance-guid") + .type(USER_PROVIDED_SERVICE_INSTANCE.toString()) + .serviceInstanceName("up-service-instance-name") + .status(SUCCEEDED.toString()) + .build(); + assertThat(service).usingRecursiveComparison().isEqualTo(expected); + } + + @Test + void getServiceInstanceShouldReturnAServiceInstanceWithStatusWhenExactlyOneIsReturnedFromApi() { + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> 
Calls.response(Response.success(createServiceInstancePage()))); + + CloudFoundryServiceInstance service = + serviceInstances.getOsbServiceInstance(cloudFoundrySpace, "up-service-instance-name"); + + assertThat(service).isNotNull(); + CloudFoundryServiceInstance expected = + CloudFoundryServiceInstance.builder() + .id("up-service-instance-guid") + .type(MANAGED_SERVICE_INSTANCE.toString()) + .serviceInstanceName("up-service-instance-name") + .status(FAILED.toString()) + .lastOperationDescription("Custom description") + .build(); + assertThat(service).usingRecursiveComparison().isEqualTo(expected); + } + + @Test + void getOsbServiceInstanceShouldReturnNullWhenNoServicesAreReturnedFromApi() { + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + + assertThat( + serviceInstances.getOsbServiceInstance(cloudFoundrySpace, "new-service-instance-name")) + .isNull(); + } + + @Test + void getOsbServiceInstanceShouldThrowAnExceptionWhenMultipleServicesAreReturnedFromApi() { + Page<ServiceInstance> page = new Page<>(); + page.setTotalResults(2); + page.setTotalPages(1); + page.setResources( + Arrays.asList(createServiceInstanceResource(), createServiceInstanceResource())); + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(page))); + + assertThrows( + () -> + serviceInstances.getOsbServiceInstance(cloudFoundrySpace, "new-service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): 2 service instances found with name 'new-service-instance-name' in space 'space', but expected only 1"); + } + + @Test + void getOsbServiceInstanceShouldThrowExceptionWhenServiceNameIsBlank() { + assertThrows( + () -> serviceInstances.getOsbServiceInstance(cloudFoundrySpace, ""), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Please specify a name for the service being sought"); + } + + @Test + void destroyServiceInstanceShouldSucceedWhenNoServiceBindingsExist() { + when(serviceInstanceService.all(any(), anyList())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + when(serviceInstanceService.getBindingsForServiceInstance("service-instance-guid", null, null)) + .thenAnswer(invocation -> Calls.response(Response.success(new Page<>()))); + when(serviceInstanceService.destroyServiceInstance(any())) + .thenAnswer(invocation -> Calls.response(Response.success(202, null))); + + ServiceInstanceResponse response = + serviceInstances.destroyServiceInstance(cloudFoundrySpace, "new-service-instance-name"); + + assertThat(response) + .isEqualTo( + new ServiceInstanceResponse() + .setServiceInstanceName("new-service-instance-name") + .setType(DELETE) + .setState(IN_PROGRESS)); + verify(serviceInstanceService, times(1)).all(any(), anyList()); + verify(serviceInstanceService, times(1)).destroyServiceInstance(any()); + verify(serviceInstanceService, never()).allUserProvided(any(), any()); + } + + @Test + void destroyServiceInstanceShouldThrowExceptionWhenDeleteServiceInstanceFails() { + Page<ServiceBinding> serviceBindingPage = new Page<>(); + serviceBindingPage.setTotalResults(0); + serviceBindingPage.setTotalPages(1); + + when(serviceInstanceService.all(any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + when(serviceInstanceService.getBindingsForServiceInstance(any(), any(), any())) + .thenAnswer(invocation ->
Calls.response(Response.success(serviceBindingPage))); + when(serviceInstanceService.destroyServiceInstance(any())) + .thenReturn( + Calls.response( + Response.error(500, ResponseBody.create(MediaType.get("application/json"), "{}")))); + + assertThrows( + () -> serviceInstances.destroyServiceInstance(cloudFoundrySpace, "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): "); + + verify(serviceInstanceService, times(1)).destroyServiceInstance(any()); + verify(serviceInstanceService, never()).allUserProvided(any(), any()); + } + + @Test + void destroyServiceInstanceShouldReturnSuccessWhenServiceInstanceDoesNotExist() { + when(serviceInstanceService.all(any(), anyList())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + when(serviceInstanceService.allUserProvided(any(), anyList())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createEmptyUserProvidedServiceInstancePage()))); + + ServiceInstanceResponse response = + serviceInstances.destroyServiceInstance(cloudFoundrySpace, "service-instance-name"); + + assertThat(response) + .isEqualTo( + new ServiceInstanceResponse() + .setServiceInstanceName("service-instance-name") + .setType(DELETE) + .setState(LastOperation.State.NOT_FOUND)); + verify(serviceInstanceService, never()).destroyServiceInstance(any()); + } + + @Test + void destroyServiceInstanceShouldFailIfServiceBindingsExists() { + when(serviceInstanceService.all(any(), anyList())) + .thenAnswer(invocation -> Calls.response(Response.success(createOsbServiceInstancePage()))); + when(serviceInstanceService.getBindingsForServiceInstance("service-instance-guid", null, null)) + .thenReturn( + Calls.response( + Response.success(Page.singleton(new ServiceBinding(), "service-binding-guid")))); + + assertThrows( + () -> serviceInstances.destroyServiceInstance(cloudFoundrySpace, "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Unable to destroy service instance while 1 service binding(s) exist"); + + verify(serviceInstanceService, never()).destroyServiceInstance(any()); + verify(serviceInstanceService, never()).allUserProvided(any(), any()); + } + + @Test + void destroyUserProvidedServiceInstanceShouldSucceedWhenNoServiceBindingsExist() { + when(serviceInstanceService.all(any(), anyList())) + .thenAnswer(invocation -> Calls.response(Response.success(new Page<>()))); + when(serviceInstanceService.allUserProvided(any(), anyList())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + when(serviceInstanceService.getBindingsForUserProvidedServiceInstance( + "up-service-instance-guid", null, null)) + .thenAnswer(invocation -> Calls.response(Response.success(new Page<>()))); + when(serviceInstanceService.destroyUserProvidedServiceInstance(any())) + .thenAnswer(invocation -> Calls.response(Response.success(""))); + + ServiceInstanceResponse response = + serviceInstances.destroyServiceInstance(cloudFoundrySpace, "new-service-instance-name"); + + assertThat(response) + .isEqualTo( + new ServiceInstanceResponse() + .setServiceInstanceName("new-service-instance-name") + .setType(DELETE) + .setState(IN_PROGRESS)); + verify(serviceInstanceService, times(1)).all(any(), anyList()); + verify(serviceInstanceService, times(1)).allUserProvided(any(), any()); + verify(serviceInstanceService, times(1)).destroyUserProvidedServiceInstance(any()); + verify(serviceInstanceService, 
times(1)) + .getBindingsForUserProvidedServiceInstance(any(), any(), any()); + verify(serviceInstanceService, never()).destroyServiceInstance(any()); + } + + @Test + void destroyUserProvidedServiceInstanceShouldThrowExceptionWhenDeleteServiceInstanceFails() { + Page<ServiceBinding> serviceBindingPage = new Page<>(); + serviceBindingPage.setTotalResults(0); + serviceBindingPage.setTotalPages(1); + + when(serviceInstanceService.all(any(), anyList())) + .thenAnswer(invocation -> Calls.response(Response.success(new Page<>()))); + when(serviceInstanceService.allUserProvided(any(), anyList())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + when(serviceInstanceService.getBindingsForUserProvidedServiceInstance(any(), any(), any())) + .thenAnswer(invocation -> Calls.response(Response.success(serviceBindingPage))); + when(serviceInstanceService.destroyUserProvidedServiceInstance(any())) + .thenReturn( + Calls.response( + Response.error( + 418, + ResponseBody.create( + MediaType.get("application/json"), + "{\"error_code\": \"CF-ServiceBindingAppServiceTaken\", \"description\":\"i'm a teapot\"}")))); + + assertThrows( + () -> serviceInstances.destroyServiceInstance(cloudFoundrySpace, "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): i'm a teapot"); + + verify(serviceInstanceService, times(1)).all(any(), anyList()); + verify(serviceInstanceService, times(1)).allUserProvided(any(), any()); + verify(serviceInstanceService, times(1)) + .getBindingsForUserProvidedServiceInstance(any(), any(), any()); + verify(serviceInstanceService, times(1)).destroyUserProvidedServiceInstance(any()); + verify(serviceInstanceService, never()).destroyServiceInstance(any()); + } + + @Test + void destroyUserProvidedServiceInstanceShouldFailIfServiceBindingsExists() { + when(serviceInstanceService.all(any(), any())) + .thenAnswer( + invocation -> Calls.response(Response.success(createEmptyOsbServiceInstancePage()))); + when(serviceInstanceService.allUserProvided(any(), any())) + .thenAnswer( + invocation -> + Calls.response(Response.success(createUserProvidedServiceInstancePage()))); + when(serviceInstanceService.getBindingsForUserProvidedServiceInstance( + "up-service-instance-guid", null, null)) + .thenReturn( + Calls.response( + Response.success( + Page.singleton(new ServiceBinding(), "up-service-instance-guid")))); + + assertThrows( + () -> serviceInstances.destroyServiceInstance(cloudFoundrySpace, "service-instance-name"), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Unable to destroy service instance while 1 service binding(s) exist"); + + verify(serviceInstanceService, times(1)).all(any(), any()); + verify(serviceInstanceService, times(1)).allUserProvided(any(), any()); + verify(serviceInstanceService, never()).destroyUserProvidedServiceInstance(any()); + verify(serviceInstanceService, never()).destroyServiceInstance(any()); + } + + private Resource<ServiceInstance> createServiceInstanceResource() { + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance.setServicePlanGuid("plan-guid").setName("new-service-instance-name"); + Resource<ServiceInstance> serviceInstanceResource = new Resource<>(); + serviceInstanceResource.setMetadata(new Resource.Metadata().setGuid("service-instance-guid")); + serviceInstanceResource.setEntity(serviceInstance); + return serviceInstanceResource; + } + + private Resource<ServiceBinding> createServiceBindingResource() { + ServiceBinding serviceBinding = new ServiceBinding(); +
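// Fixture: a binding that ties the 'servergroup-id' app to 'service-instance-guid'. +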
serviceBinding.setAppGuid("servergroup-id"); + serviceBinding.setName(""); + serviceBinding.setServiceInstanceGuid("service-instance-guid"); + Resource<ServiceBinding> serviceBindingResource = new Resource<>(); + serviceBindingResource.setEntity(serviceBinding); + serviceBindingResource.setMetadata(new Resource.Metadata().setGuid("service-binding-guid")); + return serviceBindingResource; + } + + private Resource<UserProvidedServiceInstance> createUserProvidedServiceInstanceResource() { + UserProvidedServiceInstance userProvidedServiceInstance = new UserProvidedServiceInstance(); + userProvidedServiceInstance.setName("new-service-instance-name"); + Resource<UserProvidedServiceInstance> userProvidedServiceInstanceResource = new Resource<>(); + userProvidedServiceInstanceResource.setMetadata( + new Resource.Metadata().setGuid("up-service-instance-guid")); + userProvidedServiceInstanceResource.setEntity(userProvidedServiceInstance); + return userProvidedServiceInstanceResource; + } + + private Page<ServiceInstance> createOsbServiceInstancePage() { + return createOsbServiceInstancePage(MANAGED_SERVICE_INSTANCE); + } + + private Page<ServiceInstance> createOsbServiceInstancePage(ServiceInstance.Type type) { + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance + .setLastOperation(new LastOperation().setType(CREATE).setState(SUCCEEDED)) + .setServicePlanGuid("plan-guid") + .setType(type) + .setName("new-service-instance-name") + .setTags(singleton("spinnakerVersion-v001")); + return Page.singleton(serviceInstance, "service-instance-guid"); + } + + private Page<ServiceInstance> createEmptyOsbServiceInstancePage() { + Page<ServiceInstance> serviceInstancePage = new Page<>(); + serviceInstancePage.setTotalResults(0).setTotalPages(1); + return serviceInstancePage; + } + + private Page<UserProvidedServiceInstance> createUserProvidedServiceInstancePage() { + UserProvidedServiceInstance serviceInstance = new UserProvidedServiceInstance(); + serviceInstance.setName("up-service-instance-name").setTags(singleton("spinnakerVersion-v000")); + return Page.singleton(serviceInstance, "up-service-instance-guid"); + } + + private Page<ServiceInstance> createServiceInstancePage() { + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance.setName("up-service-instance-name").setTags(singleton("spinnakerVersion-v000")); + + LastOperation lastOperation = new LastOperation(); + lastOperation.setState(FAILED); + lastOperation.setDescription("Custom description"); + + serviceInstance.setLastOperation(lastOperation); + serviceInstance.setType(MANAGED_SERVICE_INSTANCE); + + return Page.singleton(serviceInstance, "up-service-instance-guid"); + } + + private Page<UserProvidedServiceInstance> createEmptyUserProvidedServiceInstancePage() { + Page<UserProvidedServiceInstance> userProvidedServiceInstancePage = new Page<>(); + userProvidedServiceInstancePage.setTotalResults(0).setTotalPages(1); + return userProvidedServiceInstancePage; + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceKeysTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceKeysTest.java new file mode 100644 index 00000000000..12f2d75c64b --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/ServiceKeysTest.java @@ -0,0 +1,323 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.SUCCEEDED; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.CREATE_SERVICE_KEY; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.DELETE_SERVICE_KEY; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.utils.TestUtils.assertThrows; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ServiceInstanceService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.ServiceKeyService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import io.vavr.collection.HashMap; +import java.util.Arrays; +import java.util.Map; +import java.util.Optional; +import okhttp3.MediaType; +import okhttp3.ResponseBody; +import org.junit.jupiter.api.Test; +import retrofit2.Response; +import retrofit2.mock.Calls; + +class ServiceKeysTest { + private String serviceInstanceName = "service-instance"; + private String serviceInstanceId = "service-instance-guid"; + private String serviceKeyName = "service-key"; + private String serviceKeyId = "service-key-guid"; + private CloudFoundryOrganization cloudFoundryOrganization = + CloudFoundryOrganization.builder().id("org-guid").name("org").build(); + private CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization(cloudFoundryOrganization) + .build(); + private ServiceInstanceService serviceInstanceService = mock(ServiceInstanceService.class); + private ServiceKeyService serviceKeyService = mock(ServiceKeyService.class); + private Spaces spaces = mock(Spaces.class); + private ServiceKeys serviceKeys = new ServiceKeys(serviceKeyService, spaces); + private Map credentials = + HashMap.of( + "username", "name1", + "password", "xxwer3", + "details", singleton("detail")) + .toJavaMap(); + + { + when(serviceInstanceService.findService(any(), anyList())) + .thenReturn( + Calls.response( + Response.success( + Page.singleton(new Service().setLabel("service1"), "service-guid")))); + + when(serviceInstanceService.findServicePlans(any(), anyList())) + .thenReturn( + Calls.response( + 
Response.success( + Page.singleton(new ServicePlan().setName("ServicePlan1"), "plan-guid")))); + } + + @Test + void createServiceKeyShouldReturnSuccessWhenServiceKeyIsCreated() { + when(spaces.getServiceInstanceByNameAndSpace(any(), any())) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name(serviceKeyName) + .id(serviceInstanceId) + .build()); + ServiceCredentials serviceCredentials = new ServiceCredentials().setCredentials(credentials); + Resource resource = new Resource<>(); + resource.setEntity(serviceCredentials); + when(serviceKeyService.createServiceKey(any())) + .thenReturn(Calls.response(Response.success(resource))); + CreateServiceKey requestBody = + new CreateServiceKey().setName(serviceKeyName).setServiceInstanceGuid(serviceInstanceId); + + ServiceKeyResponse expectedResults = new ServiceKeyResponse(); + expectedResults.setServiceKey(credentials); + expectedResults.setType(CREATE_SERVICE_KEY); + expectedResults.setState(SUCCEEDED); + expectedResults.setServiceInstanceName(serviceInstanceName); + expectedResults.setServiceKeyName(serviceKeyName); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn(Calls.response(Response.success(createEmptyServiceKeyPage()))); + + ServiceKeyResponse results = + serviceKeys.createServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName); + + assertThat(results).usingRecursiveComparison().isEqualTo(expectedResults); + verify(spaces).getServiceInstanceByNameAndSpace(eq(serviceInstanceName), eq(cloudFoundrySpace)); + verify(serviceKeyService).createServiceKey(eq(requestBody)); + } + + @Test + void createServiceKeyShouldThrowExceptionWhenServiceNameDoesNotExistInSpace() { + when(spaces.getServiceInstanceByNameAndSpace(any(), any())).thenReturn(null); + + assertThrows( + () -> serviceKeys.createServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Service instance '" + + serviceInstanceName + + "' not found in region '" + + cloudFoundrySpace.getRegion() + + "'"); + verify(spaces).getServiceInstanceByNameAndSpace(eq(serviceInstanceName), eq(cloudFoundrySpace)); + verify(serviceKeyService, never()).createServiceKey(any()); + } + + @Test + void createServiceKeyShouldThrowExceptionWhenServiceKeyReturnsNotFound() { + when(spaces.getServiceInstanceByNameAndSpace(any(), any())) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name(serviceKeyName) + .id(serviceInstanceId) + .build()); + CreateServiceKey requestBody = + new CreateServiceKey().setName(serviceKeyName).setServiceInstanceGuid(serviceInstanceId); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn(Calls.response(Response.success(createEmptyServiceKeyPage()))); + + when(serviceKeyService.createServiceKey(any())) + .thenReturn( + Calls.response( + Response.error(404, ResponseBody.create(MediaType.get("application/json"), "{}")))); + + assertThrows( + () -> serviceKeys.createServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Service key '" + + serviceKeyName + + "' could not be created for service instance '" + + serviceInstanceName + + "' in region '" + + cloudFoundrySpace.getRegion() + + "'"); + verify(spaces).getServiceInstanceByNameAndSpace(eq(serviceInstanceName), eq(cloudFoundrySpace)); + verify(serviceKeyService).createServiceKey(requestBody); + } + + @Test + void createServiceKeyShouldSucceedWhenServiceKeyAlreadyExists() { + 
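// The getServiceKey stub below returns an existing key with the same name, so the create call should be skipped. +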
when(spaces.getServiceInstanceByNameAndSpace(any(), any())) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name(serviceKeyName) + .id(serviceInstanceId) + .build()); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn( + Calls.response(Response.success(createServiceKeyPage(serviceKeyName, serviceKeyId)))); + + ServiceKeyResponse expectedResults = new ServiceKeyResponse(); + expectedResults.setServiceKey(credentials); + expectedResults.setType(CREATE_SERVICE_KEY); + expectedResults.setState(SUCCEEDED); + expectedResults.setServiceInstanceName(serviceInstanceName); + expectedResults.setServiceKeyName(serviceKeyName); + + ServiceKeyResponse serviceKeyResponse = + serviceKeys.createServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName); + + assertThat(serviceKeyResponse).isEqualTo(expectedResults); + verify(serviceKeyService, never()).createServiceKey(any()); + } + + @Test + void getServiceKeyShouldSucceed() { + ServiceKey serviceKey = + new ServiceKey() + .setName("service-key") + .setCredentials(singletonMap("username", "user1")) + .setServiceInstanceGuid("service-instance-guid"); + String serviceKeyGuid = "service-key-guid"; + Page<ServiceKey> page = Page.singleton(serviceKey, serviceKeyGuid); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn(Calls.response(Response.success(page))); + Resource<ServiceKey> expectedResource = + new Resource<ServiceKey>() + .setEntity(serviceKey) + .setMetadata(new Resource.Metadata().setGuid(serviceKeyGuid)); + + Optional<Resource<ServiceKey>> serviceKeyResults = + serviceKeys.getServiceKey("service-instance-guid", "service-key"); + + assertThat(serviceKeyResults.isPresent()).isTrue(); + assertThat(serviceKeyResults.get()).isEqualTo(expectedResource); + verify(serviceKeyService) + .getServiceKey( + any(), + eq(Arrays.asList("service_instance_guid:service-instance-guid", "name:service-key"))); + } + + @Test + void getServiceKeyShouldReturnEmptyOptionalWhenNotPresent() { + Page<ServiceKey> page = new Page<ServiceKey>().setTotalResults(0).setTotalPages(1); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn(Calls.response(Response.success(page))); + + Optional<Resource<ServiceKey>> serviceKeyResults = + serviceKeys.getServiceKey("service-instance-guid", "service-key"); + + assertThat(serviceKeyResults.isPresent()).isFalse(); + verify(serviceKeyService) + .getServiceKey( + any(), + eq(Arrays.asList("service_instance_guid:service-instance-guid", "name:service-key"))); + } + + @Test + void deleteServiceKeyShouldSucceedWhenServiceKeyIsDeleted() { + when(spaces.getServiceInstanceByNameAndSpace(any(), any())) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name(serviceInstanceName) + .id(serviceInstanceId) + .build()); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn( + Calls.response(Response.success(createServiceKeyPage(serviceKeyName, serviceKeyId)))); + when(serviceKeyService.deleteServiceKey(any())) + .thenReturn(Calls.response(Response.success(202, null))); + ServiceKeyResponse expectedResponse = + (ServiceKeyResponse) + new ServiceKeyResponse() + .setServiceKeyName(serviceKeyName) + .setType(DELETE_SERVICE_KEY) + .setState(SUCCEEDED) + .setServiceInstanceName(serviceInstanceName); + + ServiceKeyResponse response = + serviceKeys.deleteServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName); + + assertThat(response).isEqualTo(expectedResponse); + verify(spaces).getServiceInstanceByNameAndSpace(eq(serviceInstanceName), eq(cloudFoundrySpace)); + verify(serviceKeyService) + .getServiceKey( + any(), +
eq(Arrays.asList("service_instance_guid:service-instance-guid", "name:service-key"))); + verify(serviceKeyService).deleteServiceKey(serviceKeyId); + } + + @Test + void deleteServiceKeyShouldSucceedWhenServiceKeyDoesNotExist() { + when(spaces.getServiceInstanceByNameAndSpace(any(), any())) + .thenReturn( + CloudFoundryServiceInstance.builder() + .name(serviceInstanceName) + .id(serviceInstanceId) + .build()); + when(serviceKeyService.getServiceKey(any(), any())) + .thenReturn(Calls.response(Response.success(createEmptyServiceKeyPage()))); + ServiceKeyResponse expectedResponse = + (ServiceKeyResponse) + new ServiceKeyResponse() + .setServiceKeyName(serviceKeyName) + .setType(DELETE_SERVICE_KEY) + .setState(SUCCEEDED) + .setServiceInstanceName(serviceInstanceName); + + ServiceKeyResponse response = + serviceKeys.deleteServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName); + + assertThat(response).isEqualTo(expectedResponse); + verify(spaces).getServiceInstanceByNameAndSpace(eq(serviceInstanceName), eq(cloudFoundrySpace)); + verify(serviceKeyService) + .getServiceKey( + any(), + eq(Arrays.asList("service_instance_guid:service-instance-guid", "name:service-key"))); + verify(serviceKeyService, never()).deleteServiceKey(any()); + } + + @Test + void deleteServiceKeyShouldThrowExceptionWhenServiceDoesNotExistInSpace() { + when(spaces.getServiceInstanceByNameAndSpace(any(), any())).thenReturn(null); + + assertThrows( + () -> serviceKeys.deleteServiceKey(cloudFoundrySpace, serviceInstanceName, serviceKeyName), + CloudFoundryApiException.class, + "Cloud Foundry API returned with error(s): Cannot find service 'service-instance' in region 'org > space'"); + verify(spaces).getServiceInstanceByNameAndSpace(eq(serviceInstanceName), eq(cloudFoundrySpace)); + verify(serviceKeyService, never()).getServiceKey(any(), any()); + verify(serviceKeyService, never()).deleteServiceKey(any()); + } + + private Page createServiceKeyPage(String serviceKeyName, String serviceKeyId) { + return Page.singleton( + new ServiceKey().setName(serviceKeyName).setCredentials(credentials), serviceKeyId); + } + + private Page createEmptyServiceKeyPage() { + return new Page().setTotalPages(1).setTotalResults(0); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/SpacesTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/SpacesTest.java new file mode 100644 index 00000000000..693492e657f --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/SpacesTest.java @@ -0,0 +1,114 @@ +package com.netflix.spinnaker.clouddriver.cloudfoundry.client; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.api.SpaceService; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Page; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Pagination; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Space; 
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import org.junit.jupiter.api.Test; +import retrofit2.Response; +import retrofit2.mock.Calls; + +class SpacesTest { + private SpaceService spaceService = mock(SpaceService.class); + private Organizations orgs = mock(Organizations.class); + private Spaces spaces = new Spaces(spaceService, orgs); + private String spaceId = "space-guid"; + private CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder().name("space").id(spaceId).build(); + + @Test + void getServiceInstanceByNameAndSpaceShouldReturnServiceInstance() { + String serviceInstanceName = "service-instance"; + String serviceInstanceId = "service-instance-guid"; + ServiceInstance serviceInstance = new ServiceInstance(); + serviceInstance.setName(serviceInstanceName); + when(spaceService.getServiceInstancesById(any(), any(), any())) + .thenReturn( + Calls.response(Response.success(Page.singleton(serviceInstance, serviceInstanceId)))); + + CloudFoundryServiceInstance actual = + spaces.getServiceInstanceByNameAndSpace(serviceInstanceName, cloudFoundrySpace); + + assertThat(actual.getName()).isEqualTo(serviceInstanceName); + assertThat(actual.getId()).isEqualTo(serviceInstanceId); + verify(spaceService) + .getServiceInstancesById( + eq(spaceId), any(), eq(Collections.singletonList("name:" + serviceInstanceName))); + } + + @Test + void getServiceInstanceByNameAndSpaceShouldReturnNullWhenSpaceHasNoServiceInstances() { + String serviceInstanceName1 = "service-instance"; + when(spaceService.getServiceInstancesById(any(), any(), any())) + .thenReturn( + Calls.response( + Response.success(new Page().setTotalResults(0).setTotalPages(1)))); + + CloudFoundryServiceInstance actual = + spaces.getServiceInstanceByNameAndSpace(serviceInstanceName1, cloudFoundrySpace); + + assertThat(actual).isNull(); + } + + @Test + void findSpaceByRegionSucceedsWhenSpaceExistsInOrg() { + CloudFoundryOrganization expectedOrganization = + CloudFoundryOrganization.builder().id("org-guid").name("org").build(); + + Space space = new Space(); + space.setName("space"); + space.setGuid("space-guid"); + + CloudFoundrySpace expectedSpace = + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization(expectedOrganization) + .build(); + + when(spaceService.all(any(), any(), any())) + .thenReturn(Calls.response(Response.success(generateSpacePage()))); + when(orgs.findByName(anyString())).thenReturn(Optional.of(expectedOrganization)); + Optional result = spaces.findSpaceByRegion("org > space"); + + assertThat(result).isEqualTo(Optional.of(expectedSpace)); + } + + @Test + void findSpaceByRegionThrowsExceptionForOrgSpaceNameCaseMismatch() { + try { + spaces.findSpaceByRegion("org > sPaCe"); + failBecauseExceptionWasNotThrown(CloudFoundryApiException.class); + } catch (Throwable t) { + assertThat(t).isInstanceOf(CloudFoundryApiException.class); + } + } + + private Pagination generateSpacePage() { + Space space = new Space(); + space.setGuid("space-guid"); + space.setName("space"); + Pagination.Details details = new Pagination.Details(); + details.setTotalPages(1); + Pagination pagination = new Pagination<>(); + pagination.setPagination(details); + pagination.setResources(List.of(space)); + return pagination; + } +} 
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ErrorCodeTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ErrorCodeTest.java new file mode 100644 index 00000000000..8a8129dc44c --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/ErrorCodeTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClientUtils; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +class ErrorCodeTest { + + @Test + void deserialize() throws IOException { + ObjectMapper mapper = CloudFoundryClientUtils.getMapper(); + assertThat(mapper.readValue("\"CF-RouteHostTaken\"", ErrorDescription.Code.class)) + .isEqualTo(ErrorDescription.Code.ROUTE_HOST_TAKEN); + } + + @Test + void deserializeV2() throws IOException { + ObjectMapper mapper = CloudFoundryClientUtils.getMapper(); + ErrorDescription err = + mapper.readValue( + "{\"description\":\"The host is taken: tester\",\"error_code\":\"CF-RouteHostTaken\",\"code\":210003}", + ErrorDescription.class); + + assertThat(err.getCode()).isEqualTo(ErrorDescription.Code.ROUTE_HOST_TAKEN); + assertThat(err.getErrors()).contains("The host is taken: tester"); + } + + @Test + void deserializeV3() throws IOException { + ObjectMapper mapper = CloudFoundryClientUtils.getMapper(); + ErrorDescription err = + mapper.readValue( + "{\"errors\":[{\"code\":210003,\"title\":\"CF-RouteHostTaken\",\"detail\":\"The host is taken: tester\"}]}", + ErrorDescription.class); + + assertThat(err.getCode()).isEqualTo(ErrorDescription.Code.ROUTE_HOST_TAKEN); + assertThat(err.getErrors()).contains("The host is taken: tester"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/RouteTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/RouteTest.java new file mode 100644 index 00000000000..9f5cd773ee9 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v2/RouteTest.java @@ -0,0 +1,27 @@ +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +class RouteTest { + @Test + void routeSerialization() throws IOException { + RouteId routeId = new RouteId("host", "path", 
8080, "domainId"); + Route route = new Route(routeId, "spaceId"); + + String routeSerialized = + new ObjectMapper() + .enable(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY) + .writeValueAsString(route); + assertThat(routeSerialized) + .isEqualTo( + "{\"domainGuid\":\"domainId\",\"host\":\"host\",\"path\":\"path\",\"port\":8080,\"spaceGuid\":\"spaceId\"}"); + assertThat(new ObjectMapper().readValue(routeSerialized, Route.class).getRouteId()) + .isEqualTo(routeId); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateApplicationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateApplicationTest.java new file mode 100644 index 00000000000..53f3e7f948b --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateApplicationTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServerGroupDescription; +import java.util.Collections; +import java.util.Map; +import org.junit.jupiter.api.Test; + +class CreateApplicationTest { + + @Test + void getLifecycleShouldReturnMultipleBuildpacks() { + ToOneRelationship toOneRelationship = new ToOneRelationship(new Relationship("space-guid")); + Map relationships = + Collections.singletonMap("relationship", toOneRelationship); + DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes = + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes(); + applicationAttributes.setBuildpacks(ImmutableList.of("buildpackOne", "buildpackTwo")); + Lifecycle lifecycle = new Lifecycle(Lifecycle.Type.BUILDPACK, applicationAttributes); + CreateApplication createApplication = + new CreateApplication("some-application", relationships, null, lifecycle); + + assertThat(createApplication.getLifecycle().getData().get("buildpacks")) + .isEqualTo(applicationAttributes.getBuildpacks()); + } + + @Test + void getLifecycleShouldReturnWithBuildpackAndWithStack() { + ToOneRelationship toOneRelationship = new ToOneRelationship(new Relationship("space-guid")); + Map relationships = + Collections.singletonMap("relationship", toOneRelationship); + DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes = + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes(); + applicationAttributes.setBuildpacks(ImmutableList.of("buildpackOne")); + applicationAttributes.setStack("cflinuxfs3"); + Lifecycle lifecycle = new Lifecycle(Lifecycle.Type.BUILDPACK, applicationAttributes); + CreateApplication createApplication = + new 
CreateApplication("some-application", relationships, null, lifecycle); + + Map data = + ImmutableMap.of( + "buildpacks", applicationAttributes.getBuildpacks(), + "stack", applicationAttributes.getStack()); + + assertThat(createApplication.getLifecycle().getData()).isEqualTo(data); + } + + @Test + void getLifecycleShouldReturnWithoutBuildpackAndWithStack() { + ToOneRelationship toOneRelationship = new ToOneRelationship(new Relationship("space-guid")); + Map relationships = + Collections.singletonMap("relationship", toOneRelationship); + DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes = + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes(); + applicationAttributes.setStack("cflinuxfs3"); + Lifecycle lifecycle = new Lifecycle(Lifecycle.Type.BUILDPACK, applicationAttributes); + CreateApplication createApplication = + new CreateApplication("some-application", relationships, null, lifecycle); + + Map data = ImmutableMap.of("stack", applicationAttributes.getStack()); + + assertThat(createApplication.getLifecycle().getData()).isEqualTo(data); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateBuildTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateBuildTest.java new file mode 100644 index 00000000000..642c91af0c1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/CreateBuildTest.java @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.Test; + +class CreateBuildTest { + @Test + void serialize() throws JsonProcessingException { + assertThat(new ObjectMapper().writeValueAsString(new CreateBuild("123", 1024, 1024))) + .isEqualTo( + "{\"package\":{\"guid\":\"123\"},\"staging_memory_in_mb\":1024,\"staging_disk_in_mb\":1024}"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/LinkTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/LinkTest.java new file mode 100644 index 00000000000..652fe16fa12 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/LinkTest.java @@ -0,0 +1,31 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; + +class LinkTest { + @Test + void getGuid() { + Link link = new Link(); + link.setHref( + "https://api.sys.calabasas.cf-app.com/v3/spaces/72d50cd9-434e-4738-9349-cb146987b963"); + assertThat(link.getGuid()).isEqualTo("72d50cd9-434e-4738-9349-cb146987b963"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessTest.java new file mode 100644 index 00000000000..d8465cebeee --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/client/model/v3/ProcessTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.util.Map; +import org.junit.jupiter.api.Test; + +public class ProcessTest { + + @Test + void buildObjectTest0() { + ObjectMapper mapper = new ObjectMapper(); + + Process.HealthCheck healthCheck = + new Process.HealthCheck.HealthCheckBuilder().type(null).data(null).build(); + + Process process = new Process().setHealthCheck(healthCheck); + + Map converted = mapper.convertValue(process, Map.class); + + assertThat(converted.entrySet().size()).isEqualTo(3); + assertThat(converted.get("healthCheck")).isNull(); + } + + @Test + void buildObjectTest1() { + ObjectMapper mapper = new ObjectMapper(); + + Process.HealthCheck healthCheck = + new Process.HealthCheck.HealthCheckBuilder().type(null).data(null).build(); + + Map converted = mapper.convertValue(healthCheck, Map.class); + + assertThat(converted.entrySet().size()).isEqualTo(0); + } + + @Test + void buildObjectTest2() { + ObjectMapper mapper = new ObjectMapper(); + + Process.HealthCheck healthCheck = + new Process.HealthCheck.HealthCheckBuilder() + .type(null) + .data(new Process.HealthCheckData.HealthCheckDataBuilder().timeout(90).build()) + .build(); + + Map converted = mapper.convertValue(healthCheck, Map.class); + + Map data = (Map) converted.get("data"); + assertThat(data).isNotNull(); + assertThat(((int) data.get("timeout"))).isEqualTo(90); + assertThat(data.entrySet().size()).isEqualTo(1); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryAtomicOperationConverterTest.java new file mode 100644 index 00000000000..f724d0223a3 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryAtomicOperationConverterTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.Optional; +import org.junit.jupiter.api.Test; +import org.mockito.stubbing.Answer; + +class AbstractCloudFoundryAtomicOperationConverterTest { + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final DestroyCloudFoundryServerGroupAtomicOperationConverter converter = + new DestroyCloudFoundryServerGroupAtomicOperationConverter(); + + { + when(cloudFoundryClient.getOrganizations().findByName(any())) + .thenReturn( + Optional.of(CloudFoundryOrganization.builder().id("org-guid").name("org").build())); + } + + @Test + void expectFindSpaceSucceeds() { + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenAnswer( + (Answer<Optional<CloudFoundrySpace>>) + invocation -> + Optional.of( + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization( + CloudFoundryOrganization.builder() + .id("org-guid") + .name("org") + .build()) + .build())); + + CloudFoundryOrganization expectedOrg = + CloudFoundryOrganization.builder().id("org-guid").name("org").build(); + CloudFoundrySpace expectedSpace = + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization(expectedOrg) + .build(); + assertThat(converter.findSpace("org > space", cloudFoundryClient)) + .isEqualTo(Optional.of(expectedSpace)); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryServerGroupAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryServerGroupAtomicOperationConverterTest.java new file mode 100644 index 00000000000..4e1ba9efbb9 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractCloudFoundryServerGroupAtomicOperationConverterTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.Optional; +import org.junit.jupiter.api.Test; +import org.mockito.stubbing.Answer; + +class AbstractCloudFoundryServerGroupAtomicOperationConverterTest { + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final DestroyCloudFoundryServerGroupAtomicOperationConverter converter = + new DestroyCloudFoundryServerGroupAtomicOperationConverter(); + + { + when(cloudFoundryClient.getOrganizations().findByName(any())) + .thenAnswer( + (Answer<Optional<CloudFoundryOrganization>>) + invocation -> { + Object[] args = invocation.getArguments(); + return Optional.of( + CloudFoundryOrganization.builder() + .id(args[0].toString() + "ID") + .name(args[0].toString()) + .build()); + }); + + when(cloudFoundryClient.getApplications().findServerGroupId(any(), any())) + .thenAnswer( + (Answer<String>) + invocation -> { + Object[] args = invocation.getArguments(); + + if (args[0].equals("bad-servergroup-name")) { + return null; + } else { + return "servergroup-id"; + } + }); + } + + @Test + void getServerGroupIdSuccess() { + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenReturn(Optional.of(CloudFoundrySpace.builder().build())); + assertThat(converter.getServerGroupId("server", "region > space", cloudFoundryClient)) + .isEqualTo("servergroup-id"); + } + + @Test + void getServerGroupIdFindFails() { + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenReturn(Optional.of(CloudFoundrySpace.builder().build())); + assertThat( + converter.getServerGroupId( + "bad-servergroup-name", "region > space", cloudFoundryClient)) + .isNull(); + } + + @Test + void getServerGroupIdSpaceInvalid() { + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())).thenReturn(Optional.empty()); + assertThat(converter.getServerGroupId("server", "region > region", cloudFoundryClient)) + .isNull(); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractLoadBalancersAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractLoadBalancersAtomicOperationConverterTest.java new file mode 100644 index 00000000000..555ac615634 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/AbstractLoadBalancersAtomicOperationConverterTest.java @@ -0,0 +1,168 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.LoadBalancersDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import io.vavr.collection.HashMap; +import io.vavr.collection.List; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.stubbing.Answer; + +class AbstractLoadBalancersAtomicOperationConverterTest { + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final CacheRepository cacheRepository = mock(CacheRepository.class); + + { + when(cloudFoundryClient.getOrganizations().findByName(any())) + .thenAnswer( + (Answer<Optional<CloudFoundryOrganization>>) + invocation -> { + Object[] args = invocation.getArguments(); + return Optional.of( + CloudFoundryOrganization.builder() + .id(args[0].toString() + "ID") + .name(args[0].toString()) + .build()); + }); + + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenReturn(Optional.of(CloudFoundrySpace.builder().build())); + + when(cloudFoundryClient.getRoutes().toRouteId(any())) + .thenAnswer( + (Answer<RouteId>) + invocation -> { + Object[] args = invocation.getArguments(); + if (args[0].equals("foo")) { + return null; + } + return new RouteId("host", "index", null, "some-guid"); + }); + + when(cloudFoundryClient.getRoutes().find(any(), any())) + .thenReturn( + CloudFoundryLoadBalancer.builder() + .host("host") + .path("index") + .domain(CloudFoundryDomain.builder().name("domain.com").build()) + .build()); + } + + private final CloudFoundryCredentials cloudFoundryCredentials = + new CloudFoundryCredentials( + "test", + "managerUri", + "metricsUri", + "api.host", + "username", + "password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new
CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()) { + public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + }; + + private final CredentialsRepository credentialsRepository = + new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, null); + + { + credentialsRepository.save(cloudFoundryCredentials); + } + + private final TestAbstractLoadBalancersAtomicOperationConverter converter = + new TestAbstractLoadBalancersAtomicOperationConverter(); + + @BeforeEach + void initializeClassUnderTest() { + converter.setCredentialsRepository(credentialsRepository); + } + + @Test + void convertValidDescription() { + final Map input = + HashMap.of( + "credentials", "test", + "region", "org > space", + "loadBalancerNames", List.of("foo.host.com/index", "bar.host.com").asJava(), + "serverGroupName", "serverGroupName") + .toJavaMap(); + + final LoadBalancersDescription result = converter.convertDescription(input); + + assertThat(result.getRoutes()) + .isEqualTo(List.of("foo.host.com/index", "bar.host.com").asJava()); + assertThat(result.getRegion()).isEqualTo("org > space"); + } + + @Test + void convertWithRoutesNotFound() { + final Map input = + HashMap.of( + "credentials", "test", + "region", "org > space", + "loadBalancerNames", Collections.EMPTY_LIST, + "serverGroupName", "serverGroupName") + .toJavaMap(); + + assertThrows(IllegalArgumentException.class, () -> converter.convertDescription(input)); + } + + private class TestAbstractLoadBalancersAtomicOperationConverter + extends AbstractLoadBalancersAtomicOperationConverter { + @Override + public AtomicOperation convertOperation(Map input) { + return null; + } + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceKeyAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceKeyAtomicOperationConverterTest.java new file mode 100644 index 00000000000..7430724bdf9 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/CreateCloudFoundryServiceKeyAtomicOperationConverterTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import io.vavr.collection.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.Test; + +class CreateCloudFoundryServiceKeyAtomicOperationConverterTest { + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final CacheRepository cacheRepository = mock(CacheRepository.class); + + private CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization(CloudFoundryOrganization.builder().id("org-guid").name("org").build()) + .build(); + + { + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenReturn(Optional.of(cloudFoundrySpace)); + } + + private final CloudFoundryCredentials cloudFoundryCredentials = + new CloudFoundryCredentials( + "my-account", + "managerUri", + "metricsUri", + "apiHost", + "username", + "password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()) { + public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + }; + + private final CredentialsRepository credentialsRepository = + new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, null); + + { + credentialsRepository.save(cloudFoundryCredentials); + } + + @Test + void convertDescriptionSucceeds() { + CreateCloudFoundryServiceKeyAtomicOperationConverter converter = + new CreateCloudFoundryServiceKeyAtomicOperationConverter(); + converter.setCredentialsRepository(credentialsRepository); + + String serviceKeyName = "service-key-name"; + String serviceInstanceName = "service-instance-name"; + String region = "org > space"; + Map input = + HashMap.of( + "credentials", cloudFoundryCredentials.getName(), + "region", region, + "serviceInstanceName", serviceInstanceName, + "serviceKeyName", serviceKeyName) + .toJavaMap(); + + CreateCloudFoundryServiceKeyDescription expectedResult = + (CreateCloudFoundryServiceKeyDescription) + new CreateCloudFoundryServiceKeyDescription() + .setServiceKeyName(serviceKeyName) + 
.setServiceInstanceName(serviceInstanceName) + .setSpace(cloudFoundrySpace) + .setRegion(region) + .setClient(cloudFoundryClient) + .setCredentials(cloudFoundryCredentials); + + CreateCloudFoundryServiceKeyDescription result = converter.convertDescription(input); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedResult); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceKeyAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceKeyAtomicOperationConverterTest.java new file mode 100644 index 00000000000..6facd4307d5 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeleteCloudFoundryServiceKeyAtomicOperationConverterTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import io.vavr.collection.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.Test; + +class DeleteCloudFoundryServiceKeyAtomicOperationConverterTest { + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final CacheRepository cacheRepository = mock(CacheRepository.class); + + private CloudFoundryOrganization cloudFoundryOrganization = + CloudFoundryOrganization.builder().id("org-guid").name("org").build(); + + private CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization(cloudFoundryOrganization) + .build(); 
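// Note: the instance initializer below stubs the mocked client's organization and
// space lookups once for the whole test class; the converter tests in this package
// use this initializer-block pattern in place of an @BeforeEach setup method.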
+ + { + when(cloudFoundryClient.getOrganizations().findByName(any())) + .thenReturn(Optional.of(cloudFoundryOrganization)); + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenReturn(Optional.of(cloudFoundrySpace)); + } + + private final CloudFoundryCredentials cloudFoundryCredentials = + new CloudFoundryCredentials( + "my-account", + "managerUri", + "metricsUri", + "apiHost", + "username", + "password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()) { + public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + }; + + private final CredentialsRepository credentialsRepository = + new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, null); + + { + credentialsRepository.save(cloudFoundryCredentials); + } + + @Test + void convertDescriptionSucceeds() { + DeleteCloudFoundryServiceKeyAtomicOperationConverter converter = + new DeleteCloudFoundryServiceKeyAtomicOperationConverter(); + converter.setCredentialsRepository(credentialsRepository); + + String serviceKeyName = "service-key-name"; + String serviceInstanceName = "service-instance-name"; + String region = "org > space"; + Map input = + HashMap.of( + "credentials", cloudFoundryCredentials.getName(), + "region", region, + "serviceInstanceName", serviceInstanceName, + "serviceKeyName", serviceKeyName) + .toJavaMap(); + + DeleteCloudFoundryServiceKeyDescription expectedResult = + (DeleteCloudFoundryServiceKeyDescription) + new DeleteCloudFoundryServiceKeyDescription() + .setServiceKeyName(serviceKeyName) + .setServiceInstanceName(serviceInstanceName) + .setSpace(cloudFoundrySpace) + .setRegion(region) + .setClient(cloudFoundryClient) + .setCredentials(cloudFoundryCredentials); + + DeleteCloudFoundryServiceKeyDescription result = converter.convertDescription(input); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedResult); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServerGroupAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServerGroupAtomicOperationConverterTest.java new file mode 100644 index 00000000000..028398b96dc --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServerGroupAtomicOperationConverterTest.java @@ -0,0 +1,312 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts.ArtifactCredentialsFromString; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessRequest; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.stubbing.Answer; + +class DeployCloudFoundryServerGroupAtomicOperationConverterTest { + + private static CloudFoundryCredentials createCredentials(String name) { + CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + CacheRepository cacheRepository = mock(CacheRepository.class); + { + when(cloudFoundryClient.getOrganizations().findByName(any())) + .thenAnswer( + (Answer<Optional<CloudFoundryOrganization>>) + invocation -> { + Object[] args = invocation.getArguments(); + return Optional.of( + CloudFoundryOrganization.builder() + .id(args[0].toString() + "-guid") + .name(args[0].toString()) + .build()); + }); + + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenReturn( + Optional.of( + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization( + CloudFoundryOrganization.builder().id("org-guid").name("org").build()) + .build())); + + when(cloudFoundryClient.getApplications().findServerGroupId(any(), any())) + .thenReturn("servergroup-id"); + + when(cloudFoundryClient.getDomains().getDefault()) + .thenReturn(CloudFoundryDomain.builder().name("cf-app.com").build()); + when(cloudFoundryClient.getRoutes().find(any(), any())) + .thenReturn(CloudFoundryLoadBalancer.builder().build()) + .thenReturn(null); + } + + return new CloudFoundryCredentials( + name, + "managerUri", + "metricsUri", + "apiHost", + "username",
"password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()) { + public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + }; + } + + private List accounts = + List.of("test", "sourceAccount", "sourceAccount1", "sourceAccount2", "destinationAccount"); + + private CredentialsRepository + artifactCredentialsFromStringRepository = + new MapBackedCredentialsRepository<>( + ArtifactCredentialsFromString.ARTIFACT_TYPE, new NoopCredentialsLifecycleHandler<>()); + private final ArtifactCredentialsRepository artifactCredentialsRepository = + new ArtifactCredentialsRepository( + Collections.singletonList(artifactCredentialsFromStringRepository)); + + private final CredentialsRepository credentialsRepository = + new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, null); + + { + accounts.stream() + .map( + account -> + new ArtifactCredentialsFromString( + account, List.of("test"), "applications: [{instances: 42}]")) + .forEach(artifactCredentialsFromStringRepository::save); + accounts.forEach(account -> credentialsRepository.save(createCredentials(account))); + } + + private final DeployCloudFoundryServerGroupAtomicOperationConverter converter = + new DeployCloudFoundryServerGroupAtomicOperationConverter( + null, artifactCredentialsRepository, null); + + @BeforeEach + void initializeClassUnderTest() { + converter.setCredentialsRepository(credentialsRepository); + } + + @Test + void convertManifestMapToApplicationAttributes() { + final Map input = + Map.of( + "applications", + List.of( + Map.of( + "instances", + 7, + "memory", + "1G", + "disk_quota", + "2048M", + "health-check-type", + "http", + "health-check-http-endpoint", + "/health", + "buildpacks", + List.of("buildpack1", "buildpack2"), + "services", + List.of("service1"), + "routes", + List.of(Map.of("route", "www.example.com/foo")), + "env", + Map.of("token", "ASDF"), + "command", + "some-command"))); + + assertThat(converter.convertManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(7) + .setMemory("1G") + .setDiskQuota("2048M") + .setHealthCheckType("http") + .setHealthCheckHttpEndpoint("/health") + .setBuildpacks(List.of("buildpack1", "buildpack2")) + .setServices(List.of("service1")) + .setRoutes(List.of("www.example.com/foo")) + .setEnv(Map.of("token", "ASDF")) + .setCommand("some-command") + .setProcesses(emptyList())); + } + + @Test + void convertManifestMapToApplicationAttributesUsingDeprecatedBuildpackAttr() { + final Map input = Map.of("applications", List.of(Map.of("buildpack", "buildpack1"))); + + assertThat(converter.convertManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(1) + .setMemory("1024") + .setDiskQuota("1024") + .setBuildpacks(List.of("buildpack1")) + .setProcesses(emptyList())); + } + + @Test + void convertManifestMapToApplicationAttributesUsingDeprecatedBuildpackAttrBlankStringValue() { + final Map input = Map.of("applications", List.of(Map.of("buildpack", ""))); + + assertThat(converter.convertManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(1) + .setMemory("1024") + .setDiskQuota("1024") + 
.setBuildpacks(Collections.emptyList()) + .setProcesses(emptyList())); + } + + @Test + void convertManifestMapToApplicationAttributesUsingWithNoBuildpacks() { + final Map input = Map.of("applications", List.of(Collections.EMPTY_MAP)); + + assertThat(converter.convertManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(1) + .setMemory("1024") + .setDiskQuota("1024") + .setBuildpacks(Collections.emptyList()) + .setProcesses(emptyList())); + } + + @Test + void convertDescriptionTest() { + Map description = + ImmutableMap.of( + "applicationArtifact", + ImmutableMap.of( + "artifactAccount", + "destinationAccount", + "type", + "cloudfoundry/app", + "name", + "server-group-name", + "location", + "cf-region"), + "credentials", + "test", + "manifest", + ImmutableList.of( + ImmutableMap.of("applications", List.of(Map.of("random-route", true))))); + + DeployCloudFoundryServerGroupDescription result = converter.convertDescription(description); + + assertThat(result.getArtifactCredentials()).isNotNull(); + assertThat(result.getArtifactCredentials().getName()).isEqualTo("cloudfoundry"); + assertThat(result.getApplicationArtifact()).isNotNull(); + assertThat(result.getApplicationArtifact().getName()).isEqualTo("server-group-name"); + assertThat(result.getApplicationArtifact().getArtifactAccount()) + .isEqualTo("destinationAccount"); + assertThat(result.getApplicationArtifact().getUuid()).isEqualTo("servergroup-id"); + assertThat(result.getApplicationAttributes().getRoutes()).isNotEmpty(); + } + + @Test + void convertDescriptionWithProcesses() { + final Map input = + Map.of( + "applications", + List.of( + Map.of( + "processes", + List.of( + new ProcessRequest().setType("web").setInstances(2).setMemory("800M"))))); + + assertThat(converter.convertManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(1) + .setMemory("1024") + .setDiskQuota("1024") + .setBuildpacks(Collections.emptyList()) + .setProcesses( + List.of( + new ProcessRequest().setType("web").setInstances(2).setMemory("800M")))); + } + + @Test + void convertRandomRoutes() { + DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes = + converter.convertManifest( + ImmutableMap.of("applications", List.of(Map.of("random-route", true)))); + + assertThat(applicationAttributes.getRandomRoute()).isTrue(); + } + + @Test + void convertTimeout() { + DeployCloudFoundryServerGroupDescription.ApplicationAttributes applicationAttributes = + converter.convertManifest(ImmutableMap.of("applications", List.of(Map.of("timeout", 60)))); + + assertThat(applicationAttributes.getTimeout()).isEqualTo(60); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServiceAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServiceAtomicOperationConverterTest.java new file mode 100644 index 00000000000..0e937930926 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DeployCloudFoundryServiceAtomicOperationConverterTest.java @@ -0,0 +1,385 @@ +/* + * Copyright 2018 Pivotal, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts.ArtifactCredentialsFromString; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.AbstractServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import java.util.*; +import java.util.concurrent.ForkJoinPool; +import javax.annotation.Nullable; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.stubbing.Answer; + +class DeployCloudFoundryServiceAtomicOperationConverterTest { + + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final CacheRepository cacheRepository = mock(CacheRepository.class); + + { + when(cloudFoundryClient.getOrganizations().findByName(any())) + .thenReturn( + Optional.of(CloudFoundryOrganization.builder().id("space-guid").name("space").build())); + + when(cloudFoundryClient.getSpaces().findSpaceByRegion(any())) + .thenAnswer( + (Answer<Optional<CloudFoundrySpace>>) + invocation -> + Optional.of( + CloudFoundrySpace.builder() + .id("space-guid") + .name("space") + .organization( + CloudFoundryOrganization.builder() + .id("org-guid") + .name("org") + .build()) + .build())); + } + + private final CloudFoundryCredentials cloudFoundryCredentials =
+ new CloudFoundryCredentials( + "test", + "managerUri", + "metricsUri", + "api.Host", + "userName", + "password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()) { + public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + }; + + private final CredentialsRepository + artifactCredentialsFromStringCredentialsRepository = + new MapBackedCredentialsRepository<>( + ArtifactCredentialsFromString.ARTIFACT_TYPE, new NoopCredentialsLifecycleHandler<>()); + + private final ArtifactCredentialsRepository artifactCredentialsRepository = + new ArtifactCredentialsRepository( + Collections.singletonList(artifactCredentialsFromStringCredentialsRepository)); + + private final CredentialsRepository credentialsRepository = + new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, null); + + { + artifactCredentialsFromStringCredentialsRepository.save( + new ArtifactCredentialsFromString( + "test", + List.of("test"), + "service_instance_name: my-service-instance-name\n" + + "service: my-service\n" + + "service_plan: my-service-plan\n" + + "tags:\n" + + "- tag1\n" + + "updatable: false\n" + + "parameters: |\n" + + " { \"foo\": \"bar\" }\n")); + credentialsRepository.save(cloudFoundryCredentials); + } + + private final DeployCloudFoundryServiceAtomicOperationConverter converter = + new DeployCloudFoundryServiceAtomicOperationConverter(); + + @BeforeEach + void initializeClassUnderTest() { + converter.setCredentialsRepository(credentialsRepository); + } + + @Test + void convertManifestMapToServiceAttributes() { + final Map input = + Map.of( + "service", "my-service", + "service_instance_name", "my-service-instance-name", + "service_plan", "my-service-plan", + "tags", List.of("my-tag"), + "parameters", "{\"foo\": \"bar\"}"); + + assertThat(converter.convertManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServiceDescription.ServiceAttributes() + .setService("my-service") + .setServiceInstanceName("my-service-instance-name") + .setServicePlan("my-service-plan") + .setTags(Collections.singleton("my-tag")) + .setUpdatable(true) + .setParameterMap(Map.of("foo", "bar"))); + } + + @Test + void convertManifestMapToServiceAttributesMissingServiceThrowsException() { + final Map input = + Map.of( + "service_instance_name", "my-service-instance-name", + "service_plan", "my-service-plan", + "tags", Collections.singletonList("my-tag"), + "parameters", "{\"foo\": \"bar\"}"); + + assertThrows( + IllegalArgumentException.class, + () -> converter.convertManifest(input), + "Manifest is missing the service"); + } + + @Test + void convertManifestMapToServiceAttributesMissingServiceNameThrowsException() { + final Map input = + Map.of( + "service_instance_name", "my-service-instance-name", + "service_plan", "my-service-plan", + "tags", Collections.singletonList("my-tag"), + "parameters", "{\"foo\": \"bar\"}"); + + assertThrows( + IllegalArgumentException.class, + () -> converter.convertManifest(input), + "Manifest is missing the service name"); + } + + @Test + void convertManifestMapToServiceAttributesMissingServicePlanThrowsException() { + final Map input = + Map.of( + "service", "my-service", + "service_instance_name", "my-service-instance-name", + "tags", Collections.singletonList("my-tag"), + "parameters", "{\"foo\": \"bar\"}"); + + assertThrows( + 
IllegalArgumentException.class, + () -> converter.convertManifest(input), + "Manifest is missing the service plan"); + } + + @Test + void convertCupsManifestMapToUserProvidedServiceAttributes() { + final Map input = + Map.of( + "service_instance_name", "my-service-instance-name", + "syslog_drain_url", "test-syslog-drain-url", + "updatable", false, + "route_service_url", "test-route-service-url", + "tags", Collections.singletonList("my-tag"), + "credentials_map", "{\"foo\": \"bar\"}"); + + assertThat(converter.convertUserProvidedServiceManifest(input)) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes() + .setServiceInstanceName("my-service-instance-name") + .setSyslogDrainUrl("test-syslog-drain-url") + .setRouteServiceUrl("test-route-service-url") + .setTags(Collections.singleton("my-tag")) + .setUpdatable(false) + .setCredentials(Collections.singletonMap("foo", "bar"))); + } + + @Test + void convertCupsManifestMapToUserProvidedServiceAttributesMissingServiceNameThrowsException() { + final Map input = + Map.of( + "syslog_drain_url", "test-syslog-drain-url", + "route_service_url", "test-route-service-url", + "tags", Collections.singletonList("my-tag"), + "credentials_map", "{\"foo\": \"bar\"}"); + + assertThrows( + IllegalArgumentException.class, + () -> converter.convertUserProvidedServiceManifest(input), + "Manifest is missing the service name"); + } + + @Test + void convertDescriptionWithUserProvidedInput() { + final Map input = + Map.of( + "credentials", + "test", + "region", + "org > space", + "userProvided", + true, + "manifest", + Collections.singletonList( + Map.of( + "serviceInstanceName", "userProvidedServiceName", + "tags", Collections.singletonList("my-tag"), + "syslogDrainUrl", "http://syslogDrainUrl.io", + "credentials", "{\"foo\": \"bar\"}", + "routeServiceUrl", "http://routeServiceUrl.io"))); + + final DeployCloudFoundryServiceDescription result = converter.convertDescription(input); + assertThat(result.getServiceAttributes()).isNull(); + assertThat(result.getUserProvidedServiceAttributes()) + .usingRecursiveComparison() + .isEqualTo( + new DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes() + .setServiceInstanceName("userProvidedServiceName") + .setSyslogDrainUrl("http://syslogDrainUrl.io") + .setRouteServiceUrl("http://routeServiceUrl.io") + .setTags(Collections.singleton("my-tag")) + .setUpdatable(true) + .setCredentials(Map.of("foo", "bar"))); + } + + @Test + void convertDescriptionWithUserProvidedInputAndVersioned() { + final Map input = + Map.of( + "credentials", + "test", + "region", + "org > space", + "userProvided", + true, + "manifest", + Collections.singletonList( + Map.of( + "serviceInstanceName", "userProvidedServiceName", + "tags", Collections.singletonList("my-tag"), + "syslogDrainUrl", "http://syslogDrainUrl.io", + "credentials", "{\"foo\": \"bar\"}", + "versioned", "true", + "updatable", "false", + "routeServiceUrl", "http://routeServiceUrl.io"))); + + ServiceInstance si = new ServiceInstance(); + si.setName("userProvidedServiceName-v000"); + Resource<ServiceInstance> resource = new Resource<>(); + resource.setEntity(si); + List<Resource<? extends AbstractServiceInstance>> serviceInstances = List.of(resource); + + when(cloudFoundryClient + .getServiceInstances() + .findAllVersionedServiceInstancesBySpaceAndName(any(), any())) + .thenReturn(serviceInstances); + + final DeployCloudFoundryServiceDescription result = converter.convertDescription(input); + assertThat(result.getServiceAttributes()).isNull(); +
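// With "versioned" set, the converter is expected to derive the next instance name
// from the newest existing instance: userProvidedServiceName-v000 is stubbed above,
// so the expected attributes below carry -v001 and record -v000 as the previous name.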
+ assertThat(result.getUserProvidedServiceAttributes())
+ .usingRecursiveComparison()
+ .isEqualTo(
+ new DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes()
+ .setServiceInstanceName("userProvidedServiceName-v001")
+ .setPreviousInstanceName("userProvidedServiceName-v000")
+ .setSyslogDrainUrl("http://syslogDrainUrl.io")
+ .setRouteServiceUrl("http://routeServiceUrl.io")
+ .setTags(Collections.singleton("my-tag"))
+ .setUpdatable(false)
+ .setVersioned(true)
+ .setCredentials(Map.of("foo", "bar")));
+ }
+
+ private static class WithMap {
+ public WithMap() {}
+
+ public WithMap(String key, Object value) {
+ this.mapField = Collections.singletonMap(key, value);
+ }
+
+ @Nullable
+ @JsonDeserialize(
+ using =
+ DeployCloudFoundryServiceAtomicOperationConverter.OptionallySerializedMapDeserializer
+ .class)
+ private Map<String, Object> mapField;
+ }
+
+ @Test
+ void deserializeYamlSerializedMap() {
+ final WithMap result =
+ new ObjectMapper()
+ .convertValue(Collections.singletonMap("mapField", "key1: value1"), WithMap.class);
+
+ assertThat(result).usingRecursiveComparison().isEqualTo(new WithMap("key1", "value1"));
+ }
+
+ @Test
+ void deserializeJsonSerializedMap() {
+ final WithMap result =
+ new ObjectMapper()
+ .convertValue(
+ Collections.singletonMap("mapField", "{\"key1\": \"value1\"}}"), WithMap.class);
+
+ assertThat(result).usingRecursiveComparison().isEqualTo(new WithMap("key1", "value1"));
+ }
+
+ @Test
+ void deserializeAlreadyDeserializedMap() {
+ final WithMap result =
+ new ObjectMapper()
+ .convertValue(
+ Collections.singletonMap("mapField", Collections.singletonMap("key1", "value1")),
+ WithMap.class);
+
+ assertThat(result).usingRecursiveComparison().isEqualTo(new WithMap("key1", "value1"));
+ }
+
+ @Test
+ void deserializeEmptyStringAsMap() {
+ final WithMap result =
+ new ObjectMapper().convertValue(Collections.singletonMap("mapField", ""), WithMap.class);
+
+ assertThat(result).usingRecursiveComparison().isEqualTo(new WithMap());
+ }
+
+ @Test
+ void deserializeNullStringAsMap() {
+ final WithMap result =
+ new ObjectMapper().convertValue(Collections.singletonMap("mapField", null), WithMap.class);
+
+ assertThat(result).usingRecursiveComparison().isEqualTo(new WithMap());
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServerGroupAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServerGroupAtomicOperationConverterTest.java
new file mode 100644
index 00000000000..705b7945333
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/DestroyCloudFoundryServerGroupAtomicOperationConverterTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static org.junit.jupiter.api.Assertions.*; + +import org.junit.jupiter.api.Test; + +class DestroyCloudFoundryServerGroupAtomicOperationConverterTest { + + @Test + void convertDescription() {} +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ScaleCloudFoundryServerGroupAtomicOperationConverterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ScaleCloudFoundryServerGroupAtomicOperationConverterTest.java new file mode 100644 index 00000000000..c62dbe56252 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/converters/ScaleCloudFoundryServerGroupAtomicOperationConverterTest.java @@ -0,0 +1,155 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.converters; + +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ScaleCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.CloudFoundryProvider; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import io.vavr.collection.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.stubbing.Answer; + +class ScaleCloudFoundryServerGroupAtomicOperationConverterTest { + + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + private final CacheRepository cacheRepository = mock(CacheRepository.class); + + private final CloudFoundryCredentials cloudFoundryCredentials = + new CloudFoundryCredentials( + "test", + "managerUri", + "metricsUri", + "apiHost", + "username", + "password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + 
emptyMap(),
+ new OkHttpClient(),
+ new CloudFoundryConfigurationProperties.ClientConfig(),
+ new CloudFoundryConfigurationProperties.LocalCacheConfig()) {
+ public CloudFoundryClient getClient() {
+ return cloudFoundryClient;
+ }
+ };
+
+ {
+ when(cloudFoundryClient.getOrganizations().findByName(any()))
+ .thenAnswer(
+ (Answer<Optional<CloudFoundryOrganization>>)
+ invocation -> {
+ Object[] args = invocation.getArguments();
+ return Optional.of(
+ CloudFoundryOrganization.builder()
+ .id(args[0].toString() + "-guid")
+ .name(args[0].toString())
+ .build());
+ });
+
+ when(cloudFoundryClient.getSpaces().findSpaceByRegion(any()))
+ .thenReturn(Optional.of(CloudFoundrySpace.builder().build()));
+ }
+
+ private final CredentialsRepository<CloudFoundryCredentials> credentialsRepository =
+ new MapBackedCredentialsRepository<>(CloudFoundryProvider.PROVIDER_ID, null);
+
+ {
+ credentialsRepository.save(cloudFoundryCredentials);
+ }
+
+ private final ScaleCloudFoundryServerGroupAtomicOperationConverter converter =
+ new ScaleCloudFoundryServerGroupAtomicOperationConverter(null);
+
+ @BeforeEach
+ void initializeClassUnderTest() {
+ converter.setCredentialsRepository(credentialsRepository);
+ }
+
+ @Test
+ void convertDescription() {
+ final Map<String, Object> input =
+ HashMap.of(
+ "credentials",
+ "test",
+ "region",
+ "org > space",
+ "capacity",
+ HashMap.of(
+ "desired", 15,
+ "min", 12,
+ "max", 61)
+ .toJavaMap(),
+ "diskQuota",
+ 1027,
+ "memory",
+ 10249)
+ .toJavaMap();
+
+ final ScaleCloudFoundryServerGroupDescription result = converter.convertDescription(input);
+
+ assertThat(result.getCapacity().getDesired()).isEqualTo(15);
+ assertThat(result.getDiskQuota()).isEqualTo(1027);
+ assertThat(result.getMemory()).isEqualTo(10249);
+ }
+
+ @Test
+ void convertDescriptionMissingFields() {
+ final Map<String, Object> input =
+ HashMap.of(
+ "credentials",
+ "test",
+ "region",
+ "org > space",
+ "capacity",
+ HashMap.of(
+ "desired", 215,
+ "min", 12,
+ "max", 61)
+ .toJavaMap())
+ .toJavaMap();
+
+ final ScaleCloudFoundryServerGroupDescription result = converter.convertDescription(input);
+
+ assertThat(result.getCapacity().getDesired()).isEqualTo(215);
+ assertThat(result.getDiskQuota()).isNull();
+ assertThat(result.getMemory()).isNull();
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryAtomicOperationTest.java
new file mode 100644
index 00000000000..923b8f21396
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryAtomicOperationTest.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static java.util.Collections.emptyList;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.*;
+import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
+import com.netflix.spinnaker.clouddriver.data.task.Status;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.Collections;
+import java.util.Optional;
+import org.assertj.core.api.Condition;
+
+class AbstractCloudFoundryAtomicOperationTest {
+ final CloudFoundryClient client;
+
+ AbstractCloudFoundryAtomicOperationTest() {
+ client = new MockCloudFoundryClient();
+ }
+
+ Task runOperation(AtomicOperation<?> op) {
+ Task task = new DefaultTask("test");
+ TaskRepository.threadLocalTask.set(task);
+ try {
+ Optional.ofNullable(op.operate(emptyList()))
+ .ifPresent(o -> task.addResultObjects(Collections.singletonList(o)));
+ } catch (CloudFoundryApiException e) {
+ task.addResultObjects(Collections.singletonList(Collections.singletonMap("EXCEPTION", e)));
+ }
+ return task;
+ }
+
+ static Condition<Status> status(String desc) {
+ return new Condition<>(
+ status -> status.getStatus().equals(desc), "description = '" + desc + "'");
+ }
+
+ static Condition<Status> statusStartsWith(String desc) {
+ return new Condition<>(
+ status -> status.getStatus().startsWith(desc), "description = '" + desc + "'");
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryLoadBalancerMappingOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryLoadBalancerMappingOperationTest.java
new file mode 100644
index 00000000000..6c605a1f179
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/AbstractCloudFoundryLoadBalancerMappingOperationTest.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.data.Index.atIndex;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.AbstractCloudFoundryServerGroupDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
+import com.netflix.spinnaker.clouddriver.data.task.Status;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import java.util.Collections;
+import java.util.List;
+import org.assertj.core.api.Condition;
+import org.junit.jupiter.api.Test;
+
+class AbstractCloudFoundryLoadBalancerMappingOperationTest {
+ final CloudFoundryClient client;
+
+ private DefaultTask testTask = new DefaultTask("testTask");
+
+ {
+ TaskRepository.threadLocalTask.set(testTask);
+ }
+
+ AbstractCloudFoundryLoadBalancerMappingOperationTest() {
+ client = new MockCloudFoundryClient();
+ }
+
+ static Condition<Status> status(String desc) {
+ return new Condition<>(
+ status -> status.getStatus().equals(desc), "description = '" + desc + "'");
+ }
+
+ @Test
+ void mapRoutesShouldReturnTrueWhenRoutesIsNull() {
+ AbstractCloudFoundryServerGroupDescription description =
+ new AbstractCloudFoundryServerGroupDescription() {};
+ AbstractCloudFoundryAtomicOperationTestClass operation =
+ new AbstractCloudFoundryAtomicOperationTestClass();
+
+ assertThat(operation.mapRoutes(description, null, null, null)).isTrue();
+ assertThat(testTask.getHistory())
+ .has(status("No load balancers provided to create or update"), atIndex(1));
+ }
+
+ @Test
+ void mapRoutesShouldReturnTrueWhenRoutesAreValid() {
+ AbstractCloudFoundryServerGroupDescription description =
+ new AbstractCloudFoundryServerGroupDescription() {};
+ description.setClient(client);
+ description.setServerGroupName("sg-name");
+ AbstractCloudFoundryAtomicOperationTestClass operation =
+ new AbstractCloudFoundryAtomicOperationTestClass();
+ when(client.getRoutes().toRouteId(anyString()))
+ .thenReturn(new RouteId("road.to.nowhere", null, null, "domain-guid"));
+
+ CloudFoundryOrganization org =
+ CloudFoundryOrganization.builder().id("org-id").name("org-name").build();
+ CloudFoundrySpace space =
+ CloudFoundrySpace.builder().id("space-id").name("space-name").organization(org).build();
+ CloudFoundryLoadBalancer loadBalancer =
+ CloudFoundryLoadBalancer.builder()
+ .host("road.to")
+ .domain(
+ CloudFoundryDomain.builder()
+ .id("domain-id")
+ .name("nowhere")
+ .organization(org)
+ .build())
+ .build();
+ when(client.getRoutes().createRoute(any(RouteId.class), anyString())).thenReturn(loadBalancer);
+
+ List<String> routeList = Collections.singletonList("road.to.nowhere");
+
+ assertThat(operation.mapRoutes(description, routeList, space, null)).isTrue();
+ assertThat(testTask.getHistory())
+ .has(status("Mapping load balancer 'road.to.nowhere' to sg-name"), atIndex(2));
+ }
+
+ @Test
+ void mapRoutesShouldThrowAnExceptionWhenInvalidRoutesAreFound() {
+ AbstractCloudFoundryServerGroupDescription description =
+ new AbstractCloudFoundryServerGroupDescription() {};
+ description.setClient(client);
+ description.setServerGroupName("sg-name");
+ AbstractCloudFoundryAtomicOperationTestClass operation =
+ new AbstractCloudFoundryAtomicOperationTestClass();
+ when(client.getRoutes().toRouteId(anyString())).thenReturn(null);
+
+ List<String> routeList = Collections.singletonList("road.to.nowhere");
+
+ Exception exception = null;
+ try {
+ operation.mapRoutes(description, routeList, null, null);
+ } catch (IllegalArgumentException illegalArgumentException) {
+ exception = illegalArgumentException;
+ }
+ assertThat(exception).isNotNull();
+ assertThat(exception.getMessage()).isEqualTo("road.to.nowhere is an invalid route");
+ }
+
+ @Test
+ void mapRoutesShouldThrowAnExceptionWhenRoutesExistInOtherOrgSpace() {
+ AbstractCloudFoundryServerGroupDescription description =
+ new AbstractCloudFoundryServerGroupDescription() {};
+ description.setClient(client);
+ description.setServerGroupName("sg-name");
+
+ CloudFoundryOrganization org =
+ CloudFoundryOrganization.builder().id("org-id").name("org-name").build();
+ CloudFoundrySpace space =
+ CloudFoundrySpace.builder().id("space-id").name("space-name").organization(org).build();
+
+ AbstractCloudFoundryAtomicOperationTestClass operation =
+ new AbstractCloudFoundryAtomicOperationTestClass();
+ when(client.getRoutes().toRouteId(anyString()))
+ .thenReturn(new RouteId("road.to.nowhere", null, null, "domain-guid"));
+ when(client.getRoutes().createRoute(any(RouteId.class), anyString())).thenReturn(null);
+
+ List<String> routeList = Collections.singletonList("road.to.nowhere");
+
+ Exception exception = null;
+ try {
+ operation.mapRoutes(description, routeList, space, null);
+ } catch (CloudFoundryApiException cloudFoundryApiException) {
+ exception = cloudFoundryApiException;
+ }
+ assertThat(exception).isNotNull();
+ assertThat(exception.getMessage())
+ .isEqualTo(
+ "Cloud Foundry API returned with error(s): Load balancer already exists in another organization and space");
+ }
+
+ private static class AbstractCloudFoundryAtomicOperationTestClass
+ extends AbstractCloudFoundryLoadBalancerMappingOperation {
+ @Override
+ protected String getPhase() {
+ return "IT_S_JUST_A_PHASE";
+ }
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceBindingRequestAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceBindingRequestAtomicOperationTest.java
new file mode 100644
index 00000000000..250b3eca122
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceBindingRequestAtomicOperationTest.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2020 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.atIndex;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.*;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.AbstractServiceInstance;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.UserProvidedServiceInstance;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceBindingDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.helpers.OperationPoller;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Supplier;
+import org.junit.jupiter.api.Test;
+
+public class CreateCloudFoundryServiceBindingRequestAtomicOperationTest
+ extends AbstractCloudFoundryAtomicOperationTest {
+
+ OperationPoller poller = mock(OperationPoller.class);
+ CloudFoundryClient client = new MockCloudFoundryClient();
+
+ private final CloudFoundrySpace cloudFoundrySpace =
+ CloudFoundrySpace.builder()
+ .name("space")
+ .organization(CloudFoundryOrganization.builder().name("org").build())
+ .build();
+
+ @Test
+ public void shouldCreateServiceBinding() {
+ CreateCloudFoundryServiceBindingDescription desc =
+ new CreateCloudFoundryServiceBindingDescription();
+ desc.setSpace(cloudFoundrySpace);
+ desc.setRegion(cloudFoundrySpace.getRegion());
+ desc.setClient(client);
+ desc.setRestageRequired(true);
+ desc.setServerGroupName("app1");
+ CreateCloudFoundryServiceBindingDescription.ServiceBindingRequest binding =
+ new CreateCloudFoundryServiceBindingDescription.ServiceBindingRequest(
+ "service1", null, false);
+ desc.setServiceBindingRequests(Collections.singletonList(binding));
+
+ CreateCloudFoundryServiceBindingAtomicOperation operation =
+ new CreateCloudFoundryServiceBindingAtomicOperation(poller, desc);
+
+ UserProvidedServiceInstance serviceInstance = new UserProvidedServiceInstance();
+ serviceInstance.setName("service1");
+
+ Resource<UserProvidedServiceInstance> resource = new Resource<>();
+ Resource.Metadata metadata = new Resource.Metadata();
+ metadata.setGuid("123abc");
+ resource.setEntity(serviceInstance);
+ resource.setMetadata(metadata);
+
+ List<Resource<? extends AbstractServiceInstance>> instances = List.of(resource);
+ when(desc.getClient().getServiceInstances().findAllServicesBySpaceAndNames(any(), any()))
+ .thenReturn(instances);
+ when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any()))
+ .thenReturn(ProcessStats.State.RUNNING);
+
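+ // Poller is stubbed to report RUNNING, so the operation should see the bound app as healthy.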
+ Task task = runOperation(operation);
+
+ verify(client.getServiceInstances()).createServiceBinding(any());
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Creating Cloud Foundry service bindings between application 'app1' and services: [service1]"),
+ atIndex(1));
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Created Cloud Foundry service bindings between application 'app1' and services: [service1]"),
+ atIndex(3));
+ }
+
+ @Test
+ public void shouldCreateServiceBindingWithParameters() {
+ CreateCloudFoundryServiceBindingDescription desc =
+ new CreateCloudFoundryServiceBindingDescription();
+ desc.setSpace(cloudFoundrySpace);
+ desc.setRegion(cloudFoundrySpace.getRegion());
+ desc.setClient(client);
+ desc.setRestageRequired(true);
+ desc.setServerGroupName("app1");
+ CreateCloudFoundryServiceBindingDescription.ServiceBindingRequest binding =
+ new CreateCloudFoundryServiceBindingDescription.ServiceBindingRequest(
+ "service1", null, false);
+ desc.setServiceBindingRequests(Collections.singletonList(binding));
+
+ CreateCloudFoundryServiceBindingAtomicOperation operation =
+ new CreateCloudFoundryServiceBindingAtomicOperation(poller, desc);
+
+ UserProvidedServiceInstance serviceInstance = new UserProvidedServiceInstance();
+ serviceInstance.setName("service1");
+
+ Resource<UserProvidedServiceInstance> resource = new Resource<>();
+ Resource.Metadata metadata = new Resource.Metadata();
+ metadata.setGuid("123abc");
+ resource.setEntity(serviceInstance);
+ resource.setMetadata(metadata);
+
+ List<Resource<? extends AbstractServiceInstance>> instances = List.of(resource);
+ when(desc.getClient().getServiceInstances().findAllServicesBySpaceAndNames(any(), any()))
+ .thenReturn(instances);
+ when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any()))
+ .thenReturn(ProcessStats.State.RUNNING);
+
+ Task task = runOperation(operation);
+
+ verify(client.getServiceInstances()).createServiceBinding(any());
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Creating Cloud Foundry service bindings between application 'app1' and services: [service1]"),
+ atIndex(1));
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Created Cloud Foundry service bindings between application 'app1' and services: [service1]"),
+ atIndex(3));
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceKeyAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceKeyAtomicOperationTest.java
new file mode 100644
index 00000000000..3c573bfb546
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/CreateCloudFoundryServiceKeyAtomicOperationTest.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.SUCCEEDED;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.CREATE_SERVICE_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.atIndex;
+import static org.mockito.Mockito.*;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.CreateCloudFoundryServiceKeyDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import io.vavr.collection.HashMap;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+class CreateCloudFoundryServiceKeyAtomicOperationTest
+ extends AbstractCloudFoundryAtomicOperationTest {
+ private CreateCloudFoundryServiceKeyDescription desc =
+ new CreateCloudFoundryServiceKeyDescription();
+ private final CloudFoundrySpace cloudFoundrySpace =
+ CloudFoundrySpace.builder()
+ .name("space")
+ .organization(CloudFoundryOrganization.builder().name("org").build())
+ .build();
+
+ @Test
+ void printsTwoStatusesWhenCreatingServiceKeySucceeds() {
+ String serviceInstanceName = "service-instance-name";
+ String serviceKeyName = "service-key-name";
+ desc.setSpace(cloudFoundrySpace);
+ desc.setServiceInstanceName(serviceInstanceName);
+ desc.setServiceKeyName(serviceKeyName);
+ desc.setClient(client);
+ ServiceKeyResponse serviceKeyResponse =
+ (ServiceKeyResponse)
+ new ServiceKeyResponse()
+ .setServiceKey(HashMap.of("username", "user-1").toJavaMap())
+ .setServiceKeyName(serviceKeyName)
+ .setServiceInstanceName(serviceInstanceName)
+ .setType(CREATE_SERVICE_KEY)
+ .setState(SUCCEEDED);
+ when(client.getServiceKeys().createServiceKey(any(), any(), any()))
+ .thenReturn(serviceKeyResponse);
+
+ CreateCloudFoundryServiceKeyAtomicOperation op =
+ new CreateCloudFoundryServiceKeyAtomicOperation(desc);
+
+ Task task = runOperation(op);
+
+ verify(client.getServiceKeys())
+ .createServiceKey(eq(cloudFoundrySpace), eq(serviceInstanceName), eq(serviceKeyName));
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Creating service key 'service-key-name' for service 'service-instance-name' in 'org > space'"),
+ atIndex(1));
+ assertThat(task.getHistory())
+ .has(status("Finished creating service key 'service-key-name'"), atIndex(2));
+ List<Object> resultObjects = task.getResultObjects();
+ assertThat(resultObjects.size()).isEqualTo(1);
+ Object o = resultObjects.get(0);
+ assertThat(o).isInstanceOf(ServiceKeyResponse.class);
+ ServiceKeyResponse response = (ServiceKeyResponse) o;
+ assertThat(response).usingRecursiveComparison().isEqualTo(serviceKeyResponse);
+ }
+
+ @Test
+ void printsOnlyOneStatusWhenCreationFails() {
+ String serviceInstanceName = "service-instance-name";
+ String serviceKeyName = "service-key-name";
+ desc.setSpace(cloudFoundrySpace);
+ desc.setServiceInstanceName(serviceInstanceName);
+ desc.setServiceKeyName(serviceKeyName);
+ desc.setClient(client);
+
+ when(client.getServiceKeys().createServiceKey(any(), any(), any()))
+ .thenThrow(new CloudFoundryApiException("Much fail"));
+
+ CreateCloudFoundryServiceKeyAtomicOperation op =
+ new CreateCloudFoundryServiceKeyAtomicOperation(desc);
+
+ Task task = runOperation(op);
+
+ verify(client.getServiceKeys())
+ .createServiceKey(eq(cloudFoundrySpace), eq(serviceInstanceName), eq(serviceKeyName));
+ assertThat(task.getHistory().size()).isEqualTo(2);
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Creating service key 'service-key-name' for service 'service-instance-name' in 'org > space'"),
+ atIndex(1));
+ List<Object> resultObjects = task.getResultObjects();
+ assertThat(resultObjects.size()).isEqualTo(1);
+ Object o = resultObjects.get(0);
+ assertThat(o).isNotInstanceOf(ServiceKeyResponse.class);
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryLoadBalancerAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryLoadBalancerAtomicOperationTest.java
new file mode 100644
index 00000000000..22345809fb2
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryLoadBalancerAtomicOperationTest.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.data.Index.atIndex;
+import static org.mockito.Mockito.verify;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.util.List;
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class DeleteCloudFoundryLoadBalancerAtomicOperationTest
+ extends AbstractCloudFoundryAtomicOperationTest {
+ DeleteCloudFoundryLoadBalancerAtomicOperationTest() {
+ super();
+ }
+
+ @Test
+ void deleteLoadBalancer() {
+ CloudFoundryLoadBalancer loadBalancer =
+ CloudFoundryLoadBalancer.builder()
+ .id("id")
+ .host("host")
+ .domain(CloudFoundryDomain.builder().name("mydomain").build())
+ .build();
+
+ DeleteCloudFoundryLoadBalancerDescription desc =
+ new DeleteCloudFoundryLoadBalancerDescription();
+ desc.setClient(client);
+ desc.setLoadBalancer(loadBalancer);
+
+ DeleteCloudFoundryLoadBalancerAtomicOperation op =
+ new DeleteCloudFoundryLoadBalancerAtomicOperation(desc);
+
+ assertThat(runOperation(op).getHistory())
+ .has(status("Deleting load balancer " + loadBalancer.getName()), atIndex(1))
+ .has(status("Deleted load balancer " + loadBalancer.getName()), atIndex(2));
+
+ verify(client.getRoutes()).deleteRoute(loadBalancer.getId());
+ }
+
+ @Test
+ void nonExistentRoute() {
+ DeleteCloudFoundryLoadBalancerDescription desc =
+ new DeleteCloudFoundryLoadBalancerDescription();
+ desc.setClient(client);
+ desc.setLoadBalancer(null);
+
+ DeleteCloudFoundryLoadBalancerAtomicOperation op =
+ new DeleteCloudFoundryLoadBalancerAtomicOperation(desc);
+
+ Task task = runOperation(op);
+ List<Object> resultObjects = task.getResultObjects();
+ assertThat(resultObjects.size()).isEqualTo(1);
+ Object o = resultObjects.get(0);
+ assertThat(o).isInstanceOf(Map.class);
+ Object ex = ((Map) o).get("EXCEPTION");
+ assertThat(ex).isInstanceOf(CloudFoundryApiException.class);
+ assertThat(((CloudFoundryApiException) ex).getMessage())
+ .isEqualTo("Cloud Foundry API returned with error(s): Load balancer does not exist");
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceBindingAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceBindingAtomicOperationTest.java
new file mode 100644
index 00000000000..fd6e9fdc928
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceBindingAtomicOperationTest.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2021 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.atIndex;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.*;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.AbstractServiceInstance;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceBinding;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceBindingDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.util.Collections;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+public class DeleteCloudFoundryServiceBindingAtomicOperationTest
+ extends AbstractCloudFoundryAtomicOperationTest {
+
+ private final CloudFoundrySpace cloudFoundrySpace =
+ CloudFoundrySpace.builder()
+ .name("space")
+ .organization(CloudFoundryOrganization.builder().name("org").build())
+ .build();
+ CloudFoundryClient client = new MockCloudFoundryClient();
+
+ @Test
+ public void shouldDeleteServiceBinding() {
+ DeleteCloudFoundryServiceBindingDescription desc =
+ new DeleteCloudFoundryServiceBindingDescription();
+ desc.setSpace(cloudFoundrySpace);
+ desc.setRegion(cloudFoundrySpace.getRegion());
+ desc.setClient(client);
+ desc.setServerGroupName("app1");
+ desc.setServerGroupId("app-guid-123");
+
+ DeleteCloudFoundryServiceBindingDescription.ServiceUnbindingRequest unbinding =
+ new DeleteCloudFoundryServiceBindingDescription.ServiceUnbindingRequest("service1");
+ desc.setServiceUnbindingRequests(Collections.singletonList(unbinding));
+
+ DeleteCloudFoundryServiceBindingAtomicOperation operation =
+ new DeleteCloudFoundryServiceBindingAtomicOperation(desc);
+ ServiceBinding appServiceInstance = new ServiceBinding();
+ appServiceInstance.setName("service1");
+ appServiceInstance.setAppGuid("app1");
+ appServiceInstance.setServiceInstanceGuid("service-guid-123");
+
+ Resource<ServiceBinding> appResource = new Resource<>();
+ Resource.Metadata appMetadata = new Resource.Metadata();
+ appMetadata.setGuid("service-guid-123");
+ appResource.setEntity(appServiceInstance);
+ appResource.setMetadata(appMetadata);
+
+ List<Resource<ServiceBinding>> appInstances = List.of(appResource);
+ when(desc.getClient().getApplications().getServiceBindingsByApp(any()))
+ .thenReturn(appInstances);
+
+ ServiceInstance serviceInstance = new ServiceInstance();
+ serviceInstance.setName("service1");
+
+ Resource<ServiceInstance> resource = new Resource<>();
+ Resource.Metadata metadata = new Resource.Metadata();
+ metadata.setGuid("service-guid-123");
+ resource.setEntity(serviceInstance);
+ resource.setMetadata(metadata);
+
+ List<Resource<? extends AbstractServiceInstance>> serviceInstances = List.of(resource);
+
+ when(desc.getClient().getServiceInstances().findAllServicesBySpaceAndNames(any(), any()))
+ .thenReturn(serviceInstances);
+
+ Task task = runOperation(operation);
+
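+ // The binding to delete is resolved by matching the app's binding GUID ('service-guid-123') to the named service instance.
+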
verify(client.getServiceInstances()).deleteServiceBinding(any()); + assertThat(task.getHistory()) + .has( + status("Unbinding Cloud Foundry application 'app1' from services: [service1]"), + atIndex(1)); + assertThat(task.getHistory()) + .has( + status( + "Successfully unbound Cloud Foundry application 'app1' from services: [service1]"), + atIndex(2)); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceKeyAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceKeyAtomicOperationTest.java new file mode 100644 index 00000000000..4bceee89963 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeleteCloudFoundryServiceKeyAtomicOperationTest.java @@ -0,0 +1,116 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.SUCCEEDED; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.DELETE_SERVICE_KEY; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.Mockito.*; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceKeyResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeleteCloudFoundryServiceKeyDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import io.vavr.collection.HashMap; +import java.util.List; +import org.junit.jupiter.api.Test; + +class DeleteCloudFoundryServiceKeyAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private DeleteCloudFoundryServiceKeyDescription desc = + new DeleteCloudFoundryServiceKeyDescription(); + private final CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .name("space") + .organization(CloudFoundryOrganization.builder().name("org").build()) + .build(); + + @Test + void printsTwoStatusesWhenDeletingServiceKeySucceeds() { + String serviceInstanceName = "service-instance-name"; + String serviceKeyName = "service-key-name"; + desc.setSpace(cloudFoundrySpace); + desc.setServiceInstanceName(serviceInstanceName); + desc.setServiceKeyName(serviceKeyName); + desc.setClient(client); + ServiceKeyResponse serviceKeyResponse = + (ServiceKeyResponse) + new ServiceKeyResponse() + .setServiceKey(HashMap.of("username", "user-1").toJavaMap()) + .setServiceKeyName(serviceKeyName) + .setServiceInstanceName(serviceInstanceName) 
+ .setType(DELETE_SERVICE_KEY)
+ .setState(SUCCEEDED);
+ when(client.getServiceKeys().deleteServiceKey(any(), any(), any()))
+ .thenReturn(serviceKeyResponse);
+
+ DeleteCloudFoundryServiceKeyAtomicOperation op =
+ new DeleteCloudFoundryServiceKeyAtomicOperation(desc);
+
+ Task task = runOperation(op);
+
+ verify(client.getServiceKeys())
+ .deleteServiceKey(eq(cloudFoundrySpace), eq(serviceInstanceName), eq(serviceKeyName));
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Deleting service key 'service-key-name' for service 'service-instance-name' in 'org > space'"),
+ atIndex(1));
+ assertThat(task.getHistory())
+ .has(status("Finished deleting service key 'service-key-name'"), atIndex(2));
+ List<Object> resultObjects = task.getResultObjects();
+ assertThat(resultObjects.size()).isEqualTo(1);
+ Object o = resultObjects.get(0);
+ assertThat(o).isInstanceOf(ServiceKeyResponse.class);
+ ServiceKeyResponse response = (ServiceKeyResponse) o;
+ assertThat(response).usingRecursiveComparison().isEqualTo(serviceKeyResponse);
+ }
+
+ @Test
+ void printsOnlyOneStatusWhenDeletionFails() {
+ String serviceInstanceName = "service-instance-name";
+ String serviceKeyName = "service-key-name";
+ desc.setSpace(cloudFoundrySpace);
+ desc.setServiceInstanceName(serviceInstanceName);
+ desc.setServiceKeyName(serviceKeyName);
+ desc.setClient(client);
+
+ when(client.getServiceKeys().deleteServiceKey(any(), any(), any()))
+ .thenThrow(new CloudFoundryApiException("Much fail"));
+
+ DeleteCloudFoundryServiceKeyAtomicOperation op =
+ new DeleteCloudFoundryServiceKeyAtomicOperation(desc);
+
+ Task task = runOperation(op);
+
+ verify(client.getServiceKeys())
+ .deleteServiceKey(eq(cloudFoundrySpace), eq(serviceInstanceName), eq(serviceKeyName));
+ assertThat(task.getHistory().size()).isEqualTo(2);
+ assertThat(task.getHistory())
+ .has(
+ status(
+ "Deleting service key 'service-key-name' for service 'service-instance-name' in 'org > space'"),
+ atIndex(1));
+ List<Object> resultObjects = task.getResultObjects();
+ assertThat(resultObjects.size()).isEqualTo(1);
+ Object o = resultObjects.get(0);
+ assertThat(o).isNotInstanceOf(ServiceKeyResponse.class);
+ }
+}
diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServerGroupAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServerGroupAtomicOperationTest.java
new file mode 100644
index 00000000000..5e6045e4e80
--- /dev/null
+++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServerGroupAtomicOperationTest.java
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2018 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops.DeployCloudFoundryServerGroupAtomicOperation.convertToMb; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.artifacts.ArtifactCredentialsFromString; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.AbstractServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.Resource; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.ServiceInstance; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.CreatePackage; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.Docker; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryServerGroup; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view.CloudFoundryClusterProvider; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import io.vavr.collection.HashMap; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; +import org.assertj.core.util.Lists; +import org.junit.jupiter.api.Test; +import org.mockito.InOrder; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; +import org.mockito.verification.VerificationMode; + +class DeployCloudFoundryServerGroupAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + + private final DefaultTask testTask = new DefaultTask("testTask"); + + { + TaskRepository.threadLocalTask.set(testTask); + } + + @Test + void convertToMbHandling() { + assertThat(convertToMb("memory", "123")).isEqualTo(123); + assertThat(convertToMb("memory", "1G")).isEqualTo(1024); + assertThat(convertToMb("memory", "1GB")).isEqualTo(1024); + assertThat(convertToMb("memory", "1M")).isEqualTo(1); + assertThat(convertToMb("memory", "1MB")).isEqualTo(1); + + assertThatThrownBy(() -> convertToMb("memory", "abc")) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> convertToMb("memory", "123.45")) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + void executeOperationAndDeploySucceeds() { + // Given + final DeployCloudFoundryServerGroupDescription description = + getDeployCloudFoundryServerGroupDescription(true); + final CloudFoundryClusterProvider clusterProvider = mock(CloudFoundryClusterProvider.class); + final DeployCloudFoundryServerGroupAtomicOperation operation = + new DeployCloudFoundryServerGroupAtomicOperation( + new PassThroughOperationPoller(), description); + final Applications apps = getApplications(clusterProvider, ProcessStats.State.RUNNING); + 
final ServiceInstances serviceInstances = getServiceInstances(); + final Processes processes = getProcesses(); + + // When + final DeploymentResult result = operation.operate(Lists.emptyList()); + + // Then + verifyInOrder(apps, serviceInstances, processes, Mockito::atLeastOnce); + + assertThat(testTask.getStatus().isFailed()).isFalse(); + assertThat(result.getServerGroupNames()) + .isEqualTo(Collections.singletonList("region1:app1-stack1-detail1-v000")); + } + + @Test + void executeOperationAndDeployDockerSucceeds() { + // Given + final DeployCloudFoundryServerGroupDescription description = + getDockerDeployCloudFoundryServerGroupDescription(true); + final CloudFoundryClusterProvider clusterProvider = mock(CloudFoundryClusterProvider.class); + final DeployCloudFoundryServerGroupAtomicOperation operation = + new DeployCloudFoundryServerGroupAtomicOperation( + new PassThroughOperationPoller(), description); + final Applications apps = getApplications(clusterProvider, ProcessStats.State.RUNNING); + final ServiceInstances serviceInstances = getServiceInstances(); + final Processes processes = getProcesses(); + + // When + final DeploymentResult result = operation.operate(Lists.emptyList()); + + // Then + verifyInOrderDockerDeploy(apps, serviceInstances, processes, Mockito::atLeastOnce); + + assertThat(testTask.getStatus().isFailed()).isFalse(); + assertThat(result.getServerGroupNames()) + .isEqualTo(Collections.singletonList("region1:app1-stack1-detail1-v000")); + } + + @Test + void executeOperationAndDeployFails() { + // Given + final DeployCloudFoundryServerGroupDescription description = + getDeployCloudFoundryServerGroupDescription(true); + final DeployCloudFoundryServerGroupAtomicOperation operation = + new DeployCloudFoundryServerGroupAtomicOperation( + new PassThroughOperationPoller(), description); + final CloudFoundryClusterProvider clusterProvider = mock(CloudFoundryClusterProvider.class); + getApplications(clusterProvider, ProcessStats.State.CRASHED); + + Exception exception = null; + // When + try { + when(description + .getClient() + .getServiceInstances() + .findAllServicesBySpaceAndNames(any(), any())) + .thenReturn(createServiceInstanceResource()); + operation.operate(Lists.emptyList()); + } catch (CloudFoundryApiException cloudFoundryApiException) { + exception = cloudFoundryApiException; + } + + // Then + assertThat(exception).isNotNull(); + assertThat(exception.getMessage()) + .isEqualTo( + "Cloud Foundry API returned with error(s): Failed to start 'app1-stack1-detail1-v000' which instead crashed"); + } + + @Test + void executeOperationWithNoStartFlag() { + // Given + final DeployCloudFoundryServerGroupDescription description = + getDeployCloudFoundryServerGroupDescription(false); + final CloudFoundryClusterProvider clusterProvider = mock(CloudFoundryClusterProvider.class); + final DeployCloudFoundryServerGroupAtomicOperation operation = + new DeployCloudFoundryServerGroupAtomicOperation( + new PassThroughOperationPoller(), description); + final Applications apps = getApplications(clusterProvider, ProcessStats.State.RUNNING); + final ServiceInstances serviceInstances = getServiceInstances(); + final Processes processes = getProcesses(); + + // When + final DeploymentResult result = operation.operate(Lists.emptyList()); + + // Then + verifyInOrder(apps, serviceInstances, processes, Mockito::never); + + assertThat(testTask.getStatus().isFailed()).isFalse(); + assertThat(result.getServerGroupNames()) + .isEqualTo(Collections.singletonList("region1:app1-stack1-detail1-v000")); + 
}
+
+ private void verifyInOrder(
+ final Applications apps,
+ ServiceInstances serviceInstances,
+ Processes processes,
+ Supplier<VerificationMode> calls) {
+ InOrder inOrder = Mockito.inOrder(apps, serviceInstances, processes);
+ inOrder.verify(apps).createApplication(any(), any(), any(), any());
+ inOrder
+ .verify(apps)
+ .createPackage(eq(new CreatePackage("serverGroupId", CreatePackage.Type.BITS, null)));
+ inOrder.verify(apps).uploadPackageBits(any(), any());
+ inOrder.verify(serviceInstances).createServiceBinding(any());
+ inOrder.verify(apps).createBuild(any(), any(), any());
+ inOrder.verify(apps).buildCompleted(any());
+ inOrder.verify(apps).findDropletGuidFromBuildId(any());
+ inOrder.verify(apps).setCurrentDroplet(any(), any());
+ inOrder.verify(processes).updateProcess("serverGroupId", null, "http", "/health", 180, null);
+ inOrder.verify(processes).scaleProcess(any(), any(), any(), any());
+ inOrder.verify(apps, calls.get()).startApplication("serverGroupId");
+ }
+
+ private void verifyInOrderDockerDeploy(
+ final Applications apps,
+ ServiceInstances serviceInstances,
+ Processes processes,
+ Supplier<VerificationMode> calls) {
+ InOrder inOrder = Mockito.inOrder(apps, processes, serviceInstances);
+ inOrder.verify(apps).createApplication(any(), any(), any(), any());
+ inOrder.verify(apps).createPackage(any());
+ inOrder.verify(cloudFoundryClient.getServiceInstances()).createServiceBinding(any());
+ inOrder.verify(apps).createBuild(any(), any(), any());
+ inOrder.verify(processes).updateProcess("serverGroupId", null, "http", "/health", 180, null);
+ inOrder.verify(processes).scaleProcess("serverGroupId", 7, 1024, 2048);
+ inOrder.verify(apps, calls.get()).startApplication("serverGroupId");
+ }
+
+ private ServiceInstances getServiceInstances() {
+ final ServiceInstances serviceInstances = cloudFoundryClient.getServiceInstances();
+ when(serviceInstances.findAllServicesBySpaceAndNames(any(), any()))
+ .thenReturn(createServiceInstanceResource());
+ return serviceInstances;
+ }
+
+ private Processes getProcesses() {
+ return cloudFoundryClient.getProcesses();
+ }
+
+ private List<Resource<? extends AbstractServiceInstance>> createServiceInstanceResource() {
+ ServiceInstance serviceInstance = new ServiceInstance();
+ serviceInstance.setServicePlanGuid("plan-guid").setName("service1");
+ Resource<ServiceInstance> serviceInstanceResource = new Resource<>();
+ serviceInstanceResource.setMetadata(new Resource.Metadata().setGuid("service-instance-guid"));
+ serviceInstanceResource.setEntity(serviceInstance);
+ return List.of(serviceInstanceResource);
+ }
+
+ private Applications getApplications(
+ CloudFoundryClusterProvider clusterProvider, ProcessStats.State state) {
+ final Applications apps = cloudFoundryClient.getApplications();
+ when(clusterProvider.getClusters()).thenReturn(Collections.emptyMap());
+ when(apps.createApplication(any(), any(), any(), any()))
+ .thenReturn(
+ CloudFoundryServerGroup.builder()
+ .id("serverGroupId")
+ .space(CloudFoundrySpace.builder().id("spaceId").build())
+ .build());
+ when(apps.getAppState(any())).thenReturn(state);
+ when(apps.createPackage(any()))
+ .thenAnswer(
+ (Answer<String>)
+ invocation -> {
+ Object[] args = invocation.getArguments();
+ return args[0].toString() + "_package";
+ });
+ when(apps.createBuild(any(), any(), any())).thenReturn("some-build");
+ when(apps.buildCompleted(any())).thenReturn(true);
+ when(apps.findDropletGuidFromBuildId(any())).thenReturn("droplet-guid");
+ doNothing().when(apps).setCurrentDroplet(any(), any());
+ return apps;
+ }
+
+ private DeployCloudFoundryServerGroupDescription
getDeployCloudFoundryServerGroupDescription( + boolean b) { + final DeployCloudFoundryServerGroupDescription description = + new DeployCloudFoundryServerGroupDescription() + .setAccountName("account1") + .setApplication("app1") + .setStack("stack1") + .setFreeFormDetails("detail1") + .setArtifactCredentials( + new ArtifactCredentialsFromString( + "test", io.vavr.collection.List.of("a").asJava(), "")) + .setSpace(CloudFoundrySpace.builder().id("space1Id").name("space1").build()) + .setApplicationArtifact(Artifact.builder().reference("ref1").build()) + .setDocker(null) + .setApplicationAttributes( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(7) + .setMemory("1G") + .setDiskQuota("2048M") + .setHealthCheckType("http") + .setHealthCheckHttpEndpoint("/health") + .setBuildpacks(io.vavr.collection.List.of("buildpack1", "buildpack2").asJava()) + .setServices(List.of("service1")) + .setEnv(HashMap.of("token", "ASDF").toJavaMap()) + .setTimeout(180)); + description.setClient(cloudFoundryClient); + description.setRegion("region1"); + description.setStartApplication(b); + return description; + } + + private DeployCloudFoundryServerGroupDescription + getDockerDeployCloudFoundryServerGroupDescription(boolean b) { + final DeployCloudFoundryServerGroupDescription description = + new DeployCloudFoundryServerGroupDescription() + .setAccountName("account1") + .setApplication("app1") + .setStack("stack1") + .setFreeFormDetails("detail1") + .setSpace(CloudFoundrySpace.builder().id("space1Id").name("space1").build()) + .setArtifactCredentials( + new ArtifactCredentialsFromString( + "test", io.vavr.collection.List.of("a").asJava(), "")) + .setApplicationArtifact(Artifact.builder().reference("ref1").build()) + .setDocker(Docker.builder().image("some/image").build()) + .setApplicationAttributes( + new DeployCloudFoundryServerGroupDescription.ApplicationAttributes() + .setInstances(7) + .setMemory("1G") + .setDiskQuota("2048M") + .setHealthCheckType("http") + .setHealthCheckHttpEndpoint("/health") + .setBuildpacks(Collections.emptyList()) + .setServices(List.of("service1")) + .setEnv(HashMap.of("token", "ASDF").toJavaMap()) + .setTimeout(180)); + description.setClient(cloudFoundryClient); + description.setRegion("region1"); + description.setStartApplication(b); + return description; + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServiceAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServiceAtomicOperationTest.java new file mode 100644 index 00000000000..f2770eb12e1 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DeployCloudFoundryServiceAtomicOperationTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.IN_PROGRESS;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.SUCCEEDED;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.CREATE;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.UPDATE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.atIndex;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DeployCloudFoundryServiceDescription;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+class DeployCloudFoundryServiceAtomicOperationTest extends AbstractCloudFoundryAtomicOperationTest {
+  private DeployCloudFoundryServiceDescription desc = new DeployCloudFoundryServiceDescription();
+
+  @Test
+  void deployService() {
+    desc.setClient(client);
+    desc.setServiceAttributes(
+        new DeployCloudFoundryServiceDescription.ServiceAttributes()
+            .setServiceInstanceName("some-service-name")
+            .setService("some-service")
+            .setServicePlan("some-service-plan"));
+
+    ServiceInstanceResponse serviceInstanceResponse =
+        new ServiceInstanceResponse()
+            .setServiceInstanceName("some-service-name")
+            .setType(UPDATE)
+            .setState(IN_PROGRESS);
+    when(client
+            .getServiceInstances()
+            .createServiceInstance(any(), any(), any(), any(), any(), anyBoolean(), any()))
+        .thenReturn(serviceInstanceResponse);
+
+    DeployCloudFoundryServiceAtomicOperation op =
+        new DeployCloudFoundryServiceAtomicOperation(desc);
+
+    Task task = runOperation(op);
+    List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(ServiceInstanceResponse.class);
+    ServiceInstanceResponse response = (ServiceInstanceResponse) o;
+    assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse);
+    assertThat(task.getHistory())
+        .has(
+            status(
+                "Updating service instance 'some-service-name' from service some-service and service plan some-service-plan"),
+            atIndex(1));
+  }
+
+  @Test
+  void deployUserProvidedService() {
+    desc.setUserProvided(true);
+    desc.setClient(client);
+    desc.setUserProvidedServiceAttributes(
+        new DeployCloudFoundryServiceDescription.UserProvidedServiceAttributes()
+            .setServiceInstanceName("some-up-service-name"));
+
+    ServiceInstanceResponse serviceInstanceResponse =
+        new ServiceInstanceResponse()
+            .setServiceInstanceName("some-up-service-name")
+            .setType(CREATE)
+            .setState(SUCCEEDED);
+    when(client
+            .getServiceInstances()
+            .createUserProvidedServiceInstance(
+                any(), any(), any(), any(), any(), anyBoolean(), any()))
+        .thenReturn(serviceInstanceResponse);
+
+    DeployCloudFoundryServiceAtomicOperation op =
+        new DeployCloudFoundryServiceAtomicOperation(desc);
+
+    Task task = runOperation(op);
+    List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(ServiceInstanceResponse.class);
+    ServiceInstanceResponse
response = (ServiceInstanceResponse) o; + assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse); + assertThat(task.getHistory()) + .has(status("Creating user-provided service instance 'some-up-service-name'"), atIndex(1)) + .has(status("Created user-provided service instance 'some-up-service-name'"), atIndex(2)); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServiceAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServiceAtomicOperationTest.java new file mode 100644 index 00000000000..35c65877694 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/DestroyCloudFoundryServiceAtomicOperationTest.java @@ -0,0 +1,172 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.IN_PROGRESS; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.NOT_FOUND; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.DELETE; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.utils.TestUtils.assertThrows; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.DestroyCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import java.util.List; +import org.junit.jupiter.api.Test; + +class DestroyCloudFoundryServiceAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private DestroyCloudFoundryServiceDescription desc = new DestroyCloudFoundryServiceDescription(); + + { + desc.setServiceInstanceName("service-instance-name"); + desc.setApplication("sampleapp"); + desc.setSpace( + CloudFoundrySpace.builder() + .name("space-name") + .organization(CloudFoundryOrganization.builder().name("org-name").build()) + .build()); + desc.setClient(client); + } + + @Test + void destroyCloudFoundryService() { + ServiceInstanceResponse serviceInstanceResponse = + new ServiceInstanceResponse() + .setServiceInstanceName("service-instance-name") + .setType(DELETE) + .setState(IN_PROGRESS); + + when(client.getServiceInstances().destroyServiceInstance(any(), any())) + .thenReturn(serviceInstanceResponse); + + 
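// Destroying without removeBindings set: only the service instance itself is deleted.
+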
DestroyCloudFoundryServiceAtomicOperation op =
+        new DestroyCloudFoundryServiceAtomicOperation(desc);
+
+    Task task = runOperation(op);
+    List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(ServiceInstanceResponse.class);
+    ServiceInstanceResponse response = (ServiceInstanceResponse) o;
+    assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse);
+    assertThat(task.getHistory())
+        .has(
+            status(
+                "Started removing service instance 'service-instance-name' from space space-name"),
+            atIndex(1));
+  }
+
+  @Test
+  void destroyCloudFoundryServiceWithRemoveBindings() {
+    ServiceInstanceResponse serviceInstanceResponse =
+        new ServiceInstanceResponse()
+            .setServiceInstanceName("service-instance-name")
+            .setType(DELETE)
+            .setState(IN_PROGRESS);
+
+    DestroyCloudFoundryServiceDescription newDesc = new DestroyCloudFoundryServiceDescription();
+    newDesc.setRemoveBindings(true);
+    newDesc.setServiceInstanceName("service-instance-name");
+    newDesc.setApplication("sampleapp");
+    newDesc.setSpace(
+        CloudFoundrySpace.builder()
+            .name("space-name")
+            .organization(CloudFoundryOrganization.builder().name("org-name").build())
+            .build());
+    newDesc.setClient(client);
+
+    when(client.getServiceInstances().destroyServiceInstance(any(), any()))
+        .thenReturn(serviceInstanceResponse);
+
+    DestroyCloudFoundryServiceAtomicOperation op =
+        new DestroyCloudFoundryServiceAtomicOperation(newDesc);
+
+    Task task = runOperation(op);
+    List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(ServiceInstanceResponse.class);
+    ServiceInstanceResponse response = (ServiceInstanceResponse) o;
+    assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse);
+    assertThat(task.getHistory())
+        .has(
+            status(
+                "Started removing service bindings for '"
+                    + newDesc.getServiceInstanceName()
+                    + "' from space "
+                    + newDesc.getSpace().getName()),
+            atIndex(1));
+  }
+
+  @Test
+  void destroyCloudFoundryServiceFailsWhenInstanceNotFound() {
+    ServiceInstanceResponse serviceInstanceResponse =
+        new ServiceInstanceResponse()
+            .setServiceInstanceName("service-instance-name")
+            .setType(DELETE)
+            .setState(NOT_FOUND);
+
+    when(client.getServiceInstances().destroyServiceInstance(any(), any()))
+        .thenReturn(serviceInstanceResponse);
+
+    DestroyCloudFoundryServiceAtomicOperation op =
+        new DestroyCloudFoundryServiceAtomicOperation(desc);
+
+    assertThrows(
+        () -> runOperation(op),
+        RuntimeException.class,
+        "Service instance "
+            + desc.getServiceInstanceName()
+            + " not found, in "
+            + desc.getSpace().getRegion());
+  }
+
+  @Test
+  void destroyUserProvidedService() {
+    desc.setServiceInstanceName("up-service-instance-name");
+    ServiceInstanceResponse serviceInstanceResponse =
+        new ServiceInstanceResponse()
+            .setServiceInstanceName("up-service-instance-name")
+            .setType(DELETE)
+            .setState(IN_PROGRESS);
+
+    when(client.getServiceInstances().destroyServiceInstance(any(), any()))
+        .thenReturn(serviceInstanceResponse);
+
+    DestroyCloudFoundryServiceAtomicOperation op =
+        new DestroyCloudFoundryServiceAtomicOperation(desc);
+
+    Task task = runOperation(op);
+    List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(ServiceInstanceResponse.class);
+    ServiceInstanceResponse response =
(ServiceInstanceResponse) o; + assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse); + assertThat(task.getHistory()) + .has( + status( + "Started removing service instance 'up-service-instance-name' from space space-name"), + atIndex(1)); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/PassThroughOperationPoller.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/PassThroughOperationPoller.java new file mode 100644 index 00000000000..db2a5768377 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/PassThroughOperationPoller.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import groovy.lang.Closure; +import java.util.function.Function; +import java.util.function.Supplier; + +public class PassThroughOperationPoller extends OperationPoller { + public PassThroughOperationPoller() { + super(0, 0); + } + + @Override + public Object waitForOperation( + Closure operation, + Closure ifDone, + Long timeoutSeconds, + Task task, + String resourceString, + String basePhase) { + return operation.call(); + } + + @Override + public T waitForOperation( + Supplier operation, + Function ifDone, + Long timeoutSeconds, + Task task, + String resourceString, + String basePhase) { + return operation.get(); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ScaleCloudFoundryServerGroupAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ScaleCloudFoundryServerGroupAtomicOperationTest.java new file mode 100644 index 00000000000..0354279ac02 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ScaleCloudFoundryServerGroupAtomicOperationTest.java @@ -0,0 +1,123 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ScaleCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import org.junit.jupiter.api.Test; + +class ScaleCloudFoundryServerGroupAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private final ScaleCloudFoundryServerGroupDescription desc; + + ScaleCloudFoundryServerGroupAtomicOperationTest() { + desc = new ScaleCloudFoundryServerGroupDescription(); + desc.setClient(client); + desc.setServerGroupName("myapp"); + desc.setCapacity(ServerGroup.Capacity.builder().desired(2).build()); + } + + @Test + void scale() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.RUNNING); + + ScaleCloudFoundryServerGroupAtomicOperation op = + new ScaleCloudFoundryServerGroupAtomicOperation(poller, desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Resizing 'myapp'"), atIndex(1)) + .has(status("Resized 'myapp'"), atIndex(2)); + } + + @Test + void scaleForStoppedServerGroup() { + desc.setScaleStoppedServerGroup(true); + desc.setCapacity(ServerGroup.Capacity.builder().desired(2).min(2).max(3).build()); + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.RUNNING); + + ScaleCloudFoundryServerGroupAtomicOperation op = + new ScaleCloudFoundryServerGroupAtomicOperation(poller, desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Resizing 'myapp'"), atIndex(1)) + .has(status("Resized 'myapp'"), atIndex(2)); + verify(client.getProcesses()).scaleProcess(any(), eq(3), any(), any()); + } + + @Test + void failedToScale() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.CRASHED); + + ScaleCloudFoundryServerGroupAtomicOperation op = + new ScaleCloudFoundryServerGroupAtomicOperation(poller, desc); + + Task task = runOperation(op); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(Map.class); + Object ex = ((Map) o).get("EXCEPTION"); + assertThat(ex).isInstanceOf(CloudFoundryApiException.class); + assertThat(((CloudFoundryApiException) ex).getMessage()) + .isEqualTo( + "Cloud Foundry API returned with error(s): Failed to start 'myapp' which instead crashed"); + } + + @Test + void scaleDownToZero() { + 
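// Scaling down to zero desired instances is a valid resize; the poller reporting DOWN counts as success.
+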
desc.setCapacity(new ServerGroup.Capacity(1, 1, 0)); + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.DOWN); + + ScaleCloudFoundryServerGroupAtomicOperation op = + new ScaleCloudFoundryServerGroupAtomicOperation(poller, desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Resizing 'myapp'"), atIndex(1)) + .has(status("Resized 'myapp'"), atIndex(2)); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ShareCloudFoundryServiceAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ShareCloudFoundryServiceAtomicOperationTest.java new file mode 100644 index 00000000000..f6b23afcd60 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/ShareCloudFoundryServiceAtomicOperationTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.SUCCEEDED; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.SHARE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.matches; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.ShareCloudFoundryServiceDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import org.junit.jupiter.api.Test; + +class ShareCloudFoundryServiceAtomicOperationTest extends AbstractCloudFoundryAtomicOperationTest { + private ShareCloudFoundryServiceDescription desc = new ShareCloudFoundryServiceDescription(); + + @Test + void printsTwoStatusesWhenSharingSucceeds() { + desc.setRegion("org > space"); + desc.setClient(client); + desc.setServiceInstanceName("service-instance-name"); + Set sharedToRegions = new HashSet<>(); + sharedToRegions.add("org1 > region1"); + sharedToRegions.add("org2 > region2"); + sharedToRegions.add("org3 > region3"); + desc.setShareToRegions(sharedToRegions); + + ServiceInstanceResponse serviceInstanceResponse = + new ServiceInstanceResponse() + .setServiceInstanceName("some-service-name") + .setType(SHARE) + 
.setState(SUCCEEDED); + when(client.getServiceInstances().shareServiceInstance(any(), any(), any())) + .thenReturn(serviceInstanceResponse); + + ShareCloudFoundryServiceAtomicOperation op = new ShareCloudFoundryServiceAtomicOperation(desc); + + Task task = runOperation(op); + + verify(client.getServiceInstances(), times(1)) + .shareServiceInstance( + matches("org > space"), matches("service-instance-name"), same(sharedToRegions)); + assertThat(task.getHistory()) + .has( + statusStartsWith( + "Sharing service instance 'service-instance-name' from 'org > space' into '"), + atIndex(1)); + assertThat(task.getHistory()) + .has(status("Finished sharing service instance 'service-instance-name'"), atIndex(2)); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(ServiceInstanceResponse.class); + ServiceInstanceResponse response = (ServiceInstanceResponse) o; + assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse); + } + + @Test + void printsOnlyOneStatusWhenSharingFails() { + desc.setRegion("org > space"); + desc.setClient(client); + desc.setServiceInstanceName("service-instance-name"); + Set sharedToRegions = new HashSet<>(); + sharedToRegions.add("org1 > region1"); + sharedToRegions.add("org2 > region2"); + sharedToRegions.add("org3 > region3"); + desc.setShareToRegions(sharedToRegions); + + when(client.getServiceInstances().shareServiceInstance(any(), any(), any())) + .thenThrow(new CloudFoundryApiException("Much fail")); + + ShareCloudFoundryServiceAtomicOperation op = new ShareCloudFoundryServiceAtomicOperation(desc); + + Task task = runOperation(op); + + verify(client.getServiceInstances(), times(1)) + .shareServiceInstance( + matches("org > space"), matches("service-instance-name"), same(sharedToRegions)); + assertThat(task.getHistory().size()).isEqualTo(2); + assertThat(task.getHistory()) + .has( + statusStartsWith( + "Sharing service instance 'service-instance-name' from 'org > space' into '"), + atIndex(1)); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isNotInstanceOf(ServiceInstanceResponse.class); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StartCloudFoundryServerGroupAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StartCloudFoundryServerGroupAtomicOperationTest.java new file mode 100644 index 00000000000..eaa9b6f5dc6 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StartCloudFoundryServerGroupAtomicOperationTest.java @@ -0,0 +1,89 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StartCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class StartCloudFoundryServerGroupAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private StartCloudFoundryServerGroupDescription desc = + new StartCloudFoundryServerGroupDescription(); + + StartCloudFoundryServerGroupAtomicOperationTest() { + super(); + } + + @BeforeEach + void before() { + desc.setClient(client); + desc.setServerGroupName("myapp"); + } + + @Test + void start() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.RUNNING); + + StartCloudFoundryServerGroupAtomicOperation op = + new StartCloudFoundryServerGroupAtomicOperation(poller, desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Starting 'myapp'"), atIndex(1)) + .has(status("Started 'myapp'"), atIndex(2)); + } + + @Test + void failedToStart() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.CRASHED); + + StartCloudFoundryServerGroupAtomicOperation op = + new StartCloudFoundryServerGroupAtomicOperation(poller, desc); + + Task task = runOperation(op); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(Map.class); + Object ex = ((Map) o).get("EXCEPTION"); + assertThat(ex).isInstanceOf(CloudFoundryApiException.class); + assertThat(((CloudFoundryApiException) ex).getMessage()) + .isEqualTo( + "Cloud Foundry API returned with error(s): Failed to start 'myapp' which instead crashed"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StopCloudFoundryServerGroupAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StopCloudFoundryServerGroupAtomicOperationTest.java new file mode 100644 index 00000000000..eea3035207c --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/StopCloudFoundryServerGroupAtomicOperationTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v3.ProcessStats; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.StopCloudFoundryServerGroupDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class StopCloudFoundryServerGroupAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private StopCloudFoundryServerGroupDescription desc = + new StopCloudFoundryServerGroupDescription(); + + StopCloudFoundryServerGroupAtomicOperationTest() { + super(); + } + + @BeforeEach + void before() { + desc.setClient(client); + desc.setServerGroupName("myapp"); + } + + @Test + void stop() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.DOWN); + + StopCloudFoundryServerGroupAtomicOperation op = + new StopCloudFoundryServerGroupAtomicOperation(poller, desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Stopping 'myapp'"), atIndex(1)) + .has(status("Stopped 'myapp'"), atIndex(2)); + } + + @Test + void failedToStop() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.RUNNING); + + StopCloudFoundryServerGroupAtomicOperation op = + new StopCloudFoundryServerGroupAtomicOperation(poller, desc); + + Task task = runOperation(op); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(Map.class); + Object ex = ((Map) o).get("EXCEPTION"); + assertThat(ex).isInstanceOf(CloudFoundryApiException.class); + assertThat(((CloudFoundryApiException) ex).getMessage()) + .isEqualTo( + "Cloud Foundry API returned with error(s): Failed to stop 'myapp' which instead is running"); + } + + @Test + void failedToStopStopping() { + OperationPoller poller = mock(OperationPoller.class); + + //noinspection unchecked + when(poller.waitForOperation(any(Supplier.class), any(), any(), any(), any(), any())) + .thenReturn(ProcessStats.State.STOPPING); + + StopCloudFoundryServerGroupAtomicOperation op = + new StopCloudFoundryServerGroupAtomicOperation(poller, desc); + + Task task = runOperation(op); + List resultObjects = task.getResultObjects(); + 
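// STOPPING is a transient state, not a completed stop, so the operation must surface a failure.
+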
assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(Map.class); + Object ex = ((Map) o).get("EXCEPTION"); + assertThat(ex).isInstanceOf(CloudFoundryApiException.class); + assertThat(((CloudFoundryApiException) ex).getMessage()) + .isEqualTo( + "Cloud Foundry API returned with error(s): Failed to stop 'myapp' which instead is in graceful shutdown - stopping"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/TerminateCloudFoundryServerGroupAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/TerminateCloudFoundryServerGroupAtomicOperationTest.java new file mode 100644 index 00000000000..0b52f894e37 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/TerminateCloudFoundryServerGroupAtomicOperationTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2018 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.atIndex; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.TerminateCloudFoundryInstancesDescription; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class TerminateCloudFoundryServerGroupAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private TerminateCloudFoundryInstancesDescription desc = + new TerminateCloudFoundryInstancesDescription(); + + TerminateCloudFoundryServerGroupAtomicOperationTest() { + super(); + } + + @BeforeEach + void before() { + desc.setClient(client); + desc.setInstanceIds(new String[] {"123-0", "123-1"}); + } + + @Test + void terminate() { + TerminateCloudFoundryInstancesAtomicOperation op = + new TerminateCloudFoundryInstancesAtomicOperation(desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Terminating application instances ['123-0', '123-1']"), atIndex(1)) + .has(status("Terminated application instances ['123-0', '123-1']"), atIndex(2)); + + verify(client.getApplications(), times(2)).deleteAppInstance(eq("123"), anyString()); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnmapLoadBalancersAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnmapLoadBalancersAtomicOperationTest.java new file mode 100644 index 00000000000..b7562552b91 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnmapLoadBalancersAtomicOperationTest.java @@ 
-0,0 +1,139 @@
+/*
+ * Copyright 2019 Pivotal, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.atIndex;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.matches;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Routes;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.LoadBalancersDescription;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import io.vavr.collection.List;
+import java.util.Collections;
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class UnmapLoadBalancersAtomicOperationTest extends AbstractCloudFoundryAtomicOperationTest {
+  private final LoadBalancersDescription desc;
+  private final CloudFoundrySpace space;
+
+  UnmapLoadBalancersAtomicOperationTest() {
+    desc = new LoadBalancersDescription();
+    desc.setClient(client);
+    desc.setServerGroupName("myapp");
+    desc.setServerGroupId("myapp-id");
+    space = CloudFoundrySpace.fromRegion("org>space");
+    Routes routes = client.getRoutes();
+    desc.setSpace(space);
+    when(routes.toRouteId(anyString())).thenReturn(mock(RouteId.class));
+  }
+
+  @Test
+  void operateWithNullRoutes() {
+    UnmapLoadBalancersAtomicOperation op = new UnmapLoadBalancersAtomicOperation(desc);
+
+    Task task = runOperation(op);
+    java.util.List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(Map.class);
+    Object ex = ((Map) o).get("EXCEPTION");
+    assertThat(ex).isInstanceOf(CloudFoundryApiException.class);
+    assertThat(((CloudFoundryApiException) ex).getMessage())
+        .isEqualTo("Cloud Foundry API returned with error(s): No load balancer specified");
+  }
+
+  @Test
+  void operateWithEmptyRoutes() {
+    desc.setRoutes(Collections.emptyList());
+    UnmapLoadBalancersAtomicOperation op = new UnmapLoadBalancersAtomicOperation(desc);
+
+    Task task = runOperation(op);
+    java.util.List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(Map.class);
+    Object ex = ((Map) o).get("EXCEPTION");
+    assertThat(ex).isInstanceOf(CloudFoundryApiException.class);
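+    // An empty route list is rejected the same way as a null one: no load balancer specified.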
+ assertThat(((CloudFoundryApiException) ex).getMessage()) + .isEqualTo("Cloud Foundry API returned with error(s): No load balancer specified"); + } + + @Test + void operateWithMultipleBadRoutes() { + desc.setRoutes(List.of("bad.route-1.example.com", "bad.route 2.example.com").asJava()); + UnmapLoadBalancersAtomicOperation op = new UnmapLoadBalancersAtomicOperation(desc); + Task task = runOperation(op); + java.util.List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(Map.class); + Object ex = ((Map) o).get("EXCEPTION"); + assertThat(ex).isInstanceOf(CloudFoundryApiException.class); + assertThat(((CloudFoundryApiException) ex).getMessage()) + .isEqualTo( + "Cloud Foundry API returned with error(s): Load balancer 'bad.route-1.example.com' does not exist"); + } + + @Test + void operateWithGoodRoutes() { + desc.setRoutes(List.of("good.route-1.example.com", "good.route-2.example.com").asJava()); + CloudFoundryDomain domain = + CloudFoundryDomain.builder().id("domain-id").name("domain.com").build(); + CloudFoundryLoadBalancer lb1 = + CloudFoundryLoadBalancer.builder() + .account("account") + .id("lb1-id") + .host("host1") + .space(space) + .domain(domain) + .build(); + CloudFoundryLoadBalancer lb2 = + CloudFoundryLoadBalancer.builder() + .account("account") + .id("lb2-id") + .host("host2") + .space(space) + .domain(domain) + .build(); + when(client.getRoutes().find(any(), any())).thenReturn(lb1).thenReturn(lb2); + UnmapLoadBalancersAtomicOperation op = new UnmapLoadBalancersAtomicOperation(desc); + + Task task = runOperation(op); + + assertThat(task.getHistory()) + .has(status("Unmapping 'myapp' from load balancer(s)."), atIndex(1)) + .has(status("Unmapping load balancer 'good.route-1.example.com'"), atIndex(2)) + .has(status("Unmapped load balancer 'good.route-1.example.com'"), atIndex(3)) + .has(status("Unmapping load balancer 'good.route-2.example.com'"), atIndex(4)) + .has(status("Unmapped load balancer 'good.route-2.example.com'"), atIndex(5)); + verify(client.getApplications(), times(1)).unmapRoute(matches("myapp-id"), matches("lb1-id")); + verify(client.getApplications(), times(1)).unmapRoute(matches("myapp-id"), matches("lb2-id")); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperationTest.java new file mode 100644 index 00000000000..049d6cffb87 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UnshareCloudFoundryServiceAtomicOperationTest.java @@ -0,0 +1,114 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops;
+
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.State.SUCCEEDED;
+import static com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.v2.LastOperation.Type.UNSHARE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.atIndex;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.matches;
+import static org.mockito.ArgumentMatchers.same;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.ServiceInstanceResponse;
+import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.UnshareCloudFoundryServiceDescription;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import org.junit.jupiter.api.Test;
+
+class UnshareCloudFoundryServiceAtomicOperationTest
+    extends AbstractCloudFoundryAtomicOperationTest {
+  private UnshareCloudFoundryServiceDescription desc = new UnshareCloudFoundryServiceDescription();
+
+  @Test
+  void unshareService() {
+    desc.setRegion("org > space");
+    desc.setClient(client);
+    desc.setServiceInstanceName("service-instance-name");
+    Set<String> unsharedFromRegions = new HashSet<>();
+    unsharedFromRegions.add("org1 > region1");
+    unsharedFromRegions.add("org2 > region2");
+    unsharedFromRegions.add("org3 > region3");
+    desc.setUnshareFromRegions(unsharedFromRegions);
+
+    ServiceInstanceResponse serviceInstanceResponse =
+        new ServiceInstanceResponse()
+            .setServiceInstanceName("some-service-name")
+            .setType(UNSHARE)
+            .setState(SUCCEEDED);
+    when(client.getServiceInstances().unshareServiceInstance(any(), any()))
+        .thenReturn(serviceInstanceResponse);
+
+    UnshareCloudFoundryServiceAtomicOperation op =
+        new UnshareCloudFoundryServiceAtomicOperation(desc);
+
+    Task task = runOperation(op);
+
+    verify(client.getServiceInstances(), times(1))
+        .unshareServiceInstance(matches("service-instance-name"), same(unsharedFromRegions));
+    assertThat(task.getHistory())
+        .has(
+            statusStartsWith("Unsharing service instance 'service-instance-name' from '"),
+            atIndex(1));
+    assertThat(task.getHistory())
+        .has(status("Finished unsharing service instance 'service-instance-name'"), atIndex(2));
+    List<Object> resultObjects = task.getResultObjects();
+    assertThat(resultObjects.size()).isEqualTo(1);
+    Object o = resultObjects.get(0);
+    assertThat(o).isInstanceOf(ServiceInstanceResponse.class);
+    ServiceInstanceResponse response = (ServiceInstanceResponse) o;
+    assertThat(response).usingRecursiveComparison().isEqualTo(serviceInstanceResponse);
+  }
+
+  @Test
+  void printsOnlyOneStatusWhenUnsharingFails() {
+    desc.setRegion("org > space");
+    desc.setClient(client);
+    desc.setServiceInstanceName("service-instance-name");
+    Set<String> unshareFromRegions = new HashSet<>();
+    unshareFromRegions.add("org1 > region1");
+    unshareFromRegions.add("org2 > region2");
+    unshareFromRegions.add("org3 > region3");
+    desc.setUnshareFromRegions(unshareFromRegions);
+
+    when(client.getServiceInstances().unshareServiceInstance(any(), any()))
+        .thenThrow(new CloudFoundryApiException("Much fail"));
+
+    UnshareCloudFoundryServiceAtomicOperation op =
+        new UnshareCloudFoundryServiceAtomicOperation(desc);
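+    // The stubbed client throws, so only the initial 'Unsharing...' status entry should be recorded.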
+ + Task task = runOperation(op); + + verify(client.getServiceInstances(), times(1)) + .unshareServiceInstance(matches("service-instance-name"), same(unshareFromRegions)); + assertThat(task.getHistory().size()).isEqualTo(2); + assertThat(task.getHistory()) + .has( + statusStartsWith("Unsharing service instance 'service-instance-name' from '"), + atIndex(1)); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isNotInstanceOf(ServiceInstanceResponse.class); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UpsertCloudFoundryLoadBalancerAtomicOperationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UpsertCloudFoundryLoadBalancerAtomicOperationTest.java new file mode 100644 index 00000000000..fa538d8880f --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/deploy/ops/UpsertCloudFoundryLoadBalancerAtomicOperationTest.java @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.ops; + +import static org.assertj.core.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryApiException; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Routes; +import com.netflix.spinnaker.clouddriver.cloudfoundry.deploy.description.UpsertCloudFoundryLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryDomain; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.Test; + +class UpsertCloudFoundryLoadBalancerAtomicOperationTest + extends AbstractCloudFoundryAtomicOperationTest { + private final UpsertCloudFoundryLoadBalancerDescription desc; + private final Routes routes; + + { + desc = new UpsertCloudFoundryLoadBalancerDescription(); + desc.setClient(client); + desc.setRegion("org>space"); + desc.setSpace(CloudFoundrySpace.fromRegion("org>space")); + desc.setHost("some-host"); + desc.setPath("some-path"); + desc.setPort(8080); + desc.setDomain(CloudFoundryDomain.builder().build()); + routes = client.getRoutes(); + } + + @Test + void operateSuccessfullyCreatedLoadBalancer() { + when(routes.createRoute(any(), any())).thenReturn(CloudFoundryLoadBalancer.builder().build()); + + UpsertCloudFoundryLoadBalancerAtomicOperation op = + new UpsertCloudFoundryLoadBalancerAtomicOperation(desc); + + assertThat(runOperation(op).getHistory()) + .has(status("Creating load balancer in 'org>space'"), atIndex(1)) + 
.has(status("Done creating load balancer"), atIndex(2)); + } + + @Test + void operateThrowCloudFoundryApiExceptionWhenRouteExists() { + when(routes.createRoute(any(), any())).thenReturn(null); + + UpsertCloudFoundryLoadBalancerAtomicOperation op = + new UpsertCloudFoundryLoadBalancerAtomicOperation(desc); + + Task task = runOperation(op); + List resultObjects = task.getResultObjects(); + assertThat(resultObjects.size()).isEqualTo(1); + Object o = resultObjects.get(0); + assertThat(o).isInstanceOf(Map.class); + Object ex = ((Map) o).get("EXCEPTION"); + assertThat(ex).isInstanceOf(CloudFoundryApiException.class); + assertThat(((CloudFoundryApiException) ex).getMessage()) + .contains("Load balancer already exists in another organization and space"); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplicationTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplicationTest.java index 481a9b11975..94cad085581 100644 --- a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplicationTest.java +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryApplicationTest.java @@ -16,28 +16,26 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import org.junit.jupiter.api.Test; +import static java.util.Collections.emptySet; +import static java.util.stream.Collectors.toSet; +import static org.assertj.core.api.Assertions.assertThat; -import java.util.Arrays; import java.util.Map; import java.util.Set; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.assertj.core.api.Assertions.assertThat; +import java.util.stream.Stream; +import org.junit.jupiter.api.Test; class CloudFoundryApplicationTest { @Test void getClusterNamesGroupsByAccount() { - CloudFoundryApplication app = new CloudFoundryApplication( - "app", - Arrays.asList( - new CloudFoundryCluster("dev", "app-dev1", emptySet(), emptySet()), - new CloudFoundryCluster("dev", "app-dev2", emptySet(), emptySet()), - new CloudFoundryCluster("prod", "app-prod", emptySet(), emptySet()) - ), - emptyMap() - ); + CloudFoundryApplication app = + new CloudFoundryApplication( + "app", + Stream.of( + new CloudFoundryCluster("dev", "app-dev1", emptySet()), + new CloudFoundryCluster("dev", "app-dev2", emptySet()), + new CloudFoundryCluster("prod", "app-prod", emptySet())) + .collect(toSet())); Map> clusterNames = app.getClusterNames(); @@ -45,4 +43,4 @@ void getClusterNamesGroupsByAccount() { assertThat(clusterNames.get("dev")).containsExactlyInAnyOrder("app-dev1", "app-dev2"); assertThat(clusterNames.get("prod")).containsExactly("app-prod"); } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryClusterTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryClusterTest.java index c6edf9f06f4..8e5a7d33080 100644 --- a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryClusterTest.java +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryClusterTest.java @@ -16,13 +16,14 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import org.junit.jupiter.api.Test; - import static 
java.util.Collections.emptySet; import static org.assertj.core.api.Assertions.assertThat; +import org.junit.jupiter.api.Test; + class CloudFoundryClusterTest { - private CloudFoundryCluster cluster = new CloudFoundryCluster("dev", "app-dev-detail", emptySet(), emptySet()); + private CloudFoundryCluster cluster = + new CloudFoundryCluster("dev", "app-dev-detail", emptySet()); @Test void getStack() { @@ -33,4 +34,4 @@ void getStack() { void getDetail() { assertThat(cluster.getDetail()).isEqualTo("detail"); } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancerTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancerTest.java index effa2d1299a..a0b6ed323c3 100644 --- a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancerTest.java +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundryLoadBalancerTest.java @@ -16,25 +16,88 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import org.junit.jupiter.api.Test; - import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.Test; class CloudFoundryLoadBalancerTest { - private CloudFoundryOrganization org = new CloudFoundryOrganization("orgId", "org"); - - private CloudFoundryLoadBalancer loadBalancer = new CloudFoundryLoadBalancer( - "dev", - "id", - "host", - "path", - 8080, - new CloudFoundrySpace("spaceId", "space", org), - new CloudFoundryDomain("domainId", "domain", org), - emptySet() - ); + private CloudFoundryOrganization org = + CloudFoundryOrganization.builder().id("orgId").name("org").build(); + + private CloudFoundryLoadBalancer loadBalancer = + CloudFoundryLoadBalancer.builder() + .account("dev") + .id("id") + .host("host") + .path("path") + .port(8080) + .space(CloudFoundrySpace.builder().id("spaceId").name("space").organization(org).build()) + .domain( + CloudFoundryDomain.builder().id("domainId").name("domain").organization(org).build()) + .mappedApps( + singleton( + CloudFoundryServerGroup.builder() + .name("demo-dev-v001") + .instances(emptySet()) + .build())) + .build(); @Test - void getName() { + void serialization() throws JsonProcessingException { + ObjectMapper mapper = new ObjectMapper(); + assertThat(mapper.writeValueAsString(loadBalancer)).doesNotContain("mappedApps"); + } + + @Test + void getNameWithoutPortWithoutPath() { + CloudFoundryLoadBalancer testLoadBalancer = + CloudFoundryLoadBalancer.builder() + .host("hostname") + .domain(CloudFoundryDomain.builder().name("example.com").build()) + .build(); + + assertThat(testLoadBalancer.getName()).isEqualToIgnoringCase("hostname.example.com"); + } + + @Test + void getNameWithoutPortWithPath() { + CloudFoundryLoadBalancer testLoadBalancer = + CloudFoundryLoadBalancer.builder() + .host("hostname") + .path("/my-path") + .domain(CloudFoundryDomain.builder().name("example.com").build()) + .build(); + + assertThat(testLoadBalancer.getName()).isEqualToIgnoringCase("hostname.example.com/my-path"); + } + + @Test + void getNameWithPortWithoutPath() { + CloudFoundryLoadBalancer testLoadBalancer = + CloudFoundryLoadBalancer.builder() + 
.host("hostname") + .port(9999) + .domain(CloudFoundryDomain.builder().name("example.com").build()) + .build(); + + assertThat(testLoadBalancer.getName()).isEqualToIgnoringCase("hostname.example.com-9999"); + } + + @Test + void getNameWithPortWithPath() { + CloudFoundryLoadBalancer testLoadBalancer = + CloudFoundryLoadBalancer.builder() + .host("hostname") + .path("/my-path") + .port(9999) + .domain(CloudFoundryDomain.builder().name("example.com").build()) + .build(); + + assertThat(testLoadBalancer.getName()) + .isEqualToIgnoringCase("hostname.example.com-9999/my-path"); } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpaceTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpaceTest.java index 143b73b1566..b404f23fff7 100644 --- a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpaceTest.java +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/model/CloudFoundrySpaceTest.java @@ -16,10 +16,10 @@ package com.netflix.spinnaker.clouddriver.cloudfoundry.model; -import org.junit.jupiter.api.Test; - import static org.assertj.core.api.Assertions.assertThat; +import org.junit.jupiter.api.Test; + class CloudFoundrySpaceTest { @Test @@ -43,4 +43,4 @@ void equality() { assertThat(space).isEqualTo(space2); assertThat(space.hashCode()).isEqualTo(space2.hashCode()); } -} \ No newline at end of file +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryLoadBalancerCachingAgentTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryLoadBalancerCachingAgentTest.java new file mode 100644 index 00000000000..6eb13493e20 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryLoadBalancerCachingAgentTest.java @@ -0,0 +1,476 @@ +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundryServerGroupCachingAgent.cacheView; +import static java.util.Collections.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import 
com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Routes; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Spaces; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.model.RouteId; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.moniker.Moniker; +import io.vavr.collection.HashMap; +import io.vavr.collection.HashSet; +import io.vavr.collection.List; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.*; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CloudFoundryLoadBalancerCachingAgentTest { + private Instant now = Instant.now(); + private String accountName = "account"; + private ObjectMapper objectMapper = + new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION); + private CloudFoundryClient cloudFoundryClient = mock(CloudFoundryClient.class); + private Registry registry = mock(Registry.class); + private final Clock internalClock = Clock.fixed(now, ZoneId.systemDefault()); + private CloudFoundryCredentials credentials = mock(CloudFoundryCredentials.class); + private CloudFoundryLoadBalancerCachingAgent cloudFoundryLoadBalancerCachingAgent = + new CloudFoundryLoadBalancerCachingAgent(credentials, registry); + private ProviderCache mockProviderCache = mock(ProviderCache.class); + private String spaceId = "space-guid"; + private String spaceName = "space"; + private String orgId = "org-guid"; + private String orgName = "org"; + private CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id(spaceId) + .name(spaceName) + .organization(CloudFoundryOrganization.builder().id(orgId).name(orgName).build()) + .build(); + private Spaces spaces = mock(Spaces.class); + + @BeforeEach + void before() { + when(credentials.getClient()).thenReturn(cloudFoundryClient); + when(credentials.getName()).thenReturn(accountName); + when(cloudFoundryClient.getSpaces()).thenReturn(spaces); + } + + @Test + void handleShouldReturnNullWhenAccountDoesNotMatch() { + Map data = HashMap.of("account", "other-account").toJavaMap(); + + OnDemandAgent.OnDemandResult result = + cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, data); + assertThat(result).isNull(); + } + + @Test + void handleShouldReturnNullWhenRegionIsUnspecified() { + OnDemandAgent.OnDemandResult result = + cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, emptyMap()); + + assertThat(result).isNull(); + } + + @Test + void handleShouldReturnNullWhenRegionDoesNotExist() { + String region = "org > space"; + Map data = + HashMap.of( + "account", accountName, + "region", region, + "loadBalancerName", "loadBalancerName") + .toJavaMap(); + + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.empty()); + + OnDemandAgent.OnDemandResult result = + cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, data); + assertThat(result).isNull(); + verify(spaces).findSpaceByRegion(eq(region)); + } + + @Test + void handleShouldReturnNullWhenLoadBalancerNameIsUnspecified() { + String region = "org > space"; + Map data = + HashMap.of( + "account", accountName, + "region", region) + .toJavaMap(); + + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + + OnDemandAgent.OnDemandResult result = + 
cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, data); + assertThat(result).isNull(); + } + + @Test + void handleShouldReturnNullWhenLoadBalancerDoesNotExist() { + String region = "org > space"; + String serverGroupName = "server-group"; + Map data = + HashMap.of( + "account", accountName, + "region", region, + "serverGroupName", serverGroupName) + .toJavaMap(); + + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + + Routes mockRoutes = mock(Routes.class); + when(cloudFoundryClient.getRoutes()).thenReturn(mockRoutes); + when(mockRoutes.find(any(), any())).thenReturn(null); + + OnDemandAgent.OnDemandResult result = + cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, data); + + assertThat(result).isNull(); + } + + @Test + void handleShouldReturnOnDemandResultsWithCacheTimeAndNoProcessedTime() { + String region = "org > space"; + String loadBalancerName = "server-group"; + Map data = + HashMap.of( + "account", accountName, + "region", region, + "loadBalancerName", loadBalancerName) + .toJavaMap(); + CloudFoundryLoadBalancer cloudFoundryLoadBalancer = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(emptySet()) + .build(); + ResourceCacheData onDemandCacheData = + new ResourceCacheData( + Keys.getLoadBalancerKey(accountName, cloudFoundryLoadBalancer), + cacheView(cloudFoundryLoadBalancer), + HashMap.>of(SERVER_GROUPS.getNs(), emptyList()).toJavaMap()); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + Routes mockRoutes = mock(Routes.class); + when(mockRoutes.find(any(), any())).thenReturn(cloudFoundryLoadBalancer); + when(mockRoutes.toRouteId(any())).thenReturn(mock(RouteId.class)); + when(cloudFoundryClient.getRoutes()).thenReturn(mockRoutes); + Map> cacheResults = + HashMap.>of( + LOAD_BALANCERS.getNs(), singleton(onDemandCacheData)) + .toJavaMap(); + String sourceAgentType = "account/CloudFoundryLoadBalancerCachingAgent-OnDemand"; + CacheResult expectedCacheResult = new DefaultCacheResult(cacheResults); + OnDemandAgent.OnDemandResult expectedResult = + new OnDemandAgent.OnDemandResult(sourceAgentType, expectedCacheResult, emptyMap()); + + OnDemandAgent.OnDemandResult result = + cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, data); + + assertThat(result) + .usingRecursiveComparison() + .ignoringCollectionOrder() + .isEqualTo(expectedResult); + } + + @Test + void pendingOnDemandRequestsShouldReturnOnDemandCacheData() { + CloudFoundryLoadBalancer cloudFoundryLoadBalancer = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(emptySet()) + .build(); + Date cacheTime = new Date(); + Long cacheExpiry = 111L; + Integer processedCount = 1; + Long processedTime = 222L; + String loadBalancerKey = Keys.getLoadBalancerKey(accountName, cloudFoundryLoadBalancer); + Collection expectedKeys = singleton("key1"); + when(mockProviderCache.filterIdentifiers(any(), any())).thenReturn(expectedKeys); + Collection onDemandCacheData = + singleton( + new DefaultCacheData( + loadBalancerKey, + HashMap.of( + "cacheTime", cacheTime, + "cacheExpiry", cacheExpiry, + "processedCount", processedCount, + "processedTime", processedTime) + .toJavaMap(), + emptyMap())); + when(mockProviderCache.getAll(any(), any(), any())).thenReturn(onDemandCacheData); + Moniker moniker = 
Moniker.builder().build(); + Collection expectedResult = + singleton( + HashMap.of( + "id", loadBalancerKey, + "details", Keys.parse(loadBalancerKey).get(), + "moniker", moniker, + "cacheTime", cacheTime, + "cacheExpiry", cacheExpiry, + "processedCount", processedCount, + "processedTime", processedTime) + .toJavaMap()); + + Collection> result = + cloudFoundryLoadBalancerCachingAgent.pendingOnDemandRequests(mockProviderCache); + + assertThat(result).isEqualTo(expectedResult); + } + + @Test + void loadDataShouldReturnCacheResultWithUpdatedData() { + CloudFoundryLoadBalancer loadBalancer1 = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(emptySet()) + .build(); + CloudFoundryLoadBalancer loadBalancer2 = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-2") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(emptySet()) + .build(); + + when(mockProviderCache.getAll(any(), anyCollection())).thenReturn(emptySet()); + + Routes mockRoutes = mock(Routes.class); + when(mockRoutes.all(emptyList())) + .thenReturn(List.of(loadBalancer1, loadBalancer2).toJavaList()); + + when(cloudFoundryClient.getRoutes()).thenReturn(mockRoutes); + + CacheData loadBalancerCacheData1 = + new ResourceCacheData( + Keys.getLoadBalancerKey(accountName, loadBalancer1), + cacheView(loadBalancer1), + Collections.singletonMap(SERVER_GROUPS.getNs(), emptyList())); + CacheData loadBalancerCacheData2 = + new ResourceCacheData( + Keys.getLoadBalancerKey(accountName, loadBalancer2), + cacheView(loadBalancer2), + Collections.singletonMap(SERVER_GROUPS.getNs(), emptyList())); + + Map> cacheResults = + HashMap.>of( + LOAD_BALANCERS.getNs(), + HashSet.of(loadBalancerCacheData1, loadBalancerCacheData2).toJavaSet(), + ON_DEMAND.getNs(), + emptySet(), + SERVER_GROUPS.getNs(), + emptySet()) + .toJavaMap(); + CacheResult expectedCacheResult = + new DefaultCacheResult( + cacheResults, + HashMap.>of(ON_DEMAND.getNs(), emptySet()).toJavaMap()); + + CacheResult result = cloudFoundryLoadBalancerCachingAgent.loadData(mockProviderCache); + + assertThat(result) + .usingRecursiveComparison() + .ignoringCollectionOrder() + .isEqualTo(expectedCacheResult); + } + + @Test + void loadDataShouldReturnCacheResultWithUpdatedDataAndServerGroups() { + + CloudFoundryInstance instance1 = CloudFoundryInstance.builder().appGuid("ap-guid-1").build(); + + CloudFoundryServerGroup serverGroup1 = + CloudFoundryServerGroup.builder() + .account(accountName) + .id("sg-guid-1") + .name("demo") + .space(cloudFoundrySpace) + .instances(HashSet.of(instance1).toJavaSet()) + .build(); + + CloudFoundryLoadBalancer loadBalancer1 = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(HashSet.of(serverGroup1).toJavaSet()) + .build(); + + when(mockProviderCache.getAll(any(), anyCollection())).thenReturn(emptySet()); + + Routes mockRoutes = mock(Routes.class); + + when(mockRoutes.all(emptyList())).thenReturn(List.of(loadBalancer1).toJavaList()); + + when(cloudFoundryClient.getRoutes()).thenReturn(mockRoutes); + + CacheData serverGroupCacheData1 = + new ResourceCacheData( + Keys.getServerGroupKey( + serverGroup1.getAccount(), serverGroup1.getName(), cloudFoundrySpace.getRegion()), + emptyMap(), + Collections.singletonMap( + LOAD_BALANCERS.getNs(), HashSet.of(loadBalancer1.getId()).toJavaList())); + + Map 
loadBalancersByServerGroupIds = + HashMap.of("1", serverGroupCacheData1).toJavaMap(); + + CacheData loadBalancerCacheData1 = + new ResourceCacheData( + Keys.getLoadBalancerKey(accountName, loadBalancer1), + cacheView(loadBalancer1), + Collections.singletonMap( + SERVER_GROUPS.getNs(), + HashSet.of( + Keys.getServerGroupKey( + serverGroup1.getAccount(), + serverGroup1.getName(), + cloudFoundrySpace.getRegion())) + .toJavaSet())); + + Map> cacheResults = + HashMap.>of( + LOAD_BALANCERS.getNs(), + HashSet.of(loadBalancerCacheData1).toJavaSet(), + ON_DEMAND.getNs(), + emptySet(), + SERVER_GROUPS.getNs(), + loadBalancersByServerGroupIds.values()) + .toJavaMap(); + + CacheResult expectedCacheResult = + new DefaultCacheResult( + cacheResults, + HashMap.>of(ON_DEMAND.getNs(), emptySet()).toJavaMap()); + + CacheResult result = cloudFoundryLoadBalancerCachingAgent.loadData(mockProviderCache); + + assertThat(result) + .usingRecursiveComparison() + .ignoringCollectionOrder() + .isEqualTo(expectedCacheResult); + } + + @Test + void loadDataShouldReturnCacheResultWithDataFromOnDemandNamespace() + throws JsonProcessingException { + + CloudFoundryLoadBalancer loadBalancer = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(emptySet()) + .build(); + + CloudFoundryServerGroup onDemandCloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .name("serverGroupName") + .id("sg-guid-1") + .account(accountName) + .space(cloudFoundrySpace) + .diskQuota(1024) + .build(); + + CloudFoundryLoadBalancer onDemandLoadBalancer = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .mappedApps(singleton(onDemandCloudFoundryServerGroup)) + .build(); + + Routes mockRoutes = mock(Routes.class); + when(mockRoutes.all(emptyList())).thenReturn(List.of(loadBalancer).toJavaList()); + + CacheData onDemandCacheResults = + new ResourceCacheData( + Keys.getLoadBalancerKey(accountName, onDemandLoadBalancer), + cacheView(onDemandLoadBalancer), + Collections.singletonMap( + SERVER_GROUPS.getNs(), + singleton( + Keys.getServerGroupKey( + accountName, + onDemandCloudFoundryServerGroup.getName(), + onDemandCloudFoundryServerGroup.getRegion())))); + + when(mockProviderCache.getAll(any(), anyCollection())) + .thenReturn( + singleton( + new DefaultCacheData( + Keys.getLoadBalancerKey(accountName, onDemandLoadBalancer), + (int) TimeUnit.MINUTES.toSeconds(10), // ttl + HashMap.of( + "cacheTime", + internalClock.instant().plusSeconds(600).toEpochMilli(), + "cacheResults", + objectMapper.writeValueAsString( + Collections.singletonMap( + LOAD_BALANCERS.getNs(), + Collections.singleton(onDemandCacheResults))), + "processedCount", + 0) + .toJavaMap(), + emptyMap(), + internalClock))); + + when(cloudFoundryClient.getRoutes()).thenReturn(mockRoutes); + + Map> cacheResults = + HashMap.>of( + LOAD_BALANCERS.getNs(), + HashSet.of(onDemandCacheResults).toJavaSet(), + ON_DEMAND.getNs(), + emptySet(), + SERVER_GROUPS.getNs(), + emptySet()) + .toJavaMap(); + + CacheResult expectedCacheResult = + new DefaultCacheResult( + cacheResults, + HashMap.>of(ON_DEMAND.getNs(), emptySet()).toJavaMap()); + + CacheResult result = cloudFoundryLoadBalancerCachingAgent.loadData(mockProviderCache); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedCacheResult); + } + + @Test + void shouldReturnNullWhenAccountNameDiffers() { + Map data = + 
HashMap.of( + "account", "NotAccount", + "region", "org1 > space1", + "loadBalancerName", "doesntMatter") + .toJavaMap(); + + OnDemandAgent.OnDemandResult result = + cloudFoundryLoadBalancerCachingAgent.handle(mockProviderCache, data); + + assertThat(result).isEqualTo(null); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryServerGroupCachingAgentTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryServerGroupCachingAgentTest.java new file mode 100644 index 00000000000..7ca1e03a06a --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundryServerGroupCachingAgentTest.java @@ -0,0 +1,690 @@ +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.*; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.CloudFoundryServerGroupCachingAgent.cacheView; +import static java.util.Collections.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.refEq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Applications; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Spaces; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.*; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.moniker.Moniker; +import io.vavr.collection.HashMap; +import io.vavr.collection.HashSet; +import io.vavr.collection.List; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.*; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CloudFoundryServerGroupCachingAgentTest { + private Instant now = Instant.now(); + private String accountName = "account"; + private ObjectMapper objectMapper = + new ObjectMapper().disable(MapperFeature.DEFAULT_VIEW_INCLUSION); + private CloudFoundryClient cloudFoundryClient = mock(CloudFoundryClient.class); + private CloudFoundryCredentials credentials = mock(CloudFoundryCredentials.class); + private Registry registry = 
mock(Registry.class); + private final Clock internalClock = Clock.fixed(now, ZoneId.systemDefault()); + private CloudFoundryServerGroupCachingAgent cloudFoundryServerGroupCachingAgent = + new CloudFoundryServerGroupCachingAgent(credentials, registry); + private ProviderCache mockProviderCache = mock(ProviderCache.class); + private String spaceId = "space-guid"; + private String spaceName = "space"; + private String orgId = "org-guid"; + private String orgName = "org"; + private CloudFoundrySpace cloudFoundrySpace = + CloudFoundrySpace.builder() + .id(spaceId) + .name(spaceName) + .organization(CloudFoundryOrganization.builder().id(orgId).name(orgName).build()) + .build(); + private Spaces spaces = mock(Spaces.class); + + @BeforeEach + void before() { + when(credentials.getClient()).thenReturn(cloudFoundryClient); + when(credentials.getName()).thenReturn(accountName); + when(cloudFoundryClient.getSpaces()).thenReturn(spaces); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.empty()); + } + + @Test + void buildOnDemandCacheDataShouldIncludeServerGroupAttributes() throws JsonProcessingException { + + CloudFoundryInstance cloudFoundryInstance = + CloudFoundryInstance.builder().appGuid("instance-guid-1").key("instance-key").build(); + + CloudFoundryServerGroup onDemandCloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .name("serverGroupName") + .id("sg-guid-1") + .account(accountName) + .space(cloudFoundrySpace) + .diskQuota(1024) + .instances(singleton(cloudFoundryInstance)) + .build(); + + Map> serverGroupRelationships = + HashMap.>of( + INSTANCES.getNs(), + singleton(Keys.getInstanceKey(accountName, cloudFoundryInstance.getName())), + LOAD_BALANCERS.getNs(), + emptyList()) + .toJavaMap(); + + ResourceCacheData onDemandCacheResults = + new ResourceCacheData( + Keys.getServerGroupKey( + accountName, + onDemandCloudFoundryServerGroup.getName(), + onDemandCloudFoundryServerGroup.getRegion()), + cacheView(onDemandCloudFoundryServerGroup), + serverGroupRelationships); + + CacheData cacheData = + cloudFoundryServerGroupCachingAgent.buildOnDemandCacheData( + Keys.getServerGroupKey( + accountName, + onDemandCloudFoundryServerGroup.getName(), + onDemandCloudFoundryServerGroup.getRegion()), + Collections.singletonMap( + SERVER_GROUPS.getNs(), Collections.singleton(onDemandCacheResults))); + + ResourceCacheData result = + objectMapper + .readValue( + cacheData.getAttributes().get("cacheResults").toString(), + new TypeReference>>() {}) + .get("serverGroups") + .stream() + .findFirst() + .get(); + + assertThat(result).usingRecursiveComparison().isEqualTo(onDemandCacheResults); + } + + @Test + void handleShouldReturnNullWhenAccountDoesNotMatch() { + Map data = HashMap.of("account", "other-account").toJavaMap(); + + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, data); + assertThat(result).isNull(); + } + + @Test + void handleShouldReturnNullWhenRegionIsUnspecified() { + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, emptyMap()); + + assertThat(result).isNull(); + } + + @Test + void handleShouldReturnNullWhenRegionDoesNotExist() { + String region = "org > space"; + Map data = + HashMap.of( + "account", accountName, + "region", region) + .toJavaMap(); + + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, data); + assertThat(result).isNull(); + verify(spaces).findSpaceByRegion(eq(region)); + } + + @Test + void 
handleShouldReturnNullWhenServerGroupNameIsUnspecified() { + String region = "org > space"; + Map data = + HashMap.of( + "account", accountName, + "region", region) + .toJavaMap(); + + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, data); + assertThat(result).isNull(); + verify(spaces).findSpaceByRegion(eq(region)); + } + + @Test + void handleShouldReturnAnEvictResultWhenServerGroupDoesNotExists() { + String region = "org > space"; + String serverGroupName = "server-group"; + Map data = + HashMap.of( + "account", accountName, + "region", region, + "serverGroupName", serverGroupName) + .toJavaMap(); + + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + + Applications mockApplications = mock(Applications.class); + when(cloudFoundryClient.getApplications()).thenReturn(mockApplications); + when(mockApplications.findServerGroupByNameAndSpaceId(any(), any())).thenReturn(null); + + when(mockProviderCache.filterIdentifiers(any(), any())) + .thenReturn(Collections.singletonList("key")); + + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, data); + + assertThat(result).isNotNull(); + assertThat(result.getEvictions()).hasSize(1); + assertThat(result.getEvictions().get(SERVER_GROUPS.getNs())).containsExactly("key"); + } + + @Test + void handleShouldReturnOnDemandResultsWithCacheTimeAndNoProcessedTime() { + String region = "org > space"; + String serverGroupName = "server-group"; + Map data = + HashMap.of( + "account", accountName, + "region", region, + "serverGroupName", serverGroupName) + .toJavaMap(); + CloudFoundryInstance cloudFoundryInstance = + CloudFoundryInstance.builder().appGuid("instance-guid").key("instance-key").build(); + CloudFoundryServerGroup matchingCloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .name(serverGroupName) + .account(accountName) + .space(cloudFoundrySpace) + .instances(singleton(cloudFoundryInstance)) + .build(); + ResourceCacheData onDemandCacheData = + new ResourceCacheData( + Keys.getServerGroupKey(accountName, serverGroupName, region), + cacheView(matchingCloudFoundryServerGroup), + HashMap.>of( + INSTANCES.getNs(), + Collections.singletonList( + Keys.getInstanceKey(accountName, cloudFoundryInstance.getName())), + LOAD_BALANCERS.getNs(), Collections.emptyList()) + .toJavaMap()); + when(spaces.findSpaceByRegion(any())).thenReturn(Optional.of(cloudFoundrySpace)); + Applications mockApplications = mock(Applications.class); + when(mockApplications.findServerGroupByNameAndSpaceId(any(), any())) + .thenReturn(matchingCloudFoundryServerGroup); + when(cloudFoundryClient.getApplications()).thenReturn(mockApplications); + Map> cacheResults = + HashMap.>of( + SERVER_GROUPS.getNs(), singleton(onDemandCacheData)) + .toJavaMap(); + String sourceAgentType = "account/CloudFoundryServerGroupCachingAgent-OnDemand"; + CacheResult expectedCacheResult = new DefaultCacheResult(cacheResults); + OnDemandAgent.OnDemandResult expectedResult = + new OnDemandAgent.OnDemandResult(sourceAgentType, expectedCacheResult, emptyMap()); + + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, data); + + assertThat(result) + .usingRecursiveComparison() + .ignoringCollectionOrder() + .isEqualTo(expectedResult); + } + + @Test + void pendingOnDemandRequestsShouldReturnOnDemandCacheData() { + String serverGroupName = 
"application-stack-detail-v000"; + String region = "org > space"; + Date cacheTime = new Date(); + Long cacheExpiry = 111L; + Integer processedCount = 1; + Long processedTime = 222L; + String serverGroupKey = Keys.getServerGroupKey(accountName, serverGroupName, region); + Collection expectedKeys = singleton("key1"); + when(mockProviderCache.filterIdentifiers(any(), any())).thenReturn(expectedKeys); + Collection onDemandCacheData = + singleton( + new DefaultCacheData( + serverGroupKey, + HashMap.of( + "cacheTime", cacheTime, + "cacheExpiry", cacheExpiry, + "processedCount", processedCount, + "processedTime", processedTime) + .toJavaMap(), + emptyMap())); + when(mockProviderCache.getAll(any(), any(), any())).thenReturn(onDemandCacheData); + Moniker moniker = + Moniker.builder() + .app("application") + .stack("stack") + .detail("detail") + .cluster("application-stack-detail") + .sequence(0) + .build(); + Collection expectedResult = + singleton( + HashMap.of( + "id", serverGroupKey, + "details", Keys.parse(serverGroupKey).get(), + "moniker", moniker, + "cacheTime", cacheTime, + "cacheExpiry", cacheExpiry, + "processedCount", processedCount, + "processedTime", processedTime) + .toJavaMap()); + + Collection> result = + cloudFoundryServerGroupCachingAgent.pendingOnDemandRequests(mockProviderCache); + + assertThat(result).isEqualTo(expectedResult); + verify(mockProviderCache) + .filterIdentifiers( + eq(ON_DEMAND.getNs()), eq(Keys.getServerGroupKey(accountName, "*", "*"))); + verify(mockProviderCache) + .getAll(eq(ON_DEMAND.getNs()), eq(expectedKeys), refEq(RelationshipCacheFilter.none())); + } + + @Test + void convertOnDemandDetailsShouldReturnNullMonikerForNullMonikerData() { + Moniker result = cloudFoundryServerGroupCachingAgent.convertOnDemandDetails(null); + + assertThat(result).isNull(); + } + + @Test + void convertOnDemandDetailsShouldReturnNullMonikerForNoServerGroupInMonikerData() { + Moniker result = cloudFoundryServerGroupCachingAgent.convertOnDemandDetails(emptyMap()); + + assertThat(result).isNull(); + } + + @Test + void convertOnDemandDetailsShouldReturnMonikerDataForServerGroup() { + Moniker expectedMoniker = + Moniker.builder() + .app("app") + .stack("stack") + .detail("detail") + .cluster("app-stack-detail") + .sequence(235) + .build(); + + Moniker result = + cloudFoundryServerGroupCachingAgent.convertOnDemandDetails( + singletonMap("serverGroupName", "app-stack-detail-v235")); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedMoniker); + } + + @Test + void loadDataShouldReturnCacheResultWithUpdatedData() { + String region = "org > space"; + String appName1 = "app1"; + String appName3 = "app3"; + String clusterName1 = appName1 + "-stack1-detail1"; + String clusterName3 = appName3 + "-stack3-detail3"; + String serverGroupName1 = clusterName1 + "-v000"; + String serverGroupName2 = clusterName1 + "-v001"; + String serverGroupName3 = clusterName3 + "-v000"; + String instanceId1 = "instance-guid-1-instance-key"; + String serverGroupKey1 = Keys.getServerGroupKey(accountName, serverGroupName1, region); + String serverGroupKey2 = Keys.getServerGroupKey(accountName, serverGroupName2, region); + String serverGroupKey3 = Keys.getServerGroupKey(accountName, serverGroupName3, region); + + CloudFoundryLoadBalancer loadBalancer1 = + CloudFoundryLoadBalancer.builder() + .account(accountName) + .id("lb-guid-1") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .build(); + CloudFoundryLoadBalancer loadBalancer2 = + CloudFoundryLoadBalancer.builder() + 
.account(accountName) + .id("lb-guid-2") + .domain(CloudFoundryDomain.builder().name("domain-name").build()) + .build(); + CloudFoundryInstance instance1 = + CloudFoundryInstance.builder().appGuid("instance-guid-1").key("instance-key").build(); + CloudFoundryServerGroup serverGroup1 = + CloudFoundryServerGroup.builder() + .name(serverGroupName1) + .id("sg-guid-1") + .account(accountName) + .space(cloudFoundrySpace) + .instances(singleton(instance1)) + .loadBalancerNames(Collections.singleton(loadBalancer1.getName())) + .build(); + CloudFoundryServerGroup serverGroup2 = + CloudFoundryServerGroup.builder() + .name(serverGroupName2) + .id("sg-guid-2") + .account(accountName) + .space(cloudFoundrySpace) + .instances(emptySet()) + .loadBalancerNames(Collections.singleton(loadBalancer2.getName())) + .build(); + CloudFoundryServerGroup serverGroup3 = + CloudFoundryServerGroup.builder() + .name(serverGroupName3) + .id("sg-guid-3") + .account(accountName) + .space(cloudFoundrySpace) + .instances(emptySet()) + .build(); + + CloudFoundryCluster cloudFoundryCluster1 = + CloudFoundryCluster.builder() + .accountName(accountName) + .name(clusterName1) + .serverGroups(HashSet.of(serverGroup1, serverGroup2).toJavaSet()) + .build(); + CloudFoundryCluster cloudFoundryCluster3 = + CloudFoundryCluster.builder() + .accountName(accountName) + .name(clusterName3) + .serverGroups(singleton(serverGroup3)) + .build(); + CloudFoundryApplication cloudFoundryApplication1 = + CloudFoundryApplication.builder() + .name(appName1) + .clusters(singleton(cloudFoundryCluster1)) + .build(); + CloudFoundryApplication cloudFoundryApplication3 = + CloudFoundryApplication.builder() + .name(appName3) + .clusters(singleton(cloudFoundryCluster3)) + .build(); + + when(mockProviderCache.getAll(any(), anyCollection())).thenReturn(emptySet()); + + Applications mockApplications = mock(Applications.class); + when(mockApplications.all(emptyList())) + .thenReturn(List.of(cloudFoundryApplication1, cloudFoundryApplication3).toJavaList()); + + when(cloudFoundryClient.getApplications()).thenReturn(mockApplications); + + Map> applicationRelationships1 = + HashMap.>of( + CLUSTERS.getNs(), + singleton(Keys.getClusterKey(accountName, appName1, clusterName1))) + .toJavaMap(); + Map> applicationRelationships3 = + HashMap.>of( + CLUSTERS.getNs(), + singleton(Keys.getClusterKey(accountName, appName3, clusterName3))) + .toJavaMap(); + CacheData applicationsCacheData1 = + new ResourceCacheData( + Keys.getApplicationKey(appName1), + cacheView(cloudFoundryApplication1), + applicationRelationships1); + CacheData applicationsCacheData3 = + new ResourceCacheData( + Keys.getApplicationKey(appName3), + cacheView(cloudFoundryApplication3), + applicationRelationships3); + + Map> serverGroupRelationships1 = + HashMap.>of( + INSTANCES.getNs(), singleton(Keys.getInstanceKey(accountName, instanceId1)), + LOAD_BALANCERS.getNs(), + singleton( + Keys.getLoadBalancerKey( + accountName, loadBalancer1.getName(), serverGroup1.getRegion()))) + .toJavaMap(); + Map> serverGroupRelationships2 = + HashMap.>of( + INSTANCES.getNs(), emptySet(), + LOAD_BALANCERS.getNs(), + singleton( + Keys.getLoadBalancerKey( + accountName, loadBalancer2.getName(), serverGroup2.getRegion()))) + .toJavaMap(); + Map> serverGroupRelationships3 = + HashMap.>of( + INSTANCES.getNs(), emptySet(), + LOAD_BALANCERS.getNs(), emptySet()) + .toJavaMap(); + + CacheData serverGroupCacheData1 = + new ResourceCacheData(serverGroupKey1, cacheView(serverGroup1), serverGroupRelationships1); + CacheData 
serverGroupCacheData2 = + new ResourceCacheData(serverGroupKey2, cacheView(serverGroup2), serverGroupRelationships2); + CacheData serverGroupCacheData3 = + new ResourceCacheData(serverGroupKey3, cacheView(serverGroup3), serverGroupRelationships3); + + Map> clusterRelationships1 = + HashMap.>of( + SERVER_GROUPS.getNs(), HashSet.of(serverGroupKey1, serverGroupKey2).toJavaSet()) + .toJavaMap(); + Map> clusterRelationships3 = + HashMap.>of(SERVER_GROUPS.getNs(), singleton(serverGroupKey3)) + .toJavaMap(); + + String clusterKey1 = Keys.getClusterKey(accountName, appName1, clusterName1); + String clusterKey3 = Keys.getClusterKey(accountName, appName3, clusterName3); + CacheData clusterCacheData1 = + new ResourceCacheData(clusterKey1, cacheView(cloudFoundryCluster1), clusterRelationships1); + CacheData clusterCacheData3 = + new ResourceCacheData(clusterKey3, cacheView(cloudFoundryCluster3), clusterRelationships3); + + CacheData instanceCacheData = + new ResourceCacheData( + Keys.getInstanceKey(accountName, instanceId1), cacheView(instance1), emptyMap()); + + Map> cacheResults = + HashMap.>of( + APPLICATIONS.getNs(), + HashSet.of(applicationsCacheData1, applicationsCacheData3).toJavaSet(), + CLUSTERS.getNs(), HashSet.of(clusterCacheData1, clusterCacheData3).toJavaSet(), + SERVER_GROUPS.getNs(), + HashSet.of(serverGroupCacheData1, serverGroupCacheData2, serverGroupCacheData3) + .toJavaSet(), + INSTANCES.getNs(), singleton(instanceCacheData), + ON_DEMAND.getNs(), emptySet()) + .toJavaMap(); + CacheResult expectedCacheResult = + new DefaultCacheResult( + cacheResults, + HashMap.>of(ON_DEMAND.getNs(), emptySet()).toJavaMap()); + + CacheResult result = cloudFoundryServerGroupCachingAgent.loadData(mockProviderCache); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedCacheResult); + verify(mockApplications).all(emptyList()); + } + + @Test + void loadDataShouldReturnCacheResultWithDataFromOnDemandNamespace() + throws JsonProcessingException { + + CloudFoundryInstance cloudFoundryInstance = + CloudFoundryInstance.builder().appGuid("instance-guid-1").key("instance-key").build(); + + CloudFoundryServerGroup cloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .name("serverGroupName") + .id("sg-guid-1") + .account(accountName) + .space(cloudFoundrySpace) + .instances(singleton(cloudFoundryInstance)) + .build(); + + CloudFoundryServerGroup onDemandCloudFoundryServerGroup = + CloudFoundryServerGroup.builder() + .name("serverGroupName") + .id("sg-guid-1") + .account(accountName) + .space(cloudFoundrySpace) + .diskQuota(1024) + .instances(singleton(cloudFoundryInstance)) + .build(); + + CloudFoundryCluster cloudFoundryCluster = + CloudFoundryCluster.builder() + .accountName(accountName) + .name("clusterName-foo-bar") + .serverGroups(Collections.singleton(cloudFoundryServerGroup)) + .build(); + + CloudFoundryApplication cloudFoundryApplication = + CloudFoundryApplication.builder() + .name("appName") + .clusters(singleton(cloudFoundryCluster)) + .build(); + + Map> applicationRelationships = + HashMap.>of( + CLUSTERS.getNs(), + singleton( + Keys.getClusterKey( + accountName, + cloudFoundryApplication.getName(), + cloudFoundryCluster.getName()))) + .toJavaMap(); + + Map> clusterRelationships = + HashMap.>of( + SERVER_GROUPS.getNs(), + HashSet.of( + Keys.getServerGroupKey( + accountName, + cloudFoundryServerGroup.getName(), + cloudFoundryServerGroup.getRegion())) + .toJavaSet()) + .toJavaMap(); + + Map> serverGroupRelationships = + HashMap.>of( + INSTANCES.getNs(), + 
singleton(Keys.getInstanceKey(accountName, cloudFoundryInstance.getName())), + LOAD_BALANCERS.getNs(), emptyList()) + .toJavaMap(); + + Applications mockApplications = mock(Applications.class); + when(mockApplications.all(emptyList())) + .thenReturn(List.of(cloudFoundryApplication).toJavaList()); + + ResourceCacheData onDemandCacheResults = + new ResourceCacheData( + Keys.getServerGroupKey( + accountName, + onDemandCloudFoundryServerGroup.getName(), + onDemandCloudFoundryServerGroup.getRegion()), + cacheView(onDemandCloudFoundryServerGroup), + serverGroupRelationships); + + when(mockProviderCache.getAll(any(), anyCollection())) + .thenReturn( + singleton( + new DefaultCacheData( + Keys.getServerGroupKey( + accountName, + onDemandCloudFoundryServerGroup.getName(), + onDemandCloudFoundryServerGroup.getRegion()), + (int) TimeUnit.MINUTES.toSeconds(10), // ttl + io.vavr.collection.HashMap.of( + "cacheTime", + internalClock.instant().plusSeconds(600).toEpochMilli(), + "cacheResults", + objectMapper.writeValueAsString( + Collections.singletonMap( + SERVER_GROUPS.getNs(), + Collections.singleton(onDemandCacheResults))), + "processedCount", + 0) + .toJavaMap(), + emptyMap(), + internalClock))); + when(cloudFoundryClient.getApplications()).thenReturn(mockApplications); + + Map> cacheResults = + HashMap.>of( + APPLICATIONS.getNs(), + Collections.singleton( + new ResourceCacheData( + Keys.getApplicationKey(cloudFoundryApplication.getName()), + cacheView(cloudFoundryApplication), + applicationRelationships)), + CLUSTERS.getNs(), + Collections.singleton( + new ResourceCacheData( + Keys.getClusterKey( + accountName, + cloudFoundryCluster.getMoniker().getApp(), + cloudFoundryCluster.getName()), + cacheView(cloudFoundryCluster), + clusterRelationships)), + SERVER_GROUPS.getNs(), + Collections.singleton( + new ResourceCacheData( + Keys.getServerGroupKey( + accountName, + onDemandCloudFoundryServerGroup.getName(), + onDemandCloudFoundryServerGroup.getRegion()), + cacheView(onDemandCloudFoundryServerGroup), + serverGroupRelationships)), + INSTANCES.getNs(), + singleton( + new ResourceCacheData( + Keys.getInstanceKey(accountName, cloudFoundryInstance.getName()), + cacheView(cloudFoundryInstance), + emptyMap())), + ON_DEMAND.getNs(), + emptySet()) + .toJavaMap(); + + CacheResult expectedCacheResult = + new DefaultCacheResult( + cacheResults, + HashMap.>of(ON_DEMAND.getNs(), emptySet()).toJavaMap()); + + CacheResult result = cloudFoundryServerGroupCachingAgent.loadData(mockProviderCache); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedCacheResult); + } + + @Test + void shouldReturnNullWhenAccountNameDiffers() { + Map data = + HashMap.of( + "account", "NotAccount", + "region", "org1 > space1", + "serverGroupName", "doesntMatter") + .toJavaMap(); + + OnDemandAgent.OnDemandResult result = + cloudFoundryServerGroupCachingAgent.handle(mockProviderCache, data); + + assertThat(result).isEqualTo(null); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundrySpaceCachingAgentTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundrySpaceCachingAgentTest.java new file mode 100644 index 00000000000..beab8be0017 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/agent/CloudFoundrySpaceCachingAgentTest.java @@ -0,0 +1,104 @@ +/* + * Copyright 2020 Armory, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys.Namespace.SPACES; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.provider.agent.AbstractCloudFoundryCachingAgent.cacheView; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.ResourceCacheData; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.Spaces; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import io.vavr.collection.List; +import java.util.Collection; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CloudFoundrySpaceCachingAgentTest { + private String accountName = "account"; + private CloudFoundryClient cloudFoundryClient = mock(CloudFoundryClient.class); + private Registry registry = mock(Registry.class); + private CloudFoundryCredentials credentials = mock(CloudFoundryCredentials.class); + private CloudFoundrySpaceCachingAgent cloudFoundrySpaceCachingAgent = + new CloudFoundrySpaceCachingAgent(credentials, registry); + private ProviderCache mockProviderCache = mock(ProviderCache.class); + private Spaces spaces = mock(Spaces.class); + + @BeforeEach + void before() { + when(credentials.getClient()).thenReturn(cloudFoundryClient); + when(credentials.getName()).thenReturn(accountName); + } + + @Test + void loadDataShouldReturnCacheResultWithUpdatedData() { + + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + .id("space-guid-1") + .name("space1") + .organization(CloudFoundryOrganization.builder().id("org-guid-1").name("org1").build()) + .build(); + + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space-guid-2") + .name("space2") + .organization(CloudFoundryOrganization.builder().id("org-guid-2").name("org2").build()) + .build(); + + when(mockProviderCache.getAll(any(), 
anyCollection())).thenReturn(emptySet()); + when(cloudFoundryClient.getSpaces()).thenReturn(spaces); + when(spaces.all()).thenReturn(List.of(space1, space2).toJavaList()); + + CacheData spaceCacheData1 = + new ResourceCacheData( + Keys.getSpaceKey(accountName, space1.getRegion()), cacheView(space1), emptyMap()); + + CacheData spaceCacheData2 = + new ResourceCacheData( + Keys.getSpaceKey(accountName, space2.getRegion()), cacheView(space2), emptyMap()); + + Map<String, Collection<CacheData>> cacheResults = + ImmutableMap.of(SPACES.getNs(), ImmutableSet.of(spaceCacheData1, spaceCacheData2)); + + CacheResult expectedCacheResult = new DefaultCacheResult(cacheResults, emptyMap()); + + CacheResult result = cloudFoundrySpaceCachingAgent.loadData(mockProviderCache); + + assertThat(result).usingRecursiveComparison().isEqualTo(expectedCacheResult); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryInstanceProviderTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryInstanceProviderTest.java new file mode 100644 index 00000000000..ed696a9e1d2 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/provider/view/CloudFoundryInstanceProviderTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view; + +import static com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view.CloudFoundryInstanceProvider.LogsResourceType.APP; +import static com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view.CloudFoundryInstanceProvider.LogsResourceType.TASK; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.provider.view.CloudFoundryInstanceProvider.CloudFoundryConsoleOutputIdParameter; +import com.netflix.spinnaker.clouddriver.cloudfoundry.security.CloudFoundryCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import org.junit.jupiter.api.Test; + +class CloudFoundryInstanceProviderTest { + + @Test + void getConsoleOutput_withNoAccount_returnsNull() { + CredentialsRepository<CloudFoundryCredentials> credentialsRepository = + mock(CredentialsRepository.class); + when(credentialsRepository.getOne(eq("account1"))) + .thenReturn(mock(CloudFoundryCredentials.class)); + + CloudFoundryInstanceProvider provider = + new CloudFoundryInstanceProvider(mock(CacheRepository.class), credentialsRepository); + + assertThat(provider.getConsoleOutput("account2", "location", "task:jobId")).isNull(); + } + + @Test + void cloudFoundryConsoleOutputIdParameter_fromString_validAppLogsId() { + CloudFoundryConsoleOutputIdParameter param = + CloudFoundryConsoleOutputIdParameter.fromString("app:12345:99"); + + assertThat(param.getLogsResourceType()).isEqualTo(APP); + assertThat(param.getGuid()).isEqualTo("12345"); + assertThat(param.getInstanceIndex()).isEqualTo(99); + } + + @Test + void cloudFoundryConsoleOutputIdParameter_fromString_validTaskLogsId() { + CloudFoundryConsoleOutputIdParameter param = + CloudFoundryConsoleOutputIdParameter.fromString("task:12345"); + + assertThat(param.getLogsResourceType()).isEqualTo(TASK); + assertThat(param.getGuid()).isEqualTo("12345"); + assertThat(param.getInstanceIndex()).isEqualTo(0); + } + + @Test + void cloudFoundryConsoleOutputIdParameter_fromString_ignoredTaskInstanceId() { + CloudFoundryConsoleOutputIdParameter param = + CloudFoundryConsoleOutputIdParameter.fromString("task:12345:1"); + + assertThat(param.getLogsResourceType()).isEqualTo(TASK); + assertThat(param.getGuid()).isEqualTo("12345"); + assertThat(param.getInstanceIndex()).isEqualTo(0); + } + + @Test + void cloudFoundryConsoleOutputIdParameter_fromString_invalidType() { + assertThrows( + IllegalArgumentException.class, + () -> CloudFoundryConsoleOutputIdParameter.fromString("invalid:12345:1")); + } + + @Test + void cloudFoundryConsoleOutputIdParameter_fromString_appLogsIdMissingInstanceIndex() { + assertThrows( + IllegalArgumentException.class, + () -> CloudFoundryConsoleOutputIdParameter.fromString("app:12345")); + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsTest.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsTest.java new file mode 100644 index 00000000000..bce99a7c95f --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/security/CloudFoundryCredentialsTest.java @@ -0,0 +1,193 @@ +/* +
* Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.security; + +import static java.util.Collections.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.cloudfoundry.cache.CacheRepository; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.CloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.client.MockCloudFoundryClient; +import com.netflix.spinnaker.clouddriver.cloudfoundry.config.CloudFoundryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundryOrganization; +import com.netflix.spinnaker.clouddriver.cloudfoundry.model.CloudFoundrySpace; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ForkJoinPool; +import okhttp3.OkHttpClient; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +public class CloudFoundryCredentialsTest { + + private final CacheRepository cacheRepository = mock(CacheRepository.class); + private final CloudFoundryClient cloudFoundryClient = new MockCloudFoundryClient(); + + @Test + void emptySpaceFilterShouldConvertToEmptyList() { + CloudFoundryCredentials credentials = getStubCloudFoundryCredentials(); + + assertThat(credentials.getFilteredSpaces()).isEqualTo(emptyList()); + } + + @Test + void singleOrgSpaceFilterShouldConvert() { + CloudFoundryCredentials credentials = getStubCloudFoundryCredentials(); + + Map<String, Set<String>> spaceFilter = ImmutableMap.of("org", emptySet()); + + CloudFoundryOrganization organization = + CloudFoundryOrganization.builder().id("org123").name("org").build(); + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + .id("space123") + .name("space1") + .organization(organization) + .build(); + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space456") + .name("space2") + .organization(organization) + .build(); + + when(cloudFoundryClient.getSpaces().findAllBySpaceNamesAndOrgNames(isNull(), any())) + .thenReturn(List.of(space1, space2)); + List<CloudFoundrySpace> result = credentials.createFilteredSpaces(spaceFilter); + assertThat(result).isEqualTo(List.of(space1, space2)); + } + + @Test + void singleOrgSingleSpaceSpaceFilterShouldConvert() { + CloudFoundryCredentials credentials = getStubCloudFoundryCredentials(); + + Map<String, Set<String>> spaceFilter = ImmutableMap.of("org", Set.of("space1")); + + CloudFoundryOrganization organization = + CloudFoundryOrganization.builder().id("org123").name("org").build(); + CloudFoundrySpace space1 = + CloudFoundrySpace.builder() + .id("space123") + .name("space1") + .organization(organization) + .build(); + CloudFoundrySpace space2 = + CloudFoundrySpace.builder() + .id("space456") + .name("space2") + .organization(organization) + .build(); + + when(cloudFoundryClient.getSpaces().findAllBySpaceNamesAndOrgNames(any(), any())) + .thenReturn(List.of(space1, space2)); + List<CloudFoundrySpace> result = credentials.createFilteredSpaces(spaceFilter); + assertThat(result).isEqualTo(List.of(space1)); + } + + @Test + void fakeOrgFakeSpaceSpaceFilterShouldThrowError() { + CloudFoundryCredentials credentials = getStubCloudFoundryCredentials(); + + Map<String, Set<String>> spaceFilter = ImmutableMap.of("org", Set.of("space1")); + + when(cloudFoundryClient.getSpaces().findAllBySpaceNamesAndOrgNames(any(), any())) + .thenReturn(emptyList()); + Exception e = + assertThrows(Exception.class, () -> credentials.createFilteredSpaces(spaceFilter)); + assertThat(e) + .hasMessageContaining( + "The spaceFilter had Orgs and/or Spaces but CloudFoundry returned no spaces as a result. Spaces must not be null or empty when a spaceFilter is included."); + } + + @Test + @DisplayName( + "Tests Jackson Ignore Annotations. These fields should not be serialized when calling '/credentials'") + void testJacksonSerialization() throws NoSuchMethodException { + // these getter methods should exist + assertNotNull(CloudFoundryCredentials.class.getMethod("getCredentials")); + assertNotNull(CloudFoundryCredentials.class.getMethod("getClient")); + assertNotNull(CloudFoundryCredentials.class.getMethod("getPassword")); + assertNotNull(CloudFoundryCredentials.class.getMethod("getSpaceSupplier")); + assertNotNull(CloudFoundryCredentials.class.getMethod("getCacheRepository")); + assertNotNull(CloudFoundryCredentials.class.getMethod("getForkJoinPool")); + assertNotNull(CloudFoundryCredentials.class.getMethod("getFilteredSpaces")); + assertNotNull(CloudFoundryCredentials.class.getDeclaredMethod("getSpacesLive")); + + // lombok shouldn't generate a getter for the "cloudFoundryClient" field + assertThrows( + NoSuchMethodException.class, + () -> CloudFoundryCredentials.class.getMethod("getCloudFoundryClient")); + + ObjectMapper mapper = new ObjectMapper(); + CloudFoundryCredentials credentials = getStubCloudFoundryCredentials(); + // Test Jackson Annotations + JsonNode jsonCredentials = mapper.valueToTree(credentials); + + assertFalse(jsonCredentials.has("credentials")); + assertFalse(jsonCredentials.has("client")); + assertFalse(jsonCredentials.has("cloudFoundryClient")); + assertFalse(jsonCredentials.has("password")); + assertFalse(jsonCredentials.has("spaceSupplier")); + assertFalse(jsonCredentials.has("cacheRepository")); + assertFalse(jsonCredentials.has("forkJoinPool")); + assertFalse(jsonCredentials.has("filteredSpaces")); + assertFalse(jsonCredentials.has("spacesLive")); + } + + @NotNull + private CloudFoundryCredentials getStubCloudFoundryCredentials() { + return new CloudFoundryCredentials( + "test", + "managerUri", + "metricsUri", + "api.host", + "username", + "password", + "environment", + false, + false, + 500, + cacheRepository, + null, + ForkJoinPool.commonPool(), + emptyMap(), + new OkHttpClient(), + new CloudFoundryConfigurationProperties.ClientConfig(), + new CloudFoundryConfigurationProperties.LocalCacheConfig()) { +
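// Returning the shared MockCloudFoundryClient from both overrides keeps this stub from opening a real Cloud Foundry connection. +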
public CloudFoundryClient getClient() { + return cloudFoundryClient; + } + + public CloudFoundryClient getCredentials() { + return cloudFoundryClient; + } + }; + } +} diff --git a/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/utils/TestUtils.java b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/utils/TestUtils.java new file mode 100644 index 00000000000..dcf353d0ff5 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/java/com/netflix/spinnaker/clouddriver/cloudfoundry/utils/TestUtils.java @@ -0,0 +1,34 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudfoundry.utils; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.function.Supplier; + +public class TestUtils { + public static void assertThrows( + Supplier<?> s, Class<? extends RuntimeException> clazz, String errorString) { + RuntimeException runtimeException = null; + try { + s.get(); + } catch (RuntimeException e) { + runtimeException = e; + } + assertThat(runtimeException).isInstanceOf(clazz); + assertThat(runtimeException.getMessage()).isEqualTo(errorString); + } +} diff --git a/clouddriver-cloudfoundry/src/test/resources/doppler.recent.logs b/clouddriver-cloudfoundry/src/test/resources/doppler.recent.logs new file mode 100644 index 00000000000..10143b39848 --- /dev/null +++ b/clouddriver-cloudfoundry/src/test/resources/doppler.recent.logs @@ -0,0 +1,99 @@ [99 added lines of a binary multipart Doppler log fixture, elided: protobuf-encoded log envelopes from Diego cell 5cbe1595-0c9e-4f68-b74e-cc39aa5f0d5e whose readable fragments are container create/destroy lifecycle messages, "Exit status 0", and "HELLO <n>" task output; the raw bytes do not survive text encoding] diff --git a/clouddriver-cloudrun/clouddriver-cloudrun.gradle b/clouddriver-cloudrun/clouddriver-cloudrun.gradle new file mode 100644 index 00000000000..1ed13a19428 --- /dev/null +++ b/clouddriver-cloudrun/clouddriver-cloudrun.gradle @@ -0,0 +1,41 @@ +dependencies { + implementation project(":cats:cats-core") +
implementation project(":clouddriver-api") + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-core") + implementation project(":clouddriver-google-common") + implementation project(":clouddriver-security") + + implementation "com.google.apis:google-api-services-run" + implementation "com.google.apis:google-api-services-storage" + implementation 'com.google.auth:google-auth-library-oauth2-http' + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-config" + implementation "io.spinnaker.kork:kork-cloud-config-server" + implementation "io.spinnaker.kork:kork-moniker" + implementation "com.netflix.spectator:spectator-api" + implementation "com.squareup.retrofit:retrofit" + implementation "commons-io:commons-io" + implementation "org.apache.commons:commons-compress:1.20" + implementation "org.apache.groovy:groovy" + implementation "org.eclipse.jgit:org.eclipse.jgit:5.7.0.202003110725-r" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.springframework.cloud:spring-cloud-context" + implementation "org.springframework.cloud:spring-cloud-config-server" + + testImplementation "org.assertj:assertj-core" + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.mockito:mockito-core" + +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunCloudProvider.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunCloudProvider.java new file mode 100644 index 00000000000..0803c44237f --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunCloudProvider.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun; + +import com.netflix.spinnaker.clouddriver.core.CloudProvider; +import java.lang.annotation.Annotation; +import org.springframework.stereotype.Component; + +/** Google Cloud Run declaration as a {@link CloudProvider}. 
+ */
+@Component
+public class CloudrunCloudProvider implements CloudProvider {
+  public static final String ID = "cloudrun";
+  final String id = ID;
+  final String displayName = "Cloud run";
+  final Class<? extends Annotation> operationAnnotationType = CloudrunOperation.class;
+
+  /** @return the provider id, {@link #ID} */
+  @Override
+  public String getId() {
+    return id;
+  }
+
+  /** @return the human-readable provider name */
+  @Override
+  public String getDisplayName() {
+    return displayName;
+  }
+
+  /** @return the annotation type that marks this provider's atomic operations */
+  @Override
+  public Class<? extends Annotation> getOperationAnnotationType() {
+    return operationAnnotationType;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunJobExecutor.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunJobExecutor.java
new file mode 100644
index 00000000000..c6782250152
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunJobExecutor.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun;
+
+import com.netflix.spinnaker.clouddriver.jobs.JobExecutor;
+import com.netflix.spinnaker.clouddriver.jobs.JobRequest;
+import com.netflix.spinnaker.clouddriver.jobs.JobResult;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Component;
+
+@Component
+public class CloudrunJobExecutor {
+
+  @Value("${cloudrun.job-sleep-ms:1000}")
+  private Long sleepMs;
+
+  @Autowired private JobExecutor jobExecutor;
+
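+  /**
+   * Runs an external command synchronously and throws when it exits non-zero. Illustrative
+   * example (hypothetical arguments): {@code runCommand(List.of("gcloud", "--version"))}.
+   */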
+  public void runCommand(List<String> command) {
+    JobResult<String> jobStatus = jobExecutor.runJob(new JobRequest(command));
+    if (jobStatus.getResult() == JobResult.Result.FAILURE) {
+      String stdOut = jobStatus.getOutput();
+      String stdErr = jobStatus.getError();
+      throw new IllegalArgumentException("stdout: " + stdOut + ", stderr: " + stdErr);
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunOperation.java
new file mode 100644
index 00000000000..ae25f2c681b
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/CloudrunOperation.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target(ElementType.TYPE)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface CloudrunOperation {
+  String value();
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/cache/Keys.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/cache/Keys.java
new file mode 100644
index 00000000000..0a79503bad7
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/cache/Keys.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.cache;
+
+import com.google.common.base.CaseFormat;
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider;
+import groovy.util.logging.Slf4j;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Stream;
+import lombok.Getter;
+
+@Slf4j
+public class Keys {
+
+  public static final String KEY_DELIMITER = ":";
+
+  public enum Namespace {
+    APPLICATIONS,
+    PLATFORM_APPLICATIONS,
+    CLUSTERS,
+    SERVER_GROUPS,
+    INSTANCES,
+    LOAD_BALANCERS,
+    ON_DEMAND;
+
+    public static String provider = CloudrunCloudProvider.ID;
+
+    @Getter final String ns;
+
+    private Namespace() {
+      this.ns = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name()); // FOO_BAR -> fooBar
+    }
+
+    public String toString() {
+      return ns;
+    }
+
+    public static Namespace from(String ns) {
+      return Stream.of(values())
+          .filter(namespace -> namespace.ns.equals(ns))
+          .findAny()
+          .orElseThrow(IllegalArgumentException::new);
+    }
+  }
+
+  // Keys have the shape "cloudrun:<namespace>:<part>...", e.g. (illustrative)
+  // "cloudrun:serverGroups:myapp-dev:my-account:us-central1:myapp-dev-v001".
+  public static Map<String, String> parse(String key) {
+    String[] parts = key.split(":");
+
+    if (parts.length < 2 || !parts[0].equals(CloudrunCloudProvider.ID)) {
+      return null;
+    }
+    Map<String, String> result = new HashMap<>();
+    result.put("provider", parts[0]);
+    result.put("type", parts[1]);
+    Namespace namespace = Namespace.from(parts[1]);
+    switch (namespace) {
+      case APPLICATIONS:
+        result.put("application", parts[2]);
+        break;
+      case PLATFORM_APPLICATIONS:
+        result.put("project", parts[2]);
+        break;
+      case CLUSTERS:
+        result.put("account", parts[2]);
+        result.put("application", parts[3]);
+        result.put("name", parts[4]);
+        result.put("cluster", parts[4]);
+        break;
+      case INSTANCES:
+        result.put("account", parts[2]);
+        result.put("name", parts[3]);
+        result.put("instance", parts[3]);
+        break;
+      case LOAD_BALANCERS:
+        result.put("account", parts[2]);
+        result.put("name", parts[3]);
+        result.put("loadBalancer", parts[3]);
+        break;
+      case SERVER_GROUPS:
+        Names names = Names.parseName(parts[5]);
+        result.put("application", names.getApp());
+        result.put("cluster", parts[2]);
+        result.put("account", parts[3]);
+        result.put("region",
parts[4]); + result.put("serverGroup", parts[5]); + result.put("name", parts[5]); + break; + default: + break; + } + return result; + } + + public static String getApplicationKey(String application) { + return keyFor(Namespace.APPLICATIONS, application); + } + + public static String getPlatformApplicationKey(String project) { + return keyFor(Namespace.PLATFORM_APPLICATIONS, project); + } + + public static String getClusterKey(String account, String application, String clusterName) { + return keyFor(Namespace.CLUSTERS, account, application, clusterName); + } + + public static String getInstanceKey(String account, String instanceName) { + return keyFor(Namespace.INSTANCES, account, instanceName); + } + + public static String getLoadBalancerKey(String account, String loadBalancerName) { + return keyFor(Namespace.LOAD_BALANCERS, account, loadBalancerName); + } + + public static String getServerGroupKey(String account, String serverGroupName, String region) { + Names names = Names.parseName(serverGroupName); + return keyFor(Namespace.SERVER_GROUPS, names.getCluster(), account, region, names.getGroup()); + } + + private static String keyFor(Namespace namespace, String... parts) { + StringBuilder builder = + new StringBuilder(CloudrunCloudProvider.ID + KEY_DELIMITER).append(namespace); + for (String part : parts) { + builder.append(KEY_DELIMITER).append(part); + } + return builder.toString(); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/config/CloudrunConfigurationProperties.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/config/CloudrunConfigurationProperties.java new file mode 100644 index 00000000000..d15fe0932c9 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/config/CloudrunConfigurationProperties.java @@ -0,0 +1,59 @@ +/* + * Copyright 2022 OpsMx Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.config;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor;
+import com.netflix.spinnaker.clouddriver.googlecommon.config.GoogleCommonManagedAccount;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import org.springframework.util.StringUtils;
+
+@Data
+public class CloudrunConfigurationProperties {
+  private List<ManagedAccount> accounts = new ArrayList<>();
+  private String gcloudPath;
+
+  @Data
+  @EqualsAndHashCode(callSuper = true)
+  public static class ManagedAccount extends GoogleCommonManagedAccount {
+
+    private String serviceAccountEmail;
+    private String localRepositoryDirectory = "/tmp";
+    private boolean sshTrustUnknownHosts;
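+
+    // initialize() below activates this account's service-account key via gcloud and, when
+    // "project" is unset, falls back to the key file's "project_id". A minimal sketch of the
+    // matching clouddriver YAML, with hypothetical values:
+    //
+    //   cloudrun:
+    //     enabled: true
+    //     accounts:
+    //       - name: my-cloudrun-account
+    //         jsonPath: /var/secrets/gcp-key.json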
+    public void initialize(CloudrunJobExecutor jobExecutor, String gcloudPath) {
+      if (!StringUtils.isEmpty(getJsonPath())) {
+        jobExecutor.runCommand(
+            List.of(gcloudPath, "auth", "activate-service-account", "--key-file", getJsonPath()));
+        ObjectMapper mapper = new ObjectMapper();
+        try {
+          JsonNode node = mapper.readTree(new File(getJsonPath()));
+          if (StringUtils.isEmpty(getProject())) {
+            setProject(node.get("project_id").asText());
+          }
+        } catch (Exception e) {
+          throw new RuntimeException("Could not read JSON configuration file.", e);
+        }
+      }
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/config/CloudrunCredentialsConfiguration.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/config/CloudrunCredentialsConfiguration.java
new file mode 100644
index 00000000000..2fac240b267
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/config/CloudrunCredentialsConfiguration.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2022 OpsMx
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.config;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration;
+import com.netflix.spinnaker.credentials.CredentialsTypeProperties;
+import com.netflix.spinnaker.kork.configserver.ConfigFileService;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class CloudrunCredentialsConfiguration {
+  private static final Logger log = LoggerFactory.getLogger(CloudrunCredentialsConfiguration.class);
+
+  @Bean
+  public CredentialsTypeBaseConfiguration<
+          CloudrunNamedAccountCredentials, CloudrunConfigurationProperties.ManagedAccount>
+      cloudrunCredentialsProperties(
+          ApplicationContext applicationContext,
+          CloudrunConfigurationProperties configurationProperties,
+          CloudrunJobExecutor jobExecutor,
+          ConfigFileService configFileService,
+          String clouddriverUserAgentApplicationName) {
+    return new CredentialsTypeBaseConfiguration<>(
+        applicationContext,
+        CredentialsTypeProperties
+            .<CloudrunNamedAccountCredentials, CloudrunConfigurationProperties.ManagedAccount>
+                builder()
+            .type(CloudrunNamedAccountCredentials.CREDENTIALS_TYPE)
+            .credentialsDefinitionClass(CloudrunConfigurationProperties.ManagedAccount.class)
+            .credentialsClass(CloudrunNamedAccountCredentials.class)
+            .credentialsParser(
+                a -> {
+                  try {
+                    String gcloudPath = configurationProperties.getGcloudPath();
+                    if (StringUtils.isEmpty(gcloudPath)) {
+                      gcloudPath = "gcloud";
+                    }
+                    a.initialize(jobExecutor, gcloudPath);
+                    String jsonKey = configFileService.getContents(a.getJsonPath());
+                    return new CloudrunNamedAccountCredentials.Builder()
+                        .setName(a.getName())
+                        .setEnvironment(
+                            StringUtils.isEmpty(a.getEnvironment())
+                                ? a.getName()
+                                : a.getEnvironment())
+                        .setAccountType(
+                            StringUtils.isEmpty(a.getAccountType())
+                                ? a.getName()
+                                : a.getAccountType())
+                        .setProject(a.getProject())
+                        .setJsonKey(jsonKey)
+                        .setApplicationName(clouddriverUserAgentApplicationName)
+                        .setJsonPath(a.getJsonPath())
+                        .setServiceAccountEmail(a.getServiceAccountEmail())
+                        .setLocalRepositoryDirectory(a.getLocalRepositoryDirectory())
+                        .setRequiredGroupMembership(a.getRequiredGroupMembership())
+                        .setPermissions(a.getPermissions().build())
+                        .build(jobExecutor);
+                  } catch (Exception e) {
+                    log.info(
+                        String.format("Could not load account %s for Cloud Run", a.getName()), e);
+                    return null;
+                  }
+                })
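+            // An account that fails to parse is logged and skipped; returning null above drops
+            // that account rather than failing clouddriver startup (assumed kork behavior).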
+            .defaultCredentialsSource(configurationProperties::getAccounts)
+            .build());
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/converter/manifest/CloudrunCleanupArtifactsConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/converter/manifest/CloudrunCleanupArtifactsConverter.java
new file mode 100644
index 00000000000..05aa0bdf6d8
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/converter/manifest/CloudrunCleanupArtifactsConverter.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.CLEANUP_ARTIFACTS;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters.CloudrunAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.cloudrun.description.manifest.CloudrunCleanupArtifactsDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.op.artifact.CloudrunCleanupArtifactsOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(CLEANUP_ARTIFACTS)
+@Component
+public class CloudrunCleanupArtifactsConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+
+  @Override
+  public AtomicOperation<DeploymentResult> convertOperation(Map<String, Object> input) {
+    return new CloudrunCleanupArtifactsOperation(convertDescription(input));
+  }
+
+  @Override
+  public CloudrunCleanupArtifactsDescription convertDescription(Map<String, Object> input) {
+    return CloudrunAtomicOperationConverterHelper.convertDescription(
+        input, this, CloudrunCleanupArtifactsDescription.class);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/converter/manifest/CloudrunDeployManifestConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/converter/manifest/CloudrunDeployManifestConverter.java
new file mode 100644
index 00000000000..1c8ce80702a
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/converter/manifest/CloudrunDeployManifestConverter.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DEPLOY_CLOUDRUN_MANIFEST;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters.CloudrunAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.cloudrun.description.manifest.CloudrunDeployManifestDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.op.manifest.CloudrunDeployManifestOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import groovy.util.logging.Slf4j;
+import java.util.Map;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(DEPLOY_CLOUDRUN_MANIFEST)
+@Component
+@Slf4j
+public class CloudrunDeployManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+
+  @Autowired private ObjectMapper objectMapper;
+
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new CloudrunDeployManifestOperation(convertDescription(input));
+  }
+
+  public CloudrunDeployManifestDescription convertDescription(Map<String, Object> input) {
+
+    CloudrunDeployManifestDescription description =
+        CloudrunAtomicOperationConverterHelper.convertDescription(
+            input, this, CloudrunDeployManifestDescription.class);
+    // The application arrives inside the moniker, e.g. {"moniker": {"app": "myapp"}}.
+    if (input.get("moniker") != null
+        && ((Map<String, String>) input.get("moniker")).get("app") != null) {
+      description.setApplication(((Map<String, String>) input.get("moniker")).get("app"));
+    }
+    return description;
+  }
+
+  @Override
+  public ObjectMapper getObjectMapper() {
+    return objectMapper;
+  }
+
+  public void setObjectMapper(ObjectMapper objectMapper) {
+    this.objectMapper = objectMapper;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunSafeRetry.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunSafeRetry.java
new file mode 100644
index 00000000000..0bd837f8c5d
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunSafeRetry.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException;
+import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleCommonSafeRetry;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import groovy.lang.Closure;
+import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
+import javax.annotation.ParametersAreNullableByDefault;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Component;
+
+@Component
+@NonnullByDefault
+public final class CloudrunSafeRetry {
+  private final GoogleCommonSafeRetry googleCommonSafeRetry;
+
+  @Autowired
+  @ParametersAreNullableByDefault
+  public CloudrunSafeRetry(
+      @Value("${cloudrun.safe-retry-max-wait-interval-ms:60000}") Integer maxWaitInterval,
+      @Value("${cloudrun.safe-retry-retry-interval-base-sec:2}") Integer retryIntervalBase,
+      @Value("${cloudrun.safe-retry-jitter-multiplier:1000}") Integer jitterMultiplier,
+      @Value("${cloudrun.safe-retry-max-retries:10}") Integer maxRetries) {
+    googleCommonSafeRetry =
+        new GoogleCommonSafeRetry(maxWaitInterval, retryIntervalBase, jitterMultiplier, maxRetries);
+  }
+
+  private CloudrunSafeRetry(GoogleCommonSafeRetry googleCommonSafeRetry) {
+    this.googleCommonSafeRetry = googleCommonSafeRetry;
+  }
+
+  /**
+   * Returns an instance of this class that never waits between retries, suitable for testing.
+   *
+   * @return An instance of {@link CloudrunSafeRetry}
+   */
+  public static CloudrunSafeRetry withoutDelay() {
+    return new CloudrunSafeRetry(GoogleCommonSafeRetry.withoutDelay());
+  }
+
+  @Nullable
+  public <V> V doRetry(
+      Closure<V> operation,
+      String resource,
+      @Nullable Task task,
+      List<Integer> retryCodes,
+      Map<String, String> tags,
+      Registry registry) {
+    String action = tags.get("action");
+    String description = String.format("%s of %s", action, resource);
+    if (task != null) {
+      task.updateStatus(tags.get("phase"), String.format("Attempting %s...", description));
+    }
+
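+    // GoogleCommonSafeRetry performs the bounded, jittered retry loop; a terminal
+    // GoogleApiException is rethrown below as a provider-specific CloudrunOperationException.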
+    try {
+      return googleCommonSafeRetry.doRetry(
+          operation, description, retryCodes, ImmutableList.of(), tags, registry);
+    } catch (GoogleApiException e) {
+      throw new CloudrunOperationException("Failed to " + description, e);
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunServerGroupNameResolver.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunServerGroupNameResolver.java
new file mode 100644
index 00000000000..81b3d5e4da6
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunServerGroupNameResolver.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy;
+
+import com.google.api.services.run.v1.model.Revision;
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunModelUtil;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+public class CloudrunServerGroupNameResolver extends AbstractServerGroupNameResolver {
+  private static final String PHASE = "DEPLOY";
+
+  private final String project;
+  private String region;
+  private final CloudFoundryCredentialsPlaceholderFix credentials;
+
+  public CloudrunServerGroupNameResolver(
+      String project, String region, CloudrunNamedAccountCredentials credentials) {
+    this.project = project;
+    this.region = region;
+    this.credentials = credentials;
+  }
+
+  @Override
+  public String getPhase() {
+    return PHASE;
+  }
+
+  @Override
+  public String getRegion() {
+    return region;
+  }
+
+  public void setRegion(String region) {
+    this.region = region;
+  }
+
+  @Override
+  public List<TakenSlot> getTakenSlots(String clusterName) {
+    List<Revision> versions =
+        CloudrunUtils.queryAllRevisions(project, credentials, getTask(), getPhase());
+    return findMatchingVersions(versions, clusterName);
+  }
+
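+  // Frigga-style names, e.g. (illustrative) "myapp-dev-v001", parse to cluster "myapp-dev" and
+  // sequence 1; findMatchingVersions keeps only revisions whose cluster matches.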
+  public static List<TakenSlot> findMatchingVersions(
+      List<Revision> revisions, String clusterName) {
+
+    List<TakenSlot> slot = new ArrayList<>();
+    revisions.forEach(
+        revision -> {
+          String versionName = revision.getMetadata().getName();
+          Names friggaNames = Names.parseName(versionName);
+          if (clusterName.equals(friggaNames.getCluster())) {
+            Long timestamp =
+                CloudrunModelUtil.translateTime(revision.getMetadata().getCreationTimestamp());
+            AbstractServerGroupNameResolver.TakenSlot temp =
+                new AbstractServerGroupNameResolver.TakenSlot(
+                    versionName, friggaNames.getSequence(), new Date(timestamp));
+            slot.add(temp);
+          }
+        });
+    return slot;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunUtils.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunUtils.java
new file mode 100644
index 00000000000..d3026e0fb7c
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/CloudrunUtils.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy;
+
+import com.google.api.services.run.v1.CloudRun;
+import com.google.api.services.run.v1.model.Revision;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CloudrunUtils {
+
+  private static Logger logger = LoggerFactory.getLogger(CloudrunUtils.class);
+
+  public static List<Revision> queryAllRevisions(
+      String project, CloudrunNamedAccountCredentials credentials, Task task, String phase) {
+    task.updateStatus(phase, "Querying all revisions for project " + project + "...");
+    List<Revision> serverGroups = getRevisionsList(project, credentials);
+    if (serverGroups == null) {
+      serverGroups = new ArrayList<>();
+    }
+    return serverGroups;
+  }
+
+  private static Optional<CloudRun.Namespaces.Revisions.List> getRevisionsListRequest(
+      String project, CloudrunNamedAccountCredentials credentials) {
+    try {
+      return Optional.of(
+          credentials.getCloudRun().namespaces().revisions().list("namespaces/" + project));
+    } catch (IOException e) {
+      logger.error("Error creating revisions.list request: {}", e.getMessage());
+      return Optional.empty();
+    }
+  }
+
+  private static List<Revision> getRevisionsList(
+      String project, CloudrunNamedAccountCredentials credentials) {
+    Optional<CloudRun.Namespaces.Revisions.List> revisionsListRequest =
+        getRevisionsListRequest(project, credentials);
+    if (revisionsListRequest.isEmpty()) {
+      return new ArrayList<>();
+    }
+    try {
+      return revisionsListRequest.get().execute().getItems();
+    } catch (IOException e) {
+      logger.error("Error executing revisions.list request. {}", e.getMessage());
+      return new ArrayList<>();
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/CloudrunAtomicOperationConverterHelper.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/CloudrunAtomicOperationConverterHelper.java
new file mode 100644
index 00000000000..e3b69f8ef79
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/CloudrunAtomicOperationConverterHelper.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.AbstractCloudrunCredentialsDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+
+public class CloudrunAtomicOperationConverterHelper {
+  public static <T extends AbstractCloudrunCredentialsDescription> T convertDescription(
+      Map<String, Object> input,
+      AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials>
+          credentialsSupport,
+      Class<T> targetDescriptionType) {
+
+    // Accept the account under "accountName", "account", or "credentials"; first match wins.
+    Object accountName = null;
+    if (input.get("accountName") != null) {
+      accountName = input.get("accountName");
+    } else if (input.get("account") != null) {
+      accountName = input.get("account");
+    } else if (input.get("credentials") != null) {
+      accountName = input.get("credentials");
+    }
+
+    input.put("accountName", accountName);
+
+    if (input.get("accountName") != null) {
+      input.put(
+          "credentials",
+          credentialsSupport.getCredentialsObject((String) input.get("accountName")));
+      input.put("account", (String) input.get("accountName"));
+    } else {
+      throw new RuntimeException("Could not find Cloud Run account.");
+    }
+
+    Object credentials = input.remove("credentials");
+
+    T converted =
+        credentialsSupport
+            .getObjectMapper()
+            .copy()
+            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+            .convertValue(input, targetDescriptionType);
+
+    converted.setCredentials((CloudrunNamedAccountCredentials) credentials);
+    return converted;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeleteCloudrunLoadBalancerAtomicOperationConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeleteCloudrunLoadBalancerAtomicOperationConverter.java
new file mode 100644
index 00000000000..0a602e0a3e5
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeleteCloudrunLoadBalancerAtomicOperationConverter.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeleteCloudrunLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DeleteCloudrunLoadBalancerAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(AtomicOperations.DELETE_LOAD_BALANCER)
+@Component
+public class DeleteCloudrunLoadBalancerAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new DeleteCloudrunLoadBalancerAtomicOperation(convertDescription(input));
+  }
+
+  public DeleteCloudrunLoadBalancerDescription convertDescription(Map<String, Object> input) {
+    return CloudrunAtomicOperationConverterHelper.convertDescription(
+        input, this, DeleteCloudrunLoadBalancerDescription.class);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeployCloudrunAtomicOperationConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeployCloudrunAtomicOperationConverter.java
new file mode 100644
index 00000000000..d998f7c96ad
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeployCloudrunAtomicOperationConverter.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeployCloudrunDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DeployCloudrunAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import groovy.util.logging.Slf4j;
+import java.util.Map;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(AtomicOperations.CREATE_SERVER_GROUP)
+@Component
+@Slf4j
+public class DeployCloudrunAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+
+  @Autowired private ObjectMapper objectMapper;
+
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new DeployCloudrunAtomicOperation(convertDescription(input));
+  }
+
+  public DeployCloudrunDescription convertDescription(Map<String, Object> input) {
+
+    DeployCloudrunDescription description =
+        CloudrunAtomicOperationConverterHelper.convertDescription(
+            input, this, DeployCloudrunDescription.class);
+    if (input.get("application") != null) {
+      description.setApplication((String) input.get("application"));
+    }
+    return description;
+  }
+
+  @Override
+  public ObjectMapper getObjectMapper() {
+    return objectMapper;
+  }
+
+  public void setObjectMapper(ObjectMapper objectMapper) {
+    this.objectMapper = objectMapper;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DestroyCloudrunAtomicOperationConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DestroyCloudrunAtomicOperationConverter.java
new file mode 100644
index 00000000000..849d70a85e9
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DestroyCloudrunAtomicOperationConverter.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DestroyCloudrunDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DestroyCloudrunAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(AtomicOperations.DESTROY_SERVER_GROUP)
+@Component
+class DestroyCloudrunAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new DestroyCloudrunAtomicOperation(convertDescription(input));
+  }
+
+  public DestroyCloudrunDescription convertDescription(Map<String, Object> input) {
+    return CloudrunAtomicOperationConverterHelper.convertDescription(
+        input, this, DestroyCloudrunDescription.class);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DisableCloudrunAtomicOperationConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DisableCloudrunAtomicOperationConverter.java
new file mode 100644
index 00000000000..642215bd648
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DisableCloudrunAtomicOperationConverter.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.EnableDisableCloudrunDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DisableCloudrunAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(AtomicOperations.DISABLE_SERVER_GROUP)
+@Component
+public class DisableCloudrunAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new DisableCloudrunAtomicOperation(convertDescription(input));
+  }
+
+  public EnableDisableCloudrunDescription convertDescription(Map<String, Object> input) {
+    return CloudrunAtomicOperationConverterHelper.convertDescription(
+        input, this, EnableDisableCloudrunDescription.class);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/EnableCloudrunAtomicOperationConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/EnableCloudrunAtomicOperationConverter.java
new file mode 100644
index 00000000000..4f92f9795e5
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/EnableCloudrunAtomicOperationConverter.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.EnableDisableCloudrunDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.EnableCloudrunAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(AtomicOperations.ENABLE_SERVER_GROUP)
+@Component
+public class EnableCloudrunAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new EnableCloudrunAtomicOperation(convertDescription(input));
+  }
+
+  public EnableDisableCloudrunDescription convertDescription(Map<String, Object> input) {
+    return CloudrunAtomicOperationConverterHelper.convertDescription(
+        input, this, EnableDisableCloudrunDescription.class);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/UpsertCloudrunLoadBalancerAtomicOperationConverter.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/UpsertCloudrunLoadBalancerAtomicOperationConverter.java
new file mode 100644
index 00000000000..cfaca961a88
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/UpsertCloudrunLoadBalancerAtomicOperationConverter.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.UpsertCloudrunLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.UpsertCloudrunLoadBalancerAtomicOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import com.netflix.spinnaker.orchestration.OperationDescription;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@CloudrunOperation(AtomicOperations.UPSERT_LOAD_BALANCER)
+@Component
+public class UpsertCloudrunLoadBalancerAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<CloudrunNamedAccountCredentials> {
+
+  @Override
+  public AtomicOperation convertOperation(Map<String, Object> input) {
+    return new UpsertCloudrunLoadBalancerAtomicOperation(
+        (UpsertCloudrunLoadBalancerDescription) convertDescription(input));
+  }
+
+  @Override
+  public OperationDescription convertDescription(Map<String, Object> input) {
+    // TODO
+    return CloudrunAtomicOperationConverterHelper.convertDescription(
+        input, this, UpsertCloudrunLoadBalancerDescription.class);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/AbstractCloudrunCredentialsDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/AbstractCloudrunCredentialsDescription.java
new file mode 100644
index 00000000000..23c2a5c66d2
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/AbstractCloudrunCredentialsDescription.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable; + +public abstract class AbstractCloudrunCredentialsDescription implements CredentialsNameable { + + private String account; + + private CloudrunNamedAccountCredentials credentials; + + @Override + public String getAccount() { + return account; + } + + public void setAccount(String account) { + this.account = account; + } + + @Override + public CloudrunNamedAccountCredentials getCredentials() { + return credentials; + } + + public void setCredentials(CloudrunNamedAccountCredentials credentials) { + this.credentials = credentials; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/CloudrunAllocationDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/CloudrunAllocationDescription.java new file mode 100644 index 00000000000..04b6ba50e78 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/CloudrunAllocationDescription.java @@ -0,0 +1,47 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +public class CloudrunAllocationDescription { + private String revisionName; + private Integer percent; + private String locatorType; + + public String getRevisionName() { + return revisionName; + } + + public void setRevisionName(String revisionName) { + this.revisionName = revisionName; + } + + public Integer getPercent() { + return percent; + } + + public void setPercent(Integer percent) { + this.percent = percent; + } + + public String getLocatorType() { + return locatorType; + } + + public void setLocatorType(String locatorType) { + this.locatorType = locatorType; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/CloudrunTrafficSplitDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/CloudrunTrafficSplitDescription.java new file mode 100644 index 00000000000..2307bd11f1c --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/CloudrunTrafficSplitDescription.java @@ -0,0 +1,32 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description;
+
+import java.util.List;
+
+public class CloudrunTrafficSplitDescription {
+  public List<CloudrunAllocationDescription> getAllocationDescriptions() {
+    return allocationDescriptions;
+  }
+
+  public void setAllocationDescriptions(
+      List<CloudrunAllocationDescription> allocationDescriptions) {
+    this.allocationDescriptions = allocationDescriptions;
+  }
+
+  private List<CloudrunAllocationDescription> allocationDescriptions;
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeleteCloudrunLoadBalancerDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeleteCloudrunLoadBalancerDescription.java
new file mode 100644
index 00000000000..fb11e0d05b0
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeleteCloudrunLoadBalancerDescription.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description;
+
+public class DeleteCloudrunLoadBalancerDescription extends AbstractCloudrunCredentialsDescription {
+  private String accountName;
+  private String loadBalancerName;
+  private String region;
+
+  public String getAccountName() {
+    return accountName;
+  }
+
+  public void setAccountName(String accountName) {
+    this.accountName = accountName;
+  }
+
+  public String getLoadBalancerName() {
+    return loadBalancerName;
+  }
+
+  public void setLoadBalancerName(String loadBalancerName) {
+    this.loadBalancerName = loadBalancerName;
+  }
+
+  public String getRegion() {
+    return region;
+  }
+
+  public void setRegion(String region) {
+    this.region = region;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeployCloudrunConfigDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeployCloudrunConfigDescription.java
new file mode 100644
index 00000000000..ee5825e4450
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeployCloudrunConfigDescription.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class DeployCloudrunConfigDescription extends AbstractCloudrunCredentialsDescription { + private String accountName; + private Artifact cronArtifact; + private Artifact dispatchArtifact; + private Artifact indexArtifact; + private Artifact queueArtifact; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeployCloudrunDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeployCloudrunDescription.java new file mode 100644 index 00000000000..67d1a8cd887 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DeployCloudrunDescription.java @@ -0,0 +1,131 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +import com.netflix.spinnaker.clouddriver.deploy.DeployDescription; +import groovy.transform.AutoClone; +import groovy.transform.Canonical; +import java.util.List; + +@AutoClone +@Canonical +public class DeployCloudrunDescription extends AbstractCloudrunCredentialsDescription + implements DeployDescription { + + String accountName; + String region; + String application; + List configFiles; + Boolean promote; + Boolean stopPreviousVersion; + String applicationDirectoryRoot; + Boolean suppressVersionString; + + String stack; + + String freeFormDetails; + + String versionName; + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public String getApplication() { + return application; + } + + public void setApplication(String application) { + this.application = application; + } + + public List getConfigFiles() { + return configFiles; + } + + public void setConfigFiles(List configFiles) { + this.configFiles = configFiles; + } + + public Boolean getPromote() { + return promote; + } + + public void setPromote(Boolean promote) { + this.promote = promote; + } + + public Boolean getStopPreviousVersion() { + return stopPreviousVersion; + } + + public void setStopPreviousVersion(Boolean stopPreviousVersion) { + this.stopPreviousVersion = stopPreviousVersion; + } + + public String getApplicationDirectoryRoot() { + return applicationDirectoryRoot; + } + + public void setApplicationDirectoryRoot(String applicationDirectoryRoot) { + this.applicationDirectoryRoot = applicationDirectoryRoot; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + public Boolean getSuppressVersionString() { + return suppressVersionString; + } + + public void setSuppressVersionString(Boolean suppressVersionString) { + this.suppressVersionString = suppressVersionString; + } + + public String getStack() { + return stack; + } + + public void setStack(String stack) { + this.stack = stack; + } + + public String getFreeFormDetails() { + return freeFormDetails; + } + + public void setFreeFormDetails(String freeFormDetails) { + this.freeFormDetails = freeFormDetails; + } + + public String getVersionName() { + return versionName; + } + + public void setVersionName(String versionName) { + this.versionName = versionName; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DestroyCloudrunDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DestroyCloudrunDescription.java new file mode 100644 index 00000000000..ace0aa32313 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/DestroyCloudrunDescription.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +public class DestroyCloudrunDescription extends AbstractCloudrunCredentialsDescription { + + String accountName; + String serverGroupName; + String region; + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public String getServerGroupName() { + return serverGroupName; + } + + public void setServerGroupName(String serverGroupName) { + this.serverGroupName = serverGroupName; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/EnableDisableCloudrunDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/EnableDisableCloudrunDescription.java new file mode 100644 index 00000000000..ec9e748cf73 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/EnableDisableCloudrunDescription.java @@ -0,0 +1,70 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait; +import java.util.Collection; +import java.util.Collections; + +public class EnableDisableCloudrunDescription extends AbstractCloudrunCredentialsDescription + implements EnableDisableDescriptionTrait { + + String accountName; + String serverGroupName; + String region; + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + @Override + public void setServerGroupName(String serverGroupName) { + this.serverGroupName = serverGroupName; + } + + @Override + public Collection getServerGroupNames() { + return Collections.EMPTY_LIST; + } + + public Integer getDesiredPercentage() { + throw new IllegalArgumentException( + "The selected provider hasn't implemented enabling/disabling by percentage yet"); + } + + public void setDesiredPercentage(Integer desiredPercentage) { + throw new IllegalArgumentException( + "The selected provider hasn't implemented enabling/disabling by percentage yet"); + } + + @Override + public String getServerGroupName() { + return this.serverGroupName; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/StartStopCloudrunDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/StartStopCloudrunDescription.java new file mode 100644 index 00000000000..a1ce643627e --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/StartStopCloudrunDescription.java @@ -0,0 +1,38 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +public class StartStopCloudrunDescription extends AbstractCloudrunCredentialsDescription { + String accountName; + String serverGroupName; + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public String getServerGroupName() { + return serverGroupName; + } + + public void setServerGroupName(String serverGroupName) { + this.serverGroupName = serverGroupName; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/UpsertCloudrunLoadBalancerDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/UpsertCloudrunLoadBalancerDescription.java new file mode 100644 index 00000000000..c6240b04e1c --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/description/UpsertCloudrunLoadBalancerDescription.java @@ -0,0 +1,72 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.description; + +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunTrafficSplit; + +public class UpsertCloudrunLoadBalancerDescription extends AbstractCloudrunCredentialsDescription { + private String accountName; + private String loadBalancerName; + private String region; + private CloudrunTrafficSplit split; + private CloudrunTrafficSplitDescription splitDescription; + private Boolean migrateTraffic; + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public String getLoadBalancerName() { + return loadBalancerName; + } + + public void setLoadBalancerName(String loadBalancerName) { + this.loadBalancerName = loadBalancerName; + } + + public CloudrunTrafficSplit getSplit() { + return split; + } + + public CloudrunTrafficSplitDescription getSplitDescription() { + return splitDescription; + } + + public void setSplitDescription(CloudrunTrafficSplitDescription splitDescription) { + this.splitDescription = splitDescription; + } + + public Boolean getMigrateTraffic() { + return migrateTraffic; + } + + public void setMigrateTraffic(Boolean migrateTraffic) { + this.migrateTraffic = migrateTraffic; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/exception/CloudrunDescriptionConversionException.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/exception/CloudrunDescriptionConversionException.java new file mode 100644 index 00000000000..9bd45b217cf --- /dev/null +++ 
b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/exception/CloudrunDescriptionConversionException.java @@ -0,0 +1,22 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception; + +import groovy.transform.InheritConstructors; + +@InheritConstructors +public class CloudrunDescriptionConversionException extends RuntimeException {} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/exception/CloudrunOperationException.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/exception/CloudrunOperationException.java new file mode 100644 index 00000000000..9f8301a39a2 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/exception/CloudrunOperationException.java @@ -0,0 +1,26 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception; + +public class CloudrunOperationException extends RuntimeException { + public CloudrunOperationException(String message) { + super(message); + } + + public CloudrunOperationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/CloudrunAtomicOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/CloudrunAtomicOperation.java new file mode 100644 index 00000000000..4a6880f5446 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/CloudrunAtomicOperation.java @@ -0,0 +1,24 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import org.springframework.beans.factory.annotation.Autowired; + +public abstract class CloudrunAtomicOperation implements AtomicOperation { + @Autowired Registry registry; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeleteCloudrunLoadBalancerAtomicOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeleteCloudrunLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..e9a174fa9ec --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeleteCloudrunLoadBalancerAtomicOperation.java @@ -0,0 +1,93 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeleteCloudrunLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunLoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.ArrayList; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeleteCloudrunLoadBalancerAtomicOperation implements AtomicOperation { + private static final String BASE_PHASE = "DELETE_LOAD_BALANCER"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private final DeleteCloudrunLoadBalancerDescription description; + @Autowired CloudrunJobExecutor jobExecutor; + + @Autowired CloudrunLoadBalancerProvider provider; + + public DeleteCloudrunLoadBalancerAtomicOperation( + DeleteCloudrunLoadBalancerDescription description) { + this.description = description; + } + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + "Initializing deletion of load balancer " + description.getLoadBalancerName() + "..."); + + String project = description.getCredentials().getProject(); + String loadBalancerName = description.getLoadBalancerName(); + CloudrunLoadBalancer loadBalancer = + provider.getLoadBalancer(description.getAccountName(), loadBalancerName); + if (loadBalancer == null) { + throw new CloudrunOperationException( + "Failed to get load balancer by account " + + description.getAccountName() + + " and load balancer name : " + + loadBalancerName); + } else { + String region = 
loadBalancer.getRegion();
+      // Delete the backing Cloud Run service through the gcloud CLI.
+      List<String> deployCommand = new ArrayList<>();
+      deployCommand.add("gcloud");
+      deployCommand.add("run");
+      deployCommand.add("services");
+      deployCommand.add("delete");
+      deployCommand.add(loadBalancerName);
+      deployCommand.add("--quiet");
+      deployCommand.add("--region=" + region);
+      deployCommand.add("--project=" + project);
+
+      try {
+        jobExecutor.runCommand(deployCommand);
+      } catch (Exception e) {
+        throw new CloudrunOperationException(
+            "Failed to delete load balancer with command "
+                + deployCommand
+                + ", exception: "
+                + e.getMessage());
+      }
+      getTask()
+          .updateStatus(BASE_PHASE, "Successfully deleted load balancer " + loadBalancerName + ".");
+      return null;
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeployCloudrunAtomicOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeployCloudrunAtomicOperation.java
new file mode 100644
index 00000000000..4242f3e2c91
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeployCloudrunAtomicOperation.java
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.CloudrunServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeployCloudrunDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException; +import com.netflix.spinnaker.clouddriver.cloudrun.model.*; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunLoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeployCloudrunAtomicOperation implements AtomicOperation { + + private static final String BASE_PHASE = "DEPLOY"; + + private static final Logger log = LoggerFactory.getLogger(DeployCloudrunAtomicOperation.class); + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Autowired CloudrunJobExecutor jobExecutor; + + @Autowired CloudrunLoadBalancerProvider provider; + + DeployCloudrunDescription description; + + private final ObjectMapper objectMapper = + new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + private final ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory()); + + private CloudrunYmlData ymlData = new CloudrunYmlData(); + + public DeployCloudrunAtomicOperation(DeployCloudrunDescription description) { + this.description = description; + } + + public String deploy(String repositoryPath) { + String project = description.getCredentials().getProject(); + String applicationDirectoryRoot = description.getApplicationDirectoryRoot(); + CloudrunServerGroupNameResolver serverGroupNameResolver = + new CloudrunServerGroupNameResolver( + project, description.getRegion(), description.getCredentials()); + String clusterName = + serverGroupNameResolver.getClusterName( + description.getApplication(), description.getStack(), description.getFreeFormDetails()); + String versionName = + serverGroupNameResolver.resolveNextServerGroupName( + description.getApplication(), + description.getStack(), + description.getFreeFormDetails(), + description.getSuppressVersionString()); + List configFiles = description.getConfigFiles(); + try { + populateCloudrunYmlData(configFiles); + } catch (Exception e) { + log.error("Failed to populate the cloudrun yml data ", e); + throw new CloudrunOperationException( + "Failed to populate the cloudrun yml data " + e.getMessage()); + } + List modConfigFiles = + insertSpinnakerAppNameServiceNameVersionName(configFiles, clusterName, versionName); + List writtenFullConfigFilePaths = + writeConfigFiles(modConfigFiles, repositoryPath, applicationDirectoryRoot); + String region = 
description.getRegion();
+
+    List<String> deployCommand = new ArrayList<>();
+    deployCommand.add("gcloud");
+    deployCommand.add("run");
+    deployCommand.add("services");
+    deployCommand.add("replace");
+    // NOTE: the written config file paths are joined with an empty separator into a single
+    // argument, which only yields a usable path when exactly one config file is written.
+    deployCommand.add(writtenFullConfigFilePaths.stream().collect(Collectors.joining("")));
+    deployCommand.add("--region=" + region);
+    deployCommand.add("--project=" + project);
+
+    getTask().updateStatus(BASE_PHASE, "Deploying version " + versionName + "...");
+    try {
+      jobExecutor.runCommand(deployCommand);
+    } catch (Exception e) {
+      throw new CloudrunOperationException(
+          "Failed to deploy to Cloud Run with command "
+              + deployCommand
+              + ", exception: "
+              + e.getMessage());
+    } finally {
+      deleteFiles(writtenFullConfigFilePaths);
+    }
+    getTask().updateStatus(BASE_PHASE, "Done deploying version " + versionName + "...");
+    return versionName;
+  }
+
+  private void populateCloudrunYmlData(List<String> configFiles) throws JsonProcessingException {
+
+    for (String configFile : configFiles) {
+      CloudrunService yamlObj = yamlReader.readValue(configFile, CloudrunService.class);
+      if (yamlObj != null && yamlObj.getMetadata() != null && yamlObj.getSpec() != null) {
+        ymlData.setKind(yamlObj.getKind());
+        ymlData.setApiVersion(yamlObj.getApiVersion());
+
+        String metaDataJson = objectMapper.writeValueAsString(yamlObj.getMetadata());
+        CloudrunMetaData cloudrunMetaData =
+            objectMapper.readValue(metaDataJson, CloudrunMetaData.class);
+        ymlData.setMetadata(cloudrunMetaData);
+
+        String specJson = objectMapper.writeValueAsString(yamlObj.getSpec());
+        CloudrunSpec spec = objectMapper.readValue(specJson, CloudrunSpec.class);
+        ymlData.setSpec(spec);
+      }
+    }
+  }
+
+  private List<String> insertSpinnakerAppNameServiceNameVersionName(
+      List<String> configFiles, String clusterName, String versionName) {
+
+    return configFiles.stream()
+        .map(
+            (configFile) -> {
+              try {
+                CloudrunService yamlObj = yamlReader.readValue(configFile, CloudrunService.class);
+                if (yamlObj != null && yamlObj.getMetadata() != null) {
+                  CloudrunMetaData metadata = ymlData.getMetadata();
+                  CloudrunSpec spec = ymlData.getSpec();
+                  if (metadata != null && spec != null) {
+                    if (spec.getTemplate() != null && spec.getTemplate().getMetadata() != null) {
+                      CloudrunSpecTemplateMetadata specMetadata = spec.getTemplate().getMetadata();
+                      specMetadata.setName(versionName);
+                      metadata.setName(clusterName);
+                      CloudrunLoadBalancer loadBalancer =
+                          provider.getLoadBalancer(description.getAccountName(), clusterName);
+                      if (loadBalancer != null) {
+                        insertTrafficPercent(spec, loadBalancer);
+                      }
+                    }
+                  }
+                  CloudrunMetadataAnnotations annotations = metadata.getAnnotations();
+                  CloudrunMetadataLabels labels = metadata.getLabels();
+                  if (annotations == null) {
+                    CloudrunMetadataAnnotations metadataAnnotations =
+                        new CloudrunMetadataAnnotations();
+                    metadataAnnotations.setSpinnakerApplication(description.getApplication());
+                    metadata.setAnnotations(metadataAnnotations);
+                    description.setRegion(labels.getCloudGoogleapisComLocation());
+                  } else if (labels != null) {
+                    annotations.setSpinnakerApplication(description.getApplication());
+                    description.setRegion(labels.getCloudGoogleapisComLocation());
+                  }
+                }
+                return yamlReader.writeValueAsString(ymlData);
+              } catch (JsonProcessingException e) {
+                throw new RuntimeException(e);
+              }
+            })
+        .collect(Collectors.toList());
+  }
+
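+  // Copies any traffic-percent targets from the service's existing split into the new
+  // spec, so that a redeploy preserves the current traffic allocation.
+  private void insertTrafficPercent(CloudrunSpec spec, CloudrunLoadBalancer loadBalancer) {
+
+    List<CloudrunSpecTraffic> trafficTargets = new ArrayList<>();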
+    if (loadBalancer.getSplit() != null && loadBalancer.getSplit().getTrafficTargets() != null) {
+      loadBalancer
+          .getSplit()
+          .getTrafficTargets()
+          .forEach(
+              trafficTarget -> {
+                CloudrunSpecTraffic existingTrafficMap = new CloudrunSpecTraffic();
+                existingTrafficMap.setPercent(trafficTarget.getPercent());
+                existingTrafficMap.setRevisionName(trafficTarget.getRevisionName());
+                trafficTargets.add(existingTrafficMap);
+              });
+      spec.setTraffic(trafficTargets.toArray(new CloudrunSpecTraffic[0]));
+    }
+  }
+
+  @Override
+  public DeploymentResult operate(List priorOutputs) {
+
+    String baseDir = description.getCredentials().getLocalRepositoryDirectory();
+    String directoryPath = getFullDirectoryPath(baseDir);
+    String serviceAccount = description.getCredentials().getServiceAccountEmail();
+    String deployPath = directoryPath;
+    String newVersionName;
+    getTask().updateStatus(BASE_PHASE, "Initializing creation of version...");
+    newVersionName = deploy(deployPath);
+    String region = description.getRegion();
+    DeploymentResult result = new DeploymentResult();
+    StringBuffer sb = new StringBuffer();
+    sb.append(region).append(":").append(newVersionName);
+    result.setServerGroupNames(Arrays.asList(sb.toString()));
+    Map<String, String> namesByRegion = new HashMap<>();
+    namesByRegion.put(region, newVersionName);
+    result.setServerGroupNameByRegion(namesByRegion);
+    log.info(" region in deploy operation : " + region);
+    log.info(" new version name in deploy operation : " + newVersionName);
+    return result;
+  }
+
+  public static void deleteFiles(List<String> paths) {
+    paths.forEach(
+        path -> {
+          try {
+            new File(path).delete();
+          } catch (Exception e) {
+            throw new CloudrunOperationException(
+                "Could not delete config file: " + e.getMessage());
+          }
+        });
+  }
+
+  public static List<String> writeConfigFiles(
+      List<String> configFiles, String repositoryPath, String applicationDirectoryRoot) {
+    if (configFiles == null) {
+      return Collections.emptyList();
+    } else {
+      return configFiles.stream()
+          .map(
+              (configFile) -> {
+                Path path =
+                    generateRandomRepositoryFilePath(repositoryPath, applicationDirectoryRoot);
+                try {
+                  File targetFile = new File(path.toString());
+                  FileUtils.writeStringToFile(targetFile, configFile, StandardCharsets.UTF_8);
+                } catch (Exception e) {
+                  throw new CloudrunOperationException(
+                      "Could not write config file: " + e.getMessage());
+                }
+                return path.toString();
+              })
+          .collect(Collectors.toList());
+    }
+  }
+
+  public static Path generateRandomRepositoryFilePath(
+      String repositoryPath, String applicationDirectoryRoot) {
+    String name = UUID.randomUUID().toString();
+    String filePath = applicationDirectoryRoot != null ? applicationDirectoryRoot : ".";
+    StringBuilder sb = new StringBuilder(name).append(".yaml");
+    return Paths.get(repositoryPath, filePath, sb.toString());
+  }
+
+  public static String getFullDirectoryPath(String localRepositoryDirectory) {
+    return Paths.get(localRepositoryDirectory).toString();
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DestroyCloudrunAtomicOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DestroyCloudrunAtomicOperation.java
new file mode 100644
index 00000000000..8935f556d46
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DestroyCloudrunAtomicOperation.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DestroyCloudrunDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunServerGroup; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunClusterProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import groovy.util.logging.Slf4j; +import java.util.ArrayList; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; + +@Slf4j +public class DestroyCloudrunAtomicOperation extends CloudrunAtomicOperation { + + private static final String BASE_PHASE = "DESTROY_SERVER_GROUP"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private final DestroyCloudrunDescription description; + + @Autowired CloudrunClusterProvider cloudrunClusterProvider; + + @Autowired CloudrunJobExecutor jobExecutor; + + public DestroyCloudrunAtomicOperation(DestroyCloudrunDescription description) { + this.description = description; + } + + @Override + public Void operate(List priorOutputs) { + + getTask() + .updateStatus( + BASE_PHASE, + "Initializing destruction of server group " + description.getServerGroupName()); + getTask() + .updateStatus( + BASE_PHASE, + "Looking up server group with serverGroupName " + description.getServerGroupName()); + CloudrunServerGroup serverGroup = + cloudrunClusterProvider.getServerGroup( + description.getAccountName(), + description.getRegion(), + description.getServerGroupName()); + if (serverGroup == null) { + throw new CloudrunOperationException( + "Failed to get server group by account " + + description.getAccountName() + + " , region " + + description.getRegion() + + " and server group name : " + + description.getServerGroupName()); + } else { + String region = description.getRegion(); + String project = description.getCredentials().getProject(); + List deployCommand = new ArrayList<>(); + deployCommand.add("gcloud"); + deployCommand.add("run"); + deployCommand.add("revisions"); + deployCommand.add("delete"); + deployCommand.add(description.getServerGroupName()); + deployCommand.add("--quiet"); + deployCommand.add("--region=" + region); + deployCommand.add("--project=" + project); + try { + jobExecutor.runCommand(deployCommand); + } catch (Exception e) { + throw new CloudrunOperationException( + "Failed to delete the server group " + + description.getServerGroupName() + + " with command " + + deployCommand + + "exception " + + e.getMessage()); + } + } + getTask() + .updateStatus( + BASE_PHASE, "Successfully destroyed server group " + description.getServerGroupName()); + return null; + } +} diff --git 
a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DisableCloudrunAtomicOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DisableCloudrunAtomicOperation.java new file mode 100644 index 00000000000..303482c2165 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DisableCloudrunAtomicOperation.java @@ -0,0 +1,85 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.EnableDisableCloudrunDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunServerGroup; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunClusterProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent; +import java.util.Collection; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; + +public class DisableCloudrunAtomicOperation extends CloudrunAtomicOperation { + + private static final String BASE_PHASE = "DISABLE_SERVER_GROUP"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + public EnableDisableCloudrunDescription getDescription() { + return description; + } + + private final EnableDisableCloudrunDescription description; + + @Autowired CloudrunClusterProvider cloudrunClusterProvider; + + public DisableCloudrunAtomicOperation(EnableDisableCloudrunDescription description) { + this.description = description; + } + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + "Initializing disabling of server group " + description.getServerGroupName()); + getTask() + .updateStatus( + BASE_PHASE, + "Looking up server group with serverGroupName " + description.getServerGroupName()); + CloudrunServerGroup serverGroup = + cloudrunClusterProvider.getServerGroup( + description.getAccountName(), + description.getRegion(), + description.getServerGroupName()); + if (serverGroup == null) { + throw new CloudrunOperationException( + "Failed to get server group by account " + + description.getAccountName() + + " , region " + + description.getRegion() + + " and server group name : " + + description.getServerGroupName()); + } + getTask() + .updateStatus( + BASE_PHASE, + "Successfully disabled the server group : " + description.getServerGroupName()); + return null; + } + + @Override + public Collection getEvents() { + return super.getEvents(); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/EnableCloudrunAtomicOperation.java 
b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/EnableCloudrunAtomicOperation.java new file mode 100644 index 00000000000..b197a010c34 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/EnableCloudrunAtomicOperation.java @@ -0,0 +1,42 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.EnableDisableCloudrunDescription; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import java.util.List; + +public class EnableCloudrunAtomicOperation extends CloudrunAtomicOperation { + private static final String BASE_PHASE = "ENABLE_SERVER_GROUP"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private final EnableDisableCloudrunDescription description; + + public EnableCloudrunAtomicOperation(EnableDisableCloudrunDescription description) { + this.description = description; + } + + @Override + public Void operate(List priorOutputs) { + + return null; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/UpsertCloudrunLoadBalancerAtomicOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/UpsertCloudrunLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..6b9895f8414 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/UpsertCloudrunLoadBalancerAtomicOperation.java @@ -0,0 +1,133 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.CloudrunTrafficSplitDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.UpsertCloudrunLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import groovy.util.logging.Slf4j;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import lombok.Getter;
+import org.springframework.beans.factory.annotation.Autowired;
+
+@Slf4j
+public class UpsertCloudrunLoadBalancerAtomicOperation extends CloudrunAtomicOperation {
+
+  @Getter private static final String BASE_PHASE = "UPSERT_LOAD_BALANCER";
+  @Getter private final UpsertCloudrunLoadBalancerDescription description;
+  @Autowired CloudrunJobExecutor jobExecutor;
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  public UpsertCloudrunLoadBalancerAtomicOperation(
+      UpsertCloudrunLoadBalancerDescription description, boolean retryApiCall) {
+    this(description);
+  }
+
+  public UpsertCloudrunLoadBalancerAtomicOperation(
+      UpsertCloudrunLoadBalancerDescription description) {
+    this.description = description;
+  }
+
+  @Override
+  public Map operate(List priorOutputs) {
+
+    getTask()
+        .updateStatus(
+            BASE_PHASE,
+            "Initializing upsert of load balancer "
+                + description.getLoadBalancerName()
+                + " "
+                + "in "
+                + description.getRegion()
+                + "...");
+
+    String region = description.getRegion();
+    String project = description.getCredentials().getProject();
+    List<String> deployCommand = new ArrayList<>();
+    deployCommand.add("gcloud");
+    deployCommand.add("run");
+    deployCommand.add("services");
+    deployCommand.add("update-traffic");
+    deployCommand.add(description.getLoadBalancerName());
+    deployCommand.add("--to-revisions=" + appendRevisionNameTrafficSplit(description));
+    deployCommand.add("--region=" + region);
+    deployCommand.add("--project=" + project);
+
+    try {
+      jobExecutor.runCommand(deployCommand);
+    } catch (Exception e) {
+      throw new CloudrunOperationException(
+          "Failed to update traffic for revisions with command "
+              + deployCommand
+              + ", exception: "
+              + e.getMessage());
+    }
+    getTask()
+        .updateStatus(
+            BASE_PHASE,
+            "Done upserting "
+                + description.getLoadBalancerName()
+                + " in "
+                + description.getRegion()
+                + ".");
+    LinkedHashMap<String, LinkedHashMap<String, LinkedHashMap<String, String>>> loadBalancers =
+        new LinkedHashMap<>(1);
+    LinkedHashMap<String, LinkedHashMap<String, String>> regionMap = new LinkedHashMap<>(1);
+    LinkedHashMap<String, String> loadBalancerNameMap = new LinkedHashMap<>(1);
+    loadBalancerNameMap.put("name", description.getLoadBalancerName());
+    regionMap.put(description.getRegion(), loadBalancerNameMap);
+    loadBalancers.put("loadBalancers", regionMap);
+    return loadBalancers;
+  }
+
+  private String appendRevisionNameTrafficSplit(UpsertCloudrunLoadBalancerDescription description) {
+
+    StringBuilder builder = new StringBuilder();
+    if (description.getSplitDescription() != null) {
+      CloudrunTrafficSplitDescription splitDesc = description.getSplitDescription();
+      AtomicInteger counter = new AtomicInteger();
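+      // gcloud's --to-revisions flag takes comma-separated "REVISION=PERCENT" pairs
+      // (for example "app-v001=60,app-v002=40"); build that string from the allocations.
+      if (splitDesc != null && !(splitDesc.getAllocationDescriptions().isEmpty())) {
+        splitDesc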
+            .getAllocationDescriptions()
+            .forEach(
+                trafficSplit -> {
+                  builder.append(trafficSplit.getRevisionName());
+                  builder.append("=");
+                  builder.append(trafficSplit.getPercent());
+                  if (!(counter.get() == (splitDesc.getAllocationDescriptions().size() - 1))) {
+                    builder.append(",");
+                  }
+                  counter.getAndIncrement();
+                });
+      }
+    }
+    return builder.toString();
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunConfigDescriptionValidator.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunConfigDescriptionValidator.java
new file mode 100644
index 00000000000..c95df4c386c
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunConfigDescriptionValidator.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeployCloudrunConfigDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component("deployCloudrunConfigDescriptionValidator")
+public class DeployCloudrunConfigDescriptionValidator
+    extends DescriptionValidator<DeployCloudrunConfigDescription> {
+
+  @Autowired private CredentialsRepository<CloudrunNamedAccountCredentials> credentialsRepository;
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      DeployCloudrunConfigDescription description,
+      ValidationErrors errors) {
+    StandardCloudrunAttributeValidator helper =
+        new StandardCloudrunAttributeValidator(
+            "deployCloudrunConfigAtomicOperationDescription", errors);
+    helper.validateCredentials(description.getAccountName(), credentialsRepository);
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunDescriptionValidator.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunDescriptionValidator.java
new file mode 100644
index 00000000000..6091559e8e1
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunDescriptionValidator.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.validators; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeployCloudrunDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component("deployCloudrunAtomicOperationDescription") +public class DeployCloudrunDescriptionValidator + extends DescriptionValidator { + @Autowired CredentialsRepository credentialsRepository; + + @Override + public void validate( + List priorDescriptions, DeployCloudrunDescription description, ValidationErrors errors) { + StandardCloudrunAttributeValidator helper = + new StandardCloudrunAttributeValidator("deployCloudrunAtomicOperationDescription", errors); + + if (!helper.validateCredentials(description.getAccountName(), credentialsRepository)) { + return; + } + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/StandardCloudrunAttributeValidator.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/StandardCloudrunAttributeValidator.java new file mode 100644 index 00000000000..83f6abe1dcc --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/StandardCloudrunAttributeValidator.java @@ -0,0 +1,55 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+
+public class StandardCloudrunAttributeValidator {
+  String context;
+  ValidationErrors errors;
+
+  public StandardCloudrunAttributeValidator(String context, ValidationErrors errors) {
+    this.context = context;
+    this.errors = errors;
+  }
+
+  public boolean validateCredentials(
+      String credentials,
+      CredentialsRepository<CloudrunNamedAccountCredentials> credentialsRepository) {
+    boolean result = validateNotEmpty(credentials, "account");
+    if (result) {
+      CloudrunNamedAccountCredentials cloudrunCredentials =
+          credentialsRepository.getOne(credentials);
+      if (cloudrunCredentials == null) {
+        errors.rejectValue(context + ".account", context + ".account.notFound");
+        result = false;
+      }
+    }
+    return result;
+  }
+
+  private boolean validateNotEmpty(Object value, String attribute) {
+    if (value != null && !"".equals(value)) {
+      return true;
+    } else {
+      errors.rejectValue(context + "." + attribute, context + "." + attribute + ".empty");
+      return false;
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunCleanupArtifactsDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunCleanupArtifactsDescription.java
new file mode 100644
index 00000000000..4a49900cbfd
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunCleanupArtifactsDescription.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+  private boolean validateNotEmpty(Object value, String attribute) {
+    // Use equals() rather than ==/!= for the empty-string check; reference comparison
+    // only works for interned literals.
+    if (value != null && !value.equals("")) {
+      return true;
+    } else {
+      errors.rejectValue(context + "." + attribute, context + "." + attribute + ".empty");
+      return false;
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunCleanupArtifactsDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunCleanupArtifactsDescription.java
new file mode 100644
index 00000000000..4a49900cbfd
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunCleanupArtifactsDescription.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.description.manifest;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.AbstractCloudrunCredentialsDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunService;
+import java.util.HashSet;
+import java.util.Set;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+
+@Data
+@AllArgsConstructor
+@NoArgsConstructor
+@EqualsAndHashCode(callSuper = true)
+public class CloudrunCleanupArtifactsDescription extends AbstractCloudrunCredentialsDescription {
+  Set<CloudrunService> manifests = new HashSet<>();
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunDeployManifestDescription.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunDeployManifestDescription.java
new file mode 100644
index 00000000000..80ef4b16c8a
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/description/manifest/CloudrunDeployManifestDescription.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.description.manifest;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.AbstractCloudrunCredentialsDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunService;
+import com.netflix.spinnaker.clouddriver.deploy.DeployDescription;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import com.netflix.spinnaker.moniker.Moniker;
+import java.util.List;
+import java.util.Map;
+import lombok.EqualsAndHashCode;
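+
+/**
+ * Inputs for the Cloud Run deploy-manifest operation: the service manifests to apply, the
+ * required/optional artifacts to bind into them, traffic-management settings, and the naming
+ * fields (application, stack, details, version) that label the resulting revision.
+ */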
+@EqualsAndHashCode(callSuper = true)
+public class CloudrunDeployManifestDescription extends AbstractCloudrunCredentialsDescription
+    implements DeployDescription {
+
+  private boolean enableTraffic = false;
+
+  private List<Artifact> optionalArtifacts;
+
+  private String cloudProvider;
+
+  private List<CloudrunService> manifests;
+
+  private Map<String, Object> trafficManagement;
+
+  private boolean enableArtifactBinding = true;
+
+  private Moniker moniker;
+
+  private String source;
+
+  private String region;
+
+  private String stack;
+
+  private String details;
+
+  private String versionName;
+
+  private String application;
+
+  private String account;
+
+  private String accountName;
+
+  private boolean skipExpressionEvaluator;
+
+  private List<Artifact> requiredArtifacts;
+
+  public String getAccountName() {
+    return accountName;
+  }
+
+  public void setAccountName(String accountName) {
+    this.accountName = accountName;
+  }
+
+  public String getRegion() {
+    return region;
+  }
+
+  public void setRegion(String region) {
+    this.region = region;
+  }
+
+  public boolean isEnableTraffic() {
+    return enableTraffic;
+  }
+
+  public void setEnableTraffic(boolean enableTraffic) {
+    this.enableTraffic = enableTraffic;
+  }
+
+  public List<Artifact> getOptionalArtifacts() {
+    return optionalArtifacts;
+  }
+
+  public void setOptionalArtifacts(List<Artifact> optionalArtifacts) {
+    this.optionalArtifacts = optionalArtifacts;
+  }
+
+  public String getCloudProvider() {
+    return cloudProvider;
+  }
+
+  public void setCloudProvider(String cloudProvider) {
+    this.cloudProvider = cloudProvider;
+  }
+
+  public List<CloudrunService> getManifests() {
+    return manifests;
+  }
+
+  public void setManifests(List<CloudrunService> manifests) {
+    this.manifests = manifests;
+  }
+
+  public Map<String, Object> getTrafficManagement() {
+    return trafficManagement;
+  }
+
+  public void setTrafficManagement(Map<String, Object> trafficManagement) {
+    this.trafficManagement = trafficManagement;
+  }
+
+  public Moniker getMoniker() {
+    return moniker;
+  }
+
+  public void setMoniker(Moniker moniker) {
+    this.moniker = moniker;
+  }
+
+  public String getSource() {
+    return source;
+  }
+
+  public void setSource(String source) {
+    this.source = source;
+  }
+
+  public String getAccount() {
+    return account;
+  }
+
+  public void setAccount(String account) {
+    this.account = account;
+  }
+
+  public boolean isSkipExpressionEvaluator() {
+    return skipExpressionEvaluator;
+  }
+
+  public void setSkipExpressionEvaluator(boolean skipExpressionEvaluator) {
+    this.skipExpressionEvaluator = skipExpressionEvaluator;
+  }
+
+  public List<Artifact> getRequiredArtifacts() {
+    return requiredArtifacts;
+  }
+
+  public void setRequiredArtifacts(List<Artifact> requiredArtifacts) {
+    this.requiredArtifacts = requiredArtifacts;
+  }
+
+  public String getStack() {
+    return stack;
+  }
+
+  public void setStack(String stack) {
+    this.stack = stack;
+  }
+
+  public String getDetails() {
+    return details;
+  }
+
+  public void setDetails(String details) {
+    this.details = details;
+  }
+
+  public String getVersionName() {
+    return versionName;
+  }
+
+  public void setVersionName(String versionName) {
this.versionName = versionName; + } + + public String getApplication() { + return application; + } + + public void setApplication(String application) { + this.application = application; + } + + public boolean isEnableArtifactBinding() { + return enableArtifactBinding; + } + + public void setEnableArtifactBinding(boolean enableArtifactBinding) { + this.enableArtifactBinding = enableArtifactBinding; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/health/CloudrunHealthIndicator.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/health/CloudrunHealthIndicator.java new file mode 100644 index 00000000000..0f71bc1bdcc --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/health/CloudrunHealthIndicator.java @@ -0,0 +1,47 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.health; + +import groovy.transform.InheritConstructors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.actuate.health.Health; +import org.springframework.boot.actuate.health.HealthIndicator; +import org.springframework.http.HttpStatus; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; +import org.springframework.web.bind.annotation.ResponseStatus; + +@Component +public class CloudrunHealthIndicator implements HealthIndicator { + + private static final Logger LOG = LoggerFactory.getLogger(CloudrunHealthIndicator.class); + + @Override + public Health health() { + return new Health.Builder().up().build(); + } + + @Scheduled(fixedDelay = 300000L) + void checkHealth() {} + + @ResponseStatus( + value = HttpStatus.SERVICE_UNAVAILABLE, + reason = "Problem communicating with Cloud run") + @InheritConstructors + static class CloudrunIOException extends RuntimeException {} +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunApplication.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunApplication.java new file mode 100644 index 00000000000..161a395b52f --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunApplication.java @@ -0,0 +1,30 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.netflix.spinnaker.clouddriver.model.Application;
+import java.io.Serializable;
+import java.util.*;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class CloudrunApplication implements Application, Serializable {
+  private String name;
+  private Map<String, String> attributes = new HashMap<>();
+  private Map<String, Set<String>> clusterNames = new HashMap<>(); // Account name -> cluster names
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunCluster.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunCluster.java
new file mode 100644
index 00000000000..1a6a419db70
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunCluster.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys;
+import com.netflix.spinnaker.clouddriver.model.Cluster;
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class CloudrunCluster implements Cluster, Serializable {
+  private String name;
+  private String type = Keys.Namespace.provider;
+  private String accountName;
+  private Set<CloudrunServerGroup> serverGroups =
+      Collections.synchronizedSet(new HashSet<>());
+  private Set<CloudrunLoadBalancer> loadBalancers =
+      Collections.synchronizedSet(new HashSet<>());
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunHealth.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunHealth.java
new file mode 100644
index 00000000000..23db42726e1
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunHealth.java
@@ -0,0 +1,28 @@
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.api.services.run.v1.model.Revision;
+import com.google.api.services.run.v1.model.Service;
+import com.netflix.spinnaker.clouddriver.model.Health;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class CloudrunHealth implements Health {
+  private HealthState state;
+  private String source;
+  private String type;
+
+  public CloudrunHealth(Revision version, Service service) {
+    // Cloud Run does not expose per-instance health data, so populate static placeholder
+    // values; every discovered instance reports as Up.
+    source = "Service ";
+    type = "Cloudrun Service";
+    state = HealthState.Up;
+  }
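+
+  // Serializes this health object into the generic map shape clouddriver's instance model
+  // expects; with the fields above, the map carries the keys "state", "source", and "type".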
+
+  public Map<String, Object> toMap() {
+    return new ObjectMapper().convertValue(this, new TypeReference<Map<String, Object>>() {});
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunInstance.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunInstance.java
new file mode 100644
index 00000000000..9a1ff26debb
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunInstance.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.api.services.run.v1.model.Revision;
+import com.google.api.services.run.v1.model.Service;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import com.netflix.spinnaker.clouddriver.model.Instance;
+import java.io.Serializable;
+import java.util.*;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class CloudrunInstance implements Instance, Serializable {
+  private String name;
+  private String id;
+  private Long launchTime;
+  private CloudrunInstanceStatus instanceStatus;
+  private String zone;
+  private String serverGroup;
+  private Collection<String> loadBalancers;
+  private final String providerType = CloudrunCloudProvider.ID;
+  private final String cloudProvider = CloudrunCloudProvider.ID;
+  private List<Map<String, Object>> health;
+
+  // Cloud Run exposes no real instance objects, so a single pseudo-instance is synthesized
+  // per revision.
+  public CloudrunInstance(Revision revision, Service service, String region) {
+    Map<String, Object> map = new CloudrunHealth(revision, service).toMap();
+    List<Map<String, Object>> list = new ArrayList<>();
+    list.add(map);
+    this.health = list;
+    this.instanceStatus = CloudrunInstanceStatus.DYNAMIC;
+    this.zone = region;
+    this.name = revision.getMetadata().getName() + "-instance";
+    this.id = this.name;
+    this.launchTime =
+        CloudrunModelUtil.translateTime(revision.getMetadata().getCreationTimestamp());
+    this.serverGroup = revision.getMetadata().getName();
+    this.loadBalancers = Set.of(service.getMetadata().getName());
+  }
+
+  public HealthState getHealthState() {
+    return new ObjectMapper().convertValue(this.health.get(0).get("state"), HealthState.class);
+  }
+
+  public enum CloudrunInstanceStatus {
+    DYNAMIC,
+    UNKNOWN;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunLoadBalancer.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunLoadBalancer.java
new file mode 100644
index 00000000000..7af8f2fd92e
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunLoadBalancer.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.api.services.run.v1.model.Service;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider;
+import com.netflix.spinnaker.clouddriver.model.LoadBalancer;
+import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance;
+import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup;
+import com.netflix.spinnaker.moniker.Moniker;
+import java.io.Serializable;
+import java.util.*;
+import java.util.stream.Collectors;
+import lombok.Data;
+
+@Data
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class CloudrunLoadBalancer implements LoadBalancer, Serializable {
+  private String name;
+  private String selfLink;
+  private String region;
+  private final String type = CloudrunCloudProvider.ID;
+  private final String cloudProvider = CloudrunCloudProvider.ID;
+  private String account;
+  private Set<LoadBalancerServerGroup> serverGroups = new HashSet<>();
+  private CloudrunTrafficSplit split = new CloudrunTrafficSplit();
+  private String url;
+  private String project;
+
+  private String latestReadyRevisionName;
+
+  private String latestCreatedRevisionName;
+
+  public void setMoniker(Moniker _ignored) {}
+
+  public CloudrunLoadBalancer() {}
+
+  public CloudrunLoadBalancer(Service service, String account, String region) {
+    this.name = service.getMetadata().getName();
+    this.selfLink = service.getMetadata().getSelfLink();
+    this.account = account;
+    this.region = region;
+    if (service.getStatus().getTraffic() != null) {
+      this.split
+          .getTrafficTargets()
+          .addAll(
+              new ObjectMapper()
+                  .convertValue(
+                      service.getStatus().getTraffic(),
+                      new TypeReference<List<CloudrunTrafficSplit.TrafficTarget>>() {}));
+    }
+    this.url = service.getStatus().getUrl();
+    this.latestCreatedRevisionName = service.getStatus().getLatestCreatedRevisionName();
+    this.latestReadyRevisionName = service.getStatus().getLatestReadyRevisionName();
+    this.project = service.getMetadata().getNamespace(); // project number
+  }
+
+  // Projects each Cloud Run server group onto this load balancer: enabled groups contribute
+  // their instances (with health state), disabled groups only contribute detached instance names.
+  public void setLoadBalancerServerGroups(Set<CloudrunServerGroup> serverGroups) {
+    Set<LoadBalancerServerGroup> loadBalancerServerGroups = new HashSet<>();
+    serverGroups.forEach(
+        serverGroup -> {
+          Set<LoadBalancerInstance> instances = new HashSet<>();
+          if (!serverGroup.isDisabled()) {
+            serverGroup
+                .getInstances()
+                .forEach(
+                    instance -> {
+                      Map<String, Object> health = new HashMap<>();
+                      health.put("state", instance.getHealthState().toString());
+                      instances.add(
+                          new LoadBalancerInstance().setId(instance.getName()).setHealth(health));
+                    });
+          }
+
+          Set<String> detachedInstances = new HashSet<>();
+          if (serverGroup.isDisabled()) {
+            detachedInstances.addAll(
+                serverGroup.getInstances().stream()
                    .map(CloudrunInstance::getName)
+                    .collect(Collectors.toSet()));
+          }
+
+          loadBalancerServerGroups.add(
+              new CloudrunLoadBalancerServerGroup()
+                  .setName(serverGroup.getName())
+                  .setRegion(serverGroup.getRegion())
.setIsDisabled(serverGroup.isDisabled()) + .setInstances(instances) + .setDetachedInstances(detachedInstances) + .setCloudProvider(CloudrunCloudProvider.ID)); + }); + this.serverGroups.addAll(loadBalancerServerGroups); + } + + public static class CloudrunLoadBalancerServerGroup extends LoadBalancerServerGroup {} +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetaData.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetaData.java new file mode 100644 index 00000000000..aef5ac785ed --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetaData.java @@ -0,0 +1,43 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunMetaData { + + private String name; + + private String namespace; + + private String selfLink; + + private String uid; + + private String resourceVersion; + + private long generation; + + private String creationTimestamp; + + private CloudrunMetadataLabels labels; + + private CloudrunMetadataAnnotations annotations; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetadataAnnotations.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetadataAnnotations.java new file mode 100644 index 00000000000..83631669e70 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetadataAnnotations.java @@ -0,0 +1,53 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@JsonIgnoreProperties(ignoreUnknown = true) +public class CloudrunMetadataAnnotations { + + @JsonProperty("run.googleapis.com/client-name") + private String runGoogleapisComClientName; + + @JsonProperty("serving.knative.dev/creator") + private String servingKnativeDevCreator; + + @JsonProperty("serving.knative.dev/lastModifier") + private String servingKnativeDevLastModifier; + + @JsonProperty("client.knative.dev/user-image") + private String clientKnativeDevUserImage; + + @JsonProperty("client.knative.dev/operation-id") + private String runGoogleapisComOperationID; + + @JsonProperty("run.googleapis.com/ingress") + private String runGoogleapisComIngress; + + @JsonProperty("run.googleapis.com/ingress-status") + private String runGoogleapisComIngressStatus; + + private static final String LABELS_PREFIX = "spinnaker/"; + + private String spinnakerApplication = LABELS_PREFIX + "application"; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetadataLabels.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetadataLabels.java new file mode 100644 index 00000000000..86ac211e5df --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunMetadataLabels.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunMetadataLabels { + + @JsonProperty("cloud.googleapis.com/location") + private String cloudGoogleapisComLocation; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunModelUtil.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunModelUtil.java new file mode 100644 index 00000000000..f5d1f59bdca --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunModelUtil.java @@ -0,0 +1,44 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class CloudrunModelUtil {
+
+  private static final List<SimpleDateFormat> dateFormats =
+      Stream.of("yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'", "yyyy-MM-dd'T'HH:mm:ss'Z'")
+          .map(SimpleDateFormat::new)
+          .collect(Collectors.toList());
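+
+  /**
+   * Parses a Cloud Run / Knative timestamp into epoch milliseconds, trying each supported format
+   * in turn, and returns null if the value matches neither format. Illustrative examples:
+   * translateTime("2022-01-01T10:15:30.123456Z") and translateTime("2022-01-01T10:15:30Z") both
+   * parse successfully.
+   */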
+  public static Long translateTime(String time) {
+    for (SimpleDateFormat dateFormat : dateFormats) {
+      try {
+        return dateFormat.parse(time).getTime();
+      } catch (ParseException e) {
+        log.error("Unable to parse {}. {}", time, e.getMessage());
+      }
+    }
+    return null;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunServerGroup.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunServerGroup.java
new file mode 100644
index 00000000000..84baff58eff
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunServerGroup.java
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.google.api.services.run.v1.model.Revision;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider;
+import com.netflix.spinnaker.clouddriver.model.ServerGroup;
+import java.io.Serializable;
+import java.util.*;
+import java.util.stream.Collectors;
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@Data
+public class CloudrunServerGroup implements ServerGroup, Serializable {
+
+  private static final String LABELS_PREFIX = "serving.knative.dev/";
+  private static final String LABELS_SERVICE = LABELS_PREFIX + "service";
+  private static final String LABELS_LOCATION = "cloud.googleapis.com/location";
+  private final String LABELS_SERVICE_UID = LABELS_PREFIX + "serviceUid";
+  private final String ANNOTATIONS_PREFIX = "autoscaling.knative.dev/";
+  private final String ANNOTATIONS_MINSCALE = ANNOTATIONS_PREFIX + "minScale";
+  private final String ANNOTATIONS_MAXSCALE = ANNOTATIONS_PREFIX + "maxScale";
+  private String name;
+  private final String type = CloudrunCloudProvider.ID;
+  private final String cloudProvider = CloudrunCloudProvider.ID;
+  private String account;
+  private String region;
+  private final Set<String> zones = ImmutableSet.of();
+  private Set<CloudrunInstance> instances;
+  private Set<String> loadBalancers = new HashSet<>();
+  private Long createdTime;
+  private final Map<String, Object> launchConfig = ImmutableMap.of();
+  private final Set<String> securityGroups = ImmutableSet.of();
+  private Boolean disabled = true;
+  private ServingStatus servingStatus;
+  private String instanceClass;
+  private Integer minTotalInstances;
+  private Integer maxTotalInstances;
+  private String serviceName;
+  private String namespace;
+
+  private Map<String, String> tags = new HashMap<>();
+
+  public CloudrunServerGroup() {}
+
+  public CloudrunServerGroup(Revision revision, String account, String loadBalancerName) {
+    this.account = account;
+    this.region = getRegion(revision);
+    this.name = revision.getMetadata().getName();
+    this.loadBalancers.add(loadBalancerName);
+    this.createdTime =
+        CloudrunModelUtil.translateTime(revision.getMetadata().getCreationTimestamp());
+    this.disabled = isDisabled(revision);
+    // A disabled revision is not serving traffic.
+    this.servingStatus = this.disabled ? ServingStatus.STOPPED : ServingStatus.SERVING;
+    this.minTotalInstances = getMinTotalInstances(revision);
+    this.maxTotalInstances = getMaxTotalInstances(revision);
+    this.serviceName = getServiceName(revision);
+  }
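+
+  // Cloud Run does not report live instance counts, so the counts and capacity below are
+  // approximated from the revision's autoscaling minScale/maxScale annotations.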
+
+  @Override
+  public InstanceCounts getInstanceCounts() {
+    InstanceCounts counts = new InstanceCounts();
+    return counts
+        .setDown(0)
+        .setOutOfService(0)
+        .setUp(minTotalInstances)
+        .setStarting(0)
+        .setUnknown(0)
+        .setTotal(minTotalInstances);
+  }
+
+  @Override
+  public Capacity getCapacity() {
+    Capacity capacity = new Capacity();
+    return capacity
+        .setMin(minTotalInstances)
+        .setMax(maxTotalInstances)
+        .setDesired(minTotalInstances);
+  }
+
+  @Override
+  public Map<String, String> getTags() {
+    return tags;
+  }
+
+  private Integer getMinTotalInstances(Revision revision) {
+    String minScale = revision.getMetadata().getAnnotations().get(ANNOTATIONS_MINSCALE);
+    if (minScale == null) { // the minScale annotation is only present when minScale > 0
+      return 0;
+    }
+    return Integer.parseInt(minScale);
+  }
+
+  private Integer getMaxTotalInstances(Revision revision) {
+    return Integer.parseInt(revision.getMetadata().getAnnotations().get(ANNOTATIONS_MAXSCALE));
+  }
+
+  public static String getServiceName(Revision revision) {
+    return revision.getMetadata().getLabels().get(LABELS_SERVICE);
+  }
+
+  public static String getLocationLabel() {
+    return LABELS_LOCATION;
+  }
+
+  public static String getRegion(Revision revision) {
+    return revision.getMetadata().getLabels().get(LABELS_LOCATION);
+  }
+
+  @Override
+  public ImageSummary getImageSummary() {
+    return null;
+  }
+
+  @Override
+  public ImagesSummary getImagesSummary() {
+    return null;
+  }
+
+  @Override
+  public Boolean isDisabled() {
+    return disabled;
+  }
+
+  public Boolean isDisabled(Revision revision) {
+    try {
+      String activeConditionStatus =
+          revision.getStatus().getConditions().stream()
+              .filter(s -> s.getType().equalsIgnoreCase("Active"))
+              .collect(Collectors.toList())
+              .get(0)
+              .getStatus();
+      return !activeConditionStatus.equals("True");
+    } catch (IndexOutOfBoundsException e) {
+      log.error("No conditions exist on the Revision! {}", e.getMessage());
+      return true;
+    }
+  }
+
+  public enum ServingStatus {
+    SERVING,
+    STOPPED;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunService.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunService.java
new file mode 100644
index 00000000000..32ec6a753cd
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunService.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import com.google.api.services.run.v1.model.ServiceStatus; +import java.util.LinkedHashMap; + +public class CloudrunService extends com.google.api.client.json.GenericJson { + + /** + * The API version for this call such as "serving.knative.dev/v1". The value may be {@code null}. + */ + @com.google.api.client.util.Key private java.lang.String apiVersion; + + /** The kind of resource, in this case "Service". The value may be {@code null}. */ + @com.google.api.client.util.Key private java.lang.String kind; + + /** + * Metadata associated with this Service, including name, namespace, labels, and annotations. The + * value may be {@code null}. + */ + @com.google.api.client.util.Key private LinkedHashMap metadata; + + /** + * Spec holds the desired state of the Service (from the client). The value may be {@code null}. + */ + @com.google.api.client.util.Key private LinkedHashMap spec; + + /** + * Status communicates the observed state of the Service (from the controller). The value may be + * {@code null}. + */ + @com.google.api.client.util.Key private ServiceStatus status; + + /** + * The API version for this call such as "serving.knative.dev/v1". + * + * @return value or {@code null} for none + */ + public java.lang.String getApiVersion() { + return apiVersion; + } + + /** + * The API version for this call such as "serving.knative.dev/v1". + * + * @param apiVersion apiVersion or {@code null} for none + */ + public CloudrunService setApiVersion(java.lang.String apiVersion) { + this.apiVersion = apiVersion; + return this; + } + + /** + * The kind of resource, in this case "Service". + * + * @return value or {@code null} for none + */ + public java.lang.String getKind() { + return kind; + } + + /** + * The kind of resource, in this case "Service". + * + * @param kind kind or {@code null} for none + */ + public CloudrunService setKind(java.lang.String kind) { + this.kind = kind; + return this; + } + + /** + * Metadata associated with this Service, including name, namespace, labels, and annotations. + * + * @return value or {@code null} for none + */ + public LinkedHashMap getMetadata() { + return metadata; + } + + /** + * Metadata associated with this Service, including name, namespace, labels, and annotations. + * + * @param metadata metadata or {@code null} for none + */ + public CloudrunService setMetadata(LinkedHashMap metadata) { + this.metadata = metadata; + return this; + } + + /** + * Spec holds the desired state of the Service (from the client). + * + * @return value or {@code null} for none + */ + public LinkedHashMap getSpec() { + return spec; + } + + /** + * Spec holds the desired state of the Service (from the client). + * + * @param spec spec or {@code null} for none + */ + public CloudrunService setSpec(LinkedHashMap spec) { + this.spec = spec; + return this; + } + + /** + * Status communicates the observed state of the Service (from the controller). + * + * @return value or {@code null} for none + */ + public ServiceStatus getStatus() { + return status; + } + + /** + * Status communicates the observed state of the Service (from the controller). 
+ * + * @param status status or {@code null} for none + */ + public CloudrunService setStatus(ServiceStatus status) { + this.status = status; + return this; + } + + @Override + public CloudrunService set(String fieldName, Object value) { + return (CloudrunService) super.set(fieldName, value); + } + + @Override + public CloudrunService clone() { + return (CloudrunService) super.clone(); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpec.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpec.java new file mode 100644 index 00000000000..bacb4882e78 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpec.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpec { + + private CloudrunSpecTemplate template; + + private CloudrunSpecTraffic[] traffic; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainer.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainer.java new file mode 100644 index 00000000000..7f4c88f5eed --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainer.java @@ -0,0 +1,31 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecContainer { + + private String image; + + private CloudrunSpecContainerPort[] ports; + + private CloudrunSpecContainerResources resources; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainerPort.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainerPort.java new file mode 100644 index 00000000000..80639f473df --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainerPort.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 OpsMx, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecContainerPort { + + private String name; + + private long containerPort; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainerResources.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainerResources.java new file mode 100644 index 00000000000..0a631f891ad --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecContainerResources.java @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecContainerResources { + + private CloudrunSpecResourceLimits limits; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecResourceLimits.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecResourceLimits.java new file mode 100644 index 00000000000..1b51cfb5a68 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecResourceLimits.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecResourceLimits { + + private String cpu; + + private String memory; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplate.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplate.java new file mode 100644 index 00000000000..95dcbf10069 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplate.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecTemplate { + + private CloudrunSpecTemplateMetadata metadata; + + private CloudrunTemplateSpec spec; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateAnnotations.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateAnnotations.java new file mode 100644 index 00000000000..e846c0030ca --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateAnnotations.java @@ -0,0 +1,32 @@ +/* + * Copyright 2023 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecTemplateAnnotations { + + @JsonProperty("run.googleapis.com/client-name") + private String runGoogleapisComClientName; + + @JsonProperty("autoscaling.knative.dev/maxScale") + private String autoscalingKnativeDevMaxScale; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateLabels.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateLabels.java new file mode 100644 index 00000000000..75ae20588b0 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateLabels.java @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecTemplateLabels { + + private String runGoogleapisComStartupProbeType; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateMetadata.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateMetadata.java new file mode 100644 index 00000000000..dd6ecef2fd6 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTemplateMetadata.java @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class CloudrunSpecTemplateMetadata { + + private String name; + + private CloudrunSpecTemplateLabels labels; + + private CloudrunSpecTemplateAnnotations annotations; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTraffic.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTraffic.java new file mode 100644 index 00000000000..263741f2925 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunSpecTraffic.java @@ -0,0 +1,33 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@JsonIgnoreProperties(ignoreUnknown = true) +public class CloudrunSpecTraffic { + + private long percent; + + private boolean latestRevision; + + private String revisionName; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunTemplateSpec.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunTemplateSpec.java new file mode 100644 index 00000000000..d626ca99e4b --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunTemplateSpec.java @@ -0,0 +1,30 @@ +/* + * Copyright 2023 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class CloudrunTemplateSpec {
+
+  private long containerConcurrency;
+  private long timeoutSeconds;
+  private String serviceAccountName;
+  private CloudrunSpecContainer[] containers;
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunTrafficSplit.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunTrafficSplit.java
new file mode 100644
index 00000000000..7f7539e99c6
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunTrafficSplit.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class CloudrunTrafficSplit implements Cloneable {
+
+  private List<TrafficTarget> trafficTargets = new ArrayList<>();
+
+  @Override
+  public CloudrunTrafficSplit clone() {
+    try {
+      return (CloudrunTrafficSplit) super.clone();
+    } catch (CloneNotSupportedException e) {
+      throw new AssertionError();
+    }
+  }
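+
+  // Mirrors one entry of a Knative/Cloud Run Service's status.traffic list: either a pinned
+  // revisionName with a percent of traffic, or latestRevision=true to track the newest revision.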
+
+  @Data
+  @JsonIgnoreProperties(ignoreUnknown = true)
+  public static class TrafficTarget {
+    private String revisionName;
+    private Integer percent;
+    private Boolean latestRevision;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunYmlData.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunYmlData.java
new file mode 100644
index 00000000000..ef9cf2bf204
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/model/CloudrunYmlData.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.model;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class CloudrunYmlData {
+
+  private String apiVersion;
+
+  private String kind;
+
+  private CloudrunMetaData metadata;
+
+  private CloudrunSpec spec;
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/CloudrunManifestOperationResult.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/CloudrunManifestOperationResult.java
new file mode 100644
index 00000000000..b827a19dcec
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/CloudrunManifestOperationResult.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.op;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunService;
+import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult;
+import java.util.HashSet;
+import java.util.Set;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+@EqualsAndHashCode(callSuper = false)
+public class CloudrunManifestOperationResult extends DeploymentResult {
+
+  private Set<CloudrunService> manifests = new HashSet<>();
+
+  public CloudrunManifestOperationResult addManifest(CloudrunService manifest) {
+    manifests.add(manifest);
+    return this;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/artifact/CloudrunCleanupArtifactsOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/artifact/CloudrunCleanupArtifactsOperation.java
new file mode 100644
index 00000000000..4c42779ec96
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/artifact/CloudrunCleanupArtifactsOperation.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.op.artifact;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.description.manifest.CloudrunCleanupArtifactsDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.op.CloudrunManifestOperationResult;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+import javax.annotation.Nonnull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CloudrunCleanupArtifactsOperation implements AtomicOperation<DeploymentResult> {
+
+  private static final Logger log =
+      LoggerFactory.getLogger(CloudrunCleanupArtifactsOperation.class);
+  private final CloudrunCleanupArtifactsDescription description;
+  private final CloudrunCredentials credentials;
+  @Nonnull private final String accountName;
+  private static final String OP_NAME = "CLEANUP_CLOUDRUN_ARTIFACTS";
+
+  public CloudrunCleanupArtifactsOperation(CloudrunCleanupArtifactsDescription description) {
+    this.description = description;
+    this.credentials = description.getCredentials().getCredentials();
+    this.accountName = description.getCredentials().getName();
+  }
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
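+
+  // Artifact cleanup is not implemented for Cloud Run yet; operate() returns an empty result
+  // so the orchestration completes without deleting anything.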
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.op.artifact;
+
+import com.netflix.spinnaker.clouddriver.cloudrun.description.manifest.CloudrunCleanupArtifactsDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.op.CloudrunManifestOperationResult;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+import javax.annotation.Nonnull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CloudrunCleanupArtifactsOperation implements AtomicOperation<DeploymentResult> {
+
+  private static final Logger log =
+      LoggerFactory.getLogger(CloudrunCleanupArtifactsOperation.class);
+  private final CloudrunCleanupArtifactsDescription description;
+  private final CloudrunCredentials credentials;
+  @Nonnull private final String accountName;
+  private static final String OP_NAME = "CLEANUP_CLOUDRUN_ARTIFACTS";
+
+  public CloudrunCleanupArtifactsOperation(CloudrunCleanupArtifactsDescription description) {
+    this.description = description;
+    this.credentials = description.getCredentials().getCredentials();
+    this.accountName = description.getCredentials().getName();
+  }
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  @Override
+  public CloudrunManifestOperationResult operate(List priorOutputs) {
+    // Currently a no-op: no artifacts are cleaned up and the result carries no manifests.
+    CloudrunManifestOperationResult result = new CloudrunManifestOperationResult();
+    result.setManifests(null);
+    return result;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/manifest/ArtifactKey.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/manifest/ArtifactKey.java
new file mode 100644
index 00000000000..3da9fd17f08
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/manifest/ArtifactKey.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.op.manifest;
+
+import static com.google.common.collect.ImmutableSet.toImmutableSet;
+
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import java.util.Collection;
+import java.util.Objects;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import lombok.EqualsAndHashCode;
+import lombok.ToString;
+
+/**
+ * When determining whether the deploy and patch manifest stages bound all required artifacts, the
+ * artifacts in the list of required artifacts have an artifact account set while those in the list
+ * we're trying to bind don't.
+ *
+ * <p>As the .equals function of Artifact includes the account in its comparison, this means that we
+ * don't recognize the replaced artifacts as the ones we expected to replace and fail the stage.
+ *
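+ * <p>Illustrative example (hypothetical values; assumes the kork {@code Artifact} builder):
+ *
+ * <pre>{@code
+ * Artifact withAccount =
+ *     Artifact.builder()
+ *         .type("docker/image")
+ *         .name("gcr.io/my-project/app")
+ *         .artifactAccount("docker-registry")
+ *         .build();
+ * Artifact withoutAccount =
+ *     Artifact.builder().type("docker/image").name("gcr.io/my-project/app").build();
+ *
+ * withAccount.equals(withoutAccount);  // false: the account participates in Artifact.equals
+ * ArtifactKey.fromArtifact(withAccount)
+ *     .equals(ArtifactKey.fromArtifact(withoutAccount));  // true: the account is excluded here
+ * }</pre>
+ *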
+ * <p>As a temporary fix until we can refactor the artifact passing code to consistently include (or
+ * not) account, or decide that account should always be excluded from Artifact.equals(), create a
+ * class to hold the fields of Artifact that these two stages should use when deciding whether
+ * artifacts are equal.
+ */
+@EqualsAndHashCode
+@ToString
+class ArtifactKey {
+  private final String type;
+  private final String name;
+  private final String version;
+  private final String location;
+  private final String reference;
+
+  private ArtifactKey(Artifact artifact) {
+    this.type = artifact.getType();
+    this.name = artifact.getName();
+    this.version = artifact.getVersion();
+    this.location = artifact.getLocation();
+    this.reference = artifact.getReference();
+  }
+
+  @Nonnull
+  static ArtifactKey fromArtifact(@Nonnull Artifact artifact) {
+    return new ArtifactKey(artifact);
+  }
+
+  @Nonnull
+  static ImmutableSet<ArtifactKey> fromArtifacts(@Nullable Collection<Artifact> artifacts) {
+    if (artifacts == null) {
+      return ImmutableSet.of();
+    }
+    return artifacts.stream()
+        .filter(Objects::nonNull)
+        .map(ArtifactKey::fromArtifact)
+        .collect(toImmutableSet());
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/manifest/CloudrunDeployManifestOperation.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/manifest/CloudrunDeployManifestOperation.java
new file mode 100644
index 00000000000..2cd0e3f1bca
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/op/manifest/CloudrunDeployManifestOperation.java
@@ -0,0 +1,404 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.op.manifest; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.CloudrunServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.exception.CloudrunOperationException; +import com.netflix.spinnaker.clouddriver.cloudrun.description.manifest.CloudrunDeployManifestDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.model.*; +import com.netflix.spinnaker.clouddriver.cloudrun.op.CloudrunManifestOperationResult; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunLoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; + +public class CloudrunDeployManifestOperation implements AtomicOperation { + + private static final Logger log = LoggerFactory.getLogger(CloudrunDeployManifestOperation.class); + + private static final String OP_NAME = "DEPLOY_CLOUDRUN_MANIFEST"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Autowired CloudrunJobExecutor jobExecutor; + + @Autowired CloudrunLoadBalancerProvider provider; + + CloudrunDeployManifestDescription description; + + private final ObjectMapper objectMapper = new ObjectMapper(); + + private CloudrunYmlData ymlData = new CloudrunYmlData(); + + public CloudrunDeployManifestOperation(CloudrunDeployManifestDescription description) { + this.description = description; + } + + public String deploy(String repositoryPath) { + String project = description.getCredentials().getProject(); + String applicationDirectoryRoot = null; + List configFiles = description.getManifests(); + Map allArtifacts = initializeArtifacts(); + try { + populateCloudrunYmlData(configFiles); + } catch (Exception e) { + throw new CloudrunOperationException( + "Failed to deploy manifest to Cloud Run with command " + e.getMessage()); + } + List modConfigFiles = null; + if (allArtifacts != null && !allArtifacts.values().isEmpty()) { + modConfigFiles = bindArtifacts(configFiles, allArtifacts.values()); + } + CloudrunServerGroupNameResolver serverGroupNameResolver = + new CloudrunServerGroupNameResolver( + project, description.getRegion(), description.getCredentials()); + populateRegionFromManifest(configFiles); + serverGroupNameResolver.setRegion(description.getRegion()); + String clusterName = + serverGroupNameResolver.getClusterName( + description.getApplication(), description.getStack(), description.getDetails()); + String versionName = + serverGroupNameResolver.resolveNextServerGroupName( + description.getApplication(), description.getStack(), description.getDetails(), false); + modConfigFiles = 
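+        // Rewrites each manifest so the Service keeps the resolved cluster name and the revision
+        // template carries the newly resolved version name (see the method defined below).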
+ insertSpinnakerAppNameServiceNameVersionName(configFiles, clusterName, versionName); + List writtenFullConfigFilePaths = + writeConfigFiles(modConfigFiles, repositoryPath, applicationDirectoryRoot); + String region = description.getRegion(); + + List deployCommand = new ArrayList<>(); + deployCommand.add("gcloud"); + deployCommand.add("run"); + deployCommand.add("services"); + deployCommand.add("replace"); + deployCommand.add(writtenFullConfigFilePaths.stream().collect(Collectors.joining(""))); + deployCommand.add("--region=" + region); + deployCommand.add("--project=" + project); + + String success = "false"; + getTask().updateStatus(OP_NAME, "Deploying manifest version " + versionName + "..."); + try { + jobExecutor.runCommand(deployCommand); + success = "true"; + } catch (Exception e) { + throw new CloudrunOperationException( + "Failed to deploy manifest to Cloud Run with command " + + deployCommand + + "exception " + + e.getMessage()); + } finally { + deleteFiles(writtenFullConfigFilePaths); + } + getTask().updateStatus(OP_NAME, "Done deploying manifest version " + versionName + "..."); + return versionName; + } + + private Map initializeArtifacts() { + Map allArtifacts = new HashMap<>(); + if (!description.isEnableArtifactBinding()) { + return allArtifacts; + } + // Required artifacts are explicitly set in stage configuration + if (description.getRequiredArtifacts() != null) { + description + .getRequiredArtifacts() + .forEach(a -> allArtifacts.putIfAbsent(getArtifactKey(a), a)); + } + return allArtifacts; + } + + private String getArtifactKey(Artifact artifact) { + return String.format( + "[%s]-[%s]-[%s]", + artifact.getType(), + artifact.getName(), + artifact.getLocation() != null ? artifact.getLocation() : ""); + } + + @Override + public CloudrunManifestOperationResult operate(List priorOutputs) { + + String baseDir = description.getCredentials().getLocalRepositoryDirectory(); + String directoryPath = getFullDirectoryPath(baseDir); + String deployPath = directoryPath; + String newVersionName; + String success = "false"; + getTask().updateStatus(OP_NAME, "Initializing creation of manifest version..."); + newVersionName = deploy(directoryPath); + String region = description.getRegion(); + CloudrunManifestOperationResult result = new CloudrunManifestOperationResult(); + result.addManifest(description.getManifests().get(0)); + log.info(" region in deploy manifest operation : " + region); + log.info(" new version name in deploy manifest operation : " + newVersionName); + success = "true"; + return result; + } + + private List insertSpinnakerAppNameServiceNameVersionName( + List configFiles, String clusterName, String versionName) { + + return configFiles.stream() + .map( + (configFile) -> { + try { + ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory()); + CloudrunService yamlObj = configFile; + if (yamlObj != null) { + if (yamlObj.getMetadata() != null) { + CloudrunMetaData metadata = ymlData.getMetadata(); + CloudrunSpec spec = ymlData.getSpec(); + if (metadata != null && spec != null) { + if (spec.getTemplate() != null && spec.getTemplate().getMetadata() != null) { + CloudrunSpecTemplateMetadata specMetadata = + spec.getTemplate().getMetadata(); + specMetadata.setName(versionName); + metadata.setName(clusterName); + CloudrunLoadBalancer loadBalancer = + provider.getLoadBalancer(description.getAccountName(), clusterName); + if (loadBalancer != null) { + insertTrafficPercent(spec, loadBalancer); + } + } + } + CloudrunMetadataAnnotations annotations = 
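+                      // Stamp the spinnaker/application annotation onto the manifest and derive
+                      // the deploy region from its cloud.googleapis.com/location label.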
metadata.getAnnotations(); + CloudrunMetadataLabels labels = metadata.getLabels(); + if (annotations == null) { + CloudrunMetadataAnnotations metadataAnnotations = + new CloudrunMetadataAnnotations(); + metadataAnnotations.setSpinnakerApplication(description.getApplication()); + metadata.setAnnotations(metadataAnnotations); + description.setRegion(labels.getCloudGoogleapisComLocation()); + } else if (annotations != null && labels != null) { + annotations.setSpinnakerApplication(description.getApplication()); + description.setRegion(labels.getCloudGoogleapisComLocation()); + } + } + } + return yamlReader.writeValueAsString(ymlData); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + }) + .collect(Collectors.toList()); + } + + private List bindArtifacts( + List configFiles, Collection artifacts) { + + return configFiles.stream() + .map( + (configFile) -> { + try { + ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory()); + CloudrunService yamlObj = configFile; + if (yamlObj != null && yamlObj.getMetadata() != null) { + CloudrunMetaData metadata = ymlData.getMetadata(); + CloudrunSpec spec = ymlData.getSpec(); + if (metadata != null && metadata != null) { + if (metadata.getAnnotations() != null && spec.getTemplate().getSpec() != null) { + CloudrunTemplateSpec specSpec = spec.getTemplate().getSpec(); + CloudrunMetadataAnnotations annotations = metadata.getAnnotations(); + bindTheRequiredArtifact(annotations, specSpec, artifacts); + } + } + } + return yamlReader.writeValueAsString(yamlObj); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + }) + .collect(Collectors.toList()); + } + + private void bindTheRequiredArtifact( + CloudrunMetadataAnnotations annotations, + CloudrunTemplateSpec specSpec, + Collection artifacts) { + + if (specSpec.getContainers() != null) { + CloudrunSpecContainer[] containerArray = specSpec.getContainers(); + if (containerArray != null && !(containerArray.length == 0)) { + CloudrunSpecContainer container = containerArray[0]; + for (Artifact artifact : artifacts) { + if (artifact.getType().equals("docker/image")) { + String cloudrunImage = (String) annotations.getClientKnativeDevUserImage(); + if (cloudrunImage != null) { + String[] imageArray = cloudrunImage.split(":"); + String image = imageArray[0]; + if (image != null && artifact.getName() != null) { + String[] imageArr = image.split("/"); + String[] artifactArr = artifact.getName().split("/"); + if (imageArr != null + && artifactArr != null + && imageArr.length > 0 + && artifactArr.length > 0) { + String appImage = imageArr[imageArr.length - 1]; + String artifactImage = artifactArr[artifactArr.length - 1]; + if (appImage != null && artifactImage != null && appImage.equals(artifactImage)) { + annotations.setClientKnativeDevUserImage(artifact.getReference()); + container.setImage(artifact.getReference()); + } else { + throw new IllegalArgumentException( + String.format( + "The following required artifacts could not be bound: '%s'. " + + "Check that the Docker image name above matches the name used in the image field of your manifest. " + + "Failing the stage as this is likely a configuration error.", + ArtifactKey.fromArtifact(artifact))); + } + } + } else { + throw new IllegalArgumentException( + String.format( + "The following required artifacts could not be bound: '%s'. " + + "Check that the Docker image name above matches the name used in the image field of your manifest. 
" + + "Failing the stage as this is likely a configuration error.", + ArtifactKey.fromArtifact(artifact))); + } + } + } else { + throw new IllegalArgumentException( + String.format( + "The following required artifacts could not be bound: '%s'. " + + "Check that the Docker image name above matches the name used in the image field of your manifest. " + + "Failing the stage as this is likely a configuration error.", + ArtifactKey.fromArtifact(artifact))); + } + } + } + } + } + + private void populateCloudrunYmlData(List configFiles) + throws JsonProcessingException { + + for (CloudrunService configFile : configFiles) { + CloudrunService yamlObj = configFile; + if (yamlObj != null && yamlObj.getMetadata() != null && yamlObj.getSpec() != null) { + ymlData.setKind(yamlObj.getKind()); + ymlData.setApiVersion(yamlObj.getApiVersion()); + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + String metaDataJson = objectMapper.writeValueAsString(yamlObj.getMetadata()); + CloudrunMetaData cloudrunMetaData = + objectMapper.readValue(metaDataJson, CloudrunMetaData.class); + ymlData.setMetadata(cloudrunMetaData); + String specJson = objectMapper.writeValueAsString(yamlObj.getSpec()); + CloudrunSpec spec = objectMapper.readValue(specJson, CloudrunSpec.class); + ymlData.setSpec(spec); + } + } + } + + private void populateRegionFromManifest(List configFiles) { + + for (CloudrunService configFile : configFiles) { + CloudrunService yamlObj = configFile; + if (yamlObj != null) { + if (yamlObj.getMetadata() != null) { + CloudrunMetaData metadata = ymlData.getMetadata(); + CloudrunMetadataLabels labels = metadata.getLabels(); + if (labels != null) { + description.setRegion(labels.getCloudGoogleapisComLocation()); + } + } + } + } + } + + private void insertTrafficPercent(CloudrunSpec spec, CloudrunLoadBalancer loadBalancer) { + + List trafficTargets = new ArrayList<>(); + if (loadBalancer.getSplit() != null && loadBalancer.getSplit().getTrafficTargets() != null) { + loadBalancer + .getSplit() + .getTrafficTargets() + .forEach( + trafficTarget -> { + CloudrunSpecTraffic existingTrafficMap = new CloudrunSpecTraffic(); + existingTrafficMap.setPercent(trafficTarget.getPercent()); + existingTrafficMap.setRevisionName(trafficTarget.getRevisionName()); + trafficTargets.add(existingTrafficMap); + }); + spec.setTraffic(trafficTargets.toArray(new CloudrunSpecTraffic[0])); + } + } + + public static void deleteFiles(List paths) { + paths.forEach( + path -> { + try { + new File(path).delete(); + } catch (Exception e) { + throw new CloudrunOperationException("Could not delete config file: ${e.getMessage()}"); + } + }); + } + + public static List writeConfigFiles( + List configFiles, String repositoryPath, String applicationDirectoryRoot) { + if (configFiles == null) { + return Collections.emptyList(); + } else { + return configFiles.stream() + .map( + (configFile) -> { + Path path = + generateRandomRepositoryFilePath(repositoryPath, applicationDirectoryRoot); + try { + File targetFile = new File(path.toString()); + FileUtils.writeStringToFile(targetFile, configFile, StandardCharsets.UTF_8); + } catch (Exception e) { + throw new CloudrunOperationException( + "Could not write config file: ${e.getMessage()}"); + } + return path.toString(); + }) + .collect(Collectors.toList()); + } + } + + public static Path generateRandomRepositoryFilePath( + String repositoryPath, String applicationDirectoryRoot) { + String name = UUID.randomUUID().toString(); + String filePath = applicationDirectoryRoot != null ? 
applicationDirectoryRoot : "."; + StringBuilder sb = new StringBuilder(name).append(".yaml"); + return Paths.get(repositoryPath, filePath, sb.toString()); + } + + public static String getFullDirectoryPath(String localRepositoryDirectory) { + return Paths.get(localRepositoryDirectory).toString(); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/CloudrunProvider.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/CloudrunProvider.java new file mode 100644 index 00000000000..6fef2df09d5 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/CloudrunProvider.java @@ -0,0 +1,74 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.provider; + +import com.netflix.spinnaker.clouddriver.cache.SearchableProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys; +import com.netflix.spinnaker.clouddriver.security.BaseProvider; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +public class CloudrunProvider extends BaseProvider implements SearchableProvider { + public static final String PROVIDER_NAME = CloudrunProvider.class.getName(); + + final Map urlMappingTemplates = Collections.emptyMap(); + final Map searchResultHydrators = + Collections.emptyMap(); + final CloudrunCloudProvider cloudProvider; + String[] allKeys = { + Keys.Namespace.APPLICATIONS.getNs(), + Keys.Namespace.CLUSTERS.getNs(), + Keys.Namespace.SERVER_GROUPS.getNs(), + Keys.Namespace.INSTANCES.getNs(), + Keys.Namespace.LOAD_BALANCERS.getNs() + }; + + final Set defaultCaches = Set.of(allKeys); + + public CloudrunProvider(CloudrunCloudProvider cloudProvider) { + this.cloudProvider = cloudProvider; + } + + @Override + public String getProviderName() { + return PROVIDER_NAME; + } + + @Override + public Set getDefaultCaches() { + return defaultCaches; + } + + /** @return */ + @Override + public Map getUrlMappingTemplates() { + return urlMappingTemplates; + } + + /** @return */ + @Override + public Map getSearchResultHydrators() { + return searchResultHydrators; + } + + @Override + public Map parseKey(String key) { + return Keys.parse(key); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/agent/AbstractCloudrunCachingAgent.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/agent/AbstractCloudrunCachingAgent.java new file mode 100644 index 00000000000..9dd7e2ea746 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/agent/AbstractCloudrunCachingAgent.java @@ -0,0 +1,94 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.provider.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import 
com.google.api.client.googleapis.batch.BatchRequest; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentIntervalAware; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.CloudrunProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public abstract class AbstractCloudrunCachingAgent + implements CachingAgent, AccountAware, AgentIntervalAware { + public abstract String getSimpleName(); + + private final String accountName; + private final String providerName = CloudrunProvider.PROVIDER_NAME; + private final CloudrunCloudProvider cloudrunCloudProvider = new CloudrunCloudProvider(); + private final ObjectMapper objectMapper; + private final CloudrunNamedAccountCredentials credentials; + + public AbstractCloudrunCachingAgent( + String accountName, ObjectMapper objectMapper, CloudrunNamedAccountCredentials credentials) { + this.accountName = accountName; + this.objectMapper = objectMapper; + this.credentials = credentials; + } + + public static void cache( + Map> cacheResults, + String cacheNamespace, + Map cacheDataById) { + cacheResults + .get(cacheNamespace) + .forEach( + cacheData -> { + CacheData existingCacheData = cacheDataById.get(cacheData.getId()); + if (existingCacheData == null) { + cacheDataById.put(cacheData.getId(), cacheData); + } else { + existingCacheData.getAttributes().putAll(cacheData.getAttributes()); + cacheData + .getRelationships() + .forEach( + (relationshipName, relationships) -> { + existingCacheData.getRelationships().put(relationshipName, relationships); + }); + } + }); + } + + public Long getAgentInterval() { + if (this.credentials.getCachingIntervalSeconds() == null) { + return TimeUnit.SECONDS.toMillis(60); + } + + return TimeUnit.SECONDS.toMillis(this.credentials.getCachingIntervalSeconds()); + } + + public String getAccountName() { + return accountName; + } + + public String getProviderName() { + return providerName; + } + + static void executeIfRequestsAreQueued(BatchRequest batch) { + try { + if (batch.size() > 0) { + batch.execute(); + } + } catch (IOException e) { + + } + } + + public CloudrunCloudProvider getCloudrunCloudProvider() { + return cloudrunCloudProvider; + } + + public ObjectMapper getObjectMapper() { + return objectMapper; + } + + public final CloudrunNamedAccountCredentials getCredentials() { + return credentials; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/agent/CloudrunServerGroupCachingAgent.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/agent/CloudrunServerGroupCachingAgent.java new file mode 100644 index 00000000000..5c5ff1dec15 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/agent/CloudrunServerGroupCachingAgent.java @@ -0,0 +1,642 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys.Namespace.*; + +import 
com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.client.googleapis.batch.BatchRequest; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.services.run.v1.CloudRun; +import com.google.api.services.run.v1.model.ListRevisionsResponse; +import com.google.api.services.run.v1.model.Revision; +import com.google.api.services.run.v1.model.Service; +import com.google.common.collect.ImmutableSet; +import com.netflix.frigga.Names; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunInstance; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunServerGroup; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.MutableCacheData; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import groovy.lang.Reference; +import groovy.util.logging.Slf4j; +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; +import lombok.Getter; + +@Getter +@Slf4j +public class CloudrunServerGroupCachingAgent extends AbstractCloudrunCachingAgent + implements OnDemandAgent { + + private final String category = "serverGroup"; + private final String NAME = "name"; + private final String CLOUDRUN_INSTANCE = "-instance"; + private final String PARENT_PREFIX = "namespaces/"; + private final OnDemandMetricsSupport metricsSupport; + private static final Set types = + ImmutableSet.of( + AUTHORITATIVE.forType(APPLICATIONS.getNs()), + AUTHORITATIVE.forType(CLUSTERS.getNs()), + AUTHORITATIVE.forType(SERVER_GROUPS.getNs()), + AUTHORITATIVE.forType(INSTANCES.getNs()), + INFORMATIVE.forType(LOAD_BALANCERS.getNs())); + private String agentType = getAccountName() + "/" + getSimpleName(); + + public CloudrunServerGroupCachingAgent( + String accountName, + CloudrunNamedAccountCredentials credentials, + ObjectMapper objectMapper, + Registry registry) { + super(accountName, objectMapper, credentials); + this.metricsSupport = + new OnDemandMetricsSupport( + registry, this, CloudrunCloudProvider.ID + ":" + OnDemandType.ServerGroup); + } + + @Override + public String getSimpleName() { + return CloudrunServerGroupCachingAgent.class.getSimpleName(); + } + + @Override + public Collection getProvidedDataTypes() { + return types; + } + + @Override + public String getOnDemandAgentType() { + return getAgentType() + "-OnDemand"; + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return type.equals(OnDemandType.ServerGroup) && 
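+        // This agent only serves on-demand refreshes for Cloud Run server groups.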
cloudProvider.equals(CloudrunCloudProvider.ID); + } + + @Override + public OnDemandResult handle(ProviderCache providerCache, Map data) { + + if (!data.containsKey("serverGroupName") || data.get("account") != getAccountName()) { + return null; + } + String serverGroupName = data.get("serverGroupName").toString(); + Map matchingServerGroupAndLoadBalancer = + metricsSupport.readData(() -> loadServerGroupAndLoadBalancer(serverGroupName)); + if (matchingServerGroupAndLoadBalancer.isEmpty()) { + return null; + } + Revision serverGroup = (Revision) matchingServerGroupAndLoadBalancer.get("serverGroup"); + Service loadBalancer = (Service) matchingServerGroupAndLoadBalancer.get("loadBalancer"); + Map> serverGroupsByLoadBalancer = + Map.of(loadBalancer, List.of(serverGroup)); + CacheResult result = + metricsSupport.transformData( + () -> + buildCacheResult( + serverGroupsByLoadBalancer, + new HashMap<>(), + new ArrayList<>(), + Long.MAX_VALUE)); + String serverGroupKey = + Keys.getServerGroupKey( + getAccountName(), serverGroupName, CloudrunServerGroup.getRegion(serverGroup)); + try { + String jsonResult = getObjectMapper().writeValueAsString(result.getCacheResults()); + if (result.getCacheResults().values().stream().flatMap(Collection::stream).count() == 0) { + providerCache.evictDeletedItems(ON_DEMAND.getNs(), Set.of(serverGroupKey)); + } else { + metricsSupport.onDemandStore( + () -> { + CacheData cacheData = + new DefaultCacheData( + serverGroupKey, + 10 * 60, // ttl is 10 minutes. + Map.of( + "cacheTime", + System.currentTimeMillis(), + "cacheResults", + jsonResult, + "processedCount", + 0, + "processedTime", + null), + Map.of()); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData); + return null; + }); + } + Map> evictions = Map.of(); + logger.info("On demand cache refresh (data: {}) succeeded.", data); + return new OnDemandResult(getOnDemandAgentType(), result, evictions); + } catch (JsonProcessingException e) { + throw new RuntimeException("On demand cache refresh failed. 
Error message : " + e); + } + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + long start = System.currentTimeMillis(); + Map> serverGroupsByLoadBalancer = loadServerGroups(); + Map instancesByServerGroup = loadInstances(serverGroupsByLoadBalancer); + List evictFromOnDemand = new ArrayList<>(); + List keepInOnDemand = new ArrayList<>(); + + Collection serverGroupKeys = + serverGroupsByLoadBalancer.values().stream() + .flatMap(Collection::stream) + .map( + revision -> + Keys.getServerGroupKey( + getAccountName(), getRevisionName(revision), getRegion(revision))) + .collect(Collectors.toSet()); + providerCache + .getAll(ON_DEMAND.getNs(), serverGroupKeys) + .forEach( + onDemandEntry -> { + String cacheTime = (String) onDemandEntry.getAttributes().get("cacheTime"); + String processedCount = (String) onDemandEntry.getAttributes().get("processedCount"); + + if (cacheTime != null + && Long.parseLong(cacheTime) < start + && processedCount != null + && Integer.parseInt(processedCount) > 0) { + evictFromOnDemand.add(onDemandEntry); + } else { + keepInOnDemand.add(onDemandEntry); + } + }); + + Map onDemandMap = new HashMap<>(); + keepInOnDemand.forEach(cacheData -> onDemandMap.put(cacheData.getId(), cacheData)); + List onDemandEvict = + evictFromOnDemand.stream().map(CacheData::getId).collect(Collectors.toList()); + CacheResult cacheResult = + buildCacheResult(serverGroupsByLoadBalancer, onDemandMap, onDemandEvict, start); + + cacheResult + .getCacheResults() + .get(ON_DEMAND.getNs()) + .forEach( + onDemandEntry -> { + onDemandEntry.getAttributes().put("processedTime", System.currentTimeMillis()); + Object processedCountObj = onDemandEntry.getAttributes().get("processedCount"); + int processedCount = 0; + if (processedCountObj != null) { + processedCount = (int) processedCountObj; + } + onDemandEntry.getAttributes().put("processedCount", processedCount + 1); + }); + return cacheResult; + } + + public CacheResult buildCacheResult( + Map> serverGroupsByLoadBalancer, + Map onDemandKeep, + List onDemandEvict, + Long start) { + logger.info("Describing items in " + getAgentType()); + + Map cachedApplications = new HashMap<>(); + Map cachedClusters = new HashMap<>(); + Map cachedServerGroups = new HashMap<>(); + Map cachedLoadBalancers = new HashMap<>(); + Map cachedInstances = new HashMap<>(); + + serverGroupsByLoadBalancer.forEach( + (loadBalancer, serverGroups) -> { + String loadBalancerName = loadBalancer.getMetadata().getName(); + String application = + loadBalancer.getMetadata().getAnnotations().get("spinnaker/application"); + serverGroups.forEach( + serverGroup -> { + String region = getRegion(serverGroup); + if (!onDemandKeep.isEmpty()) { + CacheData onDemandData = + onDemandKeep.get( + Keys.getServerGroupKey( + getAccountName(), serverGroup.getMetadata().getName(), region)); + + if (onDemandData != null + && onDemandData.getAttributes().get("cacheTime") != null + && Long.parseLong(onDemandData.getAttributes().get("cacheTime").toString()) + >= start) { + Map> cacheResults; + try { + cacheResults = + getObjectMapper() + .readValue( + onDemandData.getAttributes().get("cacheResults").toString(), + new TypeReference<>() {}); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + cache(cacheResults, APPLICATIONS.getNs(), cachedApplications); + cache(cacheResults, CLUSTERS.getNs(), cachedClusters); + cache(cacheResults, SERVER_GROUPS.getNs(), cachedServerGroups); + cache(cacheResults, INSTANCES.getNs(), cachedInstances); + cache(cacheResults, 
LOAD_BALANCERS.getNs(), cachedLoadBalancers); + } + } else { + String serverGroupName = serverGroup.getMetadata().getName(); + Names names = Names.parseName(serverGroupName); + String applicationName = application; + if (applicationName == null) { + applicationName = names.getApp(); + } + String clusterName = names.getCluster(); + // no instances + String serverGroupKey = + Keys.getServerGroupKey(getAccountName(), serverGroupName, region); + String applicationKey = Keys.getApplicationKey(applicationName); + String clusterKey = + Keys.getClusterKey(getAccountName(), applicationName, clusterName); + String loadBalancerKey = + Keys.getLoadBalancerKey(getAccountName(), loadBalancerName); + // application data + MutableCacheData applicationData; + if (cachedApplications.isEmpty() + || !cachedApplications.containsKey(applicationKey)) { + applicationData = new MutableCacheData(applicationKey); + applicationData.getAttributes().put(NAME, applicationName); + Map> applicationRelationships = + applicationData.getRelationships(); + applicationRelationships.put( + CLUSTERS.getNs(), + new HashSet<>() { + { + add(clusterKey); + } + }); + applicationRelationships.put( + SERVER_GROUPS.getNs(), + new HashSet<>() { + { + add(serverGroupKey); + } + }); + applicationRelationships.put( + LOAD_BALANCERS.getNs(), + new HashSet<>() { + { + add(loadBalancerKey); + } + }); + cachedApplications.put(applicationKey, applicationData); + } else { + applicationData = (MutableCacheData) cachedApplications.get(applicationKey); + applicationData.getRelationships().get(CLUSTERS.getNs()).add(clusterKey); + applicationData + .getRelationships() + .get(SERVER_GROUPS.getNs()) + .add(serverGroupKey); + applicationData + .getRelationships() + .get(LOAD_BALANCERS.getNs()) + .add(loadBalancerKey); + } + + // cluster data + MutableCacheData clusterData; + if (cachedClusters.isEmpty() || !cachedClusters.containsKey(clusterKey)) { + clusterData = new MutableCacheData(clusterKey); + clusterData.getAttributes().put(NAME, clusterName); + Map> clusterRelationships = + clusterData.getRelationships(); + clusterRelationships.put(APPLICATIONS.getNs(), Set.of(applicationKey)); + clusterRelationships.put( + SERVER_GROUPS.getNs(), + new HashSet<>() { + { + add(serverGroupKey); + } + }); + clusterRelationships.put(LOAD_BALANCERS.getNs(), Set.of(loadBalancerKey)); + cachedClusters.put(clusterKey, clusterData); + } else { + clusterData = (MutableCacheData) cachedClusters.get(clusterKey); + clusterData.getRelationships().get(SERVER_GROUPS.getNs()).add(serverGroupKey); + } + + // instance data + String instanceName = serverGroupName + CLOUDRUN_INSTANCE; + String instanceKey = Keys.getInstanceKey(getAccountName(), instanceName); + MutableCacheData instanceData = new MutableCacheData(instanceKey); + instanceData.getAttributes().put(NAME, instanceName); + instanceData + .getAttributes() + .put("instance", new CloudrunInstance(serverGroup, loadBalancer, region)); + Map> instanceRelationships = + instanceData.getRelationships(); + instanceRelationships.put(APPLICATIONS.getNs(), Set.of(applicationKey)); + instanceRelationships.put(CLUSTERS.getNs(), Set.of(clusterKey)); + instanceRelationships.put(SERVER_GROUPS.getNs(), Set.of(serverGroupKey)); + instanceRelationships.put(LOAD_BALANCERS.getNs(), Set.of(loadBalancerKey)); + cachedInstances.put(instanceName, instanceData); + // server group data + CloudrunServerGroup cloudrunServerGroup = + new CloudrunServerGroup(serverGroup, getAccountName(), loadBalancerName); + + MutableCacheData serverGroupData = new 
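+                  // Each server group entry is keyed by account/name/region and linked back to its
+                  // application, cluster, instance and load balancer cache entries.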
MutableCacheData(serverGroupKey); + serverGroupData.getAttributes().put(NAME, serverGroupName); + serverGroupData.getAttributes().put("serverGroup", cloudrunServerGroup); + Map> serverGroupRelationships = + serverGroupData.getRelationships(); + serverGroupRelationships.put(APPLICATIONS.getNs(), Set.of(applicationKey)); + serverGroupRelationships.put(CLUSTERS.getNs(), Set.of(clusterKey)); + serverGroupRelationships.put(INSTANCES.getNs(), Set.of(instanceKey)); + serverGroupRelationships.put(LOAD_BALANCERS.getNs(), Set.of(loadBalancerKey)); + cachedServerGroups.put(serverGroupKey, serverGroupData); + + // loadbalancer data + MutableCacheData loadbalancerData; + if (cachedLoadBalancers.isEmpty() + || !cachedLoadBalancers.containsKey(loadBalancerKey)) { + loadbalancerData = new MutableCacheData(loadBalancerKey); + loadbalancerData.getAttributes().put(NAME, loadBalancerName); + loadbalancerData + .getAttributes() + .put( + "loadBalancer", + new CloudrunLoadBalancer( + loadBalancer, getAccountName(), getRegion(serverGroup))); + Set serverGroupKeySet = new HashSet<>(); + serverGroupKeySet.add(serverGroupKey); + Set instanceKeySet = new HashSet<>(); + instanceKeySet.add(instanceKey); + loadbalancerData + .getRelationships() + .put(SERVER_GROUPS.getNs(), serverGroupKeySet); + loadbalancerData.getRelationships().put(INSTANCES.getNs(), instanceKeySet); + cachedLoadBalancers.put(loadBalancerKey, loadbalancerData); + + } else { + loadbalancerData = (MutableCacheData) cachedLoadBalancers.get(loadBalancerKey); + loadbalancerData + .getRelationships() + .get(SERVER_GROUPS.getNs()) + .add(serverGroupKey); + loadbalancerData.getRelationships().get(INSTANCES.getNs()).add(instanceKey); + } + } + }); + }); + logger.info("Caching {} applications in {}", cachedApplications.size(), agentType); + logger.info("Caching {} clusters in {}", cachedClusters.size(), agentType); + logger.info("Caching {} server groups in {}", cachedServerGroups.size(), agentType); + logger.info("Caching {} load balancers in {}", cachedLoadBalancers.size(), agentType); + logger.info("Caching {} instances in {}", cachedInstances.size(), agentType); + + return new DefaultCacheResult( + new HashMap>() { + { + put(APPLICATIONS.getNs(), cachedApplications.values()); + put(CLUSTERS.getNs(), cachedClusters.values()); + put(SERVER_GROUPS.getNs(), cachedServerGroups.values()); + put(LOAD_BALANCERS.getNs(), cachedLoadBalancers.values()); + put(INSTANCES.getNs(), cachedInstances.values()); + put(ON_DEMAND.getNs(), onDemandKeep.values()); + } + }, + new HashMap>() { + { + put(ON_DEMAND.getNs(), onDemandEvict); + } + }); + } + + public Map> loadServerGroups() { + Map> serverGroupsByLoadBalancer = new HashMap<>(); + BatchRequest batch = getCredentials().getCloudRun().batch(); + String project = getCredentials().getProject(); + Optional servicesList = getServicesListRequest(project); + if (!servicesList.isPresent()) { + return serverGroupsByLoadBalancer; + } + List loadbalancers = getServicesList(project); + if (loadbalancers != null && loadbalancers.isEmpty()) { + return serverGroupsByLoadBalancer; + } + Map loadbalancerMap = new HashMap<>(); + Map> lbServerGroupMap = new HashMap<>(); + if (loadbalancers != null) { + loadbalancers.forEach(lb -> loadbalancerMap.put(lb.getMetadata().getName(), lb)); + } + + JsonBatchCallback callback = + new JsonBatchCallback<>() { + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + String errorJson = + new 
ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e); + logger.error(errorJson); + } + + @Override + public void onSuccess( + ListRevisionsResponse revisionsResponse, HttpHeaders responseHeaders) + throws IOException { + List revisions = revisionsResponse.getItems(); + if (revisions != null) { + revisions.forEach( + revision -> { + String serviceName = CloudrunServerGroup.getServiceName(revision); + if (lbServerGroupMap.containsKey(serviceName)) { + lbServerGroupMap.get(serviceName).add(revision); + } else { + List revisionSubList = new ArrayList<>(); + revisionSubList.add(revision); + lbServerGroupMap.put(serviceName, revisionSubList); + } + }); + } + } + }; + Optional revisionsList = getRevisionsListRequest(project); + if (revisionsList.isPresent()) { + try { + revisionsList.get().queue(batch, callback); + } catch (IOException e) { + logger.error("Error in creating request for the method revisions.list !!!"); + return serverGroupsByLoadBalancer; + } + } + try { + if (batch.size() > 0) { + batch.execute(); + } + if (!lbServerGroupMap.isEmpty()) { + lbServerGroupMap.forEach( + (svc, rList) -> { + if (loadbalancerMap.containsKey(svc)) { + serverGroupsByLoadBalancer.put(loadbalancerMap.get(svc), lbServerGroupMap.get(svc)); + } + }); + } + } catch (IOException e) { + logger.error( + "Error while fetching Cloudrun Services for the project : {}. {}", + project, + e.getMessage()); + } + + return serverGroupsByLoadBalancer; + } + + public Map loadServerGroupAndLoadBalancer(String serverGroupName) { + Map serverGroupAndLoadBalancer = new HashMap<>(); + BatchRequest batch = getCredentials().getCloudRun().batch(); + String project = getCredentials().getProject(); + List loadBalancers = getServicesList(project); + List serverGroups = getRevisionsList(project); + if (loadBalancers.isEmpty() || serverGroups.isEmpty()) { + logger.error("No Loadbalancer or server group found !!!!"); + return serverGroupAndLoadBalancer; + } + Optional serverGroup = + serverGroups.stream() + .filter(sg -> sg.getMetadata().getName().equals(serverGroupName)) + .findFirst(); + if (serverGroup.isEmpty()) { + logger.error("No server group found with name {}", serverGroupName); + return serverGroupAndLoadBalancer; + } + String loadbalancerName = CloudrunServerGroup.getServiceName(serverGroup.get()); + Optional loadBalancer = + loadBalancers.stream() + .filter(lb -> lb.getMetadata().getName().equals(loadbalancerName)) + .findFirst(); + if (loadBalancer.isEmpty()) { + logger.error( + "No CloudRun Service found with name {} for the Revision named {}", + loadbalancerName, + serverGroupName); + return serverGroupAndLoadBalancer; + } + serverGroupAndLoadBalancer.put("serverGroup", serverGroup.get()); + serverGroupAndLoadBalancer.put("loadBalancer", loadBalancer.get()); + return serverGroupAndLoadBalancer; + } + + private String getRegion(Revision revision) { + return revision.getMetadata().getLabels().get(CloudrunServerGroup.getLocationLabel()); + } + + private String getRevisionName(Revision revision) { + return revision.getMetadata().getName(); + } + + private Optional getServicesListRequest(String project) { + try { + return Optional.of( + getCredentials().getCloudRun().namespaces().services().list(PARENT_PREFIX + project)); + } catch (IOException e) { + logger.error("Error in creating request for the method services.list !!! 
{}", e.getMessage()); + return Optional.empty(); + } + } + + private List getServicesList(String project) { + Optional servicesListRequest = + getServicesListRequest(project); + if (servicesListRequest.isEmpty()) { + return new ArrayList<>(); + } + try { + return servicesListRequest.get().execute().getItems(); + } catch (IOException e) { + logger.error("Error executing services.list request. {}", e.getMessage()); + return new ArrayList<>(); + } + } + + private Optional getRevisionsListRequest(String project) { + try { + return Optional.of( + getCredentials().getCloudRun().namespaces().revisions().list(PARENT_PREFIX + project)); + } catch (IOException e) { + logger.error( + "Error in creating request for the method revisions.list !!! {} ", e.getMessage()); + return Optional.empty(); + } + } + + private List getRevisionsList(String project) { + Optional revisionsListRequest = + getRevisionsListRequest(project); + if (revisionsListRequest.isEmpty()) { + return new ArrayList<>(); + } + try { + return revisionsListRequest.get().execute().getItems(); + } catch (IOException e) { + logger.error("Error executing revisions.list request. {}", e.getMessage()); + return new ArrayList<>(); + } + } + + public Map loadInstances( + Map> serverGroupsByLoadBalancer) { + Map instancesByServerGroup = new HashMap<>(); + // TODO - check if loadbalancer is needed, if not change the method signature + serverGroupsByLoadBalancer.forEach( + (loadBalancer, serverGroups) -> { + serverGroups.forEach( + serverGroup -> { + String serverGroupName = serverGroup.getMetadata().getName(); + instancesByServerGroup.put(serverGroup, serverGroupName + CLOUDRUN_INSTANCE); + }); + }); + return instancesByServerGroup; + } + + @Override + public Collection> pendingOnDemandRequests(ProviderCache providerCache) { + Collection> requests = new HashSet<>(); + Collection keys = providerCache.getIdentifiers(ON_DEMAND.getNs()); + keys = + keys.stream() + .filter( + k -> { + Map parse = Keys.parse(k); + return (parse != null && Objects.equals(parse.get("account"), getAccountName())); + }) + .collect(Collectors.toSet()); + providerCache + .getAll(ON_DEMAND.getNs(), keys) + .forEach( + cacheData -> { + Map details = Keys.parse(cacheData.getId()); + requests.add( + Map.of( + "details", details, + "moniker", convertOnDemandDetails(details), + "cacheTime", cacheData.getAttributes().get("cacheTime"), + "processedCount", cacheData.getAttributes().get("processedCount"), + "processedTime", cacheData.getAttributes().get("processedTime"))); + }); + return requests; + } + + private static T setGroovyRef(Reference ref, T newValue) { + ref.set(newValue); + return newValue; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/callbacks/CloudrunCallback.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/callbacks/CloudrunCallback.java new file mode 100644 index 00000000000..3d5d6efbb6f --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/callbacks/CloudrunCallback.java @@ -0,0 +1,57 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.provider.callbacks; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import groovy.lang.Closure; +import java.io.IOException; +import lombok.extern.slf4j.Slf4j; +import 
org.codehaus.groovy.runtime.DefaultGroovyMethods; + +@Slf4j +public class CloudrunCallback extends JsonBatchCallback { + public CloudrunCallback success(Closure successCb) { + this.successCb = successCb; + return this; + } + + public CloudrunCallback failure(Closure failureCb) { + this.failureCb = failureCb; + return this; + } + + @Override + public void onSuccess(T response, HttpHeaders httpHeaders) throws IOException { + getSuccessCb(); + } + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders httpHeaders) throws IOException { + if (DefaultGroovyMethods.asBoolean(failureCb)) { + getFailureCb(); + } else { + String errorJson = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e); + log.error(errorJson); + } + } + + public Closure getSuccessCb() { + return successCb; + } + + public void setSuccessCb(Closure successCb) { + this.successCb = successCb; + } + + public Closure getFailureCb() { + return failureCb; + } + + public void setFailureCb(Closure failureCb) { + this.failureCb = failureCb; + } + + private Closure successCb; + private Closure failureCb; +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunApplicationProvider.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunApplicationProvider.java new file mode 100644 index 00000000000..26747d417f8 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunApplicationProvider.java @@ -0,0 +1,96 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cloudrun.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunApplication; +import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class CloudrunApplicationProvider implements ApplicationProvider { + @Autowired private Cache cacheView; + @Autowired private ObjectMapper objectMapper; + + @Override + public Set getApplications(boolean expand) { + RelationshipCacheFilter filter = + expand + ? 
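+            // Expanded queries also hydrate cluster relationships; otherwise only the bare
+            // application attributes are loaded.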
RelationshipCacheFilter.include(Keys.Namespace.CLUSTERS.getNs()) + : RelationshipCacheFilter.none(); + return cacheView + .getAll( + Keys.Namespace.APPLICATIONS.getNs(), + cacheView.filterIdentifiers( + Keys.Namespace.APPLICATIONS.getNs(), CloudrunCloudProvider.ID + ":*"), + filter) + .stream() + .map(this::applicationFromCacheData) + .collect(Collectors.toSet()); + } + + @Override + public CloudrunApplication getApplication(String name) { + CacheData cacheData = + cacheView.get( + Keys.Namespace.APPLICATIONS.getNs(), + Keys.getApplicationKey(name), + RelationshipCacheFilter.include(Keys.Namespace.CLUSTERS.getNs())); + + if (cacheData == null) { + return null; + } + return applicationFromCacheData(cacheData); + } + + public CloudrunApplication applicationFromCacheData(CacheData cacheData) { + CloudrunApplication application = + objectMapper.convertValue(cacheData.getAttributes(), CloudrunApplication.class); + + if (cacheData.getRelationships().get(Keys.Namespace.CLUSTERS.getNs()) != null) { + cacheData + .getRelationships() + .get(Keys.Namespace.CLUSTERS.getNs()) + .forEach( + clusterKey -> { + if (application.getClusterNames().get(Keys.parse(clusterKey).get("account")) + != null) { + application + .getClusterNames() + .get(Keys.parse(clusterKey).get("account")) + .add(Keys.parse(clusterKey).get("name")); + } else { + Set clusterKeySet = new HashSet<>(); + clusterKeySet.add(Keys.parse(clusterKey).get("name")); + application + .getClusterNames() + .put(Keys.parse(clusterKey).get("account"), clusterKeySet); + } + }); + } + return application; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunClusterProvider.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunClusterProvider.java new file mode 100644 index 00000000000..5c0a336a672 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunClusterProvider.java @@ -0,0 +1,365 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.provider.view; + +import static com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys.Namespace.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudrun.model.*; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import java.util.*; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import org.springframework.util.CollectionUtils; + +@Component +public class CloudrunClusterProvider implements ClusterProvider { + + @Autowired private Cache cacheView; + + @Autowired private ObjectMapper objectMapper; + + @Autowired private CloudrunApplicationProvider cloudrunApplicationProvider; + + @Autowired CloudrunLoadBalancerProvider provider; + + @Override + public Set getClusters(String applicationName, final String account) { + CacheData application = + cacheView.get( + APPLICATIONS.getNs(), + Keys.getApplicationKey(applicationName), + RelationshipCacheFilter.include(CLUSTERS.getNs())); + + if (application == null) { + return new HashSet(); + } + Collection clusterKeys = + application.getRelationships().get(CLUSTERS.getNs()).stream() + .filter(s -> Objects.requireNonNull(Keys.parse(s)).get("account").equals(account)) + .collect(Collectors.toList()); + Collection clusterData = cacheView.getAll(CLUSTERS.getNs(), clusterKeys); + return (Set) translateClusters(clusterData, true); + } + + @Override + public Map> getClusters() { + Collection clusterData = cacheView.getAll(CLUSTERS.getNs()); + Map> mapClusterList = + translateClusters(clusterData, true).stream() + .collect(Collectors.groupingBy(CloudrunCluster::getName)); + Map> mapClusterSet = new HashMap<>(); + mapClusterList.forEach((k, v) -> mapClusterSet.put(k, new HashSet<>(v))); + return mapClusterSet; + } + + @Override + public CloudrunCluster getCluster( + String application, String account, String name, boolean includeDetails) { + if (cacheView.get(CLUSTERS.getNs(), Keys.getClusterKey(account, application, name)) != null) { + List clusterData = + List.of(cacheView.get(CLUSTERS.getNs(), Keys.getClusterKey(account, application, name))); + if (clusterData != null) { + Optional cluster = + translateClusters(clusterData, includeDetails).stream().findFirst(); + if (cluster.isPresent()) { + return cluster.get(); + } + } + } + return null; + } + + @Override + public CloudrunCluster getCluster(String applicationName, String account, String clusterName) { + return getCluster(applicationName, account, clusterName, true); + } + + @Override + public CloudrunServerGroup getServerGroup( + String account, String region, String serverGroupName, boolean includeDetails) { + String serverGroupKey = Keys.getServerGroupKey(account, serverGroupName, region); + CacheData serverGroupData = cacheView.get(SERVER_GROUPS.getNs(), serverGroupKey); + if (serverGroupData == null) { + return null; + } + Set instances = + cacheView + .getAll(INSTANCES.getNs(), serverGroupData.getRelationships().get(INSTANCES.getNs())) + .stream() + .map(s -> CloudrunProviderUtils.instanceFromCacheData(objectMapper, s)) + 
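+            // Deserialize every related instance cache entry into a CloudrunInstance before
+            // assembling the server group.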
+  @Override
+  public CloudrunServerGroup getServerGroup(
+      String account, String region, String serverGroupName, boolean includeDetails) {
+    String serverGroupKey = Keys.getServerGroupKey(account, serverGroupName, region);
+    CacheData serverGroupData = cacheView.get(SERVER_GROUPS.getNs(), serverGroupKey);
+    if (serverGroupData == null) {
+      return null;
+    }
+    Set<CloudrunInstance> instances =
+        cacheView
+            .getAll(INSTANCES.getNs(), serverGroupData.getRelationships().get(INSTANCES.getNs()))
+            .stream()
+            .map(s -> CloudrunProviderUtils.instanceFromCacheData(objectMapper, s))
+            .collect(Collectors.toSet());
+    return CloudrunProviderUtils.serverGroupFromCacheData(objectMapper, serverGroupData, instances);
+  }
+
+  @Override
+  public CloudrunServerGroup getServerGroup(String account, String region, String serverGroupName) {
+    return getServerGroup(account, region, serverGroupName, true);
+  }
+
+  @Override
+  public Map<String, Set<CloudrunCluster>> getClusterSummaries(String applicationName) {
+    Map<String, List<CloudrunCluster>> mapClusterList =
+        translateClusters(getClusterData(applicationName), false).stream()
+            .collect(Collectors.groupingBy(CloudrunCluster::getName));
+    Map<String, Set<CloudrunCluster>> mapClusterSet = new HashMap<>();
+    mapClusterList.forEach((k, v) -> mapClusterSet.put(k, new HashSet<>(v)));
+    return mapClusterSet;
+  }
+
+  @Override
+  public Map<String, Set<CloudrunCluster>> getClusterDetails(String applicationName) {
+    Map<String, List<CloudrunCluster>> mapClusterList =
+        translateClusters(getClusterData(applicationName), true).stream()
+            .collect(Collectors.groupingBy(CloudrunCluster::getName));
+    Map<String, Set<CloudrunCluster>> mapClusterSet = new HashMap<>();
+    mapClusterList.forEach((k, v) -> mapClusterSet.put(k, new HashSet<>(v)));
+    return mapClusterSet;
+  }
+
+  public Set<CacheData> getClusterData(final String applicationName) {
+    CloudrunApplication application = cloudrunApplicationProvider.getApplication(applicationName);
+    List<String> clusterKeys = new ArrayList<>();
+    // An unknown application simply has no cluster data; guard rather than assert.
+    if (application != null && application.getClusterNames() != null) {
+      application
+          .getClusterNames()
+          .forEach(
+              (accountName, clusterNames) ->
+                  clusterKeys.addAll(
+                      clusterNames.stream()
+                          .map(
+                              clusterName ->
+                                  Keys.getClusterKey(accountName, applicationName, clusterName))
+                          .collect(Collectors.toSet())));
+    }
+    Collection<CacheData> data =
+        cacheView.getAll(
+            CLUSTERS.getNs(),
+            clusterKeys,
+            RelationshipCacheFilter.include(SERVER_GROUPS.getNs(), LOAD_BALANCERS.getNs()));
+
+    if (CollectionUtils.isEmpty(data)) {
+      return Collections.emptySet();
+    }
+    return data.stream().filter(Objects::nonNull).collect(Collectors.toSet());
+  }
+
+  @Override
+  public String getCloudProviderId() {
+    return CloudrunCloudProvider.ID;
+  }
+
+  @Override
+  public boolean supportsMinimalClusters() {
+    return false;
+  }
+
+  public Set<CloudrunCluster> translateClusters(
+      Collection<CacheData> clusterData, boolean includeDetails) {
+    if (clusterData == null) {
+      return new HashSet<>();
+    }
+
+    Map<String, CloudrunLoadBalancer> loadBalancers =
+        includeDetails
+            ? translateLoadBalancers(
+                CloudrunProviderUtils.resolveRelationshipDataForCollection(
+                    cacheView, clusterData, LOAD_BALANCERS.getNs()))
+            : null;
+
+    Map<String, Set<CloudrunServerGroup>> serverGroups =
+        includeDetails
+            ?
translateServerGroups( + CloudrunProviderUtils.resolveRelationshipDataForCollection( + cacheView, + clusterData, + SERVER_GROUPS.getNs(), + RelationshipCacheFilter.include(INSTANCES.getNs(), LOAD_BALANCERS.getNs()))) + : null; + + Set clusters = new HashSet<>(); + for (CacheData clusterDataEntry : clusterData) { + Map clusterKey = Keys.parse(clusterDataEntry.getId()); + assert clusterKey != null; + CloudrunCluster cluster = + new CloudrunCluster() + .setAccountName(clusterKey.get("account")) + .setName(clusterKey.get("name")); + + if (includeDetails) { + cluster.setLoadBalancers( + clusterDataEntry.getRelationships().get(LOAD_BALANCERS.getNs()).stream() + .map(loadBalancers::get) + .collect(Collectors.toSet())); + + cluster.setServerGroups( + serverGroups.get(cluster.getName()).stream() + .filter(it -> it.getAccount().equals(cluster.getAccountName())) + .collect(Collectors.toSet())); + } else { + clusterDataEntry + .getRelationships() + .get(LOAD_BALANCERS.getNs()) + .forEach( + loadBalancerKey -> { + Map parts = Keys.parse(loadBalancerKey); + assert parts != null; + cluster + .getLoadBalancers() + .add( + new CloudrunLoadBalancer() + .setName(parts.get("name")) + .setAccount(parts.get("account"))); + }); + + clusterDataEntry + .getRelationships() + .get(SERVER_GROUPS.getNs()) + .forEach( + serverGroupKey -> { + Map parts = Keys.parse(serverGroupKey); + assert parts != null; + cluster + .getServerGroups() + .add( + new CloudrunServerGroup() + .setName(parts.get("name")) + .setAccount(parts.get("account")) + .setRegion(parts.get("region"))); + }); + } + clusters.add(cluster); + } + addLatestRevisionToServerGroup(clusters); + return clusters; + } + + private void addLatestRevisionToServerGroup(Set clusters) { + clusters.forEach( + cluster -> { + Set serverGroups = cluster.getServerGroups(); + serverGroups.forEach( + serverGroup -> { + serverGroup + .getLoadBalancers() + .forEach( + name -> { + CloudrunLoadBalancer loadBalancer = + provider.getLoadBalancer(serverGroup.getAccount(), name); + Map tags = new HashMap<>(); + tags.put("latestRevision", loadBalancer.getLatestReadyRevisionName()); + if (serverGroup + .getName() + .equals(loadBalancer.getLatestReadyRevisionName())) { + tags.put("isLatest", true); + } else { + tags.put("isLatest", false); + } + serverGroup.setTags(tags); + }); + }); + }); + } + + public Map> translateServerGroups( + Collection serverGroupData) { + Map> instanceCacheDataMap = + CloudrunProviderUtils.preserveRelationshipDataForCollection( + cacheView, serverGroupData, INSTANCES.getNs(), RelationshipCacheFilter.none()); + Map> instances = new HashMap<>(); + instanceCacheDataMap.forEach( + (k, v) -> { + instances.put( + k, + v.stream() + .map(c -> CloudrunProviderUtils.instanceFromCacheData(objectMapper, c)) + .collect(Collectors.toSet())); + }); + + Map> acc = new HashMap<>(); + serverGroupData.forEach( + cacheData -> { + CloudrunServerGroup serverGroup = + CloudrunProviderUtils.serverGroupFromCacheData( + objectMapper, cacheData, instances.get(cacheData.getId())); + String clusterName = Names.parseName(serverGroup.getName()).getCluster(); + if (acc.isEmpty() || !acc.containsKey(clusterName)) { + acc.put( + clusterName, + new HashSet<>() { + { + add(serverGroup); + } + }); + } else { + acc.get(clusterName).add(serverGroup); + } + }); + return acc; + } + + public static Map translateLoadBalancers( + Collection loadBalancerData) { + Map result = new HashMap<>(); + loadBalancerData.forEach( + loadBalancerEntry -> { + Map parts = Keys.parse(loadBalancerEntry.getId()); + // 
skip entries whose cache id cannot be parsed into a load balancer key
+          if (parts == null) {
+            return;
+          }
+          result.put(
+              loadBalancerEntry.getId(),
+              new CloudrunLoadBalancer()
+                  .setName(parts.get("name"))
+                  .setAccount(parts.get("account")));
+        });
+    return result;
+  }
+
+  public Cache getCacheView() {
+    return cacheView;
+  }
+
+  public void setCacheView(Cache cacheView) {
+    this.cacheView = cacheView;
+  }
+
+  public ObjectMapper getObjectMapper() {
+    return objectMapper;
+  }
+
+  public void setObjectMapper(ObjectMapper objectMapper) {
+    this.objectMapper = objectMapper;
+  }
+
+  public CloudrunApplicationProvider getCloudrunApplicationProvider() {
+    return cloudrunApplicationProvider;
+  }
+
+  public void setCloudrunApplicationProvider(
+      CloudrunApplicationProvider cloudrunApplicationProvider) {
+    this.cloudrunApplicationProvider = cloudrunApplicationProvider;
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunInstanceProvider.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunInstanceProvider.java
new file mode 100644
index 00000000000..8ffc5da79c3
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunInstanceProvider.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2022 OpsMx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.provider.view; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunInstance; +import com.netflix.spinnaker.clouddriver.model.InstanceProvider; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class CloudrunInstanceProvider implements InstanceProvider { + @Autowired Cache cacheView; + + @Autowired ObjectMapper objectMapper; + + @Override + public String getCloudProvider() { + return CloudrunCloudProvider.ID; + } + + @Override + public CloudrunInstance getInstance(String account, String region, String instanceName) { + String instanceKey = Keys.getInstanceKey(account, instanceName); + CacheData instanceData = + cacheView.get( + INSTANCES.getNs(), + instanceKey, + RelationshipCacheFilter.include(LOAD_BALANCERS.getNs(), SERVER_GROUPS.getNs())); + if (instanceData == null) { + return null; + } + return getInstanceFromCacheData(instanceData); + } + + private CloudrunInstance getInstanceFromCacheData(CacheData cacheData) { + return objectMapper.convertValue( + cacheData.getAttributes().get("instance"), CloudrunInstance.class); + } + + @Override + public String getConsoleOutput(String account, String region, String id) { + return null; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunLoadBalancerProvider.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunLoadBalancerProvider.java new file mode 100644 index 00000000000..d3578851316 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunLoadBalancerProvider.java @@ -0,0 +1,111 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.provider.view; + +import static com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys.Namespace.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.cache.Keys; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunInstance; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunServerGroup; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.*; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class CloudrunLoadBalancerProvider implements LoadBalancerProvider { + + private final String cloudProvider = CloudrunCloudProvider.ID; + @Autowired private Cache cacheView; + @Autowired private ObjectMapper objectMapper; + + @Override + public List list() { + return null; + } + + @Override + public Item get(String name) { + return null; + } + + @Override + public List

byAccountAndRegionAndName(String account, String region, String name) { + return null; + } + + @Override + public Set getApplicationLoadBalancers(String applicationName) { + String applicationKey = Keys.getApplicationKey(applicationName); + CacheData application = cacheView.get(Keys.Namespace.APPLICATIONS.getNs(), applicationKey); + + Collection applicationLoadBalancers = + CloudrunProviderUtils.resolveRelationshipData( + cacheView, application, Keys.Namespace.LOAD_BALANCERS.getNs()); + return translateLoadBalancers(applicationLoadBalancers); + } + + public Set translateLoadBalancers(Collection cacheData) { + Set loadBalancers = new HashSet<>(); + cacheData.forEach( + loadBalancerData -> { + Set serverGroups = new HashSet<>(); + CloudrunProviderUtils.resolveRelationshipData( + cacheView, loadBalancerData, SERVER_GROUPS.getNs()) + .forEach( + serverGroupRelationshipData -> { + Set instances = + CloudrunProviderUtils.resolveRelationshipData( + cacheView, serverGroupRelationshipData, INSTANCES.getNs()) + .stream() + .map( + instanceRelationshipDate -> + CloudrunProviderUtils.instanceFromCacheData( + objectMapper, instanceRelationshipDate)) + .collect(Collectors.toSet()); + serverGroups.add( + CloudrunProviderUtils.serverGroupFromCacheData( + objectMapper, serverGroupRelationshipData, instances)); + }); + CloudrunLoadBalancer loadBalancer = + CloudrunProviderUtils.loadBalancerFromCacheData( + objectMapper, loadBalancerData, serverGroups); + loadBalancers.add(loadBalancer); + }); + return loadBalancers; + } + + public CloudrunLoadBalancer getLoadBalancer(String account, String loadBalancerName) { + String loadBalancerKey = Keys.getLoadBalancerKey(account, loadBalancerName); + CacheData loadBalancerData = cacheView.get(LOAD_BALANCERS.getNs(), loadBalancerKey); + if (loadBalancerData == null) { + return null; + } + Set loadBalancers = translateLoadBalancers(Set.of(loadBalancerData)); + return loadBalancers.isEmpty() ? null : loadBalancers.stream().findFirst().get(); + } + + public final String getCloudProvider() { + return cloudProvider; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunProviderUtils.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunProviderUtils.java new file mode 100644 index 00000000000..efb3efbad4c --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/CloudrunProviderUtils.java @@ -0,0 +1,118 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.provider.view;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.CacheFilter;
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunInstance;
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunLoadBalancer;
+import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunServerGroup;
+import java.util.*;
+import java.util.stream.Collectors;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class CloudrunProviderUtils {
+  public static CloudrunServerGroup serverGroupFromCacheData(
+      ObjectMapper objectMapper, CacheData cacheData, Set<CloudrunInstance> instances) {
+    CloudrunServerGroup serverGroup =
+        objectMapper.convertValue(
+            cacheData.getAttributes().get("serverGroup"), CloudrunServerGroup.class);
+    serverGroup.setInstances(instances);
+    return serverGroup;
+  }
+
+  public static CloudrunInstance instanceFromCacheData(
+      ObjectMapper objectMapper, CacheData instanceData) {
+    if (instanceData == null) {
+      return null;
+    }
+    return objectMapper.convertValue(
+        instanceData.getAttributes().get("instance"), CloudrunInstance.class);
+  }
+
+  public static CloudrunLoadBalancer loadBalancerFromCacheData(
+      ObjectMapper objectMapper,
+      CacheData loadBalancerData,
+      Set<CloudrunServerGroup> serverGroups) {
+    CloudrunLoadBalancer loadBalancer =
+        objectMapper.convertValue(
+            loadBalancerData.getAttributes().get("loadBalancer"), CloudrunLoadBalancer.class);
+    loadBalancer.setLoadBalancerServerGroups(serverGroups);
+    return loadBalancer;
+  }
+
+  public static Collection<CacheData> resolveRelationshipData(
+      Cache cacheView, CacheData source, String relationship) {
+    // A null source or a missing relationship map simply means there is nothing to resolve.
+    return cacheView.getAll(
+        relationship,
+        (source != null && source.getRelationships() != null)
+            ?
source.getRelationships().get(relationship) + : new ArrayList()); + } + + public static Collection resolveRelationshipDataForCollection( + Cache cacheView, + Collection sources, + String relationship, + CacheFilter cacheFilter) { + List relationships = + sources.stream() + .map(t -> t.getRelationships().get(relationship)) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + return cacheView.getAll(relationship, relationships, cacheFilter); + } + + public static Collection resolveRelationshipDataForCollection( + Cache cacheView, Collection sources, String relationship) { + return CloudrunProviderUtils.resolveRelationshipDataForCollection( + cacheView, sources, relationship, null); + } + + public static Map> preserveRelationshipDataForCollection( + Cache cacheView, + Collection sources, + String relationship, + CacheFilter cacheFilter) { + Collection collection = + resolveRelationshipDataForCollection(cacheView, sources, relationship, cacheFilter); + Map allData = new HashMap<>(); + collection.forEach(v -> allData.put(v.getId(), v)); + Map> result = new HashMap<>(); + sources.forEach( + source -> + result.put( + source.getId(), + source.getRelationships().get(relationship).stream() + .map(t -> allData.get(t)) + .collect(Collectors.toList()))); + return result; + } + + public static Map> preserveRelationshipDataForCollection( + Cache cacheView, Collection sources, String relationship) { + return CloudrunProviderUtils.preserveRelationshipDataForCollection( + cacheView, sources, relationship, null); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/MutableCacheData.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/MutableCacheData.java new file mode 100644 index 00000000000..bbe542b0288 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/provider/view/MutableCacheData.java @@ -0,0 +1,52 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.provider.view; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.cats.cache.CacheData; +import java.util.*; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@Getter +@EqualsAndHashCode +public class MutableCacheData implements CacheData { + final String id; + final Map> relationships = new HashMap<>(); + final Map attributes = new HashMap<>(); + final int ttlSeconds = -1; + + public MutableCacheData(String id) { + this.id = id; + } + + @JsonCreator + public MutableCacheData( + @JsonProperty("id") String id, + @JsonProperty("attributes") Map attributes, + @JsonProperty("relationships") Map> relationships) { + this(id); + this.attributes.putAll(attributes); + this.relationships.putAll(relationships); + } + + public static Map mutableCacheMap() { + Map cacheMap = new LinkedHashMap(); + return cacheMap; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunCredentials.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunCredentials.java new file mode 100644 index 00000000000..4854ef221b2 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunCredentials.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.security; + +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.JsonFactory; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.run.v1.CloudRun; +import com.google.auth.http.HttpCredentialsAdapter; +import com.google.auth.oauth2.GoogleCredentials; +import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentials; + +public class CloudrunCredentials extends GoogleCommonCredentials { + + private final String project; + + public CloudrunCredentials(String project) { + this.project = project; + } + + public CloudRun getCloudrun(String applicationName) { + HttpTransport httpTransport = buildHttpTransport(); + JsonFactory jsonFactory = GsonFactory.getDefaultInstance(); + GoogleCredentials credentials = + getCredentials().createScoped("https://www.googleapis.com/auth/cloud-platform"); + HttpRequestInitializer requestInitializer = new HttpCredentialsAdapter(credentials); + return new CloudRun.Builder(httpTransport, jsonFactory, requestInitializer) + .setApplicationName(applicationName) + .build(); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunCredentialsLifecycleHandler.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunCredentialsLifecycleHandler.java new file mode 100644 index 00000000000..362620688ba --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunCredentialsLifecycleHandler.java @@ -0,0 +1,60 @@ +/* + * Copyright 2022 OpsMx + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.security; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.CloudrunProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.agent.CloudrunServerGroupCachingAgent; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import java.util.Collections; +import java.util.List; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@Component +@RequiredArgsConstructor +public class CloudrunCredentialsLifecycleHandler + implements CredentialsLifecycleHandler { + + private final CloudrunProvider cloudrunCloudProvider; + private final ObjectMapper objectMapper; + private final Registry registry; + + @Override + public void credentialsAdded(CloudrunNamedAccountCredentials credentials) { + addAgentFor(credentials); + } + + @Override + public void credentialsUpdated(CloudrunNamedAccountCredentials credentials) { + cloudrunCloudProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + addAgentFor(credentials); + } + + @Override + public void credentialsDeleted(CloudrunNamedAccountCredentials credentials) { + cloudrunCloudProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + } + + private void addAgentFor(CloudrunNamedAccountCredentials credentials) { + cloudrunCloudProvider.addAgents( + List.of( + new CloudrunServerGroupCachingAgent( + credentials.getName(), credentials, objectMapper, registry))); + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunJsonCredentials.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunJsonCredentials.java new file mode 100644 index 00000000000..add5f8d39d4 --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunJsonCredentials.java @@ -0,0 +1,40 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cloudrun.security; + +import com.google.api.services.run.v1.CloudRunScopes; +import com.google.auth.oauth2.GoogleCredentials; +import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentialUtils; + +public class CloudrunJsonCredentials extends CloudrunCredentials { + + private final String jsonKey; + + public CloudrunJsonCredentials(String project, String jsonKey) { + super(project); + this.jsonKey = jsonKey; + } + + @Override + public GoogleCredentials getCredentials() { + return GoogleCommonCredentialUtils.getCredentials(jsonKey, CloudRunScopes.CLOUD_PLATFORM); + } + + public final String getJsonKey() { + return jsonKey; + } +} diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunNamedAccountCredentials.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunNamedAccountCredentials.java new file mode 100644 index 00000000000..b1c11f71f5b --- /dev/null +++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/clouddriver/cloudrun/security/CloudrunNamedAccountCredentials.java @@ -0,0 +1,188 @@ +/* + * Copyright 2022 OpsMx Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.cloudrun.security;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.api.services.run.v1.CloudRun;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider;
+import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor;
+import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials;
+import com.netflix.spinnaker.fiat.model.resources.Permissions;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = false)
+public class CloudrunNamedAccountCredentials
+    extends AbstractAccountCredentials<CloudrunCredentials> {
+  public static final String CREDENTIALS_TYPE = "cloudrun";
+  private final String name;
+  private final String environment;
+  private final String accountType;
+  private final String project;
+  private final String cloudProvider;
+  private final List<String> requiredGroupMembership;
+  private final Permissions permissions;
+
+  @JsonIgnore private final String jsonPath;
+
+  @JsonIgnore private final String serviceAccountEmail;
+
+  @JsonIgnore private final String localRepositoryDirectory;
+
+  private final CloudrunCredentials credentials;
+
+  private final String applicationName;
+  private final Long cachingIntervalSeconds;
+  private final CloudRun cloudRun;
+
+  @Data
+  public static class Builder {
+    String name;
+    String environment;
+    String accountType;
+    String project;
+    String cloudProvider;
+    List<String> requiredGroupMembership;
+    Permissions permissions = Permissions.EMPTY;
+    CloudrunCredentials credentials;
+    String jsonKey;
+    String jsonPath;
+    String serviceAccountEmail;
+    String localRepositoryDirectory;
+    String applicationName;
+    boolean sshTrustUnknownHosts;
+    CloudRun cloudRun;
+    Long cachingIntervalSeconds;
+
+    Boolean liveLookupsEnabled = true;
+
+    Builder name(String name) {
+      this.name = name;
+      return this;
+    }
+
+    Builder environment(String environment) {
+      this.environment = environment;
+      return this;
+    }
+
+    Builder accountType(String accountType) {
+      this.accountType = accountType;
+      return this;
+    }
+
+    Builder project(String project) {
+      this.project = project;
+      return this;
+    }
+
+    Builder cloudProvider(String cloudProvider) {
+      // Cloud Run accounts always report the cloudrun provider id, regardless of input.
+      this.cloudProvider = CloudrunCloudProvider.ID;
+      return this;
+    }
+
+    Builder serviceAccountEmail(String serviceAccountEmail) {
+      this.serviceAccountEmail = serviceAccountEmail;
+      return this;
+    }
+
+    Builder localRepositoryDirectory(String localRepositoryDirectory) {
+      this.localRepositoryDirectory = localRepositoryDirectory;
+      return this;
+    }
+
+    Builder requiredGroupMembership(List<String> requiredGroupMembership) {
+      this.requiredGroupMembership = requiredGroupMembership;
+      return this;
+    }
+
+    Builder permissions(Permissions permissions) {
+      if (permissions.isRestricted()) {
+        this.requiredGroupMembership = new ArrayList<>();
+        this.permissions = permissions;
+      }
+      return this;
+    }
+
+    Builder jsonPath(String jsonPath) {
+      this.jsonPath = jsonPath;
+      return this;
+    }
+
+    Builder jsonKey(String jsonKey) {
+      this.jsonKey = jsonKey;
+      return this;
+    }
+
+    Builder applicationName(String applicationName) {
+      this.applicationName = applicationName;
+      return this;
+    }
+
+    Builder credentials(CloudrunCredentials credentials) {
+      this.credentials = credentials;
+      return this;
+    }
+
+    Builder cachingIntervalSeconds(Long cachingIntervalSeconds) {
+      this.cachingIntervalSeconds = cachingIntervalSeconds;
+      return this;
+    }
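+
+    // Illustrative usage, mirroring the tests later in this change (which chain
+    // the Lombok-generated setters; "my-project" and jsonKey are placeholders):
+    //
+    //   CloudrunNamedAccountCredentials credentials =
+    //       new CloudrunNamedAccountCredentials.Builder()
+    //           .setName("cloudrun-account")
+    //           .setProject("my-project")
+    //           .setJsonKey(jsonKey)
+    //           .build(jobExecutor);
+    //
+    // build() prefers an explicitly supplied CloudrunCredentials, then falls
+    // back to JSON-key credentials, then to application-default credentials.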
+    Builder sshTrustUnknownHosts(boolean sshTrustUnknownHosts) {
+      this.sshTrustUnknownHosts = sshTrustUnknownHosts;
+      return this;
+    }
+
+    Builder cloudRun(CloudRun cloudRun) {
+      this.cloudRun = cloudRun;
+      return this;
+    }
+
+    public CloudrunNamedAccountCredentials build(CloudrunJobExecutor jobExecutor) {
+      // Prefer explicitly supplied credentials; otherwise derive them from the
+      // JSON key, falling back to application-default credentials.
+      if (credentials == null) {
+        credentials =
+            (jsonKey != null)
+                ? new CloudrunJsonCredentials(project, jsonKey)
+                : new CloudrunCredentials(project);
+      }
+
+      cloudRun = credentials.getCloudrun(applicationName);
+      return new CloudrunNamedAccountCredentials(
+          name,
+          environment,
+          accountType,
+          project,
+          CloudrunCloudProvider.ID,
+          requiredGroupMembership,
+          permissions,
+          jsonPath,
+          serviceAccountEmail,
+          localRepositoryDirectory,
+          credentials,
+          applicationName,
+          cachingIntervalSeconds,
+          cloudRun);
+    }
+  }
+}
diff --git a/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/config/CloudrunConfiguration.java b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/config/CloudrunConfiguration.java
new file mode 100644
index 00000000000..a0c6cb139d4
--- /dev/null
+++ b/clouddriver-cloudrun/src/main/java/com/netflix/spinnaker/config/CloudrunConfiguration.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2022 OpsMx Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.config; + +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunCloudProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.config.CloudrunConfigurationProperties; +import com.netflix.spinnaker.clouddriver.cloudrun.config.CloudrunCredentialsConfiguration; +import com.netflix.spinnaker.clouddriver.cloudrun.health.CloudrunHealthIndicator; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.CloudrunProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Import; +import org.springframework.scheduling.annotation.EnableScheduling; + +@Configuration +@EnableConfigurationProperties +@EnableScheduling +@ConditionalOnProperty("cloudrun.enabled") +@ComponentScan("com.netflix.spinnaker.clouddriver.cloudrun") +@Import(CloudrunCredentialsConfiguration.class) +public class CloudrunConfiguration { + @Bean + @ConfigurationProperties("cloudrun") + public CloudrunConfigurationProperties cloudrunConfigurationProperties() { + return new CloudrunConfigurationProperties(); + } + + @Bean + public CloudrunHealthIndicator cloudrunHealthIndicator() { + return new CloudrunHealthIndicator(); + } + + @Bean + public CloudrunProvider cloudrunProvider(CloudrunCloudProvider cloudProvider) { + return new CloudrunProvider(cloudProvider); + } + + @Bean + @ConditionalOnMissingBean( + value = CloudrunNamedAccountCredentials.class, + parameterizedContainer = CredentialsRepository.class) + public CredentialsRepository credentialsRepository( + CredentialsLifecycleHandler eventHandler) { + return new MapBackedCredentialsRepository<>(CloudrunCloudProvider.ID, eventHandler); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/config/AccountDefinitionTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/config/AccountDefinitionTest.java new file mode 100644 index 00000000000..24542d12869 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/config/AccountDefinitionTest.java @@ -0,0 +1,25 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.config; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; + +public class AccountDefinitionTest { + @Test + public void testCredentialsEquality() { + CloudrunConfigurationProperties.ManagedAccount account1 = + new CloudrunConfigurationProperties.ManagedAccount() + .setServiceAccountEmail("email@example.com"); + account1.setName("cloudrun-1"); + CloudrunConfigurationProperties.ManagedAccount account2 = + new CloudrunConfigurationProperties.ManagedAccount() + .setServiceAccountEmail("email@example.com"); + account2.setName("cloudrun-2"); + + 
assertThat(account1).isNotEqualTo(account2);
+
+    // Check that the name is part of the comparison: once the names match,
+    // the two definitions should be equal.
+    account2.setName("cloudrun-1");
+    assertThat(account1).isEqualTo(account2);
+  }
+}
diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/CloudrunDeployManifestDescriptionTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/CloudrunDeployManifestDescriptionTest.java
new file mode 100644
index 00000000000..d7ea89e2694
--- /dev/null
+++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/CloudrunDeployManifestDescriptionTest.java
@@ -0,0 +1,63 @@
+package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.*;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.cloudrun.converter.manifest.CloudrunDeployManifestConverter;
+import com.netflix.spinnaker.clouddriver.cloudrun.description.manifest.CloudrunDeployManifestDescription;
+import com.netflix.spinnaker.clouddriver.cloudrun.op.manifest.CloudrunDeployManifestOperation;
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class CloudrunDeployManifestDescriptionTest {
+
+  CloudrunDeployManifestConverter converter;
+  CredentialsRepository<CloudrunNamedAccountCredentials> credentialsRepository;
+  CloudrunNamedAccountCredentials mockCredentials;
+  Map<String, Object> input =
+      new HashMap<>() {
+        {
+          put("accountName", "cloudrunaccount");
+        }
+      };
+
+  @BeforeEach
+  public void init() {
+    converter = new CloudrunDeployManifestConverter();
+    credentialsRepository = mock(CredentialsRepository.class);
+    converter.setCredentialsRepository(credentialsRepository);
+    converter.setObjectMapper(new ObjectMapper());
+    mockCredentials = mock(CloudrunNamedAccountCredentials.class);
+  }
+
+  @Test
+  public void convertOperationTest() {
+    when(credentialsRepository.getOne(any())).thenReturn(mockCredentials);
+    assertTrue(converter.convertOperation(input) instanceof CloudrunDeployManifestOperation);
+  }
+
+  @Test
+  public void convertDescriptionTest() {
+    when(credentialsRepository.getOne(any())).thenReturn(mockCredentials);
+    assertTrue(converter.convertDescription(input) instanceof CloudrunDeployManifestDescription);
+  }
+
+  @Test
+  public void checkApplicationNameTest() {
+    Map<String, String> appMap = new HashMap<>();
+    appMap.put("app", "foo");
+    input.put("moniker", appMap);
+    when(credentialsRepository.getOne(any())).thenReturn(mockCredentials);
+    CloudrunDeployManifestDescription desc = converter.convertDescription(input);
+    assertThat(desc.getApplication()).isEqualTo("foo");
+  }
+}
diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeleteCloudrunLoadBalancerAtomicOperationConverterTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeleteCloudrunLoadBalancerAtomicOperationConverterTest.java
new file mode 100644
index 00000000000..b38ce78dec1
--- /dev/null
+++
@@ -0,0 +1,46 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DeleteCloudrunLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DeleteCloudrunLoadBalancerAtomicOperationConverterTest { + + DeleteCloudrunLoadBalancerAtomicOperationConverter + deleteCloudrunLoadBalancerAtomicOperationConverter; + CredentialsRepository credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + Map input = + new HashMap<>() { + { + put("accountName", "cloudrunaccount"); + } + }; + + @BeforeEach + public void init() { + deleteCloudrunLoadBalancerAtomicOperationConverter = + new DeleteCloudrunLoadBalancerAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + deleteCloudrunLoadBalancerAtomicOperationConverter.setCredentialsRepository( + credentialsRepository); + mockCredentials = mock(CloudrunNamedAccountCredentials.class); + } + + @Test + public void ConvertOperationTest() { + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + assertTrue( + deleteCloudrunLoadBalancerAtomicOperationConverter.convertOperation(input) + instanceof DeleteCloudrunLoadBalancerAtomicOperation); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeployCloudrunAtomicOperationConverterTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeployCloudrunAtomicOperationConverterTest.java new file mode 100644 index 00000000000..b152cecd764 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DeployCloudrunAtomicOperationConverterTest.java @@ -0,0 +1,43 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DeployCloudrunAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DeployCloudrunAtomicOperationConverterTest { + DeployCloudrunAtomicOperationConverter deployCloudrunAtomicOperationConverter; + CredentialsRepository credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + Map input = + new HashMap<>() { + { + put("accountName", "cloudrunaccount"); + } + }; + + @BeforeEach + public void init() { + deployCloudrunAtomicOperationConverter = new DeployCloudrunAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + deployCloudrunAtomicOperationConverter.setCredentialsRepository(credentialsRepository); + deployCloudrunAtomicOperationConverter.setObjectMapper(new ObjectMapper()); + mockCredentials = 
mock(CloudrunNamedAccountCredentials.class); + } + + @Test + public void ConvertOperationTest() { + + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + assertTrue( + deployCloudrunAtomicOperationConverter.convertOperation(input) + instanceof DeployCloudrunAtomicOperation); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DestroyCloudrunAtomicOperationConverterTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DestroyCloudrunAtomicOperationConverterTest.java new file mode 100644 index 00000000000..23c3a190034 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DestroyCloudrunAtomicOperationConverterTest.java @@ -0,0 +1,43 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DestroyCloudrunAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DestroyCloudrunAtomicOperationConverterTest { + DestroyCloudrunAtomicOperationConverter destroyCloudrunAtomicOperationConverter; + CredentialsRepository credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + Map input = + new HashMap<>() { + { + put("accountName", "cloudrunaccount"); + } + }; + + @BeforeEach + public void init() { + destroyCloudrunAtomicOperationConverter = new DestroyCloudrunAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + destroyCloudrunAtomicOperationConverter.setCredentialsRepository(credentialsRepository); + mockCredentials = mock(CloudrunNamedAccountCredentials.class); + } + + @Test + public void ConvertOperationTest() { + + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + assertTrue( + destroyCloudrunAtomicOperationConverter.convertOperation(input) + instanceof DestroyCloudrunAtomicOperation); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DisableCloudrunAtomicOperationConverterTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DisableCloudrunAtomicOperationConverterTest.java new file mode 100644 index 00000000000..1709ad0b21a --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/DisableCloudrunAtomicOperationConverterTest.java @@ -0,0 +1,43 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.DisableCloudrunAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DisableCloudrunAtomicOperationConverterTest { + DisableCloudrunAtomicOperationConverter disableCloudrunAtomicOperationConverter; + CredentialsRepository credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + Map input = + new HashMap<>() { + { + put("accountName", "cloudrunaccount"); + } + }; + + @BeforeEach + public void init() { + disableCloudrunAtomicOperationConverter = new DisableCloudrunAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + disableCloudrunAtomicOperationConverter.setCredentialsRepository(credentialsRepository); + mockCredentials = mock(CloudrunNamedAccountCredentials.class); + } + + @Test + public void ConvertOperationTest() { + + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + assertTrue( + disableCloudrunAtomicOperationConverter.convertOperation(input) + instanceof DisableCloudrunAtomicOperation); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/EnableCloudrunAtomicOperationConverterTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/EnableCloudrunAtomicOperationConverterTest.java new file mode 100644 index 00000000000..a479184fc49 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/EnableCloudrunAtomicOperationConverterTest.java @@ -0,0 +1,43 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.EnableCloudrunAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class EnableCloudrunAtomicOperationConverterTest { + EnableCloudrunAtomicOperationConverter enableCloudrunAtomicOperationConverter; + CredentialsRepository credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + Map input = + new HashMap<>() { + { + put("accountName", "cloudrunaccount"); + } + }; + + @BeforeEach + public void init() { + enableCloudrunAtomicOperationConverter = new EnableCloudrunAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + enableCloudrunAtomicOperationConverter.setCredentialsRepository(credentialsRepository); + mockCredentials = mock(CloudrunNamedAccountCredentials.class); + } + + @Test + public void ConvertOperationTest() { + + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + assertTrue( + enableCloudrunAtomicOperationConverter.convertOperation(input) + instanceof EnableCloudrunAtomicOperation); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/UpsertCloudrunLoadBalancerAtomicOperationConverterTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/UpsertCloudrunLoadBalancerAtomicOperationConverterTest.java new file mode 100644 index 00000000000..b72b4574b7b --- /dev/null +++ 
b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/converters/UpsertCloudrunLoadBalancerAtomicOperationConverterTest.java @@ -0,0 +1,46 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.converters; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops.UpsertCloudrunLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class UpsertCloudrunLoadBalancerAtomicOperationConverterTest { + UpsertCloudrunLoadBalancerAtomicOperationConverter + upsertCloudrunLoadBalancerAtomicOperationConverter; + CredentialsRepository credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + Map input = + new HashMap<>() { + { + put("accountName", "cloudrunaccount"); + } + }; + + @BeforeEach + public void init() { + upsertCloudrunLoadBalancerAtomicOperationConverter = + new UpsertCloudrunLoadBalancerAtomicOperationConverter(); + credentialsRepository = mock(CredentialsRepository.class); + upsertCloudrunLoadBalancerAtomicOperationConverter.setCredentialsRepository( + credentialsRepository); + mockCredentials = mock(CloudrunNamedAccountCredentials.class); + } + + @Test + public void ConvertOperationTest() { + + when(credentialsRepository.getOne(any())).thenReturn(mockCredentials); + assertTrue( + upsertCloudrunLoadBalancerAtomicOperationConverter.convertOperation(input) + instanceof UpsertCloudrunLoadBalancerAtomicOperation); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeleteCloudrunLoadBalancerAtomicOperationTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeleteCloudrunLoadBalancerAtomicOperationTest.java new file mode 100644 index 00000000000..9bd9c3f59c8 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DeleteCloudrunLoadBalancerAtomicOperationTest.java @@ -0,0 +1,86 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import static org.mockito.BDDMockito.*; + +import com.google.api.services.run.v1.CloudRun; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeleteCloudrunLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunLoadBalancer; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunLoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import java.lang.reflect.Field; +import java.util.ArrayList; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DeleteCloudrunLoadBalancerAtomicOperationTest { + DeleteCloudrunLoadBalancerAtomicOperation deleteCloudrunLoadBalancerAtomicOperation; 
+ TaskRepository taskRepository; + DeleteCloudrunLoadBalancerDescription description; + CloudrunNamedAccountCredentials mockcredentials; + Task task; + CloudrunJobExecutor jobExecutor; + CloudrunLoadBalancerProvider provider; + CloudrunLoadBalancer loadBalancer; + JobExecutor executor; + + @BeforeEach + public void init() { + mockcredentials = + new CloudrunNamedAccountCredentials.Builder() + .setName("cloudrunaccount") + .setAccountType("cloudrun") + .setCloudProvider("cloudrun") + .setApplicationName("my app") + .setCredentials(mock(CloudrunCredentials.class)) + .setCloudRun(mock(CloudRun.class)) + .setEnvironment("environment") + .setJsonKey("jsonkey") + .setLiveLookupsEnabled(false) + .setLocalRepositoryDirectory("/localdirectory") + .setJsonPath("/jsonpath") + .setProject(" my-project") + .build(mock(CloudrunJobExecutor.class)); + taskRepository = mock(TaskRepository.class); + task = mock(Task.class); + taskRepository.threadLocalTask.set(task); + provider = mock(CloudrunLoadBalancerProvider.class); + jobExecutor = mock(CloudrunJobExecutor.class); + description = new DeleteCloudrunLoadBalancerDescription(); + description.setAccountName("cloudrunaccount"); + description.setLoadBalancerName("LoadBalancer"); + description.setAccount("acc"); + description.setCredentials(mockcredentials); + deleteCloudrunLoadBalancerAtomicOperation = + new DeleteCloudrunLoadBalancerAtomicOperation(description); + loadBalancer = new CloudrunLoadBalancer(); + loadBalancer.setRegion("us-central"); + executor = mock(JobExecutor.class); + } + + @Test + public void DeleteCloudrunLoadBalancerOperateTest() throws NoSuchFieldException { + + deleteCloudrunLoadBalancerAtomicOperation = + new DeleteCloudrunLoadBalancerAtomicOperation(description); + try { + Field f = deleteCloudrunLoadBalancerAtomicOperation.getClass().getDeclaredField("provider"); + f.setAccessible(true); + f.set(deleteCloudrunLoadBalancerAtomicOperation, provider); + Field f1 = + deleteCloudrunLoadBalancerAtomicOperation.getClass().getDeclaredField("jobExecutor"); + f1.setAccessible(true); + f1.set(deleteCloudrunLoadBalancerAtomicOperation, jobExecutor); + } catch (IllegalAccessException e) { + throw new RuntimeException( + "Failed to set provider/jobExecutor of DeleteCloudrunLoadBalancerAtomicOperation object", + e); + } + given(provider.getLoadBalancer(any(), anyString())).willReturn(loadBalancer); + deleteCloudrunLoadBalancerAtomicOperation.operate(new ArrayList<>()); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DestroyCloudrunAtomicOperationTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DestroyCloudrunAtomicOperationTest.java new file mode 100644 index 00000000000..208d44a90f6 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/DestroyCloudrunAtomicOperationTest.java @@ -0,0 +1,74 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.google.api.services.run.v1.CloudRun; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DestroyCloudrunDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.model.CloudrunServerGroup; +import com.netflix.spinnaker.clouddriver.cloudrun.provider.view.CloudrunClusterProvider; 
+import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import java.util.ArrayList; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DestroyCloudrunAtomicOperationTest { + + DestroyCloudrunAtomicOperation operation; + TaskRepository taskRepository; + DestroyCloudrunDescription description; + CloudrunNamedAccountCredentials mockcredentials; + Task task; + CloudrunJobExecutor jobExecutor; + CloudrunClusterProvider provider; + + CloudrunServerGroup serverGroup; + + @BeforeEach + public void init() { + mockcredentials = + new CloudrunNamedAccountCredentials.Builder() + .setName("cloudrunaccount") + .setAccountType("cloudrun") + .setCloudProvider("cloudrun") + .setApplicationName("my app") + .setCredentials(mock(CloudrunCredentials.class)) + .setCloudRun(mock(CloudRun.class)) + .setEnvironment("environment") + .setJsonKey("jsonkey") + .setLiveLookupsEnabled(false) + .setLocalRepositoryDirectory("/localdirectory") + .setJsonPath("/jsonpath") + .setProject(" my project") + .build(mock(CloudrunJobExecutor.class)); + taskRepository = mock(TaskRepository.class); + task = mock(Task.class); + taskRepository.threadLocalTask.set(task); + provider = mock(CloudrunClusterProvider.class); + jobExecutor = mock(CloudrunJobExecutor.class); + description = new DestroyCloudrunDescription(); + description.setAccountName("cloudrunaccount"); + description.setAccount("acc"); + description.setRegion("region-1"); + description.setServerGroupName("revision-1"); + description.setCredentials(mockcredentials); + operation = new DestroyCloudrunAtomicOperation(description); + operation.cloudrunClusterProvider = provider; + operation.jobExecutor = jobExecutor; + serverGroup = new CloudrunServerGroup(); + serverGroup.setRegion("us-central"); + } + + @Test + public void DestroyServerGroupOperateTest() throws NoSuchFieldException { + + when(provider.getServerGroup(anyString(), anyString(), anyString())).thenReturn(serverGroup); + operation.operate(new ArrayList<>()); + verify(jobExecutor, times(1)).runCommand(any()); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/UpsertCloudrunLoadBalancerAtomicOperationTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/UpsertCloudrunLoadBalancerAtomicOperationTest.java new file mode 100644 index 00000000000..02a44461168 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/ops/UpsertCloudrunLoadBalancerAtomicOperationTest.java @@ -0,0 +1,80 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.ops; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +import com.google.api.services.run.v1.CloudRun; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.CloudrunAllocationDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.CloudrunTrafficSplitDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.UpsertCloudrunLoadBalancerDescription; +import 
com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class UpsertCloudrunLoadBalancerAtomicOperationTest { + + UpsertCloudrunLoadBalancerAtomicOperation upsertCloudrunLoadBalancerAtomicOperation; + TaskRepository taskRepository; + UpsertCloudrunLoadBalancerDescription description; + CloudrunNamedAccountCredentials mockcredentials; + Task task; + CloudrunJobExecutor jobExecutor; + CloudrunTrafficSplitDescription splitDescription; + + CloudrunAllocationDescription allocationDescription; + + @BeforeEach + public void init() { + + mockcredentials = + new CloudrunNamedAccountCredentials.Builder() + .setName("cloudrunaccount") + .setAccountType("cloudrun") + .setCloudProvider("cloudrun") + .setApplicationName("my app") + .setCredentials(mock(CloudrunCredentials.class)) + .setCloudRun(mock(CloudRun.class)) + .setEnvironment("environment") + .setJsonKey("jsonkey") + .setLiveLookupsEnabled(false) + .setLocalRepositoryDirectory("/localdirectory") + .setJsonPath("/jsonpath") + .setProject(" my project") + .build(mock(CloudrunJobExecutor.class)); + taskRepository = mock(TaskRepository.class); + task = mock(Task.class); + taskRepository.threadLocalTask.set(task); + jobExecutor = mock(CloudrunJobExecutor.class); + description = new UpsertCloudrunLoadBalancerDescription(); + description.setAccountName("cloudrunaccount"); + description.setLoadBalancerName("LoadBalancer"); + description.setAccount("acc"); + description.setRegion("us-central"); + description.setCredentials(mockcredentials); + splitDescription = new CloudrunTrafficSplitDescription(); + allocationDescription = new CloudrunAllocationDescription(); + allocationDescription.setRevisionName("revision-1"); + allocationDescription.setPercent(90); + splitDescription.setAllocationDescriptions(List.of(allocationDescription)); + description.setSplitDescription(splitDescription); + upsertCloudrunLoadBalancerAtomicOperation = + new UpsertCloudrunLoadBalancerAtomicOperation(description); + upsertCloudrunLoadBalancerAtomicOperation.jobExecutor = jobExecutor; + } + + @Test + public void UpsertCloudrunLoadBalancerOperateTest() { + Map<String, Map<String, Map<String, String>>> s = + Map.of("loadBalancers", Map.of("us-central", Map.of("name", "LoadBalancer"))); + assertTrue(upsertCloudrunLoadBalancerAtomicOperation.operate(new ArrayList<>()).equals(s)); + verify(jobExecutor, times(1)).runCommand(any()); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunConfigDescriptionValidatorTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunConfigDescriptionValidatorTest.java new file mode 100644 index 00000000000..4bbbb773a8b --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunConfigDescriptionValidatorTest.java @@ -0,0 +1,75 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.validators; + +import static org.mockito.Mockito.*; + +import com.google.api.services.run.v1.CloudRun; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import 
com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeployCloudrunConfigDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import java.lang.reflect.Field; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DeployCloudrunConfigDescriptionValidatorTest { + DeployCloudrunConfigDescriptionValidator deployCloudrunConfigDescriptionValidator; + CredentialsRepository<CloudrunNamedAccountCredentials> credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + DeployCloudrunConfigDescription description; + ValidationErrors errors; + + @BeforeEach + public void init() { + deployCloudrunConfigDescriptionValidator = new DeployCloudrunConfigDescriptionValidator(); + mockCredentials = mock(CloudrunNamedAccountCredentials.class); + credentialsRepository = mock(CredentialsRepository.class); + errors = mock(ValidationErrors.class); + description = new DeployCloudrunConfigDescription(); + description.setAccountName("cloudrunaccount"); + } + + @Test + public void ValidateTest() throws NoSuchFieldException { + mockCredentials = + new CloudrunNamedAccountCredentials.Builder() + .setName("cloudrunaccount") + .setAccountType("cloudrun") + .setCloudProvider("cloudrun") + .setApplicationName("my app") + .setCredentials(mock(CloudrunCredentials.class)) + .setCloudRun(mock(CloudRun.class)) + .setEnvironment("environment") + .setJsonKey("jsonkey") + .setLiveLookupsEnabled(false) + .setLocalRepositoryDirectory("/localdirectory") + .setJsonPath("/jsonpath") + .setProject(" my project") + .build(mock(CloudrunJobExecutor.class)); + + credentialsRepository = + new MapBackedCredentialsRepository( + CloudrunNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()); + credentialsRepository.save(mockCredentials); + try { + deployCloudrunConfigDescriptionValidator = new DeployCloudrunConfigDescriptionValidator(); + Field f = + deployCloudrunConfigDescriptionValidator + .getClass() + .getDeclaredField("credentialsRepository"); + f.setAccessible(true); + f.set(deployCloudrunConfigDescriptionValidator, credentialsRepository); + } catch (IllegalAccessException e) { + throw new RuntimeException( + "Failed to set credentialsRepository of DeployCloudrunConfigDescriptionValidator object", + e); + } + deployCloudrunConfigDescriptionValidator.validate(List.of(description), description, errors); + verify(errors, never()).rejectValue("${context}.account", "${context}.account.notFound"); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunDescriptionValidatorTest.java b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunDescriptionValidatorTest.java new file mode 100644 index 00000000000..43e1412f54f --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/DeployCloudrunDescriptionValidatorTest.java @@ -0,0 +1,88 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.validators; + +import static org.mockito.BDDMockito.*; +import static 
org.mockito.Mockito.mock; + +import com.google.api.services.run.v1.CloudRun; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.deploy.description.DeployCloudrunDescription; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class DeployCloudrunDescriptionValidatorTest { + DeployCloudrunDescriptionValidator deployCloudrunDescriptionValidator; + CredentialsRepository<CloudrunNamedAccountCredentials> credentialsRepository; + CloudrunNamedAccountCredentials mockCredentials; + DeployCloudrunDescription description; + ValidationErrors validationErrors; + + @BeforeEach + public void init() { + deployCloudrunDescriptionValidator = new DeployCloudrunDescriptionValidator(); + validationErrors = mock(ValidationErrors.class); + description = new DeployCloudrunDescription(); + description.setAccountName("cloudrunaccount"); + description.setAccount("cloudrun"); + description.setApplication("my app"); + description.setPromote(false); + description.setRegion("region"); + description.setApplicationDirectoryRoot("/directoryroot"); + description.setConfigFiles(List.of("/path")); + description.setCredentials(mockCredentials); + description.setStopPreviousVersion(false); + description.setSuppressVersionString(false); + } + + @Test + public void validateTest() { + mockCredentials = + new CloudrunNamedAccountCredentials.Builder() + .setName("cloudrunaccount") + .setAccountType("cloudrun") + .setCloudProvider("cloudrun") + .setApplicationName("my app") + .setCredentials(mock(CloudrunCredentials.class)) + .setCloudRun(mock(CloudRun.class)) + .setEnvironment("environment") + .setJsonKey("jsonkey") + .setLiveLookupsEnabled(false) + .setLocalRepositoryDirectory("/localdirectory") + .setJsonPath("/jsonpath") + .setProject(" my project") + .build(mock(CloudrunJobExecutor.class)); + + credentialsRepository = + new MapBackedCredentialsRepository( + CloudrunNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()); + credentialsRepository.save(mockCredentials); + deployCloudrunDescriptionValidator.credentialsRepository = credentialsRepository; + deployCloudrunDescriptionValidator.validate( + List.of(description), description, validationErrors); + verify(validationErrors, never()) + .rejectValue("${context}.account", "${context}.account.notFound"); + } + + @Test + public void validateFailsOnGivingNullCredentials() { + mockCredentials = null; + credentialsRepository = + new MapBackedCredentialsRepository( + CloudrunNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()); + + deployCloudrunDescriptionValidator.credentialsRepository = credentialsRepository; + deployCloudrunDescriptionValidator.validate( + List.of(description), description, validationErrors); + + verify(validationErrors, times(1)) + .rejectValue("${context}.account", "${context}.account.notFound"); + } +} diff --git a/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/StandardCloudrunAttributeValidatorTest.java 
b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/StandardCloudrunAttributeValidatorTest.java new file mode 100644 index 00000000000..6918642cf90 --- /dev/null +++ b/clouddriver-cloudrun/src/test/java/com/netflix/spinnaker/clouddriver/cloudrun/deploy/validators/StandardCloudrunAttributeValidatorTest.java @@ -0,0 +1,63 @@ +package com.netflix.spinnaker.clouddriver.cloudrun.deploy.validators; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +import com.google.api.services.run.v1.CloudRun; +import com.netflix.spinnaker.clouddriver.cloudrun.CloudrunJobExecutor; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunCredentials; +import com.netflix.spinnaker.clouddriver.cloudrun.security.CloudrunNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class StandardCloudrunAttributeValidatorTest { + StandardCloudrunAttributeValidator standardCloudrunAttributeValidator; + CredentialsRepository<CloudrunNamedAccountCredentials> credrepo; + CloudrunNamedAccountCredentials mockcredentials; + ValidationErrors errors; + + @BeforeEach + public void init() { + errors = mock(ValidationErrors.class); + standardCloudrunAttributeValidator = + new StandardCloudrunAttributeValidator("new context", errors); + mockcredentials = + new CloudrunNamedAccountCredentials.Builder() + .setName("cloudrunaccount") + .setAccountType("cloudrun") + .setCloudProvider("cloudrun") + .setApplicationName("my app") + .setCredentials(mock(CloudrunCredentials.class)) + .setCloudRun(mock(CloudRun.class)) + .setEnvironment("environment") + .setJsonKey("jsonkey") + .setLiveLookupsEnabled(false) + .setLocalRepositoryDirectory("/localdirectory") + .setJsonPath("/jsonpath") + .setProject(" my project") + .build(mock(CloudrunJobExecutor.class)); + + credrepo = + new MapBackedCredentialsRepository( + CloudrunNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()); + credrepo.save(mockcredentials); + } + + @Test + public void validateCredentialsTest() { + assertTrue(standardCloudrunAttributeValidator.validateCredentials("cloudrunaccount", credrepo)); + } + + @Test + public void validateCredentialsFailTest() { + assertFalse( + standardCloudrunAttributeValidator.validateCredentials( + "Different cloudrun account", credrepo)); + } +} diff --git a/clouddriver-configserver/clouddriver-configserver.gradle b/clouddriver-configserver/clouddriver-configserver.gradle new file mode 100644 index 00000000000..d5cee497b2c --- /dev/null +++ b/clouddriver-configserver/clouddriver-configserver.gradle @@ -0,0 +1,21 @@ +apply plugin: 'java-library' + +tasks.compileGroovy.enabled=false + +sourceSets { + main { + java.srcDirs = ['src/main/java'] + } +} + +dependencies { + implementation "com.google.guava:guava" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-config" + implementation "io.spinnaker.kork:kork-cloud-config-server" + implementation "io.spinnaker.kork:kork-secrets" + implementation "org.apache.commons:commons-lang3" + implementation "org.springframework.cloud:spring-cloud-context" + implementation 
"org.springframework.cloud:spring-cloud-config-server" + implementation "com.github.wnameless.json:json-flattener:0.14.2" +} diff --git a/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/cache/CloudConfigRefreshConfig.java b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/cache/CloudConfigRefreshConfig.java new file mode 100644 index 00000000000..645c9d25689 --- /dev/null +++ b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/cache/CloudConfigRefreshConfig.java @@ -0,0 +1,68 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.clouddriver.config.CloudConfigRefreshProperties; +import com.netflix.spinnaker.clouddriver.config.ModifiableFilePropertySources; +import com.netflix.spinnaker.clouddriver.refresh.CloudConfigRefreshScheduler; +import com.netflix.spinnaker.kork.configserver.autoconfig.RemoteConfigSourceConfigured; +import java.util.List; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.cloud.config.server.EnableConfigServer; +import org.springframework.cloud.context.refresh.ContextRefresher; +import org.springframework.cloud.context.scope.refresh.RefreshScope; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.context.annotation.*; + +/** + * Create a {@link CloudConfigRefreshScheduler} to refresh the Spring Cloud Config Server from an + * environment repository backend on a schedule that can be defined in the spring profile or has a + * sensible default (60 seconds).. 
+ */ +@Configuration +@EnableConfigurationProperties(CloudConfigRefreshProperties.class) +public class CloudConfigRefreshConfig { + + @Configuration + @Conditional(RemoteConfigSourceConfigured.class) + @EnableConfigServer + static class RemoteConfigSourceConfiguration { + + @Bean + @ConditionalOnProperty( + prefix = "cloud.config", + value = "refreshIntervalSeconds", + matchIfMissing = true) + public CloudConfigRefreshScheduler cloudConfigIntervalRefreshScheduler( + ContextRefresher contextRefresher, CloudConfigRefreshProperties cloudConfigProperties) { + return new CloudConfigRefreshScheduler( + contextRefresher, cloudConfigProperties.getRefreshIntervalSeconds()); + } + } + + @Bean + @ConditionalOnExpression("${dynamic-config.enabled:false}") + ModifiableFilePropertySources modifiableFilePropertySources( + ConfigurableApplicationContext applicationContext, + RefreshScope refreshScope, + @Value("${dynamic-config.files}") List<String> dynamicFiles) { + return new ModifiableFilePropertySources(applicationContext, refreshScope, dynamicFiles); + } +} diff --git a/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/AbstractBootstrapCredentialsConfigurationProvider.java b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/AbstractBootstrapCredentialsConfigurationProvider.java new file mode 100644 index 00000000000..ee1dffa011d --- /dev/null +++ b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/AbstractBootstrapCredentialsConfigurationProvider.java @@ -0,0 +1,169 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
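The CloudConfigRefreshScheduler returned by cloudConfigIntervalRefreshScheduler above (its implementation appears later in this diff) polls on a single scheduled thread. A minimal standalone sketch of the same fixed-delay pattern, with a print statement standing in for ContextRefresher#refresh:

```java
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class RefreshLoopSketch {
  public static void main(String[] args) {
    ScheduledExecutorService executor =
        Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setNameFormat("CloudConfigRefreshScheduler-%d").build());
    // scheduleWithFixedDelay waits a full interval after each run completes,
    // so a slow refresh never overlaps the next one.
    executor.scheduleWithFixedDelay(
        () -> System.out.println("refreshing cloud config"), 60, 60, TimeUnit.SECONDS);
  }
}
```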
+ * + */ + +package com.netflix.spinnaker.clouddriver.config; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.wnameless.json.flattener.JsonFlattener; +import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService; +import com.netflix.spinnaker.kork.secrets.EncryptedSecret; +import com.netflix.spinnaker.kork.secrets.SecretAwarePropertySource; +import com.netflix.spinnaker.kork.secrets.SecretManager; +import com.netflix.spinnaker.kork.secrets.SecretSession; +import java.nio.file.Path; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.springframework.boot.context.properties.bind.BindResult; +import org.springframework.boot.context.properties.bind.Bindable; +import org.springframework.boot.context.properties.bind.Binder; +import org.springframework.boot.context.properties.bind.PropertySourcesPlaceholdersResolver; +import org.springframework.boot.context.properties.source.ConfigurationPropertySource; +import org.springframework.boot.context.properties.source.MapConfigurationPropertySource; +import org.springframework.cloud.bootstrap.config.BootstrapPropertySource; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.core.env.ConfigurableEnvironment; +import org.springframework.core.env.PropertySource; + +public abstract class AbstractBootstrapCredentialsConfigurationProvider<T> + implements ConfigurationProvider<T> { + private final ConfigurableApplicationContext applicationContext; + private CloudConfigResourceService configResourceService; + private SecretSession secretSession; + private Map<String, String> configServerCache; + private final ObjectMapper objectMapper = new ObjectMapper(); + + public AbstractBootstrapCredentialsConfigurationProvider( + ConfigurableApplicationContext applicationContext, + CloudConfigResourceService configResourceService, + SecretManager secretManager) { + this.applicationContext = applicationContext; + this.configResourceService = configResourceService; + this.secretSession = new SecretSession(secretManager); + } + + public abstract T getConfigurationProperties(); + + @Override + @SuppressWarnings("unchecked") + public Map<String, Object> getPropertiesMap(String property) { + ConfigurableEnvironment environment = applicationContext.getEnvironment(); + Map<String, Object> map; + + for (PropertySource<?> propertySource : environment.getPropertySources()) { + if (propertySource instanceof BootstrapPropertySource + || (propertySource instanceof SecretAwarePropertySource + && ((SecretAwarePropertySource) propertySource).getDelegate() + instanceof BootstrapPropertySource)) { + map = (Map<String, Object>) propertySource.getSource(); + if (map.containsKey(property)) { + return map; + } + } + + if (propertySource.getSource() instanceof BootstrapPropertySource) { + BootstrapPropertySource<Map<String, Object>> bootstrapPropertySource = + (BootstrapPropertySource<Map<String, Object>>) propertySource.getSource(); + if (bootstrapPropertySource.containsProperty(property)) { + return bootstrapPropertySource.getSource(); + } + } + } + + throw new RuntimeException("No BootstrapPropertySource found!"); + } + + @Override + public BindResult<T> bind(Map<String, Object> propertiesMap, Class<T> clazz) { + resolveSpecialCases(propertiesMap); + ConfigurationPropertySource configurationPropertySource = + new MapConfigurationPropertySource(propertiesMap); + Iterable<ConfigurationPropertySource> sourceIterable = + () -> Collections.singleton(configurationPropertySource).iterator(); + Binder binder = + new Binder( + sourceIterable, + new 
PropertySourcesPlaceholdersResolver(applicationContext.getEnvironment())); + return binder.bind("", Bindable.of(clazz)); + } + + private void resolveSpecialCases(Map<String, Object> propertiesMap) { + String result; + for (Map.Entry<String, Object> entry : propertiesMap.entrySet()) { + if (entry.getValue() instanceof String) { + result = resolveConfigServerPattern((String) entry.getValue()); + result = resolveEncryptedPattern(result); + entry.setValue(result); + } + } + } + + @Override + public Map<String, Object> getFlatMap(Map<String, Object> unflatMap) { + try { + return JsonFlattener.flattenAsMap(objectMapper.writeValueAsString(unflatMap)); + } catch (JsonProcessingException e) { + throw new RuntimeException("Error occurred while building object: " + e.getMessage()); + } + } + + private String resolveEncryptedPattern(String possiblePattern) { + if (possiblePattern.startsWith(EncryptedSecret.ENCRYPTED_STRING_PREFIX)) { + possiblePattern = secretSession.decrypt(possiblePattern); + } + return possiblePattern; + } + + private String resolveConfigServerPattern(String possiblePattern) { + if (possiblePattern.startsWith("configserver:")) { + possiblePattern = resolveConfigServerFilePath(possiblePattern); + } + return possiblePattern; + } + + private String resolveConfigServerFilePath(String key) { + String filePath; + + if (cacheContainsKey(key)) { + filePath = configServerCache.get(key); + if (resourceExist(filePath)) { + return filePath; + } + } + + filePath = configResourceService.getLocalPath(key); + addToCache(key, filePath); + return filePath; + } + + private boolean resourceExist(String filePath) { + return Path.of(filePath).toFile().isFile(); + } + + private void addToCache(String key, String filePath) { + configServerCache.put(key, filePath); + } + + private boolean cacheContainsKey(String key) { + if (configServerCache == null) { + configServerCache = new HashMap<>(); + return false; + } + return configServerCache.containsKey(key); + } +} diff --git a/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/CloudConfigRefreshProperties.java b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/CloudConfigRefreshProperties.java new file mode 100644 index 00000000000..1ba52fad1ad --- /dev/null +++ b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/CloudConfigRefreshProperties.java @@ -0,0 +1,26 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
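The getFlatMap implementation above round-trips the nested map through Jackson and json-flattener, so nested keys come back as dotted or indexed paths before binding. A self-contained sketch with illustrative values:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.wnameless.json.flattener.JsonFlattener;
import java.util.List;
import java.util.Map;

class FlattenSketch {
  public static void main(String[] args) throws Exception {
    Map<String, Object> nested = Map.of("name", "account", "omitKinds", List.of("podPreset"));
    // Serialize to JSON, then flatten: nested structures become "a.b" / "a[0]" keys.
    Map<String, Object> flat =
        JsonFlattener.flattenAsMap(new ObjectMapper().writeValueAsString(nested));
    System.out.println(flat); // e.g. {name=account, omitKinds[0]=podPreset}
  }
}
```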
+ */ + +package com.netflix.spinnaker.clouddriver.config; + +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; + +@Data +@ConfigurationProperties("cloud.config") +public class CloudConfigRefreshProperties { + private int refreshIntervalSeconds = 60; +} diff --git a/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/ConfigurationProvider.java b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/ConfigurationProvider.java new file mode 100644 index 00000000000..72503315a67 --- /dev/null +++ b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/ConfigurationProvider.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.config; + +import java.util.Map; +import org.springframework.boot.context.properties.bind.BindResult; +import org.springframework.core.env.PropertySource; + +/** + * This interface is meant to provide a custom way of handling configuration properties. Usually, + * configuration properties are handled implicitly by Spring Boot itself. But in case that is not + * desired, this will allow it to be handled in a custom way where properties are read from a + * property source, then converted to a map as an intermediate step which is then bound to a target + * implementation class. + * + *
<p>
This interface defines the necessary actions which would be required to do the same. + */ +public interface ConfigurationProvider<T> { + /** + * Returns the desired configuration properties, bound to the target implementation class. + * + * @return a target implementation class for a property + */ + T getConfigurationProperties(); + + /** + * This method takes an input property and returns a map representation of the {@link + * PropertySource} which contains this property. + + *
<p>
Loading of candidate property sources and defining a criterion for selecting a property + * source should be considered when implementing this. + * + * @param property A property defined in the configuration file + * @return a map representation of the {@link PropertySource} that contains the input property. + */ + Map<String, Object> getPropertiesMap(String property); + + /** + * This method takes in a property map (which can be nested) and flattens it. + + *
<p>
For example: input: { name=account, configureImagePullSecrets=true, omitKinds=[podPreset], + * onlySpinnakerManaged=false } should result in: + + *
<p>
{ "name":"account", "configureImagePullSecrets":true, "omitKinds[0]":"podPreset", + * "onlySpinnakerManaged":false } + * + * @param unflatMap any type of property map (can be nested) + * @return a flattened map representation of the input + */ + Map getFlatMap(Map unflatMap); + + /** + * This method attempts to bind an input provided as a map to its target implementation class. + * + * @param propertiesMap an input of type Map - this should be a flattened map + * @param clazz the target implementation class + * @return A {@link BindResult} object + */ + BindResult bind(Map propertiesMap, Class clazz); +} diff --git a/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/ModifiableFilePropertySources.java b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/ModifiableFilePropertySources.java new file mode 100644 index 00000000000..f4a66bca94c --- /dev/null +++ b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/config/ModifiableFilePropertySources.java @@ -0,0 +1,208 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.config; + +import com.netflix.spinnaker.kork.annotations.Alpha; +import java.io.File; +import java.io.IOException; +import java.nio.file.*; +import java.util.*; +import java.util.stream.Collectors; +import javax.annotation.PostConstruct; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.ArrayUtils; +import org.springframework.beans.factory.config.BeanPostProcessor; +import org.springframework.boot.context.event.ApplicationReadyEvent; +import org.springframework.boot.env.YamlPropertySourceLoader; +import org.springframework.cloud.context.scope.refresh.RefreshScope; +import org.springframework.cloud.context.scope.refresh.RefreshScopeRefreshedEvent; +import org.springframework.context.ApplicationListener; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.core.Ordered; +import org.springframework.core.env.EnumerablePropertySource; +import org.springframework.core.env.PropertySource; +import org.springframework.core.io.FileSystemResource; + +/** + * ModifiableFilePropertySources maintains a list of files that are file property sources that can + * be watched and refreshed. + * + *
<p>
When a file is changed or added, it will be used as a {@link PropertySource}, the refresh + * scope will be refreshed, and the application notified. + */ +@Slf4j +@Alpha +public class ModifiableFilePropertySources + implements BeanPostProcessor, Ordered, Runnable, ApplicationListener<ApplicationReadyEvent> { + private final List<DynamicFilePropertySource> dynamicFilePropertySources; + private ConfigurableApplicationContext applicationContext; + private RefreshScope refreshScope; + + public ModifiableFilePropertySources( + ConfigurableApplicationContext applicationContext, + RefreshScope refreshScope, + List<String> dynamicConfigFiles) { + this.applicationContext = applicationContext; + this.refreshScope = refreshScope; + this.dynamicFilePropertySources = + dynamicConfigFiles.stream() + .map(f -> new DynamicFilePropertySource(f)) + .collect(Collectors.toList()); + } + + @Override + public void onApplicationEvent(ApplicationReadyEvent event) { + Thread t = new Thread(this, "dynamicConfig"); + t.setDaemon(true); + t.start(); + } + + @PostConstruct + public void start() { + dynamicFilePropertySources.stream().forEach(DynamicFilePropertySource::install); + } + + private Optional<DynamicFilePropertySource> + getDynamicFilePropertySourceFromEvent(Watchable directory, Object context) { + if (directory instanceof Path) { + String path = ((Path) directory).resolve(context.toString()).toAbsolutePath().toString(); + return dynamicFilePropertySources.stream() + .filter(f -> f.getAbsFilePath().equals(path)) + .findFirst(); + } + return Optional.empty(); + } + + private Set<File> getPropertySourceDirectories() { + Map<File, List<DynamicFilePropertySource>> directoryMap = + dynamicFilePropertySources.stream() + .collect( + Collectors.groupingBy(f -> f.getFileSystemResource().getFile().getParentFile())); + return directoryMap.keySet(); + } + + public void run() { + try { + Set<File> directoriesToWatch = getPropertySourceDirectories(); + + WatchService watchService = FileSystems.getDefault().newWatchService(); + for (File directory : directoriesToWatch) { + Path path = Paths.get(directory.getAbsolutePath()); + path.register( + watchService, + StandardWatchEventKinds.ENTRY_CREATE, + StandardWatchEventKinds.ENTRY_DELETE, + StandardWatchEventKinds.ENTRY_MODIFY); + } + + WatchKey key; + while ((key = watchService.take()) != null) { + List<WatchEvent<?>> events = key.pollEvents(); + try { + boolean notify = false; + for (WatchEvent<?> event : events) { + Optional<DynamicFilePropertySource> source = + getDynamicFilePropertySourceFromEvent(key.watchable(), event.context()); + if (source.isPresent()) { + log.info("Detected changes to {}", source.get().absFilePath); + if (event.kind().equals(StandardWatchEventKinds.ENTRY_CREATE) + || event.kind().equals(StandardWatchEventKinds.ENTRY_MODIFY) + || event.kind().equals(StandardWatchEventKinds.ENTRY_DELETE)) { + source.get().sync(); + notify = true; + } + } + } + if (notify) { + refreshScope.refreshAll(); + applicationContext.publishEvent(new RefreshScopeRefreshedEvent()); + } + } catch (Exception e) { + log.error("Error refreshing dynamic config", e); + } finally { + key.reset(); + } + } + + } catch (IOException | InterruptedException e) { + log.error("Unable to watch dynamic config files", e); + } + } + + @Override + public int getOrder() { + return HIGHEST_PRECEDENCE + 7; + } + + /** + * Virtual property source that wraps an underlying property source backed by a file that can + * change. Virtual is added so property source wrappers (like {@link + * com.netflix.spinnaker.kork.secrets.SecretBeanPostProcessor} and {@link + * com.netflix.spinnaker.kork.configserver.CloudConfigAwarePropertySource}) can still function + * after a reload. 
+ */ + class DynamicFilePropertySource { + @Getter private FileSystemResource fileSystemResource; + @Getter private String absFilePath; + private YamlPropertySourceLoader yamlPropertySourceLoader = new YamlPropertySourceLoader(); + private PropertySource<?> dynamicPropertySource; + private List<PropertySource<?>> propertySources = new ArrayList<>(); + + public DynamicFilePropertySource(String filename) { + absFilePath = Paths.get(filename).toAbsolutePath().toString(); + fileSystemResource = new FileSystemResource(filename); + } + + public void install() { + sync(); + dynamicPropertySource = + new EnumerablePropertySource(absFilePath) { + @Override + public String[] getPropertyNames() { + return propertySources.stream() + .filter(s -> s instanceof EnumerablePropertySource) + .map(s -> ((EnumerablePropertySource) s).getPropertyNames()) + .reduce(new String[0], ArrayUtils::addAll); + } + + @Override + public Object getProperty(String name) { + return propertySources.stream() + .map(s -> s.getProperty(name)) + .filter(Objects::nonNull) + .findFirst() + .orElse(null); + } + }; + applicationContext.getEnvironment().getPropertySources().addFirst(dynamicPropertySource); + } + + public void sync() { + try { + if (fileSystemResource.getFile().exists()) { + propertySources = + yamlPropertySourceLoader.load("dynamic:" + absFilePath, fileSystemResource); + } else { + propertySources = new ArrayList<>(); + } + } catch (IOException e) { + log.warn("Unable to load properties from " + absFilePath, e); + } + } + } +} diff --git a/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/refresh/CloudConfigRefreshScheduler.java b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/refresh/CloudConfigRefreshScheduler.java new file mode 100644 index 00000000000..2088c5e324b --- /dev/null +++ b/clouddriver-configserver/src/main/java/com/netflix/spinnaker/clouddriver/refresh/CloudConfigRefreshScheduler.java @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.refresh; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import lombok.extern.slf4j.Slf4j; +import org.springframework.cloud.context.refresh.ContextRefresher; + +/** + * Refresh the Spring Cloud Config context on a schedule. The configured interval should + * approximately match the cache refresh interval. 
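ModifiableFilePropertySources above drives its reloads from java.nio's WatchService. A minimal standalone sketch of the same watch loop (the watched directory is illustrative):

```java
import java.nio.file.*;

class WatchLoopSketch {
  public static void main(String[] args) throws Exception {
    WatchService watchService = FileSystems.getDefault().newWatchService();
    Paths.get("/tmp").register(
        watchService,
        StandardWatchEventKinds.ENTRY_CREATE,
        StandardWatchEventKinds.ENTRY_DELETE,
        StandardWatchEventKinds.ENTRY_MODIFY);
    WatchKey key;
    while ((key = watchService.take()) != null) {
      for (WatchEvent<?> event : key.pollEvents()) {
        System.out.println(event.kind() + ": " + event.context());
      }
      key.reset(); // re-arm the key, as run() above does in its finally block
    }
  }
}
```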
+ */ +@Slf4j +public class CloudConfigRefreshScheduler implements Runnable { + private final ContextRefresher contextRefresher; + + public CloudConfigRefreshScheduler(ContextRefresher contextRefresher, long interval) { + this.contextRefresher = contextRefresher; + + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setNameFormat(CloudConfigRefreshScheduler.class.getSimpleName() + "-%d") + .build()) + .scheduleWithFixedDelay(this, interval, interval, TimeUnit.SECONDS); + } + + @Override + public void run() { + try { + contextRefresher.refresh(); + } catch (Throwable t) { + log.error("Error refreshing cloud config", t); + } + } +} diff --git a/clouddriver-consul/clouddriver-consul.gradle b/clouddriver-consul/clouddriver-consul.gradle index 04be2000a2a..f16bc69accd 100644 --- a/clouddriver-consul/clouddriver-consul.gradle +++ b/clouddriver-consul/clouddriver-consul.gradle @@ -1,3 +1,17 @@ dependencies { - compile project(":clouddriver-core") + implementation project(":clouddriver-core") + + implementation "com.jakewharton.retrofit:retrofit1-okhttp3-client" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "org.apache.groovy:groovy" + implementation "org.apache.groovy:groovy-json" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "io.spinnaker.kork:kork-exceptions" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" } diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/Consul.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/Consul.groovy index e72a4fc3f69..83ad486cad4 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/Consul.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/Consul.groovy @@ -16,12 +16,13 @@ package com.netflix.spinnaker.clouddriver.consul.api.v1 - +import com.jakewharton.retrofit.Ok3Client import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig import com.netflix.spinnaker.clouddriver.consul.config.ConsulProperties -import com.squareup.okhttp.OkHttpClient +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler +import okhttp3.OkHttpClient import retrofit.RestAdapter -import retrofit.client.OkClient +import retrofit.converter.JacksonConverter class Consul<T> { T api @@ -37,8 +38,10 @@ class Consul<T> { this.timeout = timeout this.api = new RestAdapter.Builder() .setEndpoint(this.endpoint) - .setClient(new OkClient(new OkHttpClient())) + .setClient(new Ok3Client(new OkHttpClient())) + .setConverter(new JacksonConverter()) .setLogLevel(RestAdapter.LogLevel.NONE) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .build() .create(type) } diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulAgent.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulAgent.groovy index a3480fd199a..7e9dafca61d 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulAgent.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulAgent.groovy @@ -19,11 +19,6 @@ package 
com.netflix.spinnaker.clouddriver.consul.api.v1 import com.netflix.spinnaker.clouddriver.consul.api.v1.services.AgentApi import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig import com.netflix.spinnaker.clouddriver.consul.config.ConsulProperties -import com.squareup.okhttp.OkHttpClient -import retrofit.RestAdapter -import retrofit.client.OkClient - -import java.util.concurrent.TimeUnit class ConsulAgent extends Consul<AgentApi> { ConsulAgent(ConsulConfig config, String agentBaseUrl) { diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulCatalog.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulCatalog.groovy index d50b97fc032..55154ab5c00 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulCatalog.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulCatalog.groovy @@ -18,12 +18,6 @@ package com.netflix.spinnaker.clouddriver.consul.api.v1 import com.netflix.spinnaker.clouddriver.consul.api.v1.services.CatalogApi import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.consul.config.ConsulProperties -import com.squareup.okhttp.OkHttpClient -import retrofit.RestAdapter -import retrofit.client.OkClient - -import java.util.concurrent.TimeUnit class ConsulCatalog extends Consul<CatalogApi> { ConsulCatalog(ConsulConfig config) { diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulKeyValueStore.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulKeyValueStore.groovy index 16e06a230e3..e1606384d7e 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulKeyValueStore.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/ConsulKeyValueStore.groovy @@ -18,12 +18,6 @@ package com.netflix.spinnaker.clouddriver.consul.api.v1 import com.netflix.spinnaker.clouddriver.consul.api.v1.services.KeyValueApi import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.consul.config.ConsulProperties -import com.squareup.okhttp.OkHttpClient -import retrofit.RestAdapter -import retrofit.client.OkClient - -import java.util.concurrent.TimeUnit class ConsulKeyValueStore extends Consul<KeyValueApi> { ConsulKeyValueStore(ConsulConfig config) { diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/AgentApi.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/AgentApi.groovy index ea24366b28b..f4fef274e9d 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/AgentApi.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/AgentApi.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.consul.api.v1.services import com.netflix.spinnaker.clouddriver.consul.api.v1.model.* -import com.squareup.okhttp.Response +import okhttp3.Response import retrofit.http.* interface AgentApi { diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/KeyValueApi.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/KeyValueApi.groovy index 4f421f90165..73c557df0d1 100644 --- 
a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/KeyValueApi.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/api/v1/services/KeyValueApi.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.consul.api.v1.services import com.netflix.spinnaker.clouddriver.consul.api.v1.model.KeyValuePair -import com.squareup.okhttp.Response +import okhttp3.Response import retrofit.http.Body import retrofit.http.DELETE import retrofit.http.GET diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/config/ConsulConfig.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/config/ConsulConfig.groovy index e658749495d..3887424543e 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/config/ConsulConfig.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/config/ConsulConfig.groovy @@ -17,14 +17,9 @@ package com.netflix.spinnaker.clouddriver.consul.config import com.netflix.spinnaker.clouddriver.consul.api.v1.ConsulCatalog -import com.netflix.spinnaker.clouddriver.consul.api.v1.services.CatalogApi -import com.squareup.okhttp.OkHttpClient +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import groovy.util.logging.Slf4j -import retrofit.RestAdapter -import retrofit.RetrofitError -import retrofit.client.OkClient -import java.lang.invoke.ConstantCallSite import java.util.concurrent.TimeUnit @Slf4j @@ -57,7 +52,7 @@ class ConsulConfig { try { def catalog = new ConsulCatalog(this) datacenters = catalog.api.datacenters() - } catch (RetrofitError e) { + } catch (SpinnakerServerException e) { log.warn "Unable to connect to Consul running on the local Clouddriver instance.", e datacenters = [] } diff --git a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/provider/ConsulProviderUtils.groovy b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/provider/ConsulProviderUtils.groovy index fba46cbbd95..770a0eb7199 100644 --- a/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/provider/ConsulProviderUtils.groovy +++ b/clouddriver-consul/src/main/groovy/com/netflix/spinnaker/clouddriver/consul/provider/ConsulProviderUtils.groovy @@ -23,8 +23,8 @@ import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig import com.netflix.spinnaker.clouddriver.consul.model.ConsulHealth import com.netflix.spinnaker.clouddriver.consul.model.ConsulNode import com.netflix.spinnaker.clouddriver.consul.model.ConsulService +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import groovy.util.logging.Slf4j -import retrofit.RetrofitError @Slf4j class ConsulProviderUtils { @@ -40,7 +40,7 @@ class ConsulProviderUtils { return new ConsulService(result) } ?: [] running = true - } catch (RetrofitError e) { + } catch (SpinnakerServerException e) { // Instance can't be connected to on hostname:port/v1/agent/checks log.debug(e.message) } diff --git a/clouddriver-core-tck/clouddriver-core-tck.gradle b/clouddriver-core-tck/clouddriver-core-tck.gradle new file mode 100644 index 00000000000..eae67808f6b --- /dev/null +++ b/clouddriver-core-tck/clouddriver-core-tck.gradle @@ -0,0 +1,9 @@ +dependencies { + implementation project(":clouddriver-core") + + implementation "com.fasterxml.jackson.core:jackson-annotations" + implementation "org.junit.jupiter:junit-jupiter-api" + implementation 
"org.apache.commons:commons-lang3" + implementation "org.assertj:assertj-core" + implementation "org.apache.groovy:groovy" +} diff --git a/clouddriver-core-tck/src/main/java/com/netflix/spinnaker/clouddriver/core/test/TaskRepositoryTck.java b/clouddriver-core-tck/src/main/java/com/netflix/spinnaker/clouddriver/core/test/TaskRepositoryTck.java new file mode 100644 index 00000000000..ec297e5ec81 --- /dev/null +++ b/clouddriver-core-tck/src/main/java/com/netflix/spinnaker/clouddriver/core/test/TaskRepositoryTck.java @@ -0,0 +1,290 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.core.test; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname; +import com.netflix.spinnaker.clouddriver.data.task.Status; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import java.lang.reflect.InvocationTargetException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.commons.lang3.text.WordUtils; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public abstract class TaskRepositoryTck { + + protected TaskRepository subject; + + protected abstract T createTaskRepository(); + + @BeforeEach + public void setupTest() { + subject = createTaskRepository(); + } + + @Test + public void testTaskPersistence() { + Task t1 = subject.create("TEST", "Test Status"); + Task t2 = subject.create("TEST", "Test Status"); + + assertThat(t1.getId()).isNotEqualTo(t2.getId()); + } + + @Test + public void testTaskLookup() { + Task t1 = subject.create("TEST", "Test Status"); + Task t2 = subject.get(t1.getId()); + + assertThat(t1.getId()).isEqualTo(t2.getId()); + assertThat(t1.getStatus().getStatus()).isEqualTo(t2.getStatus().getStatus()); + assertThat(t1.getStatus().getPhase()).isEqualTo(t2.getStatus().getPhase()); + assertThat(t1.getStartTimeMs()).isEqualTo(t2.getStartTimeMs()); + assertThat(t1.getStatus().isCompleted()).isEqualTo(t2.getStatus().isCompleted()); + assertThat(t1.getStatus().isFailed()).isEqualTo(t2.getStatus().isFailed()); + assertThat(t1.getStatus().isCompleted()).isFalse(); + assertThat(t1.getStatus().isFailed()).isFalse(); + assertThat(t1.getStatus().isRetryable()).isFalse(); + } + + @Test + public void testFailureStatus() { + Task t1 = subject.create("TEST", "Test Status"); + t1.fail(); + + Task t2 = subject.get(t1.getId()); + + assertThat(t2.getStatus().isCompleted()).isTrue(); + assertThat(t2.getStatus().isFailed()).isTrue(); + assertThat(t2.getStatus().isRetryable()).isFalse(); + } + + @Test + public void testRetryableStatus() { + Task t1 = subject.create("TEST", "Test Status"); + t1.fail(true); + + Task t2 = subject.get(t1.getId()); + + assertThat(t2.getStatus().isCompleted()).isTrue(); + 
assertThat(t2.getStatus().isFailed()).isTrue(); + assertThat(t2.getStatus().isRetryable()).isTrue(); + } + + @Test + public void testTaskCompletion() { + Task t1 = subject.create("TEST", "Test Status"); + t1.updateStatus("Orchestration", "completed"); + t1.complete(); + + assert (t1.getStatus().isCompleted()); + } + + @Test + public void testListRunningTasks() { + Task t1 = subject.create("TEST", "Test Status"); + Task t2 = subject.create("TEST", "Test Status"); + + List<Task> list = subject.list(); + + assertThat(list.stream().map(Task::getId)).contains(t1.getId(), t2.getId()); + + t1.complete(); + + assertThat(subject.list().stream().map(Task::getId).collect(Collectors.toList())) + .doesNotContain(t1.getId()); + assertThat(subject.list().stream().map(Task::getId).collect(Collectors.toList())) + .contains(t2.getId()); + } + + @Test + public void testListByThisInstance() { + Task t1 = subject.create("Test", "STARTED"); + Task t2 = subject.create("Test", "STARTED"); + Task t3 = subject.create("Test", "STARTED"); + Task t4 = subject.create("Test", "STARTED"); + Task t5 = subject.create("Test", "STARTED"); + String ownerId = ClouddriverHostname.ID; + + t3.updateOwnerId("foo@not_this_clouddriver", "Test"); + t5.complete(); + + List<Task> runningTasks = subject.listByThisInstance(); + + assertThat(runningTasks.stream().allMatch(t -> t.getOwnerId().equals(ownerId))).isTrue(); + assertThat(runningTasks.stream().map(Task::getId).collect(Collectors.toList())) + .contains(t1.getId(), t2.getId(), t4.getId()); + // Task 3 doesn't belong to this pod and task 5 is not running, so should not be included in the + // result + assertThat(runningTasks.stream().map(Task::getId).collect(Collectors.toList())) + .doesNotContain(t3.getId(), t5.getId()); + } + + @Test + public void testResultObjectsPersistence() { + Task t1 = subject.create("Test", "Test Status"); + + final TestObject obj = new TestObject("blimp", "bah"); + + t1.addResultObjects(Collections.singletonList(obj)); + + assertThat(t1.getResultObjects()).hasSize(1); + assertThat(getField(t1.getResultObjects().get(0), "name")).isEqualTo("blimp"); + assertThat(getField(t1.getResultObjects().get(0), "value")).isEqualTo("bah"); + + t1.addResultObjects(Collections.singletonList(new TestObject("t1", "h2"))); + + assertThat(t1.getResultObjects()).hasSize(2); + } + + @Test + public void testResultObjectOrderingIsPreserved() { + Task t1 = subject.create("Test", "Test Status"); + + t1.addResultObjects(Collections.singletonList(new TestObject("Object0", "value"))); + t1.addResultObjects(Collections.singletonList(new TestObject("Object1", "value"))); + t1.addResultObjects(Collections.singletonList(new TestObject("Object2", "value"))); + t1.addResultObjects(Collections.singletonList(new TestObject("Object3", "value"))); + + assertThat( + t1.getResultObjects().stream() + .map(o -> getField(o, "name")) + .collect(Collectors.toList())) + .containsSequence("Object0", "Object1", "Object2", "Object3"); + } + + @Test + public void testTaskHistoryPersistence() { + Task t1 = subject.create("Test", "Test Status"); + List<? extends Status> history = t1.getHistory(); + + assertThat(history).hasSize(1); + + t1.updateStatus("Orchestration", "started"); + + assertThat(t1.getHistory()).hasSize(2); + + Status newEntry = t1.getHistory().get(1); + assertThat(newEntry.getClass().getSimpleName()).isEqualTo("TaskDisplayStatus"); + assertThat(newEntry.getPhase()).isEqualTo("Orchestration"); + assertThat(newEntry.getStatus()).isEqualTo("started"); + + t1.updateStatus("Orchestration", "update 0"); + 
t1.updateStatus("Orchestration", "update 1"); + t1.updateStatus("Orchestration", "update 2"); + + assertThat(t1.getHistory()).hasSize(5); + } + + @Test + public void testClientRequestIdLookup() { + Task t1 = subject.create("Test", "Test Status", "the-key"); + Task t2 = subject.create("Test", "Test Status 2", "the-key"); + Task t3 = subject.create("Test", "Test Status 3", "other-key"); + + assertThat(t1.getId()).isEqualTo(t2.getId()); + assertThat(t1.getId()).isNotEqualTo(t3.getId()); + } + + @Test + public void testUpdateOwnerIdWithANewOwnerId() { + Task t1 = subject.create("Test", "Test Status", "the-key"); + String newOwnerId = "1234@spin-clouddriver-pod-new"; + assertThat(t1.getOwnerId()).isNotEqualTo(newOwnerId); + t1.updateOwnerId(newOwnerId, "ORCHESTRATION"); + assertThat(t1.getOwnerId()).isEqualTo(newOwnerId); + } + + @Test + public void testUpdateOwnerIdWithSameOwnerId() { + Task t1 = subject.create("Test", "Test Status", "the-key"); + String newOwnerId = ClouddriverHostname.ID; + assertThat(t1.getOwnerId()).isEqualTo(newOwnerId); + t1.updateOwnerId(newOwnerId, "ORCHESTRATION"); + assertThat(t1.getOwnerId()).isEqualTo(newOwnerId); + } + + @Test + public void testTaskOutputStatus() { + Task t1 = subject.create("Test", "Test Status"); + + t1.updateOutput("some-manifest", "Deploy K8s Manifest", "output", ""); + + assertThat(t1.getOutputs()).hasSize(1); + assertThat(getField(t1.getOutputs().get(0), "manifest")).isEqualTo("some-manifest"); + assertThat(getField(t1.getOutputs().get(0), "phase")).isEqualTo("Deploy K8s Manifest"); + assertThat(getField(t1.getOutputs().get(0), "stdOut")).isEqualTo("output"); + assertThat(getField(t1.getOutputs().get(0), "stdError")).isEqualTo(""); + } + + @Test + public void testTaskOutputStatusWithNullValues() { + Task t1 = subject.create("Test", "Test Status"); + + t1.updateOutput("some-manifest", "Deploy K8s Manifest", null, ""); + + assertThat(t1.getOutputs()).hasSize(1); + assertThat(getField(t1.getOutputs().get(0), "manifest")).isEqualTo("some-manifest"); + assertThat(getField(t1.getOutputs().get(0), "phase")).isEqualTo("Deploy K8s Manifest"); + assertThat(getField(t1.getOutputs().get(0), "stdOut")).isNull(); + assertThat(getField(t1.getOutputs().get(0), "stdError")).isEqualTo(""); + } + + public class TestObject { + public String name; + public String value; + + @JsonCreator + public TestObject(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public String getValue() { + return value; + } + } + + private Object getField(Object object, String fieldName) { + // TODO rz - Turns out the Redis & InMemory implementations behave totally different + // in how they handle result objects. For now, the TCK is going to support the two + // conflicting styles, but this needs to be fixed. Based on usage within tests, it + // seems we expect result objects to be the actual objects, but Redis deserializes + // as maps only. This is really more of a problem between JedisTask and DefaultTask. 
+ if (object instanceof Map) { + return ((Map) object).get(fieldName); + } else { + try { + return object + .getClass() + .getDeclaredMethod("get" + WordUtils.capitalize(fieldName)) + .invoke(object); + } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/clouddriver-core/clouddriver-core.gradle b/clouddriver-core/clouddriver-core.gradle index b6b270c2809..28771fa1d3d 100644 --- a/clouddriver-core/clouddriver-core.gradle +++ b/clouddriver-core/clouddriver-core.gradle @@ -1,25 +1,66 @@ dependencies { - spinnaker.group('retrofitDefault') + api("io.spinnaker.kork:kork-api") + api("io.spinnaker.kork:kork-plugins") - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootDataRest') - compile spinnaker.dependency("commonsExec") - compile spinnaker.dependency('rxJava') - compile spinnaker.dependency('kork') - compile spinnaker.dependency('korkArtifacts') - compile spinnaker.dependency('korkWeb') - compile spinnaker.dependency('eurekaClient') - compile spinnaker.dependency("jedis") - compile spinnaker.dependency("frigga") - compile spinnaker.dependency("lombok") - compile spinnaker.dependency('logstashEncoder') + implementation project(":cats:cats-core") + implementation project(":cats:cats-redis") + implementation project(":clouddriver-api") + implementation project(":clouddriver-security") + implementation project(":clouddriver-saga") - compile project(':cats:cats-core') - compile project(':cats:cats-redis') - compile project(':cats:cats-dynomite') - compile project(':clouddriver-security') - testCompile project(':cats:cats-test') - testCompile spinnaker.dependency('korkJedisTest') + // Because a JobRequest constructor takes a org.apache.commons.exec.CommandLine argument + api "org.apache.commons:commons-exec" - compile 'com.netflix.spinnaker.moniker:moniker:0.2.0' + // This is because some classes in this module use the Groovy @Immutable annotation, + // which appears to require consumers to have core groovy on the classpath + api "org.apache.groovy:groovy" + + implementation "javax.inject:javax.inject:1" + implementation "javax.validation:validation-api" + implementation "net.logstash.logback:logstash-logback-encoder" + implementation "com.fasterxml.jackson.module:jackson-module-kotlin" + implementation "com.fasterxml.jackson.datatype:jackson-datatype-joda" + implementation "com.fasterxml.jackson.datatype:jackson-datatype-jsr310" + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-jedis" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "io.spinnaker.kork:kork-web" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-moniker" + implementation "io.spinnaker.kork:kork-secrets" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "com.jakewharton.retrofit:retrofit1-okhttp3-client" + implementation "io.reactivex:rxjava" + implementation "net.jodah:failsafe:1.0.4" + implementation "org.apache.groovy:groovy" + implementation "org.apache.groovy:groovy-templates" + implementation "org.springframework.boot:spring-boot-actuator" + implementation 
"redis.clients:jedis" + implementation "org.jooq:jooq" + + testImplementation project(":cats:cats-test") + testImplementation project(":clouddriver-core-tck") + + testImplementation "cglib:cglib-nodep" + testImplementation "uk.org.webcompere:system-stubs-core:2.1.5" + testImplementation "uk.org.webcompere:system-stubs-jupiter:2.1.5" + testImplementation "io.spinnaker.kork:kork-jedis-test" + testImplementation "io.spinnaker.kork:kork-test" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.springframework.boot:spring-boot-starter-test" + testImplementation "com.google.cloud:google-cloud-secretmanager" + testImplementation "io.spinnaker.kork:kork-cloud-config-server" } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CacheConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CacheConfig.groovy deleted file mode 100644 index df2a866043f..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CacheConfig.groovy +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentExecution -import com.netflix.spinnaker.cats.agent.AgentScheduler -import com.netflix.spinnaker.cats.agent.DefaultAgentScheduler -import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.NamedCacheFactory -import com.netflix.spinnaker.cats.mem.InMemoryNamedCacheFactory -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.Provider -import com.netflix.spinnaker.cats.provider.ProviderRegistry -import com.netflix.spinnaker.clouddriver.search.SearchProvider -import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.ComponentScan -import org.springframework.context.annotation.Configuration - -import java.util.concurrent.TimeUnit - -@Configuration -@ComponentScan([ - 'com.netflix.spinnaker.clouddriver.cache', -]) -@EnableConfigurationProperties(CatsInMemorySearchProperties) -class CacheConfig { - @Bean - @ConditionalOnMissingBean(NamedCacheFactory) - NamedCacheFactory namedCacheFactory() { - new InMemoryNamedCacheFactory() - } - - @Bean - @ConditionalOnMissingBean(AgentScheduler) - @ConditionalOnProperty(value = 'caching.writeEnabled', matchIfMissing = true) - AgentScheduler agentScheduler() { - new DefaultAgentScheduler(60, TimeUnit.SECONDS) - } - - @Bean - @ConditionalOnProperty(value = 'caching.writeEnabled', havingValue = 'false') - @ConditionalOnMissingBean(AgentScheduler) - AgentScheduler noopAgentScheduler() { - new AgentScheduler() { - @Override - void schedule(Agent agent, AgentExecution agentExecution, ExecutionInstrumentation executionInstrumentation) { - //do nothing - } - } - } - - @Bean - @ConditionalOnMissingBean(CatsModule) - CatsModule catsModule(List providers, List executionInstrumentation, NamedCacheFactory cacheFactory, AgentScheduler agentScheduler) { - new CatsModule.Builder().cacheFactory(cacheFactory).scheduler(agentScheduler).instrumentation(executionInstrumentation).build(providers) - } - - @Bean - Cache cacheView(CatsModule catsModule) { - catsModule.view - } - - @Bean - ProviderRegistry providerRegistry(CatsModule catsModule) { - catsModule.providerRegistry - } - - @Bean - ExecutionInstrumentation loggingInstrumentation() { - new LoggingInstrumentation() - } - - @Bean - ExecutionInstrumentation metricInstrumentation(Registry registry) { - new MetricInstrumentation(registry) - } - - @Bean - OnDemandCacheUpdater catsOnDemandCacheUpdater(List providers, CatsModule catsModule) { - new CatsOnDemandCacheUpdater(providers, catsModule) - } - - @Bean - SearchProvider catsSearchProvider(CatsInMemorySearchProperties catsInMemorySearchProperties, - Cache cacheView, - List providers, - ProviderRegistry providerRegistry, - Optional permissionEvaluator, - Optional> keyParsers) { - new CatsSearchProvider(catsInMemorySearchProperties, cacheView, providers, providerRegistry, permissionEvaluator, keyParsers) - } - - @Bean - @ConditionalOnMissingBean(SearchableProvider) - SearchableProvider noopSearchableProvider() { 
- new NoopSearchableProvider() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsOnDemandCacheUpdater.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsOnDemandCacheUpdater.groovy deleted file mode 100644 index 7a54851b0a2..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsOnDemandCacheUpdater.groovy +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentLock -import com.netflix.spinnaker.cats.agent.AgentScheduler -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.Provider -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import java.util.concurrent.TimeUnit - -@Component -@Slf4j -class CatsOnDemandCacheUpdater implements OnDemandCacheUpdater { - - private final List providers - private final CatsModule catsModule - - @Autowired - AgentScheduler agentScheduler - - @Autowired - public CatsOnDemandCacheUpdater(List providers, CatsModule catsModule) { - this.providers = providers - this.catsModule = catsModule - } - - private Collection getOnDemandAgents() { - providers.collect { - it.agents.findAll { it instanceof OnDemandAgent } as Collection - }.flatten() - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - onDemandAgents.any { it.handles(type, cloudProvider) } - } - - @Override - OnDemandCacheResult handle(OnDemandAgent.OnDemandType type, String cloudProvider, Map data) { - Collection onDemandAgents = onDemandAgents.findAll { it.handles(type, cloudProvider) } - return handle(type, onDemandAgents, data) - } - - OnDemandCacheResult handle(OnDemandAgent.OnDemandType type, Collection onDemandAgents, Map data) { - log.debug("Calling handle on data: {}, onDemandAgents: {}, type: {}", data, onDemandAgents, type) - boolean hasOnDemandResults = false - Map> cachedIdentifiersByType = [:].withDefault { [] } - - for (OnDemandAgent agent : onDemandAgents) { - try { - AgentLock lock = null; - if (agentScheduler.atomic && !(lock = agentScheduler.tryLock((Agent) agent))) { - hasOnDemandResults = true // force Orca to retry - continue; - } - final long startTime = System.nanoTime() - def providerCache = catsModule.getProviderRegistry().getProviderCache(agent.providerName) - if (agent.metricsSupport) { - agent.metricsSupport.countOnDemand() - } - OnDemandAgent.OnDemandResult result = agent.handle(providerCache, data) - if (result) { - if (agentScheduler.atomic && !(agentScheduler.lockValid(lock))) { - hasOnDemandResults = true // force Orca to retry - continue; - } - if (!agent.metricsSupport) { - continue; - } - if (result.cacheResult) { - boolean agentHasOnDemandResults = 
!(result.cacheResult.cacheResults ?: [:]).values().flatten().isEmpty() && !agentScheduler.atomic - if (agentHasOnDemandResults) { - hasOnDemandResults = true; - result.cacheResult.cacheResults.each { k, v -> - if (v) { - cachedIdentifiersByType[k].addAll(v*.id) - } - } - } - agent.metricsSupport.cacheWrite { - providerCache.putCacheResult(result.sourceAgentType, result.authoritativeTypes, result.cacheResult) - } - } - if (result.evictions) { - agent.metricsSupport.cacheEvict { - result.evictions.each { String evictType, Collection ids -> - providerCache.evictDeletedItems(evictType, ids) - } - } - } - if (agentScheduler.atomic && !(agentScheduler.tryRelease(lock))) { - throw new IllegalStateException("We likely just wrote stale data. If you're seeing this, file a github issue: https://github.com/spinnaker/spinnaker/issues") - } - final long elapsed = System.nanoTime() - startTime - agent.metricsSupport.recordTotalRunTimeNanos(elapsed) - log.info("$agent.providerName/$agent?.onDemandAgentType handled $type in ${TimeUnit.NANOSECONDS.toMillis(elapsed)} millis. Payload: $data") - } - } catch (e) { - if (agent.metricsSupport != null) { - agent.metricsSupport.countError() - } - log.warn("$agent.providerName/$agent.onDemandAgentType failed to handle on demand update for $type", e) - } - } - - if (hasOnDemandResults) { - return new OnDemandCacheResult( - status: OnDemandCacheStatus.PENDING, - cachedIdentifiersByType: cachedIdentifiersByType - ) - } - - return new OnDemandCacheResult( - status: OnDemandCacheStatus.SUCCESSFUL - ) - } - - @Override - Collection pendingOnDemandRequests(OnDemandAgent.OnDemandType type, String cloudProvider) { - if (agentScheduler.atomic) { - return [] - } - - Collection onDemandAgents = onDemandAgents.findAll { it.handles(type, cloudProvider) } - return onDemandAgents.collect { - def providerCache = catsModule.getProviderRegistry().getProviderCache(it.providerName) - it.pendingOnDemandRequests(providerCache) - }.flatten() - } - - @Override - Map pendingOnDemandRequest(OnDemandAgent.OnDemandType type, String cloudProvider, String id) { - if (agentScheduler.atomic) { - return null - } - - Collection onDemandAgents = onDemandAgents.findAll { it.handles(type, cloudProvider) } - return onDemandAgents.findResults { - def providerCache = catsModule.getProviderRegistry().getProviderCache(it.providerName) - it.pendingOnDemandRequest(providerCache, id) - }?.getAt(0) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProvider.groovy index 8c7d774841c..3045df0c905 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProvider.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProvider.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.cache +import com.google.common.util.concurrent.ThreadFactoryBuilder import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.provider.ProviderRegistry import com.netflix.spinnaker.clouddriver.search.SearchProvider @@ -51,7 +52,7 @@ class CatsSearchProvider implements SearchProvider, Runnable { - private final AtomicReference>>> cachedIdentifiersByType = new AtomicReference( + private final AtomicReference>> cachedIdentifiersByType = new AtomicReference( [:] ) @@ -87,7 +88,12 @@ class CatsSearchProvider implements SearchProvider, Runnable { } if 
(catsInMemorySearchProperties.enabled) { - scheduledExecutorService = Executors.newScheduledThreadPool(1) + scheduledExecutorService = + Executors.newScheduledThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat(CatsSearchProvider.class.getSimpleName() + "-%d") + .build()); } } @@ -120,14 +126,9 @@ class CatsSearchProvider implements SearchProvider, Runnable { }.collect { provider -> def cache = providerRegistry.getProviderCache(provider.getProviderName()) return cache.getIdentifiers("instances").findResults { key -> - def v = provider.parseKey(key) - if (v) { - v["_id"] = key - } - - return v?.collectEntries { - [it.key, it.value.toLowerCase()] - } + // Even though we don't need the parsed Map, we should still allow the provider to reject invalid keys + if (provider.parseKey(key)) + return key?.toLowerCase() } }.flatten() @@ -231,7 +232,7 @@ class CatsSearchProvider implements SearchProvider, Runnable { private List findMatches(String q, List cachesToQuery, Map filters) { - if (!q && keyParsers) { + if (!q && keyParsers && filters) { // no keyword search so find sensible default value to set for searching Set filterKeys = filters.keySet() keyParsers.find { @@ -242,7 +243,17 @@ class CatsSearchProvider implements SearchProvider, Runnable { return true } } - log.info("no query string specified, looked for sensible default and found: ${q}") + + if (q) { + log.info( + "no query string specified, looked for sensible default and found: {} (cachesToQuery: {})", + q, + cachesToQuery + ) + } else { + log.info("no query string specified and no sensible default found (cachesToQuery: {})", cachesToQuery) + return [] + } } log.info("Querying ${cachesToQuery} for term: ${q}") @@ -254,19 +265,18 @@ class CatsSearchProvider implements SearchProvider, Runnable { return true } - if (keyParsers) { - KeyParser parser = keyParsers.find { it.cloudProvider == filters.cloudProvider && it.canParseType(cache) } - if (parser) { - Map parsed = parser.parseKey(key) - return filters.entrySet().every { filter -> - String[] vals = filter.value.split(',') - filter.key == 'cloudProvider' || parsed && - ((parsed.containsKey(filter.key) && vals.contains(parsed[filter.key])) || + KeyParser parser = keyParsers?.find { it.cloudProvider == filters.cloudProvider && it.canParseType(cache) } + if (parser) { + Map parsed = parser.parseKey(key) + return filters.entrySet().every { filter -> + String[] vals = filter.value.split(',') + filter.key == 'cloudProvider' || parsed && + ((parsed.containsKey(filter.key) && vals.contains(parsed[filter.key])) || (parsed.containsKey(parser.getNameMapping(cache)) && vals.contains(parsed[parser.getNameMapping(cache)]))) - } - } else { - log.warn("No parser found for $cache:$key") } + } else { + log.debug("No parser found for $cache:$key") + return true } } catch (Exception e) { log.warn("Failed on $cache:$key", e) @@ -283,9 +293,7 @@ class CatsSearchProvider implements SearchProvider, Runnable { * should be sufficient. 
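+ * (Identifiers are cached lower-cased by the background refresh, so a plain contains() check on the normalized word is all that's needed.)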
*/ def identifiersForCache = cached.get(cache) - identifiers = identifiersForCache.findAll { identifier -> - identifier.values().contains(normalizedWord) - }.collect { it["_id"] } + identifiers = identifiersForCache.findAll { it.contains(normalizedWord) } } else { List validProviders = providers.findAll { it.supportsSearch(cache, filters) } identifiers = new HashSet<>() diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CustomSchedulableAgentIntervalProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CustomSchedulableAgentIntervalProvider.groovy deleted file mode 100644 index 6d14c0598bf..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CustomSchedulableAgentIntervalProvider.groovy +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.redis.cluster.AgentIntervalProvider -import com.netflix.spinnaker.cats.redis.cluster.DefaultAgentIntervalProvider -import groovy.transform.CompileStatic - -@CompileStatic -class CustomSchedulableAgentIntervalProvider extends DefaultAgentIntervalProvider { - - CustomSchedulableAgentIntervalProvider(long interval, long timeout) { - super(interval, timeout) - } - - CustomSchedulableAgentIntervalProvider(long interval, long errorInterval, long timeout) { - super(interval, errorInterval, timeout) - } - - @Override - AgentIntervalProvider.Interval getInterval(Agent agent) { - if (agent instanceof CustomScheduledAgent) { - return getCustomInterval(agent) - } - return super.getInterval(agent) - } - - AgentIntervalProvider.Interval getCustomInterval(CustomScheduledAgent agent) { - final long pollInterval = agent.pollIntervalMillis == -1 ? super.interval : agent.pollIntervalMillis - final long errorInterval = agent.errorIntervalMillis == -1 ? super.errorInterval : agent.errorIntervalMillis - final long timeoutMillis = agent.timeoutMillis == -1 ? super.timeout : agent.timeoutMillis - return new AgentIntervalProvider.Interval(pollInterval, errorInterval, timeoutMillis) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CustomScheduledAgent.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CustomScheduledAgent.java deleted file mode 100644 index 793614917e1..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CustomScheduledAgent.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache; - -import com.netflix.spinnaker.cats.agent.Agent; - -/** - * Allows an Agent to customize it's poll interval. - */ -public interface CustomScheduledAgent extends Agent { - /** - * @return the interval in milliseconds, or -1 to use the system default poll interval - */ - long getPollIntervalMillis(); - - /** - * @return the timeout in milliseconds, or -1 to use the system default timeout - */ - long getTimeoutMillis(); - - /** - * @return the error interval in milliseconds, or -1 to use the system default error interval - */ - default long getErrorIntervalMillis() { - return getPollIntervalMillis(); - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/DynomiteCacheConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/DynomiteCacheConfig.groovy deleted file mode 100644 index 66284da6a61..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/DynomiteCacheConfig.groovy +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.cache - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.discovery.DiscoveryClient -import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration -import com.netflix.dyno.connectionpool.Host -import com.netflix.dyno.connectionpool.HostSupplier -import com.netflix.dyno.connectionpool.TokenMapSupplier -import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl -import com.netflix.dyno.connectionpool.impl.lb.HostToken -import com.netflix.dyno.jedis.DynoJedisClient -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentScheduler -import com.netflix.spinnaker.cats.cache.NamedCacheFactory -import com.netflix.spinnaker.cats.compression.CompressionStrategy -import com.netflix.spinnaker.cats.compression.GZipCompression -import com.netflix.spinnaker.cats.compression.NoopCompression -import com.netflix.spinnaker.cats.dynomite.cache.DynomiteCache.CacheMetrics -import com.netflix.spinnaker.cats.dynomite.cache.DynomiteNamedCacheFactory -import com.netflix.spinnaker.cats.dynomite.cluster.DynoClusteredAgentScheduler -import com.netflix.spinnaker.cats.dynomite.cluster.DynoClusteredSortAgentScheduler -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions -import com.netflix.spinnaker.cats.redis.cluster.AgentIntervalProvider -import com.netflix.spinnaker.cats.redis.cluster.DefaultNodeIdentity -import com.netflix.spinnaker.cats.redis.cluster.DefaultNodeStatusProvider -import com.netflix.spinnaker.cats.redis.cluster.NodeStatusProvider -import com.netflix.spinnaker.clouddriver.core.RedisConfigurationProperties -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate -import com.netflix.spinnaker.kork.jedis.RedisClientDelegate -import org.springframework.beans.factory.annotation.Value -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration - -import java.time.Clock -import java.util.concurrent.TimeUnit - -@Configuration -@ConditionalOnExpression('${dynomite.enabled:false}') -@EnableConfigurationProperties([DynomiteConfigurationProperties, RedisConfigurationProperties, GZipCompressionStrategyProperties]) -class DynomiteCacheConfig { - - @Bean - CacheMetrics cacheMetrics(Registry registry) { - new SpectatorDynomiteCacheMetrics(registry) - } - - @Bean - @ConfigurationProperties("dynomite.connectionPool") - ConnectionPoolConfigurationImpl connectionPoolConfiguration(DynomiteConfigurationProperties dynomiteConfigurationProperties) { - new ConnectionPoolConfigurationImpl(dynomiteConfigurationProperties.applicationName).withHashtag("{}") - } - - @Bean - CompressionStrategy compressionStrategy(ConnectionPoolConfigurationImpl connectionPoolConfiguration, - GZipCompressionStrategyProperties properties) { - if (!properties.enabled) { - return new NoopCompression() - } - return new GZipCompression( - properties.thresholdBytesSize, - properties.compressEnabled && connectionPoolConfiguration.compressionStrategy != ConnectionPoolConfiguration.CompressionStrategy.THRESHOLD - ) - } - - @Bean(destroyMethod = "stopClient") - 
DynoJedisClient dynoJedisClient(DynomiteConfigurationProperties dynomiteConfigurationProperties, ConnectionPoolConfigurationImpl connectionPoolConfiguration, Optional discoveryClient) { - def builder = new DynoJedisClient.Builder() - .withApplicationName(dynomiteConfigurationProperties.applicationName) - .withDynomiteClusterName(dynomiteConfigurationProperties.clusterName) - - discoveryClient.map({ dc -> - builder.withDiscoveryClient(dc) - .withCPConfig(connectionPoolConfiguration) - }).orElseGet({ - connectionPoolConfiguration - .withTokenSupplier(new StaticTokenMapSupplier(dynomiteConfigurationProperties.dynoHostTokens)) - .setLocalDataCenter(dynomiteConfigurationProperties.localDataCenter) - .setLocalRack(dynomiteConfigurationProperties.localRack) - - builder - .withHostSupplier(new StaticHostSupplier(dynomiteConfigurationProperties.dynoHosts)) - .withCPConfig(connectionPoolConfiguration) - }).build() - } - - @Bean - DynomiteClientDelegate dynomiteClientDelegate(DynoJedisClient dynoJedisClient) { - new DynomiteClientDelegate(dynoJedisClient) - } - - @Bean - @ConfigurationProperties("caching.redis") - RedisCacheOptions.Builder redisCacheOptionsBuilder() { - RedisCacheOptions.builder() - } - - @Bean - RedisCacheOptions redisCacheOptions(RedisCacheOptions.Builder redisCacheOptionsBuilder) { - redisCacheOptionsBuilder.build() - } - - @Bean - NamedCacheFactory cacheFactory( - @Value('${dynomite.keyspace:#{null}}') String keyspace, - DynomiteClientDelegate dynomiteClientDelegate, - ObjectMapper objectMapper, - RedisCacheOptions redisCacheOptions, - CacheMetrics cacheMetrics, - CompressionStrategy compressionStrategy) { - new DynomiteNamedCacheFactory(Optional.ofNullable(keyspace), dynomiteClientDelegate, objectMapper, redisCacheOptions, cacheMetrics, compressionStrategy) - } - - @Bean - @ConditionalOnMissingBean(NodeStatusProvider.class) - DefaultNodeStatusProvider nodeStatusProvider() { - new DefaultNodeStatusProvider() - } - - @Bean - AgentIntervalProvider agentIntervalProvider(RedisConfigurationProperties redisConfigurationProperties) { - new CustomSchedulableAgentIntervalProvider( - TimeUnit.SECONDS.toMillis(redisConfigurationProperties.poll.intervalSeconds), - TimeUnit.SECONDS.toMillis(redisConfigurationProperties.poll.errorIntervalSeconds), - TimeUnit.SECONDS.toMillis(redisConfigurationProperties.poll.timeoutSeconds) - ); - } - - @Bean - @ConditionalOnProperty(value = "caching.writeEnabled", matchIfMissing = true) - AgentScheduler agentScheduler(Clock clock, - RedisConfigurationProperties redisConfigurationProperties, - RedisClientDelegate redisClientDelegate, - AgentIntervalProvider agentIntervalProvider, - NodeStatusProvider nodeStatusProvider) { - if (redisConfigurationProperties.scheduler.equalsIgnoreCase("default")) { - new DynoClusteredAgentScheduler( - (DynomiteClientDelegate) redisClientDelegate, - new DefaultNodeIdentity(), - agentIntervalProvider, - nodeStatusProvider - ); - } else if (redisConfigurationProperties.scheduler.equalsIgnoreCase("sort")) { - new DynoClusteredSortAgentScheduler(clock, redisClientDelegate, nodeStatusProvider, agentIntervalProvider, redisConfigurationProperties.parallelism ?: -1); - } else { - throw new IllegalStateException("redis.scheduler must be one of 'default', 'sort', or ''."); - } - } - - static class StaticHostSupplier implements HostSupplier { - - private final List hosts - - StaticHostSupplier(List hosts) { - this.hosts = hosts - } - - @Override - List getHosts() { - return hosts - } - } - - static class StaticTokenMapSupplier implements 
TokenMapSupplier { - - List hostTokens = new ArrayList<>() - - StaticTokenMapSupplier(List hostTokens) { - this.hostTokens = hostTokens - } - - @Override - List getTokens(Set activeHosts) { - return hostTokens - } - - @Override - HostToken getTokenForHost(Host host, Set activeHosts) { - return hostTokens.find { it.host == host } - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/DynomiteConfigurationProperties.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/DynomiteConfigurationProperties.groovy deleted file mode 100644 index cedb7f3b3c9..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/DynomiteConfigurationProperties.groovy +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.dyno.connectionpool.Host -import com.netflix.dyno.connectionpool.Host.Status -import com.netflix.dyno.connectionpool.impl.lb.HostToken -import org.springframework.boot.context.properties.ConfigurationProperties - -import javax.validation.constraints.NotNull - -/** - * Static host setup defines defaults for running Dynomite locally. Discovery should be used for any - * non-development environment. - */ -@ConfigurationProperties("dynomite") -class DynomiteConfigurationProperties { - - Boolean enabled = false - - String applicationName = "clouddriver" - String clusterName = "dyno_clouddriver" - - String localRack = "localrack" - String localDataCenter = "localrac" - - List hosts = [] - - static class DynoHost { - @NotNull - String hostname - - String ipAddress - - int port = Host.DEFAULT_PORT - - Status status = Status.Up - - @NotNull - String rack = 'localrack' - - @NotNull - String datacenter = 'localrac' - - Long token = 1000000L - - String hashtag - } - - List getDynoHosts() { - return hosts.collect { new Host(it.hostname, it.ipAddress, it.port, it.rack, it.datacenter, it.status, it.hashtag) } - } - - List getDynoHostTokens() { - List tokens = [] - getDynoHosts().eachWithIndex { v, i -> - tokens.add(new HostToken(hosts.get(i).token, v)) - } - return tokens - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/EurekaStatusNodeStatusProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/EurekaStatusNodeStatusProvider.groovy deleted file mode 100644 index 47d356eafef..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/EurekaStatusNodeStatusProvider.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.appinfo.InstanceInfo -import com.netflix.discovery.EurekaClient -import com.netflix.spinnaker.cats.redis.cluster.NodeStatusProvider - -class EurekaStatusNodeStatusProvider implements NodeStatusProvider { - private final EurekaClient eurekaClient - - EurekaStatusNodeStatusProvider(EurekaClient eurekaClient) { - this.eurekaClient = eurekaClient - } - - @Override - boolean isNodeEnabled() { - eurekaClient.instanceRemoteStatus == InstanceInfo.InstanceStatus.UP - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/JedisCacheConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/JedisCacheConfig.groovy deleted file mode 100644 index 0bdaf7431e9..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/JedisCacheConfig.groovy +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.cache - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentScheduler -import com.netflix.spinnaker.cats.cache.NamedCacheFactory -import com.netflix.spinnaker.cats.redis.cache.RedisCache.CacheMetrics -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions -import com.netflix.spinnaker.cats.redis.cache.RedisNamedCacheFactory -import com.netflix.spinnaker.cats.redis.cluster.AgentIntervalProvider -import com.netflix.spinnaker.cats.redis.cluster.ClusteredAgentScheduler -import com.netflix.spinnaker.cats.redis.cluster.ClusteredSortAgentScheduler -import com.netflix.spinnaker.cats.redis.cluster.DefaultNodeIdentity -import com.netflix.spinnaker.cats.redis.cluster.NodeStatusProvider -import com.netflix.spinnaker.clouddriver.core.RedisConfigurationProperties -import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService -import com.netflix.spinnaker.kork.jedis.JedisClientDelegate -import com.netflix.spinnaker.kork.jedis.RedisClientDelegate -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import redis.clients.jedis.JedisPool - -@Configuration -@ConditionalOnExpression('${redis.enabled:true}') -@EnableConfigurationProperties(RedisConfigurationProperties) -class JedisCacheConfig { - - @Bean - RedisClientDelegate redisClientDelegate(JedisPool jedisPool) { - new JedisClientDelegate(jedisPool) - } - - @Bean - NamedCacheFactory cacheFactory( - RedisClientDelegate redisClientDelegate, - ObjectMapper objectMapper, - RedisCacheOptions redisCacheOptions, - CacheMetrics cacheMetrics) { - new RedisNamedCacheFactory(redisClientDelegate, objectMapper, redisCacheOptions, cacheMetrics) - } - - @Bean - @ConditionalOnProperty(value = "caching.writeEnabled", matchIfMissing = true) - AgentScheduler agentScheduler(RedisConfigurationProperties redisConfigurationProperties, - RedisClientDelegate redisClientDelegate, - JedisPool jedisPool, - AgentIntervalProvider agentIntervalProvider, - NodeStatusProvider nodeStatusProvider, - DynamicConfigService dynamicConfigService) { - if (redisConfigurationProperties.scheduler.equalsIgnoreCase("default")) { - URI redisUri = URI.create(redisConfigurationProperties.connection) - String redisHost = redisUri.host - int redisPort = redisUri.port - if (redisPort == -1) { - redisPort = 6379 - } - new ClusteredAgentScheduler( - redisClientDelegate, - new DefaultNodeIdentity(redisHost, redisPort), - agentIntervalProvider, - nodeStatusProvider, - redisConfigurationProperties.agent.enabledPattern, - redisConfigurationProperties.agent.agentLockAcquisitionIntervalSeconds, - dynamicConfigService) - } else if (redisConfigurationProperties.scheduler.equalsIgnoreCase("sort")) { - new ClusteredSortAgentScheduler(jedisPool, nodeStatusProvider, agentIntervalProvider, redisConfigurationProperties.parallelism ?: -1); - } else { - throw new IllegalStateException("redis.scheduler must be one of 'default', 'sort', or ''."); - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/KeyParser.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/KeyParser.java deleted file mode 100644 index f121bc50362..00000000000 --- 
a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/KeyParser.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache; - -import java.util.Map; - -public interface KeyParser { - - /** - * Returns the parsed property name for the specified cache that represents the "name" of the item being - * parsed. - * - * For example, - * - * Given the AWS key serverGroups:mycluster-stack-detail:some-account:some-region:myservergroup-stack-detail-v000, - * we might store the server group name (the last part of the key) under a different property than name, - * e.g., serverGroup, in which case the mapping of Namespace.SERVER_GROUPS.ns to "serverGroup" - * would be needed. - * - * @param cache the name of the cache (key type) being parsed - * - * @return the mapping of the key name to the actual key property name for the specified cache or - * null if no mapping exists or is required (e.g., if the parsed key already contains a name - * property and it maps correctly). - */ - default String getNameMapping(String cache) { - return null; - } - - /** - * Indicates which provider this particular parser handles - * @return the cloud provider ID - */ - String getCloudProvider(); - - /** - * Parses the supplied key to an arbitrary Map of attributes - * @param key the full key - * @return a Map of the key attributes - */ - Map parseKey(String key); - - /** - * indicates whether this parser can parse the supplied type - * @param type the entity type, typically corresponding to a value in the implementing class's Namespace - * @return true if it can parse this type, false otherwise - */ - Boolean canParseType(String type); - - /** - * indicates whether this parser can parse the supplied field - * @param field the entity type field, typically corresponding to a value in the implementing class's - * parsed Namespace field - * @return true if it can parse this field, false otherwise - */ - Boolean canParseField(String field); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/LoggingInstrumentation.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/LoggingInstrumentation.groovy deleted file mode 100644 index 037e8768203..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/LoggingInstrumentation.groovy +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation -import org.slf4j.Logger -import org.slf4j.LoggerFactory -import org.springframework.stereotype.Component - -@Component -class LoggingInstrumentation implements ExecutionInstrumentation { - private final Logger logger = LoggerFactory.getLogger(LoggingInstrumentation) - - @Override - void executionStarted(Agent agent) { - logger.debug("${agent.providerName}:${agent.agentType} starting") - } - - @Override - void executionCompleted(Agent agent, long durationMs) { - logger.info("${agent.providerName}:${agent.agentType} completed in ${durationMs / 1000}s") - } - - @Override - void executionFailed(Agent agent, Throwable cause) { - logger.warn("${agent.providerName}:${agent.agentType} completed with one or more failures", cause) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentation.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentation.groovy deleted file mode 100644 index a46ebadfe62..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentation.groovy +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.spectator.api.Id -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation -import org.slf4j.Logger -import org.slf4j.LoggerFactory -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import java.util.concurrent.ConcurrentHashMap -import java.util.concurrent.ConcurrentMap -import java.util.concurrent.TimeUnit - -@Component -class MetricInstrumentation implements ExecutionInstrumentation { - private final Registry registry - - private final Id timingId - private final Id counterId - - @Autowired - public MetricInstrumentation(Registry registry) { - this.registry = registry - timingId = registry.createId('executionTime').withTag('className', MetricInstrumentation.simpleName) - counterId = registry.createId('executionCount').withTag('className', MetricInstrumentation.simpleName) - } - - private static String agentName(Agent agent) { - "$agent.providerName/$agent.agentType" - } - - @Override - void executionStarted(Agent agent) { - // do nothing - } - - @Override - void executionCompleted(Agent agent, long elapsedMs) { - registry.timer(timingId.withTag('agent', agentName(agent))).record(elapsedMs, TimeUnit.MILLISECONDS) - registry.counter(counterId.withTag('agent', agentName(agent)).withTag('status', 'success')).increment() - } - - @Override - void executionFailed(Agent agent, Throwable cause) { - registry.counter(counterId.withTag('agent', agentName(agent)).withTag('status', 'failure')).increment() - } -} - diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/NoopOnDemandCacheUpdater.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/NoopOnDemandCacheUpdater.groovy deleted file mode 100644 index 57167346bde..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/NoopOnDemandCacheUpdater.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.cache - -import org.springframework.stereotype.Component - -/** - * A default, no-op implementation of an {@link OnDemandCacheUpdater} - */ -@Component -class NoopOnDemandCacheUpdater implements OnDemandCacheUpdater { - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - false - } - - @Override - OnDemandCacheResult handle(OnDemandAgent.OnDemandType type, String cloudProvider, Map data) { - return new OnDemandCacheResult( - status: OnDemandCacheStatus.SUCCESSFUL - ) - } - - @Override - Collection pendingOnDemandRequests(OnDemandAgent.OnDemandType type, String cloudProvider) { - return [] - } - - @Override - Map pendingOnDemandRequest(OnDemandAgent.OnDemandType type, String cloudProvider, String id) { - return null - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandAgent.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandAgent.java deleted file mode 100644 index dd0b5664935..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandAgent.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache; - -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.moniker.Moniker; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -public interface OnDemandAgent { - Logger logger = LoggerFactory.getLogger(OnDemandAgent.class); - - String getProviderName(); - - String getOnDemandAgentType(); - - // TODO(ttomsu): This seems like it should go in a different interface. - OnDemandMetricsSupport getMetricsSupport(); - - enum OnDemandType { - ServerGroup, - SecurityGroup, - LoadBalancer, - Job, - TargetGroup, - Manifest; - - static OnDemandType fromString(String s) { - return Arrays.stream(values()) - .filter(v -> v.toString().equalsIgnoreCase(s)) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("Cannot create OnDemandType from '" + s + "'")); - } - } - - boolean handles(OnDemandType type, String cloudProvider); - - static class OnDemandResult { - String sourceAgentType; - Collection authoritativeTypes = new ArrayList<>(); - CacheResult cacheResult; - Map> evictions = new HashMap<>(); - - public OnDemandResult() {} - - public OnDemandResult(String sourceAgentType, CacheResult cacheResult, Map> evictions) { - this.sourceAgentType = sourceAgentType; - this.cacheResult = cacheResult; - this.evictions = evictions; - } - } - - /* - * WARNING: this is an interim solution while cloud providers write their own ways to derive monikers. 
- */ - default Moniker convertOnDemandDetails(Map details) { - if (details == null || details.isEmpty()) { - return null; - } - - try { - String sequence = details.get("sequence"); - - return Moniker.builder() - .app(details.get("application")) - .stack(details.get("stack")) - .detail(details.get("detail")) - .cluster(details.get("cluster")) - .sequence(sequence != null ? Integer.valueOf(sequence) : null) - .build(); - } catch (Exception e) { - logger.warn("Unable to build moniker (details: {})", e); - return null; - } - } - - OnDemandResult handle(ProviderCache providerCache, Map data); - Collection pendingOnDemandRequests(ProviderCache providerCache); - - default Map pendingOnDemandRequest(ProviderCache providerCache, String id) { - Collection pendingOnDemandRequests = pendingOnDemandRequests(providerCache); - return pendingOnDemandRequests.stream().filter(m -> id.equals(m.get("id"))).findFirst().orElse(null); - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheUpdater.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheUpdater.groovy deleted file mode 100644 index f7da7f65291..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheUpdater.groovy +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -/** - * An on-demand cache updater. Allows some non-scheduled trigger to initiate a cache refresh for a given type. An on-demand cache request will fan-out to all available updaters. - * - * - */ -interface OnDemandCacheUpdater { - - enum OnDemandCacheStatus { - SUCCESSFUL, - PENDING - } - - /** - * Indicates if the updater is able to handle this on-demand request given the type and cloudProvider - * @param type - * @param cloudProvider - * @return - */ - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) - - /** - * Handles the update request - * @param type - * @param cloudProvider - * @param data - */ - OnDemandCacheResult handle(OnDemandAgent.OnDemandType type, String cloudProvider, Map data) - - Collection pendingOnDemandRequests(OnDemandAgent.OnDemandType type, String cloudProvider) - - Map pendingOnDemandRequest(OnDemandAgent.OnDemandType type, String cloudProvider, String id) - - static class OnDemandCacheResult { - OnDemandCacheStatus status - Map> cachedIdentifiersByType = [:] - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupport.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupport.groovy deleted file mode 100644 index 44326a7c14b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupport.groovy +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.spectator.api.Counter -import com.netflix.spectator.api.Registry -import com.netflix.spectator.api.Timer - -import java.util.concurrent.TimeUnit -import java.util.function.Supplier - -class OnDemandMetricsSupport { - public static final String ON_DEMAND_TOTAL_TIME = "onDemand_total" - public static final String DATA_READ = "onDemand_read" - public static final String DATA_TRANSFORM = "onDemand_transform" - public static final String ON_DEMAND_STORE = "onDemand_store" - public static final String CACHE_WRITE = "onDemand_cache" - public static final String CACHE_EVICT = "onDemand_evict" - public static final String ON_DEMAND_ERROR = "onDemand_error" - public static final String ON_DEMAND_COUNT = "onDemand_count" - - private final Timer onDemandTotal - private final Timer dataRead - private final Timer dataTransform - private final Timer onDemandStore - private final Timer cacheWrite - private final Timer cacheEvict - private final Counter onDemandErrors - private final Counter onDemandCount - - public OnDemandMetricsSupport(Registry registry, OnDemandAgent agent, String onDemandType) { - final String[] tags = ["providerName", agent.providerName, "agentType", agent.onDemandAgentType, "onDemandType", onDemandType] - this.onDemandTotal = registry.timer(ON_DEMAND_TOTAL_TIME, tags) - this.dataRead = registry.timer(DATA_READ, tags) - this.dataTransform = registry.timer(DATA_TRANSFORM, tags) - this.onDemandStore = registry.timer(ON_DEMAND_STORE, tags) - this.cacheWrite = registry.timer(CACHE_WRITE, tags) - this.cacheEvict = registry.timer(CACHE_EVICT, tags) - this.onDemandErrors = registry.counter(ON_DEMAND_ERROR, tags) - this.onDemandCount = registry.counter(ON_DEMAND_COUNT, tags) - } - - private T record(Timer timer, Supplier closure) { - final long start = System.nanoTime() - try { - return closure.get() - } finally { - final long elapsed = System.nanoTime() - start - timer.record(elapsed, TimeUnit.NANOSECONDS) - } - } - - public T readData(Supplier closure) { - record(dataRead, closure) - } - - public T transformData(Supplier closure) { - record(dataTransform, closure) - } - - public T onDemandStore(Supplier closure) { - record(onDemandStore, closure) - } - - public T cacheWrite(Supplier closure) { - record(cacheWrite, closure) - } - - public T cacheEvict(Supplier closure) { - record(cacheEvict, closure) - } - - public void countError() { - onDemandErrors.increment() - } - - public void countOnDemand() { - onDemandCount.increment() - } - - public void recordTotalRunTimeNanos(long nanos) { - onDemandTotal.record(nanos, TimeUnit.NANOSECONDS) - } - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/RedisCacheConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/RedisCacheConfig.groovy deleted file mode 100644 index f38308a005e..00000000000 --- 
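The record helper in OnDemandMetricsSupport, just above, is the whole trick behind readData, transformData, and friends: run a Supplier, then attribute the elapsed wall time to a tagged Spectator timer, even when the supplier throws. A minimal self-contained version, with an illustrative registry and tag values:

```java
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

// Times a supplier against a Spectator timer; the finally block ensures the
// elapsed time is recorded even if work.get() throws.
class TimedSupplierDemo {
  private static <T> T record(Timer timer, Supplier<T> work) {
    long start = System.nanoTime();
    try {
      return work.get();
    } finally {
      timer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
  }

  public static void main(String[] args) {
    Registry registry = new DefaultRegistry();
    Timer dataRead = registry.timer("onDemand_read",
        "providerName", "example", "agentType", "exampleAgent", "onDemandType", "ServerGroup");

    String result = record(dataRead, () -> "cached payload");
    System.out.println(result + " -> " + dataRead.count() + " invocation(s) timed");
  }
}
```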
a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/RedisCacheConfig.groovy +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache - -import com.netflix.discovery.EurekaClient -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.redis.cache.RedisCache.CacheMetrics -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions -import com.netflix.spinnaker.cats.redis.cluster.AgentIntervalProvider -import com.netflix.spinnaker.cats.redis.cluster.DefaultNodeStatusProvider -import com.netflix.spinnaker.cats.redis.cluster.NodeStatusProvider -import com.netflix.spinnaker.clouddriver.core.RedisConfigurationProperties -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration - -import java.util.concurrent.TimeUnit - -@Configuration -@ConditionalOnExpression('${redis.enabled:true}') -@EnableConfigurationProperties(RedisConfigurationProperties) -class RedisCacheConfig { - - @Bean - @ConfigurationProperties("caching.redis") - RedisCacheOptions.Builder redisCacheOptionsBuilder() { - return RedisCacheOptions.builder() - } - - @Bean - RedisCacheOptions redisCacheOptions(RedisCacheOptions.Builder redisCacheOptionsBuilder) { - return redisCacheOptionsBuilder.build() - } - - @Bean - CacheMetrics cacheMetrics(Registry registry) { - new SpectatorRedisCacheMetrics(registry) - } - - @Bean - AgentIntervalProvider agentIntervalProvider(RedisConfigurationProperties redisConfigurationProperties) { - new CustomSchedulableAgentIntervalProvider( - TimeUnit.SECONDS.toMillis(redisConfigurationProperties.poll.intervalSeconds), - TimeUnit.SECONDS.toMillis(redisConfigurationProperties.poll.errorIntervalSeconds), - TimeUnit.SECONDS.toMillis(redisConfigurationProperties.poll.timeoutSeconds) - ) - } - - @Bean - NodeStatusProvider nodeStatusProvider(Optional eurekaClient) { - return eurekaClient.map({ new EurekaStatusNodeStatusProvider(it) }).orElseGet({ new DefaultNodeStatusProvider() }) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/SearchableProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/SearchableProvider.java deleted file mode 100644 index a18071c3265..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/SearchableProvider.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.cache; - -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.CachingAgent; -import com.netflix.spinnaker.cats.cache.Cache; -import com.netflix.spinnaker.cats.provider.Provider; -import groovy.transform.Canonical; -import lombok.AllArgsConstructor; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.Collection; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -public interface SearchableProvider extends Provider { - - /** - * Names of caches to search by default - */ - Set getDefaultCaches(); - - /** - * Map keyed by named cache to a template that produces a url for a search result. - * - * The template will be supplied the result from calling parseKey on the search key - */ - Map getUrlMappingTemplates(); - - /** - * SearchResultHydrators for cache types - */ - Map getSearchResultHydrators(); - - /** - * The parts of the key, if this Provider supports keys of this type, otherwise null. - */ - Map parseKey(String key); - - default Optional getKeyParser() { - return Optional.empty(); - } - - /** - * Build a search term for querying. - * - * If this SearchableProvider supplies a KeyParser then the - * search term is scoped to that KeyParsers cloudProvider, - * otherwise injects a wildcard glob at the start. - * - * Supplying a KeyParser to provide a CloudProviderId to scope - * the search more narrowly results in improved search performance. - */ - default String buildSearchTerm(String type, String queryTerm) { - String prefix = getKeyParser().map(KeyParser::getCloudProvider).orElse("*"); - return prefix + ":" + type + ":*" + queryTerm + "*"; - } - - default boolean supportsSearch(String type, Map filters) { - final boolean filterMatch; - if (filters == null || !filters.containsKey("cloudProvider")) { - filterMatch = true; - } else { - filterMatch = getKeyParser() - .map(kp -> kp.canParseType(type) && kp.getCloudProvider().equals(filters.get("cloudProvider"))) - .orElse(true); - } - - return filterMatch && hasAgentForType(type, getAgents()); - } - - static boolean hasAgentForType(String type, Collection agents) { - return agents - .stream() - .filter(CachingAgent.class::isInstance) - .map(CachingAgent.class::cast) - .anyMatch(ca -> - ca.getProvidedDataTypes() - .stream() - .anyMatch(pdt -> pdt.getTypeName().equals(type)) - ); - } - - /** - * A SearchResultHydrator provides a custom strategy for enhancing result data for a particular cache type. - */ - public static interface SearchResultHydrator { - Map hydrateResult(Cache cacheView, Map result, String id); - } - - @Canonical - @Data - @AllArgsConstructor - @NoArgsConstructor - public static class SearchableResource { - /** - * Lowercase name of a resource type. - * e.g. 'instances', 'load_balancers' - */ - String resourceType; - - /** - * Lowercase name of the platform. - * e.g. 
'aws', 'gce' - */ - String platform; - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/SpectatorDynomiteCacheMetrics.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/SpectatorDynomiteCacheMetrics.java deleted file mode 100644 index a7f9a890d62..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/SpectatorDynomiteCacheMetrics.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.clouddriver.cache; - -import com.netflix.spectator.api.BasicTag; -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.Registry; -import com.netflix.spectator.api.Tag; -import com.netflix.spinnaker.cats.dynomite.cache.DynomiteCache.CacheMetrics; - -import java.util.Arrays; - -public class SpectatorDynomiteCacheMetrics implements CacheMetrics { - - private final Registry registry; - - SpectatorDynomiteCacheMetrics(Registry registry) { - this.registry = registry; - } - - @Override - public void merge(String prefix, String type, int itemCount, int relationshipCount, int hashMatches, int hashUpdates, int saddOperations, int hmsetOperations, int expireOperations, int delOperations) { - final Iterable tags = tags(prefix, type); - registry.counter(id("cats.dynomiteCache.merge", "itemCount", tags)).increment(itemCount); - registry.counter(id("cats.dynomiteCache.merge", "relationshipCount", tags)).increment(relationshipCount); - registry.counter(id("cats.dynomiteCache.merge", "hashMatches", tags)).increment(hashMatches); - registry.counter(id("cats.dynomiteCache.merge", "hashUpdates", tags)).increment(hashUpdates); - registry.counter(id("cats.dynomiteCache.merge", "saddOperations", tags)).increment(saddOperations); - registry.counter(id("cats.dynomiteCache.merge", "hmsetOperations", tags)).increment(hmsetOperations); - registry.counter(id("cats.dynomiteCache.merge", "expireOperations", tags)).increment(expireOperations); - registry.counter(id("cats.dynomiteCache.merge", "delOperations", tags)).increment(delOperations); - } - - @Override - public void evict(String prefix, String type, int itemCount, int delOperations, int sremOperations) { - final Iterable tags = tags(prefix, type); - registry.counter(id("cats.dynomiteCache.evict", "itemCount", tags)).increment(itemCount); - registry.counter(id("cats.dynomiteCache.evict", "delOperations", tags)).increment(delOperations); - registry.counter(id("cats.dynomiteCache.evict", "sremOperations", tags)).increment(sremOperations); - } - - @Override - public void get(String prefix, String type, int itemCount, int requestedSize, int relationshipsRequested, int hmgetAllOperations) { - final Iterable tags = tags(prefix, type); - registry.counter(id("cats.dynomiteCache.get", "itemCount", tags)).increment(itemCount); - registry.counter(id("cats.dynomiteCache.get", "requestedSize", tags)).increment(requestedSize); - 
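Stepping back to SearchableProvider.buildSearchTerm above: the produced term is a Redis-style glob, and supplying a KeyParser narrows the leading segment from a '*' wildcard to a concrete cloud provider id, which is what makes scoped searches cheaper. A distilled sketch (a standalone method, not the actual class):

```java
import java.util.Optional;

// Mirrors the shape of SearchableProvider.buildSearchTerm: provider-scoped
// when a KeyParser supplies a cloud provider id, wildcard otherwise.
class SearchTermDemo {
  static String buildSearchTerm(Optional<String> cloudProvider, String type, String queryTerm) {
    String prefix = cloudProvider.orElse("*");
    return prefix + ":" + type + ":*" + queryTerm + "*";
  }

  public static void main(String[] args) {
    System.out.println(buildSearchTerm(Optional.of("aws"), "serverGroups", "myapp"));
    // aws:serverGroups:*myapp*
    System.out.println(buildSearchTerm(Optional.empty(), "serverGroups", "myapp"));
    // *:serverGroups:*myapp*  (forces a broader scan)
  }
}
```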
registry.counter(id("cats.dynomiteCache.get", "relationshipsRequested", tags)).increment(relationshipsRequested); - registry.counter(id("cats.dynomiteCache.get", "hmgetAllOperations", tags)).increment(hmgetAllOperations); - } - - private Id id(String metricGroup, String metric, Iterable tags) { - return registry.createId(metricGroup + '.' + metric, tags); - } - - private Iterable tags(String prefix, String type) { - return Arrays.asList(new BasicTag("prefix", prefix), new BasicTag("type", type)); - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/CloudProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/CloudProvider.groovy deleted file mode 100644 index 296a5381abb..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/CloudProvider.groovy +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.core - -import java.lang.annotation.Annotation - -/** - * Different cloud providers (AWS, GCE, Titus, etc.) should implement this interface and - * annotate different implementations with annotation class indicated by {@code getAnnotation} method - * to identify the cloud provider specific implementations - * - */ -interface CloudProvider { - - /** - * A unique string that identifies the cloud provider implementation - * @return - */ - String getId() - - /** - * Display name or simply the name for the cloud provider. Use {@code getID()} for uniqueness constraints - * instead of this method - * @return - */ - String getDisplayName() - - /** - * Annotation type that can be assigned to the implementations for operations, converters, validators, etc. to enable - * lookup based on the operation description name and cloud provider type - * @return - */ - Class getOperationAnnotationType() - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/DynomiteConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/DynomiteConfig.groovy deleted file mode 100644 index bbaf0d47ba2..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/DynomiteConfig.groovy +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.core - -import com.netflix.discovery.DiscoveryClient -import com.netflix.dyno.connectionpool.Host -import com.netflix.dyno.connectionpool.HostSupplier -import com.netflix.dyno.connectionpool.TokenMapSupplier -import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl -import com.netflix.dyno.connectionpool.impl.lb.HostToken -import com.netflix.dyno.jedis.DynoJedisClient -import com.netflix.spinnaker.clouddriver.cache.DynomiteConfigurationProperties -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.data.task.jedis.RedisTaskRepository -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate -import com.netflix.spinnaker.kork.jedis.JedisClientDelegate -import org.apache.commons.pool2.impl.GenericObjectPoolConfig -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import redis.clients.jedis.JedisPool -import redis.clients.jedis.Protocol - -@Configuration -@ConditionalOnExpression('${dynomite.enabled:false}') -@EnableConfigurationProperties(DynomiteConfigurationProperties) -class DynomiteConfig { - - @Bean - TaskRepository taskRepository(DynomiteClientDelegate dynomiteClientDelegate, Optional redisClientDelegatePrevious) { - new RedisTaskRepository(dynomiteClientDelegate, redisClientDelegatePrevious) - } - - @Bean - DynomiteClientDelegate dynomiteClientDelegate(DynoJedisClient dynoJedisClient) { - new DynomiteClientDelegate(dynoJedisClient) - } - - @Bean - @ConfigurationProperties("dynomite.connectionPool") - ConnectionPoolConfigurationImpl connectionPoolConfiguration(DynomiteConfigurationProperties dynomiteConfigurationProperties) { - new ConnectionPoolConfigurationImpl(dynomiteConfigurationProperties.applicationName).withHashtag("{}") - } - - @Bean(destroyMethod = "stopClient") - DynoJedisClient dynoJedisClient(DynomiteConfigurationProperties dynomiteConfigurationProperties, ConnectionPoolConfigurationImpl connectionPoolConfiguration, Optional discoveryClient) { - def builder = new DynoJedisClient.Builder() - .withApplicationName(dynomiteConfigurationProperties.applicationName) - .withDynomiteClusterName(dynomiteConfigurationProperties.clusterName) - - discoveryClient.map({ dc -> - builder.withDiscoveryClient(dc) - .withCPConfig(connectionPoolConfiguration) - }).orElseGet({ - connectionPoolConfiguration - .withTokenSupplier(new StaticTokenMapSupplier(dynomiteConfigurationProperties.dynoHostTokens)) - .setLocalDataCenter(dynomiteConfigurationProperties.localDataCenter) - .setLocalRack(dynomiteConfigurationProperties.localRack) - - builder - .withHostSupplier(new StaticHostSupplier(dynomiteConfigurationProperties.dynoHosts)) - .withCPConfig(connectionPoolConfiguration) - }).build() - } - - @Bean - @ConditionalOnProperty("redis.connectionPrevious") - JedisClientDelegate redisClientDelegatePrevious(JedisPool jedisPoolPrevious) { - return new JedisClientDelegate(jedisPoolPrevious) - } - - @Bean - @ConditionalOnProperty("redis.connectionPrevious") - JedisPool jedisPoolPrevious(RedisConfigurationProperties redisConfigurationProperties) { - return createPool(null, 
redisConfigurationProperties.connectionPrevious, 1000) - } - - private static JedisPool createPool(GenericObjectPoolConfig redisPoolConfig, String connection, int timeout) { - URI redisConnection = URI.create(connection) - - String host = redisConnection.host - int port = redisConnection.port == -1 ? Protocol.DEFAULT_PORT : redisConnection.port - - int database = Integer.parseInt((redisConnection.path ?: "/${Protocol.DEFAULT_DATABASE}").split('/', 2)[1]) - - String password = redisConnection.userInfo ? redisConnection.userInfo.split(':', 2)[1] : null - - new JedisPool(redisPoolConfig ?: new GenericObjectPoolConfig(), host, port, timeout, password, database, null) - } - - static class StaticHostSupplier implements HostSupplier { - - private final List hosts - - StaticHostSupplier(List hosts) { - this.hosts = hosts - } - - @Override - List getHosts() { - return hosts - } - } - - static class StaticTokenMapSupplier implements TokenMapSupplier { - - List hostTokens = new ArrayList<>() - - StaticTokenMapSupplier(List hostTokens) { - this.hostTokens = hostTokens - } - - @Override - List getTokens(Set activeHosts) { - return hostTokens - } - - @Override - HostToken getTokenForHost(Host host, Set activeHosts) { - return hostTokens.find { it.host == host } - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/NoopAtomicOperationConverter.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/NoopAtomicOperationConverter.groovy index 6564d59515a..db7c3440116 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/NoopAtomicOperationConverter.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/NoopAtomicOperationConverter.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.core import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter +import com.netflix.spinnaker.orchestration.OperationDescription import groovy.util.logging.Slf4j import org.springframework.stereotype.Component @@ -35,7 +36,7 @@ class NoopAtomicOperationConverter implements AtomicOperationConverter { } @Override - Object convertDescription(Map input) { + OperationDescription convertDescription(Map input) { return null } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/RedisConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/RedisConfig.groovy index 3c4c7261380..f53acd3f4e0 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/RedisConfig.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/RedisConfig.groovy @@ -21,13 +21,16 @@ import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.data.task.jedis.RedisTaskRepository import com.netflix.spinnaker.kork.jedis.JedisClientDelegate import com.netflix.spinnaker.kork.jedis.RedisClientDelegate -import com.netflix.spinnaker.kork.jedis.telemetry.InstrumentedJedis import com.netflix.spinnaker.kork.jedis.telemetry.InstrumentedJedisPool import org.apache.commons.pool2.impl.GenericObjectPool import org.apache.commons.pool2.impl.GenericObjectPoolConfig import org.springframework.boot.actuate.health.Health import org.springframework.boot.actuate.health.HealthIndicator +import org.springframework.boot.autoconfigure.EnableAutoConfiguration +import 
org.springframework.boot.autoconfigure.condition.ConditionalOnBean import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import org.springframework.boot.autoconfigure.data.redis.RedisAutoConfiguration import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties import org.springframework.context.annotation.Bean @@ -37,7 +40,7 @@ import redis.clients.jedis.JedisPool import redis.clients.jedis.Protocol @Configuration -@ConditionalOnExpression('${redis.enabled:true}') +@ConditionalOnProperty(value = 'redis.enabled', matchIfMissing = true) @EnableConfigurationProperties(RedisConfigurationProperties) class RedisConfig { @Bean @@ -47,6 +50,7 @@ class RedisConfig { } @Bean + @ConditionalOnExpression('${redis.task-repository.enabled:true}') TaskRepository taskRepository(RedisClientDelegate redisClientDelegate, Optional redisClientDelegatePrevious) { new RedisTaskRepository(redisClientDelegate, redisClientDelegatePrevious) } @@ -57,6 +61,7 @@ class RedisConfig { } @Bean + @ConditionalOnBean(value = JedisPool, name = "jedisPoolPrevious") RedisClientDelegate redisClientDelegatePrevious(JedisPool jedisPoolPrevious) { return new JedisClientDelegate(jedisPoolPrevious) } @@ -100,9 +105,11 @@ class RedisConfig { String password = redisConnection.userInfo ? redisConnection.userInfo.split(':', 2)[1] : null + boolean isSSL = redisConnection.getScheme() == "rediss" + new InstrumentedJedisPool( registry, - new JedisPool(redisPoolConfig ?: new GenericObjectPoolConfig(), host, port, timeout, password, database, null), + new JedisPool(redisPoolConfig ?: new GenericObjectPoolConfig(), host, port, timeout, password, database, isSSL), name ) } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/agent/CleanupPendingOnDemandCachesAgent.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/agent/CleanupPendingOnDemandCachesAgent.java deleted file mode 100644 index 74f13bfb5cb..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/agent/CleanupPendingOnDemandCachesAgent.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
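The createPool helpers above (both the Dynomite variant and RedisConfig's, including the new "rediss" scheme check added in this diff) share the same URI-parsing shape. A standalone sketch with a made-up connection string; 6379 and database 0 stand in for Protocol.DEFAULT_PORT and Protocol.DEFAULT_DATABASE:

```java
import java.net.URI;

// Parses host, port, database, password, and SSL flag out of a Redis
// connection URI the way the createPool helpers above do.
class RedisUriDemo {
  public static void main(String[] args) {
    URI redisConnection = URI.create("rediss://user:s3cret@redis.example.com:6380/2");

    String host = redisConnection.getHost();
    int port = redisConnection.getPort() == -1 ? 6379 : redisConnection.getPort();

    String path = redisConnection.getPath() == null || redisConnection.getPath().isEmpty()
        ? "/0" : redisConnection.getPath();
    int database = Integer.parseInt(path.split("/", 2)[1]);

    String userInfo = redisConnection.getUserInfo();
    String password = userInfo != null ? userInfo.split(":", 2)[1] : null;

    // The scheme check that the RedisConfig change above introduces.
    boolean ssl = "rediss".equals(redisConnection.getScheme());

    System.out.printf("host=%s port=%d db=%d password=%s ssl=%b%n",
        host, port, database, password, ssl);
    // host=redis.example.com port=6380 db=2 password=s3cret ssl=true
  }
}
```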
- */ - -package com.netflix.spinnaker.clouddriver.core.agent; - -import com.google.common.collect.Iterables; -import com.google.common.collect.Lists; -import com.netflix.spinnaker.cats.agent.RunnableAgent; -import com.netflix.spinnaker.cats.module.CatsModule; -import com.netflix.spinnaker.cats.provider.Provider; -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions; -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; -import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider; -import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.context.ApplicationContext; -import redis.clients.jedis.Response; -import redis.clients.jedis.ScanParams; -import redis.clients.jedis.ScanResult; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -public class CleanupPendingOnDemandCachesAgent implements RunnableAgent, CustomScheduledAgent { - private static final Logger log = LoggerFactory.getLogger(CleanupPendingOnDemandCachesAgent.class); - - private static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(30); - private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5); - - private final RedisCacheOptions redisCacheOptions; - private final RedisClientDelegate redisClientDelegate; - private final ApplicationContext applicationContext; - private final long pollIntervalMillis; - private final long timeoutMillis; - - public CleanupPendingOnDemandCachesAgent(RedisCacheOptions redisCacheOptions, - RedisClientDelegate redisClientDelegate, - ApplicationContext applicationContext) { - this(redisCacheOptions, redisClientDelegate, applicationContext, DEFAULT_POLL_INTERVAL_MILLIS, DEFAULT_TIMEOUT_MILLIS); - } - - private CleanupPendingOnDemandCachesAgent(RedisCacheOptions redisCacheOptions, - RedisClientDelegate redisClientDelegate, - ApplicationContext applicationContext, - long pollIntervalMillis, - long timeoutMillis) { - this.redisCacheOptions = redisCacheOptions; - this.redisClientDelegate = redisClientDelegate; - this.applicationContext = applicationContext; - this.pollIntervalMillis = pollIntervalMillis; - this.timeoutMillis = timeoutMillis; - } - - @Override - public String getAgentType() { - return CleanupPendingOnDemandCachesAgent.class.getSimpleName(); - } - - @Override - public String getProviderName() { - return CoreProvider.PROVIDER_NAME; - } - - @Override - public void run() { - run(getCatsModule().getProviderRegistry().getProviders()); - } - - void run(Collection providers) { - providers.forEach(provider -> { - String onDemandSetName = provider.getProviderName() + ":onDemand:members"; - List onDemandKeys = scanMembers(onDemandSetName).stream() - .filter(s -> !s.equals("_ALL_")) - .collect(Collectors.toList()); - - Map> existingOnDemandKeys = new HashMap<>(); - if (redisClientDelegate.supportsMultiKeyPipelines()) { - redisClientDelegate.withMultiKeyPipeline(pipeline -> { - for (List partition : Iterables.partition(onDemandKeys, redisCacheOptions.getMaxDelSize())) { - for (String id : partition) { - existingOnDemandKeys.put(id, pipeline.exists(provider.getProviderName() + ":onDemand:attributes:" + id)); - } - } - pipeline.sync(); - }); - } else { - redisClientDelegate.withCommandsClient(client -> { - onDemandKeys.stream() - 
.filter(k -> client.exists(provider.getProviderName() + "onDemand:attributes:" + k)) - .forEach(k -> existingOnDemandKeys.put(k, new StaticResponse(Boolean.TRUE))); - }); - } - - List onDemandKeysToRemove = new ArrayList<>(); - for (String onDemandKey : onDemandKeys) { - if (!existingOnDemandKeys.containsKey(onDemandKey) || !existingOnDemandKeys.get(onDemandKey).get()) { - onDemandKeysToRemove.add(onDemandKey); - } - } - - if (!onDemandKeysToRemove.isEmpty()) { - log.info("Removing {} from {}", onDemandKeysToRemove.size(), onDemandSetName); - log.debug("Removing {} from {}", onDemandKeysToRemove, onDemandSetName); - - - redisClientDelegate.withMultiKeyPipeline(pipeline -> { - for (List idPartition : Lists.partition(onDemandKeysToRemove, redisCacheOptions.getMaxDelSize())) { - String[] ids = idPartition.toArray(new String[idPartition.size()]); - pipeline.srem(onDemandSetName, ids); - } - - pipeline.sync(); - }); - } - }); - } - - public long getPollIntervalMillis() { - return pollIntervalMillis; - } - - public long getTimeoutMillis() { - return timeoutMillis; - } - - private Set scanMembers(String setKey) { - return redisClientDelegate.withCommandsClient(client -> { - final Set matches = new HashSet<>(); - final ScanParams scanParams = new ScanParams().count(redisCacheOptions.getScanSize()); - String cursor = "0"; - while (true) { - final ScanResult scanResult = client.sscan(setKey, cursor, scanParams); - matches.addAll(scanResult.getResult()); - cursor = scanResult.getStringCursor(); - if ("0".equals(cursor)) { - return matches; - } - } - }); - } - - private CatsModule getCatsModule() { - return applicationContext.getBean(CatsModule.class); - } - - private static class StaticResponse extends Response { - private final Boolean value; - - StaticResponse(Boolean value) { - super(null); - this.value = value; - } - - @Override - public Boolean get() { - return value; - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfiguration.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfiguration.java deleted file mode 100644 index b5b63b5f8db..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfiguration.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
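The scanMembers loop in CleanupPendingOnDemandCachesAgent above is a textbook SSCAN cursor walk: page through a set with a COUNT hint and stop when the cursor returns to "0". A minimal Jedis version; host, key name, and scan size are placeholders, and recent Jedis exposes the cursor via getCursor() where the deleted code used the older getStringCursor():

```java
import java.util.HashSet;
import java.util.Set;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;

class ScanMembersDemo {
  // Pages through a Redis set without blocking the server the way SMEMBERS can.
  static Set<String> scanMembers(Jedis client, String setKey, int scanSize) {
    Set<String> matches = new HashSet<>();
    ScanParams scanParams = new ScanParams().count(scanSize);
    String cursor = "0";
    while (true) {
      ScanResult<String> result = client.sscan(setKey, cursor, scanParams);
      matches.addAll(result.getResult());
      cursor = result.getCursor();
      if ("0".equals(cursor)) {
        return matches;
      }
    }
  }

  public static void main(String[] args) {
    try (Jedis client = new Jedis("localhost", 6379)) {
      System.out.println(scanMembers(client, "aws:onDemand:members", 1000).size());
    }
  }
}
```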
- */ - -package com.netflix.spinnaker.clouddriver.core.limits; - -import com.google.common.collect.ImmutableMap; - -import java.util.Collections; -import java.util.Map; -import java.util.Optional; - -public class ServiceLimitConfiguration { - public static final String POLL_INTERVAL_MILLIS = "agentPollIntervalMs"; - public static final String POLL_TIMEOUT_MILLIS = "agentPollTimeoutMs"; - public static final String API_RATE_LIMIT = "rateLimit"; - - - private final ServiceLimits defaults; - private final Map cloudProviderOverrides; - private final Map accountOverrides; - private final Map implementationLimits; - - public ServiceLimitConfiguration(ServiceLimits defaults, Map cloudProviderOverrides, Map accountOverrides, Map implementationLimits) { - this.defaults = defaults == null ? new ServiceLimits(null) : defaults; - this.cloudProviderOverrides = cloudProviderOverrides == null ? Collections.emptyMap() : ImmutableMap.copyOf(cloudProviderOverrides); - this.accountOverrides = accountOverrides == null ? Collections.emptyMap() : ImmutableMap.copyOf(accountOverrides); - this.implementationLimits = implementationLimits == null ? Collections.emptyMap() : ImmutableMap.copyOf(implementationLimits); - } - - public Double getLimit(String limit, String implementation, String account, String cloudProvider, Double defaultValue) { - return Optional - .ofNullable(getImplementationLimit(limit, implementation, account)) - .orElse(Optional.ofNullable(getAccountLimit(limit, account)) - .orElse(Optional.ofNullable(getCloudProviderLimit(limit, cloudProvider)) - .orElse(Optional.ofNullable(defaults.getLimit(limit)) - .orElse(defaultValue)))); - } - - private Double getAccountLimit(String limit, String account) { - return Optional - .ofNullable(account) - .map(accountOverrides::get) - .map(sl -> sl.getLimit(limit)) - .orElse(null); - } - - private Double getCloudProviderLimit(String limit, String cloudProvider) { - return Optional - .ofNullable(cloudProvider) - .map(cloudProviderOverrides::get) - .map(sl -> sl.getLimit(limit)) - .orElse(null); - } - - private Double getImplementationLimit(String limit, String implementation, String account) { - return Optional - .ofNullable(implementation) - .map(implementationLimits::get) - .map(il -> il.getLimit(limit, account)) - .orElse(null); - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfigurationBuilder.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfigurationBuilder.java deleted file mode 100644 index d62402ce8d7..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfigurationBuilder.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.core.limits; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; - - -/** - * Mutable structure for construction of ServiceLimitConfiguration. - */ -public class ServiceLimitConfigurationBuilder { - - private MutableLimits defaults = new MutableLimits(); - private Map cloudProviderOverrides = new HashMap<>(); - private Map accountOverrides = new HashMap<>(); - private Map implementationLimits = new HashMap<>(); - - public MutableLimits getDefaults() { - return defaults; - } - - public void setDefaults(MutableLimits defaults) { - this.defaults = defaults; - } - - public ServiceLimitConfigurationBuilder withDefault(String limit, Double value) { - if (defaults == null) { - defaults = new MutableLimits(); - } - defaults.setLimit(limit, value); - return this; - } - - public Map getCloudProviderOverrides() { - return cloudProviderOverrides; - } - - public void setCloudProviderOverrides(Map cloudProviderOverrides) { - this.cloudProviderOverrides = cloudProviderOverrides; - } - - public ServiceLimitConfigurationBuilder withCloudProviderOverride(String cloudProvider, String limit, Double value) { - if (cloudProviderOverrides == null) { - cloudProviderOverrides = new HashMap<>(); - } - cloudProviderOverrides.computeIfAbsent(cloudProvider, k -> new MutableLimits()).setLimit(limit, value); - return this; - } - - public Map getAccountOverrides() { - return accountOverrides; - } - - public void setAccountOverrides(Map accountOverrides) { - this.accountOverrides = accountOverrides; - } - - public ServiceLimitConfigurationBuilder withAccountOverride(String account, String limit, Double value) { - if (accountOverrides == null) { - accountOverrides = new HashMap<>(); - } - - accountOverrides.computeIfAbsent(account, k -> new MutableLimits()).setLimit(limit, value); - return this; - } - - public Map getImplementationLimits() { - return implementationLimits; - } - - public void setImplementationLimits(Map implementationLimits) { - this.implementationLimits = implementationLimits; - } - - public ServiceLimitConfigurationBuilder withImplementationDefault(String implementation, String limit, Double value) { - if (implementationLimits == null) { - implementationLimits = new HashMap<>(); - } - implementationLimits.computeIfAbsent(implementation, k -> new MutableImplementationLimits()).defaults.setLimit(limit, value); - return this; - } - - public ServiceLimitConfigurationBuilder withImplementationAccountOverride(String implementation, String account, String limit, Double value) { - if (implementationLimits == null) { - implementationLimits = new HashMap<>(); - } - - implementationLimits - .computeIfAbsent(implementation, k -> new MutableImplementationLimits()) - .accountOverrides.computeIfAbsent(account, k -> new MutableLimits()) - .setLimit(limit, value); - - return this; - } - - public ServiceLimitConfiguration build() { - return new ServiceLimitConfiguration(new ServiceLimits(defaults), toServiceLimits(cloudProviderOverrides), toServiceLimits(accountOverrides), toImplementationLimits(implementationLimits)); - } - - public static class MutableLimits extends HashMap { - public void setLimit(String limit, Double value) { - put(limit, value); - } - - public Double getLimit(String limit) { - return get(limit); - } - } - - public static class MutableImplementationLimits { - MutableLimits defaults = new MutableLimits(); - Map accountOverrides 
= new HashMap<>(); - - public ImplementationLimits toImplementationLimits() { - return new ImplementationLimits(new ServiceLimits(defaults), toServiceLimits(accountOverrides)); - } - - public MutableLimits getDefaults() { - return defaults; - } - - public void setDefaults(MutableLimits defaults) { - this.defaults = defaults; - } - - public Map getAccountOverrides() { - return accountOverrides; - } - - public void setAccountOverrides(Map accountOverrides) { - this.accountOverrides = accountOverrides; - } - } - - private static Map toImmutable(Map src, Function, D> converter) { - return java.util.Optional.ofNullable(src) - .map(Map::entrySet) - .map(Set::stream) - .map(s -> s.collect( - Collectors.toMap( - Map.Entry::getKey, - converter))) - .orElse(Collections.emptyMap()); - } - - private static Map toServiceLimits(Map limits) { - return toImmutable(limits, mapEntry -> new ServiceLimits(mapEntry.getValue())); - } - - private static Map toImplementationLimits(Map implementationLimits) { - return toImmutable(implementationLimits, mapEntry -> mapEntry.getValue().toImplementationLimits()); - } - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/CoreProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/CoreProvider.groovy deleted file mode 100644 index ea06880ad9b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/CoreProvider.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package com.netflix.spinnaker.clouddriver.core.provider - -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware -import com.netflix.spinnaker.cats.provider.Provider - -class CoreProvider extends AgentSchedulerAware implements Provider { - public static final String PROVIDER_NAME = CoreProvider.name - - private final Collection agents - - CoreProvider(Collection agents) { - this.agents = agents - } - - @Override - String getProviderName() { - return PROVIDER_NAME - } - - @Override - Collection getAgents() { - return agents - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/ExternalHealthProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/ExternalHealthProvider.groovy deleted file mode 100644 index c0ae05c7380..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/ExternalHealthProvider.groovy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
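Tying ServiceLimitConfiguration and its builder together: getLimit resolves in order implementation/account override, account override, cloud-provider override, global default, then the caller's fallback. A hypothetical wiring, assuming the classes above are in the same package; limit names and values are invented:

```java
// Demonstrates the override precedence of ServiceLimitConfiguration.getLimit.
class ServiceLimitsDemo {
  public static void main(String[] args) {
    ServiceLimitConfiguration config = new ServiceLimitConfigurationBuilder()
        .withDefault("rateLimit", 10.0)
        .withCloudProviderOverride("aws", "rateLimit", 15.0)
        .withAccountOverride("prod", "rateLimit", 20.0)
        .withImplementationAccountOverride("AmazonEC2", "prod", "rateLimit", 50.0)
        .build();

    // Implementation/account override wins over everything else: 50.0
    System.out.println(config.getLimit("rateLimit", "AmazonEC2", "prod", "aws", 5.0));
    // No override matches this combination, so the global default applies: 10.0
    System.out.println(config.getLimit("rateLimit", null, "test", null, 5.0));
  }
}
```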
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.core.provider.agent - -import com.netflix.spinnaker.cats.provider.Provider - -interface ExternalHealthProvider extends Provider { - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/HealthProvidingCachingAgent.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/HealthProvidingCachingAgent.groovy deleted file mode 100644 index d02a43a906d..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/HealthProvidingCachingAgent.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.core.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CachingAgent - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES - -interface HealthProvidingCachingAgent extends CachingAgent { - static final TypeReference> ATTRIBUTES = new TypeReference>() {} - static final Collection types = Collections.unmodifiableCollection([ - AUTHORITATIVE.forType(HEALTH.ns), - INFORMATIVE.forType(INSTANCES.ns) - ]) - String getHealthId() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/Namespace.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/Namespace.groovy index 29786c67710..231b3d87942 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/Namespace.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/provider/agent/Namespace.groovy @@ -36,7 +36,10 @@ enum Namespace { HEALTH, ON_DEMAND, RESERVATION_REPORTS, - RESERVED_INSTANCES + RESERVED_INSTANCES, + PROJECT_CLUSTERS, + STACKS, + LAUNCH_TEMPLATES public final String ns final Set fields diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/services/Front50Service.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/services/Front50Service.groovy index 2bef032b8c3..71625029375 100644 --- 
a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/services/Front50Service.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/services/Front50Service.groovy @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.core.services import com.netflix.spinnaker.clouddriver.model.EntityTags -import com.netflix.spinnaker.clouddriver.model.EntityTags.EntityTag +import com.netflix.spinnaker.clouddriver.model.Front50Application import retrofit.client.Response import retrofit.http.* @@ -26,18 +26,21 @@ interface Front50Service { List getCredentials() @GET('/v2/applications') - List searchByName(@Query("name") String applicationName, + List> searchByName(@Query("name") String applicationName, @Query("pageSize") Integer pageSize, @QueryMap Map filters) @GET('/v2/applications/{applicationName}') Map getApplication(@Path('applicationName') String applicationName) + @GET('/v2/applications?restricted=false') + Set getAllApplicationsUnrestricted() + @GET('/v2/projects/{project}') Map getProject(@Path('project') String project) @GET('/v2/projects') - List searchForProjects(@QueryMap Map params, @Query("pageSize") Integer pageSize) + List> searchForProjects(@QueryMap Map params, @Query("pageSize") Integer pageSize) @POST('/snapshots') Response saveSnapshot(@Body Map snapshot) @@ -62,4 +65,13 @@ interface Front50Service { @DELETE('/v2/tags/{id}') Response deleteEntityTags(@Path('id') String id) + + // v2 MPT APIs + @GET('/v2/pipelineTemplates/{pipelineTemplateId}') + Map getV2PipelineTemplate(@Path("pipelineTemplateId") String pipelineTemplateId, + @Query("tag") String version, + @Query("digest") String digest) + + @GET('/v2/pipelineTemplates') + List listV2PipelineTemplates(@Query("scopes") List scopes) } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/DefaultTask.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/DefaultTask.groovy deleted file mode 100644 index 36dc4a210c7..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/DefaultTask.groovy +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
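Front50Service above is a retrofit 1.x interface, so a client is obtained from a RestAdapter rather than implemented by hand. A sketch with a placeholder endpoint (a real deployment would take the URL from configuration):

```java
import java.util.Arrays;
import retrofit.RestAdapter;

// Builds a synchronous retrofit 1.x client for the Front50Service interface
// declared in the diff above. The endpoint URL is made up.
class Front50ClientDemo {
  public static void main(String[] args) {
    Front50Service front50 = new RestAdapter.Builder()
        .setEndpoint("http://front50.example.com:8080")
        .build()
        .create(Front50Service.class);

    // Exercises one of the v2 pipeline-template endpoints added in this diff.
    System.out.println(front50.listV2PipelineTemplates(Arrays.asList("global")));
  }
}
```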
- */ - -package com.netflix.spinnaker.clouddriver.data.task - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname -import groovy.transform.CompileStatic -import groovy.transform.Immutable - -import java.util.concurrent.ConcurrentLinkedDeque -import java.util.logging.Logger - -@CompileStatic -public class DefaultTask implements Task { - private static final Logger log = Logger.getLogger(DefaultTask.name) - - final String id - final String ownerId = ClouddriverHostname.ID - private final Deque statusHistory = new ConcurrentLinkedDeque() - private final Deque resultObjects = new ConcurrentLinkedDeque() - final long startTimeMs = System.currentTimeMillis() - - public String getOwnerId() { - return ownerId - } - - public DefaultTask(String id) { - this(id, 'INIT', "Creating task ${id}") - } - - public DefaultTask(String id, String phase, String status) { - def initialStatus = new DefaultTaskStatus(phase, status, TaskState.STARTED) - statusHistory.addLast(initialStatus) - this.id = id - } - - public void updateStatus(String phase, String status) { - statusHistory.addLast(currentStatus().update(phase, status)) - log.info "[$phase] - $status" - } - - public void complete() { - statusHistory.addLast(currentStatus().update(TaskState.COMPLETED)) - } - - public List getHistory() { - statusHistory.collect { new TaskDisplayStatus(it) } - } - - public void fail() { - statusHistory.addLast(currentStatus().update(TaskState.FAILED)) - } - - public Status getStatus() { - currentStatus() - } - - public String toString() { - getStatus().toString() - } - - public void addResultObjects(List results) { - if (results) { - currentStatus().ensureUpdateable() - resultObjects.addAll(results) - } - } - - @Override - List getResultObjects() { - resultObjects.collect() - } - - private DefaultTaskStatus currentStatus() { - statusHistory.getLast() as DefaultTaskStatus - } -} - -@Immutable(knownImmutableClasses = [Status]) -@CompileStatic -class TaskDisplayStatus implements Status { - @JsonIgnore - Status taskStatus - - static TaskDisplayStatus create(Status taskStatus) { - new TaskDisplayStatus(taskStatus) - } - - @Override - String getStatus() { - taskStatus.status - } - - @Override - String getPhase() { - taskStatus.phase - } - - @JsonIgnore - Boolean isCompleted() { taskStatus.isCompleted() } - - @JsonIgnore - Boolean isFailed() { taskStatus.isFailed() } -} - -@Immutable -@CompileStatic -class DefaultTaskStatus implements Status { - String phase - String status - - @JsonIgnore - TaskState state - - // Needed so that Java can interact with Groovy @Immutable classes. - static DefaultTaskStatus create(String phase, String status, TaskState state) { - new DefaultTaskStatus(phase, status, state) - } - - Boolean isComplete() { state.completed } - - Boolean isCompleted() { state.completed } - - Boolean isFailed() { state.failed } - - DefaultTaskStatus update(String phase, String status) { - ensureUpdateable() - new DefaultTaskStatus(phase, status, state) - } - - DefaultTaskStatus update(TaskState state) { - ensureUpdateable() - new DefaultTaskStatus(phase, status, state) - } - - public void ensureUpdateable() { - if (isCompleted()) { - throw new IllegalStateException("Task is already completed!
No further updates allowed!") - } - } - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepository.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepository.groovy index c9dc06202c0..f49a1460036 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepository.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepository.groovy @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.data.task +import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname + import java.util.concurrent.ConcurrentHashMap class InMemoryTaskRepository implements TaskRepository { @@ -49,17 +51,23 @@ class InMemoryTaskRepository implements TaskRepository { @Override List list() { - repository.values() as List + List tasks = new ArrayList<>(); + for (Task value : repository.values()) { + if (!value.getStatus().isCompleted()) { + tasks.add(value) + } + } + return tasks; } @Override List listByThisInstance() { - return list() + return list().findAll { it.ownerId == ClouddriverHostname.ID } } private String getNextId() { while (true) { - def maybeNext = new BigInteger(new Random().nextInt(Integer.MAX_VALUE)).toString(36) + def maybeNext = BigInteger.valueOf(new Random().nextInt(Integer.MAX_VALUE)).toString(36) if (!repository.containsKey(maybeNext)) { return maybeNext } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/Status.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/Status.groovy deleted file mode 100644 index 733ef993964..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/Status.groovy +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.data.task - -/** - * This interface is used to represent the status of a Task for a point in time. Often should be backed by a POGO, but - * may be used for more complex requirements, like querying a database or centralized task system in a multi-threaded/ - * multi-service environment. - * - * A psuedo-composite key of a Status is its phase and status strings. - * - * - */ -public interface Status { - /** - * Returns the current phase of the execution. This is useful for representing different parts of a Task execution, and - * a "status" String will be tied - */ - String getPhase() - - /** - * Returns the current status of the Task in its given phase. - */ - String getStatus() - - /** - * Informs completion of the task. - */ - Boolean isCompleted() - - /** - * Informs whether the task has failed or not. A "failed" state is always indicitive of a "completed" state. 
- */ - Boolean isFailed() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/Task.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/Task.groovy deleted file mode 100644 index 2fd2ff14224..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/Task.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.data.task - -/** - * This interface represents the state of a given execution. Implementations must allow for updating and completing/failing - * status, as well as providing the start time of the task. - * - * - */ -public interface Task { - /** - * A unique identifier for the task, which can be used to retrieve it at a later time. - */ - String getId() - - /** - * A list of result objects that are serialized back to the caller - */ - List getResultObjects() - - /** - * This method is used to add result objects to the Task - * @param results - */ - void addResultObjects(List<Object> results) - - /** - * A comprehensive history of this task's execution. - */ - List getHistory() - - /** - * The id of the clouddriver instance that submitted this task - */ - String getOwnerId() - - /** - * This method is used to update the status of the Task with given phase and status strings. - * @param phase - * @param status - */ - void updateStatus(String phase, String status) - - /** - * This method will complete the task and will represent completed = true from the Task's {@link #getStatus()} method. - */ - void complete() - - /** - * This method will fail the task and will represent completed = true and failed = true from the Task's - * {@link #getStatus()} method. - */ - void fail() - - /** - * This method will return the current status of the task. - * @see Status - */ - Status getStatus() - - /** - * This returns the start time of the Task's execution in milliseconds since epoch form. - */ - long getStartTimeMs() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/TaskRepository.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/TaskRepository.groovy deleted file mode 100644 index ec736617fb2..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/TaskRepository.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
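Taken together, the Task, Status, and DefaultTask types removed above form a small append-only state machine: every update appends a new immutable status, and a terminal status locks the task. A minimal usage sketch in Java (task id and phase strings are made up; it assumes the classes above on the classpath):
import com.netflix.spinnaker.clouddriver.data.task.DefaultTask;
import com.netflix.spinnaker.clouddriver.data.task.Task;

class TaskLifecycleSketch {
  public static void main(String[] args) {
    Task task = new DefaultTask("demo-1");                 // initial status: INIT / "Creating task demo-1"
    task.updateStatus("DEPLOY", "Creating server group");  // appends to the status history
    task.complete();                                       // appends a terminal COMPLETED status
    System.out.println(task.getStatus().isCompleted());    // true
    // Any further updateStatus() call would throw IllegalStateException,
    // because ensureUpdateable() rejects updates once the task is completed.
  }
}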
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.data.task -/** - * A TaskRepository is an implementation that allows Task objects to be created, retrieved, and listed on demand. - * - * @see Task - * @see InMemoryTaskRepository - * - */ -public interface TaskRepository { - /** - * A thread local holder for a Task in-action. Useful for the {@link InMemoryTaskRepository} implementation. - */ - static final ThreadLocal threadLocalTask = new ThreadLocal<>() - - /** - * Creates a new task, and sets the initial status to the provided phase and status. - * - * @param phase - * @param status - * @return task - */ - Task create(String phase, String status) - - /** - * Creates a new task if a task has not already been created with that key - * and sets the initial status to the provided phase and status. - * - * @param phase - * @param status - * @param clientRequestId - * @return task the new task, or the previous task that was created with the supplied key - */ - Task create(String phase, String status, String clientRequestId) - - /** - * Retrieves a task by the provided id - * - * @param id - * @return task - */ - Task get(String id) - - /** - * Retrieves a task by the provided clientRequestId - * @param clientRequestId - * @return task, or null if no task has been started with the requestId - */ - Task getByClientRequestId(String clientRequestId) - - /** - * Lists all tasks currently in the repository - * - * @return list of tasks - */ - List list() - - /** - * Lists all tasks owned by this instance - */ - List listByThisInstance() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/TaskState.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/TaskState.groovy deleted file mode 100644 index 68864e233fd..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/TaskState.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.data.task - -enum TaskState { - STARTED, - COMPLETED, - FAILED - - boolean isCompleted() { - this != STARTED - } - - boolean isFailed() { - this == FAILED - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTask.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTask.groovy deleted file mode 100644 index 7c164cbc269..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTask.groovy +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.data.task.jedis - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname -import com.netflix.spinnaker.clouddriver.data.task.Status -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskState -import groovy.util.logging.Slf4j - -@Slf4j -class JedisTask implements Task { - - @JsonIgnore - RedisTaskRepository repository - - final String id - final long startTimeMs - final String ownerId - - @JsonIgnore - final boolean previousRedis - - JedisTask(String id, long startTimeMs, RedisTaskRepository repository, String ownerId, boolean previousRedis) { - this.id = id - this.startTimeMs = startTimeMs - this.repository = repository - this.ownerId = ownerId - this.previousRedis = previousRedis - } - - @Override - void updateStatus(String phase, String status) { - checkMutable() - repository.addToHistory(repository.currentState(this).update(phase, status), this) - log.info "[$phase] - $status" - } - - @Override - void complete() { - checkMutable() - repository.addToHistory(repository.currentState(this).update(TaskState.COMPLETED), this) - } - - @Override - void fail() { - checkMutable() - repository.addToHistory(repository.currentState(this).update(TaskState.FAILED), this) - } - - @Override - public void addResultObjects(List results) { - checkMutable() - if (results) { - repository.currentState(this).ensureUpdateable() - repository.addResultObjects(results, this) - } - } - - public List getResultObjects() { - repository.getResultObjects(this) - } - - public List getHistory() { - def status = repository.getHistory(this) - if (status && status.last().isCompleted()) { - status.subList(0, status.size() - 1) - } else { - status - } - } - - @Override - String getOwnerId() { - return ownerId - } - - @Override - Status getStatus() { - repository.currentState(this) - } - - private void checkMutable() { - if (previousRedis) { - throw new IllegalStateException("Read-only task") - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepository.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepository.java deleted file mode 100644 index 0c606ded989..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepository.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
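For orientation, the RedisTaskRepository deleted below spreads each task across a handful of Redis keys, all expiring after TASK_TTL (12 hours). A sketch of that layout using raw Jedis calls (key names come from the constants in the code below; the id and payloads are invented, and the real repository goes through a RedisClientDelegate rather than Jedis directly):
import redis.clients.jedis.Jedis;

class RedisTaskLayoutSketch {
  public static void main(String[] args) {
    try (Jedis jedis = new Jedis("localhost", 6379)) {
      String id = "a1b2c3";
      int ttl = 12 * 60 * 60;                                  // TASK_TTL
      jedis.hset("task:" + id, "id", id);                      // task hash: id, startTimeMs, ownerId
      jedis.rpush("taskHistory:" + id,                         // status history: list of JSON entries
          "{\"phase\":\"INIT\",\"status\":\"Creating task\",\"state\":\"STARTED\"}");
      jedis.rpush("taskResult:" + id, "{}");                   // serialized result objects
      jedis.sadd("kato:tasks", id);                            // RUNNING_TASK_KEY: index of running tasks
      jedis.set("kato:taskmap:req-123", id);                   // TASK_KEY_MAP: clientRequestId -> taskId
      jedis.expire("task:" + id, ttl);                         // history/result keys get the same TTL
    }
  }
}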
- */ -package com.netflix.spinnaker.clouddriver.data.task.jedis; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.dyno.connectionpool.exception.DynoException; -import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname; -import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus; -import com.netflix.spinnaker.clouddriver.data.task.Status; -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayStatus; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.data.task.TaskState; -import com.netflix.spinnaker.kork.dynomite.DynomiteClientDelegate.ClientDelegateException; -import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; -import net.jodah.failsafe.Failsafe; -import net.jodah.failsafe.RetryPolicy; -import net.jodah.failsafe.function.CheckedConsumer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import redis.clients.jedis.exceptions.JedisException; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static java.lang.String.format; - -public class RedisTaskRepository implements TaskRepository { - private static final Logger log = LoggerFactory.getLogger(RedisTaskRepository.class); - - private static final String RUNNING_TASK_KEY = "kato:tasks"; - private static final String TASK_KEY_MAP = "kato:taskmap"; - private static final TypeReference<Map<String, String>> HISTORY_TYPE = new TypeReference<Map<String, String>>() {}; - - private static final int TASK_TTL = (int) TimeUnit.HOURS.toSeconds(12); - - private static final RetryPolicy REDIS_RETRY_POLICY = new RetryPolicy() - .retryOn(Arrays.asList(JedisException.class, DynoException.class, ClientDelegateException.class)) - .withDelay(500, TimeUnit.MILLISECONDS) - .withMaxRetries(3); - - private final RedisClientDelegate redisClientDelegate; - private final Optional<RedisClientDelegate> redisClientDelegatePrevious; - private final ObjectMapper mapper = new ObjectMapper(); - - public RedisTaskRepository(RedisClientDelegate redisClientDelegate, Optional<RedisClientDelegate> redisClientDelegatePrevious) { - this.redisClientDelegate = redisClientDelegate; - this.redisClientDelegatePrevious = redisClientDelegatePrevious; - } - - @Override - public Task create(String phase, String status) { - return create(phase, status, UUID.randomUUID().toString()); - } - - @Override - public Task create(String phase, String status, String clientRequestId) { - String taskKey = getClientRequestKey(clientRequestId); - - String taskId = UUID.randomUUID().toString(); - - JedisTask task = new JedisTask(taskId, System.currentTimeMillis(), this, ClouddriverHostname.ID, false); - addToHistory(DefaultTaskStatus.create(phase, status, TaskState.STARTED), task); - set(taskId, task); - Long newTask = retry(() -> redisClientDelegate.withCommandsClient(client -> { - return client.setnx(taskKey, taskId); - }), "Registering task with index"); - if (newTask != 0) { - return task; - } - - // There's an existing taskId for this key, clean up what we just created and get the existing task - addToHistory(DefaultTaskStatus.create(phase, "Duplicate of " + clientRequestId, TaskState.FAILED), task); - return
getByClientRequestId(clientRequestId); - } - - @Override - public Task get(String id) { - Map taskMap = retry(() -> redisClientDelegate.withCommandsClient(client -> { - return client.hgetAll("task:" + id); - }), format("Getting task ID %s", id)); - boolean oldTask = redisClientDelegatePrevious.isPresent() && (taskMap == null || taskMap.isEmpty()); - if (oldTask) { - try { - taskMap = redisClientDelegatePrevious.get().withCommandsClient(client -> { - return client.hgetAll("task:" + id); - }); - } catch (Exception e) { - // Failed to hit old redis, let's not blow up on that - return null; - } - } - if (taskMap.containsKey("id") && taskMap.containsKey("startTimeMs")) { - return new JedisTask( - taskMap.get("id"), - Long.parseLong(taskMap.get("startTimeMs")), - this, - taskMap.get("ownerId"), - oldTask - ); - } - return null; - } - - @Override - public Task getByClientRequestId(String clientRequestId) { - final String clientRequestKey = getClientRequestKey(clientRequestId); - String existingTask = retry(() -> redisClientDelegate.withCommandsClient(client -> { - return client.get(clientRequestKey); - }), format("Getting task by client request ID %s", clientRequestId)); - if (existingTask == null) { - if (redisClientDelegatePrevious.isPresent()) { - try { - existingTask = redisClientDelegatePrevious.get().withCommandsClient(client -> { - return client.get(clientRequestKey); - }); - } catch (Exception e) { - // Failed to hit old redis, let's not blow up on that - existingTask = null; - } - } - } - if (existingTask != null) { - return get(existingTask); - } - return null; - } - - @Override - public List list() { - return retry(() -> redisClientDelegate.withCommandsClient(client -> { - return client.smembers(RUNNING_TASK_KEY).stream().map(this::get).collect(Collectors.toList()); - }), "Getting all running tasks"); - } - - @Override - public List listByThisInstance() { - return list().stream() - .filter(t -> ClouddriverHostname.ID.equals(t.getOwnerId())) - .collect(Collectors.toList()); - } - - public void set(String id, JedisTask task) { - String taskId = "task:" + task.getId(); - Map data = new HashMap<>(); - data.put("id", task.getId()); - data.put("startTimeMs", Long.toString(task.getStartTimeMs())); - data.put("ownerId", task.getOwnerId()); - retry(() -> redisClientDelegate.withCommandsClient(client -> { - client.hmset(taskId, data); - client.expire(taskId, TASK_TTL); - client.sadd(RUNNING_TASK_KEY, id); - }), format("Writing task %s", id)); - } - - public void addToHistory(DefaultTaskStatus status, JedisTask task) { - String historyId = "taskHistory:" + task.getId(); - - Map data = new HashMap<>(); - data.put("phase", status.getPhase()); - data.put("status", status.getStatus()); - data.put("state", status.getState().toString()); - - String hist; - try { - hist = mapper.writeValueAsString(data); - } catch (JsonProcessingException e) { - throw new RuntimeException("Failed converting task history to json", e); - } - - retry(() -> redisClientDelegate.withCommandsClient(client -> { - client.rpush(historyId, hist); - client.expire(historyId, TASK_TTL); - if (status.isCompleted()) { - client.srem(RUNNING_TASK_KEY, task.getId()); - } - }), format("Adding status history to task %s: %s", task.getId(), status)); - } - - public List getHistory(JedisTask task) { - String historyId = "taskHistory:" + task.getId(); - - RedisClientDelegate client = clientForTask(task); - return retry(() -> client.withCommandsClient(c -> { - return c.lrange(historyId, 0, -1); - }), format("Getting history for task %s", 
task.getId())) - .stream() - .map(h -> { - Map history; - try { - history = mapper.readValue(h, HISTORY_TYPE); - } catch (IOException e) { - throw new RuntimeException("Could not convert history json to type", e); - } - return TaskDisplayStatus.create(DefaultTaskStatus.create(history.get("phase"), history.get("status"), TaskState.valueOf(history.get("state")))); - }) - .collect(Collectors.toList()); - } - - public DefaultTaskStatus currentState(JedisTask task) { - String historyId = "taskHistory:" + task.getId(); - - RedisClientDelegate client = clientForTask(task); - String state = retry(() -> client.withCommandsClient(c -> { - return c.lindex(historyId, -1); - }), format("Getting current state for task %s", task.getId())); - - Map history; - try { - history = mapper.readValue(state, HISTORY_TYPE); - } catch (IOException e) { - throw new RuntimeException("Failed converting task history json to object", e); - } - return DefaultTaskStatus.create(history.get("phase"), history.get("status"), TaskState.valueOf(history.get("state"))); - } - - public void addResultObjects(List objects, JedisTask task) { - String resultId = "taskResult:" + task.getId(); - String[] values = objects.stream() - .map(o -> { - try { - return mapper.writeValueAsString(o); - } catch (JsonProcessingException e) { - throw new RuntimeException("Failed to convert object to string", e); - } - }) - .collect(Collectors.toList()) - .toArray(new String[objects.size()]); - - log.debug("Adding results to task {} (results: {})", task.getId(), values); - retry(() -> redisClientDelegate.withCommandsClient(client -> { - client.rpush(resultId, values); - client.expire(resultId, TASK_TTL); - }), format("Adding results to task %s", task.getId())); - } - - public List getResultObjects(JedisTask task) { - String resultId = "taskResult:" + task.getId(); - - return retry(() -> clientForTask(task).withCommandsClient(client -> { - return client.lrange(resultId, 0, -1); - }), format("Getting results for task %s", task.getId())) - .stream() - .map(o -> { - try { - return mapper.readValue(o, Map.class); - } catch (IOException e) { - throw new RuntimeException("Failed to convert result object to map", e); - } - }) - .collect(Collectors.toList()); - } - - private String getClientRequestKey(String clientRequestId) { - return TASK_KEY_MAP + ":" + clientRequestId; - } - - private RedisClientDelegate clientForTask(JedisTask task) { - if (task.getPreviousRedis() && redisClientDelegatePrevious.isPresent()) { - return redisClientDelegatePrevious.get(); - } - return redisClientDelegate; - } - - private T retry(Supplier f, String onRetriesExceededMessage) { - return retry(f, failure -> { throw new ExcessiveRedisFailureRetries(onRetriesExceededMessage, failure); }); - } - - private T retry(Supplier f, CheckedConsumer retryExceededListener) { - return Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(retryExceededListener) - .get(f::get); - } - - private void retry(Runnable f, String onRetriesExceededMessage) { - retry(f, failure -> { throw new ExcessiveRedisFailureRetries(onRetriesExceededMessage, failure); }); - } - - private void retry(Runnable f, CheckedConsumer retryExceededListener) { - Failsafe - .with(REDIS_RETRY_POLICY) - .onRetriesExceeded(retryExceededListener) - .run(f::run); - } - - private static class ExcessiveRedisFailureRetries extends RuntimeException { - ExcessiveRedisFailureRetries(String message, Throwable cause) { - super(message, cause); - } - } -} diff --git 
a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DefaultDeployHandlerRegistry.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DefaultDeployHandlerRegistry.groovy deleted file mode 100644 index 1f738956585..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DefaultDeployHandlerRegistry.groovy +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy - -import org.springframework.beans.factory.annotation.Autowired - -class DefaultDeployHandlerRegistry implements DeployHandlerRegistry { - - @Autowired - List deployHandlers - - @Override - DeployHandler findHandler(DeployDescription description) { - def handler = deployHandlers.find { it.handles(description) } - if (!handler) { - throw new DeployHandlerNotFoundException() - } else { - handler - } - } - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperation.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperation.groovy index e2bc11251fb..73f1ee4d6e4 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperation.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperation.groovy @@ -19,10 +19,13 @@ package com.netflix.spinnaker.clouddriver.deploy import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent import org.springframework.beans.factory.annotation.Autowired -class DeployAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull + +class DeployAtomicOperation implements AtomicOperation, SagaContextAware { private static final String TASK_PHASE = "DEPLOY" @Autowired @@ -55,10 +58,29 @@ class DeployAtomicOperation implements AtomicOperation { task.updateStatus TASK_PHASE, "Found handler: ${deployHandler.getClass().simpleName}" task.updateStatus TASK_PHASE, "Invoking Handler." - def deploymentResult = deployHandler.handle(description, priorOutputs) - task.updateStatus TASK_PHASE, "Server Groups: ${deploymentResult.serverGroupNames} created." + DeploymentResult deploymentResult = deployHandler.handle(description, priorOutputs).normalize() + task.updateStatus TASK_PHASE, "Server Groups: ${deploymentResult.getDeployments()} created." + + return deploymentResult + } - deploymentResult + @Override + void setSagaContext(@Nonnull SagaContext sagaContext) { + // DeployHandlers are singleton objects autowired differently than their one-off AtomicOperations, so we can't + // set a SagaContext onto them. 
Instead, we need to set it onto the description. To pile on, AtomicOperationConverters + // throw away the initial converted AtomicOperationDescription, so we can't apply the SagaContext to the description + // on behalf of cloud provider integrators... so we have to wire that up for them manually in any AtomicOperation. + if (description instanceof SagaContextAware) { + ((SagaContextAware) description).sagaContext = sagaContext + } + } + + @Override + SagaContext getSagaContext() { + if (description instanceof SagaContextAware) { + return description.sagaContext + } + return null } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandler.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandler.java deleted file mode 100644 index 542e4c1df4b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandler.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy; - -import com.netflix.spinnaker.clouddriver.orchestration.events.CreateServerGroupEvent; -import java.util.Collections; -import java.util.List; - -/** - * A DeployHandler takes a parameterized description object and performs some deployment operation based off of its - * detail. These objects may most often be derived from a {@link DeployHandlerRegistry} implementation. - * - * @param the type of the {@link DeployDescription} - * @see DeployDescription - * - */ -public interface DeployHandler { - /** - * A method that performs the deployment action described by the description object and returns its results as an - * implementation of {@link DeploymentResult} - * - * @param description - * @param priorOutputs from prior operations - * @return deployment result object - */ - DeploymentResult handle(T description, List priorOutputs); - - /** - * Used to determine if this handler is suitable for processing the supplied description object. - * - * @param description - * @return true/false - */ - boolean handles(DeployDescription description); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerNotFoundException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerNotFoundException.groovy deleted file mode 100644 index 1405c616434..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerNotFoundException.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy - -import groovy.transform.InheritConstructors - -@InheritConstructors -class DeployHandlerNotFoundException extends RuntimeException {} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerRegistry.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerRegistry.groovy deleted file mode 100644 index 06b77e6aa85..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerRegistry.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy -/** - * A registry of {@link DeployHandler} instances. - * - * - */ -public interface DeployHandlerRegistry { - /** - * This method is used to locate a handler most appropriate for the provided description object. 
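The registry contract here is simple first-match dispatch: DefaultDeployHandlerRegistry (deleted above) walks its injected handlers and returns the first one whose handles() accepts the description. A hypothetical provider handler, to make the shape concrete (MyDeployDescription is invented for illustration and assumed to satisfy DeployDescription):
import java.util.List;

// Hypothetical description type; real providers define their own.
class MyDeployDescription implements DeployDescription { }

class MyDeployHandler implements DeployHandler<MyDeployDescription> {
  @Override
  public DeploymentResult handle(MyDeployDescription description, List priorOutputs) {
    DeploymentResult result = new DeploymentResult();
    result.setServerGroupNames(List.of("myapp-main-v001")); // report what was created
    return result;
  }

  @Override
  public boolean handles(DeployDescription description) {
    return description instanceof MyDeployDescription;      // first-match dispatch keys off this
  }
}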
- * - * @param description - * @return a deploy handler instance - * @throws DeployHandlerNotFoundException - */ - DeployHandler findHandler(DeployDescription description) throws DeployHandlerNotFoundException -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResult.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResult.groovy index 7ca3c169e85..94c4db8206a 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResult.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResult.groovy @@ -19,7 +19,6 @@ package com.netflix.spinnaker.clouddriver.deploy import com.netflix.spinnaker.kork.artifacts.model.Artifact class DeploymentResult { - // TODO(lwander) deprecate in favor of `deployedNames` and `deployedNamesByLocation` List<String> serverGroupNames = [] Map<String, String> serverGroupNameByRegion = [:] List<String> messages = [] @@ -28,4 +27,87 @@ Map<String, List<String>> deployedNamesByLocation = [:] List<Artifact> createdArtifacts = [] + Set<Deployment> deployments = [] + + DeploymentResult normalize() { + if (deployments) { + return this + } + + serverGroupNameByRegion.each { key, value -> + deployments.add(new Deployment(location: key, serverGroupName: value)) + } + + deployedNamesByLocation.each { key, values -> + values.each { value -> + deployments.add(new Deployment(location: key, serverGroupName: value)) + } + } + + return this + } + + static class Deployment { + String cloudProvider + String account + String location + String serverGroupName + + Capacity capacity + + Map metadata = [:] + + boolean equals(o) { + if (this.is(o)) return true + if (getClass() != o.class) return false + + Deployment that = (Deployment) o + + if (location != that.location) return false + if (serverGroupName != that.serverGroupName) return false + + return true + } + + int hashCode() { + int result + result = (location != null ? location.hashCode() : 0) + result = 31 * result + (serverGroupName != null ? serverGroupName.hashCode() : 0) + return result + } + + @Override + String toString() { + return "${location}:${serverGroupName}" + } + + static class Capacity { + Integer min + Integer max + Integer desired + + boolean equals(o) { + if (this.is(o)) return true + if (getClass() != o.class) return false + + Capacity capacity = (Capacity) o + + if (desired != capacity.desired) return false + if (max != capacity.max) return false + if (min != capacity.min) return false + + return true + } + + int hashCode() { + int result + result = (min != null ? min.hashCode() : 0) + result = 31 * result + (max != null ? max.hashCode() : 0) + result = 31 * result + (desired != null ? desired.hashCode() : 0) + return result + } + } + } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationErrors.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationErrors.groovy deleted file mode 100644 index 9fa1bb52a4c..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationErrors.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
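A worked example of the normalize() bridge added above: the legacy region- and location-keyed fields are folded into the unified deployments set, where each Deployment prints as location:serverGroupName. (Values below are invented; a sketch in Java against the Groovy class above.)
import java.util.List;
import java.util.Map;

class NormalizeSketch {
  public static void main(String[] args) {
    DeploymentResult result = new DeploymentResult();
    result.setServerGroupNameByRegion(Map.of("us-east-1", "myapp-main-v002"));
    result.setDeployedNamesByLocation(Map.of("us-west-2", List.of("myapp-main-v002")));
    result.normalize();
    // Prints two entries: "us-east-1:myapp-main-v002" and "us-west-2:myapp-main-v002".
    // Deployment equality is keyed on (location, serverGroupName), so both are kept.
    System.out.println(result.getDeployments());
  }
}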
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy - -import org.springframework.validation.AbstractErrors -import org.springframework.validation.Errors -import org.springframework.validation.FieldError -import org.springframework.validation.ObjectError - -class DescriptionValidationErrors extends AbstractErrors { - Object description - List globalErrors = new ArrayList(); - List fieldErrors = new ArrayList(); - - DescriptionValidationErrors(Object description) { - this.description = description - } - - @Override - String getObjectName() { - description.class.simpleName - } - - @Override - void reject(String errorCode, Object[] errorArgs, String defaultMessage) { - globalErrors.add(new ObjectError(objectName, [errorCode] as String[], errorArgs, defaultMessage)) - } - - @Override - void rejectValue(String field, String errorCode, Object[] errorArgs, String defaultMessage) { - fieldErrors.add(new FieldError(objectName, field, null, false, [errorCode] as String[], errorArgs, defaultMessage)) - } - - @Override - void addAllErrors(Errors errors) { - globalErrors.addAll errors.allErrors - } - - @Override - Object getFieldValue(String field) { - description."$field" - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationException.groovy deleted file mode 100644 index e3b6c005336..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationException.groovy +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.deploy - -import com.netflix.spinnaker.kork.web.exceptions.ValidationException -import org.springframework.validation.Errors -import org.springframework.validation.ObjectError - -class DescriptionValidationException extends ValidationException { - DescriptionValidationException(Errors errors) { - super("Validation Failed", getErrors(errors)) - } - - static Collection getErrors(Errors errors) { - def errorStrings = [] - errors.allErrors.each { ObjectError objectError -> - errorStrings << (objectError.defaultMessage ?: objectError.code) - } - - return errorStrings - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidator.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidator.groovy deleted file mode 100644 index 15d2ccbe60c..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidator.groovy +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy - -import com.netflix.spinnaker.clouddriver.orchestration.VersionedCloudProviderOperation -import com.netflix.spinnaker.clouddriver.security.resources.AccountNameable -import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable -import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable -import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.security.core.Authentication -import org.springframework.security.core.context.SecurityContextHolder -import org.springframework.validation.Errors - -public abstract class DescriptionValidator implements VersionedCloudProviderOperation { - - static String getValidatorName(String description) { - description + "Validator" - } - - abstract void validate(List priorDescriptions, T description, Errors errors) - - @Autowired(required = false) - FiatPermissionEvaluator permissionEvaluator - - void authorize(T description, Errors errors) { - if (!permissionEvaluator) { - return - } - - Authentication auth = SecurityContextHolder.context.authentication - - if (description instanceof ApplicationNameable) { - ApplicationNameable asApp = description as ApplicationNameable - if (!permissionEvaluator.hasPermission(auth, asApp.application, 'APPLICATION', 'WRITE')) { - errors.reject("authorization", "Access denied to application ${asApp.application}") - } - } - - if (description instanceof AccountNameable) { - AccountNameable asAcct = description as AccountNameable - if (!permissionEvaluator.hasPermission(auth, asAcct.account, 'ACCOUNT', 'WRITE')) { - errors.reject("authorization", "Access denied to account ${asAcct.account}") - } - } - - if (description instanceof ResourcesNameable) { - ResourcesNameable asResources = description as ResourcesNameable - 
permissionEvaluator.storeWholePermission() - asResources.applications.each { String app -> - if (!permissionEvaluator.hasPermission(auth, app, 'APPLICATION', 'WRITE')) { - errors.reject("authorization", "Access denied to application ${app}") - } - } - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/NullOpDeployHandler.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/NullOpDeployHandler.groovy deleted file mode 100644 index 72fd36038dc..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/NullOpDeployHandler.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.deploy - -class NullOpDeployHandler implements DeployHandler { - @Override - DeploymentResult handle(String description, List priorOutputs) { - null - } - - @Override - boolean handles(DeployDescription description) { - false - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/description/EnableDisableDescriptionTrait.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/description/EnableDisableDescriptionTrait.groovy index 6de3f02f1d8..47cd4027e55 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/description/EnableDisableDescriptionTrait.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/description/EnableDisableDescriptionTrait.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.deploy.description -trait EnableDisableDescriptionTrait { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +trait EnableDisableDescriptionTrait implements ServerGroupsNameable { String serverGroupName Integer getDesiredPercentage() { @@ -26,4 +28,9 @@ trait EnableDisableDescriptionTrait { void setDesiredPercentage(Integer _) { throw new IllegalArgumentException("The selected provider hasn't implemented enabling/disabling by percentage yet") } + + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/documentation/Empty.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/documentation/Empty.groovy deleted file mode 100644 index c82207e6de8..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/documentation/Empty.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.documentation - -import java.lang.annotation.ElementType -import java.lang.annotation.Retention -import java.lang.annotation.RetentionPolicy -import java.lang.annotation.Target - -/** - * Marker annotation that defines that the provided method may return an empty map, list, or set - * - * - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.SOURCE) -@interface Empty { - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/documentation/Nullable.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/documentation/Nullable.groovy deleted file mode 100644 index e367bccf535..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/documentation/Nullable.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.documentation - -import java.lang.annotation.ElementType -import java.lang.annotation.Retention -import java.lang.annotation.RetentionPolicy -import java.lang.annotation.Target - -/** - * Marker annotation for documentation purposes. Methods annotated with @Nullable indicate that the method may return a null value. - * - * - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.SOURCE) -@interface Nullable { -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/exceptions/CloudProviderNotFoundException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/exceptions/CloudProviderNotFoundException.groovy deleted file mode 100644 index 787b1bd8a71..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/exceptions/CloudProviderNotFoundException.groovy +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.exceptions - -import groovy.transform.InheritConstructors -import org.springframework.http.HttpStatus -import org.springframework.web.bind.annotation.ResponseStatus - -@ResponseStatus(value = HttpStatus.BAD_REQUEST) -@InheritConstructors -class CloudProviderNotFoundException extends RuntimeException {} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/exceptions/OperationTimedOutException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/exceptions/OperationTimedOutException.groovy deleted file mode 100644 index 7f444ae2360..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/exceptions/OperationTimedOutException.groovy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.exceptions - -import groovy.transform.InheritConstructors - -@InheritConstructors -class OperationTimedOutException extends RuntimeException { -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolver.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolver.groovy index e14d15d8c6e..a545eb8ebb8 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolver.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolver.groovy @@ -17,6 +17,8 @@ package com.netflix.spinnaker.clouddriver.helpers import com.netflix.frigga.NameBuilder +import com.netflix.frigga.NameConstants +import com.netflix.frigga.NameValidation import com.netflix.frigga.Names import com.netflix.frigga.autoscaling.AutoScalingGroupNameBuilder import com.netflix.spinnaker.clouddriver.data.task.Task @@ -58,6 +60,11 @@ abstract class AbstractServerGroupNameResolver extends NameBuilder { return latestServerGroup ? 
latestServerGroup.serverGroupName : null } + String getClusterName(String application, String stack, String details) { + String clusterName = combineAppStackDetail(application, stack, details); + return clusterName; + } + String resolveNextServerGroupName(String application, String stack, String details, Boolean ignoreSequence) { Integer nextSequence = 0 String clusterName = combineAppStackDetail(application, stack, details) @@ -99,8 +106,20 @@ abstract class AbstractServerGroupNameResolver extends NameBuilder { throw new IllegalArgumentException("Sequence '${sequence}' is invalid") } + //validate characters, but we will skip pushSequence in stack/detail to not break existing clusters + if (!NameValidation.checkName(NameValidation.notEmpty(application, "application"))) { + throw new IllegalArgumentException(String.format("Invalid appName %s, may only contain %s characters", application, NameConstants.NAME_CHARS)) + } + if (stack != null && !stack.isEmpty() && !NameValidation.checkName(stack)) { + throw new IllegalArgumentException(String.format("Invalid stack %s, stack may only contain %s", stack, NameConstants.NAME_CHARS)) + } + + if (details != null && !details.isEmpty() && !NameValidation.checkNameWithHyphen(details)) { + throw new IllegalArgumentException(String.format("Invalid detail %s, detail may only contain %s", details, NameConstants.NAME_HYPHEN_CHARS)) + } + def builder = new AutoScalingGroupNameBuilder(appName: application, stack: stack, detail: details) - def groupName = builder.buildGroupName(true) + def groupName = builder.buildGroupName() if (ignoreSequence) { return groupName } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/EnableDisablePercentageCategorizer.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/EnableDisablePercentageCategorizer.groovy deleted file mode 100644 index 58e81961dc6..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/EnableDisablePercentageCategorizer.groovy +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.helpers - - -class EnableDisablePercentageCategorizer { - /** - * During an enable/disable operation that accepts a desired percentage of instances to leave enabled/disabled, this acts - * as a helper function to return which instances still need to be enabled/disabled. - * - * @param modified are the instances that don't need to be enabled/disabled (presumably have already been enabled/disabled). - * @param unmodified are the instances that do need to be enabled/disabled. - * @param desiredPercentage is the end desired percentage. - * @return the list of instances to be enabled/disabled. If the percentage has already been achieved or exceeded by the input instances, we return an empty list. 
- * - * @note modified + unmodified should be the total list of instances managed by one server group - */ - static List getInstancesToModify(List modified, List unmodified, int desiredPercentage) { - if (desiredPercentage < 0 || desiredPercentage > 100) { - throw new RuntimeException("Desired target percentage must be between 0 and 100 inclusive") - } - - int totalSize = modified.size() + unmodified.size() - int newSize = (int) Math.ceil(totalSize * (float) (desiredPercentage / 100)) - - int returnSize = modified.size() > newSize ? 0 : newSize - modified.size() - - return unmodified.take(returnSize) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/OperationPoller.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/OperationPoller.groovy index b1cf18fa65e..b47b1e347a2 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/OperationPoller.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/helpers/OperationPoller.groovy @@ -20,8 +20,9 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.exceptions.OperationTimedOutException import groovy.util.logging.Slf4j +import java.util.function.Consumer import java.util.function.Function -import java.util.function.Predicate +import java.util.function.Supplier /** * A poller with an upper time limit combined with a Fibonacci-based backoff. @@ -45,7 +46,14 @@ class OperationPoller { this(asyncOperationTimeoutSecondsDefault, asyncOperationMaxPollingIntervalSeconds) this.threadSleeper = threadSleeper } -/** + + public T waitForOperation(Supplier operation, Function ifDone, + Long timeoutSeconds, Task task, String resourceString, String basePhase) { + (T) waitForOperation({ operation.get() }, { T t -> ifDone.apply(t) }, + timeoutSeconds, task, resourceString, basePhase) + } + + /** * Wrap an operational closure with a back off algorithm to check until completed. * * @param operation - a closure to perform an operation and return a testable value diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobExecutor.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobExecutor.groovy deleted file mode 100644 index 983b749f22d..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobExecutor.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
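To make the rounding in the EnableDisablePercentageCategorizer helper (deleted above) concrete: with 10 instances of which 3 are already disabled and a desired 50%, newSize = ceil(10 * 0.5) = 5, so 5 - 3 = 2 more instances are returned; at 20%, newSize = 2 is already met and the result is empty. A sketch calling it from Java (instance ids are invented):
import java.util.List;

class PercentageSketch {
  public static void main(String[] args) {
    List modified = List.of("i-1", "i-2", "i-3");          // already disabled
    List unmodified = List.of("i-4", "i-5", "i-6", "i-7", "i-8", "i-9", "i-10");
    List toModify = EnableDisablePercentageCategorizer.getInstancesToModify(modified, unmodified, 50);
    System.out.println(toModify);                          // [i-4, i-5]
  }
}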
- */ - -package com.netflix.spinnaker.clouddriver.jobs - -interface JobExecutor { - String startJob(JobRequest jobRequest, Map environment, InputStream inputStream) - boolean jobExists(String jobId) - JobStatus updateJob(String jobId) - void cancelJob(String jobId) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobRequest.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobRequest.groovy deleted file mode 100644 index 9620aafeeb9..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobRequest.groovy +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.jobs - -import groovy.transform.CompileStatic -import groovy.transform.Immutable - -@Immutable(copyWith = true) -@CompileStatic -class JobRequest { - List tokenizedCommand -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobStatus.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobStatus.groovy deleted file mode 100644 index c8c4c79526e..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/JobStatus.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.jobs - -class JobStatus { - - String id - State state - Result result - String stdOut - String stdErr - - static enum State { - RUNNING, COMPLETED - } - - static enum Result { - SUCCESS, FAILURE - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.groovy deleted file mode 100644 index 62618d7f242..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.groovy +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
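JobExecutorLocal, deleted below, builds on Apache Commons Exec: asynchronous execution guarded by a watchdog that kills the process on timeout, with a result handler polled later for the exit code. A minimal standalone sketch of that pattern (the command and timeout are illustrative; the real executor also wires a PumpStreamHandler to capture stdout/stderr):
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;

class WatchdogSketch {
  public static void main(String[] args) throws Exception {
    CommandLine cmd = new CommandLine("sleep");
    cmd.addArgument("30");
    ExecuteWatchdog watchdog = new ExecuteWatchdog(5_000L);   // kill the process after 5s
    DefaultExecuteResultHandler handler = new DefaultExecuteResultHandler();
    DefaultExecutor executor = new DefaultExecutor();
    executor.setWatchdog(watchdog);
    executor.execute(cmd, handler);   // returns immediately; the process runs in the background
    handler.waitFor();                // blocks until the process exits or the watchdog kills it
    System.out.println("exit code: " + handler.getExitValue());
  }
}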
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.jobs.local - -import com.netflix.spinnaker.clouddriver.jobs.JobExecutor -import com.netflix.spinnaker.clouddriver.jobs.JobRequest -import com.netflix.spinnaker.clouddriver.jobs.JobStatus -import groovy.util.logging.Slf4j -import org.apache.commons.exec.CommandLine -import org.apache.commons.exec.DefaultExecuteResultHandler -import org.apache.commons.exec.DefaultExecutor -import org.apache.commons.exec.ExecuteWatchdog -import org.apache.commons.exec.Executor -import org.apache.commons.exec.PumpStreamHandler -import org.apache.commons.exec.Watchdog -import org.springframework.beans.factory.annotation.Value - -import java.util.concurrent.ConcurrentHashMap - -@Slf4j -class JobExecutorLocal implements JobExecutor { - - @Value('${jobs.local.timeoutMinutes:10}') - long timeoutMinutes - - Map jobIdToHandlerMap = new ConcurrentHashMap() - - @Override - String startJob(JobRequest jobRequest, Map environment, InputStream inputStream) { - log.debug("Starting job: '${String.join(' ', jobRequest.tokenizedCommand)}'...") - - String jobId = UUID.randomUUID().toString() - - ByteArrayOutputStream stdOut = new ByteArrayOutputStream() - ByteArrayOutputStream stdErr = new ByteArrayOutputStream() - PumpStreamHandler pumpStreamHandler = new PumpStreamHandler(stdOut, stdErr, inputStream) - CommandLine commandLine - - if (jobRequest.tokenizedCommand) { - log.debug("Executing $jobId with tokenized command: $jobRequest.tokenizedCommand") - - // Grab the first element as the command. - commandLine = new CommandLine(jobRequest.tokenizedCommand[0]) - - // Treat the rest as arguments. - String[] arguments = Arrays.copyOfRange(jobRequest.tokenizedCommand.toArray(), 1, jobRequest.tokenizedCommand.size()) - - commandLine.addArguments(arguments, false) - } else { - throw new IllegalArgumentException("No tokenizedCommand specified for $jobId.") - } - - DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler() - ExecuteWatchdog watchdog = new ExecuteWatchdog(timeoutMinutes * 60 * 1000){ - @Override - void timeoutOccured(Watchdog w) { - // If a watchdog is passed in, this was an actual time-out. Otherwise, it is likely - // the result of calling watchdog.destroyProcess(). 
- if (w) { - log.warn("Job $jobId timed-out (after $timeoutMinutes minutes).") - cancelJob(jobId) - } - - super.timeoutOccured(w) - } - } - Executor executor = new DefaultExecutor() - executor.setStreamHandler(pumpStreamHandler) - executor.setWatchdog(watchdog) - executor.execute(commandLine, environment, resultHandler) - - // TODO(lwander/dpeach) investigate if this is actually needed - // give the job time to startup - sleep(500) - - jobIdToHandlerMap.put(jobId, [ - handler: resultHandler, - watchdog: watchdog, - stdOut: stdOut, - stdErr: stdErr - ]) - - return jobId - } - - @Override - boolean jobExists(String jobId) { - return jobIdToHandlerMap.containsKey(jobId) - } - - @Override - JobStatus updateJob(String jobId) { - try { - log.debug("Polling state for $jobId...") - - if (jobIdToHandlerMap[jobId]) { - JobStatus jobStatus = new JobStatus(id: jobId) - - DefaultExecuteResultHandler resultHandler - ByteArrayOutputStream stdOut - ByteArrayOutputStream stdErr - - jobIdToHandlerMap[jobId].with { - resultHandler = it.handler - stdOut = it.stdOut - stdErr = it.stdErr - } - - String output = new String(stdOut.toByteArray()) - String errors = new String(stdErr.toByteArray()) - - if (resultHandler.hasResult()) { - log.debug("State for $jobId changed with exit code $resultHandler.exitValue.") - - if (!output) { - output = resultHandler.exception ? resultHandler.exception.message : "No output from command." - } - - if (resultHandler.exitValue == 0) { - jobStatus.state = JobStatus.State.COMPLETED - jobStatus.result = JobStatus.Result.SUCCESS - } else { - jobStatus.state = JobStatus.State.COMPLETED - jobStatus.result = JobStatus.Result.FAILURE - } - - jobIdToHandlerMap.remove(jobId) - } else { - jobStatus.state = JobStatus.State.RUNNING - } - jobStatus.stdOut = output - jobStatus.stdErr = errors - return jobStatus - } else { - // This instance is not managing the job, it has timed out, or it was cancelled. - return null - } - } catch (Exception e) { - log.error("Failed to update $jobId", e) - - return null - } - - } - - @Override - void cancelJob(String jobId) { - log.info("Canceling job $jobId...") - - // Remove the job from this rosco instance's handler map. - def canceledJob = jobIdToHandlerMap.remove(jobId) - - // Terminate the process. - canceledJob?.watchdog?.destroyProcess() - - // The next polling interval will be unable to retrieve the job status and will mark it as canceled. - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/metrics/TimedCallable.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/metrics/TimedCallable.groovy deleted file mode 100644 index 9278b66a729..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/metrics/TimedCallable.groovy +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
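The JobExecutorLocal deleted above drives local processes through Apache Commons Exec: build a CommandLine, attach a PumpStreamHandler for stdout/stderr, arm an ExecuteWatchdog, and hand a DefaultExecuteResultHandler to the non-blocking execute(). A minimal self-contained Java sketch of that same pattern (the echo command and 10-minute timeout are illustrative):

import java.io.ByteArrayOutputStream;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;
import org.apache.commons.exec.PumpStreamHandler;

class LocalJobSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream stdOut = new ByteArrayOutputStream();
    ByteArrayOutputStream stdErr = new ByteArrayOutputStream();

    CommandLine command = new CommandLine("echo"); // first token is the binary
    command.addArgument("hello");                  // the rest are arguments

    DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
    ExecuteWatchdog watchdog = new ExecuteWatchdog(10 * 60 * 1000L); // destroy after 10 minutes
    DefaultExecutor executor = new DefaultExecutor();
    executor.setStreamHandler(new PumpStreamHandler(stdOut, stdErr));
    executor.setWatchdog(watchdog);

    executor.execute(command, resultHandler); // returns immediately; handler is signalled on exit
    resultHandler.waitFor();                  // JobExecutorLocal polls hasResult() instead of blocking
    System.out.println("exit=" + resultHandler.getExitValue() + " stdout=" + stdOut);
  }
}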
- */ - -package com.netflix.spinnaker.clouddriver.metrics - -import com.netflix.spectator.api.Id -import com.netflix.spectator.api.Registry -import groovy.transform.CompileStatic - -import java.util.concurrent.Callable -import java.util.concurrent.TimeUnit - -@CompileStatic -class TimedCallable<T> implements Callable<T> { - private final Registry registry - private final Id metricId - private final Callable<T> callable - - private static class RunnableWrapper implements Callable<Void> { - private final Runnable runnable - - RunnableWrapper(Runnable runnable) { - this.runnable = runnable - } - - @Override - Void call() throws Exception { - runnable.run() - null - } - } - - private static class ClosureWrapper<T> implements Callable<T> { - private final Closure<T> closure - - ClosureWrapper(Closure<T> closure) { - this.closure = closure - } - - @Override - T call() throws Exception { - closure.call() - } - } - - public static TimedCallable<Void> forRunnable(Registry registry, Id metricId, Runnable runnable) { - new TimedCallable<Void>(registry, metricId, new RunnableWrapper(runnable)) - } - - public static <T> TimedCallable<T> forCallable(Registry registry, Id metricId, Callable<T> callable) { - new TimedCallable<T>(registry, metricId, callable) - } - - public static <T> TimedCallable<T> forClosure(Registry registry, Id metricId, Closure<T> closure) { - new TimedCallable<T>(registry, metricId, new ClosureWrapper<T>(closure)) - } - - TimedCallable(Registry registry, Id metricId, Callable<T> callable) { - this.registry = registry - this.metricId = metricId - this.callable = callable - } - - @Override - T call() throws Exception { - long start = System.nanoTime() - Id thisId = metricId - try { - T result = callable.call() - thisId = thisId.withTag("success", "true") - return result - } catch (Exception ex) { - thisId = thisId.withTag("success", "false").withTag("cause", ex.class.simpleName) - throw ex - } finally { - registry.timer(thisId).record(System.nanoTime() - start, TimeUnit.NANOSECONDS) - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Application.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Application.groovy deleted file mode 100644 index 2ab5b422703..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Application.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -import com.netflix.spinnaker.clouddriver.documentation.Empty - -/** - * An application is a top-level construct that provides an association to {@link Cluster} objects. - * - * - */ -interface Application { - /** - * The name of the application - * - * @return name - */ - String getName() - - /** - * Arbitrary metadata that may be associated with an application.
- * - * @return map of key->value pairs, or an empty map - */ - @Empty - Map getAttributes() - - /** - * A set of cluster names that are associated with this application - * - * @return names - */ - @Empty - Map<String, Set<String>> getClusterNames() - - Closure<Map<String, Set<String>>> mergeClusters = { Application a, Application b -> - [a, b].inject([:]) { Map map, source -> - for (Map.Entry e in source.clusterNames) { - if (!map.containsKey(e.key)) { - map[e.key] = new HashSet() - } - map[e.key].addAll e.value - } - map - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ApplicationProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ApplicationProvider.groovy deleted file mode 100644 index bb4e31f7d8c..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ApplicationProvider.groovy +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2014-2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -import com.netflix.spinnaker.clouddriver.documentation.Empty -import com.netflix.spinnaker.clouddriver.documentation.Nullable - -/** - * An application provider is an interface through which {@link Application} objects may be retrieved. This interface defines a common contract for which various providers may be queried about their - * known applications. - * - * - * - */ -interface ApplicationProvider { - /** - * Looks up all of the {@link Application} objects known to this provider - * - * @param expand Whether application relationships (i.e. cluster names) should be included - * @return a set of applications or an empty set if none are known to this provider - */ - @Empty - Set getApplications(boolean expand) - - /** - * Looks up a particular application by name - * - * @param application name - * @return an application or null if it is not known to this provider - */ - @Nullable - Application getApplication(String name) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ArtifactProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ArtifactProvider.java deleted file mode 100644 index 2cdadf2b4cd..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ArtifactProvider.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
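The mergeClusters closure above unions the two applications' cluster names per account: merging {prod: [app-main]} with {prod: [app-canary], test: [app-main]} yields {prod: [app-main, app-canary], test: [app-main]}. An equivalent Java rendering of that union, for illustration only:

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class MergeClusterNamesSketch {
  // Same union-by-account logic as the mergeClusters closure above.
  static Map<String, Set<String>> merge(Map<String, Set<String>> a, Map<String, Set<String>> b) {
    Map<String, Set<String>> merged = new HashMap<>();
    for (Map<String, Set<String>> source : List.of(a, b)) {
      source.forEach((account, names) ->
          merged.computeIfAbsent(account, k -> new HashSet<>()).addAll(names));
    }
    return merged;
  }
}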
- * - */ - -package com.netflix.spinnaker.clouddriver.model; - -import com.netflix.spinnaker.kork.artifacts.model.Artifact; - -import java.util.List; - -public interface ArtifactProvider { - List<Artifact> getArtifacts(String type, String name, String location); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CachingAgentScheduler.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CachingAgentScheduler.groovy deleted file mode 100644 index cfce5136acf..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CachingAgentScheduler.groovy +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface CachingAgentScheduler { -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CertificateProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CertificateProvider.groovy deleted file mode 100644 index 54e6f893962..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CertificateProvider.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface CertificateProvider { - String getCloudProvider() - Set getAll() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricDatapoint.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricDatapoint.groovy deleted file mode 100644 index ee543b6303a..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricDatapoint.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.model - -interface CloudMetricDatapoint { - - -} \ No newline at end of file diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricDescriptor.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricDescriptor.groovy deleted file mode 100644 index c7d4993ed43..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricDescriptor.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * Describes a metric reported by a cloud provider. - * - * Implementations should add any fields necessary to uniquely identify a particular metric; for example, AWS - * supplies a "namespace" field, as well as a collection of "dimensions" - */ -interface CloudMetricDescriptor { - - String name - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricProvider.groovy deleted file mode 100644 index b9aa62b1761..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricProvider.groovy +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface CloudMetricProvider<T extends CloudMetricDescriptor> { - - /** - * Returns the platform of the provider - * @return a String, e.g.
'aws', 'gcp' - */ - String getCloudProvider() - - /** - * Returns a specific metric descriptor - * @param account the account - * @param region the region - * @param filters a collection of identifiers used to uniquely identify a metric - * @return a metric descriptor if one is found; should throw an exception if multiple metric descriptors are found - * for the supplied filters - */ - T getMetricDescriptor(String account, String region, Map filters) - - /** - * Returns a list of metric descriptors matching the supplied filters - * @param account the account - * @param region the region - * @param filters a collection of identifiers used to select a subset of all metrics in the account and region - * @return a list of metric descriptors matching the filters - */ - List<T> findMetricDescriptors(String account, String region, Map filters) - - /** - * Returns a statistic set for the metric descriptor uniquely identified by the supplied filters - * @param account the account - * @param region the region - * @param metricName the name of the target metric - * @param filters a collection of identifiers used to uniquely identify a metric - * @param startTime an inclusive timestamp to determine the oldest datapoint to return - * @param endTime an exclusive timestamp to determine the newest datapoint to return - * @return a CloudMetricStatistics object, describing the statistics with timestamps - */ - CloudMetricStatistics getStatistics(String account, String region, String metricName, Map filters, - Long startTime, Long endTime) - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricStatistics.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricStatistics.groovy deleted file mode 100644 index ddac4806378..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/CloudMetricStatistics.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface CloudMetricStatistics { - /** - * Unit of measurement for all datapoints; should be the plural form of the unit if applicable, - * e.g. "Bytes", "Percent", "Kilobytes/Second" - */ - String unit - - /** - * List of statistical datapoints; at least one statistic (average, sum, sampleCount, minimum, maximum) should be - * populated, as well as the timestamp - */ - List<CloudMetricDatapoint> datapoints - -} \ No newline at end of file diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ClusterProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ClusterProvider.java deleted file mode 100644 index 1c473b00712..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ClusterProvider.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model; - -import com.netflix.spinnaker.clouddriver.documentation.Empty; -import com.netflix.spinnaker.clouddriver.documentation.Nullable; - -import java.util.Map; -import java.util.Set; - -/** - * A cluster provider is an interface for the application to retrieve implementations of {@link Cluster} objects. This interface defines the common contract for which various providers may be queried - * for their known clusters. This interface assumes implementations may span across accounts. - * - * - */ -public interface ClusterProvider<T extends Cluster> { - /** - * Looks up all of the clusters available to this provider. - * Keyed on account name. - * - * @return set of clusters or an empty set if none exist - */ - @Empty - Map<String, Set<T>> getClusters(); - - /** - * Looks up all of the clusters known to this provider to be for a specified application - * Keyed on account name. - * Similar to {@link #getClusterDetails(java.lang.String)}, but returns the names of server groups and load balancers, not reified views. - * - * @param application - * @return map of clusters, keyed on account name, or an empty map if none exist - */ - @Empty - Map<String, Set<T>> getClusterSummaries(String application); - - /** - * Looks up all of the clusters known to this provider to be for a specified application - * Keyed on account name. - * Similar to {@link #getClusterSummaries(java.lang.String)}, but returns reified views of server groups and load balancers. - * - * @param application - * @return map of clusters, keyed on account name, or an empty map if none exist - */ - @Empty - Map<String, Set<T>> getClusterDetails(String application); - - /** - * Looks up all of the clusters known to this provider to be for a specified application and within a {@link com.netflix.spinnaker.clouddriver.security.AccountCredentials} registered with - * a {@link com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider} - * - * @param application - * @param account name - * @return set of clusters with load balancers and server groups populated, or an empty set if none exist - */ - @Empty - Set<T> getClusters(String application, String account); - - @Empty - default Set<T> getClusters(String application, String account, boolean includeDetails) { - return getClusters(application, account); - } - - /** - * Looks up a cluster known to this provider to be for a specified application, within a specified {@link com.netflix.spinnaker.clouddriver.security.AccountCredentials}, and with the specified name.
- * - * @param account - * @param name - * @return cluster with load balancers and server groups populated, or null if none exists - */ - @Nullable - T getCluster(String application, String account, String name); - - @Nullable - T getCluster(String application, String account, String name, boolean includeDetails); - - /** - * Looks up a server group known to this provider, within a specified {@link com.netflix.spinnaker.clouddriver.security.AccountCredentials} and region, and with the specified name. - * @param account name - * @param region - * @param name - * @param includeDetails - * @return the server group or null if none exists - */ - @Nullable - ServerGroup getServerGroup(String account, String region, String name, boolean includeDetails); - - @Nullable - ServerGroup getServerGroup(String account, String region, String name); - - /** - * @return the identifier of the backing cloud provider - */ - String getCloudProviderId(); - - /** - * Determines whether or not optimizations can be made by retrieving minimal or unexpanded clusters. - * - * This primarily affects how server groups are loaded for a cluster (@see com.netflix.spinnaker.clouddriver.controllers.ClusterController}. - * - * Minimal cluster support requires that server groups contained within a cluster be populated with: - * - creation time stamps - * - region / zone details - * - disabled status - * - instance counts (fully populated instances are not necessary) - */ - boolean supportsMinimalClusters(); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ElasticIp.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ElasticIp.groovy deleted file mode 100644 index 9bbc416cbfc..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ElasticIp.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * A representation of an elastic ip - */ -interface ElasticIp { - /** - * The type of this elastic ip. 
May reference the cloud provider to which it is associated - * - * @return - */ - String getType() - - /** - * The public address associated with this elastic ip - * - * @return - */ - String getAddress() - - /** - * The identifier of the object that this elastic ip is attached to - * - * @return - */ - String getAttachedToId() - - /** - * The account associated with this elastic ip - * - * @return - */ - String getAccountName() - - /** - * The region associated with this elastic ip - * - * @return - */ - String getRegion() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ElasticIpProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ElasticIpProvider.groovy deleted file mode 100644 index 350b2a192dc..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ElasticIpProvider.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface ElasticIpProvider { - Set getAllByAccount(String account) - Set getAllByAccountAndRegion(String account, String region) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTags.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTags.groovy index 531c7ec705c..8cdbba5cb8d 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTags.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTags.groovy @@ -167,7 +167,11 @@ class EntityTags { switch (valueType) { case EntityTagValueType.object: - return objectMapper.readValue(value.toString(), Map.class) + try { + return objectMapper.readValue(value.toString(), Map.class) + } catch (Exception e) { + return value + } default: return value } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTagsProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTagsProvider.java deleted file mode 100644 index 2ca5f5f31b4..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/EntityTagsProvider.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
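The EntityTags change above makes object-typed tag values fail soft: the value is parsed as a JSON map when possible and returned verbatim otherwise. A standalone Jackson sketch of the same fallback:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;

class TagValueParseSketch {
  private static final ObjectMapper objectMapper = new ObjectMapper();

  // Mirrors the new behavior: try to parse an 'object' tag value as a Map,
  // fall back to the raw value when it is not valid JSON.
  static Object parseObjectValue(Object value) {
    try {
      return objectMapper.readValue(value.toString(), Map.class);
    } catch (Exception e) {
      return value;
    }
  }

  public static void main(String[] args) {
    System.out.println(parseObjectValue("{\"team\":\"core\"}")); // parses to {team=core}
    System.out.println(parseObjectValue("not-json"));            // falls back to the raw string
  }
}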
- */ - -package com.netflix.spinnaker.clouddriver.model; - -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -public interface EntityTagsProvider { - /** - * Fetch EntityTags by any combination of {@code cloudProvider}/{@code type}/{@code idPrefix}/{@code tags} - */ - Collection<EntityTags> getAll(String cloudProvider, - String application, - String entityType, - List<String> entityIds, - String idPrefix, - String account, - String region, - String namespace, - Map<String, Object> tags, - int maxResults); - - /** - * Fetch EntityTags by {@code id} - */ - Optional<EntityTags> get(String id); - - /** - * Fetch EntityTags by {@code id} AND {@code tags}, both must match - */ - Optional<EntityTags> get(String id, Map<String, Object> tags); - - /** - * Index an EntityTags - */ - void index(EntityTags entityTags); - - /** - * Index multiple EntityTags - */ - void bulkIndex(Collection<EntityTags> multipleEntityTags); - - /** - * Verify that EntityTags has been indexed and can be retrieved via a search - */ - void verifyIndex(EntityTags entityTags); - - /** - * Delete EntityTags by {@code id} - */ - void delete(String id); - - /** - * Delete EntityTags - */ - void bulkDelete(Collection<EntityTags> multipleEntityTags); - - /** - * Reindex all EntityTags - */ - void reindex(); - - /** - * Fetch delta (counts of EntityTags broken down by Elasticsearch and Front50) - * - * Can be used to identify when Elasticsearch and Front50 are out-of-sync. - */ - Map delta(); - - /** - * Remove all entity tags referencing entities that no longer exist (in a clouddriver cache). - */ - Map reconcile(String cloudProvider, String account, String region, boolean dryRun); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Front50Application.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Front50Application.java new file mode 100644 index 00000000000..dfd9ea7fcb8 --- /dev/null +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Front50Application.java @@ -0,0 +1,26 @@ +/* + * Copyright 2022 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import lombok.Data; + +@Data +@JsonIgnoreProperties(ignoreUnknown = true) +public class Front50Application { + private String name; +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Health.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Health.groovy deleted file mode 100644 index 4b295a6c13b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Health.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * Marker interface to represent a Health object. The concrete object will be serialized for consumers. - * - * - */ -public interface Health { - - /** - * @return health state indication - */ - HealthState getState() - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/HealthState.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/HealthState.groovy deleted file mode 100644 index 1e4c093ca6a..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/HealthState.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -enum HealthState { - Up, Down, Unknown, Starting, OutOfService, Succeeded, Failed -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Instance.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Instance.java deleted file mode 100644 index 734e27dc4e8..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Instance.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model; - -import java.util.List; -import java.util.Map; - -/** - * Primarily a marker interface, but provides the representation of an instance, which exists within a {@link ServerGroup}. Concrete implementations should provide more-specific data. - * - * - */ -public interface Instance { - /** - * The name of the instance. By convention this is expected to be globally unique. 
- * - * @return instance name - */ - String getName(); - - /** - * The human-readable name of the instance - * - * @return human-readable name - */ - default String getHumanReadableName() { - return getName(); - } - - /** - * A status of the health of the instance - * @return HealthState - */ - HealthState getHealthState(); - - /** - * A timestamp indicating when the instance was launched - * - * @return the number of milliseconds after the beginning of time (1 January, 1970 UTC) when - * this instance was launched - */ - Long getLaunchTime(); - - /** - * A zone specifier indicating where the instance resides - * - * @return the availability zone - */ - String getZone(); - - /** - * A list of all health metrics reported for this instance - * - * @return A list of health metrics, which will always include keys for type and status, - * and may include others, depending on the health metric - */ - List<Map<String, Object>> getHealth(); - - /** - * @deprecated use #getCloudProvider - */ - String getProviderType(); - - /** - * Cloud-provider key, e.g. "aws", "titus" - * @return - */ - String getCloudProvider(); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceProvider.groovy deleted file mode 100644 index 332a201454b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceProvider.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface InstanceProvider<T extends Instance> { - - /** - * Returns the platform of the instance provider - * @return a String, e.g. 'aws', 'gce' - */ - String getCloudProvider() - - T getInstance(String account, String region, String id) - - String getConsoleOutput(String account, String region, String id) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceType.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceType.groovy deleted file mode 100644 index e43c663585b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceType.groovy +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
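A minimal Java sketch of what a concrete implementation of the Instance interface deleted above could look like; every field value here is hypothetical:

import com.netflix.spinnaker.clouddriver.model.HealthState;
import com.netflix.spinnaker.clouddriver.model.Instance;
import java.util.List;
import java.util.Map;

class SketchInstance implements Instance {
  @Override public String getName() { return "myapp-v001-abc123"; }        // hypothetical instance id
  @Override public HealthState getHealthState() { return HealthState.Up; }
  @Override public Long getLaunchTime() { return 1700000000000L; }         // epoch millis
  @Override public String getZone() { return "us-east-1a"; }
  @Override public List<Map<String, Object>> getHealth() {
    // Each entry carries at least a type and a status/state.
    return List.of(Map.<String, Object>of("type", "Discovery", "state", "Up"));
  }
  @Override public String getProviderType() { return getCloudProvider(); } // deprecated alias
  @Override public String getCloudProvider() { return "aws"; }
}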
- */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * A representation of an instance type - */ -interface InstanceType { - - String getName() - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceTypeProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceTypeProvider.groovy deleted file mode 100644 index d497e3d025c..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/InstanceTypeProvider.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface InstanceTypeProvider { - Set getAll() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobProvider.groovy deleted file mode 100644 index 63fcab460c1..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobProvider.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -public interface JobProvider<T extends JobStatus> { - String getPlatform() - - T collectJob(String account, String location, String id) - - Map<String, Object> getFileContents(String account, String location, String id, String fileName) - - void cancelJob(String account, String location, String id) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobState.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobState.groovy deleted file mode 100644 index ac6672f44f4..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobState.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * A JobState defines the set of possible states a job can be in. - */ -enum JobState { - Starting, Running, Failed, Succeeded, Unknown -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobStatus.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobStatus.groovy deleted file mode 100644 index 75cd52e3280..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/JobStatus.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * A JobStatus reflects the state of a running or completed job. - */ -interface JobStatus { - String getName() - - String getAccount() - - String getId() - - String getLocation() - - String getProvider() - - JobState getJobState() - - Long getCreatedTime() - - Long getCompletedTime() - - Map getCompletionDetails() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/KeyPair.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/KeyPair.groovy deleted file mode 100644 index af9fa6e0f74..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/KeyPair.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package com.netflix.spinnaker.clouddriver.model - -/** - * A representation of a key pair - */ -interface KeyPair { - - /** - * The name of the key pair. - */ - String getKeyName() - - /** - * The fingerprint of the key pair. - */ - String getKeyFingerprint() - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/KeyPairProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/KeyPairProvider.groovy deleted file mode 100644 index 5e04e176a98..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/KeyPairProvider.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface KeyPairProvider { - Set getAll() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerProvider.java deleted file mode 100644 index 2256e59160e..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerProvider.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model; - -import com.fasterxml.jackson.annotation.JsonProperty; -import com.netflix.spinnaker.clouddriver.documentation.Empty; - -import java.util.List; -import java.util.Set; - -/** - * A loadBalancerProvider is an interface for the application to retrieve {@link LoadBalancer} objects. The interface provides a common contract for which one or many providers can be queried for - * their knowledge of load balancers at a given depth of specificity. - * - * This is a temporary class for consolidating the load balancer controllers for each cloud provider. - * Each cloud provider-specific controller will implement this interface (it already does - * implicitly, this interface just makes it explicit). Then, this interface will be merged into the - * LoadBalancerProvider interface while each controller will merge with its - * <Cloud >LoadBalancerProvider. - */ -public interface LoadBalancerProvider<T extends LoadBalancer> { - String getCloudProvider(); - - List<? extends Item> list(); - - Item get(String name); - - List<? extends Details>
byAccountAndRegionAndName(String account, String region, String name); - - /** - * Returns all load balancers related to an application based on one of the following criteria: - * - the load balancer name follows the Frigga naming conventions for load balancers (i.e., the load balancer name starts with the application name, followed by a hyphen) - * - the load balancer is used by a server group in the application - * @param application the name of the application - * @return a collection of load balancers with all attributes populated and a minimal amount of data - * for each server group: its name, region, and *only* the instances attached to the load balancers described above. - * The instances will have a minimal amount of data, as well: name, zone, and health related to any load balancers - */ - @Empty - Set<T> getApplicationLoadBalancers(String application); - - // Some providers call this a "Summary", which I think is semantically different from what it is: - // a details view object, grouped by account, then region. - interface Item { - String getName(); - - @JsonProperty("accounts") - List<? extends ByAccount> getByAccounts(); - } - - interface ByAccount { - String getName(); - - @JsonProperty("regions") - List<? extends ByRegion> getByRegions(); - } - - interface ByRegion { - @JsonProperty("name") - String getName(); - - @JsonProperty("loadBalancers") - List<? extends Details>
getLoadBalancers(); - } - - interface Details { } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Manifest.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Manifest.java deleted file mode 100644 index 0367ed35f73..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Manifest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.model; - -import com.netflix.spinnaker.moniker.Moniker; -import java.util.List; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -public interface Manifest { - Moniker getMoniker(); - String getAccount(); - String getLocation(); - Status getStatus(); - List<Warning> getWarnings(); - - @Data - class Status { - Condition stable = Condition.builder().state(true).build(); - Condition paused = Condition.builder().state(false).build(); - Condition available = Condition.builder().state(true).build(); - Condition failed = Condition.builder().state(false).build(); - - public Status unknown() { - stable = null; - failed = null; - - return this; - } - - public Status failed(String message) { - failed.setMessage(message); - failed.setState(true); - - return this; - } - - public Status unstable(String message) { - stable.setMessage(message); - stable.setState(false); - - return this; - } - - public Status paused(String message) { - paused.setMessage(message); - paused.setState(true); - - return this; - } - - public Status unavailable(String message) { - available.setMessage(message); - available.setState(false); - - return this; - } - - @Data - @Builder - @NoArgsConstructor - @AllArgsConstructor - public static class Condition { - boolean state; - String message; - } - } - - @Data - @Builder - public static class Warning { - private String type; - private String message; - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ManifestProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ManifestProvider.java deleted file mode 100644 index 8615d643a3e..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ManifestProvider.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
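The Manifest.Status deleted above defaults to stable(true)/available(true), and its fluent mutators flip one condition while recording a reason. A short Java usage sketch; the message text is illustrative:

import com.netflix.spinnaker.clouddriver.model.Manifest;

class ManifestStatusSketch {
  static Manifest.Status waitingForReplicas() {
    // unstable(...) records a reason and flips the stable condition to false;
    // the other conditions keep their defaults.
    return new Manifest.Status().unstable("Waiting for all replicas to be ready");
  }
}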
- * - */ - -package com.netflix.spinnaker.clouddriver.model; - -public interface ManifestProvider<T extends Manifest> { - T getManifest(String account, String location, String name); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Network.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Network.groovy deleted file mode 100644 index 19371c97038..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Network.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * A representation of a network - */ -public interface Network { - /** - * The cloud provider associated with this network - * - * @return - */ - String getCloudProvider() - - /** - * The ID associated with this network - * - * @return - */ - String getId() - - /** - * The name for this network - * - * @return - */ - String getName() - - /** - * The account associated with this network - * - * @return - */ - String getAccount() - - /** - * The region associated with this network - * - * @return - */ - String getRegion() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NetworkProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NetworkProvider.groovy deleted file mode 100644 index 44b53b41775..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NetworkProvider.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -public interface NetworkProvider<T extends Network> { - String getCloudProvider() - Set<T> getAll() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopApplicationProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopApplicationProvider.groovy deleted file mode 100644 index 625b2d336fc..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopApplicationProvider.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopApplicationProvider implements ApplicationProvider { - @Override - Set getApplications(boolean expand) { - Collections.emptySet() - } - - @Override - Application getApplication(String name) { - null - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopCloudMetricProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopCloudMetricProvider.groovy deleted file mode 100644 index 8d3119d5d24..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopCloudMetricProvider.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopCloudMetricProvider implements CloudMetricProvider { - - @Override - String getCloudProvider() { - 'noop' - } - - @Override - CloudMetricDescriptor getMetricDescriptor(String account, String region, Map filters) { - null - } - - @Override - List findMetricDescriptors(String account, String region, Map filters) { - Collections.emptySet() - } - - @Override - CloudMetricStatistics getStatistics(String account, String region, String metricName, Map filters, - Long startTime, Long endTime) { - null - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopClusterProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopClusterProvider.groovy deleted file mode 100644 index 5b1422c278f..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopClusterProvider.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
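
One quirk in the removed NoopCloudMetricProvider above: `findMetricDescriptors` is declared to return a List but returns `Collections.emptySet()`, a mismatch Groovy only surfaces at runtime. The type-correct no-op, sketched in Java (parameter types are assumptions, given the stripped generics):

    @Override
    public List<CloudMetricDescriptor> findMetricDescriptors(
        String account, String region, Map<String, String> filters) {
      return Collections.emptyList();  // an empty List, matching the declared return type
    }
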
- */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopClusterProvider implements ClusterProvider { - - @Override - Map> getClusters() { - Collections.emptyMap() - } - - @Override - Map> getClusterDetails(String application) { - Collections.emptyMap() - } - - @Override - Map> getClusterSummaries(String application) { - Collections.emptyMap() - } - - @Override - ServerGroup getServerGroup(String account, String region, String name, boolean includeDetails) { - null - } - - @Override - ServerGroup getServerGroup(String account, String region, String name) { - null - } - - @Override - Set getClusters(String application, String account) { - Collections.emptySet() - } - - @Override - Cluster getCluster(String application, String account, String name, boolean includeDetails) { - null - } - - @Override - Cluster getCluster(String application, String account, String name) { - null - } - - @Override - String getCloudProviderId() { - return "noop" - } - - @Override - boolean supportsMinimalClusters() { - return false - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopElasticIpProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopElasticIpProvider.groovy deleted file mode 100644 index 19c0df54fda..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopElasticIpProvider.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopElasticIpProvider implements ElasticIpProvider { - @Override - Set getAllByAccount(String account) { - Collections.emptySet() - } - - @Override - Set getAllByAccountAndRegion(String account, String region) { - Collections.emptySet() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopInstanceProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopInstanceProvider.groovy deleted file mode 100644 index b51912ebf4c..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopInstanceProvider.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
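
The Noop* providers in this stretch all follow the same contract: collection-valued lookups return empty collections, single-object lookups return null. Callers therefore need a null check only on the singular variants — a consumer-side sketch, where `clusterProvider` stands in for any ClusterProvider and `inspect` is a hypothetical helper:

    Cluster cluster = clusterProvider.getCluster("myapp", "prod", "myapp-main");
    if (cluster != null) {
      inspect(cluster);                                   // singular lookups may be null
    }
    for (Cluster c : clusterProvider.getClusters("myapp", "prod")) {
      inspect(c);                                         // collection lookups never return null
    }
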
- */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopInstanceProvider implements InstanceProvider { - - final String cloudProvider = "none" - - @Override - Instance getInstance(String account, String region, String id) { - null - } - - @Override - String getConsoleOutput(String account, String region, String id) { - null - } - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopInstanceTypeProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopInstanceTypeProvider.groovy deleted file mode 100644 index d4a52cfcfbb..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopInstanceTypeProvider.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopInstanceTypeProvider implements InstanceTypeProvider { - @Override - Set getAll() { - Collections.emptySet() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopKeyPairProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopKeyPairProvider.groovy deleted file mode 100644 index 419eee939a6..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopKeyPairProvider.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopKeyPairProvider implements KeyPairProvider { - @Override - Set getAll() { - Collections.emptySet() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopLoadBalancerProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopLoadBalancerProvider.groovy deleted file mode 100644 index 1daa7d5e7b6..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopLoadBalancerProvider.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopLoadBalancerProvider implements LoadBalancerProvider { - - final String cloudProvider = "noop" - - @Override - List list() { - return Collections.emptyList() - } - - @Override - LoadBalancerProvider.Item get(String name) { - return null - } - - @Override - List byAccountAndRegionAndName(String account, String region, String name) { - return Collections.emptyList() - } - - @Override - Set getApplicationLoadBalancers(String application) { - Collections.emptySet() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopManifestProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopManifestProvider.java deleted file mode 100644 index f9fab41c320..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopManifestProvider.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.model; - -public class NoopManifestProvider implements ManifestProvider { - @Override - public Manifest getManifest(String account, String location, String name) { - return null; - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopNetworkProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopNetworkProvider.groovy deleted file mode 100644 index 9ab027caef3..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopNetworkProvider.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopNetworkProvider implements NetworkProvider { - @Override - String getCloudProvider() { - 'noop' - } - - @Override - Set getAll() { - Collections.emptySet() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopReservationReportProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopReservationReportProvider.groovy deleted file mode 100644 index 92b5224bff5..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopReservationReportProvider.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopReservationReportProvider implements ReservationReportProvider { - @Override - ReservationReport getReservationReport(String name, Map filters) { - return null - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopSecurityGroupProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopSecurityGroupProvider.groovy deleted file mode 100644 index 03598acf42b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopSecurityGroupProvider.groovy +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopSecurityGroupProvider implements SecurityGroupProvider { - - final String cloudProvider = 'noop' - - @Override - Set getAll(boolean includeRules) { - Collections.emptySet() - } - - @Override - Set getAllByRegion(boolean includeRules, String region) { - Collections.emptySet() - } - - @Override - Set getAllByAccount(boolean includeRules, String account) { - Collections.emptySet() - } - - @Override - Set getAllByAccountAndName(boolean includeRules, String account, String name) { - Collections.emptySet() - } - - @Override - Set getAllByAccountAndRegion(boolean includeRules, String account, String region) { - Collections.emptySet() - } - - @Override - SecurityGroup get(String account, String region, String name, String vpcId) { - null - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopSubnetProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopSubnetProvider.groovy deleted file mode 100644 index 040838ceb91..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopSubnetProvider.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -class NoopSubnetProvider implements SubnetProvider { - @Override - String getCloudProvider() { - return null - } - - @Override - Set getAll() { - Collections.emptySet() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ReservationReport.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ReservationReport.groovy deleted file mode 100644 index a7d508b4719..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ReservationReport.groovy +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
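
A small inconsistency in the removed code above: NoopSubnetProvider.getCloudProvider() returns null, where the sibling no-ops identify themselves as 'noop'. The consistent form would have been:

    @Override
    public String getCloudProvider() {
      return "noop";  // matches NoopNetworkProvider, NoopClusterProvider, etc.
    }
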
- */ - -package com.netflix.spinnaker.clouddriver.model - -interface ReservationReport {} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ReservationReportProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ReservationReportProvider.groovy deleted file mode 100644 index 5e723932bd1..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ReservationReportProvider.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface ReservationReportProvider { - T getReservationReport(String name, Map filters) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroupProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroupProvider.groovy deleted file mode 100644 index 1c1651cb37d..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroupProvider.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface SecurityGroupProvider { - - String getCloudProvider() - - Collection getAll(boolean includeRules) - - Collection getAllByRegion(boolean includeRules, String region) - - Collection getAllByAccount(boolean includeRules, String account) - - Collection getAllByAccountAndName(boolean includeRules, String account, String name) - - Collection getAllByAccountAndRegion(boolean includeRule, String account, String region) - - T get(String account, String region, String name, String vpcId) - -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroup.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroup.java deleted file mode 100644 index 76ea496bd39..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroup.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.netflix.spinnaker.clouddriver.documentation.Empty; -import com.netflix.spinnaker.clouddriver.names.NamerRegistry; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static com.fasterxml.jackson.annotation.JsonInclude.Include.NON_NULL; - -/** - * A server group provides a relationship to many instances, and exists within a defined region and one or more zones. - */ -public interface ServerGroup { - /** - * The name of the server group - * - * @return name - */ - String getName(); - - /** - * This resource's moniker - * - * @return - */ - default Moniker getMoniker() { - return NamerRegistry.getDefaultNamer().deriveMoniker(this); - } - - /** - * Some arbitrary identifying type for this server group. May provide vendor-specific identification or data-center awareness to callers. - * @deprecated use #getCloudProvider - * @return type - */ - String getType(); - - /** - * Provider-specific identifier - */ - String getCloudProvider(); - - /** - * The region in which the instances of this server group are known to exist. - * - * @return server group region - */ - String getRegion(); - - /** - * Some vendor-specific indicator that the server group is disabled - * - * @return true if the server group is disabled; false otherwise - */ - Boolean isDisabled(); - - /** - * Timestamp indicating when the server group was created - * - * @return the number of milliseconds after the beginning of time (1 January, 1970 UTC) when - * this server group was created - */ - Long getCreatedTime(); - - /** - * The zones within a region that the instances within this server group occupy. 
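
Pausing on the default getMoniker() above: it delegates to NamerRegistry's default namer, a FriggaReflectiveNamer, so server groups named by Frigga conventions pick up a moniker with no provider support. Illustratively — the exact field mapping comes from the frigga library, not this diff, and `serverGroup` is a hypothetical instance:

    // A Frigga-style name like "myapp-staging-v003" derives roughly:
    //   app -> "myapp", stack -> "staging", sequence -> 3
    Moniker moniker = NamerRegistry.getDefaultNamer().deriveMoniker(serverGroup);
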
- * - * @return zones of a region for which this server group has presence or is capable of having presence, or an empty set if none exist - */ - @Empty - Set getZones(); - - /** - * The concrete instances that comprise this server group - * - * @return set of instances or an empty set if none exist - */ - @Empty - Set getInstances(); - - /** - * The names of the load balancers associated with this server group - * - * @return the set of load balancer names or an empty set if none exist - */ - @Empty - Set getLoadBalancers(); - - /** - * The names of the security groups associated with this server group - * - * @return the set of security group names or an empty set if none exist - */ - @Empty - Set getSecurityGroups(); - - /** - * A collection of attributes describing the launch configuration of this server group - * - * @return a map containing various attributes of the launch configuration - */ - @Empty - Map getLaunchConfig(); - - /** - * A collection of attributes describing the tags of this server group - * - * @return a map containing various tags - */ - @Empty - default Map getTags() { - return null; - } - - /** - * A data structure with the total number of instances, and the number of instances reporting each status - * - * @return a data structure - */ - InstanceCounts getInstanceCounts(); - - /** - * The capacity (in terms of number of instances) required for the server group - * - * @return - */ - Capacity getCapacity(); - - /** - * This represents all images deployed to the server group. For most providers, this will be a singleton. - */ - @JsonIgnore - ImagesSummary getImagesSummary(); - - /** - * An ImageSummary is collection of data related to the build and VM image of the server group. This is merely a view - * of data from other parts of this object. - *
- * Deprecated in favor of getImagesSummary, which is a more generic getImageSummary. - */ - @JsonIgnore - @Deprecated - ImageSummary getImageSummary(); - - default List getServerGroupManagers() { - return new ArrayList<>(); - } - - @Builder - @NoArgsConstructor - @AllArgsConstructor - @Data - static class InstanceCounts { - /** - * Total number of instances in the server group - */ - private Integer total = 0; - /** - * Total number of "Up" instances (all health indicators report "Up" or "Unknown") - */ - private Integer up = 0; - /** - * Total number of "Down" instances (at least one health indicator reports "Down") - */ - private Integer down = 0; - /** - * Total number of "Unknown" instances (all health indicators report "Unknown", or no health indicators reported) - */ - private Integer unknown = 0; - /** - * Total number of "OutOfService" instances (at least one health indicator reports "OutOfService", none are "Down" - */ - private Integer outOfService = 0; - /** - * Total number of "Starting" instances (where any health indicator reports "Starting" and none are "Down" or "OutOfService") - */ - private Integer starting = 0; - } - - @Builder - @NoArgsConstructor - @AllArgsConstructor - @Data - public static class Capacity { - /** - * Minimum number of instances required in this server group. If provider specific {@code ServerGroup} does not have - * a notion of min then this should be same as {@code desired} - */ - private Integer min; - /** - * Max number of instances required in this server group. If provider specific {@code ServerGroup} does not have - * a notion of max then this should be same as {@code desired} - */ - private Integer max; - /** - * Desired number of instances required in this server group - */ - private Integer desired; - } - - /** - * Cloud provider-specific data related to the build and VM image of the server group. - * Deprecated in favor of Images summary - */ - @JsonInclude(NON_NULL) - public static interface ImageSummary extends Summary { - String getServerGroupName(); - - String getImageId(); - - String getImageName(); - - Map getImage(); - - @Empty - Map getBuildInfo(); - } - - /** - * Cloud provider-specific data related to the build and VM image of the server group. - */ - @JsonInclude(NON_NULL) - public static interface ImagesSummary extends Summary { - List getSummaries(); - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Subnet.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Subnet.groovy deleted file mode 100644 index e0ceebe0dff..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Subnet.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
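
Per the Capacity javadoc above, a provider with no native min/max notion should pin both bounds to the desired count. With the Lombok builder from the removed class (`desired` being a hypothetical instance count):

    ServerGroup.Capacity capacity =
        ServerGroup.Capacity.builder().min(desired).max(desired).desired(desired).build();
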
- */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * A representation of a subnet - */ -interface Subnet { - /** - * The cloud provider associated with this subnet - * - * @return - */ - String getType() - - /** - * The ID associated with this subnet - * @return - */ - String getId() - - /** - * The purpose for this subnet. Examples: internal, external, secure, performance, etc - * @return - */ - String getPurpose() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SubnetProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SubnetProvider.groovy deleted file mode 100644 index e181a84b9c9..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SubnetProvider.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -interface SubnetProvider { - String getCloudProvider() - Set getAll() -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Summary.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Summary.groovy deleted file mode 100644 index 2cbf8d816a7..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Summary.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.model - -/** - * Summary objects are views into particular pieces of a server group. Instead of requesting an entire server group - * (including all instances and load balancers and whatnot), clients can request a Summary that contains just the - * information needed. 
- */ -interface Summary {} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/IpRangeRule.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/IpRangeRule.groovy index bcdfba54e1a..a1afb5aa3fa 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/IpRangeRule.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/IpRangeRule.groovy @@ -37,4 +37,9 @@ class IpRangeRule implements Rule { * {@inheritDoc} */ final SortedSet portRanges + + /** + * The description of this rule + */ + final String description } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/names/NamerRegistry.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/names/NamerRegistry.java deleted file mode 100644 index 2c1ad51f67e..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/names/NamerRegistry.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.spinnaker.clouddriver.names; -import com.netflix.spinnaker.moniker.Namer; -import com.netflix.spinnaker.moniker.frigga.FriggaReflectiveNamer; -import lombok.extern.slf4j.Slf4j; -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -/** - * The idea is each provider can register (per-account) based on config naming - * strategy. This assigns a `moniker` to any named resource which is then pushed - * through the rest of Spinnaker and can be handled without prior knowledge of what - * naming strategy was used. This is the only place the mapping from (provider, account, resource) -> namer - * must happen within Spinnaker.
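
The NamerRegistry class that follows resolves a Namer through a chained (provider, account, resource) lookup, each level falling back to a fresh entry or the default namer when nothing was registered. A usage sketch, assuming hypothetical 'aws'/'prod' registrations and a `serverGroup` instance:

    Namer namer = NamerRegistry.lookup()
        .withProvider("aws")
        .withAccount("prod")
        .withResource(ServerGroup.class);  // default FriggaReflectiveNamer if none was set
    Moniker moniker = namer.deriveMoniker(serverGroup);
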
- */ -public class NamerRegistry { - final private List namingStrategies; - private static Namer defaultNamer = new FriggaReflectiveNamer(); - private static ProviderLookup providerLookup = new ProviderLookup(); - - public static Namer getDefaultNamer() { - return defaultNamer; - } - - public static ProviderLookup lookup() { - return providerLookup; - } - - public NamerRegistry(List namingStrategies) { - this.namingStrategies = namingStrategies; - } - - public Namer getNamingStrategy(String strategyName) { - return this.namingStrategies.stream() - .filter(strategy -> strategy.getName().equalsIgnoreCase(strategyName)) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("Could not find naming strategy '" + strategyName + "'")); - } - - @Slf4j - public static class ResourceLookup { - private ConcurrentHashMap map = new ConcurrentHashMap<>(); - - public Namer withResource(Class resource) { - if (!map.containsKey(resource)) { - log.debug("Looking up a namer for a non-registered resource"); - return getDefaultNamer(); - } else { - return map.get(resource); - } - } - - public void setNamer(Class resource, Namer namer) { - map.put(resource, namer); - } - } - - @Slf4j - public static class AccountLookup { - private ConcurrentHashMap map = new ConcurrentHashMap<>(); - - public ResourceLookup withAccount(String accountName) { - if (!map.containsKey(accountName)) { - log.debug("Looking up a namer for a non-registered account"); - ResourceLookup result = new ResourceLookup(); - map.put(accountName, result); - return result; - } else { - return map.get(accountName); - } - } - } - - @Slf4j - public static class ProviderLookup { - private ConcurrentHashMap map = new ConcurrentHashMap<>(); - - public AccountLookup withProvider(String providerName) { - if (!map.containsKey(providerName)) { - log.debug("Looking up a namer for a non-registered provider"); - AccountLookup result = new AccountLookup(); - map.put(providerName, result); - return result; - } else { - return map.get(providerName); - } - } - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistry.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistry.groovy index fc0c5c45977..ba764fbbd29 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistry.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistry.groovy @@ -16,14 +16,18 @@ package com.netflix.spinnaker.clouddriver.orchestration +import com.google.common.base.Splitter import com.netflix.spinnaker.clouddriver.core.CloudProvider import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.GlobalDescriptionValidator import com.netflix.spinnaker.clouddriver.exceptions.CloudProviderNotFoundException -import com.netflix.spinnaker.clouddriver.security.ProviderVersion +import com.netflix.spinnaker.kork.exceptions.UserException import groovy.util.logging.Slf4j import org.springframework.beans.factory.NoSuchBeanDefinitionException import org.springframework.beans.factory.annotation.Autowired +import javax.annotation.Nonnull +import javax.annotation.Nullable import java.lang.annotation.Annotation @Slf4j @@ -32,11 +36,14 @@ class AnnotationsBasedAtomicOperationsRegistry extends ApplicationContextAtomicO @Autowired List 
cloudProviders + @Autowired(required = false) + List globalDescriptionValidators + @Override - AtomicOperationConverter getAtomicOperationConverter(String description, String cloudProvider, ProviderVersion version) { + AtomicOperationConverter getAtomicOperationConverter(String description, String cloudProvider) { // Legacy naming convention which is not generic and description name is specific to cloud provider try { - AtomicOperationConverter converter = super.getAtomicOperationConverter(description, cloudProvider, version) + AtomicOperationConverter converter = super.getAtomicOperationConverter(description, cloudProvider) if (converter) return converter } catch (NoSuchBeanDefinitionException e) { /** @@ -51,18 +58,21 @@ class AnnotationsBasedAtomicOperationsRegistry extends ApplicationContextAtomicO } } + // Operations can be versioned + VersionedDescription versionedDescription = VersionedDescription.from(description) + Class providerAnnotationType = getCloudProviderAnnotation(cloudProvider) List converters = applicationContext.getBeansWithAnnotation(providerAnnotationType).findAll { key, value -> - value.getClass().getAnnotation(providerAnnotationType).value() == description && - value instanceof AtomicOperationConverter + VersionedDescription converterVersion = VersionedDescription.from(value.getClass().getAnnotation(providerAnnotationType).value()) + converterVersion.descriptionName == versionedDescription.descriptionName && value instanceof AtomicOperationConverter }.values().toList() - converters = VersionedOperationHelper.findVersionMatches(version, converters) + converters = VersionedOperationHelper.findVersionMatches(versionedDescription.version, converters) if (!converters) { throw new AtomicOperationConverterNotFoundException( - "No atomic operation converter found for description '${description}' and cloud provider '${cloudProvider}'. " + + "No atomic operation converter found for description '${description}' and cloud provider '${cloudProvider}'. " + "It is possible that either 1) the account name used for the operation is incorrect, or 2) the account name used for the operation is unhealthy/unable to communicate with ${cloudProvider}." 
) } @@ -70,7 +80,7 @@ class AnnotationsBasedAtomicOperationsRegistry extends ApplicationContextAtomicO if (converters.size() > 1) { throw new RuntimeException( "More than one (${converters.size()}) atomic operation converters found for description '${description}' and cloud provider " + - "'${cloudProvider}' at version '${version}'" + "'${cloudProvider}'" ) } @@ -78,12 +88,12 @@ class AnnotationsBasedAtomicOperationsRegistry extends ApplicationContextAtomicO } @Override - DescriptionValidator getAtomicOperationDescriptionValidator(String validator, String cloudProvider, ProviderVersion version) { + DescriptionValidator getAtomicOperationDescriptionValidator(String validator, String cloudProvider) { // Legacy naming convention which is not generic and validator name is specific to cloud provider try { - DescriptionValidator descriptionValidator = super.getAtomicOperationDescriptionValidator(validator, cloudProvider, version) + DescriptionValidator descriptionValidator = super.getAtomicOperationDescriptionValidator(validator, cloudProvider) if (descriptionValidator) { - return descriptionValidator + return new CompositeDescriptionValidator(DescriptionValidator.getOperationName(validator), cloudProvider, descriptionValidator, globalDescriptionValidators) } } catch (NoSuchBeanDefinitionException e) {} @@ -93,12 +103,12 @@ class AnnotationsBasedAtomicOperationsRegistry extends ApplicationContextAtomicO List validators = applicationContext.getBeansWithAnnotation(providerAnnotationType).findAll { key, value -> DescriptionValidator.getValidatorName(value.getClass().getAnnotation(providerAnnotationType).value()) == validator && - value instanceof DescriptionValidator + value instanceof DescriptionValidator }.values().toList() - validators = VersionedOperationHelper.findVersionMatches(version, validators) + DescriptionValidator descriptionValidator = validators ? (DescriptionValidator) validators[0] : null - return validators ? 
(DescriptionValidator) validators[0] : null + return new CompositeDescriptionValidator(DescriptionValidator.getOperationName(validator), cloudProvider, descriptionValidator, globalDescriptionValidators); } protected Class getCloudProviderAnnotation(String cloudProvider) { @@ -114,4 +124,30 @@ class AnnotationsBasedAtomicOperationsRegistry extends ApplicationContextAtomicO cloudProviderInstances[0].getOperationAnnotationType() } + private static class VersionedDescription { + + private final static SPLITTER = Splitter.on("@") + + @Nonnull String descriptionName + @Nullable String version + + VersionedDescription(String descriptionName, String version) { + this.descriptionName = descriptionName + this.version = version + } + + static VersionedDescription from(String descriptionName) { + if (descriptionName.contains("@")) { + List parts = SPLITTER.splitToList(descriptionName) + if (parts.size() != 2) { + throw new UserException("Versioned descriptions must follow '{description}@{version}' format") + } + + return new VersionedDescription(parts[0], parts[1]) + } else { + return new VersionedDescription(descriptionName, null) + } + } + } + } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/ApplicationContextAtomicOperationsRegistry.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/ApplicationContextAtomicOperationsRegistry.groovy index 88780d8f52f..2f51493f0d7 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/ApplicationContextAtomicOperationsRegistry.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/ApplicationContextAtomicOperationsRegistry.groovy @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.orchestration import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.security.ProviderVersion import org.springframework.beans.factory.annotation.Autowired import org.springframework.context.ApplicationContext @@ -32,22 +31,12 @@ class ApplicationContextAtomicOperationsRegistry implements AtomicOperationsRegi ApplicationContext applicationContext @Override - AtomicOperationConverter getAtomicOperationConverter(String description, String cloudProvider, ProviderVersion version) { - def result = (AtomicOperationConverter) applicationContext.getBean(description) - if (!result.acceptsVersion(version)) { - throw new AtomicOperationConverterNotFoundException("Converter version mismatch. Converter '$description' not applicable for '$version'") - } - - return result + AtomicOperationConverter getAtomicOperationConverter(String description, String cloudProvider) { + return (AtomicOperationConverter) applicationContext.getBean(description) } @Override - DescriptionValidator getAtomicOperationDescriptionValidator(String validator, String cloudProvider, ProviderVersion version) { - def result = (DescriptionValidator) applicationContext.getBean(validator) - if (!result.acceptsVersion(version)) { - throw new AtomicOperationConverterNotFoundException("Validator version mismatch. 
Validator '$validator' not applicable for '$version'") - } - - return result + DescriptionValidator getAtomicOperationDescriptionValidator(String validator, String cloudProvider) { + return (DescriptionValidator) applicationContext.getBean(validator) } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperation.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperation.java deleted file mode 100644 index 5343a13d7de..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperation.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.orchestration; - -import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -/** - * An AtomicOperation is the most fundamental, low-level unit of work in a workflow. Implementations of this interface - * should perform the simplest form of work possible, often described by a description object (like {@link com.netflix.spinnaker.clouddriver.deploy.DeployDescription}) - */ -public interface AtomicOperation { - /** - * This method will initiate the operation's work. In this, operations can get a handle on prior output results - * from the required method argument. - * - * @param priorOutputs - * @return parameterized type - */ - R operate(List priorOutputs); - - default Collection getEvents() { - return Collections.emptyList(); - } -} - diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverter.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverter.groovy deleted file mode 100644 index cf69a307495..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverter.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.orchestration - -import com.netflix.spinnaker.clouddriver.security.ProviderVersion - -/** - * Implementations of this trait will provide an object capable of converting a Map of input parameters to an - * operation's description object and an {@link AtomicOperation} instance.
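
Stepping back to the VersionedDescription helper introduced above: it formalizes a '{description}@{version}' naming convention for operation lookups. Its parsing behavior, sketched (illustrative only — the class is a private detail of the registry):

    // "deployManifest@v2" -> descriptionName "deployManifest", version "v2"
    // "deployManifest"    -> descriptionName "deployManifest", version null
    // "a@b@c"             -> UserException: must follow '{description}@{version}'
    VersionedDescription parsed = VersionedDescription.from("deployManifest@v2");
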
- */ -trait AtomicOperationConverter implements VersionedCloudProviderOperation { - /** - * This method takes a Map input and converts it to an {@link AtomicOperation} instance. - * - * @param input - * @return atomic operation - */ - abstract AtomicOperation convertOperation(Map input) - - /** - * This method takes a Map input and creates a description object, that will often be used by an {@link AtomicOperation}. - * - * @param input - * @return instance of an operation description object - */ - abstract Object convertDescription(Map input) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverterNotFoundException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverterNotFoundException.groovy deleted file mode 100644 index e010c2eef8b..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverterNotFoundException.groovy +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.orchestration - -import groovy.transform.InheritConstructors -import org.springframework.http.HttpStatus -import org.springframework.web.bind.annotation.ResponseStatus - -@ResponseStatus(value = HttpStatus.BAD_REQUEST) -@InheritConstructors -class AtomicOperationConverterNotFoundException extends RuntimeException {} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationException.groovy deleted file mode 100644 index 0906e768b43..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationException.groovy +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
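
A Java-flavored sketch of what implementing the removed AtomicOperationConverter trait looked like; MyDeployConverter, MyDeployOperation, MyDeployDescription, and the objectMapper field are all hypothetical:

    public class MyDeployConverter implements AtomicOperationConverter {
      private final ObjectMapper objectMapper = new ObjectMapper();

      @Override
      public AtomicOperation convertOperation(Map input) {
        // wrap the typed description in the operation that will execute it
        return new MyDeployOperation((MyDeployDescription) convertDescription(input));
      }

      @Override
      public Object convertDescription(Map input) {
        // bind the raw input map onto the typed description object
        return objectMapper.convertValue(input, MyDeployDescription.class);
      }
    }
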
- */ - -package com.netflix.spinnaker.clouddriver.orchestration - -import com.netflix.spinnaker.kork.web.exceptions.HasAdditionalAttributes -import org.springframework.http.HttpStatus -import org.springframework.web.bind.annotation.ResponseStatus - -@ResponseStatus(HttpStatus.BAD_REQUEST) -class AtomicOperationException extends RuntimeException implements HasAdditionalAttributes { - List errors - - AtomicOperationException(String message, List errors) { - super(message) - this.errors = errors - } - - @Override - Map getAdditionalAttributes() { - return errors ? ["errors": errors] : [:] - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationNotFoundException.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationNotFoundException.groovy deleted file mode 100644 index 1f0eec2b1ac..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationNotFoundException.groovy +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.orchestration - -import groovy.transform.InheritConstructors -import org.springframework.http.HttpStatus -import org.springframework.web.bind.annotation.ResponseStatus - -@ResponseStatus(value = HttpStatus.BAD_REQUEST, reason = "Could not find a suitable converter for supplied type.") -@InheritConstructors -class AtomicOperationNotFoundException extends RuntimeException {} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperations.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperations.java deleted file mode 100644 index fc08322e1db..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperations.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
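
Because the removed AtomicOperationException above implements HasAdditionalAttributes, its errors list rides along in the HTTP 400 response body. A behavior sketch, straight from the deleted getAdditionalAttributes logic:

    AtomicOperationException ex =
        new AtomicOperationException("Validation failed", List.of("name is required"));
    ex.getAdditionalAttributes();                       // => {errors=[name is required]}
    new AtomicOperationException("Failed", null)
        .getAdditionalAttributes();                     // => {} when no errors were given
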
- */ - -package com.netflix.spinnaker.clouddriver.orchestration; - -/** - * A class that holds the names of ALL the kato operations as constants - * - */ -public final class AtomicOperations { - - // Server Group operations - public static final String CLONE_SERVER_GROUP = "cloneServerGroup"; - public static final String CREATE_SERVER_GROUP = "createServerGroup"; - public static final String DISABLE_SERVER_GROUP = "disableServerGroup"; - public static final String ENABLE_SERVER_GROUP = "enableServerGroup"; - public static final String DESTROY_SERVER_GROUP = "destroyServerGroup"; - public static final String RESIZE_SERVER_GROUP = "resizeServerGroup"; - public static final String UPSERT_SERVER_GROUP_TAGS = "upsertServerGroupTags"; - public static final String UPDATE_LAUNCH_CONFIG = "updateLaunchConfig"; - public static final String UPSERT_SCALING_POLICY = "upsertScalingPolicy"; - public static final String DELETE_SCALING_POLICY = "deleteScalingPolicy"; - public static final String MIGRATE_SERVER_GROUP = "migrateServerGroup"; - public static final String MIGRATE_CLUSTER_CONFIGURATIONS = "migrateClusterConfigurations"; - public static final String START_SERVER_GROUP = "startServerGroup"; - public static final String STOP_SERVER_GROUP = "stopServerGroup"; - - // Instance operations - public static final String REBOOT_INSTANCES = "rebootInstances"; - public static final String TERMINATE_INSTANCES = "terminateInstances"; - public static final String TERMINATE_INSTANCE_AND_DECREMENT = "terminateInstanceAndDecrementServerGroup"; - public static final String ATTACH_CLASSIC_LINK_VPC = "attachClassicLinkVpc"; - public static final String REGISTER_INSTANCES_WITH_LOAD_BALANCER = "registerInstancesWithLoadBalancer"; - public static final String DEREGISTER_INSTANCES_FROM_LOAD_BALANCER = "deregisterInstancesFromLoadBalancer"; - public static final String ENABLE_INSTANCES_IN_DISCOVERY = "enableInstancesInDiscovery"; - public static final String DISABLE_INSTANCES_IN_DISCOVERY = "disableInstancesInDiscovery"; - public static final String UPDATE_INSTANCES = "updateInstances"; - public static final String DETACH_INSTANCES = "detachInstances"; - - // Load Balancer operations - public static final String DELETE_LOAD_BALANCER = "deleteLoadBalancer"; - public static final String UPSERT_LOAD_BALANCER = "upsertLoadBalancer"; - public static final String MIGRATE_LOAD_BALANCER = "migrateLoadBalancer"; - - // Security Group operations - public static final String DELETE_SECURITY_GROUP = "deleteSecurityGroup"; - public static final String UPSERT_SECURITY_GROUP = "upsertSecurityGroup"; - public static final String MIGRATE_SECURITY_GROUP = "migrateSecurityGroup"; - - // JobStatus operations - public static final String RUN_JOB = "runJob"; - public static final String DESTROY_JOB = "destroyJob"; - public static final String CLONE_JOB = "cloneJob"; - - // Image operations - public static final String UPSERT_IMAGE_TAGS = "upsertImageTags"; - - // Snapshot operations - public static final String SAVE_SNAPSHOT = "saveSnapshot"; - public static final String RESTORE_SNAPSHOT = "restoreSnapshot"; - - // Manifest operations - public static final String DEPLOY_MANIFEST = "deployManifest"; - public static final String DELETE_MANIFEST = "deleteManifest"; - public static final String SCALE_MANIFEST = "scaleManifest"; - public static final String PATCH_MANIFEST = "patchManifest"; - public static final String PAUSE_ROLLOUT_MANIFEST = "pauseRolloutManifest"; - public static final String RESUME_ROLLOUT_MANIFEST = "resumeRolloutManifest"; - 
public static final String UNDO_ROLLOUT_MANIFEST = "undoRolloutManifest"; - - // Artifact operations - public static final String CLEANUP_ARTIFACTS = "cleanupArtifacts"; - - // Image operations - public static final String DEREGISTER_IMAGE = "deleteImage"; -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationsRegistry.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationsRegistry.groovy index 7369975c5d6..89e8cc79902 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationsRegistry.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationsRegistry.groovy @@ -17,7 +17,8 @@ package com.netflix.spinnaker.clouddriver.orchestration import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.security.ProviderVersion + +import javax.annotation.Nullable /** * A registry which does a lookup of AtomicOperationConverters and DescriptionValidators based on their names and @@ -30,17 +31,15 @@ interface AtomicOperationsRegistry { * * @param description * @param cloudProvider - * @param providerVersion * @return */ - AtomicOperationConverter getAtomicOperationConverter(String description, String cloudProvider, ProviderVersion version) + AtomicOperationConverter getAtomicOperationConverter(String description, String cloudProvider) /** * * @param validator * @param cloudProvider - * @param providerVersion * @return */ - DescriptionValidator getAtomicOperationDescriptionValidator(String validator, String cloudProvider, ProviderVersion version) + @Nullable DescriptionValidator getAtomicOperationDescriptionValidator(String validator, String cloudProvider) } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessor.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessor.groovy index 32056db7768..f4c17ea9e95 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessor.groovy +++ b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessor.groovy @@ -16,18 +16,25 @@ package com.netflix.spinnaker.clouddriver.orchestration +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.common.util.concurrent.ThreadFactoryBuilder import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.event.exceptions.DuplicateEventAggregateException import com.netflix.spinnaker.clouddriver.metrics.TimedCallable import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEventHandler -import com.netflix.spinnaker.security.AuthenticatedRequest +import com.netflix.spinnaker.kork.api.exceptions.ExceptionSummary +import com.netflix.spinnaker.kork.web.context.RequestContextProvider +import com.netflix.spinnaker.kork.web.exceptions.ExceptionSummaryService +import groovy.transform.Canonical import groovy.util.logging.Slf4j -import org.slf4j.MDC -import org.springframework.beans.factory.annotation.Autowired import 
org.springframework.context.ApplicationContext +import javax.annotation.Nonnull +import javax.annotation.Nullable import java.util.concurrent.ExecutorService import java.util.concurrent.SynchronousQueue import java.util.concurrent.ThreadPoolExecutor @@ -42,37 +49,61 @@ class DefaultOrchestrationProcessor implements OrchestrationProcessor { protected ExecutorService executorService = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, - new SynchronousQueue()) { + new SynchronousQueue(), + new ThreadFactoryBuilder().setNameFormat(DefaultOrchestrationProcessor.class.getSimpleName() + "-%d").build()) { @Override protected void afterExecute(Runnable r, Throwable t) { - resetMDC() + clearRequestContext() super.afterExecute(r, t) } } - @Autowired - TaskRepository taskRepository + private final TaskRepository taskRepository + private final ApplicationContext applicationContext + private final Registry registry + private final Collection<OperationEventHandler> operationEventHandlers + private final ObjectMapper objectMapper + private final ExceptionClassifier exceptionClassifier + private final RequestContextProvider contextProvider + private final ExceptionSummaryService exceptionSummaryService - @Autowired - ApplicationContext applicationContext - - @Autowired - Registry registry - - @Autowired(required = false) - Collection<OperationEventHandler> operationEventHandlers = [] + DefaultOrchestrationProcessor( + TaskRepository taskRepository, + ApplicationContext applicationContext, + Registry registry, + Optional<Collection<OperationEventHandler>> operationEventHandlers, + ObjectMapper objectMapper, + ExceptionClassifier exceptionClassifier, + RequestContextProvider contextProvider, + ExceptionSummaryService exceptionSummaryService + ) { + this.taskRepository = taskRepository + this.applicationContext = applicationContext + this.registry = registry + this.operationEventHandlers = operationEventHandlers.orElse([]) + this.objectMapper = objectMapper + this.exceptionClassifier = exceptionClassifier + this.contextProvider = contextProvider + this.exceptionSummaryService = exceptionSummaryService + } @Override - Task process(List<AtomicOperation> atomicOperations, String clientRequestId) { + Task process(@Nullable String cloudProvider, + @Nonnull List<AtomicOperation> atomicOperations, + @Nonnull String clientRequestId) { + def orchestrationsId = registry.createId('orchestrations').withTag("cloudProvider", cloudProvider ?: "unknown") + def atomicOperationId = registry.createId('operations').withTag("cloudProvider", cloudProvider ?: "unknown") + def tasksId = registry.createId('tasks').withTag("cloudProvider", cloudProvider ?: "unknown") - def orchestrationsId = registry.createId('orchestrations') - def atomicOperationId = registry.createId('operations') - def tasksId = registry.createId('tasks') - def existingTask = taskRepository.getByClientRequestId(clientRequestId) - if (existingTask) { - return existingTask + // Get the task (either an existing one, or a new one). If the task already exists, `shouldExecute` will be false + // if the task is in a failed state and the failure is not retryable.
+ def result = getTask(clientRequestId) + def task = result.task + if (!result.shouldExecute) { + log.debug("task with id {} has the shouldExecute flag set to false - not executing the task", task.getId()) + return task } - def task = taskRepository.create(TASK_PHASE, "Initializing Orchestration Task...", clientRequestId) + def operationClosure = { try { // Autowire the atomic operations @@ -93,17 +124,27 @@ class DefaultOrchestrationProcessor implements OrchestrationProcessor { try { it.handle(event) } catch (e) { + log.warn("Error handling event (${event}): ${atomicOperation.class.simpleName}", e) task.updateStatus TASK_PHASE, "Error handling event (${event}): ${atomicOperation.class.simpleName} | ${e.class.simpleName}: [${e.message}]" } } } - task.updateStatus(TASK_PHASE, "Orchestration completed.") + if (task.status?.isFailed()) { + task.updateStatus(TASK_PHASE, "Orchestration completed with errors, see prior task logs.") + } else { + task.updateStatus(TASK_PHASE, "Orchestration completed.") + } }.call() } catch (AtomicOperationException e) { task.updateStatus TASK_PHASE, "Orchestration failed: ${atomicOperation.class.simpleName} | ${e.class.simpleName}: [${e.errors.join(', ')}]" - task.addResultObjects([[type: "EXCEPTION", operation: atomicOperation.class.simpleName, cause: e.class.simpleName, message: e.errors.join(", ")]]) - task.fail() + task.addResultObjects([extractExceptionSummary(e, e.errors.join(", "), [operation: atomicOperation.class.simpleName])]) + failTask(task, e) + } catch (DuplicateEventAggregateException e) { + // In this case, we can safely assume that the atomic operation is being run elsewhere and can just return + // the existing task. + log.warn("Received duplicate event aggregate: Indicative of receiving the same operation twice. Noop'ing and returning the task pointer", e) + return getTask(clientRequestId) } catch (e) { def message = e.message def stringWriter = new StringWriter() @@ -114,10 +155,10 @@ class DefaultOrchestrationProcessor implements OrchestrationProcessor { message = stackTrace } task.updateStatus TASK_PHASE, "Orchestration failed: ${atomicOperation.class.simpleName} | ${e.class.simpleName}: [${message}]" - task.addResultObjects([[type: "EXCEPTION", operation: atomicOperation.class.simpleName, cause: e.class.simpleName, message: message]]) + task.addResultObjects([extractExceptionSummary(e, message, [operation: atomicOperation.class.simpleName])]) log.error(stackTrace) - task.fail() + failTask(task, e) } } task.addResultObjects(results.findResults { it }) @@ -129,15 +170,15 @@ class DefaultOrchestrationProcessor implements OrchestrationProcessor { registry.counter(tasksId.withTag("success", "false").withTag("cause", e.class.simpleName)).increment() if (e instanceof TimeoutException) { task.updateStatus "INIT", "Orchestration timed out." 
- task.addResultObjects([[type: "EXCEPTION", cause: e.class.simpleName, message: "Orchestration timed out."]]) - task.fail() + task.addResultObjects([extractExceptionSummary(e, "Orchestration timed out.")]) + failTask(task, e) } else { def stringWriter = new StringWriter() def printWriter = new PrintWriter(stringWriter) e.printStackTrace(printWriter) task.updateStatus("INIT", "Unknown failure -- ${stringWriter.toString()}") - task.addResultObjects([[type: "EXCEPTION", cause: e.class.simpleName, message: "Failed for unknown reason."]]) - task.fail() + task.addResultObjects([extractExceptionSummary(e, "Failed for unknown reason.")]) + failTask(task, e) } } finally { if (!task.status?.isCompleted()) { @@ -157,17 +198,75 @@ class DefaultOrchestrationProcessor implements OrchestrationProcessor { } /** - * Ensure that the Spinnaker-related MDC values are cleared. + * Ensure that the Spinnaker-related context values are cleared. * - * This is particularly important for the inheritable MDC variables that are commonly to transmit the auth context. + * This is particularly important for the inheritable values that are used to transmit the auth context. */ - static void resetMDC() { + void clearRequestContext() { try { - MDC.remove(AuthenticatedRequest.SPINNAKER_USER) - MDC.remove(AuthenticatedRequest.SPINNAKER_ACCOUNTS) - MDC.remove(AuthenticatedRequest.SPINNAKER_EXECUTION_ID) + def context = contextProvider.get() + context.setUser(null) + context.setAccounts(null as String) + context.setExecutionId(null) } catch (Exception e) { - log.error("Unable to clear thread locals, reason: ${e.message}") + log.error("Unable to clear request context", e) + } + } + + /** + * For backwards compatibility. + * + * TODO(rz): Not 100% sure we should keep these two methods. + */ + Map extractExceptionSummary(Throwable e, String userMessage) { + ExceptionSummary summary = exceptionSummaryService.summary(e) + Map map = objectMapper.convertValue(summary, Map) + map["message"] = userMessage + map["type"] = "EXCEPTION" + return map + } + + /** + * For backwards compatibility. + * + * TODO(rz): Add "additionalFields" to ExceptionSummary? + */ + Map extractExceptionSummary(Throwable e, String userMessage, Map additionalFields) { + Map summary = extractExceptionSummary(e, userMessage) + summary.putAll(additionalFields) + return summary + } + + @Nonnull + private GetTaskResult getTask(String clientRequestId) { + def existingTask = taskRepository.getByClientRequestId(clientRequestId) + if (existingTask) { + if (!existingTask.isRetryable()) { + return new GetTaskResult(existingTask, false) + } + existingTask.updateStatus(TASK_PHASE, "Re-initializing Orchestration Task (failure is retryable)") + existingTask.retry() + existingTask.updateOwnerId(ClouddriverHostname.ID, TASK_PHASE) + return new GetTaskResult(existingTask, true) } + return new GetTaskResult( + taskRepository.create(TASK_PHASE, "Initializing Orchestration Task", clientRequestId), + true + ) + } + + private void failTask(@Nonnull Task task, @Nonnull Exception e) { + if (task.hasSagaIds()) { + task.fail(exceptionClassifier.isRetryable(e)) + } else { + // Tasks that are not Saga-backed are automatically assumed to not be retryable. 
+ task.fail(false) + } + } + + @Canonical + private static class GetTaskResult { + Task task + boolean shouldExecute } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/OrchestrationProcessor.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/OrchestrationProcessor.groovy deleted file mode 100644 index 565e1e2675f..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/OrchestrationProcessor.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.orchestration - -import com.netflix.spinnaker.clouddriver.data.task.Task - -/** - * Implementations of this interface should perform orchestration of operations in a workflow. Often will be used in - * conjunction with {@link AtomicOperation} instances. - * - * - */ -public interface OrchestrationProcessor { - - /** - * This is the invocation point of orchestration. - * @param key a unique key, used to de-dupe orchestration requests - * @return a list of results - */ - Task process(List<AtomicOperation> atomicOperations, String key) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/VersionedCloudProviderOperation.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/VersionedCloudProviderOperation.groovy deleted file mode 100644 index 93c1a251557..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/VersionedCloudProviderOperation.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.orchestration - -import com.netflix.spinnaker.clouddriver.security.ProviderVersion - -trait VersionedCloudProviderOperation { - /** - * Various operations can satisfy different provider's versions. This operation will only be applicable to accounts - * at this version. - * - * @return true i.f.f.
this operation works on accounts at this version - */ - boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v1 - } -} \ No newline at end of file diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/ApplicationSearchProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/ApplicationSearchProvider.groovy deleted file mode 100644 index 84b38759145..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/ApplicationSearchProvider.groovy +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.search - -import com.google.common.collect.ImmutableList -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator -import groovy.transform.Canonical -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.security.core.Authentication -import org.springframework.security.core.context.SecurityContextHolder - -@Canonical -class ApplicationSearchProvider implements SearchProvider { - private final String APPLICATIONS_TYPE = "applications" - - Front50Service front50Service - - @Autowired(required = false) - FiatPermissionEvaluator permissionEvaluator - - @Override - String getPlatform() { - return "front50" - } - - @Override - SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { - return search(query, [APPLICATIONS_TYPE], pageNumber, pageSize, Collections.emptyMap()) - } - - @Override - SearchResultSet search(String query, Integer pageNumber, Integer pageSize, Map<String, String> filters) { - return search(query, [APPLICATIONS_TYPE], pageNumber, pageSize, filters) - } - - @Override - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize) { - return search(query, types, pageNumber, pageSize, Collections.emptyMap()) - } - - @Override - SearchResultSet search(String query, List<String> types, - Integer pageNumber, - Integer pageSize, - Map<String, String> filters) { - if (!types.contains(APPLICATIONS_TYPE)) { - return new SearchResultSet(totalMatches: 0) - } - - Authentication auth = SecurityContextHolder.context.authentication - - def results = front50Service.searchByName(query, pageSize, filters).findResults { - def application = it.name.toString().toLowerCase() - if (permissionEvaluator && !permissionEvaluator.hasPermission(auth, application, 'APPLICATION', 'READ')) { - return null - } - it.application = application - it.type = APPLICATIONS_TYPE - it.url = "/applications/${it.application}".toString() - - return it - } - return new SearchResultSet(results.size(), pageNumber, pageSize, getPlatform(), query, results) - } - - @Override - List<String> excludedFilters() { - return ImmutableList.of("cloudProvider") - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/NoopSearchProvider.groovy
b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/NoopSearchProvider.groovy deleted file mode 100644 index 89ecb06a5e7..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/NoopSearchProvider.groovy +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.search - -class NoopSearchProvider implements SearchProvider { - - @Override - String getPlatform() { - "noop" - } - - @Override - SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { - empty(query, pageNumber, pageSize) - } - - @Override - SearchResultSet search(String query, Integer pageNumber, Integer pageSize, Map<String, String> filters) { - empty(query, pageNumber, pageSize) - } - - @Override - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize) { - empty(query, pageNumber, pageSize) - } - - @Override - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize, Map<String, String> filters) { - empty(query, pageNumber, pageSize) - } - - private static SearchResultSet empty(String query, Integer pageNumber, Integer pageSize) { - new SearchResultSet(totalMatches: 0, platform: "noop", pageNumber: pageNumber, pageSize: pageSize, query: query) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/ProjectSearchProvider.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/ProjectSearchProvider.groovy deleted file mode 100644 index 4b413137863..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/ProjectSearchProvider.groovy +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.search - -import com.google.common.collect.ImmutableList -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import groovy.transform.Canonical - -@Canonical -class ProjectSearchProvider implements SearchProvider { - private final String PROJECTS_TYPE = "projects" - - Front50Service front50Service - - @Override - String getPlatform() { - return "front50" - } - - @Override - SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { - return search(query, [PROJECTS_TYPE], pageNumber, pageSize, Collections.emptyMap()) - } - - @Override - SearchResultSet search(String query, Integer pageNumber, Integer pageSize, Map<String, String> filters) { - return search(query, [PROJECTS_TYPE], pageNumber, pageSize, filters) - } - - @Override - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize) { - return search(query, types, pageNumber, pageSize, Collections.emptyMap()) - } - - @Override - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize, Map<String, String> filters) { - if (!types.contains(PROJECTS_TYPE)) { - return new SearchResultSet(totalMatches: 0) - } - - def projects = front50Service.searchForProjects([ name: query, applications: query ] << filters, pageSize) as List - def results = (projects ?: []).collect { Map project -> - project.type = PROJECTS_TYPE - project.url = "/projects/${project.id}".toString() - return project - } as List - - return new SearchResultSet(results.size(), pageNumber, pageSize, getPlatform(), query, results) - } - - @Override - List<String> excludedFilters() { - return ImmutableList.of("cloudProvider") - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/SearchProvider.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/SearchProvider.java deleted file mode 100644 index de9d05231cb..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/SearchProvider.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.search; - -import com.google.common.collect.ImmutableList; - -import java.util.List; -import java.util.Map; - -/** - * A Searchable component provides a mechanism to query for a collection of items - */ -public interface SearchProvider { - /** - * Returns the platform the search provider services - * - * @return a String, e.g.
'aws', 'gce' - */ - String getPlatform(); - - /** - * Finds all matching items for the provided query - * - * @param query a query string - * @param pageNumber page index (1-based) of the result set - * @param pageSize number of items per page - * @return a list of matched items - */ - SearchResultSet search(String query, Integer pageNumber, Integer pageSize); - - /** - * Finds all matching items for the provided query, filtered by the supplied filters - * - * @param query a query string - * @param pageNumber page index (1-based) of the result set - * @param pageSize number of items per page - * @param filters a map of inclusive filters - * @return a list of matched items - */ - SearchResultSet search(String query, Integer pageNumber, Integer pageSize, Map<String, String> filters); - - /** - * Finds all matching items for the provided query and type - * - * @param query a query string - * @param types the types of items to search for - * @param pageNumber page index (1-based) of the result set - * @param pageSize number of items per page - * @return a list of matched items - */ - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize); - - /** - * Finds all matching items for the provided query and type, filtered by the supplied filters - * - * @param query a query string - * @param types the types of items to search for - * @param pageNumber page index (1-based) of the result set - * @param pageSize number of items per page - * @param filters a map of inclusive filters - * @return a list of matched items - */ - SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize, Map<String, String> filters); - - /** - * Provides a list of filter keys to be removed prior to searching - * @return a list of filter keys to optionally be removed prior to searching - */ - default List<String> excludedFilters() { - return ImmutableList.of(); - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/SearchResultSet.java b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/SearchResultSet.java deleted file mode 100644 index d7f8c036198..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/search/SearchResultSet.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.search; - -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -@Data -@NoArgsConstructor -@AllArgsConstructor -@Builder -public class SearchResultSet { - /** - * The total number of items matching the search criteria (query, platform, and type) - */ - Integer totalMatches; - - /** - * The page index (1-based) of the result set - */ - Integer pageNumber; - - /** - * The number of items per page - */ - Integer pageSize; - - /** - * The platform of results the provider supplies - e.g. "aws", "gce", etc. - */ - String platform; - - /** - * The original query string, used to sort results - */ - String query; - - /** - * The paginated list of objects matching the query - */ - List<Map<String, Object>> results = new ArrayList<>(); -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsSupport.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsSupport.groovy deleted file mode 100644 index ce41095c421..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsSupport.groovy +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.security - -import com.fasterxml.jackson.databind.DeserializationFeature -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter -import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException -import groovy.transform.InheritConstructors -import org.springframework.beans.factory.annotation.Autowired - -abstract class AbstractAtomicOperationsCredentialsSupport implements AtomicOperationConverter { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - ObjectMapper objectMapper - - @Autowired - public void setObjectMapper(ObjectMapper objectMapper) { - this.objectMapper = objectMapper - .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) - .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) - - } - - def <T extends AccountCredentials> T getCredentialsObject(String name) { - if (name == null) { - throw new InvalidRequestException("credential name is required") - } - T credential - try { - def repoCredential = accountCredentialsProvider.getCredentials(name) - if (repoCredential == null) { - throw new NullPointerException() - } - credential = (T) repoCredential - } catch (Exception e) { - throw new InvalidRequestException("credential not found (name: ${name}, names: ${accountCredentialsProvider.getAll()*.name})", e) - } - - return credential - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/tags/EntityTagger.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/tags/EntityTagger.groovy deleted file mode 100644 index b1707fb48f7..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/tags/EntityTagger.groovy +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.tags - -import com.netflix.spinnaker.clouddriver.model.EntityTags; - -/** - * Provides a mechanism for attaching arbitrary metadata to resources within cloud providers.
- */ -interface EntityTagger { - public static final String ENTITY_TYPE_SERVER_GROUP = "servergroup" - public static final String ENTITY_TYPE_CLUSTER = "cluster" - - void alert(String cloudProvider, - String accountId, - String region, - String category, - String entityType, - String entityId, - String key, - String value, - Long timestamp) - - void notice(String cloudProvider, - String accountId, - String region, - String category, - String entityType, - String entityId, - String key, - String value, - Long timestamp) - - void tag(String cloudProvider, - String accountId, - String region, - String namespace, - String entityType, - String entityId, - String tagName, - Object value, - Long timestamp) - - Collection<EntityTags> taggedEntities(String cloudProvider, - String accountId, - String entityType, - String tagName, - int maxResults) - - void deleteAll(String cloudProvider, - String accountId, - String region, - String entityType, - String entityId) - - void delete(String cloudProvider, - String accountId, - String region, - String entityType, - String entityId, - String tagName) -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/CloudDriverConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/CloudDriverConfig.groovy deleted file mode 100644 index 28a3c42e265..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/CloudDriverConfig.groovy +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.config - -import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation -import com.netflix.spinnaker.cats.agent.NoopExecutionInstrumentation -import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions -import com.netflix.spinnaker.clouddriver.cache.CacheConfig -import com.netflix.spinnaker.clouddriver.cache.NoopOnDemandCacheUpdater -import com.netflix.spinnaker.clouddriver.cache.OnDemandCacheUpdater -import com.netflix.spinnaker.clouddriver.core.CloudProvider -import com.netflix.spinnaker.clouddriver.core.DynomiteConfig -import com.netflix.spinnaker.clouddriver.core.NoopAtomicOperationConverter -import com.netflix.spinnaker.clouddriver.core.NoopCloudProvider -import com.netflix.spinnaker.clouddriver.core.RedisConfig -import com.netflix.spinnaker.clouddriver.core.agent.CleanupPendingOnDemandCachesAgent -import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration -import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfigurationBuilder -import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import com.netflix.spinnaker.clouddriver.model.CloudMetricProvider -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ElasticIpProvider -import com.netflix.spinnaker.clouddriver.model.ImageProvider -import com.netflix.spinnaker.clouddriver.model.InstanceProvider -import com.netflix.spinnaker.clouddriver.model.InstanceTypeProvider -import com.netflix.spinnaker.clouddriver.model.KeyPairProvider -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider -import com.netflix.spinnaker.clouddriver.model.ManifestProvider -import com.netflix.spinnaker.clouddriver.model.NetworkProvider -import com.netflix.spinnaker.clouddriver.model.NoopApplicationProvider -import com.netflix.spinnaker.clouddriver.model.NoopCloudMetricProvider -import com.netflix.spinnaker.clouddriver.model.NoopClusterProvider -import com.netflix.spinnaker.clouddriver.model.NoopElasticIpProvider -import com.netflix.spinnaker.clouddriver.model.NoopImageProvider -import com.netflix.spinnaker.clouddriver.model.NoopInstanceProvider -import com.netflix.spinnaker.clouddriver.model.NoopInstanceTypeProvider -import com.netflix.spinnaker.clouddriver.model.NoopKeyPairProvider -import com.netflix.spinnaker.clouddriver.model.NoopLoadBalancerProvider -import com.netflix.spinnaker.clouddriver.model.NoopManifestProvider -import com.netflix.spinnaker.clouddriver.model.NoopNetworkProvider -import com.netflix.spinnaker.clouddriver.model.NoopReservationReportProvider -import com.netflix.spinnaker.clouddriver.model.NoopSecurityGroupProvider -import com.netflix.spinnaker.clouddriver.model.NoopServerGroupManagerProvider -import com.netflix.spinnaker.clouddriver.model.NoopSubnetProvider -import com.netflix.spinnaker.clouddriver.model.ReservationReportProvider -import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroupManager -import com.netflix.spinnaker.clouddriver.model.ServerGroupManagerProvider -import com.netflix.spinnaker.clouddriver.model.SubnetProvider -import com.netflix.spinnaker.clouddriver.names.NamerRegistry -import com.netflix.spinnaker.clouddriver.names.NamingStrategy -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter -import 
com.netflix.spinnaker.clouddriver.search.ApplicationSearchProvider -import com.netflix.spinnaker.clouddriver.search.NoopSearchProvider -import com.netflix.spinnaker.clouddriver.search.ProjectSearchProvider -import com.netflix.spinnaker.clouddriver.search.SearchProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import com.netflix.spinnaker.kork.core.RetrySupport -import com.netflix.spinnaker.kork.jedis.RedisClientDelegate -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.context.ApplicationContext -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Import -import org.springframework.context.annotation.PropertySource -import org.springframework.core.env.Environment -import org.springframework.web.client.RestTemplate - -import java.time.Clock - -@Configuration -@Import([ - RedisConfig, - DynomiteConfig, - CacheConfig -]) -@PropertySource(value = "classpath:META-INF/clouddriver-core.properties", ignoreResourceNotFound = true) -class CloudDriverConfig { - - @Bean - @ConditionalOnMissingBean(Clock) - Clock clock() { - Clock.systemDefaultZone() - } - - @Bean - String clouddriverUserAgentApplicationName(Environment environment) { - return "Spinnaker/${environment.getProperty("Implementation-Version", "Unknown")}" - } - - @Bean - @ConfigurationProperties('serviceLimits') - ServiceLimitConfigurationBuilder serviceLimitConfigProperties() { - return new ServiceLimitConfigurationBuilder() - } - - @Bean - ServiceLimitConfiguration serviceLimitConfiguration(ServiceLimitConfigurationBuilder serviceLimitConfigProperties) { - return serviceLimitConfigProperties.build() - } - - @Bean - @ConditionalOnMissingBean(AccountCredentialsRepository) - AccountCredentialsRepository accountCredentialsRepository() { - new MapBackedAccountCredentialsRepository() - } - - @Bean - @ConditionalOnMissingBean(AccountCredentialsProvider) - AccountCredentialsProvider accountCredentialsProvider(AccountCredentialsRepository accountCredentialsRepository) { - new DefaultAccountCredentialsProvider(accountCredentialsRepository) - } - - @Bean - RestTemplate restTemplate() { - new RestTemplate() - } - - @Bean - @ConditionalOnMissingBean(OnDemandCacheUpdater) - NoopOnDemandCacheUpdater noopOnDemandCacheUpdater() { - new NoopOnDemandCacheUpdater() - } - - @Bean - @ConditionalOnMissingBean(SearchProvider) - NoopSearchProvider noopSearchProvider() { - new NoopSearchProvider() - } - - @Bean - @ConditionalOnExpression('${services.front50.enabled:true}') - ApplicationSearchProvider applicationSearchProvider(Front50Service front50Service) { - new ApplicationSearchProvider(front50Service) - } - - @Bean - @ConditionalOnExpression('${services.front50.enabled:true}') - ProjectSearchProvider projectSearchProvider(Front50Service front50Service) { - new ProjectSearchProvider(front50Service) - } - - @Bean - @ConditionalOnMissingBean(CloudProvider) - CloudProvider noopCloudProvider() { - new NoopCloudProvider() - } - - @Bean - 
@ConditionalOnMissingBean(CloudMetricProvider) - CloudMetricProvider noopCloudMetricProvider() { - new NoopCloudMetricProvider() - } - - @Bean - @ConditionalOnMissingBean(ApplicationProvider) - ApplicationProvider noopApplicationProvider() { - new NoopApplicationProvider() - } - - @Bean - @ConditionalOnMissingBean(LoadBalancerProvider) - LoadBalancerProvider noopLoadBalancerProvider() { - new NoopLoadBalancerProvider() - } - - @Bean - @ConditionalOnMissingBean(ManifestProvider) - ManifestProvider noopManifestProvider() { - new NoopManifestProvider() - } - - @Bean - @ConditionalOnMissingBean(ClusterProvider) - ClusterProvider noopClusterProvider() { - new NoopClusterProvider() - } - - @Bean - @ConditionalOnMissingBean(ReservationReportProvider) - ReservationReportProvider noopReservationReportProvider() { - new NoopReservationReportProvider() - } - - @Bean - @ConditionalOnMissingBean(ExecutionInstrumentation) - ExecutionInstrumentation noopExecutionInstrumentation() { - new NoopExecutionInstrumentation() - } - - @Bean - @ConditionalOnMissingBean(InstanceProvider) - InstanceProvider noopInstanceProvider() { - new NoopInstanceProvider() - } - - @Bean - @ConditionalOnMissingBean(ImageProvider) - ImageProvider noopImageProvider() { - new NoopImageProvider() - } - - @Bean - @ConditionalOnMissingBean(InstanceTypeProvider) - InstanceTypeProvider noopInstanceTypeProvider() { - new NoopInstanceTypeProvider() - } - - @Bean - @ConditionalOnMissingBean(KeyPairProvider) - KeyPairProvider noopKeyPairProvider() { - new NoopKeyPairProvider() - } - - @Bean - @ConditionalOnMissingBean(SecurityGroupProvider) - SecurityGroupProvider noopSecurityGroupProvider() { - new NoopSecurityGroupProvider() - } - - @Bean - @ConditionalOnMissingBean(ServerGroupManager) - ServerGroupManagerProvider noopServerGroupManagerProvider() { - new NoopServerGroupManagerProvider() - } - - @Bean - @ConditionalOnMissingBean(SubnetProvider) - SubnetProvider noopSubnetProvider() { - new NoopSubnetProvider() - } - - @Bean - @ConditionalOnMissingBean(NetworkProvider) - NetworkProvider noopVpcProvider() { - new NoopNetworkProvider() - } - - @Bean - @ConditionalOnMissingBean(ElasticIpProvider) - ElasticIpProvider noopElasticIpProvider() { - new NoopElasticIpProvider() - } - - @Bean - CoreProvider coreProvider(RedisCacheOptions redisCacheOptions, - RedisClientDelegate redisClientDelegate, - ApplicationContext applicationContext) { - return new CoreProvider([ - new CleanupPendingOnDemandCachesAgent(redisCacheOptions, redisClientDelegate, applicationContext) - ]) - } - - @Bean - @ConditionalOnMissingBean(AtomicOperationConverter) - AtomicOperationConverter atomicOperationConverter() { - new NoopAtomicOperationConverter() - } - - @Bean - public RetrySupport retrySupport() { - return new RetrySupport(); - } - - @Bean - NamerRegistry namerRegistry(Optional<List<NamingStrategy>> namingStrategies) { - new NamerRegistry(namingStrategies.orElse([])) - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/DeployConfiguration.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/DeployConfiguration.groovy deleted file mode 100644 index 3b9aecf2e9e..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/DeployConfiguration.groovy +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.config - -import com.netflix.spinnaker.clouddriver.data.task.InMemoryTaskRepository -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DefaultDeployHandlerRegistry -import com.netflix.spinnaker.clouddriver.deploy.DeployHandler -import com.netflix.spinnaker.clouddriver.deploy.DeployHandlerRegistry -import com.netflix.spinnaker.clouddriver.deploy.NullOpDeployHandler -import com.netflix.spinnaker.clouddriver.orchestration.AnnotationsBasedAtomicOperationsRegistry -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry -import com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessor -import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration - -@Configuration -class DeployConfiguration { - @Bean - @ConditionalOnMissingBean(TaskRepository) - TaskRepository taskRepository() { - new InMemoryTaskRepository() - } - - @Bean - @ConditionalOnMissingBean(DeployHandlerRegistry) - DeployHandlerRegistry deployHandlerRegistry() { - new DefaultDeployHandlerRegistry() - } - - @Bean - @ConditionalOnMissingBean(OrchestrationProcessor) - OrchestrationProcessor orchestrationProcessor() { - new DefaultOrchestrationProcessor() - } - - @Bean - @ConditionalOnMissingBean(DeployHandler) - DeployHandler nullOpDeployHandler() { - new NullOpDeployHandler() - } - - @Bean - AtomicOperationsRegistry atomicOperationsRegistry() { - new AnnotationsBasedAtomicOperationsRegistry() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/LocalJobConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/LocalJobConfig.groovy deleted file mode 100644 index 0337ae24b4f..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/LocalJobConfig.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.config - -import com.netflix.spinnaker.clouddriver.jobs.JobExecutor -import com.netflix.spinnaker.clouddriver.jobs.local.JobExecutorLocal -import groovy.util.logging.Slf4j -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration - -@Slf4j -@Configuration -class LocalJobConfig { - - @Bean - @ConditionalOnMissingBean(JobExecutor) - JobExecutor jobExecutorLocal() { - new JobExecutorLocal() - } -} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/RetrofitConfig.groovy b/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/RetrofitConfig.groovy deleted file mode 100644 index 80b69b54f7d..00000000000 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/config/RetrofitConfig.groovy +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.config - -import com.netflix.spinnaker.clouddriver.core.Front50ConfigurationProperties -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import com.netflix.spinnaker.retrofit.Slf4jRetrofitLogger -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope -import retrofit.RequestInterceptor -import retrofit.RestAdapter -import retrofit.client.OkClient -import retrofit.converter.JacksonConverter - -import static retrofit.Endpoints.newFixedEndpoint - -@Configuration -@EnableConfigurationProperties(Front50ConfigurationProperties) -class RetrofitConfig { - - @Bean - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - OkClient okClient(OkHttpClientConfiguration okHttpClientConfiguration) { - def client = okHttpClientConfiguration.create() - return new OkClient(client) - } - - @Bean - @ConditionalOnProperty(name = 'services.front50.enabled', matchIfMissing = true) - Front50Service front50Service(Front50ConfigurationProperties front50ConfigurationProperties, RestAdapter.LogLevel retrofitLogLevel, OkClient okClient, RequestInterceptor spinnakerRequestInterceptor) { - def endpoint = newFixedEndpoint(front50ConfigurationProperties.baseUrl) - new RestAdapter.Builder() - .setRequestInterceptor(spinnakerRequestInterceptor) - .setEndpoint(endpoint) - .setClient(okClient) - .setConverter(new JacksonConverter()) - .setLogLevel(retrofitLogLevel) - .setLog(new Slf4jRetrofitLogger(Front50Service)) - .build() - .create(Front50Service) - } -} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/AgentSchedulerConfig.java 
b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/AgentSchedulerConfig.java new file mode 100644 index 00000000000..e610f32814b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/AgentSchedulerConfig.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.AgentScheduler; +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider; +import com.netflix.spinnaker.cats.cluster.DefaultNodeIdentity; +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider; +import com.netflix.spinnaker.cats.cluster.ShardingFilter; +import com.netflix.spinnaker.cats.redis.cluster.ClusteredAgentScheduler; +import com.netflix.spinnaker.cats.redis.cluster.ClusteredSortAgentScheduler; +import com.netflix.spinnaker.clouddriver.core.RedisConfigurationProperties; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import java.net.URI; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import redis.clients.jedis.JedisPool; + +@Configuration +@ConditionalOnProperty(value = "caching.write-enabled", matchIfMissing = true) +public class AgentSchedulerConfig { + + @Bean + @ConditionalOnExpression("${redis.enabled:true} && ${redis.scheduler.enabled:true}") + AgentScheduler redisAgentScheduler( + RedisConfigurationProperties redisConfigurationProperties, + RedisClientDelegate redisClientDelegate, + JedisPool jedisPool, + AgentIntervalProvider agentIntervalProvider, + NodeStatusProvider nodeStatusProvider, + DynamicConfigService dynamicConfigService, + ShardingFilter shardingFilter) { + if (redisConfigurationProperties.getScheduler().equalsIgnoreCase("default")) { + URI redisUri = URI.create(redisConfigurationProperties.getConnection()); + String redisHost = redisUri.getHost(); + int redisPort = redisUri.getPort(); + if (redisPort == -1) { + redisPort = 6379; + } + return new ClusteredAgentScheduler( + redisClientDelegate, + new DefaultNodeIdentity(redisHost, redisPort), + agentIntervalProvider, + nodeStatusProvider, + redisConfigurationProperties.getAgent().getEnabledPattern(), + redisConfigurationProperties.getAgent().getAgentLockAcquisitionIntervalSeconds(), + dynamicConfigService, + shardingFilter); + } else if (redisConfigurationProperties.getScheduler().equalsIgnoreCase("sort")) { + return new ClusteredSortAgentScheduler( + jedisPool, + nodeStatusProvider, + agentIntervalProvider, + redisConfigurationProperties.getParallelism()); + } else { + throw new IllegalStateException("redis.scheduler must be one of 'default', 'sort', or ''."); + } + } +} diff --git 
a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CacheConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CacheConfig.java new file mode 100644 index 00000000000..80211b88f3c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CacheConfig.java @@ -0,0 +1,135 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentLock; +import com.netflix.spinnaker.cats.agent.AgentScheduler; +import com.netflix.spinnaker.cats.agent.DefaultAgentScheduler; +import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.NamedCacheFactory; +import com.netflix.spinnaker.cats.mem.InMemoryNamedCacheFactory; +import com.netflix.spinnaker.cats.module.CatsModule; +import com.netflix.spinnaker.cats.provider.Provider; +import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.clouddriver.search.SearchProvider; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ComponentScan({ + "com.netflix.spinnaker.clouddriver.cache", +}) +@EnableConfigurationProperties(CatsInMemorySearchProperties.class) +public class CacheConfig { + @Bean + @ConditionalOnMissingBean(NamedCacheFactory.class) + NamedCacheFactory namedCacheFactory() { + return new InMemoryNamedCacheFactory(); + } + + @Bean + @ConditionalOnMissingBean(AgentScheduler.class) + @ConditionalOnProperty(value = "caching.write-enabled", matchIfMissing = true) + AgentScheduler agentScheduler() { + return new DefaultAgentScheduler(60, TimeUnit.SECONDS); + } + + @Bean + @ConditionalOnProperty(value = "caching.write-enabled", havingValue = "false") + @ConditionalOnMissingBean(AgentScheduler.class) + AgentScheduler noopAgentScheduler() { + return (agent, agentExecution, executionInstrumentation) -> { + // do nothing + }; + } + + @Bean + @ConditionalOnMissingBean(CatsModule.class) + CatsModule catsModule( + List<Provider> providers, + List<ExecutionInstrumentation> executionInstrumentation, + NamedCacheFactory cacheFactory, + AgentScheduler agentScheduler) { + return new CatsModule.Builder() + .cacheFactory(cacheFactory) + .scheduler(agentScheduler) + .instrumentation(executionInstrumentation) + .build(providers); + } + + @Bean + Cache cacheView(CatsModule catsModule) {
return catsModule.getView(); + } + + @Bean + ProviderRegistry providerRegistry(CatsModule catsModule) { + return catsModule.getProviderRegistry(); + } + + @Bean + ExecutionInstrumentation loggingInstrumentation() { + return new LoggingInstrumentation(); + } + + @Bean + ExecutionInstrumentation metricInstrumentation(Registry registry) { + return new MetricInstrumentation(registry); + } + + @Bean + OnDemandCacheUpdater catsOnDemandCacheUpdater( + List<Provider> providers, + CatsModule catsModule, + AgentScheduler agentScheduler) { + return new CatsOnDemandCacheUpdater(providers, catsModule, agentScheduler); + } + + @Bean + @ConditionalOnProperty(value = "caching.search.enabled", matchIfMissing = true) + SearchProvider catsSearchProvider( + CatsInMemorySearchProperties catsInMemorySearchProperties, + Cache cacheView, + List<SearchableProvider> providers, + ProviderRegistry providerRegistry, + Optional<FiatPermissionEvaluator> permissionEvaluator, + Optional<List<KeyParser>> keyParsers) { + return new CatsSearchProvider( + catsInMemorySearchProperties, + cacheView, + providers, + providerRegistry, + permissionEvaluator, + keyParsers); + } + + @Bean + @ConditionalOnMissingBean(SearchableProvider.class) + SearchableProvider noopSearchableProvider() { + return new NoopSearchableProvider(); + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsInMemorySearchProperties.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CatsInMemorySearchProperties.java similarity index 95% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsInMemorySearchProperties.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CatsInMemorySearchProperties.java index a872b2f5c38..e1f862297c2 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/CatsInMemorySearchProperties.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CatsInMemorySearchProperties.java @@ -18,7 +18,7 @@ import org.springframework.boot.context.properties.ConfigurationProperties; -@ConfigurationProperties("caching.search.inMemory") +@ConfigurationProperties("caching.search.in-memory") public class CatsInMemorySearchProperties { private boolean enabled = false; private int refreshIntervalSeconds = 30; diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CatsOnDemandCacheUpdater.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CatsOnDemandCacheUpdater.java new file mode 100644 index 00000000000..55d5d8a4ced --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CatsOnDemandCacheUpdater.java @@ -0,0 +1,235 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentLock; +import com.netflix.spinnaker.cats.agent.AgentScheduler; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.module.CatsModule; +import com.netflix.spinnaker.cats.provider.Provider; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; + +public class CatsOnDemandCacheUpdater implements OnDemandCacheUpdater { + + private static final Logger log = LoggerFactory.getLogger(CatsOnDemandCacheUpdater.class); + + private final List<Provider> providers; + private final CatsModule catsModule; + // TODO(rz): Deliberately not using <AgentLock> since it results in + // compilation errors. This is a side-effect of migrating away from Groovy. + // I'm sure there's a way, but it's the early morning and I'm pretty tired! + private final AgentScheduler agentScheduler; + + @Autowired + public CatsOnDemandCacheUpdater( + List<Provider> providers, + CatsModule catsModule, + AgentScheduler agentScheduler) { + this.providers = providers; + this.catsModule = catsModule; + this.agentScheduler = agentScheduler; + } + + private Collection<OnDemandAgent> getOnDemandAgents() { + return providers.stream() + .flatMap( + provider -> provider.getAgents().stream().filter(it -> it instanceof OnDemandAgent)) + .map(it -> (OnDemandAgent) it) + .collect(Collectors.toList()); + } + + @Override + public boolean handles(final OnDemandType type, final String cloudProvider) { + return getOnDemandAgents().stream().anyMatch(it -> it.handles(type, cloudProvider)); + } + + @Override + public OnDemandCacheResult handle( + final OnDemandType type, final String cloudProvider, Map<String, ?> data) { + return handle(type, onDemandAgents(type, cloudProvider), data); + } + + private OnDemandCacheResult handle( + OnDemandType type, Collection<OnDemandAgent> onDemandAgents, Map<String, ?> data) { + log.debug("Calling handle onDemandAgents: {}, type: {}", onDemandAgents, type); + + boolean hasOnDemandResults = false; + Map<String, List<String>> cachedIdentifiersByType = new HashMap<>(); + for (OnDemandAgent agent : onDemandAgents) { + try { + AgentLock lock = agentScheduler.tryLock((Agent) agent); + if (agentScheduler.isAtomic() && lock == null) { + // force Orca to retry + hasOnDemandResults = true; + continue; + } + + final long startTime = System.nanoTime(); + final ProviderCache providerCache = + catsModule.getProviderRegistry().getProviderCache(agent.getProviderName()); + if (agent.getMetricsSupport() != null) { + agent.getMetricsSupport().countOnDemand(); + } + + final OnDemandAgent.OnDemandResult result = agent.handle(providerCache, data); + if (result != null) { + if (agentScheduler.isAtomic() && !agentScheduler.lockValid(lock)) { + // force Orca to retry + hasOnDemandResults = true; + continue; + } + + if (agent.getMetricsSupport() == null) { + continue; + } + + if (result.getCacheResult() != null) { + final Map<String, Collection<CacheData>> results = + result.getCacheResult().getCacheResults(); + if (agentHasOnDemandResults(results)) { + hasOnDemandResults = true; + results.forEach( + (k, v) -> { + if (v != null && !v.isEmpty()) { + if (!cachedIdentifiersByType.containsKey(k)) { + cachedIdentifiersByType.put(k, new ArrayList<>()); + } + cachedIdentifiersByType + .get(k)
.addAll(v.stream().map(CacheData::getId).collect(Collectors.toList())); + } + }); + } + + agent + .getMetricsSupport() + .cacheWrite( + () -> { + if (result.cacheResult.isPartialResult()) { + providerCache.addCacheResult( + result.sourceAgentType, result.authoritativeTypes, result.cacheResult); + } else { + providerCache.putCacheResult( + result.sourceAgentType, result.authoritativeTypes, result.cacheResult); + } + }); + } + + if (result.getEvictions() != null && !result.getEvictions().isEmpty()) { + agent + .getMetricsSupport() + .cacheEvict( + () -> { + result.evictions.forEach(providerCache::evictDeletedItems); + }); + } + + if (agentScheduler.isAtomic() && !(agentScheduler.tryRelease(lock))) { + throw new IllegalStateException( + "We likely just wrote stale data. If you're seeing this, file a github issue: https://github.com/spinnaker/spinnaker/issues"); + } + + final long elapsed = System.nanoTime() - startTime; + agent.getMetricsSupport().recordTotalRunTimeNanos(elapsed); + + log.info( + "{}/{} handled {} in {}ms.", + agent.getProviderName(), + agent.getOnDemandAgentType(), + type, + TimeUnit.NANOSECONDS.toMillis(elapsed)); + } + + } catch (Exception e) { + if (agent.getMetricsSupport() != null) { + agent.getMetricsSupport().countError(); + } + log.warn( + "{}/{} failed to handle on demand update for {}", + agent.getProviderName(), + agent.getOnDemandAgentType(), + type, + e); + } + } + + if (hasOnDemandResults) { + return new OnDemandCacheResult(OnDemandCacheStatus.PENDING, cachedIdentifiersByType); + } + + return new OnDemandCacheResult(OnDemandCacheStatus.SUCCESSFUL); + } + + private boolean agentHasOnDemandResults(Map<String, Collection<CacheData>> results) { + return !agentScheduler.isAtomic() + && !(Optional.ofNullable(results).orElseGet(HashMap::new).values().stream() + .mapToLong(Collection::size) + .sum() + == 0); + } + + @Override + public Collection<Map<String, Object>> pendingOnDemandRequests( + final OnDemandType type, final String cloudProvider) { + if (agentScheduler.isAtomic()) { + return new ArrayList<>(); + } + + return onDemandAgentStream(type, cloudProvider) + .flatMap( + it -> { + ProviderCache providerCache = + catsModule.getProviderRegistry().getProviderCache(it.getProviderName()); + return it.pendingOnDemandRequests(providerCache).stream(); + }) + .collect(Collectors.toList()); + } + + @Override + public Map<String, Object> pendingOnDemandRequest( + final OnDemandType type, final String cloudProvider, final String id) { + if (agentScheduler.isAtomic()) { + return null; + } + + return onDemandAgentStream(type, cloudProvider) + .map( + it -> { + ProviderCache providerCache = + catsModule.getProviderRegistry().getProviderCache(it.getProviderName()); + return it.pendingOnDemandRequest(providerCache, id); + }) + .filter(Objects::nonNull) + .findFirst() + .orElse(null); + } + + private Stream<OnDemandAgent> onDemandAgentStream(OnDemandType type, String cloudProvider) { + return getOnDemandAgents().stream().filter(it -> it.handles(type, cloudProvider)); + } + + private Collection<OnDemandAgent> onDemandAgents(OnDemandType type, String cloudProvider) { + return onDemandAgentStream(type, cloudProvider).collect(Collectors.toList()); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CustomSchedulableAgentIntervalProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CustomSchedulableAgentIntervalProvider.java new file mode 100644 index 00000000000..923a0bbf2c2 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CustomSchedulableAgentIntervalProvider.java
@@ -0,0 +1,49 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider; +import com.netflix.spinnaker.cats.cluster.DefaultAgentIntervalProvider; + +public class CustomSchedulableAgentIntervalProvider extends DefaultAgentIntervalProvider { + + public CustomSchedulableAgentIntervalProvider(long interval, long errorInterval, long timeout) { + super(interval, errorInterval, timeout); + } + + @Override + public AgentIntervalProvider.Interval getInterval(Agent agent) { + if (agent instanceof CustomScheduledAgent) { + CustomScheduledAgent customAgent = (CustomScheduledAgent) agent; + return getCustomInterval(customAgent); + } + return super.getInterval(agent); + } + + AgentIntervalProvider.Interval getCustomInterval(CustomScheduledAgent agent) { + final long pollInterval = + agent.getPollIntervalMillis() == -1 ? super.getInterval() : agent.getPollIntervalMillis(); + final long errorInterval = + agent.getErrorIntervalMillis() == -1 + ? super.getErrorInterval() + : agent.getErrorIntervalMillis(); + final long timeoutMillis = + agent.getTimeoutMillis() == -1 ? super.getTimeout() : agent.getTimeoutMillis(); + return new AgentIntervalProvider.Interval(pollInterval, errorInterval, timeoutMillis); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CustomScheduledAgent.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CustomScheduledAgent.java new file mode 100644 index 00000000000..515abe45394 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/CustomScheduledAgent.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.Agent; + +/** Allows an Agent to customize its poll interval.
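+ * + * <p>For example, a hypothetical caching agent could poll every five minutes while keeping the + * system defaults for timeout and error interval (remaining {@code Agent} methods omitted): + * + * <pre>{@code + * public class ExampleCachingAgent implements CustomScheduledAgent { + *   public long getPollIntervalMillis() { return TimeUnit.MINUTES.toMillis(5); } + *   public long getTimeoutMillis() { return -1; } // -1 falls back to the system default + *   // getErrorIntervalMillis() defaults to getPollIntervalMillis() + * } + * }</pre>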
*/ +public interface CustomScheduledAgent extends Agent { + /** @return the interval in milliseconds, or -1 to use the system default poll interval */ + long getPollIntervalMillis(); + + /** @return the timeout in milliseconds, or -1 to use the system default timeout */ + long getTimeoutMillis(); + + /** @return the error interval in milliseconds, or -1 to use the system default error interval */ + default long getErrorIntervalMillis() { + return getPollIntervalMillis(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/DiscoveryStatusNodeStatusProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/DiscoveryStatusNodeStatusProvider.java new file mode 100644 index 00000000000..615945ec931 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/DiscoveryStatusNodeStatusProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider; +import com.netflix.spinnaker.kork.discovery.DiscoveryStatusListener; + +public class DiscoveryStatusNodeStatusProvider implements NodeStatusProvider { + public DiscoveryStatusNodeStatusProvider(DiscoveryStatusListener discoveryStatusListener) { + this.discoveryStatusListener = discoveryStatusListener; + } + + @Override + public boolean isNodeEnabled() { + return discoveryStatusListener.isEnabled(); + } + + private final DiscoveryStatusListener discoveryStatusListener; +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/GZipCompressionStrategyProperties.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/GZipCompressionStrategyProperties.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/GZipCompressionStrategyProperties.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/GZipCompressionStrategyProperties.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/JedisCacheConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/JedisCacheConfig.java new file mode 100644 index 00000000000..b4671d97df6 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/JedisCacheConfig.java @@ -0,0 +1,52 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.NamedCacheFactory; +import com.netflix.spinnaker.cats.redis.cache.RedisCache.CacheMetrics; +import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions; +import com.netflix.spinnaker.cats.redis.cache.RedisNamedCacheFactory; +import com.netflix.spinnaker.clouddriver.core.RedisConfigurationProperties; +import com.netflix.spinnaker.kork.jedis.JedisClientDelegate; +import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import redis.clients.jedis.JedisPool; + +@Configuration +@ConditionalOnExpression("${redis.enabled:true} && ${redis.cache.enabled:true}") +@EnableConfigurationProperties(RedisConfigurationProperties.class) +class JedisCacheConfig { + + @Bean + RedisClientDelegate redisClientDelegate(JedisPool jedisPool) { + return new JedisClientDelegate(jedisPool); + } + + @Bean + NamedCacheFactory cacheFactory( + RedisClientDelegate redisClientDelegate, + ObjectMapper objectMapper, + RedisCacheOptions redisCacheOptions, + CacheMetrics cacheMetrics) { + return new RedisNamedCacheFactory( + redisClientDelegate, objectMapper, redisCacheOptions, cacheMetrics); + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/KeyProcessor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/KeyProcessor.java similarity index 89% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/KeyProcessor.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/KeyProcessor.java index 965a9639714..e24238ec5ca 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/KeyProcessor.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/KeyProcessor.java @@ -22,8 +22,8 @@ public interface KeyProcessor { * Indicates whether this processor can process the specified type * * @param type the cache type to process - * - * @return true if this processor can process the specified type and false otherwise. + * @return true if this processor can process the specified type and false + * otherwise. */ Boolean canProcess(String type); @@ -31,8 +31,8 @@ public interface KeyProcessor { * Determines whether the underlying object represented by this key exists. * * @param key the cache key to process - * - * @return true if the underlying object represented by this key exists and false otherwise. + * @return true if the underlying object represented by this key exists and + * false otherwise. */ Boolean exists(String key); } diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/LoggingInstrumentation.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/LoggingInstrumentation.java new file mode 100644 index 00000000000..f8169e262de --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/LoggingInstrumentation.java @@ -0,0 +1,47 @@ +/* + * Copyright 2015 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +class LoggingInstrumentation implements ExecutionInstrumentation { + private final Logger logger = LoggerFactory.getLogger(LoggingInstrumentation.class); + + @Override + public void executionStarted(Agent agent) { + logger.debug("{}:{} starting", agent.getProviderName(), agent.getAgentType()); + } + + @Override + public void executionCompleted(Agent agent, long durationMs) { + logger.debug( + "{}:{} completed in {}s", agent.getProviderName(), agent.getAgentType(), durationMs / 1000); + } + + @Override + public void executionFailed(Agent agent, Throwable cause, long durationMs) { + logger.warn( + "{}:{} completed with one or more failures in {}s", + agent.getProviderName(), + agent.getAgentType(), + durationMs / 1000, + cause); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentation.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentation.java new file mode 100644 index 00000000000..1d33c65ba26 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentation.java @@ -0,0 +1,67 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import java.util.concurrent.TimeUnit; +import org.springframework.beans.factory.annotation.Autowired; + +class MetricInstrumentation implements ExecutionInstrumentation { + + private final Registry registry; + private final Id timingId; + + @Autowired + MetricInstrumentation(Registry registry) { + this.registry = registry; + timingId = + registry + .createId("executionTime") + .withTag("className", MetricInstrumentation.class.getSimpleName()); + } + + private static String stripPackageName(String className) { + return className.substring(className.lastIndexOf('.') + 1); + } + + private static String agentName(Agent agent) { + String simpleProviderName = stripPackageName(agent.getProviderName()); + return String.format("%s/%s", simpleProviderName, agent.getAgentType()); + } + + @Override + public void executionStarted(Agent agent) { + // do nothing + } + + @Override + public void executionCompleted(Agent agent, long elapsedMs) { + registry + .timer(timingId.withTag("agent", agentName(agent)).withTag("success", "true")) + .record(elapsedMs, TimeUnit.MILLISECONDS); + } + + @Override + public void executionFailed(Agent agent, Throwable cause, long elapsedMs) { + registry + .timer(timingId.withTag("agent", agentName(agent)).withTag("success", "false")) + .record(elapsedMs, TimeUnit.MILLISECONDS); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopOnDemandCacheUpdater.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopOnDemandCacheUpdater.java new file mode 100644 index 00000000000..514fcefe800 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopOnDemandCacheUpdater.java @@ -0,0 +1,32 @@ +package com.netflix.spinnaker.clouddriver.cache; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Map; +import org.springframework.stereotype.Component; + +/** A default, no-op implementation of an {@link OnDemandCacheUpdater} */ +@Component +public class NoopOnDemandCacheUpdater implements OnDemandCacheUpdater { + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return false; + } + + @Override + public OnDemandCacheResult handle(OnDemandType type, String cloudProvider, Map<String, ?> data) { + return new OnDemandCacheResult(OnDemandCacheStatus.SUCCESSFUL); + } + + @Override + public Collection<Map<String, Object>> pendingOnDemandRequests( + OnDemandType type, String cloudProvider) { + return new ArrayList<>(); + } + + @Override + public Map<String, Object> pendingOnDemandRequest( + OnDemandType type, String cloudProvider, String id) { + return null; + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/NoopSearchableProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopSearchableProvider.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/cache/NoopSearchableProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopSearchableProvider.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopShardingFilterConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopShardingFilterConfig.java new file
mode 100644 index 00000000000..e482c6f78c7 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/NoopShardingFilterConfig.java @@ -0,0 +1,35 @@ +/* + * Copyright 2021 OpsMx. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spinnaker.cats.cluster.NoopShardingFilter; +import com.netflix.spinnaker.cats.cluster.ShardingFilter; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty(value = "caching.write-enabled", matchIfMissing = true) +public class NoopShardingFilterConfig { + + @Bean + @ConditionalOnMissingBean(ShardingFilter.class) + ShardingFilter shardingFilter() { + return new NoopShardingFilter(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheResult.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheResult.java new file mode 100644 index 00000000000..6647b53775e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheResult.java @@ -0,0 +1,44 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.spinnaker.clouddriver.cache; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class OnDemandCacheResult { + private final OnDemandCacheStatus status; + private final Map<String, List<String>> cachedIdentifiersByType; + + public OnDemandCacheResult(OnDemandCacheStatus status) { + this(status, new HashMap<>()); + } + + public OnDemandCacheResult( + OnDemandCacheStatus status, Map<String, List<String>> cachedIdentifiersByType) { + this.status = status; + this.cachedIdentifiersByType = cachedIdentifiersByType; + } + + public OnDemandCacheStatus getStatus() { + return status; + } + + public Map<String, List<String>> getCachedIdentifiersByType() { + return cachedIdentifiersByType; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheStatus.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheStatus.java new file mode 100644 index 00000000000..c1f789e261c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheStatus.java @@ -0,0 +1,22 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.cache; + +public enum OnDemandCacheStatus { + SUCCESSFUL, + PENDING; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheUpdater.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheUpdater.java new file mode 100644 index 00000000000..842950399f8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandCacheUpdater.java @@ -0,0 +1,50 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.cache; + +import java.util.Collection; +import java.util.Map; + +/** + * An on-demand cache updater. Allows some non-scheduled trigger to initiate a cache refresh for a + * given type. An on-demand cache request will fan out to all available updaters.
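+ * + * <p>A sketch of how a caller might fan a request out to every registered updater (the updater + * collection and request data here are illustrative, not part of this interface): + * + * <pre>{@code + * for (OnDemandCacheUpdater updater : updaters) { + *   if (updater.handles(type, cloudProvider)) { + *     OnDemandCacheResult result = updater.handle(type, cloudProvider, data); + *   } + * } + * }</pre>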
+ */ +public interface OnDemandCacheUpdater { + + /** + * Indicates whether the updater is able to handle this on-demand request given the type and + * cloud provider. + * + * @param type the on-demand request type + * @param cloudProvider the cloud provider the request targets + * @return true if this updater can handle the request + */ + boolean handles(OnDemandType type, String cloudProvider); + + /** + * Handles the update request. + * + * @param type the on-demand request type + * @param cloudProvider the cloud provider the request targets + * @param data provider-specific request data + * @return the result of the cache update + */ + OnDemandCacheResult handle(OnDemandType type, String cloudProvider, Map<String, ?> data); + + Collection<Map<String, Object>> pendingOnDemandRequests(OnDemandType type, String cloudProvider); + + Map<String, Object> pendingOnDemandRequest(OnDemandType type, String cloudProvider, String id); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupport.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupport.java new file mode 100644 index 00000000000..295d191ff4f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/OnDemandMetricsSupport.java @@ -0,0 +1,106 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spectator.api.Counter; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Timer; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +public class OnDemandMetricsSupport implements OnDemandMetricsSupportable { + + private final Timer onDemandTotal; + private final Timer dataRead; + private final Timer dataTransform; + private final Timer onDemandStore; + private final Timer cacheWrite; + private final Timer cacheEvict; + private final Counter onDemandErrors; + private final Counter onDemandCount; + + public OnDemandMetricsSupport(Registry registry, OnDemandAgent agent, String onDemandType) { + final String[] tags = + new String[] { + "providerName", + agent.getProviderName(), + "agentType", + agent.getOnDemandAgentType(), + "onDemandType", + onDemandType + }; + this.onDemandTotal = registry.timer(ON_DEMAND_TOTAL_TIME, tags); + this.dataRead = registry.timer(DATA_READ, tags); + this.dataTransform = registry.timer(DATA_TRANSFORM, tags); + this.onDemandStore = registry.timer(ON_DEMAND_STORE, tags); + this.cacheWrite = registry.timer(CACHE_WRITE, tags); + this.cacheEvict = registry.timer(CACHE_EVICT, tags); + this.onDemandErrors = registry.counter(ON_DEMAND_ERROR, tags); + this.onDemandCount = registry.counter(ON_DEMAND_COUNT, tags); + } + + private <T> T record(Timer timer, Supplier<T> closure) { + final long start = System.nanoTime(); + try { + return closure.get(); + } finally { + final long elapsed = System.nanoTime() - start; + timer.record(elapsed, TimeUnit.NANOSECONDS); + } + } + + @Override + public <T> T readData(Supplier<T> closure) { + return record(dataRead, closure); + } + + @Override + public <T> T transformData(Supplier<T> closure) { + return record(dataTransform, closure); + } + + @Override + public <T> T onDemandStore(Supplier<T> closure) { + return
record(onDemandStore, closure); + } + + @Override + public <T> T cacheWrite(Supplier<T> closure) { + return record(cacheWrite, closure); + } + + @Override + public <T> T cacheEvict(Supplier<T> closure) { + return record(cacheEvict, closure); + } + + @Override + public void countError() { + onDemandErrors.increment(); + } + + @Override + public void countOnDemand() { + onDemandCount.increment(); + } + + @Override + public void recordTotalRunTimeNanos(long nanos) { + onDemandTotal.record(nanos, TimeUnit.NANOSECONDS); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/RedisCacheConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/RedisCacheConfig.java new file mode 100644 index 00000000000..0e77f492a59 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/cache/RedisCacheConfig.java @@ -0,0 +1,67 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.cluster.AgentIntervalProvider; +import com.netflix.spinnaker.cats.cluster.NodeStatusProvider; +import com.netflix.spinnaker.cats.redis.cache.RedisCache.CacheMetrics; +import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions; +import com.netflix.spinnaker.clouddriver.core.RedisConfigurationProperties; +import com.netflix.spinnaker.kork.discovery.DiscoveryStatusListener; +import java.util.concurrent.TimeUnit; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnExpression("${redis.enabled:true}") +@EnableConfigurationProperties(RedisConfigurationProperties.class) +class RedisCacheConfig { + + @Bean + @ConfigurationProperties("caching.redis") + RedisCacheOptions.Builder redisCacheOptionsBuilder() { + return RedisCacheOptions.builder(); + } + + @Bean + RedisCacheOptions redisCacheOptions(RedisCacheOptions.Builder redisCacheOptionsBuilder) { + return redisCacheOptionsBuilder.build(); + } + + @Bean + CacheMetrics cacheMetrics(Registry registry) { + return new SpectatorRedisCacheMetrics(registry); + } + + @Bean + AgentIntervalProvider agentIntervalProvider( + RedisConfigurationProperties redisConfigurationProperties) { + return new CustomSchedulableAgentIntervalProvider( + TimeUnit.SECONDS.toMillis(redisConfigurationProperties.getPoll().getIntervalSeconds()), + TimeUnit.SECONDS.toMillis(redisConfigurationProperties.getPoll().getErrorIntervalSeconds()), + TimeUnit.SECONDS.toMillis(redisConfigurationProperties.getPoll().getTimeoutSeconds())); + } + + @Bean + NodeStatusProvider nodeStatusProvider(DiscoveryStatusListener discoveryStatusListener) { + return
new DiscoveryStatusNodeStatusProvider(discoveryStatusListener); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/AccountDefinitionConfiguration.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/AccountDefinitionConfiguration.java new file mode 100644 index 00000000000..435077a322c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/AccountDefinitionConfiguration.java @@ -0,0 +1,208 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.config; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.jsontype.NamedType; +import com.netflix.spinnaker.clouddriver.jackson.AccountDefinitionModule; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionMapper; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionSecretManager; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionService; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionTypeProvider; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionTypes; +import com.netflix.spinnaker.clouddriver.security.AccountSecurityPolicy; +import com.netflix.spinnaker.clouddriver.security.AllowAllAccountSecurityPolicy; +import com.netflix.spinnaker.clouddriver.security.AuthorizedRolesExtractor; +import com.netflix.spinnaker.clouddriver.security.DefaultAccountSecurityPolicy; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import com.netflix.spinnaker.kork.secrets.SecretManager; +import com.netflix.spinnaker.kork.secrets.SecretSession; +import com.netflix.spinnaker.kork.secrets.user.UserSecretManager; +import com.netflix.spinnaker.kork.secrets.user.UserSecretReference; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.RequiredArgsConstructor; +import lombok.extern.log4j.Log4j2; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.beans.factory.config.BeanDefinition; +import org.springframework.boot.autoconfigure.condition.ConditionalOnBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import 
org.springframework.context.annotation.ClassPathScanningCandidateComponentProvider; +import org.springframework.context.annotation.Configuration; +import org.springframework.core.io.ResourceLoader; +import org.springframework.core.type.filter.AssignableTypeFilter; +import org.springframework.util.ClassUtils; + +/** + * Provides configuration settings related to managing account credential definitions at runtime. + * + * @see Properties + */ +@Configuration +@EnableConfigurationProperties(AccountDefinitionConfiguration.Properties.class) +@Log4j2 +@RequiredArgsConstructor +public class AccountDefinitionConfiguration { + + private final Properties properties; + + @Bean + @ConditionalOnMissingBean + public AccountSecurityPolicy accountSecurity( + @Nullable FiatPermissionEvaluator permissionEvaluator, + @Value("${services.fiat.enabled:false}") boolean fiatEnabled) { + return fiatEnabled && permissionEvaluator != null + ? new DefaultAccountSecurityPolicy(permissionEvaluator) + : new AllowAllAccountSecurityPolicy(); + } + + @Bean + public AccountDefinitionSecretManager accountDefinitionSecretManager( + UserSecretManager userSecretManager, AccountSecurityPolicy policy) { + return new AccountDefinitionSecretManager(userSecretManager, policy); + } + + /** + * Creates a mapper that can convert between JSON and {@link CredentialsDefinition} classes that + * are annotated with {@link JsonTypeName}. Account definition classes are scanned in {@code + * com.netflix.spinnaker.clouddriver} and any additional packages configured in {@link + * Properties#setAdditionalScanPackages(List)}. Only eligible account definition classes are used + * with an ObjectMapper to first convert any referenced {@link UserSecretReference} URIs and then + * convert to an appropriate CredentialsDefinition instance. + * + * @see com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer + */ + @Bean + public AccountDefinitionMapper accountDefinitionMapper( + ObjectMapper mapper, + AccountDefinitionSecretManager accountDefinitionSecretManager, + SecretManager secretManager) { + return new AccountDefinitionMapper( + mapper, accountDefinitionSecretManager, new SecretSession(secretManager)); + } + + @Bean + @ConditionalOnBean(AccountDefinitionRepository.class) + public AccountDefinitionService accountDefinitionService( + AccountDefinitionRepository repository, + AccountDefinitionSecretManager secretManager, + AccountCredentialsProvider provider, + AccountSecurityPolicy security, + List<AuthorizedRolesExtractor> extractors) { + return new AccountDefinitionService(repository, secretManager, provider, security, extractors); + } + + @Bean + public AccountDefinitionModule accountDefinitionModule( + List<AccountDefinitionTypeProvider> typeProviders) { + return new AccountDefinitionModule( + typeProviders.stream() + .flatMap( + provider -> + provider.getCredentialsTypes().entrySet().stream() + .map(e -> new NamedType(e.getValue(), e.getKey()))) + .toArray(NamedType[]::new)); + } + + /** + * Exports all discovered account definition types from scanning the classpath. Plugins may + * register additional provider beans to contribute additional account types to {@link + * AccountDefinitionRepository}.
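+ * + * <p>For illustration, a plugin might contribute a provider bean along these lines (the type + * discriminator "example" and the {@code ExampleAccountDefinition} class are hypothetical): + * + * <pre>{@code + * @Bean + * AccountDefinitionTypeProvider exampleTypeProvider() { + *   // maps the credentials type name to its CredentialsDefinition class + *   return () -> Map.of("example", ExampleAccountDefinition.class); + * } + * }</pre>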
+ */ + @Bean + public AccountDefinitionTypeProvider defaultAccountDefinitionTypeProvider(ResourceLoader loader) { + var provider = new ClassPathScanningCandidateComponentProvider(false); + provider.setResourceLoader(loader); + provider.addIncludeFilter(new AssignableTypeFilter(CredentialsDefinition.class)); + List<String> scanPackages = new ArrayList<>(properties.additionalScanPackages); + scanPackages.add(0, "com.netflix.spinnaker.clouddriver"); + return () -> + scanPackages.stream() + .flatMap(packageName -> provider.findCandidateComponents(packageName).stream()) + .map(BeanDefinition::getBeanClassName) + .filter(Objects::nonNull) + .map(className -> tryLoadAccountDefinitionClassName(className, loader.getClassLoader())) + .filter(Objects::nonNull) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private Map.Entry<String, Class<? extends CredentialsDefinition>> + tryLoadAccountDefinitionClassName(String className, ClassLoader classLoader) { + try { + Class<? extends CredentialsDefinition> subtype = + ClassUtils.forName(className, classLoader).asSubclass(CredentialsDefinition.class); + String typeName = AccountDefinitionTypes.getCredentialsTypeName(subtype); + if (typeName != null) { + log.info("Discovered credentials definition type '{}' from class '{}'", typeName, subtype); + return Map.entry(typeName, subtype); + } else { + log.debug( + "Skipping CredentialsDefinition class '{}' as it does not define a @CredentialsType annotation", + subtype); + } + } catch (ClassNotFoundException e) { + log.warn( + "Unable to load CredentialsDefinition class '{}'. Credentials with this type will not be loaded.", + className, + e); + } + return null; + } + + @ConfigurationProperties("account.storage") + @ConditionalOnProperty("account.storage.enabled") + @Data + public static class Properties { + /** + * Indicates whether to enable durable storage for account definitions. When enabled with an + * implementation of {@link AccountDefinitionRepository}, account definitions can be stored and + * retrieved by a durable storage provider. + */ + private boolean enabled; + + /** + * Additional packages to scan for {@link + * com.netflix.spinnaker.credentials.definition.CredentialsDefinition} implementation classes + * that may be annotated with {@link JsonTypeName} to participate in the account management + * system. These packages are in addition to the default scan package from within Clouddriver. + * Note that this configuration option only works for account types that are compiled in + * Spinnaker; plugin account types must register a {@link AccountDefinitionTypeProvider} bean + * for additional types. + */ + private List<String> additionalScanPackages = List.of(); + + // TODO(jvz): accounts pubsub config for https://github.com/spinnaker/kork/pull/958 + // - @Import(PubsubConfig.class) + // - CredentialsDefinitionNotifier bean + // - AccountDefinitionProcessor bean + // - account.storage.topic property for accounts pubsub topic + // - @CredentialsType javadoc + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/CloudDriverConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/CloudDriverConfig.java new file mode 100644 index 00000000000..ea6809c231e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/CloudDriverConfig.java @@ -0,0 +1,434 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.config; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; +import com.fasterxml.jackson.datatype.joda.JodaModule; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import com.fasterxml.jackson.module.kotlin.KotlinModule; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.ExecutionInstrumentation; +import com.netflix.spinnaker.cats.agent.NoopExecutionInstrumentation; +import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions; +import com.netflix.spinnaker.clouddriver.cache.CacheConfig; +import com.netflix.spinnaker.clouddriver.cache.NoopOnDemandCacheUpdater; +import com.netflix.spinnaker.clouddriver.cache.OnDemandCacheUpdater; +import com.netflix.spinnaker.clouddriver.core.CloudProvider; +import com.netflix.spinnaker.clouddriver.core.NoopAtomicOperationConverter; +import com.netflix.spinnaker.clouddriver.core.NoopCloudProvider; +import com.netflix.spinnaker.clouddriver.core.ProjectClustersService; +import com.netflix.spinnaker.clouddriver.core.RedisConfig; +import com.netflix.spinnaker.clouddriver.core.agent.CleanupPendingOnDemandCachesAgent; +import com.netflix.spinnaker.clouddriver.core.agent.ProjectClustersCachingAgent; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfigurationBuilder; +import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider; +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.clouddriver.deploy.DefaultDescriptionAuthorizer; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionAuthorizer; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionAuthorizerService; +import com.netflix.spinnaker.clouddriver.jackson.ClouddriverApiModule; +import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; +import com.netflix.spinnaker.clouddriver.model.CloudMetricProvider; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import com.netflix.spinnaker.clouddriver.model.ElasticIpProvider; +import com.netflix.spinnaker.clouddriver.model.ImageProvider; +import com.netflix.spinnaker.clouddriver.model.InstanceProvider; +import com.netflix.spinnaker.clouddriver.model.InstanceTypeProvider; +import com.netflix.spinnaker.clouddriver.model.KeyPairProvider; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.model.NetworkProvider; +import com.netflix.spinnaker.clouddriver.model.NoopApplicationProvider; +import com.netflix.spinnaker.clouddriver.model.NoopCloudMetricProvider; +import com.netflix.spinnaker.clouddriver.model.NoopClusterProvider; +import com.netflix.spinnaker.clouddriver.model.NoopElasticIpProvider; +import com.netflix.spinnaker.clouddriver.model.NoopImageProvider; +import 
com.netflix.spinnaker.clouddriver.model.NoopInstanceProvider; +import com.netflix.spinnaker.clouddriver.model.NoopInstanceTypeProvider; +import com.netflix.spinnaker.clouddriver.model.NoopKeyPairProvider; +import com.netflix.spinnaker.clouddriver.model.NoopLoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.model.NoopNetworkProvider; +import com.netflix.spinnaker.clouddriver.model.NoopReservationReportProvider; +import com.netflix.spinnaker.clouddriver.model.NoopSecurityGroupProvider; +import com.netflix.spinnaker.clouddriver.model.NoopServerGroupManagerProvider; +import com.netflix.spinnaker.clouddriver.model.NoopSubnetProvider; +import com.netflix.spinnaker.clouddriver.model.ReservationReportProvider; +import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider; +import com.netflix.spinnaker.clouddriver.model.ServerGroupManager; +import com.netflix.spinnaker.clouddriver.model.ServerGroupManagerProvider; +import com.netflix.spinnaker.clouddriver.model.SubnetProvider; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.clouddriver.names.NamingStrategy; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter; +import com.netflix.spinnaker.clouddriver.orchestration.ExceptionClassifier; +import com.netflix.spinnaker.clouddriver.saga.SagaEvent; +import com.netflix.spinnaker.clouddriver.search.ApplicationSearchProvider; +import com.netflix.spinnaker.clouddriver.search.NoopSearchProvider; +import com.netflix.spinnaker.clouddriver.search.ProjectSearchProvider; +import com.netflix.spinnaker.clouddriver.search.SearchProvider; +import com.netflix.spinnaker.clouddriver.search.executor.SearchExecutorConfig; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionSecretManager; +import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig; +import com.netflix.spinnaker.config.PluginsAutoConfiguration; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.Credentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.definition.CredentialsLoader; +import com.netflix.spinnaker.credentials.poller.PollerConfiguration; +import com.netflix.spinnaker.credentials.poller.PollerConfigurationProperties; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactDeserializer; +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactStore; +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactStoreConfiguration; +import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer; +import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import java.time.Clock; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import javax.inject.Provider; +import org.springframework.beans.factory.ObjectProvider; +import 
org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.jackson.Jackson2ObjectMapperBuilderCustomizer; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.ApplicationContext; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Import; +import org.springframework.context.annotation.PropertySource; +import org.springframework.core.annotation.Order; +import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler; +import org.springframework.web.client.RestTemplate; + +@Configuration +@Import({ + RedisConfig.class, + CacheConfig.class, + SearchExecutorConfig.class, + PluginsAutoConfiguration.class, + ArtifactStoreConfiguration.class, +}) +@PropertySource( + value = "classpath:META-INF/clouddriver-core.properties", + ignoreResourceNotFound = true) +@EnableConfigurationProperties({ + ProjectClustersCachingAgentProperties.class, + ExceptionClassifierConfigurationProperties.class, + PollerConfigurationProperties.class +}) +class CloudDriverConfig { + + @Bean + @ConditionalOnMissingBean(Clock.class) + Clock clock() { + return Clock.systemDefaultZone(); + } + + @Bean + Jackson2ObjectMapperBuilderCustomizer defaultObjectMapperCustomizer(List<Module> modules) { + return jacksonObjectMapperBuilder -> { + modules.addAll( + List.of( + new Jdk8Module(), + new JavaTimeModule(), + new JodaModule(), + new KotlinModule(), + new ClouddriverApiModule())); + jacksonObjectMapperBuilder.serializationInclusion(JsonInclude.Include.NON_NULL); + jacksonObjectMapperBuilder.failOnEmptyBeans(false); + jacksonObjectMapperBuilder.failOnUnknownProperties(false); + jacksonObjectMapperBuilder.modules(modules); + }; + } + + @Bean + ObjectMapperSubtypeConfigurer.SubtypeLocator clouddriverSubtypeLocator() { + return new ObjectMapperSubtypeConfigurer.ClassSubtypeLocator( + SagaEvent.class, List.of("com.netflix.spinnaker.clouddriver.orchestration.sagas")); + } + + @Bean + String clouddriverUserAgentApplicationName() { + return String.format("Spinnaker/%s", System.getProperty("Implementation-Version", "Unknown")); + } + + @Bean + @ConfigurationProperties("service-limits") + ServiceLimitConfigurationBuilder serviceLimitConfigProperties() { + return new ServiceLimitConfigurationBuilder(); + } + + @Bean + ServiceLimitConfiguration serviceLimitConfiguration( + ServiceLimitConfigurationBuilder serviceLimitConfigProperties) { + return serviceLimitConfigProperties.build(); + } + + @Bean + @ConditionalOnMissingBean(AccountCredentialsRepository.class) + AccountCredentialsRepository accountCredentialsRepository() { + return new MapBackedAccountCredentialsRepository(); + } + + @Bean + @ConditionalOnMissingBean(AccountCredentialsProvider.class) + AccountCredentialsProvider accountCredentialsProvider( + AccountCredentialsRepository accountCredentialsRepository, + CompositeCredentialsRepository<AccountCredentials> compositeRepository) { + return new DefaultAccountCredentialsProvider(accountCredentialsRepository, compositeRepository); + } + + @Bean + @ConditionalOnMissingBean( + value = AccountCredentials.class, + parameterizedContainer =
CompositeCredentialsRepository.class) + CompositeCredentialsRepository compositeCredentialsRepository( + List> repositories) { + return new CompositeCredentialsRepository<>(repositories); + } + + @Bean + PollerConfiguration pollerConfiguration( + ObjectProvider> pollers, + PollerConfigurationProperties pollerConfigurationProperties) { + return new PollerConfiguration(pollerConfigurationProperties, pollers); + } + + @Bean + RestTemplate restTemplate() { + return new RestTemplate(); + } + + @Bean + @ConditionalOnMissingBean(OnDemandCacheUpdater.class) + NoopOnDemandCacheUpdater noopOnDemandCacheUpdater() { + return new NoopOnDemandCacheUpdater(); + } + + @Bean + @ConditionalOnMissingBean(SearchProvider.class) + NoopSearchProvider noopSearchProvider() { + return new NoopSearchProvider(); + } + + @Bean + @ConditionalOnExpression("${services.front50.enabled:true}") + ApplicationSearchProvider applicationSearchProvider(Front50Service front50Service) { + return new ApplicationSearchProvider(front50Service); + } + + @Bean + @ConditionalOnExpression("${services.front50.enabled:true}") + ProjectSearchProvider projectSearchProvider(Front50Service front50Service) { + return new ProjectSearchProvider(front50Service); + } + + @Bean + @ConditionalOnMissingBean(CloudProvider.class) + CloudProvider noopCloudProvider() { + return new NoopCloudProvider(); + } + + @Bean + @ConditionalOnMissingBean(CloudMetricProvider.class) + CloudMetricProvider noopCloudMetricProvider() { + return new NoopCloudMetricProvider(); + } + + @Bean + @ConditionalOnMissingBean(ApplicationProvider.class) + ApplicationProvider noopApplicationProvider() { + return new NoopApplicationProvider(); + } + + @Bean + @ConditionalOnMissingBean(LoadBalancerProvider.class) + LoadBalancerProvider noopLoadBalancerProvider() { + return new NoopLoadBalancerProvider(); + } + + @Bean + @ConditionalOnMissingBean(ClusterProvider.class) + ClusterProvider noopClusterProvider() { + return new NoopClusterProvider(); + } + + @Bean + @ConditionalOnMissingBean(ReservationReportProvider.class) + ReservationReportProvider noopReservationReportProvider() { + return new NoopReservationReportProvider(); + } + + @Bean + @ConditionalOnMissingBean(ExecutionInstrumentation.class) + ExecutionInstrumentation noopExecutionInstrumentation() { + return new NoopExecutionInstrumentation(); + } + + @Bean + @ConditionalOnMissingBean(InstanceProvider.class) + InstanceProvider noopInstanceProvider() { + return new NoopInstanceProvider(); + } + + @Bean + @ConditionalOnMissingBean(ImageProvider.class) + ImageProvider noopImageProvider() { + return new NoopImageProvider(); + } + + @Bean + @ConditionalOnMissingBean(InstanceTypeProvider.class) + InstanceTypeProvider noopInstanceTypeProvider() { + return new NoopInstanceTypeProvider(); + } + + @Bean + @ConditionalOnMissingBean(KeyPairProvider.class) + KeyPairProvider noopKeyPairProvider() { + return new NoopKeyPairProvider(); + } + + @Bean + @ConditionalOnMissingBean(SecurityGroupProvider.class) + SecurityGroupProvider noopSecurityGroupProvider() { + return new NoopSecurityGroupProvider(); + } + + @Bean + @ConditionalOnMissingBean(ServerGroupManager.class) + ServerGroupManagerProvider noopServerGroupManagerProvider() { + return new NoopServerGroupManagerProvider(); + } + + @Bean + @ConditionalOnMissingBean(SubnetProvider.class) + SubnetProvider noopSubnetProvider() { + return new NoopSubnetProvider(); + } + + @Bean + @ConditionalOnMissingBean(NetworkProvider.class) + NetworkProvider noopVpcProvider() { + return new 
NoopNetworkProvider(); + } + + @Bean + @ConditionalOnMissingBean(ElasticIpProvider.class) + ElasticIpProvider noopElasticIpProvider() { + return new NoopElasticIpProvider(); + } + + @Bean + ProjectClustersService projectClustersService( + Front50Service front50Service, + ObjectMapper objectMapper, + Provider> clusterProviders) { + return new ProjectClustersService(front50Service, objectMapper, clusterProviders); + } + + @Bean + CoreProvider coreProvider( + Optional redisCacheOptions, + Optional redisClientDelegate, + ApplicationContext applicationContext, + ProjectClustersService projectClustersService, + ProjectClustersCachingAgentProperties projectClustersCachingAgentProperties) { + List agents = new ArrayList<>(); + agents.add( + new ProjectClustersCachingAgent( + projectClustersService, projectClustersCachingAgentProperties)); + + if (redisCacheOptions.isPresent() && redisClientDelegate.isPresent()) { + agents.add( + new CleanupPendingOnDemandCachesAgent( + redisCacheOptions.get(), redisClientDelegate.get(), applicationContext)); + } + + return new CoreProvider(agents); + } + + @Bean + @ConditionalOnMissingBean(AtomicOperationConverter.class) + AtomicOperationConverter atomicOperationConverter() { + return new NoopAtomicOperationConverter(); + } + + @Bean + public RetrySupport retrySupport() { + return new RetrySupport(); + } + + @Bean + NamerRegistry namerRegistry(Optional> namingStrategies) { + return new NamerRegistry(namingStrategies.orElse(List.of())); + } + + @Bean + DescriptionAuthorizerService descriptionAuthorizerService( + Registry registry, + Optional fiatPermissionEvaluator, + SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps, + AccountDefinitionSecretManager secretManager) { + return new DescriptionAuthorizerService( + registry, fiatPermissionEvaluator, opsSecurityConfigProps, secretManager); + } + + @Bean + @Order + DescriptionAuthorizer descriptionAuthorizer( + DescriptionAuthorizerService descriptionAuthorizerService) { + return new DefaultDescriptionAuthorizer(descriptionAuthorizerService); + } + + @Bean + ExceptionClassifier exceptionClassifier( + ExceptionClassifierConfigurationProperties properties, + DynamicConfigService dynamicConfigService) { + return new ExceptionClassifier(properties, dynamicConfigService); + } + + @Bean + ThreadPoolTaskScheduler threadPoolTaskScheduler( + @Value("${scheduling-thread-pool-size:5}") int threadPoolSize) { + ThreadPoolTaskScheduler threadPoolTaskScheduler = new ThreadPoolTaskScheduler(); + threadPoolTaskScheduler.setPoolSize(threadPoolSize); + threadPoolTaskScheduler.setThreadNamePrefix("ThreadPoolTaskScheduler"); + return threadPoolTaskScheduler; + } + + @Bean + ArtifactDeserializer artifactDeserializer( + ArtifactStore storage, @Qualifier("artifactObjectMapper") ObjectMapper objectMapper) { + return new ArtifactDeserializer(objectMapper, storage); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/DeployConfiguration.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/DeployConfiguration.java new file mode 100644 index 00000000000..9a77b8f5c33 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/DeployConfiguration.java @@ -0,0 +1,122 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
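Two of the bean definitions above are driven directly by externalized properties: the front50-backed search providers are gated on services.front50.enabled (defaulting to true), and the task scheduler pool size comes from scheduling-thread-pool-size (defaulting to 5). A minimal sketch of the corresponding clouddriver.yml fragment; keys are taken from the @ConditionalOnExpression and @Value expressions above, values are illustrative only:

    # sketch only, not part of this diff
    services:
      front50:
        enabled: true
    scheduling-thread-pool-size: 5
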
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.config;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.data.task.InMemoryTaskRepository;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.deploy.DefaultDeployHandlerRegistry;
+import com.netflix.spinnaker.clouddriver.deploy.DeployHandler;
+import com.netflix.spinnaker.clouddriver.deploy.DeployHandlerRegistry;
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionAuthorizer;
+import com.netflix.spinnaker.clouddriver.deploy.NullOpDeployHandler;
+import com.netflix.spinnaker.clouddriver.orchestration.AnnotationsBasedAtomicOperationsRegistry;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry;
+import com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessor;
+import com.netflix.spinnaker.clouddriver.orchestration.ExceptionClassifier;
+import com.netflix.spinnaker.clouddriver.orchestration.OperationsService;
+import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor;
+import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEventHandler;
+import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
+import com.netflix.spinnaker.clouddriver.security.AllowedAccountsValidator;
+import com.netflix.spinnaker.kork.web.context.RequestContextProvider;
+import com.netflix.spinnaker.kork.web.exceptions.ExceptionMessageDecorator;
+import com.netflix.spinnaker.kork.web.exceptions.ExceptionSummaryService;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@ComponentScan("com.netflix.spinnaker.clouddriver.orchestration.sagas")
+class DeployConfiguration {
+  @Bean
+  @ConditionalOnMissingBean(TaskRepository.class)
+  TaskRepository taskRepository() {
+    return new InMemoryTaskRepository();
+  }
+
+  @Bean
+  @ConditionalOnMissingBean(DeployHandlerRegistry.class)
+  DeployHandlerRegistry deployHandlerRegistry(List<DeployHandler> deployHandlers) {
+    return new DefaultDeployHandlerRegistry(deployHandlers);
+  }
+
+  @Bean
+  @ConditionalOnMissingBean(OrchestrationProcessor.class)
+  OrchestrationProcessor orchestrationProcessor(
+      TaskRepository taskRepository,
+      ApplicationContext applicationContext,
+      Registry registry,
+      Optional<Collection<OperationEventHandler>> operationEventHandlers,
+      ObjectMapper objectMapper,
+      ExceptionClassifier exceptionClassifier,
+      RequestContextProvider contextProvider,
+      ExceptionSummaryService exceptionSummaryService) {
+    return new DefaultOrchestrationProcessor(
+        taskRepository,
+        applicationContext,
+        registry,
+        operationEventHandlers,
+        objectMapper,
+        exceptionClassifier,
+        contextProvider,
+        exceptionSummaryService);
+  }
+
+  @Bean
+  @ConditionalOnMissingBean(DeployHandler.class)
+  DeployHandler nullOpDeployHandler() {
+    return new NullOpDeployHandler();
+  }
+
+  @Bean
+  AtomicOperationsRegistry atomicOperationsRegistry() {
+    return new AnnotationsBasedAtomicOperationsRegistry();
+  }
+
+  @Bean
+  OperationsService operationsService(
+      AtomicOperationsRegistry atomicOperationsRegistry,
+      List<DescriptionAuthorizer> descriptionAuthorizers,
+      Optional<Collection<AllowedAccountsValidator>> allowedAccountsValidators,
+      Optional<List<AtomicOperationDescriptionPreProcessor>>
+          atomicOperationDescriptionPreProcessors,
+      AccountCredentialsRepository accountCredentialsRepository,
+      Optional<SagaRepository> sagaRepository,
+      Registry registry,
+      ObjectMapper objectMapper,
+      ExceptionMessageDecorator exceptionMessageDecorator) {
+    return new OperationsService(
+        atomicOperationsRegistry,
+        descriptionAuthorizers,
+        allowedAccountsValidators,
+        atomicOperationDescriptionPreProcessors,
+        accountCredentialsRepository,
+        sagaRepository,
+        registry,
+        objectMapper,
+        exceptionMessageDecorator);
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/DualTaskRepositoryConfiguration.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/DualTaskRepositoryConfiguration.java
new file mode 100644
index 00000000000..83524a7dc34
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/DualTaskRepositoryConfiguration.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.config;
+
+import static java.lang.String.format;
+
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.clouddriver.data.task.DualTaskRepository;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService;
+import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.BeanCreationException;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Primary;
+
+@Configuration
+@ConditionalOnProperty("dual-task-repository.enabled")
+@EnableConfigurationProperties(DualTaskRepositoryConfiguration.Properties.class)
+public class DualTaskRepositoryConfiguration {
+  private ApplicationContext applicationContext;
+  private final Logger log = LoggerFactory.getLogger(getClass());
+
+  @Primary
+  @Bean
+  TaskRepository dualExecutionRepository(
+      Properties properties,
+      List<TaskRepository> allRepositories,
+      DynamicConfigService dynamicConfigService,
+      ApplicationContext applicationContext) {
+    this.applicationContext = applicationContext;
+
+    allRepositories.forEach(repo -> log.info("Available TaskRepository: " + repo));
+
+    TaskRepository primary =
+        findTaskRepository(allRepositories, properties.primaryClass, properties.primaryName);
+    TaskRepository previous =
+        findTaskRepository(allRepositories, properties.previousClass, properties.previousName);
+    return new DualTaskRepository(
+        primary,
+        previous,
+        properties.executorThreadPoolSize,
+        properties.executorTimeoutSeconds,
+        dynamicConfigService);
+  }
+
+  private TaskRepository findTaskRepositoryByClass(
+      List<TaskRepository> allRepositories, String className) {
+    Class<?> repositoryClass;
+    try {
+      repositoryClass = Class.forName(className);
+    } catch (ClassNotFoundException e) {
+      throw new BeanCreationException("Could not find TaskRepository class", e);
+    }
+
+    return allRepositories.stream()
+        .filter(repositoryClass::isInstance)
+        .findFirst()
+        .orElseThrow(
+            () ->
+                new IllegalStateException(
+                    format("No TaskRepository bean of class %s found", repositoryClass)));
+  }
+
+  private TaskRepository findTaskRepository(
+      List<TaskRepository> allRepositories, String beanClass, String beanName) {
+    if (!Strings.isNullOrEmpty(beanName)) {
+      return (TaskRepository) applicationContext.getBean(beanName);
+    }
+
+    return findTaskRepositoryByClass(allRepositories, beanClass);
+  }
+
+  @ConfigurationProperties("dual-task-repository")
+  public static class Properties {
+    /** The primary TaskRepository class or bean name. Only one is needed; name takes precedence. */
+    String primaryClass;
+
+    String primaryName;
+
+    /** The previous TaskRepository class or bean name. Only one is needed; name takes precedence. */
+    String previousClass;
+
+    String previousName;
+
+    /**
+     * The number of threads that will be used for collating TaskRepository results from both
+     * primary and previous backends. For list operations, two threads will be used.
+     */
+    int executorThreadPoolSize = 10;
+
+    /**
+     * The amount of time in seconds that async tasks will have to complete before being timed out.
+     */
+    long executorTimeoutSeconds = 10;
+
+    public String getPrimaryClass() {
+      return primaryClass;
+    }
+
+    public void setPrimaryClass(String primaryClass) {
+      this.primaryClass = primaryClass;
+    }
+
+    public String getPrimaryName() {
+      return primaryName;
+    }
+
+    public void setPrimaryName(String primaryName) {
+      this.primaryName = primaryName;
+    }
+
+    public String getPreviousClass() {
+      return previousClass;
+    }
+
+    public void setPreviousClass(String previousClass) {
+      this.previousClass = previousClass;
+    }
+
+    public String getPreviousName() {
+      return previousName;
+    }
+
+    public void setPreviousName(String previousName) {
+      this.previousName = previousName;
+    }
+
+    public int getExecutorThreadPoolSize() {
+      return executorThreadPoolSize;
+    }
+
+    public void setExecutorThreadPoolSize(int executorThreadPoolSize) {
+      this.executorThreadPoolSize = executorThreadPoolSize;
+    }
+
+    public long getExecutorTimeoutSeconds() {
+      return executorTimeoutSeconds;
+    }
+
+    public void setExecutorTimeoutSeconds(long executorTimeoutSeconds) {
+      this.executorTimeoutSeconds = executorTimeoutSeconds;
+    }
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/ExceptionClassifierConfigurationProperties.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/ExceptionClassifierConfigurationProperties.java
new file mode 100644
index 00000000000..a660d230869
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/ExceptionClassifierConfigurationProperties.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.config;
+
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@ConfigurationProperties("exception-classifier")
+@Data
+public class ExceptionClassifierConfigurationProperties {
+
+  /**
+   * A list of fully-qualified Exception class names that are retryable within the scope of
+   * Saga-backed orchestrations.
+   */
+  private List<String> retryableClasses = new ArrayList<>();
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/LocalJobConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/LocalJobConfig.java
new file mode 100644
index 00000000000..3f9ef8a674d
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/LocalJobConfig.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
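The two property classes above bind under the dual-task-repository and exception-classifier prefixes. A sketch of how they might be set in clouddriver.yml, assuming Spring's relaxed binding; the com.example class names are placeholders, while InMemoryTaskRepository is the fallback imported by DeployConfiguration:

    # sketch only, not part of this diff
    dual-task-repository:
      enabled: true
      # one of *-class / *-name per repository is enough; name takes precedence
      primary-class: com.example.MyPrimaryTaskRepository            # placeholder
      previous-class: com.netflix.spinnaker.clouddriver.data.task.InMemoryTaskRepository
      executor-thread-pool-size: 10
      executor-timeout-seconds: 10
    exception-classifier:
      retryable-classes:
        - com.example.MyRetryableException                          # placeholder
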
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.config;
+
+import com.netflix.spinnaker.clouddriver.jobs.JobExecutor;
+import com.netflix.spinnaker.clouddriver.jobs.local.JobExecutorLocal;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class LocalJobConfig {
+  @Bean
+  @ConditionalOnMissingBean(JobExecutor.class)
+  public JobExecutor jobExecutorLocal(
+      @Value("${jobs.local.timeout-minutes:10}") long timeoutMinutes) {
+    return new JobExecutorLocal(timeoutMinutes);
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/ProjectClustersCachingAgentProperties.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/ProjectClustersCachingAgentProperties.java
new file mode 100644
index 00000000000..eb7ee246c2b
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/ProjectClustersCachingAgentProperties.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.config;
+
+import com.google.common.base.Strings;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@ConfigurationProperties("caching-agent.project-clusters")
+public class ProjectClustersCachingAgentProperties {
+
+  /** A list of allowed project names that will be cached. */
+  List<String> allowList = new ArrayList<>();
+
+  public List<String> getAllowList() {
+    return allowList;
+  }
+
+  public void setAllowList(List<String> allowList) {
+    this.allowList = allowList;
+  }
+
+  public List<String> getNormalizedAllowList() {
+    return allowList.stream()
+        .filter(p -> !Strings.isNullOrEmpty(p))
+        .map(String::toLowerCase)
+        .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/RetrofitConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/RetrofitConfig.java
new file mode 100644
index 00000000000..c4bfbb168c1
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/config/RetrofitConfig.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
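Both LocalJobConfig and ProjectClustersCachingAgentProperties above also read from externalized configuration. A hedged clouddriver.yml sketch; the project name is a placeholder:

    # sketch only, not part of this diff
    jobs:
      local:
        timeout-minutes: 10          # default from the @Value expression above
    caching-agent:
      project-clusters:
        allow-list:                  # entries are lower-cased by getNormalizedAllowList()
          - myproject                # placeholder
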
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.config; + +import static retrofit.Endpoints.newFixedEndpoint; + +import com.jakewharton.retrofit.Ok3Client; +import com.netflix.spinnaker.clouddriver.core.Front50ConfigurationProperties; +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.config.DefaultServiceEndpoint; +import com.netflix.spinnaker.config.okhttp3.OkHttpClientProvider; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler; +import com.netflix.spinnaker.retrofit.Slf4jRetrofitLogger; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import retrofit.Endpoint; +import retrofit.RequestInterceptor; +import retrofit.RestAdapter; +import retrofit.converter.JacksonConverter; + +@Configuration +@EnableConfigurationProperties(Front50ConfigurationProperties.class) +class RetrofitConfig { + + @Bean + @ConditionalOnProperty(name = "services.front50.enabled", matchIfMissing = true) + Front50Service front50Service( + Front50ConfigurationProperties front50ConfigurationProperties, + RestAdapter.LogLevel retrofitLogLevel, + OkHttpClientProvider clientProvider, + RequestInterceptor spinnakerRequestInterceptor) { + Endpoint endpoint = newFixedEndpoint(front50ConfigurationProperties.getBaseUrl()); + return new RestAdapter.Builder() + .setRequestInterceptor(spinnakerRequestInterceptor) + .setEndpoint(endpoint) + .setClient( + new Ok3Client( + clientProvider.getClient(new DefaultServiceEndpoint("front50", endpoint.getUrl())))) + .setConverter(new JacksonConverter()) + .setLogLevel(retrofitLogLevel) + .setLog(new Slf4jRetrofitLogger(Front50Service.class)) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) + .build() + .create(Front50Service.class); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/AccountHealthIndicator.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/AccountHealthIndicator.java new file mode 100644 index 00000000000..ccd7f21de77 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/AccountHealthIndicator.java @@ -0,0 +1,97 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
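RetrofitConfig above resolves the front50 endpoint from Front50ConfigurationProperties via getBaseUrl(). Assuming the conventional services.front50 prefix for that properties class (the class itself is not shown in this diff), a sketch:

    # sketch only; prefix is an assumption, URL is illustrative
    services:
      front50:
        enabled: true                          # matchIfMissing = true, so on by default
        base-url: http://front50.example.com:8080
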
+ */
+
+package com.netflix.spinnaker.clouddriver.core;
+
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spectator.api.patterns.PolledMeter;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.Nonnull;
+import org.springframework.boot.actuate.health.Health;
+import org.springframework.boot.actuate.health.HealthIndicator;
+import org.springframework.scheduling.annotation.Scheduled;
+
+/**
+ * This class represents a health indicator that reports on the status of individual cloud provider
+ * accounts.
+ *
+ * <p>It will always report a status of UP, to prevent issues with a single cloud provider from
+ * bringing down all of clouddriver, but any errors associated with individual accounts will appear
+ * in the detailed health information.
+ *
+ * <p>The number of unhealthy accounts will be reported as the metric health.id.errors, where id is
+ * the id supplied to the constructor.
+ *
+ * @param <T> The type of account credentials this health indicator supports
+ */
+public abstract class AccountHealthIndicator<T extends AccountCredentials>
+    implements HealthIndicator {
+  @Nonnull private Health health = new Health.Builder().up().build();
+  @Nonnull private final AtomicLong unhealthyAccounts = new AtomicLong(0);
+
+  /**
+   * Create an {@code AccountHealthIndicator} reporting metrics to the supplied registry, using the
+   * supplied id.
+   *
+   * @param id A unique identifier for the health indicator, used for reporting metrics
+   * @param registry The registry to which metrics should be reported
+   */
+  protected AccountHealthIndicator(String id, Registry registry) {
+    PolledMeter.using(registry).withName(metricName(id)).monitorValue(unhealthyAccounts);
+  }
+
+  private static String metricName(String id) {
+    return "health." + id + ".errors";
+  }
+
+  @Override
+  public final Health health() {
+    return health;
+  }
+
+  @Scheduled(fixedDelay = 300000L)
+  public void checkHealth() {
+    long errors = 0;
+    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
+    for (T account : getAccounts()) {
+      Optional<String> error = accountHealth(account);
+      if (error.isPresent()) {
+        errors++;
+        builder.put(account.getName(), error.get());
+      }
+    }
+    unhealthyAccounts.set(errors);
+    health = new Health.Builder().up().withDetails(builder.build()).build();
+  }
+
+  /**
+   * Returns the accounts that should be considered by this health indicator.
+   *
+   * @return The accounts to be considered by this health indicator
+   */
+  protected abstract Iterable<? extends T> getAccounts();
+
+  /**
+   * Checks the health of a given account.
+   *
+   * @return An empty {@code Optional} if the account is healthy. Otherwise, an {@code
+   *     Optional} containing an error message describing why the account is unhealthy.
+   */
+  protected abstract Optional<String> accountHealth(T account);
+}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/AlwaysUpHealthIndicator.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/AlwaysUpHealthIndicator.java
similarity index 97%
rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/AlwaysUpHealthIndicator.java
rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/AlwaysUpHealthIndicator.java
index 0be2a0583b1..c2d74e7beb7 100644
--- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/AlwaysUpHealthIndicator.java
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/AlwaysUpHealthIndicator.java
@@ -16,14 +16,13 @@
 package com.netflix.spinnaker.clouddriver.core;
 
 import com.netflix.spectator.api.Registry;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.boot.actuate.health.Health;
 import org.springframework.boot.actuate.health.HealthIndicator;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-
 public abstract class AlwaysUpHealthIndicator implements HealthIndicator {
   private static final Logger log = LoggerFactory.getLogger(AlwaysUpHealthIndicator.class);
@@ -40,7 +39,8 @@ public AlwaysUpHealthIndicator(Registry registry, String name) {
 
   @Override
   public Health health() {
     if (hasInitialized.get() == Boolean.TRUE) {
-      // avoid being marked unhealthy once connectivity to all accounts has been verified at least once
+      // avoid being marked unhealthy once connectivity to all accounts has been verified at least
+      // once
       return new Health.Builder().up().build();
     }
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/ClouddriverHostname.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/ClouddriverHostname.java
similarity index 95%
rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/ClouddriverHostname.java
rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/ClouddriverHostname.java
index 3431da16dbe..2170b8d887e 100644
--- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/ClouddriverHostname.java
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/ClouddriverHostname.java
@@ -16,12 +16,11 @@
 
 package com.netflix.spinnaker.clouddriver.core;
 
-
 import java.net.InetAddress;
 import java.util.UUID;
 
 public class ClouddriverHostname {
-  public final static String ID = id();
+  public static final String ID = id();
 
   private static String id() {
     String hostname;
@@ -33,5 +32,4 @@ private static String id() {
 
     return UUID.randomUUID() + "@" + hostname;
   }
-
 }
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/ProjectClustersService.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/ProjectClustersService.java
new file mode 100644
index 00000000000..746778d7639
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/ProjectClustersService.java
@@ -0,0 +1,473 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
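ProjectClustersService, introduced below, maps the raw front50 project document onto its nested Project/ProjectConfig/ProjectCluster classes. A sketch of the document shape it expects, with illustrative values; per nameMatches(), null stack/detail match clusters without those name parts and "*" acts as a wildcard:

    # sketch of a front50 project payload, not part of this diff
    name: myproject                  # placeholder project name
    config:
      applications:
        - myapp
      clusters:
        - account: prod              # compared against Cluster.getAccountName()
          stack: main                # or "*" to match any stack
          detail: null               # null matches clusters with no detail part
          applications: null         # null falls back to config.applications
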
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.core;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.core.services.Front50Service;
+import com.netflix.spinnaker.clouddriver.model.Cluster;
+import com.netflix.spinnaker.clouddriver.model.ClusterProvider;
+import com.netflix.spinnaker.clouddriver.model.ServerGroup;
+import java.util.*;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import javax.inject.Provider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ProjectClustersService {
+
+  private static final Logger log = LoggerFactory.getLogger(ProjectClustersService.class);
+
+  private final Front50Service front50Service;
+  private final ObjectMapper objectMapper;
+  private final Provider<List<ClusterProvider>> clusterProviders;
+
+  public ProjectClustersService(
+      Front50Service front50Service,
+      ObjectMapper objectMapper,
+      Provider<List<ClusterProvider>> clusterProviders) {
+    this.front50Service = front50Service;
+    this.objectMapper =
+        objectMapper.copy().disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
+    this.clusterProviders = clusterProviders;
+  }
+
+  public Map<String, List<ClusterModel>> getProjectClusters(List<String> projectNames) {
+    Map<String, List<ClusterModel>> projectClusters = new HashMap<>();
+
+    for (String projectName : projectNames) {
+      try {
+        Map projectMap = front50Service.getProject(projectName);
+
+        Project project;
+        try {
+          project = objectMapper.convertValue(projectMap, Project.class);
+        } catch (IllegalArgumentException e) {
+          log.error("Could not marshal project '{}' to internal model", projectName, e);
+          continue;
+        }
+
+        if (project.config.clusters.isEmpty()) {
+          projectClusters.put(project.name, Collections.emptyList());
+          log.debug("Project '{}' does not have any clusters", projectName);
+          continue;
+        }
+
+        projectClusters.put(project.name, getProjectClusters(project));
+      } catch (Exception e) {
+        log.error("Unable to fetch clusters for project '{}'", projectName, e);
+      }
+    }
+
+    return projectClusters;
+  }
+
+  public List<ClusterModel> getProjectClusters(String projectName) {
+    Map projectData = front50Service.getProject(projectName);
+
+    if (projectData == null) {
+      return null;
+    }
+
+    Project project;
+    try {
+      project = objectMapper.convertValue(projectData, Project.class);
+    } catch (IllegalArgumentException e) {
+      throw new MalformedProjectDataException(
+          "Could not marshal project to internal model: " + projectName, e);
+    }
+
+    return getProjectClusters(project);
+  }
+
+  public List<ClusterModel> getProjectClusters(Project project) {
+    List<String> applicationsToRetrieve =
+        Optional.ofNullable(project.config.applications).orElse(Collections.emptyList());
+    Map<String, Set<Cluster>> allClusters = retrieveClusters(applicationsToRetrieve, project);
+
+    return project.config.clusters.stream()
+        .map(
+            projectCluster -> {
+              List<String> applications =
+                  Optional.ofNullable(projectCluster.applications)
+                      .orElse(project.config.applications);
+              List<ApplicationClusterModel> applicationModels =
+                  applications.stream()
+                      .map(
+                          application -> {
+                            Set<Cluster> appClusters = allClusters.get(application);
+                            Set<Cluster> clusterMatches =
+                                findClustersForProject(appClusters, projectCluster);
+                            return new ApplicationClusterModel(application, clusterMatches);
+                          })
+                      .collect(Collectors.toList());
+
+              return new ClusterModel(
+                  projectCluster.account,
+                  projectCluster.stack,
+                  projectCluster.detail,
+                  applicationModels);
+            })
+        .collect(Collectors.toList());
+  }
+
+  private Map<String, Set<Cluster>> retrieveClusters(List<String> applications, Project project) {
+    Map<String, Set<Cluster>> allClusters = new HashMap<>();
+
+    for (String application : applications) {
+      for (RetrievedClusters clusters : retrieveClusters(application, project)) {
+        allClusters
+            .computeIfAbsent(clusters.application, s -> new HashSet<>())
+            .addAll(clusters.clusters);
+      }
+    }
+
+    return allClusters;
+  }
+
+  private Set<Cluster> findClustersForProject(
+      Set<Cluster> appClusters, ProjectCluster projectCluster) {
+    if (appClusters == null || appClusters.isEmpty()) {
+      return Collections.emptySet();
+    }
+
+    return appClusters.stream()
+        .filter(
+            appCluster -> {
+              Names clusterNameParts = Names.parseName(appCluster.getName());
+              return appCluster.getAccountName().equals(projectCluster.account)
+                  && nameMatches(clusterNameParts.getStack(), projectCluster.stack)
+                  && nameMatches(clusterNameParts.getDetail(), projectCluster.detail);
+            })
+        .collect(Collectors.toSet());
+  }
+
+  private List<RetrievedClusters> retrieveClusters(String application, Project project) {
+    return clusterProviders.get().stream()
+        .map(
+            clusterProvider -> {
+              Map<String, Set<Cluster>> clusterSummariesByAccount =
+                  clusterProvider.getClusterSummaries(application);
+              if (clusterSummariesByAccount == null) {
+                return null;
+              }
+
+              Set<Cluster> allClusterSummaries =
+                  clusterSummariesByAccount.values().stream()
+                      .flatMap(Collection::stream)
+                      .collect(Collectors.toSet());
+
+              Set<Cluster> matchingClusterSummaries = new HashSet<>();
+              for (ProjectCluster projectCluster : project.config.clusters) {
+                matchingClusterSummaries.addAll(
+                    findClustersForProject(allClusterSummaries, projectCluster));
+              }
+
+              Set<Cluster> expandedClusters =
+                  matchingClusterSummaries.stream()
+                      .map(
+                          c ->
+                              clusterProvider.getCluster(
+                                  c.getMoniker().getApp(), c.getAccountName(), c.getName()))
+                      .collect(Collectors.toSet());
+
+              return new RetrievedClusters(application, expandedClusters);
+            })
+        .filter(Objects::nonNull)
+        .collect(Collectors.toList());
+  }
+
+  static boolean nameMatches(String clusterNameValue, String projectClusterValue) {
+    if (projectClusterValue == null && clusterNameValue == null) {
+      return true;
+    }
+    if (projectClusterValue != null) {
+      return projectClusterValue.equals(clusterNameValue) || "*".equals(projectClusterValue);
+    }
+    return false;
+  }
+
+  public static class Project {
+    public String name;
+    public ProjectConfig config;
+  }
+
+  public static class ProjectConfig {
+    public List<ProjectCluster> clusters;
+    public List<String> applications;
+  }
+
+  public static class ProjectCluster {
+    public String account;
+    public String stack;
+    public String detail;
+    public List<String> applications;
+  }
+
+  static class RetrievedClusters {
+    public String application;
+    public Set<Cluster> clusters;
+
+    public RetrievedClusters(String application, Set<Cluster> clusters) {
+      this.application = application;
+      this.clusters = clusters;
+    }
+  }
+
+  public static class ClusterModel {
+    public String account;
+    public String stack;
+    public String detail;
+    public List<ApplicationClusterModel> applications;
+    public ServerGroup.InstanceCounts instanceCounts;
+
+    public ClusterModel(
+        String account, String stack, String detail, List<ApplicationClusterModel> applications) {
+      this.account = account;
+      this.stack = stack;
+      this.detail = detail;
+      this.applications = applications;
+      this.instanceCounts = getInstanceCounts();
+    }
+
+    ServerGroup.InstanceCounts getInstanceCounts() {
+      ServerGroup.InstanceCounts instanceCounts = new ServerGroup.InstanceCounts();
+
+      applications.stream()
+          .flatMap(a -> a.clusters.stream())
+          .map(c -> c.instanceCounts)
+          .forEach(i -> incrementInstanceCounts(i, instanceCounts));
+
+      return instanceCounts;
+    }
+  }
+
+  static class ApplicationClusterModel {
+    public String application;
+    public Set<RegionClusterModel> clusters = new HashSet<>();
+
+    ApplicationClusterModel(String application, Set<Cluster> appClusters) {
+      this.application = application;
+      Map<String, RegionClusterModel> regionClusters = new HashMap<>();
+      appClusters.stream()
+          .flatMap(ac -> ac.getServerGroups().stream())
+          .filter(
+              serverGroup ->
+                  serverGroup != null
+                      && (serverGroup.isDisabled() == null || !serverGroup.isDisabled())
+                      && serverGroup.getInstanceCounts().getTotal() > 0)
+          .forEach(
+              (ServerGroup serverGroup) -> {
+                RegionClusterModel regionCluster =
+                    regionClusters.computeIfAbsent(
+                        serverGroup.getRegion(),
+                        s -> new RegionClusterModel(serverGroup.getRegion()));
+                incrementInstanceCounts(serverGroup, regionCluster.instanceCounts);
+
+                ServerGroup.ImagesSummary imagesSummary = serverGroup.getImagesSummary();
+                List<? extends ServerGroup.ImageSummary> imageSummaries =
+                    imagesSummary == null ? new ArrayList<>() : imagesSummary.getSummaries();
+                JenkinsBuildInfo buildInfo = extractJenkinsBuildInfo(imageSummaries);
+                Optional<DeployedBuild> existingBuild =
+                    regionCluster.builds.stream()
+                        .filter(
+                            b ->
+                                b.buildNumber.equals(buildInfo.number)
+                                    && Optional.ofNullable(b.host)
+                                        .equals(Optional.ofNullable(buildInfo.host))
+                                    && Optional.ofNullable(b.job)
+                                        .equals(Optional.ofNullable(buildInfo.name)))
+                        .findFirst();
+
+                new OptionalConsumer<>(
+                        (DeployedBuild b) -> {
+                          b.deployed = Math.max(b.deployed, serverGroup.getCreatedTime());
+                          List<String> images = getServerGroupBuildInfoImages(imageSummaries);
+                          if (images != null) {
+                            images.forEach(
+                                image -> {
+                                  if (image != null && !b.images.contains(image)) {
+                                    b.images.add(image);
+                                  }
+                                });
+                          }
+                        },
+                        () ->
+                            regionCluster.builds.add(
+                                new DeployedBuild(
+                                    buildInfo.host,
+                                    buildInfo.name,
+                                    buildInfo.number,
+                                    serverGroup.getCreatedTime(),
+                                    getServerGroupBuildInfoImages(imageSummaries))))
+                    .accept(existingBuild);
+              });
+      clusters.addAll(regionClusters.values());
+    }
+
+    @JsonProperty
+    Long getLastPush() {
+      long lastPush = 0;
+      for (RegionClusterModel cluster : clusters) {
+        if (cluster.getLastPush() != null && cluster.getLastPush() > lastPush) {
+          lastPush = cluster.getLastPush();
+        }
+      }
+      return lastPush;
+    }
+  }
+
+  static class RegionClusterModel {
+    public String region;
+    public List<DeployedBuild> builds = new ArrayList<>();
+    public ServerGroup.InstanceCounts instanceCounts = new ServerGroup.InstanceCounts();
+
+    public RegionClusterModel(String region) {
+      this.region = region;
+    }
+
+    @JsonProperty
+    Long getLastPush() {
+      long max = 0;
+      for (DeployedBuild build : builds) {
+        if (build.deployed != null && build.deployed > max) {
+          max = build.deployed;
+        }
+      }
+      return max;
+    }
+  }
+
+  static class JenkinsBuildInfo {
+    public String number;
+    public String host;
+    public String name;
+
+    public JenkinsBuildInfo() {
+      this("0", null, null);
+    }
+
+    public JenkinsBuildInfo(String number, String host, String name) {
+      this.number = number;
+      this.host = host;
+      this.name = name;
+    }
+  }
+
+  static class DeployedBuild {
+    public String host;
+    public String job;
+    public String buildNumber;
+    public Long deployed;
+    public List<String> images;
+
+    public DeployedBuild(
+        String host, String job, String buildNumber, Long deployed, List<String> images) {
+      this.host = host;
+      this.job = job;
+      this.buildNumber = buildNumber;
+      this.deployed = deployed;
+      this.images = (images == null) ? new ArrayList<>() : new ArrayList<>(images);
+    }
+  }
+
+  private static void incrementInstanceCounts(
+      ServerGroup source, ServerGroup.InstanceCounts target) {
+    incrementInstanceCounts(source.getInstanceCounts(), target);
+  }
+
+  private static void incrementInstanceCounts(
+      ServerGroup.InstanceCounts source, ServerGroup.InstanceCounts target) {
+    target.setTotal(target.getTotal() + source.getTotal());
+    target.setUp(target.getUp() + source.getUp());
+    target.setDown(target.getDown() + source.getDown());
+    target.setOutOfService(target.getOutOfService() + source.getOutOfService());
+    target.setStarting(target.getStarting() + source.getStarting());
+    target.setUnknown(target.getUnknown() + source.getUnknown());
+  }
+
+  @Nonnull
+  private static JenkinsBuildInfo extractJenkinsBuildInfo(
+      List<? extends ServerGroup.ImageSummary> imageSummaries) {
+    if (imageSummaries.isEmpty()) {
+      return new JenkinsBuildInfo();
+    }
+    ServerGroup.ImageSummary imageSummary = imageSummaries.get(0);
+
+    Map<String, Object> buildInfo = imageSummary.getBuildInfo();
+    if (buildInfo == null || !buildInfo.containsKey("jenkins")) {
+      return new JenkinsBuildInfo();
+    }
+    if (!(buildInfo.get("jenkins") instanceof Map)) {
+      return new JenkinsBuildInfo();
+    }
+    Map jenkinsBuildInfo = (Map) buildInfo.get("jenkins");
+
+    String buildNumber = (String) jenkinsBuildInfo.getOrDefault("number", "0");
+    String host = (String) jenkinsBuildInfo.get("host");
+    String job = (String) jenkinsBuildInfo.get("name");
+
+    return new JenkinsBuildInfo(buildNumber, host, job);
+  }
+
+  private static List<String> getServerGroupBuildInfoImages(
+      List<? extends ServerGroup.ImageSummary> imageSummaries) {
+    if (imageSummaries.isEmpty()) {
+      return null;
+    }
+    ServerGroup.ImageSummary imageSummary = imageSummaries.get(0);
+    Map<String, Object> buildInfo = imageSummary.getBuildInfo();
+    if (buildInfo == null || !buildInfo.containsKey("images")) {
+      return null;
+    }
+
+    return (List<String>) buildInfo.get("images");
+  }
+
+  private static class OptionalConsumer<T> implements Consumer<Optional<T>> {
+
+    public static <T> OptionalConsumer<T> of(Consumer<T> consumer, Runnable runnable) {
+      return new OptionalConsumer<>(consumer, runnable);
+    }
+
+    private final Consumer<T> consumer;
+    private final Runnable runnable;
+
+    OptionalConsumer(Consumer<T> consumer, Runnable runnable) {
+      super();
+      this.consumer = consumer;
+      this.runnable = runnable;
+    }
+
+    @Override
+    public void accept(Optional<T> t) {
+      if (t.isPresent()) {
+        consumer.accept(t.get());
+      } else {
+        runnable.run();
+      }
+    }
+  }
+
+  public static class MalformedProjectDataException extends RuntimeException {
+    MalformedProjectDataException(String message, Throwable cause) {
+      super(message, cause);
+    }
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/agent/CleanupPendingOnDemandCachesAgent.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/agent/CleanupPendingOnDemandCachesAgent.java
new file mode 100644
index 00000000000..b78dc6102e4
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/agent/CleanupPendingOnDemandCachesAgent.java
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2016 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.core.agent;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.netflix.spinnaker.cats.agent.RunnableAgent;
+import com.netflix.spinnaker.cats.module.CatsModule;
+import com.netflix.spinnaker.cats.provider.Provider;
+import com.netflix.spinnaker.cats.redis.cache.RedisCacheOptions;
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent;
+import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider;
+import com.netflix.spinnaker.kork.jedis.RedisClientDelegate;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.context.ApplicationContext;
+import redis.clients.jedis.Response;
+import redis.clients.jedis.ScanParams;
+import redis.clients.jedis.ScanResult;
+
+public class CleanupPendingOnDemandCachesAgent implements RunnableAgent, CustomScheduledAgent {
+  private static final Logger log =
+      LoggerFactory.getLogger(CleanupPendingOnDemandCachesAgent.class);
+
+  private static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(30);
+  private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5);
+
+  private final RedisCacheOptions redisCacheOptions;
+  private final RedisClientDelegate redisClientDelegate;
+  private final ApplicationContext applicationContext;
+  private final long pollIntervalMillis;
+  private final long timeoutMillis;
+
+  public CleanupPendingOnDemandCachesAgent(
+      RedisCacheOptions redisCacheOptions,
+      RedisClientDelegate redisClientDelegate,
+      ApplicationContext applicationContext) {
+    this(
+        redisCacheOptions,
+        redisClientDelegate,
+        applicationContext,
+        DEFAULT_POLL_INTERVAL_MILLIS,
+        DEFAULT_TIMEOUT_MILLIS);
+  }
+
+  private CleanupPendingOnDemandCachesAgent(
+      RedisCacheOptions redisCacheOptions,
+      RedisClientDelegate redisClientDelegate,
+      ApplicationContext applicationContext,
+      long pollIntervalMillis,
+      long timeoutMillis) {
+    this.redisCacheOptions = redisCacheOptions;
+    this.redisClientDelegate = redisClientDelegate;
+    this.applicationContext = applicationContext;
+    this.pollIntervalMillis = pollIntervalMillis;
+    this.timeoutMillis = timeoutMillis;
+  }
+
+  @Override
+  public String getAgentType() {
+    return CleanupPendingOnDemandCachesAgent.class.getSimpleName();
+  }
+
+  @Override
+  public String getProviderName() {
+    return CoreProvider.PROVIDER_NAME;
+  }
+
+  @Override
+  public void run() {
+    run(getCatsModule().getProviderRegistry().getProviders());
+  }
+
+  void run(Collection<Provider> providers) {
+    providers.forEach(
+        provider -> {
+          String onDemandSetName = provider.getProviderName() + ":onDemand:members";
+          List<String> onDemandKeys =
+              scanMembers(onDemandSetName).stream()
+                  .filter(s -> !s.equals("_ALL_"))
+                  .collect(Collectors.toList());
+
+          Map<String, Response<Boolean>> existingOnDemandKeys = new HashMap<>();
+          if (redisClientDelegate.supportsMultiKeyPipelines()) {
+            redisClientDelegate.withMultiKeyPipeline(
+                pipeline -> {
+                  for (List<String> partition :
+                      Iterables.partition(onDemandKeys, redisCacheOptions.getMaxDelSize())) {
+                    for (String id : partition) {
+                      existingOnDemandKeys.put(
+                          id,
+                          pipeline.exists(
+                              provider.getProviderName() + ":onDemand:attributes:" + id));
+                    }
+                  }
+                  pipeline.sync();
+                });
+          } else {
+            redisClientDelegate.withCommandsClient(
+                client -> {
+                  onDemandKeys.stream()
+                      .filter(
+                          k ->
+                              client.exists(
+                                  provider.getProviderName() + ":onDemand:attributes:" + k))
+                      .forEach(k -> existingOnDemandKeys.put(k, new StaticResponse(Boolean.TRUE)));
+                });
+          }
+
+          List<String> onDemandKeysToRemove = new ArrayList<>();
+          for (String onDemandKey : onDemandKeys) {
+            if (!existingOnDemandKeys.containsKey(onDemandKey)
+                || !existingOnDemandKeys.get(onDemandKey).get()) {
+              onDemandKeysToRemove.add(onDemandKey);
+            }
+          }
+
+          if (!onDemandKeysToRemove.isEmpty()) {
+            log.info("Removing {} from {}", onDemandKeysToRemove.size(), onDemandSetName);
+            log.debug("Removing {} from {}", onDemandKeysToRemove, onDemandSetName);
+
+            redisClientDelegate.withMultiKeyPipeline(
+                pipeline -> {
+                  for (List<String> idPartition :
+                      Lists.partition(onDemandKeysToRemove, redisCacheOptions.getMaxDelSize())) {
+                    String[] ids = idPartition.toArray(new String[idPartition.size()]);
+                    pipeline.srem(onDemandSetName, ids);
+                  }
+
+                  pipeline.sync();
+                });
+          }
+        });
+  }
+
+  public long getPollIntervalMillis() {
+    return pollIntervalMillis;
+  }
+
+  public long getTimeoutMillis() {
+    return timeoutMillis;
+  }
+
+  private Set<String> scanMembers(String setKey) {
+    return redisClientDelegate.withCommandsClient(
+        client -> {
+          final Set<String> matches = new HashSet<>();
+          final ScanParams scanParams = new ScanParams().count(redisCacheOptions.getScanSize());
+          String cursor = "0";
+          while (true) {
+            final ScanResult<String> scanResult = client.sscan(setKey, cursor, scanParams);
+            matches.addAll(scanResult.getResult());
+            cursor = scanResult.getCursor();
+            if ("0".equals(cursor)) {
+              return matches;
+            }
+          }
+        });
+  }
+
+  private CatsModule getCatsModule() {
+    return applicationContext.getBean(CatsModule.class);
+  }
+
+  private static class StaticResponse extends Response<Boolean> {
+    private final Boolean value;
+
+    StaticResponse(Boolean value) {
+      super(null);
+      this.value = value;
+    }
+
+    @Override
+    public Boolean get() {
+      return value;
+    }
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/agent/ProjectClustersCachingAgent.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/agent/ProjectClustersCachingAgent.java
new file mode 100644
index 00000000000..2c777efc8de
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/agent/ProjectClustersCachingAgent.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.core.agent;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.PROJECT_CLUSTERS;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.agent.CachingAgent;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent;
+import com.netflix.spinnaker.clouddriver.config.ProjectClustersCachingAgentProperties;
+import com.netflix.spinnaker.clouddriver.core.ProjectClustersService;
+import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+public class ProjectClustersCachingAgent implements CachingAgent, CustomScheduledAgent {
+
+  private static final long DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(1);
+  private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30);
+
+  private final Collection<AgentDataType> types =
+      Collections.singletonList(AUTHORITATIVE.forType(PROJECT_CLUSTERS.ns));
+
+  private final ProjectClustersService projectClustersService;
+  private final ProjectClustersCachingAgentProperties properties;
+
+  public ProjectClustersCachingAgent(
+      ProjectClustersService projectClustersService,
+      ProjectClustersCachingAgentProperties properties) {
+    this.projectClustersService = projectClustersService;
+    this.properties = properties;
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return types;
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    return new DefaultCacheResult(
+        Collections.singletonMap(
+            PROJECT_CLUSTERS.ns,
+            Collections.singletonList(
+                new MutableCacheData(
+                    "v1",
+                    new HashMap<>(
+                        projectClustersService.getProjectClusters(
+                            properties.getNormalizedAllowList())),
+                    Collections.emptyMap()))));
+  }
+
+  static class MutableCacheData implements CacheData {
+
+    private final String id;
+    private final int ttlSeconds = -1;
+    private final Map<String, Object> attributes = new HashMap<>();
+    private final Map<String, Collection<String>> relationships = new HashMap<>();
+
+    public MutableCacheData(String id) {
+      this.id = id;
+    }
+
+    @JsonCreator
+    public MutableCacheData(
+        String id,
+        Map<String, Object> attributes,
+        Map<String, Collection<String>> relationships) {
+      this.id = id;
+      this.attributes.putAll(attributes);
+      this.relationships.putAll(relationships);
+    }
+
+    @Override
+    public String getId() {
+      return id;
+    }
+
+    @Override
+    public int getTtlSeconds() {
+      return ttlSeconds;
+    }
+
+    @Override
+    public Map<String, Object> getAttributes() {
+      return attributes;
+    }
+
+    @Override
+    public Map<String, Collection<String>> getRelationships() {
+      return relationships;
+    }
+  }
+
+  @Override
+  public long getPollIntervalMillis() {
+    return DEFAULT_POLL_INTERVAL_MILLIS;
+  }
+
+  @Override
+  public long getTimeoutMillis() {
+    return DEFAULT_TIMEOUT_MILLIS;
+  }
+
+  @Override
+  public String getAgentType() {
+    return ProjectClustersCachingAgent.class.getSimpleName();
+  }
+
+  @Override
+  public String getProviderName() {
+    return CoreProvider.PROVIDER_NAME;
+  }
+}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ImplementationLimits.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ImplementationLimits.java
similarity index 80%
rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ImplementationLimits.java
rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ImplementationLimits.java
index a4cde82e597..ea4942aabb7 100644
--- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ImplementationLimits.java
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ImplementationLimits.java
@@ -17,7 +17,6 @@
 package com.netflix.spinnaker.clouddriver.core.limits;
 
 import com.google.common.collect.ImmutableMap;
-
 import java.util.Collections;
 import java.util.Map;
 import java.util.Optional;
@@ -28,14 +27,14 @@ public class ImplementationLimits {
 
   public ImplementationLimits(ServiceLimits defaults, Map<String, ServiceLimits> accountOverrides) {
     this.defaults = defaults == null ? new ServiceLimits(null) : defaults;
-    this.accountOverrides = accountOverrides == null ? Collections.emptyMap() : ImmutableMap.copyOf(accountOverrides);
+    this.accountOverrides =
+        accountOverrides == null ? Collections.emptyMap() : ImmutableMap.copyOf(accountOverrides);
   }
 
   public Double getLimit(String limit, String account) {
-    return Optional
-        .ofNullable(account)
-        .map(accountOverrides::get)
-        .map(sl -> sl.getLimit(limit))
-        .orElse(defaults.getLimit(limit));
+    return Optional.ofNullable(account)
+        .map(accountOverrides::get)
+        .map(sl -> sl.getLimit(limit))
+        .orElse(defaults.getLimit(limit));
   }
 }
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfiguration.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfiguration.java
new file mode 100644
index 00000000000..38ebe43d8fa
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfiguration.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.core.limits;
+
+import com.google.common.collect.ImmutableMap;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+
+public class ServiceLimitConfiguration {
+  public static final String POLL_INTERVAL_MILLIS = "agentPollIntervalMs";
+  public static final String POLL_TIMEOUT_MILLIS = "agentPollTimeoutMs";
+  public static final String API_RATE_LIMIT = "rateLimit";
+
+  private final ServiceLimits defaults;
+  private final Map<String, ServiceLimits> cloudProviderOverrides;
+  private final Map<String, ServiceLimits> accountOverrides;
+  private final Map<String, ImplementationLimits> implementationLimits;
+
+  public ServiceLimitConfiguration(
+      ServiceLimits defaults,
+      Map<String, ServiceLimits> cloudProviderOverrides,
+      Map<String, ServiceLimits> accountOverrides,
+      Map<String, ImplementationLimits> implementationLimits) {
+    this.defaults = defaults == null ? new ServiceLimits(null) : defaults;
+    this.cloudProviderOverrides =
+        cloudProviderOverrides == null
+            ?
Collections.emptyMap() + : ImmutableMap.copyOf(cloudProviderOverrides); + this.accountOverrides = + accountOverrides == null ? Collections.emptyMap() : ImmutableMap.copyOf(accountOverrides); + this.implementationLimits = + implementationLimits == null + ? Collections.emptyMap() + : ImmutableMap.copyOf(implementationLimits); + } + + public Double getLimit( + String limit, + String implementation, + String account, + String cloudProvider, + Double defaultValue) { + return Optional.ofNullable(getImplementationLimit(limit, implementation, account)) + .orElse( + Optional.ofNullable(getAccountLimit(limit, account)) + .orElse( + Optional.ofNullable(getCloudProviderLimit(limit, cloudProvider)) + .orElse( + Optional.ofNullable(defaults.getLimit(limit)).orElse(defaultValue)))); + } + + private Double getAccountLimit(String limit, String account) { + return Optional.ofNullable(account) + .map(accountOverrides::get) + .map(sl -> sl.getLimit(limit)) + .orElse(null); + } + + private Double getCloudProviderLimit(String limit, String cloudProvider) { + return Optional.ofNullable(cloudProvider) + .map(cloudProviderOverrides::get) + .map(sl -> sl.getLimit(limit)) + .orElse(null); + } + + private Double getImplementationLimit(String limit, String implementation, String account) { + return Optional.ofNullable(implementation) + .map(implementationLimits::get) + .map(il -> il.getLimit(limit, account)) + .orElse(null); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfigurationBuilder.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfigurationBuilder.java new file mode 100644 index 00000000000..9b5f2316612 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimitConfigurationBuilder.java @@ -0,0 +1,185 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.core.limits; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** Mutable structure for construction of ServiceLimitConfiguration. 
*/ +public class ServiceLimitConfigurationBuilder { + + private MutableLimits defaults = new MutableLimits(); + private Map cloudProviderOverrides = new HashMap<>(); + private Map accountOverrides = new HashMap<>(); + private Map implementationLimits = new HashMap<>(); + + public MutableLimits getDefaults() { + return defaults; + } + + public void setDefaults(MutableLimits defaults) { + this.defaults = defaults; + } + + public ServiceLimitConfigurationBuilder withDefault(String limit, Double value) { + if (defaults == null) { + defaults = new MutableLimits(); + } + defaults.setLimit(limit, value); + return this; + } + + public Map getCloudProviderOverrides() { + return cloudProviderOverrides; + } + + public void setCloudProviderOverrides(Map cloudProviderOverrides) { + this.cloudProviderOverrides = cloudProviderOverrides; + } + + public ServiceLimitConfigurationBuilder withCloudProviderOverride( + String cloudProvider, String limit, Double value) { + if (cloudProviderOverrides == null) { + cloudProviderOverrides = new HashMap<>(); + } + cloudProviderOverrides + .computeIfAbsent(cloudProvider, k -> new MutableLimits()) + .setLimit(limit, value); + return this; + } + + public Map getAccountOverrides() { + return accountOverrides; + } + + public void setAccountOverrides(Map accountOverrides) { + this.accountOverrides = accountOverrides; + } + + public ServiceLimitConfigurationBuilder withAccountOverride( + String account, String limit, Double value) { + if (accountOverrides == null) { + accountOverrides = new HashMap<>(); + } + + accountOverrides.computeIfAbsent(account, k -> new MutableLimits()).setLimit(limit, value); + return this; + } + + public Map getImplementationLimits() { + return implementationLimits; + } + + public void setImplementationLimits( + Map implementationLimits) { + this.implementationLimits = implementationLimits; + } + + public ServiceLimitConfigurationBuilder withImplementationDefault( + String implementation, String limit, Double value) { + if (implementationLimits == null) { + implementationLimits = new HashMap<>(); + } + implementationLimits + .computeIfAbsent(implementation, k -> new MutableImplementationLimits()) + .defaults + .setLimit(limit, value); + return this; + } + + public ServiceLimitConfigurationBuilder withImplementationAccountOverride( + String implementation, String account, String limit, Double value) { + if (implementationLimits == null) { + implementationLimits = new HashMap<>(); + } + + implementationLimits + .computeIfAbsent(implementation, k -> new MutableImplementationLimits()) + .accountOverrides + .computeIfAbsent(account, k -> new MutableLimits()) + .setLimit(limit, value); + + return this; + } + + public ServiceLimitConfiguration build() { + return new ServiceLimitConfiguration( + new ServiceLimits(defaults), + toServiceLimits(cloudProviderOverrides), + toServiceLimits(accountOverrides), + toImplementationLimits(implementationLimits)); + } + + public static class MutableLimits extends HashMap { + public void setLimit(String limit, Double value) { + put(limit, value); + } + + public Double getLimit(String limit) { + return get(limit); + } + } + + public static class MutableImplementationLimits { + MutableLimits defaults = new MutableLimits(); + Map accountOverrides = new HashMap<>(); + + public ImplementationLimits toImplementationLimits() { + return new ImplementationLimits( + new ServiceLimits(defaults), toServiceLimits(accountOverrides)); + } + + public MutableLimits getDefaults() { + return defaults; + } + + public void 
setDefaults(MutableLimits defaults) { + this.defaults = defaults; + } + + public Map<String, MutableLimits> getAccountOverrides() { + return accountOverrides; + } + + public void setAccountOverrides(Map<String, MutableLimits> accountOverrides) { + this.accountOverrides = accountOverrides; + } + } + + private static <S, D> Map<String, D> toImmutable( + Map<String, S> src, Function<Map.Entry<String, S>, D> converter) { + return java.util.Optional.ofNullable(src) + .map(Map::entrySet) + .map(Set::stream) + .map(s -> s.collect(Collectors.toMap(Map.Entry::getKey, converter))) + .orElse(Collections.emptyMap()); + } + + private static Map<String, ServiceLimits> toServiceLimits(Map<String, MutableLimits> limits) { + return toImmutable(limits, mapEntry -> new ServiceLimits(mapEntry.getValue())); + } + + private static Map<String, ImplementationLimits> toImplementationLimits( + Map<String, MutableImplementationLimits> implementationLimits) { + return toImmutable( + implementationLimits, mapEntry -> mapEntry.getValue().toImplementationLimits()); + } +}
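To make the override precedence concrete, a small sketch using the builder above together with ServiceLimitConfiguration.getLimit; the implementation and account names here are made up for illustration:

    class ServiceLimitSketch {
      static void demo() {
        ServiceLimitConfiguration config =
            new ServiceLimitConfigurationBuilder()
                .withDefault(ServiceLimitConfiguration.API_RATE_LIMIT, 10.0)
                .withCloudProviderOverride("aws", ServiceLimitConfiguration.API_RATE_LIMIT, 20.0)
                .withAccountOverride("prod", ServiceLimitConfiguration.API_RATE_LIMIT, 30.0)
                .withImplementationAccountOverride(
                    "AmazonClusterProvider", "prod", ServiceLimitConfiguration.API_RATE_LIMIT, 40.0)
                .build();

        // The most specific match wins: implementation+account, then account,
        // then cloud provider, then defaults, then the supplied fallback.
        config.getLimit(ServiceLimitConfiguration.API_RATE_LIMIT, "AmazonClusterProvider", "prod", "aws", 5.0); // 40.0
        config.getLimit(ServiceLimitConfiguration.API_RATE_LIMIT, null, "prod", "aws", 5.0); // 30.0
        config.getLimit(ServiceLimitConfiguration.API_RATE_LIMIT, null, null, "aws", 5.0); // 20.0
        config.getLimit(ServiceLimitConfiguration.API_RATE_LIMIT, null, null, null, 5.0); // 10.0
      }
    }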
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimits.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimits.java similarity index 99% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimits.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimits.java index f6e56accda9..dd47212c5f8 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimits.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/limits/ServiceLimits.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.core.limits; import com.google.common.collect.ImmutableMap; - import java.util.Collections; import java.util.Map; diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/CoreProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/CoreProvider.java new file mode 100644 index 00000000000..69a28d8e70f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/CoreProvider.java @@ -0,0 +1,27 @@ +package com.netflix.spinnaker.clouddriver.core.provider; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; +import com.netflix.spinnaker.cats.provider.Provider; +import java.util.Collection; + +public class CoreProvider extends AgentSchedulerAware implements Provider { + + public static final String PROVIDER_NAME = CoreProvider.class.getName(); + + private final Collection<Agent> agents; + + public CoreProvider(Collection<Agent> agents) { + this.agents = agents; + } + + @Override + public String getProviderName() { + return PROVIDER_NAME; + } + + @Override + public Collection<Agent> getAgents() { + return agents; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/agent/ExternalHealthProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/agent/ExternalHealthProvider.java new file mode 100644 index 00000000000..cba5beaeacb --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/agent/ExternalHealthProvider.java @@ -0,0 +1,5 @@ +package com.netflix.spinnaker.clouddriver.core.provider.agent; + +import com.netflix.spinnaker.cats.provider.Provider; + +public interface ExternalHealthProvider extends Provider {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/agent/HealthProvidingCachingAgent.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/agent/HealthProvidingCachingAgent.java new file mode 100644 index 00000000000..d5214345829 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/core/provider/agent/HealthProvidingCachingAgent.java @@ -0,0 +1,23 @@ +package com.netflix.spinnaker.clouddriver.core.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import java.util.*; + +public interface HealthProvidingCachingAgent extends CachingAgent { + TypeReference<Map<String, Object>> ATTRIBUTES = new TypeReference<Map<String, Object>>() {}; + + Collection<AgentDataType> types = + Collections.unmodifiableCollection( + new ArrayList<>( + Arrays.asList( + AUTHORITATIVE.forType(HEALTH.getNs()), INFORMATIVE.forType(INSTANCES.getNs())))); + + String getHealthId(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DefaultTask.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DefaultTask.java new file mode 100644 index 00000000000..a00f86ee466 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DefaultTask.java @@ -0,0 +1,149 @@ +package com.netflix.spinnaker.clouddriver.data.task; + +import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname; +import java.util.*; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import org.codehaus.groovy.runtime.DefaultGroovyMethods; + +public class DefaultTask implements Task { + private static final Logger log = Logger.getLogger(DefaultTask.class.getName()); + + private final String id; + private String ownerId = ClouddriverHostname.ID; + private final String requestId = null; + private final Deque<Status> statusHistory = new ConcurrentLinkedDeque<>(); + private final Deque<Object> resultObjects = new ConcurrentLinkedDeque<>(); + private final Deque<SagaId> sagaIdentifiers = new ConcurrentLinkedDeque<>(); + private final Deque<TaskOutput> taskOutputs = new ConcurrentLinkedDeque<>(); + private final long startTimeMs = System.currentTimeMillis(); + + public String getOwnerId() { + return ownerId; + } + + public DefaultTask(final String id) { + this(id, "INIT", "Creating task " + id); + } + + public DefaultTask(String id, String phase, String status) { + DefaultTaskStatus initialStatus = new DefaultTaskStatus(phase, status, TaskState.STARTED); + statusHistory.addLast(initialStatus); + this.id = id; + } + + public void updateStatus(String phase, String status) { + statusHistory.addLast(currentStatus().update(phase, status)); + log.info("[" + phase + "] - Task: " + id + " " + status); + } + + public void complete() { + statusHistory.addLast(currentStatus().update(TaskState.COMPLETED)); + } + + public List<TaskDisplayStatus> getHistory() { + return statusHistory.stream().map(TaskDisplayStatus::new).collect(Collectors.toList()); + } + + public void fail() { + statusHistory.addLast(currentStatus().update(TaskState.FAILED)); + } + + @Override + public void fail(boolean retryable) { + statusHistory.addLast(
currentStatus().update(retryable ? TaskState.FAILED_RETRYABLE : TaskState.FAILED)); + } + + public Status getStatus() { + return currentStatus(); + } + + public String toString() { + return getStatus().toString(); + } + + public void addResultObjects(List results) { + if (results != null && !results.isEmpty()) { + currentStatus().ensureUpdateable(); + resultObjects.addAll(results); + } + } + + @Override + public List getResultObjects() { + return new ArrayList<>(resultObjects); + } + + private DefaultTaskStatus currentStatus() { + return (DefaultTaskStatus) statusHistory.getLast(); + } + + @Override + public void addSagaId(@Nonnull SagaId sagaId) { + sagaIdentifiers.addLast(sagaId); + } + + @Override + public Set getSagaIds() { + return DefaultGroovyMethods.toSet(sagaIdentifiers); + } + + @Override + public boolean hasSagaIds() { + return !sagaIdentifiers.isEmpty(); + } + + @Override + public void retry() { + statusHistory.addLast(currentStatus().update(TaskState.STARTED)); + } + + @Override + public void updateOutput(String manifest, String phase, String stdOut, String stdError) { + log.info("[" + phase + "] - Capturing output for Task: " + id + ", manifest: " + manifest); + TaskDisplayOutput output = new TaskDisplayOutput(manifest, phase, stdOut, stdError); + taskOutputs.addLast(output); + } + + @Override + public List getOutputs() { + return new ArrayList<>(taskOutputs); + } + + @Override + public void updateOwnerId(String ownerId, String phase) { + if (ownerId == null) { + log.info("new owner id not provided. No update necessary."); + return; + } + + String previousCloudDriverHostname = this.getOwnerId().split("@")[1]; + String currentCloudDriverHostname = ownerId.split("@")[1]; + + if (previousCloudDriverHostname.equals(currentCloudDriverHostname)) { + log.info("new owner id is the same as the previous owner Id. No update necessary."); + return; + } + + String previousOwnerId = this.ownerId; + updateStatus(phase, "Re-assigning task from: " + previousOwnerId + " to: " + ownerId); + this.ownerId = ownerId; + log.info( + "Updated ownerId for task id: " + id + " from: " + previousOwnerId + " to: " + ownerId); + } + + public final String getId() { + return id; + } + + public final String getRequestId() { + return requestId; + } + + public final long getStartTimeMs() { + return startTimeMs; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DefaultTaskStatus.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DefaultTaskStatus.java new file mode 100644 index 00000000000..95f6b6b87d3 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DefaultTaskStatus.java @@ -0,0 +1,97 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
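A sketch of the task lifecycle DefaultTask implements, with illustrative phase/status strings and a hypothetical result object:

    class DefaultTaskLifecycleSketch {
      static void demo() {
        Task task = new DefaultTask("42"); // history starts with INIT / "Creating task 42"
        task.updateStatus("DEPLOY", "Creating server group");
        task.addResultObjects(java.util.List.of(java.util.Map.of("serverGroupName", "app-v001")));
        task.complete(); // current status becomes COMPLETED

        // Any further update trips ensureUpdateable():
        // task.updateStatus("DEPLOY", "too late"); // IllegalStateException
      }
    }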
+ * + */ +package com.netflix.spinnaker.clouddriver.data.task; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.kork.annotations.VisibleForTesting; + +public class DefaultTaskStatus implements Status { + + public static DefaultTaskStatus create(String phase, String status, TaskState state) { + return new DefaultTaskStatus(phase, status, state); + } + + private final String phase; + private final String status; + @JsonIgnore private final TaskState state; + + public DefaultTaskStatus(String phase, String status, TaskState state) { + this.phase = phase; + this.status = status; + this.state = state; + } + + /** + * This constructor is just for backwards-compatibility of tests from when the class was in Groovy + * and could use hash-map constructors. This constructor must not be used in application code. + */ + @VisibleForTesting + public DefaultTaskStatus(TaskState state) { + this.phase = null; + this.status = null; + this.state = state; + } + + @JsonProperty + public Boolean isComplete() { + return state.isCompleted(); + } + + @JsonProperty + public Boolean isCompleted() { + return state.isCompleted(); + } + + @JsonProperty + public Boolean isFailed() { + return state.isFailed(); + } + + @JsonProperty + public Boolean isRetryable() { + return state.isRetryable(); + } + + public DefaultTaskStatus update(String phase, String status) { + ensureUpdateable(); + return new DefaultTaskStatus(phase, status, state); + } + + public DefaultTaskStatus update(TaskState state) { + ensureUpdateable(); + return new DefaultTaskStatus(phase, status, state); + } + + public void ensureUpdateable() { + if (isCompleted()) { + throw new IllegalStateException("Task is already completed! No further updates allowed!"); + } + } + + public final String getPhase() { + return phase; + } + + public final String getStatus() { + return status; + } + + public final TaskState getState() { + return state; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DualTaskRepository.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DualTaskRepository.java new file mode 100644 index 00000000000..b80fe43eae7 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/DualTaskRepository.java @@ -0,0 +1,129 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
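DefaultTaskStatus above is immutable: each update() returns a new instance, and ensureUpdateable() refuses further transitions once a terminal state is reached. A short sketch:

    class StatusUpdateSketch {
      static void demo() {
        DefaultTaskStatus started = DefaultTaskStatus.create("DEPLOY", "starting", TaskState.STARTED);
        DefaultTaskStatus running = started.update("DEPLOY", "creating load balancer"); // new instance
        DefaultTaskStatus done = running.update(TaskState.COMPLETED);

        // done.update("DEPLOY", "anything"); // IllegalStateException: task already completed
      }
    }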
+ */ +package com.netflix.spinnaker.clouddriver.data.task; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import java.util.*; +import java.util.concurrent.*; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DualTaskRepository implements TaskRepository { + + private static final Logger log = LoggerFactory.getLogger(DualTaskRepository.class); + + private final TaskRepository primary; + private final TaskRepository previous; + private final ExecutorService executorService; + private final long asyncTimeoutSeconds; + private final DynamicConfigService dynamicConfigService; + + public DualTaskRepository( + TaskRepository primary, + TaskRepository previous, + int threadPoolSize, + long asyncTimeoutSeconds, + DynamicConfigService dynamicConfigService) { + this( + primary, + previous, + Executors.newFixedThreadPool( + threadPoolSize, + new ThreadFactoryBuilder() + .setNameFormat(DualTaskRepository.class.getSimpleName() + "-%d") + .build()), + asyncTimeoutSeconds, + dynamicConfigService); + } + + public DualTaskRepository( + TaskRepository primary, + TaskRepository previous, + ExecutorService executorService, + long asyncTimeoutSeconds, + DynamicConfigService dynamicConfigService) { + this.primary = primary; + this.previous = previous; + this.executorService = executorService; + this.asyncTimeoutSeconds = asyncTimeoutSeconds; + this.dynamicConfigService = dynamicConfigService; + } + + @Override + public Task create(String phase, String status) { + return primary.create(phase, status); + } + + @Override + public Task create(String phase, String status, String clientRequestId) { + return primary.create(phase, status, clientRequestId); + } + + @Override + public Task get(String id) { + Task task = primary.get(id); + + if (task == null && dynamicConfigService.isEnabled("dualtaskrepo.previous", true)) { + task = previous.get(id); + } + + return task; + } + + @Override + public Task getByClientRequestId(String clientRequestId) { + Task task = primary.getByClientRequestId(clientRequestId); + + if (task == null && dynamicConfigService.isEnabled("dualtaskrepo.previous", true)) { + task = previous.getByClientRequestId(clientRequestId); + } + + return task; + } + + @Override + public List list() { + List tasks = new ArrayList<>(); + + try { + Future> primaryList = executorService.submit(primary::list); + List tasksFromPrevious = Collections.emptyList(); + + tasks.addAll(primaryList.get(asyncTimeoutSeconds, TimeUnit.SECONDS)); + if (dynamicConfigService.isEnabled("dualtaskrepo.previous", true)) { + Future> previousList = executorService.submit(previous::list); + tasksFromPrevious = previousList.get(asyncTimeoutSeconds, TimeUnit.SECONDS); + } + + Set primaryTasks = tasks.stream().map(Task::getId).collect(Collectors.toSet()); + tasksFromPrevious.stream() + .filter(task -> !primaryTasks.contains(task.getId())) + .forEach(tasks::add); + } catch (TimeoutException | InterruptedException | ExecutionException e) { + log.error("Could not retrieve list of tasks by timeout", e); + // Return tasks so we can still get data in partial failures + } + + return tasks; + } + + @Override + public List listByThisInstance() { + return primary.listByThisInstance(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/SagaId.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/SagaId.java new file mode 100644 
index 00000000000..344c7cb6edc --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/SagaId.java @@ -0,0 +1,33 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.data.task; + +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; + +@Value +@Builder(builderClassName = "SagaIdBuilder") +@JsonDeserialize(builder = SagaId.SagaIdBuilder.class) +public class SagaId { + @Nonnull String name; + @Nonnull String id; + + @JsonPOJOBuilder(withPrefix = "") + public static class SagaIdBuilder {} +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/Status.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/Status.java new file mode 100644 index 00000000000..bc3fd5d282b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/Status.java @@ -0,0 +1,31 @@ +package com.netflix.spinnaker.clouddriver.data.task; + +/** + * This interface is used to represent the status of a Task for a point in time. Often should be + * backed by a POGO, but may be used for more complex requirements, like querying a database or + * centralized task system in a multi-threaded/ multi-service environment. + * + *

A pseudo-composite key of a Status is its phase and status strings. + */ +public interface Status { + /** + * Returns the current phase of the execution. This is useful for representing different parts of + * a Task execution, and a "status" String will be tied to a particular phase. + */ + String getPhase(); + + /** Returns the current status of the Task in its given phase. */ + String getStatus(); + + /** Indicates whether the task has completed. */ + Boolean isCompleted(); + + /** + * Indicates whether the task has failed. A "failed" state always implies a + * "completed" state. + */ + Boolean isFailed(); + + /** Indicates whether a failed task may be retried. */ + Boolean isRetryable(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/Task.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/Task.java new file mode 100644 index 00000000000..a71d48e3ee0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/Task.java @@ -0,0 +1,113 @@ +package com.netflix.spinnaker.clouddriver.data.task; + +import java.util.List; +import java.util.Set; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * This interface represents the state of a given execution. Implementations must allow for updating + * and completing/failing status, as well as providing the start time of the task. + */ +public interface Task { + /** A unique identifier for the task, which can be used to retrieve it at a later time. */ + String getId(); + + /** A client-provided ID used for de-duplication. */ + String getRequestId(); + + /** A list of result objects that are serialized back to the caller */ + List<Object> getResultObjects(); + + /** + * This method is used to add result objects to the Task + * + * @param results + */ + void addResultObjects(List<Object> results); + + /** A comprehensive history of this task's execution. */ + List<? extends Status> getHistory(); + + /** The id of the clouddriver instance that submitted this task */ + String getOwnerId(); + + /** + * This method is used to update the status of the Task with given phase and status strings. + * + * @param phase + * @param status + */ + void updateStatus(String phase, String status); + + /** + * This method will complete the task and will represent completed = true from the Task's {@link + * #getStatus()} method. + */ + void complete(); + + /** + * This method will fail the task and will represent completed = true and failed = true from the + * Task's {@link #getStatus()} method. + * + * @deprecated Use `fail(boolean)` instead + */ + @Deprecated + void fail(); + + /** + * This method will fail the task and will represent completed = true and failed = true from the + * Task's {@link #getStatus()} method. + * + * @param retryable If true, the failed state will be marked as retryable (only for sagas and + * kubernetes tasks) + */ + void fail(boolean retryable); + + /** + * This method will return the current status of the task. + * + * @see Status + */ + Status getStatus(); + + /** This returns the start time of the Task's execution in milliseconds since epoch form. */ + long getStartTimeMs(); + + /** + * Add a Saga to this Task. More than one Saga can be associated with a Task. + * + * @param sagaId The Saga name/id pair + */ + void addSagaId(@Nonnull SagaId sagaId); + + /** Returns true if any Sagas have been associated with this Task. */ + boolean hasSagaIds(); + + /** A set of Sagas associated with this Task, if any. 
*/ + @Nonnull + Set getSagaIds(); + + /** Returns true if the Task is retryable (in the case of a failure) */ + default boolean isRetryable() { + return getStatus().isFailed() && getStatus().isRetryable(); + } + + /** Updates the status of a failed Task to running in response to a retry operation. */ + void retry(); + + /** + * This method is used to capture any output produced by the task. + * + * @param stdOut - captures std output + * @param stdError - captures errors + */ + void updateOutput( + String manifest, String phase, @Nullable String stdOut, @Nullable String stdError); + + /** @return */ + List getOutputs(); + + // updates the owner id in case the task was picked up by another clouddriver pod + void updateOwnerId(String ownerId, String phase); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskDisplayOutput.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskDisplayOutput.java new file mode 100644 index 00000000000..c7b068c2865 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskDisplayOutput.java @@ -0,0 +1,31 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.data.task; + +import javax.annotation.Nullable; +import lombok.AllArgsConstructor; +import lombok.Data; + +@Data +@AllArgsConstructor +public class TaskDisplayOutput implements TaskOutput { + @Nullable private String manifest; + @Nullable private String phase; + @Nullable private String stdOut; + @Nullable private String stdError; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskDisplayStatus.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskDisplayStatus.java new file mode 100644 index 00000000000..bcfe2191e31 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskDisplayStatus.java @@ -0,0 +1,62 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
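A sketch of how orchestration code might drive the retry contract defined in the Task interface above (the phase string is illustrative):

    class RetrySketch {
      static void resumeIfPossible(Task task) {
        // isRetryable() is only true for a failed task whose failure was
        // recorded with fail(true), i.e. TaskState.FAILED_RETRYABLE.
        if (task.isRetryable()) {
          task.retry(); // history gains a fresh STARTED status
          task.updateStatus("DEPLOY", "Retrying after retryable failure");
        }
      }
    }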
+ * + */ +package com.netflix.spinnaker.clouddriver.data.task; + +import com.fasterxml.jackson.annotation.JsonIgnore; + +public class TaskDisplayStatus implements Status { + + public static TaskDisplayStatus create(Status taskStatus) { + return new TaskDisplayStatus(taskStatus); + } + + @JsonIgnore private final Status taskStatus; + + public TaskDisplayStatus(Status taskStatus) { + this.taskStatus = taskStatus; + } + + @Override + public String getStatus() { + return taskStatus.getStatus(); + } + + @Override + public String getPhase() { + return taskStatus.getPhase(); + } + + @JsonIgnore + public Boolean isCompleted() { + return taskStatus.isCompleted(); + } + + @JsonIgnore + public Boolean isFailed() { + return taskStatus.isFailed(); + } + + @JsonIgnore + @Override + public Boolean isRetryable() { + return taskStatus.isRetryable(); + } + + public final Status getTaskStatus() { + return taskStatus; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskOutput.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskOutput.java new file mode 100644 index 00000000000..be48af51ac1 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskOutput.java @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.data.task; + +public interface TaskOutput { + + /** Indicates the manifest in the task for which the output is captured */ + String getManifest(); + + /** + * Returns the current phase of the execution. This is useful for representing different parts of + * a Task execution + */ + String getPhase(); + + /** Returns the Stdout logs associated with the task */ + String getStdOut(); + + /** Returns the Stderr logs associated with the task */ + String getStdError(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskRepository.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskRepository.java new file mode 100644 index 00000000000..4c9751c007c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskRepository.java @@ -0,0 +1,65 @@ +package com.netflix.spinnaker.clouddriver.data.task; + +import java.util.List; + +/** + * A TaskRepository is an implementation that allows Task objects to be created, retrieved, and + * listed on demand. + * + * @see Task + * @see InMemoryTaskRepository + */ +public interface TaskRepository { + + /** + * A thread local holder for a Task in-action. Useful for the {@link InMemoryTaskRepository} + * implementation. + */ + ThreadLocal threadLocalTask = new ThreadLocal<>(); + + /** + * Creates a new task, and sets the initial status to the provided phase and status. 
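As context for the create variants documented here, a sketch of idempotent task creation through a clientRequestId (the repository wiring and request id are assumed):

    class DedupeSketch {
      static void demo(TaskRepository taskRepository) {
        Task first = taskRepository.create("INIT", "Orchestrating deploy", "request-abc-123");
        Task second = taskRepository.create("INIT", "Orchestrating deploy", "request-abc-123");
        // Both calls resolve to the same task:
        assert first.getId().equals(second.getId());
      }
    }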
+ * + * @param phase + * @param status + * @return task + */ + public abstract Task create(String phase, String status); + + /** + * Creates a new task if a task has not already been created with that key and sets the initial + * status to the provided phase and status. + * + * @param phase + * @param status + * @param clientRequestId + * @return task the new task, or the previous task that was created with the supplied key + */ + Task create(String phase, String status, String clientRequestId); + + /** + * Retrieves a task by the provided id + * + * @param id + * @return task + */ + Task get(String id); + + /** + * Retrieves a task by the provided clientRequestId + * + * @param clientRequestId + * @return task, or null if no task has been started with the requestId + */ + Task getByClientRequestId(String clientRequestId); + + /** + * Lists all tasks currently in the repository + * + * @return list of tasks + */ + List list(); + + /** Lists all tasks owned by this instance */ + List listByThisInstance(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskState.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskState.java new file mode 100644 index 00000000000..75b4dcaf349 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/TaskState.java @@ -0,0 +1,20 @@ +package com.netflix.spinnaker.clouddriver.data.task; + +public enum TaskState { + STARTED, + COMPLETED, + FAILED, + FAILED_RETRYABLE; + + public boolean isCompleted() { + return !this.equals(STARTED); + } + + public boolean isFailed() { + return this.equals(FAILED) || this.equals(FAILED_RETRYABLE); + } + + public boolean isRetryable() { + return this.equals(FAILED_RETRYABLE); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTask.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTask.java new file mode 100644 index 00000000000..23f4d3ddec6 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTask.java @@ -0,0 +1,215 @@ +package com.netflix.spinnaker.clouddriver.data.task.jedis; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.google.common.collect.Iterables; +import com.netflix.spinnaker.clouddriver.data.task.SagaId; +import com.netflix.spinnaker.clouddriver.data.task.Status; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayOutput; +import com.netflix.spinnaker.clouddriver.data.task.TaskOutput; +import com.netflix.spinnaker.clouddriver.data.task.TaskState; +import java.util.List; +import java.util.Set; +import javax.annotation.Nonnull; +import org.codehaus.groovy.runtime.DefaultGroovyMethods; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The fields of the task are computed on-demand by querying the repository. This means that the + * serialized task may not be internally consistent; each field will reflect the state of the task + * in the repository at the time that field's accessor was called during serialization. This is in + * general a difficult problem to solve with redis, which does not support atomic reads of multiple + * keys, but has been solved in the SQL repository by fetching all data in a single query. As a + * workaround, we'll instruct Jackson to serialize the status first. 
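For illustration, the same annotation on a toy class shows the effect (the class and its values are made up):

    import com.fasterxml.jackson.annotation.JsonPropertyOrder;
    import com.fasterxml.jackson.databind.ObjectMapper;

    @JsonPropertyOrder({"status", "history"})
    class OrderedViewSketch {
      public String getOther() { return "o"; }
      public String getHistory() { return "h"; }
      public String getStatus() { return "s"; }

      public static void main(String[] args) throws Exception {
        // Prints {"status":"s","history":"h","other":"o"}: Jackson reads the
        // status accessor first, then history, then everything else.
        System.out.println(new ObjectMapper().writeValueAsString(new OrderedViewSketch()));
      }
    }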
The reason is that consumers + * tend to use the status field to check if a task is complete, and expect the other fields to be + * filled out if it is. If there is an inconsistency between the status and other fields, we'd + * rather return a stale value in the status field than in other fields. In general, returning an + * older status (ie, still running) and newer other fields will just cause clients to poll again + * until they see the updated status. Returning a newer status (ie, completed or failed) but stale + * values in other fields will in general cause clients to use these stale values, leading to bugs. + * + *

We'll force the history to be computed next (as clients could feasibly use this to determine + * whether a task is complete), then will not enforce an order on any other properties. + */ +@JsonPropertyOrder({"status", "history"}) +public class JedisTask implements Task { + + private static final Logger log = LoggerFactory.getLogger(JedisTask.class); + + @JsonIgnore private RedisTaskRepository repository; + private final String id; + private final long startTimeMs; + private String ownerId; + private final String requestId; + private final Set sagaIds; + @JsonIgnore private final boolean previousRedis; + + public JedisTask( + String id, + long startTimeMs, + RedisTaskRepository repository, + String ownerId, + String requestId, + Set sagaIds, + boolean previousRedis) { + this.id = id; + this.startTimeMs = startTimeMs; + this.repository = repository; + this.ownerId = ownerId; + this.requestId = requestId; + this.sagaIds = sagaIds; + this.previousRedis = previousRedis; + } + + @Override + public void updateStatus(String phase, String status) { + checkMutable(); + repository.addToHistory(repository.currentState(this).update(phase, status), this); + log.info("[" + phase + "] Task: " + id + " Status: " + status); + } + + @Override + public void complete() { + checkMutable(); + repository.addToHistory(repository.currentState(this).update(TaskState.COMPLETED), this); + } + + @Deprecated + @Override + public void fail() { + checkMutable(); + repository.addToHistory(repository.currentState(this).update(TaskState.FAILED), this); + } + + @Override + public void fail(boolean retryable) { + checkMutable(); + repository.addToHistory( + repository + .currentState(this) + .update(retryable ? TaskState.FAILED_RETRYABLE : TaskState.FAILED), + this); + } + + @Override + public void addResultObjects(List results) { + checkMutable(); + if (DefaultGroovyMethods.asBoolean(results)) { + repository.currentState(this).ensureUpdateable(); + repository.addResultObjects(results, this); + } + } + + public List getResultObjects() { + return repository.getResultObjects(this); + } + + public List getHistory() { + List status = repository.getHistory(this); + if (status != null && !status.isEmpty() && Iterables.getLast(status).isCompleted()) { + return status.subList(0, status.size() - 1); + } else { + return status; + } + } + + @Override + public String getOwnerId() { + return ownerId; + } + + @Override + public Status getStatus() { + return repository.currentState(this); + } + + @Override + public void addSagaId(@Nonnull SagaId sagaId) { + this.sagaIds.add(sagaId); + } + + @Override + public boolean hasSagaIds() { + return !sagaIds.isEmpty(); + } + + @Override + public void retry() { + checkMutable(); + repository.addToHistory(repository.currentState(this).update(TaskState.STARTED), this); + } + + @Override + public void updateOutput(String manifestName, String phase, String stdOut, String stdError) { + log.info("[" + phase + "] Capturing output for Task " + id + ", manifest: " + manifestName); + repository.addOutput(new TaskDisplayOutput(manifestName, phase, stdOut, stdError), this); + } + + @Override + public List getOutputs() { + return repository.getOutputs(this); + } + + @Override + public void updateOwnerId(String ownerId, String phase) { + checkMutable(); + if (ownerId == null) { + log.debug("new owner id not provided. 
No update necessary."); + return; + } + + String previousCloudDriverHostname = this.getOwnerId().split("@")[1]; + String currentCloudDriverHostname = ownerId.split("@")[1]; + + if (previousCloudDriverHostname.equals(currentCloudDriverHostname)) { + log.debug("new owner id is the same as the previous owner Id. No update necessary."); + return; + } + + String previousOwnerId = this.ownerId; + updateStatus(phase, "Re-assigning task from: " + previousOwnerId + " to: " + ownerId); + this.ownerId = ownerId; + repository.set(this.id, this); + log.debug("Updated ownerId for task id={} from {} to {}", id, previousOwnerId, ownerId); + } + + private void checkMutable() { + if (previousRedis) { + throw new IllegalStateException("Read-only task"); + } + } + + public RedisTaskRepository getRepository() { + return repository; + } + + public void setRepository(RedisTaskRepository repository) { + this.repository = repository; + } + + public final String getId() { + return id; + } + + public final long getStartTimeMs() { + return startTimeMs; + } + + public final String getRequestId() { + return requestId; + } + + public final Set getSagaIds() { + return sagaIds; + } + + public final boolean getPreviousRedis() { + return previousRedis; + } + + public final boolean isPreviousRedis() { + return previousRedis; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepository.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepository.java new file mode 100644 index 00000000000..101224cbc86 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepository.java @@ -0,0 +1,469 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
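Before the repository itself, a minimal sketch of the Failsafe retry pattern it uses throughout, mirroring the REDIS_RETRY_POLICY defined below (the Jedis call and key are placeholders):

    import java.util.concurrent.TimeUnit;
    import net.jodah.failsafe.Failsafe;
    import net.jodah.failsafe.RetryPolicy;
    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.exceptions.JedisException;

    class RetryPolicySketch {
      static String readWithRetries(Jedis jedis, String key) {
        RetryPolicy retryPolicy =
            new RetryPolicy()
                .retryOn(JedisException.class)
                .withDelay(500, TimeUnit.MILLISECONDS)
                .withMaxRetries(3);

        // Transient connection errors are retried three times; after that the
        // listener converts the failure into an unchecked exception.
        return Failsafe.with(retryPolicy)
            .onRetriesExceeded(
                failure -> {
                  throw new IllegalStateException("Redis unavailable", failure);
                })
            .get(() -> jedis.get(key));
      }
    }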
+ */ +package com.netflix.spinnaker.clouddriver.data.task.jedis; + +import static java.lang.String.format; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus; +import com.netflix.spinnaker.clouddriver.data.task.SagaId; +import com.netflix.spinnaker.clouddriver.data.task.Status; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayOutput; +import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayStatus; +import com.netflix.spinnaker.clouddriver.data.task.TaskOutput; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.data.task.TaskState; +import com.netflix.spinnaker.kork.exceptions.SystemException; +import com.netflix.spinnaker.kork.jedis.RedisClientDelegate; +import java.io.IOException; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import net.jodah.failsafe.Failsafe; +import net.jodah.failsafe.RetryPolicy; +import net.jodah.failsafe.function.CheckedConsumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import redis.clients.jedis.exceptions.JedisException; + +public class RedisTaskRepository implements TaskRepository { + private static final Logger log = LoggerFactory.getLogger(RedisTaskRepository.class); + + private static final String RUNNING_TASK_KEY = "kato:tasks"; + private static final String TASK_KEY_MAP = "kato:taskmap"; + private static final TypeReference<Map<String, String>> HISTORY_TYPE = + new TypeReference<Map<String, String>>() {}; + private static final TypeReference<Set<SagaId>> SAGA_IDS_TYPE = + new TypeReference<Set<SagaId>>() {}; + + private static final int TASK_TTL = (int) TimeUnit.HOURS.toSeconds(12); + + private static final RetryPolicy REDIS_RETRY_POLICY = + new RetryPolicy() + .retryOn(Collections.singletonList(JedisException.class)) + .withDelay(500, TimeUnit.MILLISECONDS) + .withMaxRetries(3); + + private final RedisClientDelegate redisClientDelegate; + private final Optional<RedisClientDelegate> redisClientDelegatePrevious; + private final ObjectMapper mapper = new ObjectMapper(); + + public RedisTaskRepository( + RedisClientDelegate redisClientDelegate, + Optional<RedisClientDelegate> redisClientDelegatePrevious) { + this.redisClientDelegate = redisClientDelegate; + this.redisClientDelegatePrevious = redisClientDelegatePrevious; + } + + @Override + public Task create(String phase, String status) { + return create(phase, status, UUID.randomUUID().toString()); + } + + @Override + public Task create(String phase, String status, String clientRequestId) { + String taskKey = getClientRequestKey(clientRequestId); + + String taskId = UUID.randomUUID().toString(); + + JedisTask task = + new JedisTask( + taskId, + System.currentTimeMillis(), + this, + ClouddriverHostname.ID, + clientRequestId, + new HashSet<>(), + false); + addToHistory(DefaultTaskStatus.create(phase, status, TaskState.STARTED), task); + set(taskId, task); + Long newTask = + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + return client.setnx(taskKey, taskId); + }), + "Registering task with index"); + if (newTask != 0) { + return task; + } + + // There's an existing taskId for this key, clean up what we just created and get the existing + // task + addToHistory( + DefaultTaskStatus.create(phase, 
"Duplicate of " + clientRequestId, TaskState.FAILED), task); + return getByClientRequestId(clientRequestId); + } + + @Override + public Task get(String id) { + Map taskMap = + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + return client.hgetAll("task:" + id); + }), + format("Getting task ID %s", id)); + boolean oldTask = + redisClientDelegatePrevious.isPresent() && (taskMap == null || taskMap.isEmpty()); + if (oldTask) { + try { + taskMap = + redisClientDelegatePrevious + .get() + .withCommandsClient( + client -> { + return client.hgetAll("task:" + id); + }); + } catch (Exception e) { + // Failed to hit old redis, let's not blow up on that + return null; + } + } + if (taskMap.containsKey("id") && taskMap.containsKey("startTimeMs")) { + Set sagaIds; + if (taskMap.containsKey("sagaIds")) { + try { + sagaIds = mapper.readValue(taskMap.get("sagaIds"), SAGA_IDS_TYPE); + } catch (IOException e) { + throw new SystemException("Could not deserialize sagaIds key", e); + } + } else { + sagaIds = new HashSet<>(); + } + + return new JedisTask( + taskMap.get("id"), + Long.parseLong(taskMap.get("startTimeMs")), + this, + taskMap.get("ownerId"), + taskMap.get("requestId"), + sagaIds, + oldTask); + } + return null; + } + + @Override + public Task getByClientRequestId(String clientRequestId) { + final String clientRequestKey = getClientRequestKey(clientRequestId); + String existingTask = + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + return client.get(clientRequestKey); + }), + format("Getting task by client request ID %s", clientRequestId)); + if (existingTask == null) { + if (redisClientDelegatePrevious.isPresent()) { + try { + existingTask = + redisClientDelegatePrevious + .get() + .withCommandsClient( + client -> { + return client.get(clientRequestKey); + }); + } catch (Exception e) { + // Failed to hit old redis, let's not blow up on that + existingTask = null; + } + } + } + if (existingTask != null) { + return get(existingTask); + } + return null; + } + + @Override + public List list() { + return retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + return client.smembers(RUNNING_TASK_KEY).stream() + .map(this::get) + .collect(Collectors.toList()); + }), + "Getting all running tasks"); + } + + @Override + public List listByThisInstance() { + return list().stream() + .filter(t -> ClouddriverHostname.ID.equals(t.getOwnerId())) + .collect(Collectors.toList()); + } + + public void set(String id, JedisTask task) { + String taskId = "task:" + task.getId(); + Map data = new HashMap<>(); + data.put("id", task.getId()); + data.put("startTimeMs", Long.toString(task.getStartTimeMs())); + data.put("ownerId", task.getOwnerId()); + try { + data.put("sagaIds", mapper.writeValueAsString(task.getSagaIds())); + } catch (JsonProcessingException e) { + throw new SystemException("Failed to serialize saga ids into Task", e); + } + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + client.hmset(taskId, data); + client.expire(taskId, TASK_TTL); + client.sadd(RUNNING_TASK_KEY, id); + }), + format("Writing task %s", id)); + } + + public void addToHistory(DefaultTaskStatus status, JedisTask task) { + String historyId = "taskHistory:" + task.getId(); + + Map data = new HashMap<>(); + data.put("phase", status.getPhase()); + data.put("status", status.getStatus()); + data.put("state", status.getState().toString()); + + String hist; + try { + hist = mapper.writeValueAsString(data); + } catch (JsonProcessingException e) { + throw new 
RuntimeException("Failed converting task history to json", e); + } + + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + client.rpush(historyId, hist); + client.expire(historyId, TASK_TTL); + if (status.isCompleted()) { + client.srem(RUNNING_TASK_KEY, task.getId()); + } + }), + format("Adding status history to task %s: %s", task.getId(), status)); + } + + public List getHistory(JedisTask task) { + String historyId = "taskHistory:" + task.getId(); + + RedisClientDelegate client = clientForTask(task); + return retry( + () -> + client.withCommandsClient( + c -> { + return c.lrange(historyId, 0, -1); + }), + format("Getting history for task %s", task.getId())) + .stream() + .map( + h -> { + Map history; + try { + history = mapper.readValue(h, HISTORY_TYPE); + } catch (IOException e) { + throw new RuntimeException("Could not convert history json to type", e); + } + return TaskDisplayStatus.create( + DefaultTaskStatus.create( + history.get("phase"), + history.get("status"), + TaskState.valueOf(history.get("state")))); + }) + .collect(Collectors.toList()); + } + + public DefaultTaskStatus currentState(JedisTask task) { + String historyId = "taskHistory:" + task.getId(); + + RedisClientDelegate client = clientForTask(task); + String state = + retry( + () -> + client.withCommandsClient( + c -> { + return c.lindex(historyId, -1); + }), + format("Getting current state for task %s", task.getId())); + + Map history; + try { + history = mapper.readValue(state, HISTORY_TYPE); + } catch (IOException e) { + throw new RuntimeException("Failed converting task history json to object", e); + } + return DefaultTaskStatus.create( + history.get("phase"), history.get("status"), TaskState.valueOf(history.get("state"))); + } + + public void addResultObjects(List objects, JedisTask task) { + String resultId = "taskResult:" + task.getId(); + String[] values = + objects.stream() + .map( + o -> { + try { + return mapper.writeValueAsString(o); + } catch (JsonProcessingException e) { + throw new RuntimeException("Failed to convert object to string", e); + } + }) + .collect(Collectors.toList()) + .toArray(new String[objects.size()]); + + log.debug("Adding results to task {} (results: {})", task.getId(), values); + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + client.rpush(resultId, values); + client.expire(resultId, TASK_TTL); + }), + format("Adding results to task %s", task.getId())); + } + + public List getResultObjects(JedisTask task) { + String resultId = "taskResult:" + task.getId(); + + return retry( + () -> + clientForTask(task) + .withCommandsClient( + client -> { + return client.lrange(resultId, 0, -1); + }), + format("Getting results for task %s", task.getId())) + .stream() + .map( + o -> { + try { + return mapper.readValue(o, Map.class); + } catch (IOException e) { + throw new RuntimeException("Failed to convert result object to map", e); + } + }) + .collect(Collectors.toList()); + } + + public void addOutput(TaskDisplayOutput output, JedisTask task) { + String outputId = "taskOutput:" + task.getId(); + + Map data = new HashMap<>(); + data.put("manifest", output.getManifest()); + data.put("phase", output.getPhase()); + data.put("stdOut", output.getStdOut()); + data.put("stdError", output.getStdError()); + + String taskOutput; + try { + taskOutput = mapper.writeValueAsString(data); + } catch (JsonProcessingException e) { + throw new RuntimeException( + "Failed to convert task output: " + output + " to string for task: " + task.getId(), e); + } + + 
log.debug("Adding task output: {} to task {}", taskOutput, task.getId()); + retry( + () -> + redisClientDelegate.withCommandsClient( + client -> { + client.rpush(outputId, taskOutput); + client.expire(outputId, TASK_TTL); + }), + format("Adding task output to task %s", task.getId())); + } + + public List getOutputs(JedisTask task) { + String outputId = "taskOutput:" + task.getId(); + + return retry( + () -> + clientForTask(task) + .withCommandsClient( + client -> { + return client.lrange(outputId, 0, -1); + }), + format("Getting task outputs for task %s", task.getId())) + .stream() + .map( + o -> { + Map data; + try { + data = mapper.readValue(o, HISTORY_TYPE); + } catch (IOException e) { + throw new RuntimeException( + "Failed to convert task outputs to map for task: " + task.getId(), e); + } + return new TaskDisplayOutput( + data.get("manifest"), + data.get("phase"), + data.get("stdOut"), + data.get("stdError")); + }) + .collect(Collectors.toList()); + } + + private String getClientRequestKey(String clientRequestId) { + return TASK_KEY_MAP + ":" + clientRequestId; + } + + private RedisClientDelegate clientForTask(JedisTask task) { + if (task.getPreviousRedis() && redisClientDelegatePrevious.isPresent()) { + return redisClientDelegatePrevious.get(); + } + return redisClientDelegate; + } + + private T retry(Supplier f, String onRetriesExceededMessage) { + return retry( + f, + failure -> { + throw new ExcessiveRedisFailureRetries(onRetriesExceededMessage, failure); + }); + } + + private T retry(Supplier f, CheckedConsumer retryExceededListener) { + return Failsafe.with(REDIS_RETRY_POLICY).onRetriesExceeded(retryExceededListener).get(f::get); + } + + private void retry(Runnable f, String onRetriesExceededMessage) { + retry( + f, + failure -> { + throw new ExcessiveRedisFailureRetries(onRetriesExceededMessage, failure); + }); + } + + private void retry(Runnable f, CheckedConsumer retryExceededListener) { + Failsafe.with(REDIS_RETRY_POLICY).onRetriesExceeded(retryExceededListener).run(f::run); + } + + private static class ExcessiveRedisFailureRetries extends RuntimeException { + ExcessiveRedisFailureRetries(String message, Throwable cause) { + super(message, cause); + } + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DefaultDeployHandlerRegistry.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DefaultDeployHandlerRegistry.java new file mode 100644 index 00000000000..1541f6a173e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DefaultDeployHandlerRegistry.java @@ -0,0 +1,28 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import static java.lang.String.format; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +public class DefaultDeployHandlerRegistry implements DeployHandlerRegistry { + + private List deployHandlers; + + public DefaultDeployHandlerRegistry(List deployHandlers) { + this.deployHandlers = deployHandlers; + } + + @Override + public DeployHandler findHandler(final DeployDescription description) { + return Optional.ofNullable(deployHandlers).orElseGet(ArrayList::new).stream() + .filter(it -> it != null && it.handles(description)) + .findFirst() + .orElseThrow( + () -> + new DeployHandlerNotFoundException( + format( + "No handler found supporting %s", description.getClass().getSimpleName()))); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DefaultDescriptionAuthorizer.java 
b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DefaultDescriptionAuthorizer.java new file mode 100644 index 00000000000..62d5fbe7109 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DefaultDescriptionAuthorizer.java @@ -0,0 +1,34 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.orchestration.OperationDescription; +import org.springframework.validation.Errors; + +public class DefaultDescriptionAuthorizer implements DescriptionAuthorizer { + + private final DescriptionAuthorizerService descriptionAuthorizerService; + + public DefaultDescriptionAuthorizer(DescriptionAuthorizerService descriptionAuthorizerService) { + this.descriptionAuthorizerService = descriptionAuthorizerService; + } + + @Override + public void authorize(OperationDescription description, Errors errors) { + descriptionAuthorizerService.authorize(description, errors); + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployDescription.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployDescription.java similarity index 88% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployDescription.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployDescription.java index 14277595d4e..9ca6b63ed65 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployDescription.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployDescription.java @@ -17,7 +17,7 @@ package com.netflix.spinnaker.clouddriver.deploy; import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent; - +import com.netflix.spinnaker.orchestration.OperationDescription; import java.util.Collection; import java.util.Collections; @@ -26,7 +26,7 @@ * * @see DeployHandler */ -public interface DeployDescription { +public interface DeployDescription extends OperationDescription { default Collection getEvents() { return Collections.emptyList(); } diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandler.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandler.java new file mode 100644 index 00000000000..2f790944113 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandler.java @@ -0,0 +1,47 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.deploy; + +import java.util.List; + +/** + * A DeployHandler takes a parameterized description object and performs some deployment operation + * based off of its detail. These objects may most often be derived from a {@link + * DeployHandlerRegistry} implementation. + * + * @param the type of the {@link DeployDescription} + * @see DeployDescription + */ +public interface DeployHandler { + /** + * A method that performs the deployment action described by the description object and returns + * its results as an implementation of {@link DeploymentResult} + * + * @param description + * @param priorOutputs from prior operations + * @return deployment result object + */ + DeploymentResult handle(T description, List priorOutputs); + + /** + * Used to determine if this handler is suitable for processing the supplied description object. + * + * @param description + * @return true/false + */ + boolean handles(DeployDescription description); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerNotFoundException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerNotFoundException.java new file mode 100644 index 00000000000..019d3083fd0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerNotFoundException.java @@ -0,0 +1,29 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.kork.exceptions.IntegrationException; + +public class DeployHandlerNotFoundException extends IntegrationException { + public DeployHandlerNotFoundException(String message) { + super(message); + } + + public DeployHandlerNotFoundException(String message, Throwable cause) { + super(message, cause); + } + + public DeployHandlerNotFoundException(Throwable cause) { + super(cause); + } + + public DeployHandlerNotFoundException(String message, String userMessage) { + super(message, userMessage); + } + + public DeployHandlerNotFoundException(String message, Throwable cause, String userMessage) { + super(message, cause, userMessage); + } + + public DeployHandlerNotFoundException(Throwable cause, String userMessage) { + super(cause, userMessage); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerRegistry.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerRegistry.java new file mode 100644 index 00000000000..967ced6a2f9 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DeployHandlerRegistry.java @@ -0,0 +1,13 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +/** A registry of {@link DeployHandler} instances. */ +public interface DeployHandlerRegistry { + /** + * This method is used to locate a handler most appropriate for the provided description object. 
+ * + * @param description + * @return a deploy handler instance + * @throws DeployHandlerNotFoundException + */ + DeployHandler findHandler(DeployDescription description) throws DeployHandlerNotFoundException; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizer.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizer.java new file mode 100644 index 00000000000..a7f72c1144f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizer.java @@ -0,0 +1,16 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.orchestration.OperationDescription; +import org.springframework.validation.Errors; + +/** Authorizes atomic operation description objects. */ +public interface DescriptionAuthorizer { + + /** @param description The atomic operation description object this instance supports. */ + default boolean supports(Object description) { + return true; + } + + /** @param description - The atomic operation description object. */ + void authorize(OperationDescription description, Errors errors); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizerService.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizerService.java new file mode 100644 index 00000000000..83647269a86 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizerService.java @@ -0,0 +1,154 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import static java.lang.String.format; + +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionSecretManager; +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig; +import com.netflix.spinnaker.clouddriver.security.resources.AccountNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable; +import com.netflix.spinnaker.fiat.model.resources.ResourceType; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.security.core.Authentication; +import org.springframework.security.core.context.SecurityContextHolder; +import org.springframework.validation.Errors; + +public class DescriptionAuthorizerService { + + private final Logger log = LoggerFactory.getLogger(getClass()); + + private final Registry registry; + private final FiatPermissionEvaluator fiatPermissionEvaluator; + private final SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps; + private final AccountDefinitionSecretManager secretManager; + + private final Id skipAuthorizationId; + private final Id missingApplicationId; + private final Id authorizationId; + + public DescriptionAuthorizerService( + Registry registry, + Optional fiatPermissionEvaluator, + SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps, + AccountDefinitionSecretManager secretManager) { + this.registry = registry; + this.fiatPermissionEvaluator = 
fiatPermissionEvaluator.orElse(null); + this.opsSecurityConfigProps = opsSecurityConfigProps; + this.secretManager = secretManager; + + this.skipAuthorizationId = registry.createId("authorization.skipped"); + this.missingApplicationId = registry.createId("authorization.missingApplication"); + this.authorizationId = registry.createId("authorization"); + } + + public void authorize(Object description, Errors errors) { + authorize(description, errors, List.of(ResourceType.ACCOUNT, ResourceType.APPLICATION)); + } + + public void authorize(Object description, Errors errors, Collection resourceTypes) { + if (fiatPermissionEvaluator == null || description == null) { + return; + } + + Authentication auth = SecurityContextHolder.getContext().getAuthentication(); + + String account = null; + List applications = new ArrayList<>(); + boolean requiresApplicationRestriction = true; + + if (description instanceof AccountNameable) { + AccountNameable accountNameable = (AccountNameable) description; + + requiresApplicationRestriction = accountNameable.requiresApplicationRestriction(); + + if (!accountNameable.requiresAuthorization(opsSecurityConfigProps)) { + registry + .counter( + skipAuthorizationId.withTag( + "descriptionClass", description.getClass().getSimpleName())) + .increment(); + + log.info( + "Skipping authorization for operation `{}` in account `{}`.", + description.getClass().getSimpleName(), + accountNameable.getAccount()); + } else { + account = accountNameable.getAccount(); + } + } + + if (description instanceof ApplicationNameable) { + ApplicationNameable applicationNameable = (ApplicationNameable) description; + applications.addAll( + Optional.ofNullable(applicationNameable.getApplications()) + .orElse(Collections.emptyList()) + .stream() + .filter(Objects::nonNull) + .collect(Collectors.toList())); + } + + if (description instanceof ResourcesNameable) { + ResourcesNameable resourcesNameable = (ResourcesNameable) description; + + applications.addAll( + Optional.ofNullable(resourcesNameable.getResourceApplications()) + .orElse(Collections.emptyList()) + .stream() + .filter(Objects::nonNull) + .collect(Collectors.toList())); + } + + boolean hasPermission = true; + if (resourceTypes.contains(ResourceType.ACCOUNT) + && account != null + && !secretManager.canAccessAccountWithSecrets(auth.getName(), account)) { + hasPermission = false; + errors.reject("authorization.account", format("Access denied to account %s", account)); + } + + if (resourceTypes.contains(ResourceType.APPLICATION) && !applications.isEmpty()) { + fiatPermissionEvaluator.storeWholePermission(); + + for (String application : applications) { + if (!fiatPermissionEvaluator.hasPermission(auth, application, "APPLICATION", "WRITE")) { + hasPermission = false; + errors.reject( + "authorization.application", format("Access denied to application %s", application)); + } + } + } + + if (requiresApplicationRestriction && account != null && applications.isEmpty()) { + registry + .counter( + missingApplicationId + .withTag("descriptionClass", description.getClass().getSimpleName()) + .withTag("hasValidationErrors", errors.hasErrors())) + .increment(); + + log.warn( + "No application(s) specified for operation with account restriction (type: {}, account: {}, hasValidationErrors: {})", + description.getClass().getSimpleName(), + account, + errors.hasErrors()); + } + + registry + .counter( + authorizationId + .withTag("descriptionClass", description.getClass().getSimpleName()) + .withTag("success", hasPermission)) + .increment(); + } +} diff 
--git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationErrors.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationErrors.java new file mode 100644 index 00000000000..ae9ce345c33 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationErrors.java @@ -0,0 +1,83 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nonnull; +import lombok.SneakyThrows; +import org.springframework.validation.AbstractErrors; +import org.springframework.validation.Errors; +import org.springframework.validation.FieldError; +import org.springframework.validation.ObjectError; + +public class DescriptionValidationErrors extends AbstractErrors implements ValidationErrors { + private Object description; + private List globalErrors = new ArrayList<>(); + private List fieldErrors = new ArrayList<>(); + + public DescriptionValidationErrors(Object description) { + this.description = description; + } + + @Nonnull + @Override + public String getObjectName() { + return description.getClass().getSimpleName(); + } + + @Override + public void reject(@Nonnull String errorCode, Object[] errorArgs, String defaultMessage) { + globalErrors.add( + new ObjectError(getObjectName(), new String[] {errorCode}, errorArgs, defaultMessage)); + } + + @Override + public void rejectValue( + String field, @Nonnull String errorCode, Object[] errorArgs, String defaultMessage) { + fieldErrors.add( + new FieldError( + getObjectName(), + field, + null, + false, + new String[] {errorCode}, + errorArgs, + defaultMessage)); + } + + @Override + public void addAllErrors(Errors errors) { + globalErrors.addAll(errors.getAllErrors()); + } + + @Override + @SneakyThrows + public Object getFieldValue(@Nonnull String field) { + return description.getClass().getDeclaredField(field).get(description); + } + + public Object getDescription() { + return description; + } + + public void setDescription(Object description) { + this.description = description; + } + + @Nonnull + public List getGlobalErrors() { + return globalErrors; + } + + public void setGlobalErrors(List globalErrors) { + this.globalErrors = globalErrors; + } + + @Nonnull + public List getFieldErrors() { + return fieldErrors; + } + + public void setFieldErrors(List fieldErrors) { + this.fieldErrors = fieldErrors; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationException.java new file mode 100644 index 00000000000..e28871ade85 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidationException.java @@ -0,0 +1,26 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import com.netflix.spinnaker.kork.web.exceptions.ValidationException; +import java.util.Collection; +import java.util.Optional; +import java.util.stream.Collectors; +import org.springframework.validation.Errors; + +public class DescriptionValidationException extends ValidationException { + public DescriptionValidationException(Errors errors) { + super("Validation Failed", getErrors(errors)); + } + + public DescriptionValidationException(Collection errors) { + super("Validation Failed", errors); + } + + public static Collection getErrors(Errors errors) { + + return 
errors.getAllErrors().stream() + .map( + objectError -> + Optional.ofNullable(objectError.getDefaultMessage()).orElse(objectError.getCode())) + .collect(Collectors.toList()); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/NullOpDeployHandler.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/NullOpDeployHandler.java new file mode 100644 index 00000000000..6729c48f6b9 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/deploy/NullOpDeployHandler.java @@ -0,0 +1,15 @@ +package com.netflix.spinnaker.clouddriver.deploy; + +import java.util.List; + +public class NullOpDeployHandler implements DeployHandler { + @Override + public DeploymentResult handle(String description, List priorOutputs) { + return null; + } + + @Override + public boolean handles(DeployDescription description) { + return false; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/documentation/Empty.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/documentation/Empty.java new file mode 100644 index 00000000000..c36c1785dce --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/documentation/Empty.java @@ -0,0 +1,27 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.documentation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** Marker annotation that defines that the provided method may return an empty map, list, or set */ +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.SOURCE) +public @interface Empty {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/CloudProviderNotFoundException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/CloudProviderNotFoundException.java new file mode 100644 index 00000000000..a64600cbb90 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/CloudProviderNotFoundException.java @@ -0,0 +1,32 @@ +package com.netflix.spinnaker.clouddriver.exceptions; + +import com.netflix.spinnaker.kork.exceptions.IntegrationException; +import org.springframework.http.HttpStatus; +import org.springframework.web.bind.annotation.ResponseStatus; + +@ResponseStatus(value = HttpStatus.BAD_REQUEST) +public class CloudProviderNotFoundException extends IntegrationException { + public CloudProviderNotFoundException(String message) { + super(message); + } + + public CloudProviderNotFoundException(String message, Throwable cause) { + super(message, cause); + } + + public CloudProviderNotFoundException(Throwable cause) { + super(cause); + } + + public CloudProviderNotFoundException(String message, String userMessage) { + super(message, userMessage); + } + + public CloudProviderNotFoundException(String message, Throwable cause, String userMessage) { + super(message, cause, userMessage); + } + + public CloudProviderNotFoundException(Throwable cause, String userMessage) { + super(cause, userMessage); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/OperationTimedOutException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/OperationTimedOutException.java new file mode 100644 index 00000000000..0a21aa7912f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/OperationTimedOutException.java @@ -0,0 +1,29 @@ +package com.netflix.spinnaker.clouddriver.exceptions; + +import com.netflix.spinnaker.kork.exceptions.SystemException; + +public class OperationTimedOutException extends SystemException { + public OperationTimedOutException(String message) { + super(message); + } + + public OperationTimedOutException(String message, Throwable cause) { + super(message, cause); + } + + public OperationTimedOutException(Throwable cause) { + super(cause); + } + + public OperationTimedOutException(String message, String userMessage) { + super(message, userMessage); + } + + public OperationTimedOutException(String message, Throwable cause, String userMessage) { + super(message, cause, userMessage); + } + + public OperationTimedOutException(Throwable cause, String userMessage) { + super(cause, userMessage); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/TrafficGuardException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/TrafficGuardException.java new file mode 100644 index 00000000000..cfc2fe53678 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/exceptions/TrafficGuardException.java @@ -0,0 +1,25 @@ +/* + * Copyright 2020 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.exceptions; + +import com.netflix.spinnaker.kork.exceptions.SpinnakerException; + +public class TrafficGuardException extends SpinnakerException { + public TrafficGuardException(String message) { + super(message); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/helpers/EnableDisablePercentageCategorizer.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/helpers/EnableDisablePercentageCategorizer.java new file mode 100644 index 00000000000..35cec8e3d75 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/helpers/EnableDisablePercentageCategorizer.java @@ -0,0 +1,34 @@ +package com.netflix.spinnaker.clouddriver.helpers; + +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import java.util.List; + +public class EnableDisablePercentageCategorizer { + /** + * During an enable/disable operation that accepts a desired percentage of instances to leave + * enabled/disabled, this acts as a helper function to return which instances still need to be + * enabled/disabled. + * + * @param modified are the instances that don't need to be enabled/disabled (presumably have + * already been enabled/disabled). + * @param unmodified are the instances that do need to be enabled/disabled. + * @param desiredPercentage is the end desired percentage. + * @return the list of instances to be enabled/disabled. If the percentage has already been + * achieved or exceeded by the input instances, we return an empty list. + * @note modified + unmodified should be the total list of instances managed by one server group + */ + public static List getInstancesToModify( + List modified, List unmodified, int desiredPercentage) { + if (desiredPercentage < 0 || desiredPercentage > 100) { + throw new RuntimeException("Desired target percentage must be between 0 and 100 inclusive"); + } + + int totalSize = modified.size() + unmodified.size(); + int newSize = (int) Math.ceil(totalSize * desiredPercentage / 100.0); + + int returnSize = modified.size() > newSize ? 0 : newSize - modified.size(); + + return Lists.newArrayList(Iterables.limit(unmodified, returnSize)); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/AccountDefinitionModule.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/AccountDefinitionModule.java new file mode 100644 index 00000000000..719f1fb847d --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/AccountDefinitionModule.java @@ -0,0 +1,48 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.jackson; + +import com.fasterxml.jackson.databind.jsontype.NamedType; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.netflix.spinnaker.clouddriver.jackson.mixins.CredentialsDefinitionMixin; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import java.util.List; + +/** + * Jackson module to register {@link CredentialsDefinition} type discriminators for the provided + * account definition types. Type discriminators are determined by the presence of a + * {@code @JsonTypeName} annotation. Plugins should export their account definition types via {@link + * com.netflix.spinnaker.clouddriver.security.AccountDefinitionTypeProvider} beans. + * + * @see + * com.netflix.spinnaker.clouddriver.config.AccountDefinitionConfiguration#accountDefinitionModule(List) + */ +public class AccountDefinitionModule extends SimpleModule { + + private final NamedType[] accountDefinitionTypes; + + public AccountDefinitionModule(NamedType... accountDefinitionTypes) { + super("Clouddriver Account Definition API"); + this.accountDefinitionTypes = accountDefinitionTypes; + } + + @Override + public void setupModule(SetupContext context) { + super.setupModule(context); + context.setMixInAnnotations(CredentialsDefinition.class, CredentialsDefinitionMixin.class); + context.registerSubtypes(accountDefinitionTypes); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/ClouddriverApiModule.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/ClouddriverApiModule.java new file mode 100644 index 00000000000..3480459a9a8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/ClouddriverApiModule.java @@ -0,0 +1,49 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.jackson; + +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.netflix.spinnaker.clouddriver.jackson.mixins.*; +import com.netflix.spinnaker.clouddriver.model.Cluster; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.model.SecurityGroup; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; + +public class ClouddriverApiModule extends SimpleModule { + + public ClouddriverApiModule() { + super("Clouddriver API"); + } + + @Override + public void setupModule(SetupContext context) { + super.setupModule(context); + context.setMixInAnnotations(SecurityGroup.class, SecurityGroupMixin.class); + context.setMixInAnnotations(Rule.class, RuleMixin.class); + context.setMixInAnnotations(Cluster.class, ClusterMixin.class); + context.setMixInAnnotations(ServerGroup.class, ServerGroupMixin.class); + context.setMixInAnnotations(ServerGroup.ImageSummary.class, ImageSummaryMixin.class); + context.setMixInAnnotations(ServerGroup.ImagesSummary.class, ImagesSummaryMixin.class); + context.setMixInAnnotations( + LoadBalancerProvider.Item.class, LoadBalancerProviderItemMixin.class); + context.setMixInAnnotations( + LoadBalancerProvider.ByAccount.class, LoadBalancerProviderByAccountMixin.class); + context.setMixInAnnotations( + LoadBalancerProvider.ByRegion.class, LoadBalancerProviderByRegionMixin.class); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ClusterMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ClusterMixin.java new file mode 100644 index 00000000000..458043e3b47 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ClusterMixin.java @@ -0,0 +1,37 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; +import com.netflix.spinnaker.clouddriver.model.NullCollectionSerializer; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import java.util.Map; +import java.util.Set; + +public interface ClusterMixin { + + @JsonSerialize(nullsUsing = NullCollectionSerializer.class) + Set getServerGroups(); + + @JsonSerialize(nullsUsing = NullCollectionSerializer.class) + Set getLoadBalancers(); + + @JsonIgnore + Map getExtraAttributes(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/CredentialsDefinitionMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/CredentialsDefinitionMixin.java new file mode 100644 index 00000000000..c0b5538df76 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/CredentialsDefinitionMixin.java @@ -0,0 +1,33 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; + +/** + * Jackson mixin to add a polymorphic type name value. When a {@link + * com.netflix.spinnaker.credentials.definition.CredentialsDefinition} implementation class is + * annotated with {@link com.fasterxml.jackson.annotation.JsonTypeName}, then the value of that + * annotation is used as the {@code type} property value when marshalling and unmarshalling + * CredentialsDefinition classes. It is recommended that the corresponding cloud provider name for + * the credentials be used here. + * + * @see AccountCredentials#getCloudProvider() + */ +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +public interface CredentialsDefinitionMixin {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ImageSummaryMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ImageSummaryMixin.java new file mode 100644 index 00000000000..72c33a5416f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ImageSummaryMixin.java @@ -0,0 +1,22 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public interface ImageSummaryMixin {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ImagesSummaryMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ImagesSummaryMixin.java new file mode 100644 index 00000000000..707f21094ff --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ImagesSummaryMixin.java @@ -0,0 +1,22 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public interface ImagesSummaryMixin {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderByAccountMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderByAccountMixin.java new file mode 100644 index 00000000000..76831d02bc2 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderByAccountMixin.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.List; + +public interface LoadBalancerProviderByAccountMixin { + + @JsonProperty("regions") + List getByRegions(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderByRegionMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderByRegionMixin.java new file mode 100644 index 00000000000..25a6c4e882b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderByRegionMixin.java @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.List; + +public interface LoadBalancerProviderByRegionMixin { + + @JsonProperty("name") + String getName(); + + @JsonProperty("loadBalancers") + List getLoadBalancers(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderItemMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderItemMixin.java new file mode 100644 index 00000000000..a8896783e64 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/LoadBalancerProviderItemMixin.java @@ -0,0 +1,26 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.List; + +public interface LoadBalancerProviderItemMixin { + @JsonProperty("accounts") + List getByAccounts(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/RuleMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/RuleMixin.java new file mode 100644 index 00000000000..9ef6e90498e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/RuleMixin.java @@ -0,0 +1,22 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonTypeInfo; + +@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "class") +public interface RuleMixin {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/SecurityGroupMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/SecurityGroupMixin.java new file mode 100644 index 00000000000..5113780a674 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/SecurityGroupMixin.java @@ -0,0 +1,22 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonTypeInfo; + +@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "class") +public interface SecurityGroupMixin {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ServerGroupMixin.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ServerGroupMixin.java new file mode 100644 index 00000000000..eb5b7a7303c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jackson/mixins/ServerGroupMixin.java @@ -0,0 +1,37 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jackson.mixins; + +import com.fasterxml.jackson.annotation.JsonGetter; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import java.util.Map; + +public interface ServerGroupMixin { + + @JsonGetter + Boolean isDisabled(); + + @JsonIgnore + ServerGroup.ImagesSummary getImagesSummary(); + + @JsonIgnore + ServerGroup.ImageSummary getImageSummary(); + + @JsonIgnore + Map getExtraAttributes(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobExecutionException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobExecutionException.java new file mode 100644 index 00000000000..3f0bc487e94 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobExecutionException.java @@ -0,0 +1,23 @@ +/* + * Copyright 2020 Google, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jobs; + +public class JobExecutionException extends RuntimeException { + public JobExecutionException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobExecutor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobExecutor.java new file mode 100644 index 00000000000..59c063057d3 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobExecutor.java @@ -0,0 +1,58 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.jobs; + +import com.netflix.spinnaker.clouddriver.jobs.local.ReaderConsumer; + +/** + * Executes a job defined by a JobRequest, returning the results as a JobResult. + * + *
<p>The caller can optionally supply a ReaderConsumer, in which case the output from the job will + * be transformed by the ReaderConsumer before being returned in JobResult. + * + * <p>There are two general types of errors that can occur when executing a job: + * + * <ul> + *   <li>If the JobExecutor fails to start or monitor a job, or if it is unable to read the job's + * output, a {@link JobExecutionException} is thrown. + *   <li>If the JobExecutor successfully starts and monitors the job, but the job itself fails, no + * exception is thrown but the returned {@link JobResult} will indicate that the job failed. + * </ul>
+ * + * @see JobRequest + * @see JobResult + */ +public interface JobExecutor { + /** + * Runs the specified JobRequest, returning the job's standard output in a JobResult. + * + * @param jobRequest The job request + * @return The result of the job + * @throws JobExecutionException if there is an error starting or monitoring the job + */ + JobResult runJob(JobRequest jobRequest); + + /** + * Runs the specified JobRequest, transforming the job's standard output with the supplied + * ReaderConsumer, and returning the transformed result in a JobResult. + * + * @param jobRequest The job request + * @param readerConsumer A function that transforms the job's standard output + * @return The result of the job + * @throws JobExecutionException if there is an error starting or monitoring the job + */ + JobResult runJob(JobRequest jobRequest, ReaderConsumer readerConsumer); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobRequest.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobRequest.java new file mode 100644 index 00000000000..716115134a4 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobRequest.java @@ -0,0 +1,103 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.jobs; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import lombok.Getter; +import org.apache.commons.exec.CommandLine; + +@Getter +public class JobRequest { + private final List tokenizedCommand; + private final CommandLine commandLine; + private final Map environment; + private final InputStream inputStream; + private final File workingDir; + + public JobRequest(List tokenizedCommand) { + this(tokenizedCommand, System.getenv(), new ByteArrayInputStream(new byte[0])); + } + + public JobRequest(List tokenizedCommand, InputStream inputStream) { + this(tokenizedCommand, System.getenv(), inputStream); + } + + public JobRequest( + List tokenizedCommand, Map environment, InputStream inputStream) { + this(tokenizedCommand, environment, inputStream, null); + } + + public JobRequest( + List tokenizedCommand, Map environment, File workingDir) { + this(tokenizedCommand, environment, new ByteArrayInputStream(new byte[0]), workingDir); + } + + public JobRequest(List tokenizedCommand, InputStream inputStream, File workingDir) { + this(tokenizedCommand, System.getenv(), inputStream, workingDir); + } + + public JobRequest(List tokenizedCommand, File workingDir) { + this(tokenizedCommand, System.getenv(), new ByteArrayInputStream(new byte[0]), workingDir); + } + + public JobRequest( + List tokenizedCommand, + Map environment, + InputStream inputStream, + File workingDir) { + this.tokenizedCommand = tokenizedCommand; + this.commandLine = createCommandLine(tokenizedCommand); + this.environment = environment; + this.inputStream = inputStream; + this.workingDir = workingDir; + } + + // only used in tests + public JobRequest(CommandLine commandLine, InputStream inputStream) { + this.tokenizedCommand = new ArrayList<>(); + this.commandLine = commandLine; + this.environment = System.getenv(); + this.inputStream = inputStream; + this.workingDir = null; + } + + private CommandLine createCommandLine(List tokenizedCommand) { + if (tokenizedCommand == null || tokenizedCommand.size() == 0) { + throw new IllegalArgumentException("No tokenizedCommand specified."); + } + + // Grab the first element as the command. + CommandLine commandLine = new CommandLine(tokenizedCommand.get(0)); + + int size = tokenizedCommand.size(); + String[] arguments = tokenizedCommand.subList(1, size).toArray(new String[size - 1]); + commandLine.addArguments(arguments, false); + return commandLine; + } + + @Override + public String toString() { + if (!tokenizedCommand.isEmpty()) { + return String.join(" ", tokenizedCommand); + } + return commandLine.toString(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobResult.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobResult.java new file mode 100644 index 00000000000..0a7871a3a38 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/JobResult.java @@ -0,0 +1,34 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jobs; + +import lombok.Builder; +import lombok.Getter; + +@Builder +@Getter +public class JobResult { + private final Result result; + private final T output; + private final String error; + private final boolean killed; + + public enum Result { + SUCCESS, + FAILURE; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/ForceDestroyWatchdog.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/ForceDestroyWatchdog.java new file mode 100644 index 00000000000..02169df1fc7 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/ForceDestroyWatchdog.java @@ -0,0 +1,63 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jobs.local; + +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.exec.ExecuteWatchdog; +import org.apache.commons.exec.Watchdog; + +/** + * Extension of {@link org.apache.commons.exec.ExecuteWatchdog} that sends SIGKILL signal to the + * watched process if not finished by SIGTERM. + */ +@Slf4j +public class ForceDestroyWatchdog extends ExecuteWatchdog { + + private static final long GRACE_PERIOD_MS = 250; + + private final long timeout; + private Process process; + + public ForceDestroyWatchdog(final long timeout) { + super(timeout); + this.timeout = timeout; + } + + @Override + public synchronized void start(Process processToMonitor) { + super.start(processToMonitor); + this.process = processToMonitor; + } + + @Override + public synchronized void timeoutOccured(final Watchdog w) { + super.timeoutOccured(w); + + try { + Thread.sleep(GRACE_PERIOD_MS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + + if (process.isAlive()) { + log.warn( + "Timeout: Waited {} ms for process to finish and process is still alive after sending SIGTERM signal. Sending SIGKILL.", + timeout + GRACE_PERIOD_MS); + process.destroyForcibly(); + } + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.java new file mode 100644 index 00000000000..191cee00847 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.java @@ -0,0 +1,159 @@ +/* + * Copyright 2019 Google, Inc. 
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.java new file mode 100644 index 00000000000..191cee00847 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/JobExecutorLocal.java @@ -0,0 +1,159 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.jobs.local; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutionException; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import com.netflix.spinnaker.clouddriver.jobs.JobRequest; +import com.netflix.spinnaker.clouddriver.jobs.JobResult; +import java.io.*; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.exec.*; + +@Slf4j +public class JobExecutorLocal implements JobExecutor { + // We don't actually use this executor to run the jobs as we're deferring to the Apache Commons + // library to do this. Ideally we'd refactor this class to use ProcessBuilder, but given that + // the main consumer is the Kubernetes provider and we have plans to refactor it to use a client + // library, it is not worth the effort at this point. + // This executor is only used for parsing the output of a job when running in streaming mode; the + // main thread waits on the job while the output parsing is sent to the executor.
+ private final ExecutorService executorService = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat(getClass().getSimpleName() + "-%d").build()); + private final long timeoutMinutes; + + public JobExecutorLocal(long timeoutMinutes) { + this.timeoutMinutes = timeoutMinutes; + } + + @Override + public JobResult<String> runJob(final JobRequest jobRequest) { + return executeWrapper(jobRequest, this::execute); + } + + @Override + public <T> JobResult<T> runJob(final JobRequest jobRequest, ReaderConsumer<T> readerConsumer) { + return executeWrapper(jobRequest, request -> executeStreaming(request, readerConsumer)); + } + + private <T> JobResult<T> executeWrapper( + final JobRequest jobRequest, RequestExecutor<T> requestExecutor) { + log.debug(String.format("Starting job: '%s'...", jobRequest.toString())); + final String jobId = UUID.randomUUID().toString(); + + JobResult<T> jobResult; + try { + jobResult = requestExecutor.execute(jobRequest); + } catch (IOException e) { + throw new JobExecutionException( + String.format("Error executing job: %s", jobRequest.toString()), e); + } + + if (jobResult.isKilled()) { + log.warn(String.format("Job %s timed out (after %d minutes)", jobId, timeoutMinutes)); + } + + return jobResult; + } + + private JobResult<String> execute(JobRequest jobRequest) throws IOException { + ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); + ByteArrayOutputStream stdErr = new ByteArrayOutputStream(); + + Executor executor = + buildExecutor( + new PumpStreamHandler(stdOut, stdErr, jobRequest.getInputStream()), jobRequest); + int exitValue = executor.execute(jobRequest.getCommandLine(), jobRequest.getEnvironment()); + + return JobResult.<String>builder() + .result(exitValue == 0 ? JobResult.Result.SUCCESS : JobResult.Result.FAILURE) + .killed(executor.getWatchdog().killedProcess()) + .output(stdOut.toString()) + .error(stdErr.toString()) + .build(); + } + + private <T> JobResult<T> executeStreaming(JobRequest jobRequest, ReaderConsumer<T> consumer) + throws IOException { + PipedOutputStream stdOut = new PipedOutputStream(); + ByteArrayOutputStream stdErr = new ByteArrayOutputStream(); + Executor executor = + buildExecutor( + new PumpStreamHandler(stdOut, stdErr, jobRequest.getInputStream()), jobRequest); + + // Send a task to the executor to consume the output from the job. + Future<T> futureResult = + executorService.submit( + () -> + consumer.consume( + new BufferedReader(new InputStreamReader(new PipedInputStream(stdOut))))); + int exitValue = executor.execute(jobRequest.getCommandLine(), jobRequest.getEnvironment()); + + T result; + try { + result = futureResult.get(timeoutMinutes, TimeUnit.MINUTES); + } catch (InterruptedException e) { + executor.getWatchdog().destroyProcess(); + Thread.currentThread().interrupt(); + throw new JobExecutionException( + String.format("Interrupted while executing job: %s", jobRequest.toString()), e); + } catch (ExecutionException e) { + throw new JobExecutionException( + String.format("Error parsing output of job: %s", jobRequest.toString()), e.getCause()); + } catch (TimeoutException e) { + throw new JobExecutionException( + String.format( + "Timed out reading output of job: %s with exit value: %d. stderr: %s", + jobRequest.toString(), exitValue, stdErr.toString()), + e); + } + + return JobResult.<T>builder() + .result(exitValue == 0 ? JobResult.Result.SUCCESS : JobResult.Result.FAILURE) + .killed(executor.getWatchdog().killedProcess()) + .output(result) + .error(stdErr.toString()) + .build(); + } + + private Executor buildExecutor(ExecuteStreamHandler streamHandler, JobRequest jobRequest) { + Executor executor = new DefaultExecutor(); + executor.setStreamHandler(streamHandler); + executor.setWatchdog(new ForceDestroyWatchdog(timeoutMinutes * 60 * 1000)); + // Setting this to null causes the executor to skip verifying exit codes; we'll handle checking + // the exit status instead of having the executor throw an exception for non-zero exit codes. + executor.setExitValues(null); + + if (jobRequest.getWorkingDir() != null) { + executor.setWorkingDirectory(jobRequest.getWorkingDir()); + } + + return executor; + } + + interface RequestExecutor<U> { + JobResult<U> execute(JobRequest jobRequest) throws IOException; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/ReaderConsumer.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/ReaderConsumer.java new file mode 100644 index 00000000000..bee00bb8566 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/jobs/local/ReaderConsumer.java @@ -0,0 +1,34 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.jobs.local; + +import java.io.BufferedReader; +import java.io.IOException; +import javax.annotation.Nonnull; +import javax.annotation.WillClose; + +/** + * Transforms a stream into an object of arbitrary type using a supplied BufferedReader for the + * stream. + * + * <p>Implementations are responsible for closing the supplied BufferedReader. + */ +@FunctionalInterface +public interface ReaderConsumer<T> { + @Nonnull + T consume(@WillClose BufferedReader r) throws IOException; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/metrics/TimedCallable.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/metrics/TimedCallable.java new file mode 100644 index 00000000000..e8d843fa2d8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/metrics/TimedCallable.java @@ -0,0 +1,84 @@ +package com.netflix.spinnaker.clouddriver.metrics; + +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.kork.annotations.DeprecationInfo; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +public class TimedCallable<T> implements Callable<T> { + + public static TimedCallable<Void> forRunnable(Registry registry, Id metricId, Runnable runnable) { + return new TimedCallable<>(registry, metricId, new RunnableWrapper(runnable)); + } + + public static <T> TimedCallable<T> forCallable( + Registry registry, Id metricId, Callable<T> callable) { + return new TimedCallable<>(registry, metricId, callable); + } + + @Deprecated + @DeprecationInfo( + reason = "Groovy removal, no difference between this and forCallable", + since = "1.22.0", + eol = "1.23.0") + public static <T> TimedCallable<T> forClosure( + Registry registry, Id metricId, Callable<T> closure) { + return new TimedCallable<>(registry, metricId, new CallableWrapper<>(closure)); + } + + private final Registry registry; + private final Id metricId; + private final Callable<T> callable; + + public TimedCallable(Registry registry, Id metricId, Callable<T> callable) { + this.registry = registry; + this.metricId = metricId; + this.callable = callable; + } + + @Override + public T call() throws Exception { + long start = System.nanoTime(); + Id thisId = metricId; + try { + T result = callable.call(); + thisId = thisId.withTag("success", "true"); + return result; + } catch (Exception ex) { + thisId = thisId.withTag("success", "false").withTag("cause", ex.getClass().getSimpleName()); + throw ex; + } finally { + registry.timer(thisId).record(System.nanoTime() - start, TimeUnit.NANOSECONDS); + } + } + + private static class RunnableWrapper implements Callable<Void> { + + private final Runnable runnable; + + public RunnableWrapper(Runnable runnable) { + this.runnable = runnable; + } + + @Override + public Void call() throws Exception { + runnable.run(); + return null; + } + } + + private static class CallableWrapper<T> implements Callable<T> { + + private final Callable<T> closure; + + public CallableWrapper(Callable<T> closure) { + this.closure = closure; + } + + @Override + public T call() throws Exception { + return closure.call(); + } + } +}
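Tying the job-execution pieces above together (`JobExecutorLocal`, `JobRequest`, `ReaderConsumer`), a hedged usage sketch of the streaming path; the command and timeout are illustrative:

```java
import java.util.ArrayList;
import java.util.List;

JobExecutorLocal executor = new JobExecutorLocal(10); // 10-minute timeout
JobRequest request = new JobRequest(List.of("cat", "/etc/hosts"));

// The ReaderConsumer receives the job's stdout as a BufferedReader and,
// per the interface contract, must close it when done.
JobResult<List<String>> result =
    executor.runJob(
        request,
        reader -> {
          try (reader) { // try-on-resource-reference requires Java 9+
            List<String> lines = new ArrayList<>();
            String line;
            while ((line = reader.readLine()) != null) {
              lines.add(line);
            }
            return lines;
          }
        });
```

On older runtimes, close the reader in a `finally` block instead of the `try (reader)` form.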
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Application.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Application.java new file mode 100644 index 00000000000..e377a8b0780 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Application.java @@ -0,0 +1,49 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.netflix.spinnaker.clouddriver.documentation.Empty; +import java.util.Map; +import java.util.Set; + +/** + * An application is a top-level construct that provides an association to {@link Cluster} objects. + */ +public interface Application { + /** + * The name of the application + * + * @return name + */ + String getName(); + + /** + * Arbitrary metadata that may be associated with an application. + * + * @return map of key->value pairs, or an empty map + */ + @Empty + Map<String, String> getAttributes(); + + /** + * The names of the clusters associated with this application, keyed by account name + * + * @return map of account name to cluster names + */ + @Empty + Map<String, Set<String>> getClusterNames(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ApplicationProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ApplicationProvider.java new file mode 100644 index 00000000000..f21b2f44d2a --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ApplicationProvider.java @@ -0,0 +1,46 @@ +/* + * Copyright 2014-2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.netflix.spinnaker.clouddriver.documentation.Empty; +import java.util.Set; +import javax.annotation.Nullable; + +/** + * An application provider is an interface for which {@link Application} objects may be retrieved. + * This interface defines a common contract for which various providers may be queried about their + * known applications. + */ +public interface ApplicationProvider { + /** + * Looks up all of the {@link Application} objects known to this provider + * + * @param expand Whether application relationships (i.e. cluster names) should be included + * @return a set of applications or an empty set if none are known to this provider + */ + @Empty + Set<? extends Application> getApplications(boolean expand); + + /** + * Looks up a particular application by name + * + * @param name name + * @return an application or null if it is not known to this provider + */ + @Nullable + Application getApplication(String name); +}
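As a concrete reference point, a minimal sketch of a provider-side `Application` implementation; `MapBackedApplication` and its fields are hypothetical, not part of this PR:

```java
import java.util.Map;
import java.util.Set;

// Hypothetical, immutable Application backed by plain collections.
public class MapBackedApplication implements Application {
  private final String name;
  private final Map<String, String> attributes;
  private final Map<String, Set<String>> clusterNames;

  public MapBackedApplication(
      String name, Map<String, String> attributes, Map<String, Set<String>> clusterNames) {
    this.name = name;
    this.attributes = attributes;
    this.clusterNames = clusterNames;
  }

  @Override
  public String getName() {
    return name;
  }

  @Override
  public Map<String, String> getAttributes() {
    return attributes;
  }

  @Override
  public Map<String, Set<String>> getClusterNames() {
    return clusterNames;
  }
}
```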
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CachingAgentScheduler.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CachingAgentScheduler.java new file mode 100644 index 00000000000..0aa7791e306 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CachingAgentScheduler.java @@ -0,0 +1,19 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface CachingAgentScheduler {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CertificateProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CertificateProvider.java new file mode 100644 index 00000000000..9322b2e8f56 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CertificateProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public interface CertificateProvider<T extends Certificate> { + String getCloudProvider(); + + Set<T> getAll(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricDatapoint.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricDatapoint.java new file mode 100644 index 00000000000..1fe494895a6 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricDatapoint.java @@ -0,0 +1,19 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface CloudMetricDatapoint {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricDescriptor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricDescriptor.java new file mode 100644 index 00000000000..3fd182e2f2c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricDescriptor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** + * Describes a metric reported by a cloud provider. + * + * <p>Implementations should add any fields necessary to uniquely identify a particular metric; for + * example, AWS supplies a "namespace" field, as well as a collection of "dimensions". + */ +public interface CloudMetricDescriptor { + + String getName(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricProvider.java new file mode 100644 index 00000000000..43384bd8d01 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricProvider.java @@ -0,0 +1,71 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.List; +import java.util.Map; + +public interface CloudMetricProvider<T extends CloudMetricDescriptor> { + + /** + * Returns the platform of the provider + * + * @return a String, e.g. 'aws', 'gcp' + */ + String getCloudProvider(); + + /** + * Returns a specific metric descriptor + * + * @param account the account + * @param region the region + * @param filters a collection of identifiers used to uniquely identify a metric + * @return a metric descriptor if one is found; should throw an exception if multiple metric + * descriptors are found for the supplied filters + */ + T getMetricDescriptor(String account, String region, Map<String, String> filters); + + /** + * Returns a list of metric descriptors matching the supplied filters + * + * @param account the account + * @param region the region + * @param filters a collection of identifiers used to select a subset of all metrics in the + * account and region + * @return a list of metric descriptors matching the filters + */ + List<T> findMetricDescriptors(String account, String region, Map<String, String> filters); + + /** + * Returns a statistic set for the metric descriptor uniquely identified by the supplied filters + * + * @param account the account + * @param region the region + * @param metricName the name of the target metric + * @param filters a collection of identifiers used to uniquely identify a metric + * @param startTime an inclusive timestamp to determine the oldest datapoint to return + * @param endTime an exclusive timestamp to determine the newest datapoint to return + * @return a CloudMetricStatistics object, describing the statistics with timestamps + */ + CloudMetricStatistics getStatistics( + String account, + String region, + String metricName, + Map<String, String> filters, + Long startTime, + Long endTime);
+}
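A hedged sketch of how a caller might use this contract; the provider variable, account, region, metric name, and filter keys below are hypothetical (the filter vocabulary is provider-specific, e.g. AWS namespaces and dimensions, as noted above):

```java
import java.util.Map;

long end = System.currentTimeMillis();
long start = end - 60 * 60 * 1000; // the previous hour

// Filters narrow the lookup to a single metric stream for one server group.
CloudMetricStatistics stats =
    metricProvider.getStatistics(
        "prod",
        "us-east-1",
        "CPUUtilization",
        Map.of("namespace", "AWS/EC2", "AutoScalingGroupName", "myapp-v001"),
        start,
        end);
```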
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricStatistics.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricStatistics.java new file mode 100644 index 00000000000..e62342df85b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/CloudMetricStatistics.java @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.List; + +public interface CloudMetricStatistics { + /** + * Unit of measurement for all datapoints; should be the plural form of the unit if applicable, + * e.g. "Bytes", "Percent", "Kilobytes/Second" + */ + String getUnit(); + + /** + * List of statistical datapoints; at least one statistic (average, sum, sampleCount, minimum, + * maximum) should be populated, as well as the timestamp + */ + List<CloudMetricDatapoint> getDatapoints(); +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Cluster.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Cluster.java similarity index 90% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Cluster.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Cluster.java index ff1336748b0..f74a1f54cc6 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Cluster.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Cluster.java @@ -16,20 +16,19 @@ package com.netflix.spinnaker.clouddriver.model; -import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.netflix.spinnaker.clouddriver.documentation.Empty; import com.netflix.spinnaker.clouddriver.names.NamerRegistry; import com.netflix.spinnaker.moniker.Moniker; +import java.util.Collections; +import java.util.Map; +import java.util.Set; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.Set; - /** - * A cluster is an object that provides an association between an account, many server groups, and many load balancers. - * - * + * A cluster is an object that provides an association between an account, many server groups, and + * many load balancers. */ public interface Cluster { /** @@ -68,7 +67,6 @@ default Moniker getMoniker() { * @return a set of {@link ServerGroup} objects or an empty set if none exist */ @Empty - @JsonSerialize(nullsUsing = NullCollectionSerializer.class) Set<? extends ServerGroup> getServerGroups(); /** @@ -77,7 +75,6 @@ default Moniker getMoniker() { * @return a set of {@link LoadBalancer} objects or an empty set if none exist */ @Empty - @JsonSerialize(nullsUsing = NullCollectionSerializer.class) // TODO(ttomsu): Why are load balancers associated with Clusters instead of ServerGroups?
Set<? extends LoadBalancer> getLoadBalancers(); @@ -91,4 +88,8 @@ class SimpleCluster implements Cluster { Set<ServerGroup> serverGroups; Set<LoadBalancer> loadBalancers; } + + default Map<String, Object> getExtraAttributes() { + return Collections.EMPTY_MAP; + } } diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ClusterProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ClusterProvider.java new file mode 100644 index 00000000000..3ca4d2513c7 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ClusterProvider.java @@ -0,0 +1,129 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.netflix.spinnaker.clouddriver.documentation.Empty; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; + +/** + * A cluster provider is an interface for the application to retrieve implementations of {@link + * Cluster} objects. This interface defines the common contract for which various providers may be + * queried for their known clusters. This interface assumes implementations may span across + * accounts. + */ +public interface ClusterProvider<T extends Cluster> { + /** + * Looks up all of the clusters available to this provider. Keyed on account name. + * + * @return set of clusters or an empty set if none exist + */ + @Empty + Map<String, Set<T>> getClusters(); + + /** + * Looks up all of the clusters known to this provider to be for a specified application. Keyed + * on account name. Similar to {@link #getClusterDetails(java.lang.String)}, but returns the names + * of server groups and load balancers, not reified views. + * + * @param application + * @return map of clusters, keyed on account name, or an empty map if none exist + */ + @Empty + Map<String, Set<T>> getClusterSummaries(String application); + + /** + * Looks up all of the clusters known to this provider to be for a specified application. Keyed + * on account name. Similar to {@link #getClusterSummaries(java.lang.String)}, but returns reified + * views of server groups and load balancers. + * + * @param application + * @return map of clusters, keyed on account name, or an empty map if none exist + */ + @Empty + Map<String, Set<T>> getClusterDetails(String application); + + /** + * Looks up all of the clusters known to this provider to be for a specified application and + * within a {@link com.netflix.spinnaker.clouddriver.security.AccountCredentials} registered with + * a {@link com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider} + * + * @param application + * @param account name + * @return set of clusters with load balancers and server groups populated, or an empty set if + * none exist + */ + @Empty + Set<T> getClusters(String application, String account); + + /** TODO(rz): What does "includeDetails" entail?
*/ + @Empty + default Set<T> getClusters(String application, String account, boolean includeDetails) { + return getClusters(application, account); + } + + /** + * Looks up a cluster known to this provider to be for a specified application, within a specified + * {@link com.netflix.spinnaker.clouddriver.security.AccountCredentials}, and with the specified + * name. + * + * @param account + * @param name + * @return cluster with load balancers and server groups populated, or null if none exists + */ + @Nullable + T getCluster(String application, String account, String name); + + /** TODO(rz): What does "includeDetails" entail? */ + @Nullable + T getCluster(String application, String account, String name, boolean includeDetails); + + /** + * Looks up a server group known to this provider, within a specified {@link + * com.netflix.spinnaker.clouddriver.security.AccountCredentials} and region, and with the + * specified name. + * + * <p>TODO(rz): What does "includeDetails" entail? + * + * @param account name + * @param region + * @param name + * @param includeDetails + * @return the server group or null if none exists + */ + @Nullable + ServerGroup getServerGroup(String account, String region, String name, boolean includeDetails); + + @Nullable + ServerGroup getServerGroup(String account, String region, String name); + + /** @return the identifier of the backing cloud provider */ + String getCloudProviderId(); + + /** + * Determines whether or not optimizations can be made by retrieving minimal or unexpanded + * clusters. + * + * <p>This primarily affects how server groups are loaded for a cluster (see {@link + * com.netflix.spinnaker.clouddriver.controllers.ClusterController}). + * + * <p>Minimal cluster support requires that server groups contained within a cluster be populated + * with: creation time stamps, region / zone details, disabled status, and instance counts (fully + * populated instances are not necessary). + */ + boolean supportsMinimalClusters(); +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/DataProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/DataProvider.java similarity index 80% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/DataProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/DataProvider.java index 475cd1cc1bf..401dc1dd1af 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/DataProvider.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/DataProvider.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.model; import java.io.OutputStream; -import java.util.Collection; import java.util.Map; public interface DataProvider { @@ -29,7 +28,7 @@ enum IdentifierType { /** * Fetch a specific object from a bucket that has been explicitly configured. * - * Filters are supported if the configured object is of type `list`. + * <p>Filters are supported if the configured object is of type `list`. * * @return string/list/map depending on type of configured object */ @@ -38,17 +37,13 @@ enum IdentifierType { /** * Stream a specified object from a bucket. * - * Both the object key and bucket name must be whitelisted. + * <p>Both the object key and bucket name must be whitelisted. */ void getAdhocData(String groupId, String bucketId, String objectId, OutputStream outputStream); - /** - * @return true if this identifier is supported by the data provider - */ + /** @return true if this identifier is supported by the data provider */ boolean supportsIdentifier(IdentifierType identifierType, String id); - /** - * @return the account name corresponding to the provided identifier - */ + /** @return the account name corresponding to the provided identifier */ String getAccountForIdentifier(IdentifierType identifierType, String id); } diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ElasticIp.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ElasticIp.java new file mode 100644 index 00000000000..baef3f4e6d9 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ElasticIp.java @@ -0,0 +1,55 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** A representation of an elastic ip */ +public interface ElasticIp { + /** + * The type of this elastic ip. May reference the cloud provider to which it is associated + * + * @return + */ + String getType(); + + /** + * The public address associated with this elastic ip + * + * @return + */ + String getAddress(); + + /** + * The identifier of the object that this elastic ip is attached to + * + * @return + */ + String getAttachedToId(); + + /** + * The account associated with this elastic ip + * + * @return + */ + String getAccountName(); + + /** + * The region associated with this elastic ip + * + * @return + */ + String getRegion(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ElasticIpProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ElasticIpProvider.java new file mode 100644 index 00000000000..9df404ef506 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ElasticIpProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public interface ElasticIpProvider<T extends ElasticIp> { + Set<T> getAllByAccount(String account); + + Set<T> getAllByAccountAndRegion(String account, String region); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/EntityTagsProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/EntityTagsProvider.java new file mode 100644 index 00000000000..f683cb33756 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/EntityTagsProvider.java @@ -0,0 +1,86 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public interface EntityTagsProvider { + /** + * Fetch EntityTags by any combination of {@code cloudProvider}/{@code type}/{@code + * idPrefix}/{@code tags} + */ + Collection<EntityTags> getAll( + String cloudProvider, + String application, + String entityType, + List<String> entityIds, + String idPrefix, + String account, + String region, + String namespace, + Map<String, Object> tags, + int maxResults); + + /** Fetch EntityTags by {@code id} */ + Optional<EntityTags> get(String id); + + /** Fetch EntityTags by {@code id} AND {@code tags}, both must match */ + Optional<EntityTags> get(String id, Map<String, Object> tags); + + /** Index an EntityTags */ + void index(EntityTags entityTags); + + /** Index multiple EntityTags */ + void bulkIndex(Collection<EntityTags> multipleEntityTags); + + /** Verify that EntityTags has been indexed and can be retrieved via a search */ + void verifyIndex(EntityTags entityTags); + + /** Delete EntityTags by {@code id} */ + void delete(String id); + + /** + * Remove all entity tags within a particular namespace, optionally deleting from the source of + * truth (front50). + */ + Map<String, Object> deleteByNamespace(String namespace, boolean dryRun, boolean deleteFromSource); + + /** + * Remove all entity tags with a particular tag, optionally deleting from the source of truth + * (front50). + */ + Map<String, Object> deleteByTag(String tag, boolean dryRun, boolean deleteFromSource); + + /** Delete EntityTags */ + void bulkDelete(Collection<EntityTags> multipleEntityTags); + + /** Reindex all EntityTags */ + void reindex(); + + /** + * Fetch delta (counts of EntityTags broken down by Elasticsearch and Front50) + * + * <p>Can be used to identify when Elasticsearch and Front50 are out-of-sync. + */ + Map<String, Object> delta(); + + /** Remove all entity tags referencing entities that no longer exist (in a clouddriver cache). */ + Map<String, Object> reconcile(String cloudProvider, String account, String region, boolean dryRun); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Function.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Function.java new file mode 100644 index 00000000000..9a5bfa1774b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Function.java @@ -0,0 +1,25 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface Function { + String getCloudProvider(); + + String getAccount(); + + String getRegion(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/FunctionProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/FunctionProvider.java new file mode 100644 index 00000000000..a933230a46c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/FunctionProvider.java @@ -0,0 +1,41 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.netflix.spinnaker.clouddriver.documentation.Empty; +import java.util.Collection; +import java.util.Collections; +import java.util.Set; + +public interface FunctionProvider { + Collection<? extends Function> getAllFunctions(); + + Function getFunction(String account, String region, String functionName); + + /** + * Returns all functions related to an application, based on the following criterion: the + * function name follows the Frigga naming conventions for load balancers (i.e., the function + * name starts with the application name, followed by a hyphen). + * + * @param applicationName the name of the application + * @return a collection of functions.
+ */ + @Empty + default Set<? extends Function> getApplicationFunctions(String applicationName) { + return Collections.emptySet(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Health.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Health.java new file mode 100644 index 00000000000..591926cbf0f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Health.java @@ -0,0 +1,26 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** + * Marker interface to represent a Health object. The concrete object will be serialized for + * consumers. + */ +public interface Health { + /** @return health state indication */ + HealthState getState(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/HealthState.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/HealthState.java new file mode 100644 index 00000000000..187bfbd69e0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/HealthState.java @@ -0,0 +1,37 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public enum HealthState { + Failed, + Down, + OutOfService, + Unknown, + Starting, + Succeeded, + Up, + Draining; + + public static HealthState fromString(final String name) { + for (HealthState state : values()) { + if (state.name().equalsIgnoreCase(name)) { + return state; + } + } + return Unknown; + } +}
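A short usage note on the enum above: `fromString` matches names case-insensitively and deliberately falls back to `Unknown` instead of throwing, so unrecognized provider-reported states degrade gracefully. For example (illustrative inputs):

```java
HealthState up = HealthState.fromString("UP");        // HealthState.Up
HealthState odd = HealthState.fromString("degraded"); // HealthState.Unknown (no such constant)
```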
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Image.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Image.java similarity index 98% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Image.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Image.java index 53decef57de..d5f77f8d4cf 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/Image.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Image.java @@ -20,20 +20,22 @@ public interface Image { /** * Returns an image identifier. + * * @return image id */ String getId(); /** * Returns an image name. + * * @return image name */ String getName(); /** * Returns the region the image exists in. + * + * @return image region */ String getRegion(); - } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ImageProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ImageProvider.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ImageProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ImageProvider.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Instance.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Instance.java new file mode 100644 index 00000000000..f503a5c41b8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Instance.java @@ -0,0 +1,89 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Primarily a marker interface, but provides the representation of an instance, which exists within + * a {@link ServerGroup}. Concrete implementations should provide more-specific data. + */ +public interface Instance { + /** + * The name of the instance. By convention this is expected to be globally unique. + * + * @return instance name + */ + String getName(); + + /** + * The human-readable name of the instance + * + * @return human-readable name + */ + default String getHumanReadableName() { + return getName(); + } + + /** + * A status of the health of the instance + * + * @return HealthState + */ + HealthState getHealthState(); + + /** + * A timestamp indicating when the instance was launched + * + * @return the number of milliseconds after the beginning of time (1 January, 1970 UTC) when this + * instance was launched + */ + Long getLaunchTime(); + + /** + * A zone specifier indicating where the instance resides + * + * @return the availability zone + */ + String getZone(); + + /** + * A list of all health metrics reported for this instance + * + * @return A list of health metrics, which will always include keys for type and status, and may + * include others, depending on the health metric + */ + List<Map<String, Object>> getHealth(); + + /** @deprecated use #getCloudProvider */ + default String getProviderType() { + return getCloudProvider(); + } + + /** + * Cloud-provider key, e.g. "aws", "titus" + * + * @return + */ + String getCloudProvider(); + + default Map<String, String> getLabels() { + return new HashMap<>(); + } +}
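To make the contract concrete, a minimal sketch of an `Instance` implementation; `FixedInstance` and all of its values are hypothetical, and the snippet assumes Java 9+ collection factories:

```java
import java.util.List;
import java.util.Map;

// Hypothetical Instance with a single synthetic health entry.
public class FixedInstance implements Instance {
  @Override
  public String getName() {
    return "myapp-v001-abc123";
  }

  @Override
  public HealthState getHealthState() {
    return HealthState.Up;
  }

  @Override
  public Long getLaunchTime() {
    return 1_600_000_000_000L; // epoch millis, per the javadoc above
  }

  @Override
  public String getZone() {
    return "us-east-1a";
  }

  @Override
  public List<Map<String, Object>> getHealth() {
    Map<String, Object> health = Map.of("type", "Discovery", "status", "Up");
    return List.of(health);
  }

  @Override
  public String getCloudProvider() {
    return "aws";
  }
}
```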
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceProvider.java new file mode 100644 index 00000000000..7a501ead82e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceProvider.java @@ -0,0 +1,30 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface InstanceProvider<T extends Instance, S> { + /** + * Returns the platform of the instance provider + * + * @return a String, e.g. 'aws', 'gce' + */ + String getCloudProvider(); + + T getInstance(String account, String region, String id); + + S getConsoleOutput(String account, String region, String id); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceType.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceType.java new file mode 100644 index 00000000000..2b3e4843c3c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceType.java @@ -0,0 +1,22 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** A representation of an instance type */ +public interface InstanceType { + String getName(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceTypeProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceTypeProvider.java new file mode 100644 index 00000000000..13ebbf448b8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/InstanceTypeProvider.java @@ -0,0 +1,23 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public interface InstanceTypeProvider<T extends InstanceType> { + Set<T> getAll(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobProvider.java new file mode 100644 index 00000000000..f5b46c1849b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobProvider.java @@ -0,0 +1,29 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Map; + +public interface JobProvider<T extends JobStatus> { + String getPlatform(); + + T collectJob(String account, String location, String id); + + Map<String, Object> getFileContents(String account, String location, String id, String fileName); + + void cancelJob(String account, String location, String id); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobState.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobState.java new file mode 100644 index 00000000000..5fbc2c3d3a4 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobState.java @@ -0,0 +1,26 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** A JobState defines the set of possible states a job can be in. */ +public enum JobState { + Starting, + Running, + Failed, + Succeeded, + Unknown; +}
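A hedged sketch of the polling loop these types suggest; the `jobProvider` variable, account, location, and job id are hypothetical, and interrupt handling is omitted for brevity:

```java
// Poll until the job leaves its non-terminal states (Starting/Running).
// collectJob returns the provider's JobStatus implementation.
JobStatus status = jobProvider.collectJob("my-account", "us-west-2", "job-123");
while (status.getJobState() == JobState.Starting
    || status.getJobState() == JobState.Running) {
  Thread.sleep(5_000); // throws InterruptedException; handle in real code
  status = jobProvider.collectJob("my-account", "us-west-2", "job-123");
}
```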
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobStatus.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobStatus.java new file mode 100644 index 00000000000..9a42174ce28 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/JobStatus.java @@ -0,0 +1,41 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.io.Serializable; +import java.util.Map; + +/** A JobStatus reflects the state of a running or completed job. */ +public interface JobStatus { + String getName(); + + String getAccount(); + + String getId(); + + String getLocation(); + + String getProvider(); + + JobState getJobState(); + + Long getCreatedTime(); + + Long getCompletedTime(); + + Map<String, ? extends Serializable> getCompletionDetails(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/KeyPair.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/KeyPair.java new file mode 100644 index 00000000000..b5c06f9ef6b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/KeyPair.java @@ -0,0 +1,26 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** A representation of a key pair */ +public interface KeyPair { + /** The name of the key pair. */ + String getKeyName(); + + /** The fingerprint of the key pair. */ + String getKeyFingerprint(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/KeyPairProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/KeyPairProvider.java new file mode 100644 index 00000000000..05ff1f7c697 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/KeyPairProvider.java @@ -0,0 +1,23 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public interface KeyPairProvider<T extends KeyPair> { + Set<T> getAll(); +}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancer.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancer.java similarity index 81% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancer.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancer.java index 55ac51e5b9a..83380347c4c 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancer.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancer.java @@ -19,14 +19,14 @@ import com.netflix.spinnaker.clouddriver.documentation.Empty; import com.netflix.spinnaker.clouddriver.names.NamerRegistry; import com.netflix.spinnaker.moniker.Moniker; - +import java.util.HashMap; +import java.util.Map; import java.util.Set; /** - * A representation of a network load balancer, which is indirectly correlated to a {@link Cluster} through its relationship to {@link ServerGroup} objects. This interface provides a contract for + * A representation of a network load balancer, which is indirectly correlated to a {@link Cluster} + * through its relationship to {@link ServerGroup} objects. This interface provides a contract for * retrieving the name of the load balancer and the names of the server groups that it is servicing. - * - * */ public interface LoadBalancer { /** @@ -46,19 +46,22 @@ default Moniker getMoniker() { } /** - * The type of this load balancer. Can indicate some vendor-specific designation, or cloud provider + * The type of this load balancer. Can indicate some vendor-specific designation, or cloud + * provider + * * @deprecated use #getCloudProvider * @return type */ - String getType(); + default String getType() { + return getCloudProvider(); + } - /** - * Provider-specific identifier - */ + /** Provider-specific identifier */ String getCloudProvider(); /** * Account under which this load balancer exists.
+ * * @return */ String getAccount(); @@ -70,4 +73,8 @@ default Moniker getMoniker() { */ @Empty Set<LoadBalancerServerGroup> getServerGroups(); + + default Map<String, String> getLabels() { + return new HashMap<>(); + } }
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerInstance.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerInstance.java similarity index 97% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerInstance.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerInstance.java index 67888fb02fe..7c8435194ca 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerInstance.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerInstance.java @@ -16,14 +16,13 @@ package com.netflix.spinnaker.clouddriver.model; +import java.util.Map; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; import org.apache.commons.lang3.StringUtils; -import java.util.Map; - @Data @Builder @AllArgsConstructor @@ -40,7 +39,7 @@ public LoadBalancerInstance(String id, String zone, Map<String, Object> health) this.health = health; } - public String getName() { + public String getName() { if (StringUtils.isEmpty(name)) { return id; } else {
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerProvider.java new file mode 100644 index 00000000000..298505ae341 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerProvider.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.netflix.spinnaker.clouddriver.documentation.Empty; +import java.util.List; +import java.util.Set; + +/** + * A loadBalancerProvider is an interface for the application to retrieve {@link LoadBalancer} + * objects. The interface provides a common contract for which one or many providers can be queried + * for their knowledge of load balancers at a given depth of specificity. + * + * <p>This is a temporary class for consolidating the load balancer controllers for each cloud + * provider. Each cloud provider-specific controller will implement this interface (it already does + * implicitly, this interface just makes it explicit). Then, this interface will be merged into the + * LoadBalancerProvider interface while each controller will merge with its <Cloud + * >LoadBalancerProvider. + */ +public interface LoadBalancerProvider<T extends LoadBalancer> { + String getCloudProvider(); + + List<? extends Item> list(); + + Item get(String name); + + List<? extends Details> byAccountAndRegionAndName(String account, String region, String name); + + /** + * Returns all load balancers related to an application based on one of the following criteria: - + * the load balancer name follows the Frigga naming conventions for load balancers (i.e., the load + * balancer name starts with the application name, followed by a hyphen) - the load balancer is + * used by a server group in the application + * + * @param application the name of the application + * @return a collection of load balancers with all attributes populated and a minimal amount of + * data for each server group: its name, region, and *only* the instances attached to the load + * balancers described above. The instances will have a minimal amount of data, as well: name, + * zone, and health related to any load balancers + */ + @Empty + Set<T> getApplicationLoadBalancers(String application); + + // Some providers call this a "Summary", which I think is semantically different from what it is: + // a details view object, grouped by account, then region. + interface Item { + String getName(); + + List<ByAccount> getByAccounts(); + } + + interface ByAccount { + String getName(); + + List<ByRegion> getByRegions(); + } + + interface ByRegion { + String getName(); + + List<? extends Details> getLoadBalancers(); + } + + interface Details {} +}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerServerGroup.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerServerGroup.java similarity index 97% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerServerGroup.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerServerGroup.java index e1e2d6639dd..313df22669e 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/LoadBalancerServerGroup.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/LoadBalancerServerGroup.java @@ -16,13 +16,12 @@ package com.netflix.spinnaker.clouddriver.model; +import java.util.Set; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.Set; - @Data @Builder @NoArgsConstructor @@ -34,4 +33,5 @@ public class LoadBalancerServerGroup { Boolean isDisabled; Set<String> detachedInstances; Set<LoadBalancerInstance> instances; + String cloudProvider; }
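The getApplicationLoadBalancers javadoc above defines two matching rules. One way a provider might encode them is sketched below; the helper, its parameters, and the assumption that LoadBalancer exposes getName() (per its interface javadoc) are illustrative, not part of this change.

// Illustrative only — a hypothetical helper encoding the two rules from the
// javadoc. Assumes java.util.Set is imported and LoadBalancer has getName().
static boolean relatedToApplication(
    LoadBalancer loadBalancer, String application, Set<String> appServerGroupNames) {
  // Rule 1: Frigga naming — the load balancer name starts with "<application>-".
  boolean friggaMatch =
      loadBalancer.getName() != null && loadBalancer.getName().startsWith(application + "-");
  // Rule 2: the load balancer is serving one of the application's server groups.
  boolean servesApplication =
      loadBalancer.getServerGroups().stream()
          .anyMatch(group -> appServerGroupNames.contains(group.getName()));
  return friggaMatch || servesApplication;
}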
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Network.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Network.java new file mode 100644 index 00000000000..a0d7d13c672 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Network.java @@ -0,0 +1,55 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** A representation of a network */ +public interface Network { + /** + * The cloud provider associated with this network + * + * @return + */ + String getCloudProvider(); + + /** + * The ID associated with this network + * + * @return + */ + String getId(); + + /** + * The name for this network + * + * @return + */ + String getName(); + + /** + * The account associated with this network + * + * @return + */ + String getAccount(); + + /** + * The region associated with this network + * + * @return + */ + String getRegion(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NetworkProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NetworkProvider.java new file mode 100644 index 00000000000..ed166d45bc9 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NetworkProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public interface NetworkProvider<T extends Network> { + String getCloudProvider(); + + Set<T> getAll(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopApplicationProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopApplicationProvider.java new file mode 100644 index 00000000000..af4ddab9bb5 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopApplicationProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopApplicationProvider implements ApplicationProvider { + + @Override + public Set getApplications(boolean expand) { + return Set.of(); + } + + @Override + public Application getApplication(String name) { + return null; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopCloudMetricProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopCloudMetricProvider.java new file mode 100644 index 00000000000..f61e25ff1ed --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopCloudMetricProvider.java @@ -0,0 +1,52 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class NoopCloudMetricProvider implements CloudMetricProvider { + + @Override + public String getCloudProvider() { + return "noop"; + } + + @Override + public CloudMetricDescriptor getMetricDescriptor( + String account, String region, Map filters) { + return null; + } + + @Override + public List findMetricDescriptors( + String account, String region, Map filters) { + return Collections.emptyList(); + } + + @Override + public CloudMetricStatistics getStatistics( + String account, + String region, + String metricName, + Map filters, + Long startTime, + Long endTime) { + return null; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopClusterProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopClusterProvider.java new file mode 100644 index 00000000000..ae802cb56fd --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopClusterProvider.java @@ -0,0 +1,75 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Map; +import java.util.Set; + +public class NoopClusterProvider implements ClusterProvider<Cluster> { + + @Override + public Map<String, Set<Cluster>> getClusters() { + return Map.of(); + } + + @Override + public Map<String, Set<Cluster>> getClusterDetails(String application) { + return Map.of(); + } + + @Override + public Map<String, Set<Cluster>> getClusterSummaries(String application) { + return Map.of(); + } + + @Override + public ServerGroup getServerGroup( + String account, String region, String name, boolean includeDetails) { + return null; + } + + @Override + public ServerGroup getServerGroup(String account, String region, String name) { + return null; + } + + @Override + public Set<Cluster> getClusters(String application, String account) { + return Set.of(); + } + + @Override + public Cluster getCluster( + String application, String account, String name, boolean includeDetails) { + return null; + } + + @Override + public Cluster getCluster(String application, String account, String name) { + return null; + } + + @Override + public String getCloudProviderId() { + return "noop"; + } + + @Override + public boolean supportsMinimalClusters() { + return false; + } +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopElasticIpProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopElasticIpProvider.java new file mode 100644 index 00000000000..0eec5e66134 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopElasticIpProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopElasticIpProvider implements ElasticIpProvider<ElasticIp> { + + @Override + public Set<ElasticIp> getAllByAccount(String account) { + return Set.of(); + } + + @Override + public Set<ElasticIp> getAllByAccountAndRegion(String account, String region) { + return Set.of(); + } +}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopImageProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopImageProvider.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopImageProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopImageProvider.java
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopInstanceProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopInstanceProvider.java new file mode 100644 index 00000000000..5b8f4c61540 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopInstanceProvider.java @@ -0,0 +1,37 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public class NoopInstanceProvider implements InstanceProvider { + + final String cloudProvider = "none"; + + @Override + public String getCloudProvider() { + return null; + } + + @Override + public Instance getInstance(String account, String region, String id) { + return null; + } + + @Override + public String getConsoleOutput(String account, String region, String id) { + return null; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopInstanceTypeProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopInstanceTypeProvider.java new file mode 100644 index 00000000000..ddaf71c4ebb --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopInstanceTypeProvider.java @@ -0,0 +1,27 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopInstanceTypeProvider implements InstanceTypeProvider { + + @Override + public Set getAll() { + return Set.of(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopKeyPairProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopKeyPairProvider.java new file mode 100644 index 00000000000..c8b3a8e967a --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopKeyPairProvider.java @@ -0,0 +1,27 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopKeyPairProvider implements KeyPairProvider { + + @Override + public Set getAll() { + return Set.of(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopLoadBalancerProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopLoadBalancerProvider.java new file mode 100644 index 00000000000..acf50060e26 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopLoadBalancerProvider.java @@ -0,0 +1,51 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.List; +import java.util.Set; + +public class NoopLoadBalancerProvider implements LoadBalancerProvider { + + final String cloudProvider = "noop"; + + @Override + public String getCloudProvider() { + return null; + } + + @Override + public List list() { + return List.of(); + } + + @Override + public LoadBalancerProvider.Item get(String name) { + return null; + } + + @Override + public List byAccountAndRegionAndName( + String account, String region, String name) { + return List.of(); + } + + @Override + public Set getApplicationLoadBalancers(String application) { + return Set.of(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopNetworkProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopNetworkProvider.java new file mode 100644 index 00000000000..efe433818c0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopNetworkProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopNetworkProvider implements NetworkProvider { + + @Override + public String getCloudProvider() { + return "noop"; + } + + @Override + public Set getAll() { + return Set.of(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopReservationReportProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopReservationReportProvider.java new file mode 100644 index 00000000000..11d5cb32181 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopReservationReportProvider.java @@ -0,0 +1,27 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Map; + +public class NoopReservationReportProvider implements ReservationReportProvider { + + @Override + public ReservationReport getReservationReport(String name, Map filters) { + return null; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopSecurityGroupProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopSecurityGroupProvider.java new file mode 100644 index 00000000000..68a41d0dadb --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopSecurityGroupProvider.java @@ -0,0 +1,66 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopSecurityGroupProvider implements SecurityGroupProvider { + + final String cloudProvider = "noop"; + + @Override + public String getCloudProvider() { + return null; + } + + @Override + public Set getAll(boolean includeRules) { + return Set.of(); + } + + @Override + public Set getAllByRegion(boolean includeRules, String region) { + return Set.of(); + } + + @Override + public Set getAllByAccount(boolean includeRules, String account) { + return Set.of(); + } + + @Override + public Set getAllByAccountAndName( + boolean includeRules, String account, String name) { + return Set.of(); + } + + @Override + public Set getAllByAccountAndRegion( + boolean includeRules, String account, String region) { + return Set.of(); + } + + @Override + public SecurityGroup get(String account, String region, String name, String vpcId) { + return null; + } + + @Override + public SecurityGroup getById(String account, String region, String id, String vpcId) { + return null; + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopServerGroupManagerProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopServerGroupManagerProvider.java similarity index 88% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopServerGroupManagerProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopServerGroupManagerProvider.java index 98ce56c4f82..62c052b64d0 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/NoopServerGroupManagerProvider.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopServerGroupManagerProvider.java @@ -19,7 +19,8 @@ import java.util.Set; -public class NoopServerGroupManagerProvider implements ServerGroupManagerProvider { +public class NoopServerGroupManagerProvider + implements ServerGroupManagerProvider { @Override public Set getServerGroupManagersByApplication(String application) { return null; diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopSubnetProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopSubnetProvider.java new file mode 100644 index 00000000000..27f2dc13840 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/NoopSubnetProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public class NoopSubnetProvider implements SubnetProvider { + + @Override + public String getCloudProvider() { + return null; + } + + @Override + public Set getAll() { + return Set.of(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ReservationReport.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ReservationReport.java new file mode 100644 index 00000000000..a6452d80480 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ReservationReport.java @@ -0,0 +1,19 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface ReservationReport {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ReservationReportProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ReservationReportProvider.java new file mode 100644 index 00000000000..52b08c200e3 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ReservationReportProvider.java @@ -0,0 +1,23 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Map; + +public interface ReservationReportProvider<T extends ReservationReport> { + T getReservationReport(String name, Map<String, String> filters); +}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroup.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroup.java similarity index 82% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroup.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroup.java index 772d65727be..0a76f55e16b 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroup.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroup.java @@ -16,29 +16,27 @@ package com.netflix.spinnaker.clouddriver.model; -import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; import com.netflix.spinnaker.clouddriver.names.NamerRegistry; import com.netflix.spinnaker.moniker.Moniker; - +import java.util.HashMap; +import java.util.Map; import java.util.Set; -/** - * A representation of a security group - */ -@JsonTypeInfo(use=JsonTypeInfo.Id.CLASS, include=JsonTypeInfo.As.PROPERTY, property="class") +/** A representation of a security group */ public interface SecurityGroup { /** * The type of this security group. May reference the cloud provider to which it is associated + * + * @deprecated use #getCloudProvider + * @return */ - String getType(); + default String getType() { + return getCloudProvider(); + } - /** - * Provider-specific identifier - */ + /** Provider-specific identifier */ String getCloudProvider(); /** @@ -67,12 +65,14 @@ default Moniker getMoniker() { /** * The application associated with this security group. * - * Deprecated in favor of getMoniker().getApp() + * <p>Deprecated in favor of getMoniker().getApp() * * @return */ @Deprecated - String getApplication(); + default String getApplication() { + return getMoniker().getApp(); + } /** * The account associated with this security group @@ -98,4 +98,8 @@ default Moniker getMoniker() { Set<Rule> getOutboundRules(); SecurityGroupSummary getSummary(); + + default Map<String, String> getLabels() { + return new HashMap<>(); + } }
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroupProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroupProvider.java new file mode 100644 index 00000000000..a6d016e133b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroupProvider.java @@ -0,0 +1,37 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Collection; + +public interface SecurityGroupProvider<T extends SecurityGroup> { + String getCloudProvider(); + + Collection<T> getAll(boolean includeRules); + + Collection<T> getAllByRegion(boolean includeRules, String region); + + Collection<T> getAllByAccount(boolean includeRules, String account); + + Collection<T> getAllByAccountAndName(boolean includeRules, String account, String name); + + Collection<T> getAllByAccountAndRegion(boolean includeRule, String account, String region); + + T get(String account, String region, String name, String vpcId); + + T getById(String account, String region, String id, String vpcId); +}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroupSummary.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroupSummary.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/SecurityGroupSummary.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SecurityGroupSummary.java
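The change to SecurityGroup.getType() and getApplication() above repeats a pattern this diff applies in several places (see LoadBalancer.java earlier): a deprecated abstract getter becomes a default method delegating to its replacement, so existing callers keep compiling while implementations can drop the override. A self-contained analogue of the pattern (not Spinnaker code):

// The deprecated accessor becomes a default method that delegates to its
// replacement; implementations only supply the new accessor.
interface Typed {
  String getCloudProvider();

  /** @deprecated use {@link #getCloudProvider()} */
  @Deprecated
  default String getType() {
    return getCloudProvider();
  }
}

class ExampleTyped implements Typed {
  @Override
  public String getCloudProvider() {
    return "examplecloud";
  }
}
// new ExampleTyped().getType() evaluates to "examplecloud" with no override needed.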
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroup.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroup.java new file mode 100644 index 00000000000..a7a8c5e82ba --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroup.java @@ -0,0 +1,258 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import com.netflix.spinnaker.clouddriver.documentation.Empty; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.*; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * A server group provides a relationship to many instances, and exists within a defined region and + * one or more zones. + */ +public interface ServerGroup { + /** + * The name of the server group + * + * @return name + */ + String getName(); + + /** + * This resource's moniker + * + * @return + */ + default Moniker getMoniker() { + return NamerRegistry.getDefaultNamer().deriveMoniker(this); + } + + /** + * Some arbitrary identifying type for this server group. May provide vendor-specific + * identification or data-center awareness to callers. + * + * @deprecated use #getCloudProvider + * @return type + */ + default String getType() { + return getCloudProvider(); + } + + /** Provider-specific identifier */ + String getCloudProvider(); + + /** + * The region in which the instances of this server group are known to exist. + * + * @return server group region + */ + String getRegion(); + + /** + * Some vendor-specific indicator that the server group is disabled + * + * @return true if the server group is disabled; false otherwise + */ + Boolean isDisabled(); + + /** + * Timestamp indicating when the server group was created + * + * @return the number of milliseconds after the beginning of time (1 January, 1970 UTC) when this + * server group was created + */ + Long getCreatedTime(); + + /** + * The zones within a region that the instances within this server group occupy. + * + * @return zones of a region for which this server group has presence or is capable of having + * presence, or an empty set if none exist + */ + @Empty + Set<String> getZones(); + + /** + * The concrete instances that comprise this server group + * + * @return set of instances or an empty set if none exist + */ + @Empty + Set<? extends Instance> getInstances(); + + /** + * The names of the load balancers associated with this server group + * + * @return the set of load balancer names or an empty set if none exist + */ + @Empty + Set<String> getLoadBalancers(); + + /** + * The names of the security groups associated with this server group + * + * @return the set of security group names or an empty set if none exist + */ + @Empty + Set<String> getSecurityGroups(); + + /** + * A collection of attributes describing the launch configuration of this server group + * + * @return a map containing various attributes of the launch configuration + */ + @Empty + Map<String, Object> getLaunchConfig(); + + default String getInstanceType() { + return null; + } + + /** + * A collection of attributes describing the tags of this server group + * + * @return a map containing various tags + */ + @Empty + default Map<String, String> getTags() { + return null; + } + + /** + * A data structure with the total number of instances, and the number of instances reporting each + * status + * + * @return a data structure + */ + InstanceCounts getInstanceCounts(); + + /** + * The capacity (in terms of number of instances) required for the server group + * + * @return + */ + Capacity getCapacity(); + + /** + * This represents all images deployed to the server group. For most providers, this will be a + * singleton. + */ + ImagesSummary getImagesSummary(); + + /** + * An ImageSummary is collection of data related to the build and VM image of the server group.
+ * This is merely a view of data from other parts of this object. + * + * <p>Deprecated in favor of getImagesSummary, which is a more generic getImageSummary. + */ + @Deprecated + ImageSummary getImageSummary(); + + default List<ServerGroupManager.ServerGroupManagerSummary> getServerGroupManagers() { + return new ArrayList<>(); + } + + default Map<String, String> getLabels() { + return new HashMap<>(); + } + + default Map<String, Object> getExtraAttributes() { + return Collections.EMPTY_MAP; + } + + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Data + class InstanceCounts { + /** Total number of instances in the server group */ + private Integer total = 0; + /** Total number of "Up" instances (all health indicators report "Up" or "Unknown") */ + private Integer up = 0; + /** Total number of "Down" instances (at least one health indicator reports "Down") */ + private Integer down = 0; + /** + * Total number of "Unknown" instances (all health indicators report "Unknown", or no health + * indicators reported) + */ + private Integer unknown = 0; + /** + * Total number of "OutOfService" instances (at least one health indicator reports + * "OutOfService", none are "Down" + */ + private Integer outOfService = 0; + /** + * Total number of "Starting" instances (where any health indicator reports "Starting" and none + * are "Down" or "OutOfService") + */ + private Integer starting = 0; + } + + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Data + class Capacity { + /** + * Minimum number of instances required in this server group. If provider specific {@code + * ServerGroup} does not have a notion of min then this should be same as {@code desired} + */ + private Integer min; + /** + * Max number of instances required in this server group. If provider specific {@code + * ServerGroup} does not have a notion of max then this should be same as {@code desired} + */ + private Integer max; + /** Desired number of instances required in this server group */ + private Integer desired; + + /** + * @return true if the capacity of this server group is fixed, i.e min, max and desired are all + * the same + */ + public boolean isPinned() { + return Objects.equals(max, desired) && Objects.equals(desired, min); + } + } + + /** + * Cloud provider-specific data related to the build and VM image of the server group. Deprecated + * in favor of Images summary + */ + interface ImageSummary extends Summary { + String getServerGroupName(); + + String getImageId(); + + String getImageName(); + + Map<String, Object> getImage(); + + @Empty + Map<String, Object> getBuildInfo(); + } + + /** Cloud provider-specific data related to the build and VM image of the server group. */ + interface ImagesSummary extends Summary { + List<? extends ImageSummary> getSummaries(); + } +}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupManager.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupManager.java similarity index 85% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupManager.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupManager.java index 7f5f7c684ff..40d000f8037 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupManager.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupManager.java @@ -18,20 +18,29 @@ package com.netflix.spinnaker.clouddriver.model; import com.netflix.spinnaker.moniker.Moniker; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.Set; - public interface ServerGroupManager { String getName(); + String getAccount(); + Moniker getMoniker(); - Set<ServerGroupManagerSummary> getServerGroups(); + + Set<ServerGroupManagerSummary> getServerGroups(); + String getRegion(); + default Map<String, String> getLabels() { + return new HashMap<>(); + } + @Data @Builder @NoArgsConstructor
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupManagerProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupManagerProvider.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupManagerProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupManagerProvider.java
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupProvider.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupProvider.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupProvider.java
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupSummary.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupSummary.java similarity index 99% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupSummary.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupSummary.java index 04accd4db4b..217ed32c191 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/ServerGroupSummary.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServerGroupSummary.java @@ -21,7 +21,10 @@ public interface ServerGroupSummary { String getName(); + String getRegion(); + String getAccount(); + Moniker getMoniker(); }
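Capacity.isPinned() in ServerGroup.java above holds exactly when min, max, and desired coincide. Because Capacity is annotated with @Builder, the semantics are easy to demonstrate; the snippet below is illustrative usage, not part of this change.

// Illustrative only: min == max == desired means the group cannot scale.
ServerGroup.Capacity pinned =
    ServerGroup.Capacity.builder().min(3).max(3).desired(3).build();
ServerGroup.Capacity scalable =
    ServerGroup.Capacity.builder().min(1).max(10).desired(3).build();
boolean a = pinned.isPinned();   // true
boolean b = scalable.isPinned(); // false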
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Service.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Service.java new file mode 100644 index 00000000000..4291ee635e2 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Service.java @@ -0,0 +1,25 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Collection; + +public interface Service { + String getName(); + + Collection<? extends ServicePlan> getServicePlans(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServiceInstance.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServiceInstance.java new file mode 100644 index 00000000000..878fe0a34fd --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServiceInstance.java @@ -0,0 +1,23 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface ServiceInstance { + String getServiceInstanceName(); + + String getStatus(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServicePlan.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServicePlan.java new file mode 100644 index 00000000000..dbda74b18de --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServicePlan.java @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +public interface ServicePlan { + String getName(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServiceProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServiceProvider.java new file mode 100644 index 00000000000..0522114cf59 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/ServiceProvider.java @@ -0,0 +1,27 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Collection; + +public interface ServiceProvider { + String getCloudProvider(); + + Collection<? extends Service> getServices(String account, String region); + + ServiceInstance getServiceInstance(String account, String region, String serviceInstanceName); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Subnet.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Subnet.java new file mode 100644 index 00000000000..b0e722f2e50 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Subnet.java @@ -0,0 +1,41 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** A representation of a subnet */ +public interface Subnet { + /** + * The cloud provider associated with this subnet + * + * @return + */ + String getType(); + + /** + * The ID associated with this subnet + * + * @return + */ + String getId(); + + /** + * The purpose for this subnet. Examples: internal, external, secure, performance, etc + * + * @return + */ + String getPurpose(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SubnetProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SubnetProvider.java new file mode 100644 index 00000000000..1f0efcfc930 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/SubnetProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.model; + +import java.util.Set; + +public interface SubnetProvider<T extends Subnet> { + String getCloudProvider(); + + Set<T> getAll(); +}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Summary.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Summary.java new file mode 100644 index 00000000000..ae9380b94e1 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/Summary.java @@ -0,0 +1,24 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model; + +/** + * Summary objects are views into particular pieces of a server group. Instead of requesting an + * entire server group (including all instances and load balancers and whatnot), clients can request + * a Summary that contains just the information needed. + */ +public interface Summary {}
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/Rule.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/securitygroups/Rule.java similarity index 90% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/Rule.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/securitygroups/Rule.java index b66c128eefd..ee3a90c3239 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/Rule.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/securitygroups/Rule.java @@ -16,19 +16,16 @@ package com.netflix.spinnaker.clouddriver.model.securitygroups; -import com.fasterxml.jackson.annotation.JsonTypeInfo; +import java.util.SortedSet; import lombok.Data; import org.apache.commons.lang3.ObjectUtils; -import java.util.SortedSet; - /** * An abstract interface representing a security rule. * * @see IpRangeRule * @see SecurityGroupRule */ -@JsonTypeInfo(use=JsonTypeInfo.Id.CLASS, include=JsonTypeInfo.As.PROPERTY, property="class") public interface Rule { /** * The port ranges associated with this rule * @@ -49,7 +46,7 @@ public int compareTo(PortRange o) { if (o == null) { return 1; } - + int res = ObjectUtils.compare(this.startPort, o.startPort); return res == 0 ? ObjectUtils.compare(this.endPort, o.endPort) : res; }
diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/SecurityGroupRule.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/securitygroups/SecurityGroupRule.java similarity index 87% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/SecurityGroupRule.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/securitygroups/SecurityGroupRule.java index f503b3f2762..f81421df3ff 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/securitygroups/SecurityGroupRule.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/securitygroups/SecurityGroupRule.java @@ -17,30 +17,23 @@ package com.netflix.spinnaker.clouddriver.model.securitygroups; import com.netflix.spinnaker.clouddriver.model.SecurityGroup; +import java.util.SortedSet; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.SortedSet; - @Data @Builder @AllArgsConstructor @NoArgsConstructor public class SecurityGroupRule implements Rule { - /** - * The security group associated with this rule - */ + /** The security group associated with this rule */ SecurityGroup securityGroup; - /** - * The IP protocol associated with this rule - */ + /** The IP protocol associated with this rule */ String protocol; - /** - * {@inheritDoc} - */ + /** {@inheritDoc} */ SortedSet<PortRange> portRanges; }
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/view/ClusterViewModelPostProcessor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/view/ClusterViewModelPostProcessor.java new file mode 100644 index 00000000000..b58bcdafc9e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/view/ClusterViewModelPostProcessor.java @@ -0,0 +1,24 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model.view; + +import com.netflix.spinnaker.clouddriver.model.Cluster; + +/** + * (Optionally) used in clouddriver-web by the ClusterController to mutate server group API data. + */ +public interface ClusterViewModelPostProcessor extends ModelObjectViewModelPostProcessor<Cluster> {}
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.model.view; + +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +public interface ModelObjectViewModelPostProcessor<T> { + static <R> R applyExtensionsToObject( + Optional<List<ModelObjectViewModelPostProcessor<? super R>>> extensions, R object) { + return extensions + .map(exts -> exts.stream().filter(ext -> ext.supports(object)).collect(Collectors.toList())) + .filter(exts -> !exts.isEmpty()) + .map( + exts -> { + for (ModelObjectViewModelPostProcessor<? super R> extension : exts) { + extension.process(object); + } + return object; + }) + .orElse(object); + } + + static <R> Collection<R> applyExtensions( + Optional<List<ModelObjectViewModelPostProcessor<? super R>>> extensions, Collection<R> objects) { + return extensions + .map( + ext -> + (Collection<R>) + objects.stream() + .map(o -> applyExtensionsToObject(extensions, o)) + .collect(Collectors.toList())) + .orElse(objects); + } + + boolean supports(T instance); + + void process(T model); +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/view/ServerGroupViewModelPostProcessor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/view/ServerGroupViewModelPostProcessor.java similarity index 80% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/view/ServerGroupViewModelPostProcessor.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/view/ServerGroupViewModelPostProcessor.java index 2e650256c9c..a4086eab902 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/model/view/ServerGroupViewModelPostProcessor.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/model/view/ServerGroupViewModelPostProcessor.java @@ -18,11 +18,8 @@ import com.netflix.spinnaker.clouddriver.model.ServerGroup; /** - * (Optionally) used in clouddriver-web by the ServerGroupController to mutate server group API data. + * (Optionally) used in clouddriver-web by the ServerGroupController to mutate server group API + * data. */ -public interface ServerGroupViewModelPostProcessor<T extends ServerGroup> { - - boolean supports(ServerGroup serverGroup); - - void process(T serverGroup); -} +public interface ServerGroupViewModelPostProcessor + extends ModelObjectViewModelPostProcessor<ServerGroup> {} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/names/NamerRegistry.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/names/NamerRegistry.java new file mode 100644 index 00000000000..1192c98401e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/names/NamerRegistry.java @@ -0,0 +1,111 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
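To show how the static helpers above are intended to be called (the generic signatures here follow the reconstruction above, not a verified API), a hypothetical caller that decorates a single server group before serving it:

```java
package com.example.clouddriver.web; // hypothetical caller

import com.netflix.spinnaker.clouddriver.model.ServerGroup;
import com.netflix.spinnaker.clouddriver.model.view.ModelObjectViewModelPostProcessor;
import java.util.List;
import java.util.Optional;

class ServerGroupDecorator {
  private final Optional<List<ModelObjectViewModelPostProcessor<? super ServerGroup>>> extensions;

  ServerGroupDecorator(
      Optional<List<ModelObjectViewModelPostProcessor<? super ServerGroup>>> extensions) {
    this.extensions = extensions;
  }

  ServerGroup decorate(ServerGroup serverGroup) {
    // Only post-processors whose supports() returns true run; each one mutates
    // the model in place before it is serialized to the API response.
    return ModelObjectViewModelPostProcessor.applyExtensionsToObject(extensions, serverGroup);
  }
}
```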
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.names; + +import com.netflix.spinnaker.moniker.Namer; +import com.netflix.spinnaker.moniker.frigga.FriggaReflectiveNamer; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import lombok.extern.slf4j.Slf4j; + +/** + * The idea is that each provider can register a (per-account) naming strategy based on config. + * This assigns a `moniker` to any named resource which is then pushed through the rest of + * Spinnaker and can be handled without prior knowledge of what naming strategy was used. This is + * the only place the mapping from (provider, account, resource) -> namer must happen within + * Spinnaker. + */ +public class NamerRegistry { + + private static Namer DEFAULT_NAMER = new FriggaReflectiveNamer(); + + private static ProviderLookup providerLookup = new ProviderLookup(); + + private final List<NamingStrategy> namingStrategies; + + public static Namer getDefaultNamer() { + return DEFAULT_NAMER; + } + + public static ProviderLookup lookup() { + return providerLookup; + } + + public NamerRegistry(List<NamingStrategy> namingStrategies) { + this.namingStrategies = namingStrategies; + } + + public Namer getNamingStrategy(String strategyName) { + return this.namingStrategies.stream() + .filter(strategy -> strategy.getName().equalsIgnoreCase(strategyName)) + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "Could not find naming strategy '" + strategyName + "'")); + } + + @Slf4j + public static class ResourceLookup { + private ConcurrentHashMap<Class<?>, Namer<?>> map = new ConcurrentHashMap<>(); + + public <T> Namer<T> withResource(Class<T> resource) { + if (!map.containsKey(resource)) { + log.debug("Looking up a namer for a non-registered resource"); + return (Namer<T>) getDefaultNamer(); + } else { + return (Namer<T>) map.get(resource); + } + } + + public <T> void setNamer(Class<T> resource, Namer<T> namer) { + map.put(resource, namer); + } + } + + @Slf4j + public static class AccountLookup { + private ConcurrentHashMap<String, ResourceLookup> map = new ConcurrentHashMap<>(); + + public ResourceLookup withAccount(String accountName) { + if (!map.containsKey(accountName)) { + log.debug("Looking up a namer for a non-registered account"); + ResourceLookup result = new ResourceLookup(); + map.put(accountName, result); + return result; + } else { + return map.get(accountName); + } + } + } + + @Slf4j + public static class ProviderLookup { + private ConcurrentHashMap<String, AccountLookup> map = new ConcurrentHashMap<>(); + + public AccountLookup withProvider(String providerName) { + if (!map.containsKey(providerName)) { + log.debug("Looking up a namer for a non-registered provider"); + AccountLookup result = new AccountLookup(); + map.put(providerName, result); + return result; + } else { + return map.get(providerName); + } + } + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/names/NamingStrategy.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/names/NamingStrategy.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/names/NamingStrategy.java rename to
clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/names/NamingStrategy.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverterNotFoundException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverterNotFoundException.java new file mode 100644 index 00000000000..85643ef6a7c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationConverterNotFoundException.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import org.springframework.http.HttpStatus; +import org.springframework.web.bind.annotation.ResponseStatus; + +@ResponseStatus(value = HttpStatus.BAD_REQUEST) +public class AtomicOperationConverterNotFoundException extends RuntimeException { + public AtomicOperationConverterNotFoundException() {} + + public AtomicOperationConverterNotFoundException(String message) { + super(message); + } + + public AtomicOperationConverterNotFoundException(String message, Throwable cause) { + super(message, cause); + } + + public AtomicOperationConverterNotFoundException(Throwable cause) { + super(cause); + } + + protected AtomicOperationConverterNotFoundException( + String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationDescriptionPreProcessor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationDescriptionPreProcessor.java similarity index 84% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationDescriptionPreProcessor.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationDescriptionPreProcessor.java index e991962bf56..8333b09d166 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationDescriptionPreProcessor.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationDescriptionPreProcessor.java @@ -19,18 +19,20 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TreeTraversingParser; - import java.io.IOException; import java.util.Map; /** - * Provides an extension point for manipulating an {@code AtomicOperation} context prior to execution. + * Provides an extension point for manipulating an {@code AtomicOperation} context prior to + * execution.
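For context on the NamerRegistry introduced above, a hedged sketch of how the provider/account/resource lookup chain is typically walked; the provider key, account name, and model type are invented, and deriveMoniker comes from the moniker library's Namer interface:

```java
package com.example.clouddriver.names; // hypothetical caller

import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
import com.netflix.spinnaker.moniker.Moniker;
import com.netflix.spinnaker.moniker.Namer;

class MonikerExample {
  // Stand-in for a provider's registered resource type.
  static class ExampleServerGroup {}

  Moniker monikerFor(ExampleServerGroup serverGroup) {
    // Walks provider -> account -> resource; falls back to the Frigga-based
    // default namer when nothing was registered for this resource type.
    Namer<ExampleServerGroup> namer =
        NamerRegistry.lookup()
            .withProvider("aws") // assumed provider key
            .withAccount("prod") // assumed account name
            .withResource(ExampleServerGroup.class);
    return namer.deriveMoniker(serverGroup);
  }
}
```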
*/ public interface AtomicOperationDescriptionPreProcessor { boolean supports(Class descriptionClass); - Map<String, Object> process(Map<String, Object> description); - default <T> T mapTo(ObjectMapper objectMapper, Map<String, Object> description, Class<T> clazz) throws IOException { + Map<String, Object> process(Map<String, Object> description); + + default <T> T mapTo(ObjectMapper objectMapper, Map<String, Object> description, Class<T> clazz) + throws IOException { ObjectNode objectNode = objectMapper.valueToTree(description); return objectMapper.readValue(new TreeTraversingParser(objectNode, objectMapper), clazz); } diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationException.java new file mode 100644 index 00000000000..87bbda475c4 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationException.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.netflix.spinnaker.kork.exceptions.SpinnakerException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.springframework.http.HttpStatus; +import org.springframework.web.bind.annotation.ResponseStatus; + +@ResponseStatus(HttpStatus.BAD_REQUEST) +public class AtomicOperationException extends SpinnakerException { + public AtomicOperationException(String message, List<String> errors) { + super(message); + this.errors = errors; + } + + @Override + public Map<String, Object> getAdditionalAttributes() { + if (errors == null || errors.isEmpty()) { + return Collections.emptyMap(); + } + Map<String, Object> map = new HashMap<>(); + map.put("errors", errors); + return map; + } + + public List<String> getErrors() { + return errors; + } + + public void setErrors(List<String> errors) { + this.errors = errors; + } + + private List<String> errors; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationNotFoundException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationNotFoundException.java new file mode 100644 index 00000000000..94dd2d26c07 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperationNotFoundException.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
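A sketch of what a pre-processor implementation might look like, assuming the reconstructed signatures above: it renames a legacy key before the converter binds the description. All description and field names are invented:

```java
package com.example.clouddriver.ops; // hypothetical

import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor;
import java.util.HashMap;
import java.util.Map;

public class LegacyFieldPreProcessor implements AtomicOperationDescriptionPreProcessor {
  @Override
  public boolean supports(Class descriptionClass) {
    return ResizeExampleDescription.class.isAssignableFrom(descriptionClass);
  }

  @Override
  public Map<String, Object> process(Map<String, Object> description) {
    Map<String, Object> result = new HashMap<>(description);
    // Rewrite a deprecated key before the converter binds the description.
    if (result.containsKey("asgName")) {
      result.put("serverGroupName", result.remove("asgName"));
    }
    return result;
  }

  // Invented description type, included only so the sketch is self-contained.
  public static class ResizeExampleDescription {}
}
```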
+ */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import org.springframework.http.HttpStatus; +import org.springframework.web.bind.annotation.ResponseStatus; + +@ResponseStatus( + value = HttpStatus.BAD_REQUEST, + reason = "Could not find a suitable converter for supplied type.") +public class AtomicOperationNotFoundException extends RuntimeException { + public AtomicOperationNotFoundException() {} + + public AtomicOperationNotFoundException(String message) { + super(message); + } + + public AtomicOperationNotFoundException(String message, Throwable cause) { + super(message, cause); + } + + public AtomicOperationNotFoundException(Throwable cause) { + super(cause); + } + + protected AtomicOperationNotFoundException( + String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperations.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperations.java new file mode 100644 index 00000000000..e3ea9b5fc08 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/AtomicOperations.java @@ -0,0 +1,130 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.orchestration; + +/** A class that holds the names of ALL the kato operations as constants */ +public final class AtomicOperations { + + // Server Group operations + public static final String CLONE_SERVER_GROUP = "cloneServerGroup"; + public static final String CREATE_SERVER_GROUP = "createServerGroup"; + public static final String DISABLE_SERVER_GROUP = "disableServerGroup"; + public static final String ENABLE_SERVER_GROUP = "enableServerGroup"; + public static final String DESTROY_SERVER_GROUP = "destroyServerGroup"; + public static final String RESIZE_SERVER_GROUP = "resizeServerGroup"; + public static final String UPSERT_SERVER_GROUP_TAGS = "upsertServerGroupTags"; + public static final String UPDATE_LAUNCH_CONFIG = "updateLaunchConfig"; + public static final String UPDATE_LAUNCH_TEMPLATE = "updateLaunchTemplate"; + public static final String UPSERT_SCALING_POLICY = "upsertScalingPolicy"; + public static final String DELETE_SCALING_POLICY = "deleteScalingPolicy"; + public static final String MIGRATE_SERVER_GROUP = "migrateServerGroup"; + public static final String MIGRATE_CLUSTER_CONFIGURATIONS = "migrateClusterConfigurations"; + public static final String START_SERVER_GROUP = "startServerGroup"; + public static final String STOP_SERVER_GROUP = "stopServerGroup"; + public static final String SET_STATEFUL_DISK = "setStatefulDisk"; + public static final String STATEFULLY_UPDATE_BOOT_IMAGE = "statefullyUpdateBootImage"; + public static final String UPSERT_DISRUPTION_BUDGET = "upsertDisruptionBudget"; + public static final String UPDATE_JOB_PROCESSES = "updateJobProcesses"; + + // Instance operations + public static final String REBOOT_INSTANCES = "rebootInstances"; + public static final String TERMINATE_INSTANCES = "terminateInstances"; + public static final String TERMINATE_INSTANCE_AND_DECREMENT = + "terminateInstanceAndDecrementServerGroup"; + public static final String ATTACH_CLASSIC_LINK_VPC = "attachClassicLinkVpc"; + public static final String REGISTER_INSTANCES_WITH_LOAD_BALANCER = + "registerInstancesWithLoadBalancer"; + public static final String DEREGISTER_INSTANCES_FROM_LOAD_BALANCER = + "deregisterInstancesFromLoadBalancer"; + public static final String MAP_LOAD_BALANCERS = "mapLoadBalancers"; + public static final String UNMAP_LOAD_BALANCERS = "unmapLoadBalancers"; + public static final String ENABLE_INSTANCES_IN_DISCOVERY = "enableInstancesInDiscovery"; + public static final String DISABLE_INSTANCES_IN_DISCOVERY = "disableInstancesInDiscovery"; + public static final String UPDATE_INSTANCES = "updateInstances"; + public static final String DETACH_INSTANCES = "detachInstances"; + + // Load Balancer operations + public static final String DELETE_LOAD_BALANCER = "deleteLoadBalancer"; + public static final String UPSERT_LOAD_BALANCER = "upsertLoadBalancer"; + public static final String MIGRATE_LOAD_BALANCER = "migrateLoadBalancer"; + + // Security Group operations + public static final String DELETE_SECURITY_GROUP = "deleteSecurityGroup"; + public static final String UPSERT_SECURITY_GROUP = "upsertSecurityGroup"; + public static final String MIGRATE_SECURITY_GROUP = "migrateSecurityGroup"; + + // JobStatus operations + public static final String RUN_JOB = "runJob"; + public static final String DESTROY_JOB = "destroyJob"; + public static final String CLONE_JOB = "cloneJob"; + + // Image operations + public static final String UPSERT_IMAGE_TAGS = "upsertImageTags"; + + // Snapshot operations + public static final String 
SAVE_SNAPSHOT = "saveSnapshot"; + public static final String RESTORE_SNAPSHOT = "restoreSnapshot"; + public static final String DELETE_SNAPSHOT = "deleteSnapshot"; + + // Manifest operations + public static final String DEPLOY_MANIFEST = "deployManifest"; + + public static final String DEPLOY_CLOUDRUN_MANIFEST = "deployCloudrunManifest"; + public static final String DELETE_MANIFEST = "deleteManifest"; + public static final String SCALE_MANIFEST = "scaleManifest"; + public static final String PATCH_MANIFEST = "patchManifest"; + public static final String PAUSE_ROLLOUT_MANIFEST = "pauseRolloutManifest"; + public static final String RESUME_ROLLOUT_MANIFEST = "resumeRolloutManifest"; + public static final String UNDO_ROLLOUT_MANIFEST = "undoRolloutManifest"; + public static final String ROLLING_RESTART_MANIFEST = "rollingRestartManifest"; + public static final String DISABLE_MANIFEST = "disableManifest"; + public static final String ENABLE_MANIFEST = "enableManifest"; + + // Artifact operations + public static final String CLEANUP_ARTIFACTS = "cleanupArtifacts"; + + // Image operations + public static final String DEREGISTER_IMAGE = "deleteImage"; + + // Service operations + public static final String CREATE_SERVICE_KEY = "createServiceKey"; + public static final String DELETE_SERVICE_KEY = "deleteServiceKey"; + public static final String DEPLOY_SERVICE = "deployService"; + public static final String DESTROY_SERVICE = "destroyService"; + public static final String SHARE_SERVICE = "shareService"; + public static final String UNSHARE_SERVICE = "unshareService"; + public static final String CREATE_SERVICE_BINDINGS = "createServiceBindings"; + public static final String DELETE_SERVICE_BINDINGS = "deleteServiceBindings"; + + // CloudFormation operations + public static final String DEPLOY_CLOUDFORMATION_STACK = "deployCloudFormation"; + public static final String DELETE_CLOUDFORMATION_STACK = "deleteCloudFormation"; + public static final String DELETE_CLOUDFORMATION_CHANGESET = "deleteCloudFormationChangeSet"; + public static final String EXECUTE_CLOUDFORMATION_CHANGESET = "executeCloudFormationChangeSet"; + + // Launch Config operations + public static final String DELETE_LAUNCH_CONFIGURATION = "deleteLaunchConfiguration"; + + // Launch template operations + public static final String DELETE_LAUNCH_TEMPLATE = "deleteLaunchTemplate"; + + // AppEngine Config operations + public static final String DEPLOY_APPENGINE_CONFIG = "deployAppengineConfiguration"; + + // Cloudrun Config operations + public static final String DEPLOY_CLOUDRUN_CONFIG = "deployCloudrunConfiguration"; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/CompositeDescriptionValidator.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/CompositeDescriptionValidator.java new file mode 100644 index 00000000000..96d227988ef --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/CompositeDescriptionValidator.java @@ -0,0 +1,69 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
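These constants double as the wire names under which operations are submitted, so a payload keyed by one of them resolves to the matching converter. A sketch of that shape, with illustrative field names and values:

```java
package com.example.clouddriver.ops; // hypothetical

import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
import java.util.List;
import java.util.Map;

class OperationPayloadExample {
  // One list element per operation, keyed by its AtomicOperations constant;
  // the description fields and values are illustrative only.
  static final List<Map<String, Map<String, Object>>> PAYLOAD =
      List.of(
          Map.of(
              AtomicOperations.RESIZE_SERVER_GROUP,
              Map.<String, Object>of(
                  "serverGroupName", "myapp-v001",
                  "region", "us-east-1",
                  "credentials", "prod")));
}
```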
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.GlobalDescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import java.util.List; +import java.util.Optional; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class CompositeDescriptionValidator<T> extends DescriptionValidator<T> { + + private final String operationName; + private final String cloudProvider; + @Getter private final DescriptionValidator<T> validator; + private final List<GlobalDescriptionValidator> globalValidators; + + public CompositeDescriptionValidator( + String operationName, + String cloudProvider, + DescriptionValidator<T> validator, + List<GlobalDescriptionValidator> extensibleValidators) { + this.operationName = operationName; + this.cloudProvider = cloudProvider; + this.validator = validator; + this.globalValidators = extensibleValidators; + } + + @Override + public void validate(List priorDescriptions, T description, ValidationErrors errors) { + if (globalValidators != null) { + globalValidators.forEach( + v -> { + if (v.handles(description)) { + v.validate(operationName, priorDescriptions, description, errors); + } + }); + } + if (validator == null) { + String operationName = + Optional.ofNullable(description) + .map(it -> it.getClass().getSimpleName()) + .orElse("UNKNOWN"); + log.warn( + String.format( + "No validator found for operation %s and cloud provider %s", + operationName, cloudProvider)); + } else { + validator.validate(priorDescriptions, description, errors); + } + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/ExceptionClassifier.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/ExceptionClassifier.java new file mode 100644 index 00000000000..f5fb6ca7cb8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/ExceptionClassifier.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.google.common.base.Splitter; +import com.google.common.collect.Lists; +import com.netflix.spinnaker.clouddriver.config.ExceptionClassifierConfigurationProperties; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.exceptions.SpinnakerException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.annotation.Nonnull; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +/** + * Utility class to allow classifying non-SpinnakerException classes according to different + * pre-determined characteristics.
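A wiring sketch that matches the constructor and validate() flow above; the operation name, cloud provider key, and the injected validators are placeholders, and the generics follow the reconstruction above:

```java
package com.example.clouddriver.validation; // hypothetical

import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
import com.netflix.spinnaker.clouddriver.deploy.GlobalDescriptionValidator;
import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
import com.netflix.spinnaker.clouddriver.orchestration.CompositeDescriptionValidator;
import java.util.List;

class CompositeValidatorExample {
  <T> void validateWithGlobals(
      DescriptionValidator<T> providerValidator,
      List<GlobalDescriptionValidator> globalValidators,
      T description,
      ValidationErrors errors) {
    // Global validators that handle the description run first, then the
    // provider-specific validator, mirroring validate() above.
    DescriptionValidator<T> composite =
        new CompositeDescriptionValidator<>(
            "resizeServerGroup", "examplecloud", providerValidator, globalValidators);
    composite.validate(List.of(), description, errors);
  }
}
```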
+ */ +@Component +@Slf4j +public class ExceptionClassifier { + + private final ExceptionClassifierConfigurationProperties properties; + private final DynamicConfigService dynamicConfigService; + + public ExceptionClassifier( + ExceptionClassifierConfigurationProperties properties, + DynamicConfigService dynamicConfigService) { + this.properties = properties; + this.dynamicConfigService = dynamicConfigService; + } + + /** Returns whether a given Exception is retryable. */ + public boolean isRetryable(@Nonnull Exception e) { + if (e instanceof SpinnakerException) { + return Optional.ofNullable(((SpinnakerException) e).getRetryable()).orElse(false); + } + + boolean retryable = false; + try { + String dynamicRetryableClasses = + dynamicConfigService.getConfig( + String.class, + "clouddriver.exception-classifier.retryable-exceptions", + String.join(",", properties.getRetryableClasses())); + + if (dynamicRetryableClasses != null) { + List<String> dynamicRetryableClassesList = + Lists.newArrayList(Splitter.on(",").split(dynamicRetryableClasses)); + + List<String> retryableClasses = + Stream.of(dynamicRetryableClassesList, properties.getRetryableClasses()) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + + retryable = retryableClasses.contains(e.getClass().getName()); + } else { + retryable = properties.getRetryableClasses().contains(e.getClass().getName()); + } + } catch (Exception caughtException) { + log.error("Unexpected exception while processing retryable classes", caughtException); + } + + log.trace("Evaluated retryable status for {} to '{}'", e.getClass().getName(), retryable); + + return retryable; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/OperationsService.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/OperationsService.java new file mode 100644 index 00000000000..02206d00d8c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/OperationsService.java @@ -0,0 +1,370 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
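A usage sketch of the classifier above: gate a retry decision on isRetryable(), leaving real backoff and queueing machinery out. The single inline retry is purely illustrative:

```java
package com.example.clouddriver.retry; // hypothetical

import com.netflix.spinnaker.clouddriver.orchestration.ExceptionClassifier;

class RetryGateExample {
  private final ExceptionClassifier exceptionClassifier;

  RetryGateExample(ExceptionClassifier exceptionClassifier) {
    this.exceptionClassifier = exceptionClassifier;
  }

  void runWithRetry(Runnable operation) {
    try {
      operation.run();
    } catch (Exception e) {
      // Retryable classes come from static config plus the dynamic property
      // "clouddriver.exception-classifier.retryable-exceptions".
      if (exceptionClassifier.isRetryable(e)) {
        operation.run(); // single inline retry; real callers would back off
      } else {
        throw e;
      }
    }
  }
}
```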
+ */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Splitter; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.data.task.SagaId; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionAuthorizer; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationErrors; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationException; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SnapshotAtomicOperationInput.SnapshotAtomicOperationInputCommand; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.AllowedAccountsValidator; +import com.netflix.spinnaker.kork.exceptions.SystemException; +import com.netflix.spinnaker.kork.web.exceptions.ExceptionMessageDecorator; +import com.netflix.spinnaker.orchestration.OperationDescription; +import com.netflix.spinnaker.security.AuthenticatedRequest; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.Value; +import lombok.extern.slf4j.Slf4j; +import org.springframework.core.ResolvableType; +import org.springframework.validation.Errors; +import org.springframework.validation.ObjectError; + +@Slf4j +public class OperationsService { + + private final Splitter COMMA_SPLITTER = Splitter.on(","); + + private final AtomicOperationsRegistry atomicOperationsRegistry; + private final List descriptionAuthorizers; + private final Collection allowedAccountValidators; + private final List + atomicOperationDescriptionPreProcessors; + private final AccountCredentialsRepository accountCredentialsRepository; + private final Optional sagaRepository; + private final Registry registry; + private final ObjectMapper objectMapper; + private final ExceptionMessageDecorator exceptionMessageDecorator; + + private final Id validationErrorsCounterId; + + public OperationsService( + AtomicOperationsRegistry atomicOperationsRegistry, + List descriptionAuthorizers, + Optional> allowedAccountValidators, + Optional> + atomicOperationDescriptionPreProcessors, + AccountCredentialsRepository accountCredentialsRepository, + Optional sagaRepository, + Registry registry, + ObjectMapper objectMapper, + ExceptionMessageDecorator exceptionMessageDecorator) { + this.atomicOperationsRegistry = atomicOperationsRegistry; + this.descriptionAuthorizers = descriptionAuthorizers; + this.allowedAccountValidators = allowedAccountValidators.orElse(Collections.emptyList()); + this.atomicOperationDescriptionPreProcessors = + atomicOperationDescriptionPreProcessors.orElse(Collections.emptyList()); + this.accountCredentialsRepository = accountCredentialsRepository; + this.sagaRepository = sagaRepository; + this.registry = registry; + this.objectMapper = objectMapper; + 
this.exceptionMessageDecorator = exceptionMessageDecorator; + + validationErrorsCounterId = registry.createId("validationErrors"); + } + + @Nonnull + public List collectAtomicOperations( + @Nonnull List>> inputs) { + return collectAtomicOperations(null, inputs); + } + + @Nonnull + public List collectAtomicOperations( + @Nullable String cloudProvider, @Nonnull List>> inputs) { + List results = convert(cloudProvider, inputs); + + List atomicOperations = new ArrayList<>(); + results.forEach( + bindingResult -> { + if (bindingResult.errors.hasErrors()) { + Collection errors = collectErrors(bindingResult.errors); + throw new DescriptionValidationException(errors); + } + atomicOperations.add(bindingResult.atomicOperation); + }); + return atomicOperations; + } + + private List convert( + @Nullable String cloudProvider, @Nonnull List>> inputs) { + + String username = AuthenticatedRequest.getSpinnakerUser().orElse("unknown"); + List allowedAccounts = + COMMA_SPLITTER.splitToList(AuthenticatedRequest.getSpinnakerAccounts().orElse("")); + + List descriptions = new ArrayList<>(); + return inputs.stream() + .flatMap( + input -> + input.entrySet().stream() + .map( + e -> { + final String descriptionName = e.getKey(); + final Map descriptionInput = e.getValue(); + final OperationInput operationInput = + objectMapper.convertValue(descriptionInput, OperationInput.class); + final String provider = + Optional.ofNullable(cloudProvider) + .orElse(operationInput.cloudProvider); + + AtomicOperationConverter converter = + atomicOperationsRegistry.getAtomicOperationConverter( + descriptionName, provider); + + // TODO(rz): What if a preprocessor fails due to a downstream error? How + // does this affect retrying? + Map processedInput = + processDescriptionInput( + atomicOperationDescriptionPreProcessors, + converter, + descriptionInput); + + OperationDescription description = + converter.convertDescription(processedInput); + + descriptions.add(description); + + DescriptionValidationErrors errors = + new DescriptionValidationErrors(description); + + DescriptionValidator validator = + atomicOperationsRegistry.getAtomicOperationDescriptionValidator( + DescriptionValidator.getValidatorName(descriptionName), provider); + + if (validator == null) { + String operationName = + Optional.ofNullable(description) + .map(it -> it.getClass().getSimpleName()) + .orElse("UNKNOWN"); + log.warn( + "No validator found for operation {} and cloud provider {}", + operationName, + provider); + } else { + // TODO(rz): Assert description is T + validator.validate(descriptions, description, errors); + } + + allowedAccountValidators.forEach( + it -> it.validate(username, allowedAccounts, description, errors)); + + if (description != null) { + DescriptionAuthorizer descriptionAuthorizer = + descriptionAuthorizers.stream() + .filter(it -> it.supports(description)) + .findFirst() + .orElseThrow( + () -> + new SystemException( + "Unable to find supporting description authorizer for {}", + description.getClass().getSimpleName())); + + descriptionAuthorizer.authorize(description, errors); + } + + // TODO(rz): This is so bad. We convert the description input twice (once + // above) and then once inside of this convertOperation procedure. This + // means that we do a bunch of serde work twice without needing to. 
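Under the signatures reconstructed for the conversion pipeline above, the end-to-end entry point looks roughly like this: callers hand collectAtomicOperations() the raw payload, and any binding result carrying validation errors aborts the whole batch with a DescriptionValidationException:

```java
package com.example.clouddriver.ops; // hypothetical

import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
import com.netflix.spinnaker.clouddriver.orchestration.OperationsService;
import java.util.List;
import java.util.Map;

class CollectExample {
  List<AtomicOperation> collect(OperationsService operationsService) {
    Map<String, Object> description =
        Map.<String, Object>of("credentials", "prod", "application", "myapp");
    List<Map<String, Map<String, Object>>> inputs =
        List.of(Map.of("createServerGroup", description));
    // Throws DescriptionValidationException if any operation fails validation.
    return operationsService.collectAtomicOperations("aws", inputs);
  }
}
```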
+ AtomicOperation atomicOperation = + converter.convertOperation(processedInput); + if (atomicOperation == null) { + throw new AtomicOperationNotFoundException(descriptionName); + } + + if (atomicOperation instanceof SagaContextAware) { + ((SagaContextAware) atomicOperation) + .setSagaContext( + new SagaContextAware.SagaContext( + cloudProvider, descriptionName, descriptionInput)); + } + + if (errors.hasErrors()) { + registry + .counter( + validationErrorsCounterId.withTag( + "operation", atomicOperation.getClass().getSimpleName())) + .increment(); + } + + return new AtomicOperationBindingResult(atomicOperation, errors); + })) + .collect(Collectors.toList()); + } + + public List collectAtomicOperationsFromSagas(Set sagaIds) { + if (sagaRepository.isEmpty()) { + return Collections.emptyList(); + } + + // Resuming a saga-backed AtomicOperation is kind of a pain. This is because AtomicOperations + // and their descriptions are totally decoupled from their input & description name, so we + // have to store additional state in the Saga and then use that to reconstruct + // AtomicOperations. It'd make sense to refactor all of this someday. + List seenDescriptions = new ArrayList<>(); + return sagaIds.stream() + .map(id -> sagaRepository.get().get(id.getName(), id.getId())) + .filter(Objects::nonNull) + .filter(it -> !it.isComplete()) + .map( + saga -> + new SagaAndSnapshot(saga, saga.getEvent(SnapshotAtomicOperationInputCommand.class))) + .filter( + it -> { + // Reduce the list of sagas attached to the task to one for each uniquely submitted + // description. This is probably unnecessary long-term. + if (seenDescriptions.contains(it.getSnapshot().getDescription())) { + return false; + } + seenDescriptions.add(it.getSnapshot().getDescription()); + return true; + }) + .flatMap( + saga -> { + List bindingResult = + convert( + saga.getSnapshot().getCloudProvider(), + Collections.singletonList( + Collections.singletonMap( + saga.getSnapshot().getDescriptionName(), + saga.getSnapshot().getDescriptionInput()))); + + // We need to ensure the encapsulated saga instance gets the same ID. + return bindingResult.stream() + .map(this::atomicOperationOrError) + .peek( + it -> { + if (it instanceof AbstractSagaAtomicOperation) { + // The saga context is always going to be set by this point, but y'know... + // safety. This should be done when the context is created, but I don't + // want to go down the path of refactoring the mess in `convert`, however + // it should be, so that the class is actually unit testable. + AbstractSagaAtomicOperation op = + (AbstractSagaAtomicOperation) it; + Optional.ofNullable(op.getSagaContext()) + .ifPresent(context -> context.setSagaId(saga.getSaga().getId())); + } + }); + }) + .collect(Collectors.toList()); + } + + private AtomicOperation atomicOperationOrError(AtomicOperationBindingResult bindingResult) { + if (bindingResult.errors.hasErrors()) { + Collection errors = collectErrors(bindingResult.errors); + throw new DescriptionValidationException(errors); + } + return bindingResult.atomicOperation; + } + + /** + * Process the validation {@link Errors} and transform errors to a collection of strings so they + * can be added to the exception. 
+ */ + private Collection<String> collectErrors(Errors errors) { + Collection<String> errorCollection = new ArrayList<>(); + for (ObjectError objectError : errors.getAllErrors()) { + if (objectError.getDefaultMessage() != null && objectError.getCode() != null) { + errorCollection.add( + exceptionMessageDecorator.decorate( + objectError.getCode(), objectError.getDefaultMessage())); + } else if (objectError.getCode() != null) { + // Treat the error code as the default message - better than nothing I guess. + errorCollection.add( + exceptionMessageDecorator.decorate(objectError.getCode(), objectError.getCode())); + } + } + return errorCollection; + } + + /** + * Runs the provided descriptionInput through preprocessors. + * + *
<p>
Which preprocessors are used is determined by doing some reflection on the + * AtomicOperationConverter's return type. + */ + private static Map processDescriptionInput( + Collection descriptionPreProcessors, + AtomicOperationConverter converter, + Map descriptionInput) { + + Method convertDescriptionMethod; + try { + convertDescriptionMethod = converter.getClass().getMethod("convertDescription", Map.class); + } catch (NoSuchMethodException e) { + throw new SystemException("Could not find convertDescription method on converter", e); + } + + Class convertDescriptionReturnType = + ResolvableType.forMethodReturnType(convertDescriptionMethod).getRawClass(); + + for (AtomicOperationDescriptionPreProcessor preProcessor : descriptionPreProcessors) { + if (preProcessor.supports(convertDescriptionReturnType)) { + descriptionInput = preProcessor.process(descriptionInput); + } + } + + return descriptionInput; + } + + @Value + public static class AtomicOperationBindingResult { + private AtomicOperation atomicOperation; + private Errors errors; + } + + @Data + private static class OperationInput { + @Nullable private String credentials; + @Nullable private String accountName; + @Nullable private String account; + @Nullable private String cloudProvider; + + @Nullable + public String computeAccountName() { + return Optional.ofNullable(credentials) + .orElse(Optional.ofNullable(accountName).orElse(account)); + } + } + + @Data + @AllArgsConstructor + private static class SagaAndSnapshot { + Saga saga; + SnapshotAtomicOperationInputCommand snapshot; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/OrchestrationProcessor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/OrchestrationProcessor.java new file mode 100644 index 00000000000..c9191f80bdc --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/OrchestrationProcessor.java @@ -0,0 +1,41 @@ +/** + * Copyright 2019 Netflix, Inc. + * + *
<p>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
<p>
http://www.apache.org/licenses/LICENSE-2.0 + * + *
<p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import java.util.List; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * Implementations of this interface should perform orchestration of operations in a workflow. It + * will often be used in conjunction with {@link AtomicOperation} instances. + */ +public interface OrchestrationProcessor { + /** + * This is the invocation point of orchestration. + * + * @param key a unique key, used to de-dupe orchestration requests + * @return the Task tracking the submitted operations + */ + Task process( + @Nullable String cloudProvider, + @Nonnull List<AtomicOperation> atomicOperations, + @Nonnull String key); + + @Deprecated + default Task process(@Nonnull List<AtomicOperation> atomicOperations, @Nonnull String key) { + return process(null, atomicOperations, key); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/SagaContextAware.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/SagaContextAware.java new file mode 100644 index 00000000000..d2aa2a3b2cc --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/SagaContextAware.java @@ -0,0 +1,49 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration; + +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Data; + +/** + * Used to bridge AtomicOperations with Sagas. + * + *
<p>
Unfortunately, AtomicOperations and their descriptions are pretty well decoupled from their + * original input. This makes it difficult to retry operations without re-sending the entire + * operation payload. + */ +public interface SagaContextAware { + void setSagaContext(@Nonnull SagaContext sagaContext); + + @Nullable + SagaContext getSagaContext(); + + @Data + class SagaContext { + private String cloudProvider; + private String descriptionName; + private Map<String, Object> originalInput; + private String sagaId; + + public SagaContext(String cloudProvider, String descriptionName, Map<String, Object> originalInput) { + this.cloudProvider = cloudProvider; + this.descriptionName = descriptionName; + this.originalInput = originalInput; + } + } +} diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/VersionedOperationHelper.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/VersionedOperationHelper.java similarity index 76% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/VersionedOperationHelper.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/VersionedOperationHelper.java index f7aeb5445f1..8158719046e 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/VersionedOperationHelper.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/VersionedOperationHelper.java @@ -17,15 +17,17 @@ package com.netflix.spinnaker.clouddriver.orchestration; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; - +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; import java.util.List; import java.util.stream.Collectors; +import javax.annotation.Nullable; +@NonnullByDefault public class VersionedOperationHelper { - static List findVersionMatches(ProviderVersion version, List operations) { - return operations.stream() - .filter(o -> o.acceptsVersion(version)) + static List findVersionMatches( + @Nullable String version, List converters) { + return converters.stream() + .filter(it -> it.acceptsVersion(version)) .collect(Collectors.toList()); } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/CreateServerGroupEvent.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/CreateServerGroupEvent.java similarity index 77% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/CreateServerGroupEvent.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/CreateServerGroupEvent.java index 34df30f70bd..3e070f92938 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/CreateServerGroupEvent.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/CreateServerGroupEvent.java @@ -26,7 +26,8 @@ public class CreateServerGroupEvent implements OperationEvent { private final String region; private final String name; - public CreateServerGroupEvent(String cloudProvider, String accountId, String region, String name) { + public CreateServerGroupEvent( + String cloudProvider, String accountId, String region, String name) { this.cloudProvider = cloudProvider; this.accountId = accountId; this.region = region; @@ -62,13 +63,23 @@ public String getName() { @Override public String toString() { - return "CreateServerGroupEvent{" + - "type=" +
type + - ", action=" + action + - ", cloudProvider='" + cloudProvider + '\'' + - ", accountId='" + accountId + '\'' + - ", region='" + region + '\'' + - ", name='" + name + '\'' + - '}'; + return "CreateServerGroupEvent{" + + "type=" + + type + + ", action=" + + action + + ", cloudProvider='" + + cloudProvider + + '\'' + + ", accountId='" + + accountId + + '\'' + + ", region='" + + region + + '\'' + + ", name='" + + name + + '\'' + + '}'; } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/DeleteServerGroupEvent.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/DeleteServerGroupEvent.java similarity index 77% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/DeleteServerGroupEvent.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/DeleteServerGroupEvent.java index 7368612a345..bc26bfe84d8 100644 --- a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/DeleteServerGroupEvent.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/DeleteServerGroupEvent.java @@ -26,7 +26,8 @@ public class DeleteServerGroupEvent implements OperationEvent { private final String region; private final String name; - public DeleteServerGroupEvent(String cloudProvider, String accountId, String region, String name) { + public DeleteServerGroupEvent( + String cloudProvider, String accountId, String region, String name) { this.cloudProvider = cloudProvider; this.accountId = accountId; this.region = region; @@ -62,13 +63,23 @@ public String getName() { @Override public String toString() { - return "DeleteServerGroupEvent{" + - "type=" + type + - ", action=" + action + - ", cloudProvider='" + cloudProvider + '\'' + - ", accountId='" + accountId + '\'' + - ", region='" + region + '\'' + - ", name='" + name + '\'' + - '}'; + return "DeleteServerGroupEvent{" + + "type=" + + type + + ", action=" + + action + + ", cloudProvider='" + + cloudProvider + + '\'' + + ", accountId='" + + accountId + + '\'' + + ", region='" + + region + + '\'' + + ", name='" + + name + + '\'' + + '}'; } } diff --git a/clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEventHandler.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEventHandler.java similarity index 100% rename from clouddriver-core/src/main/groovy/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEventHandler.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/events/OperationEventHandler.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/AbstractSagaAtomicOperation.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/AbstractSagaAtomicOperation.java new file mode 100644 index 00000000000..6b0d8a3d91e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/AbstractSagaAtomicOperation.java @@ -0,0 +1,103 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
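The events above are consumed by OperationEventHandler implementations; the rename hunk does not show that interface's body, so this sketch assumes it exposes a single handle(OperationEvent) callback:

```java
package com.example.clouddriver.events; // hypothetical

import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent;
import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEventHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative handler that just logs create/delete server group events.
public class LoggingOperationEventHandler implements OperationEventHandler {
  private static final Logger log = LoggerFactory.getLogger(LoggingOperationEventHandler.class);

  @Override
  public void handle(OperationEvent event) {
    // CreateServerGroupEvent and DeleteServerGroupEvent both carry provider,
    // account, region and name, surfaced here via toString().
    log.info("Observed operation event: {}", event);
  }
}
```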
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration.sagas; + +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor; +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge.ApplyCommandWrapper; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder; +import com.netflix.spinnaker.clouddriver.saga.SagaService; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow; +import java.util.List; +import java.util.Objects; +import javax.annotation.Nonnull; +import org.jetbrains.annotations.Nullable; +import org.springframework.beans.factory.annotation.Autowired; + +/** + * Removes some of the boilerplate for AtomicOperations to use Sagas. + * + * @param <T> The AtomicOperation description + * @param <SR> The saga result type + * @param <R> The operation result type + */ +public abstract class AbstractSagaAtomicOperation<T, SR, R> + implements AtomicOperation<R>, SagaContextAware { + + /** + * Needs to be an autowired property due to how the {@link OrchestrationProcessor} creates + * AtomicOperations. + */ + @Autowired private SagaService sagaService; + + protected T description; + private SagaContext sagaContext; + + public AbstractSagaAtomicOperation(T description) { + this.description = description; + } + + /** Build the {@link SagaAction} for the AtomicOperation. */ + @Nonnull + protected abstract SagaFlow buildSagaFlow(List priorOutputs); + + /** Implementing classes will need to configure {@code initialCommand} at minimum. */ + protected abstract void configureSagaBridge(@Nonnull ApplyCommandWrapperBuilder builder); + + /** + * Provides the opportunity to convert a {@link SagaAction.Result} into the expected result type + * of the AtomicOperation.
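A declaration-level sketch of how the three type parameters line up in a concrete subclass; all Example* names are invented, and the saga-flow and bridge hooks are left abstract here because their wiring is provider-specific:

```java
package com.example.clouddriver.sagas; // hypothetical

import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation;

class ExampleDescription {}

class ExampleSagaCompleted {}

class ExampleOperationResult {}

// Abstract on purpose: buildSagaFlow() and configureSagaBridge() stay unimplemented
// here since they depend on the provider's saga actions and commands.
abstract class ExampleSagaAtomicOperation
    extends AbstractSagaAtomicOperation<ExampleDescription, ExampleSagaCompleted, ExampleOperationResult> {

  ExampleSagaAtomicOperation(ExampleDescription description) {
    super(description);
  }

  @Override
  protected ExampleOperationResult parseSagaResult(ExampleSagaCompleted result) {
    // Convert the saga's completion payload into the operation's return value.
    return new ExampleOperationResult();
  }
}
```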
+ */ + protected abstract R parseSagaResult(SR result); + + @Override + public R operate(List priorOutputs) { + Objects.requireNonNull(sagaContext, "A saga context must be provided"); + + SagaFlow flow = buildSagaFlow(priorOutputs); + + ApplyCommandWrapperBuilder builder = + ApplyCommandWrapper.builder() + .sagaName(this.getClass().getSimpleName()) + .inputDescription(description) + .priorOutputs(priorOutputs) + .sagaContext(sagaContext) + .task(TaskRepository.threadLocalTask.get()) + .sagaFlow(flow); + + configureSagaBridge(builder); + + // TODO(rz): Should make SagaAtomicOperationBridge a bean and inject that instead + SR result = + new SagaAtomicOperationBridge(sagaService, sagaContext.getSagaId()).apply(builder.build()); + + return parseSagaResult(result); + } + + @Override + public void setSagaContext(@Nonnull SagaContext sagaContext) { + this.sagaContext = sagaContext; + } + + @Nullable + @Override + public SagaContext getSagaContext() { + return sagaContext; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/LoadFront50App.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/LoadFront50App.java new file mode 100644 index 00000000000..5f2a8881c23 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/LoadFront50App.java @@ -0,0 +1,173 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
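A minimal sketch of a concrete subclass, to make the boilerplate savings concrete. All MyDeploy* types are hypothetical stand-ins, the generic parameters follow the T/SR/R convention from the class javadoc, and the SagaFlow calls mirror the example in LoadFront50App below:

// Illustrative only; all MyDeploy* types are hypothetical and framework imports are elided.
public class MyDeployAtomicOperation
    extends AbstractSagaAtomicOperation<MyDeployDescription, MyDeployResult, MyDeployResult> {

  public MyDeployAtomicOperation(MyDeployDescription description) {
    super(description);
  }

  @Nonnull
  @Override
  protected SagaFlow buildSagaFlow(List priorOutputs) {
    // One SagaAction per SagaCommand; LoadFront50App could be chained in front if needed.
    return new SagaFlow().then(MyDeployAction.class);
  }

  @Override
  protected void configureSagaBridge(@Nonnull ApplyCommandWrapperBuilder builder) {
    // At minimum, seed the saga with its initial command.
    builder.initialCommand(MyDeployCommand.builder().description(description).build());
  }

  @Override
  protected MyDeployResult parseSagaResult(MyDeployResult result) {
    return result;
  }
}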
+ */ +package com.netflix.spinnaker.clouddriver.orchestration.sagas; + +import static java.lang.String.format; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.clouddriver.event.CompositeSpinnakerEvent; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent; +import com.netflix.spinnaker.clouddriver.saga.ManyCommands; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaIntegrationException; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.kork.exceptions.SystemException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import javax.annotation.Nonnull; +import lombok.AccessLevel; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.NoArgsConstructor; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +/** + * Loads an Application from Front50, then calls the next SagaCommand. + * + *

<p>This SagaAction can be reused across operations and even cloud providers. + * + * <p>{@code
+ * SagaFlow()
+ *   .then(MyPredecessorAction.class)
+ *   .then(LoadFront50App.class)
+ *   .then(MyNextAction.class)
+ *
+ * class MyPredecessorAction : SagaAction {
+ *   Result apply(SagaCommand command, Saga saga) {
+ *     return Result(
+ *       new LoadFront50AppCommand(
+ *         "clouddriver",
+ *         new MyNextActionCommand()
+ *       )
+ *     );
+ *   }
+ * }
+ *
+ * class MyNextAction : SagaAction {
+ *
+ *   // MyNextAction implements an interface to mark that it knows about this action
+ *   static class MyNextActionCommand extends SagaCommand implements Front50AppAware {
+ *
+ *   }
+ * }
+ * }
+ */ +@Component +public class LoadFront50App implements SagaAction { + + private static final Logger log = LoggerFactory.getLogger(LoadFront50App.class); + + private final Front50Service front50Service; + private final ObjectMapper objectMapper; + + @Autowired + public LoadFront50App(Front50Service front50Service, ObjectMapper objectMapper) { + this.front50Service = front50Service; + this.objectMapper = objectMapper; + } + + /** Recursively applies the loaded front50 model to any {@code Front50AppAware} command. */ + private static SagaCommand applyFront50App(SagaCommand command, Front50App loadedApp) { + if (ManyCommands.class.isAssignableFrom(command.getClass())) { + for (SagaCommand c : ((ManyCommands) command).getCommands()) { + applyFront50App(c, loadedApp); + } + } + if (command instanceof Front50AppAware) { + ((Front50AppAware) command).setFront50App(loadedApp); + } + return command; + } + + @Nonnull + @Override + public Result apply(@Nonnull LoadFront50AppCommand command, @Nonnull Saga saga) { + try { + Map response = front50Service.getApplication(command.getAppName()); + try { + return new Result( + Optional.ofNullable(response) + .map(it -> objectMapper.convertValue(it, Front50App.class)) + .map(f -> applyFront50App(command.nextCommand, f)) + .orElse(null), + Collections.emptyList()); + } catch (IllegalArgumentException e) { + log.error("Failed to convert front50 application to internal model", e); + throw new SagaIntegrationException( + "Failed to convert front50 application to internal model", e); + } + } catch (Exception e) { + if (command.isAllowMissing()) { + // It's ok to not load the front50 application + return new Result(command.nextCommand, Collections.emptyList()); + } + log.error("Failed to load front50 application attributes for {}", command.getAppName(), e); + throw new SystemException( + format("Failed to load front50 application: %s", command.getAppName()), e); + } + } + + /** Marks a SagaCommand as being aware of the result of the LoadFront50App SagaAction. 
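As a rough usage sketch: an upstream SagaAction can return a LoadFront50AppCommand whose nextCommand implements Front50AppAware, so the loaded application is injected before that command runs. MyNextActionCommand is hypothetical; the builder fields come from LoadFront50AppCommand below:

// Hypothetical: wrap the next command so Front50 data is loaded first.
LoadFront50App.LoadFront50AppCommand command =
    LoadFront50App.LoadFront50AppCommand.builder()
        .appName("clouddriver")
        .nextCommand(new MyNextActionCommand()) // implements Front50AppAware
        .allowMissing(true) // proceed even if the application is not in Front50
        .build();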
*/ + public interface Front50AppAware { + void setFront50App(Front50App app); + } + + @Builder(builderClassName = "LoadFront50AppCommandBuilder", toBuilder = true) + @JsonDeserialize(builder = LoadFront50AppCommand.LoadFront50AppCommandBuilder.class) + @JsonTypeName("loadFront50AppCommand") + @Value + public static class LoadFront50AppCommand implements SagaCommand, CompositeSpinnakerEvent { + @Nonnull private String appName; + @Nonnull private SagaCommand nextCommand; + private boolean allowMissing; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @Nonnull + @Override + public List getComposedEvents() { + return Collections.singletonList(nextCommand); + } + + @JsonPOJOBuilder(withPrefix = "") + public static class LoadFront50AppCommandBuilder {} + } + + @Value + @AllArgsConstructor + @NoArgsConstructor(force = true, access = AccessLevel.PRIVATE) + public static class Front50App { + private String email; + private boolean platformHealthOnly; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/SagaAtomicOperationBridge.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/SagaAtomicOperationBridge.java new file mode 100644 index 00000000000..e922305ff5b --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/SagaAtomicOperationBridge.java @@ -0,0 +1,88 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration.sagas; + +import com.netflix.spinnaker.clouddriver.data.task.SagaId; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware.SagaContext; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.SagaService; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Builder; +import org.slf4j.MDC; + +/** + * A helper class to reduce boilerplate code while integrating Sagas into existing AtomicOperations. + */ +public class SagaAtomicOperationBridge { + + private final SagaService sagaService; + private final String sagaId; + + public SagaAtomicOperationBridge(SagaService sagaService, String sagaId) { + this.sagaService = sagaService; + this.sagaId = sagaId; + } + + public T apply(@Nonnull ApplyCommandWrapper applyCommand) { + final SagaContext sagaContext = applyCommand.sagaContext; + final Task task = applyCommand.task; + final String sagaName = applyCommand.sagaName; + + // use a random uuid to guarantee a unique saga id (rather than task.getId() or + // task.getRequestId()). A sagaId may be provided at construct time due to retries. 
+ final String sagaId = + Optional.ofNullable(this.sagaId).orElseGet(() -> UUID.randomUUID().toString()); + + task.addSagaId(SagaId.builder().id(sagaId).name(sagaName).build()); + + applyCommand.sagaFlow.injectFirst(SnapshotAtomicOperationInput.class); + + try { + MDC.put("X-SAGA", sagaName + "/" + sagaId); + return sagaService.applyBlocking( + sagaName, + sagaId, + applyCommand.sagaFlow, + SnapshotAtomicOperationInput.SnapshotAtomicOperationInputCommand.builder() + .cloudProvider(sagaContext.getCloudProvider()) + .descriptionName(sagaContext.getDescriptionName()) + .descriptionInput(sagaContext.getOriginalInput()) + .description(applyCommand.inputDescription) + .priorOutputs(applyCommand.priorOutputs) + .nextCommand(applyCommand.initialCommand) + .build()); + } finally { + MDC.remove("X-SAGA"); + } + } + + @Builder + public static class ApplyCommandWrapper { + @Nonnull private String sagaName; + @Nonnull private SagaContext sagaContext; + @Nonnull private Task task; + @Nonnull private Object inputDescription; + @Nonnull private SagaFlow sagaFlow; + @Nonnull private SagaCommand initialCommand; + @Nullable private List priorOutputs; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/SnapshotAtomicOperationInput.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/SnapshotAtomicOperationInput.java new file mode 100644 index 00000000000..b2cfbffd855 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/orchestration/sagas/SnapshotAtomicOperationInput.java @@ -0,0 +1,73 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.orchestration.sagas; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.springframework.stereotype.Component; + +/** + * Compatibility bridge for Tasks. If Clouddriver had a Saga-only orchestration system, this step + * would not be necessary, but might be beneficial for debugging. + */ +@Component +public class SnapshotAtomicOperationInput + implements SagaAction { + + @Nonnull + @Override + public Result apply(@Nonnull SnapshotAtomicOperationInputCommand command, @Nonnull Saga saga) { + // We happily don't need to do anything here. This action just snapshots our input data. 
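+ // Because this command is persisted as the saga's first event, the original operation input + // is captured alongside the saga, which is what makes the snapshot useful for debugging.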
+ return new Result(command.nextCommand, Collections.emptyList()); + } + + @Builder(builderClassName = "SnapshotAtomicOperationInputCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + SnapshotAtomicOperationInputCommand.SnapshotAtomicOperationInputCommandBuilder.class) + @JsonTypeName("snapshotAtomicOperationInputCommand") + @Value + public static class SnapshotAtomicOperationInputCommand implements SagaCommand { + @Nonnull private String descriptionName; + @Nullable private String cloudProvider; + @Nonnull private Map descriptionInput; + @Nonnull private Object description; + @Nonnull private List priorOutputs; + @Nonnull private SagaCommand nextCommand; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(@Nonnull EventMetadata eventMetadata) { + this.metadata = eventMetadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class SnapshotAtomicOperationInputCommandBuilder {} + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/QueuedRequestException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/QueuedRequestException.java similarity index 100% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/QueuedRequestException.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/QueuedRequestException.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueue.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueue.java new file mode 100644 index 00000000000..1d8e23e2a5e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueue.java @@ -0,0 +1,100 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.requestqueue; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.requestqueue.pooled.PooledRequestQueue; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +/** RequestQueue. 
*/ +public interface RequestQueue { + + long DEFAULT_TIMEOUT_MILLIS = 60000; + long DEFAULT_START_WORK_TIMEOUT_MILLIS = 10000; + + static RequestQueue forConfig( + DynamicConfigService dynamicConfigService, + Registry registry, + RequestQueueConfiguration config) { + if (!config.isEnabled()) { + return noop(); + } + + return pooled( + dynamicConfigService, + registry, + config.getStartWorkTimeoutMillis(), + config.getTimeoutMillis(), + config.getPoolSize()); + } + + static RequestQueue noop() { + return new NOOP(); + } + + static RequestQueue pooled( + DynamicConfigService dynamicConfigService, Registry registry, int poolSize) { + return pooled( + dynamicConfigService, + registry, + DEFAULT_START_WORK_TIMEOUT_MILLIS, + DEFAULT_TIMEOUT_MILLIS, + poolSize); + } + + static RequestQueue pooled( + DynamicConfigService dynamicConfigService, + Registry registry, + long startWorkTimeoutMillis, + long timeoutMillis, + int poolSize) { + return new PooledRequestQueue( + dynamicConfigService, registry, startWorkTimeoutMillis, timeoutMillis, poolSize); + } + + default long getDefaultTimeoutMillis() { + return DEFAULT_TIMEOUT_MILLIS; + } + + default long getDefaultStartWorkTimeoutMillis() { + return DEFAULT_START_WORK_TIMEOUT_MILLIS; + } + + default T execute(String partition, Callable operation) throws Throwable { + return execute( + partition, + operation, + getDefaultStartWorkTimeoutMillis(), + getDefaultTimeoutMillis(), + TimeUnit.MILLISECONDS); + } + + T execute( + String partition, Callable operation, long startWorkTimeout, long timeout, TimeUnit unit) + throws Throwable; + + class NOOP implements RequestQueue { + @Override + public T execute( + String partition, Callable operation, long startWorkTimeout, long timeout, TimeUnit unit) + throws Throwable { + return operation.call(); + } + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueueConfiguration.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueueConfiguration.java similarity index 97% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueueConfiguration.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueueConfiguration.java index 65a79bf2eaf..7a937d285a9 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueueConfiguration.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueueConfiguration.java @@ -18,7 +18,7 @@ import org.springframework.boot.context.properties.ConfigurationProperties; -@ConfigurationProperties("requestQueue") +@ConfigurationProperties("request-queue") public class RequestQueueConfiguration { private boolean enabled = false; private long startWorkTimeoutMillis = RequestQueue.DEFAULT_START_WORK_TIMEOUT_MILLIS; diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PollCoordinator.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PollCoordinator.java similarity index 100% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PollCoordinator.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PollCoordinator.java diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequest.java 
b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequest.java new file mode 100644 index 00000000000..af3a769dff1 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.requestqueue.pooled; + +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Timer; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.TimeUnit; +import org.slf4j.MDC; + +class PooledRequest implements Runnable { + private final Timer timer; + private final Promise result; + private final Callable work; + private final long startTime = System.nanoTime(); + + PooledRequest(Registry registry, String partition, Callable work) { + this.timer = + registry.timer(registry.createId("pooledRequestQueue.enqueueTime", "partition", partition)); + this.result = new Promise<>(registry, partition); + + // Copy the MDC before doing the work. That way information from the MDC + // (e.g. from X-SPINNAKER-* incoming http request headers) of the calling + // thread makes it into log messages. + this.work = wrapWithContext(work); + } + + Promise getPromise() { + return result; + } + + void cancel() { + result.completeWithException(new CancellationException()); + } + + private Callable wrapWithContext(final Callable callable) { + Map contextMap = MDC.getCopyOfContextMap(); + return () -> { + if (contextMap == null) { + MDC.clear(); + } else { + MDC.setContextMap(contextMap); + } + return callable.call(); + }; + } + + @Override + public void run() { + timer.record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); + // request may have expired with a timeout prior to this point, lets not + // issue the work if that is the case as the caller has already moved on + if (result.shouldStart()) { + try { + result.complete(work.call()); + } catch (Throwable t) { + result.completeWithException(t); + } + } + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueue.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueue.java new file mode 100644 index 00000000000..895b5d37dc0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueue.java @@ -0,0 +1,194 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
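A rough usage sketch of the RequestQueue contract defined above; the registry and dynamicConfigService variables, the partition name, and the doExpensiveCloudCall helper are illustrative. execute() blocks the caller on the request's Promise and rethrows whatever the operation throws:

RequestQueue queue = RequestQueue.pooled(dynamicConfigService, registry, 10);
try {
  // Callers are partitioned (e.g. per account) so one busy partition cannot starve the rest.
  String response =
      queue.execute(
          "my-account", () -> doExpensiveCloudCall(), 10_000, 60_000, TimeUnit.MILLISECONDS);
} catch (Throwable t) {
  // Also surfaces PromiseNotStartedException (work never started within startWorkTimeout)
  // and PromiseTimeoutException (work started but did not finish within timeout).
}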
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.requestqueue.pooled; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import javax.annotation.PreDestroy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.scheduling.annotation.Scheduled; + +public class PooledRequestQueue implements RequestQueue { + private final Logger log = LoggerFactory.getLogger(getClass()); + private final ConcurrentMap>> partitionedRequests = + new ConcurrentHashMap<>(); + private final PollCoordinator pollCoordinator = new PollCoordinator(); + + private final long defaultStartWorkTimeout; + private final long defaultTimeout; + private final int defaultCorePoolSize; + private final ThreadPoolExecutor executorService; + private final BlockingQueue submittedRequests; + private final Collection>> requestQueues; + private final RequestDistributor requestDistributor; + + private final DynamicConfigService dynamicConfigService; + private final Registry registry; + + private final AtomicBoolean isEnabled = new AtomicBoolean(true); + + public PooledRequestQueue( + DynamicConfigService dynamicConfigService, + Registry registry, + long defaultStartWorkTimeout, + long defaultTimeout, + int requestPoolSize) { + + if (defaultStartWorkTimeout <= 0) { + throw new IllegalArgumentException("defaultStartWorkTimeout"); + } + + if (defaultTimeout <= 0) { + throw new IllegalArgumentException("defaultTimeout"); + } + + if (requestPoolSize < 1) { + throw new IllegalArgumentException("requestPoolSize"); + } + + this.dynamicConfigService = dynamicConfigService; + this.registry = registry; + + this.defaultStartWorkTimeout = defaultStartWorkTimeout; + this.defaultTimeout = defaultTimeout; + this.defaultCorePoolSize = requestPoolSize; + + this.submittedRequests = new LinkedBlockingQueue<>(); + registry.gauge("pooledRequestQueue.executorQueue.size", submittedRequests, Queue::size); + + final int actualThreads = requestPoolSize + 1; + this.executorService = + new ThreadPoolExecutor( + actualThreads, + actualThreads, + 0, + TimeUnit.MILLISECONDS, + submittedRequests, + new ThreadFactoryBuilder() + .setNameFormat(PooledRequestQueue.class.getSimpleName() + "-%d") + .build()); + registry.gauge( + "pooledRequestQueue.corePoolSize", executorService, ThreadPoolExecutor::getCorePoolSize); + + this.requestQueues = new CopyOnWriteArrayList<>(); + this.requestDistributor = + new RequestDistributor(registry, 
pollCoordinator, executorService, requestQueues); + executorService.submit(requestDistributor); + + registry.gauge("pooledRequestQueue.enabled", isEnabled, value -> value.get() ? 1.0 : 0.0); + } + + @PreDestroy + public void shutdown() { + requestDistributor.shutdown(); + executorService.shutdown(); + PooledRequest req; + while ((req = (PooledRequest) submittedRequests.poll()) != null) { + req.cancel(); + } + } + + @Override + public long getDefaultTimeoutMillis() { + return defaultTimeout; + } + + @Override + public long getDefaultStartWorkTimeoutMillis() { + return defaultStartWorkTimeout; + } + + @Override + public T execute( + String partition, Callable operation, long startWorkTimeout, long timeout, TimeUnit unit) + throws Throwable { + if (!isEnabled.get()) { + return operation.call(); + } + + final long startTime = System.nanoTime(); + final Queue> queue; + if (!partitionedRequests.containsKey(partition)) { + Queue> newQueue = new LinkedBlockingQueue<>(); + Queue> existing = partitionedRequests.putIfAbsent(partition, newQueue); + if (existing == null) { + requestQueues.add(newQueue); + queue = newQueue; + registry.gauge( + registry.createId("pooledRequestQueue.partition.size", "partition", partition), + queue, + Queue::size); + } else { + queue = existing; + } + } else { + queue = partitionedRequests.get(partition); + } + + final PooledRequest request = new PooledRequest<>(registry, partition, operation); + + queue.offer(request); + pollCoordinator.notifyItemsAdded(); + + Id id = registry.createId("pooledRequestQueue.totalTime", "partition", partition); + try { + T result = request.getPromise().blockingGetOrThrow(startWorkTimeout, timeout, unit); + id = id.withTag("success", "true"); + return result; + } catch (Throwable t) { + id = id.withTags("success", "false", "cause", t.getClass().getSimpleName()); + throw t; + } finally { + registry.timer(id).record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); + } + } + + @Scheduled(fixedDelayString = "${request-queue.core-pool-size-refresh-ms:120000}") + public void refreshCorePoolSize() { + int currentCorePoolSize = executorService.getCorePoolSize(); + int desiredCorePoolSize = + dynamicConfigService.getConfig( + Integer.class, "request-queue.pool-size", defaultCorePoolSize) + + 1; + + if (desiredCorePoolSize != currentCorePoolSize) { + log.info( + "Updating core pool size (original: {}, updated: {})", + currentCorePoolSize, + desiredCorePoolSize); + executorService.setCorePoolSize(desiredCorePoolSize); + executorService.setMaximumPoolSize(desiredCorePoolSize); + } + + isEnabled.set(dynamicConfigService.isEnabled("request-queue", true)); + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/Promise.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/Promise.java similarity index 77% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/Promise.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/Promise.java index 270e990c516..9f610f35edc 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/Promise.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/Promise.java @@ -17,11 +17,9 @@ package com.netflix.spinnaker.clouddriver.requestqueue.pooled; import com.netflix.spectator.api.Registry; - import java.util.Optional; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; class Promise { @@ -71,15 +69,25 @@ boolean shouldStart() { } void complete(T result) { - registry.counter(registry.createId("pooledRequestQueue.promise.complete", "partition", partition)).increment(); + registry + .counter(registry.createId("pooledRequestQueue.promise.complete", "partition", partition)) + .increment(); this.result.compareAndSet(null, Either.forResult(result)); startingLatch.countDown(); latch.countDown(); } void completeWithException(Throwable exception) { - final String cause = Optional.ofNullable(exception).map(Throwable::getClass).map(Class::getSimpleName).orElse("unknown"); - registry.counter(registry.createId("pooledRequestQueue.promise.exception", "partition", partition, "cause", cause)).increment(); + final String cause = + Optional.ofNullable(exception) + .map(Throwable::getClass) + .map(Class::getSimpleName) + .orElse("unknown"); + registry + .counter( + registry.createId( + "pooledRequestQueue.promise.exception", "partition", partition, "cause", cause)) + .increment(); this.result.compareAndSet(null, Either.forException(exception)); startingLatch.countDown(); latch.countDown(); @@ -89,11 +97,16 @@ T blockingGetOrThrow(long startWorkTimeout, long timeout, TimeUnit unit) throws try { if (startingLatch.await(startWorkTimeout, unit)) { if (!latch.await(timeout, unit)) { - registry.counter(registry.createId("pooledRequestQueue.promise.timeout", "partition", partition)).increment(); + registry + .counter( + registry.createId("pooledRequestQueue.promise.timeout", "partition", partition)) + .increment(); completeWithException(new PromiseTimeoutException()); } } else { - registry.counter(registry.createId("pooledRequest.promise.notStarted", "partition", partition)).increment(); + registry + .counter(registry.createId("pooledRequest.promise.notStarted", "partition", partition)) + .increment(); completeWithException(new PromiseNotStartedException()); } } catch (Throwable t) { diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseNotStartedException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseNotStartedException.java similarity index 100% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseNotStartedException.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseNotStartedException.java diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseTimeoutException.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseTimeoutException.java similarity index 100% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseTimeoutException.java rename to clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PromiseTimeoutException.java diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributor.java similarity index 92% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributor.java rename to 
clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributor.java index 0517daeab12..eb057fb0591 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributor.java +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributor.java @@ -18,13 +18,12 @@ import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.Registry; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collection; import java.util.Queue; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; class RequestDistributor implements Runnable { private final AtomicBoolean continueRunning = new AtomicBoolean(true); @@ -35,7 +34,11 @@ class RequestDistributor implements Runnable { private final Logger log = LoggerFactory.getLogger(getClass()); - RequestDistributor(Registry registry, PollCoordinator pollCoordinator, Executor executor, Collection>> requestQueues) { + RequestDistributor( + Registry registry, + PollCoordinator pollCoordinator, + Executor executor, + Collection>> requestQueues) { this.pollCoordinator = pollCoordinator; this.executor = executor; this.requestQueues = requestQueues; diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/ClusterMatchRule.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/ClusterMatchRule.java new file mode 100644 index 00000000000..d953b633266 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/ClusterMatchRule.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.safety; + +public class ClusterMatchRule { + private String account; + private String location; + private String stack; + private String detail; + private Integer priority; + + public ClusterMatchRule() {} + + public ClusterMatchRule( + String account, String location, String stack, String detail, Integer priority) { + this.account = account; + this.location = location; + this.stack = stack; + this.detail = detail; + this.priority = priority; + } + + public String getAccount() { + return account == null ? "" : account; + } + + public void setAccount(String account) { + this.account = account; + } + + public String getLocation() { + return location == null ? "" : location; + } + + public void setLocation(String location) { + this.location = location; + } + + public String getStack() { + return stack == null ? "" : stack; + } + + public void setStack(String stack) { + this.stack = stack; + } + + public String getDetail() { + return detail == null ? "" : detail; + } + + public void setDetail(String detail) { + this.detail = detail; + } + + public Integer getPriority() { + return priority == null ? 
0 : priority; + } + + public void setPriority(Integer priority) { + this.priority = priority; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/ClusterMatcher.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/ClusterMatcher.java new file mode 100644 index 00000000000..8e53c01145c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/ClusterMatcher.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.safety; + +import com.netflix.spinnaker.moniker.Moniker; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +public class ClusterMatcher { + + public static ClusterMatchRule getMatchingRule( + String account, String location, Moniker clusterMoniker, List rules) { + if (!Optional.ofNullable(rules).isPresent()) { + return null; + } + + String stack = clusterMoniker.getStack() == null ? "" : clusterMoniker.getStack(); + String detail = clusterMoniker.getDetail() == null ? "" : clusterMoniker.getDetail(); + + List candidates = + rules.stream() + .filter( + rule -> { + String ruleAccount = rule.getAccount(); + String ruleLocation = rule.getLocation(); + String ruleStack = rule.getStack(); + String ruleDetail = rule.getDetail(); + return (ruleAccount.equals("*") || ruleAccount.equals(account)) + && (ruleLocation.equals("*") || ruleLocation.equals(location)) + && (ruleStack.equals("*") + || ruleStack.equals(stack) + || ruleStack.isEmpty() && stack.isEmpty()) + && (ruleDetail.equals("*") + || ruleDetail.equals(detail) + || ruleDetail.isEmpty() && detail.isEmpty()); + }) + .sorted( + (o1, o2) -> { + if (!o1.getAccount().equals(o2.getAccount())) { + return "*".equals(o1.getAccount()) ? 1 : -1; + } + if (!o1.getLocation().equals(o2.getLocation())) { + return "*".equals(o1.getLocation()) ? 1 : -1; + } + if (!o1.getStack().equals(o2.getStack())) { + return "*".equals(o1.getStack()) ? 1 : -1; + } + if (!o1.getDetail().equals(o2.getDetail())) { + return "*".equals(o1.getDetail()) ? 1 : -1; + } + return o1.getPriority() - o2.getPriority(); + }) + .collect(Collectors.toList()); + + if (candidates.isEmpty()) { + return null; + } + + return candidates.get(0); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/TrafficGuard.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/TrafficGuard.java new file mode 100644 index 00000000000..cd3fae2c1fd --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/safety/TrafficGuard.java @@ -0,0 +1,382 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
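To illustrate the precedence implemented above (an exact field value outranks a "*" wildcard, compared account first, then location, stack, and detail, with priority as the final tie-breaker), here is a small hypothetical lookup; the Moniker.builder() call assumes the moniker library's Lombok-generated builder:

Moniker moniker = Moniker.builder().app("myapp").cluster("myapp-prod").stack("prod").build();
List<ClusterMatchRule> rules =
    List.of(
        new ClusterMatchRule("*", "*", "*", "*", 2), // catch-all
        new ClusterMatchRule("prod-account", "us-east-1", "prod", "", 1)); // exact
// Returns the exact rule: its concrete account/location/stack outrank the catch-all's wildcards.
ClusterMatchRule match =
    ClusterMatcher.getMatchingRule("prod-account", "us-east-1", moniker, rules);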
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.safety; + +import static java.lang.String.format; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.impl.Preconditions; +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.clouddriver.exceptions.TrafficGuardException; +import com.netflix.spinnaker.clouddriver.model.Cluster; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class TrafficGuard { + private static final String MIN_CAPACITY_RATIO = "traffic-guards.min-capacity-ratio"; + private final Logger log = LoggerFactory.getLogger(getClass()); + + private final List> clusterProviders; + private final Front50Service front50Service; + private final Registry registry; + private final DynamicConfigService dynamicConfigService; + + private final Id savesId; + + @Autowired + public TrafficGuard( + List> clusterProviders, + Optional front50Service, + Registry registry, + DynamicConfigService dynamicConfigService) { + this.clusterProviders = clusterProviders; + this.front50Service = front50Service.orElse(null); + this.registry = registry; + this.dynamicConfigService = dynamicConfigService; + this.savesId = registry.createId("trafficGuard.saves"); + } + + public void verifyInstanceTermination( + String serverGroupName, + List instanceIds, + String account, + String location, + String cloudProvider, + String operationDescriptor) { + // TODO(rz): I opted out of migrating this method because it isn't used in + // my current refactors. This method uses clouddriver search endpoint, + // which would be a much larger refactor to bring over in this commit. I + // would like to postpone such a refactor until it's actually needed. 
+ throw new UnsupportedOperationException( + "verifyInstanceTermination method has not been migrated from Orca yet"); + } + + public void verifyTrafficRemoval( + String serverGroupName, + String account, + String location, + String cloudProvider, + String operationDescriptor) { + + Moniker serverGroupMoniker = NamerRegistry.getDefaultNamer().deriveMoniker(serverGroupName); + + ClusterProvider clusterProvider = + getClusterProvider(cloudProvider) + .orElseThrow( + () -> + new TrafficGuardException( + format( + "Could not find ClusterProvider for cloud provider '%s'", + cloudProvider))); + + Cluster cluster = + clusterProvider.getCluster( + serverGroupMoniker.getApp(), account, serverGroupMoniker.getCluster(), false); + + if (cluster == null) { + throw new TrafficGuardException( + format( + "Could not find cluster '%s' in '%s/%s'", + serverGroupMoniker.getCluster(), account, location)); + } + + List targetServerGroups = + cluster.getServerGroups().stream() + .filter(it -> it.getRegion().equals(location)) + .collect(Collectors.toList()); + + ServerGroup serverGroupGoingAway = + targetServerGroups.stream() + .filter(it -> serverGroupMoniker.equals(it.getMoniker())) + .findFirst() + .orElseThrow( + () -> { + String message = + format( + "Could not find server group '%s' in '%s/%s', found [%s]", + serverGroupName, + account, + location, + targetServerGroups.stream() + .map(it -> it.getMoniker().toString()) + .collect(Collectors.joining(", "))); + log.error("{}\nContext: {}", message, generateContext(targetServerGroups)); + return new TrafficGuardException(message); + }); + + verifyTrafficRemoval(serverGroupGoingAway, targetServerGroups, account, operationDescriptor); + } + + public void verifyTrafficRemoval( + ServerGroup serverGroupGoingAway, + Collection currentServerGroups, + String account, + String operationDescriptor) { + verifyTrafficRemoval( + Collections.singletonList(serverGroupGoingAway), + currentServerGroups, + account, + operationDescriptor); + } + + /** + * If you disable serverGroup, are there other enabled server groups in the same cluster and + * location? 
+ * + * @param serverGroupsGoingAway + * @param currentServerGroups + * @param account + * @param operationDescriptor + */ + public void verifyTrafficRemoval( + Collection serverGroupsGoingAway, + Collection currentServerGroups, + String account, + String operationDescriptor) { + if (serverGroupsGoingAway == null || serverGroupsGoingAway.isEmpty()) { + return; + } + + Preconditions.checkArg(!currentServerGroups.isEmpty(), "currentServerGroups must not be empty"); + + // make sure all server groups are in the same location + ServerGroup someServerGroup = serverGroupsGoingAway.stream().findAny().get(); + String location = someServerGroup.getRegion(); + Preconditions.checkArg( + Stream.concat(serverGroupsGoingAway.stream(), currentServerGroups.stream()) + .allMatch(sg -> location.equals(sg.getRegion())), + "server groups must all be in the same location but some not in " + location); + + // make sure all server groups are in the same cluster + String cluster = someServerGroup.getMoniker().getCluster(); + Preconditions.checkArg( + Stream.concat(serverGroupsGoingAway.stream(), currentServerGroups.stream()) + .allMatch(sg -> cluster.equals(sg.getMoniker().getCluster())), + "server groups must all be in the same cluster but some not in " + cluster); + + if (!hasDisableLock(someServerGroup.getMoniker(), account, location)) { + log.debug("No traffic guard configured for '{}' in {}/{}", cluster, account, location); + return; + } + + // let the work begin + Map capacityByServerGroupName = + currentServerGroups.stream() + .collect(Collectors.toMap(ServerGroup::getName, this::getServerGroupCapacity)); + + Set namesOfServerGroupsGoingAway = + serverGroupsGoingAway.stream().map(ServerGroup::getName).collect(Collectors.toSet()); + + int currentCapacity = capacityByServerGroupName.values().stream().reduce(0, Integer::sum); + + if (currentCapacity == 0) { + log.debug( + "Bypassing traffic guard check for '{}' in {}/{} with no instances Up. Context: {}", + cluster, + account, + location, + generateContext(currentServerGroups)); + return; + } + + int capacityGoingAway = + capacityByServerGroupName.entrySet().stream() + .filter(entry -> namesOfServerGroupsGoingAway.contains(entry.getKey())) + .map(Map.Entry::getValue) + .reduce(0, Integer::sum); + + int futureCapacity = currentCapacity - capacityGoingAway; + + int someDesiredSize = someServerGroup.getCapacity().getDesired(); + if (futureCapacity > 0 + && serverGroupsGoingAway.size() > 1 + && serverGroupsGoingAway.stream().allMatch(sg -> sg.getCapacity().isPinned()) + && serverGroupsGoingAway.stream() + .allMatch(sg -> sg.getCapacity().getDesired() == someDesiredSize)) { + log.debug( + "Bypassing traffic guard check for '{}' in {}/{} with pinned server groups of size {}. 
Context: {}", + cluster, + account, + location, + someDesiredSize, + generateContext(currentServerGroups)); + return; + } + + double futureCapacityRatio = ((double) futureCapacity) / currentCapacity; + double minCapacityRatio = getMinCapacityRatio(); + if (futureCapacityRatio <= minCapacityRatio) { + String message = + generateUserFacingMessage( + cluster, + account, + location, + operationDescriptor, + namesOfServerGroupsGoingAway, + futureCapacity, + currentCapacity, + futureCapacityRatio, + minCapacityRatio); + log.debug("{}\nContext: {}", message, generateContext(currentServerGroups)); + + registry + .counter( + savesId.withTags( + "application", someServerGroup.getMoniker().getApp(), "account", account)) + .increment(); + + throw new TrafficGuardException(message); + } + } + + private String generateUserFacingMessage( + String cluster, + String account, + String location, + String operationDescriptor, + Set namesOfServerGroupsGoingAway, + int futureCapacity, + int currentCapacity, + double futureCapacityRatio, + double minCapacityRatio) { + String message = + format( + "This cluster ('%s' in %s/%s) has traffic guards enabled. %s [%s] would leave the cluster ", + cluster, + account, + location, + operationDescriptor, + String.join(",", namesOfServerGroupsGoingAway)); + + if (futureCapacity == 0) { + return message + "with no instances up."; + } + + String withInstances = + (futureCapacity == 1) + ? "with 1 instance up " + : format("with %d instances up ", futureCapacity); + return message + + withInstances + + format( + "(%.1f%% of %d instances currently up). The configured minimum is %.1f%%.", + futureCapacityRatio * 100, currentCapacity, minCapacityRatio * 100); + } + + private double getMinCapacityRatio() { + double defaultMinCapacityRatio = 0d; + try { + Double minCapacityRatio = + dynamicConfigService.getConfig(Double.class, MIN_CAPACITY_RATIO, defaultMinCapacityRatio); + if (minCapacityRatio == null || minCapacityRatio < 0 || 0.5 <= minCapacityRatio) { + log.error( + "Expecting a double value in range [0, 0.5] for {} but got {}", + MIN_CAPACITY_RATIO, + minCapacityRatio); + return 0; + } + return minCapacityRatio; + } catch (NumberFormatException e) { + log.error("Expecting a double value in range [0, 0.5] for {}", MIN_CAPACITY_RATIO, e); + return defaultMinCapacityRatio; + } + } + + private List generateContext(Collection targetServerGroups) { + return targetServerGroups.stream() + .map( + tsg -> + ImmutableMap.builder() + .put("name", tsg.getName()) + .put("disabled", tsg.isDisabled()) + .put("instances", tsg.getInstances()) + .put("capacity", tsg.getCapacity()) + .build()) + .collect(Collectors.toList()); + } + + private int getServerGroupCapacity(ServerGroup serverGroup) { + return (int) + serverGroup.getInstances().stream() + .filter(it -> HealthState.Up.equals(it.getHealthState())) + .count(); + } + + public boolean hasDisableLock(Moniker clusterMoniker, String account, String location) { + if (front50Service == null) { + log.warn( + "Front50 has not been configured, no way to check disable lock. 
Fix this by setting front50.enabled: true"); + return false; + } + Map application; + try { + application = front50Service.getApplication(clusterMoniker.getApp()); + } catch (SpinnakerHttpException e) { + // ignore an unknown (404) or unauthorized (403) application + if (Arrays.asList(404, 403).contains(e.getResponseCode())) { + application = null; + } else { + throw e; + } + } + if (application == null || !application.containsKey("trafficGuards")) { + return false; + } + List> trafficGuards = + (List>) application.get("trafficGuards"); + List rules = + trafficGuards.stream() + .filter(guard -> (boolean) guard.getOrDefault("enabled", true)) + .map( + guard -> + new ClusterMatchRule( + (String) guard.get("account"), + (String) guard.get("location"), + (String) guard.get("stack"), + (String) guard.get("detail"), + 1)) + .collect(Collectors.toList()); + return ClusterMatcher.getMatchingRule(account, location, clusterMoniker, rules) != null; + } + + private Optional> getClusterProvider(String cloudProvider) { + return clusterProviders.stream() + .filter(it -> it.getCloudProviderId().equals(cloudProvider)) + .findFirst(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/ApplicationSearchProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/ApplicationSearchProvider.java new file mode 100644 index 00000000000..26b196f883e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/ApplicationSearchProvider.java @@ -0,0 +1,122 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
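For reference, hasDisableLock above expects the Front50 application payload to carry a trafficGuards list shaped roughly as follows; the keys are taken from the reads in hasDisableLock, while the values are illustrative:

// Minimal sketch of the application attributes that hasDisableLock() inspects.
Map<String, Object> application =
    Map.of(
        "name", "myapp",
        "trafficGuards",
        List.of(
            Map.of(
                "account", "prod-account",
                "location", "us-east-1",
                "stack", "*",
                "detail", "*",
                "enabled", true)));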
+ */ + +package com.netflix.spinnaker.clouddriver.search; + +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.springframework.security.core.Authentication; +import org.springframework.security.core.context.SecurityContextHolder; + +public class ApplicationSearchProvider implements SearchProvider { + + private final String APPLICATIONS_TYPE = "applications"; + + private final Front50Service front50Service; + private final List clusterProviders; + private final FiatPermissionEvaluator permissionEvaluator; + + public ApplicationSearchProvider( + Front50Service front50Service, + List clusterProviders, + FiatPermissionEvaluator permissionEvaluator) { + this.front50Service = front50Service; + this.clusterProviders = clusterProviders; + this.permissionEvaluator = permissionEvaluator; + } + + public ApplicationSearchProvider( + Front50Service front50Service, List clusterProviders) { + this(front50Service, clusterProviders, null); + } + + public ApplicationSearchProvider(Front50Service front50Service) { + this(front50Service, List.of(), null); + } + + @Override + public String getPlatform() { + return "front50"; + } + + @Override + public SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { + return search(query, List.of(APPLICATIONS_TYPE), pageNumber, pageSize, Map.of()); + } + + @Override + public SearchResultSet search( + String query, Integer pageNumber, Integer pageSize, Map filters) { + return search(query, List.of(APPLICATIONS_TYPE), pageNumber, pageSize, filters); + } + + @Override + public SearchResultSet search( + String query, List types, Integer pageNumber, Integer pageSize) { + return search(query, types, pageNumber, pageSize, Map.of()); + } + + @Override + public SearchResultSet search( + String query, + List types, + Integer pageNumber, + Integer pageSize, + Map filters) { + if (!types.contains(APPLICATIONS_TYPE)) { + return SearchResultSet.builder().totalMatches(0).build(); + } + + Authentication auth = SecurityContextHolder.getContext().getAuthentication(); + + List> rawResults = front50Service.searchByName(query, pageSize, filters); + List> results = new ArrayList<>(); + rawResults.forEach( + application -> { + String appName = application.get("name").toString().toLowerCase(); + if (permissionEvaluator != null + && permissionEvaluator.hasPermission(auth, appName, "APPLICATION", "READ")) { + application.put("application", appName); + application.put("type", APPLICATIONS_TYPE); + application.put("url", String.format("/applications/%s", appName)); + application.put("accounts", getAccounts(appName)); + + results.add(application); + } + }); + + return new SearchResultSet(results.size(), pageNumber, pageSize, getPlatform(), query, results); + } + + private List getAccounts(String application) { + return clusterProviders.stream() + .map(provider -> provider.getClusterSummaries(application)) + .map(Map::keySet) + .map(Object::toString) + .distinct() + .collect(Collectors.toList()); + } + + @Override + public List excludedFilters() { + return List.of("cloudProvider"); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/NoopSearchProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/NoopSearchProvider.java new file mode 
100644 index 00000000000..da8025446bd --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/NoopSearchProvider.java @@ -0,0 +1,49 @@ +package com.netflix.spinnaker.clouddriver.search; + +import java.util.List; +import java.util.Map; + +public class NoopSearchProvider implements SearchProvider { + @Override + public String getPlatform() { + return "noop"; + } + + @Override + public SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { + return empty(query, pageNumber, pageSize); + } + + @Override + public SearchResultSet search( + String query, Integer pageNumber, Integer pageSize, Map<String, String> filters) { + return empty(query, pageNumber, pageSize); + } + + @Override + public SearchResultSet search( + String query, List<String> types, Integer pageNumber, Integer pageSize) { + return empty(query, pageNumber, pageSize); + } + + @Override + public SearchResultSet search( + String query, + List<String> types, + Integer pageNumber, + Integer pageSize, + Map<String, String> filters) { + return empty(query, pageNumber, pageSize); + } + + private static SearchResultSet empty(String query, Integer pageNumber, Integer pageSize) { + return SearchResultSet.builder() + .totalMatches(0) + .platform("noop") + .pageNumber(pageNumber) + .pageSize(pageSize) + .query(query) + .results(List.of()) + .build(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/ProjectSearchProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/ProjectSearchProvider.java new file mode 100644 index 00000000000..f77ce742d0d --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/ProjectSearchProvider.java @@ -0,0 +1,82 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.search; + +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +public class ProjectSearchProvider implements SearchProvider { + + private final String PROJECTS_TYPE = "projects"; + private final Front50Service front50Service; + + @Override + public String getPlatform() { + return "front50"; + } + + @Override + public SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { + return search(query, List.of(PROJECTS_TYPE), pageNumber, pageSize, Map.of()); + } + + @Override + public SearchResultSet search( + String query, Integer pageNumber, Integer pageSize, Map<String, String> filters) { + return search(query, List.of(PROJECTS_TYPE), pageNumber, pageSize, filters); + } + + @Override + public SearchResultSet search( + String query, List<String> types, Integer pageNumber, Integer pageSize) { + return search(query, types, pageNumber, pageSize, Map.of()); + } + + @Override + public SearchResultSet search( + String query, + List<String> types, + Integer pageNumber, + Integer pageSize, + Map<String, String> filters) { + if (!types.contains(PROJECTS_TYPE)) { + return SearchResultSet.builder().totalMatches(0).build(); + } + + Map<String, String> allFilters = new HashMap<>(Map.of("name", query, "applications", query)); + allFilters.putAll(filters); + + List<Map<String, Object>> projects = front50Service.searchForProjects(allFilters, pageSize); + projects.forEach( + project -> { + project.put("type", PROJECTS_TYPE); + project.put("url", String.format("/projects/%s", project.get("id"))); + }); + + return new SearchResultSet( + projects.size(), pageNumber, pageSize, getPlatform(), query, projects); + } + + @Override + public List<String> excludedFilters() { + return List.of("cloudProvider"); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchProvider.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchProvider.java new file mode 100644 index 00000000000..3023cb54623 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchProvider.java @@ -0,0 +1,90 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.search; + +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.Map; + +/** A Searchable component provides a mechanism to query for a collection of items */ +public interface SearchProvider { + /** + * Returns the platform the search provider services + * + * @return a String, e.g.
'aws', 'gce' + */ + String getPlatform(); + + /** + * Finds all matching items for the provided query + * + * @param query a query string + * @param pageNumber page index (1-based) of the result set + * @param pageSize number of items per page + * @return a result set of matching items + */ + SearchResultSet search(String query, Integer pageNumber, Integer pageSize); + + /** + * Finds all matching items for the provided query, filtered by the supplied filters + * + * @param query a query string + * @param pageNumber page index (1-based) of the result set + * @param pageSize number of items per page + * @param filters a map of inclusive filters + * @return a result set of matching items + */ + SearchResultSet search( + String query, Integer pageNumber, Integer pageSize, Map<String, String> filters); + + /** + * Finds all matching items for the provided query and type + * + * @param query a query string + * @param types the types of items to search for + * @param pageNumber page index (1-based) of the result set + * @param pageSize number of items per page + * @return a result set of matching items + */ + SearchResultSet search(String query, List<String> types, Integer pageNumber, Integer pageSize); + + /** + * Finds all matching items for the provided query and type, filtered by the supplied filters + * + * @param query a query string + * @param types the types of items to search for + * @param pageNumber page index (1-based) of the result set + * @param pageSize number of items per page + * @param filters a map of inclusive filters + * @return a result set of matching items + */ + SearchResultSet search( + String query, + List<String> types, + Integer pageNumber, + Integer pageSize, + Map<String, String> filters); + + /** + * Provides a list of filter keys to be removed prior to searching + * + * @return a list of filter keys to optionally be removed prior to searching + */ + default List<String> excludedFilters() { + return ImmutableList.of(); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchQueryCommand.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchQueryCommand.java new file mode 100644 index 00000000000..b110e3268e0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchQueryCommand.java @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.search; + +import java.util.List; +import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +public class SearchQueryCommand { + /** the phrase to query */ + String q; + + /** + * (optional) a filter, used to only return results of that type.
If no value is supplied, all + * types will be returned + */ + List<String> type; + + /** a filter, used to only return results from providers whose platform value matches this */ + String platform = ""; + + /** the page number, starting with 1 */ + Integer page = 1; + + /** the maximum number of results to return per page */ + Integer pageSize = 10; + + /** + * (optional) a map of ad-hoc key-value pairs to further filter the keys, based on the map + * provided by {@link com.netflix.spinnaker.oort.aws.data.Keys#parse(java.lang.String)}; potential + * matches must fully intersect the filter map entries + */ + Map<String, String> filters; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchResultSet.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchResultSet.java new file mode 100644 index 00000000000..dab299ceed5 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/SearchResultSet.java @@ -0,0 +1,49 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.search; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@AllArgsConstructor +@Builder +public class SearchResultSet { + /** The total number of items matching the search criteria (query, platform, and type) */ + Integer totalMatches; + + /** The page index (1-based) of the result set */ + Integer pageNumber; + + /** The number of items per page */ + Integer pageSize; + + /** The platform of results the provider supplies - e.g. "aws", "gce", etc. */ + String platform; + + /** The original query string, used to sort results */ + String query; + + /** The paginated list of objects matching the query */ + List<Map<String, Object>> results = new ArrayList<>(); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutor.java new file mode 100644 index 00000000000..41b55c450f4 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutor.java @@ -0,0 +1,124 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.search.executor; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.search.SearchProvider; +import com.netflix.spinnaker.clouddriver.search.SearchQueryCommand; +import com.netflix.spinnaker.clouddriver.search.SearchResultSet; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.*; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; + +@Slf4j +public class SearchExecutor { + private Integer timeout; + private ExecutorService executor; + + @Autowired Registry registry; + + SearchExecutor(SearchExecutorConfigProperties configProperties) { + this.timeout = configProperties.getTimeout(); + this.executor = + Executors.newFixedThreadPool( + configProperties.getThreadPoolSize(), + new ThreadFactoryBuilder() + .setNameFormat(SearchExecutor.class.getSimpleName() + "-%d") + .build()); + } + + public List<SearchResultSet> searchAllProviders( + List<SearchProvider> providers, SearchQueryCommand searchQuery) { + List<SearchTask> searchTasks = + providers.stream() + .map(p -> new SearchTask(p, searchQuery, registry)) + .collect(Collectors.toList()); + List<Future<SearchResultSet>> resultFutures = null; + try { + resultFutures = executor.invokeAll(searchTasks, timeout, TimeUnit.SECONDS); + } catch (InterruptedException ie) { + log.error( + String.format( + "Search for '%s' in '%s' interrupted", searchQuery.getQ(), searchQuery.getPlatform()), + ie); + } + + if (resultFutures == null) { + return Collections.emptyList(); + } + return resultFutures.stream() + .map(f -> getFuture(f, registry, searchQuery.getQ())) + .collect(Collectors.toList()); + } + + private static SearchResultSet getFuture(Future<SearchResultSet> f, Registry registry, String q) { + SearchResultSet resultSet = null; + try { + resultSet = f.get(); + } catch (ExecutionException | InterruptedException e) { + log.error(String.format("Retrieving future %s failed", f), e); + } catch (CancellationException e) { + log.error(String.format("Retrieving result failed due to cancelled task: %s", f)); + String counterId = String.format("searchExecutor.%s.failures", q != null ?
q : "*"); + registry.counter(registry.createId(counterId)).increment(1); + } + + if (resultSet == null) { + return new SearchResultSet().setTotalMatches(0).setResults(Collections.EMPTY_LIST); + } + return resultSet; + } + + private static class SearchTask implements Callable { + private SearchProvider provider; + private SearchQueryCommand searchQuery; + private Registry registry; + + SearchTask(SearchProvider provider, SearchQueryCommand searchQuery, Registry registry) { + this.provider = provider; + this.searchQuery = searchQuery; + this.registry = registry; + } + + public SearchResultSet call() { + Map filters = + searchQuery.getFilters().entrySet().stream() + .filter(e -> !provider.excludedFilters().contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + String q = searchQuery.getQ(); + try { + if (searchQuery.getType() != null && !searchQuery.getType().isEmpty()) { + return provider.search( + q, searchQuery.getType(), searchQuery.getPage(), searchQuery.getPageSize(), filters); + } else { + return provider.search(q, searchQuery.getPage(), searchQuery.getPageSize(), filters); + } + } catch (Exception e) { + log.error(String.format("Search for '%s' in '%s' failed", q, searchQuery.getPlatform()), e); + String counterId = String.format("searchExecutor.%s.failures", q != null ? q : "*"); + registry.counter(registry.createId(counterId)).increment(1); + return new SearchResultSet().setTotalMatches(0).setResults(Collections.EMPTY_LIST); + } + } + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutorConfig.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutorConfig.java new file mode 100644 index 00000000000..bed6b9cd0f4 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutorConfig.java @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.search.executor; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnExpression("${search.executor.enabled:false}") +@EnableConfigurationProperties(SearchExecutorConfigProperties.class) +public class SearchExecutorConfig { + @Bean + SearchExecutor searchExecutor(SearchExecutorConfigProperties configProperties) { + return new SearchExecutor(configProperties); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutorConfigProperties.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutorConfigProperties.java new file mode 100644 index 00000000000..87fdf667089 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/search/executor/SearchExecutorConfigProperties.java @@ -0,0 +1,30 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.search.executor; + +import lombok.Data; +import lombok.NoArgsConstructor; +import org.springframework.boot.context.properties.ConfigurationProperties; + +@Data +@NoArgsConstructor +@ConfigurationProperties("search.executor") +class SearchExecutorConfigProperties { + private Boolean enabled; + private Integer threadPoolSize = 3; + private Integer timeout = 10; +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsConverter.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsConverter.java new file mode 100644 index 00000000000..ab50ff05e7e --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsConverter.java @@ -0,0 +1,57 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException; +import java.util.stream.Collectors; +import javax.validation.constraints.NotNull; +import lombok.Getter; +import lombok.Setter; +import org.springframework.beans.factory.annotation.Autowired; + +public abstract class AbstractAtomicOperationsCredentialsConverter<T extends AccountCredentials<?>> + implements AtomicOperationConverter { + + @Autowired @Getter @Setter private CredentialsRepository<T> credentialsRepository; + + @Getter + private final ObjectMapper objectMapper = + new ObjectMapper() + .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + @NotNull + public T getCredentialsObject(@NotNull final String name) { + T creds = credentialsRepository.getOne(name); + if (creds == null) { + throw new InvalidRequestException( + String.format( + "credentials not found (name: %s, names: %s)", + name, + credentialsRepository.getAll().stream() + .map(AccountCredentials::getName) + .collect(Collectors.joining(",")))); + } + return creds; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsSupport.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsSupport.java new file mode 100644 index 00000000000..24f660d1a72 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAtomicOperationsCredentialsSupport.java @@ -0,0 +1,66 @@ +package com.netflix.spinnaker.clouddriver.security; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter; +import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; + +public abstract class AbstractAtomicOperationsCredentialsSupport + implements AtomicOperationConverter { + + @Autowired private AccountCredentialsProvider accountCredentialsProvider; + + private ObjectMapper objectMapper; + + @Autowired + public void setObjectMapper(ObjectMapper objectMapper) { + // TODO(rz): This is a bad pattern, we should be using the object mapper customizer bean, rather + // than modifying a singleton, global object mapper after injecting it somewhere.
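+ // Note: ObjectMapper#configure mutates the injected instance in place (and returns it), so + // these relaxed settings also leak to every other consumer of the shared mapper bean.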
+ this.objectMapper = + objectMapper + .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + } + + public <T extends AccountCredentials> T getCredentialsObject(final String name) { + if (name == null) { + throw new InvalidRequestException("credentials are required"); + } + + T credential; + try { + AccountCredentials repoCredential = accountCredentialsProvider.getCredentials(name); + if (repoCredential == null) { + throw new NullPointerException(); + } + + credential = (T) repoCredential; + } catch (Exception e) { + throw new InvalidRequestException( + String.format( + "credentials not found (name: %s, names: %s)", + name, + getAccountCredentialsProvider().getAll().stream() + .map(AccountCredentials::getName) + .collect(Collectors.joining(","))), + e); + } + + return credential; + } + + public AccountCredentialsProvider getAccountCredentialsProvider() { + return accountCredentialsProvider; + } + + public void setAccountCredentialsProvider(AccountCredentialsProvider accountCredentialsProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + } + + public ObjectMapper getObjectMapper() { + return objectMapper; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccessControlledAccountDefinitionAuthorizedRolesExtractor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccessControlledAccountDefinitionAuthorizedRolesExtractor.java new file mode 100644 index 00000000000..db76c7970e0 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccessControlledAccountDefinitionAuthorizedRolesExtractor.java @@ -0,0 +1,40 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Set; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +public class AccessControlledAccountDefinitionAuthorizedRolesExtractor + implements AuthorizedRolesExtractor { + @Override + public boolean supportsType(Class<? extends CredentialsDefinition> type) { + return AccessControlledAccountDefinition.class.isAssignableFrom(type); + } + + @Override + public Set<String> getAuthorizedRoles(CredentialsDefinition definition) { + return ((AccessControlledAccountDefinition) definition) + .getPermissions() + .getOrDefault(Authorization.WRITE, Set.of()); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionMapper.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionMapper.java new file mode 100644 index 00000000000..1e7291e72d7 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionMapper.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.secrets.EncryptedSecret; +import com.netflix.spinnaker.kork.secrets.SecretSession; +import com.netflix.spinnaker.kork.secrets.user.UserSecretReference; +import java.util.Iterator; +import java.util.Map; +import java.util.Optional; +import lombok.RequiredArgsConstructor; + +/** + * Maps account definitions to and from strings. Only {@link CredentialsDefinition} classes + * annotated with a {@link com.fasterxml.jackson.annotation.JsonTypeName} will be considered. {@link + * UserSecretReference} URIs may be used for credential values; each such value is replaced with an + * appropriate string for the secret, and the associated account name is recorded for time-of-use + * permission checks on the user secret. Traditional {@link EncryptedSecret} URIs are also + * supported.
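+ * + * <p>For illustration only (the engine and parameter names below are hypothetical; the exact + * syntax is defined by the configured secret engines): a field value such as + * {@code secret://someengine?k=password} would be resolved as a user secret, while + * {@code encrypted:someengine!k:password} would be decrypted as a traditional secret.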
+ */ +@NonnullByDefault +@RequiredArgsConstructor +public class AccountDefinitionMapper { + + private final ObjectMapper objectMapper; + private final AccountDefinitionSecretManager secretManager; + private final SecretSession secretSession; + + public String serialize(CredentialsDefinition definition) throws JsonProcessingException { + return objectMapper.writeValueAsString(definition); + } + + public CredentialsDefinition deserialize(String string) throws JsonProcessingException { + ObjectNode account = (ObjectNode) objectMapper.readTree(string); + String accountName = account.required("name").asText(); + Iterator<Map.Entry<String, JsonNode>> it = account.fields(); + while (it.hasNext()) { + Map.Entry<String, JsonNode> field = it.next(); + JsonNode node = field.getValue(); + if (node.isTextual()) { + String text = node.asText(); + Optional<String> plaintext; + if (UserSecretReference.isUserSecret(text)) { + UserSecretReference ref = UserSecretReference.parse(text); + plaintext = Optional.of(secretManager.getUserSecretString(ref, accountName)); + } else if (EncryptedSecret.isEncryptedSecret(text)) { + plaintext = Optional.ofNullable(secretSession.decrypt(text)); + } else { + plaintext = Optional.empty(); + } + plaintext.map(account::textNode).ifPresent(field::setValue); + } + } + return objectMapper.convertValue(account, CredentialsDefinition.class); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSecretManager.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSecretManager.java new file mode 100644 index 00000000000..4f571b41ba8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSecretManager.java @@ -0,0 +1,94 @@ +/* + * Copyright 2022 Apple, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.secrets.SecretDecryptionException; +import com.netflix.spinnaker.kork.secrets.StandardSecretParameter; +import com.netflix.spinnaker.kork.secrets.user.UserSecretManager; +import com.netflix.spinnaker.kork.secrets.user.UserSecretReference; +import java.util.Collections; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import lombok.RequiredArgsConstructor; +import lombok.experimental.Delegate; + +@RequiredArgsConstructor +@NonnullByDefault +public class AccountDefinitionSecretManager { + @Delegate private final UserSecretManager userSecretManager; + private final AccountSecurityPolicy policy; + + private final Map<String, Set<UserSecretReference>> refsByAccountName = new ConcurrentHashMap<>(); + + /** + * Gets a user secret string value for the given account. User secret references are tracked + * through this method to support time-of-use access control checks for accounts after they've + * been loaded by the system.
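+ * The recorded references are later consulted by {@link #canAccessAccountWithSecrets(String, String)} + * to verify that a user still shares a role with every user secret the account references.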
+ * + * @param reference parsed user secret reference to decrypt + * @param accountName name of account requesting the user secret + * @return the contents of the requested user secret string + */ + public String getUserSecretString(UserSecretReference reference, String accountName) { + var secret = getUserSecret(reference); + refsByAccountName + .computeIfAbsent(accountName, ignored -> ConcurrentHashMap.newKeySet()) + .add(reference); + var parameterName = StandardSecretParameter.KEY.getParameterName(); + var secretKey = reference.getParameters().getOrDefault(parameterName, ""); + try { + return secret.getSecretString(secretKey); + } catch (NoSuchElementException e) { + throw new SecretDecryptionException(e); + } + } + + /** + * Indicates if the given username is authorized to access the given account. When Fiat is + * enabled, this allows admins to access accounts along with users who have both WRITE permission + * on the account and are authorized to use any provided {@link UserSecretReference} data. + * + * @param username username to check for authorization to use the given account + * @param accountName the name of the account to check access to + * @return true if the given username is allowed to access the given account + */ + public boolean canAccessAccountWithSecrets(String username, String accountName) { + return policy.isAdmin(username) + || (!accountDefinitionUsesUnauthorizedUserSecrets(username, accountName) + && policy.canUseAccount(username, accountName)); + } + + /** + * Checks the account definition for the provided account name and returns true if that account + * references at least one UserSecret with which the provided user has no role in common. If the + * account definition uses no UserSecrets, or the provided user is allowed to use all the + * UserSecrets referenced, then this returns false. + */ + private boolean accountDefinitionUsesUnauthorizedUserSecrets( + String username, String accountName) { + var userRoles = policy.getRoles(username); + // if at least one UserSecret is referenced which the user has no common roles with + return refsByAccountName.getOrDefault(accountName, Set.of()).stream() + .map(this::getUserSecret) + .anyMatch(secret -> Collections.disjoint(secret.getRoles(), userRoles)); + // Collections::disjoint checks if both collections have nothing in common (i.e., an empty set + // intersection) + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionService.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionService.java new file mode 100644 index 00000000000..a101f2fde66 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionService.java @@ -0,0 +1,194 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.annotations.VisibleForTesting; +import com.netflix.spinnaker.kork.secrets.SecretException; +import com.netflix.spinnaker.kork.secrets.user.UserSecretReference; +import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException; +import com.netflix.spinnaker.security.AuthenticatedRequest; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; +import org.springframework.security.access.AccessDeniedException; +import org.springframework.security.access.prepost.PostFilter; +import org.springframework.security.access.prepost.PreAuthorize; +import org.springframework.util.ReflectionUtils; + +/** + * Service wrapper for an {@link AccountDefinitionRepository} which enforces permissions and other + * validations. + */ +@Beta +@NonnullByDefault +@RequiredArgsConstructor +public class AccountDefinitionService { + private final AccountDefinitionRepository repository; + private final AccountDefinitionSecretManager secretManager; + private final AccountCredentialsProvider accountCredentialsProvider; + private final AccountSecurityPolicy policy; + private final List<AuthorizedRolesExtractor> extractors; + + /** + * Lists accounts of a given type to which the current user has {@link Authorization#WRITE} access. + * Users who only have {@link Authorization#READ} access can only view related items like load + * balancers, clusters, security groups, etc., that use the account, but they may not directly use + * or view account definitions and credentials.
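+ * + * <p>A minimal usage sketch (the account type shown is hypothetical): + * + * <pre>{@code + * List<CredentialsDefinition> page = + *     accountDefinitionService.listAccountDefinitionsByType("kubernetes", 100, null); + * }</pre>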
+ * + * @see AccountDefinitionRepository#listByType(String, int, String) + */ + @PreAuthorize("@accountSecurity.isAccountManager(authentication.name)") + @PostFilter( + "@accountDefinitionSecretManager.canAccessAccountWithSecrets(authentication.name, filterObject.name)") + public List<CredentialsDefinition> listAccountDefinitionsByType( + String accountType, int limit, @Nullable String startingAccountName) { + return repository.listByType(accountType, limit, startingAccountName); + } + + @PreAuthorize("@accountSecurity.isAccountManager(authentication.name)") + public CredentialsDefinition createAccount(CredentialsDefinition definition) { + String name = definition.getName(); + String username = AuthenticatedRequest.getSpinnakerUser().orElse("anonymous"); + if (accountCredentialsProvider.getCredentials(name) != null) { + throw new InvalidRequestException( + String.format("Cannot create an account which already exists (name: %s)", name)); + } + validateAccountWritePermissions(username, definition, AccountAction.CREATE); + repository.create(definition); + return definition; + } + + @PreAuthorize("@accountSecurity.isAccountManager(authentication.name)") + public CredentialsDefinition saveAccount(CredentialsDefinition definition) { + String name = definition.getName(); + String username = AuthenticatedRequest.getSpinnakerUser().orElse("anonymous"); + if (accountCredentialsProvider.getCredentials(name) != null + && !policy.canModifyAccount(username, name)) { + throw new AccessDeniedException( + String.format("Unauthorized to overwrite existing account (name: %s)", name)); + } + validateAccountWritePermissions(username, definition, AccountAction.SAVE); + repository.save(definition); + return definition; + } + + @PreAuthorize("@accountSecurity.canModifyAccount(authentication.name, #definition.name)") + public CredentialsDefinition updateAccount(CredentialsDefinition definition) { + String name = definition.getName(); + if (accountCredentialsProvider.getCredentials(name) == null) { + throw new InvalidRequestException( + String.format("Cannot update an account which does not exist (name: %s)", name)); + } + String username = AuthenticatedRequest.getSpinnakerUser().orElse("anonymous"); + validateAccountWritePermissions(username, definition, AccountAction.UPDATE); + repository.update(definition); + return definition; + } + + /** + * Deletes an account by name if the current user has {@link Authorization#WRITE} access to the + * given account. + */ + @PreAuthorize("@accountSecurity.canModifyAccount(authentication.name, #accountName)") + public void deleteAccount(String accountName) { + repository.delete(accountName); + } + + /** + * Returns the revision history of an account if the current user has {@link Authorization#WRITE} + * access to the given account.
+ */ + @PreAuthorize("@accountSecurity.canModifyAccount(authentication.name, #accountName)") + public List<AccountDefinitionRepository.Revision> getAccountHistory(String accountName) { + return repository.revisionHistory(accountName); + } + + private void validateAccountWritePermissions( + String username, CredentialsDefinition definition, AccountAction action) { + if (policy.isAdmin(username)) { + return; + } + Set<String> userRoles = policy.getRoles(username); + validateAccountAuthorization(userRoles, definition, action); + validateUserSecretAuthorization(userRoles, definition, action); + } + + @VisibleForTesting + void validateAccountAuthorization( + Set<String> userRoles, CredentialsDefinition definition, AccountAction action) { + String accountName = definition.getName(); + var type = definition.getClass(); + Set<String> authorizedRoles = + extractors.stream() + .filter(extractor -> extractor.supportsType(type)) + .flatMap(extractor -> extractor.getAuthorizedRoles(definition).stream()) + .collect(Collectors.toSet()); + // if the account defines authorized roles and the user has no roles in common with these + // authorized roles, then the user attempted to create an account they'd immediately be + // locked out from, which is a poor user experience + // (Collections::disjoint returns true if both collections have no elements in common) + if (!authorizedRoles.isEmpty() && Collections.disjoint(userRoles, authorizedRoles)) { + throw new InvalidRequestException( + String.format( + "Cannot %s account without granting permissions for current user (name: %s)", + action.name().toLowerCase(Locale.ROOT), accountName)); + } + } + + @VisibleForTesting + void validateUserSecretAuthorization( + Set<String> userRoles, CredentialsDefinition definition, AccountAction action) { + var type = definition.getClass(); + Set<UserSecretReference> secretReferences = new HashSet<>(); + ReflectionUtils.doWithFields( + type, + field -> { + field.setAccessible(true); + UserSecretReference.tryParse(field.get(definition)).ifPresent(secretReferences::add); + }, + field -> field.getType() == String.class); + // if the account uses any UserSecrets and the user has no roles in common with any of + // the UserSecrets, then don't allow the user to save this account due to lack of authorization + for (var ref : secretReferences) { + try { + var secret = secretManager.getUserSecret(ref); + var secretRoles = Set.copyOf(secret.getRoles()); + // (Collections::disjoint returns true if both collections have no elements in common) + if (Collections.disjoint(userRoles, secretRoles)) { + throw new AccessDeniedException( + String.format( + "Unauthorized to %s account with user secret %s", + action.name().toLowerCase(Locale.ROOT), ref)); + } + } catch (SecretException e) { + throw new InvalidRequestException(e); + } + } + } + + @VisibleForTesting + enum AccountAction { + CREATE, + UPDATE, + SAVE + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSource.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSource.java new file mode 100644 index 00000000000..d6fedec34f8 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSource.java @@ -0,0 +1,91 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * Provides a full list of CredentialsDefinition account instances for a given credentials type. + * Given an {@link AccountDefinitionRepository} bean and an optional list of {@code + * CredentialsDefinitionSource<T>} beans for a given account type {@code T}, this class combines the + * lists from all the given credentials definition sources. When no {@code + * CredentialsDefinitionSource<T>} beans are available for a given account type, a default + * source should be provided to wrap any existing Spring configuration beans that supply account + * definitions. + * + * @param <T> account credentials definition type + */ +@NonnullByDefault +public class AccountDefinitionSource<T extends CredentialsDefinition> + implements CredentialsDefinitionSource<T> { + + private static final Logger LOGGER = LogManager.getLogger(); + private final List<CredentialsDefinitionSource<T>> sources; + + /** + * Constructs an account-based {@code CredentialsDefinitionSource} using the provided + * repository, account type, and additional sources for accounts of the same type. + * + * @param repository the backing repository for managing account definitions at runtime + * @param type the account type supported by this source (must be annotated with {@link + * JsonTypeName}) + * @param additionalSources the list of other credential definition sources to list accounts from + */ + public AccountDefinitionSource( + AccountDefinitionRepository repository, + Class<T> type, + List<CredentialsDefinitionSource<T>> additionalSources) { + String typeName = AccountDefinitionTypes.getCredentialsTypeName(type); + Objects.requireNonNull( + typeName, () -> "Class " + type + " is not annotated with type discriminator"); + List<CredentialsDefinitionSource<T>> sources = new ArrayList<>(additionalSources.size() + 1); + sources.add( + () -> + repository.listByType(typeName).stream().map(type::cast).collect(Collectors.toList())); + sources.addAll(additionalSources); + this.sources = List.copyOf(sources); + } + + @Override + public List<T> getCredentialsDefinitions() { + Set<String> seenAccountNames = new HashSet<>(); + return sources.stream() + .flatMap(source -> source.getCredentialsDefinitions().stream()) + .filter( + definition -> { + var name = definition.getName(); + if (seenAccountNames.add(name)) { + return true; + } else { + LOGGER.warn( + "Duplicate account name detected ({}).
Skipping this definition.", name); + return false; + } + }) + .collect(Collectors.toList()); + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionTypes.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionTypes.java new file mode 100644 index 00000000000..19cbfaf523f --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionTypes.java @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Apple, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.experimental.UtilityClass; + +@UtilityClass +public class AccountDefinitionTypes { + @Nullable + public static String getCredentialsTypeName( + @Nonnull Class credentialsType) { + // TODO(jvz): migrate to @CredentialsType::value after + // https://github.com/spinnaker/kork/pull/958 is merged + JsonTypeName annotation = credentialsType.getAnnotation(JsonTypeName.class); + return annotation != null ? annotation.value() : null; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountSecurityPolicy.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountSecurityPolicy.java new file mode 100644 index 00000000000..1d9edfc6496 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AccountSecurityPolicy.java @@ -0,0 +1,40 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Set; + +/** Provides account authorization checks and related security abstractions on Fiat when enabled. */ +@NonnullByDefault +public interface AccountSecurityPolicy { + + /** Indicates if the provided user is an admin. */ + boolean isAdmin(String username); + + /** Indicates if the provided user is an account manager. */ + boolean isAccountManager(String username); + + /** Returns the set of roles assigned to the provided user. */ + Set getRoles(String username); + + /** Indicates if the provided user can use the provided account. 
*/ + boolean canUseAccount(String username, String account); + + /** Indicates if the provided user can modify the provided account. */ + boolean canModifyAccount(String username, String account); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AllowAllAccountSecurityPolicy.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AllowAllAccountSecurityPolicy.java new file mode 100644 index 00000000000..58634dcf690 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AllowAllAccountSecurityPolicy.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Set; + +@NonnullByDefault +public class AllowAllAccountSecurityPolicy implements AccountSecurityPolicy { + @Override + public boolean isAdmin(String username) { + return true; + } + + @Override + public boolean isAccountManager(String username) { + return true; + } + + @Override + public Set<String> getRoles(String username) { + return Set.of(); + } + + @Override + public boolean canUseAccount(String username, String account) { + return true; + } + + @Override + public boolean canModifyAccount(String username, String account) { + return true; + } +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AuthorizedRolesExtractor.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AuthorizedRolesExtractor.java new file mode 100644 index 00000000000..0d482e06b5c --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/AuthorizedRolesExtractor.java @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Set; + +/** + * Strategy to extract authorized roles from supported types of account definitions. All authorized + * roles returned for supported account definitions are checked for a non-empty intersection with + * the current user's roles.
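+ * + * <p>A minimal sketch of an implementation (the {@code MyAccount} type and its + * {@code getAuthorizedRoles()} accessor are hypothetical): + * + * <pre>{@code + * class MyAccountRolesExtractor implements AuthorizedRolesExtractor { + *   public boolean supportsType(Class<? extends CredentialsDefinition> type) { + *     return MyAccount.class.isAssignableFrom(type); + *   } + * + *   public Set<String> getAuthorizedRoles(CredentialsDefinition definition) { + *     return ((MyAccount) definition).getAuthorizedRoles(); + *   } + * } + * }</pre>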
+ */ +@NonnullByDefault +public interface AuthorizedRolesExtractor { + boolean supportsType(Class<? extends CredentialsDefinition> type); + + Set<String> getAuthorizedRoles(CredentialsDefinition definition); +} diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountSecurityPolicy.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountSecurityPolicy.java new file mode 100644 index 00000000000..302894d1d26 --- /dev/null +++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountSecurityPolicy.java @@ -0,0 +1,89 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.fiat.model.UserPermission; +import com.netflix.spinnaker.fiat.model.resources.ResourceType; +import com.netflix.spinnaker.fiat.model.resources.Role; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import lombok.RequiredArgsConstructor; + +@RequiredArgsConstructor +@NonnullByDefault +public class DefaultAccountSecurityPolicy implements AccountSecurityPolicy { + private final FiatPermissionEvaluator permissionEvaluator; + + @Override + public boolean isAdmin(String username) { + return Optional.ofNullable(permissionEvaluator.getPermission(username)) + .filter(UserPermission.View::isAdmin) + .isPresent(); + } + + @Override + public boolean isAccountManager(String username) { + return Optional.ofNullable(permissionEvaluator.getPermission(username)) + .filter(permission -> isAccountManager(permission) || permission.isAdmin()) + .isPresent(); + } + + @Override + public Set<String> getRoles(String username) { + return Optional.ofNullable(permissionEvaluator.getPermission(username)).stream() + .flatMap(permission -> permission.getRoles().stream().map(Role.View::getName)) + .collect(Collectors.toSet()); + } + + @Override + public boolean canUseAccount(@Nonnull String username, @Nonnull String account) { + // note that WRITE permissions are required in order to do anything with an account as the READ + // permission is only used for certain UI items related to the account + return Optional.ofNullable(permissionEvaluator.getPermission(username)) + .filter( + permission -> + permission.isAdmin() + || permissionEvaluator.hasPermission( + username, account, ResourceType.ACCOUNT.getName(), Authorization.WRITE)) + .isPresent(); + } + + @Override + public boolean canModifyAccount(@Nonnull String username, @Nonnull String account) { + // note that WRITE permissions are required in order to do anything with an account as the READ + // permission is only used for certain UI items related to the account + return Optional.ofNullable(permissionEvaluator.getPermission(username))
+        .filter(
+            permission ->
+                permission.isAdmin()
+                    || (isAccountManager(permission)
+                        && permissionEvaluator.hasPermission(
+                            username, account, ResourceType.ACCOUNT.getName(), Authorization.WRITE)))
+        .isPresent();
+  }
+
+  private static boolean isAccountManager(UserPermission.View permission) {
+    return permission.isAccountManager();
+  }
+}
diff --git a/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/tags/EntityTagger.java b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/tags/EntityTagger.java
new file mode 100644
index 00000000000..7a04175c2ca
--- /dev/null
+++ b/clouddriver-core/src/main/java/com/netflix/spinnaker/clouddriver/tags/EntityTagger.java
@@ -0,0 +1,58 @@
+package com.netflix.spinnaker.clouddriver.tags;
+
+import com.netflix.spinnaker.clouddriver.model.EntityTags;
+import java.util.Collection;
+
+/** Provides a mechanism for attaching arbitrary metadata to resources within cloud providers. */
+public interface EntityTagger {
+
+  String ENTITY_TYPE_SERVER_GROUP = "servergroup";
+  String ENTITY_TYPE_CLUSTER = "cluster";
+
+  void alert(
+      String cloudProvider,
+      String accountId,
+      String region,
+      String category,
+      String entityType,
+      String entityId,
+      String key,
+      String value,
+      Long timestamp);
+
+  void notice(
+      String cloudProvider,
+      String accountId,
+      String region,
+      String category,
+      String entityType,
+      String entityId,
+      String key,
+      String value,
+      Long timestamp);
+
+  void tag(
+      String cloudProvider,
+      String accountId,
+      String region,
+      String namespace,
+      String entityType,
+      String entityId,
+      String tagName,
+      Object value,
+      Long timestamp);
+
+  Collection<EntityTags> taggedEntities(
+      String cloudProvider, String accountId, String entityType, String tagName, int maxResults);
+
+  void deleteAll(
+      String cloudProvider, String accountId, String region, String entityType, String entityId);
+
+  void delete(
+      String cloudProvider,
+      String accountId,
+      String region,
+      String entityType,
+      String entityId,
+      String tagName);
+}
diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderSpec.groovy
index 59289b8629f..699c54689aa 100644
--- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderSpec.groovy
+++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderSpec.groovy
@@ -71,12 +71,7 @@ class CatsSearchProviderSpec extends Specification {
 
     then:
     catsSearchProvider.cachedIdentifiersByType.get() == [
-      "instances": instanceIdentifiers.collect {
-        [
-          originalKey: it.toLowerCase(),
-          "_id"      : it.toLowerCase()
-        ]
-      }
+      "instances": instanceIdentifiers.collect { it.toLowerCase() }
     ]
   }
 
diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/core/ProjectClustersServiceSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/core/ProjectClustersServiceSpec.groovy
new file mode 100644
index 00000000000..93e31c29005
--- /dev/null
+++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/core/ProjectClustersServiceSpec.groovy
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.core
+
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.netflix.spinnaker.clouddriver.core.services.Front50Service
+import com.netflix.spinnaker.clouddriver.model.Cluster
+import com.netflix.spinnaker.clouddriver.model.ClusterProvider
+import com.netflix.spinnaker.clouddriver.model.LoadBalancer
+import com.netflix.spinnaker.clouddriver.model.ServerGroup
+import spock.lang.Shared
+import spock.lang.Specification
+
+import javax.inject.Provider
+
+import static com.netflix.spinnaker.clouddriver.core.ProjectClustersService.ClusterModel
+
+class ProjectClustersServiceSpec extends Specification {
+
+  @Shared
+  ProjectClustersService subject
+
+  @Shared
+  Front50Service front50Service
+
+  @Shared
+  ClusterProvider clusterProvider
+
+  @Shared
+  Map projectConfig = [
+    name  : "Spinnaker",
+    config: [
+      applications: ["orca", "deck"],
+      clusters    : []
+    ]
+  ]
+
+  @Shared
+  List allowList = ["Spinnaker"]
+
+  def setup() {
+    front50Service = Mock()
+    clusterProvider = Mock()
+
+    subject = new ProjectClustersService(
+      front50Service,
+      new ObjectMapper(),
+      new Provider<List<ClusterProvider>>() {
+        @Override
+        List<ClusterProvider> get() {
+          return [clusterProvider]
+        }
+      }
+    )
+  }
+
+  void "returns an empty list without trying to retrieve applications when no clusters are configured"() {
+    when:
+    def result = subject.getProjectClusters(allowList)
+
+    then:
+    result["Spinnaker"].isEmpty()
+    1 * front50Service.getProject(_) >> { projectConfig }
+    0 * _
+  }
+
+  void "builds the very specific model we probably want for the project dashboard"() {
+    projectConfig.config.clusters = [
+      [account: "prod", stack: "main"]
+    ]
+
+    when:
+    def result = subject.getProjectClusters(allowList)
+    def clusters = result["Spinnaker"]
+
+    then:
+    clusters.size() == 1
+    clusters[0].account == "prod"
+    clusters[0].stack == "main"
+    clusters[0].detail == null
+
+    clusters[0].applications[0].application == "orca"
+    clusters[0].applications[0].lastPush == 2L
+    clusters[0].applications[0].clusters[0].region == "us-east-1"
+    clusters[0].applications[0].clusters[0].lastPush == 2L
+    clusters[0].applications[0].clusters[0].instanceCounts.total == 1
+    clusters[0].applications[0].clusters[0].instanceCounts.up == 1
+
+    clusters[0].applications[1].application == "deck"
+    clusters[0].applications[1].lastPush == 1L
+    clusters[0].applications[1].clusters[0].region == "us-west-1"
+    clusters[0].applications[1].clusters[0].lastPush == 1L
+    clusters[0].applications[1].clusters[0].instanceCounts.total == 2
+    clusters[0].applications[1].clusters[0].instanceCounts.down == 1
+    clusters[0].applications[1].clusters[0].instanceCounts.up == 1
+
+    1 * front50Service.getProject(_) >> { projectConfig }
+    1 * clusterProvider.getClusterSummaries("orca") >> [
+      prod: [new TestCluster(
+        name: "orca-main",
+        accountName: "prod",
+        serverGroups: []
+      )] as Set
+    ]
+    1 * clusterProvider.getCluster("orca", "prod", "orca-main") >> new TestCluster(
+      name: "orca-main",
+      accountName: "prod",
+      serverGroups: [
+        makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 2L, new ServerGroup.InstanceCounts(total: 1, up: 1))
+ ] + ) + + 1 * clusterProvider.getClusterSummaries("deck") >> [ + prod: [new TestCluster( + name: "deck-main", + accountName: "prod", + serverGroups: [] + )] as Set + ] + 1 * clusterProvider.getCluster("deck", "prod", "deck-main") >> new TestCluster( + name: "deck-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "deck-main-v001", "us-west-1", 31, 1L, new ServerGroup.InstanceCounts(total: 2, up: 1, down: 1)) + ] + ) + + 0 * clusterProvider._ + } + + void "includes all applications if none specified for a cluster"() { + given: + projectConfig.config.clusters = [ + [account: "prod", stack: "main"] + ] + + when: + def result = subject.getProjectClusters(allowList) + def clusters = result["Spinnaker"] + + then: + clusters.size() == 1 + clusters[0].applications.application == ["orca", "deck"] + 1 * front50Service.getProject(_) >> { projectConfig } + 1 * clusterProvider.getClusterSummaries("orca") >> [ + prod: [new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [] + )] as Set + ] + 1 * clusterProvider.getCluster("orca", "prod", "orca-main") >> new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new ServerGroup.InstanceCounts(total: 1, up: 1)) + ] + ) + + 1 * clusterProvider.getClusterSummaries("deck") >> [ + prod: [new TestCluster( + name: "deck-main", + accountName: "prod", + serverGroups: [] + )] as Set + ] + 1 * clusterProvider.getCluster("deck", "prod", "deck-main") >> new TestCluster( + name: "deck-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "deck-main-v001", "us-west-1", 31, 1L, new ServerGroup.InstanceCounts(total: 2, up: 1, down: 1)) + ] + ) + + 0 * clusterProvider._ + } + + + void "only returns specified applications if declared in cluster config"() { + projectConfig.config.clusters = [ + [account: "prod", stack: "main", applications: ["deck"]] + ] + + when: + def result = subject.getProjectClusters(allowList) + def clusters = result["Spinnaker"] + + then: + clusters.size() == 1 + clusters[0].applications.application == ["deck"] + 1 * front50Service.getProject(_) >> { projectConfig } + 1 * clusterProvider.getClusterSummaries("orca") >> [ + prod: [new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [] + )] as Set + ] + 1 * clusterProvider.getCluster("orca", "prod", "orca-main") >> new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new ServerGroup.InstanceCounts(total: 1, up: 1)) + ] + ) + + 1 * clusterProvider.getClusterSummaries("deck") >> [ + prod: [new TestCluster( + name: "deck-main", + accountName: "prod", + serverGroups: [] + )] as Set + ] + 1 * clusterProvider.getCluster("deck", "prod", "deck-main") >> new TestCluster( + name: "deck-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "deck-main-v001", "us-west-1", 31, 1L, new ServerGroup.InstanceCounts(total: 2, up: 1, down: 1)) + ] + ) + + 0 * clusterProvider._ + } + + void "includes all clusters on stack wildcard"() { + projectConfig.config.clusters = [ + [account: "prod", stack: "*", applications: ["orca"]] + ] + + when: + def result = subject.getProjectClusters(allowList) + def clusters = result["Spinnaker"] + + then: + clusters.size() == 1 + clusters[0].applications.application == ["orca"] + clusters[0].applications[0].lastPush == 5L + clusters[0].applications[0].clusters.size() == 2 + clusters[0].instanceCounts.total == 
2 + clusters[0].instanceCounts.up == 2 + clusters[0].instanceCounts.starting == 0 + + 1 * front50Service.getProject(_) >> { projectConfig } + 1 * clusterProvider.getClusterSummaries("deck") >> [:] + 1 * clusterProvider.getClusterSummaries("orca") >> [ + prod: [ + new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [] + ), + new TestCluster( + name: "orca-test", + accountName: "prod", + serverGroups: [] + ), + new TestCluster( + name: "orca--foo", + accountName: "prod", + serverGroups: [] + ), + ] as Set + ] + + 1 * clusterProvider.getCluster("orca", "prod", "orca-main") >> new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new ServerGroup.InstanceCounts(total: 1, up: 1)) + ] + ) + 1 * clusterProvider.getCluster("orca", "prod", "orca-test") >> new TestCluster( + name: "orca-test", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "orca-test-v001", "us-west-1", 3, 5L, new ServerGroup.InstanceCounts(total: 1, up: 1)) + ] + ) + 0 * clusterProvider._ + } + + void "excludes disabled server groups"() { + projectConfig.config.clusters = [ + [account: "prod", stack: "main", applications: ["orca"]] + ] + + TestServerGroup disabledServerGroup = makeServerGroup("prod", "orca-main-v003", "us-east-1", 5, 5L, new ServerGroup.InstanceCounts(total: 1, up: 1)) + disabledServerGroup.disabled = true + + when: + def result = subject.getProjectClusters(allowList) + def clusters = result["Spinnaker"] + + then: + clusters.size() == 1 + clusters[0].applications.application == ["orca"] + clusters[0].applications[0].lastPush == 4L + clusters[0].applications[0].clusters.size() == 1 + clusters[0].instanceCounts.total == 2 + clusters[0].instanceCounts.up == 2 + + 1 * front50Service.getProject(_) >> { projectConfig } + 1 * clusterProvider.getClusterSummaries("deck") >> [:] + 1 * clusterProvider.getClusterSummaries("orca") >> [ + prod: [ + new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: []), + ] as Set + ] + 1 * clusterProvider.getCluster("orca", "prod", "orca-main") >> new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: [ + makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new ServerGroup.InstanceCounts(total: 1, up: 1)), + makeServerGroup("prod", "orca-main-v002", "us-east-1", 4, 4L, new ServerGroup.InstanceCounts(total: 1, up: 1)), + disabledServerGroup + ]) + + 0 * clusterProvider._ + } + + void "includes exactly matched clusters"() { + projectConfig.config.clusters = [ + [account: "prod", stack: "main", detail: "foo", applications: ["orca"]] + ] + + when: + def result = subject.getProjectClusters(allowList) + def clusters = result["Spinnaker"] + + then: + clusters.size() == 1 + clusters[0].applications.application == ["orca"] + clusters[0].applications[0].lastPush == 1L + clusters[0].applications[0].clusters.size() == 1 + clusters[0].instanceCounts.total == 1 + clusters[0].instanceCounts.up == 1 + + 1 * front50Service.getProject(_) >> { projectConfig } + 1 * clusterProvider.getClusterSummaries("deck") >> [:] + 1 * clusterProvider.getClusterSummaries("orca") >> [ + prod: [ + new TestCluster( + name: "orca-main-foo", + accountName: "prod", + serverGroups: []), + new TestCluster( + name: "orca-main-bar", + accountName: "prod", + serverGroups: []), + new TestCluster( + name: "orca-main", + accountName: "prod", + serverGroups: []), + new TestCluster( + name: "orca--foo", + accountName: "prod", + serverGroups: []), + ] as Set 
+    ]
+    1 * clusterProvider.getCluster("orca", "prod", "orca-main-foo") >> new TestCluster(
+      name: "orca-main-foo",
+      accountName: "prod",
+      serverGroups: [
+        makeServerGroup("prod", "orca-main-foo-v001", "us-east-1", 3, 1L, new ServerGroup.InstanceCounts(total: 1, up: 1)),
+      ])
+    0 * clusterProvider._
+  }
+
+  void "includes all builds per region with latest deployment date, ignoring disabled server groups"() {
+    given:
+    projectConfig.config.clusters = [
+      [account: "prod", stack: "main", detail: "foo", applications: ["orca"]]
+    ]
+    def disabledServerGroup = makeServerGroup("prod", "orca-main-foo-v005", "us-west-1", 6, 7L, new ServerGroup.InstanceCounts(total: 1, up: 1))
+    disabledServerGroup.disabled = true
+
+    when:
+    def result = subject.getProjectClusters(allowList)
+    def clusters = result["Spinnaker"]
+    def eastCluster = clusters[0].applications[0].clusters.find { it.region == "us-east-1" }
+    def westCluster = clusters[0].applications[0].clusters.find { it.region == "us-west-1" }
+
+    then:
+    clusters.size() == 1
+    clusters[0].applications.application == ["orca"]
+    clusters[0].applications[0].lastPush == 6L
+    clusters[0].applications[0].clusters.size() == 2
+
+    eastCluster.lastPush == 1L
+    eastCluster.builds.size() == 1
+    eastCluster.builds[0].buildNumber == "3"
+    eastCluster.builds[0].deployed == 1L
+
+    westCluster.lastPush == 6L
+    westCluster.builds.size() == 2
+    westCluster.builds[0].buildNumber == "4"
+    westCluster.builds[0].deployed == 2L
+    westCluster.builds[1].buildNumber == "5"
+    westCluster.builds[1].deployed == 6L
+
+    clusters[0].instanceCounts.total == 4
+    clusters[0].instanceCounts.up == 4
+
+    eastCluster.instanceCounts.total == 1
+    eastCluster.instanceCounts.up == 1
+
+    westCluster.instanceCounts.total == 3
+    westCluster.instanceCounts.up == 3
+
+    1 * front50Service.getProject(_) >> { projectConfig }
+    1 * clusterProvider.getClusterSummaries("orca") >> [
+      prod: [
+        new TestCluster(
+          name: "orca-main-foo",
+          accountName: "prod",
+          serverGroups: [])
+      ] as Set
+    ]
+    1 * clusterProvider.getCluster("orca", "prod", "orca-main-foo") >> new TestCluster(
+      name: "orca-main-foo",
+      accountName: "prod",
+      serverGroups: [
+        makeServerGroup("prod", "orca-main-foo-v001", "us-east-1", 3, 1L, new ServerGroup.InstanceCounts(total: 1, up: 1)),
+        makeServerGroup("prod", "orca-main-foo-v003", "us-west-1", 4, 2L, new ServerGroup.InstanceCounts(total: 1, up: 1)),
+        makeServerGroup("prod", "orca-main-foo-v004", "us-west-1", 5, 3L, new ServerGroup.InstanceCounts(total: 1, up: 1)),
+        makeServerGroup("prod", "orca-main-foo-v005", "us-west-1", 5, 6L, new ServerGroup.InstanceCounts(total: 1, up: 1)),
+        disabledServerGroup
+      ])
+  }
+
+  private static List<ClusterModel> cachedClusters(Map<String, List<ClusterModel>> result, String projectName) {
+    return result[projectName]
+  }
+
+  TestServerGroup makeServerGroup(String account,
+                                  String name,
+                                  String region,
+                                  Integer buildNumber,
+                                  Long createdTime,
+                                  ServerGroup.InstanceCounts instanceCounts) {
+    def imageSummary = new TestImageSummary(buildInfo: [jenkins: [name: 'job', host: 'host', number: buildNumber.toString()]])
+    new TestServerGroup(
+      name: name,
+      accountName: account,
+      region: region,
+      imageSummary: imageSummary,
+      imagesSummary: new TestImagesSummary(summaries: [imageSummary]),
+      createdTime: createdTime,
+      instanceCounts: instanceCounts,
+    )
+  }
+
+  static class TestImageSummary implements ServerGroup.ImageSummary {
+    String getServerGroupName() { null }
+
+    String getImageId() { null }
+
+    String getImageName() { null }
+
+    Map getImage() { null }
+
+    Map buildInfo
+  }
+
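+  // Minimal ImagesSummary stub; makeServerGroup above wraps each TestImageSummary in one of these.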
+ static class TestImagesSummary implements ServerGroup.ImagesSummary { + List summaries = [] + } + + static class TestServerGroup implements ServerGroup { + String name + String accountName + ServerGroup.ImageSummary imageSummary + ServerGroup.ImagesSummary imagesSummary + Long createdTime + InstanceCounts instanceCounts + String type = "test" + String cloudProvider = "test" + String region + Boolean disabled + Set instances = [] + Set loadBalancers + Set securityGroups + Map launchConfig + ServerGroup.Capacity capacity + Set zones + + Boolean isDisabled() { disabled } + } + + static class TestCluster implements Cluster { + String name + String type = "test" + String accountName + Set serverGroups + Set loadBalancers + } +} diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/DualTaskRepositorySpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/DualTaskRepositorySpec.groovy new file mode 100644 index 00000000000..0a27178221d --- /dev/null +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/DualTaskRepositorySpec.groovy @@ -0,0 +1,76 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.data.task + +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import spock.lang.Specification +import spock.lang.Subject + +class DualTaskRepositorySpec extends Specification { + + TaskRepository primary = Mock() + TaskRepository previous = Mock() + + @Subject + TaskRepository subject = new DualTaskRepository(primary, previous, 4, 1, DynamicConfigService.NOOP) + + void "always creates tasks from primary"() { + when: + subject.create("afternoon", "coffee") + + then: + 1 * primary.create(_, _) + 0 * _ + + when: + subject.create("afternoon", "coffee", "needed") + + then: + 1 * primary.create(_, _, _) + 0 * _ + } + + void "reads from previous if primary is missing a task"() { + given: + def expectedTask = new DefaultTask("1") + + when: + def task = subject.get("1") + + then: + task == expectedTask + 1 * primary.get(_) >> null + 1 * previous.get(_) >> expectedTask + 0 * _ + } + + void "list collates results from both primary and previous"() { + when: + def result = subject.list() + + then: + result*.id.sort() == ["1", "2", "3", "4"] + 1 * primary.list() >> [ + new DefaultTask("1"), + new DefaultTask("2") + ] + 1 * previous.list() >> [ + new DefaultTask("3"), + new DefaultTask("4") + ] + 0 * _ + } +} diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepositorySpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepositorySpec.groovy deleted file mode 100644 index 7ddc78a96f4..00000000000 --- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepositorySpec.groovy +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.data.task - -import com.netflix.spinnaker.clouddriver.data.task.jedis.JedisTask -import spock.lang.Shared -import spock.lang.Specification - -class InMemoryTaskRepositorySpec extends Specification { - - @Shared - InMemoryTaskRepository taskRepository - - def setupSpec() { - resetTaskRepository() - } - - void resetTaskRepository() { - this.taskRepository = new InMemoryTaskRepository() - } - - void cleanup() { - resetTaskRepository() - } - - void "creating a new task returns task with unique id"() { - given: - def t1 = taskRepository.create("TEST", "Test Status") - def t2 = taskRepository.create("TEST", "Test Status") - - expect: - t1.id != t2.id - } - - void "looking up a task by id returns the same task"() { - setup: - def t1 = taskRepository.create("TEST", "Test Status") - - when: - def t2 = taskRepository.get(t1.id) - - then: - t1.is t2 - } - - void "listing tasks returns all avilable tasks"() { - setup: - def t1 = taskRepository.create "TEST", "Test Status" - def t2 = taskRepository.create "TEST", "Test Status" - - when: - def list = taskRepository.list() - - then: - list.containsAll([t1, t2]) - } - - void "returns the previously-created object when passed the same key"() { - given: - def t1 = taskRepository.create "Test", "Test Status", "the-key" - def t2 = taskRepository.create "Test", "Test Status 2", "the-key" - def t3 = taskRepository.create "Test", "Test Status 3", "other-key" - - expect: - t1.id == t2.id - t1.id != t3.id - } -} diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskSpec.groovy index b0ed15d2ad1..8d86566d3bb 100644 --- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskSpec.groovy +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskSpec.groovy @@ -18,7 +18,6 @@ package com.netflix.spinnaker.clouddriver.data.task.jedis import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus import com.netflix.spinnaker.clouddriver.data.task.Status -import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskState import spock.lang.Shared import spock.lang.Specification @@ -37,7 +36,7 @@ class JedisTaskSpec extends Specification { void setup() { repository = Mock(RedisTaskRepository) - task = new JedisTask('666', System.currentTimeMillis(), repository, "owner", false) + task = new JedisTask('666', System.currentTimeMillis(), repository, "owner", "requestId", [] as Set, false) } void 'updating task status adds a history entry'() { @@ -46,7 +45,10 @@ class JedisTaskSpec extends Specification { then: 1 * repository.currentState(task) >> initialState - 1 * repository.addToHistory(initialState.update(newPhase, newState), task) + 1 * repository.addToHistory({ + assert it.phase == 
newPhase + assert it.status == 'end' + }, task) 0 * _ where: @@ -78,7 +80,9 @@ class JedisTaskSpec extends Specification { then: 1 * repository.currentState(task) >> initialState - 1 * repository.addToHistory(initialState.update(TaskState.COMPLETED), task) + 1 * repository.addToHistory({ + assert it.state == TaskState.COMPLETED + }, task) 0 * _ } @@ -88,7 +92,9 @@ class JedisTaskSpec extends Specification { then: 1 * repository.currentState(task) >> initialState - 1 * repository.addToHistory(initialState.update(TaskState.FAILED), task) + 1 * repository.addToHistory({ + assert it.state == TaskState.FAILED + }, task) 0 * _ } diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositorySpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositorySpec.groovy index e6511d197b0..9166bd38554 100644 --- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositorySpec.groovy +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositorySpec.groovy @@ -15,8 +15,7 @@ */ package com.netflix.spinnaker.clouddriver.data.task.jedis -import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus -import com.netflix.spinnaker.clouddriver.data.task.TaskState + import com.netflix.spinnaker.kork.jedis.EmbeddedRedis import com.netflix.spinnaker.kork.jedis.JedisClientDelegate import redis.clients.jedis.Jedis @@ -72,148 +71,9 @@ class RedisTaskRepositorySpec extends Specification { fromOldPool.id == oldPoolTask.id fromOldPool.startTimeMs == oldPoolTask.startTimeMs fromOldPool.status.status == oldPoolTask.status.status - } - - void "creating a new task returns task with unique id"() { - given: - def t1 = taskRepository.create("TEST", "Test Status") - def t2 = taskRepository.create("TEST", "Test Status") - - expect: - t1.id != t2.id - } - - void "looking up a task by id returns the same task"() { - setup: - def t1 = taskRepository.create("TEST", "Test Status") - - when: - def t2 = taskRepository.get(t1.id) - - then: - t1.id == t2.id - t1.status.status == t2.status.status - t1.status.phase == t2.status.phase - t1.startTimeMs == t2.startTimeMs - t1.status.isCompleted() == t2.status.isCompleted() - t1.status.isFailed() == t2.status.isFailed() - !t1.status.isCompleted() - !t1.status.isFailed() - } - - void "complete and failed are preserved"() { - setup: - def t1 = taskRepository.create("TEST", "Test Status") - t1.fail() - - when: - def t2 = taskRepository.get(t1.id) - - then: - t2.status.isCompleted() - t2.status.isFailed() - } - - void "listing tasks returns all running tasks"() { - setup: - def t1 = taskRepository.create "TEST", "Test Status" - def t2 = taskRepository.create "TEST", "Test Status" - - when: - def list = taskRepository.list() - - then: - list*.id.containsAll([t1.id, t2.id]) - - when: - t1.complete() - - then: - !taskRepository.list()*.id.contains(t1.id) - - and: - taskRepository.list()*.id.contains(t2.id) - } - - void "Can add a result object and retrieve it"() { - setup: - JedisTask t1 = taskRepository.create "Test", "Test Status" - final TestObject s = new TestObject(name: 'blimp', value: 'bah') - - expect: - taskRepository.getResultObjects(t1).empty - - when: - taskRepository.addResultObjects([s], t1) - List resultObjects = taskRepository.getResultObjects(t1) - - then: - resultObjects.size() == 1 - resultObjects.first().name == s.name - resultObjects.first().value == s.value - - when: - 
taskRepository.addResultObjects([new TestObject(name: "t1", value: 'h2')], t1) - resultObjects = taskRepository.getResultObjects(t1) - - then: - resultObjects.size() == 2 - } - - void "ResultObjects are retrieved in insertion order"() { - given: - JedisTask t1 = taskRepository.create "Test", "Test Status" - 4.times { - taskRepository.addResultObjects([new TestObject(name: "Object${it}", value: 'value')], t1) - } - expect: - taskRepository.getResultObjects(t1).collect { it.name } == ['Object0', - 'Object1', - 'Object2', - 'Object3'] - } - - void "task history is correctly persisted"() { - given: - JedisTask t1 = taskRepository.create "Test", "Test Status" - def history = taskRepository.getHistory(t1) - - expect: - history.size() == 1 - - when: - taskRepository.addToHistory(new DefaultTaskStatus('Orchestration', 'started', TaskState.STARTED), t1) - history = taskRepository.getHistory(t1) - def newEntry = history[1] - - then: - history.size() == 2 - newEntry.class.simpleName == 'TaskDisplayStatus' - newEntry.phase == 'Orchestration' - newEntry.status == 'started' - - when: - 3.times { - taskRepository.addToHistory(new DefaultTaskStatus('Orchestration', "update ${it}", TaskState.STARTED), t1) - } - - then: - taskRepository.getHistory(t1).size() == 5 - } - - void "returns the previously-created object when passed the same key"() { - given: - JedisTask t1 = taskRepository.create "Test", "Test Status", "the-key" - JedisTask t2 = taskRepository.create "Test", "Test Status 2", "the-key" - JedisTask t3 = taskRepository.create "Test", "Test Status 3", "other-key" - - expect: - t1.id == t2.id - t1.id != t3.id - } - class TestObject { - String name - String value + cleanup: + embeddedRedis1.destroy() + embeddedRedis2.destroy() } } diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperationUnitSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperationUnitSpec.groovy index 3b5bfebcac7..6a76b3e5521 100644 --- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperationUnitSpec.groovy +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeployAtomicOperationUnitSpec.groovy @@ -39,6 +39,7 @@ class DeployAtomicOperationUnitSpec extends Specification { } and: + def deploymentResult = Mock(DeploymentResult) def deployHandlerRegistry = Mock(DeployHandlerRegistry) def testDeployHandler = Mock(DeployHandler) def deployAtomicOperation = new DeployAtomicOperation(deployDescription) @@ -49,7 +50,8 @@ class DeployAtomicOperationUnitSpec extends Specification { then: 1 * deployHandlerRegistry.findHandler(_) >> testDeployHandler - 1 * testDeployHandler.handle(_, _) >> { Mock(DeploymentResult) } + 1 * testDeployHandler.handle(_, _) >> { deploymentResult } + 1 * deploymentResult.normalize() >> { return deploymentResult } deployAtomicOperation.getEvents() == [ createServerGroupEvent ] } diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResultSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResultSpec.groovy new file mode 100644 index 00000000000..9a372142bfe --- /dev/null +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DeploymentResultSpec.groovy @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.deploy + +import spock.lang.Specification; + +class DeploymentResultSpec extends Specification { + + def "should no-op if `deployments` is non-null"() { + given: + def deploymentResult = new DeploymentResult( + deployments: [new DeploymentResult.Deployment()], + serverGroupNameByRegion: ["us-east-1": "app-v001"] + ) + + expect: + deploymentResult.normalize().deployments.size() == 1 + } + + def "should normalize `serverGroupNameByRegion` and `deployedNamesByLocation`"() { + given: + def deploymentResult = new DeploymentResult( + serverGroupNameByRegion: ["us-east-1": "app-v001"], + deployedNamesByLocation: ["us-west-2": ["app-v002", "app-v003"]] + ) + + when: + deploymentResult.normalize() + + then: + (deploymentResult.deployments as List) == [ + new DeploymentResult.Deployment(location: "us-east-1", serverGroupName: "app-v001"), + new DeploymentResult.Deployment(location: "us-west-2", serverGroupName: "app-v002"), + new DeploymentResult.Deployment(location: "us-west-2", serverGroupName: "app-v003") + ] + } +} diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidatorSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidatorSpec.groovy deleted file mode 100644 index ceba214cb3a..00000000000 --- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/deploy/DescriptionValidatorSpec.groovy +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.deploy - -import com.netflix.spinnaker.clouddriver.security.resources.AccountNameable -import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable -import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable -import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator -import org.springframework.security.authentication.TestingAuthenticationToken -import org.springframework.security.core.Authentication -import org.springframework.security.core.context.SecurityContext -import org.springframework.security.core.context.SecurityContextHolder -import org.springframework.validation.Errors -import spock.lang.Specification -import spock.lang.Subject - -class DescriptionValidatorSpec extends Specification { - - def "should authorize passed description"() { - setup: - FiatPermissionEvaluator evaluator = Mock(FiatPermissionEvaluator) - - Authentication auth = new TestingAuthenticationToken(null, null) - SecurityContext ctx = SecurityContextHolder.createEmptyContext() - ctx.setAuthentication(auth) - SecurityContextHolder.setContext(ctx) - - @Subject - TestValidator validator = new TestValidator(permissionEvaluator: evaluator) - - TestDescription description = new TestDescription(account: "testAccount", - application: "testApplication", - names: ["thing1", "thing2"]) - Errors errors = new DescriptionValidationErrors(description) - - when: - validator.authorize(description, errors) - - then: - 4 * evaluator.hasPermission(*_) >> false - 1 * evaluator.storeWholePermission() - errors.allErrors.size() == 4 - } - - class TestValidator extends DescriptionValidator { - - @Override - void validate(List priorDescriptions, TestDescription description, Errors errors) { - } - } - - class TestDescription implements AccountNameable, ApplicationNameable, ResourcesNameable { - String account - String application - List names - } -} diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolverSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolverSpec.groovy index 383c9772e39..1df8ec9c12e 100644 --- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolverSpec.groovy +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/helpers/AbstractServerGroupNameResolverSpec.groovy @@ -155,7 +155,7 @@ class AbstractServerGroupNameResolverSpec extends Specification { then: IllegalArgumentException e = thrown() - e.message == "(Use alphanumeric characters only)" + e.message == /Invalid detail east!, detail may only contain -a-zA-Z0-9._~\^/ } void "application, stack, and freeform details make up the asg name"() { diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/model/SimpleInstance.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/model/SimpleInstance.groovy new file mode 100644 index 00000000000..cb7ca4ca314 --- /dev/null +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/model/SimpleInstance.groovy @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.model
+
+class SimpleInstance implements Instance {
+  String name = UUID.randomUUID().toString()
+  HealthState healthState
+  Long launchTime
+  String zone
+  List<Map<String, Object>> health
+  String providerType
+  String cloudProvider
+}
diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/model/SimpleServerGroup.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/model/SimpleServerGroup.groovy
new file mode 100644
index 00000000000..266529baf44
--- /dev/null
+++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/model/SimpleServerGroup.groovy
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.model
+
+class SimpleServerGroup implements ServerGroup {
+  String name
+  String type
+  String cloudProvider
+  String region
+  Boolean disabled
+  Long createdTime
+  Set zones
+  Set instances
+  Set loadBalancers
+  Set securityGroups
+  Map launchConfig
+  InstanceCounts instanceCounts
+  Capacity capacity
+  ImagesSummary imagesSummary
+  ImageSummary imageSummary
+
+  Boolean isDisabled() {
+    disabled
+  }
+}
diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistrySpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistrySpec.groovy
index a0e7a1ce2a1..7b056249c37 100644
--- a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistrySpec.groovy
+++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/AnnotationsBasedAtomicOperationsRegistrySpec.groovy
@@ -18,12 +18,12 @@ package com.netflix.spinnaker.clouddriver.orchestration
 
 import com.netflix.spinnaker.clouddriver.core.CloudProvider
 import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator
-import com.netflix.spinnaker.clouddriver.security.ProviderVersion
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.orchestration.OperationDescription
 import org.springframework.beans.factory.NoSuchBeanDefinitionException
 import org.springframework.context.annotation.AnnotationConfigApplicationContext
 import org.springframework.context.annotation.Bean
 import org.springframework.context.annotation.Configuration
-import org.springframework.validation.Errors
 import spock.lang.Specification
 import spock.lang.Subject
 
@@ -39,7 +39,7 @@ class
AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { void 'annotations based registry should return the converter if the specified name matches the component name'() { when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('operationOldDescription', null, ProviderVersion.v1) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('operationOldDescription', null) then: noExceptionThrown() @@ -47,19 +47,9 @@ class AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { converter instanceof TestConverter } - void 'annotations based registry should fail to return the converter if the specified name matches the component name but an incorrect version is used'() { + void 'annotations based registry should return the converter that matches the AtomicOperationDescription name and cloud provider'() { when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('operationOldDescription', null, ProviderVersion.v2) - - then: - thrown(AtomicOperationConverterNotFoundException) - converter == null - } - - - void 'annotations based registry should return the converter that matches the AtomicOperationDescription name and cloud provider and default version'() { - when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('operationDescription', 'test-provider', ProviderVersion.v1) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('operationDescription', 'test-provider') then: noExceptionThrown() @@ -67,19 +57,9 @@ class AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { converter instanceof TestConverter } - void 'annotations based registry should return the converter that matches the AtomicOperationDescription name and cloud provider and version'() { - when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('operationDescription', 'test-provider', ProviderVersion.v2) - - then: - noExceptionThrown() - converter != null - converter instanceof TestConverterV2 - } - void 'annotations based registry should throw a NoSuchBeanDefinitionException if no converter found for given name with no cloud provider specified'() { when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('foo', null, ProviderVersion.v1) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('foo', null) then: thrown(NoSuchBeanDefinitionException) @@ -88,74 +68,70 @@ class AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { void 'annotations based registry should throw an AtomicOperationConverterNotFoundException if no converter found for given name with cloud provider specified'() { when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('foo', 'test-provider', ProviderVersion.v1) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('foo', 'test-provider') then: - thrown(AtomicOperationConverterNotFoundException) + def e = thrown(AtomicOperationConverterNotFoundException) + e.message != null converter == null } - void 'uncallable converter should reject any version'() { - when: - def converter = atomicOperationsRegistry.getAtomicOperationConverter('noOperationDescription', 'test-provider', ProviderVersion.v1) - - then: - thrown(AtomicOperationConverterNotFoundException) - converter == null - + void 'annotations based registry should return the validator if the specified name matches the component name'() { when: - converter = 
atomicOperationsRegistry.getAtomicOperationConverter('noOperationDescription', 'test-provider', ProviderVersion.v2) + def compositeValidator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('operationOldDescriptionValidator', null) then: - thrown(AtomicOperationConverterNotFoundException) - converter == null + noExceptionThrown() + compositeValidator != null + compositeValidator instanceof CompositeDescriptionValidator + compositeValidator.getValidator() instanceof TestValidator } - void 'annotations based registry should return the validator if the specified name matches the component name'() { + void 'annotations based registry should return a composite validator that contains the validator that matches the AtomicOperationDescription name and cloud provider'() { when: - def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('operationOldDescriptionValidator', null, ProviderVersion.v1) + def compositeValidator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('operationDescriptionValidator', 'test-provider') then: noExceptionThrown() - validator != null - validator instanceof TestValidator + compositeValidator != null + compositeValidator instanceof CompositeDescriptionValidator + compositeValidator.getValidator() instanceof TestValidator } - void 'annotations based registry should return the validator that matches the AtomicOperationDescription name and cloud provider'() { + void 'annotations based registry should return a null if no validator found for given name with no cloud provider specified'() { when: - def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('operationDescriptionValidator', 'test-provider', ProviderVersion.v1) + def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('foo', null) then: noExceptionThrown() - validator != null - validator instanceof TestValidator + validator == null } - void 'annotations based registry should return a null if no validator found for given name with no cloud provider specified'() { + void 'annotations based registry should return a composite validator with a null validator if no validator found for given name with cloud provider specified'() { when: - def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('foo', null, ProviderVersion.v1) + def compositeValidator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('foo', 'test-provider') then: noExceptionThrown() - validator == null + compositeValidator != null + compositeValidator instanceof CompositeDescriptionValidator + compositeValidator.getValidator() == null } - void 'annotations based registry should return a null if no validator found for given name with no version specified'() { + void 'should return matching converter based on description version'() { when: - def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('foo', 'test-provider', null) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('versionedDescription', 'test-provider') then: noExceptionThrown() - validator == null - } + converter instanceof VersionedDescriptionConverter - void 'annotations based registry should return a null if no validator found for given name with cloud provider specified'() { when: - def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator('foo', 'test-provider', ProviderVersion.v1) + converter = atomicOperationsRegistry.getAtomicOperationConverter('versionedDescription@v2', 
'test-provider') then: noExceptionThrown() - validator == null + converter instanceof VersionedDescriptionV2Converter } static class MyCloudProvider implements CloudProvider { @@ -171,24 +147,19 @@ class AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { new TestConverter() } - @Bean(name = "operationOldDescriptionV2") - AtomicOperationConverter testConverterV2() { - new TestConverterV2() - } - - @Bean(name = "uncallableOperation") - AtomicOperationConverter testNothingConverter() { - new TestNothingConverter() - } - @Bean(name = "operationOldDescriptionValidator") DescriptionValidator descriptionValidator() { new TestValidator() } - @Bean(name = "operationOldDescriptionValidatorV2") - DescriptionValidator descriptionValidatorV2() { - new TestValidatorV2() + @Bean(name = "versionedConverterV1") + AtomicOperationConverter versionedConverter() { + new VersionedDescriptionConverter() + } + + @Bean(name = "versionedConverterV2") + AtomicOperationConverter versionedConverterV2() { + new VersionedDescriptionV2Converter() } } @@ -199,7 +170,7 @@ class AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { return null } @Override - Object convertDescription(Map input) { + OperationDescription convertDescription(Map input) { return null } } @@ -207,50 +178,41 @@ class AnnotationsBasedAtomicOperationsRegistrySpec extends Specification { @TestProviderOperation("operationDescription") static class TestValidator extends DescriptionValidator { @Override - void validate(List priorDescriptions, Object description, Errors errors) { + void validate(List priorDescriptions, Object description, ValidationErrors errors) { } } - @TestProviderOperation("operationDescription") - static class TestConverterV2 implements AtomicOperationConverter { + @TestProviderOperation("versionedDescription") + static class VersionedDescriptionConverter implements AtomicOperationConverter { @Override AtomicOperation convertOperation(Map input) { return null } @Override - Object convertDescription(Map input) { + OperationDescription convertDescription(Map input) { return null } @Override - boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2 + boolean acceptsVersion(String version) { + return version == null || version == "v1" } } - @TestProviderOperation("operationDescription") - static class TestValidatorV2 extends DescriptionValidator { - @Override - void validate(List priorDescriptions, Object description, Errors errors) { - } - @Override - boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2 - } - } - - @TestProviderOperation("noOperationDescription") - static class TestNothingConverter implements AtomicOperationConverter { + @TestProviderOperation("versionedDescription@v2") + static class VersionedDescriptionV2Converter implements AtomicOperationConverter { @Override AtomicOperation convertOperation(Map input) { return null } @Override - Object convertDescription(Map input) { + OperationDescription convertDescription(Map input) { return null } @Override - boolean acceptsVersion(ProviderVersion version) { - return false + boolean acceptsVersion(String version) { + return version == "v2" } } + + } diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessorSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessorSpec.groovy index 4ec2a55a137..8b8000e1999 100644 --- 
a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessorSpec.groovy +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/DefaultOrchestrationProcessorSpec.groovy @@ -16,9 +16,16 @@ package com.netflix.spinnaker.clouddriver.orchestration -import com.netflix.spectator.api.Spectator +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.config.ExceptionClassifierConfigurationProperties import com.netflix.spinnaker.clouddriver.data.task.DefaultTask +import com.netflix.spinnaker.clouddriver.data.task.SagaId import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.web.context.AuthenticatedRequestContextProvider +import com.netflix.spinnaker.kork.web.exceptions.ExceptionMessageDecorator +import com.netflix.spinnaker.kork.web.exceptions.ExceptionSummaryService import com.netflix.spinnaker.security.AuthenticatedRequest import org.slf4j.MDC import org.springframework.beans.factory.config.AutowireCapableBeanFactory @@ -26,6 +33,7 @@ import org.springframework.context.ApplicationContext import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject +import spock.lang.Unroll import java.util.concurrent.TimeUnit @@ -39,17 +47,35 @@ class DefaultOrchestrationProcessorSpec extends Specification { TaskRepository taskRepository + DynamicConfigService dynamicConfigService + + ExceptionSummaryService exceptionSummaryService + String taskKey + private AuthenticatedRequestContextProvider contextProvider def setup() { taskKey = UUID.randomUUID().toString() - processor = new DefaultOrchestrationProcessor() + + taskRepository = Mock(TaskRepository) applicationContext = Mock(ApplicationContext) applicationContext.getAutowireCapableBeanFactory() >> Mock(AutowireCapableBeanFactory) - taskRepository = Mock(TaskRepository) - processor.applicationContext = applicationContext - processor.taskRepository = taskRepository - processor.registry = Spectator.globalRegistry() + dynamicConfigService = Mock(DynamicConfigService) + contextProvider = new AuthenticatedRequestContextProvider() + exceptionSummaryService = new ExceptionSummaryService(Mock(ExceptionMessageDecorator)) + + processor = new DefaultOrchestrationProcessor( + taskRepository, + applicationContext, + new NoopRegistry(), + Optional.empty(), + new ObjectMapper(), + new ExceptionClassifier(new ExceptionClassifierConfigurationProperties( + retryableClasses: [RetryableException.class.getName()] + ), dynamicConfigService), + contextProvider, + exceptionSummaryService + ) } void "complete the task when everything goes as planned"() { @@ -66,9 +92,18 @@ class DefaultOrchestrationProcessorSpec extends Specification { !task.status.isFailed() } - void "fail the task when exception is thrown"() { + @Unroll + void "fail the task when exception is thrown (#exception.class.simpleName, #sagaId)"() { setup: + dynamicConfigService.getConfig( + String.class, + "clouddriver.exception-classifier.retryable-exceptions", + 'com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessorSpec$RetryableException' + ) >> { 'com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessorSpec$SomeDynamicException,com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessorSpec$AnotherDynamicException' } def task = new DefaultTask("1") + if 
(sagaId) { + task.sagaIdentifiers.add(sagaId) + } def atomicOperation = Mock(AtomicOperation) when: @@ -76,8 +111,20 @@ class DefaultOrchestrationProcessorSpec extends Specification { then: 1 * taskRepository.create(_, _, taskKey) >> task - 1 * atomicOperation.operate(_) >> { throw new RuntimeException() } + 1 * atomicOperation.operate(_) >> { throw exception } task.status.isFailed() + task.status.isRetryable() == retryable + + //Tasks without SagaIds (i.e., not a saga) are not retryable + where: + exception | sagaId || retryable + new RuntimeException() | null || false + new RetryableException() | null || false + new RuntimeException() | new SagaId("a", "a") || false + new NonRetryableException() | new SagaId("a", "a") || false + new RetryableException() | new SagaId("a", "a") || true + new SomeDynamicException() | new SagaId("a", "a") || true + new AnotherDynamicException() | new SagaId("a", "a") || true } void "failure should be logged in the result objects"() { @@ -114,22 +161,28 @@ class DefaultOrchestrationProcessorSpec extends Specification { void "should clear MDC thread local"() { given: + def context = contextProvider.get() MDC.put("myKey", "myValue") - MDC.put(AuthenticatedRequest.SPINNAKER_ACCOUNTS, "myAccounts") - MDC.put(AuthenticatedRequest.SPINNAKER_USER, "myUser") + context.setAccounts("myAccounts") + context.setUser( "myUser") when: - DefaultOrchestrationProcessor.resetMDC() + processor.clearRequestContext() then: MDC.get("myKey") == "myValue" - MDC.get(AuthenticatedRequest.SPINNAKER_ACCOUNTS) == null - MDC.get(AuthenticatedRequest.SPINNAKER_USER) == null + !context.getAccounts().isPresent() + !context.getUser().isPresent() } private void submitAndWait(AtomicOperation atomicOp) { - processor.process([atomicOp], taskKey) + processor.process("cloudProvider", [atomicOp], taskKey) processor.executorService.shutdown() processor.executorService.awaitTermination(5, TimeUnit.SECONDS) } + + private static class NonRetryableException extends RuntimeException {} + private static class RetryableException extends RuntimeException {} + private static class SomeDynamicException extends RuntimeException {} + private static class AnotherDynamicException extends RuntimeException {} } diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/OperationsServiceSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/OperationsServiceSpec.groovy new file mode 100644 index 00000000000..25ec7b91d2d --- /dev/null +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/orchestration/OperationsServiceSpec.groovy @@ -0,0 +1,173 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.orchestration + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.deploy.DeployDescription +import com.netflix.spinnaker.clouddriver.deploy.DefaultDescriptionAuthorizer +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.kork.web.exceptions.ExceptionMessageDecorator +import com.netflix.spinnaker.orchestration.OperationDescription +import org.springframework.context.annotation.AnnotationConfigApplicationContext +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class OperationsServiceSpec extends Specification { + + DefaultDescriptionAuthorizer descriptionAuthorizer = Mock(DefaultDescriptionAuthorizer) + ExceptionMessageDecorator exceptionMessageDecorator = Mock(ExceptionMessageDecorator) + + @Subject + OperationsService operationsService = new OperationsService( + new AnnotationsBasedAtomicOperationsRegistry( + applicationContext: new AnnotationConfigApplicationContext(TestConfig), + cloudProviders: [] + ), + [descriptionAuthorizer], + Optional.empty(), + Optional.empty(), + Mock(AccountCredentialsRepository), + Optional.of(Mock(SagaRepository)), + new NoopRegistry(), + new ObjectMapper(), + exceptionMessageDecorator + ) + + void "many operation descriptions are resolved and returned in order"() { + when: + def atomicOperations = operationsService.collectAtomicOperations([[desc1: [:], desc2: [:]]]) + + then: + atomicOperations.flatten()*.getClass() == [Op1, Op2] + } + + @Unroll + void "should only pre-process inputs of supported description classes"() { + when: + def output = operationsService.processDescriptionInput( + descriptionPreProcessors as Collection, + converter, + descriptionInput + ) + + then: + output == expectedOutput + + where: + descriptionPreProcessors | converter | descriptionInput || expectedOutput + [] | new Provider2DeployAtomicOperationConverter() | ["a": "b"] || ["a": "b"] + [provider1PreProcessor] | new Provider2DeployAtomicOperationConverter() | ["a": "b"] || ["a": "b"] + [provider1PreProcessor, provider2PreProcessor] | new Provider2DeployAtomicOperationConverter() | ["provider2": "false"] || ["additionalKey": "additionalVal", "provider2": "true"] + } + + @Shared + def provider1PreProcessor = new AtomicOperationDescriptionPreProcessor() { + @Override + boolean supports(Class descriptionClass) { + return descriptionClass == Provider1DeployDescription + } + + @Override + Map process(Map description) { + return ["provider1": "true"] + } + } + + @Shared + def provider2PreProcessor = new AtomicOperationDescriptionPreProcessor() { + @Override + boolean supports(Class descriptionClass) { + return descriptionClass == Provider2DeployDescription + } + + @Override + Map process(Map description) { + return new HashMap(description) + [ + "additionalKey": "additionalVal", + "provider2" : "true" + ] + } + } + + @Configuration + static class TestConfig { + @Bean + Converter1 desc1() { + new Converter1() + } + + @Bean + Converter2 desc2() { + new Converter2() + } + } + + private static class Provider1DeployDescription implements DeployDescription { + } + + private static class Provider2DeployDescription implements DeployDescription { + } + + 
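// Editor's sketch (an assumption, not part of the original commit): based on the where-table + // above, processDescriptionInput appears to pipe the raw input map through each registered + // pre-processor whose supports() matches the converter's description type, in order: + //   def input = [provider2: "false"] + //   [provider1PreProcessor, provider2PreProcessor] + //       .findAll { it.supports(Provider2DeployDescription) } + //       .each { input = it.process(input) }   // => [additionalKey: "additionalVal", provider2: "true"] + 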
private static class Provider2DeployAtomicOperationConverter implements AtomicOperationConverter { + @Override + AtomicOperation convertOperation(Map input) { + throw new UnsupportedOperationException() + } + + Provider2DeployDescription convertDescription(Map input) { + return new ObjectMapper().convertValue(input, Provider2DeployDescription) + } + } + + + static class Converter1 implements AtomicOperationConverter { + AtomicOperation convertOperation(Map input) { + new Op1() + } + + OperationDescription convertDescription(Map input) { + return null + } + } + + static class Converter2 implements AtomicOperationConverter { + AtomicOperation convertOperation(Map input) { + new Op2() + } + + OperationDescription convertDescription(Map input) { + return null + } + } + + static class Op1 implements AtomicOperation { + Object operate(List priorOutputs) { + return null + } + } + + static class Op2 implements AtomicOperation { + Object operate(List priorOutputs) { + return null + } + } +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributorSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributorSpec.groovy similarity index 100% rename from clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributorSpec.groovy rename to clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/RequestDistributorSpec.groovy diff --git a/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/safety/TrafficGuardSpec.groovy b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/safety/TrafficGuardSpec.groovy new file mode 100644 index 00000000000..0d4eeed388b --- /dev/null +++ b/clouddriver-core/src/test/groovy/com/netflix/spinnaker/clouddriver/safety/TrafficGuardSpec.groovy @@ -0,0 +1,616 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.safety + +import com.netflix.frigga.Names +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.core.services.Front50Service +import com.netflix.spinnaker.clouddriver.exceptions.TrafficGuardException +import com.netflix.spinnaker.clouddriver.model.Cluster +import com.netflix.spinnaker.clouddriver.model.ClusterProvider +import com.netflix.spinnaker.clouddriver.model.HealthState +import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.clouddriver.model.SimpleInstance +import com.netflix.spinnaker.clouddriver.model.SimpleServerGroup +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.moniker.Moniker +import retrofit.RetrofitError +import retrofit.client.Response +import spock.lang.Ignore +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class TrafficGuardSpec extends Specification { + + ClusterProvider clusterProvider = Mock() { + getCloudProviderId() >> "aws" + } + + Front50Service front50Service = Mock(Front50Service) + Registry registry = new NoopRegistry() + DynamicConfigService dynamicConfigService = Mock(DynamicConfigService) + + @Shared String location = "us-east-1" + @Shared Moniker moniker = new Moniker(app: "app", stack: "foo", cluster: "app-foo", sequence: 1) + @Shared String targetName = "app-foo-v001" + @Shared String otherName = "app-foo-v000" + @Shared Map application = [:] + + @Subject + TrafficGuard trafficGuard = new TrafficGuard( + Collections.singletonList(clusterProvider), + Optional.of(front50Service), + registry, + dynamicConfigService + ) + + void setup() { + application.clear() + } + + def makeServerGroup(String name, int up, int down = 0, Map overrides = [:]) { + Set instances = [] + + if (up > 0) { + instances.addAll((1..up).collect { new SimpleInstance(healthState: HealthState.Up) }) + } + if (down > 0) { + instances.addAll((1..down).collect { new SimpleInstance(healthState: HealthState.OutOfService )}) + } + + ServerGroup serverGroup = new SimpleServerGroup([ + region : 'us-east-1', + name : name, + disabled : false, + instances: instances, + capacity : new ServerGroup.Capacity(min: 0, max: 4, desired: 3) + ] + overrides) + return serverGroup + } + + Cluster makeCluster(List serverGroups) { + return new Cluster.SimpleCluster( + serverGroups: serverGroups + ) + } + + void "should ignore disabled traffic guards"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo", enabled: false]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1), + makeServerGroup(otherName, 0, 1, [disabled: true]) + ]) + 1 * front50Service.getApplication("app") >> application + } + + void "should throw exception when target server group is the only one enabled in cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + def e = thrown(TrafficGuardException) + e.message.startsWith("This cluster ('app-foo' in test/us-east-1) has traffic guards enabled.") + 1 * front50Service.getApplication("app") >> application + 1 * 
clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1), + makeServerGroup(otherName, 0, 1, [disabled: true]) + ]) + } + + void "should throw exception when target server group can not be found in cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval("app-foo-v999", "test", location, "aws", "x") + + then: + def e = thrown(TrafficGuardException) + e.message.startsWith("Could not find server group 'app-foo-v999'") + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1), + makeServerGroup(otherName, 0, 1, [disabled: true]) + ]) + } + + void "should throw exception when capacity ratio less than configured minimum"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 2), + makeServerGroup(otherName, 1) + ]) + + // configure a minimum desired ratio of 40%, which means going from 3 to 1 instances (33%) is not ok + 1 * dynamicConfigService.getConfig(Double.class, TrafficGuard.MIN_CAPACITY_RATIO, 0d) >> 0.4d + } + + void "should not throw exception when capacity ratio more than configured minimum"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + notThrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 2), + makeServerGroup(otherName, 1) + ]) + + // configure a minimum desired ratio of 25%, which means going from 3 to 1 instances (33%) is ok + 1 * dynamicConfigService.getConfig(Double.class, TrafficGuard.MIN_CAPACITY_RATIO, 0d) >> 0.25d + } + + void "should throw exception when disabling multiple server groups leads to reduced capacity"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + // simulate the case where we have a main server group with 100 instances and a debugging one with 1 instance + // then a red/black operation can lead to the newest (small) one being cloned and everything else disabled + List serverGroupsGoingAway = + [makeServerGroup("app-foo-v000", 100), + makeServerGroup("app-foo-v001", 1)] + + when: + trafficGuard.verifyTrafficRemoval( + serverGroupsGoingAway, + serverGroupsGoingAway + [makeServerGroup("app-foo-v002", 1)], + "test", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * dynamicConfigService.getConfig(Double.class, TrafficGuard.MIN_CAPACITY_RATIO, 0d) >> 0.40d + } + + void "should bypass capacity check for pinned server groups"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + List serverGroupsGoingAway = + [makeServerGroup("app-foo-v000", 3, 0, [capacity: new ServerGroup.Capacity(min: 3, max: 3, desired: 3)]), + makeServerGroup("app-foo-v001", 3, 0, [capacity: new ServerGroup.Capacity(min: 3, max: 3, desired: 3)])] + + when: + trafficGuard.verifyTrafficRemoval( + serverGroupsGoingAway, + serverGroupsGoingAway + [makeServerGroup("app-foo-v002", 1)], + "test", 
"x") + + then: + notThrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + } + + void "should still make sure that capacity does not drop to 0 for pinned server groups"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + List serverGroupsGoingAway = + [makeServerGroup("app-foo-v000", 3, 0, [capacity: new ServerGroup.Capacity(min: 3, max: 3, desired: 3)]), + makeServerGroup("app-foo-v001", 3, 0, [capacity: new ServerGroup.Capacity(min: 3, max: 3, desired: 3)])] + + when: + trafficGuard.verifyTrafficRemoval( + serverGroupsGoingAway, + serverGroupsGoingAway + [makeServerGroup("app-foo-v002", 0)], + "test", "x") + + then: + def e = thrown(TrafficGuardException) + e.message.contains("would leave the cluster with no instances up") + 1 * front50Service.getApplication("app") >> application + } + + @Unroll + def "should still apply capacity check when pinned server groups don't qualify"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval( + serverGroupsGoingAway, + serverGroupsGoingAway + [makeServerGroup("app-foo-v002", 1)], + "test", "x") + + then: + def e = thrown(TrafficGuardException) + e.message.contains("would leave the cluster with 1 instance up") + 1 * front50Service.getApplication("app") >> application + 1 * dynamicConfigService.getConfig(Double.class, TrafficGuard.MIN_CAPACITY_RATIO, 0d) >> 0.4d + + where: + serverGroupsGoingAway << [ + // only one pinned server group going away + [makeServerGroup("app-foo-v000", 100, 0, [capacity: new ServerGroup.Capacity(min: 100, max: 100, desired: 100)])], + + // only some of the server groups going away are pinned + [makeServerGroup("app-foo-v000", 10, 0, [capacity: new ServerGroup.Capacity(min: 10, max: 10, desired: 10)]), + makeServerGroup("app-foo-v001", 10, 0, [capacity: new ServerGroup.Capacity(min: 10, max: 100, desired: 10)])], + + // the pinned server groups have different sizes + [makeServerGroup("app-foo-v000", 10, 0, [capacity: new ServerGroup.Capacity(min: 1, max: 1, desired: 1)]), + makeServerGroup("app-foo-v001", 10, 0, [capacity: new ServerGroup.Capacity(min: 100, max: 100, desired: 100)])] + ] + } + + void "should not throw exception during a regular shrink/disable cluster-wide operation"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + // simulate the case where we have a main server group with 100 instances and a debugging one with 1 instance + // then a red/black operation can lead to the newest (small) one being cloned and everything else disabled + List serverGroupsGoingAway = + [makeServerGroup("app-foo-v000", 0, 100), + makeServerGroup("app-foo-v001", 100, 0)] + + when: + trafficGuard.verifyTrafficRemoval( + serverGroupsGoingAway, + serverGroupsGoingAway + [makeServerGroup("app-foo-v002", 100)], + "test", "x") + + then: + notThrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * dynamicConfigService.getConfig(Double.class, TrafficGuard.MIN_CAPACITY_RATIO, 0d) >> 0.40d + } + + void "should be able to destroy multiple empty or disabled server groups as one operation"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + List serverGroupsGoingAway = + [makeServerGroup("app-foo-v000", 0), + makeServerGroup("app-foo-v001", 0, 3)] + + when: + trafficGuard.verifyTrafficRemoval( + serverGroupsGoingAway, + serverGroupsGoingAway, + "test", "x") + + then: + notThrown(TrafficGuardException) + 1 * 
front50Service.getApplication("app") >> application + } + + void "should throw exception when target server group is the only one in cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1) + ]) + } + + void "should validate location when looking for other enabled server groups in cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1), + makeServerGroup(otherName, 1, 0, [region: 'us-west-1']) + ]) + } + + void "should not throw exception when cluster has no active instances"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + noExceptionThrown() + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 0, 1) + ]) + } + + void "should validate existence of cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + def e = thrown(TrafficGuardException) + e.message.startsWith('Could not find cluster') + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> null + } + + void "should not throw if another server group is enabled and has instances"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + notThrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1), + makeServerGroup(otherName, 1) + ]) + } + + void "should throw if another server group is enabled but no instances are 'Up'"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyTrafficRemoval(targetName, "test", location, "aws", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getCluster("app", "test", "app-foo", false) >> makeCluster([ + makeServerGroup(targetName, 1), + makeServerGroup(otherName, 0, 1) + ]) + } + + @Unroll + void "hasDisableLock should match on wildcards in stack, detail, account, location"() { + given: + addGuard([account: guardAccount, stack: guardStack, detail: guardDetail, location: guardLocation]) + + when: + boolean result = trafficGuard.hasDisableLock(new Moniker(app: cluster, cluster: cluster), account, location) + + then: + result == expected + 1 * front50Service.getApplication("app") >> application + + where: + cluster | account | guardStack | guardDetail | guardAccount | guardLocation || expected + "app" | "test" | null | null | "test" | "us-east-1" || true // exact 
match + "app" | "test" | "*" | null | "test" | "us-east-1" || true + "app" | "test" | null | "*" | "test" | "us-east-1" || true + "app" | "test" | null | null | "*" | "us-east-1" || true + "app" | "test" | null | null | "test" | "*" || true + "app" | "test" | "*" | "*" | "*" | "*" || true + "app" | "test" | null | null | "prod" | "us-east-1" || false // different account + "app" | "test" | null | null | "test" | "eu-west-1" || false // different location + "app" | "test" | "foo" | null | "test" | "us-east-1" || false // different stack + "app" | "test" | null | "zz" | "test" | "us-east-1" || false // different detail + } + + void "hasDisableLock returns false on missing applications"() { + when: + boolean result = trafficGuard.hasDisableLock(new Moniker(app: "app", cluster: "app"), "test", location) + + then: + result == false + 1 * front50Service.getApplication("app") >> null + } + + void "hasDisableLock returns false on applications with no guards configured"() { + when: + boolean result = trafficGuard.hasDisableLock(new Moniker(app: "app", cluster: "app"), "test", location) + + then: + !application.containsKey("trafficGuards") + result == false + 1 * front50Service.getApplication("app") >> { + throw new SpinnakerHttpException(new RetrofitError(null, null, new Response("http://stash.com", 404, "test reason", [], null), null, null, null, null)) + } + } + + void "throws exception if application retrieval throws an exception"() { + when: + Exception thrownException = new RuntimeException("bad read") + trafficGuard.hasDisableLock(new Moniker(app: "app", cluster: "app"), "test", location) + + then: + thrown(RuntimeException) + 1 * front50Service.getApplication("app") >> { + throw thrownException + } + } + + void "hasDisableLock returns false on applications with empty guards configured"() { + when: + application.put("trafficGuards", []) + boolean result = trafficGuard.hasDisableLock(new Moniker(app: "app", cluster: "app"), "test", location) + + then: + result == false + 1 * front50Service.getApplication("app") >> application + } + + @Ignore("verifyInstanceTermination has not been ported yet") + void "instance termination should fail when last healthy instance in only server group in cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + def targetServerGroup = makeServerGroup(targetName, 0, 0, + [instances: [[name: "i-1", healthState: "Up"], [name: "i-2", healthState: "Down"]]]) + + when: + trafficGuard.verifyInstanceTermination(null, moniker, ["i-1"], "test", location, "aws", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getSearchResults("i-1", "instances", "aws") >> + [[results: [[account: "test", region: location.value, serverGroup: targetName]]]] + 1 * clusterProvider.getTargetServerGroup("test", targetName, location.value, "aws") >> (targetServerGroup) + 1 * clusterProvider.getCluster("app", "test", "app-foo") >> + [serverGroups: [targetServerGroup]] + } + + @Ignore("verifyInstanceTermination has not been ported yet") + void "instance termination should fail when last healthy instance in only active server group in cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + def targetServerGroup = makeServerGroup(targetName, 0, 0, [instances: [[name: "i-1", healthState: "Up"], [name: "i-2", healthState: "Down"]]]) + + when: + trafficGuard.verifyInstanceTermination(null, friggaToMoniker(null), ["i-1"], "test", location, "aws", "x") + + then: + 
thrown(TrafficGuardException) + 1 * front50Service.get("app") >> application + 1 * clusterProvider.getSearchResults("i-1", "instances", "aws") >> + [[results: [[account: "test", region: location.value, serverGroup: targetName]]]] + 1 * clusterProvider.getTargetServerGroup("test", targetName, location.value, "aws") >> (targetServerGroup) + 1 * clusterProvider.getCluster("app", "test", "app-foo") >> [ + serverGroups: [ + targetServerGroup, + makeServerGroup(otherName, 0, 1) + ] + ] + } + + @Ignore("verifyInstanceTermination has not been ported yet") + void "instance termination should succeed when other server group in cluster contains healthy instance"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + def targetServerGroup = makeServerGroup(targetName, 0, 0, [instances: [[name: "i-1", healthState: "Up"], [name: "i-2", healthState: "Down"]]]) + + when: + trafficGuard.verifyInstanceTermination(null, moniker, ["i-1"], "test", location, "aws", "x") + + then: + notThrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getSearchResults("i-1", "instances", "aws") >> + [[results: [[account: "test", region: location.value, serverGroup: targetName]]]] + 1 * clusterProvider.getTargetServerGroup("test", targetName, location.value, "aws") >> (targetServerGroup) + 1 * clusterProvider.getCluster("app", "test", "app-foo") >> [ + serverGroups: [ + targetServerGroup, + makeServerGroup(otherName, 1, 0) + ] + ] + } + + @Ignore("verifyInstanceTermination has not been ported yet") + void "instance termination should fail when trying to terminate all up instances in the cluster"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + def targetServerGroup = makeServerGroup(targetName, 0, 0, [instances: [[name: "i-1", healthState: "Up"], [name: "i-2", healthState: "Up"]]]) + + when: + trafficGuard.verifyInstanceTermination(null, moniker, ["i-1", "i-2"], "test", location, "aws", "x") + + then: + thrown(TrafficGuardException) + 1 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getSearchResults("i-1", "instances", "aws") >> [[results: [[account: "test", region: location.value, serverGroup: targetName]]]] + 1 * clusterProvider.getSearchResults("i-2", "instances", "aws") >> [[results: [[account: "test", region: location.value, serverGroup: targetName]]]] + 1 * clusterProvider.getTargetServerGroup("test", targetName, location.value, "aws") >> (targetServerGroup) + 1 * clusterProvider.getCluster("app", "test", "app-foo") >> [ + serverGroups: [ + targetServerGroup, + makeServerGroup(otherName, 0, 1) + ] + ] + } + + @Ignore("verifyInstanceTermination has not been ported yet") + void "instance termination should succeed when instance is not up, regardless of other instances"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyInstanceTermination(null, moniker, ["i-1"], "test", location, "aws", "x") + + then: + notThrown(TrafficGuardException) + 0 * front50Service.get("app") >> application + 1 * clusterProvider.getSearchResults("i-1", "instances", "aws") >> + [[results: [[account: "test", region: location.value, serverGroup: targetName]]]] + 1 * clusterProvider.getTargetServerGroup("test", targetName, location.value, "aws") >> + (makeServerGroup(targetName, 0, 0, [instances: [[name: "i-1"]]])) + 0 * _ + } + + @Ignore("verifyInstanceTermination has not been ported yet") + void "should avoid searching for instance ids when server 
group provided"() { + given: + addGuard([account: "test", location: "us-east-1", stack: "foo"]) + + when: + trafficGuard.verifyInstanceTermination(targetName, moniker, ["i-1"], "test", location, "aws", "x") + + then: + notThrown(TrafficGuardException) + + // passes with no front50 check because the instance does not have healthState: Up + 0 * front50Service.getApplication("app") >> application + 1 * clusterProvider.getTargetServerGroup("test", targetName, location.value, "aws") >> + (makeServerGroup(targetName, 0, 0, [instances: [[name: "i-1"]]])) + 0 * _ + } + + private void addGuard(Map guard) { + if (!guard.containsKey("enabled")) { + guard.enabled = true + } + application.putIfAbsent("trafficGuards", []) + application.get("trafficGuards") << guard + } + + private static Moniker friggaToMoniker(String friggaName) { + Names names = Names.parseName(friggaName); + return Moniker.builder() + .app(names.getApp()) + .stack(names.getStack()) + .detail(names.getDetail()) + .cluster(names.getCluster()) + .sequence(names.getSequence()) + .build(); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderTest.java new file mode 100644 index 00000000000..fa1edcb47fe --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/CatsSearchProviderTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import org.junit.jupiter.api.Test; +import org.springframework.boot.context.annotation.UserConfigurations; +import org.springframework.boot.test.context.runner.ApplicationContextRunner; +import org.springframework.context.annotation.Bean; + +public class CatsSearchProviderTest { + private final ApplicationContextRunner runner = + new ApplicationContextRunner() + .withPropertyValues("caching.write-enabled=false", "redis.enabled:false") + .withConfiguration(UserConfigurations.of(CacheConfig.class, TestConfiguration.class)); + + @Test + void testCatsSearchProviderBeanIsPresentByDefault() { + runner.run(ctx -> assertThat(ctx).hasSingleBean(CatsSearchProvider.class)); + } + + @Test + void testCatsSearchProviderBeanIsPresentWhenConfiguredInSuchAWay() { + runner + .withPropertyValues("caching.search.enabled=true") + .run(ctx -> assertThat(ctx).hasSingleBean(CatsSearchProvider.class)); + } + + @Test + void testCatsSearchProviderBeanIsNotPresentWhenConfiguredInSuchAWay() { + runner + .withPropertyValues("caching.search.enabled=false") + .run(ctx -> assertThat(ctx).doesNotHaveBean(CatsSearchProvider.class)); + } + + /** + * test class that supplies the minimum set of beans needed to autowire the CatsSearchProvider + * bean and other required beans in the CacheConfig class + */ + static class TestConfiguration { + @Bean + CatsInMemorySearchProperties catsInMemorySearchProperties() { + return new CatsInMemorySearchProperties(); + } + + @Bean + Cache cache() { + return new InMemoryCache(); + } + + @Bean + Registry registry() { + return new NoopRegistry(); + } + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentationTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentationTest.java new file mode 100644 index 00000000000..6c0dd7ae1b6 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/MetricInstrumentationTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2023 JPMorgan Chase & Co. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Timer; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import java.util.Collection; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class MetricInstrumentationTest { + + private Registry registry; + + private MetricInstrumentation metricInstrumentation; + + @BeforeEach + void setup() { + registry = new DefaultRegistry(); + metricInstrumentation = new MetricInstrumentation(registry); + } + + @Test + void test_executionCompleted_hasExpectedMetricLabels() { + // given + String agentType = "test-account-bob/us-east-1/TestCachingAgent"; + String provider = "io.spinnaker.clouddriver.test.TestProvider"; + + Agent agent = + new CachingAgent() { + @Override + public Collection getProvidedDataTypes() { + return List.of(); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + return null; + } + + @Override + public String getAgentType() { + return agentType; + } + + @Override + public String getProviderName() { + return provider; + } + }; + + Id expectedId = + registry + .createId("executionTime") + .withTag("className", MetricInstrumentation.class.getSimpleName()) + .withTag("agent", String.format("TestProvider/%s", agentType)) + .withTag("success", true); + + // when + metricInstrumentation.executionCompleted(agent, 500L); + + // then + Timer timer = registry.timer(expectedId); + assertEquals(expectedId, timer.id()); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/OnDemandTypeTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/OnDemandTypeTest.java new file mode 100644 index 00000000000..777a7960535 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/cache/OnDemandTypeTest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.cache; + +import static org.junit.jupiter.api.Assertions.*; + +import org.junit.jupiter.api.Test; + +class OnDemandTypeTest { + + @Test + void onDemandTypesWithSameValueShouldBeEqual() { + assertEquals(new OnDemandType("instance"), new OnDemandType("instance")); + } + + @Test + void onDemandTypesWithSameValueShouldBeEqualRegardlessOfCase() { + assertEquals(new OnDemandType("Instance"), new OnDemandType("instance")); + } + + @Test + void onDemandTypesWithDifferentValuesShouldNotBeEqual() { + assertNotEquals(new OnDemandType("instance"), new OnDemandType("job")); + } + + @Test + void onDemandTypesWithSameValueShouldHaveSameHashCode() { + assertEquals(new OnDemandType("instance").hashCode(), new OnDemandType("instance").hashCode()); + } + + @Test + void onDemandTypesWithSameValueShouldHaveSameHashCodeRegardlessOfCase() { + assertEquals(new OnDemandType("Instance").hashCode(), new OnDemandType("instance").hashCode()); + } + + @Test + void onDemandTypesWithDifferentValuesShouldHaveDifferentHashCodes() { + assertNotEquals(new OnDemandType("instance").hashCode(), new OnDemandType("job").hashCode()); + } + + @Test + void toStringShouldReturnValue() { + assertEquals(new OnDemandType("instance").toString(), "instance"); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepositoryTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepositoryTest.java new file mode 100644 index 00000000000..c8504ef0324 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/InMemoryTaskRepositoryTest.java @@ -0,0 +1,26 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.data.task; + +import com.netflix.spinnaker.clouddriver.core.test.TaskRepositoryTck; + +public class InMemoryTaskRepositoryTest extends TaskRepositoryTck { + + @Override + protected InMemoryTaskRepository createTaskRepository() { + return new InMemoryTaskRepository(); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskTest.java new file mode 100644 index 00000000000..743f3709873 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/jedis/JedisTaskTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.data.task.jedis; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.Resources; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus; +import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayOutput; +import com.netflix.spinnaker.clouddriver.data.task.TaskState; +import java.nio.charset.StandardCharsets; +import org.assertj.core.api.AssertionsForClassTypes; +import org.junit.jupiter.api.Test; +import org.mockito.InOrder; +import org.mockito.Mockito; + +final class JedisTaskTest { + private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final String PHASE = "DEPLOY"; + + @Test + void serializationTest() throws Exception { + RedisTaskRepository taskRepository = mock(RedisTaskRepository.class); + JedisTask task = + new JedisTask("123", 100, taskRepository, "owner", "requestId", ImmutableSet.of(), false); + + DefaultTaskStatus oldStatus = + DefaultTaskStatus.create(PHASE, "Starting deploy", TaskState.STARTED); + DefaultTaskStatus status = + DefaultTaskStatus.create(PHASE, "Finished deploy", TaskState.COMPLETED); + Object results = + ImmutableMap.of("instances", ImmutableList.of("my-instance-v000", "my-instance-v001")); + + when(taskRepository.getHistory(eq(task))).thenReturn(ImmutableList.of(oldStatus, status)); + when(taskRepository.getResultObjects(eq(task))).thenReturn(ImmutableList.of(results)); + when(taskRepository.currentState(eq(task))).thenReturn(status); + + String result = objectMapper.writeValueAsString(task); + String expectedResult = + Resources.toString(JedisTaskTest.class.getResource("task.json"), StandardCharsets.UTF_8); + + // Compare the parsed trees of the two results, which is agnostic to key order + AssertionsForClassTypes.assertThat(objectMapper.readTree(result)) + .isEqualTo(objectMapper.readTree(expectedResult)); + } + + @Test + void taskSerializationWithOutputTest() throws Exception { + RedisTaskRepository taskRepository = mock(RedisTaskRepository.class); + JedisTask task = + new JedisTask("123", 100, taskRepository, "owner", "requestId", ImmutableSet.of(), false); + + DefaultTaskStatus oldStatus = + DefaultTaskStatus.create(PHASE, "Starting deploy", TaskState.STARTED); + DefaultTaskStatus status = + DefaultTaskStatus.create(PHASE, "Finished deploy", TaskState.COMPLETED); + Object results = + ImmutableMap.of("instances", ImmutableList.of("my-instance-v000", "my-instance-v001")); + TaskDisplayOutput taskOutput = + new TaskDisplayOutput("some-manifest", "DEPLOY_K8S_MANIFEST", "output", ""); + + when(taskRepository.getHistory(eq(task))).thenReturn(ImmutableList.of(oldStatus, status)); + when(taskRepository.getResultObjects(eq(task))).thenReturn(ImmutableList.of(results)); + when(taskRepository.currentState(eq(task))).thenReturn(status); + when(taskRepository.getOutputs(eq(task))).thenReturn(ImmutableList.of(taskOutput)); + + String result = objectMapper.writeValueAsString(task); + String expectedResult = + Resources.toString( + JedisTaskTest.class.getResource("task_with_output.json"), StandardCharsets.UTF_8); + + // Compare the parsed trees of the two results, 
which is agnostic to key order + AssertionsForClassTypes.assertThat(objectMapper.readTree(result)) + .isEqualTo(objectMapper.readTree(expectedResult)); + } + + // See the large comment on the top of JedisTask for this test's rationale + @Test + void statusComputedFirst() throws Exception { + RedisTaskRepository taskRepository = mock(RedisTaskRepository.class); + + JedisTask task = + new JedisTask("123", 100, taskRepository, "owner", "requestId", ImmutableSet.of(), false); + when(taskRepository.currentState(task)).thenReturn(new DefaultTaskStatus(TaskState.STARTED)); + objectMapper.writeValueAsString(task); + + InOrder inOrder = Mockito.inOrder(taskRepository); + inOrder.verify(taskRepository).currentState(eq(task)); + inOrder.verify(taskRepository).getHistory(eq(task)); + inOrder.verify(taskRepository).getResultObjects(eq(task)); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositoryTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositoryTest.java new file mode 100644 index 00000000000..9624144a511 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/data/task/jedis/RedisTaskRepositoryTest.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.data.task.jedis; + +import com.netflix.spinnaker.clouddriver.core.test.TaskRepositoryTck; +import com.netflix.spinnaker.kork.jedis.EmbeddedRedis; +import com.netflix.spinnaker.kork.jedis.JedisClientDelegate; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import redis.clients.jedis.JedisPool; + +public class RedisTaskRepositoryTest extends TaskRepositoryTck { + + JedisPool jedisPool; + + EmbeddedRedis embeddedRedis; + + @Override + protected RedisTaskRepository createTaskRepository() { + embeddedRedis = EmbeddedRedis.embed(); + jedisPool = (JedisPool) embeddedRedis.getPool(); + + return new RedisTaskRepository(new JedisClientDelegate(jedisPool), Optional.empty()); + } + + @AfterEach + public void tearDown() { + Optional.ofNullable(embeddedRedis).ifPresent(EmbeddedRedis::destroy); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizerServiceTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizerServiceTest.java new file mode 100644 index 00000000000..3c5e3029611 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/deploy/DescriptionAuthorizerServiceTest.java @@ -0,0 +1,239 @@ +/* + * Copyright 2023 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.deploy; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionSecretManager; +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig; +import com.netflix.spinnaker.clouddriver.security.resources.AccountNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable; +import com.netflix.spinnaker.fiat.model.resources.ResourceType; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.stream.Stream; +import lombok.Getter; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.springframework.security.authentication.TestingAuthenticationToken; +import org.springframework.security.core.Authentication; +import org.springframework.security.core.context.SecurityContextHolder; + +public class DescriptionAuthorizerServiceTest { + + private final DefaultRegistry registry = new DefaultRegistry(); + private final FiatPermissionEvaluator evaluator = mock(FiatPermissionEvaluator.class); + private final AccountDefinitionSecretManager secretManager = + mock(AccountDefinitionSecretManager.class); + private SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps; + private DescriptionAuthorizerService service; + private final String username = "testUser"; + + @BeforeEach + public void setup() { + opsSecurityConfigProps = new SecurityConfig.OperationsSecurityConfigurationProperties(); + service = + new DescriptionAuthorizerService( + registry, Optional.of(evaluator), opsSecurityConfigProps, secretManager); + TestingAuthenticationToken auth = new TestingAuthenticationToken(username, null); + SecurityContextHolder.getContext().setAuthentication(auth); + } + + @AfterEach + public void resetRegistry() { + registry.reset(); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void shouldAuthorizePassedDescription(boolean hasPermission) { + TestDescription description = + new TestDescription( + "testAccount", + Arrays.asList("testApplication", null), + Arrays.asList("testResource1", "testResource2", null)); + + DescriptionValidationErrors errors = new 
DescriptionValidationErrors(description); + + when(secretManager.canAccessAccountWithSecrets(username, "testAccount")) + .thenReturn(hasPermission); + when(evaluator.hasPermission(any(Authentication.class), anyString(), anyString(), anyString())) + .thenReturn(hasPermission); + + service.authorize(description, errors); + + assertEquals(hasPermission ? 0 : 4, errors.getAllErrors().size()); + verify(secretManager).canAccessAccountWithSecrets(username, "testAccount"); + verify(evaluator, times(3)) + .hasPermission(any(Authentication.class), anyString(), anyString(), anyString()); + verify(evaluator, times(1)).storeWholePermission(); + + verifySuccessMetric(hasPermission, "TestDescription"); + } + + private static Stream provideSkipAuthenticationForImageTaggingArgs() { + return Stream.of( + Arguments.of(List.of("testAccount"), 0), + Arguments.of(List.of("anotherAccount"), 1), + Arguments.of(List.of(), 1)); + } + + @ParameterizedTest + @MethodSource("provideSkipAuthenticationForImageTaggingArgs") + public void shouldSkipAuthenticationForImageTaggingDescription( + List allowUnauthenticatedImageTaggingInAccounts, int expectedNumberOfErrors) { + TestImageTaggingDescription description = new TestImageTaggingDescription("testAccount"); + DescriptionValidationErrors errors = new DescriptionValidationErrors(description); + + opsSecurityConfigProps.setAllowUnauthenticatedImageTaggingInAccounts( + allowUnauthenticatedImageTaggingInAccounts); + + service.authorize(description, errors); + + assertEquals(errors.getAllErrors().size(), expectedNumberOfErrors); + if (!allowUnauthenticatedImageTaggingInAccounts.isEmpty() + && allowUnauthenticatedImageTaggingInAccounts.get(0).equals("testAccount")) { + verify(secretManager, never()).canAccessAccountWithSecrets(username, "testAccount"); + } else { + verify(secretManager).canAccessAccountWithSecrets(username, "testAccount"); + } + verify(evaluator, never()) + .hasPermission(any(Authentication.class), anyString(), anyString(), anyString()); + verify(evaluator, never()).storeWholePermission(); + + verifySuccessMetric(expectedNumberOfErrors == 0, "TestImageTaggingDescription"); + + assertEquals( + expectedNumberOfErrors > 0 ? 
0 : 1, + registry + .counter("authorization.skipped", "descriptionClass", "TestImageTaggingDescription") + .count()); + } + + @ParameterizedTest + @CsvSource({"APPLICATION", "ACCOUNT"}) + public void shouldOnlyAuthzSpecifiedResourceType(ResourceType resourceType) { + TestDescription description = + new TestDescription( + "testAccount", + Arrays.asList("testApplication", null), + Arrays.asList("testResource1", "testResource2", null)); + + DescriptionValidationErrors errors = new DescriptionValidationErrors(description); + + service.authorize(description, errors, List.of(resourceType)); + + if (resourceType.equals(ResourceType.APPLICATION)) { + verify(evaluator, times(3)).hasPermission(any(Authentication.class), any(), any(), any()); + assertEquals(3, errors.getAllErrors().size()); + } else { + verify(secretManager).canAccessAccountWithSecrets(username, "testAccount"); + assertEquals(1, errors.getAllErrors().size()); + } + + verifySuccessMetric(false, "TestDescription"); + } + + @Test + public void shouldAddMetricWithApplicationRestrictionAndNoAccount() { + TestDescription description = new TestDescription("testAccount", List.of(), List.of()); + DescriptionValidationErrors errors = new DescriptionValidationErrors(description); + + service.authorize(description, errors); + + assertEquals(errors.getAllErrors().size(), 1); + assertEquals( + 1, + registry + .counter( + "authorization.missingApplication", + "descriptionClass", + "TestDescription", + "hasValidationErrors", + "true") + .count()); + verifySuccessMetric(false, "TestDescription"); + } + + private void verifySuccessMetric(boolean success, String descriptionClass) { + assertEquals( + 1, + registry + .counter( + "authorization", + "descriptionClass", + descriptionClass, + "success", + String.valueOf(success)) + .count()); + } + + @Getter + public static class TestDescription + implements AccountNameable, ApplicationNameable, ResourcesNameable { + String account; + Collection applications; + List names; + + public TestDescription(String account, Collection applications, List names) { + this.account = account; + this.applications = applications; + this.names = names; + } + } + + @Getter + public static class TestImageTaggingDescription implements AccountNameable { + String account; + + public TestImageTaggingDescription(String account) { + this.account = account; + } + + @Override + public boolean requiresApplicationRestriction() { + return false; + } + + @Override + public boolean requiresAuthorization( + SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps) { + return !opsSecurityConfigProps + .getAllowUnauthenticatedImageTaggingInAccounts() + .contains(account); + } + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueueTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueueTest.java new file mode 100644 index 00000000000..c44144b9f04 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueueTest.java @@ -0,0 +1,195 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.requestqueue.pooled; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.Mockito.mock; + +import ch.qos.logback.classic.Level; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.test.log.MemoryAppender; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.MDC; + +final class PooledRequestQueueTest { + private static final Logger log = LoggerFactory.getLogger(PooledRequestQueueTest.class); + + DynamicConfigService dynamicConfigService = mock(DynamicConfigService.class); + + @Test + void shouldExecuteRequests() throws Throwable { + PooledRequestQueue queue = + new PooledRequestQueue(dynamicConfigService, new NoopRegistry(), 1000, 1000, 1); + + assertThat(queue.execute("foo", () -> 12345L)).isEqualTo(12345L); + } + + @Test + void includesMdcWhenExecutingOperation() throws Throwable { + // Capture the log messages that our test operation generates + MemoryAppender memoryAppender = new MemoryAppender(PooledRequestQueueTest.class); + + PooledRequestQueue queue = + new PooledRequestQueue(dynamicConfigService, new NoopRegistry(), 1000, 1000, 1); + + Callable testCallable = + () -> { + Map contextMap = MDC.getCopyOfContextMap(); + log.info("contextMap: {}", contextMap); + return 12345L; + }; + + // Put something in the MDC here, to see if it makes it into the thread that + // executes the operation. + String mdcKey = "myKey"; + String mdcValue = "myValue"; + MDC.put(mdcKey, mdcValue); + assertThat(queue.execute("foo", testCallable)).isEqualTo(12345L); + List logMessages = memoryAppender.search(mdcKey + "=" + mdcValue, Level.INFO); + assertThat(logMessages).hasSize(1); + + // And now clear the MDC and make sure the resulting operation gets the empty MDC. 
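+ // (Editor note, an assumption about the MDC adapter in use: SLF4J's MDC.getCopyOfContextMap() + // returns null rather than an empty map once the MDC has been cleared, which is why the + // callable is expected to log "contextMap: null" below.)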
+ MDC.clear(); + assertThat(queue.execute("foo", testCallable)).isEqualTo(12345L); + List emptyMdcMessages = memoryAppender.search("contextMap: null", Level.INFO); + assertThat(emptyMdcMessages).hasSize(1); + } + + @Test + void timesOutIfRequestDoesNotComplete() { + PooledRequestQueue queue = + new PooledRequestQueue(dynamicConfigService, new NoopRegistry(), 5000, 10, 1); + + CountDownLatch block = new CountDownLatch(1); + assertThatThrownBy( + () -> { + try { + queue.execute( + "foo", + () -> { + block.await(); + return 12345L; + }); + } finally { + block.countDown(); + } + }) + .isInstanceOf(PromiseTimeoutException.class); + } + + @Test + void timesOutRequestIfDoesNotStartInTime() throws Exception { + long startTimeout = 50; + PooledRequestQueue queue = + new PooledRequestQueue( + dynamicConfigService, new NoopRegistry(), startTimeout, 5 * startTimeout, 1); + + ExecutorService executor = Executors.newFixedThreadPool(2); + + CountDownLatch blockingJobStarted = new CountDownLatch(1); + CountDownLatch testJobExited = new CountDownLatch(1); + + // Block the queue with a job that holds onto the only executor slot until our test job + // has exited. + executor.submit( + safeRun( + () -> { + queue.execute( + "foo", + () -> { + blockingJobStarted.countDown(); + testJobExited.await(); + return null; + }); + })); + + // Submit another job to the queue, and ensure that it is rejected before starting. + AtomicBoolean testJobRan = new AtomicBoolean(false); + CountDownLatch testJobQueued = new CountDownLatch(1); + Future testJob = + executor.submit( + safeRun( + () -> { + try { + blockingJobStarted.await(); + testJobQueued.countDown(); + queue.execute( + "foo", + () -> { + testJobRan.set(true); + return null; + }); + } finally { + testJobExited.countDown(); + } + })); + + executor.shutdown(); + + // Once the test job is queued, we'll wait a few times the startup timeout for it to finish. + testJobQueued.await(); + if (!executor.awaitTermination(10 * startTimeout, TimeUnit.MILLISECONDS)) { + executor.shutdownNow(); + // Fail the test immediately rather than assert on the status of the jobs, given that we + // interrupted them abnormally. + fail("Timeout waiting for queued jobs to finish."); + } + + assertThatThrownBy(testJob::get).hasCauseInstanceOf(PromiseNotStartedException.class); + assertThat(testJobRan.get()).isFalse(); + } + + /** + * Translates a {@link ThrowingRunnable} into a {@link Callable}. + * + *
<p>
Invoking the {@link Callable} calls {@link ThrowingRunnable#run()}. Any {@link Exception} + * that is thrown is propagated, and any non-{@link Exception} {@link Throwable} is wrapped in a + * {@link RuntimeException}. + */ + private static Callable safeRun(ThrowingRunnable throwingRunnable) { + return () -> { + try { + throwingRunnable.run(); + } catch (Exception e) { + throw e; + } catch (Throwable t) { + throw new RuntimeException(t); + } + return null; + }; + } + + /** A {@link Runnable} that allows an arbitrary {@link Throwable} to be thrown. */ + @FunctionalInterface + private interface ThrowingRunnable { + void run() throws Throwable; + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionMapperTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionMapperTest.java new file mode 100644 index 00000000000..5334177cf8e --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionMapperTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.netflix.spinnaker.clouddriver.config.AccountDefinitionConfiguration; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.fiat.model.Authorization; +import io.spinnaker.test.security.TestAccount; +import io.spinnaker.test.security.ValueAccount; +import java.util.Set; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.ImportAutoConfiguration; +import org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.test.context.TestPropertySource; + +@SpringBootTest(classes = AccountDefinitionConfiguration.class) +@ImportAutoConfiguration(JacksonAutoConfiguration.class) +@TestPropertySource( + properties = "account.storage.additionalScanPackages = io.spinnaker.test.security") +@ComponentScan("com.netflix.spinnaker.kork.secrets") +class AccountDefinitionMapperTest { + + @Autowired AccountDefinitionMapper mapper; + + @Test + void canConvertAdditionalAccountTypes() throws JsonProcessingException { + var account = new TestAccount(); + account.setData("name", "foo"); + account.getPermissions().add(Authorization.READ, Set.of("dev", "sre")); + account.getPermissions().add(Authorization.WRITE, "sre"); + account.setData("password", "hunter2"); + assertEquals(account, mapper.deserialize(mapper.serialize(account))); + } + + @Test + 
void canConvertJacksonizedAccountTypes() throws JsonProcessingException { + var account = ValueAccount.builder().name("james").value("meowth").build(); + assertEquals(account, mapper.deserialize(mapper.serialize(account))); + } + + @Test + void canDecryptSecretUris() { + var data = "{\"type\":\"test\",\"name\":\"bar\",\"password\":\"secret://noop?v=hunter2&k=v\"}"; + CredentialsDefinition account = assertDoesNotThrow(() -> mapper.deserialize(data)); + assertThat(account).isInstanceOf(TestAccount.class); + assertThat(account.getName()).isEqualTo("bar"); + TestAccount testAccount = (TestAccount) account; + assertThat(testAccount.getData().get("password")).isEqualTo("hunter2"); + } + + @Test + void canDecryptEncryptedUris() { + var data = "{\"type\":\"test\",\"name\":\"bar\",\"password\":\"encrypted:noop!v:hunter2\"}"; + CredentialsDefinition account = assertDoesNotThrow(() -> mapper.deserialize(data)); + assertThat(account).isInstanceOf(TestAccount.class); + assertThat(account.getName()).isEqualTo("bar"); + TestAccount testAccount = (TestAccount) account; + assertThat(testAccount.getData().get("password")).isEqualTo("hunter2"); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSecretManagerTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSecretManagerTest.java new file mode 100644 index 00000000000..62a010359ee --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionSecretManagerTest.java @@ -0,0 +1,168 @@ +/* + * Copyright 2022 Armory, Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.security; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static uk.org.webcompere.systemstubs.resource.Resources.with; + +import com.google.cloud.secretmanager.v1.SecretManagerServiceClient; +import com.netflix.spinnaker.clouddriver.config.AccountDefinitionConfiguration; +import com.netflix.spinnaker.kork.secrets.SecretManager; +import com.netflix.spinnaker.kork.secrets.user.UserSecret; +import com.netflix.spinnaker.kork.secrets.user.UserSecretManager; +import com.netflix.spinnaker.kork.secrets.user.UserSecretReference; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.Set; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.ImportAutoConfiguration; +import org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import uk.org.webcompere.systemstubs.environment.EnvironmentVariables; + +@SpringBootTest(classes = AccountDefinitionConfiguration.class) +@ImportAutoConfiguration(JacksonAutoConfiguration.class) +class AccountDefinitionSecretManagerTest { + + @MockBean UserSecretManager userSecretManager; + + @MockBean SecretManager secretManager; + + @MockBean AccountSecurityPolicy policy; + + @Autowired AccountDefinitionSecretManager accountDefinitionSecretManager; + + @Test + void canAccessUserSecret() { + var userSecret = mock(UserSecret.class); + given(userSecret.getRoles()).willReturn(List.of("group", "group2")); + given(userSecret.getSecretString(eq("foo"))).willReturn("bar"); + given(userSecretManager.getUserSecret(any())).willReturn(userSecret); + given(policy.isAdmin(any())).willReturn(false); + var username = "user"; + var accountName = "account"; + given(policy.getRoles(username)).willReturn(Set.of("group")); + given(policy.canUseAccount(username, accountName)).willReturn(true); + + var ref = UserSecretReference.parse("secret://test?k=foo"); + assertThat(accountDefinitionSecretManager.getUserSecretString(ref, accountName)) + .isEqualTo("bar"); + assertThat(accountDefinitionSecretManager.canAccessAccountWithSecrets(username, accountName)) + .isTrue(); + } + + @Test + void adminHasAccess() { + var userSecret = mock(UserSecret.class); + given(userSecret.getRoles()).willReturn(List.of("group", "group2")); + given(userSecret.getSecretString(eq("foo"))).willReturn("bar"); + given(userSecretManager.getUserSecret(any())).willReturn(userSecret); + given(policy.isAdmin(any())).willReturn(true); + + var ref = UserSecretReference.parse("secret://test?k=foo"); + var accountName = "cube"; + assertThat(accountDefinitionSecretManager.getUserSecretString(ref, accountName)) + .isEqualTo("bar"); + assertThat(accountDefinitionSecretManager.canAccessAccountWithSecrets("sphere", accountName)) + .isTrue(); + } + + @Test + void cannotAccessUserSecret() { + var userSecret = mock(UserSecret.class); + given(userSecret.getRoles()).willReturn(List.of("group0", "group1")); + given(userSecret.getSecretString(eq("foo"))).willReturn("bar"); + 
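+ // (canAccessAccountWithSecrets appears to grant access only when the caller is an admin + // or their roles intersect those of every user secret the account resolved through + // getUserSecretString; the role sets stubbed in this test are disjoint, so access is denied.)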
given(userSecretManager.getUserSecret(any())).willReturn(userSecret); + given(policy.isAdmin(any())).willReturn(false); + given(policy.getRoles(any())).willReturn(Set.of("group2", "group3")); + + var accountName = "cube"; + var ref = UserSecretReference.parse("secret://test?k=foo"); + assertThat(accountDefinitionSecretManager.getUserSecretString(ref, accountName)) + .isEqualTo("bar"); + assertThat(accountDefinitionSecretManager.canAccessAccountWithSecrets("sphere", accountName)) + .isFalse(); + } + + @Test + void canAccessSecretButNotAccount() { + var userSecret = mock(UserSecret.class); + given(userSecret.getRoles()).willReturn(List.of("group0", "group1")); + given(userSecret.getSecretString(eq("foo"))).willReturn("bar"); + given(userSecretManager.getUserSecret(any())).willReturn(userSecret); + given(policy.isAdmin(any())).willReturn(false); + given(policy.getRoles(any())).willReturn(Set.of("group0", "group1")); + given(policy.canUseAccount(any(), any())).willReturn(false); + + var accountName = "cube"; + var ref = UserSecretReference.parse("secret://test?k=foo"); + assertThat(accountDefinitionSecretManager.getUserSecretString(ref, accountName)) + .isEqualTo("bar"); + assertThat(accountDefinitionSecretManager.canAccessAccountWithSecrets("sphere", accountName)) + .isFalse(); + } + + @Test + void canAccessAccountWhenUserAndAccountHaveNoPermissions() { + given(policy.isAdmin(any())).willReturn(false); + var username = "user"; + var accountName = "account"; + given(policy.getRoles(username)).willReturn(Set.of()); + // also assuming that this user can use the account in general + given(policy.canUseAccount(username, accountName)).willReturn(true); + + assertThat(accountDefinitionSecretManager.canAccessAccountWithSecrets(username, accountName)) + .isTrue(); + } + + @Test + void testVersionCompatibilityForGoogleSecretManager() throws Exception { + + String credentials = + "{\n" + + " \"type\": \"service_account\",\n" + + " \"project_id\": \"my-test-project\",\n" + + " \"private_key_id\": \"aaaaaaaaaaaaaaaaaaa\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE
KEY-----\\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDmKDSt86RQ59Zx\\njezHgVF4IWCwbE6QzdnJGumPnpIvTS5/575A9grY5WB5s4H4FnrrmLVJe5T0mxc5\\nrJ4v9JKyHeTQn+OdMV0zwhJczIN+raROZ9GJyxgdYysiyRR1ajkd8aX+aiU5A4r9\\nFcIIcbLkrlltKfSV3I6tiE0oZja2sj+OE4+3b85NMiBUeusLH7GRglRDddnCAysY\\nvk2tIsYjI9m+f6r731rrmAU1SA5sx7pbRMJdWPVxCSrJy0F+AH/Hn1rTD+ga1m4+\\ngLuBbjZLkfFeg7xsqWKTImaMxKDNpiaNiUaBrBXS1u6IqfjDMakFU9tm5sbGuauG\\nXdIqYo0DAgMBAAECggEABPTPsZriEN+O0ovKXtkUXo0yQYpYV/qLp2Hqrq35zh/2\\neBWolxbu6kQ1doypjosGMAWhV2KpNTbglXYRdma1zZuI/mWH2sfVDuGUHXOszz3a\\noHLQfdjZFstSAsh1JgdY3iHo8uVPrBfwVpcXdX2xUW0s8Tj3X4GY5vhc8cysF/VA\\nDEAlhsxWqanZTslQrtGpuV3q0VoHaFVmf5XcxHm7IOo7UFbbmiOh2WLi22Uv7NxA\\nemqWEGzgU/j/aYoLA2YxmXmuebMTkAwuYfRYEJQ/m2/P/dnPUWRWKp6goaii+B1o\\n/SgfVxet9yM6ChUi7DVc4uRMjCDP/GkY6c26jsrLpQKBgQD4eCMFbXRb1JZGaH6D\\nIqrk+/To9wqxKOI1J3ti2mFLPG9+Pf4Erey4GkmXtFOjDxHfNq9Q4gNi2MDJDgYe\\nglnt4foPG3lqaus3cY7TcVv3cBRfMp1OX1v0Q5MmMyrcVKm9kLueFluLvSY8J8Bw\\nSF37YmWFmdANbxs2tybyTtQtLwKBgQDtIfwwlP6cSCgXaFzuQnkptLn0M5PQ8A0F\\nErxLwLnAgvIP5xI7AtOjVx0Uu7x5XGiTWnIhLU9ODkGQatcxocTmKOQ63oFiOEze\\n33XZKo1gq/ZDx7LGWpDmBR3xqgk/HwlzmuKlR0SqcS+xuU84mrNDqB8ayf6GCkao\\nYRnlE/WwbQKBgFxQXkqc8PdRU4fTOPXFwpKS3dpUNp+9ndW71obStgU67f2MUL0y\\nVVnNQnxfnhdd+Pjim15EqpdmCrJoSHO7YGgWZk6ImaKlGMEfqr36Rv32oUsBRhqh\\nKUvmc1xk9E6qEeqBRIOmsNqJKxR8fG37JRfJ5gguLnNfTVAV2h16ljA3AoGBAN+z\\nnNYz6JGEHJYgdPKrsOOgQ4BVG9ASdSXhC9Mmx9UNcs9/vBoBS6geqSeDB4UxoNHJ\\nlDsqJFNNbZqQv9tpcXdzAgNrHoGK/TGPevxYgTC+aL5+aG9oxqLIFvyA3OI4JFFz\\nvvYOan+j8Utmto5+mjhsJJPAFKVcklWL7MLHdpJtAoGAQHnyuT12Dzi3SeOMavhD\\nheJ6ant6pzH7bfifWcz558IfLtnxtYFR4azKW6n8SUnVCyybPMwVt4pxFZ6as0zK\\nKaplMuWadDqTSE56LJmG854fxQWq4/Z0qnYr9Yq7UHSRtNT7xOxyfiJkTVYqAT6q\\nYcPfY81+aw4wdjnWrDw5dSk=\\n-----END PRIVATE KEY-----\\n\",\n" + + " \"client_email\": \"dummy@my-test-project.iam.gserviceaccount.com\",\n" + + " \"client_id\": \"1111111111111111111\",\n" + + " \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n" + + " \"token_uri\": \"https://oauth2.googleapis.com/token\",\n" + + " \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n" + + " \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/dummy%40my-test-project.iam.gserviceaccount.com\"\n" + + "}"; + String credentialsPath = writeToFile(credentials, "credentials.json"); + with(new EnvironmentVariables().set("GOOGLE_APPLICATION_CREDENTIALS", credentialsPath)) + .execute(() -> assertDoesNotThrow(() -> SecretManagerServiceClient.create())); + } + + private String writeToFile(String content, String fileName) throws IOException { + Path filePath = Paths.get(System.getProperty("java.io.tmpdir"), fileName); + Files.write(filePath, content.getBytes()); + return filePath.toString(); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionServiceTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionServiceTest.java new file mode 100644 index 00000000000..3bb5383405f --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/AccountDefinitionServiceTest.java @@ -0,0 +1,133 @@ +/* + * Copyright 2024 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.security; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.secrets.user.UserSecret; +import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException; +import io.spinnaker.test.security.ValueAccount; +import java.util.List; +import java.util.Set; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.springframework.security.access.AccessDeniedException; + +public class AccountDefinitionServiceTest { + AccountDefinitionRepository repository; + AccountDefinitionSecretManager secretManager; + AccountCredentialsProvider accountCredentialsProvider; + AccountSecurityPolicy policy; + AuthorizedRolesExtractor extractor; + CredentialsDefinition definition = + ValueAccount.builder().name("name").value("secret://test?k=value").build(); + AccountDefinitionService accountDefinitionService; + Set<String> authorizedRoles = Set.of("role1", "role2"); + + @BeforeEach + public void setup() { + repository = mock(AccountDefinitionRepository.class); + secretManager = mock(AccountDefinitionSecretManager.class); + accountCredentialsProvider = mock(AccountCredentialsProvider.class); + policy = mock(AccountSecurityPolicy.class); + extractor = mock(AuthorizedRolesExtractor.class); + List<AuthorizedRolesExtractor> extractors = List.of(extractor); + accountDefinitionService = + new AccountDefinitionService( + repository, secretManager, accountCredentialsProvider, policy, extractors); + + doReturn(true).when(extractor).supportsType(definition.getClass()); + } + + @Test + public void testValidateAccountAuthorizationWithCommonRole() { + doReturn(authorizedRoles).when(extractor).getAuthorizedRoles(definition); + Set<String> userRoles = Set.of("role1"); + assertDoesNotThrow( + () -> + accountDefinitionService.validateAccountAuthorization( + userRoles, definition, AccountDefinitionService.AccountAction.UPDATE)); + } + + @Test + public void testValidateAccountAuthorizationEmptyAuthorizedRoles() { + doReturn(Set.of()).when(extractor).getAuthorizedRoles(definition); + Set<String> userRoles = Set.of("role1"); + assertDoesNotThrow( + () -> + accountDefinitionService.validateAccountAuthorization( + userRoles, definition, AccountDefinitionService.AccountAction.UPDATE)); + } + + @Test + public void testValidateAccountAuthorizationNoCommonRoles() { + doReturn(authorizedRoles).when(extractor).getAuthorizedRoles(definition); + Set<String> userRoles = Set.of("oneRole", "anotherRole"); + assertThrows( + InvalidRequestException.class, + () -> + accountDefinitionService.validateAccountAuthorization( + userRoles, definition, AccountDefinitionService.AccountAction.UPDATE)); + } + + @Test + public void testValidateUserSecretAuthorizationWithCommonRole() { + UserSecret userSecret = mock(UserSecret.class); +
doReturn(List.copyOf(authorizedRoles)).when(userSecret).getRoles(); + doReturn(userSecret).when(secretManager).getUserSecret(any()); + Set userRoles = Set.of("role1", "role3"); + + assertDoesNotThrow( + () -> + accountDefinitionService.validateUserSecretAuthorization( + userRoles, definition, AccountDefinitionService.AccountAction.UPDATE)); + } + + @Test + public void testValidateUserSecretAuthorizationEmptyAuthorizedRoles() { + UserSecret userSecret = mock(UserSecret.class); + doReturn(List.of()).when(userSecret).getRoles(); + doReturn(userSecret).when(secretManager).getUserSecret(any()); + Set userRoles = Set.of("role1", "role3"); + + assertThrows( + AccessDeniedException.class, + () -> + accountDefinitionService.validateUserSecretAuthorization( + userRoles, definition, AccountDefinitionService.AccountAction.UPDATE)); + } + + @Test + public void testValidateUserSecretAuthorizationNoCommonRoles() { + UserSecret userSecret = mock(UserSecret.class); + doReturn(List.copyOf(authorizedRoles)).when(userSecret).getRoles(); + doReturn(userSecret).when(secretManager).getUserSecret(any()); + Set userRoles = Set.of("role3"); + + assertThrows( + AccessDeniedException.class, + () -> + accountDefinitionService.validateUserSecretAuthorization( + userRoles, definition, AccountDefinitionService.AccountAction.UPDATE)); + } +} diff --git a/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountSecurityPolicyTest.java b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountSecurityPolicyTest.java new file mode 100644 index 00000000000..5d72ccc7065 --- /dev/null +++ b/clouddriver-core/src/test/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountSecurityPolicyTest.java @@ -0,0 +1,111 @@ +/* + * Copyright 2023 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.security; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.fiat.model.UserPermission; +import com.netflix.spinnaker.fiat.model.resources.Role; +import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +public class DefaultAccountSecurityPolicyTest { + private static final String username = "testUser"; + private static final String account = "testAccount"; + FiatPermissionEvaluator fiatPermissionEvaluator = mock(FiatPermissionEvaluator.class); + DefaultAccountSecurityPolicy policy; + + @BeforeEach + void setup() { + policy = new DefaultAccountSecurityPolicy(fiatPermissionEvaluator); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testIsAdmin(boolean isUserAdmin) { + when(fiatPermissionEvaluator.getPermission(username)) + .thenReturn(new UserPermission.View().setAdmin(isUserAdmin)); + + assertEquals(isUserAdmin, policy.isAdmin(username)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testIsAccountManager(boolean isAccountManager) { + when(fiatPermissionEvaluator.getPermission(username)) + .thenReturn(new UserPermission.View().setAccountManager(isAccountManager)); + + assertEquals(isAccountManager, policy.isAccountManager(username)); + } + + @Test + public void testGetRoles() { + Set roles = Set.of("role1", "role2", "role3"); + when(fiatPermissionEvaluator.getPermission(username)) + .thenReturn( + new UserPermission.View() + .setRoles( + roles.stream() + .map(role -> new Role.View().setName(role).setSource(Role.Source.LDAP)) + .collect(Collectors.toSet()))); + + assertEquals(roles, policy.getRoles(username)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testCanUseAccount_NotAdmin(boolean hasPermission) { + when(fiatPermissionEvaluator.getPermission(username)) + .thenReturn(new UserPermission.View().setAdmin(false)); + when(fiatPermissionEvaluator.hasPermission(username, account, "account", Authorization.WRITE)) + .thenReturn(hasPermission); + + assertEquals(hasPermission, policy.canUseAccount(username, account)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testCanModifyAccount(boolean isAdmin) { + when(fiatPermissionEvaluator.getPermission(username)) + .thenReturn(new UserPermission.View().setAdmin(isAdmin)); + + assertEquals(isAdmin, policy.canModifyAccount(username, account)); + } + + @ParameterizedTest + @CsvSource({"false,false", "false,true", "true,false", "true,true"}) + public void testCanModifyAccountAsAccountManager( + boolean isAccountManager, boolean hasWritePermission) { + when(fiatPermissionEvaluator.getPermission(username)) + .thenReturn(new UserPermission.View().setAdmin(false).setAccountManager(isAccountManager)); + when(fiatPermissionEvaluator.hasPermission(username, account, "account", Authorization.WRITE)) + .thenReturn(hasWritePermission); + + assertEquals( + isAccountManager && hasWritePermission, policy.canModifyAccount(username, account)); + } +} diff --git 
a/clouddriver-core/src/test/java/io/spinnaker/test/security/TestAccount.java b/clouddriver-core/src/test/java/io/spinnaker/test/security/TestAccount.java new file mode 100644 index 00000000000..d42054f33ac --- /dev/null +++ b/clouddriver-core/src/test/java/io/spinnaker/test/security/TestAccount.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.spinnaker.test.security; + +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonAnySetter; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.netflix.spinnaker.clouddriver.security.AccessControlledAccountDefinition; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +@JsonTypeName("test") +@NonnullByDefault +public class TestAccount implements AccessControlledAccountDefinition { + private final Permissions.Builder permissions = new Permissions.Builder(); + private final Map data = new HashMap<>(); + + @Override + @JsonIgnore + public String getName() { + return (String) data.get("name"); + } + + public Permissions.Builder getPermissions() { + return permissions; + } + + @JsonAnyGetter + public Map getData() { + return data; + } + + @JsonAnySetter + public void setData(String key, Object value) { + data.put(key, value); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestAccount that = (TestAccount) o; + return permissions.equals(that.permissions) && data.equals(that.data); + } + + @Override + public int hashCode() { + return Objects.hash(permissions, data); + } +} diff --git a/clouddriver-core/src/test/java/io/spinnaker/test/security/ValueAccount.java b/clouddriver-core/src/test/java/io/spinnaker/test/security/ValueAccount.java new file mode 100644 index 00000000000..ac82ccba9e6 --- /dev/null +++ b/clouddriver-core/src/test/java/io/spinnaker/test/security/ValueAccount.java @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.spinnaker.test.security; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import lombok.Builder; +import lombok.Value; +import lombok.extern.jackson.Jacksonized; + +// TODO(jvz): change to @CredentialsType after https://github.com/spinnaker/kork/pull/958 merged +@JsonTypeName("value") +@NonnullByDefault +@Value +@Builder +@Jacksonized +public class ValueAccount implements CredentialsDefinition { + String name; + String value; +} diff --git a/clouddriver-core/src/test/resources/com/netflix/spinnaker/clouddriver/data/task/jedis/task.json b/clouddriver-core/src/test/resources/com/netflix/spinnaker/clouddriver/data/task/jedis/task.json new file mode 100644 index 00000000000..e8feead6a4a --- /dev/null +++ b/clouddriver-core/src/test/resources/com/netflix/spinnaker/clouddriver/data/task/jedis/task.json @@ -0,0 +1,35 @@ +{ + "history" : [ + { + "complete" : false, + "completed" : false, + "failed" : false, + "phase" : "DEPLOY", + "retryable" : false, + "status" : "Starting deploy" + } + ], + "id" : "123", + "ownerId" : "owner", + "requestId" : "requestId", + "resultObjects" : [ + { + "instances" : [ + "my-instance-v000", + "my-instance-v001" + ] + } + ], + "retryable" : false, + "sagaIds" : [], + "startTimeMs" : 100, + "status" : { + "complete" : true, + "completed" : true, + "failed" : false, + "phase" : "DEPLOY", + "retryable" : false, + "status" : "Finished deploy" + }, + "outputs": [] +} diff --git a/clouddriver-core/src/test/resources/com/netflix/spinnaker/clouddriver/data/task/jedis/task_with_output.json b/clouddriver-core/src/test/resources/com/netflix/spinnaker/clouddriver/data/task/jedis/task_with_output.json new file mode 100644 index 00000000000..6e585468130 --- /dev/null +++ b/clouddriver-core/src/test/resources/com/netflix/spinnaker/clouddriver/data/task/jedis/task_with_output.json @@ -0,0 +1,42 @@ +{ + "history" : [ + { + "complete" : false, + "completed" : false, + "failed" : false, + "phase" : "DEPLOY", + "retryable" : false, + "status" : "Starting deploy" + } + ], + "id" : "123", + "ownerId" : "owner", + "requestId" : "requestId", + "resultObjects" : [ + { + "instances" : [ + "my-instance-v000", + "my-instance-v001" + ] + } + ], + "retryable" : false, + "sagaIds" : [], + "startTimeMs" : 100, + "status" : { + "complete" : true, + "completed" : true, + "failed" : false, + "phase" : "DEPLOY", + "retryable" : false, + "status" : "Finished deploy" + }, + "outputs": [ + { + "manifest":"some-manifest", + "phase":"DEPLOY_K8S_MANIFEST", + "stdOut":"output", + "stdError":"" + } + ] +} diff --git a/clouddriver-dcos/clouddriver-dcos.gradle b/clouddriver-dcos/clouddriver-dcos.gradle index e3f0efaa3d6..c407c0c8e76 100644 --- a/clouddriver-dcos/clouddriver-dcos.gradle +++ b/clouddriver-dcos/clouddriver-dcos.gradle @@ -1,6 +1,24 @@ dependencies { - compile project(":clouddriver-core") - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') - compile 'com.cerner.marathon:marathon-client:0.6.3' + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + implementation project(":cats:cats-core") + + compileOnly "org.apache.commons:commons-lang3" + implementation "com.github.ben-manes.caffeine:guava" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + 
implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-moniker" + implementation "joda-time:joda-time:2.10.1" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation 'com.cerner.marathon:marathon-client:0.6.3' + + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" } diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/AbstractDcosServerGroupDescription.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/AbstractDcosServerGroupDescription.groovy index b0479ce0a2b..44a9e0eebfa 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/AbstractDcosServerGroupDescription.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/AbstractDcosServerGroupDescription.groovy @@ -18,9 +18,14 @@ package com.netflix.spinnaker.clouddriver.dcos.deploy.description.servergroup; import com.netflix.spinnaker.clouddriver.dcos.deploy.description.AbstractDcosCredentialsDescription; -import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable; -abstract class AbstractDcosServerGroupDescription extends AbstractDcosCredentialsDescription implements ServerGroupNameable { +abstract class AbstractDcosServerGroupDescription extends AbstractDcosCredentialsDescription implements ServerGroupsNameable { String serverGroupName boolean forceDeployment = true + + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } } diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/DeployDcosServerGroupDescription.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/DeployDcosServerGroupDescription.groovy index 1501cd7f675..23e1674ee01 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/DeployDcosServerGroupDescription.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/description/servergroup/DeployDcosServerGroupDescription.groovy @@ -65,6 +65,11 @@ class DeployDcosServerGroupDescription extends AbstractDcosCredentialsDescriptio boolean forceDeployment + @Override + Collection getApplications() { + return [application] + } + @Canonical static class Container { String type diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/AbstractDcosDescriptionValidatorSupport.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/AbstractDcosDescriptionValidatorSupport.groovy index 9ae0fce7357..eab7848a881 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/AbstractDcosDescriptionValidatorSupport.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/AbstractDcosDescriptionValidatorSupport.groovy @@ -22,7 
+22,6 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator import com.netflix.spinnaker.clouddriver.security.AccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors abstract class AbstractDcosDescriptionValidatorSupport extends DescriptionValidator { @@ -35,7 +34,7 @@ abstract class AbstractDcosDescriptionValidatorSupport extends AbstractDcosDescriptionValidatorSupport { @@ -31,7 +30,7 @@ abstract class AbstractDcosServerGroupValidator lastException = new AtomicReference<>(null) - DcosHealthIndicator(AccountCredentialsProvider accountCredentialsProvider, + DcosHealthIndicator(Registry registry, + AccountCredentialsProvider accountCredentialsProvider, DcosClientProvider dcosClientProvider) { + super(registry, "dcos") this.accountCredentialsProvider = accountCredentialsProvider this.dcosClientProvider = dcosClientProvider } - @Override - Health health() { - def ex = lastException.get() - - if (ex) { - new Health.Builder().down().build() - } - - new Health.Builder().up().build() - } - @Scheduled(fixedDelay = 300000L) void checkHealth() { - try { + updateHealth { Set dcosCredentialsSet = accountCredentialsProvider.all.findAll { it instanceof DcosAccountCredentials } as Set @@ -68,10 +61,6 @@ class DcosHealthIndicator implements HealthIndicator { } } } - - lastException.set(null) - } catch (Exception ex) { - lastException.set(ex) } } @@ -79,4 +68,3 @@ class DcosHealthIndicator implements HealthIndicator { @InheritConstructors static class DcosIOException extends RuntimeException {} } - diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/model/DcosLoadBalancer.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/model/DcosLoadBalancer.groovy index 7c3dfa5152d..4c168f0bcba 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/model/DcosLoadBalancer.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/model/DcosLoadBalancer.groovy @@ -90,7 +90,9 @@ class DcosLoadBalancer implements LoadBalancer, Serializable, LoadBalancerProvid //} } as Set, // TODO once we can do this - detachedInstances: []) + detachedInstances: [], + cloudProvider: DcosCloudProvider.ID + ) } as Set } diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/config/DcosProviderConfig.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/config/DcosProviderConfig.groovy index d38a7d533a3..6cbd10aba22 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/config/DcosProviderConfig.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/config/DcosProviderConfig.groovy @@ -25,7 +25,6 @@ import com.google.common.collect.Multimap import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.Agent import com.netflix.spinnaker.cats.provider.Provider -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper import com.netflix.spinnaker.clouddriver.dcos.DcosClientProvider import com.netflix.spinnaker.clouddriver.dcos.DcosCloudProvider import com.netflix.spinnaker.clouddriver.dcos.provider.DcosProvider @@ -40,11 +39,9 @@ import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import 
com.netflix.spinnaker.clouddriver.security.ProviderUtils import groovy.util.logging.Slf4j import org.apache.commons.lang3.tuple.Pair -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope import java.util.concurrent.ConcurrentHashMap @@ -64,28 +61,11 @@ class DcosProviderConfig { provider } - @Bean - DcosProviderSynchronizerTypeWrapper dcosProviderSynchronizerTypeWrapper() { - new DcosProviderSynchronizerTypeWrapper() - } - - class DcosProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - - @Override - Class getSynchronizerType() { - return DcosProviderSynchronizer - } - } - - class DcosProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - DcosProviderSynchronizer synchronizeDcosProvider(DcosProvider dcosProvider, - AccountCredentialsProvider accountCredentialsProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { + private static void synchronizeDcosProvider(DcosProvider dcosProvider, + AccountCredentialsProvider accountCredentialsProvider, + AccountCredentialsRepository accountCredentialsRepository, + ObjectMapper objectMapper, + Registry registry) { Set> scheduledAgents = getScheduledClusterAgents(dcosProvider) @@ -116,8 +96,6 @@ class DcosProviderConfig { if (!newlyAddedAgents.isEmpty()) { dcosProvider.agents.addAll(newlyAddedAgents) } - - new DcosProviderSynchronizer() } static def synchronizeAgent(DcosProvider dcosProvider, Pair clusterKey, Collection allAccounts) { diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosInstanceProvider.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosInstanceProvider.groovy index 31c2b56b64b..a1acb6b413a 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosInstanceProvider.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosInstanceProvider.groovy @@ -28,7 +28,7 @@ import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @Component -class DcosInstanceProvider implements InstanceProvider { +class DcosInstanceProvider implements InstanceProvider { private final Cache cacheView private final ObjectMapper objectMapper diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosJobProvider.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosJobProvider.groovy index 2b6ac31bc27..10d6b16ac18 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosJobProvider.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/view/DcosJobProvider.groovy @@ -74,7 +74,7 @@ class DcosJobProvider implements JobProvider { // map), we want to protect against non-DCOS credentials and return an empty map so that we don't break the // job endpoint by throwing an exception (which will return a 500 to the caller). 
if (!(credentials instanceof DcosAccountCredentials)) { - return [:] + return null } def dcosClient = dcosClientProvider.getDcosClient(credentials, location) @@ -106,7 +106,7 @@ class DcosJobProvider implements JobProvider { def file = dcosClient.getAgentSandboxFileAsString(jobTask.getSlave_id(), filePath) if (!file.isPresent()) { - return [:] + return null } final contents = file.get() @@ -122,7 +122,7 @@ class DcosJobProvider implements JobProvider { } catch (DCOSException e) { if (e.status == 404) { LOGGER.warn("File [${fileName}] does not exist for job [${location}.${id}].") - return [:] + return null } else { throw e } diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosAccountCredentials.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosAccountCredentials.groovy index 8d39c85ccd1..57e1c223d49 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosAccountCredentials.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosAccountCredentials.groovy @@ -21,14 +21,13 @@ import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spectator.api.Registry import com.netflix.spinnaker.clouddriver.dcos.cache.Keys import com.netflix.spinnaker.clouddriver.dcos.deploy.util.id.MarathonPathId -import com.netflix.spinnaker.clouddriver.security.AccountCredentials import com.netflix.spinnaker.fiat.model.resources.Permissions import org.slf4j.Logger import org.slf4j.LoggerFactory import static com.netflix.spinnaker.clouddriver.dcos.DcosConfigurationProperties.LinkedDockerRegistryConfiguration -class DcosAccountCredentials implements AccountCredentials { +class DcosAccountCredentials extends AbstractAccountCredentials { private static final Logger LOGGER = LoggerFactory.getLogger(DcosAccountCredentials) private static final String CLOUD_PROVIDER = Keys.PROVIDER diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosCredentialsInitializer.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosCredentialsInitializer.groovy index 927dbe5a7e6..587187429de 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosCredentialsInitializer.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/security/DcosCredentialsInitializer.groovy @@ -19,12 +19,10 @@ package com.netflix.spinnaker.clouddriver.dcos.security import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper import com.netflix.spinnaker.clouddriver.dcos.DcosClientCompositeKey import com.netflix.spinnaker.clouddriver.dcos.DcosClientProvider import com.netflix.spinnaker.clouddriver.dcos.DcosConfigurationProperties import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable import com.netflix.spinnaker.clouddriver.security.ProviderUtils import feign.FeignException import groovy.util.logging.Slf4j @@ -32,46 +30,36 @@ import mesosphere.dcos.client.DCOS import mesosphere.dcos.client.DCOSException import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.ApplicationContext 
import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope @Slf4j @Configuration -class DcosCredentialsInitializer implements CredentialsInitializerSynchronizable { +class DcosCredentialsInitializer { private final static LOGGER = LoggerFactory.getLogger(DcosCredentialsInitializer) @Autowired Registry spectatorRegistry @Bean - List dcosCredentials(String clouddriverUserAgentApplicationName, - DcosConfigurationProperties dcosConfigurationProperties, - ApplicationContext applicationContext, - AccountCredentialsRepository accountCredentialsRepository, - DcosClientProvider clientProvider, - List providerSynchronizerTypeWrappers) { - - synchronizeDcosAccounts(clouddriverUserAgentApplicationName, dcosConfigurationProperties, null, applicationContext, accountCredentialsRepository, clientProvider, providerSynchronizerTypeWrappers) + @DependsOn("dockerRegistryNamedAccountCredentials") + List dcosCredentials( + String clouddriverUserAgentApplicationName, + DcosConfigurationProperties dcosConfigurationProperties, + AccountCredentialsRepository accountCredentialsRepository, + DcosClientProvider clientProvider) { + + synchronizeDcosAccounts(clouddriverUserAgentApplicationName, + dcosConfigurationProperties, null, + accountCredentialsRepository, clientProvider) } - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeDcosAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @DependsOn("dockerRegistryNamedAccountCredentials") - List synchronizeDcosAccounts(String clouddriverUserAgentApplicationName, - DcosConfigurationProperties dcosConfigurationProperties, - CatsModule catsModule, - ApplicationContext applicationContext, - AccountCredentialsRepository accountCredentialsRepository, - DcosClientProvider clientProvider, - List providerSynchronizerTypeWrappers) { + private List synchronizeDcosAccounts( + String clouddriverUserAgentApplicationName, + DcosConfigurationProperties dcosConfigurationProperties, + CatsModule catsModule, + AccountCredentialsRepository accountCredentialsRepository, + DcosClientProvider clientProvider) { // TODO what to do with clouddriverUserAgentApplicationName? 
Map clusterMap = new HashMap<>() @@ -135,10 +123,6 @@ class DcosCredentialsInitializer implements CredentialsInitializerSynchronizable ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - if (accountsToAdd && catsModule) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers) - } - accountCredentialsRepository.all.findAll { it instanceof DcosAccountCredentials } as List diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/config/DcosConfiguration.groovy b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/config/DcosConfiguration.groovy index a81f4fb0eb3..f4a7942b064 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/config/DcosConfiguration.groovy +++ b/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/config/DcosConfiguration.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.config +import com.netflix.spectator.api.Registry import com.netflix.spinnaker.clouddriver.dcos.DcosClientProvider import com.netflix.spinnaker.clouddriver.dcos.DcosConfigurationProperties import com.netflix.spinnaker.clouddriver.dcos.deploy.util.mapper.DeployDcosServerGroupDescriptionToAppMapper @@ -54,8 +55,8 @@ class DcosConfiguration { } @Bean - DcosHealthIndicator dcosHealthIndicator(AccountCredentialsProvider accountCredentialsProvider, DcosClientProvider dcosClientProvider) { - new DcosHealthIndicator(accountCredentialsProvider, dcosClientProvider) + DcosHealthIndicator dcosHealthIndicator(Registry registry, AccountCredentialsProvider accountCredentialsProvider, DcosClientProvider dcosClientProvider) { + new DcosHealthIndicator(registry, accountCredentialsProvider, dcosClientProvider) } @Bean @@ -76,5 +77,3 @@ class DcosConfiguration { new PollingDcosDeploymentMonitor(operationPoller) } } - - diff --git a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/agent/DcosClusterAware.java b/clouddriver-dcos/src/main/java/com/netflix/spinnaker/clouddriver/dcos/provider/agent/DcosClusterAware.java similarity index 99% rename from clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/agent/DcosClusterAware.java rename to clouddriver-dcos/src/main/java/com/netflix/spinnaker/clouddriver/dcos/provider/agent/DcosClusterAware.java index dc3f5e1eb89..e3f62300b39 100644 --- a/clouddriver-dcos/src/main/groovy/com/netflix/spinnaker/clouddriver/dcos/provider/agent/DcosClusterAware.java +++ b/clouddriver-dcos/src/main/java/com/netflix/spinnaker/clouddriver/dcos/provider/agent/DcosClusterAware.java @@ -18,7 +18,6 @@ package com.netflix.spinnaker.clouddriver.dcos.provider.agent; import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials; - import java.util.Collection; public interface DcosClusterAware { diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceAndDecrementDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceAndDecrementDescriptionValidatorSpec.groovy index 7189990d9ba..224aea1db73 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceAndDecrementDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceAndDecrementDescriptionValidatorSpec.groovy @@ -1,12 +1,12 @@ /* * Copyright 
2018 Cerner Corporation - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.instance.TerminateDcosInstancesAndDecrementDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class TerminateDcosInstanceAndDecrementDescriptionValidatorSpec extends BaseSpecification { @@ -39,7 +39,7 @@ class TerminateDcosInstanceAndDecrementDescriptionValidatorSpec extends BaseSpec void "validate should give errors when given an empty TerminateDcosInstancesAndDecrementDescription"() { setup: def description = new TerminateDcosInstancesAndDecrementDescription(credentials: null, region: null, instanceIds: []) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceDescriptionValidatorSpec.groovy index 3fc4c0ed6f4..e855cf1ba27 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/instance/TerminateDcosInstanceDescriptionValidatorSpec.groovy @@ -1,12 +1,12 @@ /* * Copyright 2018 Cerner Corporation - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.instance.TerminateDcosInstancesDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class TerminateDcosInstanceDescriptionValidatorSpec extends BaseSpecification { @@ -39,7 +39,7 @@ class TerminateDcosInstanceDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an empty TerminateDcosInstancesDescription"() { setup: def description = new TerminateDcosInstancesDescription(credentials: null, dcosCluster: null, instanceIds: []) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/job/RunDcosJobValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/job/RunDcosJobValidatorSpec.groovy index feeee250be4..9db86f1badf 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/job/RunDcosJobValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/job/RunDcosJobValidatorSpec.groovy @@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.job.RunDcosJobDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class RunDcosJobValidatorSpec extends BaseSpecification { @@ -39,7 +39,7 @@ class RunDcosJobValidatorSpec extends BaseSpecification { void "validate should give errors when given an empty RunDcosJobDescription"() { setup: def description = new RunDcosJobDescription(credentials: null, dcosCluster: null, general: null) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy index 974076d7dae..5aacc064aae 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy @@ -1,12 +1,12 @@ /* * Copyright 2018 Cerner Corporation - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file 
except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -19,8 +19,8 @@ package com.netflix.spinnaker.clouddriver.dcos.deploy.validators.loadbalancer import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.loadbalancer.DeleteDcosLoadBalancerAtomicOperationDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Subject @@ -51,7 +51,7 @@ class DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -67,7 +67,7 @@ class DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -87,7 +87,7 @@ class DeleteDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy index c953946fd53..87b1905a76a 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/loadbalancer/UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec.groovy @@ -19,8 +19,8 @@ package com.netflix.spinnaker.clouddriver.dcos.deploy.validators.loadbalancer import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.loadbalancer.UpsertDcosLoadBalancerAtomicOperationDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Subject @@ -59,7 +59,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -83,7 +83,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -106,7 +106,7 @@ class 
UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -131,7 +131,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -156,7 +156,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -183,7 +183,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -207,7 +207,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) @@ -233,7 +233,7 @@ class UpsertDcosLoadBalancerAtomicOperationDescriptionValidatorSpec extends Base it } - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DeployDcosServerGroupDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DeployDcosServerGroupDescriptionValidatorSpec.groovy index f2583bc8f70..3a43567ab4c 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DeployDcosServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DeployDcosServerGroupDescriptionValidatorSpec.groovy @@ -18,8 +18,8 @@ package com.netflix.spinnaker.clouddriver.dcos.deploy.validators.servergroup import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.servergroup.DeployDcosServerGroupDescription +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class DeployDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { @@ -38,7 +38,7 @@ class DeployDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { setup: def description = new DeployDcosServerGroupDescription(account: null, dcosCluster: null, credentials: null, application: null, desiredCapacity: -1, cpus: -1, mem: -1, disk: -1, gpus: -1) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -60,7 +60,7 @@ class DeployDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { setup: def description = new DeployDcosServerGroupDescription(region: '-iNv.aLiD-', credentials: defaultCredentialsBuilder().account(BAD_ACCOUNT).build(), application: '-iNv.aLiD-', dcosCluster: "", desiredCapacity: 1, cpus: 1, mem: 512, disk: 0, gpus: 0) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ 
-82,7 +82,7 @@ class DeployDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { setup: def description = new DeployDcosServerGroupDescription(region: DEFAULT_REGION, dcosCluster: DEFAULT_REGION, credentials: testCredentials, application: "test", desiredCapacity: 1, cpus: 1, mem: 512, disk: 0, gpus: 0) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DestroyDcosServerGroupDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DestroyDcosServerGroupDescriptionValidatorSpec.groovy index 73020ca03a1..55daf0b8a3f 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DestroyDcosServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DestroyDcosServerGroupDescriptionValidatorSpec.groovy @@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.servergroup.DestroyDcosServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class DestroyDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { @@ -40,7 +40,7 @@ class DestroyDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an empty DestroyDcosServerGroupDescription"() { setup: def description = new DestroyDcosServerGroupDescription(region: null, dcosCluster: null, credentials: null, serverGroupName: null) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -56,7 +56,7 @@ class DestroyDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an invalid DestroyDcosServerGroupDescription"() { setup: def description = new DestroyDcosServerGroupDescription(region: INVALID_MARATHON_PART, dcosCluster: "", credentials: defaultCredentialsBuilder().account(BAD_ACCOUNT).build(), serverGroupName: INVALID_MARATHON_PART) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -72,7 +72,7 @@ class DestroyDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give no errors when given an valid DestroyDcosServerGroupDescription"() { setup: def description = new DestroyDcosServerGroupDescription(region: DEFAULT_REGION, dcosCluster: DEFAULT_REGION, credentials: testCredentials, serverGroupName: 'test') - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DisableDcosServerGroupDescriptionValidatorSpec.groovy 
b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DisableDcosServerGroupDescriptionValidatorSpec.groovy index 2645b356a72..3050905ad91 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DisableDcosServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/DisableDcosServerGroupDescriptionValidatorSpec.groovy @@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import com.netflix.spinnaker.clouddriver.dcos.deploy.description.servergroup.DisableDcosServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class DisableDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { @@ -40,7 +40,7 @@ class DisableDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an empty DestroyDcosServerGroupDescription"() { setup: def description = new DisableDcosServerGroupDescription(region: null, dcosCluster: null, credentials: null, serverGroupName: null) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -56,7 +56,7 @@ class DisableDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an invalid DestroyDcosServerGroupDescription"() { setup: def description = new DisableDcosServerGroupDescription(region: INVALID_MARATHON_PART, dcosCluster: "", credentials: defaultCredentialsBuilder().account(BAD_ACCOUNT).build(), serverGroupName: INVALID_MARATHON_PART) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -72,7 +72,7 @@ class DisableDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give no errors when given an valid DestroyDcosServerGroupDescription"() { setup: def description = new DisableDcosServerGroupDescription(region: DEFAULT_REGION, dcosCluster: DEFAULT_REGION, credentials: testCredentials, serverGroupName: 'test') - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/ResizeDcosServerGroupDescriptionValidatorSpec.groovy b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/ResizeDcosServerGroupDescriptionValidatorSpec.groovy index c4b3521aaa9..f89952461e1 100644 --- a/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/ResizeDcosServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-dcos/src/test/groovy/com/netflix/spinnaker/clouddriver/dcos/deploy/validators/servergroup/ResizeDcosServerGroupDescriptionValidatorSpec.groovy @@ -20,8 +20,8 @@ import com.netflix.spinnaker.clouddriver.dcos.security.DcosAccountCredentials import com.netflix.spinnaker.clouddriver.dcos.deploy.BaseSpecification import 
com.netflix.spinnaker.clouddriver.dcos.deploy.description.servergroup.ResizeDcosServerGroupDescription import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors import spock.lang.Subject class ResizeDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { @@ -40,7 +40,7 @@ class ResizeDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an empty ResizeDcosServerGroupDescription"() { setup: def description = new ResizeDcosServerGroupDescription(region: null, dcosCluster: null, credentials: null, serverGroupName: null, targetSize: null) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -57,7 +57,7 @@ class ResizeDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give errors when given an invalid DestroyDcosServerGroupDescription"() { setup: def description = new ResizeDcosServerGroupDescription(region: INVALID_MARATHON_PART, dcosCluster: "", credentials: defaultCredentialsBuilder().account(BAD_ACCOUNT).build(), serverGroupName: INVALID_MARATHON_PART, targetSize: -1) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: @@ -74,7 +74,7 @@ class ResizeDcosServerGroupDescriptionValidatorSpec extends BaseSpecification { void "validate should give no errors when given an valid DestroyDcosServerGroupDescription"() { setup: def description = new ResizeDcosServerGroupDescription(region: DEFAULT_REGION, dcosCluster: DEFAULT_REGION, credentials: testCredentials, serverGroupName: 'test', targetSize: 0) - def errorsMock = Mock(Errors) + def errorsMock = Mock(ValidationErrors) when: validator.validate([], description, errorsMock) then: diff --git a/clouddriver-docker/clouddriver-docker.gradle b/clouddriver-docker/clouddriver-docker.gradle index 4941f9e44ff..e6fcfe364a2 100644 --- a/clouddriver-docker/clouddriver-docker.gradle +++ b/clouddriver-docker/clouddriver-docker.gradle @@ -1,5 +1,39 @@ dependencies { - compile project(":clouddriver-core") - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + implementation project(":cats:cats-core") + + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.springframework.cloud:spring-cloud-context" + implementation "org.apache.groovy:groovy" + implementation "com.google.guava:guava" + implementation "com.jakewharton.retrofit:retrofit1-okhttp3-client" + implementation "com.netflix.spectator:spectator-api" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "org.apache.commons:commons-compress:1.21" + implementation "commons-io:commons-io" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "io.spinnaker.kork:kork-exceptions" + + testImplementation 
"com.squareup.retrofit2:retrofit-mock" + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation 'org.mockito:mockito-inline' + testImplementation "org.mockito:mockito-junit-jupiter" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-starter-test" + testImplementation "org.springframework.security:spring-security-test" + testImplementation "io.spinnaker.kork:kork-core" } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenService.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenService.groovy index 042c3ff889d..a2d685bd927 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenService.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenService.groovy @@ -18,14 +18,19 @@ package com.netflix.spinnaker.clouddriver.docker.registry.api.v2.auth import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.DockerUserAgent import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.exception.DockerRegistryAuthenticationException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import groovy.util.logging.Slf4j import org.apache.commons.io.IOUtils import retrofit.RestAdapter +import retrofit.converter.JacksonConverter import retrofit.http.GET import retrofit.http.Headers import retrofit.http.Path import retrofit.http.Query +import java.nio.charset.Charset +import java.nio.charset.StandardCharsets + @Slf4j class DockerBearerTokenService { Map realmToService @@ -71,10 +76,10 @@ class DockerBearerTokenService { def errCode = process.waitFor() log.debug("Full command is: ${pb.command()}") if (errCode != 0) { - def err = IOUtils.toString(process.getErrorStream()) + def err = IOUtils.toString(process.getErrorStream(), StandardCharsets.UTF_8) log.error("Password command returned a non 0 return code, stderr/stdout was: '${err}'") } - resolvedPassword = IOUtils.toString(process.getInputStream()).trim() + resolvedPassword = IOUtils.toString(process.getInputStream(), StandardCharsets.UTF_8).trim() log.debug("resolvedPassword is ${resolvedPassword}") } else if (passwordFile) { resolvedPassword = new BufferedReader(new FileReader(passwordFile)).getText() @@ -92,7 +97,7 @@ class DockerBearerTokenService { } } - def basicAuth = new String(Base64.encoder.encode(("${username}:${resolvedPassword}").bytes)) + return new String(Base64.encoder.encode(("${username}:${resolvedPassword}").bytes)) } String getBasicAuthHeader() { @@ -174,7 +179,8 @@ class DockerBearerTokenService { throw new DockerRegistryAuthenticationException("Www-Authenticate header must provide 'realm' parameter.") } if (!result.service) { - throw new DockerRegistryAuthenticationException("Www-Authenticate header must provide 'service' parameter.") + // This e.g. 
is the case for OpenShift Container Registry + result.service = null } return result @@ -184,7 +190,12 @@ class DockerBearerTokenService { def tokenService = realmToService.get(realm) if (tokenService == null) { - def builder = new RestAdapter.Builder().setEndpoint(realm).setLogLevel(RestAdapter.LogLevel.NONE).build() + def builder = new RestAdapter.Builder() + .setEndpoint(realm) + .setConverter(new JacksonConverter()) + .setLogLevel(RestAdapter.LogLevel.NONE) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) + .build() tokenService = builder.create(TokenService.class) realmToService[realm] = tokenService } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClient.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClient.groovy index 06c7da90f39..d8200dfcfab 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClient.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClient.groovy @@ -23,26 +23,23 @@ import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.auth.DockerBeare import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.auth.DockerBearerTokenService import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.exception.DockerRegistryAuthenticationException import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.exception.DockerRegistryOperationException -import com.netflix.spinnaker.clouddriver.docker.registry.security.TrustAllX509TrustManager -import com.squareup.okhttp.OkHttpClient +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import groovy.util.logging.Slf4j import org.slf4j.Logger import org.slf4j.LoggerFactory import retrofit.RestAdapter -import retrofit.RetrofitError -import retrofit.client.OkClient import retrofit.client.Response import retrofit.converter.GsonConverter +import retrofit.converter.JacksonConverter import retrofit.http.GET import retrofit.http.Header import retrofit.http.Headers import retrofit.http.Path import retrofit.http.Query -import javax.net.ssl.SSLContext -import javax.net.ssl.TrustManager import java.time.Instant -import java.util.concurrent.TimeUnit @Slf4j class DockerRegistryClient { @@ -58,7 +55,9 @@ class DockerRegistryClient { long clientTimeoutMillis int paginateSize String catalogFile + String repositoriesRegex boolean insecureRegistry + DockerOkClientProvider okClientProvider Builder address(String address) { this.address = address @@ -110,22 +109,33 @@ class DockerRegistryClient { return this } + Builder repositoriesRegex(String regex) { + this.repositoriesRegex = regex + return this + } + + Builder insecureRegistry(boolean insecureRegistry) { this.insecureRegistry = insecureRegistry return this } + Builder okClientProvider(DockerOkClientProvider okClientProvider) { + this.okClientProvider = okClientProvider + return this + } + DockerRegistryClient build() { if (password && passwordFile || password && passwordCommand || passwordFile && passwordCommand) { throw new IllegalArgumentException('Error, at most one of "password", "passwordFile", "passwordCommand" or "dockerconfigFile" can be specified') } if (password || 
passwordCommand) { - return new DockerRegistryClient(address, email, username, password, passwordCommand, clientTimeoutMillis, paginateSize, catalogFile, insecureRegistry) + return new DockerRegistryClient(address, email, username, password, passwordCommand, clientTimeoutMillis, paginateSize, catalogFile, repositoriesRegex, insecureRegistry, okClientProvider) } else if (passwordFile) { - return new DockerRegistryClient(address, email, username, passwordFile, clientTimeoutMillis, paginateSize, catalogFile, insecureRegistry) + return new DockerRegistryClient(address, email, username, passwordFile, clientTimeoutMillis, paginateSize, catalogFile, repositoriesRegex, insecureRegistry, okClientProvider) } else { - return new DockerRegistryClient(address, clientTimeoutMillis, paginateSize, catalogFile, insecureRegistry) + return new DockerRegistryClient(address, clientTimeoutMillis, paginateSize, catalogFile, repositoriesRegex, insecureRegistry, okClientProvider) } } @@ -140,6 +150,7 @@ class DockerRegistryClient { DockerRegistryService registryService GsonConverter converter String catalogFile + String repositoriesRegex final static String userAgent = DockerUserAgent.getUserAgent() final int paginateSize @@ -148,38 +159,73 @@ class DockerRegistryClient { return tokenService?.basicAuth } - DockerRegistryClient(String address, long clientTimeoutMillis, int paginateSize, String catalogFile, boolean insecureRegistry) { + DockerRegistryClient(String address, + long clientTimeoutMillis, + int paginateSize, + String catalogFile, + String repositoriesRegex, + boolean insecureRegistry, + DockerOkClientProvider okClientProvider) { + this.paginateSize = paginateSize this.tokenService = new DockerBearerTokenService() - OkHttpClient client = new OkHttpClient() - client.setReadTimeout(clientTimeoutMillis, TimeUnit.MILLISECONDS) - - if (insecureRegistry) { - SSLContext sslContext = SSLContext.getInstance("SSL") - TrustManager[] trustManagers = [new TrustAllX509TrustManager()] - sslContext.init(null, trustManagers, new java.security.SecureRandom()) - client.setSslSocketFactory(sslContext.getSocketFactory()) - } this.registryService = new RestAdapter.Builder() .setEndpoint(address) - .setClient(new OkClient(client)) + .setClient(okClientProvider.provide(address, clientTimeoutMillis, insecureRegistry)) + .setConverter(new JacksonConverter()) .setLogLevel(RestAdapter.LogLevel.NONE) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .build() .create(DockerRegistryService) this.converter = new GsonConverter(new GsonBuilder().create()) this.address = address this.catalogFile = catalogFile + this.repositoriesRegex = repositoriesRegex } - DockerRegistryClient(String address, String email, String username, String password, String passwordCommand, long clientTimeoutMillis, int paginateSize, String catalogFile, boolean insecureRegistry) { - this(address, clientTimeoutMillis, paginateSize, catalogFile, insecureRegistry) + DockerRegistryClient(String address, + String email, + String username, + String password, + String passwordCommand, + long clientTimeoutMillis, + int paginateSize, + String catalogFile, + String repositoriesRegex, + boolean insecureRegistry, + DockerOkClientProvider okClientProvider) { + this(address, clientTimeoutMillis, paginateSize, catalogFile, repositoriesRegex, insecureRegistry, okClientProvider) this.tokenService = new DockerBearerTokenService(username, password, passwordCommand) this.email = email } - DockerRegistryClient(String address, String email, String username, File 
passwordFile, long clientTimeoutMillis, int paginateSize, String catalogFile, boolean insecureRegistry) { - this(address, clientTimeoutMillis, paginateSize, catalogFile, insecureRegistry) + DockerRegistryClient(String address, + int paginateSize, + String catalogFile, + String repositoriesRegex, + DockerRegistryService dockerRegistryService, + DockerBearerTokenService dockerBearerTokenService) { + this.paginateSize = paginateSize + this.converter = new GsonConverter(new GsonBuilder().create()) + this.address = address + this.catalogFile = catalogFile + this.repositoriesRegex = repositoriesRegex + this.tokenService = dockerBearerTokenService + this.registryService = dockerRegistryService; + } + + DockerRegistryClient(String address, + String email, + String username, + File passwordFile, + long clientTimeoutMillis, + int paginateSize, + String catalogFile, + String repositoriesRegex, + boolean insecureRegistry, + DockerOkClientProvider okClientProvider) { + this(address, clientTimeoutMillis, paginateSize, catalogFile, repositoriesRegex, insecureRegistry, okClientProvider) this.tokenService = new DockerBearerTokenService(username, passwordFile) this.email = email } @@ -197,6 +243,13 @@ class DockerRegistryClient { ]) Response getManifest(@Path(value="name", encode=false) String name, @Path(value="reference", encode=false) String reference, @Header("Authorization") String token, @Header("User-Agent") String agent) + @GET("/v2/{name}/manifests/{reference}") + @Headers([ + "Docker-Distribution-API-Version: registry/2.0", + "Accept: application/vnd.docker.distribution.manifest.v2+json" + ]) + Response getSchemaV2Manifest(@Path(value="name", encode=false) String name, @Path(value="reference", encode=false) String reference, @Header("Authorization") String token, @Header("User-Agent") String agent) + @GET("/v2/_catalog") @Headers([ "Docker-Distribution-API-Version: registry/2.0" @@ -215,6 +268,12 @@ class DockerRegistryClient { "Docker-Distribution-API-Version: registry/2.0" ]) Response checkVersion(@Header("Authorization") String token, @Header("User-Agent") String agent) + + @GET("/v2/{repository}/blobs/{digest}") + @Headers([ + "Docker-Distribution-API-Version: registry/2.0" + ]) + Response getDigestContent(@Path(value="repository", encode=false) String repository, @Path(value="digest", encode=false) String digest, @Header("Authorization") String token, @Header("User-Agent") String agent) } public String getDigest(String name, String tag) { @@ -226,6 +285,21 @@ class DockerRegistryClient { return digest?.value } + public String getConfigDigest(String name, String tag) { + def response = getSchemaV2Manifest(name, tag) + def manifestMap = converter.fromBody(response.body, Map) as Map + return manifestMap?.config?.digest + } + + public Map getDigestContent(String name, String digest) { + def response = request({ + registryService.getDigestContent(name, digest, tokenService.basicAuthHeader, userAgent) + }, { token -> + registryService.getDigestContent(name, digest, token, userAgent) + }, name) + return converter.fromBody(response.body, Map) + } + private Map tagDateCache = [:] public Instant getCreationDate(String name, String tag) { @@ -247,6 +321,14 @@ class DockerRegistryClient { }, name) } + private getSchemaV2Manifest(String name, String tag) { + request({ + registryService.getSchemaV2Manifest(name, tag, tokenService.basicAuthHeader, userAgent) + }, { token -> + registryService.getSchemaV2Manifest(name, tag, token, userAgent) + }, name) + } + private static String 
parseLink(retrofit.client.Header header) { if (!header.name.equalsIgnoreCase("link")) { return null @@ -308,7 +390,7 @@ class DockerRegistryClient { String userDefinedCatalog = new File(catalogFile).getText() return (DockerRegistryCatalog) new Gson().fromJson(userDefinedCatalog, DockerRegistryCatalog.class) } catch (Exception e) { - throw new DockerRegistryOperationException("Unable to read catalog file $catalogFile:" + e.getMessage()) + throw new DockerRegistryOperationException("Unable to read catalog file $catalogFile: " + e.getMessage(), e) } } @@ -322,13 +404,16 @@ class DockerRegistryClient { registryService.getCatalog(paginateSize, token, userAgent) }, "_catalog") } catch (Exception e) { - log.warn("Error encountered during catalog of $path" + e.getMessage()) + log.warn("Error encountered during catalog of $path", e) return new DockerRegistryCatalog(repositories: []) } def nextPath = findNextLink(response?.headers) def catalog = (DockerRegistryCatalog) converter.fromBody(response.body, DockerRegistryCatalog) + if(repositoriesRegex) { + catalog.repositories = catalog.repositories.findAll { it ==~ repositoriesRegex } + } if (nextPath) { def nextCatalog = getCatalog(nextPath) catalog.repositories.addAll(nextCatalog.repositories) @@ -364,10 +449,10 @@ class DockerRegistryClient { public void checkV2Availability() { try { doCheckV2Availability() - } catch (RetrofitError error) { + } catch (SpinnakerServerException error) { // If no credentials are supplied, and we got a 401, the best[1] we can do is assume the registry is OK. // [1] https://docs.docker.com/registry/spec/api/#/api-version-check - if (!tokenService.basicAuthHeader && error.response?.status == 401) { + if (!tokenService.basicAuthHeader && error instanceof SpinnakerHttpException && ((SpinnakerHttpException)error).getResponseCode() == 401) { return } Response response = doCheckV2Availability(tokenService.basicAuthHeader) @@ -407,20 +492,20 @@ class DockerRegistryClient { } else { response = withoutToken() } - } catch (RetrofitError error) { - def status = error.response?.status + } catch (SpinnakerHttpException error) { + def status = error.getResponseCode() // note, this is a workaround for registries that should be returning // 401 when a token expires if ([400, 401].contains(status)) { - String authenticateHeader = null + List authenticateHeader = null - error.response.headers.forEach { header -> - if (header.name.equalsIgnoreCase("www-authenticate")) { + error.headers.entrySet().forEach { header -> + if (header.key.equalsIgnoreCase("www-authenticate")) { authenticateHeader = header.value } } - if (!authenticateHeader) { + if (!authenticateHeader || authenticateHeader.isEmpty()) { log.warn "Registry $address returned status $status for request '$target' without a WWW-Authenticate header" tokenService.clearToken(target) throw error @@ -428,19 +513,21 @@ class DockerRegistryClient { String bearerPrefix = "bearer " String basicPrefix = "basic " - if (bearerPrefix.equalsIgnoreCase(authenticateHeader.substring(0, bearerPrefix.length()))) { - // If we got a 401 and the request requires bearer auth, get a new token and try again - dockerToken = tokenService.getToken(target, authenticateHeader.substring(bearerPrefix.length())) - token = "Bearer ${(dockerToken.bearer_token ?: dockerToken.token) ?: dockerToken.access_token}" - response = withToken(token) - } else if (basicPrefix.equalsIgnoreCase(authenticateHeader.substring(0, basicPrefix.length()))) { - // If we got a 401 and the request requires basic auth, there's no point in 
trying again - tokenService.clearToken(target) - throw error - } else { - tokenService.clearToken(target) - throw new DockerRegistryAuthenticationException("Docker registry must support 'Bearer' or 'Basic' authentication.") + for (String headerValue in authenticateHeader) { + if (bearerPrefix.equalsIgnoreCase(headerValue.substring(0, bearerPrefix.length()))) { + // If we got a 401 and the request requires bearer auth, get a new token and try again + dockerToken = tokenService.getToken(target, headerValue.substring(bearerPrefix.length())) + token = "Bearer ${(dockerToken.bearer_token ?: dockerToken.token) ?: dockerToken.access_token}" + return withToken(token) + } else if (basicPrefix.equalsIgnoreCase(headerValue.substring(0, basicPrefix.length()))) { + // If we got a 401 and the request requires basic auth, there's no point in trying again + tokenService.clearToken(target) + throw error + } } + + tokenService.clearToken(target) + throw new DockerRegistryAuthenticationException("Docker registry must support 'Bearer' or 'Basic' authentication.") } else { throw error } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/cache/DefaultCacheDataBuilder.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/cache/DefaultCacheDataBuilder.groovy index 651eb65a444..6e584301a27 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/cache/DefaultCacheDataBuilder.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/cache/DefaultCacheDataBuilder.groovy @@ -18,6 +18,9 @@ package com.netflix.spinnaker.clouddriver.docker.registry.cache import com.netflix.spinnaker.cats.cache.DefaultCacheData +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.ConcurrentMap + class DefaultCacheDataBuilder { String id = '' int ttlSeconds = -1 @@ -28,7 +31,7 @@ class DefaultCacheDataBuilder { new DefaultCacheData(id, ttlSeconds, attributes, relationships) } - public static Map defaultCacheDataBuilderMap() { - return [:].withDefault { String id -> new DefaultCacheDataBuilder(id: id) } + public static ConcurrentMap defaultCacheDataBuilderMap() { + return new ConcurrentHashMap() } } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationProperties.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationProperties.groovy index a3fb748b5ec..0dadf2f862c 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationProperties.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationProperties.groovy @@ -16,12 +16,19 @@ package com.netflix.spinnaker.clouddriver.docker.registry.config +import com.fasterxml.jackson.annotation.JsonTypeName +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition +import com.netflix.spinnaker.fiat.model.resources.Permissions +import groovy.transform.EqualsAndHashCode import groovy.transform.ToString @ToString(includeNames = true) class DockerRegistryConfigurationProperties { + @ToString(includeNames = true) - static class ManagedAccount { + @JsonTypeName("dockerRegistry") + @EqualsAndHashCode + static class ManagedAccount implements CredentialsDefinition { String name String environment String accountType 
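The ManagedAccount change above wires Docker registry accounts into kork's CredentialsDefinition model: the @JsonTypeName("dockerRegistry") annotation is what lets a stored account definition be resolved back to this concrete class. A minimal, self-contained sketch of that polymorphic round trip, using stand-in types rather than the real kork interfaces (the AccountDefinition/DockerRegistryAccount names, the '@type' property, and the example values are illustrative assumptions, not part of this diff):

```groovy
import com.fasterxml.jackson.annotation.JsonTypeInfo
import com.fasterxml.jackson.annotation.JsonTypeName
import com.fasterxml.jackson.databind.ObjectMapper

// Stand-in for com.netflix.spinnaker.credentials.definition.CredentialsDefinition.
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = '@type')
interface AccountDefinition {}

// Mirrors ManagedAccount's @JsonTypeName("dockerRegistry") in the hunk above.
@JsonTypeName('dockerRegistry')
class DockerRegistryAccount implements AccountDefinition {
  String name
  String address
}

def mapper = new ObjectMapper()
mapper.registerSubtypes(DockerRegistryAccount)

def json = '{"@type":"dockerRegistry","name":"dockerhub","address":"https://index.docker.io"}'
def account = mapper.readValue(json, AccountDefinition)
assert account instanceof DockerRegistryAccount
assert account.name == 'dockerhub'
```

The type-name indirection is what allows new account types to be registered without the deserializer knowing every concrete class up front.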
@@ -49,14 +56,21 @@ class DockerRegistryConfigurationProperties { int paginateSize // Track digest changes. This is _not_ recommended as it consumes a high QPM, and most registries are flaky. boolean trackDigests + // Inspect image config digests so image labels can be cached + boolean inspectDigests // Sort tags by creation date. boolean sortTagsByDate + boolean insecureRegistry // List of all repositories to index. Can be of the form /, // or for repositories like 'ubuntu'. List repositories List skip // a file listing all repositories to index String catalogFile + // Allows filtering the repositories by a regular expression + String repositoriesRegex + // Permissions for using this account + Permissions.Builder permissions = new Permissions.Builder() } List accounts = [] diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupController.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupController.groovy index 7fa4643eec7..25dcc1021e1 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupController.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupController.groovy @@ -24,27 +24,48 @@ import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistry import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import org.springframework.beans.factory.annotation.Autowired +import org.springframework.security.access.prepost.PostFilter +import org.springframework.security.access.prepost.PreAuthorize import org.springframework.web.bind.annotation.RequestMapping import org.springframework.web.bind.annotation.RequestMethod import org.springframework.web.bind.annotation.RequestParam import org.springframework.web.bind.annotation.RestController @RestController -@RequestMapping("/dockerRegistry/images") +@RequestMapping(["/dockerRegistry/images", "/titus/images"]) class DockerRegistryImageLookupController { @Autowired - private final Cache cacheView + private Cache cacheView @Autowired AccountCredentialsProvider accountCredentialsProvider @RequestMapping(value = "/tags", method = RequestMethod.GET) + @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") List getTags(@RequestParam('account') String account, @RequestParam('repository') String repository) { def credentials = (DockerRegistryNamedAccountCredentials) accountCredentialsProvider.getCredentials(account) - credentials?.getTags(repository) + if (!credentials) { + return [] + } + + return DockerRegistryProviderUtils.getAllMatchingKeyPattern( + cacheView, + Keys.Namespace.TAGGED_IMAGE.ns, + Keys.getTaggedImageKey(account, repository, "*") + ).sort { a, b -> + if (credentials.sortTagsByDate) { + b.attributes.date.epochSecond <=> a.attributes.date.epochSecond + } else { + a.id <=> b.id + } + }.collect { + def parse = Keys.parse(it.id) + return (String) parse.tag + } } @RequestMapping(value = '/find', method = RequestMethod.GET) + @PostFilter("hasPermission(filterObject['account'], 'ACCOUNT', 'READ')") List find(LookupOptions lookupOptions) { def account = lookupOptions.account ?: "" @@ -88,27 +109,56 @@ class DockerRegistryImageLookupController { if (!credentials) { return null } else { - def parse = Keys.parse(it.id) + def parse =
Keys.parse(it.getId()) + def repo = (String) parse.repository + def tag = (String) parse.tag + + // if the request asks for specific repositories or tags, + // do the filtering accordingly + if (lookupOptions.repository && !lookupOptions.repository.equals(repo) || + lookupOptions.repository && lookupOptions.tag && !lookupOptions.tag.equals(tag)) { + return null + } + + if (lookupOptions.includeDetails) { + return [ + repository : repo, + tag : tag, + account : it.attributes.account, + registry : credentials.getRegistry(), + digest : it.attributes.digest, + commitId : it.attributes.labels?.commitId, + buildNumber: it.attributes.labels?.buildNumber, + branch : it.attributes.labels?.branch, + artifact : generateArtifact(credentials.getRegistry(), parse.repository, parse.tag, it.attributes.labels) + ] + } + return [ - repository: (String) parse.repository, //TODO: Deprecate - tag : (String) parse.tag, //TODO: Deprecate - account : it.attributes.account, //TODO: Deprecate - registry : credentials.registry, //TODO: Deprecate - digest : it.attributes.digest, //TODO: Deprecate - artifact : generateArtifact(credentials.registry, parse.repository, parse.tag) + repository: repo, + tag : tag, + account : it.attributes.account, + registry : credentials.getRegistry(), + digest : it.attributes.digest, + artifact : generateArtifact(credentials.getRegistry(), parse.repository, parse.tag) ] + } } } Map generateArtifact( String registry,def repository, def tag) { + generateArtifact( registry, repository, tag, new HashMap()); + } + + Map generateArtifact( String registry,def repository, def tag, def labels) { String reference = "${registry}/${repository}:${tag}" [ name : repository, type : "docker", version : tag, reference : reference, - metadata : [ registry: registry ] + metadata : [ registry: registry, labels: labels ] ] } @@ -139,14 +189,21 @@ class DockerRegistryImageLookupController { private boolean isTrackDigestsDisabled() { return accountCredentialsProvider.all - .findAll { it.cloudProvider == DockerRegistryCloudProvider.DOCKER_REGISTRY } - .every { !((DockerRegistryNamedAccountCredentials) it).trackDigests } + .findAll { + it.getCloudProvider() == DockerRegistryCloudProvider.DOCKER_REGISTRY + } + .every { + !((DockerRegistryNamedAccountCredentials) it).getTrackDigests() + } } private static class LookupOptions { String q String account String region + String repository + String tag Integer count + Boolean includeDetails } } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicator.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicator.groovy deleted file mode 100644 index e8fb680590f..00000000000 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicator.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
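To make the DockerRegistryImageLookupController changes above concrete: generateArtifact now threads registry image labels into the artifact's metadata, and find() returns one such map per cached tag. A small runnable sketch of the map shape it builds, mirroring the four-argument overload shown above (the registry, repository, tag, and label values here are hypothetical):

```groovy
// Hypothetical lookup inputs.
def registry = 'index.docker.io'
def repository = 'library/nginx'
def tag = '1.25'
def labels = [commitId: 'abc123', branch: 'main']

// Mirrors the body of generateArtifact(registry, repository, tag, labels) above.
def artifact = [
  name     : repository,
  type     : 'docker',
  version  : tag,
  reference: "${registry}/${repository}:${tag}".toString(),
  metadata : [registry: registry, labels: labels]
]

assert artifact.reference == 'index.docker.io/library/nginx:1.25'
assert artifact.metadata.labels.branch == 'main'
```

When includeDetails is not set, the three-argument overload above supplies an empty label map, so callers still receive the same shape.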
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.docker.registry.health - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.core.AlwaysUpHealthIndicator -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryCredentials -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import groovy.transform.InheritConstructors -import org.springframework.http.HttpStatus -import org.springframework.scheduling.annotation.Scheduled -import org.springframework.web.bind.annotation.ResponseStatus - -class DockerRegistryHealthIndicator extends AlwaysUpHealthIndicator { - - AccountCredentialsProvider accountCredentialsProvider - - DockerRegistryHealthIndicator(Registry registry, AccountCredentialsProvider accountCredentialsProvider) { - super(registry, "docker") - this.accountCredentialsProvider = accountCredentialsProvider - } - - @Scheduled(fixedDelay = 300000L) - void checkHealth() { - updateHealth { - Set dockerRegistryCredentialsSet = accountCredentialsProvider.all.findAll { - it instanceof DockerRegistryNamedAccountCredentials - } as Set - - for (DockerRegistryNamedAccountCredentials accountCredentials in dockerRegistryCredentialsSet) { - DockerRegistryCredentials dockerRegistryCredentials = accountCredentials.credentials - - dockerRegistryCredentials.client.checkV2Availability() - } - } - } - - @ResponseStatus(value = HttpStatus.SERVICE_UNAVAILABLE, reason = "Problem communicating with DockerRegistry.") - @InheritConstructors - static class DockerRegistryIOException extends RuntimeException {} -} - diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/DockerRegistryProvider.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/DockerRegistryProvider.groovy index b963bf5e1a3..883cf266dd2 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/DockerRegistryProvider.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/DockerRegistryProvider.groovy @@ -16,27 +16,23 @@ package com.netflix.spinnaker.clouddriver.docker.registry.provider -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware + import com.netflix.spinnaker.clouddriver.cache.SearchableProvider import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider import com.netflix.spinnaker.clouddriver.docker.registry.cache.Keys +import com.netflix.spinnaker.clouddriver.security.BaseProvider -import static com.netflix.spinnaker.clouddriver.cache.SearchableProvider.SearchableResource - -class DockerRegistryProvider extends AgentSchedulerAware implements SearchableProvider { - public static final String PROVIDER_NAME = DockerRegistryProvider.name +class DockerRegistryProvider extends BaseProvider implements SearchableProvider { + public static final String PROVIDER_NAME = DockerRegistryCloudProvider.DOCKER_REGISTRY final Set defaultCaches = Collections.emptySet() final Map urlMappingTemplates = Collections.emptyMap() - final Collection agents final DockerRegistryCloudProvider cloudProvider - DockerRegistryProvider(DockerRegistryCloudProvider cloudProvider, Collection agents) { + 
DockerRegistryProvider(DockerRegistryCloudProvider cloudProvider) { this.cloudProvider = cloudProvider - this.agents = agents } @Override diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgent.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgent.groovy index d81b1f5a4de..e40d9ce8c98 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgent.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgent.groovy @@ -25,9 +25,10 @@ import com.netflix.spinnaker.clouddriver.docker.registry.cache.Keys import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProvider import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProviderUtils import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryCredentials +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import groovy.util.logging.Slf4j -import retrofit.RetrofitError +import java.util.concurrent.ConcurrentMap import java.util.concurrent.TimeUnit import static java.util.Collections.unmodifiableSet @@ -35,8 +36,8 @@ import static java.util.Collections.unmodifiableSet @Slf4j class DockerRegistryImageCachingAgent implements CachingAgent, AccountAware, AgentIntervalAware { static final Set types = unmodifiableSet([ - AgentDataType.Authority.INFORMATIVE.forType(Keys.Namespace.TAGGED_IMAGE.ns), - AgentDataType.Authority.INFORMATIVE.forType(Keys.Namespace.IMAGE_ID.ns) + AgentDataType.Authority.AUTHORITATIVE.forType(Keys.Namespace.TAGGED_IMAGE.ns), + AgentDataType.Authority.AUTHORITATIVE.forType(Keys.Namespace.IMAGE_ID.ns) ] as Set) private DockerRegistryCredentials credentials @@ -89,14 +90,14 @@ class DockerRegistryImageCachingAgent implements CachingAgent, AccountAware, Age credentials.repositories.findAll { it -> threadCount == 1 || (it.hashCode() % threadCount).abs() == index }.collectEntries { repository -> - if(credentials.skip?.contains(repository)) { - return [:] + if (credentials.skip?.contains(repository)) { + return [:] } DockerRegistryTags tags = null try { tags = credentials.client.getTags(repository) } catch (Exception e) { - if (e instanceof RetrofitError && e.response?.status == 404) { + if (e instanceof SpinnakerHttpException && ((SpinnakerHttpException)e).getResponseCode() == 404) { log.warn("Could not load tags for ${repository} in ${credentials.client.address}, reason: ${e.message}") } else { log.error("Could not load tags for ${repository} in ${credentials.client.address}", e) @@ -128,11 +129,11 @@ class DockerRegistryImageCachingAgent implements CachingAgent, AccountAware, Age private CacheResult buildCacheResult(Map> tagMap) { log.info("Describing items in ${agentType}") - Map cachedTags = DefaultCacheDataBuilder.defaultCacheDataBuilderMap() - Map cachedIds = DefaultCacheDataBuilder.defaultCacheDataBuilderMap() + ConcurrentMap cachedTags = DefaultCacheDataBuilder.defaultCacheDataBuilderMap() + ConcurrentMap cachedIds = DefaultCacheDataBuilder.defaultCacheDataBuilderMap() tagMap.forEach { repository, tags -> - tags.forEach { tag -> + tags.parallelStream().forEach { tag -> if (!tag) { log.warn("Empty tag encountered for $accountName/$repository, not caching") return @@ -140,12 +141,15 @@ class 
DockerRegistryImageCachingAgent implements CachingAgent, AccountAware, Age def tagKey = Keys.getTaggedImageKey(accountName, repository, tag) def imageIdKey = Keys.getImageIdKey(DockerRegistryProviderUtils.imageId(registry, repository, tag)) def digest = null + def digestContent = null + def creationDate = null if (credentials.trackDigests) { try { digest = credentials.client.getDigest(repository, tag) } catch (Exception e) { - if (e instanceof RetrofitError && ((RetrofitError) e).response?.status == 404) { + if(e instanceof SpinnakerHttpException && ((SpinnakerHttpException)e).getResponseCode() == 404) + { // Indicates inconsistency in registry, or deletion between call for all tags and manifest retrieval. // In either case, we need to trust that this tag no longer exists. log.warn("Image manifest for $tagKey no longer available; tag will not be cached: $e.message") @@ -158,16 +162,39 @@ class DockerRegistryImageCachingAgent implements CachingAgent, AccountAware, Age } } - cachedTags[tagKey].with { - attributes.name = "${repository}:${tag}".toString() - attributes.account = accountName - attributes.digest = digest + if (credentials.inspectDigests) { + try { + digest = credentials.client.getConfigDigest(repository, tag) + digestContent = credentials.client.getDigestContent(repository, digest) + } catch (Exception e) { + log.warn("Error retrieving config digest for $tagKey; digest and tag will not be cached: $e.message") + } + } + + if (credentials.sortTagsByDate) { + try { + creationDate = credentials.client.getCreationDate(repository, tag) + } catch (Exception e) { + log.warn("Unable to fetch tag creation date, reason: {} (tag: {}, repository: {})", e.message, tag, repository) + } } - cachedIds[imageIdKey].with { - attributes.tagKey = tagKey - attributes.account = accountName + def tagData = new DefaultCacheDataBuilder() + tagData.setId(tagKey) + tagData.attributes.put("name", "${repository}:${tag}".toString()) + tagData.attributes.put("account", accountName) + tagData.attributes.put("digest", digest) + tagData.attributes.put("date", creationDate) + if (digestContent?.config != null) { + tagData.attributes.put("labels", digestContent.config.Labels) } + cachedTags.put(tagKey, tagData) + + def idData = new DefaultCacheDataBuilder() + idData.setId(imageIdKey) + idData.attributes.put("tagKey", tagKey) + idData.attributes.put("account", accountName) + cachedIds.put(imageIdKey, idData) } null diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/config/DockerRegistryProviderConfig.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/config/DockerRegistryProviderConfig.groovy index 99826505570..5244048358f 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/config/DockerRegistryProviderConfig.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/config/DockerRegistryProviderConfig.groovy @@ -16,81 +16,15 @@ package com.netflix.spinnaker.clouddriver.docker.registry.provider.config -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProvider -import 
com.netflix.spinnaker.clouddriver.docker.registry.provider.agent.DockerRegistryImageCachingAgent -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope - -import java.util.concurrent.ConcurrentHashMap @Configuration class DockerRegistryProviderConfig { @Bean - @DependsOn('dockerRegistryNamedAccountCredentials') - DockerRegistryProvider dockerRegistryProvider(DockerRegistryCloudProvider dockerRegistryCloudProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { - def dockerRegistryProvider = new DockerRegistryProvider(dockerRegistryCloudProvider, Collections.newSetFromMap(new ConcurrentHashMap())) - - synchronizeDockerRegistryProvider(dockerRegistryProvider, dockerRegistryCloudProvider, accountCredentialsRepository, objectMapper, registry) - - dockerRegistryProvider - } - - @Bean - DockerRegistryProviderSynchronizerTypeWrapper dockerRegistryProviderSynchronizerTypeWrapper() { - new DockerRegistryProviderSynchronizerTypeWrapper() - } - - class DockerRegistryProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return DockerRegistryProviderSynchronizer - } - } - - class DockerRegistryProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - DockerRegistryProviderSynchronizer synchronizeDockerRegistryProvider(DockerRegistryProvider dockerRegistryProvider, - DockerRegistryCloudProvider dockerRegistryCloudProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(dockerRegistryProvider) - def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, DockerRegistryNamedAccountCredentials) - - allAccounts.each { DockerRegistryNamedAccountCredentials credentials -> - if (!scheduledAccounts.contains(credentials.accountName)) { - def newlyAddedAgents = [] - - credentials.cacheThreads.times { i -> - newlyAddedAgents << new DockerRegistryImageCachingAgent(dockerRegistryCloudProvider, credentials.accountName, credentials.credentials, i, credentials.cacheThreads, credentials.cacheIntervalSeconds, credentials.registry) - } - - // If there is an agent scheduler, then this provider has been through the AgentController in the past. - // In that case, we need to do the scheduling here (because accounts have been added to a running system). 
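The block being deleted above used to create one DockerRegistryImageCachingAgent per configured cacheThread; that per-account sharding survives inside the agent itself, which claims repositories with the hashCode-modulo predicate shown earlier in this diff. A self-contained illustration of that sharding rule (the thread count and repository names are made up for the example):

```groovy
// Each of N agents (index 0..N-1) keeps the repositories whose hash lands on
// its index; with a single agent the predicate degenerates to "keep everything".
int threadCount = 3
List<String> repositories = ['library/nginx', 'library/redis', 'myorg/app', 'myorg/worker']

(0..<threadCount).each { int index ->
  def mine = repositories.findAll {
    threadCount == 1 || (it.hashCode() % threadCount).abs() == index
  }
  println "agent ${index} caches: ${mine}"
}
```

Every repository matches exactly one index, so the agents partition the catalog between themselves without any coordination.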
- if (dockerRegistryProvider.agentScheduler) { - ProviderUtils.rescheduleAgents(dockerRegistryProvider, newlyAddedAgents) - } - - dockerRegistryProvider.agents.addAll(newlyAddedAgents) - } - } - - new DockerRegistryProviderSynchronizer() + DockerRegistryProvider dockerRegistryProvider(DockerRegistryCloudProvider dockerRegistryCloudProvider) { + new DockerRegistryProvider(dockerRegistryCloudProvider) } } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentials.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentials.groovy index 94b7bb5ff02..619483639e9 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentials.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentials.groovy @@ -23,12 +23,14 @@ class DockerRegistryCredentials { private List repositories private final boolean reloadRepositories private final boolean trackDigests + private final boolean inspectDigests private final boolean sortTagsByDate private List skip - DockerRegistryCredentials(DockerRegistryClient client, List repositories, boolean trackDigests, List skip, boolean sortTagsByDate) { + DockerRegistryCredentials(DockerRegistryClient client, List repositories, boolean trackDigests, boolean inspectDigests, List skip, boolean sortTagsByDate) { this.client = client this.trackDigests = trackDigests + this.inspectDigests = inspectDigests this.skip = skip if (!repositories) { this.reloadRepositories = true @@ -52,6 +54,14 @@ class DockerRegistryCredentials { return trackDigests } + boolean getInspectDigests() { + return inspectDigests + } + + boolean getSortTagsByDate() { + return sortTagsByDate + } + List getSkip(){ return skip } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsInitializer.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsInitializer.groovy index fdba0164b73..a61bfb02fde 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsInitializer.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsInitializer.groovy @@ -16,87 +16,23 @@ package com.netflix.spinnaker.clouddriver.docker.registry.security -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.docker.registry.config.DockerRegistryConfigurationProperties -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import com.netflix.spinnaker.clouddriver.security.ProviderUtils +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DefaultDockerOkClientProvider +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerOkClientProvider import groovy.util.logging.Slf4j -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.ApplicationContext +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import 
org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component @Slf4j @Component @Configuration -class DockerRegistryCredentialsInitializer implements CredentialsInitializerSynchronizable { +class DockerRegistryCredentialsInitializer { @Bean - List dockerRegistryNamedAccountCredentials(DockerRegistryConfigurationProperties dockerRegistryConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - synchronizeDockerRegistryAccounts(dockerRegistryConfigurationProperties, accountCredentialsRepository, null, applicationContext, providerSynchronizerTypeWrappers) + @ConditionalOnMissingBean(DockerOkClientProvider) + DockerOkClientProvider defaultDockerOkClientProvider() { + new DefaultDockerOkClientProvider() } - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeDockerRegistryAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - List synchronizeDockerRegistryAccounts(DockerRegistryConfigurationProperties dockerRegistryConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - CatsModule catsModule, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = - ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, DockerRegistryNamedAccountCredentials, - dockerRegistryConfigurationProperties.accounts) - - accountsToAdd.each { DockerRegistryConfigurationProperties.ManagedAccount managedAccount -> - try { - def dockerRegistryAccount = (new DockerRegistryNamedAccountCredentials.Builder()) - .accountName(managedAccount.name) - .environment(managedAccount.environment ?: managedAccount.name) - .accountType(managedAccount.accountType ?: managedAccount.name) - .address(managedAccount.address) - .password(managedAccount.password) - .passwordCommand(managedAccount.passwordCommand) - .username(managedAccount.username) - .email(managedAccount.email) - .passwordFile(managedAccount.passwordFile) - .catalogFile(managedAccount.catalogFile) - .dockerconfigFile(managedAccount.dockerconfigFile) - .cacheThreads(managedAccount.cacheThreads) - .cacheIntervalSeconds(managedAccount.cacheIntervalSeconds) - .clientTimeoutMillis(managedAccount.clientTimeoutMillis) - .paginateSize(managedAccount.paginateSize) - .trackDigests(managedAccount.trackDigests) - .sortTagsByDate(managedAccount.sortTagsByDate) - .repositories(managedAccount.repositories) - .skip(managedAccount.skip) - .build() - - accountCredentialsRepository.save(managedAccount.name, dockerRegistryAccount) - } catch (e) { - log.info "Could not load account ${managedAccount.name} for DockerRegistry.", e - } - } - - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - - if (accountsToAdd && catsModule) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers) - } - - accountCredentialsRepository.all.findAll { - it instanceof DockerRegistryNamedAccountCredentials - } as List - } } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentials.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentials.groovy index 
4847c54d313..4bd49c68766 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentials.groovy +++ b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentials.groovy @@ -17,16 +17,21 @@ package com.netflix.spinnaker.clouddriver.docker.registry.security import com.fasterxml.jackson.annotation.JsonIgnore +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerOkClientProvider import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerRegistryClient import com.netflix.spinnaker.clouddriver.docker.registry.exception.DockerRegistryConfigException -import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials +import com.netflix.spinnaker.fiat.model.Authorization +import com.netflix.spinnaker.fiat.model.resources.Permissions +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import retrofit.RetrofitError +import java.time.Instant import java.util.concurrent.TimeUnit @Slf4j -class DockerRegistryNamedAccountCredentials implements AccountCredentials { +class DockerRegistryNamedAccountCredentials extends AbstractAccountCredentials { static class Builder { String accountName String environment @@ -43,11 +48,15 @@ class DockerRegistryNamedAccountCredentials implements AccountCredentials repositories List skip String catalogFile + String repositoriesRegex + Permissions permissions + DockerOkClientProvider dockerOkClientProvider Builder() {} @@ -136,6 +145,11 @@ class DockerRegistryNamedAccountCredentials implements AccountCredentials requiredGroupMembership) { + List requiredGroupMembership, + Permissions permissions, + DockerOkClientProvider dockerOkClientProvider) { if (!accountName) { throw new IllegalArgumentException("Docker Registry account must be provided with a name.") } @@ -257,6 +302,10 @@ class DockerRegistryNamedAccountCredentials implements AccountCredentials getTags(String repository) { def tags = credentials.client.getTags(repository).tags if (sortTagsByDate) { - tags = tags.parallelStream().map({ - tag -> try { - [date: credentials.client.getCreationDate(repository, tag), tag: tag] - } catch (Exception e) { - log.warn("Unable to fetch tag creation date, reason: {} (tag: {}, repository: {})", e.message, tag, repository) - return [date: new Date(0), tag: tag] - } - }).toArray().sort { - it.date - }.reverse().tag + tags = KeyBasedSorter.sort(tags, { String t -> getCreationDate(repository, t) }, Comparator.reverseOrder()) } tags } + @CompileStatic + private Instant getCreationDate(String repository, String tag) { + try { + return credentials.client.getCreationDate(repository, tag) + } catch (Exception e) { + log.warn("Unable to fetch tag creation date, reason: {} (tag: {}, repository: {})", e.message, tag, repository) + return Instant.EPOCH; + } + } + String getV2Endpoint() { return "$address/v2" } + boolean getTrackDigests() { + return trackDigests + } + + boolean getInspectDigests() { + return inspectDigests + } + + int getCacheThreads() { + return cacheThreads + } + + long getCacheIntervalSeconds() { + return cacheIntervalSeconds + } + + DockerRegistryCredentials getCredentials() { + return credentials + } + @Override String getCloudProvider() { return CLOUD_PROVIDER } - private DockerRegistryCredentials 
buildCredentials(List repositories, String catalogFile) { + private DockerRegistryCredentials buildCredentials(List repositories, String catalogFile, File dockerconfigFile) { try { DockerRegistryClient client = (new DockerRegistryClient.Builder()) .address(address) @@ -354,12 +432,15 @@ class DockerRegistryNamedAccountCredentials implements AccountCredentials requiredGroupMembership) { + if (requiredGroupMembership?.empty ?: true) { + return Permissions.EMPTY + } + def builder = new Permissions.Builder() + requiredGroupMembership.forEach { + builder.add(Authorization.READ, it).add(Authorization.WRITE, it) + } + builder.build() + } + private static final String CLOUD_PROVIDER = "dockerRegistry" - final String accountName + private final String accountName final String environment final String accountType final String address @@ -381,6 +473,7 @@ class DockerRegistryNamedAccountCredentials implements AccountCredentials requiredGroupMembership + final Permissions permissions final List skip final String catalogFile + final String repositoriesRegex + final DockerOkClientProvider dockerOkClientProvider } diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/config/DockerRegistryConfiguration.groovy b/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/config/DockerRegistryConfiguration.groovy deleted file mode 100644 index 178fc5caf2e..00000000000 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/config/DockerRegistryConfiguration.groovy +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
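The `getPermissions` fallback above maps every legacy `requiredGroupMembership` role to both READ and WRITE. A hedged sketch of the same logic in plain Java, using only the fiat `Permissions.Builder` calls that appear in the hunk itself:

```java
import com.netflix.spinnaker.fiat.model.Authorization;
import com.netflix.spinnaker.fiat.model.resources.Permissions;
import java.util.List;

public class LegacyPermissionsSketch {
  // Mirrors the Groovy fallback: no legacy roles means unrestricted (EMPTY),
  // otherwise each role is granted both READ and WRITE on the account.
  static Permissions fromRequiredGroupMembership(List<String> roles) {
    if (roles == null || roles.isEmpty()) {
      return Permissions.EMPTY;
    }
    Permissions.Builder builder = new Permissions.Builder();
    roles.forEach(role -> builder.add(Authorization.READ, role).add(Authorization.WRITE, role));
    return builder.build();
  }
}
```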
- */ - -package com.netflix.spinnaker.config - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.docker.registry.config.DockerRegistryConfigurationProperties -import com.netflix.spinnaker.clouddriver.docker.registry.health.DockerRegistryHealthIndicator -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryCredentialsInitializer -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.ComponentScan -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Import -import org.springframework.context.annotation.Scope -import org.springframework.scheduling.annotation.EnableScheduling - -@Configuration -@EnableConfigurationProperties -@EnableScheduling -@ConditionalOnProperty('dockerRegistry.enabled') -@ComponentScan(["com.netflix.spinnaker.clouddriver.docker.registry"]) -@Import([ DockerRegistryCredentialsInitializer ]) -class DockerRegistryConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("dockerRegistry") - DockerRegistryConfigurationProperties dockerRegistryConfigurationProperties() { - new DockerRegistryConfigurationProperties() - } - - @Bean - DockerRegistryHealthIndicator dockerRegistryHealthIndicator(Registry registry, AccountCredentialsProvider accountCredentialsProvider) { - new DockerRegistryHealthIndicator(registry, accountCredentialsProvider) - } -} diff --git a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/DockerUserAgent.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/DockerUserAgent.java similarity index 95% rename from clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/DockerUserAgent.java rename to clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/DockerUserAgent.java index cf8739802da..636e771b006 100644 --- a/clouddriver-docker/src/main/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/DockerUserAgent.java +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/DockerUserAgent.java @@ -18,7 +18,7 @@ package com.netflix.spinnaker.clouddriver.docker.registry.api.v2; public class DockerUserAgent { - static public String getUserAgent() { + public static String getUserAgent() { String version; try { version = DockerUserAgent.class.getPackage().getImplementationVersion(); diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DefaultDockerOkClientProvider.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DefaultDockerOkClientProvider.java new file mode 100644 index 00000000000..b32e7ad3943 --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DefaultDockerOkClientProvider.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client; + +import com.jakewharton.retrofit.Ok3Client; +import com.netflix.spinnaker.clouddriver.docker.registry.security.TrustAllX509TrustManager; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.concurrent.TimeUnit; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; +import okhttp3.OkHttpClient; + +public class DefaultDockerOkClientProvider implements DockerOkClientProvider { + + @Override + public Ok3Client provide(String address, long timeoutMs, boolean insecure) { + OkHttpClient.Builder clientBuilder = + new OkHttpClient.Builder().readTimeout(timeoutMs, TimeUnit.MILLISECONDS); + + if (insecure) { + SSLContext sslContext; + TrustManager[] trustManagers = {new TrustAllX509TrustManager()}; + try { + sslContext = SSLContext.getInstance("SSL"); + sslContext.init(null, trustManagers, new SecureRandom()); + } catch (NoSuchAlgorithmException | KeyManagementException e) { + throw new IllegalStateException("Failed configuring insecure SslSocketFactory", e); + } + clientBuilder.sslSocketFactory( + sslContext.getSocketFactory(), (X509TrustManager) trustManagers[0]); + } + + return new Ok3Client(clientBuilder.build()); + } +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerOkClientProvider.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerOkClientProvider.java new file mode 100644 index 00000000000..d111453009b --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerOkClientProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client; + +import com.jakewharton.retrofit.Ok3Client; + +/** Allows custom configuration of the Docker Registry OkHttpClient. */ +public interface DockerOkClientProvider { + + /** + * @param address Provided simply in case a client provider needs to conditionally apply rules + * per-registry + * @param timeoutMs The client timeout in milliseconds + * @param insecure Whether or not the registry should be configured to trust all SSL certificates. 
+ * If this is true, you may want to fall back to {@code DefaultDockerOkClientProvider} + * @return An Ok3Client + */ + Ok3Client provide(String address, long timeoutMs, boolean insecure); +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicator.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicator.java new file mode 100644 index 00000000000..cf539e1e37b --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicator.java @@ -0,0 +1,53 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.health; + +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.core.AccountHealthIndicator; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Optional; + +public class DockerRegistryHealthIndicator + extends AccountHealthIndicator<DockerRegistryNamedAccountCredentials> { + private static final String ID = "docker"; + private final CredentialsRepository<DockerRegistryNamedAccountCredentials> credentialsRepository; + + public DockerRegistryHealthIndicator( + Registry registry, + CredentialsRepository<DockerRegistryNamedAccountCredentials> credentialsRepository) { + super(ID, registry); + this.credentialsRepository = credentialsRepository; + } + + @Override + protected ImmutableList<DockerRegistryNamedAccountCredentials> getAccounts() { + return ImmutableList.copyOf(credentialsRepository.getAll()); + } + + @Override + protected Optional<String> accountHealth( + DockerRegistryNamedAccountCredentials accountCredentials) { + try { + accountCredentials.getCredentials().getClient().checkV2Availability(); + return Optional.empty(); + } catch (RuntimeException e) { + return Optional.of(e.getMessage()); + } + } +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsLifecycleHandler.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsLifecycleHandler.java new file mode 100644 index 00000000000..bbdce96abb5 --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsLifecycleHandler.java @@ -0,0 +1,74 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
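Since the default implementation shown earlier is wired with `@ConditionalOnMissingBean`, registering any bean of the `DockerOkClientProvider` interface above replaces it. A sketch under that assumption; the logging interceptor comes from the separate `okhttp-logging-interceptor` artifact and is purely illustrative:

```java
import com.jakewharton.retrofit.Ok3Client;
import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerOkClientProvider;
import java.util.concurrent.TimeUnit;
import okhttp3.OkHttpClient;
import okhttp3.logging.HttpLoggingInterceptor;

// Hypothetical custom provider: same timeout handling as the default, plus request logging.
// (This sketch ignores the insecure flag; a real provider would honor it as the default does.)
public class LoggingDockerOkClientProvider implements DockerOkClientProvider {
  @Override
  public Ok3Client provide(String address, long timeoutMs, boolean insecure) {
    HttpLoggingInterceptor logging = new HttpLoggingInterceptor();
    logging.setLevel(HttpLoggingInterceptor.Level.BASIC);

    OkHttpClient client =
        new OkHttpClient.Builder()
            .readTimeout(timeoutMs, TimeUnit.MILLISECONDS)
            .addInterceptor(logging)
            .build();
    return new Ok3Client(client);
  }
}
```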
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.security; + +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.provider.agent.DockerRegistryImageCachingAgent; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import java.util.ArrayList; +import java.util.List; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +@Component +@RequiredArgsConstructor +@Slf4j +public class DockerRegistryCredentialsLifecycleHandler + implements CredentialsLifecycleHandler<DockerRegistryNamedAccountCredentials> { + + private final DockerRegistryProvider provider; + private final DockerRegistryCloudProvider cloudProvider; + + @Override + public void credentialsAdded(DockerRegistryNamedAccountCredentials credentials) { + log.info("Adding agents for docker account {}", credentials.getName()); + provider.addAgents(agentsForCredentials(credentials)); + } + + @Override + public void credentialsUpdated(DockerRegistryNamedAccountCredentials credentials) { + log.info("Updating agents for docker account {}", credentials.getName()); + provider.removeAgentsForAccounts(List.of(credentials.getName())); + provider.addAgents(agentsForCredentials(credentials)); + } + + @Override + public void credentialsDeleted(DockerRegistryNamedAccountCredentials credentials) { + log.info("Removing agents for docker account {}", credentials.getName()); + provider.removeAgentsForAccounts(List.of(credentials.getName())); + } + + private List<Agent> agentsForCredentials(DockerRegistryNamedAccountCredentials credentials) { + List<Agent> agents = new ArrayList<>(); + + for (int i = 0; i < credentials.getCacheThreads(); i++) { + agents.add( + new DockerRegistryImageCachingAgent( + cloudProvider, + credentials.getName(), + credentials.getCredentials(), + i, + credentials.getCacheThreads(), + credentials.getCacheIntervalSeconds(), + credentials.getRegistry())); + } + return agents; + } +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/security/KeyBasedSorter.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/security/KeyBasedSorter.java new file mode 100644 index 00000000000..38cf9b2bdda --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/clouddriver/docker/registry/security/KeyBasedSorter.java @@ -0,0 +1,69 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.security; + +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; +import lombok.AccessLevel; +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; + +/** + * This class implements a Schwartzian transform to sort the elements of a collection on a sort key + * while guaranteeing that the sort key will only be computed once per element, and is thus suitable + * for cases where computation of the sort key is expensive. + */ +@RequiredArgsConstructor(access = AccessLevel.PRIVATE) +@Slf4j +public final class KeyBasedSorter { + /** + * Sorts a collection and returns the result as a list without modifying the input collection. + * The sort is performed by first extracting a sort key using the supplied extractor, then + * comparing the sort keys using the supplied comparator. + * + * <p>The algorithm is guaranteed to only apply the extractor once per element, and is thus + * suitable for cases where this computation is expensive. It may execute the extractor in + * parallel. + * + * @param input The collection to sort + * @param extractor A function to extract a sort key from each element of the input collection + * @param comparator A comparator that defines an ordering on the sort keys + * @param <P> The class of objects in the collection to be sorted + * @param <Q> The class of the sort key extracted from objects in the collection + * @return A list containing the sorted elements of the collection + */ + public static <P, Q> List<P> sort( + Collection<P> input, Function<P, Q> extractor, Comparator<Q> comparator) { + return input.parallelStream() + .map(t -> new ElementWithComparisonField<>(t, extractor.apply(t))) + .sorted(Comparator.comparing(ElementWithComparisonField::getComparisonField, comparator)) + .map(ElementWithComparisonField::getElement) + .collect(Collectors.toList()); + } + + @AllArgsConstructor + @Getter + private static class ElementWithComparisonField<P, Q> { + private P element; + private Q comparisonField; + } +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerDefaultConfiguration.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerDefaultConfiguration.java new file mode 100644 index 00000000000..d805119c038 --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerDefaultConfiguration.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
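A small hypothetical usage of `KeyBasedSorter` (tag names invented; the counter exists only to demonstrate the once-per-element guarantee documented above):

```java
import com.netflix.spinnaker.clouddriver.docker.registry.security.KeyBasedSorter;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class KeyBasedSorterExample {
  public static void main(String[] args) {
    AtomicInteger keyComputations = new AtomicInteger();
    List<String> tags = List.of("v10", "v2", "v1");

    // Sort by an "expensive" key (here just parsing the numeric suffix), largest first.
    List<String> sorted =
        KeyBasedSorter.sort(
            tags,
            tag -> {
              keyComputations.incrementAndGet(); // counts extractor invocations
              return Integer.parseInt(tag.substring(1));
            },
            Comparator.<Integer>reverseOrder());

    System.out.println(sorted);                // [v10, v2, v1]
    System.out.println(keyComputations.get()); // 3 -- exactly one extraction per element
  }
}
```

This mirrors the credentials class above, which now delegates its date-ordered tag listing to the sorter instead of hand-rolling the transform.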
+ * + */ + +package com.netflix.spinnaker.config; + +import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty( + value = "docker-registry.enabled", + havingValue = "false", + matchIfMissing = true) +class DockerDefaultConfiguration { + + // this bean will be created for DeployCloudFoundryServerGroupAtomicOperationConverter class + // only if dockerRegistry is disabled + @Bean + @ConditionalOnProperty(value = "cloudfoundry.enabled", havingValue = "true") + public CredentialsRepository + defaultDockerRegistryCredentialsRepository() { + return new MapBackedCredentialsRepository<>( + DockerRegistryProvider.PROVIDER_NAME, new NoopCredentialsLifecycleHandler<>()); + } +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerRegistryAccountDefinitionSourceConfiguration.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerRegistryAccountDefinitionSourceConfiguration.java new file mode 100644 index 00000000000..bb82a1cf1ee --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerRegistryAccountDefinitionSourceConfiguration.java @@ -0,0 +1,29 @@ +package com.netflix.spinnaker.config; + +import com.netflix.spinnaker.clouddriver.docker.registry.config.DockerRegistryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.docker.registry.config.DockerRegistryConfigurationProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionSource; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource; +import java.util.List; +import java.util.Optional; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; + +@Configuration +@ConditionalOnProperty({"account.storage.enabled", "account.storage.docker-registry.enabled"}) +public class DockerRegistryAccountDefinitionSourceConfiguration { + @Bean + @Primary + public CredentialsDefinitionSource dockerRegistryAccountSource( + AccountDefinitionRepository repository, + Optional>> additionalSources, + DockerRegistryConfigurationProperties properties) { + return new AccountDefinitionSource<>( + repository, + ManagedAccount.class, + additionalSources.orElseGet(() -> List.of(properties::getAccounts))); + } +} diff --git a/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerRegistryConfiguration.java b/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerRegistryConfiguration.java new file mode 100644 index 00000000000..5b6a06f73ae --- /dev/null +++ b/clouddriver-docker/src/main/java/com/netflix/spinnaker/config/DockerRegistryConfiguration.java @@ -0,0 +1,141 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 
(the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.config; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerOkClientProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.config.DockerRegistryConfigurationProperties; +import com.netflix.spinnaker.clouddriver.docker.registry.config.DockerRegistryConfigurationProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.docker.registry.health.DockerRegistryHealthIndicator; +import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryCredentialsInitializer; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader; +import com.netflix.spinnaker.credentials.definition.BasicCredentialsLoader; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource; +import com.netflix.spinnaker.credentials.poller.Poller; +import javax.annotation.Nullable; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.cloud.context.config.annotation.RefreshScope; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Import; +import org.springframework.scheduling.annotation.EnableScheduling; + +@Configuration +@EnableConfigurationProperties +@EnableScheduling +@ConditionalOnProperty("docker-registry.enabled") +@ComponentScan("com.netflix.spinnaker.clouddriver.docker.registry") +@Import(DockerRegistryCredentialsInitializer.class) +public class DockerRegistryConfiguration { + + @Bean + @RefreshScope + @ConfigurationProperties("docker-registry") + public DockerRegistryConfigurationProperties dockerRegistryConfigurationProperties() { + return new DockerRegistryConfigurationProperties(); + } + + @Bean + public DockerRegistryHealthIndicator dockerRegistryHealthIndicator( + Registry registry, + CredentialsRepository credentialsRepository) { + return new DockerRegistryHealthIndicator(registry, credentialsRepository); + } + + @Bean + @ConditionalOnMissingBean( + value = DockerRegistryNamedAccountCredentials.class, + parameterizedContainer = AbstractCredentialsLoader.class) + public 
AbstractCredentialsLoader + dockerRegistryCredentialsLoader( + @Nullable CredentialsDefinitionSource dockerRegistryCredentialsSource, + DockerRegistryConfigurationProperties accountProperties, + DockerOkClientProvider dockerOkClientProvider, + CredentialsRepository + dockerRegistryCredentialsRepository) { + + if (dockerRegistryCredentialsSource == null) { + dockerRegistryCredentialsSource = accountProperties::getAccounts; + } + + return new BasicCredentialsLoader<>( + dockerRegistryCredentialsSource, + a -> + (new DockerRegistryNamedAccountCredentials.Builder()) + .accountName(a.getName()) + .environment(a.getEnvironment() != null ? a.getEnvironment() : a.getName()) + .accountType(a.getAccountType() != null ? a.getAccountType() : a.getName()) + .address(a.getAddress()) + .password(a.getPassword()) + .passwordCommand(a.getPasswordCommand()) + .username(a.getUsername()) + .email(a.getEmail()) + .passwordFile(a.getPasswordFile()) + .catalogFile(a.getCatalogFile()) + .repositoriesRegex(a.getRepositoriesRegex()) + .dockerconfigFile(a.getDockerconfigFile()) + .cacheThreads(a.getCacheThreads()) + .cacheIntervalSeconds(a.getCacheIntervalSeconds()) + .clientTimeoutMillis(a.getClientTimeoutMillis()) + .paginateSize(a.getPaginateSize()) + .trackDigests(a.getTrackDigests()) + .inspectDigests(a.getInspectDigests()) + .sortTagsByDate(a.getSortTagsByDate()) + .insecureRegistry(a.getInsecureRegistry()) + .repositories(a.getRepositories()) + .skip(a.getSkip()) + .permissions(a.getPermissions().build()) + .dockerOkClientProvider(dockerOkClientProvider) + .build(), + dockerRegistryCredentialsRepository); + } + + @Bean + @ConditionalOnMissingBean( + value = DockerRegistryNamedAccountCredentials.class, + parameterizedContainer = CredentialsRepository.class) + public CredentialsRepository + dockerRegistryCredentialsRepository( + CredentialsLifecycleHandler eventHandler) { + return new MapBackedCredentialsRepository<>(DockerRegistryProvider.PROVIDER_NAME, eventHandler); + } + + @Bean + @ConditionalOnMissingBean( + value = ManagedAccount.class, + parameterizedContainer = CredentialsDefinitionSource.class) + public CredentialsInitializerSynchronizable dockerRegistryCredentialsInitializerSynchronizable( + AbstractCredentialsLoader loader) { + final Poller poller = new Poller<>(loader); + return new CredentialsInitializerSynchronizable() { + @Override + public void synchronize() { + poller.run(); + } + }; + } +} diff --git a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenServiceSpec.groovy b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenServiceSpec.groovy index ac4f870f9a3..c1f00e567b4 100644 --- a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenServiceSpec.groovy +++ b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/auth/DockerBearerTokenServiceSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import spock.lang.Ignore import spock.lang.Shared import spock.lang.Specification -import sun.misc.BASE64Decoder +import java.util.Base64 class DockerBearerTokenServiceSpec extends Specification { private static final REALM1 = "https://auth.docker.io" @@ -50,6 +50,19 @@ class DockerBearerTokenServiceSpec extends Specification { result.scope == SCOPE1 } + void "should parse Www-Authenticate header with missing service and path."() { + setup: + def input = 
"realm=\"${REALM1}/${PATH1}\",scope=\"${SCOPE1}\"" + when: + def result = tokenService.parseBearerAuthenticateHeader(input) + + then: + result.path == PATH1 + result.realm == REALM1 + result.service == null + result.scope == SCOPE1 + } + void "should parse Www-Authenticate header with some privileges and path."() { setup: def input = "realm=\"${REALM1}/${PATH1}\",service=\"${SERVICE1}\",scope=\"${SCOPE2}\"" @@ -76,6 +89,19 @@ class DockerBearerTokenServiceSpec extends Specification { result.scope == SCOPE2 } + void "should parse Www-Authenticate header with missing service and no path."() { + setup: + def input = "realm=\"${REALM1}\",scope=\"${SCOPE2}\"" + when: + def result = tokenService.parseBearerAuthenticateHeader(input) + + then: + !result.path + result.realm == REALM1 + result.service == null + result.scope == SCOPE2 + } + void "should parse unquoted Www-Authenticate header with some privileges and path."() { setup: def input = "realm=${REALM1}/${PATH1},service=${SERVICE1},scope=${SCOPE2}" diff --git a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClientSpec.groovy b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClientSpec.groovy index fd2e7916540..31b5e31fee7 100644 --- a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClientSpec.groovy +++ b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/api/v2/client/DockerRegistryClientSpec.groovy @@ -16,7 +16,15 @@ package com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client -import spock.lang.Ignore +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.auth.DockerBearerToken +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.auth.DockerBearerTokenService +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import org.springframework.http.HttpStatus +import retrofit.RetrofitError +import retrofit.client.Header +import retrofit.client.Response +import retrofit.mime.TypedByteArray +import retrofit.mime.TypedInput import spock.lang.Shared import spock.lang.Specification @@ -27,12 +35,81 @@ import java.util.concurrent.TimeUnit * with an exception indicating a network or HTTP error, or will fail to load data * from dockerhub. 
*/ -@Ignore class DockerRegistryClientSpec extends Specification { private static final REPOSITORY1 = "library/ubuntu" @Shared DockerRegistryClient client + def dockerBearerTokenService = Mock(DockerBearerTokenService) + + def stubbedRegistryService = Stub(DockerRegistryClient.DockerRegistryService){ + String tagsJson = "{\"name\":\"library/ubuntu\",\"tags\":[\"latest\",\"xenial\",\"rolling\"]}" + TypedInput tagsTypedInput = new TypedByteArray("application/json", tagsJson.getBytes()) + Response tagsResponse = new Response("/v2/{repository}/tags/list",200, "nothing", Collections.EMPTY_LIST, tagsTypedInput) + getTags(_,_,_) >> tagsResponse + + String checkJson = "{}" + TypedInput checkTypedInput = new TypedByteArray("application/json", checkJson.getBytes()) + Response checkResponse = new Response("/v2/",200, "nothing", Collections.EMPTY_LIST, checkTypedInput) + checkVersion(_,_) >> checkResponse + + String json = "{\"repositories\":[\"armory-io/armorycommons\",\"armory/aquascan\",\"other/keel\"]}" + TypedInput catalogTypedInput = new TypedByteArray("application/json", json.getBytes()) + Response catalogResponse = new Response("/v2/_catalog/",200, "nothing", Collections.EMPTY_LIST, catalogTypedInput) + getCatalog(_,_,_) >> catalogResponse + + String schemaJson = '''{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 4405, + "digest": "sha256:fa8d22f4899110fdecf7ae344a8129fb6175ed5294ffe9ca3fb09dfca5252c93" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 3310095, + "digest": "sha256:1ace22715a341b6ad81b784da18f2efbcea18ff7b4b4edf4f467f193b7de3750" + } + ] + }''' + TypedInput schemaV2Input = new TypedByteArray("application/json", schemaJson.getBytes()) + Response schemaV2Response = new Response("/v2/{name}/manifests/{reference}",200, "nothing", Collections.EMPTY_LIST, schemaV2Input) + getSchemaV2Manifest(_,_,_,_) >> schemaV2Response + + String configDigestContentJson = '''{ + "architecture": "amd64", + "config": { + "Hostname": "", + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/opt/app/server" + ], + "Image": "sha256:3862e8f6f860c732be3fe0c0545330f9573a09cf906a78b06a329e09f9dc7191", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": { + "branch": "main", + "buildNumber": "1", + "commitId": "b48e2cf960de545597411c99ec969e47a7635ba3", + "jobName": "test" + } + }, + "container": "fc1607ce29cfa58cc6cad846b911ec0c4de76d426de2b528a126e715615286bc", + "created": "2021-02-16T19:18:50.176616541Z", + "docker_version": "19.03.6-ce", + "os": "linux", + "rootfs": {} + }''' + TypedInput configDigestContentInput = new TypedByteArray("application/json", configDigestContentJson.getBytes()) + Response contentDigestResponse = new Response("/v2/{repository}/blobs/{digest}",200, "nothing", Collections.EMPTY_LIST, configDigestContentInput) + getDigestContent(_,_,_,_) >> contentDigestResponse + } def setupSpec() { @@ -40,44 +117,91 @@ class DockerRegistryClientSpec extends Specification { void "DockerRegistryClient should request a real set of tags."() { when: - client = new DockerRegistryClient("https://index.docker.io", "", "", "", TimeUnit.MINUTES.toMillis(1), 100, "", false) - DockerRegistryTags result = client.getTags(REPOSITORY1) + client = new DockerRegistryClient("https://index.docker.io",100,"","",stubbedRegistryService, 
dockerBearerTokenService) + def result = client.getTags(REPOSITORY1) then: - result.name == REPOSITORY1 - result.tags.size() > 0 + result.name == REPOSITORY1 + result.tags.size() > 0 } void "DockerRegistryClient should validate that it is pointing at a v2 endpoint."() { when: - client = new DockerRegistryClient("https://index.docker.io", "", "", "", TimeUnit.MINUTES.toMillis(1), 100, "", false) - // Can only fail due to an exception thrown here. - client.checkV2Availability() + client = new DockerRegistryClient("https://index.docker.io",100,"","",stubbedRegistryService, dockerBearerTokenService) + // Can only fail due to an exception thrown here. + client.checkV2Availability() then: - true + true } void "DockerRegistryClient invoked with insecureRegistry=true"() { when: - client = new DockerRegistryClient("https://index.docker.io", "", "", "", TimeUnit.MINUTES.toMillis(1), 100, "", true) - DockerRegistryTags result = client.getTags(REPOSITORY1) + client = new DockerRegistryClient("https://index.docker.io",100,"","",stubbedRegistryService, dockerBearerTokenService) + DockerRegistryTags result = client.getTags(REPOSITORY1) then: - result.name == REPOSITORY1 - result.tags.size() > 0 + result.name == REPOSITORY1 + result.tags.size() > 0 } void "DockerRegistryClient uses correct user agent"() { - when: - client = new DockerRegistryClient("https://index.docker.io", "", "", "", TimeUnit.MINUTES.toMillis(1), 100, "", true) - client.registryService = Mock(DockerRegistryClient.DockerRegistryService) + def mockService = Mock(DockerRegistryClient.DockerRegistryService); + client = new DockerRegistryClient("https://index.docker.io",100,"","",mockService, dockerBearerTokenService) + when: + client.checkV2Availability() def userAgent = client.userAgent - client.getTags(REPOSITORY1) then: userAgent.startsWith("Spinnaker") - 1 * client.registryService.getTags(_, _, userAgent) + 1 * mockService.checkVersion(_,_) + } + + void "DockerRegistryClient should filter repositories by regular expression."() { + when: + client = new DockerRegistryClient("https://index.docker.io",100,"","",stubbedRegistryService, dockerBearerTokenService) + def original = client.getCatalog().repositories.size() + client = new DockerRegistryClient("https://index.docker.io",100,"","armory\\/.*",stubbedRegistryService, dockerBearerTokenService) + def filtered = client.getCatalog().repositories.size() + + then: + filtered < original + } + + void "DockerRegistryClient should be able to fetch digest."() { + when: + client = new DockerRegistryClient("https://index.docker.io",100,"","",stubbedRegistryService, dockerBearerTokenService) + def result = client.getConfigDigest(REPOSITORY1, "tag") + + then: + result == "sha256:fa8d22f4899110fdecf7ae344a8129fb6175ed5294ffe9ca3fb09dfca5252c93" + } + + void "DockerRegistryClient should be able to fetch the config layer."() { + when: + client = new DockerRegistryClient("https://index.docker.io",100,"","",stubbedRegistryService, dockerBearerTokenService) + def results = client.getDigestContent(REPOSITORY1, "digest") + + then: + results?.config?.Labels != null + results?.config?.Labels?.commitId == "b48e2cf960de545597411c99ec969e47a7635ba3" + } + + void "DockerRegistryClient should honor the www-authenticate header"() { + setup: + def authenticateDetails = "realm=\"https://auth.docker.io/token\",service=\"registry.docker.io\",scope=\"repository:${REPOSITORY1}:pull\"" + def unauthorizedRetroFitError = RetrofitError.httpError("url", + new Response("url", HttpStatus.UNAUTHORIZED.value(), "authentication 
required", [new Header("www-authenticate", "Bearer ${authenticateDetails}")], null), + null, null) + DockerBearerToken token = new DockerBearerToken() + token.bearer_token = "bearer-token" + + when: + client = new DockerRegistryClient("https://index.docker.io", 100, "", "", stubbedRegistryService, dockerBearerTokenService) + client.request(() -> {throw new SpinnakerHttpException(unauthorizedRetroFitError)}, (_) -> null, REPOSITORY1) + + then: + 1 * dockerBearerTokenService.getToken(REPOSITORY1, authenticateDetails) >> token } } diff --git a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerSpec.groovy b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerSpec.groovy index fd84e7022a0..41090bdbb13 100644 --- a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerSpec.groovy +++ b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerSpec.groovy @@ -16,13 +16,65 @@ package com.netflix.spinnaker.clouddriver.docker.registry.controllers +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.CacheFilter +import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider +import com.netflix.spinnaker.clouddriver.docker.registry.cache.Keys +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import spock.lang.Specification class DockerRegistryImageLookupControllerSpec extends Specification { - def "GenerateArtifact"() { - setup: - DockerRegistryImageLookupController dockerRegistryImageLookupController = new DockerRegistryImageLookupController(); + DockerRegistryImageLookupController dockerRegistryImageLookupController + + Set resultData = [ + new CacheData(){ + @Override + String getId() { Keys.getTaggedImageKey("test-account", "test-repo", "1.0") } + + @Override + int getTtlSeconds() { return 0 } + @Override + Map> getRelationships() { return null } + + @Override + Map getAttributes() { + return [ + "account": "test-account", + "digest": "test-digest", + "labels": [ + "commitId": "test-commit", + "buildNumber": "1", + "branch": "test-branch", + "jobName": "test-job" + ] + ] + } + } + ] + + def accountCredentials = Stub(DockerRegistryNamedAccountCredentials) { + getCloudProvider() >> DockerRegistryCloudProvider.DOCKER_REGISTRY + getTrackDigests() >> true + getRegistry() >> "test-registry" + } + + def setup() { + dockerRegistryImageLookupController = new DockerRegistryImageLookupController( + accountCredentialsProvider: Stub(AccountCredentialsProvider){ + getAll() >> [accountCredentials] + getCredentials(*_) >> accountCredentials + }, + cacheView: Stub(Cache) { + filterIdentifiers(_,_) >> ["someID"] + getAll(*_) >> resultData + } + ) + } + + def "GenerateArtifact"() { when: def result = dockerRegistryImageLookupController.generateArtifact("foo.registry", "my/app", "mytag") @@ -33,4 +85,122 @@ class DockerRegistryImageLookupControllerSpec extends Specification { result.type == "docker" result.metadata.registry == "foo.registry" } + + void "When finding images with includeDetails == false"() { + when: + def result = dockerRegistryImageLookupController.find(new 
DockerRegistryImageLookupController.LookupOptions(includeDetails: false)) + + then: + result.size() == 1 + result[0].account == "test-account" + result[0].digest == "test-digest" + result[0].commitId == null + result[0].buildNumber == null + result[0].artifact.type == "docker" + result[0].artifact.metadata.registry == "test-registry" + } + + void "When finding images with includeDetails == true"() { + when: + def result = dockerRegistryImageLookupController.find(new DockerRegistryImageLookupController.LookupOptions(includeDetails: true)) + + then: + result.size() == 1 + result[0].account == "test-account" + result[0].digest == "test-digest" + result[0].commitId == "test-commit" + result[0].branch == "test-branch" + result[0].buildNumber == "1" + result[0].artifact.type == "docker" + result[0].artifact.metadata.registry == "test-registry" + result[0].artifact.metadata.labels != null + result[0].artifact.metadata.labels.jobName == "test-job" + } + + void "When finding images with filtered repository for an image that exists" () { + when: + def result = dockerRegistryImageLookupController.find( + new DockerRegistryImageLookupController.LookupOptions(repository: "test-repo")) + + then: + result.size() == 1 + result[0].account == "test-account" + result[0].digest == "test-digest" + result[0].artifact.type == "docker" + result[0].artifact.metadata.registry == "test-registry" + } + + void "When finding images with filtered repository and tag for an image that exists" () { + when: + def result = dockerRegistryImageLookupController.find( + new DockerRegistryImageLookupController.LookupOptions(repository: "test-repo", tag: "1.0")) + + then: + result.size() == 1 + result[0].account == "test-account" + result[0].digest == "test-digest" + result[0].artifact.type == "docker" + result[0].artifact.metadata.registry == "test-registry" + } + + void "When finding images with filtered repository for an image that does not exist" () { + when: + def result = dockerRegistryImageLookupController.find( + new DockerRegistryImageLookupController.LookupOptions(repository: "wrong-repo")) + + then: + result.size() == 0 + } + + void "When finding images with filtered tag for a tag that does not exist" () { + when: + def result = dockerRegistryImageLookupController.find( + new DockerRegistryImageLookupController.LookupOptions(repository: "wrong-tag")) + + then: + result.size() == 0 + } + + void "When finding images with no metadata and includeDetails == true"() { + setup: + def noLabelResultData = [ + new CacheData(){ + @Override + String getId() { Keys.getTaggedImageKey("test-account", "test-repo", "1.0") } + + @Override + int getTtlSeconds() { return 0 } + + @Override + Map> getRelationships() { return null } + + @Override + Map getAttributes() { return ["account": "test-account", "digest": "test-digest"] } + } + ] + + dockerRegistryImageLookupController = new DockerRegistryImageLookupController( + accountCredentialsProvider: Stub(AccountCredentialsProvider){ + getAll() >> [accountCredentials] + getCredentials(*_) >> accountCredentials + }, + cacheView: Stub(Cache) { + filterIdentifiers(_,_) >> ["someID"] + getAll(*_) >> noLabelResultData + } + ) + + when: + def result = dockerRegistryImageLookupController.find(new DockerRegistryImageLookupController.LookupOptions(includeDetails: true)) + + then: + result.size() == 1 + result[0].account == "test-account" + result[0].digest == "test-digest" + result[0].commitId == null + result[0].buildNumber == null + result[0].artifact.type == "docker" + 
result[0].artifact.metadata.registry == "test-registry" + result[0].artifact.metadata.labels == null + } } diff --git a/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgentTest.groovy b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgentTest.groovy new file mode 100644 index 00000000000..24ceb8047d4 --- /dev/null +++ b/clouddriver-docker/src/test/groovy/com/netflix/spinnaker/clouddriver/docker/registry/provider/agent/DockerRegistryImageCachingAgentTest.groovy @@ -0,0 +1,306 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.provider.agent + +import com.netflix.spinnaker.cats.agent.CacheResult +import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerRegistryClient +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerRegistryTags +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryCredentials +import retrofit.RetrofitError +import spock.lang.Specification + +import java.time.Instant + +class DockerRegistryImageCachingAgentTest extends Specification { + + def KEY_PREFIX = "dockerRegistry" + def ACCOUNT_NAME = "test-docker" + def REGISTRY_NAME = "test-registry" + def CACHE_GROUP_TAGGED_IMAGE = "taggedImage" + def CACHE_GROUP_IMAGE_ID = "imageId" + + DockerRegistryImageCachingAgent agent + def credentials = Mock(DockerRegistryCredentials) + def provider = Mock(DockerRegistryCloudProvider) + def client = Mock(DockerRegistryClient) + + def setup() { + credentials.client >> client + agent = new DockerRegistryImageCachingAgent(provider, ACCOUNT_NAME, credentials, 0, 1, 1, REGISTRY_NAME) + } + + def "tags loaded from docker registry should be cached"() { + given: + credentials.repositories >> ["repo-1", "repo-2"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { + name = "repo-1" + tags = ["tag-1-1"] + } + client.getTags("repo-2") >> new DockerRegistryTags().tap { + name = "repo-2" + tags = ["tag-2-1", "tag-2-2"] + } + def repoTagSequence = [ + ["repo-1", "tag-1-1"], + ["repo-2", "tag-2-1"], + ["repo-2", "tag-2-2"], + ] + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultImageIds = cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID) + for (int i = 0; i < cacheResultImageIds.size(); i++) { + assert cacheResultImageIds[i].id == buildImageIdCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("tagKey") == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("account") == ACCOUNT_NAME + } + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + for (int i = 0; i < 
cacheResultTaggedImages.size(); i++) { + assert cacheResultTaggedImages[i].id == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultTaggedImages[i].attributes.get("name") == "${repoTagSequence[i][0]}:${repoTagSequence[i][1]}" + assert cacheResultTaggedImages[i].attributes.get("account") == ACCOUNT_NAME + assert cacheResultTaggedImages[i].attributes.get("digest") == null + assert cacheResultTaggedImages[i].attributes.get("date") == null + } + } + + def "cached tags should include creation date"() { + given: + credentials.sortTagsByDate >> true + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { + name="repo-1" + tags=["tag-1", "tag-2"] + } + def repoTagSequence = [ + ["repo-1", "tag-1"], + ["repo-1", "tag-2"], + ] + client.getCreationDate("repo-1", "tag-1") >> Instant.ofEpochSecond(0) + client.getCreationDate("repo-1", "tag-2") >> Instant.ofEpochSecond(1) + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultImageIds = cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID) + for (int i = 0; i < cacheResultImageIds.size(); i++) { + assert cacheResultImageIds[i].id == buildImageIdCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("tagKey") == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("account") == ACCOUNT_NAME + } + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + for (int i = 0; i < cacheResultTaggedImages.size(); i++) { + assert cacheResultTaggedImages[i].id == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultTaggedImages[i].attributes.get("name") == "${repoTagSequence[i][0]}:${repoTagSequence[i][1]}" + assert cacheResultTaggedImages[i].attributes.get("account") == ACCOUNT_NAME + assert cacheResultTaggedImages[i].attributes.get("digest") == null + assert cacheResultTaggedImages[i].attributes.get("date") == Instant.ofEpochSecond(i) + } + } + + def "cached tags should include digest"() { + given: + credentials.trackDigests >> true + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { + name="repo-1" + tags=["tag-1", "tag-2"] + } + def repoTagSequence = [ + ["repo-1", "tag-1"], + ["repo-1", "tag-2"], + ] + client.getDigest("repo-1", "tag-1") >> "repo-1_tag-1" + client.getDigest("repo-1", "tag-2") >> "repo-1_tag-2" + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultImageIds = cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID) + for (int i = 0; i < cacheResultImageIds.size(); i++) { + assert cacheResultImageIds[i].id == buildImageIdCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("tagKey") == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("account") == ACCOUNT_NAME + } + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + for (int i = 0; i < cacheResultTaggedImages.size(); i++) { + assert cacheResultTaggedImages[i].id == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultTaggedImages[i].attributes.get("name") == "${repoTagSequence[i][0]}:${repoTagSequence[i][1]}" + assert cacheResultTaggedImages[i].attributes.get("account") == ACCOUNT_NAME + assert 
cacheResultTaggedImages[i].attributes.get("digest") == "${repoTagSequence[i][0]}_${repoTagSequence[i][1]}" + assert cacheResultTaggedImages[i].attributes.get("date") == null + } + } + + def "cached tags should include label if inspectDigest is true"() { + given: + credentials.inspectDigests >> true + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { name="repo-1"; tags=["tag-1"] } + client.getConfigDigest("repo-1", "tag-1") >> "digest-1" + client.getDigestContent("repo-1", "digest-1") >> ["config": ["Labels": ["commitId": "id1", "buildNumber": "1"] ]] + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + for (int i = 0; i < cacheResultTaggedImages.size(); i++) { + assert cacheResultTaggedImages[i].attributes.get("digest") == "digest-1" + assert cacheResultTaggedImages[i].attributes.get("labels") == ["commitId": "id1", "buildNumber": "1"] + } + } + + def "error loading tags returns empty result"() { + given: + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> { + throw new IOException() + } + + when: + def cacheResult = agent.loadData(null) + + then: + cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID).size() == 0 + cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE).size() == 0 + } + + def "error loading tag date should set to null date attribute"() { + given: + credentials.sortTagsByDate >> true + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { + name="repo-1" + tags=["tag-1", "tag-2"] + } + def repoTagSequence = [ + ["repo-1", "tag-1"], + ["repo-1", "tag-2"], + ] + client.getCreationDate("repo-1", "tag-1") >> { + throw RetrofitError.httpError("", null, null, null) + } + client.getCreationDate("repo-1", "tag-2") >> Instant.EPOCH + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultImageIds = cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID) + for (int i = 0; i < cacheResultImageIds.size(); i++) { + assert cacheResultImageIds[i].id == buildImageIdCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("tagKey") == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultImageIds[i].attributes.get("account") == ACCOUNT_NAME + } + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + for (int i = 0; i < cacheResultTaggedImages.size(); i++) { + assert cacheResultTaggedImages[i].id == buildTaggedImageCacheKey(repoTagSequence[i][0], repoTagSequence[i][1]) + assert cacheResultTaggedImages[i].attributes.get("name") == "${repoTagSequence[i][0]}:${repoTagSequence[i][1]}" + assert cacheResultTaggedImages[i].attributes.get("account") == ACCOUNT_NAME + assert cacheResultTaggedImages[i].attributes.get("digest") == null + assert cacheResultTaggedImages[i].attributes.get("date") == (i == 0 ? 
null : Instant.EPOCH) + } + } + + def "error loading tag digest should not cache that tag"() { + given: + credentials.trackDigests >> true + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { + name="repo-1" + tags=["tag-1", "tag-2"] + } + client.getDigest("repo-1", "tag-1") >> { + throw new IOException() + } + client.getDigest("repo-1", "tag-2") >> "repo-1_tag-2" + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultImageIds = cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID) + cacheResultImageIds.size() == 1 + cacheResultImageIds[0].id == buildImageIdCacheKey("repo-1", "tag-2") + cacheResultImageIds[0].attributes.get("tagKey") == buildTaggedImageCacheKey("repo-1", "tag-2") + cacheResultImageIds[0].attributes.get("account") == ACCOUNT_NAME + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + cacheResultTaggedImages.size() == 1 + cacheResultTaggedImages[0].id == buildTaggedImageCacheKey("repo-1", "tag-2") + cacheResultTaggedImages[0].attributes.get("name") == "repo-1:tag-2" + cacheResultTaggedImages[0].attributes.get("account") == ACCOUNT_NAME + cacheResultTaggedImages[0].attributes.get("digest") == "repo-1_tag-2" + cacheResultTaggedImages[0].attributes.get("date") == null + } + + def "empty tags should not be cached"() { + given: + credentials.repositories >> ["repo-1"] + client.getTags("repo-1") >> new DockerRegistryTags().tap { + name="repo-1" + tags=["tag-1", ""] + } + + when: + def cacheResult = agent.loadData(null) + + then: + sortCacheResult(cacheResult) + def cacheResultImageIds = cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID) + cacheResultImageIds.size() == 1 + cacheResultImageIds[0].id == buildImageIdCacheKey("repo-1", "tag-1") + cacheResultImageIds[0].attributes.get("tagKey") == buildTaggedImageCacheKey("repo-1", "tag-1") + cacheResultImageIds[0].attributes.get("account") == ACCOUNT_NAME + def cacheResultTaggedImages = cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE) + cacheResultTaggedImages.size() == 1 + cacheResultTaggedImages[0].id == buildTaggedImageCacheKey("repo-1", "tag-1") + cacheResultTaggedImages[0].attributes.get("name") == "repo-1:tag-1" + cacheResultTaggedImages[0].attributes.get("account") == ACCOUNT_NAME + cacheResultTaggedImages[0].attributes.get("digest") == null + cacheResultTaggedImages[0].attributes.get("date") == null + } + + + private String buildTaggedImageCacheKey(repo, tag) { + "${KEY_PREFIX}:${CACHE_GROUP_TAGGED_IMAGE}:${ACCOUNT_NAME}:${repo}:${tag}" + } + + private String buildImageIdCacheKey(repo, tag) { + "${KEY_PREFIX}:${CACHE_GROUP_IMAGE_ID}:${REGISTRY_NAME}/${repo}:${tag}" + } + + private void sortCacheResult(CacheResult cacheResult) { + cacheResult.cacheResults.get(CACHE_GROUP_TAGGED_IMAGE).sort { + it.id + } + cacheResult.cacheResults.get(CACHE_GROUP_IMAGE_ID).sort { + it.id + } + } +} diff --git a/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationPropertiesTest.java b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationPropertiesTest.java new file mode 100644 index 00000000000..6edd1ec0c54 --- /dev/null +++ b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/config/DockerRegistryConfigurationPropertiesTest.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Wise Ltd. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.config; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.List; +import org.junit.jupiter.api.Test; + +class DockerRegistryConfigurationPropertiesTest { + + @Test + void managedAccountIsComparable() { + DockerRegistryConfigurationProperties.ManagedAccount account1 = createAccount("docker"); + DockerRegistryConfigurationProperties.ManagedAccount account2 = createAccount("docker"); + DockerRegistryConfigurationProperties.ManagedAccount account3 = createAccount("docker2"); + + assertThat(account1).isEqualTo(account2); + assertThat(account1).isNotEqualTo(account3); + } + + DockerRegistryConfigurationProperties.ManagedAccount createAccount(String name) { + DockerRegistryConfigurationProperties.ManagedAccount account = + new DockerRegistryConfigurationProperties.ManagedAccount(); + account.setName(name); + account.setEnvironment("production"); + account.setAccountType("dockerRegistry"); + account.setUsername("docker-user"); + account.setPassword("test-password"); + account.setAddress("hub.docker.com"); + account.setCacheThreads(5); + account.setCacheIntervalSeconds(6); + account.setClientTimeoutMillis(700); + account.setRepositories(List.of("repo-1", "repo-2")); + return account; + } +} diff --git a/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerTest.java b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerTest.java new file mode 100644 index 00000000000..8d67a54d329 --- /dev/null +++ b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/controllers/DockerRegistryImageLookupControllerTest.java @@ -0,0 +1,212 @@ +/* + * Copyright 2024 Apple, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.docker.registry.controllers; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.mock; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.WriteableCache; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.cache.Keys; +import com.netflix.spinnaker.clouddriver.docker.registry.controllers.DockerRegistryImageLookupControllerTest.TestConfig; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository; +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.fiat.model.UserPermission; +import com.netflix.spinnaker.fiat.model.resources.Account; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import com.netflix.spinnaker.fiat.model.resources.Role; +import com.netflix.spinnaker.fiat.shared.EnableFiatAutoConfig; +import com.netflix.spinnaker.fiat.shared.FiatService; +import com.netflix.spinnaker.fiat.shared.FiatStatus; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.dynamicconfig.SpringDynamicConfigService; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.json.AutoConfigureJson; +import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc; +import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureWebMvc; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Import; +import org.springframework.security.test.context.support.WithMockUser; +import org.springframework.test.web.servlet.MockMvc; +import retrofit2.mock.Calls; + +@SpringBootTest(classes = TestConfig.class, properties = "services.fiat.cache.max-entries=0") +@AutoConfigureMockMvc +@AutoConfigureWebMvc +@AutoConfigureJson +@EnableFiatAutoConfig +@WithMockUser +class DockerRegistryImageLookupControllerTest { + @Import(DockerRegistryImageLookupController.class) + static class TestConfig { + @Bean + WriteableCache cache() { + return new InMemoryCache(); + } + + @Bean + AccountCredentialsRepository accountCredentialsRepository() { + return new MapBackedAccountCredentialsRepository(); + } + + @Bean + AccountCredentialsProvider 
accountCredentialsProvider( + AccountCredentialsRepository accountCredentialsRepository) { + return new DefaultAccountCredentialsProvider(accountCredentialsRepository); + } + + @Bean + Registry registry() { + return new NoopRegistry(); + } + + @Bean + DynamicConfigService dynamicConfigService() { + return new SpringDynamicConfigService(); + } + } + + @Autowired MockMvc mockMvc; + @Autowired WriteableCache cache; + @Autowired AccountCredentialsRepository accountCredentialsRepository; + @MockBean FiatStatus fiatStatus; + @MockBean FiatService fiatService; + + @BeforeEach + void setUp() { + given(fiatStatus.isEnabled()).willReturn(true); + } + + @Test + void authorizedToReadTags() throws Exception { + var permissions = createAuthorizedUserPermission(); + given(fiatService.getUserPermission(eq("user"))).willReturn(Calls.response(permissions)); + + mockMvc + .perform( + get("/dockerRegistry/images/tags") + .queryParam("account", "test-account") + .queryParam("repository", "test-repository")) + .andExpect(status().isOk()); + } + + @Test + void notAuthorizedToReadTags() throws Exception { + var permissions = createUnauthorizedUserPermission(); + given(fiatService.getUserPermission("user")).willReturn(Calls.response(permissions)); + + mockMvc + .perform( + get("/dockerRegistry/images/tags") + .queryParam("account", "test-account") + .queryParam("repository", "test-repository")) + .andExpect(status().isForbidden()); + } + + @Test + void canSearchForAuthorizedItems() throws Exception { + var permissions = createAuthorizedUserPermission(); + given(fiatService.getUserPermission("user")).willReturn(Calls.response(permissions)); + cache.merge(Keys.Namespace.TAGGED_IMAGE.getNs(), createTestAccountTaggedImageCacheData()); + var credentials = createTestAccountCredentials(); + accountCredentialsRepository.save(credentials.getName(), credentials); + + mockMvc + .perform(get("/dockerRegistry/images/find")) + .andExpect(jsonPath("$[0].account").value("test-account")); + } + + @Test + void filtersOutUnauthorizedItems() throws Exception { + var permissions = createUnauthorizedUserPermission(); + given(fiatService.getUserPermission("user")).willReturn(Calls.response(permissions)); + cache.merge(Keys.Namespace.TAGGED_IMAGE.getNs(), createTestAccountTaggedImageCacheData()); + var credentials = createTestAccountCredentials(); + accountCredentialsRepository.save(credentials.getName(), credentials); + + mockMvc + .perform(get("/dockerRegistry/images/find")) + .andExpectAll(status().isOk(), jsonPath("$.length()").value(0)); + } + + private static UserPermission.View createAuthorizedUserPermission() { + return new UserPermission() + .setId("user") + .addResources( + List.of( + new Account() + .setName("test-account") + .setPermissions( + new Permissions.Builder().add(Authorization.READ, "user").build()), + new Role("user"))) + .getView(); + } + + private static UserPermission.View createUnauthorizedUserPermission() { + return new UserPermission().setId("user").addResources(List.of(new Role("user"))).getView(); + } + + private static CacheData createTestAccountTaggedImageCacheData() { + String imageKey = Keys.getTaggedImageKey("test-account", "test-repository", "1.0"); + return new DefaultCacheData( + imageKey, + Map.of( + "account", + "test-account", + "digest", + "test-digest", + "labels", + Map.of( + "commitId", + "test-commit", + "buildNumber", + "1", + "branch", + "test-branch", + "jobName", + "test-job")), + Map.of()); + } + + private static DockerRegistryNamedAccountCredentials createTestAccountCredentials() { + 
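+ // Builds a mock named-account credential matching the "test-account" cache entries above.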
var credentials = mock(DockerRegistryNamedAccountCredentials.class); + given(credentials.getName()).willReturn("test-account"); + given(credentials.getCloudProvider()) + .willReturn(DockerRegistryCloudProvider.getDOCKER_REGISTRY()); + given(credentials.getRegistry()).willReturn("test-registry"); + return credentials; + } +} diff --git a/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicatorTest.java b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicatorTest.java new file mode 100644 index 00000000000..91076d32039 --- /dev/null +++ b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/health/DockerRegistryHealthIndicatorTest.java @@ -0,0 +1,186 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.health; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.*; + +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerRegistryClient; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryCredentials; +import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.boot.actuate.health.Health; +import org.springframework.boot.actuate.health.Status; + +@ExtendWith(MockitoExtension.class) +class DockerRegistryHealthIndicatorTest { + + private static final String ERROR_MESSAGE = "Failed to get namespaces"; + private static final String HEALTHY_ACCOUNT_NAME = "healthy"; + private static final String UNHEALTHY_ACCOUNT_NAME_FIRST = "unhealthy1"; + private static final String UNHEALTHY_ACCOUNT_NAME_SECOND = "unhealthy2"; + private static final String CREDENTIALS_TYPE = "dockerRegistry"; + + private static final Registry REGISTRY = new NoopRegistry(); + + @Mock private DockerRegistryNamedAccountCredentials healthyNamedCredentials; + @Mock private DockerRegistryNamedAccountCredentials unhealthyNamedAccountCredentialsFirst; + @Mock private DockerRegistryNamedAccountCredentials unhealthyNamedAccountCredentialsSecond; + + @Mock private DockerRegistryCredentials dockerRegistryCredentials; + + @Mock private DockerRegistryClient dockerRegistryClient; 
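+ // Shared mocks: each test stubs these to model a healthy or a failing registry client.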
+ + @Test + void healthyWithNoAccounts() { + CredentialsRepository<DockerRegistryNamedAccountCredentials> repository = + stubCredentialsRepository(ImmutableList.of()); + + DockerRegistryHealthIndicator healthIndicator = + new DockerRegistryHealthIndicator(REGISTRY, repository); + + healthIndicator.checkHealth(); + Health result = healthIndicator.getHealth(true); + + assertThat(result.getStatus()).isEqualTo(Status.UP); + assertThat(result.getDetails()).isEmpty(); + } + + @Test + void healthyWithOnlyHealthyAccounts() { + when(healthyNamedCredentials.getCredentials()).thenReturn(dockerRegistryCredentials); + when(healthyNamedCredentials.getName()).thenReturn(HEALTHY_ACCOUNT_NAME); + when(healthyNamedCredentials.getType()).thenReturn(CREDENTIALS_TYPE); + when(dockerRegistryCredentials.getClient()).thenReturn(dockerRegistryClient); + + // no exception if account is healthy + doNothing().when(dockerRegistryClient).checkV2Availability(); + + CredentialsRepository<DockerRegistryNamedAccountCredentials> repository = + stubCredentialsRepository(ImmutableList.of(healthyNamedCredentials)); + + DockerRegistryHealthIndicator healthIndicator = + new DockerRegistryHealthIndicator(REGISTRY, repository); + + healthIndicator.checkHealth(); + Health result = healthIndicator.getHealth(true); + + assertThat(result.getStatus()).isEqualTo(Status.UP); + assertThat(result.getDetails()).isEmpty(); + } + + @Test + void reportsErrorForUnhealthyAccount() { + when(unhealthyNamedAccountCredentialsFirst.getCredentials()) + .thenReturn(dockerRegistryCredentials); + when(unhealthyNamedAccountCredentialsFirst.getName()).thenReturn(UNHEALTHY_ACCOUNT_NAME_FIRST); + when(unhealthyNamedAccountCredentialsFirst.getType()).thenReturn(CREDENTIALS_TYPE); + when(dockerRegistryCredentials.getClient()).thenReturn(dockerRegistryClient); + // exception thrown because the account is unhealthy + doThrow(new RuntimeException(ERROR_MESSAGE)).when(dockerRegistryClient).checkV2Availability(); + + CredentialsRepository<DockerRegistryNamedAccountCredentials> repository = + stubCredentialsRepository(ImmutableList.of(unhealthyNamedAccountCredentialsFirst)); + + DockerRegistryHealthIndicator healthIndicator = + new DockerRegistryHealthIndicator(REGISTRY, repository); + healthIndicator.checkHealth(); + Health result = healthIndicator.getHealth(true); + + assertThat(result.getStatus()).isEqualTo(Status.UP); + assertEquals(1, result.getDetails().size()); + assertTrue( + result.getDetails().containsKey(UNHEALTHY_ACCOUNT_NAME_FIRST) + && result.getDetails().get(UNHEALTHY_ACCOUNT_NAME_FIRST).equals(ERROR_MESSAGE)); + } + + @Test + void reportsMultipleErrors() { + when(healthyNamedCredentials.getCredentials()).thenReturn(dockerRegistryCredentials); + when(healthyNamedCredentials.getName()).thenReturn(HEALTHY_ACCOUNT_NAME); + when(healthyNamedCredentials.getType()).thenReturn(CREDENTIALS_TYPE); + when(dockerRegistryCredentials.getClient()).thenReturn(dockerRegistryClient); + + DockerRegistryCredentials unhealthyDockerRegistryCredentials = + mock(DockerRegistryCredentials.class); + DockerRegistryClient unhealthyDockerRegistryClient = mock(DockerRegistryClient.class); + + when(unhealthyNamedAccountCredentialsFirst.getCredentials()) + .thenReturn(unhealthyDockerRegistryCredentials); + when(unhealthyNamedAccountCredentialsFirst.getName()).thenReturn(UNHEALTHY_ACCOUNT_NAME_FIRST); + when(unhealthyNamedAccountCredentialsFirst.getType()).thenReturn(CREDENTIALS_TYPE); + when(unhealthyDockerRegistryCredentials.getClient()).thenReturn(unhealthyDockerRegistryClient); + + when(unhealthyNamedAccountCredentialsSecond.getCredentials()) + .thenReturn(unhealthyDockerRegistryCredentials); 
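+ // The second unhealthy account reuses the same failing credentials/client pair.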
+ when(unhealthyNamedAccountCredentialsSecond.getName()) + .thenReturn(UNHEALTHY_ACCOUNT_NAME_SECOND); + when(unhealthyNamedAccountCredentialsSecond.getType()).thenReturn(CREDENTIALS_TYPE); + when(unhealthyDockerRegistryCredentials.getClient()).thenReturn(unhealthyDockerRegistryClient); + + // no exception if account is healthy + doNothing().when(dockerRegistryClient).checkV2Availability(); + // exception thrown because the account is unhealthy + doThrow(new RuntimeException(ERROR_MESSAGE)) + .when(unhealthyDockerRegistryClient) + .checkV2Availability(); + + CredentialsRepository<DockerRegistryNamedAccountCredentials> repository = + stubCredentialsRepository( + ImmutableList.of( + healthyNamedCredentials, + unhealthyNamedAccountCredentialsFirst, + unhealthyNamedAccountCredentialsSecond)); + + DockerRegistryHealthIndicator healthIndicator = + new DockerRegistryHealthIndicator(REGISTRY, repository); + + healthIndicator.checkHealth(); + Health result = healthIndicator.getHealth(true); + + assertThat(result.getStatus()).isEqualTo(Status.UP); + assertEquals(2, result.getDetails().size()); + assertTrue( + result.getDetails().containsKey(UNHEALTHY_ACCOUNT_NAME_FIRST) + && result.getDetails().get(UNHEALTHY_ACCOUNT_NAME_FIRST).equals(ERROR_MESSAGE)); + assertTrue( + result.getDetails().containsKey(UNHEALTHY_ACCOUNT_NAME_SECOND) + && result.getDetails().get(UNHEALTHY_ACCOUNT_NAME_SECOND).equals(ERROR_MESSAGE)); + } + + private static CredentialsRepository<DockerRegistryNamedAccountCredentials> + stubCredentialsRepository(Iterable<DockerRegistryNamedAccountCredentials> accounts) { + CredentialsRepository<DockerRegistryNamedAccountCredentials> repository = + new MapBackedCredentialsRepository<>( + DockerRegistryCloudProvider.getDOCKER_REGISTRY(), null); + for (DockerRegistryNamedAccountCredentials account : accounts) { + repository.save(account); + } + return repository; + } } diff --git a/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsLifecycleHandlerTest.java b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsLifecycleHandlerTest.java new file mode 100644 index 00000000000..6832237ee51 --- /dev/null +++ b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryCredentialsLifecycleHandlerTest.java @@ -0,0 +1,158 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.docker.registry.security; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.docker.registry.DockerRegistryCloudProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.provider.DockerRegistryProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.provider.agent.DockerRegistryImageCachingAgent; +import java.util.List; +import org.junit.jupiter.api.Test; + +class DockerRegistryCredentialsLifecycleHandlerTest { + + @Test + public void testAddCredentials() { + DockerRegistryCloudProvider dockerRegistryCloudProvider = new DockerRegistryCloudProvider(); + DockerRegistryProvider provider = new DockerRegistryProvider(dockerRegistryCloudProvider); + DockerRegistryCredentialsLifecycleHandler handler = + new DockerRegistryCredentialsLifecycleHandler(provider, dockerRegistryCloudProvider); + + // Check we start with no agents + assertThat(provider.getAgents()).isEmpty(); + + DockerRegistryNamedAccountCredentials dockerRegistryNamedAccountCredentials = + mock(DockerRegistryNamedAccountCredentials.class); + DockerRegistryCredentials dockerRegistryCredentials = mock(DockerRegistryCredentials.class); + + when(dockerRegistryNamedAccountCredentials.getName()).thenReturn("docker"); + when(dockerRegistryNamedAccountCredentials.getCredentials()) + .thenReturn(dockerRegistryCredentials); + when(dockerRegistryNamedAccountCredentials.getCacheThreads()).thenReturn(1); + when(dockerRegistryNamedAccountCredentials.getCacheIntervalSeconds()).thenReturn(10L); + when(dockerRegistryNamedAccountCredentials.getRegistry()).thenReturn("registry"); + + handler.credentialsAdded(dockerRegistryNamedAccountCredentials); + // We should have added an agent + assertThat(provider.getAgents()).hasSize(1); + + handler.credentialsAdded(dockerRegistryNamedAccountCredentials); + // We should have yet another one + assertThat(provider.getAgents()).hasSize(2); + } + + @Test + public void testMultipleCacheThreads() { + DockerRegistryCloudProvider dockerRegistryCloudProvider = new DockerRegistryCloudProvider(); + DockerRegistryProvider provider = new DockerRegistryProvider(dockerRegistryCloudProvider); + DockerRegistryCredentialsLifecycleHandler handler = + new DockerRegistryCredentialsLifecycleHandler(provider, dockerRegistryCloudProvider); + + // Check we start with no agents + assertThat(provider.getAgents()).isEmpty(); + + DockerRegistryNamedAccountCredentials dockerRegistryNamedAccountCredentials = + mock(DockerRegistryNamedAccountCredentials.class); + DockerRegistryCredentials dockerRegistryCredentials = mock(DockerRegistryCredentials.class); + + when(dockerRegistryNamedAccountCredentials.getName()).thenReturn("docker"); + when(dockerRegistryNamedAccountCredentials.getCredentials()) + .thenReturn(dockerRegistryCredentials); + + final int cacheThreads = 3; + when(dockerRegistryNamedAccountCredentials.getCacheThreads()).thenReturn(cacheThreads); + when(dockerRegistryNamedAccountCredentials.getCacheIntervalSeconds()).thenReturn(10L); + when(dockerRegistryNamedAccountCredentials.getRegistry()).thenReturn("registry"); + + handler.credentialsAdded(dockerRegistryNamedAccountCredentials); + // We should have added an agent per cache thread + assertThat(provider.getAgents()).hasSize(cacheThreads); + } + + @Test + public void testUpdateCredentials() { + 
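+ // The handler registers agents using cacheIntervalSeconds converted to milliseconds.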
DockerRegistryCloudProvider dockerRegistryCloudProvider = new DockerRegistryCloudProvider(); + DockerRegistryProvider provider = new DockerRegistryProvider(dockerRegistryCloudProvider); + DockerRegistryCredentialsLifecycleHandler handler = + new DockerRegistryCredentialsLifecycleHandler(provider, dockerRegistryCloudProvider); + + // Check we start with no agents + assertThat(provider.getAgents()).isEmpty(); + + DockerRegistryNamedAccountCredentials dockerRegistryNamedAccountCredentials = + mock(DockerRegistryNamedAccountCredentials.class); + DockerRegistryCredentials dockerRegistryCredentials = mock(DockerRegistryCredentials.class); + + when(dockerRegistryNamedAccountCredentials.getName()).thenReturn("docker"); + when(dockerRegistryNamedAccountCredentials.getCredentials()) + .thenReturn(dockerRegistryCredentials); + when(dockerRegistryNamedAccountCredentials.getCacheThreads()).thenReturn(1); + when(dockerRegistryNamedAccountCredentials.getCacheIntervalSeconds()).thenReturn(10L); + when(dockerRegistryNamedAccountCredentials.getRegistry()).thenReturn("registry"); + + handler.credentialsAdded(dockerRegistryNamedAccountCredentials); + // We should have added an agent + assertThat(provider.getAgents()).hasSize(1); + DockerRegistryImageCachingAgent agent = + ((DockerRegistryImageCachingAgent) provider.getAgents().stream().findFirst().get()); + assertEquals(10000L, agent.getAgentInterval()); + + // updating a field + when(dockerRegistryNamedAccountCredentials.getCacheIntervalSeconds()).thenReturn(20L); + handler.credentialsUpdated(dockerRegistryNamedAccountCredentials); + // We should have only one + assertThat(provider.getAgents()).hasSize(1); + + agent = ((DockerRegistryImageCachingAgent) provider.getAgents().stream().findFirst().get()); + assertEquals(20000L, agent.getAgentInterval()); + } + + @Test + public void testRemoveCredentials() { + String ACCOUNT1 = "account1"; + String ACCOUNT2 = "account2"; + + DockerRegistryCloudProvider dockerRegistryCloudProvider = new DockerRegistryCloudProvider(); + DockerRegistryProvider provider = new DockerRegistryProvider(dockerRegistryCloudProvider); + DockerRegistryCredentialsLifecycleHandler handler = + new DockerRegistryCredentialsLifecycleHandler(provider, dockerRegistryCloudProvider); + + // Check we start with no agents + assertThat(provider.getAgents()).isEmpty(); + + DockerRegistryImageCachingAgent agent1 = mock(DockerRegistryImageCachingAgent.class); + when(agent1.handlesAccount(ACCOUNT1)).thenReturn(true); + + DockerRegistryImageCachingAgent agent2 = mock(DockerRegistryImageCachingAgent.class); + when(agent2.handlesAccount(ACCOUNT2)).thenReturn(true); + + provider.addAgents(List.of(agent1, agent2)); + assertThat(provider.getAgents()).hasSize(2); + + DockerRegistryNamedAccountCredentials cred1 = mock(DockerRegistryNamedAccountCredentials.class); + when(cred1.getName()).thenReturn(ACCOUNT1); + handler.credentialsDeleted(cred1); + + // We removed account1 so only agent2 should remain + assertThat(provider.getAgents()).hasSize(1); + assertThat(provider.getAgents()).contains(agent2); + } } diff --git a/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentialsTest.java b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentialsTest.java new file mode 100644 index 00000000000..7fb04a70ff7 --- /dev/null +++ 
b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/DockerRegistryNamedAccountCredentialsTest.java @@ -0,0 +1,187 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.security; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.jakewharton.retrofit.Ok3Client; +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerOkClientProvider; +import com.netflix.spinnaker.clouddriver.docker.registry.api.v2.client.DockerRegistryTags; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.time.format.DateTimeFormatter; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.junit.jupiter.api.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import retrofit.client.Request; +import retrofit.client.Response; +import retrofit.mime.TypedString; + +final class DockerRegistryNamedAccountCredentialsTest { + private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final String ACCOUNT_NAME = "test-account"; + private static final String REPO_NAME = "myrepo"; + private static Instant LATEST_DATE = Instant.ofEpochSecond(1500000000L); + + @Test + void getTags() throws IOException { + ImmutableList<String> tags = ImmutableList.of("latest", "other", "something"); + Ok3Client ok3Client = mockDockerOkClient(tags, ImmutableMap.of()); + DockerRegistryNamedAccountCredentials credentials = + new DockerRegistryNamedAccountCredentials.Builder() + .accountName(ACCOUNT_NAME) + .address("https://gcr.io") + .dockerOkClientProvider(new MockDockerOkClientProvider(ok3Client)) + .build(); + assertThat(credentials.getTags(REPO_NAME)).containsExactlyInAnyOrderElementsOf(tags); + } + + @Test + void getTagsInOrder() throws IOException { + ImmutableList<String> tags = ImmutableList.of("older", "nodate", "oldest", "latest"); + ImmutableMap<String, Instant> creationDates = + ImmutableMap.of( + "latest", + LATEST_DATE, + "older", + LATEST_DATE.minus(Duration.ofSeconds(1)), + "oldest", + LATEST_DATE.minus(Duration.ofDays(1))); + + Ok3Client ok3Client = mockDockerOkClient(tags, creationDates); + DockerRegistryNamedAccountCredentials credentials = + new DockerRegistryNamedAccountCredentials.Builder() + .accountName(ACCOUNT_NAME) + .address("https://gcr.io") + .sortTagsByDate(true) + .dockerOkClientProvider(new MockDockerOkClientProvider(ok3Client)) + .build(); + 
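// Expected order: newest creation date first, tags without a date last. +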
assertThat(credentials.getTags(REPO_NAME)) + .containsExactly("latest", "older", "oldest", "nodate"); + } + + /** + * Generates a mock Ok3Client that simulates responses from a docker registry with the supplied + * tags and supplied creation dates for each tag. Tags that are not present in the map of creation + * dates will return null as their creation date. + */ + private static Ok3Client mockDockerOkClient( + Iterable<String> tags, Map<String, Instant> creationDates) throws IOException { + Ok3Client ok3Client = mock(Ok3Client.class); + doReturn( + new Response( + "https://gcr.io/v2/myrepo/tags/list", + 200, + "", + Collections.emptyList(), + new TypedString(objectMapper.writeValueAsString(getTagsResponse(tags))))) + .when(ok3Client) + .execute(argThat(r -> r.getUrl().equals("https://gcr.io/v2/myrepo/tags/list"))); + + doAnswer( + new Answer<Object>() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + Request request = (Request) args[0]; + String tag = getTag(request.getUrl()); + Instant optionalDate = creationDates.get(tag); + return new Response( + "https://gcr.io/v2/myrepo/manifests/latest", + 200, + "", + Collections.emptyList(), + new TypedString( + objectMapper.writeValueAsString( + DockerManifestResponse.withCreationDate(optionalDate)))); + } + + private String getTag(String url) { + Matcher matcher = + Pattern.compile("https://gcr.io/v2/myrepo/manifests/(.*)").matcher(url); + if (matcher.matches()) { + return matcher.group(1); + } + throw new IllegalArgumentException(); + } + }) + .when(ok3Client) + .execute(argThat(r -> r.getUrl().matches("https://gcr.io/v2/myrepo/manifests/.*"))); + + return ok3Client; + } + + private static DockerRegistryTags getTagsResponse(Iterable<String> tags) { + DockerRegistryTags tagsResponse = new DockerRegistryTags(); + tagsResponse.setName(REPO_NAME); + tagsResponse.setTags(ImmutableList.copyOf(tags)); + return tagsResponse; + } + + /** + * Helper class for generating the response from a call to the /manifests docker endpoint. At this + * point, the only field we look at is the created timestamp, so we only send this part of the + * response. 
+ */ + @Getter + @RequiredArgsConstructor + private static class DockerManifestResponse { + private final ImmutableList<HistoryEntry> history; + + static DockerManifestResponse withCreationDate(Instant instant) throws IOException { + return new DockerManifestResponse(ImmutableList.of(HistoryEntry.withCreationDate(instant))); + } + + @Getter + @RequiredArgsConstructor + private static class HistoryEntry { + private final String v1Compatibility; + private static DateTimeFormatter formatter = DateTimeFormatter.ISO_INSTANT; + + static HistoryEntry withCreationDate(Instant instant) throws IOException { + Map<String, String> entries = new HashMap<>(); + entries.put("created", formatter.format(instant)); + return new HistoryEntry(objectMapper.writeValueAsString(entries)); + } + } + } + + @RequiredArgsConstructor + private static class MockDockerOkClientProvider implements DockerOkClientProvider { + private final Ok3Client mockClient; + + @Override + public Ok3Client provide(String address, long timeoutMs, boolean insecure) { + return mockClient; + } + } } diff --git a/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/KeyBasedSorterTest.java b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/KeyBasedSorterTest.java new file mode 100644 index 00000000000..731b0f9c0dd --- /dev/null +++ b/clouddriver-docker/src/test/java/com/netflix/spinnaker/clouddriver/docker/registry/security/KeyBasedSorterTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.docker.registry.security; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.IntStream; +import lombok.AccessLevel; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import lombok.ToString; +import org.junit.jupiter.api.Test; + +final class KeyBasedSorterTest { + @Test + void naturalOrderSort() { + ImmutableList<IntegerWrapper> listToSort = IntegerWrapper.from(2, 0, 7, -100, 27, -38, -2, -3); + assertThat(KeyBasedSorter.sort(listToSort, IntegerWrapper::getValue, Comparator.naturalOrder())) + .containsExactlyElementsOf(IntegerWrapper.from(-100, -38, -3, -2, 0, 2, 7, 27)); + } + + @Test + void reverseOrderSort() { + ImmutableList<IntegerWrapper> listToSort = IntegerWrapper.from(2, 0, 7, -100, 27, -38, -2, -3); + assertThat(KeyBasedSorter.sort(listToSort, IntegerWrapper::getValue, Comparator.reverseOrder())) + .containsExactlyElementsOf(IntegerWrapper.from(27, 7, 2, 0, -2, -3, -38, -100)); + } + + @Test + void callsKeyFunctionOnce() { + ImmutableList<IntegerWrapper> listToSort = IntegerWrapper.from(2, 0, 7, -100, 27, -38, -2, -3); + + // Check that no exceptions are thrown by trying to look up the sort key more than once for + // a given element. 
+ KeyBasedSorter.sort(listToSort, IntegerWrapper::getSortKey, Comparator.naturalOrder()); + } + + /** + * Test class that wraps a simple integer used to test sorting. The integer can either be accessed + * by getValue or by getSortKey, with the difference being that getSortKey will throw an + * IllegalStateException when called more than once on the same instance, which is useful for + * validating that we only extract the sort key once per element per sort. + */ + @EqualsAndHashCode + @ToString + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + private static class IntegerWrapper { + @EqualsAndHashCode.Exclude private AtomicBoolean sortKeyCalled = new AtomicBoolean(false); + + @Getter private final int value; + + static ImmutableList<IntegerWrapper> from(int... values) { + return IntStream.of(values).mapToObj(IntegerWrapper::new).collect(toImmutableList()); + } + + int getSortKey() { + if (sortKeyCalled.getAndSet(true)) { + throw new IllegalStateException("Sort key can only be called once!"); + } + return value; + } + } } diff --git a/clouddriver-ecs/README.md b/clouddriver-ecs/README.md index 5ec611d3cb4..d0acbe6cf49 100644 --- a/clouddriver-ecs/README.md +++ b/clouddriver-ecs/README.md @@ -1,12 +1,10 @@ -## AWS ECS Clouddriver +## Amazon ECS Clouddriver -The clouddriver-ecs module allows for ECS deployments of dockerized applications. **You need to enable the AWS cloud provider in order for the ECS cloud provider to work**. - -It is a work in progress +The clouddriver-ecs module allows for Amazon ECS deployments of dockerized applications. **You need to enable the AWS cloud provider in order for the ECS cloud provider to work**. ## Clouddriver configuration -In order for the ECS cloud provider to work, a corresponding AWS account must be configured and enabled. An ECS account will be tied to a given AWS account by its name. Below is an example snippet of `clouddriver.yml`: +In order for the Amazon ECS cloud provider to work, a corresponding AWS account must be configured and enabled. An ECS account will be tied to a given AWS account by its name. Below is an example snippet of `clouddriver.yml`: ``` aws: @@ -27,29 +25,13 @@ ecs: ``` +## Spinnaker role +In Spinnaker 1.19 and later, the Amazon ECS cloud provider requires [service-linked roles](https://docs.aws.amazon.com/AmazonECS/latest/userguide/using-service-linked-roles.html) for Amazon ECS and Application Auto Scaling. To create the required service-linked roles, run the following `aws-cli` commands: -## Spinnaker role -Make sure that you allow the `application-autoscaling.amazonaws.com` and `ecs.amazonaws.com` principals to assume the SpinnakerManaged role by adding it as a principal. See example code below. Failure to do so will prevent you from deploying ECS server groups: ``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": [ - "ecs.amazonaws.com", - "application-autoscaling.amazonaws.com" - ], - }, - "Action": "sts:AssumeRole" - } - ] -} +aws iam create-service-linked-role --aws-service-name ecs.amazonaws.com +aws iam create-service-linked-role --aws-service-name ecs.application-autoscaling.amazonaws.com ``` -## - -TODO Wishlist: -1. Perhaps clouddriver should try to add the 2 required trust relationships on startup if they are detected as not being present +See the official Spinnaker [Amazon ECS provider setup docs](https://spinnaker.io/setup/install/providers/aws/aws-ecs/#service-linked-iam-roles) for more information. 
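+ +To confirm the roles already exist before deploying (an optional check; the names below are the standard AWS names for these two service-linked roles), you can run: + +``` +aws iam get-role --role-name AWSServiceRoleForECS +aws iam get-role --role-name AWSServiceRoleForApplicationAutoScaling_ECSService +``` 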
diff --git a/clouddriver-ecs/clouddriver-ecs.gradle b/clouddriver-ecs/clouddriver-ecs.gradle index e86258330f3..d3a151080f2 100644 --- a/clouddriver-ecs/clouddriver-ecs.gradle +++ b/clouddriver-ecs/clouddriver-ecs.gradle @@ -1,7 +1,74 @@ +sourceSets { + integration { + java.srcDirs = ["src/integration/java"] + resources.srcDirs = ["src/integration/resources"] + compileClasspath += main.output + test.output + } +} + +configurations { + integrationImplementation.extendsFrom testImplementation + integrationRuntime.extendsFrom testRuntime +} + dependencies { - compile project(":clouddriver-aws") - compile spinnaker.dependency('lombok') + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-aws") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + + implementation "com.amazonaws:aws-java-sdk" + implementation "com.github.ben-manes.caffeine:guava" + implementation "com.netflix.awsobjectmapper:awsobjectmapper" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-credentials-api" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-moniker" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "commons-io:commons-io" + implementation "org.apache.commons:commons-lang3" + implementation "org.apache.httpcomponents:httpclient" + implementation "org.apache.httpcomponents:httpcore" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-starter-web" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.mockito:mockito-core" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework.boot:spring-boot-starter-test" + + integrationImplementation project(":clouddriver-web") + integrationImplementation "org.springframework:spring-test" + integrationImplementation "org.testcontainers:mysql" + integrationImplementation "com.mysql:mysql-connector-j" + integrationImplementation sourceSets.test.output + integrationImplementation sourceSets.main.output + integrationImplementation "io.rest-assured:rest-assured" +} + +task integrationTest(type: Test) { + description = "Runs Amazon ECS provider integration tests." 
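+ // NOTE: the integration classpath pulls in Testcontainers MySQL, so a local Docker daemon may be required.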
+ group = 'verification' + + environment "PROJECT_ROOT", project.rootDir.toString() + useJUnitPlatform() + + testClassesDirs = sourceSets.integration.output.classesDirs + classpath = sourceSets.integration.runtimeClasspath + shouldRunAfter test + +// maxParallelForks = 4 - spinnaker.group('amazon') - spinnaker.group('retrofitDefault') + minHeapSize = "512m" + maxHeapSize = "${testJvmMaxMemory}" } diff --git a/clouddriver-ecs/src/integration/README.md b/clouddriver-ecs/src/integration/README.md new file mode 100644 index 00000000000..17bb634a320 --- /dev/null +++ b/clouddriver-ecs/src/integration/README.md @@ -0,0 +1,29 @@ +# Amazon ECS Integration tests + +Tests which exercise Amazon ECS controllers and atomic operations against a running `clouddriver` application. + +## Running the tests + +To manually run the gradle task for these tests: +```bash +$> ./gradlew :clouddriver-ecs:integrationTest +``` + +## Guidance for modifying this package + +### When to add a new test + +New Amazon ECS provider features of significant scope should include an integration test which exercises the new functionality. +Examples of qualifying changes include (but are not limited to): + +* Implementing a new atomic operation +* New forking logic in how `CreateServerGroup` functions or is validated (especially re: broadly impactful settings like `networkMode`, launch type, load balancing, or application autoscaling) +* Adding a new controller for a new type of resource + +### Changing existing tests + +In general, existing test cases should function as-is after new contributions to ensure existing features continue to function as expected. +Possible exceptions to this guidance may include: + +* Updates to internal implementation details (required `@Beans`, etc.) that don't affect operation success or API response content +* Adding and asserting on *new* data in a `clouddriver` API response diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/EcsSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/EcsSpec.java new file mode 100644 index 00000000000..ecebaf0000c --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/EcsSpec.java @@ -0,0 +1,122 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import java.util.function.BooleanSupplier; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import org.springframework.boot.web.server.LocalServerPort; +import org.springframework.context.annotation.Import; +import org.springframework.test.context.TestPropertySource; + +@Import(EcsTestConfiguration.class) +@SpringBootTest( + classes = {Main.class}, + webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) +@TestPropertySource(properties = {"spring.config.location = classpath:clouddriver.yml"}) +public class EcsSpec { + protected static final String TEST_OPERATIONS_LOCATION = + "src/integration/resources/testoperations"; + protected static final String TEST_ARTIFACTS_LOCATION = "src/integration/resources/testartifacts"; + + @Value("${ecs.primaryAccount}") + protected String ACTUAL_ECS_ACCOUNT_NAME; + + protected static final String ECS_ACCOUNT_NAME = "ecs-account"; + + @Value("${ecs.accounts[1].name}") + protected String ACTUAL_ECS_MONIKER_ACCOUNT_NAME; + + protected static final String ECS_MONIKER_ACCOUNT_NAME = "ecs-moniker-account"; + + protected final String TEST_REGION = "us-west-2"; + protected final int TASK_RETRY_SECONDS = 3; + protected static final String CREATE_SG_TEST_PATH = "/ecs/ops/createServerGroup"; + + @Value("${ecs.enabled}") + Boolean ecsEnabled; + + @Value("${aws.enabled}") + Boolean awsEnabled; + + @LocalServerPort private int port; + + @MockBean protected AmazonClientProvider mockAwsProvider; + + @DisplayName(".\n===\n" + "Assert AWS and ECS providers are enabled" + "\n===") + @Test + public void configTest() { + assertTrue(awsEnabled); + assertTrue(ecsEnabled); + assertEquals(ECS_ACCOUNT_NAME, ACTUAL_ECS_ACCOUNT_NAME); + assertEquals(ECS_MONIKER_ACCOUNT_NAME, ACTUAL_ECS_MONIKER_ACCOUNT_NAME); + } + + protected String generateStringFromTestFile(String path) throws IOException { + return new String(Files.readAllBytes(Paths.get(TEST_OPERATIONS_LOCATION, path))); + } + + protected String generateStringFromTestArtifactFile(String path) throws IOException { + return new String(Files.readAllBytes(Paths.get(TEST_ARTIFACTS_LOCATION, path))); + } + + protected String getTestUrl(String path) { + return "http://localhost:" + port + path; + } + + protected DefaultCacheResult buildCacheResult( + Map attributes, String namespace, String key) { + Collection dataPoints = new LinkedList<>(); + dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap())); + + Map> dataMap = new HashMap<>(); + dataMap.put(namespace, dataPoints); + + return new DefaultCacheResult(dataMap); + } + + protected void 
retryUntilTrue(BooleanSupplier func, String failMsg, int retrySeconds) + throws InterruptedException { + for (int i = 0; i < retrySeconds; i++) { + if (!func.getAsBoolean()) { + Thread.sleep(1000); + } else { + return; + } + } + fail(failMsg); + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/EcsTestConfiguration.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/EcsTestConfiguration.java new file mode 100644 index 00000000000..a780ae11ff6 --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/EcsTestConfiguration.java @@ -0,0 +1,84 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; + +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixAssumeRoleEcsCredentials; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.CredentialsParser; +import org.mockito.stubbing.Answer; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Primary; +import org.springframework.test.context.TestPropertySource; + +@TestConfiguration +@TestPropertySource(properties = {"spring.config.location = classpath:clouddriver.yml"}) +public class EcsTestConfiguration { + + @Value("${ecs.primaryAccount}") + protected String ECS_ACCOUNT_NAME; + + @Value("${ecs.accounts[1].name}") + protected String ECS_MONIKER_ACCOUNT_NAME; + + @Value("${aws.primaryAccount}") + protected String AWS_ACCOUNT_NAME; + + @Bean + @Primary + public CompositeCredentialsRepository compositeCredentialsRepository() { + NetflixAmazonCredentials awsCreds = TestCredential.named(AWS_ACCOUNT_NAME); + NetflixECSCredentials ecsCreds = + new NetflixAssumeRoleEcsCredentials( + TestCredential.assumeRoleNamed(ECS_ACCOUNT_NAME), AWS_ACCOUNT_NAME); + NetflixECSCredentials ecsMonikerCreds = + new NetflixAssumeRoleEcsCredentials( + TestCredential.assumeRoleNamed(ECS_MONIKER_ACCOUNT_NAME), AWS_ACCOUNT_NAME); + CompositeCredentialsRepository repo = + mock(CompositeCredentialsRepository.class); + when(repo.getCredentials(eq(AWS_ACCOUNT_NAME), eq("aws"))).thenReturn(awsCreds); + when(repo.getCredentials(eq(ECS_ACCOUNT_NAME), eq("ecs"))).thenReturn(ecsCreds); + when(repo.getCredentials(eq(ECS_MONIKER_ACCOUNT_NAME), eq("ecs"))).thenReturn(ecsMonikerCreds); + 
when(repo.getFirstCredentialsWithName(AWS_ACCOUNT_NAME)).thenReturn(awsCreds); + when(repo.getFirstCredentialsWithName(ECS_ACCOUNT_NAME)).thenReturn(ecsCreds); + when(repo.getFirstCredentialsWithName(ECS_MONIKER_ACCOUNT_NAME)).thenReturn(ecsMonikerCreds); + return repo; + } + + @Bean("amazonCredentialsParser") + @Primary + public CredentialsParser amazonCredentialsParser() { + CredentialsParser parser = mock(CredentialsParser.class, withSettings().verboseLogging()); + when(parser.parse(any())) + .thenAnswer( + (Answer) + invocation -> { + AccountsConfiguration.Account account = + invocation.getArgument(0, AccountsConfiguration.Account.class); + return TestCredential.assumeRoleNamed(account.getName()); + }); + return parser; + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupForExistingServiceSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupForExistingServiceSpec.java new file mode 100644 index 00000000000..ac8affb0202 --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupForExistingServiceSpec.java @@ -0,0 +1,206 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.test; + +import static io.restassured.RestAssured.get; +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingClient; +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest; +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult; +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.*; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.EcsSpec; +import io.restassured.http.ContentType; +import java.io.IOException; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; + +public class CreateServerGroupForExistingServiceSpec extends EcsSpec { + + private AWSApplicationAutoScalingClient 
mockAWSApplicationAutoScalingClient = + mock(AWSApplicationAutoScalingClient.class); + + private AmazonECS mockECS = mock(AmazonECS.class); + + private AmazonElasticLoadBalancing mockELB = mock(AmazonElasticLoadBalancing.class); + + @BeforeEach + public void setup() { + + // mocking calls + when(mockECS.listAccountSettings(any(ListAccountSettingsRequest.class))) + .thenReturn(new ListAccountSettingsResult()); + when(mockECS.describeServices(any(DescribeServicesRequest.class))) + .thenReturn( + new DescribeServicesResult() + .withServices( + Collections.singletonList( + new Service() + .withServiceName("ecs-integInputEC2TgMappingsExistingServiceStack-v000") + .withCreatedAt(new Date()) + .withStatus("INACTIVE")))); + + when(mockECS.createService(any(CreateServiceRequest.class))) + .thenReturn(new CreateServiceResult().withService(new Service())); + + when(mockECS.registerTaskDefinition(any(RegisterTaskDefinitionRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + RegisterTaskDefinitionRequest request = + (RegisterTaskDefinitionRequest) invocation.getArguments()[0]; + String testArn = "arn:aws:ecs:::task-definition/" + request.getFamily() + ":2"; + TaskDefinition taskDef = new TaskDefinition().withTaskDefinitionArn(testArn); + return new RegisterTaskDefinitionResult().withTaskDefinition(taskDef); + }); + + when(mockECS.listServices(any(ListServicesRequest.class))) + .thenReturn( + new ListServicesResult() + .withServiceArns( + Collections.singletonList( + "arn:aws:ecs:ecs-integInputEC2TgMappingsExistingServiceStack-v000"))); + + when(mockAWSApplicationAutoScalingClient.describeScalableTargets( + any(DescribeScalableTargetsRequest.class))) + .thenReturn(new DescribeScalableTargetsResult()); + + // mock ELB responses + when(mockELB.describeTargetGroups(any(DescribeTargetGroupsRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + DescribeTargetGroupsRequest request = + (DescribeTargetGroupsRequest) invocation.getArguments()[0]; + String testArn = + "arn:aws:elasticloadbalancing:::targetgroup/" + + request.getNames().get(0) + + "/76tgredfc"; + TargetGroup testTg = new TargetGroup().withTargetGroupArn(testArn); + + return new DescribeTargetGroupsResult().withTargetGroups(testTg); + }); + + when(mockAwsProvider.getAmazonEcs( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockECS); + + when(mockAwsProvider.getAmazonElasticLoadBalancingV2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockELB); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def input, EC2 launch type, and new target group " + + "fields with the existing service, successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_inputsEC2TgMappingsExistingServiceTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile( + "/createServerGroup-input-EC2-targetGroupMappings-existingService.json"); + String expectedServerGroupName = "ecs-integInputEC2TgMappingsExistingServiceStack-v001"; + + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + 
.extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily() + "-v001"); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + assertEquals("v001", seenTaskDefRequest.getContainerDefinitions().get(0).getName()); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + DescribeTargetGroupsRequest seenTargetGroupRequest = elbArgCaptor.getValue(); + + assertTrue( + seenTargetGroupRequest + .getNames() + .contains("integInputEC2TgMappingsExistingService-targetGroup")); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName, seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("v001", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals( + "integInputEC2TgMappingsExistingService-cluster", seenCreateServRequest.getCluster()); + assertEquals( + "arn:aws:elasticloadbalancing:::targetgroup/integInputEC2TgMappingsExistingService-targetGroup/76tgredfc", + serviceLB.getTargetGroupArn()); + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupSpec.java new file mode 100644 index 00000000000..1e03bf18ffd --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupSpec.java @@ -0,0 +1,503 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.test; + +import static io.restassured.RestAssured.get; +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.*; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.EcsSpec; +import io.restassured.http.ContentType; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + +public class CreateServerGroupSpec extends EcsSpec { + + private AmazonECS mockECS = mock(AmazonECS.class); + private AmazonElasticLoadBalancing mockELB = mock(AmazonElasticLoadBalancing.class); + + @BeforeEach + public void setup() { + // mock ECS responses + when(mockECS.listAccountSettings(any(ListAccountSettingsRequest.class))) + .thenReturn(new ListAccountSettingsResult()); + when(mockECS.listServices(any(ListServicesRequest.class))).thenReturn(new ListServicesResult()); + when(mockECS.describeServices(any(DescribeServicesRequest.class))) + .thenReturn(new DescribeServicesResult()); + when(mockECS.registerTaskDefinition(any(RegisterTaskDefinitionRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + RegisterTaskDefinitionRequest request = + (RegisterTaskDefinitionRequest) invocation.getArguments()[0]; + String testArn = "arn:aws:ecs:::task-definition/" + request.getFamily() + ":1"; + TaskDefinition taskDef = new TaskDefinition().withTaskDefinitionArn(testArn); + return new RegisterTaskDefinitionResult().withTaskDefinition(taskDef); + }); + when(mockECS.createService(any(CreateServiceRequest.class))) + .thenReturn( + new CreateServiceResult().withService(new Service().withServiceName("createdService"))); + + when(mockAwsProvider.getAmazonEcs( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockECS); + + // mock ELB responses + when(mockELB.describeTargetGroups(any(DescribeTargetGroupsRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + DescribeTargetGroupsRequest request = + (DescribeTargetGroupsRequest) invocation.getArguments()[0]; + String testArn = + "arn:aws:elasticloadbalancing:::targetgroup/" + + request.getNames().get(0) + + "/76tgredfc"; + TargetGroup testTg = new TargetGroup().withTargetGroupArn(testArn); + + return new DescribeTargetGroupsResult().withTargetGroups(testTg); + }); + + when(mockAwsProvider.getAmazonElasticLoadBalancingV2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockELB); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ inputs, EC2 launch type, and legacy target group fields, " + + "successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_InputsEc2LegacyTargetGroupTest() 
+ throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = generateStringFromTestFile("/createServerGroup-inputs-ec2.json"); + String expectedServerGroupName = "ecs-integInputsEc2LegacyTargetGroup"; + // when + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("v000", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals("integInputsEc2LegacyTargetGroup-cluster", seenCreateServRequest.getCluster()); + assertEquals(0, seenCreateServRequest.getTags().size()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def inputs, FARGATE launch type, and legacy target group fields, " + + "successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_InputsFargateLegacyTargetGroupTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile( + "/createServerGroupOperation-inputs-fargate-legacyTargetGroup.json"); + String expectedServerGroupName = "ecs-integInputsFargateLegacyTargetGroup"; + // when + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + 
String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + assertEquals("aws-vpc", seenTaskDefRequest.getNetworkMode()); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("FARGATE", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("v000", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals("integInputsFargateLegacyTargetGroup-cluster", seenCreateServRequest.getCluster()); + assertEquals(0, seenCreateServRequest.getTags().size()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def inputs, FARGATE launch type, and new target group fields, " + + "successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_InputsFargateTgMappingsTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile( + "/createServerGroupOperation-inputs-fargate-targetGroupMappings.json"); + String expectedServerGroupName = "ecs-integInputsFargateTgMappings"; + // when + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + assertEquals("aws-vpc", seenTaskDefRequest.getNetworkMode()); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + + ArgumentCaptor createServiceArgs = + 
ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + assertEquals("FARGATE", seenCreateServRequest.getLaunchType()); + // assert network stuff is set + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("main", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals("integInputsFargateTgMappings-cluster", seenCreateServRequest.getCluster()); + assertEquals(0, seenCreateServRequest.getTags().size()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def inputs," + + "task should fail if ECS service creation fails" + + "\n===") + @Test + public void createServerGroup_errorIfCreateServiceFails() + throws IOException, InterruptedException { + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile("/createServerGroup-inputs-ecsCreateFails.json"); + // when + Mockito.doThrow(new InvalidParameterException("Something is wrong.")) + .when(mockECS) + .createService(any(CreateServiceRequest.class)); + + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + retryUntilTrue( + () -> { + HashMap status = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("status"); + + return status.get("failed").equals(true); + }, + String.format("Failed to observe task failure after %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ inputs, EC2 launch type " + + "with no load balancing successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_InputsEc2WithoutLoadBalacingTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile("/createServerGroup-inputs-ec2-withoutLoadBalacing.json"); + String expectedServerGroupName = "ecs-integInputsEc2NoLoadBalancing"; + + // when + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, 
seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(0, seenCreateServRequest.getTags().size()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ inputs, EC2 launch type" + + "and service discovery registry fields, " + + "successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_InputsEc2ServiceDiscoveryTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile("/createServerGroup-inputs-ec2-serviceDiscovery.json"); + String expectedServerGroupName = "ecs-integInputsEc2WithServiceDiscovery"; + + // when + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(80, seenCreateServRequest.getServiceRegistries().get(0).getContainerPort()); + assertEquals( + "arn:aws:servicediscovery:us-west-2:910995322324:service/srv-ckeydmrhzmqh6yfz", + seenCreateServRequest.getServiceRegistries().get(0).getRegistryArn()); + assertEquals( + true, + seenCreateServRequest.getServiceRegistries().get(0).getContainerName().contains("v000")); + assertEquals(0, seenCreateServRequest.getTags().size()); + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithAppAutoScalingSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithAppAutoScalingSpec.java new file mode 100644 index 00000000000..73bda8dddd6 --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithAppAutoScalingSpec.java @@ -0,0 
+1,275 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.test; + +import static io.restassured.RestAssured.get; +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingClient; +import com.amazonaws.services.applicationautoscaling.model.*; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.model.*; +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.*; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.EcsSpec; +import io.restassured.http.ContentType; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; + +public class CreateServerGroupWithAppAutoScalingSpec extends EcsSpec { + + private AmazonECS mockECS = mock(AmazonECS.class); + private AmazonElasticLoadBalancing mockELB = mock(AmazonElasticLoadBalancing.class); + private AWSApplicationAutoScalingClient mockAWSApplicationAutoScalingClient = + mock(AWSApplicationAutoScalingClient.class); + private AmazonCloudWatch mockAmazonCloudWatchClient = mock(AmazonCloudWatch.class); + + @BeforeEach + public void setup() { + // mock ECS responses + when(mockECS.listAccountSettings(any(ListAccountSettingsRequest.class))) + .thenReturn(new ListAccountSettingsResult()); + when(mockECS.listServices(any(ListServicesRequest.class))).thenReturn(new ListServicesResult()); + when(mockECS.describeServices(any(DescribeServicesRequest.class))) + .thenReturn(new DescribeServicesResult()); + when(mockECS.registerTaskDefinition(any(RegisterTaskDefinitionRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + RegisterTaskDefinitionRequest request = + (RegisterTaskDefinitionRequest) invocation.getArguments()[0]; + String testArn = "arn:aws:ecs:::task-definition/" + request.getFamily() + ":1"; + TaskDefinition taskDef = new TaskDefinition().withTaskDefinitionArn(testArn); + return new RegisterTaskDefinitionResult().withTaskDefinition(taskDef); + }); + when(mockECS.createService(any(CreateServiceRequest.class))) + .thenReturn( + new CreateServiceResult().withService(new 
Service().withServiceName("createdService"))); + + when(mockAwsProvider.getAmazonEcs( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockECS); + + // mock ELB responses + when(mockELB.describeTargetGroups(any(DescribeTargetGroupsRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + DescribeTargetGroupsRequest request = + (DescribeTargetGroupsRequest) invocation.getArguments()[0]; + String testArn = + "arn:aws:elasticloadbalancing:::targetgroup/" + + request.getNames().get(0) + + "/76tgredfc"; + TargetGroup testTg = new TargetGroup().withTargetGroupArn(testArn); + + return new DescribeTargetGroupsResult().withTargetGroups(testTg); + }); + + when(mockAWSApplicationAutoScalingClient.describeScalableTargets( + any(DescribeScalableTargetsRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + ScalableTarget scalableTarget = + new ScalableTarget() + .withMaxCapacity(1) + .withMinCapacity(1) + .withResourceId("service/default/sample-webapp"); + return new DescribeScalableTargetsResult().withScalableTargets(scalableTarget); + }); + + when(mockAWSApplicationAutoScalingClient.describeScalingPolicies( + any(DescribeScalingPoliciesRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + Alarm alarm = + new Alarm() + .withAlarmARN("arn:aws:cloudwatch:us-east-1:123456789012:alarm:testAlarm") + .withAlarmName("testAlarm"); + ScalingPolicy scalablePolicy = + new ScalingPolicy() + .withResourceId("service/default/sample-webapp") + .withPolicyName("ecsTestPolicy") + .withPolicyARN( + "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75") + .withAlarms(Arrays.asList(alarm)); + return new DescribeScalingPoliciesResult() + .withScalingPolicies(Arrays.asList(scalablePolicy)); + }); + + when(mockAWSApplicationAutoScalingClient.putScalingPolicy(any(PutScalingPolicyRequest.class))) + .thenReturn( + new PutScalingPolicyResult() + .withPolicyARN( + "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75")); + + when(mockAmazonCloudWatchClient.describeAlarms(any(DescribeAlarmsRequest.class))) + .thenReturn( + new DescribeAlarmsResult() + .withMetricAlarms(Arrays.asList(new MetricAlarm().withAlarmName("testAlarm")))); + + when(mockAwsProvider.getAmazonApplicationAutoScaling( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockAWSApplicationAutoScalingClient); + + when(mockAwsProvider.getAmazonCloudWatch( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockAmazonCloudWatchClient); + + when(mockAwsProvider.getAmazonElasticLoadBalancingV2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockELB); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ inputs, EC2 launch type, and target group mappings " + + "with Application Auto Scaling, successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_InputsEc2TargetGroupMappingsWithAppAutoScalingGroupTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile( + "/createServerGroup-inputs-ec2-targetGroupMappings-appAutoScalingGroup.json"); + String expectedServerGroupName = "ecs-integInputsEc2TargetGroupMappingsWithAppAutoScalingGroup"; + + // when + String taskId 
= + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + DescribeTargetGroupsRequest seenDescribeTargetGroups = elbArgCaptor.getValue(); + + assertEquals( + "integInputsEc2TargetGroupMappingsWithAppAutoScalingGroup-targetGroup", + seenDescribeTargetGroups.getNames().get(0)); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + assertEquals( + "integInputsEc2TargetGroupMappingsWithAppAutoScaling-cluster", + seenCreateServRequest.getCluster()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("v000", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + + ArgumentCaptor describeAlarmsRequestArgsCaptor = + ArgumentCaptor.forClass(DescribeAlarmsRequest.class); + verify(mockAmazonCloudWatchClient, atLeast(1)) + .describeAlarms(describeAlarmsRequestArgsCaptor.capture()); + + assertTrue( + describeAlarmsRequestArgsCaptor.getAllValues().stream() + .anyMatch(alarm -> alarm.getAlarmNames().contains("testAlarm"))); + + ArgumentCaptor describeScalingPoliciesRequestArgumentCaptor = + ArgumentCaptor.forClass(DescribeScalingPoliciesRequest.class); + verify(mockAWSApplicationAutoScalingClient) + .describeScalingPolicies(describeScalingPoliciesRequestArgumentCaptor.capture()); + DescribeScalingPoliciesRequest seenDescribePoliciesRequest = + describeScalingPoliciesRequestArgumentCaptor.getValue(); + + assertEquals("service/default/sample-webapp", seenDescribePoliciesRequest.getResourceId()); + + ArgumentCaptor describeScalableTargetsRequestArgumentCaptor = + ArgumentCaptor.forClass(DescribeScalableTargetsRequest.class); + verify(mockAWSApplicationAutoScalingClient, atLeast(1)) + .describeScalableTargets(describeScalableTargetsRequestArgumentCaptor.capture()); + + assertTrue( + describeScalableTargetsRequestArgumentCaptor.getAllValues().stream() + .anyMatch( + scalabletarget -> + 
("ecs:service:DesiredCount").equals(scalabletarget.getScalableDimension()))); + + ArgumentCaptor putScalingPolicyRequestArgumentCaptor = + ArgumentCaptor.forClass(PutScalingPolicyRequest.class); + verify(mockAWSApplicationAutoScalingClient) + .putScalingPolicy(putScalingPolicyRequestArgumentCaptor.capture()); + PutScalingPolicyRequest seenPutScalingPolicyRequest = + putScalingPolicyRequestArgumentCaptor.getValue(); + assertEquals("createdServiceTestPolicy", seenPutScalingPolicyRequest.getPolicyName()); + assertEquals( + "service/integInputsEc2TargetGroupMappingsWithAppAutoScaling-cluster/createdService", + seenPutScalingPolicyRequest.getResourceId()); + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithArtifactsSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithArtifactsSpec.java new file mode 100644 index 00000000000..a189d0f9b41 --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithArtifactsSpec.java @@ -0,0 +1,693 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.test; + +import static io.restassured.RestAssured.get; +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScalingClient; +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest; +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult; +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.*; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.EcsSpec; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import io.restassured.http.ContentType; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import 
org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; +import org.springframework.boot.test.mock.mockito.MockBean; + +public class CreateServerGroupWithArtifactsSpec extends EcsSpec { + + @MockBean ArtifactDownloader mockArtifactDownloader; + + @MockBean ArtifactCredentialsRepository mockArtifactCredentialsRepository; + + private ArtifactCredentials mockArtifactCredentials = mock(ArtifactCredentials.class); + + private AWSApplicationAutoScalingClient mockAWSApplicationAutoScalingClient = + mock(AWSApplicationAutoScalingClient.class); + + private AmazonECS mockECS = mock(AmazonECS.class); + + private AmazonElasticLoadBalancing mockELB = mock(AmazonElasticLoadBalancing.class); + + @BeforeEach + public void setup() { + + // mocking calls + when(mockECS.listAccountSettings(any(ListAccountSettingsRequest.class))) + .thenReturn(new ListAccountSettingsResult()); + + when(mockECS.listServices(any(ListServicesRequest.class))).thenReturn(new ListServicesResult()); + + when(mockECS.describeServices(any(DescribeServicesRequest.class))) + .thenReturn(new DescribeServicesResult()); + + when(mockECS.createService(any(CreateServiceRequest.class))) + .thenReturn( + new CreateServiceResult().withService(new Service().withServiceName("createdService"))); + + when(mockECS.registerTaskDefinition(any(RegisterTaskDefinitionRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + RegisterTaskDefinitionRequest request = + (RegisterTaskDefinitionRequest) invocation.getArguments()[0]; + String testArn = "arn:aws:ecs:::task-definition/" + request.getFamily() + ":1"; + TaskDefinition taskDef = new TaskDefinition().withTaskDefinitionArn(testArn); + return new RegisterTaskDefinitionResult().withTaskDefinition(taskDef); + }); + + when(mockArtifactCredentialsRepository.getCredentials(anyString(), anyString())) + .thenReturn(mockArtifactCredentials); + + when(mockAWSApplicationAutoScalingClient.describeScalableTargets( + any(DescribeScalableTargetsRequest.class))) + .thenReturn(new DescribeScalableTargetsResult()); + + // mock ELB responses + when(mockELB.describeTargetGroups(any(DescribeTargetGroupsRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + DescribeTargetGroupsRequest request = + (DescribeTargetGroupsRequest) invocation.getArguments()[0]; + String testArn = + "arn:aws:elasticloadbalancing:::targetgroup/" + + request.getNames().get(0) + + "/76tgredfc"; + TargetGroup testTg = new TargetGroup().withTargetGroupArn(testArn); + + return new DescribeTargetGroupsResult().withTargetGroups(testTg); + }); + + when(mockAwsProvider.getAmazonEcs( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockECS); + + when(mockAwsProvider.getAmazonApplicationAutoScaling( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockAWSApplicationAutoScalingClient); + + when(mockAwsProvider.getAmazonElasticLoadBalancingV2( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockELB); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def artifacts, EC2 launch type, and new target group fields, " + + "successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_ArtifactsEC2TgMappingsTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile("/createServerGroup-artifact-EC2-targetGroupMappings.json"); + String expectedServerGroupName = 
"ecs-integArtifactsEC2TgMappingsStack-detailTest-v000"; + + ByteArrayInputStream byteArrayInputStreamOfArtifactsForEC2Type = + new ByteArrayInputStream( + generateStringFromTestArtifactFile( + "/createServerGroup-artifact-EC2-targetGroup-artifactFile.json") + .getBytes()); + + when(mockArtifactDownloader.download(any(Artifact.class))) + .thenReturn(byteArrayInputStreamOfArtifactsForEC2Type); + + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily() + "-v000"); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + assertEquals( + "arn:aws:iam:::executionRole/testExecutionRole:1", + seenTaskDefRequest.getExecutionRoleArn()); + assertEquals("arn:aws:iam:::role/testTaskRole:1", seenTaskDefRequest.getTaskRoleArn()); + assertEquals("application", seenTaskDefRequest.getContainerDefinitions().get(0).getName()); + assertEquals( + "awslogs", + seenTaskDefRequest.getContainerDefinitions().get(0).getLogConfiguration().getLogDriver()); + assertEquals( + "spinnaker-ecs-demo", + seenTaskDefRequest + .getContainerDefinitions() + .get(0) + .getLogConfiguration() + .getOptions() + .get("awslogs-group")); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName, seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("application", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals("integArtifactEC2TgMappings-cluster", seenCreateServRequest.getCluster()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def artifacts, FARGATE launch type, and new target group fields, " + + "successfully submit createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_ArtifactsFARGATETgMappingsTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile("/createServerGroup-artifact-FARGATE-targetGroupMappings.json"); + String expectedServerGroupName = 
"ecs-integArtifactsFargateTgMappingsStack-detailTest-v000"; + + ByteArrayInputStream byteArrayInputStreamOfArtifactsForFargateType = + new ByteArrayInputStream( + generateStringFromTestArtifactFile( + "/createServerGroup-artifact-Fargate-targetGroup-artifactFile.json") + .getBytes()); + + when(mockArtifactDownloader.download(any(Artifact.class))) + .thenReturn(byteArrayInputStreamOfArtifactsForFargateType); + + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily() + "-v000"); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + assertEquals( + "arn:aws:iam:::executionRole/testExecutionRole:1", + seenTaskDefRequest.getExecutionRoleArn()); + assertEquals("arn:aws:iam:::role/testTaskRole:1", seenTaskDefRequest.getTaskRoleArn()); + assertEquals("application", seenTaskDefRequest.getContainerDefinitions().get(0).getName()); + assertEquals( + "awslogs", + seenTaskDefRequest.getContainerDefinitions().get(0).getLogConfiguration().getLogDriver()); + assertEquals( + "spinnaker-ecs-demo", + seenTaskDefRequest + .getContainerDefinitions() + .get(0) + .getLogConfiguration() + .getOptions() + .get("awslogs-group")); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals(0, seenCreateServRequest.getCapacityProviderStrategy().size()); + assertEquals("FARGATE", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName, seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("application", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals("integArtifactsFargateTgMappings-cluster", seenCreateServRequest.getCluster()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def artifacts and a FARGATE capacity provider strategy " + + "successfully submits a createServerGroup operation" + + "\n===") + @Test + public void createServerGroup_ArtifactsFARGATECapacityProviderTest() + throws IOException, InterruptedException { + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = + generateStringFromTestFile( + 
"/createServerGroup-artifact-FARGATE-capacityProviderStrategy.json"); + String expectedServerGroupName = + "ecs-integArtifactsFargateCapacityProviderStrategyStack-detailTest-v000"; + + ByteArrayInputStream byteArrayInputStreamOfArtifactsForFargateType = + new ByteArrayInputStream( + generateStringFromTestArtifactFile( + "/createServerGroup-artifact-Fargate-targetGroup-artifactFile.json") + .getBytes()); + + when(mockArtifactDownloader.download(any(Artifact.class))) + .thenReturn(byteArrayInputStreamOfArtifactsForFargateType); + + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily() + "-v000"); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + assertEquals( + "arn:aws:iam:::executionRole/testExecutionRole:1", + seenTaskDefRequest.getExecutionRoleArn()); + assertEquals("arn:aws:iam:::role/testTaskRole:1", seenTaskDefRequest.getTaskRoleArn()); + assertEquals("application", seenTaskDefRequest.getContainerDefinitions().get(0).getName()); + assertEquals( + "awslogs", + seenTaskDefRequest.getContainerDefinitions().get(0).getLogConfiguration().getLogDriver()); + assertEquals( + "spinnaker-ecs-demo", + seenTaskDefRequest + .getContainerDefinitions() + .get(0) + .getLogConfiguration() + .getOptions() + .get("awslogs-group")); + + ArgumentCaptor elbArgCaptor = + ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class); + verify(mockELB).describeTargetGroups(elbArgCaptor.capture()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals(1, seenCreateServRequest.getCapacityProviderStrategy().size()); + assertEquals( + "FARGATE", + seenCreateServRequest.getCapacityProviderStrategy().get(0).getCapacityProvider()); + assertNull(seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName, seenCreateServRequest.getServiceName()); + assertEquals(1, seenCreateServRequest.getLoadBalancers().size()); + LoadBalancer serviceLB = seenCreateServRequest.getLoadBalancers().get(0); + assertEquals("application", serviceLB.getContainerName()); + assertEquals(80, serviceLB.getContainerPort().intValue()); + assertEquals( + "integArtifactsFargateCapacityProviderStrategy-cluster", + seenCreateServRequest.getCluster()); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ task def artifacts, EC2 launch type, and new target group fields " + + "without container definition, gives an exception(Provided task definition does 
not contain any container definitions). "
+          + "\n===")
+  @Test
+  public void createServerGroup_errorIfNoContainersTest() throws IOException, InterruptedException {
+
+    // given
+    String url = getTestUrl(CREATE_SG_TEST_PATH);
+    String requestBody =
+        generateStringFromTestFile("/createServerGroup-artifact-EC2-targetGroupMappings.json");
+
+    ByteArrayInputStream byteArrayInputStreamOfArtifactsForEC2Type =
+        new ByteArrayInputStream(
+            generateStringFromTestArtifactFile(
+                    "/createServerGroup-artifact-EC2-targetGroup-WithNoContainers-artifactFile.json")
+                .getBytes());
+
+    when(mockArtifactDownloader.download(any(Artifact.class)))
+        .thenReturn(byteArrayInputStreamOfArtifactsForEC2Type);
+
+    String taskId =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(url)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    retryUntilTrue(
+        () -> {
+          HashMap status =
+              get(getTestUrl("/task/" + taskId))
+                  .then()
+                  .contentType(ContentType.JSON)
+                  .extract()
+                  .path("status");
+
+          return status.get("failed").equals(true);
+        },
+        String.format("Failed to detect task failure in %s seconds", TASK_RETRY_SECONDS),
+        TASK_RETRY_SECONDS);
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given description w/ task def artifacts, EC2 launch type, and "
+          + "multiple load balancers, successfully submits a createServerGroup operation"
+          + "\n===")
+  @Test
+  public void createServerGroup_ArtifactsEC2WithMultipleLBsTest()
+      throws IOException, InterruptedException {
+
+    // given
+    String url = getTestUrl(CREATE_SG_TEST_PATH);
+    String requestBody =
+        generateStringFromTestFile(
+            "/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers.json");
+    String expectedServerGroupName =
+        "ecs-integArtifactsEC2TgMappingsStackWithMultipleLBsAndContainers-detailTest-v000";
+
+    ByteArrayInputStream byteArrayInputStreamOfArtifactsForEC2Type =
+        new ByteArrayInputStream(
+            generateStringFromTestArtifactFile(
+                    "/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers-artifactFile.json")
+                .getBytes());
+
+    when(mockArtifactDownloader.download(any(Artifact.class)))
+        .thenReturn(byteArrayInputStreamOfArtifactsForEC2Type);
+
+    String taskId =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(url)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
+    retryUntilTrue(
+        () -> {
+          List taskHistory =
+              get(getTestUrl("/task/" + taskId))
+                  .then()
+                  .contentType(ContentType.JSON)
+                  .extract()
+                  .path("history");
+          if (taskHistory
+              .toString()
+              .contains(String.format("Done creating 1 of %s", expectedServerGroupName))) {
+            return true;
+          }
+          return false;
+        },
+        String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS),
+        TASK_RETRY_SECONDS);
+
+    ArgumentCaptor registerTaskDefArgs =
+        ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class);
+    verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture());
+    RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue();
+    assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily() + "-v000");
+    assertEquals(
+        "arn:aws:iam:::executionRole/testExecutionRole:1",
+        seenTaskDefRequest.getExecutionRoleArn());
+    assertEquals("arn:aws:iam:::role/testTaskRole:1", seenTaskDefRequest.getTaskRoleArn());
+    assertEquals(2, seenTaskDefRequest.getContainerDefinitions().size());
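+    // the artifact defines two containers; each should be registered with the image
+    // assigned to it in the request's containerToImageMap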
+    ContainerDefinition container1 =
+        seenTaskDefRequest.getContainerDefinitions().stream()
+            .filter(container -> container.getName().equals("application1"))
+            .collect(Collectors.toList())
+            .get(0);
+    ContainerDefinition container2 =
+        seenTaskDefRequest.getContainerDefinitions().stream()
+            .filter(container -> container.getName().equals("application2"))
+            .collect(Collectors.toList())
+            .get(0);
+    assertEquals("application1", container1.getName());
+    assertEquals("app1/image", container1.getImage());
+    assertEquals("application2", container2.getName());
+    assertEquals("app2/image", container2.getImage());
+    assertEquals(80, container1.getPortMappings().get(0).getContainerPort());
+    assertEquals(84, container2.getPortMappings().get(0).getContainerPort());
+    assertEquals(
+        "spinnaker-ecs-demo", container1.getLogConfiguration().getOptions().get("awslogs-group"));
+    assertEquals("awslogs", container1.getLogConfiguration().getLogDriver());
+
+    ArgumentCaptor elbArgCaptor =
+        ArgumentCaptor.forClass(DescribeTargetGroupsRequest.class);
+    verify(mockELB, times(2)).describeTargetGroups(elbArgCaptor.capture());
+
+    ArgumentCaptor createServiceArgs =
+        ArgumentCaptor.forClass(CreateServiceRequest.class);
+    verify(mockECS).createService(createServiceArgs.capture());
+    CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue();
+    assertEquals("EC2", seenCreateServRequest.getLaunchType());
+    assertEquals(expectedServerGroupName, seenCreateServRequest.getServiceName());
+    assertEquals(2, seenCreateServRequest.getLoadBalancers().size());
+    LoadBalancer serviceLB1 =
+        seenCreateServRequest.getLoadBalancers().stream()
+            .filter(lb -> lb.getContainerName().equals("application1"))
+            .collect(Collectors.toList())
+            .get(0);
+    LoadBalancer serviceLB2 =
+        seenCreateServRequest.getLoadBalancers().stream()
+            .filter(lb -> lb.getContainerName().equals("application2"))
+            .collect(Collectors.toList())
+            .get(0);
+
+    assertEquals("application1", serviceLB1.getContainerName());
+    assertEquals(80, serviceLB1.getContainerPort().intValue());
+    assertEquals(
+        "arn:aws:elasticloadbalancing:::targetgroup/integArtifactEC2TgMappings-targetGroupForPort80/76tgredfc",
+        serviceLB1.getTargetGroupArn());
+    assertEquals("application2", serviceLB2.getContainerName());
+    assertEquals(84, serviceLB2.getContainerPort().intValue());
+    assertEquals(
+        "arn:aws:elasticloadbalancing:::targetgroup/integArtifactEC2TgMappings-targetGroupForPort84/76tgredfc",
+        serviceLB2.getTargetGroupArn());
+    assertEquals(
+        "integArtifactEC2TgMappingsWithMultipleLBsAndContainers-cluster",
+        seenCreateServRequest.getCluster());
+  }
+
+  @Test
+  public void createServerGroup_ProcessedArtifactsEC2TgMappingsTest()
+      throws IOException, InterruptedException {
+
+    // given
+    String url = getTestUrl(CREATE_SG_TEST_PATH);
+    String requestBody =
+        generateStringFromTestFile(
+            "/createServerGroup-spelProcessedArtifact-EC2-targetGroupMappings.json");
+    String expectedServerGroupName =
+        "ecs-integSpELProcessedArtifactsEC2TgMappingsStack-detailTest-v000";
+
+    String taskId =
+        given()
+            .contentType(ContentType.JSON)
+            .body(requestBody)
+            .when()
+            .post(url)
+            .then()
+            .statusCode(200)
+            .contentType(ContentType.JSON)
+            .body("id", notNullValue())
+            .body("resourceUri", containsString("/task/"))
+            .extract()
+            .path("id");
+
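+    // poll task history until the orchestration reports the new server group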
+    retryUntilTrue(
+        () -> {
+          List taskHistory =
+              get(getTestUrl("/task/" + taskId))
+                  .then()
+                  .contentType(ContentType.JSON)
+                  .extract()
+                  .path("history");
+          if (taskHistory
+              .toString()
+              .contains(String.format("Done creating 1 of %s", expectedServerGroupName))) {
+            return true;
+          }
+          return false;
+        },
+        String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS),
+        TASK_RETRY_SECONDS);
+
+    ArgumentCaptor registerTaskDefArgs =
+        ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class);
+    verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture());
+    RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue();
+    assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily() + "-v000");
+    assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size());
+    assertEquals(
+        "arn:aws:iam:::executionRole/testExecutionRole:1",
+        seenTaskDefRequest.getExecutionRoleArn());
+    assertEquals("application", seenTaskDefRequest.getContainerDefinitions().get(0).getName());
+    assertEquals(
+        "awslogs",
+        seenTaskDefRequest.getContainerDefinitions().get(0).getLogConfiguration().getLogDriver());
+    assertEquals(
+        "spinnaker-ecs-demo",
+        seenTaskDefRequest
+            .getContainerDefinitions()
+            .get(0)
+            .getLogConfiguration()
+            .getOptions()
+            .get("awslogs-group"));
+
+    ContainerDefinition containerDefinition =
+        seenTaskDefRequest.getContainerDefinitions().stream()
+            .filter(container -> container.getName().equals("application"))
+            .collect(Collectors.toList())
+            .get(0);
+
+    assertEquals(80, containerDefinition.getPortMappings().get(0).getContainerPort());
+    assertEquals("tcp", containerDefinition.getPortMappings().get(0).getProtocol());
+    assertEquals(256, containerDefinition.getCpu());
+    assertEquals(512, containerDefinition.getMemoryReservation());
+    assertEquals("PLACEHOLDER", containerDefinition.getImage());
+
+    assertEquals("bridge", seenTaskDefRequest.getNetworkMode());
+    assertEquals(
+        "ecs-integSpELProcessedArtifactsEC2TgMappingsStack-detailTest",
+        seenTaskDefRequest.getFamily());
+  }
+}
diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithMonikerSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithMonikerSpec.java
new file mode 100644
index 00000000000..36d664bef64
--- /dev/null
+++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/CreateServerGroupWithMonikerSpec.java
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2020 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under
+ * the License.
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.test; + +import static io.restassured.RestAssured.get; +import static io.restassured.RestAssured.given; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.*; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.EcsSpec; +import io.restassured.http.ContentType; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + +public class CreateServerGroupWithMonikerSpec extends EcsSpec { + + private AmazonECS mockECS = mock(AmazonECS.class); + private AmazonElasticLoadBalancing mockELB = mock(AmazonElasticLoadBalancing.class); + + @BeforeEach + public void setup() { + when(mockECS.listServices(any(ListServicesRequest.class))).thenReturn(new ListServicesResult()); + when(mockECS.describeServices(any(DescribeServicesRequest.class))) + .thenReturn(new DescribeServicesResult()); + when(mockECS.registerTaskDefinition(any(RegisterTaskDefinitionRequest.class))) + .thenAnswer( + (Answer) + invocation -> { + RegisterTaskDefinitionRequest request = + (RegisterTaskDefinitionRequest) invocation.getArguments()[0]; + String testArn = "arn:aws:ecs:::task-definition/" + request.getFamily() + ":1"; + TaskDefinition taskDef = new TaskDefinition().withTaskDefinitionArn(testArn); + return new RegisterTaskDefinitionResult().withTaskDefinition(taskDef); + }); + when(mockECS.createService(any(CreateServiceRequest.class))) + .thenReturn( + new CreateServiceResult().withService(new Service().withServiceName("createdService"))); + + when(mockAwsProvider.getAmazonEcs( + any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockECS); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ inputs, EC2 launch type, and moniker enabled " + + "successfully submit createServerGroup operation with tags" + + "\n===") + @Test + public void createServerGroup_InputsEc2WithMoniker() throws IOException, InterruptedException { + // When account has tags enabled + when(mockECS.listAccountSettings(any(ListAccountSettingsRequest.class))) + .thenReturn( + new ListAccountSettingsResult() + .withSettings( + new Setting().withName(SettingName.ServiceLongArnFormat).withValue("enabled"), + new Setting().withName(SettingName.TaskLongArnFormat).withValue("enabled"))); + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = generateStringFromTestFile("/createServerGroup-inputs-ec2-moniker.json"); + String expectedServerGroupName = "ecs-integInputsMoniker-detailTest"; + + // when + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + retryUntilTrue( + () -> { + List taskHistory = 
+ get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("history"); + if (taskHistory + .toString() + .contains(String.format("Done creating 1 of %s-v000", expectedServerGroupName))) { + return true; + } + return false; + }, + String.format("Failed to detect service creation in %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + + // then + ArgumentCaptor registerTaskDefArgs = + ArgumentCaptor.forClass(RegisterTaskDefinitionRequest.class); + verify(mockECS).registerTaskDefinition(registerTaskDefArgs.capture()); + RegisterTaskDefinitionRequest seenTaskDefRequest = registerTaskDefArgs.getValue(); + assertEquals(expectedServerGroupName, seenTaskDefRequest.getFamily()); + assertEquals(1, seenTaskDefRequest.getContainerDefinitions().size()); + + ArgumentCaptor createServiceArgs = + ArgumentCaptor.forClass(CreateServiceRequest.class); + verify(mockECS).createService(createServiceArgs.capture()); + CreateServiceRequest seenCreateServRequest = createServiceArgs.getValue(); + assertEquals("EC2", seenCreateServRequest.getLaunchType()); + assertEquals(expectedServerGroupName + "-v000", seenCreateServRequest.getServiceName()); + assertEquals(4, seenCreateServRequest.getTags().size()); + assertThat( + seenCreateServRequest.getTags(), + containsInAnyOrder( + new Tag().withKey("moniker.spinnaker.io/application").withValue("ecs"), + new Tag().withKey("moniker.spinnaker.io/stack").withValue("integInputsMoniker"), + new Tag().withKey("moniker.spinnaker.io/detail").withValue("detailTest"), + new Tag().withKey("moniker.spinnaker.io/sequence").withValue("0"))); + } + + @DisplayName( + ".\n===\n" + + "Given description w/ inputs, EC2 launch type, and moniker enabled " + + "task should fail if ECS account has tags disabled" + + "\n===") + @Test + public void createServerGroup_errorIfCreateServiceFails() + throws IOException, InterruptedException { + // When account has tags disabled + when(mockECS.listAccountSettings(any(ListAccountSettingsRequest.class))) + .thenReturn(new ListAccountSettingsResult()); + + // given + String url = getTestUrl(CREATE_SG_TEST_PATH); + String requestBody = generateStringFromTestFile("/createServerGroup-inputs-ec2-moniker.json"); + + // when + Mockito.doThrow(new InvalidParameterException("Something is wrong.")) + .when(mockECS) + .createService(any(CreateServiceRequest.class)); + + String taskId = + given() + .contentType(ContentType.JSON) + .body(requestBody) + .when() + .post(url) + .then() + .statusCode(200) + .contentType(ContentType.JSON) + .body("id", notNullValue()) + .body("resourceUri", containsString("/task/")) + .extract() + .path("id"); + + // then + retryUntilTrue( + () -> { + HashMap status = + get(getTestUrl("/task/" + taskId)) + .then() + .contentType(ContentType.JSON) + .extract() + .path("status"); + + return status.get("failed").equals(true); + }, + String.format("Failed to observe task failure after %s seconds", TASK_RETRY_SECONDS), + TASK_RETRY_SECONDS); + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/EcsControllersSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/EcsControllersSpec.java new file mode 100644 index 00000000000..a97125c3532 --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/EcsControllersSpec.java @@ -0,0 +1,221 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.test; + +import static io.restassured.RestAssured.get; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.Cluster; +import com.amazonaws.services.ecs.model.DescribeClustersRequest; +import com.amazonaws.services.ecs.model.DescribeClustersResult; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.clouddriver.ecs.EcsSpec; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; +import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; +import io.restassured.http.ContentType; +import io.restassured.response.Response; +import java.util.*; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.springframework.beans.factory.annotation.Autowired; + +public class EcsControllersSpec extends EcsSpec { + + @Autowired private ProviderRegistry providerRegistry; + private AmazonECS mockECS = mock(AmazonECS.class); + + @DisplayName( + ".\n===\n" + + "Given cached ECS clusters (names), retrieve detailed description " + + "of the cluster from /ecs/ecsDescribeClusters/{account}/{region}" + + "\n===") + @Test + public void getAllEcsClusterDetailsTest() throws JsonProcessingException { + // given + ProviderCache ecsCache = providerRegistry.getProviderCache(EcsProvider.NAME); + String testClusterName = "example-app-test-Cluster-NSnYsTXmCfV2"; + String testNamespace = Keys.Namespace.ECS_CLUSTERS.ns; + + String clusterKey = Keys.getClusterKey(ECS_ACCOUNT_NAME, TEST_REGION, testClusterName); + Map attributes = new HashMap<>(); + attributes.put("account", ECS_ACCOUNT_NAME); + attributes.put("region", TEST_REGION); + attributes.put("clusterArn", "arn:aws:ecs:::cluster/" + testClusterName); + attributes.put("clusterName", testClusterName); + + DefaultCacheResult testResult = buildCacheResult(attributes, testNamespace, clusterKey); + ecsCache.addCacheResult("TestAgent", Collections.singletonList(testNamespace), testResult); + + when(mockAwsProvider.getAmazonEcs(any(NetflixECSCredentials.class), anyString(), anyBoolean())) + .thenReturn(mockECS); + + Cluster clusterDecription = + new Cluster() + .withClusterArn("arn:aws:ecs:::cluster/" + testClusterName) + .withStatus("ACTIVE") + 
.withCapacityProviders("FARGATE", "FARGATE_SPOT") + .withClusterName(testClusterName); + when(mockECS.describeClusters(any(DescribeClustersRequest.class))) + .thenReturn(new DescribeClustersResult().withClusters(clusterDecription)); + + // when + String testUrl = + getTestUrl("/ecs/ecsClusterDescriptions/" + ECS_ACCOUNT_NAME + "/" + TEST_REGION); + + Response response = + get(testUrl).then().statusCode(200).contentType(ContentType.JSON).extract().response(); + + ObjectMapper objectMapper = new ObjectMapper(); + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Collection clusters = + Arrays.asList(objectMapper.readValue(response.asString(), Cluster[].class)); + // then + assertNotNull(clusters); + Cluster clusterDescription = + (clusters.stream().filter(cluster -> cluster.getClusterName().equals(testClusterName))) + .findAny() + .get(); + assertTrue(clusterDescription.getClusterArn().contains(testClusterName)); + assertEquals(2, clusterDescription.getCapacityProviders().size()); + assertEquals("ACTIVE", clusterDescription.getStatus()); + assertTrue(clusterDescription.getCapacityProviders().contains("FARGATE")); + assertTrue(clusterDescription.getCapacityProviders().contains("FARGATE_SPOT")); + } + + @DisplayName(".\n===\n" + "Given cached ECS cluster, retrieve it from /ecs/ecsClusters" + "\n===") + @ParameterizedTest + @ValueSource(strings = {ECS_ACCOUNT_NAME, ECS_MONIKER_ACCOUNT_NAME}) + public void getEcsClustersTest(String accountName) { + // given + ProviderCache ecsCache = providerRegistry.getProviderCache(EcsProvider.NAME); + String testClusterName = "integ-test-cluster"; + String testNamespace = Keys.Namespace.ECS_CLUSTERS.ns; + + String clusterKey = Keys.getClusterKey(accountName, TEST_REGION, testClusterName); + Map attributes = new HashMap<>(); + attributes.put("account", accountName); + attributes.put("region", TEST_REGION); + attributes.put("clusterArn", "arn:aws:ecs:::cluster/" + testClusterName); + attributes.put("clusterName", testClusterName); + + DefaultCacheResult testResult = buildCacheResult(attributes, testNamespace, clusterKey); + ecsCache.addCacheResult("TestAgent", Collections.singletonList(testNamespace), testResult); + + // when + String testUrl = getTestUrl("/ecs/ecsClusters"); + + Response response = + get(testUrl).then().statusCode(200).contentType(ContentType.JSON).extract().response(); + + // then + assertNotNull(response); + // TODO: serialize into expected return type to validate API contract hasn't changed + String responseStr = response.asString(); + assertTrue(responseStr.contains(testClusterName)); + assertTrue(responseStr.contains(accountName)); + assertTrue(responseStr.contains(TEST_REGION)); + } + + @DisplayName(".\n===\n" + "Given cached ECS secret, retrieve it from /ecs/secrets" + "\n===") + @ParameterizedTest + @ValueSource(strings = {ECS_ACCOUNT_NAME, ECS_MONIKER_ACCOUNT_NAME}) + public void getEcsSecretsTest(String accountName) { + // given + ProviderCache ecsCache = providerRegistry.getProviderCache(EcsProvider.NAME); + String testSecretName = "tut/secret"; + String testNamespace = Keys.Namespace.SECRETS.ns; + String testSecretArn = "arn:aws:secretsmanager:region:aws_account_id:secret:tut/sevret-jiObOV"; + + String secretKey = Keys.getClusterKey(accountName, TEST_REGION, testSecretName); + String url = getTestUrl("/ecs/secrets"); + Map attributes = new HashMap<>(); + attributes.put("account", accountName); + attributes.put("region", TEST_REGION); + attributes.put("secretName", testSecretName); + 
attributes.put("secretArn", testSecretArn); + + DefaultCacheResult testResult = buildCacheResult(attributes, testNamespace, secretKey); + ecsCache.addCacheResult("TestAgent", Collections.singletonList(testNamespace), testResult); + + // when + Response response = get(url).then().contentType(ContentType.JSON).extract().response(); + + // then + assertNotNull(response); + + String responseStr = response.asString(); + assertTrue(responseStr.contains(accountName)); + assertTrue(responseStr.contains(TEST_REGION)); + assertTrue(responseStr.contains(testSecretName)); + assertTrue(responseStr.contains(testSecretArn)); + } + + @DisplayName( + ".\n===\n" + + "Given cached service disc registry, retrieve it from /ecs/serviceDiscoveryRegistries" + + "\n===") + @ParameterizedTest + @ValueSource(strings = {ECS_ACCOUNT_NAME, ECS_MONIKER_ACCOUNT_NAME}) + public void getServiceDiscoveryRegistriesTest(String accountName) { + // given + ProviderCache ecsCache = providerRegistry.getProviderCache(EcsProvider.NAME); + String testRegistryId = "spinnaker-registry"; + String testNamespace = Keys.Namespace.SERVICE_DISCOVERY_REGISTRIES.ns; + String testSdServiceArn = + "arn:aws:servicediscovery:region:aws_account_id:service/srv-utcrh6wavdkggqtk"; + + String serviceDiscoveryRegistryKey = + Keys.getServiceDiscoveryRegistryKey(accountName, TEST_REGION, testRegistryId); + String url = getTestUrl("/ecs/serviceDiscoveryRegistries"); + Map attributes = new HashMap<>(); + attributes.put("account", accountName); + attributes.put("region", TEST_REGION); + attributes.put("serviceName", "spinnaker-demo"); + attributes.put("serviceId", "srv-v001"); + attributes.put("serviceArn", testSdServiceArn); + + DefaultCacheResult testResult = + buildCacheResult(attributes, testNamespace, serviceDiscoveryRegistryKey); + ecsCache.addCacheResult("TestAgent", Collections.singletonList(testNamespace), testResult); + + // when + Response response = get(url).then().contentType(ContentType.JSON).extract().response(); + + // then + assertNotNull(response); + + String responseStr = response.asString(); + assertTrue(responseStr.contains(accountName)); + assertTrue(responseStr.contains(TEST_REGION)); + assertTrue(responseStr.contains("spinnaker-demo")); + assertTrue(responseStr.contains("srv-v001")); + assertTrue(responseStr.contains(testSdServiceArn)); + } +} diff --git a/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/LoadBalancersSpec.java b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/LoadBalancersSpec.java new file mode 100644 index 00000000000..ed60d2ef954 --- /dev/null +++ b/clouddriver-ecs/src/integration/java/com/netflix/spinnaker/clouddriver/ecs/test/LoadBalancersSpec.java @@ -0,0 +1,227 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.ecs.test;
+
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES;
+import static io.restassured.RestAssured.get;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.*;
+
+import com.amazonaws.services.ecs.model.LoadBalancer;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.cats.provider.ProviderRegistry;
+import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace;
+import com.netflix.spinnaker.clouddriver.ecs.EcsSpec;
+import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider;
+import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper;
+import io.restassured.http.ContentType;
+import io.restassured.response.Response;
+import java.util.*;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.mock.mockito.MockBean;
+
+public class LoadBalancersSpec extends EcsSpec {
+
+  @Autowired private ProviderRegistry providerRegistry;
+
+  @MockBean EcsAccountMapper mockEcsAccountMapper;
+
+  @Test
+  public void getLoadBalancersTest() {
+    // given
+    ProviderCache ecsCache = providerRegistry.getProviderCache(EcsProvider.NAME);
+    String testNamespace = Namespace.LOAD_BALANCERS.ns;
+    String loadBalancerKey =
+        com.netflix.spinnaker.clouddriver.aws.data.Keys.getLoadBalancerKey(
+            testNamespace, "*", TEST_REGION, "*", "*");
+
+    Set<String> keys = new HashSet<>();
+    keys.add(
+        "aws:targetGroups:my-aws-devel-acct:us-west-2:spinnaker-ecs-demo-artifacts-tg:ip:vpc-07daae48bf98a8fd8");
+
+    List<String> securityGroups = new ArrayList<>();
+    securityGroups.add("test-security");
+
+    Map<String, Collection<String>> relationships = new HashMap<>();
+    relationships.put("targetGroups", keys);
+
+    Map<String, Object> attributes = new HashMap<>();
+    attributes.put("account", ECS_ACCOUNT_NAME);
+    attributes.put("region", TEST_REGION);
+    attributes.put("name", ECS_ACCOUNT_NAME);
+    attributes.put("vpcId", "vpc-123");
+    attributes.put("loadBalancerType", "test-type");
+    attributes.put("securityGroups", securityGroups);
+    attributes.put("targetGroups", "test-target");
+    attributes.put("loadBalancerName", "testLB");
+
+    DefaultCacheResult testResult =
+        buildCacheResultForLB(attributes, testNamespace, loadBalancerKey, relationships);
+    ecsCache.addCacheResult("TestAgent", Collections.singletonList(testNamespace), testResult);
+
+    String url = getTestUrl("/ecs/loadBalancers");
+
+    when(mockEcsAccountMapper.fromAwsAccountNameToEcsAccountName("*")).thenReturn(ECS_ACCOUNT_NAME);
+
+    // when
+    Response response =
+        get(url).then().statusCode(200).contentType(ContentType.JSON).extract().response();
+
+    String responseStr = response.asString();
+    assertTrue(responseStr.contains(ECS_ACCOUNT_NAME));
+    assertTrue(responseStr.contains(TEST_REGION));
+    assertTrue(responseStr.contains("spinnaker-ecs-demo-artifacts-tg"));
+    assertTrue(responseStr.contains("testLB"));
+    assertTrue(responseStr.contains("vpc-123"));
+    assertTrue(responseStr.contains("test-security"));
+  }
+
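+  // Seeds the cache with a load balancer, an ECS service, and a target group that reference
+  // each other, then expects /applications/{app}/loadBalancers to join all three together.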
+  @Test
+  public void getLoadBalancersForApplicationTest() {
+    // given
+    ProviderCache ecsCache = providerRegistry.getProviderCache(EcsProvider.NAME);
+    String testNamespaceForLB = Namespace.LOAD_BALANCERS.ns;
+    String loadBalancerKey =
+        com.netflix.spinnaker.clouddriver.aws.data.Keys.getLoadBalancerKey(
+            testNamespaceForLB, "*", TEST_REGION, "*", "*");
+    String targetGroup =
+        "aws:targetGroups:aws-account:us-west-2:spinnaker-ecs-demo-tg:ip:vpc-07daae48bf98a8fd8";
+
+    Map<String, Collection<String>> relationships = new HashMap<>();
+    relationships.put("loadBalancers", Arrays.asList(loadBalancerKey));
+    relationships.put("targetGroups", Arrays.asList(targetGroup));
+
+    Map<String, Object> LBAttributes = new HashMap<>();
+    LBAttributes.put("account", ECS_ACCOUNT_NAME);
+    LBAttributes.put("region", TEST_REGION);
+    LBAttributes.put("name", ECS_ACCOUNT_NAME);
+    LBAttributes.put("vpcId", "vpc-123");
+    LBAttributes.put("loadBalancerType", "test-type");
+    LBAttributes.put("securityGroups", Arrays.asList("test-security"));
+    LBAttributes.put("targetGroups", "test-target");
+    LBAttributes.put("loadBalancerName", "testLB");
+
+    DefaultCacheResult testResult =
+        buildCacheResultForLB(LBAttributes, testNamespaceForLB, loadBalancerKey, relationships);
+    ecsCache.addCacheResult(
+        "TestAgentLB", Collections.singletonList(testNamespaceForLB), testResult);
+
+    LoadBalancer loadBalancer =
+        new LoadBalancer()
+            .withLoadBalancerName("testLB")
+            .withTargetGroupArn(
+                "arn:aws:elasticloadbalancing:us-west-2:910995322324:targetgroup/spinnaker-ecs-demo-tg/84e8edbbc69cd97b");
+
+    Long createdAtLong = (new Date().getTime());
+
+    String testNamespaceForService = SERVICES.ns;
+    String serviceKey =
+        com.netflix.spinnaker.clouddriver.ecs.cache.Keys.getServiceKey(
+            ECS_ACCOUNT_NAME, "us-west-2", "TestAgentService");
+
+    Map<String, Object> serviceAttributes = new HashMap<>();
+    serviceAttributes.put("account", ECS_ACCOUNT_NAME);
+    serviceAttributes.put("region", TEST_REGION);
+    serviceAttributes.put("applicationName", "TestAgentService");
+    serviceAttributes.put("loadBalancers", Arrays.asList(loadBalancer));
+    serviceAttributes.put("serviceName", "testService");
+    serviceAttributes.put("serviceArn", "service/testServiceArn");
+    serviceAttributes.put("clusterName", "ecsTestCluster");
+    serviceAttributes.put("clusterArn", "cluster/testClusterArn");
+    serviceAttributes.put("roleArn", "role/testRoleArn");
+    serviceAttributes.put("taskDefinition", "testTaskDefinition");
+    serviceAttributes.put("desiredCount", 1);
+    serviceAttributes.put("maximumPercent", 10);
+    serviceAttributes.put("minimumHealthyPercent", 10);
+    serviceAttributes.put("subnets", Arrays.asList("testSubnet"));
+    serviceAttributes.put("securityGroups", Arrays.asList("test-security"));
+    serviceAttributes.put("createdAt", createdAtLong);
+
+    DefaultCacheResult testResultForService =
+        buildCacheResult(serviceAttributes, testNamespaceForService, serviceKey);
+    ecsCache.addCacheResult(
+        "TestAgentService",
+        Collections.singletonList(testNamespaceForService),
+        testResultForService);
+
+    String testNamespaceForTG = TARGET_GROUPS.ns;
+
+    String targetGroupKey =
+        com.netflix.spinnaker.clouddriver.aws.data.Keys.getTargetGroupKey(
+            "spinnaker-ecs-demo-tg", "aws-account", TEST_REGION, "", "vpc-123");
+
+    Map<String, Object> targetGroupAttributes = new HashMap<>();
+    targetGroupAttributes.put("loadBalancerNames", Arrays.asList("ecsLB"));
+    targetGroupAttributes.put("targetGroupName", "spinnaker-ecs-demo-tg");
+    targetGroupAttributes.put("targetGroupArn", targetGroup);
+
+    DefaultCacheResult testResultForTargetGroup =
+        buildCacheResultForLB(
+            targetGroupAttributes, testNamespaceForTG, targetGroupKey, relationships);
+    ecsCache.addCacheResult(
+        "TestAgentTG", Collections.singletonList(testNamespaceForTG), testResultForTargetGroup);
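+
+    // stub the ECS <-> AWS account name mapping used when resolving the cached relationships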
+    when(mockEcsAccountMapper.fromAwsAccountNameToEcsAccountName("*")).thenReturn("ecs-account");
+    when(mockEcsAccountMapper.fromEcsAccountNameToAwsAccountName(ECS_ACCOUNT_NAME))
+        .thenReturn("aws-account");
+
+    String url = getTestUrl("/applications/TestAgentService/loadBalancers");
+
+    // when
+    Response response = get(url).then().contentType(ContentType.JSON).extract().response();
+
+    String responseStr = response.asString();
+    assertTrue(responseStr.contains(ECS_ACCOUNT_NAME));
+    assertTrue(responseStr.contains(TEST_REGION));
+    assertTrue(responseStr.contains("spinnaker-ecs-demo-tg"));
+    assertTrue(responseStr.contains("testLB"));
+    assertTrue(responseStr.contains("vpc-123"));
+    assertTrue(responseStr.contains("test-security"));
+    assertTrue(responseStr.contains(targetGroup));
+    assertTrue(
+        responseStr.contains(
+            "arn:aws:elasticloadbalancing:us-west-2:910995322324:targetgroup/spinnaker-ecs-demo-tg/84e8edbbc69cd97b"));
+  }
+
+  private DefaultCacheResult buildCacheResultForLB(
+      Map<String, Object> attributes,
+      String namespace,
+      String key,
+      Map<String, Collection<String>> relationships) {
+    Collection<CacheData> dataPoints = new LinkedList<>();
+    dataPoints.add(new DefaultCacheData(key, attributes, relationships));
+
+    Map<String, Collection<CacheData>> dataMap = new HashMap<>();
+    dataMap.put(namespace, dataPoints);
+    return new DefaultCacheResult(dataMap);
+  }
+
+  protected DefaultCacheResult buildCacheResult(
+      Map<String, Object> attributes, String namespace, String key) {
+    Collection<CacheData> dataPoints = new LinkedList<>();
+    dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap()));
+
+    Map<String, Collection<CacheData>> dataMap = new HashMap<>();
+    dataMap.put(namespace, dataPoints);
+
+    return new DefaultCacheResult(dataMap);
+  }
+}
diff --git a/clouddriver-ecs/src/integration/resources/clouddriver.yml b/clouddriver-ecs/src/integration/resources/clouddriver.yml
new file mode 100644
index 00000000000..d730c8e9a99
--- /dev/null
+++ b/clouddriver-ecs/src/integration/resources/clouddriver.yml
@@ -0,0 +1,69 @@
+spring:
+  application:
+    name: clouddriver
+
+aws:
+  enabled: true
+  primaryAccount: aws-account
+  accounts:
+    - name: aws-account
+      requiredGroupMembership: []
+      providerVersion: V1
+      permissions: {}
+      accountId: '123456789012'
+      regions:
+        - name: us-west-2
+      assumeRole: role/SpinnakerManaged
+  bakeryDefaults:
+    baseImages: []
+  defaultKeyPairTemplate: '{{name}}-keypair'
+  defaultRegions:
+    - name: us-west-2
+  defaults:
+    iamRole: BaseIAMRole
+ecs:
+  enabled: true
+  primaryAccount: ecs-account
+  defaultNamingStrategy: tags
+  accounts:
+    - name: ecs-account
+      providerVersion: V1
+      awsAccount: aws-account
+      namingStrategy: default
+    - name: ecs-moniker-account
+      providerVersion: V1
+      awsAccount: aws-account
+
+sql:
+  enabled: true
+  taskRepository:
+    enabled: true
+  cache:
+    enabled: true
+    readBatchSize: 500
+    writeBatchSize: 300
+  scheduler:
+    enabled: true
+  connectionPools:
+    default:
+      default: true
+      jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=&
+    tasks:
+      jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=&
+  migration:
+    jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=&
+
+redis:
+  enabled: false
+  cache:
+    enabled: false
+  scheduler:
+    enabled: false
+  taskRepository:
+    enabled: false
+
+services:
+  fiat:
+    baseUrl: http://fiat.net
+  front50:
+    baseUrl: http://front50.net
diff --git a/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers-artifactFile.json 
b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers-artifactFile.json new file mode 100644 index 00000000000..37429c08eea --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers-artifactFile.json @@ -0,0 +1,69 @@ +{ + "family": "PLACEHOLDER", + "containerDefinitions": [ + { + "name": "application1", + "image": "PLACEHOLDER", + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "spinnaker-ecs-demo", + "awslogs-region": "us-west-2", + "awslogs-stream-prefix": "spinnaker" + } + }, + "portMappings": [ + { + "hostPort": 80, + "protocol": "tcp", + "containerPort": 80 + } + ], + "environment": [ + { + "name": "PORT", + "value": "80" + } + ], + "cpu": 256, + "memoryReservation": 512, + "essential": true + }, + { + "name": "application2", + "image": "PLACEHOLDER", + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "spinnaker-ecs-demo", + "awslogs-region": "us-west-2", + "awslogs-stream-prefix": "spinnaker" + } + }, + "portMappings": [ + { + "hostPort": 84, + "protocol": "tcp", + "containerPort": 84 + } + ], + "environment": [ + { + "name": "PORT", + "value": "84" + } + ], + "cpu": 256, + "memoryReservation": 512, + "essential": true + } + ], + "cpu": "256", + "memory": "512", + "requiresCompatibilities": [ + "EC2" + ], + "executionRoleArn": "arn:aws:iam:::executionRole/testExecutionRole:1", + "networkMode": "bridge", + "taskRoleArn" : "arn:aws:iam:::role/testTaskRole:1" +} diff --git a/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-targetGroup-WithNoContainers-artifactFile.json b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-targetGroup-WithNoContainers-artifactFile.json new file mode 100644 index 00000000000..c4926849d36 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-targetGroup-WithNoContainers-artifactFile.json @@ -0,0 +1,12 @@ +{ + "family": "PLACEHOLDER", + "containerDefinitions": [], + "cpu": "256", + "memory": "512", + "requiresCompatibilities": [ + "EC2" + ], + "executionRoleArn": "arn:aws:iam:::executionRole/testExecutionRole:1", + "networkMode": "bridge", + "taskRoleArn" : "arn:aws:iam:::role/testTaskRole:1" +} diff --git a/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-targetGroup-artifactFile.json b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-targetGroup-artifactFile.json new file mode 100644 index 00000000000..8a7197f5a5b --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-EC2-targetGroup-artifactFile.json @@ -0,0 +1,41 @@ +{ + "family": "PLACEHOLDER", + "containerDefinitions": [ + { + "name": "application", + "image": "PLACEHOLDER", + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "spinnaker-ecs-demo", + "awslogs-region": "us-west-2", + "awslogs-stream-prefix": "spinnaker" + } + }, + "portMappings": [ + { + "hostPort": 80, + "protocol": "tcp", + "containerPort": 80 + } + ], + "environment": [ + { + "name": "PORT", + "value": "80" + } + ], + "cpu": 256, + "memoryReservation": 512, + "essential": true + } + ], + "cpu": "256", + "memory": "512", + "requiresCompatibilities": [ + "EC2" + ], + "executionRoleArn": "arn:aws:iam:::executionRole/testExecutionRole:1", + 
"networkMode": "bridge", + "taskRoleArn" : "arn:aws:iam:::role/testTaskRole:1" +} diff --git a/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-Fargate-targetGroup-artifactFile.json b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-Fargate-targetGroup-artifactFile.json new file mode 100644 index 00000000000..8fba8f57152 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testartifacts/createServerGroup-artifact-Fargate-targetGroup-artifactFile.json @@ -0,0 +1,42 @@ + +{ + "family": "PLACEHOLDER", + "containerDefinitions": [ + { + "name": "application", + "image": "PLACEHOLDER", + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "spinnaker-ecs-demo", + "awslogs-region": "us-west-2", + "awslogs-stream-prefix": "spinnaker" + } + }, + "portMappings": [ + { + "hostPort": 80, + "protocol": "tcp", + "containerPort": 80 + } + ], + "environment": [ + { + "name": "PORT", + "value": "80" + } + ], + "cpu": 256, + "memoryReservation": 512, + "essential": true + } + ], + "cpu": "256", + "memory": "512", + "requiresCompatibilities": [ + "FARGATE" + ], + "executionRoleArn": "arn:aws:iam:::executionRole/testExecutionRole:1", + "networkMode": "aws-vpc", + "taskRoleArn" : "arn:aws:iam:::role/testTaskRole:1" +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers.json new file mode 100644 index 00000000000..8bac490af0c --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-EC2-TGMappings-multipleLBsAndContainers.json @@ -0,0 +1,57 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integArtifactEC2TgMappingskWithMultipleLBsAndContainers-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integArtifactsEC2TgMappingsStackWithMultipleLBsAndContainers", + "freeFormDetails" : "detailTest", + "targetGroupMappings": [ + { + "containerName": "application1", + "containerPort": 80, + "targetGroup": "integArtifactEC2TgMappings-targetGroupForPort80" + }, + { + "containerName": "application2", + "containerPort": 84, + "targetGroup": "integArtifactEC2TgMappings-targetGroupForPort84" + } + ], + "useTaskDefinitionArtifact" : true, + "taskDefinitionArtifactAccount" : "my-github", + "resolvedTaskDefinitionArtifact": { + "account": "ecs-account", + "type": "ecs", + "customKind" : true, + "name" : "applications", + "location" : "us-west-2", + "reference" : "refernce", + "metadata" : null, + "artifactAccount" : "my-github", + "provenance" : "prov", + "uuid" : "uid-123" + }, + "containerToImageMap": + { + "application1": "app1/image", + "application2": "app2/image" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-EC2-targetGroupMappings.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-EC2-targetGroupMappings.json new file mode 100644 index 00000000000..b61e080d317 --- /dev/null +++ 
b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-EC2-targetGroupMappings.json @@ -0,0 +1,58 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integArtifactEC2TgMappings-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integArtifactsEC2TgMappingsStack", + "freeFormDetails" : "detailTest", + "targetGroupMappings": [ + { + "containerName": "application", + "containerPort": 80, + "targetGroup": "integArtifactEC2TgMappings-targetGroup" + } + ], + "useTaskDefinitionArtifact" : true, + "taskDefinitionArtifactAccount" : "my-github", + "source" : { + "account" : "ecs-account", + "region" : "us-west-2", + "asgName" : "ecs", + "useSourceCapacity" : true + }, + "resolvedTaskDefinitionArtifact": { + "account": "ecs-account", + "type": "ecs", + "customKind" : true, + "name" : "application", + + "location" : "us-west-2", + "reference" : "refernce", + "metadata" : null, + "artifactAccount" : "my-github", + "provenance" : "prov", + "uuid" : "uid-123" + }, + "containerToImageMap": + { + "application": "PLACEHOLDER" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-FARGATE-capacityProviderStrategy.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-FARGATE-capacityProviderStrategy.json new file mode 100644 index 00000000000..b537e370f79 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-FARGATE-capacityProviderStrategy.json @@ -0,0 +1,63 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integArtifactsFargateCapacityProviderStrategy-cluster", + "capacityProviderStrategy": [ + { + "capacityProvider": "FARGATE", + "weight": 1 + } + ], + "networkMode": "aws-vpc", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integArtifactsFargateCapacityProviderStrategyStack", + "freeFormDetails" : "detailTest", + "targetGroupMappings": [ + { + "containerName": "application", + "containerPort": 80, + "targetGroup": "integArtifactsFargateCapacityProviderStrategy-targetGroup" + } + ], + "useTaskDefinitionArtifact" : true, + "taskDefinitionArtifactAccount" : "my-github", + "source" : { + "account" : "ecs-account", + "region" : "us-west-2", + "asgName" : "ecs", + "useSourceCapacity" : true + }, + "resolvedTaskDefinitionArtifact": { + "account": "ecs-account", + "type": "ecs", + "customKind" : true, + "name" : "application", + "version" : "v001", + "location" : "us-west-2", + "reference" : "refernce", + "metadata" : null, + "artifactAccount" : "my-github", + "provenance" : "prov", + "uuid" : "uid-123" + }, + "containerToImageMap": + { + "application": "PLACEHOLDER" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-FARGATE-targetGroupMappings.json 
b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-FARGATE-targetGroupMappings.json new file mode 100644 index 00000000000..9ef44093b1f --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-artifact-FARGATE-targetGroupMappings.json @@ -0,0 +1,58 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integArtifactsFargateTgMappings-cluster", + "launchType": "FARGATE", + "networkMode": "aws-vpc", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integArtifactsFargateTgMappingsStack", + "freeFormDetails" : "detailTest", + "targetGroupMappings": [ + { + "containerName": "application", + "containerPort": 80, + "targetGroup": "integArtifactFargateTgMappings-targetGroup" + } + ], + "useTaskDefinitionArtifact" : true, + "taskDefinitionArtifactAccount" : "my-github", + "source" : { + "account" : "ecs-account", + "region" : "us-west-2", + "asgName" : "ecs", + "useSourceCapacity" : true + }, + "resolvedTaskDefinitionArtifact": { + "account": "ecs-account", + "type": "ecs", + "customKind" : true, + "name" : "application", + "version" : "v001", + "location" : "us-west-2", + "reference" : "refernce", + "metadata" : null, + "artifactAccount" : "my-github", + "provenance" : "prov", + "uuid" : "uid-123" + }, + "containerToImageMap": + { + "application": "PLACEHOLDER" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-input-EC2-targetGroupMappings-existingService.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-input-EC2-targetGroupMappings-existingService.json new file mode 100644 index 00000000000..0fad6f82066 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-input-EC2-targetGroupMappings-existingService.json @@ -0,0 +1,36 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputEC2TgMappingsExistingService-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputEC2TgMappingsExistingServiceStack", + "targetGroupMappings": [ + { + "containerName": "", + "containerPort": 80, + "targetGroup": "integInputEC2TgMappingsExistingService-targetGroup" + } + ], + "containerToImageMap": + { + "application": "PLACEHOLDER" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-moniker.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-moniker.json new file mode 100644 index 00000000000..61987d861fa --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-moniker.json @@ -0,0 +1,31 @@ +{ + "account": "ecs-moniker-account", + "application": "ecs", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + 
"computeUnits": 256, + "containerPort": 80, + "credentials": "ecs-moniker-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsMoniker-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsMoniker", + "moniker": { + "app": "ecs", + "detail": "detailTest", + "stack": "integInputsMoniker" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-serviceDiscovery.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-serviceDiscovery.json new file mode 100644 index 00000000000..8b5246de4a7 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-serviceDiscovery.json @@ -0,0 +1,38 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsEc2WithServiceDiscovery-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsEc2WithServiceDiscovery", + "serviceDiscoveryAssociations": [ + { + "containerPort": 80, + "registry": { + "account": "ecs-my-aws-devel-acct", + "arn": "arn:aws:servicediscovery:us-west-2:910995322324:service/srv-ckeydmrhzmqh6yfz", + "displayName": "spinnaker-ecs-demo-artifacts1 (srv-ckeydmrhzmqh6yfz)", + "id": "srv-ckeydmrhzmqh6yfz", + "name": "spinnaker-ecs-demo-artifacts1", + "region": "us-west-2" + } + } + ] +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-targetGroupMappings-appAutoScalingGroup.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-targetGroupMappings-appAutoScalingGroup.json new file mode 100644 index 00000000000..3b834ed502d --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-targetGroupMappings-appAutoScalingGroup.json @@ -0,0 +1,39 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "containerPort": 80, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsEc2TargetGroupMappingsWithAppAutoScaling-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsEc2TargetGroupMappingsWithAppAutoScalingGroup", + "targetGroupMappings": [ + { + "containerName": "", + "containerPort": 80, + "targetGroup": "integInputsEc2TargetGroupMappingsWithAppAutoScalingGroup-targetGroup" + } + ], + "source" : { + "account" : "ecs-account", + "region" : "us-west-2", + "asgName" : "ecs", + "useSourceCapacity" : true + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-withoutLoadBalacing.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-withoutLoadBalacing.json new file mode 100644 index 00000000000..f399970ee53 --- /dev/null +++ 
b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2-withoutLoadBalacing.json @@ -0,0 +1,26 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "containerPort": 80, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsEc2NoLoadBalancing-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsEc2NoLoadBalancing" +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2.json new file mode 100644 index 00000000000..96cb48e3dfa --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ec2.json @@ -0,0 +1,27 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "containerPort": 80, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsEc2LegacyTargetGroup-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsEc2LegacyTargetGroup", + "targetGroup": "integInputsEc2LegacyTargetGroup-targetGroup" +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ecsCreateFails.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ecsCreateFails.json new file mode 100644 index 00000000000..a7c8851fdcb --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-inputs-ecsCreateFails.json @@ -0,0 +1,27 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "containerPort": 80, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsErrorFromEcs-cluster", + "launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsErrorFromEcs", + "targetGroup": "integInputsErrorFromEcs-targetGroup" +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-spelProcessedArtifact-EC2-targetGroupMappings.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-spelProcessedArtifact-EC2-targetGroupMappings.json new file mode 100644 index 00000000000..84c88a86ac5 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroup-spelProcessedArtifact-EC2-targetGroupMappings.json @@ -0,0 +1,99 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integSpELProcessedArtifactEC2TgMappings-cluster", + 
"launchType": "EC2", + "networkMode": "bridge", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integSpELProcessedArtifactsEC2TgMappingsStack", + "freeFormDetails" : "detailTest", + "targetGroupMappings": [ + { + "containerName": "application", + "containerPort": 80, + "targetGroup": "integSpELProcessedArtifactEC2TgMappings-targetGroup" + } + ], + "useTaskDefinitionArtifact" : true, + "evaluateTaskDefinitionArtifactExpressions" : true, + "taskDefinitionArtifactAccount" : "my-github", + "source" : { + "account" : "ecs-account", + "region" : "us-west-2", + "asgName" : "ecs", + "useSourceCapacity" : true + }, + "resolvedTaskDefinitionArtifact": { + "account": "ecs-account", + "type": "ecs", + "customKind" : true, + "name" : "application", + + "location" : "us-west-2", + "reference" : "refernce", + "metadata" : null, + "artifactAccount" : "my-github", + "provenance" : "prov", + "uuid" : "uid-123" + }, + "spelProcessedTaskDefinitionArtifact" : { + "family": "PLACEHOLDER", + "containerDefinitions": [ + { + "name": "application", + "image": "PLACEHOLDER", + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "spinnaker-ecs-demo", + "awslogs-region": "eu-central-1", + "awslogs-stream-prefix": "spinnaker" + } + }, + "portMappings": [ + { + "hostPort": 80, + "protocol": "tcp", + "containerPort": 80 + } + ], + "environment": [ + { + "name": "PORT", + "value": "80" + } + ], + "cpu": 256, + "memoryReservation": 512, + "essential": true + } + ], + "cpu": "256", + "memory": "512", + "requiresCompatibilities": [ + "FARGATE" + ], + "executionRoleArn": "arn:aws:iam:::executionRole/testExecutionRole:1", + "networkMode": "bridge" + }, + "containerToImageMap": + { + "application": "PLACEHOLDER" + } +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroupOperation-inputs-fargate-legacyTargetGroup.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroupOperation-inputs-fargate-legacyTargetGroup.json new file mode 100644 index 00000000000..17485060e87 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroupOperation-inputs-fargate-legacyTargetGroup.json @@ -0,0 +1,27 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "ecs", + "computeUnits": 256, + "containerPort": 80, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsFargateLegacyTargetGroup-cluster", + "launchType": "FARGATE", + "networkMode": "aws-vpc", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsFargateLegacyTargetGroup", + "targetGroup": "integInputsFargateLegacyTargetGroup-targetGroup" +} diff --git a/clouddriver-ecs/src/integration/resources/testoperations/createServerGroupOperation-inputs-fargate-targetGroupMappings.json b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroupOperation-inputs-fargate-targetGroupMappings.json new file mode 100644 index 00000000000..b59f4752ae2 --- /dev/null +++ b/clouddriver-ecs/src/integration/resources/testoperations/createServerGroupOperation-inputs-fargate-targetGroupMappings.json @@ -0,0 +1,32 @@ +{ + "account": "ecs-account", + "application": "ecs", + "availabilityZones": { + "us-west-2": [ + "us-west-2a", + "us-west-2c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + 
"cloudProvider": "ecs", + "computeUnits": 256, + "credentials": "ecs-account", + "dockerImageAddress": "nginx", + "ecsClusterName": "integInputsFargateTgMappings-cluster", + "launchType": "FARGATE", + "networkMode": "aws-vpc", + "placementStrategySequence": [], + "reservedMemory": 512, + "stack": "integInputsFargateTgMappings", + "targetGroupMappings": [ + { + "containerName" : "main", + "containerPort" : 80, + "targetGroup" : "integInputsFargateTgMappings-targetGroup" + } + ] +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsCloudProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsCloudProvider.java index cd6e225c4d0..54d0e53391e 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsCloudProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsCloudProvider.java @@ -16,10 +16,9 @@ package com.netflix.spinnaker.clouddriver.ecs; -import org.springframework.stereotype.Component; import com.netflix.spinnaker.clouddriver.core.CloudProvider; - import java.lang.annotation.Annotation; +import org.springframework.stereotype.Component; @Component public class EcsCloudProvider implements CloudProvider { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsConfigurationProperties.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsConfigurationProperties.java index d2f2c898d69..3272515e602 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsConfigurationProperties.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsConfigurationProperties.java @@ -19,7 +19,4 @@ import org.springframework.boot.context.properties.ConfigurationProperties; @ConfigurationProperties("ecs") -class EcsConfigurationProperties { - - -} +class EcsConfigurationProperties {} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsOperation.java index 71713cb986f..00415d45e25 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/EcsOperation.java @@ -26,5 +26,4 @@ public @interface EcsOperation { String value(); - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/Keys.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/Keys.java index 175838f8f23..0083edaf92f 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/Keys.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/Keys.java @@ -16,17 +16,17 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH; +import static com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider.ID; + import com.google.common.base.CaseFormat; import com.netflix.spinnaker.clouddriver.cache.KeyParser; - import java.util.HashMap; import java.util.Map; -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH; -import static com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider.ID; - public class Keys implements KeyParser { public enum Namespace { + ECS_APPLICATIONS, IAM_ROLE, SERVICES, ECS_CLUSTERS, @@ -34,7 +34,10 @@ public enum Namespace { CONTAINER_INSTANCES, TASK_DEFINITIONS, 
ALARMS, - SCALABLE_TARGETS; + SCALABLE_TARGETS, + SECRETS, + SERVICE_DISCOVERY_REGISTRIES, + TARGET_HEALTHS; public final String ns; @@ -83,46 +86,75 @@ public static Map parse(String key) { Map result = new HashMap<>(); result.put("provider", parts[0]); result.put("type", parts[1]); - result.put("account", parts[2]); - if(!canParse(parts[1]) && parts[1].equals(HEALTH.getNs())){ + if (parts[1].equals(HEALTH.getNs())) { + result.put("account", parts[2]); result.put("region", parts[3]); result.put("taskId", parts[4]); return result; } - - Namespace namespace = Namespace.valueOf(CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, parts[1])); - - if (!namespace.equals(Namespace.IAM_ROLE)) { - result.put("region", parts[3]); - } + Namespace namespace = + Namespace.valueOf(CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, parts[1])); switch (namespace) { + case ECS_APPLICATIONS: + result.put("application", parts[2]); + break; case SERVICES: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("serviceName", parts[4]); break; case ECS_CLUSTERS: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("clusterName", parts[4]); break; case TASKS: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("taskId", parts[4]); break; case CONTAINER_INSTANCES: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("containerInstanceArn", parts[4]); break; case TASK_DEFINITIONS: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("taskDefinitionArn", parts[4]); break; case ALARMS: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("alarmArn", parts[4]); break; case IAM_ROLE: + result.put("account", parts[2]); result.put("roleName", parts[3]); break; + case SECRETS: + result.put("account", parts[2]); + result.put("region", parts[3]); + result.put("secretName", parts[4]); + break; + case SERVICE_DISCOVERY_REGISTRIES: + result.put("account", parts[2]); + result.put("region", parts[3]); + result.put("serviceId", parts[4]); + break; case SCALABLE_TARGETS: + result.put("account", parts[2]); + result.put("region", parts[3]); result.put("resource", parts[4]); break; + case TARGET_HEALTHS: + result.put("account", parts[2]); + result.put("region", parts[3]); + result.put("targetGroupArn", parts[4]); + break; default: break; } @@ -143,6 +175,10 @@ public static String getClusterKey(String account, String region, String cluster return buildKey(Namespace.ECS_CLUSTERS.ns, account, region, clusterName); } + public static String getApplicationKey(String name) { + return ID + SEPARATOR + Namespace.ECS_APPLICATIONS + SEPARATOR + name.toLowerCase(); + } + public static String getTaskKey(String account, String region, String taskId) { return buildKey(Namespace.TASKS.ns, account, region, taskId); } @@ -151,16 +187,22 @@ public static String getTaskHealthKey(String account, String region, String task return buildKey(HEALTH.getNs(), account, region, taskId); } - public static String getContainerInstanceKey(String account, String region, String containerInstanceArn) { + public static String getTargetHealthKey(String account, String region, String targetGroupArn) { + return buildKey(Namespace.TARGET_HEALTHS.ns, account, region, targetGroupArn); + } + + public static String getContainerInstanceKey( + String account, String region, String containerInstanceArn) { return buildKey(Namespace.CONTAINER_INSTANCES.ns, account, region, containerInstanceArn); } - public static 
String getTaskDefinitionKey(String account, String region, String taskDefinitionArn) { + public static String getTaskDefinitionKey( + String account, String region, String taskDefinitionArn) { return buildKey(Namespace.TASK_DEFINITIONS.ns, account, region, taskDefinitionArn); } - public static String getAlarmKey(String account, String region, String alarmArn) { - return buildKey(Namespace.ALARMS.ns, account, region, alarmArn); + public static String getAlarmKey(String account, String region, String alarmArn, String cluster) { + return buildKey(Namespace.ALARMS.ns, account, region, alarmArn + SEPARATOR + cluster); } public static String getScalableTargetKey(String account, String region, String resourceId) { @@ -171,7 +213,25 @@ public static String getIamRoleKey(String account, String iamRoleName) { return ID + SEPARATOR + Namespace.IAM_ROLE + SEPARATOR + account + SEPARATOR + iamRoleName; } - private static String buildKey(String namespace,String account, String region, String identifier){ - return ID + SEPARATOR + namespace + SEPARATOR + account + SEPARATOR + region + SEPARATOR + identifier; + public static String getSecretKey(String account, String region, String secretName) { + return buildKey(Namespace.SECRETS.ns, account, region, secretName); + } + + public static String getServiceDiscoveryRegistryKey( + String account, String region, String registryId) { + return buildKey(Namespace.SERVICE_DISCOVERY_REGISTRIES.ns, account, region, registryId); + } + + private static String buildKey( + String namespace, String account, String region, String identifier) { + return ID + + SEPARATOR + + namespace + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + identifier; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/AbstractCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/AbstractCacheClient.java index 6b57712f731..d0686b544ca 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/AbstractCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/AbstractCacheClient.java @@ -19,14 +19,16 @@ import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; - import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; abstract class AbstractCacheClient { + private final Logger log = LoggerFactory.getLogger(getClass()); private final String keyNamespace; protected final Cache cacheView; @@ -46,9 +48,7 @@ abstract class AbstractCacheClient { */ protected abstract T convert(CacheData cacheData); - /** - * @return A list of all generic type objects belonging to the key namespace. - */ + /** @return A list of all generic type objects belonging to the key namespace. */ public Collection getAll() { Collection allData = cacheView.getAll(keyNamespace); return convertAll(allData); @@ -56,14 +56,23 @@ public Collection getAll() { /** * @param account name of the AWS account, as defined in clouddriver.yml - * @param region region of the AWS account, as defined in clouddriver.yml - * @return A list of all generic type objects belonging to the account and region in the key namespace. 
+ * @param region region of the AWS account, as defined in clouddriver.yml + * @return A list of all generic type objects belonging to the account and region in the key + * namespace. */ public Collection<T> getAll(String account, String region) { Collection<CacheData> data = fetchFromCache(account, region); return convertAll(data); } + public Collection<T> getAll(Collection<String> identifiers) { + Collection<CacheData> allData = cacheView.getAll(keyNamespace, identifiers); + if (allData == null) { + return Collections.emptyList(); + } + return convertAll(allData); + } + /** * @param key A key within the key namespace that will be used to retrieve the object. * @return An object of the generic type that is associated to the key. @@ -77,25 +86,26 @@ public T get(String key) { } /** - * @param cacheData A collection of CacheData that will be converted into a collection of generic typ objects. + * @param cacheData A collection of CacheData that will be converted into a collection of generic + * type objects. + * @return A collection of generic type objects. */ private Collection<T> convertAll(Collection<CacheData> cacheData) { - return cacheData.stream() - .map(this::convert) - .collect(Collectors.toList()); + return cacheData.stream().map(this::convert).collect(Collectors.toList()); } /** * @param account name of the AWS account, as defined in clouddriver.yml - * @param region region of the AWS account, as defined in clouddriver.yml + * @param region region of the AWS account, as defined in clouddriver.yml + * @return */ private Collection<CacheData> fetchFromCache(String account, String region) { + log.debug("fetching all for account '{}' and region '{}'", account, region); String accountFilter = account != null ? account + Keys.SEPARATOR : "*" + Keys.SEPARATOR; String regionFilter = region != null ? region + Keys.SEPARATOR : "*" + Keys.SEPARATOR; Set<String> keys = new HashSet<>(); - String pattern = "ecs" + Keys.SEPARATOR + keyNamespace + Keys.SEPARATOR + accountFilter + regionFilter + "*"; + String pattern = + "ecs" + Keys.SEPARATOR + keyNamespace + Keys.SEPARATOR + accountFilter + regionFilter + "*"; Collection<String> nameMatches = cacheView.filterIdentifiers(keyNamespace, pattern); keys.addAll(nameMatches); @@ -108,4 +118,8 @@ private Collection<CacheData> fetchFromCache(String account, String region) { return allData; } + + public Collection<String> filterIdentifiers(String glob) { + return cacheView.filterIdentifiers(keyNamespace, glob); + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ContainerInstanceCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ContainerInstanceCacheClient.java index 5579419e26f..07ac5f51d69 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ContainerInstanceCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ContainerInstanceCacheClient.java @@ -16,16 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; + import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance; +import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; - @Component public class
ContainerInstanceCacheClient extends AbstractCacheClient { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsCloudWatchAlarmCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsCloudWatchAlarmCacheClient.java index 78c4da0b2d2..d710f87a2e5 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsCloudWatchAlarmCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsCloudWatchAlarmCacheClient.java @@ -16,19 +16,21 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ALARMS; + import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ALARMS; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class EcsCloudWatchAlarmCacheClient extends AbstractCacheClient { @@ -60,8 +62,10 @@ protected EcsMetricAlarm convert(CacheData cacheData) { metricAlarm.setOKActions(Collections.emptyList()); } - if (attributes.containsKey("insufficientDataActions") && attributes.get("insufficientDataActions") != null) { - metricAlarm.setInsufficientDataActions((Collection) attributes.get("insufficientDataActions")); + if (attributes.containsKey("insufficientDataActions") + && attributes.get("insufficientDataActions") != null) { + metricAlarm.setInsufficientDataActions( + (Collection) attributes.get("insufficientDataActions")); } else { metricAlarm.setInsufficientDataActions(Collections.emptyList()); } @@ -69,31 +73,29 @@ protected EcsMetricAlarm convert(CacheData cacheData) { return metricAlarm; } - public List getMetricAlarms(String serviceName, String accountName, String region) { + public List getMetricAlarms( + String serviceName, String accountName, String region, String ecsClusterName) { List metricAlarms = new LinkedList<>(); - Collection allMetricAlarms = getAll(accountName, region); - outLoop: - for (EcsMetricAlarm metricAlarm : allMetricAlarms) { - for (String action : metricAlarm.getAlarmActions()) { - if (action.contains(serviceName)) { - metricAlarms.add(metricAlarm); - continue outLoop; - } - } + String glob = Keys.getAlarmKey(accountName, region, "*", ecsClusterName); + Collection metricAlarmsIds = filterIdentifiers(glob); + String globEmptyDimension = Keys.getAlarmKey(accountName, region, "*", ""); + Collection otherMetricAlarmsIds = filterIdentifiers(globEmptyDimension); - for (String action : metricAlarm.getOKActions()) { - if (action.contains(serviceName)) { - metricAlarms.add(metricAlarm); - continue outLoop; - } - } + Collection combinedMetricIds = + Stream.of(metricAlarmsIds, otherMetricAlarmsIds) + .filter(m -> m != null) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + + Collection allMetricAlarms = getAll(combinedMetricIds); - for (String action : 
metricAlarm.getInsufficientDataActions()) { - if (action.contains(serviceName)) { - metricAlarms.add(metricAlarm); - continue outLoop; - } + for (EcsMetricAlarm metricAlarm : allMetricAlarms) { + if (metricAlarm.getAlarmActions().stream().anyMatch(action -> action.contains(serviceName)) + || metricAlarm.getOKActions().stream().anyMatch(action -> action.contains(serviceName)) + || metricAlarm.getInsufficientDataActions().stream() + .anyMatch(action -> action.contains(serviceName))) { + metricAlarms.add(metricAlarm); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsClusterCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsClusterCacheClient.java index 91fad48c8c9..2b48f375b46 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsClusterCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsClusterCacheClient.java @@ -16,18 +16,17 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; + import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsCluster; +import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; - @Component -public class EcsClusterCacheClient extends AbstractCacheClient{ +public class EcsClusterCacheClient extends AbstractCacheClient { @Autowired public EcsClusterCacheClient(Cache cacheView) { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsInstanceCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsInstanceCacheClient.java index 1a913f6475a..4a521d260b2 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsInstanceCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsInstanceCacheClient.java @@ -16,17 +16,16 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES; + import com.amazonaws.services.ec2.model.Instance; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.clouddriver.aws.data.Keys; -import org.springframework.stereotype.Component; - import java.util.Collection; import java.util.Set; import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES; +import org.springframework.stereotype.Component; @Component public class EcsInstanceCacheClient { @@ -52,8 +51,7 @@ public Set find(String instanceId, String account, String region) { Collection instanceKeys = cacheView.filterIdentifiers(INSTANCES.getNs(), searchKey); return cacheView.getAll(INSTANCES.getNs(), instanceKeys).stream() - .map(cacheData -> objectMapper.convertValue(cacheData.getAttributes(), Instance.class)) - .collect(Collectors.toSet()); + .map(cacheData -> objectMapper.convertValue(cacheData.getAttributes(), Instance.class)) + .collect(Collectors.toSet()); } - } diff --git 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsLoadbalancerCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsLoadbalancerCacheClient.java index 472ad84ebed..08321e3369a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsLoadbalancerCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsLoadbalancerCacheClient.java @@ -16,34 +16,32 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; + import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; import com.netflix.spinnaker.clouddriver.aws.data.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsLoadBalancerCache; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper; +import java.util.*; import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; +import org.springframework.stereotype.Component; @Component public class EcsLoadbalancerCacheClient { private final Cache cacheView; private final ObjectMapper objectMapper; + private final EcsAccountMapper ecsAccountMapper; - public EcsLoadbalancerCacheClient(Cache cacheView, ObjectMapper objectMapper) { + public EcsLoadbalancerCacheClient( + Cache cacheView, ObjectMapper objectMapper, EcsAccountMapper ecsAccountMapper) { this.cacheView = cacheView; this.objectMapper = objectMapper; + this.ecsAccountMapper = ecsAccountMapper; } public List find(String account, String region) { @@ -57,26 +55,40 @@ public List findAll() { private Set> fetchFromCache(String account, String region) { String accountFilter = account != null ? account : "*"; + if (!"*".equals(accountFilter)) { + String awsAccountName = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(accountFilter); + if (awsAccountName != null) { + accountFilter = awsAccountName; + } + } String regionFilter = region != null ? 
region : "*"; String searchKey = Keys.getLoadBalancerKey("*", accountFilter, regionFilter, "*", "*") + "*"; - Collection loadbalancerKeys = cacheView.filterIdentifiers(LOAD_BALANCERS.getNs(), searchKey); + Collection loadbalancerKeys = + cacheView.filterIdentifiers(LOAD_BALANCERS.getNs(), searchKey); return fetchLoadBalancerAttributes(loadbalancerKeys); } - public Set findWithTargetGroups(Set targetGroups) { - return findAll().stream() - .filter(ecsLoadBalancerCache -> targetGroups.containsAll(ecsLoadBalancerCache.getTargetGroups())) - .collect(Collectors.toSet()); + public List findWithTargetGroups(Set targetGroupKeys) { + Set targetGroupCacheData = + new HashSet<>( + cacheView.getAll( + TARGET_GROUPS.getNs(), + targetGroupKeys, + RelationshipCacheFilter.include(LOAD_BALANCERS.getNs()))); + Set lbKeys = inferAssociatedLoadBalancers(targetGroupCacheData); + Set> loadbalancerAttributes = fetchLoadBalancerAttributes(lbKeys); + return convertToLoadbalancer(loadbalancerAttributes); } private EcsLoadBalancerCache convertToLoadBalancer(Map targetGroupAttributes) { return objectMapper.convertValue(targetGroupAttributes, EcsLoadBalancerCache.class); } - private List convertToLoadbalancer(Collection> targetGroupAttributes) { + private List convertToLoadbalancer( + Collection> targetGroupAttributes) { List ecsTargetGroups = new ArrayList<>(); for (Map attributes : targetGroupAttributes) { @@ -86,19 +98,19 @@ private List convertToLoadbalancer(Collection> fetchLoadBalancerAttributes(Collection targetGroupKeys) { - Set loadBalancerCache = fetchLoadBalancers(targetGroupKeys); + private Set> fetchLoadBalancerAttributes( + Collection loadBalancerKeys) { + Set loadBalancerCache = fetchLoadBalancers(loadBalancerKeys); return loadBalancerCache.stream() - .filter(this::hashTargetGroups) - .map(this::convertCacheData) - .collect(Collectors.toSet()); + .filter(this::hashTargetGroups) + .map(this::convertCacheData) + .collect(Collectors.toSet()); } private boolean hashTargetGroups(CacheData loadbalancerCache) { return loadbalancerCache.getRelationships().get("targetGroups") != null - && loadbalancerCache.getRelationships().get("targetGroups").size() > 0; + && loadbalancerCache.getRelationships().get("targetGroups").size() > 0; } private Map convertCacheData(CacheData loadbalancerCache) { @@ -106,30 +118,24 @@ private Map convertCacheData(CacheData loadbalancerCache) { Map parts = Keys.parse(loadbalancerCache.getId()); attributes.put("region", parts.get("region")); - attributes.put("account", parts.get("account")); + String ecsAccount = ecsAccountMapper.fromAwsAccountNameToEcsAccountName(parts.get("account")); + attributes.put("account", ecsAccount); attributes.put("loadBalancerType", parts.get("loadBalancerType")); - attributes.put("targetGroups", loadbalancerCache.getRelationships().get("targetGroups").stream() - .map(id -> Keys.parse(id).get("targetGroup")) - .collect(Collectors.toSet()) - ); + attributes.put( + "targetGroups", + loadbalancerCache.getRelationships().get("targetGroups").stream() + .map(id -> Keys.parse(id).get("targetGroup")) + .collect(Collectors.toSet())); return attributes; - - } - - private Set> retrieveLoadbalancers(Set loadbalancersAssociatedWithTargetGroups) { - Collection loadbalancers = cacheView.getAll(LOAD_BALANCERS.getNs(), loadbalancersAssociatedWithTargetGroups); - return loadbalancers.stream() - .map(CacheData::getAttributes) - .collect(Collectors.toSet()); } - private Set inferAssociatedLoadBalancers(Set targetGroups) { Set loadbalancersAssociatedWithTargetGroups = new 
HashSet<>(); for (CacheData targetGroup : targetGroups) { - Collection relatedLoadbalancer = targetGroup.getRelationships().get("loadbalancer"); + Collection relatedLoadbalancer = + targetGroup.getRelationships().get(LOAD_BALANCERS.ns); if (relatedLoadbalancer != null && relatedLoadbalancer.size() > 0) { loadbalancersAssociatedWithTargetGroups.addAll(relatedLoadbalancer); } @@ -138,8 +144,10 @@ private Set inferAssociatedLoadBalancers(Set targetGroups) { } private Set fetchLoadBalancers(Collection loadBalancerKeys) { - return new HashSet<>(cacheView.getAll(LOAD_BALANCERS.getNs(), - loadBalancerKeys, - RelationshipCacheFilter.include(TARGET_GROUPS.getNs()))); + return new HashSet<>( + cacheView.getAll( + LOAD_BALANCERS.getNs(), + loadBalancerKeys, + RelationshipCacheFilter.include(TARGET_GROUPS.getNs()))); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsTargetGroupCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsTargetGroupCacheClient.java index 5d73849cfc9..82828b86ba9 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsTargetGroupCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/EcsTargetGroupCacheClient.java @@ -16,14 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; + import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; import com.netflix.spinnaker.clouddriver.aws.data.Keys; import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsTargetGroup; -import org.springframework.stereotype.Component; - import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -31,9 +32,7 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS; -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; +import org.springframework.stereotype.Component; @Component public class EcsTargetGroupCacheClient { @@ -48,8 +47,13 @@ public EcsTargetGroupCacheClient(Cache cacheView, ObjectMapper objectMapper) { public List findAll() { String searchKey = Keys.getTargetGroupKey("*", "*", "*", "*", "*") + "*"; - Collection targetGroupKeys = cacheView.filterIdentifiers(TARGET_GROUPS.getNs(), searchKey); + Collection targetGroupKeys = + cacheView.filterIdentifiers(TARGET_GROUPS.getNs(), searchKey); + + return find(targetGroupKeys); + } + public List find(Collection targetGroupKeys) { Set> targetGroupAttributes = fetchLoadBalancerAttributes(targetGroupKeys); List targetGroups = convertToTargetGroup(targetGroupAttributes); @@ -57,12 +61,18 @@ public List findAll() { return targetGroups; } + public Collection getAllKeys() { + return cacheView.getIdentifiers(TARGET_GROUPS.ns); + } + private EcsTargetGroup convertToTargetGroup(Map targetGroupAttributes) { - EcsTargetGroup ecsTargetGroup = objectMapper.convertValue(targetGroupAttributes, EcsTargetGroup.class); + EcsTargetGroup ecsTargetGroup = + objectMapper.convertValue(targetGroupAttributes, EcsTargetGroup.class); return 
ecsTargetGroup; } - private List convertToTargetGroup(Collection> targetGroupAttributes) { + private List convertToTargetGroup( + Collection> targetGroupAttributes) { List ecsTargetGroups = new ArrayList<>(); for (Map attributes : targetGroupAttributes) { @@ -72,35 +82,34 @@ private List convertToTargetGroup(Collection return ecsTargetGroups; } - private Set> fetchLoadBalancerAttributes(Collection targetGroupKeys) { Set targetGroups = fetchTargetGroups(targetGroupKeys); - Set> targetGroupAttributes = targetGroups.stream() - .filter(this::hashLoadBalancers) - .map(CacheData::getAttributes) - .collect(Collectors.toSet()); + Set> targetGroupAttributes = + targetGroups.stream() + .filter(this::hashLoadBalancers) + .map(CacheData::getAttributes) + .collect(Collectors.toSet()); return targetGroupAttributes; } private boolean hashLoadBalancers(CacheData targetGroupCache) { return targetGroupCache.getRelationships().get("loadBalancers") != null - && targetGroupCache.getRelationships().get("loadBalancers").size() > 0; + && targetGroupCache.getRelationships().get("loadBalancers").size() > 0; } - private Set> retrieveTargetGroups(Set targetGroupsAssociatedWithLoadBalancers) { - Collection targetGroupCache = cacheView.getAll(TARGET_GROUPS.getNs(), targetGroupsAssociatedWithLoadBalancers); + private Set> retrieveTargetGroups( + Set targetGroupsAssociatedWithLoadBalancers) { + Collection targetGroupCache = + cacheView.getAll(TARGET_GROUPS.getNs(), targetGroupsAssociatedWithLoadBalancers); - Set> targetGroupAttributes = targetGroupCache - .stream() - .map(CacheData::getAttributes) - .collect(Collectors.toSet()); + Set> targetGroupAttributes = + targetGroupCache.stream().map(CacheData::getAttributes).collect(Collectors.toSet()); return targetGroupAttributes; } - private Set inferAssociatedTargetGroups(Set loadBalancers) { Set targetGroupsAssociatedWithLoadBalancers = new HashSet<>(); @@ -114,9 +123,10 @@ private Set inferAssociatedTargetGroups(Set loadBalancers) { } private Set fetchTargetGroups(Collection targetGroupKeys) { - return new HashSet<>(cacheView.getAll(TARGET_GROUPS.getNs(), - targetGroupKeys, - RelationshipCacheFilter.include(LOAD_BALANCERS.getNs()))); + return new HashSet<>( + cacheView.getAll( + TARGET_GROUPS.getNs(), + targetGroupKeys, + RelationshipCacheFilter.include(LOAD_BALANCERS.getNs()))); } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/IamRoleCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/IamRoleCacheClient.java index 1865bea9c76..a019800520d 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/IamRoleCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/IamRoleCacheClient.java @@ -16,21 +16,20 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; + import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.model.IamRole; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamTrustRelationship; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - -import static 
com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class IamRoleCacheClient extends AbstractCacheClient { @@ -56,15 +55,16 @@ private Collection filterResultsForEcsTrustRelationship(Collection result = new HashSet<>(); for (CacheData cacheData : allData) { - List> trustRelationships = (List>) cacheData.getAttributes().get("trustRelationships"); + List> trustRelationships = + (List>) cacheData.getAttributes().get("trustRelationships"); for (Map trustRelationship : trustRelationships) { - if (trustRelationship.get("type").equals("Service") && trustRelationship.get("value").equals("ecs-tasks.amazonaws.com")) { + if (trustRelationship.get("type").equals("Service") + && trustRelationship.get("value").equals("ecs-tasks.amazonaws.com")) { result.add(convert(cacheData)); continue; } } - } return result; @@ -73,7 +73,8 @@ private Collection filterResultsForEcsTrustRelationship(Collection> trustRelationships = (List>) cacheData.getAttributes().get("trustRelationships"); + List> trustRelationships = + (List>) cacheData.getAttributes().get("trustRelationships"); Set iamTrustRelationships = new HashSet<>(); IamRole iamRole = new IamRole(); @@ -90,19 +91,19 @@ protected IamRole convert(CacheData cacheData) { iamRole.setTrustRelationships(iamTrustRelationships); - return iamRole; } /** * @param account name of the AWS account, as defined in clouddriver.yml - * @param region is not used in AWS as IAM is region-agnostic + * @param region is not used in AWS as IAM is region-agnostic * @return */ private Collection fetchFromCache(String account, String region) { - Set keys = cacheView.filterIdentifiers(IAM_ROLE.ns, "*:" + account + ":*").stream() - .distinct() - .collect(Collectors.toSet()); + Set keys = + cacheView.filterIdentifiers(IAM_ROLE.ns, "*:" + account + ":*").stream() + .distinct() + .collect(Collectors.toSet()); return cacheView.getAll(IAM_ROLE.ns, keys); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ScalableTargetCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ScalableTargetCacheClient.java index 6e8f59ca67e..0304563194f 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ScalableTargetCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ScalableTargetCacheClient.java @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SCALABLE_TARGETS; + import com.amazonaws.services.applicationautoscaling.model.ScalableTarget; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; @@ -23,8 +25,6 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SCALABLE_TARGETS; - @Component public class ScalableTargetCacheClient extends AbstractCacheClient { private final ObjectMapper objectMapper; @@ -41,5 +41,4 @@ protected ScalableTarget convert(CacheData cacheData) { scalableTarget = objectMapper.convertValue(cacheData.getAttributes(), ScalableTarget.class); return scalableTarget; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/SecretCacheClient.java 
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/SecretCacheClient.java new file mode 100644 index 00000000000..e8b9b75b882 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/SecretCacheClient.java @@ -0,0 +1,47 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.client; + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SECRETS; + +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class SecretCacheClient extends AbstractCacheClient { + + @Autowired + public SecretCacheClient(Cache cacheView) { + super(cacheView, SECRETS.toString()); + } + + @Override + protected Secret convert(CacheData cacheData) { + Secret secret = new Secret(); + Map attributes = cacheData.getAttributes(); + + secret.setAccount((String) attributes.get("account")); + secret.setRegion((String) attributes.get("region")); + secret.setName((String) attributes.get("secretName")); + secret.setArn((String) attributes.get("secretArn")); + + return secret; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceCacheClient.java index e879ac7eba8..2dc4ed7fe4e 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceCacheClient.java @@ -16,20 +16,20 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; + import com.amazonaws.services.ecs.model.LoadBalancer; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - +import com.netflix.spinnaker.moniker.Moniker; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class ServiceCacheClient extends AbstractCacheClient { @@ -58,14 +58,18 @@ protected Service convert(CacheData cacheData) { service.setDesiredCount((Integer) attributes.get("desiredCount")); 
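+ // The attributes map holds whatever the ECS service caching agent stored;
+ // cache entries written before subnets, securityGroups and moniker were
+ // captured simply return null for those keys, so the fields stay unset.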
service.setMaximumPercent((Integer) attributes.get("maximumPercent")); service.setMinimumHealthyPercent((Integer) attributes.get("minimumHealthyPercent")); + service.setSubnets((List) attributes.get("subnets")); + service.setSecurityGroups((List) attributes.get("securityGroups")); if (attributes.containsKey("loadBalancers")) { - List> loadBalancers = (List>) attributes.get("loadBalancers"); + List> loadBalancers = + (List>) attributes.get("loadBalancers"); List deserializedLoadbalancers = new ArrayList<>(loadBalancers.size()); for (Map serializedLoadbalancer : loadBalancers) { if (serializedLoadbalancer != null) { - deserializedLoadbalancers.add(objectMapper.convertValue(serializedLoadbalancer, LoadBalancer.class)); + deserializedLoadbalancers.add( + objectMapper.convertValue(serializedLoadbalancer, LoadBalancer.class)); } } @@ -74,9 +78,12 @@ protected Service convert(CacheData cacheData) { service.setLoadBalancers(Collections.emptyList()); } - service.setCreatedAt((Long) attributes.get("createdAt")); + if (attributes.containsKey("moniker")) { + service.setMoniker(objectMapper.convertValue(attributes.get("moniker"), Moniker.class)); + } + return service; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceDiscoveryCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceDiscoveryCacheClient.java new file mode 100644 index 00000000000..9ef409dbe52 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/ServiceDiscoveryCacheClient.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.client; + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICE_DISCOVERY_REGISTRIES; + +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class ServiceDiscoveryCacheClient extends AbstractCacheClient { + + @Autowired + public ServiceDiscoveryCacheClient(Cache cacheView) { + super(cacheView, SERVICE_DISCOVERY_REGISTRIES.toString()); + } + + @Override + protected ServiceDiscoveryRegistry convert(CacheData cacheData) { + ServiceDiscoveryRegistry registry = new ServiceDiscoveryRegistry(); + Map attributes = cacheData.getAttributes(); + + registry.setAccount((String) attributes.get("account")); + registry.setRegion((String) attributes.get("region")); + registry.setName((String) attributes.get("serviceName")); + registry.setArn((String) attributes.get("serviceArn")); + registry.setId((String) attributes.get("serviceId")); + + return registry; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TargetHealthCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TargetHealthCacheClient.java new file mode 100644 index 00000000000..74ba5cde49a --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TargetHealthCacheClient.java @@ -0,0 +1,70 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.client; + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TARGET_HEALTHS; + +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsTargetHealth; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class TargetHealthCacheClient extends AbstractCacheClient<EcsTargetHealth> { + private ObjectMapper objectMapper; + + @Autowired + public TargetHealthCacheClient(Cache cacheView, ObjectMapper objectMapper) { + super(cacheView, TARGET_HEALTHS.toString()); + this.objectMapper = objectMapper; + } + + @Override + protected EcsTargetHealth convert(CacheData cacheData) { + EcsTargetHealth targetHealth = new EcsTargetHealth(); + Map<String, Object> attributes = cacheData.getAttributes(); + + targetHealth.setTargetGroupArn((String) attributes.get("targetGroupArn")); + + if (attributes.containsKey("targetHealthDescriptions")) { + List<Map<String, Object>> targetHealthDescriptions = + (List<Map<String, Object>>) attributes.get("targetHealthDescriptions"); + List<TargetHealthDescription> deserializedTargetHealthDescriptions = + new ArrayList<>(targetHealthDescriptions.size()); + + for (Map<String, Object> serializedTargetHealthDescription : targetHealthDescriptions) { + if (serializedTargetHealthDescription != null) { + deserializedTargetHealthDescriptions.add( + objectMapper.convertValue( + serializedTargetHealthDescription, TargetHealthDescription.class)); + } + } + + targetHealth.setTargetHealthDescriptions(deserializedTargetHealthDescriptions); + } else { + targetHealth.setTargetHealthDescriptions(Collections.emptyList()); + } + + return targetHealth; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskCacheClient.java index a53f88437be..3f3b3e007bd 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskCacheClient.java @@ -16,20 +16,19 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; + import com.amazonaws.services.ecs.model.Container; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Task; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class TaskCacheClient extends AbstractCacheClient<Task> { @@ -52,17 +51,21 @@ protected Task convert(CacheData cacheData) { task.setGroup((String) attributes.get("group")); task.setLastStatus((String) attributes.get("lastStatus"));
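+ // healthStatus and availabilityZone are newly cached task attributes; cache
+ // entries written before this change return null for them, leaving the
+ // corresponding Task fields unset.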
task.setDesiredStatus((String) attributes.get("desiredStatus")); + task.setHealthStatus((String) attributes.get("healthStatus")); + task.setAvailabilityZone((String) attributes.get("availabilityZone")); if (attributes.containsKey("startedAt")) { task.setStartedAt((Long) attributes.get("startedAt")); } if (attributes.containsKey("containers")) { - List> containers = (List>) attributes.get("containers"); + List> containers = + (List>) attributes.get("containers"); List deserializedLoadbalancers = new ArrayList<>(containers.size()); for (Map serializedContainer : containers) { if (serializedContainer != null) { - deserializedLoadbalancers.add(objectMapper.convertValue(serializedContainer, Container.class)); + deserializedLoadbalancers.add( + objectMapper.convertValue(serializedContainer, Container.class)); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskDefinitionCacheClient.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskDefinitionCacheClient.java index 5c775bcce81..07f243b5484 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskDefinitionCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskDefinitionCacheClient.java @@ -16,20 +16,19 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; + import com.amazonaws.services.ecs.model.ContainerDefinition; import com.amazonaws.services.ecs.model.TaskDefinition; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class TaskDefinitionCacheClient extends AbstractCacheClient { @@ -48,14 +47,19 @@ protected TaskDefinition convert(CacheData cacheData) { taskDefinition.setTaskDefinitionArn((String) attributes.get("taskDefinitionArn")); taskDefinition.setTaskRoleArn((String) attributes.get("taskRoleArn")); + taskDefinition.setCpu((String) attributes.get("cpu")); + taskDefinition.setMemory((String) attributes.get("memory")); if (attributes.containsKey("containerDefinitions")) { - List> containerDefinitions = (List>) attributes.get("containerDefinitions"); - List deserializedContainerDefinitions = new ArrayList<>(containerDefinitions.size()); + List> containerDefinitions = + (List>) attributes.get("containerDefinitions"); + List deserializedContainerDefinitions = + new ArrayList<>(containerDefinitions.size()); for (Map serializedContainerDefinitions : containerDefinitions) { if (serializedContainerDefinitions != null) { - deserializedContainerDefinitions.add(objectMapper.convertValue(serializedContainerDefinitions, ContainerDefinition.class)); + deserializedContainerDefinitions.add( + objectMapper.convertValue(serializedContainerDefinitions, ContainerDefinition.class)); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskHealthCacheClient.java 
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskHealthCacheClient.java index dbcef502bb9..04ffb6098e3 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskHealthCacheClient.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/client/TaskHealthCacheClient.java @@ -16,16 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.client; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH; + import com.netflix.spinnaker.cats.cache.Cache; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.model.TaskHealth; +import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH; - @Component public class TaskHealthCacheClient extends AbstractCacheClient { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Application.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Application.java new file mode 100644 index 00000000000..526af88f3a3 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Application.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Amazon.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.model; + +import java.util.Collection; +import java.util.Map; +import lombok.Data; + +@Data +public class Application { + String name; + Map> relationships; +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsLoadBalancerCache.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsLoadBalancerCache.java index 4ac48b21b92..5eb49f51a9d 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsLoadBalancerCache.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsLoadBalancerCache.java @@ -20,10 +20,9 @@ import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.model.LoadBalancer; import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; -import lombok.Data; - import java.util.List; import java.util.Set; +import lombok.Data; @Data public class EcsLoadBalancerCache implements LoadBalancer { @@ -45,7 +44,7 @@ public class EcsLoadBalancerCache implements LoadBalancer { private List subnets; private List securityGroups; private List targetGroups; - //private List state; + // private List state; private Set serverGroups; @Override @@ -55,6 +54,6 @@ public String getName() { @Override public String getType() { - return loadBalancerType; + return cloudProvider; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsMetricAlarm.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsMetricAlarm.java index 2e80adb9d26..8ed5a856f33 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsMetricAlarm.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsMetricAlarm.java @@ -24,12 +24,12 @@ public class EcsMetricAlarm extends MetricAlarm { private String accountName; private String region; - public EcsMetricAlarm withAccountName(String accountName){ + public EcsMetricAlarm withAccountName(String accountName) { setAccountName(accountName); return this; } - public EcsMetricAlarm withRegion(String region){ + public EcsMetricAlarm withRegion(String region) { setRegion(region); return this; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsTargetHealth.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsTargetHealth.java new file mode 100644 index 00000000000..90428216e80 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/EcsTargetHealth.java @@ -0,0 +1,26 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.model; + +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription; +import java.util.List; +import lombok.Data; + +@Data +public class EcsTargetHealth { + String targetGroupArn; + List targetHealthDescriptions; +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/IamRole.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/IamRole.java index 4845164eeef..3ab2ce6e4ef 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/IamRole.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/IamRole.java @@ -18,12 +18,11 @@ import com.netflix.spinnaker.clouddriver.aws.model.Role; import com.netflix.spinnaker.clouddriver.aws.model.TrustRelationship; +import java.util.Set; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.Set; - @Data @AllArgsConstructor @NoArgsConstructor @@ -37,5 +36,4 @@ public class IamRole implements Role { String name; String accountName; Set trustRelationships; - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Secret.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Secret.java new file mode 100644 index 00000000000..3f8f7da46fc --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Secret.java @@ -0,0 +1,26 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.model; + +import lombok.Data; + +@Data +public class Secret { + String account; + String region; + String name; + String arn; +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Service.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Service.java index 3906c03c377..58adf6c0b51 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Service.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Service.java @@ -17,9 +17,9 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.model; import com.amazonaws.services.ecs.model.LoadBalancer; -import lombok.Data; - +import com.netflix.spinnaker.moniker.Moniker; import java.util.List; +import lombok.Data; @Data public class Service { @@ -36,5 +36,8 @@ public class Service { int maximumPercent; int minimumHealthyPercent; List loadBalancers; + List subnets; + List securityGroups; long createdAt; + Moniker moniker; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/ServiceDiscoveryRegistry.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/ServiceDiscoveryRegistry.java new file mode 100644 index 00000000000..147a39fcff8 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/ServiceDiscoveryRegistry.java @@ -0,0 +1,27 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache.model; + +import lombok.Data; + +@Data +public class ServiceDiscoveryRegistry { + String account; + String region; + String id; + String name; + String arn; +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Task.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Task.java index 58a748c25f6..4e29fdc3da5 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Task.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/cache/model/Task.java @@ -17,9 +17,8 @@ package com.netflix.spinnaker.clouddriver.ecs.cache.model; import com.amazonaws.services.ecs.model.Container; -import lombok.Data; - import java.util.List; +import lombok.Data; @Data public class Task { @@ -30,6 +29,8 @@ public class Task { String group; String lastStatus; String desiredStatus; + String healthStatus; + String availabilityZone; long startedAt; List containers; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsCloudMetricController.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsCloudMetricController.java index 96833392e80..164f5096599 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsCloudMetricController.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsCloudMetricController.java @@ -18,12 +18,11 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm; import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsCloudMetricProvider; +import java.util.Collection; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; -import java.util.Collection; - @RestController @RequestMapping("/ecs/cloudMetrics") public class EcsCloudMetricController { @@ -34,7 +33,6 @@ public EcsCloudMetricController(EcsCloudMetricProvider provider) { this.provider = provider; } - @RequestMapping(value = {"/alarms"}) public Collection findAllMetricAlarms() { return provider.getAllMetricAlarms(); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsClusterController.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsClusterController.java index 5eeca9d9a47..8ca18a8af97 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsClusterController.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsClusterController.java @@ -16,14 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.controllers; +import com.amazonaws.services.ecs.model.Cluster; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsCluster; import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsClusterProvider; +import java.util.Collection; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; -import java.util.Collection; - @RestController public class EcsClusterController { @@ -34,10 +35,14 @@ public EcsClusterController(EcsClusterProvider ecsClusterProvider) { this.ecsClusterProvider = 
ecsClusterProvider;
   }
-
   @RequestMapping(value = {"/ecs/ecsClusters"})
   public Collection<EcsCluster> getAllEcsClusters() {
     return ecsClusterProvider.getAllEcsClusters();
   }
+
+  @RequestMapping(value = {"/ecs/ecsClusterDescriptions/{account}/{region}"})
+  public Collection<Cluster> getEcsClusterDescriptions(
+      @PathVariable String account, @PathVariable String region) {
+    return ecsClusterProvider.getEcsClusterDescriptions(account, region);
+  }
 }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsImagesController.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsImagesController.java
index ca776371007..cdc4a78cd88 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsImagesController.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsImagesController.java
@@ -18,16 +18,15 @@
 import com.netflix.spinnaker.clouddriver.ecs.model.EcsDockerImage;
 import com.netflix.spinnaker.clouddriver.ecs.provider.view.ImageRepositoryProvider;
+import java.util.List;
+import java.util.stream.Collectors;
+import javax.servlet.http.HttpServletRequest;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.web.bind.annotation.RequestMapping;
 import org.springframework.web.bind.annotation.RequestMethod;
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;
-import javax.servlet.http.HttpServletRequest;
-import java.util.List;
-import java.util.stream.Collectors;
-
 @RestController
 @RequestMapping("/ecs/images")
 public class EcsImagesController {
@@ -39,16 +38,19 @@ public EcsImagesController(List<ImageRepositoryProvider> imageRepositoryProvider
   }
 
   @RequestMapping(value = "/find", method = RequestMethod.GET)
-  public List<EcsDockerImage> findImage(@RequestParam("q") String dockerImageUrl, HttpServletRequest request) {
+  public List<EcsDockerImage> findImage(
+      @RequestParam("q") String dockerImageUrl, HttpServletRequest request) {
     for (ImageRepositoryProvider provider : imageRepositoryProviders) {
       if (provider.handles(dockerImageUrl)) {
         return provider.findImage(dockerImageUrl);
       }
     }
 
-    throw new Error("The URL is not support by any of the providers. Currently enabled and supported providers are: " +
-      imageRepositoryProviders.stream().
-        map(ImageRepositoryProvider::getRepositoryName).
-        collect(Collectors.joining(", ")) + ".");
+    throw new Error(
+        "The URL is not supported by any of the providers. Currently enabled and supported providers are: "
+            + imageRepositoryProviders.stream()
+                .map(ImageRepositoryProvider::getRepositoryName)
+                .collect(Collectors.joining(", "))
+            + ".");
   }
 }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsSecretController.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsSecretController.java
new file mode 100644
index 00000000000..9743aa5e0bc
--- /dev/null
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsSecretController.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file.
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.controllers; + +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret; +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsSecretProvider; +import java.util.Collection; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class EcsSecretController { + + EcsSecretProvider secretProvider; + + @Autowired + public EcsSecretController(EcsSecretProvider secretProvider) { + this.secretProvider = secretProvider; + } + + @RequestMapping(value = {"/ecs/secrets"}) + public Collection getAllSecrets() { + return secretProvider.getAllSecrets(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsServiceDiscoveryController.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsServiceDiscoveryController.java new file mode 100644 index 00000000000..16f5f6acc95 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsServiceDiscoveryController.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.controllers; + +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry; +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsServiceDiscoveryProvider; +import java.util.Collection; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class EcsServiceDiscoveryController { + + EcsServiceDiscoveryProvider serviceDiscoveryProvider; + + @Autowired + public EcsServiceDiscoveryController(EcsServiceDiscoveryProvider serviceDiscoveryProvider) { + this.serviceDiscoveryProvider = serviceDiscoveryProvider; + } + + @RequestMapping(value = {"/ecs/serviceDiscoveryRegistries"}) + public Collection getAllServiceDiscoveryRegistries() { + return serviceDiscoveryProvider.getAllServiceDiscoveryRegistries(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/EcsServerGroupController.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/EcsServerGroupController.java index b53a90bdb9f..bb86bbb5491 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/EcsServerGroupController.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/EcsServerGroupController.java @@ -1,3 +1,19 @@ +/* + * Copyright 2017 Lookout, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package com.netflix.spinnaker.clouddriver.ecs.controllers.servergroup; import com.amazonaws.services.ecs.AmazonECS; @@ -10,20 +26,19 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; import com.netflix.spinnaker.clouddriver.ecs.model.EcsServerGroupEvent; import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.ArrayList; +import java.util.List; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.*; -import java.util.ArrayList; -import java.util.List; - @RestController @RequestMapping("/applications/{application}/serverGroups/{account}/{serverGroupName}") public class EcsServerGroupController { - private final AccountCredentialsProvider accountCredentialsProvider; + private final CredentialsRepository credentialsRepository; private final AmazonClientProvider amazonClientProvider; @@ -32,43 +47,47 @@ public class EcsServerGroupController { private final ServerGroupEventStatusConverter statusConverter; @Autowired - public EcsServerGroupController(AccountCredentialsProvider accountCredentialsProvider, - AmazonClientProvider amazonClientProvider, - ServiceCacheClient serviceCacheClient, - ServerGroupEventStatusConverter statusConverter) { - this.accountCredentialsProvider = accountCredentialsProvider; + public EcsServerGroupController( + CredentialsRepository credentialsRepository, + AmazonClientProvider amazonClientProvider, + ServiceCacheClient serviceCacheClient, + ServerGroupEventStatusConverter statusConverter) { + this.credentialsRepository = credentialsRepository; this.amazonClientProvider = amazonClientProvider; this.serviceCacheClient = serviceCacheClient; this.statusConverter = statusConverter; } @RequestMapping(value = "/events", method = RequestMethod.GET) - ResponseEntity getServerGroupEvents(@PathVariable String account, - @PathVariable String serverGroupName, - @RequestParam(value = "region", required = true) String region) { - NetflixAmazonCredentials credentials = (NetflixAmazonCredentials) accountCredentialsProvider.getCredentials(account); + ResponseEntity getServerGroupEvents( + @PathVariable String account, + @PathVariable String serverGroupName, + @RequestParam(value = "region", required = true) String region) { + NetflixAmazonCredentials credentials = credentialsRepository.getOne(account); - if (!(credentials instanceof NetflixECSCredentials)) { - return new ResponseEntity(String.format("Account %s is not an ECS account", account), HttpStatus.BAD_REQUEST); + if (credentials == null) { + return new ResponseEntity( + String.format("Account %s is not an ECS account", account), HttpStatus.BAD_REQUEST); } AmazonECS ecs = amazonClientProvider.getAmazonEcs(credentials, region, true); - Service cachedService = serviceCacheClient.getAll(account, region).stream() - .filter(service -> service.getServiceName().equals(serverGroupName)) - .findFirst() - .get(); + Service cachedService = + serviceCacheClient.getAll(account, region).stream() + .filter(service -> service.getServiceName().equals(serverGroupName)) + .findFirst() + .get(); - DescribeServicesResult describeServicesResult = ecs.describeServices( - new DescribeServicesRequest() - .withServices(serverGroupName) - .withCluster(cachedService.getClusterArn()) - ); + 
DescribeServicesResult describeServicesResult =
+        ecs.describeServices(
+            new DescribeServicesRequest()
+                .withServices(serverGroupName)
+                .withCluster(cachedService.getClusterArn()));
 
     if (describeServicesResult.getServices().size() == 0) {
       return new ResponseEntity(
-        String.format("Server group %s was not found in account ", serverGroupName, account),
-        HttpStatus.NOT_FOUND);
+          String.format("Server group %s was not found in account %s", serverGroupName, account),
+          HttpStatus.NOT_FOUND);
     }
 
     List<ServiceEvent> rawEvents = describeServicesResult.getServices().get(0).getEvents();
@@ -76,12 +95,12 @@ ResponseEntity getServerGroupEvents(@PathVariable String account,
     List<EcsServerGroupEvent> events = new ArrayList<>();
 
     for (ServiceEvent rawEvent : rawEvents) {
-      EcsServerGroupEvent newEvent = new EcsServerGroupEvent(
-        rawEvent.getMessage(),
-        rawEvent.getCreatedAt(),
-        rawEvent.getId(),
-        statusConverter.inferEventStatus(rawEvent)
-      );
+      EcsServerGroupEvent newEvent =
+          new EcsServerGroupEvent(
+              rawEvent.getMessage(),
+              rawEvent.getCreatedAt(),
+              rawEvent.getId(),
+              statusConverter.inferEventStatus(rawEvent));
       events.add(newEvent);
     }
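The controller above swaps AccountCredentialsProvider for the newer CredentialsRepository, whose getOne(name) returns null for an account it does not manage, which is why the old instanceof check becomes a null check. A minimal sketch of that lookup contract, using a hypothetical stand-in repository rather than Spinnaker's actual implementation:

import java.util.Map;

class CredentialsLookupSketch {
  // Hypothetical stand-in for CredentialsRepository<NetflixECSCredentials>.
  static class StubRepository<T> {
    private final Map<String, T> credentialsByName;

    StubRepository(Map<String, T> credentialsByName) {
      this.credentialsByName = credentialsByName;
    }

    // Mirrors the getOne(name) contract relied on above: null when the account is unknown.
    T getOne(String name) {
      return credentialsByName.get(name);
    }
  }

  public static void main(String[] args) {
    StubRepository<String> repository = new StubRepository<>(Map.of("ecs-account", "creds"));

    if (repository.getOne("some-other-account") == null) {
      // The controller answers HttpStatus.BAD_REQUEST in this branch.
      System.out.println("Account some-other-account is not an ECS account");
    }
  }
}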
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/ServerGroupEventStatusConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/ServerGroupEventStatusConverter.java
index 2e02cf0bd6c..d63d1b08078 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/ServerGroupEventStatusConverter.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/controllers/servergroup/ServerGroupEventStatusConverter.java
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2017 Lookout, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package com.netflix.spinnaker.clouddriver.ecs.controllers.servergroup;
 
 import com.amazonaws.services.ecs.model.ServiceEvent;
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/CloneServiceAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/CloneServiceAtomicOperationConverter.java
index 4a9fa84e6ba..ac34ce06092 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/CloneServiceAtomicOperationConverter.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/CloneServiceAtomicOperationConverter.java
@@ -16,21 +16,19 @@
 package com.netflix.spinnaker.clouddriver.ecs.deploy.converters;
 
-import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation;
 import com.netflix.spinnaker.clouddriver.ecs.EcsOperation;
-import com.netflix.spinnaker.clouddriver.ecs.deploy.description.BasicEcsDeployDescription;
 import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CloneServiceDescription;
 import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.CloneServiceAtomicOperation;
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
 import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
 import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport;
-import org.springframework.stereotype.Component;
-
 import java.util.Map;
+import org.springframework.stereotype.Component;
 
 @EcsOperation(AtomicOperations.CLONE_SERVER_GROUP)
 @Component("ecsCloneLastService")
-public class CloneServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport {
+public class CloneServiceAtomicOperationConverter
+    extends AbstractAtomicOperationsCredentialsSupport {
 
   @Override
   public AtomicOperation convertOperation(Map input) {
@@ -39,10 +37,10 @@ public AtomicOperation convertOperation(Map input) {
 
   @Override
   public CloneServiceDescription convertDescription(Map input) {
-    CloneServiceDescription converted = getObjectMapper().convertValue(input, CloneServiceDescription.class);
+    CloneServiceDescription converted =
+        getObjectMapper().convertValue(input, CloneServiceDescription.class);
     converted.setCredentials(getCredentialsObject(input.get("credentials").toString()));
     return converted;
   }
-
 }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DeleteScalingPolicyAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DeleteScalingPolicyAtomicOperationConverter.java
index 674bd9a544d..904cb15df10 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DeleteScalingPolicyAtomicOperationConverter.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DeleteScalingPolicyAtomicOperationConverter.java
@@ -17,20 +17,18 @@
 package com.netflix.spinnaker.clouddriver.ecs.deploy.converters;
 
 import com.netflix.spinnaker.clouddriver.ecs.EcsOperation;
-import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CloneServiceDescription;
 import com.netflix.spinnaker.clouddriver.ecs.deploy.description.DeleteScalingPolicyDescription;
-import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.CloneServiceAtomicOperation;
 import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.DeleteScalingPolicyAtomicOperation;
 import
com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.DELETE_SCALING_POLICY) @Component("ecsDeleteScalingPolicy") -public class DeleteScalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class DeleteScalingPolicyAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -39,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public DeleteScalingPolicyDescription convertDescription(Map input) { - DeleteScalingPolicyDescription converted = getObjectMapper().convertValue(input, DeleteScalingPolicyDescription.class); + DeleteScalingPolicyDescription converted = + getObjectMapper().convertValue(input, DeleteScalingPolicyDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DestroyServiceAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DestroyServiceAtomicOperationConverter.java index 859f23ba795..a22c5d2102d 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DestroyServiceAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DestroyServiceAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.DESTROY_SERVER_GROUP) @Component("ecsDestroyServerGroup") -public class DestroyServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class DestroyServiceAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -37,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public ModifyServiceDescription convertDescription(Map input) { - ModifyServiceDescription converted = getObjectMapper().convertValue(input, ModifyServiceDescription.class); + ModifyServiceDescription converted = + getObjectMapper().convertValue(input, ModifyServiceDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DisableServiceAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DisableServiceAtomicOperationConverter.java index e8584988299..e77ae4d1329 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DisableServiceAtomicOperationConverter.java +++ 
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/DisableServiceAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.DISABLE_SERVER_GROUP) @Component("ecsDisableServerGroup") -public class DisableServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class DisableServiceAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -37,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public ModifyServiceDescription convertDescription(Map input) { - ModifyServiceDescription converted = getObjectMapper().convertValue(input, ModifyServiceDescription.class); + ModifyServiceDescription converted = + getObjectMapper().convertValue(input, ModifyServiceDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverter.java index 60073451007..5cadf511415 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("ecsCreateServerGroup") -public class EcsCreateServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class EcsCreateServerGroupAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -37,7 +37,8 @@ public AtomicOperation convertOperation(Map input) { @Override public CreateServerGroupDescription convertDescription(Map input) { - CreateServerGroupDescription converted = getObjectMapper().convertValue(input, CreateServerGroupDescription.class); + CreateServerGroupDescription converted = + getObjectMapper().convertValue(input, CreateServerGroupDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EnableServiceAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EnableServiceAtomicOperationConverter.java index 4e1db41a1a0..05adc635fbc 100644 --- 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EnableServiceAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EnableServiceAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.ENABLE_SERVER_GROUP) @Component("ecsEnableServerGroup") -public class EnableServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class EnableServiceAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -37,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public ModifyServiceDescription convertDescription(Map input) { - ModifyServiceDescription converted = getObjectMapper().convertValue(input, ModifyServiceDescription.class); + ModifyServiceDescription converted = + getObjectMapper().convertValue(input, ModifyServiceDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/ResizeServiceAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/ResizeServiceAtomicOperationConverter.java index 1f74c6f06bb..7f19a392fb8 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/ResizeServiceAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/ResizeServiceAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; -@Component("resizeServerGroup") +@Component("ecsResizeServerGroup") @EcsOperation(AtomicOperations.RESIZE_SERVER_GROUP) -public class ResizeServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class ResizeServiceAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -37,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public ResizeServiceDescription convertDescription(Map input) { - ResizeServiceDescription converted = getObjectMapper().convertValue(input, ResizeServiceDescription.class); + ResizeServiceDescription converted = + getObjectMapper().convertValue(input, ResizeServiceDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StartServiceAtomicOperationConverter.java 
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StartServiceAtomicOperationConverter.java index d1b276dca89..bf2cf846dfe 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StartServiceAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StartServiceAtomicOperationConverter.java @@ -17,20 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.converters; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; -import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CloneServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.StartServiceDescription; -import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.CloneServiceAtomicOperation; import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.StartServiceAtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.START_SERVER_GROUP) @Component("ecsStartServerGroup") -public class StartServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class StartServiceAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -39,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public StartServiceDescription convertDescription(Map input) { - StartServiceDescription converted = getObjectMapper().convertValue(input, StartServiceDescription.class); + StartServiceDescription converted = + getObjectMapper().convertValue(input, StartServiceDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StopServiceAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StopServiceAtomicOperationConverter.java index dfb4885d19e..0b7f4270ace 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StopServiceAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/StopServiceAtomicOperationConverter.java @@ -17,20 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.converters; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; -import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CloneServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.StopServiceDescription; -import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.CloneServiceAtomicOperation; import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.StopServiceAtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; 
@EcsOperation(AtomicOperations.STOP_SERVER_GROUP) @Component("ecsStopServerGroup") -public class StopServiceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class StopServiceAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -39,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public StopServiceDescription convertDescription(Map input) { - StopServiceDescription converted = getObjectMapper().convertValue(input, StopServiceDescription.class); + StopServiceDescription converted = + getObjectMapper().convertValue(input, StopServiceDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/TerminateInstancesAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/TerminateInstancesAtomicOperationConverter.java index b4344e68da5..aa4be06c0e3 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/TerminateInstancesAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/TerminateInstancesAtomicOperationConverter.java @@ -22,15 +22,15 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.ArrayList; import java.util.List; import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.TERMINATE_INSTANCES) @Component("ecsTerminateInstances") -public class TerminateInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class TerminateInstancesAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { return new TerminateInstancesAtomicOperation(convertDescription(input)); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpdateServiceAndTaskConfigAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpdateServiceAndTaskConfigAtomicOperationConverter.java index 4b1f2e2b711..2a29b97ad91 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpdateServiceAndTaskConfigAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpdateServiceAndTaskConfigAtomicOperationConverter.java @@ -22,13 +22,13 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.UPDATE_LAUNCH_CONFIG) @Component("ecsUpdateServiceAndTaskConfig") -public class UpdateServiceAndTaskConfigAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class 
UpdateServiceAndTaskConfigAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -37,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public UpdateServiceAndTaskConfigDescription convertDescription(Map input) { - UpdateServiceAndTaskConfigDescription converted = getObjectMapper().convertValue(input, UpdateServiceAndTaskConfigDescription.class); + UpdateServiceAndTaskConfigDescription converted = + getObjectMapper().convertValue(input, UpdateServiceAndTaskConfigDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpsertScalingPolicyAtomicOperationConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpsertScalingPolicyAtomicOperationConverter.java index ec82cf6f79f..41c7f39b4d2 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpsertScalingPolicyAtomicOperationConverter.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/UpsertScalingPolicyAtomicOperationConverter.java @@ -16,22 +16,19 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.converters; -import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAlarmDescription; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; -import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CloneServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.UpsertScalingPolicyDescription; -import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.CloneServiceAtomicOperation; import com.netflix.spinnaker.clouddriver.ecs.deploy.ops.UpsertScalingPolicyAtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import org.springframework.stereotype.Component; - import java.util.Map; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.UPSERT_SCALING_POLICY) @Component("ecsUpsertScalingPolicy") -public class UpsertScalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class UpsertScalingPolicyAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { @Override public AtomicOperation convertOperation(Map input) { @@ -40,10 +37,10 @@ public AtomicOperation convertOperation(Map input) { @Override public UpsertScalingPolicyDescription convertDescription(Map input) { - UpsertScalingPolicyDescription converted = getObjectMapper().convertValue(input, UpsertScalingPolicyDescription.class); + UpsertScalingPolicyDescription converted = + getObjectMapper().convertValue(input, UpsertScalingPolicyDescription.class); converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); return converted; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/AbstractECSDescription.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/AbstractECSDescription.java index 504ae69a342..e5413942721 100644 --- 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/AbstractECSDescription.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/AbstractECSDescription.java @@ -17,14 +17,22 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.description; import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription; +import com.netflix.spinnaker.moniker.Moniker; import lombok.Data; import lombok.EqualsAndHashCode; @Data @EqualsAndHashCode(callSuper = false) public abstract class AbstractECSDescription extends AbstractAmazonCredentialsDescription { - String application; + /** @deprecated This field is deprecated in favour of [moniker.app] */ + @Deprecated String application; + + /** @deprecated This field is deprecated in favour of [moniker.stack] */ + @Deprecated String stack; + + /** @deprecated This field is deprecated in favour of [moniker.detail] */ + @Deprecated String freeFormDetails; + String region; - String stack; - String freeFormDetails; + Moniker moniker; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/BasicEcsDeployDescription.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/BasicEcsDeployDescription.java index 3b597042b12..1bdb26ef30e 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/BasicEcsDeployDescription.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/BasicEcsDeployDescription.java @@ -19,7 +19,8 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription; import com.netflix.spinnaker.clouddriver.deploy.DeployDescription; -public class BasicEcsDeployDescription extends AbstractAmazonCredentialsDescription implements DeployDescription { +public class BasicEcsDeployDescription extends AbstractAmazonCredentialsDescription + implements DeployDescription { - //TODO - implement this stub + // TODO - implement this stub } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/CreateServerGroupDescription.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/CreateServerGroupDescription.java index ebd6fc3f234..2a5bbc94fec 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/CreateServerGroupDescription.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/CreateServerGroupDescription.java @@ -16,47 +16,138 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.description; -import com.amazonaws.services.cloudwatch.model.MetricAlarm; +import com.amazonaws.services.ecs.model.CapacityProviderStrategyItem; +import com.amazonaws.services.ecs.model.PlacementConstraint; import com.amazonaws.services.ecs.model.PlacementStrategy; import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import lombok.Data; -import lombok.EqualsAndHashCode; - +import com.netflix.spinnaker.kork.artifacts.model.Artifact; import java.util.List; import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; @Data @EqualsAndHashCode(callSuper = false) public class CreateServerGroupDescription extends AbstractECSDescription { String ecsClusterName; String iamRole; - Integer containerPort; - String targetGroup; + + /** + 
* @deprecated this field only allows for one container port to be specified. ECS supports the
+   *     ability to have multiple target groups and container ports to be mapped to a container.
+   *     <p>This field is deprecated in favour of [targetGroupMappings.containerPort]
+   */
+  @Deprecated Integer containerPort;
+
+  /**
+   * @deprecated this field only allows for one target group to be specified. ECS supports the
+   *     ability to have multiple target groups and container ports to be mapped to a container.
+   *     <p>This field is deprecated in favour of [targetGroupMappings.targetGroup]
+   */
+  @Deprecated String targetGroup;
+
 List<String> securityGroupNames;
 String portProtocol;
-  Integer computeUnits;
-  Integer reservedMemory;
+  @Nullable Integer computeUnits;
+  @Nullable Integer reservedMemory;
+
+  Map<String, String> environmentVariables;
+  Map<String, String> tags;
-  String dockerImageAddress;
+  @Nullable String dockerImageAddress;
+  String dockerImageCredentialsSecret;
 ServerGroup.Capacity capacity;
 Map<String, List<String>> availabilityZones;
-  List<MetricAlarm> autoscalingPolicies;
+  boolean copySourceScalingPoliciesAndActions = true;
+  Source source = new Source();
+  List<PlacementStrategy> placementStrategySequence;
+  List<PlacementConstraint> placementConstraints;
 String networkMode;
-  String subnetType;
+
+  /**
+   * @deprecated this field only allows for one subnetType, whereas ECS supports the ability to
+   *     deploy to multiple subnets.
+   */
+  @Deprecated String subnetType;
+
+  Set<String> subnetTypes;
+
 Boolean associatePublicIpAddress;
 Integer healthCheckGracePeriodSeconds;
 String launchType;
+  String platformVersion;
+
+  String logDriver;
+  Map<String, String> logOptions;
+  Map<String, String> dockerLabels;
+
+  List<ServiceDiscoveryAssociation> serviceDiscoveryAssociations;
+
+  boolean useTaskDefinitionArtifact;
+  boolean evaluateTaskDefinitionArtifactExpressions;
+  Artifact resolvedTaskDefinitionArtifact;
+  Map spelProcessedTaskDefinitionArtifact;
+  String taskDefinitionArtifactAccount;
+  Map<String, String> containerToImageMap;
+  boolean enableExecuteCommand;
+  boolean enableDeploymentCircuitBreaker;
+
+  /**
+   * @deprecated this field only allows for one container to be specified. ECS supports the ability
+   *     to have multiple target groups and container ports to be mapped to one or more containers.
+   *     <p>This field is deprecated in favour of [targetGroupMappings.containerName]
+   */
+  @Deprecated String loadBalancedContainer;
+
+  Set<TargetGroupProperties> targetGroupMappings;
+
+  List<CapacityProviderStrategyItem> capacityProviderStrategy;
 
   @Override
   public String getRegion() {
-    //CreateServerGroupDescription does not contain a region. Instead it has AvailabilityZones
+    // CreateServerGroupDescription does not contain a region. Instead it has AvailabilityZones
     return getAvailabilityZones().keySet().iterator().next();
   }
+
+  @Data
+  @EqualsAndHashCode(callSuper = false)
+  public static class Source {
+    String account;
+    String region;
+    String asgName;
+    Boolean useSourceCapacity;
+  }
+
+  @Data
+  @EqualsAndHashCode(callSuper = false)
+  public static class ServiceDiscoveryAssociation {
+    ServiceRegistry registry;
+    Integer containerPort;
+    String containerName;
+  }
+
+  @Data
+  @EqualsAndHashCode(callSuper = false)
+  public static class ServiceRegistry {
+    String arn;
+    String name;
+    String id;
+  }
+
+  @Data
+  @EqualsAndHashCode(callSuper = false)
+  public static class TargetGroupProperties {
+    String containerName;
+    Integer containerPort;
+    String targetGroup;
+  }
 }
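The deprecation notes above all point at the new targetGroupMappings field. A short sketch of how a caller might populate it in place of the old containerPort, targetGroup, and loadBalancedContainer trio; the container names, ports, and target group names are hypothetical, and the setters come from Lombok's @Data on the description class:

import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription;
import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription.TargetGroupProperties;
import java.util.HashSet;
import java.util.Set;

class TargetGroupMappingsSketch {
  public static void main(String[] args) {
    CreateServerGroupDescription description = new CreateServerGroupDescription();

    // One mapping per (container, port, target group) tuple.
    TargetGroupProperties web = new TargetGroupProperties();
    web.setContainerName("web");
    web.setContainerPort(80);
    web.setTargetGroup("myapp-web-tg");

    TargetGroupProperties admin = new TargetGroupProperties();
    admin.setContainerName("admin");
    admin.setContainerPort(8080);
    admin.setTargetGroup("myapp-admin-tg");

    Set<TargetGroupProperties> mappings = new HashSet<>();
    mappings.add(web);
    mappings.add(admin);
    description.setTargetGroupMappings(mappings);
  }
}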
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/TerminateInstancesDescription.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/TerminateInstancesDescription.java
index 83e78b8e5fc..77c845b2e48 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/TerminateInstancesDescription.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/description/TerminateInstancesDescription.java
@@ -16,11 +16,10 @@
 package com.netflix.spinnaker.clouddriver.ecs.deploy.description;
 
+import java.util.List;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
 
-import java.util.List;
-
 @Data
 @EqualsAndHashCode(callSuper = false)
 public class TerminateInstancesDescription extends AbstractECSDescription {
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/handlers/BasicEcsDeployHandler.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/handlers/BasicEcsDeployHandler.java
index 4818b08e3b7..1271f0edcd9 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/handlers/BasicEcsDeployHandler.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/handlers/BasicEcsDeployHandler.java
@@ -20,7 +20,6 @@
 import com.netflix.spinnaker.clouddriver.deploy.DeployHandler;
 import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult;
 import com.netflix.spinnaker.clouddriver.ecs.deploy.description.BasicEcsDeployDescription;
-
 import java.util.List;
 
 public class BasicEcsDeployHandler implements DeployHandler<BasicEcsDeployDescription> {
@@ -33,7 +32,7 @@ public boolean handles(DeployDescription description) {
 
   @Override
   public DeploymentResult handle(BasicEcsDeployDescription description, List priorOutputs) {
-    //TODO - Implement this stub
+    // TODO - Implement this stub
     return new DeploymentResult();
   }
 
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/AbstractEcsAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/AbstractEcsAtomicOperation.java
index 826657e76be..00151c627e8 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/AbstractEcsAtomicOperation.java
+++
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/AbstractEcsAtomicOperation.java @@ -16,7 +16,7 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; import com.amazonaws.services.ecs.AmazonECS; import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; @@ -24,25 +24,23 @@ import com.netflix.spinnaker.clouddriver.data.task.Task; import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.AbstractECSDescription; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.credentials.CredentialsRepository; import org.springframework.beans.factory.annotation.Autowired; -public abstract class AbstractEcsAtomicOperation implements AtomicOperation { +public abstract class AbstractEcsAtomicOperation + implements AtomicOperation { private final String basePhase; - @Autowired - AmazonClientProvider amazonClientProvider; - @Autowired - AccountCredentialsProvider accountCredentialsProvider; - @Autowired - ContainerInformationService containerInformationService; + @Autowired AmazonClientProvider amazonClientProvider; + @Autowired CredentialsRepository credentialsRepository; + @Autowired ContainerInformationService containerInformationService; T description; AbstractEcsAtomicOperation(T description, String basePhase) { this.description = description; this.basePhase = basePhase; - } private static Task getTask() { @@ -55,19 +53,25 @@ String getCluster(String service, String account) { } AmazonECS getAmazonEcsClient() { - AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); String region = getRegion(); NetflixAmazonCredentials credentialAccount = description.getCredentials(); return amazonClientProvider.getAmazonEcs(credentialAccount, region, false); } + AWSApplicationAutoScaling getAmazonApplicationAutoScalingClient() { + String region = getRegion(); + NetflixAmazonCredentials credentialAccount = description.getCredentials(); + + return amazonClientProvider.getAmazonApplicationAutoScaling(credentialAccount, region, false); + } + protected String getRegion() { return description.getRegion(); } AmazonCredentials getCredentials() { - return (AmazonCredentials) accountCredentialsProvider.getCredentials(description.getCredentialAccount()); + return credentialsRepository.getOne(description.getAccount()); } void updateTaskStatus(String status) { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CloneServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CloneServiceAtomicOperation.java index b70efc04427..bfef32d019b 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CloneServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CloneServiceAtomicOperation.java @@ -16,12 +16,8 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import 
com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CloneServiceDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent; - -import java.util.Collection; import java.util.List; public class CloneServiceAtomicOperation implements AtomicOperation { @@ -39,5 +35,4 @@ public Void operate(List priorOutputs) { return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperation.java index b048daf12e8..686cffcc477 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperation.java @@ -16,27 +16,16 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest; +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult; import com.amazonaws.services.applicationautoscaling.model.RegisterScalableTargetRequest; import com.amazonaws.services.applicationautoscaling.model.ScalableDimension; +import com.amazonaws.services.applicationautoscaling.model.ScalableTarget; import com.amazonaws.services.applicationautoscaling.model.ServiceNamespace; -import com.amazonaws.services.cloudwatch.model.MetricAlarm; +import com.amazonaws.services.applicationautoscaling.model.SuspendedState; import com.amazonaws.services.ecs.AmazonECS; -import com.amazonaws.services.ecs.model.AwsVpcConfiguration; -import com.amazonaws.services.ecs.model.ContainerDefinition; -import com.amazonaws.services.ecs.model.CreateServiceRequest; -import com.amazonaws.services.ecs.model.DeploymentConfiguration; -import com.amazonaws.services.ecs.model.KeyValuePair; -import com.amazonaws.services.ecs.model.ListServicesRequest; -import com.amazonaws.services.ecs.model.ListServicesResult; -import com.amazonaws.services.ecs.model.LoadBalancer; -import com.amazonaws.services.ecs.model.NetworkConfiguration; -import com.amazonaws.services.ecs.model.PortMapping; -import com.amazonaws.services.ecs.model.RegisterTaskDefinitionRequest; -import com.amazonaws.services.ecs.model.RegisterTaskDefinitionResult; -import com.amazonaws.services.ecs.model.Service; -import com.amazonaws.services.ecs.model.TaskDefinition; +import com.amazonaws.services.ecs.model.*; import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest; import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult; @@ -44,46 +33,73 @@ import com.amazonaws.services.identitymanagement.model.GetRoleRequest; import com.amazonaws.services.identitymanagement.model.GetRoleResult; import com.amazonaws.services.identitymanagement.model.Role; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; import 
com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.AssumeRoleAmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials; import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription; +import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription.ServiceDiscoveryAssociation; +import com.netflix.spinnaker.clouddriver.ecs.names.EcsResource; +import com.netflix.spinnaker.clouddriver.ecs.names.EcsServerGroupName; +import com.netflix.spinnaker.clouddriver.ecs.names.EcsServerGroupNameResolver; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamPolicyReader; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamTrustRelationship; import com.netflix.spinnaker.clouddriver.ecs.security.NetflixAssumeRoleEcsCredentials; import com.netflix.spinnaker.clouddriver.ecs.services.EcsCloudMetricService; import com.netflix.spinnaker.clouddriver.ecs.services.SecurityGroupSelector; import com.netflix.spinnaker.clouddriver.ecs.services.SubnetSelector; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.*; +import java.util.stream.Collectors; +import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -public class CreateServerGroupAtomicOperation extends AbstractEcsAtomicOperation { +public class CreateServerGroupAtomicOperation + extends AbstractEcsAtomicOperation { private static final String NECESSARY_TRUSTED_SERVICE = "ecs-tasks.amazonaws.com"; - public static final String AWSVPC_NETWORK_MODE = "awsvpc"; - public static final String FARGATE_LAUNCH_TYPE = "FARGATE"; + protected static final String AWSVPC_NETWORK_MODE = "awsvpc"; + protected static final String HOST_NETWORK_MODE = "host"; + protected static final String EC2 = "EC2"; + protected static final String FARGATE = "FARGATE"; + protected static final String FARGATE_SPOT = "FARGATE_SPOT"; + protected static final String NO_IAM_ROLE = "None (No IAM role)"; + protected static final String NO_IMAGE_CREDENTIALS = "None (No registry credentials)"; + + protected static final String DOCKER_LABEL_KEY_SERVERGROUP = "spinnaker.servergroup"; + protected static final String DOCKER_LABEL_KEY_STACK = "spinnaker.stack"; + protected static final String DOCKER_LABEL_KEY_DETAIL = "spinnaker.detail"; + + protected ObjectMapper mapper = + new ObjectMapper().enable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); + + private final Logger log = LoggerFactory.getLogger(getClass()); - @Autowired - 
EcsCloudMetricService ecsCloudMetricService; - @Autowired - IamPolicyReader iamPolicyReader; + @Autowired EcsCloudMetricService ecsCloudMetricService; + @Autowired IamPolicyReader iamPolicyReader; - @Autowired - SubnetSelector subnetSelector; + @Autowired SubnetSelector subnetSelector; - @Autowired - SecurityGroupSelector securityGroupSelector; + @Autowired SecurityGroupSelector securityGroupSelector; + + @Autowired ArtifactDownloader artifactDownloader; public CreateServerGroupAtomicOperation(CreateServerGroupDescription description) { super(description, "CREATE_ECS_SERVER_GROUP"); @@ -97,174 +113,661 @@ public DeploymentResult operate(List priorOutputs) { AmazonECS ecs = getAmazonEcsClient(); - String serverGroupVersion = inferNextServerGroupVersion(ecs); + Namer namer = + NamerRegistry.lookup() + .withProvider(EcsCloudProvider.ID) + .withAccount(credentials.getName()) + .withResource(EcsResource.class); + + EcsServerGroupName newServerGroup = buildEcsServerGroupName(ecs, namer); + + ScalableTarget sourceTarget = getSourceScalableTarget(); + Service sourceService = getSourceService(); String ecsServiceRole = inferAssumedRoleArn(credentials); updateTaskStatus("Creating Amazon ECS Task Definition..."); - TaskDefinition taskDefinition = registerTaskDefinition(ecs, ecsServiceRole, serverGroupVersion); + TaskDefinition taskDefinition = registerTaskDefinition(ecs, ecsServiceRole, newServerGroup); updateTaskStatus("Done creating Amazon ECS Task Definition..."); - Service service = createService(ecs, taskDefinition, ecsServiceRole, serverGroupVersion); + Service service = createService(ecs, taskDefinition, newServerGroup, sourceService, namer); + + String resourceId = registerAutoScalingGroup(credentials, service, sourceTarget); + + if (description.isCopySourceScalingPoliciesAndActions() && sourceTarget != null) { + updateTaskStatus("Copying scaling policies..."); + ecsCloudMetricService.copyScalingPolicies( + description.getAccount(), + getRegion(), + service.getServiceName(), + resourceId, + description.getSource().getAccount(), + description.getSource().getRegion(), + description.getSource().getAsgName(), + sourceTarget.getResourceId(), + description.getEcsClusterName()); + updateTaskStatus("Done copying scaling policies..."); + } + + return makeDeploymentResult(service); + } - String resourceId = registerAutoScalingGroup(credentials, service); + private EcsServerGroupName buildEcsServerGroupName(AmazonECS ecs, Namer namer) { + EcsServerGroupNameResolver serverGroupNameResolver = + new EcsServerGroupNameResolver(description.getEcsClusterName(), ecs, getRegion(), namer); - if (!description.getAutoscalingPolicies().isEmpty()) { - List alarmNames = description.getAutoscalingPolicies().stream() - .map(MetricAlarm::getAlarmName) - .collect(Collectors.toList()); - ecsCloudMetricService.associateAsgWithMetrics(description.getCredentialAccount(), getRegion(), alarmNames, service.getServiceName(), resourceId); + if (description.getMoniker() != null) { + return serverGroupNameResolver.resolveNextName(description.getMoniker()); } - return makeDeploymentResult(service); + return serverGroupNameResolver.resolveNextName( + description.getApplication(), description.getStack(), description.getFreeFormDetails()); } - private TaskDefinition registerTaskDefinition(AmazonECS ecs, String ecsServiceRole, String version) { + protected TaskDefinition registerTaskDefinition( + AmazonECS ecs, String ecsServiceRole, EcsServerGroupName newServerGroupName) { + + RegisterTaskDefinitionRequest request; + + if 
(description.isUseTaskDefinitionArtifact()) { + request = makeTaskDefinitionRequestFromArtifact(ecsServiceRole, newServerGroupName); + } else { + request = makeTaskDefinitionRequest(ecsServiceRole, newServerGroupName); + } + + RegisterTaskDefinitionResult registerTaskDefinitionResult = ecs.registerTaskDefinition(request); + + return registerTaskDefinitionResult.getTaskDefinition(); + } + protected RegisterTaskDefinitionRequest makeTaskDefinitionRequest( + String ecsServiceRole, EcsServerGroupName newServerGroupName) { Collection containerEnvironment = new LinkedList<>(); - containerEnvironment.add(new KeyValuePair().withName("SERVER_GROUP").withValue(version)); - containerEnvironment.add(new KeyValuePair().withName("CLOUD_STACK").withValue(description.getStack())); - containerEnvironment.add(new KeyValuePair().withName("CLOUD_DETAIL").withValue(description.getFreeFormDetails())); - PortMapping portMapping = new PortMapping() - .withProtocol(description.getPortProtocol() != null ? description.getPortProtocol() : "tcp"); + // Set all user defined environment variables + final Map environmentVariables = description.getEnvironmentVariables(); + if (environmentVariables != null) { + for (Map.Entry entry : environmentVariables.entrySet()) { + containerEnvironment.add( + new KeyValuePair().withName(entry.getKey()).withValue(entry.getValue())); + } + } - if (AWSVPC_NETWORK_MODE.equals(description.getNetworkMode())) { - portMapping - .withHostPort(description.getContainerPort()) - .withContainerPort(description.getContainerPort()); - } else { - portMapping - .withHostPort(0) - .withContainerPort(description.getContainerPort()); + containerEnvironment = setSpinnakerEnvVars(containerEnvironment, newServerGroupName); + + ContainerDefinition containerDefinition = + new ContainerDefinition() + .withName(newServerGroupName.getContainerName()) + .withEnvironment(containerEnvironment) + .withCpu(description.getComputeUnits()) + .withMemoryReservation(description.getReservedMemory()) + .withImage(description.getDockerImageAddress()); + + Set portMappings = new HashSet<>(); + + if (!StringUtils.isEmpty(description.getTargetGroup()) + && description.getContainerPort() != null) { + PortMapping portMapping = + new PortMapping() + .withProtocol( + description.getPortProtocol() != null ? description.getPortProtocol() : "tcp"); + + if (AWSVPC_NETWORK_MODE.equals(description.getNetworkMode()) + || HOST_NETWORK_MODE.equals(description.getNetworkMode())) { + portMapping + .withHostPort(description.getContainerPort()) + .withContainerPort(description.getContainerPort()); + } else { + portMapping.withHostPort(0).withContainerPort(description.getContainerPort()); + } + + portMappings.add(portMapping); } - Collection portMappings = new LinkedList<>(); - portMappings.add(portMapping); + if (description.getTargetGroupMappings() != null) { + for (CreateServerGroupDescription.TargetGroupProperties properties : + description.getTargetGroupMappings()) { + PortMapping portMapping = + new PortMapping() + .withProtocol( + description.getPortProtocol() != null ? 
description.getPortProtocol() : "tcp"); + + if (AWSVPC_NETWORK_MODE.equals(description.getNetworkMode()) + || HOST_NETWORK_MODE.equals(description.getNetworkMode())) { + portMapping + .withHostPort(properties.getContainerPort()) + .withContainerPort(properties.getContainerPort()); + } else { + portMapping.withHostPort(0).withContainerPort(properties.getContainerPort()); + } - ContainerDefinition containerDefinition = new ContainerDefinition() - .withName(version) - .withEnvironment(containerEnvironment) - .withPortMappings(portMappings) - .withCpu(description.getComputeUnits()) - .withMemoryReservation(description.getReservedMemory()) - .withImage(description.getDockerImageAddress()); + portMappings.add(portMapping); + } + } + + if (description.getServiceDiscoveryAssociations() != null) { + for (ServiceDiscoveryAssociation config : description.getServiceDiscoveryAssociations()) { + if (config.getContainerPort() != null + && config.getContainerPort() != 0 + && config.getContainerPort() != description.getContainerPort()) { + PortMapping portMapping = new PortMapping().withProtocol("tcp"); + if (AWSVPC_NETWORK_MODE.equals(description.getNetworkMode())) { + portMapping + .withHostPort(config.getContainerPort()) + .withContainerPort(config.getContainerPort()); + } else { + portMapping.withHostPort(0).withContainerPort(config.getContainerPort()); + } + portMappings.add(portMapping); + } + } + } + + log.debug("The container port mappings are: {}", portMappings); + containerDefinition.setPortMappings(portMappings); + + if (!NO_IMAGE_CREDENTIALS.equals(description.getDockerImageCredentialsSecret()) + && description.getDockerImageCredentialsSecret() != null) { + RepositoryCredentials credentials = + new RepositoryCredentials() + .withCredentialsParameter(description.getDockerImageCredentialsSecret()); + containerDefinition.withRepositoryCredentials(credentials); + } + + Map labelsMap = new HashMap<>(); + if (description.getDockerLabels() != null) { + labelsMap.putAll(description.getDockerLabels()); + } + + labelsMap = setSpinnakerDockerLabels(labelsMap, newServerGroupName); + + containerDefinition.withDockerLabels(labelsMap); + + if (description.getLogDriver() != null && !"None".equals(description.getLogDriver())) { + LogConfiguration logConfiguration = + new LogConfiguration() + .withLogDriver(description.getLogDriver()) + .withOptions(description.getLogOptions()); + + containerDefinition.withLogConfiguration(logConfiguration); + } Collection containerDefinitions = new LinkedList<>(); containerDefinitions.add(containerDefinition); - RegisterTaskDefinitionRequest request = new RegisterTaskDefinitionRequest() - .withContainerDefinitions(containerDefinitions) - .withFamily(getFamilyName()); + RegisterTaskDefinitionRequest request = + new RegisterTaskDefinitionRequest() + .withContainerDefinitions(containerDefinitions) + .withFamily(newServerGroupName.getFamilyName()); if (description.getNetworkMode() != null && !description.getNetworkMode().equals("default")) { request.withNetworkMode(description.getNetworkMode()); } - if (!description.getIamRole().equals("None (No IAM role)")) { - checkRoleTrustRelations(description.getIamRole()); - request.setTaskRoleArn(description.getIamRole()); + if (!NO_IAM_ROLE.equals(description.getIamRole()) && description.getIamRole() != null) { + request.setTaskRoleArn(checkRoleTrustRelations(description.getIamRole()).getRole().getArn()); } if (!StringUtils.isEmpty(description.getLaunchType())) { 
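+        // With the FARGATE launch type, cpu and memory must be set at the task level and a task
+        // execution role is required (ECS uses it to pull images and publish logs), which is why
+        // those values are only copied onto the request in the FARGATE branch below.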
request.setRequiresCompatibilities(Arrays.asList(description.getLaunchType())); + + if (FARGATE.equals(description.getLaunchType())) { + request.setExecutionRoleArn(ecsServiceRole); + request.setCpu(description.getComputeUnits().toString()); + request.setMemory(description.getReservedMemory().toString()); + } } - if (FARGATE_LAUNCH_TYPE.equals(description.getLaunchType())) { - request.setExecutionRoleArn(ecsServiceRole); - request.setCpu(description.getComputeUnits().toString()); - request.setMemory(description.getReservedMemory().toString()); + if (description.getCapacityProviderStrategy() != null + && !description.getCapacityProviderStrategy().isEmpty()) { + + for (CapacityProviderStrategyItem cpStrategy : description.getCapacityProviderStrategy()) { + if (FARGATE.equals(cpStrategy.getCapacityProvider()) + || FARGATE_SPOT.equals(cpStrategy.getCapacityProvider())) { + request.setRequiresCompatibilities(Arrays.asList(FARGATE)); + request.setExecutionRoleArn(ecsServiceRole); + request.setCpu(description.getComputeUnits().toString()); + request.setMemory(description.getReservedMemory().toString()); + } + } } - RegisterTaskDefinitionResult registerTaskDefinitionResult = ecs.registerTaskDefinition(request); + return request; + } - return registerTaskDefinitionResult.getTaskDefinition(); + private RegisterTaskDefinitionRequest getSpelProcessedArtifact() { + if (description.getSpelProcessedTaskDefinitionArtifact() != null) { + return mapper.convertValue( + description.getSpelProcessedTaskDefinitionArtifact(), + RegisterTaskDefinitionRequest.class); + } else { + throw new IllegalArgumentException("Task definition artifact can not be null"); + } } - private Service createService(AmazonECS ecs, TaskDefinition taskDefinition, String ecsServiceRole, String version) { - String serviceName = getNextServiceName(version); - Collection loadBalancers = new LinkedList<>(); - loadBalancers.add(retrieveLoadBalancer(version)); + private RegisterTaskDefinitionRequest getArtifactFromFile() { + File artifactFile = + downloadTaskDefinitionArtifact(description.getResolvedTaskDefinitionArtifact()); + try { + return mapper.readValue(artifactFile, RegisterTaskDefinitionRequest.class); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + protected RegisterTaskDefinitionRequest makeTaskDefinitionRequestFromArtifact( + String ecsServiceRole, EcsServerGroupName newServerGroupName) { + + RegisterTaskDefinitionRequest requestTemplate = null; + if (description.isEvaluateTaskDefinitionArtifactExpressions()) { + requestTemplate = getSpelProcessedArtifact(); + } else { + requestTemplate = getArtifactFromFile(); + } + + String templateMode = requestTemplate.getNetworkMode(); + if (templateMode != null + && !templateMode.isEmpty() + && !templateMode.equals(description.getNetworkMode())) { + throw new IllegalArgumentException( + "Task definition networkMode does not match server group value. 
Found '" + templateMode + "' but expected '" + description.getNetworkMode() + "'"); + } + + List containers = requestTemplate.getContainerDefinitions(); + if (containers.size() == 0) { + throw new IllegalArgumentException( + "Provided task definition does not contain any container definitions."); + } + + description + .getContainerToImageMap() + .forEach( + (k, v) -> { + // check if taskDefTemplate contains matching container + List matches = + containers.stream() + .filter(x -> x.getName().equals(k)) + .collect(Collectors.toList()); + + if (matches.size() != 1) { + throw new IllegalArgumentException( + "Invalid number of matching containers found for mapping '" + + k + + "'. Have " + + matches.size() + + " but expected 1."); + } + + // interpolate container mappings + matches.get(0).setImage(v); + }); + + containers.forEach( + (c) -> { + Collection updatedEnv = + setSpinnakerEnvVars(c.getEnvironment(), newServerGroupName); + c.setEnvironment(updatedEnv); + + Map updatedLabels = + setSpinnakerDockerLabels(c.getDockerLabels(), newServerGroupName); + c.setDockerLabels(updatedLabels); + }); + + requestTemplate.setFamily(newServerGroupName.getFamilyName()); + + if (FARGATE.equals(description.getLaunchType())) { + String templateExecutionRole = requestTemplate.getExecutionRoleArn(); + + if (templateExecutionRole == null || templateExecutionRole.isEmpty()) { + requestTemplate.setExecutionRoleArn(ecsServiceRole); + } + } else if (description.getCapacityProviderStrategy() != null + && !description.getCapacityProviderStrategy().isEmpty()) { + for (CapacityProviderStrategyItem cpStrategy : description.getCapacityProviderStrategy()) { + if (FARGATE.equals(cpStrategy.getCapacityProvider()) + || FARGATE_SPOT.equals(cpStrategy.getCapacityProvider())) { + String templateExecutionRole = requestTemplate.getExecutionRoleArn(); + + if (templateExecutionRole == null || StringUtils.isBlank(templateExecutionRole)) { + requestTemplate.setExecutionRoleArn(ecsServiceRole); + } + + return requestTemplate; + } + } + } + + return requestTemplate; + } + + private File downloadTaskDefinitionArtifact(Artifact taskDefArtifact) { + File file = null; + if ((taskDefArtifact.getArtifactAccount() == null + || taskDefArtifact.getArtifactAccount().isEmpty()) + && description.getTaskDefinitionArtifactAccount() != null + && !description.getTaskDefinitionArtifactAccount().isEmpty()) { + taskDefArtifact = + taskDefArtifact.toBuilder() + .artifactAccount(description.getTaskDefinitionArtifactAccount()) + .build(); + } + try { + InputStream artifactInput = artifactDownloader.download(taskDefArtifact); + file = File.createTempFile(UUID.randomUUID().toString(), null); + FileOutputStream fileOutputStream = new FileOutputStream(file); + IOUtils.copy(artifactInput, fileOutputStream); + fileOutputStream.close(); + } catch (IOException e) { + if (file != null) { + file.delete(); + } + throw new UncheckedIOException(e); + } + return file; + } + + private Service createService( + AmazonECS ecs, + TaskDefinition taskDefinition, + EcsServerGroupName newServerGroupName, + Service sourceService, + Namer namer) { - Integer desiredCount = description.getCapacity().getDesired(); String taskDefinitionArn = taskDefinition.getTaskDefinitionArn(); - DeploymentConfiguration deploymentConfiguration = new DeploymentConfiguration() - .withMinimumHealthyPercent(100) - .withMaximumPercent(200); + Integer desiredCount = description.getCapacity().getDesired(); + if (sourceService != null + && description.getSource() != null + &&
description.getSource().getUseSourceCapacity() != null + && description.getSource().getUseSourceCapacity()) { + desiredCount = sourceService.getDesiredCount(); + } + + CreateServiceRequest request = + makeServiceRequest( + taskDefinitionArn, newServerGroupName, desiredCount, namer, isTaggingEnabled(ecs)); + + updateTaskStatus( + String.format( + "Creating %s of %s with %s for %s.", + desiredCount, newServerGroupName, taskDefinitionArn, description.getAccount())); + + log.debug("CreateServiceRequest being made is: {}", request.toString()); + + Service service = ecs.createService(request).getService(); + + updateTaskStatus( + String.format( + "Done creating %s of %s with %s for %s.", + desiredCount, newServerGroupName, taskDefinitionArn, description.getAccount())); + + return service; + } + + protected CreateServiceRequest makeServiceRequest( + String taskDefinitionArn, + EcsServerGroupName newServerGroupName, + Integer desiredCount, + Namer namer, + boolean taggingEnabled) { + Collection serviceRegistries = new LinkedList<>(); + if (description.getServiceDiscoveryAssociations() != null) { + for (ServiceDiscoveryAssociation config : description.getServiceDiscoveryAssociations()) { + ServiceRegistry registryEntry = + new ServiceRegistry().withRegistryArn(config.getRegistry().getArn()); + + if (config.getContainerPort() != null && config.getContainerPort() != 0) { + registryEntry.setContainerPort(config.getContainerPort()); + + if (StringUtils.isEmpty(config.getContainerName())) { + registryEntry.setContainerName(newServerGroupName.getContainerName()); + } else { + registryEntry.setContainerName(config.getContainerName()); + } + } + + serviceRegistries.add(registryEntry); + } + } + + DeploymentConfiguration deploymentConfiguration = + new DeploymentConfiguration().withMinimumHealthyPercent(100).withMaximumPercent(200); + + DeploymentCircuitBreaker deploymentCircuitBreaker = + new DeploymentCircuitBreaker() + .withEnable(description.isEnableDeploymentCircuitBreaker()) + .withRollback(false); + deploymentConfiguration.setDeploymentCircuitBreaker(deploymentCircuitBreaker); + + CreateServiceRequest request = + new CreateServiceRequest() + .withServiceName(newServerGroupName.getServiceName()) + .withDesiredCount(desiredCount) + .withCluster(description.getEcsClusterName()) + .withTaskDefinition(taskDefinitionArn) + .withPlacementConstraints(description.getPlacementConstraints()) + .withPlacementStrategy(description.getPlacementStrategySequence()) + .withServiceRegistries(serviceRegistries) + .withDeploymentConfiguration(deploymentConfiguration) + .withEnableExecuteCommand(description.isEnableExecuteCommand()); + + List taskDefTags = new LinkedList<>(); + if (description.getTags() != null && !description.getTags().isEmpty()) { + for (Map.Entry entry : description.getTags().entrySet()) { + taskDefTags.add(new Tag().withKey(entry.getKey()).withValue(entry.getValue())); + } + } + + // Apply moniker strategy which may add tags + namer.applyMoniker( + new EcsResource() { + @Override + public String getName() { + return request.getServiceName(); + } - CreateServiceRequest request = new CreateServiceRequest() - .withServiceName(serviceName) - .withDesiredCount(desiredCount) - .withCluster(description.getEcsClusterName()) - .withLoadBalancers(loadBalancers) - .withTaskDefinition(taskDefinitionArn) - .withPlacementStrategy(description.getPlacementStrategySequence()) - .withDeploymentConfiguration(deploymentConfiguration); + // Used by Frigga when moniker support is disabled + public void setName(String name) 
{ + request.setServiceName(name); + } - if (!AWSVPC_NETWORK_MODE.equals(description.getNetworkMode())) { - request.withRole(ecsServiceRole); + @Override + public List getResourceTags() { + return taskDefTags; + } + }, + newServerGroupName.getMoniker()); + + // Only add tags if they're set as it's an optional feature for ECS + if (taggingEnabled) { + request + .withTags(taskDefTags) + .withEnableECSManagedTags(true) + .withPropagateTags(PropagateTags.SERVICE.toString()); + } else { + if (!taskDefTags.isEmpty()) { + throw new IllegalArgumentException( + "ECS account settings for account " + + description.getAccount() + + " do not allow tagging as `serviceLongArnFormat` and `taskLongArnFormat` are not enabled."); + } } + request.withLoadBalancers(retrieveLoadBalancers(newServerGroupName.getContainerName())); + if (AWSVPC_NETWORK_MODE.equals(description.getNetworkMode())) { - Collection subnetIds = subnetSelector.resolveSubnetsIds(description.getAccount(), description.getRegion(), description.getSubnetType()); - Collection vpcIds = subnetSelector.getSubnetVpcIds(description.getAccount(), description.getRegion(), subnetIds); - Collection securityGroupIds = securityGroupSelector.resolveSecurityGroupNames( - description.getAccount(), - description.getRegion(), - description.getSecurityGroupNames(), - vpcIds); - - AwsVpcConfiguration awsvpcConfiguration = new AwsVpcConfiguration() - .withSecurityGroups(securityGroupIds) - .withSubnets(subnetIds); + Collection subnetIds = + subnetSelector.resolveSubnetsIdsForMultipleSubnetTypes( + description.getAccount(), + description.getRegion(), + description.getAvailabilityZones().get(description.getRegion()), + getSubnetTypes()); + Collection vpcIds = + subnetSelector.getSubnetVpcIds( + description.getAccount(), description.getRegion(), subnetIds); + Collection securityGroupIds = + securityGroupSelector.resolveSecurityGroupNames( + description.getAccount(), + description.getRegion(), + description.getSecurityGroupNames(), + vpcIds); + + AwsVpcConfiguration awsvpcConfiguration = + new AwsVpcConfiguration().withSecurityGroups(securityGroupIds).withSubnets(subnetIds); if (description.getAssociatePublicIpAddress() != null) { - awsvpcConfiguration.withAssignPublicIp(description.getAssociatePublicIpAddress() ? "ENABLED" : "DISABLED"); + awsvpcConfiguration.withAssignPublicIp( + description.getAssociatePublicIpAddress() ? 
"ENABLED" : "DISABLED"); } - request.withNetworkConfiguration(new NetworkConfiguration().withAwsvpcConfiguration(awsvpcConfiguration)); + request.withNetworkConfiguration( + new NetworkConfiguration().withAwsvpcConfiguration(awsvpcConfiguration)); } if (!StringUtils.isEmpty(description.getLaunchType())) { request.withLaunchType(description.getLaunchType()); + } else if (description.getCapacityProviderStrategy() != null + && !description.getCapacityProviderStrategy().isEmpty()) { + request.withCapacityProviderStrategy(description.getCapacityProviderStrategy()); + } + + if (!StringUtils.isEmpty(description.getPlatformVersion())) { + request.withPlatformVersion(description.getPlatformVersion()); } if (description.getHealthCheckGracePeriodSeconds() != null) { request.withHealthCheckGracePeriodSeconds(description.getHealthCheckGracePeriodSeconds()); } - updateTaskStatus(String.format("Creating %s of %s with %s for %s.", - desiredCount, serviceName, taskDefinitionArn, description.getCredentialAccount())); + return request; + } - Service service = ecs.createService(request).getService(); + private boolean isTaggingEnabled(AmazonECS ecs) { + boolean isServiceLongArnFormatEnabled = false; + boolean isTaskLongArnFormatEnabled = false; - updateTaskStatus(String.format("Done creating %s of %s with %s for %s.", - desiredCount, serviceName, taskDefinitionArn, description.getCredentialAccount())); + String nextToken = null; + do { + ListAccountSettingsRequest request = + new ListAccountSettingsRequest().withEffectiveSettings(true).withNextToken(nextToken); - return service; + ListAccountSettingsResult response = ecs.listAccountSettings(request); + + for (Setting setting : response.getSettings()) { + if (setting.getName().equals(SettingName.ServiceLongArnFormat.toString()) + && setting.getValue().equals("enabled")) { + isServiceLongArnFormatEnabled = true; + } + + if (setting.getName().equals(SettingName.TaskLongArnFormat.toString()) + && setting.getValue().equals("enabled")) { + isTaskLongArnFormatEnabled = true; + } + } + + nextToken = response.getNextToken(); + } while (nextToken != null); + + return isServiceLongArnFormatEnabled && isTaskLongArnFormatEnabled; } - private String registerAutoScalingGroup(AmazonCredentials credentials, - Service service) { + private String registerAutoScalingGroup( + AmazonCredentials credentials, Service service, ScalableTarget sourceTarget) { AWSApplicationAutoScaling autoScalingClient = getAmazonApplicationAutoScalingClient(); - String assumedRoleArn = inferAssumedRoleArn(credentials); - RegisterScalableTargetRequest request = new RegisterScalableTargetRequest() - .withServiceNamespace(ServiceNamespace.Ecs) - .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) - .withResourceId(String.format("service/%s/%s", description.getEcsClusterName(), service.getServiceName())) - .withRoleARN(assumedRoleArn) - .withMinCapacity(description.getCapacity().getMin()) - .withMaxCapacity(description.getCapacity().getMax()); + Integer min = description.getCapacity().getMin(); + Integer max = description.getCapacity().getMax(); + + if (sourceTarget != null + && description.getSource() != null + && description.getSource().getUseSourceCapacity() != null + && description.getSource().getUseSourceCapacity()) { + min = sourceTarget.getMinCapacity(); + max = sourceTarget.getMaxCapacity(); + } + + RegisterScalableTargetRequest request = + new RegisterScalableTargetRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + 
.withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withResourceId( + String.format( + "service/%s/%s", description.getEcsClusterName(), service.getServiceName())) + .withMinCapacity(min) + .withMaxCapacity(max) + .withSuspendedState( + new SuspendedState() + .withDynamicScalingInSuspended(false) + .withDynamicScalingOutSuspended(false) + .withScheduledScalingSuspended(false)); updateTaskStatus("Creating Amazon Application Auto Scaling Scalable Target Definition..."); - autoScalingClient.registerScalableTarget(request); + // ECS DescribeService is eventually consistent, so sometimes RegisterScalableTarget will + // return a ValidationException with message "ECS service doesn't exist", because the service + // was just created. Retry until consistency is likely reached. + OperationPoller.retryWithBackoff( + o -> autoScalingClient.registerScalableTarget(request), 1000, 3); updateTaskStatus("Done creating Amazon Application Auto Scaling Scalable Target Definition."); return request.getResourceId(); } + private ScalableTarget getSourceScalableTarget() { + if (description.getSource() != null + && description.getSource().getRegion() != null + && description.getSource().getAccount() != null + && description.getSource().getAsgName() != null) { + + AWSApplicationAutoScaling autoScalingClient = getSourceAmazonApplicationAutoScalingClient(); + + DescribeScalableTargetsRequest request = + new DescribeScalableTargetsRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withResourceIds( + String.format( + "service/%s/%s", + description.getEcsClusterName(), description.getSource().getAsgName())); + + DescribeScalableTargetsResult result = autoScalingClient.describeScalableTargets(request); + if (result.getScalableTargets() != null && !result.getScalableTargets().isEmpty()) { + return result.getScalableTargets().get(0); + } + + return null; + } + + return null; + } + + private Service getSourceService() { + if (description.getSource() != null + && description.getSource().getRegion() != null + && description.getSource().getAccount() != null + && description.getSource().getAsgName() != null) { + + AmazonECS ecsClient = getSourceAmazonEcsClient(); + + DescribeServicesRequest request = + new DescribeServicesRequest() + .withCluster(description.getEcsClusterName()) + .withServices(description.getSource().getAsgName()); + + DescribeServicesResult result = ecsClient.describeServices(request); + if (result.getServices() != null && !result.getServices().isEmpty()) { + return result.getServices().get(0); + } + + return null; + } + + return null; + } + private String inferAssumedRoleArn(AmazonCredentials credentials) { String role; if (credentials instanceof AssumeRoleAmazonCredentials) { @@ -274,31 +777,52 @@ private String inferAssumedRoleArn(AmazonCredentials credentials) { } else if (credentials instanceof NetflixAssumeRoleEcsCredentials) { role = ((NetflixAssumeRoleEcsCredentials) credentials).getAssumeRole(); } else { - throw new UnsupportedOperationException("The given kind of credentials is not supported, " + - "please report this issue to the Spinnaker project on Github."); + throw new UnsupportedOperationException( + "The given kind of credentials is not supported, " + + "please report this issue to the Spinnaker project on Github."); } - - return String.format("arn:aws:iam::%s:%s", credentials.getAccountId(), role); + if (!role.startsWith("arn:")) { + return String.format("arn:aws:iam::%s:%s", 
credentials.getAccountId(), role); + } + return role; } - private void checkRoleTrustRelations(String roleName) { + private GetRoleResult checkRoleTrustRelations(String roleName) { updateTaskStatus("Checking role trust relations for: " + roleName); AmazonIdentityManagement iamClient = getAmazonIdentityManagementClient(); - GetRoleResult response = iamClient.getRole(new GetRoleRequest() - .withRoleName(roleName)); + GetRoleResult response = iamClient.getRole(new GetRoleRequest().withRoleName(roleName)); Role role = response.getRole(); - Set trustedEntities = iamPolicyReader.getTrustedEntities(role.getAssumeRolePolicyDocument()); + Set trustedEntities = + iamPolicyReader.getTrustedEntities(role.getAssumeRolePolicyDocument()); - Set trustedServices = trustedEntities.stream() - .filter(trustRelation -> trustRelation.getType().equals("Service")) - .map(IamTrustRelationship::getValue) - .collect(Collectors.toSet()); + Set trustedServices = + trustedEntities.stream() + .filter(trustRelation -> trustRelation.getType().equals("Service")) + .map(IamTrustRelationship::getValue) + .collect(Collectors.toSet()); if (!trustedServices.contains(NECESSARY_TRUSTED_SERVICE)) { - throw new IllegalArgumentException("The " + roleName + " role does not have a trust relationship to ecs-tasks.amazonaws.com."); + throw new IllegalArgumentException( + "The " + + roleName + + " role does not have a trust relationship to ecs-tasks.amazonaws.com."); + } + return response; + } + + private Set getSubnetTypes() { + Set subnetTypes = new HashSet<>(); + + if (description.getSubnetTypes() != null && !description.getSubnetTypes().isEmpty()) { + subnetTypes.addAll(description.getSubnetTypes()); } + + if (StringUtils.isNotBlank(description.getSubnetType())) { + subnetTypes.add(description.getSubnetType()); + } + return subnetTypes; } private DeploymentResult makeDeploymentResult(Service service) { @@ -311,45 +835,94 @@ private DeploymentResult makeDeploymentResult(Service service) { return result; } - private LoadBalancer retrieveLoadBalancer(String version) { - LoadBalancer loadBalancer = new LoadBalancer(); - loadBalancer.setContainerName(version); - loadBalancer.setContainerPort(description.getContainerPort()); + private Collection retrieveLoadBalancers(String containerName) { + Set loadBalancers = new HashSet<>(); + Set targetGroupMappings = new HashSet<>(); + + if (description.getTargetGroupMappings() != null + && !description.getTargetGroupMappings().isEmpty()) { + targetGroupMappings.addAll(description.getTargetGroupMappings()); + } + + if (StringUtils.isNotBlank(description.getTargetGroup())) { + CreateServerGroupDescription.TargetGroupProperties targetGroupMapping = + new CreateServerGroupDescription.TargetGroupProperties(); + + String containerToUse = + StringUtils.isNotBlank(description.getLoadBalancedContainer()) + ? description.getLoadBalancedContainer() + : containerName; + + targetGroupMapping.setContainerName(containerToUse); + targetGroupMapping.setContainerPort(description.getContainerPort()); + targetGroupMapping.setTargetGroup(description.getTargetGroup()); + + targetGroupMappings.add(targetGroupMapping); + } + + for (CreateServerGroupDescription.TargetGroupProperties targetGroupAssociation : + targetGroupMappings) { + LoadBalancer loadBalancer = new LoadBalancer(); + + String containerToUse = + StringUtils.isNotBlank(targetGroupAssociation.getContainerName()) + ? 
targetGroupAssociation.getContainerName() + : containerName; + + loadBalancer.setContainerName(containerToUse); + loadBalancer.setContainerPort(targetGroupAssociation.getContainerPort()); - if (description.getTargetGroup() != null) { AmazonElasticLoadBalancing loadBalancingV2 = getAmazonElasticLoadBalancingClient(); - DescribeTargetGroupsRequest request = new DescribeTargetGroupsRequest().withNames(description.getTargetGroup()); - DescribeTargetGroupsResult describeTargetGroupsResult = loadBalancingV2.describeTargetGroups(request); + DescribeTargetGroupsRequest request = + new DescribeTargetGroupsRequest().withNames(targetGroupAssociation.getTargetGroup()); + DescribeTargetGroupsResult describeTargetGroupsResult = + loadBalancingV2.describeTargetGroups(request); if (describeTargetGroupsResult.getTargetGroups().size() == 1) { - loadBalancer.setTargetGroupArn(describeTargetGroupsResult.getTargetGroups().get(0).getTargetGroupArn()); + loadBalancer.setTargetGroupArn( + describeTargetGroupsResult.getTargetGroups().get(0).getTargetGroupArn()); } else if (describeTargetGroupsResult.getTargetGroups().size() > 1) { - throw new IllegalArgumentException("There are multiple target groups with the name " + description.getTargetGroup() + "."); + throw new IllegalArgumentException( + "There are multiple target groups with the name " + + targetGroupAssociation.getTargetGroup() + + "."); } else { - throw new IllegalArgumentException("There is no target group with the name " + description.getTargetGroup() + "."); + throw new IllegalArgumentException( + "There is no target group with the name " + + targetGroupAssociation.getTargetGroup() + + "."); } + loadBalancers.add(loadBalancer); } - return loadBalancer; + + return loadBalancers; } - private AWSApplicationAutoScaling getAmazonApplicationAutoScalingClient() { - AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); - NetflixAmazonCredentials credentialAccount = description.getCredentials(); + private AWSApplicationAutoScaling getSourceAmazonApplicationAutoScalingClient() { + String sourceRegion = description.getSource().getRegion(); + NetflixAmazonCredentials sourceCredentials = + credentialsRepository.getOne(description.getSource().getAccount()); + return amazonClientProvider.getAmazonApplicationAutoScaling( + sourceCredentials, sourceRegion, false); + } - return amazonClientProvider.getAmazonApplicationAutoScaling(credentialAccount, getRegion(), false); + private AmazonECS getSourceAmazonEcsClient() { + String sourceRegion = description.getSource().getRegion(); + NetflixAmazonCredentials sourceCredentials = + credentialsRepository.getOne(description.getSource().getAccount()); + return amazonClientProvider.getAmazonEcs(sourceCredentials, sourceRegion, false); } private AmazonElasticLoadBalancing getAmazonElasticLoadBalancingClient() { - AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); NetflixAmazonCredentials credentialAccount = description.getCredentials(); - return amazonClientProvider.getAmazonElasticLoadBalancingV2(credentialAccount, getRegion(), false); + return amazonClientProvider.getAmazonElasticLoadBalancingV2( + credentialAccount, getRegion(), false); } private AmazonIdentityManagement getAmazonIdentityManagementClient() { - AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); NetflixAmazonCredentials credentialAccount = description.getCredentials(); return amazonClientProvider.getAmazonIdentityManagement(credentialAccount, getRegion(), 
false); @@ -360,57 +933,45 @@ private String getServerGroupName(Service service) { return getRegion() + ":" + service.getServiceName(); } - private String getNextServiceName(String versionString) { - return getFamilyName() + "-" + versionString; - } - - @Override - protected String getRegion() { - //CreateServerGroupDescription does not contain a region. Instead it has AvailabilityZones - return description.getAvailabilityZones().keySet().iterator().next(); - } + private Collection setSpinnakerEnvVars( + Collection targetEnv, EcsServerGroupName newServerGroupName) { - private String inferNextServerGroupVersion(AmazonECS ecs) { - int latestVersion = 0; - String familyName = getFamilyName(); + Moniker moniker = newServerGroupName.getMoniker(); - String nextToken = null; - do { - ListServicesRequest request = new ListServicesRequest().withCluster(description.getEcsClusterName()); - if (nextToken != null) { - request.setNextToken(nextToken); - } + targetEnv.add( + new KeyValuePair().withName("SERVER_GROUP").withValue(newServerGroupName.getServiceName())); + targetEnv.add(new KeyValuePair().withName("CLOUD_STACK").withValue(moniker.getStack())); + targetEnv.add(new KeyValuePair().withName("CLOUD_DETAIL").withValue(moniker.getDetail())); - ListServicesResult result = ecs.listServices(request); - for (String serviceArn : result.getServiceArns()) { - if (serviceArn.contains(familyName)) { - int currentVersion; - try { - String versionString = StringUtils.substringAfterLast(serviceArn, "-").replaceAll("v", ""); - currentVersion = Integer.parseInt(versionString); - } catch (NumberFormatException e) { - currentVersion = 0; - } - latestVersion = Math.max(currentVersion, latestVersion); - } - } + return targetEnv; + } - nextToken = result.getNextToken(); - } while (nextToken != null && nextToken.length() != 0); + private Map setSpinnakerDockerLabels( + Map targetMap, EcsServerGroupName newServerGroupName) { - return String.format("v%04d", (latestVersion + 1)); - } + Map newLabels = new HashMap<>(); + if (targetMap != null) { + newLabels.putAll(targetMap); + } - private String getFamilyName() { - String familyName = description.getApplication(); + Moniker moniker = newServerGroupName.getMoniker(); - if (description.getStack() != null) { - familyName += "-" + description.getStack(); + if (StringUtils.isNotBlank(moniker.getStack())) { + newLabels.put(DOCKER_LABEL_KEY_STACK, moniker.getStack()); } - if (description.getFreeFormDetails() != null) { - familyName += "-" + description.getFreeFormDetails(); + + if (StringUtils.isNotBlank(moniker.getDetail())) { + newLabels.put(DOCKER_LABEL_KEY_DETAIL, moniker.getDetail()); } - return familyName; + newLabels.put(DOCKER_LABEL_KEY_SERVERGROUP, newServerGroupName.getServiceName()); + + return newLabels; + } + + @Override + protected String getRegion() { + // CreateServerGroupDescription does not contain a region. 
Instead it has AvailabilityZones + return description.getAvailabilityZones().keySet().iterator().next(); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DeleteScalingPolicyAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DeleteScalingPolicyAtomicOperation.java index 2b6af776de1..f0acf27f42b 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DeleteScalingPolicyAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DeleteScalingPolicyAtomicOperation.java @@ -16,10 +16,8 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.DeleteScalingPolicyDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - import java.util.List; public class DeleteScalingPolicyAtomicOperation implements AtomicOperation { @@ -37,5 +35,4 @@ public Void operate(List priorOutputs) { return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperation.java index 40fc9a0ba8f..f3a9e3d4e43 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperation.java @@ -23,13 +23,12 @@ import com.amazonaws.services.ecs.model.UpdateServiceRequest; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ModifyServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.services.EcsCloudMetricService; -import org.springframework.beans.factory.annotation.Autowired; - import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; -public class DestroyServiceAtomicOperation extends AbstractEcsAtomicOperation { - @Autowired - EcsCloudMetricService ecsCloudMetricService; +public class DestroyServiceAtomicOperation + extends AbstractEcsAtomicOperation { + @Autowired EcsCloudMetricService ecsCloudMetricService; public DestroyServiceAtomicOperation(ModifyServiceDescription description) { super(description, "DESTROY_ECS_SERVER_GROUP"); @@ -40,10 +39,16 @@ public Void operate(List priorOutputs) { updateTaskStatus("Initializing Destroy Amazon ECS Server Group Operation..."); AmazonECS ecs = getAmazonEcsClient(); - String ecsClusterName = containerInformationService.getClusterName(description.getServerGroupName(), description.getAccount(), description.getRegion()); + String ecsClusterName = + containerInformationService.getClusterName( + description.getServerGroupName(), description.getAccount(), description.getRegion()); updateTaskStatus("Removing MetricAlarms from " + description.getServerGroupName() + "."); - ecsCloudMetricService.deleteMetrics(description.getServerGroupName(), description.getAccount(), description.getRegion()); + ecsCloudMetricService.deleteMetrics( + description.getServerGroupName(), + description.getAccount(), + description.getRegion(), + ecsClusterName); updateTaskStatus("Done removing MetricAlarms from " + description.getServerGroupName() + "."); UpdateServiceRequest updateServiceRequest = new UpdateServiceRequest(); @@ -61,8 +66,13 @@ public Void operate(List priorOutputs) { 
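+    // Note on the cleanup below: deregistering a task definition only marks that revision
+    // INACTIVE; tasks already running from it keep running, but no new tasks or services
+    // can be launched with it.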
updateTaskStatus("Deleting " + description.getServerGroupName() + " server group."); DeleteServiceResult deleteServiceResult = ecs.deleteService(deleteServiceRequest); - updateTaskStatus("Deleting " + deleteServiceResult.getService().getTaskDefinition() + " task definition belonging to the server group."); - ecs.deregisterTaskDefinition(new DeregisterTaskDefinitionRequest().withTaskDefinition(deleteServiceResult.getService().getTaskDefinition())); + updateTaskStatus( + "Deleting " + + deleteServiceResult.getService().getTaskDefinition() + + " task definition belonging to the server group."); + ecs.deregisterTaskDefinition( + new DeregisterTaskDefinitionRequest() + .withTaskDefinition(deleteServiceResult.getService().getTaskDefinition())); return null; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperation.java index 760c58f9fe3..d4e845c48a5 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperation.java @@ -16,13 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; +import com.amazonaws.services.applicationautoscaling.model.*; import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.UpdateServiceRequest; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ModifyServiceDescription; - import java.util.List; -public class DisableServiceAtomicOperation extends AbstractEcsAtomicOperation { +public class DisableServiceAtomicOperation + extends AbstractEcsAtomicOperation { public DisableServiceAtomicOperation(ModifyServiceDescription description) { super(description, "DISABLE_ECS_SERVER_GROUP"); @@ -37,17 +39,68 @@ public Void operate(List priorOutputs) { private void disableService() { AmazonECS ecs = getAmazonEcsClient(); + AWSApplicationAutoScaling autoScalingClient = getAmazonApplicationAutoScalingClient(); String service = description.getServerGroupName(); - String account = description.getCredentialAccount(); + String account = description.getAccount(); String cluster = getCluster(service, account); + DescribeScalableTargetsRequest describeRequest = + new DescribeScalableTargetsRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + .withResourceIds(String.format("service/%s/%s", cluster, service)) + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount); + DescribeScalableTargetsResult describeResult = + autoScalingClient.describeScalableTargets(describeRequest); + + if (isSuspended(describeResult)) { + updateTaskStatus( + String.format( + "Autoscaling already suspended on server group %s for %s.", service, account)); + } else { + updateTaskStatus( + String.format("Suspending autoscaling on %s server group for %s.", service, account)); + RegisterScalableTargetRequest suspendRequest = + new RegisterScalableTargetRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withResourceId(String.format("service/%s/%s", cluster, service)) + .withSuspendedState( + new SuspendedState() + .withDynamicScalingInSuspended(true) + .withDynamicScalingOutSuspended(true) + .withScheduledScalingSuspended(true)); + 
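+      // Registering over an existing scalable target acts as an in-place update; parameters
+      // omitted here (such as min/max capacity) should be left unchanged, so this suspends
+      // scaling without touching capacity bounds or attached policies.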
autoScalingClient.registerScalableTarget(suspendRequest); + updateTaskStatus( + String.format("Autoscaling on server group %s suspended for %s.", service, account)); + } + updateTaskStatus(String.format("Disabling %s server group for %s.", service, account)); - UpdateServiceRequest request = new UpdateServiceRequest() - .withCluster(cluster) - .withService(service) - .withDesiredCount(0); + UpdateServiceRequest request = + new UpdateServiceRequest().withCluster(cluster).withService(service).withDesiredCount(0); ecs.updateService(request); updateTaskStatus(String.format("Server group %s disabled for %s.", service, account)); } + + private boolean isSuspended(DescribeScalableTargetsResult describeResult) { + if (describeResult != null + && describeResult.getScalableTargets() != null + && describeResult.getScalableTargets().size() > 0) { + ScalableTarget target = + describeResult.getScalableTargets().stream() + .filter( + e -> + (e.getScalableDimension() + .equals(ScalableDimension.EcsServiceDesiredCount.toString()))) + .findFirst() + .orElse(null); + + return (target != null) + && target.getSuspendedState().getScheduledScalingSuspended() + && target.getSuspendedState().getDynamicScalingInSuspended() + && target.getSuspendedState().getDynamicScalingOutSuspended(); + } + + return false; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperation.java index 2394bc08e59..bb4af79213f 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperation.java @@ -16,22 +16,16 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; -import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest; -import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult; -import com.amazonaws.services.applicationautoscaling.model.ScalableDimension; -import com.amazonaws.services.applicationautoscaling.model.ScalableTarget; -import com.amazonaws.services.applicationautoscaling.model.ServiceNamespace; +import com.amazonaws.services.applicationautoscaling.model.*; import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.UpdateServiceRequest; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ModifyServiceDescription; - import java.util.ArrayList; import java.util.List; -public class EnableServiceAtomicOperation extends AbstractEcsAtomicOperation { +public class EnableServiceAtomicOperation + extends AbstractEcsAtomicOperation { public EnableServiceAtomicOperation(ModifyServiceDescription description) { super(description, "ENABLE_ECS_SERVER_GROUP"); @@ -46,19 +40,37 @@ public Void operate(List priorOutputs) { private void enableService() { AmazonECS ecsClient = getAmazonEcsClient(); + AWSApplicationAutoScaling autoScalingClient = getAmazonApplicationAutoScalingClient(); String service = description.getServerGroupName(); - String account = description.getCredentialAccount(); + String account = description.getAccount(); String cluster = getCluster(service, account); 
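// Editor's note (sketch under assumptions, not part of this diff): the enable path
// below mirrors the suspend path in DisableServiceAtomicOperation, reusing the same
// RegisterScalableTargetRequest shape with the suspended flags flipped to false. A
// hypothetical shared helper could capture both directions:
//   private void setScalingSuspended(
//       AWSApplicationAutoScaling client, String cluster, String service, boolean suspended) {
//     client.registerScalableTarget(
//         new RegisterScalableTargetRequest()
//             .withServiceNamespace(ServiceNamespace.Ecs)
//             .withScalableDimension(ScalableDimension.EcsServiceDesiredCount)
//             .withResourceId(String.format("service/%s/%s", cluster, service))
//             .withSuspendedState(
//                 new SuspendedState()
//                     .withDynamicScalingInSuspended(suspended)
//                     .withDynamicScalingOutSuspended(suspended)
//                     .withScheduledScalingSuspended(suspended)));
//   }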
- UpdateServiceRequest request = new UpdateServiceRequest() - .withCluster(cluster) - .withService(service) - .withDesiredCount(getMaxCapacity(cluster)); + UpdateServiceRequest request = + new UpdateServiceRequest() + .withCluster(cluster) + .withService(service) + .withDesiredCount(getMaxCapacity(cluster)); updateTaskStatus(String.format("Enabling %s server group for %s.", service, account)); ecsClient.updateService(request); updateTaskStatus(String.format("Server group %s enabled for %s.", service, account)); + + updateTaskStatus( + String.format("Resuming autoscaling on %s server group for %s.", service, account)); + RegisterScalableTargetRequest resumeRequest = + new RegisterScalableTargetRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withResourceId(String.format("service/%s/%s", cluster, service)) + .withSuspendedState( + new SuspendedState() + .withDynamicScalingInSuspended(false) + .withDynamicScalingOutSuspended(false) + .withScheduledScalingSuspended(false)); + autoScalingClient.registerScalableTarget(resumeRequest); + updateTaskStatus( + String.format("Autoscaling on server group %s resumed for %s.", service, account)); } private Integer getMaxCapacity(String cluster) { @@ -75,10 +87,11 @@ private ScalableTarget getScalableTarget(String cluster) { List resourceIds = new ArrayList<>(); resourceIds.add(String.format("service/%s/%s", cluster, description.getServerGroupName())); - DescribeScalableTargetsRequest request = new DescribeScalableTargetsRequest() - .withResourceIds(resourceIds) - .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) - .withServiceNamespace(ServiceNamespace.Ecs); + DescribeScalableTargetsRequest request = + new DescribeScalableTargetsRequest() + .withResourceIds(resourceIds) + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withServiceNamespace(ServiceNamespace.Ecs); DescribeScalableTargetsResult result = appASClient.describeScalableTargets(request); @@ -92,13 +105,4 @@ private ScalableTarget getScalableTarget(String cluster) { throw new Error("Multiple Scalable Targets found"); } - - private AWSApplicationAutoScaling getAmazonApplicationAutoScalingClient() { - AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); - String region = description.getRegion(); - NetflixAmazonCredentials credentialAccount = description.getCredentials(); - - return amazonClientProvider.getAmazonApplicationAutoScaling(credentialAccount, region, false); - } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperation.java index 81bc7785752..def912b524a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperation.java @@ -16,7 +16,6 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; import com.amazonaws.services.applicationautoscaling.model.RegisterScalableTargetRequest; import com.amazonaws.services.applicationautoscaling.model.ScalableDimension; @@ -24,17 +23,16 @@ import com.amazonaws.services.ecs.AmazonECS; import 
com.amazonaws.services.ecs.model.Service; import com.amazonaws.services.ecs.model.UpdateServiceRequest; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ResizeServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import org.springframework.beans.factory.annotation.Autowired; - import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; -public class ResizeServiceAtomicOperation extends AbstractEcsAtomicOperation implements AtomicOperation { - @Autowired - ContainerInformationService containerInformationService; +public class ResizeServiceAtomicOperation + extends AbstractEcsAtomicOperation + implements AtomicOperation { + @Autowired ContainerInformationService containerInformationService; public ResizeServiceAtomicOperation(ResizeServiceDescription description) { super(description, "RESIZE_ECS_SERVER_GROUP"); @@ -55,12 +53,15 @@ private Service resizeService() { String serviceName = description.getServerGroupName(); Integer desiredCount = description.getCapacity().getDesired(); - String ecsClusterName = containerInformationService.getClusterName(serviceName, description.getAccount(), description.getRegion()); - - UpdateServiceRequest updateServiceRequest = new UpdateServiceRequest() - .withCluster(ecsClusterName) - .withService(serviceName) - .withDesiredCount(desiredCount); + String ecsClusterName = + containerInformationService.getClusterName( + serviceName, description.getAccount(), description.getRegion()); + + UpdateServiceRequest updateServiceRequest = + new UpdateServiceRequest() + .withCluster(ecsClusterName) + .withService(serviceName) + .withDesiredCount(desiredCount); updateTaskStatus(String.format("Resizing %s to %s instances.", serviceName, desiredCount)); Service service = amazonECS.updateService(updateServiceRequest).getService(); updateTaskStatus(String.format("Done resizing %s to %s", serviceName, desiredCount)); @@ -71,26 +72,27 @@ private void resizeAutoScalingGroup(Service service) { AWSApplicationAutoScaling autoScalingClient = getAmazonApplicationAutoScalingClient(); Integer desiredCount = description.getCapacity().getDesired(); - String ecsClusterName = containerInformationService.getClusterName(service.getServiceName(), description.getAccount(), description.getRegion()); - - RegisterScalableTargetRequest request = new RegisterScalableTargetRequest() - .withServiceNamespace(ServiceNamespace.Ecs) - .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) - .withResourceId(String.format("service/%s/%s", ecsClusterName, service.getServiceName())) - .withRoleARN(service.getRoleArn()) - .withMinCapacity(description.getCapacity().getMin()) - .withMaxCapacity(description.getCapacity().getMax()); - - updateTaskStatus(String.format("Resizing Scalable Target of %s to %s instances", service.getServiceName(), desiredCount)); + String ecsClusterName = + containerInformationService.getClusterName( + service.getServiceName(), description.getAccount(), description.getRegion()); + + RegisterScalableTargetRequest request = + new RegisterScalableTargetRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withResourceId( + String.format("service/%s/%s", ecsClusterName, service.getServiceName())) + .withMinCapacity(description.getCapacity().getMin()) + 
.withMaxCapacity(description.getCapacity().getMax()); + + updateTaskStatus( + String.format( + "Resizing Scalable Target of %s to %s instances", + service.getServiceName(), desiredCount)); autoScalingClient.registerScalableTarget(request); - updateTaskStatus(String.format("Done resizing Scalable Target of %s to %s instances", service.getServiceName(), desiredCount)); - } - - private AWSApplicationAutoScaling getAmazonApplicationAutoScalingClient() { - AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); - String region = description.getRegion(); - NetflixAmazonCredentials credentialAccount = description.getCredentials(); - - return amazonClientProvider.getAmazonApplicationAutoScaling(credentialAccount, region, false); + updateTaskStatus( + String.format( + "Done resizing Scalable Target of %s to %s instances", + service.getServiceName(), desiredCount)); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StartServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StartServiceAtomicOperation.java index 6206a3f58da..c4ca66d45d6 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StartServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StartServiceAtomicOperation.java @@ -16,10 +16,8 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops; -import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ResizeServiceDescription; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.StartServiceDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - import java.util.List; public class StartServiceAtomicOperation implements AtomicOperation { @@ -37,5 +35,4 @@ public Void operate(List priorOutputs) { return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StopServiceAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StopServiceAtomicOperation.java index c578a61d30a..9ad1683c33d 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StopServiceAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/StopServiceAtomicOperation.java @@ -18,7 +18,6 @@ import com.netflix.spinnaker.clouddriver.ecs.deploy.description.StopServiceDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - import java.util.List; public class StopServiceAtomicOperation implements AtomicOperation { @@ -36,5 +35,4 @@ public Void operate(List priorOutputs) { return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstancesAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstancesAtomicOperation.java index d6154e378f4..119fad1f003 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstancesAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstancesAtomicOperation.java @@ -19,10 +19,10 @@ import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.StopTaskRequest; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.TerminateInstancesDescription; - import 
java.util.List; -public class TerminateInstancesAtomicOperation extends AbstractEcsAtomicOperation { +public class TerminateInstancesAtomicOperation + extends AbstractEcsAtomicOperation { public TerminateInstancesAtomicOperation(TerminateInstancesDescription description) { super(description, "TERMINATE_ECS_INSTANCES"); @@ -35,12 +35,13 @@ public Void operate(List priorOutputs) { for (String taskId : description.getEcsTaskIds()) { updateTaskStatus("Terminating instance: " + taskId); - String clusterArn = containerInformationService.getClusterArn(description.getCredentialAccount(), description.getRegion(), taskId); + String clusterArn = + containerInformationService.getClusterArn( + description.getAccount(), description.getRegion(), taskId); StopTaskRequest request = new StopTaskRequest().withTask(taskId).withCluster(clusterArn); ecs.stopTask(request); } return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpdateServiceAndTaskConfigAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpdateServiceAndTaskConfigAtomicOperation.java index 61f535f885a..a2dc16fdedd 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpdateServiceAndTaskConfigAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpdateServiceAndTaskConfigAtomicOperation.java @@ -18,14 +18,14 @@ import com.netflix.spinnaker.clouddriver.ecs.deploy.description.UpdateServiceAndTaskConfigDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - import java.util.List; public class UpdateServiceAndTaskConfigAtomicOperation implements AtomicOperation { UpdateServiceAndTaskConfigDescription description; - public UpdateServiceAndTaskConfigAtomicOperation(UpdateServiceAndTaskConfigDescription description) { + public UpdateServiceAndTaskConfigAtomicOperation( + UpdateServiceAndTaskConfigDescription description) { this.description = description; } @@ -36,5 +36,4 @@ public Void operate(List priorOutputs) { return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpsertScalingPolicyAtomicOperation.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpsertScalingPolicyAtomicOperation.java index a96a67fdf60..7b8ce8b2cd4 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpsertScalingPolicyAtomicOperation.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/UpsertScalingPolicyAtomicOperation.java @@ -18,7 +18,6 @@ import com.netflix.spinnaker.clouddriver.ecs.deploy.description.UpsertScalingPolicyDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - import java.util.List; public class UpsertScalingPolicyAtomicOperation implements AtomicOperation { @@ -36,5 +35,4 @@ public Void operate(List priorOutputs) { return null; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CloneServiceAtomicOperationValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CloneServiceAtomicOperationValidator.java index b4c2a11dcd8..814a2afbf80 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CloneServiceAtomicOperationValidator.java +++ 
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CloneServiceAtomicOperationValidator.java @@ -17,19 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.CLONE_SERVER_GROUP) @Component("cloneServiceAtomicOperationValidator") public class CloneServiceAtomicOperationValidator extends DescriptionValidator { @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { // TODO - Implement this stub diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CommonValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CommonValidator.java index 0ebb9dc7a83..d6fec0d90ce 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CommonValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/CommonValidator.java @@ -19,9 +19,8 @@ import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription; import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import org.springframework.validation.Errors; - import java.util.Collection; import java.util.Set; import java.util.stream.Collectors; @@ -33,13 +32,18 @@ public CommonValidator(String erroryKey) { this.errorKey = erroryKey; } - void validateRegions(AbstractAmazonCredentialsDescription credentialsDescription, Collection regionNames, Errors errors, String attributeName) { + void validateRegions( + AbstractAmazonCredentialsDescription credentialsDescription, + Collection regionNames, + ValidationErrors errors, + String attributeName) { if (regionNames.isEmpty()) { rejectValue(errors, attributeName, "empty"); } else { - Set validRegions = credentialsDescription.getCredentials().getRegions().stream() - .map(AmazonCredentials.AWSRegion::getName) - .collect(Collectors.toSet()); + Set validRegions = + credentialsDescription.getCredentials().getRegions().stream() + .map(AmazonCredentials.AWSRegion::getName) + .collect(Collectors.toSet()); if (!validRegions.isEmpty() && !validRegions.containsAll(regionNames)) { rejectValue(errors, attributeName, "not.configured"); @@ -47,7 +51,10 @@ void validateRegions(AbstractAmazonCredentialsDescription credentialsDescription } } - boolean validateCredentials(AbstractAmazonCredentialsDescription credentialsDescription, Errors errors, String attributeName) { + boolean validateCredentials( + AbstractAmazonCredentialsDescription credentialsDescription, + ValidationErrors errors, + String attributeName) { if (credentialsDescription.getCredentials() == null) { rejectValue(errors, attributeName, "not.nullable"); return false; @@ -55,7 +62,7 @@ boolean 
validateCredentials(AbstractAmazonCredentialsDescription credentialsDesc return true; } - void validateCapacity(Errors errors, ServerGroup.Capacity capacity) { + void validateCapacity(ValidationErrors errors, ServerGroup.Capacity capacity) { if (capacity != null) { boolean desiredNotNull = capacity.getDesired() != null; boolean minNotNull = capacity.getMin() != null; @@ -75,7 +82,6 @@ void validateCapacity(Errors errors, ServerGroup.Capacity capacity) { positivityCheck(minNotNull, capacity.getMin(), "min", errors); positivityCheck(maxNotNull, capacity.getMax(), "max", errors); - if (minNotNull && maxNotNull) { if (capacity.getMin() > capacity.getMax()) { rejectValue(errors, "capacity.min.max.range", "invalid"); @@ -95,11 +101,12 @@ void validateCapacity(Errors errors, ServerGroup.Capacity capacity) { } } - void rejectValue(Errors errors, String field, String reason) { + void rejectValue(ValidationErrors errors, String field, String reason) { errors.rejectValue(field, errorKey + "." + field + "." + reason); } - private void positivityCheck(boolean isNotNull, Integer capacity, String fieldName, Errors errors) { + private void positivityCheck( + boolean isNotNull, Integer capacity, String fieldName, ValidationErrors errors) { if (isNotNull && capacity < 0) { rejectValue(errors, "capacity." + fieldName, "invalid"); } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/DeleteScalingPolicyAtomicOperationValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/DeleteScalingPolicyAtomicOperationValidator.java index 9a0b28b1521..3ad46aeba02 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/DeleteScalingPolicyAtomicOperationValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/DeleteScalingPolicyAtomicOperationValidator.java @@ -17,19 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.DELETE_SCALING_POLICY) @Component("deleteScalingPolicyAtomicOperationValidator") public class DeleteScalingPolicyAtomicOperationValidator extends DescriptionValidator { @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { // TODO - Implement this stub diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServerGroupDescriptionValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServerGroupDescriptionValidator.java index b732d439360..043ba08813a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServerGroupDescriptionValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServerGroupDescriptionValidator.java @@ -19,49 +19,72 @@ import 
com.amazonaws.services.ecs.model.PlacementStrategy; import com.amazonaws.services.ecs.model.PlacementStrategyType; import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Collections; import java.util.List; import java.util.Set; +import org.apache.commons.lang3.StringUtils; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("ecsCreateServerGroupDescriptionValidator") public class EcsCreateServerGroupDescriptionValidator extends CommonValidator { private static final Set BINPACK_VALUES = Sets.newHashSet("cpu", "memory"); - private static final Set SPREAD_VALUES = Sets.newHashSet( - "instanceId", - "attribute:ecs.availability-zone", - "attribute:ecs.instance-type", - "attribute:ecs.os-type", - "attribute:ecs.ami-id" - ); + private static final Set SPREAD_VALUES = + Sets.newHashSet( + "instanceId", + "attribute:ecs.availability-zone", + "attribute:ecs.instance-type", + "attribute:ecs.os-type", + "attribute:ecs.ami-id"); + + private static final Set RESERVED_ENVIRONMENT_VARIABLES = + Sets.newHashSet("SERVER_GROUP", "CLOUD_STACK", "CLOUD_DETAIL"); public EcsCreateServerGroupDescriptionValidator() { super("createServerGroupDescription"); } @Override - public void validate(List priorDescriptions, Object description, Errors errors) { - CreateServerGroupDescription createServerGroupDescription = (CreateServerGroupDescription) description; + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { + CreateServerGroupDescription createServerGroupDescription = + (CreateServerGroupDescription) description; validateCredentials(createServerGroupDescription, errors, "credentials"); validateCapacity(errors, createServerGroupDescription.getCapacity()); + if (createServerGroupDescription.getSubnetTypes() != null + && createServerGroupDescription.getSubnetTypes().size() > 0) { + if (StringUtils.isNotBlank(createServerGroupDescription.getSubnetType())) { + errors.rejectValue( + "subnetTypes", + errorKey + "." + "subnetTypes" + "." + "invalid", + "SubnetType (string) cannot be specified when SubnetTypes (list) is specified. 
Please use SubnetTypes (list)"); + } + } + if (createServerGroupDescription.getAvailabilityZones() != null) { if (createServerGroupDescription.getAvailabilityZones().size() != 1) { rejectValue(errors, "availabilityZones", "must.have.only.one"); } + + List zones = + createServerGroupDescription.getAvailabilityZones().values().iterator().next(); + if (zones == null || zones.isEmpty()) { + rejectValue(errors, "availabilityZones.zones", "not.nullable"); + } } else { rejectValue(errors, "availabilityZones", "not.nullable"); } if (createServerGroupDescription.getPlacementStrategySequence() != null) { - for (PlacementStrategy placementStrategy : createServerGroupDescription.getPlacementStrategySequence()) { + for (PlacementStrategy placementStrategy : + createServerGroupDescription.getPlacementStrategySequence()) { PlacementStrategyType type; try { type = PlacementStrategyType.fromValue(placementStrategy.getType()); @@ -84,51 +107,166 @@ public void validate(List priorDescriptions, Object description, Errors errors) } break; } - } } else { rejectValue(errors, "placementStrategySequence", "not.nullable"); } - if (createServerGroupDescription.getAutoscalingPolicies() == null) { - rejectValue(errors, "autoscalingPolicies", "not.nullable"); - } + Moniker moniker = createServerGroupDescription.getMoniker(); + if (moniker == null) { + if (createServerGroupDescription.getApplication() == null) { + rejectValue(errors, "application", "not.nullable"); + } + } else { + if (moniker.getApp() == null) { + rejectValue(errors, "moniker.app", "not.nullable"); + } + + if (StringUtils.isNotBlank(createServerGroupDescription.getApplication()) + && !StringUtils.equals(createServerGroupDescription.getApplication(), moniker.getApp())) { + rejectValue(errors, "moniker.app", "invalid"); + } - if (createServerGroupDescription.getApplication() == null) { - rejectValue(errors, "application", "not.nullable"); + if (StringUtils.isNotBlank(createServerGroupDescription.getFreeFormDetails()) + && !StringUtils.equals( + createServerGroupDescription.getFreeFormDetails(), moniker.getDetail())) { + rejectValue(errors, "moniker.detail", "invalid"); + } + + if (StringUtils.isNotBlank(createServerGroupDescription.getStack()) + && !StringUtils.equals(createServerGroupDescription.getStack(), moniker.getStack())) { + rejectValue(errors, "moniker.stack", "invalid"); + } } if (createServerGroupDescription.getEcsClusterName() == null) { rejectValue(errors, "ecsClusterName", "not.nullable"); } - if (createServerGroupDescription.getDockerImageAddress() == null) { - rejectValue(errors, "dockerImageAddress", "not.nullable"); + if (createServerGroupDescription.getServiceDiscoveryAssociations() != null) { + for (CreateServerGroupDescription.ServiceDiscoveryAssociation association : + createServerGroupDescription.getServiceDiscoveryAssociations()) { + if (association.getRegistry() == null) { + rejectValue(errors, "serviceDiscoveryAssociations", "item.invalid"); + } + } + } + + boolean hasTargetGroup = StringUtils.isNotBlank(createServerGroupDescription.getTargetGroup()); + validateComputeOptions(createServerGroupDescription, errors); + + if (!createServerGroupDescription.isUseTaskDefinitionArtifact()) { + if (createServerGroupDescription.getDockerImageAddress() == null) { + rejectValue(errors, "dockerImageAddress", "not.nullable"); + } + + if (createServerGroupDescription.getComputeUnits() != null) { + if (createServerGroupDescription.getComputeUnits() < 0) { + rejectValue(errors, "computeUnits", "invalid"); + } + } else { + rejectValue(errors, 
"computeUnits", "not.nullable"); + } + + if (createServerGroupDescription.getReservedMemory() != null) { + if (createServerGroupDescription.getReservedMemory() < 0) { + rejectValue(errors, "reservedMemory", "invalid"); + } + } else { + rejectValue(errors, "reservedMemory", "not.nullable"); + } + } else { + // Verify load balanced services w/ an artifact specify which container to load balance on + boolean hasLoadBalancedContainer = + StringUtils.isNotBlank(createServerGroupDescription.getLoadBalancedContainer()); + + if (hasTargetGroup && !hasLoadBalancedContainer) { + rejectValue(errors, "loadBalancedContainer", "not.nullable"); + } else if (!hasTargetGroup && hasLoadBalancedContainer) { + rejectValue(errors, "targetGroup", "not.nullable"); + } } if (createServerGroupDescription.getContainerPort() != null) { - if (createServerGroupDescription.getContainerPort() < 0 || createServerGroupDescription.getContainerPort() > 65535) { + if (createServerGroupDescription.getContainerPort() < 0 + || createServerGroupDescription.getContainerPort() > 65535) { rejectValue(errors, "containerPort", "invalid"); } - } else { + } else if (hasTargetGroup) { + // if a target group is specified, a container port must be specified rejectValue(errors, "containerPort", "not.nullable"); } - if (createServerGroupDescription.getComputeUnits() != null) { - if (createServerGroupDescription.getComputeUnits() < 0) { - rejectValue(errors, "computeUnits", "invalid"); + validateTargetGroupMappings(createServerGroupDescription, errors); + + // Verify that the environment variables set by the user do not contain reserved values + if (createServerGroupDescription.getEnvironmentVariables() != null) { + if (!Collections.disjoint( + createServerGroupDescription.getEnvironmentVariables().keySet(), + RESERVED_ENVIRONMENT_VARIABLES)) { + rejectValue(errors, "environmentVariables", "invalid"); } - } else { - rejectValue(errors, "computeUnits", "not.nullable"); } + } - if (createServerGroupDescription.getReservedMemory() != null) { - if (createServerGroupDescription.getReservedMemory() < 0) { - rejectValue(errors, "reservedMemory", "invalid"); + private void validateComputeOptions( + CreateServerGroupDescription createServerGroupDescription, ValidationErrors errors) { + if (createServerGroupDescription.getCapacityProviderStrategy() != null + && !createServerGroupDescription.getCapacityProviderStrategy().isEmpty()) { + if (!StringUtils.isBlank(createServerGroupDescription.getLaunchType())) { + errors.rejectValue( + "launchType", + errorKey + "." + "launchType" + "." + "invalid", + "LaunchType cannot be specified when CapacityProviderStrategy are specified."); } - } else { - rejectValue(errors, "reservedMemory", "not.nullable"); + } else if (createServerGroupDescription.getCapacityProviderStrategy() == null + && StringUtils.isBlank(createServerGroupDescription.getLaunchType())) { + errors.rejectValue( + "launchType", + errorKey + "." + "launchType" + "." + "invalid", + "LaunchType or CapacityProviderStrategy must be specified."); } + } + + private void validateTargetGroupMappings( + CreateServerGroupDescription createServerGroupDescription, ValidationErrors errors) { + if (createServerGroupDescription.getTargetGroupMappings() != null + && !createServerGroupDescription.getTargetGroupMappings().isEmpty()) { + + if (StringUtils.isNotEmpty(createServerGroupDescription.getTargetGroup())) { + // Only one of TargetGroup or TargetGroupMappings should be defined. + errors.rejectValue( + "targetGroup", + errorKey + "." + "targetGroup" + "." 
+ "invalid", + "TargetGroup cannot be specified when TargetGroupMapping.TargetGroup is specified. Please use TargetGroupMapping"); + } + + for (CreateServerGroupDescription.TargetGroupProperties targetGroupProperties : + createServerGroupDescription.getTargetGroupMappings()) { + // Verify each target group mapping contains a target group name, container name (or docker + // image address if it's a single container using inputs), and container port. + boolean hasTargetGroup = StringUtils.isNotBlank(targetGroupProperties.getTargetGroup()); + boolean hasContainerName = StringUtils.isNotBlank(targetGroupProperties.getContainerName()); + if (createServerGroupDescription.isUseTaskDefinitionArtifact()) { + if (hasTargetGroup && !hasContainerName) { + rejectValue(errors, "targetGroupMappings.containerName", "not.nullable"); + } else if (!hasTargetGroup && hasContainerName) { + rejectValue(errors, "targetGroupMappings.targetGroup", "not.nullable"); + } + } + + if (targetGroupProperties.getContainerPort() != null) { + if (targetGroupProperties.getContainerPort() < 0 + || targetGroupProperties.getContainerPort() > 65535) { + + rejectValue(errors, "targetGroupMappings.containerPort", "invalid"); + } + } else if (hasTargetGroup) { + // if a target group is specified, a container port must be specified + rejectValue(errors, "targetGroupMappings.containerPort", "not.nullable"); + } + } + } } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeServiceDescriptionValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeServiceDescriptionValidator.java index dbfe67251b3..88abf7103fc 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeServiceDescriptionValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeServiceDescriptionValidator.java @@ -16,14 +16,13 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ResizeServiceDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.Collections; import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.RESIZE_SERVER_GROUP) @Component("resizeServiceAtomicOperationValidator") @@ -34,13 +33,14 @@ public ResizeServiceDescriptionValidator() { } @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { ResizeServiceDescription typedDescription = (ResizeServiceDescription) description; boolean validCredentials = validateCredentials(typedDescription, errors, "credentials"); if (validCredentials) { - validateRegions(typedDescription, Collections.singleton(typedDescription.getRegion()), errors, "region"); + validateRegions( + typedDescription, Collections.singleton(typedDescription.getRegion()), errors, "region"); } if (typedDescription.getServerGroupName() == null) { @@ -49,5 +49,4 @@ public void validate(List priorDescriptions, Object description, Errors errors) validateCapacity(errors, typedDescription.getCapacity()); } - } diff --git 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ServerGroupDescriptionValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ServerGroupDescriptionValidator.java index 26b6c2fa4ca..7e3c2e9b490 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ServerGroupDescriptionValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ServerGroupDescriptionValidator.java @@ -16,9 +16,8 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ModifyServiceDescription; -import org.springframework.validation.Errors; - import java.util.Collections; import java.util.List; @@ -29,13 +28,14 @@ public ServerGroupDescriptionValidator(String errorKey) { } @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { ModifyServiceDescription typeDescription = (ModifyServiceDescription) description; boolean validCredentials = validateCredentials(typeDescription, errors, "credentials"); if (validCredentials) { - validateRegions(typeDescription, Collections.singleton(typeDescription.getRegion()), errors, "region"); + validateRegions( + typeDescription, Collections.singleton(typeDescription.getRegion()), errors, "region"); } if (typeDescription.getServerGroupName() == null) { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StartServiceAtomicOperationValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StartServiceAtomicOperationValidator.java index 037474ebd6e..d395683ae4c 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StartServiceAtomicOperationValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StartServiceAtomicOperationValidator.java @@ -17,19 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.START_SERVER_GROUP) @Component("startServiceAtomicOperationValidator") public class StartServiceAtomicOperationValidator extends DescriptionValidator { @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { // TODO - Implement this stub diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StopServiceAtomicOperationValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StopServiceAtomicOperationValidator.java index f73185cf470..cb8791b8c02 100644 --- 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StopServiceAtomicOperationValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/StopServiceAtomicOperationValidator.java @@ -17,19 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.STOP_SERVER_GROUP) @Component("stopServiceAtomicOperationValidator") public class StopServiceAtomicOperationValidator extends DescriptionValidator { @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { // TODO - Implement this stub diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstancesDescriptionValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstancesDescriptionValidator.java index 1e392dcb280..55d6ac4e256 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstancesDescriptionValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstancesDescriptionValidator.java @@ -16,41 +16,46 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.ecs.deploy.description.TerminateInstancesDescription; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.Collections; import java.util.List; import java.util.regex.Pattern; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.TERMINATE_INSTANCES) @Component("ecsTerminateInstancesDescriptionValidator") public class TerminateInstancesDescriptionValidator extends CommonValidator { - public static final Pattern TASK_ID_PATTERN = Pattern.compile("[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}"); + public static final Pattern OLD_TASK_ID_PATTERN = + Pattern.compile("[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}"); + public static final Pattern NEW_TASK_ID_PATTERN = Pattern.compile("[\\da-f]{32}"); public TerminateInstancesDescriptionValidator() { super("terminateInstancesDescription"); } @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { TerminateInstancesDescription typedDescription = (TerminateInstancesDescription) description; boolean validCredentials = validateCredentials(typedDescription, errors, "credentials"); if (validCredentials) { - validateRegions(typedDescription, Collections.singleton(typedDescription.getRegion()), errors, "region"); + validateRegions( + typedDescription, 
Collections.singleton(typedDescription.getRegion()), errors, "region"); } if (typedDescription.getEcsTaskIds() != null) { - typedDescription.getEcsTaskIds().forEach(taskId -> { - if (!TASK_ID_PATTERN.matcher(taskId).find()) { - rejectValue(errors, "ecsTaskIds." + taskId, "invalid"); - } - } - ); + typedDescription + .getEcsTaskIds() + .forEach( + taskId -> { + if (!OLD_TASK_ID_PATTERN.matcher(taskId).find() + && !NEW_TASK_ID_PATTERN.matcher(taskId).find()) { + rejectValue(errors, "ecsTaskIds." + taskId, "invalid"); + } + }); } else { rejectValue(errors, "ecsTaskIds", "not.nullable"); } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpdateServiceAndTaskConfigAtomicOperationValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpdateServiceAndTaskConfigAtomicOperationValidator.java index 0ba84fb38ed..c89b4707c15 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpdateServiceAndTaskConfigAtomicOperationValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpdateServiceAndTaskConfigAtomicOperationValidator.java @@ -17,19 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.UPDATE_LAUNCH_CONFIG) @Component("updateServiceAndTaskConfigAtomicOperationValidator") public class UpdateServiceAndTaskConfigAtomicOperationValidator extends DescriptionValidator { @Override - public void validate(List priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { // TODO - Implement this stub diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpsertScalingPolicyAtomicOperationValidator.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpsertScalingPolicyAtomicOperationValidator.java index 3eeba48045c..e08fff6837a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpsertScalingPolicyAtomicOperationValidator.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/UpsertScalingPolicyAtomicOperationValidator.java @@ -17,19 +17,18 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.ecs.EcsOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - import java.util.List; +import org.springframework.stereotype.Component; @EcsOperation(AtomicOperations.UPSERT_SCALING_POLICY) @Component("upsertScalingPolicyAtomicOperationValidator") public class UpsertScalingPolicyAtomicOperationValidator extends DescriptionValidator { @Override - public void validate(List 
priorDescriptions, Object description, Errors errors) { + public void validate(List priorDescriptions, Object description, ValidationErrors errors) { // TODO - Implement this stub diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsApplication.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsApplication.java index 5169a7909f1..f710362c5d0 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsApplication.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsApplication.java @@ -16,10 +16,9 @@ package com.netflix.spinnaker.clouddriver.ecs.model; import com.netflix.spinnaker.clouddriver.model.Application; -import lombok.Data; - import java.util.Map; import java.util.Set; +import lombok.Data; @Data public class EcsApplication implements Application { @@ -27,10 +26,16 @@ public class EcsApplication implements Application { private String name; Map attributes; Map> clusterNames; + Map> clusterNameMetadata; - public EcsApplication(String name, Map attributes, Map> clusterNames) { + public EcsApplication( + String name, + Map attributes, + Map> clusterNames, + Map> getClusterNameMetadata) { this.name = name; this.attributes = attributes; this.clusterNames = clusterNames; + this.clusterNameMetadata = getClusterNameMetadata; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsDockerImage.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsDockerImage.java index 59ee3801d1b..bd2b1a12f2a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsDockerImage.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsDockerImage.java @@ -16,12 +16,11 @@ package com.netflix.spinnaker.clouddriver.ecs.model; -import lombok.Data; - import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import lombok.Data; @Data public class EcsDockerImage { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSecurityGroup.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSecurityGroup.java index 4cd9cf0ca58..00f0131f016 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSecurityGroup.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSecurityGroup.java @@ -21,9 +21,8 @@ import com.netflix.spinnaker.clouddriver.model.SecurityGroup; import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary; import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; -import lombok.Data; - import java.util.Set; +import lombok.Data; @Data @JsonInclude(JsonInclude.Include.NON_EMPTY) @@ -41,16 +40,17 @@ public class EcsSecurityGroup implements SecurityGroup { final Set inboundRules; final Set outboundRules; - public EcsSecurityGroup(String id, - String name, - String vpcId, - String description, - String application, - String accountName, - String accountId, - String region, - Set inboundRules, - Set outboundRules) { + public EcsSecurityGroup( + String id, + String name, + String vpcId, + String description, + String application, + String accountName, + String accountId, + String region, + Set inboundRules, + Set outboundRules) { this.id = id; this.name = name; this.vpcId = vpcId; diff --git 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerCluster.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerCluster.java index 758f7ba1f53..b4c4e6e67ce 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerCluster.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerCluster.java @@ -21,11 +21,10 @@ import com.netflix.spinnaker.clouddriver.model.Cluster; import com.netflix.spinnaker.clouddriver.model.LoadBalancer; import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import lombok.Data; - import java.util.Collections; import java.util.HashSet; import java.util.Set; +import lombok.Data; @Data public class EcsServerCluster implements Cluster { @@ -36,11 +35,11 @@ public class EcsServerCluster implements Cluster { private String accountName; - private Set targetGroups = Collections.synchronizedSet(new HashSet()); + private Set targetGroups = + Collections.synchronizedSet(new HashSet()); private Set serverGroups = Collections.synchronizedSet(new HashSet()); - private Set loadBalancers = Collections.synchronizedSet(new HashSet()); - - public EcsServerCluster() { - } + private Set loadBalancers = + Collections.synchronizedSet(new HashSet()); + public EcsServerCluster() {} } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroup.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroup.java index 38c1bb6eb3b..ca456fe636b 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroup.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroup.java @@ -18,11 +18,11 @@ import com.netflix.spinnaker.clouddriver.model.Instance; import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import lombok.Data; -import lombok.NoArgsConstructor; - +import com.netflix.spinnaker.moniker.Moniker; import java.util.Map; import java.util.Set; +import lombok.Data; +import lombok.NoArgsConstructor; @Data @NoArgsConstructor @@ -39,6 +39,7 @@ public class EcsServerGroup implements ServerGroup { Set loadBalancers; Set securityGroups; Map launchConfig; + Image image; InstanceCounts instanceCounts; Capacity capacity; ImagesSummary imagesSummary; @@ -49,6 +50,7 @@ public class EcsServerGroup implements ServerGroup { String vpcId; AutoScalingGroup asg; Set metricAlarms; + Moniker moniker; @Override public Boolean isDisabled() { @@ -63,4 +65,10 @@ public static class AutoScalingGroup { Integer desiredCapacity; } + @Data + @NoArgsConstructor + public static class Image { + public String imageId; + public String name; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEvent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEvent.java index da9d9984b53..08bd9af7d87 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEvent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEvent.java @@ -1,8 +1,23 @@ -package com.netflix.spinnaker.clouddriver.ecs.model; +/* + * Copyright 2017 Lookout, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ -import lombok.Data; +package com.netflix.spinnaker.clouddriver.ecs.model; import java.util.Date; +import lombok.Data; @Data public class EcsServerGroupEvent { @@ -12,14 +27,13 @@ public class EcsServerGroupEvent { String id; EcsServerGroupEventStatus status; - public EcsServerGroupEvent(String message, Date createdAt, String id, EcsServerGroupEventStatus status) { + public EcsServerGroupEvent( + String message, Date createdAt, String id, EcsServerGroupEventStatus status) { this.message = message; this.createdAt = createdAt; this.id = id; this.status = status; } - public EcsServerGroupEvent() { - } - + public EcsServerGroupEvent() {} } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEventStatus.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEventStatus.java index 62f7410f5b8..bb38cadbfa7 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEventStatus.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsServerGroupEventStatus.java @@ -1,7 +1,22 @@ +/* + * Copyright 2017 Lookout, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package com.netflix.spinnaker.clouddriver.ecs.model; public enum EcsServerGroupEventStatus { - Success, Failure, Transition diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSubnet.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSubnet.java index 1cde00d35b5..8696b76a0f8 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSubnet.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsSubnet.java @@ -20,36 +20,35 @@ public class EcsSubnet extends AmazonSubnet { - public EcsSubnet() { - } + public EcsSubnet() {} - public EcsSubnet(String type, - String id, - String state, - String vpcId, - String cidrBlock, - Integer availableIpAddressCount, - String account, - String region, - String availabilityZone, - String purpose, - String target, - boolean deprecated) { + public EcsSubnet( + String type, + String id, + String state, + String vpcId, + String cidrBlock, + Integer availableIpAddressCount, + String account, + String accountId, + String region, + String availabilityZone, + String purpose, + String target, + boolean deprecated) { super( - type, - id, - state, - vpcId, - cidrBlock, - availableIpAddressCount, - account, - region, - availabilityZone, - purpose, - target, - deprecated - ); - - + type, + id, + state, + vpcId, + cidrBlock, + availableIpAddressCount, + account, + accountId, + region, + availabilityZone, + purpose, + target, + deprecated); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsTask.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsTask.java index 906003fdc33..a4277c06a0f 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsTask.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/EcsTask.java @@ -20,11 +20,10 @@ import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.model.HealthState; import com.netflix.spinnaker.clouddriver.model.Instance; -import lombok.Data; - import java.io.Serializable; import java.util.List; import java.util.Map; +import lombok.Data; @Data public class EcsTask implements Instance, Serializable { @@ -38,48 +37,57 @@ public class EcsTask implements Instance, Serializable { private String privateAddress; private NetworkInterface networkInterface; - public EcsTask(String name, - Long launchTime, - String lastStatus, - String desiredStatus, - String availabilityZone, - List> health, - String privateAddress, - NetworkInterface networkInterface) { + public EcsTask( + String name, + Long launchTime, + String lastStatus, + String desiredStatus, + String healthStatus, + String availabilityZone, + List> health, + String privateAddress, + NetworkInterface networkInterface, + boolean hasHealthCheck) { this.name = name; providerType = cloudProvider = EcsCloudProvider.ID; this.launchTime = launchTime; this.health = health; - healthState = calculateHealthState(lastStatus, desiredStatus); + healthState = calculateHealthState(lastStatus, desiredStatus, healthStatus, hasHealthCheck); zone = availabilityZone; this.privateAddress = privateAddress; this.networkInterface = networkInterface; } /** - * Maps the Last Status and Desired Status of a Tasks to a Health State understandable by Spinnaker - *

<p>
- * The mapping is based on: - * <p>
- * Task Life Cycle: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_life_cycle.html + * Maps the Last Status and Desired Status of a Tasks to a Health State understandable by + * Spinnaker + * + * <p>The mapping is based on: * - * @param lastStatus Last reported status of the Task + * <p>
Task Life Cycle: + * http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_life_cycle.html + * + * @param lastStatus Last reported status of the Task * @param desiredStatus Desired status of the Task * @return Spinnaker understandable Health State */ - private HealthState calculateHealthState(String lastStatus, String desiredStatus) { - HealthState currentState = null; + private HealthState calculateHealthState( + String lastStatus, String desiredStatus, String healthStatus, boolean hasHealthCheck) { + + if (hasHealthCheck && "UNKNOWN".equals(healthStatus)) { + return HealthState.Starting; + } else if ("UNHEALTHY".equals(healthStatus)) { + return HealthState.Down; + } if ("RUNNING".equals(desiredStatus) && "PENDING".equals(lastStatus)) { - currentState = HealthState.Starting; + return HealthState.Starting; } else if ("RUNNING".equals(lastStatus)) { - currentState = HealthState.Up; + return HealthState.Up; } else if ("STOPPED".equals(desiredStatus)) { - currentState = HealthState.Down; + return HealthState.Down; } else { - currentState = HealthState.Unknown; + return HealthState.Unknown; } - - return currentState; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/TaskDefinition.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/TaskDefinition.java index ef588ae1d81..a90c3b77d97 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/TaskDefinition.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/TaskDefinition.java @@ -17,11 +17,10 @@ package com.netflix.spinnaker.clouddriver.ecs.model; import com.amazonaws.services.ecs.model.KeyValuePair; +import java.util.Collection; import lombok.Data; import lombok.NoArgsConstructor; -import java.util.Collection; - @Data @NoArgsConstructor public class TaskDefinition { @@ -31,7 +30,7 @@ public class TaskDefinition { int containerPort; int cpuUnits; int memoryReservation; + int memoryLimit; Collection environmentVariables; - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancer.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancer.java new file mode 100644 index 00000000000..9b71a0621f0 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancer.java @@ -0,0 +1,59 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
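Returning to the calculateHealthState change above: a minimal sketch of the new behavior, using the EcsTask constructor signature introduced in this hunk. The task values are hypothetical; the point is that a RUNNING task whose container health check still reports UNKNOWN now maps to Starting rather than Up.

import com.netflix.spinnaker.clouddriver.ecs.model.EcsTask;
import com.netflix.spinnaker.clouddriver.model.HealthState;

class EcsTaskHealthSketch {
  static void sketch() {
    // All values are hypothetical, chosen to exercise the health-check branch.
    EcsTask task =
        new EcsTask(
            "myapp-prod-v003", // name
            System.currentTimeMillis(), // launchTime
            "RUNNING", // lastStatus
            "RUNNING", // desiredStatus
            "UNKNOWN", // healthStatus reported by the container health check
            "us-east-1a", // availabilityZone
            null, // health
            null, // privateAddress
            null, // networkInterface
            true); // hasHealthCheck
    // With a health check defined but not yet passing, the task is Starting.
    assert task.getHealthState() == HealthState.Starting;
  }
}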
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer; + +import com.amazonaws.services.elasticloadbalancingv2.model.Listener; +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import java.util.List; +import java.util.Map; +import java.util.Set; +import lombok.Data; + +@Data +public class EcsLoadBalancer implements LoadBalancer { + // TODO: refactor EcsLoadBalancerCache so can be extended here? + + private String account; + private String region; + private String loadBalancerArn; + private String loadBalancerType; + private String cloudProvider = EcsCloudProvider.ID; + private List listeners; + private List availabilityZones; + private String ipAddressType; + private String loadBalancerName; + private String canonicalHostedZoneId; + private String vpcId; + private String dnsname; + private Long createdTime; + private List subnets; + private List securityGroups; + private List targetGroups; + private Set serverGroups; + private Map> targetGroupServices; + + @Override + public String getName() { + return loadBalancerName; + } + + @Override + public String getType() { + return cloudProvider; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerDetail.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerDetail.java index e7a2494e3b4..6c107424bdd 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerDetail.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerDetail.java @@ -16,12 +16,11 @@ package com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer; -import lombok.Data; +import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.Details; import java.util.LinkedList; import java.util.List; - -import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.Details; +import lombok.Data; @Data public class EcsLoadBalancerDetail implements Details { @@ -29,7 +28,7 @@ public class EcsLoadBalancerDetail implements Details { String region; String name; String vpcId; - String type = "aws"; + String type = "ecs"; String loadBalancerType; List securityGroups = new LinkedList<>(); List targetGroups = new LinkedList<>(); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummary.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummary.java index b2e667a6ef7..3d5ec1b3fb8 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummary.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummary.java @@ -16,16 +16,13 @@ package com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer; - -import lombok.Data; +import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.Item; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.Item; - +import lombok.Data; @Data public class EcsLoadBalancerSummary implements Item { diff --git 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByAccount.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByAccount.java index 473c173d89a..e90a8916525 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByAccount.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByAccount.java @@ -16,14 +16,13 @@ package com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer; -import lombok.Data; +import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.ByAccount; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.ByAccount; +import lombok.Data; @Data public class EcsLoadBalancerSummaryByAccount implements ByAccount { @@ -31,7 +30,7 @@ public class EcsLoadBalancerSummaryByAccount implements ByAccount { private String name; private Map byRegions = new HashMap<>(); - public EcsLoadBalancerSummaryByAccount withName(String name){ + public EcsLoadBalancerSummaryByAccount withName(String name) { setName(name); return this; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByRegion.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByRegion.java index a208d197c9e..b63a493badf 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByRegion.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsLoadBalancerSummaryByRegion.java @@ -16,19 +16,18 @@ package com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer; -import lombok.Data; +import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.ByRegion; import java.util.LinkedList; import java.util.List; - -import static com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider.ByRegion; +import lombok.Data; @Data public class EcsLoadBalancerSummaryByRegion implements ByRegion { private String name; private List loadBalancers = new LinkedList<>(); - public EcsLoadBalancerSummaryByRegion withName(String name){ + public EcsLoadBalancerSummaryByRegion withName(String name) { setName(name); return this; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsTargetGroup.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsTargetGroup.java index 1e7a23b79d8..deec7101681 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsTargetGroup.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/model/loadbalancer/EcsTargetGroup.java @@ -17,10 +17,9 @@ package com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer; import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; -import lombok.Data; - import java.util.List; import java.util.Map; +import lombok.Data; @Data public class EcsTargetGroup implements LoadBalancerProvider.Details { @@ -28,6 +27,7 @@ public class EcsTargetGroup implements LoadBalancerProvider.Details { List loadBalancerNames; List instances; + String targetType; Integer healthCheckTimeoutSeconds; String 
targetGroupArn; String healthCheckPort; @@ -42,5 +42,4 @@ public class EcsTargetGroup implements LoadBalancerProvider.Details { String vpcId; Integer unhealthyThresholdCount; Map attributes; - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsDefaultNamer.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsDefaultNamer.java new file mode 100644 index 00000000000..03d2906740d --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsDefaultNamer.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.clouddriver.names.NamingStrategy; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import org.springframework.stereotype.Component; + +/* + * The default naming strategy for ECS that just delegates to the NamerRegistry default + */ +@Component +public class EcsDefaultNamer implements NamingStrategy { + + private final Namer namer; + + public EcsDefaultNamer() { + this.namer = NamerRegistry.getDefaultNamer(); + } + + @Override + public String getName() { + return "default"; + } + + @Override + public void applyMoniker(EcsResource resource, Moniker moniker) { + namer.applyMoniker(resource, moniker); + } + + @Override + public Moniker deriveMoniker(EcsResource resource) { + return namer.deriveMoniker(resource); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsResource.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsResource.java new file mode 100644 index 00000000000..182c39d4aa3 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsResource.java @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.amazonaws.services.ecs.model.Tag; +import java.util.List; + +public interface EcsResource { + + String getName(); + + void setName(String name); + + List getResourceTags(); +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsResourceService.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsResourceService.java new file mode 100644 index 00000000000..c0f497aa104 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsResourceService.java @@ -0,0 +1,46 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.amazonaws.services.ecs.model.Service; +import com.amazonaws.services.ecs.model.Tag; +import java.util.List; + +public class EcsResourceService implements EcsResource { + + private final Service service; + + public EcsResourceService(final Service service) { + this.service = service; + } + + @Override + public String getName() { + return service.getServiceName(); + } + + @Override + public void setName(String name) { + service.setServiceName(name); + } + + @Override + public List getResourceTags() { + return service.getTags(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupName.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupName.java new file mode 100644 index 00000000000..67c7bfc60df --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupName.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.netflix.spinnaker.moniker.Moniker; + +public class EcsServerGroupName { + + private Moniker moniker; + + public EcsServerGroupName(String fullName) { + this(MonikerHelper.applicationNameToMoniker(fullName)); + } + + public EcsServerGroupName(Moniker moniker) { + this.moniker = moniker; + } + + public Moniker getMoniker() { + return moniker; + } + + public String getFamilyName() { + String cluster = moniker.getCluster(); + String detail = moniker.getDetail(); + String stack = moniker.getStack(); + if (cluster == null) { + cluster = MonikerHelper.getClusterName(moniker.getApp(), stack, detail); + } + return cluster; + } + + public String getServiceName() { + return String.join("-", getFamilyName(), getContainerName()); + } + + public String getContainerName() { + return String.format("v%03d", moniker.getSequence()); + } + + @Override + public String toString() { + return getServiceName(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupNameResolver.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupNameResolver.java new file mode 100644 index 00000000000..75d2c874e8c --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupNameResolver.java @@ -0,0 +1,149 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.*; +import com.google.common.collect.Lists; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import java.util.*; +import java.util.stream.IntStream; +import org.apache.commons.lang3.StringUtils; + +public class EcsServerGroupNameResolver { + + private static final int SEQUENTIAL_NUMBERING_NAMESPACE_SIZE = 1000; + + private static final int MAX_NEXT_SERVER_GROUP_ATTEMPTS = 5; + + private final String ecsClusterName; + private final AmazonECS ecs; + private final String region; + private final Namer naming; + + public EcsServerGroupNameResolver( + String ecsClusterName, AmazonECS ecs, String region, Namer namer) { + this.ecsClusterName = ecsClusterName; + this.ecs = ecs; + this.region = region; + this.naming = namer; + } + + public EcsServerGroupName resolveNextName(String application, String stack, String detail) { + Moniker moniker = Moniker.builder().app(application).detail(detail).stack(stack).build(); + + return resolveNextName(moniker); + } + + public EcsServerGroupName resolveNextName(Moniker currentName) { + Set takenSequences = new HashSet<>(); + + // 1. Get a list of all of the services + List allServices = listAllServices(ecsClusterName); + + // 2. 
Get the details of the services in the same server group + List> serviceBatches = Lists.partition(allServices, 10); + for (List serviceBatch : serviceBatches) { + DescribeServicesRequest request = + new DescribeServicesRequest() + .withCluster(ecsClusterName) + .withServices(serviceBatch) + .withInclude("TAGS"); + DescribeServicesResult result = ecs.describeServices(request); + for (Service service : result.getServices()) { + Moniker moniker = naming.deriveMoniker(new EcsResourceService(service)); + + if (isSameName(currentName.getApp(), moniker.getApp()) + && isSameName(currentName.getDetail(), moniker.getDetail()) + && isSameName(currentName.getStack(), moniker.getStack()) + && moniker.getSequence() != null) { + takenSequences.add(moniker.getSequence()); + } + } + } + + // 3. Find the next free sequence number + int currentMaxSequence = takenSequences.stream().reduce(Math::max).orElse(0); + IntStream after = IntStream.range(currentMaxSequence, SEQUENTIAL_NUMBERING_NAMESPACE_SIZE); + IntStream before = IntStream.range(0, currentMaxSequence); + + Moniker.MonikerBuilder nameBuilder = + Moniker.builder() + .app(currentName.getApp()) + .detail(currentName.getDetail()) + .stack(currentName.getStack()); + + Moniker newMoniker = + IntStream.concat(after, before) + .filter(s -> !takenSequences.contains(s)) + .mapToObj(s -> nameBuilder.sequence(s).build()) + .limit(MAX_NEXT_SERVER_GROUP_ATTEMPTS) + .filter(moniker -> isNotTaken(moniker)) + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + "All server group names for cluster " + + ecsClusterName + + " in " + + region + + " are taken.")); + + return new EcsServerGroupName(newMoniker); + } + + private boolean isSameName(String name, String name2) { + return (StringUtils.isBlank(name) && StringUtils.isBlank(name2)) + || StringUtils.equals(name, name2); + } + + private boolean isNotTaken(Moniker newMoniker) { + String newServiceName = new EcsServerGroupName(newMoniker).getServiceName(); + + // An ECS service with this name might exist already in "Draining" state, + // so it would not show up in the "taken slots" list. + // We need to describe it to determine if it does exist before using the name + DescribeServicesRequest request = + new DescribeServicesRequest().withCluster(ecsClusterName).withServices(newServiceName); + DescribeServicesResult result = ecs.describeServices(request); + + // an active or draining ECS service with this name was not found + return result.getServices().isEmpty() + || result.getServices().get(0).getStatus().equals("INACTIVE"); + } + + private List listAllServices(String ecsClusterName) { + List allServices = new ArrayList<>(); + String nextToken = null; + do { + ListServicesRequest request = new ListServicesRequest().withCluster(ecsClusterName); + if (nextToken != null) { + request.setNextToken(nextToken); + } + + ListServicesResult result = ecs.listServices(request); + for (String serviceArn : result.getServiceArns()) { + allServices.add(serviceArn); + } + + nextToken = result.getNextToken(); + } while (nextToken != null && nextToken.length() != 0); + return allServices; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsTagNamer.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsTagNamer.java new file mode 100644 index 00000000000..f34dd184e96 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/EcsTagNamer.java @@ -0,0 +1,134 @@ +/* + * Copyright 2020 Expedia, Inc. 
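To make the naming scheme used by this resolver concrete, a minimal sketch with a hypothetical moniker: the family name follows the frigga app-stack-detail convention, and getServiceName() appends the zero-padded sequence produced by getContainerName().

import com.netflix.spinnaker.clouddriver.ecs.names.EcsServerGroupName;
import com.netflix.spinnaker.moniker.Moniker;

class EcsServerGroupNameSketch {
  static void sketch() {
    // "myapp", "prod" and "canary" are invented values for illustration.
    EcsServerGroupName name =
        new EcsServerGroupName(
            Moniker.builder().app("myapp").stack("prod").detail("canary").sequence(3).build());
    String family = name.getFamilyName(); // "myapp-prod-canary"
    String service = name.getServiceName(); // "myapp-prod-canary-v003"
  }
}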
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.amazonaws.services.ecs.model.Tag; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.names.NamingStrategy; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.AbstractMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import org.jetbrains.annotations.NotNull; +import org.springframework.stereotype.Component; + +@Component +public class EcsTagNamer implements NamingStrategy { + + // Borrow naming convention from KubernetesManifestLabeler + private static final String SPINNAKER_ANNOTATION = "spinnaker.io"; + private static final String MONIKER_ANNOTATION_PREFIX = "moniker." + SPINNAKER_ANNOTATION; + public static final String CLUSTER = MONIKER_ANNOTATION_PREFIX + "/cluster"; + public static final String APPLICATION = MONIKER_ANNOTATION_PREFIX + "/application"; + public static final String STACK = MONIKER_ANNOTATION_PREFIX + "/stack"; + public static final String DETAIL = MONIKER_ANNOTATION_PREFIX + "/detail"; + public static final String SEQUENCE = MONIKER_ANNOTATION_PREFIX + "/sequence"; + + @Override + public String getName() { + return "tags"; + } + + @Override + public void applyMoniker(EcsResource resource, Moniker moniker) { + applyTags(resource, moniker); + } + + @Override + public Moniker deriveMoniker(EcsResource resource) { + return getMoniker(resource); + } + + private static void applyTags(EcsResource resource, Moniker moniker) { + Map tags = new TagMap(resource.getResourceTags()); + setIfPresent(value -> tags.putIfAbsent(APPLICATION, value), moniker.getApp()); + setIfPresent(value -> tags.putIfAbsent(CLUSTER, value), moniker.getCluster()); + setIfPresent(value -> tags.putIfAbsent(DETAIL, value), moniker.getDetail()); + setIfPresent(value -> tags.putIfAbsent(STACK, value), moniker.getStack()); + setIfPresent( + value -> tags.put(SEQUENCE, value), + moniker.getSequence() != null + ? 
moniker.getSequence().toString() + : null); // Always overwrite sequence + } + + private static Moniker getMoniker(EcsResource resource) { + String name = resource.getName(); + Names parsed = Names.parseName(name); + + Moniker moniker = + Moniker.builder() + .app(parsed.getApp()) + .cluster(parsed.getCluster()) + .detail(parsed.getDetail()) + .stack(parsed.getStack()) + .sequence(parsed.getSequence()) + .build(); + + Map tags = new TagMap(resource.getResourceTags()); + if (moniker.getApp() != null && tags != null) { + setIfPresent(moniker::setApp, tags.get(APPLICATION)); + String cluster = tags.get(CLUSTER); + String stack = tags.get(STACK); + String detail = tags.get(DETAIL); + String sequence = tags.get(SEQUENCE); + if (cluster == null && (detail != null || stack != null)) { + // If detail or stack is set and not cluster, we generate the cluster name using frigga + // convention (app-stack-detail) + cluster = MonikerHelper.getClusterName(moniker.getApp(), stack, detail); + } + setIfPresent(moniker::setStack, stack); + setIfPresent(moniker::setDetail, detail); + setIfPresent(moniker::setCluster, cluster); + setIfPresent(moniker::setSequence, sequence != null ? Integer.parseInt(sequence) : null); + } + + return moniker; + } + + private static void setIfPresent(Consumer setter, T value) { + if (value != null) { + setter.accept(value); + } + } + + private static class TagMap extends AbstractMap { + + private final List tags; + + private TagMap(final List tags) { + this.tags = tags; + } + + @NotNull + @Override + public Set> entrySet() { + return tags.stream().collect(Collectors.toMap(Tag::getKey, Tag::getValue)).entrySet(); + } + + @Override + public String put(String key, String value) { + String prev = remove(key); + tags.add(new Tag().withKey(key).withValue(value)); + return prev; + } + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/MonikerHelper.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/MonikerHelper.java new file mode 100644 index 00000000000..c7409b0539b --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/names/MonikerHelper.java @@ -0,0 +1,51 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
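A minimal sketch of how the tag-based namer above derives a moniker, with a hypothetical service: frigga parses the app and sequence out of the service name, while the moniker tags supply the stack and, since no cluster tag is present, the cluster is rebuilt with the app-stack convention.

import com.amazonaws.services.ecs.model.Service;
import com.amazonaws.services.ecs.model.Tag;
import com.netflix.spinnaker.clouddriver.ecs.names.EcsResourceService;
import com.netflix.spinnaker.clouddriver.ecs.names.EcsTagNamer;
import com.netflix.spinnaker.moniker.Moniker;

class EcsTagNamerSketch {
  static void sketch() {
    // Hypothetical service name and tag values, for illustration only.
    Service service =
        new Service()
            .withServiceName("myapp-v003")
            .withTags(
                new Tag().withKey("moniker.spinnaker.io/application").withValue("myapp"),
                new Tag().withKey("moniker.spinnaker.io/stack").withValue("prod"));

    Moniker moniker = new EcsTagNamer().deriveMoniker(new EcsResourceService(service));
    // Expected: app "myapp", stack "prod", cluster "myapp-prod" (derived),
    // sequence 3 (parsed from the "-v003" suffix by frigga).
  }
}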
+ * + */ + +package com.netflix.spinnaker.clouddriver.ecs.names; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Objects; +import org.apache.commons.lang3.StringUtils; + +public class MonikerHelper { + + public static String getClusterName(String appName, String stack, String detail) { + stack = Objects.toString(stack, ""); + + if (StringUtils.isNotEmpty(detail)) { + return String.join("-", appName, stack, detail); + } + + if (StringUtils.isNotEmpty(stack)) { + return String.join("-", appName, stack); + } + + return appName; + } + + public static Moniker applicationNameToMoniker(String appName) { + Names names = Names.parseName(appName); + return Moniker.builder() + .app(names.getApp()) + .stack(names.getStack()) + .detail(names.getDetail()) + .cluster(names.getCluster()) + .sequence(names.getSequence()) + .build(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/EcsProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/EcsProvider.java index ca1153d31a4..1d240ec60c4 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/EcsProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/EcsProvider.java @@ -16,13 +16,18 @@ package com.netflix.spinnaker.clouddriver.ecs.provider; -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ALARMS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SCALABLE_TARGETS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; + import com.netflix.spinnaker.clouddriver.cache.SearchableProvider; import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCachingAgent; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; - +import com.netflix.spinnaker.clouddriver.security.BaseProvider; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -32,35 +37,24 @@ import java.util.Set; import java.util.stream.Collectors; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ALARMS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SCALABLE_TARGETS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; - - -public class EcsProvider extends AgentSchedulerAware implements SearchableProvider { +public class EcsProvider extends BaseProvider implements SearchableProvider { public static final String NAME = EcsProvider.class.getName(); - private static final Set defaultCaches = new 
HashSet<>(Arrays.asList( - SERVICES.toString(), ECS_CLUSTERS.toString(), TASKS.toString(), - CONTAINER_INSTANCES.toString(), TASK_DEFINITIONS.toString(), ALARMS.toString(), - SCALABLE_TARGETS.toString())); + private static final Set defaultCaches = + new HashSet<>( + Arrays.asList( + SERVICES.toString(), + ECS_CLUSTERS.toString(), + TASKS.toString(), + CONTAINER_INSTANCES.toString(), + TASK_DEFINITIONS.toString(), + ALARMS.toString(), + SCALABLE_TARGETS.toString())); private static final Map urlMappingTemplates = new HashMap<>(); - - private final Collection agents; - private final AccountCredentialsRepository accountCredentialsRepository; private final Keys keys = new Keys(); private Collection healthAgents; - public EcsProvider(AccountCredentialsRepository accountCredentialsRepository, Collection agents) { - this.agents = agents; - this.accountCredentialsRepository = accountCredentialsRepository; - } - @Override public Set getDefaultCaches() { return defaultCaches; @@ -72,8 +66,9 @@ public Map getUrlMappingTemplates() { } @Override - public Map getSearchResultHydrators() { - //TODO: Implement if needed - see InstanceSearchResultHydrator as an example. + public Map + getSearchResultHydrators() { + // TODO: Implement if needed - see InstanceSearchResultHydrator as an example. return Collections.emptyMap(); } @@ -87,21 +82,16 @@ public String getProviderName() { return NAME; } - @Override - public Collection getAgents() { - return agents; - } - - public void synchronizeHealthAgents() { - healthAgents = Collections.unmodifiableCollection(agents.stream() - .filter(a -> a instanceof HealthProvidingCachingAgent) - .map(a -> (HealthProvidingCachingAgent) a) - .collect(Collectors.toList())); + healthAgents = + Collections.unmodifiableCollection( + getAgents().stream() + .filter(a -> a instanceof HealthProvidingCachingAgent) + .map(a -> (HealthProvidingCachingAgent) a) + .collect(Collectors.toList())); } public Collection getHealthAgents() { return Collections.unmodifiableCollection(healthAgents); } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsAwsAwareCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsAwsAwareCachingAgent.java new file mode 100644 index 00000000000..85d88ae9eab --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsAwsAwareCachingAgent.java @@ -0,0 +1,80 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentExecution; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * AbstractEcsAwsAwareCachingAgent provides an AWS Provider cache for ECS caching agents that need + * to access resources cached by the AWS Provider. + */ +abstract class AbstractEcsAwsAwareCachingAgent extends AbstractEcsCachingAgent { + protected ProviderCache awsProviderCache; + + AbstractEcsAwsAwareCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider) { + super(account, region, amazonClientProvider, awsCredentialsProvider); + } + + public void setAwsCache(ProviderCache awsCache) { + this.awsProviderCache = awsCache; + } + + @Override + public AgentExecution getAgentExecution(ProviderRegistry providerRegistry) { + return new EcsAwsAwareCacheExecution(providerRegistry); + } + + class EcsAwsAwareCacheExecution extends CacheExecution { + private final Logger log = LoggerFactory.getLogger(EcsAwsAwareCacheExecution.class); + private final ProviderRegistry providerRegistry; + + public EcsAwsAwareCacheExecution(ProviderRegistry providerRegistry) { + super(providerRegistry); + this.providerRegistry = providerRegistry; + } + + /** + * Retrieves the awsProviderCache from the provider registry and sets it on the agent before + * loading its data. 
+ */ + @Override + public CacheResult executeAgentWithoutStore(Agent agent) { + AbstractEcsAwsAwareCachingAgent cachingAgent = (AbstractEcsAwsAwareCachingAgent) agent; + ProviderCache ecsCache = providerRegistry.getProviderCache(cachingAgent.getProviderName()); + + String awsProviderName = AwsProvider.PROVIDER_NAME; + log.info("Setting AWS Provider: " + awsProviderName); + ProviderCache awsCache = providerRegistry.getProviderCache(awsProviderName); + cachingAgent.setAwsCache(awsCache); + + return cachingAgent.loadData(ecsCache); + } + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsCachingAgent.java index 3bf1a66b2ca..47ac8a89cdc 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsCachingAgent.java @@ -16,11 +16,17 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; + import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.ListClustersRequest; import com.amazonaws.services.ecs.model.ListClustersResult; import com.google.common.base.CaseFormat; +import com.netflix.spinnaker.cats.agent.AccountAware; import com.netflix.spinnaker.cats.agent.AgentDataType; import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.agent.CachingAgent; @@ -31,9 +37,6 @@ import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -41,12 +44,10 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; - -abstract class AbstractEcsCachingAgent implements CachingAgent { +abstract class AbstractEcsCachingAgent implements CachingAgent, AccountAware { private final Logger log = LoggerFactory.getLogger(getClass()); final AmazonClientProvider amazonClientProvider; @@ -55,7 +56,11 @@ abstract class AbstractEcsCachingAgent implements CachingAgent { final String region; final String accountName; - AbstractEcsCachingAgent(NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider) { + AbstractEcsCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider) { this.account = account; this.accountName = account.getName(); 
this.region = region; @@ -65,6 +70,7 @@ abstract class AbstractEcsCachingAgent implements CachingAgent { /** * Fetches items from the ECS service. + * * @param ecs The AmazonECS client that will be used to make the queries. * @param providerCache A ProviderCache that is used to access already existing cache. * @return A list of generic type objects. @@ -72,11 +78,14 @@ abstract class AbstractEcsCachingAgent implements CachingAgent { protected abstract List getItems(AmazonECS ecs, ProviderCache providerCache); /** - * Generates a map of CacheData collections associated to a key namespace from a given collection of generic type objects. + * Generates a map of CacheData collections associated to a key namespace from a given collection + * of generic type objects. + * * @param cacheableItems A collection of generic type objects. * @return A map of CacheData collections belonging to a key namespace. */ - protected abstract Map> generateFreshData(Collection cacheableItems); + protected abstract Map> generateFreshData( + Collection cacheableItems); @Override public String getProviderName() { @@ -93,18 +102,21 @@ public CacheResult loadData(ProviderCache providerCache) { } /** - * Provides a set of ECS cluster ARNs. - * Either uses the cache, or queries the ECS service. + * Provides a set of ECS cluster ARNs. Either uses the cache, or queries the ECS service. + * * @param ecs The AmazonECS client to use for querying. * @param providerCache The ProviderCache to retrieve clusters from. * @return A set of ECS cluster ARNs. */ Set getClusters(AmazonECS ecs, ProviderCache providerCache) { - Set clusters = providerCache.getAll(ECS_CLUSTERS.toString()).stream() - .filter(cacheData -> cacheData.getAttributes().get("region").equals(region) && - cacheData.getAttributes().get("account").equals(accountName)) - .map(cacheData -> (String) cacheData.getAttributes().get("clusterArn")) - .collect(Collectors.toSet()); + Set clusters = + providerCache.getAll(ECS_CLUSTERS.toString()).stream() + .filter( + cacheData -> + cacheData.getAttributes().get("region").equals(region) + && cacheData.getAttributes().get("account").equals(accountName)) + .map(cacheData -> (String) cacheData.getAttributes().get("clusterArn")) + .collect(Collectors.toSet()); if (clusters == null || clusters.isEmpty()) { clusters = new HashSet<>(); @@ -125,57 +137,72 @@ Set getClusters(AmazonECS ecs, ProviderCache providerCache) { } /** - * Provides the key namespace that the caching agent is authoritative of. - * Currently only supports the caching agent being authoritative over one key namespace. + * Provides the key namespace that the caching agent is authoritative of. Currently only supports + * the caching agent being authoritative over one key namespace. + * * @return Key namespace. */ String getAuthoritativeKeyName() { - Collection authoritativeNamespaces = getProvidedDataTypes().stream() - .filter(agentDataType -> agentDataType.getAuthority().equals(AUTHORITATIVE)) - .collect(Collectors.toSet()); + Collection authoritativeNamespaces = + getProvidedDataTypes().stream() + .filter(agentDataType -> agentDataType.getAuthority().equals(AUTHORITATIVE)) + .collect(Collectors.toSet()); if (authoritativeNamespaces.size() != 1) { - throw new RuntimeException("AbstractEcsCachingAgent supports only one authoritative key namespace. " + - authoritativeNamespaces.size() + " authoritative key namespace were given."); + throw new RuntimeException( + "AbstractEcsCachingAgent supports only one authoritative key namespace. 
" + + authoritativeNamespaces.size() + + " authoritative key namespace were given."); } return authoritativeNamespaces.iterator().next().getTypeName(); } - CacheResult buildCacheResult(String authoritativeKeyName, List items, ProviderCache providerCache) { - String prettyKeyName = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, authoritativeKeyName); + CacheResult buildCacheResult( + String authoritativeKeyName, List items, ProviderCache providerCache) { + String prettyKeyName = + CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, authoritativeKeyName); Map> dataMap = generateFreshData(items); - //Old keys can come from different account/region, filter them to the current account/region. - Set oldKeys = providerCache.getAll(authoritativeKeyName).stream() - .map(CacheData::getId) - .filter(key -> keyAccountRegionFilter(authoritativeKeyName, key)) - .collect(Collectors.toSet()); + // Old keys can come from different account/region, filter them to the current account/region. + Set oldKeys = + providerCache.getAll(authoritativeKeyName).stream() + .map(CacheData::getId) + .filter(key -> keyAccountRegionFilter(authoritativeKeyName, key)) + .collect(Collectors.toSet()); - Map> evictions = computeEvictableData(dataMap.get(authoritativeKeyName), oldKeys); + Map> evictions = + computeEvictableData(dataMap.get(authoritativeKeyName), oldKeys); evictions = addExtraEvictions(evictions); - log.info("Evicting " + evictions.size() + " " + prettyKeyName + (evictions.size() > 1 ? "s" : "") + " in " + getAgentType()); + log.info( + "Evicting " + + evictions.size() + + " " + + prettyKeyName + + (evictions.size() > 1 ? "s" : "") + + " in " + + getAgentType()); return new DefaultCacheResult(dataMap, evictions); } /** - * Evicts cache that does not belong to an entity on the ECS service. - * This is done by evicting old keys that are no longer found in the new keys provided by the new data. + * Evicts cache that does not belong to an entity on the ECS service. This is done by evicting old + * keys that are no longer found in the new keys provided by the new data. + * * @param newData New data that contains new keys. * @param oldKeys Old keys. - * @return Key collection associated to the key namespace the the caching agent is authoritative of. + * @return Key collection associated to the key namespace the the caching agent is authoritative + * of. */ - private Map> computeEvictableData(Collection newData, Collection oldKeys) { - //New data can only come from the current account and region, no need to filter. - Set newKeys = newData.stream() - .map(CacheData::getId) - .collect(Collectors.toSet()); + private Map> computeEvictableData( + Collection newData, Collection oldKeys) { + // New data can only come from the current account and region, no need to filter. + Set newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet()); - Set evictedKeys = oldKeys.stream() - .filter(oldKey -> !newKeys.contains(oldKey)) - .collect(Collectors.toSet()); + Set evictedKeys = + oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); Map> evictionsByKey = new HashMap<>(); evictionsByKey.put(getAuthoritativeKeyName(), evictedKeys); @@ -185,18 +212,35 @@ private Map> computeEvictableData(Collection keyParts = Keys.parse(key); - return keyParts != null && - keyParts.get("account").equals(accountName) && - //IAM role keys are not region specific, so it will be true. The region will be checked of other keys. 
- (authoritativeKeyName.equals(IAM_ROLE.ns) || keyParts.get("region").equals(region)); + return keyParts != null + && ((accountName.equals(keyParts.get("account")) + && + // IAM role keys are not region specific, so it will be true. The region will be + // checked of other keys. + (authoritativeKeyName.equals(IAM_ROLE.ns) || keyParts.get("region").equals(region))) + // Application keys are not account or region specific so this will be true. The region + // and + // account will be checked for other keys. + || (authoritativeKeyName.equals(ECS_APPLICATIONS.ns))); } /** * This method is to be overridden in order to add extra evictions. + * * @param evictions The existing eviction map. * @return Eviction map with addtional keys. */ - protected Map> addExtraEvictions(Map> evictions) { + protected Map> addExtraEvictions( + Map> evictions) { return evictions; } + /** + * Returns the account name with which this agent is associated. + * + * @return The name of the account this agent handles. + */ + @Override + public String getAccountName() { + return accountName; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsOnDemandAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsOnDemandAgent.java index 8e5e8d367bd..c3d793f3987 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsOnDemandAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/AbstractEcsOnDemandAgent.java @@ -25,20 +25,32 @@ import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; -import groovy.lang.Closure; - import java.util.Collection; import java.util.LinkedList; import java.util.List; import java.util.Map; -abstract class AbstractEcsOnDemandAgent extends AbstractEcsCachingAgent implements OnDemandAgent { +abstract class AbstractEcsOnDemandAgent extends AbstractEcsCachingAgent + implements OnDemandAgent { final OnDemandMetricsSupport metricsSupport; - AbstractEcsOnDemandAgent(NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider, Registry registry) { + AbstractEcsOnDemandAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider, + Registry registry) { super(account, region, amazonClientProvider, awsCredentialsProvider); - this.metricsSupport = new OnDemandMetricsSupport(registry, this, EcsCloudProvider.ID + ":" + EcsCloudProvider.ID + ":${OnDemandAgent.OnDemandType.ServerGroup}"); + this.metricsSupport = + new OnDemandMetricsSupport( + registry, + this, + EcsCloudProvider.ID + + ":" + + EcsCloudProvider.ID + + ":${OnDemandAgent.OnDemandType.ServerGroup}"); } @Override @@ -47,7 +59,7 @@ public OnDemandMetricsSupport getMetricsSupport() { } @Override - public Collection pendingOnDemandRequests(ProviderCache providerCache) { + public Collection> pendingOnDemandRequests(ProviderCache providerCache) { return new LinkedList<>(); } @@ -73,9 +85,15 @@ public OnDemandResult handle(ProviderCache providerCache, Map data) { storeOnDemand(providerCache, data); - CacheResult 
cacheResult = metricsSupport.transformData(() -> buildCacheResult(getAuthoritativeKeyName(), items, providerCache)); + CacheResult cacheResult = + metricsSupport.transformData( + () -> buildCacheResult(getAuthoritativeKeyName(), items, providerCache)); - return new OnDemandResult(getAgentType(), cacheResult, null); // TODO(Bruno Carrier) - evictions should happen properly instead of having a null here + return new OnDemandResult( + getAgentType(), + cacheResult, + null); // TODO(Bruno Carrier) - evictions should happen properly instead of having a null + // here } void storeOnDemand(ProviderCache providerCache, Map data) { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ApplicationCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ApplicationCachingAgent.java new file mode 100644 index 00000000000..af3f58db9fe --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ApplicationCachingAgent.java @@ -0,0 +1,89 @@ +/* + * Copyright 2021 Amazon.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_APPLICATIONS; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.ecs.AmazonECS; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Application; +import java.util.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ApplicationCachingAgent extends AbstractEcsOnDemandAgent { + private static final Collection types = + Collections.unmodifiableCollection( + Arrays.asList(AUTHORITATIVE.forType(ECS_APPLICATIONS.toString()))); + private final Logger log = LoggerFactory.getLogger(getClass()); + + private ObjectMapper objectMapper; + + public ApplicationCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider, + Registry registry, + ObjectMapper objectMapper) { + super(account, region, amazonClientProvider, awsCredentialsProvider, registry); + this.objectMapper = objectMapper; + } + + public static Map convertApplicationToAttributes(Application application) { + Map attributes = new HashMap<>(); + attributes.put("name", application.getName()); + return attributes; + } + + @Override + public Collection getProvidedDataTypes() { + return 
types; + } + + @Override + public String getAgentType() { + return accountName + "/" + region + "/" + getClass().getSimpleName(); + } + + @Override + protected List getItems(AmazonECS ecs, ProviderCache providerCache) { + List applications = new ArrayList<>(); + return applications; + } + + @Override + protected Map> generateFreshData( + Collection applications) { + Collection applicationData = new LinkedList<>(); + + Map> cacheDataMap = new HashMap<>(); + log.info("Amazon ECS ApplicationCachingAgent will cache applications in a future update"); + cacheDataMap.put(ECS_APPLICATIONS.toString(), applicationData); + + return cacheDataMap; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgent.java index c603ae920c1..ce7e071fcd9 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgent.java @@ -16,6 +16,9 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; + import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.Attribute; @@ -31,9 +34,6 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -42,17 +42,21 @@ import java.util.List; import java.util.Map; import java.util.Set; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ContainerInstanceCachingAgent extends AbstractEcsOnDemandAgent { - private static final Collection types = Collections.unmodifiableCollection(Arrays.asList( - AUTHORITATIVE.forType(CONTAINER_INSTANCES.toString()) - )); + private static final Collection types = + Collections.unmodifiableCollection( + Arrays.asList(AUTHORITATIVE.forType(CONTAINER_INSTANCES.toString()))); private final Logger log = LoggerFactory.getLogger(getClass()); - public ContainerInstanceCachingAgent(NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider, Registry registry) { + public ContainerInstanceCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider, + Registry registry) { super(account, region, amazonClientProvider, awsCredentialsProvider, registry); } @@ -74,19 +78,26 @@ protected List getItems(AmazonECS ecs, ProviderCache provider for (String cluster : clusters) { String nextToken = null; do { - ListContainerInstancesRequest listContainerInstancesRequest = new ListContainerInstancesRequest().withCluster(cluster); + ListContainerInstancesRequest 
listContainerInstancesRequest = + new ListContainerInstancesRequest().withCluster(cluster); if (nextToken != null) { listContainerInstancesRequest.setNextToken(nextToken); } - ListContainerInstancesResult listContainerInstancesResult = ecs.listContainerInstances(listContainerInstancesRequest); - List containerInstanceArns = listContainerInstancesResult.getContainerInstanceArns(); + ListContainerInstancesResult listContainerInstancesResult = + ecs.listContainerInstances(listContainerInstancesRequest); + List containerInstanceArns = + listContainerInstancesResult.getContainerInstanceArns(); if (containerInstanceArns.size() == 0) { continue; } - List containerInstances = ecs.describeContainerInstances(new DescribeContainerInstancesRequest() - .withCluster(cluster).withContainerInstances(containerInstanceArns)).getContainerInstances(); + List containerInstances = + ecs.describeContainerInstances( + new DescribeContainerInstancesRequest() + .withCluster(cluster) + .withContainerInstances(containerInstanceArns)) + .getContainerInstances(); containerInstanceList.addAll(containerInstances); nextToken = listContainerInstancesResult.getNextToken(); @@ -96,13 +107,16 @@ protected List getItems(AmazonECS ecs, ProviderCache provider } @Override - protected Map> generateFreshData(Collection containerInstances) { + protected Map> generateFreshData( + Collection containerInstances) { Collection dataPoints = new LinkedList<>(); for (ContainerInstance containerInstance : containerInstances) { Map attributes = convertContainerInstanceToAttributes(containerInstance); - String key = Keys.getContainerInstanceKey(accountName, region, containerInstance.getContainerInstanceArn()); + String key = + Keys.getContainerInstanceKey( + accountName, region, containerInstance.getContainerInstanceArn()); dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap())); } @@ -113,7 +127,8 @@ protected Map> generateFreshData(Collection convertContainerInstanceToAttributes(ContainerInstance containerInstance) { + public static Map convertContainerInstanceToAttributes( + ContainerInstance containerInstance) { Map attributes = new HashMap<>(); attributes.put("containerInstanceArn", containerInstance.getContainerInstanceArn()); attributes.put("ec2InstanceId", containerInstance.getEc2InstanceId()); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgent.java index c826798ca5f..4527fd2afbc 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgent.java @@ -16,11 +16,14 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.auth.AWSCredentialsProvider; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ALARMS; + import com.amazonaws.services.cloudwatch.AmazonCloudWatch; import com.amazonaws.services.cloudwatch.model.DescribeAlarmsRequest; import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult; import com.amazonaws.services.cloudwatch.model.MetricAlarm; +import com.netflix.spinnaker.cats.agent.AccountAware; import com.netflix.spinnaker.cats.agent.AgentDataType; import 
com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.agent.CachingAgent; @@ -32,9 +35,6 @@ import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -43,33 +43,29 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ALARMS; - -public class EcsCloudMetricAlarmCachingAgent implements CachingAgent { - static final Collection types = Collections.unmodifiableCollection(Arrays.asList( - AUTHORITATIVE.forType(ALARMS.toString()) - )); +public class EcsCloudMetricAlarmCachingAgent implements CachingAgent, AccountAware { + static final Collection types = + Collections.unmodifiableCollection(Arrays.asList(AUTHORITATIVE.forType(ALARMS.toString()))); private final Logger log = LoggerFactory.getLogger(getClass()); private AmazonClientProvider amazonClientProvider; - private AWSCredentialsProvider awsCredentialsProvider; private NetflixAmazonCredentials account; private String accountName; private String region; - public EcsCloudMetricAlarmCachingAgent(NetflixAmazonCredentials account, String region, - AmazonClientProvider amazonClientProvider, - AWSCredentialsProvider awsCredentialsProvider) { + public EcsCloudMetricAlarmCachingAgent( + NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider) { this.region = region; this.account = account; this.accountName = account.getName(); this.amazonClientProvider = amazonClientProvider; - this.awsCredentialsProvider = awsCredentialsProvider; } - public static Map convertMetricAlarmToAttributes(MetricAlarm metricAlarm, String accountName, String region) { + public static Map convertMetricAlarmToAttributes( + MetricAlarm metricAlarm, String accountName, String region) { Map attributes = new HashMap<>(); attributes.put("alarmArn", metricAlarm.getAlarmArn()); attributes.put("alarmName", metricAlarm.getAlarmName()); @@ -94,19 +90,22 @@ public CacheResult loadData(ProviderCache providerCache) { Map> newDataMap = generateFreshData(cacheableMetricAlarm); Collection newData = newDataMap.get(ALARMS.toString()); - Set oldKeys = providerCache.getAll(ALARMS.toString()).stream() - .map(CacheData::getId) - .filter(this::keyAccountRegionFilter) - .collect(Collectors.toSet()); + Set oldKeys = + providerCache.getAll(ALARMS.toString()).stream() + .map(CacheData::getId) + .filter(this::keyAccountRegionFilter) + .collect(Collectors.toSet()); Map> evictionsByKey = computeEvictableData(newData, oldKeys); return new DefaultCacheResult(newDataMap, evictionsByKey); } - private Map> computeEvictableData(Collection newData, Collection oldKeys) { + private Map> computeEvictableData( + Collection newData, Collection oldKeys) { Set newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet()); - Set evictedKeys = oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); + Set evictedKeys = + oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); Map> evictionsByKey = new HashMap<>(); 
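A note on the eviction logic here: the computeEvictableData helpers repeated across these agents are a plain set difference, so any key that was cached on the previous cycle but is absent from the freshly loaded data gets evicted. A minimal, self-contained sketch of that pattern (the class and method names are illustrative, not part of this patch):

    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Set;

    class EvictionSketch {
      /** Keys cached on the last cycle but missing from the new load should be evicted. */
      static Set<String> evictable(Collection<String> oldKeys, Collection<String> newKeys) {
        Set<String> evicted = new HashSet<>(oldKeys);
        evicted.removeAll(new HashSet<>(newKeys));
        return evicted;
      }
    }

This is the same difference the agents express with streams: oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)), with newKeys collected into a Set first so the membership check is constant time.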
evictionsByKey.put(ALARMS.toString(), evictedKeys); @@ -119,8 +118,15 @@ Map> generateFreshData(Set cacheableM Map> newDataMap = new HashMap<>(); for (MetricAlarm metricAlarm : cacheableMetricAlarm) { - String key = Keys.getAlarmKey(accountName, region, metricAlarm.getAlarmArn()); - Map attributes = convertMetricAlarmToAttributes(metricAlarm, accountName, region); + String cluster = + metricAlarm.getDimensions().stream() + .filter(t -> t.getName().equalsIgnoreCase("ClusterName")) + .map(t -> t.getValue()) + .collect(Collectors.joining()); + + String key = Keys.getAlarmKey(accountName, region, metricAlarm.getAlarmArn(), cluster); + Map attributes = + convertMetricAlarmToAttributes(metricAlarm, accountName, region); CacheData data = new DefaultCacheData(key, attributes, Collections.emptyMap()); dataPoints.add(data); @@ -151,9 +157,9 @@ Set fetchMetricAlarms(AmazonCloudWatch cloudWatch) { private boolean keyAccountRegionFilter(String key) { Map keyParts = Keys.parse(key); - return keyParts != null && - keyParts.get("account").equals(accountName) && - keyParts.get("region").equals(region); + return keyParts != null + && keyParts.get("account").equals(accountName) + && keyParts.get("region").equals(region); } @Override @@ -165,4 +171,9 @@ public String getAgentType() { public String getProviderName() { return EcsProvider.NAME; } + + @Override + public String getAccountName() { + return accountName; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgent.java index d90b5e67395..71518e86b34 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgent.java @@ -16,6 +16,9 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; + import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ecs.AmazonECS; import com.amazonaws.services.ecs.model.ListClustersRequest; @@ -27,10 +30,6 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -38,17 +37,21 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class EcsClusterCachingAgent extends AbstractEcsCachingAgent { - private static final Collection types = Collections.unmodifiableCollection(Arrays.asList( - AUTHORITATIVE.forType(ECS_CLUSTERS.toString()) - )); + private static final Collection types = + Collections.unmodifiableCollection( + Arrays.asList(AUTHORITATIVE.forType(ECS_CLUSTERS.toString()))); private final Logger log = 
LoggerFactory.getLogger(getClass()); - public EcsClusterCachingAgent(NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider) { + public EcsClusterCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider) { super(account, region, amazonClientProvider, awsCredentialsProvider); } @@ -87,7 +90,8 @@ protected Map> generateFreshData(Collection attributes = convertClusterArnToAttributes(accountName, region, clusterArn); + Map attributes = + convertClusterArnToAttributes(accountName, region, clusterArn); String key = Keys.getClusterKey(accountName, region, clusterName); dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap())); @@ -99,7 +103,9 @@ protected Map> generateFreshData(Collection convertClusterArnToAttributes(String accountName, String region, String clusterArn){ + + public static Map convertClusterArnToAttributes( + String accountName, String region, String clusterArn) { String clusterName = StringUtils.substringAfterLast(clusterArn, "/"); Map attributes = new HashMap<>(); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamPolicyReader.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamPolicyReader.java index 425ee6bd9f3..04a8c5402c0 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamPolicyReader.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamPolicyReader.java @@ -16,17 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; - import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.net.URLDecoder; import java.util.List; import java.util.Map; import java.util.Set; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class IamPolicyReader { private static final Logger logger = LoggerFactory.getLogger(IamPolicyReader.class); @@ -45,26 +43,33 @@ public Set getTrustedEntities(String urlEncodedPolicyDocum Map policyDocument; try { policyDocument = mapper.readValue(decodedPolicyDocument, Map.class); - List> statementItems = (List>) policyDocument.get("Statement"); + List> statementItems = + (List>) policyDocument.get("Statement"); for (Map statementItem : statementItems) { if ("sts:AssumeRole".equals(statementItem.get("Action"))) { Map principal = (Map) statementItem.get("Principal"); for (Map.Entry principalEntry : principal.entrySet()) { if (principalEntry.getValue() instanceof List) { - ((List) principalEntry.getValue()).stream() - .forEach(o -> trustedEntities.add(new IamTrustRelationship(principalEntry.getKey(), o.toString()))); + ((List) principalEntry.getValue()) + .stream() + .forEach( + o -> + trustedEntities.add( + new IamTrustRelationship(principalEntry.getKey(), o.toString()))); } else { - trustedEntities.add(new IamTrustRelationship(principalEntry.getKey(), principalEntry.getValue().toString())); + trustedEntities.add( + new IamTrustRelationship( + principalEntry.getKey(), principalEntry.getValue().toString())); } } } } } catch (IOException e) { - logger.error("Unable to extract trusted entities (policyDocument: {})", urlEncodedPolicyDocument, e); + logger.error( + "Unable to extract trusted entities (policyDocument: {})", 
urlEncodedPolicyDocument, e); } return trustedEntities; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgent.java index 12d1ba53d77..0c7abd88df3 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgent.java @@ -16,12 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.auth.AWSCredentialsProvider; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; + import com.amazonaws.regions.Regions; import com.amazonaws.services.identitymanagement.AmazonIdentityManagement; import com.amazonaws.services.identitymanagement.model.ListRolesRequest; import com.amazonaws.services.identitymanagement.model.ListRolesResult; import com.amazonaws.services.identitymanagement.model.Role; +import com.netflix.spinnaker.cats.agent.AccountAware; import com.netflix.spinnaker.cats.agent.AgentDataType; import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.agent.CachingAgent; @@ -34,9 +37,6 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.model.IamRole; import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -46,31 +46,27 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; - -public class IamRoleCachingAgent implements CachingAgent { +public class IamRoleCachingAgent implements CachingAgent, AccountAware { - static final Collection types = Collections.unmodifiableCollection(Arrays.asList( - AUTHORITATIVE.forType(IAM_ROLE.toString()) - )); + static final Collection types = + Collections.unmodifiableCollection(Arrays.asList(AUTHORITATIVE.forType(IAM_ROLE.toString()))); private final Logger log = LoggerFactory.getLogger(getClass()); private AmazonClientProvider amazonClientProvider; - private AWSCredentialsProvider awsCredentialsProvider; private NetflixAmazonCredentials account; private String accountName; private IamPolicyReader iamPolicyReader; - - public IamRoleCachingAgent(NetflixAmazonCredentials account, - AmazonClientProvider amazonClientProvider, - AWSCredentialsProvider awsCredentialsProvider, - IamPolicyReader iamPolicyReader) { + public IamRoleCachingAgent( + NetflixAmazonCredentials account, + AmazonClientProvider amazonClientProvider, + IamPolicyReader iamPolicyReader) { this.account = account; this.accountName = account.getName(); this.amazonClientProvider = amazonClientProvider; - this.awsCredentialsProvider = awsCredentialsProvider; this.iamPolicyReader = iamPolicyReader; } @@ -85,16 +81,17 @@ public static Map convertIamRoleToAttributes(IamRole iamRole) { @Override public CacheResult loadData(ProviderCache providerCache) { - AmazonIdentityManagement iam = 
amazonClientProvider.getIam(account, Regions.DEFAULT_REGION.getName(), false); + AmazonIdentityManagement iam = amazonClientProvider.getIam(account, getIamRegion(), false); Set cacheableRoles = fetchIamRoles(iam, accountName); Map> newDataMap = generateFreshData(cacheableRoles); Collection newData = newDataMap.get(IAM_ROLE.toString()); - Set oldKeys = providerCache.getAll(IAM_ROLE.toString()).stream() - .map(CacheData::getId) - .filter(this::keyAccountFilter) - .collect(Collectors.toSet()); + Set oldKeys = + providerCache.getAll(IAM_ROLE.toString()).stream() + .map(CacheData::getId) + .filter(this::keyAccountFilter) + .collect(Collectors.toSet()); Map> evictionsByKey = computeEvictableData(newData, oldKeys); logUpcomingActions(newDataMap, evictionsByKey); @@ -102,27 +99,26 @@ public CacheResult loadData(ProviderCache providerCache) { return new DefaultCacheResult(newDataMap, evictionsByKey); } - private void logUpcomingActions(Map> newDataMap, Map> evictionsByKey) { - log.info(String.format("Caching %s IAM roles in %s for account %s", - newDataMap.get(IAM_ROLE.toString()).size(), - getAgentType(), - accountName) - ); + private void logUpcomingActions( + Map> newDataMap, + Map> evictionsByKey) { + log.info( + String.format( + "Caching %s IAM roles in %s for account %s", + newDataMap.get(IAM_ROLE.toString()).size(), getAgentType(), accountName)); if (evictionsByKey.get(IAM_ROLE.toString()).size() > 0) { - log.info(String.format("Evicting %s IAM roles in %s for account %s", - evictionsByKey.get(IAM_ROLE.toString()).size(), - getAgentType(), - accountName) - ); + log.info( + String.format( + "Evicting %s IAM roles in %s for account %s", + evictionsByKey.get(IAM_ROLE.toString()).size(), getAgentType(), accountName)); } } - private Map> computeEvictableData(Collection newData, Collection oldKeys) { + private Map> computeEvictableData( + Collection newData, Collection oldKeys) { - Set newKeys = newData.stream() - .map(CacheData::getId) - .collect(Collectors.toSet()); + Set newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet()); Set evictedKeys = new HashSet<>(); for (String oldKey : oldKeys) { @@ -135,6 +131,23 @@ private Map> computeEvictableData(Collection> generateFreshData(Set cacheableRoles) { Collection dataPoints = new HashSet<>(); Map> newDataMap = new HashMap<>(); @@ -164,11 +177,11 @@ Set fetchIamRoles(AmazonIdentityManagement iam, String accountName) { List roles = listRolesResult.getRoles(); for (Role role : roles) { cacheableRoles.add( - new IamRole(role.getArn(), - role.getRoleName(), - accountName, - iamPolicyReader.getTrustedEntities(role.getAssumeRolePolicyDocument())) - ); + new IamRole( + role.getArn(), + role.getRoleName(), + accountName, + iamPolicyReader.getTrustedEntities(role.getAssumeRolePolicyDocument()))); } if (listRolesResult.isTruncated()) { @@ -184,8 +197,7 @@ Set fetchIamRoles(AmazonIdentityManagement iam, String accountName) { private boolean keyAccountFilter(String key) { Map keyParts = Keys.parse(key); - return keyParts != null && - keyParts.get("account").equals(accountName); + return keyParts != null && keyParts.get("account").equals(accountName); } @Override @@ -202,4 +214,9 @@ public String getProviderName() { public Collection getProvidedDataTypes() { return types; } + + @Override + public String getAccountName() { + return accountName; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetsCachingAgent.java 
b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetsCachingAgent.java index c9ca167c9fe..dd1641a27c8 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetsCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetsCachingAgent.java @@ -16,13 +16,16 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.auth.AWSCredentialsProvider; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SCALABLE_TARGETS; + import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest; import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult; import com.amazonaws.services.applicationautoscaling.model.ScalableTarget; import com.amazonaws.services.applicationautoscaling.model.ServiceNamespace; import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.AccountAware; import com.netflix.spinnaker.cats.agent.AgentDataType; import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.agent.CachingAgent; @@ -34,9 +37,6 @@ import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -45,36 +45,35 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SCALABLE_TARGETS; - -public class ScalableTargetsCachingAgent implements CachingAgent { - static final Collection types = Collections.unmodifiableCollection(Arrays.asList( - AUTHORITATIVE.forType(SCALABLE_TARGETS.toString()) - )); +public class ScalableTargetsCachingAgent implements CachingAgent, AccountAware { + static final Collection types = + Collections.unmodifiableCollection( + Arrays.asList(AUTHORITATIVE.forType(SCALABLE_TARGETS.toString()))); private final Logger log = LoggerFactory.getLogger(getClass()); private final ObjectMapper objectMapper; private AmazonClientProvider amazonClientProvider; - private AWSCredentialsProvider awsCredentialsProvider; private NetflixAmazonCredentials account; private String accountName; private String region; - public ScalableTargetsCachingAgent(NetflixAmazonCredentials account, String region, - AmazonClientProvider amazonClientProvider, - AWSCredentialsProvider awsCredentialsProvider, - ObjectMapper objectMapper) { + public ScalableTargetsCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + ObjectMapper objectMapper) { this.region = region; this.account = account; this.accountName = account.getName(); this.amazonClientProvider = amazonClientProvider; - this.awsCredentialsProvider = awsCredentialsProvider; this.objectMapper = objectMapper; } - public static Map convertMetricAlarmToAttributes(ScalableTarget scalableTarget, ObjectMapper 
objectMapper) { + public static Map convertMetricAlarmToAttributes( + ScalableTarget scalableTarget, ObjectMapper objectMapper) { return objectMapper.convertValue(scalableTarget, Map.class); } @@ -85,25 +84,29 @@ public Collection getProvidedDataTypes() { @Override public CacheResult loadData(ProviderCache providerCache) { - AWSApplicationAutoScaling autoScalingClient = amazonClientProvider.getAmazonApplicationAutoScaling(account, region, false); + AWSApplicationAutoScaling autoScalingClient = + amazonClientProvider.getAmazonApplicationAutoScaling(account, region, false); Set scalableTargets = fetchScalableTargets(autoScalingClient); Map> newDataMap = generateFreshData(scalableTargets); Collection newData = newDataMap.get(SCALABLE_TARGETS.toString()); - Set oldKeys = providerCache.getAll(SCALABLE_TARGETS.toString()).stream() - .map(CacheData::getId) - .filter(this::keyAccountRegionFilter) - .collect(Collectors.toSet()); + Set oldKeys = + providerCache.getAll(SCALABLE_TARGETS.toString()).stream() + .map(CacheData::getId) + .filter(this::keyAccountRegionFilter) + .collect(Collectors.toSet()); Map> evictionsByKey = computeEvictableData(newData, oldKeys); return new DefaultCacheResult(newDataMap, evictionsByKey); } - private Map> computeEvictableData(Collection newData, Collection oldKeys) { + private Map> computeEvictableData( + Collection newData, Collection oldKeys) { Set newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet()); - Set evictedKeys = oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); + Set evictedKeys = + oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); Map> evictionsByKey = new HashMap<>(); evictionsByKey.put(SCALABLE_TARGETS.toString(), evictedKeys); @@ -132,7 +135,8 @@ Set fetchScalableTargets(AWSApplicationAutoScaling autoScalingCl Set scalableTargets = new HashSet<>(); String nextToken = null; do { - DescribeScalableTargetsRequest request = new DescribeScalableTargetsRequest().withServiceNamespace(ServiceNamespace.Ecs); + DescribeScalableTargetsRequest request = + new DescribeScalableTargetsRequest().withServiceNamespace(ServiceNamespace.Ecs); if (nextToken != null) { request.setNextToken(nextToken); } @@ -148,9 +152,9 @@ Set fetchScalableTargets(AWSApplicationAutoScaling autoScalingCl private boolean keyAccountRegionFilter(String key) { Map keyParts = Keys.parse(key); - return keyParts != null && - keyParts.get("account").equals(accountName) && - keyParts.get("region").equals(region); + return keyParts != null + && keyParts.get("account").equals(accountName) + && keyParts.get("region").equals(region); } @Override @@ -162,4 +166,9 @@ public String getAgentType() { public String getProviderName() { return EcsProvider.NAME; } + + @Override + public String getAccountName() { + return accountName; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/SecretCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/SecretCachingAgent.java new file mode 100644 index 00000000000..0b8157f6289 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/SecretCachingAgent.java @@ -0,0 +1,163 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
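The fetch* methods in these caching agents (fetchScalableTargets above, fetchSecrets and fetchServices below) all page through AWS list APIs with the same do/while loop on nextToken. A minimal sketch of that loop, where the Page interface and the call function are placeholders standing in for the concrete AWS request/result types, not anything from this patch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    class PagingSketch {
      interface Page<T> {
        List<T> items();
        String nextToken();
      }

      // Applies the call with the previous page's token until no token remains.
      static <T> List<T> fetchAll(Function<String, Page<T>> call) {
        List<T> all = new ArrayList<>();
        String token = null;
        do {
          Page<T> page = call.apply(token);
          all.addAll(page.items());
          token = page.nextToken();
        } while (token != null && !token.isEmpty());
        return all;
      }
    }

The token.isEmpty() guard mirrors the nextToken.length() != 0 check used throughout the patch.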
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SECRETS; + +import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import com.amazonaws.services.secretsmanager.model.ListSecretsRequest; +import com.amazonaws.services.secretsmanager.model.ListSecretsResult; +import com.amazonaws.services.secretsmanager.model.SecretListEntry; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; +import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; +import java.util.*; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SecretCachingAgent implements CachingAgent, AccountAware { + static final Collection types = + Collections.unmodifiableCollection(Arrays.asList(AUTHORITATIVE.forType(SECRETS.toString()))); + + private final Logger log = LoggerFactory.getLogger(getClass()); + private AmazonClientProvider amazonClientProvider; + private NetflixAmazonCredentials account; + private String accountName; + private String region; + + public SecretCachingAgent( + NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider) { + this.region = region; + this.account = account; + this.accountName = account.getName(); + this.amazonClientProvider = amazonClientProvider; + } + + public static Map convertSecretToAttributes( + String accountName, String region, SecretListEntry secret) { + Map attributes = new HashMap<>(); + attributes.put("account", accountName); + attributes.put("region", region); + attributes.put("secretName", secret.getName()); + attributes.put("secretArn", secret.getARN()); + return attributes; + } + + @Override + public Collection getProvidedDataTypes() { + return types; + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + AWSSecretsManager secretsManagerClient = + amazonClientProvider.getAmazonSecretsManager(account, region, false); + + Set secrets = fetchSecrets(secretsManagerClient); + Map> newDataMap = generateFreshData(secrets); + Collection newData = newDataMap.get(SECRETS.toString()); + + Set oldKeys = + providerCache.getAll(SECRETS.toString()).stream() + .map(CacheData::getId) + .filter(this::keyAccountRegionFilter) + .collect(Collectors.toSet()); + + Map> evictionsByKey = computeEvictableData(newData, oldKeys); + + return new DefaultCacheResult(newDataMap, evictionsByKey); + } + + private Map> 
computeEvictableData( + Collection newData, Collection oldKeys) { + Set newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet()); + Set evictedKeys = + oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); + + Map> evictionsByKey = new HashMap<>(); + evictionsByKey.put(SECRETS.toString(), evictedKeys); + log.info("Evicting " + evictedKeys.size() + " secrets in " + getAgentType()); + return evictionsByKey; + } + + Map> generateFreshData(Set secrets) { + Collection dataPoints = new HashSet<>(); + Map> newDataMap = new HashMap<>(); + + for (SecretListEntry secret : secrets) { + String key = Keys.getSecretKey(accountName, region, secret.getName()); + Map attributes = convertSecretToAttributes(accountName, region, secret); + + CacheData data = new DefaultCacheData(key, attributes, Collections.emptyMap()); + dataPoints.add(data); + } + + log.info("Caching " + dataPoints.size() + " secrets in " + getAgentType()); + newDataMap.put(SECRETS.toString(), dataPoints); + return newDataMap; + } + + Set fetchSecrets(AWSSecretsManager secretsManagerClient) { + Set secrets = new HashSet<>(); + String nextToken = null; + do { + ListSecretsRequest request = new ListSecretsRequest(); + if (nextToken != null) { + request.setNextToken(nextToken); + } + + ListSecretsResult result = secretsManagerClient.listSecrets(request); + secrets.addAll(result.getSecretList()); + + nextToken = result.getNextToken(); + } while (nextToken != null && nextToken.length() != 0); + + return secrets; + } + + private boolean keyAccountRegionFilter(String key) { + Map keyParts = Keys.parse(key); + return keyParts != null + && keyParts.get("account").equals(accountName) + && keyParts.get("region").equals(region); + } + + @Override + public String getAgentType() { + return accountName + "/" + region + "/" + getClass().getSimpleName(); + } + + @Override + public String getProviderName() { + return EcsProvider.NAME; + } + + @Override + public String getAccountName() { + return accountName; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgent.java index 2328cf7e1b5..2c05a35ec16 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgent.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgent.java @@ -16,12 +16,15 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; + import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ecs.AmazonECS; -import com.amazonaws.services.ecs.model.DescribeServicesRequest; -import com.amazonaws.services.ecs.model.ListServicesRequest; -import com.amazonaws.services.ecs.model.ListServicesResult; -import com.amazonaws.services.ecs.model.Service; +import com.amazonaws.services.ecs.model.*; +import com.google.common.annotations.VisibleForTesting; import com.netflix.spectator.api.Registry; import com.netflix.spinnaker.cats.agent.AgentDataType; import com.netflix.spinnaker.cats.cache.CacheData; @@ 
-29,11 +32,13 @@ import com.netflix.spinnaker.cats.provider.ProviderCache; import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.netflix.spinnaker.clouddriver.ecs.names.EcsResource; +import com.netflix.spinnaker.clouddriver.ecs.names.EcsResourceService; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -42,26 +47,56 @@ import java.util.List; import java.util.Map; import java.util.Set; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ServiceCachingAgent extends AbstractEcsOnDemandAgent { - private static final Collection types = Collections.unmodifiableCollection(Arrays.asList( - AUTHORITATIVE.forType(SERVICES.toString()), - INFORMATIVE.forType(ECS_CLUSTERS.toString()) - )); + private static final Collection types = + Collections.unmodifiableCollection( + Arrays.asList( + AUTHORITATIVE.forType(SERVICES.toString()), + INFORMATIVE.forType(ECS_CLUSTERS.toString()))); private final Logger log = LoggerFactory.getLogger(getClass()); - public ServiceCachingAgent(NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider, Registry registry) { + private final Namer naming; + + public ServiceCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider, + Registry registry) { + this( + account, + region, + amazonClientProvider, + awsCredentialsProvider, + registry, + NamerRegistry.lookup() + .withProvider(EcsCloudProvider.ID) + .withAccount(account.getName()) + .withResource(EcsResource.class)); + } + + @VisibleForTesting + public ServiceCachingAgent( + NetflixAmazonCredentials account, + String region, + AmazonClientProvider amazonClientProvider, + AWSCredentialsProvider awsCredentialsProvider, + Registry registry, + Namer naming) { super(account, region, amazonClientProvider, awsCredentialsProvider, registry); + this.naming = naming; } - public static Map convertServiceToAttributes(String accountName, String region, Service service) { + public Map convertServiceToAttributes(Service service) { Map attributes = new HashMap<>(); - String applicationName = service.getServiceName().contains("-") ? 
StringUtils.substringBefore(service.getServiceName(), "-") : service.getServiceName(); + + Moniker moniker = naming.deriveMoniker(new EcsResourceService(service)); + + String applicationName = moniker.getApp(); String clusterName = StringUtils.substringAfterLast(service.getClusterArn(), "/"); attributes.put("account", accountName); @@ -75,9 +110,21 @@ public static Map convertServiceToAttributes(String accountName, attributes.put("taskDefinition", service.getTaskDefinition()); attributes.put("desiredCount", service.getDesiredCount()); attributes.put("maximumPercent", service.getDeploymentConfiguration().getMaximumPercent()); - attributes.put("minimumHealthyPercent", service.getDeploymentConfiguration().getMinimumHealthyPercent()); + attributes.put( + "minimumHealthyPercent", service.getDeploymentConfiguration().getMinimumHealthyPercent()); attributes.put("loadBalancers", service.getLoadBalancers()); + + if (service.getNetworkConfiguration() != null + && service.getNetworkConfiguration().getAwsvpcConfiguration() != null) { + attributes.put( + "subnets", service.getNetworkConfiguration().getAwsvpcConfiguration().getSubnets()); + attributes.put( + "securityGroups", + service.getNetworkConfiguration().getAwsvpcConfiguration().getSecurityGroups()); + } + attributes.put("createdAt", service.getCreatedAt().getTime()); + attributes.put("moniker", moniker); return attributes; } @@ -110,7 +157,13 @@ protected List getItems(AmazonECS ecs, ProviderCache providerCache) { continue; } - List services = ecs.describeServices(new DescribeServicesRequest().withCluster(cluster).withServices(serviceArns)).getServices(); + List services = + ecs.describeServices( + new DescribeServicesRequest() + .withCluster(cluster) + .withServices(serviceArns) + .withInclude("TAGS")) + .getServices(); serviceList.addAll(services); nextToken = listServicesResult.getNextToken(); @@ -125,15 +178,18 @@ protected Map> generateFreshData(Collection clusterDataPoints = new HashMap<>(); for (Service service : services) { - Map attributes = convertServiceToAttributes(accountName, region, service); + Map attributes = convertServiceToAttributes(service); String key = Keys.getServiceKey(accountName, region, service.getServiceName()); dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap())); - Map clusterAttributes = EcsClusterCachingAgent.convertClusterArnToAttributes(accountName, region, service.getClusterArn()); + Map clusterAttributes = + EcsClusterCachingAgent.convertClusterArnToAttributes( + accountName, region, service.getClusterArn()); String clusterName = StringUtils.substringAfterLast(service.getClusterArn(), "/"); key = Keys.getClusterKey(accountName, region, clusterName); - clusterDataPoints.put(key, new DefaultCacheData(key, clusterAttributes, Collections.emptyMap())); + clusterDataPoints.put( + key, new DefaultCacheData(key, clusterAttributes, Collections.emptyMap())); } log.info("Caching " + dataPoints.size() + " services in " + getAgentType()); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceDiscoveryCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceDiscoveryCachingAgent.java new file mode 100644 index 00000000000..43db1b8e82b --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceDiscoveryCachingAgent.java @@ -0,0 +1,165 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
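In ServiceCachingAgent above, convertServiceToAttributes stops deriving the application name by hand and instead asks the injected Namer for a Moniker. The removed rule was "everything before the first dash"; a JDK-only sketch of that old behavior, for comparison (the class name is illustrative):

    class AppNameSketch {
      /** Old rule: the app is the service name up to the first dash, else the whole name. */
      static String appFrom(String serviceName) {
        int dash = serviceName.indexOf('-');
        return dash >= 0 ? serviceName.substring(0, dash) : serviceName;
      }
    }

Routing naming through NamerRegistry lets each account plug in its own strategy, which is presumably also why the describeServices call now asks for TAGS.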
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICE_DISCOVERY_REGISTRIES; + +import com.amazonaws.services.servicediscovery.AWSServiceDiscovery; +import com.amazonaws.services.servicediscovery.model.ListServicesRequest; +import com.amazonaws.services.servicediscovery.model.ListServicesResult; +import com.amazonaws.services.servicediscovery.model.ServiceSummary; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; +import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; +import java.util.*; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ServiceDiscoveryCachingAgent implements CachingAgent, AccountAware { + static final Collection types = + Collections.unmodifiableCollection( + Arrays.asList(AUTHORITATIVE.forType(SERVICE_DISCOVERY_REGISTRIES.toString()))); + + private final Logger log = LoggerFactory.getLogger(getClass()); + private AmazonClientProvider amazonClientProvider; + private NetflixAmazonCredentials account; + private String accountName; + private String region; + + public ServiceDiscoveryCachingAgent( + NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider) { + this.region = region; + this.account = account; + this.accountName = account.getName(); + this.amazonClientProvider = amazonClientProvider; + } + + public static Map convertServiceToAttributes( + String accountName, String region, ServiceSummary service) { + Map attributes = new HashMap<>(); + attributes.put("account", accountName); + attributes.put("region", region); + attributes.put("serviceName", service.getName()); + attributes.put("serviceArn", service.getArn()); + attributes.put("serviceId", service.getId()); + return attributes; + } + + @Override + public Collection getProvidedDataTypes() { + return types; + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + AWSServiceDiscovery serviceDiscoveryClient = + amazonClientProvider.getAmazonServiceDiscovery(account, region, false); + + Set services = fetchServices(serviceDiscoveryClient); + Map> newDataMap = generateFreshData(services); + Collection newData = newDataMap.get(SERVICE_DISCOVERY_REGISTRIES.toString()); + + Set oldKeys = + 
providerCache.getAll(SERVICE_DISCOVERY_REGISTRIES.toString()).stream() + .map(CacheData::getId) + .filter(this::keyAccountRegionFilter) + .collect(Collectors.toSet()); + + Map> evictionsByKey = computeEvictableData(newData, oldKeys); + + return new DefaultCacheResult(newDataMap, evictionsByKey); + } + + private Map> computeEvictableData( + Collection newData, Collection oldKeys) { + Set newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet()); + Set evictedKeys = + oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet()); + + Map> evictionsByKey = new HashMap<>(); + evictionsByKey.put(SERVICE_DISCOVERY_REGISTRIES.toString(), evictedKeys); + log.info("Evicting " + evictedKeys.size() + " service discovery services in " + getAgentType()); + return evictionsByKey; + } + + Map> generateFreshData(Set services) { + Collection dataPoints = new HashSet<>(); + Map> newDataMap = new HashMap<>(); + + for (ServiceSummary service : services) { + String key = Keys.getServiceDiscoveryRegistryKey(accountName, region, service.getId()); + Map attributes = convertServiceToAttributes(accountName, region, service); + + CacheData data = new DefaultCacheData(key, attributes, Collections.emptyMap()); + dataPoints.add(data); + } + + log.info("Caching " + dataPoints.size() + " service discovery services in " + getAgentType()); + newDataMap.put(SERVICE_DISCOVERY_REGISTRIES.toString(), dataPoints); + return newDataMap; + } + + Set fetchServices(AWSServiceDiscovery serviceDiscoveryClient) { + Set services = new HashSet<>(); + String nextToken = null; + do { + ListServicesRequest request = new ListServicesRequest(); + if (nextToken != null) { + request.setNextToken(nextToken); + } + + ListServicesResult result = serviceDiscoveryClient.listServices(request); + services.addAll(result.getServices()); + + nextToken = result.getNextToken(); + } while (nextToken != null && nextToken.length() != 0); + + return services; + } + + private boolean keyAccountRegionFilter(String key) { + Map keyParts = Keys.parse(key); + return keyParts != null + && keyParts.get("account").equals(accountName) + && keyParts.get("region").equals(region); + } + + @Override + public String getAgentType() { + return accountName + "/" + region + "/" + getClass().getSimpleName(); + } + + @Override + public String getProviderName() { + return EcsProvider.NAME; + } + + @Override + public String getAccountName() { + return accountName; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TargetHealthCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TargetHealthCachingAgent.java new file mode 100644 index 00000000000..3eeaf176273 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TargetHealthCachingAgent.java @@ -0,0 +1,188 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
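ServiceDiscoveryCachingAgent above ends with the same keyAccountRegionFilter that several agents in this patch declare: parse the cache key into parts and keep it only when both the account and the region match this agent. A sketch of the check on the parsed parts (Keys.parse produces the map in the real code; this helper is illustrative):

    import java.util.Map;

    class KeyFilterSketch {
      static boolean matches(Map<String, String> keyParts, String account, String region) {
        return keyParts != null
            && account.equals(keyParts.get("account"))
            && region.equals(keyParts.get("region"));
      }
    }

The sketch puts the known constant on the left of equals; the agents call keyParts.get("account").equals(accountName), which would throw if a key parsed without an account part.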
+ */
+
+package com.netflix.spinnaker.clouddriver.ecs.provider.agent;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TARGET_HEALTHS;
+
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.services.ecs.AmazonECS;
+import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing;
+import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthRequest;
+import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthResult;
+import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupNotFoundException;
+import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.*;
+import com.netflix.spinnaker.cats.cache.*;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
+import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCachingAgent;
+import com.netflix.spinnaker.clouddriver.ecs.cache.Keys;
+import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsTargetGroupCacheClient;
+import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsTargetHealth;
+import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsTargetGroup;
+import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider;
+import java.util.*;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TargetHealthCachingAgent extends AbstractEcsAwsAwareCachingAgent<EcsTargetHealth>
+    implements HealthProvidingCachingAgent {
+
+  private static final Collection<AgentDataType> types =
+      Collections.unmodifiableCollection(Arrays.asList(AUTHORITATIVE.forType(TARGET_HEALTHS.ns)));
+  private static final String HEALTH_ID = "ecs-alb-target-health";
+
+  private final Logger log = LoggerFactory.getLogger(getClass());
+  private final ObjectMapper objectMapper;
+
+  public TargetHealthCachingAgent(
+      NetflixAmazonCredentials account,
+      String region,
+      AmazonClientProvider amazonClientProvider,
+      AWSCredentialsProvider awsCredentialsProvider,
+      ObjectMapper objectMapper) {
+    super(account, region, amazonClientProvider, awsCredentialsProvider);
+    this.objectMapper = objectMapper;
+  }
+
+  @Override
+  protected List<EcsTargetHealth> getItems(AmazonECS ecs, ProviderCache providerCache) {
+    if (awsProviderCache == null) {
+      throw new NullPointerException("awsProviderCache not initialized on " + getAgentType() + ".");
+    }
+
+    EcsTargetGroupCacheClient targetGroupCacheClient =
+        new EcsTargetGroupCacheClient(awsProviderCache, objectMapper);
+    Set<String> targetGroups = fetchTargetGroups(targetGroupCacheClient);
+    log.debug("Fetched {} target groups for which to get target healths", targetGroups.size());
+
+    AmazonElasticLoadBalancing amazonLoadBalancing =
+        amazonClientProvider.getAmazonElasticLoadBalancingV2(account, region, false);
+
+    List<EcsTargetHealth> targetHealthList = new LinkedList<>();
+
+    if (targetGroups != null) {
+      for (String tgArn : targetGroups) {
+
+        DescribeTargetHealthResult describeTargetHealthResult = new DescribeTargetHealthResult();
+        try {
+          describeTargetHealthResult =
+              amazonLoadBalancing.describeTargetHealth(
+                  new DescribeTargetHealthRequest().withTargetGroupArn(tgArn));
+        } catch (TargetGroupNotFoundException ignore) {
+        }
+
+        List<TargetHealthDescription> healthDescriptions =
+            describeTargetHealthResult.getTargetHealthDescriptions();
+
+        if (healthDescriptions != null && healthDescriptions.size() > 0) {
+          targetHealthList.add(makeEcsTargetHealth(tgArn, healthDescriptions));
+          log.debug(
+              "Cached {} EcsTargetHealths for targetGroup {}", healthDescriptions.size(), tgArn);
+        } else {
+          log.debug("No TargetHealthDescriptions found for target group {}, skipping", tgArn);
+        }
+      }
+    }
+
+    return targetHealthList;
+  }
+
+  protected Set<String> fetchTargetGroups(EcsTargetGroupCacheClient cacheClient) {
+    String searchKey =
+        com.netflix.spinnaker.clouddriver.aws.data.Keys.getTargetGroupKey(
+                "*", "*", region, "*", "*")
+            + "*";
+
+    Collection<String> targetGroupKeys =
+        awsProviderCache.filterIdentifiers(TARGET_GROUPS.getNs(), searchKey);
+
+    if (targetGroupKeys.size() > 0) {
+      log.debug("Found " + targetGroupKeys.size() + " target group keys in " + getAgentType());
+    }
+
+    // TODO: refine search key instead of filtering these cache results
+    List<EcsTargetGroup> tgList =
+        cacheClient.find(targetGroupKeys).stream()
+            .filter(
+                t ->
+                    t.getTargetGroupArn() != null
+                        && t.getTargetGroupArn().contains(account.getAccountId()))
+            .collect(Collectors.toList());
+
+    return tgList.stream().map(EcsTargetGroup::getTargetGroupArn).collect(Collectors.toSet());
+  }
+
+  private EcsTargetHealth makeEcsTargetHealth(
+      String targetGroupArn, List<TargetHealthDescription> healthDescriptions) {
+    EcsTargetHealth targetHealth = new EcsTargetHealth();
+    targetHealth.setTargetGroupArn(targetGroupArn);
+    targetHealth.setTargetHealthDescriptions(healthDescriptions);
+
+    return targetHealth;
+  }
+
+  @Override
+  protected Map<String, Collection<CacheData>> generateFreshData(
+      Collection<EcsTargetHealth> targetHealthList) {
+    Collection<CacheData> dataPoints = new LinkedList<>();
+
+    for (EcsTargetHealth targetHealth : targetHealthList) {
+      Map<String, Object> attributes = convertToTargetHealthAttributes(targetHealth);
+      String key = Keys.getTargetHealthKey(accountName, region, targetHealth.getTargetGroupArn());
+
+      dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap()));
+    }
+
+    log.info("Caching " + dataPoints.size() + " target health checks in " + getAgentType());
+    Map<String, Collection<CacheData>> dataMap = new HashMap<>();
+    dataMap.put(TARGET_HEALTHS.ns, dataPoints);
+
+    return dataMap;
+  }
+
+  public static Map<String, Object> convertToTargetHealthAttributes(EcsTargetHealth targetHealth) {
+    Map<String, Object> attributes = new HashMap<>();
+    attributes.put("targetGroupArn", targetHealth.getTargetGroupArn());
+    attributes.put("targetHealthDescriptions", targetHealth.getTargetHealthDescriptions());
+    return attributes;
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return types;
+  }
+
+  @Override
+  public String getAgentType() {
+    return accountName + "/" + region + "/" + getClass().getSimpleName();
+  }
+
+  @Override
+  public String getProviderName() {
+    return EcsProvider.NAME;
+  }
+
+  @Override
+  public String getHealthId() {
+    return HEALTH_ID;
+  }
+}
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgent.java
index 926fa79c5a8..f738b4f4866 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgent.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgent.java
@@ -16,6 +16,13 @@
 package com.netflix.spinnaker.clouddriver.ecs.provider.agent;
 
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS;
+
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.ecs.AmazonECS;
 import com.amazonaws.services.ecs.model.DescribeTasksRequest;
@@ -30,10 +37,6 @@
 import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
 import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
 import com.netflix.spinnaker.clouddriver.ecs.cache.Keys;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -43,22 +46,24 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
-import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
-import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE;
-import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TaskCachingAgent extends AbstractEcsOnDemandAgent<Task> {
-  private static final Collection<AgentDataType> types = Collections.unmodifiableCollection(Arrays.asList(
-    AUTHORITATIVE.forType(TASKS.toString()),
-    INFORMATIVE.forType(ECS_CLUSTERS.toString())
-  ));
+  private static final Collection<AgentDataType> types =
+      Collections.unmodifiableCollection(
+          Arrays.asList(
+              AUTHORITATIVE.forType(TASKS.toString()),
+              INFORMATIVE.forType(ECS_CLUSTERS.toString())));
 
   private final Logger log = LoggerFactory.getLogger(getClass());
 
-  public TaskCachingAgent(NetflixAmazonCredentials account, String region, AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider, Registry registry) {
+  public TaskCachingAgent(
+      NetflixAmazonCredentials account,
+      String region,
+      AmazonClientProvider amazonClientProvider,
+      AWSCredentialsProvider awsCredentialsProvider,
+      Registry registry) {
     super(account, region, amazonClientProvider, awsCredentialsProvider, registry);
   }
 
@@ -89,7 +94,9 @@ protected List<Task> getItems(AmazonECS ecs, ProviderCache providerCache) {
       if (taskArns.size() == 0) {
         continue;
       }
-      List<Task> tasks = ecs.describeTasks(new DescribeTasksRequest().withCluster(cluster).withTasks(taskArns)).getTasks();
+      List<Task> tasks =
+          ecs.describeTasks(new DescribeTasksRequest().withCluster(cluster).withTasks(taskArns))
+              .getTasks();
       taskList.addAll(tasks);
       nextToken = listTasksResult.getNextToken();
     } while (nextToken != null && nextToken.length() != 0);
@@ -98,14 +105,17 @@ protected List<Task> getItems(AmazonECS ecs, ProviderCache providerCache) {
   }
 
   @Override
-  public Collection<Map> pendingOnDemandRequests(ProviderCache providerCache) {
+  public Collection<Map<String, Object>> pendingOnDemandRequests(ProviderCache providerCache) {
     Collection<CacheData> allOnDemand = providerCache.getAll(ON_DEMAND.toString());
-    List<Map> returnResults = new LinkedList<>();
+    List<Map<String, Object>> returnResults = new LinkedList<>();
     for (CacheData onDemand : allOnDemand) {
       Map<String, String> parsedKey = Keys.parse(onDemand.getId());
-      if (parsedKey != null && parsedKey.get("type") != null &&
-        (parsedKey.get("type").equals(SERVICES.toString()) || parsedKey.get("type").equals(TASKS.toString()) &&
-          parsedKey.get("account").equals(accountName) && parsedKey.get("region").equals(region))) {
+      if (parsedKey != null
+          && parsedKey.get("type") != null
+          && (parsedKey.get("type").equals(SERVICES.toString())
+              || parsedKey.get("type").equals(TASKS.toString())
+                  && parsedKey.get("account").equals(accountName)
+                  && parsedKey.get("region").equals(region))) {
 
         parsedKey.put("type", "serverGroup");
         parsedKey.put("serverGroup", parsedKey.get("serviceName"));
@@ -116,8 +126,16 @@ public Collection<Map> pendingOnDemandRequests(ProviderCache providerCache) {
         result.put("cacheTime", onDemand.getAttributes().get("cacheTime"));
         result.put("cacheExpiry", onDemand.getAttributes().get("cacheExpiry"));
-        result.put("processedCount", (onDemand.getAttributes().get("processedCount") != null ? onDemand.getAttributes().get("processedCount") : 1));
-        result.put("processedTime", onDemand.getAttributes().get("processedTime") != null ? onDemand.getAttributes().get("processedTime") : new Date());
+        result.put(
+            "processedCount",
+            (onDemand.getAttributes().get("processedCount") != null
+                ? onDemand.getAttributes().get("processedCount")
+                : 1));
+        result.put(
+            "processedTime",
+            onDemand.getAttributes().get("processedTime") != null
+                ? onDemand.getAttributes().get("processedTime")
+                : new Date());
 
         returnResults.add(result);
       }
@@ -127,14 +145,16 @@ public Collection<Map> pendingOnDemandRequests(ProviderCache providerCache) {
 
   @Override
   void storeOnDemand(ProviderCache providerCache, Map<String, ?> data) {
-    metricsSupport.onDemandStore(() ->{
-      String keyString = Keys.getServiceKey(accountName, region, (String) data.get("serverGroupName"));
-      Map<String, Object> att = new HashMap<>();
-      att.put("cacheTime", new Date());
-      CacheData cacheData = new DefaultCacheData(keyString, att, Collections.emptyMap());
-      providerCache.putCacheData(ON_DEMAND.toString(), cacheData);
-      return null;
-    });
+    metricsSupport.onDemandStore(
+        () -> {
+          String keyString =
+              Keys.getServiceKey(accountName, region, (String) data.get("serverGroupName"));
+          Map<String, Object> att = new HashMap<>();
+          att.put("cacheTime", new Date());
+          CacheData cacheData = new DefaultCacheData(keyString, att, Collections.emptyMap());
+          providerCache.putCacheData(ON_DEMAND.toString(), cacheData);
+          return null;
+        });
   }
 
   @Override
@@ -150,9 +170,12 @@ protected Map<String, Collection<CacheData>> generateFreshData(Collection<Task>
       dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap()));
 
       String clusterName = StringUtils.substringAfterLast(task.getClusterArn(), "/");
-      Map<String, Object> clusterAttributes = EcsClusterCachingAgent.convertClusterArnToAttributes(accountName, region, task.getClusterArn());
+      Map<String, Object> clusterAttributes =
+          EcsClusterCachingAgent.convertClusterArnToAttributes(
+              accountName, region, task.getClusterArn());
       key = Keys.getClusterKey(accountName, region, clusterName);
-      clusterDataPoints.put(key, new DefaultCacheData(key, clusterAttributes, Collections.emptyMap()));
+      clusterDataPoints.put(
+          key, new DefaultCacheData(key, clusterAttributes, Collections.emptyMap()));
     }
 
     log.info("Caching " + dataPoints.size() + " tasks in " + getAgentType());
@@ -177,10 +200,12 @@ public static Map<String, Object> convertTaskToAttributes(Task task) {
     attributes.put("containers", task.getContainers());
     attributes.put("lastStatus", task.getLastStatus());
     attributes.put("desiredStatus", task.getDesiredStatus());
+    attributes.put("healthStatus", task.getHealthStatus());
     if (task.getStartedAt() != null) {
       attributes.put("startedAt", task.getStartedAt().getTime());
     }
     attributes.put("attachments", task.getAttachments());
+    attributes.put("availabilityZone", task.getAvailabilityZone());
 
     return attributes;
   }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgent.java
index aa6f1dc717d..0fb61acb055 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgent.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgent.java
@@ -16,11 +16,12 @@
 package com.netflix.spinnaker.clouddriver.ecs.provider.agent;
 
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS;
+
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.ecs.AmazonECS;
 import com.amazonaws.services.ecs.model.DescribeTaskDefinitionRequest;
-import com.amazonaws.services.ecs.model.ListTaskDefinitionsRequest;
-import com.amazonaws.services.ecs.model.ListTaskDefinitionsResult;
 import com.amazonaws.services.ecs.model.TaskDefinition;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.netflix.spectator.api.Registry;
@@ -31,46 +32,40 @@
 import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
 import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
 import com.netflix.spinnaker.clouddriver.ecs.cache.Keys;
+import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient;
 import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskDefinitionCacheClient;
+import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service;
+import java.util.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS;
-
 public class TaskDefinitionCachingAgent extends AbstractEcsOnDemandAgent<TaskDefinition> {
-  private static final Collection<AgentDataType> types = Collections.unmodifiableCollection(Arrays.asList(
-    AUTHORITATIVE.forType(TASK_DEFINITIONS.toString())
-  ));
+  private static final Collection<AgentDataType> types =
+      Collections.unmodifiableCollection(
+          Arrays.asList(AUTHORITATIVE.forType(TASK_DEFINITIONS.toString())));
 
   private final Logger log = LoggerFactory.getLogger(getClass());
 
   private ObjectMapper objectMapper;
 
-  public TaskDefinitionCachingAgent(NetflixAmazonCredentials account, String region,
-                                    AmazonClientProvider amazonClientProvider,
-                                    AWSCredentialsProvider awsCredentialsProvider,
-                                    Registry registry,
-                                    ObjectMapper objectMapper) {
+  public TaskDefinitionCachingAgent(
+      NetflixAmazonCredentials account,
+      String region,
+      AmazonClientProvider amazonClientProvider,
+      AWSCredentialsProvider awsCredentialsProvider,
+      Registry registry,
+      ObjectMapper objectMapper) {
     super(account, region, amazonClientProvider, awsCredentialsProvider, registry);
     this.objectMapper = objectMapper;
   }
 
-  public static Map<String, Object> convertTaskDefinitionToAttributes(TaskDefinition taskDefinition) {
+  public static Map<String, Object> convertTaskDefinitionToAttributes(
+      TaskDefinition taskDefinition) {
     Map<String, Object> attributes = new HashMap<>();
     attributes.put("taskDefinitionArn", taskDefinition.getTaskDefinitionArn());
     attributes.put("containerDefinitions", taskDefinition.getContainerDefinitions());
     attributes.put("taskRoleArn", taskDefinition.getTaskRoleArn());
+    attributes.put("memory", taskDefinition.getMemory());
+    attributes.put("cpu", taskDefinition.getCpu());
 
     return attributes;
   }
@@ -86,70 +81,66 @@ public String getAgentType() {
 
   @Override
   protected List<TaskDefinition> getItems(AmazonECS ecs, ProviderCache providerCache) {
-    List<TaskDefinition> taskDefinitionList = new LinkedList<>();
-    Set<String> cachedArns = providerCache.getIdentifiers(TASK_DEFINITIONS.toString()).stream()
-      .filter(id -> keyAccountRegionFilter(TASK_DEFINITIONS.toString(), id))
-      .map(id -> {
-        Map<String, String> keyParts = Keys.parse(id);
-        return keyParts.get("taskDefinitionArn");
-      })
-      .collect(Collectors.toSet());
-
-    String nextToken = null;
-    do {
-      ListTaskDefinitionsRequest listTasksRequest = new ListTaskDefinitionsRequest();
-      if (nextToken != null) {
-        listTasksRequest.setNextToken(nextToken);
-      }
-      ListTaskDefinitionsResult listTaskDefinitionsResult = ecs.listTaskDefinitions(listTasksRequest);
-      List<String> taskDefinitionArns = listTaskDefinitionsResult.getTaskDefinitionArns();
+    // get all ECS services in region for account
+    ServiceCacheClient serviceCacheClient = new ServiceCacheClient(providerCache, objectMapper);
+    Collection<Service> services = serviceCacheClient.getAll(accountName, region);
+    log.debug("Found {} ECS services for which to cache task definitions", services.size());
 
-      if (taskDefinitionArns.size() == 0) {
-        continue;
-      }
+    Set<String> taskDefArns = new HashSet<>();
+
+    for (Service service : services) {
+      taskDefArns.add(service.getTaskDefinition());
+    }
 
-      Set<String> newTaskDefArns = new HashSet<>(taskDefinitionArns);
-      newTaskDefArns.removeAll(cachedArns);
+    List<TaskDefinition> taskDefinitions = new ArrayList<>();
 
-      Set<String> existingTaskDefArns = new HashSet<>(taskDefinitionArns);
-      existingTaskDefArns.removeAll(newTaskDefArns);
+    int newTaskDefs = 0;
 
-      if (!existingTaskDefArns.isEmpty()) {
-        // TaskDefinitions are immutable, there's no reason to make a describe call on existing ones.
-        taskDefinitionList.addAll(retrieveFromCache(existingTaskDefArns, providerCache));
-      }
+    for (String arn : taskDefArns) {
 
-      for (String taskDefinitionArn : newTaskDefArns) {
-        TaskDefinition taskDefinition = ecs.describeTaskDefinition(new DescribeTaskDefinitionRequest()
-          .withTaskDefinition(taskDefinitionArn)).getTaskDefinition();
-        taskDefinitionList.add(taskDefinition);
+      // TaskDefinitions are immutable, there's no reason to
+      // make a describe call on existing ones.
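+      // a cache hit is returned as-is; only ARNs absent from the cache fall
+      // through to an ECS describeTaskDefinition call below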
+      TaskDefinition cacheEntry = retrieveFromCache(arn, providerCache);
+
+      if (cacheEntry != null) {
+        taskDefinitions.add(cacheEntry);
+      } else {
+        TaskDefinition taskDef =
+            ecs.describeTaskDefinition(new DescribeTaskDefinitionRequest().withTaskDefinition(arn))
+                .getTaskDefinition();
+        if (taskDef != null) {
+          taskDefinitions.add(taskDef);
+          newTaskDefs++;
+        }
       }
+    }
 
-      nextToken = listTaskDefinitionsResult.getNextToken();
-    } while (nextToken != null && nextToken.length() != 0);
-    return taskDefinitionList;
+    log.info(
+        "Described {} new task definitions ({} already cached)",
+        newTaskDefs,
+        taskDefinitions.size() - newTaskDefs);
+
+    return taskDefinitions;
   }
 
-  private Set<TaskDefinition> retrieveFromCache(Set<String> taskDefArns, ProviderCache providerCache) {
-    TaskDefinitionCacheClient taskDefinitionCacheClient = new TaskDefinitionCacheClient(providerCache, objectMapper);
-    Set<TaskDefinition> taskDefs = new HashSet<>();
+  private TaskDefinition retrieveFromCache(String taskDefArn, ProviderCache providerCache) {
+    TaskDefinitionCacheClient taskDefinitionCacheClient =
+        new TaskDefinitionCacheClient(providerCache, objectMapper);
 
-    for (String taskDefArn : taskDefArns) {
-      String key = Keys.getTaskDefinitionKey(accountName, region, taskDefArn);
-      TaskDefinition taskDefinition = taskDefinitionCacheClient.get(key);
-      taskDefs.add(taskDefinition);
-    }
+    String key = Keys.getTaskDefinitionKey(accountName, region, taskDefArn);
 
-    return taskDefs;
+    return taskDefinitionCacheClient.get(key);
   }
 
   @Override
-  protected Map<String, Collection<CacheData>> generateFreshData(Collection<TaskDefinition> taskDefinitions) {
+  protected Map<String, Collection<CacheData>> generateFreshData(
+      Collection<TaskDefinition> taskDefinitions) {
     Collection<CacheData> dataPoints = new LinkedList<>();
 
     for (TaskDefinition taskDefinition : taskDefinitions) {
       Map<String, Object> attributes = convertTaskDefinitionToAttributes(taskDefinition);
-      String key = Keys.getTaskDefinitionKey(accountName, region, taskDefinition.getTaskDefinitionArn());
+      String key =
+          Keys.getTaskDefinitionKey(accountName, region, taskDefinition.getTaskDefinitionArn());
       dataPoints.add(new DefaultCacheData(key, attributes, Collections.emptyMap()));
     }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgent.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgent.java
index c7354020ef2..01e2ad29ef4 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgent.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgent.java
@@ -16,14 +16,19 @@
 package com.netflix.spinnaker.clouddriver.ecs.provider.agent;
 
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH;
+import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS;
+
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.ecs.AmazonECS;
+import com.amazonaws.services.ecs.model.Container;
+import com.amazonaws.services.ecs.model.ContainerDefinition;
 import com.amazonaws.services.ecs.model.LoadBalancer;
+import com.amazonaws.services.ecs.model.NetworkBinding;
 import com.amazonaws.services.ecs.model.NetworkInterface;
-import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing;
-import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthRequest;
-import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthResult;
-import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription;
+import com.amazonaws.services.ecs.model.PortMapping;
+import com.amazonaws.services.ecs.model.TaskDefinition;
 import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.netflix.spinnaker.cats.agent.AgentDataType;
@@ -34,17 +39,8 @@
 import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
 import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCachingAgent;
 import com.netflix.spinnaker.clouddriver.ecs.cache.Keys;
-import com.netflix.spinnaker.clouddriver.ecs.cache.client.ContainerInstanceCacheClient;
-import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient;
-import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskCacheClient;
-import com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance;
-import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service;
-import com.netflix.spinnaker.clouddriver.ecs.cache.model.Task;
-import com.netflix.spinnaker.clouddriver.ecs.cache.model.TaskHealth;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import com.netflix.spinnaker.clouddriver.ecs.cache.client.*;
+import com.netflix.spinnaker.clouddriver.ecs.cache.model.*;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -52,29 +48,29 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
-import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS;
-import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS;
-
-public class TaskHealthCachingAgent extends AbstractEcsCachingAgent<TaskHealth> implements HealthProvidingCachingAgent {
-  private static final Collection<AgentDataType> types = Collections.unmodifiableCollection(Arrays.asList(
-    AUTHORITATIVE.forType(HEALTH.toString())
-  ));
-  private final static String HEALTH_ID = "ecs-task-instance-health";
+public class TaskHealthCachingAgent extends AbstractEcsCachingAgent<TaskHealth>
+    implements HealthProvidingCachingAgent {
+  private static final Collection<AgentDataType> types =
+      Collections.unmodifiableCollection(Arrays.asList(AUTHORITATIVE.forType(HEALTH.toString())));
+  private static final String HEALTH_ID = "ecs-task-instance-health";
+  private static final String STATUS_UP = "Up";
+  private static final String STATUS_UNKNOWN = "Unknown";
 
   private final Logger log = LoggerFactory.getLogger(getClass());
-  private Collection<String> taskEvicitions;
-  private Collection<String> serviceEvicitions;
-  private Collection<String> taskDefEvicitions;
+  private Collection<String> taskEvictions;
   private ObjectMapper objectMapper;
 
-  public TaskHealthCachingAgent(NetflixAmazonCredentials account, String region,
-                                AmazonClientProvider amazonClientProvider,
-                                AWSCredentialsProvider awsCredentialsProvider,
-                                ObjectMapper objectMapper) {
+  public TaskHealthCachingAgent(
+      NetflixAmazonCredentials account,
+      String region,
+      AmazonClientProvider amazonClientProvider,
+      AWSCredentialsProvider awsCredentialsProvider,
+      ObjectMapper objectMapper) {
     super(account, region, amazonClientProvider, awsCredentialsProvider);
     this.objectMapper = objectMapper;
   }
@@ -82,7 +78,6 @@ public TaskHealthCachingAgent(NetflixAmazonCredentials account, String region,
   public static Map<String, Object> convertTaskHealthToAttributes(TaskHealth taskHealth) {
     Map<String, Object> attributes = new HashMap<>();
     attributes.put("instanceId", taskHealth.getInstanceId());
-    attributes.put("state", taskHealth.getState());
     attributes.put("type", taskHealth.getType());
     attributes.put("service", taskHealth.getServiceName());
@@ -94,102 +89,148 @@
   @Override
   protected List<TaskHealth> getItems(AmazonECS ecs, ProviderCache providerCache) {
     TaskCacheClient taskCacheClient = new TaskCacheClient(providerCache, objectMapper);
+    TaskDefinitionCacheClient taskDefinitionCacheClient =
+        new TaskDefinitionCacheClient(providerCache, objectMapper);
     ServiceCacheClient serviceCacheClient = new ServiceCacheClient(providerCache, objectMapper);
-    AmazonElasticLoadBalancing amazonloadBalancing = amazonClientProvider.getAmazonElasticLoadBalancingV2(account, region, false);
+    TargetHealthCacheClient targetHealthCacheClient =
+        new TargetHealthCacheClient(providerCache, objectMapper);
 
-    ContainerInstanceCacheClient containerInstanceCacheClient = new ContainerInstanceCacheClient(providerCache);
+    ContainerInstanceCacheClient containerInstanceCacheClient =
+        new ContainerInstanceCacheClient(providerCache);
 
     List<TaskHealth> taskHealthList = new LinkedList<>();
-    taskEvicitions = new LinkedList<>();
-    serviceEvicitions = new LinkedList<>();
-    taskDefEvicitions = new LinkedList<>();
+    taskEvictions = new LinkedList<>();
 
     Collection<Task> tasks = taskCacheClient.getAll(accountName, region);
     if (tasks != null) {
+      log.debug("Found {} tasks to retrieve health for.", tasks.size());
       for (Task task : tasks) {
-        String containerInstanceCacheKey = Keys.getContainerInstanceKey(accountName, region, task.getContainerInstanceArn());
-        ContainerInstance containerInstance = containerInstanceCacheClient.get(containerInstanceCacheKey);
+        String containerInstanceCacheKey =
+            Keys.getContainerInstanceKey(accountName, region, task.getContainerInstanceArn());
+        ContainerInstance containerInstance =
+            containerInstanceCacheClient.get(containerInstanceCacheKey);
 
         String serviceName = StringUtils.substringAfter(task.getGroup(), "service:");
         String serviceKey = Keys.getServiceKey(accountName, region, serviceName);
         Service service = serviceCacheClient.get(serviceKey);
+
         if (service == null) {
           String taskEvictionKey = Keys.getTaskKey(accountName, region, task.getTaskId());
-          taskEvicitions.add(taskEvictionKey);
+          taskEvictions.add(taskEvictionKey);
+          log.debug(
+              "Service '{}' for task '{}' is null. Will not retrieve health.",
+              serviceName,
+              task.getTaskArn());
           continue;
         }
 
-        if (isContainerMissingNetworking(task)) {
+        String taskDefinitionCacheKey =
+            Keys.getTaskDefinitionKey(accountName, region, service.getTaskDefinition());
+        TaskDefinition taskDefinition = taskDefinitionCacheClient.get(taskDefinitionCacheKey);
+
+        boolean lacksNetworkInterfaces = isTaskMissingNetworkInterfaces(task);
+        if (task.getContainers().isEmpty()
+            || (isTaskMissingNetworkBindings(task) && lacksNetworkInterfaces)) {
+          log.debug(
+              "Task '{}' is missing networking. Will not retrieve load balancer health.",
+              task.getTaskArn());
           continue;
         }
 
         TaskHealth taskHealth;
-        if (task.getContainers().get(0).getNetworkBindings().size() == 1) {
-          taskHealth = inferHealthNetworkBindedContainer(amazonloadBalancing, task, containerInstance, serviceName, service);
+        // ideally, could determine health check method by looking at taskDef.networkMode,
+        // however this isn't reliably cached yet, so reusing network binding check.
+        if (!lacksNetworkInterfaces) {
+          // if network interfaces are present, assume awsvpc mode
+          taskHealth =
+              inferHealthNetworkInterfacedContainer(
+                  targetHealthCacheClient, task, serviceName, service, taskDefinition);
        } else {
-          taskHealth = inferHealthNetworkInterfacedContainer(amazonloadBalancing, task, serviceName, service);
+          taskHealth =
+              inferHealthNetworkBindedContainer(
+                  targetHealthCacheClient, task, containerInstance, serviceName, service);
         }
 
+        log.debug("Task Health contains the following elements: {}", taskHealth);
         if (taskHealth != null) {
           taskHealthList.add(taskHealth);
         }
+        log.debug("TaskHealthList contains the following elements: {}", taskHealthList);
       }
+    } else {
+      log.debug("Task list is null. No healths to describe.");
     }
 
     return taskHealthList;
   }
 
-  private TaskHealth inferHealthNetworkInterfacedContainer(AmazonElasticLoadBalancing amazonloadBalancing,
-                                                           Task task,
-                                                           String serviceName,
-                                                           Service loadBalancerService) {
+  private TaskHealth inferHealthNetworkInterfacedContainer(
+      TargetHealthCacheClient targetHealthCacheClient,
+      Task task,
+      String serviceName,
+      Service loadBalancerService,
+      TaskDefinition taskDefinition) {
+
+    if (taskDefinition == null) {
+      log.debug(
+          "Provided task definition '{}' is null for task '{}'.",
+          loadBalancerService.getTaskDefinition(),
+          task.getTaskArn());
+      return null;
+    }
 
     List<LoadBalancer> loadBalancers = loadBalancerService.getLoadBalancers();
+    log.debug("LoadBalancerService found {} load balancers.", loadBalancers.size());
+
+    TaskHealth overallTaskHealth = null;
     for (LoadBalancer loadBalancer : loadBalancers) {
       if (loadBalancer.getTargetGroupArn() == null) {
+        log.debug("LoadBalancer does not contain a target group arn.");
         continue;
       }
 
-      NetworkInterface networkInterface = task.getContainers().get(0).getNetworkInterfaces().get(0);
-      DescribeTargetHealthResult describeTargetHealthResult = amazonloadBalancing.describeTargetHealth(
-        new DescribeTargetHealthRequest()
-          .withTargetGroupArn(loadBalancer.getTargetGroupArn())
-          .withTargets(
-            new TargetDescription()
-              .withId(networkInterface.getPrivateIpv4Address())
-          )
-      );
-
-      if (describeTargetHealthResult.getTargetHealthDescriptions().isEmpty()) {
-        evictStaleData(task, loadBalancerService);
+      if (!isContainerPortPresent(
+          taskDefinition.getContainerDefinitions(), loadBalancer.getContainerPort())) {
+        log.debug(
+            "Container does not contain a port mapping with load balanced container port: {}.",
+            loadBalancer.getContainerPort());
         continue;
       }
 
+      Collection<Container> containers = task.getContainers();
+      NetworkInterface networkInterface = null;
 
-      TargetHealthDescription healthDescription = describeTargetHealthResult.getTargetHealthDescriptions().get(0);
+      for (Container container : containers) {
+        if (container.getNetworkInterfaces().size() >= 1) {
+          networkInterface = container.getNetworkInterfaces().get(0);
+          break;
+        }
+      }
 
-      TaskHealth taskHealth = makeTaskHealth(task, serviceName, healthDescription);
-      return taskHealth;
+      overallTaskHealth =
+          describeTargetHealth(
+              targetHealthCacheClient,
+              task,
+              serviceName,
+              loadBalancer.getTargetGroupArn(),
+              networkInterface.getPrivateIpv4Address(),
+              loadBalancer.getContainerPort(),
+              overallTaskHealth);
     }
 
-    return null;
+    return overallTaskHealth;
   }
 
-  private void evictStaleData(Task task, Service loadBalancerService) {
-    String serviceEvictionKey = Keys.getTaskDefinitionKey(accountName, region, loadBalancerService.getServiceName());
-    serviceEvicitions.add(serviceEvictionKey);
-    String taskEvictionKey = Keys.getTaskKey(accountName, region, task.getTaskId());
-    taskEvicitions.add(taskEvictionKey);
-
-    String taskDefArn = loadBalancerService.getTaskDefinition();
-    String taskDefKey = Keys.getTaskDefinitionKey(accountName, region, taskDefArn);
-    taskDefEvicitions.add(taskDefKey);
-  }
-
-  private TaskHealth makeTaskHealth(Task task, String serviceName, TargetHealthDescription healthDescription) {
-    String targetHealth = healthDescription.getTargetHealth().getState().equals("healthy") ? "Up" : "Unknown";
-
+  private TaskHealth makeTaskHealth(
+      Task task, String serviceName, TargetHealthDescription healthDescription) {
+    String targetHealth = STATUS_UNKNOWN;
+    if (healthDescription != null) {
+      log.debug("Task target health is: {}", healthDescription.getTargetHealth());
+      targetHealth =
+          healthDescription.getTargetHealth().getState().equals("healthy")
+              ? STATUS_UP
+              : STATUS_UNKNOWN;
+    }
     TaskHealth taskHealth = new TaskHealth();
     taskHealth.setType("loadBalancer");
     taskHealth.setState(targetHealth);
@@ -197,71 +238,165 @@ private TaskHealth makeTaskHealth(Task task, String serviceName, TargetHealthDes
     taskHealth.setTaskId(task.getTaskId());
     taskHealth.setTaskArn(task.getTaskArn());
     taskHealth.setInstanceId(task.getTaskArn());
+    log.debug("Task Health is: {}", taskHealth);
 
     return taskHealth;
   }
 
-  private TaskHealth inferHealthNetworkBindedContainer(AmazonElasticLoadBalancing amazonloadBalancing,
-                                                       Task task,
-                                                       ContainerInstance containerInstance,
-                                                       String serviceName,
-                                                       Service loadBalancerService) {
-    int port = task.getContainers().get(0).getNetworkBindings().get(0).getHostPort();
+  private TaskHealth inferHealthNetworkBindedContainer(
+      TargetHealthCacheClient targetHealthCacheClient,
+      Task task,
+      ContainerInstance containerInstance,
+      String serviceName,
+      Service loadBalancerService) {
 
     List<LoadBalancer> loadBalancers = loadBalancerService.getLoadBalancers();
+    log.debug("LoadBalancerService found {} load balancers.", loadBalancers.size());
+
+    TaskHealth overallTaskHealth = null;
     for (LoadBalancer loadBalancer : loadBalancers) {
-      if (loadBalancer.getTargetGroupArn() == null || containerInstance.getEc2InstanceId() == null) {
+      if (loadBalancer.getTargetGroupArn() == null) {
+        log.debug("LoadBalancer does not contain a target group arn.");
         continue;
       }
 
-      DescribeTargetHealthResult describeTargetHealthResult;
-      describeTargetHealthResult = amazonloadBalancing.describeTargetHealth(
-        new DescribeTargetHealthRequest().withTargetGroupArn(loadBalancer.getTargetGroupArn()).withTargets(
-          new TargetDescription().withId(containerInstance.getEc2InstanceId()).withPort(port)));
+      if (containerInstance == null || containerInstance.getEc2InstanceId() == null) {
+        log.debug("Container instance is missing or does not contain a ec2 instance id.");
+        continue;
+      }
 
-      if (describeTargetHealthResult.getTargetHealthDescriptions().isEmpty()) {
-        evictStaleData(task, loadBalancerService);
+      Optional<Integer> hostPort =
+          getHostPort(task.getContainers(), loadBalancer.getContainerPort());
+      if (!hostPort.isPresent()) {
+        log.debug(
+            "Container does not contain a port mapping with load balanced container port: {}.",
+            loadBalancer.getContainerPort());
         continue;
       }
 
-      TargetHealthDescription healthDescription = describeTargetHealthResult.getTargetHealthDescriptions().get(0);
+      overallTaskHealth =
+          describeTargetHealth(
+              targetHealthCacheClient,
+              task,
+              serviceName,
+              loadBalancer.getTargetGroupArn(),
+              containerInstance.getEc2InstanceId(),
+              hostPort.get(),
+              overallTaskHealth);
+    }
+
+    return overallTaskHealth;
+  }
+
+  private TargetHealthDescription findHealthDescription(
+      List<TargetHealthDescription> targetHealths, String targetId, Integer targetPort) {
+
+    return targetHealths.stream()
+        .filter(
+            h ->
+                h.getTarget().getId().equals(targetId)
+                    && h.getTarget().getPort().equals(targetPort))
+        .findFirst()
+        .orElse(null);
+  }
+
+  private TaskHealth describeTargetHealth(
+      TargetHealthCacheClient targetHealthCacheClient,
+      Task task,
+      String serviceName,
+      String targetGroupArn,
+      String targetId,
+      Integer targetPort,
+      TaskHealth overallTaskHealth) {
+
+    String targetHealthKey = Keys.getTargetHealthKey(accountName, region, targetGroupArn);
+    EcsTargetHealth targetHealth = targetHealthCacheClient.get(targetHealthKey);
+
+    if (targetHealth == null) {
+      log.debug("Cached EcsTargetHealth is empty for targetGroup {}", targetGroupArn);
+      return makeTaskHealth(task, serviceName, null);
+    }
+    TargetHealthDescription targetHealthDescription =
+        findHealthDescription(targetHealth.getTargetHealthDescriptions(), targetId, targetPort);
+
+    if (targetHealthDescription == null) {
+      log.debug(
+          "TargetHealthDescription is empty on targetGroup '{}' for {}:{}",
+          targetGroupArn,
+          targetId,
+          targetPort);
+      return makeTaskHealth(task, serviceName, null);
+    }
 
-      TaskHealth taskHealth = makeTaskHealth(task, serviceName, healthDescription);
+    log.debug("Retrieved health of targetId {} for targetGroup {}", targetId, targetGroupArn);
+
+    TaskHealth taskHealth = makeTaskHealth(task, serviceName, targetHealthDescription);
+    if ((overallTaskHealth == null) || (taskHealth.getState().equals(STATUS_UNKNOWN))) {
       return taskHealth;
     }
 
-    return null;
+    return overallTaskHealth;
   }
 
-  private boolean isContainerMissingNetworking(Task task) {
-    if (task.getContainers().isEmpty()) {
-      return true;
+  private Optional<Integer> getHostPort(List<Container> containers, Integer hostPort) {
+    if (containers != null && !containers.isEmpty()) {
+      for (Container container : containers) {
+        for (NetworkBinding networkBinding : container.getNetworkBindings()) {
+          Integer containerPort = networkBinding.getContainerPort();
+
+          if (containerPort != null && containerPort.intValue() == hostPort.intValue()) {
+            log.debug("Load balanced hostPort: {} found for container.", hostPort);
+            return Optional.of(networkBinding.getHostPort());
+          }
+        }
+      }
     }
-    if (isTaskMissingNetworkBindings(task)
-      && isTaskMissingNetworkInterfaces(task)) {
-      return true;
-    } else {
-      return false;
+    return Optional.empty();
+  }
+
+  private boolean isContainerPortPresent(
+      List<ContainerDefinition> containerDefinitions, Integer containerPort) {
+    for (ContainerDefinition containerDefinition : containerDefinitions) {
+      for (PortMapping portMapping : containerDefinition.getPortMappings()) {
+        if (portMapping.getContainerPort().intValue() == containerPort.intValue()) {
+          log.debug("Load balanced containerPort: {} found for container.", containerPort);
+          return true;
        }
      }
    }
+
+    return false;
   }
 
   private boolean isTaskMissingNetworkBindings(Task task) {
-    return task.getContainers().isEmpty()
-      || task.getContainers().get(0).getNetworkBindings() == null
-      || task.getContainers().get(0).getNetworkBindings().isEmpty()
-      || task.getContainers().get(0).getNetworkBindings().get(0) == null;
+    Collection<Container> containers = task.getContainers();
+
+    for (Container container : containers) {
+      if (!(container.getNetworkBindings() == null
+          || container.getNetworkBindings().isEmpty()
+          || container.getNetworkBindings().get(0) == null)) {
+        return false;
+      }
+    }
+    return true;
   }
 
   private boolean isTaskMissingNetworkInterfaces(Task task) {
-    return task.getContainers().isEmpty()
-      || task.getContainers().get(0).getNetworkInterfaces() == null
-      || task.getContainers().get(0).getNetworkInterfaces().isEmpty()
-      || task.getContainers().get(0).getNetworkInterfaces().get(0) == null;
+    Collection<Container> containers = task.getContainers();
+
+    for (Container container : containers) {
+      if (!(container.getNetworkInterfaces() == null
+          || container.getNetworkInterfaces().isEmpty()
+          || container.getNetworkInterfaces().get(0) == null)) {
+        return false;
+      }
+    }
+    return true;
   }
 
   @Override
-  protected Map<String, Collection<CacheData>> generateFreshData(Collection<TaskHealth> taskHealthList) {
+  protected Map<String, Collection<CacheData>> generateFreshData(
+      Collection<TaskHealth> taskHealthList) {
     Collection<CacheData> dataPoints = new LinkedList<>();
 
     for (TaskHealth taskHealth : taskHealthList) {
@@ -271,7 +406,7 @@ protected Map<String, Collection<CacheData>> generateFreshData(Collection<TaskH
 
     Map<String, Collection<CacheData>> dataMap = new HashMap<>();
     dataMap.put(HEALTH.toString(), dataPoints);
@@ -279,26 +414,13 @@ protected Map<String, Collection<CacheData>> generateFreshData(Collection<TaskH
 
   @Override
-  protected Map<String, Collection<String>> addExtraEvictions(Map<String, Collection<String>> evictions) {
-    if (!taskEvicitions.isEmpty()) {
+  protected Map<String, Collection<String>> addExtraEvictions(
+      Map<String, Collection<String>> evictions) {
+    if (!taskEvictions.isEmpty()) {
       if (evictions.containsKey(TASKS.toString())) {
-        evictions.get(TASKS.toString()).addAll(taskEvicitions);
-      } else {
-        evictions.put(TASKS.toString(), taskEvicitions);
-      }
-    }
-    if (!serviceEvicitions.isEmpty()) {
-      if (evictions.containsKey(SERVICES.toString())) {
-        evictions.get(SERVICES.toString()).addAll(serviceEvicitions);
-      } else {
-        evictions.put(SERVICES.toString(), serviceEvicitions);
-      }
-    }
-    if (!taskDefEvicitions.isEmpty()) {
-      if (evictions.containsKey(TASK_DEFINITIONS.toString())) {
-        evictions.get(TASK_DEFINITIONS.toString()).addAll(taskDefEvicitions);
+        evictions.get(TASKS.toString()).addAll(taskEvictions);
       } else {
-        evictions.put(TASK_DEFINITIONS.toString(), taskDefEvicitions);
+        evictions.put(TASKS.toString(), taskEvictions);
       }
     }
     return evictions;
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/config/EcsProviderConfig.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/config/EcsProviderConfig.java
index 69d70ae2c75..cd405c845df 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/config/EcsProviderConfig.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/config/EcsProviderConfig.java
@@ -16,39 +16,11 @@
 package com.netflix.spinnaker.clouddriver.ecs.provider.config;
 
-import com.amazonaws.auth.AWSCredentialsProvider;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.netflix.spectator.api.Registry;
-import com.netflix.spinnaker.cats.agent.Agent;
-import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
-import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
-import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider;
 import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ContainerInstanceCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsCloudMetricAlarmCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsClusterCachingAgent;
 import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamPolicyReader;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamRoleCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ScalableTargetsCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskDefinitionCachingAgent;
-import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskHealthCachingAgent;
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
-import com.netflix.spinnaker.clouddriver.security.ProviderUtils;
-import org.springframework.beans.factory.config.ConfigurableBeanFactory;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.DependsOn;
-import org.springframework.context.annotation.Scope;
-
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials.AWSRegion;
 
 @Configuration
 public class EcsProviderConfig {
@@ -59,50 +31,7 @@ public IamPolicyReader iamPolicyReader(ObjectMapper objectMapper) {
   }
 
   @Bean
-  @DependsOn("netflixECSCredentials")
-  public EcsProvider ecsProvider(AccountCredentialsRepository accountCredentialsRepository, AmazonClientProvider amazonClientProvider,
-                                 AWSCredentialsProvider awsCredentialsProvider, Registry registry, IamPolicyReader iamPolicyReader,
-                                 ObjectMapper objectMapper) {
-    EcsProvider provider = new EcsProvider(accountCredentialsRepository, Collections.newSetFromMap(new ConcurrentHashMap<>()));
-    synchronizeEcsProvider(provider, accountCredentialsRepository, amazonClientProvider, awsCredentialsProvider, registry, iamPolicyReader, objectMapper);
-    return provider;
-  }
-
-  @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
-  @Bean
-  public EcsProviderSynchronizer synchronizeEcsProvider(EcsProvider ecsProvider, AccountCredentialsRepository accountCredentialsRepository,
-                                                        AmazonClientProvider amazonClientProvider, AWSCredentialsProvider awsCredentialsProvider, Registry registry,
-                                                        IamPolicyReader iamPolicyReader,
-                                                        ObjectMapper objectMapper) {
-
-    Set<String> scheduledAccounts = ProviderUtils.getScheduledAccounts(ecsProvider);
-    Set<NetflixAmazonCredentials> allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, NetflixAmazonCredentials.class);
-    List<Agent> newAgents = new LinkedList<>();
-
-    for (NetflixAmazonCredentials credentials : allAccounts) {
-      if (credentials.getCloudProvider().equals(EcsCloudProvider.ID)) {
-        newAgents.add(new IamRoleCachingAgent(credentials, amazonClientProvider, awsCredentialsProvider, iamPolicyReader)); // IAM is region-agnostic, so one caching agent per account is enough
-
-        for (AWSRegion region : credentials.getRegions()) {
-          if (!scheduledAccounts.contains(credentials.getName())) {
-            newAgents.add(new EcsClusterCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider));
-            newAgents.add(new ServiceCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider, registry));
-            newAgents.add(new TaskCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider, registry));
-            newAgents.add(new ContainerInstanceCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider, registry));
-            newAgents.add(new TaskDefinitionCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider, registry, objectMapper));
-            newAgents.add(new TaskHealthCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider, objectMapper));
-            newAgents.add(new EcsCloudMetricAlarmCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider));
-            newAgents.add(new ScalableTargetsCachingAgent(credentials, region.getName(), amazonClientProvider, awsCredentialsProvider, objectMapper));
-          }
-        }
-      }
-    }
-
-    ecsProvider.getAgents().addAll(newAgents);
-    ecsProvider.synchronizeHealthAgents();
-    return new EcsProviderSynchronizer();
-  }
-
-  class EcsProviderSynchronizer {
+  public EcsProvider ecsProvider() {
+    return new EcsProvider();
   }
 }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/AmazonPrimitiveConverter.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/AmazonPrimitiveConverter.java
index 1d53a0c13c6..40355fdee55 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/AmazonPrimitiveConverter.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/AmazonPrimitiveConverter.java
@@ -21,13 +21,12 @@
 import com.netflix.spinnaker.clouddriver.ecs.model.EcsSecurityGroup;
 import com.netflix.spinnaker.clouddriver.ecs.model.EcsSubnet;
 import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Component;
-
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Optional;
 import java.util.Set;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
 
 @Component
 public class AmazonPrimitiveConverter {
@@ -39,7 +38,8 @@ public AmazonPrimitiveConverter(EcsAccountMapper accountMapper) {
     this.accountMapper = accountMapper;
   }
 
-  public Collection<EcsSecurityGroup> convertToEcsSecurityGroup(Collection<AmazonSecurityGroup> securityGroups) {
+  public Collection<EcsSecurityGroup> convertToEcsSecurityGroup(
+      Collection<AmazonSecurityGroup> securityGroups) {
     Collection<EcsSecurityGroup> convertedSecurityGroups = new HashSet<>();
 
     for (AmazonSecurityGroup securityGroup : securityGroups) {
@@ -53,22 +53,28 @@ public Collection<EcsSecurityGroup> convertToEcsSecurityGroup(Collection<Amazon
 
   public Collection<EcsSubnet> convertToEcsSubnet(Collection<AmazonSubnet> subnet) {
     for (AmazonSubnet securityGroup : subnet) {
       EcsSubnet convertedSecurityGroup = convertToEcsSubnet(securityGroup);
-      Optional.ofNullable(convertToEcsSubnet(securityGroup)).ifPresent(convertedSecurityGroups::add);
+      Optional.ofNullable(convertToEcsSubnet(securityGroup))
+          .ifPresent(convertedSecurityGroups::add);
     }
 
     return convertedSecurityGroups;
   }
 
   public EcsSubnet convertToEcsSubnet(AmazonSubnet subnet) {
+    if (subnet == null) {
+      return null;
+    }
+
     NetflixECSCredentials ecsAccount = accountMapper.fromAwsAccountNameToEcs(subnet.getAccount());
     if (ecsAccount == null) {
       return null;
     }
 
-    EcsSubnet ecsSubnet = new EcsSubnet(
-      subnet.getType(),
-      subnet.getId(),
-      subnet.getState(),
-      subnet.getVpcId(),
-      subnet.getCidrBlock(),
-      subnet.getAvailableIpAddressCount(),
-      ecsAccount.getName(),
-      subnet.getRegion(),
-      subnet.getAvailabilityZone(),
-      subnet.getPurpose(),
-      subnet.getTarget(),
-      subnet.isDeprecated()
-    );
+    EcsSubnet ecsSubnet =
+        new EcsSubnet(
+            subnet.getType(),
+            subnet.getId(),
+            subnet.getState(),
+            subnet.getVpcId(),
+            subnet.getCidrBlock(),
+            subnet.getAvailableIpAddressCount(),
+            ecsAccount.getName(),
+            ecsAccount.getAccountId(),
+            subnet.getRegion(),
+            subnet.getAvailabilityZone(),
+            subnet.getPurpose(),
+            subnet.getTarget(),
+            subnet.isDeprecated());
 
     return ecsSubnet;
   }
-
 }
diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProvider.java
index 104a42db70e..78c4f2112e8 100644
--- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProvider.java
+++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProvider.java
@@ -20,44 +20,51 @@
 import com.amazonaws.services.ecr.model.DescribeImagesRequest;
 import com.amazonaws.services.ecr.model.DescribeImagesResult;
 import com.amazonaws.services.ecr.model.ImageDetail;
+import com.amazonaws.services.ecr.model.ImageIdentifier;
 import com.amazonaws.services.ecr.model.ListImagesRequest;
 import com.amazonaws.services.ecr.model.ListImagesResult;
 import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
 import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials;
 import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
 import com.netflix.spinnaker.clouddriver.ecs.model.EcsDockerImage;
-import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
 import com.netflix.spinnaker.kork.web.exceptions.NotFoundException;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Component;
-
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
 
 @Component
 public class EcrImageProvider implements ImageRepositoryProvider {
 
   private static final Pattern ACCOUNT_ID_PATTERN = Pattern.compile("^([0-9]{12})");
-  private static final Pattern REPOSITORY_NAME_PATTERN = Pattern.compile("\\/([a-z0-9._-]+)");
-  private static final String IDENTIFIER_PATTERN = "(:([a-z0-9._-]+)|@(sha256:[0-9a-f]{64}))";
+  private static final Pattern REPOSITORY_NAME_PATTERN =
+      Pattern.compile(
+          "\\/(((?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*){2,})");
+  private static final String IDENTIFIER_PATTERN = "(:([a-zA-Z0-9._-]+)|@(sha256:[0-9a-f]{64}))";
   private static final Pattern REGION_PATTERN = Pattern.compile("(\\w+-\\w+-\\d+)");
-  static final Pattern ECR_REPOSITORY_URI_PATTERN = Pattern.compile(ACCOUNT_ID_PATTERN.toString() + "\\.dkr\\.ecr\\." +
-    REGION_PATTERN.toString() + ".+" +
-    REPOSITORY_NAME_PATTERN.toString() +
-    IDENTIFIER_PATTERN);
+  static final Pattern ECR_REPOSITORY_URI_PATTERN =
+      Pattern.compile(
+          ACCOUNT_ID_PATTERN.toString()
+              + "\\.dkr\\.ecr\\."
+              + REGION_PATTERN.toString()
+              + ".+?"
+              + REPOSITORY_NAME_PATTERN.toString()
+              + IDENTIFIER_PATTERN);
 
   private final AmazonClientProvider amazonClientProvider;
 
-  private final AccountCredentialsProvider accountCredentialsProvider;
+  private final CredentialsRepository<NetflixECSCredentials> credentialsRepository;
 
   @Autowired
-  public EcrImageProvider(AmazonClientProvider amazonClientProvider,
-                          AccountCredentialsProvider accountCredentialsProvider) {
+  public EcrImageProvider(
+      AmazonClientProvider amazonClientProvider,
+      CredentialsRepository<NetflixECSCredentials> credentialsRepository) {
     this.amazonClientProvider = amazonClientProvider;
-    this.accountCredentialsProvider = accountCredentialsProvider;
+    this.credentialsRepository = credentialsRepository;
   }
 
   @Override
@@ -78,29 +85,41 @@ public List<EcsDockerImage> findImage(String url) {
     String accountId = extractAwsAccountId(url);
     String repository = extractEcrRepositoryName(url);
     String identifier = extractEcrIdentifier(repository, url);
-    boolean isTag = !(identifier.startsWith("sha256:") && identifier.length() == ("sha256:".length() + 64));
+    boolean isTag =
+        !(identifier.startsWith("sha256:") && identifier.length() == ("sha256:".length() + 64));
     String region = extractAwsRegion(url);
 
-    NetflixAmazonCredentials credentials = getCredentials(accountId);
+    NetflixAmazonCredentials credentials = getCredentials(accountId, region);
 
     if (!isValidRegion(credentials, region)) {
-      throw new IllegalArgumentException("The repository URI provided does not belong to a region that the credentials have access to or the region is not valid.");
+      throw new IllegalArgumentException(
+          "The repository URI provided does not belong to a region that the credentials have access to or the region is not valid.");
     }
 
     AmazonECR amazonECR = amazonClientProvider.getAmazonEcr(credentials, region, false);
 
-    ListImagesResult result = amazonECR.listImages(new ListImagesRequest().withRegistryId(accountId).withRepositoryName(repository));
-    DescribeImagesResult imagesResult = amazonECR.describeImages(new DescribeImagesRequest().withRegistryId(accountId).withRepositoryName(repository).withImageIds(result.getImageIds()));
+    List<ImageIdentifier> imageIds =
+        getImageIdentifiers(amazonECR, accountId, repository, identifier, isTag);
+    DescribeImagesResult imagesResult =
+        amazonECR.describeImages(
+            new DescribeImagesRequest()
+                .withRegistryId(accountId)
+                .withRepositoryName(repository)
+                .withImageIds(imageIds));
 
-    // TODO - what is the user interface we want to have here? We should discuss with Lars and Ethan from the community as this whole thing will undergo a big refactoring
-    List<ImageDetail> imagesWithThisIdentifier = imagesResult.getImageDetails().stream()
-      .filter(imageDetail -> imageFilter(imageDetail, identifier, isTag))
-      .collect(Collectors.toList());
+    // TODO - what is the user interface we want to have here? We should discuss with Lars and
+    // Ethan from the community as this whole thing will undergo a big refactoring
+    List<ImageDetail> imagesWithThisIdentifier = imagesResult.getImageDetails();
 
     if (imagesWithThisIdentifier.size() > 1) {
-      throw new IllegalArgumentException("More than 1 image has this " + (isTag ? "tag" : "digest") + "! This is currently not supported.");
+      throw new IllegalArgumentException(
+          "More than 1 image has this "
+              + (isTag ? "tag" : "digest")
+              + "! This is currently not supported.");
     } else if (imagesWithThisIdentifier.size() == 0) {
-      throw new IllegalArgumentException(String.format("No image with the " + (isTag ? "tag" : "digest") + " %s was found.", identifier));
+      throw new IllegalArgumentException(
+          String.format(
+              "No image with the " + (isTag ? "tag" : "digest") + " %s was found.", identifier));
     }
 
     ImageDetail matchedImage = imagesWithThisIdentifier.get(0);
@@ -109,37 +128,65 @@ public List<EcsDockerImage> findImage(String url) {
     ecsDockerImage.setRegion(region);
     ecsDockerImage.addAmiForRegion(region, matchedImage.getImageDigest());
     ecsDockerImage.setAttribute("creationDate", matchedImage.getImagePushedAt());
-    ecsDockerImage.setImageName(buildFullDockerImageUrl(matchedImage.getImageDigest(),
-      matchedImage.getRegistryId(),
-      matchedImage.getRepositoryName(),
-      region));
+    ecsDockerImage.setImageName(
+        buildFullDockerImageUrl(
+            matchedImage.getImageDigest(),
+            matchedImage.getRegistryId(),
+            matchedImage.getRepositoryName(),
+            region));
 
     return Collections.singletonList(ecsDockerImage);
   }
 
-  private boolean imageFilter(ImageDetail imageDetail, String identifier, boolean isTag) {
-    return isTag ?
-      imageDetail.getImageTags() != null && imageDetail.getImageTags().contains(identifier) :
-      imageDetail.getImageDigest().equals(identifier);
+  private boolean imageFilter(ImageIdentifier imageIdentifier, String identifier, boolean isTag) {
+    return isTag
+        ? imageIdentifier.getImageTag() != null && imageIdentifier.getImageTag().equals(identifier)
+        : imageIdentifier.getImageDigest().equals(identifier);
   }
 
-  private NetflixAmazonCredentials getCredentials(String accountId) {
-    for (AccountCredentials credentials : accountCredentialsProvider.getAll()) {
-      if (credentials instanceof NetflixAmazonCredentials) {
-        NetflixAmazonCredentials amazonCredentials = (NetflixAmazonCredentials) credentials;
-        if (amazonCredentials.getAccountId().equals(accountId)) {
-          return amazonCredentials;
-        }
+  private NetflixAmazonCredentials getCredentials(String accountId, String region) {
+
+    for (NetflixECSCredentials credentials : credentialsRepository.getAll()) {
+      if (credentials.getAccountId().equals(accountId)
+          && (credentials.getRegions().isEmpty()
+              || credentials.getRegions().stream()
+                  .anyMatch(oneRegion -> oneRegion.getName().equals(region)))) {
+        return credentials;
       }
     }
-    throw new NotFoundException(String.format("AWS account %s was not found. Please specify a valid account name", accountId));
+    throw new NotFoundException(
+        String.format(
+            "AWS account %s with region %s was not found. Please specify a valid account name and region",
+            accountId, region));
   }
 
+  private List<ImageIdentifier> getImageIdentifiers(
+      AmazonECR ecr, String accountId, String repository, String identifier, boolean isTag) {
+    List<ImageIdentifier> imageIdentifiers = new ArrayList<>();
+    String token = null;
+
+    ListImagesRequest request =
+        new ListImagesRequest().withRegistryId(accountId).withRepositoryName(repository);
+
+    do {
+      ListImagesResult result = ecr.listImages(request);
+      result.getImageIds().stream()
+          .filter(imageId -> imageFilter(imageId, identifier, isTag))
+          .forEachOrdered(imageIdentifiers::add);
+
+      token = result.getNextToken();
+      if (token != null) {
+        request.setNextToken(token);
+      }
+    } while (token != null);
+
+    return imageIdentifiers;
+  }
 
   private boolean isValidRegion(NetflixAmazonCredentials credentials, String region) {
     return credentials.getRegions().stream()
-      .map(AmazonCredentials.AWSRegion::getName)
-      .anyMatch(region::equals);
+        .map(AmazonCredentials.AWSRegion::getName)
+        .anyMatch(region::equals);
   }
 
   private boolean isValidEcrUrl(String imageUrl) {
@@ -149,18 +196,27 @@ private boolean isValidEcrUrl(String imageUrl) {
   }
 
   private String extractAwsAccountId(String imageUrl) {
-    return extractString(ACCOUNT_ID_PATTERN, imageUrl, 1,
-      "The repository URI provided does not contain a proper account ID.");
+    return extractString(
+        ACCOUNT_ID_PATTERN,
+        imageUrl,
+        1,
+        "The repository URI provided does not contain a proper account ID.");
   }
 
   private String extractEcrRepositoryName(String imageUrl) {
-    return extractString(REPOSITORY_NAME_PATTERN, imageUrl, 1,
-      "The repository URI provided does not contain a proper repository name.");
+    return extractString(
+        REPOSITORY_NAME_PATTERN,
+        imageUrl,
+        1,
+        "The repository URI provided does not contain a proper repository name.");
   }
 
   private String extractAwsRegion(String imageUrl) {
-    return extractString(REGION_PATTERN, imageUrl, 0,
-      "The repository URI provided does not contain a proper region.");
+    return extractString(
+        REGION_PATTERN,
+        imageUrl,
+        0,
+        "The repository URI provided does not contain a proper region.");
   }
 
   private String extractString(Pattern pattern, String imageUrl, int group, String error) {
@@ -175,14 +231,20 @@ private String extractEcrIdentifier(String repository, String imageUrl) {
     final Pattern identifierPatter = Pattern.compile(repository + IDENTIFIER_PATTERN);
     Matcher matcher = identifierPatter.matcher(imageUrl);
     if (!matcher.find()) {
-      throw new IllegalArgumentException("The repository URI provided does not contain a proper tag or sha256 digest.");
+      throw new IllegalArgumentException(
+          "The repository URI provided does not contain a proper tag or sha256 digest.");
     }
-    return matcher.group(1).startsWith(":") ?
-      matcher.group(2) :
-      matcher.group(3);
+    return matcher.group(1).startsWith(":") ? matcher.group(2) : matcher.group(3);
   }
 
-  private String buildFullDockerImageUrl(String imageDigest, String registryId, String repositoryName, String region) {
-    return registryId + ".dkr.ecr." + region + ".amazonaws.com/" + repositoryName + "@" + imageDigest;
+  private String buildFullDockerImageUrl(
+      String imageDigest, String registryId, String repositoryName, String region) {
+    return registryId
+        + ".dkr.ecr."
+ + region + + ".amazonaws.com/" + + repositoryName + + "@" + + imageDigest; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapper.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapper.java index 4230c1993dc..f5e54751e67 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapper.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapper.java @@ -1,58 +1,83 @@ +/* + * Copyright 2017 Lookout, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package com.netflix.spinnaker.clouddriver.ecs.provider.view; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.security.NetflixAssumeRoleEcsCredentials; import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Lazy; import org.springframework.stereotype.Component; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - @Component +@Lazy public class EcsAccountMapper { - final AccountCredentialsProvider accountCredentialsProvider; - final Map ecsCredentialsMap; - final Map awsCredentialsMap; + final CredentialsRepository credentialsRepository; + final CompositeCredentialsRepository compositeCredentialsRepository; + protected final Map ecsCredentialsMap; + protected final Map awsCredentialsMap; @Autowired - public EcsAccountMapper(AccountCredentialsProvider accountCredentialsProvider) { - this.accountCredentialsProvider = accountCredentialsProvider; - - Set allAccounts = accountCredentialsProvider.getAll(); + public EcsAccountMapper( + @Lazy CredentialsRepository credentialsRepository, + @Lazy CompositeCredentialsRepository compositeCredentialsRepository) { + this.credentialsRepository = credentialsRepository; + this.compositeCredentialsRepository = compositeCredentialsRepository; - Collection ecsAccounts = - (Collection) allAccounts - .stream() - .filter(credentials -> credentials instanceof NetflixAssumeRoleEcsCredentials) - .collect(Collectors.toSet()); + ecsCredentialsMap = new ConcurrentHashMap<>(); + awsCredentialsMap = new ConcurrentHashMap<>(); + } - ecsCredentialsMap = new HashMap<>(); - awsCredentialsMap = new HashMap<>(); + public void addMapEntry(NetflixAssumeRoleEcsCredentials 
credentials) { + ecsCredentialsMap.put(credentials.getAwsAccount(), credentials.getName()); + awsCredentialsMap.put(credentials.getName(), credentials.getAwsAccount()); + } - for (NetflixAssumeRoleEcsCredentials ecsAccount : ecsAccounts) { - ecsCredentialsMap.put(ecsAccount.getAwsAccount(), ecsAccount); + public void removeMapEntry(String ecsAccountName) { + ecsCredentialsMap.remove(awsCredentialsMap.get(ecsAccountName)); + awsCredentialsMap.remove(ecsAccountName); + } - allAccounts - .stream() - .filter(credentials -> credentials.getName().equals(ecsAccount.getAwsAccount())) - .findFirst() - .ifPresent(v -> awsCredentialsMap.put(ecsAccount.getName(), (NetflixAmazonCredentials) v)); + public NetflixECSCredentials fromAwsAccountNameToEcs(String awsAccountName) { + String ecsAccountName = ecsCredentialsMap.get(awsAccountName); + if (ecsAccountName == null) { + return null; } + return credentialsRepository.getOne(ecsAccountName); + } + + public NetflixAmazonCredentials fromEcsAccountNameToAws(String ecsAccountName) { + return (NetflixAmazonCredentials) + compositeCredentialsRepository.getCredentials( + awsCredentialsMap.get(ecsAccountName), AmazonCloudProvider.ID); } - public NetflixECSCredentials fromAwsAccountNameToEcs(String awsAccoutName) { - return ecsCredentialsMap.get(awsAccoutName); + public String fromAwsAccountNameToEcsAccountName(String awsAccountName) { + return ecsCredentialsMap.get(awsAccountName); } - public NetflixAmazonCredentials fromEcsAccountNameToAws(String ecsAccountName) { + public String fromEcsAccountNameToAwsAccountName(String ecsAccountName) { return awsCredentialsMap.get(ecsAccountName); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsCloudMetricProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsCloudMetricProvider.java index be3a2f70fea..2bddefd635c 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsCloudMetricProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsCloudMetricProvider.java @@ -19,11 +19,10 @@ import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsCloudWatchAlarmCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm; +import java.util.Collection; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Collection; - @Component public class EcsCloudMetricProvider { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProvider.java index f7d3689aa1d..5b8a0a5a50f 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProvider.java @@ -16,18 +16,33 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view; +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.Cluster; +import com.amazonaws.services.ecs.model.DescribeClustersRequest; +import com.amazonaws.services.ecs.model.DescribeClustersResult; +import com.google.common.collect.Lists; import com.netflix.spinnaker.cats.cache.Cache; +import 
com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsClusterCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsCluster; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.*; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Collection; - +@Slf4j @Component public class EcsClusterProvider { private EcsClusterCacheClient ecsClusterCacheClient; + @Autowired private CredentialsRepository credentialsRepository; + @Autowired private AmazonClientProvider amazonClientProvider; + // Describe Cluster API accepts only 100 cluster Names at a time as an input. + private static final int EcsClusterDescriptionMaxSize = 100; @Autowired public EcsClusterProvider(Cache cacheView) { @@ -38,4 +53,55 @@ public Collection getAllEcsClusters() { return ecsClusterCacheClient.getAll(); } + // TODO include[] input of Describe Cluster is not a part of this implementation, need to + // implement in the future if additional properties are needed. + public Collection getEcsClusterDescriptions(String account, String region) { + String glob = Keys.getClusterKey(account, region, "*"); + Collection ecsClustersIdentifiers = ecsClusterCacheClient.filterIdentifiers(glob); + Collection clusters = new ArrayList<>(); + List filteredEcsClusters = + ecsClusterCacheClient.getAll(ecsClustersIdentifiers).stream() + .filter( + cluster -> + account.equals(cluster.getAccount()) && region.equals(cluster.getRegion())) + .map(cluster -> cluster.getName()) + .collect(Collectors.toList()); + log.info("Total number of items in the filteredEcsCluster(s): {}", filteredEcsClusters.size()); + List> batchClusterList = + Lists.partition(filteredEcsClusters, EcsClusterDescriptionMaxSize); + log.info("filteredEcsCluster(s) item(s) split among {} partition(s)", batchClusterList.size()); + AmazonECS client = getAmazonEcsClient(account, region); + for (List batchClusters : batchClusterList) { + List describeClusterResponse = getDescribeClusters(client, batchClusters); + if (describeClusterResponse != null) { + clusters.addAll(describeClusterResponse); + } + } + return clusters; + } + + private AmazonECS getAmazonEcsClient(String account, String region) { + NetflixECSCredentials credentials = credentialsRepository.getOne(account); + if (!(credentials instanceof NetflixECSCredentials)) { + throw new IllegalArgumentException("Invalid credentials:" + account + ":" + region); + } + return amazonClientProvider.getAmazonEcs(credentials, region, true); + } + + private List getDescribeClusters(AmazonECS client, List clusterNames) { + DescribeClustersRequest describeClustersRequest = + new DescribeClustersRequest().withClusters(clusterNames); + DescribeClustersResult describeClustersResult = + client.describeClusters(describeClustersRequest); + if (describeClustersResult == null) { + log.warn( + "Describe Cluster call returned with empty response. 
Please check your inputs (account, region and cluster list)"); + return Collections.emptyList(); + } else if (!describeClustersResult.getFailures().isEmpty()) { + log.warn( + "Describe Cluster call responded with failure(s):" + + describeClustersResult.getFailures()); + } + return describeClustersResult.getClusters(); + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProvider.java index 57d241e93b0..006b03684ae 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProvider.java @@ -16,34 +16,51 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view; -import com.netflix.spinnaker.clouddriver.aws.model.AmazonLoadBalancer; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; + +import com.amazonaws.services.ecs.model.LoadBalancer; +import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.data.ArnUtils; import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsLoadbalancerCacheClient; +import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsTargetGroupCacheClient; +import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsLoadBalancerCache; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; +import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsLoadBalancer; import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsLoadBalancerDetail; import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsLoadBalancerSummary; -import com.netflix.spinnaker.clouddriver.ecs.security.ECSCredentialsConfig; +import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsTargetGroup; import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.*; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - @Component -public class EcsLoadBalancerProvider implements LoadBalancerProvider { +public class EcsLoadBalancerProvider implements LoadBalancerProvider { private final EcsLoadbalancerCacheClient ecsLoadbalancerCacheClient; - private final ECSCredentialsConfig ecsCredentialsConfig; + private final EcsAccountMapper ecsAccountMapper; + private final ServiceCacheClient ecsServiceCacheClient; + private final EcsTargetGroupCacheClient ecsTargetGroupCacheClient; + + private final Logger log = LoggerFactory.getLogger(getClass()); @Autowired - public EcsLoadBalancerProvider(EcsLoadbalancerCacheClient ecsLoadbalancerCacheClient, - ECSCredentialsConfig ecsCredentialsConfig) { + public EcsLoadBalancerProvider( + EcsLoadbalancerCacheClient ecsLoadbalancerCacheClient, + EcsAccountMapper ecsAccountMapper, + ServiceCacheClient ecsServiceCacheClient, + EcsTargetGroupCacheClient 
ecsTargetGroupCacheClient) { this.ecsLoadbalancerCacheClient = ecsLoadbalancerCacheClient; - this.ecsCredentialsConfig = ecsCredentialsConfig; + this.ecsAccountMapper = ecsAccountMapper; + this.ecsServiceCacheClient = ecsServiceCacheClient; + this.ecsTargetGroupCacheClient = ecsTargetGroupCacheClient; } @Override @@ -57,8 +74,7 @@ public List<EcsLoadBalancerSummary> list() { List<EcsLoadBalancerCache> loadBalancers = ecsLoadbalancerCacheClient.findAll(); for (EcsLoadBalancerCache lb : loadBalancers) { - String account = getEcsAccountName(lb.getAccount()); - if (account == null) { + if (lb.getAccount() == null) { continue; } @@ -72,7 +88,7 @@ public List<EcsLoadBalancerSummary> list() { } EcsLoadBalancerDetail loadBalancer = new EcsLoadBalancerDetail(); - loadBalancer.setAccount(account); + loadBalancer.setAccount(lb.getAccount()); loadBalancer.setRegion(region); loadBalancer.setName(name); loadBalancer.setVpcId(lb.getVpcId()); @@ -80,8 +96,11 @@ public List<EcsLoadBalancerSummary> list() { loadBalancer.setLoadBalancerType(lb.getLoadBalancerType()); loadBalancer.setTargetGroups(lb.getTargetGroups()); - - summary.getOrCreateAccount(account).getOrCreateRegion(region).getLoadBalancers().add(loadBalancer); + summary + .getOrCreateAccount(lb.getAccount()) + .getOrCreateRegion(region) + .getLoadBalancers() + .add(loadBalancer); } return new ArrayList<>(map.values()); @@ -89,25 +108,120 @@ public List<EcsLoadBalancerSummary> list() { @Override public Item get(String name) { - return null; //TODO - Implement this. + return null; // intentionally null, implement if/when needed in Deck. } @Override public List<Details>
byAccountAndRegionAndName(String account, String region, String name) { - return null; //TODO - Implement this. This is used to show the details view of a load balancer which is not even implemented yet + return null; // intentionally null, implement if/when needed in Deck. } @Override - public Set getApplicationLoadBalancers(String application) { - return null; //TODO - Implement this. This is used to show load balancers and reveals other buttons - } - - private String getEcsAccountName(String awsAccountName) { - for (ECSCredentialsConfig.Account ecsAccount : ecsCredentialsConfig.getAccounts()) { - if (ecsAccount.getAwsAccount().equals(awsAccountName)) { - return ecsAccount.getName(); + public Set getApplicationLoadBalancers(String application) { + // Find the load balancers currently in use by ECS services in this application + String glob = + application != null + ? Keys.getServiceKey("*", "*", application + "*") + : Keys.getServiceKey("*", "*", "*"); + Collection ecsServices = ecsServiceCacheClient.filterIdentifiers(glob); + Set services = + ecsServiceCacheClient.getAll(ecsServices).stream() + .filter(service -> service.getApplicationName().equals(application)) + .collect(Collectors.toSet()); + log.debug("Retrieved {} services for application '{}'", services.size(), application); + + Collection allTargetGroupKeys = ecsTargetGroupCacheClient.getAllKeys(); + log.debug( + "Retrieved {} target group keys for application '{}'", + allTargetGroupKeys.size(), + application); + + Map> targetGroupToServicesMap = new HashMap<>(); + Set targetGroupKeys = new HashSet<>(); + + // find all the target group cache keys + for (Service service : services) { + String awsAccountName = + ecsAccountMapper.fromEcsAccountNameToAwsAccountName(service.getAccount()); + for (LoadBalancer loadBalancer : service.getLoadBalancers()) { + if (loadBalancer.getTargetGroupArn() != null) { + String tgArn = loadBalancer.getTargetGroupArn(); + String keyPrefix = + String.format( + "%s:%s:%s:%s:%s:", + AmazonCloudProvider.ID, + TARGET_GROUPS.getNs(), + awsAccountName, + service.getRegion(), + ArnUtils.extractTargetGroupName(tgArn).get()); + Set matchingKeys = + allTargetGroupKeys.stream() + .filter(key -> key.startsWith(keyPrefix)) + .collect(Collectors.toSet()); + targetGroupKeys.addAll(matchingKeys); + // associate target group with services it contains targets for + if (targetGroupToServicesMap.containsKey(tgArn)) { + log.debug("Mapping additional service '{}' to '{}'", service.getServiceName(), tgArn); + Set serviceList = targetGroupToServicesMap.get(tgArn); + serviceList.add(service.getServiceName()); + targetGroupToServicesMap.put(tgArn, serviceList); + } else { + log.debug("Mapping service '{}' to '{}'", service.getServiceName(), tgArn); + Set srcServices = Sets.newHashSet(service.getServiceName()); + targetGroupToServicesMap.put(tgArn, srcServices); + } + } } } - return null; + + // retrieve matching target groups + List tgs = ecsTargetGroupCacheClient.find(targetGroupKeys); + + // find the load balancers for those target groups + List tgLBs = + ecsLoadbalancerCacheClient.findWithTargetGroups(targetGroupKeys); + log.debug( + "Retrieved {} load balancers for {} target group keys.", + tgLBs.size(), + targetGroupKeys.size()); + + Set ecsLoadBalancers = new HashSet<>(); + for (EcsLoadBalancerCache loadBalancerCache : tgLBs) { + List matchingTGs = + tgs.stream() + .filter(tg -> loadBalancerCache.getTargetGroups().contains(tg.getTargetGroupName())) + .collect(Collectors.toList()); + EcsLoadBalancer ecsLB = + 
makeEcsLoadBalancer(loadBalancerCache, matchingTGs, targetGroupToServicesMap); + ecsLoadBalancers.add(ecsLB); + } + + return ecsLoadBalancers; + } + + private EcsLoadBalancer makeEcsLoadBalancer( + EcsLoadBalancerCache elbCacheData, + List tgCacheData, + Map> tgToServiceMap) { + EcsLoadBalancer ecsLoadBalancer = new EcsLoadBalancer(); + ecsLoadBalancer.setAccount(elbCacheData.getAccount()); + ecsLoadBalancer.setRegion(elbCacheData.getRegion()); + ecsLoadBalancer.setLoadBalancerArn(elbCacheData.getLoadBalancerArn()); + ecsLoadBalancer.setLoadBalancerName(elbCacheData.getLoadBalancerName()); + ecsLoadBalancer.setLoadBalancerType(elbCacheData.getLoadBalancerType()); + ecsLoadBalancer.setCloudProvider(elbCacheData.getCloudProvider()); + ecsLoadBalancer.setListeners(elbCacheData.getListeners()); + ecsLoadBalancer.setAvailabilityZones(elbCacheData.getAvailabilityZones()); + ecsLoadBalancer.setIpAddressType(elbCacheData.getIpAddressType()); + ecsLoadBalancer.setDnsname(elbCacheData.getDnsname()); + ecsLoadBalancer.setVpcId(elbCacheData.getVpcId()); + ecsLoadBalancer.setCreatedTime(elbCacheData.getCreatedTime()); + ecsLoadBalancer.setSecurityGroups(elbCacheData.getSecurityGroups()); + ecsLoadBalancer.setSubnets(elbCacheData.getSubnets()); + ecsLoadBalancer.setTargetGroups(tgCacheData); + ecsLoadBalancer.setTargetGroupServices(tgToServiceMap); + // TODO: get, add target healths per service/tg + + return ecsLoadBalancer; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsRoleProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsRoleProvider.java index 2b16f048d46..09f9d91ba6a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsRoleProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsRoleProvider.java @@ -20,11 +20,10 @@ import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.ecs.cache.client.IamRoleCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.IamRole; +import java.util.Collection; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Collection; - @Component public class EcsRoleProvider implements RoleProvider { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecretProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecretProvider.java new file mode 100644 index 00000000000..76def2ff660 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecretProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.view; + +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.ecs.cache.client.SecretCacheClient; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret; +import java.util.Collection; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class EcsSecretProvider { + + private SecretCacheClient secretCacheClient; + + @Autowired + public EcsSecretProvider(Cache cacheView) { + this.secretCacheClient = new SecretCacheClient(cacheView); + } + + public Collection getAllSecrets() { + return secretCacheClient.getAll(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProvider.java index 7805d12caaa..fb7ce526d0d 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProvider.java @@ -20,11 +20,10 @@ import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.ecs.model.EcsSecurityGroup; import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider; +import java.util.Collection; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Collection; - @Component class EcsSecurityGroupProvider implements SecurityGroupProvider { @@ -34,46 +33,69 @@ class EcsSecurityGroupProvider implements SecurityGroupProvider getAll(boolean includeRules) { - return amazonPrimitiveConverter.convertToEcsSecurityGroup(amazonSecurityGroupProvider.getAll(includeRules)); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.getAll(includeRules)); } @Override public Collection getAllByRegion(boolean includeRules, String region) { - return amazonPrimitiveConverter.convertToEcsSecurityGroup(amazonSecurityGroupProvider.getAllByRegion(includeRules, region)); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.getAllByRegion(includeRules, region)); } @Override public Collection getAllByAccount(boolean includeRules, String account) { - return amazonPrimitiveConverter.convertToEcsSecurityGroup(amazonSecurityGroupProvider.getAllByAccount(includeRules, account)); + String awsAccount = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(account); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.getAllByAccount(includeRules, awsAccount)); } @Override - public Collection getAllByAccountAndName(boolean includeRules, String account, String name) { - return amazonPrimitiveConverter.convertToEcsSecurityGroup(amazonSecurityGroupProvider.getAllByAccountAndName(includeRules, account, name)); + public Collection getAllByAccountAndName( + boolean includeRules, String account, String name) { + String awsAccount = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(account); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.getAllByAccountAndName(includeRules, awsAccount, name)); } @Override - public Collection getAllByAccountAndRegion(boolean includeRules, String account, String region) { - return 
amazonPrimitiveConverter.convertToEcsSecurityGroup(amazonSecurityGroupProvider.getAllByAccountAndRegion(includeRules, account, region)); + public Collection getAllByAccountAndRegion( + boolean includeRules, String account, String region) { + String awsAccount = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(account); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.getAllByAccountAndRegion(includeRules, awsAccount, region)); } @Override public EcsSecurityGroup get(String account, String region, String name, String vpcId) { - return amazonPrimitiveConverter.convertToEcsSecurityGroup(amazonSecurityGroupProvider.get(account, region, name, vpcId)); + String awsAccount = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(account); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.get(awsAccount, region, name, vpcId)); + } + + @Override + public EcsSecurityGroup getById(String account, String region, String id, String vpcId) { + String awsAccount = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(account); + return amazonPrimitiveConverter.convertToEcsSecurityGroup( + amazonSecurityGroupProvider.getById(awsAccount, region, id, vpcId)); } @Override public String getCloudProvider() { return cloudProvider; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProvider.java index 4aff77ae8c5..0867087d1c1 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProvider.java @@ -37,20 +37,16 @@ import com.netflix.spinnaker.clouddriver.ecs.model.EcsServerGroup; import com.netflix.spinnaker.clouddriver.ecs.model.EcsTask; import com.netflix.spinnaker.clouddriver.ecs.model.TaskDefinition; +import com.netflix.spinnaker.clouddriver.ecs.names.MonikerHelper; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService; +import com.netflix.spinnaker.clouddriver.ecs.services.SubnetSelector; import com.netflix.spinnaker.clouddriver.model.ClusterProvider; import com.netflix.spinnaker.clouddriver.model.Instance; import com.netflix.spinnaker.clouddriver.model.LoadBalancer; import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.moniker.Moniker; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -59,6 +55,11 @@ import java.util.NoSuchElementException; import java.util.Set; import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class 
EcsServerClusterProvider implements ClusterProvider<EcsServerCluster> { @@ -69,22 +70,26 @@ public class EcsServerClusterProvider implements ClusterProvider<EcsServerCluster> { - private final AccountCredentialsProvider accountCredentialsProvider; + private final CredentialsRepository<NetflixECSCredentials> credentialsRepository; private final ContainerInformationService containerInformationService; + private final SubnetSelector subnetSelector; private final Logger log = LoggerFactory.getLogger(getClass()); @Autowired - public EcsServerClusterProvider(AccountCredentialsProvider accountCredentialsProvider, - ContainerInformationService containerInformationService, - TaskCacheClient taskCacheClient, - ServiceCacheClient serviceCacheClient, - ScalableTargetCacheClient scalableTargetCacheClient, - EcsLoadbalancerCacheClient ecsLoadbalancerCacheClient, - TaskDefinitionCacheClient taskDefinitionCacheClient, - EcsCloudWatchAlarmCacheClient ecsCloudWatchAlarmCacheClient) { - this.accountCredentialsProvider = accountCredentialsProvider; + public EcsServerClusterProvider( + CredentialsRepository<NetflixECSCredentials> credentialsRepository, + ContainerInformationService containerInformationService, + SubnetSelector subnetSelector, + TaskCacheClient taskCacheClient, + ServiceCacheClient serviceCacheClient, + ScalableTargetCacheClient scalableTargetCacheClient, + EcsLoadbalancerCacheClient ecsLoadbalancerCacheClient, + TaskDefinitionCacheClient taskDefinitionCacheClient, + EcsCloudWatchAlarmCacheClient ecsCloudWatchAlarmCacheClient) { + this.credentialsRepository = credentialsRepository; this.containerInformationService = containerInformationService; + this.subnetSelector = subnetSelector; this.taskCacheClient = taskCacheClient; this.serviceCacheClient = serviceCacheClient; this.scalableTargetCacheClient = scalableTargetCacheClient; @@ -93,64 +98,97 @@ public EcsServerClusterProvider(AccountCredentialsProvider accountCredentialsPro this.ecsCloudWatchAlarmCacheClient = ecsCloudWatchAlarmCacheClient; } - private Map<String, Set<EcsServerCluster>> findClusters(Map<String, Set<EcsServerCluster>> clusterMap, - AmazonCredentials credentials) { - return findClusters(clusterMap, credentials, null); + private Map<String, Set<EcsServerCluster>> findClusters( + Map<String, Set<EcsServerCluster>> clusterMap, AmazonCredentials credentials) { + return findClusters(clusterMap, credentials, null, true); } - private Map<String, Set<EcsServerCluster>> findClusters(Map<String, Set<EcsServerCluster>> clusterMap, - AmazonCredentials credentials, - String application) { + private Map<String, Set<EcsServerCluster>> findClusters( + Map<String, Set<EcsServerCluster>> clusterMap, + AmazonCredentials credentials, + String application, + boolean includeDetails) { for (AmazonCredentials.AWSRegion awsRegion : credentials.getRegions()) { - clusterMap = findClustersForRegion(clusterMap, credentials, awsRegion, application); + clusterMap = + findClustersForRegion(clusterMap, credentials, awsRegion, application, includeDetails); } return clusterMap; } - private Map<String, Set<EcsServerCluster>> findClustersForRegion(Map<String, Set<EcsServerCluster>> clusterMap, - AmazonCredentials credentials, - AmazonCredentials.AWSRegion awsRegion, - String application) { + private Map<String, Set<EcsServerCluster>> findClustersForRegion( + Map<String, Set<EcsServerCluster>> clusterMap, + AmazonCredentials credentials, + AmazonCredentials.AWSRegion awsRegion, + String application, + boolean includeDetails) { - Collection<Service> services = serviceCacheClient.getAll(credentials.getName(), awsRegion.getName()); + String glob = + application != null + ?
Keys.getServiceKey(credentials.getName(), awsRegion.getName(), application + "*") + : Keys.getServiceKey(credentials.getName(), awsRegion.getName(), "*"); + + Collection ecsServices = serviceCacheClient.filterIdentifiers(glob); + Collection services = serviceCacheClient.getAll(ecsServices); Collection allTasks = taskCacheClient.getAll(credentials.getName(), awsRegion.getName()); for (Service service : services) { - String applicationName = service.getApplicationName(); + final Moniker moniker = service.getMoniker(); + String applicationName = moniker.getApp(); String serviceName = service.getServiceName(); if (application != null && !applicationName.equals(application)) { continue; } - Set loadBalancers = new HashSet<>(ecsLoadbalancerCacheClient.find(credentials.getName(), awsRegion.getName())); - - Set instances = allTasks.stream() - .filter(task -> task.getGroup().equals("service:" + serviceName)) - .map(task -> convertToEcsTask(credentials.getName(), awsRegion.getName(), serviceName, task)) - .collect(Collectors.toSet()); - - String taskDefinitionKey = Keys.getTaskDefinitionKey(credentials.getName(), awsRegion.getName(), service.getTaskDefinition()); - com.amazonaws.services.ecs.model.TaskDefinition taskDefinition = taskDefinitionCacheClient.get(taskDefinitionKey); + Set loadBalancers = + new HashSet<>( + ecsLoadbalancerCacheClient.find(credentials.getName(), awsRegion.getName())); + + Set instances = + allTasks.stream() + .filter(task -> task.getGroup().equals("service:" + serviceName)) + .map( + task -> + convertToEcsTask( + credentials.getName(), awsRegion.getName(), serviceName, task)) + .collect(Collectors.toSet()); + + String taskDefinitionKey = + Keys.getTaskDefinitionKey( + credentials.getName(), awsRegion.getName(), service.getTaskDefinition()); + com.amazonaws.services.ecs.model.TaskDefinition taskDefinition = + taskDefinitionCacheClient.get(taskDefinitionKey); if (taskDefinition == null) { continue; } - EcsServerGroup ecsServerGroup = buildEcsServerGroup(credentials.getName(), awsRegion.getName(), - serviceName, service.getDesiredCount(), instances, service.getCreatedAt(), - service.getClusterName(), taskDefinition); + EcsServerGroup ecsServerGroup = + buildEcsServerGroup( + credentials.getName(), + awsRegion.getName(), + serviceName, + moniker, + service.getDesiredCount(), + instances, + service.getCreatedAt(), + service.getClusterName(), + taskDefinition, + service.getSubnets(), + service.getSecurityGroups(), + includeDetails); if (ecsServerGroup == null) { continue; } if (clusterMap.containsKey(applicationName)) { - String escClusterName = StringUtils.substringBeforeLast(ecsServerGroup.getName(), "-"); + String ecsServerClusterName = moniker.getCluster(); boolean found = false; for (EcsServerCluster cluster : clusterMap.get(applicationName)) { - if (cluster.getName().equals(escClusterName)) { + if (StringUtils.equals(cluster.getName(), ecsServerClusterName) + && StringUtils.equals(cluster.getAccountName(), credentials.getName())) { cluster.getServerGroups().add(ecsServerGroup); found = true; break; @@ -158,11 +196,13 @@ private Map> findClustersForRegion(Map> healthStatus = containerInformationService.getHealthStatus(taskId, serviceName, account, region); + List> healthStatus = + containerInformationService.getHealthStatus(taskId, serviceName, account, region); String availabilityZone = containerInformationService.getTaskZone(account, region, task); - NetworkInterface networkInterface = - !task.getContainers().isEmpty() - && 
!task.getContainers().get(0).getNetworkInterfaces().isEmpty() - ? task.getContainers().get(0).getNetworkInterfaces().get(0) : null; + Service service = containerInformationService.getService(serviceName, account, region); + boolean hasHealthCheck = + containerInformationService.taskHasHealthCheck(service, account, region); - return new EcsTask(taskId, launchTime, task.getLastStatus(), task.getDesiredStatus(), availabilityZone, healthStatus, address, networkInterface); + NetworkInterface networkInterface = + !task.getContainers().isEmpty() + && !task.getContainers().get(0).getNetworkInterfaces().isEmpty() + ? task.getContainers().get(0).getNetworkInterfaces().get(0) + : null; + + return new EcsTask( + taskId, + launchTime, + task.getLastStatus(), + task.getDesiredStatus(), + task.getHealthStatus(), + availabilityZone, + healthStatus, + address, + networkInterface, + hasHealthCheck); } - private TaskDefinition buildTaskDefinition(com.amazonaws.services.ecs.model.TaskDefinition taskDefinition) { + private TaskDefinition buildTaskDefinition( + com.amazonaws.services.ecs.model.TaskDefinition taskDefinition) { String roleArn = taskDefinition.getTaskRoleArn(); String iamRole = roleArn != null ? StringUtils.substringAfterLast(roleArn, "/") : "None"; ContainerDefinition containerDefinition = taskDefinition.getContainerDefinitions().get(0); + int cpu = 0; + if (containerDefinition.getCpu() != null) { + cpu = containerDefinition.getCpu(); + } else if (taskDefinition.getCpu() != null) { + cpu = Integer.parseInt(taskDefinition.getCpu()); + } + + int memoryReservation = 0; + if (containerDefinition.getMemoryReservation() != null) { + memoryReservation = containerDefinition.getMemoryReservation(); + } + + int memoryLimit = 0; + if (containerDefinition.getMemory() != null) { + memoryLimit = containerDefinition.getMemory(); + } else if (taskDefinition.getMemory() != null) { + memoryLimit = Integer.parseInt(taskDefinition.getMemory()); + } + return new TaskDefinition() - .setContainerImage(containerDefinition.getImage()) - .setContainerPort(containerDefinition.getPortMappings().get(0).getContainerPort()) - .setCpuUnits(containerDefinition.getCpu()) - .setMemoryReservation(containerDefinition.getMemoryReservation()) - .setIamRole(iamRole) - .setTaskName(StringUtils.substringAfterLast(taskDefinition.getTaskDefinitionArn(), "/")) - .setEnvironmentVariables(containerDefinition.getEnvironment()); + .setContainerImage(containerDefinition.getImage()) + .setContainerPort( + containerDefinition.getPortMappings().isEmpty() + ? 0 + : containerDefinition.getPortMappings().get(0).getContainerPort()) + .setCpuUnits(cpu) + .setMemoryReservation(memoryReservation) + .setMemoryLimit(memoryLimit) + .setIamRole(iamRole) + .setTaskName(StringUtils.substringAfterLast(taskDefinition.getTaskDefinitionArn(), "/")) + .setEnvironmentVariables(containerDefinition.getEnvironment()); } private ServerGroup.Capacity buildServerGroupCapacity(int desiredCount, ScalableTarget target) { @@ -208,35 +288,39 @@ private ServerGroup.Capacity buildServerGroupCapacity(int desiredCount, Scalable capacity.setMin(target.getMinCapacity()); capacity.setMax(target.getMaxCapacity()); } else { - //TODO: Min/Max should be based on (desired count * min/max precent). + // TODO: Min/Max should be based on (desired count * min/max percent).
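+ // Until that happens, min and max both fall back to the service's desired count below.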
capacity.setMin(desiredCount); capacity.setMax(desiredCount); } return capacity; } - private EcsServerCluster buildSpinnakerServerCluster(AmazonCredentials credentials, - Set loadBalancers, - EcsServerGroup ecsServerGroup) { + private EcsServerCluster buildSpinnakerServerCluster( + AmazonCredentials credentials, + Set loadBalancers, + EcsServerGroup ecsServerGroup) { return new EcsServerCluster() - .setAccountName(credentials.getName()) - .setName(StringUtils.substringBeforeLast(ecsServerGroup.getName(), "-")) - .setLoadBalancers(loadBalancers) - .setServerGroups(Sets.newHashSet(ecsServerGroup)); + .setAccountName(credentials.getName()) + .setName(ecsServerGroup.getMoniker().getCluster()) + .setLoadBalancers(loadBalancers) + .setServerGroups(Sets.newHashSet(ecsServerGroup)); } - private EcsServerGroup buildEcsServerGroup(String account, - String region, - String serviceName, - int desiredCount, - Set instances, - long creationTime, - String ecsCluster, - com.amazonaws.services.ecs.model.TaskDefinition taskDefinition) { + private EcsServerGroup buildEcsServerGroup( + String account, + String region, + String serviceName, + Moniker moniker, + int desiredCount, + Set instances, + long creationTime, + String ecsClusterName, + com.amazonaws.services.ecs.model.TaskDefinition taskDefinition, + List eniSubnets, + List eniSecurityGroups, + boolean includeDetails) { ServerGroup.InstanceCounts instanceCounts = buildInstanceCount(instances); - TaskDefinition ecsTaskDefinition = buildTaskDefinition(taskDefinition); - - String scalableTargetId = "service/" + ecsCluster + "/" + serviceName; + String scalableTargetId = "service/" + ecsClusterName + "/" + serviceName; String scalableTargetKey = Keys.getScalableTargetKey(account, region, scalableTargetId); ScalableTarget scalableTarget = scalableTargetCacheClient.get(scalableTargetKey); if (scalableTarget == null) { @@ -245,49 +329,104 @@ private EcsServerGroup buildEcsServerGroup(String account, ServerGroup.Capacity capacity = buildServerGroupCapacity(desiredCount, scalableTarget); - String vpcId = "None"; //ENI will change the way VPCs are handled. 
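+ // "None" is kept only when no VPC can be resolved: services with ENI subnets (awsvpc networking) derive their VPC below, while EC2-backed tasks fall back to the first task's container instance VPC and security groups.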
+ String vpcId = "None"; Set securityGroups = new HashSet<>(); if (!instances.isEmpty()) { - String taskId = instances.iterator().next().getName(); - String taskKey = Keys.getTaskKey(account, region, taskId); - Task task = taskCacheClient.get(taskKey); - com.amazonaws.services.ec2.model.Instance ec2Instance = containerInformationService.getEc2Instance(account, region, task); - - vpcId = ec2Instance.getVpcId(); - securityGroups = ec2Instance.getSecurityGroups().stream() - .map(GroupIdentifier::getGroupId) - .collect(Collectors.toSet()); - } + if (eniSubnets != null + && !eniSubnets.isEmpty() + && eniSecurityGroups != null + && !eniSecurityGroups.isEmpty()) { + securityGroups = eniSecurityGroups.stream().collect(Collectors.toSet()); + Collection vpcIds = subnetSelector.getSubnetVpcIds(account, region, eniSubnets); - Set metricAlarmNames = ecsCloudWatchAlarmCacheClient.getMetricAlarms(serviceName, account, region).stream() - .map(EcsMetricAlarm::getAlarmName) - .collect(Collectors.toSet()); - - EcsServerGroup serverGroup = new EcsServerGroup() - .setDisabled(capacity.getDesired() == 0) - .setName(serviceName) - .setCloudProvider(EcsCloudProvider.ID) - .setType(EcsCloudProvider.ID) - .setRegion(region) - .setInstances(instances) - .setCapacity(capacity) - .setInstanceCounts(instanceCounts) - .setCreatedTime(creationTime) - .setEcsCluster(ecsCluster) - .setTaskDefinition(ecsTaskDefinition) - .setVpcId(vpcId) - .setSecurityGroups(securityGroups) - .setMetricAlarms(metricAlarmNames); - - EcsServerGroup.AutoScalingGroup asg = new EcsServerGroup.AutoScalingGroup() - .setDesiredCapacity(scalableTarget.getMaxCapacity()) - .setMaxSize(scalableTarget.getMaxCapacity()) - .setMinSize(scalableTarget.getMinCapacity()); + if (!vpcIds.isEmpty()) { + if (vpcIds.size() > 1) { + throw new IllegalArgumentException("Services with multiple VPCs are not supported"); + } + + vpcId = vpcIds.iterator().next(); + } + } else { + for (Instance instance : instances) { + String taskId = instance.getName(); + String taskKey = Keys.getTaskKey(account, region, taskId); + Task task = taskCacheClient.get(taskKey); + + if (task != null) { + com.amazonaws.services.ec2.model.Instance ec2Instance = + containerInformationService.getEc2Instance(account, region, task); + if (ec2Instance != null) { + if (ec2Instance.getVpcId() != null && !ec2Instance.getVpcId().isEmpty()) { + vpcId = ec2Instance.getVpcId(); + } + if (ec2Instance.getSecurityGroups() != null) { + securityGroups = + ec2Instance.getSecurityGroups().stream() + .map(GroupIdentifier::getGroupId) + .collect(Collectors.toSet()); + } + break; + } + } + } + } + } + Set metricAlarmNames = + ecsCloudWatchAlarmCacheClient + .getMetricAlarms(serviceName, account, region, ecsClusterName) + .stream() + .map(EcsMetricAlarm::getAlarmName) + .collect(Collectors.toSet()); + EcsServerGroup serverGroup = new EcsServerGroup(); + if (includeDetails) { + TaskDefinition ecsTaskDefinition = buildTaskDefinition(taskDefinition); + EcsServerGroup.Image image = new EcsServerGroup.Image(); + image.setImageId(ecsTaskDefinition.getContainerImage()); + image.setName(ecsTaskDefinition.getContainerImage()); + serverGroup + .setDisabled(capacity.getDesired() == 0) + .setName(serviceName) + .setCloudProvider(EcsCloudProvider.ID) + .setType(EcsCloudProvider.ID) + .setRegion(region) + .setInstances(instances) + .setCapacity(capacity) + .setImage(image) + .setInstanceCounts(instanceCounts) + .setCreatedTime(creationTime) + .setEcsCluster(ecsClusterName) + .setTaskDefinition(ecsTaskDefinition) + .setVpcId(vpcId) 
+ .setSecurityGroups(securityGroups) + .setMetricAlarms(metricAlarmNames) + .setMoniker(moniker); + } else { + serverGroup + .setDisabled(capacity.getDesired() == 0) + .setName(serviceName) + .setCloudProvider(EcsCloudProvider.ID) + .setType(EcsCloudProvider.ID) + .setRegion(region) + .setInstances(instances) + .setCapacity(capacity) + .setInstanceCounts(instanceCounts) + .setCreatedTime(creationTime) + .setEcsCluster(ecsClusterName) + .setVpcId(vpcId) + .setSecurityGroups(securityGroups) + .setMetricAlarms(metricAlarmNames) + .setMoniker(moniker); + } + EcsServerGroup.AutoScalingGroup asg = + new EcsServerGroup.AutoScalingGroup() + .setDesiredCapacity(scalableTarget.getMaxCapacity()) + .setMaxSize(scalableTarget.getMaxCapacity()) + .setMinSize(scalableTarget.getMinCapacity()); // TODO: Update Deck to handle an asg. Current Deck implementation uses a EC2 AutoScaling Group - //serverGroup.setAsg(asg); + // serverGroup.setAsg(asg); return serverGroup; } @@ -318,82 +457,74 @@ private ServerGroup.InstanceCounts buildInstanceCount(Set instances) { instanceCounts.setUp(instanceCounts.getUp()); break; default: - throw new Error(String.format( - "Unexpected health state: %s. Don't know how to proceed - update %s", - instance.getHealthState(), - this.getClass().getSimpleName())); + throw new Error( + String.format( + "Unexpected health state: %s. Don't know how to proceed - update %s", + instance.getHealthState(), this.getClass().getSimpleName())); } instanceCounts.setTotal(instanceCounts.getTotal() + 1); } return instanceCounts; } - private List getEcsCredentials() { - List ecsCredentialsList = new ArrayList<>(); - for (AccountCredentials credentials : accountCredentialsProvider.getAll()) { - if (credentials instanceof AmazonCredentials && credentials.getCloudProvider().equals(EcsCloudProvider.ID)) { - ecsCredentialsList.add((AmazonCredentials) credentials); - } - } - return ecsCredentialsList; + private Set getEcsCredentials() { + return credentialsRepository.getAll(); } private AmazonCredentials getEcsCredentials(String account) { - try { - return getEcsCredentials().stream() - .filter(credentials -> credentials.getName().equals(account)) - .findFirst().get(); - } catch (NoSuchElementException exception) { - throw new NoSuchElementException(String.format("There is no ECS account by the name of '%s'", account)); + NetflixECSCredentials creds = credentialsRepository.getOne(account); + if (creds == null) { + throw new NoSuchElementException( + String.format("There is no ECS account by the name of '%s'", account)); } + return creds; } @Override public Map> getClusterSummaries(String application) { - return getClusters(); + return getClusters0(application, false); } @Override public Map> getClusterDetails(String application) { + return getClusters0(application, true); + } + + @Override + public Map> getClusters() { Map> clusterMap = new HashMap<>(); for (AmazonCredentials credentials : getEcsCredentials()) { - clusterMap = findClusters(clusterMap, credentials, application); + clusterMap = findClusters(clusterMap, credentials); } - return clusterMap; } - - @Override - public Map> getClusters() { + private Map> getClusters0( + String application, boolean includeDetails) { Map> clusterMap = new HashMap<>(); for (AmazonCredentials credentials : getEcsCredentials()) { - clusterMap = findClusters(clusterMap, credentials); + clusterMap = findClusters(clusterMap, credentials, application, includeDetails); } return clusterMap; } - /** - * Gets Spinnaker clusters for a given Spinnaker application and ECS 
account. - */ + /** Gets Spinnaker clusters for a given Spinnaker application and ECS account. */ @Override public Set<EcsServerCluster> getClusters(String application, String account) { try { AmazonCredentials credentials = getEcsCredentials(account); - return findClusters(new HashMap<>(), credentials, application) - .get(application); + return findClusters(new HashMap<>(), credentials, application, true).get(application); } catch (NoSuchElementException exception) { log.info("No ECS Credentials were found for account " + account); return null; } - } - /** - * Gets a Spinnaker clusters for a given Spinnaker application, ECS account, and the Spinnaker cluster name. + * Gets a Spinnaker cluster for a given Spinnaker application, ECS account, and the Spinnaker + * cluster name. */ @Override public EcsServerCluster getCluster(String application, String account, String name) { @@ -409,34 +540,38 @@ public EcsServerCluster getCluster(String application, String account, String na } /** - * Gets a Spinnaker clusters for a given Spinnaker application, ECS account, and the Spinnaker cluster name. - * TODO: Make includeDetails actually function. + * Gets a Spinnaker cluster for a given Spinnaker application, ECS account, and the Spinnaker + * cluster name. TODO: Make includeDetails actually function. */ @Override - public EcsServerCluster getCluster(String application, String account, String name, boolean includeDetails) { + public EcsServerCluster getCluster( + String application, String account, String name, boolean includeDetails) { return getCluster(application, account, name); } /** - * Gets a Spinnaker server group for a given Spinnaker application, ECS account, and the Spinnaker server group name. + * Gets a Spinnaker server group for a given Spinnaker application, ECS account, and the Spinnaker + * server group name. */ @Override - public ServerGroup getServerGroup(String account, String region, String serverGroupName, boolean includeDetails) { + public ServerGroup getServerGroup( + String account, String region, String serverGroupName, boolean includeDetails) { if (serverGroupName == null) { throw new Error("Invalid Server Group"); } - // TODO - remove the application filter. - String application = StringUtils.substringBefore(serverGroupName, "-"); + Map<String, Set<EcsServerCluster>> clusterMap = new HashMap<>(); try { AmazonCredentials credentials = getEcsCredentials(account); - clusterMap = findClusters(clusterMap, credentials, application); + Moniker moniker = MonikerHelper.applicationNameToMoniker(serverGroupName); + log.debug("App Name is: " + moniker.getApp()); + clusterMap = findClusters(clusterMap, credentials, moniker.getApp(), true); } catch (NoSuchElementException exception) { /* This is ugly, but not sure how else to do it. If we don't have creds due - * to not being an ECS account, there's nothing to do here, and we should - * just continue on. - */ + * to not being an ECS account, there's nothing to do here, and we should + * just continue on. + */ log.info("No ECS credentials were found for the account " + account); } @@ -444,14 +579,15 @@ public ServerGroup getServerGroup(String account, String region, String serverGr for (EcsServerCluster ecsServerCluster : entry.getValue()) { for (ServerGroup serverGroup : ecsServerCluster.getServerGroups()) { if (region.equals(serverGroup.getRegion()) - && serverGroupName.equals(serverGroup.getName())) { + && serverGroupName.equals(serverGroup.getName())) { return serverGroup; } } } } - // I don't think this should throw an error..
other classes (such as the AmazonClusterProvider return null + // I don't think this should throw an error.. other classes (such as the AmazonClusterProvider + // return null // if it isn't found..) log.info("No ECS Server Groups were found with the name " + serverGroupName); return null; @@ -468,7 +604,7 @@ public String getCloudProviderId() { @Override public boolean supportsMinimalClusters() { - //TODO: Implement if needed. + // TODO: Implement if needed. return false; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServiceDiscoveryProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServiceDiscoveryProvider.java new file mode 100644 index 00000000000..fb36a7c677f --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServiceDiscoveryProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.view; + +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceDiscoveryCacheClient; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry; +import java.util.Collection; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class EcsServiceDiscoveryProvider { + + private ServiceDiscoveryCacheClient serviceDiscoveryCacheClient; + + @Autowired + public EcsServiceDiscoveryProvider(Cache cacheView) { + this.serviceDiscoveryCacheClient = new ServiceDiscoveryCacheClient(cacheView); + } + + public Collection getAllServiceDiscoveryRegistries() { + return serviceDiscoveryCacheClient.getAll(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSubnetProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSubnetProvider.java index 37a9921d646..d0ccedad556 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSubnetProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSubnetProvider.java @@ -20,11 +20,10 @@ import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; import com.netflix.spinnaker.clouddriver.ecs.model.EcsSubnet; import com.netflix.spinnaker.clouddriver.model.SubnetProvider; +import java.util.Set; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Set; - @Component public class EcsSubnetProvider implements SubnetProvider { @@ -32,7 +31,9 @@ public class EcsSubnetProvider implements SubnetProvider { final AmazonPrimitiveConverter amazonPrimitiveConverter; @Autowired - public EcsSubnetProvider(AmazonSubnetProvider amazonSubnetProvider, AmazonPrimitiveConverter amazonPrimitiveConverter) { + public EcsSubnetProvider( + AmazonSubnetProvider 
amazonSubnetProvider, + AmazonPrimitiveConverter amazonPrimitiveConverter) { this.amazonSubnetProvider = amazonSubnetProvider; this.amazonPrimitiveConverter = amazonPrimitiveConverter; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/ImageRepositoryProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/ImageRepositoryProvider.java index 15093c358aa..494934eb115 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/ImageRepositoryProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/ImageRepositoryProvider.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view; import com.netflix.spinnaker.clouddriver.ecs.model.EcsDockerImage; - import java.util.List; public interface ImageRepositoryProvider { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/UnvalidatedDockerImageProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/UnvalidatedDockerImageProvider.java index ac2dac5f19c..dfecac8f8c9 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/UnvalidatedDockerImageProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/provider/view/UnvalidatedDockerImageProvider.java @@ -1,18 +1,34 @@ +/* + * Copyright 2017 Lookout, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package com.netflix.spinnaker.clouddriver.ecs.provider.view; -import com.netflix.spinnaker.clouddriver.ecs.model.EcsDockerImage; -import org.springframework.stereotype.Component; +import static com.netflix.spinnaker.clouddriver.ecs.provider.view.EcrImageProvider.ECR_REPOSITORY_URI_PATTERN; +import com.netflix.spinnaker.clouddriver.ecs.model.EcsDockerImage; import java.util.Collections; import java.util.List; import java.util.regex.Matcher; - -import static com.netflix.spinnaker.clouddriver.ecs.provider.view.EcrImageProvider.ECR_REPOSITORY_URI_PATTERN; +import org.springframework.stereotype.Component; /** - * This ImageRepositoryProvider does not validate that the image does indeed exist. An invalid image URL will lead to - * the ECS Agent to fail at starting ECS tasks for the deployed server group, and is likely to be painful - * to track and fix for users. Still, this class allows to decouple the ECS cloud provider from the ECR docker registry. + * This ImageRepositoryProvider does not validate that the image does indeed exist. An invalid + * image URL will cause the ECS Agent to fail to start ECS tasks for the deployed server group, + * and is likely to be painful for users to track down and fix. Still, this class allows the ECS + * cloud provider to be decoupled from the ECR docker registry.
*/ @Component public class UnvalidatedDockerImageProvider implements ImageRepositoryProvider { diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/ECSCredentialsConfig.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/ECSCredentialsConfig.java index 1d8b4632801..742ffa6db95 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/ECSCredentialsConfig.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/ECSCredentialsConfig.java @@ -16,19 +16,37 @@ package com.netflix.spinnaker.clouddriver.ecs.security; - -import lombok.Data; - +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; import java.util.List; - +import lombok.Data; @Data public class ECSCredentialsConfig { + String defaultNamingStrategy = "default"; List accounts; @Data - public static class Account { + public static class Account implements CredentialsDefinition { private String name; private String awsAccount; + private String namingStrategy; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Account account = (Account) o; + + if (!name.equals(account.name)) return false; + return awsAccount.equals(account.awsAccount); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + awsAccount.hashCode(); + return result; + } } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilder.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilder.java index ccc1245556c..2903f283d3a 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilder.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilder.java @@ -19,21 +19,23 @@ import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration.Account; import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig; import com.netflix.spinnaker.fiat.model.Authorization; import com.netflix.spinnaker.fiat.model.resources.Permissions; - import java.util.LinkedList; import java.util.List; public class EcsAccountBuilder { - public static CredentialsConfig.Account build(NetflixAmazonCredentials netflixAmazonCredentials, String accountName, String accountType) { - CredentialsConfig.Account account = new CredentialsConfig.Account(); + public static Account build( + NetflixAmazonCredentials netflixAmazonCredentials, String accountName, String accountType) { + Account account = new Account(); account.setName(accountName); account.setAccountType(accountType); account.setAccountId(netflixAmazonCredentials.getAccountId()); - account.setAllowPrivateThirdPartyImages(netflixAmazonCredentials.getAllowPrivateThirdPartyImages()); + account.setAllowPrivateThirdPartyImages( + netflixAmazonCredentials.getAllowPrivateThirdPartyImages()); account.setBastionEnabled(netflixAmazonCredentials.getBastionEnabled()); account.setBastionHost(netflixAmazonCredentials.getBastionHost()); account.setEdda(account.getEdda()); @@ -48,11 +50,14 @@ public static 
CredentialsConfig.Account build(NetflixAmazonCredentials netflixAm account.setFront50Enabled(netflixAmazonCredentials.getFront50Enabled()); account.setRequiredGroupMembership(netflixAmazonCredentials.getRequiredGroupMembership()); - //TODO - The lines below should be conditional on having an AssumeRole - if (netflixAmazonCredentials instanceof NetflixAssumeRoleAmazonCredentials && - ((NetflixAssumeRoleAmazonCredentials) netflixAmazonCredentials).getAssumeRole() != null) { - account.setSessionName(((NetflixAssumeRoleAmazonCredentials) netflixAmazonCredentials).getSessionName()); - account.setAssumeRole(((NetflixAssumeRoleAmazonCredentials) netflixAmazonCredentials).getAssumeRole()); + // TODO - The lines below should be conditional on having an AssumeRole + if (netflixAmazonCredentials instanceof NetflixAssumeRoleAmazonCredentials + && ((NetflixAssumeRoleAmazonCredentials) netflixAmazonCredentials).getAssumeRole() + != null) { + account.setSessionName( + ((NetflixAssumeRoleAmazonCredentials) netflixAmazonCredentials).getSessionName()); + account.setAssumeRole( + ((NetflixAssumeRoleAmazonCredentials) netflixAmazonCredentials).getAssumeRole()); } List regions = new LinkedList<>(); @@ -81,7 +86,8 @@ public static CredentialsConfig.Account build(NetflixAmazonCredentials netflixAm for (String group : netflixAmazonCredentials.getPermissions().allGroups()) { List roles = new LinkedList<>(); roles.add(group); - for (Authorization auth : netflixAmazonCredentials.getPermissions().getAuthorizations(roles)) { + for (Authorization auth : + netflixAmazonCredentials.getPermissions().getAuthorizations(roles)) { permBuilder.add(auth, group); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsInitializer.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsInitializer.java index 42e8c9857d5..41fea213ea4 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsInitializer.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsInitializer.java @@ -17,27 +17,30 @@ package com.netflix.spinnaker.clouddriver.ecs.security; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials; -import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig; -import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsLoader; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader; +import com.netflix.spinnaker.credentials.definition.BasicCredentialsLoader; +import 
com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource; +import com.netflix.spinnaker.credentials.definition.CredentialsParser; +import com.netflix.spinnaker.credentials.poller.Poller; +import javax.annotation.Nullable; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.DependsOn; -import org.springframework.context.annotation.Scope; - -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; @Configuration -public class EcsCredentialsInitializer implements CredentialsInitializerSynchronizable { +public class EcsCredentialsInitializer { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean @ConfigurationProperties("ecs") public ECSCredentialsConfig ecsCredentialsConfig() { @@ -45,53 +48,56 @@ public ECSCredentialsConfig ecsCredentialsConfig() { } @Bean - @DependsOn("netflixAmazonCredentials") - public List netflixECSCredentials(CredentialsLoader credentialsLoader, - ECSCredentialsConfig credentialsConfig, - AccountCredentialsRepository accountCredentialsRepository) throws Throwable { - return synchronizeECSAccounts(credentialsLoader, credentialsConfig, accountCredentialsRepository); + @DependsOn("amazonCredentialsLoader") + @ConditionalOnMissingBean( + value = NetflixECSCredentials.class, + parameterizedContainer = CredentialsRepository.class) + CredentialsRepository ecsCredentialsRepository( + CredentialsLifecycleHandler eventHandler) { + return new MapBackedCredentialsRepository<>(EcsCloudProvider.ID, eventHandler); } - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean - @DependsOn("netflixAmazonCredentials") - public List synchronizeECSAccounts(CredentialsLoader credentialsLoader, - ECSCredentialsConfig ecsCredentialsConfig, - AccountCredentialsRepository accountCredentialsRepository) throws Throwable { - - // TODO: add support for mutable accounts. - //List deltaAccounts = ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, NetflixAmazonCredentials.class, accounts); - List credentials = new LinkedList<>(); - - for (AccountCredentials accountCredentials : accountCredentialsRepository.getAll()) { - if (accountCredentials instanceof NetflixAmazonCredentials) { - for (ECSCredentialsConfig.Account ecsAccount : ecsCredentialsConfig.getAccounts()) { - if (ecsAccount.getAwsAccount().equals(accountCredentials.getName())) { - - NetflixAmazonCredentials netflixAmazonCredentials = (NetflixAmazonCredentials) accountCredentials; - - // TODO: accountCredentials should be serializable or somehow cloneable. 
- CredentialsConfig.Account account = EcsAccountBuilder.build(netflixAmazonCredentials, ecsAccount.getName(), "ecs"); - - CredentialsConfig ecsCopy = new CredentialsConfig(); - ecsCopy.setAccounts(Collections.singletonList(account)); - - NetflixECSCredentials ecsCredentials = new NetflixAssumeRoleEcsCredentials((NetflixAssumeRoleAmazonCredentials)credentialsLoader.load(ecsCopy).get(0), ecsAccount.getAwsAccount()); - credentials.add(ecsCredentials); - - accountCredentialsRepository.save(ecsAccount.getName(), ecsCredentials); - break; + @DependsOn("amazonCredentialsLoader") + @ConditionalOnMissingBean(name = "ecsCredentialsParser") + CredentialsParser ecsCredentialsParser( + ECSCredentialsConfig ecsCredentialsConfig, + CompositeCredentialsRepository compositeCredentialsRepository, + CredentialsParser + amazonCredentialsParser, + NamerRegistry namerRegistry) { + return new EcsCredentialsParser<>( + ecsCredentialsConfig, + compositeCredentialsRepository, + amazonCredentialsParser, + namerRegistry); + } - } - } - } + @Bean + @DependsOn("ecsCredentialsParser") + @ConditionalOnMissingBean(name = "ecsCredentialsLoader") + AbstractCredentialsLoader ecsCredentialsLoader( + CredentialsParser + amazonCredentialsParser, + CredentialsRepository repository, + ECSCredentialsConfig ecsCredentialsConfig, + @Nullable CredentialsDefinitionSource ecsCredentialsSource) { + if (ecsCredentialsSource == null) { + ecsCredentialsSource = ecsCredentialsConfig::getAccounts; } - - return credentials; + return new BasicCredentialsLoader<>(ecsCredentialsSource, amazonCredentialsParser, repository); } - @Override - public String getCredentialsSynchronizationBeanName() { - return "synchronizeECSAccounts"; + @Bean + @ConditionalOnMissingBean(name = "ecsCredentialsInitializerSynchronizable") + CredentialsInitializerSynchronizable ecsCredentialsInitializerSynchronizable( + AbstractCredentialsLoader ecsCredentialsLoader) { + final Poller poller = new Poller<>(ecsCredentialsLoader); + return new CredentialsInitializerSynchronizable() { + @Override + public void synchronize() { + poller.run(); + } + }; } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsLifeCycleHandler.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsLifeCycleHandler.java new file mode 100644 index 00000000000..69fb542969f --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsLifeCycleHandler.java @@ -0,0 +1,170 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License.
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.security; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.module.CatsModule; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ApplicationCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ContainerInstanceCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsCloudMetricAlarmCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsClusterCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamPolicyReader; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamRoleCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ScalableTargetsCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.SecretCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceDiscoveryCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TargetHealthCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskDefinitionCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskHealthCachingAgent; +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper; +import com.netflix.spinnaker.clouddriver.security.ProviderUtils; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.jetbrains.annotations.NotNull; +import org.springframework.context.annotation.Lazy; +import org.springframework.stereotype.Component; + +@Slf4j +@RequiredArgsConstructor +@Component +@Lazy +public class EcsCredentialsLifeCycleHandler + implements CredentialsLifecycleHandler { + protected final EcsProvider ecsProvider; + protected final AmazonClientProvider amazonClientProvider; + protected final AWSCredentialsProvider awsCredentialsProvider; + protected final Registry registry; + protected final IamPolicyReader iamPolicyReader; + protected final ObjectMapper objectMapper; + protected final CatsModule catsModule; + protected final EcsAccountMapper ecsAccountMapper; + + @Override + public void credentialsAdded(@NotNull NetflixECSCredentials credentials) { + log.info("ECS account, {}, was added. Scheduling caching agents", credentials.getName()); + if (credentials instanceof NetflixAssumeRoleEcsCredentials) { + ecsAccountMapper.addMapEntry(((NetflixAssumeRoleEcsCredentials) credentials)); + } + scheduleAgents(credentials); + log.debug("Caching agents scheduled for ECS account {}", credentials.getName()); + } + + @Override + public void credentialsUpdated(@NotNull NetflixECSCredentials credentials) { + log.info("ECS account, {}, was updated. 
Updating caching agents", credentials.getName()); + ecsProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + scheduleAgents(credentials); + log.debug("Caching agents rescheduled for ECS account {}", credentials.getName()); + } + + @Override + public void credentialsDeleted(NetflixECSCredentials credentials) { + log.info("ECS account, {}, was deleted. Removing caching agents", credentials.getName()); + ecsProvider.removeAgentsForAccounts(Collections.singleton(credentials.getName())); + ecsAccountMapper.removeMapEntry(credentials.getName()); + ecsProvider.synchronizeHealthAgents(); + log.debug("Caching agents removed for ECS account {}", credentials.getName()); + } + + private void scheduleAgents(NetflixECSCredentials credentials) { + Set scheduledAccounts = ProviderUtils.getScheduledAccounts(ecsProvider); + List newAgents = new LinkedList<>(); + newAgents.add(new IamRoleCachingAgent(credentials, amazonClientProvider, iamPolicyReader)); + newAgents.add( + new ApplicationCachingAgent( + credentials, + "us-east-1", + amazonClientProvider, + awsCredentialsProvider, + registry, + objectMapper)); + if (!scheduledAccounts.contains(credentials.getName())) { + for (AmazonCredentials.AWSRegion region : credentials.getRegions()) { + newAgents.add( + new EcsClusterCachingAgent( + credentials, region.getName(), amazonClientProvider, awsCredentialsProvider)); + newAgents.add( + new ServiceCachingAgent( + credentials, + region.getName(), + amazonClientProvider, + awsCredentialsProvider, + registry)); + newAgents.add( + new TaskCachingAgent( + credentials, + region.getName(), + amazonClientProvider, + awsCredentialsProvider, + registry)); + newAgents.add( + new ContainerInstanceCachingAgent( + credentials, + region.getName(), + amazonClientProvider, + awsCredentialsProvider, + registry)); + newAgents.add( + new TaskDefinitionCachingAgent( + credentials, + region.getName(), + amazonClientProvider, + awsCredentialsProvider, + registry, + objectMapper)); + newAgents.add( + new TaskHealthCachingAgent( + credentials, + region.getName(), + amazonClientProvider, + awsCredentialsProvider, + objectMapper)); + newAgents.add( + new EcsCloudMetricAlarmCachingAgent( + credentials, region.getName(), amazonClientProvider)); + newAgents.add( + new ScalableTargetsCachingAgent( + credentials, region.getName(), amazonClientProvider, objectMapper)); + newAgents.add(new SecretCachingAgent(credentials, region.getName(), amazonClientProvider)); + newAgents.add( + new ServiceDiscoveryCachingAgent(credentials, region.getName(), amazonClientProvider)); + newAgents.add( + new TargetHealthCachingAgent( + credentials, + region.getName(), + amazonClientProvider, + awsCredentialsProvider, + objectMapper)); + } + } + + ecsProvider.addAgents(newAgents); + ecsProvider.synchronizeHealthAgents(); + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsParser.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsParser.java new file mode 100644 index 00000000000..56acb084da0 --- /dev/null +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsParser.java @@ -0,0 +1,78 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.security; + +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration; +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider; +import com.netflix.spinnaker.clouddriver.ecs.names.EcsResource; +import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.CredentialsParser; +import com.netflix.spinnaker.moniker.Namer; +import lombok.AllArgsConstructor; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Qualifier; + +@AllArgsConstructor +public class EcsCredentialsParser + implements CredentialsParser { + + private final ECSCredentialsConfig ecsCredentialsConfig; + + private final CompositeCredentialsRepository compositeCredentialsRepository; + + @Qualifier("amazonCredentialsParser") + private final CredentialsParser + amazonCredentialsParser; + + private final NamerRegistry namerRegistry; + + @Override + public NetflixECSCredentials parse(ECSCredentialsConfig.@NotNull Account accountDefinition) { + NetflixAmazonCredentials netflixAmazonCredentials = + (NetflixAmazonCredentials) + compositeCredentialsRepository.getCredentials( + accountDefinition.getAwsAccount(), AmazonCloudProvider.ID); + + AccountsConfiguration.Account account = + EcsAccountBuilder.build( + netflixAmazonCredentials, accountDefinition.getName(), EcsProvider.NAME); + NetflixECSCredentials netflixECSCredentials = + new NetflixAssumeRoleEcsCredentials( + (NetflixAssumeRoleAmazonCredentials) amazonCredentialsParser.parse(account), + accountDefinition.getAwsAccount()); + + // If no naming strategy is set at the account or provider + // level, the NamerRegistry will fall back to Frigga + String namingStrategy = + StringUtils.firstNonBlank( + accountDefinition.getNamingStrategy(), ecsCredentialsConfig.getDefaultNamingStrategy()); + + Namer namer = namerRegistry.getNamingStrategy(namingStrategy); + NamerRegistry.lookup() + .withProvider(EcsCloudProvider.ID) + .withAccount(account.getName()) + .setNamer(EcsResource.class, namer); + + return netflixECSCredentials; + } +} diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/NetflixAssumeRoleEcsCredentials.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/NetflixAssumeRoleEcsCredentials.java index 684d93e8f67..37f8c7b0eb0 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/NetflixAssumeRoleEcsCredentials.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/security/NetflixAssumeRoleEcsCredentials.java @@ -23,7 +23,8 @@ public class
NetflixAssumeRoleEcsCredentials extends NetflixECSCredentials { private final String sessionName; private final String awsAccount; - public NetflixAssumeRoleEcsCredentials(NetflixAssumeRoleAmazonCredentials copy, String awsAccount) { + public NetflixAssumeRoleEcsCredentials( + NetflixAssumeRoleAmazonCredentials copy, String awsAccount) { super(copy); this.assumeRole = copy.getAssumeRole(); this.sessionName = copy.getSessionName(); diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationService.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationService.java index 785bb89ae26..e8595b55cf7 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationService.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationService.java @@ -17,26 +17,32 @@ package com.netflix.spinnaker.clouddriver.ecs.services; import com.amazonaws.services.ec2.model.Instance; +import com.amazonaws.services.ecs.model.ContainerDefinition; import com.amazonaws.services.ecs.model.LoadBalancer; +import com.amazonaws.services.ecs.model.NetworkBinding; +import com.amazonaws.services.ecs.model.TaskDefinition; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ContainerInstanceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsInstanceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient; +import com.netflix.spinnaker.clouddriver.ecs.cache.client.TargetHealthCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskCacheClient; +import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskDefinitionCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskHealthCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsTargetHealth; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Task; import com.netflix.spinnaker.clouddriver.ecs.cache.model.TaskHealth; import com.netflix.spinnaker.clouddriver.ecs.security.ECSCredentialsConfig; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class ContainerInformationService { @@ -45,60 +51,153 @@ public class ContainerInformationService { private final TaskCacheClient taskCacheClient; private final ServiceCacheClient serviceCacheClient; private final TaskHealthCacheClient taskHealthCacheClient; + private final TaskDefinitionCacheClient taskDefinitionCacheClient; private final EcsInstanceCacheClient ecsInstanceCacheClient; private final ContainerInstanceCacheClient containerInstanceCacheClient; + private final TargetHealthCacheClient targetHealthCacheClient; @Autowired - public ContainerInformationService(ECSCredentialsConfig ecsCredentialsConfig, - TaskCacheClient taskCacheClient, - ServiceCacheClient serviceCacheClient, - TaskHealthCacheClient taskHealthCacheClient, - EcsInstanceCacheClient ecsInstanceCacheClient, - 
ContainerInstanceCacheClient containerInstanceCacheClient) { + public ContainerInformationService( + ECSCredentialsConfig ecsCredentialsConfig, + TaskCacheClient taskCacheClient, + ServiceCacheClient serviceCacheClient, + TaskHealthCacheClient taskHealthCacheClient, + TaskDefinitionCacheClient taskDefinitionCacheClient, + EcsInstanceCacheClient ecsInstanceCacheClient, + ContainerInstanceCacheClient containerInstanceCacheClient, + TargetHealthCacheClient targetHealthCacheClient) { this.ecsCredentialsConfig = ecsCredentialsConfig; this.taskCacheClient = taskCacheClient; this.serviceCacheClient = serviceCacheClient; this.taskHealthCacheClient = taskHealthCacheClient; + this.taskDefinitionCacheClient = taskDefinitionCacheClient; this.ecsInstanceCacheClient = ecsInstanceCacheClient; this.containerInstanceCacheClient = containerInstanceCacheClient; + this.targetHealthCacheClient = targetHealthCacheClient; } - public List> getHealthStatus(String taskId, String serviceName, String accountName, String region) { - String serviceCacheKey = Keys.getServiceKey(accountName, region, serviceName); - Service service = serviceCacheClient.get(serviceCacheKey); + public List> getHealthStatus( + String taskId, String serviceName, String accountName, String region) { + Service service = getService(serviceName, accountName, region); String healthKey = Keys.getTaskHealthKey(accountName, region, taskId); TaskHealth taskHealth = taskHealthCacheClient.get(healthKey); - if (service == null || taskHealth == null) { - List> healthMetrics = new ArrayList<>(); + String taskKey = Keys.getTaskKey(accountName, region, taskId); + Task task = taskCacheClient.get(taskKey); + List> healthMetrics = new ArrayList<>(); + + // Load balancer-based health + if (service == null || taskHealth == null) { Map loadBalancerHealth = new HashMap<>(); loadBalancerHealth.put("instanceId", taskId); loadBalancerHealth.put("state", "Unknown"); loadBalancerHealth.put("type", "loadBalancer"); healthMetrics.add(loadBalancerHealth); - return healthMetrics; - } - - List loadBalancers = service.getLoadBalancers(); - //There should only be 1 based on AWS documentation. 
- if (loadBalancers.size() == 1) { - - List> healthMetrics = new ArrayList<>(); + } else { Map loadBalancerHealth = new HashMap<>(); loadBalancerHealth.put("instanceId", taskId); loadBalancerHealth.put("state", taskHealth.getState()); loadBalancerHealth.put("type", taskHealth.getType()); healthMetrics.add(loadBalancerHealth); - return healthMetrics; - } else if (loadBalancers.size() >= 2) { - throw new IllegalArgumentException("Cannot have more than 1 load balancer while checking ECS health."); } - return null; + // Task-based health + if (task != null) { + boolean hasHealthCheck = false; + EcsTargetHealth targetHealth = null; + if (service != null) { + hasHealthCheck = taskHasHealthCheck(service, accountName, region); + LoadBalancer loadBalancer = service.getLoadBalancers().stream().findFirst().orElse(null); + if (loadBalancer != null) { + String targetGroupKey = + Keys.getTargetHealthKey(accountName, region, loadBalancer.getTargetGroupArn()); + targetHealth = targetHealthCacheClient.get(targetGroupKey); + } + } + + Map taskPlatformHealth = new HashMap<>(); + taskPlatformHealth.put("instanceId", taskId); + taskPlatformHealth.put("type", "ecs"); + taskPlatformHealth.put("healthClass", "platform"); + taskPlatformHealth.put( + "state", + toPlatformHealthState( + task.getLastStatus(), task.getHealthStatus(), hasHealthCheck, targetHealth)); + healthMetrics.add(taskPlatformHealth); + } + + return healthMetrics; + } + + public boolean taskHasHealthCheck(Service service, String accountName, String region) { + if (service != null) { + String taskDefinitionCacheKey = + Keys.getTaskDefinitionKey(accountName, region, service.getTaskDefinition()); + TaskDefinition taskDefinition = taskDefinitionCacheClient.get(taskDefinitionCacheKey); + + if (taskDefinition != null) { + for (ContainerDefinition containerDefinition : taskDefinition.getContainerDefinitions()) { + if (containerDefinition.getHealthCheck() != null + && containerDefinition.getHealthCheck().getCommand() != null) { + return true; + } + } + } + } + + return false; + } + + private String toPlatformHealthState( + String ecsTaskStatus, + String ecsTaskHealthStatus, + boolean hasHealthCheck, + EcsTargetHealth ecsTargetHealth) { + if (hasHealthCheck && "UNKNOWN".equals(ecsTaskHealthStatus)) { + return "Starting"; + } else if ("UNHEALTHY".equals(ecsTaskHealthStatus)) { + return "Down"; + } + + if (ecsTargetHealth != null) { + return getPlatformHealthStateFromTargetGroup(ecsTargetHealth); + } + + switch (ecsTaskStatus) { + case "PROVISIONING": + case "PENDING": + case "ACTIVATING": + return "Starting"; + case "RUNNING": + return "Up"; + default: + return "Down"; + } + } + + // based on: + // https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#target-health-states + private String getPlatformHealthStateFromTargetGroup(EcsTargetHealth targetHealth) { + Set statuses = + targetHealth.getTargetHealthDescriptions().stream() + .map(tg -> tg.getTargetHealth().getState()) + .collect(Collectors.toSet()); + + for (String status : statuses) { + if ("healthy".equalsIgnoreCase(status)) { + return "Up"; + } + if ("initial".equalsIgnoreCase(status)) { + return "Starting"; + } + } + + // statuses: unhealthy, unused, draining, unavailable + return "Down"; } public String getClusterArn(String accountName, String region, String taskId) { @@ -111,24 +210,29 @@ public String getClusterArn(String accountName, String region, String taskId) { } public String getClusterName(String serviceName, String accountName, String region) 
{ - String serviceCachekey = Keys.getServiceKey(accountName, region, serviceName); - Service service = serviceCacheClient.get(serviceCachekey); + Service service = getService(serviceName, accountName, region); if (service != null) { return service.getClusterName(); } return null; } - public String getTaskPrivateAddress(String accountName, String region, Task task) { - if (task.getContainers().size() > 1) { - throw new IllegalArgumentException("Multiple containers for a task is not supported."); - } + public Service getService(String serviceName, String accountName, String region) { + String serviceCacheKey = Keys.getServiceKey(accountName, region, serviceName); + return serviceCacheClient.get(serviceCacheKey); + } + public String getTaskPrivateAddress(String accountName, String region, Task task) { // int hostPort; - try { - hostPort = task.getContainers().get(0).getNetworkBindings().get(0).getHostPort(); - } catch (Exception e) { - hostPort = -1; + + if (task.getContainers().size() > 1) { + hostPort = getAddressHostPortForMultipleContainers(task); + } else { + try { + hostPort = task.getContainers().get(0).getNetworkBindings().get(0).getHostPort(); + } catch (Exception e) { + hostPort = -1; + } } if (hostPort < 0 || hostPort > 65535) { @@ -136,34 +240,44 @@ public String getTaskPrivateAddress(String accountName, String region, Task task } Instance instance = getEc2Instance(accountName, region, task); - if(instance == null){ + if (instance == null) { return null; } String hostPrivateIpAddress = instance.getPrivateIpAddress(); + if (hostPrivateIpAddress == null || hostPrivateIpAddress.isEmpty()) { + return null; + } + return String.format("%s:%s", hostPrivateIpAddress, hostPort); } public String getTaskZone(String accountName, String region, Task task) { Instance ec2Instance = getEc2Instance(accountName, region, task); - if (ec2Instance != null) { + if (ec2Instance != null && ec2Instance.getPlacement() != null) { return ec2Instance.getPlacement().getAvailabilityZone(); } - // TODO for tasks not placed on an instance (e.g. Fargate), determine the zone from the network interface attachment + // TODO for tasks not placed on an instance (e.g. 
Fargate), determine the zone from the network + // interface attachment return null; } - public Instance getEc2Instance(String ecsAccount, String region, Task task){ - String containerInstanceCacheKey = Keys.getContainerInstanceKey(ecsAccount, region, task.getContainerInstanceArn()); - ContainerInstance containerInstance = containerInstanceCacheClient.get(containerInstanceCacheKey); + public Instance getEc2Instance(String ecsAccount, String region, Task task) { + String containerInstanceCacheKey = + Keys.getContainerInstanceKey(ecsAccount, region, task.getContainerInstanceArn()); + ContainerInstance containerInstance = + containerInstanceCacheClient.get(containerInstanceCacheKey); if (containerInstance == null) { return null; } - Set instances = ecsInstanceCacheClient.find(containerInstance.getEc2InstanceId(), getAwsAccountName(ecsAccount), region); + Set instances = + ecsInstanceCacheClient.find( + containerInstance.getEc2InstanceId(), getAwsAccountName(ecsAccount), region); if (instances.size() > 1) { - throw new IllegalArgumentException("There cannot be more than 1 EC2 container instance for a given region and instance ID."); + throw new IllegalArgumentException( + "There cannot be more than 1 EC2 container instance for a given region and instance ID."); } else if (instances.size() == 0) { return null; } @@ -179,4 +293,26 @@ private String getAwsAccountName(String ecsAccountName) { } return null; } + + private int getAddressHostPortForMultipleContainers(Task task) { + List hostPorts = new ArrayList() {}; + + task.getContainers() + .forEach( + (c) -> { + List networkBindings = c.getNetworkBindings(); + networkBindings.forEach( + (b) -> { + if (b.getHostPort() != null) { + hostPorts.add(b.getHostPort()); + } + }); + }); + + if (hostPorts.size() == 1) { + return hostPorts.get(0); + } + + return -1; + } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricService.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricService.java index 5419555a55b..3f434037a61 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricService.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricService.java @@ -17,61 +17,49 @@ package com.netflix.spinnaker.clouddriver.ecs.services; import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling; -import com.amazonaws.services.applicationautoscaling.model.DeregisterScalableTargetRequest; -import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsRequest; -import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult; -import com.amazonaws.services.applicationautoscaling.model.DescribeScalingPoliciesRequest; -import com.amazonaws.services.applicationautoscaling.model.DescribeScalingPoliciesResult; -import com.amazonaws.services.applicationautoscaling.model.PutScalingPolicyRequest; -import com.amazonaws.services.applicationautoscaling.model.PutScalingPolicyResult; -import com.amazonaws.services.applicationautoscaling.model.ScalingPolicy; -import com.amazonaws.services.applicationautoscaling.model.ServiceNamespace; +import com.amazonaws.services.applicationautoscaling.model.*; import com.amazonaws.services.cloudwatch.AmazonCloudWatch; -import com.amazonaws.services.cloudwatch.model.DeleteAlarmsRequest; -import com.amazonaws.services.cloudwatch.model.DescribeAlarmsRequest; -import 
com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult; -import com.amazonaws.services.cloudwatch.model.MetricAlarm; -import com.amazonaws.services.cloudwatch.model.PutMetricAlarmRequest; +import com.amazonaws.services.cloudwatch.model.*; +import com.google.common.collect.Iterables; import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; -import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsCloudWatchAlarmCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.*; +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - @Component public class EcsCloudMetricService { - @Autowired - EcsCloudWatchAlarmCacheClient metricAlarmCacheClient; - @Autowired - AccountCredentialsProvider accountCredentialsProvider; - @Autowired - AmazonClientProvider amazonClientProvider; + @Autowired EcsCloudWatchAlarmCacheClient metricAlarmCacheClient; + @Autowired CredentialsRepository credentialsRepository; + @Autowired AmazonClientProvider amazonClientProvider; - public void deleteMetrics(String serviceName, String account, String region) { - List metricAlarms = metricAlarmCacheClient.getMetricAlarms(serviceName, account, region); + private final Logger log = LoggerFactory.getLogger(getClass()); + + public void deleteMetrics( + String serviceName, String account, String region, String ecsClusterName) { + List metricAlarms = + metricAlarmCacheClient.getMetricAlarms(serviceName, account, region, ecsClusterName); if (metricAlarms.isEmpty()) { return; } - AmazonCredentials credentials = (AmazonCredentials) accountCredentialsProvider.getCredentials(account); - AmazonCloudWatch amazonCloudWatch = amazonClientProvider.getAmazonCloudWatch(account, credentials.getCredentialsProvider(), region); + NetflixAmazonCredentials credentials = credentialsRepository.getOne(account); + AmazonCloudWatch amazonCloudWatch = + amazonClientProvider.getAmazonCloudWatch(credentials, region, false); - amazonCloudWatch.deleteAlarms(new DeleteAlarmsRequest().withAlarmNames(metricAlarms.stream() - .map(MetricAlarm::getAlarmName) - .collect(Collectors.toSet()))); + amazonCloudWatch.deleteAlarms( + new DeleteAlarmsRequest() + .withAlarmNames( + metricAlarms.stream().map(MetricAlarm::getAlarmName).collect(Collectors.toSet()))); Set resources = new HashSet<>(); // Stream and flatMap it? Couldn't figure out how. 
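The "Stream and flatMap it?" comment that closes the hunk above asks how the resource-collection loop in deleteMetrics could be expressed with streams. A minimal sketch follows, offered only as an illustration: it assumes the elided loop gathers each alarm's alarm, OK, and insufficient-data action ARNs and trims them to scalable-target resource IDs the same way buildResourceList in the next hunk does. metricAlarms and serviceName are the surrounding deleteMetrics variables, and java.util.stream.Stream would need to be imported alongside the existing Collectors import.

    // Sketch only: collect every action ARN from each alarm, keep those that
    // reference this service, and trim them to scalable-target resource IDs
    // (the same trimming buildResourceList performs below).
    Set<String> resources =
        metricAlarms.stream()
            .flatMap(
                alarm ->
                    Stream.of(
                            alarm.getAlarmActions(),
                            alarm.getOKActions(),
                            alarm.getInsufficientDataActions())
                        .flatMap(List::stream))
            .filter(arn -> arn.contains(serviceName))
            .map(
                arn ->
                    StringUtils.substringBeforeLast(
                        StringUtils.substringAfterLast(arn, ":resource/"), ":policyName"))
            .collect(Collectors.toSet());

Since the removed loop body is not visible in this diff, this cannot be confirmed as a drop-in replacement; it only demonstrates the flatMap shape the comment is asking about.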
@@ -86,18 +74,20 @@ public void deleteMetrics(String serviceName, String account, String region) { private Set buildResourceList(List metricAlarmArn, String serviceName) { return metricAlarmArn.stream() - .filter(arn -> arn.contains(serviceName)) - .map(arn -> { - String resource = StringUtils.substringAfterLast(arn, ":resource/"); - resource = StringUtils.substringBeforeLast(resource, ":policyName"); - return resource; - }) - .collect(Collectors.toSet()); + .filter(arn -> arn.contains(serviceName)) + .map( + arn -> { + String resource = StringUtils.substringAfterLast(arn, ":resource/"); + resource = StringUtils.substringBeforeLast(resource, ":policyName"); + return resource; + }) + .collect(Collectors.toSet()); } private void deregisterScalableTargets(Set resources, String account, String region) { - NetflixAmazonCredentials credentials = (NetflixAmazonCredentials) accountCredentialsProvider.getCredentials(account); - AWSApplicationAutoScaling autoScaling = amazonClientProvider.getAmazonApplicationAutoScaling(credentials, region, false); + NetflixAmazonCredentials credentials = credentialsRepository.getOne(account); + AWSApplicationAutoScaling autoScaling = + amazonClientProvider.getAmazonApplicationAutoScaling(credentials, region, false); Map> resourceMap = new HashMap<>(); for (String resource : resources) { @@ -116,9 +106,10 @@ private void deregisterScalableTargets(Set resources, String account, St for (String namespace : resourceMap.keySet()) { String nextToken = null; do { - DescribeScalableTargetsRequest request = new DescribeScalableTargetsRequest() - .withServiceNamespace(namespace) - .withResourceIds(resourceMap.get(namespace)); + DescribeScalableTargetsRequest request = + new DescribeScalableTargetsRequest() + .withServiceNamespace(namespace) + .withResourceIds(resourceMap.get(namespace)); if (nextToken != null) { request.setNextToken(nextToken); @@ -126,12 +117,15 @@ private void deregisterScalableTargets(Set resources, String account, St DescribeScalableTargetsResult result = autoScaling.describeScalableTargets(request); - deregisterRequests.addAll(result.getScalableTargets().stream() - .map(scalableTarget -> new DeregisterScalableTargetRequest() - .withResourceId(scalableTarget.getResourceId()) - .withScalableDimension(scalableTarget.getScalableDimension()) - .withServiceNamespace(scalableTarget.getServiceNamespace())) - .collect(Collectors.toSet())); + deregisterRequests.addAll( + result.getScalableTargets().stream() + .map( + scalableTarget -> + new DeregisterScalableTargetRequest() + .withResourceId(scalableTarget.getResourceId()) + .withScalableDimension(scalableTarget.getScalableDimension()) + .withServiceNamespace(scalableTarget.getServiceNamespace())) + .collect(Collectors.toSet())); nextToken = result.getNextToken(); } while (nextToken != null && nextToken.length() != 0); @@ -142,88 +136,184 @@ private void deregisterScalableTargets(Set resources, String account, St } } - private PutMetricAlarmRequest buildPutMetricAlarmRequest(MetricAlarm metricAlarm, - String serviceName, - Set insufficientActionPolicyArns, - Set okActionPolicyArns, - Set alarmActionPolicyArns) { + private PutMetricAlarmRequest buildPutMetricAlarmRequest( + MetricAlarm metricAlarm, + String alarmName, + String dstServiceName, + String clusterName, + String srcRegion, + String dstRegion, + String srcAccountId, + String dstAccountId, + Map policyArnReplacements) { return new PutMetricAlarmRequest() - .withAlarmName(metricAlarm.getAlarmName() + "-" + serviceName) - 
.withEvaluationPeriods(metricAlarm.getEvaluationPeriods()) - .withThreshold(metricAlarm.getThreshold()) - .withActionsEnabled(metricAlarm.getActionsEnabled()) - .withAlarmDescription(metricAlarm.getAlarmDescription()) - .withComparisonOperator(metricAlarm.getComparisonOperator()) - .withDimensions(metricAlarm.getDimensions()) - .withMetricName(metricAlarm.getMetricName()) - .withUnit(metricAlarm.getUnit()) - .withPeriod(metricAlarm.getPeriod()) - .withNamespace(metricAlarm.getNamespace()) - .withStatistic(metricAlarm.getStatistic()) - .withEvaluateLowSampleCountPercentile(metricAlarm.getEvaluateLowSampleCountPercentile()) - .withTreatMissingData(metricAlarm.getTreatMissingData()) - .withExtendedStatistic(metricAlarm.getExtendedStatistic()) - .withInsufficientDataActions(insufficientActionPolicyArns) - .withOKActions(okActionPolicyArns) - .withAlarmActions(alarmActionPolicyArns); + .withAlarmName(alarmName) + .withEvaluationPeriods(metricAlarm.getEvaluationPeriods()) + .withThreshold(metricAlarm.getThreshold()) + .withActionsEnabled(metricAlarm.getActionsEnabled()) + .withAlarmDescription(metricAlarm.getAlarmDescription()) + .withComparisonOperator(metricAlarm.getComparisonOperator()) + .withDimensions( + metricAlarm.getDimensions().stream() + .map( + dimension -> + buildNewServiceAlarmDimension( + dimension, metricAlarm.getNamespace(), dstServiceName, clusterName)) + .collect(Collectors.toSet())) + .withMetricName(metricAlarm.getMetricName()) + .withUnit(metricAlarm.getUnit()) + .withPeriod(metricAlarm.getPeriod()) + .withNamespace(metricAlarm.getNamespace()) + .withStatistic(metricAlarm.getStatistic()) + .withEvaluateLowSampleCountPercentile(metricAlarm.getEvaluateLowSampleCountPercentile()) + .withTreatMissingData(metricAlarm.getTreatMissingData()) + .withExtendedStatistic(metricAlarm.getExtendedStatistic()) + .withInsufficientDataActions( + replacePolicyArnActions( + srcRegion, + dstRegion, + srcAccountId, + dstAccountId, + policyArnReplacements, + metricAlarm.getInsufficientDataActions())) + .withOKActions( + replacePolicyArnActions( + srcRegion, + dstRegion, + srcAccountId, + dstAccountId, + policyArnReplacements, + metricAlarm.getOKActions())) + .withAlarmActions( + replacePolicyArnActions( + srcRegion, + dstRegion, + srcAccountId, + dstAccountId, + policyArnReplacements, + metricAlarm.getAlarmActions())); } - private PutScalingPolicyRequest buildPutScalingPolicyRequest(ScalingPolicy policy) { - return new PutScalingPolicyRequest() - .withPolicyName(policy.getPolicyName()) - .withServiceNamespace(policy.getServiceNamespace()) - .withPolicyType(policy.getPolicyType()) - .withResourceId(policy.getResourceId()) - .withScalableDimension(policy.getScalableDimension()) - .withStepScalingPolicyConfiguration(policy.getStepScalingPolicyConfiguration()) - .withTargetTrackingScalingPolicyConfiguration(policy.getTargetTrackingScalingPolicyConfiguration()); + protected Collection replacePolicyArnActions( + String srcRegion, + String dstRegion, + String srcAccountId, + String dstAccountId, + Map replacements, + Collection actions) { + return actions.stream() + // Replace src scaling policy ARNs with dst scaling policy ARNs + .map(action -> replacements.keySet().contains(action) ? 
replacements.get(action) : action) + // If we are copying across accounts or regions, do not copy over unrelated actions like SNS + // topics + .filter(action -> srcRegion.equals(dstRegion) || !action.contains(srcRegion)) + .filter(action -> srcAccountId.equals(dstAccountId) || !action.contains(srcAccountId)) + .collect(Collectors.toSet()); } - public void associateAsgWithMetrics(String account, - String region, - List alarmNames, - String serviceName, - String resourceId) { - - NetflixAmazonCredentials credentials = (NetflixAmazonCredentials) accountCredentialsProvider.getCredentials(account); - - AmazonCloudWatch cloudWatch = amazonClientProvider.getAmazonCloudWatch(credentials, region, false); - AWSApplicationAutoScaling autoScalingClient = amazonClientProvider.getAmazonApplicationAutoScaling(credentials, region, false); - - DescribeAlarmsResult describeAlarmsResult = cloudWatch.describeAlarms(new DescribeAlarmsRequest() - .withAlarmNames(alarmNames)); - - for (MetricAlarm metricAlarm : describeAlarmsResult.getMetricAlarms()) { - Set okScalingPolicyArns = putScalingPolicies(autoScalingClient, metricAlarm.getOKActions(), - serviceName, resourceId, "ok", "scaling-policy-" + metricAlarm.getAlarmName()); - Set alarmScalingPolicyArns = putScalingPolicies(autoScalingClient, metricAlarm.getAlarmActions(), - serviceName, resourceId, "alarm", "scaling-policy-" + metricAlarm.getAlarmName()); - Set insufficientActionPolicyArns = putScalingPolicies(autoScalingClient, metricAlarm.getInsufficientDataActions(), - serviceName, resourceId, "insuffiicient", "scaling-policy-" + metricAlarm.getAlarmName()); - - cloudWatch.putMetricAlarm(buildPutMetricAlarmRequest(metricAlarm, serviceName, - insufficientActionPolicyArns, okScalingPolicyArns, alarmScalingPolicyArns)); + private Dimension buildNewServiceAlarmDimension( + Dimension oldDimension, String namespace, String serviceName, String clusterName) { + String value = oldDimension.getValue(); + if (namespace.equals("AWS/ECS")) { + if (oldDimension.getName().equals("ClusterName")) { + value = clusterName; + } else if (oldDimension.getName().equals("ServiceName")) { + value = serviceName; + } } + return new Dimension().withName(oldDimension.getName()).withValue(value); } - private Set putScalingPolicies(AWSApplicationAutoScaling autoScalingClient, - List actionArns, - String serviceName, - String resourceId, - String type, - String suffix) { - if (actionArns.isEmpty()) { - return Collections.emptySet(); + private MetricDimension buildNewServiceTargetTrackingDimension( + MetricDimension oldDimension, String namespace, String serviceName, String clusterName) { + String value = oldDimension.getValue(); + if (namespace.equals("AWS/ECS")) { + if (oldDimension.getName().equals("ClusterName")) { + value = clusterName; + } else if (oldDimension.getName().equals("ServiceName")) { + value = serviceName; + } } + return new MetricDimension().withName(oldDimension.getName()).withValue(value); + } + + private PutScalingPolicyRequest buildPutScalingPolicyRequest(ScalingPolicy policy) { + return new PutScalingPolicyRequest() + .withPolicyName(policy.getPolicyName()) + .withServiceNamespace(policy.getServiceNamespace()) + .withPolicyType(policy.getPolicyType()) + .withResourceId(policy.getResourceId()) + .withScalableDimension(policy.getScalableDimension()) + .withStepScalingPolicyConfiguration(policy.getStepScalingPolicyConfiguration()) + .withTargetTrackingScalingPolicyConfiguration( + policy.getTargetTrackingScalingPolicyConfiguration()); + } + + public void 
copyScalingPolicies( + String dstAccount, + String dstRegion, + String dstServiceName, + String dstResourceId, + String srcAccount, + String srcRegion, + String srcServiceName, + String srcResourceId, + String clusterName) { + NetflixAmazonCredentials dstCredentials = credentialsRepository.getOne(dstAccount); + NetflixAmazonCredentials srcCredentials = credentialsRepository.getOne(srcAccount); + + AWSApplicationAutoScaling dstAutoScalingClient = + amazonClientProvider.getAmazonApplicationAutoScaling(dstCredentials, dstRegion, false); + AWSApplicationAutoScaling srcAutoScalingClient = + amazonClientProvider.getAmazonApplicationAutoScaling(srcCredentials, srcRegion, false); + AmazonCloudWatch dstCloudWatchClient = + amazonClientProvider.getAmazonCloudWatch(dstCredentials, dstRegion, false); + AmazonCloudWatch srcCloudWatchClient = + amazonClientProvider.getAmazonCloudWatch(srcCredentials, srcRegion, false); + + // Copy the scaling policies + Set sourceScalingPolicies = + getScalingPolicies(srcAutoScalingClient, srcResourceId); + + Map srcPolicyArnToDstPolicyArn = + putScalingPolicies( + dstAutoScalingClient, + srcServiceName, + dstServiceName, + dstResourceId, + clusterName, + sourceScalingPolicies); + + // Copy the alarms that target the scaling policies + Set allSourceAlarmNames = + sourceScalingPolicies.stream() + .flatMap(policy -> policy.getAlarms().stream()) + .map(alarm -> alarm.getAlarmName()) + .collect(Collectors.toSet()); + copyAlarmsForAsg( + srcCloudWatchClient, + dstCloudWatchClient, + srcRegion, + dstRegion, + srcCredentials.getAccountId(), + dstCredentials.getAccountId(), + srcServiceName, + dstServiceName, + clusterName, + allSourceAlarmNames, + srcPolicyArnToDstPolicyArn); + } + private Set getScalingPolicies( + AWSApplicationAutoScaling autoScalingClient, String resourceId) { Set scalingPolicies = new HashSet<>(); String nextToken = null; do { - DescribeScalingPoliciesRequest request = new DescribeScalingPoliciesRequest().withPolicyNames(actionArns.stream() - .map(arn -> StringUtils.substringAfterLast(arn, ":policyName/")) - .collect(Collectors.toSet())) - .withServiceNamespace(ServiceNamespace.Ecs); + DescribeScalingPoliciesRequest request = + new DescribeScalingPoliciesRequest() + .withServiceNamespace(ServiceNamespace.Ecs) + .withResourceId(resourceId); if (nextToken != null) { request.setNextToken(nextToken); } @@ -234,17 +324,95 @@ private Set putScalingPolicies(AWSApplicationAutoScaling autoScalingClie nextToken = result.getNextToken(); } while (nextToken != null && nextToken.length() != 0); - Set policyArns = new HashSet<>(); - for (ScalingPolicy scalingPolicy : scalingPolicies) { - String newPolicyName = serviceName + "-" + type + "-" + suffix; + return scalingPolicies; + } + + // Return map of src policy ARN -> dst policy ARN + private Map putScalingPolicies( + AWSApplicationAutoScaling dstAutoScalingClient, + String srcServiceName, + String dstServiceName, + String dstResourceId, + String clusterName, + Set srcScalingPolicies) { + Map srcPolicyArnToDstPolicyArn = new HashMap<>(); + + for (ScalingPolicy scalingPolicy : srcScalingPolicies) { + String newPolicyName = + scalingPolicy.getPolicyName().replaceAll(srcServiceName, dstServiceName); + if (!newPolicyName.contains(dstServiceName)) { + newPolicyName = newPolicyName + "-" + dstServiceName; + } + ScalingPolicy clone = scalingPolicy.clone(); clone.setPolicyName(newPolicyName); - clone.setResourceId(resourceId); + clone.setResourceId(dstResourceId); + + if (clone.getTargetTrackingScalingPolicyConfiguration() != 
null + && clone.getTargetTrackingScalingPolicyConfiguration().getCustomizedMetricSpecification() + != null) { + CustomizedMetricSpecification spec = + clone.getTargetTrackingScalingPolicyConfiguration().getCustomizedMetricSpecification(); + spec.setDimensions( + spec.getDimensions().stream() + .map( + dimension -> + buildNewServiceTargetTrackingDimension( + dimension, spec.getNamespace(), dstServiceName, clusterName)) + .collect(Collectors.toSet())); + } + + PutScalingPolicyResult result = + dstAutoScalingClient.putScalingPolicy(buildPutScalingPolicyRequest(clone)); - PutScalingPolicyResult result = autoScalingClient.putScalingPolicy(buildPutScalingPolicyRequest(clone)); - policyArns.add(result.getPolicyARN()); + srcPolicyArnToDstPolicyArn.put(scalingPolicy.getPolicyARN(), result.getPolicyARN()); } - return policyArns; + return srcPolicyArnToDstPolicyArn; + } + + private void copyAlarmsForAsg( + AmazonCloudWatch srcCloudWatchClient, + AmazonCloudWatch dstCloudWatchClient, + String srcRegion, + String dstRegion, + String srcAccountId, + String dstAccountId, + String srcServiceName, + String dstServiceName, + String clusterName, + Set srcAlarmNames, + Map srcPolicyArnToDstPolicyArn) { + + for (List srcAlarmsPartition : Iterables.partition(srcAlarmNames, 100)) { + DescribeAlarmsResult describeAlarmsResult = + srcCloudWatchClient.describeAlarms( + new DescribeAlarmsRequest().withAlarmNames(srcAlarmsPartition)); + + for (MetricAlarm srcMetricAlarm : describeAlarmsResult.getMetricAlarms()) { + if (srcMetricAlarm.getAlarmName().startsWith("TargetTracking-")) { + // Target Tracking policies auto-create their alarms, so we don't need to copy them + continue; + } + + String dstAlarmName = + srcMetricAlarm.getAlarmName().replaceAll(srcServiceName, dstServiceName); + if (!dstAlarmName.contains(dstServiceName)) { + dstAlarmName = dstAlarmName + "-" + dstServiceName; + } + + dstCloudWatchClient.putMetricAlarm( + buildPutMetricAlarmRequest( + srcMetricAlarm, + dstAlarmName, + dstServiceName, + clusterName, + srcRegion, + dstRegion, + srcAccountId, + dstAccountId, + srcPolicyArnToDstPolicyArn)); + } + } } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelector.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelector.java index 380e37a8b25..4746b1d5825 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelector.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelector.java @@ -21,13 +21,12 @@ import com.netflix.spinnaker.clouddriver.ecs.model.EcsSecurityGroup; import com.netflix.spinnaker.clouddriver.ecs.provider.view.AmazonPrimitiveConverter; import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class SecurityGroupSelector { @@ -37,34 +36,39 @@ public class SecurityGroupSelector { EcsAccountMapper ecsAccountMapper; @Autowired - public SecurityGroupSelector(AmazonSecurityGroupProvider amazonSecurityGroupProvider, - AmazonPrimitiveConverter converter, - EcsAccountMapper ecsAccountMapper) { + public SecurityGroupSelector( + 
AmazonSecurityGroupProvider amazonSecurityGroupProvider, + AmazonPrimitiveConverter converter, + EcsAccountMapper ecsAccountMapper) { this.amazonSecurityGroupProvider = amazonSecurityGroupProvider; this.converter = converter; this.ecsAccountMapper = ecsAccountMapper; } - public Collection<String> resolveSecurityGroupNames(String ecsAccountName, - String region, - Collection<String> securityGroupNames, - Collection<String> vpcIds) { - String correspondingAwsAccountName = ecsAccountMapper.fromEcsAccountNameToAws(ecsAccountName).getName(); + public Collection<String> resolveSecurityGroupNames( + String ecsAccountName, + String region, + Collection<String> securityGroupNames, + Collection<String> vpcIds) { + String correspondingAwsAccountName = + ecsAccountMapper.fromEcsAccountNameToAws(ecsAccountName).getName(); - Collection<AmazonSecurityGroup> amazonSecurityGroups = amazonSecurityGroupProvider.getAllByAccountAndRegion( - true, correspondingAwsAccountName, region); + Collection<AmazonSecurityGroup> amazonSecurityGroups = + amazonSecurityGroupProvider.getAllByAccountAndRegion( + true, correspondingAwsAccountName, region); - Collection<EcsSecurityGroup> ecsSecurityGroups = converter.convertToEcsSecurityGroup(amazonSecurityGroups); + Collection<EcsSecurityGroup> ecsSecurityGroups = + converter.convertToEcsSecurityGroup(amazonSecurityGroups); Set<String> securityGroupNamesSet = new HashSet<>(securityGroupNames); Set<String> vpcIdsSet = new HashSet<>(vpcIds); - Set<String> filteredSecurityGroupIds = ecsSecurityGroups - .stream() - .filter(group -> securityGroupNamesSet.contains(group.getName())) - .filter(group -> vpcIdsSet.contains(group.getVpcId())) - .map(EcsSecurityGroup::getId) - .collect(Collectors.toSet()); + Set<String> filteredSecurityGroupIds = + ecsSecurityGroups.stream() + .filter(group -> securityGroupNamesSet.contains(group.getName())) + .filter(group -> vpcIdsSet.contains(group.getVpcId())) + .map(EcsSecurityGroup::getId) + .collect(Collectors.toSet()); return filteredSecurityGroupIds; } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelector.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelector.java index 748cc6892a4..479960dadde 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelector.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelector.java @@ -22,13 +22,12 @@ import com.netflix.spinnaker.clouddriver.ecs.model.EcsSubnet; import com.netflix.spinnaker.clouddriver.ecs.provider.view.AmazonPrimitiveConverter; import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.Collection; -import java.util.List; +import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class SubnetSelector { @@ -38,44 +37,66 @@ public class SubnetSelector { EcsAccountMapper ecsAccountMapper; @Autowired - public SubnetSelector(AmazonSubnetProvider amazonSubnetProvider, - AmazonPrimitiveConverter converter, - EcsAccountMapper ecsAccountMapper) { + public SubnetSelector( + AmazonSubnetProvider amazonSubnetProvider, + AmazonPrimitiveConverter converter, + EcsAccountMapper ecsAccountMapper) { this.amazonSubnetProvider = amazonSubnetProvider; this.converter = converter; this.ecsAccountMapper = ecsAccountMapper; } - public Collection<String> resolveSubnetsIds(String ecsAccountName, String 
region, String subnetType) { - String correspondingAwsAccountName = ecsAccountMapper.fromEcsAccountNameToAws(ecsAccountName).getName(); + public Collection<String> resolveSubnetsIds( + String ecsAccountName, + String region, + Collection<String> availabilityZones, + String subnetType) { + String correspondingAwsAccountName = + ecsAccountMapper.fromEcsAccountNameToAws(ecsAccountName).getName(); - Set<AmazonSubnet> amazonSubnets = amazonSubnetProvider.getAllMatchingKeyPattern( - Keys.getSubnetKey("*", region, correspondingAwsAccountName)); + Set<AmazonSubnet> amazonSubnets = + amazonSubnetProvider.getAllMatchingKeyPattern( + Keys.getSubnetKey("*", region, correspondingAwsAccountName)); Set<EcsSubnet> ecsSubnets = converter.convertToEcsSubnet(amazonSubnets); - Set<String> filteredSubnetIds = ecsSubnets - .stream() - .filter(subnet -> subnetType.equals(subnet.getPurpose())) - .map(AmazonSubnet::getId) - .collect(Collectors.toSet()); + Set<String> filteredSubnetIds = + ecsSubnets.stream() + .filter(subnet -> subnetType.equals(subnet.getPurpose())) + .filter(subnet -> availabilityZones.contains(subnet.getAvailabilityZone())) + .map(AmazonSubnet::getId) + .collect(Collectors.toSet()); return filteredSubnetIds; } - public Collection<String> getSubnetVpcIds(String ecsAccountName, String region, Collection<String> subnetIds) { - String correspondingAwsAccountName = ecsAccountMapper.fromEcsAccountNameToAws(ecsAccountName).getName(); + public Set<String> resolveSubnetsIdsForMultipleSubnetTypes( + String ecsAccountName, + String region, + Collection<String> availabilityZones, + Set<String> subnetTypes) { + + Set<String> subnetIds = new HashSet<>(); + for (String subnetType : subnetTypes) { + subnetIds.addAll(resolveSubnetsIds(ecsAccountName, region, availabilityZones, subnetType)); + } + + return subnetIds; + } + + public Collection<String> getSubnetVpcIds( + String ecsAccountName, String region, Collection<String> subnetIds) { + String correspondingAwsAccountName = + ecsAccountMapper.fromEcsAccountNameToAws(ecsAccountName).getName(); - Set<String> subnetKeys = subnetIds.stream() - .map(subnetId -> Keys.getSubnetKey(subnetId, region, correspondingAwsAccountName)) - .collect(Collectors.toSet()); + Set<String> subnetKeys = + subnetIds.stream() + .map(subnetId -> Keys.getSubnetKey(subnetId, region, correspondingAwsAccountName)) + .collect(Collectors.toSet()); Set<AmazonSubnet> amazonSubnets = amazonSubnetProvider.loadResults(subnetKeys); Set<EcsSubnet> ecsSubnets = converter.convertToEcsSubnet(amazonSubnets); - return ecsSubnets - .stream() - .map(AmazonSubnet::getVpcId) - .collect(Collectors.toSet()); + return ecsSubnets.stream().map(AmazonSubnet::getVpcId).collect(Collectors.toSet()); } } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProvider.java index 04711b09ac6..0f219ed252c 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProvider.java @@ -16,81 +16,85 @@ package com.netflix.spinnaker.clouddriver.ecs.view; - import com.google.common.collect.Sets; import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; import com.netflix.spinnaker.clouddriver.ecs.model.EcsApplication; +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials; import 
com.netflix.spinnaker.clouddriver.model.Application; import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.moniker.Moniker; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component public class EcsApplicationProvider implements ApplicationProvider { private final ServiceCacheClient serviceCacheClient; - private final AccountCredentialsProvider accountCredentialsProvider; + private final CredentialsRepository<NetflixECSCredentials> credentialsRepository; @Autowired - public EcsApplicationProvider(AccountCredentialsProvider accountCredentialsProvider, ServiceCacheClient serviceCacheClient) { - this.accountCredentialsProvider = accountCredentialsProvider; + public EcsApplicationProvider( + CredentialsRepository<NetflixECSCredentials> credentialsRepository, + ServiceCacheClient serviceCacheClient) { + this.credentialsRepository = credentialsRepository; this.serviceCacheClient = serviceCacheClient; } - @Override public Application getApplication(String name) { - - for (Application application : getApplications(true)) { + String glob = Keys.getServiceKey("*", "*", name + "*"); + Collection<String> ecsServices = serviceCacheClient.filterIdentifiers(glob); + for (Application application : populateApplicationSet(ecsServices, true)) { if (name.equals(application.getName())) { return application; } } - return null; } @Override - public Set<Application> getApplications(boolean expand) { - Set<Application> applications = new HashSet<>(); - - for (AccountCredentials credentials : accountCredentialsProvider.getAll()) { - if (credentials instanceof AmazonCredentials) { - Set<Application> retrievedApplications = findApplicationsForAllRegions((AmazonCredentials) credentials, expand); - applications.addAll(retrievedApplications); - } + public Set<Application> getApplications(boolean expand) { + Set<Application> applications = new HashSet<>(); + for (NetflixECSCredentials credentials : credentialsRepository.getAll()) { + Set<Application> retrievedApplications = + findApplicationsForAllRegions(credentials, expand); + applications.addAll(retrievedApplications); } return applications; } - private Set<Application> findApplicationsForAllRegions(AmazonCredentials credentials, boolean expand) { - Set<Application> applications = new HashSet<>(); + private Set<Application> findApplicationsForAllRegions( + AmazonCredentials credentials, boolean expand) { + Set<Application> applications = new HashSet<>(); for (AmazonCredentials.AWSRegion awsRegion : credentials.getRegions()) { - applications.addAll(findApplicationsForRegion(credentials.getName(), awsRegion.getName(), expand)); + applications.addAll( + findApplicationsForRegion(credentials.getName(), awsRegion.getName(), expand)); } return applications; } - private Set<Application> findApplicationsForRegion(String account, String region, boolean expand) { - HashMap<String, EcsApplication> applicationHashMap = populateApplicationMap(account, region, expand); + private Set<Application> findApplicationsForRegion( + String account, String region, boolean expand) { + HashMap<String, EcsApplication> applicationHashMap = + populateApplicationMap(account, region, expand); return transposeApplicationMapToSet(applicationHashMap); } - private HashMap<String, EcsApplication> 
populateApplicationMap(String account, String region, boolean expand) { - HashMap<String, EcsApplication> applicationHashMap = new HashMap<>(); + private HashMap<String, EcsApplication> populateApplicationMap( + String account, String region, boolean expand) { + HashMap<String, EcsApplication> applicationHashMap = new HashMap<>(); Collection<Service> services = serviceCacheClient.getAll(account, region); for (Service service : services) { @@ -99,42 +103,70 @@ private HashMap<String, EcsApplication> populateApplicationMap(String account, Stri return applicationHashMap; } - private Set<Application> transposeApplicationMapToSet(HashMap<String, EcsApplication> applicationHashMap) { - Set<Application> applications = new HashSet<>(); + private Set<Application> populateApplicationSet( + Collection<String> identifiers, boolean expand) { + HashMap<String, EcsApplication> applicationHashMap = new HashMap<>(); + Collection<Service> services = serviceCacheClient.getAll(identifiers); - for (Map.Entry<String, EcsApplication> entry : applicationHashMap.entrySet()) { + for (Service service : services) { + if (credentialsRepository.has(service.getAccount())) { + applicationHashMap = inferApplicationFromServices(applicationHashMap, service, expand); + } + } + return transposeApplicationMapToSet(applicationHashMap); + } + + private Set<Application> transposeApplicationMapToSet( + HashMap<String, EcsApplication> applicationHashMap) { + Set<Application> applications = new HashSet<>(); + + for (Map.Entry<String, EcsApplication> entry : applicationHashMap.entrySet()) { applications.add(entry.getValue()); } return applications; } - private HashMap<String, EcsApplication> inferApplicationFromServices(HashMap<String, EcsApplication> applicationHashMap, Service service, boolean expand) { + private HashMap<String, EcsApplication> inferApplicationFromServices( + HashMap<String, EcsApplication> applicationHashMap, Service service, boolean expand) { + + HashMap<String, String> attributes = new HashMap<>(); + Moniker moniker = service.getMoniker(); - HashMap<String, String> attributes = new HashMap<>(); // After POC we'll figure exactly what info we want to put in here - String appName = service.getApplicationName(); + String appName = moniker.getApp(); String serviceName = service.getServiceName(); - attributes.put("iamRole", service.getRoleArn()); - attributes.put("taskDefinition", service.getTaskDefinition()); - attributes.put("desiredCount", String.valueOf(service.getDesiredCount())); + String accountName = service.getAccount(); + attributes.put("name", appName); HashMap<String, Set<String>> clusterNames = new HashMap<>(); + HashMap<String, Set<String>> clusterNamesMetadata = new HashMap<>(); + if (expand) { - clusterNames.put(appName, Sets.newHashSet(serviceName)); + clusterNames.put(accountName, Sets.newHashSet(serviceName)); + clusterNamesMetadata.put(accountName, Sets.newHashSet(moniker.getCluster())); } - EcsApplication application = new EcsApplication(appName, attributes, clusterNames); + EcsApplication application = + new EcsApplication(appName, attributes, clusterNames, clusterNamesMetadata); if (!applicationHashMap.containsKey(appName)) { applicationHashMap.put(appName, application); } else { applicationHashMap.get(appName).getAttributes().putAll(application.getAttributes()); if (expand) { - applicationHashMap.get(appName).getClusterNames().get(appName).add(serviceName); + applicationHashMap + .get(appName) + .getClusterNames() + .computeIfAbsent(accountName, k -> Sets.newHashSet()) + .add(serviceName); + applicationHashMap + .get(appName) + .getClusterNameMetadata() + .computeIfAbsent(accountName, k -> Sets.newHashSet()) + .add(moniker.getCluster()); } } return applicationHashMap; } - } diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProvider.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProvider.java index e692c897aeb..4a3b07bbc48 100644 --- 
a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProvider.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProvider.java @@ -21,29 +21,29 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ContainerInstanceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskCacheClient; -import com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance; +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Task; import com.netflix.spinnaker.clouddriver.ecs.model.EcsTask; import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService; import com.netflix.spinnaker.clouddriver.model.InstanceProvider; -import org.apache.commons.lang.StringUtils; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import java.util.List; import java.util.Map; +import org.apache.commons.lang3.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component -public class EcsInstanceProvider implements InstanceProvider<EcsTask> { +public class EcsInstanceProvider implements InstanceProvider<EcsTask, String> { private final TaskCacheClient taskCacheClient; private final ContainerInstanceCacheClient containerInstanceCacheClient; private ContainerInformationService containerInformationService; @Autowired - public EcsInstanceProvider(ContainerInformationService containerInformationService, - TaskCacheClient taskCacheClient, - ContainerInstanceCacheClient containerInstanceCacheClient) { + public EcsInstanceProvider( + ContainerInformationService containerInformationService, + TaskCacheClient taskCacheClient, + ContainerInstanceCacheClient containerInstanceCacheClient) { this.containerInformationService = containerInformationService; this.taskCacheClient = taskCacheClient; this.containerInstanceCacheClient = containerInstanceCacheClient; @@ -56,8 +56,7 @@ public String getCloudProvider() { @Override public EcsTask getInstance(String account, String region, String id) { - if (!isValidId(id, region)) - return null; + if (!isValidId(id, region)) return null; EcsTask ecsInstance = null; @@ -70,17 +69,37 @@ public EcsTask getInstance(String account, String region, String id) { String serviceName = StringUtils.substringAfter(task.getGroup(), "service:"); Long launchTime = task.getStartedAt(); - List<Map<String, Object>> healthStatus = containerInformationService.getHealthStatus(id, serviceName, account, region); + List<Map<String, Object>> healthStatus = + containerInformationService.getHealthStatus(id, serviceName, account, region); String address = containerInformationService.getTaskPrivateAddress(account, region, task); - String zone = containerInformationService.getTaskZone(account, region, task); + String zone = + task.getAvailabilityZone() != null && !task.getAvailabilityZone().isEmpty() + ? task.getAvailabilityZone() + : containerInformationService.getTaskZone(account, region, task); NetworkInterface networkInterface = - task.getContainers() != null - && !task.getContainers().isEmpty() - && !task.getContainers().get(0).getNetworkInterfaces().isEmpty() - ? 
task.getContainers().get(0).getNetworkInterfaces().get(0) : null; - - ecsInstance = new EcsTask(id, launchTime, task.getLastStatus(), task.getDesiredStatus(), zone, healthStatus, address, networkInterface); + task.getContainers() != null + && !task.getContainers().isEmpty() + && !task.getContainers().get(0).getNetworkInterfaces().isEmpty() + ? task.getContainers().get(0).getNetworkInterfaces().get(0) + : null; + + Service service = containerInformationService.getService(serviceName, account, region); + boolean hasHealthCheck = + containerInformationService.taskHasHealthCheck(service, account, region); + + ecsInstance = + new EcsTask( + id, + launchTime, + task.getLastStatus(), + task.getDesiredStatus(), + task.getHealthStatus(), + zone, + healthStatus, + address, + networkInterface, + hasHealthCheck); return ecsInstance; } @@ -91,11 +110,19 @@ public String getConsoleOutput(String account, String region, String id) { } private boolean isValidId(String id, String region) { - String idRegex = "[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}"; - String idOnly = String.format("^%s$", idRegex); - String arn = String.format("arn:aws:ecs:%s:\\d*:task/%s", region, idRegex); - return id.matches(idOnly) || id.matches(arn); + String oldTaskIdRegex = "[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}"; + String newTaskIdRegex = "[\\da-f]{32}"; + String clusterNameRegex = "[a-zA-Z0-9\\-_]{1,255}"; + String oldTaskIdOnly = String.format("^%s$", oldTaskIdRegex); + String newTaskIdOnly = String.format("^%s$", newTaskIdRegex); + // arn:aws:ecs:region:account-id:task/task-id + String oldTaskArn = String.format("arn:aws:ecs:%s:\\d*:task/%s", region, oldTaskIdRegex); + // arn:aws:ecs:region:account-id:task/cluster-name/task-id + String newTaskArn = + String.format("arn:aws:ecs:%s:\\d*:task/%s/%s", region, clusterNameRegex, newTaskIdRegex); + return id.matches(oldTaskIdOnly) + || id.matches(newTaskIdOnly) + || id.matches(oldTaskArn) + || id.matches(newTaskArn); } - } - diff --git a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/config/EcsConfiguration.java b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/config/EcsConfiguration.java index 07f30077715..80a203662d0 100644 --- a/clouddriver-ecs/src/main/java/com/netflix/spinnaker/config/EcsConfiguration.java +++ b/clouddriver-ecs/src/main/java/com/netflix/spinnaker/config/EcsConfiguration.java @@ -23,6 +23,4 @@ @Configuration @ComponentScan("com.netflix.spinnaker.clouddriver.ecs") @ConditionalOnProperty("ecs.enabled") -public class EcsConfiguration { - -} +public class EcsConfiguration {} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/TestCredential.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/TestCredential.groovy deleted file mode 100644 index 899844650a7..00000000000 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/TestCredential.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2017 Lookout, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
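The widened isValidId() above accepts both generations of ECS task identifiers: the original UUID-style task IDs (bare or inside an unqualified task ARN) and the newer 32-character hex IDs whose ARNs also embed the cluster name. A self-contained sketch of the same matching rules, with an illustrative class name and sample values that are not part of this patch:

// Sketch of the validation rules in EcsInstanceProvider.isValidId();
// class name and sample values are illustrative only.
class TaskIdFormats {
  private static final String OLD_ID =
      "[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}";
  private static final String NEW_ID = "[\\da-f]{32}";
  private static final String CLUSTER_NAME = "[a-zA-Z0-9\\-_]{1,255}";

  static boolean isValidId(String id, String region) {
    return id.matches("^" + OLD_ID + "$")
        || id.matches("^" + NEW_ID + "$")
        // arn:aws:ecs:region:account-id:task/task-id
        || id.matches("arn:aws:ecs:" + region + ":\\d*:task/" + OLD_ID)
        // arn:aws:ecs:region:account-id:task/cluster-name/task-id
        || id.matches("arn:aws:ecs:" + region + ":\\d*:task/" + CLUSTER_NAME + "/" + NEW_ID);
  }

  public static void main(String[] args) {
    // An old-style UUID id and a new-style cluster-qualified ARN both validate.
    System.out.println(isValidId("0f706e05-5f15-4451-9523-96e16500c294", "us-west-2"));
    System.out.println(
        isValidId(
            "arn:aws:ecs:us-west-2:123456789012:task/my-cluster/0f706e055f154451952396e16500c294",
            "us-west-2"));
  }
}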
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.ecs -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import groovy.transform.CompileStatic - -@CompileStatic -class TestCredential { - public static NetflixAmazonCredentials named(String name, Map params = [:]) { - def credJson = [ - name: name, - environment: name, - accountType: name, - accountId: "123456789012" + name, - defaultKeyPair: 'default-keypair', - regions: [[name: 'us-east-1', availabilityZones: ['us-east-1b', 'us-east-1c', 'us-east-1d']], - [name: 'us-west-1', availabilityZones: ["us-west-1a", "us-west-1b"]]], - ] + params - - new ObjectMapper().convertValue(credJson, NetflixAmazonCredentials) - } -} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsCloudWatchAlarmCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsCloudWatchAlarmCacheClientSpec.groovy index de6503b4217..6e08c8a0025 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsCloudWatchAlarmCacheClientSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsCloudWatchAlarmCacheClientSpec.groovy @@ -16,27 +16,61 @@ package com.netflix.spinnaker.clouddriver.ecs.cache +import com.amazonaws.auth.AWSCredentialsProvider +import com.amazonaws.services.cloudwatch.AmazonCloudWatch +import com.amazonaws.services.cloudwatch.model.Dimension +import com.amazonaws.services.cloudwatch.model.MetricAlarm import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsCloudWatchAlarmCacheClient import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.CommonCachingAgent import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsCloudMetricAlarmCachingAgent +import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject +import java.util.stream.Collectors +import java.util.stream.Stream + class EcsCloudWatchAlarmCacheClientSpec extends Specification { - def cacheView = Mock(Cache) @Subject - EcsCloudWatchAlarmCacheClient client = new EcsCloudWatchAlarmCacheClient(cacheView) + EcsCloudWatchAlarmCacheClient client + + @Subject + EcsCloudMetricAlarmCachingAgent agent + + @Shared + String ACCOUNT = 'test-account' + + @Shared + String REGION = 'us-west-1' + + Cache cacheView + AmazonCloudWatch cloudWatch + AmazonClientProvider clientProvider + ProviderCache providerCache + AWSCredentialsProvider credentialsProvider + + def setup() { + cacheView = Mock(Cache) + client = new EcsCloudWatchAlarmCacheClient(cacheView) + cloudWatch = Mock(AmazonCloudWatch) + clientProvider = Mock(AmazonClientProvider) + providerCache = Mock(ProviderCache) + credentialsProvider = Mock(AWSCredentialsProvider) + agent = new EcsCloudMetricAlarmCachingAgent(CommonCachingAgent.netflixAmazonCredentials, REGION, clientProvider) + } def 'should convert cache data into object'() { given: - def accountName = 'test-account-1' - def region = 'us-west-1' - def metricAlarm = new 
EcsMetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn").withRegion(region).withAccountName(accountName) - def key = Keys.getAlarmKey(accountName, region, metricAlarm.getAlarmArn()) - def attributes = EcsCloudMetricAlarmCachingAgent.convertMetricAlarmToAttributes(metricAlarm, accountName, region) + def ecsClusterName = 'my-cluster' + def metricAlarm = new EcsMetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn").withRegion(REGION).withAccountName(ACCOUNT) + def key = Keys.getAlarmKey(ACCOUNT, REGION, metricAlarm.getAlarmArn(), ecsClusterName) + def attributes = EcsCloudMetricAlarmCachingAgent.convertMetricAlarmToAttributes(metricAlarm, ACCOUNT, REGION) when: def returnedMetricAlarm = client.get(key) @@ -44,4 +78,229 @@ class EcsCloudWatchAlarmCacheClientSpec extends Specification { cacheView.get(Keys.Namespace.ALARMS.ns, key) >> new DefaultCacheData(key, attributes, [:]) returnedMetricAlarm == metricAlarm } + + + def 'should return metric alarms for a service - single cluster'() { + given: + def serviceName = 'my-service' + def serviceName2 = 'not-matching-service' + + def ecsClusterName = 'my-cluster' + def metricAlarms = Set.of( + new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]), + new MetricAlarm().withAlarmName("alarm-name-2").withAlarmArn("alarmArn2") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]), + new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn3") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName2}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]) + ) + def keys = metricAlarms.collect { alarm -> + def key = Keys.getAlarmKey(ACCOUNT, REGION, alarm.getAlarmArn(), ecsClusterName) + def attributes = agent.convertMetricAlarmToAttributes(alarm, ACCOUNT, REGION) + [key, new DefaultCacheData(key, attributes, [:])] + } + + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, _) >> keys*.first() + cacheView.getAll(Keys.Namespace.ALARMS.ns, _) >> keys*.last() + + when: + def metricAlarmsReturned = client.getMetricAlarms(serviceName, ACCOUNT, REGION, ecsClusterName) + + then: + metricAlarmsReturned.size() == 2 + metricAlarmsReturned*.alarmName.containsAll(["alarm-name", "alarm-name-2"]) + metricAlarmsReturned*.alarmArn.containsAll(["alarmArn", "alarmArn2"]) + !metricAlarmsReturned*.alarmArn.contains(["alarmArn3"]) +} + + def 'should return metric alarms for a service - multiple clusters'() { + given: + def serviceName = 'my-service' + + def ecsClusterName = 'my-cluster' + def ecsClusterName2 = 'my-cluster-2' + def metricAlarm1 = new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]) + def metricAlarm2 = new MetricAlarm().withAlarmName("alarm-name-2").withAlarmArn("alarmArn2") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]) + def metricAlarm3 = new MetricAlarm().withAlarmName("alarm-name3").withAlarmArn("alarmArn3") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new 
Dimension().withName("ClusterName").withValue(ecsClusterName2)]) + + def key1 = Keys.getAlarmKey(ACCOUNT, REGION, metricAlarm1.getAlarmArn(), ecsClusterName) + def attributes1 = agent.convertMetricAlarmToAttributes(metricAlarm1, ACCOUNT, REGION) + def key2 = Keys.getAlarmKey(ACCOUNT, REGION, metricAlarm2.getAlarmArn(), ecsClusterName) + def attributes2 = agent.convertMetricAlarmToAttributes(metricAlarm2, ACCOUNT, REGION) + def key3 = Keys.getAlarmKey(ACCOUNT, REGION, metricAlarm3.getAlarmArn(), ecsClusterName2) + def attributes3 = agent.convertMetricAlarmToAttributes(metricAlarm3, ACCOUNT, REGION) + + + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, Keys.getAlarmKey(ACCOUNT, REGION, "*", ecsClusterName)) >> [key1,key2] + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, Keys.getAlarmKey(ACCOUNT, REGION, "*", ecsClusterName2)) >> [key3] + cacheView.getAll(Keys.Namespace.ALARMS.ns, [key1,key2]) >> [ + new DefaultCacheData(key1, attributes1, [:]), + new DefaultCacheData(key2, attributes2, [:]) + ] + cacheView.getAll(Keys.Namespace.ALARMS.ns, [key3]) >> [ + new DefaultCacheData(key3, attributes3, [:]) + ] + when: + def metricAlarmsReturned = client.getMetricAlarms(serviceName, ACCOUNT, REGION, ecsClusterName) + def metricAlarmsReturned2 = client.getMetricAlarms(serviceName, ACCOUNT, REGION, ecsClusterName2) + + then: + metricAlarmsReturned.size() == 2 + metricAlarmsReturned*.alarmName.containsAll(["alarm-name", "alarm-name-2"]) + metricAlarmsReturned*.alarmArn.containsAll(["alarmArn", "alarmArn2"]) + !metricAlarmsReturned*.alarmArn.contains(["alarmArn3"]) + metricAlarmsReturned2.size() == 1 + metricAlarmsReturned2*.alarmName.containsAll(["alarm-name3"]) + !metricAlarmsReturned2*.alarmArn.containsAll(["alarmArn", "alarmArn2"]) + metricAlarmsReturned2*.alarmArn.containsAll(["alarmArn3"]) + } + +def 'should return empty list if no metric alarms match the service'() { + given: + def serviceName = 'my-service' + + def ecsClusterName = 'my-cluster' + def metricAlarms = Set.of(new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)])) + def key = Keys.getAlarmKey(ACCOUNT, REGION, metricAlarms[0].getAlarmArn(), ecsClusterName) + def attributes = agent.convertMetricAlarmToAttributes(metricAlarms[0], ACCOUNT, REGION) + + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, _) >> [key] + cacheView.getAll(Keys.Namespace.ALARMS.ns, _) >> [ + new DefaultCacheData(key, attributes, [:]) + ] + + when: + def metricAlarmsReturned = client.getMetricAlarms("some-other-service", ACCOUNT, REGION, ecsClusterName) + + then: + metricAlarmsReturned.isEmpty() +} + +def 'should return metric alarms with actions matching the service'() { + given: + def serviceName = 'my-service' + + def ecsClusterName = 'my-cluster' + def metricAlarms = Set.of( + new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions") + .withOKActions("arn:aws:sns:us-west-1:123456789012:${serviceName}-OKActions") + .withInsufficientDataActions("arn:aws:sns:us-west-1:123456789012:${serviceName}-InsufficientDataActions") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]), + new MetricAlarm().withAlarmName("alarm-name-2").withAlarmArn("alarmArn2") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]), + new 
MetricAlarm().withAlarmName("alarm-name-3").withAlarmArn("alarmArn3") + .withAlarmActions( + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions1", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions2", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions3" + ) + .withOKActions( + "arn:aws:sns:us-west-1:123456789012:${serviceName}-OKActions1" + ) + .withInsufficientDataActions( + "arn:aws:sns:us-west-1:123456789012:${serviceName}-InsufficientDataActions1" + ) + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]) + ) + def keys = metricAlarms.collect { alarm -> + def key = Keys.getAlarmKey(ACCOUNT, REGION, alarm.getAlarmArn(), ecsClusterName) + def attributes = agent.convertMetricAlarmToAttributes(alarm, ACCOUNT, REGION) + [key, new DefaultCacheData(key, attributes, [:])] + } + + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, _) >> keys*.first() + cacheView.getAll(Keys.Namespace.ALARMS.ns, _) >> keys*.last() + + when: + def metricAlarmsReturned = client.getMetricAlarms(serviceName, ACCOUNT, REGION, ecsClusterName) + + then: + metricAlarmsReturned.size() == 2 + metricAlarmsReturned*.alarmName.containsAll(["alarm-name", "alarm-name-3"]) + metricAlarmsReturned*.alarmArn.containsAll(["alarmArn", "alarmArn3"]) + metricAlarmsReturned*.alarmActions.flatten().size() == 4 + metricAlarmsReturned*.OKActions.flatten().size() == 2 + metricAlarmsReturned*.insufficientDataActions.flatten().size() == 2 + metricAlarmsReturned*.OKActions.flatten().sort() == List.of( + "arn:aws:sns:us-west-1:123456789012:${serviceName}-OKActions", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-OKActions1" + ) + metricAlarmsReturned*.alarmActions.flatten().sort() == List.of( + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions1", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions2", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-AlarmsActions3" + ) + metricAlarmsReturned*.insufficientDataActions.flatten().sort() == List.of( + "arn:aws:sns:us-west-1:123456789012:${serviceName}-InsufficientDataActions", + "arn:aws:sns:us-west-1:123456789012:${serviceName}-InsufficientDataActions1" + ) +} + + + def 'should return metric alarms for a service - single cluster with Custom Alarms/Cloudwatch Dimensions'() { + given: + def serviceName = 'my-service' + def serviceName2 = 'not-matching-service' + + def ecsClusterName = 'my-cluster' + def metricAlarms = Set.of( + new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]), + new MetricAlarm().withAlarmName("alarm-name-2").withAlarmArn("alarmArn2") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]), + new MetricAlarm().withAlarmName("alarm-name").withAlarmArn("alarmArn3") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName2}") + .withDimensions([new Dimension().withName("ClusterName").withValue(ecsClusterName)]) + ) + def metricAlarmCustomDimension = Set.of ( + new MetricAlarm().withAlarmName("alarm-name-2-custom").withAlarmArn("alarmArn2-custom") + .withAlarmActions("arn:aws:sns:us-west-1:123456789012:${serviceName}") + .withDimensions([new Dimension().withName("CustomDimension").withValue("customValue")]), 
+ ) + + def keys = metricAlarms.collect { alarm -> + def key = Keys.getAlarmKey(ACCOUNT, REGION, alarm.getAlarmArn(), ecsClusterName) + def attributes = agent.convertMetricAlarmToAttributes(alarm, ACCOUNT, REGION) + [key, new DefaultCacheData(key, attributes, [:])] + } + def keysCustom = metricAlarmCustomDimension.collect { alarm -> + def key = Keys.getAlarmKey(ACCOUNT, REGION, alarm.getAlarmArn(), "") + def attributes = agent.convertMetricAlarmToAttributes(alarm, ACCOUNT, REGION) + [key, new DefaultCacheData(key, attributes, [:])] + } + + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, Keys.getAlarmKey(ACCOUNT, REGION, "*", ecsClusterName)) >> keys*.first() + cacheView.filterIdentifiers(Keys.Namespace.ALARMS.ns, Keys.getAlarmKey(ACCOUNT, REGION, "*", "")) >> keysCustom*.first() + def combinedMetricIds = Stream.of( keys*.first(), keysCustom*.first()) + .filter { it != null } + .flatMap { it.stream() } + .collect(Collectors.toList()) + + cacheView.getAll(Keys.Namespace.ALARMS.ns, combinedMetricIds) >> keys*.last() + keysCustom*.last() + + when: + def metricAlarmsReturned = client.getMetricAlarms(serviceName, ACCOUNT, REGION, ecsClusterName) + + then: + metricAlarmsReturned.size() == 3 + metricAlarmsReturned*.alarmName.containsAll(["alarm-name", "alarm-name-2", "alarm-name-2-custom"]) + metricAlarmsReturned*.alarmArn.containsAll(["alarmArn", "alarmArn2","alarmArn2-custom"]) + !metricAlarmsReturned*.alarmArn.contains(["alarmArn3"]) + } + } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsLoadbalancerCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsLoadbalancerCacheClientSpec.groovy index 68c84cf2526..dc5900a4d5a 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsLoadbalancerCacheClientSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsLoadbalancerCacheClientSpec.groovy @@ -25,6 +25,7 @@ import com.netflix.spinnaker.clouddriver.aws.data.Keys import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsLoadbalancerCacheClient import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsLoadBalancerCache +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper import spock.lang.Specification import spock.lang.Subject @@ -32,24 +33,29 @@ class EcsLoadbalancerCacheClientSpec extends Specification { def cacheView = Mock(Cache) def objectMapper = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + def accountMapper = Mock(EcsAccountMapper) @Subject - EcsLoadbalancerCacheClient client = new EcsLoadbalancerCacheClient(cacheView, objectMapper) + def client = new EcsLoadbalancerCacheClient(cacheView, objectMapper, accountMapper) def 'should convert cache data into object'() { given: def loadBalancerName = 'test-name' - def account = 'test-account' + def ecsAccount = 'ecsAccount' + def awsAccount = 'awsAccount' def region = 'us-west-1' def vpcId = 'vpc-id' def loadBalancerType = 'classic' def targetGroupName = 'test-target-group' - def loadbalancerKey = Keys.getLoadBalancerKey(loadBalancerName, account, region, vpcId, loadBalancerType) - def targetGroupKey = Keys.getTargetGroupKey(targetGroupName, account, region, TargetTypeEnum.Instance.toString(), vpcId) + accountMapper.fromEcsAccountNameToAwsAccountName(ecsAccount) >> awsAccount + accountMapper.fromAwsAccountNameToEcsAccountName(awsAccount) >> 
ecsAccount + + def loadbalancerKey = Keys.getLoadBalancerKey(loadBalancerName, awsAccount, region, vpcId, loadBalancerType) + def targetGroupKey = Keys.getTargetGroupKey(targetGroupName, awsAccount, region, TargetTypeEnum.Instance.toString(), vpcId) def givenEcsLoadbalancer = new EcsLoadBalancerCache( - account: account, + account: ecsAccount, region: region, loadBalancerArn: 'arn', loadBalancerType: loadBalancerType, diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsTargetGroupCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsTargetGroupCacheClientSpec.groovy index 261434f6b1b..22aeec889f3 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsTargetGroupCacheClientSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/EcsTargetGroupCacheClientSpec.groovy @@ -52,6 +52,7 @@ class EcsTargetGroupCacheClientSpec extends Specification { instances: [], healthCheckTimeoutSeconds: 30, targetGroupArn: 'arn', + targetType: 'ip', healthCheckPort: 1337, matcher: [:], healthCheckProtocol: 'http', diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/KeysSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/KeysSpec.groovy index eda7bc75fd9..bb10720c2e5 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/KeysSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/KeysSpec.groovy @@ -46,7 +46,29 @@ class KeysSpec extends Specification { 'test-account-5' | 'us-west-5' | TASK_DEFINITIONS.ns | 'arn:aws:ecs:' + region + ':012345678910:task-definition/hello_world:10' | buildParsedKey(account, region, namespace, [taskDefinitionArn: identifier]) 'test-account-6' | 'us-west-6' | ALARMS.ns | 'arn:aws:ecs:' + region + ':012345678910:alarms/14e8cce9-0b16-4af4-bfac-a85f7587aa98' | buildParsedKey(account, region, namespace, [alarmArn: identifier]) 'test-account-7' | 'us-west-7' | SCALABLE_TARGETS.ns | 'service/test-cluster/test-service' | buildParsedKey(account, region, namespace, [resource: identifier]) + 'test-account-8' | 'us-west-8' | SECRETS.ns | 'my-secret' | buildParsedKey(account, region, namespace, [secretName: identifier]) + 'test-account-9' | 'us-west-9' | SERVICE_DISCOVERY_REGISTRIES.ns | 'srv-123' | buildParsedKey(account, region, namespace, [serviceId: identifier]) + 'test-account-10' | 'us-west-10' | TARGET_HEALTHS.ns | 'arn:aws:elasticloadbalancing' + region + ':012345678910:targetgroup/ECSTG/htgbfvv' | buildParsedKey(account, region, namespace, [targetGroupArn: identifier]) + } + + def 'should parse a given application key properly'() { + given: + def application_1 = 'test-application-1' + def application_2 = 'test-application-2' + expect: + Keys.parse(ID + SEPARATOR + ECS_APPLICATIONS.ns + SEPARATOR + application_1) == [provider: ID, type: ECS_APPLICATIONS.ns, application: application_1] + Keys.parse(ID + SEPARATOR + ECS_APPLICATIONS.ns + SEPARATOR + application_2) == [provider: ID, type: ECS_APPLICATIONS.ns, application: application_2] + } + + def 'should generate the proper application key'() { + given: + def application_1 = 'test-application-1' + def application_2 = 'test-application-2' + + expect: + Keys.getApplicationKey(application_1) == ID + SEPARATOR + ECS_APPLICATIONS.ns + SEPARATOR + application_1 + Keys.getApplicationKey(application_2) == ID + SEPARATOR + ECS_APPLICATIONS.ns 
+ SEPARATOR + application_2 } def 'should parse a given iam role key properly'() { @@ -138,4 +160,34 @@ class KeysSpec extends Specification { 'us-west-1' | 'test-account-1' | 'service/test-cluster/test-service' 'us-west-2' | 'test-account-2' | 'service/mycluster/myservice' } + + def 'should generate the proper secret key'() { + expect: + Keys.getSecretKey(account, region, secretName) == buildKey(SECRETS.ns, account, region, secretName) + + where: + region | account | secretName + 'us-west-1' | 'test-account-1' | 'my-first-secret' + 'us-west-2' | 'test-account-2' | 'my-second-secret' + } + + def 'should generate the proper service discovery key'() { + expect: + Keys.getServiceDiscoveryRegistryKey(account, region, serviceId) == buildKey(SERVICE_DISCOVERY_REGISTRIES.ns, account, region, serviceId) + + where: + region | account | serviceId + 'us-west-1' | 'test-account-1' | 'my-first-service' + 'us-west-2' | 'test-account-2' | 'my-second-service' + } + + def 'should generate the proper target health key'() { + expect: + Keys.getTargetHealthKey(account, region, targetGroupArn) == buildKey(TARGET_HEALTHS.ns, account, region, targetGroupArn) + + where: + region | account | targetGroupArn + 'us-west-1' | 'test-account-1' | 'arn:aws:elasticloadbalancing' + region + ':012345678910:targetgroup/ECSTG/htgbfvv' + 'us-west-2' | 'test-account-2' | 'arn:aws:elasticloadbalancing' + region + ':012345678910:targetgroup/ECSTG/eognasm' + } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/SecretCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/SecretCacheClientSpec.groovy new file mode 100644 index 00000000000..642a18cbba2 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/SecretCacheClientSpec.groovy @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
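The KeysSpec additions above pin down the flat, separator-delimited layout of ECS cache keys: the provider id, a namespace, and then the namespace-specific parts (account, region, identifier, or just an application name). A minimal sketch of that layout, assuming an "ecs" provider id and a ":" separator purely for illustration; the spec's ID, SEPARATOR, and namespace constants are the authoritative values:

// Illustrative mirror of the key builders exercised by KeysSpec; the "ecs" prefix,
// ":" separator, and namespace strings below are assumptions, not taken from the patch.
class EcsCacheKeySketch {
  private static final String ID = "ecs";
  private static final String SEPARATOR = ":";

  static String secretKey(String account, String region, String secretName) {
    return String.join(SEPARATOR, ID, "secrets", account, region, secretName);
  }

  static String applicationKey(String application) {
    return String.join(SEPARATOR, ID, "ecsApplications", application);
  }

  public static void main(String[] args) {
    System.out.println(secretKey("test-account-1", "us-west-1", "my-first-secret"));
    System.out.println(applicationKey("test-application-1"));
  }
}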
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache + +import com.amazonaws.services.secretsmanager.model.SecretListEntry +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.ecs.cache.client.SecretCacheClient +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.SecretCachingAgent +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SECRETS + +class SecretCacheClientSpec extends Specification { + def cacheView = Mock(Cache) + @Subject + private SecretCacheClient client = new SecretCacheClient(cacheView) + + def 'should convert'() { + given: + def account = 'test-account' + def region = 'us-west-1' + def secretName = 'my-secret' + def secretArn = 'arn:aws:secretsmanager:us-west-1:123456789012:secret:my-secret' + def key = Keys.getSecretKey('test-account', 'us-west-1', secretName) + + def originalSecret = new Secret( + account: account, + region: region, + name: secretName, + arn: secretArn + ) + + def originalSecretEntry = new SecretListEntry( + name: secretName, + aRN: secretArn + ); + + def attributes = SecretCachingAgent.convertSecretToAttributes(account, region, originalSecretEntry) + cacheView.get(SECRETS.ns, key) >> new DefaultCacheData(key, attributes, Collections.emptyMap()) + + when: + def retrievedSecret = client.get(key) + + then: + retrievedSecret == originalSecret + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceDiscoveryCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceDiscoveryCacheClientSpec.groovy new file mode 100644 index 00000000000..8b9265c26d9 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceDiscoveryCacheClientSpec.groovy @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache + +import com.amazonaws.services.servicediscovery.model.ServiceSummary +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceDiscoveryCacheClient +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceDiscoveryCachingAgent +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICE_DISCOVERY_REGISTRIES + +class ServiceDiscoveryCacheClientSpec extends Specification { + def cacheView = Mock(Cache) + @Subject + private ServiceDiscoveryCacheClient client = new ServiceDiscoveryCacheClient(cacheView) + + def 'should convert'() { + given: + def account = 'test-account' + def region = 'us-west-1' + def serviceName = 'my-service' + def serviceId = 'srv-123' + def serviceArn = 'arn:aws:servicediscovery:us-west-1:123456789012:service/srv-123' + def key = Keys.getServiceDiscoveryRegistryKey('test-account', 'us-west-1', serviceId) + + def originalService = new ServiceDiscoveryRegistry( + account: account, + region: region, + name: serviceName, + arn: serviceArn, + id: serviceId + ) + + def originalServiceEntry = new ServiceSummary( + name: serviceName, + id: serviceId, + arn: serviceArn + ); + + def attributes = ServiceDiscoveryCachingAgent.convertServiceToAttributes(account, region, originalServiceEntry) + cacheView.get(SERVICE_DISCOVERY_REGISTRIES.ns, key) >> new DefaultCacheData(key, attributes, Collections.emptyMap()) + + when: + def retrievedService = client.get(key) + + then: + retrievedService == originalService + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TargetHealthCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TargetHealthCacheClientSpec.groovy new file mode 100644 index 00000000000..e5d1e7c3c5c --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TargetHealthCacheClientSpec.groovy @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
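SecretCacheClientSpec and ServiceDiscoveryCacheClientSpec above both exercise the same round-trip contract: a caching agent flattens an AWS model object into an attribute map, the map is stored as DefaultCacheData under a generated key, and the cache client rebuilds the typed model on get(). A compact sketch of that contract, with a hypothetical Registry type standing in for the real cache model:

// Illustrative round-trip: an agent flattens a model to attributes, a client rebuilds it.
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

class AttributeRoundTrip {
  record Registry(String account, String region, String name, String arn, String id) {}

  // Roughly what an agent's convert*ToAttributes(...) helper does.
  static Map<String, Object> toAttributes(Registry r) {
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("account", r.account());
    attributes.put("region", r.region());
    attributes.put("name", r.name());
    attributes.put("arn", r.arn());
    attributes.put("id", r.id());
    return attributes;
  }

  // Roughly what a cache client's get(key) does after loading the cache data.
  static Registry fromAttributes(Map<String, Object> a) {
    return new Registry(
        (String) a.get("account"),
        (String) a.get("region"),
        (String) a.get("name"),
        (String) a.get("arn"),
        (String) a.get("id"));
  }

  public static void main(String[] args) {
    Registry original =
        new Registry(
            "test-account",
            "us-west-1",
            "my-service",
            "arn:aws:servicediscovery:us-west-1:123456789012:service/srv-123",
            "srv-123");
    Registry restored = fromAttributes(toAttributes(original));
    System.out.println(Objects.equals(original, restored)); // true
  }
}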
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.cache + +import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealth +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthStateEnum +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.DefaultCacheData + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TARGET_HEALTHS; + +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.clouddriver.ecs.cache.client.TargetHealthCacheClient +import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsTargetHealth +import spock.lang.Specification +import spock.lang.Subject + +class TargetHealthCacheClientSpec extends Specification { + def cacheView = Mock(Cache) + def objectMapper = new ObjectMapper() + @Subject + private TargetHealthCacheClient client = new TargetHealthCacheClient(cacheView, objectMapper) + + def 'should convert'() { + given: + def targetId = '10.0.0.13' + def targetGroupArn = 'arn:targetgroup' + def key = + Keys.getTargetHealthKey('test-account', 'us-west-1', targetGroupArn) + + def targetHealthDescription = new TargetHealthDescription().withTarget( + new TargetDescription().withId(targetId).withPort(80)) + .withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)) + + def originalTargetHealth = new EcsTargetHealth( + targetGroupArn: targetGroupArn, + targetHealthDescriptions: Collections.singletonList(targetHealthDescription) + ) + + def attributes = objectMapper.convertValue(originalTargetHealth, Map) + cacheView.get(TARGET_HEALTHS.toString(), key) >> new DefaultCacheData(key, attributes, Collections.emptyMap()) + + when: + def retrievedTargetHealth = client.get(key) + + then: + retrievedTargetHealth == originalTargetHealth + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TaskHealthCacheClientSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TaskHealthCacheClientSpec.groovy index 6084277ed25..ffe2b76de34 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TaskHealthCacheClientSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/cache/TaskHealthCacheClientSpec.groovy @@ -28,7 +28,7 @@ import spock.lang.Subject class TaskHealthCacheClientSpec extends Specification { def cacheView = Mock(Cache) @Subject - private final TaskHealthCacheClient client = new TaskHealthCacheClient(cacheView) + private TaskHealthCacheClient client = new TaskHealthCacheClient(cacheView) def 'should convert'() { given: diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsSecretControllerSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsSecretControllerSpec.groovy new file mode 100644 index 00000000000..b8270659496 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsSecretControllerSpec.groovy @@ -0,0 +1,52 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.controllers + + +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsSecretProvider +import spock.lang.Specification +import spock.lang.Subject + +class EcsSecretControllerSpec extends Specification { + + def provider = Mock(EcsSecretProvider) + @Subject + def controller = new EcsSecretController(provider) + + def 'should retrieve a collection of secrets'() { + given: + def numberOfSecrets = 5 + def givenSecrets = [] + for (int x = 0; x < numberOfSecrets; x++) { + givenSecrets << new Secret( + account: 'test-account-' + x, + region: 'us-west-' + x, + name: 'secret-name-' + x, + arn: 'secret-arn-' + x + ) + } + provider.allSecrets >> givenSecrets + + + when: + def retrievedSecrets = controller.getAllSecrets() + + then: + retrievedSecrets == givenSecrets + } + +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsServiceDiscoveryControllerSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsServiceDiscoveryControllerSpec.groovy new file mode 100644 index 00000000000..5299ddc4acb --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/controllers/EcsServiceDiscoveryControllerSpec.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.controllers + + +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsServiceDiscoveryProvider +import spock.lang.Specification +import spock.lang.Subject + +class EcsServiceDiscoveryControllerSpec extends Specification { + + def provider = Mock(EcsServiceDiscoveryProvider) + @Subject + def controller = new EcsServiceDiscoveryController(provider) + + def 'should retrieve a collection of service discovery registries'() { + given: + def numberOfServices = 5 + def givenServices = [] + for (int x = 0; x < numberOfServices; x++) { + givenServices << new ServiceDiscoveryRegistry( + account: 'test-account-' + x, + region: 'us-west-' + x, + name: 'service-name-' + x, + arn: 'service-arn-' + x, + id: 'srv-' + x + ) + } + provider.allServiceDiscoveryRegistries >> givenServices + + + when: + def retrievedServices = controller.getAllServiceDiscoveryRegistries() + + then: + retrievedServices == givenServices + } + +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverterSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverterSpec.groovy index 042525e8a55..f39d9f37411 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverterSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/converters/EcsCreateServerGroupAtomicOperationConverterSpec.groovy @@ -47,7 +47,6 @@ class EcsCreateServerGroupAtomicOperationConverterSpec extends Specification { dockerImageAddress : 'docker-url', capacity : new ServerGroup.Capacity(0, 2, 1,), availabilityZones : ['us-west-1': ['us-west-1a']], - autoscalingPolicies : [], placementStrategySequence: [new PlacementStrategy().withType(PlacementStrategyType.Random)], region : 'us-west-1', credentials : 'test' diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CommonAtomicOperation.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CommonAtomicOperation.groovy index 6c9e01da52f..473d375f4d6 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CommonAtomicOperation.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CommonAtomicOperation.groovy @@ -16,19 +16,21 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling import com.amazonaws.services.ecs.AmazonECS import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification class CommonAtomicOperation extends Specification{ def amazonClientProvider = Mock(AmazonClientProvider) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def credentialsRepository = Mock(CredentialsRepository) def containerInformationService = 
Mock(ContainerInformationService) def ecs = Mock(AmazonECS) + def autoscaling = Mock(AWSApplicationAutoScaling) def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperationSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperationSpec.groovy index 22631bc33a4..0e5300320b0 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperationSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/CreateServerGroupAtomicOperationSpec.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling +import com.amazonaws.services.applicationautoscaling.model.* import com.amazonaws.services.ecs.model.* import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult @@ -24,17 +25,27 @@ import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup import com.amazonaws.services.identitymanagement.AmazonIdentityManagement import com.amazonaws.services.identitymanagement.model.GetRoleResult import com.amazonaws.services.identitymanagement.model.Role +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials -import com.netflix.spinnaker.clouddriver.aws.security.AssumeRoleAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription +import com.netflix.spinnaker.clouddriver.ecs.names.EcsDefaultNamer +import com.netflix.spinnaker.clouddriver.ecs.names.EcsServerGroupName +import com.netflix.spinnaker.clouddriver.ecs.names.EcsResource +import com.netflix.spinnaker.clouddriver.ecs.names.EcsTagNamer import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamPolicyReader import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamTrustRelationship import com.netflix.spinnaker.clouddriver.ecs.services.EcsCloudMetricService import com.netflix.spinnaker.clouddriver.ecs.services.SecurityGroupSelector import com.netflix.spinnaker.clouddriver.ecs.services.SubnetSelector import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.fiat.model.resources.Permissions +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.kork.artifacts.model.Artifact + +import static com.netflix.spinnaker.clouddriver.ecs.deploy.ops.CreateServerGroupAtomicOperation.DOCKER_LABEL_KEY_SERVERGROUP class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { def iamClient = Mock(AmazonIdentityManagement) @@ -43,6 +54,8 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { def autoScalingClient = Mock(AWSApplicationAutoScaling) def subnetSelector = Mock(SubnetSelector) def securityGroupSelector = Mock(SecurityGroupSelector) + def objectMapper = Mock(ObjectMapper) + def artifactDownloader = Mock(ArtifactDownloader) def applicationName = 'myapp' 
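+    // fixture note: 'kcats' and 'liated' are 'stack' and 'detail' spelled backwards, so the server group names asserted below (e.g. 'myapp-kcats-liated-v008') can be traced directly back to these fixture values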
def stack = 'kcats' @@ -54,20 +67,43 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { def role = new Role(assumeRolePolicyDocument: "json-encoded-string-here") - def creds = new AssumeRoleAmazonCredentials("test", "test", "test", "test", "test", - [new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1a', 'us-west-1b'])], - [], [], Permissions.factory([:]), [], false, 'test-role', "test") + def creds = Mock(NetflixAssumeRoleAmazonCredentials) { + getName() >> { "test" } + getRegions() >> { [new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1a', 'us-west-1b'])] } + getAssumeRole() >> { 'test-role' } + getAccountId() >> { 'test' } + } def taskDefinition = new TaskDefinition().withTaskDefinitionArn("task-def-arn") def targetGroup = new TargetGroup().withLoadBalancerArns("loadbalancer-arn").withTargetGroupArn('target-group-arn') - def service = new Service(serviceName: "${serviceName}") + def service = new Service(serviceName: "${serviceName}-v008") + + def source = new CreateServerGroupDescription.Source() + + def setup() { + source.account = "test" + source.region = "us-west-1" + source.asgName = "${serviceName}-v007" + source.useSourceCapacity = true + amazonClientProvider.getAmazonEcs(_, _, _) >> ecs + amazonClientProvider.getAmazonIdentityManagement(_, _, _) >> iamClient + amazonClientProvider.getAmazonElasticLoadBalancingV2(_, _, _) >> loadBalancingV2 + amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoScalingClient + containerInformationService.getClusterName(_, _, _) >> 'cluster-name' + credentialsRepository.getOne(_) >> creds + } def 'should create a service'() { given: + def placementConstraint = new PlacementConstraint(type: 'memberOf', expression: 'attribute:ecs.instance-type =~ t2.*') + + def placementStrategy = new PlacementStrategy(type: 'spread', field: 'attribute:ecs.availability-zone') + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), application: applicationName, stack: stack, freeFormDetails: detail, @@ -77,12 +113,14 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { targetGroup: 'target-group-arn', portProtocol: 'tcp', computeUnits: 9001, + tags: ['label1': 'value1', 'fruit': 'tomato'], reservedMemory: 9002, dockerImageAddress: 'docker-image-url', capacity: new ServerGroup.Capacity(1, 1, 1), availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], - autoscalingPolicies: [], - placementStrategySequence: [] + placementStrategySequence: [placementStrategy], + placementConstraints: [placementConstraint], + source: source ) def operation = new CreateServerGroupAtomicOperation(description) @@ -90,59 +128,102 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { operation.amazonClientProvider = amazonClientProvider operation.ecsCloudMetricService = Mock(EcsCloudMetricService) operation.iamPolicyReader = iamPolicyReader - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService - amazonClientProvider.getAmazonEcs(_, _, _) >> ecs - amazonClientProvider.getAmazonIdentityManagement(_, _, _) >> iamClient - amazonClientProvider.getAmazonElasticLoadBalancingV2(_, _, _) >> loadBalancingV2 - amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoScalingClient - containerInformationService.getClusterName(_, _, _) >> 'cluster-name' - 
accountCredentialsProvider.getCredentials(_) >> creds - when: def result = operation.operate([]) then: - 1 * ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") - - 1 * ecs.registerTaskDefinition({RegisterTaskDefinitionRequest request -> - request.containerDefinitions.size() == 1 - request.containerDefinitions.get(0).memoryReservation == 9002 - request.containerDefinitions.get(0).cpu == 9001 - request.containerDefinitions.get(0).portMappings.size() == 1 - request.containerDefinitions.get(0).portMappings.get(0).containerPort == 1337 - request.containerDefinitions.get(0).portMappings.get(0).hostPort == 0 - request.containerDefinitions.get(0).portMappings.get(0).protocol == 'tcp' - request.containerDefinitions.get(0).image == 'docker-image-url' - request.taskRoleArn == 'test-role' - }) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) - - 1 * iamClient.getRole(_) >> new GetRoleResult().withRole(role) - 1 * iamPolicyReader.getTrustedEntities(_) >> trustRelationships - 1 * loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.TaskLongArnFormat, value: "enabled"), + new Setting(name: SettingName.ServiceLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() - 1 * ecs.createService({ CreateServiceRequest request -> - request.serviceName.startsWith(serviceName) - request.desiredCount == 1 - request.cluster = 'test-cluster' + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' request.loadBalancers.size() == 1 - request.loadBalancers.get(0).containerPort == 1337 request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' - request.taskDefinition == 'task-def-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.serviceRegistries == [] + request.desiredCount == 3 + request.role == null + request.placementConstraints.size() == 1 + request.placementConstraints.get(0).type == 'memberOf' + request.placementConstraints.get(0).expression == 'attribute:ecs.instance-type =~ t2.*' + request.placementStrategy.size() == 1 + request.placementStrategy.get(0).type == 'spread' + request.placementStrategy.get(0).field == 'attribute:ecs.availability-zone' request.networkConfiguration == null - request.role == 'arn:aws:iam::test:test-role' - } as CreateServiceRequest) >> new CreateServiceResult().withService(service) + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == true + request.propagateTags == PropagateTags.SERVICE.toString() + 
request.tags.size() == 2 + request.tags.get(0).key == 'label1' + request.tags.get(0).value == 'value1' + request.tags.get(1).key == 'fruit' + request.tags.get(1).value == 'tomato' + request.launchType == null + request.platformVersion == null + request.enableExecuteCommand == false + request.deploymentConfiguration.deploymentCircuitBreaker.enable == false + }) >> new CreateServiceResult().withService(service) result.getServerGroupNames().size() == 1 result.getServerGroupNameByRegion().size() == 1 - result.getServerGroupNames().contains("us-west-1:" + serviceName) + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") result.getServerGroupNameByRegion().containsKey('us-west-1') - result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName) + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + + 1 * autoScalingClient.registerScalableTarget(_) >> { arguments -> + RegisterScalableTargetRequest request = arguments.get(0) + assert request.serviceNamespace == ServiceNamespace.Ecs.toString() + assert request.scalableDimension == ScalableDimension.EcsServiceDesiredCount.toString() + assert request.resourceId == "service/test-cluster/${serviceName}-v008" + assert request.roleARN == null + assert request.minCapacity == 2 + assert request.maxCapacity == 4 + } + + autoScalingClient.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withResourceId("service/test-cluster/${serviceName}-v007") + .withMinCapacity(2) + .withMaxCapacity(4)) + + 1 * operation.ecsCloudMetricService.copyScalingPolicies( + "Test", + "us-west-1", + "${serviceName}-v008", + "service/test-cluster/${serviceName}-v008", + "test", + "us-west-1", + "${serviceName}-v007", + "service/test-cluster/${serviceName}-v007", + "test-cluster" + ) } def 'should create a service using VPC and Fargate mode'() { given: + def serviceRegistry = new CreateServerGroupDescription.ServiceDiscoveryAssociation( + registry: new CreateServerGroupDescription.ServiceRegistry(arn: 'srv-registry-arn'), + containerPort: 9090 + ) def description = new CreateServerGroupDescription( credentials: TestCredential.named('Test', [:]), application: applicationName, @@ -158,13 +239,16 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { dockerImageAddress: 'docker-image-url', capacity: new ServerGroup.Capacity(1, 1, 1), availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], - autoscalingPolicies: [], placementStrategySequence: [], launchType: 'FARGATE', + platformVersion: '1.0.0', networkMode: 'awsvpc', subnetType: 'public', securityGroupNames: ['helloworld'], - associatePublicIpAddress: true + associatePublicIpAddress: true, + serviceDiscoveryAssociations: [serviceRegistry], + enableExecuteCommand: true, + enableDeploymentCircuitBreaker: true ) def operation = new CreateServerGroupAtomicOperation(description) @@ -172,19 +256,119 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { operation.amazonClientProvider = amazonClientProvider operation.ecsCloudMetricService = Mock(EcsCloudMetricService) operation.iamPolicyReader = iamPolicyReader - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService operation.subnetSelector = subnetSelector operation.securityGroupSelector = securityGroupSelector - amazonClientProvider.getAmazonEcs(_, _, _) >> 
ecs - amazonClientProvider.getAmazonIdentityManagement(_, _, _) >> iamClient - amazonClientProvider.getAmazonElasticLoadBalancingV2(_, _, _) >> loadBalancingV2 - amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoScalingClient - containerInformationService.getClusterName(_, _, _) >> 'cluster-name' - accountCredentialsProvider.getCredentials(_) >> creds + subnetSelector.resolveSubnetsIdsForMultipleSubnetTypes(_, _, _, _) >> ['subnet-12345'] + subnetSelector.getSubnetVpcIds(_, _, _) >> ['vpc-123'] + securityGroupSelector.resolveSecurityGroupNames(_, _, _, _) >> ['sg-12345'] + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult() + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + request.include == ["TAGS"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + + iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 1 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.serviceRegistries.size() == 1 + request.serviceRegistries.get(0) == new ServiceRegistry( + registryArn: 'srv-registry-arn', + containerPort: 9090, + containerName: 'v008' + ) + request.desiredCount == 1 + request.role == null + request.placementStrategy == [] + request.placementConstraints == [] + request.networkConfiguration.awsvpcConfiguration.subnets == ['subnet-12345'] + request.networkConfiguration.awsvpcConfiguration.securityGroups == ['sg-12345'] + request.networkConfiguration.awsvpcConfiguration.assignPublicIp == 'ENABLED' + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == null + request.propagateTags == null + request.tags == [] + request.launchType == 'FARGATE' + request.platformVersion == '1.0.0' + request.enableExecuteCommand == true + request.deploymentConfiguration.deploymentCircuitBreaker.enable == true + } as CreateServiceRequest) >> new CreateServiceResult().withService(service) + + result.getServerGroupNames().size() == 1 + result.getServerGroupNameByRegion().size() == 1 + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") + result.getServerGroupNameByRegion().containsKey('us-west-1') + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + } + + def 'should create a service using VPC and FARGATE Capacity Provider Strategy'() { + given: + def serviceRegistry = new CreateServerGroupDescription.ServiceDiscoveryAssociation( + registry: new CreateServerGroupDescription.ServiceRegistry(arn: 'srv-registry-arn'), + containerPort: 9090 + ) + def capacityProviderStrategy = new CapacityProviderStrategyItem( 
+ capacityProvider: 'FARGATE', + weight: 1 + ) + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + iamRole: 'test-role', + containerPort: 1337, + targetGroup: 'target-group-arn', + portProtocol: 'tcp', + computeUnits: 9001, + reservedMemory: 9002, + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + placementStrategySequence: [], + capacityProviderStrategy: [capacityProviderStrategy], + platformVersion: '1.0.0', + networkMode: 'awsvpc', + subnetType: 'public', + securityGroupNames: ['helloworld'], + associatePublicIpAddress: true, + serviceDiscoveryAssociations: [serviceRegistry] + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + operation.subnetSelector = subnetSelector + operation.securityGroupSelector = securityGroupSelector - subnetSelector.resolveSubnetsIds(_, _, _) >> ['subnet-12345'] + subnetSelector.resolveSubnetsIdsForMultipleSubnetTypes(_, _, _, _) >> ['subnet-12345'] subnetSelector.getSubnetVpcIds(_, _, _) >> ['vpc-123'] securityGroupSelector.resolveSecurityGroupNames(_, _, _, _) >> ['sg-12345'] @@ -192,38 +376,1579 @@ class CreateServerGroupAtomicOperationSpec extends CommonAtomicOperation { def result = operation.operate([]) then: - 1 * ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.listAccountSettings(_) >> new ListAccountSettingsResult() + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() - 1 * ecs.registerTaskDefinition({RegisterTaskDefinitionRequest request -> - request.networkMode == 'awsvpc' - request.containerDefinitions.size() == 1 - request.containerDefinitions.get(0).portMappings.size() == 1 - request.containerDefinitions.get(0).portMappings.get(0).containerPort == 1337 - request.containerDefinitions.get(0).portMappings.get(0).hostPort == 0 - request.containerDefinitions.get(0).portMappings.get(0).protocol == 'tcp' - request.requiresCompatibilities.size() == 1 - request.requiresCompatibilities.get(0) == 'FARGATE' - request.memory == 9001 - request.cpu == 9002 - request.executionRoleArn == 'arn:aws:iam::test:test-role' - }) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) - 1 * iamClient.getRole(_) >> new GetRoleResult().withRole(role) - 1 * iamPolicyReader.getTrustedEntities(_) >> trustRelationships - 1 * loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + 
loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) - 1 * ecs.createService({ CreateServiceRequest request -> + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 1 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.serviceRegistries.size() == 1 + request.serviceRegistries.get(0) == new ServiceRegistry( + registryArn: 'srv-registry-arn', + containerPort: 9090, + containerName: 'v008' + ) + request.desiredCount == 1 + request.role == null + request.placementStrategy == [] + request.placementConstraints == [] request.networkConfiguration.awsvpcConfiguration.subnets == ['subnet-12345'] request.networkConfiguration.awsvpcConfiguration.securityGroups == ['sg-12345'] request.networkConfiguration.awsvpcConfiguration.assignPublicIp == 'ENABLED' + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == null + request.propagateTags == null + request.tags == [] + request.capacityProviderStrategy == [capacityProviderStrategy] + request.platformVersion == '1.0.0' + } as CreateServiceRequest) >> new CreateServiceResult().withService(service) + + result.getServerGroupNames().size() == 1 + result.getServerGroupNameByRegion().size() == 1 + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") + result.getServerGroupNameByRegion().containsKey('us-west-1') + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + } + + def 'should create a service using multiple subnet types'() { + given: + def serviceRegistry = new CreateServerGroupDescription.ServiceDiscoveryAssociation( + registry: new CreateServerGroupDescription.ServiceRegistry(arn: 'srv-registry-arn'), + containerPort: 9090 + ) + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + iamRole: 'test-role', + containerPort: 1337, + targetGroup: 'target-group-arn', + portProtocol: 'tcp', + computeUnits: 9001, + reservedMemory: 9002, + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + placementStrategySequence: [], + launchType: 'FARGATE', + platformVersion: '1.0.0', + networkMode: 'awsvpc', + subnetTypes: ['public-az1', 'public-az2'], + securityGroupNames: ['helloworld'], + associatePublicIpAddress: true, + serviceDiscoveryAssociations: [serviceRegistry] + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + operation.subnetSelector = subnetSelector + operation.securityGroupSelector = securityGroupSelector + + subnetSelector.resolveSubnetsIdsForMultipleSubnetTypes(_, _, _, _) >> ['subnet-12345', 'subnet-23456'] + subnetSelector.getSubnetVpcIds(_, _, _) >> ['vpc-123'] + securityGroupSelector.resolveSecurityGroupNames(_, _, _, _) >> ['sg-12345'] + + 
when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult() + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + + iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 1 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.serviceRegistries.size() == 1 + request.serviceRegistries.get(0) == new ServiceRegistry( + registryArn: 'srv-registry-arn', + containerPort: 9090, + containerName: 'v008' + ) + request.desiredCount == 1 request.role == null + request.placementStrategy == [] + request.placementConstraints == [] + request.networkConfiguration.awsvpcConfiguration.subnets == ['subnet-12345', 'subnet-23456'] + request.networkConfiguration.awsvpcConfiguration.securityGroups == ['sg-12345'] + request.networkConfiguration.awsvpcConfiguration.assignPublicIp == 'ENABLED' + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == null + request.propagateTags == null + request.tags == [] request.launchType == 'FARGATE' + request.platformVersion == '1.0.0' } as CreateServiceRequest) >> new CreateServiceResult().withService(service) result.getServerGroupNames().size() == 1 result.getServerGroupNameByRegion().size() == 1 - result.getServerGroupNames().contains("us-west-1:" + serviceName) + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") result.getServerGroupNameByRegion().containsKey('us-west-1') - result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName) + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + } + + def 'should create services without load balancers'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getTargetGroup() >> null + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeServiceRequest('task-def-arn', + new EcsServerGroupName('mygreatapp-stack1-details2-v011'), + 1, new EcsDefaultNamer(), false) + + then: + request.getLoadBalancers() == [] + request.getRole() == null + } + + def 'should create default Docker labels'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getDockerLabels() >> null + + def operation = new 
CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + def labels = request.getContainerDefinitions().get(0).getDockerLabels() + labels.get(DOCKER_LABEL_KEY_SERVERGROUP) == 'mygreatapp-stack1-details2-v011' + labels.get(CreateServerGroupAtomicOperation.DOCKER_LABEL_KEY_STACK) == 'stack1' + labels.get(CreateServerGroupAtomicOperation.DOCKER_LABEL_KEY_DETAIL) == 'details2' + } + + def 'should create custom Docker labels'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getDockerLabels() >> ['label1': 'value1', 'fruit':'tomato'] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + def labels = request.getContainerDefinitions().get(0).getDockerLabels() + labels.get('label1') == 'value1' + labels.get('fruit') == 'tomato' + } + + def 'should not allow overwriting Spinnaker Docker labels'() { + given: + def description = Mock(CreateServerGroupDescription) + + def dockerLabels = [:] + dockerLabels.put(DOCKER_LABEL_KEY_SERVERGROUP, 'some-value-we-dont-want-to-see') + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getDockerLabels() >> dockerLabels + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + def labels = request.getContainerDefinitions().get(0).getDockerLabels() + labels.get(DOCKER_LABEL_KEY_SERVERGROUP) == 'mygreatapp-stack1-details2-v011' + labels.get(DOCKER_LABEL_KEY_SERVERGROUP) != 'some-value-we-dont-want-to-see' + } + + def 'should allow selecting the logDriver'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getLogDriver() >> 'some-log-driver' + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + request.getContainerDefinitions().get(0).getLogConfiguration().getLogDriver() == 'some-log-driver' + } + + def 'should allow empty logOptions'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getLogDriver() >> 'some-log-driver' + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + request.getContainerDefinitions().get(0).getLogConfiguration().getOptions() == null + } + + def 'should allow registering logOptions'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getLogDriver() >> 'some-log-driver' + def logOptions = ['key1': '1value', 'key2': 'value2'] + description.getLogOptions() >> logOptions + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new 
EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + request.getContainerDefinitions().get(0).getLogConfiguration().getOptions() == logOptions + } + + def 'should allow no port mappings'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getContainerPort() >> null + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + request.getContainerDefinitions().get(0).getPortMappings().isEmpty() + } + + def 'should allow using secret credentials for the docker image'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getDockerImageCredentialsSecret() >> 'my-secret' + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + request.getContainerDefinitions().get(0).getRepositoryCredentials().getCredentialsParameter() == 'my-secret' + } + + def 'should allow not specifying secret credentials for the docker image'() { + given: + def description = Mock(CreateServerGroupDescription) + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + request.getContainerDefinitions().get(0).getRepositoryCredentials() == null + } + + def 'should generate a RegisterTaskDefinitionRequest object'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'kcats' + description.getFreeFormDetails() >> 'liated' + description.getEcsClusterName() >> 'test-cluster' + description.getIamRole() >> 'None (No IAM role)' + description.getContainerPort() >> 1337 + description.getTargetGroup() >> 'target-group-arn' + description.getPortProtocol() >> 'tcp' + description.getComputeUnits() >> 9001 + description.getReservedMemory() >> 9001 + description.getDockerImageAddress() >> 'docker-image-url' + description.capacity = new ServerGroup.Capacity(1, 1, 1) + description.availabilityZones = ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']] + description.placementStrategySequence = [] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + RegisterTaskDefinitionRequest result = operation.makeTaskDefinitionRequest("test-role", new EcsServerGroupName('v1-kcats-liated-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-kcats-liated" + + result.getContainerDefinitions().size() == 1 + def containerDefinition = result.getContainerDefinitions().first() + containerDefinition.name == 'v001' + containerDefinition.image == 'docker-image-url' + containerDefinition.cpu == 9001 + containerDefinition.memoryReservation == 9001 + + containerDefinition.portMappings.size() == 1 + def portMapping = containerDefinition.portMappings.first() + portMapping.getHostPort() == 0 + portMapping.getContainerPort() == 1337 + portMapping.getProtocol() == 'tcp' + + containerDefinition.environment.size() == 3 + def environments = [:] + for(elem in containerDefinition.environment){ + environments.put(elem.getName(), elem.getValue()) + } + environments.get("SERVER_GROUP") == "v1-kcats-liated-v001" + environments.get("CLOUD_STACK") == "kcats" + 
environments.get("CLOUD_DETAIL") == "liated" + } + + def 'should generate a RegisterTaskDefinitionRequest object from artifact'() { + given: + def resolvedArtifact = Artifact.builder() + .name("taskdef.json") + .reference("fake.github.com/repos/org/repo/taskdef.json") + .artifactAccount("my-github-acct") + .type("github/file") + .build() + def containerDef1 = + new ContainerDefinition() + .withName("web") + .withImage("PLACEHOLDER") + .withMemoryReservation(512) + def containerDef2 = + new ContainerDefinition() + .withName("logs") + .withImage("PLACEHOLDER") + .withMemoryReservation(1024) + def registerTaskDefRequest = + new RegisterTaskDefinitionRequest() + .withContainerDefinitions([containerDef1, containerDef2]) + .withExecutionRoleArn("arn:aws:role/myExecutionRole") + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'ecs' + description.getFreeFormDetails() >> 'test' + description.getEcsClusterName() >> 'test-cluster' + description.getIamRole() >> 'None (No IAM role)' + description.getResolvedTaskDefinitionArtifact() >> resolvedArtifact + description.getContainerToImageMap() >> [ + web: "docker-image-url/one", + logs: "docker-image-url/two" + ] + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact("test-role", new EcsServerGroupName("v1-ecs-test-v001")) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "arn:aws:role/myExecutionRole" + + result.getContainerDefinitions().size() == 2 + + def webContainer = result.getContainerDefinitions().find {it.getName() == "web"} + assert webContainer != null + webContainer.image == "docker-image-url/one" + webContainer.memoryReservation == 512 + + def logsContainer = result.getContainerDefinitions().find {it.getName() == "logs"} + assert logsContainer != null + logsContainer.image == "docker-image-url/two" + logsContainer.memoryReservation == 1024 + + result.getContainerDefinitions().forEach({ + it.environment.size() == 3 + + def environments = [:] + for(elem in it.environment){ + environments.put(elem.getName(), elem.getValue()) + } + environments.get("SERVER_GROUP") == "v1-ecs-test-v001" + environments.get("CLOUD_STACK") == "ecs" + environments.get("CLOUD_DETAIL") == "test" + }) + } + + def 'should set spinnaker role on LaunchType FARGATE RegisterTaskDefinitionRequest if none in artifact'() { + given: + def resolvedArtifact = Artifact.builder() + .name("taskdef.json") + .reference("fake.github.com/repos/org/repo/taskdef.json") + .artifactAccount("my-github-acct") + .type("github/file") + .build() + def containerDef = + new ContainerDefinition() + .withName("web") + .withImage("PLACEHOLDER") + .withMemoryReservation(512) + def registerTaskDefRequest = + new RegisterTaskDefinitionRequest().withContainerDefinitions([containerDef]) + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'ecs' + description.getFreeFormDetails() >> 'test' + description.getEcsClusterName() >> 'test-cluster' + description.getIamRole() >> 'None (No IAM role)' + description.getLaunchType() >> 'FARGATE' + description.getResolvedTaskDefinitionArtifact() >> 
resolvedArtifact + description.getContainerToImageMap() >> [ + web: "docker-image-url" + ] + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact("test-role", new EcsServerGroupName('v1-ecs-test-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "test-role" + + result.getContainerDefinitions().size() == 1 + def containerDefinition = result.getContainerDefinitions().first() + containerDefinition.name == "web" + containerDefinition.image == "docker-image-url" + containerDefinition.memoryReservation == 512 + } + + def 'should set spinnaker role on CapacityProvider FARGATE RegisterTaskDefinitionRequest if none in artifact'() { + given: + def resolvedArtifact = Artifact.builder() + .name("taskdef.json") + .reference("fake.github.com/repos/org/repo/taskdef.json") + .artifactAccount("my-github-acct") + .type("github/file") + .build() + def containerDef = + new ContainerDefinition() + .withName("web") + .withImage("PLACEHOLDER") + .withMemoryReservation(512) + def capacityProviderStrategy = new CapacityProviderStrategyItem( + capacityProvider: 'FARGATE', + weight: 1 + ) + def registerTaskDefRequest = + new RegisterTaskDefinitionRequest().withContainerDefinitions([containerDef]) + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'ecs' + description.getFreeFormDetails() >> 'test' + description.getEcsClusterName() >> 'test-cluster' + description.getIamRole() >> 'None (No IAM role)' + description.getCapacityProviderStrategy() >> [capacityProviderStrategy] + description.getResolvedTaskDefinitionArtifact() >> resolvedArtifact + description.getContainerToImageMap() >> [ + web: "docker-image-url" + ] + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact("test-role", new EcsServerGroupName('v1-ecs-test-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "test-role" + + result.getContainerDefinitions().size() == 1 + def containerDefinition = result.getContainerDefinitions().first() + containerDefinition.name == "web" + containerDefinition.image == "docker-image-url" + containerDefinition.memoryReservation == 512 + } + + def 'should fail if network mode in artifact does not match description'() { + given: + def resolvedArtifact = Artifact.builder() + .name("taskdef.json") + .reference("fake.github.com/repos/org/repo/taskdef.json") + .artifactAccount("my-github-acct") + .type("github/file") + .build() + def registerTaskDefRequest = + new RegisterTaskDefinitionRequest() + .withContainerDefinitions([new ContainerDefinition()]) + .withNetworkMode("bridge") + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'ecs' + description.getFreeFormDetails() >> 'test' + description.getEcsClusterName() >> 
'test-cluster' + description.getLaunchType() >> 'FARGATE' + description.getNetworkMode() >> 'awsvpc' + description.getResolvedTaskDefinitionArtifact() >> resolvedArtifact + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + operation.makeTaskDefinitionRequestFromArtifact("test-role", new EcsServerGroupName('v1-ecs-test-v001')) + + then: + IllegalArgumentException exception = thrown() + exception.message == + "Task definition networkMode does not match server group value. Found 'bridge' but expected 'awsvpc'" + } + + def 'should set additional environment variables'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'kcats' + description.getFreeFormDetails() >> 'liated' + description.getEnvironmentVariables() >> ["ENVIRONMENT_1" : "test1", "ENVIRONMENT_2" : "test2"] + def operation = new CreateServerGroupAtomicOperation(description) + + when: + RegisterTaskDefinitionRequest result = operation.makeTaskDefinitionRequest("test-role", new EcsServerGroupName('v1-kcats-liated-v001')) + + then: + result.getContainerDefinitions().size() == 1 + def containerDefinition = result.getContainerDefinitions().first() + containerDefinition.environment.size() == 5 + def environments = [:] + for(elem in containerDefinition.environment){ + environments.put(elem.getName(), elem.getValue()) + } + environments.get("SERVER_GROUP") == "v1-kcats-liated-v001" + environments.get("CLOUD_STACK") == "kcats" + environments.get("CLOUD_DETAIL") == "liated" + environments.get("ENVIRONMENT_1") == "test1" + environments.get("ENVIRONMENT_2") == "test2" + } + + def 'should use same port for host and container in host mode'() { + given: + def description = Mock(CreateServerGroupDescription) + description.getTargetGroup() >> 'target-group-arn' + description.getContainerPort() >> 10000 + description.getNetworkMode() >> 'host' + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeTaskDefinitionRequest('arn:aws:iam::test:test-role', new EcsServerGroupName('mygreatapp-stack1-details2-v011')) + + then: + def portMapping = request.getContainerDefinitions().get(0).getPortMappings().get(0) + portMapping.getHostPort() == 10000 + portMapping.getContainerPort() == 10000 + portMapping.getProtocol() == 'tcp' + } + + def 'create a service with the same TargetGroupMappings and deprecated target group properties'() { + given: + def targetGroupProperty = new CreateServerGroupDescription.TargetGroupProperties( + containerPort: 1337, + containerName: 'v008', + targetGroup: 'target-group-arn' + ) + + def placementConstraint = new PlacementConstraint(type: 'memberOf', expression: 'attribute:ecs.instance-type =~ t2.*') + + def placementStrategy = new PlacementStrategy(type: 'spread', field: 'attribute:ecs.availability-zone') + + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + iamRole: 'test-role', + containerPort: 1337, + targetGroup: 'target-group-arn', + targetGroupMappings: [targetGroupProperty], + portProtocol: 'tcp', + computeUnits: 9001, + tags: ['label1': 'value1', 'fruit': 'tomato'], + reservedMemory: 9002, + 
dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + placementStrategySequence: [placementStrategy], + placementConstraints: [placementConstraint], + source: source + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.TaskLongArnFormat, value: "enabled"), + new Setting(name: SettingName.ServiceLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 1 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.serviceRegistries == [] + request.desiredCount == 3 + request.role == null + request.placementConstraints.size() == 1 + request.placementConstraints.get(0).type == 'memberOf' + request.placementConstraints.get(0).expression == 'attribute:ecs.instance-type =~ t2.*' + request.placementStrategy.size() == 1 + request.placementStrategy.get(0).type == 'spread' + request.placementStrategy.get(0).field == 'attribute:ecs.availability-zone' + request.networkConfiguration == null + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == true + request.propagateTags == 'SERVICE' + request.tags.size() == 2 + request.tags.get(0).key == 'label1' + request.tags.get(0).value == 'value1' + request.tags.get(1).key == 'fruit' + request.tags.get(1).value == 'tomato' + request.launchType == null + request.platformVersion == null + }) >> new CreateServiceResult().withService(service) + + result.getServerGroupNames().size() == 1 + result.getServerGroupNameByRegion().size() == 1 + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") + result.getServerGroupNameByRegion().containsKey('us-west-1') + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + + 1 * autoScalingClient.registerScalableTarget(_) >> { arguments -> + RegisterScalableTargetRequest request = arguments.get(0) + assert request.serviceNamespace == ServiceNamespace.Ecs.toString() + assert 
request.scalableDimension == ScalableDimension.EcsServiceDesiredCount.toString() + assert request.resourceId == "service/test-cluster/${serviceName}-v008" + assert request.roleARN == null + assert request.minCapacity == 2 + assert request.maxCapacity == 4 + } + + autoScalingClient.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withResourceId("service/test-cluster/${serviceName}-v007") + .withMinCapacity(2) + .withMaxCapacity(4)) + + 1 * operation.ecsCloudMetricService.copyScalingPolicies( + "Test", + "us-west-1", + "${serviceName}-v008", + "service/test-cluster/${serviceName}-v008", + "test", + "us-west-1", + "${serviceName}-v007", + "service/test-cluster/${serviceName}-v007", + "test-cluster" + ) + } + + def 'create a service with different TargetGroupMappings and deprecated target group properties'() { + given: + def targetGroupProperty = new CreateServerGroupDescription.TargetGroupProperties( + containerPort: 80, + containerName: 'v009', + targetGroup: 'target-group-arn' + ) + + def placementConstraint = new PlacementConstraint(type: 'memberOf', expression: 'attribute:ecs.instance-type =~ t2.*') + + def placementStrategy = new PlacementStrategy(type: 'spread', field: 'attribute:ecs.availability-zone') + + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + iamRole: 'test-role', + containerPort: 1337, + targetGroup: 'target-group-arn', + targetGroupMappings: [targetGroupProperty], + portProtocol: 'tcp', + computeUnits: 9001, + tags: ['label1': 'value1', 'fruit': 'tomato'], + reservedMemory: 9002, + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + placementStrategySequence: [placementStrategy], + placementConstraints: [placementConstraint], + source: source + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.TaskLongArnFormat, value: "enabled"), + new Setting(name: SettingName.ServiceLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + 
request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 2 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.loadBalancers.get(1).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(1).containerName == 'v009' + request.loadBalancers.get(1).containerPort == 80 + request.serviceRegistries == [] + request.desiredCount == 3 + request.role == null + request.placementConstraints.size() == 1 + request.placementConstraints.get(0).type == 'memberOf' + request.placementConstraints.get(0).expression == 'attribute:ecs.instance-type =~ t2.*' + request.placementStrategy.size() == 1 + request.placementStrategy.get(0).type == 'spread' + request.placementStrategy.get(0).field == 'attribute:ecs.availability-zone' + request.networkConfiguration == null + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == true + request.propagateTags == 'SERVICE' + request.tags.size() == 2 + request.tags.get(0).key == 'label1' + request.tags.get(0).value == 'value1' + request.tags.get(1).key == 'fruit' + request.tags.get(1).value == 'tomato' + request.launchType == null + request.platformVersion == null + }) >> new CreateServiceResult().withService(service) + + result.getServerGroupNames().size() == 1 + result.getServerGroupNameByRegion().size() == 1 + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") + result.getServerGroupNameByRegion().containsKey('us-west-1') + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + + 1 * autoScalingClient.registerScalableTarget(_) >> { arguments -> + RegisterScalableTargetRequest request = arguments.get(0) + assert request.serviceNamespace == ServiceNamespace.Ecs.toString() + assert request.scalableDimension == ScalableDimension.EcsServiceDesiredCount.toString() + assert request.resourceId == "service/test-cluster/${serviceName}-v008" + assert request.roleARN == null + assert request.minCapacity == 2 + assert request.maxCapacity == 4 + } + + autoScalingClient.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withResourceId("service/test-cluster/${serviceName}-v007") + .withMinCapacity(2) + .withMaxCapacity(4)) + + 1 * operation.ecsCloudMetricService.copyScalingPolicies( + "Test", + "us-west-1", + "${serviceName}-v008", + "service/test-cluster/${serviceName}-v008", + "test", + "us-west-1", + "${serviceName}-v007", + "service/test-cluster/${serviceName}-v007", + "test-cluster" + ) + } + + def 'create a service with TargetGroupMappings'() { + given: + def targetGroupProperty = new CreateServerGroupDescription.TargetGroupProperties( + containerPort: 80, + containerName: 'v009', + targetGroup: 'target-group-arn' + ) + + def placementConstraint = new PlacementConstraint(type: 'memberOf', expression: 'attribute:ecs.instance-type =~ t2.*') + + def placementStrategy = new PlacementStrategy(type: 'spread', field: 'attribute:ecs.availability-zone') + + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + iamRole: 'test-role', + targetGroupMappings: [targetGroupProperty], + portProtocol: 'tcp', + computeUnits: 9001, + tags: ['label1': 'value1', 'fruit': 'tomato'], + reservedMemory: 
9002, + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + placementStrategySequence: [placementStrategy], + placementConstraints: [placementConstraint], + source: source + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.TaskLongArnFormat, value: "enabled"), + new Setting(name: SettingName.ServiceLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + + 1 * ecs.registerTaskDefinition(_) >> { arguments -> + RegisterTaskDefinitionRequest request = arguments.get(0) + assert request.getTaskRoleArn() == "arn:aws:iam::test:path/test-role" + new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + } + + iamClient.getRole(_) >> new GetRoleResult().withRole(role.withArn("arn:aws:iam::test:path/test-role")) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 1 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v009' + request.loadBalancers.get(0).containerPort == 80 + request.serviceRegistries == [] + request.desiredCount == 3 + request.role == null + request.placementConstraints.size() == 1 + request.placementConstraints.get(0).type == 'memberOf' + request.placementConstraints.get(0).expression == 'attribute:ecs.instance-type =~ t2.*' + request.placementStrategy.size() == 1 + request.placementStrategy.get(0).type == 'spread' + request.placementStrategy.get(0).field == 'attribute:ecs.availability-zone' + request.networkConfiguration == null + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == true + request.propagateTags == 'SERVICE' + request.tags.size() == 2 + request.tags.get(0).key == 'label1' + request.tags.get(0).value == 'value1' + request.tags.get(1).key == 'fruit' + request.tags.get(1).value == 'tomato' + request.launchType == null + request.platformVersion == null + }) >> new CreateServiceResult().withService(service) + + result.getServerGroupNames().size() == 1 + result.getServerGroupNameByRegion().size() == 1 + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") + result.getServerGroupNameByRegion().containsKey('us-west-1') + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + + 1 * 
autoScalingClient.registerScalableTarget(_) >> { arguments -> + RegisterScalableTargetRequest request = arguments.get(0) + assert request.serviceNamespace == ServiceNamespace.Ecs.toString() + assert request.scalableDimension == ScalableDimension.EcsServiceDesiredCount.toString() + assert request.resourceId == "service/test-cluster/${serviceName}-v008" + assert request.roleARN == null + assert request.minCapacity == 2 + assert request.maxCapacity == 4 + } + + autoScalingClient.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withResourceId("service/test-cluster/${serviceName}-v007") + .withMinCapacity(2) + .withMaxCapacity(4)) + + 1 * operation.ecsCloudMetricService.copyScalingPolicies( + "Test", + "us-west-1", + "${serviceName}-v008", + "service/test-cluster/${serviceName}-v008", + "test", + "us-west-1", + "${serviceName}-v007", + "service/test-cluster/${serviceName}-v007", + "test-cluster" + ) + } + + def 'create a service with multiple TargetGroupMappings'() { + given: + def originalTargetGroupProperty = new CreateServerGroupDescription.TargetGroupProperties( + containerPort: 80, + containerName: 'v009', + targetGroup: 'target-group-arn' + ) + + def newTargetGroupProperty = new CreateServerGroupDescription.TargetGroupProperties( + containerPort: 1337, + containerName: 'v008', + targetGroup: 'target-group-arn' + ) + + def placementConstraint = new PlacementConstraint(type: 'memberOf', expression: 'attribute:ecs.instance-type =~ t2.*') + + def placementStrategy = new PlacementStrategy(type: 'spread', field: 'attribute:ecs.availability-zone') + + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + iamRole: 'test-role', + targetGroupMappings: [originalTargetGroupProperty, newTargetGroupProperty], + portProtocol: 'tcp', + computeUnits: 9001, + tags: ['label1': 'value1', 'fruit': 'tomato'], + reservedMemory: 9002, + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + placementStrategySequence: [placementStrategy], + placementConstraints: [placementConstraint], + source: source + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.TaskLongArnFormat, value: "enabled"), + new Setting(name: SettingName.ServiceLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + 
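// stub the IAM role and target group lookups consumed by createService() below + 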
iamClient.getRole(_) >> new GetRoleResult().withRole(role) + iamPolicyReader.getTrustedEntities(_) >> trustRelationships + loadBalancingV2.describeTargetGroups(_) >> new DescribeTargetGroupsResult().withTargetGroups(targetGroup) + ecs.createService({ CreateServiceRequest request -> + request.cluster == 'test-cluster' + request.serviceName == 'myapp-kcats-liated-v008' + request.taskDefinition == 'task-def-arn' + request.loadBalancers.size() == 2 + request.loadBalancers.get(0).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(0).containerName == 'v008' + request.loadBalancers.get(0).containerPort == 1337 + request.loadBalancers.get(1).targetGroupArn == 'target-group-arn' + request.loadBalancers.get(1).containerName == 'v009' + request.loadBalancers.get(1).containerPort == 80 + request.serviceRegistries == [] + request.desiredCount == 3 + request.role == null + request.placementConstraints.size() == 1 + request.placementConstraints.get(0).type == 'memberOf' + request.placementConstraints.get(0).expression == 'attribute:ecs.instance-type =~ t2.*' + request.placementStrategy.size() == 1 + request.placementStrategy.get(0).type == 'spread' + request.placementStrategy.get(0).field == 'attribute:ecs.availability-zone' + request.networkConfiguration == null + request.healthCheckGracePeriodSeconds == null + request.enableECSManagedTags == true + request.propagateTags == 'SERVICE' + request.tags.size() == 2 + request.tags.get(0).key == 'label1' + request.tags.get(0).value == 'value1' + request.tags.get(1).key == 'fruit' + request.tags.get(1).value == 'tomato' + request.launchType == null + request.platformVersion == null + }) >> new CreateServiceResult().withService(service) + + result.getServerGroupNames().size() == 1 + result.getServerGroupNameByRegion().size() == 1 + result.getServerGroupNames().contains("us-west-1:" + serviceName + "-v008") + result.getServerGroupNameByRegion().containsKey('us-west-1') + result.getServerGroupNameByRegion().get('us-west-1').contains(serviceName + "-v008") + + 1 * autoScalingClient.registerScalableTarget(_) >> { arguments -> + RegisterScalableTargetRequest request = arguments.get(0) + assert request.serviceNamespace == ServiceNamespace.Ecs.toString() + assert request.scalableDimension == ScalableDimension.EcsServiceDesiredCount.toString() + assert request.resourceId == "service/test-cluster/${serviceName}-v008" + assert request.roleARN == null + assert request.minCapacity == 2 + assert request.maxCapacity == 4 + } + + autoScalingClient.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withResourceId("service/test-cluster/${serviceName}-v007") + .withMinCapacity(2) + .withMaxCapacity(4)) + + 1 * operation.ecsCloudMetricService.copyScalingPolicies( + "Test", + "us-west-1", + "${serviceName}-v008", + "service/test-cluster/${serviceName}-v008", + "test", + "us-west-1", + "${serviceName}-v007", + "service/test-cluster/${serviceName}-v007", + "test-cluster" + ) + } + + def 'should create no tags by default'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getTags() >> null + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-stack1-details2-v011'), 1, new EcsDefaultNamer(), false) + + then: + assert 
request.enableECSManagedTags == null + assert request.propagateTags == null + def tags = request.getTags() + assert tags.isEmpty() + } + + def 'should create moniker tags if enabled and no other tags'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getTags() >> null + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-stack1-details2-v011'), 1, new EcsTagNamer(), true) + + then: + assert request.enableECSManagedTags == true + assert request.propagateTags == PropagateTags.SERVICE.toString() + def tags = request.getTags() + assert tags.size() == 5 + tags.contains(new Tag(key: EcsTagNamer.APPLICATION, value: 'mygreatapp')) + tags.contains(new Tag(key: EcsTagNamer.CLUSTER, value: 'mygreatapp-stack1-details2')) + tags.contains(new Tag(key: EcsTagNamer.STACK, value: 'stack1')) + tags.contains(new Tag(key: EcsTagNamer.DETAIL, value: 'details2')) + tags.contains(new Tag(key: EcsTagNamer.SEQUENCE, value: '11')) + } + + def 'should create custom tags if moniker not enabled'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getTags() >> ['label1': 'value1', 'fruit':'tomato'] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-stack1-details2-v011'), 1, new EcsDefaultNamer(), true) + + then: + assert request.enableECSManagedTags == true + assert request.propagateTags == 'SERVICE' + def tags = request.getTags() + assert tags.size() == 2 + tags.contains(new Tag(key: 'label1', value: 'value1')) + tags.contains(new Tag(key: 'fruit', value: 'tomato')) + } + + def 'should create custom tags and moniker tags if moniker enabled'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getTags() >> ['label1': 'value1', 'fruit':'tomato'] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-stack1-details2-v011'), 1, new EcsTagNamer(), true) + + then: + assert request.enableECSManagedTags == true + assert request.propagateTags == 'SERVICE' + def tags = request.getTags() + assert tags.size() == 7 + tags.contains(new Tag(key: 'label1', value: 'value1')) + tags.contains(new Tag(key: 'fruit', value: 'tomato')) + tags.contains(new Tag(key: EcsTagNamer.APPLICATION, value: 'mygreatapp')) + tags.contains(new Tag(key: EcsTagNamer.CLUSTER, value: 'mygreatapp-stack1-details2')) + tags.contains(new Tag(key: EcsTagNamer.STACK, value: 'stack1')) + tags.contains(new Tag(key: EcsTagNamer.DETAIL, value: 'details2')) + tags.contains(new Tag(key: EcsTagNamer.SEQUENCE, value: '11')) + } + + def 'should fail to create service with custom tags and moniker tags if tags disabled'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + 
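// custom tags are stubbed here, but makeServiceRequest is invoked with tagging disabled below, so an IllegalArgumentException is expected + 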
description.getTags() >> ['label1': 'value1', 'fruit':'tomato'] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-stack1-details2-v011'), 1, new EcsTagNamer(), false) + + then: + IllegalArgumentException ex = thrown() + ex.message == "ECS account settings for account null do not allow tagging as `serviceLongArnFormat` and `taskLongArnFormat` are not enabled." + } + + def 'should fail to create service with custom tags and no moniker tags if tags disabled'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getStack() >> 'stack1' + description.getFreeFormDetails() >> 'details2' + description.getTags() >> ['label1': 'value1', 'fruit':'tomato'] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-stack1-details2-v011'), 1, new EcsDefaultNamer(), false) + + then: + IllegalArgumentException ex = thrown() + ex.message == "ECS account settings for account null do not allow tagging as `serviceLongArnFormat` and `taskLongArnFormat` are not enabled." + } + + def 'should not create tags with duplicate keys'() { + given: + def description = Mock(CreateServerGroupDescription) + + description.getApplication() >> 'mygreatapp' + description.getTags() >> ['label1': 'value2', 'label1': 'value1'] + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + def request = operation.makeServiceRequest("taskDefArn", new EcsServerGroupName('mygreatapp-v011'), 1, new EcsDefaultNamer(), true) + + then: + assert request.enableECSManagedTags == true + assert request.propagateTags == 'SERVICE' + def tags = request.getTags() + assert tags.size() == 1 + tags.contains(new Tag(key: 'label1', value: 'value1')) + } + + def 'should fail to create service with tags if task ARN format is not updated'() { + given: + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + tags: ['label1': 'value1', 'fruit': 'tomato'], + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.ServiceLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + ecs.registerTaskDefinition(_) >> new 
RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + + IllegalArgumentException ex = thrown() + ex.message == "ECS account settings for account Test do not allow tagging as `serviceLongArnFormat` and `taskLongArnFormat` are not enabled." + } + + def 'should fail to create service with tags if service ARN format is not updated'() { + given: + def description = new CreateServerGroupDescription( + credentials: TestCredential.named('Test', [:]), + application: applicationName, + stack: stack, + freeFormDetails: detail, + ecsClusterName: 'test-cluster', + tags: ['label1': 'value1', 'fruit': 'tomato'], + dockerImageAddress: 'docker-image-url', + capacity: new ServerGroup.Capacity(1, 1, 1), + availabilityZones: ['us-west-1': ['us-west-1a', 'us-west-1b', 'us-west-1c']], + ) + + def operation = new CreateServerGroupAtomicOperation(description) + + operation.amazonClientProvider = amazonClientProvider + operation.ecsCloudMetricService = Mock(EcsCloudMetricService) + operation.iamPolicyReader = iamPolicyReader + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + when: + def result = operation.operate([]) + + then: + ecs.listAccountSettings(_) >> new ListAccountSettingsResult().withSettings( + new Setting(name: SettingName.TaskLongArnFormat, value: "enabled") + ) + ecs.listServices(_) >> new ListServicesResult().withServiceArns("${serviceName}-v007") + ecs.describeServices({DescribeServicesRequest request -> + request.cluster == 'test-cluster' + request.services == ["${serviceName}-v007"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "${serviceName}-v007", createdAt: new Date(), desiredCount: 3)) + ecs.describeServices(_) >> new DescribeServicesResult() + ecs.registerTaskDefinition(_) >> new RegisterTaskDefinitionResult().withTaskDefinition(taskDefinition) + + IllegalArgumentException ex = thrown() + ex.message == "ECS account settings for account Test do not allow tagging as `serviceLongArnFormat` and `taskLongArnFormat` are not enabled."
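+ // as with the task ARN test above, enabling only one of the two long-ARN settings is not sufficient for tagging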
+ } + + def 'should return valid task execution role arn for non-china and non-gov-cloud partition'() { + given: + def credentials = Mock(NetflixAssumeRoleAmazonCredentials) { + getName() >> { "test" } + getRegions() >> { [new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1a', 'us-west-1b'])] } + getAssumeRole() >> { 'role/test-role' } + getAccountId() >> { 'test' } + } + def resolvedArtifact = createResolvedArtifact() + def containerDef = createContainerDef() + def registerTaskDefRequest = createRegisterTaskDefRequest(containerDef) + def description = createDescription(resolvedArtifact); + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + String ecsServiceRole = operation.inferAssumedRoleArn(credentials); + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact(ecsServiceRole, new EcsServerGroupName('v1-ecs-test-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "arn:aws:iam::test:role/test-role" + } + + def 'should return valid task execution role arn for china partition'() { + given: + def credentials = Mock(NetflixAssumeRoleAmazonCredentials) { + getName() >> { "test" } + getRegions() >> { [new AmazonCredentials.AWSRegion('cn-north-1', ['cn-north-1a', 'cn-north-1b'])] } + getAssumeRole() >> { 'arn:aws-cn:iam:123123123123:role/test-role' } + getAccountId() >> { 'test' } + } + def resolvedArtifact = createResolvedArtifact() + def containerDef = createContainerDef() + def registerTaskDefRequest = createRegisterTaskDefRequest(containerDef) + def description = createDescription(resolvedArtifact); + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + String ecsServiceRole = operation.inferAssumedRoleArn(credentials); + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact(ecsServiceRole, new EcsServerGroupName('v1-ecs-test-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "arn:aws-cn:iam:123123123123:role/test-role" + } + + def 'should return valid task execution role arn for gov-cloud partition'() { + given: + def credentials = Mock(NetflixAssumeRoleAmazonCredentials) { + getName() >> { "test" } + getRegions() >> { [new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1a', 'us-west-1b'])] } + getAssumeRole() >> { 'arn:aws-us-gov:iam:123123123123:role/test-role' } + getAccountId() >> { 'test' } + } + def resolvedArtifact = createResolvedArtifact() + def containerDef = createContainerDef() + def registerTaskDefRequest = createRegisterTaskDefRequest(containerDef) + def description = createDescription(resolvedArtifact); + + def operation = new CreateServerGroupAtomicOperation(description) + operation.artifactDownloader = artifactDownloader + operation.mapper = objectMapper + + artifactDownloader.download(_) >> new ByteArrayInputStream() + objectMapper.readValue(_,_) >> registerTaskDefRequest + + when: + String ecsServiceRole = operation.inferAssumedRoleArn(credentials); + RegisterTaskDefinitionRequest result = + 
operation.makeTaskDefinitionRequestFromArtifact(ecsServiceRole, new EcsServerGroupName('v1-ecs-test-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "arn:aws-us-gov:iam:123123123123:role/test-role" + } + + def 'should return valid task def from spelProcessedArtifact'() { + given: + def credentials = Mock(NetflixAssumeRoleAmazonCredentials) { + getName() >> { "test" } + getRegions() >> { [new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1a', 'us-west-1b'])] } + getAssumeRole() >> { 'arn:aws-us-gov:iam:123123123123:role/test-role' } + getAccountId() >> { 'test' } + } + def containerDef = createContainerDef() + def registerTaskDefRequest = createRegisterTaskDefRequest(containerDef) + def description = createDescription(null); + description.isEvaluateTaskDefinitionArtifactExpressions() >> true + Map spelArtifactMap = new HashMap<>() + spelArtifactMap.put("family", "PLACEHOLDER") + description.getSpelProcessedTaskDefinitionArtifact() >> spelArtifactMap + + def operation = new CreateServerGroupAtomicOperation(description) + operation.mapper = objectMapper + + objectMapper.convertValue(_,_) >> registerTaskDefRequest + + when: + String ecsServiceRole = operation.inferAssumedRoleArn(credentials); + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact(ecsServiceRole, new EcsServerGroupName('v1-ecs-test-v001')) + + then: + result.getTaskRoleArn() == null + result.getFamily() == "v1-ecs-test" + result.getExecutionRoleArn() == "arn:aws-us-gov:iam:123123123123:role/test-role" + } + + def 'should fail for invalid task def from spelProcessedArtifact'() { + given: + def credentials = Mock(NetflixAssumeRoleAmazonCredentials) { + getName() >> { "test" } + getRegions() >> { [new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1a', 'us-west-1b'])] } + getAssumeRole() >> { 'arn:aws-us-gov:iam:123123123123:role/test-role' } + getAccountId() >> { 'test' } + } + + def description = createDescription(null); + description.isEvaluateTaskDefinitionArtifactExpressions() >> true + description.getSpelProcessedTaskDefinitionArtifact() >> null + + def operation = new CreateServerGroupAtomicOperation(description) + + when: + String ecsServiceRole = operation.inferAssumedRoleArn(credentials); + RegisterTaskDefinitionRequest result = + operation.makeTaskDefinitionRequestFromArtifact(ecsServiceRole, new EcsServerGroupName('v1-ecs-test-v001')) + + then: + IllegalArgumentException exception = thrown() + exception.message == + "Task definition artifact can not be null" + } + + def createResolvedArtifact (){ + return Artifact.builder() + .name("taskdef.json") + .reference("fake.github.com/repos/org/repo/taskdef.json") + .artifactAccount("my-github-acct") + .type("github/file") + .build() + } + + def createContainerDef(){ + return new ContainerDefinition() + .withName("web") + .withImage("PLACEHOLDER") + .withMemoryReservation(512) + } + + def createRegisterTaskDefRequest(ContainerDefinition containerDef){ + return new RegisterTaskDefinitionRequest().withContainerDefinitions([containerDef]) + } + + def createDescription(Artifact resolvedArtifact){ + def description = Mock(CreateServerGroupDescription) + description.getApplication() >> 'v1' + description.getStack() >> 'ecs' + description.getFreeFormDetails() >> 'test' + description.getEcsClusterName() >> 'test-cluster' + description.getIamRole() >> 'None (No IAM role)' + description.getLaunchType() >> 'FARGATE' + description.getResolvedTaskDefinitionArtifact() >> 
resolvedArtifact + description.getContainerToImageMap() >> [ + web: "docker-image-url" + ] + return description } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperationSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperationSpec.groovy index c50bdd74824..0bea677b743 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperationSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DestroyServiceAtomicOperationSpec.groovy @@ -33,12 +33,12 @@ class DestroyServiceAtomicOperationSpec extends CommonAtomicOperation { operation.amazonClientProvider = amazonClientProvider operation.ecsCloudMetricService = Mock(EcsCloudMetricService) - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService amazonClientProvider.getAmazonEcs(_, _, _) >> ecs containerInformationService.getClusterName(_, _, _) >> 'cluster-name' - accountCredentialsProvider.getCredentials(_) >> TestCredential.named("test") + credentialsRepository.getOne(_) >> TestCredential.named("test") when: operation.operate([]) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperationSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperationSpec.groovy index ce19a52ce75..7f55ad99902 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperationSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/DisableServiceAtomicOperationSpec.groovy @@ -16,6 +16,10 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.ops +import com.amazonaws.services.applicationautoscaling.model.DescribeScalableTargetsResult +import com.amazonaws.services.applicationautoscaling.model.ScalableDimension +import com.amazonaws.services.applicationautoscaling.model.ScalableTarget +import com.amazonaws.services.applicationautoscaling.model.SuspendedState import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ModifyServiceDescription @@ -28,17 +32,57 @@ class DisableServiceAtomicOperationSpec extends CommonAtomicOperation { )) operation.amazonClientProvider = amazonClientProvider - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService amazonClientProvider.getAmazonEcs(_, _, _) >> ecs + amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoscaling containerInformationService.getClusterName(_, _, _) >> 'cluster-name' - accountCredentialsProvider.getCredentials(_) >> TestCredential.named("test") + credentialsRepository.getOne(_) >> TestCredential.named("test") when: operation.operate([]) then: + 1 * autoscaling.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withSuspendedState(new SuspendedState() + .withScheduledScalingSuspended(false) + .withDynamicScalingInSuspended(false) + .withDynamicScalingOutSuspended(false))) + 1 * 
autoscaling.registerScalableTarget(_) + 1 * ecs.updateService(_) + } + + void 'should not suspend autoscaling if it is already suspended'() { + given: + def operation = new DisableServiceAtomicOperation(new ModifyServiceDescription( + serverGroupName: "test-server-group", + credentials: TestCredential.named('Test', [:]) + )) + + operation.amazonClientProvider = amazonClientProvider + operation.credentialsRepository = credentialsRepository + operation.containerInformationService = containerInformationService + + amazonClientProvider.getAmazonEcs(_, _, _) >> ecs + amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoscaling + containerInformationService.getClusterName(_, _, _) >> 'cluster-name' + credentialsRepository.getOne(_) >> TestCredential.named("test") + + when: + operation.operate([]) + + then: + 1 * autoscaling.describeScalableTargets(_) >> new DescribeScalableTargetsResult() + .withScalableTargets(new ScalableTarget() + .withScalableDimension(ScalableDimension.EcsServiceDesiredCount) + .withSuspendedState(new SuspendedState() + .withScheduledScalingSuspended(true) + .withDynamicScalingInSuspended(true) + .withDynamicScalingOutSuspended(true))) + 0 * autoscaling.registerScalableTarget(_) 1 * ecs.updateService(_) } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperationSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperationSpec.groovy index d83b28c19b9..39b7267ae82 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperationSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/EnableServiceAtomicOperationSpec.groovy @@ -22,8 +22,6 @@ import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ModifyServiceDescription class EnableServiceAtomicOperationSpec extends CommonAtomicOperation { - def autoscaling = Mock(AWSApplicationAutoScaling) - void 'should execute the operation'() { given: def operation = new EnableServiceAtomicOperation(new ModifyServiceDescription( @@ -32,14 +30,14 @@ class EnableServiceAtomicOperationSpec extends CommonAtomicOperation { )) operation.amazonClientProvider = amazonClientProvider - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService amazonClientProvider.getAmazonEcs(_, _, _) >> ecs amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoscaling containerInformationService.getClusterName(_, _, _) >> 'cluster-name' - accountCredentialsProvider.getCredentials(_) >> TestCredential.named("test") + credentialsRepository.getOne(_) >> TestCredential.named("test") when: operation.operate([]) @@ -47,5 +45,6 @@ class EnableServiceAtomicOperationSpec extends CommonAtomicOperation { then: 1 * autoscaling.describeScalableTargets(_) >> new DescribeScalableTargetsResult().withScalableTargets([]) 1 * ecs.updateService(_) + 1 * autoscaling.registerScalableTarget(_) } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperationSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperationSpec.groovy index a93be8980cc..9c8f4fae1c8 100644 --- 
a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperationSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/ResizeServiceAtomicOperationSpec.groovy @@ -37,13 +37,13 @@ class ResizeServiceAtomicOperationSpec extends CommonAtomicOperation { )) operation.amazonClientProvider = amazonClientProvider - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService amazonClientProvider.getAmazonEcs(_, _, _) >> ecs amazonClientProvider.getAmazonApplicationAutoScaling(_, _, _) >> autoscaling containerInformationService.getClusterArn(_, _, _) >> 'cluster-arn' - accountCredentialsProvider.getCredentials(_) >> credentials + credentialsRepository.getOne(_) >> credentials when: operation.operate([]) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstanceAtomicOperationSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstanceAtomicOperationSpec.groovy index 2e1c3e09cc0..6c54027e03c 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstanceAtomicOperationSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/ops/TerminateInstanceAtomicOperationSpec.groovy @@ -28,12 +28,12 @@ class TerminateInstanceAtomicOperationSpec extends CommonAtomicOperation { )) operation.amazonClientProvider = amazonClientProvider - operation.accountCredentialsProvider = accountCredentialsProvider + operation.credentialsRepository = credentialsRepository operation.containerInformationService = containerInformationService amazonClientProvider.getAmazonEcs(_, _, _) >> ecs containerInformationService.getClusterArn(_, _, _) >> 'cluster-arn' - accountCredentialsProvider.getCredentials(_) >> TestCredential.named("test") + credentialsRepository.getOne(_) >> TestCredential.named("test") when: operation.operate([]) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/AbstractValidatorSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/AbstractValidatorSpec.groovy index f8910148694..5ee43c6281e 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/AbstractValidatorSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/AbstractValidatorSpec.groovy @@ -19,9 +19,9 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.deploy.description.AbstractECSDescription -import org.springframework.validation.Errors import spock.lang.Specification import spock.lang.Subject @@ -58,7 +58,7 @@ abstract class AbstractValidatorSpec extends Specification { def description = getDescription() description.credentials = TestCredential.named('test') description.region = 'wrong-region-test' - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: if(testRegion) { @@ 
-75,7 +75,7 @@ abstract class AbstractValidatorSpec extends Specification { given: def description = getNulledDescription() def descriptionName = getDescriptionName() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def nullProperties = notNullableProperties() when: @@ -91,7 +91,7 @@ abstract class AbstractValidatorSpec extends Specification { given: def description = getInvalidDescription() def descriptionName = getDescriptionName() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def invalidFields = invalidProperties() @@ -107,7 +107,7 @@ abstract class AbstractValidatorSpec extends Specification { void 'should pass validation'() { given: def description = getDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServergroupDescriptionValidatorSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServergroupDescriptionValidatorSpec.groovy index f6684373e1e..c09b458c433 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServergroupDescriptionValidatorSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/EcsCreateServergroupDescriptionValidatorSpec.groovy @@ -16,14 +16,16 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators +import com.amazonaws.services.ecs.model.CapacityProviderStrategyItem import com.amazonaws.services.ecs.model.PlacementStrategy import com.amazonaws.services.ecs.model.PlacementStrategyType import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.deploy.description.AbstractECSDescription import com.netflix.spinnaker.clouddriver.ecs.deploy.description.CreateServerGroupDescription import com.netflix.spinnaker.clouddriver.model.ServerGroup -import org.springframework.validation.Errors +import com.netflix.spinnaker.moniker.Moniker class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec { @@ -31,7 +33,7 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec given: def description = (CreateServerGroupDescription) getDescription() description.capacity = null - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -44,7 +46,7 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec given: def description = (CreateServerGroupDescription) getDescription() description.capacity.setDesired(9001) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -57,7 +59,7 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec given: def description = (CreateServerGroupDescription) getDescription() description.capacity.setDesired(0) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -66,11 +68,11 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec 1 * errors.rejectValue('capacity.desired', "${getDescriptionName()}.capacity.desired.less.than.min") } - void 'should fail when more than one 
availability zones is present'() { + void 'should fail when more than one region is present'() { given: def description = (CreateServerGroupDescription) getDescription() description.availabilityZones = ['us-west-1': ['us-west-1a'], 'us-west-2': ['us-west-2a']] - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -79,14 +81,318 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec 1 * errors.rejectValue('availabilityZones', "${getDescriptionName()}.availabilityZones.must.have.only.one") } + void 'should fail when no availability zones are present'() { + given: + def description = (CreateServerGroupDescription) getDescription() + description.availabilityZones = ['us-west-1': []] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('availabilityZones.zones', "${getDescriptionName()}.availabilityZones.zones.not.nullable") + } + + void 'should fail when environment variables contain reserved key'() { + given: + def description = (CreateServerGroupDescription) getDescription() + description.environmentVariables = ['SERVER_GROUP':'invalid', 'tag_1':'valid_tag'] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('environmentVariables', "${getDescriptionName()}.environmentVariables.invalid") + } + + void 'should pass with correct environment variables'() { + given: + def description = getDescription() + description.environmentVariables = ['TAG_1':'valid_tag_1', 'TAG_2':'valid_tag_2'] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors.rejectValue(_, _) + } + + void 'should pass without load balancer'() { + given: + def description = getDescription() + description.containerPort = null + description.targetGroup = null + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors.rejectValue(_, _) + } + + void '(with artifact) should fail when load balancer specified but loadBalanced container missing'() { + given: + def description = getDescription() + description.useTaskDefinitionArtifact = true + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('loadBalancedContainer', "${getDescriptionName()}.loadBalancedContainer.not.nullable") + } + + void '(with artifact) should fail when load balanced container is specified but load balancer is missing'() { + given: + def description = getDescription() + description.targetGroup = null + description.loadBalancedContainer = 'load-balanced-container' + description.useTaskDefinitionArtifact = true + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('targetGroup', "${getDescriptionName()}.targetGroup.not.nullable") + } + + void 'target group mappings should fail when load balancer specified but container name is missing'() { + given: + def targetGroupMappings = new CreateServerGroupDescription.TargetGroupProperties( + containerName: null, + containerPort: 1337, + targetGroup: 'target-group-arn' + ) + def description = getDescription() + description.targetGroup = null + description.containerPort = null + description.dockerImageAddress = null + description.useTaskDefinitionArtifact = true + description.targetGroupMappings = [targetGroupMappings] + def 
errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('targetGroupMappings.containerName', "${getDescriptionName()}.targetGroupMappings.containerName.not.nullable") + } + + void 'should fail when launch type and capacity provider strategy are both defined'() { + given: + def capacityProviderStrategy = new CapacityProviderStrategyItem( + capacityProvider: 'FARGATE', + weight: 1 + ) + def description = getDescription() + description.capacityProviderStrategy = [capacityProviderStrategy] + description.launchType = 'FARGATE' + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('launchType', 'createServerGroupDescription.launchType.invalid', 'LaunchType cannot be specified when CapacityProviderStrategy are specified.') + } + + void 'should fail when subnet type and subnet types are both defined'() { + given: + def description = getDescription() + description.subnetType = 'public' + description.subnetTypes = ['public', 'private'] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('subnetTypes', 'createServerGroupDescription.subnetTypes.invalid', 'SubnetType (string) cannot be specified when SubnetTypes (list) is specified. Please use SubnetTypes (list)') + } + + void 'should fail when neither launch type or capacity provider strategy are defined'() { + given: + def description = getDescription() + description.capacityProviderStrategy = null + description.launchType = null + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('launchType', 'createServerGroupDescription.launchType.invalid', 'LaunchType or CapacityProviderStrategy must be specified.') + } + + void 'target group mappings should fail when container name is specified but load balancer is missing'() { + given: + def targetGroupMappings = new CreateServerGroupDescription.TargetGroupProperties( + containerName: 'test-container', + containerPort: 1337, + targetGroup: null + ) + def description = getDescription() + description.targetGroup = null + description.containerPort = null + description.dockerImageAddress = null + description.useTaskDefinitionArtifact = true + description.targetGroupMappings = [targetGroupMappings] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('targetGroupMappings.targetGroup', "${getDescriptionName()}.targetGroupMappings.targetGroup.not.nullable") + } + + void 'target group mappings should fail when container port is invalid'() { + given: + def targetGroupMappings = new CreateServerGroupDescription.TargetGroupProperties( + containerName: null, + containerPort: -1, + targetGroup: 'target-group-arn' + ) + def description = getDescription() + description.targetGroup = null + description.containerPort = null + description.targetGroupMappings = [targetGroupMappings] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('targetGroupMappings.containerPort', "${getDescriptionName()}.targetGroupMappings.containerPort.invalid") + } + + void 'target group mappings should fail when container port is missing'() { + given: + def targetGroupMappings = new CreateServerGroupDescription.TargetGroupProperties( + containerName: null, + containerPort: null, + targetGroup: 
'target-group-arn' + ) + def description = getDescription() + description.targetGroup = null + description.containerPort = null + description.targetGroupMappings = [targetGroupMappings] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('targetGroupMappings.containerPort', "${getDescriptionName()}.targetGroupMappings.containerPort.not.nullable") + } + + void 'target group mappings should pass without load balancer if using container inputs'() { + given: + def targetGroupMappings = new CreateServerGroupDescription.TargetGroupProperties( + containerName: null, + containerPort: 1337, + targetGroup: 'target-group-arn' + ) + def description = getDescription() + description.targetGroup = null + description.containerPort = null + description.targetGroupMappings = [targetGroupMappings] + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors.rejectValue(_, _) + } + + void 'application must be set if moniker is null'() { + given: + def description = getDescription() + description.application = null + description.moniker = null + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('application', "${getDescriptionName()}.application.not.nullable") + } + + void 'moniker application cannot be null'() { + given: + def description = getDescription() + description.application = "foo" + description.moniker.app = null + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('moniker.app', "${getDescriptionName()}.moniker.app.not.nullable") + } + + void 'application can be null if moniker is set'() { + given: + def description = getDescription() + description.application = null + description.moniker.app = "foo" + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors.rejectValue(_, _) + } + + void 'moniker can be null if application is set'() { + given: + def description = getDescription() + description.application = "foo" + description.moniker = null + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 0 * errors.rejectValue(_, _) + } + + void 'both app and moniker should match if both are set'() { + given: + def description = getDescription() + description.application = "foo" + description.freeFormDetails = "detail" + description.stack = "stack" + description.moniker.app = "bar" + description.moniker.detail = "wrongdetail" + description.moniker.stack = "wrongstack" + def errors = Mock(ValidationErrors) + + when: + validator.validate([], description, errors) + + then: + 1 * errors.rejectValue('moniker.app', "${getDescriptionName()}.moniker.app.invalid") + 1 * errors.rejectValue('moniker.detail', "${getDescriptionName()}.moniker.detail.invalid") + 1 * errors.rejectValue('moniker.stack', "${getDescriptionName()}.moniker.stack.invalid") + } @Override AbstractECSDescription getNulledDescription() { def description = (CreateServerGroupDescription) getDescription() description.placementStrategySequence = null description.availabilityZones = null - description.autoscalingPolicies = null - description.application = null description.ecsClusterName = null description.dockerImageAddress = null description.credentials = null @@ -96,12 +402,13 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec 
description.capacity.setDesired(null) description.capacity.setMin(null) description.capacity.setMax(null) + description.moniker.app = null; return description } @Override Set notNullableProperties() { - ['placementStrategySequence', 'availabilityZones', 'autoscalingPolicies', 'application', + ['placementStrategySequence', 'availabilityZones', 'moniker.app', 'ecsClusterName', 'dockerImageAddress', 'credentials', 'containerPort', 'computeUnits', 'reservedMemory', 'capacity.desired', 'capacity.min', 'capacity.max'] } @@ -146,7 +453,7 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec description.credentials = TestCredential.named('test') description.region = 'us-west-1' - description.application = 'my-app' + description.moniker = Moniker.builder().app('my-app').build(); description.ecsClusterName = 'mycluster' description.iamRole = 'iam-role-arn' description.containerPort = 1337 @@ -158,7 +465,6 @@ class EcsCreateServergroupDescriptionValidatorSpec extends AbstractValidatorSpec description.dockerImageAddress = 'docker-image-url' description.capacity = new ServerGroup.Capacity(1, 2, 1) description.availabilityZones = ['us-west-1': ['us-west-1a']] - description.autoscalingPolicies = [] description.placementStrategySequence = [new PlacementStrategy().withType(PlacementStrategyType.Random)] description diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeDescriptionValidatorSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeDescriptionValidatorSpec.groovy index f011f1db6a9..40aaf77a133 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeDescriptionValidatorSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/ResizeDescriptionValidatorSpec.groovy @@ -17,11 +17,11 @@ package com.netflix.spinnaker.clouddriver.ecs.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.deploy.description.AbstractECSDescription import com.netflix.spinnaker.clouddriver.ecs.deploy.description.ResizeServiceDescription import com.netflix.spinnaker.clouddriver.model.ServerGroup -import org.springframework.validation.Errors class ResizeDescriptionValidatorSpec extends AbstractValidatorSpec { @@ -29,7 +29,7 @@ class ResizeDescriptionValidatorSpec extends AbstractValidatorSpec { given: def description = (ResizeServiceDescription) getDescription() description.capacity = null - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -42,7 +42,7 @@ class ResizeDescriptionValidatorSpec extends AbstractValidatorSpec { given: def description = (ResizeServiceDescription) getDescription() description.capacity.setDesired(9001) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -55,7 +55,7 @@ class ResizeDescriptionValidatorSpec extends AbstractValidatorSpec { given: def description = (ResizeServiceDescription) getDescription() description.capacity.setDesired(0) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git 
a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstanceDescriptionValidatorSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstanceDescriptionValidatorSpec.groovy index 5af2d597e50..cdef48d09b0 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstanceDescriptionValidatorSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/deploy/validators/TerminateInstanceDescriptionValidatorSpec.groovy @@ -65,7 +65,7 @@ class TerminateInstanceDescriptionValidatorSpec extends AbstractValidatorSpec { def description = new TerminateInstancesDescription() description.credentials = TestCredential.named('test') description.region = 'us-west-1' - description.ecsTaskIds = ['deadbeef-33f7-4637-ab84-606f0c77af42'] + description.ecsTaskIds = ['deadbeef-33f7-4637-ab84-606f0c77af42', 'deadbeef33f74637ab84606f0c77af42'] description } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupNameResolverSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupNameResolverSpec.groovy new file mode 100644 index 00000000000..7198afed845 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/names/EcsServerGroupNameResolverSpec.groovy @@ -0,0 +1,435 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + + +package com.netflix.spinnaker.clouddriver.ecs.names + + +import com.amazonaws.services.ecs.AmazonECS +import com.amazonaws.services.ecs.model.* +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import spock.lang.Specification + +class EcsServerGroupNameResolverSpec extends Specification { + def ecsClient = Mock(AmazonECS) + + def ecsClusterName = 'default' + def region = 'us-west-1' + + void setup() { + Task task = new DefaultTask("task") + TaskRepository.threadLocalTask.set(task) + } + + void "should handle only tagged services"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsTagNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("another-tagged-service", "tagged-service") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["another-tagged-service", "tagged-service"] + request.include == ["TAGS"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "another-tagged-service", createdAt: new Date(1), status: "ACTIVE", + tags: [ + new Tag(key: EcsTagNamer.CLUSTER, value: "application-stack-details"), + new Tag(key: EcsTagNamer.APPLICATION, value: "application"), + new Tag(key: EcsTagNamer.STACK, value: "stack"), + new Tag(key: EcsTagNamer.DETAIL, value: "details"), + new Tag(key: EcsTagNamer.SEQUENCE, value: 1) + ]), + new Service(serviceName: "tagged-service", createdAt: new Date(1), status: "ACTIVE", + tags: [ + new Tag(key: EcsTagNamer.CLUSTER, value: "application-stack-details"), + new Tag(key: EcsTagNamer.APPLICATION, value: "application"), + new Tag(key: EcsTagNamer.STACK, value: "stack"), + new Tag(key: EcsTagNamer.DETAIL, value: "details"), + new Tag(key: EcsTagNamer.SEQUENCE, value: 2) + ]) + ) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v003"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v003", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v003" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should handle mix of tagged services"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsTagNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v001", "tagged-service") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v001", "tagged-service"] + request.include == ["TAGS"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "tagged-service", createdAt: new Date(1), status: "ACTIVE", + tags: [ + new Tag(key: EcsTagNamer.CLUSTER, value: "application-stack-details"), + new Tag(key: EcsTagNamer.APPLICATION, value: "application"), + new Tag(key: EcsTagNamer.STACK, value: "stack"), + new Tag(key: EcsTagNamer.DETAIL, value: "details"), + 
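// highest SEQUENCE tag is 2, outranking the untagged -v001 service, so the next resolved name should be -v003 + 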
new Tag(key: EcsTagNamer.SEQUENCE, value: 2) + ]) + ) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v003"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v003", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v003" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should generate new name from first sequence"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns([]) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v000"] + }) >> new DescribeServicesResult().withServices([]) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v000" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should handle sequence roll over"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v999") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v999"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v999", createdAt: new Date(1), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v000"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v000", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v000" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should resolve task definition family name from service group name"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v001") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v001"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v002"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v002", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v002" + 
nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should resolve task definition container name from service group name"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v001", "application-stack-details-v002") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v001", "application-stack-details-v002"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v002", createdAt: new Date(2), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v003"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v003", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v003" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should skip names that already exist as ECS services in active and draining state"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v001", "application-stack-details-v002") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v001", "application-stack-details-v002"] + request.include == ["TAGS"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v002", createdAt: new Date(2), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v003"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v003", createdAt: new Date(3), status: "DRAINING")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v004"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v004", createdAt: new Date(3), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v005"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v005", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v005" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should not skip names for inactive ECS services"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, 
region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v001", "application-stack-details-v002") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v001", "application-stack-details-v002"] + request.include == ["TAGS"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v002", createdAt: new Date(2), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v003"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v003", createdAt: new Date(3), status: "INACTIVE")) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v003" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should give up trying to find a name if all services are draining"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details-v001", "application-stack-details-v002") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v001", "application-stack-details-v002"] + request.include == ["TAGS"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v002", createdAt: new Date(2), status: "ACTIVE")) + ecsClient.describeServices(_) >> new DescribeServicesResult().withServices( + new Service(serviceName: "blah-blah", createdAt: new Date(1), status: "DRAINING")) + + when: + resolver.resolveNextName('application', 'stack', 'details') + + then: + thrown(IllegalArgumentException) + } + + void "should generate name with null details"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns([]) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-v000"] + }) >> new DescribeServicesResult().withServices([]) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', null) + + then: + nextServerGroupName.getServiceName() == "application-stack-v000" + nextServerGroupName.getFamilyName() == "application-stack" + } + + void "should generate name with null stack"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns([]) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application--details-v000"] + }) >> new DescribeServicesResult().withServices([]) + + when: + def nextServerGroupName = resolver.resolveNextName('application', null, 'details') + 
+ then: + nextServerGroupName.getServiceName() == "application--details-v000" + nextServerGroupName.getFamilyName() == "application--details" + } + + void "should treat an empty stack name like null and skip already-taken sequences"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns( + "application--details-v001", + "application--details-v002" + ) + + and: 'two existing and active services' + ecsClient.describeServices({ DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application--details-v001", "application--details-v002"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application--details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application--details-v002", createdAt: new Date(2), status: "ACTIVE") + ) + + and: 'one missing service' + ecsClient.describeServices({ DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application--details-v003"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application--details-v003", reason: "MISSING") + ) + + when: 'stack has an empty value on resolving name' + def nextServerGroupName = resolver.resolveNextName('application', '', 'details') + + then: 'it will have the same result as if it was null' + nextServerGroupName.getServiceName() == "application--details-v003" + nextServerGroupName.getFamilyName() == "application--details" + + // If this stub is ever matched, `resolveNextName` failed to register the taken sequences (1 and 2) + 0 * ecsClient.describeServices({ DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application--details-v000"] + }) + } + + void "should generate name with null details and stack"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns([]) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-v000"] + }) >> new DescribeServicesResult().withServices([]) + + when: + def nextServerGroupName = resolver.resolveNextName('application', null, null) + + then: + nextServerGroupName.getServiceName() == "application-v000" + nextServerGroupName.getFamilyName() == "application" + } + + void "should find the next slot after the max sequence when there are gaps"() { + given: + def serviceArns = ["application-stack-details-v001", "application-stack-details-v003", + "application-stack-details-v004", "application-stack-details-v005", + "application-stack-details-v006"] + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns(serviceArns) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == serviceArns + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details-v001", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v003", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v004", createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v005",
createdAt: new Date(1), status: "ACTIVE"), + new Service(serviceName: "application-stack-details-v006", createdAt: new Date(1), status: "ACTIVE") + ) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v007"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v007", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v007" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + + void "should resolve task definition family name from service group name with no sequence"() { + given: + def resolver = new EcsServerGroupNameResolver(ecsClusterName, ecsClient, region, new EcsDefaultNamer()) + ecsClient.listServices(_) >> new ListServicesResult().withServiceArns("application-stack-details") + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details"] + }) >> new DescribeServicesResult().withServices( + new Service(serviceName: "application-stack-details", createdAt: new Date(1), status: "ACTIVE")) + ecsClient.describeServices({DescribeServicesRequest request -> + request.cluster == ecsClusterName + request.services == ["application-stack-details-v000"] + }) >> new DescribeServicesResult().withFailures( + new Failure(arn: "application-stack-details-v000", reason: "MISSING") + ) + + when: + def nextServerGroupName = resolver.resolveNextName('application', 'stack', 'details') + + then: + nextServerGroupName.getServiceName() == "application-stack-details-v000" + nextServerGroupName.getFamilyName() == "application-stack-details" + } + +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/names/EcsTagNamerSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/names/EcsTagNamerSpec.groovy new file mode 100644 index 00000000000..9409788e72e --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/names/EcsTagNamerSpec.groovy @@ -0,0 +1,81 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.names + +import com.amazonaws.services.ecs.model.Service +import com.amazonaws.services.ecs.model.Tag +import com.netflix.spinnaker.moniker.Namer +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import static EcsTagNamer.APPLICATION +import static EcsTagNamer.CLUSTER +import static EcsTagNamer.STACK +import static EcsTagNamer.DETAIL +import static EcsTagNamer.SEQUENCE + +class EcsTagNamerSpec extends Specification { + + @Shared + Namer namer = new EcsTagNamer() + + @Unroll + def "should derive correct moniker"() { + given: + def service = new Service(serviceName: name, tags: tags?.collect {new Tag(key: it.key, value: it.value) }) + def moniker = namer.deriveMoniker(new EcsResourceService(service)) + + expect: + with(moniker) { + app == expectedApp + cluster == expectedCluster + stack == expectedStack + detail == expectedDetail + sequence == expectedSequence + } + + where: + name | tags || expectedApp | expectedCluster | expectedStack | expectedDetail | expectedSequence + "cass-nccpintegration-random-junk-d0prod-z0useast1a-v003" | null || "cass" | "cass-nccpintegration-random-junk-d0prod-z0useast1a" | "nccpintegration" | "random-junk-d0prod-z0useast1a" | 3 + "cass-nccpintegration-random-junk-d0prod-z0useast1a-v003" | [:] || "cass" | "cass-nccpintegration-random-junk-d0prod-z0useast1a" | "nccpintegration" | "random-junk-d0prod-z0useast1a" | 3 + "cass-nccpintegration-random-junk-d0prod-z0useast1a-v003" | [(APPLICATION): "myApp"] || "myApp" | "cass-nccpintegration-random-junk-d0prod-z0useast1a" | "nccpintegration" | "random-junk-d0prod-z0useast1a" | 3 + "cass-nccpintegration-random-junk-v003" | [(CLUSTER): "myCluster"] || "cass" | "myCluster" | "nccpintegration" | "random-junk" | 3 + "cass-nccpintegration-random-junk-v003" | [(STACK): "myStack"] || "cass" | "cass-myStack" | "myStack" | "random-junk" | 3 + "cass-nccpintegration-random-junk-v003" | [(STACK): "myStack", (DETAIL): ""] || "cass" | "cass-myStack" | "myStack" | "" | 3 + "cass-nccpintegration-random-junk-v003" | [(DETAIL): "myDetail"] || "cass" | "cass--myDetail" | "nccpintegration" | "myDetail" | 3 + "cass-nccpintegration-random-junk-v003" | [(SEQUENCE): "42"] || "cass" | "cass-nccpintegration-random-junk" | "nccpintegration" | "random-junk" | 42 + "app" | [(STACK): "myStack", (SEQUENCE): "2"] || "app" | "app-myStack" | "myStack" | null | 2 + "app" | null || "app" | "app" | null | null | null + "app-cluster" | null || "app" | "app-cluster" | "cluster" | null | null + "app-cluster" | [(CLUSTER): "myCluster"] || "app" | "myCluster" | "cluster" | null | null + "app-v042" | [(SEQUENCE): "13"] || "app" | "app" | null | null | 13 + "app-v042" | [(DETAIL): "myDetail"] || "app" | "app--myDetail" | null | "myDetail" | 42 + "awesomeapp--my-detail" | null || "awesomeapp" | "awesomeapp--my-detail" | null | "my-detail" | null + "awesomeapp--my-detail" | getAllMonikerTags(true) || "myApp" | "myCluster" | "myStack" | "myDetail" | 13 + "awesomeapp--my-detail" | getAllMonikerTags(false) || "myApp" | "myApp-myStack-myDetail" | "myStack" | "myDetail" | 13 + }; + + def getAllMonikerTags(includeCluster = false) { + def tags = [(APPLICATION): "myApp", (STACK): "myStack", (DETAIL): "myDetail", (SEQUENCE): "13"] + if (includeCluster) { + tags << [(CLUSTER): "myCluster"] + } + tags + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgentSpec.groovy 
b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgentSpec.groovy index 54dd0b3f127..05b5299f564 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgentSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsCloudMetricAlarmCachingAgentSpec.groovy @@ -19,9 +19,11 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent import com.amazonaws.auth.AWSCredentialsProvider import com.amazonaws.services.cloudwatch.AmazonCloudWatch import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult +import com.amazonaws.services.cloudwatch.model.Dimension +import com.amazonaws.services.cloudwatch.model.MetricAlarm +import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.ecs.cache.Keys import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm import spock.lang.Shared @@ -39,13 +41,15 @@ class EcsCloudMetricAlarmCachingAgentSpec extends Specification { AWSCredentialsProvider credentialsProvider @Subject - EcsCloudMetricAlarmCachingAgent agent = new EcsCloudMetricAlarmCachingAgent(CommonCachingAgent.netflixAmazonCredentials, REGION, clientProvider, credentialsProvider) + EcsCloudMetricAlarmCachingAgent agent def setup() { cloudWatch = Mock(AmazonCloudWatch) clientProvider = Mock(AmazonClientProvider) providerCache = Mock(ProviderCache) credentialsProvider = Mock(AWSCredentialsProvider) + agent = new EcsCloudMetricAlarmCachingAgent(CommonCachingAgent.netflixAmazonCredentials, 'us-west-1', clientProvider) + } def 'should get a list of cloud watch alarms'() { @@ -75,4 +79,35 @@ class EcsCloudMetricAlarmCachingAgentSpec extends Specification { metricAlarms*.accountName.containsAll(cacheData.get(Keys.Namespace.ALARMS.ns)*.getAttributes().accountName) metricAlarms*.region.containsAll(cacheData.get(Keys.Namespace.ALARMS.ns)*.getAttributes().region) } + + def 'should evict old keys when id is appended'() { + given: + def metricAlarm1 = new MetricAlarm().withAlarmName("alarm-name-1").withAlarmArn("alarmArn-1").withDimensions([new Dimension().withName("ClusterName").withValue("my-cluster")]) + def metricAlarm2 = new MetricAlarm().withAlarmName("alarm-name-2").withAlarmArn("alarmArn-2").withDimensions([new Dimension().withName("ClusterName").withValue("my-cluster")]) + def attributes1 = EcsCloudMetricAlarmCachingAgent.convertMetricAlarmToAttributes(metricAlarm1, ACCOUNT, REGION) + def attributes2 = EcsCloudMetricAlarmCachingAgent.convertMetricAlarmToAttributes(metricAlarm2, ACCOUNT, REGION) + def metricAlarms = [metricAlarm1, metricAlarm2] + def describeAlarmsResult = new DescribeAlarmsResult().withMetricAlarms(metricAlarms) + cloudWatch.describeAlarms(_) >> describeAlarmsResult + clientProvider.getAmazonCloudWatch(_, _, _) >> cloudWatch + + def oldKey1 = Keys.buildKey(Keys.Namespace.ALARMS.ns, ACCOUNT, REGION, metricAlarm1.getAlarmArn()) + def oldKey2 = Keys.buildKey(Keys.Namespace.ALARMS.ns, ACCOUNT, REGION, metricAlarm2.getAlarmArn()) + def oldData = [new DefaultCacheData(oldKey1, attributes1, [:]), new DefaultCacheData(oldKey2, attributes2, [:])] + providerCache.getAll(Keys.Namespace.ALARMS.ns) >> oldData + + def newKey1 = Keys.getAlarmKey(ACCOUNT, 
REGION, metricAlarm1.getAlarmArn(), "my-cluster") + def newKey2 = Keys.getAlarmKey(ACCOUNT, REGION, metricAlarm2.getAlarmArn(), "my-cluster") + + when: + def cacheResult = agent.loadData(providerCache) + + then: + cacheResult.evictions[Keys.Namespace.ALARMS.ns].size() == 2 + cacheResult.evictions[Keys.Namespace.ALARMS.ns].containsAll([oldKey1, oldKey2]) + cacheResult.cacheResults[Keys.Namespace.ALARMS.ns].size() == 2 + cacheResult.cacheResults[Keys.Namespace.ALARMS.ns]*.id.containsAll([newKey1, newKey2]) + cacheResult.cacheResults[Keys.Namespace.ALARMS.ns]*.attributes.containsAll([attributes1, attributes2]) + } + } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetCachingAgentSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetCachingAgentSpec.groovy index f38dedb2936..354b2bb1e19 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetCachingAgentSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ScalableTargetCachingAgentSpec.groovy @@ -39,7 +39,7 @@ class ScalableTargetCachingAgentSpec extends Specification { .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) @Subject - ScalableTargetsCachingAgent agent = new ScalableTargetsCachingAgent(CommonCachingAgent.netflixAmazonCredentials, 'us-west-1', clientProvider, credentialsProvider, objectMapper) + ScalableTargetsCachingAgent agent = new ScalableTargetsCachingAgent(CommonCachingAgent.netflixAmazonCredentials, 'us-west-1', clientProvider, objectMapper) def 'should get a list of cloud watch alarms'() { given: diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/SecretCachingAgentSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/SecretCachingAgentSpec.groovy new file mode 100644 index 00000000000..b2bb86aa1ed --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/SecretCachingAgentSpec.groovy @@ -0,0 +1,90 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent + + +import com.amazonaws.services.secretsmanager.AWSSecretsManager +import com.amazonaws.services.secretsmanager.model.ListSecretsResult +import com.amazonaws.services.secretsmanager.model.SecretListEntry +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SECRETS + +class SecretCachingAgentSpec extends Specification { + def secretsManager = Mock(AWSSecretsManager) + def clientProvider = Mock(AmazonClientProvider) + def providerCache = Mock(ProviderCache) + + @Subject + SecretCachingAgent agent = new SecretCachingAgent(CommonCachingAgent.netflixAmazonCredentials, 'us-west-1', clientProvider) + + def 'should get a list of secrets'() { + given: + def account = 'test-account' + def region = 'us-west-1' + def givenSecrets = [] + def secretsEntries = [] + 0.upto(4, { + def secretName = "test-secret-${it}" + givenSecrets << new SecretListEntry( + name: secretName, + aRN: "arn:aws:secretsmanager:us-west-1:0123456789012:secret:${secretName}" + ) + }) + secretsManager.listSecrets(_) >> new ListSecretsResult().withSecretList(givenSecrets) + + when: + def retrievedSecrets = agent.fetchSecrets(secretsManager) + + then: + retrievedSecrets.containsAll(givenSecrets) + givenSecrets.containsAll(retrievedSecrets) + } + + def 'should generate fresh data'() { + given: + Set givenSecrets = [] + Set secretsEntries = [] + 0.upto(4, { + def secretName = "test-secret-${it}" + givenSecrets << new Secret( + account: 'test-account', + region: 'us-west-1', + name: secretName, + arn: "arn:aws:secretsmanager:us-west-1:0123456789012:secret:${secretName}" + ) + secretsEntries << new SecretListEntry( + name: secretName, + aRN: "arn:aws:secretsmanager:us-west-1:0123456789012:secret:${secretName}" + ) + }) + + when: + def cacheData = agent.generateFreshData(secretsEntries) + + then: + cacheData.size() == 1 + cacheData.get(SECRETS.ns).size() == givenSecrets.size() + givenSecrets*.account.containsAll(cacheData.get(SECRETS.ns)*.getAttributes().account) + givenSecrets*.region.containsAll(cacheData.get(SECRETS.ns)*.getAttributes().region) + givenSecrets*.name.containsAll(cacheData.get(SECRETS.ns)*.getAttributes().secretName) + givenSecrets*.arn.containsAll(cacheData.get(SECRETS.ns)*.getAttributes().secretArn) + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceDiscoveryCachingAgentSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceDiscoveryCachingAgentSpec.groovy new file mode 100644 index 00000000000..3c851c5fd7d --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceDiscoveryCachingAgentSpec.groovy @@ -0,0 +1,99 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. 
See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent + +import com.amazonaws.auth.AWSCredentialsProvider +import com.amazonaws.services.servicediscovery.AWSServiceDiscovery +import com.amazonaws.services.servicediscovery.model.ListServicesRequest +import com.amazonaws.services.servicediscovery.model.ListServicesResult +import com.amazonaws.services.servicediscovery.model.ServiceSummary +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICE_DISCOVERY_REGISTRIES + +class ServiceDiscoveryCachingAgentSpec extends Specification { + def serviceDiscovery = Mock(AWSServiceDiscovery) + def clientProvider = Mock(AmazonClientProvider) + def providerCache = Mock(ProviderCache) + def credentialsProvider = Mock(AWSCredentialsProvider) + def objectMapper = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + + @Subject + ServiceDiscoveryCachingAgent agent = new ServiceDiscoveryCachingAgent(CommonCachingAgent.netflixAmazonCredentials, 'us-west-1', clientProvider) + + def 'should get a list of service discovery registries'() { + given: + def givenServices = [] + 0.upto(4, { + def serviceName = "test-service-${it}" + def serviceId = "srv-${it}" + givenServices << new ServiceSummary( + name: serviceName, + id: serviceId, + arn: "arn:aws:servicediscovery:us-west-1:0123456789012:service/${serviceId}" + ) + }) + serviceDiscovery.listServices(_) >> new ListServicesResult().withServices(givenServices) + + when: + def retrievedServices = agent.fetchServices(serviceDiscovery) + + then: + retrievedServices.containsAll(givenServices) + givenServices.containsAll(retrievedServices) + } + + def 'should generate fresh data'() { + given: + Set givenServices = [] + Set servicesEntries = [] + 0.upto(4, { + def serviceName = "test-service-${it}" + def serviceId = "srv-${it}" + givenServices << new ServiceDiscoveryRegistry( + account: 'test-account', + region: 'us-west-1', + name: serviceName, + id: serviceId, + arn: "arn:aws:servicediscovery:us-west-1:0123456789012:service/${serviceId}" + ) + servicesEntries << new ServiceSummary( + name: serviceName, + id: serviceId, + arn: "arn:aws:servicediscovery:us-west-1:0123456789012:service/${serviceId}" + ) + }) + + when: + def cacheData = agent.generateFreshData(servicesEntries) + + then: + cacheData.size() == 1 + cacheData.get(SERVICE_DISCOVERY_REGISTRIES.ns).size() == givenServices.size() + givenServices*.account.containsAll(cacheData.get(SERVICE_DISCOVERY_REGISTRIES.ns)*.getAttributes().account) + givenServices*.region.containsAll(cacheData.get(SERVICE_DISCOVERY_REGISTRIES.ns)*.getAttributes().region) + givenServices*.name.containsAll(cacheData.get(SERVICE_DISCOVERY_REGISTRIES.ns)*.getAttributes().serviceName) + givenServices*.arn.containsAll(cacheData.get(SERVICE_DISCOVERY_REGISTRIES.ns)*.getAttributes().serviceArn) + givenServices*.id.containsAll(cacheData.get(SERVICE_DISCOVERY_REGISTRIES.ns)*.getAttributes().serviceId) + } +} diff --git 
a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TargetHealthCachingAgentSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TargetHealthCachingAgentSpec.groovy new file mode 100644 index 00000000000..fc1907da7e8 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TargetHealthCachingAgentSpec.groovy @@ -0,0 +1,243 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent + +import com.amazonaws.auth.AWSCredentialsProvider +import com.amazonaws.services.ecs.AmazonECS +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthRequest +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthResult +import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroupNotFoundException +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealth +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthStateEnum +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.aws.data.Keys +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider +import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsTargetHealth +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS +import spock.lang.Specification +import spock.lang.Subject + +class TargetHealthCachingAgentSpec extends Specification { + def ecs = Mock(AmazonECS) + def clientProvider = Mock(AmazonClientProvider) + def awsProviderCache = Mock(ProviderCache) + def credentialsProvider = Mock(AWSCredentialsProvider) + def amazonloadBalancing = Mock(AmazonElasticLoadBalancing) + def targetGroupArn = 'arn:aws:elasticloadbalancing:' + CommonCachingAgent.REGION + ':' + CommonCachingAgent.ACCOUNT_ID + ':targetgroup/test-tg/9e8997b7cff00c62' + ObjectMapper mapper = new ObjectMapper() + + @Subject + TargetHealthCachingAgent agent = + new TargetHealthCachingAgent(CommonCachingAgent.netflixAmazonCredentials, CommonCachingAgent.REGION, clientProvider, credentialsProvider, mapper) + + def setup() { + clientProvider.getAmazonElasticLoadBalancingV2(_, _, _) >> amazonloadBalancing + awsProviderCache.filterIdentifiers(_, _) >> [] + + def targetGroupAttributes = [ + loadBalancerNames: ['loadBalancerName'], + targetGroupArn: targetGroupArn, + targetGroupName: 'test-tg', + vpcId: 'vpc-id', + ] + def targetGroupKey = + Keys.getTargetGroupKey('test-tg', CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, 'ip', 'vpc-id') + def loadbalancerKey = + 
Keys.getLoadBalancerKey('loadBalancerName', CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, 'vpc-id', 'ip') + def relations = [loadBalancers: [loadbalancerKey]] + def targetGroupCacheData = + new DefaultCacheData(targetGroupKey, targetGroupAttributes, relations) + + awsProviderCache.getAll(TARGET_GROUPS.getNs(), _, _) >> Collections.singletonList(targetGroupCacheData) + } + + def 'should skip targetGroups with empty TargetHealthDescriptions'() { + when: + agent.setAwsCache(awsProviderCache) + def targetHealthList = agent.getItems(ecs, Mock(ProviderCache)) + + then: + // ELB response contains no TargetHealths + 1 * amazonloadBalancing.describeTargetHealth({ DescribeTargetHealthRequest request -> + request.targetGroupArn == targetGroupArn + }) >> new DescribeTargetHealthResult() + + targetHealthList.size() == 0 + } + + def 'should get a list of target health objects'() { + given: + def healthyTargetId = '10.0.0.3' + def unhealthyTargetId = '10.0.0.14' + + TargetHealthDescription targetHealth1 = + new TargetHealthDescription().withTarget( + new TargetDescription().withId(healthyTargetId).withPort(80)) + .withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)) + + TargetHealthDescription targetHealth2 = + new TargetHealthDescription().withTarget( + new TargetDescription().withId(unhealthyTargetId).withPort(80)) + .withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Unhealthy)) + + when: + agent.setAwsCache(awsProviderCache) + def targetHealthList = agent.getItems(ecs, Mock(ProviderCache)) + + then: + 1 * amazonloadBalancing.describeTargetHealth({ DescribeTargetHealthRequest request -> + request.targetGroupArn == targetGroupArn + }) >> new DescribeTargetHealthResult().withTargetHealthDescriptions(targetHealth1, targetHealth2) + + targetHealthList.size() == 1 + EcsTargetHealth targetHealth = targetHealthList.get(0) + targetHealth.getTargetGroupArn() == targetGroupArn + // Spock only implicitly asserts top-level expressions in a then block, so comparisons inside the loop need explicit asserts + for (TargetHealthDescription targetHealthDescription: targetHealth.getTargetHealthDescriptions()) { + assert targetHealthDescription.getTarget().getPort() == 80 + if (targetHealthDescription.getTarget().getId() == healthyTargetId) { + assert targetHealthDescription.getTargetHealth().getState() == TargetHealthStateEnum.Healthy.toString() + } else { + assert targetHealthDescription.getTargetHealth().getState() == TargetHealthStateEnum.Unhealthy.toString() + } + } + } + + def 'should raise exception if getItems() called before awsProviderCache is set'() { + when: + agent.getItems(ecs, Mock(ProviderCache)) + + then: + thrown NullPointerException + } + + def 'should catch and ignore TargetGroupNotFoundExceptions'() { + when: + agent.setAwsCache(awsProviderCache) + def targetHealthList = agent.getItems(ecs, Mock(ProviderCache)) + + then: + 1 * amazonloadBalancing.describeTargetHealth({ DescribeTargetHealthRequest request -> + request.targetGroupArn == targetGroupArn + }) >> { throw new TargetGroupNotFoundException("The specified target group does not exist.") } + + targetHealthList.size() == 0 + } + + def 'should describe and return target groups for matching account only'() { + given: + // set up correct account cache data + def targetGroupAttributes = [ + loadBalancerNames: ['loadBalancerName'], + targetGroupArn: targetGroupArn, + targetGroupName: 'test-tg', + vpcId: 'vpc-id', + ] + def targetGroupKey = + Keys.getTargetGroupKey('test-tg', CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, 'ip', 'vpc-id') + def loadbalancerKey = + Keys.getLoadBalancerKey('loadBalancerName', CommonCachingAgent.ACCOUNT,
CommonCachingAgent.REGION, 'vpc-id', 'ip') + def relations = [loadBalancers: [loadbalancerKey]] + def targetGroupCacheData = + new DefaultCacheData(targetGroupKey, targetGroupAttributes, relations) + + // set up other account cache data + def targetGroupArn2 = 'arn:aws:elasticloadbalancing:' + CommonCachingAgent.REGION + ':210987654321:targetgroup/other-tg/7uf491b7cff00c62' + def otherAccountName = "other-account" + def targetGroupAttributes2 = [ + loadBalancerNames: ['loadBalancerName'], + targetGroupArn: targetGroupArn2, + targetGroupName: 'other-tg', + vpcId: 'vpc-id', + ] + def targetGroupKey2 = + Keys.getTargetGroupKey('test-tg', otherAccountName, CommonCachingAgent.REGION, 'ip', 'vpc-id') + def loadbalancerKey2 = + Keys.getLoadBalancerKey('loadBalancerName', otherAccountName, CommonCachingAgent.REGION, 'vpc-id', 'ip') + def relations2 = [loadBalancers: [loadbalancerKey2]] + def targetGroupCacheData2 = + new DefaultCacheData(targetGroupKey2, targetGroupAttributes2, relations2) + + TargetHealthDescription targetHealth = + new TargetHealthDescription().withTarget( + new TargetDescription().withId('10.0.0.3').withPort(80)) + .withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)) + + // return cache data for both target groups + awsProviderCache.getAll(TARGET_GROUPS.getNs(), _, _) >> [targetGroupCacheData, targetGroupCacheData2] + + when: + agent.setAwsCache(awsProviderCache) + def targetHealthList = agent.getItems(ecs, Mock(ProviderCache)) + + then: + // expect one describe call, with correct target group + 1 * amazonloadBalancing.describeTargetHealth({ DescribeTargetHealthRequest request -> + request.targetGroupArn == targetGroupArn + }) >> new DescribeTargetHealthResult().withTargetHealthDescriptions(targetHealth) + + targetHealthList.size() == 1 + EcsTargetHealth targetHealthDescription = targetHealthList.get(0) + targetHealthDescription.getTargetGroupArn() == targetGroupArn + } + + def 'should handle null targetGroupArn in cache data'() { + given: + def targetGroupAttributes = [ + loadBalancerNames: ['loadBalancerName'], + targetGroupArn: null, + targetGroupName: null, + vpcId: 'vpc-id', + ] + def targetGroupKey = + Keys.getTargetGroupKey('test-tg', CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, 'ip', 'vpc-id') + def loadbalancerKey = + Keys.getLoadBalancerKey('loadBalancerName', CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, 'vpc-id', 'ip') + def relations = [loadBalancers: [loadbalancerKey]] + def targetGroupCacheData = + new DefaultCacheData(targetGroupKey, targetGroupAttributes, relations) + + + def badAwsProviderCache = Mock(ProviderCache) + badAwsProviderCache.filterIdentifiers(_, _) >> [] + badAwsProviderCache.getAll(TARGET_GROUPS.getNs(), _, _) >> [targetGroupCacheData] + + when: + agent.setAwsCache(badAwsProviderCache) + def targetHealthList = agent.getItems(ecs, Mock(ProviderCache)) + + then: + targetHealthList.size() == 0 + } + + def 'should return empty set when no target group cache data available'() { + given: + def emptyAwsProviderCache = Mock(ProviderCache) + emptyAwsProviderCache.filterIdentifiers(_, _) >> [] + emptyAwsProviderCache.getAll(TARGET_GROUPS.getNs(), _, _) >> [] + + when: + agent.setAwsCache(emptyAwsProviderCache) + def targetHealthList = agent.getItems(ecs, Mock(ProviderCache)) + + then: + targetHealthList.size() == 0 + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCacheSpec.groovy 
b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCacheSpec.groovy index e483307d452..e1a52d08e80 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCacheSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCacheSpec.groovy @@ -19,10 +19,13 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent import com.amazonaws.auth.AWSCredentialsProvider import com.amazonaws.services.ecs.AmazonECS import com.amazonaws.services.ecs.model.Container +import com.amazonaws.services.ecs.model.ContainerDefinition import com.amazonaws.services.ecs.model.LoadBalancer import com.amazonaws.services.ecs.model.NetworkBinding +import com.amazonaws.services.ecs.model.PortMapping import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthResult +import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealth import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthStateEnum @@ -30,14 +33,15 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.ecs.cache.Keys import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskHealthCacheClient import spock.lang.Specification import spock.lang.Subject import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TARGET_HEALTHS import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS class TaskHealthCacheSpec extends Specification { def ecs = Mock(AmazonECS) @@ -62,10 +66,13 @@ class TaskHealthCacheSpec extends Specification { def healthKey = Keys.getTaskHealthKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) def serviceKey = Keys.getServiceKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.SERVICE_NAME_1) def containerInstanceKey = Keys.getContainerInstanceKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.CONTAINER_INSTANCE_ARN_1) + def targetHealthKey = Keys.getTargetHealthKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, targetGroupArn) ObjectMapper mapper = new ObjectMapper() - Map containerMap = mapper.convertValue(new Container().withNetworkBindings(new NetworkBinding().withHostPort(1337)), Map.class) - Map loadbalancerMap = mapper.convertValue(new LoadBalancer().withTargetGroupArn(targetGroupArn), Map.class) + Map containerMap = mapper.convertValue(new Container().withNetworkBindings(new NetworkBinding().withContainerPort(1338).withHostPort(1338)), Map.class) + Map loadbalancerMap = mapper.convertValue(new LoadBalancer().withTargetGroupArn(targetGroupArn).withContainerPort(1338), Map.class) + Map targetHealthMap = mapper.convertValue( + new TargetHealthDescription().withTarget(new 
TargetDescription().withId(CommonCachingAgent.EC2_INSTANCE_ID_1).withPort(1338)).withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)), Map.class) def taskAttributes = [ taskId : CommonCachingAgent.TASK_ID_1, @@ -96,6 +103,14 @@ class TaskHealthCacheSpec extends Specification { def containerInstanceCache = new DefaultCacheData(containerInstanceKey, containerInstanceAttributes, Collections.emptyMap()) providerCache.get(Keys.Namespace.CONTAINER_INSTANCES.toString(), containerInstanceKey) >> containerInstanceCache + def targetHealthAttributes = [ + targetGroupArn : targetGroupArn, + targetHealthDescriptions : Collections.singletonList(targetHealthMap) + ] + + def targetHealthCache = new DefaultCacheData(targetHealthKey, targetHealthAttributes, Collections.emptyMap()) + providerCache.get(TARGET_HEALTHS.toString(), targetHealthKey) >> targetHealthCache + DescribeTargetHealthResult describeTargetHealthResult = new DescribeTargetHealthResult().withTargetHealthDescriptions( new TargetHealthDescription().withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)) ) @@ -103,6 +118,17 @@ class TaskHealthCacheSpec extends Specification { amazonloadBalancing.describeTargetHealth(_) >> describeTargetHealthResult providerCache.getAll(HEALTH.toString()) >> [] + Map containerDefinitionMap = mapper.convertValue(new ContainerDefinition().withPortMappings( + new PortMapping().withHostPort(1338) + ), Map.class) + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + containerDefinitions : [ containerDefinitionMap ] + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + when: def cacheResult = agent.loadData(providerCache) providerCache.get(HEALTH.toString(), healthKey) >> cacheResult.getCacheResults().get(HEALTH.toString()).iterator().next() diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgentSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgentSpec.groovy index 87f21eb0cfd..93113a21c1c 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgentSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskHealthCachingAgentSpec.groovy @@ -19,10 +19,12 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent import com.amazonaws.auth.AWSCredentialsProvider import com.amazonaws.services.ecs.AmazonECS import com.amazonaws.services.ecs.model.Container +import com.amazonaws.services.ecs.model.ContainerDefinition import com.amazonaws.services.ecs.model.LoadBalancer import com.amazonaws.services.ecs.model.NetworkBinding -import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing -import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetHealthResult +import com.amazonaws.services.ecs.model.NetworkInterface +import com.amazonaws.services.ecs.model.PortMapping +import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealth import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription 
import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthStateEnum @@ -31,51 +33,46 @@ import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.ecs.cache.Keys import com.netflix.spinnaker.clouddriver.ecs.cache.model.TaskHealth import spock.lang.Specification import spock.lang.Subject import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TARGET_HEALTHS import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS class TaskHealthCachingAgentSpec extends Specification { def ecs = Mock(AmazonECS) def clientProvider = Mock(AmazonClientProvider) def providerCache = Mock(ProviderCache) def credentialsProvider = Mock(AWSCredentialsProvider) + def targetGroupArn = 'arn:aws:elasticloadbalancing:' + CommonCachingAgent.REGION + ':' + CommonCachingAgent.ACCOUNT_ID + ':targetgroup/test-target-group/9e8997b7cff00c62' ObjectMapper mapper = new ObjectMapper() + @Subject TaskHealthCachingAgent agent = new TaskHealthCachingAgent(CommonCachingAgent.netflixAmazonCredentials, CommonCachingAgent.REGION, clientProvider, credentialsProvider, mapper) - def 'should get a list of task definitions'() { - given: - AmazonElasticLoadBalancing amazonloadBalancing = Mock(AmazonElasticLoadBalancing) - clientProvider.getAmazonElasticLoadBalancingV2(_, _, _) >> amazonloadBalancing - - def targetGroupArn = 'arn:aws:elasticloadbalancing:' + CommonCachingAgent.REGION + ':769716316905:targetgroup/test-target-group/9e8997b7cff00c62' + def setup() { - def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) def serviceKey = Keys.getServiceKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.SERVICE_NAME_1) def containerInstanceKey = Keys.getContainerInstanceKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.CONTAINER_INSTANCE_ARN_1) + def targetHealthKey = Keys.getTargetHealthKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, targetGroupArn) ObjectMapper mapper = new ObjectMapper() - Map containerMap = mapper.convertValue(new Container().withNetworkBindings(new NetworkBinding().withHostPort(1337)), Map.class) - Map loadbalancerMap = mapper.convertValue(new LoadBalancer().withTargetGroupArn(targetGroupArn), Map.class) + Map loadbalancerMap = mapper.convertValue(new LoadBalancer().withTargetGroupArn(targetGroupArn).withContainerPort(1338), Map.class) + Map targetHealthMap = mapper.convertValue( + new TargetHealthDescription().withTarget(new TargetDescription().withId(CommonCachingAgent.EC2_INSTANCE_ID_1).withPort(1338)).withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)), Map.class) + Map targetHealthMap2 = mapper.convertValue( + new TargetHealthDescription().withTarget(new TargetDescription().withId("192.168.0.100").withPort(1338)).withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)), Map.class) + + def targetHealths = new ArrayList<>(); + targetHealths.add(targetHealthMap) + targetHealths.add(targetHealthMap2) - def taskAttributes = [ - taskId : 
CommonCachingAgent.TASK_ID_1, - taskArn : CommonCachingAgent.TASK_ARN_1, - startedAt : new Date().getTime(), - containerInstanceArn: CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, - group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, - containers : Collections.singletonList(containerMap) - ] - def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) providerCache.filterIdentifiers(_, _) >> [] - providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) def serviceAttributes = [ loadBalancers : Collections.singletonList(loadbalancerMap), @@ -94,11 +91,359 @@ class TaskHealthCachingAgentSpec extends Specification { def containerInstanceCache = new DefaultCacheData(containerInstanceKey, containerInstanceAttributes, Collections.emptyMap()) providerCache.get(Keys.Namespace.CONTAINER_INSTANCES.toString(), containerInstanceKey) >> containerInstanceCache - DescribeTargetHealthResult describeTargetHealthResult = new DescribeTargetHealthResult().withTargetHealthDescriptions( - new TargetHealthDescription().withTargetHealth(new TargetHealth().withState(TargetHealthStateEnum.Healthy)) - ) + def targetHealthAttributes = [ + targetGroupArn : targetGroupArn, + targetHealthDescriptions : targetHealths + ] + + def targetHealthCache = new DefaultCacheData(targetHealthKey, targetHealthAttributes, Collections.emptyMap()) + providerCache.get(TARGET_HEALTHS.toString(), targetHealthKey) >> targetHealthCache + } + + def 'should get a list of task health'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkBindings(new NetworkBinding().withContainerPort(1338).withHostPort(1338)), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn : CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + Map containerDefinitionMap = mapper.convertValue(new ContainerDefinition().withPortMappings( + new PortMapping().withHostPort(1338) + ), Map.class) + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + containerDefinitions : [ containerDefinitionMap ] + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList.size() == 1 + TaskHealth taskHealth = taskHealthList.get(0) + taskHealth.getState() == 'Up' + taskHealth.getType() == 'loadBalancer' + taskHealth.getInstanceId() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getServiceName() == CommonCachingAgent.SERVICE_NAME_1 + taskHealth.getTaskArn() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getTaskId() == CommonCachingAgent.TASK_ID_1 + } + + def 'should get a list of task health with host port mapping of 0'() { + given: + 
ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue( + new Container().withNetworkBindings( + new NetworkBinding() + .withContainerPort(1338) + .withHostPort(1338)), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn : CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + Map containerDefinitionMap = mapper.convertValue(new ContainerDefinition().withPortMappings( + new PortMapping().withHostPort(0 ) + ), Map.class) + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + containerDefinitions : [ containerDefinitionMap ] + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList.size() == 1 + TaskHealth taskHealth = taskHealthList.get(0) + taskHealth.getState() == 'Up' + taskHealth.getType() == 'loadBalancer' + taskHealth.getInstanceId() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getServiceName() == CommonCachingAgent.SERVICE_NAME_1 + taskHealth.getTaskArn() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getTaskId() == CommonCachingAgent.TASK_ID_1 + } + + def 'should get a list of task health for tasks with multiple network bindings'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkBindings( + new NetworkBinding().withContainerPort(1337).withHostPort(1337), + new NetworkBinding().withContainerPort(1338).withHostPort(1338) + ), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn: CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + Map containerDefinitionMap = mapper.convertValue(new ContainerDefinition().withPortMappings( + new PortMapping().withHostPort(1338) + ), Map.class) + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + containerDefinitions : [ containerDefinitionMap ] + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + 
providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList.size() == 1 + TaskHealth taskHealth = taskHealthList.get(0) + taskHealth.getState() == 'Up' + taskHealth.getType() == 'loadBalancer' + taskHealth.getInstanceId() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getServiceName() == CommonCachingAgent.SERVICE_NAME_1 + taskHealth.getTaskArn() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getTaskId() == CommonCachingAgent.TASK_ID_1 + } + + def 'should skip tasks with a non-cached container instance'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkBindings(new NetworkBinding().withContainerPort(1337).withHostPort(1337)), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn: CommonCachingAgent.CONTAINER_INSTANCE_ARN_2, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList == [] + } + + def 'should get a list of task health for aws-vpc mode'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkInterfaces( + new NetworkInterface().withPrivateIpv4Address("192.168.0.100")), + Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + Map containerDefinitionMap = mapper.convertValue(new ContainerDefinition().withPortMappings( + new PortMapping().withContainerPort(1338) + ), Map.class) + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + containerDefinitions : [ containerDefinitionMap ] + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + + when: + def taskHealthList = agent.getItems(ecs, providerCache) - amazonloadBalancing.describeTargetHealth(_) >> describeTargetHealthResult + then: + taskHealthList.size() == 1 + TaskHealth taskHealth = taskHealthList.get(0) + taskHealth.getState() == 'Up' + taskHealth.getType() == 'loadBalancer' + taskHealth.getInstanceId() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getServiceName() == CommonCachingAgent.SERVICE_NAME_1 + taskHealth.getTaskArn() == CommonCachingAgent.TASK_ARN_1 + taskHealth.getTaskId() == 
CommonCachingAgent.TASK_ID_1 + } + + def 'should skip tasks with a non-cached task definition and aws-vpc mode'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkInterfaces( + new NetworkInterface().withPrivateIpv4Address("192.168.0.100")), + Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList == [] + } + + def 'should skip tasks with no networking'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container(), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn : CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1 + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList == [] + } + + def 'should skip tasks with null network bindings'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkBindings(null), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn : CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1 + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) 
>> taskDefCacheData + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList == [] + } + + def 'should skip tasks with null network interfaces'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap = mapper.convertValue(new Container().withNetworkInterfaces(null), Map.class) + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn : CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : Collections.singletonList(containerMap) + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1 + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData + + when: + def taskHealthList = agent.getItems(ecs, providerCache) + + then: + taskHealthList == [] + } + + def 'should get task health for task with some non-networked containers'() { + given: + ObjectMapper mapper = new ObjectMapper() + Map containerMap1 = mapper.convertValue(new Container().withName('noports'), Map.class) + Map containerMap2 = mapper.convertValue(new Container().withName('withports').withNetworkBindings( + new NetworkBinding().withContainerPort(1338).withHostPort(1338) + ), Map.class) + + def taskAttributes = [ + taskId : CommonCachingAgent.TASK_ID_1, + taskArn : CommonCachingAgent.TASK_ARN_1, + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + startedAt : new Date().getTime(), + containerInstanceArn: CommonCachingAgent.CONTAINER_INSTANCE_ARN_1, + group : 'service:' + CommonCachingAgent.SERVICE_NAME_1, + containers : [containerMap1, containerMap2] + ] + def taskKey = Keys.getTaskKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_ID_1) + def taskCacheData = new DefaultCacheData(taskKey, taskAttributes, Collections.emptyMap()) + providerCache.getAll(TASKS.toString(), _) >> Collections.singletonList(taskCacheData) + + Map containerDefinitionMap1 = mapper.convertValue(new ContainerDefinition().withName('noports'), Map.class) + Map containerDefinitionMap2 = mapper.convertValue(new ContainerDefinition().withName('withports').withPortMappings( + new PortMapping().withHostPort(1338) + ), Map.class) + def taskDefAttributes = [ + taskDefinitionArn : CommonCachingAgent.TASK_DEFINITION_ARN_1, + containerDefinitions : [ containerDefinitionMap1, containerDefinitionMap2 ] + ] + def taskDefKey = Keys.getTaskDefinitionKey(CommonCachingAgent.ACCOUNT, CommonCachingAgent.REGION, CommonCachingAgent.TASK_DEFINITION_ARN_1) + def taskDefCacheData = new DefaultCacheData(taskDefKey, taskDefAttributes, Collections.emptyMap()) + providerCache.get(TASK_DEFINITIONS.toString(), taskDefKey) >> taskDefCacheData when: def taskHealthList = agent.getItems(ecs, providerCache) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProviderSpec.groovy 
b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProviderSpec.groovy index efef272c016..cbb136bf751 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcrImageProviderSpec.groovy @@ -17,22 +17,25 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view import com.amazonaws.services.ecr.AmazonECR +import com.amazonaws.services.ecr.model.DescribeImagesRequest import com.amazonaws.services.ecr.model.DescribeImagesResult import com.amazonaws.services.ecr.model.ImageDetail +import com.amazonaws.services.ecr.model.ImageIdentifier import com.amazonaws.services.ecr.model.ListImagesResult import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.model.EcsDockerImage -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials import spock.lang.Specification +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Subject class EcrImageProviderSpec extends Specification { def amazonClientProvider = Mock(AmazonClientProvider) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def credentialsRepository = Mock(CredentialsRepository) @Subject - def provider = new EcrImageProvider(amazonClientProvider, accountCredentialsProvider) + def provider = new EcrImageProvider(amazonClientProvider, credentialsRepository) def 'should the handle url'() { given: @@ -77,8 +80,8 @@ class EcrImageProviderSpec extends Specification { def amazonECR = Mock(AmazonECR) amazonClientProvider.getAmazonEcr(_, _, _) >> amazonECR - accountCredentialsProvider.getAll() >> [TestCredential.named('')] - amazonECR.listImages(_) >> new ListImagesResult() + credentialsRepository.getAll() >> [new NetflixECSCredentials(TestCredential.named('')) ] + amazonECR.listImages(_) >> new ListImagesResult().withImageIds(Collections.emptyList()) amazonECR.describeImages(_) >> new DescribeImagesResult().withImageDetails(imageDetail) def expectedListOfImages = [new EcsDockerImage( @@ -115,8 +118,8 @@ class EcrImageProviderSpec extends Specification { def amazonECR = Mock(AmazonECR) amazonClientProvider.getAmazonEcr(_, _, _) >> amazonECR - accountCredentialsProvider.getAll() >> [TestCredential.named('')] - amazonECR.listImages(_) >> new ListImagesResult() + credentialsRepository.getAll() >> [new NetflixECSCredentials(TestCredential.named('')) ] + amazonECR.listImages(_) >> new ListImagesResult().withImageIds(Collections.emptyList()) amazonECR.describeImages(_) >> new DescribeImagesResult().withImageDetails(imageDetail) def expectedListOfImages = [new EcsDockerImage( @@ -133,6 +136,65 @@ class EcrImageProviderSpec extends Specification { retrievedListOfImages == expectedListOfImages } + def 'should find second credential when two share account ids'() { + given: + def region = 'us-east-1' + def repoName = 'repositoryname' + def accountId = '123456789012' + def tag = 'arbitrary-tag' + def digest = 'sha256:deadbeef785192c146085da66a4261e25e79a6210103433464eb7f79deadbeef' + def creationDate = new Date() + def url = accountId + '.dkr.ecr.' 
+ region + '.amazonaws.com/' + repoName + ':' + tag// + '@' + digest + def imageDetail = new ImageDetail( + repositoryName: repoName, + registryId: accountId, + imageDigest: digest, + imageTags: List.of(tag), + imagePushedAt: creationDate + ) + + Map region1 = Map.of( + 'name', 'eu-west-1', + 'availabilityZones', Arrays.asList('eu-west-1a', 'eu-west-1b', 'eu-west-1c') + ) + Map overrides1 = Map.of( + 'accountId', accountId, + 'regions', Arrays.asList(region1) + ) + + Map region2 = Map.of( + 'name', region, + 'availabilityZones', Arrays.asList('us-east-1a', 'us-east-1b', 'us-east-1c') + ) + Map overrides2 = Map.of( + 'accountId', accountId, + 'regions', Arrays.asList(region2) + ) + + credentialsRepository.getAll() >> [ + new NetflixECSCredentials(TestCredential.named('incorrect-region', overrides1)), + new NetflixECSCredentials(TestCredential.named('correct-region', overrides2))] + + def amazonECR = Mock(AmazonECR) + + amazonClientProvider.getAmazonEcr(_, _, _) >> amazonECR + amazonECR.listImages(_) >> new ListImagesResult().withImageIds(Collections.emptyList()) + amazonECR.describeImages(_) >> new DescribeImagesResult().withImageDetails(imageDetail) + + def expectedListOfImages = [new EcsDockerImage( + region: region, + imageName: accountId + '.dkr.ecr.' + region + '.amazonaws.com/' + repoName + '@' + digest, + amis: ['us-east-1': Collections.singletonList(digest)], + attributes: [creationDate: creationDate] + )] + + when: + def retrievedListOfImages = provider.findImage(url) + + then: + retrievedListOfImages == expectedListOfImages + } + def 'should throw exception due to malformed account'() { given: def region = 'us-west-1' @@ -189,13 +251,66 @@ class EcrImageProviderSpec extends Specification { def digest = 'sha256:deadbeef785192c146085da66a4261e25e79a6210103433464eb7f79deadbeef' def url = accountId + '.dkr.ecr.' + region + '.amazonaws.com/' + repoName + '@' + digest - accountCredentialsProvider.getAll() >> [TestCredential.named('')] + credentialsRepository.getAll() >> [new NetflixECSCredentials(TestCredential.named('')) ] when: provider.findImage(url) then: - final IllegalArgumentException error = thrown() - error.message == "The repository URI provided does not belong to a region that the credentials have access to or the region is not valid." + final com.netflix.spinnaker.kork.web.exceptions.NotFoundException error = thrown() + error.message == String.format("AWS account %s with region %s was not found. Please specify a valid account name and region", accountId, region) + } + + def 'should find the image in a repository with a large number of images'() { + given: + def tag = 'latest' + def region = 'us-west-1' + def repoName = 'too-many' + def accountId = '123456789012' + def digest = 'sha256:deadbeef785192c146085da66a4261e25e79a6210103433464eb7f79deadbeef' + def url = accountId + '.dkr.ecr.' 
+ region + '.amazonaws.com/' + repoName + ':' + tag + def imageId = new ImageIdentifier().withImageTag(tag).withImageDigest(digest) + def creationDate = new Date() + + def amazonECR = Mock(AmazonECR) + + amazonClientProvider.getAmazonEcr(_, _, _) >> amazonECR + credentialsRepository.getAll() >> [new NetflixECSCredentials(TestCredential.named('')) ] + + amazonECR.listImages(_) >>> [ + new ListImagesResult() + .withImageIds(new ImageIdentifier().withImageTag("notlatest1").withImageDigest("sha256:aaa")) + .withNextToken("next1"), + new ListImagesResult() + .withImageIds(new ImageIdentifier().withImageTag("notlatest2").withImageDigest("sha256:bbb")) + .withImageIds(imageId) + .withNextToken(null), + ] + amazonECR.describeImages( + new DescribeImagesRequest() + .withRegistryId(accountId) + .withRepositoryName(repoName) + .withImageIds(imageId) + ) >> new DescribeImagesResult() + .withImageDetails(new ImageDetail( + imageTags: [tag], + repositoryName: repoName, + registryId: accountId, + imageDigest: digest, + imagePushedAt: creationDate, + )) + + def expectedImages = [new EcsDockerImage( + region: region, + imageName: accountId + '.dkr.ecr.' + region + '.amazonaws.com/' + repoName + '@' + digest, + amis: ['us-west-1': Collections.singletonList(digest)], + attributes: [creationDate: creationDate], + )] + + when: + def retrievedListOfImages = provider.findImage(url) + + then: + retrievedListOfImages == expectedImages } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapperSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapperSpec.groovy index 782b84ae3c8..761652139e1 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapperSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsAccountMapperSpec.groovy @@ -16,28 +16,36 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials +import com.netflix.spinnaker.clouddriver.ecs.security.ECSCredentialsConfig import com.netflix.spinnaker.clouddriver.ecs.security.NetflixAssumeRoleEcsCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification class EcsAccountMapperSpec extends Specification { - - def accountCredentialsProvider = Mock(AccountCredentialsProvider) - def awsAccount = Mock(NetflixAssumeRoleAmazonCredentials) - def ecsAccount = Mock(NetflixAssumeRoleEcsCredentials) + def awsAccountName = "awsAccountNameHere" + def ecsAccountName = "ecsAccountNameHere" + def awsAccount = Mock(NetflixAmazonCredentials) { + getName() >> awsAccountName + } + def ecsAccount = Mock(NetflixAssumeRoleEcsCredentials) { + getName() >> ecsAccountName + getAwsAccount() >> awsAccountName + } + def credentialsRepository = Mock(CredentialsRepository) { + getOne(ecsAccountName) >> ecsAccount + } + def compositeCredentialsRepository = Mock(CompositeCredentialsRepository) { + getCredentials(awsAccountName, AmazonCloudProvider.ID) >> awsAccount + } def 'should map an AWS account to its ECS account'() { given: - awsAccount.name 
>> 'awsAccountNameHere' - ecsAccount.name >> 'ecsAccountNameHere' - ecsAccount.awsAccount >> awsAccount.name - - - def accounts = [ ecsAccount, awsAccount ] - accountCredentialsProvider.getAll() >> accounts + def ecsAccountMapper = new EcsAccountMapper(credentialsRepository, compositeCredentialsRepository) - def ecsAccountMapper = new EcsAccountMapper(accountCredentialsProvider) + ecsAccountMapper.addMapEntry(ecsAccount) when: def retrievedEcsAccount = ecsAccountMapper.fromAwsAccountNameToEcs(awsAccount.name) @@ -47,4 +55,42 @@ class EcsAccountMapperSpec extends Specification { retrievedEcsAccount.name == ecsAccount.name retrievedAwsAccount.name == awsAccount.name } + + def 'should map an AWS account name to its ECS account name'() { + given: + def ecsAccountMapper = new EcsAccountMapper(credentialsRepository, compositeCredentialsRepository) + ecsAccountMapper.addMapEntry(ecsAccount) + + when: + def retrievedEcsAccount = ecsAccountMapper.fromAwsAccountNameToEcsAccountName(awsAccount.name) + def retrievedAwsAccount = ecsAccountMapper.fromEcsAccountNameToAwsAccountName(ecsAccount.name) + + then: + retrievedEcsAccount == ecsAccount.name + retrievedAwsAccount == awsAccount.name + } + + def 'should remove AWS and ECS accounts'() { + given: + def ecsAccountMapper = new EcsAccountMapper(credentialsRepository, compositeCredentialsRepository) + ecsAccountMapper.addMapEntry(ecsAccount) + + when: + ecsAccountMapper.removeMapEntry(ecsAccountName) + + then: + !ecsAccountMapper.ecsCredentialsMap.containsKey(ecsAccountName) + !ecsAccountMapper.awsCredentialsMap.containsKey(awsAccount) + } + + def 'should return null if provided name is invalid'() { + given: + def ecsAccountMapper = new EcsAccountMapper(credentialsRepository, compositeCredentialsRepository) + + when: + def result = ecsAccountMapper.fromAwsAccountNameToEcs("invalid") + + then: + result == null + } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProviderSpec.groovy index 1b368ea12db..173e7821572 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsClusterProviderSpec.groovy @@ -17,11 +17,17 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view import com.netflix.spinnaker.cats.cache.Cache +import com.amazonaws.services.ecs.model.Cluster +import com.amazonaws.services.ecs.AmazonECS; +import com.amazonaws.services.ecs.model.DescribeClustersResult import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider import com.netflix.spinnaker.clouddriver.ecs.cache.Keys import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsCluster import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsClusterCachingAgent +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification import spock.lang.Subject @@ -30,9 +36,141 @@ class EcsClusterProviderSpec extends Specification { private static String REGION = 'us-west-1' private Cache cacheView = Mock(Cache) + private CredentialsRepository credentialsRepository; + private AmazonClientProvider mockAwsProvider @Subject private 
EcsClusterProvider ecsClusterProvider = new EcsClusterProvider(cacheView) + def 'should get multiple cluster descriptions'() { + given: + int numberOfClusters = 3 + Set clusterNames = new HashSet<>() + Collection cacheData = new HashSet<>() + Collection clustersResponse = new ArrayList<>() + Collection ecsClustersIdentifiers = new ArrayList<>() + clusterNames.add("example-app-test-Cluster-NSnYsTXmCfV2") + clusterNames.add("TestCluster") + clusterNames.add("spinnaker-deployment-cluster") + + for (int x = 0; x < numberOfClusters; x++) { + String clusterKey = Keys.getClusterKey(ACCOUNT, REGION, clusterNames[x]) + Map attributes = new HashMap<>() + ecsClustersIdentifiers.add(Keys.getClusterKey(ACCOUNT, REGION, clusterNames[x])) + attributes.put("account", ACCOUNT) + attributes.put("region", REGION) + attributes.put("clusterArn", "arn:aws:ecs:::cluster/" + clusterNames[x]) + attributes.put("clusterName", clusterNames[x]) + + cacheData.add(new DefaultCacheData(clusterKey, attributes, Collections.emptyMap())) + } + cacheView.filterIdentifiers(_, _) >> ecsClustersIdentifiers + cacheView.getAll(_, ecsClustersIdentifiers) >> cacheData + + for (int x = 0; x < numberOfClusters; x++) { + Cluster cluster = new Cluster() + .withCapacityProviders("FARGATE", "FARGATE_SPOT").withStatus("ACTIVE") + .withDefaultCapacityProviderStrategy().withPendingTasksCount(0) + .withActiveServicesCount(0).withClusterArn("arn:aws:ecs:::cluster/" + clusterNames[x]).withClusterName(clusterNames[x]) + + clustersResponse.add(cluster) + } + def credentials = Mock(NetflixECSCredentials) + def amazonEcs = Mock(AmazonECS) + mockAwsProvider = Mock(AmazonClientProvider) + credentialsRepository = Mock(CredentialsRepository) + + credentialsRepository.getOne(_) >> credentials + and: + ecsClusterProvider.credentialsRepository = credentialsRepository + + mockAwsProvider.getAmazonEcs(_, _, _) >> amazonEcs + and: + ecsClusterProvider.amazonClientProvider = mockAwsProvider + + amazonEcs.describeClusters(_) >> new DescribeClustersResult().withClusters(clustersResponse) + + when: + def ecsClusters = ecsClusterProvider.getEcsClusterDescriptions(ACCOUNT, REGION) + + then: + ecsClusters.size() == numberOfClusters + ecsClusters*.getClusterName().containsAll(clusterNames) + ecsClusters*.getCapacityProviders()*.get(0).contains("FARGATE") + ecsClusters*.getCapacityProviders()*.get(1).contains("FARGATE_SPOT") + } + + def 'should get multiple cluster descriptions filtered based on the account and region'() { + given: + int numberOfClusters = 3 + Set clusterNames = new HashSet<>() + Collection cacheData = new HashSet<>() + Collection clustersResponse = new ArrayList<>() + Collection ecsClustersIdentifiers = new ArrayList<>() + clusterNames.add("example-app-test-Cluster-NSnYsTXmCfV2") + clusterNames.add("TestCluster") + clusterNames.add("spinnaker-deployment-cluster") + + for (int x = 0; x < 2; x++) { + String clusterKey = Keys.getClusterKey(ACCOUNT, REGION, clusterNames[x]) + ecsClustersIdentifiers.add(Keys.getClusterKey(ACCOUNT, REGION, clusterNames[x])) + Map attributes = new HashMap<>() + attributes.put("account", ACCOUNT) + attributes.put("region", REGION) + attributes.put("clusterArn", "arn:aws:ecs:::cluster/" + clusterNames[x]) + attributes.put("clusterName", clusterNames[x]) + + cacheData.add(new DefaultCacheData(clusterKey, attributes, Collections.emptyMap())) + } + // Purposely add a cluster with a different region to the cache data.
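+ // This us-east-1 entry should be filtered out, since the test queries clusters for ACCOUNT in REGION (us-west-1).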
+ String clusterKey = Keys.getClusterKey(ACCOUNT, "us-east-1", clusterNames[2]) + Map attributes = new HashMap<>() + attributes.put("account", ACCOUNT) + attributes.put("region", "us-east-1") + attributes.put("clusterArn", "arn:aws:ecs:::cluster/" + clusterNames[2]) + attributes.put("clusterName", clusterNames[2]) + + cacheData.add(new DefaultCacheData(clusterKey, attributes, Collections.emptyMap())) + + cacheView.filterIdentifiers(_, _) >> ecsClustersIdentifiers + cacheView.getAll(_, ecsClustersIdentifiers) >> cacheData + + // Add only the two clusters that belong to the expected region to the response. + for (int x = 0; x < 2; x++) { + Cluster cluster = new Cluster() + .withCapacityProviders("FARGATE", "FARGATE_SPOT").withStatus("ACTIVE") + .withDefaultCapacityProviderStrategy().withPendingTasksCount(0) + .withActiveServicesCount(0).withClusterArn("arn:aws:ecs:::cluster/" + clusterNames[x]).withClusterName(clusterNames[x]) + + clustersResponse.add(cluster) + } + def credentials = Mock(NetflixECSCredentials) + def amazonEcs = Mock(AmazonECS) + mockAwsProvider = Mock(AmazonClientProvider) + credentialsRepository = Mock(CredentialsRepository) + + credentialsRepository.getOne(_) >> credentials + and: + ecsClusterProvider.credentialsRepository = credentialsRepository + + mockAwsProvider.getAmazonEcs(_, _, _) >> amazonEcs + and: + ecsClusterProvider.amazonClientProvider = mockAwsProvider + + amazonEcs.describeClusters(_) >> new DescribeClustersResult().withClusters(clustersResponse) + + when: + def ecsClusters = ecsClusterProvider.getEcsClusterDescriptions(ACCOUNT, REGION) + + then: + // numberOfClusters - 1 reflects that we added 3 clusters to the cache: two in us-west-1, the expected region, + // and one in us-east-1, which is filtered out by ecsClusterProvider.getEcsClusterDescriptions + ecsClusters.size() == numberOfClusters - 1 + ecsClusters*.getClusterName().contains(clusterNames[0]) + ecsClusters*.getClusterName().contains(clusterNames[1]) + ecsClusters*.getCapacityProviders()*.get(0).contains("FARGATE") + ecsClusters*.getCapacityProviders()*.get(1).contains("FARGATE_SPOT") + } + def 'should get no clusters'() { given: cacheView.getAll(_) >> Collections.emptySet() diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProviderSpec.groovy index c0c8d41d047..d73f782f413 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsLoadBalancerProviderSpec.groovy @@ -16,25 +16,46 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view +import com.amazonaws.services.ecs.model.LoadBalancer import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsLoadbalancerCacheClient +import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsTargetGroupCacheClient +import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsLoadBalancerCache +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service +import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsLoadBalancer +import com.netflix.spinnaker.clouddriver.ecs.model.loadbalancer.EcsTargetGroup import
com.netflix.spinnaker.clouddriver.ecs.security.ECSCredentialsConfig +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials import spock.lang.Specification import spock.lang.Subject class EcsLoadBalancerProviderSpec extends Specification { - def client = Mock(EcsLoadbalancerCacheClient) - def ecsCredentialsConfig = Mock(ECSCredentialsConfig) + def ECS_ACCOUNT = 'ecsAccount' + def AWS_ACCOUNT = 'awsAccount' + + def mockLBCache = Mock(EcsLoadbalancerCacheClient) + def mockServiceCache = Mock(ServiceCacheClient) + def mockTargetGroupCache = Mock(EcsTargetGroupCacheClient) + def accountMapper = Mock(EcsAccountMapper) + @Subject - def provider = new EcsLoadBalancerProvider(client, ecsCredentialsConfig) + def provider = new EcsLoadBalancerProvider( + mockLBCache, + accountMapper, + mockServiceCache, + mockTargetGroupCache) + + def setup() { + accountMapper.fromEcsAccountNameToAwsAccountName(ECS_ACCOUNT) >> AWS_ACCOUNT + } def 'should retrieve an empty list'() { when: def retrievedList = provider.list() then: - client.findAll() >> Collections.emptyList() + mockLBCache.findAll() >> Collections.emptyList() retrievedList.size() == 0 } @@ -42,7 +63,6 @@ class EcsLoadBalancerProviderSpec extends Specification { given: def expectedNumberOfLoadbalancers = 2 def givenList = [] - def accounts = [] (1..expectedNumberOfLoadbalancers).forEach() { givenList << new EcsLoadBalancerCache( account: 'test-account-' + it, @@ -64,20 +84,182 @@ class EcsLoadBalancerProviderSpec extends Specification { targetGroups: ['target-group-' + it], serverGroups: [] ) - - accounts << new ECSCredentialsConfig.Account( - name: 'test-account-' + it, - awsAccount: 'test-account-' + it - ) } - ecsCredentialsConfig.getAccounts() >> accounts when: def retrievedList = provider.list() then: - client.findAll() >> givenList + mockLBCache.findAll() >> givenList retrievedList.size() == expectedNumberOfLoadbalancers retrievedList*.getName().containsAll(givenList*.loadBalancerName) } + + def 'should retrieve application load balancers'() { + given: + def applicationName = 'myEcsApp' + def tgArn1 = 'arn:aws:elasticloadbalancing:us-west-1:1234567890:targetgroup/test-tg-1/2136bac' + def tgArn2 = 'arn:aws:elasticloadbalancing:us-west-1:1234567890:targetgroup/test-tg-2/2136bac' + + // define 2 ports to expose on our task + LoadBalancer ecsLb1 = new LoadBalancer() + ecsLb1.setContainerName("container-name") + ecsLb1.setContainerPort(8080) + ecsLb1.setTargetGroupArn(tgArn1) + + LoadBalancer ecsLb2 = new LoadBalancer() + ecsLb2.setContainerName("container-name") + ecsLb2.setContainerPort(443) + ecsLb2.setTargetGroupArn(tgArn2) + + // add these to our service + Service ecsService = new Service() + ecsService.setServiceName('ecs-test-detail-000v') + ecsService.setServiceArn('arn:aws:ecs:service/ecs-test-detail-000v') + ecsService.setLoadBalancers([ecsLb1, ecsLb2]) + ecsService.setAccount('test-account') + ecsService.setApplicationName(applicationName) + + // mock the cache entries for the TGs and their associated LBs + EcsTargetGroup ecsTg1 = new EcsTargetGroup() + ecsTg1.setTargetGroupArn(tgArn1) + ecsTg1.setTargetGroupName('test-tg-1') + + EcsTargetGroup ecsTg2 = new EcsTargetGroup() + ecsTg2.setTargetGroupArn(tgArn2) + ecsTg2.setTargetGroupName('test-tg-2') + + EcsLoadBalancerCache ecsLoadBalancerCache1 = new EcsLoadBalancerCache( + account: 'test-account', + region: 'us-west-2', + loadBalancerArn: 'arn:1', + loadBalancerType: 'application', + cloudProvider: EcsCloudProvider.ID, + listeners: [], + scheme: 'scheme', + 
availabilityZones: [], + ipAddressType: 'ipv4', + loadBalancerName: 'load-balancer-name1', + canonicalHostedZoneId: 'zone-id', + vpcId: 'vpc-id', + dnsname: 'dns-name', + createdTime: System.currentTimeMillis(), + subnets: [], + securityGroups: [], + targetGroups: [ecsTg1.getTargetGroupName()], + serverGroups: [] + ) + EcsLoadBalancerCache ecsLoadBalancerCache2 = new EcsLoadBalancerCache( + account: 'test-account', + region: 'us-west-2', + loadBalancerArn: 'arn:2', + loadBalancerType: 'application', + cloudProvider: EcsCloudProvider.ID, + listeners: [], + scheme: 'scheme', + availabilityZones: [], + ipAddressType: 'ipv4', + loadBalancerName: 'load-balancer-name2', + canonicalHostedZoneId: 'zone-id', + vpcId: 'vpc-id', + dnsname: 'dns-name', + createdTime: System.currentTimeMillis(), + subnets: [], + securityGroups: [], + targetGroups: [ecsTg2.getTargetGroupName()], + serverGroups: [] + ) + + when: + def loadBalancerList = provider.getApplicationLoadBalancers(applicationName) + + then: + mockServiceCache.getAll(_) >> Collections.singletonList(ecsService) + mockTargetGroupCache.getAllKeys() >> ['fake-tg-key-1', 'fake-tg-key-2'] + mockTargetGroupCache.find(_) >> [ecsTg1, ecsTg2] + mockLBCache.findWithTargetGroups(_) >> [ecsLoadBalancerCache1, ecsLoadBalancerCache2] + + loadBalancerList.size() == 2 + // Spock only implicitly asserts top-level expressions in a then: block, so conditions inside a loop need explicit asserts. + for (EcsLoadBalancer lb : loadBalancerList) { + assert lb.targetGroupServices.size() == 1 + assert lb.targetGroups.size() == 1 + + def tgArn = lb.targetGroups[0].getTargetGroupArn() + assert lb.targetGroupServices[tgArn][0] == ecsService.getServiceName() + } + } + + def 'should retrieve load balancers mapped to multiple services'() { + given: + def applicationName = 'myEcsApp' + def tgArn = 'arn:aws:elasticloadbalancing:us-west-1:1234567890:targetgroup/test-tg-1/2136bac' + + // define 2 ecs services load balanced behind the same target group + LoadBalancer service1lb = new LoadBalancer() + service1lb.setContainerName("container-name") + service1lb.setContainerPort(8080) + service1lb.setTargetGroupArn(tgArn) + + Service ecsService1 = new Service() + ecsService1.setServiceName('ecs-test-one-000v') + ecsService1.setServiceArn('arn:aws:ecs:service/ecs-test-one-000v') + ecsService1.setLoadBalancers([service1lb]) + ecsService1.setAccount('test-account') + ecsService1.setApplicationName(applicationName) + + LoadBalancer service2lb = new LoadBalancer() + service2lb.setContainerName("container-name") + service2lb.setContainerPort(8080) + service2lb.setTargetGroupArn(tgArn) + + Service ecsService2 = new Service() + ecsService2.setServiceName('ecs-test-two-000v') + ecsService2.setServiceArn('arn:aws:ecs:service/ecs-test-two-000v') + ecsService2.setLoadBalancers([service2lb]) + ecsService2.setAccount('test-account') + ecsService2.setApplicationName(applicationName) + + // mock the cache entries for the TG and associated LB + EcsTargetGroup ecsTg = new EcsTargetGroup() + ecsTg.setTargetGroupArn(tgArn) + ecsTg.setTargetGroupName('test-tg-1') + + EcsLoadBalancerCache ecsLoadBalancerCache = new EcsLoadBalancerCache( + account: 'test-account', + region: 'us-west-2', + loadBalancerArn: 'arn:1', + loadBalancerType: 'application', + cloudProvider: EcsCloudProvider.ID, + listeners: [], + scheme: 'scheme', + availabilityZones: [], + ipAddressType: 'ipv4', + loadBalancerName: 'load-balancer-name1', + canonicalHostedZoneId: 'zone-id', + vpcId: 'vpc-id', + dnsname: 'dns-name', + createdTime: System.currentTimeMillis(), + subnets: [], + securityGroups: [], + targetGroups: [ecsTg.getTargetGroupName()], + serverGroups: [] + ) + + when: + def
loadBalancerList = provider.getApplicationLoadBalancers(applicationName) + + then: + mockServiceCache.getAll(_) >> [ecsService1, ecsService2] + mockTargetGroupCache.getAllKeys() >> ['fake-tg-key-1'] + mockTargetGroupCache.find(_) >> [ecsTg] + mockLBCache.findWithTargetGroups(_) >> Collections.singletonList(ecsLoadBalancerCache) + + loadBalancerList.size() == 1 + + def lb = loadBalancerList[0] + lb.targetGroupServices.size() == 1 + lb.targetGroups.size() == 1 + def services = lb.targetGroupServices[tgArn] + services.size() == 2 + } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecretProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecretProviderSpec.groovy new file mode 100644 index 00000000000..d3a152da290 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecretProviderSpec.groovy @@ -0,0 +1,108 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.view + +import com.amazonaws.services.secretsmanager.model.SecretListEntry +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys +import com.netflix.spinnaker.clouddriver.ecs.cache.model.Secret +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.SecretCachingAgent +import spock.lang.Specification +import spock.lang.Subject + +class EcsSecretProviderSpec extends Specification { + private static String ACCOUNT = 'test-account' + private static String REGION = 'us-west-1' + + private Cache cacheView = Mock(Cache) + @Subject + private EcsSecretProvider secretProvider = new EcsSecretProvider(cacheView) + + def 'should get no secrets'() { + given: + cacheView.getAll(_) >> Collections.emptySet() + + when: + def secrets = secretProvider.getAllSecrets() + + then: + secrets.size() == 0 + } + + def 'should get a secret'() { + given: + def secretName = "my-secret" + def secretArn = "arn:aws:secretsmanager:" + REGION + ":012345678910:secret:" + secretName + def key = Keys.getSecretKey(ACCOUNT, REGION, secretName) + + HashSet keys = [key] + + SecretListEntry secretEntry = new SecretListEntry( + name: secretName, + aRN: secretArn + ) + + def attributes = SecretCachingAgent.convertSecretToAttributes(ACCOUNT, REGION, secretEntry) + def cacheData = new HashSet() + cacheData.add(new DefaultCacheData(key, attributes, Collections.emptyMap())) + + cacheView.getAll(_) >> cacheData + + when: + Collection secrets = secretProvider.getAllSecrets() + + then: + secrets.size() == 1 + secrets[0].getName() == secretName + } + + def 'should get multiple secrets'() { + given: + int numberOfSecrets = 5 + Set secretNames = new HashSet<>() + Collection cacheData = new HashSet<>() + Set keys = new HashSet<>() + + numberOfSecrets.times { x -> + 
String secretName = "test-secret-" + x + String secretArn = "arn:aws:secretsmanager:" + REGION + ":012345678910:secret:" + secretName + String key = Keys.getSecretKey(ACCOUNT, REGION, secretName) + + keys.add(key) + secretNames.add(secretName) + + SecretListEntry secretEntry = new SecretListEntry( + name: secretName, + aRN: secretArn + ) + + Map attributes = SecretCachingAgent.convertSecretToAttributes(ACCOUNT, REGION, secretEntry) + cacheData.add(new DefaultCacheData(key, attributes, Collections.emptyMap())) + } + + cacheView.getAll(_) >> cacheData + + when: + Collection secrets = secretProvider.getAllSecrets() + + then: + secrets.size() == numberOfSecrets + secretNames.containsAll(secrets*.getName()) + secrets*.getName().containsAll(secretNames) + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProviderSpec.groovy new file mode 100644 index 00000000000..8a71722f1f9 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsSecurityGroupProviderSpec.groovy @@ -0,0 +1,155 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.view + +import com.netflix.spinnaker.clouddriver.aws.model.AmazonSecurityGroup +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonSecurityGroupProvider +import com.netflix.spinnaker.clouddriver.ecs.model.EcsSecurityGroup +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials +import spock.lang.Specification +import spock.lang.Subject + +class EcsSecurityGroupProviderSpec extends Specification { + + public static final String ECS_ACCOUNT = 'ecsAccount' + public static final String AWS_ACCOUNT = 'awsAccount' + public static final String REGION = 'us-west-2' + public static final String SG_ID_1 = 'sg-aa123456' + public static final String SG_ID_2 = 'sg-bb123123' + public static final String VPC_ID = 'vpc-1' + public static final String SG_NAME_1 = 'hello' + public static final String SG_NAME_2 = 'world' + + def accountMapper = Mock(EcsAccountMapper) + def ecsCreds = Mock(NetflixECSCredentials) + def securityGroupProvider = Mock(AmazonSecurityGroupProvider) + def primitiveConverter = new AmazonPrimitiveConverter(accountMapper) + + @Subject + def provider = new EcsSecurityGroupProvider(primitiveConverter, securityGroupProvider, accountMapper) + + def sg1 = new AmazonSecurityGroup( + accountName: AWS_ACCOUNT, + region: REGION, + name: SG_NAME_1, + vpcId: VPC_ID, + id: SG_ID_1) + + def sg2 = new AmazonSecurityGroup( + accountName: AWS_ACCOUNT, + region: REGION, + name: SG_NAME_2, + vpcId: VPC_ID, + id: SG_ID_2) + + def ecsSg1 = new EcsSecurityGroup( + SG_ID_1, + SG_NAME_1, + VPC_ID, + null, + null, + ECS_ACCOUNT, + null, + REGION, + null, + null + ) + + def ecsSg2 = new EcsSecurityGroup( + SG_ID_2, + SG_NAME_2, + VPC_ID, + null, + null, + ECS_ACCOUNT, + null, + REGION, + null, + null + ) + + def setup() { + ecsCreds.getName() >> ECS_ACCOUNT + accountMapper.fromEcsAccountNameToAwsAccountName(ECS_ACCOUNT) >> AWS_ACCOUNT + accountMapper.fromAwsAccountNameToEcs(AWS_ACCOUNT) >> ecsCreds + } + + def 'should get all security groups from the AWS provider and convert them to ECS security groups'() { + given: + securityGroupProvider.getAll(true) >> [sg1, sg2] + + when: + def ecsSGs = provider.getAll(true) + + then: + ecsSGs.sort() == [ecsSg1, ecsSg2].sort() + } + + def 'should get all security groups for region from the AWS provider and convert them to ECS security groups'() { + given: + securityGroupProvider.getAllByRegion(true, REGION) >> [sg1, sg2] + + when: + def ecsSGs = provider.getAllByRegion(true, REGION) + + then: + ecsSGs.sort() == [ecsSg1, ecsSg2].sort() + } + + def 'should get all security groups from the AWS provider for the AWS account associated with the ECS account'() { + given: + securityGroupProvider.getAllByAccount(true, AWS_ACCOUNT) >> [sg1, sg2] + + when: + def ecsSGs = provider.getAllByAccount(true, ECS_ACCOUNT) + + then: + ecsSGs.sort() == [ecsSg1, ecsSg2].sort() + } + + def 'should get all security groups from the AWS provider for the AWS account associated with the ECS account and security group name'() { + given: + securityGroupProvider.getAllByAccountAndName(true, AWS_ACCOUNT, SG_NAME_1) >> [sg1] + + when: + def ecsSGs = provider.getAllByAccountAndName(true, ECS_ACCOUNT, SG_NAME_1) + + then: + ecsSGs.sort() == [ecsSg1].sort() + } + + def 'should get all security groups from the AWS provider for the AWS account associated with the ECS account and region'() { + given: + securityGroupProvider.getAllByAccountAndRegion(true, AWS_ACCOUNT, REGION) >> [sg1, sg2] + + when: + def ecsSGs = 
provider.getAllByAccountAndRegion(true, ECS_ACCOUNT, REGION) + + then: + ecsSGs.sort() == [ecsSg1, ecsSg2].sort() + } + + def 'should get security group from the AWS provider by AWS account name, region, security group name, and VPC ID'() { + given: + securityGroupProvider.get(AWS_ACCOUNT, REGION, SG_NAME_1, VPC_ID) >> sg1 + + when: + def ecsSG = provider.get(ECS_ACCOUNT, REGION, SG_NAME_1, VPC_ID) + + then: + ecsSG == ecsSg1 + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProviderSpec.groovy index 70599c220af..857a8660b70 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServerClusterProviderSpec.groovy @@ -17,14 +17,17 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.view import com.amazonaws.services.applicationautoscaling.model.ScalableTarget +import com.amazonaws.services.ec2.model.GroupIdentifier import com.amazonaws.services.ec2.model.Instance import com.amazonaws.services.ec2.model.Placement import com.amazonaws.services.ecs.model.* import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.frigga.Names import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials import com.netflix.spinnaker.clouddriver.ecs.cache.Keys import com.netflix.spinnaker.clouddriver.ecs.cache.client.* import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsLoadBalancerCache @@ -33,9 +36,14 @@ import com.netflix.spinnaker.clouddriver.ecs.model.EcsServerGroup import com.netflix.spinnaker.clouddriver.ecs.model.EcsTask import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceCachingAgent import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskCachingAgent +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TestServiceCachingAgentFactory import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService +import com.netflix.spinnaker.clouddriver.ecs.services.SubnetSelector import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.credentials.CredentialsRepository import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.moniker.Moniker import spock.lang.Specification import spock.lang.Subject @@ -50,12 +58,14 @@ class EcsServerClusterProviderSpec extends Specification { def taskDefinitionCacheClient = Mock(TaskDefinitionCacheClient) def ecsLoadbalancerCacheClient = Mock(EcsLoadbalancerCacheClient) def ecsCloudWatchAlarmCacheClient = Mock(EcsCloudWatchAlarmCacheClient) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def credentialsRepository = Mock(CredentialsRepository) def containerInformationService = Mock(ContainerInformationService) + def subnetSelector = Mock(SubnetSelector) @Subject - def provider = new EcsServerClusterProvider(accountCredentialsProvider, + def provider = new EcsServerClusterProvider(credentialsRepository, containerInformationService, 
+ subnetSelector, taskCacheClient, serviceCacheClient, scalableTargetCacheClient, @@ -63,25 +73,38 @@ class EcsServerClusterProviderSpec extends Specification { taskDefinitionCacheClient, ecsCloudWatchAlarmCacheClient) - def 'should produce an ecs cluster'() { - given: - def applicationName = 'myapp' + Service cachedService + TaskDefinition cachedTaskDefinition + Instance ec2Instance + EcsServerCluster expectedCluster + EcsServerCluster expectedCluster2 + + private static final FAMILY_NAME = 'myapp-kcats-liated' + private static final CREDS_NAME = 'test' + private static final CREDS_NAME_2 = 'test2' + + def setup() { def taskId = 'task-id' def ip = '127.0.0.0' def region = 'us-west-1' def availabilityZone = "${region}a" - def familyName = "${applicationName}-kcats-liated" - def serviceName = "${familyName}-v007" + def serviceName = "${FAMILY_NAME}-v007" def startedAt = new Date() - def creds = Mock(AmazonCredentials) + def creds = Mock(NetflixECSCredentials) creds.getCloudProvider() >> 'ecs' - creds.getName() >> 'test' + creds.getName() >> CREDS_NAME creds.getRegions() >> [new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1b', 'us-east-1c', 'us-east-1d']), new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1b', 'us-west-1c', 'us-west-1d'])] + def creds2 = Mock(NetflixECSCredentials) + creds2.getCloudProvider() >> 'ecs' + creds2.getName() >> CREDS_NAME_2 + creds2.getRegions() >> [new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1b', 'us-east-1c', 'us-east-1d']), + new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1b', 'us-west-1c', 'us-west-1d'])] + - def cachedService = new Service( + cachedService = new Service( serviceName: serviceName, deploymentConfiguration: new DeploymentConfiguration(minimumHealthyPercent: 0, maximumPercent: 100), createdAt: startedAt, @@ -107,13 +130,17 @@ class EcsServerClusterProviderSpec extends Specification { type : 'loadbalancer' ] - def ec2Instance = new Instance( + ec2Instance = new Instance( placement: new Placement( availabilityZone: availabilityZone - ) + ), + vpcId: 'vpc-1234', + securityGroups: [new GroupIdentifier ( + groupId: 'sg-1234' + )] ) - def taskDefinition = new TaskDefinition( + cachedTaskDefinition = new TaskDefinition( containerDefinitions: [ new ContainerDefinition( image: 'my-image', @@ -134,41 +161,229 @@ class EcsServerClusterProviderSpec extends Specification { def ecsServerGroupEast = makeEcsServerGroup(serviceName, 'us-east-1', startedAt.getTime(), taskId, healthStatus, ip) def ecsServerGroupWest = makeEcsServerGroup(serviceName, 'us-west-1', startedAt.getTime(), taskId, healthStatus, ip) - def expectedCluster = new EcsServerCluster() + expectedCluster = new EcsServerCluster() expectedCluster.setAccountName(creds.getName()) - expectedCluster.setName(familyName) - expectedCluster.setServerGroups(new HashSet([ecsServerGroupEast, ecsServerGroupWest])) + expectedCluster.setName(FAMILY_NAME) + expectedCluster.setServerGroups(new LinkedHashSet([ecsServerGroupEast, ecsServerGroupWest])) expectedCluster.setLoadBalancers(Collections.singleton(loadbalancer)) + expectedCluster2 = new EcsServerCluster() + expectedCluster2.setAccountName(creds2.getName()) + expectedCluster2.setName(FAMILY_NAME) + expectedCluster2.setServerGroups(new LinkedHashSet([ecsServerGroupEast, ecsServerGroupWest])) + expectedCluster2.setLoadBalancers(Collections.singleton(loadbalancer)) - def serviceAttributes = ServiceCachingAgent.convertServiceToAttributes(creds.getName(), creds.getRegions()[0].getName(), cachedService) + def serviceAttributes = 
TestServiceCachingAgentFactory.create(creds, creds.getRegions()[0].getName()).convertServiceToAttributes(cachedService) + def serviceAttributes2 = TestServiceCachingAgentFactory.create(creds2, creds.getRegions()[0].getName()).convertServiceToAttributes(cachedService) def taskAttributes = TaskCachingAgent.convertTaskToAttributes(task) def serviceCacheData = new DefaultCacheData('', serviceAttributes, [:]) + def serviceCacheData2 = new DefaultCacheData('', serviceAttributes2, [:]) def taskCacheData = new DefaultCacheData('', taskAttributes, [:]) - accountCredentialsProvider.getAll() >> [creds] + credentialsRepository.getAll() >> [creds, creds2] + credentialsRepository.getOne(creds.getName()) >> creds + credentialsRepository.getOne(creds2.getName()) >> creds2 ecsLoadbalancerCacheClient.find(_, _) >> [loadbalancer] containerInformationService.getTaskPrivateAddress(_, _, _) >> "${ip}:1337" containerInformationService.getHealthStatus(_, _, _, _) >> [healthStatus] containerInformationService.getEc2Instance(_, _, _) >> ec2Instance - containerInformationService.getTaskZone(_, _, _) >> availabilityZone - taskDefinitionCacheClient.get(_) >> taskDefinition + containerInformationService.getTaskZone(_, _, _) >> 'us-west-1a' + taskDefinitionCacheClient.get(_) >> cachedTaskDefinition scalableTargetCacheClient.get(_) >> scalableTarget - ecsCloudWatchAlarmCacheClient.getMetricAlarms(_, _, _) >> [] + ecsCloudWatchAlarmCacheClient.getMetricAlarms(_, _, _, _) >> [] + subnetSelector.getSubnetVpcIds(_, _, _) >> ['vpc-1234'] cacheView.filterIdentifiers(_, _) >> ['key'] - cacheView.getAll(Keys.Namespace.SERVICES.ns, _) >> [serviceCacheData] + cacheView.getAll(Keys.Namespace.SERVICES.ns, _) >> [serviceCacheData, serviceCacheData2] cacheView.getAll(Keys.Namespace.TASKS.ns, _) >> [taskCacheData] + cacheView.get(Keys.Namespace.TASKS.ns, _) >> taskCacheData + } + + def 'should produce an ecs cluster'() { + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + retrievedCluster == expectedCluster + } + + def 'should produce an ecs cluster with VPC network configuration'() { + given: + def creds = Mock(NetflixECSCredentials) + creds.getCloudProvider() >> 'ecs' + creds.getName() >> CREDS_NAME + creds.getRegions() >> [new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1b', 'us-east-1c', 'us-east-1d']), + new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1b', 'us-west-1c', 'us-west-1d'])] + + cachedService.networkConfiguration = new NetworkConfiguration( + awsvpcConfiguration: new AwsVpcConfiguration( + subnets: ['subnet-1234'], + securityGroups: ['sg-1234'] + ) + ) + + def serviceAttributes = TestServiceCachingAgentFactory.create(creds, creds.getRegions()[0].getName()).convertServiceToAttributes(cachedService) + def serviceCacheData = new DefaultCacheData('', serviceAttributes, [:]) + + ec2Instance.vpcId = 'vpc-wrong' + ec2Instance.securityGroups = [new GroupIdentifier(groupId: 'sg-wrong')] + + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + cacheView.getAll(Keys.Namespace.SERVICES.ns, _) >> [serviceCacheData] + retrievedCluster == expectedCluster + } + + def 'should produce an ecs cluster using Fargate'() { + given: + def creds = Mock(NetflixECSCredentials) + creds.getCloudProvider() >> 'ecs' + creds.getName() >> CREDS_NAME + creds.getRegions() >> [new AmazonCredentials.AWSRegion('us-east-1', ['us-east-1b', 'us-east-1c', 'us-east-1d']), + new AmazonCredentials.AWSRegion('us-west-1', ['us-west-1b', 'us-west-1c', 
'us-west-1d'])] + + cachedService.networkConfiguration = new NetworkConfiguration( + awsvpcConfiguration: new AwsVpcConfiguration( + subnets: ['subnet-1234'], + securityGroups: ['sg-1234'] + ) + ) + def serviceAttributes = TestServiceCachingAgentFactory.create(creds, creds.getRegions()[0].getName()).convertServiceToAttributes(cachedService) + def serviceCacheData = new DefaultCacheData('', serviceAttributes, [:]) + + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + cacheView.getAll(Keys.Namespace.SERVICES.ns, _) >> [serviceCacheData] + containerInformationService.getEc2Instance(_, _, _) >> null + retrievedCluster == expectedCluster + } + + def 'should produce an ecs cluster with hard memory limit'() { + given: + cachedTaskDefinition = new TaskDefinition( + containerDefinitions: [ + new ContainerDefinition( + image: 'my-image', + environment: [], + portMappings: [new PortMapping(containerPort: 1337)], + memory: 256, + cpu: 123 + ) + ] + ) + for (serverGroup in expectedCluster.serverGroups) { + EcsServerGroup ecsServerGroup = serverGroup + ecsServerGroup.taskDefinition.memoryLimit = 256 + ecsServerGroup.taskDefinition.memoryReservation = 0 + } when: - def retrievedCluster = provider.getCluster("myapp", creds.getName(), familyName) + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) then: + taskDefinitionCacheClient.get(_) >> cachedTaskDefinition retrievedCluster == expectedCluster } + def 'should produce an ecs cluster with CPU and memory set at task level'() { + given: + cachedTaskDefinition = new TaskDefinition( + memory: '256', + cpu: '123', + containerDefinitions: [ + new ContainerDefinition( + image: 'my-image', + environment: [], + portMappings: [new PortMapping(containerPort: 1337)] + ) + ] + ) + for (serverGroup in expectedCluster.serverGroups) { + EcsServerGroup ecsServerGroup = serverGroup + ecsServerGroup.taskDefinition.memoryLimit = 256 + ecsServerGroup.taskDefinition.memoryReservation = 0 + } + + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + taskDefinitionCacheClient.get(_) >> cachedTaskDefinition + retrievedCluster == expectedCluster + } + + def 'should produce an ecs cluster with zero port mappings'() { + given: + cachedTaskDefinition = new TaskDefinition( + containerDefinitions: [ + new ContainerDefinition( + image: 'my-image', + environment: [], + memoryReservation: 256, + cpu: 123 + ) + ] + ) + for (serverGroup in expectedCluster.serverGroups) { + EcsServerGroup ecsServerGroup = serverGroup + ecsServerGroup.taskDefinition.containerPort = 0 + } + + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + taskDefinitionCacheClient.get(_) >> cachedTaskDefinition + retrievedCluster == expectedCluster + } + + def 'should produce an ecs cluster with unknown VPC for missing EC2 instance'() { + given: + for (serverGroup in expectedCluster.serverGroups) { + EcsServerGroup ecsServerGroup = serverGroup + ecsServerGroup.vpcId = 'None' + ecsServerGroup.securityGroups = [] + } + + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + containerInformationService.getEc2Instance(_, _, _) >> null + retrievedCluster == expectedCluster + } + + def 'should produce an ecs cluster with unknown VPC for EC2 instance missing its network config'() { + given: + for (serverGroup in expectedCluster.serverGroups) { + EcsServerGroup ecsServerGroup = serverGroup + ecsServerGroup.vpcId = 'None' + 
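// NOTE: 'None' is the placeholder VPC ID these specs expect the provider to report when an instance's network details cannot be resolved (inferred from the expectations in this file). +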
ecsServerGroup.securityGroups = [] + } + + when: + def retrievedCluster = provider.getCluster("myapp", CREDS_NAME, FAMILY_NAME) + + then: + containerInformationService.getEc2Instance(_, _, _) >> new Instance() + retrievedCluster == expectedCluster + } + + def 'should produce ecs clusters'() { + when: + def retrievedClusters = provider.getClusterDetails("myapp").values().flatten() + + then: + retrievedClusters.sort() == [expectedCluster, expectedCluster2].sort() + } + def makeEcsServerGroup(String serviceName, String region, long startTime, String taskId, Map healthStatus, String ip) { + Names name = Names.parseName(serviceName) new EcsServerGroup( name: serviceName, type: 'ecs', @@ -177,9 +392,10 @@ class EcsServerClusterProviderSpec extends Specification { disabled: false, createdTime: startTime, instances: [ - new EcsTask(taskId, startTime, 'RUNNING', 'RUNNING', "us-west-1a", [healthStatus], "${ip}:1337", null) + new EcsTask(taskId, startTime, 'RUNNING', 'RUNNING', 'HEALTHY', "us-west-1a", [healthStatus], "${ip}:1337", null, true) ], - securityGroups: [], + vpcId: 'vpc-1234', + securityGroups: ['sg-1234'], instanceCounts: new ServerGroup.InstanceCounts( total: 1, up: 1, @@ -201,7 +417,18 @@ class EcsServerClusterProviderSpec extends Specification { environmentVariables: [], iamRole: 'None' ), + image: new EcsServerGroup.Image( + imageId: 'my-image', + name: 'my-image' + ), metricAlarms: [], + moniker: Moniker.builder() + .app(name.app) + .stack(name.stack) + .detail(name.detail) + .sequence(name.sequence) + .cluster(name.cluster) + .build() ) } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServiceDiscoveryProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServiceDiscoveryProviderSpec.groovy new file mode 100644 index 00000000000..7fe6634f7a7 --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/provider/view/EcsServiceDiscoveryProviderSpec.groovy @@ -0,0 +1,114 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.view + +import com.amazonaws.services.servicediscovery.model.ServiceSummary +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys +import com.netflix.spinnaker.clouddriver.ecs.cache.model.ServiceDiscoveryRegistry +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceDiscoveryCachingAgent +import spock.lang.Specification +import spock.lang.Subject + +class EcsServiceDiscoveryProviderSpec extends Specification { + private static String ACCOUNT = 'test-account' + private static String REGION = 'us-west-1' + + private Cache cacheView = Mock(Cache) + @Subject + private EcsServiceDiscoveryProvider serviceDiscoveryProvider = new EcsServiceDiscoveryProvider(cacheView) + + def 'should get no registries'() { + given: + cacheView.getAll(_) >> Collections.emptySet() + + when: + def services = serviceDiscoveryProvider.getAllServiceDiscoveryRegistries() + + then: + services.size() == 0 + } + + def 'should get a registry'() { + given: + def serviceName = 'my-service' + def serviceId = 'srv-123' + def serviceArn = "arn:aws:servicediscovery:" + REGION + ":012345678910:service/" + serviceId + def key = Keys.getServiceDiscoveryRegistryKey(ACCOUNT, REGION, serviceId) + + HashSet keys = [key] + + ServiceSummary serviceEntry = new ServiceSummary( + name: serviceName, + arn: serviceArn, + id: serviceId + ) + + def attributes = ServiceDiscoveryCachingAgent.convertServiceToAttributes(ACCOUNT, REGION, serviceEntry) + def cacheData = new HashSet() + cacheData.add(new DefaultCacheData(key, attributes, Collections.emptyMap())) + + cacheView.getAll(_) >> cacheData + + when: + Collection services = serviceDiscoveryProvider.getAllServiceDiscoveryRegistries() + + then: + services.size() == 1 + services[0].getName() == serviceName + services[0].getId() == serviceId + services[0].getArn() == serviceArn + } + + def 'should get multiple services'() { + given: + int numberOfServices = 5 + Set serviceIds = new HashSet<>() + Collection cacheData = new HashSet<>() + Set keys = new HashSet<>() + + numberOfServices.times { x -> + String serviceName = "test-service-" + x + String serviceId = "srv-" + x + String serviceArn = "arn:aws:servicediscovery:" + REGION + ":012345678910:service/" + serviceId + String key = Keys.getServiceDiscoveryRegistryKey(ACCOUNT, REGION, serviceId) + + keys.add(key) + serviceIds.add(serviceId) + + ServiceSummary serviceEntry = new ServiceSummary( + name: serviceName, + arn: serviceArn, + id: serviceId + ) + + Map attributes = ServiceDiscoveryCachingAgent.convertServiceToAttributes(ACCOUNT, REGION, serviceEntry) + cacheData.add(new DefaultCacheData(key, attributes, Collections.emptyMap())) + } + + cacheView.getAll(_) >> cacheData + + when: + Collection services = serviceDiscoveryProvider.getAllServiceDiscoveryRegistries() + + then: + services.size() == numberOfServices + serviceIds.containsAll(services*.getId()) + services*.getId().containsAll(serviceIds) + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsLifeCyclerHandlerSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsLifeCyclerHandlerSpec.groovy new file mode 100644 index 00000000000..61ab13a8201 --- /dev/null +++ 
b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsLifeCyclerHandlerSpec.groovy @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.security + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.clouddriver.ecs.TestCredential +import com.netflix.spinnaker.clouddriver.ecs.provider.EcsProvider +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.* +import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcsAccountMapper +import spock.lang.Specification + +import java.util.stream.Collectors + +class EcsCredentialsLifeCyclerHandlerSpec extends Specification { + + EcsProvider ecsProvider + def objectMapper = new ObjectMapper() + def registry = new DefaultRegistry() + + def setup() { + ecsProvider = new EcsProvider() + } + def credOne = new NetflixAssumeRoleEcsCredentials(TestCredential.assumeRoleNamed('one'), 'one-aws') + def ecsAccountMapper = Mock(EcsAccountMapper) + + + def 'it should add agents'() { + + given: + def handler = new EcsCredentialsLifeCycleHandler(ecsProvider, null, null, registry, null, objectMapper, null, ecsAccountMapper) + Set expectedClasses = [ IamRoleCachingAgent.class, EcsClusterCachingAgent.class, ServiceCachingAgent.class, + TaskCachingAgent.class, ContainerInstanceCachingAgent.class, TaskDefinitionCachingAgent.class, + TaskHealthCachingAgent.class, EcsCloudMetricAlarmCachingAgent.class, ScalableTargetsCachingAgent.class, + SecretCachingAgent.class, ServiceDiscoveryCachingAgent.class, TargetHealthCachingAgent.class, + ApplicationCachingAgent.class ] + Set actualClasses =[] + + when: + handler.credentialsAdded(credOne) + + then: + 1 * ecsAccountMapper.addMapEntry({it.getName() == credOne.getName()}) + ecsProvider.getAgents().size() == 24 // 2 * 11 + 1 + 1 ( One IamRoleCachingAgent and ApplicationCachingAgent per account ) + ecsProvider.getHealthAgents().size() == 4 + ecsProvider.getAgents().each({actualClasses.add(it.getClass())}) + (actualClasses - expectedClasses).isEmpty() + } + + def 'it should remove agents'() { + + given: + ecsProvider.addAgents(Collections.singletonList(new TargetHealthCachingAgent(credOne, "region", null, null, objectMapper))) + def handler = new EcsCredentialsLifeCycleHandler(ecsProvider, null, null, registry, null, objectMapper, null, ecsAccountMapper) + + when: + handler.credentialsDeleted(credOne) + + then: + ecsProvider.getAgents().isEmpty() + ecsProvider.getHealthAgents().isEmpty() + } + + def 'it should update agents'() { + given: + ecsProvider.addAgents(Collections.singletonList(new TargetHealthCachingAgent(credOne, "region", null, null, objectMapper))) + def handler = new EcsCredentialsLifeCycleHandler(ecsProvider, null, null, registry, null, objectMapper, null, ecsAccountMapper) + + when: + handler.credentialsUpdated(credOne) + + then: + ecsProvider.getAgents().stream() + .filter({ agent -> agent instanceof TargetHealthCachingAgent }) + 
.collect(Collectors.toList()) + .size() == 2 + ecsProvider.getHealthAgents().size() == 4 + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsParserSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsParserSpec.groovy new file mode 100644 index 00000000000..695d59a180f --- /dev/null +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/security/EcsCredentialsParserSpec.groovy @@ -0,0 +1,111 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under + * the License. + */ + +package com.netflix.spinnaker.clouddriver.ecs.security + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials +import com.netflix.spinnaker.clouddriver.ecs.EcsCloudProvider +import com.netflix.spinnaker.clouddriver.ecs.TestCredential +import com.netflix.spinnaker.clouddriver.ecs.names.EcsDefaultNamer +import com.netflix.spinnaker.clouddriver.ecs.names.EcsResource +import com.netflix.spinnaker.clouddriver.ecs.names.EcsTagNamer +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository +import com.netflix.spinnaker.credentials.definition.CredentialsParser +import spock.lang.Specification + +class EcsCredentialsParserSpec extends Specification{ + + def credOne = TestCredential.named("one") + + def credJson = [ + name: "one-ecs", + environment: "one", + accountType: "one", + accountId: "123456789012" + "one", + defaultKeyPair: 'default-keypair', + regions: [[name: 'us-east-1', availabilityZones: ['us-east-1b', 'us-east-1c', 'us-east-1d']], + [name: 'us-west-1', availabilityZones: ["us-west-1a", "us-west-1b"]]], + assumeRole: "oneRole", + sessionName: "sessionOne" + ] + def assumeRoleCred = new ObjectMapper().convertValue(credJson, NetflixAssumeRoleAmazonCredentials) + + def compositeCredentialsRepository = Mock(CompositeCredentialsRepository) + def parser = Mock(CredentialsParser) + + def namerRegistry = new NamerRegistry( + List.of(new EcsDefaultNamer(), new EcsTagNamer()) + ) + + def ecsCredentialsParser = new EcsCredentialsParser( + new ECSCredentialsConfig(), compositeCredentialsRepository, parser, namerRegistry + ) + + def 'should parse credentials'() { + given: + + def account = new ECSCredentialsConfig.Account(){{ + setName("one-ecs") + setAwsAccount("one") + }} + + when: + def response = ecsCredentialsParser.parse(account) + + then: + 1 * parser.parse({it.getName() == "one-ecs" } ) >> assumeRoleCred + 1 * compositeCredentialsRepository.getCredentials("one", AmazonCloudProvider.ID) >> credOne +// ecsAccountMapper.fromAwsAccountNameToEcsAccountName("one") == "one-ecs" + response.getName() == "one-ecs" + NamerRegistry.lookup() + .withProvider(EcsCloudProvider.ID) + .withAccount("one-ecs") + .withResource(EcsResource.class).class == EcsDefaultNamer.class + } + + def 'should parse credentials 
and override defaults'() { + given: + + def account = new ECSCredentialsConfig.Account(){{ + setName("one-ecs") + setAwsAccount("one") + setNamingStrategy("tags") + }} + + when: + def response = ecsCredentialsParser.parse(account) + + then: + 1 * parser.parse({it.getName() == "one-ecs" } ) >> assumeRoleCred + 1 * compositeCredentialsRepository.getCredentials("one", AmazonCloudProvider.ID) >> credOne +// ecsAccountMapper.fromAwsAccountNameToEcsAccountName("one") == "one-ecs" + response.getName() == "one-ecs" + NamerRegistry.lookup() + .withProvider(EcsCloudProvider.ID) + .withAccount("one-ecs") + .withResource(EcsResource.class).class == EcsTagNamer.class + } + + def cleanupSpec() { + // We can't clear the NameRegistry but we can at least set it back to the default + NamerRegistry.lookup() + .withProvider(EcsCloudProvider.ID) + .withAccount("one-ecs") + .setNamer(EcsResource.class, new EcsDefaultNamer()) + } +} diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationServiceSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationServiceSpec.groovy index 6066c1e813f..a3ae8e89067 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationServiceSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/ContainerInformationServiceSpec.groovy @@ -19,14 +19,21 @@ package com.netflix.spinnaker.clouddriver.ecs.services import com.amazonaws.services.ec2.model.Instance import com.amazonaws.services.ec2.model.Placement import com.amazonaws.services.ecs.model.Container +import com.amazonaws.services.ecs.model.ContainerDefinition +import com.amazonaws.services.ecs.model.HealthCheck import com.amazonaws.services.ecs.model.LoadBalancer import com.amazonaws.services.ecs.model.NetworkBinding +import com.amazonaws.services.ecs.model.TaskDefinition +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealth +import com.amazonaws.services.elasticloadbalancingv2.model.TargetHealthDescription import com.netflix.spinnaker.clouddriver.ecs.cache.client.* import com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance +import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsTargetHealth import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service import com.netflix.spinnaker.clouddriver.ecs.cache.model.Task import com.netflix.spinnaker.clouddriver.ecs.cache.model.TaskHealth import com.netflix.spinnaker.clouddriver.ecs.security.ECSCredentialsConfig +import org.assertj.core.util.Lists import spock.lang.Specification import spock.lang.Subject @@ -35,16 +42,20 @@ class ContainerInformationServiceSpec extends Specification { def taskCacheClient = Mock(TaskCacheClient) def serviceCacheClient = Mock(ServiceCacheClient) def taskHealthCacheClient = Mock(TaskHealthCacheClient) + def taskDefinitionCacheClient = Mock(TaskDefinitionCacheClient) def ecsInstanceCacheClient = Mock(EcsInstanceCacheClient) def containerInstanceCacheClient = Mock(ContainerInstanceCacheClient) + def targetHealthCacheClient = Mock(TargetHealthCacheClient) @Subject def service = new ContainerInformationService(ecsCredentialsConfig, taskCacheClient, serviceCacheClient, taskHealthCacheClient, + taskDefinitionCacheClient, ecsInstanceCacheClient, - containerInstanceCacheClient) + containerInstanceCacheClient, + targetHealthCacheClient) def 'should return a proper health status'() { given: @@ -66,12 +77,20 @@ class 
ContainerInformationServiceSpec extends Specification { serviceCacheClient.get(_) >> cachedService taskHealthCacheClient.get(_) >> cachedTaskHealth + taskCacheClient.get(_) >> new Task(lastStatus: 'RUNNING') + taskDefinitionCacheClient.get(_) >> new TaskDefinition() def expectedHealthStatus = [ [ instanceId: taskId, state : state, type : type + ], + [ + instanceId: taskId, + state : 'Up', + type :'ecs', + healthClass: 'platform' ] ] @@ -97,6 +116,7 @@ class ContainerInformationServiceSpec extends Specification { serviceCacheClient.get(_) >> null taskHealthCacheClient.get(_) >> cachedTaskHealth + taskDefinitionCacheClient.get(_) >> new TaskDefinition() def expectedHealthStatus = [ [ @@ -142,21 +162,230 @@ class ContainerInformationServiceSpec extends Specification { retrievedHealthStatus == expectedHealthStatus } - def 'should throw an exception when the service has multiple loadbalancers'() { + def 'should return correct health status based on last task status with no health checks defined'() { + setup: + def taskId = 'task-id' + def serviceName = 'test-service-name' + def cachedService = new Service( + serviceName: serviceName, + loadBalancers: [new LoadBalancer()] + ) + + serviceCacheClient.get(_) >> cachedService + taskCacheClient.get(_) >> new Task(lastStatus: lastStatus, healthStatus: healthStatus) + taskDefinitionCacheClient.get(_) >> new TaskDefinition() + def expectedHealthStatus = [ + [ + instanceId: taskId, + state : 'Unknown', + type : 'loadBalancer' + ], + [ + instanceId: taskId, + state : resultStatus, + type : 'ecs', + healthClass: 'platform' + ] + ] + def retrievedHealthStatus = service.getHealthStatus(taskId, serviceName, 'test-account', 'us-west-1') + + expect: + retrievedHealthStatus == expectedHealthStatus + + where: + healthStatus | resultStatus | lastStatus + 'UNKNOWN' | 'Starting' | 'PROVISIONING' + 'UNKNOWN' | 'Starting' | 'PENDING' + 'UNKNOWN' | 'Starting' | 'ACTIVATING' + 'UNKNOWN' | 'Up' | 'RUNNING' + 'UNHEALTHY' | 'Down' | 'PROVISIONING' + 'UNHEALTHY' | 'Down' | 'PENDING' + 'UNHEALTHY' | 'Down' | 'ACTIVATING' + 'UNHEALTHY' | 'Down' | 'RUNNING' + } + + def 'should return correct health status based on health status with health check defined'() { + setup: + def taskId = 'task-id' + def serviceName = 'test-service-name' + def cachedService = new Service( + serviceName: serviceName, + loadBalancers: [new LoadBalancer()] + ) + + serviceCacheClient.get(_) >> cachedService + taskCacheClient.get(_) >> new Task(lastStatus: lastStatus, healthStatus: healthStatus) + taskDefinitionCacheClient.get(_) >> new TaskDefinition(containerDefinitions: Lists.newArrayList(new ContainerDefinition + (healthCheck: new HealthCheck( + command: Lists.newArrayList("myCommand"))))) + def expectedHealthStatus = [ + [ + instanceId: taskId, + state : 'Unknown', + type : 'loadBalancer' + ], + [ + instanceId: taskId, + state : resultStatus, + type : 'ecs', + healthClass: 'platform' + ] + ] + def retrievedHealthStatus = service.getHealthStatus(taskId, serviceName, 'test-account', 'us-west-1') + + expect: + retrievedHealthStatus == expectedHealthStatus + + where: + healthStatus | resultStatus | lastStatus + 'UNKNOWN' | 'Starting' | 'PROVISIONING' + 'UNKNOWN' | 'Starting' | 'PENDING' + 'UNKNOWN' | 'Starting' | 'ACTIVATING' + 'UNKNOWN' | 'Starting' | 'RUNNING' + 'UNHEALTHY' | 'Down' | 'PROVISIONING' + 'UNHEALTHY' | 'Down' | 'PENDING' + 'UNHEALTHY' | 'Down' | 'ACTIVATING' + 'UNHEALTHY' | 'Down' | 'RUNNING' + 'HEALTHY' | 'Starting' | 'PROVISIONING' + 'HEALTHY' | 'Starting' | 'PENDING' + 'HEALTHY' | 
'Starting' | 'ACTIVATING' + 'HEALTHY' | 'Up' | 'RUNNING' + } + + def 'should return Up health status if the task is running but the container definition has no health check and no target health is cached'() { given: + def taskId = 'task-id' + def serviceName = 'test-service-name' + def type = 'loadBalancer' + + def cachedService = new Service( - loadBalancers: [new LoadBalancer(), new LoadBalancer()] + serviceName: serviceName, + loadBalancers: [new LoadBalancer()] ) serviceCacheClient.get(_) >> cachedService - taskHealthCacheClient.get(_) >> new TaskHealth() + taskCacheClient.get(_) >> new Task(lastStatus: lastStatus, healthStatus: healthStatus) + taskDefinitionCacheClient.get(_) >> new TaskDefinition(containerDefinitions: Lists.newArrayList(new ContainerDefinition(healthCheck: null))) + targetHealthCacheClient.get(_) >> null - when: - service.getHealthStatus('task-id', 'test-service-name', 'test-account', 'us-west-1') + def expectedHealthStatus = [ + [ + instanceId: taskId, + state : 'Unknown', + type : type + ], + [ + instanceId: taskId, + state : resultStatus, + type : 'ecs', + healthClass: 'platform' + ] + ] + def retrievedHealthStatus = service.getHealthStatus(taskId, serviceName, 'test-account', 'us-west-1') + + expect: + retrievedHealthStatus == expectedHealthStatus + + where: + healthStatus | resultStatus | lastStatus + 'UNKNOWN' | 'Starting' | 'PROVISIONING' + 'UNKNOWN' | 'Starting' | 'PENDING' + 'UNKNOWN' | 'Starting' | 'ACTIVATING' + 'UNKNOWN' | 'Up' | 'RUNNING' + } + + def 'should return health status based on the target group if the task is running, the container definition has no health check, and a single target health is cached'() { + given: + def taskId = 'task-id' + def serviceName = 'test-service-name' + def type = 'loadBalancer' + + def cachedService = new Service( + serviceName: serviceName, + loadBalancers: [new LoadBalancer()] + ) + + serviceCacheClient.get(_) >> cachedService + taskCacheClient.get(_) >> new Task(lastStatus: lastStatus, healthStatus: healthStatus) + taskDefinitionCacheClient.get(_) >> new TaskDefinition(containerDefinitions: Lists.newArrayList(new ContainerDefinition(healthCheck: null))) + targetHealthCacheClient.get(_) >> new EcsTargetHealth(targetHealthDescriptions: List.of( + new TargetHealthDescription(targetHealth: new TargetHealth(state: targetHealthStatus)) + )) + + def expectedHealthStatus = [ + [ + instanceId: taskId, + state : 'Unknown', + type : type + ], + [ + instanceId: taskId, + state : resultStatus, + type : 'ecs', + healthClass: 'platform' + ] + ] + def retrievedHealthStatus = service.getHealthStatus(taskId, serviceName, 'test-account', 'us-west-1') + + expect: + retrievedHealthStatus == expectedHealthStatus + + where: + healthStatus | resultStatus | lastStatus | targetHealthStatus + 'UNKNOWN' | 'Starting' | 'RUNNING' | 'initial' + 'UNKNOWN' | 'Up' | 'RUNNING' | 'healthy' + 'UNKNOWN' | 'Down' | 'RUNNING' | 'unhealthy' + 'UNKNOWN' | 'Down' | 'RUNNING' | 'unused' + 'UNKNOWN' | 'Down' | 'RUNNING' | 'draining' + 'UNKNOWN' | 'Down' | 'RUNNING' | 'unavailable' + } + + def 'should return health status based on the target group if the task is running, the container definition has no health check, and multiple target health descriptions are cached'() { + given: + def taskId = 'task-id' + def serviceName = 'test-service-name' + def type = 'loadBalancer' + + def cachedService = new Service( + serviceName: serviceName, + loadBalancers: [new LoadBalancer()] + ) + + serviceCacheClient.get(_) >> cachedService + 
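// lastStatus and healthStatus are bound from the where: table below; Spock evaluates these given: stubs once per data row. +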
taskCacheClient.get(_) >> new Task(lastStatus: lastStatus, healthStatus: healthStatus) + taskDefinitionCacheClient.get(_) >> new TaskDefinition(containerDefinitions: Lists.newArrayList(new ContainerDefinition(healthCheck: null))) + targetHealthCacheClient.get(_) >> new EcsTargetHealth(targetHealthDescriptions: List.of( + new TargetHealthDescription(targetHealth: new TargetHealth(state: targetHealthStatus1)), + new TargetHealthDescription(targetHealth: new TargetHealth(state: targetHealthStatus2)) + )) + + def expectedHealthStatus = [ + [ + instanceId: taskId, + state : 'Unknown', + type : type + ], + [ + instanceId: taskId, + state : resultStatus, + type : 'ecs', + healthClass: 'platform' + ] + ] + def retrievedHealthStatus = service.getHealthStatus(taskId, serviceName, 'test-account', 'us-west-1') + + expect: + retrievedHealthStatus == expectedHealthStatus + + where: + healthStatus | resultStatus | lastStatus | targetHealthStatus1 | targetHealthStatus2 + 'UNKNOWN' | 'Starting' | 'RUNNING' | 'initial' | 'draining' + 'UNKNOWN' | 'Starting' | 'RUNNING' | 'draining' | 'initial' + 'UNKNOWN' | 'Up' | 'RUNNING' | 'healthy' | 'draining' + 'UNKNOWN' | 'Up' | 'RUNNING' | 'draining' | 'healthy' + 'UNKNOWN' | 'Up' | 'RUNNING' | 'unhealthy' | 'healthy' - then: - IllegalArgumentException exception = thrown() - exception.message == 'Cannot have more than 1 load balancer while checking ECS health.' } def 'should return a proper private address for a task'() { @@ -225,6 +454,48 @@ class ContainerInformationServiceSpec extends Specification { retrievedIp == null } + def 'should return a null when container instance IP address is not yet cached'() { + given: + def account = 'test-account' + def region = 'us-west-1' + def containerInstanceArn = 'container-instance-arn' + def port = 1337 + + def ecsAccount = new ECSCredentialsConfig.Account( + name: account, + awsAccount: 'aws-' + account + ) + + def task = new Task( + containerInstanceArn: containerInstanceArn, + containers: [ + new Container( + networkBindings: [ + new NetworkBinding( + hostPort: port + ) + ] + ) + ] + ) + + def containerInstance = new ContainerInstance( + ec2InstanceId: 'i-deadbeef' + ) + + def instance = new Instance() + + containerInstanceCacheClient.get(_) >> containerInstance + ecsInstanceCacheClient.find(_, _, _) >> [instance] + ecsCredentialsConfig.getAccounts() >> [ecsAccount] + + when: + def retrievedIp = service.getTaskPrivateAddress(account, region, task) + + then: + retrievedIp == null + } + def 'should return a unknown when there is no container instance for the task'() { given: def task = new Task( @@ -284,18 +555,99 @@ class ContainerInformationServiceSpec extends Specification { retrievedIp == null } - def 'should throw an exception when task has multiple containers'() { + def 'should return null when task has multiple network bindings'() { given: + def account = 'test-account' + def region = 'us-west-1' + + def ecsAccount = new ECSCredentialsConfig.Account( + name: account, + awsAccount: 'aws-' + account + ) + def task = new Task( - containers: [new Container(), new Container(), new Container()] + containerInstanceArn: 'container-instance-arn', + containers: [ + new Container( + networkBindings: [ + new NetworkBinding( + hostPort: 1234 + ) + ] + ), + new Container( + networkBindings: [ + new NetworkBinding( + hostPort: 5678 + ) + ] + ) + ] + ) + + def containerInstance = new ContainerInstance( + ec2InstanceId: 'i-deadbeef' + ) + + def instance = new Instance( + privateIpAddress: '127.0.0.1' ) + 
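// An EC2 instance with a private IP is cached, but the task exposes two host-port bindings, so no single address can be derived and null is expected. +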
containerInstanceCacheClient.get(_) >> containerInstance + ecsInstanceCacheClient.find(_, _, _) >> [instance] + ecsCredentialsConfig.getAccounts() >> [ecsAccount] + when: - service.getTaskPrivateAddress('test-account', 'region', task) + def retrievedIp = service.getTaskPrivateAddress(account, region, task) then: - IllegalArgumentException exception = thrown() - exception.message == 'Multiple containers for a task is not supported.' + retrievedIp == null + } + + def 'should return a proper address when task has multiple containers but only one network binding'() { + given: + def account = 'test-account' + def region = 'us-west-1' + def containerInstanceArn = 'container-instance-arn' + def ip = '127.0.0.1' + def port = 1337 + + def ecsAccount = new ECSCredentialsConfig.Account( + name: account, + awsAccount: 'aws-' + account + ) + + def task = new Task( + containerInstanceArn: containerInstanceArn, + containers: [ + new Container( + networkBindings: [ + new NetworkBinding( + hostPort: port + ) + ] + ), + new Container() + ] + ) + + def containerInstance = new ContainerInstance( + ec2InstanceId: 'i-deadbeef' + ) + + def instance = new Instance( + privateIpAddress: ip + ) + + containerInstanceCacheClient.get(_) >> containerInstance + ecsInstanceCacheClient.find(_, _, _) >> [instance] + ecsCredentialsConfig.getAccounts() >> [ecsAccount] + + when: + def retrievedIp = service.getTaskPrivateAddress(account, region, task) + + then: + retrievedIp == ip + ':' + port } def 'should throw an exception when container has multiple ec2 instances'() { @@ -378,6 +730,31 @@ class ContainerInformationServiceSpec extends Specification { retrievedZone == null } + def 'should return null when container instance zone is not yet cached'() { + given: + def task = new Task(containerInstanceArn: 'container-instance-arn') + def containerInstance = new ContainerInstance(ec2InstanceId: 'i-deadbeef') + def ecsAccount = new ECSCredentialsConfig.Account( + name: 'ecs-account', + awsAccount: 'aws-test-account' + ) + def givenInstance = new Instance( + instanceId: 'i-deadbeef', + privateIpAddress: '0.0.0.0', + publicIpAddress: '127.0.0.1' + ) + + containerInstanceCacheClient.get(_) >> containerInstance + ecsCredentialsConfig.getAccounts() >> [ecsAccount] + ecsInstanceCacheClient.find(_, _, _) >> [givenInstance] + + when: + def retrievedZone = service.getTaskZone('ecs-account', 'us-west-1', task) + + then: + retrievedZone == null + } + def 'should return a cluster name'() { given: def originalClusterName = 'test-cluster' diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricServiceSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricServiceSpec.groovy index df538f1aa32..21234947c9e 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricServiceSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/EcsCloudMetricServiceSpec.groovy @@ -16,52 +16,415 @@ package com.netflix.spinnaker.clouddriver.ecs.services +import com.amazonaws.services.applicationautoscaling.AWSApplicationAutoScaling +import com.amazonaws.services.applicationautoscaling.model.* import com.amazonaws.services.cloudwatch.AmazonCloudWatch +import com.amazonaws.services.cloudwatch.model.DeleteAlarmsRequest +import com.amazonaws.services.cloudwatch.model.DescribeAlarmsResult +import com.amazonaws.services.cloudwatch.model.Dimension +import 
com.amazonaws.services.cloudwatch.model.MetricAlarm +import com.amazonaws.services.cloudwatch.model.PutMetricAlarmRequest import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.ecs.TestCredential import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsCloudWatchAlarmCacheClient import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsMetricAlarm -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification import spock.lang.Subject class EcsCloudMetricServiceSpec extends Specification { def metricAlarmCacheClient = Mock(EcsCloudWatchAlarmCacheClient) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) - def amazonClientProvider = Mock(AmazonClientProvider) + + def sourceAutoScaling = Mock(AWSApplicationAutoScaling) + def targetAutoScaling = Mock(AWSApplicationAutoScaling) + def sourceCloudWatch = Mock(AmazonCloudWatch) + def targetCloudWatch = Mock(AmazonCloudWatch) + def sourceAccountName = 'abc123' + def targetAccountName = 'def456' + def sourceAccountId = 'abc' + def targetAccountId = 'def' + def sourceRegion = 'us-east-1' + def targetRegion = 'us-west-1' + def clusterName = 'default' + def sourceServiceName = 'asgard-v000' + def targetServiceName = 'asgard-v001' + def sourceResourceId = 'service/default/asgard-v000' + def targetResourceId = 'service/default/asgard-v001' + def sourceCredentials = Stub(NetflixECSCredentials) { + getAccountId() >> sourceAccountId + } + def targetCredentials = Stub(NetflixECSCredentials) { + getAccountId() >> targetAccountId + } + def credentialsRepository = Stub(CredentialsRepository) { + getOne(sourceAccountName) >> sourceCredentials + getOne(targetAccountName) >> targetCredentials + } + def amazonClientProvider = Stub(AmazonClientProvider) { + getAmazonApplicationAutoScaling(sourceCredentials, sourceRegion, false) >> sourceAutoScaling + getAmazonApplicationAutoScaling(targetCredentials, targetRegion, false) >> targetAutoScaling + getAmazonCloudWatch(sourceCredentials, sourceRegion, false) >> sourceCloudWatch + getAmazonCloudWatch(targetCredentials, targetRegion, false) >> targetCloudWatch + } @Subject def service = new EcsCloudMetricService() - def 'should delete metric alarms'() { + def setup() { + service.amazonClientProvider = amazonClientProvider + service.credentialsRepository = credentialsRepository + service.metricAlarmCacheClient = metricAlarmCacheClient + } + + void 'should copy nothing when there are no scaling policies'() { + when: + service.copyScalingPolicies(targetAccountName, targetRegion, targetServiceName, targetResourceId, + sourceAccountName, sourceRegion, sourceServiceName, sourceResourceId, clusterName) + + then: + 1 * sourceAutoScaling.describeScalingPolicies(new DescribeScalingPoliciesRequest( + serviceNamespace: "ecs", + resourceId: sourceResourceId)) >> new DescribeScalingPoliciesResult(scalingPolicies: []) + 0 * targetAutoScaling.putScalingPolicy(_) + 0 * sourceCloudWatch.describeAlarms(_) + 0 * targetCloudWatch.putMetricAlarm(_) + } + + void 'should replace scaling policy ARNs and omit actions that are specific to the source account/region when they differ'() { given: - def creds = TestCredential.named('test') - def region = creds.getRegions()[0].getName() - def serviceName = 'test-kcats-liated' + def replacements = ['oldPolicyARN': 'newPolicyARN'] + def 
actions = ['oldPolicyARN', 'sns:us-east-1', "sns:${sourceCredentials.accountId}:someQueue".toString(), 'ok-one'] + + when: + def replacedActions = service.replacePolicyArnActions( + sourceRegion, targetRegion, + sourceAccountId, targetAccountId, + replacements, actions) + + then: + replacedActions.sort() == ['newPolicyARN', 'ok-one'] + } + + void 'should copy scaling policies and alarms'() { + when: + service.copyScalingPolicies(targetAccountName, targetRegion, targetServiceName, targetResourceId, + sourceAccountName, sourceRegion, sourceServiceName, sourceResourceId, clusterName) + + then: + 1 * sourceAutoScaling.describeScalingPolicies(new DescribeScalingPoliciesRequest( + serviceNamespace: ServiceNamespace.Ecs, + resourceId: sourceResourceId)) >> + new DescribeScalingPoliciesResult(scalingPolicies: [ + new ScalingPolicy( + policyName: 'policy1', + policyARN: 'oldPolicyARN1', + resourceId: 'service/default/asgard-v000', + policyType: 'TargetTrackingScaling', + serviceNamespace: 'ecs', + scalableDimension: 'ecs:service:DesiredCount', + targetTrackingScalingPolicyConfiguration: new TargetTrackingScalingPolicyConfiguration( + targetValue: 30.0, + predefinedMetricSpecification: new PredefinedMetricSpecification( + predefinedMetricType: 'ECSServiceAverageCPUUtilization' + ), + scaleOutCooldown: 300, + scaleInCooldown: 300 + ), + alarms: ['TargetTracking-alarm1', 'TargetTracking-alarm2'].collect { new Alarm(alarmName: it) } + ), + new ScalingPolicy( + policyName: 'policy2', + policyARN: 'oldPolicyARN2', + resourceId: 'service/default/asgard-v000', + policyType: 'TargetTrackingScaling', + serviceNamespace: 'ecs', + scalableDimension: 'ecs:service:DesiredCount', + targetTrackingScalingPolicyConfiguration: new TargetTrackingScalingPolicyConfiguration( + targetValue: 20.0, + customizedMetricSpecification: new CustomizedMetricSpecification( + metricName: 'CPUUtilization', + dimensions: [ + new MetricDimension(name: 'ClusterName', value: 'default'), + new MetricDimension(name: 'ServiceName', value: 'asgard-v000') + ], + namespace: 'AWS/ECS', + statistic: 'Average', + unit: 'Percent' + ), + scaleOutCooldown: 200, + scaleInCooldown: 200 + ), + alarms: ['TargetTracking-alarm3', 'TargetTracking-alarm4'].collect { new Alarm(alarmName: it) } + ), + new ScalingPolicy( + policyName: 'policy3-asgard-v000', + policyARN: 'oldPolicyARN3', + resourceId: 'service/default/asgard-v000', + policyType: 'StepScaling', + serviceNamespace: 'ecs', + scalableDimension: 'ecs:service:DesiredCount', + stepScalingPolicyConfiguration: new StepScalingPolicyConfiguration( + adjustmentType: 'ChangeInCapacity', + minAdjustmentMagnitude: 20, + metricAggregationType: 'Average', + cooldown: 100, + stepAdjustments: [ + new StepAdjustment( + metricIntervalLowerBound: 10.5, + metricIntervalUpperBound: 11.5, + scalingAdjustment: 90, + ) + ], + ), + alarms: ['alarm5', 'alarm6-asgard-v000'].collect { new Alarm(alarmName: it) } + ) + ] + ) + + 1 * targetAutoScaling.putScalingPolicy(new PutScalingPolicyRequest( + policyName: 'policy1-asgard-v001', + resourceId: 'service/default/asgard-v001', + policyType: 'TargetTrackingScaling', + serviceNamespace: 'ecs', + scalableDimension: 'ecs:service:DesiredCount', + targetTrackingScalingPolicyConfiguration: new TargetTrackingScalingPolicyConfiguration( + targetValue: 30.0, + predefinedMetricSpecification: new PredefinedMetricSpecification( + predefinedMetricType: 'ECSServiceAverageCPUUtilization' + ), + scaleOutCooldown: 300, + scaleInCooldown: 300 + ) + )) >> new PutScalingPolicyResult(policyARN: 
'newPolicyARN1') + 1 * targetAutoScaling.putScalingPolicy(new PutScalingPolicyRequest( + policyName: 'policy2-asgard-v001', + resourceId: 'service/default/asgard-v001', + policyType: 'TargetTrackingScaling', + serviceNamespace: 'ecs', + scalableDimension: 'ecs:service:DesiredCount', + targetTrackingScalingPolicyConfiguration: new TargetTrackingScalingPolicyConfiguration( + targetValue: 20.0, + customizedMetricSpecification: new CustomizedMetricSpecification( + metricName: 'CPUUtilization', + dimensions: [ + new MetricDimension(name: 'ClusterName', value: 'default'), + new MetricDimension(name: 'ServiceName', value: 'asgard-v001') + ], + namespace: 'AWS/ECS', + statistic: 'Average', + unit: 'Percent' + ), + scaleOutCooldown: 200, + scaleInCooldown: 200 + ) + )) >> new PutScalingPolicyResult(policyARN: 'newPolicyARN2') + + 1 * targetAutoScaling.putScalingPolicy(new PutScalingPolicyRequest( + policyName: 'policy3-asgard-v001', + resourceId: 'service/default/asgard-v001', + policyType: 'StepScaling', + serviceNamespace: 'ecs', + scalableDimension: 'ecs:service:DesiredCount', + stepScalingPolicyConfiguration: new StepScalingPolicyConfiguration( + adjustmentType: 'ChangeInCapacity', + minAdjustmentMagnitude: 20, + metricAggregationType: 'Average', + cooldown: 100, + stepAdjustments: [ + new StepAdjustment( + metricIntervalLowerBound: 10.5, + metricIntervalUpperBound: 11.5, + scalingAdjustment: 90, + ) + ], + ) + )) >> new PutScalingPolicyResult(policyARN: 'newPolicyARN3') + + 1 * sourceCloudWatch.describeAlarms(_) >> new DescribeAlarmsResult(metricAlarms: [ + new MetricAlarm( + alarmName: 'TargetTracking-alarm1', + alarmDescription: 'alarm 1 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN1'], + insufficientDataActions: [], + metricName: 'metric1', + namespace: 'AWS/ECS', + statistic: 'statistic1', + dimensions: [ + new Dimension(name: 'ClusterName', value: 'default'), + new Dimension(name: 'ServiceName', value: 'asgard-v000') + ], + period: 1, + unit: 'unit1', + evaluationPeriods: 2, + threshold: 4.2, + comparisonOperator: 'GreaterThanOrEqualToThreshold' + ), + new MetricAlarm( + alarmName: 'TargetTracking-alarm2', + alarmDescription: 'alarm 2 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN1'], + insufficientDataActions: [], + metricName: 'metric2', + namespace: 'hello', + statistic: 'statistic2', + dimensions: [ + new Dimension(name: 'ClusterName', value: 'default'), + new Dimension(name: 'ServiceName', value: 'asgard-v000'), + new Dimension(name: 'other', value: 'dimension1') + ], + period: 10, + unit: 'unit2', + evaluationPeriods: 20, + threshold: 40.2, + comparisonOperator: 'LessThanOrEqualToThreshold' + ), + new MetricAlarm( + alarmName: 'TargetTracking-alarm3', + alarmDescription: 'alarm 3 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN2'], + insufficientDataActions: [], + metricName: 'metric3', + namespace: 'AWS/ECS', + statistic: 'statistic3', + dimensions: [ + new Dimension(name: 'ClusterName', value: 'default'), + new Dimension(name: 'ServiceName', value: 'asgard-v000') + ], + period: 1, + unit: 'unit3', + evaluationPeriods: 2, + threshold: 4.2, + comparisonOperator: 'GreaterThanOrEqualToThreshold' + ), + new MetricAlarm( + alarmName: 'TargetTracking-alarm4', + alarmDescription: 'alarm 4 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN2'], + insufficientDataActions: [], + metricName: 'metric4', + namespace: 'hello', + statistic: 
'statistic4', + dimensions: [ + new Dimension(name: 'ClusterName', value: 'default'), + new Dimension(name: 'ServiceName', value: 'asgard-v000') + ], + period: 10, + unit: 'unit4', + evaluationPeriods: 20, + threshold: 40.2, + comparisonOperator: 'LessThanOrEqualToThreshold' + ), + new MetricAlarm( + alarmName: 'alarm5', + alarmDescription: 'alarm 5 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN3'], + insufficientDataActions: [], + metricName: 'metric5', + namespace: 'other', + statistic: 'statistic5', + dimensions: [ + new Dimension(name: 'hello', value: 'world') + ], + period: 1, + unit: 'unit5', + evaluationPeriods: 2, + threshold: 4.2, + comparisonOperator: 'GreaterThanOrEqualToThreshold' + ), + new MetricAlarm( + alarmName: 'alarm6-asgard-v000', + alarmDescription: 'alarm 6 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['oldPolicyARN3'], + insufficientDataActions: [], + metricName: 'metric6', + namespace: 'AWS/ECS', + statistic: 'statistic6', + dimensions: [ + new Dimension(name: 'ClusterName', value: 'default'), + new Dimension(name: 'ServiceName', value: 'asgard-v000') + ], + period: 10, + unit: 'unit6', + evaluationPeriods: 20, + threshold: 40.2, + comparisonOperator: 'LessThanOrEqualToThreshold' + ), + ]) + 1 * targetCloudWatch.putMetricAlarm(new PutMetricAlarmRequest( + alarmName: 'alarm5-asgard-v001', + alarmDescription: 'alarm 5 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['newPolicyARN3'], + insufficientDataActions: [], + metricName: 'metric5', + namespace: 'other', + statistic: 'statistic5', + dimensions: [ + new Dimension(name: 'hello', value: 'world') + ], + period: 1, + unit: 'unit5', + evaluationPeriods: 2, + threshold: 4.2, + comparisonOperator: 'GreaterThanOrEqualToThreshold' + )) + 1 * targetCloudWatch.putMetricAlarm(new PutMetricAlarmRequest( + alarmName: 'alarm6-asgard-v001', + alarmDescription: 'alarm 6 description', + actionsEnabled: true, + oKActions: [], + alarmActions: ['newPolicyARN3'], + insufficientDataActions: [], + metricName: 'metric6', + namespace: 'AWS/ECS', + statistic: 'statistic6', + dimensions: [ + new Dimension(name: 'ClusterName', value: 'default'), + new Dimension(name: 'ServiceName', value: 'asgard-v001') + ], + period: 10, + unit: 'unit6', + evaluationPeriods: 20, + threshold: 40.2, + comparisonOperator: 'LessThanOrEqualToThreshold' + )) + } + + def 'should delete metric alarms'() { + given: def metricAlarms = [] 5.times { metricAlarms << new EcsMetricAlarm( - accountName: creds.getName(), - region: region + accountName: targetAccountName, + region: targetRegion, + alarmName: "alarm-name-${it}" ) } - def amazonCloudWatch = Mock(AmazonCloudWatch) - - service.amazonClientProvider = amazonClientProvider - service.accountCredentialsProvider = accountCredentialsProvider - service.metricAlarmCacheClient = metricAlarmCacheClient - - accountCredentialsProvider.getCredentials(_) >> creds - amazonClientProvider.getAmazonCloudWatch(_, _, _) >> amazonCloudWatch - metricAlarmCacheClient.getMetricAlarms(_, _, _) >> metricAlarms - + metricAlarmCacheClient.getMetricAlarms(targetServiceName,targetAccountName,targetRegion,clusterName) >> metricAlarms when: - service.deleteMetrics(serviceName, creds.getName(), region) + service.deleteMetrics(targetServiceName, targetAccountName, targetRegion, clusterName) then: - 1 * amazonCloudWatch.deleteAlarms(_) + 1 * targetCloudWatch.deleteAlarms({ DeleteAlarmsRequest request -> + request.alarmNames.sort() == 
metricAlarms*.alarmName.sort() + }) } + + } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelectorSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelectorSpec.groovy index 9687495a131..4e7124a5c45 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelectorSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SecurityGroupSelectorSpec.groovy @@ -96,7 +96,7 @@ class SecurityGroupSelectorSpec extends Specification { accountMapper.fromEcsAccountNameToAws(ECS_ACCOUNT) >> awsAccount - amazonSecurityGroupProvider.getAllByAccountAndRegion(_, _, _) >> [sg1, sg2, sg3, sg4] + amazonSecurityGroupProvider.getAllByAccountAndRegion(_, AWS_ACCOUNT, _) >> [sg1, sg2, sg3, sg4] amazonPrimitiveConverter.convertToEcsSecurityGroup([sg1, sg2, sg3, sg4]) >> [ecsSG1, ecsSG2, ecsSG3, ecsSG4] diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelectorSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelectorSpec.groovy index 5191c39a053..06856187d9f 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelectorSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/services/SubnetSelectorSpec.groovy @@ -31,10 +31,14 @@ class SubnetSelectorSpec extends Specification { public static final String ECS_ACCOUNT = 'ecsAccount' public static final String AWS_ACCOUNT = 'awsAccount' public static final String REGION = 'us-west-2' + public static final String AVAILABILITY_ZONE_1 = 'us-west-2a' + public static final String AVAILABILITY_ZONE_2 = 'us-west-2b' public static final String GOOD_SUBNET_ID_1 = 'subnet-aa123456' public static final String GOOD_SUBNET_ID_2 = 'subnet-bb123123' public static final String GOOD_SUBNET_ID_3 = 'subnet-dd345345' + public static final String GOOD_SUBNET_ID_4 = 'subnet-ee345678' public static final String BAD_SUBNET_ID = 'subnet-cc233323' + public static final String BAD_SUBNET_ID_2 = 'subnet-12345678' public static final String VPC_ID_1 = 'vpc-1' public static final String VPC_ID_2 = 'vpc-2' @@ -51,39 +55,60 @@ class SubnetSelectorSpec extends Specification { def subnet1 = new AmazonSubnet() subnet1.account = AWS_ACCOUNT subnet1.region = REGION + subnet1.availabilityZone = AVAILABILITY_ZONE_1 subnet1.purpose = subnetTypeNameWeWantToRetrieve subnet1.id = GOOD_SUBNET_ID_1 + def subnet2 = new AmazonSubnet() subnet2.account = AWS_ACCOUNT subnet2.region = REGION + subnet2.availabilityZone = AVAILABILITY_ZONE_1 subnet2.purpose = subnetTypeNameWeWantToRetrieve subnet2.id = GOOD_SUBNET_ID_2 def subnet3 = new AmazonSubnet() subnet3.account = AWS_ACCOUNT subnet3.region = REGION + subnet3.availabilityZone = AVAILABILITY_ZONE_1 subnet3.purpose = subnetTypeNameWeDoNotWantToRetrieve subnet3.id = BAD_SUBNET_ID + def subnet4 = new AmazonSubnet() + subnet4.account = AWS_ACCOUNT + subnet4.region = REGION + subnet4.availabilityZone = AVAILABILITY_ZONE_2 + subnet4.purpose = subnetTypeNameWeWantToRetrieve + subnet4.id = BAD_SUBNET_ID_2 + def ecsSubnet1 = new EcsSubnet() ecsSubnet1.account = ECS_ACCOUNT ecsSubnet1.region = REGION + ecsSubnet1.availabilityZone = AVAILABILITY_ZONE_1 ecsSubnet1.purpose = subnetTypeNameWeWantToRetrieve ecsSubnet1.id = GOOD_SUBNET_ID_1 def ecsSubnet2 = new EcsSubnet() ecsSubnet2.account = ECS_ACCOUNT 
ecsSubnet2.region = REGION + ecsSubnet2.availabilityZone = AVAILABILITY_ZONE_1 ecsSubnet2.purpose = subnetTypeNameWeWantToRetrieve ecsSubnet2.id = GOOD_SUBNET_ID_2 def ecsSubnet3 = new EcsSubnet() ecsSubnet3.account = ECS_ACCOUNT ecsSubnet3.region = REGION + ecsSubnet3.availabilityZone = AVAILABILITY_ZONE_1 ecsSubnet3.purpose = subnetTypeNameWeDoNotWantToRetrieve ecsSubnet3.id = BAD_SUBNET_ID + def ecsSubnet4 = new EcsSubnet() + ecsSubnet4.account = ECS_ACCOUNT + ecsSubnet4.region = REGION + ecsSubnet4.availabilityZone = AVAILABILITY_ZONE_2 + ecsSubnet4.purpose = subnetTypeNameWeWantToRetrieve + ecsSubnet4.id = BAD_SUBNET_ID_2 + + awsAccount.name >> AWS_ACCOUNT accountMapper.fromEcsAccountNameToAws(ECS_ACCOUNT) >> awsAccount @@ -101,6 +126,7 @@ class SubnetSelectorSpec extends Specification { def retrievedSubnetIds = subnetSelector.resolveSubnetsIds( ECS_ACCOUNT, REGION, + [AVAILABILITY_ZONE_1], subnetTypeNameWeWantToRetrieve ) retrievedSubnetIds.sort() @@ -110,8 +136,113 @@ class SubnetSelectorSpec extends Specification { retrievedSubnetIds.containsAll(desiredSubnetIds) desiredSubnetIds.containsAll(retrievedSubnetIds) !desiredSubnetIds.contains(BAD_SUBNET_ID) + !desiredSubnetIds.contains(BAD_SUBNET_ID_2) } + def 'should find the right subnets if multiple selected'() { + given: + def desiredSubnetIds = [GOOD_SUBNET_ID_1, GOOD_SUBNET_ID_2, GOOD_SUBNET_ID_4] + def subnetTypeNamesWeWantToRetrieve = Sets.newHashSet('goodSubnetType1', 'goodSubnetType2') + + def subnet1 = new AmazonSubnet() + subnet1.account = AWS_ACCOUNT + subnet1.region = REGION + subnet1.availabilityZone = AVAILABILITY_ZONE_1 + subnet1.purpose = 'goodSubnetType1' + subnet1.id = GOOD_SUBNET_ID_1 + + def subnet2 = new AmazonSubnet() + subnet2.account = AWS_ACCOUNT + subnet2.region = REGION + subnet2.availabilityZone = AVAILABILITY_ZONE_1 + subnet2.purpose = 'goodSubnetType1' + subnet2.id = GOOD_SUBNET_ID_2 + + def subnet3 = new AmazonSubnet() + subnet3.account = AWS_ACCOUNT + subnet3.region = REGION + subnet3.availabilityZone = AVAILABILITY_ZONE_1 + subnet3.purpose = 'badSubnetType' + subnet3.id = BAD_SUBNET_ID + + def subnet4 = new AmazonSubnet() + subnet4.account = AWS_ACCOUNT + subnet4.region = REGION + subnet4.availabilityZone = AVAILABILITY_ZONE_2 + subnet4.purpose = 'goodSubnetType2' + subnet4.id = BAD_SUBNET_ID_2 + + def subnet5 = new AmazonSubnet() + subnet5.account = AWS_ACCOUNT + subnet5.region = REGION + subnet5.availabilityZone = AVAILABILITY_ZONE_1 + subnet5.purpose = 'goodSubnetType2' + subnet5.id = GOOD_SUBNET_ID_4 + + def ecsSubnet1 = new EcsSubnet() + ecsSubnet1.account = ECS_ACCOUNT + ecsSubnet1.region = REGION + ecsSubnet1.availabilityZone = AVAILABILITY_ZONE_1 + ecsSubnet1.purpose = 'goodSubnetType1' + ecsSubnet1.id = GOOD_SUBNET_ID_1 + + def ecsSubnet2 = new EcsSubnet() + ecsSubnet2.account = ECS_ACCOUNT + ecsSubnet2.region = REGION + ecsSubnet2.availabilityZone = AVAILABILITY_ZONE_1 + ecsSubnet2.purpose = 'goodSubnetType1' + ecsSubnet2.id = GOOD_SUBNET_ID_2 + + def ecsSubnet3 = new EcsSubnet() + ecsSubnet3.account = ECS_ACCOUNT + ecsSubnet3.region = REGION + ecsSubnet3.availabilityZone = AVAILABILITY_ZONE_1 + ecsSubnet3.purpose = 'badSubnetType' + ecsSubnet3.id = BAD_SUBNET_ID + + def ecsSubnet4 = new EcsSubnet() + ecsSubnet4.account = ECS_ACCOUNT + ecsSubnet4.region = REGION + ecsSubnet4.availabilityZone = AVAILABILITY_ZONE_2 + ecsSubnet4.purpose = 'goodSubnetType2' + ecsSubnet4.id = BAD_SUBNET_ID_2 + + def ecsSubnet5 = new EcsSubnet() + ecsSubnet5.account = ECS_ACCOUNT + ecsSubnet5.region = REGION + 
ecsSubnet5.availabilityZone = AVAILABILITY_ZONE_1 + ecsSubnet5.purpose = 'goodSubnetType2' + ecsSubnet5.id = GOOD_SUBNET_ID_4 + + awsAccount.name >> AWS_ACCOUNT + + accountMapper.fromEcsAccountNameToAws(ECS_ACCOUNT) >> awsAccount + + amazonSubnetProvider.getAllMatchingKeyPattern( + Keys.getSubnetKey('*', REGION, AWS_ACCOUNT)) >> [subnet1, subnet2, subnet3, subnet5] + + def awsSubnets = Sets.newHashSet(subnet1, subnet2, subnet3, subnet5) + amazonPrimitiveConverter.convertToEcsSubnet(awsSubnets) >> [ecsSubnet1, ecsSubnet2, ecsSubnet3, ecsSubnet5] + + def subnetSelector = new SubnetSelector(amazonSubnetProvider, amazonPrimitiveConverter, accountMapper) + + when: + def retrievedSubnetIds = subnetSelector.resolveSubnetsIdsForMultipleSubnetTypes( + ECS_ACCOUNT, + REGION, + [AVAILABILITY_ZONE_1], + subnetTypeNamesWeWantToRetrieve + ) + retrievedSubnetIds.sort() + desiredSubnetIds.sort() + + then: + retrievedSubnetIds.containsAll(desiredSubnetIds) + desiredSubnetIds.containsAll(retrievedSubnetIds) + !desiredSubnetIds.contains(BAD_SUBNET_ID) + !desiredSubnetIds.contains(BAD_SUBNET_ID_2) + } def 'should return the right VPC IDs'() { given: diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcrImageProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcrImageProviderSpec.groovy index e63b0d165e9..88731c204d9 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcrImageProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcrImageProviderSpec.groovy @@ -1,7 +1,7 @@ package com.netflix.spinnaker.clouddriver.ecs.view import com.netflix.spinnaker.clouddriver.ecs.provider.view.EcrImageProvider -import org.junit.Test +import org.junit.jupiter.api.Test import spock.lang.Specification class EcrImageProviderSpec extends Specification { @@ -14,9 +14,57 @@ class EcrImageProviderSpec extends Specification { when: def canHandle = ecrImageProvider.handles(ecrRepositoryUrl) + def repoName = ecrImageProvider.extractEcrRepositoryName(ecrRepositoryUrl) + def accountId = ecrImageProvider.extractAwsAccountId(ecrRepositoryUrl) + def awsRegion = ecrImageProvider.extractAwsRegion(ecrRepositoryUrl) + def ecrIdentifier = ecrImageProvider.extractEcrIdentifier(repoName, ecrRepositoryUrl) then: - canHandle + canHandle && + accountId == "123456789012" && + repoName == "continuous-delivery" && + awsRegion == "us-west-2" && + ecrIdentifier == "latest" + } + + @Test + void shouldHandleEcrSubOrgRepositoryUrl() { + given: + EcrImageProvider ecrImageProvider = new EcrImageProvider(null, null) + String ecrRepositoryUrl = "123456789012.dkr.ecr.us-west-2.amazonaws.com/sub-org/continuous-delivery:latest" + + when: + def canHandle = ecrImageProvider.handles(ecrRepositoryUrl) + def repoName = ecrImageProvider.extractEcrRepositoryName(ecrRepositoryUrl) + def accountId = ecrImageProvider.extractAwsAccountId(ecrRepositoryUrl) + def awsRegion = ecrImageProvider.extractAwsRegion(ecrRepositoryUrl) + def ecrIdentifier = ecrImageProvider.extractEcrIdentifier(repoName, ecrRepositoryUrl) + + then: + canHandle && + accountId == "123456789012" && + repoName == "sub-org/continuous-delivery" && + awsRegion == "us-west-2" && + ecrIdentifier == "latest" + } + + @Test + void shouldHandleEcrSha256RepositoryUrl() { + given: + EcrImageProvider ecrImageProvider = new EcrImageProvider(null, null)
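+ // digest-form reference: the identifier after '@' is the sha256 digest rather than a tag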
"123456789012.dkr.ecr.us-east-1.amazonaws.com/continuous-delivery@sha256:e87afa4e9a1b5b2b10b596526881acb6e7007dbff43f37270921ba84dbeda428" + + when: + def canHandle = ecrImageProvider.handles(ecrRepositoryUrl) + def repoName = ecrImageProvider.extractEcrRepositoryName(ecrRepositoryUrl) + def accountId = ecrImageProvider.extractAwsAccountId(ecrRepositoryUrl) + def awsRegion = ecrImageProvider.extractAwsRegion(ecrRepositoryUrl) + def ecrIdentifier = ecrImageProvider.extractEcrIdentifier(repoName, ecrRepositoryUrl) + + then: + accountId == "123456789012" && + repoName == "continuous-delivery" && + awsRegion == "us-east-1" && + ecrIdentifier == "sha256:e87afa4e9a1b5b2b10b596526881acb6e7007dbff43f37270921ba84dbeda428" } @Test @@ -31,4 +79,34 @@ class EcrImageProviderSpec extends Specification { then: !canHandle } + + @Test + void shouldNotHandleShortEcrRepoName() { + given: + EcrImageProvider ecrImageProvider = new EcrImageProvider(null, null) + String ecrRepositoryUrl = "123456789012.dkr.ecr.us-west-2.amazonaws.com/n:latest" + + when: + boolean canHandle = ecrImageProvider.handles(ecrRepositoryUrl) + + then: + !canHandle + } + + @Test + void shouldNotHandleMissingRepoName() { + given: + EcrImageProvider ecrImageProvider = new EcrImageProvider(null, null) + String domainNameOnly = "123456789012.dkr.ecr.us-east-1.amazonaws.com" + String noRepoName = "123456789012.dkr.ecr.us-east-1.amazonaws.com/" + String subOrgNoRepoName = "123456789012.dkr.ecr.us-east-1.amazonaws.com/sub-org/" + + when: + boolean canHandleDomainNameOnly = ecrImageProvider.handles(domainNameOnly) + boolean canHandleNoRepoName = ecrImageProvider.handles(noRepoName) + boolean canHandleSubOrgNoRepoName = ecrImageProvider.handles(subOrgNoRepoName) + + then: + !canHandleDomainNameOnly && !canHandleNoRepoName && !canHandleSubOrgNoRepoName + } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProviderSpec.groovy index 82a66f06f5d..c31e0d65450 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsApplicationProviderSpec.groovy @@ -22,11 +22,14 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.clouddriver.ecs.TestCredential +import com.netflix.spinnaker.clouddriver.ecs.cache.Keys import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient import com.netflix.spinnaker.clouddriver.ecs.model.EcsApplication import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceCachingAgent +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TestServiceCachingAgentFactory +import com.netflix.spinnaker.clouddriver.ecs.security.NetflixECSCredentials import com.netflix.spinnaker.clouddriver.model.Application -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Specification import spock.lang.Subject @@ -34,25 +37,30 @@ class EcsApplicationProviderSpec extends Specification { def mapper = new ObjectMapper() def cache = Mock(Cache) def serviceCacheClient = new ServiceCacheClient(cache, mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def 
+ def credentialsRepository = Mock(CredentialsRepository) @Subject - def provider = new EcsApplicationProvider(accountCredentialsProvider, serviceCacheClient) + def provider = new EcsApplicationProvider(credentialsRepository, serviceCacheClient) def 'should return an application'() { given: - def credentials = TestCredential.named('test') - def appName = 'test' - def serviceName = appName + '-kcats-liated' + def accountName = 'test-account' + def credentials = new NetflixECSCredentials(TestCredential.named(accountName)) + def appName = 'testapp' + def serviceName = appName + '-kcats-liated-v001' + def monikerCluster = appName + '-kcats-liated' Map<String, Set<String>> clusterNames = new HashMap<>() - clusterNames.put(appName, Collections.singleton(serviceName)) + clusterNames.put(accountName, Collections.singleton(serviceName)) + Map<String, Set<String>> clusterNameMetadata = new HashMap<>() + clusterNameMetadata.put(accountName, Collections.singleton(monikerCluster)) + + def givenApp = (Application) new EcsApplication(appName, [ - iamRole : null, - desiredCount : '1', - taskDefinition: null + name: appName ], - clusterNames) + clusterNames, + clusterNameMetadata) def service = new Service( serviceName: serviceName, @@ -62,12 +70,13 @@ class EcsApplicationProviderSpec extends Specification { desiredCount: 1, createdAt: new Date() ) - def attributes = ServiceCachingAgent.convertServiceToAttributes(credentials.getName(), - credentials.getRegions()[0].getName(), service) + def attributes = TestServiceCachingAgentFactory.create(credentials, + credentials.getRegions()[0].getName()).convertServiceToAttributes(service) - accountCredentialsProvider.getAll() >> [credentials] - cache.filterIdentifiers(_, _) >> [] - cache.getAll(_, _) >> [new DefaultCacheData('key', attributes, [:])] + credentialsRepository.getAll() >> [credentials] + credentialsRepository.has(accountName) >> true + cache.filterIdentifiers(_, _) >> [Keys.getServiceKey(accountName, "us-east-1", serviceName)] + cache.getAll(_, _) >> [new DefaultCacheData(Keys.getServiceKey(accountName, "us-east-1", serviceName), attributes, [:])] when: def retrievedApp = provider.getApplication(appName) diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProviderSpec.groovy index 2fd549157b7..366b53e33e6 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/EcsInstanceProviderSpec.groovy @@ -26,9 +26,11 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance import com.netflix.spinnaker.clouddriver.ecs.cache.model.Task import com.netflix.spinnaker.clouddriver.ecs.model.EcsTask import com.netflix.spinnaker.clouddriver.ecs.services.ContainerInformationService +import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import spock.lang.Specification import spock.lang.Subject +import spock.lang.Unroll class EcsInstanceProviderSpec extends Specification { def accountCredentialsProvider = Mock(AccountCredentialsProvider) @@ -41,12 +43,10 @@ class EcsInstanceProviderSpec extends Specification { def provider = new EcsInstanceProvider(containerInformationService, taskCacheClient, containerInstanceCacheClient) + @Unroll def 'should return an EcsTask'() { given: - def region = 'us-west-1' def account = 'test-account' -
def taskId = 'deadbeef-94f3-4994-8e81-339c4d1be1ba' - def taskArn = 'arn:aws:ecs:' + region + ':123456789012:task/' + taskId def address = '127.0.0.1:1337' def startTime = System.currentTimeMillis() @@ -64,8 +64,8 @@ class EcsInstanceProviderSpec extends Specification { def containerInstance = new ContainerInstance() - def ecsTask = new EcsTask(taskId, startTime, 'RUNNING', 'RUNNING', - null, null, address, null) + def ecsTask = new EcsTask(taskId, startTime, 'RUNNING', 'RUNNING', 'HEALTHY', + null, null, address, null, false) taskCacheClient.get(_) >> task accountCredentialsProvider.getCredentials(_) >> netflixAmazonCredentials @@ -79,5 +79,77 @@ class EcsInstanceProviderSpec extends Specification { then: taskInstance == ecsTask + + where: + region | taskId | taskArn + 'us-west-1' | 'deadbeef-94f3-4994-8e81-339c4d1be1ba' | 'arn:aws:ecs:us-west-1:123456789012:task/deadbeef-94f3-4994-8e81-339c4d1be1ba' + 'us-west-1' | 'deadbeef94f349948e81339c4d1be1ba' | 'arn:aws:ecs:us-west-1:123456789012:task/my-cluster-123/deadbeef94f349948e81339c4d1be1ba' + 'us-west-1' | 'arn:aws:ecs:us-west-1:123456789012:task/deadbeef-94f3-4994-8e81-339c4d1be1ba' | 'foo' + 'us-west-1' | 'arn:aws:ecs:us-west-1:123456789012:task/my-cluster-123/deadbeef94f349948e81339c4d1be1ba' | 'foo' + } + + @Unroll + def 'should return an EcsTask with a health status defined'() { + given: + def account = 'test-account' + def address = '127.0.0.1:1337' + def startTime = System.currentTimeMillis() + + def netflixAmazonCredentials = Mock(NetflixAmazonCredentials) + def awsCredentialsProvider = Mock(AWSCredentialsProvider) + def amazonEC2 = Mock(AmazonEC2) + + def task = new Task( + taskId: taskId, + taskArn: 'arn:aws:ecs:us-west-1:123456789012:task/deadbeef-94f3-4994-8e81-339c4d1be1ba', + lastStatus: 'RUNNING', + desiredStatus: 'RUNNING', + healthStatus: healthStatus, + startedAt: startTime, + ) + + def containerInstance = new ContainerInstance() + + def ecsTask = new EcsTask(taskId, startTime, 'RUNNING', 'RUNNING', healthStatus, + null, null, address, null, hasHealthCheck) + + taskCacheClient.get(_) >> task + accountCredentialsProvider.getCredentials(_) >> netflixAmazonCredentials + netflixAmazonCredentials.getCredentialsProvider() >> awsCredentialsProvider + amazonClientProvider.getAmazonEC2(_, _, _) >> amazonEC2 + containerInstanceCacheClient.get(_) >> containerInstance + containerInformationService.getTaskPrivateAddress(_, _, _) >> address + containerInformationService.taskHasHealthCheck(_, _, _) >> hasHealthCheck + + when: + def taskInstance = provider.getInstance(account, region, taskId) + + then: + taskInstance == ecsTask + taskInstance.getHealthState() == ecsTask.getHealthState() + taskInstance.getHealthState() == healthState + + where: + healthStatus | healthState | region | taskId | hasHealthCheck + 'UNHEALTHY' | HealthState.Down | 'us-west-1' | 'deadbeef-94f3-4994-8e81-339c4d1be1ba'| true + 'UNHEALTHY' | HealthState.Down | 'us-west-1' | 'deadbeef-94f3-4994-8e81-339c4d1be1ba'| false + 'HEALTHY' | HealthState.Up | 'us-west-1' | 'deadbeef94f349948e81339c4d1be1ba' | true + 'HEALTHY' | HealthState.Up | 'us-west-1' | 'deadbeef94f349948e81339c4d1be1ba' | false + 'UNKNOWN' | HealthState.Starting | 'us-west-1' | 'deadbeef-94f3-4994-8e81-339c4d1be1ba'| true + 'UNKNOWN' | HealthState.Up | 'us-west-1' | 'deadbeef-94f3-4994-8e81-339c4d1be1ba'| false + } + + @Unroll + def 'should return null for invalid ECS task ID'() { + when: + def taskInstance = provider.getInstance(account, region, taskId) + + then: + taskInstance == null + + where: +
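// identifiers that are neither ECS task IDs nor task ARNs must not resolve to a task: +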
account | region | taskId + 'test-account' | 'us-west-1' | 'i-deadbeef' + 'test-account' | 'us-west-1' | 'arn:aws:ecs:us-west-1:123456789012:cluster/my-cluster-name' } } diff --git a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/UnvalidatedDockerImageProviderSpec.groovy b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/UnvalidatedDockerImageProviderSpec.groovy index 23b1610bb3f..984d3b19a87 100644 --- a/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/UnvalidatedDockerImageProviderSpec.groovy +++ b/clouddriver-ecs/src/test/groovy/com/netflix/spinnaker/clouddriver/ecs/view/UnvalidatedDockerImageProviderSpec.groovy @@ -1,7 +1,7 @@ package com.netflix.spinnaker.clouddriver.ecs.view import com.netflix.spinnaker.clouddriver.ecs.provider.view.UnvalidatedDockerImageProvider -import org.junit.Test +import org.junit.jupiter.api.Test import spock.lang.Specification class UnvalidatedDockerImageProviderSpec extends Specification { diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/TestCredential.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/TestCredential.java new file mode 100644 index 00000000000..f42e10d1a24 --- /dev/null +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/TestCredential.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.ecs; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAssumeRoleAmazonCredentials; +import java.util.*; + +public class TestCredential { + + public static NetflixAmazonCredentials named(String name) { + return TestCredential.named(name, Collections.emptyMap()); + } + + public static NetflixAmazonCredentials named(String name, Map<String, Object> additionalParams) { + final Map<String, Object> params = new LinkedHashMap<>(); + params.put("name", name); + params.put("environment", name); + params.put("accountType", name); + params.put("accountId", "123456789012" + name); + params.put("defaultKeyPair", "default-keypair"); + + final Map<String, Object> region1 = new LinkedHashMap<>(); + region1.put("name", "us-east-1"); + region1.put("availabilityZones", Arrays.asList("us-east-1b", "us-east-1c", "us-east-1d")); + + final Map<String, Object> region2 = new LinkedHashMap<>(); + region2.put("name", "us-west-1"); + region2.put("availabilityZones", Arrays.asList("us-west-1a", "us-west-1b")); + + params.put("regions", Arrays.asList(region1, region2)); + + params.putAll(additionalParams); + + return new ObjectMapper().convertValue(params, NetflixAmazonCredentials.class); + } + + public static NetflixAssumeRoleAmazonCredentials assumeRoleNamed(String name) { + return TestCredential.assumeRoleNamed(name, Collections.emptyMap()); + } + + public static NetflixAssumeRoleAmazonCredentials assumeRoleNamed( + String name, Map<String, Object> additionalParams) { + final Map<String, Object> params = new LinkedHashMap<>(); + params.put("name", name); + params.put("environment", name); + params.put("accountType", name); + params.put("accountId", "123456789012" + name); + params.put("defaultKeyPair", "default-keypair"); + + final Map<String, Object> region1 = new LinkedHashMap<>(); + region1.put("name", "us-east-1"); + region1.put("availabilityZones", Arrays.asList("us-east-1b", "us-east-1c", "us-east-1d")); + + final Map<String, Object> region2 = new LinkedHashMap<>(); + region2.put("name", "us-west-1"); + region2.put("availabilityZones", Arrays.asList("us-west-1a", "us-west-1b")); + + params.put("regions", Arrays.asList(region1, region2)); + + params.put("assumeRole", "role/" + name); + params.put("sessionName", name); + params.put("externalId", name); + + params.putAll(additionalParams); + + return new ObjectMapper().convertValue(params, NetflixAssumeRoleAmazonCredentials.class); + } +} diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/CommonCacheClient.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/CommonCacheClient.java index ed19c92d83a..c9950d2473e 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/CommonCacheClient.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/CommonCacheClient.java @@ -16,10 +16,10 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; -import com.netflix.spinnaker.cats.cache.Cache; - import static org.mockito.Mockito.mock; + +import com.netflix.spinnaker.cats.cache.Cache; + class CommonCacheClient { static final String REGION = "us-west-2"; static final String ACCOUNT = "test-account"; diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ContainerInstanceCacheClientTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ContainerInstanceCacheClientTest.java index c24ff509def..355d092f6db 100644 ---
a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ContainerInstanceCacheClientTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ContainerInstanceCacheClientTest.java @@ -16,19 +16,18 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.ContainerInstance; import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ContainerInstanceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ContainerInstanceCachingAgent; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collections; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class ContainerInstanceCacheClientTest extends CommonCacheClient { @Subject @@ -36,25 +35,39 @@ public class ContainerInstanceCacheClientTest extends CommonCacheClient { @Test public void shouldConvert() { - //Given - String containerInstanceArn = "arn:aws:ecs:" + REGION + ":012345678910:container-instance/14e8cce9-0b16-4af4-bfac-a85f7587aa98"; + // Given + String containerInstanceArn = + "arn:aws:ecs:" + + REGION + + ":012345678910:container-instance/14e8cce9-0b16-4af4-bfac-a85f7587aa98"; String key = Keys.getContainerInstanceKey(ACCOUNT, REGION, containerInstanceArn); ContainerInstance containerInstance = new ContainerInstance(); containerInstance.setEc2InstanceId("i-deadbeef"); containerInstance.setContainerInstanceArn(containerInstanceArn); - Map attributes = ContainerInstanceCachingAgent.convertContainerInstanceToAttributes(containerInstance); - when(cacheView.get(CONTAINER_INSTANCES.toString(), key)).thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); + Map attributes = + ContainerInstanceCachingAgent.convertContainerInstanceToAttributes(containerInstance); + when(cacheView.get(CONTAINER_INSTANCES.toString(), key)) + .thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); - //When - com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance ecsContainerInstance = client.get(key); + // When + com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance ecsContainerInstance = + client.get(key); - //Then - assertTrue("Expected the EC2 instance ID to be " + containerInstance.getEc2InstanceId() + " but got " + ecsContainerInstance.getEc2InstanceId(), - containerInstance.getEc2InstanceId().equals(ecsContainerInstance.getEc2InstanceId())); + // Then + assertTrue( + containerInstance.getEc2InstanceId().equals(ecsContainerInstance.getEc2InstanceId()), + "Expected the EC2 instance ID to be " + + containerInstance.getEc2InstanceId() + + " but got " + + ecsContainerInstance.getEc2InstanceId()); - assertTrue("Expected the container instance ARN to be " + containerInstance.getContainerInstanceArn() + " but got " + ecsContainerInstance.getArn(), - containerInstance.getContainerInstanceArn().equals(ecsContainerInstance.getArn())); + assertTrue( + containerInstance.getContainerInstanceArn().equals(ecsContainerInstance.getArn()), + "Expected the container instance ARN to be " + + 
containerInstance.getContainerInstanceArn() + + " but got " + + ecsContainerInstance.getArn()); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/EcsClusterCacheClientTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/EcsClusterCacheClientTest.java index f09d8c74c1f..4007ffcade8 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/EcsClusterCacheClientTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/EcsClusterCacheClientTest.java @@ -16,42 +16,50 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsClusterCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsCluster; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.EcsClusterCachingAgent; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collections; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class EcsClusterCacheClientTest extends CommonCacheClient { - @Subject - private final EcsClusterCacheClient client = new EcsClusterCacheClient(cacheView); + @Subject private final EcsClusterCacheClient client = new EcsClusterCacheClient(cacheView); @Test public void shouldConvert() { - //Given + // Given String clusterName = "test-cluster"; String clusterArn = "arn:aws:ecs:" + REGION + ":012345678910:cluster/" + clusterName; String key = Keys.getClusterKey(ACCOUNT, REGION, clusterName); - Map attributes = EcsClusterCachingAgent.convertClusterArnToAttributes(ACCOUNT, REGION, clusterArn); + Map attributes = + EcsClusterCachingAgent.convertClusterArnToAttributes(ACCOUNT, REGION, clusterArn); - when(cacheView.get(ECS_CLUSTERS.toString(), key)).thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); + when(cacheView.get(ECS_CLUSTERS.toString(), key)) + .thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); - //When + // When EcsCluster ecsCluster = client.get(key); - //Then - assertTrue("Expected cluster name to be " + clusterName + " but got " + ecsCluster.getName(), clusterName.equals(ecsCluster.getName())); - assertTrue("Expected cluster ARN to be " + clusterArn + " but got " + ecsCluster.getArn(), clusterArn.equals(ecsCluster.getArn())); - assertTrue("Expected cluster account to be " + ACCOUNT + " but got " + ecsCluster.getAccount(), ACCOUNT.equals(ecsCluster.getAccount())); - assertTrue("Expected cluster region to be " + REGION + " but got " + ecsCluster.getRegion(), REGION.equals(ecsCluster.getRegion())); + // Then + assertTrue( + clusterName.equals(ecsCluster.getName()), + "Expected cluster name to be " + clusterName + " but got " + ecsCluster.getName()); + assertTrue( + clusterArn.equals(ecsCluster.getArn()), + "Expected cluster ARN to be " + clusterArn + " but got " + ecsCluster.getArn()); + assertTrue( + ACCOUNT.equals(ecsCluster.getAccount()), + "Expected cluster account to be " + ACCOUNT + " but got " + ecsCluster.getAccount()); + assertTrue( + 
REGION.equals(ecsCluster.getRegion()), + "Expected cluster region to be " + REGION + " but got " + ecsCluster.getRegion()); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/IamRoleCacheClientTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/IamRoleCacheClientTest.java index 8c6386ba717..de409c2b2cd 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/IamRoleCacheClientTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/IamRoleCacheClientTest.java @@ -16,29 +16,27 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.client.IamRoleCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.IamRole; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamRoleCachingAgent; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.IamTrustRelationship; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collections; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class IamRoleCacheClientTest extends CommonCacheClient { - @Subject - private final IamRoleCacheClient client = new IamRoleCacheClient(cacheView); + @Subject private final IamRoleCacheClient client = new IamRoleCacheClient(cacheView); @Test public void shouldConvert() { - //Given + // Given ObjectMapper mapper = new ObjectMapper(); String name = "iam-role-name"; String key = Keys.getIamRoleKey(ACCOUNT, name); @@ -53,15 +51,19 @@ public void shouldConvert() { iamRole.setTrustRelationships(Collections.singleton(iamTrustRelationship)); Map attributes = IamRoleCachingAgent.convertIamRoleToAttributes(iamRole); - attributes.put("trustRelationships", Collections.singletonList(mapper.convertValue(iamTrustRelationship, Map.class))); + attributes.put( + "trustRelationships", + Collections.singletonList(mapper.convertValue(iamTrustRelationship, Map.class))); - when(cacheView.get(IAM_ROLE.toString(), key)).thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); + when(cacheView.get(IAM_ROLE.toString(), key)) + .thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); - //When + // When IamRole returnedIamRole = client.get(key); - //Then - assertTrue("Expected the IAM Role to be " + iamRole + " but got " + returnedIamRole, - iamRole.equals(returnedIamRole)); + // Then + assertTrue( + iamRole.equals(returnedIamRole), + "Expected the IAM Role to be " + iamRole + " but got " + returnedIamRole); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceCacheClientTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceCacheClientTest.java index 8952c874e9c..fbcbfeeff4d 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceCacheClientTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/ServiceCacheClientTest.java @@ -16,33 
+16,35 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; -import com.amazonaws.services.ecs.model.DeploymentConfiguration; -import com.amazonaws.services.ecs.model.LoadBalancer; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.ecs.model.*; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.ecs.TestCredential; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.Service; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.ServiceCachingAgent; -import org.junit.Test; -import spock.lang.Subject; - +import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TestServiceCachingAgentFactory; import java.util.Collections; import java.util.Date; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class ServiceCacheClientTest extends CommonCacheClient { private final ObjectMapper mapper = new ObjectMapper(); - @Subject - private final ServiceCacheClient client = new ServiceCacheClient(cacheView, mapper); + + @Subject private final ServiceCacheClient client = new ServiceCacheClient(cacheView, mapper); @Test public void shouldConvert() { - //Given - ObjectMapper mapper = new ObjectMapper(); + // Given + ServiceCachingAgent agent = + TestServiceCachingAgentFactory.create(TestCredential.named(ACCOUNT), REGION); + String applicationName = "test"; String serviceName = applicationName + "-stack-detail-v1"; String key = Keys.getServiceKey(ACCOUNT, REGION, serviceName); @@ -54,68 +56,156 @@ public void shouldConvert() { loadBalancer.setLoadBalancerName("balancer-of-load"); loadBalancer.setTargetGroupArn("target-group-arn"); - com.amazonaws.services.ecs.model.Service service = new com.amazonaws.services.ecs.model.Service(); + com.amazonaws.services.ecs.model.Service service = + new com.amazonaws.services.ecs.model.Service(); service.setServiceName(serviceName); service.setServiceArn("arn:aws:ecs:" + REGION + ":012345678910:service/" + serviceName); service.setClusterArn("arn:aws:ecs:" + REGION + ":012345678910:cluster/" + clusterName); - service.setTaskDefinition("arn:aws:ecs:" + REGION + ":012345678910:task-definition/test-task-def:1"); + service.setTaskDefinition( + "arn:aws:ecs:" + REGION + ":012345678910:task-definition/test-task-def:1"); service.setRoleArn("arn:aws:ecs:" + REGION + ":012345678910:service/test-role"); - service.setDeploymentConfiguration(new DeploymentConfiguration().withMinimumHealthyPercent(50).withMaximumPercent(100)); + service.setDeploymentConfiguration( + new DeploymentConfiguration().withMinimumHealthyPercent(50).withMaximumPercent(100)); + service.setNetworkConfiguration( + new NetworkConfiguration() + .withAwsvpcConfiguration( + new AwsVpcConfiguration() + .withSecurityGroups(Collections.singletonList("security-group-id")) + .withSubnets(Collections.singletonList("subnet-id")))); service.setLoadBalancers(Collections.singleton(loadBalancer)); service.setDesiredCount(9001); service.setCreatedAt(new Date()); - Map attributes = ServiceCachingAgent.convertServiceToAttributes(ACCOUNT, REGION, service); - 
attributes.put("loadBalancers", Collections.singletonList(mapper.convertValue(loadBalancer, Map.class))); + Map attributes = agent.convertServiceToAttributes(service); + attributes.put( + "loadBalancers", Collections.singletonList(mapper.convertValue(loadBalancer, Map.class))); - when(cacheView.get(SERVICES.toString(), key)).thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); + when(cacheView.get(SERVICES.toString(), key)) + .thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); - //When + // When Service ecsService = client.get(key); - //Then - assertTrue("Expected the cluster name to be " + clusterName + " but got " + ecsService.getClusterName(), - clusterName.equals(ecsService.getClusterName())); - - assertTrue("Expected the cluster ARN to be " + service.getClusterArn() + " but got " + ecsService.getClusterArn(), - service.getClusterArn().equals(ecsService.getClusterArn())); - - assertTrue("Expected the account of the service to be " + ACCOUNT + " but got " + ecsService.getAccount(), - ACCOUNT.equals(ecsService.getAccount())); - - assertTrue("Expected the region of the service to be " + REGION + " but got " + ecsService.getRegion(), - REGION.equals(ecsService.getRegion())); - - assertTrue("Expected the service application name to be " + applicationName + " but got " + ecsService.getApplicationName(), - applicationName.equals(ecsService.getApplicationName())); - - assertTrue("Expected the service name to be " + serviceName + " but got " + ecsService.getServiceName(), - serviceName.equals(ecsService.getServiceName())); - - assertTrue("Expected the service ARN to be " + service.getServiceArn() + " but got " + ecsService.getServiceArn(), - service.getServiceArn().equals(ecsService.getServiceArn())); - - assertTrue("Expected the role ARN of the service to be " + service.getRoleArn() + " but got " + ecsService.getRoleArn(), - service.getRoleArn().equals(ecsService.getRoleArn())); - - assertTrue("Expected the task definition of the service to be " + service.getTaskDefinition() + " but got " + ecsService.getTaskDefinition(), - service.getTaskDefinition().equals(ecsService.getTaskDefinition())); - - assertTrue("Expected the desired count of the service to be " + service.getDesiredCount() + " but got " + ecsService.getDesiredCount(), - service.getDesiredCount() == ecsService.getDesiredCount()); - - assertTrue("Expected the maximum percent of the service to be " + service.getDeploymentConfiguration().getMaximumPercent() + " but got " + ecsService.getMaximumPercent(), - service.getDeploymentConfiguration().getMaximumPercent() == ecsService.getMaximumPercent()); - - assertTrue("Expected the minimum healthy percent of the service to be " + service.getDeploymentConfiguration().getMinimumHealthyPercent() + " but got " + ecsService.getMinimumHealthyPercent(), - service.getDeploymentConfiguration().getMinimumHealthyPercent() == ecsService.getMinimumHealthyPercent()); - - assertTrue("Expected the created at of the service to be " + service.getCreatedAt().getTime() + " but got " + ecsService.getCreatedAt(), - service.getCreatedAt().getTime() == ecsService.getCreatedAt()); - - assertTrue("Expected the service to have 1 load balancer but got " + ecsService.getLoadBalancers().size(), - ecsService.getLoadBalancers().size() == 1); - - assertTrue("Expected the service to have load balancer " + loadBalancer + " but got " + ecsService.getLoadBalancers().get(0), - ecsService.getLoadBalancers().get(0).equals(loadBalancer)); + // Then + assertTrue( + 
clusterName.equals(ecsService.getClusterName()), + "Expected the cluster name to be " + + clusterName + + " but got " + + ecsService.getClusterName()); + + assertTrue( + service.getClusterArn().equals(ecsService.getClusterArn()), + "Expected the cluster ARN to be " + + service.getClusterArn() + + " but got " + + ecsService.getClusterArn()); + + assertTrue( + ACCOUNT.equals(ecsService.getAccount()), + "Expected the account of the service to be " + + ACCOUNT + + " but got " + + ecsService.getAccount()); + + assertTrue( + REGION.equals(ecsService.getRegion()), + "Expected the region of the service to be " + + REGION + + " but got " + + ecsService.getRegion()); + + assertTrue( + applicationName.equals(ecsService.getApplicationName()), + "Expected the service application name to be " + + applicationName + + " but got " + + ecsService.getApplicationName()); + + assertTrue( + serviceName.equals(ecsService.getServiceName()), + "Expected the service name to be " + + serviceName + + " but got " + + ecsService.getServiceName()); + + assertTrue( + service.getServiceArn().equals(ecsService.getServiceArn()), + "Expected the service ARN to be " + + service.getServiceArn() + + " but got " + + ecsService.getServiceArn()); + + assertTrue( + service.getRoleArn().equals(ecsService.getRoleArn()), + "Expected the role ARN of the service to be " + + service.getRoleArn() + + " but got " + + ecsService.getRoleArn()); + + assertTrue( + service.getTaskDefinition().equals(ecsService.getTaskDefinition()), + "Expected the task definition of the service to be " + + service.getTaskDefinition() + + " but got " + + ecsService.getTaskDefinition()); + + assertTrue( + service.getDesiredCount() == ecsService.getDesiredCount(), + "Expected the desired count of the service to be " + + service.getDesiredCount() + + " but got " + + ecsService.getDesiredCount()); + + assertTrue( + service.getDeploymentConfiguration().getMaximumPercent() == ecsService.getMaximumPercent(), + "Expected the maximum percent of the service to be " + + service.getDeploymentConfiguration().getMaximumPercent() + + " but got " + + ecsService.getMaximumPercent()); + + assertTrue( + service.getDeploymentConfiguration().getMinimumHealthyPercent() + == ecsService.getMinimumHealthyPercent(), + "Expected the minimum healthy percent of the service to be " + + service.getDeploymentConfiguration().getMinimumHealthyPercent() + + " but got " + + ecsService.getMinimumHealthyPercent()); + + assertTrue( + service.getCreatedAt().getTime() == ecsService.getCreatedAt(), + "Expected the created at of the service to be " + + service.getCreatedAt().getTime() + + " but got " + + ecsService.getCreatedAt()); + + assertTrue( + ecsService.getSubnets().size() == 1, + "Expected the service to have 1 subnet but got " + ecsService.getSubnets().size()); + + assertTrue( + ecsService.getSubnets().get(0).equals("subnet-id"), + "Expected the service to have subnet subnet-id but got " + ecsService.getSubnets().get(0)); + + assertTrue( + ecsService.getSecurityGroups().size() == 1, + "Expected the service to have 1 security group but got " + + ecsService.getSecurityGroups().size()); + + assertTrue( + ecsService.getSecurityGroups().get(0).equals("security-group-id"), + "Expected the service to have security group security-group-id but got " + + ecsService.getSecurityGroups().get(0)); + + assertTrue( + ecsService.getLoadBalancers().size() == 1, + "Expected the service to have 1 load balancer but got " + + ecsService.getLoadBalancers().size()); + + assertTrue( + 
ecsService.getLoadBalancers().get(0).equals(loadBalancer), + "Expected the service to have load balancer " + + loadBalancer + + " but got " + + ecsService.getLoadBalancers().get(0)); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskCacheClientTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskCacheClientTest.java index 87cbf99fbd0..4837a8411ea 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskCacheClientTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskCacheClientTest.java @@ -16,74 +16,111 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.Task; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskCacheClient; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskCachingAgent; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collections; import java.util.Date; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class TaskCacheClientTest extends CommonCacheClient { private final ObjectMapper mapper = new ObjectMapper(); - @Subject - private final TaskCacheClient client = new TaskCacheClient(cacheView, mapper); + @Subject private final TaskCacheClient client = new TaskCacheClient(cacheView, mapper); @Test public void shouldConvert() { - //Given + // Given String taskId = "1dc5c17a-422b-4dc4-b493-371970c6c4d6"; String key = Keys.getTaskKey(ACCOUNT, REGION, taskId); String clusterArn = "arn:aws:ecs:" + REGION + ":012345678910:cluster/test-cluster"; String taskArn = "arn:aws:ecs:" + REGION + ":012345678910:task/" + taskId; + String availabilityZone = REGION + "a"; Task task = new Task(); task.setClusterArn(clusterArn); task.setTaskArn(taskArn); - task.setContainerInstanceArn("arn:aws:ecs:" + REGION + ":012345678910:container/e09064f7-7361-4c87-8ab9-8d073bbdbcb9"); + task.setContainerInstanceArn( + "arn:aws:ecs:" + REGION + ":012345678910:container/e09064f7-7361-4c87-8ab9-8d073bbdbcb9"); task.setGroup("group:testservice-stack-details-v1"); task.setContainers(Collections.emptyList()); task.setLastStatus("RUNNING"); + task.setHealthStatus("HEALTHY"); task.setDesiredStatus("RUNNING"); task.setStartedAt(new Date()); + task.setAvailabilityZone(availabilityZone); Map attributes = TaskCachingAgent.convertTaskToAttributes(task); - when(cacheView.get(TASKS.toString(), key)).thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); + when(cacheView.get(TASKS.toString(), key)) + .thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); - //When + // When com.netflix.spinnaker.clouddriver.ecs.cache.model.Task ecsTask = client.get(key); - //Then - assertTrue("Expected the cluster ARN to be " + clusterArn + " but got " + ecsTask.getClusterArn(), - clusterArn.equals(ecsTask.getClusterArn())); - - assertTrue("Expected the task ARN to be " + taskArn + " but got " + ecsTask.getTaskArn(), - taskArn.equals(ecsTask.getTaskArn())); - - 
assertTrue("Expected the container instance ARN name to be " + task.getContainerInstanceArn() + " but got " + ecsTask.getContainerInstanceArn(), - task.getContainerInstanceArn().equals(ecsTask.getContainerInstanceArn())); - - assertTrue("Expected the group to be " + task.getGroup() + " but got " + ecsTask.getGroup(), - task.getGroup().equals(ecsTask.getGroup())); - - assertTrue("Expected the last status to be " + task.getLastStatus() + " but got " + ecsTask.getLastStatus(), - task.getLastStatus().equals(ecsTask.getLastStatus())); - - assertTrue("Expected the desired status to be " + task.getDesiredStatus() + " but got " + ecsTask.getDesiredStatus(), - task.getDesiredStatus().equals(ecsTask.getDesiredStatus())); - - assertTrue("Expected the started at to be " + task.getStartedAt().getTime() + " but got " + ecsTask.getStartedAt(), - task.getStartedAt().getTime() == ecsTask.getStartedAt()); - - assertTrue("Expected the task to have 0 containers but got " + task.getContainers().size(), - task.getContainers().size() == 0); + // Then + assertTrue( + clusterArn.equals(ecsTask.getClusterArn()), + "Expected the cluster ARN to be " + clusterArn + " but got " + ecsTask.getClusterArn()); + + assertTrue( + taskArn.equals(ecsTask.getTaskArn()), + "Expected the task ARN to be " + taskArn + " but got " + ecsTask.getTaskArn()); + + assertTrue( + task.getContainerInstanceArn().equals(ecsTask.getContainerInstanceArn()), + "Expected the container instance ARN name to be " + + task.getContainerInstanceArn() + + " but got " + + ecsTask.getContainerInstanceArn()); + + assertTrue( + task.getGroup().equals(ecsTask.getGroup()), + "Expected the group to be " + task.getGroup() + " but got " + ecsTask.getGroup()); + + assertTrue( + task.getLastStatus().equals(ecsTask.getLastStatus()), + "Expected the last status to be " + + task.getLastStatus() + + " but got " + + ecsTask.getLastStatus()); + + assertTrue( + task.getHealthStatus().equals(ecsTask.getHealthStatus()), + "Expected the health status to be " + + task.getHealthStatus() + + " but got " + + ecsTask.getHealthStatus()); + + assertTrue( + task.getDesiredStatus().equals(ecsTask.getDesiredStatus()), + "Expected the desired status to be " + + task.getDesiredStatus() + + " but got " + + ecsTask.getDesiredStatus()); + + assertTrue( + task.getStartedAt().getTime() == ecsTask.getStartedAt(), + "Expected the started at to be " + + task.getStartedAt().getTime() + + " but got " + + ecsTask.getStartedAt()); + + assertTrue( + task.getContainers().size() == 0, + "Expected the task to have 0 containers but got " + task.getContainers().size()); + + assertTrue( + task.getAvailabilityZone().equals(ecsTask.getAvailabilityZone()), + "Expected the availability zone to be " + + task.getAvailabilityZone() + + " but got " + + ecsTask.getAvailabilityZone()); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskDefinitionCacheClientTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskDefinitionCacheClientTest.java index 7df67eff7da..d7a378ce6c1 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskDefinitionCacheClientTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/cache/TaskDefinitionCacheClientTest.java @@ -16,21 +16,20 @@ package com.netflix.spinnaker.clouddriver.ecs.cache; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import 
static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.ContainerDefinition; import com.amazonaws.services.ecs.model.TaskDefinition; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskDefinitionCacheClient; import com.netflix.spinnaker.clouddriver.ecs.provider.agent.TaskDefinitionCachingAgent; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collections; import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class TaskDefinitionCacheClientTest extends CommonCacheClient { ObjectMapper mapper = new ObjectMapper(); @@ -40,9 +39,10 @@ public class TaskDefinitionCacheClientTest extends CommonCacheClient { @Test public void shouldConvert() { - //Given + // Given ObjectMapper mapper = new ObjectMapper(); - String taskDefinitionArn = "arn:aws:ecs:" + REGION + ":012345678910:task-definition/hello_world:10"; + String taskDefinitionArn = + "arn:aws:ecs:" + REGION + ":012345678910:task-definition/hello_world:10"; String key = Keys.getTaskDefinitionKey(ACCOUNT, REGION, taskDefinitionArn); ContainerDefinition containerDefinition = new ContainerDefinition(); @@ -52,17 +52,27 @@ public void shouldConvert() { TaskDefinition taskDefinition = new TaskDefinition(); taskDefinition.setTaskDefinitionArn(taskDefinitionArn); + taskDefinition.setMemory("1"); + taskDefinition.setCpu("2"); taskDefinition.setContainerDefinitions(Collections.singleton(containerDefinition)); - Map attributes = TaskDefinitionCachingAgent.convertTaskDefinitionToAttributes(taskDefinition); - attributes.put("containerDefinitions", Collections.singletonList(mapper.convertValue(containerDefinition, Map.class))); - when(cacheView.get(TASK_DEFINITIONS.toString(), key)).thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); + Map attributes = + TaskDefinitionCachingAgent.convertTaskDefinitionToAttributes(taskDefinition); + attributes.put( + "containerDefinitions", + Collections.singletonList(mapper.convertValue(containerDefinition, Map.class))); + when(cacheView.get(TASK_DEFINITIONS.toString(), key)) + .thenReturn(new DefaultCacheData(key, attributes, Collections.emptyMap())); - //When + // When TaskDefinition retrievedTaskDefinition = client.get(key); - //Then - assertTrue("Expected the task definition to be " + taskDefinition + " but got " + retrievedTaskDefinition, - taskDefinition.equals(retrievedTaskDefinition)); + // Then + assertTrue( + taskDefinition.equals(retrievedTaskDefinition), + "Expected the task definition to be " + + taskDefinition + + " but got " + + retrievedTaskDefinition); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/CommonCachingAgent.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/CommonCachingAgent.java index 62fe40e45e0..5c2c58f7081 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/CommonCachingAgent.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/CommonCachingAgent.java @@ -16,23 +16,24 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static 
org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ecs.AmazonECS; import com.netflix.spectator.api.Registry; import com.netflix.spinnaker.cats.provider.ProviderCache; import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import org.junit.BeforeClass; - -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.BeforeAll; public class CommonCachingAgent { static final String REGION = "us-west-2"; - private static final String ECS_SERIVCE = "arn:aws:ecs:" + REGION + ":012345678910:"; + static final String ACCOUNT_ID = "012345678910"; + private static final String ECS_SERIVCE = "arn:aws:ecs:" + REGION + ":" + ACCOUNT_ID + ":"; static final String ACCOUNT = "test-account"; static final String APP_NAME = "testapp"; static final String ROLE_ARN = ECS_SERIVCE + "service/test-role"; @@ -53,8 +54,10 @@ public class CommonCachingAgent { static final String TASK_ARN_1 = ECS_SERIVCE + "task/" + TASK_ID_1; static final String TASK_ARN_2 = ECS_SERIVCE + "task/" + TASK_ID_2; - static final String CONTAINER_INSTANCE_ARN_1 = ECS_SERIVCE + "container-instance/14e8cce9-0b16-4af4-bfac-a85f7587aa98"; - static final String CONTAINER_INSTANCE_ARN_2 = ECS_SERIVCE + "container-instance/deadbeef-0b16-4af4-bfac-a85f7587aa98"; + static final String CONTAINER_INSTANCE_ARN_1 = + ECS_SERIVCE + "container-instance/14e8cce9-0b16-4af4-bfac-a85f7587aa98"; + static final String CONTAINER_INSTANCE_ARN_2 = + ECS_SERIVCE + "container-instance/deadbeef-0b16-4af4-bfac-a85f7587aa98"; static final String EC2_INSTANCE_ID_1 = "i-042f39dc"; static final String EC2_INSTANCE_ID_2 = "i-deadbeef"; @@ -62,6 +65,9 @@ public class CommonCachingAgent { static final String TASK_DEFINITION_ARN_1 = ECS_SERIVCE + "task-definition/hello_world:10"; static final String TASK_DEFINITION_ARN_2 = ECS_SERIVCE + "task-definition/hello_world:20"; + static final String SUBNET_ID_1 = "subnet-1234"; + static final String SECURITY_GROUP_1 = "sg-1234"; + static final AmazonECS ecs = mock(AmazonECS.class); static final AmazonClientProvider clientProvider = mock(AmazonClientProvider.class); final ProviderCache providerCache = mock(ProviderCache.class); @@ -72,11 +78,12 @@ public class CommonCachingAgent { static { netflixAmazonCredentials = mock(NetflixAmazonCredentials.class); when(netflixAmazonCredentials.getName()).thenReturn(ACCOUNT); + when(netflixAmazonCredentials.getAccountId()).thenReturn(ACCOUNT_ID); } - @BeforeClass + @BeforeAll public static void setUp() { - when(clientProvider.getAmazonEcs(eq(netflixAmazonCredentials), anyString(), anyBoolean())).thenReturn(ecs); + when(clientProvider.getAmazonEcs(eq(netflixAmazonCredentials), anyString(), anyBoolean())) + .thenReturn(ecs); } - } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCacheTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCacheTest.java index 3970546e64a..8f505b2baf9 100644 --- 
a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCacheTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCacheTest.java @@ -16,6 +16,11 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.ContainerInstance; import com.amazonaws.services.ecs.model.DescribeContainerInstancesRequest; import com.amazonaws.services.ecs.model.DescribeContainerInstancesResult; @@ -27,52 +32,68 @@ import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ContainerInstanceCacheClient; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class ContainerInstanceCacheTest extends CommonCachingAgent { @Subject - private final ContainerInstanceCachingAgent agent = new ContainerInstanceCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + private final ContainerInstanceCachingAgent agent = + new ContainerInstanceCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + @Subject - private final ContainerInstanceCacheClient client = new ContainerInstanceCacheClient(providerCache); + private final ContainerInstanceCacheClient client = + new ContainerInstanceCacheClient(providerCache); @Test public void shouldRetrieveFromWrittenCache() { - //Given + // Given String key = Keys.getContainerInstanceKey(ACCOUNT, REGION, CONTAINER_INSTANCE_ARN_1); ContainerInstance containerInstance = new ContainerInstance(); containerInstance.setContainerInstanceArn(CONTAINER_INSTANCE_ARN_1); containerInstance.setEc2InstanceId(EC2_INSTANCE_ID_1); - when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); - when(ecs.listContainerInstances(any(ListContainerInstancesRequest.class))).thenReturn(new ListContainerInstancesResult().withContainerInstanceArns(CONTAINER_INSTANCE_ARN_1)); - when(ecs.describeContainerInstances(any(DescribeContainerInstancesRequest.class))).thenReturn(new DescribeContainerInstancesResult().withContainerInstances(containerInstance)); + when(ecs.listClusters(any(ListClustersRequest.class))) + .thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); + when(ecs.listContainerInstances(any(ListContainerInstancesRequest.class))) + .thenReturn( + new ListContainerInstancesResult().withContainerInstanceArns(CONTAINER_INSTANCE_ARN_1)); + when(ecs.describeContainerInstances(any(DescribeContainerInstancesRequest.class))) + .thenReturn( + new DescribeContainerInstancesResult().withContainerInstances(containerInstance)); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - when(providerCache.get(CONTAINER_INSTANCES.toString(), 
key)).thenReturn(cacheResult.getCacheResults().get(CONTAINER_INSTANCES.toString()).iterator().next()); + when(providerCache.get(CONTAINER_INSTANCES.toString(), key)) + .thenReturn( + cacheResult.getCacheResults().get(CONTAINER_INSTANCES.toString()).iterator().next()); - //Then - Collection cacheData = cacheResult.getCacheResults().get(CONTAINER_INSTANCES.toString()); - com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance ecsContainerInstance = client.get(key); + // Then + Collection cacheData = + cacheResult.getCacheResults().get(CONTAINER_INSTANCES.toString()); + com.netflix.spinnaker.clouddriver.ecs.cache.model.ContainerInstance ecsContainerInstance = + client.get(key); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + assertTrue(cacheData != null, "Expected CacheData to be returned but null is returned"); + assertTrue(cacheData.size() == 1, "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); + assertTrue( + retrievedKey.equals(key), + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); - assertTrue("Expected the container instance to have EC2 instance ID of " + containerInstance.getEc2InstanceId() + " but got " + ecsContainerInstance.getEc2InstanceId(), - containerInstance.getEc2InstanceId().equals(ecsContainerInstance.getEc2InstanceId())); - assertTrue("Expected the container instance to have the ARN " + containerInstance.getContainerInstanceArn() + " but got " + ecsContainerInstance.getArn(), - containerInstance.getContainerInstanceArn().equals(ecsContainerInstance.getArn())); + assertTrue( + containerInstance.getEc2InstanceId().equals(ecsContainerInstance.getEc2InstanceId()), + "Expected the container instance to have EC2 instance ID of " + + containerInstance.getEc2InstanceId() + + " but got " + + ecsContainerInstance.getEc2InstanceId()); + assertTrue( + containerInstance.getContainerInstanceArn().equals(ecsContainerInstance.getArn()), + "Expected the container instance to have the ARN " + + containerInstance.getContainerInstanceArn() + + " but got " + + ecsContainerInstance.getArn()); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgentTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgentTest.java index 3cf7782fdc8..8e18b954d06 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgentTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ContainerInstanceCachingAgentTest.java @@ -16,6 +16,11 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.ContainerInstance; import com.amazonaws.services.ecs.model.DescribeContainerInstancesRequest; import com.amazonaws.services.ecs.model.DescribeContainerInstancesResult; @@ -24,60 +29,77 @@ import 
com.amazonaws.services.ecs.model.ListContainerInstancesRequest; import com.amazonaws.services.ecs.model.ListContainerInstancesResult; import com.netflix.spinnaker.cats.cache.CacheData; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.CONTAINER_INSTANCES; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class ContainerInstanceCachingAgentTest extends CommonCachingAgent { @Subject - private final ContainerInstanceCachingAgent agent = new ContainerInstanceCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + private final ContainerInstanceCachingAgent agent = + new ContainerInstanceCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); @Test public void shouldGetListOfContainerInstances() { - //Given + // Given List containerInstanceArns = new LinkedList<>(); containerInstanceArns.add(CONTAINER_INSTANCE_ARN_1); containerInstanceArns.add(CONTAINER_INSTANCE_ARN_2); Collection containerInstances = new LinkedList<>(); - containerInstances.add(new ContainerInstance().withContainerInstanceArn(CONTAINER_INSTANCE_ARN_1)); - containerInstances.add(new ContainerInstance().withContainerInstanceArn(CONTAINER_INSTANCE_ARN_2)); + containerInstances.add( + new ContainerInstance().withContainerInstanceArn(CONTAINER_INSTANCE_ARN_1)); + containerInstances.add( + new ContainerInstance().withContainerInstanceArn(CONTAINER_INSTANCE_ARN_2)); - ListContainerInstancesResult listContainerInstacesResult = new ListContainerInstancesResult().withContainerInstanceArns(containerInstanceArns); - when(ecs.listContainerInstances(any(ListContainerInstancesRequest.class))).thenReturn(listContainerInstacesResult); + ListContainerInstancesResult listContainerInstancesResult = + new ListContainerInstancesResult().withContainerInstanceArns(containerInstanceArns); + when(ecs.listContainerInstances(any(ListContainerInstancesRequest.class))) + .thenReturn(listContainerInstancesResult); - DescribeContainerInstancesResult describeContainerInstanceResult = new DescribeContainerInstancesResult().withContainerInstances(containerInstances); - when(ecs.describeContainerInstances(any(DescribeContainerInstancesRequest.class))).thenReturn(describeContainerInstanceResult); + DescribeContainerInstancesResult describeContainerInstanceResult = + new DescribeContainerInstancesResult().withContainerInstances(containerInstances); + when(ecs.describeContainerInstances(any(DescribeContainerInstancesRequest.class))) + .thenReturn(describeContainerInstanceResult); - when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); + when(ecs.listClusters(any(ListClustersRequest.class))) + .thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); - //When + // When List returnedContainerInstances = agent.getItems(ecs, providerCache); - //Then - assertTrue("Expected the list to contain " + containerInstances.size() + " ECS container instances, but got " + returnedContainerInstances.size(), returnedContainerInstances.size() == containerInstances.size()); + // Then + assertTrue( + returnedContainerInstances.size() == 
containerInstances.size(), + "Expected the list to contain " + + containerInstances.size() + + " ECS container instances, but got " + + returnedContainerInstances.size()); for (ContainerInstance containerInstance : returnedContainerInstances) { - assertTrue("Expected the container instance to be in " + containerInstances + " list but it was not. The container instance is: " + containerInstance, containerInstances.contains(containerInstance)); - assertTrue("Expected the container instance arn to be in " + containerInstanceArns + " list but it was not. The container instance ARN is: " + containerInstance.getContainerInstanceArn(), containerInstanceArns.contains(containerInstance.getContainerInstanceArn())); + assertTrue( + containerInstances.contains(containerInstance), + "Expected the container instance to be in " + + containerInstances + + " list but it was not. The container instance is: " + + containerInstance); + assertTrue( + containerInstanceArns.contains(containerInstance.getContainerInstanceArn()), + "Expected the container instance arn to be in " + + containerInstanceArns + + " list but it was not. The container instance ARN is: " + + containerInstance.getContainerInstanceArn()); } } @Test public void shouldGenerateFreshData() { - //Given + // Given Set arns = new HashSet<>(); arns.add(CONTAINER_INSTANCE_ARN_1); arns.add(CONTAINER_INSTANCE_ARN_2); @@ -87,21 +109,50 @@ public void shouldGenerateFreshData() { ec2Ids.add(EC2_INSTANCE_ID_2); Collection containerInstances = new LinkedList<>(); - containerInstances.add(new ContainerInstance().withContainerInstanceArn(CONTAINER_INSTANCE_ARN_1).withEc2InstanceId(EC2_INSTANCE_ID_1)); - containerInstances.add(new ContainerInstance().withContainerInstanceArn(CONTAINER_INSTANCE_ARN_2).withEc2InstanceId(EC2_INSTANCE_ID_2)); - - //When + containerInstances.add( + new ContainerInstance() + .withContainerInstanceArn(CONTAINER_INSTANCE_ARN_1) + .withEc2InstanceId(EC2_INSTANCE_ID_1)); + containerInstances.add( + new ContainerInstance() + .withContainerInstanceArn(CONTAINER_INSTANCE_ARN_2) + .withEc2InstanceId(EC2_INSTANCE_ID_2)); + + // When Map> dataMap = agent.generateFreshData(containerInstances); - //Then - assertTrue("Expected the data map to contain 1 namespace, but it contains " + dataMap.keySet().size() + " namespaces.", dataMap.keySet().size() == 1); - assertTrue("Expected the data map to contain " + CONTAINER_INSTANCES.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(CONTAINER_INSTANCES.toString())); - assertTrue("Expected there to be 2 CacheData, instead there is " + dataMap.get(CONTAINER_INSTANCES.toString()).size(), dataMap.get(CONTAINER_INSTANCES.toString()).size() == 2); + // Then + assertTrue( + dataMap.keySet().size() == 1, + "Expected the data map to contain 1 namespace, but it contains " + + dataMap.keySet().size() + + " namespaces."); + assertTrue( + dataMap.containsKey(CONTAINER_INSTANCES.toString()), + "Expected the data map to contain " + + CONTAINER_INSTANCES.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.get(CONTAINER_INSTANCES.toString()).size() == 2, + "Expected there to be 2 CacheData, instead there is " + + dataMap.get(CONTAINER_INSTANCES.toString()).size()); for (CacheData cacheData : dataMap.get(CONTAINER_INSTANCES.toString())) { Map attributes = cacheData.getAttributes(); - assertTrue("Expected the container instance ARN to be in the " + arns + " list, but was not. 
The given arn is " + attributes.get("containerInstanceArn"), arns.contains(attributes.get("containerInstanceArn"))); - assertTrue("Expected the EC2 instance ID to be in the " + ec2Ids + " list, but was not. The given arn is " + attributes.get("ec2InstanceId"), ec2Ids.contains(attributes.get("ec2InstanceId"))); + assertTrue( + arns.contains(attributes.get("containerInstanceArn")), + "Expected the container instance ARN to be in the " + + arns + + " list, but was not. The given arn is " + + attributes.get("containerInstanceArn")); + assertTrue( + ec2Ids.contains(attributes.get("ec2InstanceId")), + "Expected the EC2 instance ID to be in the " + + ec2Ids + + " list, but was not. The given arn is " + + attributes.get("ec2InstanceId")); } } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCacheTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCacheTest.java index a97f4d6dd74..4b059db9773 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCacheTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCacheTest.java @@ -16,6 +16,11 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.ListClustersRequest; import com.amazonaws.services.ecs.model.ListClustersResult; import com.netflix.spinnaker.cats.agent.CacheResult; @@ -23,47 +28,52 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.EcsClusterCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.EcsCluster; -import org.junit.Assert; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class EcsClusterCacheTest extends CommonCachingAgent { @Subject - private final EcsClusterCachingAgent agent = new EcsClusterCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider); - @Subject - private final EcsClusterCacheClient client = new EcsClusterCacheClient(providerCache); + private final EcsClusterCachingAgent agent = + new EcsClusterCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider); + + @Subject private final EcsClusterCacheClient client = new EcsClusterCacheClient(providerCache); @Test public void shouldRetrieveFromWrittenCache() { - //Given + // Given String key = Keys.getClusterKey(ACCOUNT, REGION, CLUSTER_NAME_1); ListClustersResult listClustersResult = new ListClustersResult().withClusterArns(CLUSTER_ARN_1); when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(listClustersResult); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - when(providerCache.get(ECS_CLUSTERS.toString(), key)).thenReturn(cacheResult.getCacheResults().get(ECS_CLUSTERS.toString()).iterator().next()); + when(providerCache.get(ECS_CLUSTERS.toString(), key)) + 
.thenReturn(cacheResult.getCacheResults().get(ECS_CLUSTERS.toString()).iterator().next()); - //Then + // Then Collection cacheData = cacheResult.getCacheResults().get(ECS_CLUSTERS.toString()); EcsCluster ecsCluster = client.get(key); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + assertTrue(cacheData != null, "Expected CacheData to be returned but null is returned"); + assertTrue(cacheData.size() == 1, "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); + assertTrue( + retrievedKey.equals(key), + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); - Assert.assertTrue("Expected cluster name to be " + CLUSTER_NAME_1 + " but got " + ecsCluster.getName(), CLUSTER_NAME_1.equals(ecsCluster.getName())); - Assert.assertTrue("Expected cluster ARN to be " + CLUSTER_ARN_1 + " but got " + ecsCluster.getArn(), CLUSTER_ARN_1.equals(ecsCluster.getArn())); - Assert.assertTrue("Expected cluster account to be " + ACCOUNT + " but got " + ecsCluster.getAccount(), ACCOUNT.equals(ecsCluster.getAccount())); - Assert.assertTrue("Expected cluster region to be " + REGION + " but got " + ecsCluster.getRegion(), REGION.equals(ecsCluster.getRegion())); + assertTrue( + CLUSTER_NAME_1.equals(ecsCluster.getName()), + "Expected cluster name to be " + CLUSTER_NAME_1 + " but got " + ecsCluster.getName()); + assertTrue( + CLUSTER_ARN_1.equals(ecsCluster.getArn()), + "Expected cluster ARN to be " + CLUSTER_ARN_1 + " but got " + ecsCluster.getArn()); + assertTrue( + ACCOUNT.equals(ecsCluster.getAccount()), + "Expected cluster account to be " + ACCOUNT + " but got " + ecsCluster.getAccount()); + assertTrue( + REGION.equals(ecsCluster.getRegion()), + "Expected cluster region to be " + REGION + " but got " + ecsCluster.getRegion()); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgentTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgentTest.java index 9a14715b1be..3b81e597079 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgentTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/EcsClusterCachingAgentTest.java @@ -16,48 +16,61 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.ListClustersRequest; import com.amazonaws.services.ecs.model.ListClustersResult; import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static junit.framework.TestCase.assertTrue; -import static 
org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class EcsClusterCachingAgentTest extends CommonCachingAgent { @Subject - private final EcsClusterCachingAgent agent = new EcsClusterCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider); + private final EcsClusterCachingAgent agent = + new EcsClusterCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider); @Test public void shouldGetListOfArns() { - //Given - ListClustersResult listClustersResult = new ListClustersResult().withClusterArns(CLUSTER_ARN_1, CLUSTER_ARN_2); + // Given + ListClustersResult listClustersResult = + new ListClustersResult().withClusterArns(CLUSTER_ARN_1, CLUSTER_ARN_2); when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(listClustersResult); - //When + // When List clusterArns = agent.getItems(ecs, providerCache); - //Then - assertTrue("Expected the list to contain 2 ECS cluster ARNs " + clusterArns.size(), clusterArns.size() == 2); - assertTrue("Expected the list to contain " + CLUSTER_ARN_1 + ", but it does not. It contains: " + clusterArns, clusterArns.contains(CLUSTER_ARN_1)); - assertTrue("Expected the list to contain " + CLUSTER_ARN_2 + ", but it does not. It contains: " + clusterArns, clusterArns.contains(CLUSTER_ARN_2)); + // Then + assertTrue( + clusterArns.size() == 2, + "Expected the list to contain 2 ECS cluster ARNs " + clusterArns.size()); + assertTrue( + clusterArns.contains(CLUSTER_ARN_1), + "Expected the list to contain " + + CLUSTER_ARN_1 + + ", but it does not. It contains: " + + clusterArns); + assertTrue( + clusterArns.contains(CLUSTER_ARN_2), + "Expected the list to contain " + + CLUSTER_ARN_2 + + ", but it does not. It contains: " + + clusterArns); } @Test public void shouldGenerateFreshData() { - //Given + // Given Set clusterArns = new HashSet<>(); clusterArns.add(CLUSTER_ARN_1); clusterArns.add(CLUSTER_ARN_2); @@ -66,35 +79,62 @@ public void shouldGenerateFreshData() { keys.add(Keys.getClusterKey(ACCOUNT, REGION, CLUSTER_NAME_1)); keys.add(Keys.getClusterKey(ACCOUNT, REGION, CLUSTER_NAME_2)); - //When + // When Map> dataMap = agent.generateFreshData(clusterArns); - //Then - assertTrue("Expected the data map to contain 1 namespace, but it contains " + dataMap.keySet().size() + " namespaces.", dataMap.keySet().size() == 1); - assertTrue("Expected the data map to contain " + ECS_CLUSTERS.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(ECS_CLUSTERS.toString())); - assertTrue("Expected there to be 2 CacheData, instead there is "+ dataMap.get(ECS_CLUSTERS.toString()).size(), dataMap.get(ECS_CLUSTERS.toString()).size() == 2); + // Then + assertTrue( + dataMap.keySet().size() == 1, + "Expected the data map to contain 1 namespace, but it contains " + + dataMap.keySet().size() + + " namespaces."); + assertTrue( + dataMap.containsKey(ECS_CLUSTERS.toString()), + "Expected the data map to contain " + + ECS_CLUSTERS.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.get(ECS_CLUSTERS.toString()).size() == 2, + "Expected there to be 2 CacheData, instead there is " + + dataMap.get(ECS_CLUSTERS.toString()).size()); for (CacheData cacheData : dataMap.get(ECS_CLUSTERS.toString())) { - assertTrue("Expected the key to be one of the following keys: " + keys.toString() + ". 
The key is: " + cacheData.getId() +".", keys.contains(cacheData.getId())); - assertTrue("Expected the cluster ARN to be one of the following ARNs: " + clusterArns.toString() + ". The cluster ARN is: " + cacheData.getAttributes().get("clusterArn") +".", clusterArns.contains(cacheData.getAttributes().get("clusterArn"))); + assertTrue( + keys.contains(cacheData.getId()), + "Expected the key to be one of the following keys: " + + keys.toString() + + ". The key is: " + + cacheData.getId() + + "."); + assertTrue( + clusterArns.contains(cacheData.getAttributes().get("clusterArn")), + "Expected the cluster ARN to be one of the following ARNs: " + + clusterArns.toString() + + ". The cluster ARN is: " + + cacheData.getAttributes().get("clusterArn") + + "."); } } @Test public void shouldAddToCache() { - //Given + // Given String key = Keys.getClusterKey(ACCOUNT, REGION, CLUSTER_NAME_1); ListClustersResult listClustersResult = new ListClustersResult().withClusterArns(CLUSTER_ARN_1); when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(listClustersResult); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - //Then + // Then Collection cacheData = cacheResult.getCacheResults().get(ECS_CLUSTERS.toString()); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + assertTrue(cacheData != null, "Expected CacheData to be returned but null is returned"); + assertTrue(cacheData.size() == 1, "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); + assertTrue( + retrievedKey.equals(key), + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCacheTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCacheTest.java index 739bb1f5a0b..20962aa846d 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCacheTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCacheTest.java @@ -16,7 +16,14 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.auth.AWSCredentialsProvider; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import com.amazonaws.services.identitymanagement.AmazonIdentityManagement; import com.amazonaws.services.identitymanagement.model.ListRolesRequest; import com.amazonaws.services.identitymanagement.model.ListRolesResult; @@ -28,34 +35,26 @@ import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.IamRoleCacheClient; import com.netflix.spinnaker.clouddriver.ecs.cache.model.IamRole; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.Collections; import java.util.Map; - -import static 
com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class IamRoleCacheTest extends CommonCachingAgent { - @Subject - private final IamRoleCacheClient client = new IamRoleCacheClient(providerCache); + @Subject private final IamRoleCacheClient client = new IamRoleCacheClient(providerCache); private final AmazonIdentityManagement iam = mock(AmazonIdentityManagement.class); private final IamPolicyReader iamPolicyReader = mock(IamPolicyReader.class); + @Subject - private final IamRoleCachingAgent agent = new IamRoleCachingAgent(netflixAmazonCredentials, clientProvider, credentialsProvider, iamPolicyReader); + private final IamRoleCachingAgent agent = + new IamRoleCachingAgent(netflixAmazonCredentials, clientProvider, iamPolicyReader); @Test public void shouldRetrieveFromWrittenCache() { - //Given - when(clientProvider.getIam(any(NetflixAmazonCredentials.class), anyString(), anyBoolean())).thenReturn(iam); + // Given + when(clientProvider.getIam(any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(iam); ObjectMapper mapper = new ObjectMapper(); String name = "iam-role-name"; String roleArn = "iam-role-arn"; @@ -73,24 +72,39 @@ public void shouldRetrieveFromWrittenCache() { Role role = new Role(); role.setArn(roleArn); role.setRoleName(name); - when(iam.listRoles(any(ListRolesRequest.class))).thenReturn(new ListRolesResult().withRoles(role).withIsTruncated(false)); - when(iamPolicyReader.getTrustedEntities(anyString())).thenReturn(Collections.singleton(iamTrustRelationship)); + when(iam.listRoles(any(ListRolesRequest.class))) + .thenReturn(new ListRolesResult().withRoles(role).withIsTruncated(false)); + when(iamPolicyReader.getTrustedEntities(anyString())) + .thenReturn(Collections.singleton(iamTrustRelationship)); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - cacheResult.getCacheResults().get(IAM_ROLE.toString()).iterator().next().getAttributes().put("trustRelationships", Collections.singletonList(mapper.convertValue(iamTrustRelationship, Map.class))); - when(providerCache.get(IAM_ROLE.toString(), key)).thenReturn(cacheResult.getCacheResults().get(IAM_ROLE.toString()).iterator().next()); + cacheResult + .getCacheResults() + .get(IAM_ROLE.toString()) + .iterator() + .next() + .getAttributes() + .put( + "trustRelationships", + Collections.singletonList(mapper.convertValue(iamTrustRelationship, Map.class))); + when(providerCache.get(IAM_ROLE.toString(), key)) + .thenReturn(cacheResult.getCacheResults().get(IAM_ROLE.toString()).iterator().next()); - //Then - Collection cacheData = cacheResult.getCacheResults().get(Keys.Namespace.IAM_ROLE.toString()); + // Then + Collection cacheData = + cacheResult.getCacheResults().get(Keys.Namespace.IAM_ROLE.toString()); IamRole returnedIamRole = client.get(key); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + assertTrue(cacheData != null, "Expected CacheData to be returned but null is returned"); + assertTrue(cacheData.size() == 1, "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = 
cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); + assertTrue( + retrievedKey.equals(key), + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); - assertTrue("Expected the IAM Role to be " + iamRole + " but got " + returnedIamRole, - iamRole.equals(returnedIamRole)); + assertTrue( + iamRole.equals(returnedIamRole), + "Expected the IAM Role to be " + iamRole + " but got " + returnedIamRole); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgentTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgentTest.java index 9a5f3588d40..7ad124f5ac2 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgentTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/IamRoleCachingAgentTest.java @@ -16,42 +16,40 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.auth.AWSCredentialsProvider; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.amazonaws.regions.Regions; import com.amazonaws.services.identitymanagement.AmazonIdentityManagement; import com.amazonaws.services.identitymanagement.model.ListRolesRequest; import com.amazonaws.services.identitymanagement.model.ListRolesResult; import com.amazonaws.services.identitymanagement.model.Role; import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.model.IamRole; -import org.junit.Test; +import java.util.*; +import org.junit.jupiter.api.Test; import spock.lang.Subject; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.IAM_ROLE; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - - public class IamRoleCachingAgentTest extends CommonCachingAgent { private final AmazonIdentityManagement iam = mock(AmazonIdentityManagement.class); private final IamPolicyReader iamPolicyReader = mock(IamPolicyReader.class); + @Subject - private final IamRoleCachingAgent agent = new IamRoleCachingAgent(netflixAmazonCredentials, clientProvider, credentialsProvider, iamPolicyReader); + private final IamRoleCachingAgent agent = + new IamRoleCachingAgent(netflixAmazonCredentials, clientProvider, iamPolicyReader); @Test public void shouldGetListOfServices() { - //Given + // Given int numberOfRoles = 3; IamTrustRelationship iamTrustRelationship = new IamTrustRelationship(); 
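// Trust relationships of type "Service" identify roles that an AWS service is allowed to assume. 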
iamTrustRelationship.setType("Service"); @@ -73,23 +71,37 @@ public void shouldGetListOfServices() { iamRoles.add(iamRole); } - when(clientProvider.getIam(any(NetflixAmazonCredentials.class), anyString(), anyBoolean())).thenReturn(iam); - when(iam.listRoles(any(ListRolesRequest.class))).thenReturn(new ListRolesResult().withRoles(roles).withIsTruncated(false)); - when(iamPolicyReader.getTrustedEntities(anyString())).thenReturn(Collections.singleton(iamTrustRelationship)); + when(clientProvider.getIam(any(NetflixAmazonCredentials.class), anyString(), anyBoolean())) + .thenReturn(iam); + when(iam.listRoles(any(ListRolesRequest.class))) + .thenReturn(new ListRolesResult().withRoles(roles).withIsTruncated(false)); + when(iamPolicyReader.getTrustedEntities(any())) + .thenReturn(Collections.singleton(iamTrustRelationship)); - //When + // When Set returnedRoles = agent.fetchIamRoles(iam, ACCOUNT); - //Then - assertTrue("Expected the list to contain " + numberOfRoles + " ECS IAM roles, but got " + returnedRoles.size(), returnedRoles.size() == numberOfRoles); + // Then + assertEquals( + returnedRoles.size(), + numberOfRoles, + "Expected the list to contain " + + numberOfRoles + + " ECS IAM roles, but got " + + returnedRoles.size()); for (IamRole iamRole : returnedRoles) { - assertTrue("Expected the IAM role to be in " + iamRoles + " list but it was not. The IAM role is: " + iamRole, iamRoles.contains(iamRole)); + assertTrue( + iamRoles.contains(iamRole), + "Expected the IAM role to be in " + + iamRoles + + " list but it was not. The IAM role is: " + + iamRole); } } @Test public void shouldGenerateFreshData() { - //Given + // Given int numberOfRoles = 3; IamTrustRelationship iamTrustRelationship = new IamTrustRelationship(); iamTrustRelationship.setType("Service"); @@ -110,17 +122,72 @@ public void shouldGenerateFreshData() { keys.add(Keys.getIamRoleKey(ACCOUNT, name)); } - //When + // When Map> dataMap = agent.generateFreshData(iamRoles); - //Then - assertTrue("Expected the data map to contain 1 namespace, but it contains " + dataMap.keySet().size() + " namespaces.", dataMap.keySet().size() == 1); - assertTrue("Expected the data map to contain " + IAM_ROLE.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(IAM_ROLE.toString())); - assertTrue("Expected there to be " + numberOfRoles + " CacheData, instead there is " + dataMap.get(IAM_ROLE.toString()).size(), dataMap.get(IAM_ROLE.toString()).size() == numberOfRoles); + // Then + assertTrue( + dataMap.keySet().size() == 1, + "Expected the data map to contain 1 namespace, but it contains " + + dataMap.keySet().size() + + " namespaces."); + assertTrue( + dataMap.containsKey(IAM_ROLE.toString()), + "Expected the data map to contain " + + IAM_ROLE.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.get(IAM_ROLE.toString()).size() == numberOfRoles, + "Expected there to be " + + numberOfRoles + + " CacheData, instead there is " + + dataMap.get(IAM_ROLE.toString()).size()); for (CacheData cacheData : dataMap.get(IAM_ROLE.toString())) { - assertTrue("Expected the key to be one of the following keys: " + keys.toString() + ". The key is: " + cacheData.getId() + ".", keys.contains(cacheData.getId())); + assertTrue( + keys.contains(cacheData.getId()), + "Expected the key to be one of the following keys: " + + keys.toString() + + ". 
The key is: " + + cacheData.getId() + + "."); } + } + @Test + public void shouldGetDefaultRegion() { + // given + String defaultRegionName = Regions.DEFAULT_REGION.getName(); + when(netflixAmazonCredentials.getRegions()) + .thenReturn(Collections.singletonList(new AmazonCredentials.AWSRegion("us-east-1", null))); + + // when + String actualRegionName = agent.getIamRegion(); + + // then + assertEquals( + defaultRegionName, + actualRegionName, + "Expected region to equal " + defaultRegionName + ", but got " + actualRegionName); + } + + @Test + public void shouldGetConfiguredIamRegion() { + // given + String expectedRegionName = "cn-north-1"; + when(netflixAmazonCredentials.getRegions()) + .thenReturn( + Collections.singletonList(new AmazonCredentials.AWSRegion(expectedRegionName, null))); + + // when + String actualRegionName = agent.getIamRegion(); + + // then + assertEquals( + expectedRegionName, + actualRegionName, + "Expected region to equal " + expectedRegionName + ", but got " + actualRegionName); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCacheTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCacheTest.java index ef24bddeab8..2a73d4e5b9f 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCacheTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCacheTest.java @@ -16,42 +16,34 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.services.ecs.model.DeploymentConfiguration; -import com.amazonaws.services.ecs.model.DescribeServicesRequest; -import com.amazonaws.services.ecs.model.DescribeServicesResult; -import com.amazonaws.services.ecs.model.ListClustersRequest; -import com.amazonaws.services.ecs.model.ListClustersResult; -import com.amazonaws.services.ecs.model.ListServicesRequest; -import com.amazonaws.services.ecs.model.ListServicesResult; -import com.amazonaws.services.ecs.model.Service; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.ecs.model.*; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.ServiceCacheClient; -import org.junit.Assert; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.Collections; import java.util.Date; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class ServiceCacheTest extends CommonCachingAgent { private final ObjectMapper mapper = new ObjectMapper(); - private final ServiceCachingAgent agent = new ServiceCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); - @Subject - private final ServiceCacheClient client = new ServiceCacheClient(providerCache, mapper); + private final ServiceCachingAgent agent = + new ServiceCachingAgent( + 
netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + @Subject private final ServiceCacheClient client = new ServiceCacheClient(providerCache, mapper); @Test public void shouldRetrieveFromWrittenCache() { - //Given + // Given String key = Keys.getServiceKey(ACCOUNT, REGION, SERVICE_NAME_1); Service service = new Service(); @@ -60,47 +52,124 @@ public void shouldRetrieveFromWrittenCache() { service.setClusterArn(CLUSTER_ARN_1); service.setTaskDefinition(TASK_DEFINITION_ARN_1); service.setRoleArn(ROLE_ARN); - service.setDeploymentConfiguration(new DeploymentConfiguration().withMinimumHealthyPercent(50).withMaximumPercent(100)); + service.setDeploymentConfiguration( + new DeploymentConfiguration().withMinimumHealthyPercent(50).withMaximumPercent(100)); service.setLoadBalancers(Collections.emptyList()); + service.setNetworkConfiguration( + new NetworkConfiguration() + .withAwsvpcConfiguration( + new AwsVpcConfiguration() + .withSecurityGroups(SECURITY_GROUP_1) + .withSubnets(SUBNET_ID_1))); service.setDesiredCount(1); service.setCreatedAt(new Date()); - when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); - when(ecs.listServices(any(ListServicesRequest.class))).thenReturn(new ListServicesResult().withServiceArns(SERVICE_ARN_1)); - when(ecs.describeServices(any(DescribeServicesRequest.class))).thenReturn(new DescribeServicesResult().withServices(service)); + when(ecs.listClusters(any(ListClustersRequest.class))) + .thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); + when(ecs.listServices(any(ListServicesRequest.class))) + .thenReturn(new ListServicesResult().withServiceArns(SERVICE_ARN_1)); + when(ecs.describeServices(any(DescribeServicesRequest.class))) + .thenReturn(new DescribeServicesResult().withServices(service)); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - when(providerCache.get(SERVICES.toString(), key)).thenReturn(cacheResult.getCacheResults().get(SERVICES.toString()).iterator().next()); + when(providerCache.get(SERVICES.toString(), key)) + .thenReturn(cacheResult.getCacheResults().get(SERVICES.toString()).iterator().next()); - //Then + // Then Collection cacheData = cacheResult.getCacheResults().get(SERVICES.toString()); com.netflix.spinnaker.clouddriver.ecs.cache.model.Service ecsService = client.get(key); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + assertTrue(cacheData != null, "Expected CacheData to be returned but null is returned"); + assertTrue(cacheData.size() == 1, "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); + assertTrue( + retrievedKey.equals(key), + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); - assertTrue("Expected the service application name to be " + APP_NAME + " but got " + ecsService.getApplicationName(), - APP_NAME.equals(ecsService.getApplicationName())); - assertTrue("Expected the service name to be " + SERVICE_NAME_1 + " but got " + ecsService.getServiceName(), - SERVICE_NAME_1.equals(ecsService.getServiceName())); - assertTrue("Expected the service ARN to be " + SERVICE_ARN_1 + " but got " + ecsService.getServiceArn(), - 
SERVICE_ARN_1.equals(ecsService.getServiceArn())); - assertTrue("Expected the service's cluster ARN to be " + CLUSTER_ARN_1 + " but got " + ecsService.getClusterArn(), - CLUSTER_ARN_1.equals(ecsService.getClusterArn())); - Assert.assertTrue("Expected the role ARN of the service to be " + service.getRoleArn() + " but got " + ecsService.getRoleArn(), - service.getRoleArn().equals(ecsService.getRoleArn())); - Assert.assertTrue("Expected the task definition of the service to be " + service.getTaskDefinition() + " but got " + ecsService.getTaskDefinition(), service.getTaskDefinition().equals(ecsService.getTaskDefinition())); - Assert.assertTrue("Expected the desired count of the service to be " + service.getDesiredCount() + " but got " + ecsService.getDesiredCount(), - service.getDesiredCount() == ecsService.getDesiredCount()); - Assert.assertTrue("Expected the maximum percent of the service to be " + service.getDeploymentConfiguration().getMaximumPercent() + " but got " + ecsService.getMaximumPercent(), - service.getDeploymentConfiguration().getMaximumPercent() == ecsService.getMaximumPercent()); - Assert.assertTrue("Expected the minimum healthy percent of the service to be " + service.getDeploymentConfiguration().getMinimumHealthyPercent() + " but got " + ecsService.getMinimumHealthyPercent(), - service.getDeploymentConfiguration().getMinimumHealthyPercent() == ecsService.getMinimumHealthyPercent()); - Assert.assertTrue("Expected the created at of the service to be " + service.getCreatedAt().getTime() + " but got " + ecsService.getCreatedAt(), service.getCreatedAt().getTime() == ecsService.getCreatedAt()); - Assert.assertTrue("Expected the service to have 0 load balancer but got " + ecsService.getLoadBalancers().size(), - ecsService.getLoadBalancers().size() == 0); + assertTrue( + APP_NAME.equals(ecsService.getApplicationName()), + "Expected the service application name to be " + + APP_NAME + + " but got " + + ecsService.getApplicationName()); + assertTrue( + SERVICE_NAME_1.equals(ecsService.getServiceName()), + "Expected the service name to be " + + SERVICE_NAME_1 + + " but got " + + ecsService.getServiceName()); + assertTrue( + SERVICE_ARN_1.equals(ecsService.getServiceArn()), + "Expected the service ARN to be " + + SERVICE_ARN_1 + + " but got " + + ecsService.getServiceArn()); + assertTrue( + CLUSTER_ARN_1.equals(ecsService.getClusterArn()), + "Expected the service's cluster ARN to be " + + CLUSTER_ARN_1 + + " but got " + + ecsService.getClusterArn()); + assertTrue( + service.getRoleArn().equals(ecsService.getRoleArn()), + "Expected the role ARN of the service to be " + + service.getRoleArn() + + " but got " + + ecsService.getRoleArn()); + assertTrue( + service.getTaskDefinition().equals(ecsService.getTaskDefinition()), + "Expected the task definition of the service to be " + + service.getTaskDefinition() + + " but got " + + ecsService.getTaskDefinition()); + assertTrue( + service.getDesiredCount() == ecsService.getDesiredCount(), + "Expected the desired count of the service to be " + + service.getDesiredCount() + + " but got " + + ecsService.getDesiredCount()); + assertTrue( + service.getDeploymentConfiguration().getMaximumPercent() == ecsService.getMaximumPercent(), + "Expected the maximum percent of the service to be " + + service.getDeploymentConfiguration().getMaximumPercent() + + " but got " + + ecsService.getMaximumPercent()); + assertTrue( + service.getDeploymentConfiguration().getMinimumHealthyPercent() + == ecsService.getMinimumHealthyPercent(), + "Expected the minimum healthy 
percent of the service to be " + + service.getDeploymentConfiguration().getMinimumHealthyPercent() + + " but got " + + ecsService.getMinimumHealthyPercent()); + assertTrue( + service.getCreatedAt().getTime() == ecsService.getCreatedAt(), + "Expected the created at of the service to be " + + service.getCreatedAt().getTime() + + " but got " + + ecsService.getCreatedAt()); + assertTrue( + ecsService.getLoadBalancers().size() == 0, + "Expected the service to have 0 load balancer but got " + + ecsService.getLoadBalancers().size()); + assertTrue( + ecsService.getSubnets().size() == 1, + "Expected the service to have 1 subnet but got " + ecsService.getSubnets().size()); + assertTrue( + SUBNET_ID_1.equals(ecsService.getSubnets().get(0)), + "Expected the service's subnet to be " + + SUBNET_ID_1 + + " but got " + + ecsService.getSubnets().get(0)); + assertTrue( + ecsService.getSecurityGroups().size() == 1, + "Expected the service to have 1 security group but got " + + ecsService.getSecurityGroups().size()); + assertTrue( + SECURITY_GROUP_1.equals(ecsService.getSecurityGroups().get(0)), + "Expected the service's security group to be " + + SECURITY_GROUP_1 + + " but got " + + ecsService.getSecurityGroups().get(0)); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgentTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgentTest.java index c18dcbb434d..d1481b40923 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgentTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/ServiceCachingAgentTest.java @@ -16,6 +16,12 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.DeploymentConfiguration; import com.amazonaws.services.ecs.model.DescribeServicesRequest; import com.amazonaws.services.ecs.model.DescribeServicesResult; @@ -26,9 +32,6 @@ import com.amazonaws.services.ecs.model.Service; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.Collections; import java.util.Date; @@ -37,46 +40,54 @@ import java.util.List; import java.util.Map; import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class ServiceCachingAgentTest extends CommonCachingAgent { @Subject - private final ServiceCachingAgent agent = new ServiceCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + private final ServiceCachingAgent agent = + new ServiceCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); @Test public void shouldGetListOfServices() { - //Given - 
ListServicesResult listServicesResult = new ListServicesResult().withServiceArns(SERVICE_ARN_1, SERVICE_ARN_2); + // Given + ListServicesResult listServicesResult = + new ListServicesResult().withServiceArns(SERVICE_ARN_1, SERVICE_ARN_2); when(ecs.listServices(any(ListServicesRequest.class))).thenReturn(listServicesResult); List services = new LinkedList<>(); services.add(new Service().withServiceArn(SERVICE_ARN_1)); services.add(new Service().withServiceArn(SERVICE_ARN_2)); - DescribeServicesResult describeServicesResult = new DescribeServicesResult().withServices(services); - when(ecs.describeServices(any(DescribeServicesRequest.class))).thenReturn(describeServicesResult); + DescribeServicesResult describeServicesResult = + new DescribeServicesResult().withServices(services); + when(ecs.describeServices(any(DescribeServicesRequest.class))) + .thenReturn(describeServicesResult); - when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); + when(ecs.listClusters(any(ListClustersRequest.class))) + .thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); - //When + // When List returnedServices = agent.getItems(ecs, providerCache); - //Then - assertTrue("Expected the list to contain 2 ECS services, but got " + returnedServices.size(), returnedServices.size() == 2); + // Then + assertTrue( + returnedServices.size() == 2, + "Expected the list to contain 2 ECS services, but got " + returnedServices.size()); for (Service service : returnedServices) { - assertTrue("Expected the service to be in " + services + " list but it was not. The service is: " + service, services.contains(service)); + assertTrue( + services.contains(service), + "Expected the service to be in " + + services + + " list but it was not. 
The service is: " + + service); } } @Test public void shouldGenerateFreshData() { - //Given + // Given List serviceNames = new LinkedList<>(); serviceNames.add(SERVICE_NAME_1); serviceNames.add(SERVICE_NAME_2); @@ -90,29 +101,65 @@ public void shouldGenerateFreshData() { for (int x = 0; x < serviceArns.size(); x++) { keys.add(Keys.getServiceKey(ACCOUNT, REGION, serviceNames.get(x))); - services.add(new Service().withClusterArn(CLUSTER_ARN_1) - .withServiceArn(serviceArns.get(x)) - .withServiceName(serviceNames.get(x)) - .withTaskDefinition(TASK_DEFINITION_ARN_1) - .withRoleArn(ROLE_ARN) - .withDeploymentConfiguration(new DeploymentConfiguration().withMinimumHealthyPercent(50).withMaximumPercent(100)) - .withLoadBalancers(Collections.emptyList()) - .withDesiredCount(1) - .withCreatedAt(new Date())); + services.add( + new Service() + .withClusterArn(CLUSTER_ARN_1) + .withServiceArn(serviceArns.get(x)) + .withServiceName(serviceNames.get(x)) + .withTaskDefinition(TASK_DEFINITION_ARN_1) + .withRoleArn(ROLE_ARN) + .withDeploymentConfiguration( + new DeploymentConfiguration() + .withMinimumHealthyPercent(50) + .withMaximumPercent(100)) + .withLoadBalancers(Collections.emptyList()) + .withDesiredCount(1) + .withCreatedAt(new Date())); } - //When + // When Map> dataMap = agent.generateFreshData(services); - //Then - assertTrue("Expected the data map to contain 2 namespaces, but it contains " + dataMap.keySet().size() + " namespaces.", dataMap.keySet().size() == 2); - assertTrue("Expected the data map to contain " + SERVICES.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(SERVICES.toString())); - assertTrue("Expected the data map to contain " + ECS_CLUSTERS.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(ECS_CLUSTERS.toString())); - assertTrue("Expected there to be 2 CacheData, instead there is "+ dataMap.get(SERVICES.toString()).size(), dataMap.get(SERVICES.toString()).size() == 2); + // Then + assertTrue( + dataMap.keySet().size() == 2, + "Expected the data map to contain 2 namespaces, but it contains " + + dataMap.keySet().size() + + " namespaces."); + assertTrue( + dataMap.containsKey(SERVICES.toString()), + "Expected the data map to contain " + + SERVICES.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.containsKey(ECS_CLUSTERS.toString()), + "Expected the data map to contain " + + ECS_CLUSTERS.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.get(SERVICES.toString()).size() == 2, + "Expected there to be 2 CacheData, instead there is " + + dataMap.get(SERVICES.toString()).size()); for (CacheData cacheData : dataMap.get(SERVICES.toString())) { - assertTrue("Expected the key to be one of the following keys: " + keys.toString() + ". The key is: " + cacheData.getId() +".", keys.contains(cacheData.getId())); - assertTrue("Expected the service ARN to be one of the following ARNs: " + serviceArns.toString() + ". The service ARN is: " + cacheData.getAttributes().get("serviceArn") +".", serviceArns.contains(cacheData.getAttributes().get("serviceArn"))); + assertTrue( + keys.contains(cacheData.getId()), + "Expected the key to be one of the following keys: " + + keys.toString() + + ". 
The key is: " + + cacheData.getId() + + "."); + assertTrue( + serviceArns.contains(cacheData.getAttributes().get("serviceArn")), + "Expected the service ARN to be one of the following ARNs: " + + serviceArns.toString() + + ". The service ARN is: " + + cacheData.getAttributes().get("serviceArn") + + "."); } } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCacheTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCacheTest.java index 5ded542f3cf..c3b15787dc8 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCacheTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCacheTest.java @@ -16,6 +16,11 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.DescribeTasksRequest; import com.amazonaws.services.ecs.model.DescribeTasksResult; import com.amazonaws.services.ecs.model.ListClustersRequest; @@ -28,80 +33,111 @@ import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskCacheClient; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.Collections; import java.util.Date; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class TaskCacheTest extends CommonCachingAgent { private final ObjectMapper mapper = new ObjectMapper(); + @Subject - private final TaskCachingAgent agent = new TaskCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); - @Subject - private final TaskCacheClient client = new TaskCacheClient(providerCache, mapper); + private final TaskCachingAgent agent = + new TaskCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + + @Subject private final TaskCacheClient client = new TaskCacheClient(providerCache, mapper); @Test public void shouldRetrieveFromWrittenCache() { - //Given + // Given String key = Keys.getTaskKey(ACCOUNT, REGION, TASK_ID_1); Task task = new Task(); task.setTaskArn(TASK_ARN_1); task.setClusterArn(CLUSTER_ARN_1); task.setContainerInstanceArn(CONTAINER_INSTANCE_ARN_1); - task.setGroup("group"+SERVICE_NAME_1); + task.setGroup("group" + SERVICE_NAME_1); task.setContainers(Collections.emptyList()); task.setLastStatus(STATUS); task.setDesiredStatus(STATUS); task.setStartedAt(new Date()); + task.setAvailabilityZone(REGION + "a"); - when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); - when(ecs.listTasks(any(ListTasksRequest.class))).thenReturn(new ListTasksResult().withTaskArns(TASK_ARN_1)); - when(ecs.describeTasks(any(DescribeTasksRequest.class))).thenReturn(new DescribeTasksResult().withTasks(task)); + when(ecs.listClusters(any(ListClustersRequest.class))) + .thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); + 
when(ecs.listTasks(any(ListTasksRequest.class))) + .thenReturn(new ListTasksResult().withTaskArns(TASK_ARN_1)); + when(ecs.describeTasks(any(DescribeTasksRequest.class))) + .thenReturn(new DescribeTasksResult().withTasks(task)); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - when(providerCache.get(TASKS.toString(), key)).thenReturn(cacheResult.getCacheResults().get(TASKS.toString()).iterator().next()); + when(providerCache.get(TASKS.toString(), key)) + .thenReturn(cacheResult.getCacheResults().get(TASKS.toString()).iterator().next()); - //Then - Collection<CacheData> cacheData = cacheResult.getCacheResults().get(Keys.Namespace.TASKS.toString()); + // Then + Collection<CacheData> cacheData = + cacheResult.getCacheResults().get(Keys.Namespace.TASKS.toString()); com.netflix.spinnaker.clouddriver.ecs.cache.model.Task ecsTask = client.get(key); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + assertTrue(cacheData != null, "Expected CacheData to be returned but null is returned"); + assertTrue(cacheData.size() == 1, "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); - - assertTrue("Expected the cluster ARN to be " + CLUSTER_ARN_1 + " but got " + ecsTask.getClusterArn(), - CLUSTER_ARN_1.equals(ecsTask.getClusterArn())); - - assertTrue("Expected the task ARN to be " + TASK_ARN_1 + " but got " + ecsTask.getTaskArn(), - TASK_ARN_1.equals(ecsTask.getTaskArn())); - - assertTrue("Expected the container instance ARN name to be " + task.getContainerInstanceArn() + " but got " + ecsTask.getContainerInstanceArn(), - task.getContainerInstanceArn().equals(ecsTask.getContainerInstanceArn())); - - assertTrue("Expected the group to be " + task.getGroup() + " but got " + ecsTask.getGroup(), - task.getGroup().equals(ecsTask.getGroup())); - - assertTrue("Expected the last status to be " + task.getLastStatus() + " but got " + ecsTask.getLastStatus(), - task.getLastStatus().equals(ecsTask.getLastStatus())); - - assertTrue("Expected the desired status to be " + task.getDesiredStatus() + " but got " + ecsTask.getDesiredStatus(), - task.getDesiredStatus().equals(ecsTask.getDesiredStatus())); - - assertTrue("Expected the started at to be " + task.getStartedAt().getTime() + " but got " + ecsTask.getStartedAt(), - task.getStartedAt().getTime() == ecsTask.getStartedAt()); - - assertTrue("Expected the task to have 0 containers but got " + task.getContainers().size(), - task.getContainers().size() == 0); + assertTrue( + retrievedKey.equals(key), + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); + + assertTrue( + CLUSTER_ARN_1.equals(ecsTask.getClusterArn()), + "Expected the cluster ARN to be " + CLUSTER_ARN_1 + " but got " + ecsTask.getClusterArn()); + + assertTrue( + TASK_ARN_1.equals(ecsTask.getTaskArn()), + "Expected the task ARN to be " + TASK_ARN_1 + " but got " + ecsTask.getTaskArn()); + + assertTrue( + task.getContainerInstanceArn().equals(ecsTask.getContainerInstanceArn()), + "Expected the container instance ARN name to be " + + task.getContainerInstanceArn() + + " but got " + + ecsTask.getContainerInstanceArn()); + + assertTrue( + task.getGroup().equals(ecsTask.getGroup()), + "Expected the group to be " + task.getGroup() + " but got " + ecsTask.getGroup()); + + 
assertTrue( + task.getLastStatus().equals(ecsTask.getLastStatus()), + "Expected the last status to be " + + task.getLastStatus() + + " but got " + + ecsTask.getLastStatus()); + + assertTrue( + task.getDesiredStatus().equals(ecsTask.getDesiredStatus()), + "Expected the desired status to be " + + task.getDesiredStatus() + + " but got " + + ecsTask.getDesiredStatus()); + + assertTrue( + task.getStartedAt().getTime() == ecsTask.getStartedAt(), + "Expected the started at to be " + + task.getStartedAt().getTime() + + " but got " + + ecsTask.getStartedAt()); + + assertTrue( + task.getAvailabilityZone().equals(ecsTask.getAvailabilityZone()), + "Expected the availability zone to be " + + task.getAvailabilityZone() + + " but got " + + ecsTask.getAvailabilityZone()); + + assertTrue( + task.getContainers().size() == 0, + "Expected the task to have 0 containers but got " + task.getContainers().size()); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgentTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgentTest.java index bdd53ffde09..cc64a8cd4e7 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgentTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskCachingAgentTest.java @@ -16,6 +16,12 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.DescribeTasksRequest; import com.amazonaws.services.ecs.model.DescribeTasksResult; import com.amazonaws.services.ecs.model.ListClustersRequest; @@ -25,9 +31,6 @@ import com.amazonaws.services.ecs.model.Task; import com.netflix.spinnaker.cats.cache.CacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.junit.Test; -import spock.lang.Subject; - import java.util.Collection; import java.util.Collections; import java.util.Date; @@ -36,21 +39,18 @@ import java.util.List; import java.util.Map; import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.ECS_CLUSTERS; -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASKS; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - +import org.junit.jupiter.api.Test; +import spock.lang.Subject; public class TaskCachingAgentTest extends CommonCachingAgent { @Subject - private final TaskCachingAgent agent = new TaskCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); + private final TaskCachingAgent agent = + new TaskCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry); @Test public void shouldGetListOfTasks() { - //Given + // Given ListTasksResult listTasksResult = new ListTasksResult().withTaskArns(TASK_ARN_1, TASK_ARN_2); when(ecs.listTasks(any(ListTasksRequest.class))).thenReturn(listTasksResult); @@ -61,21 +61,29 @@ public void shouldGetListOfTasks() { DescribeTasksResult describeResult = new DescribeTasksResult().withTasks(tasks); 
when(ecs.describeTasks(any(DescribeTasksRequest.class))).thenReturn(describeResult); - when(ecs.listClusters(any(ListClustersRequest.class))).thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); + when(ecs.listClusters(any(ListClustersRequest.class))) + .thenReturn(new ListClustersResult().withClusterArns(CLUSTER_ARN_1)); - //When + // When List<Task> returnedTasks = agent.getItems(ecs, providerCache); - //Then - assertTrue("Expected the list to contain " + tasks.size() + " ECS tasks, but got " + returnedTasks.size(), returnedTasks.size() == tasks.size()); + // Then + assertTrue( + returnedTasks.size() == tasks.size(), + "Expected the list to contain " + + tasks.size() + + " ECS tasks, but got " + + returnedTasks.size()); for (Task task : returnedTasks) { - assertTrue("Expected the task to be in " + tasks + " list but it was not. The task is: " + task, tasks.contains(task)); + assertTrue( + tasks.contains(task), + "Expected the task to be in " + tasks + " list but it was not. The task is: " + task); } } @Test public void shouldGenerateFreshData() { - //Given + // Given List<String> taskIDs = new LinkedList<>(); taskIDs.add(TASK_ID_1); taskIDs.add(TASK_ID_1); @@ -89,28 +97,61 @@ public void shouldGenerateFreshData() { for (int x = 0; x < taskArns.size(); x++) { keys.add(Keys.getTaskKey(ACCOUNT, REGION, taskIDs.get(x))); - tasks.add(new Task().withClusterArn(CLUSTER_ARN_1) - .withTaskArn(taskArns.get(x)) - .withContainerInstanceArn(CONTAINER_INSTANCE_ARN_1) - .withGroup("group:"+SERVICE_NAME_1) - .withContainers(Collections.emptyList()) - .withLastStatus(STATUS) - .withDesiredStatus(STATUS) - .withStartedAt(new Date())); + tasks.add( + new Task() + .withClusterArn(CLUSTER_ARN_1) + .withTaskArn(taskArns.get(x)) + .withContainerInstanceArn(CONTAINER_INSTANCE_ARN_1) + .withGroup("group:" + SERVICE_NAME_1) + .withContainers(Collections.emptyList()) + .withLastStatus(STATUS) + .withDesiredStatus(STATUS) + .withStartedAt(new Date())); } - //When + // When Map<String, Collection<CacheData>> dataMap = agent.generateFreshData(tasks); - //Then - assertTrue("Expected the data map to contain 2 namespaces, but it contains " + dataMap.keySet().size() + " namespaces.", dataMap.keySet().size() == 2); - assertTrue("Expected the data map to contain " + TASKS.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(TASKS.toString())); - assertTrue("Expected the data map to contain " + ECS_CLUSTERS.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(ECS_CLUSTERS.toString())); - assertTrue("Expected there to be 2 CacheData, instead there is " + dataMap.get(TASKS.toString()).size(), dataMap.get(TASKS.toString()).size() == 2); + // Then + assertTrue( + dataMap.keySet().size() == 2, + "Expected the data map to contain 2 namespaces, but it contains " + + dataMap.keySet().size() + + " namespaces."); + assertTrue( + dataMap.containsKey(TASKS.toString()), + "Expected the data map to contain " + + TASKS.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.containsKey(ECS_CLUSTERS.toString()), + "Expected the data map to contain " + + ECS_CLUSTERS.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.get(TASKS.toString()).size() == 2, + "Expected there to be 2 CacheData, instead there is " + + dataMap.get(TASKS.toString()).size()); for (CacheData cacheData : dataMap.get(TASKS.toString())) { - assertTrue("Expected the key to be one of the 
following keys: " + keys.toString() + ". The key is: " + cacheData.getId() + ".", keys.contains(cacheData.getId())); - assertTrue("Expected the task ARN to be one of the following ARNs: " + taskArns.toString() + ". The task ARN is: " + cacheData.getAttributes().get("taskArn") + ".", taskArns.contains(cacheData.getAttributes().get("taskArn"))); + assertTrue( + keys.contains(cacheData.getId()), + "Expected the key to be one of the following keys: " + + keys.toString() + + ". The key is: " + + cacheData.getId() + + "."); + assertTrue( + taskArns.contains(cacheData.getAttributes().get("taskArn")), + "Expected the task ARN to be one of the following ARNs: " + + taskArns.toString() + + ". The task ARN is: " + + cacheData.getAttributes().get("taskArn") + + "."); } } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCacheTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCacheTest.java index 2cf6077150d..1640f18d883 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCacheTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCacheTest.java @@ -16,61 +16,89 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; -import com.amazonaws.services.ecs.model.DescribeTaskDefinitionRequest; -import com.amazonaws.services.ecs.model.DescribeTaskDefinitionResult; -import com.amazonaws.services.ecs.model.ListTaskDefinitionsRequest; -import com.amazonaws.services.ecs.model.ListTaskDefinitionsResult; -import com.amazonaws.services.ecs.model.TaskDefinition; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.ecs.model.*; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.agent.CacheResult; import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; import com.netflix.spinnaker.clouddriver.ecs.cache.client.TaskDefinitionCacheClient; -import org.junit.Assert; -import org.junit.Test; +import java.util.*; +import org.junit.jupiter.api.Test; import spock.lang.Subject; -import java.util.Collection; -import java.util.Collections; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - - public class TaskDefinitionCacheTest extends CommonCachingAgent { private final ObjectMapper mapper = new ObjectMapper(); + @Subject - private final TaskDefinitionCachingAgent agent = new TaskDefinitionCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry, mapper); + private final TaskDefinitionCachingAgent agent = + new TaskDefinitionCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry, mapper); + @Subject - private final TaskDefinitionCacheClient client = new 
TaskDefinitionCacheClient(providerCache, mapper); + private final TaskDefinitionCacheClient client = + new TaskDefinitionCacheClient(providerCache, mapper); @Test public void shouldRetrieveFromWrittenCache() { - //Given + // Given String key = Keys.getTaskDefinitionKey(ACCOUNT, REGION, TASK_DEFINITION_ARN_1); TaskDefinition taskDefinition = new TaskDefinition(); taskDefinition.setTaskDefinitionArn(TASK_DEFINITION_ARN_1); taskDefinition.setContainerDefinitions(Collections.emptyList()); - when(ecs.listTaskDefinitions(any(ListTaskDefinitionsRequest.class))).thenReturn(new ListTaskDefinitionsResult().withTaskDefinitionArns(TASK_DEFINITION_ARN_1)); - when(ecs.describeTaskDefinition(any(DescribeTaskDefinitionRequest.class))).thenReturn(new DescribeTaskDefinitionResult().withTaskDefinition(taskDefinition)); + Map<String, Object> serviceAttr = new HashMap<>(); + serviceAttr.put("taskDefinition", TASK_DEFINITION_ARN_1); + serviceAttr.put("desiredCount", 1); + serviceAttr.put("serviceName", SERVICE_NAME_1); + serviceAttr.put("maximumPercent", 200); + serviceAttr.put("minimumHealthyPercent", 50); + serviceAttr.put("createdAt", 8976543L); + + DefaultCacheData serviceCache = + new DefaultCacheData("test-service", serviceAttr, Collections.emptyMap()); + + when(ecs.describeTaskDefinition(any(DescribeTaskDefinitionRequest.class))) + .thenReturn(new DescribeTaskDefinitionResult().withTaskDefinition(taskDefinition)); + when(providerCache.filterIdentifiers( + SERVICES.toString(), "ecs;services;test-account;us-west-2;*")) + .thenReturn(Collections.singletonList("test-service")); + when(providerCache.getAll(anyString(), any(Set.class))) + .thenReturn(Collections.singletonList(serviceCache)); - //When + // When CacheResult cacheResult = agent.loadData(providerCache); - when(providerCache.get(TASK_DEFINITIONS.toString(), key)).thenReturn(cacheResult.getCacheResults().get(TASK_DEFINITIONS.toString()).iterator().next()); + when(providerCache.get(TASK_DEFINITIONS.toString(), key)) + .thenReturn( + cacheResult.getCacheResults().get(TASK_DEFINITIONS.toString()).iterator().next()); TaskDefinition retrievedTaskDefinition = client.get(key); - //Then - Collection<CacheData> cacheData = cacheResult.getCacheResults().get(TASK_DEFINITIONS.toString()); - assertTrue("Expected CacheData to be returned but null is returned", cacheData != null); - assertTrue("Expected 1 CacheData but returned " + cacheData.size(), cacheData.size() == 1); + // Then + Collection<CacheData> cacheData = + cacheResult.getCacheResults().get(TASK_DEFINITIONS.toString()); + assertNotNull(cacheData, "Expected CacheData to be returned but null is returned"); + assertEquals(1, cacheData.size(), "Expected 1 CacheData but returned " + cacheData.size()); String retrievedKey = cacheData.iterator().next().getId(); - assertTrue("Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey, retrievedKey.equals(key)); + assertEquals( + retrievedKey, + key, + "Expected CacheData with ID " + key + " but retrieved ID " + retrievedKey); - Assert.assertTrue("Expected the task definition to be " + taskDefinition + " but got " + retrievedTaskDefinition, - taskDefinition.equals(retrievedTaskDefinition)); + assertEquals( + taskDefinition, + retrievedTaskDefinition, + "Expected the task definition to be " + + taskDefinition + + " but got " + + retrievedTaskDefinition); } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgentTest.java 
b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgentTest.java index c59e52a67a8..9d742507ed2 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgentTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TaskDefinitionCachingAgentTest.java @@ -16,59 +16,128 @@ package com.netflix.spinnaker.clouddriver.ecs.provider.agent; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.SERVICES; +import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; + import com.amazonaws.services.ecs.model.DescribeTaskDefinitionRequest; import com.amazonaws.services.ecs.model.DescribeTaskDefinitionResult; -import com.amazonaws.services.ecs.model.ListTaskDefinitionsRequest; -import com.amazonaws.services.ecs.model.ListTaskDefinitionsResult; import com.amazonaws.services.ecs.model.TaskDefinition; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; import com.netflix.spinnaker.clouddriver.ecs.cache.Keys; -import org.junit.Test; +import java.util.*; +import org.junit.jupiter.api.Test; import spock.lang.Subject; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.ecs.cache.Keys.Namespace.TASK_DEFINITIONS; -import static junit.framework.TestCase.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - - public class TaskDefinitionCachingAgentTest extends CommonCachingAgent { ObjectMapper mapper = new ObjectMapper(); @Subject - private final TaskDefinitionCachingAgent agent = new TaskDefinitionCachingAgent(netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry, mapper); + private final TaskDefinitionCachingAgent agent = + new TaskDefinitionCachingAgent( + netflixAmazonCredentials, REGION, clientProvider, credentialsProvider, registry, mapper); @Test public void shouldGetListOfTaskDefinitions() { - //Given - ListTaskDefinitionsResult listTaskDefinitionsResult = new ListTaskDefinitionsResult().withTaskDefinitionArns(TASK_DEFINITION_ARN_1); - when(ecs.listTaskDefinitions(any(ListTaskDefinitionsRequest.class))).thenReturn(listTaskDefinitionsResult); + // Given + Map<String, Object> serviceAttr = new HashMap<>(); + serviceAttr.put("taskDefinition", TASK_DEFINITION_ARN_1); + serviceAttr.put("desiredCount", 1); + serviceAttr.put("serviceName", SERVICE_NAME_1); + serviceAttr.put("maximumPercent", 200); + serviceAttr.put("minimumHealthyPercent", 50); + serviceAttr.put("createdAt", 8976543L); + + DefaultCacheData serviceCache = + new DefaultCacheData("test-service", serviceAttr, Collections.emptyMap()); + when(providerCache.filterIdentifiers( + SERVICES.toString(), "ecs;services;test-account;us-west-2;*")) + .thenReturn(Collections.singletonList("test-service")); + when(providerCache.getAll(anyString(), any(Set.class))) + .thenReturn(Collections.singletonList(serviceCache)); + + DescribeTaskDefinitionResult 
describeTaskDefinitionResult = + new DescribeTaskDefinitionResult() + .withTaskDefinition(new TaskDefinition().withTaskDefinitionArn(TASK_DEFINITION_ARN_1)); + when(ecs.describeTaskDefinition(any(DescribeTaskDefinitionRequest.class))) + .thenReturn(describeTaskDefinitionResult); + + // When + List<TaskDefinition> returnedTaskDefs = agent.getItems(ecs, providerCache); - DescribeTaskDefinitionResult describeTaskDefinitionResult = new DescribeTaskDefinitionResult().withTaskDefinition(new TaskDefinition().withTaskDefinitionArn(TASK_DEFINITION_ARN_1)); - when(ecs.describeTaskDefinition(any(DescribeTaskDefinitionRequest.class))).thenReturn(describeTaskDefinitionResult); + // Then + assertEquals( + 1, + returnedTaskDefs.size(), + "Expected the list to contain 1 ECS task definition, but got " + returnedTaskDefs.size()); + for (TaskDefinition taskDef : returnedTaskDefs) { + assertEquals( + taskDef.getTaskDefinitionArn(), + TASK_DEFINITION_ARN_1, + "Expected the task definition ARN to be " + + TASK_DEFINITION_ARN_1 + + " but it was: " + + taskDef.getTaskDefinitionArn()); + } + } - //When + @Test + public void shouldRetainCachedTaskDefinitions() { + // Given + Map<String, Object> serviceAttr = new HashMap<>(); + serviceAttr.put("taskDefinition", TASK_DEFINITION_ARN_1); + serviceAttr.put("desiredCount", 1); + serviceAttr.put("serviceName", SERVICE_NAME_1); + serviceAttr.put("maximumPercent", 200); + serviceAttr.put("minimumHealthyPercent", 50); + serviceAttr.put("createdAt", 8976543L); + + DefaultCacheData serviceCache = + new DefaultCacheData("test-service", serviceAttr, Collections.emptyMap()); + when(providerCache.filterIdentifiers( + SERVICES.toString(), "ecs;services;test-account;us-west-2;*")) + .thenReturn(Collections.singletonList("test-service")); + when(providerCache.getAll(anyString(), any(Set.class))) + .thenReturn(Collections.singletonList(serviceCache)); + + Map<String, Object> taskDefAttr = new HashMap<>(); + taskDefAttr.put("taskDefinitionArn", TASK_DEFINITION_ARN_1); + + DefaultCacheData taskDefCache = + new DefaultCacheData(TASK_DEFINITION_ARN_1, taskDefAttr, Collections.emptyMap()); + when(providerCache.get( + TASK_DEFINITIONS.toString(), + "ecs;taskDefinitions;test-account;us-west-2;" + TASK_DEFINITION_ARN_1)) + .thenReturn(taskDefCache); + + // When List<TaskDefinition> returnedTaskDefs = agent.getItems(ecs, providerCache); - //Then - assertTrue("Expected the list to contain 1 ECS task definition, but got " + returnedTaskDefs.size(), returnedTaskDefs.size() == 1); + // Then + assertEquals( + 1, + returnedTaskDefs.size(), + "Expected the list to contain 1 ECS task definition, but got " + returnedTaskDefs.size()); for (TaskDefinition taskDef : returnedTaskDefs) { - assertTrue("Expected the task definition to be in " + taskDef + " list but it was not. 
The task definition is: " + taskDef, returnedTaskDefs.contains(taskDef)); + assertEquals( + taskDef.getTaskDefinitionArn(), + TASK_DEFINITION_ARN_1, + "Expected the task definition ARN to be " + + TASK_DEFINITION_ARN_1 + + " but it was: " + + taskDef.getTaskDefinitionArn()); } } @Test public void shouldGenerateFreshData() { - //Given + // Given List taskDefinitionArns = new LinkedList<>(); taskDefinitionArns.add(TASK_DEFINITION_ARN_1); taskDefinitionArns.add(TASK_DEFINITION_ARN_2); @@ -78,21 +147,48 @@ public void shouldGenerateFreshData() { for (String taskDefArn : taskDefinitionArns) { keys.add(Keys.getTaskDefinitionKey(ACCOUNT, REGION, taskDefArn)); - tasks.add(new TaskDefinition().withTaskDefinitionArn(taskDefArn) - .withContainerDefinitions(Collections.emptyList())); + tasks.add( + new TaskDefinition() + .withTaskDefinitionArn(taskDefArn) + .withContainerDefinitions(Collections.emptyList())); } - //When + // When Map> dataMap = agent.generateFreshData(tasks); - //Then - assertTrue("Expected the data map to contain 1 namespaces, but it contains " + dataMap.keySet().size() + " namespaces.", dataMap.keySet().size() == 1); - assertTrue("Expected the data map to contain " + TASK_DEFINITIONS.toString() + " namespace, but it contains " + dataMap.keySet() + " namespaces.", dataMap.containsKey(TASK_DEFINITIONS.toString())); - assertTrue("Expected there to be 2 CacheData, instead there is " + dataMap.get(TASK_DEFINITIONS.toString()).size(), dataMap.get(TASK_DEFINITIONS.toString()).size() == 2); + // Then + assertTrue( + dataMap.keySet().size() == 1, + "Expected the data map to contain 1 namespaces, but it contains " + + dataMap.keySet().size() + + " namespaces."); + assertTrue( + dataMap.containsKey(TASK_DEFINITIONS.toString()), + "Expected the data map to contain " + + TASK_DEFINITIONS.toString() + + " namespace, but it contains " + + dataMap.keySet() + + " namespaces."); + assertTrue( + dataMap.get(TASK_DEFINITIONS.toString()).size() == 2, + "Expected there to be 2 CacheData, instead there is " + + dataMap.get(TASK_DEFINITIONS.toString()).size()); for (CacheData cacheData : dataMap.get(TASK_DEFINITIONS.toString())) { - assertTrue("Expected the key to be one of the following keys: " + keys.toString() + ". The key is: " + cacheData.getId() + ".", keys.contains(cacheData.getId())); - assertTrue("Expected the task definition ARN to be one of the following ARNs: " + taskDefinitionArns.toString() + ". The task definition ARN is: " + cacheData.getAttributes().get("taskDefinitionArn") + ".", taskDefinitionArns.contains(cacheData.getAttributes().get("taskDefinitionArn"))); + assertTrue( + keys.contains(cacheData.getId()), + "Expected the key to be one of the following keys: " + + keys.toString() + + ". The key is: " + + cacheData.getId() + + "."); + assertTrue( + taskDefinitionArns.contains(cacheData.getAttributes().get("taskDefinitionArn")), + "Expected the task definition ARN to be one of the following ARNs: " + + taskDefinitionArns.toString() + + ". 
The task definition ARN is: " + + cacheData.getAttributes().get("taskDefinitionArn") + + "."); } } } diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TestServiceCachingAgentFactory.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TestServiceCachingAgentFactory.java new file mode 100644 index 00000000000..f6259ef184b --- /dev/null +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/provider/agent/TestServiceCachingAgentFactory.java @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Expedia, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.ecs.provider.agent; + +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.ecs.names.EcsDefaultNamer; + +public class TestServiceCachingAgentFactory { + public static ServiceCachingAgent create(NetflixAmazonCredentials creds, String region) { + return new ServiceCachingAgent( + creds, region, null, null, new NoopRegistry(), new EcsDefaultNamer()); + } +} diff --git a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilderTest.java b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilderTest.java index 0bd8330f248..09c735b3cc7 100644 --- a/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilderTest.java +++ b/clouddriver-ecs/src/test/java/com/netflix/spinnaker/clouddriver/ecs/security/EcsAccountBuilderTest.java @@ -16,22 +16,20 @@ package com.netflix.spinnaker.clouddriver.ecs.security; -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; -import com.netflix.spinnaker.clouddriver.aws.security.config.CredentialsConfig; -import com.netflix.spinnaker.fiat.model.resources.Permissions; -import org.junit.Test; - -import java.util.Collections; - -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.config.AccountsConfiguration.Account; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import org.junit.jupiter.api.Test; + public class EcsAccountBuilderTest { @Test public void shouldBuildAccount() { - //Given + // Given String accountName = "ecs-test-account"; String accountType = "ecs"; @@ -39,17 +37,28 @@ public void shouldBuildAccount() { when(netflixAmazonCredentials.getPermissions()).thenReturn(mock(Permissions.class)); when(netflixAmazonCredentials.getAccountId()).thenReturn("id-1234567890"); - //When - CredentialsConfig.Account account = EcsAccountBuilder.build(netflixAmazonCredentials, accountName, accountType); + // When + Account account = 
EcsAccountBuilder.build(netflixAmazonCredentials, accountName, accountType); - //Then - assertTrue("The new account should not be of the same type as the old account (" + netflixAmazonCredentials.getAccountType() + ").", - !account.getAccountType().equals(netflixAmazonCredentials.getAccountType())); + // Then + assertTrue( + !account.getAccountType().equals(netflixAmazonCredentials.getAccountType()), + "The new account should not be of the same type as the old account (" + + netflixAmazonCredentials.getAccountType() + + ")."); - assertTrue("The new account should not have the same name as the old account (" + netflixAmazonCredentials.getName() + ").", - !account.getName().equals(netflixAmazonCredentials.getName())); + assertTrue( + !account.getName().equals(netflixAmazonCredentials.getName()), + "The new account should not have the same name as the old account (" + + netflixAmazonCredentials.getName() + + ")."); - assertTrue("The new account should have the same account ID as the old one (" + netflixAmazonCredentials.getAccountId() + - ") but has " + account.getAccountId() + " as the ID.", account.getAccountId().equals(netflixAmazonCredentials.getAccountId())); + assertTrue( + account.getAccountId().equals(netflixAmazonCredentials.getAccountId()), + "The new account should have the same account ID as the old one (" + + netflixAmazonCredentials.getAccountId() + + ") but has " + + account.getAccountId() + + " as the ID."); } } diff --git a/clouddriver-elasticsearch-aws/clouddriver-elasticsearch-aws.gradle b/clouddriver-elasticsearch-aws/clouddriver-elasticsearch-aws.gradle deleted file mode 100644 index 8ea5572c026..00000000000 --- a/clouddriver-elasticsearch-aws/clouddriver-elasticsearch-aws.gradle +++ /dev/null @@ -1,10 +0,0 @@ -apply from: "$rootDir/gradle/kotlin.gradle" - -repositories { - jcenter() -} - -dependencies { - compile project(":clouddriver-aws") - compile project(":clouddriver-elasticsearch") -} diff --git a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchClient.kt b/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchClient.kt deleted file mode 100644 index aeb55456a42..00000000000 --- a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchClient.kt +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.elasticsearch - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.elasticsearch.model.ElasticSearchException -import com.netflix.spinnaker.clouddriver.elasticsearch.model.Model -import com.netflix.spinnaker.clouddriver.elasticsearch.model.ModelType -import io.searchbox.client.JestClient -import io.searchbox.core.Bulk -import io.searchbox.core.BulkResult -import io.searchbox.core.Index -import io.searchbox.indices.CreateIndex -import io.searchbox.indices.DeleteIndex -import io.searchbox.indices.aliases.AddAliasMapping -import io.searchbox.indices.aliases.GetAliases -import io.searchbox.indices.aliases.ModifyAliases -import java.io.IOException - -class ElasticSearchClient(private val objectMapper : ObjectMapper, private val jestClient: JestClient) { - fun getPreviousIndexes(prefix: String): Set { - try { - val result = jestClient.execute(GetAliases.Builder().build()) - val r = objectMapper.readValue(result.jsonString, Map::class.java) as Map - return r.keys.filter { k -> k.startsWith(prefix) }.toSet() - } catch (e: IOException) { - throw ElasticSearchException("Unable to fetch previous indexes (prefix: $prefix)", e) - } - } - - fun createIndex(prefix: String): String { - val newIndexName = "${prefix}_${System.currentTimeMillis()}" - - try { - jestClient.execute(CreateIndex.Builder(newIndexName).build()) - return newIndexName - } catch (e: IOException) { - throw ElasticSearchException("Unable to create index (index: $newIndexName)", e) - } - } - - fun createAlias(index: String, alias: String) { - try { - jestClient.execute( - ModifyAliases.Builder( - AddAliasMapping.Builder(index, alias).build() - ).build() - ) - } catch (e: IOException) { - throw ElasticSearchException("Unable to create alias (index: $index, alias: $alias)", e) - } - } - - fun deleteIndex(index: String) { - try { - jestClient.execute( - DeleteIndex.Builder(index).build() - ) - } catch (e: IOException) { - throw ElasticSearchException("Unable to delete index (index: $index)", e) - } - } - - fun store(index: String, type: ModelType, partition: List) { - var builder: Bulk.Builder = Bulk.Builder().defaultIndex(index) - - for (serverGroupModel in partition) { - builder = builder.addAction( - Index.Builder(objectMapper.convertValue(serverGroupModel, Map::class.java)) - .index(index) - .type(type.toString()) - .id(serverGroupModel.id) - .build() - ) - } - - val bulk = builder.build() - try { - val jestResult = jestClient.execute(bulk) - if (!jestResult.isSucceeded) { - throw ElasticSearchException( - java.lang.String.format("Failed to index server groups, reason: '%s'", jestResult.getErrorMessage()) - ) - } - } catch (e: IOException) { - throw ElasticSearchException( - java.lang.String.format("Failed to index server groups, reason: '%s'", e.message) - ) - } - } -} diff --git a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonCachingAgentProvider.kt b/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonCachingAgentProvider.kt deleted file mode 100644 index b528ec07945..00000000000 --- a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonCachingAgentProvider.kt +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.elasticsearch.aws - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentProvider -import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.elasticsearch.ElasticSearchClient -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.kork.core.RetrySupport -import io.searchbox.client.JestClient - -open class ElasticSearchAmazonCachingAgentProvider( - private val objectMapper: ObjectMapper, - private val jestClient: JestClient, - private val retrySupport: RetrySupport, - private val registry: Registry, - private val amazonClientProvider: AmazonClientProvider, - private val accountCredentialsProvider: AccountCredentialsProvider -) : AgentProvider { - - override fun supports(providerName: String): Boolean { - return providerName.equals(AwsProvider.PROVIDER_NAME, ignoreCase = true) - } - - override fun agents(): Collection { - val credentials = accountCredentialsProvider - .all - .filter { NetflixAmazonCredentials::class.java.isInstance(it) } - .map { c -> c as NetflixAmazonCredentials } - - val elasticSearchClient = ElasticSearchClient( - objectMapper.copy().enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS), - jestClient - ) - - return listOf( - ElasticSearchAmazonServerGroupCachingAgent( - retrySupport, - registry, - amazonClientProvider, - credentials, - elasticSearchClient - ), - ElasticSearchAmazonInstanceCachingAgent( - retrySupport, - registry, - amazonClientProvider, - credentials, - elasticSearchClient - ) - ) - } -} diff --git a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonInstanceCachingAgent.kt b/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonInstanceCachingAgent.kt deleted file mode 100644 index d248d16e2f0..00000000000 --- a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonInstanceCachingAgent.kt +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.elasticsearch.aws - -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.DescribeInstancesRequest -import com.amazonaws.services.ec2.model.Instance -import com.google.common.collect.Lists -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.RunnableAgent -import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent -import com.netflix.spinnaker.clouddriver.elasticsearch.ElasticSearchClient -import com.netflix.spinnaker.clouddriver.elasticsearch.model.* -import com.netflix.spinnaker.kork.core.RetrySupport -import org.slf4j.LoggerFactory -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicInteger - -class ElasticSearchAmazonInstanceCachingAgent( - val retrySupport: RetrySupport, - val registry: Registry, - val amazonClientProvider: AmazonClientProvider, - val accounts: Collection, - val elasticSearchClient: ElasticSearchClient -) : RunnableAgent, CustomScheduledAgent { - - private val log = LoggerFactory.getLogger(ElasticSearchAmazonInstanceCachingAgent::class.java) - - override fun getPollIntervalMillis(): Long { - return TimeUnit.MINUTES.toMillis(10) - } - - override fun getTimeoutMillis(): Long { - return TimeUnit.MINUTES.toMillis(20) - } - - override fun getAgentType(): String { - return ElasticSearchAmazonInstanceCachingAgent::class.java.simpleName - } - - override fun getProviderName(): String { - return AwsProvider.PROVIDER_NAME - } - - override fun run() { - val prefix = "aws_instances" - var previousIndexes = elasticSearchClient.getPreviousIndexes(prefix) - if (previousIndexes.size > 2) { - log.warn("Found multiple previous indexes: {}", previousIndexes.joinToString(", ")) - - // TODO-AJ revisit this safe guard ... 
at least emit a metric that can be alerted upon when this goes production - for (previousIndex in previousIndexes) { - elasticSearchClient.deleteIndex(previousIndex) - } - previousIndexes = emptySet() - } - - val index = elasticSearchClient.createIndex(prefix) - - val instanceCount = AtomicInteger(0) - - for (credentials in accounts) { - for (region in credentials.regions) { - val instanceModels = fetchInstanceModels(credentials, region.name) - - instanceCount.addAndGet(instanceModels.size) - - for (partition in Lists.partition(instanceModels, 1000)) { - retrySupport.retry( - { elasticSearchClient.store(index, ModelType.Intance, partition) }, - 10, - 2000, - false - ) - } - } - } - - log.info("Total # of instances: $instanceCount") - - elasticSearchClient.createAlias(index, prefix) - for (previousIndex in previousIndexes) { - elasticSearchClient.deleteIndex(previousIndex) - } - } - - private fun fetchInstanceModels(credentials: NetflixAmazonCredentials, region: String): List { - val amazonEC2 = amazonClientProvider.getAmazonEC2(credentials, region) - - log.debug("Describing All Instances in ${credentials.name}:${region}") - - return fetchAllInstances(amazonEC2).map { instance -> - InstanceModel( - "${credentials.accountId}:${region}:${instance.instanceId}".toLowerCase(), - instance.instanceId, - InstanceTypeModel(instance.instanceType), - LocationModel("availabilityZone", instance.placement.availabilityZone), - AccountModel(credentials.accountId, credentials.name), - listOfNotNull(instance.publicIpAddress, instance.privateIpAddress), - instance.launchTime, - instance.tags.map { TagModel(it.key, it.value) } - ) - } - } - - private fun fetchAllInstances(amazonEC2: AmazonEC2): List { - val instances = mutableListOf() - - var request = DescribeInstancesRequest() - while (true) { - val response = amazonEC2.describeInstances(request) - - instances.addAll(response.reservations.flatMap { it.instances }) - if (response.nextToken != null) { - request = request.withNextToken(response.nextToken) - } else { - break - } - } - - return instances; - } -} diff --git a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonServerGroupCachingAgent.kt b/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonServerGroupCachingAgent.kt deleted file mode 100644 index 2501bbd9707..00000000000 --- a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/aws/ElasticSearchAmazonServerGroupCachingAgent.kt +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.elasticsearch.aws - - -import com.amazonaws.services.autoscaling.AmazonAutoScaling -import com.amazonaws.services.autoscaling.model.AutoScalingGroup -import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest -import com.amazonaws.services.autoscaling.model.DescribeLaunchConfigurationsRequest -import com.amazonaws.services.autoscaling.model.LaunchConfiguration -import com.google.common.collect.Lists -import com.netflix.frigga.Names -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.RunnableAgent -import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent -import com.netflix.spinnaker.clouddriver.elasticsearch.ElasticSearchClient -import com.netflix.spinnaker.clouddriver.elasticsearch.model.* -import com.netflix.spinnaker.kork.core.RetrySupport -import org.slf4j.LoggerFactory - -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicInteger - -class ElasticSearchAmazonServerGroupCachingAgent( - val retrySupport: RetrySupport, - val registry: Registry, - val amazonClientProvider: AmazonClientProvider, - val accounts: Collection, - val elasticSearchClient: ElasticSearchClient -) : RunnableAgent, CustomScheduledAgent { - - private val log = LoggerFactory.getLogger(ElasticSearchAmazonServerGroupCachingAgent::class.java) - - override fun getPollIntervalMillis(): Long { - return TimeUnit.SECONDS.toMillis(180) - } - - override fun getTimeoutMillis(): Long { - return TimeUnit.MINUTES.toMillis(5) - } - - override fun getAgentType(): String { - return ElasticSearchAmazonServerGroupCachingAgent::class.java.simpleName - } - - override fun getProviderName(): String { - return AwsProvider.PROVIDER_NAME - } - - override fun run() { - val prefix = "aws_server_groups" - var previousIndexes = elasticSearchClient.getPreviousIndexes(prefix) - if (previousIndexes.size > 2) { - log.warn("Found multiple previous indexes: {}", previousIndexes.joinToString(", ")) - - // TODO-AJ revisit this safe guard ... 
at least emit a metric that can be alerted upon when this goes production - for (previousIndex in previousIndexes) { - elasticSearchClient.deleteIndex(previousIndex) - } - previousIndexes = emptySet() - } - - val index = elasticSearchClient.createIndex(prefix) - val serverGroupCount = AtomicInteger(0) - - for (credentials in accounts) { - for (region in credentials.regions) { - val serverGroupModels = fetchServerGroupModels(credentials, region.name) - - serverGroupCount.addAndGet(serverGroupModels.size) - - for (partition in Lists.partition(serverGroupModels, 1000)) { - retrySupport.retry( - { elasticSearchClient.store(index, ModelType.ServerGroup, partition) }, - 10, - 2000, - false - ) - } - } - } - - log.info("Total # of server groups: $serverGroupCount") - - elasticSearchClient.createAlias(index, prefix) - for (previousIndex in previousIndexes) { - elasticSearchClient.deleteIndex(previousIndex) - } - } - - private fun fetchServerGroupModels(credentials: NetflixAmazonCredentials, region: String): List { - val amazonAutoScaling = amazonClientProvider.getAutoScaling(credentials, region) - - log.debug("Describing All Autoscaling Groups in ${credentials.name}:${region}") - val autoScalingGroups = fetchAllAutoScalingGroups(amazonAutoScaling) - - log.debug("Describing All Launch Configurations in ${credentials.name}:${region}") - val launchConfigurationsByName = fetchAllLaunchConfigurations(amazonAutoScaling) - .map { it.launchConfigurationName.toLowerCase() to it } - .toMap() - - return autoScalingGroups.map { asg -> - val launchConfiguration = launchConfigurationsByName.getOrDefault( - asg.launchConfigurationName?.toLowerCase(), - LaunchConfiguration() - ) - - val blockDeviceType = when { - launchConfiguration.blockDeviceMappings.isEmpty() -> "none" - launchConfiguration.blockDeviceMappings[0].ebs != null -> "ebs" - else -> "ephemeral" - } - - val instanceTypes = listOfNotNull(launchConfiguration.instanceType).map { InstanceTypeModel(it) } - - ServerGroupModel( - "${credentials.accountId}:${region}:${asg.autoScalingGroupName}".toLowerCase(), - asg.autoScalingGroupName, - Names.parseName(asg.autoScalingGroupName).toMoniker(), - LocationModel("region", region), - AccountModel(credentials.accountId, credentials.name), - instanceTypes, - BlockDeviceModel(blockDeviceType) - ) - } - } - - private fun fetchAllAutoScalingGroups(amazonAutoScaling: AmazonAutoScaling): List { - val autoScalingGroups = mutableListOf() - - var request = DescribeAutoScalingGroupsRequest() - while (true) { - val response = amazonAutoScaling.describeAutoScalingGroups(request) - - autoScalingGroups.addAll(response.autoScalingGroups) - if (response.nextToken != null) { - request = request.withNextToken(response.nextToken) - } else { - break - } - } - - return autoScalingGroups; - } - - private fun fetchAllLaunchConfigurations(amazonAutoScaling: AmazonAutoScaling): List { - val launchConfigurations = mutableListOf() - - var request = DescribeLaunchConfigurationsRequest() - while (true) { - val response = amazonAutoScaling.describeLaunchConfigurations(request) - - launchConfigurations.addAll(response.launchConfigurations) - if (response.nextToken != null) { - request = request.withNextToken(response.nextToken) - } else { - break - } - } - - return launchConfigurations; - } - - fun Names.toMoniker() = Moniker(this.app, this.stack, this.detail, this.cluster) -} diff --git a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchModels.kt 
b/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchModels.kt deleted file mode 100644 index 4d4b755751f..00000000000 --- a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchModels.kt +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.elasticsearch.model - -interface Model { - val id: String -} - -enum class ModelType { - ServerGroup, - Intance -} - -data class LocationModel(val type: String, val value: String) - -data class AccountModel(val id: String, val name: String) - -data class InstanceTypeModel(val type: String) - -data class BlockDeviceModel(val type: String) - -data class TagModel(val key: String, val value: Any) - -data class Moniker(val application: String, - val stack: String?, - val details: String?, - val cluster: String) - - -data class ServerGroupModel(override val id: String, - val name: String, - val moniker: Moniker, - val location: LocationModel, - val account: AccountModel, - val instanceTypes: Collection, - val blockDevice: BlockDeviceModel) : Model - -data class InstanceModel(override val id: String, - val instanceId: String, - val instanceType: InstanceTypeModel, - val location: LocationModel, - val account: AccountModel, - val ipAddresses: Collection, - val launchTime: java.util.Date, - val tags: Collection) : Model diff --git a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/config/ElasticSearchAmazonConfig.kt b/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/config/ElasticSearchAmazonConfig.kt deleted file mode 100644 index 4c70ee521aa..00000000000 --- a/clouddriver-elasticsearch-aws/src/main/kotlin/com/netflix/spinnaker/config/ElasticSearchAmazonConfig.kt +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.config - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider -import com.netflix.spinnaker.clouddriver.elasticsearch.aws.ElasticSearchAmazonCachingAgentProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.kork.core.RetrySupport -import io.searchbox.client.JestClient -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration - -@Configuration -@ConditionalOnProperty("elasticSearch.caching.enabled") -open class ElasticSearchAmazonConfig { - @Bean - open fun elasticSearchAmazonCachingAgentProvider(objectMapper: ObjectMapper, - jestClient: JestClient, - retrySupport: RetrySupport, - registry: Registry, - amazonClientProvider: AmazonClientProvider, - accountCredentialsProvider: AccountCredentialsProvider) = - ElasticSearchAmazonCachingAgentProvider( - objectMapper, - jestClient, - retrySupport, - registry, - amazonClientProvider, - accountCredentialsProvider - ) -} diff --git a/clouddriver-elasticsearch/clouddriver-elasticsearch.gradle b/clouddriver-elasticsearch/clouddriver-elasticsearch.gradle index 100f9ba10c2..39450f54819 100644 --- a/clouddriver-elasticsearch/clouddriver-elasticsearch.gradle +++ b/clouddriver-elasticsearch/clouddriver-elasticsearch.gradle @@ -1,6 +1,25 @@ dependencies { - compile project(":clouddriver-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + + implementation "com.netflix.frigga:frigga" + implementation "io.searchbox:jest:6.3.1" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-moniker" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "io.spinnaker.kork:kork-security" + implementation "com.squareup.retrofit:retrofit" + implementation "org.apache.groovy:groovy" + implementation "org.elasticsearch:elasticsearch" + implementation "org.springframework.boot:spring-boot-starter-web" + + testImplementation "org.testcontainers:elasticsearch" + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testRuntimeOnly "net.bytebuddy:byte-buddy" - compile 'org.elasticsearch:elasticsearch:2.4.1' - compile 'io.searchbox:jest:2.0.3' } diff --git a/clouddriver-elasticsearch/elasticsearch_index_template.json b/clouddriver-elasticsearch/elasticsearch_index_template.json index f4ce6d5b2ed..31f80c458db 100644 --- a/clouddriver-elasticsearch/elasticsearch_index_template.json +++ b/clouddriver-elasticsearch/elasticsearch_index_template.json @@ -1,6 +1,8 @@ { "order": 0, - "template": "tags_v*", + "index_patterns": [ + "tags_v*" + ], "settings": { "index": { "number_of_shards": "6", @@ -9,7 +11,7 @@ } }, "mappings": { - "_default_": { + "_doc": { "dynamic": "false", "dynamic_templates": [ { @@ -24,41 +26,37 @@ "entityRef_template": { "path_match": "entityRef.*", "mapping": { - "index": "not_analyzed" + "index": "keyword" } } } ], "properties": { + "id": { + "type": "text" + }, "entityRef": { "properties": { "accountId": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, 
"application": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "entityType": { - "index": "not_analyzed", - "type": "string" + "type": "text" }, "cloudProvider": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "entityId": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "region": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "account": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" } } }, @@ -66,20 +64,16 @@ "type": "nested", "properties": { "valueType": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "name": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "namespace": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" }, "value": { - "index": "not_analyzed", - "type": "string" + "type": "keyword" } } } @@ -87,4 +81,4 @@ } }, "aliases": {} -} \ No newline at end of file +} diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchEntityTagger.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchEntityTagger.java index 479ff901964..15449a35403 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchEntityTagger.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ElasticSearchEntityTagger.java @@ -28,11 +28,10 @@ import com.netflix.spinnaker.clouddriver.model.EntityTags; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.tags.EntityTagger; +import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - public class ElasticSearchEntityTagger implements EntityTagger { private static final Logger log = LoggerFactory.getLogger(ElasticSearchEntityTagger.class); @@ -47,175 +46,195 @@ public class ElasticSearchEntityTagger implements EntityTagger { private final UpsertEntityTagsAtomicOperationConverter upsertEntityTagsAtomicOperationConverter; private final DeleteEntityTagsAtomicOperationConverter deleteEntityTagsAtomicOperationConverter; - public ElasticSearchEntityTagger(ElasticSearchEntityTagsProvider entityTagsProvider, - UpsertEntityTagsAtomicOperationConverter upsertEntityTagsAtomicOperationConverter, - DeleteEntityTagsAtomicOperationConverter deleteEntityTagsAtomicOperationConverter) { + public ElasticSearchEntityTagger( + ElasticSearchEntityTagsProvider entityTagsProvider, + UpsertEntityTagsAtomicOperationConverter upsertEntityTagsAtomicOperationConverter, + DeleteEntityTagsAtomicOperationConverter deleteEntityTagsAtomicOperationConverter) { this.entityTagsProvider = entityTagsProvider; this.upsertEntityTagsAtomicOperationConverter = upsertEntityTagsAtomicOperationConverter; this.deleteEntityTagsAtomicOperationConverter = deleteEntityTagsAtomicOperationConverter; } @Override - public void alert(String cloudProvider, - String accountId, - String region, - String category, - String entityType, - String entityId, - String key, - String value, - Long timestamp) { + public void alert( + String cloudProvider, + String accountId, + String region, + String category, + String entityType, + String entityId, + String key, + String value, + Long timestamp) { upsertEntityTags( - ALERT_TYPE, ALERT_KEY_PREFIX, - cloudProvider, - accountId, - region, - category, - entityType, - entityId, - key, - value, - timestamp - ); + 
ALERT_TYPE, + ALERT_KEY_PREFIX, + cloudProvider, + accountId, + region, + category, + entityType, + entityId, + key, + value, + timestamp); } @Override - public void notice(String cloudProvider, - String accountId, - String region, - String category, - String entityType, - String entityId, - String key, - String value, - Long timestamp) { + public void notice( + String cloudProvider, + String accountId, + String region, + String category, + String entityType, + String entityId, + String key, + String value, + Long timestamp) { upsertEntityTags( - NOTICE_TYPE, - NOTICE_KEY_PREFIX, - cloudProvider, - accountId, - region, - category, - entityType, - entityId, - key, - value, - timestamp - ); + NOTICE_TYPE, + NOTICE_KEY_PREFIX, + cloudProvider, + accountId, + region, + category, + entityType, + entityId, + key, + value, + timestamp); } @Override - public void tag(String cloudProvider, - String accountId, - String region, - String namespace, - String entityType, - String entityId, - String tagName, - Object value, - Long timestamp) { + public void tag( + String cloudProvider, + String accountId, + String region, + String namespace, + String entityType, + String entityId, + String tagName, + Object value, + Long timestamp) { upsertEntityTags( - tagName, - cloudProvider, - accountId, - region, - namespace, - entityType, - entityId, - value, - timestamp); + tagName, + cloudProvider, + accountId, + region, + namespace, + entityType, + entityId, + value, + timestamp); } @Override - public Collection taggedEntities(String cloudProvider, - String accountId, - String entityType, - String tagName, - int maxResults) { + public Collection taggedEntities( + String cloudProvider, String accountId, String entityType, String tagName, int maxResults) { return entityTagsProvider.getAll( - cloudProvider, - null, - entityType, - null, - null, - accountId, - null, - null, - Collections.singletonMap(tagName, "*"), - maxResults - ); + cloudProvider, + null, + entityType, + null, + null, + accountId, + null, + null, + Collections.singletonMap(tagName, "*"), + maxResults); } @Override - public void deleteAll(String cloudProvider, String accountId, String region, String entityType, String entityId) { - DeleteEntityTagsDescription deleteEntityTagsDescription = deleteEntityTagsDescription( - cloudProvider, - accountId, - region, - entityType, - entityId, - null - ); + public void deleteAll( + String cloudProvider, String accountId, String region, String entityType, String entityId) { + DeleteEntityTagsDescription deleteEntityTagsDescription = + deleteEntityTagsDescription(cloudProvider, accountId, region, entityType, entityId, null); log.info("Removing all entity tags for '{}'", deleteEntityTagsDescription.getId()); delete(deleteEntityTagsDescription); } @Override - public void delete(String cloudProvider, - String accountId, - String region, - String entityType, - String entityId, - String tagName) { - DeleteEntityTagsDescription deleteEntityTagsDescription = deleteEntityTagsDescription( - cloudProvider, - accountId, - region, - entityType, - entityId, - Collections.singletonList(tagName) - ); + public void delete( + String cloudProvider, + String accountId, + String region, + String entityType, + String entityId, + String tagName) { + DeleteEntityTagsDescription deleteEntityTagsDescription = + deleteEntityTagsDescription( + cloudProvider, + accountId, + region, + entityType, + entityId, + Collections.singletonList(tagName)); log.info("Removing '{}' for '{}'", tagName, deleteEntityTagsDescription.getId()); 
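// Editor's note: the delete paths above and below address documents by the composite
// entity-tags ID. A self-contained sketch of how EntityRefIdBuilder (diffed further on)
// derives that ID: the five coordinates joined with ':', '*' substituted for nulls, and
// the whole string lowercased. Null checks on the required fields are omitted here.
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class EntityRefIdSketch {
  static String buildId(
      String cloudProvider, String entityType, String entityId, String accountId, String region) {
    return Stream.of(cloudProvider, entityType, entityId, accountId, region)
        .map(s -> Optional.ofNullable(s).orElse("*"))
        .collect(Collectors.joining(":"))
        .toLowerCase();
  }
  // buildId("aws", "servergroup", "app-v001", null, "us-east-1")
  //   -> "aws:servergroup:app-v001:*:us-east-1"
}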
delete(deleteEntityTagsDescription); } private void delete(DeleteEntityTagsDescription deleteEntityTagsDescription) { - AtomicOperation operation = deleteEntityTagsAtomicOperationConverter.buildOperation(deleteEntityTagsDescription); + AtomicOperation operation = + deleteEntityTagsAtomicOperationConverter.buildOperation(deleteEntityTagsDescription); operate(operation); } - private void upsertEntityTags(String type, - String prefix, - String cloudProvider, - String accountId, - String region, - String category, - String entityType, - String entityId, - String key, - String value, - Long timestamp) { + private void upsertEntityTags( + String type, + String prefix, + String cloudProvider, + String accountId, + String region, + String category, + String entityType, + String entityId, + String key, + String value, + Long timestamp) { upsertEntityTags( - upsertCategorizedEntityTagsDescription(type, prefix, cloudProvider, accountId, region, category, entityType, entityId, key, value, timestamp)); + upsertCategorizedEntityTagsDescription( + type, + prefix, + cloudProvider, + accountId, + region, + category, + entityType, + entityId, + key, + value, + timestamp)); } - private void upsertEntityTags(String name, - String cloudProvider, - String accountId, - String region, - String namespace, - String entityType, - String entityId, - Object tagValue, - Long timestamp) { + private void upsertEntityTags( + String name, + String cloudProvider, + String accountId, + String region, + String namespace, + String entityType, + String entityId, + Object tagValue, + Long timestamp) { upsertEntityTags( - upsertEntityTagsDescription(name, cloudProvider, accountId, region, namespace, null, entityType, entityId, tagValue, timestamp)); + upsertEntityTagsDescription( + name, + cloudProvider, + accountId, + region, + namespace, + null, + entityType, + entityId, + tagValue, + timestamp)); } private void upsertEntityTags(UpsertEntityTagsDescription description) { - AtomicOperation operation = upsertEntityTagsAtomicOperationConverter.buildOperation(description); + AtomicOperation operation = + upsertEntityTagsAtomicOperationConverter.buildOperation(description); operate(operation); } @@ -224,8 +243,8 @@ private void operate(AtomicOperation operation) { Task originalTask = TaskRepository.threadLocalTask.get(); try { TaskRepository.threadLocalTask.set( - Optional.ofNullable(originalTask).orElse(new DefaultTask(ElasticSearchEntityTagger.class.getSimpleName())) - ); + Optional.ofNullable(originalTask) + .orElse(new DefaultTask(ElasticSearchEntityTagger.class.getSimpleName()))); run(operation); } finally { TaskRepository.threadLocalTask.set(originalTask); @@ -237,34 +256,46 @@ protected void run(AtomicOperation operation) { operation.operate(Collections.emptyList()); } - private static UpsertEntityTagsDescription upsertCategorizedEntityTagsDescription(String type, - String prefix, - String cloudProvider, - String accountId, - String region, - String category, - String entityType, - String entityId, - String key, - String value, - Long timestamp) { + private static UpsertEntityTagsDescription upsertCategorizedEntityTagsDescription( + String type, + String prefix, + String cloudProvider, + String accountId, + String region, + String category, + String entityType, + String entityId, + String key, + String value, + Long timestamp) { String name = prefix + key; Map tagValue = new HashMap<>(); tagValue.put("message", value); tagValue.put("type", type); - return upsertEntityTagsDescription(name, cloudProvider, accountId, region, 
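// Editor's note: a minimal sketch of the payload that upsertCategorizedEntityTagsDescription
// assembles at this point. The tag name is the category prefix concatenated with the
// caller's key, and the value is the two-entry map built above; ALERT_TYPE, NOTICE_TYPE,
// and the key prefixes are constants of the enclosing class not shown in this hunk.
import java.util.HashMap;
import java.util.Map;

class CategorizedTagValueSketch {
  static Map<String, String> categorizedValue(String type, String message) {
    Map<String, String> tagValue = new HashMap<>();
    tagValue.put("message", message); // the human-readable alert/notice text
    tagValue.put("type", type); // e.g. the ALERT_TYPE or NOTICE_TYPE constant
    return tagValue;
  }
}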
null, category, entityType, entityId, tagValue, timestamp); + return upsertEntityTagsDescription( + name, + cloudProvider, + accountId, + region, + null, + category, + entityType, + entityId, + tagValue, + timestamp); } - private static UpsertEntityTagsDescription upsertEntityTagsDescription(String name, - String cloudProvider, - String accountId, - String region, - String namespace, - String category, - String entityType, - String entityId, - Object entityTagValue, - Long timestamp) { + private static UpsertEntityTagsDescription upsertEntityTagsDescription( + String name, + String cloudProvider, + String accountId, + String region, + String namespace, + String category, + String entityType, + String entityId, + Object entityTagValue, + Long timestamp) { EntityTags.EntityRef entityRef = new EntityTags.EntityRef(); entityRef.setEntityType(entityType); entityRef.setEntityId(entityId); @@ -287,15 +318,15 @@ private static UpsertEntityTagsDescription upsertEntityTagsDescription(String na return upsertEntityTagsDescription; } - private static DeleteEntityTagsDescription deleteEntityTagsDescription(String cloudProvider, - String accountId, - String region, - String entityType, - String entityId, - List tags) { - EntityRefIdBuilder.EntityRefId entityRefId = EntityRefIdBuilder.buildId( - cloudProvider, entityType, entityId, accountId, region - ); + private static DeleteEntityTagsDescription deleteEntityTagsDescription( + String cloudProvider, + String accountId, + String region, + String entityType, + String entityId, + List tags) { + EntityRefIdBuilder.EntityRefId entityRefId = + EntityRefIdBuilder.buildId(cloudProvider, entityType, entityId, accountId, region); DeleteEntityTagsDescription deleteEntityTagsDescription = new DeleteEntityTagsDescription(); deleteEntityTagsDescription.setId(entityRefId.id); diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/EntityRefIdBuilder.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/EntityRefIdBuilder.java index b3da312be0b..59a42e822f8 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/EntityRefIdBuilder.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/EntityRefIdBuilder.java @@ -22,17 +22,19 @@ import java.util.stream.Stream; public class EntityRefIdBuilder { - public static EntityRefId buildId(String cloudProvider, String entityType, String entityId, String accountId, String region) { + public static EntityRefId buildId( + String cloudProvider, String entityType, String entityId, String accountId, String region) { Objects.requireNonNull(cloudProvider, "cloudProvider must be non-null"); Objects.requireNonNull(entityType, "entityType must be non-null"); Objects.requireNonNull(entityId, "entityId must be non-null"); - String id = Stream - .of(cloudProvider, entityType, entityId, accountId, region) - .map(s -> Optional.ofNullable(s).orElse("*")) - .collect(Collectors.joining(":")); + String id = + Stream.of(cloudProvider, entityType, entityId, accountId, region) + .map(s -> Optional.ofNullable(s).orElse("*")) + .collect(Collectors.joining(":")); - return new EntityRefId(id.toLowerCase(), "{{cloudProvider}}:{{entityType}}:{{entityId}}:{{account}}:{{region}}"); + return new EntityRefId( + id.toLowerCase(), "{{cloudProvider}}:{{entityType}}:{{entityId}}:{{account}}:{{region}}"); } public static class EntityRefId { diff --git 
a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/BulkUpsertEntityTagsAtomicOperationConverter.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/BulkUpsertEntityTagsAtomicOperationConverter.java index 03abc6876ea..16e0a329528 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/BulkUpsertEntityTagsAtomicOperationConverter.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/BulkUpsertEntityTagsAtomicOperationConverter.java @@ -25,13 +25,13 @@ import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; import com.netflix.spinnaker.kork.core.RetrySupport; +import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Map; - @Component("bulkUpsertEntityTagsDescription") -public class BulkUpsertEntityTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class BulkUpsertEntityTagsAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { private final ObjectMapper objectMapper; private final RetrySupport retrySupport; @@ -40,11 +40,12 @@ public class BulkUpsertEntityTagsAtomicOperationConverter extends AbstractAtomic private final ElasticSearchEntityTagsProvider entityTagsProvider; @Autowired - public BulkUpsertEntityTagsAtomicOperationConverter(ObjectMapper objectMapper, - RetrySupport retrySupport, - Front50Service front50Service, - AccountCredentialsProvider accountCredentialsProvider, - ElasticSearchEntityTagsProvider entityTagsProvider) { + public BulkUpsertEntityTagsAtomicOperationConverter( + ObjectMapper objectMapper, + RetrySupport retrySupport, + Front50Service front50Service, + AccountCredentialsProvider accountCredentialsProvider, + ElasticSearchEntityTagsProvider entityTagsProvider) { this.objectMapper = objectMapper; this.retrySupport = retrySupport; this.front50Service = front50Service; @@ -54,15 +55,21 @@ public BulkUpsertEntityTagsAtomicOperationConverter(ObjectMapper objectMapper, public AtomicOperation convertOperation(Map input) { return new BulkUpsertEntityTagsAtomicOperation( - retrySupport, front50Service, accountCredentialsProvider, entityTagsProvider, this.convertDescription(input) - ); + retrySupport, + front50Service, + accountCredentialsProvider, + entityTagsProvider, + this.convertDescription(input)); } public BulkUpsertEntityTagsDescription convertDescription(Map input) { - BulkUpsertEntityTagsDescription description = objectMapper.convertValue(input, BulkUpsertEntityTagsDescription.class); - description.entityTags.forEach(upsertEntityTagsDescription -> - upsertEntityTagsDescription.getTags().forEach(UpsertEntityTagsAtomicOperationConverter::setTagValueType) - ); + BulkUpsertEntityTagsDescription description = + objectMapper.convertValue(input, BulkUpsertEntityTagsDescription.class); + description.entityTags.forEach( + upsertEntityTagsDescription -> + upsertEntityTagsDescription + .getTags() + .forEach(UpsertEntityTagsAtomicOperationConverter::setTagValueType)); return description; } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/DeleteEntityTagsAtomicOperationConverter.java 
b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/DeleteEntityTagsAtomicOperationConverter.java index 827cf0e0212..2eb80c18005 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/DeleteEntityTagsAtomicOperationConverter.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/DeleteEntityTagsAtomicOperationConverter.java @@ -25,24 +25,27 @@ import com.netflix.spinnaker.clouddriver.elasticsearch.ops.DeleteEntityTagsAtomicOperation; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.Map; - @Component("deleteEntityTags") -public class DeleteEntityTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class DeleteEntityTagsAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { private final ObjectMapper objectMapper; private final Front50Service front50Service; private final ElasticSearchEntityTagsProvider entityTagsProvider; @Autowired - public DeleteEntityTagsAtomicOperationConverter(ObjectMapper objectMapper, - Front50Service front50Service, - ElasticSearchEntityTagsProvider entityTagsProvider) { - this.objectMapper = objectMapper.copy() - .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) - .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + public DeleteEntityTagsAtomicOperationConverter( + ObjectMapper objectMapper, + Front50Service front50Service, + ElasticSearchEntityTagsProvider entityTagsProvider) { + this.objectMapper = + objectMapper + .copy() + .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); this.front50Service = front50Service; this.entityTagsProvider = entityTagsProvider; @@ -53,9 +56,7 @@ public AtomicOperation convertOperation(Map input) { } public AtomicOperation buildOperation(DeleteEntityTagsDescription description) { - return new DeleteEntityTagsAtomicOperation( - front50Service, entityTagsProvider, description - ); + return new DeleteEntityTagsAtomicOperation(front50Service, entityTagsProvider, description); } public DeleteEntityTagsDescription convertDescription(Map input) { diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/UpsertEntityTagsAtomicOperationConverter.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/UpsertEntityTagsAtomicOperationConverter.java index e1e302957dd..667bd14a13c 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/UpsertEntityTagsAtomicOperationConverter.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/converters/UpsertEntityTagsAtomicOperationConverter.java @@ -28,14 +28,14 @@ import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; import com.netflix.spinnaker.kork.core.RetrySupport; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - import 
java.util.Collection; import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; @Component("upsertEntityTags") -public class UpsertEntityTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +public class UpsertEntityTagsAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { private final ObjectMapper objectMapper; private final RetrySupport retrySupport; private final Front50Service front50Service; @@ -43,14 +43,17 @@ public class UpsertEntityTagsAtomicOperationConverter extends AbstractAtomicOper private final ElasticSearchEntityTagsProvider entityTagsProvider; @Autowired - public UpsertEntityTagsAtomicOperationConverter(ObjectMapper objectMapper, - RetrySupport retrySupport, - Front50Service front50Service, - AccountCredentialsProvider accountCredentialsProvider, - ElasticSearchEntityTagsProvider entityTagsProvider) { - this.objectMapper = objectMapper.copy() - .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) - .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + public UpsertEntityTagsAtomicOperationConverter( + ObjectMapper objectMapper, + RetrySupport retrySupport, + Front50Service front50Service, + AccountCredentialsProvider accountCredentialsProvider, + ElasticSearchEntityTagsProvider entityTagsProvider) { + this.objectMapper = + objectMapper + .copy() + .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false) + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); this.retrySupport = retrySupport; this.front50Service = front50Service; @@ -65,23 +68,21 @@ public AtomicOperation convertOperation(Map input) { public AtomicOperation buildOperation(UpsertEntityTagsDescription description) { description.getTags().forEach(UpsertEntityTagsAtomicOperationConverter::setTagValueType); return new UpsertEntityTagsAtomicOperation( - retrySupport, - front50Service, - accountCredentialsProvider, - entityTagsProvider, - description - ); + retrySupport, front50Service, accountCredentialsProvider, entityTagsProvider, description); } public UpsertEntityTagsDescription convertDescription(Map input) { - UpsertEntityTagsDescription upsertEntityTagsDescription = objectMapper.convertValue(input, UpsertEntityTagsDescription.class); + UpsertEntityTagsDescription upsertEntityTagsDescription = + objectMapper.convertValue(input, UpsertEntityTagsDescription.class); return upsertEntityTagsDescription; } static void setTagValueType(EntityTags.EntityTag entityTag) { if (entityTag.getValueType() == null) { - boolean isObject = entityTag.getValue() instanceof Map || entityTag.getValue() instanceof Collection; - entityTag.setValueType(isObject ? EntityTags.EntityTagValueType.object : EntityTags.EntityTagValueType.literal); + boolean isObject = + entityTag.getValue() instanceof Map || entityTag.getValue() instanceof Collection; + entityTag.setValueType( + isObject ? 
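// Editor's note: the value-type inference surrounding this point, reduced to a
// self-contained sketch — Map and Collection values are classified as 'object',
// everything else as 'literal'. The enum below stands in for
// EntityTags.EntityTagValueType.
import java.util.Collection;
import java.util.Map;

class ValueTypeSketch {
  enum ValueType {
    object,
    literal
  }

  static ValueType infer(Object value) {
    boolean isObject = value instanceof Map || value instanceof Collection;
    return isObject ? ValueType.object : ValueType.literal;
  }
  // infer(java.util.Map.of("k", "v")) -> object; infer("hello") -> literal
}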
EntityTags.EntityTagValueType.object : EntityTags.EntityTagValueType.literal);
    }
  }
}
diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/BulkUpsertEntityTagsDescription.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/BulkUpsertEntityTagsDescription.java
index a1c75de3d4f..96746aa473e 100644
--- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/BulkUpsertEntityTagsDescription.java
+++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/BulkUpsertEntityTagsDescription.java
@@ -19,12 +19,10 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.netflix.spinnaker.clouddriver.model.EntityTags;
 import com.netflix.spinnaker.clouddriver.security.resources.NonCredentialed;
-
 import java.util.ArrayList;
 import java.util.List;
 
 public class BulkUpsertEntityTagsDescription implements NonCredentialed {
-  @JsonProperty
-  public boolean isPartial = true;
+  @JsonProperty public boolean isPartial = true;
 
   public List<UpsertEntityTagsDescription> entityTags = new ArrayList<>();
 }
diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/DeleteEntityTagsDescription.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/DeleteEntityTagsDescription.java
index 3392023f1f3..32ee689d392 100644
--- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/DeleteEntityTagsDescription.java
+++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/DeleteEntityTagsDescription.java
@@ -18,18 +18,15 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.netflix.spinnaker.clouddriver.security.resources.NonCredentialed;
-
+import com.netflix.spinnaker.orchestration.OperationDescription;
 import java.util.List;
 
-public class DeleteEntityTagsDescription implements NonCredentialed {
-  @JsonProperty
-  private String id;
+public class DeleteEntityTagsDescription implements NonCredentialed, OperationDescription {
+  @JsonProperty private String id;
 
-  @JsonProperty
-  private List<String> tags;
+  @JsonProperty private List<String> tags;
 
-  @JsonProperty
-  private boolean deleteAll = false;
+  @JsonProperty private boolean deleteAll = false;
 
   public String getId() {
     return id;
diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/UpsertEntityTagsDescription.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/UpsertEntityTagsDescription.java
index 445b7d05a03..19c706345d3 100644
--- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/UpsertEntityTagsDescription.java
+++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/descriptions/UpsertEntityTagsDescription.java
@@ -21,6 +21,5 @@
 import com.netflix.spinnaker.clouddriver.security.resources.NonCredentialed;
 
 public class UpsertEntityTagsDescription extends EntityTags implements NonCredentialed {
-  @JsonProperty
-  public boolean isPartial = true;
+  @JsonProperty public boolean isPartial = true;
 }
diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/CreateServerGroupEventHandler.java
b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/CreateServerGroupEventHandler.java index 179be3885e2..aa55e53fc4d 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/CreateServerGroupEventHandler.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/CreateServerGroupEventHandler.java @@ -47,11 +47,10 @@ public void handle(OperationEvent operationEvent) { CreateServerGroupEvent createServerGroupEvent = (CreateServerGroupEvent) operationEvent; serverGroupTagger.deleteAll( - createServerGroupEvent.getCloudProvider(), - createServerGroupEvent.getAccountId(), - createServerGroupEvent.getRegion(), - EntityTagger.ENTITY_TYPE_SERVER_GROUP, - createServerGroupEvent.getName() - ); + createServerGroupEvent.getCloudProvider(), + createServerGroupEvent.getAccountId(), + createServerGroupEvent.getRegion(), + EntityTagger.ENTITY_TYPE_SERVER_GROUP, + createServerGroupEvent.getName()); } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/DeleteServerGroupEventHandler.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/DeleteServerGroupEventHandler.java index e9ec7ed3e85..87403a2227c 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/DeleteServerGroupEventHandler.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/events/DeleteServerGroupEventHandler.java @@ -47,11 +47,10 @@ public void handle(OperationEvent operationEvent) { DeleteServerGroupEvent deleteServerGroupEvent = (DeleteServerGroupEvent) operationEvent; serverGroupTagger.deleteAll( - deleteServerGroupEvent.getCloudProvider(), - deleteServerGroupEvent.getAccountId(), - deleteServerGroupEvent.getRegion(), - EntityTagger.ENTITY_TYPE_SERVER_GROUP, - deleteServerGroupEvent.getName() - ); + deleteServerGroupEvent.getCloudProvider(), + deleteServerGroupEvent.getAccountId(), + deleteServerGroupEvent.getRegion(), + EntityTagger.ENTITY_TYPE_SERVER_GROUP, + deleteServerGroupEvent.getName()); } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProvider.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProvider.java index 162b6e1e120..ca0b467257b 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProvider.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProvider.java @@ -16,8 +16,11 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.model; +import static java.lang.String.format; + import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.netflix.frigga.Names; import com.netflix.spinnaker.clouddriver.core.services.Front50Service; import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; @@ -25,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.model.EntityTagsProvider; import com.netflix.spinnaker.config.ElasticSearchConfigProperties; import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.spinnaker.security.AuthenticatedRequest; import 
io.searchbox.client.JestClient; import io.searchbox.client.JestResult; import io.searchbox.core.Bulk; @@ -37,6 +41,15 @@ import io.searchbox.indices.CreateIndex; import io.searchbox.indices.DeleteIndex; import io.searchbox.params.Parameters; +import java.io.IOException; +import java.net.URLEncoder; +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -47,19 +60,6 @@ import org.springframework.context.ApplicationContext; import org.springframework.stereotype.Component; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import static java.lang.String.format; - @Component public class ElasticSearchEntityTagsProvider implements EntityTagsProvider { private static final Logger log = LoggerFactory.getLogger(ElasticSearchEntityTagsProvider.class); @@ -72,48 +72,49 @@ public class ElasticSearchEntityTagsProvider implements EntityTagsProvider { private final String activeElasticSearchIndex; - private final boolean singleMappingType; private final String mappingTypeName; - @Autowired - public ElasticSearchEntityTagsProvider(ApplicationContext applicationContext, - RetrySupport retrySupport, - ObjectMapper objectMapper, - Front50Service front50Service, - JestClient jestClient, - ElasticSearchConfigProperties elasticSearchConfigProperties) { + public ElasticSearchEntityTagsProvider( + ApplicationContext applicationContext, + RetrySupport retrySupport, + ObjectMapper objectMapper, + Front50Service front50Service, + JestClient jestClient, + ElasticSearchConfigProperties elasticSearchConfigProperties) { this.applicationContext = applicationContext; this.retrySupport = retrySupport; this.objectMapper = objectMapper; this.front50Service = front50Service; this.jestClient = jestClient; this.activeElasticSearchIndex = elasticSearchConfigProperties.getActiveIndex(); - this.singleMappingType = elasticSearchConfigProperties.isSingleMappingType(); this.mappingTypeName = elasticSearchConfigProperties.getMappingTypeName(); } @Override - public Collection getAll(String cloudProvider, - String application, - String entityType, - List entityIds, - String idPrefix, - String account, - String region, - String namespace, - Map tags, - int maxResults) { + public Collection getAll( + String cloudProvider, + String application, + String entityType, + List entityIds, + String idPrefix, + String account, + String region, + String namespace, + Map tags, + int maxResults) { BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery(); if (cloudProvider != null) { // restrict to a specific cloudProvider (optional) - queryBuilder = queryBuilder.must(QueryBuilders.termQuery("entityRef.cloudProvider", cloudProvider)); + queryBuilder = + queryBuilder.must(QueryBuilders.termQuery("entityRef.cloudProvider", cloudProvider)); } if (application != null) { // restrict to a specific application (optional) - queryBuilder = queryBuilder.must(QueryBuilders.termQuery("entityRef.application", application)); + queryBuilder = + 
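// Editor's note: getAll() builds its query with the pattern shown here in isolation —
// each non-null criterion is ANDed onto a bool query as a `must` term clause against
// the keyword fields defined in the index template. Uses the same
// org.elasticsearch.index.query.QueryBuilders API imported above.
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

class QueryAccumulationSketch {
  static BoolQueryBuilder accountAndRegion(String account, String region) {
    BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery();
    if (account != null) {
      queryBuilder = queryBuilder.must(QueryBuilders.termQuery("entityRef.account", account));
    }
    if (region != null) {
      queryBuilder = queryBuilder.must(QueryBuilders.termQuery("entityRef.region", region));
    }
    return queryBuilder;
  }
}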
queryBuilder.must(QueryBuilders.termQuery("entityRef.application", application)); } if (entityIds != null && !entityIds.isEmpty()) { @@ -136,24 +137,32 @@ public Collection getAll(String cloudProvider, queryBuilder = queryBuilder.must(QueryBuilders.wildcardQuery("id", idPrefix)); } + if (entityType != null) { + queryBuilder = + queryBuilder.must( + QueryBuilders.wildcardQuery("entityRef.entityType", entityType.toLowerCase())); + } + if (tags != null) { for (Map.Entry entry : tags.entrySet()) { - // each key/value pair maps to a distinct nested `tags` object and must be a unique query snippet - queryBuilder = queryBuilder.must( - applyTagsToBuilder(namespace, Collections.singletonMap(entry.getKey(), entry.getValue())) - ); + // each key/value pair maps to a distinct nested `tags` object and must be a unique query + // snippet + queryBuilder = + queryBuilder.must( + applyTagsToBuilder( + namespace, Collections.singletonMap(entry.getKey(), entry.getValue()))); } } if ((tags == null || tags.isEmpty()) && namespace != null) { - // this supports a search akin to /tags?namespace=my_namespace which should return all entities with _any_ tag in - // the given namespace ... ensures that the namespace filter is applied even if no tag criteria provided - queryBuilder = queryBuilder.must( - applyTagsToBuilder(namespace, Collections.emptyMap()) - ); + // this supports a search akin to /tags?namespace=my_namespace which should return all + // entities with _any_ tag in + // the given namespace ... ensures that the namespace filter is applied even if no tag + // criteria provided + queryBuilder = queryBuilder.must(applyTagsToBuilder(namespace, Collections.emptyMap())); } - return search(entityType, queryBuilder, maxResults); + return search(queryBuilder, maxResults); } @Override @@ -163,77 +172,89 @@ public Optional get(String id) { @Override public Optional get(String id, Map tags) { - BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("_id", id)); + BoolQueryBuilder queryBuilder = + QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("_id", id)); if (tags != null) { for (Map.Entry entry : tags.entrySet()) { - // each key/value pair maps to a distinct nested `tags` object and must be a unique query snippet - queryBuilder = queryBuilder.must( - applyTagsToBuilder(null, Collections.singletonMap(entry.getKey(), entry.getValue())) - ); + // each key/value pair maps to a distinct nested `tags` object and must be a unique query + // snippet + queryBuilder = + queryBuilder.must( + applyTagsToBuilder( + null, Collections.singletonMap(entry.getKey(), entry.getValue()))); } } - List entityTags = search(null, queryBuilder, 1); + List entityTags = search(queryBuilder, 1); return entityTags.isEmpty() ? 
Optional.empty() : Optional.of(entityTags.get(0)); } @Override public void index(EntityTags entityTags) { try { - Index action = new Index.Builder(objectMapper.convertValue(prepareForWrite(objectMapper, entityTags), Map.class)) - .index(activeElasticSearchIndex) - .type(getDocumentType(entityTags)) - .id(entityTags.getId()) - .build(); + Index action = + new Index.Builder( + objectMapper.convertValue(prepareForWrite(objectMapper, entityTags), Map.class)) + .index(activeElasticSearchIndex) + .type(mappingTypeName) + .id(URLEncoder.encode(entityTags.getId(), "UTF-8")) + .build(); JestResult jestResult = jestClient.execute(action); if (!jestResult.isSucceeded()) { throw new ElasticSearchException( - format("Failed to index %s, reason: '%s'", entityTags.getId(), jestResult.getErrorMessage()) - ); + format( + "Failed to index %s, reason: '%s'", + entityTags.getId(), jestResult.getErrorMessage())); } } catch (IOException e) { throw new ElasticSearchException( - format("Failed to index %s, reason: '%s'", entityTags.getId(), e.getMessage()) - ); + format("Failed to index %s, reason: '%s'", entityTags.getId(), e.getMessage())); } } @Override public void bulkIndex(Collection multipleEntityTags) { - Lists.partition(new ArrayList<>(multipleEntityTags), 1000).forEach(tags -> { - Bulk.Builder builder = new Bulk.Builder() - .defaultIndex(activeElasticSearchIndex); - - for (EntityTags entityTags : tags) { - builder = builder.addAction( - new Index.Builder(objectMapper.convertValue(prepareForWrite(objectMapper, entityTags), Map.class)) - .index(activeElasticSearchIndex) - .type(getDocumentType(entityTags)) - .id(entityTags.getId()) - .build() - ); - } - - Bulk bulk = builder.build(); - retrySupport.retry( - () -> { - try { - JestResult jestResult = jestClient.execute(bulk); - if (!jestResult.isSucceeded()) { - throw new ElasticSearchException( - format("Failed to index bulk entity tags, reason: '%s'", jestResult.getErrorMessage()) - ); - } - return true; - } catch (IOException e) { - String message = format("Failed to index bulk entity tags, reason: '%s'", e.getMessage()); - log.error(message + " ... retrying!"); - throw new ElasticSearchException(message); - } - }, - 5, 1000, false); - }); + Lists.partition(new ArrayList<>(multipleEntityTags), 1000) + .forEach( + tags -> { + Bulk.Builder builder = new Bulk.Builder().defaultIndex(activeElasticSearchIndex); + + for (EntityTags entityTags : tags) { + Map tag = + objectMapper.convertValue(prepareForWrite(objectMapper, entityTags), Map.class); + builder = + builder.addAction( + new Index.Builder(tag) + .index(activeElasticSearchIndex) + .type(mappingTypeName) + .id(entityTags.getId()) + .build()); + } + + Bulk bulk = builder.build(); + retrySupport.retry( + () -> { + try { + JestResult jestResult = jestClient.execute(bulk); + if (!jestResult.isSucceeded()) { + throw new ElasticSearchException( + format( + "Failed to index bulk entity tags, reason: '%s'", + jestResult.getErrorMessage())); + } + return true; + } catch (IOException e) { + String message = + format("Failed to index bulk entity tags, reason: '%s'", e.getMessage()); + log.error(message + " ... 
retrying!"); + throw new ElasticSearchException(message); + } + }, + 5, + 1000, + false); + }); } @Override @@ -245,87 +266,77 @@ public void delete(String id) { return; } - Delete action = new Delete.Builder(id) - .index(activeElasticSearchIndex) - .type(getDocumentType(entityTags)) - .build(); + Delete action = + new Delete.Builder(id).index(activeElasticSearchIndex).type(mappingTypeName).build(); JestResult jestResult = jestClient.execute(action); if (!jestResult.isSucceeded()) { throw new ElasticSearchException( - format("Failed to delete %s, reason: '%s'", id, jestResult.getErrorMessage()) - ); + format("Failed to delete %s, reason: '%s'", id, jestResult.getErrorMessage())); } } catch (IOException e) { throw new ElasticSearchException( - format("Failed to delete %s, reason: '%s'", id, e.getMessage()) - ); + format("Failed to delete %s, reason: '%s'", id, e.getMessage())); } } @Override public void bulkDelete(Collection multipleEntityTags) { - Lists.partition(new ArrayList<>(multipleEntityTags), 1000).forEach(tags -> { - Bulk.Builder builder = new Bulk.Builder() - .defaultIndex(activeElasticSearchIndex); - - for (EntityTags entityTags : tags) { - builder = builder.addAction( - new Delete.Builder(entityTags.getId()) - .type(getDocumentType(entityTags)) - .build() - ); - } - - Bulk bulk = builder.build(); - try { - JestResult jestResult = jestClient.execute(bulk); - if (!jestResult.isSucceeded()) { - throw new ElasticSearchException( - format("Failed to bulk delete entity tags, reason: '%s'", jestResult.getErrorMessage()) - ); - } - } catch (IOException e) { - throw new ElasticSearchException( - format("Failed to bulk delete entity tags, reason: '%s'", e.getMessage()) - ); - } - }); + Lists.partition(new ArrayList<>(multipleEntityTags), 1000) + .forEach( + tags -> { + Bulk.Builder builder = new Bulk.Builder().defaultIndex(activeElasticSearchIndex); + + for (EntityTags entityTags : tags) { + builder = + builder.addAction( + new Delete.Builder(entityTags.getId()).type(mappingTypeName).build()); + } + + Bulk bulk = builder.build(); + try { + JestResult jestResult = jestClient.execute(bulk); + if (!jestResult.isSucceeded()) { + throw new ElasticSearchException( + format( + "Failed to bulk delete entity tags, reason: '%s'", + jestResult.getErrorMessage())); + } + } catch (IOException e) { + throw new ElasticSearchException( + format("Failed to bulk delete entity tags, reason: '%s'", e.getMessage())); + } + }); } @Override public void reindex() { try { log.info("Deleting Index {}", activeElasticSearchIndex); - jestClient.execute( - new DeleteIndex.Builder(activeElasticSearchIndex).build() - ); + jestClient.execute(new DeleteIndex.Builder(activeElasticSearchIndex).build()); log.info("Deleted Index {}", activeElasticSearchIndex); log.info("Creating Index {}", activeElasticSearchIndex); - jestClient.execute( - new CreateIndex.Builder(activeElasticSearchIndex).build() - ); + jestClient.execute(new CreateIndex.Builder(activeElasticSearchIndex).build()); log.info("Created Index {}", activeElasticSearchIndex); } catch (IOException e) { - throw new ElasticSearchException("Unable to re-create index '" + activeElasticSearchIndex + "'"); + throw new ElasticSearchException( + "Unable to re-create index '" + activeElasticSearchIndex + "'"); } Collection entityTags = front50Service.getAllEntityTags(true); - Collection filteredEntityTags = getElasticSearchEntityTagsReconciler().filter(entityTags); + Collection filteredEntityTags = + getElasticSearchEntityTagsReconciler().filter(entityTags); log.info( - 
"Indexing {} entity tags ({} orphans have been excluded)", - filteredEntityTags.size(), - entityTags.size() - filteredEntityTags.size() - ); + "Indexing {} entity tags ({} orphans have been excluded)", + filteredEntityTags.size(), + entityTags.size() - filteredEntityTags.size()); bulkIndex( - filteredEntityTags - .stream() - .filter(e -> e.getEntityRef() != null) - .collect(Collectors.toList()) - ); + filteredEntityTags.stream() + .filter(e -> e.getEntityRef() != null) + .collect(Collectors.toList())); log.info("Indexed {} entity tags", filteredEntityTags.size()); } @@ -333,98 +344,320 @@ public void reindex() { @Override public Map delta() { Collection allEntityTagsFront50 = front50Service.getAllEntityTags(false); - Map> entityTagsByEntityTypeFront50 = allEntityTagsFront50 - .stream() - .collect(Collectors.groupingBy(e -> - Optional.ofNullable( - Optional.ofNullable(e.getEntityRef()).orElse(new EntityTags.EntityRef()).getEntityType() - ).orElse("unknown") - ) - ); + Map> entityTagsByEntityTypeFront50 = + allEntityTagsFront50.stream() + .collect( + Collectors.groupingBy( + e -> + Optional.ofNullable( + Optional.ofNullable(e.getEntityRef()) + .orElse(new EntityTags.EntityRef()) + .getEntityType()) + .orElse("unknown"))); Map> entityTagsByEntityTypeElasticsearch = new HashMap<>(); - entityTagsByEntityTypeFront50.keySet().forEach(entityType -> - entityTagsByEntityTypeElasticsearch.put(entityType, fetchAll(entityType, 5000, "2m")) - ); + entityTagsByEntityTypeFront50 + .keySet() + .forEach( + entityType -> { + BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery(); + queryBuilder = + queryBuilder.must(QueryBuilders.termQuery("entityRef.entityType", entityType)); + + entityTagsByEntityTypeElasticsearch.put( + entityType, fetchAll(queryBuilder, 5000, "2m")); + }); Map metadata = new HashMap<>(); - entityTagsByEntityTypeFront50.keySet().forEach(entityType -> { - Map entityTypeMetadata = new HashMap<>(); - metadata.put(entityType, entityTypeMetadata); - - Set entityIdsFront50 = entityTagsByEntityTypeFront50.get(entityType).stream() - .map(EntityTags::getId) - .collect(Collectors.toSet()); + entityTagsByEntityTypeFront50 + .keySet() + .forEach( + entityType -> { + Map entityTypeMetadata = new HashMap<>(); + metadata.put(entityType, entityTypeMetadata); + + Set entityIdsFront50 = + entityTagsByEntityTypeFront50.get(entityType).stream() + .map(EntityTags::getId) + .collect(Collectors.toSet()); + + Set entityIdsElasticsearch = + entityTagsByEntityTypeElasticsearch.get(entityType).stream() + .map(EntityTags::getId) + .collect(Collectors.toSet()); + + entityTypeMetadata.put("front50_count", entityIdsFront50.size()); + entityTypeMetadata.put("elasticsearch_count", entityIdsElasticsearch.size()); + + if (!entityIdsFront50.equals(entityIdsElasticsearch)) { + Set entityIdsMissingInFront50 = + entityIdsElasticsearch.stream() + .filter(e -> !entityIdsFront50.contains(e)) + .collect(Collectors.toSet()); + + Set entityIdsMissingInElasticsearch = + entityIdsFront50.stream() + .filter(e -> !entityIdsElasticsearch.contains(e)) + .collect(Collectors.toSet()); + + log.warn( + "'{}' missing in Front50 ({}) {}", + entityType, + entityIdsMissingInFront50.size(), + entityIdsMissingInFront50); + log.warn( + "'{}' missing in Elasticsearch ({}) {}", + entityType, + entityIdsMissingInElasticsearch.size(), + entityIdsMissingInElasticsearch); + + entityTypeMetadata.put("front50_missing", entityIdsMissingInFront50); + entityTypeMetadata.put("front50_missing_count", entityIdsMissingInFront50.size()); + + 
entityTypeMetadata.put("elasticsearch_missing", entityIdsMissingInElasticsearch); + entityTypeMetadata.put( + "elasticsearch_missing_count", entityIdsMissingInElasticsearch.size()); + } + }); - Set entityIdsElasticsearch = entityTagsByEntityTypeElasticsearch.get(entityType).stream() - .map(EntityTags::getId) - .collect(Collectors.toSet()); - - entityTypeMetadata.put("front50_count", entityIdsFront50.size()); - entityTypeMetadata.put("elasticsearch_count", entityIdsElasticsearch.size()); + return metadata; + } - if (!entityIdsFront50.equals(entityIdsElasticsearch)) { - Set entityIdsMissingInFront50 = entityIdsElasticsearch.stream() - .filter(e -> !entityIdsFront50.contains(e)) - .collect(Collectors.toSet()); + @Override + public void verifyIndex(EntityTags entityTags) { + OperationPoller.retryWithBackoff( + o -> { + // verify that the indexed document can be retrieved (accounts for index lag) + Map entityTagsCriteria = new HashMap<>(); + entityTags.getTags().stream() + .filter(entityTag -> entityTag != null && entityTag.getValueType() != null) + .forEach( + entityTag -> { + switch (entityTag.getValueType()) { + case object: + entityTagsCriteria.put(entityTag.getName(), "*"); + break; + default: + entityTagsCriteria.put( + entityTag.getName(), entityTag.getValueForRead(objectMapper)); + } + }); + + if (!get(entityTags.getId(), entityTagsCriteria).isPresent()) { + throw new ElasticSearchException( + format( + "Failed to index %s, reason: 'no document found with id'", entityTags.getId())); + } + return true; + }, + 1000, + 3); + } - Set entityIdsMissingInElasticsearch = entityIdsFront50.stream() - .filter(e -> !entityIdsElasticsearch.contains(e)) - .collect(Collectors.toSet()); + @Override + public Map reconcile(String cloudProvider, String account, String region, boolean dryRun) { + return getElasticSearchEntityTagsReconciler() + .reconcile(this, cloudProvider, account, region, dryRun); + } - log.warn("'{}' missing in Front50 ({}) {}", entityType, entityIdsMissingInFront50.size(), entityIdsMissingInFront50); - log.warn("'{}' missing in Elasticsearch ({}) {}", entityType, entityIdsMissingInElasticsearch.size(), entityIdsMissingInElasticsearch); + @Override + public Map deleteByNamespace( + String namespace, boolean dryRun, boolean deleteFromSource) { + List entityTagsForNamespace = getAllMatchingEntityTags(namespace, null); + + for (EntityTags entityTags : entityTagsForNamespace) { + // ensure that all tags (and their metadata) in the offending namespace are removed + entityTags.setTags( + entityTags.getTags().stream() + .filter(e -> !namespace.equalsIgnoreCase(e.getNamespace())) + .collect(Collectors.toList())); + + Set tagNames = + entityTags.getTags().stream() + .map(e -> e.getName().toLowerCase()) + .collect(Collectors.toSet()); + + entityTags.setTagsMetadata( + entityTags.getTagsMetadata().stream() + .filter(e -> tagNames.contains(e.getName().toLowerCase())) + .collect(Collectors.toList())); + } - entityTypeMetadata.put("front50_missing", entityIdsMissingInFront50); - entityTypeMetadata.put("front50_missing_count", entityIdsMissingInFront50.size()); + Map results = + new HashMap() { + { + put( + "affectedIds", + entityTagsForNamespace.stream() + .map(EntityTags::getId) + .collect(Collectors.toList())); + put("deletedFromSource", false); + put("deletedFromElasticsearch", false); + } + }; + + if (!dryRun) { + bulkIndex(entityTagsForNamespace); + results.put("deletedFromElasticsearch", true); + + if (deleteFromSource) { + ExecutorService executor = + Executors.newFixedThreadPool( + 6, + new 
ThreadFactoryBuilder() + .setNameFormat(ElasticSearchEntityTagsProvider.class.getSimpleName() + "-%d") + .build()); + + AtomicLong countProcessed = new AtomicLong(); + + Lists.partition(entityTagsForNamespace, 50) + .forEach( + entityTags -> + executor.submit( + () -> { + try { + AuthenticatedRequest.allowAnonymous( + () -> front50Service.batchUpdate(entityTags)); + + log.info( + "Deleted {} out of {} tags in namespace {}", + countProcessed.addAndGet(entityTags.size()), + entityTagsForNamespace.size(), + namespace); + } catch (Exception e) { + log.error( + "Failed to delete a batch of tags from front50 in namespace {}", + namespace, + e); + } + })); - entityTypeMetadata.put("elasticsearch_missing", entityIdsMissingInElasticsearch); - entityTypeMetadata.put("elasticsearch_missing_count", entityIdsMissingInElasticsearch.size()); + try { + executor.shutdown(); + executor.awaitTermination(15, TimeUnit.MINUTES); + results.put("deletedFromSource", true); + } catch (InterruptedException e) { + String error = + String.format( + "Failed to bulk remove tags from front50 in namespace %s due to timeout, please try again", + namespace); + + log.error(error, e); + results.put("error", error); + results.put("exception", e); + } } - }); + } - return metadata; + return results; } @Override - public void verifyIndex(EntityTags entityTags) { - OperationPoller.retryWithBackoff(o -> { - // verify that the indexed document can be retrieved (accounts for index lag) - Map entityTagsCriteria = new HashMap<>(); - entityTags.getTags().stream().filter(entityTag -> entityTag != null && entityTag.getValueType() != null).forEach(entityTag -> { - switch (entityTag.getValueType()) { - case object: - entityTagsCriteria.put(entityTag.getName(), "*"); - break; - default: - entityTagsCriteria.put(entityTag.getName(), entityTag.getValueForRead(objectMapper)); + public Map deleteByTag(String tag, boolean dryRun, boolean deleteFromSource) { + List entityTagsForTag = getAllMatchingEntityTags(null, tag); + + for (EntityTags entityTags : entityTagsForTag) { + // ensure that all matching tags (and their metadata) are removed + entityTags.setTags( + entityTags.getTags().stream() + .filter(e -> !tag.equalsIgnoreCase(e.getName())) + .collect(Collectors.toList())); + + Set tagNames = + entityTags.getTags().stream() + .map(e -> e.getName().toLowerCase()) + .collect(Collectors.toSet()); + + entityTags.setTagsMetadata( + entityTags.getTagsMetadata().stream() + .filter(e -> tagNames.contains(e.getName().toLowerCase())) + .collect(Collectors.toList())); + } + + Map results = + new HashMap() { + { + put( + "affectedIds", + entityTagsForTag.stream().map(EntityTags::getId).collect(Collectors.toList())); + put("deletedFromSource", false); + put("deletedFromElasticsearch", false); } - }); + }; - if (!get(entityTags.getId(), entityTagsCriteria).isPresent()) { - throw new ElasticSearchException(format("Failed to index %s, reason: 'no document found with id'", entityTags.getId())); - } - return true; - }, - 1000, - 3 - ); + if (!dryRun) { + bulkIndex(entityTagsForTag); + results.put("deletedFromElasticsearch", true); + + if (deleteFromSource) { + Lists.partition(entityTagsForTag, 50).forEach(front50Service::batchUpdate); + results.put("deletedFromSource", true); + } + } + + return results; } - @Override - public Map reconcile(String cloudProvider, String account, String region, boolean dryRun) { - return getElasticSearchEntityTagsReconciler().reconcile(this, cloudProvider, account, region, dryRun); + private List getAllMatchingEntityTags(String 
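// Editor's note: the bounded fan-out that deleteByNamespace uses above, reduced to its
// skeleton — partition the work, submit each batch to a small named pool, then shut the
// pool down and wait. Pool size (6), batch size (50), and the 15-minute timeout mirror
// the values in this diff; the thread-name format string is illustrative.
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

class BatchedWorkSketch {
  static <T> void processInBatches(List<T> items, Consumer<List<T>> batchAction)
      throws InterruptedException {
    ExecutorService executor =
        Executors.newFixedThreadPool(
            6, new ThreadFactoryBuilder().setNameFormat("batched-work-%d").build());
    Lists.partition(items, 50)
        .forEach(batch -> executor.submit(() -> batchAction.accept(batch)));
    executor.shutdown();
    executor.awaitTermination(15, TimeUnit.MINUTES);
  }
}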
namespace, String tag) { + Set entityTagsIdentifiers = new HashSet<>(); + + List entityTagsForTag = + front50Service.getAllEntityTags(false).stream() + .filter( + e -> + e.getTags().stream() + .anyMatch( + t -> + (namespace != null && namespace.equalsIgnoreCase(t.getNamespace())) + || (tag != null && tag.equalsIgnoreCase(t.getName())))) + .collect(Collectors.toList()); + + entityTagsIdentifiers.addAll( + entityTagsForTag.stream().map(e -> e.getId().toLowerCase()).collect(Collectors.toSet())); + + if (tag != null) { + BoolQueryBuilder queryBuilder = + QueryBuilders.boolQuery() + .must(applyTagsToBuilder(null, Collections.singletonMap(tag, "*"))); + + fetchAll(queryBuilder, 5000, "2m") + .forEach( + entityTags -> { + if (!entityTagsIdentifiers.contains(entityTags.getId())) { + entityTagsForTag.add(entityTags); + entityTagsIdentifiers.add(entityTags.getId().toLowerCase()); + } + }); + } + + if (namespace != null) { + BoolQueryBuilder queryBuilder = + QueryBuilders.boolQuery().must(applyTagsToBuilder(namespace, Collections.emptyMap())); + + fetchAll(queryBuilder, 5000, "2m") + .forEach( + entityTags -> { + if (!entityTagsIdentifiers.contains(entityTags.getId())) { + entityTagsForTag.add(entityTags); + entityTagsIdentifiers.add(entityTags.getId().toLowerCase()); + } + }); + } + + return entityTagsForTag; } private QueryBuilder applyTagsToBuilder(String namespace, Map tags) { BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery(); - for (Map.Entry entry : flatten(new HashMap<>(), null, tags).entrySet()) { - // restrict to specific tags (optional) - boolQueryBuilder.must(QueryBuilders.termQuery("tags.name", entry.getKey())); - if (!entry.getValue().equals("*")) { - boolQueryBuilder.must(QueryBuilders.matchQuery("tags.value", entry.getValue())); + if (tags != null && !tags.isEmpty()) { + for (Map.Entry entry : flatten(new HashMap<>(), null, tags).entrySet()) { + // restrict to specific tags (optional) + boolQueryBuilder.must(QueryBuilders.termQuery("tags.name", entry.getKey())); + if (!entry.getValue().equals("*")) { + boolQueryBuilder.must(QueryBuilders.matchQuery("tags.value", entry.getValue())); + } } } @@ -432,56 +665,55 @@ private QueryBuilder applyTagsToBuilder(String namespace, Map ta boolQueryBuilder.must(QueryBuilders.termQuery("tags.namespace", namespace)); } - return QueryBuilders.nestedQuery("tags", boolQueryBuilder); + return QueryBuilders.nestedQuery("tags", boolQueryBuilder, ScoreMode.Avg); } - /** - * Elasticsearch requires that all search criteria be flattened (vs. nested) - */ - private Map flatten(Map accumulator, String rootKey, Map criteria) { - criteria.forEach((k, v) -> { - if (v instanceof Map) { - flatten(accumulator, (rootKey == null) ? "" + k : rootKey + "." + k, (Map) v); - } else { - accumulator.put((rootKey == null) ? "" + k : rootKey + "." + k, v); - } - } - ); + /** Elasticsearch requires that all search criteria be flattened (vs. nested) */ + private Map flatten( + Map accumulator, String rootKey, Map criteria) { + criteria.forEach( + (k, v) -> { + if (v instanceof Map) { + flatten(accumulator, (rootKey == null) ? "" + k : rootKey + "." + k, (Map) v); + } else { + accumulator.put((rootKey == null) ? "" + k : rootKey + "." 
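// Editor's note: the flatten() helper at this point, shown standalone — nested criteria
// maps collapse into dot-separated keys so they can be matched against the flattened
// document fields, e.g. {a: {b: 1}} becomes {"a.b": 1}.
import java.util.HashMap;
import java.util.Map;

class FlattenSketch {
  @SuppressWarnings("unchecked")
  static Map<String, Object> flatten(
      Map<String, Object> accumulator, String rootKey, Map<String, Object> criteria) {
    criteria.forEach(
        (k, v) -> {
          String key = (rootKey == null) ? k : rootKey + "." + k;
          if (v instanceof Map) {
            flatten(accumulator, key, (Map<String, Object>) v); // recurse into nested maps
          } else {
            accumulator.put(key, v);
          }
        });
    return accumulator;
  }
  // flatten(new HashMap<>(), null, Map.of("a", Map.of("b", 1))) -> {"a.b": 1}
}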
+ k, v); + } + }); return accumulator; } - private List search(String type, QueryBuilder queryBuilder, int maxResults) { - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(queryBuilder).size(maxResults); + private List search(QueryBuilder queryBuilder, int maxResults) { + SearchSourceBuilder searchSourceBuilder = + new SearchSourceBuilder().query(queryBuilder).size(maxResults); String searchQuery = searchSourceBuilder.toString(); - Search.Builder searchBuilder = new Search.Builder(searchQuery).addIndex(activeElasticSearchIndex); - if (type != null) { - // restrict to a specific index type (optional) - searchBuilder.addType(type.toLowerCase()); - } + Search.Builder searchBuilder = + new Search.Builder(searchQuery).addIndex(activeElasticSearchIndex); try { SearchResult searchResult = jestClient.execute(searchBuilder.build()); return searchResult.getHits(Map.class).stream() - .map(h -> h.source) - .map(s -> prepareForRead(objectMapper, s)) - .collect(Collectors.toList()); + .map(h -> h.source) + .map(s -> prepareForRead(objectMapper, s)) + .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException(e); } } - private List fetchAll(String type, int scrollSize, String scrollTime) { + private List fetchAll(QueryBuilder queryBuilder, int scrollSize, String scrollTime) { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(QueryBuilders.matchAllQuery()); + searchSourceBuilder.query(queryBuilder); - Search search = new Search.Builder(searchSourceBuilder.toString()) - .addIndex(activeElasticSearchIndex) - .addType(type) - .setParameter(Parameters.SIZE, scrollSize) - .setParameter(Parameters.SCROLL, scrollTime) - .build(); + Search.Builder builder = + new Search.Builder(searchSourceBuilder.toString()).addIndex(activeElasticSearchIndex); + + Search search = + builder + .setParameter(Parameters.SIZE, scrollSize) + .setParameter(Parameters.SCROLL, scrollTime) + .build(); List allEntityTags = new ArrayList<>(); @@ -498,8 +730,7 @@ private List fetchAll(String type, int scrollSize, String scrollTime try { while (entityTags.size() > 0) { - SearchScroll scroll = new SearchScroll.Builder(scrollId, scrollTime) - .setParameter(Parameters.SIZE, scrollSize).build(); + SearchScroll scroll = new SearchScroll.Builder(scrollId, scrollTime).build(); try { result = jestClient.execute(scroll); @@ -528,11 +759,13 @@ private ElasticSearchEntityTagsReconciler getElasticSearchEntityTagsReconciler() } private static EntityTags prepareForWrite(ObjectMapper objectMapper, EntityTags entityTags) { - EntityTags copyOfEntityTags = objectMapper.convertValue( - objectMapper.convertValue(entityTags, Map.class), EntityTags.class - ); + EntityTags copyOfEntityTags = + objectMapper.convertValue( + objectMapper.convertValue(entityTags, Map.class), EntityTags.class); - copyOfEntityTags.getTags().forEach(entityTag -> entityTag.setValue(entityTag.getValueForWrite(objectMapper))); + copyOfEntityTags + .getTags() + .forEach(entityTag -> entityTag.setValue(entityTag.getValueForWrite(objectMapper))); String application = copyOfEntityTags.getEntityRef().getApplication(); if (application == null || application.trim().isEmpty()) { @@ -541,10 +774,9 @@ private static EntityTags prepareForWrite(ObjectMapper objectMapper, EntityTags copyOfEntityTags.getEntityRef().setApplication(names.getApp()); } catch (Exception e) { log.error( - "Unable to extract application name (entityId: {})", - copyOfEntityTags.getEntityRef().getEntityId(), - e - ); + "Unable to 
extract application name (entityId: {})", + copyOfEntityTags.getEntityRef().getEntityId(), + e); } } @@ -553,16 +785,10 @@ private static EntityTags prepareForWrite(ObjectMapper objectMapper, EntityTags private static EntityTags prepareForRead(ObjectMapper objectMapper, Map indexedEntityTags) { EntityTags entityTags = objectMapper.convertValue(indexedEntityTags, EntityTags.class); - entityTags.getTags().forEach(entityTag -> entityTag.setValue(entityTag.getValueForRead(objectMapper))); + entityTags + .getTags() + .forEach(entityTag -> entityTag.setValue(entityTag.getValueForRead(objectMapper))); return entityTags; } - - private String getDocumentType(EntityTags entityTags) { - if (singleMappingType) { - return mappingTypeName; - } else { - return entityTags.getEntityRef().getEntityType(); - } - } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsReconciler.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsReconciler.java index 2a522cbed93..87fd7031630 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsReconciler.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsReconciler.java @@ -16,68 +16,64 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.model; -import com.google.common.collect.ImmutableMap; +import static com.netflix.spinnaker.clouddriver.tags.EntityTagger.ENTITY_TYPE_SERVER_GROUP; +import com.google.common.collect.ImmutableMap; import com.netflix.spinnaker.clouddriver.core.services.Front50Service; import com.netflix.spinnaker.clouddriver.model.EntityTags; import com.netflix.spinnaker.clouddriver.model.ServerGroupProvider; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import java.util.*; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.tags.EntityTagger.ENTITY_TYPE_SERVER_GROUP; - @Component public class ElasticSearchEntityTagsReconciler { private final Logger log = LoggerFactory.getLogger(getClass()); private final Front50Service front50Service; - private final Map serverGroupProviderByCloudProvider = new HashMap<>(); + private final Map serverGroupProviderByCloudProvider = + new HashMap<>(); @Autowired - public ElasticSearchEntityTagsReconciler(Front50Service front50Service, - Optional> serverGroupProviders) { + public ElasticSearchEntityTagsReconciler( + Front50Service front50Service, + Optional> serverGroupProviders) { this.front50Service = front50Service; for (ServerGroupProvider serverGroupProvider : serverGroupProviders.orElse(new ArrayList<>())) { - serverGroupProviderByCloudProvider.put(serverGroupProvider.getCloudProviderId(), serverGroupProvider); + serverGroupProviderByCloudProvider.put( + serverGroupProvider.getCloudProviderId(), serverGroupProvider); } } /** - * Remove any orphaned entity tags from elastic search in a specific account/region and cloud provider. + * Remove any orphaned entity tags from elastic search in a specific account/region and cloud + * provider. 
*/ - Map reconcile(ElasticSearchEntityTagsProvider entityTagsProvider, - String cloudProvider, - String account, - String region, - boolean dryRun) { + Map reconcile( + ElasticSearchEntityTagsProvider entityTagsProvider, + String cloudProvider, + String account, + String region, + boolean dryRun) { Collection allEntityTags = front50Service.getAllEntityTags(false); - List allServerGroupEntityTags = filter( - allEntityTags, - cloudProvider, - account, - region - ); + List allServerGroupEntityTags = + filter(allEntityTags, cloudProvider, account, region); - List existingServerGroupEntityTags = filter( - Collections.singletonList(cloudProvider), - allServerGroupEntityTags - ); + List existingServerGroupEntityTags = + filter(Collections.singletonList(cloudProvider), allServerGroupEntityTags); log.debug( - "Found {} server group entity tags (valid: {}, invalid: {}, dryRun: {})", - allServerGroupEntityTags.size(), - existingServerGroupEntityTags.size(), - allServerGroupEntityTags.size() - existingServerGroupEntityTags.size(), - dryRun - ); + "Found {} server group entity tags (valid: {}, invalid: {}, dryRun: {})", + allServerGroupEntityTags.size(), + existingServerGroupEntityTags.size(), + allServerGroupEntityTags.size() - existingServerGroupEntityTags.size(), + dryRun); List orphanedServerGroupEntityTags = new ArrayList<>(allServerGroupEntityTags); orphanedServerGroupEntityTags.removeAll(existingServerGroupEntityTags); @@ -89,72 +85,72 @@ Map reconcile(ElasticSearchEntityTagsProvider entityTagsProvider, } return ImmutableMap.builder() - .put("dryRun", dryRun) - .put("orphanCount", orphanedServerGroupEntityTags.size()) - .build(); + .put("dryRun", dryRun) + .put("orphanCount", orphanedServerGroupEntityTags.size()) + .build(); } /** * Filter out any orphaned entity tags that reference a non-existent server group. * - * This is invoked as part of the re-indexing process where historical data is imported from Front50. + *
<p>
This is invoked as part of the re-indexing process where historical data is imported from + * Front50. */ public List filter(Collection entityTags) { return filter(serverGroupProviderByCloudProvider.keySet(), entityTags); } - private List filter(Collection cloudProviders, - Collection entityTags) { - Set serverGroupIdentifiers = serverGroupProviderByCloudProvider.values() - .stream() - .filter(p -> cloudProviders.contains(p.getCloudProviderId())) - .flatMap(p -> p.getServerGroupIdentifiers(null, null).stream()) - .map(String::toLowerCase) - .collect(Collectors.toSet()); - - Set missingServerGroupEntityTags = entityTags - .stream() - .filter(e -> e.getEntityRef() != null) - - // if cloud provider is unknown, entity tags should _not_ be filtered out - .filter(e -> cloudProviders.contains(e.getEntityRef().getCloudProvider())) - - // not all entity types are filterable - .filter(e -> ENTITY_TYPE_SERVER_GROUP.equalsIgnoreCase(e.getEntityRef().getEntityType())) - - // find any entity tags that reference a non-existent server group - .filter(e -> !serverGroupIdentifiers.contains(buildServerGroupIdentifier(e.getEntityRef()))) - .map(EntityTags::getId) - .collect(Collectors.toSet()); - - return entityTags - .stream() - .filter(e -> !missingServerGroupEntityTags.contains(e.getId())) - .collect(Collectors.toList()); + private List filter( + Collection cloudProviders, Collection entityTags) { + Set serverGroupIdentifiers = + serverGroupProviderByCloudProvider.values().stream() + .filter(p -> cloudProviders.contains(p.getCloudProviderId())) + .flatMap(p -> p.getServerGroupIdentifiers(null, null).stream()) + .map(String::toLowerCase) + .collect(Collectors.toSet()); + + Set missingServerGroupEntityTags = + entityTags.stream() + .filter(e -> e.getEntityRef() != null) + + // if cloud provider is unknown, entity tags should _not_ be filtered out + .filter(e -> cloudProviders.contains(e.getEntityRef().getCloudProvider())) + + // not all entity types are filterable + .filter( + e -> ENTITY_TYPE_SERVER_GROUP.equalsIgnoreCase(e.getEntityRef().getEntityType())) + + // find any entity tags that reference a non-existent server group + .filter( + e -> !serverGroupIdentifiers.contains(buildServerGroupIdentifier(e.getEntityRef()))) + .map(EntityTags::getId) + .collect(Collectors.toSet()); + + return entityTags.stream() + .filter(e -> !missingServerGroupEntityTags.contains(e.getId())) + .collect(Collectors.toList()); } - private List filter(Collection entityTags, - String cloudProvider, - String account, - String region) { + private List filter( + Collection entityTags, String cloudProvider, String account, String region) { return entityTags.stream() - .filter(e -> e.getEntityRef() != null) - .filter(e -> ENTITY_TYPE_SERVER_GROUP.equalsIgnoreCase(e.getEntityRef().getEntityType())) - .filter(e -> cloudProvider.equalsIgnoreCase(e.getEntityRef().getCloudProvider())) - .filter(e -> account == null || account.equalsIgnoreCase(e.getEntityRef().getAccount())) - .filter(e -> region == null || region.equalsIgnoreCase(e.getEntityRef().getRegion())) - - // tag must be > 14 days old (temporary safe guard) - .filter(e -> e.getLastModified() < System.currentTimeMillis() - TimeUnit.DAYS.toMillis(14)) - .collect(Collectors.toList()); + .filter(e -> e.getEntityRef() != null) + .filter(e -> ENTITY_TYPE_SERVER_GROUP.equalsIgnoreCase(e.getEntityRef().getEntityType())) + .filter(e -> cloudProvider.equalsIgnoreCase(e.getEntityRef().getCloudProvider())) + .filter(e -> account == null || 
account.equalsIgnoreCase(e.getEntityRef().getAccount())) + .filter(e -> region == null || region.equalsIgnoreCase(e.getEntityRef().getRegion())) + + // tag must be > 14 days old (temporary safe guard) + .filter(e -> e.getLastModified() < System.currentTimeMillis() - TimeUnit.DAYS.toMillis(14)) + .collect(Collectors.toList()); } private String buildServerGroupIdentifier(EntityTags.EntityRef entityRef) { - ServerGroupProvider serverGroupProvider = serverGroupProviderByCloudProvider.get(entityRef.getCloudProvider()); - return serverGroupProvider.buildServerGroupIdentifier( - entityRef.getAccount(), - entityRef.getRegion(), - entityRef.getEntityId() - ).toLowerCase(); + ServerGroupProvider serverGroupProvider = + serverGroupProviderByCloudProvider.get(entityRef.getCloudProvider()); + return serverGroupProvider + .buildServerGroupIdentifier( + entityRef.getAccount(), entityRef.getRegion(), entityRef.getEntityId()) + .toLowerCase(); } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperation.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperation.java index 1952291a98e..2f3dae64c2f 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperation.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperation.java @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.ops; +import static java.lang.String.format; + import com.google.common.collect.Lists; import com.netflix.spinnaker.clouddriver.core.services.Front50Service; import com.netflix.spinnaker.clouddriver.data.task.Task; @@ -29,17 +31,16 @@ import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; import com.netflix.spinnaker.kork.core.RetrySupport; import com.netflix.spinnaker.security.AuthenticatedRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static java.lang.String.format; - -public class BulkUpsertEntityTagsAtomicOperation implements AtomicOperation { - private static final Logger log = LoggerFactory.getLogger(BulkUpsertEntityTagsAtomicOperation.class); +public class BulkUpsertEntityTagsAtomicOperation + implements AtomicOperation { + private static final Logger log = + LoggerFactory.getLogger(BulkUpsertEntityTagsAtomicOperation.class); private static final String BASE_PHASE = "ENTITY_TAGS"; private final RetrySupport retrySupport; @@ -48,11 +49,12 @@ public class BulkUpsertEntityTagsAtomicOperation implements AtomicOperation( - bulkUpsertEntityTagsDescription.entityTags - ); + bulkUpsertEntityTagsDescription.entityTags = + new ArrayList<>(bulkUpsertEntityTagsDescription.entityTags); } else { bulkUpsertEntityTagsDescription.entityTags = new ArrayList<>(); } @@ -79,38 +81,43 @@ public BulkUpsertEntityTagsAtomicOperationResult operate(List priorOutputs) { Date now = new Date(); - Lists.partition(entityTags, 50).forEach(tags -> { - getTask().updateStatus(BASE_PHASE, "Retrieving current entity tags"); - Map existingTags = retrieveExistingTags(tags); - - List modifiedEntityTags = new ArrayList<>(); - getTask().updateStatus(BASE_PHASE, "Merging existing tags and metadata"); - tags.forEach(tag -> { - boolean 
wasModified = mergeExistingTagsAndMetadata( - now, - existingTags.get(tag.getId()), - tag, - bulkUpsertEntityTagsDescription.isPartial - ); - - if (wasModified) { - modifiedEntityTags.add(tag); - } - }); - - if (modifiedEntityTags.isEmpty()) { - getTask().updateStatus(BASE_PHASE, "No tags have been modified"); - return; - } - - getTask().updateStatus(BASE_PHASE, "Performing batch update to durable tagging service"); - Map durableTags = front50Service.batchUpdate(new ArrayList<>(modifiedEntityTags)) - .stream().collect(Collectors.toMap(EntityTags::getId, Function.identity())); - - getTask().updateStatus(BASE_PHASE, "Pushing tags to Elastic Search"); - updateMetadataFromDurableTagsAndIndex(modifiedEntityTags, durableTags, result); - result.upserted.addAll(modifiedEntityTags); - }); + Lists.partition(entityTags, 50) + .forEach( + tags -> { + getTask().updateStatus(BASE_PHASE, "Retrieving current entity tags"); + Map existingTags = retrieveExistingTags(tags); + + List modifiedEntityTags = new ArrayList<>(); + getTask().updateStatus(BASE_PHASE, "Merging existing tags and metadata"); + tags.forEach( + tag -> { + boolean wasModified = + mergeExistingTagsAndMetadata( + now, + existingTags.get(tag.getId()), + tag, + bulkUpsertEntityTagsDescription.isPartial); + + if (wasModified) { + modifiedEntityTags.add(tag); + } + }); + + if (modifiedEntityTags.isEmpty()) { + getTask().updateStatus(BASE_PHASE, "No tags have been modified"); + return; + } + + getTask() + .updateStatus(BASE_PHASE, "Performing batch update to durable tagging service"); + Map durableTags = + front50Service.batchUpdate(new ArrayList<>(modifiedEntityTags)).stream() + .collect(Collectors.toMap(EntityTags::getId, Function.identity())); + + getTask().updateStatus(BASE_PHASE, "Pushing tags to Elastic Search"); + updateMetadataFromDurableTagsAndIndex(modifiedEntityTags, durableTags, result); + result.upserted.addAll(modifiedEntityTags); + }); return result; } @@ -118,96 +125,116 @@ private Map retrieveExistingTags(List entityTags List ids = entityTags.stream().map(EntityTags::getId).collect(Collectors.toList()); try { - return retrySupport.retry(() -> front50Service.getAllEntityTagsById(ids) - .stream() - .collect(Collectors.toMap(EntityTags::getId, Function.identity())), 10, 2000, false); + return retrySupport.retry( + () -> + front50Service.getAllEntityTagsById(ids).stream() + .collect(Collectors.toMap(EntityTags::getId, Function.identity())), + 10, + 2000, + false); } catch (Exception e) { - log.error("Unable to retrieve existing tags from Front50, reason: {} (ids: {})", e.getMessage(), ids); + log.error( + "Unable to retrieve existing tags from Front50, reason: {} (ids: {})", + e.getMessage(), + ids); throw e; } } - private void addTagIdsIfMissing(List entityTags, BulkUpsertEntityTagsAtomicOperationResult result) { + private void addTagIdsIfMissing( + List entityTags, BulkUpsertEntityTagsAtomicOperationResult result) { Collection failed = new ArrayList<>(); - entityTags.forEach(tag -> { - if (tag.getId() == null) { - try { - EntityRefIdBuilder.EntityRefId entityRefId = entityRefId(accountCredentialsProvider, tag); - tag.setId(entityRefId.id); - tag.setIdPattern(entityRefId.idPattern); - } catch (Exception e) { - log.error("Failed to build tag id for {}", tag.getId(), e); - getTask().updateStatus( - BASE_PHASE, format("Failed to build tag id for %s, reason: %s", tag.getId(), e.getMessage()) - ); - failed.add(tag); - result.failures.add(new BulkUpsertEntityTagsAtomicOperationResult.UpsertFailureResult(tag, e)); - } - } - }); + 
entityTags.forEach( + tag -> { + if (tag.getId() == null) { + try { + EntityRefIdBuilder.EntityRefId entityRefId = + entityRefId(accountCredentialsProvider, tag); + tag.setId(entityRefId.id); + tag.setIdPattern(entityRefId.idPattern); + } catch (Exception e) { + log.error("Failed to build tag id for {}", tag.getId(), e); + getTask() + .updateStatus( + BASE_PHASE, + format( + "Failed to build tag id for %s, reason: %s", + tag.getId(), e.getMessage())); + failed.add(tag); + result.failures.add( + new BulkUpsertEntityTagsAtomicOperationResult.UpsertFailureResult(tag, e)); + } + } + }); entityTags.removeAll(failed); } - private void updateMetadataFromDurableTagsAndIndex(List entityTags, - Map durableTags, - BulkUpsertEntityTagsAtomicOperationResult result) { + private void updateMetadataFromDurableTagsAndIndex( + List entityTags, + Map durableTags, + BulkUpsertEntityTagsAtomicOperationResult result) { Collection failed = new ArrayList<>(); - entityTags.forEach(tag -> { - try { - EntityTags durableTag = durableTags.get(tag.getId()); - tag.setLastModified(durableTag.getLastModified()); - tag.setLastModifiedBy(durableTag.getLastModifiedBy()); - } catch (Exception e) { - log.error("Failed to update {} in ElasticSearch", tag.getId(), e); - getTask().updateStatus( - BASE_PHASE, format("Failed to update %s in ElasticSearch, reason: %s", tag.getId(), e.getMessage()) - ); - failed.add(tag); - result.failures.add(new BulkUpsertEntityTagsAtomicOperationResult.UpsertFailureResult(tag, e)); - } - }); + entityTags.forEach( + tag -> { + try { + EntityTags durableTag = durableTags.get(tag.getId()); + tag.setLastModified(durableTag.getLastModified()); + tag.setLastModifiedBy(durableTag.getLastModifiedBy()); + } catch (Exception e) { + log.error("Failed to update {} in ElasticSearch", tag.getId(), e); + getTask() + .updateStatus( + BASE_PHASE, + format( + "Failed to update %s in ElasticSearch, reason: %s", + tag.getId(), e.getMessage())); + failed.add(tag); + result.failures.add( + new BulkUpsertEntityTagsAtomicOperationResult.UpsertFailureResult(tag, e)); + } + }); entityTags.removeAll(failed); getTask().updateStatus(BASE_PHASE, "Indexing tags in ElasticSearch"); entityTagsProvider.bulkIndex(entityTags); - entityTags.forEach(tag -> { - try { - entityTagsProvider.verifyIndex(tag); - } catch (Exception e) { - log.error("Failed to verify {} in ElasticSearch", tag.getId(), e); - getTask().updateStatus( - BASE_PHASE, format("Failed to verify %s in ElasticSearch, reason: %s", tag.getId(), e.getMessage()) - ); - failed.add(tag); - } - }); + entityTags.forEach( + tag -> { + try { + entityTagsProvider.verifyIndex(tag); + } catch (Exception e) { + log.error("Failed to verify {} in ElasticSearch", tag.getId(), e); + getTask() + .updateStatus( + BASE_PHASE, + format( + "Failed to verify %s in ElasticSearch, reason: %s", + tag.getId(), e.getMessage())); + failed.add(tag); + } + }); entityTags.removeAll(failed); } - public static EntityRefIdBuilder.EntityRefId entityRefId(AccountCredentialsProvider accountCredentialsProvider, - EntityTags description) { + public static EntityRefIdBuilder.EntityRefId entityRefId( + AccountCredentialsProvider accountCredentialsProvider, EntityTags description) { EntityTags.EntityRef entityRef = description.getEntityRef(); String entityRefAccount = entityRef.getAccount(); String entityRefAccountId = entityRef.getAccountId(); if (entityRefAccount != null && !entityRefAccount.equals("*") && entityRefAccountId == null) { // add `accountId` if not explicitly provided - AccountCredentials 
accountCredentials = lookupAccountCredentialsByAccountIdOrName( - accountCredentialsProvider, - entityRefAccount, - "accountName" - ); + AccountCredentials accountCredentials = + lookupAccountCredentialsByAccountIdOrName( + accountCredentialsProvider, entityRefAccount, "accountName"); entityRefAccountId = accountCredentials.getAccountId(); entityRef.setAccountId(entityRefAccountId); } if (entityRefAccount == null && entityRefAccountId != null) { // add `account` if not explicitly provided - AccountCredentials accountCredentials = lookupAccountCredentialsByAccountIdOrName( - accountCredentialsProvider, - entityRefAccountId, - "accountId" - ); + AccountCredentials accountCredentials = + lookupAccountCredentialsByAccountIdOrName( + accountCredentialsProvider, entityRefAccountId, "accountId"); if (accountCredentials != null) { entityRefAccount = accountCredentials.getName(); entityRef.setAccount(entityRefAccount); @@ -215,18 +242,15 @@ public static EntityRefIdBuilder.EntityRefId entityRefId(AccountCredentialsProvi } return EntityRefIdBuilder.buildId( - entityRef.getCloudProvider(), - entityRef.getEntityType(), - entityRef.getEntityId(), - Optional.ofNullable(entityRefAccountId).orElse(entityRefAccount), - entityRef.getRegion() - ); + entityRef.getCloudProvider(), + entityRef.getEntityType(), + entityRef.getEntityId(), + Optional.ofNullable(entityRefAccountId).orElse(entityRefAccount), + entityRef.getRegion()); } - public static boolean mergeExistingTagsAndMetadata(Date now, - EntityTags currentTags, - EntityTags updatedTags, - boolean isPartial) { + public static boolean mergeExistingTagsAndMetadata( + Date now, EntityTags currentTags, EntityTags updatedTags, boolean isPartial) { if (currentTags == null) { addTagMetadata(now, updatedTags); return true; @@ -243,8 +267,7 @@ public static boolean mergeExistingTagsAndMetadata(Date now, } updatedTags.setTagsMetadata( - currentTags.getTagsMetadata() == null ? new ArrayList<>() : currentTags.getTagsMetadata() - ); + currentTags.getTagsMetadata() == null ? 
new ArrayList<>() : currentTags.getTagsMetadata()); updatedTags.getTags().forEach(tag -> updatedTags.putEntityTagMetadata(tagMetadata(tag, now))); @@ -253,43 +276,39 @@ public static boolean mergeExistingTagsAndMetadata(Date now, return wasModified; } - /** - * @return true if all {@code target} tags are contained in {@code source}, otherwise false - */ + /** @return true if all {@code target} tags are contained in {@code source}, otherwise false */ private static boolean containedWithin(EntityTags source, EntityTags target) { - return target - .getTags() - .stream() - .allMatch( - updatedTag -> source - .getTags() - .stream() - .anyMatch(currentTag -> - currentTag.getName().equals(updatedTag.getName()) && currentTag.getValue().equals(updatedTag.getValue()) - ) - ); + return target.getTags().stream() + .allMatch( + updatedTag -> + source.getTags().stream() + .anyMatch( + currentTag -> + currentTag.getName().equals(updatedTag.getName()) + && currentTag.getValue().equals(updatedTag.getValue()))); } private static void mergeTags(BulkUpsertEntityTagsDescription bulkUpsertEntityTagsDescription) { List toRemove = new ArrayList<>(); - bulkUpsertEntityTagsDescription.entityTags.forEach(tag -> { - Collection matches = bulkUpsertEntityTagsDescription.entityTags - .stream() - .filter(t -> - t.getId().equals(tag.getId()) && !toRemove.contains(t) && !tag.equals(t) - ) - .collect(Collectors.toList()); - if (matches.size() > 1) { - matches.forEach(m -> tag.getTags().addAll(m.getTags())); - toRemove.addAll(matches); - } - }); + bulkUpsertEntityTagsDescription.entityTags.forEach( + tag -> { + Collection matches = + bulkUpsertEntityTagsDescription.entityTags.stream() + .filter( + t -> t.getId().equals(tag.getId()) && !toRemove.contains(t) && !tag.equals(t)) + .collect(Collectors.toList()); + if (matches.size() > 1) { + matches.forEach(m -> tag.getTags().addAll(m.getTags())); + toRemove.addAll(matches); + } + }); bulkUpsertEntityTagsDescription.entityTags.removeAll(toRemove); } private static void replaceTagContents(EntityTags currentTags, EntityTags entityTagsDescription) { - Map entityTagsByName = entityTagsDescription.getTags().stream() - .collect(Collectors.toMap(EntityTags.EntityTag::getName, x -> x)); + Map entityTagsByName = + entityTagsDescription.getTags().stream() + .collect(Collectors.toMap(EntityTags.EntityTag::getName, x -> x)); currentTags.setTags(entityTagsDescription.getTags()); for (EntityTags.EntityTagMetadata entityTagMetadata : currentTags.getTagsMetadata()) { @@ -299,7 +318,8 @@ private static void replaceTagContents(EntityTags currentTags, EntityTags entity } } - private static EntityTags.EntityTagMetadata tagMetadata(EntityTags.EntityTag entityTag, Date now) { + private static EntityTags.EntityTagMetadata tagMetadata( + EntityTags.EntityTag entityTag, Date now) { String user = AuthenticatedRequest.getSpinnakerUser().orElse("unknown"); String tagName = entityTag.getName(); @@ -323,15 +343,21 @@ private static void addTagMetadata(Date now, EntityTags entityTags) { entityTags.getTags().forEach(tag -> entityTags.putEntityTagMetadata(tagMetadata(tag, now))); } - private static AccountCredentials lookupAccountCredentialsByAccountIdOrName(AccountCredentialsProvider accountCredentialsProvider, - String entityRefAccountIdOrName, - String type) { + private static AccountCredentials lookupAccountCredentialsByAccountIdOrName( + AccountCredentialsProvider accountCredentialsProvider, + String entityRefAccountIdOrName, + String type) { return accountCredentialsProvider.getAll().stream() - .filter(c 
-> entityRefAccountIdOrName.equals(c.getAccountId()) || entityRefAccountIdOrName.equals(c.getName())) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException( - String.format("No credentials found for %s: %s", type, entityRefAccountIdOrName) - )); + .filter( + c -> + entityRefAccountIdOrName.equals(c.getAccountId()) + || entityRefAccountIdOrName.equals(c.getName())) + .findFirst() + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + "No credentials found for %s: %s", type, entityRefAccountIdOrName))); } private static Task getTask() { diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationResult.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationResult.java index 0b5f3888d06..8264f2673b1 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationResult.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationResult.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.ops; import com.netflix.spinnaker.clouddriver.model.EntityTags; - import java.util.ArrayList; import java.util.Collection; diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperation.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperation.java index c0f2f89c54f..2a63f3bde93 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperation.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperation.java @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.ops; +import static java.lang.String.format; + import com.netflix.spinnaker.clouddriver.core.services.Front50Service; import com.netflix.spinnaker.clouddriver.data.task.Task; import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; @@ -23,13 +25,11 @@ import com.netflix.spinnaker.clouddriver.elasticsearch.model.ElasticSearchEntityTagsProvider; import com.netflix.spinnaker.clouddriver.model.EntityTags; import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import retrofit.RetrofitError; - +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException; import java.util.Collection; import java.util.List; import java.util.stream.Collectors; - -import static java.lang.String.format; +import org.springframework.http.HttpStatus; public class DeleteEntityTagsAtomicOperation implements AtomicOperation { private static final String BASE_PHASE = "ENTITY_TAGS"; @@ -38,9 +38,10 @@ public class DeleteEntityTagsAtomicOperation implements AtomicOperation { private final ElasticSearchEntityTagsProvider entityTagsProvider; private final DeleteEntityTagsDescription entityTagsDescription; - public DeleteEntityTagsAtomicOperation(Front50Service front50Service, - ElasticSearchEntityTagsProvider entityTagsProvider, - DeleteEntityTagsDescription entityTagsDescription) { + public DeleteEntityTagsAtomicOperation( + Front50Service front50Service, + ElasticSearchEntityTagsProvider entityTagsProvider, + DeleteEntityTagsDescription entityTagsDescription) { 
this.front50Service = front50Service; this.entityTagsProvider = entityTagsProvider; this.entityTagsDescription = entityTagsDescription; @@ -48,39 +49,63 @@ public DeleteEntityTagsAtomicOperation(Front50Service front50Service, @Override public Void operate(List priorOutputs) { - getTask().updateStatus(BASE_PHASE, format("Retrieving %s from Front50", entityTagsDescription.getId())); + getTask() + .updateStatus( + BASE_PHASE, format("Retrieving %s from Front50", entityTagsDescription.getId())); EntityTags currentTags; try { currentTags = front50Service.getEntityTags(entityTagsDescription.getId()); - } catch (RetrofitError e) { - getTask().updateStatus(BASE_PHASE, format("Did not find %s in Front50", entityTagsDescription.getId())); - - getTask().updateStatus(BASE_PHASE, format("Deleting %s from ElasticSearch", entityTagsDescription.getId())); - entityTagsProvider.delete(entityTagsDescription.getId()); - getTask().updateStatus(BASE_PHASE, format("Deleted %s from ElasticSearch", entityTagsDescription.getId())); - - return null; + } catch (SpinnakerHttpException e) { + if (e.getResponseCode() == HttpStatus.NOT_FOUND.value()) { + getTask() + .updateStatus( + BASE_PHASE, format("Did not find %s in Front50", entityTagsDescription.getId())); + + getTask() + .updateStatus( + BASE_PHASE, + format("Deleting %s from ElasticSearch", entityTagsDescription.getId())); + entityTagsProvider.delete(entityTagsDescription.getId()); + getTask() + .updateStatus( + BASE_PHASE, format("Deleted %s from ElasticSearch", entityTagsDescription.getId())); + + return null; + } + throw e; } - Collection currentTagNames = currentTags.getTags().stream() - .map(EntityTags.EntityTag::getName) - .collect(Collectors.toSet()); + Collection currentTagNames = + currentTags.getTags().stream() + .map(EntityTags.EntityTag::getName) + .collect(Collectors.toSet()); - if (entityTagsDescription.isDeleteAll() || entityTagsDescription.getTags().containsAll(currentTagNames)) { - getTask().updateStatus(BASE_PHASE, format("Deleting %s from ElasticSearch", entityTagsDescription.getId())); + if (entityTagsDescription.isDeleteAll() + || entityTagsDescription.getTags().containsAll(currentTagNames)) { + getTask() + .updateStatus( + BASE_PHASE, format("Deleting %s from ElasticSearch", entityTagsDescription.getId())); entityTagsProvider.delete(entityTagsDescription.getId()); - getTask().updateStatus(BASE_PHASE, format("Deleted %s from ElasticSearch", entityTagsDescription.getId())); + getTask() + .updateStatus( + BASE_PHASE, format("Deleted %s from ElasticSearch", entityTagsDescription.getId())); - getTask().updateStatus(BASE_PHASE, format("Deleting %s from Front50", entityTagsDescription.getId())); + getTask() + .updateStatus( + BASE_PHASE, format("Deleting %s from Front50", entityTagsDescription.getId())); front50Service.deleteEntityTags(entityTagsDescription.getId()); - getTask().updateStatus(BASE_PHASE, format("Deleted %s from Front50", entityTagsDescription.getId())); + getTask() + .updateStatus( + BASE_PHASE, format("Deleted %s from Front50", entityTagsDescription.getId())); return null; } - getTask().updateStatus( - BASE_PHASE, - format("Removing tags from %s (tags: %s)", entityTagsDescription.getId(), entityTagsDescription.getTags()) - ); + getTask() + .updateStatus( + BASE_PHASE, + format( + "Removing tags from %s (tags: %s)", + entityTagsDescription.getId(), entityTagsDescription.getTags())); entityTagsDescription.getTags().forEach(currentTags::removeEntityTag); EntityTags durableEntityTags = front50Service.saveEntityTags(currentTags); @@ 
-92,7 +117,9 @@ public Void operate(List priorOutputs) { entityTagsProvider.index(currentTags); entityTagsProvider.verifyIndex(currentTags); - getTask().updateStatus(BASE_PHASE, format("Updated %s in ElasticSearch", entityTagsDescription.getId())); + getTask() + .updateStatus( + BASE_PHASE, format("Updated %s in ElasticSearch", entityTagsDescription.getId())); return null; } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/UpsertEntityTagsAtomicOperation.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/UpsertEntityTagsAtomicOperation.java index e76223011b6..bd7243ed6a9 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/UpsertEntityTagsAtomicOperation.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/ops/UpsertEntityTagsAtomicOperation.java @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.ops; +import static com.netflix.spinnaker.clouddriver.elasticsearch.ops.BulkUpsertEntityTagsAtomicOperation.entityRefId; + import com.netflix.spinnaker.clouddriver.core.services.Front50Service; import com.netflix.spinnaker.clouddriver.data.task.Task; import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; @@ -26,9 +28,9 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; import com.netflix.spinnaker.kork.core.RetrySupport; - import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; public class UpsertEntityTagsAtomicOperation implements AtomicOperation { @@ -40,11 +42,12 @@ public class UpsertEntityTagsAtomicOperation implements AtomicOperation { private final ElasticSearchEntityTagsProvider entityTagsProvider; private final UpsertEntityTagsDescription entityTagsDescription; - public UpsertEntityTagsAtomicOperation(RetrySupport retrySupport, - Front50Service front50Service, - AccountCredentialsProvider accountCredentialsProvider, - ElasticSearchEntityTagsProvider entityTagsProvider, - UpsertEntityTagsDescription tagEntityDescription) { + public UpsertEntityTagsAtomicOperation( + RetrySupport retrySupport, + Front50Service front50Service, + AccountCredentialsProvider accountCredentialsProvider, + ElasticSearchEntityTagsProvider entityTagsProvider, + UpsertEntityTagsDescription tagEntityDescription) { this.retrySupport = retrySupport; this.front50Service = front50Service; this.accountCredentialsProvider = accountCredentialsProvider; @@ -57,23 +60,25 @@ public Void operate(List priorOutputs) { bulkDescription.isPartial = entityTagsDescription.isPartial; bulkDescription.entityTags = Collections.singletonList(entityTagsDescription); - getTask().updateStatus( - BASE_PHASE, - String.format( - "Updating entity tags for %s (isPartial: %s, tags: %s)", - entityTagsDescription.getId(), - entityTagsDescription.isPartial, - entityTagsDescription.getTags().stream().map(EntityTags.EntityTag::getName).collect(Collectors.joining(", ")) - ) - ); + getTask() + .updateStatus( + BASE_PHASE, + String.format( + "Updating entity tags for %s (isPartial: %s, tags: %s)", + Optional.ofNullable(entityTagsDescription.getId()) + .orElse(entityRefId(accountCredentialsProvider, entityTagsDescription).id), + entityTagsDescription.isPartial, + entityTagsDescription.getTags().stream() + .map(EntityTags.EntityTag::getName) + 
.collect(Collectors.joining(", ")))); new BulkUpsertEntityTagsAtomicOperation( - retrySupport, - front50Service, - accountCredentialsProvider, - entityTagsProvider, - bulkDescription - ).operate(priorOutputs); + retrySupport, + front50Service, + accountCredentialsProvider, + entityTagsProvider, + bulkDescription) + .operate(priorOutputs); return null; } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/validators/BulkUpsertEntityTagsDescriptionValidator.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/validators/BulkUpsertEntityTagsDescriptionValidator.java index ab3dea6e42d..5b58a835722 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/validators/BulkUpsertEntityTagsDescriptionValidator.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/clouddriver/elasticsearch/validators/BulkUpsertEntityTagsDescriptionValidator.java @@ -17,24 +17,28 @@ package com.netflix.spinnaker.clouddriver.elasticsearch.validators; import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; import com.netflix.spinnaker.clouddriver.elasticsearch.descriptions.BulkUpsertEntityTagsDescription; +import java.util.List; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; @Component("bulkUpsertEntityTagsDescriptionValidator") -public class BulkUpsertEntityTagsDescriptionValidator extends DescriptionValidator { +public class BulkUpsertEntityTagsDescriptionValidator + extends DescriptionValidator { - @Value("${entityTags.maxConcurrentBulkTags:1000}") + @Value("${entity-tags.max-concurrent-bulk-tags:1000}") Integer maxConcurrentBulkTags; @Override - public void validate(List priorDescriptions, BulkUpsertEntityTagsDescription description, Errors errors) { + public void validate( + List priorDescriptions, + BulkUpsertEntityTagsDescription description, + ValidationErrors errors) { if (description.entityTags != null && description.entityTags.size() > maxConcurrentBulkTags) { - errors.rejectValue("entityTags.length", - "Max number of entity tags that can be submitted at once is " + maxConcurrentBulkTags); + errors.rejectValue( + "entityTags.length", + "Max number of entity tags that can be submitted at once is " + maxConcurrentBulkTags); } } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfig.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfig.java index fd1c992bf4c..f763b40e9e4 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfig.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfig.java @@ -16,13 +16,10 @@ package com.netflix.spinnaker.config; -import com.netflix.spinnaker.clouddriver.core.services.Front50Service; import com.netflix.spinnaker.clouddriver.elasticsearch.ElasticSearchEntityTagger; import com.netflix.spinnaker.clouddriver.elasticsearch.converters.DeleteEntityTagsAtomicOperationConverter; import com.netflix.spinnaker.clouddriver.elasticsearch.converters.UpsertEntityTagsAtomicOperationConverter; import com.netflix.spinnaker.clouddriver.elasticsearch.model.ElasticSearchEntityTagsProvider; -import 
com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.kork.core.RetrySupport; import io.searchbox.client.JestClient; import io.searchbox.client.JestClientFactory; import io.searchbox.client.config.HttpClientConfig; @@ -33,7 +30,7 @@ import org.springframework.context.annotation.Configuration; @Configuration -@ConditionalOnProperty({"elasticSearch.connection"}) +@ConditionalOnProperty({"elastic-search.connection"}) @ComponentScan({"com.netflix.spinnaker.clouddriver.elasticsearch"}) @EnableConfigurationProperties(ElasticSearchConfigProperties.class) public class ElasticSearchConfig { @@ -43,21 +40,24 @@ JestClient jestClient(ElasticSearchConfigProperties elasticSearchConfigPropertie JestClientFactory factory = new JestClientFactory(); - HttpClientConfig.Builder builder = new HttpClientConfig.Builder(elasticSearchConnection) - .readTimeout(elasticSearchConfigProperties.getReadTimeout()) - .connTimeout(elasticSearchConfigProperties.getConnectionTimeout()) - .multiThreaded(true); + HttpClientConfig.Builder builder = + new HttpClientConfig.Builder(elasticSearchConnection) + .readTimeout(elasticSearchConfigProperties.getReadTimeout()) + .connTimeout(elasticSearchConfigProperties.getConnectionTimeout()) + .multiThreaded(true); factory.setHttpClientConfig(builder.build()); return factory.getObject(); } @Bean - ElasticSearchEntityTagger elasticSearchEntityTagger(ElasticSearchEntityTagsProvider elasticSearchEntityTagsProvider, - UpsertEntityTagsAtomicOperationConverter upsertEntityTagsAtomicOperationConverter, - DeleteEntityTagsAtomicOperationConverter deleteEntityTagsAtomicOperationConverter) { + ElasticSearchEntityTagger elasticSearchEntityTagger( + ElasticSearchEntityTagsProvider elasticSearchEntityTagsProvider, + UpsertEntityTagsAtomicOperationConverter upsertEntityTagsAtomicOperationConverter, + DeleteEntityTagsAtomicOperationConverter deleteEntityTagsAtomicOperationConverter) { return new ElasticSearchEntityTagger( - elasticSearchEntityTagsProvider, upsertEntityTagsAtomicOperationConverter, deleteEntityTagsAtomicOperationConverter - ); + elasticSearchEntityTagsProvider, + upsertEntityTagsAtomicOperationConverter, + deleteEntityTagsAtomicOperationConverter); } } diff --git a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfigProperties.java b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfigProperties.java index 059e20f3447..2db29d72f32 100644 --- a/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfigProperties.java +++ b/clouddriver-elasticsearch/src/main/java/com/netflix/spinnaker/config/ElasticSearchConfigProperties.java @@ -18,7 +18,7 @@ import org.springframework.boot.context.properties.ConfigurationProperties; -@ConfigurationProperties("elasticSearch") +@ConfigurationProperties("elastic-search") public class ElasticSearchConfigProperties { private String activeIndex; private String connection; @@ -26,12 +26,14 @@ public class ElasticSearchConfigProperties { private int readTimeout = 30000; private int connectionTimeout = 10000; - // As of Elasticsearch 6.0, new indices can only contain a single mapping type. Setting singleMappingType to true - // will index all documents under a single mapping type. When singleMappingType is false (which is the default), + // As of Elasticsearch 6.0, new indices can only contain a single mapping type. Setting + // singleMappingType to true + // will index all documents under a single mapping type. 
When singleMappingType is false (which + // is the default), // the mapping type of each document is set to the type of the entity being tagged. - // The name of the unique mapping type is configurable as mappingTypeName, but is defaulted to "_doc", which is + // The name of the unique mapping type is configurable as mappingTypeName, but is defaulted to + // "_doc", which is // recommended for forward compatibility with Elasticsearch 7.0. - private boolean singleMappingType = false; private String mappingTypeName = "_doc"; public String getActiveIndex() { @@ -66,14 +68,6 @@ public void setConnectionTimeout(int connectionTimeout) { this.connectionTimeout = connectionTimeout; } - public void setSingleMappingType(boolean singleMappingType) { - this.singleMappingType = singleMappingType; - } - - public boolean isSingleMappingType() { - return singleMappingType; - } - public void setMappingTypeName(String mappingTypeName) { this.mappingTypeName = mappingTypeName; } diff --git a/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProviderSpec.groovy b/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProviderSpec.groovy index 7a3d53e7fb7..efb9f1e9ea9 100644 --- a/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProviderSpec.groovy +++ b/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/model/ElasticSearchEntityTagsProviderSpec.groovy @@ -24,27 +24,32 @@ import com.netflix.spinnaker.config.ElasticSearchConfig import com.netflix.spinnaker.config.ElasticSearchConfigProperties import com.netflix.spinnaker.kork.core.RetrySupport import io.searchbox.client.JestClient +import io.searchbox.client.JestResult import io.searchbox.indices.CreateIndex import io.searchbox.indices.DeleteIndex -import org.elasticsearch.common.settings.Settings -import org.elasticsearch.node.Node +import io.searchbox.indices.Refresh +import io.searchbox.indices.template.PutTemplate import org.springframework.context.ApplicationContext +import org.testcontainers.DockerClientFactory +import org.testcontainers.elasticsearch.ElasticsearchContainer +import spock.lang.Requires import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll -import static org.elasticsearch.node.NodeBuilder.nodeBuilder +import java.util.function.Supplier +@Requires({ DockerClientFactory.instance().isDockerAvailable() }) class ElasticSearchEntityTagsProviderSpec extends Specification { - @Shared - Node node - @Shared JestClient jestClient @Shared ElasticSearchConfigProperties elasticSearchConfigProperties + @Shared + ElasticsearchContainer esContainer = new ElasticsearchContainer("docker.elastic.co/elasticsearch/elasticsearch:6.8.2") + RetrySupport retrySupport = Spy(RetrySupport) { _ * sleep(_) >> { /* do nothing */ } } @@ -59,54 +64,117 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { } def setupSpec() { - def elasticSearchSettings = Settings.settingsBuilder() - .put("script.inline", "on") - .put("script.indexed", "on") - .put("path.data", "./es-tmp/es") - .put("path.home", "./es-tmp/es") - - node = nodeBuilder() - .local(true) - .settings(elasticSearchSettings.build()) - .node() + esContainer.start() elasticSearchConfigProperties = new ElasticSearchConfigProperties( activeIndex: "tags_v1", - connection: "http://localhost:9200" + connection: "http://" + 
esContainer.getHttpHostAddress() ) def config = new ElasticSearchConfig() jestClient = config.jestClient(elasticSearchConfigProperties) } + def cleanupSpec() { + esContainer.stop() + } + def setup() { - jestClient.execute(new DeleteIndex.Builder(elasticSearchConfigProperties.activeIndex).build()); + jestClient.execute(new DeleteIndex.Builder(elasticSearchConfigProperties.activeIndex).build()) def settings = """{ + "order": 0, + "index_patterns": [ + "tags_v*" + ], "settings": { - "refresh_interval": "1s" + "index": { + "number_of_shards": "1", + "number_of_replicas": "1", + "refresh_interval": "-1" + } }, "mappings": { - "_default_": { + "_doc": { + "dynamic": "false", + "dynamic_templates": [ + { + "tags_template": { + "path_match": "tagsMetadata", + "mapping": { + "index": "no" + } + } + }, + { + "entityRef_template": { + "path_match": "entityRef.*", + "mapping": { + "index": "keyword" + } + } + } + ], "properties": { - "tags": { - "type": "nested" + "id": { + "type": "text" }, "entityRef": { "properties": { + "accountId": { + "type": "keyword" + }, + "application": { + "type": "keyword" + }, + "entityType": { + "type": "text" + }, + "cloudProvider": { + "type": "keyword" + }, "entityId": { - "type": "string", - "index": "not_analyzed" + "type": "keyword" + }, + "region": { + "type": "keyword" + }, + "account": { + "type": "keyword" + } + } + }, + "tags": { + "type": "nested", + "properties": { + "valueType": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "value": { + "type": "keyword" } } } } } - } + }, + "aliases": {} }""" - jestClient.execute(new CreateIndex.Builder(elasticSearchConfigProperties.activeIndex) - .settings(settings) - .build()); + def result = jestClient.execute( + new PutTemplate.Builder("tags_v1", settings).build() + ) + assert(result.succeeded) + + result = jestClient + .execute(new CreateIndex.Builder(elasticSearchConfigProperties.activeIndex) + .build()) + assert(result.succeeded) entityTagsProvider = new ElasticSearchEntityTagsProvider( applicationContext, @@ -122,6 +190,7 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { given: def entityTags = buildEntityTags("aws:cluster:front50-main:myaccount:*", ["tag1": "value1", "tag2": "value2"]) entityTagsProvider.index(entityTags) + refreshIndices() entityTagsProvider.verifyIndex(entityTags) expect: @@ -132,12 +201,14 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { !entityTagsProvider.get(entityTags.id, ["tag3": "value3"]).isPresent() } + @Unroll def "should assign entityRef.application if not specified"() { given: def entityTags = buildEntityTags("aws:cluster:front50-main:myaccount:*", ["tag1": "value1", "tag2": "value2"]) entityTags.entityRef.application = application entityTagsProvider.index(entityTags) + refreshIndices() entityTagsProvider.verifyIndex(entityTags) expect: @@ -155,10 +226,12 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { given: def entityTags = buildEntityTags("aws:cluster:clouddriver-main:myaccount:*", ["tag3": "value3"]) entityTagsProvider.index(entityTags) + refreshIndices() entityTagsProvider.verifyIndex(entityTags) def moreEntityTags = buildEntityTags("aws:cluster:front50-main:myaccount:*", ["tag1": "value1"]) entityTagsProvider.index(moreEntityTags) + refreshIndices() entityTagsProvider.verifyIndex(moreEntityTags) expect: @@ -222,6 +295,7 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { when: entityTagsProvider.reindex() + refreshIndices() then: 1 * 
front50Service.getAllEntityTags(true) >> { return allEntityTags } @@ -240,28 +314,89 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { ] allEntityTags.each { entityTagsProvider.index(it) + refreshIndices() entityTagsProvider.verifyIndex(it) } when: entityTagsProvider.bulkDelete(allEntityTags) + refreshIndices() then: verifyNotIndexed(allEntityTags[0]) verifyNotIndexed(allEntityTags[1]) verifyNotIndexed(allEntityTags[2]) - } - boolean verifyNotIndexed(EntityTags entityTags) { - return (1..5).any { - if (!entityTagsProvider.get(entityTags.id).isPresent()) { - return true - } + def "should delete all entity tags in namespace"() { + given: + def allEntityTags = [ + buildEntityTags("titus:servergroup:clouddriver-main-^1.0.0-v150:myaccount:us-west-1", ["a": "1"], "my_namespace"), + buildEntityTags("aws:servergroup:clouddriver-main-v001:myaccount:us-west-1", ["a": "1"], "my_namespace"), + buildEntityTags("aws:servergroup:clouddriver-main-v002:myaccount:us-west-1", ["b": "2"], "my_namespace"), + buildEntityTags("aws:servergroup:clouddriver-main-v003:myaccount:us-west-1", ["c": "3"]), + ] + allEntityTags.each { + entityTagsProvider.index(it) + refreshIndices() + entityTagsProvider.verifyIndex(it) + } + + when: + entityTagsProvider.deleteByNamespace("my_namespace", true, false) // dry-run + + then: + 1 * front50Service.getAllEntityTags(false) >> { + return entityTagsProvider.getAll( + null, null, null, null, null, null, null, null, [:], 100 + ) + } + 0 * _ + + when: + entityTagsProvider.deleteByNamespace("my_namespace", true, true) // dry-run + + then: + 1 * front50Service.getAllEntityTags(false) >> { + return entityTagsProvider.getAll( + null, null, null, null, null, null, null, null, [:], 100 + ) + } + 0 * _ + + when: + entityTagsProvider.deleteByNamespace("my_namespace", false, false) // remove from elasticsearch (only!) 
+ refreshIndices() + + def allIndexedEntityTags = entityTagsProvider.getAll( + null, null, null, null, null, null, null, null, [:], 100 + ) - Thread.sleep(500) - return false + then: + 1 * front50Service.getAllEntityTags(false) >> { + return entityTagsProvider.getAll( + null, null, null, null, null, null, null, null, [:], 100 + ) } + _ * retrySupport.retry(_, _, _, _) >> { Supplier fn, int maxRetries, long retryBackoff, boolean exponential -> fn.get() } + 0 * _ + + allIndexedEntityTags.findAll { + it.tags.any { it.namespace == "my_namespace"} + }.isEmpty() + + when: + entityTagsProvider.deleteByNamespace("my_namespace", false, true) // remove from elasticsearch and front50 + + then: + 1 * front50Service.getAllEntityTags(false) >> { return allEntityTags } + 1 * front50Service.batchUpdate(_) + _ * retrySupport.retry(_, _, _, _) >> { Supplier fn, int maxRetries, long retryBackoff, boolean exponential -> fn.get() } + 0 * _ + } + + boolean verifyNotIndexed(EntityTags entityTags) { + return !entityTagsProvider.get(entityTags.id).isPresent() } private static EntityTags buildEntityTags(String id, Map tags, String namespace = "default") { @@ -278,4 +413,12 @@ class ElasticSearchEntityTagsProviderSpec extends Specification { ) ) } + + private void refreshIndices() { + JestResult result = jestClient.execute(new Refresh.Builder().build()) + if (!result.isSucceeded()) { + throw new ElasticSearchException( + String.format("Failed to refresh index: %s", result.getErrorMessage())) + } + } } diff --git a/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationSpec.groovy b/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationSpec.groovy index 9f321db2fe9..0a9d2dc0d2b 100644 --- a/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationSpec.groovy +++ b/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/BulkUpsertEntityTagsAtomicOperationSpec.groovy @@ -266,7 +266,7 @@ class BulkUpsertEntityTagsAtomicOperationSpec extends Specification { void 'should detect whether entity tags have been modified'() { given: EntityTags currentTags = new EntityTags( - tags: buildTags(current) + tags: buildTags(cur) ) EntityTags updatedTags = new EntityTags( tags: buildTags(updated) @@ -278,7 +278,7 @@ class BulkUpsertEntityTagsAtomicOperationSpec extends Specification { ) == expectedToBeModified where: - current | updated | isPartial || expectedToBeModified + cur | updated | isPartial || expectedToBeModified [foo: "bar"] | [foo: "bar"] | false || false [foo: "bar"] | [foo: "bar"] | true || false [foo: "bar"] | [foo: "not bar"] | false || true diff --git a/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperationSpec.groovy b/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperationSpec.groovy index 3509d150707..39c178475d1 100644 --- a/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperationSpec.groovy +++ b/clouddriver-elasticsearch/src/test/groovy/com/netflix/spinnaker/clouddriver/elasticsearch/ops/DeleteEntityTagsAtomicOperationSpec.groovy @@ -22,7 +22,10 @@ import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import 
com.netflix.spinnaker.clouddriver.elasticsearch.descriptions.DeleteEntityTagsDescription import com.netflix.spinnaker.clouddriver.elasticsearch.model.ElasticSearchEntityTagsProvider import com.netflix.spinnaker.clouddriver.model.EntityTags +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import org.springframework.http.HttpStatus import retrofit.RetrofitError +import retrofit.client.Response import spock.lang.Specification class DeleteEntityTagsAtomicOperationSpec extends Specification { @@ -44,12 +47,17 @@ class DeleteEntityTagsAtomicOperationSpec extends Specification { } void 'should remove entityTag from ElasticSearch if not found in Front50'() { + given: + RetrofitError notFoundRetrofitError = RetrofitError.httpError("url", + new Response("url", HttpStatus.NOT_FOUND.value(), "Application Not Found", [], null), + null, null) + SpinnakerHttpException spinnakerHttpException = new SpinnakerHttpException(notFoundRetrofitError) when: description.id = 'abc' operation.operate([]) then: - 1 * front50Service.getEntityTags('abc') >> { throw new RetrofitError("a", null, null, null, null, null, null) } + 1 * front50Service.getEntityTags('abc') >> { throw spinnakerHttpException } 1 * entityTagsProvider.delete('abc') 0 * _ } diff --git a/clouddriver-eureka/README.md b/clouddriver-eureka/README.md index e40af3c80f8..6787375ac95 100644 --- a/clouddriver-eureka/README.md +++ b/clouddriver-eureka/README.md @@ -25,8 +25,8 @@ The provider also supports a region placeholder: regions: - us-west-1 readOnlyUrl: "http://myhostname.{{region}}.mycompany.com:8080/eureka/v2" - -Each account definition in AWS must also define a `discovery` field that denotes the URL for the writeable eureka. + +Each account definition in AWS must also define a `discovery` field that denotes the URL for the writeable eureka. ``` - name: test @@ -38,7 +38,7 @@ Each account definition in AWS must also define a `discovery` field that denotes - name: us-east-1 ``` -By default, only one Eureka is supported per AWS account. If you have multiple +By default, only one Eureka is supported per AWS account. If you have multiple `aws.accounts` configured in clouddriver which share an accountId, and wish to use a separate Eureka for each, then you can enable support by setting `eureka.provider.allowMultipleEurekaPerAccount` to true. For example: @@ -82,4 +82,4 @@ name of the AWS account (`aws.enabled.accounts[].name`) with which it shares the same Eureka. Please note that `eureka.provider.allowMultipleEurekaPerAccount` only works with AWS as the cloud provider. Additionally, this feature is not supported -in the titus integration. +in the titus integration. 
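The `{{region}}` placeholder shown in the README above is expanded per configured region before any Eureka client is built. A minimal Groovy sketch of that substitution (the hostname and regions are illustrative values taken from the README; the replacement call mirrors the one in `EurekaUtil.getWritableEureka` later in this change):

```groovy
import java.util.regex.Pattern

// Illustrative values; real URLs and regions come from the eureka.provider account config.
def readOnlyUrl = 'http://myhostname.{{region}}.mycompany.com:8080/eureka/v2'

['us-west-1', 'us-east-1'].each { region ->
  // Same substitution EurekaUtil applies before building its RestAdapter.
  println readOnlyUrl.replaceAll(Pattern.quote('{{region}}'), region)
}
```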
diff --git a/clouddriver-eureka/clouddriver-eureka.gradle b/clouddriver-eureka/clouddriver-eureka.gradle index 04be2000a2a..f76c3572633 100644 --- a/clouddriver-eureka/clouddriver-eureka.gradle +++ b/clouddriver-eureka/clouddriver-eureka.gradle @@ -1,3 +1,21 @@ dependencies { - compile project(":clouddriver-core") + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-web" + implementation "io.spinnaker.kork:kork-retrofit" + implementation "com.amazonaws:aws-java-sdk" + implementation "com.jakewharton.retrofit:retrofit1-okhttp3-client" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-starter-web" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" } diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaApiFactory.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaApiFactory.groovy index df963297672..e2a9027354c 100644 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaApiFactory.groovy +++ b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaApiFactory.groovy @@ -16,23 +16,28 @@ package com.netflix.spinnaker.clouddriver.eureka.api +import com.jakewharton.retrofit.Ok3Client +import com.netflix.spinnaker.config.OkHttp3ClientConfiguration +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import retrofit.RestAdapter import retrofit.converter.Converter -import java.util.regex.Pattern - class EurekaApiFactory { private Converter eurekaConverter + private OkHttp3ClientConfiguration okHttp3ClientConfiguration - EurekaApiFactory(Converter eurekaConverter) { + EurekaApiFactory(Converter eurekaConverter, OkHttp3ClientConfiguration okHttp3ClientConfiguration) { this.eurekaConverter = eurekaConverter + this.okHttp3ClientConfiguration = okHttp3ClientConfiguration } public EurekaApi createApi(String endpoint) { new RestAdapter.Builder() .setConverter(eurekaConverter) + .setClient(new Ok3Client(okHttp3ClientConfiguration.create().build())) .setEndpoint(endpoint) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .build() .create(EurekaApi) } diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaConfig.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaConfig.groovy deleted file mode 100644 index 19dc334c626..00000000000 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/api/EurekaConfig.groovy +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.eureka.api - -import com.fasterxml.jackson.databind.DeserializationFeature -import com.fasterxml.jackson.databind.MapperFeature -import com.fasterxml.jackson.databind.ObjectMapper -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import retrofit.converter.Converter -import retrofit.converter.JacksonConverter - -@Configuration -class EurekaConfig { - @Bean - Converter eurekaConverter() { - new JacksonConverter(new ObjectMapper() - .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) - .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY) - .enable(DeserializationFeature.UNWRAP_ROOT_VALUE) - .enable(MapperFeature.AUTO_DETECT_CREATORS)) - } - - @Bean - EurekaApiFactory eurekaApiFactory(Converter eurekaConverter) { - new EurekaApiFactory(eurekaConverter) - } - -} diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupport.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupport.groovy index a4b1c1df400..78d78146628 100644 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupport.groovy +++ b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupport.groovy @@ -21,11 +21,13 @@ import com.netflix.spinnaker.clouddriver.eureka.api.Eureka import com.netflix.spinnaker.clouddriver.helpers.EnableDisablePercentageCategorizer import com.netflix.spinnaker.clouddriver.model.ClusterProvider import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import groovy.transform.InheritConstructors import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError import retrofit.client.Response @Slf4j @@ -49,9 +51,11 @@ abstract class AbstractEurekaSupport { Task task, String phaseName, DiscoveryStatus discoveryStatus, - List instanceIds) { + List instanceIds, + boolean strict = false) { updateDiscoveryStatusForInstances( - description, task, phaseName, discoveryStatus, instanceIds, eurekaSupportConfigurationProperties.retryMax, eurekaSupportConfigurationProperties.retryMax + description, task, phaseName, discoveryStatus, instanceIds, + eurekaSupportConfigurationProperties.retryMax, eurekaSupportConfigurationProperties.retryMax, strict ) } @@ -62,7 +66,8 @@ abstract class AbstractEurekaSupport { DiscoveryStatus discoveryStatus, List instanceIds, int findApplicationNameRetryMax, - int updateEurekaRetryMax) { + int updateEurekaRetryMax, + boolean strict = false) { if (eurekaSupportConfigurationProperties == null) { throw new IllegalStateException("eureka configuration not supplied") @@ -70,27 +75,30 @@ 
abstract class AbstractEurekaSupport { def eureka = getEureka(description.credentials, description.region) def random = new Random() + def instanceDetails = null def applicationName = null def targetHealthyDeployPercentage = description.targetHealthyDeployPercentage != null ? description.targetHealthyDeployPercentage : 100 + if (targetHealthyDeployPercentage < 0 || targetHealthyDeployPercentage > 100) { throw new NumberFormatException("targetHealthyDeployPercentage must be an integer between 0 and 100") } else if (targetHealthyDeployPercentage < 100) { AbstractEurekaSupport.log.info("Marking ${description.asgName} instances ${discoveryStatus.value} with targetHealthyDeployPercentage ${targetHealthyDeployPercentage}") } + try { - applicationName = retry(task, phaseName, findApplicationNameRetryMax) { retryCount -> + (applicationName, instanceDetails) = retry(task, phaseName, findApplicationNameRetryMax) { retryCount -> def instanceId = instanceIds[random.nextInt(instanceIds.size())] task.updateStatus phaseName, "Looking up discovery application name for instance $instanceId (attempt: $retryCount)" - def instanceDetails = eureka.getInstanceInfo(instanceId) - def appName = instanceDetails?.instance?.app + def details = eureka.getInstanceInfo(instanceId) + def appName = details?.instance?.app if (!appName) { throw new RetryableException("Looking up instance application name in Discovery failed for instance ${instanceId} (attempt: $retryCount)") } - return appName + return [appName, details] } } catch (e) { - if (discoveryStatus == DiscoveryStatus.Enable || verifyInstanceAndAsgExist(description.credentials, description.region, null, description.asgName)) { + if (discoveryStatus == DiscoveryStatus.UP || verifyInstanceAndAsgExist(description.credentials, description.region, null, description.asgName)) { throw e } } @@ -103,17 +111,18 @@ abstract class AbstractEurekaSupport { def errors = [:] def fatals = [] + List skipped = [] int index = 0 for (String instanceId : instanceIds) { if (index > 0) { sleep eurekaSupportConfigurationProperties.throttleMillis } - if (discoveryStatus == DiscoveryStatus.Disable) { + if (discoveryStatus == DiscoveryStatus.OUT_OF_SERVICE) { if (index % eurekaSupportConfigurationProperties.attemptShortCircuitEveryNInstances == 0) { try { def hasUpInstances = doesCachedClusterContainDiscoveryStatus( - clusterProviders, description.credentialAccount, description.region, description.asgName, "UP" + clusterProviders, description.account, description.region, description.asgName, "UP" ) if (hasUpInstances.present && !hasUpInstances.get()) { // there are no UP instances, we can return early @@ -121,7 +130,7 @@ abstract class AbstractEurekaSupport { break } } catch (Exception e) { - def account = description.credentialAccount + def account = description.account def region = description.region def asgName = description.asgName AbstractEurekaSupport.log.error("[$phaseName] - Unable to verify cached discovery status (account: ${account}, region: ${region}, asgName: ${asgName}", e) @@ -135,25 +144,36 @@ abstract class AbstractEurekaSupport { Response resp - if (discoveryStatus == DiscoveryStatus.Disable) { + if (discoveryStatus == DiscoveryStatus.OUT_OF_SERVICE) { resp = eureka.updateInstanceStatus(applicationName, instanceId, discoveryStatus.value) } else { - resp = eureka.resetInstanceStatus(applicationName, instanceId, DiscoveryStatus.Disable.value) + resp = eureka.resetInstanceStatus(applicationName, instanceId, DiscoveryStatus.OUT_OF_SERVICE.value) } - if (resp.status != 200) { + 
if (resp?.status != 200) { throw new RetryableException("Non HTTP 200 response from discovery for instance ${instanceId}, will retry (attempt: $retryCount}).") } } - } catch (RetrofitError retrofitError) { - if (retrofitError.response?.status == 404 && discoveryStatus == DiscoveryStatus.Disable) { - task.updateStatus phaseName, "Could not find ${instanceId} in application $applicationName in discovery, skipping disable operation." + } catch (SpinnakerServerException e) { + def alwaysSkippable = e instanceof SpinnakerHttpException && ((SpinnakerHttpException)e).getResponseCode() == 404 + def willSkip = alwaysSkippable || !strict + def skippingOrNot = willSkip ? "skipping" : "not skipping" + + String errorMessage = "Failed updating status of $instanceId to '$discoveryStatus' in application '$applicationName' in discovery" + + " and strict=$strict, $skippingOrNot operation." + + // in strict mode, only 404 errors are ignored + if (!willSkip) { + errors[instanceId] = e } else { - errors[instanceId] = retrofitError + skipped.add(instanceId) } + + task.updateStatus phaseName, errorMessage } catch (ex) { errors[instanceId] = ex } + if (errors[instanceId]) { if (verifyInstanceAndAsgExist(description.credentials, description.region, instanceId, description.asgName)) { fatals.add(instanceId) @@ -161,8 +181,10 @@ abstract class AbstractEurekaSupport { task.updateStatus phaseName, "Instance '${instanceId}' does not exist and will not be marked as '${discoveryStatus.value}'" } } + index++ } + if (fatals) { Integer requiredInstances = Math.ceil(instanceIds.size() * targetHealthyDeployPercentage / 100D) as Integer if (instanceIds.size() - fatals.size() >= requiredInstances) { @@ -174,6 +196,12 @@ abstract class AbstractEurekaSupport { AbstractEurekaSupport.log.info("[$phaseName] - Failed marking discovery $discoveryStatus.value for instances ${errors}") } } + + if (!skipped.isEmpty()) { + task.addResultObjects([ + ["discoverySkippedInstanceIds": skipped] + ]) + } } def retry(Task task, String phaseName, int maxRetries, Closure c) { @@ -193,22 +221,24 @@ abstract class AbstractEurekaSupport { retryCount++ sleep(getDiscoveryRetryMs()); - } catch (RetrofitError re) { + } catch (SpinnakerServerException e) { if (retryCount >= (maxRetries - 1)) { - throw re + throw e } - AbstractEurekaSupport.log.debug("[$phaseName] - Failed calling external service ${re.message}") + AbstractEurekaSupport.log.debug("[$phaseName] - Failed calling external service ${e.getMessage()}") - if (re.kind == RetrofitError.Kind.NETWORK || re.response.status == 404 || re.response.status == 406) { + if ( (e instanceof SpinnakerNetworkException) + || ((e instanceof SpinnakerHttpException) && ((SpinnakerHttpException) e).responseCode == 406) + || ((e instanceof SpinnakerHttpException) && ((SpinnakerHttpException) e).responseCode == 404)) { retryCount++ sleep(getDiscoveryRetryMs()) - } else if (re.response.status >= 500) { + } else if (e instanceof SpinnakerHttpException && ((SpinnakerHttpException) e).responseCode >= 500) { // automatically retry on server errors (but wait a little longer between attempts) sleep(getDiscoveryRetryMs() * 10) retryCount++ } else { - throw re + throw e } } catch (AmazonServiceException ase) { if (ase.statusCode == 503) { @@ -264,43 +294,57 @@ abstract class AbstractEurekaSupport { return serverGroup } + /** + * Returns a list of instanceIds to disable. Only really used for RollingRedBlack strategy. + * The list represents the given percentage of "enabled" instances. 
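+ * For example, with five eligible instances and a desiredPercentage of 20, a single + * instance id is returned (the spec below exercises exactly this case).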
+ * An enabled instance is one that has at least 1 health provider indicating it's UP + * and zero health providers indicating it's DOWN. + * + * @param account + * @param region + * @param asgName + * @param instances instanceIDs to pick from + * @param desiredPercentage (0-100) + * @return list of instance IDs + */ List getInstanceToModify(String account, String region, String asgName, List instances, int desiredPercentage) { ServerGroup serverGroup = getCachedServerGroup(clusterProviders, account, region, asgName) if (!serverGroup) { return [] } - Set modified = [] - Set unmodified = [] + Set ineligible = [] + Set eligible = [] instances.each { instanceId -> def instanceInExistingServerGroup = serverGroup.instances.find { it.name == instanceId } + if (instanceInExistingServerGroup) { - boolean isUp = false - instanceInExistingServerGroup.health?.flatten()?.each { Map health -> - if (DiscoveryStatus.Enable.value.equalsIgnoreCase(health?.eurekaStatus)) { - isUp = true - } + boolean anyDown = instanceInExistingServerGroup.health?.flatten()?.any { + Map health -> ("down".compareToIgnoreCase(health.state ?: "") == 0) + } + boolean anyUp = instanceInExistingServerGroup.health?.flatten()?.any { + Map health -> ("up".compareToIgnoreCase(health.state ?: "") == 0) } - if (isUp) { - unmodified.add(instanceId) + if (anyUp && !anyDown) { + eligible.add(instanceId) } else { - modified.add(instanceId) + ineligible.add(instanceId) } } } return EnableDisablePercentageCategorizer.getInstancesToModify( - modified as List, - unmodified as List, + ineligible as List, + eligible as List, desiredPercentage ) } enum DiscoveryStatus { - Enable('UP'), - Disable('OUT_OF_SERVICE') + UP('UP'), + OUT_OF_SERVICE('OUT_OF_SERVICE') String value diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/EurekaUtil.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/EurekaUtil.groovy index 60e4a3d58cd..a0268b17918 100644 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/EurekaUtil.groovy +++ b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/EurekaUtil.groovy @@ -16,9 +16,11 @@ package com.netflix.spinnaker.clouddriver.eureka.deploy.ops import com.netflix.spinnaker.clouddriver.eureka.api.Eureka +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import org.apache.http.impl.client.HttpClients import retrofit.RestAdapter import retrofit.client.ApacheClient +import retrofit.converter.JacksonConverter import java.util.concurrent.atomic.AtomicReference import java.util.regex.Pattern @@ -27,7 +29,12 @@ class EurekaUtil { static Eureka getWritableEureka(String endpoint, String region) { String eurekaEndpoint = endpoint.replaceAll(Pattern.quote('{{region}}'), region) - new RestAdapter.Builder().setEndpoint(eurekaEndpoint).setClient(getApacheClient()).build().create(Eureka) + new RestAdapter.Builder() + .setEndpoint(eurekaEndpoint) + .setClient(getApacheClient()) + .setConverter(new JacksonConverter()) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) + .build().create(Eureka) } //Lazy-create apache client on request if there is a discoveryEnabled AmazonCredentials: diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/model/EurekaInstance.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/model/EurekaInstance.groovy index 073a1947722..5592b2c2ae0 
100644 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/model/EurekaInstance.groovy +++ b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/model/EurekaInstance.groovy @@ -86,6 +86,16 @@ class EurekaInstance extends DiscoveryHealth { healthState = HealthState.Down } + // if this has an asgName and is not part of a titus task registration, + // prefer the app name derived from the asg name rather than the supplied + // app name. We index these records on application to associate them to + // a particular cluster, and if the name is incorrect the record will + // not be properly linked + if (metadata?.titusTaskId == null && asgName != null) { + def idx = asgName.indexOf('-') + def appFromAsg = idx == -1 ? asgName : asgName.substring(0, idx) + app = appFromAsg + } //the preferred instanceId value comes from DataCenterInfo Metadata // Jackson was doing some shenanigans whereby the top level registration diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgent.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgent.groovy index 675abfe35b7..0db3f5db13a 100644 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgent.groovy +++ b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgent.groovy @@ -31,7 +31,9 @@ import com.netflix.spinnaker.clouddriver.core.provider.agent.HealthProvidingCach import com.netflix.spinnaker.clouddriver.eureka.api.EurekaApi import com.netflix.spinnaker.clouddriver.eureka.model.EurekaApplication import com.netflix.spinnaker.clouddriver.eureka.model.EurekaApplications -import com.netflix.spinnaker.clouddriver.eureka.model.EurekaInstance +import com.netflix.spinnaker.clouddriver.model.HealthState +import com.netflix.spinnaker.kork.core.RetrySupport +import com.netflix.spinnaker.security.AuthenticatedRequest import groovy.util.logging.Slf4j import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH @@ -49,6 +51,7 @@ class EurekaCachingAgent implements CachingAgent, HealthProvidingCachingAgent, C final String healthId = "Discovery" private final long pollIntervalMillis private final long timeoutMillis + private final RetrySupport retry = new RetrySupport() private List eurekaAwareProviderList @@ -91,43 +94,69 @@ class EurekaCachingAgent implements CachingAgent, HealthProvidingCachingAgent, C @Override CacheResult loadData(ProviderCache providerCache) { log.info("Describing items in ${agentType}") - EurekaApplications disco = eurekaApi.loadEurekaApplications() + EurekaApplications disco = AuthenticatedRequest.allowAnonymous({ + retry.retry({ eurekaApi.loadEurekaApplications() }, 3, 100, false) + }) - Collection eurekaCacheData = new LinkedList() - Collection instanceCacheData = new LinkedList() + Map> instanceHealthRelationships = [:].withDefault { new HashSet() } + Map> eurekaInstances = [:].withDefault { [] } for (EurekaApplication application : disco.applications) { - Map> convertedInstancesById = ((List) objectMapper.convertValue( - application.instances.findAll { it.instanceId }, - new TypeReference>>() {} - )).collectEntries { - [it.instanceId, it] - } + List> instanceAttributes = objectMapper.convertValue(application.instances, + new TypeReference>>() {}) - for (EurekaInstance instance : application.instances) { - if (instance.instanceId) { - 
Map attributes = convertedInstancesById[instance.instanceId] + for (Map attributes : instanceAttributes) { + if (attributes.instanceId) { attributes.eurekaAccountName = eurekaAccountName attributes.allowMultipleEurekaPerAccount = allowMultipleEurekaPerAccount + attributes.application = application.name.toLowerCase() + eurekaAwareProviderList.each { provider -> if (provider.isProviderForEurekaRecord(attributes)) { String instanceKey = provider.getInstanceKey(attributes, region) if (instanceKey) { String instanceHealthKey = provider.getInstanceHealthKey(attributes, region, healthId) - Map> relationships = [(INSTANCES.ns): [instanceKey]] - eurekaCacheData.add(new DefaultCacheData(instanceHealthKey, attributes, relationships)) + instanceHealthRelationships[instanceKey].add(instanceHealthKey) + Map> healthRelationship = [(INSTANCES.ns): [instanceKey]] + eurekaInstances[instanceHealthKey].add(new DefaultCacheData(instanceHealthKey, attributes, healthRelationship)) } } } } } } + Collection instanceCacheData = instanceHealthRelationships.collect { instanceId, healths -> + new DefaultCacheData(instanceId, Collections.emptyMap(), [(HEALTH.ns): healths]) + } + + Set dupeDetected = [] + Collection eurekaCacheData = eurekaInstances.values().findResults { List cacheDatas -> + if (cacheDatas.size() == 1) { + return cacheDatas[0] + } + + cacheDatas.sort(new EurekaHealthComparator()) + def data = cacheDatas.first() + dupeDetected.add(data.id) + return data + } + if (dupeDetected) { + log.warn("Duplicate eureka records found for instances: $dupeDetected") + } log.info("Caching ${eurekaCacheData.size()} items in ${agentType}") new DefaultCacheResult( (INSTANCES.ns): instanceCacheData, (HEALTH.ns): eurekaCacheData) } + private static class EurekaHealthComparator implements Comparator { + @Override + int compare(CacheData a, CacheData b) { + return HealthState.fromString(a.attributes.state) <=> HealthState.fromString(b.attributes.state) ?: + (Long) b.attributes.lastUpdatedTimestamp <=> (Long) a.attributes.lastUpdatedTimestamp + } + } + @Override long getPollIntervalMillis() { return pollIntervalMillis diff --git a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/config/EurekaProviderConfiguration.groovy b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/config/EurekaProviderConfiguration.groovy index a460b3e2120..47963bb923c 100644 --- a/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/config/EurekaProviderConfiguration.groovy +++ b/clouddriver-eureka/src/main/groovy/com/netflix/spinnaker/config/EurekaProviderConfiguration.groovy @@ -16,22 +16,32 @@ package com.netflix.spinnaker.config +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.MapperFeature import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.Registry import com.netflix.spinnaker.clouddriver.eureka.api.EurekaApiFactory import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.EurekaSupportConfigurationProperties import com.netflix.spinnaker.clouddriver.eureka.provider.EurekaCachingProvider import com.netflix.spinnaker.clouddriver.eureka.provider.agent.EurekaAwareProvider import com.netflix.spinnaker.clouddriver.eureka.provider.agent.EurekaCachingAgent import com.netflix.spinnaker.clouddriver.eureka.provider.config.EurekaAccountConfigurationProperties +import com.netflix.spinnaker.okhttp.OkHttp3MetricsInterceptor +import com.netflix.spinnaker.okhttp.OkHttpClientConfigurationProperties +import org.springframework.beans.factory.annotation.Autowired 
import org.springframework.beans.factory.annotation.Value -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.boot.context.properties.bind.Bindable +import org.springframework.boot.context.properties.bind.Binder +import org.springframework.boot.context.properties.source.ConfigurationPropertyName import org.springframework.context.annotation.Bean import org.springframework.context.annotation.ComponentScan import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope +import org.springframework.core.env.Environment +import retrofit.converter.Converter +import retrofit.converter.JacksonConverter import java.util.regex.Pattern @@ -40,25 +50,59 @@ import java.util.regex.Pattern @ConditionalOnProperty('eureka.provider.enabled') @ComponentScan(["com.netflix.spinnaker.clouddriver.eureka"]) class EurekaProviderConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) + + @Autowired + Registry registry + + @Autowired + Environment environment + @Bean @ConfigurationProperties("eureka.provider") EurekaAccountConfigurationProperties eurekaConfigurationProperties() { new EurekaAccountConfigurationProperties() } - @Value('${eureka.pollIntervalMillis:15000}') + private OkHttpClientConfigurationProperties eurekaClientConfig() { + OkHttpClientConfigurationProperties properties = + new OkHttpClientConfigurationProperties( + propagateSpinnakerHeaders: false, + connectTimoutMs: 10000, + keyStore: null, + trustStore: null) + Binder.get(environment).bind( + ConfigurationPropertyName.of("eureka.readonly.ok-http-client"), + Bindable.ofInstance(properties)) + return properties + } + + private static Converter eurekaConverter() { + new JacksonConverter(new ObjectMapper() + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY) + .enable(DeserializationFeature.UNWRAP_ROOT_VALUE) + .enable(MapperFeature.AUTO_DETECT_CREATORS)) + } + + private EurekaApiFactory eurekaApiFactory(OkHttpMetricsInterceptorProperties okHttpMetricsInterceptorProperties) { + OkHttp3ClientConfiguration config = new OkHttp3ClientConfiguration(eurekaClientConfig(), + new OkHttp3MetricsInterceptor({ registry }, okHttpMetricsInterceptorProperties)) + return new EurekaApiFactory(eurekaConverter(), config) + } + + @Value('${eureka.poll-interval-millis:15000}') Long pollIntervalMillis - @Value('${eureka.timeoutMillis:300000}') + @Value('${eureka.timeout-millis:300000}') Long timeoutMillis @Bean EurekaCachingProvider eurekaCachingProvider(EurekaAccountConfigurationProperties eurekaAccountConfigurationProperties, + OkHttpMetricsInterceptorProperties okHttpMetricsInterceptorProperties, List eurekaAwareProviderList, - ObjectMapper objectMapper, - EurekaApiFactory eurekaApiFactory) { + ObjectMapper objectMapper) { List agents = [] + def eurekaApiFactory = eurekaApiFactory(okHttpMetricsInterceptorProperties) eurekaAccountConfigurationProperties.accounts.each { EurekaAccountConfigurationProperties.EurekaAccount accountConfig -> accountConfig.regions.each { region -> String eurekaHost = accountConfig.readOnlyUrl.replaceAll(Pattern.quote('{{region}}'), region) diff --git 
a/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupportSpec.groovy b/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupportSpec.groovy index 12cee831096..1bc457a5fe3 100644 --- a/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupportSpec.groovy +++ b/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/deploy/ops/AbstractEurekaSupportSpec.groovy @@ -16,22 +16,26 @@ package com.netflix.spinnaker.clouddriver.eureka.deploy.ops +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask +import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.eureka.api.Eureka import com.netflix.spinnaker.clouddriver.model.ClusterProvider import com.netflix.spinnaker.clouddriver.model.Instance import com.netflix.spinnaker.clouddriver.model.ServerGroup +import retrofit.client.Response import spock.lang.Specification import spock.lang.Subject -import spock.lang.Unroll; +import spock.lang.Unroll class AbstractEurekaSupportSpec extends Specification { def clusterProvider = Mock(ClusterProvider) + def eureka = Mock(Eureka) @Subject def eurekaSupport = new MyEurekaSupport(clusterProviders: [clusterProvider]) @Unroll - def ""() { + def "identifies up instances to disable"() { when: def instancesToModify = eurekaSupport.getInstanceToModify("test", "us-west-2", "asg-v001", allInstances, percentageToDisable) @@ -39,7 +43,7 @@ class AbstractEurekaSupportSpec extends Specification { 1 * clusterProvider.getServerGroup("test", "us-west-2", "asg-v001") >> { return serverGroup( instancesInServerGroup.collect { - instance(it.key, [["eurekaStatus": it.value], ["notEurekaStatus": it.value]]) + instance(it.key, [["state": it.value]]) } ) } @@ -47,14 +51,15 @@ class AbstractEurekaSupportSpec extends Specification { instancesToModify == expectedInstancesToModify where: - allInstances | instancesInServerGroup | percentageToDisable || expectedInstancesToModify - ["i-1", "i-2"] | ["i-1": "UP"] | 50 || ["i-1"] // i-2 doesn't actually exist so should be skipped - ["i-1", "i-2"] | ["i-1": "OUT_OF_SERVICE"] | 50 || [] // i-1 is already disabled - ["i-1", "i-2"] | ["i-1": "UP"] | 1 || ["i-1"] // always round up when determining what to disable - ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 100 || ["i-1", "i-2", "i-3"] - ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 30 || ["i-1"] - ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 60 || ["i-1", "i-2"] - ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 90 || ["i-1", "i-2", "i-3"] + allInstances | instancesInServerGroup | percentageToDisable || expectedInstancesToModify + ["i-1", "i-2"] | ["i-1": "UP"] | 50 || ["i-1"] // i-2 doesn't actually exist so should be skipped + ["i-1", "i-2"] | ["i-1": "OUT_OF_SERVICE"] | 50 || [] // i-1 is already disabled + ["i-1", "i-2"] | ["i-1": "UP"] | 1 || ["i-1"] // always round up when determining what to disable + ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 100 || ["i-1", "i-2", "i-3"] + ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 30 || ["i-1"] + ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 60 || ["i-1", "i-2"] + ["i-1", "i-2", "i-3"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP"] | 90 || ["i-1", "i-2", "i-3"] + ["i-1", "i-2", "i-3", "i-4", "i-5"] | ["i-1": "UP", "i-2": "UP", "i-3": "UP", "i-4": "UP", "i-5": "UP"] | 20 || ["i-1"] } ServerGroup serverGroup(List instances) { @@ -73,12 +79,12 @@ class AbstractEurekaSupportSpec extends Specification { class MyEurekaSupport extends AbstractEurekaSupport { @Override Eureka getEureka(Object credentials, String region) { - throw new UnsupportedOperationException() + return eureka } @Override boolean verifyInstanceAndAsgExist(Object credentials, String region, String instanceId, String asgName) { - throw new UnsupportedOperationException() + return true } } } diff --git a/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgentSpec.groovy b/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgentSpec.groovy new file mode 100644 index 00000000000..1f2457641eb --- /dev/null +++ b/clouddriver-eureka/src/test/groovy/com/netflix/spinnaker/clouddriver/eureka/provider/agent/EurekaCachingAgentSpec.groovy @@ -0,0 +1,126 @@ +package com.netflix.spinnaker.clouddriver.eureka.provider.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.eureka.api.EurekaApi +import com.netflix.spinnaker.clouddriver.eureka.model.DataCenterInfo +import com.netflix.spinnaker.clouddriver.eureka.model.DataCenterMetadata +import com.netflix.spinnaker.clouddriver.eureka.model.EurekaApplication +import com.netflix.spinnaker.clouddriver.eureka.model.EurekaApplications +import com.netflix.spinnaker.clouddriver.eureka.model.EurekaInstance +import com.netflix.spinnaker.clouddriver.model.HealthState +import spock.lang.Specification + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES + +class EurekaCachingAgentSpec extends Specification { + def providerCache = Stub(ProviderCache) + def eurekaApi = Stub(EurekaApi) + def eap = new TestEurekaAwareProvider() + + def agent = new EurekaCachingAgent(eurekaApi, "us-foo-2", new ObjectMapper(), "http://eureka", "true", "eureka-foo", [eap], 0, 0) + + def "it should cache instances"() { + given: + eurekaApi.loadEurekaApplications() >> new EurekaApplications(applications: [ + new EurekaApplication(name: "foo", instances: [ + instance("foo", "i-1", "UP"), + instance("foo", "i-2", "UP") + ]) + ]) + + when: + def result = agent.loadData(providerCache) + + then: + result.cacheResults.size() == 2 + result.cacheResults[HEALTH.ns].size() == 2 + result.cacheResults[INSTANCES.ns].size() == 2 + result.cacheResults[HEALTH.ns]*.id.sort() == ["us-foo-2:i-1:Discovery", "us-foo-2:i-2:Discovery"] + result.cacheResults[INSTANCES.ns]*.id.sort() == ["us-foo-2:i-1", "us-foo-2:i-2"] + } + + def "it should dedupe multiple discovery records preferring HealthState order"() { + given: + 
eurekaApi.loadEurekaApplications() >> new EurekaApplications(applications: [ + new EurekaApplication(name: "foo", instances: [ + instance("foo", "i-1", "UP", 12345), + instance("foo", "i-1", "UP", 23451), + instance("foo", "i-1", "UP", 12344) + ]) + ]) + + when: + def result = agent.loadData(providerCache) + + then: + result.cacheResults.size() == 2 + result.cacheResults[HEALTH.ns].size() == 1 + result.cacheResults[INSTANCES.ns].size() == 1 + result.cacheResults[HEALTH.ns].first().attributes.lastUpdatedTimestamp == 23451 + + } + + private static EurekaInstance instance(String app, String id, String status, Long timestamp = System.currentTimeMillis()) { + EurekaInstance.buildInstance( + "host", + app, + "127.0.0.1", + status, + "UNKNOWN", + new DataCenterInfo( + name: "my-dc", + metadata: new DataCenterMetadata( + accountId: "foo", + availabilityZone: "us-foo-2a", + amiId: "ami-foo", + instanceId: id, + instanceType: "m3.megabig")), + "/status", + "/healthcheck", + id, + id, + timestamp, + "$app-v000", + null, + id) + } + + static class TestEurekaAwareProvider implements EurekaAwareProvider { + @Override + Boolean isProviderForEurekaRecord(Map attributes) { + return true + } + + @Override + String getInstanceKey(Map attributes, String region) { + return "$region:$attributes.instanceId" + } + + @Override + String getInstanceHealthKey(Map attributes, String region, String healthId) { + return "$region:$attributes.instanceId:$healthId" + } + } +} diff --git a/clouddriver-event/clouddriver-event.gradle b/clouddriver-event/clouddriver-event.gradle new file mode 100644 index 00000000000..0c70d4ab7b8 --- /dev/null +++ b/clouddriver-event/clouddriver-event.gradle @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +apply from: "$rootDir/gradle/kotlin.gradle" +apply from: "$rootDir/gradle/kotlin-test.gradle" + +dependencies { + annotationProcessor "org.springframework.boot:spring-boot-autoconfigure-processor" + + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "com.google.guava:guava" + implementation "com.google.code.findbugs:jsr305" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "com.fasterxml.jackson.module:jackson-module-kotlin" + implementation "javax.validation:validation-api" + implementation "org.hibernate.validator:hibernate-validator" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.assertj:assertj-core" + testImplementation "io.strikt:strikt-core" + testImplementation "dev.minutest:minutest" + testImplementation "io.mockk:mockk" +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/config/EventConfiguration.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/config/EventConfiguration.kt new file mode 100644 index 00000000000..11f6d8c5200 --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/config/EventConfiguration.kt @@ -0,0 +1,24 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.config + +import org.springframework.context.annotation.ComponentScan +import org.springframework.context.annotation.Configuration + +@Configuration +@ComponentScan(basePackages = ["com.netflix.spinnaker.clouddriver.event"]) +open class EventConfiguration diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/AbstractSpinnakerEvent.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/AbstractSpinnakerEvent.kt new file mode 100644 index 00000000000..050cb4d5579 --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/AbstractSpinnakerEvent.kt @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.event + +import com.fasterxml.jackson.annotation.JsonIgnore +import com.netflix.spinnaker.clouddriver.event.exceptions.UninitializedEventException + +/** + * WARNING: Do not use this base class with Lombok events, you will have a bad time! Only use in Kotlin classes. + * For some reason, Lombok / Jackson can't find methods to deserialize, so the Java classes have to implement the + * interface directly. I'm not sure if this is a result of writing in Kotlin, or an issue in Lombok and/or Jackson. + */ +abstract class AbstractSpinnakerEvent : SpinnakerEvent { + /** + * Not a lateinit to make Java/Lombok & Jackson compatibility a little easier, although behavior is exactly the same. + */ + private var metadata: EventMetadata? = null + + @JsonIgnore + override fun getMetadata(): EventMetadata { + return metadata ?: throw UninitializedEventException() + } + + override fun setMetadata(eventMetadata: EventMetadata) { + metadata = eventMetadata + } + + fun hasMetadata() = metadata != null +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/Aggregate.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/Aggregate.kt new file mode 100644 index 00000000000..63100735e60 --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/Aggregate.kt @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.event + +/** + * The identifiable collection of an event log. + * + * Aggregates are grouped by a [type] which should be unique for each domain entity, with unique + * [id] values therein. A [version] field is used to ensure business logic is operating on the + * latest event state; any modification to an [Aggregate] event log will increment this value. + * When an operation is attempted on a [version] which is not head, the event framework will + * reject the change. + * + * TODO(rz): Add `currentSequence` to make resuming aggregate processing in-flight easier. + */ +class Aggregate( + val type: String, + val id: String, + var version: Long +) { + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (javaClass != other?.javaClass) return false + + other as Aggregate + + if (type != other.type) return false + if (id != other.id) return false + + return true + } + + override fun hashCode(): Int { + var result = type.hashCode() + result = 31 * result + id.hashCode() + return result + } +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/CompositeSpinnakerEvent.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/CompositeSpinnakerEvent.kt new file mode 100644 index 00000000000..23b8930e02f --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/CompositeSpinnakerEvent.kt @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.event + +import com.fasterxml.jackson.annotation.JsonIgnore + +/** + * Marks a [SpinnakerEvent] as being constructed of multiple [SpinnakerEvent]s. + * + * This interface is necessary to correctly hydrate [EventMetadata] on [SpinnakerEvent] before persisting. + */ +interface CompositeSpinnakerEvent : SpinnakerEvent { + /** + * Returns a list of the composed [SpinnakerEvent]s. + */ + @JsonIgnore + fun getComposedEvents(): List +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/EventMetadata.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/EventMetadata.kt new file mode 100644 index 00000000000..3ac4f625019 --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/EventMetadata.kt @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.event + +import java.time.Instant + +/** + * Metadata for a [SpinnakerEvent]. + * + * @param id A unique ID for the event (not used beyond tracing, debugging) + * @param aggregateType The type of aggregate the event is for + * @param aggregateId The id of the aggregate the event is for + * @param sequence Auto-incrementing number for event ordering + * @param originatingVersion The aggregate version that originated this event + * @param timestamp The time at which the event was created + * @param serviceVersion The version of the service (clouddriver) that created the event + * @param source Where/what generated the event + */ +data class EventMetadata( + val id: String, + val aggregateType: String, + val aggregateId: String, + val sequence: Long, + val originatingVersion: Long, + val timestamp: Instant = Instant.now(), + val serviceVersion: String = "unknown", + val source: String = "unknown" +) diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/SpinnakerEvent.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/SpinnakerEvent.kt new file mode 100644 index 00000000000..7014b3d8c8b --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/SpinnakerEvent.kt @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.event + +import com.fasterxml.jackson.annotation.JsonGetter +import com.fasterxml.jackson.annotation.JsonSetter +import com.fasterxml.jackson.annotation.JsonTypeInfo + +/** + * The base type for the eventing library. All library-level code is contained within [EventMetadata]. + */ +@JsonTypeInfo( + use = JsonTypeInfo.Id.NAME, + include = JsonTypeInfo.As.PROPERTY, + property = "eventType" +) +interface SpinnakerEvent { + @JsonGetter + fun getMetadata(): EventMetadata + + @JsonSetter + fun setMetadata(eventMetadata: EventMetadata) +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/config/EventSourceAutoConfiguration.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/config/EventSourceAutoConfiguration.kt new file mode 100644 index 00000000000..939433c5c7d --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/config/EventSourceAutoConfiguration.kt @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.event.config + +import org.springframework.context.annotation.Configuration +import org.springframework.context.annotation.Import + +/** + * Auto-configures the event sourcing library. + */ +@Configuration +@Import(MemoryEventRepositoryConfig::class) +open class EventSourceAutoConfiguration diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/config/MemoryEventRepositoryConfig.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/config/MemoryEventRepositoryConfig.kt new file mode 100644 index 00000000000..cc1271bfad3 --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/config/MemoryEventRepositoryConfig.kt @@ -0,0 +1,100 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.event.config + +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository +import com.netflix.spinnaker.clouddriver.event.persistence.InMemoryEventRepository +import java.time.Duration +import javax.validation.Constraint +import javax.validation.ConstraintValidator +import javax.validation.ConstraintValidatorContext +import javax.validation.constraints.Min +import kotlin.reflect.KClass +import org.slf4j.LoggerFactory +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean +import org.springframework.boot.context.properties.ConfigurationProperties +import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.context.ApplicationEventPublisher +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration +import org.springframework.validation.annotation.Validated + +@Configuration +@EnableConfigurationProperties(MemoryEventRepositoryConfigProperties::class) +open class MemoryEventRepositoryConfig { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + init { + log.info("Configuring EventRepository: InMemoryEventRepository") + } + + @Bean + @ConditionalOnMissingBean(EventRepository::class) + open fun eventRepository( + properties: MemoryEventRepositoryConfigProperties, + applicationEventPublisher: ApplicationEventPublisher, + registry: Registry + ): EventRepository = + InMemoryEventRepository(properties, applicationEventPublisher, registry) +} + +@MemoryEventRepositoryConfigProperties.SpinValidated +@ConfigurationProperties("spinnaker.clouddriver.eventing.memory-repository") +open class MemoryEventRepositoryConfigProperties { + /** + * The max age of an [Aggregate]. One of this and [maxAggregatesCount] must be set. + */ + @Min( + message = "Event repository aggregate age cannot be less than 24 hours.", + value = 60 * 60 * 24 * 1000 + ) + var maxAggregateAgeMs: Long? = Duration.ofHours(24).toMillis() + + /** + * The max number of [Aggregate] objects. One of this and [maxAggregateAgeMs] must be set. + */ + var maxAggregatesCount: Int? 
= null + + @Validated + @Constraint(validatedBy = [Validator::class]) + @Target(AnnotationTarget.CLASS) + annotation class SpinValidated( + val message: String = "Invalid event repository configuration", + val groups: Array> = [], + val payload: Array> = [] + ) + + class Validator : ConstraintValidator { + override fun isValid( + value: MemoryEventRepositoryConfigProperties, + context: ConstraintValidatorContext + ): Boolean { + if (value.maxAggregateAgeMs != null && value.maxAggregatesCount != null) { + context.buildConstraintViolationWithTemplate("Only one of 'maxAggregateAgeMs' and 'maxAggregatesCount' can be defined") + .addConstraintViolation() + return false + } + if (value.maxAggregateAgeMs == null && value.maxAggregatesCount == null) { + context.buildConstraintViolationWithTemplate("One of 'maxAggregateAgeMs' and 'maxAggregatesCount' must be set") + .addConstraintViolation() + return false + } + return true + } + } +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/AggregateChangeRejectedException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/AggregateChangeRejectedException.kt new file mode 100644 index 00000000000..1b2f5a6083c --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/AggregateChangeRejectedException.kt @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.event.exceptions + +import com.netflix.spinnaker.kork.exceptions.SystemException + +/** + * Thrown when one or more [SpinEvent] have been rejected from being committed to an [Aggregate]. + * + * The process which originated the event must be retryable. + */ +class AggregateChangeRejectedException( + aggregateVersion: Long, + originatingVersion: Long +) : SystemException( + "Attempting to save new events against an old aggregate version " + + "(version: $aggregateVersion, originatingVersion: $originatingVersion)" +) { + init { + retryable = false + } +} diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/DuplicateEventAggregateException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/DuplicateEventAggregateException.kt new file mode 100644 index 00000000000..96585f3cd0a --- /dev/null +++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/DuplicateEventAggregateException.kt @@ -0,0 +1,20 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/AggregateChangeRejectedException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/AggregateChangeRejectedException.kt
new file mode 100644
index 00000000000..1b2f5a6083c
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/AggregateChangeRejectedException.kt
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.exceptions
+
+import com.netflix.spinnaker.kork.exceptions.SystemException
+
+/**
+ * Thrown when one or more [SpinnakerEvent]s have been rejected from being committed to an [Aggregate].
+ *
+ * The process that originated the events is expected to retry against the latest aggregate state.
+ */
+class AggregateChangeRejectedException(
+  aggregateVersion: Long,
+  originatingVersion: Long
+) : SystemException(
+  "Attempting to save new events against an old aggregate version " +
+    "(version: $aggregateVersion, originatingVersion: $originatingVersion)"
+) {
+  init {
+    retryable = false
+  }
+}
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/DuplicateEventAggregateException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/DuplicateEventAggregateException.kt
new file mode 100644
index 00000000000..96585f3cd0a
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/DuplicateEventAggregateException.kt
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.exceptions
+
+import com.netflix.spinnaker.kork.exceptions.SystemException
+
+class DuplicateEventAggregateException(e: Exception) : SystemException(e), EventingException
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/EventingException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/EventingException.kt
new file mode 100644
index 00000000000..71280cb1ca1
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/EventingException.kt
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.exceptions
+
+/**
+ * Marker interface for exceptions thrown by the eventing library.
+ */
+interface EventingException
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/InvalidEventTypeException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/InvalidEventTypeException.kt
new file mode 100644
index 00000000000..58a8fba840f
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/InvalidEventTypeException.kt
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.exceptions
+
+import com.netflix.spinnaker.kork.exceptions.IntegrationException
+
+/**
+ * Thrown when a [SpinnakerEvent] cannot be created.
+ */
+class InvalidEventTypeException(cause: Throwable) : IntegrationException(cause), EventingException {
+  init {
+    retryable = false
+  }
+}
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/UninitializedEventException.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/UninitializedEventException.kt
new file mode 100644
index 00000000000..c88a75e3f79
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/exceptions/UninitializedEventException.kt
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.exceptions
+
+import com.netflix.spinnaker.kork.exceptions.IntegrationException
+
+/**
+ * Thrown when an event's metadata is read before the library has initialized it.
+ */
+class UninitializedEventException :
+  IntegrationException(
+    "Cannot access event metadata before initialization"
+  ),
+  EventingException {
+  init {
+    retryable = false
+  }
+}
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/EventRepository.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/EventRepository.kt
new file mode 100644
index 00000000000..81858403a07
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/EventRepository.kt
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.persistence
+
+import com.netflix.spinnaker.clouddriver.event.Aggregate
+import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent
+import javax.validation.constraints.Max
+import javax.validation.constraints.Positive
+
+/**
+ * The [EventRepository] is responsible for reading and writing immutable event logs from a persistent store.
+ *
+ * There is deliberately no eviction API: each [EventRepository] implementation is expected to provide that
+ * functionality itself, including invocation APIs and/or scheduling, tailored to the operational needs of
+ * its backend.
+ */
+interface EventRepository {
+  /**
+   * Save [newEvents] to an [Aggregate].
+   *
+   * @param aggregateType The aggregate collection name
+   * @param aggregateId The unique identifier of the event aggregate within the [aggregateType]
+   * @param originatingVersion The aggregate version that originated the [newEvents]. This is used to ensure events
+   *                           are added only based on the latest aggregate state
+   * @param newEvents A list of events to be saved
+   */
+  fun save(aggregateType: String, aggregateId: String, originatingVersion: Long, newEvents: List<SpinnakerEvent>)
+
+  /**
+   * List all events for a given [Aggregate].
+   *
+   * @param aggregateType The aggregate collection name
+   * @param aggregateId The unique identifier of the event aggregate within the [aggregateType]
+   * @return An ordered list of events, oldest to newest
+   */
+  fun list(aggregateType: String, aggregateId: String): List<SpinnakerEvent>
+
+  /**
+   * List all aggregates for a given type.
+   *
+   * @param criteria The criteria to limit the response by
+   * @return A list of matching aggregates
+   */
+  fun listAggregates(criteria: ListAggregatesCriteria): ListAggregatesResult
+
+  /**
+   * @param aggregateType The type of [Aggregate] to return. If unset, all types will be returned.
+   * @param token The page token to paginate from. If unset, the first page is returned.
+   * @param perPage The number of [Aggregate]s to return in each response
+   */
+  class ListAggregatesCriteria(
+    val aggregateType: String? = null,
+    val token: String? = null,
+
+    @Positive @Max(1000)
+    val perPage: Int = 100
+  )
+
+  /**
+   * @param aggregates The collection of [Aggregate]s returned
+   * @param nextPageToken The next page token
+   */
+  class ListAggregatesResult(
+    val aggregates: List<Aggregate>,
+    val nextPageToken: String? = null
+  )
+}
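The originatingVersion parameter is what gives save/list optimistic-concurrency semantics: a stale version is rejected with AggregateChangeRejectedException and the caller starts over from the latest state. A hedged sketch of a caller-side loop; appendWithRetry and readLatestVersion are hypothetical helpers, only the repository API comes from this change:

```kotlin
import com.netflix.spinnaker.clouddriver.event.exceptions.AggregateChangeRejectedException

// Hypothetical caller-side helper demonstrating the intended retry contract.
fun appendWithRetry(
    repo: EventRepository,
    aggregateType: String,
    aggregateId: String,
    observedVersion: Long,         // the aggregate version the caller last read
    newEvents: List<SpinnakerEvent>,
    readLatestVersion: () -> Long  // hypothetical re-read of the current version
) {
    var version = observedVersion
    repeat(3) {
        try {
            repo.save(aggregateType, aggregateId, version, newEvents)
            return
        } catch (e: AggregateChangeRejectedException) {
            // Another writer advanced the aggregate; refresh the version and retry.
            version = readLatestVersion()
        }
    }
    error("$aggregateType/$aggregateId kept changing after 3 attempts; giving up")
}
```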
diff --git a/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/InMemoryEventRepository.kt b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/InMemoryEventRepository.kt
new file mode 100644
index 00000000000..d65c557e6cd
--- /dev/null
+++ b/clouddriver-event/src/main/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/InMemoryEventRepository.kt
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.persistence
+
+import com.netflix.spectator.api.Registry
+import com.netflix.spinnaker.clouddriver.event.Aggregate
+import com.netflix.spinnaker.clouddriver.event.EventMetadata
+import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent
+import com.netflix.spinnaker.clouddriver.event.config.MemoryEventRepositoryConfigProperties
+import com.netflix.spinnaker.clouddriver.event.exceptions.AggregateChangeRejectedException
+import com.netflix.spinnaker.kork.exceptions.SystemException
+import java.time.Duration
+import java.time.Instant
+import java.util.UUID
+import java.util.concurrent.ConcurrentHashMap
+import kotlin.math.max
+import org.slf4j.LoggerFactory
+import org.springframework.context.ApplicationEventPublisher
+import org.springframework.scheduling.annotation.Scheduled
+
+/**
+ * An in-memory only [EventRepository]. This implementation should only be used for testing.
+ */
+class InMemoryEventRepository(
+  private val config: MemoryEventRepositoryConfigProperties,
+  private val applicationEventPublisher: ApplicationEventPublisher,
+  private val registry: Registry
+) : EventRepository {
+
+  private val log by lazy { LoggerFactory.getLogger(javaClass) }
+
+  private val aggregateCountId = registry.createId("eventing.aggregates")
+  private val aggregateWriteCountId = registry.createId("eventing.aggregates.writes")
+  private val aggregateReadCountId = registry.createId("eventing.aggregates.reads")
+  private val eventCountId = registry.createId("eventing.events")
+  private val eventWriteCountId = registry.createId("eventing.events.writes")
+  private val eventReadCountId = registry.createId("eventing.events.reads")
+
+  private val events: MutableMap<Aggregate, MutableList<SpinnakerEvent>> = ConcurrentHashMap()
+
+  override fun save(
+    aggregateType: String,
+    aggregateId: String,
+    originatingVersion: Long,
+    newEvents: List<SpinnakerEvent>
+  ) {
+    registry.counter(aggregateWriteCountId).increment()
+
+    val aggregate = getAggregate(aggregateType, aggregateId)
+
+    if (aggregate.version != originatingVersion) {
+      // If this is being thrown, ensure that the originating process is retried on the latest aggregate version
+      // by re-reading the newEvents list.
+      throw AggregateChangeRejectedException(aggregate.version, originatingVersion)
+    }
+
+    events.getOrPut(aggregate) { mutableListOf() }.let { aggregateEvents ->
+      val currentSequence = aggregateEvents.map { it.getMetadata().sequence }.maxOrNull() ?: 0
+
+      newEvents.forEachIndexed { index, newEvent ->
+        // TODO(rz): Plugin more metadata (provenance, serviceVersion, etc)
+        newEvent.setMetadata(
+          EventMetadata(
+            id = UUID.randomUUID().toString(),
+            aggregateType = aggregateType,
+            aggregateId = aggregateId,
+            sequence = currentSequence + (index + 1),
+            originatingVersion = originatingVersion
+          )
+        )
+      }
+
+      registry.counter(eventWriteCountId).increment(newEvents.size.toLong())
+      aggregateEvents.addAll(newEvents)
+      aggregate.version = aggregate.version + 1
+    }
+
+    log.debug(
+      "Saved $aggregateType/$aggregateId@${aggregate.version}: " +
+        "[${newEvents.joinToString(",") { it.javaClass.simpleName }}]"
+    )
+
+    newEvents.forEach { applicationEventPublisher.publishEvent(it) }
+  }
+
+  override fun list(aggregateType: String, aggregateId: String): List<SpinnakerEvent> {
+    registry.counter(eventReadCountId).increment()
+
+    return getAggregate(aggregateType, aggregateId)
+      .let {
+        events[it]?.toList()
+      }
+      ?: throw MissingAggregateEventsException(aggregateType, aggregateId)
+  }
+
+  override fun listAggregates(criteria: EventRepository.ListAggregatesCriteria): EventRepository.ListAggregatesResult {
+    val aggregates = events.keys
+
+    val result = aggregates.toList()
+      .let { list ->
+        criteria.aggregateType?.let { requiredType -> list.filter { it.type == requiredType } } ?: list
+      }
+      .let { list ->
+        criteria.token?.let { nextPageToken ->
+          val start = list.indexOf(list.find { "${it.type}/${it.id}" == nextPageToken })
+          val end = (start + criteria.perPage).let {
+            if (it > list.size - 1) {
+              list.size
+            } else {
+              it
+            }
+          }
+          list.subList(start, end)
+        } ?: list
+      }
+
+    return EventRepository.ListAggregatesResult(
+      aggregates = result,
+      nextPageToken = result.lastOrNull()?.let { "${it.type}/${it.id}" }
+    )
+  }
+
+  private fun getAggregate(aggregateType: String, aggregateId: String): Aggregate {
+    registry.counter(aggregateReadCountId).increment()
+
+    val aggregate = Aggregate(
+      aggregateType,
+      aggregateId,
+      0L
+    )
+    events.putIfAbsent(aggregate, mutableListOf())
+    return events.keys.first { it == aggregate }
+  }
+
+  @Scheduled(fixedDelayString = "\${spinnaker.clouddriver.eventing.memory-repository.cleanup-job-delay-ms:60000}")
+  private fun cleanup() {
+    registry.counter(eventReadCountId).increment()
+
+    config.maxAggregateAgeMs
+      ?.let { Duration.ofMillis(it) }
+      ?.let { maxAge ->
+        val horizon = Instant.now().minus(maxAge)
+        log.info("Cleaning up aggregates last updated earlier than $maxAge ($horizon)")
+        events.entries
+          .filter { it.value.any { event -> event.getMetadata().timestamp.isBefore(horizon) } }
+          .map { it.key }
+          .forEach {
+            log.trace("Cleaning up $it")
+            events.remove(it)
+          }
+      }
+
+    config.maxAggregatesCount
+      ?.let { maxCount ->
+        log.info("Cleaning up aggregates to max $maxCount items, pruning by earliest updated")
+        events.entries
+          // Flatten into pairs of <Aggregate, SpinnakerEvent>
+          .flatMap { entry ->
+            entry.value.map { Pair(entry.key, it) }
+          }
+          .sortedBy { it.second.getMetadata().timestamp }
+          .subList(0, max(events.size - maxCount, 0))
+          .forEach {
+            log.trace("Cleaning up ${it.first}")
+            events.remove(it.first)
+          }
+      }
+  }
+
+  @Scheduled(fixedRate = 1_000)
+  private fun recordMetrics() {
+    registry.gauge(aggregateCountId).set(events.size.toDouble())
+    registry.gauge(eventCountId).set(events.flatMap { it.value }.size.toDouble())
+  }
+
+  inner class MissingAggregateEventsException(aggregateType: String, aggregateId: String) : SystemException(
+    "Aggregate $aggregateType/$aggregateId is missing its internal events list store"
+  )
+}
diff --git a/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/EventSourceSystemTest.kt b/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/EventSourceSystemTest.kt
new file mode 100644
index 00000000000..a0838d747a7
--- /dev/null
+++ b/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/EventSourceSystemTest.kt
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event
+
+import com.netflix.spectator.api.NoopRegistry
+import com.netflix.spectator.api.Registry
+import com.netflix.spinnaker.clouddriver.event.config.EventSourceAutoConfiguration
+import com.netflix.spinnaker.clouddriver.event.persistence.InMemoryEventRepository
+import dev.minutest.junit.JUnit5Minutests
+import dev.minutest.rootContext
+import org.springframework.boot.autoconfigure.AutoConfigurations
+import org.springframework.boot.test.context.assertj.AssertableApplicationContext
+import org.springframework.boot.test.context.runner.ApplicationContextRunner
+import org.springframework.context.annotation.Bean
+import org.springframework.context.annotation.Configuration
+import strikt.api.expect
+import strikt.assertions.isA
+
+class EventSourceSystemTest : JUnit5Minutests {
+
+  fun tests() = rootContext<ApplicationContextRunner> {
+    fixture {
+      ApplicationContextRunner()
+        .withConfiguration(
+          AutoConfigurations.of(
+            EventSourceAutoConfiguration::class.java
+          )
+        )
+    }
+
+    test("supports no config") {
+      withUserConfiguration(EventSourceAutoConfiguration::class.java, DependencyConfiguration::class.java)
+        .run { ctx: AssertableApplicationContext ->
+          expect {
+            that(ctx.getBean("eventRepository")).describedAs("eventRepository").isA<InMemoryEventRepository>()
+          }
+        }
+    }
+  }
+
+  @Configuration
+  open class DependencyConfiguration {
+    @Bean
+    open fun registry(): Registry = NoopRegistry()
+  }
+}
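The test above also documents the extension point: eventRepository is guarded by @ConditionalOnMissingBean, so the in-memory default backs off as soon as another EventRepository bean is defined. A hedged sketch of such an override; MyPersistentEventRepository is a hypothetical implementation:

```kotlin
import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.Configuration

@Configuration
open class PersistentEventRepositoryConfig {
    // Defining any EventRepository bean disables the in-memory default, because
    // MemoryEventRepositoryConfig.eventRepository is annotated with
    // @ConditionalOnMissingBean(EventRepository::class).
    @Bean
    open fun eventRepository(): EventRepository =
        MyPersistentEventRepository() // hypothetical durable implementation
}
```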
diff --git a/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/ObjectMappingTest.kt b/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/ObjectMappingTest.kt
new file mode 100644
index 00000000000..d7a89ae3651
--- /dev/null
+++ b/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/ObjectMappingTest.kt
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event
+
+import com.fasterxml.jackson.annotation.JsonTypeName
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.module.kotlin.readValue
+import com.fasterxml.jackson.module.kotlin.registerKotlinModule
+import dev.minutest.junit.JUnit5Minutests
+import dev.minutest.rootContext
+import strikt.api.expectThat
+import strikt.assertions.isA
+import strikt.assertions.isEqualTo
+
+class ObjectMappingTest : JUnit5Minutests {
+
+  fun tests() = rootContext<ObjectMapper> {
+    fixture {
+      ObjectMapper()
+        .registerKotlinModule()
+        .findAndRegisterModules()
+        .apply {
+          registerSubtypes(listOf(MyEvent::class.java))
+        }
+    }
+
+    test("can serialize and deserialize events") {
+      val event = MyEvent("world")
+      event.setMetadata(
+        EventMetadata(
+          id = "myid",
+          aggregateType = "type",
+          aggregateId = "id",
+          sequence = 999,
+          originatingVersion = 100
+        )
+      )
+
+      val serializedEvent = writeValueAsString(event)
+      expectThat(readValue<SpinnakerEvent>(serializedEvent))
+        .isA<MyEvent>()
+        .and {
+          get { hello }.isEqualTo("world")
+          // EventMetadata should be excluded from SpinnakerEvent serialization
+          get { hasMetadata() }.isEqualTo(false)
+        }
+
+      val serializedEventMetadata = writeValueAsString(event.getMetadata())
+      expectThat(readValue<EventMetadata>(serializedEventMetadata))
+        .and {
+          get { id }.isEqualTo("myid")
+          get { aggregateType }.isEqualTo("type")
+          get { aggregateId }.isEqualTo("id")
+          get { sequence }.isEqualTo(999)
+          get { originatingVersion }.isEqualTo(100)
+        }
+    }
+  }
+
+  @JsonTypeName("myEvent")
+  class MyEvent(
+    val hello: String
+  ) : AbstractSpinnakerEvent()
+}
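The round trip above works because every concrete event is registered as a named Jackson subtype, so the @JsonTypeName value, not the class name, ends up in the payload. A hedged sketch of wiring up an additional event type; InstanceTerminated is hypothetical:

```kotlin
import com.fasterxml.jackson.annotation.JsonTypeName
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import com.fasterxml.jackson.module.kotlin.registerKotlinModule
import com.netflix.spinnaker.clouddriver.event.AbstractSpinnakerEvent
import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent

// Hypothetical event type: the annotation value becomes the wire-format discriminator.
@JsonTypeName("instanceTerminated")
class InstanceTerminated(val instanceId: String) : AbstractSpinnakerEvent()

fun main() {
    val mapper = ObjectMapper()
        .registerKotlinModule()
        .findAndRegisterModules()
        .apply { registerSubtypes(InstanceTerminated::class.java) }

    val json = mapper.writeValueAsString(InstanceTerminated("i-1234"))
    val event: SpinnakerEvent = mapper.readValue(json) // resolves back to InstanceTerminated
    println(event.javaClass.simpleName)
}
```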
diff --git a/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/InMemoryEventRepositoryTest.kt b/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/InMemoryEventRepositoryTest.kt
new file mode 100644
index 00000000000..85c377ff275
--- /dev/null
+++ b/clouddriver-event/src/test/kotlin/com/netflix/spinnaker/clouddriver/event/persistence/InMemoryEventRepositoryTest.kt
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.event.persistence
+
+import com.netflix.spectator.api.NoopRegistry
+import com.netflix.spinnaker.clouddriver.event.AbstractSpinnakerEvent
+import com.netflix.spinnaker.clouddriver.event.config.MemoryEventRepositoryConfigProperties
+import com.netflix.spinnaker.clouddriver.event.exceptions.AggregateChangeRejectedException
+import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository.ListAggregatesCriteria
+import dev.minutest.junit.JUnit5Minutests
+import dev.minutest.rootContext
+import io.mockk.confirmVerified
+import io.mockk.mockk
+import io.mockk.verify
+import org.junit.jupiter.api.assertThrows
+import org.springframework.context.ApplicationEventPublisher
+import strikt.api.expectThat
+import strikt.assertions.containsExactly
+import strikt.assertions.get
+import strikt.assertions.isEmpty
+import strikt.assertions.isEqualTo
+import strikt.assertions.isSameInstanceAs
+import strikt.assertions.map
+
+class InMemoryEventRepositoryTest : JUnit5Minutests {
+
+  fun tests() = rootContext<Fixture> {
+    fixture {
+      Fixture()
+    }
+
+    test("no events returned for a non-existent aggregate") {
+      expectThat(subject.list("type", "noexist"))
+        .isEmpty()
+    }
+
+    test("save appends aggregate events") {
+      val event = MyEvent("agg", "id", "hello world")
+      subject.save("agg", "id", 0L, listOf(event))
+
+      expectThat(subject.list("agg", "id")) {
+        get { size }.isEqualTo(1)
+        get(0).and {
+          isSameInstanceAs(event)
+        }
+      }
+
+      val event2 = MyEvent("agg", "id", "hello rob")
+      subject.save("agg", "id", 1L, listOf(event2))
+
+      expectThat(subject.list("agg", "id")) {
+        get { size }.isEqualTo(2)
+        get(0).and {
+          isSameInstanceAs(event)
+        }
+        get(1).and {
+          isSameInstanceAs(event2)
+        }
+      }
+    }
+
+    test("saving with a new aggregate with a non-zero originating version fails") {
+      val event = MyEvent("agg", "id", "hello")
+      assertThrows<AggregateChangeRejectedException> {
+        subject.save("agg", "id", 10L, listOf(event))
+      }
+    }
+
+    test("saving an aggregate with an old originating version fails") {
+      val event = MyEvent("agg", "id", "hello")
+      subject.save("agg", "id", 0L, listOf(event))
+
+      assertThrows<AggregateChangeRejectedException> {
+        subject.save("agg", "id", 0L, listOf(event))
+      }
+    }
+
+    test("newly saved events are published") {
+      val event = MyEvent("agg", "id", "hello")
+      subject.save("agg", "id", 0L, listOf(event))
+
+      verify { eventPublisher.publishEvent(event) }
+      confirmVerified(eventPublisher)
+    }
+
+    context("listing aggregates") {
+      val event1 = MyEvent("type1", "id", "one")
+      val event2 = MyEvent("type2", "id", "two")
+      val event3 = MyEvent("type3", "id", "three")
+
+      test("not providing a type") {
+        listOf(event1, event2, event3).forEach {
+          subject.save(it.aggregateType, it.aggregateId, 0L, listOf(it))
+        }
+
+        expectThat(subject.listAggregates(ListAggregatesCriteria())) {
+          get { aggregates }.map { it.type }.containsExactly("type1", "type2", "type3")
+        }
+      }
+
+      test("providing a type") {
+        listOf(event1, event2, event3).forEach {
+          subject.save(it.aggregateType, it.aggregateId, 0L, listOf(it))
+        }
+
+        expectThat(subject.listAggregates(ListAggregatesCriteria(aggregateType = event1.getMetadata().aggregateType))) {
+          get { aggregates }.map { it.type }.containsExactly("type1")
+        }
+      }
+
+      test("providing a non-existent type") {
+        listOf(event1, event2, event3).forEach {
+          subject.save(it.aggregateType, it.aggregateId, 0L, listOf(it))
+        }
+
+        expectThat(subject.listAggregates(ListAggregatesCriteria(aggregateType = "unknown"))) {
+          get { aggregates }.isEmpty()
+        }
+      }
+    }
+  }
+
+  inner class Fixture {
+    var eventPublisher: ApplicationEventPublisher = mockk(relaxed = true)
+    var subject: EventRepository = InMemoryEventRepository(
+      MemoryEventRepositoryConfigProperties(),
+      eventPublisher,
+      NoopRegistry()
+    )
+  }
+
+  private inner class MyEvent(
+    val aggregateType: String,
+    val aggregateId: String,
+    val value: String
+  ) : AbstractSpinnakerEvent()
+}
diff --git a/clouddriver-google-common/clouddriver-google-common.gradle b/clouddriver-google-common/clouddriver-google-common.gradle
index 4cea00252bd..e94817df8aa 100644
--- a/clouddriver-google-common/clouddriver-google-common.gradle
+++ b/clouddriver-google-common/clouddriver-google-common.gradle
@@ -1,4 +1,20 @@
 dependencies {
-  compile project(":clouddriver-core")
-  spinnaker.group('google')
+  implementation "com.google.api-client:google-api-client"
+  implementation 'com.google.auth:google-auth-library-oauth2-http'
+  implementation "io.spinnaker.kork:kork-annotations"
+  implementation "io.spinnaker.kork:kork-credentials"
+  implementation "io.spinnaker.kork:kork-credentials-api"
+  implementation "com.netflix.spectator:spectator-api"
+  implementation "io.spinnaker.fiat:fiat-api:$fiatVersion"
+  implementation "io.spinnaker.fiat:fiat-core:$fiatVersion"
+  implementation "org.apache.groovy:groovy"
+  implementation "org.slf4j:slf4j-api"
+  implementation "org.springframework.security:spring-security-config"
+  implementation "org.springframework.security:spring-security-core"
+  implementation "org.springframework.security:spring-security-web"
+
+  testImplementation "cglib:cglib-nodep"
+  testImplementation "org.objenesis:objenesis"
+  testImplementation "org.spockframework:spock-core"
+  testRuntimeOnly "net.bytebuddy:byte-buddy"
 }
diff --git a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/config/GoogleCommonManagedAccount.groovy b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/config/GoogleCommonManagedAccount.groovy
index 26d01799abc..50f23d0960f 100644
--- a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/config/GoogleCommonManagedAccount.groovy
+++ b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/config/GoogleCommonManagedAccount.groovy
@@ -16,9 +16,10 @@
 package com.netflix.spinnaker.clouddriver.googlecommon.config
 
+import com.netflix.spinnaker.credentials.definition.CredentialsDefinition
 import com.netflix.spinnaker.fiat.model.resources.Permissions
 
-class GoogleCommonManagedAccount {
+class GoogleCommonManagedAccount implements CredentialsDefinition {
   String name
   String environment
   String accountType
@@ -28,16 +29,4 @@ class GoogleCommonManagedAccount {
   String serviceAccountProject
   @Deprecated List<String> requiredGroupMembership
   Permissions.Builder permissions = new Permissions.Builder()
-
-  public InputStream getInputStream() {
-    if (jsonPath) {
-      if (jsonPath.startsWith("classpath:")) {
-        return getClass().getResourceAsStream(jsonPath.replace("classpath:", ""))
-      } else {
-        return new FileInputStream(new File(jsonPath))
-      }
-    } else {
-      return null
-    }
-  }
 }
diff --git a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetry.groovy b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetry.groovy
deleted file mode 100644
index 226ab6b99bc..00000000000
--- a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetry.groovy
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.googlecommon.deploy
-
-import com.google.api.client.googleapis.json.GoogleJsonResponseException
-import com.netflix.spectator.api.Registry
-import com.netflix.spinnaker.clouddriver.data.task.Task
-import groovy.util.logging.Slf4j
-
-import java.util.concurrent.TimeUnit
-
-@Slf4j
-abstract class GoogleCommonSafeRetry {
-  public static class SafeRetryState {
-    Object finalResult
-    Exception lastSeenException
-    int tries = 1
-    boolean success = false
-  }
-
-  /**
-   * Retry an operation if it fails. Treat any error codes in successfulErrorCodes as success.
-   *
-   * @param operation - The operation.
-   * @param action - String describing the operation.
-   * @param resource - Resource we are operating on.
-   * @param task - Spinnaker task. Can be null.
-   * @param phase
-   * @param retryCodes - GoogleJsonResponseException codes we retry on.
-   * @param successfulErrorCodes - GoogleJsonException codes we treat as success.
-   *
-   * @return Object returned from the operation.
-   */
-  public Object doRetry(Closure operation,
-                        String resource,
-                        Task task,
-                        List<Integer> retryCodes,
-                        List<Integer> successfulErrorCodes,
-                        Long maxWaitInterval,
-                        Long retryIntervalBase,
-                        Long jitterMultiplier,
-                        Long maxRetries,
-                        Map tags,
-                        Registry registry) {
-    long startTime = registry.clock().monotonicTime()
-    SafeRetryState state = performOperation(
-      operation, tags.action, resource, task, tags.phase,
-      retryCodes, successfulErrorCodes,
-      maxWaitInterval, retryIntervalBase, jitterMultiplier, maxRetries)
-
-    def all_tags = [:]
-    all_tags.putAll(tags)
-    all_tags.success = state.success ? "true" : "false"
-    registry.timer(registry.createId("google.safeRetry", all_tags))
-      .record(registry.clock().monotonicTime() - startTime, TimeUnit.NANOSECONDS)
-
-    return determineFinalResult(state, tags.action, resource)
-  }
-
-
-  protected SafeRetryState performOperation(Closure operation,
-                                            String action,
-                                            String resource,
-                                            Task task,
-                                            String phase,
-                                            List<Integer> retryCodes,
-                                            List<Integer> successfulErrorCodes,
-                                            Long maxWaitInterval,
-                                            Long retryIntervalBase,
-                                            Long jitterMultiplier,
-                                            Long maxRetries) {
-    SafeRetryState state = new SafeRetryState()
-    try {
-      task?.updateStatus phase, "Attempting $action of $resource..."
-      state.finalResult = operation()
-      state.success = true
-      return state
-    } catch (GoogleJsonResponseException | SocketTimeoutException | SocketException e) {
-      // Don't retry if we don't have to.
-      if (e instanceof GoogleJsonResponseException && e.statusCode in successfulErrorCodes) {
-        state.success = true
-        return state
-      } else if (!isRetryable(e, retryCodes)) {
-        throw e
-      }
-      log.warn "Initial $action of $resource failed, retrying..."
-
-      while (state.tries < maxRetries) {
-        try {
-          def tries = ++state.tries
-          // Sleep with exponential backoff based on the number of retries. Add retry jitter with Math.random() to
-          // prevent clients syncing up and bursting at regular intervals. Don't wait longer than a minute.
-          Long thisIntervalWait = TimeUnit.SECONDS.toMillis(Math.pow(retryIntervalBase, tries) as Integer)
-          sleep(Math.min(thisIntervalWait, maxWaitInterval) + Math.round(Math.random() * jitterMultiplier))
-          log.warn "$action $resource attempt #$tries..."
-          state.finalResult = operation()
-          state.success = true
-          return state
-        } catch (GoogleJsonResponseException jsonException) {
-          if (jsonException.statusCode in retryCodes) {
-            log.warn "Retry $action of $resource encountered ${jsonException.statusCode} with error message: ${jsonException.message}. Trying again..."
-          } else {
-            throw jsonException
-          }
-          state.lastSeenException = jsonException
-        } catch (SocketTimeoutException toEx) {
-          log.warn "Retry $action timed out again, trying again..."
-          state.lastSeenException = toEx
-        }
-      }
-    }
-    return state
-  }
-
-  /**
-   * @return true if the status code is contained in the retryCodes list, or is a 5xx. This happens
-   * across the platform randomly, and our only real option is to retry.
-   */
-  static boolean isRetryable(Exception e, List<Integer> retryCodes) {
-    if (e instanceof GoogleJsonResponseException) {
-      GoogleJsonResponseException g = (GoogleJsonResponseException) e
-      return g.statusCode in retryCodes || ((int)(g.statusCode / 100)) == 5
-    }
-    return true
-  }
-
-  protected Object determineFinalResult(SafeRetryState state,
-                                        String action,
-                                        String resource) {
-    if (state.success) {
-      return state.finalResult
-    }
-    Exception lastSeenException = state.lastSeenException
-    int tries = state.tries
-    if (lastSeenException instanceof GoogleJsonResponseException) {
-      def lastSeenError = lastSeenException?.getDetails()?.getErrors()[0] ?: null
-      if (lastSeenError) {
-        if (lastSeenError.getReason() == 'resourceInUseByAnotherResource') {
-          // Don't fail the operation if the resource is in use. The main use case for this is resiliency in delete operations -
-          // we don't want to fail the operation if something is in use by another resource.
-          log.warn("Failed to $action $resource after #$tries."
-            + " Last seen exception has status code ${lastSeenException.getStatusCode()} with error message ${lastSeenError.getMessage()}"
-            + " and reason ${lastSeenError.getReason()}.")
-          return null
-        } else {
-          throw providerOperationException("Failed to $action $resource after #$tries."
-            + " Last seen exception has status code ${lastSeenException.getStatusCode()} with error message ${lastSeenError.getMessage()}"
-            + " and reason ${lastSeenError.getReason()}.")
-        }
-      } else {
-        throw providerOperationException("Failed to $action $resource after #$tries."
-          + " Last seen exception has status code ${lastSeenException.getStatusCode()} with message ${lastSeenException.getMessage()}.")
-      }
-    } else if (lastSeenException instanceof SocketTimeoutException) {
-      throw providerOperationException("Failed to $action $resource after #$tries."
-        + " Last operation timed out.")
-    } else {
-      throw new IllegalStateException("Caught exception is neither a JsonResponseException nor a OperationTimedOutException."
- + " Caught exception: ${lastSeenException}") - } - } - - abstract Exception providerOperationException(String message) -} diff --git a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentialUtils.groovy b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentialUtils.groovy index 674d2d90f84..0a2e3edb15f 100644 --- a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentialUtils.groovy +++ b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentialUtils.groovy @@ -16,15 +16,14 @@ package com.netflix.spinnaker.clouddriver.googlecommon.security -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory +import com.google.auth.oauth2.GoogleCredentials +import groovy.transform.CompileStatic +@CompileStatic class GoogleCommonCredentialUtils { - static getCredentials (HttpTransport httpTransport, JsonFactory jsonFactory, String jsonKey, String scope) { + static GoogleCredentials getCredentials(String jsonKey, String scope) { InputStream credentialStream = new ByteArrayInputStream(jsonKey.getBytes("UTF-8")) - return GoogleCredential.fromStream(credentialStream, httpTransport, jsonFactory) - .createScoped(Collections.singleton(scope)) + return GoogleCredentials.fromStream(credentialStream).createScoped(Collections.singleton(scope)) } } diff --git a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentials.groovy b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentials.groovy index c87e87a822a..12f35d84cba 100644 --- a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentials.groovy +++ b/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/security/GoogleCommonCredentials.groovy @@ -16,17 +16,19 @@ package com.netflix.spinnaker.clouddriver.googlecommon.security -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport import com.google.api.client.http.HttpRequest import com.google.api.client.http.HttpRequestInitializer import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory +import com.google.auth.http.HttpCredentialsAdapter +import com.google.auth.oauth2.GoogleCredentials +import groovy.transform.CompileStatic +@CompileStatic class GoogleCommonCredentials { - GoogleCredential getCredential(HttpTransport httpTransport, JsonFactory jsonFactory) { + GoogleCredentials getCredentials() { // No JSON key was specified in matching config on key server, so use application default credentials. 
-    GoogleCredential.getApplicationDefault()
+    GoogleCredentials.getApplicationDefault().createScoped(Arrays.asList("https://www.googleapis.com/auth/cloud-platform"))
   }
 
   HttpTransport buildHttpTransport() {
@@ -37,11 +39,11 @@ class GoogleCommonCredentials {
     }
   }
 
-  static HttpRequestInitializer setHttpTimeout(final HttpRequestInitializer requestInitializer) {
-    return new HttpRequestInitializer() {
+  static HttpRequestInitializer setHttpTimeout(final GoogleCredentials credentials) {
+    return new HttpCredentialsAdapter(credentials) {
       @Override
       public void initialize(HttpRequest httpRequest) throws IOException {
-        requestInitializer.initialize(httpRequest)
+        super.initialize(httpRequest);
         httpRequest.setConnectTimeout(2 * 60000) // 2 minutes connect timeout
         httpRequest.setReadTimeout(2 * 60000) // 2 minutes read timeout
       }
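The migration from the deprecated GoogleCredential to google-auth-library's GoogleCredentials moves scoping to credential-creation time and swaps the request plumbing to HttpCredentialsAdapter. A minimal sketch of the new flow, assuming Application Default Credentials are available in the environment:

```kotlin
import com.google.auth.http.HttpCredentialsAdapter
import com.google.auth.oauth2.GoogleCredentials

fun adcRequestInitializer(): HttpCredentialsAdapter {
    // ADC resolution order: GOOGLE_APPLICATION_CREDENTIALS, gcloud user
    // credentials, then the GCE/GKE metadata server.
    val credentials = GoogleCredentials.getApplicationDefault()
        .createScoped(listOf("https://www.googleapis.com/auth/cloud-platform"))

    // The adapter is what setHttpTimeout() above subclasses in order to add
    // connect/read timeouts to each request.
    return HttpCredentialsAdapter(credentials)
}
```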
diff --git a/clouddriver-google-common/src/main/java/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleApiException.java b/clouddriver-google-common/src/main/java/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleApiException.java
new file mode 100644
index 00000000000..f72f99f7286
--- /dev/null
+++ b/clouddriver-google-common/src/main/java/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleApiException.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2020 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.googlecommon.deploy;
+
+import com.google.api.client.googleapis.json.GoogleJsonError;
+import com.google.api.client.googleapis.json.GoogleJsonError.ErrorInfo;
+import com.google.api.client.googleapis.json.GoogleJsonResponseException;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.io.IOException;
+import java.util.Optional;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Value;
+
+@NonnullByDefault
+public class GoogleApiException extends IOException {
+  GoogleApiException(String message) {
+    super(message);
+  }
+
+  static GoogleApiException fromGoogleJsonException(GoogleJsonResponseException e) {
+    ErrorDetails errorDetails = ErrorDetails.fromGoogleJsonException(e);
+    if (errorDetails.getStatusCode() == 404) {
+      return new NotFoundException(errorDetails.toString());
+    }
+    if (errorDetails.getReason().equals("resourceInUseByAnotherResource")) {
+      return new ResourceInUseException(errorDetails.toString());
+    }
+    return new GoogleApiException(errorDetails.toString());
+  }
+
+  @Value
+  private static class ErrorDetails {
+    private final int statusCode;
+    private final String message;
+    private final String reason;
+
+    @ParametersAreNullableByDefault
+    private ErrorDetails(int statusCode, String message, String reason) {
+      this.statusCode = statusCode;
+      this.message = Strings.nullToEmpty(message);
+      this.reason = Strings.nullToEmpty(reason);
+    }
+
+    static ErrorDetails fromGoogleJsonException(GoogleJsonResponseException e) {
+      Optional<ErrorInfo> optionalErrorInfo =
+          Optional.ofNullable(e.getDetails())
+              .map(GoogleJsonError::getErrors)
+              .orElse(ImmutableList.of())
+              .stream()
+              .findFirst();
+
+      if (optionalErrorInfo.isPresent()) {
+        ErrorInfo errorInfo = optionalErrorInfo.get();
+        return new ErrorDetails(e.getStatusCode(), errorInfo.getMessage(), errorInfo.getReason());
+      } else {
+        return new ErrorDetails(e.getStatusCode(), e.getMessage(), "");
+      }
+    }
+
+    @Override
+    public String toString() {
+      String base =
+          String.format(
+              "Operation failed. Last attempt returned status code %s with error message %s",
+              statusCode, message);
+      if (Strings.isNullOrEmpty(reason)) {
+        return String.format("%s.", base);
+      } else {
+        return String.format("%s and reason %s.", base, reason);
+      }
+    }
+  }
+
+  public static final class ResourceInUseException extends GoogleApiException {
+    ResourceInUseException(String message) {
+      super(message);
+    }
+  }
+
+  public static final class NotFoundException extends GoogleApiException {
+    NotFoundException(String message) {
+      super(message);
+    }
+  }
+}
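With the new exception hierarchy, callers can branch on types instead of digging status codes out of GoogleJsonResponseException. A hedged sketch of a delete flow; deleteFirewallRule stands in for any wrapped Google API call:

```kotlin
import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException

// deleteFirewallRule is a hypothetical operation that may throw GoogleApiException.
fun deleteQuietly(deleteFirewallRule: () -> Unit) {
    try {
        deleteFirewallRule()
    } catch (e: GoogleApiException.NotFoundException) {
        // The resource is already gone: acceptable in a delete flow.
    } catch (e: GoogleApiException.ResourceInUseException) {
        // Mirrors the old 'resourceInUseByAnotherResource' special case:
        // log and carry on rather than failing the whole operation.
    }
    // Any other GoogleApiException propagates to the caller.
}
```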
diff --git a/clouddriver-google-common/src/main/java/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetry.java b/clouddriver-google-common/src/main/java/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetry.java
new file mode 100644
index 00000000000..3faaa65301b
--- /dev/null
+++ b/clouddriver-google-common/src/main/java/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetry.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.googlecommon.deploy;
+
+import com.google.api.client.googleapis.json.GoogleJsonResponseException;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.net.SocketTimeoutException;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nullable;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Builder;
+import lombok.extern.slf4j.Slf4j;
+
+@NonnullByDefault
+@Slf4j
+public final class GoogleCommonSafeRetry {
+  private final long maxWaitInterval;
+  private final long retryIntervalBase;
+  private final long jitterMultiplier;
+  private final long maxRetries;
+
+  @Builder
+  @ParametersAreNullableByDefault
+  public GoogleCommonSafeRetry(
+      Integer maxWaitInterval,
+      Integer retryIntervalBase,
+      Integer jitterMultiplier,
+      Integer maxRetries) {
+    this.maxWaitInterval = Optional.ofNullable(maxWaitInterval).orElse(60000);
+    this.retryIntervalBase = Optional.ofNullable(retryIntervalBase).orElse(2);
+    this.jitterMultiplier = Optional.ofNullable(jitterMultiplier).orElse(1000);
+    this.maxRetries = Optional.ofNullable(maxRetries).orElse(10);
+  }
+
+  /**
+   * Returns an instance of this class that never waits between retries, suitable for testing.
+   *
+   * @return An instance of {@link GoogleCommonSafeRetry}
+   */
+  public static GoogleCommonSafeRetry withoutDelay() {
+    return GoogleCommonSafeRetry.builder().retryIntervalBase(0).jitterMultiplier(0).build();
+  }
+
+  /**
+   * Retry an operation if it fails. Treat any error codes in successCodes as success.
+   *
+   * @param operation - The operation.
+   * @param description - Description of the operation, used for logging.
+   * @param retryCodes - GoogleJsonResponseException codes we retry on.
+   * @param successCodes - GoogleJsonException codes we treat as success.
+   * @return Object returned from the operation.
+   */
+  @Nullable
+  public <V> V doRetry(
+      Callable<V> operation,
+      String description,
+      List<Integer> retryCodes,
+      List<Integer> successCodes,
+      Map<String, String> tags,
+      Registry registry)
+      throws GoogleApiException {
+    boolean success = false;
+    long startTime = registry.clock().monotonicTime();
+    try {
+      V result = performOperation(operation, description, retryCodes, successCodes);
+      success = true;
+      return result;
+    } catch (GoogleJsonResponseException e) {
+      throw GoogleApiException.fromGoogleJsonException(e);
+    } catch (SocketTimeoutException e) {
+      throw new GoogleApiException("Operation failed. Last attempt timed out.");
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new GoogleApiException("Operation failed. Thread was interrupted waiting to retry.");
+    } catch (Exception e) {
+      throw new IllegalStateException("Operation failed.", e);
+    } finally {
+      Map<String, String> metricTags =
+          ImmutableMap.<String, String>builder()
+              .putAll(tags)
+              .put("success", Boolean.toString(success))
+              .build();
+      registry
+          .timer(registry.createId("google.safeRetry", metricTags))
+          .record(registry.clock().monotonicTime() - startTime, TimeUnit.NANOSECONDS);
+    }
+  }
+
+  @Nullable
+  private <V> V performOperation(
+      Callable<V> operation,
+      String description,
+      List<Integer> retryCodes,
+      List<Integer> successfulErrorCodes)
+      throws Exception {
+    long maxAttempts = Math.max(1, maxRetries);
+    int tries = 1;
+    // This logic runs maxAttempts - 1 times, as we don't catch exceptions on the last try
+    while (tries < maxAttempts) {
+      try {
+        return attemptOperation(operation, successfulErrorCodes);
+      } catch (GoogleJsonResponseException jsonException) {
+        if (!retryCodes.contains(jsonException.getStatusCode())) {
+          throw jsonException;
+        }
+        log.warn(
+            "{} attempt #{} encountered retryable statusCode={} with error message: {}.",
+            description,
+            tries,
+            jsonException.getStatusCode(),
+            jsonException.getMessage());
+      } catch (SocketTimeoutException toEx) {
+        log.warn("Retryable {} attempt #{} timed out.", description, tries);
+      }
+      tries++;
+      // Sleep with exponential backoff based on the number of retries. Add retry jitter with
+      // Math.random() to prevent clients syncing up and bursting at regular intervals. Don't wait
+      // longer than a minute.
+      long thisIntervalWait =
+          TimeUnit.SECONDS.toMillis((long) Math.pow(retryIntervalBase, tries - 1));
+      long sleepMillis =
+          Math.min(thisIntervalWait, maxWaitInterval)
+              + Math.round(Math.random() * jitterMultiplier);
+      log.warn("Waiting {} ms to retry {}.", sleepMillis, description);
+      Thread.sleep(sleepMillis);
+      log.warn("Retrying {} attempt #{}...", description, tries);
+    }
+    // Don't catch any exceptions on the last attempt
+    return attemptOperation(operation, successfulErrorCodes);
+  }
+
+  @Nullable
+  private static <V> V attemptOperation(Callable<V> operation, List<Integer> successCodes)
+      throws Exception {
+    try {
+      return operation.call();
+    } catch (GoogleJsonResponseException e) {
+      if (successCodes.contains(e.getStatusCode())) {
+        return null;
+      }
+      throw e;
+    }
+  }
+}
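Compared with the Groovy version, backoff settings now live on a reusable instance (defaults: 60 s max wait, base-2 backoff, up to 1000 ms jitter, 10 attempts) while retry/success codes stay per call. A hedged usage sketch; riskyGoogleCall is a stand-in for a google-api-client execute():

```kotlin
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleCommonSafeRetry

fun riskyGoogleCall(): String = TODO("stand-in for a google-api-client execute()")

fun main() {
    val safeRetry = GoogleCommonSafeRetry.builder()
        .maxRetries(5) // unset builder fields fall back to the defaults above
        .build()

    // Returns null if the call ends with one of the successCodes (here, 404);
    // throws GoogleApiException once retries are exhausted.
    val result: String? = safeRetry.doRetry(
        { riskyGoogleCall() },       // Callable<String>
        "delete firewall rule",      // description used in log lines
        listOf(500, 503),            // retryCodes
        listOf(404),                 // successCodes
        mapOf("action" to "delete"), // metric tags
        NoopRegistry()
    )
    println(result)
}
```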
diff --git a/clouddriver-google-common/src/test/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetrySpec.groovy b/clouddriver-google-common/src/test/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetrySpec.groovy
index 035adae59ed..f3b21baea17 100644
--- a/clouddriver-google-common/src/test/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetrySpec.groovy
+++ b/clouddriver-google-common/src/test/groovy/com/netflix/spinnaker/clouddriver/googlecommon/deploy/GoogleCommonSafeRetrySpec.groovy
@@ -17,31 +17,131 @@
 import com.google.api.client.googleapis.json.GoogleJsonResponseException
+import com.google.api.client.googleapis.testing.json.GoogleJsonResponseExceptionFactoryTesting
 import com.google.api.client.http.HttpHeaders
 import com.google.api.client.http.HttpResponseException
+import com.google.api.client.testing.json.MockJsonFactory
+import com.google.common.collect.ImmutableList
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spectator.api.DefaultRegistry;
+import com.netflix.spectator.api.Registry;
+import spock.lang.Shared
 import spock.lang.Specification
-import spock.lang.Unroll
 
 class GoogleCommonSafeRetrySpec extends Specification {
 
-  @Unroll
-  def "should retry on certain error codes"() {
-    setup:
-    HttpResponseException.Builder b = new HttpResponseException.Builder((int) code, null, new HttpHeaders())
-    GoogleJsonResponseException e = new GoogleJsonResponseException(b, null)
-
-    expect:
-    retryable == GoogleCommonSafeRetry.isRetryable(e, [400])
-
-    // Ensure non-GCP exceptions also cause retries.
-    GoogleCommonSafeRetry.isRetryable(new SocketException(), [])
-
-    where:
-    code || retryable
-    399  || false
-    400  || true
-    401  || false
-    500  || true
-    503  || true
+  @Shared
+  Registry registry = new DefaultRegistry()
+
+  GoogleCommonSafeRetry makeRetrier(int maxRetries) {
+    return GoogleCommonSafeRetry.builder().maxWaitInterval(0).maxRetries(maxRetries).build()
+  }
+
+  def "no_retry"() {
+    given:
+    Closure mockClosure = Mock(Closure)
+    int maxRetries = 10
+    GoogleCommonSafeRetry retrier = makeRetrier(maxRetries)
+
+    when:
+    Object result = retrier.doRetry(
+      mockClosure, "resource",
+      Arrays.asList(500), Arrays.asList(404),
+      ImmutableMap.of("action", "test"), registry)
+    then:
+    1 * mockClosure() >> "Hello World"
+    result == "Hello World"
+  }
+
+  def "retry_until_success"() {
+    given:
+    Closure mockClosure = Mock(Closure)
+    int maxRetries = 10
+    GoogleCommonSafeRetry retrier = makeRetrier(maxRetries)
+
+    when:
+    Object result = retrier.doRetry(
+      mockClosure, "resource",
+      Arrays.asList(500), Arrays.asList(404),
+      ImmutableMap.of("action", "test"), registry)
+    then:
+    2 * mockClosure() >> {
+      throw new SocketTimeoutException()
+    }
+    2 * mockClosure() >> {
+      throw GoogleJsonResponseExceptionFactoryTesting.newMock(
+        new MockJsonFactory(), 500, "oops")
+    }
+    1 * mockClosure() >> "Hello World"
+    result == "Hello World"
+  }
+
+  def "retry_until_exhausted"() {
+    given:
+    Closure mockClosure = Mock(Closure)
+    GoogleCommonSafeRetry retrier = makeRetrier(4)
+
+    when:
+    Object result = retrier.doRetry(
+      mockClosure, "resource",
+      Arrays.asList(500), Arrays.asList(404),
+      ImmutableMap.of("action", "test"), registry)
+    then:
+    2 * mockClosure() >> {
+      throw GoogleJsonResponseExceptionFactoryTesting.newMock(
+        new MockJsonFactory(), 500, "oops")
+    }
+    2 * mockClosure() >> {
+      throw new SocketTimeoutException()
+    }
+    thrown(GoogleApiException)
+  }
+
+  def "retry_until_404_ok"() {
+    given:
+    Closure mockClosure = Mock(Closure)
+    int maxRetries = 10
+    GoogleCommonSafeRetry retrier = makeRetrier(maxRetries)
+    HttpResponseException.Builder b = new HttpResponseException.Builder(404, null, new HttpHeaders())
+    GoogleJsonResponseException e = new GoogleJsonResponseException(b, null)
+
+    when:
+    Object result = retrier.doRetry(
+      mockClosure, "resource",
+      Arrays.asList(500), Arrays.asList(404),
+      ImmutableMap.of("action", "test"), registry)
+    then:
+    2 * mockClosure() >> {
+      throw GoogleJsonResponseExceptionFactoryTesting.newMock(
+        new MockJsonFactory(), 500, "oops")
+    }
+    1 * mockClosure() >> {
+      throw e
+    }
+    result == null
+  }
+
+  def "retry_until_404_not_ok"() {
+    given:
+    Closure mockClosure = Mock(Closure)
+    int maxRetries = 10
+    GoogleCommonSafeRetry retrier = makeRetrier(maxRetries)
+    HttpResponseException.Builder b = new HttpResponseException.Builder(404, null, new HttpHeaders())
+    GoogleJsonResponseException e = new GoogleJsonResponseException(b, null)
+
+    when:
+    Object result = retrier.doRetry(
+      mockClosure, "resource",
+      Arrays.asList(500), ImmutableList.of(),
+      ImmutableMap.of("action", "test"), registry)
+    then:
+    2 * mockClosure() >> {
+      throw GoogleJsonResponseExceptionFactoryTesting.newMock(
+        new MockJsonFactory(), 500, "oops")
+    }
+    1 * mockClosure() >> {
+      throw e
+    }
+    thrown(GoogleApiException)
   }
 }
diff --git a/clouddriver-google/clouddriver-google.gradle b/clouddriver-google/clouddriver-google.gradle
index da8b3ad6435..5b59fc0368f 100644
--- a/clouddriver-google/clouddriver-google.gradle
+++ b/clouddriver-google/clouddriver-google.gradle
@@ -1,14 +1,51 @@
 dependencies {
-  compile project(":clouddriver-artifacts")
-  compile project(":clouddriver-core")
-  compile project(":clouddriver-consul")
-  compile project(":clouddriver-google-common")
-  spinnaker.group('google')
-  compile spinnaker.dependency('frigga')
-  compile spinnaker.dependency('bootActuator')
-  compile spinnaker.dependency('bootWeb')
+  implementation project(":cats:cats-core")
+  implementation project(":clouddriver-api")
+  implementation project(":clouddriver-artifacts")
+  implementation project(":clouddriver-consul")
+  implementation project(":clouddriver-core")
+  implementation project(":clouddriver-google-common")
+  implementation project(":clouddriver-security")
 
-  // Move this to spinnaker-dependencies when we can confirm we'll use this feature.
-  compile "com.google.apis:google-api-services-iam:v1-rev225-1.23.0"
-  compile 'org.slf4j:jul-to-slf4j:1.7.25'
+  implementation "org.apache.groovy:groovy"
+  implementation "org.apache.groovy:groovy-json"
+  implementation "org.apache.commons:commons-lang3"
+  implementation "com.google.apis:google-api-services-compute"
+  implementation "com.google.guava:guava"
+  implementation "com.google.apis:google-api-services-iam"
+  implementation 'com.google.auth:google-auth-library-oauth2-http'
+  implementation "com.netflix.frigga:frigga"
+  implementation "com.netflix.spectator:spectator-api"
+  implementation "io.spinnaker.fiat:fiat-api:$fiatVersion"
+  implementation "io.spinnaker.fiat:fiat-core:$fiatVersion"
+  implementation "io.spinnaker.kork:kork-annotations"
+  implementation "io.spinnaker.kork:kork-artifacts"
+  implementation "io.spinnaker.kork:kork-credentials"
+  implementation "io.spinnaker.kork:kork-credentials-api"
+  implementation "io.spinnaker.kork:kork-config"
+  implementation "io.spinnaker.kork:kork-cloud-config-server"
+  implementation "io.spinnaker.kork:kork-moniker"
+  implementation "io.spinnaker.kork:kork-retrofit"
+  implementation "io.spinnaker.kork:kork-exceptions"
+  implementation "com.squareup.retrofit:retrofit"
+  implementation "org.springframework.boot:spring-boot-actuator"
+  implementation "org.springframework.boot:spring-boot-starter-web"
+  implementation "org.springframework.cloud:spring-cloud-context"
+  implementation "org.springframework.cloud:spring-cloud-config-server"
+
+  testImplementation "org.assertj:assertj-core"
+  testImplementation "cglib:cglib-nodep"
+  testImplementation "commons-fileupload:commons-fileupload:1.4"
+  testImplementation "org.apache.httpcomponents:httpmime"
+  testImplementation "org.junit.jupiter:junit-jupiter-api"
+  testImplementation "org.mockito:mockito-core"
+  testImplementation "org.mockito:mockito-inline"
+  testImplementation "org.mockito:mockito-junit-jupiter"
+  testImplementation "org.objenesis:objenesis"
+  testImplementation "org.spockframework:spock-core"
+  testImplementation "org.spockframework:spock-spring"
+  testImplementation "org.springframework:spring-test"
+  testImplementation "org.springframework.boot:spring-boot-test"
+  testImplementation "org.apache.groovy:groovy-test"
 }
a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/GoogleExecutor.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/GoogleExecutor.groovy similarity index 94% rename from clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/GoogleExecutor.groovy rename to clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/GoogleExecutor.groovy index 7883b18c089..3e806bd7a12 100644 --- a/clouddriver-google-common/src/main/groovy/com/netflix/spinnaker/clouddriver/googlecommon/GoogleExecutor.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/GoogleExecutor.groovy @@ -13,19 +13,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.netflix.spinnaker.clouddriver.googlecommon +package com.netflix.spinnaker.clouddriver.google -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.services.AbstractGoogleClientRequest import com.google.api.client.http.HttpResponseException import com.netflix.spectator.api.Clock -import com.netflix.spectator.api.Id import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import java.util.concurrent.TimeUnit import javax.annotation.PostConstruct +import java.util.concurrent.TimeUnit /** * Provides a static-ish means to wrap API execution calls with spectator metrics. @@ -75,12 +74,11 @@ class GoogleExecutor { final static String TAG_REGION = "region" final static String TAG_SCOPE = "scope" final static String TAG_ZONE = "zone" - final static String SCOPE_BATCH = "batch" final static String SCOPE_GLOBAL = "global" final static String SCOPE_REGIONAL = "regional" final static String SCOPE_ZONAL = "zonal" - public static T timeExecuteBatch(Registry spectator_registry, BatchRequest batch, String batchContext, String... tags) throws IOException { + public static T timeExecuteBatch(Registry spectator_registry, GoogleBatchRequest batch, String batchContext, String... tags) throws IOException { def batchSize = batch.size() def success = "false" Clock clock = spectator_registry.clock() @@ -125,4 +123,4 @@ class GoogleExecutor { } return result } -} \ No newline at end of file +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/GoogleExecutorTraits.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/GoogleExecutorTraits.groovy deleted file mode 100644 index a7b13f6b80d..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/GoogleExecutorTraits.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.google - -import com.google.api.client.googleapis.batch.BatchRequest -import com.google.api.client.googleapis.services.AbstractGoogleClientRequest -import com.google.api.client.http.HttpResponseException -import com.netflix.spinnaker.clouddriver.googlecommon.GoogleExecutor -import com.netflix.spinnaker.clouddriver.google.security.AccountForClient - -import com.netflix.spectator.api.Clock -import com.netflix.spectator.api.Id -import com.netflix.spectator.api.Registry - -import java.util.concurrent.TimeUnit - - -/** - * This class is syntactic sugar atop the static GoogleExecutor. - * By making it a trait, we can wrap the calls with less in-line syntax. - */ -trait GoogleExecutorTraits { - final String TAG_BATCH_CONTEXT = GoogleExecutor.TAG_BATCH_CONTEXT - final String TAG_REGION = GoogleExecutor.TAG_REGION - final String TAG_SCOPE = GoogleExecutor.TAG_SCOPE - final String TAG_ZONE = GoogleExecutor.TAG_ZONE - final String SCOPE_BATCH = GoogleExecutor.SCOPE_BATCH - final String SCOPE_GLOBAL = GoogleExecutor.SCOPE_GLOBAL - final String SCOPE_REGIONAL = GoogleExecutor.SCOPE_REGIONAL - final String SCOPE_ZONAL = GoogleExecutor.SCOPE_ZONAL - - abstract Registry getRegistry() - - public <T> T timeExecuteBatch(BatchRequest batch, String batchContext, String... tags) throws IOException { - return GoogleExecutor.timeExecuteBatch(getRegistry(), batch, batchContext, tags) - } - - public <T> T timeExecute(AbstractGoogleClientRequest<T> request, String api, String... tags) throws IOException { - String account = AccountForClient.getAccount(request.getAbstractGoogleClient()) - return GoogleExecutor.timeExecute(getRegistry(), request, "google.api", api, "account", account, *tags) - } -} - diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilder.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilder.groovy index 133b8d16978..72be39a307f 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilder.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilder.groovy @@ -16,11 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.cache +import com.google.common.collect.ImmutableSet +import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.DefaultCacheResult import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import groovy.util.logging.Slf4j +import static com.google.common.collect.ImmutableSet.toImmutableSet +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.ON_DEMAND @Slf4j @@ -30,6 +34,27 @@ class CacheResultBuilder { CacheMutation onDemand = new CacheMutation() + Set<String> authoritativeTypes = ImmutableSet.of() + + CacheResultBuilder() {} + + /** + * Create a CacheResultBuilder for the given dataTypes. + * + * Any authoritative types in dataTypes are guaranteed to be listed in the + * output. If you say you are authoritative for "clusters", but don't include + * any data under that namespace, an empty list will be included in the + * result. (Whereas if you don't pass dataTypes to the constructor, "clusters" + * will just be missing from the result if you don't specify any, and any + * existing clusters will remain in the cache). + */ + CacheResultBuilder(Collection<AgentDataType> dataTypes) { + authoritativeTypes = dataTypes.stream() + .filter({ dataType -> dataType.getAuthority() == AUTHORITATIVE }) + .map({ dataType -> dataType.getTypeName() }) + .collect(toImmutableSet()) + } + Map<String, NamespaceBuilder> namespaceBuilders = [:].withDefault { String ns -> new NamespaceBuilder(namespace: ns) } @@ -42,6 +67,9 @@ class CacheResultBuilder { Map<String, Collection<CacheData>> keep = [:] Map<String, Collection<String>> evict = [:] + authoritativeTypes.each { namespace -> + keep[namespace] = [] + } if (!onDemand.toKeep.empty) { keep += [(ON_DEMAND.ns): onDemand.toKeep.values()] }
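To make the constructor's guarantee concrete, here is a minimal Groovy sketch (hypothetical namespace names, run from the same package; it assumes the cats AgentDataType.Authority.forType factory used by the caching agents and the cacheResults map exposed by DefaultCacheResult):

    import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE
    import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE

    def builder = new CacheResultBuilder([
      AUTHORITATIVE.forType("clusters"),  // guaranteed to show up in the result
      INFORMATIVE.forType("instances"),   // only present if data is added for it
    ])
    def result = builder.build()
    assert result.cacheResults["clusters"] == []         // empty list, so stale cluster entries get replaced
    assert !result.cacheResults.containsKey("instances") // informative type with no data is simply absent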
diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/Keys.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/Keys.groovy index a6ec3d1b900..c4cfff1b6fd 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/Keys.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/cache/Keys.groovy @@ -16,12 +16,21 @@ package com.netflix.spinnaker.clouddriver.google.cache +import com.google.common.base.CaseFormat +import com.google.common.collect.ImmutableSet import com.netflix.frigga.Names +import com.netflix.spinnaker.clouddriver.cache.KeyParser import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider +import com.netflix.spinnaker.moniker.Moniker import groovy.util.logging.Slf4j +import org.springframework.stereotype.Component + +import javax.annotation.Nullable @Slf4j -class Keys { +@Component("GoogleKeys") +class Keys implements KeyParser { + static enum Namespace { ADDRESSES, APPLICATIONS, @@ -37,15 +46,12 @@ class Keys { SERVER_GROUPS, SSL_CERTIFICATES, SUBNETS, - ON_DEMAND, final String ns private Namespace() { - def parts = name().split('_') - - ns = parts.tail().inject(new StringBuilder(parts.head().toLowerCase())) { val, next -> val.append(next.charAt(0)).append(next.substring(1).toLowerCase()) } + ns = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name()) } String toString() { @@ -53,6 +59,28 @@ class Keys { } } + @Override + String getCloudProvider() { + // This is intentionally 'aws'. Refer to todos in SearchController#search for why. + return "aws" + } + + @Override + Map<String, String> parseKey(String key) { + return parse(key) + } + + @Override + Boolean canParseType(String type) { + return Namespace.values().any { it.ns == type } + } + + @Override + Boolean canParseField(String field) { + return false + } + + @Nullable static Map<String, String> parse(String key) { def parts = key.split(':') @@ -252,17 +280,21 @@ class Keys { } static String getServerGroupKey(String managedInstanceGroupName, + String cluster, String account, String region) { - getServerGroupKey(managedInstanceGroupName, account, region, null) + getServerGroupKey(managedInstanceGroupName, cluster, account, region, null) } static String getServerGroupKey(String managedInstanceGroupName, + String cluster, String account, String region, String zone) { - Names names = Names.parseName(managedInstanceGroupName) - "$GoogleCloudProvider.ID:${Namespace.SERVER_GROUPS}:${names.cluster}:${account}:${region}:${names.group}${zone ? ":$zone" : ""}" + if (cluster == null) { + cluster = Names.parseName(managedInstanceGroupName).cluster + } + "$GoogleCloudProvider.ID:${Namespace.SERVER_GROUPS}:${cluster}:${account}:${region}:${managedInstanceGroupName}${zone ? ":$zone" : ""}" } static String getSslCertificateKey(String account, diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/config/GoogleConfigurationProperties.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/config/GoogleConfigurationProperties.groovy index 5473558ad3c..587811aafbe 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/config/GoogleConfigurationProperties.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/config/GoogleConfigurationProperties.groovy @@ -18,12 +18,25 @@ package com.netflix.spinnaker.clouddriver.google.config import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig import com.netflix.spinnaker.clouddriver.googlecommon.config.GoogleCommonManagedAccount +import groovy.transform.Canonical import groovy.transform.ToString +import org.springframework.boot.context.properties.NestedConfigurationProperty class GoogleConfigurationProperties { public static final int ASYNC_OPERATION_TIMEOUT_SECONDS_DEFAULT = 300 public static final int ASYNC_OPERATION_MAX_POLLING_INTERVAL_SECONDS = 8 + /** + * Health check related config settings. + */ + @Canonical + static class HealthConfig { + /** + * Flag to toggle verifying account health check. By default, account health check is enabled. + */ + boolean verifyAccountHealth = true + } + @ToString(includeNames = true) static class ManagedAccount extends GoogleCommonManagedAccount { boolean alphaListed @@ -34,6 +47,7 @@ class GoogleConfigurationProperties { // defaultRegions if left unspecified. An empty list will index no regions. List<String> regions boolean required + String namingStrategy = "gceAnnotations" } List<ManagedAccount> accounts = [] @@ -44,4 +58,7 @@ class GoogleConfigurationProperties { // Takes a list of regions you want indexed. Will default to indexing all regions if left // unspecified. An empty list will index no regions. List<String> defaultRegions + + @NestedConfigurationProperty + final HealthConfig health = new HealthConfig() }
diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/controllers/GoogleNamedImageLookupController.java b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/controllers/GoogleNamedImageLookupController.java deleted file mode 100644 index 9dca63abce9..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/controllers/GoogleNamedImageLookupController.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.controllers; - -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.IMAGES; - -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.api.client.json.jackson2.JacksonFactory; -import com.google.api.services.compute.model.Image; -import com.google.common.annotations.VisibleForTesting; -import com.netflix.spinnaker.cats.cache.Cache; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; -import com.netflix.spinnaker.cats.mem.InMemoryCache; -import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider; -import com.netflix.spinnaker.clouddriver.google.cache.Keys; -import groovy.util.logging.Slf4j; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.function.Predicate; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import javax.servlet.http.HttpServletRequest; -import org.apache.commons.lang.StringUtils; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RequestParam; -import org.springframework.web.bind.annotation.RestController; - -@Slf4j -@RestController -@RequestMapping("/gce/images") -public class GoogleNamedImageLookupController { - - private final Cache cacheView; - private final JacksonFactory jsonMapper = new JacksonFactory(); - private final ObjectMapper objectMapper = new ObjectMapper().configure(JsonGenerator.Feature.WRITE_NUMBERS_AS_STRINGS, true); - - @Autowired - private GoogleNamedImageLookupController(Cache cacheView) { - this.cacheView = cacheView; - } - - @RequestMapping(value = "/find", method = RequestMethod.GET) - public List<NamedImage> list( - @RequestParam(required = false) String q, - @RequestParam(required = false) String account, - HttpServletRequest request - ) { - Map<String, List<Image>> imageMap = listImagesByAccount(); - - // Set of accounts for which we should return images: either the supplied account, or default - // to all accounts - Set<String> accounts; - if (StringUtils.isNotEmpty(account)) { - accounts = new HashSet<>(); - if (imageMap.containsKey(account)) { - accounts.add(account); - } - } else { - accounts = imageMap.keySet(); - } - - List<NamedImage> results = new ArrayList<>(); - for (String imageAccount : accounts) { - for (Image i : imageMap.get(imageAccount)) { - Map<String, Object> attributes = new HashMap<>(); - attributes.put("creationDate", i.get("creationTimestamp")); - NamedImage newImage = new NamedImage(imageAccount, i.getName(), attributes, buildTagsMap(i)); - results.add(newImage); - } - } - - Predicate<NamedImage> queryFilter = i -> true; - if (q != null && q.trim().length() > 0) { - String glob = q.trim(); - // Wrap in '*' if there are no glob-style characters in the query string. 
- if (!glob.contains("*") && !glob.contains("?") && !glob.contains("[") && !glob.contains("\\")) { - glob = "*" + glob + "*"; - } - Pattern pattern = new InMemoryCache.Glob(glob).toPattern(); - queryFilter = i -> pattern.matcher(i.imageName).matches(); - } - - return results.stream() - .filter(queryFilter) - .filter(namedImage -> matchesTagFilters(namedImage, extractTagFilters(request))) - .sorted(Comparator.comparing(image -> image.imageName)) - .collect(Collectors.toList()); - } - - private Map<String, List<Image>> listImagesByAccount() { - Collection<String> identifiers = cacheView.filterIdentifiers(IMAGES.getNs(), GoogleCloudProvider.getID() + ":*"); - Map<String, List<Image>> result = new HashMap<>(); - - Collection<CacheData> allCacheData = cacheView.getAll(IMAGES.getNs(), identifiers, RelationshipCacheFilter.none()); - allCacheData.forEach( - cacheData -> { - String account = Keys.parse(cacheData.getId()).get("account"); - if (!result.containsKey(account)) { - result.put(account, new ArrayList<>()); - } - Object hashImage = cacheData.getAttributes().get("image"); - try { - Image myImage = jsonMapper.fromString(objectMapper.writeValueAsString(hashImage), Image.class); - result.get(account).add(myImage); - } catch (IOException e) { - throw new RuntimeException("Image deserialization failed"); - } - } - ); - - return result; - } - - @VisibleForTesting - public static Map<String, String> buildTagsMap(Image image) { - Map<String, String> tags = new HashMap<>(); - - String description = image.getDescription(); - // For a description of the form: - // key1: value1, key2: value2, key3: value3 - // we'll build a map associating each key with - // its associated value - if (description != null) { - tags = Arrays.stream(description.split(",")) - .filter(token -> token.contains(": ")) - .map(token -> token.split(": ", 2)) - .collect(Collectors.toMap( - token -> token[0].trim(), - token -> token[1].trim(), - (a, b) -> b - )); - } - - Map<String, String> labels = image.getLabels(); - if (labels != null) { - tags.putAll(labels); - } - - return tags; - } - - /** - * Apply tag-based filtering to the list of named images. 
- * - * For example: /gce/images/find?q=PackageName&tag:stage=released&tag:somekey=someval - */ - private static List<NamedImage> filter(List<NamedImage> namedImages, Map<String, String> tagFilters) { - return namedImages.stream() - .filter(namedImage -> matchesTagFilters(namedImage, tagFilters)) - .collect(Collectors.toList()); - } - - private static boolean matchesTagFilters(NamedImage namedImage, Map<String, String> tagFilters) { - Map<String, String> tags = namedImage.tags; - return tagFilters.keySet().stream() - .allMatch( - tag -> tags.containsKey(tag.toLowerCase()) && tags.get(tag.toLowerCase()).equalsIgnoreCase(tagFilters.get(tag)) - ); - } - - private static Map<String, String> extractTagFilters(HttpServletRequest httpServletRequest) { - List<String> parameterNames = Collections.list(httpServletRequest.getParameterNames()); - - return parameterNames.stream() - .filter(Objects::nonNull) - .filter(parameter -> parameter.toLowerCase().startsWith("tag:")) - .collect(Collectors.toMap( - tagParameter -> tagParameter.replaceAll("tag:", "").toLowerCase(), - httpServletRequest::getParameter, - (a, b) -> b - )); - } - - @VisibleForTesting - public static class NamedImage { - public String account; - public String imageName; - public Map<String, Object> attributes = new HashMap<>(); - public Map<String, String> tags = new HashMap<>(); - - private NamedImage(String account, String imageName, Map<String, Object> attributes, Map<String, String> tags) { - this.account = account; - this.imageName = imageName; - this.attributes = attributes; - this.tags = tags; - } - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEServerGroupNameResolver.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEServerGroupNameResolver.groovy index d3e618b9383..e3fc032878a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEServerGroupNameResolver.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEServerGroupNameResolver.groovy @@ -17,11 +17,18 @@ package com.netflix.spinnaker.clouddriver.google.deploy import com.google.api.services.compute.model.InstanceGroupManager -import com.netflix.frigga.Names +import com.google.api.services.compute.model.InstanceProperties +import com.google.api.services.compute.model.InstanceTemplate +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.moniker.Moniker +import com.netflix.spinnaker.moniker.Namer class GCEServerGroupNameResolver extends AbstractServerGroupNameResolver { @@ -30,16 +37,23 @@ class GCEServerGroupNameResolver { private final String project private final String region private final GoogleNamedAccountCredentials credentials + private final GoogleClusterProvider googleClusterProvider private final GoogleExecutorTraits executor + private final Namer<GoogleLabeledResource> naming private SafeRetry safeRetry - GCEServerGroupNameResolver(String project, String region, GoogleNamedAccountCredentials credentials, SafeRetry safeRetry, GoogleExecutorTraits 
executor) { + GCEServerGroupNameResolver(String project, String region, GoogleNamedAccountCredentials credentials, GoogleClusterProvider googleClusterProvider, SafeRetry safeRetry, GoogleExecutorTraits executor) { this.project = project this.region = region this.credentials = credentials + this.googleClusterProvider = googleClusterProvider this.safeRetry = safeRetry this.executor = executor + this.naming = NamerRegistry.lookup() + .withProvider(GoogleCloudProvider.ID) + .withAccount(credentials.name) + .withResource(GoogleLabeledResource) } @Override @@ -55,7 +69,6 @@ class GCEServerGroupNameResolver extends AbstractServerGroupNameResolver { @Override List getTakenSlots(String clusterName) { def managedInstanceGroups = GCEUtil.queryAllManagedInstanceGroups(project, region, credentials, task, phase, safeRetry, executor) - return findMatchingManagedInstanceGroups(managedInstanceGroups, clusterName) } @@ -65,18 +78,34 @@ class GCEServerGroupNameResolver extends AbstractServerGroupNameResolver { return [] } + def instanceTemplates = GCEUtil.queryAllInstanceTemplates(credentials, executor) + return managedInstanceGroups.findResults { managedInstanceGroup -> - def names = Names.parseName(managedInstanceGroup.name) + String instanceTemplateName = GCEUtil.getLocalName(managedInstanceGroup.getInstanceTemplate()) + InstanceTemplate instanceTemplate = instanceTemplates.find { it.getName() == instanceTemplateName } + InstanceProperties instanceProperties = instanceTemplate.getProperties() + GoogleLabeledManagedInstanceGroup labeledInstanceTemplate = new GoogleLabeledManagedInstanceGroup(managedInstanceGroup.getName(), instanceProperties.getLabels()) + Moniker moniker = naming.deriveMoniker(labeledInstanceTemplate) - if (names.cluster == clusterName) { + if (moniker.cluster == clusterName) { return new AbstractServerGroupNameResolver.TakenSlot( serverGroupName: managedInstanceGroup.name, - sequence : names.sequence, - createdTime : new Date(Utils.getTimeFromTimestamp(managedInstanceGroup.creationTimestamp)) + sequence: moniker.sequence, + createdTime: new Date(Utils.getTimeFromTimestamp(managedInstanceGroup.getCreationTimestamp())) ) } else { return null } } } + + private class GoogleLabeledManagedInstanceGroup implements GoogleLabeledResource { + String name + Map labels + + GoogleLabeledManagedInstanceGroup (String name, Map labels) { + this.name = name + this.labels = labels + } + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy index 2225067a288..34a781404e6 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy @@ -17,26 +17,21 @@ package com.netflix.spinnaker.clouddriver.google.deploy import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.googleapis.json.GoogleJsonResponseException import com.google.api.client.http.GenericUrl import com.google.api.client.http.HttpHeaders -import com.google.api.client.http.HttpRequest -import com.google.api.client.http.HttpRequestInitializer import com.google.api.client.http.HttpResponse import 
com.google.api.client.json.JsonObjectParser -import com.google.api.client.json.jackson2.JacksonFactory +import com.google.api.client.json.gson.GsonFactory import com.google.api.services.compute.Compute import com.google.api.services.compute.model.* import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactUtils -import com.netflix.spinnaker.clouddriver.consul.provider.ConsulProviderUtils import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.description.BaseGoogleInstanceDescription import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription @@ -46,7 +41,6 @@ import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException import com.netflix.spinnaker.clouddriver.google.model.* import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils -import com.netflix.spinnaker.clouddriver.google.model.health.GoogleInstanceHealth import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider @@ -54,20 +48,29 @@ import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancer import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.kork.artifacts.model.Artifact import groovy.util.logging.Slf4j -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.HTTP_HEALTH_CHECKS import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.HEALTH_CHECKS +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.HTTP_HEALTH_CHECKS @Slf4j class GCEUtil { + public static final String GCE_IMAGE_TYPE = "gce/image"; private static final String DISK_TYPE_PERSISTENT = "PERSISTENT" private static final String DISK_TYPE_SCRATCH = "SCRATCH" - private static final String GCE_API_PREFIX = "https://www.googleapis.com/compute/v1/projects/" - private static final List RETRY_ERROR_CODES = [400, 403, 412] + private static final String GCE_API_PREFIX = "https://compute.googleapis.com/compute/v1/projects/" + private static final List RETRY_ERROR_CODES = [400, 403, 412, 429, 503] public static final String TARGET_POOL_NAME_PREFIX = "tp" + public static final String REGIONAL_LOAD_BALANCER_NAMES = "load-balancer-names" + public static final String GLOBAL_LOAD_BALANCER_NAMES = "global-load-balancer-names" + public static final String BACKEND_SERVICE_NAMES = "backend-service-names" + public static final String REGION_BACKEND_SERVICE_NAMES = "region-backend-service-names" + public static final String LOAD_BALANCING_POLICY = "load-balancing-policy" + public static final String SELECT_ZONES = 'select-zones' + public static final String 
AUTOSCALING_POLICY = 'autoscaling-policy' static String queryMachineType(String instanceType, String location, GoogleNamedAccountCredentials credentials, Task task, String phase) { task.updateStatus phase, "Looking up machine type $instanceType..." @@ -79,10 +82,8 @@ class GCEUtil { } } - static Image queryImage(String projectName, - String imageName, + static Image queryImage(String imageName, GoogleNamedAccountCredentials credentials, - Compute compute, Task task, String phase, String clouddriverUserAgentApplicationName, @@ -91,10 +92,10 @@ class GCEUtil { task.updateStatus phase, "Looking up image $imageName..." def filter = "name eq $imageName" - def imageProjects = [projectName] + credentials?.imageProjects + baseImageProjects - null + def imageProjects = [credentials.project] + credentials?.imageProjects + baseImageProjects - null def sourceImage = null - def imageListBatch = buildBatchRequest(compute, clouddriverUserAgentApplicationName) + def imageListBatch = new GoogleBatchRequest(credentials.compute, clouddriverUserAgentApplicationName) def imageListCallback = new JsonBatchCallback() { @Override void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { @@ -110,9 +111,9 @@ class GCEUtil { } imageProjects.each { imageProject -> - def imagesList = compute.images().list(imageProject) + def imagesList = credentials.compute.images().list(imageProject) imagesList.setFilter(filter) - imagesList.queue(imageListBatch, imageListCallback) + imageListBatch.queue(imagesList, imageListCallback) } executor.timeExecuteBatch(imageListBatch, "findImage") @@ -130,8 +131,8 @@ class GCEUtil { String phase, SafeRetry safeRetry, GoogleExecutorTraits executor) { - if (artifact.getType() != ArtifactUtils.GCE_IMAGE_TYPE) { - throw new GoogleOperationException("Artifact to deploy to GCE must be of type ${ArtifactUtils.GCE_IMAGE_TYPE}") + if (artifact.getType() != GCE_IMAGE_TYPE) { + throw new GoogleOperationException("Artifact to deploy to GCE must be of type ${GCE_IMAGE_TYPE}") } def reference = artifact.getReference() @@ -141,7 +142,7 @@ class GCEUtil { { return compute.getRequestFactory() .buildGetRequest(new GenericUrl(reference)) - .setParser(new JsonObjectParser(JacksonFactory.getDefaultInstance())) + .setParser(new JsonObjectParser(GsonFactory.getDefaultInstance())) .execute() }, "gce/image", @@ -172,10 +173,8 @@ class GCEUtil { executor ) } else { - return queryImage(description.credentials.project, - description.image, + return queryImage(description.image, description.credentials, - description.credentials.compute, task, phase, clouddriverUserAgentApplicationName, @@ -184,15 +183,28 @@ class GCEUtil { } } - private static BatchRequest buildBatchRequest(def compute, String clouddriverUserAgentApplicationName) { - return compute.batch( - new HttpRequestInitializer() { - @Override - void initialize(HttpRequest request) throws IOException { - request.headers.setUserAgent(clouddriverUserAgentApplicationName); - } + static boolean isShieldedVmCompatible(Image image) { + java.util.List guestOsFeatureList = image.getGuestOsFeatures() + if (guestOsFeatureList == null || guestOsFeatureList.size() == 0) { + return false + } + + boolean isUefiCompatible = false; + boolean isSecureBootCompatible = false; + + guestOsFeatureList.each { feature -> + if (feature.getType() == "UEFI_COMPATIBLE") { + isUefiCompatible= true + return } - ) + + if (feature.getType() == "SECURE_BOOT") { + isSecureBootCompatible = true + return + } + } + + return isUefiCompatible && isSecureBootCompatible } 
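The contract of isShieldedVmCompatible above, sketched with hypothetical images (the feature type strings UEFI_COMPATIBLE and SECURE_BOOT are the Compute API's guest OS feature names, and both must be present):

    import com.google.api.services.compute.model.GuestOsFeature
    import com.google.api.services.compute.model.Image

    def shielded = new Image(guestOsFeatures: [
      new GuestOsFeature(type: "UEFI_COMPATIBLE"),
      new GuestOsFeature(type: "SECURE_BOOT"),
    ])
    assert GCEUtil.isShieldedVmCompatible(shielded)     // both features present
    assert !GCEUtil.isShieldedVmCompatible(new Image()) // no guest OS features at all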
static GoogleNetwork queryNetwork(String accountName, String networkName, Task task, String phase, GoogleNetworkProvider googleNetworkProvider) { @@ -226,8 +238,8 @@ class GCEUtil { // Try to retrieve this forwarding rule in each region. def all_regions = executor.timeExecute(compute.regions().list(projectName), - "compute.regions.list", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + "compute.regions.list", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) for (def region : all_regions.items) { try { return executor.timeExecute( @@ -285,28 +297,28 @@ class GCEUtil { static def queryHealthCheck(String projectName, String account, String healthCheckName, + GoogleHealthCheck.HealthCheckKind healthCheckKind, Compute compute, Cache cacheView, Task task, String phase, GoogleExecutorTraits executor) { - task.updateStatus phase, "Looking up http(s) health check $healthCheckName..." - - def httpHealthCheckIdentifiers = cacheView.filterIdentifiers(HTTP_HEALTH_CHECKS.ns, Keys.getHttpHealthCheckKey(account, healthCheckName)) - def results = cacheView.getAll(HTTP_HEALTH_CHECKS.ns, httpHealthCheckIdentifiers, RelationshipCacheFilter.none()) + task.updateStatus phase, "Looking up health check $healthCheckName..." - if (results[0]?.attributes?.httpHealthCheck) { - return results[0]?.attributes?.httpHealthCheck - } else { - try { - // TODO(duftler): Update this to use the cache instead of a live call once we are caching https health checks. - return executor.timeExecute( - compute.httpsHealthChecks().get(projectName, healthCheckName), - "compute.httpsHealthChecks.get", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL) - } catch (GoogleJsonResponseException | SocketTimeoutException | SocketException _) { - updateStatusAndThrowNotFoundException("Http(s) health check $healthCheckName not found.", task, phase) - } + switch (healthCheckKind) { + case GoogleHealthCheck.HealthCheckKind.healthCheck: + return queryNestedHealthCheck(projectName, account, healthCheckName, compute, cacheView, task, phase, executor) + case GoogleHealthCheck.HealthCheckKind.httpHealthCheck: + return queryLegacyHttpHealthCheck(account, healthCheckName, cacheView, task, phase) + case GoogleHealthCheck.HealthCheckKind.httpsHealthCheck: + return queryLegacyHttpsHealthCheck(projectName, healthCheckName, compute, task, phase, executor) + default: + // Note: Cache queries for these health checks must occur in this order since queryLegacyHttpsHealthCheck() will make a live + // call that fails on a missing health check. + // todo(mneterval): return null instead of querying for each type once all health check payloads include `healthCheckKind` + return queryNestedHealthCheck(projectName, account, healthCheckName, compute, cacheView, task, phase, executor) ?: + queryLegacyHttpHealthCheck(account, healthCheckName, cacheView, task, phase) ?: + queryLegacyHttpsHealthCheck(projectName, healthCheckName, compute, task, phase, executor) } } @@ -326,6 +338,36 @@ class GCEUtil { return results[0]?.attributes?.healthCheck } + static def queryLegacyHttpHealthCheck(String account, + String healthCheckName, + Cache cacheView, + Task task, + String phase) { + task.updateStatus phase, "Looking up http health check $healthCheckName..." 
+ + def httpHealthCheckIdentifiers = cacheView.filterIdentifiers(HTTP_HEALTH_CHECKS.ns, Keys.getHttpHealthCheckKey(account, healthCheckName)) + def results = cacheView.getAll(HTTP_HEALTH_CHECKS.ns, httpHealthCheckIdentifiers, RelationshipCacheFilter.none()) + return results[0]?.attributes?.httpHealthCheck + } + + static def queryLegacyHttpsHealthCheck(String projectName, + String healthCheckName, + Compute compute, + Task task, + String phase, + GoogleExecutorTraits executor) { + task.updateStatus phase, "Looking up https health check $healthCheckName..." + try { + // TODO(duftler): Update this to use the cache instead of a live call once we are caching https health checks. + return executor.timeExecute( + compute.httpsHealthChecks().get(projectName, healthCheckName), + "compute.httpsHealthChecks.get", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + } catch (GoogleJsonResponseException | SocketTimeoutException | SocketException _) { + updateStatusAndThrowNotFoundException("Https health check $healthCheckName not found.", task, phase) + } + } + static List queryRegionalForwardingRules(String projectName, String region, List forwardingRuleNames, @@ -338,10 +380,10 @@ class GCEUtil { def forwardingRules = safeRetry.doRetry( { return executor.timeExecute( - compute.forwardingRules().list(projectName, region), - "compute.forwardingRules.list", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL - ).items + compute.forwardingRules().list(projectName, region), + "compute.forwardingRules.list", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL + ).items }, "regional forwarding rules", task, @@ -354,7 +396,7 @@ class GCEUtil { it.name in forwardingRuleNames } - if (foundForwardingRules.size == forwardingRuleNames.size) { + if (foundForwardingRules.size() == forwardingRuleNames.size()) { return foundForwardingRules } else { def foundNames = foundForwardingRules.collect { it.name } @@ -370,10 +412,10 @@ class GCEUtil { def loadBalancers = googleLoadBalancerProvider.getApplicationLoadBalancers("") as List def foundLoadBalancers = loadBalancers.findAll { it.name in forwardingRuleNames } - if (foundLoadBalancers.size == forwardingRuleNames.size) { + if (foundLoadBalancers.size() == forwardingRuleNames.size()) { return foundLoadBalancers } else { - def foundNames = loadBalancers.collect { it.name } + def foundNames = foundLoadBalancers.collect { it.name } updateStatusAndThrowNotFoundException("Load balancers ${forwardingRuleNames - foundNames} not found.", task, phase) } } @@ -406,7 +448,7 @@ class GCEUtil { } }.flatten() - null - if (foundInstances.size == instanceLocalNames.size) { + if (foundInstances.size() == instanceLocalNames.size()) { return foundInstances.collect { it.selfLink } } else { def foundNames = foundInstances.collect { it.name } @@ -425,9 +467,9 @@ class GCEUtil { GoogleExecutorTraits executor) { return safeRetry.doRetry( { return executor.timeExecute( - credentials.compute.regionInstanceGroupManagers().get(projectName, region, serverGroupName), - "compute.regionInstanceGroupManagers.get", - executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + credentials.compute.regionInstanceGroupManagers().get(projectName, region, serverGroupName), + "compute.regionInstanceGroupManagers.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) }, "regional managed instance group", task, @@ -448,9 +490,9 @@ class GCEUtil { GoogleExecutorTraits executor) { return safeRetry.doRetry( { return executor.timeExecute( - 
credentials.compute.instanceGroupManagers().get(projectName, zone, serverGroupName), - "compute.instanceGroupManagers.get", - executor.TAG_SCOPE, executor.SCOPE_ZONAL, executor.TAG_ZONE, zone) + credentials.compute.instanceGroupManagers().get(projectName, zone, serverGroupName), + "compute.instanceGroupManagers.get", + executor.TAG_SCOPE, executor.SCOPE_ZONAL, executor.TAG_ZONE, zone) }, "zonal managed instance group", task, @@ -538,17 +580,34 @@ class GCEUtil { } } - static BaseGoogleInstanceDescription buildInstanceDescriptionFromTemplate(InstanceTemplate instanceTemplate) { + static InstanceTemplate queryInstanceTemplate(String instanceTemplateName, + GoogleNamedAccountCredentials credentials, + GoogleExecutorTraits executor) { + return executor.timeExecute( + credentials.compute.instanceTemplates().get(credentials.project, instanceTemplateName), + "compute.instanceTemplates.get", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + } + + static List queryAllInstanceTemplates(GoogleNamedAccountCredentials credentials, + GoogleExecutorTraits executor) { + return executor.timeExecute( + credentials.compute.instanceTemplates().list(credentials.project), + "compute.instanceTemplates.list", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL).getItems() + } + + static BaseGoogleInstanceDescription buildInstanceDescriptionFromTemplate(String project, InstanceTemplate instanceTemplate) { def instanceTemplateProperties = instanceTemplate?.properties if (instanceTemplateProperties == null) { throw new GoogleOperationException("Unable to determine properties of instance template " + - "$instanceTemplate.name.") + "$instanceTemplate.name.") } - if (instanceTemplateProperties.networkInterfaces?.size != 1) { + if (instanceTemplateProperties.networkInterfaces?.size() != 1) { throw new GoogleOperationException("Instance templates must have exactly one network interface defined. " + - "Instance template $instanceTemplate.name has ${instanceTemplateProperties.networkInterfaces?.size}.") + "Instance template $instanceTemplate.name has ${instanceTemplateProperties.networkInterfaces?.size()}.") } def image @@ -562,16 +621,18 @@ class GCEUtil { def initializeParams = attachedDisk.initializeParams new GoogleDisk(type: initializeParams.diskType, - sizeGb: initializeParams.diskSizeGb, - autoDelete: attachedDisk.autoDelete) + sizeGb: initializeParams.diskSizeGb, + autoDelete: attachedDisk.autoDelete, + labels: instanceTemplateProperties.labels) } } else { throw new GoogleOperationException("Instance templates must have at least one disk defined. 
Instance template " + - "$instanceTemplate.name has ${instanceTemplateProperties.disks?.size}.") + "$instanceTemplate.name has ${instanceTemplateProperties.disks?.size()}.") } def networkInterface = instanceTemplateProperties.networkInterfaces[0] def serviceAccountEmail = instanceTemplateProperties.serviceAccounts?.getAt(0)?.email + def shieldedVmConfig = instanceTemplateProperties.shieldedVmConfig return new BaseGoogleInstanceDescription( image: image, @@ -582,59 +643,16 @@ class GCEUtil { [it.key, it.value] }, tags: instanceTemplateProperties.tags?.items, - network: getLocalName(networkInterface.network), + network: Utils.decorateXpnResourceIdIfNeeded(project, networkInterface.network), + subnet: Utils.decorateXpnResourceIdIfNeeded(project, networkInterface.subnet), serviceAccountEmail: serviceAccountEmail, - authScopes: retrieveScopesFromServiceAccount(serviceAccountEmail, instanceTemplateProperties.serviceAccounts) + authScopes: retrieveScopesFromServiceAccount(serviceAccountEmail, instanceTemplateProperties.serviceAccounts), + enableSecureBoot: shieldedVmConfig?.enableSecureBoot, + enableVtpm: shieldedVmConfig?.enableVtpm, + enableIntegrityMonitoring: shieldedVmConfig?.enableIntegrityMonitoring ) } - static GoogleAutoscalingPolicy buildAutoscalingPolicyDescriptionFromAutoscalingPolicy( - AutoscalingPolicy autoscalingPolicy) { - if (!autoscalingPolicy) { - return null - } - - autoscalingPolicy.with { - def autoscalingPolicyDescription = - new GoogleAutoscalingPolicy( - coolDownPeriodSec: coolDownPeriodSec, - minNumReplicas: minNumReplicas, - maxNumReplicas: maxNumReplicas - ) - - if (cpuUtilization) { - autoscalingPolicyDescription.cpuUtilization = - new GoogleAutoscalingPolicy.CpuUtilization( - utilizationTarget: cpuUtilization.utilizationTarget - ) - } - - if (loadBalancingUtilization) { - autoscalingPolicyDescription.loadBalancingUtilization = - new GoogleAutoscalingPolicy.LoadBalancingUtilization( - utilizationTarget: loadBalancingUtilization.utilizationTarget - ) - } - - if (customMetricUtilizations) { - autoscalingPolicyDescription.customMetricUtilizations = - customMetricUtilizations.collect { - new GoogleAutoscalingPolicy.CustomMetricUtilization( - metric: it.metric, - utilizationTarget: it.utilizationTarget, - utilizationTargetType: it.utilizationTargetType - ) - } - } - - if (mode) { - autoscalingPolicyDescription.mode = AutoscalingMode.valueOf(mode) - } - - return autoscalingPolicyDescription - } - } - static GoogleAutoHealingPolicy buildAutoHealingPolicyDescriptionFromAutoHealingPolicy( InstanceGroupManagerAutoHealingPolicy autoHealingPolicy) { if (!autoHealingPolicy) { @@ -664,6 +682,10 @@ class GCEUtil { return GCE_API_PREFIX + "$projectName/global/sslCertificates/$certName" } + static String buildRegionalCertificateUrl(String projectName, String region, String certName) { + return GCE_API_PREFIX + "$projectName/regions/$region/sslCertificates/$certName" + } + static String buildHttpHealthCheckUrl(String projectName, String healthCheckName) { return GCE_API_PREFIX + "$projectName/global/httpHealthChecks/$healthCheckName" } @@ -676,6 +698,14 @@ class GCEUtil { return GCE_API_PREFIX + "$projectName/global/healthChecks/$healthCheckName" } + static String buildRegionalHealthCheckUrl(String projectName, String region, String healthCheckName) { + return GCE_API_PREFIX + "$projectName/regions/$region/healthChecks/$healthCheckName" + } + + static String buildInstanceTemplateUrl(String projectName, String templateName) { + return GCE_API_PREFIX + 
"$projectName/global/instanceTemplates/$templateName" + } + static String buildBackendServiceUrl(String projectName, String backendServiceName) { return GCE_API_PREFIX + "$projectName/global/backendServices/$backendServiceName" } @@ -700,6 +730,7 @@ class GCEUtil { String phase, String clouddriverUserAgentApplicationName, List baseImageProjects, + Image bootImage, SafeRetry safeRetry, GoogleExecutorTraits executor) { def credentials = description.credentials @@ -714,13 +745,11 @@ class GCEUtil { throw new GoogleOperationException("Unable to determine disks for instance type $instanceType.") } - def bootImage = getBootImage(description, - task, - phase, - clouddriverUserAgentApplicationName, - baseImageProjects, - safeRetry, - executor) + disks.findAll { it.isPersistent() } + .eachWithIndex { disk, i -> + def baseDeviceName = description.baseDeviceName ?: 'device' + disk.deviceName = "$baseDeviceName-$i" + } def firstPersistentDisk = disks.find { it.persistent } return disks.collect { disk -> @@ -730,16 +759,14 @@ class GCEUtil { if (disk.persistent) { sourceImage = disk.is(firstPersistentDisk) - ? bootImage - : queryImage(credentials.project, - disk.sourceImage, - credentials, - credentials.compute, - task, - phase, - clouddriverUserAgentApplicationName, - baseImageProjects, - executor) + ? bootImage + : queryImage(disk.sourceImage, + credentials, + task, + phase, + clouddriverUserAgentApplicationName, + baseImageProjects, + executor) } if (sourceImage && sourceImage.diskSizeGb > disk.sizeGb) { @@ -748,13 +775,15 @@ class GCEUtil { def attachedDiskInitializeParams = new AttachedDiskInitializeParams(sourceImage: sourceImage?.selfLink, - diskSizeGb: disk.sizeGb, - diskType: diskType) + diskSizeGb: disk.sizeGb, + diskType: diskType, + labels: description.labels) new AttachedDisk(boot: disk.is(firstPersistentDisk), - autoDelete: disk.autoDelete, - type: disk.persistent ? DISK_TYPE_PERSISTENT : DISK_TYPE_SCRATCH, - initializeParams: attachedDiskInitializeParams) + autoDelete: disk.autoDelete, + deviceName: disk.deviceName, + type: disk.persistent ? DISK_TYPE_PERSISTENT : DISK_TYPE_SCRATCH, + initializeParams: attachedDiskInitializeParams) } } @@ -764,7 +793,7 @@ class GCEUtil { String accessConfigName, String accessConfigType) { NetworkInterface networkInterface = new NetworkInterface(network: network.selfLink, - subnetwork: subnet ? subnet.selfLink : null) + subnetwork: subnet ? subnet.selfLink : null) if (associatePublicIpAddress) { networkInterface.setAccessConfigs([new AccessConfig(name: accessConfigName, type: accessConfigType)]) @@ -786,7 +815,8 @@ class GCEUtil { } static Map buildMapFromMetadata(Metadata metadata) { - def map = metadata?.items?.collectEntries { def metadataItems -> + Map map = metadata?.getItems()?.collectEntries { def metadataItems -> + // Abuse Groovy's lack of respect for boundaries to query the properties directly. [(metadataItems.key): metadataItems.value] } @@ -803,32 +833,67 @@ class GCEUtil { GoogleAutoscalingPolicy autoscalingPolicy) { autoscalingPolicy.with { def gceAutoscalingPolicy = new AutoscalingPolicy(coolDownPeriodSec: coolDownPeriodSec, - minNumReplicas: minNumReplicas, - maxNumReplicas: maxNumReplicas, - mode: mode ? mode.toString() : "ON" + minNumReplicas: minNumReplicas, + maxNumReplicas: maxNumReplicas, + mode: mode ? 
mode.toString() : "ON" ) if (cpuUtilization) { - gceAutoscalingPolicy.cpuUtilization = - new AutoscalingPolicyCpuUtilization(utilizationTarget: cpuUtilization.utilizationTarget) + if (cpuUtilization.utilizationTarget) { + gceAutoscalingPolicy.cpuUtilization = + new AutoscalingPolicyCpuUtilization(utilizationTarget: cpuUtilization.utilizationTarget, + predictiveMethod: cpuUtilization.predictiveMethod) + } } if (loadBalancingUtilization) { - gceAutoscalingPolicy.loadBalancingUtilization = + if (loadBalancingUtilization.utilizationTarget) { + gceAutoscalingPolicy.loadBalancingUtilization = new AutoscalingPolicyLoadBalancingUtilization(utilizationTarget: loadBalancingUtilization.utilizationTarget) + } } if (customMetricUtilizations) { gceAutoscalingPolicy.customMetricUtilizations = customMetricUtilizations.collect { new AutoscalingPolicyCustomMetricUtilization(metric: it.metric, - utilizationTarget: it.utilizationTarget, - utilizationTargetType: it.utilizationTargetType) + utilizationTarget: it.utilizationTarget, + utilizationTargetType: it.utilizationTargetType, + filter: it.filter, + singleInstanceAssignment: it.singleInstanceAssignment) + } + } + + if (scalingSchedules) { + gceAutoscalingPolicy.scalingSchedules = scalingSchedules.collectEntries { scalingSchedule -> + [scalingSchedule.scheduleName , new AutoscalingPolicyScalingSchedule(description: scalingSchedule.scheduleDescription, + disabled: !scalingSchedule.enabled, + durationSec: scalingSchedule.duration, + minRequiredReplicas: scalingSchedule.minimumRequiredInstances, + schedule: scalingSchedule.scheduleCron, + timeZone: scalingSchedule.timezone)] + } + } + + if (scaleInControl) { + if (scaleInControl.maxScaledInReplicas && scaleInControl.timeWindowSec) { + def scaledInReplicasInput = scaleInControl.maxScaledInReplicas + FixedOrPercent maxScaledInReplicas = null + if (scaledInReplicasInput != null) { + maxScaledInReplicas = new FixedOrPercent( + fixed: scaledInReplicasInput.fixed, + percent: scaledInReplicasInput.percent + ) + } + gceAutoscalingPolicy.scaleInControl = + new AutoscalingPolicyScaleInControl( + maxScaledInReplicas: maxScaledInReplicas, + timeWindowSec: scaleInControl.timeWindowSec) } } new Autoscaler(name: serverGroupName, - target: targetLink, - autoscalingPolicy: gceAutoscalingPolicy) + target: targetLink, + autoscalingPolicy: gceAutoscalingPolicy) } } @@ -851,11 +916,11 @@ class GCEUtil { // We only support zero or one service account per instance/instance-template. static List buildServiceAccount(String serviceAccountEmail, List authScopes) { return serviceAccountEmail && authScopes - ? [new ServiceAccount(email: serviceAccountEmail, scopes: resolveAuthScopes(authScopes))] - : [] + ? 
[new ServiceAccount(email: serviceAccountEmail, scopes: resolveAuthScopes(authScopes))] + : [] } - static ServiceAccount buildScheduling(BaseGoogleInstanceDescription description) { + static Scheduling buildScheduling(BaseGoogleInstanceDescription description) { def scheduling = new Scheduling() if (description.preemptible != null) { @@ -873,6 +938,24 @@ class GCEUtil { return scheduling } + static ShieldedVmConfig buildShieldedVmConfig(BaseGoogleInstanceDescription description) { + def shieldedVmConfig = new ShieldedVmConfig() + + if (description.enableSecureBoot != null) { + shieldedVmConfig.enableSecureBoot = description.enableSecureBoot + } + + if (description.enableVtpm != null) { + shieldedVmConfig.enableVtpm = description.enableVtpm + } + + if (description.enableIntegrityMonitoring != null) { + shieldedVmConfig.enableIntegrityMonitoring = description.enableIntegrityMonitoring + } + + return shieldedVmConfig + } + static void updateStatusAndThrowNotFoundException(String errorMsg, Task task, String phase) { task.updateStatus phase, errorMsg throw new GoogleResourceNotFoundException(errorMsg) @@ -905,13 +988,13 @@ class GCEUtil { static def buildHttpHealthCheck(String name, UpsertGoogleLoadBalancerDescription.HealthCheck healthCheckDescription) { return new HttpHealthCheck( - name: name, - checkIntervalSec: healthCheckDescription.checkIntervalSec, - timeoutSec: healthCheckDescription.timeoutSec, - healthyThreshold: healthCheckDescription.healthyThreshold, - unhealthyThreshold: healthCheckDescription.unhealthyThreshold, - port: healthCheckDescription.port, - requestPath: healthCheckDescription.requestPath) + name: name, + checkIntervalSec: healthCheckDescription.checkIntervalSec, + timeoutSec: healthCheckDescription.timeoutSec, + healthyThreshold: healthCheckDescription.healthyThreshold, + unhealthyThreshold: healthCheckDescription.unhealthyThreshold, + port: healthCheckDescription.port, + requestPath: healthCheckDescription.requestPath) } static void addInternalLoadBalancerBackends(Compute compute, @@ -920,20 +1003,21 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { String serverGroupName = serverGroup.name String region = serverGroup.region Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata Map metadataMap = buildMapFromMetadata(instanceMetadata) - def regionalLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + def regionalLoadBalancersInMetadata = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] def internalLoadBalancersToAddTo = queryAllLoadBalancers(googleLoadBalancerProvider, regionalLoadBalancersInMetadata, task, phase) .findAll { it.loadBalancerType == GoogleLoadBalancerType.INTERNAL } if (!internalLoadBalancersToAddTo) { log.warn("Cache call missed for internal load balancer, making a call to GCP") List projectRegionalForwardingRules = executor.timeExecute( - compute.forwardingRules().list(project, region), - "compute.forwardingRules.list", - executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region + compute.forwardingRules().list(project, region), + "compute.forwardingRules.list", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region ).getItems() internalLoadBalancersToAddTo = projectRegionalForwardingRules.findAll { // TODO(jacobkiefer): Update this check if any other types of 
loadbalancers support backend services from regional forwarding rules. @@ -959,10 +1043,12 @@ class GCEUtil { backendService.backends = [] } backendService.backends << backendToAdd - executor.timeExecute( + def updateOp = executor.timeExecute( compute.regionBackendServices().update(project, region, backendServiceName, backendService), "compute.regionBackendServices.update", executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, updateOp.getName(), + null, task, "compute.${region}.backendServices.update", phase) task.updateStatus phase, "Enabled backend for server group ${serverGroupName} in Internal load balancer backend service ${backendServiceName}." } } @@ -975,16 +1061,17 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { String serverGroupName = serverGroup.name Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata - Map metadataMap = buildMapFromMetadata(instanceMetadata) - def httpLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] - def networkLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + Map metadataMap = buildMapFromMetadata(instanceMetadata) + def httpLoadBalancersInMetadata = metadataMap?.get(GLOBAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + def networkLoadBalancersInMetadata = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] def allFoundLoadBalancers = (httpLoadBalancersInMetadata + networkLoadBalancersInMetadata) as List def httpLoadBalancersToAddTo = queryAllLoadBalancers(googleLoadBalancerProvider, allFoundLoadBalancers, task, phase) - .findAll { it.loadBalancerType == GoogleLoadBalancerType.HTTP } + .findAll { it.loadBalancerType == GoogleLoadBalancerType.HTTP } if (!httpLoadBalancersToAddTo) { log.warn("Cache call missed for Http load balancers ${httpLoadBalancersInMetadata}, making a call to GCP") List projectGlobalForwardingRules = executor.timeExecute( @@ -999,13 +1086,13 @@ class GCEUtil { } if (httpLoadBalancersToAddTo) { - String policyJson = metadataMap?.(GoogleServerGroup.View.LOAD_BALANCING_POLICY) + String policyJson = metadataMap?.get(LOAD_BALANCING_POLICY) if (!policyJson) { updateStatusAndThrowNotFoundException("Load Balancing Policy not found for server group ${serverGroupName}", task, phase) } GoogleHttpLoadBalancingPolicy policy = objectMapper.readValue(policyJson, GoogleHttpLoadBalancingPolicy) - List backendServiceNames = metadataMap?.(GoogleServerGroup.View.BACKEND_SERVICE_NAMES)?.split(",") ?: [] + List backendServiceNames = metadataMap?.get(BACKEND_SERVICE_NAMES)?.split(",") ?: [] if (backendServiceNames) { backendServiceNames.each { String backendServiceName -> BackendService backendService = executor.timeExecute( @@ -1022,16 +1109,84 @@ class GCEUtil { backendService.backends = [] } backendService.backends << backendToAdd - executor.timeExecute( + def updateOp = executor.timeExecute( compute.backendServices().update(project, backendServiceName, backendService), "compute.backendServices.update", executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.backendService.update', phase) task.updateStatus phase, "Enabled backend for server group 
${serverGroupName} in Http(s) load balancer backend service ${backendServiceName}." } } } } + static void addInternalHttpLoadBalancerBackends(Compute compute, + ObjectMapper objectMapper, + String project, + GoogleServerGroup.View serverGroup, + GoogleLoadBalancerProvider googleLoadBalancerProvider, + Task task, + String phase, + GoogleOperationPoller googleOperationPoller, + GoogleExecutorTraits executor) { + String serverGroupName = serverGroup.name + String region = serverGroup.region + Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata + Map metadataMap = buildMapFromMetadata(instanceMetadata) + def internalHttpLoadBalancersInMetadata = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + + def internalHttpLoadBalancersToAddTo = queryAllLoadBalancers(googleLoadBalancerProvider, internalHttpLoadBalancersInMetadata, task, phase) + .findAll { it.loadBalancerType == GoogleLoadBalancerType.INTERNAL_MANAGED } + if (!internalHttpLoadBalancersToAddTo) { + log.warn("Cache call missed for Internal Http load balancers ${internalHttpLoadBalancersInMetadata}, making a call to GCP") + List projectForwardingRules = executor.timeExecute( + compute.forwardingRules().list(project, region), + "compute.forwardingRules.list", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region + ).getItems() + internalHttpLoadBalancersToAddTo = projectForwardingRules.findAll { ForwardingRule forwardingRule -> + forwardingRule.name in serverGroup.loadBalancers && forwardingRule.target && + Utils.getTargetProxyType(forwardingRule.target) in [GoogleTargetProxyType.HTTP, GoogleTargetProxyType.HTTPS] + } + } + + if (internalHttpLoadBalancersToAddTo) { + String policyJson = metadataMap?.get(LOAD_BALANCING_POLICY) + if (!policyJson) { + updateStatusAndThrowNotFoundException("Load Balancing Policy not found for server group ${serverGroupName}", task, phase) + } + GoogleHttpLoadBalancingPolicy policy = objectMapper.readValue(policyJson, GoogleHttpLoadBalancingPolicy) + + List backendServiceNames = metadataMap?.get(REGION_BACKEND_SERVICE_NAMES)?.split(",") ?: [] + if (backendServiceNames) { + backendServiceNames.each { String backendServiceName -> + BackendService backendService = executor.timeExecute( + compute.regionBackendServices().get(project, region, backendServiceName), + "compute.regionBackendServices.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + Backend backendToAdd = backendFromLoadBalancingPolicy(policy) + if (serverGroup.regional) { + backendToAdd.setGroup(buildRegionalServerGroupUrl(project, serverGroup.region, serverGroupName)) + } else { + backendToAdd.setGroup(buildZonalServerGroupUrl(project, serverGroup.zone, serverGroupName)) + } + if (backendService.backends == null) { + backendService.backends = [] + } + backendService.backends << backendToAdd + def updateOp = executor.timeExecute( + compute.regionBackendServices().update(project, region, backendServiceName, backendService), + "compute.regionBackendServices.update", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL) + googleOperationPoller.waitForRegionalOperation(compute, project, region, updateOp.getName(), null, + task, 'compute.regionBackendService.update', phase) + task.updateStatus phase, "Enabled backend for server group ${serverGroupName} in Internal Http(s) load balancer backend service ${backendServiceName}."
+ } + } + } + } + static void addSslLoadBalancerBackends(Compute compute, ObjectMapper objectMapper, String project, @@ -1039,12 +1194,13 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { String serverGroupName = serverGroup.name Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata Map metadataMap = buildMapFromMetadata(instanceMetadata) - def globalLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] - def regionalLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + def globalLoadBalancersInMetadata = metadataMap?.get(GLOBAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + def regionalLoadBalancersInMetadata = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] def allFoundLoadBalancers = (globalLoadBalancersInMetadata + regionalLoadBalancersInMetadata) as List def sslLoadBalancersToAddTo = queryAllLoadBalancers(googleLoadBalancerProvider, allFoundLoadBalancers, task, phase) @@ -1063,7 +1219,7 @@ class GCEUtil { } if (sslLoadBalancersToAddTo) { - String policyJson = metadataMap?.(GoogleServerGroup.View.LOAD_BALANCING_POLICY) + String policyJson = metadataMap?.get(LOAD_BALANCING_POLICY) if (!policyJson) { updateStatusAndThrowNotFoundException("Load Balancing Policy not found for server group ${serverGroupName}", task, phase) } @@ -1086,10 +1242,12 @@ class GCEUtil { backendService.backends = [] } backendService.backends << backendToAdd - executor.timeExecute( - compute.backendServices().update(project, backendServiceName, backendService), - "compute.backendServices.update", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + def updateOp = executor.timeExecute( + compute.backendServices().update(project, backendServiceName, backendService), + "compute.backendServices.update", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.backendService.update', phase) task.updateStatus phase, "Enabled backend for server group ${serverGroupName} in ssl load balancer backend service ${backendServiceName}." 
} } @@ -1102,12 +1260,13 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { String serverGroupName = serverGroup.name Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata Map metadataMap = buildMapFromMetadata(instanceMetadata) - def globalLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] - def regionalLoadBalancersInMetadata = metadataMap?.(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + def globalLoadBalancersInMetadata = metadataMap?.get(GLOBAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + def regionalLoadBalancersInMetadata = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] def allFoundLoadBalancers = (globalLoadBalancersInMetadata + regionalLoadBalancersInMetadata) as List def tcpLoadBalancersToAddTo = queryAllLoadBalancers(googleLoadBalancerProvider, allFoundLoadBalancers, task, phase) @@ -1126,7 +1285,7 @@ class GCEUtil { } if (tcpLoadBalancersToAddTo) { - String policyJson = metadataMap?.(GoogleServerGroup.View.LOAD_BALANCING_POLICY) + String policyJson = metadataMap?.get(LOAD_BALANCING_POLICY) if (!policyJson) { updateStatusAndThrowNotFoundException("Load Balancing Policy not found for server group ${serverGroupName}", task, phase) } @@ -1149,10 +1308,12 @@ class GCEUtil { backendService.backends = [] } backendService.backends << backendToAdd - executor.timeExecute( - compute.backendServices().update(project, backendServiceName, backendService), - "compute.backendServices.update", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + def updateOp = executor.timeExecute( + compute.backendServices().update(project, backendServiceName, backendService), + "compute.backendServices.update", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.backendService.update', phase) task.updateStatus phase, "Enabled backend for server group ${serverGroupName} in tcp load balancer backend service ${backendServiceName}." } } @@ -1184,7 +1345,7 @@ class GCEUtil { policy.setNamedPorts([new NamedPort(name: GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME, port: policy.listeningPort)]) policy.listeningPort = null // Deprecated. } - instanceMetadata[(GoogleServerGroup.View.LOAD_BALANCING_POLICY)] = objectMapper.writeValueAsString(policy) + instanceMetadata[(LOAD_BALANCING_POLICY)] = objectMapper.writeValueAsString(policy) } // Note: namedPorts are not set in this method. 
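Note on the recurring pattern in the GCEUtil hunks above: every backend-service mutation now captures the Operation returned by the update call and blocks on it through GoogleOperationPoller before reporting success, replacing the previous fire-and-forget timeExecute. A minimal sketch of the pattern, reusing the names already in scope in these helpers (compute, project, region, backendServiceName, backendService, task, phase, executor, googleOperationPoller):

    // Mutate the backend service and keep the returned Operation.
    def updateOp = executor.timeExecute(
        compute.regionBackendServices().update(project, region, backendServiceName, backendService),
        "compute.regionBackendServices.update",
        executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region)

    // Block until GCE marks the operation DONE; a null timeout uses the poller's default.
    // Regional resources poll via waitForRegionalOperation, while updates to global
    // backend services (HTTP(S), SSL, TCP) use waitForGlobalOperation instead.
    googleOperationPoller.waitForRegionalOperation(compute, project, region,
        updateOp.getName(), null, task, "compute.${region}.backendServices.update", phase)

Waiting here keeps later steps in the same task from racing a still-pending backend-service update.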
@@ -1208,6 +1369,7 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { def serverGroupName = serverGroup.name def region = serverGroup.region @@ -1249,10 +1411,12 @@ class GCEUtil { (getLocalName(backend.group) == serverGroupName) && (Utils.getRegionFromGroupUrl(backend.group) == region) } - executor.timeExecute( + def updateOp = executor.timeExecute( compute.backendServices().update(project, backendServiceName, backendService), "compute.backendServices.update", executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.backendService.update', phase) task.updateStatus phase, "Deleted backend for server group ${serverGroupName} from ssl load balancer backend service ${backendServiceName}." } } @@ -1263,6 +1427,7 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { def serverGroupName = serverGroup.name def region = serverGroup.region @@ -1304,10 +1469,12 @@ class GCEUtil { (getLocalName(backend.group) == serverGroupName) && (Utils.getRegionFromGroupUrl(backend.group) == region) } - executor.timeExecute( + def updateOp = executor.timeExecute( compute.backendServices().update(project, backendServiceName, backendService), "compute.backendServices.update", executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.backendService.update', phase) task.updateStatus phase, "Deleted backend for server group ${serverGroupName} from tcp load balancer backend service ${backendServiceName}." } } @@ -1318,6 +1485,7 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { def serverGroupName = serverGroup.name def region = serverGroup.region @@ -1355,10 +1523,12 @@ class GCEUtil { (getLocalName(backend.group) == serverGroupName) && (Utils.getRegionFromGroupUrl(backend.group) == region) } - executor.timeExecute( + def updateOp = executor.timeExecute( compute.regionBackendServices().update(project, region, backendServiceName, backendService), "compute.backendServices.update", executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForRegionalOperation(compute, project, region, updateOp.getName(), null, + task, "compute.${region}.backendServices.update", phase) task.updateStatus phase, "Deleted backend for server group ${serverGroupName} from internal load balancer backend service ${backendServiceName}." 
} } @@ -1369,9 +1539,10 @@ class GCEUtil { GoogleLoadBalancerProvider googleLoadBalancerProvider, Task task, String phase, + GoogleOperationPoller googleOperationPoller, GoogleExecutorTraits executor) { def serverGroupName = serverGroup.name - def httpLoadBalancersInMetadata = serverGroup?.asg?.get(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES) ?: [] + def httpLoadBalancersInMetadata = serverGroup?.asg?.get(GLOBAL_LOAD_BALANCER_NAMES) ?: [] log.debug("Attempting to delete backends for ${serverGroup.name} from the following Http load balancers: ${httpLoadBalancersInMetadata}") log.debug("Looking up the following Http load balancers in the cache: ${httpLoadBalancersInMetadata}") @@ -1399,7 +1570,7 @@ class GCEUtil { if (foundHttpLoadBalancers) { Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata Map metadataMap = buildMapFromMetadata(instanceMetadata) - List backendServiceNames = metadataMap?.(GoogleServerGroup.View.BACKEND_SERVICE_NAMES)?.split(",") + List backendServiceNames = metadataMap?.get(BACKEND_SERVICE_NAMES)?.split(",") if (backendServiceNames) { backendServiceNames.each { String backendServiceName -> BackendService backendService = executor.timeExecute( @@ -1408,18 +1579,81 @@ class GCEUtil { executor.TAG_SCOPE, executor.SCOPE_GLOBAL) backendService?.backends?.removeAll { Backend backend -> (getLocalName(backend.group) == serverGroupName) && - (Utils.getRegionFromGroupUrl(backend.group) == serverGroup.region) + (Utils.getRegionFromGroupUrl(backend.group) == serverGroup.region) } - executor.timeExecute( - compute.backendServices().update(project, backendServiceName, backendService), - "compute.backendServices.update", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + def updateOp = executor.timeExecute( + compute.backendServices().update(project, backendServiceName, backendService), + "compute.backendServices.update", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.backendService.update', phase) task.updateStatus phase, "Deleted backend for server group ${serverGroupName} from Http(s) load balancer backend service ${backendServiceName}." 
} } } } + static void destroyInternalHttpLoadBalancerBackends(Compute compute, + String project, + GoogleServerGroup.View serverGroup, + GoogleLoadBalancerProvider googleLoadBalancerProvider, + Task task, + String phase, + GoogleOperationPoller googleOperationPoller, + GoogleExecutorTraits executor) { + def serverGroupName = serverGroup.name + def region = serverGroup.region + def httpLoadBalancersInMetadata = serverGroup?.asg?.get(REGIONAL_LOAD_BALANCER_NAMES) ?: [] + log.debug("Attempting to delete backends for ${serverGroup.name} from the following Internal Http load balancers: ${httpLoadBalancersInMetadata}") + + log.debug("Looking up the following Internal Http load balancers in the cache: ${httpLoadBalancersInMetadata}") + def foundInternalHttpLoadBalancers = googleLoadBalancerProvider.getApplicationLoadBalancers("").findAll { + it.name in serverGroup.loadBalancers && it.loadBalancerType == GoogleLoadBalancerType.INTERNAL_MANAGED + } + if (!foundInternalHttpLoadBalancers) { + log.warn("Cache call missed for Internal Http load balancers ${httpLoadBalancersInMetadata}, making a call to GCP") + List projectForwardingRules = executor.timeExecute( + compute.forwardingRules().list(project, region), + "compute.forwardingRules", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region + ).getItems() + foundInternalHttpLoadBalancers = projectForwardingRules.findAll { ForwardingRule forwardingRule -> + forwardingRule.target && Utils.getTargetProxyType(forwardingRule.target) in [GoogleTargetProxyType.HTTP, GoogleTargetProxyType.HTTPS] && + forwardingRule.name in serverGroup.loadBalancers + } + } + + def notDeleted = httpLoadBalancersInMetadata - (foundInternalHttpLoadBalancers.collect { it.name }) + if (notDeleted) { + log.warn("Could not locate the following Internal Http load balancers: ${notDeleted}. Proceeding with other backend deletions without mutating them.") + } + + if (foundInternalHttpLoadBalancers) { + Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata + Map metadataMap = buildMapFromMetadata(instanceMetadata) + List backendServiceNames = metadataMap?.get(REGION_BACKEND_SERVICE_NAMES)?.split(",") + if (backendServiceNames) { + backendServiceNames.each { String backendServiceName -> + BackendService backendService = executor.timeExecute( + compute.regionBackendServices().get(project, region, backendServiceName), + "compute.regionBackendService.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + backendService?.backends?.removeAll { Backend backend -> + (getLocalName(backend.group) == serverGroupName) && + (Utils.getRegionFromGroupUrl(backend.group) == serverGroup.region) + } + def updateOp = executor.timeExecute( + compute.regionBackendServices().update(project, region, backendServiceName, backendService), + "compute.regionBackendServices.update", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, updateOp.getName(), null, + task, 'compute.regionBackendService.update', phase) + task.updateStatus phase, "Deleted backend for server group ${serverGroupName} from Internal Http(s) load balancer backend service ${backendServiceName}." 
+ } + } + } + } + static Boolean isBackendServiceInUse(List projectUrlMaps, String backendServiceName) { def defaultServicesMatch = projectUrlMaps?.findAll { UrlMap urlMap -> getLocalName(urlMap.getDefaultService()) == backendServiceName @@ -1551,6 +1785,47 @@ class GCEUtil { return retrievedTargetProxy } + def static getRegionTargetProxyFromRule(Compute compute, String project, String region, ForwardingRule forwardingRule, String phase, SafeRetry safeRetry, GoogleExecutorTraits executor) { + String target = forwardingRule.getTarget() + GoogleTargetProxyType targetProxyType = Utils.getTargetProxyType(target) + String targetProxyName = getLocalName(target) + + def operationName + def proxyGet = null + switch (targetProxyType) { + case GoogleTargetProxyType.HTTP: + proxyGet = { executor.timeExecute( + compute.regionTargetHttpProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpProxies.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL) + } + operationName = "compute.regionTargetHttpProxies.get" + break + case GoogleTargetProxyType.HTTPS: + proxyGet = { executor.timeExecute( + compute.regionTargetHttpsProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpsProxies.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL) + } + operationName = "compute.regionTargetHttpsProxies.get" + break + default: + log.warn("Unexpected target proxy type for $targetProxyName in $region.") + return null + break + } + def retrievedTargetProxy = safeRetry.doRetry( + proxyGet, + "Region Target proxy $targetProxyName", + null, + [400, 403, 412], + [], + [action: "get", phase: phase, operation: operationName, (executor.TAG_SCOPE): executor.SCOPE_REGIONAL, (executor.TAG_REGION): region], + executor.registry + ) + return retrievedTargetProxy + } + /** * Deletes an L7/SSL LB global listener, i.e. a global forwarding rule and its target proxy. 
* @param compute @@ -1565,9 +1840,9 @@ class GCEUtil { GoogleExecutorTraits executor) { ForwardingRule ruleToDelete = safeRetry.doRetry( { executor.timeExecute( - compute.globalForwardingRules().get(project, forwardingRuleName), - "compute.globalForwardingRules.get", - executor.TAG_SCOPE, executor.SCOPE_GLOBAL) + compute.globalForwardingRules().get(project, forwardingRuleName), + "compute.globalForwardingRules.get", + executor.TAG_SCOPE, executor.SCOPE_GLOBAL) }, "global forwarding rule ${forwardingRuleName}", null, @@ -1640,6 +1915,72 @@ class GCEUtil { return result } } + static Operation deleteRegionalListener(Compute compute, + String project, + String region, + String forwardingRuleName, + String phase, + SafeRetry safeRetry, + GoogleExecutorTraits executor) { + ForwardingRule ruleToDelete = safeRetry.doRetry( + { executor.timeExecute( + compute.forwardingRules().get(project, region, forwardingRuleName), + "compute.forwardingRules.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + }, + "forwarding rule ${forwardingRuleName}", + null, + [400, 412], + [404], + [action: "get", phase: phase, operation: "compute.forwardingRules.get", (executor.TAG_SCOPE): executor.SCOPE_REGIONAL, (executor.TAG_REGION): region], + executor.registry + ) as ForwardingRule + if (ruleToDelete) { + def operation_name + executor.timeExecute( + compute.forwardingRules().delete(project, region, ruleToDelete.getName()), + "compute.forwardingRules.delete", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + String targetProxyLink = ruleToDelete.getTarget() + String targetProxyName = getLocalName(targetProxyLink) + GoogleTargetProxyType targetProxyType = Utils.getTargetProxyType(targetProxyLink) + Closure deleteProxyClosure = { null } + switch (targetProxyType) { + case GoogleTargetProxyType.HTTP: + deleteProxyClosure = { + executor.timeExecute( + compute.regionTargetHttpProxies().delete(project, region, targetProxyName), + "compute.regionTargetHttpProxies.delete", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + } + operation_name = "compute.regionTargetHttpProxies.delete" + break + case GoogleTargetProxyType.HTTPS: + deleteProxyClosure = { + executor.timeExecute( + compute.regionTargetHttpsProxies().delete(project, region, targetProxyName), + "compute.regionTargetHttpsProxies.delete", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + } + operation_name = "compute.regionTargetHttpsProxies.delete" + break + default: + log.warn("Unexpected target proxy type for $targetProxyName.") + break + } + + Operation result = safeRetry.doRetry( + deleteProxyClosure, + "region target proxy ${targetProxyName}", + null, + [400, 412], + [404], + [action: "delete", phase: phase, operation: operation_name, (executor.TAG_SCOPE): executor.SCOPE_REGIONAL, (executor.TAG_REGION): region], + executor.registry + ) as Operation + return result + } + } static Operation deleteIfNotInUse(Closure closure, String component, @@ -1678,8 +2019,8 @@ class GCEUtil { GoogleNetworkProvider googleNetworkProvider) { def network = queryNetwork(accountName, securityGroupDescription.network, task, phase, googleNetworkProvider) def firewall = new Firewall( - name: securityGroupDescription.securityGroupName, - network: network.selfLink + name: securityGroupDescription.securityGroupName, + network: network.selfLink ) def allowed = securityGroupDescription.allowed.collect { new Firewall.Allowed(IPProtocol: it.ipProtocol, ports: it.portRanges) @@ 
-1771,9 +2112,9 @@ class GCEUtil { case GoogleHealthCheck.HealthCheckType.SSL: existingHealthCheck.sslHealthCheck.port = descriptionHealthCheck.port break - case GoogleHealthCheck.HealthCheckType.UDP: - existingHealthCheck.udpHealthCheck.port = descriptionHealthCheck.port - break +// case GoogleHealthCheck.HealthCheckType.UDP: +// existingHealthCheck.udpHealthCheck.port = descriptionHealthCheck.port +// break default: throw new IllegalArgumentException("Description contains illegal health check type.") break @@ -1816,10 +2157,10 @@ class GCEUtil { newHealthCheck.type = 'SSL' newHealthCheck.sslHealthCheck = new SSLHealthCheck(port: descriptionHealthCheck.port) break - case GoogleHealthCheck.HealthCheckType.UDP: - newHealthCheck.type = 'UDP' - newHealthCheck.udpHealthCheck = new UDPHealthCheck(port: descriptionHealthCheck.port) - break +// case GoogleHealthCheck.HealthCheckType.UDP: +// newHealthCheck.type = 'UDP' +// newHealthCheck.udpHealthCheck = new UDPHealthCheck(port: descriptionHealthCheck.port) +// break default: throw new IllegalArgumentException("Description contains illegal health check type.") break @@ -1892,6 +2233,24 @@ class GCEUtil { return healthChecks } + + static List fetchRegionalHealthChecks(GoogleExecutorTraits agent, Compute compute, String project, String region) { + Boolean executedAtLeastOnce = false + String nextPageToken = null + List healthChecks = [] + while (!executedAtLeastOnce || nextPageToken) { + HealthCheckList healthCheckList = agent.timeExecute( + compute.regionHealthChecks().list(project, region).setPageToken(nextPageToken), + "compute.regionHealthChecks.list", + agent.TAG_SCOPE, agent.SCOPE_REGIONAL, agent.TAG_REGION, region) + + executedAtLeastOnce = true + nextPageToken = healthCheckList.getNextPageToken() + healthChecks.addAll(healthCheckList.getItems() ?: []) + } + return healthChecks + } + static List fetchInstances(GoogleExecutorTraits agent, GoogleNamedAccountCredentials credentials) { List instances = new ArrayList() String pageToken = null @@ -1917,36 +2276,8 @@ class GCEUtil { List instances = [] instanceAggregatedList?.items?.each { String zone, InstancesScopedList instancesScopedList -> - def localZoneName = Utils.getLocalName(zone) instancesScopedList?.instances?.each { Instance instance -> - def consulNode = credentials.consulConfig?.enabled ? - ConsulProviderUtils.getHealths(credentials.consulConfig, instance.getName()) - : null - long instanceTimestamp = instance.creationTimestamp ? 
- Utils.getTimeFromTimestamp(instance.creationTimestamp) : - Long.MAX_VALUE - String instanceName = Utils.getLocalName(instance.name) - def googleInstance = new GoogleInstance( - name: instanceName, - gceId: instance.id, - instanceType: Utils.getLocalName(instance.machineType), - cpuPlatform: instance.cpuPlatform, - launchTime: instanceTimestamp, - zone: localZoneName, - region: credentials.regionFromZone(localZoneName), - networkInterfaces: instance.networkInterfaces, - networkName: Utils.decorateXpnResourceIdIfNeeded(credentials.project, instance.networkInterfaces?.getAt(0)?.network), - metadata: instance.metadata, - disks: instance.disks, - serviceAccounts: instance.serviceAccounts, - selfLink: instance.selfLink, - tags: instance.tags, - labels: instance.labels, - consulNode: consulNode, - instanceHealth: new GoogleInstanceHealth( - status: GoogleInstanceHealth.Status.valueOf(instance.getStatus()) - )) - instances << googleInstance + instances << GoogleInstances.createFromComputeInstance(instance, credentials) } } @@ -1960,6 +2291,11 @@ class GCEUtil { def instanceName = Utils.getLocalName(status.instance) def googleLBHealthStatus = GoogleLoadBalancerHealth.PlatformStatus.valueOf(status.healthState) + if (googleLoadBalancer.type == GoogleLoadBalancerType.NETWORK && googleLoadBalancer.ipAddress != status.ipAddress) { + log.debug("Skip adding health for ${instanceName} to ${googleLoadBalancer.name} (${googleLoadBalancer.ipAddress}): ${status.healthState} ($status.ipAddress)") + return + } + googleLoadBalancer.healths << new GoogleLoadBalancerHealth( instanceName: instanceName, instanceZone: Utils.getZoneFromInstanceUrl(status.instance), diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/SafeRetry.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/SafeRetry.groovy deleted file mode 100644 index afa3c441ddc..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/SafeRetry.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.deploy - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException -import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleCommonSafeRetry -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.annotation.Value -import org.springframework.stereotype.Component -import java.util.concurrent.TimeUnit - -// TODO(jacobkiefer): This used to have a generic return type associated with 'doRetry'. Find a way to reincorporate while still making this a Bean. 
-@Component -class SafeRetry extends GoogleCommonSafeRetry { - - @Value('${google.safeRetryMaxWaitIntervalMs:60000}') - Long maxWaitInterval - - @Value('${google.safeRetryRetryIntervalBaseSec:2}') - Long retryIntervalBase - - @Value('${google.safeRetryJitterMultiplier:1000}') - Long jitterMultiplier - - @Value('${google.safeRetryMaxRetries:10}') - Long maxRetries - - public Object doRetry(Closure operation, - String resource, - Task task, - List retryCodes, - List successfulErrorCodes, - Map tags, - Registry registry) { - return super.doRetry(operation, - resource, - task, - retryCodes, - successfulErrorCodes, - maxWaitInterval, - retryIntervalBase, - jitterMultiplier, - maxRetries, - tags, - registry) - } - - @Override - GoogleOperationException providerOperationException(String message) { - new GoogleOperationException(message) - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverter.groovy index 0dca9205f8f..d041a105d81 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverter.groovy @@ -18,13 +18,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.deploy.description.AbandonAndDecrementGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.AbandonAndDecrementGoogleServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @Component("abandonAndDecrementGoogleServerGroupDescription") class AbandonAndDecrementGoogleServerGroupAtomicOperationConverter - extends AbstractAtomicOperationsCredentialsSupport { + extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverter.groovy index aa8e37b4721..04c40ae3f2b 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverter.groovy @@ -19,16 +19,17 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription +import 
com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import groovy.util.logging.Slf4j import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("basicGoogleDeployDescription") @Slf4j -class BasicGoogleDeployAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class BasicGoogleDeployAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { AtomicOperation convertOperation(Map input) { new DeployAtomicOperation(convertDescription(input)) } @@ -49,6 +50,16 @@ class BasicGoogleDeployAtomicOperationConverter extends AbstractAtomicOperations } } + def acceleratorConfigs = input?.acceleratorConfigs; + if (acceleratorConfigs && !acceleratorConfigs.isEmpty()) { + input.acceleratorConfigs = acceleratorConfigs.collect { + [ + acceleratorType: it.acceleratorType, + acceleratorCount: new Integer((it.acceleratorCount as Double).intValue()) + ] + } + } + GoogleAtomicOperationConverterHelper.convertDescription(input, this, BasicGoogleDeployDescription) } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverter.groovy index c59ae38ff9a..644ce2f72ea 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.CopyLastGoogleServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.CLONE_SERVER_GROUP) @Component("copyLastGoogleServerGroupDescription") -class CopyLastGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class CopyLastGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { AtomicOperation convertOperation(Map input) { new CopyLastGoogleServerGroupAtomicOperation(convertDescription(input)) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverter.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverter.groovy index f6b907a906b..ff7ffe09b7a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverter.groovy @@ -18,12 +18,13 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.deploy.description.CreateGoogleInstanceDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.CreateGoogleInstanceAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @Component("createGoogleInstanceDescription") -class CreateGoogleInstanceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class CreateGoogleInstanceAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { AtomicOperation convertOperation(Map input) { new CreateGoogleInstanceAtomicOperation(convertDescription(input)) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverter.groovy index 2b0f568d185..58fbeaff900 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverter.groovy @@ -17,19 +17,38 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DeleteGoogleAutoscalingPolicyAtomicOperation +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry +import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter +import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.DELETE_SCALING_POLICY) @Component("deleteGoogleScalingPolicyDescription") 
-class DeleteGoogleAutoscalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DeleteGoogleAutoscalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { + + @Autowired + GoogleClusterProvider googleClusterProvider + + @Autowired + GoogleOperationPoller googleOperationPoller + + @Autowired + AtomicOperationsRegistry atomicOperationsRegistry + + @Autowired + OrchestrationProcessor orchestrationProcessor + @Override AtomicOperation convertOperation(Map input) { - new DeleteGoogleAutoscalingPolicyAtomicOperation(convertDescription(input)) + new DeleteGoogleAutoscalingPolicyAtomicOperation(convertDescription(input), googleClusterProvider, googleOperationPoller, atomicOperationsRegistry, orchestrationProcessor) } @Override diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy index 3f0488ad3ee..1255a26dec3 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy @@ -19,19 +19,21 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleHttpLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleInternalHttpLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleInternalLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleSslLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleTcpLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.DELETE_LOAD_BALANCER) @Component("deleteGoogleLoadBalancerDescription") -class DeleteGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DeleteGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { DeleteGoogleLoadBalancerDescription description = convertDescription(input) @@ -42,6 +44,9 @@ class DeleteGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOpe case 
GoogleLoadBalancerType.HTTP: return new DeleteGoogleHttpLoadBalancerAtomicOperation(description) break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + return new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + break case GoogleLoadBalancerType.INTERNAL: return new DeleteGoogleInternalLoadBalancerAtomicOperation(description) break diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverter.groovy index 5e0919e5e57..b109ce882c6 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleSecurityGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DeleteGoogleSecurityGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.DELETE_SECURITY_GROUP) @Component("deleteGoogleSecurityGroupDescription") -class DeleteGoogleSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DeleteGoogleSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { new DeleteGoogleSecurityGroupAtomicOperation(convertDescription(input)) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter.groovy index 8733985b0bf..1aa6c3675e8 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter.groovy @@ -19,15 +19,16 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeregisterInstancesFromGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DeregisterInstancesFromGoogleLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import 
com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) @Component("deregisterInstancesFromGoogleLoadBalancerDescription") class DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter - extends AbstractAtomicOperationsCredentialsSupport { + extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { new DeregisterInstancesFromGoogleLoadBalancerAtomicOperation( diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverter.groovy index 93cc0223578..68393370791 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DestroyGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DestroyGoogleServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.DESTROY_SERVER_GROUP) @Component("destroyGoogleServerGroupDescription") -class DestroyGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DestroyGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverter.groovy index ad09103e06a..f5d7e7b41ea 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import 
com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DisableGoogleServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.DISABLE_SERVER_GROUP) @Component("disableGoogleServerGroupDescription") -class DisableGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class DisableGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverter.groovy index a014cc4aae3..51dec9a0628 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.EnableGoogleServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.ENABLE_SERVER_GROUP) @Component("enableGoogleServerGroupDescription") -class EnableGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class EnableGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/GoogleAtomicOperationConverterHelper.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/GoogleAtomicOperationConverterHelper.groovy index b28d5303148..0cb14435135 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/GoogleAtomicOperationConverterHelper.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/GoogleAtomicOperationConverterHelper.groovy @@ -18,12 +18,13 @@ 
package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.fasterxml.jackson.databind.DeserializationFeature import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable class GoogleAtomicOperationConverterHelper { - static Object convertDescription(Map input, - AbstractAtomicOperationsCredentialsSupport credentialsSupport, - Class targetDescriptionType) { + static T convertDescription(Map input, + AbstractAtomicOperationsCredentialsConverter credentialsSupport, + Class targetDescriptionType) { if (!input.accountName) { input.accountName = input.credentials } @@ -41,7 +42,8 @@ class GoogleAtomicOperationConverterHelper { .convertValue(input, targetDescriptionType) // Re-assign the credentials. - converted.credentials = credentials in GoogleNamedAccountCredentials ? credentials : null + converted.credentials = credentials as GoogleNamedAccountCredentials + converted } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter.groovy index abe38ffce32..7c36d397d93 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.ModifyGoogleServerGroupInstanceTemplateDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.ModifyGoogleServerGroupInstanceTemplateAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.UPDATE_LAUNCH_CONFIG) @Component("modifyGoogleServerGroupInstanceTemplateDescription") -class ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverter.groovy index eef88e1eb91..fe879c0bc7f 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.RebootGoogleInstancesDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.RebootGoogleInstancesAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.REBOOT_INSTANCES) @Component -class RebootGoogleInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class RebootGoogleInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { new RebootGoogleInstancesAtomicOperation(convertDescription(input)) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter.groovy index f39395ae954..05569c194f6 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter.groovy @@ -19,15 +19,16 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.RegisterInstancesWithGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.RegisterInstancesWithGoogleLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) @Component("registerInstancesWithGoogleLoadBalancerDescription") class RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter - extends AbstractAtomicOperationsCredentialsSupport { + extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { new RegisterInstancesWithGoogleLoadBalancerAtomicOperation( diff --git 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverter.groovy index ada1015a593..524b9547373 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverter.groovy @@ -16,9 +16,11 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters -import com.google.api.services.compute.model.AutoscalingPolicy + +import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.ResizeGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.ResizeGoogleServerGroupAtomicOperation @@ -26,45 +28,68 @@ import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleAutoscali import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.orchestration.* +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter +import com.netflix.spinnaker.orchestration.OperationDescription +import com.fasterxml.jackson.databind.ObjectMapper import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.RESIZE_SERVER_GROUP) @Component("resizeGoogleServerGroupDescription") -class ResizeGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class ResizeGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Autowired GoogleClusterProvider googleClusterProvider + @Autowired + GoogleOperationPoller googleOperationPoller + + @Autowired + AtomicOperationsRegistry atomicOperationsRegistry + + @Autowired + OrchestrationProcessor orchestrationProcessor + + @Autowired + Cache cacheView + + @Autowired + ObjectMapper objectMapper + @Override AtomicOperation convertOperation(Map input) { // If the target server group has an Autoscaler configured we need to modify that policy as opposed to the // target size of the managed instance group itself. 
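// As a rough illustration (hypothetical request; all values invented), an input map of
//   [serverGroupName: 'myapp-dev-v001', region: 'us-central1',
//    capacity: [min: 2, max: 10, desired: 4], credentials: 'my-gce-account']
// takes the autoscaler branch whenever resolveServerGroup(input) reports a policy, so the
// capacity bounds end up in an UpsertGoogleAutoscalingPolicyDescription rather than a
// plain resize of the managed instance group.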
- AutoscalingPolicy autoscalingPolicy = resolveServerGroup(input)?.autoscalingPolicy + GoogleAutoscalingPolicy autoscalingPolicy = resolveServerGroup(input)?.autoscalingPolicy def convertedDescription = convertDescription(input, autoscalingPolicy) if (autoscalingPolicy) { - new UpsertGoogleAutoscalingPolicyAtomicOperation(convertedDescription) + new UpsertGoogleAutoscalingPolicyAtomicOperation(convertedDescription, googleClusterProvider, googleOperationPoller, atomicOperationsRegistry, orchestrationProcessor, cacheView, objectMapper) } else { new ResizeGoogleServerGroupAtomicOperation(convertedDescription) } } - def convertDescription(Map input, AutoscalingPolicy autoscalingPolicy) { + OperationDescription convertDescription(Map input, GoogleAutoscalingPolicy autoscalingPolicy) { if (autoscalingPolicy) { UpsertGoogleAutoscalingPolicyDescription upsertGoogleAutoscalingPolicyDescription = GoogleAtomicOperationConverterHelper.convertDescription(input, this, UpsertGoogleAutoscalingPolicyDescription) // Retrieve the existing autoscaling policy and overwrite the min/max settings. - GoogleAutoscalingPolicy googleAutoscalingPolicy = - GCEUtil.buildAutoscalingPolicyDescriptionFromAutoscalingPolicy(autoscalingPolicy) - - upsertGoogleAutoscalingPolicyDescription.autoscalingPolicy = googleAutoscalingPolicy + upsertGoogleAutoscalingPolicyDescription.autoscalingPolicy = autoscalingPolicy upsertGoogleAutoscalingPolicyDescription.autoscalingPolicy.minNumReplicas = input.capacity?.min upsertGoogleAutoscalingPolicyDescription.autoscalingPolicy.maxNumReplicas = input.capacity?.max + if (input?.writeMetadata != null) { + upsertGoogleAutoscalingPolicyDescription.writeMetadata = input?.writeMetadata + } + + // Override autoscaling mode. This is useful in situations where we need the resize to happen + // regardless of previous autoscaling mode (e.g. scale down in red/black deployment strategies). 
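+ // For example (hypothetical values): a red/black disable stage can pass
+ // input.autoscalingMode = 'OFF' so the group shrinks to the requested size even
+ // though the policy being overwritten was originally created with mode 'ON'.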
+ if (input?.autoscalingMode) { + upsertGoogleAutoscalingPolicyDescription.autoscalingPolicy.mode = input.autoscalingMode + } return upsertGoogleAutoscalingPolicyDescription } else { @@ -73,7 +98,7 @@ class ResizeGoogleServerGroupAtomicOperationConverter extends AbstractAtomicOper } @Override - def convertDescription(Map input) { + OperationDescription convertDescription(Map input) { return convertDescription(input, resolveServerGroup(input)?.autoscalingPolicy) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverter.groovy index ad3aa5b60e5..cf2074132c8 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverter.groovy @@ -19,15 +19,16 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateAndDecrementGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.TerminateAndDecrementGoogleServerGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.TERMINATE_INSTANCE_AND_DECREMENT) @Component("terminateAndDecrementGoogleServerGroupDescription") class TerminateAndDecrementGoogleServerGroupAtomicOperationConverter - extends AbstractAtomicOperationsCredentialsSupport { + extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverter.groovy index 8bce495850b..0da196a2414 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateGoogleInstancesDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.TerminateGoogleInstancesAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import 
com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.TERMINATE_INSTANCES) @Component -class TerminateGoogleInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class TerminateGoogleInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverter.groovy index 76c11f50db9..fe96528c215 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverter.groovy @@ -16,20 +16,47 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters +import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.clouddriver.google.GoogleOperation +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleAutoscalingPolicyAtomicOperation +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry +import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter +import com.fasterxml.jackson.databind.ObjectMapper +import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.UPSERT_SCALING_POLICY) @Component("upsertGoogleScalingPolicyDescription") -class UpsertGoogleAutoscalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertGoogleAutoscalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter{ + + @Autowired + GoogleClusterProvider googleClusterProvider + + @Autowired + GoogleOperationPoller googleOperationPoller + + @Autowired + AtomicOperationsRegistry atomicOperationsRegistry + + @Autowired + OrchestrationProcessor orchestrationProcessor + + @Autowired + Cache cacheView + + @Autowired + ObjectMapper objectMapper + @Override AtomicOperation convertOperation(Map input) { - new UpsertGoogleAutoscalingPolicyAtomicOperation(convertDescription(input)) + new 
UpsertGoogleAutoscalingPolicyAtomicOperation(convertDescription(input), googleClusterProvider, googleOperationPoller, atomicOperationsRegistry, orchestrationProcessor, cacheView, objectMapper) } @Override diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverter.groovy index 204f85b5aa2..b9e76b6d182 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleImageTagsDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleImageTagsAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.UPSERT_IMAGE_TAGS) @Component -class UpsertGoogleImageTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertGoogleImageTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy index 17474fffbe0..ce6c20158ac 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy @@ -19,19 +19,21 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleHttpLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleInternalHttpLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleInternalLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleSslLoadBalancerAtomicOperation import 
com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleTcpLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.UPSERT_LOAD_BALANCER) @Component("upsertGoogleLoadBalancerDescription") -class UpsertGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { AtomicOperation convertOperation(Map input) { UpsertGoogleLoadBalancerDescription description = convertDescription(input) switch (description.loadBalancerType) { @@ -41,6 +43,9 @@ class UpsertGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOpe case GoogleLoadBalancerType.HTTP: return new UpsertGoogleHttpLoadBalancerAtomicOperation(description) break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + return new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + break case GoogleLoadBalancerType.INTERNAL: return new UpsertGoogleInternalLoadBalancerAtomicOperation(description) break diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverter.groovy index 77d9f381dec..e5e9188c584 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleSecurityGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleSecurityGroupAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.UPSERT_SECURITY_GROUP) @Component("upsertGoogleSecurityGroupDescription") -class UpsertGoogleSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertGoogleSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { new 
UpsertGoogleSecurityGroupAtomicOperation(convertDescription(input)) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverter.groovy index 6e14f5331ce..2b3a0765a48 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverter.groovy @@ -19,14 +19,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleServerGroupTagsDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleServerGroupTagsAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.UPSERT_SERVER_GROUP_TAGS) @Component("upsertGoogleServerGroupTagsDescription") -class UpsertGoogleServerGroupTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class UpsertGoogleServerGroupTagsAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter{ @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy index 3644eb0cfef..c7e4b279d3d 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy @@ -20,12 +20,13 @@ import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.converters.GoogleAtomicOperationConverterHelper import com.netflix.spinnaker.clouddriver.google.deploy.description.GoogleInstanceListDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.discovery.DisableInstancesInDiscoveryOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter @GoogleOperation(AtomicOperations.DISABLE_INSTANCES_IN_DISCOVERY) -class DisableInstancesInDiscoveryConverter extends 
AbstractAtomicOperationsCredentialsSupport { +class DisableInstancesInDiscoveryConverter extends AbstractAtomicOperationsCredentialsConverter { AtomicOperation convertOperation(Map input) { new DisableInstancesInDiscoveryOperation(convertDescription(input)) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy index 30b9353d4ee..0ca7a581436 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy @@ -20,12 +20,13 @@ import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.converters.GoogleAtomicOperationConverterHelper import com.netflix.spinnaker.clouddriver.google.deploy.description.GoogleInstanceListDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.discovery.EnableInstancesInDiscoveryOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter @GoogleOperation(AtomicOperations.ENABLE_INSTANCES_IN_DISCOVERY) -class EnableInstancesInDiscoveryConverter extends AbstractAtomicOperationsCredentialsSupport { +class EnableInstancesInDiscoveryConverter extends AbstractAtomicOperationsCredentialsConverter { AtomicOperation convertOperation(Map input) { new EnableInstancesInDiscoveryOperation(convertDescription(input)) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/RestoreSnapshotAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/RestoreSnapshotAtomicOperationConverter.groovy index a92c1cd02d6..d9a0c7522a3 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/RestoreSnapshotAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/RestoreSnapshotAtomicOperationConverter.groovy @@ -20,14 +20,15 @@ import com.netflix.spinnaker.clouddriver.google.deploy.converters.GoogleAtomicOp import com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot.RestoreSnapshotDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.snapshot.RestoreSnapshotAtomicOperation import com.netflix.spinnaker.clouddriver.google.GoogleOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter 
import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.RESTORE_SNAPSHOT) @Component("restoreSnapshotDescription") -class RestoreSnapshotAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class RestoreSnapshotAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/SaveSnapshotAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/SaveSnapshotAtomicOperationConverter.groovy index 9874601c122..210d0c6752c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/SaveSnapshotAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/snapshot/SaveSnapshotAtomicOperationConverter.groovy @@ -20,14 +20,15 @@ import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.converters.GoogleAtomicOperationConverterHelper import com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot.SaveSnapshotDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.snapshot.SaveSnapshotAtomicOperation +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter import org.springframework.stereotype.Component @GoogleOperation(AtomicOperations.SAVE_SNAPSHOT) @Component("saveSnapshotDescription") -class SaveSnapshotAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { +class SaveSnapshotAtomicOperationConverter extends AbstractAtomicOperationsCredentialsConverter { @Override AtomicOperation convertOperation(Map input) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/AbandonAndDecrementGoogleServerGroupDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/AbandonAndDecrementGoogleServerGroupDescription.groovy index a2d4ba7fff4..464072df474 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/AbandonAndDecrementGoogleServerGroupDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/AbandonAndDecrementGoogleServerGroupDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class AbandonAndDecrementGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class AbandonAndDecrementGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName List instanceIds String region diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BaseGoogleInstanceDescription.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BaseGoogleInstanceDescription.groovy index 4aea652e1ac..eed888a4027 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BaseGoogleInstanceDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BaseGoogleInstanceDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description +import com.google.api.services.compute.model.StructuredEntries import com.netflix.spinnaker.clouddriver.google.model.GoogleDisk +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource import com.netflix.spinnaker.kork.artifacts.model.Artifact import groovy.transform.AutoClone import groovy.transform.Canonical @@ -25,7 +27,7 @@ import groovy.transform.ToString @AutoClone @Canonical @ToString(includeNames = true) -class BaseGoogleInstanceDescription extends AbstractGoogleCredentialsDescription { +class BaseGoogleInstanceDescription extends AbstractGoogleCredentialsDescription implements GoogleLabeledResource { String instanceType String minCpuPlatform List disks @@ -42,6 +44,22 @@ class BaseGoogleInstanceDescription extends AbstractGoogleCredentialsDescription Boolean preemptible Boolean automaticRestart OnHostMaintenance onHostMaintenance + // Secure boot helps protect your VM instances against boot-level and kernel-level malware and rootkits. + // Supported only for Shielded VMs + Boolean enableSecureBoot; + // Virtual Trusted Platform Module (vTPM) validates your guest VM pre-boot and boot integrity, + // and offers key generation and protection. + // Supported only for Shielded VMs + Boolean enableVtpm; + // Integrity monitoring lets you monitor and verify the runtime boot integrity of your shielded VM instances using Stackdriver reports. + // Note: requires vTPM to be enabled. + // Supported only for Shielded VMs + Boolean enableIntegrityMonitoring; + + // Unique disk device name addressable by a Linux OS in /dev/disk/by-id/google-* in the running instance. + // Used to reference disk for mounting, resizing, etc. + // Only applicable for persistent disks. 
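+ // For example (hypothetical value): a baseDeviceName of 'myapp-dev-v001' would surface
+ // the instance's disks at paths like /dev/disk/by-id/google-myapp-dev-v001.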
+ String baseDeviceName // We support passing the image to deploy as either a string or an artifact, but default to // the string for backwards-compatibility @@ -51,6 +69,9 @@ class BaseGoogleInstanceDescription extends AbstractGoogleCredentialsDescription String accountName + Map resourceManagerTags + Map partnerMetadata + // The source of the image to deploy // ARTIFACT: An artifact of type gce/image stored in imageArtifact // STRING: A string representing a GCE image name in the current diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BasicGoogleDeployDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BasicGoogleDeployDescription.groovy index 67c5b4fa991..1afaebcf35a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BasicGoogleDeployDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/BasicGoogleDeployDescription.groovy @@ -16,14 +16,17 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description +import com.google.api.services.compute.model.AcceleratorConfig +import com.netflix.frigga.NameBuilder import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy +import com.netflix.spinnaker.clouddriver.google.deploy.description.BaseGoogleInstanceDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleDistributionPolicy import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable -import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable import groovy.transform.AutoClone import groovy.transform.Canonical import groovy.transform.EqualsAndHashCode @@ -48,8 +51,9 @@ class BasicGoogleDeployDescription extends BaseGoogleInstanceDescription impleme GoogleAutoscalingPolicy autoscalingPolicy GoogleHttpLoadBalancingPolicy loadBalancingPolicy GoogleAutoHealingPolicy autoHealingPolicy + Boolean overwriteAncestorAutoHealingPolicy = false /** - * Optional explicit specification of zones for an RMIG. + * Optional explicit specification of zones and target shape for an RMIG. */ GoogleDistributionPolicy distributionPolicy // Capacity is optional. If it is specified, capacity.desired takes precedence over targetSize. 
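The getName() override added in the next hunk exists to expose frigga's protected combineAppStackDetail helper; a minimal sketch of the naming behavior it relies on, with invented values:

  // frigga joins app, stack, and detail with hyphens, dropping trailing empty parts:
  //   combineAppStackDetail('myapp', 'dev', 'canary')  -> 'myapp-dev-canary'
  //   combineAppStackDetail('myapp', 'dev', null)      -> 'myapp-dev'
  //   combineAppStackDetail('myapp', null, null)       -> 'myapp'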
@@ -58,6 +62,18 @@ class BasicGoogleDeployDescription extends BaseGoogleInstanceDescription impleme Capacity capacity Source source = new Source() String userData + List acceleratorConfigs + + @Override + String getName() { + def nameBuilder = new NameBuilder() { + @Override + protected String combineAppStackDetail(String appName, String stack, String detail) { + return super.combineAppStackDetail(appName, stack, detail) + } + } + nameBuilder.combineAppStackDetail(application, stack, freeFormDetails) + } @Canonical @ToString(includeNames = true) @@ -83,20 +99,25 @@ class BasicGoogleDeployDescription extends BaseGoogleInstanceDescription impleme Integer desired } - String getApplication() { + Collection getApplications() { if (application) { - return application + return Collections.singletonList(application) } if (source && source.serverGroupName) { - return Names.parseName(source.serverGroupName).app + return Collections.singletonList(Names.parseName(source.serverGroupName).app) } } @Canonical - static class Source implements ServerGroupNameable { + static class Source implements ServerGroupsNameable { // TODO(duftler): Add accountName/credentials to support cloning from one account to another. String region String serverGroupName Boolean useSourceCapacity + + @Override + Collection getServerGroupNames() { + return Collections.singletonList(serverGroupName) + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/CreateGoogleInstanceDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/CreateGoogleInstanceDescription.groovy index def78318695..6993920e1ad 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/CreateGoogleInstanceDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/CreateGoogleInstanceDescription.groovy @@ -21,4 +21,9 @@ import com.netflix.spinnaker.clouddriver.deploy.DeployDescription class CreateGoogleInstanceDescription extends BaseGoogleInstanceDescription implements DeployDescription { String instanceName String zone + + @Override + String getName() { + return instanceName + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleAutoscalingPolicyDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleAutoscalingPolicyDescription.groovy index 585acc0ceb1..6ba650bbce7 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleAutoscalingPolicyDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleAutoscalingPolicyDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class DeleteGoogleAutoscalingPolicyDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class DeleteGoogleAutoscalingPolicyDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName String accountName String region diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleLoadBalancerDescription.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleLoadBalancerDescription.groovy index 1ac76904eee..a2b9dbe6ca8 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleLoadBalancerDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeleteGoogleLoadBalancerDescription.groovy @@ -16,13 +16,22 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description +import com.google.common.collect.ImmutableList +import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable -class DeleteGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription { +class DeleteGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription implements ApplicationNameable { Long deleteOperationTimeoutSeconds String loadBalancerName String region String accountName Boolean deleteHealthChecks = true GoogleLoadBalancerType loadBalancerType + + @Override + Collection getApplications() { + return ImmutableList.of(Names.parseName(loadBalancerName).getApp()) + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeregisterInstancesFromGoogleLoadBalancerDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeregisterInstancesFromGoogleLoadBalancerDescription.groovy index 9bacc79b023..9b1af8c3c16 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeregisterInstancesFromGoogleLoadBalancerDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DeregisterInstancesFromGoogleLoadBalancerDescription.groovy @@ -16,9 +16,23 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class DeregisterInstancesFromGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.frigga.Names +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable + +class DeregisterInstancesFromGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription implements ApplicationNameable { List loadBalancerNames List instanceIds String region String accountName + + @Override + Collection getApplications() { + def list = (loadBalancerNames - null) + if (!list) { + return Collections.EMPTY_LIST + } + return list.collect { + Names.parseName(it).getApp() + } + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DestroyGoogleServerGroupDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DestroyGoogleServerGroupDescription.groovy index f604610549f..d325b565c7e 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DestroyGoogleServerGroupDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/DestroyGoogleServerGroupDescription.groovy @@ -16,7 +16,10 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class DestroyGoogleServerGroupDescription extends 
AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class DestroyGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName String region String accountName diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/EnableDisableGoogleServerGroupDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/EnableDisableGoogleServerGroupDescription.groovy index 5be0213d2ac..ba7bbb6ff18 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/EnableDisableGoogleServerGroupDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/EnableDisableGoogleServerGroupDescription.groovy @@ -17,17 +17,22 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait -import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable /** * "Enabling" means adding a server group to the target pool of each of its network load balancers. * * "Disabling" means removing a server group from the target pool of each of its network load balancers. */ -class EnableDisableGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable, EnableDisableDescriptionTrait { +class EnableDisableGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription implements ServerGroupsNameable, EnableDisableDescriptionTrait { String region String accountName @Deprecated String zone + + @Override + Collection getServerGroupNames() { + return [getServerGroupName()] + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/GoogleInstanceListDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/GoogleInstanceListDescription.groovy index 0ddf81a18bc..201a4da5263 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/GoogleInstanceListDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/GoogleInstanceListDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class GoogleInstanceListDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class GoogleInstanceListDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName String region List instanceIds diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ModifyGoogleServerGroupInstanceTemplateDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ModifyGoogleServerGroupInstanceTemplateDescription.groovy index 33b0f161427..f9cef4efb48 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ModifyGoogleServerGroupInstanceTemplateDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ModifyGoogleServerGroupInstanceTemplateDescription.groovy @@ -16,15 +16,21 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable import groovy.transform.AutoClone import groovy.transform.Canonical @AutoClone @Canonical -class ModifyGoogleServerGroupInstanceTemplateDescription extends BaseGoogleInstanceDescription { +class ModifyGoogleServerGroupInstanceTemplateDescription extends BaseGoogleInstanceDescription implements ServerGroupNameable { String serverGroupName String region @Deprecated String zone + + @Override + String getName() { + return serverGroupName + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RebootGoogleInstancesDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RebootGoogleInstancesDescription.groovy index 168a73eeee3..c5437cbc696 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RebootGoogleInstancesDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RebootGoogleInstancesDescription.groovy @@ -24,6 +24,6 @@ class RebootGoogleInstancesDescription extends AbstractGoogleCredentialsDescript String accountName List getNames() { - return instanceIds + return instanceIds ?: [] } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RegisterInstancesWithGoogleLoadBalancerDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RegisterInstancesWithGoogleLoadBalancerDescription.groovy index f9f1219ff40..56c066fdb01 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RegisterInstancesWithGoogleLoadBalancerDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/RegisterInstancesWithGoogleLoadBalancerDescription.groovy @@ -16,9 +16,23 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class RegisterInstancesWithGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.frigga.Names +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable + +class RegisterInstancesWithGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription implements ApplicationNameable{ List loadBalancerNames List instanceIds String region String accountName + + @Override + Collection getApplications() { + def list = (loadBalancerNames - null) + if (!list) { + return Collections.EMPTY_LIST + } + return list.collect { + Names.parseName(it).getApp() + } + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ResizeGoogleServerGroupDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ResizeGoogleServerGroupDescription.groovy index 070f362836c..01833c1adca 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ResizeGoogleServerGroupDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/ResizeGoogleServerGroupDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class ResizeGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class ResizeGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName Integer targetSize String region diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/TerminateAndDecrementGoogleServerGroupDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/TerminateAndDecrementGoogleServerGroupDescription.groovy index f63a6a6708c..12ebfbdb90a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/TerminateAndDecrementGoogleServerGroupDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/TerminateAndDecrementGoogleServerGroupDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class TerminateAndDecrementGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class TerminateAndDecrementGoogleServerGroupDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName List instanceIds String region diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleAutoscalingPolicyDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleAutoscalingPolicyDescription.groovy index d01ecdb74e1..d1f991dfb2c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleAutoscalingPolicyDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleAutoscalingPolicyDescription.groovy @@ -18,11 +18,13 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable -class UpsertGoogleAutoscalingPolicyDescription extends AbstractGoogleCredentialsDescription { +class UpsertGoogleAutoscalingPolicyDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName String region String accountName GoogleAutoscalingPolicy autoscalingPolicy GoogleAutoHealingPolicy autoHealingPolicy + Boolean writeMetadata = true } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleImageTagsDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleImageTagsDescription.groovy index b7677f2936d..8940b685888 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleImageTagsDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleImageTagsDescription.groovy @@ -16,8 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig + class UpsertGoogleImageTagsDescription extends AbstractGoogleCredentialsDescription { String imageName Map tags String accountName + + @Override + boolean requiresAuthorization(SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps) { + return !opsSecurityConfigProps.allowUnauthenticatedImageTaggingInAccounts.contains(account) + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleLoadBalancerDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleLoadBalancerDescription.groovy index 6979823627f..dee0e48f3df 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleLoadBalancerDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleLoadBalancerDescription.groovy @@ -16,11 +16,16 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description +import com.google.api.services.compute.model.HealthCheck +import com.google.common.collect.ImmutableList +import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHostRule import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleSessionAffinity +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable -class UpsertGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription { +class UpsertGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescription implements ApplicationNameable { // Common attributes. String loadBalancerName HealthCheck healthCheck @@ -47,6 +52,9 @@ class UpsertGoogleLoadBalancerDescr */ List backendServiceDiff + // NLB attributes. + GoogleSessionAffinity sessionAffinity + // ILB attributes.
String network String subnet @@ -63,4 +71,9 @@ class UpsertGoogleLoadBalancerDescription extends AbstractGoogleCredentialsDescr Integer timeoutSec String requestPath } + + @Override + Collection getApplications() { + return ImmutableList.of(Names.parseName(loadBalancerName).getApp()) + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleServerGroupTagsDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleServerGroupTagsDescription.groovy index 151b9da8b2a..2638a4b5ee7 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleServerGroupTagsDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/UpsertGoogleServerGroupTagsDescription.groovy @@ -16,7 +16,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description -class UpsertGoogleServerGroupTagsDescription extends AbstractGoogleCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable + +class UpsertGoogleServerGroupTagsDescription extends AbstractGoogleCredentialsDescription implements ServerGroupNameable { String serverGroupName List tags String region diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/RestoreSnapshotDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/RestoreSnapshotDescription.groovy index e2fbee480e9..6201f936a0b 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/RestoreSnapshotDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/RestoreSnapshotDescription.groovy @@ -16,10 +16,18 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot +import com.google.common.collect.ImmutableList +import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.google.deploy.description.AbstractGoogleCredentialsDescription +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable -class RestoreSnapshotDescription extends AbstractGoogleCredentialsDescription { +class RestoreSnapshotDescription extends AbstractGoogleCredentialsDescription implements ApplicationNameable { String applicationName String accountName Long snapshotTimestamp + + @Override + Collection getApplications() { + return ImmutableList.of(Names.parseName(applicationName).getApp()) + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/SaveSnapshotDescription.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/SaveSnapshotDescription.groovy index ab47d421f5e..30511c1776c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/SaveSnapshotDescription.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/description/snapshot/SaveSnapshotDescription.groovy @@ -16,9 +16,17 @@ package com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot +import com.google.common.collect.ImmutableList +import com.netflix.frigga.Names import 
com.netflix.spinnaker.clouddriver.google.deploy.description.AbstractGoogleCredentialsDescription +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable -class SaveSnapshotDescription extends AbstractGoogleCredentialsDescription { +class SaveSnapshotDescription extends AbstractGoogleCredentialsDescription implements ApplicationNameable { String applicationName String accountName + + @Override + Collection getApplications() { + return ImmutableList.of(Names.parseName(applicationName).getApp()) + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleOperationException.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleOperationException.groovy deleted file mode 100644 index 7eed92c993f..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleOperationException.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.deploy.exception - -import groovy.transform.InheritConstructors - -@InheritConstructors -class GoogleOperationException extends RuntimeException {} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleResourceIllegalStateException.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleResourceIllegalStateException.groovy index 26cb8b92830..1bd12404deb 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleResourceIllegalStateException.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleResourceIllegalStateException.groovy @@ -16,8 +16,28 @@ package com.netflix.spinnaker.clouddriver.google.deploy.exception +import com.google.common.base.Strings +import groovy.transform.CompileStatic import groovy.transform.InheritConstructors @InheritConstructors +@CompileStatic +class GoogleResourceIllegalStateException extends GoogleOperationException { -class GoogleResourceIllegalStateException extends GoogleOperationException {} + // @InheritConstructors apparently doesn't work with Java callers + GoogleResourceIllegalStateException(String message) { + super(message) + } + + static checkResourceState(boolean expression, Object message) { + if (!expression) { + throw new GoogleResourceIllegalStateException(String.valueOf(message)); + } + } + + static checkResourceState(boolean expression, String errorMessageTemplate, Object... 
errorMessageArgs) { + if (!expression) { + throw new GoogleResourceIllegalStateException(Strings.lenientFormat(errorMessageTemplate, errorMessageArgs)); + } + } +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy deleted file mode 100644 index cb7af7c7e02..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy +++ /dev/null @@ -1,603 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.deploy.handlers - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.services.compute.Compute -import com.google.api.services.compute.model.* -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.deploy.DeployHandler -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.config.GoogleConfiguration -import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits -import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties -import com.netflix.spinnaker.clouddriver.google.deploy.GCEServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil -import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller -import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry -import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription -import com.netflix.spinnaker.clouddriver.google.deploy.ops.GoogleUserDataProvider -import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancingPolicy -import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider -import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider -import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider -import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider -import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -@Component -@Slf4j -class BasicGoogleDeployHandler 
implements DeployHandler, GoogleExecutorTraits { - - // TODO(duftler): This should move to a common location. - private static final String BASE_PHASE = "DEPLOY" - - // TODO(duftler): These should be exposed/configurable. - private static final String DEFAULT_NETWORK_NAME = "default" - private static final String ACCESS_CONFIG_NAME = "External NAT" - private static final String ACCESS_CONFIG_TYPE = "ONE_TO_ONE_NAT" - - @Autowired - private GoogleConfigurationProperties googleConfigurationProperties - - @Autowired - private GoogleClusterProvider googleClusterProvider - - @Autowired - private GoogleConfiguration.DeployDefaults googleDeployDefaults - - @Autowired - private GoogleOperationPoller googleOperationPoller - - @Autowired - private GoogleUserDataProvider googleUserDataProvider - - @Autowired - GoogleLoadBalancerProvider googleLoadBalancerProvider - - @Autowired - GoogleNetworkProvider googleNetworkProvider - - @Autowired - GoogleSubnetProvider googleSubnetProvider - - @Autowired - String clouddriverUserAgentApplicationName - - @Autowired - Cache cacheView - - @Autowired - ObjectMapper objectMapper - - @Autowired - SafeRetry safeRetry - - @Autowired - Registry registry - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - boolean handles(DeployDescription description) { - description instanceof BasicGoogleDeployDescription - } - - /** - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "myapp", "stack": "dev", "image": "ubuntu-1404-trusty-v20160509a", "targetSize": 3, "instanceType": "f1-micro", "zone": "us-central1-f", "credentials": "my-account-name" }} ]' localhost:7002/gce/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "myapp", "stack": "dev", "freeFormDetails": "something", "image": "ubuntu-1404-trusty-v20160509a", "targetSize": 3, "instanceType": "f1-micro", "zone": "us-central1-f", "credentials": "my-account-name" }} ]' localhost:7002/gce/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "myapp", "stack": "dev", "image": "ubuntu-1404-trusty-v20160509a", "targetSize": 3, "instanceType": "f1-micro", "zone": "us-central1-f", "loadBalancers": ["testlb", "testhttplb"], "instanceMetadata": { "load-balancer-names": "myapp-testlb", "global-load-balancer-names": "myapp-testhttplb", "backend-service-names": "my-backend-service"}, "credentials": "my-account-name" }} ]' localhost:7002/gce/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "myapp", "stack": "dev", "image": "ubuntu-1404-trusty-v20160509a", "targetSize": 3, "instanceType": "f1-micro", "zone": "us-central1-f", "tags": ["my-tag-1", "my-tag-2"], "credentials": "my-account-name" }} ]' localhost:7002/gce/ops - * - * @param description - * @param priorOutputs - * @return - */ - @Override - DeploymentResult handle(BasicGoogleDeployDescription description, List priorOutputs) { - def accountName = description.accountName - def credentials = description.credentials - def compute = credentials.compute - def project = credentials.project - def isRegional = description.regional - def zone = description.zone - def region = description.region ?: credentials.regionFromZone(zone) - def location = isRegional ? 
region : zone - def instanceMetadata = description.instanceMetadata - def labels = description.labels - def canIpForward = description.canIpForward - - def serverGroupNameResolver = new GCEServerGroupNameResolver(project, region, credentials, safeRetry, this) - def clusterName = serverGroupNameResolver.combineAppStackDetail(description.application, description.stack, description.freeFormDetails) - - task.updateStatus BASE_PHASE, "Initializing creation of server group for cluster $clusterName in $location..." - - task.updateStatus BASE_PHASE, "Looking up next sequence..." - - def serverGroupName = serverGroupNameResolver.resolveNextServerGroupName(description.application, description.stack, description.freeFormDetails, false) - task.updateStatus BASE_PHASE, "Produced server group name: $serverGroupName" - - def machineTypeName - if (description.instanceType.startsWith('custom')) { - machineTypeName = description.instanceType - } else { - machineTypeName = GCEUtil.queryMachineType(description.instanceType, location, credentials, task, BASE_PHASE) - } - - def network = GCEUtil.queryNetwork(accountName, description.network ?: DEFAULT_NETWORK_NAME, task, BASE_PHASE, googleNetworkProvider) - def subnet = - description.subnet ? GCEUtil.querySubnet(accountName, region, description.subnet, task, BASE_PHASE, googleSubnetProvider) : null - - // If no subnet is passed and the network is both an xpn host network and an auto-subnet network, then we need to set the subnet ourselves here. - // This shouldn't be required, but GCE complains otherwise. - if (!subnet && network.id.contains("/") && network.autoCreateSubnets) { - // Auto-created subnets have the same name as the containing network. - subnet = GCEUtil.querySubnet(accountName, region, network.id, task, BASE_PHASE, googleSubnetProvider) - } - - def targetPools = [] - def internalLoadBalancers = [] - def sslLoadBalancers = [] - def tcpLoadBalancers = [] - - // We need the full url for each referenced network load balancer, and also to check that the HTTP(S) - // load balancers exist. - if (description.loadBalancers) { - // GCEUtil.queryAllLoadBalancers() will throw an exception if a referenced load balancer cannot be resolved. - def foundLoadBalancers = GCEUtil.queryAllLoadBalancers(googleLoadBalancerProvider, - description.loadBalancers, - task, - BASE_PHASE) - - // Queue ILBs to update, but wait to update metadata until Https LBs are calculated. - internalLoadBalancers = foundLoadBalancers.findAll { it.loadBalancerType == GoogleLoadBalancerType.INTERNAL } - - // Queue SSL LBs to update. - sslLoadBalancers = foundLoadBalancers.findAll { it.loadBalancerType == GoogleLoadBalancerType.SSL } - - // Queue TCP LBs to update. - tcpLoadBalancers = foundLoadBalancers.findAll { it.loadBalancerType == GoogleLoadBalancerType.TCP } - - if (!description.disableTraffic) { - def networkLoadBalancers = foundLoadBalancers.findAll { it.loadBalancerType == GoogleLoadBalancerType.NETWORK } - targetPools = networkLoadBalancers.collect { it.targetPool } - } - } - - task.updateStatus BASE_PHASE, "Composing server group $serverGroupName..." 
- - def attachedDisks = GCEUtil.buildAttachedDisks(description, - null, - false, - googleDeployDefaults, - task, - BASE_PHASE, - clouddriverUserAgentApplicationName, - googleConfigurationProperties.baseImageProjects, - safeRetry, - this) - - def networkInterface = GCEUtil.buildNetworkInterface(network, - subnet, - description.associatePublicIpAddress == null || description.associatePublicIpAddress, - ACCESS_CONFIG_NAME, - ACCESS_CONFIG_TYPE) - - def hasBackendServices = (instanceMetadata && - instanceMetadata.containsKey(GoogleServerGroup.View.BACKEND_SERVICE_NAMES)) || sslLoadBalancers || tcpLoadBalancers - - // Resolve and queue the backend service updates, but don't execute yet. - // We need to resolve this information to set metadata in the template so enable can know about the - // load balancing policy this server group was configured with. - // If we try to execute the update, GCP will fail since the MIG is not created yet. - List backendServicesToUpdate = [] - if (hasBackendServices) { - List backendServices = instanceMetadata[GoogleServerGroup.View.BACKEND_SERVICE_NAMES]?.split(",") ?: [] - backendServices.addAll(sslLoadBalancers.collect { it.backendService.name }) - backendServices.addAll(tcpLoadBalancers.collect { it.backendService.name }) - - // Set the load balancer name metadata. - def globalLbNames = sslLoadBalancers.collect { it.name } + tcpLoadBalancers.collect { it.name } + GCEUtil.resolveHttpLoadBalancerNamesMetadata(backendServices, compute, project, this) - instanceMetadata[GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES] = globalLbNames.join(",") - - String sourcePolicyJson = instanceMetadata[GoogleServerGroup.View.LOAD_BALANCING_POLICY] - def loadBalancingPolicy = description.loadBalancingPolicy - - backendServices.each { String backendServiceName -> - BackendService backendService = timeExecute( - compute.backendServices().get(project, backendServiceName), - "compute.backendServices.get", - TAG_SCOPE, SCOPE_GLOBAL) - - Backend backendToAdd - GoogleHttpLoadBalancingPolicy policy - if (loadBalancingPolicy?.balancingMode) { - policy = loadBalancingPolicy - } else if (sourcePolicyJson) { - policy = objectMapper.readValue(sourcePolicyJson, GoogleHttpLoadBalancingPolicy) - } else { - log.warn("No load balancing policy found in the operation description or the source server group, adding defaults") - policy = new GoogleHttpLoadBalancingPolicy( - balancingMode: GoogleLoadBalancingPolicy.BalancingMode.UTILIZATION, - maxUtilization: 0.80, - capacityScaler: 1.0, - namedPorts: [new NamedPort(name: GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME, port: GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT)] - ) - } - GCEUtil.updateMetadataWithLoadBalancingPolicy(policy, instanceMetadata, objectMapper) - backendToAdd = GCEUtil.backendFromLoadBalancingPolicy(policy) - - if (isRegional) { - backendToAdd.setGroup(GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName)) - } else { - backendToAdd.setGroup(GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName)) - } - - if (backendService.backends == null) { - backendService.backends = new ArrayList() - } - backendService.backends << backendToAdd - backendServicesToUpdate << backendService - } - } - - // Update the instance metadata for ILBs and queue up region backend service calls. 
- List regionBackendServicesToUpdate = [] - if (internalLoadBalancers) { - List existingRegionalLbs = instanceMetadata[GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES]?.split(",") ?: [] - def ilbServices = internalLoadBalancers.collect { it.backendService.name } - def ilbNames = internalLoadBalancers.collect { it.name } - - ilbNames.each { String ilbName -> - if (!(ilbName in existingRegionalLbs)) { - existingRegionalLbs << ilbName - } - } - instanceMetadata[GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES] = existingRegionalLbs.join(",") - - ilbServices.each { String backendServiceName -> - BackendService backendService = timeExecute( - compute.regionBackendServices().get(project, region, backendServiceName), - "compute.regionBackendServices.get", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - Backend backendToAdd = new Backend() - if (isRegional) { - backendToAdd.setGroup(GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName)) - } else { - backendToAdd.setGroup(GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName)) - } - - if (backendService.backends == null) { - backendService.backends = new ArrayList() - } - backendService.backends << backendToAdd - regionBackendServicesToUpdate << backendService - } - } - String instanceTemplateName = "$serverGroupName-${System.currentTimeMillis()}" - Map userDataMap = getUserData(description, serverGroupName, instanceTemplateName, credentials) - - if (instanceMetadata) { - instanceMetadata << userDataMap - } else { - instanceMetadata = userDataMap - } - - if (isRegional && description.selectZones) { - instanceMetadata[GoogleServerGroup.View.SELECT_ZONES] = true - } - - def metadata = GCEUtil.buildMetadataFromMap(instanceMetadata) - - def tags = GCEUtil.buildTagsFromList(description.tags) - - if (description.authScopes && !description.serviceAccountEmail) { - description.serviceAccountEmail = "default" - } - - def serviceAccount = GCEUtil.buildServiceAccount(description.serviceAccountEmail, description.authScopes) - - def scheduling = GCEUtil.buildScheduling(description) - - if (labels == null) { - labels = [:] - } - - // Used to group instances when querying for metrics from kayenta. - labels['spinnaker-region'] = region - labels['spinnaker-server-group'] = serverGroupName - - def instanceProperties = new InstanceProperties(machineType: machineTypeName, - disks: attachedDisks, - networkInterfaces: [networkInterface], - canIpForward: canIpForward, - metadata: metadata, - tags: tags, - labels: labels, - scheduling: scheduling, - serviceAccounts: serviceAccount) - - if (description.minCpuPlatform) { - instanceProperties.minCpuPlatform = description.minCpuPlatform - } - - def instanceTemplate = new InstanceTemplate(name: instanceTemplateName, - properties: instanceProperties) - - def instanceTemplateCreateOperation = timeExecute( - compute.instanceTemplates().insert(project, instanceTemplate), - "compute.instanceTemplates.insert", - TAG_SCOPE, SCOPE_GLOBAL) - def instanceTemplateUrl = instanceTemplateCreateOperation.targetLink - - // Before building the managed instance group we must check and wait until the instance template is built. 
- googleOperationPoller.waitForGlobalOperation(compute, project, instanceTemplateCreateOperation.getName(), - null, task, "instance template " + GCEUtil.getLocalName(instanceTemplateUrl), BASE_PHASE) - - if (description.capacity) { - description.targetSize = description.capacity.desired - } - - if (autoscalerIsSpecified(description)) { - GCEUtil.calibrateTargetSizeWithAutoscaler(description) - - if (description.capacity) { - description.autoscalingPolicy.minNumReplicas = description.capacity.min - description.autoscalingPolicy.maxNumReplicas = description.capacity.max - } - } - - if (description.source?.useSourceCapacity && description.source?.region && description.source?.serverGroupName) { - task.updateStatus BASE_PHASE, "Looking up server group $description.source.serverGroupName in $description.source.region " + - "in order to copy the current capacity..." - - // Locate the ancestor server group. - def ancestorServerGroup = GCEUtil.queryServerGroup(googleClusterProvider, - description.accountName, - description.source.region, - description.source.serverGroupName) - - description.targetSize = ancestorServerGroup.capacity.desired - description.autoscalingPolicy = GCEUtil.buildAutoscalingPolicyDescriptionFromAutoscalingPolicy(ancestorServerGroup.autoscalingPolicy) - } - - // Note: Cache queries for these health checks must occur in this order since queryHealthCheck() will make a live - // call that fails on a missing health check. - def autoHealingHealthCheck = null - if (description.autoHealingPolicy?.healthCheck) { - autoHealingHealthCheck = GCEUtil.queryNestedHealthCheck(project, description.accountName, description.autoHealingPolicy.healthCheck, compute, cacheView, task, BASE_PHASE, this) ?: - GCEUtil.queryHealthCheck(project, description.accountName, description.autoHealingPolicy.healthCheck, compute, cacheView, task, BASE_PHASE, this) - } - - List autoHealingPolicy = - autoHealingHealthCheck - ? [new InstanceGroupManagerAutoHealingPolicy( - healthCheck: autoHealingHealthCheck.selfLink, - initialDelaySec: description.autoHealingPolicy.initialDelaySec)] - : null - - if (autoHealingPolicy && description.autoHealingPolicy.maxUnavailable) { - def maxUnavailable = new FixedOrPercent(fixed: description.autoHealingPolicy.maxUnavailable.fixed as Integer, - percent: description.autoHealingPolicy.maxUnavailable.percent as Integer) - - autoHealingPolicy[0].setMaxUnavailable(maxUnavailable) - } - - def migCreateOperation - def instanceGroupManager = new InstanceGroupManager() - .setName(serverGroupName) - .setBaseInstanceName(serverGroupName) - .setInstanceTemplate(instanceTemplateUrl) - .setTargetSize(description.targetSize) - .setTargetPools(targetPools) - .setAutoHealingPolicies(autoHealingPolicy) - - if (hasBackendServices && (description?.loadBalancingPolicy || description?.source?.serverGroupName)) { - List namedPorts = [] - def sourceGroupName = description?.source?.serverGroupName - - // Note: this favors the explicitly specified load balancing policy over the source server group. 
- if (sourceGroupName && !description?.loadBalancingPolicy) { - def sourceServerGroup = googleClusterProvider.getServerGroup(description.accountName, description.source.region, sourceGroupName) - if (!sourceServerGroup) { - log.warn("Could not locate source server group ${sourceGroupName} to update named port.") - } - namedPorts = sourceServerGroup?.namedPorts?.collect { name, port -> new NamedPort(name: name, port: port) } - } else { - def loadBalancingPolicy = description?.loadBalancingPolicy - if (loadBalancingPolicy?.namedPorts != null) { - namedPorts = description?.loadBalancingPolicy?.namedPorts - } else if (loadBalancingPolicy?.listeningPort) { - log.warn("Deriving named ports from deprecated 'listeningPort' attribute. Please update your deploy description to use 'namedPorts'.") - namedPorts = [new NamedPort(name: GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME, port: loadBalancingPolicy?.listeningPort)] - } - } - - if (!namedPorts) { - log.warn("Could not locate named port on either load balancing policy or source server group. Setting default named port.") - namedPorts = [new NamedPort(name: GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME, port: GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT)] - } - instanceGroupManager.setNamedPorts(namedPorts) - } - - def willUpdateBackendServices = !description.disableTraffic && hasBackendServices - def willCreateAutoscaler = autoscalerIsSpecified(description) - def willUpdateIlbs = !description.disableTraffic && internalLoadBalancers - - if (isRegional) { - if (description.selectZones && description.distributionPolicy && description.distributionPolicy.zones) { - log.info("Configuring explicit zones selected for regional server group: ${description.distributionPolicy.zones}") - List selectedZones = description.distributionPolicy.zones.collect { String z -> - new DistributionPolicyZoneConfiguration().setZone(GCEUtil.buildZoneUrl(project, z)) - } - DistributionPolicy distributionPolicy = new DistributionPolicy().setZones(selectedZones) - instanceGroupManager.setDistributionPolicy(distributionPolicy) - } - migCreateOperation = timeExecute( - compute.regionInstanceGroupManagers().insert(project, region, instanceGroupManager), - "compute.regionInstanceGroupManagers.insert", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - - if (willUpdateBackendServices || willCreateAutoscaler || willUpdateIlbs) { - // Before updating the Backend Services or creating the Autoscaler we must wait until the managed instance group is created. - googleOperationPoller.waitForRegionalOperation(compute, project, region, migCreateOperation.getName(), - null, task, "managed instance group $serverGroupName", BASE_PHASE) - - if (willCreateAutoscaler) { - task.updateStatus BASE_PHASE, "Creating regional autoscaler for $serverGroupName..." - - Autoscaler autoscaler = GCEUtil.buildAutoscaler(serverGroupName, - migCreateOperation.targetLink, - description.autoscalingPolicy) - - timeExecute( - compute.regionAutoscalers().insert(project, region, autoscaler), - "compute.regionAutoscalers.insert", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - } - } - } else { - migCreateOperation = timeExecute( - compute.instanceGroupManagers().insert(project, zone, instanceGroupManager), - "compute.instanceGroupManagers.insert", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) - - if (willUpdateBackendServices || willCreateAutoscaler || willUpdateIlbs) { - // Before updating the Backend Services or creating the Autoscaler we must wait until the managed instance group is created. 
- googleOperationPoller.waitForZonalOperation(compute, project, zone, migCreateOperation.getName(), - null, task, "managed instance group $serverGroupName", BASE_PHASE) - - if (willCreateAutoscaler) { - task.updateStatus BASE_PHASE, "Creating zonal autoscaler for $serverGroupName..." - - Autoscaler autoscaler = GCEUtil.buildAutoscaler(serverGroupName, - migCreateOperation.targetLink, - description.autoscalingPolicy) - - timeExecute(compute.autoscalers().insert(project, zone, autoscaler), - "compute.autoscalers.insert", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) - } - } - } - - task.updateStatus BASE_PHASE, "Done creating server group $serverGroupName in $location." - - // Actually update the backend services. - if (willUpdateBackendServices) { - backendServicesToUpdate.each { BackendService backendService -> - safeRetry.doRetry( - updateBackendServices(compute, project, backendService.name, backendService), - "Load balancer backend service", - task, - [400, 412], - [], - [action: "update", phase: BASE_PHASE, operation: "updateBackendServices", (TAG_SCOPE): SCOPE_GLOBAL], - registry - ) - task.updateStatus BASE_PHASE, "Done associating server group $serverGroupName with backend service ${backendService.name}." - } - } - - if (willUpdateIlbs) { - regionBackendServicesToUpdate.each { BackendService backendService -> - safeRetry.doRetry( - updateRegionBackendServices(compute, project, region, backendService.name, backendService), - "Internal load balancer backend service", - task, - [400, 412], - [], - [action: "update", phase: BASE_PHASE, operation: "updateRegionBackendServices", (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], - registry - ) - task.updateStatus BASE_PHASE, "Done associating server group $serverGroupName with backend service ${backendService.name}." 
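The backend-service updates above run through safeRetry.doRetry with retryable HTTP codes [400, 412] because a GCE backend-service update is a read-modify-write: a concurrent writer invalidates the state the update was computed from, and the call must be replayed against a fresh read (the updateBackendServices/updateRegionBackendServices closures defined later in this class do exactly that, re-reading, merging, and de-duplicating by group). A minimal sketch of the idiom, assuming hypothetical getService/updateService stand-ins for the compute.backendServices() get/update calls in this diff:

  import com.google.api.client.googleapis.json.GoogleJsonResponseException
  import com.google.api.services.compute.model.Backend
  import com.google.api.services.compute.model.BackendService

  // Sketch only, not clouddriver's API: getService/updateService are hypothetical.
  def addBackendWithRetry = { String serviceName, Backend backendToAdd ->
    int attempts = 0
    while (true) {
      try {
        BackendService current = getService(serviceName)   // always re-read latest state
        current.backends = (current.backends ?: []) + backendToAdd
        current.backends.unique { it.group }                // registering twice is a no-op
        updateService(serviceName, current)                 // GCE rejects stale writes with 412
        return
      } catch (GoogleJsonResponseException e) {
        if (!(e.statusCode in [400, 412]) || ++attempts >= 5) throw e
      }
    }
  }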
- } - } - - DeploymentResult deploymentResult = new DeploymentResult() - deploymentResult.serverGroupNames = ["$region:$serverGroupName".toString()] - deploymentResult.serverGroupNameByRegion[region] = serverGroupName - deploymentResult - } - - private boolean autoscalerIsSpecified(BasicGoogleDeployDescription description) { - return description.autoscalingPolicy?.with { - cpuUtilization || loadBalancingUtilization || customMetricUtilizations - } - } - - private Closure updateRegionBackendServices(Compute compute, String project, String region, String backendServiceName, BackendService backendService) { - return { - BackendService serviceToUpdate = timeExecute( - compute.regionBackendServices().get(project, region, backendServiceName), - "compute.regionBackendServices.get", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - if (serviceToUpdate.backends == null) { - serviceToUpdate.backends = new ArrayList() - } - backendService?.backends?.each { serviceToUpdate.backends << it } - serviceToUpdate.getBackends().unique { backend -> backend.group } - timeExecute( - compute.regionBackendServices().update(project, region, backendServiceName, serviceToUpdate), - "compute.regionBackendServices.update", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - null - } - } - - private Closure updateBackendServices(Compute compute, String project, String backendServiceName, BackendService backendService) { - return { - BackendService serviceToUpdate = timeExecute( - compute.backendServices().get(project, backendServiceName), - "compute.backendServices.get", - TAG_SCOPE, SCOPE_GLOBAL) - if (serviceToUpdate.backends == null) { - serviceToUpdate.backends = new ArrayList() - } - backendService?.backends?.each { serviceToUpdate.backends << it } - serviceToUpdate.getBackends().unique { backend -> backend.group } - timeExecute( - compute.backendServices().update(project, backendServiceName, serviceToUpdate), - "compute.backendServices.update", - TAG_SCOPE, SCOPE_GLOBAL) - null - } - } - - Map getUserData(BasicGoogleDeployDescription description, String serverGroupName, - String instanceTemplateName, GoogleNamedAccountCredentials credentials) { - String customUserData = '' - if (description.userData) { - customUserData = description.userData - } - Map userData = googleUserDataProvider.getUserData(serverGroupName, instanceTemplateName, - description, credentials, customUserData) - task.updateStatus BASE_PHASE, "Resolved user data." 
- return userData - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperation.groovy index f1be1296c07..32b7b2b6e19 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperation.groovy @@ -16,14 +16,13 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops -import com.google.api.services.compute.model.InstanceGroupManagersAbandonInstancesRequest -import com.google.api.services.compute.model.RegionInstanceGroupManagersAbandonInstancesRequest + import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory import com.netflix.spinnaker.clouddriver.google.deploy.description.AbandonAndDecrementGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import org.springframework.beans.factory.annotation.Autowired /** @@ -46,6 +45,9 @@ class AbandonAndDecrementGoogleServerGroupAtomicOperation extends GoogleAtomicOp @Autowired GoogleClusterProvider googleClusterProvider + @Autowired + GoogleComputeApiFactory computeApiFactory + AbandonAndDecrementGoogleServerGroupAtomicOperation(AbandonAndDecrementGoogleServerGroupDescription description) { this.description = description } @@ -62,34 +64,14 @@ class AbandonAndDecrementGoogleServerGroupAtomicOperation extends GoogleAtomicOp def accountName = description.accountName def credentials = description.credentials - def compute = credentials.compute - def project = credentials.project def region = description.region def serverGroupName = description.serverGroupName def serverGroup = GCEUtil.queryServerGroup(googleClusterProvider, accountName, region, serverGroupName) - def isRegional = serverGroup.regional - // Will return null if this is a regional server group. 
- def zone = serverGroup.zone def instanceIds = description.instanceIds def instanceUrls = GCEUtil.collectInstanceUrls(serverGroup, instanceIds) - if (isRegional) { - def instanceGroupManagers = compute.regionInstanceGroupManagers() - def abandonRequest = new RegionInstanceGroupManagersAbandonInstancesRequest().setInstances(instanceUrls) - - timeExecute( - instanceGroupManagers.abandonInstances(project, region, serverGroupName, abandonRequest), - "compute.regionInstanceGroupManagers.abandonInstances", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - } else { - def instanceGroupManagers = compute.instanceGroupManagers() - def abandonRequest = new InstanceGroupManagersAbandonInstancesRequest().setInstances(instanceUrls) - - timeExecute( - instanceGroupManagers.abandonInstances(project, zone, serverGroupName, abandonRequest), - "compute.instanceGroupManagers.abandonInstances", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) - } + def serverGroupManagers = computeApiFactory.createServerGroupManagers(credentials, serverGroup) + serverGroupManagers.abandonInstances(instanceUrls).execute() task.updateStatus BASE_PHASE, "Done abandoning and decrementing instances " + "(${description.instanceIds.join(", ")}) from server group $serverGroupName in $region." diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy index 548e5f644f8..fcc5014f458 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy @@ -22,14 +22,14 @@ import com.netflix.spinnaker.clouddriver.consul.deploy.ops.EnableDisableConsulIn import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry import com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy -import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import org.springframework.beans.factory.annotation.Autowired -import retrofit.RetrofitError abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperation { private static final List RETRY_ERROR_CODES = [400, 403, 412] @@ -44,6 +44,9 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio @Autowired GoogleClusterProvider googleClusterProvider + @Autowired + GoogleOperationPoller googleOperationPoller + @Autowired ObjectMapper objectMapper @@ -104,7 +107,7 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio disable ? 
EnableDisableConsulInstance.State.disable : EnableDisableConsulInstance.State.enable) - } catch (RetrofitError e) { + } catch (SpinnakerServerException ignored) { // Consul isn't running } } @@ -126,6 +129,18 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio registry ) + task.updateStatus phaseName, "Deregistering server group from Internal Http(s) load balancers..." + + safeRetry.doRetry( + destroyInternalHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName), + "Internal Http load balancer backends", + task, + RETRY_ERROR_CODES, + SUCCESSFUL_ERROR_CODES, + [operation: "destroyInternalHttpLoadBalancerBackends", action: "destroy", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], + registry + ) + task.updateStatus phaseName, "Deregistering server group from internal load balancers..." safeRetry.doRetry( @@ -173,7 +188,7 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio getTargetPool(compute, project, region, targetPoolLocalName), "target pool", task, - RETRY_ERROR_CODES, + AbstractEnableDisableAtomicOperation.RETRY_ERROR_CODES, [], [operation: "getTargetPool", action: "destroy", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], registry @@ -191,7 +206,7 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio removeInstancesFromTargetPool(compute, project, region, targetPoolLocalName, targetPoolsRemoveInstanceRequest), "instances", task, - RETRY_ERROR_CODES, + AbstractEnableDisableAtomicOperation.RETRY_ERROR_CODES, [], [operation: "removeInstancesFromTargetPool", action: "deregister", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], registry @@ -215,7 +230,7 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio task.updateStatus phaseName, "Re-enabling autoscaling for server group enable..." Map metadataMap = GCEUtil.buildMapFromMetadata(instanceTemplate?.properties?.metadata) - String autoscalerJson = metadataMap?.(GoogleServerGroup.View.AUTOSCALING_POLICY) + String autoscalerJson = metadataMap?.(GCEUtil.AUTOSCALING_POLICY) if (autoscalerJson) { def autoscaler = objectMapper.readValue(autoscalerJson, Map) def enabledMode = GoogleAutoscalingPolicy.AutoscalingMode.valueOf(autoscaler?.autoscalingPolicy?.mode ?: "ON") @@ -236,6 +251,18 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio registry ) + task.updateStatus phaseName, "Registering server group with Internal Http(s) load balancers..." + + safeRetry.doRetry( + addInternalHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName), + "Internal Http load balancer backends", + task, + RETRY_ERROR_CODES, + [], + [operation: "addInternalHttpLoadBalancerBackends", action: "add", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], + registry + ) + task.updateStatus phaseName, "Registering server group with Internal load balancers..." 
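One mechanical migration threaded through this file (and the rest of the PR): retrofit.RetrofitError is replaced by kork's SpinnakerServerException wherever a Retrofit-backed client failure is swallowed. A minimal sketch of the resulting shape, with someConsulCall() as a hypothetical stand-in for the Consul call above:

  import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException

  try {
    someConsulCall()   // hypothetical Retrofit-backed client call
  } catch (SpinnakerServerException ignored) {
    // Consul isn't running; swallow and continue, as the old RetrofitError branch did
  }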
safeRetry.doRetry( @@ -324,7 +351,7 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio addInstancesToTargetPool(compute, project, region, targetPoolLocalName, targetPoolsAddInstanceRequest), "instances", task, - RETRY_ERROR_CODES, + AbstractEnableDisableAtomicOperation.RETRY_ERROR_CODES, [], [operation: "addInstancesToTargetPool", action: "register", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], registry @@ -375,56 +402,71 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio Closure destroyHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.destroyHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.destroyHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) + null + } + } + + + Closure destroyInternalHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { + return { + GCEUtil.destroyInternalHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure destroyInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.destroyInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.destroyInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure destroySslLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.destroySslLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.destroySslLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure destroyTcpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.destroyTcpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.destroyTcpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure addHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.addHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.addHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) + null + } + } + + Closure addInternalHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { + return { + GCEUtil.addInternalHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure addSslLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.addSslLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + 
GCEUtil.addSslLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure addTcpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.addTcpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.addTcpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } Closure addInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { - GCEUtil.addInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, this) + GCEUtil.addInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) null } } @@ -488,23 +530,19 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio String region = serverGroup.region String zone = serverGroup.zone if (serverGroup.autoscalingPolicy) { - def policyDescription = - GCEUtil.buildAutoscalingPolicyDescriptionFromAutoscalingPolicy(serverGroup.autoscalingPolicy) - if (policyDescription) { - def autoscaler = GCEUtil.buildAutoscaler(serverGroupName, serverGroup.selfLink, policyDescription) - autoscaler.getAutoscalingPolicy().setMode(mode.toString()) - - if (serverGroup.regional) { - timeExecute( - compute.regionAutoscalers().update(project, region, autoscaler), - "compute.regionAutoscalers.update", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) - } else { - timeExecute( - compute.autoscalers().update(project, zone, autoscaler), - "compute.autoscalers.update", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) - } + def autoscaler = GCEUtil.buildAutoscaler(serverGroupName, serverGroup.selfLink, serverGroup.autoscalingPolicy) + autoscaler.getAutoscalingPolicy().setMode(mode.toString()) + + if (serverGroup.regional) { + timeExecute( + compute.regionAutoscalers().update(project, region, autoscaler), + "compute.regionAutoscalers.update", + TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) + } else { + timeExecute( + compute.autoscalers().update(project, zone, autoscaler), + "compute.autoscalers.update", + TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperation.groovy index eb4da49ddf8..f7bb13d097b 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperation.groovy @@ -75,7 +75,7 @@ class CopyLastGoogleServerGroupAtomicOperation extends GoogleAtomicOperation{ @@ -39,21 +40,24 @@ class DeleteGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation private static final String BASE_PHASE = "DELETE_SCALING_POLICY" private final DeleteGoogleAutoscalingPolicyDescription description - @Autowired private GoogleClusterProvider googleClusterProvider - @Autowired + private GoogleOperationPoller googleOperationPoller + AtomicOperationsRegistry atomicOperationsRegistry - 
@Autowired OrchestrationProcessor orchestrationProcessor private static Task getTask() { TaskRepository.threadLocalTask.get() } - DeleteGoogleAutoscalingPolicyAtomicOperation(DeleteGoogleAutoscalingPolicyDescription description) { + DeleteGoogleAutoscalingPolicyAtomicOperation(DeleteGoogleAutoscalingPolicyDescription description, @Autowired GoogleClusterProvider googleClusterProvider, @Autowired GoogleOperationPoller googleOperationPoller, @Autowired AtomicOperationsRegistry atomicOperationsRegistry, @Autowired OrchestrationProcessor orchestrationProcessor) { this.description = description + this.googleClusterProvider = googleClusterProvider + this.googleOperationPoller = googleOperationPoller + this.atomicOperationsRegistry = atomicOperationsRegistry + this.orchestrationProcessor = orchestrationProcessor } /** @@ -82,34 +86,44 @@ class DeleteGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation task.updateStatus BASE_PHASE, "Initializing deletion of autoHealing policy for $description.serverGroupName..." if (isRegional) { def request = new RegionInstanceGroupManagersSetAutoHealingRequest().setAutoHealingPolicies([]) - timeExecute( + def deleteOp = timeExecute( compute.regionInstanceGroupManagers().setAutoHealingPolicies(project, region, serverGroupName, request), "compute.regionInstanceGroupManagers.setAutoHealingPolicies", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, + deleteOp.getName(), null, task, "autoHealing policy for $serverGroupName", BASE_PHASE) + deletePolicyMetadata(compute, credentials, project, GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName)) } else { def request = new InstanceGroupManagersSetAutoHealingRequest().setAutoHealingPolicies([]) - timeExecute( + def deleteOp = timeExecute( compute.instanceGroupManagers().setAutoHealingPolicies(project, zone, serverGroupName, request), "compute.instanceGroupManagers.setAutoHealingPolicies", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, zone) + googleOperationPoller.waitForZonalOperation(compute, project, zone, + deleteOp.getName(), null, task, "autoHealing policy for $serverGroupName", BASE_PHASE) + deletePolicyMetadata(compute, credentials, project, GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName)) } task.updateStatus BASE_PHASE, "Done deleting autoHealing policy for $serverGroupName." - deletePolicyMetadata(compute, credentials, project, GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName)) } else { task.updateStatus BASE_PHASE, "Initializing deletion of scaling policy for $description.serverGroupName..." 
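Note the injection refactor above: both autoscaling-policy operations move the @Autowired annotations from fields onto constructor parameters, making dependencies explicit and letting the class be built without a Spring context. A hedged illustration of what that buys in a unit test (Spock-style mocks assumed, since clouddriver's Groovy tests use Spock; this exact test is illustrative, not part of the PR):

  // Inside a Spock Specification: construct the operation directly, no Spring needed.
  def op = new DeleteGoogleAutoscalingPolicyAtomicOperation(
      new DeleteGoogleAutoscalingPolicyDescription(serverGroupName: "myapp-dev-v001"),
      Mock(GoogleClusterProvider),
      Mock(GoogleOperationPoller),
      Mock(AtomicOperationsRegistry),
      Mock(OrchestrationProcessor))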
if (isRegional) { - timeExecute( + def deleteOp = timeExecute( compute.regionAutoscalers().delete(project, region, serverGroupName), "compute.regionAutoscalers.delete", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, + deleteOp.getName(), null, task, "autoScaling policy for $serverGroupName", BASE_PHASE) + deletePolicyMetadata(compute, credentials, project, GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName)) } else { - timeExecute( + def deleteOp = timeExecute( compute.autoscalers().delete(project, zone, serverGroupName), "compute.autoscalers.delete", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, zone) + googleOperationPoller.waitForZonalOperation(compute, project, zone, + deleteOp.getName(), null, task, "autoScaling policy for $serverGroupName", BASE_PHASE) + deletePolicyMetadata(compute, credentials, project, GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName)) } task.updateStatus BASE_PHASE, "Done deleting scaling policy for $serverGroupName." - deletePolicyMetadata(compute, credentials, project, GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName)) } return null @@ -128,7 +142,7 @@ class DeleteGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation templateUrl = timeExecute( compute.regionInstanceGroupManagers().get(project, groupRegion, groupName), "compute.regionInstanceGroupManagers.get", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, groupRegion) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, groupRegion) .getInstanceTemplate() break case GoogleServerGroup.ServerGroupType.ZONAL: @@ -136,7 +150,7 @@ class DeleteGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation templateUrl = timeExecute( compute.instanceGroupManagers().get(project, groupZone, groupName), "compute.instanceGroupManagers.get", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, groupZone) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, groupZone) .getInstanceTemplate() break default: @@ -147,8 +161,8 @@ class DeleteGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation InstanceTemplate template = timeExecute( compute.instanceTemplates().get(project, Utils.getLocalName(templateUrl)), "compute.instancesTemplates.get", - TAG_SCOPE, SCOPE_GLOBAL) - def instanceDescription = GCEUtil.buildInstanceDescriptionFromTemplate(template) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_GLOBAL) + def instanceDescription = GCEUtil.buildInstanceDescriptionFromTemplate(project, template) def templateOpMap = [ image : instanceDescription.image, @@ -173,10 +187,10 @@ class DeleteGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation } if (templateOpMap?.instanceMetadata) { - templateOpMap.instanceMetadata.remove(GoogleServerGroup.View.AUTOSCALING_POLICY) - def converter = atomicOperationsRegistry.getAtomicOperationConverter('modifyGoogleServerGroupInstanceTemplateDescription', 'gce', ProviderVersion.v1) + templateOpMap.instanceMetadata.remove(GCEUtil.AUTOSCALING_POLICY) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('modifyGoogleServerGroupInstanceTemplateDescription', 'gce') AtomicOperation templateOp = converter.convertOperation(templateOpMap) - orchestrationProcessor.process([templateOp], UUID.randomUUID().toString()) + 
orchestrationProcessor.process('gce', [templateOp], UUID.randomUUID().toString()) } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperation.groovy index 5ad480e5d40..c82838fc05f 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperation.groovy @@ -23,9 +23,11 @@ import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry import com.netflix.spinnaker.clouddriver.google.deploy.description.DestroyGoogleServerGroupDescription +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired @@ -53,6 +55,9 @@ class DestroyGoogleServerGroupAtomicOperation extends GoogleAtomicOperation @@ -143,7 +143,7 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperation extends GoogleAtomi def newDescriptionProperties = [:] + originalDescription.properties + overriddenProperties // Remove the properties we don't want to compare or override. - newDescriptionProperties.keySet().removeAll(["class", "accountName", "credentials", "account"]) + newDescriptionProperties.keySet().removeAll(["class", "accountName", "credentials", "account", "name"]) // Resolve the auth scopes since the scopes returned on the existing instance template will be fully-resolved. newDescriptionProperties.authScopes = GCEUtil.resolveAuthScopes(newDescriptionProperties.authScopes) @@ -167,6 +167,19 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperation extends GoogleAtomi clonedDescription.disks = overriddenProperties.disks + clonedDescription.baseDeviceName = description.serverGroupName + + def bootImage = GCEUtil.getBootImage(description, + task, + BASE_PHASE, + clouddriverUserAgentApplicationName, + googleConfigurationProperties.baseImageProjects, + safeRetry, + this) + + // We include a subset of the image's attributes and a reference in the disks. + // Furthermore, we're using the underlying raw compute model classes + // so we can't simply change the representation to support what we need for shielded VMs. 
def attachedDisks = GCEUtil.buildAttachedDisks(clonedDescription, null, false, @@ -175,6 +188,7 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperation extends GoogleAtomi BASE_PHASE, clouddriverUserAgentApplicationName, googleConfigurationProperties.baseImageProjects, + bootImage, safeRetry, this) @@ -204,6 +218,10 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperation extends GoogleAtomi instanceTemplateProperties.setCanIpForward(canIpForward) } + if (overriddenProperties.shieldedVmConfig) { + instanceTemplateProperties.setShieldedVmConfig(description.shieldedVmConfig) + } + // Override the instance template's metadata if instanceMetadata was specified. if (overriddenProperties.instanceMetadata) { def metadata = GCEUtil.buildMetadataFromMap(description.instanceMetadata) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperation.groovy index ffa4d0b8c01..5deb6c5a7dd 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperation.groovy @@ -23,7 +23,9 @@ import com.google.common.annotations.VisibleForTesting import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.google.GoogleExecutor import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy @@ -34,7 +36,6 @@ import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCrede import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor -import com.netflix.spinnaker.clouddriver.security.ProviderVersion import org.springframework.beans.factory.annotation.Autowired class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation { @@ -44,26 +45,29 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation TaskRepository.threadLocalTask.get() } - @Autowired private GoogleClusterProvider googleClusterProvider - @Autowired + private GoogleOperationPoller googleOperationPoller + AtomicOperationsRegistry atomicOperationsRegistry - @Autowired OrchestrationProcessor orchestrationProcessor - @Autowired Cache cacheView - @Autowired ObjectMapper objectMapper private final UpsertGoogleAutoscalingPolicyDescription description - UpsertGoogleAutoscalingPolicyAtomicOperation(UpsertGoogleAutoscalingPolicyDescription description) { + UpsertGoogleAutoscalingPolicyAtomicOperation(UpsertGoogleAutoscalingPolicyDescription description, @Autowired GoogleClusterProvider googleClusterProvider, @Autowired GoogleOperationPoller googleOperationPoller, @Autowired AtomicOperationsRegistry 
atomicOperationsRegistry, @Autowired OrchestrationProcessor orchestrationProcessor, @Autowired Cache cacheView, @Autowired ObjectMapper objectMapper) { this.description = description - } + this.googleClusterProvider = googleClusterProvider + this.googleOperationPoller = googleOperationPoller + this.atomicOperationsRegistry = atomicOperationsRegistry + this.orchestrationProcessor = orchestrationProcessor + this.cacheView = cacheView + this.objectMapper = objectMapper + } /** * Autoscaling policy: @@ -95,44 +99,51 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation def autoscaler = null if (description.autoscalingPolicy) { - def ancestorAutoscalingPolicyDescription = - GCEUtil.buildAutoscalingPolicyDescriptionFromAutoscalingPolicy(serverGroup.autoscalingPolicy) + def ancestorAutoscalingPolicyDescription = serverGroup.autoscalingPolicy if (ancestorAutoscalingPolicyDescription) { task.updateStatus BASE_PHASE, "Updating autoscaler for $serverGroupName..." autoscaler = GCEUtil.buildAutoscaler(serverGroupName, - serverGroup.selfLink, - copyAndOverrideAncestorAutoscalingPolicy(ancestorAutoscalingPolicyDescription, - description.autoscalingPolicy)) + serverGroup.selfLink, + copyAndOverrideAncestorAutoscalingPolicy(ancestorAutoscalingPolicyDescription, + description.autoscalingPolicy)) if (isRegional) { - timeExecute( - compute.regionAutoscalers().update(project, region, autoscaler), - "compute.regionAutoscalers.update", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) + def updateOp = timeExecute( + compute.regionAutoscalers().update(project, region, autoscaler), + "compute.regionAutoscalers.update", + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, + updateOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE) } else { - timeExecute( - compute.autoscalers().update(project, zone, autoscaler), - "compute.autoscalers.update", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) + def updateOp = timeExecute( + compute.autoscalers().update(project, zone, autoscaler), + "compute.autoscalers.update", + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, zone) + googleOperationPoller.waitForZonalOperation(compute, project, zone, + updateOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE) } } else { task.updateStatus BASE_PHASE, "Creating new autoscaler for $serverGroupName..." 
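(Stepping back to the constructor change at the top of this hunk: the collaborators move from field-level @Autowired injection to constructor injection, so the operation can be constructed directly in tests without a Spring context. Condensed to its shape, with most parameters and the type parameter elided; the full six-argument constructor is in the hunk above:

    class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation {
      private final UpsertGoogleAutoscalingPolicyDescription description
      private GoogleClusterProvider googleClusterProvider
      // ...remaining collaborators declared the same way...

      UpsertGoogleAutoscalingPolicyAtomicOperation(UpsertGoogleAutoscalingPolicyDescription description,
                                                   @Autowired GoogleClusterProvider googleClusterProvider /* , ... */) {
        this.description = description
        this.googleClusterProvider = googleClusterProvider
      }
    }

The upsert logic resumes below.)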
autoscaler = GCEUtil.buildAutoscaler(serverGroupName, - serverGroup.selfLink, - normalizeNewAutoscalingPolicy(description.autoscalingPolicy)) + serverGroup.selfLink, + normalizeNewAutoscalingPolicy(description.autoscalingPolicy)) if (isRegional) { - timeExecute( - compute.regionAutoscalers().insert(project, region, autoscaler), - "compute.regionAutoscalers.insert", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) + def insertOp = timeExecute( + compute.regionAutoscalers().insert(project, region, autoscaler), + "compute.regionAutoscalers.insert", + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, + insertOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE) } else { - timeExecute( - compute.autoscalers().insert(project, zone, autoscaler), - "compute.autoscalers.insert", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) + def insertOp = timeExecute( + compute.autoscalers().insert(project, zone, autoscaler), + "compute.autoscalers.insert", + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, zone) + googleOperationPoller.waitForZonalOperation(compute, project, zone, + insertOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE) } } } @@ -143,18 +154,22 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation def regionalRequest = { List policy -> def request = new RegionInstanceGroupManagersSetAutoHealingRequest().setAutoHealingPolicies(policy) - timeExecute( + def autoHealingOp = timeExecute( compute.regionInstanceGroupManagers().setAutoHealingPolicies(project, region, serverGroupName, request), "compute.regionInstanceGroupManagers.setAutoHealingPolicies", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, + autoHealingOp.getName(), null, task, "autoHealing policy ${policy} for server group $serverGroupName", BASE_PHASE) } def zonalRequest = { List policy -> def request = new InstanceGroupManagersSetAutoHealingRequest().setAutoHealingPolicies(policy) - timeExecute( + def autoHealingOp = timeExecute( compute.instanceGroupManagers().setAutoHealingPolicies(project, zone, serverGroupName, request), "compute.instanceGroupManagers.setAutoHealingPolicies", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, zone) + googleOperationPoller.waitForZonalOperation(compute, project, zone, + autoHealingOp.getName(), null, task, "autoHealing policy ${policy} for server group $serverGroupName", BASE_PHASE) } if (ancestorAutoHealingPolicyDescription) { @@ -180,18 +195,20 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation // TODO(jacobkiefer): Update metadata for autoHealingPolicy when 'mode' support lands. // NOTE: This block is here intentionally, we should wait until all the modifications are done before // updating the instance template metadata. 
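(The recurring change in this hunk, repeated later for the load balancer listeners, is that mutating Compute calls no longer fire and forget: timeExecute(...) now hands back the com.google.api.services.compute.model.Operation, and the code blocks on GoogleOperationPoller until GCE reports it done, so later steps cannot observe a half-applied autoscaler or auto-healing policy. The pattern, reduced to its essentials as used above; all names are the hunk's own and assumed in scope, and the status message is shortened here:

    def updateOp = timeExecute(
        compute.regionAutoscalers().update(project, region, autoscaler),
        "compute.regionAutoscalers.update",
        GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, region)

    // Poll the returned regional Operation until DONE (or timeout), reporting progress
    // against the task/phase that initiated the mutation.
    googleOperationPoller.waitForRegionalOperation(compute, project, region,
        updateOp.getName(), null, task, "autoScaler ${autoscaler.getName()}", BASE_PHASE)

Zonal mutations follow the same shape with waitForZonalOperation and a zone in place of the region.)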
- if (isRegional) { - updatePolicyMetadata(compute, - credentials, - project, - GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName), - autoscaler) - } else { - updatePolicyMetadata(compute, - credentials, - project, - GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName), - autoscaler) + if (description.writeMetadata == null || description.writeMetadata) { + if (isRegional) { + updatePolicyMetadata(compute, + credentials, + project, + GCEUtil.buildRegionalServerGroupUrl(project, region, serverGroupName), + autoscaler) + } else { + updatePolicyMetadata(compute, + credentials, + project, + GCEUtil.buildZonalServerGroupUrl(project, zone, serverGroupName), + autoscaler) + } } return null @@ -206,12 +223,23 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation } // Deletes existing customMetricUtilizations if passed an empty array. - ["minNumReplicas", "maxNumReplicas", "coolDownPeriodSec", "customMetricUtilizations", "mode"].each { + ["minNumReplicas", "maxNumReplicas", "coolDownPeriodSec", "customMetricUtilizations", "mode", "scalingSchedules"].each { if (update[it] != null) { newDescription[it] = update[it] } } + // If scaleInControl is completely absent, we leave the previous value. + // To remove it, set it to an empty object. + if (update.scaleInControl != null) { + def scaleInControl = update.scaleInControl + if (scaleInControl.timeWindowSec != null && scaleInControl.maxScaledInReplicas != null) { + newDescription.scaleInControl = scaleInControl + } else { + newDescription.scaleInControl = null + } + } + // Deletes existing cpuUtilization or loadBalancingUtilization if passed an empty object. ["cpuUtilization", "loadBalancingUtilization"].each { if (update[it] != null) { @@ -247,7 +275,7 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation return newDescription } - ["healthCheck", "initialDelaySec"].each { + ["healthCheck", "initialDelaySec", "healthCheckKind"].each { if (update[it] != null) { newDescription[it] = update[it] } @@ -276,20 +304,17 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation } private buildAutoHealingPolicyFromAutoHealingPolicyDescription(GoogleAutoHealingPolicy autoHealingPolicyDescription, String project, Compute compute) { - // Note: Cache queries for these health checks must occur in this order since queryHealthCheck() will make a live - // call that fails on a missing health check. - def autoHealingHealthCheck = GCEUtil.queryNestedHealthCheck(project, description.accountName, autoHealingPolicyDescription.healthCheck, compute, cacheView, task, BASE_PHASE, this) ?: - GCEUtil.queryHealthCheck(project, description.accountName, autoHealingPolicyDescription.healthCheck, compute, cacheView, task, BASE_PHASE, this) + def autoHealingHealthCheck = GCEUtil.queryHealthCheck(project, description.accountName, autoHealingPolicyDescription.healthCheck, autoHealingPolicyDescription.healthCheckKind, compute, cacheView, task, BASE_PHASE, this) List autoHealingPolicy = autoHealingPolicyDescription?.healthCheck ? 
[new InstanceGroupManagerAutoHealingPolicy( - healthCheck: autoHealingHealthCheck.selfLink, - initialDelaySec: autoHealingPolicyDescription.initialDelaySec)] + healthCheck: autoHealingHealthCheck.selfLink, + initialDelaySec: autoHealingPolicyDescription.initialDelaySec)] : null if (autoHealingPolicy && autoHealingPolicyDescription.maxUnavailable) { def maxUnavailable = new FixedOrPercent(fixed: autoHealingPolicyDescription.maxUnavailable.fixed as Integer, - percent: autoHealingPolicyDescription.maxUnavailable.percent as Integer) + percent: autoHealingPolicyDescription.maxUnavailable.percent as Integer) autoHealingPolicy[0].setMaxUnavailable(maxUnavailable) } @@ -311,7 +336,7 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation templateUrl = timeExecute( compute.regionInstanceGroupManagers().get(project, groupRegion, groupName), "compute.regionInstanceGroupManagers.get", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, groupRegion) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_REGIONAL, GoogleExecutor.TAG_REGION, groupRegion) .getInstanceTemplate() break case GoogleServerGroup.ServerGroupType.ZONAL: @@ -319,7 +344,7 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation templateUrl = timeExecute( compute.instanceGroupManagers().get(project, groupZone, groupName), "compute.instanceGroupManagers.get", - TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, groupZone) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_ZONAL, GoogleExecutor.TAG_ZONE, groupZone) .getInstanceTemplate() break default: @@ -330,8 +355,8 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation InstanceTemplate template = timeExecute( compute.instanceTemplates().get(project, Utils.getLocalName(templateUrl)), "compute.instancesTemplates.get", - TAG_SCOPE, SCOPE_GLOBAL) - def instanceDescription = GCEUtil.buildInstanceDescriptionFromTemplate(template) + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_GLOBAL) + def instanceDescription = GCEUtil.buildInstanceDescriptionFromTemplate(project, template) def templateOpMap = [ image : instanceDescription.image, @@ -357,17 +382,17 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation def instanceMetadata = templateOpMap?.instanceMetadata if (instanceMetadata && autoscaler) { - instanceMetadata.(GoogleServerGroup.View.AUTOSCALING_POLICY) = objectMapper.writeValueAsString(autoscaler) + instanceMetadata.(GCEUtil.AUTOSCALING_POLICY) = objectMapper.writeValueAsString(autoscaler) } else if (autoscaler) { templateOpMap.instanceMetadata = [ - (GoogleServerGroup.View.AUTOSCALING_POLICY): objectMapper.writeValueAsString(autoscaler) + (GCEUtil.AUTOSCALING_POLICY): objectMapper.writeValueAsString(autoscaler) ] } if (templateOpMap.instanceMetadata) { - def converter = atomicOperationsRegistry.getAtomicOperationConverter('modifyGoogleServerGroupInstanceTemplateDescription', 'gce', ProviderVersion.v1) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('modifyGoogleServerGroupInstanceTemplateDescription', 'gce') AtomicOperation templateOp = converter.convertOperation(templateOpMap) - orchestrationProcessor.process([templateOp], UUID.randomUUID().toString()) + orchestrationProcessor.process('gce', [templateOp], UUID.randomUUID().toString()) } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperation.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperation.groovy index 826570ede9f..ec86631acb4 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperation.groovy @@ -22,7 +22,6 @@ import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleImageTagsDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import org.springframework.beans.factory.annotation.Autowired /** @@ -62,10 +61,8 @@ class UpsertGoogleImageTagsAtomicOperation extends GoogleAtomicOperation { def project = credentials.project def imageName = description.imageName def tags = description.tags - def image = GCEUtil.queryImage(project, - imageName, + def image = GCEUtil.queryImage(imageName, credentials, - compute, task, BASE_PHASE, clouddriverUserAgentApplicationName, @@ -74,7 +71,7 @@ class UpsertGoogleImageTagsAtomicOperation extends GoogleAtomicOperation { if (image) { // Image self links are constructed like this: - // https://www.googleapis.com/compute/alpha/projects/rosco-oss-2/global/images/spinnaker-rosco-all-20161229193556-precise + // https://compute.googleapis.com/compute/alpha/projects/rosco-oss-2/global/images/spinnaker-rosco-all-20161229193556-precise def imageSelfLinkTokens = image.getSelfLink().split("/") def imageProject = imageSelfLinkTokens[imageSelfLinkTokens.length - 4] diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperation.groovy index 77591170d85..855aaee368a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperation.groovy @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.InstanceGroupManager import com.google.api.services.compute.model.InstanceGroupManagersSetInstanceTemplateRequest import com.google.api.services.compute.model.InstanceGroupsListInstancesRequest import com.google.api.services.compute.model.RegionInstanceGroupManagersSetTemplateRequest @@ -77,7 +79,8 @@ class UpsertGoogleServerGroupTagsAtomicOperation extends GoogleAtomicOperation projectForwardingRules = timeExecute( @@ -110,9 +111,19 @@ class DeleteGoogleHttpLoadBalancerAtomicOperation extends DeleteGoogleLoadBalanc List listenersToDelete = [] projectForwardingRules.each { ForwardingRule rule -> - def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this) - if (GCEUtil.getLocalName(proxy?.urlMap) == urlMapName) { - listenersToDelete << rule.getName() + try { + def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this) + if (GCEUtil.getLocalName(proxy?.urlMap) == 
urlMapName) { + listenersToDelete << rule.getName() + } + } catch (GoogleJsonResponseException e) { + // 404 is thrown if the target proxy does not exist. + // We can ignore 404's here because we are iterating over all forwarding rules and some other process may have + // deleted the target proxy between the time we queried for the list of forwarding rules and now. + // Any other exception needs to be propagated. + if (e.getStatusCode() != 404) { + throw e + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperation.groovy index e8f94d5f841..d518c9d5674 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperation.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer +import com.google.api.client.googleapis.json.GoogleJsonResponseException import com.google.api.services.compute.model.BackendService import com.google.api.services.compute.model.ForwardingRule import com.google.api.services.compute.model.Operation @@ -92,8 +93,18 @@ class DeleteGoogleInternalLoadBalancerAtomicOperation extends GoogleAtomicOperat // Determine which listeners to delete. List listenersToDelete = [] projectForwardingRules.each { ForwardingRule rule -> - if (GCEUtil.getLocalName(rule.getBackendService()) == backendServiceName) { - listenersToDelete << rule.getName() + try { + if (GCEUtil.getLocalName(rule.getBackendService()) == backendServiceName) { + listenersToDelete << rule.getName() + } + } catch (GoogleJsonResponseException e) { + // 404 is thrown if the target proxy does not exist. + // We can ignore 404's here because we are iterating over all forwarding rules and some other process may have + // deleted the target proxy between the time we queried for the list of forwarding rules and now. + // Any other exception needs to be propagated. 
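(All four load balancer deletions touched by this diff, the HTTP one above and the internal, SSL, and TCP variants below, gain the same guard: resolving a forwarding rule's target can race against a concurrent delete, so a 404 from the lookup means the target is already gone and the rule can be skipped, while any other status still propagates. The core of the pattern, as it appears in the HTTP hunk:

    try {
      def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this)
      if (GCEUtil.getLocalName(proxy?.urlMap) == urlMapName) {
        listenersToDelete << rule.getName()
      }
    } catch (GoogleJsonResponseException e) {
      // The proxy vanished between listing the forwarding rules and resolving it.
      if (e.getStatusCode() != 404) {
        throw e
      }
    }

The internal load balancer variant below compares rule.getBackendService() instead of a proxy's urlMap.)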
+ if (e.getStatusCode() != 404) { + throw e + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperation.groovy index 20aa88a3dd3..6a37a260d4e 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperation.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer +import com.google.api.client.googleapis.json.GoogleJsonResponseException import com.google.api.services.compute.model.* import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository @@ -63,7 +64,7 @@ class DeleteGoogleSslLoadBalancerAtomicOperation extends DeleteGoogleLoadBalance def forwardingRuleName = description.loadBalancerName // First we look everything up. Then, we call delete on all of it. Finally, we wait (with timeout) for all to complete. - // Start with the forwaring rule. + // Start with the forwarding rule. task.updateStatus BASE_PHASE, "Retrieving global forwarding rule $forwardingRuleName..." List projectForwardingRules = timeExecute( @@ -91,9 +92,19 @@ class DeleteGoogleSslLoadBalancerAtomicOperation extends DeleteGoogleLoadBalance List listenersToDelete = [] projectForwardingRules.each { ForwardingRule rule -> - def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this) - if (GCEUtil.getLocalName(proxy?.service) == backendServiceName) { - listenersToDelete << rule.getName() + try { + def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this) + if (GCEUtil.getLocalName(proxy?.service) == backendServiceName) { + listenersToDelete << rule.getName() + } + } catch (GoogleJsonResponseException e) { + // 404 is thrown if the target proxy does not exist. + // We can ignore 404's here because we are iterating over all forwarding rules and some other process may have + // deleted the target proxy between the time we queried for the list of forwarding rules and now. + // Any other exception needs to be propagated. 
+ if (e.getStatusCode() != 404) { + throw e + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperation.groovy index 5e0e4bdfbb9..e7241f16d99 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperation.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer +import com.google.api.client.googleapis.json.GoogleJsonResponseException import com.google.api.services.compute.model.* import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository @@ -63,7 +64,7 @@ class DeleteGoogleTcpLoadBalancerAtomicOperation extends DeleteGoogleLoadBalance def forwardingRuleName = description.loadBalancerName // First we look everything up. Then, we call delete on all of it. Finally, we wait (with timeout) for all to complete. - // Start with the forwaring rule. + // Start with the forwarding rule. task.updateStatus BASE_PHASE, "Retrieving global forwarding rule $forwardingRuleName..." List projectForwardingRules = timeExecute( @@ -91,9 +92,19 @@ class DeleteGoogleTcpLoadBalancerAtomicOperation extends DeleteGoogleLoadBalance List listenersToDelete = [] projectForwardingRules.each { ForwardingRule rule -> - def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this) - if (GCEUtil.getLocalName(proxy?.service) == backendServiceName) { - listenersToDelete << rule.getName() + try { + def proxy = GCEUtil.getTargetProxyFromRule(compute, project, rule, BASE_PHASE, safeRetry, this) + if (GCEUtil.getLocalName(proxy?.service) == backendServiceName) { + listenersToDelete << rule.getName() + } + } catch (GoogleJsonResponseException e) { + // 404 is thrown if the target proxy does not exist. + // We can ignore 404's here because we are iterating over all forwarding rules and some other process may have + // deleted the target proxy between the time we queried for the list of forwarding rules and now. + // Any other exception needs to be propagated. 
+ if (e.getStatusCode() != 404) { + throw e + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperation.groovy index 40176ca2522..00c2eec9f76 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperation.groovy @@ -34,10 +34,12 @@ import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCrede import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor -import com.netflix.spinnaker.clouddriver.security.ProviderVersion import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.BACKEND_SERVICE_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES + @Slf4j class UpsertGoogleHttpLoadBalancerAtomicOperation extends UpsertGoogleLoadBalancerAtomicOperation { private static final String BASE_PHASE = "UPSERT_HTTP_LOAD_BALANCER" @@ -437,15 +439,19 @@ class UpsertGoogleHttpLoadBalancerAtomicOperation extends UpsertGoogleLoadBalanc TargetHttpsProxiesSetSslCertificatesRequest setSslReq = new TargetHttpsProxiesSetSslCertificatesRequest( sslCertificates: [GCEUtil.buildCertificateUrl(project, httpLoadBalancer.certificate)], ) - timeExecute( + def sslCertOp = timeExecute( compute.targetHttpsProxies().setSslCertificates(project, targetProxyName, setSslReq), "compute.targetHttpsProxies.setSslCertificates", TAG_SCOPE, SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, sslCertOp.getName(), null, task, + "set ssl cert ${httpLoadBalancer.certificate}", BASE_PHASE) UrlMapReference urlMapRef = new UrlMapReference(urlMap: urlMapUrl) def setUrlMapOp = timeExecute( compute.targetHttpsProxies().setUrlMap(project, targetProxyName, urlMapRef), "compute.targetHttpsProxies.setUrlMap", TAG_SCOPE, SCOPE_GLOBAL) + googleOperationPoller.waitForGlobalOperation(compute, project, setUrlMapOp.getName(), null, task, + "set urlMap $urlMapUrl for target proxy $targetProxyName", BASE_PHASE) targetProxyUrl = setUrlMapOp.getTargetLink() break default: @@ -547,24 +553,24 @@ class UpsertGoogleHttpLoadBalancerAtomicOperation extends UpsertGoogleLoadBalanc compute.instanceTemplates().get(project, Utils.getLocalName(templateUrl)), "compute.instancesTemplates.get", TAG_SCOPE, SCOPE_GLOBAL) - def instanceDescription = GCEUtil.buildInstanceDescriptionFromTemplate(template) + def instanceDescription = GCEUtil.buildInstanceDescriptionFromTemplate(project, template) def templateOpMap = [ - image: instanceDescription.image, - instanceType: instanceDescription.instanceType, - credentials: credentials.getName(), - disks: instanceDescription.disks, - instanceMetadata: instanceDescription.instanceMetadata, - tags: instanceDescription.tags, - network: instanceDescription.network, - subnet: instanceDescription.subnet, + image : instanceDescription.image, + instanceType : 
instanceDescription.instanceType, + credentials : credentials.getName(), + disks : instanceDescription.disks, + instanceMetadata : instanceDescription.instanceMetadata, + tags : instanceDescription.tags, + network : instanceDescription.network, + subnet : instanceDescription.subnet, serviceAccountEmail: instanceDescription.serviceAccountEmail, - authScopes: instanceDescription.authScopes, - preemptible: instanceDescription.preemptible, - automaticRestart: instanceDescription.automaticRestart, - onHostMaintenance: instanceDescription.onHostMaintenance, - region: groupRegion, - serverGroupName: groupName + authScopes : instanceDescription.authScopes, + preemptible : instanceDescription.preemptible, + automaticRestart : instanceDescription.automaticRestart, + onHostMaintenance : instanceDescription.onHostMaintenance, + region : groupRegion, + serverGroupName : groupName ] if (instanceDescription.minCpuPlatform) { @@ -573,23 +579,23 @@ class UpsertGoogleHttpLoadBalancerAtomicOperation extends UpsertGoogleLoadBalanc def instanceMetadata = templateOpMap?.instanceMetadata if (instanceMetadata) { - List globalLbs = instanceMetadata.(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES)?.split(',') ?: [] + List globalLbs = instanceMetadata.(GLOBAL_LOAD_BALANCER_NAMES)?.split(',') ?: [] globalLbs = globalLbs ? globalLbs + loadBalancerName : [loadBalancerName] - instanceMetadata.(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES) = globalLbs.unique().join(',') + instanceMetadata.(GLOBAL_LOAD_BALANCER_NAMES) = globalLbs.unique().join(',') - List bsNames = instanceMetadata.(GoogleServerGroup.View.BACKEND_SERVICE_NAMES)?.split(',') ?: [] + List bsNames = instanceMetadata.(BACKEND_SERVICE_NAMES)?.split(',') ?: [] bsNames = bsNames ? bsNames + backendService.name : [backendService.name] - instanceMetadata.(GoogleServerGroup.View.BACKEND_SERVICE_NAMES) = bsNames.unique().join(',') + instanceMetadata.(BACKEND_SERVICE_NAMES) = bsNames.unique().join(',') } else { templateOpMap.instanceMetadata = [ - (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES): loadBalancerName, - (GoogleServerGroup.View.BACKEND_SERVICE_NAMES) : backendService.name, + (GLOBAL_LOAD_BALANCER_NAMES): loadBalancerName, + (BACKEND_SERVICE_NAMES) : backendService.name, ] } - def converter = atomicOperationsRegistry.getAtomicOperationConverter('modifyGoogleServerGroupInstanceTemplateDescription', 'gce', ProviderVersion.v1) + def converter = atomicOperationsRegistry.getAtomicOperationConverter('modifyGoogleServerGroupInstanceTemplateDescription', 'gce') AtomicOperation templateOp = converter.convertOperation(templateOpMap) - orchestrationProcessor.process([templateOp], UUID.randomUUID().toString()) + orchestrationProcessor.process('gce', [templateOp], UUID.randomUUID().toString()) } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperation.groovy index 9b2b1cbab3f..a82e9d7b36e 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperation.groovy @@ -35,6 +35,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry import 
com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException import com.netflix.spinnaker.clouddriver.google.deploy.ops.GoogleAtomicOperation +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleSessionAffinity import org.springframework.beans.factory.annotation.Autowired class UpsertGoogleLoadBalancerAtomicOperation extends GoogleAtomicOperation { @@ -131,6 +132,14 @@ class UpsertGoogleLoadBalancerAtomicOperation extends GoogleAtomicOperation project, region, GCEUtil.getLocalName(existingForwardingRule.target), compute, task, BASE_PHASE, this) if (existingTargetPool) { + GoogleSessionAffinity newSessionAffinity = Optional.ofNullable(description.sessionAffinity).orElse(GoogleSessionAffinity.NONE) + boolean sessionAffinityChanged = newSessionAffinity != GoogleSessionAffinity.valueOf(existingTargetPool.getSessionAffinity()) + if (sessionAffinityChanged && existingTargetPool.instances.any()) { + task.updateStatus BASE_PHASE, "Impossible to change Session Affinity for target pool with existing instances." + task.fail() + return + } + // Existing set of instances is only updated if the instances property is specified on description. We don't // want all instances removed from an existing target pool if the instances property is not specified on the // description. @@ -371,7 +380,8 @@ class UpsertGoogleLoadBalancerAtomicOperation extends GoogleAtomicOperation def targetPool = new TargetPool( name: targetPoolName, healthChecks: httpHealthChecksResourceLinks, - instances: description.instances + instances: description.instances, + sessionAffinity: description.sessionAffinity ) targetPoolResourceOperation = timeExecute( diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/RestoreSnapshotAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/RestoreSnapshotAtomicOperation.groovy index 6a24114d876..607fb4b56be 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/RestoreSnapshotAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/RestoreSnapshotAtomicOperation.groovy @@ -34,7 +34,7 @@ import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSecurityGrou import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.jobs.JobExecutor import com.netflix.spinnaker.clouddriver.jobs.JobRequest -import com.netflix.spinnaker.clouddriver.jobs.JobStatus +import com.netflix.spinnaker.clouddriver.jobs.JobResult import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import org.springframework.beans.factory.annotation.Autowired @@ -142,11 +142,17 @@ class RestoreSnapshotAtomicOperation implements AtomicOperation { task.updateStatus BASE_PHASE, "Restoring snapshot with timestamp ${snapshotTimestamp} for application ${applicationName} in account ${accountName}" createTerraformConfig() - ArrayList command = ["terraform", "apply", "-state=$directory/terraform.tfstate", "$directory"] - String jobId = jobExecutor.startJob(new JobRequest(tokenizedCommand: command), System.getenv(), new ByteArrayInputStream()) - waitForJobCompletion(jobId) - + // JobRequest expects 
a List and will fail if some of the arguments are GStrings (as that is not a subclass + // of String). It is thus important to only add Strings to command. For example, adding a flag "--test=$testvalue" + // below will cause the job to fail unless you explicitly convert it to a String via "--test=$testvalue".toString() + ArrayList command = ["terraform", "apply", "-state=" + directory + "/terraform.tfstate", directory] + JobResult jobStatus = jobExecutor.runJob(new JobRequest(command)) cleanUpDirectory() + if (jobStatus.getResult() == JobResult.Result.FAILURE && jobStatus.getOutput()) { + String stdOut = jobStatus.getOutput() + String stdErr = jobStatus.getError() + throw new IllegalArgumentException("$stdOut + $stdErr") + } return null } @@ -220,21 +226,14 @@ class RestoreSnapshotAtomicOperation implements AtomicOperation { inputStream = new ByteArrayInputStream() env.GOOGLE_REGION = region } - ArrayList command = ["terraform", "import", "-state=$directory/terraform.tfstate", "$resource.$name", id] - String jobId = jobExecutor.startJob(new JobRequest(tokenizedCommand: command), env, inputStream) - waitForJobCompletion(jobId) - } - - private void waitForJobCompletion(String jobId) { - sleep(1000) - JobStatus jobStatus = jobExecutor.updateJob(jobId) - while (jobStatus.state == JobStatus.State.RUNNING) { - sleep(1000) - jobStatus = jobExecutor.updateJob(jobId) - } - if (jobStatus.result == JobStatus.Result.FAILURE && jobStatus.stdOut) { + // JobRequest expects a List and will fail if some of the arguments are GStrings (as that is not a subclass + // of String). It is thus important to only add Strings to command. For example, adding a flag "--test=$testvalue" + // below will cause the job to fail unless you explicitly convert it to a String via "--test=$testvalue".toString() + ArrayList command = ["terraform", "import", "-state=" + directory + "/terraform.tfstate", resource + "." 
+ name, id] + JobResult jobStatus = jobExecutor.runJob(new JobRequest(command, env, inputStream)) + if (jobStatus.getResult() == JobResult.Result.FAILURE && jobStatus.getOutput()) { cleanUpDirectory() - throw new IllegalArgumentException("$jobStatus.stdOut + $jobStatus.stdErr") + throw new IllegalArgumentException(jobStatus.getOutput() + jobStatus.getError()) } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/SaveSnapshotAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/SaveSnapshotAtomicOperation.groovy index 1db721a88a7..851d5a3b02f 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/SaveSnapshotAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/snapshot/SaveSnapshotAtomicOperation.groovy @@ -22,6 +22,7 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot.SaveSnapshotDescription import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceIllegalStateException +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleCluster import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck import com.netflix.spinnaker.clouddriver.google.model.GoogleSecurityGroup @@ -262,6 +263,9 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { instanceTemplateMap.metadata[item.key] = item.value } } + if (instanceTemplate.properties.shieldedVmConfig) { + addShieldedVmConfigToInstanceTemplateMap(instanceTemplate.properties.shieldedVmConfig as ShieldedVmConfig, instanceTemplateMap) + } numInstanceTemplates++ resourceMap.google_compute_instance_template[instanceTemplate.name as String] = instanceTemplateMap @@ -282,6 +286,20 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { return null } + private Void addShieldedVmConfigToInstanceTemplateMap(ShieldedVmConfig shieldedVmConfig, Map instanceTemplateMap) { + instanceTemplateMap.shielded_vm_config = [:] + if (shieldedVmConfig.enableSecureBoot != null) { + instanceTemplateMap.shielded_vm_config.enable_secure_boot = shieldedVmConfig.enableSecureBoot + } + if (shieldedVmConfig.enableVtpm != null) { + instanceTemplateMap.shielded_vm_config.enable_vtpm = shieldedVmConfig.enableVtpm + } + if (shieldedVmConfig.enableIntegrityMonitoring != null) { + instanceTemplateMap.shielded_vm_config.enable_integrity_monitoring = shieldedVmConfig.enableIntegrityMonitoring + } + return null + } + private Void addNetworkInterfacesToInstanceTemplateMap(List networkInterfaces, Map instanceTemplateMap) { instanceTemplateMap.network_interface = [] @@ -361,7 +379,7 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { return numDisks > 0 } - private Void addAutoscalerToResourceMap(String targetName, String targetZone, AutoscalingPolicy autoscalingPolicy, Map resourceMap) { + private Void addAutoscalerToResourceMap(String targetName, String targetZone, GoogleAutoscalingPolicy autoscalingPolicy, Map resourceMap) { def autoscalerMap = [:] autoscalerMap.name = targetName @@ -388,6 +406,14 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { if (autoscalingPolicy.cpuUtilization?.utilizationTarget) { 
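(One aside before the autoscaler mapping continues: the JobRequest comment in the RestoreSnapshotAtomicOperation hunk above deserves a concrete illustration, because it is an easy Groovy trap. groovy.lang.GString is not a subclass of java.lang.String, so an interpolated string smuggled into the command list fails the executor's list-of-String expectation at runtime. A self-contained sketch; the variable names are illustrative:

    def directory = "/tmp/snapshot"                              // plain String (no interpolation)

    def bad = "-state=$directory/terraform.tfstate"              // GString, not a String
    def ok1 = "-state=$directory/terraform.tfstate".toString()   // explicit conversion
    def ok2 = "-state=" + directory + "/terraform.tfstate"       // concatenation yields a String

    assert !(bad instanceof String)
    assert ok1 instanceof String
    assert ok2 instanceof String

Hence the switch in both terraform hunks to plain string concatenation when building the commands.)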
autoscalerMap.autoscaling_policy.cpu_utilization = [:] autoscalerMap.autoscaling_policy.cpu_utilization.target = autoscalingPolicy.cpuUtilization.utilizationTarget + switch (autoscalingPolicy.cpuUtilization.predictiveMethod) { + case "NONE": + autoscalerMap.autoscaling_policy.cpu_utilization.predictive_method = "none" + break + case "OPTIMIZE_AVAILABILITY": + autoscalerMap.autoscaling_policy.cpu_utilization.predictive_method = "optimize_availability" + break + } } if (autoscalingPolicy.customMetricUtilizations) { autoscalerMap.autoscaling_policy.metric = [] @@ -403,6 +429,12 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { } else { return } + if (metric.filter) { + metricMap.filter = metric.filter + } + if (metric.singleInstanceAssignment) { + metricMap.single_instance_assignment = metric.singleInstanceAssignment + } //TODO(nwwebb) gce doesn't match terraform types switch(metric.utilizationTargetType) { case "GAUGE": @@ -420,6 +452,32 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { autoscalerMap.autoscaling_policy.metric.add(metricMap) } } + + if (autoscalingPolicy.scalingSchedules) { + autoscalerMap.autoscaling_policy.scalingSchedules = [] + autoscalingPolicy.scalingSchedules.each {Map scalingSchedule -> + def scalingScheduleMap = [:] + if (scalingSchedule.scalingSchedule) { + scalingScheduleMap.name = scalingSchedule.scalingSchedule + } + if (scalingSchedule.description) { + scalingScheduleMap.description = scalingSchedule.description + } + if (scalingSchedule.disabled) { + scalingScheduleMap.disabled = scalingSchedule.disabled + } + if (scalingSchedule.durationSec) { + scalingScheduleMap.duration_sec = scalingSchedule.durationSec + } + if (scalingSchedule.minRequiredReplicas) { + scalingScheduleMap.min_required_replicas = scalingSchedule.minRequiredReplicas + } + if (scalingSchedule.timeZone) { + scalingScheduleMap.time_zone = scalingSchedule.timeZone + } + autoscalerMap.autoscaling_policy.scalingSchedules.add(scalingScheduleMap) + } + } if (autoscalingPolicy.loadBalancingUtilization?.utilizationTarget) { autoscalerMap.autoscaling_policy.load_balancing_utilization = [:] autoscalerMap.autoscaling_policy.load_balancing_utilization.target = autoscalingPolicy.loadBalancingUtilization.utilizationTarget @@ -612,6 +670,9 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { instanceProperties.serviceAccounts.add(convertMapToServiceAccount(serviceAccountMap)) } } + if (instancePropertiesMap.shieldedVmConfig) { + instanceProperties.shieldedVmConfig = convertMapToShieldedVmConfig(instancePropertiesMap.shieldedVmConfig as Map) + } return instanceProperties } @@ -647,6 +708,16 @@ class SaveSnapshotAtomicOperation implements AtomicOperation { return scheduling } + private ShieldedVmConfig convertMapToShieldedVmConfig(Map shieldedVmConfigMap) { + + ShieldedVmConfig shieldedVmConfig = new ShieldedVmConfig() + + shieldedVmConfig.enableSecureBoot = shieldedVmConfigMap.enableSecureBoot as Boolean + shieldedVmConfig.enableVtpm = shieldedVmConfigMap.enableVtpm as Boolean + shieldedVmConfig.enableIntegrityMonitoring = shieldedVmConfigMap.enableIntegrityMonitoring as Boolean + return shieldedVmConfig + } + private NetworkInterface convertMapToNetworkInterface(Map networkInterfaceMap) { NetworkInterface networkInterface = new NetworkInterface() diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidator.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidator.groovy index 97fc6076e22..ea0fc407acb 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidator.groovy @@ -17,22 +17,23 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.AbandonAndDecrementGoogleServerGroupDescription -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("abandonAndDecrementGoogleServerGroupDescriptionValidator") class AbandonAndDecrementGoogleServerGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, AbandonAndDecrementGoogleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, AbandonAndDecrementGoogleServerGroupDescription description, ValidationErrors errors) { StandardGceAttributeValidator helper = new StandardGceAttributeValidator("abandonAndDecrementGoogleServerGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateInstanceIds(description.instanceIds) helper.validateServerGroupName(description.serverGroupName) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbstractEnableDisableGoogleServerGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbstractEnableDisableGoogleServerGroupDescriptionValidator.groovy index 4b64f5b52b2..f9d48fbd3de 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbstractEnableDisableGoogleServerGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbstractEnableDisableGoogleServerGroupDescriptionValidator.groovy @@ -17,20 +17,21 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import 
org.springframework.beans.factory.annotation.Autowired -import org.springframework.validation.Errors abstract class AbstractEnableDisableGoogleServerGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, EnableDisableGoogleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableGoogleServerGroupDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("enableDisableGoogleServerGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateServerGroupName(description.serverGroupName) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidator.groovy index 13a302ced88..e5bf1cc8b93 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidator.groovy @@ -17,29 +17,30 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("basicGoogleDeployDescriptionValidator") class BasicGoogleDeployDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired private GoogleConfiguration.DeployDefaults googleDeployDefaults @Override - void validate(List priorDescriptions, BasicGoogleDeployDescription description, Errors errors) { + void validate(List priorDescriptions, BasicGoogleDeployDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("basicGoogleDeployDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateImage(description.imageSource, description.image, description.imageArtifact) helper.validateInstanceType(description.instanceType, description.regional ? 
description.region : description.zone, diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidator.groovy index c5c65b7e077..b7ba10f4810 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidator.groovy @@ -17,33 +17,34 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.CLONE_SERVER_GROUP) @Component("copyLastGoogleServerGroupDescriptionValidator") class CopyLastGoogleServerGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired private GoogleConfiguration.DeployDefaults googleDeployDefaults @Override - void validate(List priorDescriptions, BasicGoogleDeployDescription description, Errors errors) { + void validate(List priorDescriptions, BasicGoogleDeployDescription description, ValidationErrors errors) { // Passing 'copyLastGoogleServerGroupDescription' rather than 'basicGoogleDeployDescription' // here is a judgement call. The intent is to provide the context in which the validation // is performed rather than the actual type name being validated. The string is lower-cased // so isn't the literal typename anyway.
def helper = new StandardGceAttributeValidator("copyLastGoogleServerGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateInstanceTypeDisks(googleDeployDefaults.determineInstanceTypeDisk(description.instanceType), description.disks) helper.validateAuthScopes(description.authScopes) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidator.groovy index 637e66c041c..66e59fe1853 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidator.groovy @@ -17,26 +17,27 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.deploy.description.CreateGoogleInstanceDescription -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("createGoogleInstanceDescriptionValidator") class CreateGoogleInstanceDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired private GoogleConfiguration.DeployDefaults googleDeployDefaults @Override - void validate(List priorDescriptions, CreateGoogleInstanceDescription description, Errors errors) { + void validate(List priorDescriptions, CreateGoogleInstanceDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("createGoogleInstanceDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateInstanceName(description.instanceName) helper.validateImage(description.imageSource, description.image, description.imageArtifact) helper.validateInstanceType(description.instanceType, description.zone, description.credentials) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidator.groovy index 7855341b675..de2a8216c39 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidator.groovy @@ -17,25 +17,26 @@ 
package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleAutoscalingPolicyDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.DELETE_SCALING_POLICY) @Component class DeleteGoogleAutoscalingPolicyDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, DeleteGoogleAutoscalingPolicyDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteGoogleAutoscalingPolicyDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("deleteGoogleScalingPolicyDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateName(description.serverGroupName, "serverGroupName") helper.validateRegion(description.region, description.credentials) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidator.groovy index 7e0f34ac700..5866cb15f17 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidator.groovy @@ -17,28 +17,29 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.DELETE_LOAD_BALANCER) @Component("deleteGoogleLoadBalancerDescriptionValidator") class DeleteGoogleLoadBalancerDescriptionValidator extends DescriptionValidator { @Autowired - 
AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, DeleteGoogleLoadBalancerDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteGoogleLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("deleteGoogleLoadBalancerDescription", errors) def loadBalancerType = description.loadBalancerType - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateName(description.loadBalancerName, "loadBalancerName") if (loadBalancerType == GoogleLoadBalancerType.NETWORK || loadBalancerType == GoogleLoadBalancerType.INTERNAL) { helper.validateRegion(description.region, description.credentials) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidator.groovy index 32b1b49cb89..1227e2afaf4 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidator.groovy @@ -17,25 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleSecurityGroupDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.DELETE_SECURITY_GROUP) @Component class DeleteGoogleSecurityGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, DeleteGoogleSecurityGroupDescription description, Errors errors) { + void validate(List priorDescriptions, DeleteGoogleSecurityGroupDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("deleteGoogleSecurityGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateName(description.securityGroupName, "securityGroupName") } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidator.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidator.groovy index 3dfba595a96..a49cff7e1d4 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidator.groovy @@ -17,28 +17,29 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeregisterInstancesFromGoogleLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) @Component("deregisterInstancesFromGoogleLoadBalancerDescriptionValidator") class DeregisterInstancesFromGoogleLoadBalancerDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override void validate(List priorDescriptions, - DeregisterInstancesFromGoogleLoadBalancerDescription description, Errors errors) { + DeregisterInstancesFromGoogleLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("deregisterInstancesFromGoogleLoadBalancerDescription", errors) helper.validateNameList(description.loadBalancerNames, "loadBalancerName") - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateInstanceIds(description.instanceIds) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidator.groovy index d81194270d5..c74b52a19c0 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidator.groovy @@ -17,25 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DestroyGoogleServerGroupDescription +import 
com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.DESTROY_SERVER_GROUP) @Component("destroyGoogleServerGroupDescriptionValidator") class DestroyGoogleServerGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, DestroyGoogleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, DestroyGoogleServerGroupDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("destroyGoogleServerGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateServerGroupName(description.serverGroupName) helper.validateRegion(description.region, description.credentials) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidator.groovy index 7016788c2ad..abcb7df49dc 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidator.groovy @@ -17,29 +17,30 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.ModifyGoogleServerGroupInstanceTemplateDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.UPDATE_LAUNCH_CONFIG) @Component("modifyGoogleServerGroupInstanceTemplateDescriptionValidator") class ModifyGoogleServerGroupInstanceTemplateDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Autowired private GoogleConfiguration.DeployDefaults googleDeployDefaults @Override - void validate(List priorDescriptions, ModifyGoogleServerGroupInstanceTemplateDescription description, Errors errors) { + void 
validate(List priorDescriptions, ModifyGoogleServerGroupInstanceTemplateDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("modifyGoogleServerGroupInstanceTemplateDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateInstanceTypeDisks(googleDeployDefaults.determineInstanceTypeDisk(description.instanceType), description.disks) helper.validateAuthScopes(description.authScopes) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidator.groovy index 0f234a69cad..02687dfc46e 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidator.groovy @@ -17,25 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.RebootGoogleInstancesDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.REBOOT_INSTANCES) @Component class RebootGoogleInstancesDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, RebootGoogleInstancesDescription description, Errors errors) { + void validate(List priorDescriptions, RebootGoogleInstancesDescription description, ValidationErrors errors) { StandardGceAttributeValidator helper = new StandardGceAttributeValidator("rebootGoogleInstancesDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateZone(description.zone, description.credentials) helper.validateInstanceIds(description.instanceIds) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidator.groovy index a2d6615ba3e..2d9dcac1034 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidator.groovy +++ 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidator.groovy @@ -17,28 +17,29 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.RegisterInstancesWithGoogleLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) @Component("registerInstancesWithGoogleLoadBalancerDescriptionValidator") class RegisterInstancesWithGoogleLoadBalancerDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override void validate(List priorDescriptions, - RegisterInstancesWithGoogleLoadBalancerDescription description, Errors errors) { + RegisterInstancesWithGoogleLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("registerInstancesWithGoogleLoadBalancerDescription", errors) helper.validateNameList(description.loadBalancerNames, "loadBalancerName") - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateInstanceIds(description.instanceIds) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidator.groovy index a50d2b220f7..6277310c00e 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidator.groovy @@ -17,31 +17,32 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import 
org.springframework.validation.Errors @GoogleOperation(AtomicOperations.RESIZE_SERVER_GROUP) @Component("resizeGoogleServerGroupDescriptionValidator") class ResizeGoogleServerGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, def description, Errors errors) { + void validate(List priorDescriptions, def description, ValidationErrors errors) { // If the target server group has an Autoscaler configured the converter will return an // UpsertGoogleAutoscalingPolicyDescription instead of a ResizeGoogleServerGroupDescription. if (description in UpsertGoogleAutoscalingPolicyDescription) { - new UpsertGoogleAutoscalingPolicyDescriptionValidator(accountCredentialsProvider: accountCredentialsProvider) + new UpsertGoogleAutoscalingPolicyDescriptionValidator(credentialsRepository: credentialsRepository) .validate(priorDescriptions, description, errors) } else { def helper = new StandardGceAttributeValidator("resizeGoogleServerGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateServerGroupName(description.serverGroupName) helper.validateNotEmpty(description.targetSize ?: description.capacity?.desired, "targetSize") helper.validateNonNegativeLong(description.targetSize ?: description.capacity?.desired ?: 0, "targetSize") diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidator.groovy index 5cd013a7f79..1ea61027a9e 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidator.groovy @@ -1,4 +1,3 @@ - /* * Copyright 2015 Google, Inc. * @@ -17,7 +16,8 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactUtils +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.deploy.description.BaseGoogleInstanceDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy @@ -26,9 +26,9 @@ import com.netflix.spinnaker.clouddriver.google.model.GoogleDiskType import com.netflix.spinnaker.clouddriver.google.model.GoogleInstanceTypeDisk import com.netflix.spinnaker.clouddriver.google.security.GoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import com.netflix.spinnaker.kork.artifacts.model.Artifact -import org.springframework.validation.Errors +import org.springframework.scheduling.support.CronSequenceGenerator /** * Common validation routines for standard description attributes. 
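// The StandardGceAttributeValidator hunks below complete the credentials migration applied in
// every validator above: the untyped AccountCredentialsProvider is replaced by
// CredentialsRepository, and the lookup in validateCredentials moves from
// getCredentials(accountName) to getOne(accountName). A minimal sketch of the resulting
// pattern (the generic parameter is spelled out for clarity; the Groovy sources here use the
// raw type):
//
//   @Autowired
//   CredentialsRepository<GoogleNamedAccountCredentials> credentialsRepository
//
//   def credentials = credentialsRepository.getOne(accountName)  // null for unknown accounts
//   if (!(credentials?.credentials instanceof GoogleCredentials)) {
//     errors.rejectValue("credentials", "${context}.credentials.invalid")
//   }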
@@ -78,7 +78,7 @@ class StandardGceAttributeValidator { /** * Bound at construction, this is used to collect validation errors. */ - Errors errors + ValidationErrors errors /** * Constructs validator for standard attributes added by GCE. @@ -86,7 +86,7 @@ class StandardGceAttributeValidator { * @param context The owner of the attributes to be validated is typically a {@code *Description} class. * @param errors Accumulates and reports on the validation errors over the lifetime of this validator. */ - StandardGceAttributeValidator(String context, Errors errors) { + StandardGceAttributeValidator(String context, ValidationErrors errors) { this.context = context this.errors = errors } @@ -162,10 +162,10 @@ class StandardGceAttributeValidator { return result } - def validateCredentials(String accountName, AccountCredentialsProvider accountCredentialsProvider) { + def validateCredentials(String accountName, CredentialsRepository credentialsRepository) { def result = validateNotEmpty(accountName, "credentials") if (result) { - def credentials = accountCredentialsProvider.getCredentials(accountName) + def credentials = credentialsRepository.getOne(accountName) if (!(credentials?.credentials instanceof GoogleCredentials)) { errors.rejectValue("credentials", "${context}.credentials.invalid") @@ -221,8 +221,8 @@ class StandardGceAttributeValidator { def result = true if (value < min || value > max) { errors.rejectValue(attribute, - "${context}.${attribute}.rangeViolation", - "${context}.${attribute} must be between ${min} and ${max}, inclusive.") + "${context}.${attribute}.rangeViolation", + "${context}.${attribute} must be between ${min} and ${max}, inclusive.") result = false } return result @@ -232,8 +232,8 @@ class StandardGceAttributeValidator { def result = true if (maxValue < minValue) { errors.rejectValue(maxAttribute, - "${context}.${maxAttribute}.lessThanMin", - "${context}.${maxAttribute} must not be less than ${context}.${minAttribute}.") + "${context}.${maxAttribute}.lessThanMin", + "${context}.${maxAttribute} must not be less than ${context}.${minAttribute}.") result = false } return result @@ -256,7 +256,7 @@ class StandardGceAttributeValidator { // If there's no artifact at all, return early rather than try to validate the null artifact return false } - if (imageArtifact.getType() != ArtifactUtils.GCE_IMAGE_TYPE) { + if (imageArtifact.getType() != GCEUtil.GCE_IMAGE_TYPE) { errors.rejectValue("imageArtifact.type", "${context}.imageArtifact.type.invalid") } } @@ -295,25 +295,28 @@ class StandardGceAttributeValidator { def validateInstanceType(String instanceType, String location, GoogleNamedAccountCredentials credentials) { validateNotEmpty(instanceType, "instanceType") - if (instanceType?.startsWith('custom')) { - validateCustomInstanceType(instanceType, location, credentials) + if (instanceType?.contains('custom')) { + boolean extendMemory = instanceType.contains('ext') + validateCustomInstanceType(instanceType, location, credentials, extendMemory) } } - def customInstanceRegExp = /custom-\d{1,2}-\d{4,6}/ + def customInstanceRegExp = /(.*)-?custom-(\d{1,2})-(\d{3,6})(-ext)?/ - def validateCustomInstanceType(String instanceType, String location, GoogleNamedAccountCredentials credentials) { - if (!(instanceType ==~ customInstanceRegExp)) { - errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Custom instance string must match pattern /custom-\\d{1,2}-\\d{4,6}/.") + def validateCustomInstanceType(String instanceType, String
location, GoogleNamedAccountCredentials credentials, boolean extendMemory) { + def customTypeMatcher = instanceType =~ customInstanceRegExp + if (!customTypeMatcher.matches()) { + errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Custom instance string must match pattern /(.*)-?custom-(\\d{1,2})-(\\d{3,6})(-ext)?/.") return false } - def ( vCpuCount, memory ) = instanceType.split('-').tail().collect { it.toDouble() } + def vCpuCount = customTypeMatcher.group(2).toDouble() + def memory = customTypeMatcher.group(3).toDouble() def memoryInGbs = memory / 1024 - // Memory per vCPU must be between .9 GB and 6.5 GB - def maxMemory = vCpuCount * 6.5 - def minMemory = Math.ceil((0.9 * vCpuCount) * 4) / 4 + // Memory per vCPU must be between .5 GB and 8 GB + def maxMemory = vCpuCount * 8 + def minMemory = Math.ceil((0.5 * vCpuCount) * 4) / 4 if (vCpuCount < 1) { errors.rejectValue("instanceType", "${context}.instanceType.invalid", "vCPU count must be greater than or equal to 1.") @@ -324,12 +330,16 @@ class StandardGceAttributeValidator { errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Above 1, vCPU count must be even.") } - if (memoryInGbs > maxMemory) { - errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Memory per vCPU must be less than 6.5GB.") - } + if (!extendMemory) { + + if (memoryInGbs > maxMemory) { + errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Memory per vCPU must be no more than 8GB.") + } + + if (memoryInGbs < minMemory) { + errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Memory per vCPU must be at least 0.5GB.") + } - if (memoryInGbs < minMemory) { - errors.rejectValue("instanceType", "${context}.instanceType.invalid", "Memory per vCPU must be greater than 0.9GB.") } if (memory % 256 != 0) { @@ -369,8 +379,8 @@ class StandardGceAttributeValidator { if (!persistentDiskCount) { errors.rejectValue("disks", - "${context}.disks.missingPersistentDisk", - "A persistent boot disk is required.") + "${context}.disks.missingPersistentDisk", + "A persistent boot disk is required.") } } @@ -378,8 +388,8 @@ class StandardGceAttributeValidator { specifiedDisks.findAll { it.persistent }.eachWithIndex { persistentDisk, index -> if (persistentDisk.sizeGb < 10) { errors.rejectValue("disks", - "${context}.disk${index}.sizeGb.invalidSize", - "Persistent disks must be at least 10GB.") + "${context}.disk${index}.sizeGb.invalidSize", + "Persistent disks must be at least 10GB.") } } @@ -390,13 +400,13 @@ class StandardGceAttributeValidator { if (disk.is(firstPersistentDisk)) { if (firstPersistentDisk.sourceImage) { errors.rejectValue("disks", - "${context}.disk${index}.sourceImage.unexpected", - "The boot disk must not specify source image, it must be specified at the top-level on the request as `image`.") + "${context}.disk${index}.sourceImage.unexpected", + "The boot disk must not specify source image, it must be specified at the top-level on the request as `image`.") } } else if (disk.persistent && !disk.sourceImage) { errors.rejectValue("disks", - "${context}.disk${index}.sourceImage.required", - "All non-boot persistent disks are required to specify source image.") + "${context}.disk${index}.sourceImage.required", + "All non-boot persistent disks are required to specify source image.") } } @@ -404,22 +414,22 @@
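// The widened customInstanceRegExp above accepts an optional machine-family prefix and an
// optional extended-memory suffix; capture group 2 is the vCPU count and group 3 the memory
// in MB. Illustrative matches (hypothetical machine type names):
//
//   assert 'custom-2-2048' ==~ customInstanceRegExp       // bare custom type
//   assert 'n2-custom-4-5120' ==~ customInstanceRegExp    // family-prefixed custom type
//   assert 'custom-2-20480-ext' ==~ customInstanceRegExp  // '-ext' bypasses the per-vCPU memory bounds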
// Shared-core instance types do not support local-ssd. if (!instanceTypeDisk.supportsLocalSSD) { errors.rejectValue("disks", - "${context}.disk${index}.type.localSSDUnsupported", - "Instance type $instanceTypeDisk.instanceType does not support Local SSD.") + "${context}.disk${index}.type.localSSDUnsupported", + "Instance type $instanceTypeDisk.instanceType does not support Local SSD.") } // local-ssd disks must be exactly 375GB. if (localSSDDisk.sizeGb != 375) { errors.rejectValue("disks", - "${context}.disk${index}.sizeGb.invalidSize", - "Local SSD disks must be exactly 375GB.") + "${context}.disk${index}.sizeGb.invalidSize", + "Local SSD disks must be exactly 375GB.") } // local-ssd disks must have auto-delete set. if (!localSSDDisk.autoDelete) { errors.rejectValue("disks", - "${context}.disk${index}.autoDelete.required", - "Local SSD disks must have auto-delete set.") + "${context}.disk${index}.autoDelete.required", + "Local SSD disks must have auto-delete set.") } } } @@ -441,9 +451,21 @@ class StandardGceAttributeValidator { if (minNumReplicas != null && maxNumReplicas != null) { validateMaxNotLessThanMin(minNumReplicas, - maxNumReplicas, - "autoscalingPolicy.minNumReplicas", - "autoscalingPolicy.maxNumReplicas") + maxNumReplicas, + "autoscalingPolicy.minNumReplicas", + "autoscalingPolicy.maxNumReplicas") + } + + if (cpuUtilization != null) { + cpuUtilization.with { + if (utilizationTarget != null) { + validateInRangeExclusive(utilizationTarget, + 0, 1, "autoscalingPolicy.cpuUtilization.utilizationTarget") + } + if (predictiveMethod != null) { + validateNotEmpty(predictiveMethod, "autoscalingPolicy.cpuUtilization.predictiveMethod") + } + } } customMetricUtilizations.eachWithIndex { utilization, index -> @@ -452,20 +474,39 @@ utilization.with { validateNotEmpty(metric, "${path}.metric") - if (utilizationTarget <= 0) { - errors.rejectValue("${context}.${path}.utilizationTarget", - "${context}.${path}.utilizationTarget must be greater than zero.") + if (utilizationTarget != null) { + validateNotEmpty(utilizationTargetType, "${path}.utilizationTargetType") + if (utilizationTarget <= 0) { + errors.rejectValue("${context}.${path}.utilizationTarget", + "${context}.${path}.utilizationTarget must be greater than zero.") + } } - validateNotEmpty(utilizationTargetType, "${path}.utilizationTargetType") + if (singleInstanceAssignment != null && singleInstanceAssignment < 0) { + errors.rejectValue("${context}.${path}.singleInstanceAssignment", + "${context}.${path}.singleInstanceAssignment must not be negative.") + } + } + } + + scalingSchedules.each { scalingSchedule -> + if (scalingSchedule != null) { + if (scalingSchedule.duration != null) { + validateInRangeExclusive(scalingSchedule.duration, + 300, Integer.MAX_VALUE, "autoscalingPolicy.scalingSchedule.duration") + } + if (scalingSchedule.scheduleCron != null) { + validateCronExpression(scalingSchedule.scheduleCron, "autoscalingPolicy.scalingSchedule.scheduleCron") + } + if (scalingSchedule.timezone != null) { + validateTimeZone(scalingSchedule.timezone, "autoscalingPolicy.scalingSchedule.timezone") + } } } - } - [ "cpuUtilization", "loadBalancingUtilization" ].each { - if (policy[it] != null && policy[it].utilizationTarget != null) { - validateInRangeExclusive(policy[it].utilizationTarget, - 0, 1, + if (loadBalancingUtilization != null && loadBalancingUtilization.utilizationTarget) { + validateInRangeExclusive(loadBalancingUtilization.utilizationTarget, + 0, 1,
"autoscalingPolicy.loadBalancingUtilization.utilizationTarget") } } } @@ -485,10 +526,10 @@ class StandardGceAttributeValidator { validateNonNegativeLong(fixed as int, "autoHealingPolicy.maxUnavailable.fixed") } else if (percent != null) { validateInRangeInclusive(percent as int, - 0, 100, "autoHealingPolicy.maxUnavailable.percent") + 0, 100, "autoHealingPolicy.maxUnavailable.percent") } else if (rejectEmptyMaxUnavailable) { this.errors.rejectValue("autoHealingPolicy.maxUnavailable", - "${this.context}.autoHealingPolicy.maxUnavailable.neitherFixedNorPercent") + "${this.context}.autoHealingPolicy.maxUnavailable.neitherFixedNorPercent") } } } @@ -522,4 +563,23 @@ class StandardGceAttributeValidator { def validateAuthScopes(List authScopes) { return validateOptionalNameList(authScopes, "authScope") } + + def validateCronExpression(String expression, String attribute) { + def result = CronSequenceGenerator.isValidExpression("* " + expression) + if(!result){ + errors.rejectValue attribute, "${context}.${attribute} must be a valid CRON expression." + } + return result + } + + def validateTimeZone(String timeZone, String attribute) { + def result = true + try { + result = Set.of(TimeZone.getAvailableIDs()).contains(timeZone) + }catch (Exception e){ + errors.rejectValue attribute, "${context}.${attribute} must be a time zone name from the tz database." + result = false + } + return result + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidator.groovy index 354cf6ba5e0..403ad3b6677 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidator.groovy @@ -17,25 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateAndDecrementGoogleServerGroupDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.TERMINATE_INSTANCE_AND_DECREMENT) @Component("terminateAndDecrementGoogleServerGroupDescriptionValidator") class TerminateAndDecrementGoogleServerGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, TerminateAndDecrementGoogleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, TerminateAndDecrementGoogleServerGroupDescription 
description, ValidationErrors errors) { StandardGceAttributeValidator helper = new StandardGceAttributeValidator("terminateAndDecrementGoogleServerGroupDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateInstanceIds(description.instanceIds) helper.validateServerGroupName(description.serverGroupName) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidator.groovy index 8ee8255ce5e..c378732f690 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidator.groovy @@ -17,25 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateGoogleInstancesDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.TERMINATE_INSTANCES) @Component class TerminateGoogleInstancesDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, TerminateGoogleInstancesDescription description, Errors errors) { + void validate(List priorDescriptions, TerminateGoogleInstancesDescription description, ValidationErrors errors) { StandardGceAttributeValidator helper = new StandardGceAttributeValidator("terminateGoogleInstancesDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) if (description.serverGroupName) { helper.validateRegion(description.region, description.credentials) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidator.groovy index 41e1d2d827d..88faadeebb0 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidator.groovy @@ 
-17,13 +17,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.UPSERT_SCALING_POLICY) @Component("upsertGoogleScalingPolicyDescriptionValidator") @@ -31,13 +32,13 @@ class UpsertGoogleAutoscalingPolicyDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, UpsertGoogleAutoscalingPolicyDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertGoogleAutoscalingPolicyDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("upsertGoogleScalingPolicyDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateServerGroupName(description.serverGroupName) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidator.groovy index addc577dc2a..58fcd048e6a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidator.groovy @@ -17,26 +17,28 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleImageTagsDescription import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleServerGroupTagsDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.UPSERT_IMAGE_TAGS) @Component class UpsertGoogleImageTagsDescriptionValidator extends DescriptionValidator { @Autowired - 
AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, UpsertGoogleImageTagsDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertGoogleImageTagsDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("upsertGoogleImageTagsDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateImageName(description.imageName) helper.validateMap(description.tags, "tag") } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy index c9378d07f7f..024c6eb274d 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy @@ -17,17 +17,19 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.UPSERT_LOAD_BALANCER) @Component("upsertGoogleLoadBalancerDescriptionValidator") @@ -39,13 +41,13 @@ class UpsertGoogleLoadBalancerDescriptionValidator extends private static final List SUPPORTED_TCP_PROXY_PORTS = [25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995] @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, UpsertGoogleLoadBalancerDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertGoogleLoadBalancerDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("upsertGoogleLoadBalancerDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) 
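// The INTERNAL_MANAGED branch added to the switch below enforces two rules: portRange must
// parse as a single port, and every backend service reachable from the load balancer's URL map
// must carry a health check. A small sketch of the port rule, with hypothetical values:
//
//   Integer.parseInt('443')      // single port: accepted
//   Integer.parseInt('80-8080')  // throws NumberFormatException -> portRange.requireSinglePort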
helper.validateName(description.loadBalancerName, "loadBalancerName") switch (description.loadBalancerType) { @@ -86,6 +88,34 @@ class UpsertGoogleLoadBalancerDescriptionValidator extends } } break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + + // portRange must be a single port. + try { + Integer.parseInt(description.portRange) + } catch (NumberFormatException _) { + errors.rejectValue("portRange", + "upsertGoogleLoadBalancerDescription.portRange.requireSinglePort") + } + + // Each backend service must have a health check. + def googleInternalHttpLoadBalancer = new GoogleInternalHttpLoadBalancer( + name: description.loadBalancerName, + defaultService: description.defaultService, + hostRules: description.hostRules, + certificate: description.certificate, + ipAddress: description.ipAddress, + ipProtocol: description.ipProtocol, + portRange: description.portRange + ) + List services = Utils.getBackendServicesFromInternalHttpLoadBalancerView(googleInternalHttpLoadBalancer.view) + services?.each { GoogleBackendService service -> + if (!service.healthCheck) { + errors.rejectValue("defaultService OR hostRules.pathMatcher.defaultService OR hostRules.pathMatcher.pathRules.backendService", + "upsertGoogleLoadBalancerDescription.backendServices.healthCheckRequired") + } + } + break case GoogleLoadBalancerType.INTERNAL: helper.validateRegion(description.region, description.credentials) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidator.groovy index fb1f57ef1a8..dce247ec574 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidator.groovy @@ -17,26 +17,27 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleSecurityGroupDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.UPSERT_SECURITY_GROUP) @Component class UpsertGoogleSecurityGroupDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, UpsertGoogleSecurityGroupDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertGoogleSecurityGroupDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("upsertGoogleSecurityGroupDescription", errors) - helper.validateCredentials(description.accountName, 
accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateName(description.securityGroupName, "securityGroupName") helper.validateNetwork(description.network) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidator.groovy index 45ea84ec5cd..5207f4b7e89 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidator.groovy @@ -17,25 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleServerGroupTagsDescription +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @GoogleOperation(AtomicOperations.UPSERT_SERVER_GROUP_TAGS) @Component("upsertGoogleServerGroupTagsDescriptionValidator") class UpsertGoogleServerGroupTagsDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, UpsertGoogleServerGroupTagsDescription description, Errors errors) { + void validate(List priorDescriptions, UpsertGoogleServerGroupTagsDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("upsertGoogleServerGroupTagsDescription", errors) - helper.validateCredentials(description.accountName, accountCredentialsProvider) + helper.validateCredentials(description.accountName, credentialsRepository) helper.validateRegion(description.region, description.credentials) helper.validateServerGroupName(description.serverGroupName) helper.validateTags(description.tags) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy index 0829e013f24..3166a02595a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy @@ -17,18 +17,19 @@ package 
com.netflix.spinnaker.clouddriver.google.deploy.validators.discovery import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.GoogleInstanceListDescription import com.netflix.spinnaker.clouddriver.google.deploy.validators.StandardGceAttributeValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import org.springframework.beans.factory.annotation.Autowired -import org.springframework.validation.Errors abstract class AbstractEnableDisableInstancesInDiscoveryDescriptionValidator extends DescriptionValidator { @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsRepository credentialsRepository @Override - void validate(List priorDescriptions, GoogleInstanceListDescription description, Errors errors) { + void validate(List priorDescriptions, GoogleInstanceListDescription description, ValidationErrors errors) { def helper = new StandardGceAttributeValidator("googleInstanceListDescription", errors) helper.validateNotEmpty(description.instanceIds, "instanceIds") diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicator.groovy index 9aa040ef562..dd12cdd0603 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicator.groovy @@ -18,9 +18,12 @@ package com.netflix.spinnaker.clouddriver.google.health import com.netflix.spectator.api.Registry import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties import com.netflix.spinnaker.clouddriver.google.security.GoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository +import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration import groovy.transform.InheritConstructors import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -43,10 +46,13 @@ class GoogleHealthIndicator implements HealthIndicator, GoogleExecutorTraits { Registry registry @Autowired - AccountCredentialsProvider accountCredentialsProvider + CredentialsTypeBaseConfiguration credentialsTypeBaseConfiguration private final AtomicReference lastException = new AtomicReference<>(null) + @Autowired + GoogleConfigurationProperties googleConfigurationProperties + @Override Health health() { def ex = lastException.get() @@ -61,21 +67,20 @@ class GoogleHealthIndicator implements HealthIndicator, GoogleExecutorTraits { @Scheduled(fixedDelay = 300000L) void checkHealth() { try { - Set googleCredentialsSet = accountCredentialsProvider.all.findAll { - it instanceof GoogleNamedAccountCredentials - } as Set - - for (GoogleNamedAccountCredentials accountCredentials in googleCredentialsSet) { - try { - // This verifies that the specified credentials are sufficient to access the referenced project. 
- timeExecute(accountCredentials.compute.projects().get(accountCredentials.project), - "compute.projects.get", - TAG_SCOPE, SCOPE_GLOBAL) - } catch (IOException e) { - throw new GoogleIOException(e) - } + if (googleConfigurationProperties.getHealth().getVerifyAccountHealth()) { + LOG.info("google.health.verifyAccountHealth flag is enabled - verifying connection to the Google accounts") + credentialsTypeBaseConfiguration.credentialsRepository?.all?.forEach({ + try { + timeExecute(it.compute.projects().get(it.project), + "compute.projects.get", + TAG_SCOPE, SCOPE_GLOBAL) + } catch (IOException e) { + throw new GoogleIOException(e) + } + }) + } else { + LOG.info("google.health.verifyAccountHealth flag is disabled - not verifying connection to the Google accounts") } - } - lastException.set(null) } catch (Exception ex) { LOG.warn "Unhealthy", ex diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleApplication.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleApplication.groovy index 56d6e69dc50..34f4ef97c43 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleApplication.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleApplication.groovy @@ -27,19 +27,26 @@ class GoogleApplication { String name View getView() { - new View() + new View(this) } @Canonical class View implements Application { - String name = GoogleApplication.this.name - Map attributes = [:] + String name + Map attributes /** * Account name -> cluster names */ - Map> clusterNames = [:].withDefault {[] as Set} + Map> clusterNames List> instances + + View(GoogleApplication googleApplication) { + name = googleApplication.name + attributes = [:] + clusterNames = [:].withDefault {[] as Set} + instances = [[:] as Map] + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoHealingPolicy.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoHealingPolicy.groovy index 958970f9459..6352fb07a47 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoHealingPolicy.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoHealingPolicy.groovy @@ -25,6 +25,7 @@ import groovy.transform.ToString @ToString(includeNames = true) class GoogleAutoHealingPolicy { String healthCheck + GoogleHealthCheck.HealthCheckKind healthCheckKind Integer initialDelaySec FixedOrPercent maxUnavailable diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoscalingPolicy.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoscalingPolicy.groovy index 1ee0d0b0577..dba1c320803 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoscalingPolicy.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleAutoscalingPolicy.groovy @@ -18,11 +18,13 @@ package com.netflix.spinnaker.clouddriver.google.model import groovy.transform.AutoClone import groovy.transform.Canonical +import groovy.transform.CompileStatic import groovy.transform.ToString @AutoClone @Canonical @ToString(includeNames = true) +@CompileStatic class GoogleAutoscalingPolicy { Integer minNumReplicas Integer maxNumReplicas @@ -31,11 +33,19 @@
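// The fields added in this hunk model the newer GCE autoscaler features; scalingSchedules in
// particular is what the validateCronExpression and validateTimeZone helpers above guard. A
// sketch of a policy a caller might submit (illustrative values only):
//
//   def policy = new GoogleAutoscalingPolicy(
//     minNumReplicas: 2,
//     maxNumReplicas: 10,
//     scalingSchedules: [new GoogleAutoscalingPolicy.ScalingSchedule(
//       scheduleName: 'weekday-rampup',    // hypothetical schedule
//       enabled: true,
//       duration: 3600,                    // seconds; range-checked against 300..Integer.MAX_VALUE
//       minimumRequiredInstances: 4,
//       scheduleCron: '0 8 * * MON-FRI',   // five-field cron; the validator prepends a seconds field
//       timezone: 'America/Los_Angeles')])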
class GoogleAutoscalingPolicy { CpuUtilization cpuUtilization LoadBalancingUtilization loadBalancingUtilization List customMetricUtilizations + ScaleInControl scaleInControl AutoscalingMode mode + List scalingSchedules @ToString(includeNames = true) static class CpuUtilization { Double utilizationTarget + PredictiveMethod predictiveMethod + + enum PredictiveMethod { + NONE, + OPTIMIZE_AVAILABILITY + } } @ToString(includeNames = true) @@ -52,15 +62,36 @@ class GoogleAutoscalingPolicy { enum UtilizationTargetType { GAUGE, DELTA_PER_SECOND, - DELTA_PER_MINUTE; + DELTA_PER_MINUTE } + String filter + Double singleInstanceAssignment + } + + static class ScaleInControl { + FixedOrPercent maxScaledInReplicas + Integer timeWindowSec } + static class FixedOrPercent { + Integer fixed + Integer percent + } static enum AutoscalingMode { ON, OFF, - ONLY_UP, - ONLY_DOWN + ONLY_SCALE_OUT + } + + @ToString(includeNames = true) + static class ScalingSchedule { + String scheduleName + String scheduleDescription + Boolean enabled = false + Integer duration + Integer minimumRequiredInstances + String scheduleCron + String timezone } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleCluster.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleCluster.groovy index 31b460d53a3..cb7ce567ddc 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleCluster.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleCluster.groovy @@ -20,6 +20,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerView import com.netflix.spinnaker.clouddriver.model.Cluster +import com.netflix.spinnaker.moniker.Moniker import groovy.transform.Canonical import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode @@ -32,18 +33,26 @@ class GoogleCluster { @JsonIgnore View getView() { - new View() + new View(this) } @Canonical class View implements Cluster { + View(GoogleCluster googleCluster) { + name = googleCluster.name + accountName = googleCluster.accountName + + serverGroups = [] as Set + loadBalancers = [] as Set + } + final String type = GoogleCloudProvider.ID - String name = GoogleCluster.this.name - String accountName = GoogleCluster.this.accountName + String name + String accountName - Set serverGroups = [] as Set - Set loadBalancers = [] as Set + Set serverGroups + Set loadBalancers } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDisk.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDisk.groovy index 60bfa17f67c..a7df16ea099 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDisk.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDisk.groovy @@ -30,6 +30,13 @@ class GoogleDisk { String sourceImage boolean autoDelete = true + // Unique disk device name addressable by a Linux OS in /dev/disk/by-id/google-* in the running instance. + // Used to reference disk for mounting, resizing, etc. + // Only applicable for persistent disks. 
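For illustration (not part of this patch): a minimal Groovy sketch of how a persistent disk's deviceName surfaces inside the guest OS, per the comment above; the device name used here is hypothetical.

// A persistent disk attached with deviceName "data-disk" is addressable in the
// guest at /dev/disk/by-id/google-<deviceName>; tooling can use that path to
// locate the disk for mounting or resizing.
String deviceName = "data-disk" // hypothetical device name
String guestPath = "/dev/disk/by-id/google-${deviceName}"
assert guestPath == "/dev/disk/by-id/google-data-disk"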
+ String deviceName + + Map labels + void setType(String type) { this.type = GoogleDiskType.fromValue(type) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskType.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskType.groovy index fd00e50f743..3aae196e360 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskType.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskType.groovy @@ -18,8 +18,11 @@ package com.netflix.spinnaker.clouddriver.google.model public enum GoogleDiskType { PD_STANDARD("pd-standard", true), + PD_EXTREME("pd-extreme", true), + PD_BALANCED("pd-balanced", true), PD_SSD("pd-ssd", true), - LOCAL_SSD("local-ssd", false) + LOCAL_SSD("local-ssd", false), + HYPERDISK_BALANCED("hyperdisk-balanced", true) String text boolean persistent diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy index e56773d3e1b..a34e15dbcb7 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy @@ -48,6 +48,8 @@ class GoogleHealthCheck { */ HealthCheckKind kind + String region + /** * Name of the GCP certificate, if HTTPS/SSL. */ @@ -55,7 +57,7 @@ class GoogleHealthCheck { @JsonIgnore View getView() { - new View() + new View(this) } /** @@ -96,17 +98,34 @@ class GoogleHealthCheck { @Canonical class View implements Serializable { - String name = GoogleHealthCheck.this.name - HealthCheckType healthCheckType = GoogleHealthCheck.this.healthCheckType - int interval = GoogleHealthCheck.this.checkIntervalSec - int timeout = GoogleHealthCheck.this.timeoutSec - int unhealthyThreshold = GoogleHealthCheck.this.unhealthyThreshold - int healthyThreshold = GoogleHealthCheck.this.healthyThreshold - int port = GoogleHealthCheck.this.port - String requestPath = GoogleHealthCheck.this.requestPath - String selfLink = GoogleHealthCheck.this.selfLink - String kind = GoogleHealthCheck.this.kind - String target = GoogleHealthCheck.this.target + String name + HealthCheckType healthCheckType + int interval + int timeout + int unhealthyThreshold + int healthyThreshold + int port + String requestPath + String selfLink + String kind + String target + String region + + View(GoogleHealthCheck googleHealthCheck){ + name = googleHealthCheck.name + healthCheckType = googleHealthCheck.healthCheckType + interval = googleHealthCheck.checkIntervalSec + timeout = googleHealthCheck.timeoutSec + unhealthyThreshold = googleHealthCheck.unhealthyThreshold + healthyThreshold = googleHealthCheck.healthyThreshold + port = googleHealthCheck.port + requestPath = googleHealthCheck.requestPath + selfLink = googleHealthCheck.selfLink + kind = googleHealthCheck.kind + target = googleHealthCheck.target + region = googleHealthCheck.region + } + } static enum HealthCheckType { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleInstance.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleInstance.groovy index 7f5cd699660..76eaaf670c6 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleInstance.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleInstance.groovy @@ -19,6 +19,7 @@ package com.netflix.spinnaker.clouddriver.google.model import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.core.type.TypeReference import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.client.json.GenericJson import com.google.api.services.compute.model.* import com.netflix.spinnaker.clouddriver.consul.model.ConsulHealth import com.netflix.spinnaker.clouddriver.consul.model.ConsulNode @@ -28,13 +29,16 @@ import com.netflix.spinnaker.clouddriver.google.model.health.GoogleInstanceHealt import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.moniker.Moniker import groovy.transform.Canonical import groovy.transform.EqualsAndHashCode @EqualsAndHashCode(includes = "name") -class GoogleInstance { +class GoogleInstance implements GoogleLabeledResource { String name + String account String gceId String instanceType String cpuPlatform @@ -47,7 +51,13 @@ class GoogleInstance { List networkInterfaces String networkName Metadata metadata - List<AttachedDisk> disks + // This should be List<AttachedDisk> but objectMapper.convertValue doesn't work with + // AttachedDisks. We deserialize the JSON to a Map first, which turns the diskSizeGb value into an + // Integer. Then convertValue tries to assign it to the Long field and throws an exception. We + // could solve this with a mixin (see AmazonObjectMapperConfigurer) but since no one actually + // cares about the type, we just use GenericJson to pass through the data to deck without + // interpreting it at all.
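For illustration (not part of this patch): a minimal Groovy sketch of the failure mode described in the comment above, assuming Jackson and the google-api-services-compute model classes are on the classpath.

import com.fasterxml.jackson.databind.ObjectMapper
import com.google.api.services.compute.model.AttachedDisk

def mapper = new ObjectMapper()
// Deserializing raw JSON to a Map turns diskSizeGb into an Integer...
Map parsed = mapper.readValue('{"diskSizeGb": 10}', Map)
// ...so converting that Map to AttachedDisk, whose diskSizeGb field is a Long,
// can throw, per the comment above. Carrying the data as GenericJson instead
// passes it through to deck without interpreting the value at all.
def disk = mapper.convertValue(parsed, AttachedDisk) // may throw IllegalArgumentException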
+ List<GenericJson> disks List serviceAccounts String selfLink Tags tags @@ -61,7 +71,7 @@ class GoogleInstance { @JsonIgnore View getView() { - new View() + new View(this) } @Canonical @@ -70,27 +80,50 @@ class GoogleInstance { final String providerType = GoogleCloudProvider.ID final String cloudProvider = GoogleCloudProvider.ID - String name = GoogleInstance.this.name - String gceId = GoogleInstance.this.gceId - String instanceId = GoogleInstance.this.name - String instanceType = GoogleInstance.this.instanceType - String cpuPlatform = GoogleInstance.this.cpuPlatform - Long launchTime = GoogleInstance.this.launchTime - String zone = GoogleInstance.this.zone - String region = GoogleInstance.this.region - Map placement = ["availabilityZone": GoogleInstance.this.zone] - List networkInterfaces = GoogleInstance.this.networkInterfaces - Metadata metadata = GoogleInstance.this.metadata - List disks = GoogleInstance.this.disks - List serviceAccounts = GoogleInstance.this.serviceAccounts - String selfLink = GoogleInstance.this.selfLink - String serverGroup = GoogleInstance.this.serverGroup - Tags tags = GoogleInstance.this.tags - Map labels = GoogleInstance.this.labels - ConsulNode consulNode = GoogleInstance.this.consulNode + String name + String gceId + String instanceId + String instanceType + String cpuPlatform + Long launchTime + String zone + String region + Map placement + List networkInterfaces + Metadata metadata + List disks + List serviceAccounts + String selfLink + String serverGroup + Tags tags + Map labels + ConsulNode consulNode + List securityGroups + + View(GoogleInstance googleInstance){ + name = googleInstance.name + gceId = googleInstance.gceId + instanceId = googleInstance.name + instanceType = googleInstance.instanceType + cpuPlatform = googleInstance.cpuPlatform + launchTime = googleInstance.launchTime + zone = googleInstance.zone + region = googleInstance.region + placement = ["availabilityZone": googleInstance.zone] + networkInterfaces = googleInstance.networkInterfaces + metadata = googleInstance.metadata + disks = googleInstance.disks + serviceAccounts = googleInstance.serviceAccounts + selfLink = googleInstance.selfLink + serverGroup = googleInstance.serverGroup + tags = googleInstance.tags + labels = googleInstance.labels + consulNode = googleInstance.consulNode + securityGroups = googleInstance.securityGroups + } List<Map<String, String>> getSecurityGroups() { - GoogleInstance.this.securityGroups.collect { + securityGroups.collect { ["groupName": it, "groupId": it] } } @@ -106,7 +139,7 @@ class GoogleInstance { healths << mapper.convertValue(h, new TypeReference<Map<String, Object>>() {}) } healths << mapper.convertValue(instanceHealth?.view, new TypeReference<Map<String, Object>>() {}) - healths.unique() + healths } @JsonIgnore @@ -121,7 +154,7 @@ class GoogleInstance { consulNode?.healths?.each { allHealths << it } - allHealths.unique() + allHealths } @Override @@ -134,6 +167,14 @@ class GoogleInstance { HealthState.Unknown } + Moniker getMoniker() { + return NamerRegistry.lookup() + .withProvider(GoogleCloudProvider.ID) + .withAccount(account) + .withResource(GoogleLabeledResource) + .deriveMoniker(GoogleInstance.this) + } + private static boolean anyDown(List healthsList) { healthsList.any { it.state == HealthState.Down } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroup.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroup.groovy index 63f7862f6fe..a5281bfef82 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroup.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroup.groovy @@ -37,16 +37,32 @@ class GoogleSecurityGroup implements SecurityGroup { final String region final String network final String selfLink + + // GCE firewall rules (modeled by this class) can either use sourceTags/targetTags or + // sourceServiceAccounts/targetServiceAccounts. + // Read more at https://cloud.google.com/vpc/docs/firewalls#service-accounts-vs-tags. + // Don't see an elegant way to encapsulate source tags in an inbound rule. final List sourceTags final List targetTags + + final List sourceServiceAccounts + final List targetServiceAccounts + final Set inboundRules final Set outboundRules - void setMoniker(Moniker _ignored) {} - @Override SecurityGroupSummary getSummary() { - new GoogleSecurityGroupSummary(name: name, id: id, network: network, selfLink: selfLink, sourceTags: sourceTags, targetTags: targetTags) + new GoogleSecurityGroupSummary( + name: name, + id: id, + network: network, + selfLink: selfLink, + sourceTags: sourceTags, + targetTags: targetTags, + sourceServiceAccounts: sourceServiceAccounts, + targetServiceAccounts: targetServiceAccounts + ) } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroupSummary.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroupSummary.groovy index 91396c13256..0b9f2de5023 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroupSummary.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSecurityGroupSummary.groovy @@ -31,4 +31,6 @@ class GoogleSecurityGroupSummary implements SecurityGroupSummary { String selfLink String sourceTags String targetTags + String sourceServiceAccounts + String targetServiceAccounts } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleServerGroup.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleServerGroup.groovy index 9f4b8cfca90..2bc27dcb2fe 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleServerGroup.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleServerGroup.groovy @@ -19,21 +19,30 @@ package com.netflix.spinnaker.clouddriver.google.model import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.annotation.JsonInclude import com.fasterxml.jackson.annotation.JsonTypeInfo -import com.google.api.services.compute.model.AutoscalingPolicy import com.google.api.services.compute.model.InstanceGroupManagerActionsSummary import com.google.api.services.compute.model.InstanceGroupManagerAutoHealingPolicy +import com.google.api.services.compute.model.ServiceAccount +import com.google.api.services.compute.model.StatefulPolicy import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerView import com.netflix.spinnaker.clouddriver.model.HealthState import 
com.netflix.spinnaker.clouddriver.model.Instance import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.moniker.Moniker import groovy.transform.Canonical +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.LOAD_BALANCING_POLICY +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES + @Canonical -class GoogleServerGroup { +class GoogleServerGroup implements GoogleLabeledResource { String name String region + String account Boolean regional = false String zone Set zones = new HashSet<>() @@ -48,7 +57,11 @@ class GoogleServerGroup { Boolean discovery = false String networkName Boolean canIpForward = false + Boolean enableSecureBoot = false + Boolean enableVtpm = false + Boolean enableIntegrityMonitoring = false Set instanceTemplateTags = [] + Set instanceTemplateServiceAccounts = [] Map instanceTemplateLabels = [:] String selfLink InstanceGroupManagerActionsSummary currentActions @@ -58,10 +71,13 @@ class GoogleServerGroup { GoogleDistributionPolicy distributionPolicy Boolean selectZones + GoogleAutoscalingPolicy autoscalingPolicy + @JsonTypeInfo(use=JsonTypeInfo.Id.CLASS, include=JsonTypeInfo.As.PROPERTY, property="class") - AutoscalingPolicy autoscalingPolicy + StatefulPolicy statefulPolicy List autoscalingMessages + //Map scalingSchedulingMessages @JsonTypeInfo(use=JsonTypeInfo.Id.CLASS, include=JsonTypeInfo.As.PROPERTY, property="class") InstanceGroupManagerAutoHealingPolicy autoHealingPolicy @@ -72,7 +88,13 @@ class GoogleServerGroup { @JsonIgnore View getView() { - new View() + new View(this) + } + + @Override + @JsonIgnore + Map getLabels() { + return instanceTemplateLabels } @JsonInclude(JsonInclude.Include.NON_NULL) @@ -80,43 +102,82 @@ class GoogleServerGroup { class View implements ServerGroup { final String type = GoogleCloudProvider.ID final String cloudProvider = GoogleCloudProvider.ID - static final String REGIONAL_LOAD_BALANCER_NAMES = "load-balancer-names" - static final String GLOBAL_LOAD_BALANCER_NAMES = "global-load-balancer-names" - static final String BACKEND_SERVICE_NAMES = "backend-service-names" - static final String LOAD_BALANCING_POLICY = "load-balancing-policy" - static final String SELECT_ZONES = 'select-zones' - static final String AUTOSCALING_POLICY = 'autoscaling-policy' - - String name = GoogleServerGroup.this.name - String region = GoogleServerGroup.this.region - Boolean regional = GoogleServerGroup.this.regional - String zone = GoogleServerGroup.this.zone - Set zones = GoogleServerGroup.this.zones - Set instances = GoogleServerGroup.this.instances.collect { it?.view } - Map asg = GoogleServerGroup.this.asg - Map launchConfig = GoogleServerGroup.this.launchConfig - Map namedPorts = GoogleServerGroup.this.namedPorts - Set securityGroups = GoogleServerGroup.this.securityGroups - Map buildInfo = GoogleServerGroup.this.buildInfo - Boolean disabled = GoogleServerGroup.this.disabled - String networkName = GoogleServerGroup.this.networkName - Boolean canIpForward = GoogleServerGroup.this.canIpForward - Set instanceTemplateTags = GoogleServerGroup.this.instanceTemplateTags - Map instanceTemplateLabels = GoogleServerGroup.this.instanceTemplateLabels - String selfLink = GoogleServerGroup.this.selfLink - Boolean discovery = GoogleServerGroup.this.discovery - InstanceGroupManagerActionsSummary currentActions = 
GoogleServerGroup.this.currentActions - AutoscalingPolicy autoscalingPolicy = GoogleServerGroup.this.autoscalingPolicy - List autoscalingMessages = GoogleServerGroup.this.autoscalingMessages - InstanceGroupManagerAutoHealingPolicy autoHealingPolicy = GoogleServerGroup.this.autoHealingPolicy - GoogleDistributionPolicy distributionPolicy = GoogleServerGroup.this.distributionPolicy - Boolean selectZones = GoogleServerGroup.this.selectZones - @Override - Boolean isDisabled() { // Because groovy isn't smart enough to generate this method :-( + String name + String region + Boolean regional + String zone + Set zones + Set instances + Map asg + Map launchConfig + Map namedPorts + Set securityGroups + Map buildInfo + Boolean disabled + String networkName + Boolean canIpForward + Boolean enableSecureBoot + Boolean enableVtpm + Boolean enableIntegrityMonitoring + Set instanceTemplateTags + Set instanceTemplateServiceAccounts + Map instanceTemplateLabels + String selfLink + Boolean discovery + InstanceGroupManagerActionsSummary currentActions + GoogleAutoscalingPolicy autoscalingPolicy + StatefulPolicy statefulPolicy + List autoscalingMessages + InstanceGroupManagerAutoHealingPolicy autoHealingPolicy + GoogleDistributionPolicy distributionPolicy + Boolean selectZones + + View(GoogleServerGroup googleServerGroup){ + name = googleServerGroup.name + region = googleServerGroup.region + regional = googleServerGroup.regional + zone = googleServerGroup.zone + zones = googleServerGroup.zones + instances = googleServerGroup.instances.collect { it?.view } + asg = googleServerGroup.asg + launchConfig = googleServerGroup.launchConfig + namedPorts = googleServerGroup.namedPorts + securityGroups = googleServerGroup.securityGroups + buildInfo = googleServerGroup.buildInfo + disabled = googleServerGroup.disabled + networkName = googleServerGroup.networkName + canIpForward = googleServerGroup.canIpForward + enableSecureBoot = googleServerGroup.enableSecureBoot + enableVtpm = googleServerGroup.enableVtpm + enableIntegrityMonitoring = googleServerGroup.enableIntegrityMonitoring + instanceTemplateTags = googleServerGroup.instanceTemplateTags + instanceTemplateServiceAccounts = googleServerGroup.instanceTemplateServiceAccounts + instanceTemplateLabels = googleServerGroup.instanceTemplateLabels + selfLink = googleServerGroup.selfLink + discovery = googleServerGroup.discovery + currentActions = googleServerGroup.currentActions + autoscalingPolicy = googleServerGroup.autoscalingPolicy + statefulPolicy = googleServerGroup.statefulPolicy + autoscalingMessages = googleServerGroup.autoscalingMessages + autoHealingPolicy = googleServerGroup.autoHealingPolicy + distributionPolicy = googleServerGroup.distributionPolicy + selectZones = googleServerGroup.selectZones + } + + Boolean isDisabled() { disabled } + @Override + Moniker getMoniker() { + return NamerRegistry.lookup() + .withProvider(GoogleCloudProvider.ID) + .withAccount(account) + .withResource(GoogleLabeledResource) + .deriveMoniker(GoogleServerGroup.this) + } + @Override Long getCreatedTime() { launchConfig ? launchConfig.createdTime as Long : null @@ -126,10 +187,10 @@ class GoogleServerGroup { ServerGroup.Capacity getCapacity() { def asg = GoogleServerGroup.this.asg asg ? - new ServerGroup.Capacity(min: asg.minSize ? asg.minSize as Integer : 0, - max: asg.maxSize ? asg.maxSize as Integer : 0, - desired: asg.desiredCapacity ? asg.desiredCapacity as Integer : 0) : - null + new ServerGroup.Capacity(min: asg.minSize ? asg.minSize as Integer : 0, + max: asg.maxSize ? 
asg.maxSize as Integer : 0, + desired: asg.desiredCapacity ? asg.desiredCapacity as Integer : 0) : + null } /** @@ -188,12 +249,12 @@ class GoogleServerGroup { @Override ServerGroup.InstanceCounts getInstanceCounts() { new ServerGroup.InstanceCounts( - total: instances.size(), - up: filterInstancesByHealthState(instances, HealthState.Up)?.size() ?: 0, - down: filterInstancesByHealthState(instances, HealthState.Down)?.size() ?: 0, - unknown: filterInstancesByHealthState(instances, HealthState.Unknown)?.size() ?: 0, - starting: filterInstancesByHealthState(instances, HealthState.Starting)?.size() ?: 0, - outOfService: filterInstancesByHealthState(instances, HealthState.OutOfService)?.size() ?: 0 + total: instances.size(), + up: filterInstancesByHealthState(instances, HealthState.Up)?.size() ?: 0, + down: filterInstancesByHealthState(instances, HealthState.Down)?.size() ?: 0, + unknown: filterInstancesByHealthState(instances, HealthState.Unknown)?.size() ?: 0, + starting: filterInstancesByHealthState(instances, HealthState.Starting)?.size() ?: 0, + outOfService: filterInstancesByHealthState(instances, HealthState.OutOfService)?.size() ?: 0 ) } @@ -201,11 +262,12 @@ class GoogleServerGroup { Map getProviderMetadata() { [ tags: GoogleServerGroup.this.launchConfig?.instanceTemplate?.properties?.tags?.items, + serviceAccounts: GoogleServerGroup.this.launchConfig?.instanceTemplate?.properties?.serviceAccounts, networkName: GoogleServerGroup.this.networkName ] } - static Collection filterInstancesByHealthState(Set instances, HealthState healthState) { + Collection filterInstancesByHealthState(Set instances, HealthState healthState) { instances.findAll { Instance it -> it.getHealthState() == healthState } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy index 561938a7cde..929828dd53f 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy @@ -30,5 +30,5 @@ class GoogleSubnet implements Subnet { String account String region String selfLink - String purpose = "n/a" + String purpose } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy index efa2dcfa09b..a6e52cdf4c8 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy @@ -20,15 +20,36 @@ import com.google.api.services.compute.model.Metadata import com.google.api.services.compute.model.PathMatcher import com.google.api.services.compute.model.PathRule import com.google.api.services.compute.model.UrlMap -import com.netflix.spinnaker.cats.cache.CacheData +import com.google.common.base.Splitter +import com.google.common.base.Strings +import com.google.common.collect.Lists import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService +import 
com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHostRule +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer.InternalHttpLbView; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancedBackend +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GooglePathMatcher +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GooglePathRule +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleSslLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTargetProxyType +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTcpLoadBalancer +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import org.springframework.util.ClassUtils +import javax.annotation.Nonnull +import javax.annotation.Nullable import java.text.SimpleDateFormat +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.BACKEND_SERVICE_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES + @Slf4j class Utils { public static final String TARGET_POOL_NAME_PREFIX = "tp" @@ -70,33 +91,40 @@ class Utils { return lastIndex != -1 ? fullUrl.substring(lastIndex + 1) : fullUrl } - static GoogleTargetProxyType getTargetProxyType(String fullUrl) { - if (!fullUrl) { - throw new IllegalArgumentException("Target proxy url ${fullUrl} malformed.") - } + /** + * Given a URI representing a GCP target proxy, returns the corresponding + * {@link GoogleTargetProxyType}, or {@link GoogleTargetProxyType#UNKNOWN} + * if no {@link GoogleTargetProxyType} could be derived from the URI. + * @param fullUrl the URI to parse + * @return the corresponding {@link GoogleTargetProxyType} + */ + @CompileStatic + @Nonnull + static GoogleTargetProxyType getTargetProxyType(@Nullable String fullUrl) { + String resourceType = getResourceType(Strings.nullToEmpty(fullUrl)) + return GoogleTargetProxyType.fromResourceType(resourceType) + } - int lastIndex = fullUrl.lastIndexOf('/') - if (lastIndex == -1) { - throw new IllegalArgumentException("Target proxy url ${fullUrl} malformed.") - } - String withoutName = fullUrl.substring(0, lastIndex) - switch (getLocalName(withoutName)) { - case 'targetHttpProxies': - return GoogleTargetProxyType.HTTP - break - case 'targetHttpsProxies': - return GoogleTargetProxyType.HTTPS - break - case 'targetSslProxies': - return GoogleTargetProxyType.SSL - break - case 'targetTcpProxies': - return GoogleTargetProxyType.TCP - break - default: - throw new IllegalArgumentException("Target proxy url ${fullUrl} has unknown type.") - break - } + private static final Splitter onSlash = Splitter.on('/').omitEmptyStrings() + + /** + * Given a URI representing a GCP resource, returns the type of the resource. + * + * This function splits the input URI on slashes, and returns the second-to-last + * part, which will generally be the type of the resource. 
Callers must ensure + that their URI follows this pattern for the results to be meaningful. + * @param uri URI to split + * @return The resource type of the URI, or the empty string if a resource type + * could not be parsed from the URI. + */ + @CompileStatic + @Nonnull + private static String getResourceType(@Nonnull String uri) { + return Lists.reverse(onSlash.splitToList(uri)) + .stream() + .skip(1) + .findFirst() + .orElse("") } static String getZoneFromInstanceUrl(String fullUrl) { @@ -128,9 +156,9 @@ } /** * Parses region from a full server group Url of the form: * - * "https://www.googleapis.com/compute/v1/projects/$projectName/zones/$zone/instanceGroups/$serverGroupName" + * "https://compute.googleapis.com/compute/v1/projects/$projectName/zones/$zone/instanceGroups/$serverGroupName" * OR - * "https://www.googleapis.com/compute/v1/projects/$projectName/regions/$region/instanceGroups/$serverGroupName" + * "https://compute.googleapis.com/compute/v1/projects/$projectName/regions/$region/instanceGroups/$serverGroupName" */ static String getRegionFromGroupUrl(String fullUrl) { if (!fullUrl) { @@ -244,14 +272,24 @@ class Utils { static List getBackendServicesFromHttpLoadBalancerView(GoogleHttpLoadBalancer.View googleLoadBalancer) { List backendServices = [googleLoadBalancer.defaultService] - List pathMatchers = googleLoadBalancer?.hostRules?.collect { GoogleHostRule hostRule -> hostRule.pathMatcher } + collectBackendServicesFromHostRules(googleLoadBalancer?.hostRules, backendServices) + return backendServices; + } + + static List getBackendServicesFromInternalHttpLoadBalancerView(InternalHttpLbView googleLoadBalancer) { + List backendServices = [googleLoadBalancer.defaultService] + collectBackendServicesFromHostRules(googleLoadBalancer?.hostRules, backendServices) + return backendServices + } + + static void collectBackendServicesFromHostRules(List hostRules, List backendServices) { + List pathMatchers = hostRules.collect { GoogleHostRule hostRule -> hostRule.pathMatcher } pathMatchers?.each { GooglePathMatcher pathMatcher -> backendServices << pathMatcher.defaultService pathMatcher?.pathRules?.each { GooglePathRule googlePathRule -> backendServices << googlePathRule.backendService } - } - return backendServices + }?.findAll { it != null } } static List getBackendServicesFromUrlMap(UrlMap urlMap) { @@ -267,8 +305,8 @@ class Utils { static boolean determineHttpLoadBalancerDisabledState(GoogleHttpLoadBalancer loadBalancer, GoogleServerGroup serverGroup) { - def httpLoadBalancersFromMetadata = serverGroup.asg.get(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES) - def backendServicesFromMetadata = serverGroup.asg.get(GoogleServerGroup.View.BACKEND_SERVICE_NAMES) + def httpLoadBalancersFromMetadata = serverGroup.asg.get(GLOBAL_LOAD_BALANCER_NAMES) + def backendServicesFromMetadata = serverGroup.asg.get(BACKEND_SERVICE_NAMES) List<List<GoogleLoadBalancedBackend>> serviceBackends = getBackendServicesFromHttpLoadBalancerView(loadBalancer.view) .findAll { it && it.name in backendServicesFromMetadata } .collect { it.backends } @@ -279,6 +317,20 @@ class Utils { return loadBalancer.name in httpLoadBalancersFromMetadata && !(serverGroup.name in backendGroupNames) } + static boolean determineInternalHttpLoadBalancerDisabledState(GoogleInternalHttpLoadBalancer loadBalancer, + GoogleServerGroup serverGroup) { + def loadBalancersFromMetadata = serverGroup.asg.get(REGIONAL_LOAD_BALANCER_NAMES) + def backendServicesFromMetadata = serverGroup.asg.get(REGION_BACKEND_SERVICE_NAMES) + List<List<GoogleLoadBalancedBackend>> serviceBackends = 
getBackendServicesFromInternalHttpLoadBalancerView(loadBalancer.view) + .findAll { it && it.name in backendServicesFromMetadata } + .collect { it.backends } + List backendGroupNames = serviceBackends.flatten() + .findAll { serverGroup.region == Utils.getRegionFromGroupUrl(it.serverGroupUrl) } + .collect { GCEUtil.getLocalName(it.serverGroupUrl) } + + return loadBalancer.name in loadBalancersFromMetadata && !(serverGroup.name in backendGroupNames) + } + static String decorateXpnResourceIdIfNeeded(String managedProjectId, String xpnResource) { if (!xpnResource) { return xpnResource @@ -295,7 +347,7 @@ class Utils { static boolean determineInternalLoadBalancerDisabledState(GoogleInternalLoadBalancer loadBalancer, GoogleServerGroup serverGroup) { - def regionalLoadBalancersFromMetadata = serverGroup.asg.get(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES) + def regionalLoadBalancersFromMetadata = serverGroup.asg.get(REGIONAL_LOAD_BALANCER_NAMES) if (loadBalancer.backendService == null) { log.warn("Malformed internal load balancer encountered: ${loadBalancer}") @@ -309,7 +361,7 @@ class Utils { static boolean determineSslLoadBalancerDisabledState(GoogleSslLoadBalancer loadBalancer, GoogleServerGroup serverGroup) { - def globalLoadBalancersFromMetadata = serverGroup.asg.get(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES) + def globalLoadBalancersFromMetadata = serverGroup.asg.get(GLOBAL_LOAD_BALANCER_NAMES) if (loadBalancer.backendService == null) { log.warn("Malformed ssl load balancer encountered: ${loadBalancer}") @@ -323,7 +375,7 @@ class Utils { static boolean determineTcpLoadBalancerDisabledState(GoogleTcpLoadBalancer loadBalancer, GoogleServerGroup serverGroup) { - def globalLoadBalancersFromMetadata = serverGroup.asg.get(GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES) + def globalLoadBalancersFromMetadata = serverGroup.asg.get(GLOBAL_LOAD_BALANCER_NAMES) if (loadBalancer.backendService == null) { log.warn("Malformed tcp load balancer encountered: ${loadBalancer}") diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/health/GoogleLoadBalancerHealth.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/health/GoogleLoadBalancerHealth.groovy index 3b14c4f460e..26aa06d7510 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/health/GoogleLoadBalancerHealth.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/health/GoogleLoadBalancerHealth.groovy @@ -74,14 +74,18 @@ class GoogleLoadBalancerHealth { @JsonIgnore View getView() { - new View() + new View(this) } class View extends GoogleHealth implements Health { final Type type = Type.LoadBalancer final HealthClass healthClass = null - List loadBalancers = GoogleLoadBalancerHealth.this.lbHealthSummaries + List loadBalancers + + View(GoogleLoadBalancerHealth googleLoadBalancerHealth){ + loadBalancers = googleLoadBalancerHealth.lbHealthSummaries + } HealthState getState() { GoogleLoadBalancerHealth.this.status?.toHeathState() diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleBackendService.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleBackendService.groovy index 3c4539399e6..248027653ce 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleBackendService.groovy +++ 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleBackendService.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing +import com.google.common.collect.ImmutableList import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck import groovy.transform.Canonical import groovy.transform.EqualsAndHashCode @@ -38,7 +39,7 @@ class GoogleBackendService { BackendServiceKind kind String healthCheckLink GoogleHealthCheck healthCheck - List backends + List backends = ImmutableList.of() GoogleSessionAffinity sessionAffinity Integer affinityCookieTtlSec GoogleLoadBalancingScheme loadBalancingScheme diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancer.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancer.groovy index 79e30996a1c..3afa9f6cfd7 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancer.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancer.groovy @@ -20,8 +20,11 @@ import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup import groovy.transform.Canonical +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString -@Canonical +@ToString(includeSuper=true) +@EqualsAndHashCode(callSuper=true) class GoogleHttpLoadBalancer extends GoogleLoadBalancer { GoogleLoadBalancerType type = GoogleLoadBalancerType.HTTP GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.EXTERNAL @@ -55,27 +58,45 @@ class GoogleHttpLoadBalancer extends GoogleLoadBalancer { @JsonIgnore GoogleLoadBalancerView getView() { - new View() + new View(this) } @Canonical class View extends GoogleLoadBalancerView { - GoogleLoadBalancerType loadBalancerType = GoogleHttpLoadBalancer.this.type - GoogleLoadBalancingScheme loadBalancingScheme = GoogleHttpLoadBalancer.this.loadBalancingScheme + GoogleLoadBalancerType loadBalancerType + GoogleLoadBalancingScheme loadBalancingScheme - String name = GoogleHttpLoadBalancer.this.name - String account = GoogleHttpLoadBalancer.this.account - String region = GoogleHttpLoadBalancer.this.region - Long createdTime = GoogleHttpLoadBalancer.this.createdTime - String ipAddress = GoogleHttpLoadBalancer.this.ipAddress - String ipProtocol = GoogleHttpLoadBalancer.this.ipProtocol - String portRange = GoogleHttpLoadBalancer.this.portRange + String name + String account + String region + Long createdTime + String ipAddress + String ipProtocol + String portRange - GoogleBackendService defaultService = GoogleHttpLoadBalancer.this.defaultService - List hostRules = GoogleHttpLoadBalancer.this.hostRules - String certificate = GoogleHttpLoadBalancer.this.certificate - String urlMapName = GoogleHttpLoadBalancer.this.urlMapName + GoogleBackendService defaultService + List hostRules + String certificate + String urlMapName + + Set serverGroups + + View(GoogleHttpLoadBalancer googleHttpLoadBalancer){ + loadBalancerType = googleHttpLoadBalancer.type + loadBalancingScheme = googleHttpLoadBalancer.loadBalancingScheme + name = googleHttpLoadBalancer.name + account = googleHttpLoadBalancer.account + region = 
googleHttpLoadBalancer.region + createdTime = googleHttpLoadBalancer.createdTime + ipAddress = googleHttpLoadBalancer.ipAddress + ipProtocol = googleHttpLoadBalancer.ipProtocol + portRange = googleHttpLoadBalancer.portRange + defaultService = googleHttpLoadBalancer.defaultService + hostRules = googleHttpLoadBalancer.hostRules + certificate = googleHttpLoadBalancer.certificate + urlMapName = googleHttpLoadBalancer.urlMapName + serverGroups = new HashSet<>() + } - Set serverGroups = new HashSet<>() } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy index 6d9e3e34079..5b17ea448b9 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy @@ -30,7 +30,7 @@ import com.google.api.services.compute.model.NamedPort @JsonIgnoreProperties(ignoreUnknown = true) class GoogleHttpLoadBalancingPolicy extends GoogleLoadBalancingPolicy { @JsonIgnore - static final String HTTP_DEFAULT_PORT_NAME = 'http' + public static final String HTTP_DEFAULT_PORT_NAME = 'http' @JsonIgnore static final Integer HTTP_DEFAULT_PORT = 80 diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalLoadBalancer.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalLoadBalancer.groovy index a210649f79f..8479d505721 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalLoadBalancer.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalLoadBalancer.groovy @@ -18,9 +18,11 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import groovy.transform.Canonical +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString -@Canonical +@ToString(includeSuper=true) +@EqualsAndHashCode(callSuper=true) class GoogleInternalLoadBalancer extends GoogleLoadBalancer { GoogleLoadBalancerType type = GoogleLoadBalancerType.INTERNAL GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.INTERNAL @@ -32,26 +34,44 @@ class GoogleInternalLoadBalancer extends GoogleLoadBalancer { @JsonIgnore GoogleLoadBalancerView getView() { - new View() + new View(this) } class View extends GoogleLoadBalancerView { - GoogleLoadBalancerType loadBalancerType = GoogleInternalLoadBalancer.this.type - GoogleLoadBalancingScheme loadBalancingScheme = GoogleInternalLoadBalancer.this.loadBalancingScheme - - String name = GoogleInternalLoadBalancer.this.name - String account = GoogleInternalLoadBalancer.this.account - String region = GoogleInternalLoadBalancer.this.region - Long createdTime = GoogleInternalLoadBalancer.this.createdTime - String ipAddress = GoogleInternalLoadBalancer.this.ipAddress - String ipProtocol = GoogleInternalLoadBalancer.this.ipProtocol - String portRange = GoogleInternalLoadBalancer.this.portRange - - List ports = GoogleInternalLoadBalancer.this.ports - String network 
= GoogleInternalLoadBalancer.this.network - String subnet = GoogleInternalLoadBalancer.this.subnet - GoogleBackendService backendService = GoogleInternalLoadBalancer.this.backendService - - Set serverGroups = new HashSet<>() + GoogleLoadBalancerType loadBalancerType + GoogleLoadBalancingScheme loadBalancingScheme + + String name + String account + String region + Long createdTime + String ipAddress + String ipProtocol + String portRange + + List ports + String network + String subnet + GoogleBackendService backendService + + Set serverGroups + + View(GoogleInternalLoadBalancer googleInternalLoadBalancer){ + loadBalancerType = googleInternalLoadBalancer.type + loadBalancingScheme = googleInternalLoadBalancer.loadBalancingScheme + name = googleInternalLoadBalancer.name + account = googleInternalLoadBalancer.account + region = googleInternalLoadBalancer.region + createdTime = googleInternalLoadBalancer.createdTime + ipAddress = googleInternalLoadBalancer.ipAddress + ipProtocol = googleInternalLoadBalancer.ipProtocol + portRange = googleInternalLoadBalancer.portRange + ports = googleInternalLoadBalancer.ports + network = googleInternalLoadBalancer.network + subnet = googleInternalLoadBalancer.subnet + backendService = googleInternalLoadBalancer.backendService + serverGroups = new HashSet<>() + } + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy index ea3298bb6e0..d3d2e85c44a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy @@ -19,6 +19,7 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing enum GoogleLoadBalancerType { HTTP, INTERNAL, + INTERNAL_MANAGED, NETWORK, SSL, TCP diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy index ce466ee371e..92c09e927c2 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy @@ -18,4 +18,5 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing enum GoogleLoadBalancingScheme { EXTERNAL, INTERNAL, + INTERNAL_MANAGED, } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleNetworkLoadBalancer.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleNetworkLoadBalancer.groovy index 6201c412c1b..ddf6706589a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleNetworkLoadBalancer.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleNetworkLoadBalancer.groovy @@ -20,37 +20,58 @@ import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import 
com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import groovy.transform.Canonical +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString -@Canonical +@ToString(includeSuper=true) +@EqualsAndHashCode(callSuper=true) class GoogleNetworkLoadBalancer extends GoogleLoadBalancer { GoogleLoadBalancerType type = GoogleLoadBalancerType.NETWORK GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.EXTERNAL String targetPool + String sessionAffinity GoogleHealthCheck healthCheck @JsonIgnore GoogleLoadBalancerView getView() { - new View() + new View(this) } class View extends GoogleLoadBalancerView { - final String type = GoogleCloudProvider.ID - GoogleLoadBalancerType loadBalancerType = GoogleNetworkLoadBalancer.this.type - GoogleLoadBalancingScheme loadBalancingScheme = GoogleNetworkLoadBalancer.this.loadBalancingScheme - - String name = GoogleNetworkLoadBalancer.this.name - String account = GoogleNetworkLoadBalancer.this.account - String region = GoogleNetworkLoadBalancer.this.region - Long createdTime = GoogleNetworkLoadBalancer.this.createdTime - String ipAddress = GoogleNetworkLoadBalancer.this.ipAddress - String ipProtocol = GoogleNetworkLoadBalancer.this.ipProtocol - String portRange = GoogleNetworkLoadBalancer.this.portRange - - String targetPool = GoogleNetworkLoadBalancer.this.targetPool - GoogleHealthCheck.View healthCheck = GoogleNetworkLoadBalancer.this.healthCheck?.view - - Set serverGroups = new HashSet<>() + GoogleLoadBalancerType loadBalancerType + GoogleLoadBalancingScheme loadBalancingScheme + + String name + String account + String region + Long createdTime + String ipAddress + String ipProtocol + String portRange + + String targetPool + String sessionAffinity + + GoogleHealthCheck.View healthCheck + + Set serverGroups + + View(GoogleNetworkLoadBalancer googleNetworkLoadBalancer){ + loadBalancerType = googleNetworkLoadBalancer.type + loadBalancingScheme = googleNetworkLoadBalancer.loadBalancingScheme + name = googleNetworkLoadBalancer.name + account = googleNetworkLoadBalancer.account + region = googleNetworkLoadBalancer.region + createdTime = googleNetworkLoadBalancer.createdTime + ipAddress = googleNetworkLoadBalancer.ipAddress + ipProtocol = googleNetworkLoadBalancer.ipProtocol + portRange = googleNetworkLoadBalancer.portRange + targetPool = googleNetworkLoadBalancer.targetPool + sessionAffinity = googleNetworkLoadBalancer.sessionAffinity + healthCheck = googleNetworkLoadBalancer.healthCheck?.view + serverGroups = new HashSet<>() + } + } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy index 19223fb9971..9dea30bfe2d 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy @@ -22,4 +22,7 @@ enum GoogleSessionAffinity { CLIENT_IP_PORT_PROTO, CLIENT_IP_PROTO, GENERATED_COOKIE, + HEADER_FIELD, + HTTP_COOKIE, + STRONG_COOKIE_AFFINITY } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSslLoadBalancer.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSslLoadBalancer.groovy index 8ec86a56e3d..90109e5d1b2 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSslLoadBalancer.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSslLoadBalancer.groovy @@ -18,9 +18,11 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import groovy.transform.Canonical +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString -@Canonical +@ToString(includeSuper=true) +@EqualsAndHashCode(callSuper=true) class GoogleSslLoadBalancer extends GoogleLoadBalancer { GoogleLoadBalancerType type = GoogleLoadBalancerType.SSL GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.EXTERNAL @@ -30,24 +32,40 @@ class GoogleSslLoadBalancer extends GoogleLoadBalancer { @JsonIgnore GoogleLoadBalancerView getView() { - new View() + new View(this) } class View extends GoogleLoadBalancerView { - GoogleLoadBalancerType loadBalancerType = GoogleSslLoadBalancer.this.type - GoogleLoadBalancingScheme loadBalancingScheme = GoogleSslLoadBalancer.this.loadBalancingScheme + GoogleLoadBalancerType loadBalancerType + GoogleLoadBalancingScheme loadBalancingScheme - String name = GoogleSslLoadBalancer.this.name - String account = GoogleSslLoadBalancer.this.account - String region = GoogleSslLoadBalancer.this.region - Long createdTime = GoogleSslLoadBalancer.this.createdTime - String ipAddress = GoogleSslLoadBalancer.this.ipAddress - String ipProtocol = GoogleSslLoadBalancer.this.ipProtocol - String portRange = GoogleSslLoadBalancer.this.portRange + String name + String account + String region + Long createdTime + String ipAddress + String ipProtocol + String portRange - String certificate = GoogleSslLoadBalancer.this.certificate - GoogleBackendService backendService = GoogleSslLoadBalancer.this.backendService + String certificate + GoogleBackendService backendService + + Set serverGroups + + View(GoogleSslLoadBalancer googleSslLoadBalancer){ + loadBalancerType = googleSslLoadBalancer.type + loadBalancingScheme = googleSslLoadBalancer.loadBalancingScheme + name = googleSslLoadBalancer.name + account = googleSslLoadBalancer.account + region = googleSslLoadBalancer.region + createdTime = googleSslLoadBalancer.createdTime + ipAddress = googleSslLoadBalancer.ipAddress + ipProtocol = googleSslLoadBalancer.ipProtocol + portRange = googleSslLoadBalancer.portRange + certificate = googleSslLoadBalancer.certificate + backendService = googleSslLoadBalancer.backendService + serverGroups = new HashSet<>() + } - Set serverGroups = new HashSet<>() } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTargetProxyType.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTargetProxyType.groovy deleted file mode 100644 index 83510395574..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTargetProxyType.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.model.loadbalancing - -enum GoogleTargetProxyType { - HTTP, - HTTPS, - SSL, - TCP -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTcpLoadBalancer.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTcpLoadBalancer.groovy index 45db4c7f3e4..1e05db8b6cd 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTcpLoadBalancer.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTcpLoadBalancer.groovy @@ -18,9 +18,11 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import groovy.transform.Canonical +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString -@Canonical +@ToString(includeSuper=true) +@EqualsAndHashCode(callSuper=true) class GoogleTcpLoadBalancer extends GoogleLoadBalancer { GoogleLoadBalancerType type = GoogleLoadBalancerType.TCP GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.EXTERNAL @@ -29,23 +31,38 @@ class GoogleTcpLoadBalancer extends GoogleLoadBalancer { @JsonIgnore GoogleLoadBalancerView getView() { - new View() + new View(this) } class View extends GoogleLoadBalancerView { - GoogleLoadBalancerType loadBalancerType = GoogleTcpLoadBalancer.this.type - GoogleLoadBalancingScheme loadBalancingScheme = GoogleTcpLoadBalancer.this.loadBalancingScheme + GoogleLoadBalancerType loadBalancerType + GoogleLoadBalancingScheme loadBalancingScheme - String name = GoogleTcpLoadBalancer.this.name - String account = GoogleTcpLoadBalancer.this.account - String region = GoogleTcpLoadBalancer.this.region - Long createdTime = GoogleTcpLoadBalancer.this.createdTime - String ipAddress = GoogleTcpLoadBalancer.this.ipAddress - String ipProtocol = GoogleTcpLoadBalancer.this.ipProtocol - String portRange = GoogleTcpLoadBalancer.this.portRange + String name + String account + String region + Long createdTime + String ipAddress + String ipProtocol + String portRange - GoogleBackendService backendService = GoogleTcpLoadBalancer.this.backendService + GoogleBackendService backendService + + Set serverGroups + + View(GoogleTcpLoadBalancer googleTcpLoadBalancer){ + loadBalancerType = googleTcpLoadBalancer.type + loadBalancingScheme = googleTcpLoadBalancer.loadBalancingScheme + name = googleTcpLoadBalancer.name + account = googleTcpLoadBalancer.account + region = googleTcpLoadBalancer.region + createdTime = googleTcpLoadBalancer.createdTime + ipAddress = googleTcpLoadBalancer.ipAddress + ipProtocol = googleTcpLoadBalancer.ipProtocol + portRange = googleTcpLoadBalancer.portRange + backendService = googleTcpLoadBalancer.backendService + serverGroups = new HashSet<>() + } - Set serverGroups = new HashSet<>() } } diff --git 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/GoogleInfrastructureProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/GoogleInfrastructureProvider.groovy index e0ea2f1dbfd..304f0a9b582 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/GoogleInfrastructureProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/GoogleInfrastructureProvider.groovy @@ -21,16 +21,17 @@ import com.netflix.spinnaker.cats.agent.AgentSchedulerAware import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.clouddriver.cache.SearchableProvider +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.security.BaseProvider import groovy.json.JsonOutput import static com.netflix.spinnaker.clouddriver.cache.SearchableProvider.SearchableResource import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.* -class GoogleInfrastructureProvider extends AgentSchedulerAware implements SearchableProvider { +class GoogleInfrastructureProvider extends BaseProvider implements SearchableProvider { - final Collection agents final String providerName = GoogleInfrastructureProvider.name final Set defaultCaches = [ @@ -47,20 +48,27 @@ class GoogleInfrastructureProvider extends AgentSchedulerAware implements Search SSL_CERTIFICATES.ns, ].asImmutable() - GoogleInfrastructureProvider(Collection agents) { - this.agents = agents + @Override + String getProviderName() { + return providerName + } + + + GoogleInfrastructureProvider() { } final Map urlMappingTemplates = [ (SECURITY_GROUPS.ns): '/securityGroups/$account/$provider/$name?region=$region' ] - final Map searchResultHydrators = [ + final Map searchResultHydrators = [ (new GoogleSearchableResource(ADDRESSES.ns)): new AddressResultHydrator(), (new GoogleSearchableResource(BACKEND_SERVICES.ns)): new BackendServiceResultHydrator(), (new GoogleSearchableResource(HEALTH_CHECKS.ns)): new HealthCheckResultHydrator(), (new GoogleSearchableResource(HTTP_HEALTH_CHECKS.ns)): new HttpHealthCheckResultHydrator(), - (new GoogleSearchableResource(INSTANCES.ns)): new InstanceSearchResultHydrator() + (new GoogleSearchableResource(INSTANCES.ns)): new InstanceSearchResultHydrator(), + (new GoogleSearchableResource(SERVER_GROUPS.ns)): new ServerGroupSearchResultHydrator(), + (new GoogleSearchableResource(CLUSTERS.ns)): new ClustersSearchResultHydrator(), ] @Override @@ -68,22 +76,22 @@ class GoogleInfrastructureProvider extends AgentSchedulerAware implements Search return Keys.parse(key) } - private static class AddressResultHydrator implements SearchableProvider.SearchResultHydrator { + private static class AddressResultHydrator implements SearchResultHydrator { @Override Map hydrateResult(Cache cacheView, Map result, String id) { - CacheData addressCacheData = cacheView.get(ADDRESSES.ns, id) + CacheData addressCacheData = cacheView.get(ADDRESSES.ns, id) return result + [ address: JsonOutput.toJson(addressCacheData.attributes.address) ] } } - private static class BackendServiceResultHydrator implements SearchableProvider.SearchResultHydrator { + private static class BackendServiceResultHydrator implements SearchResultHydrator { @Override Map hydrateResult(Cache 
cacheView, Map result, String id) { - CacheData backendService = cacheView.get(BACKEND_SERVICES.ns, id) + CacheData backendService = cacheView.get(BACKEND_SERVICES.ns, id) return result + [ healthCheckLink: backendService.attributes.healthCheckLink as String, sessionAffinity: backendService.attributes.sessionAffinity as String, @@ -96,7 +104,7 @@ class GoogleInfrastructureProvider extends AgentSchedulerAware implements Search } } - private static class HealthCheckResultHydrator implements SearchableProvider.SearchResultHydrator { + private static class HealthCheckResultHydrator implements SearchResultHydrator { @Override Map hydrateResult(Cache cacheView, Map result, String id) { @@ -107,25 +115,58 @@ class GoogleInfrastructureProvider extends AgentSchedulerAware implements Search } } - private static class InstanceSearchResultHydrator implements SearchableProvider.SearchResultHydrator { + private static class InstanceSearchResultHydrator implements SearchResultHydrator { @Override Map hydrateResult(Cache cacheView, Map result, String id) { def item = cacheView.get(INSTANCES.ns, id) - if (!item?.relationships["serverGroups"]) { + if (!item?.relationships[CLUSTERS.ns]) { + return result + } + + def cluster = Keys.parse(item.relationships[CLUSTERS.ns].first()) + return result + [ + application: cluster.application as String, + cluster: cluster.cluster as String + ] + } + } + + private static class ServerGroupSearchResultHydrator implements SearchResultHydrator { + + @Override + Map hydrateResult(Cache cacheView, Map result, String id) { + def item = cacheView.get(SERVER_GROUPS.ns, id) + if (!item?.relationships[CLUSTERS.ns]) { + return result + } + + def cluster = Keys.parse(item.relationships[CLUSTERS.ns].first()) + return result + [ + application: cluster.application as String, + cluster: cluster.cluster as String + ] + } + } + + private static class ClustersSearchResultHydrator implements SearchResultHydrator { + + @Override + Map hydrateResult(Cache cacheView, Map result, String id) { + def item = cacheView.get(CLUSTERS.ns, id) + if (!item?.relationships[CLUSTERS.ns]) { return result } - def serverGroup = Keys.parse(item.relationships[SERVER_GROUPS.ns].first()) + def cluster = Keys.parse(item.relationships[CLUSTERS.ns].first()) return result + [ - application: serverGroup.application as String, - cluster: serverGroup.cluster as String, - serverGroup: serverGroup.serverGroup as String + application: cluster.application as String, + cluster: cluster.cluster as String ] } } - private static class HttpHealthCheckResultHydrator implements SearchableProvider.SearchResultHydrator { + private static class HttpHealthCheckResultHydrator implements SearchResultHydrator { @Override Map hydrateResult(Cache cacheView, Map result, String id) { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleCachingAgent.groovy index 95a5a0f7d62..6c7eb1bde33 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleCachingAgent.groovy @@ -18,17 +18,20 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.core.type.TypeReference import com.fasterxml.jackson.databind.ObjectMapper -import 
com.google.api.client.googleapis.batch.BatchRequest -import com.google.api.client.http.HttpRequest -import com.google.api.client.http.HttpRequestInitializer import com.google.api.services.compute.Compute import com.google.common.annotations.VisibleForTesting import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AccountAware import com.netflix.spinnaker.cats.agent.CachingAgent +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource +import com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer import com.netflix.spinnaker.clouddriver.google.provider.GoogleInfrastructureProvider +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.moniker.Namer abstract class AbstractGoogleCachingAgent implements CachingAgent, AccountAware, GoogleExecutorTraits { @@ -36,13 +39,17 @@ abstract class AbstractGoogleCachingAgent implements CachingAgent, AccountAware, final String providerName = GoogleInfrastructureProvider.name + final Namer naming + String clouddriverUserAgentApplicationName // "Spinnaker/${version}" HTTP header string GoogleNamedAccountCredentials credentials ObjectMapper objectMapper Registry registry @VisibleForTesting - AbstractGoogleCachingAgent() {} + AbstractGoogleCachingAgent() { + this.naming = new GoogleLabeledResourceNamer() + } AbstractGoogleCachingAgent(String clouddriverUserAgentApplicationName, GoogleNamedAccountCredentials credentials, @@ -52,6 +59,10 @@ abstract class AbstractGoogleCachingAgent implements CachingAgent, AccountAware, this.credentials = credentials this.objectMapper = objectMapper this.registry = registry + this.naming = NamerRegistry.lookup() + .withProvider(GoogleCloudProvider.ID) + .withAccount(credentials.name) + .withResource(GoogleLabeledResource) } String getProject() { @@ -70,22 +81,13 @@ abstract class AbstractGoogleCachingAgent implements CachingAgent, AccountAware, credentials?.name } - def executeIfRequestsAreQueued(BatchRequest batch, String instrumentationContext) { - if (batch.size()) { - timeExecuteBatch(batch, instrumentationContext) - } + GoogleBatchRequest buildGoogleBatchRequest() { + return new GoogleBatchRequest(compute, clouddriverUserAgentApplicationName) } - BatchRequest buildBatchRequest() { - return compute.batch( - new HttpRequestInitializer() { - @Override - void initialize(HttpRequest request) throws IOException { - request.headers.setUserAgent(clouddriverUserAgentApplicationName); - request.setConnectTimeout(2 * 60000) // 2 minutes connect timeout - request.setReadTimeout(2 * 60000) // 2 minutes read timeout - } - } - ) + def executeIfRequestsAreQueued(GoogleBatchRequest googleBatchRequest, String instrumentationContext) { + if (googleBatchRequest.size()) { + timeExecuteBatch(googleBatchRequest, instrumentationContext) + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleLoadBalancerCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleLoadBalancerCachingAgent.groovy index d5bb1082d18..1d4bbc0af22 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleLoadBalancerCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleLoadBalancerCachingAgent.groovy @@ -26,6 +26,7 @@ import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys @@ -69,12 +70,12 @@ abstract class AbstractGoogleLoadBalancerCachingAgent extends AbstractGoogleCach this.metricsSupport = new OnDemandMetricsSupport( registry, this, - "${GoogleCloudProvider.ID}:${OnDemandAgent.OnDemandType.LoadBalancer}") + "${GoogleCloudProvider.ID}:${OnDemandType.LoadBalancer}") } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.LoadBalancer && cloudProvider == GoogleCloudProvider.ID + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.LoadBalancer && cloudProvider == GoogleCloudProvider.ID } @Override @@ -141,7 +142,7 @@ abstract class AbstractGoogleLoadBalancerCachingAgent extends AbstractGoogleCach evictions[LOAD_BALANCERS.ns].addAll(identifiers) } - log.info("On demand cache refresh succeeded. Data: ${data}. Added ${loadBalancer ? 1 : 0} items to the cache.") + log.debug("On demand cache refresh succeeded. Data: ${data}. Added ${loadBalancer ? 1 : 0} items to the cache.") return new OnDemandAgent.OnDemandResult( sourceAgentType: getOnDemandAgentType(), @@ -152,7 +153,7 @@ abstract class AbstractGoogleLoadBalancerCachingAgent extends AbstractGoogleCach } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { def keyOwnedByThisAgent = { Map parsedKey -> parsedKey && parsedKey.account == accountName && parsedKey.region == region } @@ -214,7 +215,7 @@ abstract class AbstractGoogleLoadBalancerCachingAgent extends AbstractGoogleCach abstract List constructLoadBalancers(String onDemandLoadBalancerName = null) CacheResult buildCacheResult(CacheResultBuilder cacheResultBuilder, List googleLoadBalancers) { - log.info "Describing items in ${agentType}" + log.debug "Describing items in ${agentType}" googleLoadBalancers.each { GoogleLoadBalancer loadBalancer -> // TODO(duftler): Pull out getLoadBalancerKey() like getServerGroupKey()? 
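Note: the recurring change in this file is an enum move. The type formerly nested as OnDemandAgent.OnDemandType is now the top-level com.netflix.spinnaker.clouddriver.cache.OnDemandType and is imported directly. Condensed from the hunks above (no new behavior, just the resulting dispatch shape inside this agent):

    import com.netflix.spinnaker.clouddriver.cache.OnDemandType

    // An on-demand agent declares which refresh requests it services; clouddriver
    // routes an on-demand cache refresh to every agent whose handles() accepts
    // the requested type and cloud provider.
    @Override
    boolean handles(OnDemandType type, String cloudProvider) {
      type == OnDemandType.LoadBalancer && cloudProvider == GoogleCloudProvider.ID
    }

The log.info to log.debug demotions in the same file cut per-cycle noise from caching agents that run continuously.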
@@ -239,10 +240,10 @@ abstract class AbstractGoogleLoadBalancerCachingAgent extends AbstractGoogleCach } } - log.info "Caching ${cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keepSize()} load balancers in ${agentType}" - log.info "Caching ${cacheResultBuilder.namespace(INSTANCES.ns).keepSize()} instance relationships in ${agentType}" - log.info "Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}" - log.info "Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}" + log.debug "Caching ${cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keepSize()} load balancers in ${agentType}" + log.debug "Caching ${cacheResultBuilder.namespace(INSTANCES.ns).keepSize()} instance relationships in ${agentType}" + log.debug "Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}" + log.debug "Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}" return cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleBackendServiceCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleBackendServiceCachingAgent.groovy index 8f4b706c468..1b90f87d6c0 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleBackendServiceCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleBackendServiceCachingAgent.groovy @@ -17,8 +17,9 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.BackendService -import com.google.api.services.compute.model.Region +import com.google.api.services.compute.model.BackendServiceList import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult @@ -27,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.util.logging.Slf4j @@ -60,21 +62,45 @@ class GoogleBackendServiceCachingAgent extends AbstractGoogleCachingAgent { List loadBackendServices() { List ret = [] - def globalBackendServices = timeExecute( - compute.backendServices().list(project), - "compute.backendServices.list", - TAG_SCOPE, SCOPE_GLOBAL - ).items as List + + GoogleBackendServiceCachingAgent cachingAgent = this + List globalBackendServices = new PaginatedRequest(cachingAgent) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.backendServices().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(BackendServiceList t) { + return t.getNextPageToken(); + } + } + .timeExecute( + { BackendServiceList list -> list.getItems() }, + "compute.backendServices.list", + TAG_SCOPE, SCOPE_GLOBAL + ) if (globalBackendServices) { ret.addAll(globalBackendServices.collect { 
toGoogleBackendService(it, GoogleBackendService.BackendServiceKind.globalBackendService) }) } credentials.regions.collect { it.name }.each { String region -> - def regionBackendServices = timeExecute( - compute.regionBackendServices().list(project, region), - "compute.regionBackendServices.list", - TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region - )?.items as List + List regionBackendServices = new PaginatedRequest(cachingAgent) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.regionBackendServices().list(project, region).setPageToken(pageToken) + } + + @Override + String getNextPageToken(BackendServiceList t) { + return t.getNextPageToken(); + } + } + .timeExecute( + { BackendServiceList list -> list.getItems()}, + "compute.regionBackendServices.list", + TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region + ) if (regionBackendServices) { ret.addAll(regionBackendServices.collect { toGoogleBackendService(it, GoogleBackendService.BackendServiceKind.regionBackendService) }) } @@ -83,7 +109,7 @@ class GoogleBackendServiceCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List backendServiceList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -103,7 +129,7 @@ class GoogleBackendServiceCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(BACKEND_SERVICES.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(BACKEND_SERVICES.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleGlobalAddressCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleGlobalAddressCachingAgent.groovy index da165428e66..9c96525ad98 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleGlobalAddressCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleGlobalAddressCachingAgent.groovy @@ -17,13 +17,16 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.Address +import com.google.api.services.compute.model.AddressList import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.util.logging.Slf4j @@ -56,14 +59,26 @@ class GoogleGlobalAddressCachingAgent extends AbstractGoogleCachingAgent { } List
loadAddresses() { - timeExecute(compute.globalAddresses().list(project), - "compute.globalAddresses.list", - TAG_SCOPE, - SCOPE_GLOBAL).items as List + List
globalAddresses = new PaginatedRequest(this) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.globalAddresses().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(AddressList t) { + return t.getNextPageToken(); + } + } + .timeExecute( + { AddressList list -> list.getItems() }, + "compute.globalAddresses.list", TAG_SCOPE, SCOPE_GLOBAL + ) + return globalAddresses } private CacheResult buildCacheResult(ProviderCache _, List
addressList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -75,7 +90,7 @@ class GoogleGlobalAddressCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(ADDRESSES.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(ADDRESSES.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy index 3dc60a7a072..ffa74afd785 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy @@ -17,9 +17,8 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.services.compute.model.HealthCheck -import com.google.api.services.compute.model.HttpHealthCheck -import com.google.api.services.compute.model.HttpsHealthCheck +import com.google.api.services.compute.ComputeRequest +import com.google.api.services.compute.model.* import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult @@ -27,6 +26,7 @@ import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.util.logging.Slf4j @@ -70,15 +70,27 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { */ List loadHealthChecks() { List ret = [] - def httpHealthChecks = timeExecute( - compute.httpHealthChecks().list(project), - "compute.httpHealthChecks.list", - TAG_SCOPE, SCOPE_GLOBAL - ).items as List + + List httpHealthChecks = new PaginatedRequest(this) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.httpHealthChecks().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(HttpHealthCheckList t) { + return t.getNextPageToken(); + } + } + .timeExecute( + { HttpHealthCheckList list -> list.getItems() }, + "compute.httpHealthChecks.list", TAG_SCOPE, SCOPE_GLOBAL + ) httpHealthChecks.each { HttpHealthCheck hc -> ret << new GoogleHealthCheck( name: hc.getName(), selfLink: hc.getSelfLink(), + region: "global", healthCheckType: GoogleHealthCheck.HealthCheckType.HTTP, kind: GoogleHealthCheck.HealthCheckKind.httpHealthCheck, port: hc.getPort(), @@ -90,15 +102,26 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { ) } - def httpsHealthChecks = timeExecute( - compute.httpsHealthChecks().list(project), - "compute.httpsHealthChecks.list", - TAG_SCOPE, SCOPE_GLOBAL - ).items as List + List httpsHealthChecks = new PaginatedRequest(this) { + @Override + protected ComputeRequest request (String pageToken) { + return 
compute.httpsHealthChecks().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(HttpsHealthCheckList t) { + return t.getNextPageToken(); + } + } + .timeExecute( + { HttpsHealthCheckList list -> list.getItems() }, + "compute.httpsHealthChecks.list", TAG_SCOPE, SCOPE_GLOBAL + ) httpsHealthChecks.each { HttpsHealthCheck hc -> ret << new GoogleHealthCheck( name: hc.getName(), selfLink: hc.getSelfLink(), + region: "global", healthCheckType: GoogleHealthCheck.HealthCheckType.HTTPS, kind: GoogleHealthCheck.HealthCheckKind.httpsHealthCheck, port: hc.getPort(), @@ -110,60 +133,123 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { ) } - def healthChecks = timeExecute( - compute.healthChecks().list(project), - "compute.healthChecks.list", - TAG_SCOPE, SCOPE_GLOBAL - ).items as List - healthChecks.each { HealthCheck hc -> - def newHC = new GoogleHealthCheck( - name: hc.getName(), - selfLink: hc.getSelfLink(), - kind: GoogleHealthCheck.HealthCheckKind.healthCheck, - checkIntervalSec: hc.getCheckIntervalSec(), - timeoutSec: hc.getTimeoutSec(), - healthyThreshold: hc.getHealthyThreshold(), - unhealthyThreshold: hc.getUnhealthyThreshold() - ) + List healthChecks = new PaginatedRequest(this) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.healthChecks().list(project).setPageToken(pageToken) + } - // Health checks of kind 'healthCheck' are all nested -- the actual health check is contained - // in a field inside a wrapper HealthCheck object. The wrapper object specifies the type of nested - // health check as a string, and the proper field is populated based on the type. - switch(hc.getType()) { - case 'HTTP': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTP - newHC.port = hc.getHttpHealthCheck().getPort() - newHC.requestPath = hc.getHttpHealthCheck().getRequestPath() - break - case 'HTTPS': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTPS - newHC.port = hc.getHttpsHealthCheck().getPort() - newHC.requestPath = hc.getHttpsHealthCheck().getRequestPath() - break - case 'TCP': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.TCP - newHC.port = hc.getTcpHealthCheck().getPort() - break - case 'SSL': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.SSL - newHC.port = hc.getSslHealthCheck().getPort() - break - case 'UDP': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.UDP - newHC.port = hc.getUdpHealthCheck().getPort() - break - default: - log.warn("Health check ${hc.getName()} has unknown type ${hc.getType()}.") - return - break + @Override + String getNextPageToken(HealthCheckList t) { + return t.getNextPageToken(); } - ret << newHC + } + .timeExecute( + { HealthCheckList list -> list.getItems() }, + "compute.healthChecks.list", TAG_SCOPE, SCOPE_GLOBAL + ) + ret.addAll(healthChecks.findResults { toGoogleHealthCheck(it, "global") }) + def cachingAgent = this + credentials.regions.collect { it.name }.each { String region -> + List regionHealthChecks = new PaginatedRequest(cachingAgent) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.regionHealthChecks().list(project, region).setPageToken(pageToken) + } + + @Override + String getNextPageToken(HealthCheckList t) { + return t.getNextPageToken(); + } + } + .timeExecute( + { HealthCheckList list -> list.getItems() }, + "compute.regionHealthChecks.list", TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region + ) + ret.addAll(regionHealthChecks.findResults { toGoogleHealthCheck(it, 
region) }) } ret } + private static GoogleHealthCheck toGoogleHealthCheck(HealthCheck hc, String region) { + def newHC = new GoogleHealthCheck( + name: hc.getName(), + selfLink: hc.getSelfLink(), + region: region, + kind: GoogleHealthCheck.HealthCheckKind.healthCheck, + checkIntervalSec: hc.getCheckIntervalSec(), + timeoutSec: hc.getTimeoutSec(), + healthyThreshold: hc.getHealthyThreshold(), + unhealthyThreshold: hc.getUnhealthyThreshold() + ) + + // Health checks of kind 'healthCheck' are all nested -- the actual health check is contained + // in a field inside a wrapper HealthCheck object. The wrapper object specifies the type of nested + // health check as a string, and the proper field is populated based on the type. + Integer port + switch(hc.getType()) { + case 'HTTP': + port = hc.getHttpHealthCheck().getPort() + if (port == null) { + log.warn("HTTP health check ${hc.getName()} has a null port, ignoring.") + return null + } + + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTP + newHC.port = port + newHC.requestPath = hc.getHttpHealthCheck().getRequestPath() + break + case 'HTTPS': + port = hc.getHttpsHealthCheck().getPort() + if (port == null) { + log.warn("HTTPS health check ${hc.getName()} has a null port, ignoring.") + return null + } + + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTPS + newHC.port = port + newHC.requestPath = hc.getHttpsHealthCheck().getRequestPath() + break + case 'TCP': + port = hc.getTcpHealthCheck().getPort() + if (port == null) { + log.warn("TCP health check ${hc.getName()} has a null port, ignoring.") + return null + } + + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.TCP + newHC.port = port + break + case 'SSL': + port = hc.getSslHealthCheck().getPort() + if (port == null) { + log.warn("SSL health check ${hc.getName()} has a null port, ignoring.") + return null + } + + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.SSL + newHC.port = port + break + case 'UDP': + port = hc.getUdpHealthCheck().getPort() + if (port == null) { + log.warn("UDP health check ${hc.getName()} has a null port, ignoring.") + return null + } + + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.UDP + newHC.port = port + break + default: + log.warn("Health check ${hc.getName()} has unknown type ${hc.getType()}.") + return null + break + } + return newHC + } + private CacheResult buildCacheResult(ProviderCache _, List healthCheckList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -175,7 +261,7 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(HEALTH_CHECKS.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(HEALTH_CHECKS.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpHealthCheckCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpHealthCheckCachingAgent.groovy index 085f2d14cdf..5c6423e37fe 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpHealthCheckCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpHealthCheckCachingAgent.groovy @@ -62,7 +62,7 @@ class 
GoogleHttpHealthCheckCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List httpHealthCheckList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -74,7 +74,7 @@ class GoogleHttpHealthCheckCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(HTTP_HEALTH_CHECKS.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(HTTP_HEALTH_CHECKS.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpLoadBalancerCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpLoadBalancerCachingAgent.groovy index 1e692de8648..2c2bf523731 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpLoadBalancerCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHttpLoadBalancerCachingAgent.groovy @@ -17,10 +17,10 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.http.HttpHeaders +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.* import com.netflix.spectator.api.Registry import com.netflix.spinnaker.clouddriver.google.cache.Keys @@ -31,8 +31,11 @@ import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerH import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* import com.netflix.spinnaker.clouddriver.google.provider.agent.util.GroupHealthRequest import com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import groovy.util.logging.Slf4j +import org.slf4j.LoggerFactory @Slf4j class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachingAgent { @@ -45,7 +48,7 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi */ Map> bsNameToGroupHealthsMap = [:] Set queuedBsGroupHealthRequests = new HashSet<>() - List resolutions = [] + Set resolutions = new HashSet<>() GoogleHttpLoadBalancerCachingAgent(String clouddriverUserAgentApplicationName, @@ -64,15 +67,15 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi List loadBalancers = [] List failedLoadBalancers = [] - BatchRequest forwardingRulesRequest = buildBatchRequest() - BatchRequest targetProxyRequest = buildBatchRequest() - BatchRequest urlMapRequest = buildBatchRequest() - BatchRequest groupHealthRequest = buildBatchRequest() + GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest() + GoogleBatchRequest targetProxyRequest = buildGoogleBatchRequest() + GoogleBatchRequest urlMapRequest = buildGoogleBatchRequest() + GoogleBatchRequest groupHealthRequest = 
buildGoogleBatchRequest() // Reset the local getHealth caches/queues each caching agent cycle. bsNameToGroupHealthsMap = [:] queuedBsGroupHealthRequests = new HashSet<>() - resolutions = [] + resolutions = new HashSet<>() List projectBackendServices = GCEUtil.fetchBackendServices(this, compute, project) List projectHttpHealthChecks = GCEUtil.fetchHttpHealthChecks(this, compute, project) @@ -93,10 +96,20 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi if (onDemandLoadBalancerName) { ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback = forwardingRuleCallbacks.newForwardingRuleSingletonCallback() - compute.globalForwardingRules().get(project, onDemandLoadBalancerName).queue(forwardingRulesRequest, frCallback) + forwardingRulesRequest.queue(compute.globalForwardingRules().get(project, onDemandLoadBalancerName), frCallback) } else { ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback = forwardingRuleCallbacks.newForwardingRuleListCallback() - compute.globalForwardingRules().list(project).queue(forwardingRulesRequest, frlCallback) + new PaginatedRequest(this) { + @Override + ComputeRequest request(String pageToken) { + return compute.globalForwardingRules().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(ForwardingRuleList forwardingRuleList) { + return forwardingRuleList.getNextPageToken() + } + }.queue(forwardingRulesRequest, frlCallback, "HttpLoadBalancerCaching.forwardingRules") } executeIfRequestsAreQueued(forwardingRulesRequest, "HttpLoadBalancerCaching.forwardingRules") @@ -129,11 +142,11 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi List loadBalancers List failedLoadBalancers = [] - BatchRequest targetProxyRequest + GoogleBatchRequest targetProxyRequest // Pass through objects - BatchRequest urlMapRequest - BatchRequest groupHealthRequest + GoogleBatchRequest urlMapRequest + GoogleBatchRequest groupHealthRequest List projectBackendServices List projectHttpHealthChecks List projectHttpsHealthChecks @@ -179,6 +192,11 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi } } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + LoggerFactory.getLogger(this.class).error e.getMessage() + } } void cacheRemainderOfLoadBalancerResourceGraph(ForwardingRule forwardingRule) { @@ -221,13 +239,13 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi switch (Utils.getTargetProxyType(forwardingRule.target)) { case GoogleTargetProxyType.HTTP: - compute.targetHttpProxies().get(project, targetProxyName).queue(targetProxyRequest, targetProxyCallback) + targetProxyRequest.queue(compute.targetHttpProxies().get(project, targetProxyName), targetProxyCallback) break case GoogleTargetProxyType.HTTPS: - compute.targetHttpsProxies().get(project, targetProxyName).queue(targetProxyRequest, targetHttpsProxyCallback) + targetProxyRequest.queue(compute.targetHttpsProxies().get(project, targetProxyName), targetHttpsProxyCallback) break default: - log.info("Non-Http target type found for global forwarding rule ${forwardingRule.name}") + log.debug("Non-Http target type found for global forwarding rule ${forwardingRule.name}") break } } @@ -236,10 +254,10 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi // Note: The TargetProxyCallbacks assume that each proxy points to a unique urlMap. 
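Note: this file swaps the raw google-api-client BatchRequest for the project's GoogleBatchRequest wrapper, inverting the queueing direction: the batch now receives requests (urlMapRequest.queue(computeCall, callback)) instead of each Compute request queueing itself into a batch it was handed. A minimal sketch of that contract, assuming the wrapper only buffers request/callback pairs; the real com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest is not shown in this diff and also handles execution, the user-agent header, and batch sizing:

    import com.google.api.client.googleapis.batch.json.JsonBatchCallback
    import com.google.api.services.compute.ComputeRequest

    // Sketch only: buffer work until the agent calls executeIfRequestsAreQueued().
    class GoogleBatchRequestSketch {
      private final List<Map> queued = []

      void queue(ComputeRequest request, JsonBatchCallback callback) {
        queued << [request: request, callback: callback]  // defer, do not send yet
      }

      int size() { queued.size() }  // lets callers skip executing empty batches
    }

The visible win at the call sites is symmetry: the agent owns one batch object per concern (forwarding rules, target proxies, url maps, group health) and hands each Compute call to it.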
class TargetHttpsProxyCallback extends JsonBatchCallback implements FailedSubjectChronicler { GoogleHttpLoadBalancer googleLoadBalancer - BatchRequest urlMapRequest + GoogleBatchRequest urlMapRequest // Pass through objects - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest List projectBackendServices List projectHttpHealthChecks List projectHttpsHealthChecks @@ -263,7 +281,15 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi projectHttpsHealthChecks: projectHttpsHealthChecks, projectHealthChecks: projectHealthChecks ) - compute.urlMaps().get(project, urlMapName).queue(urlMapRequest, urlMapCallback) + urlMapRequest.queue(compute.urlMaps().get(project, urlMapName), urlMapCallback) + } + } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. ") + if (failedSubjects != null) { + failedSubjects << subject } } } @@ -271,10 +297,10 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi // Note: The TargetProxyCallbacks assume that each proxy points to a unique urlMap. class TargetProxyCallback extends JsonBatchCallback implements FailedSubjectChronicler { GoogleHttpLoadBalancer googleLoadBalancer - BatchRequest urlMapRequest + GoogleBatchRequest urlMapRequest // Pass through objects - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest List projectBackendServices List projectHttpHealthChecks List projectHttpsHealthChecks @@ -295,7 +321,15 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi projectHttpsHealthChecks: projectHttpsHealthChecks, projectHealthChecks: projectHealthChecks ) - compute.urlMaps().get(project, urlMapName).queue(urlMapRequest, urlMapCallback) + urlMapRequest.queue(compute.urlMaps().get(project, urlMapName), urlMapCallback) + } + } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. ") + if (failedSubjects != null) { + failedSubjects << subject } } } @@ -306,7 +340,7 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi List projectHttpHealthChecks List projectHttpsHealthChecks List projectHealthChecks - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest @Override void onSuccess(UrlMap urlMap, HttpHeaders responseHeaders) throws IOException { @@ -364,6 +398,14 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi handleBackendService(service, googleLoadBalancer, projectHttpHealthChecks, projectHttpsHealthChecks, projectHealthChecks, groupHealthRequest) } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. 
") + if (failedSubjects != null) { + failedSubjects << subject + } + } } private void handleBackendService(BackendService backendService, @@ -371,7 +413,7 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi List httpHealthChecks, List httpsHealthChecks, List healthChecks, - BatchRequest groupHealthRequest) { + GoogleBatchRequest groupHealthRequest) { if (!backendService) { return } @@ -409,9 +451,9 @@ class GoogleHttpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachi // The groupHealthCallback updates the local cache. log.debug("Queueing a batch call for getHealth(): {}", ghr) queuedBsGroupHealthRequests.add(ghr) - compute.backendServices() - .getHealth(project, backendService.name, resourceGroup) - .queue(groupHealthRequest, groupHealthCallback) + groupHealthRequest + .queue(compute.backendServices().getHealth(project, backendService.name as String, resourceGroup), + groupHealthCallback) } else { log.debug("Passing, batch call result cached for getHealth(): {}", ghr) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgent.groovy index 3b42c0de65c..397da1b3681 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgent.groovy @@ -17,13 +17,13 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback +import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.http.HttpHeaders import com.google.api.services.compute.Compute import com.google.api.services.compute.model.Image import com.google.api.services.compute.model.ImageList -import com.netflix.servo.util.VisibleForTesting +import com.google.common.annotations.VisibleForTesting import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult @@ -31,8 +31,10 @@ import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.util.logging.Slf4j +import org.slf4j.LoggerFactory import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.IMAGES @@ -83,7 +85,7 @@ class GoogleImageCachingAgent extends AbstractGoogleCachingAgent { allImageProjects.each { imageProjectToNextPageTokenMap[it] = null } while (imageProjectToNextPageTokenMap) { - BatchRequest imageListBatch = buildBatchRequest() + GoogleBatchRequest imageListBatch = buildGoogleBatchRequest() AllImagesCallback imageListCallback = new AllImagesCallback(imageProjectToNextPageTokenMap: imageProjectToNextPageTokenMap, imageList: imageList) @@ -94,7 +96,7 @@ class GoogleImageCachingAgent extends 
AbstractGoogleCachingAgent { imagesList = imagesList.setPageToken(pageToken) } - imagesList.queue(imageListBatch, imageListCallback) + imageListBatch.queue(imagesList, imageListCallback) } executeIfRequestsAreQueued(imageListBatch, "ImageCaching.image") @@ -104,7 +106,7 @@ class GoogleImageCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List imageList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -116,7 +118,7 @@ class GoogleImageCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(IMAGES.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(IMAGES.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } @@ -147,5 +149,10 @@ class GoogleImageCachingAgent extends AbstractGoogleCachingAgent { imageProjectToNextPageTokenMap.remove(imageProject) } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + LoggerFactory.getLogger(this.class).error e.getMessage() + } } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInstanceCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInstanceCachingAgent.groovy index c59915146d3..7789ae77cc8 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInstanceCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInstanceCachingAgent.groovy @@ -16,6 +16,8 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult import com.netflix.spinnaker.cats.provider.ProviderCache @@ -23,11 +25,13 @@ import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleInstance +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.transform.InheritConstructors import groovy.util.logging.Slf4j import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.CLUSTERS import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.INSTANCES import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS @@ -37,10 +41,21 @@ class GoogleInstanceCachingAgent extends AbstractGoogleCachingAgent { final Set providedDataTypes = [ AUTHORITATIVE.forType(INSTANCES.ns), INFORMATIVE.forType(SERVER_GROUPS.ns), + INFORMATIVE.forType(CLUSTERS.ns) ] String agentType = "${accountName}/global/${GoogleInstanceCachingAgent.simpleName}" + GoogleInstanceCachingAgent(String clouddriverUserAgentApplicationName, + GoogleNamedAccountCredentials credentials, + ObjectMapper objectMapper, + Registry registry){ + super(clouddriverUserAgentApplicationName, + credentials, + objectMapper, + registry) + } + 
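Note: the explicit constructor added above (GoogleNetworkCachingAgent receives the same treatment later in this diff) simply delegates to AbstractGoogleCachingAgent, whose main constructor, shown earlier in this diff, resolves the per-account Namer. Restated on its own, the lookup chain is:

    // From AbstractGoogleCachingAgent's constructor: resolve the Namer used to
    // derive monikers for labeled GCE resources, scoped by provider, then
    // account, then resource type. The @VisibleForTesting no-arg constructor
    // pins GoogleLabeledResourceNamer directly instead.
    Namer<GoogleLabeledResource> naming = NamerRegistry.lookup()
        .withProvider(GoogleCloudProvider.ID)
        .withAccount(credentials.name)
        .withResource(GoogleLabeledResource)

loadData() below then reads instance.view.moniker to relate each cached instance to a CLUSTERS cache key.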
@Override CacheResult loadData(ProviderCache providerCache) { List instances = GCEUtil.fetchInstances(this, credentials) @@ -51,13 +66,16 @@ class GoogleInstanceCachingAgent extends AbstractGoogleCachingAgent { CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() googleInstances.each { GoogleInstance instance -> + def moniker = instance.view.moniker + def clusterKey = Keys.getClusterKey(accountName, moniker.app, moniker.cluster) def instanceKey = Keys.getInstanceKey(accountName, instance.region, instance.name) cacheResultBuilder.namespace(INSTANCES.ns).keep(instanceKey).with { attributes = objectMapper.convertValue(instance, ATTRIBUTES) + relationships[CLUSTERS.ns].add(clusterKey) } } - log.info("Caching ${cacheResultBuilder.namespace(INSTANCES.ns).keepSize()} instances in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(INSTANCES.ns).keepSize()} instances in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalLoadBalancerCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalLoadBalancerCachingAgent.groovy index 2a490caf74d..8f93d64765c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalLoadBalancerCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalLoadBalancerCachingAgent.groovy @@ -17,10 +17,10 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.http.HttpHeaders +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.* import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.provider.ProviderCache @@ -30,8 +30,11 @@ import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* import com.netflix.spinnaker.clouddriver.google.provider.agent.util.GroupHealthRequest import com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import groovy.util.logging.Slf4j +import org.slf4j.LoggerFactory @Slf4j class GoogleInternalLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachingAgent { @@ -44,7 +47,7 @@ class GoogleInternalLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerC */ Map bsNameToGroupHealthsMap = [:] Set queuedBsGroupHealthRequests = new HashSet<>() - List resolutions = [] + Set resolutions = new HashSet<>() GoogleInternalLoadBalancerCachingAgent(String clouddriverUserAgentApplicationName, GoogleNamedAccountCredentials credentials, @@ -59,7 +62,7 @@ class GoogleInternalLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerC } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { // Just let 
GoogleNetworkLoadBalancerCachingAgent return the pending regional on demand requests. [] } @@ -69,13 +72,13 @@ class GoogleInternalLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerC List<GoogleInternalLoadBalancer> loadBalancers = [] List<String> failedLoadBalancers = [] - BatchRequest forwardingRulesRequest = buildBatchRequest() - BatchRequest groupHealthRequest = buildBatchRequest() + GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest() + GoogleBatchRequest groupHealthRequest = buildGoogleBatchRequest() // Reset the local getHealth caches/queues each caching agent cycle. bsNameToGroupHealthsMap = [:] queuedBsGroupHealthRequests = new HashSet<>() - resolutions = [] + resolutions = new HashSet<>() List<BackendService> projectRegionBackendServices = GCEUtil.fetchRegionBackendServices(this, compute, project, region) List<HttpHealthCheck> projectHttpHealthChecks = GCEUtil.fetchHttpHealthChecks(this, compute, project) @@ -94,10 +97,20 @@ if (onDemandLoadBalancerName) { ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback = forwardingRuleCallbacks.newForwardingRuleSingletonCallback() - compute.forwardingRules().get(project, region, onDemandLoadBalancerName).queue(forwardingRulesRequest, frCallback) + forwardingRulesRequest.queue(compute.forwardingRules().get(project, region, onDemandLoadBalancerName), frCallback) } else { ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback = forwardingRuleCallbacks.newForwardingRuleListCallback() + new PaginatedRequest<ForwardingRuleList>(this) { + @Override + ComputeRequest<ForwardingRuleList> request(String pageToken) { + return compute.forwardingRules().list(project, region).setPageToken(pageToken) + } + + @Override + String getNextPageToken(ForwardingRuleList forwardingRuleList) { + return forwardingRuleList.getNextPageToken() + } + }.queue(forwardingRulesRequest, frlCallback, "InternalLoadBalancerCaching.forwardingRules") } executeIfRequestsAreQueued(forwardingRulesRequest, "InternalLoadBalancerCaching.forwardingRules") @@ -117,7 +130,7 @@ List<String> failedLoadBalancers = [] // Pass through objects - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest List<BackendService> projectRegionBackendServices List<HttpHealthCheck> projectHttpHealthChecks List<HttpsHealthCheck> projectHttpsHealthChecks @@ -162,6 +175,12 @@ } } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + LoggerFactory.getLogger(this.class).error e.getMessage() + } + } void cacheRemainderOfLoadBalancerResourceGraph(ForwardingRule forwardingRule) { @@ -197,7 +216,7 @@ List<HttpHealthCheck> httpHealthChecks, List<HttpsHealthCheck> httpsHealthChecks, List<HealthCheck> healthChecks, - BatchRequest groupHealthRequest) { + GoogleBatchRequest groupHealthRequest) { def groupHealthCallback = new GroupHealthCallback(backendServiceName: backendService.name) GoogleBackendService newService = new GoogleBackendService( @@ -224,9 +243,9 @@ // The groupHealthCallback updates the local cache.
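Note: context for the queueing just below. Each getHealth() call is keyed by a GroupHealthRequest and recorded in the queuedBsGroupHealthRequests set, so one backend service and resource group pair costs at most one batch entry per caching cycle, even when several load balancers share a backend service. Condensed from this hunk; GroupHealthRequest is assumed to implement equals/hashCode over its identifying fields, which is what makes it usable as a set key:

    // Queue-once guard around the batched getHealth() call.
    if (!queuedBsGroupHealthRequests.contains(ghr)) {
      queuedBsGroupHealthRequests.add(ghr)
      groupHealthRequest.queue(
          compute.regionBackendServices().getHealth(project, region, backendService.name as String, resourceGroup),
          groupHealthCallback)
    } else {
      log.debug("Passing, batch call result cached for getHealth(): {}", ghr)
    }

The hunk continues with this guard's queue branch: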
log.debug("Queueing a batch call for getHealth(): {}", ghr) queuedBsGroupHealthRequests.add(ghr) - compute.regionBackendServices() - .getHealth(project, region, backendService.name, resourceGroup) - .queue(groupHealthRequest, groupHealthCallback) + groupHealthRequest + .queue(compute.regionBackendServices().getHealth(project, region, backendService.name as String, resourceGroup), + groupHealthCallback) } else { log.debug("Passing, batch call result cached for getHealth(): {}", ghr) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkCachingAgent.groovy index ebe3263367d..4341b245aef 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkCachingAgent.groovy @@ -16,13 +16,16 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent +import com.fasterxml.jackson.databind.ObjectMapper import com.google.api.services.compute.model.Network +import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.AgentDataType import com.netflix.spinnaker.cats.agent.CacheResult import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import groovy.transform.InheritConstructors import groovy.util.logging.Slf4j @@ -39,6 +42,17 @@ class GoogleNetworkCachingAgent extends AbstractGoogleCachingAgent { String agentType = "${accountName}/global/${GoogleNetworkCachingAgent.simpleName}" + GoogleNetworkCachingAgent(String clouddriverUserAgentApplicationName, + GoogleNamedAccountCredentials credentials, + ObjectMapper objectMapper, + Registry registry){ + super(clouddriverUserAgentApplicationName, + credentials, + objectMapper, + registry) + } + + @Override CacheResult loadData(ProviderCache providerCache) { List networkList = loadNetworks() @@ -62,7 +76,7 @@ class GoogleNetworkCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List networkList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -74,7 +88,7 @@ class GoogleNetworkCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(NETWORKS.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(NETWORKS.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkLoadBalancerCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkLoadBalancerCachingAgent.groovy index a73275d50d7..0b0bb2ea26b 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkLoadBalancerCachingAgent.groovy +++ 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleNetworkLoadBalancerCachingAgent.groovy @@ -17,10 +17,10 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.http.HttpHeaders +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.ForwardingRule import com.google.api.services.compute.model.ForwardingRuleList import com.google.api.services.compute.model.InstanceReference @@ -31,9 +31,12 @@ import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleNetworkLoadBalancer import com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.provider.agent.util.TargetPoolHealthRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import groovy.util.logging.Slf4j +import org.slf4j.LoggerFactory @Slf4j class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachingAgent { @@ -46,7 +49,7 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa */ Map tpNameToInstanceHealthsMap = [:] Set queuedTpHealthRequests = new HashSet<>() - List resolutions = [] + Set resolutions = new HashSet<>() GoogleNetworkLoadBalancerCachingAgent(String clouddriverUserAgentApplicationName, GoogleNamedAccountCredentials credentials, @@ -68,12 +71,12 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa // Reset the local getHealth caches/queues each caching agent cycle. 
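One small but deliberate change repeated across these agents: resolutions becomes a HashSet instead of a List. The same load balancer and target can be discovered through more than one forwarding rule, and a set collapses the duplicate health resolutions before they are replayed into the cache. That only works if the element type has value equality, so this sketch assumes (as the diff implies) that LoadBalancerHealthResolution defines equals/hashCode over its identifying fields; ResolutionSketch below is a hypothetical stand-in.

    import groovy.transform.Canonical

    @Canonical  // generates a tuple constructor plus equals/hashCode over the fields
    class ResolutionSketch {
      String loadBalancerName
      String targetName
    }

    def resolutions = new HashSet<ResolutionSketch>()
    resolutions << new ResolutionSketch('lb-1', 'tp-1')
    resolutions << new ResolutionSketch('lb-1', 'tp-1')  // duplicate discovery, collapsed
    assert resolutions.size() == 1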
tpNameToInstanceHealthsMap = [:] queuedTpHealthRequests = new HashSet<>() - resolutions = [] + resolutions = new HashSet<>() - BatchRequest forwardingRulesRequest = buildBatchRequest() - BatchRequest targetPoolsRequest = buildBatchRequest() - BatchRequest httpHealthChecksRequest = buildBatchRequest() - BatchRequest instanceHealthRequest = buildBatchRequest() + GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest() + GoogleBatchRequest targetPoolsRequest = buildGoogleBatchRequest() + GoogleBatchRequest httpHealthChecksRequest = buildGoogleBatchRequest() + GoogleBatchRequest instanceHealthRequest = buildGoogleBatchRequest() ForwardingRuleCallbacks forwardingRuleCallbacks = new ForwardingRuleCallbacks( loadBalancers: loadBalancers, @@ -85,10 +88,20 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa if (onDemandLoadBalancerName) { ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback = forwardingRuleCallbacks.newForwardingRuleSingletonCallback() - compute.forwardingRules().get(project, region, onDemandLoadBalancerName).queue(forwardingRulesRequest, frCallback) + forwardingRulesRequest.queue(compute.forwardingRules().get(project, region, onDemandLoadBalancerName), frCallback) } else { ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback = forwardingRuleCallbacks.newForwardingRuleListCallback() - compute.forwardingRules().list(project, region).queue(forwardingRulesRequest, frlCallback) + new PaginatedRequest(this) { + @Override + protected ComputeRequest request(String pageToken) { + return compute.forwardingRules().list(project, region).setPageToken(pageToken) + } + + @Override + protected String getNextPageToken(ForwardingRuleList forwardingRuleList) { + return forwardingRuleList.getNextPageToken() + } + }.queue(forwardingRulesRequest, frlCallback, "NetworkLoadBalancerCaching.forwardingRules") } executeIfRequestsAreQueued(forwardingRulesRequest, "NetworkLoadBalancerCaching.forwardingRules") @@ -109,11 +122,11 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa List loadBalancers List failedLoadBalancers = [] - BatchRequest targetPoolsRequest + GoogleBatchRequest targetPoolsRequest // Pass through objects - BatchRequest httpHealthChecksRequest - BatchRequest instanceHealthRequest + GoogleBatchRequest httpHealthChecksRequest + GoogleBatchRequest instanceHealthRequest ForwardingRuleSingletonCallback newForwardingRuleSingletonCallback() { return new ForwardingRuleSingletonCallback() @@ -154,6 +167,11 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa } } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + LoggerFactory.getLogger(this.class).error e.getMessage() + } } void cacheRemainderOfLoadBalancerResourceGraph(ForwardingRule forwardingRule) { @@ -181,7 +199,7 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa failedSubjects: failedLoadBalancers ) - compute.targetPools().get(project, region, targetPoolName).queue(targetPoolsRequest, targetPoolsCallback) + targetPoolsRequest.queue(compute.targetPools().get(project, region, targetPoolName), targetPoolsCallback) } } } @@ -190,12 +208,13 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa GoogleNetworkLoadBalancer googleLoadBalancer - BatchRequest httpHealthChecksRequest - BatchRequest instanceHealthRequest + GoogleBatchRequest httpHealthChecksRequest + GoogleBatchRequest instanceHealthRequest 
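For orientation, the callbacks in this agent form a tiered graph walk: forwarding rules queue target-pool reads, which queue HTTP health-check and instance-health reads, and executeIfRequestsAreQueued flushes one tier per batch round-trip. A toy model of that control flow, with made-up names:

    // Requests are modeled as closures; each tier may queue work into the next.
    class MiniBatch {
      List<Closure> queued = []
      void queue(Closure c) { queued << c }
      void executeIfQueued(String tag) {
        if (!queued) return                  // mirrors executeIfRequestsAreQueued
        println "executing ${queued.size()} call(s) for ${tag}"
        def batch = queued
        queued = []
        batch.each { it() }
      }
    }

    def targetPools = new MiniBatch()
    def instanceHealth = new MiniBatch()
    targetPools.queue { instanceHealth.queue { println 'getHealth resolved' } }
    targetPools.executeIfQueued('NetworkLoadBalancerCaching.targetPools')
    instanceHealth.executeIfQueued('NetworkLoadBalancerCaching.instanceHealth')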
@Override void onSuccess(TargetPool targetPool, HttpHeaders responseHeaders) throws IOException { googleLoadBalancer.targetPool = targetPool?.selfLink + googleLoadBalancer.sessionAffinity = targetPool?.sessionAffinity boolean hasHealthChecks = targetPool?.healthChecks targetPool?.healthChecks?.each { def healthCheckUrl -> def localHealthCheckName = Utils.getLocalName(healthCheckUrl) @@ -207,7 +226,7 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa failedSubjects: failedSubjects ) - compute.httpHealthChecks().get(project, localHealthCheckName).queue(httpHealthChecksRequest, httpHealthCheckCallback) + httpHealthChecksRequest.queue(compute.httpHealthChecks().get(project, localHealthCheckName), httpHealthCheckCallback) } if (!hasHealthChecks) { new TargetPoolInstanceHealthCallInvoker(googleLoadBalancer: googleLoadBalancer, @@ -215,6 +234,14 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa instanceHealthRequest: instanceHealthRequest).doCall() } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. ") + if (failedSubjects != null) { + failedSubjects << subject + } + } } class HttpHealthCheckCallback extends JsonBatchCallback implements FailedSubjectChronicler { @@ -222,7 +249,7 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa GoogleNetworkLoadBalancer googleLoadBalancer def targetPool - BatchRequest instanceHealthRequest + GoogleBatchRequest instanceHealthRequest @Override void onSuccess(HttpHealthCheck httpHealthCheck, HttpHeaders responseHeaders) throws IOException { @@ -242,6 +269,14 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa targetPool: targetPool, instanceHealthRequest: instanceHealthRequest).doCall() } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. ") + if (failedSubjects != null) { + failedSubjects << subject + } + } } class TargetPoolInstanceHealthCallInvoker { @@ -249,7 +284,7 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa GoogleNetworkLoadBalancer googleLoadBalancer def targetPool - BatchRequest instanceHealthRequest + GoogleBatchRequest instanceHealthRequest def doCall() { def region = Utils.getLocalName(targetPool.region as String) @@ -265,9 +300,9 @@ class GoogleNetworkLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCa // The groupHealthCallback updates the local cache along with running handleHealthObject. 
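Two behavioral notes on this hunk: the target-pool callback now also copies sessionAffinity onto the cached model, and the callbacks gain onFailure overrides following the failed-subject convention, where a read failure no longer aborts the whole caching cycle; the subject is logged and recorded so only that load balancer is dropped from the result (via the findAll over failed subjects in getLoadBalancers). The convention in miniature, with illustrative names:

    // Hypothetical reduction of the FailedSubjectChronicler-style onFailure above.
    class FailureChronicleSketch {
      String subject
      List<String> failedSubjects

      void onFailure(String platformError) {
        println "Failed to read a component of subject ${subject}: ${platformError}"
        failedSubjects?.add(subject)  // null-safe guard, matching the diff
      }
    }

    def failed = []
    new FailureChronicleSketch(subject: 'lb-1', failedSubjects: failed).onFailure('404 not found')
    assert failed == ['lb-1']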
log.debug("Queueing a batch call for getHealth(): {}", tphr) queuedTpHealthRequests.add(tphr) - compute.targetPools() - .getHealth(project, region, targetPoolName, instanceReference) - .queue(instanceHealthRequest, instanceHealthCallback) + instanceHealthRequest + .queue(compute.targetPools().getHealth(project, region, targetPoolName, instanceReference), + instanceHealthCallback) } else { log.debug("Passing, batch call result cached for getHealth(): {}", tphr) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalAddressCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalAddressCachingAgent.groovy index 22612313c3c..5065b2b348c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalAddressCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalAddressCachingAgent.groovy @@ -67,7 +67,7 @@ class GoogleRegionalAddressCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List
addressList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -79,7 +79,7 @@ class GoogleRegionalAddressCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(ADDRESSES.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(ADDRESSES.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgent.groovy deleted file mode 100644 index 21bd19cf19f..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgent.groovy +++ /dev/null @@ -1,524 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest -import com.google.api.client.googleapis.batch.json.JsonBatchCallback -import com.google.api.client.googleapis.json.GoogleJsonError -import com.google.api.client.http.HttpHeaders -import com.google.api.services.compute.model.* -import com.netflix.frigga.Names -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider -import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits -import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.google.cache.Keys -import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil -import com.netflix.spinnaker.clouddriver.google.model.GoogleDistributionPolicy -import com.netflix.spinnaker.clouddriver.google.model.GoogleInstance -import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup -import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils -import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import groovy.transform.Canonical -import groovy.util.logging.Slf4j - -import java.util.concurrent.TimeUnit - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static 
com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.* - -@Slf4j -class GoogleRegionalServerGroupCachingAgent extends AbstractGoogleCachingAgent implements OnDemandAgent, GoogleExecutorTraits { - final String region - final long maxMIGPageSize - - final Set providedDataTypes = [ - AUTHORITATIVE.forType(SERVER_GROUPS.ns), - AUTHORITATIVE.forType(APPLICATIONS.ns), - INFORMATIVE.forType(CLUSTERS.ns), - INFORMATIVE.forType(LOAD_BALANCERS.ns), - ] as Set - - String agentType = "${accountName}/${region}/${GoogleRegionalServerGroupCachingAgent.simpleName}" - String onDemandAgentType = "${agentType}-OnDemand" - final OnDemandMetricsSupport metricsSupport - - GoogleRegionalServerGroupCachingAgent(String clouddriverUserAgentApplicationName, - GoogleNamedAccountCredentials credentials, - ObjectMapper objectMapper, - Registry registry, - String region, - long maxMIGPageSize) { - super(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - this.region = region - this.maxMIGPageSize = maxMIGPageSize - this.metricsSupport = new OnDemandMetricsSupport( - registry, - this, - "${GoogleCloudProvider.ID}:${OnDemandAgent.OnDemandType.ServerGroup}") - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - def cacheResultBuilder = new CacheResultBuilder(startTime: System.currentTimeMillis()) - - List serverGroups = getServerGroups(providerCache) - def serverGroupKeys = serverGroups.collect { getServerGroupKey(it) } - - providerCache.getAll(ON_DEMAND.ns, serverGroupKeys).each { CacheData cacheData -> - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // managed instance groups. Furthermore, cache data that hasn't been moved to the proper namespace needs to be - // updated in the ON_DEMAND cache, so don't evict data without a processedCount > 0. - if (cacheData.attributes.cacheTime < cacheResultBuilder.startTime && cacheData.attributes.processedCount > 0) { - cacheResultBuilder.onDemand.toEvict << cacheData.id - } else { - cacheResultBuilder.onDemand.toKeep[cacheData.id] = cacheData - } - } - - CacheResult cacheResults = buildCacheResult(cacheResultBuilder, serverGroups) - - cacheResults.cacheResults[ON_DEMAND.ns].each { CacheData cacheData -> - cacheData.attributes.processedTime = System.currentTimeMillis() - cacheData.attributes.processedCount = (cacheData.attributes.processedCount ?: 0) + 1 - } - - cacheResults - } - - private List getServerGroups(ProviderCache providerCache) { - constructServerGroups(providerCache) - } - - private GoogleServerGroup getServerGroup(ProviderCache providerCache, String onDemandServerGroupName) { - def serverGroups = constructServerGroups(providerCache, onDemandServerGroupName) - serverGroups ? 
serverGroups.first() : null - } - - private List constructServerGroups(ProviderCache providerCache, String onDemandServerGroupName = null) { - List serverGroups = [] - - BatchRequest igmRequest = buildBatchRequest() - BatchRequest instanceGroupsRequest = buildBatchRequest() - BatchRequest autoscalerRequest = buildBatchRequest() - - List instanceTemplates = GoogleZonalServerGroupCachingAgent.fetchInstanceTemplates(compute, project) - List instances = GCEUtil.fetchInstances(this, credentials) - - InstanceGroupManagerCallbacks instanceGroupManagerCallbacks = new InstanceGroupManagerCallbacks( - providerCache: providerCache, - serverGroups: serverGroups, - region: region, - instanceGroupsRequest: instanceGroupsRequest, - autoscalerRequest: autoscalerRequest, - instances: instances - ) - if (onDemandServerGroupName) { - InstanceGroupManagerCallbacks.InstanceGroupManagerSingletonCallback igmCallback = - instanceGroupManagerCallbacks.newInstanceGroupManagerSingletonCallback(instanceTemplates, instances) - compute.regionInstanceGroupManagers().get(project, region, onDemandServerGroupName).queue(igmRequest, igmCallback) - } else { - InstanceGroupManagerCallbacks.InstanceGroupManagerListCallback igmlCallback = - instanceGroupManagerCallbacks.newInstanceGroupManagerListCallback(instanceTemplates, instances) - compute.regionInstanceGroupManagers() - .list(project, region) - .setMaxResults(maxMIGPageSize) - .queue(igmRequest, igmlCallback) - } - executeIfRequestsAreQueued(igmRequest, "RegionalServerGroupCaching.igm") - executeIfRequestsAreQueued(instanceGroupsRequest, "RegionalServerGroupCaching.instanceGroups") - executeIfRequestsAreQueued(autoscalerRequest, "RegionalServerGroupCaching.autoscaler") - - serverGroups - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.ServerGroup && cloudProvider == GoogleCloudProvider.ID - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - if (!data.containsKey("serverGroupName") || data.account != accountName || data.region != region) { - return null - } - - GoogleServerGroup serverGroup = metricsSupport.readData { - getServerGroup(providerCache, data.serverGroupName as String) - } - - if (serverGroup && !serverGroup.regional) { - return null - } - - def cacheResultBuilder = new CacheResultBuilder(startTime: Long.MAX_VALUE) - CacheResult result = metricsSupport.transformData { - buildCacheResult(cacheResultBuilder, serverGroup ? [serverGroup] : []) - } - - def serverGroupKey = Keys.getServerGroupKey(data.serverGroupName as String, accountName, region) - - if (result.cacheResults.values().flatten().empty) { - // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed). - providerCache.evictDeletedItems(ON_DEMAND.ns, [serverGroupKey]) - } else { - metricsSupport.onDemandStore { - def cacheData = new DefaultCacheData( - serverGroupKey, - TimeUnit.MINUTES.toSeconds(10) as Integer, // ttl - [ - cacheTime : System.currentTimeMillis(), - cacheResults : objectMapper.writeValueAsString(result.cacheResults), - processedCount: 0, - processedTime : null - ], - [:] - ) - - providerCache.putCacheData(ON_DEMAND.ns, cacheData) - } - } - - Map> evictions = [:].withDefault {_ -> []} - if (!serverGroup) { - evictions[SERVER_GROUPS.ns].add(serverGroupKey) - } - - log.info("On demand cache refresh succeeded. Data: ${data}. Added ${serverGroup ? 1 : 0} items to the cache. 
Evicted ${evictions[SERVER_GROUPS.ns]}.") - - return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: result, - evictions: evictions, - // Do not include "authoritativeTypes" here, as it will result in all other cache entries getting deleted! - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - def keyOwnedByThisAgent = { Map parsedKey -> - parsedKey && parsedKey.account == accountName && parsedKey.region == region && !parsedKey.zone - } - - def keys = providerCache.getIdentifiers(ON_DEMAND.ns).findAll { String key -> - keyOwnedByThisAgent(Keys.parse(key)) - } - - providerCache.getAll(ON_DEMAND.ns, keys).collect { CacheData cacheData -> - def details = Keys.parse(cacheData.id) - - return [ - details : details, - moniker : convertOnDemandDetails(details), - cacheTime : cacheData.attributes.cacheTime, - processedCount: cacheData.attributes.processedCount, - processedTime : cacheData.attributes.processedTime - ] - } - - } - - private CacheResult buildCacheResult(CacheResultBuilder cacheResultBuilder, List serverGroups) { - log.info "Describing items in $agentType" - - serverGroups.each { GoogleServerGroup serverGroup -> - def names = Names.parseName(serverGroup.name) - def applicationName = names.app - def clusterName = names.cluster - - def serverGroupKey = getServerGroupKey(serverGroup) - def clusterKey = Keys.getClusterKey(accountName, applicationName, clusterName) - def appKey = Keys.getApplicationKey(applicationName) - - def loadBalancerKeys = [] - def instanceKeys = serverGroup?.instances?.collect { Keys.getInstanceKey(accountName, region, it.name) } ?: [] - - cacheResultBuilder.namespace(APPLICATIONS.ns).keep(appKey).with { - attributes.name = applicationName - relationships[CLUSTERS.ns].add(clusterKey) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - - cacheResultBuilder.namespace(CLUSTERS.ns).keep(clusterKey).with { - attributes.name = clusterName - attributes.accountName = accountName - relationships[APPLICATIONS.ns].add(appKey) - relationships[SERVER_GROUPS.ns].add(serverGroupKey) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - log.debug("Writing cache entry for cluster key ${clusterKey} adding relationships for application ${appKey} and server group ${serverGroupKey}") - - GoogleZonalServerGroupCachingAgent.populateLoadBalancerKeys(serverGroup, loadBalancerKeys, accountName, region) - - loadBalancerKeys.each { String loadBalancerKey -> - cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keep(loadBalancerKey).with { - relationships[SERVER_GROUPS.ns].add(serverGroupKey) - } - } - - if (GoogleZonalServerGroupCachingAgent.shouldUseOnDemandData(cacheResultBuilder, serverGroupKey)) { - moveOnDemandDataToNamespace(cacheResultBuilder, serverGroup) - } else { - cacheResultBuilder.namespace(SERVER_GROUPS.ns).keep(serverGroupKey).with { - attributes = objectMapper.convertValue(serverGroup, ATTRIBUTES) - relationships[APPLICATIONS.ns].add(appKey) - relationships[CLUSTERS.ns].add(clusterKey) - relationships[LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - } - } - - log.info("Caching ${cacheResultBuilder.namespace(APPLICATIONS.ns).keepSize()} applications in ${agentType}") - log.info("Caching ${cacheResultBuilder.namespace(CLUSTERS.ns).keepSize()} clusters in ${agentType}") - log.info("Caching ${cacheResultBuilder.namespace(SERVER_GROUPS.ns).keepSize()} server groups in ${agentType}") - log.info("Caching 
${cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keepSize()} load balancer relationships in ${agentType}") - log.info("Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}") - log.info("Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}") - - cacheResultBuilder.build() - } - - void moveOnDemandDataToNamespace(CacheResultBuilder cacheResultBuilder, - GoogleServerGroup googleServerGroup) { - def serverGroupKey = getServerGroupKey(googleServerGroup) - Map> onDemandData = objectMapper.readValue( - cacheResultBuilder.onDemand.toKeep[serverGroupKey].attributes.cacheResults as String, - new TypeReference>>() {}) - - onDemandData.each { String namespace, List cacheDatas -> - if (namespace != 'onDemand') { - cacheDatas.each { MutableCacheData cacheData -> - cacheResultBuilder.namespace(namespace).keep(cacheData.id).with { it -> - it.attributes = cacheData.attributes - it.relationships = Utils.mergeOnDemandCacheRelationships(cacheData.relationships, it.relationships) - } - cacheResultBuilder.onDemand.toKeep.remove(cacheData.id) - } - } - } - } - - String getServerGroupKey(GoogleServerGroup googleServerGroup) { - return Keys.getServerGroupKey(googleServerGroup.name, accountName, region) - } - - // TODO(lwander) this was taken from the netflix cluster caching, and should probably be shared between all providers. - @Canonical - static class MutableCacheData implements CacheData { - String id - int ttlSeconds = -1 - Map attributes = [:] - Map> relationships = [:].withDefault { [] as Set } - } - - class InstanceGroupManagerCallbacks { - - ProviderCache providerCache - List serverGroups - String region - BatchRequest instanceGroupsRequest - BatchRequest autoscalerRequest - List instances - - InstanceGroupManagerSingletonCallback newInstanceGroupManagerSingletonCallback(List instanceTemplates, List instances) { - return new InstanceGroupManagerSingletonCallback(instanceTemplates: instanceTemplates, instances: instances) - } - - InstanceGroupManagerListCallback newInstanceGroupManagerListCallback(List instanceTemplates, List instances) { - return new InstanceGroupManagerListCallback(instanceTemplates: instanceTemplates, instances: instances) - } - - class InstanceGroupManagerSingletonCallback extends JsonBatchCallback { - - List instanceTemplates - List instances - - @Override - void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { - // 404 is thrown if the managed instance group does not exist in the given region. Any other exception needs to be propagated. 
- if (e.code != 404) { - def errorJson = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e) - log.error errorJson - } - } - - @Override - void onSuccess(InstanceGroupManager instanceGroupManager, HttpHeaders responseHeaders) throws IOException { - if (Names.parseName(instanceGroupManager.name)) { - GoogleServerGroup serverGroup = buildServerGroupFromInstanceGroupManager(instanceGroupManager, instances) - serverGroups << serverGroup - - populateInstanceTemplate(providerCache, instanceGroupManager, serverGroup, instanceTemplates) - - def autoscalerCallback = new AutoscalerSingletonCallback(serverGroup: serverGroup) - compute.regionAutoscalers().get(project, region, serverGroup.name).queue(autoscalerRequest, autoscalerCallback) - } - } - } - - class InstanceGroupManagerListCallback extends JsonBatchCallback implements FailureLogger { - - List instanceTemplates - List instances - - @Override - void onSuccess(RegionInstanceGroupManagerList instanceGroupManagerList, HttpHeaders responseHeaders) throws IOException { - instanceGroupManagerList?.items?.each { InstanceGroupManager instanceGroupManager -> - if (Names.parseName(instanceGroupManager.name)) { - GoogleServerGroup serverGroup = buildServerGroupFromInstanceGroupManager(instanceGroupManager, instances) - serverGroups << serverGroup - - populateInstanceTemplate(providerCache, instanceGroupManager, serverGroup, instanceTemplates) - } - } - - def autoscalerCallback = new AutoscalerAggregatedListCallback(serverGroups: serverGroups) - compute.autoscalers().aggregatedList(project).queue(autoscalerRequest, autoscalerCallback) - - def nextPageToken = instanceGroupManagerList.getNextPageToken() - - if (nextPageToken) { - BatchRequest igmRequest = buildBatchRequest() - compute.regionInstanceGroupManagers() - .list(project, region) - .setPageToken(nextPageToken) - .setMaxResults(maxMIGPageSize) - .queue(igmRequest, this) - executeIfRequestsAreQueued(igmRequest, "RegionalServerGroupCaching.igm") - } - } - } - - GoogleServerGroup buildServerGroupFromInstanceGroupManager(InstanceGroupManager instanceGroupManager, - List instances) { - - DistributionPolicy distributionPolicy = instanceGroupManager?.getDistributionPolicy() - // The distribution policy zones are URLs. - List zones = distributionPolicy?.getZones()?.collect { Utils.getLocalName(it.getZone()) } - List groupInstances = instances.findAll { it.getName().startsWith(instanceGroupManager.getBaseInstanceName()) } - - Map namedPorts = [:] - instanceGroupManager.namedPorts.each { namedPorts[(it.name)] = it.port } - return new GoogleServerGroup( - name: instanceGroupManager.name, - instances: groupInstances, - regional: true, - region: region, - namedPorts: namedPorts, - zones: zones, - distributionPolicy: zones ? 
new GoogleDistributionPolicy(zones: zones) : null, - selfLink: instanceGroupManager.selfLink, - currentActions: instanceGroupManager.currentActions, - launchConfig: [createdTime: Utils.getTimeFromTimestamp(instanceGroupManager.creationTimestamp)], - asg: [minSize : instanceGroupManager.targetSize, - maxSize : instanceGroupManager.targetSize, - desiredCapacity: instanceGroupManager.targetSize], - autoHealingPolicy: instanceGroupManager.autoHealingPolicies?.getAt(0) - ) - } - - void populateInstanceTemplate(ProviderCache providerCache, InstanceGroupManager instanceGroupManager, - GoogleServerGroup serverGroup, List instanceTemplates) { - String instanceTemplateName = Utils.getLocalName(instanceGroupManager.instanceTemplate) - List loadBalancerNames = - Utils.deriveNetworkLoadBalancerNamesFromTargetPoolUrls(instanceGroupManager.getTargetPools()) - - InstanceTemplate template = instanceTemplates.find { it -> it.getName() == instanceTemplateName } - GoogleZonalServerGroupCachingAgent.populateServerGroupWithTemplate(serverGroup, providerCache, loadBalancerNames, - template, accountName, project, objectMapper) - def instanceMetadata = template?.properties?.metadata - if (instanceMetadata) { - def metadataMap = Utils.buildMapFromMetadata(instanceMetadata) - serverGroup.selectZones = metadataMap?.get(GoogleServerGroup.View.SELECT_ZONES) ?: false - } - } - } - - class AutoscalerSingletonCallback extends JsonBatchCallback { - - GoogleServerGroup serverGroup - - @Override - void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { - // 404 is thrown if the autoscaler does not exist in the given region. Any other exception needs to be propagated. - if (e.code != 404) { - def errorJson = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e) - log.error errorJson - } - } - - @Override - void onSuccess(Autoscaler autoscaler, HttpHeaders responseHeaders) throws IOException { - serverGroup.autoscalingPolicy = autoscaler.getAutoscalingPolicy() - serverGroup.asg.minSize = serverGroup.autoscalingPolicy.minNumReplicas - serverGroup.asg.maxSize = serverGroup.autoscalingPolicy.maxNumReplicas - - List statusDetails = autoscaler.statusDetails - - if (statusDetails) { - serverGroup.autoscalingMessages = statusDetails.collect { it.message } - } - } - } - - class AutoscalerAggregatedListCallback extends JsonBatchCallback implements FailureLogger { - - List serverGroups - - @Override - void onSuccess(AutoscalerAggregatedList autoscalerAggregatedList, HttpHeaders responseHeaders) throws IOException { - autoscalerAggregatedList?.items?.each { String location, AutoscalersScopedList autoscalersScopedList -> - if (location.startsWith("regions/")) { - def region = Utils.getLocalName(location) - - autoscalersScopedList.autoscalers.each { Autoscaler autoscaler -> - def migName = Utils.getLocalName(autoscaler.target as String) - def serverGroup = serverGroups.find { - it.name == migName && it.region == region - } - - if (serverGroup) { - serverGroup.autoscalingPolicy = autoscaler.getAutoscalingPolicy() - serverGroup.asg.minSize = serverGroup.autoscalingPolicy.minNumReplicas - serverGroup.asg.maxSize = serverGroup.autoscalingPolicy.maxNumReplicas - - List statusDetails = autoscaler.statusDetails - - if (statusDetails) { - serverGroup.autoscalingMessages = statusDetails.collect { it.message } - } - } - } - } - } - } - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgent.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgent.groovy index 459f12ae20d..440774c4c06 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgent.groovy @@ -27,6 +27,7 @@ import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder import com.netflix.spinnaker.clouddriver.google.cache.Keys @@ -65,12 +66,12 @@ class GoogleSecurityGroupCachingAgent extends AbstractGoogleCachingAgent impleme this.metricsSupport = new OnDemandMetricsSupport( registry, this, - "${GoogleCloudProvider.ID}:${OnDemandAgent.OnDemandType.SecurityGroup}") + "${GoogleCloudProvider.ID}:${OnDemandType.SecurityGroup}") } @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.SecurityGroup && cloudProvider == GoogleCloudProvider.ID + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.SecurityGroup && cloudProvider == GoogleCloudProvider.ID } @Override @@ -135,7 +136,7 @@ class GoogleSecurityGroupCachingAgent extends AbstractGoogleCachingAgent impleme evictions[SECURITY_GROUPS.ns].addAll(identifiers) } - log.info("On demand cache refresh succeeded. Data: ${data}. Added ${firewall ? 1 : 0} items to the cache.") + log.debug("On demand cache refresh succeeded. Data: ${data}. Added ${firewall ? 
1 : 0} items to the cache.") return new OnDemandAgent.OnDemandResult( sourceAgentType: getOnDemandAgentType(), @@ -146,7 +147,7 @@ class GoogleSecurityGroupCachingAgent extends AbstractGoogleCachingAgent impleme } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { def keyOwnedByThisAgent = { Map parsedKey -> parsedKey && parsedKey.account == accountName && parsedKey.region == "global" } @@ -205,13 +206,6 @@ class GoogleSecurityGroupCachingAgent extends AbstractGoogleCachingAgent impleme List firewalls = timeExecute(compute.firewalls().list(project), "compute.firewalls.list", TAG_SCOPE, SCOPE_GLOBAL).items as List - if (xpnHostProject) { - List hostFirewalls = timeExecute(compute.firewalls().list(xpnHostProject), - "compute.firewalls.list", TAG_SCOPE, SCOPE_GLOBAL).items as List - - firewalls = (firewalls ?: []) + (hostFirewalls ?: []) - } - return firewalls } } @@ -223,7 +217,7 @@ class GoogleSecurityGroupCachingAgent extends AbstractGoogleCachingAgent impleme } private CacheResult buildCacheResult(CacheResultBuilder cacheResultBuilder, List firewalls) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") firewalls.each { Firewall firewall -> def securityGroupKey = Keys.getSecurityGroupKey(firewall.getName(), @@ -235,14 +229,14 @@ class GoogleSecurityGroupCachingAgent extends AbstractGoogleCachingAgent impleme moveOnDemandDataToNamespace(cacheResultBuilder, firewall) } else { cacheResultBuilder.namespace(SECURITY_GROUPS.ns).keep(securityGroupKey).with { - attributes = [firewall: firewall] + attributes = [firewall: firewall, project: project] } } } - log.info("Caching ${cacheResultBuilder.namespace(SECURITY_GROUPS.ns).keepSize()} security groups in ${agentType}") - log.info "Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}" - log.info "Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}" + log.debug("Caching ${cacheResultBuilder.namespace(SECURITY_GROUPS.ns).keepSize()} security groups in ${agentType}") + log.debug "Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}" + log.debug "Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}" return cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslCertificateCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslCertificateCachingAgent.groovy index 2482ce5bd2e..5bd2bfcb27c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslCertificateCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslCertificateCachingAgent.groovy @@ -62,7 +62,7 @@ class GoogleSslCertificateCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List sslCertificateList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -74,7 +74,7 @@ class GoogleSslCertificateCachingAgent extends AbstractGoogleCachingAgent { } } - log.info("Caching ${cacheResultBuilder.namespace(SSL_CERTIFICATES.ns).keepSize()} items in ${agentType}") + log.debug("Caching 
${cacheResultBuilder.namespace(SSL_CERTIFICATES.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslLoadBalancerCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslLoadBalancerCachingAgent.groovy index cd7038bd1aa..88d3e0acc19 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslLoadBalancerCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSslLoadBalancerCachingAgent.groovy @@ -17,21 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.http.HttpHeaders +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.* import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils +import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* import com.netflix.spinnaker.clouddriver.google.provider.agent.util.GroupHealthRequest import com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import groovy.util.logging.Slf4j +import org.slf4j.LoggerFactory @Slf4j class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachingAgent { @@ -44,7 +49,7 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin */ Map bsNameToGroupHealthsMap = [:] Set queuedBsGroupHealthRequests = new HashSet<>() - List resolutions = [] + Set resolutions = new HashSet<>() GoogleSslLoadBalancerCachingAgent(String clouddriverUserAgentApplicationName, GoogleNamedAccountCredentials credentials, @@ -58,7 +63,7 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { // Just let GoogleHttpLoadBalancerCachingAgent return the pending global on demand requests. 
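As the comment above notes, the global load-balancer agents (HTTP, SSL, TCP) share one on-demand key space, so only the HTTP agent enumerates pending on-demand requests and the other two return an empty collection; otherwise every pending request would be reported once per agent. Schematically:

    // Toy illustration: three agents over one key space, one designated reporter.
    def agents = [
      http: { ['lb-1-on-demand'] },  // GoogleHttpLoadBalancerCachingAgent reports
      ssl : { [] },                  // the SSL and TCP agents defer to it
      tcp : { [] }
    ]
    assert agents.values().collectMany { it() } == ['lb-1-on-demand']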
[] } @@ -68,14 +73,14 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin List loadBalancers = [] List failedLoadBalancers = [] - BatchRequest forwardingRulesRequest = buildBatchRequest() - BatchRequest targetSslProxyRequest = buildBatchRequest() - BatchRequest groupHealthRequest = buildBatchRequest() + GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest() + GoogleBatchRequest targetSslProxyRequest = buildGoogleBatchRequest() + GoogleBatchRequest groupHealthRequest = buildGoogleBatchRequest() // Reset the local getHealth caches/queues each caching agent cycle. bsNameToGroupHealthsMap = [:] queuedBsGroupHealthRequests = new HashSet<>() - resolutions = [] + resolutions = new HashSet<>() List projectBackendServices = GCEUtil.fetchBackendServices(this, compute, project) List projectHealthChecks = GCEUtil.fetchHealthChecks(this, compute, project) @@ -91,10 +96,20 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin if (onDemandLoadBalancerName) { ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback = forwardingRuleCallbacks.newForwardingRuleSingletonCallback() - compute.globalForwardingRules().get(project, onDemandLoadBalancerName).queue(forwardingRulesRequest, frCallback) + forwardingRulesRequest.queue(compute.globalForwardingRules().get(project, onDemandLoadBalancerName), frCallback) } else { ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback = forwardingRuleCallbacks.newForwardingRuleListCallback() - compute.globalForwardingRules().list(project).queue(forwardingRulesRequest, frlCallback) + new PaginatedRequest(this) { + @Override + ComputeRequest request(String pageToken) { + return compute.globalForwardingRules().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(ForwardingRuleList forwardingRuleList) { + return forwardingRuleList.getNextPageToken() + } + }.queue(forwardingRulesRequest, frlCallback, "SslLoadBalancerCaching.forwardingRules") } executeIfRequestsAreQueued(forwardingRulesRequest, "SslLoadBalancerCaching.forwardingRules") @@ -110,14 +125,23 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin return loadBalancers.findAll { !(it.name in failedLoadBalancers) } } + @Override + String determineInstanceKey(GoogleLoadBalancer loadBalancer, GoogleLoadBalancerHealth health) { + // Ssl load balancers' region is "global", so we have to determine the instance region from its zone. 
+ def instanceZone = health.instanceZone + def instanceRegion = credentials.regionFromZone(instanceZone) + + return Keys.getInstanceKey(accountName, instanceRegion, health.instanceName) + } + class ForwardingRuleCallbacks { List loadBalancers List failedLoadBalancers = [] - BatchRequest targetSslProxyRequest + GoogleBatchRequest targetSslProxyRequest // Pass through objects - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest List projectBackendServices List projectHealthChecks @@ -161,6 +185,11 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin } } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + LoggerFactory.getLogger(this.class).error e.getMessage() + } } void cacheRemainderOfLoadBalancerResourceGraph(ForwardingRule forwardingRule) { @@ -186,7 +215,7 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin projectBackendServices: projectBackendServices, projectHealthChecks: projectHealthChecks, ) - compute.targetSslProxies().get(project, targetSslProxyName).queue(targetSslProxyRequest, targetSslProxyCallback) + targetSslProxyRequest.queue(compute.targetSslProxies().get(project, targetSslProxyName), targetSslProxyCallback) } } @@ -194,7 +223,7 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin GoogleSslLoadBalancer googleLoadBalancer List projectBackendServices List projectHealthChecks - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest @Override void onSuccess(TargetSslProxy targetSslProxy, HttpHeaders responseHeaders) throws IOException { @@ -204,12 +233,20 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin BackendService backendService = projectBackendServices?.find { BackendService bs -> bs.getName() == backendServiceName } handleBackendService(backendService, googleLoadBalancer, projectHealthChecks, groupHealthRequest) } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. ") + if (failedSubjects != null) { + failedSubjects << subject + } + } } private void handleBackendService(BackendService backendService, GoogleSslLoadBalancer googleLoadBalancer, List healthChecks, - BatchRequest groupHealthRequest) { + GoogleBatchRequest groupHealthRequest) { if (!backendService) { return } @@ -243,9 +280,9 @@ class GoogleSslLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin // The groupHealthCallback updates the local cache. 
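The determineInstanceKey override earlier in this hunk exists because these forwarding rules are global: instance health must still be cached under a regional key, so the region is recovered from the reporting instance's zone. GCE zone names embed their region as everything before the final dash; a hedged sketch of that mapping (the real logic lives on GoogleNamedAccountCredentials.regionFromZone and may handle more cases):

    // e.g. 'us-central1-f' -> 'us-central1'; null stays null.
    def regionFromZoneSketch = { String zone ->
      zone ? zone.substring(0, zone.lastIndexOf('-')) : null
    }
    assert regionFromZoneSketch('us-central1-f') == 'us-central1'
    assert regionFromZoneSketch(null) == null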
log.debug("Queueing a batch call for getHealth(): {}", ghr) queuedBsGroupHealthRequests.add(ghr) - compute.backendServices() - .getHealth(project, backendService.name, resourceGroup) - .queue(groupHealthRequest, groupHealthCallback) + groupHealthRequest + .queue(compute.backendServices().getHealth(project, backendService.name as String, resourceGroup), + groupHealthCallback) } else { log.debug("Passing, batch call result cached for getHealth(): {}", ghr) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgent.groovy index bf96df760d6..84863dde325 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgent.groovy @@ -78,7 +78,7 @@ class GoogleSubnetCachingAgent extends AbstractGoogleCachingAgent { } private CacheResult buildCacheResult(ProviderCache _, List subnetList) { - log.info("Describing items in ${agentType}") + log.debug("Describing items in ${agentType}") def cacheResultBuilder = new CacheResultBuilder() @@ -86,11 +86,11 @@ class GoogleSubnetCachingAgent extends AbstractGoogleCachingAgent { def subnetKey = Keys.getSubnetKey(deriveSubnetId(subnet), region, accountName) cacheResultBuilder.namespace(SUBNETS.ns).keep(subnetKey).with { - attributes.subnet = subnet + attributes = [subnet: subnet,project: project] } } - log.info("Caching ${cacheResultBuilder.namespace(SUBNETS.ns).keepSize()} items in ${agentType}") + log.debug("Caching ${cacheResultBuilder.namespace(SUBNETS.ns).keepSize()} items in ${agentType}") cacheResultBuilder.build() } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleTcpLoadBalancerCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleTcpLoadBalancerCachingAgent.groovy index d3e9e3b638a..b4a987d9c6a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleTcpLoadBalancerCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleTcpLoadBalancerCachingAgent.groovy @@ -17,21 +17,26 @@ package com.netflix.spinnaker.clouddriver.google.provider.agent import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.json.GoogleJsonError import com.google.api.client.http.HttpHeaders +import com.google.api.services.compute.ComputeRequest import com.google.api.services.compute.model.* import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.google.cache.Keys import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils +import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* import com.netflix.spinnaker.clouddriver.google.provider.agent.util.GroupHealthRequest import 
com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import groovy.util.logging.Slf4j +import org.slf4j.LoggerFactory @Slf4j class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachingAgent { @@ -44,7 +49,7 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin */ Map bsNameToGroupHealthsMap = [:] Set queuedBsGroupHealthRequests = new HashSet<>() - List resolutions = [] + Set resolutions = new HashSet<>() GoogleTcpLoadBalancerCachingAgent(String clouddriverUserAgentApplicationName, GoogleNamedAccountCredentials credentials, @@ -58,7 +63,7 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { // Just let GoogleHttpLoadBalancerCachingAgent return the pending global on demand requests. [] } @@ -68,14 +73,14 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin List loadBalancers = [] List failedSubjects = [] - BatchRequest forwardingRulesRequest = buildBatchRequest() - BatchRequest targetTcpProxyRequest = buildBatchRequest() - BatchRequest groupHealthRequest = buildBatchRequest() + GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest() + GoogleBatchRequest targetTcpProxyRequest = buildGoogleBatchRequest() + GoogleBatchRequest groupHealthRequest = buildGoogleBatchRequest() // Reset the local getHealth caches/queues each caching agent cycle. 
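Worth noting: each agent keeps mutable per-cycle state (the group-health map, the queued-request set, and resolutions), and every getLoadBalancers pass begins by resetting it, as the comment above says. Without that reset, a key queued in one cycle would suppress the same getHealth call in the next cycle even though the backing data had been discarded; a toy demonstration:

    def queued = new HashSet()
    def cycle = { String key ->
      queued = new HashSet()  // the per-cycle reset mirrored from the diff
      queued.add(key)         // true means the batch call would be queued
    }
    assert cycle('bs-1|group-a')
    assert cycle('bs-1|group-a')  // still queued next cycle, thanks to the reset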
bsNameToGroupHealthsMap = [:] queuedBsGroupHealthRequests = new HashSet<>() - resolutions = [] + resolutions = new HashSet<>() List projectBackendServices = GCEUtil.fetchBackendServices(this, compute, project) List projectHealthChecks = GCEUtil.fetchHealthChecks(this, compute, project) @@ -91,10 +96,20 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin if (onDemandLoadBalancerName) { ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback = forwardingRuleCallbacks.newForwardingRuleSingletonCallback() - compute.globalForwardingRules().get(project, onDemandLoadBalancerName).queue(forwardingRulesRequest, frCallback) + forwardingRulesRequest.queue(compute.globalForwardingRules().get(project, onDemandLoadBalancerName), frCallback) } else { ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback = forwardingRuleCallbacks.newForwardingRuleListCallback() - compute.globalForwardingRules().list(project).queue(forwardingRulesRequest, frlCallback) + new PaginatedRequest(this) { + @Override + ComputeRequest request(String pageToken) { + return compute.globalForwardingRules().list(project).setPageToken(pageToken) + } + + @Override + String getNextPageToken(ForwardingRuleList forwardingRuleList) { + return forwardingRuleList.getNextPageToken() + } + }.queue(forwardingRulesRequest, frlCallback, "TcpLoadBalancerCaching.forwardingRules") } executeIfRequestsAreQueued(forwardingRulesRequest, "TcpLoadBalancerCaching.forwardingRules") @@ -110,13 +125,22 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin return loadBalancers.findAll { !(it.name in failedSubjects) } } + @Override + String determineInstanceKey(GoogleLoadBalancer loadBalancer, GoogleLoadBalancerHealth health) { + // Tcp load balancers' region is "global", so we have to determine the instance region from its zone. 
+ def instanceZone = health.instanceZone + def instanceRegion = credentials.regionFromZone(instanceZone) + + return Keys.getInstanceKey(accountName, instanceRegion, health.instanceName) + } + class ForwardingRuleCallbacks { List loadBalancers List failedSubjects = [] - BatchRequest targetTcpProxyRequest + GoogleBatchRequest targetTcpProxyRequest // Pass through objects - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest List projectBackendServices List projectHealthChecks @@ -160,6 +184,11 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin } } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + LoggerFactory.getLogger(this.class).error e.getMessage() + } } void cacheRemainderOfLoadBalancerResourceGraph(ForwardingRule forwardingRule) { @@ -185,7 +214,7 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin subject: newLoadBalancer.name, failedSubjects: failedSubjects, ) - compute.targetTcpProxies().get(project, targetTcpProxyName).queue(targetTcpProxyRequest, targetTcpProxyCallback) + targetTcpProxyRequest.queue(compute.targetTcpProxies().get(project, targetTcpProxyName), targetTcpProxyCallback) } } @@ -193,7 +222,7 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin GoogleTcpLoadBalancer googleLoadBalancer List projectBackendServices List projectHealthChecks - BatchRequest groupHealthRequest + GoogleBatchRequest groupHealthRequest @Override void onSuccess(TargetTcpProxy targetTcpProxy, HttpHeaders responseHeaders) throws IOException { @@ -207,12 +236,20 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin handleBackendService(backendService, googleLoadBalancer, projectHealthChecks, groupHealthRequest) } } + + @Override + void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn("Failed to read a component of subject ${subject}. The platform error message was:\n ${e.getMessage()}. \nReporting it as 'Failed' to the caching agent. ") + if (failedSubjects != null) { + failedSubjects << subject + } + } } private void handleBackendService(BackendService backendService, GoogleTcpLoadBalancer googleLoadBalancer, List healthChecks, - BatchRequest groupHealthRequest) { + GoogleBatchRequest groupHealthRequest) { def groupHealthCallback = new GroupHealthCallback(backendServiceName: backendService.name) GoogleBackendService newService = new GoogleBackendService( @@ -239,9 +276,9 @@ class GoogleTcpLoadBalancerCachingAgent extends AbstractGoogleLoadBalancerCachin // The groupHealthCallback updates the local cache. 
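The queuedBsGroupHealthRequests set right below is the dedup gate for getHealth: several load balancers can share a backend service and resource group, but the batch should carry that call only once, with results fanned back out later through bsNameToGroupHealthsMap and the resolutions set. The gate in miniature:

    def queuedHealthKeys = new HashSet()
    def maybeQueue = { String service, String group ->
      def key = "${service}|${group}" as String
      queuedHealthKeys.add(key)  // true only the first time this pair is seen
    }
    assert maybeQueue('bs-1', 'ig-a')   // queued onto the batch
    assert !maybeQueue('bs-1', 'ig-a')  // duplicate: batch call skipped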
log.debug("Queueing a batch call for getHealth(): {}", ghr) queuedBsGroupHealthRequests.add(ghr) - compute.backendServices() - .getHealth(project, backendService.name, resourceGroup) - .queue(groupHealthRequest, groupHealthCallback) + groupHealthRequest + .queue(compute.backendServices().getHealth(project, backendService.name as String, resourceGroup), + groupHealthCallback) } else { log.debug("Passing, batch call result cached for getHealth(): {}", ghr) } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgent.groovy deleted file mode 100644 index 31f491a4168..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgent.groovy +++ /dev/null @@ -1,705 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.batch.BatchRequest -import com.google.api.client.googleapis.batch.json.JsonBatchCallback -import com.google.api.client.googleapis.json.GoogleJsonError -import com.google.api.client.http.HttpHeaders -import com.google.api.services.compute.Compute -import com.google.api.services.compute.model.* -import com.netflix.frigga.Names -import com.netflix.frigga.ami.AppVersion -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider -import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits -import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.google.cache.Keys -import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil -import com.netflix.spinnaker.clouddriver.google.model.GoogleInstance -import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup -import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy -import com.netflix.spinnaker.clouddriver.google.security.AccountForClient -import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.googlecommon.GoogleExecutor -import groovy.transform.Canonical -import 
groovy.util.logging.Slf4j - -import java.util.concurrent.TimeUnit - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.* - -@Slf4j -class GoogleZonalServerGroupCachingAgent extends AbstractGoogleCachingAgent implements OnDemandAgent, GoogleExecutorTraits { - - static final String GLOBAL_LOAD_BALANCER_NAMES = GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES - static final String REGIONAL_LOAD_BALANCER_NAMES = GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES - static final String BACKEND_SERVICE_NAMES = GoogleServerGroup.View.BACKEND_SERVICE_NAMES - static final String LOAD_BALANCING_POLICY = GoogleServerGroup.View.LOAD_BALANCING_POLICY - final String region - final long maxMIGPageSize - - final Set providedDataTypes = [ - AUTHORITATIVE.forType(SERVER_GROUPS.ns), - AUTHORITATIVE.forType(APPLICATIONS.ns), - INFORMATIVE.forType(CLUSTERS.ns), - INFORMATIVE.forType(LOAD_BALANCERS.ns), - ] as Set - - String agentType = "${accountName}/${region}/${GoogleZonalServerGroupCachingAgent.simpleName}" - String onDemandAgentType = "${agentType}-OnDemand" - final OnDemandMetricsSupport metricsSupport - - GoogleZonalServerGroupCachingAgent(String clouddriverUserAgentApplicationName, - GoogleNamedAccountCredentials credentials, - ObjectMapper objectMapper, - Registry registry, - String region, - long maxMIGPageSize) { - super(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - this.region = region - this.maxMIGPageSize = maxMIGPageSize - this.metricsSupport = new OnDemandMetricsSupport( - registry, - this, - "${GoogleCloudProvider.ID}:${OnDemandAgent.OnDemandType.ServerGroup}") - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - def cacheResultBuilder = new CacheResultBuilder(startTime: System.currentTimeMillis()) - - List serverGroups = getServerGroups(providerCache) - def serverGroupKeys = serverGroups.collect { getServerGroupKey(it) } - - providerCache.getAll(ON_DEMAND.ns, serverGroupKeys).each { CacheData cacheData -> - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // managed instance groups. Furthermore, cache data that hasn't been moved to the proper namespace needs to be - // updated in the ON_DEMAND cache, so don't evict data without a processedCount > 0. - if (cacheData.attributes.cacheTime < cacheResultBuilder.startTime && cacheData.attributes.processedCount > 0) { - cacheResultBuilder.onDemand.toEvict << cacheData.id - } else { - cacheResultBuilder.onDemand.toKeep[cacheData.id] = cacheData - } - } - - CacheResult cacheResults = buildCacheResult(cacheResultBuilder, serverGroups) - - cacheResults.cacheResults[ON_DEMAND.ns].each { CacheData cacheData -> - cacheData.attributes.processedTime = System.currentTimeMillis() - cacheData.attributes.processedCount = (cacheData.attributes.processedCount ?: 0) + 1 - } - - cacheResults - } - - private List getServerGroups(ProviderCache providerCache) { - constructServerGroups(providerCache) - } - - private GoogleServerGroup getServerGroup(ProviderCache providerCache, String onDemandServerGroupName) { - def serverGroups = constructServerGroups(providerCache, onDemandServerGroupName) - serverGroups ? 
serverGroups.first() : null - } - - private List constructServerGroups(ProviderCache providerCache, String onDemandServerGroupName = null) { - List zones = credentials.getZonesFromRegion(region) - List serverGroups = [] - - BatchRequest igmRequest = buildBatchRequest() - BatchRequest instanceGroupsRequest = buildBatchRequest() - BatchRequest autoscalerRequest = buildBatchRequest() - - List instanceTemplates = fetchInstanceTemplates(compute, project) - List instances = GCEUtil.fetchInstances(this, credentials) - - zones?.each { String zone -> - InstanceGroupManagerCallbacks instanceGroupManagerCallbacks = new InstanceGroupManagerCallbacks( - providerCache: providerCache, - serverGroups: serverGroups, - zone: zone, - instanceGroupsRequest: instanceGroupsRequest, - autoscalerRequest: autoscalerRequest, - instances: instances) - if (onDemandServerGroupName) { - InstanceGroupManagerCallbacks.InstanceGroupManagerSingletonCallback igmCallback = - instanceGroupManagerCallbacks.newInstanceGroupManagerSingletonCallback(instanceTemplates, instances) - compute.instanceGroupManagers().get(project, zone, onDemandServerGroupName).queue(igmRequest, igmCallback) - } else { - InstanceGroupManagerCallbacks.InstanceGroupManagerListCallback igmlCallback = - instanceGroupManagerCallbacks.newInstanceGroupManagerListCallback(instanceTemplates, instances) - compute.instanceGroupManagers().list(project, zone).setMaxResults(maxMIGPageSize).queue(igmRequest, igmlCallback) - } - } - executeIfRequestsAreQueued(igmRequest, "ZonalServerGroupCaching.igm") - executeIfRequestsAreQueued(instanceGroupsRequest, "ZonalServerGroupCaching.instanceGroups") - executeIfRequestsAreQueued(autoscalerRequest, "ZonalServerGroupCaching.autoscaler") - - serverGroups - } - - static List fetchInstanceTemplates(Compute compute, String project) { - Boolean executedAtLeastOnce = false - String nextPageToken = null - List instanceTemplates = [] - while (!executedAtLeastOnce || nextPageToken) { - InstanceTemplateList instanceTemplateList = GoogleExecutor.timeExecute( - GoogleExecutor.getRegistry(), - compute.instanceTemplates().list(project).setPageToken(nextPageToken), - "google.api", - "compute.instanceTemplates.list", - GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_GLOBAL, - "account", AccountForClient.getAccount(compute)) - - executedAtLeastOnce = true - nextPageToken = instanceTemplateList.getNextPageToken() - instanceTemplates.addAll(instanceTemplateList.getItems() ?: []) - } - return instanceTemplates - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.ServerGroup && cloudProvider == GoogleCloudProvider.ID - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - if (!data.containsKey("serverGroupName") || data.account != accountName || data.region != region) { - return null - } - - GoogleServerGroup serverGroup = metricsSupport.readData { - getServerGroup(providerCache, data.serverGroupName as String) - } - - if (serverGroup?.regional) { - return null - } - - String serverGroupKey - Collection identifiers = [] - - if (serverGroup) { - serverGroupKey = getServerGroupKey(serverGroup) - } else { - serverGroupKey = Keys.getServerGroupKey(data.serverGroupName as String, accountName, region, "*") - - // No server group was found, so need to find identifiers for all zonal server groups in the region. 
- identifiers = providerCache.filterIdentifiers(SERVER_GROUPS.ns, serverGroupKey) - } - - def cacheResultBuilder = new CacheResultBuilder(startTime: Long.MAX_VALUE) - CacheResult result = metricsSupport.transformData { - buildCacheResult(cacheResultBuilder, serverGroup ? [serverGroup] : []) - } - - if (result.cacheResults.values().flatten().empty) { - // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed). - providerCache.evictDeletedItems(ON_DEMAND.ns, identifiers) - } else { - metricsSupport.onDemandStore { - def cacheData = new DefaultCacheData( - serverGroupKey, - TimeUnit.MINUTES.toSeconds(10) as Integer, // ttl - [ - cacheTime : System.currentTimeMillis(), - cacheResults : objectMapper.writeValueAsString(result.cacheResults), - processedCount: 0, - processedTime : null - ], - [:] - ) - - providerCache.putCacheData(ON_DEMAND.ns, cacheData) - } - } - - Map> evictions = [:].withDefault {_ -> []} - if (!serverGroup) { - evictions[SERVER_GROUPS.ns].addAll(identifiers) - } - - log.info("On demand cache refresh succeeded. Data: ${data}. Added ${serverGroup ? 1 : 0} items to the cache. Evicted ${evictions[SERVER_GROUPS.ns]}.") - - return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: result, - evictions: evictions, - // Do not include "authoritativeTypes" here, as it will result in all other cache entries getting deleted! - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - def keyOwnedByThisAgent = { Map parsedKey -> - parsedKey && parsedKey.account == accountName && parsedKey.region == region && parsedKey.zone - } - - def keys = providerCache.getIdentifiers(ON_DEMAND.ns).findAll { String key -> - keyOwnedByThisAgent(Keys.parse(key)) - } - - providerCache.getAll(ON_DEMAND.ns, keys).collect { CacheData cacheData -> - def details = Keys.parse(cacheData.id) - - [ - details : details, - moniker : convertOnDemandDetails(details), - cacheTime : cacheData.attributes.cacheTime, - processedCount: cacheData.attributes.processedCount, - processedTime : cacheData.attributes.processedTime - ] - } - - } - - private CacheResult buildCacheResult(CacheResultBuilder cacheResultBuilder, List serverGroups) { - log.info "Describing items in $agentType" - - serverGroups.each { GoogleServerGroup serverGroup -> - def names = Names.parseName(serverGroup.name) - def applicationName = names.app - def clusterName = names.cluster - - def serverGroupKey = getServerGroupKey(serverGroup) - def clusterKey = Keys.getClusterKey(accountName, applicationName, clusterName) - def appKey = Keys.getApplicationKey(applicationName) - - def loadBalancerKeys = [] - def instanceKeys = serverGroup?.instances?.collect { Keys.getInstanceKey(accountName, region, it.name) } ?: [] - - cacheResultBuilder.namespace(APPLICATIONS.ns).keep(appKey).with { - attributes.name = applicationName - relationships[CLUSTERS.ns].add(clusterKey) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - - cacheResultBuilder.namespace(CLUSTERS.ns).keep(clusterKey).with { - attributes.name = clusterName - attributes.accountName = accountName - relationships[APPLICATIONS.ns].add(appKey) - relationships[SERVER_GROUPS.ns].add(serverGroupKey) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - log.debug("Writing cache entry for cluster key ${clusterKey} adding relationships for application ${appKey} and server group ${serverGroupKey}") - - populateLoadBalancerKeys(serverGroup, loadBalancerKeys, accountName, region) - - 
loadBalancerKeys.each { String loadBalancerKey -> - cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keep(loadBalancerKey).with { - relationships[SERVER_GROUPS.ns].add(serverGroupKey) - } - } - - if (shouldUseOnDemandData(cacheResultBuilder, serverGroupKey)) { - moveOnDemandDataToNamespace(cacheResultBuilder, serverGroup) - } else { - cacheResultBuilder.namespace(SERVER_GROUPS.ns).keep(serverGroupKey).with { - attributes = objectMapper.convertValue(serverGroup, ATTRIBUTES) - relationships[APPLICATIONS.ns].add(appKey) - relationships[CLUSTERS.ns].add(clusterKey) - relationships[LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - } - } - - log.info("Caching ${cacheResultBuilder.namespace(APPLICATIONS.ns).keepSize()} applications in ${agentType}") - log.info("Caching ${cacheResultBuilder.namespace(CLUSTERS.ns).keepSize()} clusters in ${agentType}") - log.info("Caching ${cacheResultBuilder.namespace(SERVER_GROUPS.ns).keepSize()} server groups in ${agentType}") - log.info("Caching ${cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keepSize()} load balancer relationships in ${agentType}") - log.info("Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}") - log.info("Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}") - - cacheResultBuilder.build() - } - - static boolean shouldUseOnDemandData(CacheResultBuilder cacheResultBuilder, String serverGroupKey) { - CacheData cacheData = cacheResultBuilder.onDemand.toKeep[serverGroupKey] - return cacheData ? cacheData.attributes.cacheTime >= cacheResultBuilder.startTime : false - } - - void moveOnDemandDataToNamespace(CacheResultBuilder cacheResultBuilder, - GoogleServerGroup googleServerGroup) { - def serverGroupKey = getServerGroupKey(googleServerGroup) - Map<String, List<MutableCacheData>> onDemandData = objectMapper.readValue( - cacheResultBuilder.onDemand.toKeep[serverGroupKey].attributes.cacheResults as String, - new TypeReference<Map<String, List<MutableCacheData>>>() {}) - - onDemandData.each { String namespace, List<MutableCacheData> cacheDatas -> - if (namespace != 'onDemand') { - cacheDatas.each { MutableCacheData cacheData -> - cacheResultBuilder.namespace(namespace).keep(cacheData.id).with { it -> - it.attributes = cacheData.attributes - it.relationships = Utils.mergeOnDemandCacheRelationships(cacheData.relationships, it.relationships) - } - cacheResultBuilder.onDemand.toKeep.remove(cacheData.id) - } - } - } - } - - String getServerGroupKey(GoogleServerGroup googleServerGroup) { - return Keys.getServerGroupKey(googleServerGroup.name, accountName, region, googleServerGroup.zone) - } - - // TODO(lwander) this was taken from the netflix cluster caching, and should probably be shared between all providers. 
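// Worked example (hypothetical timestamps) of the on-demand precedence rule implemented by
// shouldUseOnDemandData() above: a bulk caching cycle that started at t=1000 finds an
// ON_DEMAND entry written at t=1500, so the fresher on-demand copy is moved into the proper
// namespace by moveOnDemandDataToNamespace() rather than overwritten by the older bulk data.
def startTime = 1000L   // stands in for cacheResultBuilder.startTime
def cacheTime = 1500L   // stands in for the ON_DEMAND record's attributes.cacheTime
assert cacheTime >= startTime   // the condition under which the on-demand data wins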
- @Canonical - static class MutableCacheData implements CacheData { - String id - int ttlSeconds = -1 - Map attributes = [:] - Map> relationships = [:].withDefault { [] as Set } - } - - class InstanceGroupManagerCallbacks { - - ProviderCache providerCache - List serverGroups - String zone - BatchRequest instanceGroupsRequest - BatchRequest autoscalerRequest - List instances - - InstanceGroupManagerSingletonCallback newInstanceGroupManagerSingletonCallback(List instanceTemplates, List instances) { - return new InstanceGroupManagerSingletonCallback(instanceTemplates: instanceTemplates, instances: instances) - } - - InstanceGroupManagerListCallback newInstanceGroupManagerListCallback(List instanceTemplates, List instances) { - return new InstanceGroupManagerListCallback(instanceTemplates: instanceTemplates, instances: instances) - } - - class InstanceGroupManagerSingletonCallback extends JsonBatchCallback { - - List instanceTemplates - List instances - - @Override - void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { - // 404 is thrown if the managed instance group does not exist in the given zone. Any other exception needs to be propagated. - if (e.code != 404) { - def errorJson = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e) - log.error errorJson - } - } - - @Override - void onSuccess(InstanceGroupManager instanceGroupManager, HttpHeaders responseHeaders) throws IOException { - if (Names.parseName(instanceGroupManager.name)) { - GoogleServerGroup serverGroup = buildServerGroupFromInstanceGroupManager(instanceGroupManager, instances) - serverGroups << serverGroup - - populateInstanceTemplate(providerCache, instanceGroupManager, serverGroup, instanceTemplates) - - def autoscalerCallback = new AutoscalerSingletonCallback(serverGroup: serverGroup) - compute.autoscalers().get(project, zone, serverGroup.name).queue(autoscalerRequest, autoscalerCallback) - } - } - } - - class InstanceGroupManagerListCallback extends JsonBatchCallback implements FailureLogger { - - List instanceTemplates - List instances - - @Override - void onSuccess(InstanceGroupManagerList instanceGroupManagerList, HttpHeaders responseHeaders) throws IOException { - instanceGroupManagerList?.items?.each { InstanceGroupManager instanceGroupManager -> - if (Names.parseName(instanceGroupManager.name)) { - GoogleServerGroup serverGroup = buildServerGroupFromInstanceGroupManager(instanceGroupManager, instances) - serverGroups << serverGroup - - populateInstanceTemplate(providerCache, instanceGroupManager, serverGroup, instanceTemplates) - } - } - - def autoscalerCallback = new AutoscalerAggregatedListCallback(serverGroups: serverGroups) - compute.autoscalers().aggregatedList(project).queue(autoscalerRequest, autoscalerCallback) - - def nextPageToken = instanceGroupManagerList.getNextPageToken() - - if (nextPageToken) { - BatchRequest igmRequest = buildBatchRequest() - compute.instanceGroupManagers() - .list(project, zone) - .setPageToken(nextPageToken) - .setMaxResults(maxMIGPageSize) - .queue(igmRequest, this) - executeIfRequestsAreQueued(igmRequest, "ZonalServerGroupCaching.igm") - } - } - } - - GoogleServerGroup buildServerGroupFromInstanceGroupManager(InstanceGroupManager instanceGroupManager, List instances) { - String zone = Utils.getLocalName(instanceGroupManager.zone) - List groupInstances = instances.findAll { it.getName().startsWith(instanceGroupManager.getBaseInstanceName()) } - - Map namedPorts = [:] - instanceGroupManager.namedPorts.each { namedPorts[(it.name)] 
= it.port } - return new GoogleServerGroup( - name: instanceGroupManager.name, - instances: groupInstances, - region: region, - zone: zone, - namedPorts: namedPorts, - zones: [zone], - selfLink: instanceGroupManager.selfLink, - currentActions: instanceGroupManager.currentActions, - launchConfig: [createdTime: Utils.getTimeFromTimestamp(instanceGroupManager.creationTimestamp)], - asg: [minSize : instanceGroupManager.targetSize, - maxSize : instanceGroupManager.targetSize, - desiredCapacity: instanceGroupManager.targetSize], - autoHealingPolicy: instanceGroupManager.autoHealingPolicies?.getAt(0) - ) - } - - void populateInstanceTemplate(ProviderCache providerCache, InstanceGroupManager instanceGroupManager, - GoogleServerGroup serverGroup, List instanceTemplates) { - String instanceTemplateName = Utils.getLocalName(instanceGroupManager.instanceTemplate) - List loadBalancerNames = - Utils.deriveNetworkLoadBalancerNamesFromTargetPoolUrls(instanceGroupManager.getTargetPools()) - InstanceTemplate template = instanceTemplates.find { it -> it.getName() == instanceTemplateName } - populateServerGroupWithTemplate(serverGroup, providerCache, loadBalancerNames, template, accountName, project, objectMapper) - } - } - - static void populateServerGroupWithTemplate(GoogleServerGroup serverGroup, ProviderCache providerCache, - List loadBalancerNames, InstanceTemplate instanceTemplate, - String accountName, String project, ObjectMapper objectMapper) { - serverGroup.with { - networkName = Utils.decorateXpnResourceIdIfNeeded(project, instanceTemplate?.properties?.networkInterfaces?.getAt(0)?.network) - canIpForward = instanceTemplate?.properties?.canIpForward - instanceTemplateTags = instanceTemplate?.properties?.tags?.items - instanceTemplateLabels = instanceTemplate?.properties?.labels - launchConfig.with { - launchConfigurationName = instanceTemplate?.name - instanceType = instanceTemplate?.properties?.machineType - minCpuPlatform = instanceTemplate?.properties?.minCpuPlatform - } - } - // "instanceTemplate = instanceTemplate" in the above ".with{ }" blocks doesn't work because Groovy thinks it's - // assigning the same variable to itself, instead of to the "launchConfig" entry - serverGroup.launchConfig.instanceTemplate = instanceTemplate - - sortWithBootDiskFirst(serverGroup) - - def sourceImageUrl = instanceTemplate?.properties?.disks?.find { disk -> - disk.boot - }?.initializeParams?.sourceImage - if (sourceImageUrl) { - serverGroup.launchConfig.imageId = Utils.getLocalName(sourceImageUrl) - - def imageKey = Keys.getImageKey(accountName, serverGroup.launchConfig.imageId) - def image = providerCache.get(IMAGES.ns, imageKey) - - extractBuildInfo(image?.attributes?.image?.description, serverGroup) - } - - def instanceMetadata = instanceTemplate?.properties?.metadata - setLoadBalancerMetadataOnInstance(loadBalancerNames, instanceMetadata, serverGroup, objectMapper) - } - - static void populateLoadBalancerKeys(GoogleServerGroup serverGroup, List loadBalancerKeys, String accountName, String region) { - serverGroup.asg.get(REGIONAL_LOAD_BALANCER_NAMES).each { String loadBalancerName -> - loadBalancerKeys << Keys.getLoadBalancerKey(region, accountName, loadBalancerName) - } - serverGroup.asg.get(GLOBAL_LOAD_BALANCER_NAMES).each { String loadBalancerName -> - loadBalancerKeys << Keys.getLoadBalancerKey("global", accountName, loadBalancerName) - } - } - - static void sortWithBootDiskFirst(GoogleServerGroup serverGroup) { - // Ensure that the boot disk is listed as the first persistent disk. 
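// Hypothetical input/output for the reordering below: given template disks
//   [data (PERSISTENT, boot=false), boot (PERSISTENT, boot=true)]
// the boot disk is moved to the front so consumers can rely on disks[0] being the boot device.
def disks = [[name: 'data', type: 'PERSISTENT', boot: false],
             [name: 'boot', type: 'PERSISTENT', boot: true]]
def sorted = [disks.find { it.boot }] + disks.findAll { !it.boot }
assert sorted*.name == ['boot', 'data']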
- if (serverGroup.launchConfig.instanceTemplate?.properties?.disks) { - def persistentDisks = serverGroup.launchConfig.instanceTemplate.properties.disks.findAll { it.type == "PERSISTENT" } - - if (persistentDisks && !persistentDisks.first().boot) { - def sortedDisks = [] - def firstBootDisk = persistentDisks.find { it.boot } - - if (firstBootDisk) { - sortedDisks << firstBootDisk - } - - sortedDisks.addAll(serverGroup.launchConfig.instanceTemplate.properties.disks.findAll { !it.boot }) - serverGroup.launchConfig.instanceTemplate.properties.disks = sortedDisks - } - } - } - - /** - * Set load balancing metadata on the server group from the instance template. - * - * @param loadBalancerNames -- Network load balancer names specified by target pools. - * @param instanceMetadata -- Metadata associated with the instance template. - * @param serverGroup -- Server groups built from the instance template. - */ - static void setLoadBalancerMetadataOnInstance(List loadBalancerNames, - Metadata instanceMetadata, - GoogleServerGroup serverGroup, - ObjectMapper objectMapper) { - if (instanceMetadata) { - def metadataMap = Utils.buildMapFromMetadata(instanceMetadata) - def regionalLBNameList = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.split(",") - def globalLBNameList = metadataMap?.get(GLOBAL_LOAD_BALANCER_NAMES)?.split(",") - def backendServiceList = metadataMap?.get(BACKEND_SERVICE_NAMES)?.split(",") - def policyJson = metadataMap?.get(LOAD_BALANCING_POLICY) - - if (globalLBNameList) { - serverGroup.asg.put(GLOBAL_LOAD_BALANCER_NAMES, globalLBNameList) - } - if (backendServiceList) { - serverGroup.asg.put(BACKEND_SERVICE_NAMES, backendServiceList) - } - if (policyJson) { - serverGroup.asg.put(LOAD_BALANCING_POLICY, objectMapper.readValue(policyJson, GoogleHttpLoadBalancingPolicy)) - } - - if (regionalLBNameList) { - serverGroup.asg.put(REGIONAL_LOAD_BALANCER_NAMES, regionalLBNameList) - - // The isDisabled property of a server group is set based on whether there are associated target pools, - // and whether the metadata of the server group contains a list of load balancers to actually associate - // the server group with. - // We set the disabled state for L4 lBs here (before writing into the cache) and calculate - // the L7 disabled state when we read the server groups from the cache. 
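// A small sketch of the metadata contract handled above: load balancer names arrive as
// comma-separated instance-template metadata values. The map keys here are hypothetical
// stand-ins for the REGIONAL_LOAD_BALANCER_NAMES / GLOBAL_LOAD_BALANCER_NAMES constants.
def metadataMap = [regionalKey: 'nlb-a,nlb-b', globalKey: 'glb-a']
def regionalNames = metadataMap['regionalKey']?.split(',')
assert regionalNames.toList() == ['nlb-a', 'nlb-b']
// With no target-pool-derived names, loadBalancerNames is empty and the L4 server group is
// marked disabled by the setDisabled(...) call that follows.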
- serverGroup.setDisabled(loadBalancerNames.empty) - } - } - } - - static void extractBuildInfo(String imageDescription, GoogleServerGroup googleServerGroup) { - if (imageDescription) { - def descriptionTokens = imageDescription?.tokenize(",") - def appVersionTag = findTagValue(descriptionTokens, "appversion") - Map buildInfo = null - - if (appVersionTag) { - def appVersion = AppVersion.parseName(appVersionTag) - - if (appVersion) { - buildInfo = [package_name: appVersion.packageName, version: appVersion.version, commit: appVersion.commit] as Map - - if (appVersion.buildJobName) { - buildInfo.jenkins = [name: appVersion.buildJobName, number: appVersion.buildNumber] - } - - def buildHostTag = findTagValue(descriptionTokens, "build_host") - - if (buildHostTag && buildInfo.containsKey("jenkins")) { - ((Map)buildInfo.jenkins).host = buildHostTag - } - - def buildInfoUrlTag = findTagValue(descriptionTokens, "build_info_url") - - if (buildInfoUrlTag) { - buildInfo.buildInfoUrl = buildInfoUrlTag - } - } - - if (buildInfo) { - googleServerGroup.buildInfo = buildInfo - } - } - } - } - - static String findTagValue(List descriptionTokens, String tagKey) { - def matchingKeyValuePair = descriptionTokens?.find { keyValuePair -> - keyValuePair.trim().startsWith("$tagKey: ") - } - - matchingKeyValuePair ? matchingKeyValuePair.trim().substring(tagKey.length() + 2) : null - } - - class AutoscalerSingletonCallback extends JsonBatchCallback { - - GoogleServerGroup serverGroup - - @Override - void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { - // 404 is thrown if the autoscaler does not exist in the given zone. Any other exception needs to be propagated. - if (e.code != 404) { - def errorJson = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e) - log.error errorJson - } - } - - @Override - void onSuccess(Autoscaler autoscaler, HttpHeaders responseHeaders) throws IOException { - serverGroup.autoscalingPolicy = autoscaler.getAutoscalingPolicy() - serverGroup.asg.minSize = serverGroup.autoscalingPolicy.minNumReplicas - serverGroup.asg.maxSize = serverGroup.autoscalingPolicy.maxNumReplicas - - List statusDetails = autoscaler.statusDetails - - if (statusDetails) { - serverGroup.autoscalingMessages = statusDetails.collect { it.message } - } - } - } - - class AutoscalerAggregatedListCallback extends JsonBatchCallback implements FailureLogger { - - List serverGroups - - @Override - void onSuccess(AutoscalerAggregatedList autoscalerAggregatedList, HttpHeaders responseHeaders) throws IOException { - autoscalerAggregatedList?.items?.each { String location, AutoscalersScopedList autoscalersScopedList -> - if (location.startsWith("zones/")) { - def localZoneName = Utils.getLocalName(location) - def region = localZoneName.substring(0, localZoneName.lastIndexOf('-')) - - autoscalersScopedList.autoscalers.each { Autoscaler autoscaler -> - def migName = Utils.getLocalName(autoscaler.target as String) - def serverGroup = serverGroups.find { - it.name == migName && it.region == region - } - - if (serverGroup) { - serverGroup.autoscalingPolicy = autoscaler.getAutoscalingPolicy() - serverGroup.asg.minSize = serverGroup.autoscalingPolicy.minNumReplicas - serverGroup.asg.maxSize = serverGroup.autoscalingPolicy.maxNumReplicas - - List statusDetails = autoscaler.statusDetails - - if (statusDetails) { - serverGroup.autoscalingMessages = statusDetails.collect { it.message } - } - } - } - } - } - } - } -} diff --git 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy deleted file mode 100644 index 963c1626927..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.provider.config - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.config.GoogleConfiguration -import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties -import com.netflix.spinnaker.clouddriver.google.provider.GoogleInfrastructureProvider -import com.netflix.spinnaker.clouddriver.google.provider.agent.* -import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.* - -import java.util.concurrent.ConcurrentHashMap - -@Configuration -@Import(GoogleConfiguration) -@EnableConfigurationProperties -class GoogleInfrastructureProviderConfig { - - @Autowired Registry registry - - @Bean - @DependsOn('googleNamedAccountCredentials') - GoogleInfrastructureProvider googleInfrastructureProvider(String clouddriverUserAgentApplicationName, - GoogleConfigurationProperties googleConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { - def googleInfrastructureProvider = - new GoogleInfrastructureProvider(Collections.newSetFromMap(new ConcurrentHashMap())) - - synchronizeGoogleInfrastructureProvider(clouddriverUserAgentApplicationName, - googleConfigurationProperties, - googleInfrastructureProvider, - accountCredentialsRepository, - objectMapper, - registry) - - googleInfrastructureProvider - } - - @Bean - GoogleInfrastructureProviderSynchronizerTypeWrapper googleInfrastructureProviderSynchronizerTypeWrapper() { - new GoogleInfrastructureProviderSynchronizerTypeWrapper() - } - - class GoogleInfrastructureProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return GoogleInfrastructureProviderSynchronizer - } - } - 
- class GoogleInfrastructureProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - GoogleInfrastructureProviderSynchronizer synchronizeGoogleInfrastructureProvider( - String clouddriverUserAgentApplicationName, - GoogleConfigurationProperties googleConfigurationProperties, - GoogleInfrastructureProvider googleInfrastructureProvider, - AccountCredentialsRepository accountCredentialsRepository, - ObjectMapper objectMapper, - Registry registry) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(googleInfrastructureProvider) - def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, - GoogleNamedAccountCredentials) - - objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) - - allAccounts.each { GoogleNamedAccountCredentials credentials -> - if (!scheduledAccounts.contains(credentials.name)) { - def newlyAddedAgents = [] - def regions = credentials.regions.collect { it.name } - - newlyAddedAgents << new GoogleSecurityGroupCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleNetworkCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - - newlyAddedAgents << new GoogleGlobalAddressCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - - regions.each { String region -> - newlyAddedAgents << new GoogleSubnetCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - region) - newlyAddedAgents << new GoogleRegionalAddressCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - region) - } - - newlyAddedAgents << new GoogleHealthCheckCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleHttpHealthCheckCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleSslLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleSslCertificateCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleTcpLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleBackendServiceCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleInstanceCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - newlyAddedAgents << new GoogleImageCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - credentials.imageProjects, - googleConfigurationProperties.baseImageProjects) - newlyAddedAgents << new GoogleHttpLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry) - regions.each { String region -> - newlyAddedAgents << new GoogleInternalLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - region) - newlyAddedAgents << new GoogleNetworkLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - region) - newlyAddedAgents << new GoogleRegionalServerGroupCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - region, - 
googleConfigurationProperties.maxMIGPageSize) - newlyAddedAgents << new GoogleZonalServerGroupCachingAgent(clouddriverUserAgentApplicationName, - credentials, - objectMapper, - registry, - region, - googleConfigurationProperties.maxMIGPageSize) - } - - // If there is an agent scheduler, then this provider has been through the AgentController in the past. - // In that case, we need to do the scheduling here (because accounts have been added to a running system). - if (googleInfrastructureProvider.agentScheduler) { - ProviderUtils.rescheduleAgents(googleInfrastructureProvider, newlyAddedAgents) - } - - googleInfrastructureProvider.agents.addAll(newlyAddedAgents) - } - } - - new GoogleInfrastructureProviderSynchronizer() - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleApplicationProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleApplicationProvider.groovy deleted file mode 100644 index 6aa4e66920f..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleApplicationProvider.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider -import com.netflix.spinnaker.clouddriver.google.cache.Keys -import com.netflix.spinnaker.clouddriver.google.model.GoogleApplication -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.INSTANCES - -@Component -class GoogleApplicationProvider implements ApplicationProvider { - @Autowired - Registry registry - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Autowired - Cache cacheView - - @Autowired - ObjectMapper objectMapper - - @Override - Set getApplications(boolean expand) { - def filter = expand ? 
RelationshipCacheFilter.include(CLUSTERS.ns) : RelationshipCacheFilter.none() - cacheView.getAll(APPLICATIONS.ns, - cacheView.filterIdentifiers(APPLICATIONS.ns, "$GoogleCloudProvider.ID:*"), - filter).collect { applicationFromCacheData(it) } as Set - } - - @Override - GoogleApplication.View getApplication(String name) { - CacheData cacheData = cacheView.get(APPLICATIONS.ns, - Keys.getApplicationKey(name), - RelationshipCacheFilter.include(CLUSTERS.ns, INSTANCES.ns)) - if (cacheData) { - return applicationFromCacheData(cacheData) - } - } - - GoogleApplication.View applicationFromCacheData(CacheData cacheData) { - GoogleApplication.View applicationView = objectMapper.convertValue(cacheData.attributes, GoogleApplication)?.view - - cacheData.relationships[CLUSTERS.ns].each { String clusterKey -> - def clusterKeyParsed = Keys.parse(clusterKey) - applicationView.clusterNames[clusterKeyParsed.account] << clusterKeyParsed.name - } - - applicationView.instances = cacheData?.relationships?.get(INSTANCES.ns).collect { Keys.parse(it) } ?: [] - - applicationView - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy index 9016c88bcd0..c7f5f640ce6 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.google.provider.view import com.fasterxml.jackson.databind.ObjectMapper +import com.google.common.collect.ImmutableSet import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter @@ -27,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.google.model.* import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType @@ -68,35 +70,50 @@ class GoogleClusterProvider implements ClusterProvider { @Override Map> getClusterDetails(String applicationName) { - getClusters(applicationName, true /* detailed */) + getClusters(applicationName, true /* detailed */).collectEntries { k, v -> [k, new HashSet<>(v)] } } @Override Map> getClusterSummaries(String applicationName) { - getClusters(applicationName, false /* detailed */) + getClusters(applicationName, false /* detailed */).collectEntries { k, v -> [k, new HashSet<>(v)] } } Map> getClusters(String applicationName, boolean includeInstanceDetails) { - GoogleApplication.View application = applicationProvider.getApplication(applicationName) + GoogleApplicationProvider.ApplicationCacheData applicationCacheData = applicationProvider.getApplicationCacheData(applicationName) - def clusterKeys = [] - application?.clusterNames?.each { String accountName, Set clusterNames -> 
- clusterNames.each { String clusterName -> - clusterKeys << Keys.getClusterKey(accountName, applicationName, clusterName) - } + if (applicationCacheData == null) { + return new HashMap<>() } - // TODO(jacobkiefer): Avoid parsing instance keys into map just to re-serialize? - Set allApplicationInstanceKeys = includeInstanceDetails ? application?.instances?.collect { Keys.getInstanceKey(it.account, it.region, it.name) } : [] as Set - - List clusters = cacheView.getAll( - CLUSTERS.ns, - clusterKeys, - RelationshipCacheFilter.include(SERVER_GROUPS.ns)).collect { CacheData cacheData -> - clusterFromCacheData(cacheData, allApplicationInstanceKeys) + Set clusterIdentifiers = applicationCacheData.getClusterIdentifiers(); + Collection clusterCacheData = cacheView.getAll( + CLUSTERS.ns, + clusterIdentifiers, + RelationshipCacheFilter.include(SERVER_GROUPS.ns) + ) + + Set instanceIdentifiers = includeInstanceDetails ? + applicationCacheData.getInstanceIdentifiers() : + Collections.emptySet() + Collection instanceCacheData = instanceProvider.getInstanceCacheData(instanceIdentifiers) + + Map> clustersByAccount = new HashMap<>() + Map> securityGroupsByAccount = new HashMap<>() + + clusterCacheData.each { cacheData -> + String accountName = cacheData.getAttributes().get("accountName") + Set accountSecurityGroups = securityGroupsByAccount.computeIfAbsent( + accountName, + { a -> securityGroupProvider.getAllByAccount(false, accountName) } + ) + Set accountClusters = clustersByAccount.computeIfAbsent( + accountName, + { a -> new HashSet() } + ) + accountClusters.add(clusterFromCacheData(cacheData, instanceCacheData, accountSecurityGroups)) } - clusters?.groupBy { it.accountName } as Map> + return clustersByAccount } @Override @@ -123,21 +140,11 @@ class GoogleClusterProvider implements ClusterProvider { @Override GoogleServerGroup.View getServerGroup(String account, String region, String name, boolean includeDetails) { - def cacheData = cacheView.get(SERVER_GROUPS.ns, - Keys.getServerGroupKey(name, account, region), - RelationshipCacheFilter.include(LOAD_BALANCERS.ns, INSTANCES.ns)) + def cacheData = searchCacheForServerGroup(Keys.getServerGroupKey(name, "*", account, region)) if (!cacheData) { // No regional server group was found, so attempt to query for all zonal server groups in the region. 
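// Shape of the two-step lookup above (key layouts here are illustrative only; the real
// patterns come from Keys.getServerGroupKey): first try a regional server group key such as
//   gce:serverGroups:<name>:*:<account>:<region>
// and, if filterIdentifiers matches nothing, widen to zonal groups with a trailing zone wildcard:
//   gce:serverGroups:<name>:*:<account>:<region>:*
// searchCacheForServerGroup(), added further down, returns the first matching cache entry.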
- def pattern = Keys.getServerGroupKey(name, account, region, "*") - def identifiers = cacheView.filterIdentifiers(SERVER_GROUPS.ns, pattern) - def cacheDataResults = cacheView.getAll(SERVER_GROUPS.ns, - identifiers, - RelationshipCacheFilter.include(LOAD_BALANCERS.ns, INSTANCES.ns)) - - if (cacheDataResults) { - cacheData = cacheDataResults.first() - } + cacheData = searchCacheForServerGroup(Keys.getServerGroupKey(name, "*", account, region, "*")) } if (cacheData) { @@ -150,6 +157,18 @@ class GoogleClusterProvider implements ClusterProvider { } } + private CacheData searchCacheForServerGroup(String pattern) { + def identifiers = cacheView.filterIdentifiers(SERVER_GROUPS.ns, pattern) + def cacheDataResults = cacheView.getAll(SERVER_GROUPS.ns, + identifiers, + RelationshipCacheFilter.include(LOAD_BALANCERS.ns, INSTANCES.ns)) + + if (cacheDataResults) { + return cacheDataResults.first() + } + return null + } + @Override GoogleServerGroup.View getServerGroup(String account, String region, String name) { return getServerGroup(account, region, name, true) @@ -165,10 +184,22 @@ class GoogleClusterProvider implements ClusterProvider { return false } - GoogleCluster.View clusterFromCacheData(CacheData cacheData, Set instanceKeySuperSet) { - GoogleCluster.View clusterView = objectMapper.convertValue(cacheData.attributes, GoogleCluster)?.view + GoogleCluster.View clusterFromCacheData(CacheData clusterCacheData, Set instanceKeySuperSet) { + return clusterFromCacheData( + clusterCacheData, + instanceProvider.getInstanceCacheData(instanceKeySuperSet), + securityGroupProvider.getAllByAccount(false, (String) clusterCacheData.getAttributes().get("accountName")) + ) + } + + GoogleCluster.View clusterFromCacheData( + CacheData clusterCacheData, + Collection instanceCacheDataSuperSet, + Set securityGroups) + { + GoogleCluster.View clusterView = objectMapper.convertValue(clusterCacheData.attributes, GoogleCluster)?.view - def serverGroupKeys = cacheData.relationships[SERVER_GROUPS.ns] + def serverGroupKeys = clusterCacheData.relationships[SERVER_GROUPS.ns] if (serverGroupKeys) { log.debug("Server group keys from cluster relationships: ${serverGroupKeys}") def filter = RelationshipCacheFilter.include(LOAD_BALANCERS.ns) @@ -176,12 +207,11 @@ class GoogleClusterProvider implements ClusterProvider { def serverGroupData = cacheView.getAll(SERVER_GROUPS.ns, serverGroupKeys, filter) log.debug("Retrieved cache data for server groups: ${serverGroupData?.collect { it?.attributes?.name }}") - def securityGroups = securityGroupProvider.getAllByAccount(false, clusterView.accountName) + def instanceCacheData = instanceCacheDataSuperSet.findAll { instance -> + instance.relationships.get(CLUSTERS.ns)?.collect { Keys.parse(it).cluster }?.any { it.contains(clusterView.name) } + } - // TODO(duftler): De-frigga this. 
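// The hunk here trades the frigga-derived regex (deleted just below) for the relationship
// walk added just above: instead of rebuilding instance keys and matching them against
// "$clusterView.name-.*", each instance's CLUSTERS.ns relationships are parsed with
// Keys.parse() and compared by cluster name. A hypothetical parsed key:
//   Keys.parse('gce:clusters:my-account:myapp:myapp-dev').cluster == 'myapp-dev'
// (key layout illustrative), which avoids the TODO'd frigga round-trip entirely.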
- def clusterInstancePattern = Keys.getInstanceKey(clusterView.accountName, ".*", "$clusterView.name-.*") - def instanceKeys = instanceKeySuperSet.findAll { it ==~ clusterInstancePattern } - def instances = instanceProvider.getInstances(clusterView.accountName, instanceKeys as List, securityGroups) + def instances = instanceProvider.getInstancesFromCacheData(clusterView.accountName, instanceCacheData, securityGroups) def loadBalancerKeys = serverGroupData.collect { serverGroup -> serverGroup.relationships[LOAD_BALANCERS.ns] as List @@ -191,7 +221,7 @@ class GoogleClusterProvider implements ClusterProvider { serverGroupData.each { CacheData serverGroupCacheData -> GoogleServerGroup serverGroup = serverGroupFromCacheData(serverGroupCacheData, clusterView.accountName, instances, securityGroups, loadBalancers) clusterView.serverGroups << serverGroup.view - clusterView.loadBalancers.addAll(serverGroup.loadBalancers*.view) + clusterView.loadBalancers.addAll(serverGroup.loadBalancers) } log.debug("Server groups added to cluster: ${clusterView?.serverGroups?.collect { it?.name }}") } @@ -209,6 +239,9 @@ class GoogleClusterProvider implements ClusterProvider { case GoogleLoadBalancerType.HTTP: loadBalancer = objectMapper.convertValue(it.attributes, GoogleHttpLoadBalancer) break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + loadBalancer = objectMapper.convertValue(it.attributes, GoogleInternalHttpLoadBalancer) + break case GoogleLoadBalancerType.NETWORK: loadBalancer = objectMapper.convertValue(it.attributes, GoogleNetworkLoadBalancer) break @@ -232,6 +265,7 @@ class GoogleClusterProvider implements ClusterProvider { Set securityGroups, Set loadBalancers) { GoogleServerGroup serverGroup = objectMapper.convertValue(cacheData.attributes, GoogleServerGroup) + serverGroup.account = account def loadBalancerKeys = cacheData.relationships[LOAD_BALANCERS.ns] loadBalancers = loadBalancers.findAll { loadBalancer -> @@ -240,10 +274,11 @@ class GoogleClusterProvider implements ClusterProvider { } serverGroup.loadBalancers = loadBalancers*.view - serverGroup.securityGroups = GoogleSecurityGroupProvider.getMatchingServerGroupNames( + serverGroup.securityGroups = GoogleSecurityGroupProvider.getMatchingSecurityGroupNames( account, securityGroups, serverGroup.instanceTemplateTags, + serverGroup.instanceTemplateServiceAccounts, serverGroup.networkName) if (instances) { @@ -270,6 +305,11 @@ class GoogleClusterProvider implements ClusterProvider { Utils.determineHttpLoadBalancerDisabledState(loadBalancer, serverGroup) } + def internalHttpLoadBalancers = loadBalancers.findAll { it.type == GoogleLoadBalancerType.INTERNAL_MANAGED } + def internalHttpDisabledStates = internalHttpLoadBalancers.collect { loadBalancer -> + Utils.determineInternalHttpLoadBalancerDisabledState(loadBalancer, serverGroup) + } + def sslLoadBalancers = loadBalancers.findAll { it.type == GoogleLoadBalancerType.SSL } def sslDisabledStates = sslLoadBalancers.collect { loadBalancer -> Utils.determineSslLoadBalancerDisabledState(loadBalancer, serverGroup) @@ -300,6 +340,9 @@ class GoogleClusterProvider implements ClusterProvider { if (internalDisabledStates) { isDisabled &= internalDisabledStates.every { it } } + if (internalHttpDisabledStates) { + isDisabled &= internalHttpDisabledStates.every { it } + } if (sslDisabledStates) { isDisabled &= sslDisabledStates.every { it } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleInstanceProvider.groovy 
b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleInstanceProvider.groovy index 6dbbc655277..4e502e38bbe 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleInstanceProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleInstanceProvider.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.google.provider.view import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.iam.v1.model.ServiceAccount import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData @@ -39,7 +40,7 @@ import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.* @Component @Slf4j -class GoogleInstanceProvider implements InstanceProvider, GoogleExecutorTraits { +class GoogleInstanceProvider implements InstanceProvider, GoogleExecutorTraits { @Autowired final Cache cacheView @@ -68,19 +69,24 @@ class GoogleInstanceProvider implements InstanceProvider, G } /** - * Non-interface method for efficient building of GoogleInstance models during cluster or server group requests. + * Non-interface methods for efficient building of GoogleInstance models during cluster or server group requests. */ List getInstances(String account, List instanceKeys, Set securityGroups) { - getInstanceCacheData(instanceKeys)?.collect { + getInstancesFromCacheData(account, getInstanceCacheData(instanceKeys), securityGroups) + } + + List getInstancesFromCacheData(String account, Collection cacheData, Set securityGroups) { + cacheData?.collect { instanceFromCacheData(it, account, securityGroups) } } - Collection getInstanceCacheData(List keys) { + Collection getInstanceCacheData(Collection keys) { cacheView.getAll(INSTANCES.ns, keys, RelationshipCacheFilter.include(LOAD_BALANCERS.ns, - SERVER_GROUPS.ns)) + SERVER_GROUPS.ns, + CLUSTERS.ns)) } @Override @@ -136,10 +142,11 @@ class GoogleInstanceProvider implements InstanceProvider, G instance.serverGroup = serverGroup } - instance.securityGroups = GoogleSecurityGroupProvider.getMatchingServerGroupNames( + instance.securityGroups = GoogleSecurityGroupProvider.getMatchingSecurityGroupNames( account, securityGroups, instance.tags.items as Set, + instance.serviceAccounts as Set, instance.networkName) instance diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy index 116352c2921..79dff8dc61c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy @@ -19,11 +19,13 @@ package com.netflix.spinnaker.clouddriver.google.provider.view import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.annotation.JsonProperty import com.fasterxml.jackson.databind.ObjectMapper +import com.google.common.base.Strings import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.cache.Keys 
+import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils @@ -52,21 +54,30 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider getApplicationLoadBalancers(String application) { - def pattern = Keys.getLoadBalancerKey("*", "*", "${application}*") - def identifiers = cacheView.filterIdentifiers(LOAD_BALANCERS.ns, pattern) - - def applicationServerGroups = cacheView.getAll( + String pattern = Keys.getLoadBalancerKey("*", "*", "${application}*") + Set identifiers = cacheView.filterIdentifiers(LOAD_BALANCERS.ns, pattern).toSet() + + // It is possible to configure a server group in application A to use a load balancer from + // application B. Therefore, if (and only if) we have not already retrieved load balancer + // identifiers for all applications, we need to retrieve identifiers for every load balancer + // associated with a server group from this application. + if (!Strings.isNullOrEmpty(application)) { + Collection applicationServerGroups = cacheView.getAll( SERVER_GROUPS.ns, cacheView.filterIdentifiers(SERVER_GROUPS.ns, "${GoogleCloudProvider.ID}:*:${application}-*") - ) - applicationServerGroups.each { CacheData serverGroup -> - identifiers.addAll(serverGroup.relationships[LOAD_BALANCERS.ns] ?: []) + ) + applicationServerGroups.each { CacheData serverGroup -> + Collection relatedLoadBalancers = serverGroup.relationships[LOAD_BALANCERS.ns] ?: [] + relatedLoadBalancers.each { String lb -> + identifiers.add(lb) + } + } } // TODO(duftler): De-frigga this. cacheView.getAll(LOAD_BALANCERS.ns, - identifiers.unique(), + identifiers, RelationshipCacheFilter.include(SERVER_GROUPS.ns, INSTANCES.ns)).collect { CacheData loadBalancerCacheData -> loadBalancersFromCacheData(loadBalancerCacheData, (loadBalancerCacheData?.relationships?.get(INSTANCES.ns) ?: []) as Set) } as Set @@ -78,6 +89,9 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider - backendServices << hostRule?.pathMatcher?.defaultService?.name - hostRule?.pathMatcher?.pathRules?.each { GooglePathRule pathRule -> - backendServices << pathRule.backendService.name - } - } + backendServices = Utils.getBackendServicesFromHttpLoadBalancerView(httpView).collect { it.name } + urlMapName = httpView.urlMapName + break + case (GoogleLoadBalancerType.INTERNAL_MANAGED): + GoogleInternalHttpLoadBalancer.InternalHttpLbView httpView = view as GoogleInternalHttpLoadBalancer.InternalHttpLbView + backendServices = Utils.getBackendServicesFromInternalHttpLoadBalancerView(httpView).collect { it.name } urlMapName = httpView.urlMapName break case (GoogleLoadBalancerType.INTERNAL): @@ -249,24 +264,29 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider backendServices = Utils.getBackendServicesFromHttpLoadBalancerView(httpView) - backendServices?.each { GoogleBackendService backendService -> - backendServiceHealthChecks[backendService.name] = backendService.healthCheck.view - } - } - String instancePort String loadBalancerPort + String sessionAffinity switch (view.loadBalancerType) { case GoogleLoadBalancerType.NETWORK: + GoogleNetworkLoadBalancer.View nlbView = view as GoogleNetworkLoadBalancer.View + sessionAffinity = nlbView.sessionAffinity instancePort = Utils.derivePortOrPortRange(view.portRange) loadBalancerPort = Utils.derivePortOrPortRange(view.portRange) break case GoogleLoadBalancerType.HTTP: 
instancePort = 'http' loadBalancerPort = Utils.derivePortOrPortRange(view.portRange) + GoogleHttpLoadBalancer.View httpView = view as GoogleHttpLoadBalancer.View + List backendServices = Utils.getBackendServicesFromHttpLoadBalancerView(httpView) + backendServiceHealthChecks = backendServices.collectEntries { [it.name, it.healthCheck.view] } + break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + instancePort = 'http' + loadBalancerPort = Utils.derivePortOrPortRange(view.portRange) + GoogleInternalHttpLoadBalancer.InternalHttpLbView httpView = view as GoogleInternalHttpLoadBalancer.InternalHttpLbView + List backendServices = Utils.getBackendServicesFromInternalHttpLoadBalancerView(httpView) + backendServiceHealthChecks = backendServices.collectEntries { [it.name, it.healthCheck.view] } break case GoogleLoadBalancerType.INTERNAL: GoogleInternalLoadBalancer.View ilbView = view as GoogleInternalLoadBalancer.View @@ -291,6 +311,7 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider backendServiceHealthChecks = [:] // TODO(ttomsu): Bizarre nesting of data. Necessary? List> listenerDescriptions = [] diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProvider.groovy index ecdcd8722fa..aa2df66e2fc 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProvider.groovy @@ -17,11 +17,13 @@ package com.netflix.spinnaker.clouddriver.google.provider.view import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.iam.v1.model.ServiceAccount import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.cache.Keys +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleSecurityGroup import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials @@ -30,6 +32,7 @@ import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @@ -40,15 +43,15 @@ import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SECU @Component class GoogleSecurityGroupProvider implements SecurityGroupProvider { - private final AccountCredentialsProvider accountCredentialsProvider + private final CredentialsRepository credentialsRepository private final Cache cacheView private final ObjectMapper objectMapper final String cloudProvider = GoogleCloudProvider.ID @Autowired - GoogleSecurityGroupProvider(AccountCredentialsProvider accountCredentialsProvider, Cache cacheView, ObjectMapper 
objectMapper) { - this.accountCredentialsProvider = accountCredentialsProvider + GoogleSecurityGroupProvider(CredentialsRepository credentialsRepository, Cache cacheView, ObjectMapper objectMapper) { + this.credentialsRepository = credentialsRepository this.cacheView = cacheView this.objectMapper = objectMapper } @@ -84,6 +87,11 @@ class GoogleSecurityGroupProvider implements SecurityGroupProvider getAllMatchingKeyPattern(String pattern, boolean includeRules) { loadResults(includeRules, cacheView.filterIdentifiers(SECURITY_GROUPS.ns, pattern)) } @@ -99,23 +107,26 @@ class GoogleSecurityGroupProvider implements SecurityGroupProvider parts = Keys.parse(cacheData.id) + def project = cacheData.attributes.project - return convertToGoogleSecurityGroup(includeRules, firewall, parts.account, parts.region) + return convertToGoogleSecurityGroup(includeRules, firewall, parts.account, parts.region, project) } - private GoogleSecurityGroup convertToGoogleSecurityGroup(boolean includeRules, Map firewall, String account, String region) { + private GoogleSecurityGroup convertToGoogleSecurityGroup(boolean includeRules, Map firewall, String account, String region, String project) { List inboundRules = includeRules ? buildInboundIpRangeRules(firewall) : [] new GoogleSecurityGroup( - id: deriveResourceId(account, firewall.selfLink), + id: deriveResourceId(project, firewall.selfLink), name: firewall.name, description: firewall.description, accountName: account, region: region, - network: deriveResourceId(account, firewall.network), + network: deriveResourceId(project, firewall.network), selfLink: firewall.selfLink, sourceTags: firewall.sourceTags, targetTags: firewall.targetTags, + sourceServiceAccounts: firewall.sourceServiceAccounts, + targetServiceAccounts: firewall.targetServiceAccounts, inboundRules: inboundRules ) } @@ -126,7 +137,7 @@ class GoogleSecurityGroupProvider implements SecurityGroupProvider getMatchingServerGroupNames(String account, - Set securityGroups, - Set tags, - String networkName) { + /** + * Calculates security group names that match account, networkName, and tags. + * @param account - GCE account name. + * @param securityGroups - Set of security groups to filter. + * @param tags - GCE network tags to filter security groups by. + * @param serviceAccounts - GCE service accounts to filter security groups by. + * @param networkName - GCE network name. + * @return Security group names that match account, networkName, and network tags. + */ + static List getMatchingSecurityGroupNames(String account, + Set securityGroups, + Set tags, + Set serviceAccounts, + String networkName) { tags = tags ?: [] as Set + serviceAccounts = serviceAccounts ?: [] as Set securityGroups?.findResults { GoogleSecurityGroup securityGroup -> def accountAndNetworkMatch = securityGroup.accountName == account && securityGroup.network == networkName - boolean targetTagsEmpty = !securityGroup.targetTags + if (!accountAndNetworkMatch) { + return null + } + + boolean hasTargetTags = securityGroup.targetTags def targetTagsInCommon = [] - if (!targetTagsEmpty) { + if (hasTargetTags) { targetTagsInCommon = (securityGroup.targetTags).intersect(tags) } - accountAndNetworkMatch && (targetTagsEmpty || !targetTagsInCommon.empty) ?
securityGroup.name : null + boolean hasTargetServiceAccounts = securityGroup.targetServiceAccounts + def targetServiceAccountsInCommon = [] + + serviceAccounts.each { serviceAccount -> + if (serviceAccount.email in securityGroup.targetServiceAccounts) + targetServiceAccountsInCommon.add(serviceAccount.email) + } + + // Firewall rules can apply to all instances, in which case neither tags nor service accounts are present. + boolean isDefaultFirewallRule = !hasTargetTags && !hasTargetServiceAccounts + + (isDefaultFirewallRule || targetTagsInCommon || targetServiceAccountsInCommon) ? securityGroup.name : null } ?: [] } - private String deriveResourceId(String account, String resourceLink) { - def accountCredentials = accountCredentialsProvider.getCredentials(account) - - if (!(accountCredentials instanceof GoogleNamedAccountCredentials)) { - throw new IllegalArgumentException("Invalid credentials: $account") - } + private String deriveResourceId(String project, String resourceLink) { - def project = accountCredentials.project def firewallProject = GCEUtil.deriveProjectId(resourceLink) def firewallId = GCEUtil.getLocalName(resourceLink) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy index 0dfa1c5d9f6..4f37e224f6c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy @@ -67,28 +67,24 @@ class GoogleSubnetProvider implements SubnetProvider { GoogleSubnet fromCacheData(CacheData cacheData) { Map subnet = cacheData.attributes.subnet Map parts = Keys.parse(cacheData.id) + def project = cacheData.attributes.project new GoogleSubnet( type: this.cloudProvider, id: parts.id, name: subnet.name, gatewayAddress: subnet.gatewayAddress, - network: deriveNetworkId(parts.account, subnet), + network: deriveNetworkId(project, subnet), cidrBlock: subnet.ipCidrRange, account: parts.account, region: parts.region, - selfLink: subnet.selfLink + selfLink: subnet.selfLink, + purpose: subnet.purpose ?: "n/a" ) } - private String deriveNetworkId(String account, Map subnet) { - def accountCredentials = accountCredentialsProvider.getCredentials(account) + private String deriveNetworkId(String project, Map subnet) { - if (!(accountCredentials instanceof GoogleNamedAccountCredentials)) { - throw new IllegalArgumentException("Invalid credentials: $account") - } - - def project = accountCredentials.project def networkProject = GCEUtil.deriveProjectId(subnet.network) def networkId = GCEUtil.getLocalName(subnet.network) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentials.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentials.groovy index 3125ac17a16..02a4cc02c7a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentials.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentials.groovy @@ -18,26 +18,27 @@ package com.netflix.spinnaker.clouddriver.google.security import com.google.api.client.http.HttpTransport import com.google.api.client.json.JsonFactory -import 
com.google.api.client.json.jackson2.JacksonFactory +import com.google.api.client.json.gson.GsonFactory import com.google.api.services.compute.Compute import com.netflix.spinnaker.clouddriver.google.ComputeVersion import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentials +import groovy.transform.CompileStatic import groovy.transform.TupleConstructor @TupleConstructor +@CompileStatic public class GoogleCredentials extends GoogleCommonCredentials { final String project final ComputeVersion computeVersion Compute getCompute(String applicationName) { - JsonFactory jsonFactory = JacksonFactory.getDefaultInstance() HttpTransport httpTransport = buildHttpTransport() + JsonFactory jsonFactory = GsonFactory.getDefaultInstance() - def credential = getCredential(httpTransport, jsonFactory) - def reqInit = setHttpTimeout(credential) - def computeBuilder = new Compute.Builder(httpTransport, jsonFactory, credential) + def credentials = getCredentials() + def reqInit = setHttpTimeout(credentials) + def computeBuilder = new Compute.Builder(httpTransport, jsonFactory, reqInit) .setApplicationName(applicationName) - .setHttpRequestInitializer(reqInit) if (computeVersion.servicePath) { computeBuilder.setServicePath(computeVersion.servicePath) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsInitializer.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsInitializer.groovy deleted file mode 100644 index bb602f59eb9..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsInitializer.groovy +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.google.security - -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.google.ComputeVersion -import com.netflix.spinnaker.config.GoogleConfiguration.DeployDefaults -import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties -import com.netflix.spinnaker.clouddriver.googlecommon.GoogleExecutor -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.ApplicationContext -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope - -@Slf4j -@Configuration -class GoogleCredentialsInitializer implements CredentialsInitializerSynchronizable { - - @Autowired - GoogleExecutor _googleExecutor // Not used, just here to force initialization ordering - - @Bean - GoogleExecutor initGoogleExecutor() { // This is to satisfy the autowiring - return new GoogleExecutor() - } - - @Bean - List googleNamedAccountCredentials(String clouddriverUserAgentApplicationName, - GoogleConfigurationProperties googleConfigurationProperties, - ApplicationContext applicationContext, - AccountCredentialsRepository accountCredentialsRepository, - List providerSynchronizerTypeWrappers, - DeployDefaults googleDeployDefaults) { - synchronizeGoogleAccounts(clouddriverUserAgentApplicationName, googleConfigurationProperties, null, applicationContext, accountCredentialsRepository, providerSynchronizerTypeWrappers, googleDeployDefaults) - } - - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeGoogleAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - List synchronizeGoogleAccounts(String clouddriverUserAgentApplicationName, - GoogleConfigurationProperties googleConfigurationProperties, - CatsModule catsModule, - ApplicationContext applicationContext, - AccountCredentialsRepository accountCredentialsRepository, - List providerSynchronizerTypeWrappers, - DeployDefaults googleDeployDefaults) { - def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = - ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, - GoogleNamedAccountCredentials, - googleConfigurationProperties.accounts) - - accountsToAdd.each { GoogleConfigurationProperties.ManagedAccount managedAccount -> - try { - def jsonKey = GoogleCredentialsInitializer.getJsonKey(managedAccount) - def googleAccount = new GoogleNamedAccountCredentials.Builder() - .name(managedAccount.name) - .environment(managedAccount.environment ?: managedAccount.name) - .accountType(managedAccount.accountType ?: managedAccount.name) - .project(managedAccount.project) - .computeVersion(managedAccount.alphaListed ? 
ComputeVersion.ALPHA : ComputeVersion.DEFAULT) - .jsonKey(jsonKey) - .serviceAccountId(managedAccount.serviceAccountId) - .serviceAccountProject(managedAccount.serviceAccountProject) - .imageProjects(managedAccount.imageProjects) - .requiredGroupMembership(managedAccount.requiredGroupMembership) - .permissions(managedAccount.permissions.build()) - .applicationName(clouddriverUserAgentApplicationName) - .consulConfig(managedAccount.consul) - .instanceTypeDisks(googleDeployDefaults.instanceTypeDisks) - .userDataFile(managedAccount.userDataFile) - .regionsToManage(managedAccount.regions, googleConfigurationProperties.defaultRegions) - .build() - - if (!managedAccount.project) { - throw new IllegalArgumentException("No project was specified for Google account $managedAccount.name."); - } - - accountCredentialsRepository.save(managedAccount.name, googleAccount) - } catch (e) { - log.error "Could not load account ${managedAccount.name} for Google.", e - if (managedAccount.required) { - throw new IllegalArgumentException("Could not load required account ${managedAccount.name} for Google.", e) - } - } - } - - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - - if (accountsToAdd && catsModule) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers) - } - - accountCredentialsRepository.all.findAll { - it instanceof GoogleNamedAccountCredentials - } as List - } - - private static String getJsonKey(GoogleConfigurationProperties.ManagedAccount managedAccount) { - def inputStream = managedAccount.inputStream - - inputStream ? new String(managedAccount.inputStream.bytes) : null - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedCredential.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedCredential.groovy deleted file mode 100644 index 8fa9422fca2..00000000000 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedCredential.groovy +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.google.security - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.auth.oauth2.TokenRequest -import com.google.api.client.auth.oauth2.TokenResponse -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.http.GenericUrl -import com.google.api.services.compute.ComputeScopes -import com.google.api.services.iam.v1.Iam -import com.google.api.services.iam.v1.model.SignJwtRequest -import com.google.common.base.Joiner - -import java.security.GeneralSecurityException - -class GoogleImpersonatedCredential extends GoogleCredential { - - String serviceAccountId - String serviceAccountProject - - ObjectMapper mapper = new ObjectMapper() - - GoogleImpersonatedCredential(GoogleCredential.Builder builder, String serviceAccountId, String serviceAccountProject) { - super(builder) - // These fields can't be in the builder because of some null checking that prohibits the ability - // to do this impersonation. - this.serviceAccountId = serviceAccountId - this.serviceAccountProject = serviceAccountProject - } - - /** - * This modifies the behavior of the GoogleCredential refresh to allow the ability for the - * instance's service account (aka application default credentials) to impersonate another service - * account by using the signJwt endpoint. The instance's service account must have the - * 'Service Account Token Creator' role. - * - * This use case differs slightly from the 'GoogleCredential.Builder().setServiceAccountUser.' - * With that setter method, the credential still requires the private key of the service account. - * Attempts to circumvent that requirement (by using reflection magic to set the field) resulted - * in a 401 Unauthorized response from the server. - * - * This only applies when running Clouddriver on GCP (where Application Default Credentials are - * available). You can emulate this environment by setting `GOOGLE_APPLICATION_CREDENTIALS` to - * a location of a private key. It's not exactly the same, but works pretty similar. See - * https://developers.google.com/identity/protocols/application-default-credentials for more info. 
- * - * @return - * @throws IOException - */ - @Override - protected TokenResponse executeRefreshToken() throws IOException { - def scopes = Collections.singleton(ComputeScopes.CLOUD_PLATFORM) - long currentTime = getClock().currentTimeMillis() - - def payload = new JwtPayload( - iss: serviceAccountId, - aud: getTokenServerEncodedUrl(), - scope: Joiner.on(", ").join(scopes), - iat: currentTime / 1000, - ext: currentTime / 1000 + 3600 - ) - - SignJwtRequest req = new SignJwtRequest().setPayload(mapper.writeValueAsString(payload)) - String fullAccountId = "projects/${serviceAccountProject}/serviceAccounts/${serviceAccountId}" - - try { - Iam iam = new Iam.Builder(getTransport(), - jsonFactory, - getApplicationDefault().createScoped(scopes)).build() - def signedJwt = iam.projects().serviceAccounts().signJwt(fullAccountId, req).execute() - - TokenRequest request = new TokenRequest( - getTransport(), getJsonFactory(), new GenericUrl(getTokenServerEncodedUrl()), - "urn:ietf:params:oauth:grant-type:jwt-bearer") - request.put("assertion", signedJwt.getSignedJwt()) - return request.execute() - } catch (GeneralSecurityException exception) { - IOException e = new IOException() - e.initCause(exception) - throw e - } - } - - class JwtPayload { - String iss - String scope - String aud - Long iat - Long ext - } -} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedServiceAccountCredentials.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedServiceAccountCredentials.groovy index 8db1ae16870..767ba0fddbe 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedServiceAccountCredentials.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleImpersonatedServiceAccountCredentials.groovy @@ -16,12 +16,13 @@ package com.netflix.spinnaker.clouddriver.google.security -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory +import com.google.api.services.compute.ComputeScopes +import com.google.auth.oauth2.ImpersonatedCredentials import com.netflix.spinnaker.clouddriver.google.ComputeVersion +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j +@CompileStatic @Slf4j class GoogleImpersonatedServiceAccountCredentials extends GoogleCredentials { @@ -38,11 +39,16 @@ class GoogleImpersonatedServiceAccountCredentials extends GoogleCredentials { } @Override - GoogleCredential getCredential(HttpTransport httpTransport, JsonFactory jsonFactory) { - def builder = new GoogleCredential.Builder() - .setTransport(httpTransport) - .setJsonFactory(jsonFactory) - .setClientAuthentication(GoogleCredential.getApplicationDefault()) - return new GoogleImpersonatedCredential(builder, serviceAccountId, serviceAccountProject) + com.google.auth.oauth2.GoogleCredentials getCredentials() { + def sourceCredentials = com.google.auth.oauth2.GoogleCredentials.getApplicationDefault() + def targetPrincipal = "${serviceAccountId}@${serviceAccountProject}.iam.gserviceaccount.com".toString() + def scopes = Collections.singletonList(ComputeScopes.CLOUD_PLATFORM) + def lifetime = 3600 + + return ImpersonatedCredentials.create(sourceCredentials, + targetPrincipal, + null /* delegates */, + scopes, + lifetime) } } diff --git 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleJsonCredentials.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleJsonCredentials.groovy index 9b8df946b9d..b44d9c1f69b 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleJsonCredentials.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleJsonCredentials.groovy @@ -16,13 +16,12 @@ package com.netflix.spinnaker.clouddriver.google.security -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory import com.google.api.services.compute.ComputeScopes import com.netflix.spinnaker.clouddriver.google.ComputeVersion import com.netflix.spinnaker.clouddriver.googlecommon.security.GoogleCommonCredentialUtils +import groovy.transform.CompileStatic +@CompileStatic class GoogleJsonCredentials extends GoogleCredentials { final String jsonKey @@ -32,7 +31,7 @@ class GoogleJsonCredentials extends GoogleCredentials { } @Override - GoogleCredential getCredential(HttpTransport httpTransport, JsonFactory jsonFactory) { - GoogleCommonCredentialUtils.getCredentials(httpTransport, jsonFactory, jsonKey, ComputeScopes.COMPUTE); + com.google.auth.oauth2.GoogleCredentials getCredentials() { + GoogleCommonCredentialUtils.getCredentials(jsonKey, ComputeScopes.COMPUTE); } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleNamedAccountCredentials.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleNamedAccountCredentials.groovy index b30b6a25d65..e028123bcee 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleNamedAccountCredentials.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleNamedAccountCredentials.groovy @@ -17,26 +17,27 @@ package com.netflix.spinnaker.clouddriver.google.security import com.google.api.services.compute.Compute -import com.google.api.services.compute.model.MachineTypeAggregatedList -import com.google.api.services.compute.model.Region -import com.google.api.services.compute.model.RegionList -import com.google.api.services.compute.model.Zone -import com.google.api.services.compute.model.ZoneList +import com.google.api.services.compute.model.* import com.google.common.annotations.VisibleForTesting import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig import com.netflix.spinnaker.clouddriver.google.ComputeVersion import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.model.GoogleInstanceTypeDisk -import com.netflix.spinnaker.clouddriver.googlecommon.GoogleExecutor +import com.netflix.spinnaker.clouddriver.google.GoogleExecutor +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentials + import com.netflix.spinnaker.fiat.model.resources.Permissions +import com.netflix.spinnaker.moniker.Namer import groovy.transform.TupleConstructor import groovy.util.logging.Slf4j @Slf4j @TupleConstructor 
-class GoogleNamedAccountCredentials implements AccountCredentials { +class GoogleNamedAccountCredentials extends AbstractAccountCredentials { // Sorted in reverse clock speed order as per the table here (https://cloud.google.com/compute/docs/regions-zones/regions-zones#available). static final List SORTED_CPU_PLATFORMS = [ @@ -46,7 +47,7 @@ class GoogleNamedAccountCredentials implements AccountCredentials regionsToManage + final Map zoneToAcceleratorTypesMap static class Builder { String name String environment String accountType + Namer namer List requiredGroupMembership = [] Permissions permissions = Permissions.EMPTY String project @@ -82,6 +85,7 @@ class GoogleNamedAccountCredentials implements AccountCredentials> regionToZonesMap = [:] Map locationToInstanceTypesMap = [:] + Map zoneToAcceleratorTypesMap = [:] Map> locationToCpuPlatformsMap List instanceTypeDisks = [] String jsonKey @@ -113,6 +117,11 @@ class GoogleNamedAccountCredentials implements AccountCredentials requiredGroupMembership) { this.requiredGroupMembership = requiredGroupMembership return this @@ -258,9 +267,17 @@ class GoogleNamedAccountCredentials implements AccountCredentials queryAcceleratorTypes(Compute compute, + String project) { + AcceleratorTypeAggregatedList acceleratorTypeList = GoogleExecutor.timeExecute( + GoogleExecutor.getRegistry(), + compute.acceleratorTypes().aggregatedList(project), + "google.api", + "compute.acceleratorTypes.aggregatedList", + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_GLOBAL) + String nextPageToken = acceleratorTypeList.getNextPageToken() + Map zoneToAcceleratorTypesMap = convertAcceleratorTypeListToMap(acceleratorTypeList) + + while (nextPageToken) { + acceleratorTypeList = GoogleExecutor.timeExecute( + GoogleExecutor.getRegistry(), + compute.acceleratorTypes().aggregatedList(project).setPageToken(nextPageToken), + "google.api", + "compute.acceleratorTypes.aggregatedList", + GoogleExecutor.TAG_SCOPE, GoogleExecutor.SCOPE_GLOBAL) + nextPageToken = acceleratorTypeList.getNextPageToken() + + Map subsequentZoneToAcceleratorTypesMap = convertAcceleratorTypeListToMap(acceleratorTypeList) + subsequentZoneToAcceleratorTypesMap.each { zone, acceleratorTypes -> + if (zone in zoneToAcceleratorTypesMap) { + zoneToAcceleratorTypesMap[zone].acceleratorTypes += acceleratorTypes.acceleratorTypes + } else { + zoneToAcceleratorTypesMap[zone] = acceleratorTypes + } + } + } + + return zoneToAcceleratorTypesMap + } + private static Map queryInstanceTypes(Compute compute, String project, Map> regionToZonesMap) { @@ -386,6 +437,19 @@ class GoogleNamedAccountCredentials implements AccountCredentials convertAcceleratorTypeListToMap(AcceleratorTypeAggregatedList acceleratorTypeList) { + def zoneToAcceleratorTypesMap = acceleratorTypeList.items.collectEntries { zone, acceleratorTypesScopedList -> + zone = GCEUtil.getLocalName(zone) + if (acceleratorTypesScopedList.acceleratorTypes) { + return [(zone): [ acceleratorTypes: acceleratorTypesScopedList ]] + } else { + return [:] + } + } + + return zoneToAcceleratorTypesMap + } + @VisibleForTesting static Map convertInstanceTypeListToMap(MachineTypeAggregatedList instanceTypeList) { // Populate zone to instance types mappings.
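The queryAcceleratorTypes helper above follows the standard Compute aggregatedList pagination contract: issue the request, fold the returned page into an accumulator, then repeat with setPageToken until getNextPageToken() comes back empty. A minimal sketch of that loop in the same Groovy style, with the GoogleExecutor metrics plumbing omitted; fetchAllAcceleratorTypes and mergePage are hypothetical names, the latter standing in for convertAcceleratorTypeListToMap plus the per-zone merge:

  // Pagination sketch only; assumes the Compute client and imports already present in this file.
  private static Map<String, Map> fetchAllAcceleratorTypes(Compute compute, String project) {
    Map<String, Map> zoneToAcceleratorTypesMap = [:]
    String pageToken = null
    while (true) {
      def request = compute.acceleratorTypes().aggregatedList(project)
      if (pageToken) {
        request.setPageToken(pageToken) // omitting this would refetch the first page forever
      }
      def page = request.execute()
      mergePage(zoneToAcceleratorTypesMap, page) // hypothetical: fold this page's zones into the map
      pageToken = page.getNextPageToken()
      if (!pageToken) {
        return zoneToAcceleratorTypesMap
      }
    }
  }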
diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/config/GoogleConfiguration.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/config/GoogleConfiguration.groovy index feb193bf83b..2e506f7c82a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/config/GoogleConfiguration.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/config/GoogleConfiguration.groovy @@ -17,13 +17,14 @@ package com.netflix.spinnaker.config import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties +import com.netflix.spinnaker.clouddriver.google.config.GoogleCredentialsConfiguration + import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.health.GoogleHealthIndicator import com.netflix.spinnaker.clouddriver.google.model.GoogleDisk import com.netflix.spinnaker.clouddriver.google.model.GoogleInstanceTypeDisk -import com.netflix.spinnaker.clouddriver.google.security.GoogleCredentialsInitializer +import com.netflix.spinnaker.clouddriver.google.provider.GoogleInfrastructureProvider import groovy.transform.ToString -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties @@ -35,12 +36,11 @@ import org.springframework.scheduling.annotation.EnableScheduling @EnableScheduling @ConditionalOnProperty('google.enabled') @ComponentScan(["com.netflix.spinnaker.clouddriver.google"]) -@Import([ GoogleCredentialsInitializer ]) +@Import([ GoogleCredentialsConfiguration ]) class GoogleConfiguration { private static final String DEFAULT_KEY = "default" - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean @ConfigurationProperties("google") GoogleConfigurationProperties googleConfigurationProperties() { @@ -48,13 +48,13 @@ class GoogleConfiguration { } @Bean - GoogleHealthIndicator googleHealthIndicator() { - new GoogleHealthIndicator() + GoogleOperationPoller googleOperationPoller() { + new GoogleOperationPoller() } @Bean - GoogleOperationPoller googleOperationPoller() { - new GoogleOperationPoller() + GoogleInfrastructureProvider googleInfrastructureProvider() { + new GoogleInfrastructureProvider() } @Bean diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/GoogleExecutorTraits.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/GoogleExecutorTraits.java new file mode 100644 index 00000000000..eaf8b75ff7c --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/GoogleExecutorTraits.java @@ -0,0 +1,54 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.google; + +import com.google.api.client.googleapis.services.AbstractGoogleClientRequest; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest; +import com.netflix.spinnaker.clouddriver.google.security.AccountForClient; +import java.io.IOException; + +/** This class is syntactic sugar atop the static GoogleExecutor. */ +public interface GoogleExecutorTraits { + + String TAG_BATCH_CONTEXT = GoogleExecutor.getTAG_BATCH_CONTEXT(); + String TAG_REGION = GoogleExecutor.getTAG_REGION(); + String TAG_SCOPE = GoogleExecutor.getTAG_SCOPE(); + String TAG_ZONE = GoogleExecutor.getTAG_ZONE(); + String SCOPE_GLOBAL = GoogleExecutor.getSCOPE_GLOBAL(); + String SCOPE_REGIONAL = GoogleExecutor.getSCOPE_REGIONAL(); + String SCOPE_ZONAL = GoogleExecutor.getSCOPE_ZONAL(); + + Registry getRegistry(); + + default T timeExecuteBatch( + GoogleBatchRequest googleBatchRequest, String batchContext, String... tags) + throws IOException { + return GoogleExecutor.timeExecuteBatch(getRegistry(), googleBatchRequest, batchContext, tags); + } + + default T timeExecute(AbstractGoogleClientRequest request, String api, String... tags) + throws IOException { + + String account = AccountForClient.getAccount(request.getAbstractGoogleClient()); + String[] augmentedTags = new String[tags.length + 2]; + augmentedTags[0] = "account"; + augmentedTags[1] = account; + System.arraycopy(tags, 0, augmentedTags, 2, tags.length); + + return GoogleExecutor.timeExecute(getRegistry(), request, "google.api", api, augmentedTags); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/batch/GoogleBatchRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/batch/GoogleBatchRequest.java new file mode 100644 index 00000000000..8b49ee8a713 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/batch/GoogleBatchRequest.java @@ -0,0 +1,125 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.batch; + +import com.google.api.client.googleapis.batch.BatchRequest; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.ComputeRequest; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.TimeUnit; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +/** Helper class for sending batch requests to GCE. 
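+ * + * <p>Typical use, as implemented below: queue {@link ComputeRequest}/{@link JsonBatchCallback} + * pairs via {@code queue(...)}, then call {@code execute()}; queued requests are partitioned + * into batches of at most 100 and the resulting batches are executed in parallel.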
*/ +@Slf4j +public class GoogleBatchRequest { + + private static final int MAX_BATCH_SIZE = + 100; // Platform specified max to not overwhelm batch backends. + private static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = (int) TimeUnit.MINUTES.toMillis(2); + private static final int DEFAULT_READ_TIMEOUT_MILLIS = (int) TimeUnit.MINUTES.toMillis(2); + + private List queuedRequests; + private String clouddriverUserAgentApplicationName; + private Compute compute; + + public GoogleBatchRequest(Compute compute, String clouddriverUserAgentApplicationName) { + this.compute = compute; + this.clouddriverUserAgentApplicationName = clouddriverUserAgentApplicationName; + this.queuedRequests = new ArrayList<>(); + } + + public void execute() { + if (queuedRequests.size() == 0) { + log.debug("No requests queued in batch, exiting."); + return; + } + + List queuedBatches = new ArrayList<>(); + List> requestPartitions = Lists.partition(queuedRequests, MAX_BATCH_SIZE); + requestPartitions.forEach( + requestPart -> { + BatchRequest newBatch = newBatch(); + requestPart.forEach( + qr -> { + try { + qr.getRequest().queue(newBatch, qr.getCallback()); + } catch (IOException ioe) { + log.error("Queueing request {} in batch failed.", qr); + throw new RuntimeException(ioe); + } + }); + queuedBatches.add(newBatch); + }); + + ExecutorService threadPool = new ForkJoinPool(10); + try { + threadPool + .submit(() -> queuedBatches.stream().parallel().forEach(this::executeInternalBatch)) + .get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + threadPool.shutdown(); + } + + private void executeInternalBatch(BatchRequest b) { + try { + b.execute(); + } catch (IOException ioe) { + log.error("Executing batch {} failed.", b); + throw new RuntimeException(ioe); + } + } + + private BatchRequest newBatch() { + return compute.batch( + new HttpRequestInitializer() { + @Override + public void initialize(HttpRequest request) { + request.getHeaders().setUserAgent(clouddriverUserAgentApplicationName); + request.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS); + request.setReadTimeout(DEFAULT_READ_TIMEOUT_MILLIS); + } + }); + } + + public void queue(ComputeRequest request, JsonBatchCallback callback) { + queuedRequests.add(new QueuedRequest(request, callback)); + } + + public Integer size() { + return queuedRequests.size(); + } + + @Data + @AllArgsConstructor + private static class QueuedRequest { + private ComputeRequest request; + private JsonBatchCallback callback; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequest.java new file mode 100644 index 00000000000..e0fd099e297 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequest.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.services.compute.ComputeRequest; +import java.io.IOException; + +public interface BatchComputeRequest, ResponseT> { + + void queue( + GoogleComputeRequest request, JsonBatchCallback callback); + + void execute(String batchContext) throws IOException; +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequestImpl.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequestImpl.java new file mode 100644 index 00000000000..0aa05437369 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequestImpl.java @@ -0,0 +1,220 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static com.google.common.collect.Lists.partition; + +import com.google.api.client.googleapis.batch.BatchRequest; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.util.Throwables; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.ComputeRequest; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.AbstractFuture; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.netflix.spectator.api.Registry; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.io.UncheckedIOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.Value; +import org.apache.http.client.HttpResponseException; + +final class BatchComputeRequestImpl, ResponseT> + implements BatchComputeRequest { + + // Platform-specified max to not overwhelm batch backends. 
+ @VisibleForTesting static final int MAX_BATCH_SIZE = 100; + private static final Duration CONNECT_TIMEOUT = Duration.ofMinutes(2); + private static final Duration READ_TIMEOUT = Duration.ofMinutes(2); + + private final Compute compute; + private final Registry registry; + private final String userAgent; + private final ListeningExecutorService executor; + private final List> queuedRequests; + + BatchComputeRequestImpl( + Compute compute, Registry registry, String userAgent, ListeningExecutorService executor) { + this.compute = compute; + this.registry = registry; + this.userAgent = userAgent; + this.executor = executor; + this.queuedRequests = new ArrayList<>(); + } + + @Override + public void queue( + GoogleComputeRequest request, JsonBatchCallback callback) { + queuedRequests.add(new QueuedRequest<>(request.getRequest(), callback)); + } + + @Override + public void execute(String batchContext) throws IOException { + if (queuedRequests.size() == 0) { + return; + } + + List>> requestPartitions = + partition(queuedRequests, MAX_BATCH_SIZE); + List queuedBatches = createBatchRequests(requestPartitions); + + var statusCode = "500"; + String success = "false"; + long start = registry.clock().monotonicTime(); + try { + executeBatches(queuedBatches); + success = "true"; + statusCode = "200"; + } catch (HttpResponseException e) { + statusCode = Integer.toString(e.getStatusCode()); + throw e; + } finally { + long nanos = registry.clock().monotonicTime() - start; + String status = statusCode.charAt(0) + "xx"; + Map tags = + ImmutableMap.of( + "context", batchContext, + "success", success, + "status", status, + "statusCode", statusCode); + registry + .timer(registry.createId("google.batchExecute", tags)) + .record(Duration.ofNanos(nanos)); + registry + .counter(registry.createId("google.batchSize", tags)) + .increment(queuedRequests.size()); + } + } + + private void executeBatches(List queuedBatches) throws IOException { + if (queuedBatches.size() == 1) { + queuedBatches.get(0).execute(); + return; + } + + List> futures = + queuedBatches.stream() + .map( + batchRequest -> + executor.submit( + (Callable) + () -> { + batchRequest.execute(); + return null; + })) + .collect(Collectors.toList()); + try { + new FailFastFuture(futures, executor).get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException(); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + Throwables.propagateIfPossible(cause, IOException.class); + throw new RuntimeException(cause); + } + } + + private List createBatchRequests( + List>> requestPartitions) throws IOException { + + List queuedBatches = new ArrayList<>(); + + try { + requestPartitions.forEach( + partition -> { + BatchRequest batch = newBatch(); + partition.forEach( + qr -> wrapIOException(() -> qr.getRequest().queue(batch, qr.getCallback()))); + queuedBatches.add(batch); + }); + return queuedBatches; + } catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + private BatchRequest newBatch() { + return compute.batch( + request -> { + request.getHeaders().setUserAgent(userAgent); + request.setConnectTimeout((int) CONNECT_TIMEOUT.toMillis()); + request.setReadTimeout((int) READ_TIMEOUT.toMillis()); + }); + } + + @FunctionalInterface + private interface IoExceptionRunnable { + void run() throws IOException; + } + + private static void wrapIOException(IoExceptionRunnable runnable) { + try { + runnable.run(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Value + 
@AllArgsConstructor + private static class QueuedRequest, ResponseT> { + private RequestT request; + private JsonBatchCallback callback; + } + + private static class FailFastFuture extends AbstractFuture { + + private final AtomicInteger remainingFutures; + + FailFastFuture(List> futures, ExecutorService executor) { + remainingFutures = new AtomicInteger(futures.size()); + for (ListenableFuture future : futures) { + Futures.addCallback( + future, + new FutureCallback() { + @Override + public void onSuccess(Object result) { + if (remainingFutures.decrementAndGet() == 0) { + set(null); + } + } + + @Override + public void onFailure(Throwable t) { + setException(t); + } + }, + executor); + } + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequest.java new file mode 100644 index 00000000000..1c3c6bcb2b7 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.ComputeRequest; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; + +/** + * A simple interface for executing multiple paged requests in batches. + * + *

Queued {@link PaginatedComputeRequest requests} will be sent off in a single batch. If any + * of the responses indicate there are more pages of results, further batches will be sent until all + * pages have been queried. + */ +public interface BatchPaginatedComputeRequest, ItemT> { + + void queue(PaginatedComputeRequest request); + + ImmutableSet execute(String batchContext) throws IOException; +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequestImpl.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequestImpl.java new file mode 100644 index 00000000000..9ef09006b2f --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequestImpl.java @@ -0,0 +1,113 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static com.google.common.base.Strings.isNullOrEmpty; + +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponseException; +import com.google.api.services.compute.ComputeRequest; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Supplier; + +final class BatchPaginatedComputeRequestImpl< + ComputeRequestT extends ComputeRequest, ResponseT, ItemT> + implements BatchPaginatedComputeRequest { + + private final Supplier> batchRequestSupplier; + private final Map, String> + nextPageTokens = new HashMap<>(); + private IOException exception; + + BatchPaginatedComputeRequestImpl( + Supplier> batchRequestSupplier) { + this.batchRequestSupplier = batchRequestSupplier; + } + + @Override + public void queue(PaginatedComputeRequest request) { + nextPageTokens.put( + (PaginatedComputeRequestImpl) request, ""); + } + + @Override + public ImmutableSet execute(String batchContext) throws IOException { + ImmutableSet.Builder results = ImmutableSet.builder(); + + while (!nextPageTokens.isEmpty() && exception == null) { + BatchComputeRequest pageRequest = batchRequestSupplier.get(); + for (Map.Entry, String> entry : + nextPageTokens.entrySet()) { + GoogleComputeRequest request = + entry.getKey().requestGenerator.createRequest(entry.getValue()); + entry.getKey().requestModifier.accept(request.getRequest()); + pageRequest.queue(request, new PageCallback(entry.getKey(), results)); + } + pageRequest.execute(batchContext); + } + + if (exception != null) { + throw exception; + } + + return results.build(); + } + + private class PageCallback extends JsonBatchCallback { + + private final PaginatedComputeRequestImpl request; + private final ImmutableSet.Builder results; + + private PageCallback(
PaginatedComputeRequestImpl request, + ImmutableSet.Builder results) { + this.request = request; + this.results = results; + } + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) { + nextPageTokens.remove(request); + HttpResponseException newException = + new HttpResponseException.Builder(e.getCode(), e.getMessage(), responseHeaders) + .setMessage(e.getMessage()) + .build(); + if (exception == null) { + exception = newException; + } else { + exception.addSuppressed(newException); + } + } + + @Override + public void onSuccess(ResponseT response, HttpHeaders responseHeaders) { + Optional.ofNullable(request.itemRetriever.getItems(response)).ifPresent(results::addAll); + String nextPageToken = request.nextPageTokenRetriever.getNextPageToken(response); + if (isNullOrEmpty(nextPageToken)) { + nextPageTokens.remove(request); + } else { + nextPageTokens.put(request, nextPageToken); + } + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ComputeConfiguration.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ComputeConfiguration.java new file mode 100644 index 00000000000..89a7a556cff --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ComputeConfiguration.java @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.concurrent.Executors; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class ComputeConfiguration { + + public static final String BATCH_REQUEST_EXECUTOR = "batchRequestExecutor"; + + @Bean + @Qualifier(BATCH_REQUEST_EXECUTOR) + public ListeningExecutorService batchRequestExecutor() { + return MoreExecutors.listeningDecorator( + Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setNameFormat(ComputeConfiguration.class.getSimpleName() + "-%d") + .build())); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GetFirstBatchComputeRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GetFirstBatchComputeRequest.java new file mode 100644 index 00000000000..8bd4c81d125 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GetFirstBatchComputeRequest.java @@ -0,0 +1,105 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponseException; +import com.google.api.services.compute.ComputeRequest; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; + +/** + * Queues several {@link GoogleComputeGetRequest requests} into a single batched request and returns + * the first found object, if any. + */ +@Slf4j +public final class GetFirstBatchComputeRequest< + RequestT extends ComputeRequest, ResponseT> { + + private final BatchComputeRequest delegate; + private final Callback callback; + + private GetFirstBatchComputeRequest(BatchComputeRequest delegate) { + this.delegate = delegate; + this.callback = new Callback<>(); + } + + public static , ResponseT> + GetFirstBatchComputeRequest create( + BatchComputeRequest batchRequest) { + return new GetFirstBatchComputeRequest<>(batchRequest); + } + + public void queue(GoogleComputeGetRequest request) { + delegate.queue(request, callback); + } + + public Optional execute(String batchContext) throws IOException { + + delegate.execute(batchContext); + + if (callback.response != null) { + if (!callback.exceptions.isEmpty()) { + logIgnoredExceptions(); + } + return Optional.of(callback.response); + } + + if (!callback.exceptions.isEmpty()) { + HttpResponseException e = callback.exceptions.get(0); + callback.exceptions.subList(1, callback.exceptions.size()).forEach(e::addSuppressed); + throw e; + } + + return Optional.empty(); + } + + private void logIgnoredExceptions() { + callback.exceptions.forEach( + e -> + log.warn( + "Error in batch response, but ignoring because a valid response was found", e)); + } + + private static class Callback extends JsonBatchCallback { + + T response; + List exceptions = new ArrayList<>(); + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) { + if (e.getCode() != 404) { + exceptions.add( + new HttpResponseException.Builder(e.getCode(), e.getMessage(), responseHeaders) + .setMessage(e.getMessage()) + .build()); + } + } + + @Override + public synchronized void onSuccess(T response, HttpHeaders responseHeaders) { + if (this.response == null) { + this.response = response; + } + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GlobalGoogleComputeRequestFactory.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GlobalGoogleComputeRequestFactory.java new file mode 100644 index 00000000000..55cf1ab1c08 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GlobalGoogleComputeRequestFactory.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.google.api.services.compute.model.Operation;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.google.GoogleExecutor;
+import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeOperationRequestImpl.OperationWaiter;
+import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil;
+import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller;
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
+
+final class GlobalGoogleComputeRequestFactory {
+
+  private static final ImmutableMap<String, String> TAGS =
+      ImmutableMap.of(GoogleExecutor.getTAG_SCOPE(), GoogleExecutor.getSCOPE_GLOBAL());
+
+  private final String serviceName;
+  private final GoogleNamedAccountCredentials credentials;
+  private final Registry registry;
+  private final GoogleOperationPoller poller;
+
+  GlobalGoogleComputeRequestFactory(
+      String serviceName,
+      GoogleNamedAccountCredentials credentials,
+      GoogleOperationPoller poller,
+      Registry registry) {
+    this.serviceName = serviceName;
+    this.credentials = credentials;
+    this.registry = registry;
+    this.poller = poller;
+  }
+
+  <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      GoogleComputeRequest<RequestT, ResponseT> wrapRequest(RequestT request, String api) {
+    return new GoogleComputeRequestImpl<>(request, registry, getMetricName(api), TAGS);
+  }
+
+  <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      GoogleComputeGetRequest<RequestT, ResponseT> wrapGetRequest(RequestT request, String api) {
+    return new GoogleComputeGetRequestImpl<>(request, registry, getMetricName(api), TAGS);
+  }
+
+  <RequestT extends ComputeRequest<Operation>>
+      GoogleComputeOperationRequest<RequestT> wrapOperationRequest(RequestT request, String api) {
+    return new GoogleComputeOperationRequestImpl<>(
+        request, registry, getMetricName(api), TAGS, new GlobalOperationWaiter());
+  }
+
+  private String getMetricName(String api) {
+    return String.join(".", "compute", serviceName, api);
+  }
+
+  private final class GlobalOperationWaiter implements OperationWaiter {
+
+    @Override
+    public Operation wait(Operation operation, Task task, String phase) {
+      return poller.waitForGlobalOperation(
+          credentials.getCompute(),
+          credentials.getProject(),
+          operation.getName(),
+          /* timeoutSeconds= */ null,
+          task,
+          GCEUtil.getLocalName(operation.getTargetLink()),
+          phase);
+    }
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeApiFactory.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeApiFactory.java
new file mode 100644
index 00000000000..25b39fa80bf
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeApiFactory.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
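The factory above is the glue between raw Compute requests and the metric/polling wrappers. A sketch of the call pattern, assuming a factory already constructed as in this patch; the project and image names are illustrative:

    GlobalGoogleComputeRequestFactory factory =
        new GlobalGoogleComputeRequestFactory("images", credentials, poller, registry);
    Compute.Images.Get raw = credentials.getCompute().images().get("my-project", "my-image");
    GoogleComputeGetRequest<Compute.Images.Get, Image> wrapped =
        factory.wrapGetRequest(raw, "get");
    Optional<Image> image = wrapped.executeGet(); // timed as compute.images.get, tagged global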
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.ComputeRequest; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Service; + +@Service +public class GoogleComputeApiFactory { + + private final GoogleOperationPoller operationPoller; + private final Registry registry; + private String clouddriverUserAgentApplicationName; + private ListeningExecutorService batchExecutor; + + @Autowired + public GoogleComputeApiFactory( + GoogleOperationPoller operationPoller, + Registry registry, + String clouddriverUserAgentApplicationName, + @Qualifier(ComputeConfiguration.BATCH_REQUEST_EXECUTOR) + ListeningExecutorService batchExecutor) { + this.operationPoller = operationPoller; + this.registry = registry; + this.clouddriverUserAgentApplicationName = clouddriverUserAgentApplicationName; + this.batchExecutor = batchExecutor; + } + + public Images createImages(GoogleNamedAccountCredentials credentials) { + return new Images(credentials, operationPoller, registry); + } + + public Instances createInstances(GoogleNamedAccountCredentials credentials) { + return new Instances(credentials, operationPoller, registry); + } + + public InstanceTemplates createInstanceTemplates(GoogleNamedAccountCredentials credentials) { + return new InstanceTemplates(credentials, operationPoller, registry); + } + + public RegionAutoscalers createRegionAutoscalers(GoogleNamedAccountCredentials credentials) { + return new RegionAutoscalers(credentials, operationPoller, registry); + } + + public RegionInstanceGroupManagers createRegionInstanceGroupManagers( + GoogleNamedAccountCredentials credentials) { + return new RegionInstanceGroupManagers(credentials, operationPoller, registry); + } + + public ZoneAutoscalers createZoneAutoscalers(GoogleNamedAccountCredentials credentials) { + return new ZoneAutoscalers(credentials, operationPoller, registry); + } + + public ZoneInstanceGroupManagers createZoneInstanceGroupManagers( + GoogleNamedAccountCredentials credentials) { + return new ZoneInstanceGroupManagers(credentials, operationPoller, registry); + } + + public GoogleServerGroupManagers createServerGroupManagers( + GoogleNamedAccountCredentials credentials, GoogleServerGroup.View serverGroup) { + return serverGroup.getRegional() + ? 
new RegionGoogleServerGroupManagers(
+            credentials, operationPoller, registry, serverGroup.getName(), serverGroup.getRegion())
+        : new ZoneGoogleServerGroupManagers(
+            credentials, operationPoller, registry, serverGroup.getName(), serverGroup.getZone());
+  }
+
+  public <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      BatchComputeRequest<RequestT, ResponseT> createBatchRequest(
+          GoogleNamedAccountCredentials credentials) {
+    return new BatchComputeRequestImpl<>(
+        credentials.getCompute(), registry, clouddriverUserAgentApplicationName, batchExecutor);
+  }
+
+  public <RequestT extends ComputeRequest<ResponseT>, ResponseT, ItemT>
+      BatchPaginatedComputeRequest<RequestT, ItemT> createPaginatedBatchRequest(
+          GoogleNamedAccountCredentials credentials) {
+    return new BatchPaginatedComputeRequestImpl<RequestT, ResponseT, ItemT>(
+        () -> createBatchRequest(credentials));
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeGetRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeGetRequest.java
new file mode 100644
index 00000000000..679847ff308
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeGetRequest.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.client.googleapis.json.GoogleJsonResponseException;
+import com.google.api.services.compute.ComputeRequest;
+import java.io.IOException;
+import java.util.Optional;
+
+public interface GoogleComputeGetRequest<RequestT extends ComputeRequest<ResponseT>, ResponseT>
+    extends GoogleComputeRequest<RequestT, ResponseT> {
+
+  default Optional<ResponseT> executeGet() throws IOException {
+    try {
+      return Optional.of(execute());
+    } catch (GoogleJsonResponseException e) {
+      if (e.getStatusCode() == 404) {
+        return Optional.empty();
+      }
+      throw e;
+    }
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeGetRequestImpl.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeGetRequestImpl.java
new file mode 100644
index 00000000000..ffbec5c4794
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeGetRequestImpl.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
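executeGet() above converts a 404 from the Compute API into Optional.empty() while letting every other failure propagate. A sketch of the intended calling pattern; the project and image names are illustrative:

    GoogleComputeGetRequest<Compute.Images.Get, Image> request =
        images.get("my-project", "my-image");
    Optional<Image> image = request.executeGet();
    // A missing image is a normal outcome here; other HTTP errors still throw.
    image.ifPresent(i -> System.out.println(i.getSelfLink()));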
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.netflix.spectator.api.Registry;
+import java.util.Map;
+
+final class GoogleComputeGetRequestImpl<RequestT extends ComputeRequest<ResponseT>, ResponseT>
+    extends GoogleComputeRequestImpl<RequestT, ResponseT>
+    implements GoogleComputeGetRequest<RequestT, ResponseT> {
+
+  GoogleComputeGetRequestImpl(
+      RequestT request, Registry registry, String metricName, Map<String, String> tags) {
+    super(request, registry, metricName, tags);
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeOperationRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeOperationRequest.java
new file mode 100644
index 00000000000..5118aa8ff74
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeOperationRequest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.google.api.services.compute.model.Operation;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.io.IOException;
+
+public interface GoogleComputeOperationRequest<RequestT extends ComputeRequest<Operation>>
+    extends GoogleComputeRequest<RequestT, Operation> {
+
+  Operation executeAndWait(Task task, String phase) throws IOException;
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeOperationRequestImpl.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeOperationRequestImpl.java
new file mode 100644
index 00000000000..a7f67044967
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeOperationRequestImpl.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
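executeAndWait() submits the mutating call and then blocks on the returned Operation until the poller reports completion. A sketch of the pattern; the task and phase values are illustrative:

    GoogleComputeOperationRequest<ComputeRequest<Operation>> delete =
        serverGroupManagers.delete();
    Operation done = delete.executeAndWait(task, "DESTROY_SERVER_GROUP");
    // 'done' is the finished Operation; the poller throws on timeout or failure.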
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.google.api.services.compute.model.Operation;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.io.IOException;
+import java.util.Map;
+
+final class GoogleComputeOperationRequestImpl<RequestT extends ComputeRequest<Operation>>
+    extends GoogleComputeRequestImpl<RequestT, Operation>
+    implements GoogleComputeOperationRequest<RequestT> {
+
+  @FunctionalInterface
+  interface OperationWaiter {
+    Operation wait(Operation operation, Task task, String phase);
+  }
+
+  private final OperationWaiter operationWaiter;
+
+  GoogleComputeOperationRequestImpl(
+      RequestT request,
+      Registry registry,
+      String metricName,
+      Map<String, String> tags,
+      OperationWaiter operationWaiter) {
+    super(request, registry, metricName, tags);
+    this.operationWaiter = operationWaiter;
+  }
+
+  @Override
+  public Operation executeAndWait(Task task, String phase) throws IOException {
+    return operationWaiter.wait(execute(), task, phase);
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeRequest.java
new file mode 100644
index 00000000000..a1539eebdea
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeRequest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import java.io.IOException;
+
+public interface GoogleComputeRequest<RequestT extends ComputeRequest<ResponseT>, ResponseT> {
+
+  ResponseT execute() throws IOException;
+
+  RequestT getRequest();
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeRequestImpl.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeRequestImpl.java
new file mode 100644
index 00000000000..df36eb543e5
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleComputeRequestImpl.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
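Because the waiting strategy is isolated behind OperationWaiter, a same-package test can substitute a pass-through waiter instead of standing up a real poller. A sketch, not part of this change:

    GoogleComputeOperationRequestImpl.OperationWaiter passThrough =
        (operation, task, phase) -> operation; // skip polling entirely in unit tests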
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import static java.util.stream.Collectors.toList;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.google.common.collect.ImmutableList;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.google.GoogleExecutor;
+import com.netflix.spinnaker.clouddriver.google.security.AccountForClient;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Stream;
+
+class GoogleComputeRequestImpl<RequestT extends ComputeRequest<ResponseT>, ResponseT>
+    implements GoogleComputeRequest<RequestT, ResponseT> {
+
+  private final RequestT request;
+  private final Registry registry;
+  private final String metricName;
+  private final Map<String, String> tags;
+
+  GoogleComputeRequestImpl(
+      RequestT request, Registry registry, String metricName, Map<String, String> tags) {
+    this.request = request;
+    this.registry = registry;
+    this.metricName = metricName;
+    this.tags = tags;
+  }
+
+  @Override
+  public ResponseT execute() throws IOException {
+    return timeExecute(request);
+  }
+
+  private ResponseT timeExecute(RequestT request) throws IOException {
+    return GoogleExecutor.timeExecute(
+        registry, request, "google.api", metricName, getTimeExecuteTags(request));
+  }
+
+  private String[] getTimeExecuteTags(RequestT request) {
+    String account = AccountForClient.getAccount(request.getAbstractGoogleClient());
+    return ImmutableList.<String>builder()
+        .add("account")
+        .add(account)
+        .addAll(flattenTags())
+        .build()
+        .toArray(new String[] {});
+  }
+
+  private List<String> flattenTags() {
+    return tags.entrySet().stream()
+        .flatMap(e -> Stream.of(e.getKey(), e.getValue()))
+        .collect(toList());
+  }
+
+  @Override
+  public RequestT getRequest() {
+    return request;
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleServerGroupManagers.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleServerGroupManagers.java
new file mode 100644
index 00000000000..f4c72fba54e
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/GoogleServerGroupManagers.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.Compute.InstanceGroupManagers;
+import com.google.api.services.compute.Compute.RegionInstanceGroupManagers;
+import com.google.api.services.compute.ComputeRequest;
+import com.google.api.services.compute.model.InstanceGroupManager;
+import com.google.api.services.compute.model.Operation;
+import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A wrapper around {@link InstanceGroupManagers} and {@link RegionInstanceGroupManagers} that
+ * performs operations on a specific {@link GoogleServerGroup}.
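+ *
+ * <p>A minimal usage sketch (the credentials, task, and phase values are illustrative):
+ *
+ * <pre>{@code
+ * GoogleServerGroupManagers managers =
+ *     computeApiFactory.createServerGroupManagers(credentials, serverGroupView);
+ * InstanceGroupManager manager = managers.get().execute();
+ * managers.delete().executeAndWait(task, "DESTROY_SERVER_GROUP");
+ * }</pre>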
+ */
+public interface GoogleServerGroupManagers {
+
+  GoogleComputeOperationRequest<ComputeRequest<Operation>> abandonInstances(
+      List<String> instances) throws IOException;
+
+  GoogleComputeOperationRequest<ComputeRequest<Operation>> delete() throws IOException;
+
+  GoogleComputeGetRequest<ComputeRequest<InstanceGroupManager>, InstanceGroupManager> get()
+      throws IOException;
+
+  GoogleComputeOperationRequest patch(InstanceGroupManager content) throws IOException;
+
+  GoogleComputeOperationRequest<ComputeRequest<Operation>> update(InstanceGroupManager content)
+      throws IOException;
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/Images.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/Images.java
new file mode 100644
index 00000000000..85564b84f52
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/Images.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.Compute;
+import com.google.api.services.compute.model.Image;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller;
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
+import java.io.IOException;
+
+public class Images {
+
+  private final GoogleNamedAccountCredentials credentials;
+  private final GlobalGoogleComputeRequestFactory requestFactory;
+
+  public Images(
+      GoogleNamedAccountCredentials credentials,
+      GoogleOperationPoller operationPoller,
+      Registry registry) {
+    this.credentials = credentials;
+    this.requestFactory =
+        new GlobalGoogleComputeRequestFactory("images", credentials, operationPoller, registry);
+  }
+
+  public GoogleComputeGetRequest<Compute.Images.Get, Image> get(String project, String image)
+      throws IOException {
+    Compute.Images.Get request = credentials.getCompute().images().get(project, image);
+    return requestFactory.wrapGetRequest(request, "get");
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/InstanceTemplates.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/InstanceTemplates.java
new file mode 100644
index 00000000000..518e2bb3fd3
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/InstanceTemplates.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.api.services.compute.model.InstanceTemplateList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; + +public class InstanceTemplates { + + private final Compute.InstanceTemplates computeApi; + private final GoogleNamedAccountCredentials credentials; + private final GlobalGoogleComputeRequestFactory requestFactory; + private static final String defaultView = + "FULL"; // https://cloud.google.com/sdk/gcloud/reference/beta/compute/instance-templates/list + + InstanceTemplates( + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller operationPoller, + Registry registry) { + this.computeApi = credentials.getCompute().instanceTemplates(); + this.credentials = credentials; + this.requestFactory = + new GlobalGoogleComputeRequestFactory( + "instanceTemplates", credentials, operationPoller, registry); + } + + public GoogleComputeOperationRequest delete(String name) + throws IOException { + + Compute.InstanceTemplates.Delete request = computeApi.delete(credentials.getProject(), name); + return requestFactory.wrapOperationRequest(request, "delete"); + } + + public GoogleComputeGetRequest get(String name) + throws IOException { + Compute.InstanceTemplates.Get request = computeApi.get(credentials.getProject(), name); + return requestFactory.wrapGetRequest(request, "get"); + } + + public GoogleComputeOperationRequest insert( + InstanceTemplate template) throws IOException { + Compute.InstanceTemplates.Insert request = + computeApi.insert(credentials.getProject(), template); + return requestFactory.wrapOperationRequest(request, "insert"); + } + + public PaginatedComputeRequest list() { + return new PaginatedComputeRequestImpl<>( + pageToken -> + requestFactory.wrapRequest( + computeApi + .list(credentials.getProject()) + .setPageToken(pageToken) + .setView(defaultView), + "list"), + InstanceTemplateList::getNextPageToken, + InstanceTemplateList::getItems); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/Instances.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/Instances.java new file mode 100644 index 00000000000..ea60b122b42 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/Instances.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
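The list() method above hides the page loop behind PaginatedComputeRequest; both consumption styles look like the following sketch (the printing is illustrative):

    InstanceTemplates instanceTemplates = computeApiFactory.createInstanceTemplates(credentials);
    // Eager: gather every template across all pages.
    ImmutableList<InstanceTemplate> all = instanceTemplates.list().execute();
    // Streaming: process one page at a time without holding the full list.
    instanceTemplates.list().execute(page -> page.forEach(t -> System.out.println(t.getName())));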
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.Compute;
+import com.google.api.services.compute.model.Instance;
+import com.google.api.services.compute.model.InstanceList;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller;
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
+
+public final class Instances {
+
+  private final Compute.Instances computeApi;
+  private final GoogleNamedAccountCredentials credentials;
+  private final GlobalGoogleComputeRequestFactory requestFactory;
+
+  Instances(
+      GoogleNamedAccountCredentials credentials,
+      GoogleOperationPoller operationPoller,
+      Registry registry) {
+    this.computeApi = credentials.getCompute().instances();
+    this.credentials = credentials;
+    this.requestFactory =
+        new GlobalGoogleComputeRequestFactory("instances", credentials, operationPoller, registry);
+  }
+
+  public PaginatedComputeRequest<Compute.Instances.List, Instance> list(String zone) {
+    return new PaginatedComputeRequestImpl<>(
+        pageToken ->
+            requestFactory.wrapRequest(
+                computeApi.list(credentials.getProject(), zone).setPageToken(pageToken), "list"),
+        InstanceList::getNextPageToken,
+        InstanceList::getItems);
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequest.java
new file mode 100644
index 00000000000..8b251f8d7c1
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequest.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.google.common.collect.ImmutableList;
+import java.io.IOException;
+import java.util.List;
+import java.util.function.Consumer;
+
+public interface PaginatedComputeRequest<RequestT extends ComputeRequest<?>, ItemT> {
+
+  void execute(Consumer<List<ItemT>> pageConsumer) throws IOException;
+
+  /**
+   * Return a version of this object with an updated request modifier (overwriting the existing one
+   * if previously set).
+   *
+   *

<p>The request modifier allows you to do things like set a filter on the outgoing requests.
+   */
+  PaginatedComputeRequest<RequestT, ItemT> withRequestModifier(Consumer<RequestT> requestModifier);
+
+  default ImmutableList<ItemT> execute() throws IOException {
+    ImmutableList.Builder<ItemT> result = ImmutableList.builder();
+    execute(result::addAll);
+    return result.build();
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequestImpl.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequestImpl.java
new file mode 100644
index 00000000000..40264e97c29
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequestImpl.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import static com.google.api.client.util.Strings.isNullOrEmpty;
+
+import com.google.api.services.compute.ComputeRequest;
+import java.io.IOException;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Consumer;
+import javax.annotation.Nullable;
+
+final class PaginatedComputeRequestImpl<
+        RequestT extends ComputeRequest<ResponseT>, ResponseT, ItemT>
+    implements PaginatedComputeRequest<RequestT, ItemT> {
+
+  @FunctionalInterface
+  interface RequestGenerator<RequestT extends ComputeRequest<ResponseT>, ResponseT> {
+    GoogleComputeRequest<RequestT, ResponseT> createRequest(String pageToken) throws IOException;
+  }
+
+  @FunctionalInterface
+  interface NextPageTokenRetriever<ResponseT> {
+    String getNextPageToken(ResponseT response);
+  }
+
+  @FunctionalInterface
+  interface ItemRetriever<ResponseT, ItemT> {
+    @Nullable
+    List<ItemT> getItems(ResponseT response);
+  }
+
+  final RequestGenerator<RequestT, ResponseT> requestGenerator;
+  final NextPageTokenRetriever<ResponseT> nextPageTokenRetriever;
+  final ItemRetriever<ResponseT, ItemT> itemRetriever;
+  final Consumer<RequestT> requestModifier;
+
+  PaginatedComputeRequestImpl(
+      RequestGenerator<RequestT, ResponseT> requestGenerator,
+      NextPageTokenRetriever<ResponseT> nextPageTokenRetriever,
+      ItemRetriever<ResponseT, ItemT> itemRetriever) {
+    this(requestGenerator, nextPageTokenRetriever, itemRetriever, request -> {});
+  }
+
+  private PaginatedComputeRequestImpl(
+      RequestGenerator<RequestT, ResponseT> requestGenerator,
+      NextPageTokenRetriever<ResponseT> nextPageTokenRetriever,
+      ItemRetriever<ResponseT, ItemT> itemRetriever,
+      Consumer<RequestT> requestModifier) {
+    this.requestGenerator = requestGenerator;
+    this.nextPageTokenRetriever = nextPageTokenRetriever;
+    this.itemRetriever = itemRetriever;
+    this.requestModifier = requestModifier;
+  }
+
+  @Override
+  public void execute(Consumer<List<ItemT>> pageConsumer) throws IOException {
+
+    String pageToken = "";
+    do {
+      GoogleComputeRequest<RequestT, ResponseT> request =
+          requestGenerator.createRequest(pageToken);
+      requestModifier.accept(request.getRequest());
+      ResponseT response = request.execute();
+      Optional.ofNullable(itemRetriever.getItems(response)).ifPresent(pageConsumer);
+      pageToken = nextPageTokenRetriever.getNextPageToken(response);
+    } while (!isNullOrEmpty(pageToken));
+  }
+
+  @Override
+  public PaginatedComputeRequest<RequestT, ItemT> withRequestModifier(
+      Consumer<RequestT> requestModifier) {
+    return new PaginatedComputeRequestImpl<>(
+        requestGenerator, nextPageTokenRetriever, itemRetriever, requestModifier);
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionAutoscalers.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionAutoscalers.java
new file mode 100644
index 00000000000..7c3fc097513
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionAutoscalers.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.Compute;
+import com.google.api.services.compute.model.Autoscaler;
+import com.google.api.services.compute.model.RegionAutoscalerList;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller;
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
+import java.io.IOException;
+
+public final class RegionAutoscalers {
+
+  private final Compute.RegionAutoscalers computeApi;
+  private final GoogleNamedAccountCredentials credentials;
+  private final RegionalGoogleComputeRequestFactory requestFactory;
+
+  RegionAutoscalers(
+      GoogleNamedAccountCredentials credentials,
+      GoogleOperationPoller operationPoller,
+      Registry registry) {
+    this.computeApi = credentials.getCompute().regionAutoscalers();
+    this.credentials = credentials;
+    this.requestFactory =
+        new RegionalGoogleComputeRequestFactory(
+            "regionAutoscalers", credentials, operationPoller, registry);
+  }
+
+  public GoogleComputeGetRequest<Compute.RegionAutoscalers.Get, Autoscaler> get(
+      String region, String name) throws IOException {
+    return requestFactory.wrapGetRequest(
+        computeApi.get(credentials.getProject(), region, name), "get", region);
+  }
+
+  public PaginatedComputeRequest<Compute.RegionAutoscalers.List, Autoscaler> list(String region) {
+    return new PaginatedComputeRequestImpl<>(
+        pageToken ->
+            requestFactory.wrapRequest(
+                computeApi.list(credentials.getProject(), region).setPageToken(pageToken),
+                "list",
+                region),
+        RegionAutoscalerList::getNextPageToken,
+        RegionAutoscalerList::getItems);
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionGoogleServerGroupManagers.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionGoogleServerGroupManagers.java
new file mode 100644
index 00000000000..2e4f3b46da5
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionGoogleServerGroupManagers.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
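withRequestModifier() above returns a copy of the paginated request whose modifier is applied to each outgoing page request before it executes. A sketch using a server-side filter; the zone and filter expression are illustrative:

    ImmutableList<Instance> matching =
        instances
            .list("us-central1-f")
            .withRequestModifier(list -> list.setFilter("name eq my-server-group-.*"))
            .execute();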
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.ComputeRequest; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.Operation; +import com.google.api.services.compute.model.RegionInstanceGroupManagersAbandonInstancesRequest; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.util.List; + +final class RegionGoogleServerGroupManagers implements GoogleServerGroupManagers { + + private final GoogleNamedAccountCredentials credentials; + private final RegionalGoogleComputeRequestFactory requestFactory; + private final Compute.RegionInstanceGroupManagers managers; + private final String instanceGroupName; + private final String region; + + RegionGoogleServerGroupManagers( + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller operationPoller, + Registry registry, + String instanceGroupName, + String region) { + this.credentials = credentials; + this.requestFactory = + new RegionalGoogleComputeRequestFactory( + "regionInstanceGroupManagers", credentials, operationPoller, registry); + this.managers = credentials.getCompute().regionInstanceGroupManagers(); + this.instanceGroupName = instanceGroupName; + this.region = region; + } + + @Override + public GoogleComputeOperationRequest> abandonInstances( + List instances) throws IOException { + RegionInstanceGroupManagersAbandonInstancesRequest request = + new RegionInstanceGroupManagersAbandonInstancesRequest(); + request.setInstances(instances); + return requestFactory.wrapOperationRequest( + managers.abandonInstances(credentials.getProject(), region, instanceGroupName, request), + "abandonInstances", + region); + } + + @Override + public GoogleComputeOperationRequest> delete() throws IOException { + return requestFactory.wrapOperationRequest( + managers.delete(credentials.getProject(), region, instanceGroupName), "delete", region); + } + + @Override + public GoogleComputeGetRequest, InstanceGroupManager> get() + throws IOException { + return requestFactory.wrapGetRequest( + managers.get(credentials.getProject(), region, instanceGroupName), "get", region); + } + + @Override + public GoogleComputeOperationRequest patch(InstanceGroupManager content) throws IOException { + return requestFactory.wrapOperationRequest( + managers.patch(credentials.getProject(), region, instanceGroupName, content), + "patch", + region); + } + + @Override + public GoogleComputeOperationRequest> update( + InstanceGroupManager content) throws IOException { + return requestFactory.wrapOperationRequest( + managers.update(credentials.getProject(), region, instanceGroupName, content), + "update", + region); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionInstanceGroupManagers.java 
b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionInstanceGroupManagers.java new file mode 100644 index 00000000000..28e3175dd90 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionInstanceGroupManagers.java @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.RegionInstanceGroupManagerList; +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.util.Optional; + +public final class RegionInstanceGroupManagers { + + private final Compute.RegionInstanceGroupManagers computeApi; + private final GoogleNamedAccountCredentials credentials; + private final RegionalGoogleComputeRequestFactory requestFactory; + + RegionInstanceGroupManagers( + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller operationPoller, + Registry registry) { + this.computeApi = credentials.getCompute().regionInstanceGroupManagers(); + this.credentials = credentials; + this.requestFactory = + new RegionalGoogleComputeRequestFactory( + "regionInstanceGroupManagers", credentials, operationPoller, registry); + } + + public GoogleComputeGetRequest get( + String region, String name) throws IOException { + return requestFactory.wrapGetRequest( + computeApi.get(credentials.getProject(), region, name), "get", region); + } + + public PaginatedComputeRequest + list(String region) { + return new PaginatedComputeRequestImpl<>( + pageToken -> + requestFactory.wrapRequest( + computeApi.list(credentials.getProject(), region).setPageToken(pageToken), + "list", + region), + RegionInstanceGroupManagerList::getNextPageToken, + response -> Optional.ofNullable(response.getItems()).orElseGet(ImmutableList::of)); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionalGoogleComputeRequestFactory.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionalGoogleComputeRequestFactory.java new file mode 100644 index 00000000000..84991530c31 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/RegionalGoogleComputeRequestFactory.java @@ -0,0 +1,93 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.ComputeRequest; +import com.google.api.services.compute.model.Operation; +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.google.GoogleExecutor; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeOperationRequestImpl.OperationWaiter; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; + +final class RegionalGoogleComputeRequestFactory { + + private final String serviceName; + private final GoogleNamedAccountCredentials credentials; + private final Registry registry; + private final GoogleOperationPoller poller; + + RegionalGoogleComputeRequestFactory( + String serviceName, + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller poller, + Registry registry) { + this.serviceName = serviceName; + this.credentials = credentials; + this.registry = registry; + this.poller = poller; + } + + , ResponseT> + GoogleComputeRequest wrapRequest( + RequestT request, String api, String region) { + return new GoogleComputeRequestImpl<>(request, registry, getMetricName(api), tags(region)); + } + + , ResponseT> + GoogleComputeGetRequest wrapGetRequest( + RequestT request, String api, String region) { + return new GoogleComputeGetRequestImpl<>(request, registry, getMetricName(api), tags(region)); + } + + > + GoogleComputeOperationRequest wrapOperationRequest( + RequestT request, String api, String region) { + return new GoogleComputeOperationRequestImpl<>( + request, registry, getMetricName(api), tags(region), new RegionalOperationWaiter()); + } + + private String getMetricName(String api) { + return String.join(".", "compute", serviceName, api); + } + + private static ImmutableMap tags(String region) { + return ImmutableMap.builder() + .put(GoogleExecutor.getTAG_SCOPE(), GoogleExecutor.getSCOPE_REGIONAL()) + .put(GoogleExecutor.getTAG_REGION(), region) + .build(); + } + + private final class RegionalOperationWaiter implements OperationWaiter { + + @Override + public Operation wait(Operation operation, Task task, String phase) { + return poller.waitForRegionalOperation( + credentials.getCompute(), + credentials.getProject(), + GCEUtil.getLocalName(operation.getRegion()), + operation.getName(), + /* timeoutSeconds= */ null, + task, + GCEUtil.getLocalName(operation.getTargetLink()), + phase); + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZonalGoogleComputeRequestFactory.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZonalGoogleComputeRequestFactory.java new file mode 100644 index 00000000000..bb554f2cbf8 --- /dev/null +++ 
b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZonalGoogleComputeRequestFactory.java @@ -0,0 +1,93 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.ComputeRequest; +import com.google.api.services.compute.model.Operation; +import com.google.common.collect.ImmutableMap; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.google.GoogleExecutor; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeOperationRequestImpl.OperationWaiter; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; + +final class ZonalGoogleComputeRequestFactory { + + private final String serviceName; + private final GoogleNamedAccountCredentials credentials; + private final Registry registry; + private final GoogleOperationPoller poller; + + ZonalGoogleComputeRequestFactory( + String serviceName, + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller poller, + Registry registry) { + this.serviceName = serviceName; + this.credentials = credentials; + this.registry = registry; + this.poller = poller; + } + + , ResponseT> + GoogleComputeRequest wrapRequest( + RequestT request, String api, String zone) { + return new GoogleComputeRequestImpl<>(request, registry, getMetricName(api), tags(zone)); + } + + , ResponseT> + GoogleComputeGetRequest wrapGetRequest( + RequestT request, String api, String zone) { + return new GoogleComputeGetRequestImpl<>(request, registry, getMetricName(api), tags(zone)); + } + + > + GoogleComputeOperationRequest wrapOperationRequest( + RequestT request, String api, String zone) { + return new GoogleComputeOperationRequestImpl<>( + request, registry, getMetricName(api), tags(zone), new ZonalOperationWaiter()); + } + + private String getMetricName(String api) { + return String.join(".", "compute", serviceName, api); + } + + private static ImmutableMap tags(String zone) { + return ImmutableMap.builder() + .put(GoogleExecutor.getTAG_SCOPE(), GoogleExecutor.getSCOPE_ZONAL()) + .put(GoogleExecutor.getTAG_ZONE(), zone) + .build(); + } + + private final class ZonalOperationWaiter implements OperationWaiter { + + @Override + public Operation wait(Operation operation, Task task, String phase) { + return poller.waitForZonalOperation( + credentials.getCompute(), + credentials.getProject(), + GCEUtil.getLocalName(operation.getZone()), + operation.getName(), + /* timeoutSeconds= */ null, + task, + GCEUtil.getLocalName(operation.getTargetLink()), + phase); + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneAutoscalers.java 
b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneAutoscalers.java new file mode 100644 index 00000000000..c8fa94c8da8 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneAutoscalers.java @@ -0,0 +1,61 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.Autoscaler; +import com.google.api.services.compute.model.AutoscalerList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; + +public final class ZoneAutoscalers { + + private final Compute.Autoscalers computeApi; + private final GoogleNamedAccountCredentials credentials; + private final ZonalGoogleComputeRequestFactory requestFactory; + + ZoneAutoscalers( + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller operationPoller, + Registry registry) { + this.computeApi = credentials.getCompute().autoscalers(); + this.credentials = credentials; + this.requestFactory = + new ZonalGoogleComputeRequestFactory("autoscalers", credentials, operationPoller, registry); + } + + public GoogleComputeGetRequest get(String zone, String name) + throws IOException { + return requestFactory.wrapGetRequest( + credentials.getCompute().autoscalers().get(credentials.getProject(), zone, name), + "get", + zone); + } + + public PaginatedComputeRequest list(String zone) { + return new PaginatedComputeRequestImpl<>( + pageToken -> + requestFactory.wrapRequest( + computeApi.list(credentials.getProject(), zone).setPageToken(pageToken), + "list", + zone), + AutoscalerList::getNextPageToken, + AutoscalerList::getItems); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneGoogleServerGroupManagers.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneGoogleServerGroupManagers.java new file mode 100644 index 00000000000..f57ae2a51ff --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneGoogleServerGroupManagers.java @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.ComputeRequest; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceGroupManagersAbandonInstancesRequest; +import com.google.api.services.compute.model.Operation; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.util.List; + +final class ZoneGoogleServerGroupManagers implements GoogleServerGroupManagers { + + private final GoogleNamedAccountCredentials credentials; + private final ZonalGoogleComputeRequestFactory requestFactory; + private final Compute.InstanceGroupManagers managers; + private final String instanceGroupName; + private final String zone; + + ZoneGoogleServerGroupManagers( + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller operationPoller, + Registry registry, + String instanceGroupName, + String zone) { + this.credentials = credentials; + this.requestFactory = + new ZonalGoogleComputeRequestFactory( + "instanceGroupManagers", credentials, operationPoller, registry); + this.managers = credentials.getCompute().instanceGroupManagers(); + this.instanceGroupName = instanceGroupName; + this.zone = zone; + } + + @Override + public GoogleComputeOperationRequest> abandonInstances( + List instances) throws IOException { + InstanceGroupManagersAbandonInstancesRequest request = + new InstanceGroupManagersAbandonInstancesRequest(); + request.setInstances(instances); + return requestFactory.wrapOperationRequest( + managers.abandonInstances(credentials.getProject(), zone, instanceGroupName, request), + "abandonInstances", + zone); + } + + @Override + public GoogleComputeOperationRequest> delete() throws IOException { + return requestFactory.wrapOperationRequest( + managers.delete(credentials.getProject(), zone, instanceGroupName), "delete", zone); + } + + @Override + public GoogleComputeGetRequest, InstanceGroupManager> get() + throws IOException { + return requestFactory.wrapGetRequest( + managers.get(credentials.getProject(), zone, instanceGroupName), "get", zone); + } + + @Override + public GoogleComputeOperationRequest patch(InstanceGroupManager content) throws IOException { + return requestFactory.wrapOperationRequest( + managers.patch(credentials.getProject(), zone, instanceGroupName, content), "patch", zone); + } + + @Override + public GoogleComputeOperationRequest> update( + InstanceGroupManager content) throws IOException { + return requestFactory.wrapOperationRequest( + managers.update(credentials.getProject(), zone, instanceGroupName, content), + "update", + zone); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneInstanceGroupManagers.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneInstanceGroupManagers.java new file mode 100644 index 00000000000..b60b028e4bb --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/compute/ZoneInstanceGroupManagers.java @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceGroupManagerList; +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.util.Optional; + +public final class ZoneInstanceGroupManagers { + + private final Compute.InstanceGroupManagers computeApi; + private final GoogleNamedAccountCredentials credentials; + private final ZonalGoogleComputeRequestFactory requestFactory; + + ZoneInstanceGroupManagers( + GoogleNamedAccountCredentials credentials, + GoogleOperationPoller operationPoller, + Registry registry) { + this.computeApi = credentials.getCompute().instanceGroupManagers(); + this.credentials = credentials; + this.requestFactory = + new ZonalGoogleComputeRequestFactory( + "instanceGroupManagers", credentials, operationPoller, registry); + } + + public GoogleComputeGetRequest get( + String zone, String name) throws IOException { + return requestFactory.wrapGetRequest( + computeApi.get(credentials.getProject(), zone, name), "get", zone); + } + + public PaginatedComputeRequest list( + String zone) { + return new PaginatedComputeRequestImpl<>( + pageToken -> + requestFactory.wrapRequest( + computeApi.list(credentials.getProject(), zone).setPageToken(pageToken), + "list", + zone), + InstanceGroupManagerList::getNextPageToken, + response -> Optional.ofNullable(response.getItems()).orElseGet(ImmutableList::of)); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/config/GoogleCredentialsConfiguration.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/config/GoogleCredentialsConfiguration.java new file mode 100644 index 00000000000..3a3ffddd7ad --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/config/GoogleCredentialsConfiguration.java @@ -0,0 +1,128 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
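// The list() wrapper above normalizes a quirk of GCE list responses: when a
// zone has no managers, the API omits the "items" field and the generated
// model returns null rather than an empty list. A standalone sketch of the
// same guard:
//
// import com.google.common.collect.ImmutableList;
// import java.util.List;
// import java.util.Optional;
//
// final class PageItems {
//   // Treat a missing "items" field as an empty page so callers never see null.
//   static <T> List<T> itemsOrEmpty(List<T> items) {
//     return Optional.ofNullable(items).orElseGet(ImmutableList::of);
//   }
// }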
+ */ + +package com.netflix.spinnaker.clouddriver.google.config; + +import com.netflix.spinnaker.clouddriver.google.ComputeVersion; +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider; +import com.netflix.spinnaker.clouddriver.google.GoogleExecutor; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; +import com.netflix.spinnaker.config.GoogleConfiguration; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration; +import com.netflix.spinnaker.credentials.CredentialsTypeProperties; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader; +import com.netflix.spinnaker.credentials.poller.Poller; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.context.ApplicationContext; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class GoogleCredentialsConfiguration { + private static final Logger log = LoggerFactory.getLogger(GoogleCredentialsConfiguration.class); + + @Autowired NamerRegistry namerRegistry; + + @Bean + public CredentialsTypeBaseConfiguration< + GoogleNamedAccountCredentials, GoogleConfigurationProperties.ManagedAccount> + googleCredentialsProperties( + ApplicationContext applicationContext, + GoogleConfigurationProperties configurationProperties, + ConfigFileService configFileService, + GoogleConfiguration.DeployDefaults googleDeployDefaults, + GoogleExecutor googleExecutor, + String clouddriverUserAgentApplicationName) { + + return new CredentialsTypeBaseConfiguration( + applicationContext, + CredentialsTypeProperties + .builder() + .type(GoogleNamedAccountCredentials.CREDENTIALS_TYPE) + .credentialsDefinitionClass(GoogleConfigurationProperties.ManagedAccount.class) + .credentialsClass(GoogleNamedAccountCredentials.class) + .credentialsParser( + a -> { + try { + String jsonKey = configFileService.getContents(a.getJsonPath()); + + return new GoogleNamedAccountCredentials.Builder() + .name(a.getName()) + .environment( + StringUtils.isEmpty(a.getEnvironment()) + ? a.getName() + : a.getEnvironment()) + .accountType( + StringUtils.isEmpty(a.getAccountType()) + ? a.getName() + : a.getAccountType()) + .project(a.getProject()) + .computeVersion( + a.isAlphaListed() ? 
ComputeVersion.ALPHA : ComputeVersion.DEFAULT) + .jsonKey(jsonKey) + .serviceAccountId(a.getServiceAccountId()) + .serviceAccountProject(a.getServiceAccountProject()) + .imageProjects(a.getImageProjects()) + .requiredGroupMembership(a.getRequiredGroupMembership()) + .permissions(a.getPermissions().build()) + .applicationName(clouddriverUserAgentApplicationName) + .consulConfig(a.getConsul()) + .instanceTypeDisks(googleDeployDefaults.getInstanceTypeDisks()) + .userDataFile(a.getUserDataFile()) + .regionsToManage( + a.getRegions(), configurationProperties.getDefaultRegions()) + .namer(namerRegistry.getNamingStrategy(a.getNamingStrategy())) + .build(); + } catch (Exception e) { + log.info("Error loading Google credentials: " + e.getMessage() + "."); + return null; + } + }) + .defaultCredentialsSource(configurationProperties::getAccounts) + .build()); + } + + @Bean + public CredentialsInitializerSynchronizable googleCredentialsInitializerSynchronizable( + AbstractCredentialsLoader loader) { + final Poller poller = new Poller<>(loader); + return new CredentialsInitializerSynchronizable() { + @Override + public void synchronize() { + poller.run(); + } + }; + } + + @Bean + @ConditionalOnMissingBean( + value = GoogleNamedAccountCredentials.class, + parameterizedContainer = CredentialsRepository.class) + public CredentialsRepository googleCredentialsRepository( + CredentialsLifecycleHandler eventHandler) { + return new MapBackedCredentialsRepository<>(GoogleCloudProvider.getID(), eventHandler); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/controllers/GoogleNamedImageLookupController.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/controllers/GoogleNamedImageLookupController.java new file mode 100644 index 00000000000..76ef4fd37c6 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/controllers/GoogleNamedImageLookupController.java @@ -0,0 +1,211 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
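// Sketch of the parser contract used above: returning null from the
// credentialsParser drops only the failing account instead of aborting
// startup, so one unreadable jsonPath does not take down every Google
// account. Field names mirror ManagedAccount; the surrounding builder
// wiring is elided.
//
// GoogleNamedAccountCredentials parse(GoogleConfigurationProperties.ManagedAccount a) {
//   try {
//     return new GoogleNamedAccountCredentials.Builder()
//         .name(a.getName())
//         .project(a.getProject())
//         .jsonKey(configFileService.getContents(a.getJsonPath()))
//         .build();
//   } catch (Exception e) {
//     log.info("Error loading Google credentials: " + e.getMessage() + ".");
//     return null; // skipped by the loader; other accounts still load
//   }
// }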
+ */ + +package com.netflix.spinnaker.clouddriver.google.controllers; + +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.IMAGES; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.compute.model.Image; +import com.google.common.annotations.VisibleForTesting; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import groovy.util.logging.Slf4j; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.servlet.http.HttpServletRequest; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@Slf4j +@RestController +@RequestMapping("/gce/images") +public class GoogleNamedImageLookupController { + + private final Cache cacheView; + private final GsonFactory jsonMapper = new GsonFactory(); + private final ObjectMapper objectMapper = + new ObjectMapper().configure(JsonGenerator.Feature.WRITE_NUMBERS_AS_STRINGS, true); + + @Autowired + private GoogleNamedImageLookupController(Cache cacheView) { + this.cacheView = cacheView; + } + + @RequestMapping(value = "/find", method = RequestMethod.GET) + public List list( + @RequestParam(required = false) String q, + @RequestParam(required = false) String account, + HttpServletRequest request) { + Collection imageCacheData = getImageCacheData(account); + Predicate queryFilter = getQueryFilter(q); + Predicate tagFilter = getTagFilter(request); + return imageCacheData.stream() + .map(this::createNamedImageFromCacheData) + .filter(queryFilter) + .filter(tagFilter) + .sorted(Comparator.comparing(image -> image.imageName)) + .collect(Collectors.toList()); + } + + private Collection getImageCacheData(String account) { + // If no account supplied, return images from all accounts + String pattern = + (account == null || account.isEmpty()) + ? 
String.format("%s:images:*", GoogleCloudProvider.getID()) + : String.format("%s:images:%s:*", GoogleCloudProvider.getID(), account); + Collection identifiers = cacheView.filterIdentifiers(IMAGES.getNs(), pattern); + return cacheView.getAll(IMAGES.getNs(), identifiers, RelationshipCacheFilter.none()); + } + + private NamedImage createNamedImageFromCacheData(CacheData cacheDatum) throws RuntimeException { + try { + Object hashImage = cacheDatum.getAttributes().get("image"); + Image image = jsonMapper.fromString(objectMapper.writeValueAsString(hashImage), Image.class); + String imageAccount = Keys.parse(cacheDatum.getId()).get("account"); + Map attributes = new HashMap<>(); + attributes.put("creationDate", image.get("creationTimestamp")); + return new NamedImage(imageAccount, image.getName(), attributes, buildTagsMap(image)); + } catch (IOException e) { + throw new RuntimeException("Image deserialization failed"); + } + } + + private Predicate getQueryFilter(String q) { + Predicate queryFilter = i -> true; + if (q != null && q.trim().length() > 0) { + String glob = q.trim(); + // Wrap in '*' if there are no glob-style characters in the query string. + if (!glob.contains("*") + && !glob.contains("?") + && !glob.contains("[") + && !glob.contains("\\")) { + glob = "*" + glob + "*"; + } + Pattern pattern = new InMemoryCache.Glob(glob).toPattern(); + queryFilter = i -> pattern.matcher(i.imageName).matches(); + } + return queryFilter; + } + + private Predicate getTagFilter(HttpServletRequest request) { + Predicate tagFilter = i -> true; + Map tagFilters = extractTagFilters(request); + if (!tagFilters.isEmpty()) { + tagFilter = i -> matchesTagFilters(i, tagFilters); + } + return tagFilter; + } + + @VisibleForTesting + public static Map buildTagsMap(Image image) { + Map tags = new HashMap<>(); + + String description = image.getDescription(); + // For a description of the form: + // key1: value1, key2: value2, key3: value3 + // we'll build a map associating each key with + // its associated value + if (description != null) { + tags = + Arrays.stream(description.split(",")) + .filter(token -> token.contains(": ")) + .map(token -> token.split(": ", 2)) + .collect( + Collectors.toMap( + token -> token[0].trim(), token -> token[1].trim(), (a, b) -> b)); + } + + Map labels = image.getLabels(); + if (labels != null) { + tags.putAll(labels); + } + + return tags; + } + + /** + * Apply tag-based filtering to the list of named images. + * + *
<p>
For example: /gce/images/find?q=PackageName&tag:stage=released&tag:somekey=someval + */ + private static List filter( + List namedImages, Map tagFilters) { + return namedImages.stream() + .filter(namedImage -> matchesTagFilters(namedImage, tagFilters)) + .collect(Collectors.toList()); + } + + private static boolean matchesTagFilters(NamedImage namedImage, Map tagFilters) { + Map tags = namedImage.tags; + return tagFilters.keySet().stream() + .allMatch( + tag -> + tags.containsKey(tag.toLowerCase()) + && tags.get(tag.toLowerCase()).equalsIgnoreCase(tagFilters.get(tag))); + } + + private static Map extractTagFilters(HttpServletRequest httpServletRequest) { + List parameterNames = Collections.list(httpServletRequest.getParameterNames()); + + return parameterNames.stream() + .filter(Objects::nonNull) + .filter(parameter -> parameter.toLowerCase().startsWith("tag:")) + .collect( + Collectors.toMap( + tagParameter -> tagParameter.replaceAll("tag:", "").toLowerCase(), + httpServletRequest::getParameter, + (a, b) -> b)); + } + + @VisibleForTesting + public static class NamedImage { + public String account; + public String imageName; + public Map attributes = new HashMap<>(); + public Map tags = new HashMap<>(); + + private NamedImage( + String account, + String imageName, + Map attributes, + Map tags) { + this.account = account; + this.imageName = imageName; + this.attributes = attributes; + this.tags = tags; + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/SafeRetry.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/SafeRetry.java new file mode 100644 index 00000000000..ba228a81402 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/SafeRetry.java @@ -0,0 +1,95 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
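// Worked example for buildTagsMap above: description text of the form
// "key1: value1, key2: value2" is split on commas, then on the first ": ",
// and image labels are merged in afterwards (labels win on key collisions).
// Values are illustrative; the snippet assumes the usual java.util imports.
//
// Image image = new Image();
// image.setDescription("appversion: myapp-1.0.0-h42, build_host: https://ci.example.com");
// image.setLabels(Map.of("stage", "released"));
//
// Map<String, String> tags = GoogleNamedImageLookupController.buildTagsMap(image);
// // tags => {appversion=myapp-1.0.0-h42, build_host=https://ci.example.com, stage=released}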
+ */
+
+package com.netflix.spinnaker.clouddriver.google.deploy;
+
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException;
+import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException;
+import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException.ResourceInUseException;
+import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleCommonSafeRetry;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import groovy.lang.Closure;
+import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Component;
+
+@Component
+@NonnullByDefault
+@Slf4j
+public class SafeRetry {
+  private final GoogleCommonSafeRetry googleCommonSafeRetry;
+
+  @Autowired
+  @ParametersAreNullableByDefault
+  public SafeRetry(
+      @Value("${google.safe-retry-max-wait-interval-ms:60000}") Integer maxWaitInterval,
+      @Value("${google.safe-retry-retry-interval-base-sec:2}") Integer retryIntervalBase,
+      @Value("${google.safe-retry-jitter-multiplier:1000}") Integer jitterMultiplier,
+      @Value("${google.safe-retry-max-retries:10}") Integer maxRetries) {
+    googleCommonSafeRetry =
+        new GoogleCommonSafeRetry(maxWaitInterval, retryIntervalBase, jitterMultiplier, maxRetries);
+  }
+
+  private SafeRetry(GoogleCommonSafeRetry googleCommonSafeRetry) {
+    this.googleCommonSafeRetry = googleCommonSafeRetry;
+  }
+
+  /**
+   * Returns an instance of this class that never waits between retries, suitable for testing.
+   *
+   * @return An instance of {@link SafeRetry}
+   */
+  public static SafeRetry withoutDelay() {
+    return new SafeRetry(GoogleCommonSafeRetry.withoutDelay());
+  }
+
+  @Nullable
+  public <V> V doRetry(
+      Closure<V> operation,
+      String resource,
+      @Nullable Task task,
+      List<Integer> retryCodes,
+      List<Integer> successCodes,
+      Map<String, String> tags,
+      Registry registry) {
+    String action = tags.get("action");
+    String description = String.format("%s of %s", action, resource);
+    if (task != null) {
+      task.updateStatus(tags.get("phase"), String.format("Attempting %s...", description));
+    }
+
+    try {
+      return googleCommonSafeRetry.doRetry(
+          operation, description, retryCodes, successCodes, tags, registry);
+    } catch (ResourceInUseException e) {
+      // Don't fail the operation if the resource is in use. The main use case for this is
+      // resiliency in delete operations - we don't want to fail the operation if something is in
+      // use by another resource.
+      log.warn(e.getMessage());
+      return null;
+    } catch (GoogleApiException e) {
+      throw new GoogleOperationException("Failed to " + description, e);
+    }
+  }
+} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/converters/SetStatefulDiskAtomicOperationConverter.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/converters/SetStatefulDiskAtomicOperationConverter.java new file mode 100644 index 00000000000..548a1fd340b --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/converters/SetStatefulDiskAtomicOperationConverter.java @@ -0,0 +1,57 @@ +/*
+ * Copyright 2019 Google, Inc.
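// A doRetry call sketch, mirroring how deploy operations use SafeRetry. The
// Closure comes from Groovy interop (MethodClosure wraps a plain method);
// retry and success codes are HTTP statuses, and the tags supply the
// "action"/"phase" strings used in task updates and metrics. Names below are
// illustrative, not taken from this diff.
//
// Operation result =
//     safeRetry.doRetry(
//         new MethodClosure(this, "deleteInstanceGroup"), // operation to retry
//         "instance group myapp-v003", // resource named in status messages
//         task,
//         List.of(400, 403, 412), // transient statuses worth retrying
//         List.of(404), // statuses treated as success (already gone)
//         Map.of("action", "delete", "phase", "DESTROY_SERVER_GROUP"),
//         registry);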
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.converters; + +import com.netflix.spinnaker.clouddriver.google.GoogleOperation; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.deploy.description.SetStatefulDiskDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.ops.SetStatefulDiskAtomicOperation; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@GoogleOperation(AtomicOperations.SET_STATEFUL_DISK) +@Component +public class SetStatefulDiskAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsConverter { + + private final GoogleClusterProvider clusterProvider; + private final GoogleComputeApiFactory computeApiFactory; + + @Autowired + public SetStatefulDiskAtomicOperationConverter( + GoogleClusterProvider clusterProvider, GoogleComputeApiFactory computeApiFactory) { + this.clusterProvider = clusterProvider; + this.computeApiFactory = computeApiFactory; + } + + @Override + public SetStatefulDiskAtomicOperation convertOperation(Map input) { + return new SetStatefulDiskAtomicOperation( + clusterProvider, computeApiFactory, convertDescription(input)); + } + + @Override + public SetStatefulDiskDescription convertDescription(Map input) { + return GoogleAtomicOperationConverterHelper.convertDescription( + input, this, SetStatefulDiskDescription.class); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/converters/StatefullyUpdateBootImageOperationConverter.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/converters/StatefullyUpdateBootImageOperationConverter.java new file mode 100644 index 00000000000..dee12412622 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/converters/StatefullyUpdateBootImageOperationConverter.java @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
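// Hypothetical orchestration payload for the converter above. Operations
// arrive as loosely typed maps; convertDescription() binds the map onto
// SetStatefulDiskDescription, with the "credentials" account name resolved
// to a GoogleNamedAccountCredentials by the credentials-aware base class.
//
// Map<String, Object> input =
//     Map.of(
//         "serverGroupName", "myapp-v003",
//         "region", "us-central1",
//         "deviceName", "myapp-v003-data",
//         "credentials", "my-gce-account");
// SetStatefulDiskAtomicOperation operation = converter.convertOperation(input);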
+ */ + +package com.netflix.spinnaker.clouddriver.google.deploy.converters; + +import com.netflix.spinnaker.clouddriver.google.GoogleOperation; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.description.StatefullyUpdateBootImageDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.ops.StatefullyUpdateBootImageAtomicOperation; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@GoogleOperation(AtomicOperations.STATEFULLY_UPDATE_BOOT_IMAGE) +@Component +public class StatefullyUpdateBootImageOperationConverter + extends AbstractAtomicOperationsCredentialsConverter { + + private final GoogleClusterProvider clusterProvider; + private final GoogleComputeApiFactory computeApiFactory; + private final GoogleConfigurationProperties googleConfigurationProperties; + + @Autowired + public StatefullyUpdateBootImageOperationConverter( + GoogleClusterProvider clusterProvider, + GoogleComputeApiFactory computeApiFactory, + GoogleConfigurationProperties googleConfigurationProperties) { + this.clusterProvider = clusterProvider; + this.computeApiFactory = computeApiFactory; + this.googleConfigurationProperties = googleConfigurationProperties; + } + + @Override + public StatefullyUpdateBootImageAtomicOperation convertOperation(Map input) { + return new StatefullyUpdateBootImageAtomicOperation( + clusterProvider, + computeApiFactory, + googleConfigurationProperties, + convertDescription(input)); + } + + @Override + public StatefullyUpdateBootImageDescription convertDescription(Map input) { + return GoogleAtomicOperationConverterHelper.convertDescription( + input, this, StatefullyUpdateBootImageDescription.class); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/description/SetStatefulDiskDescription.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/description/SetStatefulDiskDescription.java new file mode 100644 index 00000000000..4867c3b339d --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/description/SetStatefulDiskDescription.java @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
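// Companion payload sketch for the boot-image converter above; the binding
// path is identical to the stateful-disk example, with "bootImage" naming
// the image that should replace the server group's current boot disk image.
//
// Map<String, Object> input =
//     Map.of(
//         "serverGroupName", "myapp-v003",
//         "region", "us-central1",
//         "bootImage", "centos-7-v20190619",
//         "credentials", "my-gce-account");
// StatefullyUpdateBootImageAtomicOperation operation = converter.convertOperation(input);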
+ */ + +package com.netflix.spinnaker.clouddriver.google.deploy.description; + +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable; +import lombok.Data; + +@Data +public class SetStatefulDiskDescription implements CredentialsNameable, ServerGroupNameable { + + private GoogleNamedAccountCredentials credentials; + private String serverGroupName; + private String region; + private String deviceName; +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/description/StatefullyUpdateBootImageDescription.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/description/StatefullyUpdateBootImageDescription.java new file mode 100644 index 00000000000..62a72a25eb6 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/description/StatefullyUpdateBootImageDescription.java @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.description; + +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable; +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable; +import lombok.Data; + +@Data +public class StatefullyUpdateBootImageDescription + implements CredentialsNameable, ServerGroupNameable { + + private GoogleNamedAccountCredentials credentials; + private String serverGroupName; + private String region; + private String bootImage; +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleOperationException.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleOperationException.java new file mode 100644 index 00000000000..67d7d9a2e2b --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/exception/GoogleOperationException.java @@ -0,0 +1,30 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
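// The Lombok @Data annotations above generate the getters, setters, equals,
// hashCode, and toString for these descriptions, and CredentialsNameable
// derives the account name from the attached credentials. A description is
// therefore populated as plain bean wiring (values illustrative):
//
// SetStatefulDiskDescription description = new SetStatefulDiskDescription();
// description.setServerGroupName("myapp-v003");
// description.setRegion("us-central1");
// description.setDeviceName("myapp-v003-data");
// description.setCredentials(credentials); // getAccount() now resolves from these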
+ */ +package com.netflix.spinnaker.clouddriver.google.deploy.exception; + +public class GoogleOperationException extends RuntimeException { + public GoogleOperationException() { + super(); + } + + public GoogleOperationException(String message) { + super(message); + } + + public GoogleOperationException(String message, Throwable t) { + super(message, t); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.java new file mode 100644 index 00000000000..acd9196e59a --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.java @@ -0,0 +1,1374 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.handlers; + +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.*; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.Autoscaler; +import com.google.api.services.compute.model.Backend; +import com.google.api.services.compute.model.BackendService; +import com.google.api.services.compute.model.DistributionPolicy; +import com.google.api.services.compute.model.DistributionPolicyZoneConfiguration; +import com.google.api.services.compute.model.FixedOrPercent; +import com.google.api.services.compute.model.Image; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceGroupManagerAutoHealingPolicy; +import com.google.api.services.compute.model.InstanceProperties; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.api.services.compute.model.Metadata; +import com.google.api.services.compute.model.NamedPort; +import com.google.api.services.compute.model.NetworkInterface; +import com.google.api.services.compute.model.Operation; +import com.google.api.services.compute.model.Scheduling; +import com.google.api.services.compute.model.ServiceAccount; +import com.google.api.services.compute.model.Tags; +import com.netflix.frigga.Names; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.deploy.DeployDescription; +import com.netflix.spinnaker.clouddriver.deploy.DeployHandler; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider; +import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits; +import 
com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.ops.GoogleUserDataProvider; +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck; +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource; +import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.model.GoogleSubnet; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerView; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleNetworkLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleSslLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTcpLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.config.GoogleConfiguration; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import groovy.lang.Closure; +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; +import lombok.Data; +import lombok.extern.log4j.Log4j2; +import org.apache.commons.lang3.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +@Log4j2 +public class BasicGoogleDeployHandler + implements DeployHandler, GoogleExecutorTraits { + + private static final String BASE_PHASE = "DEPLOY"; + private static final String DEFAULT_NETWORK_NAME = "default"; + private static final String ACCESS_CONFIG_NAME = "External NAT"; + private static final String ACCESS_CONFIG_TYPE = "ONE_TO_ONE_NAT"; + private static final Integer MAX_NAME_SIZE = 64; + private static final Integer TEMPLATE_UUID_SIZE = 8; + + @Autowired private GoogleConfigurationProperties googleConfigurationProperties; + + @Autowired private GoogleClusterProvider googleClusterProvider; + 
+ @Autowired private GoogleConfiguration.DeployDefaults googleDeployDefaults; + + @Autowired private GoogleOperationPoller googleOperationPoller; + + @Autowired private GoogleUserDataProvider googleUserDataProvider; + + @Autowired GoogleLoadBalancerProvider googleLoadBalancerProvider; + + @Autowired GoogleNetworkProvider googleNetworkProvider; + + @Autowired GoogleSubnetProvider googleSubnetProvider; + + @Autowired String clouddriverUserAgentApplicationName; + + @Autowired Cache cacheView; + + @Autowired ObjectMapper objectMapper; + + @Autowired SafeRetry safeRetry; + + @Autowired Registry registry; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public DeploymentResult handle(BasicGoogleDeployDescription description, List priorOutputs) { + Task task = getTask(); + + try { + String region = getRegionFromInput(description); + String location = getLocationFromInput(description, region); + GCEServerGroupNameResolver nameResolver = getServerGroupNameResolver(description, region); + String clusterName = + nameResolver.getClusterName( + description.getApplication(), + description.getStack(), + description.getFreeFormDetails()); + + task.updateStatus( + BASE_PHASE, + String.format( + "Initializing creation of server group for cluster %s in %s...", + clusterName, location)); + task.updateStatus(BASE_PHASE, "Looking up next sequence..."); + + String nextServerGroupName = + nameResolver.resolveNextServerGroupName( + description.getApplication(), + description.getStack(), + description.getFreeFormDetails(), + false); + task.updateStatus( + BASE_PHASE, String.format("Produced server group name: %s", nextServerGroupName)); + + String machineTypeName = getMachineTypeNameFromInput(description, task, location); + GoogleNetwork network = buildNetworkFromInput(description, task); + GoogleSubnet subnet = buildSubnetFromInput(description, task, network, region); + LoadBalancerInfo lbToUpdate = getLoadBalancerToUpdateFromInput(description, task); + + task.updateStatus( + BASE_PHASE, String.format("Composing server group %s...", nextServerGroupName)); + + description.setBaseDeviceName( + nextServerGroupName); // I left this because I assumed GCEUtils use it at some point. 
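// Worked example of the naming steps above, following Frigga conventions:
// application "myapp", stack "prod", and freeFormDetails "canary" combine
// into cluster "myapp-prod-canary"; if that cluster's newest server group is
// myapp-prod-canary-v003, the resolver hands back the next sequence number.
//
// String clusterName =
//     nameResolver.getClusterName("myapp", "prod", "canary"); // myapp-prod-canary
// String nextName =
//     nameResolver.resolveNextServerGroupName("myapp", "prod", "canary", false);
// // nextName => myapp-prod-canary-v004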
+      Image bootImage = buildBootImage(description, task);
+      List<AttachedDisk> attachedDisks = buildAttachedDisks(description, task, bootImage);
+      NetworkInterface networkInterface = buildNetworkInterface(description, network, subnet);
+      GoogleHttpLoadBalancingPolicy lbPolicy = buildLoadBalancerPolicyFromInput(description);
+      List<BackendService> backendServicesToUpdate =
+          getBackendServiceToUpdate(description, nextServerGroupName, lbToUpdate, lbPolicy, region);
+      List<BackendService> regionBackendServicesToUpdate =
+          getRegionBackendServicesToUpdate(
+              description, nextServerGroupName, lbToUpdate, lbPolicy, region);
+
+      String now = String.valueOf(System.currentTimeMillis());
+      String suffix = now.substring(now.length() - TEMPLATE_UUID_SIZE);
+      String instanceTemplateName = String.format("%s-%s", nextServerGroupName, suffix);
+      if (instanceTemplateName.length() > MAX_NAME_SIZE) {
+        throw new IllegalArgumentException(
+            String.format(
+                "Max name length %d exceeded in resolved instance template name %s.",
+                MAX_NAME_SIZE, instanceTemplateName));
+      }
+
+      addUserDataToInstanceMetadata(description, nextServerGroupName, instanceTemplateName, task);
+      addSelectZonesToInstanceMetadata(description);
+
+      Metadata metadata = buildMetadataFromInstanceMetadata(description);
+      Tags tags = buildTagsFromInput(description);
+      List<ServiceAccount> serviceAccounts = buildServiceAccountFromInput(description);
+      Scheduling scheduling = buildSchedulingFromInput(description);
+      Map<String, String> labels = buildLabelsFromInput(description, nextServerGroupName, region);
+
+      setupMonikerForOperation(description, nextServerGroupName, clusterName);
+      validateAcceleratorConfig(description);
+
+      InstanceProperties instanceProperties =
+          buildInstancePropertiesFromInput(
+              description,
+              machineTypeName,
+              attachedDisks,
+              networkInterface,
+              metadata,
+              tags,
+              serviceAccounts,
+              scheduling,
+              labels);
+      addShieldedVmConfigToInstanceProperties(description, instanceProperties, bootImage);
+      addMinCpuPlatformToInstanceProperties(description, instanceProperties);
+      InstanceTemplate instanceTemplate =
+          buildInstanceTemplate(instanceTemplateName, instanceProperties);
+
+      String instanceTemplateUrl =
+          createInstanceTemplateAndWait(description.getCredentials(), instanceTemplate, task);
+
+      setCapacityFromInput(description);
+      setAutoscalerCapacityFromInput(description);
+      setCapacityFromSource(description, task);
+
+      List<InstanceGroupManagerAutoHealingPolicy> autoHealingPolicy =
+          buildAutoHealingPolicyFromInput(description, task);
+      InstanceGroupManager instanceGroupManager =
+          buildInstanceGroupFromInput(
+              description,
+              nextServerGroupName,
+              instanceTemplateUrl,
+              lbToUpdate.targetPools,
+              autoHealingPolicy);
+      setNamedPortsToInstanceGroup(description, lbToUpdate, instanceGroupManager);
+
+      createInstanceGroupManagerFromInput(
+          description, instanceGroupManager, lbToUpdate, nextServerGroupName, region, task);
+
+      task.updateStatus(
+          BASE_PHASE,
+          String.format("Done creating server group %s in %s.", nextServerGroupName, location));
+
+      updateBackendServices(
+          description, lbToUpdate, nextServerGroupName, backendServicesToUpdate, task);
+      updateRegionalBackendServices(
+          description,
+          lbToUpdate,
+          nextServerGroupName,
+          region,
+          regionBackendServicesToUpdate,
+          task);
+
+      DeploymentResult deploymentResult = new DeploymentResult();
+      deploymentResult.setServerGroupNames(
+          List.of(String.format("%s:%s", region, nextServerGroupName)));
+      deploymentResult.setServerGroupNameByRegion(Map.of(region, nextServerGroupName));
+      return deploymentResult;
+
+    } catch (IOException e) {
+      throw new IllegalStateException("Unexpected error in handler: " + e.getMessage(), e);
+    }
+
} + + protected GCEServerGroupNameResolver getServerGroupNameResolver( + BasicGoogleDeployDescription description, String region) { + GoogleNamedAccountCredentials credentials = description.getCredentials(); + return new GCEServerGroupNameResolver( + credentials.getProject(), region, credentials, googleClusterProvider, safeRetry, this); + } + + protected String getRegionFromInput(BasicGoogleDeployDescription description) { + return StringUtils.isNotBlank(description.getRegion()) + ? description.getRegion() + : description.getCredentials().regionFromZone(description.getZone()); + } + + protected String getLocationFromInput(BasicGoogleDeployDescription description, String region) { + return description.getRegional() ? region : description.getZone(); + } + + protected String getMachineTypeNameFromInput( + BasicGoogleDeployDescription description, Task task, String location) { + if (description.getInstanceType().contains("custom-")) { + return description.getInstanceType(); + } else { + return GCEUtil.queryMachineType( + description.getInstanceType(), location, description.getCredentials(), task, BASE_PHASE); + } + } + + protected GoogleNetwork buildNetworkFromInput( + BasicGoogleDeployDescription description, Task task) { + String networkName = + StringUtils.isNotBlank(description.getNetwork()) + ? description.getNetwork() + : DEFAULT_NETWORK_NAME; + return GCEUtil.queryNetwork( + description.getAccountName(), networkName, task, BASE_PHASE, googleNetworkProvider); + } + + protected GoogleSubnet buildSubnetFromInput( + BasicGoogleDeployDescription description, Task task, GoogleNetwork network, String region) { + GoogleSubnet subnet = + StringUtils.isNotBlank(description.getSubnet()) + ? GCEUtil.querySubnet( + description.getAccountName(), + region, + description.getSubnet(), + task, + BASE_PHASE, + googleSubnetProvider) + : null; + + // If no subnet is passed and the network is both an xpn host network and an auto-subnet + // network, then we need to set the subnet ourselves here. + // This shouldn't be required, but GCE complains otherwise. + if (subnet != null && network.getId().contains("/") && network.getAutoCreateSubnets()) { + // Auto-created subnets have the same name as the containing network. + subnet = + GCEUtil.querySubnet( + description.getAccountName(), + region, + network.getId(), + task, + BASE_PHASE, + googleSubnetProvider); + } + return subnet; + } + + protected LoadBalancerInfo getLoadBalancerToUpdateFromInput( + BasicGoogleDeployDescription description, Task task) { + // We need the full url for each referenced network load balancer, and also to check that the + // HTTP(S) + // load balancers exist. + LoadBalancerInfo info = new LoadBalancerInfo(); + if (description.getLoadBalancers().isEmpty()) { + return info; + } + // GCEUtil.queryAllLoadBalancers() will throw an exception if a referenced load balancer cannot + // be resolved. + List foundLB = + GCEUtil.queryAllLoadBalancers( + googleLoadBalancerProvider, description.getLoadBalancers(), task, BASE_PHASE); + // Queue ILBs to update, but wait to update metadata until Https LBs are calculated. + info.internalLoadBalancers = + foundLB.stream() + .filter(lb -> lb.getLoadBalancerType() == GoogleLoadBalancerType.INTERNAL) + .collect(Collectors.toList()); + info.internalHttpLoadBalancers = + foundLB.stream() + .filter(lb -> lb.getLoadBalancerType() == GoogleLoadBalancerType.INTERNAL_MANAGED) + .collect(Collectors.toList()); + // Queue SSL LBs to update. 
+ info.sslLoadBalancers = + foundLB.stream() + .filter(lb -> lb.getLoadBalancerType() == GoogleLoadBalancerType.SSL) + .collect(Collectors.toList()); + // Queue TCP LBs to update. + info.tcpLoadBalancers = + foundLB.stream() + .filter(lb -> lb.getLoadBalancerType() == GoogleLoadBalancerType.TCP) + .collect(Collectors.toList()); + + if (!description.getDisableTraffic()) { + info.targetPools = + foundLB.stream() + .filter(lb -> lb.getLoadBalancerType() == GoogleLoadBalancerType.NETWORK) + .map(lb -> (GoogleNetworkLoadBalancer.View) lb) + .map(GoogleNetworkLoadBalancer.View::getTargetPool) + .distinct() + .collect(Collectors.toList()); + } + return info; + } + + protected Image buildBootImage(BasicGoogleDeployDescription description, Task task) { + return GCEUtil.getBootImage( + description, + task, + BASE_PHASE, + clouddriverUserAgentApplicationName, + googleConfigurationProperties.getBaseImageProjects(), + safeRetry, + this); + } + + protected List buildAttachedDisks( + BasicGoogleDeployDescription description, Task task, Image bootImage) { + return GCEUtil.buildAttachedDisks( + description, + null, + false, + googleDeployDefaults, + task, + BASE_PHASE, + clouddriverUserAgentApplicationName, + googleConfigurationProperties.getBaseImageProjects(), + bootImage, + safeRetry, + this); + } + + protected NetworkInterface buildNetworkInterface( + BasicGoogleDeployDescription description, GoogleNetwork network, GoogleSubnet subnet) { + boolean associatePublicIpAddress = + description.getAssociatePublicIpAddress() == null + || description.getAssociatePublicIpAddress(); + return GCEUtil.buildNetworkInterface( + network, subnet, associatePublicIpAddress, ACCESS_CONFIG_NAME, ACCESS_CONFIG_TYPE); + } + + protected boolean hasBackedServiceFromInput( + BasicGoogleDeployDescription description, LoadBalancerInfo loadBalancerInfo) { + Map instanceMetadata = description.getInstanceMetadata(); + return (!instanceMetadata.isEmpty() && instanceMetadata.containsKey(BACKEND_SERVICE_NAMES)) + || !loadBalancerInfo.getSslLoadBalancers().isEmpty() + || !loadBalancerInfo.getTcpLoadBalancers().isEmpty(); + } + + protected GoogleHttpLoadBalancingPolicy buildLoadBalancerPolicyFromInput( + BasicGoogleDeployDescription description) throws JsonProcessingException { + Map instanceMetadata = description.getInstanceMetadata(); + String sourcePolicyJson = instanceMetadata.get(LOAD_BALANCING_POLICY); + if (description.getLoadBalancingPolicy() != null + && description.getLoadBalancingPolicy().getBalancingMode() != null) { + return description.getLoadBalancingPolicy(); + } else if (StringUtils.isNotBlank(sourcePolicyJson)) { + return objectMapper.readValue(sourcePolicyJson, GoogleHttpLoadBalancingPolicy.class); + } else { + log.warn( + "No load balancing policy found in the operation description or the source server group, adding defaults"); + GoogleHttpLoadBalancingPolicy policy = new GoogleHttpLoadBalancingPolicy(); + policy.setBalancingMode(GoogleLoadBalancingPolicy.BalancingMode.UTILIZATION); + policy.setMaxUtilization(0.80f); + policy.setCapacityScaler(1.0f); + NamedPort namedPort = new NamedPort(); + namedPort.setName(GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + namedPort.setPort(GoogleHttpLoadBalancingPolicy.getHTTP_DEFAULT_PORT()); + policy.setNamedPorts(List.of(namedPort)); + return policy; + } + } + + protected List getBackendServiceToUpdate( + BasicGoogleDeployDescription description, + String serverGroupName, + LoadBalancerInfo lbInfo, + GoogleHttpLoadBalancingPolicy policy, + String region) { + // Resolve 
and queue the backend service updates, but don't execute yet. + // We need to resolve this information to set metadata in the template so enable can know about + // the + // load balancing policy this server group was configured with. + // If we try to execute the update, GCP will fail since the MIG is not created yet. + if (hasBackedServiceFromInput(description, lbInfo)) { + List backendServicesToUpdate = new ArrayList<>(); + Map instanceMetadata = description.getInstanceMetadata(); + List backendServices = + instanceMetadata.get(BACKEND_SERVICE_NAMES) != null + ? new ArrayList<>( + Arrays.asList(instanceMetadata.get(BACKEND_SERVICE_NAMES).split(","))) + : new ArrayList<>(); + backendServices.addAll( + lbInfo.getSslLoadBalancers().stream() + .map(lb -> (GoogleSslLoadBalancer.View) lb) + .map(it -> it.getBackendService().getName()) + .collect(Collectors.toList())); + backendServices.addAll( + lbInfo.getTcpLoadBalancers().stream() + .map(lb -> (GoogleTcpLoadBalancer.View) lb) + .map(it -> it.getBackendService().getName()) + .collect(Collectors.toList())); + + // Set the load balancer name metadata. + List globalLbNames = + lbInfo.getSslLoadBalancers().stream() + .map(GoogleLoadBalancerView::getName) + .collect(Collectors.toList()); + globalLbNames.addAll( + lbInfo.getTcpLoadBalancers().stream() + .map(GoogleLoadBalancerView::getName) + .collect(Collectors.toList())); + globalLbNames.addAll( + GCEUtil.resolveHttpLoadBalancerNamesMetadata( + backendServices, + description.getCredentials().getCompute(), + description.getCredentials().getProject(), + this)); + instanceMetadata.put(GLOBAL_LOAD_BALANCER_NAMES, String.join(",", globalLbNames)); + + backendServices.forEach( + backendServiceName -> { + try { + BackendService backendService = + getBackendServiceFromProvider(description.getCredentials(), backendServiceName); + GCEUtil.updateMetadataWithLoadBalancingPolicy(policy, instanceMetadata, objectMapper); + Backend backendToAdd = GCEUtil.backendFromLoadBalancingPolicy(policy); + if (description.getRegional()) { + backendToAdd.setGroup( + GCEUtil.buildRegionalServerGroupUrl( + description.getCredentials().getProject(), region, serverGroupName)); + } else { + backendToAdd.setGroup( + GCEUtil.buildZonalServerGroupUrl( + description.getCredentials().getProject(), + description.getZone(), + serverGroupName)); + } + if (backendService.getBackends() == null) { + backendService.setBackends(new ArrayList<>()); + } + backendService.getBackends().add(backendToAdd); + backendServicesToUpdate.add(backendService); + } catch (IOException e) { + log.error(e.getMessage()); + } + }); + return backendServicesToUpdate; + } + return Collections.emptyList(); + } + + protected List getRegionBackendServicesToUpdate( + BasicGoogleDeployDescription description, + String serverGroupName, + LoadBalancerInfo lbInfo, + GoogleHttpLoadBalancingPolicy policy, + String region) { + if (!lbInfo.getInternalLoadBalancers().isEmpty() + || !lbInfo.getInternalHttpLoadBalancers().isEmpty()) { + List regionBackendServicesToUpdate = new ArrayList<>(); + Map instanceMetadata = description.getInstanceMetadata(); + List existingRegionalLbs = + instanceMetadata.get(REGIONAL_LOAD_BALANCER_NAMES) != null + ? new ArrayList<>( + Arrays.asList(instanceMetadata.get(REGIONAL_LOAD_BALANCER_NAMES).split(","))) + : new ArrayList<>(); + List regionBackendServices = + instanceMetadata.get(REGION_BACKEND_SERVICE_NAMES) != null + ? 
new ArrayList<>( + Arrays.asList(instanceMetadata.get(REGION_BACKEND_SERVICE_NAMES).split(","))) + : new ArrayList<>(); + List ilbServices = + lbInfo.getInternalLoadBalancers().stream() + .map(lb -> (GoogleInternalLoadBalancer.View) lb) + .map(it -> it.getBackendService().getName()) + .collect(Collectors.toList()); + ilbServices.addAll(regionBackendServices); + ilbServices.stream().distinct().collect(Collectors.toList()); + List ilbNames = + lbInfo.getInternalLoadBalancers().stream() + .map(GoogleLoadBalancerView::getName) + .collect(Collectors.toList()); + ilbNames.addAll( + lbInfo.getInternalHttpLoadBalancers().stream() + .map(GoogleLoadBalancerView::getName) + .collect(Collectors.toList())); + + ilbNames.forEach( + ilbName -> { + if (!existingRegionalLbs.contains(ilbName)) { + existingRegionalLbs.add(ilbName); + } + }); + instanceMetadata.put(REGIONAL_LOAD_BALANCER_NAMES, String.join(",", existingRegionalLbs)); + + List internalHttpLbBackendServices = + lbInfo.getInternalHttpLoadBalancers().stream() + .map(lb -> (GoogleInternalHttpLoadBalancer.InternalHttpLbView) lb) + .map(Utils::getBackendServicesFromInternalHttpLoadBalancerView) + .flatMap(Collection::stream) + .map(GoogleBackendService::getName) + .collect(Collectors.toList()); + + ilbServices.forEach( + backendServiceName -> { + try { + BackendService backendService = + getRegionBackendServiceFromProvider( + description.getCredentials(), region, serverGroupName); + Backend backendToAdd; + if (internalHttpLbBackendServices.contains(backendServiceName)) { + backendToAdd = GCEUtil.backendFromLoadBalancingPolicy(policy); + } else { + backendToAdd = new Backend(); + } + if (description.getRegional()) { + backendToAdd.setGroup( + GCEUtil.buildRegionalServerGroupUrl( + description.getCredentials().getProject(), region, serverGroupName)); + } else { + backendToAdd.setGroup( + GCEUtil.buildZonalServerGroupUrl( + description.getCredentials().getProject(), + description.getZone(), + serverGroupName)); + } + + if (backendService.getBackends() == null) { + backendService.setBackends(new ArrayList<>()); + } + backendService.getBackends().add(backendToAdd); + regionBackendServicesToUpdate.add(backendService); + } catch (IOException e) { + log.error(e.getMessage()); + } + }); + return regionBackendServicesToUpdate; + } + return Collections.emptyList(); + } + + protected void addUserDataToInstanceMetadata( + BasicGoogleDeployDescription description, + String serverGroupName, + String instanceTemplateName, + Task task) { + Map userDataMap = getUserData(description, serverGroupName, instanceTemplateName, task); + Map instanceMetadata = description.getInstanceMetadata(); + if (!instanceMetadata.isEmpty()) { + instanceMetadata.putAll(userDataMap); + } else { + instanceMetadata = userDataMap; + } + description.setInstanceMetadata(instanceMetadata); + } + + protected Map getUserData( + BasicGoogleDeployDescription description, + String serverGroupName, + String instanceTemplateName, + Task task) { + String customUserData = + StringUtils.isNotBlank(description.getUserData()) ? 
description.getUserData() : ""; + Map userData = + googleUserDataProvider.getUserData( + serverGroupName, + instanceTemplateName, + description, + description.getCredentials(), + customUserData); + task.updateStatus(BASE_PHASE, "Resolved user data."); + return userData; + } + + protected void addSelectZonesToInstanceMetadata(BasicGoogleDeployDescription description) { + if (description.getRegional() && description.getSelectZones()) { + Map instanceMetadata = description.getInstanceMetadata(); + instanceMetadata.put(SELECT_ZONES, "true"); + description.setInstanceMetadata(instanceMetadata); + } + } + + protected Metadata buildMetadataFromInstanceMetadata(BasicGoogleDeployDescription description) { + return GCEUtil.buildMetadataFromMap(description.getInstanceMetadata()); + } + + protected Tags buildTagsFromInput(BasicGoogleDeployDescription description) { + return GCEUtil.buildTagsFromList(description.getTags()); + } + + protected List buildServiceAccountFromInput( + BasicGoogleDeployDescription description) { + if (!description.getAuthScopes().isEmpty() + && StringUtils.isBlank(description.getServiceAccountEmail())) { + description.setServiceAccountEmail("default"); + } + + return GCEUtil.buildServiceAccount( + description.getServiceAccountEmail(), description.getAuthScopes()); + } + + protected Scheduling buildSchedulingFromInput(BasicGoogleDeployDescription description) { + return GCEUtil.buildScheduling(description); + } + + protected Map buildLabelsFromInput( + BasicGoogleDeployDescription description, String serverGroupName, String region) { + Map labels = description.getLabels(); + if (labels == null) { + labels = new HashMap<>(); + } + + // Used to group instances when querying for metrics from kayenta. + labels.put("spinnaker-region", region); + labels.put("spinnaker-server-group", serverGroupName); + return labels; + } + + private void setupMonikerForOperation( + BasicGoogleDeployDescription description, String serverGroupName, String clusterName) { + Namer namer = + NamerRegistry.lookup() + .withProvider(GoogleCloudProvider.getID()) + .withAccount(description.getAccountName()) + .withResource(GoogleLabeledResource.class); + + Integer sequence = Names.parseName(serverGroupName).getSequence(); + + Moniker moniker = + Moniker.builder() + .app(description.getApplication()) + .cluster(clusterName) + .detail(description.getFreeFormDetails()) + .stack(description.getStack()) + .sequence(sequence) + .build(); + + // Apply moniker to labels which are subsequently recorded in the instance template. + GoogleInstanceTemplate googleInstanceTemplate = new GoogleInstanceTemplate(); + googleInstanceTemplate.labels = description.getLabels(); + namer.applyMoniker(googleInstanceTemplate, moniker); + } + + protected void validateAcceleratorConfig(BasicGoogleDeployDescription description) { + // Accelerators are supported for zonal server groups only. 
+    if (description.getAcceleratorConfigs() != null
+        && !description.getAcceleratorConfigs().isEmpty()
+        && description.getRegional()
+        && !description.getSelectZones()) {
+      throw new IllegalArgumentException(
+          "Accelerators are only supported with regional server groups if the zones are specified by the user.");
+    }
+  }
+
+  protected InstanceProperties buildInstancePropertiesFromInput(
+      BasicGoogleDeployDescription description,
+      String machineTypeName,
+      List<AttachedDisk> attachedDisks,
+      NetworkInterface networkInterface,
+      Metadata metadata,
+      Tags tags,
+      List<ServiceAccount> serviceAccounts,
+      Scheduling scheduling,
+      Map<String, String> labels) {
+    return new InstanceProperties()
+        .setMachineType(machineTypeName)
+        .setDisks(attachedDisks)
+        .setGuestAccelerators(
+            description.getAcceleratorConfigs() != null
+                    && !description.getAcceleratorConfigs().isEmpty()
+                ? description.getAcceleratorConfigs()
+                : Collections.emptyList())
+        .setNetworkInterfaces(List.of(networkInterface))
+        .setCanIpForward(description.getCanIpForward())
+        .setMetadata(metadata)
+        .setTags(tags)
+        .setLabels(labels)
+        .setScheduling(scheduling)
+        .setServiceAccounts(serviceAccounts)
+        .setResourceManagerTags(description.getResourceManagerTags())
+        .setPartnerMetadata(description.getPartnerMetadata());
+  }
+
+  protected void addShieldedVmConfigToInstanceProperties(
+      BasicGoogleDeployDescription description,
+      InstanceProperties instanceProperties,
+      Image bootImage) {
+    if (GCEUtil.isShieldedVmCompatible(bootImage)) {
+      instanceProperties.setShieldedVmConfig(GCEUtil.buildShieldedVmConfig(description));
+    }
+  }
+
+  protected void addMinCpuPlatformToInstanceProperties(
+      BasicGoogleDeployDescription description, InstanceProperties instanceProperties) {
+    if (StringUtils.isNotBlank(description.getMinCpuPlatform())) {
+      instanceProperties.setMinCpuPlatform(description.getMinCpuPlatform());
+    }
+  }
+
+  protected InstanceTemplate buildInstanceTemplate(String name, InstanceProperties properties) {
+    return new InstanceTemplate().setName(name).setProperties(properties);
+  }
+
+  protected void setCapacityFromInput(BasicGoogleDeployDescription description) {
+    if (description.getCapacity() != null) {
+      description.setTargetSize(description.getCapacity().getDesired());
+    }
+  }
+
+  protected void setAutoscalerCapacityFromInput(BasicGoogleDeployDescription description) {
+    if (autoscalerIsSpecified(description)) {
+      if (description.getCapacity() != null) {
+        description.getAutoscalingPolicy().setMinNumReplicas(description.getCapacity().getMin());
+        description.getAutoscalingPolicy().setMaxNumReplicas(description.getCapacity().getMax());
+      }
+      GCEUtil.calibrateTargetSizeWithAutoscaler(description);
+    }
+  }
+
+  protected boolean autoscalerIsSpecified(BasicGoogleDeployDescription description) {
+    return description.getAutoscalingPolicy() != null
+        && (description.getAutoscalingPolicy().getCpuUtilization() != null
+            || description.getAutoscalingPolicy().getLoadBalancingUtilization() != null
+            || description.getAutoscalingPolicy().getCustomMetricUtilizations() != null
+            || description.getAutoscalingPolicy().getScalingSchedules() != null);
+  }
+
+  protected void setCapacityFromSource(BasicGoogleDeployDescription description, Task task) {
+    BasicGoogleDeployDescription.Source source = description.getSource();
+    if (source != null
+        && source.getUseSourceCapacity() != null
+        && source.getUseSourceCapacity()
+        && StringUtils.isNotBlank(source.getRegion())
+        && StringUtils.isNotBlank(source.getServerGroupName())) {
+      task.updateStatus(
+          BASE_PHASE,
+          String.format(
"Looking up server group %s in %s in order to copy the current capacity...", + source.getServerGroupName(), source.getRegion())); + + // Locate the ancestor server group. + GoogleServerGroup.View ancestorServerGroup = + GCEUtil.queryServerGroup( + googleClusterProvider, + description.getAccountName(), + source.getRegion(), + source.getServerGroupName()); + description.setTargetSize(ancestorServerGroup.getCapacity().getDesired()); + description.setAutoscalingPolicy(ancestorServerGroup.getAutoscalingPolicy()); + } + } + + protected List buildAutoHealingPolicyFromInput( + BasicGoogleDeployDescription description, Task task) { + GoogleHealthCheck autoHealingHealthCheck = null; + if (description.getAutoHealingPolicy() != null + && StringUtils.isNotBlank(description.getAutoHealingPolicy().getHealthCheck())) { + autoHealingHealthCheck = + (GoogleHealthCheck) + GCEUtil.queryHealthCheck( + description.getCredentials().getProject(), + description.getAccountName(), + description.getAutoHealingPolicy().getHealthCheck(), + description.getAutoHealingPolicy().getHealthCheckKind(), + description.getCredentials().getCompute(), + cacheView, + task, + BASE_PHASE, + this); + } + List autoHealingPolicy = null; + if (autoHealingHealthCheck != null) { + InstanceGroupManagerAutoHealingPolicy policy = new InstanceGroupManagerAutoHealingPolicy(); + policy.setHealthCheck(autoHealingHealthCheck.getSelfLink()); + policy.setInitialDelaySec(description.getAutoHealingPolicy().getInitialDelaySec()); + autoHealingPolicy = List.of(policy); + } + + if (autoHealingPolicy != null + && description.getAutoHealingPolicy().getMaxUnavailable() != null) { + FixedOrPercent maxUnavailable = new FixedOrPercent(); + maxUnavailable.setFixed( + description.getAutoHealingPolicy().getMaxUnavailable().getFixed().intValue()); + maxUnavailable.setPercent( + description.getAutoHealingPolicy().getMaxUnavailable().getPercent().intValue()); + autoHealingPolicy.get(0).set("maxUnavailable", maxUnavailable); + } + return autoHealingPolicy; + } + + protected InstanceGroupManager buildInstanceGroupFromInput( + BasicGoogleDeployDescription description, + String serverGroupName, + String instanceTemplateUrl, + List targetPools, + List autoHealingPolicy) { + return new InstanceGroupManager() + .setName(serverGroupName) + .setBaseInstanceName(serverGroupName) + .setInstanceTemplate(instanceTemplateUrl) + .setTargetSize(description.getTargetSize()) + .setTargetPools(targetPools) + .setAutoHealingPolicies(autoHealingPolicy); + } + + protected void setNamedPortsToInstanceGroup( + BasicGoogleDeployDescription description, + LoadBalancerInfo lbInfo, + InstanceGroupManager instanceGroupManager) { + if ((hasBackedServiceFromInput(description, lbInfo) + || !lbInfo.internalHttpLoadBalancers.isEmpty()) + && (description.getLoadBalancingPolicy() != null + || (description.getSource() != null + && StringUtils.isNotBlank(description.getSource().getServerGroupName())))) { + List namedPorts = new ArrayList<>(); + String sourceGroupName = description.getSource().getServerGroupName(); + + // Note: this favors the explicitly specified load balancing policy over the source server + // group. 
+      if (StringUtils.isNotBlank(sourceGroupName)
+          && description.getLoadBalancingPolicy() == null) {
+        GoogleServerGroup.View sourceServerGroup =
+            googleClusterProvider.getServerGroup(
+                description.getAccountName(),
+                description.getSource().getRegion(),
+                sourceGroupName);
+        if (sourceServerGroup == null) {
+          log.warn(
+              String.format(
+                  "Could not locate source server group %s to update named port.",
+                  sourceGroupName));
+        } else {
+          namedPorts =
+              sourceServerGroup.getNamedPorts().entrySet().stream()
+                  .map(entry -> new NamedPort().setName(entry.getKey()).setPort(entry.getValue()))
+                  .collect(Collectors.toList());
+        }
+      } else {
+        if (description.getLoadBalancingPolicy().getNamedPorts() != null) {
+          namedPorts = description.getLoadBalancingPolicy().getNamedPorts();
+        } else if (description.getLoadBalancingPolicy().getListeningPort() != null) {
+          log.warn(
+              "Deriving named ports from deprecated 'listeningPort' attribute. Please update your deploy description to use 'namedPorts'.");
+          namedPorts.add(
+              new NamedPort()
+                  .setName(GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME)
+                  .setPort(description.getLoadBalancingPolicy().getListeningPort()));
+        }
+      }
+
+      if (namedPorts.isEmpty()) {
+        log.warn(
+            "Could not locate named port on either load balancing policy or source server group. Setting default named port.");
+        namedPorts.add(
+            new NamedPort()
+                .setName(GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME)
+                .setPort(GoogleHttpLoadBalancingPolicy.getHTTP_DEFAULT_PORT()));
+      }
+      instanceGroupManager.setNamedPorts(namedPorts);
+    }
+  }
+
+  protected void createInstanceGroupManagerFromInput(
+      BasicGoogleDeployDescription description,
+      InstanceGroupManager instanceGroupManager,
+      LoadBalancerInfo lbInfo,
+      String serverGroupName,
+      String region,
+      Task task)
+      throws IOException {
+    if (description.getRegional()) {
+      setDistributionPolicyToInstanceGroup(description, instanceGroupManager);
+      String targetLink =
+          createRegionalInstanceGroupManagerAndWait(
+              description, lbInfo, serverGroupName, region, instanceGroupManager, task);
+      createRegionalAutoscaler(description, serverGroupName, targetLink, region, task);
+    } else {
+      String targetLink =
+          createInstanceGroupManagerAndWait(
+              description, lbInfo, serverGroupName, instanceGroupManager, task);
+      createAutoscaler(description, serverGroupName, targetLink, task);
+    }
+  }
+
+  protected void setDistributionPolicyToInstanceGroup(
+      BasicGoogleDeployDescription description, InstanceGroupManager instanceGroupManager) {
+    if (description.getDistributionPolicy() != null) {
+      DistributionPolicy distributionPolicy = new DistributionPolicy();
+
+      if (description.getSelectZones()
+          && description.getDistributionPolicy().getZones() != null
+          && !description.getDistributionPolicy().getZones().isEmpty()) {
+        log.info(
+            String.format(
+                "Configuring explicit zones selected for regional server group: %s",
+                String.join(", ", description.getDistributionPolicy().getZones())));
+        List<DistributionPolicyZoneConfiguration> selectedZones =
+            description.getDistributionPolicy().getZones().stream()
+                .map(
+                    it ->
+                        new DistributionPolicyZoneConfiguration()
+                            .setZone(
+                                GCEUtil.buildZoneUrl(
+                                    description.getCredentials().getProject(), it)))
+                .collect(Collectors.toList());
+        distributionPolicy.setZones(selectedZones);
+      }
+
+      if (StringUtils.isNotBlank(description.getDistributionPolicy().getTargetShape())) {
+        distributionPolicy.setTargetShape(description.getDistributionPolicy().getTargetShape());
+      }
+
+      // getZones() is null unless the select-zones branch above ran, so guard before isEmpty().
+      if ((distributionPolicy.getZones() != null && !distributionPolicy.getZones().isEmpty())
+          || StringUtils.isNotBlank(distributionPolicy.getTargetShape())) {
instanceGroupManager.setDistributionPolicy(distributionPolicy); + } + } + } + + private void updateBackendServices( + BasicGoogleDeployDescription description, + LoadBalancerInfo lbInfo, + String serverGroupName, + List backendServicesToUpdate, + Task task) { + if (!description.getDisableTraffic() && hasBackedServiceFromInput(description, lbInfo)) { + backendServicesToUpdate.forEach( + backendService -> { + safeRetry.doRetry( + updateBackendServices( + description.getCredentials().getCompute(), + description.getCredentials().getProject(), + backendService.getName(), + backendService), + "Load balancer backend service", + task, + List.of(400, 412), + Collections.emptyList(), + Map.of( + "action", + "update", + "phase", + BASE_PHASE, + "operation", + "updateBackendServices", + TAG_SCOPE, + SCOPE_GLOBAL), + registry); + task.updateStatus( + BASE_PHASE, + String.format( + "Done associating server group %s with backend service %s.", + serverGroupName, backendService.getName())); + }); + } + } + + private void updateRegionalBackendServices( + BasicGoogleDeployDescription description, + LoadBalancerInfo lbInfo, + String serverGroupName, + String region, + List regionBackendServicesToUpdate, + Task task) { + if (!description.getDisableTraffic() + && (!lbInfo.internalLoadBalancers.isEmpty() + || !lbInfo.internalHttpLoadBalancers.isEmpty())) { + regionBackendServicesToUpdate.forEach( + backendService -> { + safeRetry.doRetry( + updateBackendServices( + description.getCredentials().getCompute(), + description.getCredentials().getProject(), + backendService.getName(), + backendService, + region), + "Internal load balancer backend service", + task, + List.of(400, 412), + Collections.emptyList(), + Map.of( + "action", + "update", + "phase", + BASE_PHASE, + "operation", + "updateRegionBackendServices", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + registry); + task.updateStatus( + BASE_PHASE, + String.format( + "Done associating server group %s with backend service %s.", + serverGroupName, backendService.getName())); + }); + } + } + + protected BackendService getBackendServiceFromProvider( + GoogleNamedAccountCredentials credentials, String backendServiceName) throws IOException { + return timeExecute( + credentials + .getCompute() + .backendServices() + .get(credentials.getProject(), backendServiceName), + "compute.backendServices.get", + TAG_SCOPE, + SCOPE_GLOBAL); + } + + protected BackendService getRegionBackendServiceFromProvider( + GoogleNamedAccountCredentials credentials, String region, String backendServiceName) + throws IOException { + return timeExecute( + credentials + .getCompute() + .regionBackendServices() + .get(credentials.getProject(), region, backendServiceName), + "compute.regionBackendServices.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } + + private String createInstanceTemplateAndWait( + GoogleNamedAccountCredentials credentials, InstanceTemplate template, Task task) + throws IOException { + Operation instanceTemplateCreateOperation = + timeExecute( + credentials.getCompute().instanceTemplates().insert(credentials.getProject(), template), + "compute.instanceTemplates.insert", + TAG_SCOPE, + SCOPE_GLOBAL); + String instanceTemplateUrl = instanceTemplateCreateOperation.getTargetLink(); + + // Before building the managed instance group we must check and wait until the instance template + // is built. 
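+    // Sketch of the assumed GCE contract: instanceTemplates().insert() only starts the creation
+    // and returns an Operation handle; referencing the template URL from a managed instance
+    // group before that Operation is DONE can fail, hence the blocking poll below.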
+ googleOperationPoller.waitForGlobalOperation( + credentials.getCompute(), + credentials.getProject(), + instanceTemplateCreateOperation.getName(), + null, + task, + "instance template " + GCEUtil.getLocalName(instanceTemplateUrl), + BASE_PHASE); + return instanceTemplateUrl; + } + + protected String createRegionalInstanceGroupManagerAndWait( + BasicGoogleDeployDescription description, + LoadBalancerInfo lbInfo, + String serverGroupName, + String region, + InstanceGroupManager instanceGroupManager, + Task task) + throws IOException { + Operation migCreateOperation = + timeExecute( + description + .getCredentials() + .getCompute() + .regionInstanceGroupManagers() + .insert(description.getCredentials().getProject(), region, instanceGroupManager), + "compute.regionInstanceGroupManagers.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + + if ((!description.getDisableTraffic() && hasBackedServiceFromInput(description, lbInfo)) + || autoscalerIsSpecified(description) + || (!description.getDisableTraffic() + && (!lbInfo.internalLoadBalancers.isEmpty() + || !lbInfo.internalHttpLoadBalancers.isEmpty()))) { + // Before updating the Backend Services or creating the Autoscaler we must wait until the + // managed instance group is created. + googleOperationPoller.waitForRegionalOperation( + description.getCredentials().getCompute(), + description.getCredentials().getProject(), + region, + migCreateOperation.getName(), + null, + task, + String.format("managed instance group %s", serverGroupName), + BASE_PHASE); + } + return migCreateOperation.getTargetLink(); + } + + protected void createRegionalAutoscaler( + BasicGoogleDeployDescription description, + String serverGroupName, + String targetLink, + String region, + Task task) + throws IOException { + if (autoscalerIsSpecified(description)) { + task.updateStatus( + BASE_PHASE, String.format("Creating regional autoscaler for %s...", serverGroupName)); + + Autoscaler autoscaler = + GCEUtil.buildAutoscaler(serverGroupName, targetLink, description.getAutoscalingPolicy()); + + timeExecute( + description + .getCredentials() + .getCompute() + .regionAutoscalers() + .insert(description.getCredentials().getProject(), region, autoscaler), + "compute.regionAutoscalers.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } + } + + protected String createInstanceGroupManagerAndWait( + BasicGoogleDeployDescription description, + LoadBalancerInfo lbInfo, + String serverGroupName, + InstanceGroupManager instanceGroupManager, + Task task) + throws IOException { + Operation createOperation = + timeExecute( + description + .getCredentials() + .getCompute() + .instanceGroupManagers() + .insert( + description.getCredentials().getProject(), + description.getZone(), + instanceGroupManager), + "compute.instanceGroupManagers.insert", + TAG_SCOPE, + SCOPE_ZONAL, + TAG_ZONE, + description.getZone()); + + if ((!description.getDisableTraffic() && hasBackedServiceFromInput(description, lbInfo)) + || autoscalerIsSpecified(description) + || (!description.getDisableTraffic() + && (!lbInfo.internalLoadBalancers.isEmpty() + || !lbInfo.internalHttpLoadBalancers.isEmpty()))) { + // Before updating the Backend Services or creating the Autoscaler we must wait until the + // managed instance group is created. 
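+      // For example, a deploy with "disableTraffic": true and no autoscaling policy skips this
+      // wait entirely: nothing downstream consumes the group, so the insert operation is left to
+      // complete in the background.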
+      googleOperationPoller.waitForZonalOperation(
+          description.getCredentials().getCompute(),
+          description.getCredentials().getProject(),
+          description.getZone(),
+          createOperation.getName(),
+          null,
+          task,
+          String.format("managed instance group %s", serverGroupName),
+          BASE_PHASE);
+    }
+    return createOperation.getTargetLink();
+  }
+
+  protected void createAutoscaler(
+      BasicGoogleDeployDescription description,
+      String serverGroupName,
+      String targetLink,
+      Task task)
+      throws IOException {
+    if (autoscalerIsSpecified(description)) {
+      task.updateStatus(
+          BASE_PHASE, String.format("Creating zonal autoscaler for %s...", serverGroupName));
+
+      Autoscaler autoscaler =
+          GCEUtil.buildAutoscaler(serverGroupName, targetLink, description.getAutoscalingPolicy());
+
+      timeExecute(
+          description
+              .getCredentials()
+              .getCompute()
+              .autoscalers()
+              .insert(description.getCredentials().getProject(), description.getZone(), autoscaler),
+          "compute.autoscalers.insert",
+          TAG_SCOPE,
+          SCOPE_ZONAL,
+          TAG_ZONE,
+          description.getZone());
+    }
+  }
+
+  private Closure updateBackendServices(
+      Compute compute, String project, String backendServiceName, BackendService backendService) {
+    return new Closure<>(this, this) {
+      @Override
+      public Object call() {
+        BackendService serviceToUpdate = null;
+        try {
+          serviceToUpdate =
+              timeExecute(
+                  compute.backendServices().get(project, backendServiceName),
+                  "compute.backendServices.get",
+                  TAG_SCOPE,
+                  SCOPE_GLOBAL);
+        } catch (IOException e) {
+          log.error("Failed to fetch backend service " + backendServiceName, e);
+        }
+        if (serviceToUpdate == null) {
+          // Nothing to update if the fetch failed.
+          return null;
+        }
+        if (serviceToUpdate.getBackends() == null) {
+          serviceToUpdate.setBackends(new ArrayList<>());
+        }
+        BackendService finalServiceToUpdate = serviceToUpdate;
+        backendService.getBackends().forEach(it -> finalServiceToUpdate.getBackends().add(it));
+        // Deduplicate backends by group; the filtered list must be assigned back or the result
+        // of the stream is discarded.
+        Set<String> seenGroup = new HashSet<>();
+        serviceToUpdate.setBackends(
+            serviceToUpdate.getBackends().stream()
+                .filter(backend -> seenGroup.add(backend.getGroup()))
+                .collect(Collectors.toList()));
+        try {
+          timeExecute(
+              compute.backendServices().update(project, backendServiceName, serviceToUpdate),
+              "compute.backendServices.update",
+              TAG_SCOPE,
+              SCOPE_GLOBAL);
+        } catch (IOException e) {
+          log.error("Failed to update backend service " + backendServiceName, e);
+        }
+        return null;
+      }
+    };
+  }
+
+  private Closure updateBackendServices(
+      Compute compute,
+      String project,
+      String backendServiceName,
+      BackendService backendService,
+      String region) {
+    return new Closure<>(this, this) {
+      @Override
+      public Object call() {
+        BackendService serviceToUpdate = null;
+        try {
+          // Regional backend services must go through the regional API, matching the metric
+          // names recorded below.
+          serviceToUpdate =
+              timeExecute(
+                  compute.regionBackendServices().get(project, region, backendServiceName),
+                  "compute.regionBackendServices.get",
+                  TAG_SCOPE,
+                  SCOPE_REGIONAL,
+                  TAG_REGION,
+                  region);
+        } catch (IOException e) {
+          log.error("Failed to fetch region backend service " + backendServiceName, e);
+        }
+        if (serviceToUpdate == null) {
+          return null;
+        }
+        if (serviceToUpdate.getBackends() == null) {
+          serviceToUpdate.setBackends(new ArrayList<>());
+        }
+        BackendService finalServiceToUpdate = serviceToUpdate;
+        backendService.getBackends().forEach(it -> finalServiceToUpdate.getBackends().add(it));
+        Set<String> seenGroup = new HashSet<>();
+        serviceToUpdate.setBackends(
+            serviceToUpdate.getBackends().stream()
+                .filter(backend -> seenGroup.add(backend.getGroup()))
+                .collect(Collectors.toList()));
+        try {
+          timeExecute(
+              compute
+                  .regionBackendServices()
+                  .update(project, region, backendServiceName, serviceToUpdate),
+              "compute.regionBackendServices.update",
+              TAG_SCOPE,
+              SCOPE_REGIONAL,
+              TAG_REGION,
+              region);
+        } catch (IOException e) {
+          log.error("Failed to update region backend service " + backendServiceName, e);
+        }
+        return null;
+      }
+    };
+  }
+
+  @Override
+  public boolean handles(DeployDescription description) {
+    return description instanceof BasicGoogleDeployDescription;
+  }
+
+  @Override
+  public
Registry getRegistry() { + return registry; + } + + @Data + class LoadBalancerInfo { + List targetPools = new ArrayList<>(); + List internalLoadBalancers = new ArrayList<>(); + List internalHttpLoadBalancers = new ArrayList<>(); + List sslLoadBalancers = new ArrayList<>(); + List tcpLoadBalancers = new ArrayList<>(); + } + + static class GoogleInstanceTemplate implements GoogleLabeledResource { + Map labels; + + @Override + public Map getLabels() { + return labels; + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/SetStatefulDiskAtomicOperation.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/SetStatefulDiskAtomicOperation.java new file mode 100644 index 00000000000..db05aaca46f --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/SetStatefulDiskAtomicOperation.java @@ -0,0 +1,125 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops; + +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.StatefulPolicy; +import com.google.api.services.compute.model.StatefulPolicyPreservedState; +import com.google.api.services.compute.model.StatefulPolicyPreservedStateDiskDevice; +import com.google.common.annotations.VisibleForTesting; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleServerGroupManagers; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.description.SetStatefulDiskDescription; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.HashMap; +import java.util.List; + +public class SetStatefulDiskAtomicOperation extends GoogleAtomicOperation { + + private static final String BASE_PHASE = "SET_STATEFUL_DISK"; + + private final GoogleClusterProvider clusterProvider; + private final GoogleComputeApiFactory computeApiFactory; + private final SetStatefulDiskDescription description; + + public SetStatefulDiskAtomicOperation( + GoogleClusterProvider clusterProvider, + GoogleComputeApiFactory computeApiFactory, + SetStatefulDiskDescription description) { + this.clusterProvider = clusterProvider; + this.computeApiFactory = computeApiFactory; + this.description = description; + } + + /* + curl -X POST -H "Content-Type: application/json" -d ' + [ { "setStatefulDisk": { + "serverGroupName": "myapp-dev-v000", + "region": "us-east1", + "device-name": "myapp-dev-v000-1", + "credentials": "my-account-name" + } } ]' 
localhost:7002/gce/ops + */ + @Override + public Void operate(List priorOutputs) { + + Task task = TaskRepository.threadLocalTask.get(); + + task.updateStatus( + BASE_PHASE, + String.format( + "Initializing set stateful disk of instance group %s in region %s", + description.getServerGroupName(), description.getRegion())); + + GoogleServerGroup.View serverGroup = + GCEUtil.queryServerGroup( + clusterProvider, + description.getAccount(), + description.getRegion(), + description.getServerGroupName()); + + try { + GoogleServerGroupManagers managers = + computeApiFactory.createServerGroupManagers(description.getCredentials(), serverGroup); + + task.updateStatus(BASE_PHASE, "Retrieving current instance group definition"); + + InstanceGroupManager instanceGroupManager = managers.get().execute(); + + setStatefulPolicy(instanceGroupManager); + + task.updateStatus(BASE_PHASE, "Storing updated instance group definition"); + + managers + .update(instanceGroupManager) + .executeAndWait(TaskRepository.threadLocalTask.get(), BASE_PHASE); + + return null; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private void setStatefulPolicy(InstanceGroupManager instanceGroupManager) { + + if (instanceGroupManager.getStatefulPolicy() == null) { + instanceGroupManager.setStatefulPolicy(new StatefulPolicy()); + } + StatefulPolicy statefulPolicy = instanceGroupManager.getStatefulPolicy(); + if (statefulPolicy.getPreservedState() == null) { + statefulPolicy.setPreservedState(new StatefulPolicyPreservedState()); + } + StatefulPolicyPreservedState preservedState = statefulPolicy.getPreservedState(); + if (preservedState.getDisks() == null) { + preservedState.setDisks(new HashMap<>()); + } + preservedState + .getDisks() + .put(description.getDeviceName(), new StatefulPolicyPreservedStateDiskDevice()); + } + + @VisibleForTesting + public SetStatefulDiskDescription getDescription() { + return description; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/StatefullyUpdateBootImageAtomicOperation.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/StatefullyUpdateBootImageAtomicOperation.java new file mode 100644 index 00000000000..9e2476cb742 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/StatefullyUpdateBootImageAtomicOperation.java @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops; + +import static com.google.common.base.Preconditions.checkState; +import static com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceIllegalStateException.checkResourceState; +import static java.util.stream.Collectors.toList; + +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.Compute.InstanceTemplates.Get; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.Image; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceGroupManagerUpdatePolicy; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.compute.GetFirstBatchComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeGetRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleServerGroupManagers; +import com.netflix.spinnaker.clouddriver.google.compute.Images; +import com.netflix.spinnaker.clouddriver.google.compute.InstanceTemplates; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.description.StatefullyUpdateBootImageDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceIllegalStateException; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import javax.annotation.Nonnull; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class StatefullyUpdateBootImageAtomicOperation extends GoogleAtomicOperation { + + private static final String BASE_PHASE = "STATEFULLY_UPDATE_BOOT_IMAGE"; + + private static final Random RANDOM = new Random(); + + private final GoogleClusterProvider clusterProvider; + private final GoogleComputeApiFactory computeApiFactory; + private final GoogleConfigurationProperties googleConfigurationProperties; + private final StatefullyUpdateBootImageDescription description; + + public StatefullyUpdateBootImageAtomicOperation( + GoogleClusterProvider clusterProvider, + GoogleComputeApiFactory computeApiFactory, + GoogleConfigurationProperties googleConfigurationProperties, + StatefullyUpdateBootImageDescription description) { + this.clusterProvider = clusterProvider; + this.computeApiFactory = computeApiFactory; + this.googleConfigurationProperties = googleConfigurationProperties; + this.description = description; + } + + /* + curl -X POST -H "Content-Type: application/json" -d ' + [ { "restartWithNewBootImage": { + "serverGroupName": "myapp-dev-v000", + "region": "us-east1", + "bootImage": "centos-7-v20190423", + "credentials": "my-account-name" + } } ]' 
localhost:7002/gce/ops + */ + @Override + public Void operate(List priorOutputs) { + + Task task = TaskRepository.threadLocalTask.get(); + + GoogleNamedAccountCredentials credentials = description.getCredentials(); + + GoogleServerGroup.View serverGroup = + GCEUtil.queryServerGroup( + clusterProvider, + description.getAccount(), + description.getRegion(), + description.getServerGroupName()); + + try { + + Image image = getImage(task, credentials); + + GoogleServerGroupManagers managers = + computeApiFactory.createServerGroupManagers(credentials, serverGroup); + + task.updateStatus( + BASE_PHASE, String.format("Retrieving server group %s.", serverGroup.getName())); + InstanceGroupManager instanceGroupManager = managers.get().execute(); + checkResourceState( + instanceGroupManager.getVersions().size() == 1, + "Found more than one instance template for the server group %s.", + description.getServerGroupName()); + checkResourceState( + instanceGroupManager.getStatefulPolicy() != null, + "Server group %s does not have a StatefulPolicy", + description.getServerGroupName()); + + String oldTemplateName = GCEUtil.getLocalName(instanceGroupManager.getInstanceTemplate()); + InstanceTemplates instanceTemplates = computeApiFactory.createInstanceTemplates(credentials); + + task.updateStatus( + BASE_PHASE, String.format("Retrieving instance template %s.", oldTemplateName)); + GoogleComputeRequest request = instanceTemplates.get(oldTemplateName); + InstanceTemplate template = request.execute(); + + String newTemplateName = getNewTemplateName(description.getServerGroupName()); + template.setName(newTemplateName); + List disks = + template.getProperties().getDisks().stream() + .filter(AttachedDisk::getBoot) + .collect(toList()); + checkState(disks.size() == 1, "Expected exactly one boot disk, found %s", disks.size()); + AttachedDisk bootDisk = disks.get(0); + bootDisk.getInitializeParams().setSourceImage(image.getSelfLink()); + + task.updateStatus( + BASE_PHASE, String.format("Saving new instance template %s.", newTemplateName)); + instanceTemplates.insert(template).executeAndWait(task, BASE_PHASE); + + instanceGroupManager + .setInstanceTemplate( + GCEUtil.buildInstanceTemplateUrl(credentials.getProject(), newTemplateName)) + .setVersions(ImmutableList.of()) + .setUpdatePolicy(new InstanceGroupManagerUpdatePolicy().setType("OPPORTUNISTIC")); + + task.updateStatus( + BASE_PHASE, String.format("Starting update of server group %s.", serverGroup.getName())); + managers.patch(instanceGroupManager).executeAndWait(task, BASE_PHASE); + + task.updateStatus( + BASE_PHASE, String.format("Deleting instance template %s.", oldTemplateName)); + instanceTemplates.delete(oldTemplateName).executeAndWait(task, BASE_PHASE); + + return null; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Nonnull + private Image getImage(Task task, GoogleNamedAccountCredentials credentials) throws IOException { + + task.updateStatus(BASE_PHASE, "Looking up image " + description.getBootImage()); + + Images imagesApi = computeApiFactory.createImages(credentials); + + GetFirstBatchComputeRequest batchRequest = + GetFirstBatchComputeRequest.create(computeApiFactory.createBatchRequest(credentials)); + for (String project : getImageProjects(credentials)) { + GoogleComputeGetRequest request = + imagesApi.get(project, description.getBootImage()); + batchRequest.queue(request); + } + Optional image = batchRequest.execute("findImage"); + + return image.orElseThrow( + () -> + new GoogleResourceIllegalStateException( + 
"Couldn't find an image named " + description.getBootImage())); + } + + private ImmutableSet getImageProjects(GoogleNamedAccountCredentials credentials) { + return ImmutableSet.builder() + .add(credentials.getProject()) + .addAll(credentials.getImageProjects()) + .addAll(googleConfigurationProperties.getBaseImageProjects()) + .build(); + } + + private static String getNewTemplateName(String serverGroupName) { + return String.format("%s-%08d", serverGroupName, RANDOM.nextInt(100000000)); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..742617cf5c5 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java @@ -0,0 +1,497 @@ +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer; + +import static java.lang.String.format; + +import com.google.api.client.json.GenericJson; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.*; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException; +import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException; +import groovy.lang.Closure; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeleteGoogleInternalHttpLoadBalancerAtomicOperation + extends DeleteGoogleLoadBalancerAtomicOperation { + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private static void addServicesFromPathMatchers( + List backendServiceUrls, List pathMatchers) { + if (pathMatchers == null) return; + for (PathMatcher pathMatcher : pathMatchers) { + backendServiceUrls.add(pathMatcher.getDefaultService()); + for (PathRule pathRule : pathMatcher.getPathRules()) { + backendServiceUrls.add(pathRule.getService()); + } + } + } + + private static final String BASE_PHASE = "DELETE_INTERNAL_HTTP_LOAD_BALANCER"; + @Autowired private SafeRetry safeRetry; + @Autowired private GoogleOperationPoller googleOperationPoller; + private DeleteGoogleLoadBalancerDescription description; + + public DeleteGoogleInternalHttpLoadBalancerAtomicOperation( + DeleteGoogleLoadBalancerDescription description) { + this.description = description; + } + + /** + * curl -X POST -H "Content-Type: application/json" -d '[ { "deleteLoadBalancer": { "credentials": + * "my-account-name", "loadBalancerName": "spin-lb", "deleteHealthChecks": false, + * "loadBalancerType": "HTTP"}} ]' localhost:7002/gce/ops + */ + @Override + public Void operate(List 
priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + format( + "Initializing deletion of Internal HTTP load balancer %s...", + description.getLoadBalancerName())); + + if (description.getCredentials() == null) { + throw new IllegalArgumentException( + format( + "Unable to resolve credentials for Google account '%s'.", + description.getAccountName())); + } + + Compute compute = description.getCredentials().getCompute(); + String project = description.getCredentials().getProject(); + String region = description.getRegion(); + String forwardingRuleName = description.getLoadBalancerName(); + + // First we look everything up. Then, we call delete on all of it. Finally, we wait (with + // timeout) for all to complete. + // Start with the forwarding rule. + getTask() + .updateStatus( + BASE_PHASE, + "Retrieving forwarding rule " + forwardingRuleName + " in " + region + "..."); + + List projectForwardingRules = null; + try { + projectForwardingRules = + timeExecute( + compute.forwardingRules().list(project, region), + "compute.forwardingRules.list", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region) + .getItems(); + + ForwardingRule forwardingRule = + projectForwardingRules.stream() + .filter(f -> f.getName().equals(forwardingRuleName)) + .findFirst() + .orElse(null); + if (forwardingRule == null) { + GCEUtil.updateStatusAndThrowNotFoundException( + "Forwarding rule " + forwardingRuleName + " not found in " + region + " for " + project, + getTask(), + BASE_PHASE); + } + + String targetProxyName = GCEUtil.getLocalName(forwardingRule.getTarget()); + // Target HTTP(S) proxy. + getTask().updateStatus(BASE_PHASE, "Retrieving target proxy " + targetProxyName + "..."); + + GenericJson retrievedTargetProxy = + (GenericJson) + GCEUtil.getRegionTargetProxyFromRule( + compute, project, region, forwardingRule, BASE_PHASE, safeRetry, this); + + if (retrievedTargetProxy == null) { + GCEUtil.updateStatusAndThrowNotFoundException( + "Target proxy " + targetProxyName + " not found for " + project + " in " + region, + getTask(), + BASE_PHASE); + } + + final String urlMapName = GCEUtil.getLocalName((String) retrievedTargetProxy.get("urlMap")); + + final List listenersToDelete = new ArrayList(); + for (ForwardingRule rule : projectForwardingRules) { + if (!rule.getLoadBalancingScheme().equals("INTERNAL_MANAGED")) continue; + + try { + GenericJson proxy = + (GenericJson) + GCEUtil.getRegionTargetProxyFromRule( + compute, + project, + region, + rule, + BASE_PHASE, + getSafeRetry(), + DeleteGoogleInternalHttpLoadBalancerAtomicOperation.this); + if (GCEUtil.getLocalName((proxy == null ? null : (String) proxy.get("urlMap"))) + .equals(urlMapName)) { + listenersToDelete.add(rule.getName()); + } + } catch (GoogleOperationException e) { + // 404 is thrown if the target proxy does not exist. + // We can ignore 404's here because we are iterating over all forwarding rules and some + // other process may have + // deleted the target proxy between the time we queried for the list of forwarding rules + // and now. + // Any other exception needs to be propagated. + if (!(e.getCause() instanceof GoogleApiException.NotFoundException)) { + throw e; + } + } + } + + // URL map. + getTask().updateStatus(BASE_PHASE, "Retrieving URL map " + urlMapName + "..."); + + // NOTE: This call is necessary because we cross-check backend services later. 
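+      // GCEUtil.getLocalName (used throughout below) reduces a full selfLink such as
+      //   https://compute.googleapis.com/compute/v1/projects/my-project/regions/us-east1/urlMaps/spin-lb
+      // to the bare resource name "spin-lb" (illustrative URL, hypothetical project).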
+ UrlMapList mapList = + timeExecute( + compute.regionUrlMaps().list(project, region), + "compute.regionUrlMaps.list", + TAG_SCOPE, + SCOPE_REGIONAL); + List projectUrlMaps = mapList.getItems(); + UrlMap urlMap = + projectUrlMaps.stream() + .filter(u -> u.getName().equals(urlMapName)) + .findFirst() + .orElseThrow( + () -> new IllegalStateException(format("urlMap %s not found.", urlMapName))); + projectUrlMaps.removeIf(u -> u.getName().equals(urlMapName)); + + List backendServiceUrls = new ArrayList<>(); + backendServiceUrls.add(urlMap.getDefaultService()); + addServicesFromPathMatchers(backendServiceUrls, urlMap.getPathMatchers()); + backendServiceUrls = ImmutableSet.copyOf(backendServiceUrls).asList(); + + // Backend services. Also, get health check URLs. + Set healthCheckUrls = new HashSet<>(); + for (String backendServiceUrl : backendServiceUrls) { + final String backendServiceName = GCEUtil.getLocalName(backendServiceUrl); + getTask() + .updateStatus( + BASE_PHASE, + "Retrieving backend service " + backendServiceName + " in " + region + "..."); + + BackendService backendService = + safeRetry.doRetry( + new Closure(this, this) { + @Override + public BackendService call() { + try { + return timeExecute( + compute.regionBackendServices().get(project, region, backendServiceName), + "compute.regionBackendServices.get", + TAG_SCOPE, + SCOPE_REGIONAL); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Region Backend service " + backendServiceName, + getTask(), + ImmutableList.of(400, 403, 412), + new ArrayList<>(), + ImmutableMap.of( + "action", + "get", + "phase", + BASE_PHASE, + "operation", + "compute.backendServices.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + getRegistry()); + + if (backendService == null) continue; + + if (backendService.getBackends() != null && backendService.getBackends().size() > 0) { + getTask() + .updateStatus( + BASE_PHASE, + "Server groups still associated with Internal Http(s) load balancer " + + description.getLoadBalancerName() + + ". 
Failing..."); + throw new IllegalStateException( + "Server groups still associated with Internal Http(s) load balancer: " + + description.getLoadBalancerName() + + "."); + } + + healthCheckUrls.addAll(backendService.getHealthChecks()); + } + + final Long timeoutSeconds = description.getDeleteOperationTimeoutSeconds(); + + for (String ruleName : listenersToDelete) { + getTask() + .updateStatus(BASE_PHASE, "Deleting listener " + ruleName + " in " + region + "..."); + + Operation operation = + GCEUtil.deleteRegionalListener( + compute, + project, + region, + ruleName, + BASE_PHASE, + getSafeRetry(), + DeleteGoogleInternalHttpLoadBalancerAtomicOperation.this); + + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + operation.getName(), + timeoutSeconds, + getTask(), + "listener " + ruleName, + BASE_PHASE); + } + + getTask() + .updateStatus(BASE_PHASE, "Deleting URL map " + urlMapName + " in " + region + "..."); + Operation deleteUrlMapOperation = + safeRetry.doRetry( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute.regionUrlMaps().delete(project, region, urlMapName), + "compute.regionUrlMaps.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Url map " + urlMapName, + getTask(), + ImmutableList.of(400, 403, 412), + ImmutableList.of(404), + ImmutableMap.of( + "action", + "delete", + "phase", + BASE_PHASE, + "operation", + "compute.regionUrlMaps.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + getRegistry()); + + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + deleteUrlMapOperation.getName(), + timeoutSeconds, + getTask(), + "Regional url map " + urlMapName, + BASE_PHASE); + + // We make a list of the delete operations for backend services. + List deleteBackendServiceAsyncOperations = + new ArrayList<>(); + for (String backendServiceUrl : backendServiceUrls) { + final String backendServiceName = GCEUtil.getLocalName(backendServiceUrl); + Operation deleteBackendServiceOp = + GCEUtil.deleteIfNotInUse( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute + .regionBackendServices() + .delete(project, region, backendServiceName), + "compute.regionBackendServices.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Backend service " + backendServiceName, + project, + getTask(), + ImmutableMap.of( + "action", + "delete", + "operation", + "compute.regionBackendServices.delete", + "phase", + BASE_PHASE, + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + safeRetry, + this); + if (deleteBackendServiceOp != null) { + deleteBackendServiceAsyncOperations.add( + new BackendServiceAsyncDeleteOperation( + backendServiceName, deleteBackendServiceOp.getName())); + } + } + + // Wait on all of these deletes to complete. + for (BackendServiceAsyncDeleteOperation asyncOperation : + deleteBackendServiceAsyncOperations) { + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + asyncOperation.getOperationName(), + timeoutSeconds, + getTask(), + "Region backend service " + asyncOperation.getBackendServiceName(), + BASE_PHASE); + } + + // Now make a list of the delete operations for health checks if description says to do so. 
+ if (description.getDeleteHealthChecks()) { + List deleteHealthCheckAsyncOperations = new ArrayList<>(); + for (String healthCheckUrl : healthCheckUrls) { + final String healthCheckName = GCEUtil.getLocalName(healthCheckUrl); + Operation deleteHealthCheckOp = + GCEUtil.deleteIfNotInUse( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute.regionHealthChecks().delete(project, region, healthCheckName), + "compute.regionHealthChecks.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Region Http health check " + healthCheckName, + project, + getTask(), + ImmutableMap.of( + "action", + "delete", + "operation", + "compute.regionHealthChecks.delete", + "phase", + BASE_PHASE, + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + safeRetry, + this); + if (deleteHealthCheckOp != null) { + deleteHealthCheckAsyncOperations.add( + new HealthCheckAsyncDeleteOperation( + healthCheckName, deleteHealthCheckOp.getName())); + } + } + + // Finally, wait on all of these deletes to complete. + for (HealthCheckAsyncDeleteOperation asyncOperation : deleteHealthCheckAsyncOperations) { + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + asyncOperation.getOperationName(), + timeoutSeconds, + getTask(), + "region health check " + asyncOperation.getHealthCheckName(), + BASE_PHASE); + } + } + + getTask() + .updateStatus( + BASE_PHASE, + "Done deleting internal http load balancer " + + description.getLoadBalancerName() + + " in " + + region + + "."); + return null; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public SafeRetry getSafeRetry() { + return safeRetry; + } + + public void setSafeRetry(SafeRetry safeRetry) { + this.safeRetry = safeRetry; + } + + public static class HealthCheckAsyncDeleteOperation { + public HealthCheckAsyncDeleteOperation(String healthCheckName, String operationName) { + this.healthCheckName = healthCheckName; + this.operationName = operationName; + } + + public String getHealthCheckName() { + return healthCheckName; + } + + public String getOperationName() { + return operationName; + } + + private String healthCheckName; + private String operationName; + } + + public static class BackendServiceAsyncDeleteOperation { + public BackendServiceAsyncDeleteOperation(String backendServiceName, String operationName) { + this.backendServiceName = backendServiceName; + this.operationName = operationName; + } + + public String getBackendServiceName() { + return backendServiceName; + } + + public String getOperationName() { + return operationName; + } + + private String backendServiceName; + private String operationName; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..3d1fa0a2462 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java @@ -0,0 +1,1072 @@ +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer; + +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES; +import static 
com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +import com.google.api.client.googleapis.json.GoogleJsonResponseException; +import com.google.api.client.json.GenericJson; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.*; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.deploy.description.BaseGoogleInstanceDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException; +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck; +import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork; +import com.netflix.spinnaker.clouddriver.google.model.GoogleSubnet; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.*; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry; +import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor; +import groovy.lang.Closure; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.codehaus.groovy.runtime.StringGroovyMethods; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; + +public class UpsertGoogleInternalHttpLoadBalancerAtomicOperation + extends UpsertGoogleLoadBalancerAtomicOperation { + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private static final Logger log = LoggerFactory.getLogger(GoogleInternalHttpLoadBalancer.class); + private static final String BASE_PHASE = "UPSERT_INTERNAL_HTTP_LOAD_BALANCER"; + private static final String PATH_MATCHER_PREFIX = "pm"; + public static final String TARGET_HTTP_PROXY_NAME_PREFIX = "target-http-proxy"; + public static final String TARGET_HTTPS_PROXY_NAME_PREFIX = "target-https-proxy"; + @Autowired private GoogleOperationPoller googleOperationPoller; + @Autowired private AtomicOperationsRegistry atomicOperationsRegistry; + @Autowired private GoogleNetworkProvider googleNetworkProvider; + @Autowired private GoogleSubnetProvider googleSubnetProvider; + @Autowired private OrchestrationProcessor orchestrationProcessor; + @Autowired private SafeRetry safeRetry; + private final UpsertGoogleLoadBalancerDescription description; + + 
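+  // Naming sketch (assumed convention from the constants above): listeners for a load balancer
+  // named "internal-http-create" get proxies prefixed with "target-http-proxy" or
+  // "target-https-proxy", and generated path matchers are prefixed with "pm".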
public UpsertGoogleInternalHttpLoadBalancerAtomicOperation( + UpsertGoogleLoadBalancerDescription description) { + this.description = description; + } + + /** + * minimal command: curl -v -X POST -H "Content-Type: application/json" -d '[{ + * "upsertLoadBalancer": {"credentials": "my-google-account", "loadBalancerType": + * "INTERNAL_MANAGED", "loadBalancerName": "internal-http-create", "portRange": "80", + * "backendServiceDiff": [], "defaultService": {"name": "default-backend-service", "backends": [], + * "healthCheck": {"name": "basic-check", "requestPath": "/", "port": 80, "checkIntervalSec": 1, + * "timeoutSec": 1, "healthyThreshold": 1, "unhealthyThreshold": 1}}, "certificate": "", + * "hostRules": [] }}]' localhost:7002/gce/ops + * + *

full command: curl -v -X POST -H "Content-Type: application/json" -d '[{ + * "upsertLoadBalancer": {"credentials": "my-google-account", "loadBalancerType": + * "INTERNAL_MANAGED", "loadBalancerName": "internal-http-create", "portRange": "80", + * "backendServiceDiff": [], "defaultService": {"name": "default-backend-service", "backends": [], + * "healthCheck": {"name": "basic-check", "requestPath": "/", "port": 80, "checkIntervalSec": 1, + * "timeoutSec": 1, "healthyThreshold": 1, "unhealthyThreshold": 1}}, "certificate": "", + * "hostRules": [{"hostPatterns": ["host1.com", "host2.com"], "pathMatcher": {"pathRules": + * [{"paths": ["/path", "/path2/more"], "backendService": {"name": "backend-service", "backends": + * [], "healthCheck": {"name": "health-check", "requestPath": "/", "port": 80, "checkIntervalSec": + * 1, "timeoutSec": 1, "healthyThreshold": 1, "unhealthyThreshold": 1}}}], "defaultService": + * {"name": "pm-backend-service", "backends": [], "healthCheck": {"name": "derp-check", + * "requestPath": "/", "port": 80, "checkIntervalSec": 1, "timeoutSec": 1, "healthyThreshold": 1, + * "unhealthyThreshold": 1}}}}]}}]' localhost:7002/gce/ops + * + * @param description + * @param priorOutputs + * @return + */ + @Override + public Map operate(List priorOutputs) { + GoogleNetwork network = + GCEUtil.queryNetwork( + description.getAccountName(), + description.getNetwork(), + getTask(), + BASE_PHASE, + googleNetworkProvider); + GoogleSubnet subnet = + GCEUtil.querySubnet( + description.getAccountName(), + description.getRegion(), + description.getSubnet(), + getTask(), + BASE_PHASE, + googleSubnetProvider); + GoogleInternalHttpLoadBalancer internalHttpLoadBalancer = new GoogleInternalHttpLoadBalancer(); + + internalHttpLoadBalancer.setName(description.getLoadBalancerName()); + internalHttpLoadBalancer.setUrlMapName(description.getUrlMapName()); + internalHttpLoadBalancer.setDefaultService(description.getDefaultService()); + internalHttpLoadBalancer.setHostRules( + description.getHostRules() != null ? description.getHostRules() : new ArrayList<>()); + internalHttpLoadBalancer.setCertificate(description.getCertificate()); + internalHttpLoadBalancer.setIpAddress(description.getIpAddress()); + internalHttpLoadBalancer.setIpProtocol(description.getIpProtocol()); + internalHttpLoadBalancer.setNetwork(network.getSelfLink()); + internalHttpLoadBalancer.setSubnet(subnet.getSelfLink()); + internalHttpLoadBalancer.setPortRange(description.getPortRange()); + + String internalHttpLoadBalancerName = internalHttpLoadBalancer.getName(); + + getTask() + .updateStatus( + BASE_PHASE, + "Initializing upsert of Internal HTTP load balancer " + + internalHttpLoadBalancerName + + "..."); + + if (description.getCredentials() == null) { + throw new IllegalArgumentException( + "Unable to resolve credentials for Google account '" + + description.getAccountName() + + "'."); + } + + Compute compute = description.getCredentials().getCompute(); + String project = description.getCredentials().getProject(); + String region = description.getRegion(); + + // Step 0: Set up state to formulate a plan for creating or updating the L7 LB. + + Set healthCheckExistsSet = new HashSet<>(); + Set healthCheckNeedsUpdatedSet = new HashSet<>(); + Set serviceExistsSet = new HashSet<>(); + Set serviceNeedsUpdatedSet = new HashSet<>(); + boolean urlMapExists; + boolean targetProxyExists = false; + boolean targetProxyNeedsUpdated = false; + boolean forwardingRuleExists; + + // The following are unique on object equality, not just name. 
+    // This lets us check if a service/hc exists or needs to be updated by _name_ later.
+    List<GoogleBackendService> backendServicesFromDescription =
+        ImmutableSet.copyOf(
+                Utils.getBackendServicesFromInternalHttpLoadBalancerView(
+                    internalHttpLoadBalancer.getView()))
+            .asList();
+    List<GoogleHealthCheck> healthChecksFromDescription =
+        backendServicesFromDescription.stream()
+            .map(GoogleBackendService::getHealthCheck)
+            .distinct()
+            .collect(toList());
+
+    final String name = internalHttpLoadBalancer.getUrlMapName();
+    String urlMapName =
+        name != null
+            ? name
+            : internalHttpLoadBalancerName; // An L7 load balancer is identified by its UrlMap name
+    // in Google Cloud Console.
+
+    // Get all the existing infrastructure.
+
+    // Look up the legacy health checks so we can transition smoothly to the unified health checks
+    // (UHCs).
+    try {
+      List<HealthCheck> existingHealthChecks =
+          timeExecute(
+                  compute.regionHealthChecks().list(project, region),
+                  "compute.regionHealthChecks.list",
+                  TAG_SCOPE,
+                  SCOPE_REGIONAL,
+                  TAG_REGION,
+                  region)
+              .getItems();
+      List<BackendService> existingServices =
+          timeExecute(
+                  compute.regionBackendServices().list(project, region),
+                  "compute.regionBackendServices.list",
+                  TAG_SCOPE,
+                  SCOPE_REGIONAL,
+                  TAG_REGION,
+                  region)
+              .getItems();
+      UrlMap existingUrlMap = null;
+      try {
+        existingUrlMap =
+            timeExecute(
+                compute.regionUrlMaps().get(project, region, urlMapName),
+                "compute.regionUrlMaps.get",
+                TAG_SCOPE,
+                SCOPE_REGIONAL,
+                TAG_REGION,
+                region);
+      } catch (GoogleJsonResponseException e) {
+        // 404 is thrown if the url map doesn't exist. Any other exception needs to be propagated.
+        if (e.getStatusCode() != 404) {
+          throw e;
+        }
+      }
+
+      // Determine if the infrastructure in the description exists already.
+      // If it does, check and see if we need to update it from the description.
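+      // Note that a component may exist in GCE yet differ from the description; such a component
+      // is flagged for an in-place update rather than recreated.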
+ + // UrlMap + urlMapExists = existingUrlMap != null; + + // ForwardingRule + ForwardingRule existingRule = null; + try { + existingRule = + timeExecute( + compute.forwardingRules().get(project, region, internalHttpLoadBalancerName), + "compute.forwardingRules.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (GoogleJsonResponseException e) { + if (e.getStatusCode() != 404) { + throw e; + } + } + + forwardingRuleExists = existingRule != null; + + // TargetProxy + GenericJson existingProxy = null; + if (forwardingRuleExists) { + String targetProxyName = GCEUtil.getLocalName(existingRule.getTarget()); + switch (Utils.getTargetProxyType(existingRule.getTarget())) { + case HTTP: + existingProxy = + timeExecute( + compute.regionTargetHttpProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpProxies.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + break; + case HTTPS: + existingProxy = + timeExecute( + compute.regionTargetHttpsProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpsProxies.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + if (!StringGroovyMethods.asBoolean(internalHttpLoadBalancer.getCertificate())) { + throw new IllegalArgumentException( + internalHttpLoadBalancerName + + " is an Https load balancer, but the upsert description does not contain a certificate."); + } + + targetProxyNeedsUpdated = + !GCEUtil.getLocalName( + ((TargetHttpsProxy) existingProxy).getSslCertificates().get(0)) + .equals( + GCEUtil.getLocalName( + GCEUtil.buildCertificateUrl( + project, internalHttpLoadBalancer.getCertificate()))); + break; + default: + log.warn("Unexpected target proxy type for " + targetProxyName + "."); + break; + } + targetProxyExists = existingProxy != null; + if (targetProxyExists + && !GCEUtil.getLocalName((String) existingProxy.get("urlMap")) + .equals(description.getUrlMapName())) { + throw new IllegalStateException( + "Listener with name " + + existingRule.getName() + + " already exists and points to url map: " + + GCEUtil.getLocalName((String) existingProxy.get("urlMap")) + + "," + + " which is different from the description url map: " + + description.getUrlMapName() + + "."); + } + } + + // HealthChecks + if (healthChecksFromDescription.size() + != healthChecksFromDescription.stream() + .map(GoogleHealthCheck::getName) + .distinct() + .count()) { + throw new GoogleOperationException( + "Duplicate health checks with different attributes in the description. Please specify one object per named health check."); + } + + for (GoogleHealthCheck healthCheck : healthChecksFromDescription) { + String healthCheckName = healthCheck.getName(); + + existingHealthChecks.stream() + .filter(e -> e.getName().equals(healthCheckName)) + .findFirst() + .ifPresent( + existingHealthCheck -> { + healthCheckExistsSet.add(healthCheck.getName()); + if (GCEUtil.healthCheckShouldBeUpdated(existingHealthCheck, healthCheck)) { + healthCheckNeedsUpdatedSet.add(healthCheck.getName()); + } + }); + } + + // BackendServices + if (backendServicesFromDescription.size() + != backendServicesFromDescription.stream() + .map(GoogleBackendService::getName) + .distinct() + .count()) { + throw new GoogleOperationException( + "Duplicate backend services with different attributes in the description. 
Please specify one object per named backend service.");
+    }
+
+    for (GoogleBackendService backendService : backendServicesFromDescription) {
+      final String backendServiceName = backendService.getName();
+
+      existingServices.stream()
+          .filter(e -> e.getName().equals(backendServiceName))
+          .findFirst()
+          .ifPresent(
+              existingService -> {
+                serviceExistsSet.add(backendService.getName());
+
+                Set<String> existingHcs =
+                    existingService.getHealthChecks() == null
+                        ? new HashSet<>()
+                        : existingService.getHealthChecks().stream()
+                            .map(GCEUtil::getLocalName)
+                            .collect(toSet());
+                Boolean differentHealthChecks =
+                    Sets.difference(
+                                existingHcs,
+                                ImmutableSet.of(backendService.getHealthCheck().getName()))
+                            .size()
+                        > 0;
+                Boolean differentSessionAffinity =
+                    !GoogleSessionAffinity.valueOf(existingService.getSessionAffinity())
+                        .equals(backendService.getSessionAffinity());
+                Boolean differentSessionCookieTtl =
+                    !Objects.equals(
+                        existingService.getAffinityCookieTtlSec(),
+                        backendService.getAffinityCookieTtlSec());
+                Boolean differentPortName =
+                    !Objects.equals(existingService.getPortName(), backendService.getPortName());
+                Integer drainingSec =
+                    existingService.getConnectionDraining() == null
+                        ? 0
+                        : existingService.getConnectionDraining().getDrainingTimeoutSec();
+                Boolean differentConnectionDraining =
+                    !Objects.equals(
+                        drainingSec, backendService.getConnectionDrainingTimeoutSec());
+                if (differentHealthChecks
+                    || differentSessionAffinity
+                    || differentSessionCookieTtl
+                    || differentPortName
+                    || differentConnectionDraining) {
+                  serviceNeedsUpdatedSet.add(backendService.getName());
+                }
+              });
+    }
+
+    // Step 1: If there are no existing components in GCE, insert the new L7 components.
+    // If something exists and needs updating, update it. Else do nothing.
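+    // For example, a described health check that already exists with identical settings is left
+    // untouched, one that exists with different settings was flagged above and is updated in
+    // place, and one with no counterpart in GCE is created from scratch.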
+ + // HealthChecks + for (GoogleHealthCheck healthCheck : healthChecksFromDescription) { + String healthCheckName = healthCheck.getName(); + + if (!healthCheckExistsSet.contains(healthCheck.getName())) { + getTask() + .updateStatus( + BASE_PHASE, "Creating health check " + healthCheckName + " in " + region + "..."); + HealthCheck newHealthCheck = GCEUtil.createNewHealthCheck(healthCheck); + Operation insertHealthCheckOperation = + timeExecute( + compute.regionHealthChecks().insert(project, region, newHealthCheck), + "compute.regionHealthChecks.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertHealthCheckOperation.getName(), + null, + getTask(), + "region health check " + healthCheckName, + BASE_PHASE); + } else if (healthCheckExistsSet.contains(healthCheck.getName()) + && healthCheckNeedsUpdatedSet.contains(healthCheck.getName())) { + getTask().updateStatus(BASE_PHASE, "Updating health check " + healthCheckName + "..."); + HealthCheck hcToUpdate = + existingHealthChecks.stream() + .filter(hc -> hc.getName().equals(healthCheckName)) + .findFirst() + .get(); + GCEUtil.updateExistingHealthCheck(hcToUpdate, healthCheck); + Operation updateHealthCheckOperation = + timeExecute( + compute.regionHealthChecks().update(project, region, healthCheckName, hcToUpdate), + "compute.regionHealthChecks.update", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + updateHealthCheckOperation.getName(), + null, + getTask(), + "region health check " + healthCheckName, + BASE_PHASE); + } + } + + // BackendServices + for (GoogleBackendService backendService : backendServicesFromDescription) { + String backendServiceName = backendService.getName(); + String sessionAffinity = + backendService.getSessionAffinity() != null + ? backendService.getSessionAffinity().toString() + : "NONE"; + + if (!serviceExistsSet.contains(backendService.getName())) { + getTask() + .updateStatus( + BASE_PHASE, + "Creating backend service " + backendServiceName + " in " + region + "..."); + BackendService service = new BackendService(); + + BackendService bs = service.setName(backendServiceName); + service.setLoadBalancingScheme("INTERNAL_MANAGED"); + service.setPortName( + backendService.getPortName() != null + ? backendService.getPortName() + : GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + service.setConnectionDraining( + new ConnectionDraining() + .setDrainingTimeoutSec(backendService.getConnectionDrainingTimeoutSec())); + service.setHealthChecks( + Arrays.asList( + GCEUtil.buildRegionalHealthCheckUrl( + project, region, backendService.getHealthCheck().getName()))); + service.setSessionAffinity(sessionAffinity); + service.setAffinityCookieTtlSec(backendService.getAffinityCookieTtlSec()); + Operation insertBackendServiceOperation = + timeExecute( + compute.regionBackendServices().insert(project, region, bs), + "compute.regionBackendServices.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertBackendServiceOperation.getName(), + null, + getTask(), + "region backend service " + backendServiceName, + BASE_PHASE); + } else if (serviceExistsSet.contains(backendService.getName())) { + // Update the actual backend service if necessary. 
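+        // fixBackendMetadata below runs even when the existing service needs no update, so the
+        // instance templates of the server groups backing this service always record their
+        // membership in this load balancer.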
+ if (serviceNeedsUpdatedSet.contains(backendService.getName())) { + getTask() + .updateStatus( + BASE_PHASE, + "Updating backend service " + backendServiceName + " in " + region + "..."); + BackendService bsToUpdate = + existingServices.stream() + .filter(s -> s.getName().equals(backendServiceName)) + .findFirst() + .get(); + String hcName = backendService.getHealthCheck().getName(); + bsToUpdate.setPortName( + backendService.getPortName() != null + ? backendService.getPortName() + : GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + bsToUpdate.setConnectionDraining( + new ConnectionDraining() + .setDrainingTimeoutSec(backendService.getConnectionDrainingTimeoutSec())); + bsToUpdate.setHealthChecks( + Arrays.asList(GCEUtil.buildRegionalHealthCheckUrl(project, region, hcName))); + bsToUpdate.setSessionAffinity(sessionAffinity); + bsToUpdate.setAffinityCookieTtlSec(backendService.getAffinityCookieTtlSec()); + + Operation updateServiceOperation = + timeExecute( + compute + .regionBackendServices() + .update(project, region, backendServiceName, bsToUpdate), + "compute.regionBackendServices.update", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + updateServiceOperation.getName(), + null, + getTask(), + "region backend service " + backendServiceName, + BASE_PHASE); + } + + fixBackendMetadata( + compute, + description.getCredentials(), + project, + getAtomicOperationsRegistry(), + getOrchestrationProcessor(), + description.getLoadBalancerName(), + backendService); + } + } + if (description.getBackendServiceDiff() != null) { + for (GoogleBackendService backendService : description.getBackendServiceDiff()) { + fixBackendMetadata( + compute, + description.getCredentials(), + project, + getAtomicOperationsRegistry(), + getOrchestrationProcessor(), + description.getLoadBalancerName(), + backendService); + } + } + + // UrlMap + String urlMapUrl = null; + if (!urlMapExists) { + getTask() + .updateStatus(BASE_PHASE, "Creating URL map " + urlMapName + " in " + region + "..."); + UrlMap newUrlMap = new UrlMap(); + newUrlMap.setName(urlMapName); + newUrlMap.setHostRules(new ArrayList<>()); + newUrlMap.setPathMatchers(new ArrayList<>()); + newUrlMap.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, internalHttpLoadBalancer.getDefaultService().getName())); + for (GoogleHostRule hostRule : internalHttpLoadBalancer.getHostRules()) { + String pathMatcherName = PATH_MATCHER_PREFIX + "-" + UUID.randomUUID().toString(); + GooglePathMatcher pathMatcher = hostRule.getPathMatcher(); + PathMatcher matcher = new PathMatcher(); + matcher.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, pathMatcher.getDefaultService().getName())); + matcher.setPathRules( + pathMatcher.getPathRules().stream() + .map( + p -> { + PathRule rule = new PathRule(); + rule.setPaths(p.getPaths()); + rule.setService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, p.getBackendService().getName())); + return rule; + }) + .collect(toList())); + newUrlMap.getPathMatchers().add(matcher); + + HostRule rule = new HostRule(); + rule.setHosts(hostRule.getHostPatterns()); + rule.setPathMatcher(pathMatcherName); + newUrlMap.getHostRules().add(rule); + } + Operation insertUrlMapOperation = + timeExecute( + compute.regionUrlMaps().insert(project, region, newUrlMap), + "compute.regionUrlMaps.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + 
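+      // Wait for the URL map insert to finish; its target link is needed when wiring up the
+      // target proxy below.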
googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertUrlMapOperation.getName(), + null, + getTask(), + "region url map " + urlMapName, + BASE_PHASE); + urlMapUrl = insertUrlMapOperation.getTargetLink(); + } else if (urlMapExists) { + getTask() + .updateStatus(BASE_PHASE, "Updating URL map " + urlMapName + " in " + region + "..."); + existingUrlMap.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, internalHttpLoadBalancer.getDefaultService().getName())); + existingUrlMap.setPathMatchers(new ArrayList<>()); + existingUrlMap.setHostRules(new ArrayList<>()); + for (GoogleHostRule hostRule : internalHttpLoadBalancer.getHostRules()) { + String pathMatcherName = PATH_MATCHER_PREFIX + "-" + UUID.randomUUID().toString(); + GooglePathMatcher pathMatcher = hostRule.getPathMatcher(); + PathMatcher matcher = new com.google.api.services.compute.model.PathMatcher(); + matcher.setName(pathMatcherName); + matcher.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, pathMatcher.getDefaultService().getName())); + matcher.setPathRules( + pathMatcher.getPathRules().stream() + .map( + p -> { + PathRule rule = new PathRule(); + rule.setService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, p.getBackendService().getName())); + rule.setPaths(p.getPaths()); + return rule; + }) + .collect(toList())); + existingUrlMap.getPathMatchers().add(matcher); + HostRule rule = new HostRule(); + rule.setHosts(hostRule.getHostPatterns()); + existingUrlMap.getHostRules().add(rule.setPathMatcher(pathMatcherName)); + } + Operation updateUrlMapOperation = + timeExecute( + compute.regionUrlMaps().update(project, region, urlMapName, existingUrlMap), + "compute.regionUrlMaps.update", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + updateUrlMapOperation.getName(), + null, + getTask(), + "region url map " + urlMapName, + BASE_PHASE); + urlMapUrl = updateUrlMapOperation.getTargetLink(); + } else { + urlMapUrl = existingUrlMap.getSelfLink(); + } + + // TargetProxy + String targetProxyName; + Object targetProxy; + Operation insertTargetProxyOperation; + String targetProxyUrl = null; + if (!targetProxyExists) { + if (!StringUtils.isEmpty(internalHttpLoadBalancer.getCertificate())) { + targetProxyName = internalHttpLoadBalancerName + "-" + TARGET_HTTPS_PROXY_NAME_PREFIX; + getTask() + .updateStatus( + BASE_PHASE, "Creating target proxy " + targetProxyName + " in " + region + "..."); + TargetHttpsProxy proxy = new TargetHttpsProxy(); + proxy.setSslCertificates( + Arrays.asList( + GCEUtil.buildCertificateUrl(project, internalHttpLoadBalancer.getCertificate()))); + proxy.setUrlMap(urlMapUrl); + proxy.setName(targetProxyName); + targetProxy = proxy; + insertTargetProxyOperation = + timeExecute( + compute + .regionTargetHttpsProxies() + .insert(project, region, (TargetHttpsProxy) targetProxy), + "compute.regionTargetHttpsProxies.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } else { + targetProxyName = internalHttpLoadBalancerName + "-" + TARGET_HTTP_PROXY_NAME_PREFIX; + getTask() + .updateStatus( + BASE_PHASE, "Creating target proxy " + targetProxyName + " in " + region + "..."); + TargetHttpProxy proxy = new TargetHttpProxy(); + proxy.setName(targetProxyName); + proxy.setUrlMap(urlMapUrl); + targetProxy = proxy; + insertTargetProxyOperation = + timeExecute( + compute + .regionTargetHttpProxies() + .insert(project, region, 
(TargetHttpProxy) targetProxy), + "compute.regionTargetHttpProxies.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } + + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertTargetProxyOperation.getName(), + null, + getTask(), + "region target proxy " + targetProxyName, + BASE_PHASE); + targetProxyUrl = insertTargetProxyOperation.getTargetLink(); + } else if (targetProxyExists && targetProxyNeedsUpdated) { + GoogleTargetProxyType proxyType = + Utils.getTargetProxyType((String) existingProxy.get("selfLink")); + switch (proxyType) { + case HTTP: + break; + case HTTPS: + targetProxyName = internalHttpLoadBalancerName + "-" + TARGET_HTTPS_PROXY_NAME_PREFIX; + getTask() + .updateStatus( + BASE_PHASE, + "Updating target proxy " + targetProxyName + " in " + region + "..."); + RegionTargetHttpsProxiesSetSslCertificatesRequest request = + new RegionTargetHttpsProxiesSetSslCertificatesRequest(); + RegionTargetHttpsProxiesSetSslCertificatesRequest setSslReq = + request.setSslCertificates( + Arrays.asList( + GCEUtil.buildRegionalCertificateUrl( + project, region, internalHttpLoadBalancer.getCertificate()))); + Operation sslCertOp = + timeExecute( + compute + .regionTargetHttpsProxies() + .setSslCertificates(project, region, targetProxyName, setSslReq), + "compute.regionTargetHttpsProxies.setSslCertificates", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + sslCertOp.getName(), + null, + getTask(), + "set ssl cert " + internalHttpLoadBalancer.getCertificate(), + BASE_PHASE); + UrlMapReference reference = new UrlMapReference(); + UrlMapReference urlMapRef = reference.setUrlMap(urlMapUrl); + Operation setUrlMapOp = + timeExecute( + compute + .regionTargetHttpsProxies() + .setUrlMap(project, region, targetProxyName, urlMapRef), + "compute.regionTargetHttpsProxies.setUrlMap", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + setUrlMapOp.getName(), + null, + getTask(), + "set urlMap " + urlMapUrl + " for target proxy " + targetProxyName, + BASE_PHASE); + targetProxyUrl = setUrlMapOp.getTargetLink(); + break; + default: + throw new IllegalStateException( + "Updating Internal Http load balancer " + + internalHttpLoadBalancerName + + " in " + + region + + " failed. Could not update target proxy; Illegal target proxy type " + + proxyType + + "."); + } + } else { + targetProxyUrl = (String) existingProxy.get("selfLink"); + } + + // ForwardingRule + if (!forwardingRuleExists) { + getTask() + .updateStatus( + BASE_PHASE, + "Creating internal forwarding rule " + + internalHttpLoadBalancerName + + " in " + + region + + "..."); + ForwardingRule rule = new ForwardingRule(); + + rule.setName(internalHttpLoadBalancerName); + rule.setLoadBalancingScheme("INTERNAL_MANAGED"); + rule.setIPAddress(internalHttpLoadBalancer.getIpAddress()); + rule.setIPProtocol(internalHttpLoadBalancer.getIpProtocol()); + rule.setNetwork(internalHttpLoadBalancer.getNetwork()); + rule.setSubnetwork(internalHttpLoadBalancer.getSubnet()); + rule.setPortRange( + StringGroovyMethods.asBoolean(internalHttpLoadBalancer.getCertificate()) + ? 
"443" + : internalHttpLoadBalancer.getPortRange()); + rule.setTarget(targetProxyUrl); + + Operation forwardingRuleOp = + safeRetry.doRetry( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute.forwardingRules().insert(project, region, rule), + "compute.forwardingRules.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "forwarding rule " + description.getLoadBalancerName(), + getTask(), + Arrays.asList(400, 403, 412), + new ArrayList<>(), + ImmutableMap.of( + "action", + "insert", + "phase", + BASE_PHASE, + "operation", + "compute.forwardingRules.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + getRegistry()); + + // Orca's orchestration for upserting a Google load balancer does not contain a task + // to wait for the state of the platform to show that a load balancer was created (for good + // reason, + // that would be a complicated operation). Instead, Orca waits for Clouddriver to execute + // this operation + // and do a force cache refresh. We should wait for the whole load balancer to be created in + // the platform + // before we exit this upsert operation, so we wait for the forwarding rule to be created + // before continuing + // so we _know_ the state of the platform when we do a force cache refresh. + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + forwardingRuleOp.getName(), + null, + getTask(), + "forwarding rule " + internalHttpLoadBalancerName, + BASE_PHASE); + } + + // NOTE: there is no update for forwarding rules because we support adding/deleting multiple + // listeners in the frontend. + // Rotating or changing certificates updates the targetProxy only, so the forwarding rule + // doesn't need to change. + + // Delete extraneous listeners. + if (description.getListenersToDelete() != null) { + for (String forwardingRuleName : description.getListenersToDelete()) { + getTask() + .updateStatus( + BASE_PHASE, "Deleting listener " + forwardingRuleName + " in " + region + "..."); + GCEUtil.deleteRegionalListener( + compute, + project, + region, + forwardingRuleName, + BASE_PHASE, + getSafeRetry(), + UpsertGoogleInternalHttpLoadBalancerAtomicOperation.this); + } + } + getTask() + .updateStatus( + BASE_PHASE, + "Done upserting Internal HTTP load balancer " + + internalHttpLoadBalancerName + + " in " + + region); + + Map lb = new HashMap<>(1); + lb.put("name", internalHttpLoadBalancerName); + Map> regionToLb = new HashMap<>(1); + regionToLb.put("region", lb); + + Map>> lbs = new HashMap<>(1); + lbs.put("loadBalancers", regionToLb); + return lbs; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** + * Update each instance template on all the server groups in the backend service to reflect being + * added to the new load balancer. 
+ * + * @param compute + * @param credentials + * @param project + * @param loadBalancerName + * @param backendService + */ + private void fixBackendMetadata( + Compute compute, + GoogleNamedAccountCredentials credentials, + String project, + AtomicOperationsRegistry atomicOperationsRegistry, + OrchestrationProcessor orchestrationProcessor, + String loadBalancerName, + GoogleBackendService backendService) { + try { + for (GoogleLoadBalancedBackend backend : backendService.getBackends()) { + + String groupName = Utils.getLocalName(backend.getServerGroupUrl()); + String groupRegion = Utils.getRegionFromGroupUrl(backend.getServerGroupUrl()); + String templateUrl = null; + switch (Utils.determineServerGroupType(backend.getServerGroupUrl())) { + case REGIONAL: + templateUrl = + timeExecute( + compute.regionInstanceGroupManagers().get(project, groupRegion, groupName), + "compute.regionInstanceGroupManagers.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + groupRegion) + .getInstanceTemplate(); + break; + case ZONAL: + String groupZone = Utils.getZoneFromGroupUrl(backend.getServerGroupUrl()); + templateUrl = + timeExecute( + compute.instanceGroupManagers().get(project, groupZone, groupName), + "compute.instanceGroupManagers.get", + TAG_SCOPE, + SCOPE_ZONAL, + TAG_ZONE, + groupZone) + .getInstanceTemplate(); + break; + default: + throw new IllegalStateException( + "Server group referenced by " + backend.getServerGroupUrl() + " has illegal type."); + } + + InstanceTemplate template = + timeExecute( + compute.instanceTemplates().get(project, Utils.getLocalName(templateUrl)), + "compute.instancesTemplates.get", + TAG_SCOPE, + SCOPE_GLOBAL); + BaseGoogleInstanceDescription instanceDescription = + GCEUtil.buildInstanceDescriptionFromTemplate(project, template); + + Map templateOpMap = new HashMap<>(15); + templateOpMap.put("image", instanceDescription.getImage()); + templateOpMap.put("instanceType", instanceDescription.getInstanceType()); + templateOpMap.put("credentials", credentials.getName()); + templateOpMap.put("disks", instanceDescription.getDisks()); + templateOpMap.put("instanceMetadata", instanceDescription.getInstanceMetadata()); + templateOpMap.put("tags", instanceDescription.getTags()); + templateOpMap.put("network", instanceDescription.getNetwork()); + templateOpMap.put("subnet", instanceDescription.getSubnet()); + templateOpMap.put("serviceAccountEmail", instanceDescription.getServiceAccountEmail()); + templateOpMap.put("authScopes", instanceDescription.getAuthScopes()); + templateOpMap.put("preemptible", instanceDescription.getPreemptible()); + templateOpMap.put("automaticRestart", instanceDescription.getAutomaticRestart()); + templateOpMap.put("onHostMaintenance", instanceDescription.getOnHostMaintenance()); + templateOpMap.put("region", groupRegion); + templateOpMap.put("serverGroupName", groupName); + + if (StringGroovyMethods.asBoolean(instanceDescription.getMinCpuPlatform())) { + templateOpMap.put("minCpuPlatform", instanceDescription.getMinCpuPlatform()); + } + + if (templateOpMap.containsKey("instanceMetadata")) { + Map instanceMetadata = (Map) templateOpMap.get("instanceMetadata"); + String regionLbStr = instanceMetadata.get(REGIONAL_LOAD_BALANCER_NAMES); + List regionalLbs = + regionLbStr != null + ? 
new ArrayList<>(Arrays.asList(regionLbStr.split(","))) + : new ArrayList<>(); + regionalLbs.add(loadBalancerName); + instanceMetadata.put( + REGIONAL_LOAD_BALANCER_NAMES, + regionalLbs.stream().distinct().collect(Collectors.joining(","))); + + String backendsStr = instanceMetadata.get(REGION_BACKEND_SERVICE_NAMES); + List bsNames = + backendsStr != null + ? new ArrayList<>(Arrays.asList(backendsStr.split(","))) + : new ArrayList<>(); + bsNames.add(backendService.getName()); + instanceMetadata.put( + REGION_BACKEND_SERVICE_NAMES, + bsNames.stream().distinct().collect(Collectors.joining(","))); + } else { + Map instanceMetadata = new HashMap<>(2); + instanceMetadata.put(REGIONAL_LOAD_BALANCER_NAMES, loadBalancerName); + instanceMetadata.put(REGION_BACKEND_SERVICE_NAMES, backendService.getName()); + templateOpMap.put("instanceMetadata", instanceMetadata); + } + + AtomicOperationConverter converter = + atomicOperationsRegistry.getAtomicOperationConverter( + "modifyGoogleServerGroupInstanceTemplateDescription", "gce"); + AtomicOperation templateOp = converter.convertOperation(templateOpMap); + orchestrationProcessor.process( + "gce", new ArrayList<>(Arrays.asList(templateOp)), UUID.randomUUID().toString()); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public AtomicOperationsRegistry getAtomicOperationsRegistry() { + return atomicOperationsRegistry; + } + + public void setAtomicOperationsRegistry(AtomicOperationsRegistry atomicOperationsRegistry) { + this.atomicOperationsRegistry = atomicOperationsRegistry; + } + + public OrchestrationProcessor getOrchestrationProcessor() { + return orchestrationProcessor; + } + + public void setOrchestrationProcessor(OrchestrationProcessor orchestrationProcessor) { + this.orchestrationProcessor = orchestrationProcessor; + } + + public SafeRetry getSafeRetry() { + return safeRetry; + } + + public void setSafeRetry(SafeRetry safeRetry) { + this.safeRetry = safeRetry; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/validators/SetStatefulDiskDescriptionValidator.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/validators/SetStatefulDiskDescriptionValidator.java new file mode 100644 index 00000000000..73eee2025bd --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/validators/SetStatefulDiskDescriptionValidator.java @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.google.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.google.GoogleOperation;
+import com.netflix.spinnaker.clouddriver.google.deploy.description.SetStatefulDiskDescription;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import java.util.List;
+import org.springframework.stereotype.Component;
+
+@GoogleOperation(AtomicOperations.SET_STATEFUL_DISK)
+@Component
+public class SetStatefulDiskDescriptionValidator
+    extends DescriptionValidator<SetStatefulDiskDescription> {
+
+  @Override
+  public void validate(
+      List priorDescriptions, SetStatefulDiskDescription description, ValidationErrors errors) {
+    StandardGceAttributeValidator helper =
+        new StandardGceAttributeValidator("setStatefulDiskDescription", errors);
+    helper.validateRegion(description.getRegion(), description.getCredentials());
+    helper.validateServerGroupName(description.getServerGroupName());
+    helper.validateName(description.getDeviceName(), "deviceName");
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/validators/StatefullyUpdateBootImageDescriptionValidator.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/validators/StatefullyUpdateBootImageDescriptionValidator.java
new file mode 100644
index 00000000000..ed0607a0150
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/deploy/validators/StatefullyUpdateBootImageDescriptionValidator.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.google.deploy.description.StatefullyUpdateBootImageDescription;
+import java.util.List;
+
+public class StatefullyUpdateBootImageDescriptionValidator
+    extends DescriptionValidator<StatefullyUpdateBootImageDescription> {
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      StatefullyUpdateBootImageDescription description,
+      ValidationErrors errors) {
+    StandardGceAttributeValidator helper =
+        new StandardGceAttributeValidator("statefullyUpdateBootImageDescription", errors);
+    helper.validateRegion(description.getRegion(), description.getCredentials());
+    helper.validateServerGroupName(description.getServerGroupName());
+    helper.validateName(description.getBootImage(), "bootImage");
+  }
+}
diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDistributionPolicy.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleDistributionPolicy.java
similarity index 89%
rename from clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDistributionPolicy.java
rename to clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleDistributionPolicy.java
index 2cd0993b7ec..e58d2903e73 100644
--- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleDistributionPolicy.java
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleDistributionPolicy.java
@@ -16,18 +16,16 @@
 
 package com.netflix.spinnaker.clouddriver.google.model;
 
+import java.util.List;
 import lombok.AllArgsConstructor;
 import lombok.Data;
 import lombok.NoArgsConstructor;
-
-import java.util.List;
-
-/**
- * Distribution policy for selecting zones in a regional MIG.
- */
+/** Distribution policy for selecting zones and target shape in a regional MIG. */
 @Data
 @NoArgsConstructor
 @AllArgsConstructor
 public class GoogleDistributionPolicy {
   List<String> zones;
+  String targetShape;
 }
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleInstances.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleInstances.java
new file mode 100644
index 00000000000..bd9a8cd44ec
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleInstances.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.google.model; + +import com.google.api.services.compute.model.Instance; +import com.netflix.spinnaker.clouddriver.consul.model.ConsulNode; +import com.netflix.spinnaker.clouddriver.consul.provider.ConsulProviderUtils; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.health.GoogleInstanceHealth; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.math.BigInteger; +import java.util.Optional; + +public final class GoogleInstances { + + public static GoogleInstance createFromComputeInstance( + Instance input, GoogleNamedAccountCredentials credentials) { + + String localZone = Utils.getLocalName(input.getZone()); + + GoogleInstance output = new GoogleInstance(); + output.setName(input.getName()); + output.setAccount(credentials.getProject()); + output.setGceId(Optional.ofNullable(input.getId()).map(BigInteger::toString).orElse(null)); + output.setInstanceType(Utils.getLocalName(input.getMachineType())); + output.setCpuPlatform(input.getCpuPlatform()); + output.setLaunchTime(calculateInstanceTimestamp(input)); + output.setZone(localZone); + output.setRegion(credentials.regionFromZone(localZone)); + output.setNetworkInterfaces(input.getNetworkInterfaces()); + output.setNetworkName(calculateNetworkName(input, credentials)); + output.setMetadata(input.getMetadata()); + output.setDisks(input.getDisks()); + output.setServiceAccounts(input.getServiceAccounts()); + output.setSelfLink(input.getSelfLink()); + output.setTags(input.getTags()); + output.setLabels(input.getLabels()); + output.setConsulNode(calculateConsulNode(input, credentials)); + output.setInstanceHealth(createInstanceHealth(input)); + return output; + } + + private static long calculateInstanceTimestamp(Instance input) { + return input.getCreationTimestamp() != null + ? Utils.getTimeFromTimestamp(input.getCreationTimestamp()) + : Long.MAX_VALUE; + } + + private static String calculateNetworkName( + Instance input, GoogleNamedAccountCredentials credentials) { + return Utils.decorateXpnResourceIdIfNeeded( + credentials.getProject(), + input.getNetworkInterfaces() != null && !input.getNetworkInterfaces().isEmpty() + ? input.getNetworkInterfaces().get(0).getNetwork() + : null); + } + + private static ConsulNode calculateConsulNode( + Instance input, GoogleNamedAccountCredentials credentials) { + return credentials.getConsulConfig() != null && credentials.getConsulConfig().isEnabled() + ? ConsulProviderUtils.getHealths(credentials.getConsulConfig(), input.getName()) + : null; + } + + private static GoogleInstanceHealth createInstanceHealth(Instance input) { + if (input.getStatus() == null) { + return null; + } + GoogleInstanceHealth health = new GoogleInstanceHealth(); + health.setStatus(GoogleInstanceHealth.Status.valueOf(input.getStatus())); + return health; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleLabeledResource.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleLabeledResource.java new file mode 100644 index 00000000000..0e17a1d46fa --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/GoogleLabeledResource.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Schibsted ASA. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ *
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.model;
+
+import java.util.Map;
+
+public interface GoogleLabeledResource {
+  default String getName() {
+    return null;
+  }
+
+  Map<String, String> getLabels();
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java
new file mode 100644
index 00000000000..70be27ad106
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.model.loadbalancing;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.ToString;
+import lombok.Value;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+@ToString(callSuper = true)
+public class GoogleInternalHttpLoadBalancer extends GoogleLoadBalancer {
+  final GoogleLoadBalancerType type = GoogleLoadBalancerType.INTERNAL_MANAGED;
+  final GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.INTERNAL_MANAGED;
+
+  /** Default backend service a request is sent to if no host rules are matched. */
+  GoogleBackendService defaultService;
+
+  /** List of host rules that map incoming requests to GooglePathMatchers based on host header. */
+  List<GoogleHostRule> hostRules;
+
+  /** SSL certificate. This is populated only if this load balancer is an HTTPS load balancer. */
+  String certificate;
+
+  /**
+   * The name of the UrlMap this load balancer uses to route traffic. In the Google Cloud Console,
+   * the L7 load balancer name is the same as this name.
+   */
+  String urlMapName;
+
+  String network;
+  String subnet;
+
+  @JsonIgnore
+  public InternalHttpLbView getView() {
+    return new InternalHttpLbView();
+  }
+
+  @Value
+  @EqualsAndHashCode(callSuper = true)
+  @ToString(callSuper = true)
+  public class InternalHttpLbView extends GoogleLoadBalancerView {
+    GoogleLoadBalancerType loadBalancerType = GoogleInternalHttpLoadBalancer.this.type;
+    GoogleLoadBalancingScheme loadBalancingScheme =
+        GoogleInternalHttpLoadBalancer.this.loadBalancingScheme;
+
+    String name = GoogleInternalHttpLoadBalancer.this.getName();
+    String account = GoogleInternalHttpLoadBalancer.this.getAccount();
+    String region = GoogleInternalHttpLoadBalancer.this.getRegion();
+    Long createdTime = GoogleInternalHttpLoadBalancer.this.getCreatedTime();
+    String ipAddress = GoogleInternalHttpLoadBalancer.this.getIpAddress();
+    String ipProtocol = GoogleInternalHttpLoadBalancer.this.getIpProtocol();
+    String portRange = GoogleInternalHttpLoadBalancer.this.getPortRange();
+
+    GoogleBackendService defaultService = GoogleInternalHttpLoadBalancer.this.defaultService;
+    List<GoogleHostRule> hostRules = GoogleInternalHttpLoadBalancer.this.hostRules;
+    String certificate = GoogleInternalHttpLoadBalancer.this.certificate;
+    String urlMapName = GoogleInternalHttpLoadBalancer.this.urlMapName;
+    String network = GoogleInternalHttpLoadBalancer.this.network;
+    String subnet = GoogleInternalHttpLoadBalancer.this.subnet;
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTargetProxyType.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTargetProxyType.java
new file mode 100644
index 00000000000..018395445ef
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleTargetProxyType.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.google.model.loadbalancing;
+
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+
+@NonnullByDefault
+public enum GoogleTargetProxyType {
+  HTTP,
+  HTTPS,
+  SSL,
+  TCP,
+  UNKNOWN;
+
+  /**
+   * Given a string representing a resource type (as found in the URI for a target proxy), returns
+   * the corresponding {@link GoogleTargetProxyType}, or {@link GoogleTargetProxyType#UNKNOWN} if no
+   * {@link GoogleTargetProxyType} matches the resource type.
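+   * <p>For example, {@code fromResourceType("targetHttpsProxies")} returns {@link
+   * GoogleTargetProxyType#HTTPS}.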
+ * + * @param identifier the identifier + * @return the corresponding {@link GoogleTargetProxyType} + */ + public static GoogleTargetProxyType fromResourceType(String identifier) { + switch (identifier) { + case "targetHttpProxies": + return GoogleTargetProxyType.HTTP; + case "targetHttpsProxies": + return GoogleTargetProxyType.HTTPS; + case "targetSslProxies": + return GoogleTargetProxyType.SSL; + case "targetTcpProxies": + return GoogleTargetProxyType.TCP; + default: + return UNKNOWN; + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/names/GoogleLabeledResourceNamer.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/names/GoogleLabeledResourceNamer.java new file mode 100644 index 00000000000..bbb31250e5d --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/names/GoogleLabeledResourceNamer.java @@ -0,0 +1,112 @@ +/* + * Copyright 2018 Schibsted ASA. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.names; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource; +import com.netflix.spinnaker.clouddriver.names.NamingStrategy; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Map; +import java.util.function.Consumer; +import org.apache.commons.lang3.StringUtils; +import org.springframework.stereotype.Component; + +@Component +public class GoogleLabeledResourceNamer implements NamingStrategy { + static String GCE_MONIKER_PREFIX = "spinnaker-moniker-"; + static String APP = GCE_MONIKER_PREFIX + "application"; + static String CLUSTER = GCE_MONIKER_PREFIX + "cluster"; + static String DETAIL = GCE_MONIKER_PREFIX + "detail"; + static String STACK = GCE_MONIKER_PREFIX + "stack"; + static String SEQUENCE = GCE_MONIKER_PREFIX + "sequence"; + + @Override + public String getName() { + return "gceAnnotations"; + } + + public void applyMoniker(GoogleLabeledResource labeledResource, Moniker moniker) { + Map templateLabels = labeledResource.getLabels(); + setIfPresent(value -> templateLabels.putIfAbsent(APP, value.toLowerCase()), moniker.getApp()); + setIfPresent( + value -> templateLabels.putIfAbsent(CLUSTER, value.toLowerCase()), moniker.getCluster()); + setIfPresent( + value -> templateLabels.putIfAbsent(DETAIL, value.toLowerCase()), moniker.getDetail()); + setIfPresent( + value -> templateLabels.putIfAbsent(STACK, value.toLowerCase()), moniker.getStack()); + setIfPresent( + value -> templateLabels.put(SEQUENCE, value), + moniker.getSequence() != null + ? 
moniker.getSequence().toString() + : null); // Always overwrite sequence + } + + @Override + public Moniker deriveMoniker(GoogleLabeledResource labeledResource) { + String name = labeledResource.getName(); + Names parsed = Names.parseName(name); + + Moniker moniker = + Moniker.builder() + .app(parsed.getApp()) + .cluster(parsed.getCluster()) + .detail(parsed.getDetail()) + .stack(parsed.getStack()) + .sequence(parsed.getSequence()) + .build(); + + Map labels = labeledResource.getLabels(); + if (moniker.getApp() != null && labels != null) { + setIfPresent(moniker::setApp, labels.get(APP)); + String cluster = labels.get(CLUSTER); + String stack = labels.get(STACK); + String detail = labels.get(DETAIL); + String sequence = labels.get(SEQUENCE); + if (cluster == null && (detail != null || stack != null)) { + // If detail or stack is set and not cluster, we generate the cluster name using frigga + // convention (app-stack-detail) + cluster = getClusterName(moniker.getApp(), stack, detail); + } + setIfPresent(moniker::setStack, stack); + setIfPresent(moniker::setDetail, detail); + setIfPresent(moniker::setCluster, cluster); + setIfPresent(moniker::setSequence, sequence != null ? Integer.parseInt(sequence) : null); + } + return moniker; + } + + private static String getClusterName(String app, String stack, String detail) { + StringBuilder sb = new StringBuilder(app); + if (StringUtils.isNotEmpty(stack)) { + sb.append("-").append(stack); + } + if (StringUtils.isEmpty(stack) && StringUtils.isNotEmpty(detail)) { + sb.append("-"); + } + if (StringUtils.isNotEmpty(detail)) { + sb.append("-").append(detail); + } + return sb.toString(); + } + + private static void setIfPresent(Consumer setter, T value) { + if (value != null) { + setter.accept(value); + } + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java new file mode 100644 index 00000000000..81200a45bae --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java @@ -0,0 +1,1122 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.CLUSTERS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.IMAGES; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.INSTANCES; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.ON_DEMAND; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.BACKEND_SERVICE_NAMES; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.LOAD_BALANCING_POLICY; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.Autoscaler; +import com.google.api.services.compute.model.AutoscalerStatusDetails; +import com.google.api.services.compute.model.AutoscalingPolicy; +import com.google.api.services.compute.model.AutoscalingPolicyCpuUtilization; +import com.google.api.services.compute.model.AutoscalingPolicyCustomMetricUtilization; +import com.google.api.services.compute.model.AutoscalingPolicyLoadBalancingUtilization; +import com.google.api.services.compute.model.AutoscalingPolicyScaleInControl; +import com.google.api.services.compute.model.AutoscalingPolicyScalingSchedule; +import com.google.api.services.compute.model.DistributionPolicy; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceProperties; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.api.services.compute.model.Metadata.Items; +import com.google.api.services.compute.model.NamedPort; +import com.google.common.base.Splitter; +import com.google.common.base.Splitter.MapSplitter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.frigga.ami.AppVersion; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import 
com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.DefaultJsonCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider; +import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder; +import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder.CacheDataBuilder; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.compute.BatchComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.BatchPaginatedComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.compute.InstanceTemplates; +import com.netflix.spinnaker.clouddriver.google.compute.Instances; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.AutoscalingMode; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.CpuUtilization; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.CpuUtilization.PredictiveMethod; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.CustomMetricUtilization; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.CustomMetricUtilization.UtilizationTargetType; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.FixedOrPercent; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.LoadBalancingUtilization; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.ScaleInControl; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.ScalingSchedule; +import com.netflix.spinnaker.clouddriver.google.model.GoogleDistributionPolicy; +import com.netflix.spinnaker.clouddriver.google.model.GoogleInstance; +import com.netflix.spinnaker.clouddriver.google.model.GoogleInstances; +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy; +import com.netflix.spinnaker.clouddriver.google.provider.GoogleInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Stream; +import javax.annotation.Nullable; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.Value; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@ParametersAreNonnullByDefault +public abstract class AbstractGoogleServerGroupCachingAgent + implements 
CachingAgent, OnDemandAgent, AccountAware {
+
+  private static final ImmutableSet<AgentDataType> DATA_TYPES =
+      ImmutableSet.of(
+          AUTHORITATIVE.forType(SERVER_GROUPS.getNs()),
+          AUTHORITATIVE.forType(APPLICATIONS.getNs()),
+          AUTHORITATIVE.forType(CLUSTERS.getNs()),
+          INFORMATIVE.forType(LOAD_BALANCERS.getNs()));
+
+  private static final String ON_DEMAND_TYPE =
+      String.join(":", GoogleCloudProvider.getID(), OnDemandType.ServerGroup.getValue());
+
+  private static final Splitter COMMA = Splitter.on(',').omitEmptyStrings().trimResults();
+  private static final MapSplitter IMAGE_DESCRIPTION_SPLITTER =
+      Splitter.on(',').withKeyValueSeparator(": ");
+
+  private final GoogleNamedAccountCredentials credentials;
+  private final GoogleComputeApiFactory computeApiFactory;
+  private final String region;
+  private final OnDemandMetricsSupport onDemandMetricsSupport;
+  private final ObjectMapper objectMapper;
+  private final Namer<GoogleLabeledResource> naming;
+
+  AbstractGoogleServerGroupCachingAgent(
+      GoogleNamedAccountCredentials credentials,
+      GoogleComputeApiFactory computeApiFactory,
+      Registry registry,
+      String region,
+      ObjectMapper objectMapper) {
+    this.credentials = credentials;
+    this.computeApiFactory = computeApiFactory;
+    this.region = region;
+    this.onDemandMetricsSupport = new OnDemandMetricsSupport(registry, this, ON_DEMAND_TYPE);
+    this.objectMapper = objectMapper;
+    this.naming =
+        NamerRegistry.lookup()
+            .withProvider(GoogleCloudProvider.getID())
+            .withAccount(credentials.getName())
+            .withResource(GoogleLabeledResource.class);
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+
+    try {
+      CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(DATA_TYPES);
+      cacheResultBuilder.setStartTime(System.currentTimeMillis());
+
+      List<GoogleServerGroup> serverGroups = getServerGroups(providerCache);
+
+      // If an entry in ON_DEMAND was generated _after_ we started our caching run, add it to the
+      // cacheResultBuilder, since we may use it in buildCacheResult.
+      //
+      // We don't evict things unless they've been processed because Orca, after sending an
+      // on-demand cache refresh, doesn't consider the request "finished" until it calls
+      // pendingOnDemandRequests and sees a processedCount of 1. In a saner world, Orca would
+      // probably just trust that if the key wasn't returned by pendingOnDemandRequests, it must
+      // have been processed. But we don't live in that world.
+      Set<String> serverGroupKeys =
+          serverGroups.stream().map(this::getServerGroupKey).collect(toImmutableSet());
+      providerCache
+          .getAll(ON_DEMAND.getNs(), serverGroupKeys)
+          .forEach(
+              cacheData -> {
+                long cacheTime = (long) cacheData.getAttributes().get("cacheTime");
+                if (cacheTime < cacheResultBuilder.getStartTime()
+                    && (int) cacheData.getAttributes().get("processedCount") > 0) {
+                  cacheResultBuilder.getOnDemand().getToEvict().add(cacheData.getId());
+                } else {
+                  cacheResultBuilder.getOnDemand().getToKeep().put(cacheData.getId(), cacheData);
+                }
+              });
+
+      CacheResult cacheResult = buildCacheResult(cacheResultBuilder, serverGroups);
+
+      // For all the ON_DEMAND entries that we marked as 'toKeep' earlier, here we mark them as
+      // processed so that they get evicted in future calls to this method. Why can't we just mark
+      // them as evicted here, though? Why wait for another run?
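+      // (Presumably: Orca still needs to observe processedCount > 0 via a pendingOnDemandRequests
+      // call before the entry disappears, so eviction is deferred to the next run, whose check
+      // above (cacheTime < startTime && processedCount > 0) will then match the entry.)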
+ cacheResult + .getCacheResults() + .get(ON_DEMAND.getNs()) + .forEach( + cacheData -> { + cacheData.getAttributes().put("processedTime", System.currentTimeMillis()); + int processedCount = (Integer) cacheData.getAttributes().get("processedCount"); + cacheData.getAttributes().put("processedCount", processedCount + 1); + }); + + return cacheResult; + } catch (IOException e) { + // CatsOnDemandCacheUpdater handles this + throw new UncheckedIOException(e); + } + } + + @Override + public boolean handles(OnDemandType type, String cloudProvider) { + return OnDemandType.ServerGroup.equals(type) + && GoogleCloudProvider.getID().equals(cloudProvider); + } + + @Nullable + @Override + public OnDemandResult handle(ProviderCache providerCache, Map data) { + + try { + String serverGroupName = (String) data.get("serverGroupName"); + if (serverGroupName == null + || !getAccountName().equals(data.get("account")) + || !region.equals(data.get("region"))) { + return null; + } + + Optional serverGroup = + getMetricsSupport().readData(() -> getServerGroup(serverGroupName, providerCache)); + + CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(); + + if (serverGroup.isPresent()) { + String serverGroupKey = getServerGroupKey(serverGroup.get()); + CacheResult result = + getMetricsSupport() + .transformData( + () -> + buildCacheResult(cacheResultBuilder, ImmutableList.of(serverGroup.get()))); + String cacheResults = objectMapper.writeValueAsString(result.getCacheResults()); + CacheData cacheData = + getMetricsSupport() + .onDemandStore( + () -> + new DefaultCacheData( + serverGroupKey, + /* ttlSeconds= */ (int) Duration.ofMinutes(10).getSeconds(), + ImmutableMap.of( + "cacheTime", + System.currentTimeMillis(), + "cacheResults", + cacheResults, + "processedCount", + 0), + /* relationships= */ ImmutableMap.of())); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData); + return new OnDemandResult( + getOnDemandAgentType(), result, /* evictions= */ ImmutableMap.of()); + } else { + Collection existingIdentifiers = + getOnDemandKeysToEvictForMissingServerGroup(providerCache, serverGroupName); + providerCache.evictDeletedItems(ON_DEMAND.getNs(), existingIdentifiers); + return new OnDemandResult( + getOnDemandAgentType(), + new DefaultCacheResult(ImmutableMap.of()), + ImmutableMap.of(SERVER_GROUPS.getNs(), ImmutableList.copyOf(existingIdentifiers))); + } + } catch (IOException e) { + // CatsOnDemandCacheUpdater handles this + throw new UncheckedIOException(e); + } + } + + /** + * Return the keys that will be evicted from the on-demand cache if the given serverGroupName + * can't be found in the region. 
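+   * <p>For example, the regional agent below returns the single region-scoped server group key;
+   * other subclasses may instead need to produce several candidate keys (such as one per zone).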
+ */ + abstract Collection getOnDemandKeysToEvictForMissingServerGroup( + ProviderCache providerCache, String serverGroupName); + + @Override + public Collection> pendingOnDemandRequests(ProviderCache providerCache) { + List ownedKeys = + providerCache.getIdentifiers(ON_DEMAND.getNs()).stream() + .filter(this::keyOwnedByThisAgent) + .collect(toImmutableList()); + + return providerCache.getAll(ON_DEMAND.getNs(), ownedKeys).stream() + .map( + cacheData -> { + Map map = new HashMap<>(); + map.put("details", Keys.parse(cacheData.getId())); + map.put("moniker", cacheData.getAttributes().get("moniker")); + map.put("cacheTime", cacheData.getAttributes().get("cacheTime")); + map.put("processedCount", cacheData.getAttributes().get("processedCount")); + map.put("processedTime", cacheData.getAttributes().get("processedTime")); + return map; + }) + .collect(toImmutableList()); + } + + private boolean keyOwnedByThisAgent(String key) { + Map parsedKey = Keys.parse(key); + return parsedKey != null + && parsedKey.get("type").equals(SERVER_GROUPS.getNs()) + && keyOwnedByThisAgent(parsedKey); + } + + /** + * Return whether or not the parsed key data (as returned from {@link + * Keys#parseKey(java.lang.String)} is a key that is "owned" by this caching agent. The key type + * will be checked before this method is called. + */ + abstract boolean keyOwnedByThisAgent(Map parsedKey); + + private CacheResult buildCacheResult( + CacheResultBuilder cacheResultBuilder, List serverGroups) { + + try { + for (GoogleServerGroup serverGroup : serverGroups) { + + Moniker moniker = naming.deriveMoniker(serverGroup); + + String applicationKey = Keys.getApplicationKey(moniker.getApp()); + String clusterKey = + Keys.getClusterKey(getAccountName(), moniker.getApp(), moniker.getCluster()); + String serverGroupKey = getServerGroupKey(serverGroup); + Set instanceKeys = + serverGroup.getInstances().stream() + .map(instance -> Keys.getInstanceKey(getAccountName(), region, instance.getName())) + .collect(toImmutableSet()); + + CacheDataBuilder application = + cacheResultBuilder.namespace(APPLICATIONS.getNs()).keep(applicationKey); + application.getAttributes().put("name", moniker.getApp()); + application.getRelationships().get(CLUSTERS.getNs()).add(clusterKey); + application.getRelationships().get(INSTANCES.getNs()).addAll(instanceKeys); + + CacheDataBuilder cluster = cacheResultBuilder.namespace(CLUSTERS.getNs()).keep(clusterKey); + cluster.getAttributes().put("name", moniker.getCluster()); + cluster.getAttributes().put("accountName", getAccountName()); + cluster.getAttributes().put("moniker", moniker); + cluster.getRelationships().get(APPLICATIONS.getNs()).add(applicationKey); + cluster.getRelationships().get(SERVER_GROUPS.getNs()).add(serverGroupKey); + cluster.getRelationships().get(INSTANCES.getNs()).addAll(instanceKeys); + + Set loadBalancerKeys = getLoadBalancerKeys(serverGroup); + loadBalancerKeys.forEach( + key -> + cacheResultBuilder + .namespace(LOAD_BALANCERS.getNs()) + .keep(key) + .getRelationships() + .get(SERVER_GROUPS.getNs()) + .add(serverGroupKey)); + + if (shouldUseOnDemandData(cacheResultBuilder, serverGroupKey)) { + moveOnDemandDataToNamespace(cacheResultBuilder, serverGroup); + } else { + CacheDataBuilder serverGroupCacheData = + cacheResultBuilder.namespace(SERVER_GROUPS.getNs()).keep(serverGroupKey); + serverGroupCacheData.setAttributes( + objectMapper.convertValue(serverGroup, new TypeReference>() {})); + serverGroupCacheData.getRelationships().get(APPLICATIONS.getNs()).add(applicationKey); + 
serverGroupCacheData.getRelationships().get(CLUSTERS.getNs()).add(clusterKey); + serverGroupCacheData + .getRelationships() + .get(LOAD_BALANCERS.getNs()) + .addAll(loadBalancerKeys); + serverGroupCacheData.getRelationships().get(INSTANCES.getNs()).addAll(instanceKeys); + } + } + } catch (IOException e) { + // CatsOnDemandCacheUpdater handles this + throw new UncheckedIOException(e); + } + + return cacheResultBuilder.build(); + } + + private ImmutableSet getLoadBalancerKeys(GoogleServerGroup serverGroup) { + ImmutableSet.Builder loadBalancerKeys = ImmutableSet.builder(); + nullableStream((Collection) serverGroup.getAsg().get(REGIONAL_LOAD_BALANCER_NAMES)) + .map(name -> Keys.getLoadBalancerKey(region, getAccountName(), name)) + .forEach(loadBalancerKeys::add); + nullableStream((Collection) serverGroup.getAsg().get(GLOBAL_LOAD_BALANCER_NAMES)) + .map(name -> Keys.getLoadBalancerKey("global", getAccountName(), name)) + .forEach(loadBalancerKeys::add); + return loadBalancerKeys.build(); + } + + private static Stream nullableStream(@Nullable Collection collection) { + return Optional.ofNullable(collection).orElse(ImmutableList.of()).stream(); + } + + private static boolean shouldUseOnDemandData( + CacheResultBuilder cacheResultBuilder, String serverGroupKey) { + CacheData cacheData = cacheResultBuilder.getOnDemand().getToKeep().get(serverGroupKey); + return cacheData != null + && (long) cacheData.getAttributes().get("cacheTime") > cacheResultBuilder.getStartTime(); + } + + private void moveOnDemandDataToNamespace( + CacheResultBuilder cacheResultBuilder, GoogleServerGroup serverGroup) throws IOException { + + String serverGroupKey = getServerGroupKey(serverGroup); + Map> onDemandData = + objectMapper.readValue( + (String) + cacheResultBuilder + .getOnDemand() + .getToKeep() + .get(serverGroupKey) + .getAttributes() + .get("cacheResults"), + new TypeReference>>() {}); + onDemandData.forEach( + (namespace, cacheDatas) -> { + if (namespace.equals(ON_DEMAND.getNs())) { + return; + } + + cacheDatas.forEach( + cacheData -> { + CacheDataBuilder cacheDataBuilder = + cacheResultBuilder.namespace(namespace).keep(cacheData.getId()); + cacheDataBuilder.setAttributes(cacheData.getAttributes()); + cacheDataBuilder.setRelationships( + Utils.mergeOnDemandCacheRelationships( + cacheData.getRelationships(), cacheDataBuilder.getRelationships())); + cacheResultBuilder.getOnDemand().getToKeep().remove(cacheData.getId()); + }); + }); + } + + private String getServerGroupKey(GoogleServerGroup serverGroup) { + return Keys.getServerGroupKey( + serverGroup.getName(), + naming.deriveMoniker(serverGroup).getCluster(), + getAccountName(), + region, + serverGroup.getZone()); + } + + private List getServerGroups(ProviderCache providerCache) throws IOException { + + ImmutableList instances = + retrieveAllInstancesInRegion().stream() + .map(instance -> GoogleInstances.createFromComputeInstance(instance, credentials)) + .collect(toImmutableList()); + return constructServerGroups( + providerCache, + retrieveInstanceGroupManagers(), + instances, + retrieveInstanceTemplates(), + retrieveAutoscalers()); + } + + /** + * Return all the instance group managers in this region that are handled by this caching agent. + */ + abstract Collection retrieveInstanceGroupManagers() throws IOException; + + /** + * Return all the autoscalers in this region that might apply to instance group managers returned + * from {@link #retrieveInstanceGroupManagers()}. 
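+   * <p>Returning a superset is safe here: autoscalers are matched to managers by (target, region,
+   * zone) in constructServerGroups, and any autoscaler that matches no manager is simply ignored.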
+ */ + abstract Collection retrieveAutoscalers() throws IOException; + + private Optional getServerGroup(String name, ProviderCache providerCache) { + + InstanceTemplates instanceTemplatesApi = computeApiFactory.createInstanceTemplates(credentials); + + try { + Optional managerOpt = retrieveInstanceGroupManager(name); + + if (!managerOpt.isPresent()) { + return Optional.empty(); + } + + InstanceGroupManager manager = managerOpt.get(); + + List instances = + retrieveRelevantInstances(manager).stream() + .map(instance -> GoogleInstances.createFromComputeInstance(instance, credentials)) + .collect(toImmutableList()); + + List autoscalers = + retrieveAutoscaler(manager).map(ImmutableList::of).orElse(ImmutableList.of()); + + List instanceTemplates = new ArrayList<>(); + if (manager.getInstanceTemplate() != null) { + instanceTemplatesApi + .get(Utils.getLocalName(manager.getInstanceTemplate())) + .executeGet() + .ifPresent(instanceTemplates::add); + } + + return constructServerGroups( + providerCache, ImmutableList.of(manager), instances, instanceTemplates, autoscalers) + .stream() + .findAny(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** + * Retrieve the instance group manager named {@code name} that is managed by this caching agent. + */ + abstract Optional retrieveInstanceGroupManager(String name) + throws IOException; + + /** Retrieve the autoscaler that handles scaling for {@code manager}. */ + abstract Optional retrieveAutoscaler(InstanceGroupManager manager) throws IOException; + + /** + * Retrieve all instances in this region that may be managed by {@code manager}. A later + * step will winnow these down, so this should be a superset of those instances. + */ + abstract Collection retrieveRelevantInstances(InstanceGroupManager manager) + throws IOException; + + @Value + private static class TargetAndScope { + String target; + @Nullable String region; + @Nullable String zone; + + static TargetAndScope forAutoscaler(Autoscaler autoscaler) { + return new TargetAndScope( + Utils.getLocalName(autoscaler.getTarget()), + Utils.getLocalName(autoscaler.getRegion()), + Utils.getLocalName(autoscaler.getZone())); + } + + static TargetAndScope forInstanceGroupManager(InstanceGroupManager manager) { + return new TargetAndScope( + manager.getName(), + Utils.getLocalName(manager.getRegion()), + Utils.getLocalName(manager.getZone())); + } + } + + private List constructServerGroups( + ProviderCache providerCache, + Collection managers, + Collection instances, + Collection instanceTemplates, + Collection autoscalers) { + + Map autoscalerMap = + autoscalers.stream() + .collect(toImmutableMap(TargetAndScope::forAutoscaler, scaler -> scaler)); + Map instanceTemplatesMap = + instanceTemplates.stream().collect(toImmutableMap(InstanceTemplate::getName, i -> i)); + return managers.stream() + .map( + manager -> { + ImmutableSet ownedInstances = ImmutableSet.of(); + if (manager.getBaseInstanceName() != null) { + ownedInstances = + instances.stream() + .filter( + instance -> + instance.getName().startsWith(manager.getBaseInstanceName())) + .filter(instance -> instanceScopedToManager(instance, manager)) + .collect(toImmutableSet()); + } + TargetAndScope key = TargetAndScope.forInstanceGroupManager(manager); + Autoscaler autoscaler = autoscalerMap.get(key); + InstanceTemplate instanceTemplate = + instanceTemplatesMap.get(Utils.getLocalName(manager.getInstanceTemplate())); + return createServerGroup( + manager, ownedInstances, instanceTemplate, autoscaler, providerCache); + }) + 
.collect(toImmutableList()); + } + + private boolean instanceScopedToManager(GoogleInstance instance, InstanceGroupManager manager) { + if (manager.getZone() == null) { + // For a regional manager, all zones are in scope. (All zones in the region, anyway, which are + // the only instances we retrieved.) + return true; + } else { + return Utils.getLocalName(manager.getZone()).equals(instance.getZone()); + } + } + + private GoogleServerGroup createServerGroup( + InstanceGroupManager manager, + ImmutableSet instances, + @Nullable InstanceTemplate instanceTemplate, + @Nullable Autoscaler autoscaler, + ProviderCache providerCache) { + + GoogleServerGroup serverGroup = new GoogleServerGroup(); + serverGroup.setName(manager.getName()); + setRegionConfig(serverGroup, manager); + serverGroup.setAccount(getAccountName()); + serverGroup.setInstances(instances); + serverGroup.setNamedPorts(convertNamedPorts(manager)); + serverGroup.setSelfLink(manager.getSelfLink()); + serverGroup.setCurrentActions(manager.getCurrentActions()); + + setLaunchConfig(serverGroup, manager, instanceTemplate, providerCache); + setAutoscalerGroup(serverGroup, manager, instanceTemplate); + if (instanceTemplate != null) { + InstanceProperties properties = instanceTemplate.getProperties(); + if (properties != null) { + serverGroup.setCanIpForward(properties.getCanIpForward()); + if (properties.getServiceAccounts() != null) { + serverGroup.setInstanceTemplateServiceAccounts( + ImmutableSet.copyOf(properties.getServiceAccounts())); + } + if (properties.getTags() != null && properties.getTags().getItems() != null) { + serverGroup.setInstanceTemplateTags(ImmutableSet.copyOf(properties.getTags().getItems())); + } + if (properties.getLabels() != null) { + serverGroup.setInstanceTemplateLabels(ImmutableMap.copyOf(properties.getLabels())); + } + if (properties.getNetworkInterfaces() != null + && !properties.getNetworkInterfaces().isEmpty() + && properties.getNetworkInterfaces().get(0) != null) { + serverGroup.setNetworkName( + Utils.decorateXpnResourceIdIfNeeded( + credentials.getProject(), properties.getNetworkInterfaces().get(0).getNetwork())); + } + } + } + serverGroup.setStatefulPolicy(manager.getStatefulPolicy()); + if (manager.getAutoHealingPolicies() != null && !manager.getAutoHealingPolicies().isEmpty()) { + serverGroup.setAutoHealingPolicy(manager.getAutoHealingPolicies().get(0)); + } + populateAutoscaler(serverGroup, autoscaler); + return serverGroup; + } + + private void setRegionConfig(GoogleServerGroup serverGroup, InstanceGroupManager manager) { + + serverGroup.setRegional(manager.getZone() == null); + + if (serverGroup.getRegional()) { + serverGroup.setRegion(Utils.getLocalName(manager.getRegion())); + DistributionPolicy distributionPolicy = manager.getDistributionPolicy(); + ImmutableList zones = getZones(distributionPolicy); + serverGroup.setZones(ImmutableSet.copyOf(zones)); + serverGroup.setDistributionPolicy( + new GoogleDistributionPolicy(zones, getTargetShape(distributionPolicy))); + } else { + String zone = Utils.getLocalName(manager.getZone()); + serverGroup.setZone(zone); + serverGroup.setZones(ImmutableSet.of(zone)); + serverGroup.setRegion(credentials.regionFromZone(zone)); + } + } + + private static ImmutableList getZones(@Nullable DistributionPolicy distributionPolicy) { + if (distributionPolicy == null || distributionPolicy.getZones() == null) { + return ImmutableList.of(); + } + return distributionPolicy.getZones().stream() + .map(z -> Utils.getLocalName(z.getZone())) + .collect(toImmutableList()); + } + 
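+  // Illustrative example (hypothetical values): a DistributionPolicy whose zones are
+  // ".../zones/us-east1-b" and ".../zones/us-east1-c" yields ["us-east1-b", "us-east1-c"] after
+  // Utils.getLocalName above, while getTargetShape below passes the shape (e.g. "EVEN") through
+  // unchanged.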
+ @Nullable + private static String getTargetShape(@Nullable DistributionPolicy distributionPolicy) { + if (distributionPolicy == null) { + return null; + } + return distributionPolicy.getTargetShape(); + } + + @Nullable + private static ImmutableMap convertNamedPorts(InstanceGroupManager manager) { + if (manager.getNamedPorts() == null) { + return null; + } + return manager.getNamedPorts().stream() + .filter(namedPort -> namedPort.getName() != null) + .filter(namedPort -> namedPort.getPort() != null) + .collect(toImmutableMap(NamedPort::getName, NamedPort::getPort)); + } + + private void setLaunchConfig( + GoogleServerGroup serverGroup, + InstanceGroupManager manager, + @Nullable InstanceTemplate instanceTemplate, + ProviderCache providerCache) { + + HashMap launchConfig = new HashMap<>(); + launchConfig.put("createdTime", Utils.getTimeFromTimestamp(manager.getCreationTimestamp())); + + if (instanceTemplate != null) { + launchConfig.put("launchConfigurationName", instanceTemplate.getName()); + launchConfig.put("instanceTemplate", instanceTemplate); + if (instanceTemplate.getProperties() != null) { + List disks = getDisks(instanceTemplate); + instanceTemplate.getProperties().setDisks(disks); + if (instanceTemplate.getProperties().getMachineType() != null) { + launchConfig.put("instanceType", instanceTemplate.getProperties().getMachineType()); + } + if (instanceTemplate.getProperties().getMinCpuPlatform() != null) { + launchConfig.put("minCpuPlatform", instanceTemplate.getProperties().getMinCpuPlatform()); + } + setSourceImage(serverGroup, launchConfig, disks, providerCache); + } + } + serverGroup.setLaunchConfig(copyToImmutableMapWithoutNullValues(launchConfig)); + } + + private static ImmutableList getDisks(InstanceTemplate template) { + + if (template.getProperties() == null || template.getProperties().getDisks() == null) { + return ImmutableList.of(); + } + List persistentDisks = + template.getProperties().getDisks().stream() + .filter(disk -> "PERSISTENT".equals(disk.getType())) + .collect(toImmutableList()); + + if (persistentDisks.isEmpty() || persistentDisks.get(0).getBoot()) { + return ImmutableList.copyOf(template.getProperties().getDisks()); + } + + ImmutableList.Builder sortedDisks = ImmutableList.builder(); + Optional firstBootDisk = + persistentDisks.stream().filter(AttachedDisk::getBoot).findFirst(); + firstBootDisk.ifPresent(sortedDisks::add); + template.getProperties().getDisks().stream() + .filter(disk -> !disk.getBoot()) + .forEach(sortedDisks::add); + return sortedDisks.build(); + } + + private void setSourceImage( + GoogleServerGroup serverGroup, + Map launchConfig, + List disks, + ProviderCache providerCache) { + + if (disks.isEmpty()) { + return; + } + // Disks were sorted so boot disk comes first + AttachedDisk firstDisk = disks.get(0); + if (!firstDisk.getBoot()) { + return; + } + + if (firstDisk.getInitializeParams() != null + && firstDisk.getInitializeParams().getSourceImage() != null) { + String sourceImage = Utils.getLocalName(firstDisk.getInitializeParams().getSourceImage()); + launchConfig.put("imageId", sourceImage); + String imageKey = Keys.getImageKey(getAccountName(), sourceImage); + CacheData image = providerCache.get(IMAGES.getNs(), imageKey); + if (image != null) { + String description = + (String) ((Map) image.getAttributes().get("image")).get("description"); + ImmutableMap buildInfo = createBuildInfo(description); + if (buildInfo != null) { + serverGroup.setBuildInfo(buildInfo); + } + } + } + } + + @Nullable + private static ImmutableMap 
createBuildInfo(@Nullable String imageDescription) { + if (imageDescription == null) { + return null; + } + Map tags; + try { + tags = IMAGE_DESCRIPTION_SPLITTER.split(imageDescription); + } catch (IllegalArgumentException e) { + return null; + } + if (!tags.containsKey("appversion")) { + return null; + } + AppVersion appversion = AppVersion.parseName(tags.get("appversion")); + if (appversion == null) { + return null; + } + Map buildInfo = new HashMap<>(); + buildInfo.put("package_name", appversion.getPackageName()); + buildInfo.put("version", appversion.getVersion()); + buildInfo.put("commit", appversion.getCommit()); + if (appversion.getBuildJobName() != null) { + Map jenkinsInfo = new HashMap<>(); + jenkinsInfo.put("name", appversion.getBuildJobName()); + jenkinsInfo.put("number", appversion.getBuildNumber()); + if (tags.containsKey("build_host")) { + jenkinsInfo.put("host", tags.get("build_host")); + } + buildInfo.put("jenkins", copyToImmutableMap((jenkinsInfo))); + } + if (tags.containsKey("build_info_url")) { + buildInfo.put("buildInfoUrl", tags.get("build_info_url")); + } + return copyToImmutableMap(buildInfo); + } + + private static ImmutableMap copyToImmutableMap(Map map) { + return map.entrySet().stream() + .filter(e -> e.getValue() != null) + .collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private void setAutoscalerGroup( + GoogleServerGroup serverGroup, + InstanceGroupManager manager, + @Nullable InstanceTemplate instanceTemplate) { + + Map autoscalerGroup = new HashMap<>(); + + if (manager.getTargetSize() != null) { + autoscalerGroup.put("minSize", manager.getTargetSize()); + autoscalerGroup.put("maxSize", manager.getTargetSize()); + autoscalerGroup.put("desiredCapacity", manager.getTargetSize()); + } + + if (instanceTemplate != null + && instanceTemplate.getProperties() != null + && instanceTemplate.getProperties().getMetadata() != null + && instanceTemplate.getProperties().getMetadata().getItems() != null) { + + ImmutableMap metadata = + instanceTemplate.getProperties().getMetadata().getItems().stream() + .filter(item -> item.getKey() != null) + .filter(item -> item.getValue() != null) + .collect(toImmutableMap(Items::getKey, Items::getValue)); + + if (metadata.containsKey(GLOBAL_LOAD_BALANCER_NAMES)) { + autoscalerGroup.put( + GLOBAL_LOAD_BALANCER_NAMES, + COMMA.splitToList(metadata.get(GLOBAL_LOAD_BALANCER_NAMES))); + } + + if (metadata.containsKey(REGIONAL_LOAD_BALANCER_NAMES)) { + autoscalerGroup.put( + REGIONAL_LOAD_BALANCER_NAMES, + COMMA.splitToList(metadata.get(REGIONAL_LOAD_BALANCER_NAMES))); + List loadBalancerNames = + Utils.deriveNetworkLoadBalancerNamesFromTargetPoolUrls(manager.getTargetPools()); + + // The isDisabled property of a server group is set based on whether there are associated + // target pools, + // and whether the metadata of the server group contains a list of load balancers to + // actually + // associate + // the server group with. + // We set the disabled state for L4 lBs here (before writing into the cache) and calculate + // the L7 disabled state when we read the server groups from the cache. 
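+      // Put differently (L4 path only): a server group whose metadata names regional load
+      // balancers, but whose manager has no target pools resolving to network load balancer
+      // names, is written to the cache as disabled.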
+ serverGroup.setDisabled(loadBalancerNames.isEmpty()); + } + + if (metadata.containsKey(BACKEND_SERVICE_NAMES)) { + autoscalerGroup.put( + BACKEND_SERVICE_NAMES, COMMA.splitToList(metadata.get(BACKEND_SERVICE_NAMES))); + } + + if (metadata.containsKey(REGION_BACKEND_SERVICE_NAMES)) { + autoscalerGroup.put( + REGION_BACKEND_SERVICE_NAMES, + COMMA.splitToList(metadata.get(REGION_BACKEND_SERVICE_NAMES))); + } + + if (metadata.containsKey(LOAD_BALANCING_POLICY)) { + try { + autoscalerGroup.put( + LOAD_BALANCING_POLICY, + objectMapper.readValue( + metadata.get(LOAD_BALANCING_POLICY), GoogleHttpLoadBalancingPolicy.class)); + } catch (IOException e) { + log.warn("Error parsing load balancing policy", e); + } + } + } + + serverGroup.setAsg(copyToImmutableMapWithoutNullValues(autoscalerGroup)); + } + + private static void populateAutoscaler( + GoogleServerGroup serverGroup, @Nullable Autoscaler autoscaler) { + + if (autoscaler == null) { + return; + } + + AutoscalingPolicy autoscalingPolicy = autoscaler.getAutoscalingPolicy(); + if (autoscalingPolicy != null) { + serverGroup.setAutoscalingPolicy(convertAutoscalingPolicy(autoscalingPolicy)); + // is asg possibly null??? + HashMap autoscalingGroup = new HashMap<>(serverGroup.getAsg()); + autoscalingGroup.put("minSize", autoscalingPolicy.getMinNumReplicas()); + autoscalingGroup.put("maxSize", autoscalingPolicy.getMaxNumReplicas()); + serverGroup.setAsg(copyToImmutableMapWithoutNullValues(autoscalingGroup)); + } + if (autoscaler.getStatusDetails() != null) { + serverGroup.setAutoscalingMessages( + autoscaler.getStatusDetails().stream() + .map(AutoscalerStatusDetails::getMessage) + .filter(Objects::nonNull) + .collect(toImmutableList())); + } + } + + private static GoogleAutoscalingPolicy convertAutoscalingPolicy(AutoscalingPolicy input) { + CpuUtilization cpu = convertCpuUtilization(input.getCpuUtilization()); + LoadBalancingUtilization loadBalancing = + convertLoadBalancingUtilization(input.getLoadBalancingUtilization()); + List customMetrics = + convertCustomMetricUtilizations(input.getCustomMetricUtilizations()); + List scalingSchedules = convertScalingSchedules(input.getScalingSchedules()); + GoogleAutoscalingPolicy output = new GoogleAutoscalingPolicy(); + output.setCoolDownPeriodSec(input.getCoolDownPeriodSec()); + output.setCpuUtilization(cpu); + output.setCustomMetricUtilizations(customMetrics); + output.setScalingSchedules(scalingSchedules); + output.setLoadBalancingUtilization(loadBalancing); + output.setMaxNumReplicas(input.getMaxNumReplicas()); + output.setMinNumReplicas(input.getMinNumReplicas()); + output.setMode(convertAutoscalingMode(input.getMode())); + output.setScaleInControl(convertScaleInControl(input.getScaleInControl())); + return output; + } + + @Nullable + private static CpuUtilization convertCpuUtilization( + @Nullable AutoscalingPolicyCpuUtilization input) { + if (input == null) { + return null; + } + CpuUtilization output = new CpuUtilization(); + output.setUtilizationTarget(input.getUtilizationTarget()); + output.setPredictiveMethod(valueOf(PredictiveMethod.class, input.getPredictiveMethod())); + return output; + } + + @Nullable + private static LoadBalancingUtilization convertLoadBalancingUtilization( + @Nullable AutoscalingPolicyLoadBalancingUtilization input) { + if (input == null) { + return null; + } + LoadBalancingUtilization output = new LoadBalancingUtilization(); + output.setUtilizationTarget(input.getUtilizationTarget()); + return output; + } + + @Nullable + private static ImmutableList 
convertCustomMetricUtilizations( + @Nullable List input) { + if (input == null) { + return null; + } + return input.stream() + .map(AbstractGoogleServerGroupCachingAgent::convertCustomMetricUtilization) + .collect(toImmutableList()); + } + + private static CustomMetricUtilization convertCustomMetricUtilization( + AutoscalingPolicyCustomMetricUtilization input) { + CustomMetricUtilization output = new CustomMetricUtilization(); + output.setMetric(input.getMetric()); + output.setUtilizationTarget(input.getUtilizationTarget()); + output.setUtilizationTargetType( + valueOf(UtilizationTargetType.class, input.getUtilizationTargetType())); + return output; + } + + // ONLY_UP is deprecated, but may still be around in existing autoscaling policies + // (as of Q4 2020), so we'll just transform it into the replacement ASAP. + private static AutoscalingMode convertAutoscalingMode(@Nullable String input) { + if (Objects.equals(input, "ONLY_UP")) { + return AutoscalingMode.ONLY_SCALE_OUT; + } else { + return valueOf(AutoscalingMode.class, input); + } + } + + private static ScaleInControl convertScaleInControl( + @Nullable AutoscalingPolicyScaleInControl input) { + if (input == null) { + return null; + } + FixedOrPercent maxScaledInReplicas = null; + if (input.getMaxScaledInReplicas() != null) { + maxScaledInReplicas = new FixedOrPercent(); + maxScaledInReplicas.setFixed(input.getMaxScaledInReplicas().getFixed()); + maxScaledInReplicas.setPercent(input.getMaxScaledInReplicas().getPercent()); + } + ScaleInControl output = new ScaleInControl(); + output.setTimeWindowSec(input.getTimeWindowSec()); + output.setMaxScaledInReplicas(maxScaledInReplicas); + return output; + } + + private static List convertScalingSchedules( + @Nullable Map map) { + if (map == null) { + return null; + } + List scalingSchedules = new ArrayList<>(); + for (String key : map.keySet()) { + AutoscalingPolicyScalingSchedule input = map.get(key); + ScalingSchedule output = new ScalingSchedule(); + output.setScheduleName(key); + output.setScheduleDescription(input.getDescription()); + output.setEnabled(!input.getDisabled()); + output.setDuration(input.getDurationSec()); + output.setMinimumRequiredInstances(input.getMinRequiredReplicas()); + output.setScheduleCron(input.getSchedule()); + output.setTimezone(input.getTimeZone()); + scalingSchedules.add(output); + } + return scalingSchedules; + } + + private static > T valueOf(Class enumType, @Nullable String value) { + if (value == null) { + return null; + } + try { + return Enum.valueOf(enumType, value); + } catch (IllegalArgumentException e) { + return null; + } + } + + private static ImmutableMap copyToImmutableMapWithoutNullValues(Map map) { + return map.entrySet().stream() + .filter(e -> e.getValue() != null) + .collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + Collection retrieveAllInstancesInRegion() throws IOException { + + Instances instancesApi = computeApiFactory.createInstances(credentials); + BatchPaginatedComputeRequest instancesRequest = + computeApiFactory.createPaginatedBatchRequest(credentials); + + getZonesForRegion().forEach(zone -> instancesRequest.queue(instancesApi.list(zone))); + + return instancesRequest.execute(getBatchContext(".instance")); + } + + private Collection retrieveInstanceTemplates() throws IOException { + InstanceTemplates instanceTemplatesApi = computeApiFactory.createInstanceTemplates(credentials); + return instanceTemplatesApi.list().execute(); + } + + Collection getZonesForRegion() { + return 
Optional.ofNullable(credentials.getZonesFromRegion(region)).orElse(ImmutableList.of()); + } + + String getBatchContext(String subcontext) { + return String.join(".", getBatchContextPrefix(), subcontext); + } + + /** + * Get the first part of the batch context that will be passed to the {@link BatchComputeRequest + * batch requests} used by this caching agent. + */ + abstract String getBatchContextPrefix(); + + @Override + public String getProviderName() { + return GoogleInfrastructureProvider.class.getName(); + } + + @Override + public Collection getProvidedDataTypes() { + return DATA_TYPES; + } + + @Override + public String getAgentType() { + return String.format("%s/%s/%s", getAccountName(), region, getClass().getSimpleName()); + } + + @Override + public String getOnDemandAgentType() { + return getAgentType() + "-OnDemand"; + } + + @Override + public OnDemandMetricsSupport getMetricsSupport() { + return onDemandMetricsSupport; + } + + @Override + public String getAccountName() { + return credentials.getName(); + } + + GoogleNamedAccountCredentials getCredentials() { + return credentials; + } + + GoogleComputeApiFactory getComputeApiFactory() { + return computeApiFactory; + } + + String getRegion() { + return region; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java new file mode 100644 index 00000000000..da4d75176e8 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java @@ -0,0 +1,717 @@ +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTargetProxyType.HTTP; +import static com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTargetProxyType.HTTPS; +import static java.util.stream.Collectors.toList; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.services.compute.ComputeRequest; +import com.google.api.services.compute.model.*; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.*; +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.GroupHealthRequest; +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution; +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GoogleInternalHttpLoadBalancerCachingAgent + 
extends AbstractGoogleLoadBalancerCachingAgent {
+  private static final Logger log =
+      LoggerFactory.getLogger(GoogleInternalHttpLoadBalancerCachingAgent.class);
+
+  public GoogleInternalHttpLoadBalancerCachingAgent(
+      String clouddriverUserAgentApplicationName,
+      GoogleNamedAccountCredentials credentials,
+      ObjectMapper objectMapper,
+      Registry registry,
+      String region) {
+    super(clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region);
+  }
+
+  @Override
+  public List<GoogleLoadBalancer> constructLoadBalancers(String onDemandLoadBalancerName) {
+    List<GoogleLoadBalancer> loadBalancers = new ArrayList<>();
+    List<String> failedLoadBalancers = new ArrayList<>();
+
+    GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest();
+    GoogleBatchRequest targetProxyRequest = buildGoogleBatchRequest();
+    GoogleBatchRequest urlMapRequest = buildGoogleBatchRequest();
+    GoogleBatchRequest groupHealthRequest = buildGoogleBatchRequest();
+
+    // Reset the local getHealth caches/queues each caching agent cycle.
+    bsNameToGroupHealthsMap = new HashMap<>();
+    queuedBsGroupHealthRequests = new HashSet<>();
+    resolutions = new HashSet<>();
+
+    List<BackendService> projectBackendServices =
+        GCEUtil.fetchRegionBackendServices(this, getCompute(), getProject(), getRegion());
+    List<HealthCheck> projectHealthChecks =
+        GCEUtil.fetchRegionalHealthChecks(this, getCompute(), getProject(), getRegion());
+
+    ForwardingRuleCallbacks forwardingRuleCallbacks =
+        new ForwardingRuleCallbacks(
+            loadBalancers,
+            failedLoadBalancers,
+            targetProxyRequest,
+            urlMapRequest,
+            groupHealthRequest,
+            projectBackendServices,
+            projectHealthChecks);
+
+    try {
+      if (onDemandLoadBalancerName != null) {
+        ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback =
+            forwardingRuleCallbacks.newForwardingRuleSingletonCallback();
+        forwardingRulesRequest.queue(
+            getCompute().forwardingRules().get(getProject(), getRegion(), onDemandLoadBalancerName),
+            frCallback);
+      } else {
+        ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback =
+            forwardingRuleCallbacks.newForwardingRuleListCallback();
+        new PaginatedRequest<ForwardingRuleList>(this) {
+          @Override
+          public ComputeRequest<ForwardingRuleList> request(String pageToken) {
+            try {
+              return getCompute()
+                  .forwardingRules()
+                  .list(getProject(), getRegion())
+                  .setPageToken(pageToken);
+            } catch (IOException e) {
+              throw new UncheckedIOException(e);
+            }
+          }
+
+          @Override
+          public String getNextPageToken(ForwardingRuleList forwardingRuleList) {
+            return forwardingRuleList.getNextPageToken();
+          }
+        }.queue(
+            forwardingRulesRequest, frlCallback, "InternalHttpLoadBalancerCaching.forwardingRules");
+      }
+
+      executeIfRequestsAreQueued(
+          forwardingRulesRequest, "InternalHttpLoadBalancerCaching.forwardingRules");
+      executeIfRequestsAreQueued(targetProxyRequest, "InternalHttpLoadBalancerCaching.targetProxy");
+      executeIfRequestsAreQueued(urlMapRequest, "InternalHttpLoadBalancerCaching.urlMapRequest");
+      executeIfRequestsAreQueued(groupHealthRequest, "InternalHttpLoadBalancerCaching.groupHealth");
+
+      for (LoadBalancerHealthResolution resolution : resolutions) {
+        for (Object groupHealth : bsNameToGroupHealthsMap.get(resolution.getTarget())) {
+          GCEUtil.handleHealthObject(resolution.getGoogleLoadBalancer(), groupHealth);
+        }
+      }
+      return loadBalancers.stream()
+          .filter(lb -> !failedLoadBalancers.contains(lb.getName()))
+          .collect(toList());
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  @Override
+  public List<GoogleLoadBalancer> constructLoadBalancers() {
+    return constructLoadBalancers(null);
+  }
+
+  @Override
+  public String determineInstanceKey(
GoogleLoadBalancer loadBalancer, GoogleLoadBalancerHealth health) { + // Http load balancers' region is "global", so we have to determine the instance region from its + // zone. + String instanceZone = health.getInstanceZone(); + String instanceRegion = getCredentials().regionFromZone(instanceZone); + + return Keys.getInstanceKey(getAccountName(), instanceRegion, health.getInstanceName()); + } + + /** + * Local cache of BackendServiceGroupHealth keyed by BackendService name. + * + *
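+   * <p>Reset at the start of every caching cycle in constructLoadBalancers, together with the
+   * queued-request and resolution sets declared just below.
+   *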
+   * <p>
It turns out that the types in the GCE Batch callbacks aren't the actual Compute types for + * some reason, which is why this map is String -> Object. + */ + private Map> bsNameToGroupHealthsMap = new HashMap<>(); + + private Set queuedBsGroupHealthRequests = new HashSet(); + private Set resolutions = + new HashSet(); + + public class ForwardingRuleCallbacks { + public ForwardingRuleSingletonCallback newForwardingRuleSingletonCallback() { + return new ForwardingRuleSingletonCallback(); + } + + public ForwardingRuleListCallback newForwardingRuleListCallback() { + return new ForwardingRuleListCallback(); + } + + public void cacheRemainderOfLoadBalancerResourceGraph(final ForwardingRule forwardingRule) { + GoogleInternalHttpLoadBalancer newLoadBalancer = new GoogleInternalHttpLoadBalancer(); + + newLoadBalancer.setName(forwardingRule.getName()); + newLoadBalancer.setAccount(getAccountName()); + newLoadBalancer.setRegion(Utils.getLocalName(forwardingRule.getRegion())); + newLoadBalancer.setCreatedTime( + Utils.getTimeFromTimestamp(forwardingRule.getCreationTimestamp())); + newLoadBalancer.setIpAddress(forwardingRule.getIPAddress()); + newLoadBalancer.setIpProtocol(forwardingRule.getIPProtocol()); + newLoadBalancer.setPortRange(forwardingRule.getPortRange()); + newLoadBalancer.setNetwork(forwardingRule.getNetwork()); + newLoadBalancer.setSubnet(forwardingRule.getSubnetwork()); + newLoadBalancer.setHealths(new ArrayList<>()); + newLoadBalancer.setHostRules(new ArrayList<>()); + loadBalancers.add(newLoadBalancer); + + String targetProxyName = Utils.getLocalName(forwardingRule.getTarget()); + TargetProxyCallback targetProxyCallback = + new TargetProxyCallback( + newLoadBalancer, + urlMapRequest, + groupHealthRequest, + projectBackendServices, + projectHealthChecks, + newLoadBalancer.getName(), + failedLoadBalancers); + + TargetHttpsProxyCallback targetHttpsProxyCallback = + new TargetHttpsProxyCallback( + newLoadBalancer, + urlMapRequest, + groupHealthRequest, + projectBackendServices, + projectHealthChecks, + newLoadBalancer.getName(), + failedLoadBalancers); + + try { + switch (Utils.getTargetProxyType(forwardingRule.getTarget())) { + case HTTP: + targetProxyRequest.queue( + getCompute() + .regionTargetHttpProxies() + .get(getProject(), getRegion(), targetProxyName), + targetProxyCallback); + break; + case HTTPS: + targetProxyRequest.queue( + getCompute() + .regionTargetHttpsProxies() + .get(getProject(), getRegion(), targetProxyName), + targetHttpsProxyCallback); + break; + default: + log.debug( + "Non-Http target type found for global forwarding rule " + + forwardingRule.getName()); + break; + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public List getLoadBalancers() { + return loadBalancers; + } + + public void setLoadBalancers(List loadBalancers) { + this.loadBalancers = loadBalancers; + } + + private List loadBalancers; + private List failedLoadBalancers; + private GoogleBatchRequest targetProxyRequest; + private GoogleBatchRequest urlMapRequest; + private GoogleBatchRequest groupHealthRequest; + private List projectBackendServices; + private List projectHealthChecks; + + public ForwardingRuleCallbacks( + List loadBalancers, + List failedLoadBalancers, + GoogleBatchRequest targetProxyRequest, + GoogleBatchRequest urlMapRequest, + GoogleBatchRequest groupHealthRequest, + List projectBackendServices, + List projectHealthChecks) { + this.loadBalancers = loadBalancers; + this.failedLoadBalancers = failedLoadBalancers; + this.targetProxyRequest = 
targetProxyRequest; + this.urlMapRequest = urlMapRequest; + this.groupHealthRequest = groupHealthRequest; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + } + + public class ForwardingRuleSingletonCallback extends JsonBatchCallback { + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + // 404 is thrown if the forwarding rule does not exist in the given region. Any other + // exception needs to be propagated. + if (e.getCode() != 404) { + String errorJson = + new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e); + log.error(errorJson); + } + } + + @Override + public void onSuccess(ForwardingRule forwardingRule, HttpHeaders responseHeaders) + throws IOException { + GoogleTargetProxyType type = + forwardingRule.getTarget() != null + ? Utils.getTargetProxyType(forwardingRule.getTarget()) + : null; + if (type == HTTP || type == HTTPS) { + cacheRemainderOfLoadBalancerResourceGraph(forwardingRule); + } else { + throw new IllegalArgumentException( + "Not responsible for on demand caching of load balancers without target " + + "proxy or with SSL proxy type."); + } + } + } + + public class ForwardingRuleListCallback extends JsonBatchCallback + implements FailureLogger { + @Override + public void onSuccess(ForwardingRuleList forwardingRuleList, HttpHeaders responseHeaders) { + if (forwardingRuleList.getItems() == null) return; + forwardingRuleList.getItems().stream() + .filter( + f -> + f.getLoadBalancingScheme() != null + && f.getLoadBalancingScheme().equals("INTERNAL_MANAGED")) + .forEach( + forwardingRule -> { + GoogleTargetProxyType type = + forwardingRule.getTarget() != null + ? Utils.getTargetProxyType(forwardingRule.getTarget()) + : null; + if (type == HTTP || type == HTTPS) { + cacheRemainderOfLoadBalancerResourceGraph(forwardingRule); + } else { + throw new IllegalArgumentException( + "Not responsible for on demand caching of load balancers without target " + + "proxy or with SSL proxy type."); + } + }); + } + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) { + log.error(e.getMessage()); + } + } + } + + abstract static class BaseCallback extends JsonBatchCallback { + List failedSubjects; + String subject; + + public BaseCallback(List failedSubjects, String subject) { + this.failedSubjects = failedSubjects; + this.subject = subject; + } + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn( + "Failed to read a component of subject " + + subject + + ". The platform error message was:\n" + + e.getMessage() + + ". \nReporting it as 'Failed' to the caching agent. "); + failedSubjects.add(subject); + } + } + + public class TargetHttpsProxyCallback extends BaseCallback { + @Override + public void onSuccess(TargetHttpsProxy targetHttpsProxy, HttpHeaders responseHeaders) + throws IOException { + // SslCertificates is a required field for TargetHttpsProxy, and contains exactly one cert. 
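+      // Utils.getLocalName trims the certificate's full resource URL to its final path segment,
+      // so a hypothetical ".../sslCertificates/my-cert" is cached simply as "my-cert".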
+ googleLoadBalancer.setCertificate( + Utils.getLocalName((targetHttpsProxy.getSslCertificates().get(0)))); + + String urlMapURL = targetHttpsProxy.getUrlMap(); + if (urlMapURL != null) { + UrlMapCallback urlMapCallback = + new UrlMapCallback( + googleLoadBalancer, + projectBackendServices, + projectHealthChecks, + groupHealthRequest, + subject, + failedSubjects); + urlMapRequest.queue( + getCompute() + .regionUrlMaps() + .get(getProject(), getRegion(), Utils.getLocalName(urlMapURL)), + urlMapCallback); + } + } + + private GoogleInternalHttpLoadBalancer googleLoadBalancer; + private GoogleBatchRequest urlMapRequest; + private GoogleBatchRequest groupHealthRequest; + private List projectBackendServices; + private List projectHealthChecks; + + public TargetHttpsProxyCallback( + GoogleInternalHttpLoadBalancer googleLoadBalancer, + GoogleBatchRequest urlMapRequest, + GoogleBatchRequest groupHealthRequest, + List projectBackendServices, + List projectHealthChecks, + String subject, + List failedSubjects) { + super(failedSubjects, subject); + this.googleLoadBalancer = googleLoadBalancer; + this.urlMapRequest = urlMapRequest; + this.groupHealthRequest = groupHealthRequest; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + } + } + + public class TargetProxyCallback extends BaseCallback { + @Override + public void onSuccess(TargetHttpProxy targetHttpProxy, HttpHeaders responseHeaders) + throws IOException { + String urlMapURL = targetHttpProxy.getUrlMap(); + if (urlMapURL != null) { + UrlMapCallback urlMapCallback = + new UrlMapCallback( + googleLoadBalancer, + projectBackendServices, + projectHealthChecks, + groupHealthRequest, + subject, + failedSubjects); + urlMapRequest.queue( + getCompute() + .regionUrlMaps() + .get(getProject(), getRegion(), Utils.getLocalName(urlMapURL)), + urlMapCallback); + } + } + + private GoogleInternalHttpLoadBalancer googleLoadBalancer; + private GoogleBatchRequest urlMapRequest; + private GoogleBatchRequest groupHealthRequest; + private List projectBackendServices; + private List projectHealthChecks; + + public TargetProxyCallback( + GoogleInternalHttpLoadBalancer googleLoadBalancer, + GoogleBatchRequest urlMapRequest, + GoogleBatchRequest groupHealthRequest, + List projectBackendServices, + List projectHealthChecks, + String subject, + List failedSubjects) { + super(failedSubjects, subject); + this.googleLoadBalancer = googleLoadBalancer; + this.urlMapRequest = urlMapRequest; + this.groupHealthRequest = groupHealthRequest; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + } + } + + public class UrlMapCallback extends BaseCallback { + @Override + public void onSuccess(UrlMap urlMap, HttpHeaders responseHeaders) { + // Check that we aren't stomping on our URL map. If we are, log an error. + if (googleLoadBalancer.getDefaultService() != null + || (googleLoadBalancer.getHostRules() != null + && googleLoadBalancer.getHostRules().size() > 0)) { + log.error( + "Overwriting UrlMap " + + urlMap.getName() + + ". You may have a TargetHttp(s)Proxy naming collision."); + } + + googleLoadBalancer.setUrlMapName(urlMap.getName()); + // Queue up the backend services to process. + Set queuedServices = new HashSet<>(); + + // Default service is mandatory. 
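+      // E.g. (hypothetical URL) a default service of ".../backendServices/www-default" is queued
+      // as "www-default" and resolved against projectBackendServices at the end of this callback.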
+      String urlMapDefaultService = Utils.getLocalName(urlMap.getDefaultService());
+      queuedServices.add(urlMapDefaultService);
+
+      GoogleBackendService service1 = new GoogleBackendService();
+      service1.setName(urlMapDefaultService);
+      googleLoadBalancer.setDefaultService(service1);
+      if (urlMap.getPathMatchers() != null) {
+        for (PathMatcher pathMatcher : urlMap.getPathMatchers()) {
+          String pathMatchDefaultService = Utils.getLocalName(pathMatcher.getDefaultService());
+          List<PathRule> pathRules =
+              pathMatcher.getPathRules() != null ? pathMatcher.getPathRules() : new ArrayList<>();
+          for (HostRule hostRule : urlMap.getHostRules()) {
+            if (hostRule.getPathMatcher() != null
+                && hostRule.getPathMatcher().equals(pathMatcher.getName())) {
+              GoogleBackendService googleBackendService = new GoogleBackendService();
+              googleBackendService.setName(pathMatchDefaultService);
+
+              GooglePathMatcher gPathMatcher = new GooglePathMatcher();
+              gPathMatcher.setPathRules(new ArrayList<>());
+              gPathMatcher.setDefaultService(googleBackendService);
+
+              GoogleHostRule gHostRule = new GoogleHostRule();
+              gHostRule.setHostPatterns(hostRule.getHosts());
+              gHostRule.setPathMatcher(gPathMatcher);
+              List<GooglePathRule> collect =
+                  pathRules.stream()
+                      .map(
+                          pathRule -> {
+                            GoogleBackendService service = new GoogleBackendService();
+                            service.setName(Utils.getLocalName(pathRule.getService()));
+
+                            GooglePathRule googlePathRule = new GooglePathRule();
+                            googlePathRule.setPaths(pathRule.getPaths());
+                            googlePathRule.setBackendService(service);
+                            return googlePathRule;
+                          })
+                      .collect(toList());
+              gPathMatcher.setPathRules(collect);
+              googleLoadBalancer.getHostRules().add(gHostRule);
+            }
+          }
+
+          queuedServices.add(pathMatchDefaultService);
+          for (PathRule pathRule : pathRules) {
+            if (pathRule.getService() != null) {
+              queuedServices.add(Utils.getLocalName(pathRule.getService()));
+            }
+          }
+        }
+      }
+
+      // Process queued backend services.
+      for (String queuedService : queuedServices) {
+        BackendService service =
+            projectBackendServices.stream()
+                .filter(bs -> Utils.getLocalName(bs.getName()).equals(queuedService))
+                .findFirst()
+                .get();
+        handleBackendService(service, googleLoadBalancer, projectHealthChecks, groupHealthRequest);
+      }
+    }
+
+    private GoogleInternalHttpLoadBalancer googleLoadBalancer;
+    private List<BackendService> projectBackendServices;
+    private List<HealthCheck> projectHealthChecks;
+    private GoogleBatchRequest groupHealthRequest;
+
+    public UrlMapCallback(
+        GoogleInternalHttpLoadBalancer googleLoadBalancer,
+        List<BackendService> projectBackendServices,
+        List<HealthCheck> projectHealthChecks,
+        GoogleBatchRequest groupHealthRequest,
+        String subject,
+        List<String> failedSubjects) {
+      super(failedSubjects, subject);
+      this.googleLoadBalancer = googleLoadBalancer;
+      this.projectBackendServices = projectBackendServices;
+      this.projectHealthChecks = projectHealthChecks;
+      this.groupHealthRequest = groupHealthRequest;
+    }
+  }
+
+  public class GroupHealthCallback extends JsonBatchCallback<BackendServiceGroupHealth> {
+    /**
+     * Tolerates failures of the group health calls. Spinnaker reports empty load balancer healths
+     * as 'unknown'. If healthStatus is null in the onSuccess() function, the same state is
+     * reported, so this shouldn't cause issues.
+     */
+    public void onFailure(final GoogleJsonError e, HttpHeaders responseHeaders) {
+      log.debug(
+          "Failed backend service group health call for backend service "
+              + getBackendServiceName()
+              + " for Http load balancer. 
The platform error message was:\n " + + e.getMessage() + + "."); + } + + @Override + public void onSuccess( + BackendServiceGroupHealth backendServiceGroupHealth, HttpHeaders responseHeaders) { + if (!bsNameToGroupHealthsMap.containsKey(backendServiceName)) { + bsNameToGroupHealthsMap.put( + backendServiceName, new ArrayList<>(Arrays.asList(backendServiceGroupHealth))); + } else { + bsNameToGroupHealthsMap.get(backendServiceName).add(backendServiceGroupHealth); + } + } + + public String getBackendServiceName() { + return backendServiceName; + } + + public void setBackendServiceName(String backendServiceName) { + this.backendServiceName = backendServiceName; + } + + private String backendServiceName; + + public GroupHealthCallback(String backendServiceName) { + this.backendServiceName = backendServiceName; + } + } + + private void handleBackendService( + BackendService backendService, + GoogleInternalHttpLoadBalancer googleHttpLoadBalancer, + List healthChecks, + GoogleBatchRequest groupHealthRequest) { + if (backendService == null) { + return; + } + + final GroupHealthCallback groupHealthCallback = + new GroupHealthCallback(backendService.getName()); + + // We have to update the backend service objects we created from the UrlMapCallback. + // The UrlMapCallback knows which backend service is the defaultService, etc and the + // BackendServiceCallback has the actual serving capacity and server group data. + List backendServicesInMap = + Utils.getBackendServicesFromInternalHttpLoadBalancerView(googleHttpLoadBalancer.getView()); + List backendServicesToUpdate = + backendServicesInMap.stream() + .filter(b -> b.getName().equals(backendService.getName())) + .collect(toList()); + for (GoogleBackendService service : backendServicesToUpdate) { + service.setRegion(googleHttpLoadBalancer.getRegion()); + service.setSessionAffinity( + GoogleSessionAffinity.valueOf(backendService.getSessionAffinity())); + service.setAffinityCookieTtlSec(backendService.getAffinityCookieTtlSec()); + service.setEnableCDN(backendService.getEnableCDN()); + String name = backendService.getPortName(); + service.setPortName( + name != null ? name : GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + ConnectionDraining draining = backendService.getConnectionDraining(); + service.setConnectionDrainingTimeoutSec( + draining == null ? 0 : draining.getDrainingTimeoutSec()); + // Note: It's possible for a backend service to have backends that point to a null group. + if (backendService.getBackends() != null) { + List backends = + backendService.getBackends().stream() + .filter(backend -> backend.getGroup() != null) + .map( + backend -> { + GoogleLoadBalancedBackend googleBackend = new GoogleLoadBalancedBackend(); + googleBackend.setPolicy(GCEUtil.loadBalancingPolicyFromBackend(backend)); + googleBackend.setServerGroupUrl(backend.getGroup()); + return googleBackend; + }) + .collect(toList()); + service.setBackends(backends); + } + } + + // Note: It's possible for a backend service to have backends that point to a null group. + if (backendService.getBackends() != null) { + backendService.getBackends().stream() + .filter(backend -> backend.getGroup() != null) + .forEach( + backend -> { + ResourceGroupReference resourceGroup = new ResourceGroupReference(); + resourceGroup.setGroup(backend.getGroup()); + + // Make only the group health request calls we need to. 
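+                  // GroupHealthRequest appears to be a simple value object over (project, backend
+                  // service, group), so the set membership check below dedupes identical
+                  // getHealth() batch calls within a single caching cycle.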
+                GroupHealthRequest ghr =
+                    new GroupHealthRequest(
+                        getProject(), backendService.getName(), resourceGroup.getGroup());
+                if (!queuedBsGroupHealthRequests.contains(ghr)) {
+                  // The groupHealthCallback updates the local cache.
+                  log.debug("Queueing a batch call for getHealth(): {}", ghr);
+                  queuedBsGroupHealthRequests.add(ghr);
+                  try {
+                    groupHealthRequest.queue(
+                        getCompute()
+                            .regionBackendServices()
+                            .getHealth(
+                                getProject(), getRegion(), backendService.getName(), resourceGroup),
+                        groupHealthCallback);
+                  } catch (IOException e) {
+                    throw new UncheckedIOException(e);
+                  }
+                } else {
+                  log.debug("Passing, batch call result cached for getHealth(): {}", ghr);
+                }
+                resolutions.add(
+                    new LoadBalancerHealthResolution(
+                        googleHttpLoadBalancer, backendService.getName()));
+              });
+    }
+    for (String healthCheckURL : backendService.getHealthChecks()) {
+      String healthCheckName = Utils.getLocalName(healthCheckURL);
+      HealthCheck healthCheck =
+          healthChecks.stream()
+              .filter(hc -> Utils.getLocalName(hc.getName()).equals(healthCheckName))
+              .findFirst()
+              .get();
+      handleHealthCheck(healthCheck, backendServicesToUpdate);
+    }
+  }
+
+  private static void handleHealthCheck(
+      final HealthCheck healthCheck, List<GoogleBackendService> googleBackendServices) {
+    if (healthCheck == null) return;
+
+    Integer port = null;
+    GoogleHealthCheck.HealthCheckType hcType = null;
+    String requestPath = null;
+    if (healthCheck.getTcpHealthCheck() != null) {
+      port = healthCheck.getTcpHealthCheck().getPort();
+      hcType = GoogleHealthCheck.HealthCheckType.TCP;
+    } else if (healthCheck.getSslHealthCheck() != null) {
+      port = healthCheck.getSslHealthCheck().getPort();
+      hcType = GoogleHealthCheck.HealthCheckType.SSL;
+    } else if (healthCheck.getHttpHealthCheck() != null) {
+      port = healthCheck.getHttpHealthCheck().getPort();
+      requestPath = healthCheck.getHttpHealthCheck().getRequestPath();
+      hcType = GoogleHealthCheck.HealthCheckType.HTTP;
+    } else if (healthCheck.getHttpsHealthCheck() != null) {
+      port = healthCheck.getHttpsHealthCheck().getPort();
+      requestPath = healthCheck.getHttpsHealthCheck().getRequestPath();
+      hcType = GoogleHealthCheck.HealthCheckType.HTTPS;
+    }
+    // else if (healthCheck.getUdpHealthCheck() != null) {
+    //   port = healthCheck.getUdpHealthCheck().getPort();
+    //   hcType = GoogleHealthCheck.HealthCheckType.UDP;
+    // }
+
+    if (port != null && hcType != null) {
+      for (GoogleBackendService googleBackendService : googleBackendServices) {
+        GoogleHealthCheck googleHealthCheck = new GoogleHealthCheck();
+        googleHealthCheck.setName(healthCheck.getName());
+        googleHealthCheck.setRequestPath(requestPath);
+        googleHealthCheck.setSelfLink(healthCheck.getSelfLink());
+        googleHealthCheck.setPort(port);
+        googleHealthCheck.setHealthCheckType(hcType);
+        googleHealthCheck.setCheckIntervalSec(healthCheck.getCheckIntervalSec());
+        googleHealthCheck.setTimeoutSec(healthCheck.getTimeoutSec());
+        googleHealthCheck.setUnhealthyThreshold(healthCheck.getUnhealthyThreshold());
+        googleHealthCheck.setHealthyThreshold(healthCheck.getHealthyThreshold());
+        googleHealthCheck.setRegion(healthCheck.getRegion());
+        googleBackendService.setHealthCheck(googleHealthCheck);
+      }
+    }
+  }
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgent.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgent.java
new file mode 100644
index 00000000000..7b019a9821d
--- /dev/null
+++
b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgent.java @@ -0,0 +1,108 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.model.Autoscaler; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.common.collect.ImmutableSet; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.compute.RegionAutoscalers; +import com.netflix.spinnaker.clouddriver.google.compute.RegionInstanceGroupManagers; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Optional; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@ParametersAreNonnullByDefault +public final class GoogleRegionalServerGroupCachingAgent + extends AbstractGoogleServerGroupCachingAgent { + + public GoogleRegionalServerGroupCachingAgent( + GoogleNamedAccountCredentials credentials, + GoogleComputeApiFactory computeApiFactory, + Registry registry, + String region, + ObjectMapper objectMapper) { + super(credentials, computeApiFactory, registry, region, objectMapper); + } + + @Override + Collection getOnDemandKeysToEvictForMissingServerGroup( + ProviderCache providerCache, String serverGroupName) { + String clusterName = null; // getServerGroupKey will calculate this from serverGroupName + return ImmutableSet.of( + Keys.getServerGroupKey(serverGroupName, clusterName, getAccountName(), getRegion())); + } + + @Override + boolean keyOwnedByThisAgent(Map parsedKey) { + return getAccountName().equals(parsedKey.get("account")) + && getRegion().equals(parsedKey.get("region")) + && parsedKey.get("zone") == null; + } + + @Override + Collection retrieveInstanceGroupManagers() throws IOException { + + RegionInstanceGroupManagers managersApi = + getComputeApiFactory().createRegionInstanceGroupManagers(getCredentials()); + return managersApi.list(getRegion()).execute(); + } + + @Override + Collection retrieveAutoscalers() throws IOException { + + RegionAutoscalers autoscalersApi = + getComputeApiFactory().createRegionAutoscalers(getCredentials()); + return autoscalersApi.list(getRegion()).execute(); + } + + @Override + Optional retrieveInstanceGroupManager(String name) throws IOException { + RegionInstanceGroupManagers managersApi = + getComputeApiFactory().createRegionInstanceGroupManagers(getCredentials()); + return managersApi.get(getRegion(), name).executeGet(); + 
} + + @Override + Optional retrieveAutoscaler(InstanceGroupManager manager) throws IOException { + + RegionAutoscalers autoscalersApi = + getComputeApiFactory().createRegionAutoscalers(getCredentials()); + return autoscalersApi.get(getRegion(), manager.getName()).executeGet(); + } + + @Override + Collection retrieveRelevantInstances(InstanceGroupManager instanceGroupManager) + throws IOException { + return retrieveAllInstancesInRegion(); + } + + @Override + String getBatchContextPrefix() { + return "RegionalServerGroupCaching"; + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgent.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgent.java new file mode 100644 index 00000000000..2412f6ab479 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgent.java @@ -0,0 +1,148 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.base.Strings.isNullOrEmpty; +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.Compute.InstanceGroupManagers; +import com.google.api.services.compute.Compute.InstanceGroupManagers.Get; +import com.google.api.services.compute.model.Autoscaler; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.compute.BatchPaginatedComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GetFirstBatchComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.compute.Instances; +import com.netflix.spinnaker.clouddriver.google.compute.ZoneAutoscalers; +import com.netflix.spinnaker.clouddriver.google.compute.ZoneInstanceGroupManagers; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Optional; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@ParametersAreNonnullByDefault +public final class GoogleZonalServerGroupCachingAgent + extends 
AbstractGoogleServerGroupCachingAgent { + + public GoogleZonalServerGroupCachingAgent( + GoogleNamedAccountCredentials credentials, + GoogleComputeApiFactory computeApiFactory, + Registry registry, + String region, + ObjectMapper objectMapper) { + super(credentials, computeApiFactory, registry, region, objectMapper); + } + + @Override + Collection getOnDemandKeysToEvictForMissingServerGroup( + ProviderCache providerCache, String serverGroupName) { + // If we didn't find this server group, look for any existing ON_DEMAND entries for it (in + // any zone) and evict them. + // TODO(plumpy): I think this is a bug and SERVER_GROUPS should be ON_DEMAND. + String serverGroupKey = + Keys.getServerGroupKey( + serverGroupName, /* cluster= */ null, getAccountName(), getRegion(), /* zone= */ "*"); + return providerCache.filterIdentifiers(SERVER_GROUPS.getNs(), serverGroupKey); + } + + @Override + boolean keyOwnedByThisAgent(Map parsedKey) { + return getAccountName().equals(parsedKey.get("account")) + && getRegion().equals(parsedKey.get("region")) + && parsedKey.get("zone") != null; + } + + @Override + Collection retrieveInstanceGroupManagers() throws IOException { + + ZoneInstanceGroupManagers managersApi = + getComputeApiFactory().createZoneInstanceGroupManagers(getCredentials()); + BatchPaginatedComputeRequest request = + getComputeApiFactory().createPaginatedBatchRequest(getCredentials()); + + getZonesForRegion().forEach(zone -> request.queue(managersApi.list(zone))); + + return request.execute(getBatchContext("igm")); + } + + @Override + Collection retrieveAutoscalers() throws IOException { + + ZoneAutoscalers autoscalersApi = getComputeApiFactory().createZoneAutoscalers(getCredentials()); + BatchPaginatedComputeRequest request = + getComputeApiFactory().createPaginatedBatchRequest(getCredentials()); + + getZonesForRegion().forEach(zone -> request.queue(autoscalersApi.list(zone))); + + return request.execute(getBatchContext("autoscaler")); + } + + @Override + Optional retrieveInstanceGroupManager(String name) throws IOException { + + ZoneInstanceGroupManagers managersApi = + getComputeApiFactory().createZoneInstanceGroupManagers(getCredentials()); + GetFirstBatchComputeRequest request = + GetFirstBatchComputeRequest.create( + getComputeApiFactory().createBatchRequest(getCredentials())); + for (String zone : getZonesForRegion()) { + request.queue(managersApi.get(zone, name)); + } + return request.execute(getBatchContext("igm")); + } + + @Override + Optional retrieveAutoscaler(InstanceGroupManager manager) throws IOException { + + ZoneAutoscalers autoscalersApi = getComputeApiFactory().createZoneAutoscalers(getCredentials()); + return autoscalersApi.get(getZone(manager), manager.getName()).executeGet(); + } + + @Override + Collection retrieveRelevantInstances(InstanceGroupManager manager) throws IOException { + + Instances instancesApi = getComputeApiFactory().createInstances(getCredentials()); + return instancesApi.list(getZone(manager)).execute().stream().collect(toImmutableList()); + } + + private String getZone(InstanceGroupManager manager) { + + checkState( + !isNullOrEmpty(manager.getZone()), + "Managed instance group %s did not have a zone.", + manager.getName()); + return Utils.getLocalName(manager.getZone()); + } + + @Override + String getBatchContextPrefix() { + return "ZonalServerGroupCaching"; + } +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/GroupHealthRequest.java 
b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/GroupHealthRequest.java similarity index 91% rename from clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/GroupHealthRequest.java rename to clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/GroupHealthRequest.java index 8c2aa08a7f0..de78d995387 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/GroupHealthRequest.java +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/GroupHealthRequest.java @@ -21,9 +21,7 @@ import lombok.EqualsAndHashCode; import lombok.ToString; -/** - * Helper class for locally resolving queued backend service group health requests. - */ +/** Helper class for locally resolving queued backend service group health requests. */ @Data @EqualsAndHashCode @AllArgsConstructor diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/LoadBalancerHealthResolution.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/LoadBalancerHealthResolution.java similarity index 91% rename from clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/LoadBalancerHealthResolution.java rename to clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/LoadBalancerHealthResolution.java index efadb34cadd..1b5b640a962 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/LoadBalancerHealthResolution.java +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/LoadBalancerHealthResolution.java @@ -20,9 +20,7 @@ import lombok.AllArgsConstructor; import lombok.Data; -/** - * Helper class to resolve locally cached backend service get health call results. - */ +/** Helper class to resolve locally cached backend service get health call results. */ @Data @AllArgsConstructor public class LoadBalancerHealthResolution { diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/PaginatedCallback.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/PaginatedCallback.java new file mode 100644 index 00000000000..2897e499932 --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/PaginatedCallback.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.google.provider.agent.util;
+
+import com.google.api.client.googleapis.batch.json.JsonBatchCallback;
+import com.google.api.client.googleapis.json.GoogleJsonError;
+import com.google.api.client.http.HttpHeaders;
+import java.io.IOException;
+
+public abstract class PaginatedCallback<T> extends JsonBatchCallback<T> {
+  private final JsonBatchCallback<T> innerCallback;
+
+  PaginatedCallback(JsonBatchCallback<T> innerCallback) {
+    this.innerCallback = innerCallback;
+  }
+
+  @Override
+  public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException {
+    innerCallback.onFailure(e, responseHeaders);
+  }
+
+  @Override
+  public void onSuccess(T t, HttpHeaders responseHeaders) throws IOException {
+    innerCallback.onSuccess(t, responseHeaders);
+    requestNextBatch(t);
+  }
+
+  protected abstract void requestNextBatch(T t) throws IOException;
+}
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/PaginatedRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/PaginatedRequest.java
new file mode 100644
index 00000000000..09ea9351d17
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/PaginatedRequest.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.provider.agent.util;
+
+import com.google.api.client.googleapis.batch.json.JsonBatchCallback;
+import com.google.api.services.compute.ComputeRequest;
+import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest;
+import com.netflix.spinnaker.clouddriver.google.provider.agent.AbstractGoogleCachingAgent;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+
+public abstract class PaginatedRequest<T> {
+  private final AbstractGoogleCachingAgent cachingAgent;
+
+  public PaginatedRequest(AbstractGoogleCachingAgent cachingAgent) {
+    this.cachingAgent = cachingAgent;
+  }
+
+  public void queue(
+      GoogleBatchRequest googleBatchRequest,
+      JsonBatchCallback<T> callback,
+      String instrumentationContext)
+      throws IOException {
+    PaginatedCallback<T> paginatedCallback =
+        new PaginatedCallback<T>(callback) {
+          @Override
+          protected void requestNextBatch(T t) throws IOException {
+            String nextPageToken = getNextPageToken(t);
+            GoogleBatchRequest batch = cachingAgent.buildGoogleBatchRequest();
+            if (nextPageToken != null) {
+              batch.queue(request(nextPageToken), this);
+            }
+            cachingAgent.executeIfRequestsAreQueued(batch, instrumentationContext);
+          }
+        };
+    googleBatchRequest.queue(request(null), paginatedCallback);
+  }
+
+  public <I> List<I> timeExecute(Function<T, List<I>> itemExtractor, String api, String... tags)
+      throws IOException {
+    String pageToken = null;
+    List<I> resultList = new ArrayList<>();
+    do {
+      T results = cachingAgent.timeExecute(request(pageToken), api, tags);
+      List<I> newItems = itemExtractor.apply(results);
+      if (newItems != null) {
+        resultList.addAll(newItems);
+      }
+      pageToken = getNextPageToken(results);
+    } while (pageToken != null);
+    return resultList;
+  }
+
+  protected abstract String getNextPageToken(T t);
+
+  protected abstract ComputeRequest<T> request(String pageToken);
+}
diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/TargetPoolHealthRequest.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/TargetPoolHealthRequest.java
similarity index 92%
rename from clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/TargetPoolHealthRequest.java
rename to clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/TargetPoolHealthRequest.java
index 7a89905974b..6bf5ecffe9d 100644
--- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/util/TargetPoolHealthRequest.java
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/agent/util/TargetPoolHealthRequest.java
@@ -21,9 +21,7 @@
 import lombok.EqualsAndHashCode;
 import lombok.ToString;
 
-/**
- * Helper class for locally resolving queued target pool health requests.
- */
+/** Helper class for locally resolving queued target pool health requests. */
 @Data
 @EqualsAndHashCode
 @AllArgsConstructor
diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleApplicationProvider.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleApplicationProvider.java
new file mode 100644
index 00000000000..a93f9398da9
--- /dev/null
+++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleApplicationProvider.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.google.provider.view; + +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.CLUSTERS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.INSTANCES; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.google.GoogleCloudProvider; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace; +import com.netflix.spinnaker.clouddriver.google.model.GoogleApplication; +import com.netflix.spinnaker.clouddriver.model.Application; +import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; +import lombok.Value; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +final class GoogleApplicationProvider implements ApplicationProvider { + + private final Cache cacheView; + private final ObjectMapper objectMapper; + + @Autowired + GoogleApplicationProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.objectMapper = objectMapper; + } + + @Override + public Set getApplications(boolean expand) { + + RelationshipCacheFilter filter = + expand ? 
RelationshipCacheFilter.include(CLUSTERS.getNs()) : RelationshipCacheFilter.none(); + Collection data = + cacheView.getAll( + APPLICATIONS.getNs(), + cacheView.filterIdentifiers(APPLICATIONS.getNs(), GoogleCloudProvider.getID() + ":*"), + filter); + return data.stream().map(this::applicationFromCacheData).collect(toSet()); + } + + @Value + static class ApplicationCacheData { + Map applicationAttributes; + Set clusterIdentifiers; + Set instanceIdentifiers; + } + + @Nullable + ApplicationCacheData getApplicationCacheData(String name) { + CacheData cacheData = + cacheView.get( + APPLICATIONS.getNs(), + Keys.getApplicationKey(name), + RelationshipCacheFilter.include(CLUSTERS.getNs(), INSTANCES.getNs())); + return getApplicationCacheData(cacheData); + } + + private ApplicationCacheData getApplicationCacheData(CacheData cacheData) { + if (cacheData == null) { + return null; + } + return new ApplicationCacheData( + cacheData.getAttributes(), + getRelationships(cacheData, CLUSTERS), + getRelationships(cacheData, INSTANCES)); + } + + @Override + public Application getApplication(String name) { + return applicationFromCacheData(getApplicationCacheData(name)); + } + + private GoogleApplication.View applicationFromCacheData(CacheData cacheData) { + return applicationFromCacheData(getApplicationCacheData(cacheData)); + } + + private GoogleApplication.View applicationFromCacheData( + ApplicationCacheData applicationCacheData) { + if (applicationCacheData == null) { + return null; + } + GoogleApplication application = + objectMapper.convertValue( + applicationCacheData.getApplicationAttributes(), GoogleApplication.class); + if (application == null) { + return null; + } + + GoogleApplication.View applicationView = application.getView(); + + Set clusters = applicationCacheData.getClusterIdentifiers(); + clusters.forEach( + key -> { + Map parsedKey = Keys.parse(key); + applicationView + .getClusterNames() + .get(parsedKey.get("account")) + .add(parsedKey.get("name")); + }); + + List> instances = + applicationCacheData.getInstanceIdentifiers().stream().map(Keys::parse).collect(toList()); + applicationView.setInstances(instances); + + return applicationView; + } + + private Set getRelationships(CacheData cacheData, Namespace namespace) { + Collection relationships = cacheData.getRelationships().get(namespace.getNs()); + return relationships == null ? Collections.emptySet() : new HashSet<>(relationships); + } +} diff --git a/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsLifecycleHandler.java b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsLifecycleHandler.java new file mode 100644 index 00000000000..742e1e13ddb --- /dev/null +++ b/clouddriver-google/src/main/java/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsLifecycleHandler.java @@ -0,0 +1,142 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.security; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.provider.GoogleInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.google.provider.agent.*; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import java.util.*; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Component; + +@Component +@RequiredArgsConstructor +public class GoogleCredentialsLifecycleHandler + implements CredentialsLifecycleHandler { + + private final GoogleInfrastructureProvider googleInfrastructureProvider; + private final GoogleConfigurationProperties googleConfigurationProperties; + private final GoogleComputeApiFactory googleComputeApiFactory; + private final ObjectMapper objectMapper; + private final Registry registry; + private final String clouddriverUserAgentApplicationName; + + @Override + public void credentialsAdded(GoogleNamedAccountCredentials credentials) { + addAgentFor(credentials); + } + + @Override + public void credentialsUpdated(GoogleNamedAccountCredentials credentials) { + googleInfrastructureProvider.removeAgentsForAccounts( + Collections.singleton(credentials.getName())); + addAgentFor(credentials); + } + + @Override + public void credentialsDeleted(GoogleNamedAccountCredentials credentials) { + googleInfrastructureProvider.removeAgentsForAccounts( + Collections.singleton(credentials.getName())); + } + + private void addAgentFor(GoogleNamedAccountCredentials credentials) { + + List regionZonesMap = credentials.getRegions(); + + List regions = new ArrayList<>(); + for (Map> map : regionZonesMap) { + String reg = String.valueOf(map.get("name")); + regions.add(reg); + } + + List googleCachingAgents = new LinkedList<>(); + List googleServerGroupAgents = new LinkedList<>(); + + googleCachingAgents.add( + new GoogleSecurityGroupCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleNetworkCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleGlobalAddressCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleHealthCheckCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleHttpHealthCheckCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleSslLoadBalancerCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleSslCertificateCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleTcpLoadBalancerCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleBackendServiceCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + new GoogleInstanceCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + googleCachingAgents.add( + 
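+ // Unlike the other global agents, the image agent also scans the account's
+ // configured image projects and the globally configured base image projects.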
new GoogleImageCachingAgent( + clouddriverUserAgentApplicationName, + credentials, + objectMapper, + registry, + credentials.getImageProjects(), + googleConfigurationProperties.getBaseImageProjects())); + googleCachingAgents.add( + new GoogleHttpLoadBalancerCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry)); + + for (String region : regions) { + googleCachingAgents.add( + new GoogleSubnetCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region)); + googleCachingAgents.add( + new GoogleRegionalAddressCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region)); + googleCachingAgents.add( + new GoogleInternalLoadBalancerCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region)); + googleCachingAgents.add( + new GoogleInternalHttpLoadBalancerCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region)); + googleCachingAgents.add( + new GoogleNetworkLoadBalancerCachingAgent( + clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region)); + googleServerGroupAgents.add( + new GoogleRegionalServerGroupCachingAgent( + credentials, googleComputeApiFactory, registry, region, objectMapper)); + googleServerGroupAgents.add( + new GoogleZonalServerGroupCachingAgent( + credentials, googleComputeApiFactory, registry, region, objectMapper)); + } + + googleInfrastructureProvider.addAgents(googleCachingAgents); + googleInfrastructureProvider.addAgents(googleServerGroupAgents); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilderSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilderSpec.groovy deleted file mode 100644 index d0b0027969c..00000000000 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilderSpec.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.google.cache - -import spock.lang.Specification - -class CacheResultBuilderSpec extends Specification { - - def "should build a cache result"() { - given: - CacheResultBuilder crb = new CacheResultBuilder() - - crb.namespace("applications").keep("appKey").with { - attributes.santa = "clause" - relationships["clusters"].add("clusterKey") - } - crb.namespace("clusters").keep("clusterKey").with { - attributes.xmen = "wolverine" - relationships["foo"].add("bar") - } - - when: - def result = crb.build() - - then: - !result.cacheResults.empty - result.cacheResults.applications[0].id == "appKey" - result.cacheResults.applications[0].attributes.santa == "clause" - result.cacheResults.applications[0].relationships.clusters[0] == "clusterKey" - result.cacheResults.clusters[0].id == "clusterKey" - result.cacheResults.clusters[0].attributes.xmen == "wolverine" - result.cacheResults.clusters[0].relationships.foo[0] == "bar" - } -} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilderTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilderTest.java new file mode 100644 index 00000000000..68c50498a38 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/cache/CacheResultBuilderTest.java @@ -0,0 +1,120 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.cache; + +import static com.google.common.collect.Iterables.getOnlyElement; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.ON_DEMAND; +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.google.cache.CacheResultBuilder.CacheDataBuilder; +import java.util.Collection; +import java.util.Map; +import org.junit.jupiter.api.Test; + +final class CacheResultBuilderTest { + + @Test + public void testBuild() { + CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(); + + CacheDataBuilder appBuilder = cacheResultBuilder.namespace("applications").keep("appKey"); + appBuilder.setAttributes(ImmutableMap.of("santa", "claus")); + appBuilder.setRelationships(ImmutableMap.of("clusters", ImmutableList.of("clusterKey"))); + CacheDataBuilder clusterBuilder = cacheResultBuilder.namespace("clusters").keep("clusterKey"); + clusterBuilder.setAttributes(ImmutableMap.of("xmen", "wolverine")); + clusterBuilder.setRelationships(ImmutableMap.of("foo", ImmutableList.of("bar"))); + + Map> cacheResults = cacheResultBuilder.build().getCacheResults(); + + assertThat(cacheResults).isNotEmpty(); + assertThat(cacheResults.get("applications")).hasSize(1); + CacheData application = getOnlyElement(cacheResults.get("applications")); + assertThat(application.getId()).isEqualTo("appKey"); + assertThat(application.getAttributes().get("santa")).isEqualTo("claus"); + assertThat(application.getRelationships().get("clusters")).containsExactly("clusterKey"); + assertThat(cacheResults.get("clusters")).hasSize(1); + CacheData cluster = getOnlyElement(cacheResults.get("clusters")); + assertThat(cluster.getId()).isEqualTo("clusterKey"); + assertThat(cluster.getAttributes().get("xmen")).isEqualTo("wolverine"); + assertThat(cluster.getRelationships().get("foo")).containsExactly("bar"); + } + + @Test + public void testOnDemandEntries() { + CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(); + + cacheResultBuilder.getOnDemand().getToEvict().add("evict1"); + cacheResultBuilder.getOnDemand().getToEvict().add("evict2"); + cacheResultBuilder + .getOnDemand() + .getToKeep() + .put( + "applications", + new DefaultCacheData( + "appKey", + ImmutableMap.of("santa", "claus"), + ImmutableMap.of("clusters", ImmutableList.of("clusterKey")))); + + DefaultCacheResult result = cacheResultBuilder.build(); + + Map> evictions = result.getEvictions(); + assertThat(evictions).hasSize(1); + assertThat(evictions.get(ON_DEMAND.getNs())).containsExactly("evict1", "evict2"); + + Map> cacheResults = result.getCacheResults(); + assertThat(cacheResults).hasSize(1); + assertThat(cacheResults.get(ON_DEMAND.getNs())).hasSize(1); + CacheData application = getOnlyElement(cacheResults.get(ON_DEMAND.getNs())); + assertThat(application.getId()).isEqualTo("appKey"); + assertThat(application.getAttributes().get("santa")).isEqualTo("claus"); + assertThat(application.getRelationships().get("clusters")).containsExactly("clusterKey"); + } + + @Test + 
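+ // Authoritative namespaces must appear in the result even when empty; an empty
+ // authoritative list is what allows stale cache entries to be evicted.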
public void keepsEmptyListForAuthoritativeTypes() { + CacheResultBuilder cacheResultBuilder = + new CacheResultBuilder( + ImmutableSet.of( + AUTHORITATIVE.forType("auth1"), + AUTHORITATIVE.forType("auth2"), + INFORMATIVE.forType("inf1"), + INFORMATIVE.forType("inf2"))); + + cacheResultBuilder + .namespace("auth2") + .keep("id2") + .setAttributes(ImmutableMap.of("attr2", "value2")); + cacheResultBuilder + .namespace("auth3") + .keep("id3") + .setAttributes(ImmutableMap.of("attr3", "value3")); + + Map> cacheResults = cacheResultBuilder.build().getCacheResults(); + assertThat(cacheResults.get("auth1")).isEmpty(); + // Just to make sure the dataTypes constructor doesn't mess anything else up + assertThat(cacheResults.get("auth2")).extracting(CacheData::getId).containsExactly("id2"); + assertThat(cacheResults.get("auth3")).extracting(CacheData::getId).containsExactly("id3"); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequestImplTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequestImplTest.java new file mode 100644 index 00000000000..5a59b33a1ad --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/BatchComputeRequestImplTest.java @@ -0,0 +1,434 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIOException; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.LowLevelHttpRequest; +import com.google.api.client.http.LowLevelHttpResponse; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.Compute.Images.Get; +import com.google.api.services.compute.model.Image; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; +import com.netflix.spectator.api.BasicTag; +import com.netflix.spectator.api.Counter; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.api.Timer; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.http.client.HttpResponseException; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class BatchComputeRequestImplTest { + + private static final String USER_AGENT = "spinnaker-test"; + private static final String MIME_BOUNDARY = "batch_foobarbaz"; + private static final String MIME_PART_START = "--batch_foobarbaz\n"; + private static final String MIME_END = "--batch_foobarbaz--\n"; + private static final String BATCH_CONTENT_TYPE = "multipart/mixed; boundary=" + MIME_BOUNDARY; + + private Registry registry; + + @BeforeEach + public void setUp() { + registry = new DefaultRegistry(); + } + + @Test + public void exitsEarlyWithNoRequests() throws IOException { + + Compute compute = computeWithResponses(); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + batchRequest.execute("batchContext"); + } + + @Test + public void singleRequest() throws IOException { + + Compute compute = computeWithResponses(() -> successBatchResponse(1)); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + batchRequest.queue(request(compute), responses); + + batchRequest.execute("batchContext"); + + assertThat(responses.successes).hasValue(1); + assertThat(responses.failures).hasValue(0); + } + + @Test + public void singleBatch() throws IOException { + + Compute compute = + computeWithResponses(() -> successBatchResponse(BatchComputeRequestImpl.MAX_BATCH_SIZE)); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < BatchComputeRequestImpl.MAX_BATCH_SIZE; ++i) { + batchRequest.queue(request(compute), responses); + } + + batchRequest.execute("batchContext"); + + assertThat(responses.successes).hasValue(BatchComputeRequestImpl.MAX_BATCH_SIZE); + assertThat(responses.failures).hasValue(0); + } + + @Test + public void multipleBatches() throws 
IOException { + + Compute compute = + computeWithResponses( + () -> successBatchResponse(BatchComputeRequestImpl.MAX_BATCH_SIZE), + () -> successBatchResponse(BatchComputeRequestImpl.MAX_BATCH_SIZE), + () -> successBatchResponse(37)); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < BatchComputeRequestImpl.MAX_BATCH_SIZE * 2 + 37; ++i) { + batchRequest.queue(request(compute), responses); + } + + batchRequest.execute("batchContext"); + + assertThat(responses.successes).hasValue(BatchComputeRequestImpl.MAX_BATCH_SIZE * 2 + 37); + assertThat(responses.failures).hasValue(0); + } + + @Test + public void handlesErrors() throws IOException { + + StringBuilder responseContent = new StringBuilder(); + appendSuccessResponse(responseContent); + appendSuccessResponse(responseContent); + appendSuccessResponse(responseContent); + appendFailureResponse(responseContent); // FAILURE! + appendSuccessResponse(responseContent); + responseContent.append(MIME_END); + + Compute compute = computeWithResponses(() -> batchResponse(responseContent.toString())); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < 5; ++i) { + batchRequest.queue(request(compute), responses); + } + + batchRequest.execute("batchContext"); + + assertThat(responses.successes).hasValue(4); + assertThat(responses.failures).hasValue(1); + } + + @Test + public void propagatesFirstException() throws IOException { + + Compute compute = + computeWithResponses( + () -> successBatchResponse(BatchComputeRequestImpl.MAX_BATCH_SIZE), + () -> { + throw new IOException("first exception"); + }, + () -> { + throw new IOException("second exception"); + }, + () -> { + try { + Thread.sleep(Long.MAX_VALUE); + throw new AssertionError("slept forever"); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < BatchComputeRequestImpl.MAX_BATCH_SIZE * 3; ++i) { + batchRequest.queue(request(compute), responses); + } + + Throwable throwable = catchThrowable(() -> batchRequest.execute("batchContext")); + + assertThat(throwable).isInstanceOf(IOException.class).hasMessage("first exception"); + } + + @Test + public void successMetrics() throws IOException { + + Compute compute = + computeWithResponses( + () -> successBatchResponse(BatchComputeRequestImpl.MAX_BATCH_SIZE), + () -> successBatchResponse(BatchComputeRequestImpl.MAX_BATCH_SIZE), + () -> successBatchResponse(37)); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < BatchComputeRequestImpl.MAX_BATCH_SIZE * 2 + 37; ++i) { + batchRequest.queue(request(compute), responses); + } + + batchRequest.execute("batchContext"); + + assertThat(registry.timers()).hasSize(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.batchExecute"); + assertThat(timer.id().tags()) + .contains( + 
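+ // The timer is tagged with the batch context plus the overall HTTP result of the batch.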
tag("context", "batchContext"), + tag("success", "true"), + tag("status", "2xx"), + tag("statusCode", "200")); + + assertThat(registry.counters()).hasSize(1); + Counter counter = registry.counters().findFirst().orElseThrow(AssertionError::new); + assertThat(counter.id().name()).isEqualTo("google.batchSize"); + assertThat(counter.id().tags()) + .contains( + tag("context", "batchContext"), + tag("success", "true"), + tag("status", "2xx"), + tag("statusCode", "200")); + assertThat(counter.actualCount()).isEqualTo(BatchComputeRequestImpl.MAX_BATCH_SIZE * 2 + 37); + } + + @Test + public void errorMetrics() throws IOException { + + Compute compute = + computeWithResponses( + () -> { + throw new IOException("uh oh"); + }); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < 55; ++i) { + batchRequest.queue(request(compute), responses); + } + + assertThatIOException().isThrownBy(() -> batchRequest.execute("batchContext")); + + assertThat(registry.timers()).hasSize(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.batchExecute"); + assertThat(timer.id().tags()) + .contains( + tag("context", "batchContext"), + tag("success", "false"), + tag("status", "5xx"), + tag("statusCode", "500")); + + assertThat(registry.counters()).hasSize(1); + Counter counter = registry.counters().findFirst().orElseThrow(AssertionError::new); + assertThat(counter.id().name()).isEqualTo("google.batchSize"); + assertThat(counter.id().tags()) + .contains( + tag("context", "batchContext"), + tag("success", "false"), + tag("status", "5xx"), + tag("statusCode", "500")); + assertThat(counter.actualCount()).isEqualTo(55); + } + + @Test + public void httpErrorMetrics() throws IOException { + + Compute compute = + computeWithResponses( + () -> { + throw new HttpResponseException(404, "uh oh"); + }); + + BatchComputeRequest batchRequest = + new BatchComputeRequestImpl<>( + compute, registry, USER_AGENT, MoreExecutors.newDirectExecutorService()); + + CountResponses responses = new CountResponses(); + for (int i = 0; i < 55; ++i) { + batchRequest.queue(request(compute), responses); + } + + assertThatIOException().isThrownBy(() -> batchRequest.execute("batchContext")); + + assertThat(registry.timers()).hasSize(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.batchExecute"); + assertThat(timer.id().tags()) + .contains( + tag("context", "batchContext"), + tag("success", "false"), + tag("status", "4xx"), + tag("statusCode", "404")); + + assertThat(registry.counters()).hasSize(1); + Counter counter = registry.counters().findFirst().orElseThrow(AssertionError::new); + assertThat(counter.id().name()).isEqualTo("google.batchSize"); + assertThat(counter.id().tags()) + .contains( + tag("context", "batchContext"), + tag("success", "false"), + tag("status", "4xx"), + tag("statusCode", "404")); + assertThat(counter.actualCount()).isEqualTo(55); + } + + private static GoogleComputeRequest request(Compute compute) + throws IOException { + return new GoogleComputeRequestImpl<>( + compute.images().get("project", "image-name"), + new DefaultRegistry(), + /* metricName= */ "google.api", + /* tags= */ ImmutableMap.of()); + } + + @FunctionalInterface + private interface ResponseSupplier { + + LowLevelHttpResponse 
getResponse() throws IOException; + } + + private static Compute computeWithResponses(ResponseSupplier... responses) { + return new Compute( + responses(responses), GsonFactory.getDefaultInstance(), /* httpRequestInitializer= */ null); + } + + private static HttpTransport responses(ResponseSupplier... responses) { + return new HttpTransport() { + private AtomicInteger requests = new AtomicInteger(0); + + @Override + protected LowLevelHttpRequest buildRequest(String method, String url) { + int requestNum = requests.getAndIncrement(); + ResponseSupplier response; + if (requestNum < responses.length) { + response = responses[requestNum]; + } else { + response = + () -> + new MockLowLevelHttpResponse() + .setStatusCode(500) + .setContent("Sent more requests than expected."); + } + return new LowLevelHttpRequest() { + @Override + public void addHeader(String name, String value) {} + + @Override + public LowLevelHttpResponse execute() throws IOException { + return response.getResponse(); + } + }; + } + }; + } + + private static MockLowLevelHttpResponse successBatchResponse(int responses) { + return batchResponse(successBatchResponseContent(responses)); + } + + private static MockLowLevelHttpResponse batchResponse(String content) { + return new MockLowLevelHttpResponse() + .setStatusCode(200) + .addHeader("Content-Type", BATCH_CONTENT_TYPE) + .setContent(content); + } + + private static String successBatchResponseContent(int responses) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < responses; ++i) { + appendSuccessResponse(sb); + } + return sb.append(MIME_END).toString(); + } + + private static void appendSuccessResponse(StringBuilder sb) { + sb.append(MIME_PART_START) + .append("Content-Type: application/http\n") + .append('\n') + .append("HTTP/1.1 200 OK\n") + .append("Content-Type: application/json\n") + .append("\n") + .append("{\"name\":\"foobar\"}\n\n"); + } + + private static void appendFailureResponse(StringBuilder sb) { + sb.append(MIME_PART_START) + .append("Content-Type: application/http\n") + .append('\n') + .append("HTTP/1.1 500 Really Bad Error\n") + .append("Content-Type: application/json\n") + .append("\n") + .append("{}\n\n"); + } + + private static class CountResponses extends JsonBatchCallback { + AtomicInteger successes = new AtomicInteger(); + AtomicInteger failures = new AtomicInteger(); + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) { + failures.incrementAndGet(); + } + + @Override + public void onSuccess(Image image, HttpHeaders responseHeaders) { + successes.incrementAndGet(); + } + } + + private static Tag tag(String key, String value) { + return new BasicTag(key, value); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequestImplTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequestImplTest.java new file mode 100644 index 00000000000..c0513b59ea0 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/BatchPaginatedComputeRequestImplTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.mock; + +import com.google.api.client.googleapis.testing.json.GoogleJsonResponseExceptionFactoryTesting; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.Image; +import com.google.api.services.compute.model.ImageList; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.Test; + +final class BatchPaginatedComputeRequestImplTest { + + @Test + void execute() throws IOException { + + BatchPaginatedComputeRequestImpl batchRequest = + new BatchPaginatedComputeRequestImpl<>(FakeBatchComputeRequest::new); + + ImageListRequestGenerator set1 = new ImageListRequestGenerator(); + set1.itemPrefix = "set1-"; + set1.itemsPerPage = 2; + set1.pages = 3; + ImageListRequestGenerator set2 = new ImageListRequestGenerator(); + set2.itemPrefix = "noPages-"; + set2.itemsPerPage = 2; + set2.pages = 0; + ImageListRequestGenerator set3 = new ImageListRequestGenerator(); + set3.itemPrefix = "set3-"; + set3.itemsPerPage = 4; + set3.pages = 1; + + batchRequest.queue( + new PaginatedComputeRequestImpl<>(set1, ImageList::getNextPageToken, ImageList::getItems)); + batchRequest.queue( + new PaginatedComputeRequestImpl<>(set2, ImageList::getNextPageToken, ImageList::getItems)); + batchRequest.queue( + new PaginatedComputeRequestImpl<>(set3, ImageList::getNextPageToken, ImageList::getItems)); + + ImmutableSet result = batchRequest.execute("batchContext"); + + assertThat(result) + .extracting(Image::getName) + .containsExactlyInAnyOrder( + "set1-1", "set1-2", "set1-3", "set1-4", "set1-5", "set1-6", "set3-1", "set3-2", + "set3-3", "set3-4"); + } + + @Test + void nullItems() throws IOException { + + BatchPaginatedComputeRequestImpl batchRequest = + new BatchPaginatedComputeRequestImpl<>(FakeBatchComputeRequest::new); + batchRequest.queue( + new PaginatedComputeRequestImpl<>( + pageToken -> + FakeGoogleComputeRequest.createWithResponse( + new ImageList().setItems(null), mock(Compute.Images.List.class)), + ImageList::getNextPageToken, + ImageList::getItems)); + + ImmutableSet result = batchRequest.execute("batchContext"); + + assertThat(result).isEmpty(); + } + + @Test + void exception() { + + BatchPaginatedComputeRequestImpl batchRequest = + new BatchPaginatedComputeRequestImpl<>(FakeBatchComputeRequest::new); + batchRequest.queue( + new PaginatedComputeRequestImpl<>( + pageToken -> + FakeGoogleComputeRequest.createWithException( + GoogleJsonResponseExceptionFactoryTesting.newMock( + GsonFactory.getDefaultInstance(), 500, "bad news"), + mock(Compute.Images.List.class)), + ImageList::getNextPageToken, + ImageList::getItems)); + + assertThatThrownBy(() -> batchRequest.execute("batchContext")).hasMessageContaining("bad news"); + } + + private static class ImageListRequestGenerator + 
implements PaginatedComputeRequestImpl.RequestGenerator { + + String itemPrefix; + int itemsPerPage; + int pages; + + @Override + public GoogleComputeRequest createRequest(String pageToken) { + + int pageNum = 0; + if (!pageToken.isEmpty()) { + pageNum = Integer.parseInt(pageToken); + } + if (pageNum == 0 && pages == 0) { + return FakeGoogleComputeRequest.createWithResponse( + new ImageList(), mock(Compute.Images.List.class)); + } + if (pageNum >= pages) { + throw new AssertionError("requested too many pages"); + } + + List items = new ArrayList<>(); + for (int i = 1; i <= itemsPerPage; ++i) { + items.add(new Image().setName(itemPrefix + (pageNum * itemsPerPage + i))); + } + ImageList response = new ImageList().setItems(items); + + if (pageNum < pages - 1) { + response.setNextPageToken(Integer.toString(pageNum + 1)); + } + + return FakeGoogleComputeRequest.createWithResponse(response, mock(Compute.Images.List.class)); + } + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ComputeOperationMockHttpTransport.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ComputeOperationMockHttpTransport.java new file mode 100644 index 00000000000..a6913e27dfc --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ComputeOperationMockHttpTransport.java @@ -0,0 +1,45 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.LowLevelHttpRequest; +import com.google.api.client.testing.http.MockLowLevelHttpRequest; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; + +class ComputeOperationMockHttpTransport extends HttpTransport { + + private final MockLowLevelHttpResponse createOperationResponse; + + ComputeOperationMockHttpTransport(MockLowLevelHttpResponse createOperationResponse) { + this.createOperationResponse = createOperationResponse; + } + + @Override + protected LowLevelHttpRequest buildRequest(String method, String url) { + if (url.toLowerCase().contains("operation")) { + return new MockLowLevelHttpRequest(url) + .setResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent( + "" + "{" + " \"name\": \"opName\"," + " \"status\": \"DONE\"" + "}")); + } else { + return new MockLowLevelHttpRequest(url).setResponse(createOperationResponse); + } + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeBatchComputeRequest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeBatchComputeRequest.java new file mode 100644 index 00000000000..62e789059cb --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeBatchComputeRequest.java @@ -0,0 +1,74 @@ +/* + * Copyright 2019 Google, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.client.googleapis.batch.json.JsonBatchCallback;
+import com.google.api.client.googleapis.json.GoogleJsonError;
+import com.google.api.client.googleapis.json.GoogleJsonResponseException;
+import com.google.api.client.http.HttpHeaders;
+import com.google.api.services.compute.ComputeRequest;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.lang3.StringUtils;
+
+public final class FakeBatchComputeRequest<RequestT extends ComputeRequest<ResponseT>, ResponseT>
+    implements BatchComputeRequest<RequestT, ResponseT> {
+
+  private List<QueuedRequest> requests = new ArrayList<>();
+
+  @Override
+  public void queue(
+      GoogleComputeRequest<RequestT, ResponseT> request, JsonBatchCallback<ResponseT> callback) {
+    requests.add(new QueuedRequest(request, callback));
+  }
+
+  @Override
+  public void execute(String batchContext) throws IOException {
+    for (QueuedRequest request : requests) {
+      try {
+        request.callback.onSuccess(request.request.execute(), new HttpHeaders());
+      } catch (GoogleJsonResponseException e) {
+        // Exceptions created by GoogleJsonResponseExceptionFactoryTesting don't have details.
+        GoogleJsonError details = e.getDetails();
+        if (details == null) {
+          details = new GoogleJsonError();
+          details.setCode(e.getStatusCode());
+          details.setMessage(e.getMessage());
+        } else if (StringUtils.isEmpty(details.getMessage())) {
+          details.setMessage(e.getMessage());
+        }
+        request.callback.onFailure(details, e.getHeaders());
+      } catch (IOException | RuntimeException e) {
+        GoogleJsonError error = new GoogleJsonError();
+        error.setMessage(e.getMessage());
+        request.callback.onFailure(error, new HttpHeaders());
+      }
+    }
+  }
+
+  private final class QueuedRequest {
+    GoogleComputeRequest<RequestT, ResponseT> request;
+    JsonBatchCallback<ResponseT> callback;
+
+    QueuedRequest(
+        GoogleComputeRequest<RequestT, ResponseT> request, JsonBatchCallback<ResponseT> callback) {
+      this.request = request;
+      this.callback = callback;
+    }
+  }
+}
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeGoogleComputeOperationRequest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeGoogleComputeOperationRequest.java
new file mode 100644
index 00000000000..36b531c687a
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeGoogleComputeOperationRequest.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import com.google.api.services.compute.ComputeRequest;
+import com.google.api.services.compute.model.Operation;
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import java.io.IOException;
+
+public class FakeGoogleComputeOperationRequest<RequestT extends ComputeRequest<Operation>>
+    extends FakeGoogleComputeRequest<RequestT, Operation>
+    implements GoogleComputeOperationRequest<RequestT> {
+
+  private boolean waited = false;
+
+  public FakeGoogleComputeOperationRequest() {
+    this(new Operation());
+  }
+
+  public FakeGoogleComputeOperationRequest(Operation response) {
+    super(response, /* request= */ null);
+  }
+
+  @Override
+  public Operation executeAndWait(Task task, String phase) throws IOException {
+    waited = true;
+    return execute();
+  }
+
+  public boolean waitedForCompletion() {
+    return waited;
+  }
+}
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeGoogleComputeRequest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeGoogleComputeRequest.java
new file mode 100644
index 00000000000..54b7de16954
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/FakeGoogleComputeRequest.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import com.google.api.services.compute.ComputeRequest;
+import java.io.IOException;
+import javax.annotation.Nullable;
+
+public class FakeGoogleComputeRequest<RequestT extends ComputeRequest<ResponseT>, ResponseT>
+    implements GoogleComputeGetRequest<RequestT, ResponseT> {
+
+  private final RequestT request;
+  private final ResponseT response;
+  private final IOException exception;
+
+  private boolean executed = false;
+
+  public static <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      FakeGoogleComputeRequest<RequestT, ResponseT> createWithResponse(ResponseT response) {
+    return createWithResponse(response, /* request= */ null);
+  }
+
+  public static <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      FakeGoogleComputeRequest<RequestT, ResponseT> createWithResponse(
+          ResponseT response, RequestT request) {
+    return new FakeGoogleComputeRequest<>(response, request);
+  }
+
+  public static <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      FakeGoogleComputeRequest<RequestT, ResponseT> createWithException(IOException exception) {
+    return createWithException(exception, /* request= */ null);
+  }
+
+  public static <RequestT extends ComputeRequest<ResponseT>, ResponseT>
+      FakeGoogleComputeRequest<RequestT, ResponseT> createWithException(
+          IOException exception, RequestT request) {
+    return new FakeGoogleComputeRequest<>(exception, request);
+  }
+
+  FakeGoogleComputeRequest(ResponseT response, @Nullable RequestT request) {
+    checkNotNull(response);
+    this.request = request;
+    this.response = response;
+    this.exception = null;
+  }
+
+  FakeGoogleComputeRequest(IOException exception, @Nullable RequestT request) {
+    checkNotNull(exception);
+    this.request = request;
+    this.response = null;
+    this.exception = exception;
+  }
+
+  @Override
+  public ResponseT execute() throws IOException {
+    executed = true;
+    if (exception != null) {
+      throw exception;
+    }
+    return response;
+  }
+
+  @Override
+  public RequestT getRequest() {
+    if (request == null) {
+      throw new UnsupportedOperationException("FakeGoogleComputeRequest#getRequest()");
+    }
+    return request;
+  }
+
+  public boolean executed() {
+    return executed;
+  }
+}
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/GetFirstBatchComputeRequestTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/GetFirstBatchComputeRequestTest.java
new file mode 100644
index 00000000000..0dee0cbc684
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/GetFirstBatchComputeRequestTest.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.google.api.client.googleapis.testing.json.GoogleJsonResponseExceptionFactoryTesting; +import com.google.api.client.http.HttpResponseException; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.Image; +import java.io.IOException; +import java.util.Optional; +import org.junit.jupiter.api.Test; + +final class GetFirstBatchComputeRequestTest { + + @Test + void noRequests() throws IOException { + + GetFirstBatchComputeRequest batchRequest = + GetFirstBatchComputeRequest.create(new FakeBatchComputeRequest<>()); + + Optional result = batchRequest.execute("batchContext"); + + assertThat(result).isEmpty(); + } + + @Test + void returnsFirstValidResponse() throws IOException { + + GetFirstBatchComputeRequest batchRequest = + GetFirstBatchComputeRequest.create(new FakeBatchComputeRequest<>()); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("one"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("two"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("three"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("four"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithResponse(new Image().setName("five"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithResponse(new Image().setName("six"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithResponse(new Image().setName("seven"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("eight"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("nine"))); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(new IOException("ten"))); + + Optional result = batchRequest.execute("batchContext"); + + assertThat(result).map(Image::getName).hasValue("five"); + } + + @Test + void notFound() throws IOException { + + GetFirstBatchComputeRequest batchRequest = + GetFirstBatchComputeRequest.create(new FakeBatchComputeRequest<>()); + HttpResponseException notFoundException = + GoogleJsonResponseExceptionFactoryTesting.newMock( + GsonFactory.getDefaultInstance(), 404, "not found"); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + + Optional result = batchRequest.execute("batchContext"); + + assertThat(result).isEmpty(); + } + + @Test + void error() throws IOException { + + GetFirstBatchComputeRequest batchRequest = + GetFirstBatchComputeRequest.create(new FakeBatchComputeRequest<>()); + HttpResponseException notFoundException = + GoogleJsonResponseExceptionFactoryTesting.newMock( + GsonFactory.getDefaultInstance(), 404, "not found"); + HttpResponseException actualError = + GoogleJsonResponseExceptionFactoryTesting.newMock( + GsonFactory.getDefaultInstance(), 500, "bad news"); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + 
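+    // Hedged reading of the contract under test: execute() is expected to treat
+    // 404s as "not found" and keep scanning for the first hit, so only a non-404
+    // error (the 500 queued below) should propagate to the caller.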
batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(actualError)); + batchRequest.queue(FakeGoogleComputeRequest.createWithException(notFoundException)); + + assertThatThrownBy(() -> batchRequest.execute("batchContext")).hasMessageContaining("bad news"); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ImagesTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ImagesTest.java new file mode 100644 index 00000000000..d852bebc2cf --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ImagesTest.java @@ -0,0 +1,147 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIOException; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.Image; +import com.netflix.spectator.api.BasicTag; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.api.Timer; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +final class ImagesTest { + + private static final int CLOCK_STEP_TIME_MS = 1234; + private static final int CLOCK_STEP_TIME_NS = 1234 * 1000000; + + @Test + void get_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{ \"name\": \"my-wacky-image-name\" }")); + + Images imagesApi = createImages(transport); + + Image image = imagesApi.get("my-project", "my-image").execute(); + + assertThat(image.getName()).isEqualTo("my-wacky-image-name"); + } + + @Test + public void get_error() { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")); + + Images imagesApi = createImages(transport); + + assertThatIOException() + .isThrownBy(() -> imagesApi.get("my-project", "my-image").execute()) + .withMessageContaining("404"); + } + + @Test + public void get_successMetrics() throws IOException { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + HttpTransport transport = + new 
ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse().setStatusCode(200).setContent("{\"items\": []}")); + + Images imagesApi = createImages(transport, registry); + + imagesApi.get("my-project", "my-image").execute(); + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + // TODO(plumpy): Come up with something better than AccountForClient (which uses a bunch of + // global state) so that we can test for the account tags + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.images.get"), + tag("scope", "global"), + tag("status", "2xx"), + tag("success", "true")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void get_errorMetrics() { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")); + + Images imagesApi = createImages(transport, registry); + + assertThatIOException().isThrownBy(() -> imagesApi.get("my-project", "my-image").execute()); + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + // TODO(plumpy): Come up with something better than AccountForClient (which uses a bunch of + // global state) so that we can test for the account tags + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.images.get"), + tag("scope", "global"), + tag("status", "4xx"), + tag("success", "false")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + private static Images createImages(HttpTransport transport) { + return createImages(transport, new NoopRegistry()); + } + + private static Images createImages(HttpTransport transport, Registry registry) { + Compute compute = + new Compute( + transport, GsonFactory.getDefaultInstance(), /* httpRequestInitializer= */ null); + GoogleNamedAccountCredentials credentials = + new GoogleNamedAccountCredentials.Builder() + .name("plumpy") + .project("myproject") + .credentials(new FakeGoogleCredentials()) + .compute(compute) + .build(); + return new Images(credentials, new GoogleOperationPoller(), registry); + } + + private static Tag tag(String key, String value) { + return new BasicTag(key, value); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/InstanceTemplatesTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/InstanceTemplatesTest.java new file mode 100644 index 00000000000..372b680b5f3 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/InstanceTemplatesTest.java @@ -0,0 +1,284 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIOException; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockHttpTransport; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.BasicTag; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.api.Timer; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +public class InstanceTemplatesTest { + + private static final int CLOCK_STEP_TIME_MS = 1234; + private static final int CLOCK_STEP_TIME_NS = 1234 * 1000000; + + @Test + public void delete_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse().setStatusCode(200).setContent("{\"name\": \"xyzzy\"}")); + + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + instanceTemplates + .delete("my-instance-template") + .executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void delete_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + assertThatIOException() + .isThrownBy(() -> instanceTemplates.delete("my-instance-template").execute()); + } + + @Test + public void insert_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse().setStatusCode(200).setContent("{\"name\": \"xyzzy\"}")); + + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + instanceTemplates + .insert(new InstanceTemplate()) + .executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void insert_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + assertThatIOException() + .isThrownBy(() -> instanceTemplates.insert(new InstanceTemplate()).execute()); + } + + @Test + public void get_success() throws IOException { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"name\": \"my-instance-template\"}")) + .build(); + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + 
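+    // The mock transport replays the stubbed JSON body above for every request,
+    // so get("hello") simply parses that payload into an InstanceTemplate model.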
+ InstanceTemplate template = instanceTemplates.get("hello").execute(); + + assertThat(template.getName()).isEqualTo("my-instance-template"); + } + + @Test + public void get_error() { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + assertThatIOException().isThrownBy(() -> instanceTemplates.get("hello").execute()); + } + + @Test + public void get_successMetrics() throws IOException { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"name\": \"my-instance-template\"}")) + .build(); + InstanceTemplates instanceTemplates = createInstanceTemplates(transport, registry); + + instanceTemplates.get("hello").execute(); + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + // TODO(plumpy): Come up with something better than AccountForClient (which uses a bunch of + // global state) so that we can test for the account tags + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.instanceTemplates.get"), + tag("scope", "global"), + tag("status", "2xx"), + tag("success", "true")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void get_errorMetrics() { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + InstanceTemplates instanceTemplates = createInstanceTemplates(transport, registry); + + try { + instanceTemplates.get("hello").execute(); + } catch (IOException expected) { + } + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.instanceTemplates.get"), + tag("scope", "global"), + tag("status", "4xx"), + tag("success", "false")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void list_success() throws IOException { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .addHeader("Content-Type", "application/json") + .setContent( + "{\"items\": [{\"name\": \"template1\"}, {\"name\": \"template2\"}], \"nextPageToken\": \"\"}")) + .build(); + + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + PaginatedComputeRequest request = + instanceTemplates.list(); + + ImmutableList response = request.execute(); + assertThat(response).hasSize(2); + assertThat(response.get(0).getName()).isEqualTo("template1"); + assertThat(response.get(1).getName()).isEqualTo("template2"); + } + + @Test + public void list_noResults() throws IOException { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"items\": [], \"nextPageToken\": \"\"}")) + .build(); 
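+    // An empty "nextPageToken" ends the pagination loop, so this single stubbed
+    // page (with no items) should be the entire result set.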
+ + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + PaginatedComputeRequest request = + instanceTemplates.list(); + + ImmutableList response = request.execute(); + assertThat(response).isEmpty(); + } + + @Test + public void list_errorResponse() { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(500) + .setContent("{\"error\": \"Internal Server Error\"}")) + .build(); + + InstanceTemplates instanceTemplates = createInstanceTemplates(transport); + + PaginatedComputeRequest request = + instanceTemplates.list(); + + assertThatIOException().isThrownBy(() -> request.execute()); + } + + private static InstanceTemplates createInstanceTemplates(HttpTransport transport) { + return createInstanceTemplates(transport, new NoopRegistry()); + } + + private static InstanceTemplates createInstanceTemplates( + HttpTransport transport, Registry registry) { + Compute compute = + new Compute( + transport, GsonFactory.getDefaultInstance(), /* httpRequestInitializer= */ null); + GoogleNamedAccountCredentials credentials = + new GoogleNamedAccountCredentials.Builder() + .name("spin-user") + .project("myproject") + .credentials(new FakeGoogleCredentials()) + .compute(compute) + .build(); + GoogleOperationPoller poller = new GoogleOperationPoller(); + poller.setGoogleConfigurationProperties(new GoogleConfigurationProperties()); + poller.setRegistry(registry); + SafeRetry safeRetry = SafeRetry.withoutDelay(); + poller.setSafeRetry(safeRetry); + return new InstanceTemplates(credentials, poller, registry); + } + + private static Tag tag(String key, String value) { + return new BasicTag(key, value); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequestImplTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequestImplTest.java new file mode 100644 index 00000000000..c664184736b --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/PaginatedComputeRequestImplTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.google.compute;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.Mockito.mock;
+
+import com.google.api.services.compute.Compute;
+import com.google.api.services.compute.model.Image;
+import com.google.api.services.compute.model.ImageList;
+import com.google.common.collect.ImmutableList;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+final class PaginatedComputeRequestImplTest {
+
+  @Test
+  void execute() throws IOException {
+
+    ImageListRequestGenerator requestGenerator = new ImageListRequestGenerator();
+    requestGenerator.itemPrefix = "myImage-";
+    requestGenerator.itemsPerPage = 2;
+    requestGenerator.pages = 3;
+
+    PaginatedComputeRequestImpl<Compute.Images.List, ImageList, Image> request =
+        new PaginatedComputeRequestImpl<>(
+            requestGenerator, ImageList::getNextPageToken, ImageList::getItems);
+
+    ImmutableList<Image> result = request.execute();
+
+    assertThat(result)
+        .extracting(Image::getName)
+        .containsExactly(
+            "myImage-1", "myImage-2", "myImage-3", "myImage-4", "myImage-5", "myImage-6");
+  }
+
+  @Test
+  void nullItems() throws IOException {
+
+    PaginatedComputeRequestImpl<Compute.Images.List, ImageList, Image> request =
+        new PaginatedComputeRequestImpl<>(
+            pageToken ->
+                FakeGoogleComputeRequest.createWithResponse(
+                    new ImageList().setItems(null), mock(Compute.Images.List.class)),
+            ImageList::getNextPageToken,
+            ImageList::getItems);
+
+    ImmutableList<Image> result = request.execute();
+
+    assertThat(result).isEmpty();
+  }
+
+  @Test
+  void exception() {
+
+    PaginatedComputeRequestImpl<Compute.Images.List, ImageList, Image> request =
+        new PaginatedComputeRequestImpl<>(
+            pageToken ->
+                FakeGoogleComputeRequest.createWithException(
+                    new IOException("bad news"), mock(Compute.Images.List.class)),
+            ImageList::getNextPageToken,
+            ImageList::getItems);
+
+    assertThatThrownBy(request::execute).hasMessageContaining("bad news");
+  }
+
+  private static class ImageListRequestGenerator
+      implements PaginatedComputeRequestImpl.RequestGenerator<Compute.Images.List, ImageList> {
+
+    String itemPrefix;
+    int itemsPerPage;
+    int pages;
+
+    @Override
+    public GoogleComputeRequest<Compute.Images.List, ImageList> createRequest(String pageToken) {
+
+      int pageNum = 0;
+      if (!pageToken.isEmpty()) {
+        pageNum = Integer.parseInt(pageToken);
+      }
+      if (pageNum >= pages) {
+        throw new AssertionError("requested too many pages");
+      }
+
+      List<Image> items = new ArrayList<>();
+      for (int i = 1; i <= itemsPerPage; ++i) {
+        items.add(new Image().setName(itemPrefix + (pageNum * itemsPerPage + i)));
+      }
+      ImageList response = new ImageList().setItems(items);
+
+      if (pageNum < pages - 1) {
+        response.setNextPageToken(Integer.toString(pageNum + 1));
+      }
+
+      return FakeGoogleComputeRequest.createWithResponse(response, mock(Compute.Images.List.class));
+    }
+  }
+}
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/RegionGoogleServerGroupManagersTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/RegionGoogleServerGroupManagersTest.java
new file mode 100644
index 00000000000..cb81a9e0657
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/RegionGoogleServerGroupManagersTest.java
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIOException; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockHttpTransport; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.BasicTag; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.api.Timer; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +public class RegionGoogleServerGroupManagersTest { + + private static final String REGION = "us-central1"; + private static final int CLOCK_STEP_TIME_MS = 1234; + private static final int CLOCK_STEP_TIME_NS = 1234 * 1000000; + + @Test + public void abandonInstances_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent( + "" + + "{" + + " \"name\": \"xyzzy\"," + + " \"region\": \"http://compute/regions/us-central1\"" + + "}")); + + RegionGoogleServerGroupManagers managers = createManagers(transport); + + managers + .abandonInstances(ImmutableList.of("myServerGroup")) + .executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void abandonInstances_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + RegionGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException() + .isThrownBy(() -> managers.abandonInstances(ImmutableList.of("myServerGroup")).execute()); + } + + @Test + public void delete_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent( + "" + + "{" + + " \"name\": \"xyzzy\"," + + " \"region\": \"http://compute/regions/us-central1\"" + + "}")); + + RegionGoogleServerGroupManagers managers = createManagers(transport); + + managers.delete().executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void delete_failure() { + + HttpTransport transport = + new 
MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + RegionGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException().isThrownBy(() -> managers.delete().execute()); + } + + @Test + public void get_success() throws IOException { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"name\": \"myServerGroup\"}")) + .build(); + RegionGoogleServerGroupManagers managers = createManagers(transport); + + InstanceGroupManager manager = managers.get().execute(); + + assertThat(manager.getName()).isEqualTo("myServerGroup"); + } + + @Test + public void get_error() { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + RegionGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException().isThrownBy(() -> managers.get().execute()); + } + + @Test + public void get_successMetrics() throws IOException { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"name\": \"myServerGroup\"}")) + .build(); + RegionGoogleServerGroupManagers managers = createManagers(transport, registry); + + managers.get().execute(); + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + // TODO(plumpy): Come up with something better than AccountForClient (which uses a bunch of + // global state) so that we can test for the account tags + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.regionInstanceGroupManagers.get"), + tag("scope", "regional"), + tag("region", REGION), + tag("status", "2xx"), + tag("success", "true")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void get_errorMetrics() { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + RegionGoogleServerGroupManagers managers = createManagers(transport, registry); + + try { + managers.get().execute(); + } catch (IOException expected) { + } + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.regionInstanceGroupManagers.get"), + tag("scope", "regional"), + tag("region", REGION), + tag("status", "4xx"), + tag("success", "false")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void update_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent( + "" + + "{" + + " \"name\": \"xyzzy\"," + + " \"region\": \"http://compute/regions/us-central1\"" + + "}")); + + RegionGoogleServerGroupManagers managers = createManagers(transport); + + 
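+    // executeAndWait() submits the update and then polls the returned Operation
+    // through GoogleOperationPoller; ComputeOperationMockHttpTransport answers
+    // any URL containing "operation" with status DONE, so the wait returns
+    // immediately.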
managers.update(new InstanceGroupManager()).executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void update_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + RegionGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException().isThrownBy(() -> managers.update(new InstanceGroupManager()).execute()); + } + + private static RegionGoogleServerGroupManagers createManagers(HttpTransport transport) { + return createManagers(transport, new NoopRegistry()); + } + + private static RegionGoogleServerGroupManagers createManagers( + HttpTransport transport, Registry registry) { + Compute compute = + new Compute( + transport, GsonFactory.getDefaultInstance(), /* httpRequestInitializer= */ null); + GoogleNamedAccountCredentials credentials = + new GoogleNamedAccountCredentials.Builder() + .name("spin-user") + .project("myproject") + .credentials(new FakeGoogleCredentials()) + .compute(compute) + .build(); + GoogleOperationPoller poller = new GoogleOperationPoller(); + poller.setGoogleConfigurationProperties(new GoogleConfigurationProperties()); + poller.setRegistry(registry); + SafeRetry safeRetry = SafeRetry.withoutDelay(); + poller.setSafeRetry(safeRetry); + return new RegionGoogleServerGroupManagers( + credentials, poller, registry, "myInstanceGroup", REGION); + } + + private static Tag tag(String key, String value) { + return new BasicTag(key, value); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/SteppingClock.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/SteppingClock.java new file mode 100644 index 00000000000..9e9912f293d --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/SteppingClock.java @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import com.netflix.spectator.api.Clock; +import java.time.Duration; + +class SteppingClock implements Clock { + + private long currentTimeMs = 0; + private final int msAdjustmentBetweenCalls; + + public SteppingClock(int msAdjustmentBetweenCalls) { + this.msAdjustmentBetweenCalls = msAdjustmentBetweenCalls; + } + + @Override + public long wallTime() { + currentTimeMs += msAdjustmentBetweenCalls; + return currentTimeMs; + } + + @Override + public long monotonicTime() { + currentTimeMs += msAdjustmentBetweenCalls; + return Duration.ofMillis(currentTimeMs).toNanos(); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ZoneGoogleServerGroupManagersTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ZoneGoogleServerGroupManagersTest.java new file mode 100644 index 00000000000..0b4ccc72b38 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/compute/ZoneGoogleServerGroupManagersTest.java @@ -0,0 +1,269 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.compute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIOException; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockHttpTransport; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.BasicTag; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.api.Timer; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import org.junit.jupiter.api.Test; + +public class ZoneGoogleServerGroupManagersTest { + + private static final String ZONE = "us-central1-f"; + private static final int CLOCK_STEP_TIME_MS = 1234; + private static final int CLOCK_STEP_TIME_NS = 1234 * 1000000; + + @Test + public void abandonInstances_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + 
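+                // The "zone" URL in the body below is presumably what the
+                // operation poller uses to route its status checks to the zonal
+                // operations endpoint.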
.setContent( + "" + + "{" + + " \"name\": \"xyzzy\"," + + " \"zone\": \"http://compute/zones/us-central1-f\"" + + "}")); + + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + managers + .abandonInstances(ImmutableList.of("myServerGroup")) + .executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void abandonInstances_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException() + .isThrownBy(() -> managers.abandonInstances(ImmutableList.of("myServerGroup")).execute()); + } + + @Test + public void delete_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent( + "" + + "{" + + " \"name\": \"xyzzy\"," + + " \"zone\": \"http://compute/zones/us-central1-f\"" + + "}")); + + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + managers.delete().executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void delete_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException().isThrownBy(() -> managers.delete().execute()); + } + + @Test + public void get_success() throws IOException { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"name\": \"myServerGroup\"}")) + .build(); + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + InstanceGroupManager manager = managers.get().execute(); + + assertThat(manager.getName()).isEqualTo("myServerGroup"); + } + + @Test + public void get_error() { + + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException().isThrownBy(() -> managers.get().execute()); + } + + @Test + public void get_successMetrics() throws IOException { + + Registry registry = new DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent("{\"name\": \"myServerGroup\"}")) + .build(); + ZoneGoogleServerGroupManagers managers = createManagers(transport, registry); + + managers.get().execute(); + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + // TODO(plumpy): Come up with something better than AccountForClient (which uses a bunch of + // global state) so that we can test for the account tags + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.instanceGroupManagers.get"), + tag("scope", "zonal"), + tag("zone", ZONE), + tag("status", "2xx"), + tag("success", "true")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void get_errorMetrics() { + + Registry registry = new 
DefaultRegistry(new SteppingClock(CLOCK_STEP_TIME_MS)); + MockHttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + ZoneGoogleServerGroupManagers managers = createManagers(transport, registry); + + try { + managers.get().execute(); + } catch (IOException expected) { + } + + assertThat(registry.timers().count()).isEqualTo(1); + Timer timer = registry.timers().findFirst().orElseThrow(AssertionError::new); + assertThat(timer.id().name()).isEqualTo("google.api"); + assertThat(timer.id().tags()) + .contains( + tag("api", "compute.instanceGroupManagers.get"), + tag("scope", "zonal"), + tag("zone", ZONE), + tag("status", "4xx"), + tag("success", "false")); + assertThat(timer.totalTime()).isEqualTo(CLOCK_STEP_TIME_NS); + } + + @Test + public void update_success() throws IOException { + + HttpTransport transport = + new ComputeOperationMockHttpTransport( + new MockLowLevelHttpResponse() + .setStatusCode(200) + .setContent( + "" + + "{" + + " \"name\": \"xyzzy\"," + + " \"zone\": \"http://compute/zones/us-central1-f\"" + + "}")); + + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + managers.update(new InstanceGroupManager()).executeAndWait(new DefaultTask("task"), "phase"); + } + + @Test + public void update_failure() { + + HttpTransport transport = + new MockHttpTransport.Builder() + .setLowLevelHttpResponse( + new MockLowLevelHttpResponse().setStatusCode(404).setContent("{}")) + .build(); + + ZoneGoogleServerGroupManagers managers = createManagers(transport); + + assertThatIOException().isThrownBy(() -> managers.update(new InstanceGroupManager()).execute()); + } + + private static ZoneGoogleServerGroupManagers createManagers(HttpTransport transport) { + return createManagers(transport, new NoopRegistry()); + } + + private static ZoneGoogleServerGroupManagers createManagers( + HttpTransport transport, Registry registry) { + Compute compute = + new Compute( + transport, GsonFactory.getDefaultInstance(), /* httpRequestInitializer= */ null); + GoogleNamedAccountCredentials credentials = + new GoogleNamedAccountCredentials.Builder() + .name("spin-user") + .project("myproject") + .credentials(new FakeGoogleCredentials()) + .compute(compute) + .build(); + GoogleOperationPoller poller = new GoogleOperationPoller(); + poller.setGoogleConfigurationProperties(new GoogleConfigurationProperties()); + poller.setRegistry(registry); + SafeRetry safeRetry = SafeRetry.withoutDelay(); + poller.setSafeRetry(safeRetry); + return new ZoneGoogleServerGroupManagers( + credentials, poller, registry, "myInstanceGroup", ZONE); + } + + private static Tag tag(String key, String value) { + return new BasicTag(key, value); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtilSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtilSpec.groovy index cb2390b6661..d5b87bccc3e 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtilSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtilSpec.groovy @@ -1,4 +1,4 @@ -/* + /* * Copyright 2014 Google, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,36 +16,37 @@ package com.netflix.spinnaker.clouddriver.google.deploy -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.googleapis.batch.BatchRequest -import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport -import com.google.api.client.http.HttpRequest -import com.google.api.client.http.HttpRequestFactory -import com.google.api.client.http.HttpResponse -import com.google.api.client.json.jackson2.JacksonFactory -import com.google.api.services.compute.Compute -import com.google.api.services.compute.model.* -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits -import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription -import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException -import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException -import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy -import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer -import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleNetworkLoadBalancer -import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider -import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.kork.artifacts.model.Artifact -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class GCEUtilSpec extends Specification { + import com.fasterxml.jackson.databind.ObjectMapper + import com.google.api.client.googleapis.auth.oauth2.GoogleCredential + import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport + import com.google.api.client.http.HttpRequest + import com.google.api.client.http.HttpRequestFactory + import com.google.api.client.http.HttpResponse + import com.google.api.client.json.gson.GsonFactory + import com.google.api.services.compute.Compute + import com.google.api.services.compute.model.* + import com.netflix.spectator.api.DefaultRegistry + import com.netflix.spectator.api.Registry + import com.netflix.spinnaker.clouddriver.data.task.Task + import com.netflix.spinnaker.clouddriver.data.task.TaskRepository + import com.netflix.spinnaker.clouddriver.google.GoogleExecutorTraits + import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest + import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription + import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException + import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException + import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy + import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup + import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer + import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleNetworkLoadBalancer + import 
com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider + import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials + import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials + import com.netflix.spinnaker.kork.artifacts.model.Artifact + import spock.lang.Shared + import spock.lang.Specification + import spock.lang.Unroll + + class GCEUtilSpec extends Specification { class TestExecutor implements GoogleExecutorTraits { def Registry registry = new DefaultRegistry() } @@ -57,8 +58,8 @@ class GCEUtilSpec extends Specification { private static final PHASE = "SOME-PHASE" private static final INSTANCE_LOCAL_NAME_1 = "some-instance-name-1" private static final INSTANCE_LOCAL_NAME_2 = "some-instance-name-2" - private static final INSTANCE_URL_1 = "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-b/instances/$INSTANCE_LOCAL_NAME_1" - private static final INSTANCE_URL_2 = "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-b/instances/$INSTANCE_LOCAL_NAME_2" + private static final INSTANCE_URL_1 = "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-b/instances/$INSTANCE_LOCAL_NAME_1" + private static final INSTANCE_URL_2 = "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-b/instances/$INSTANCE_LOCAL_NAME_2" private static final IMAGE_PROJECT_NAME = "some-image-project" private static final GOOGLE_APPLICATION_NAME = "test" private static final BASE_IMAGE_PROJECTS = ["centos-cloud", "ubuntu-os-cloud"] @@ -87,28 +88,32 @@ class GCEUtilSpec extends Specification { def executorMock = Mock(GoogleExecutorTraits) def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def compute = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName(GOOGLE_APPLICATION_NAME).build() - def soughtImage = new Image(name: IMAGE_NAME) + GoogleNamedAccountCredentials credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME) + .compute(compute) + .credentials(new FakeGoogleCredentials(PROJECT_NAME)) + .build(); + def soughtImage = new Image(name: IMAGE_NAME) def imageList = new ImageList( - selfLink: "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", + selfLink: "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", items: [soughtImage] ) when: - def sourceImage = GCEUtil.queryImage(PROJECT_NAME, IMAGE_NAME, null, compute, taskMock, PHASE, GOOGLE_APPLICATION_NAME, BASE_IMAGE_PROJECTS, executorMock) + def sourceImage = GCEUtil.queryImage(IMAGE_NAME, credentials, taskMock, PHASE, GOOGLE_APPLICATION_NAME, BASE_IMAGE_PROJECTS, executorMock) then: 1 * executorMock.timeExecuteBatch(_, "findImage", _) >> { - BatchRequest batchRequest = it[0] - assert batchRequest.requestInfos != null + GoogleBatchRequest batchRequest = it[0] + assert batchRequest.queuedRequests.every { it.request != null } // 1 request for each of the 3 projects (PROJECT_NAME + BASE_IMAGE_PROJECTS) - assert batchRequest.requestInfos.size() == 3 - batchRequest.requestInfos.each { BatchRequest.RequestInfo requestInfo -> - requestInfo.callback.onSuccess(imageList, null) + assert batchRequest.queuedRequests.size() == 3 + 
batchRequest.queuedRequests.each { + it.callback.onSuccess(imageList, null) } } sourceImage == soughtImage @@ -119,29 +124,29 @@ class GCEUtilSpec extends Specification { def executorMock = Mock(GoogleExecutorTraits) def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def compute = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName(GOOGLE_APPLICATION_NAME).build() - def credentials = new GoogleNamedAccountCredentials.Builder().compute(compute).imageProjects([IMAGE_PROJECT_NAME]).build() + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(compute).imageProjects([IMAGE_PROJECT_NAME]).build() def soughtImage = new Image(name: IMAGE_NAME) def imageList = new ImageList( - selfLink: "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", + selfLink: "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", items: [soughtImage] ) when: - def sourceImage = GCEUtil.queryImage(PROJECT_NAME, IMAGE_NAME, credentials, compute, taskMock, PHASE, GOOGLE_APPLICATION_NAME, BASE_IMAGE_PROJECTS, executorMock) + def sourceImage = GCEUtil.queryImage(IMAGE_NAME, credentials, taskMock, PHASE, GOOGLE_APPLICATION_NAME, BASE_IMAGE_PROJECTS, executorMock) then: 1 * executorMock.timeExecuteBatch(_, "findImage", _) >> { - BatchRequest batchRequest = it[0] - assert batchRequest.requestInfos != null + GoogleBatchRequest batchRequest = it[0] + assert batchRequest.queuedRequests.every { it.request != null } // 1 request for each of the 4 projects (PROJECT_NAME + IMAGE_PROJECT_NAME + BASE_IMAGE_PROJECTS) - assert batchRequest.requestInfos.size() == 4 - batchRequest.requestInfos.each { BatchRequest.RequestInfo requestInfo -> - requestInfo.callback.onSuccess(imageList, null) + assert batchRequest.queuedRequests.size() == 4 + batchRequest.queuedRequests.each { + it.callback.onSuccess(imageList, null) } } sourceImage == soughtImage @@ -152,24 +157,28 @@ class GCEUtilSpec extends Specification { def executorMock = Mock(GoogleExecutorTraits) def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def compute = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName(GOOGLE_APPLICATION_NAME).build() + GoogleNamedAccountCredentials credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME) + .compute(compute) + .credentials(new FakeGoogleCredentials(PROJECT_NAME)) + .build(); def emptyImageList = new ImageList() when: - GCEUtil.queryImage(PROJECT_NAME, IMAGE_NAME, null, compute, taskMock, PHASE, GOOGLE_APPLICATION_NAME, BASE_IMAGE_PROJECTS, executorMock) + GCEUtil.queryImage(IMAGE_NAME, credentials, taskMock, PHASE, GOOGLE_APPLICATION_NAME, BASE_IMAGE_PROJECTS, executorMock) then: 1 * executorMock.timeExecuteBatch(_, "findImage", _) >> { - BatchRequest batchRequest = it[0] - assert batchRequest.requestInfos != null + GoogleBatchRequest batchRequest = it[0] + assert batchRequest.queuedRequests.every { it.request != null } // 1 request for each of the 3 projects (PROJECT_NAME + 
BASE_IMAGE_PROJECTS) - assert batchRequest.requestInfos.size() == 3 - batchRequest.requestInfos.each { BatchRequest.RequestInfo requestInfo -> - requestInfo.callback.onSuccess(emptyImageList, null) + assert batchRequest.queuedRequests.size() == 3 + batchRequest.queuedRequests.each { + it.callback.onSuccess(emptyImageList, null) } } thrown GoogleResourceNotFoundException @@ -206,7 +215,7 @@ class GCEUtilSpec extends Specification { def soughtImage = new Image(name: IMAGE_NAME) def artifact = Artifact.builder() .name(IMAGE_NAME) - .reference("https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/images/$IMAGE_NAME") + .reference("https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/images/$IMAGE_NAME") .type("gce/image") .build() @@ -233,7 +242,7 @@ class GCEUtilSpec extends Specification { credentials.compute >> compute credentials.project >> PROJECT_NAME def executor = GroovyMock(GoogleExecutorTraits) - def artifact = new Artifact() + def artifact = Artifact.builder().build() GroovySpy(GCEUtil, global: true) when: @@ -247,10 +256,8 @@ executor) then: - 1 * GCEUtil.queryImage(PROJECT_NAME, - IMAGE_NAME, + 1 * GCEUtil.queryImage(IMAGE_NAME, credentials, - compute, taskMock, PHASE, GOOGLE_APPLICATION_NAME, @@ -522,18 +529,18 @@ class GCEUtilSpec extends Specification { regional: isRegional, zone: ZONE, asg: [ - (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES): loadBalancerNameList, + (GCEUtil.GLOBAL_LOAD_BALANCER_NAMES): loadBalancerNameList, ], launchConfig: [ instanceTemplate: new InstanceTemplate(name: "irrelevant-instance-template-name", properties: [ 'metadata': new Metadata(items: [ new Metadata.Items( - key: (GoogleServerGroup.View.LOAD_BALANCING_POLICY), + key: (GCEUtil.LOAD_BALANCING_POLICY), value: "{\"balancingMode\": \"UTILIZATION\",\"maxUtilization\": 0.80, \"namedPorts\": [{\"name\": \"http\", \"port\": 8080}], \"capacityScaler\": 0.77}" ), new Metadata.Items( - key: (GoogleServerGroup.View.BACKEND_SERVICE_NAMES), + key: (GCEUtil.BACKEND_SERVICE_NAMES), value: backendServiceNames ) ]) @@ -552,25 +559,29 @@ class GCEUtilSpec extends Specification { googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList def task = Mock(Task) + + def googleOperationPoller = Mock(GoogleOperationPoller) + def updateOpName = 'updateOp' def bs = new BackendService(backends: []) if (lbNames) { serverGroup.launchConfig.instanceTemplate.properties.metadata.items.add( new Metadata.Items( - key: (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES), + key: (GCEUtil.GLOBAL_LOAD_BALANCER_NAMES), value: lbNames.join(",").trim() ) ) } when: - GCEUtil.addHttpLoadBalancerBackends(computeMock, new ObjectMapper(), PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock, task, "PHASE", executor) + GCEUtil.addHttpLoadBalancerBackends(computeMock, new ObjectMapper(), PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock, task, "PHASE", googleOperationPoller, executor) then: _ * computeMock.backendServices() >> backendServicesMock _ * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock _ * backendSvcGetMock.execute() >> bs _ * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock - _ * backendUpdateMock.execute() + _ * backendUpdateMock.execute() >> [name: updateOpName] // Mock for async op + _ * googleOperationPoller.waitForGlobalOperation(computeMock, PROJECT_NAME, updateOpName, null, task, _, _) _ * 
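For context on the reworked expectations above: backendServices.update() is asynchronous, so execute() hands back a compute Operation (mocked here as [name: updateOpName]) and the caller must poll that operation to completion, which is what the new waitForGlobalOperation interaction captures. A simplified sketch of the execute-then-poll flow, assuming the Compute API's RUNNING/DONE status convention; this is not GoogleOperationPoller's actual implementation:

import java.util.Map;

public class ExecuteThenPollSketch {
  // Simulated mutate call: returns an operation handle instead of a result.
  static Map<String, String> updateBackendService() {
    return Map.of("name", "updateOp", "status", "RUNNING");
  }

  // Simulated status lookup; pretend the operation finishes on the third poll.
  static String getOperationStatus(String name, int attempt) {
    return attempt < 3 ? "RUNNING" : "DONE";
  }

  public static void main(String[] args) {
    String opName = updateBackendService().get("name");

    // Poll until DONE; a production poller adds timeouts and jittered backoff
    // (compare SafeRetry.withoutDelay() in these specs, which strips the delay
    // so tests finish instantly).
    int attempt = 0;
    while (!"DONE".equals(getOperationStatus(opName, ++attempt))) {
      // sleep/backoff elided
    }
    System.out.println(opName + " completed after " + attempt + " polls");
  }
}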
computeMock.globalForwardingRules() >> globalForwardingRules _ * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList @@ -579,7 +590,7 @@ class GCEUtilSpec extends Specification { _ * computeMock.forwardingRules() >> forwardingRules _ * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList _ * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) - bs.backends.size == lbNames.size + bs.backends.size() == lbNames.size() where: isRegional | location | loadBalancerList | lbNames | backendServiceNames @@ -598,8 +609,8 @@ class GCEUtilSpec extends Specification { where: fullResourceLink << [ - "https://www.googleapis.com/compute/v1/projects/my-test-project/global/firewalls/name-a", - "www.googleapis.com/compute/v1/projects/my-test-project/global/firewalls/name-a", + "https://compute.googleapis.com/compute/v1/projects/my-test-project/global/firewalls/name-a", + "compute.googleapis.com/compute/v1/projects/my-test-project/global/firewalls/name-a", "compute/v1/projects/my-test-project/global/firewalls/name-a", "projects/my-test-project/global/firewalls/name-a" ] @@ -617,7 +628,7 @@ class GCEUtilSpec extends Specification { fullResourceLink << [ null, "", - "https://www.googleapis.com/compute/v1/my-test-project/global/firewalls/name-a" + "https://compute.googleapis.com/compute/v1/my-test-project/global/firewalls/name-a" ] } } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GoogleOperationPollerSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GoogleOperationPollerSpec.groovy index de3d1042ded..0717e5ece38 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GoogleOperationPollerSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GoogleOperationPollerSpec.groovy @@ -33,7 +33,7 @@ class GoogleOperationPollerSpec extends Specification { @Shared SafeRetry safeRetry def setupSpec() { - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "waitForOperation should query the operation at least once"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index 466bc0a5d7d..59ab7af4bfd 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/AbandonAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.AbandonAndDecrementGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.AbandonAndDecrementGoogleServerGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import 
spock.lang.Specification @@ -36,11 +36,11 @@ class AbandonAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec exten AbandonAndDecrementGoogleServerGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new AbandonAndDecrementGoogleServerGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new AbandonAndDecrementGoogleServerGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "abandonAndDecrementGoogleServerGroupDescription type returns AbandonAndDecrementGoogleServerGroupDescription and AbandonAndDecrementGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverterUnitSpec.groovy index d491c3ea19c..7b97ddbcdcb 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/BasicGoogleDeployAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -46,11 +46,11 @@ class BasicGoogleDeployAtomicOperationConverterUnitSpec extends Specification { BasicGoogleDeployAtomicOperationConverter converter def setupSpec() { - this.converter = new BasicGoogleDeployAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new BasicGoogleDeployAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "basicGoogleDeployDescription type returns BasicGoogleDeployDescription and DeployAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index 69f8890d64e..84a936b983d 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CopyLastGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -21,6 +21,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDe import com.netflix.spinnaker.clouddriver.google.deploy.ops.CopyLastGoogleServerGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -42,11 +43,11 @@ class CopyLastGoogleServerGroupAtomicOperationConverterUnitSpec extends Specific CopyLastGoogleServerGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new CopyLastGoogleServerGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new CopyLastGoogleServerGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "copyLastGoogleServerGroupDescription type returns BasicGoogleDeployDescription and CopyLastGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverterUnitSpec.groovy index c234c4e651a..738a0b0b35f 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/CreateGoogleInstanceAtomicOperationConverterUnitSpec.groovy @@ -21,6 +21,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.CreateGoogleI import com.netflix.spinnaker.clouddriver.google.deploy.ops.CreateGoogleInstanceAtomicOperation import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -38,11 +39,11 @@ class CreateGoogleInstanceAtomicOperationConverterUnitSpec extends Specification CreateGoogleInstanceAtomicOperationConverter converter def setupSpec() { - this.converter = new CreateGoogleInstanceAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new CreateGoogleInstanceAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + 
credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "createGoogleInstanceDescription type returns CreateGoogleInstanceDescription and CreateGoogleInstanceAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy index 68ea8293d05..4f7233c4cc8 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DeleteGoogleAutoscalingPolicyAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -35,11 +35,11 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec extends Spec DeleteGoogleAutoscalingPolicyAtomicOperationConverter converter def setupSpec() { - this.converter = new DeleteGoogleAutoscalingPolicyAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new DeleteGoogleAutoscalingPolicyAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "deleteGoogleScalingPolicyDescription type returns DeleteGoogleScalingPolicyDescription and DeleteGoogleScalingPolicyAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy index a3bae3b64e2..e0accb59500 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy @@ -21,6 +21,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleL import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import 
com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +38,11 @@ class DeleteGoogleLoadBalancerAtomicOperationConverterUnitSpec extends Specifica DeleteGoogleLoadBalancerAtomicOperationConverter converter def setupSpec() { - this.converter = new DeleteGoogleLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new DeleteGoogleLoadBalancerAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "deleteGoogleLoadBalancerDescription type returns DeleteGoogleLoadBalancerDescription and DeleteGoogleLoadBalancerAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy index 5f31bc49c79..ea4aa513977 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy @@ -21,6 +21,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleS import com.netflix.spinnaker.clouddriver.google.deploy.ops.DeleteGoogleSecurityGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -35,11 +36,11 @@ class DeleteGoogleSecurityGroupAtomicOperationConverterUnitSpec extends Specific DeleteGoogleSecurityGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new DeleteGoogleSecurityGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new DeleteGoogleSecurityGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "deleteGoogleSecurityGroupDescription type returns DeleteGoogleSecurityGroupDescription and DeleteGoogleSecurityGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy index d16ad341a58..36792f641eb 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.DeregisterInstancesFromGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DeregisterInstancesFromGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +37,11 @@ class DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverterUnitSpec DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter converter def setupSpec() { - this.converter = new DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new DeregisterInstancesFromGoogleLoadBalancerAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "deregisterInstancesFromGoogleLoadBalancerDescription type returns correct description and operation types"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index f16fd223116..fed7ddb6a80 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DestroyGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.DestroyGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DestroyGoogleServerGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -36,11 +36,11 @@ class DestroyGoogleServerGroupAtomicOperationConverterUnitSpec extends Specifica DestroyGoogleServerGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new DestroyGoogleServerGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new 
DestroyGoogleServerGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "destroyGoogleServerGroupDescription type returns DestroyGoogleServerGroupDescription and DestroyGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index 6be11d2e658..c0729049bbd 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DisableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.DisableGoogleServerGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +37,11 @@ class DisableGoogleServerGroupAtomicOperationConverterUnitSpec extends Specifica DisableGoogleServerGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new DisableGoogleServerGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new DisableGoogleServerGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "disableGoogleServerGroupDescription type returns EnableDisableGoogleServerGroupDescription and DisableGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index a29c69a867b..7dffc3f9268 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/EnableGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import 
com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.EnableGoogleServerGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -36,11 +36,11 @@ class EnableGoogleServerGroupAtomicOperationConverterUnitSpec extends Specificat EnableGoogleServerGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new EnableGoogleServerGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new EnableGoogleServerGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "enableGoogleServerGroupDescription type returns EnableDisableGoogleServerGroupDescription and EnableGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverterUnitSpec.groovy index 6da8cb4363a..3a588268635 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.ModifyGoogleServerGroupInstanceTemplateDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.ModifyGoogleServerGroupInstanceTemplateAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +37,11 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverterUnitSpec ex ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter converter def setupSpec() { - this.converter = new ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new ModifyGoogleServerGroupInstanceTemplateAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = 
credentialsRepository } void "modifyGoogleServerGroupInstanceTemplateDescription type returns ModifyGoogleServerGroupInstanceTemplateDescription and ModifyGoogleServerGroupInstanceTemplateAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverterUnitSpec.groovy index e6180ba2961..7a702ae797a 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RebootGoogleInstancesAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.RebootGoogleInstancesDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.RebootGoogleInstancesAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -35,11 +35,11 @@ class RebootGoogleInstancesAtomicOperationConverterUnitSpec extends Specificatio RebootGoogleInstancesAtomicOperationConverter converter def setupSpec() { - this.converter = new RebootGoogleInstancesAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new RebootGoogleInstancesAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "rebootGoogleInstancesDescription type returns RebootGoogleInstancesDescription and RebootGoogleInstancesAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy index 044f81041db..8fc9fe9f1ec 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.RegisterInstancesWithGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.RegisterInstancesWithGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import 
com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +37,11 @@ class RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverterUnitSpec ex RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter converter def setupSpec() { - this.converter = new RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new RegisterInstancesWithGoogleLoadBalancerAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "registerInstancesWithGoogleLoadBalancerDescription type returns correct description and operation types"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index d1d8c0ff3ad..d1a7071283b 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/ResizeGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -25,7 +25,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleAutoscali import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -50,12 +50,11 @@ class ResizeGoogleServerGroupAtomicOperationConverterUnitSpec extends Specificat accountName: ACCOUNT_NAME] GoogleClusterProvider googleClusterProviderMock = Mock(GoogleClusterProvider) ResizeGoogleServerGroupAtomicOperationConverter converter = - new ResizeGoogleServerGroupAtomicOperationConverter(objectMapper: mapper, - googleClusterProvider: googleClusterProviderMock) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + new ResizeGoogleServerGroupAtomicOperationConverter(googleClusterProvider: googleClusterProviderMock, objectMapper: mapper) + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository when: def description = converter.convertDescription(input) @@ -84,12 +83,11 @@ class ResizeGoogleServerGroupAtomicOperationConverterUnitSpec extends Specificat def input = [application: "app", targetSize: desired, 
region: REGION] GoogleClusterProvider googleClusterProviderMock = Mock(GoogleClusterProvider) ResizeGoogleServerGroupAtomicOperationConverter converter = - new ResizeGoogleServerGroupAtomicOperationConverter(objectMapper: mapper, - googleClusterProvider: googleClusterProviderMock) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + new ResizeGoogleServerGroupAtomicOperationConverter(googleClusterProvider: googleClusterProviderMock, objectMapper: mapper) + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository when: def description = converter.convertDescription(input) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SerializeApplicationAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SerializeApplicationAtomicOperationConverterUnitSpec.groovy index 59900084991..f15e6c076e3 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SerializeApplicationAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SerializeApplicationAtomicOperationConverterUnitSpec.groovy @@ -21,7 +21,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.converters.snapshot.SaveS import com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot.SaveSnapshotDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.snapshot.SaveSnapshotAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +37,11 @@ class SerializeApplicationAtomicOperationConverterUnitSpec extends Specification SaveSnapshotAtomicOperationConverter converter def setupSpec() { - this.converter = new SaveSnapshotAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new SaveSnapshotAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "resizeGoogleServerGroupDescription type returns ResizeGoogleServerGroupDescription and ResizeGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SetStatefulDiskAtomicOperationConverterTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SetStatefulDiskAtomicOperationConverterTest.java new file mode 100644 index 00000000000..664b3aeb914 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/SetStatefulDiskAtomicOperationConverterTest.java 
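Every converter spec in this change swaps AccountCredentialsProvider.getCredentials(...) for CredentialsRepository.getOne(...); the new Java test below performs the same wiring with Mockito rather than Spock mocks. Extracted for reference as a sketch, using only calls that appear elsewhere in this diff (the raw CredentialsRepository type mirrors the test's own usage):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials;
import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
import com.netflix.spinnaker.credentials.CredentialsRepository;

class CredentialsWiringSketch {
  // Resolve any account name to a fake credential; that is all the converter
  // tests need from the repository.
  static CredentialsRepository stubRepository() {
    CredentialsRepository repository = mock(CredentialsRepository.class);
    GoogleNamedAccountCredentials credentials =
        new GoogleNamedAccountCredentials.Builder()
            .name("spinnaker-account")
            .credentials(new FakeGoogleCredentials())
            .build();
    when(repository.getOne(any())).thenReturn(credentials);
    return repository;
  }
}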
@@ -0,0 +1,92 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.converters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.deploy.description.SetStatefulDiskDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.ops.SetStatefulDiskAtomicOperation; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.HashMap; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class SetStatefulDiskAtomicOperationConverterTest { + + private static final String ACCOUNT_NAME = "spinnaker-account"; + private static final String SERVER_GROUP_NAME = "spinnaker-test-v000"; + private static final String REGION = "us-central1"; + private static final String DEVICE_NAME = "spinnaker-test-v000-001"; + + SetStatefulDiskAtomicOperationConverter converter; + + @BeforeEach + public void setUp() { + GoogleClusterProvider clusterProvider = mock(GoogleClusterProvider.class); + GoogleComputeApiFactory serverGroupManagersFactory = mock(GoogleComputeApiFactory.class); + converter = + new SetStatefulDiskAtomicOperationConverter(clusterProvider, serverGroupManagersFactory); + + CredentialsRepository credentialsRepository = mock(CredentialsRepository.class); + GoogleNamedAccountCredentials accountCredentials = + new GoogleNamedAccountCredentials.Builder() + .name(ACCOUNT_NAME) + .credentials(new FakeGoogleCredentials()) + .build(); + when(credentialsRepository.getOne(any())).thenReturn(accountCredentials); + converter.setCredentialsRepository(credentialsRepository); + } + + @Test + public void testConvertDescription() { + Map input = new HashMap<>(); + input.put("accountName", ACCOUNT_NAME); + input.put("serverGroupName", SERVER_GROUP_NAME); + input.put("region", REGION); + input.put("deviceName", DEVICE_NAME); + SetStatefulDiskDescription description = converter.convertDescription(input); + + assertThat(description.getAccount()).isEqualTo(ACCOUNT_NAME); + assertThat(description.getServerGroupName()).isEqualTo(SERVER_GROUP_NAME); + assertThat(description.getRegion()).isEqualTo(REGION); + assertThat(description.getDeviceName()).isEqualTo(DEVICE_NAME); + } + + @Test + public void testConvertOperation() { + Map input = new HashMap<>(); + input.put("accountName", ACCOUNT_NAME); + input.put("serverGroupName", SERVER_GROUP_NAME); + input.put("region", REGION); + input.put("deviceName", DEVICE_NAME); + 
SetStatefulDiskAtomicOperation operation = converter.convertOperation(input); + + SetStatefulDiskDescription description = operation.getDescription(); + assertThat(description.getAccount()).isEqualTo(ACCOUNT_NAME); + assertThat(description.getServerGroupName()).isEqualTo(SERVER_GROUP_NAME); + assertThat(description.getRegion()).isEqualTo(REGION); + assertThat(description.getDeviceName()).isEqualTo(DEVICE_NAME); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy index cb67cec3dd6..4ed7e8a18a6 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateAndDecrementGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.TerminateAndDecrementGoogleServerGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -36,11 +36,11 @@ class TerminateAndDecrementGoogleServerGroupAtomicOperationConverterUnitSpec ext TerminateAndDecrementGoogleServerGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new TerminateAndDecrementGoogleServerGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new TerminateAndDecrementGoogleServerGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "terminateAndDecrementGoogleServerGroupDescription type returns TerminateAndDecrementGoogleServerGroupDescription and TerminateAndDecrementGoogleServerGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverterUnitSpec.groovy index 6854912fd9a..14adb6014eb 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/TerminateGoogleInstancesAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import 
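The convertDescription round trip asserted above (loosely typed map in, typed description out) is Jackson doing the heavy lifting; the Groovy specs even inject the ObjectMapper directly. A minimal illustration of the mechanism, where Description is a made-up stand-in rather than a clouddriver type, and convertValue is an assumption about the mapping call, not the converters' confirmed internals:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

public class ConvertDescriptionSketch {
  // Made-up stand-in for a *Description class; public fields are enough for
  // Jackson to bind map entries by name.
  public static class Description {
    public String account;
    public String serverGroupName;
    public String region;
  }

  public static void main(String[] args) {
    Map<String, Object> input = new HashMap<>();
    input.put("account", "spinnaker-account");
    input.put("serverGroupName", "spinnaker-test-v000");
    input.put("region", "us-central1");

    // Map loosely typed pipeline input onto the typed description.
    Description description = new ObjectMapper().convertValue(input, Description.class);
    System.out.println(description.serverGroupName + " in " + description.region);
  }
}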
com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateGoogleInstancesDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.TerminateGoogleInstancesAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -35,11 +35,11 @@ class TerminateGoogleInstancesAtomicOperationConverterUnitSpec extends Specifica TerminateGoogleInstancesAtomicOperationConverter converter def setupSpec() { - this.converter = new TerminateGoogleInstancesAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new TerminateGoogleInstancesAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "terminateGoogleInstancesDescription type returns TerminateGoogleInstancesDescription and TerminateGoogleInstancesAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy index 52953af9ec0..79e3524c147 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec.groovy @@ -21,7 +21,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleA import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleAutoscalingPolicyAtomicOperation import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -38,11 +38,12 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationConverterUnitSpec extends Spec UpsertGoogleAutoscalingPolicyAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertGoogleAutoscalingPolicyAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new UpsertGoogleAutoscalingPolicyAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository + converter.objectMapper = mapper } void "upsertGoogleScalingPolicyDescription 
type returns UpsertGoogleScalingPolicyDescription and UpsertGoogleScalingPolicyAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverterUnitSpec.groovy index 1b4b068c2bc..918ac7581ac 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleImageTagsAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleImageTagsDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleImageTagsAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -36,11 +36,11 @@ class UpsertGoogleImageTagsAtomicOperationConverterUnitSpec extends Specificatio UpsertGoogleImageTagsAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertGoogleImageTagsAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new UpsertGoogleImageTagsAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "upsertGoogleImageTagsDescription type returns UpsertGoogleImageTagsDescription and UpsertGoogleImageTagsAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy index bcefd53f78e..8886d9614ec 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverterUnitSpec.groovy @@ -25,7 +25,7 @@ import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -48,11 +48,11 @@ class 
UpsertGoogleLoadBalancerAtomicOperationConverterUnitSpec extends Specifica UpsertGoogleLoadBalancerAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new UpsertGoogleLoadBalancerAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "upsertGoogleLoadBalancerDescription type returns UpsertGoogleLoadBalancerDescription and UpsertGoogleLoadBalancerAtomicOperation"() { @@ -160,7 +160,7 @@ class UpsertGoogleLoadBalancerAtomicOperationConverterUnitSpec extends Specifica portRange: description.portRange ) List services = Utils.getBackendServicesFromHttpLoadBalancerView(httpLoadBalancer.view) - services.findAll { it.healthCheck == (hc as GoogleHealthCheck) }.size == 3 + services.findAll { it.healthCheck == (hc as GoogleHealthCheck) }.size() == 3 description.defaultService.name == DEFAULT_SERVICE description.hostRules[0].pathMatcher.defaultService.name == DEFAULT_PM_SERVICE description.hostRules[0].pathMatcher.pathRules[0].backendService.name == PM_SERVICE diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy index 992a1f909d7..9d25de8620a 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleSecurityGroupAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleSecurityGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleSecurityGroupAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -41,11 +41,11 @@ class UpsertGoogleSecurityGroupAtomicOperationConverterUnitSpec extends Specific UpsertGoogleSecurityGroupAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertGoogleSecurityGroupAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new UpsertGoogleSecurityGroupAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void 
"upsertGoogleSecurityGroupDescription type returns UpsertGoogleSecurityGroupDescription and UpsertGoogleSecurityGroupAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverterUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverterUnitSpec.groovy index 6545ea29147..82a34089cc5 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverterUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleServerGroupTagsAtomicOperationConverterUnitSpec.groovy @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleServerGroupTagsDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.UpsertGoogleServerGroupTagsAtomicOperation import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.credentials.CredentialsRepository import spock.lang.Shared import spock.lang.Specification @@ -37,11 +37,11 @@ class UpsertGoogleServerGroupTagsAtomicOperationConverterUnitSpec extends Specif UpsertGoogleServerGroupTagsAtomicOperationConverter converter def setupSpec() { - this.converter = new UpsertGoogleServerGroupTagsAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) + this.converter = new UpsertGoogleServerGroupTagsAtomicOperationConverter() + def credentialsRepository = Mock(CredentialsRepository) def mockCredentials = Mock(GoogleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider + credentialsRepository.getOne(_) >> mockCredentials + converter.credentialsRepository = credentialsRepository } void "upsertGoogleServerGroupTagsDescription type returns UpsertGoogleServerGroupTagsDescription and UpsertGoogleServerGroupTagsAtomicOperation"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy index 8f92d14aa80..f74c0b8cedb 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -18,11 +18,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops import com.google.api.services.compute.Compute import com.google.api.services.compute.model.InstanceGroupManagersAbandonInstancesRequest +import com.google.common.util.concurrent.MoreExecutors import com.netflix.spectator.api.DefaultRegistry import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import 
com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.AbandonAndDecrementGoogleServerGroupDescription +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory import com.netflix.spinnaker.clouddriver.google.model.GoogleInstance import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider @@ -39,8 +42,8 @@ class AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec extends Specif private static final PROJECT_NAME = "my_project" private static final INSTANCE_IDS = ["my-app7-dev-v000-1", "my-app7-dev-v000-2"] private static final INSTANCE_URLS = [ - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-1", - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-2" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-1", + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-2" ] def registry = new DefaultRegistry() @@ -54,7 +57,9 @@ class AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec extends Specif setup: def googleClusterProviderMock = Mock(GoogleClusterProvider) def serverGroup = new GoogleServerGroup( + name: SERVER_GROUP_NAME, regional: isRegional, + region: REGION, zone: ZONE, instances: INSTANCE_URLS.collect { new GoogleInstance( @@ -79,6 +84,7 @@ class AbandonAndDecrementGoogleServerGroupAtomicOperationUnitSpec extends Specif @Subject def operation = new AbandonAndDecrementGoogleServerGroupAtomicOperation(description) operation.registry = registry operation.googleClusterProvider = googleClusterProviderMock + operation.computeApiFactory = new GoogleComputeApiFactory(Mock(GoogleOperationPoller), registry, "user-agent", MoreExecutors.newDirectExecutorService()) when: operation.operate([]) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperationUnitSpec.groovy index 964efa4b6b1..03763ccff91 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CopyLastGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -23,6 +23,7 @@ import com.google.api.services.compute.model.Image import com.google.api.services.compute.model.InstanceProperties import com.google.api.services.compute.model.InstanceTemplate import com.google.api.services.compute.model.Scheduling +import com.google.api.services.compute.model.ShieldedVmConfig import com.netflix.spectator.api.DefaultRegistry import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository @@ -74,13 +75,14 @@ class CopyLastGoogleServerGroupAtomicOperationUnitSpec extends Specification { private static final long DISK_SIZE_GB = 100 private static final String DISK_TYPE = "pd-standard" - private static final GoogleDisk DISK_PD_STANDARD = new GoogleDisk(type: DISK_TYPE, sizeGb: DISK_SIZE_GB) + + private static final GoogleDisk 
DISK_PD_STANDARD = new GoogleDisk(type: DISK_TYPE, sizeGb: DISK_SIZE_GB, sourceImage: IMAGE) private static final String DEFAULT_NETWORK_NAME = "default" private static final String DEFAULT_NETWORK_URL = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/networks/$DEFAULT_NETWORK_NAME" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/networks/$DEFAULT_NETWORK_NAME" private static final String SUBNET_NAME = "some-subnet" private static final String SUBNET_URL = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/regions/$REGION/subnetworks/$SUBNET_NAME" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/regions/$REGION/subnetworks/$SUBNET_NAME" private static final String ACCESS_CONFIG_NAME = "External NAT" private static final String ACCESS_CONFIG_TYPE = "ONE_TO_ONE_NAT" @@ -95,6 +97,7 @@ class CopyLastGoogleServerGroupAtomicOperationUnitSpec extends Specification { private def instanceMetadata private def tags private def scheduling + private def shieldedVmConfig private def serviceAccount private def instanceProperties private def instanceTemplate @@ -107,7 +110,7 @@ class CopyLastGoogleServerGroupAtomicOperationUnitSpec extends Specification { def setup() { computeMock = Mock(Compute) - credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + credentials = new GoogleNamedAccountCredentials.Builder().name("gce").project(PROJECT_NAME).compute(computeMock).build() sourceImage = new Image(selfLink: IMAGE) network = new GoogleNetwork(selfLink: DEFAULT_NETWORK_URL) @@ -145,7 +148,7 @@ class CopyLastGoogleServerGroupAtomicOperationUnitSpec extends Specification { serverGroup = new GoogleServerGroup(name: ANCESTOR_SERVER_GROUP_NAME, zone: ZONE, - asg: [(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES): LOAD_BALANCERS, + asg: [(GCEUtil.REGIONAL_LOAD_BALANCER_NAMES): LOAD_BALANCERS, desiredCapacity: 2], launchConfig: [instanceTemplate: instanceTemplate], autoscalingPolicy: [coolDownPeriodSec: 45, diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CreateGoogleInstanceAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CreateGoogleInstanceAtomicOperationUnitSpec.groovy index aca0a889f44..a521e67f875 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CreateGoogleInstanceAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/CreateGoogleInstanceAtomicOperationUnitSpec.groovy @@ -20,14 +20,14 @@ import com.google.api.client.googleapis.auth.oauth2.GoogleCredential import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport -import com.google.api.client.json.jackson2.JacksonFactory +import com.google.api.client.json.gson.GsonFactory import com.google.api.services.compute.Compute import com.google.api.services.compute.model.Image import com.google.api.services.compute.model.ImageList import com.netflix.spectator.api.DefaultRegistry import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.config.GoogleConfiguration +import com.netflix.spinnaker.clouddriver.google.GoogleApiTestUtils import 
com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties import com.netflix.spinnaker.clouddriver.google.deploy.description.CreateGoogleInstanceDescription import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException @@ -36,7 +36,8 @@ import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.google.security.TestDefaults -import com.netflix.spinnaker.clouddriver.google.GoogleApiTestUtils +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest +import com.netflix.spinnaker.config.GoogleConfiguration import groovy.mock.interceptor.MockFor import spock.lang.Specification import spock.lang.Subject @@ -59,7 +60,7 @@ class CreateGoogleInstanceAtomicOperationUnitSpec extends Specification implemen void "should create instance"() { setup: def computeMock = new MockFor(Compute) - def batchMock = new MockFor(BatchRequest) + def googleBatchMock = new MockFor(GoogleBatchRequest) def imageProjects = [PROJECT_NAME] + BASE_IMAGE_PROJECTS def listMock = new MockFor(Compute.Images.List) @@ -68,28 +69,27 @@ class CreateGoogleInstanceAtomicOperationUnitSpec extends Specification implemen def instancesInsertMock = Mock(Compute.Instances.Insert) def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def images = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName("test").build().images() - computeMock.demand.batch { new BatchRequest(httpTransport, httpRequestInitializer) } - JsonBatchCallback callback = null for (def imageProject : imageProjects) { computeMock.demand.images { return images } listMock.demand.setFilter { } - listMock.demand.queue { imageListBatch, imageListCallback -> + googleBatchMock.demand.queue { imageList, imageListCallback -> callback = imageListCallback } } - batchMock.demand.size { return 1 } - batchMock.demand.execute { + googleBatchMock.demand.size() { return 1 } + + googleBatchMock.demand.execute { def imageList = new ImageList( - selfLink: "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", + selfLink: "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", items: [new Image(name: IMAGE)] ) callback.onSuccess(imageList, null) @@ -100,7 +100,7 @@ class CreateGoogleInstanceAtomicOperationUnitSpec extends Specification implemen computeMock.ignore('asBoolean') when: - batchMock.use { + googleBatchMock.use { computeMock.use { listMock.use { def compute = new Compute.Builder( @@ -167,7 +167,7 @@ class CreateGoogleInstanceAtomicOperationUnitSpec extends Specification implemen def listMock = new MockFor(Compute.Images.List) def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def images = new Compute.Builder( diff --git 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy index a3f8a14f0c7..ffad9fd6703 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy @@ -24,10 +24,15 @@ import com.netflix.spectator.api.DefaultRegistry import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.GoogleApiTestUtils +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry +import com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessor +import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor import spock.lang.Specification import spock.lang.Subject import spock.lang.Unroll @@ -39,6 +44,12 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification private static final PROJECT_NAME = "my-project" private static final ZONE = "us-central1-f" + GoogleClusterProvider googleClusterProviderMock = Mock(GoogleClusterProvider) + Compute computeMock = Mock(Compute) + GoogleOperationPoller operationPollerMock = Mock(GoogleOperationPoller) + AtomicOperationsRegistry atomicOperationsRegistry = Mock(AtomicOperationsRegistry) + DefaultOrchestrationProcessor orchestrationProcessorMock = Mock(DefaultOrchestrationProcessor) + def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) } @@ -47,8 +58,6 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "should delete zonal and regional autoscaling policy"() { setup: def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) - def computeMock = Mock(Compute) // zonal setup def autoscalersMock = Mock(Compute.Autoscalers) @@ -70,9 +79,8 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification region: REGION, accountName: ACCOUNT_NAME, credentials: credentials) - @Subject def operation = Spy(DeleteGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(DeleteGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock, atomicOperationsRegistry, orchestrationProcessorMock]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.operate([]) @@ -84,11 +92,11 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification if (isRegional) { 1 * computeMock.regionAutoscalers() >> regionAutoscalersMock 1 * 
regionAutoscalersMock.delete(PROJECT_NAME, REGION, SERVER_GROUP_NAME) >> regionDeleteMock - 1 * regionDeleteMock.execute() + 1 * regionDeleteMock.execute() >> [name: 'deleteOp'] } else { 1 * computeMock.autoscalers() >> autoscalersMock 1 * autoscalersMock.delete(PROJECT_NAME, ZONE, SERVER_GROUP_NAME) >> deleteMock - 1 * deleteMock.execute() + 1 * deleteMock.execute() >> [name: 'deleteOp'] } registry.timer(regionalTimerId).count() == (isRegional ? 1 : 0) registry.timer(zonalTimerId).count() == (isRegional ? 0 : 1) @@ -101,7 +109,6 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "should delete zonal and regional autoHealing policy"() { setup: def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) def computeMock = Mock(Compute) def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() def description = new DeleteGoogleAutoscalingPolicyDescription( @@ -131,9 +138,8 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification "compute.regionInstanceGroupManagers.setAutoHealingPolicies", [scope: "regional", region: REGION]) - @Subject def operation = Spy(DeleteGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(DeleteGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock, atomicOperationsRegistry, orchestrationProcessorMock]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.operate([]) @@ -145,9 +151,11 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification if (isRegional) { computeMock.regionInstanceGroupManagers() >> regionalManagerMock regionalManagerMock.setAutoHealingPolicies(PROJECT_NAME, REGION, SERVER_GROUP_NAME, regionalRequest) >> regionalSetAutoHealingPolicyMock + regionalSetAutoHealingPolicyMock.execute() >> [name: 'autoHealingOp'] } else { computeMock.instanceGroupManagers() >> zonalManagerMock zonalManagerMock.setAutoHealingPolicies(PROJECT_NAME, ZONE, SERVER_GROUP_NAME, zonalRequest) >> zonalSetAutoHealingPolicyMock + zonalSetAutoHealingPolicyMock.execute() >> [name: 'autoHealingOp'] } registry.timer(regionalTimerId).count() == (isRegional ? 1 : 0) registry.timer(zonalTimerId).count() == (isRegional ? 
0 : 1) @@ -159,7 +167,6 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "delete the instance template when deletePolicyMetadata is called"() { given: def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) def computeMock = Mock(Compute) def autoscaler = [:] @@ -181,13 +188,12 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def template = new InstanceTemplate(properties: [ disks: [[getBoot: { return [initializeParams: [sourceImage: 'images/sourceImage']] }, initializeParams: [diskType: 'huge', diskSizeGb: 42], autoDelete: false]], name: 'template', - networkInterfaces: [[network: 'networks/my-network']], + networkInterfaces: [[network: "projects/$PROJECT_NAME/networks/my-network"]], serviceAccounts: [[email: 'serviceAccount@google.com']] ]) - @Subject def operation = Spy(DeleteGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(DeleteGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock, atomicOperationsRegistry, orchestrationProcessorMock]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.deletePolicyMetadata(computeMock, credentials, PROJECT_NAME, groupUrl) @@ -208,7 +214,7 @@ class DeleteGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification where: isRegional | location | groupUrl - false | ZONE | "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/zones/us-central1-f/autoscalers/okra-auto-v005" - true | REGION | "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/regions/us-central1/autoscalers/okra-auto-v005" + false | ZONE | "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/zones/us-central1-f/autoscalers/okra-auto-v005" + true | REGION | "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/regions/us-central1/autoscalers/okra-auto-v005" } } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleSecurityGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleSecurityGroupAtomicOperationUnitSpec.groovy index 877eb42fa33..34570352d89 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleSecurityGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeleteGoogleSecurityGroupAtomicOperationUnitSpec.groovy @@ -44,7 +44,7 @@ class DeleteGoogleSecurityGroupAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete firewall rule"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationUnitSpec.groovy index 075cf63772a..a977573e80b 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DeregisterInstancesFromGoogleLoadBalancerAtomicOperationUnitSpec.groovy @@ -50,9 +50,9 @@ class DeregisterInstancesFromGoogleLoadBalancerAtomicOperationUnitSpec extends S private static final INSTANCE_ID1 = "my-app7-dev-v000-instance1" private static final INSTANCE_ID2 = "my-app7-dev-v000-instance2" private static final INSTANCE_URL1 = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID1" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID1" private static final INSTANCE_URL2 = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID2" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID2" private static final INSTANCE_IDS = [INSTANCE_ID1, INSTANCE_ID2] private static final INSTANCE_URLS = [INSTANCE_URL1, INSTANCE_URL2] @@ -63,7 +63,7 @@ class DeregisterInstancesFromGoogleLoadBalancerAtomicOperationUnitSpec extends S def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should deregister instances"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperationUnitSpec.groovy index 43d143e1639..502aa047d20 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DestroyGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -22,10 +22,13 @@ import com.google.api.client.http.HttpHeaders import com.google.api.client.http.HttpResponseException import com.google.api.services.compute.Compute import com.google.api.services.compute.model.* +import com.google.common.util.concurrent.MoreExecutors import com.netflix.frigga.Names import com.netflix.spectator.api.DefaultRegistry import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.google.GoogleApiTestUtils +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller @@ -38,8 +41,6 @@ import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleIntern import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.google.GoogleApiTestUtils - import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -66,569 +67,583 @@ class 
DestroyGoogleServerGroupAtomicOperationUnitSpec extends Specification { TaskRepository.threadLocalTask.set(Mock(Task)) // Yes this can affect other tests; but only in a good way. - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete managed instance group"() { setup: - def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) - def serverGroup = - new GoogleServerGroup(region: REGION, - zone: ZONE, - launchConfig: [instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME)]).view - def computeMock = Mock(Compute) - def instanceGroupManagersMock = Mock(Compute.InstanceGroupManagers) - def zoneOperations = Mock(Compute.ZoneOperations) - def zoneOperationsGet = Mock(Compute.ZoneOperations.Get) - def instanceGroupManagersDeleteMock = Mock(Compute.InstanceGroupManagers.Delete) - def instanceGroupManagersDeleteOp = new Operation(name: INSTANCE_GROUP_OP_NAME, status: DONE) - def instanceTemplatesMock = Mock(Compute.InstanceTemplates) - def instanceTemplatesDeleteMock = Mock(Compute.InstanceTemplates.Delete) - def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) - - def forwardingRules = Mock(Compute.ForwardingRules) - def forwardingRulesList = Mock(Compute.ForwardingRules.List) - def globalForwardingRules = Mock(Compute.GlobalForwardingRules) - def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) - def targetSslProxies = Mock(Compute.TargetSslProxies) - def targetSslProxiesList = Mock(Compute.TargetSslProxies.List) - def targetTcpProxies = Mock(Compute.TargetTcpProxies) - def targetTcpProxiesList = Mock(Compute.TargetTcpProxies.List) - - googleLoadBalancerProviderMock.getApplicationLoadBalancers(APPLICATION_NAME) >> [] - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() - def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, - region: REGION, - accountName: ACCOUNT_NAME, - credentials: credentials) - @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) - operation.googleOperationPoller = - new GoogleOperationPoller( - googleConfigurationProperties: new GoogleConfigurationProperties(), - threadSleeper: threadSleeperMock, - registry: registry, - safeRetry: safeRetry - ) - operation.registry = registry - operation.safeRetry = safeRetry - operation.googleClusterProvider = googleClusterProviderMock - operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + def registry = new DefaultRegistry() + def googleClusterProviderMock = Mock(GoogleClusterProvider) + def serverGroup = + new GoogleServerGroup(name: SERVER_GROUP_NAME, + region: REGION, + zone: ZONE, + launchConfig: [instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME)]).view + def computeMock = Mock(Compute) + def instanceGroupManagersMock = Mock(Compute.InstanceGroupManagers) + def zoneOperations = Mock(Compute.ZoneOperations) + def zoneOperationsGet = Mock(Compute.ZoneOperations.Get) + def instanceGroupManagersDeleteMock = Mock(Compute.InstanceGroupManagers.Delete) + def instanceGroupManagersDeleteOp = new Operation(name: INSTANCE_GROUP_OP_NAME, status: DONE, zone: ZONE, targetLink: "/${SERVER_GROUP_NAME}") + def instanceTemplatesMock = Mock(Compute.InstanceTemplates) + def instanceTemplatesDeleteMock = Mock(Compute.InstanceTemplates.Delete) + def googleLoadBalancerProviderMock = 
Mock(GoogleLoadBalancerProvider) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesList = Mock(Compute.ForwardingRules.List) + def globalForwardingRules = Mock(Compute.GlobalForwardingRules) + def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) + def targetSslProxies = Mock(Compute.TargetSslProxies) + def targetSslProxiesList = Mock(Compute.TargetSslProxies.List) + def targetTcpProxies = Mock(Compute.TargetTcpProxies) + def targetTcpProxiesList = Mock(Compute.TargetTcpProxies.List) + + googleLoadBalancerProviderMock.getApplicationLoadBalancers(APPLICATION_NAME) >> [] + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + operation.googleClusterProvider = googleClusterProviderMock + operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + operation.computeApiFactory = new GoogleComputeApiFactory(operation.googleOperationPoller, registry, "user-agent", MoreExecutors.newDirectExecutorService()) when: - operation.operate([]) + operation.operate([]) then: - 1 * googleClusterProviderMock.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup + 1 * googleClusterProviderMock.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup - 1 * computeMock.instanceGroupManagers() >> instanceGroupManagersMock - 1 * instanceGroupManagersMock.delete(PROJECT_NAME, ZONE, SERVER_GROUP_NAME) >> instanceGroupManagersDeleteMock - 1 * instanceGroupManagersDeleteMock.execute() >> instanceGroupManagersDeleteOp + 1 * computeMock.instanceGroupManagers() >> instanceGroupManagersMock + 1 * instanceGroupManagersMock.delete(PROJECT_NAME, ZONE, SERVER_GROUP_NAME) >> instanceGroupManagersDeleteMock + 1 * instanceGroupManagersDeleteMock.execute() >> instanceGroupManagersDeleteOp - 1 * computeMock.zoneOperations() >> zoneOperations - 1 * zoneOperations.get(PROJECT_NAME, ZONE, INSTANCE_GROUP_OP_NAME) >> zoneOperationsGet - 1 * zoneOperationsGet.execute() >> instanceGroupManagersDeleteOp + 1 * computeMock.zoneOperations() >> zoneOperations + 1 * zoneOperations.get(PROJECT_NAME, ZONE, INSTANCE_GROUP_OP_NAME) >> zoneOperationsGet + 1 * zoneOperationsGet.execute() >> instanceGroupManagersDeleteOp - 1 * computeMock.instanceTemplates() >> instanceTemplatesMock - 1 * instanceTemplatesMock.delete(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesDeleteMock - 1 * instanceTemplatesDeleteMock.execute() + 1 * computeMock.instanceTemplates() >> instanceTemplatesMock + 1 * instanceTemplatesMock.delete(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesDeleteMock + 1 * instanceTemplatesDeleteMock.execute() - 1 * computeMock.targetSslProxies() >> targetSslProxies - 1 * targetSslProxies.list(PROJECT_NAME) >> targetSslProxiesList - 1 * targetSslProxiesList.execute() >> new TargetSslProxyList(items: []) + 1 * computeMock.targetSslProxies() >> targetSslProxies + 1 * targetSslProxies.list(PROJECT_NAME) >> targetSslProxiesList + 1 * 
targetSslProxiesList.execute() >> new TargetSslProxyList(items: []) - 1 * computeMock.targetTcpProxies() >> targetTcpProxies - 1 * targetTcpProxies.list(PROJECT_NAME) >> targetTcpProxiesList - 1 * targetTcpProxiesList.execute() >> new TargetTcpProxyList(items: []) + 1 * computeMock.targetTcpProxies() >> targetTcpProxies + 1 * targetTcpProxies.list(PROJECT_NAME) >> targetTcpProxiesList + 1 * targetTcpProxiesList.execute() >> new TargetTcpProxyList(items: []) - 3 * computeMock.globalForwardingRules() >> globalForwardingRules - 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList - 3 * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) + 3 * computeMock.globalForwardingRules() >> globalForwardingRules + 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList + 3 * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) - 1 * computeMock.forwardingRules() >> forwardingRules - 1 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList - 1 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) + 1 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList + 1 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) } @Unroll - void "should delete managed instance group and autoscaler if defined"() { + void "should delete managed instance group and autoscaler if defined (isRegional: #isRegional)"() { setup: - def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) - def serverGroup = - new GoogleServerGroup(region: REGION, - regional: isRegional, - zone: ZONE, - launchConfig: [instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME)], - autoscalingPolicy: [coolDownPeriodSec: 45, - minNumReplicas: 2, - maxNumReplicas: 5]).view - def computeMock = Mock(Compute) - def regionInstanceGroupManagersMock = Mock(Compute.RegionInstanceGroupManagers) - def instanceGroupManagersMock = Mock(Compute.InstanceGroupManagers) - def regionOperations = Mock(Compute.RegionOperations) - def regionOperationsGet = Mock(Compute.RegionOperations.Get) - def zoneOperations = Mock(Compute.ZoneOperations) - def zoneOperationsGet = Mock(Compute.ZoneOperations.Get) - def regionInstanceGroupManagersDeleteMock = Mock(Compute.RegionInstanceGroupManagers.Delete) - def regionalInstanceGroupTimerId = GoogleApiTestUtils.makeOkId( - registry, "compute.regionInstanceGroupManagers.delete", - [scope: "regional", region: REGION]) - def instanceGroupManagersDeleteMock = Mock(Compute.InstanceGroupManagers.Delete) - def instanceGroupManagersDeleteOp = new Operation(name: INSTANCE_GROUP_OP_NAME, status: DONE) - def zonalInstanceGroupTimerId = GoogleApiTestUtils.makeOkId( - registry, "compute.instanceGroupManagers.delete", - [scope: "zonal", zone: ZONE]) - - def instanceTemplatesMock = Mock(Compute.InstanceTemplates) - def instanceTemplatesDeleteMock = Mock(Compute.InstanceTemplates.Delete) - def regionAutoscalersMock = Mock(Compute.RegionAutoscalers) - def regionAutoscalersDeleteMock = Mock(Compute.RegionAutoscalers.Delete) - def regionalAutoscalerTimerId = GoogleApiTestUtils.makeOkId( - registry, "compute.regionAutoscalers.delete", - [scope: "regional", region: REGION]) - def autoscalersMock = Mock(Compute.Autoscalers) - def autoscalersDeleteMock = Mock(Compute.Autoscalers.Delete) - def autoscalersDeleteOp = new Operation(name: AUTOSCALERS_OP_NAME, status: DONE) - def zonalAutoscalerTimerId = GoogleApiTestUtils.makeOkId( - 
registry, "compute.autoscalers.delete", - [scope: "zonal", zone: ZONE]) - - def forwardingRules = Mock(Compute.ForwardingRules) - def forwardingRulesList = Mock(Compute.ForwardingRules.List) - def globalForwardingRules = Mock(Compute.GlobalForwardingRules) - def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) - def targetSslProxies = Mock(Compute.TargetSslProxies) - def targetSslProxiesList = Mock(Compute.TargetSslProxies.List) - def targetTcpProxies = Mock(Compute.TargetTcpProxies) - def targetTcpProxiesList = Mock(Compute.TargetTcpProxies.List) - - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() - def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, - region: REGION, - accountName: ACCOUNT_NAME, - credentials: credentials) - def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) - googleLoadBalancerProviderMock.getApplicationLoadBalancers(APPLICATION_NAME) >> [] - @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) - operation.googleOperationPoller = - new GoogleOperationPoller( - googleConfigurationProperties: new GoogleConfigurationProperties(), - threadSleeper: threadSleeperMock, - registry: registry, - safeRetry: safeRetry - ) - operation.registry = registry - operation.safeRetry = safeRetry - operation.googleClusterProvider = googleClusterProviderMock - operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + def registry = new DefaultRegistry() + def googleClusterProviderMock = Mock(GoogleClusterProvider) + def serverGroup = + new GoogleServerGroup(name: SERVER_GROUP_NAME, + region: REGION, + regional: isRegional, + zone: ZONE, + launchConfig: [instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME)], + autoscalingPolicy: [coolDownPeriodSec: 45, + minNumReplicas : 2, + maxNumReplicas : 5]).view + def computeMock = Mock(Compute) + def regionInstanceGroupManagersMock = Mock(Compute.RegionInstanceGroupManagers) + def instanceGroupManagersMock = Mock(Compute.InstanceGroupManagers) + def regionOperations = Mock(Compute.RegionOperations) + def regionOperationsGet = Mock(Compute.RegionOperations.Get) + def zoneOperations = Mock(Compute.ZoneOperations) + def zoneOperationsGet = Mock(Compute.ZoneOperations.Get) + def regionInstanceGroupManagersDeleteMock = Mock(Compute.RegionInstanceGroupManagers.Delete) + def regionalInstanceGroupTimerId = GoogleApiTestUtils.makeOkId( + registry, "compute.regionInstanceGroupManagers.delete", + [scope: "regional", region: REGION]) + def instanceGroupManagersDeleteMock = Mock(Compute.InstanceGroupManagers.Delete) + def instanceGroupManagersDeleteOp = new Operation(name: INSTANCE_GROUP_OP_NAME, status: DONE, zone: ZONE, region: REGION, targetLink: "/${SERVER_GROUP_NAME}") + def zonalInstanceGroupTimerId = GoogleApiTestUtils.makeOkId( + registry, "compute.instanceGroupManagers.delete", + [scope: "zonal", zone: ZONE]) + + def instanceTemplatesMock = Mock(Compute.InstanceTemplates) + def instanceTemplatesDeleteMock = Mock(Compute.InstanceTemplates.Delete) + def regionAutoscalersMock = Mock(Compute.RegionAutoscalers) + def regionAutoscalersDeleteMock = Mock(Compute.RegionAutoscalers.Delete) + def regionalAutoscalerTimerId = GoogleApiTestUtils.makeOkId( + registry, "compute.regionAutoscalers.delete", + [scope: "regional", region: REGION]) + def autoscalersMock = Mock(Compute.Autoscalers) + def autoscalersDeleteMock = Mock(Compute.Autoscalers.Delete) + def 
autoscalersDeleteOp = new Operation(name: AUTOSCALERS_OP_NAME, status: DONE) + def zonalAutoscalerTimerId = GoogleApiTestUtils.makeOkId( + registry, "compute.autoscalers.delete", + [scope: "zonal", zone: ZONE]) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesList = Mock(Compute.ForwardingRules.List) + def globalForwardingRules = Mock(Compute.GlobalForwardingRules) + def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) + def targetSslProxies = Mock(Compute.TargetSslProxies) + def targetSslProxiesList = Mock(Compute.TargetSslProxies.List) + def targetTcpProxies = Mock(Compute.TargetTcpProxies) + def targetTcpProxiesList = Mock(Compute.TargetTcpProxies.List) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) + googleLoadBalancerProviderMock.getApplicationLoadBalancers(APPLICATION_NAME) >> [] + @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + operation.googleClusterProvider = googleClusterProviderMock + operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + operation.computeApiFactory = new GoogleComputeApiFactory(operation.googleOperationPoller, registry, "user-agent", MoreExecutors.newDirectExecutorService()) when: - operation.operate([]) + operation.operate([]) then: - 1 * googleClusterProviderMock.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup - - 3 * computeMock.globalForwardingRules() >> globalForwardingRules - 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList - 3 * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) - - 1 * computeMock.targetSslProxies() >> targetSslProxies - 1 * targetSslProxies.list(PROJECT_NAME) >> targetSslProxiesList - 1 * targetSslProxiesList.execute() >> new TargetSslProxyList(items: []) - - 1 * computeMock.targetTcpProxies() >> targetTcpProxies - 1 * targetTcpProxies.list(PROJECT_NAME) >> targetTcpProxiesList - 1 * targetTcpProxiesList.execute() >> new TargetTcpProxyList(items: []) - - 1 * computeMock.forwardingRules() >> forwardingRules - 1 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList - 1 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) - - if (isRegional) { - 1 * computeMock.regionAutoscalers() >> regionAutoscalersMock - 1 * regionAutoscalersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> regionAutoscalersDeleteMock - 1 * regionAutoscalersDeleteMock.execute() >> autoscalersDeleteOp - - 1 * computeMock.regionOperations() >> regionOperations - 1 * regionOperations.get(PROJECT_NAME, location, AUTOSCALERS_OP_NAME) >> regionOperationsGet - 1 * regionOperationsGet.execute() >> autoscalersDeleteOp - } else { - 1 * computeMock.autoscalers() >> autoscalersMock - 1 * autoscalersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> autoscalersDeleteMock - 1 * autoscalersDeleteMock.execute() >> autoscalersDeleteOp - - 1 * computeMock.zoneOperations() >> zoneOperations - 
1 * zoneOperations.get(PROJECT_NAME, location, AUTOSCALERS_OP_NAME) >> zoneOperationsGet - 1 * zoneOperationsGet.execute() >> autoscalersDeleteOp - } - registry.timer(regionalAutoscalerTimerId).count() == (isRegional ? 1 : 0) - registry.timer(zonalAutoscalerTimerId).count() == (isRegional ? 0 : 1) + 1 * googleClusterProviderMock.getServerGroup(ACCOUNT_NAME, REGION, SERVER_GROUP_NAME) >> serverGroup + + 3 * computeMock.globalForwardingRules() >> globalForwardingRules + 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList + 3 * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) + + 1 * computeMock.targetSslProxies() >> targetSslProxies + 1 * targetSslProxies.list(PROJECT_NAME) >> targetSslProxiesList + 1 * targetSslProxiesList.execute() >> new TargetSslProxyList(items: []) + + 1 * computeMock.targetTcpProxies() >> targetTcpProxies + 1 * targetTcpProxies.list(PROJECT_NAME) >> targetTcpProxiesList + 1 * targetTcpProxiesList.execute() >> new TargetTcpProxyList(items: []) + + 1 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList + 1 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) + + if (isRegional) { + 1 * computeMock.regionAutoscalers() >> regionAutoscalersMock + 1 * regionAutoscalersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> regionAutoscalersDeleteMock + 1 * regionAutoscalersDeleteMock.execute() >> autoscalersDeleteOp + + 1 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, location, AUTOSCALERS_OP_NAME) >> regionOperationsGet + 1 * regionOperationsGet.execute() >> autoscalersDeleteOp + } else { + 1 * computeMock.autoscalers() >> autoscalersMock + 1 * autoscalersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> autoscalersDeleteMock + 1 * autoscalersDeleteMock.execute() >> autoscalersDeleteOp + + 1 * computeMock.zoneOperations() >> zoneOperations + 1 * zoneOperations.get(PROJECT_NAME, location, AUTOSCALERS_OP_NAME) >> zoneOperationsGet + 1 * zoneOperationsGet.execute() >> autoscalersDeleteOp + } + registry.timer(regionalAutoscalerTimerId).count() == (isRegional ? 1 : 0) + registry.timer(zonalAutoscalerTimerId).count() == (isRegional ? 0 : 1) then: - if (isRegional) { - 1 * computeMock.regionInstanceGroupManagers() >> regionInstanceGroupManagersMock - 1 * regionInstanceGroupManagersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> regionInstanceGroupManagersDeleteMock - 1 * regionInstanceGroupManagersDeleteMock.execute() >> instanceGroupManagersDeleteOp - - 1 * computeMock.regionOperations() >> regionOperations - 1 * regionOperations.get(PROJECT_NAME, location, INSTANCE_GROUP_OP_NAME) >> regionOperationsGet - 1 * regionOperationsGet.execute() >> instanceGroupManagersDeleteOp - } else { - 1 * computeMock.instanceGroupManagers() >> instanceGroupManagersMock - 1 * instanceGroupManagersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> instanceGroupManagersDeleteMock - 1 * instanceGroupManagersDeleteMock.execute() >> instanceGroupManagersDeleteOp - - 1 * computeMock.zoneOperations() >> zoneOperations - 1 * zoneOperations.get(PROJECT_NAME, location, INSTANCE_GROUP_OP_NAME) >> zoneOperationsGet - 1 * zoneOperationsGet.execute() >> instanceGroupManagersDeleteOp - } - registry.timer(regionalInstanceGroupTimerId).count() == (isRegional ? 1 : 0) - registry.timer(zonalInstanceGroupTimerId).count() == (isRegional ? 
0 : 1) + if (isRegional) { + 1 * computeMock.regionInstanceGroupManagers() >> regionInstanceGroupManagersMock + 1 * regionInstanceGroupManagersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> regionInstanceGroupManagersDeleteMock + 1 * regionInstanceGroupManagersDeleteMock.execute() >> instanceGroupManagersDeleteOp + + 1 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, location, INSTANCE_GROUP_OP_NAME) >> regionOperationsGet + 1 * regionOperationsGet.execute() >> instanceGroupManagersDeleteOp + } else { + 1 * computeMock.instanceGroupManagers() >> instanceGroupManagersMock + 1 * instanceGroupManagersMock.delete(PROJECT_NAME, location, SERVER_GROUP_NAME) >> instanceGroupManagersDeleteMock + 1 * instanceGroupManagersDeleteMock.execute() >> instanceGroupManagersDeleteOp + + 1 * computeMock.zoneOperations() >> zoneOperations + 1 * zoneOperations.get(PROJECT_NAME, location, INSTANCE_GROUP_OP_NAME) >> zoneOperationsGet + 1 * zoneOperationsGet.execute() >> instanceGroupManagersDeleteOp + } + registry.timer(regionalInstanceGroupTimerId).count() == (isRegional ? 1 : 0) + registry.timer(zonalInstanceGroupTimerId).count() == (isRegional ? 0 : 1) then: - 1 * computeMock.instanceTemplates() >> instanceTemplatesMock - 1 * instanceTemplatesMock.delete(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesDeleteMock - 1 * instanceTemplatesDeleteMock.execute() + 1 * computeMock.instanceTemplates() >> instanceTemplatesMock + 1 * instanceTemplatesMock.delete(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesDeleteMock + 1 * instanceTemplatesDeleteMock.execute() where: - isRegional | location - false | ZONE - true | REGION + isRegional | location + false | ZONE + true | REGION } @Unroll void "should delete http loadbalancer backend if associated"() { setup: - def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) - def loadBalancerNameList = lbNames - def serverGroup = - new GoogleServerGroup( - name: SERVER_GROUP_NAME, - region: REGION, - regional: isRegional, - zone: ZONE, - asg: [ - (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES): loadBalancerNameList, - ], - launchConfig: [ - instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME, - properties: [ - 'metadata': new Metadata(items: [ - new Metadata.Items( - key: (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES), - value: 'spinnaker-http-load-balancer' - ), - new Metadata.Items( - key: (GoogleServerGroup.View.BACKEND_SERVICE_NAMES), - value: 'backend-service' - ) - ]) - ]) - ]).view - def computeMock = Mock(Compute) - def backendServicesMock = Mock(Compute.BackendServices) - def backendSvcGetMock = Mock(Compute.BackendServices.Get) - def backendUpdateMock = Mock(Compute.BackendServices.Update) - - def forwardingRules = Mock(Compute.ForwardingRules) - def forwardingRulesList = Mock(Compute.ForwardingRules.List) - def globalForwardingRules = Mock(Compute.GlobalForwardingRules) - def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) - - def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) - googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() - def task = Mock(Task) - def bs = isRegional ? 
- new BackendService(backends: lbNames.collect { new Backend(group: GCEUtil.buildZonalServerGroupUrl(PROJECT_NAME, ZONE, serverGroup.name)) }) : - new BackendService(backends: lbNames.collect { new Backend(group: GCEUtil.buildRegionalServerGroupUrl(PROJECT_NAME, REGION, serverGroup.name)) }) - - def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, - region: REGION, - accountName: ACCOUNT_NAME, - credentials: credentials) - @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) - operation.googleOperationPoller = - new GoogleOperationPoller( - googleConfigurationProperties: new GoogleConfigurationProperties(), - threadSleeper: threadSleeperMock, - registry: registry, - safeRetry: safeRetry - ) - operation.registry = registry - operation.safeRetry = safeRetry - operation.googleClusterProvider = googleClusterProviderMock - operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + def registry = new DefaultRegistry() + def googleClusterProviderMock = Mock(GoogleClusterProvider) + def loadBalancerNameList = lbNames + def serverGroup = + new GoogleServerGroup( + name: SERVER_GROUP_NAME, + region: REGION, + regional: isRegional, + zone: ZONE, + asg: [ + (GCEUtil.GLOBAL_LOAD_BALANCER_NAMES): loadBalancerNameList, + ], + launchConfig: [ + instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME, + properties: [ + 'metadata': new Metadata(items: [ + new Metadata.Items( + key: (GCEUtil.GLOBAL_LOAD_BALANCER_NAMES), + value: 'spinnaker-http-load-balancer' + ), + new Metadata.Items( + key: (GCEUtil.BACKEND_SERVICE_NAMES), + value: 'backend-service' + ) + ]) + ]) + ]).view + def computeMock = Mock(Compute) + def backendServicesMock = Mock(Compute.BackendServices) + def backendSvcGetMock = Mock(Compute.BackendServices.Get) + def backendUpdateMock = Mock(Compute.BackendServices.Update) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesList = Mock(Compute.ForwardingRules.List) + def globalForwardingRules = Mock(Compute.GlobalForwardingRules) + def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) + + def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) + googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def task = Mock(Task) + def bs = isRegional ? 
+ new BackendService(backends: lbNames.collect { + new Backend(group: GCEUtil.buildZonalServerGroupUrl(PROJECT_NAME, ZONE, serverGroup.name)) + }) : + new BackendService(backends: lbNames.collect { + new Backend(group: GCEUtil.buildRegionalServerGroupUrl(PROJECT_NAME, REGION, serverGroup.name)) + }) + + def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) + def googleOperationPoller = Mock(GoogleOperationPoller) + operation.googleOperationPoller = googleOperationPoller + def updateOpName = 'updateOp' + + operation.registry = registry + operation.safeRetry = safeRetry + operation.googleClusterProvider = googleClusterProviderMock + operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + operation.computeApiFactory = new GoogleComputeApiFactory(operation.googleOperationPoller, registry, "user-agent", MoreExecutors.newDirectExecutorService()) when: - def closure = operation.destroyHttpLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock) - closure() + def closure = operation.destroyHttpLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock) + closure() then: - _ * computeMock.backendServices() >> backendServicesMock - _ * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock - _ * backendSvcGetMock.execute() >> bs - _ * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock - _ * backendUpdateMock.execute() - - _ * computeMock.globalForwardingRules() >> globalForwardingRules - _ * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList - _ * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) - - _ * computeMock.forwardingRules() >> forwardingRules - _ * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList - _ * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) - bs.backends.size == 0 + _ * computeMock.backendServices() >> backendServicesMock + _ * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock + _ * backendSvcGetMock.execute() >> bs + _ * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + _ * backendUpdateMock.execute() >> [name: updateOpName] + _ * googleOperationPoller.waitForGlobalOperation(computeMock, PROJECT_NAME, updateOpName, null, task, _, _) + + _ * computeMock.globalForwardingRules() >> globalForwardingRules + _ * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList + _ * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) + + _ * computeMock.forwardingRules() >> forwardingRules + _ * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList + _ * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) + bs.backends.size() == 0 where: - isRegional | location | loadBalancerList | lbNames - false | ZONE | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] - true | REGION | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] - false | ZONE | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] - true | REGION | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | 
['spinnaker-http-load-balancer'] - false | ZONE | [] | [] - true | REGION | [] | [] + isRegional | location | loadBalancerList | lbNames + false | ZONE | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] + true | REGION | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] + false | ZONE | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] + true | REGION | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] + false | ZONE | [] | [] + true | REGION | [] | [] } @Unroll void "should delete internal loadbalancer backend if associated"() { setup: - def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) - def loadBalancerNameList = lbNames - def serverGroup = - new GoogleServerGroup( - name: SERVER_GROUP_NAME, - region: REGION, - regional: isRegional, - zone: ZONE, - asg: [ - (GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES): loadBalancerNameList, - ], - launchConfig: [ - instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME, - properties: [ - 'metadata': new Metadata(items: [ - new Metadata.Items( - key: (GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES), - value: 'spinnaker-int-load-balancer' - ) - ]) - ]) - ]).view - def computeMock = Mock(Compute) - def backendServicesMock = Mock(Compute.RegionBackendServices) - def backendSvcGetMock = Mock(Compute.RegionBackendServices.Get) - def backendUpdateMock = Mock(Compute.RegionBackendServices.Update) - def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) - - def forwardingRules = Mock(Compute.ForwardingRules) - def forwardingRulesList = Mock(Compute.ForwardingRules.List) - def globalForwardingRules = Mock(Compute.GlobalForwardingRules) - def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) - - googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() - def bs = isRegional ? 
- new BackendService(backends: lbNames.collect { new Backend(group: GCEUtil.buildZonalServerGroupUrl(PROJECT_NAME, ZONE, serverGroup.name)) }) : - new BackendService(backends: lbNames.collect { new Backend(group: GCEUtil.buildRegionalServerGroupUrl(PROJECT_NAME, REGION, serverGroup.name)) }) - - def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, + def registry = new DefaultRegistry() + def googleClusterProviderMock = Mock(GoogleClusterProvider) + def loadBalancerNameList = lbNames + def serverGroup = + new GoogleServerGroup( + name: SERVER_GROUP_NAME, region: REGION, - accountName: ACCOUNT_NAME, - credentials: credentials) - @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) - operation.googleOperationPoller = - new GoogleOperationPoller( - googleConfigurationProperties: new GoogleConfigurationProperties(), - threadSleeper: threadSleeperMock, - registry: registry, - safeRetry: safeRetry - ) - operation.registry = registry - operation.safeRetry = safeRetry - operation.googleClusterProvider = googleClusterProviderMock - operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock + regional: isRegional, + zone: ZONE, + asg: [ + (GCEUtil.REGIONAL_LOAD_BALANCER_NAMES): loadBalancerNameList, + ], + launchConfig: [ + instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME, + properties: [ + 'metadata': new Metadata(items: [ + new Metadata.Items( + key: (GCEUtil.REGIONAL_LOAD_BALANCER_NAMES), + value: 'spinnaker-int-load-balancer' + ) + ]) + ]) + ]).view + def computeMock = Mock(Compute) + def backendServicesMock = Mock(Compute.RegionBackendServices) + def backendSvcGetMock = Mock(Compute.RegionBackendServices.Get) + def backendUpdateMock = Mock(Compute.RegionBackendServices.Update) + def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesList = Mock(Compute.ForwardingRules.List) + def globalForwardingRules = Mock(Compute.GlobalForwardingRules) + def globalForwardingRulesList = Mock(Compute.GlobalForwardingRules.List) + + googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def bs = isRegional ? 
+ new BackendService(backends: lbNames.collect { + new Backend(group: GCEUtil.buildZonalServerGroupUrl(PROJECT_NAME, ZONE, serverGroup.name)) + }) : + new BackendService(backends: lbNames.collect { + new Backend(group: GCEUtil.buildRegionalServerGroupUrl(PROJECT_NAME, REGION, serverGroup.name)) + }) + + def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DestroyGoogleServerGroupAtomicOperation(description) + + def task = Mock(Task) + def googleOperationPoller = Mock(GoogleOperationPoller) + operation.googleOperationPoller = googleOperationPoller + def updateOpName = 'updateOp' + + operation.registry = registry + operation.safeRetry = safeRetry + operation.googleClusterProvider = googleClusterProviderMock + operation.googleLoadBalancerProvider = googleLoadBalancerProviderMock when: - def closure = operation.destroyInternalLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock) - closure() + def closure = operation.destroyInternalLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock) + closure() then: - _ * computeMock.regionBackendServices() >> backendServicesMock - _ * backendServicesMock.get(PROJECT_NAME, REGION, 'backend-service') >> backendSvcGetMock - _ * backendSvcGetMock.execute() >> bs - _ * backendServicesMock.update(PROJECT_NAME, REGION, 'backend-service', bs) >> backendUpdateMock - _ * backendUpdateMock.execute() - - _ * computeMock.globalForwardingRules() >> globalForwardingRules - _ * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList - _ * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) - - _ * computeMock.forwardingRules() >> forwardingRules - _ * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList - _ * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) - bs.backends.size == 0 + _ * computeMock.regionBackendServices() >> backendServicesMock + _ * backendServicesMock.get(PROJECT_NAME, REGION, 'backend-service') >> backendSvcGetMock + _ * backendSvcGetMock.execute() >> bs + _ * backendServicesMock.update(PROJECT_NAME, REGION, 'backend-service', bs) >> backendUpdateMock + _ * backendUpdateMock.execute() >> [name: updateOpName] + _ * googleOperationPoller.waitForRegionalOperation(computeMock, PROJECT_NAME, REGION, updateOpName, null, task, _, _) + + _ * computeMock.globalForwardingRules() >> globalForwardingRules + _ * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList + _ * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) + + _ * computeMock.forwardingRules() >> forwardingRules + _ * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList + _ * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) + bs.backends.size() == 0 where: - isRegional | location | loadBalancerList | lbNames - false | ZONE | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] - true | REGION | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] - false | ZONE | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | 
['spinnaker-int-load-balancer'] - true | REGION | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] - false | ZONE | [] | [] - true | REGION | [] | [] + isRegional | location | loadBalancerList | lbNames + false | ZONE | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] + true | REGION | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] + false | ZONE | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] + true | REGION | [new GoogleInternalLoadBalancer(name: 'spinnaker-int-load-balancer', backendService: new GoogleBackendService(name: 'backend-service')).view] | ['spinnaker-int-load-balancer'] + false | ZONE | [] | [] + true | REGION | [] | [] } - @Unroll void "should retry http backend deletion on 400, 412, socket timeout, succeed on 404"() { // Note: Implicitly tests SafeRetry.doRetry setup: - def registry = new DefaultRegistry() - def computeMock = Mock(Compute) - def backendServicesMock = Mock(Compute.BackendServices) - def backendSvcGetMock = Mock(Compute.BackendServices.Get) - def backendUpdateMock = Mock(Compute.BackendServices.Update) - def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) - googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList - - def serverGroup = - new GoogleServerGroup( - name: SERVER_GROUP_NAME, - region: REGION, - regional: isRegional, - zone: ZONE, - asg: [ - (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES): lbNames, - ], - launchConfig: [ - instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME, - properties: [ - 'metadata': new Metadata(items: [ - new Metadata.Items( - key: (GoogleServerGroup.View.GLOBAL_LOAD_BALANCER_NAMES), - value: 'spinnaker-http-load-balancer' - ), - new Metadata.Items( - key: (GoogleServerGroup.View.BACKEND_SERVICE_NAMES), - value: 'backend-service' - ) - ]) - ]) - ]).view - - def errorMessage = "The resource 'my-backend-service' is not ready" - def errorInfo = new GoogleJsonError.ErrorInfo( - domain: "global", - message: errorMessage, - reason: "resourceNotReady") - def details = new GoogleJsonError( - code: 400, - errors: [errorInfo], - message: errorMessage) - def httpResponseExceptionBuilder = new HttpResponseException.Builder( - 400, - "Bad Request", - new HttpHeaders()).setMessage("400 Bad Request") - def googleJsonResponseException = new GoogleJsonResponseException(httpResponseExceptionBuilder, details) - - errorMessage = "Invalid fingerprint." 
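The hunks above and below fabricate each GoogleJsonResponseException by hand: an ErrorInfo, the enclosing GoogleJsonError, and an HttpResponseException.Builder, repeated once per status code. A small helper would express that three-step construction once. This is only a sketch of the same construction the spec already performs inline; the `jsonError` name is illustrative and not defined anywhere in this codebase:

    static GoogleJsonResponseException jsonError(int code, String status, String reason, String message) {
      // Identical to the inline construction in this spec, parameterized by status code.
      def errorInfo = new GoogleJsonError.ErrorInfo(domain: "global", message: message, reason: reason)
      def details = new GoogleJsonError(code: code, errors: [errorInfo], message: message)
      def builder = new HttpResponseException.Builder(code, status, new HttpHeaders()).setMessage("$code $status")
      return new GoogleJsonResponseException(builder, details)
    }

The 400 case above would then read `jsonError(400, "Bad Request", "resourceNotReady", "The resource 'my-backend-service' is not ready")`, and likewise for the 412 and 404 cases that follow.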
- errorInfo = new GoogleJsonError.ErrorInfo( - domain: "global", - message: errorMessage, - reason: "conditionNotMet") - details = new GoogleJsonError( - code: 412, - errors: [errorInfo], - message: errorMessage) - httpResponseExceptionBuilder = new HttpResponseException.Builder( - 412, - "Precondition Failed", - new HttpHeaders()).setMessage("412 Precondition Failed") - def fingerPrintException = new GoogleJsonResponseException(httpResponseExceptionBuilder, details) - - errorMessage = "Resource 'stuff' could not be located" - errorInfo = new GoogleJsonError.ErrorInfo( - domain: "global", - message: errorMessage, - reason: "stuffNotFound") - details = new GoogleJsonError( - code: 404, - errors: [errorInfo], - message: errorMessage) - httpResponseExceptionBuilder = new HttpResponseException.Builder( - 404, - "Not Found", - new HttpHeaders()).setMessage("404 Not Found") - def notFoundException = new GoogleJsonResponseException(httpResponseExceptionBuilder, details) - - def socketTimeoutException = new SocketTimeoutException("Read timed out") - - def bs = isRegional ? - new BackendService(backends: lbNames.collect { new Backend(group: GCEUtil.buildZonalServerGroupUrl(PROJECT_NAME, ZONE, serverGroup.name)) }) : - new BackendService(backends: lbNames.collect { new Backend(group: GCEUtil.buildRegionalServerGroupUrl(PROJECT_NAME, REGION, serverGroup.name)) }) + def registry = new DefaultRegistry() + def computeMock = Mock(Compute) + def backendServicesMock = Mock(Compute.BackendServices) + def backendSvcGetMock = Mock(Compute.BackendServices.Get) + def backendUpdateMock = Mock(Compute.BackendServices.Update) + def googleLoadBalancerProviderMock = Mock(GoogleLoadBalancerProvider) + googleLoadBalancerProviderMock.getApplicationLoadBalancers("") >> loadBalancerList + + def serverGroup = + new GoogleServerGroup( + name: SERVER_GROUP_NAME, + region: REGION, + regional: isRegional, + zone: ZONE, + asg: [ + (GCEUtil.GLOBAL_LOAD_BALANCER_NAMES): lbNames, + ], + launchConfig: [ + instanceTemplate: new InstanceTemplate(name: INSTANCE_TEMPLATE_NAME, + properties: [ + 'metadata': new Metadata(items: [ + new Metadata.Items( + key: (GCEUtil.GLOBAL_LOAD_BALANCER_NAMES), + value: 'spinnaker-http-load-balancer' + ), + new Metadata.Items( + key: (GCEUtil.BACKEND_SERVICE_NAMES), + value: 'backend-service' + ) + ]) + ]) + ]).view + + def errorMessage = "The resource 'my-backend-service' is not ready" + def errorInfo = new GoogleJsonError.ErrorInfo( + domain: "global", + message: errorMessage, + reason: "resourceNotReady") + def details = new GoogleJsonError( + code: 400, + errors: [errorInfo], + message: errorMessage) + def httpResponseExceptionBuilder = new HttpResponseException.Builder( + 400, + "Bad Request", + new HttpHeaders()).setMessage("400 Bad Request") + def googleJsonResponseException = new GoogleJsonResponseException(httpResponseExceptionBuilder, details) + + errorMessage = "Invalid fingerprint." 
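+        // The 412/conditionNotMet built next mimics a stale backend-service fingerprint.
+        // Like the 400 resourceNotReady above and the SocketTimeoutException below, it is
+        // retried — one more get/update round trip per ordered then: block — while the
+        // 404 in the final when/then is treated as success.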
+ errorInfo = new GoogleJsonError.ErrorInfo( + domain: "global", + message: errorMessage, + reason: "conditionNotMet") + details = new GoogleJsonError( + code: 412, + errors: [errorInfo], + message: errorMessage) + httpResponseExceptionBuilder = new HttpResponseException.Builder( + 412, + "Precondition Failed", + new HttpHeaders()).setMessage("412 Precondition Failed") + def fingerPrintException = new GoogleJsonResponseException(httpResponseExceptionBuilder, details) + + errorMessage = "Resource 'stuff' could not be located" + errorInfo = new GoogleJsonError.ErrorInfo( + domain: "global", + message: errorMessage, + reason: "stuffNotFound") + details = new GoogleJsonError( + code: 404, + errors: [errorInfo], + message: errorMessage) + httpResponseExceptionBuilder = new HttpResponseException.Builder( + 404, + "Not Found", + new HttpHeaders()).setMessage("404 Not Found") + def notFoundException = new GoogleJsonResponseException(httpResponseExceptionBuilder, details) + + def socketTimeoutException = new SocketTimeoutException("Read timed out") + + def bs = isRegional ? + new BackendService(backends: lbNames.collect { + new Backend(group: GCEUtil.buildZonalServerGroupUrl(PROJECT_NAME, ZONE, serverGroup.name)) + }) : + new BackendService(backends: lbNames.collect { + new Backend(group: GCEUtil.buildRegionalServerGroupUrl(PROJECT_NAME, REGION, serverGroup.name)) + }) + def updateOpName = 'updateOp' + def task = Mock(Task) + def googleOperationPoller = Mock(GoogleOperationPoller) when: - def destroy = new DestroyGoogleServerGroupAtomicOperation() - destroy.googleOperationPoller = - new GoogleOperationPoller( - googleConfigurationProperties: new GoogleConfigurationProperties(), - threadSleeper: threadSleeperMock, - registry: registry, - safeRetry: safeRetry - ) - destroy.registry = registry - destroy.safeRetry = safeRetry - destroy.destroy( - destroy.destroyHttpLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock), - "Http load balancer backends", [action: 'test'] - ) + def destroy = new DestroyGoogleServerGroupAtomicOperation() + + destroy.googleOperationPoller = googleOperationPoller + + destroy.registry = registry + destroy.safeRetry = safeRetry + destroy.destroy( + destroy.destroyHttpLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock), + "Http load balancer backends", [action: 'test'] + ) then: - 1 * backendUpdateMock.execute() >> { throw googleJsonResponseException } - 2 * computeMock.backendServices() >> backendServicesMock - 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock - 1 * backendSvcGetMock.execute() >> bs - 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + 1 * backendUpdateMock.execute() >> { throw googleJsonResponseException } + 2 * computeMock.backendServices() >> backendServicesMock + 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock + 1 * backendSvcGetMock.execute() >> bs + 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock then: - 1 * backendUpdateMock.execute() >> { throw fingerPrintException } - 2 * computeMock.backendServices() >> backendServicesMock - 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock - 1 * backendSvcGetMock.execute() >> bs - 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + 1 * backendUpdateMock.execute() >> { throw fingerPrintException } + 2 * 
computeMock.backendServices() >> backendServicesMock + 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock + 1 * backendSvcGetMock.execute() >> bs + 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock then: - 1 * backendUpdateMock.execute() >> { throw socketTimeoutException } - 2 * computeMock.backendServices() >> backendServicesMock - 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock - 1 * backendSvcGetMock.execute() >> bs - 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + 1 * backendUpdateMock.execute() >> { throw socketTimeoutException } + 2 * computeMock.backendServices() >> backendServicesMock + 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock + 1 * backendSvcGetMock.execute() >> bs + 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock then: - 1 * backendUpdateMock.execute() - 2 * computeMock.backendServices() >> backendServicesMock - 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock - 1 * backendSvcGetMock.execute() >> bs - 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + 1 * backendUpdateMock.execute() >> [name: updateOpName] + 2 * computeMock.backendServices() >> backendServicesMock + 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock + 1 * backendSvcGetMock.execute() >> bs + 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + _ * googleOperationPoller.waitForGlobalOperation(computeMock, PROJECT_NAME, updateOpName, null, task, _, _) when: - destroy.destroy( - destroy.destroyHttpLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock), - "Http load balancer backends", [action: 'test'] - ) + destroy.destroy( + destroy.destroyHttpLoadBalancerBackends(computeMock, PROJECT_NAME, serverGroup, googleLoadBalancerProviderMock), + "Http load balancer backends", [action: 'test'] + ) then: - 1 * backendUpdateMock.execute() >> { throw notFoundException } - 2 * computeMock.backendServices() >> backendServicesMock - 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock - 1 * backendSvcGetMock.execute() >> bs - 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock + 1 * backendUpdateMock.execute() >> { throw notFoundException } + 2 * computeMock.backendServices() >> backendServicesMock + 1 * backendServicesMock.get(PROJECT_NAME, 'backend-service') >> backendSvcGetMock + 1 * backendSvcGetMock.execute() >> bs + 1 * backendServicesMock.update(PROJECT_NAME, 'backend-service', bs) >> backendUpdateMock where: - isRegional | location | loadBalancerList | lbNames - false | ZONE | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] + isRegional | location | loadBalancerList | lbNames + false | ZONE | [new GoogleHttpLoadBalancer(name: 'spinnaker-http-load-balancer').view] | ['spinnaker-http-load-balancer'] } } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy index baee7ce0406..b0b4758c7bc 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -44,14 +44,14 @@ class DisableGoogleServerGroupAtomicOperationUnitSpec extends Specification { private static final TARGET_POOL_NAME_1 = "testlb-target-pool-1417967954401"; private static final TARGET_POOL_NAME_2 = "testlb2-target-pool-1417963107058"; private static final TARGET_POOL_URL_1 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_1" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_1" private static final TARGET_POOL_URL_2 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_2" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_2" private static final TARGET_POOL_URLS = [TARGET_POOL_URL_1, TARGET_POOL_URL_2] private static final INSTANCE_URL_1 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-hnyp" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-hnyp" private static final INSTANCE_URL_2 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-qtow" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-qtow" private static final ZONE = "us-central1-b" private static final REGION = "us-central1" @@ -77,7 +77,7 @@ class DisableGoogleServerGroupAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } def setup() { @@ -162,9 +162,9 @@ class DisableGoogleServerGroupAtomicOperationUnitSpec extends Specification { 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList 3 * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) - 1 * computeMock.forwardingRules() >> forwardingRules - 1 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList - 1 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) + 2 * computeMock.forwardingRules() >> forwardingRules + 2 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList + 2 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) registry.timer( GoogleApiTestUtils.makeOkId( diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy index 0c7746d35a5..1c79018353f 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -49,14 +49,14 @@ class 
EnableGoogleServerGroupAtomicOperationUnitSpec extends Specification { private static final TARGET_POOL_NAME_1 = "testlb-target-pool-1417967954401"; private static final TARGET_POOL_NAME_2 = "testlb2-target-pool-1417963107058"; private static final TARGET_POOL_URL_1 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_1" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_1" private static final TARGET_POOL_URL_2 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_2" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/regions/us-central1/targetPools/$TARGET_POOL_NAME_2" private static final TARGET_POOL_URLS = [TARGET_POOL_URL_1, TARGET_POOL_URL_2] private static final INSTANCE_URL_1 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-hnyp" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-hnyp" private static final INSTANCE_URL_2 = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-qtow" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-a/instances/mjdapp-dev-v009-qtow" private static final ZONE = "us-central1-b" private static final REGION = "us-central1" @@ -93,7 +93,7 @@ class EnableGoogleServerGroupAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } def setup() { @@ -166,9 +166,9 @@ class EnableGoogleServerGroupAtomicOperationUnitSpec extends Specification { 1 * instanceTemplatesMock.get(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesGetMock 1 * instanceTemplatesGetMock.execute() >> instanceTemplate - 2 * computeMock.forwardingRules() >> forwardingRulesMock - 2 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock - 2 * forwardingRulesListMock.execute() >> forwardingRulesList + 3 * computeMock.forwardingRules() >> forwardingRulesMock + 3 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock + 3 * forwardingRulesListMock.execute() >> forwardingRulesList [TARGET_POOL_NAME_1, TARGET_POOL_NAME_2].each { targetPoolLocalName -> 1 * computeMock.targetPools() >> targetPoolsMock @@ -224,9 +224,9 @@ class EnableGoogleServerGroupAtomicOperationUnitSpec extends Specification { 1 * instanceTemplatesMock.get(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesGetMock 1 * instanceTemplatesGetMock.execute() >> instanceTemplate - 2 * computeMock.forwardingRules() >> forwardingRulesMock - 2 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock - 2 * forwardingRulesListMock.execute() >> forwardingRulesList2 + 3 * computeMock.forwardingRules() >> forwardingRulesMock + 3 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock + 3 * forwardingRulesListMock.execute() >> forwardingRulesList2 3 * computeMock.globalForwardingRules() >> globalForwardingRules 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList diff --git 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec.groovy index 6a91eccd02a..429a6f1895e 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec.groovy @@ -52,11 +52,11 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec extends Spe private static final SERVER_GROUP_NAME = "spinnaker-test-v000" private static final ZONE = "us-central1-b" private static final REGION = "us-central1" - private static final REGION_URL = "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/regions/$REGION" + private static final REGION_URL = "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/regions/$REGION" private static final MACHINE_TYPE = "f1-micro" - private static final NETWORK_1 = "default" - private static final NETWORK_2 = "other-network" + private static final NETWORK_1 = "projects/$PROJECT_NAME/networks/default" + private static final NETWORK_2 = "projects/$PROJECT_NAME/networks/other-network" private static final IMAGE = "debian" private static final DISK_TYPE = "pd-standard" private static final DISK_SIZE_GB = 120 @@ -66,7 +66,7 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec extends Spe private static final TAGS_2 = ["some-tag-4", "some-tag-5"] private static final ORIG_INSTANCE_TEMPLATE_NAME = "$SERVER_GROUP_NAME-123" private static final ORIG_INSTANCE_TEMPLATE_URL = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/instanceTemplates/$ORIG_INSTANCE_TEMPLATE_NAME" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/instanceTemplates/$ORIG_INSTANCE_TEMPLATE_NAME" private static final NEW_INSTANCE_TEMPLATE_NAME = "new-instance-template" private static final INSTANCE_TEMPLATE_INSERTION_OP_NAME = "instance-template-insertion-op" private static final SET_INSTANCE_TEMPLATE_OP_NAME = "set-instance-template-op" @@ -78,7 +78,7 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec extends Spe def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should not make any changes if no properties are overridden"() { @@ -109,7 +109,7 @@ class ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec extends Spe def instanceGroupManagersMock = Mock(Compute.InstanceGroupManagers) def instanceGroupManagersGetMock = Mock(Compute.InstanceGroupManagers.Get) def instanceGroupManagerReal = new InstanceGroupManager(instanceTemplate: ORIG_INSTANCE_TEMPLATE_URL, group: SERVER_GROUP_NAME) - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def credentials = new GoogleNamedAccountCredentials.Builder().name("gce").project(PROJECT_NAME).compute(computeMock).build() def description = new ModifyGoogleServerGroupInstanceTemplateDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, accountName: ACCOUNT_NAME, @@ -181,7 +181,7 @@ class 
ModifyGoogleServerGroupInstanceTemplateAtomicOperationUnitSpec extends Spe status: DONE) def setInstanceTemplateOperationGetMock = Mock(Compute.ZoneOperations.Get) def instanceTemplatesDeleteMock = Mock(Compute.InstanceTemplates.Delete) - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def credentials = new GoogleNamedAccountCredentials.Builder().name("gce").project(PROJECT_NAME).compute(computeMock).build() def description = new ModifyGoogleServerGroupInstanceTemplateDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, instanceMetadata: METADATA_2, diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RebootGoogleInstancesAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RebootGoogleInstancesAtomicOperationUnitSpec.groovy index c6131bdc722..1395a33f11c 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RebootGoogleInstancesAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RebootGoogleInstancesAtomicOperationUnitSpec.groovy @@ -21,6 +21,7 @@ import com.netflix.spectator.api.DefaultRegistry import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.deploy.description.RebootGoogleInstancesDescription +import com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import spock.lang.Specification import spock.lang.Subject @@ -36,17 +37,27 @@ class RebootGoogleInstancesAtomicOperationUnitSpec extends Specification { private static final ALL_INSTANCE_IDS = ["${ID_GOOD_PREFIX}1", "${ID_BAD_PREFIX}1", "${ID_GOOD_PREFIX}2", "${ID_BAD_PREFIX}2"] def registry = new DefaultRegistry() + Compute computeMock + GoogleNamedAccountCredentials credentials def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) } + def setup() { + computeMock = Mock(Compute) + credentials = new GoogleNamedAccountCredentials.Builder() + .project(PROJECT_NAME) + .compute(computeMock) + .name("gce") + .namer(new GoogleLabeledResourceNamer()) + .build() + } + void "should reset all instances"() { setup: - def computeMock = Mock(Compute) def instancesMock = Mock(Compute.Instances) def resetMock = Mock(Compute.Instances.Reset) - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() def description = new RebootGoogleInstancesDescription(zone: ZONE, instanceIds: GOOD_INSTANCE_IDS, accountName: ACCOUNT_NAME, @@ -68,10 +79,8 @@ class RebootGoogleInstancesAtomicOperationUnitSpec extends Specification { void "should reset all known instances and fail on all unknown instances"() { setup: - def computeMock = Mock(Compute) def instancesMock = Mock(Compute.Instances) def resetMock = Mock(Compute.Instances.Reset) - def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() def description = new RebootGoogleInstancesDescription(zone: ZONE, instanceIds: ALL_INSTANCE_IDS, accountName: ACCOUNT_NAME, diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RegisterInstancesWithGoogleLoadBalancerAtomicOperationUnitSpec.groovy 
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RegisterInstancesWithGoogleLoadBalancerAtomicOperationUnitSpec.groovy index a7c1fd1de2e..927c2b7f7ad 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RegisterInstancesWithGoogleLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/RegisterInstancesWithGoogleLoadBalancerAtomicOperationUnitSpec.groovy @@ -47,9 +47,9 @@ class RegisterInstancesWithGoogleLoadBalancerAtomicOperationUnitSpec extends Spe private static final INSTANCE_ID1 = "my-app7-dev-v000-instance1" private static final INSTANCE_ID2 = "my-app7-dev-v000-instance2" private static final INSTANCE_URL1 = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID1" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID1" private static final INSTANCE_URL2 = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID2" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_ID2" private static final INSTANCE_IDS = [INSTANCE_ID1, INSTANCE_ID2] private static final INSTANCE_URLS = [INSTANCE_URL1, INSTANCE_URL2] @@ -59,7 +59,7 @@ class RegisterInstancesWithGoogleLoadBalancerAtomicOperationUnitSpec extends Spe def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should register instances"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SerializeApplicationAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SerializeApplicationAtomicOperationUnitSpec.groovy index ee1172f2251..0f66864b003 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SerializeApplicationAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SerializeApplicationAtomicOperationUnitSpec.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops import com.google.api.services.compute.model.* +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil import com.netflix.spinnaker.clouddriver.google.deploy.description.snapshot.SaveSnapshotDescription import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceIllegalStateException import com.netflix.spinnaker.clouddriver.google.deploy.ops.snapshot.SaveSnapshotAtomicOperation @@ -47,6 +48,10 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { private static final SCHEDULING_ON_HOST_MAINTENANCE = "MIGRATE" private static final SCHEDULING_PREEMPTIBLE = false + private static final SHIELDEDVMCONFIG_ENABLE_SECURE_BOOT = false + private static final SHIELDEDVMCONFIG_ENABLE_VTPM = false + private static final SHIELDEDVMCONFIG_ENABLE_INTEGRITY_MONITORING = false + private static final DISK_AUTO_DELETE = true private static final DISK_BOOT = false private static final DISK_DEVICE_NAME = "test_device_name" @@ -59,24 +64,26 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { private static final DISK_SIZE_GB = 100 private static final DISK_SOURCE = 
"https://pantheon.corp.google.com/compute/disksDetail/zones/us-central1-f/disks/spinnaker-test-disk" - private static final NETWORK_URL = "https://www.googleapis.com/compute/v1/projects/test-proj/networking/networks/details/default" - private static final SUBNETWORK_URL = "https://www.googleapis.com/compute/v1/projects/test-proj/networking/subnetworks/details/us-central1/default" + private static final NETWORK_URL = "https://compute.googleapis.com/compute/v1/projects/test-proj/networking/networks/details/default" + private static final SUBNETWORK_URL = "https://compute.googleapis.com/compute/v1/projects/test-proj/networking/subnetworks/details/us-central1/default" private static final NETWORK_ACCESS_CONFIG = [] private static final AUTOSCALING_MAX_NUM_REPLICAS = 6 private static final AUTOSCALING_MIN_NUM_REPLICAS = 3 private static final AUTOSCALING_COOL_DOWN_PERIOD = 20 private static final AUTOSCALING_CPU_TARGET = 10.0 + private static final AUTOSCALING_CPU_PREDICTIVE_METHOD = "OPTIMIZE_AVAILABILITY" private static final AUTOSCALING_LOAD_BALANCER_TARGET = 25.0 private static final AUTOSCALING_METRIC_NAME = "agent.googleapis.com/apache/connections" private static final AUTOSCALING_METRIC_TARGET = 5.0 private static final AUTOSCALING_METRIC_TYPE = "GAUGE" + private static final AUTOSCALING_METRIC_SINGLE_INSTANCE_ASSIGNMENT = 1.0 private static final LOAD_BALANCER_NAME = "spinnaker_load_balancer" private static final LOAD_BALANCER_IP_PROTOCOL = "TCP" private static final LOAD_BALANCER_PORT_RANGE = "8080-8080" private static final LOAD_BALANCER_REGION = "us-east1" - private static final LOAD_BALANCER_TARGET_POOL = "https://www.googleapis.com/compute/v1/projects/test-proj/regions/us-central1/targetPools/spinnaker-load_balancer-tp" + private static final LOAD_BALANCER_TARGET_POOL = "https://compute.googleapis.com/compute/v1/projects/test-proj/regions/us-central1/targetPools/spinnaker-load_balancer-tp" private static final HEALTH_CHECK_NAME = "spinnaker-load-balancer-hc" private static final HEALTH_CHECK_INTERVAL = 15 @@ -100,6 +107,9 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { def scheduling = new Scheduling(automaticRestart: SCHEDULING_AUTOMATIC_RESTART, onHostMaintenance: SCHEDULING_ON_HOST_MAINTENANCE, preemptible: SCHEDULING_PREEMPTIBLE) + def shieldedVmConfig = new ShieldedVmConfig(enableSecureBoot: SHIELDEDVMCONFIG_ENABLE_SECURE_BOOT, + enableVtpm: SHIELDEDVMCONFIG_ENABLE_VTPM, + enableIntegrityMonitoring: SHIELDEDVMCONFIG_ENABLE_INTEGRITY_MONITORING) def disk = new AttachedDisk(autoDelete: DISK_AUTO_DELETE, boot: DISK_BOOT, deviceName: DISK_DEVICE_NAME, @@ -121,21 +131,24 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { metadata: INSTANCE_TEMPLATE_METADATA, scheduling: scheduling, disks: [disk], - networkInterfaces: [networkInterface]) + networkInterfaces: [networkInterface], + shieldedVmConfig: shieldedVmConfig) def instanceTemplate = new InstanceTemplate(description: INSTANCE_TEMPLATE_DESCRIPTION, name: INSTANCE_TEMPLATE_NAME, properties: instanceProperties) def autoscalingPolicy = new AutoscalingPolicy(maxNumReplicas: AUTOSCALING_MAX_NUM_REPLICAS, minNumReplicas: AUTOSCALING_MIN_NUM_REPLICAS, coolDownPeriodSec: AUTOSCALING_COOL_DOWN_PERIOD, - cpuUtilization: new AutoscalingPolicyCpuUtilization(utilizationTarget: AUTOSCALING_CPU_TARGET), + cpuUtilization: new AutoscalingPolicyCpuUtilization(utilizationTarget: AUTOSCALING_CPU_TARGET, + predictiveMethod: AUTOSCALING_CPU_PREDICTIVE_METHOD), loadBalancingUtilization: new 
AutoscalingPolicyLoadBalancingUtilization(utilizationTarget: AUTOSCALING_LOAD_BALANCER_TARGET), customMetricUtilizations: [new AutoscalingPolicyCustomMetricUtilization(metric: AUTOSCALING_METRIC_NAME, utilizationTarget: AUTOSCALING_METRIC_TARGET, - utilizationTargetType: AUTOSCALING_METRIC_TYPE)]) + utilizationTargetType: AUTOSCALING_METRIC_TYPE, + singleInstanceAssignment: AUTOSCALING_METRIC_SINGLE_INSTANCE_ASSIGNMENT)]) def serverGroup = new GoogleServerGroup(name: SERVER_GROUP_NAME, zone: SERVER_GROUP_ZONE, - asg: [(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES): SERVER_GROUP_LOAD_BALANCERS], + asg: [(GCEUtil.REGIONAL_LOAD_BALANCER_NAMES): SERVER_GROUP_LOAD_BALANCERS], launchConfig: ["instanceTemplate": instanceTemplate], autoscalingPolicy: autoscalingPolicy) @@ -158,13 +171,18 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { def schedulingMap = [automatic_restart: SCHEDULING_AUTOMATIC_RESTART, on_host_maintenance: SCHEDULING_ON_HOST_MAINTENANCE, preemptible: SCHEDULING_PREEMPTIBLE] + def shieldedVmConfigMap = [enable_secure_boot: SHIELDEDVMCONFIG_ENABLE_SECURE_BOOT, + enable_vtpm: SHIELDEDVMCONFIG_ENABLE_VTPM, + enable_integrity_monitoring: SHIELDEDVMCONFIG_ENABLE_INTEGRITY_MONITORING] def autoscalingPolicyMap = [max_replicas: AUTOSCALING_MAX_NUM_REPLICAS, min_replicas: AUTOSCALING_MIN_NUM_REPLICAS, cooldown_period: AUTOSCALING_COOL_DOWN_PERIOD, - cpu_utilization: [target: AUTOSCALING_CPU_TARGET], + cpu_utilization: [target: AUTOSCALING_CPU_TARGET, + predictive_method: AUTOSCALING_CPU_PREDICTIVE_METHOD.toLowerCase()], metric: [[name: AUTOSCALING_METRIC_NAME, target: AUTOSCALING_METRIC_TARGET, - type: AUTOSCALING_METRIC_TYPE.toLowerCase()]], + type: AUTOSCALING_METRIC_TYPE.toLowerCase(), + single_instance_assignment: AUTOSCALING_METRIC_SINGLE_INSTANCE_ASSIGNMENT]], load_balancing_utilization: [target: AUTOSCALING_LOAD_BALANCER_TARGET]] def autoscalingMap = [name: SERVER_GROUP_NAME, target: "\${google_compute_instance_group_manager.${SERVER_GROUP_NAME}.self_link}", @@ -185,7 +203,8 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { project: null, network_interface: [networkInterfaceMap], scheduling: schedulingMap, - metadata: metadataMap] + metadata: metadataMap, + shielded_vm_config: shieldedVmConfigMap] def targetPools = [] SERVER_GROUP_LOAD_BALANCERS.each {String loadBalancer -> targetPools.add("\${google_compute_target_pool.${loadBalancer}.self_link}") @@ -294,7 +313,7 @@ class SerializeApplicationAtomicOperationUnitSpec extends Specification { // Create a server group with no instance template def serverGroup = new GoogleServerGroup(name: SERVER_GROUP_NAME, zone: SERVER_GROUP_ZONE, - asg: [(GoogleServerGroup.View.REGIONAL_LOAD_BALANCER_NAMES): SERVER_GROUP_LOAD_BALANCERS], + asg: [(GCEUtil.REGIONAL_LOAD_BALANCER_NAMES): SERVER_GROUP_LOAD_BALANCERS], launchConfig: ["instanceTemplate": null]) @Subject def operation = new SaveSnapshotAtomicOperation(new SaveSnapshotDescription()) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SetStatefulDiskAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SetStatefulDiskAtomicOperationUnitSpec.groovy new file mode 100644 index 00000000000..19f3eae54f5 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/SetStatefulDiskAtomicOperationUnitSpec.groovy @@ -0,0 +1,87 @@ +/* + * Copyright 2019 Google, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops + +import com.google.api.services.compute.model.InstanceGroupManager +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.google.compute.FakeGoogleComputeOperationRequest +import com.netflix.spinnaker.clouddriver.google.compute.FakeGoogleComputeRequest +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory +import com.netflix.spinnaker.clouddriver.google.compute.GoogleServerGroupManagers +import com.netflix.spinnaker.clouddriver.google.deploy.description.SetStatefulDiskDescription +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import spock.lang.Specification + +class SetStatefulDiskAtomicOperationUnitSpec extends Specification { + + private static final String SERVER_GROUP = "testapp-v000" + private static final String REGION = "us-central1" + private static final String DEVICE_NAME = "testapp-v000-001" + private static final GoogleNamedAccountCredentials CREDENTIALS = + new GoogleNamedAccountCredentials.Builder() + .name("spinnaker-account") + .credentials(new FakeGoogleCredentials()) + .build() + + Task task + GoogleClusterProvider clusterProvider + GoogleComputeApiFactory computeApiFactory + GoogleServerGroupManagers serverGroupManagers + GoogleNamedAccountCredentials credentials + + def setup() { + task = Mock(Task) + TaskRepository.threadLocalTask.set(task) + + serverGroupManagers = Mock(GoogleServerGroupManagers) + + computeApiFactory = Mock(GoogleComputeApiFactory) { + _ * createServerGroupManagers(*_) >> serverGroupManagers + } + + clusterProvider = Mock(GoogleClusterProvider) { + _ * getServerGroup(*_) >> new GoogleServerGroup(name: SERVER_GROUP).view + } + } + + void "sets stateful policy on instance group"() { + setup: + def description = new SetStatefulDiskDescription( + serverGroupName: SERVER_GROUP, + region: REGION, + deviceName: DEVICE_NAME, + credentials: CREDENTIALS) + def operation = new SetStatefulDiskAtomicOperation(clusterProvider, computeApiFactory, description) + def updateOp = new FakeGoogleComputeOperationRequest<>() + def getManagerRequest = FakeGoogleComputeRequest.createWithResponse(new InstanceGroupManager()) + _ * serverGroupManagers.get() >> getManagerRequest + + when: + operation.operate([]) + + then: + 1 * serverGroupManagers.update({ + it.getStatefulPolicy().getPreservedState().getDisks().containsKey(DEVICE_NAME) + }) >> updateOp + + assert updateOp.waitedForCompletion() + } +} diff --git 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/StatefullyUpdateBootImageAtomicOperationTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/StatefullyUpdateBootImageAtomicOperationTest.java new file mode 100644 index 00000000000..a6db9786b00 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/StatefullyUpdateBootImageAtomicOperationTest.java @@ -0,0 +1,268 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.client.googleapis.testing.json.GoogleJsonResponseExceptionFactoryTesting; +import com.google.api.client.http.HttpStatusCodes; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.Compute.InstanceTemplates.Delete; +import com.google.api.services.compute.Compute.InstanceTemplates.Insert; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.AttachedDiskInitializeParams; +import com.google.api.services.compute.model.Image; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceGroupManagerVersion; +import com.google.api.services.compute.model.InstanceProperties; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.api.services.compute.model.StatefulPolicy; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.compute.FakeBatchComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.FakeGoogleComputeOperationRequest; +import com.netflix.spinnaker.clouddriver.google.compute.FakeGoogleComputeRequest; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleServerGroupManagers; +import com.netflix.spinnaker.clouddriver.google.compute.Images; +import com.netflix.spinnaker.clouddriver.google.compute.InstanceTemplates; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.description.StatefullyUpdateBootImageDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceIllegalStateException; +import 
com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +final class StatefullyUpdateBootImageAtomicOperationTest { + + private static final String SERVER_GROUP = "testapp-v000"; + private static final String REGION = "us-central1"; + private static final String IMAGE_NAME = "kool-new-os"; + private static final String IMAGE_URL = "http://cloud.google.com/images/my-project/" + IMAGE_NAME; + private static final String INSTANCE_TEMPLATE_NAME = SERVER_GROUP + "-1234567890"; + private static final String INSTANCE_TEMPLATE_URL = + "http://cloud.google.com/instance-templates/my-project/" + INSTANCE_TEMPLATE_NAME; + + @Mock private GoogleServerGroupManagers mockServerGroupManagers; + @Mock private Images mockImages; + @Mock private InstanceTemplates mockInstanceTemplates; + + private StatefullyUpdateBootImageAtomicOperation operation; + + @BeforeEach + void setUp() { + TaskRepository.threadLocalTask.set(new DefaultTask("taskId")); + + GoogleNamedAccountCredentials credentials = + new GoogleNamedAccountCredentials.Builder() + .name("spinnaker-account") + .credentials(new FakeGoogleCredentials()) + .project("foo") + .build(); + + GoogleConfigurationProperties config = new GoogleConfigurationProperties(); + config.setBaseImageProjects(ImmutableList.of("projectOne", "projectTwo")); + + GoogleClusterProvider mockClusterProvider = mock(GoogleClusterProvider.class); + when(mockClusterProvider.getServerGroup(any(), any(), any())) + .thenReturn(new GoogleServerGroup(SERVER_GROUP).getView()); + + StatefullyUpdateBootImageDescription description = + new StatefullyUpdateBootImageDescription() + .setServerGroupName(SERVER_GROUP) + .setRegion(REGION) + .setBootImage(IMAGE_NAME) + .setCredentials(credentials); + + GoogleComputeApiFactory mockComputeApiFactory = mock(GoogleComputeApiFactory.class); + lenient() + .when(mockComputeApiFactory.createServerGroupManagers(any(), any())) + .thenReturn(mockServerGroupManagers); + lenient().when(mockComputeApiFactory.createImages(any())).thenReturn(mockImages); + lenient() + .when(mockComputeApiFactory.createInstanceTemplates(any())) + .thenReturn(mockInstanceTemplates); + lenient() + .when(mockComputeApiFactory.createBatchRequest(any())) + .thenReturn(new FakeBatchComputeRequest<>()); + + operation = + new StatefullyUpdateBootImageAtomicOperation( + mockClusterProvider, mockComputeApiFactory, config, description); + } + + @Test + void couldNotFindImage() throws IOException { + when(mockImages.get(any(), any())).thenReturn(status404()); + + Exception e = + assertThrows( + GoogleResourceIllegalStateException.class, () -> operation.operate(ImmutableList.of())); + assertThat(e).hasMessageContaining(IMAGE_NAME); + } + + @Test + void exceptionFindingImage() throws IOException { + when(mockImages.get(any(), any())).thenThrow(new IOException("uh oh")); + + Exception e = assertThrows(Exception.class, () -> operation.operate(ImmutableList.of())); + assertThat(e).hasMessageContaining("uh oh"); + } + + @Test + void 
multipleInstanceGroupTemplates() throws IOException { + when(mockImages.get(any(), any())).thenReturn(image(baseImage())); + when(mockServerGroupManagers.get()) + .thenReturn( + FakeGoogleComputeRequest.createWithResponse( + baseInstanceGroupManager() + .setVersions( + ImmutableList.of( + new InstanceGroupManagerVersion(), + new InstanceGroupManagerVersion())))); + + Exception e = assertThrows(Exception.class, () -> operation.operate(ImmutableList.of())); + assertThat(e).hasMessageContaining("more than one instance template"); + } + + @Test + void noStatefulPolicy() throws IOException { + when(mockImages.get(any(), any())).thenReturn(image(baseImage())); + when(mockServerGroupManagers.get()) + .thenReturn( + FakeGoogleComputeRequest.createWithResponse( + baseInstanceGroupManager().setStatefulPolicy(null))); + + Exception e = assertThrows(Exception.class, () -> operation.operate(ImmutableList.of())); + assertThat(e).hasMessageContaining("StatefulPolicy"); + } + + @Test + void multipleBootDisks() throws IOException { + when(mockImages.get(any(), any())).thenReturn(image(baseImage())); + when(mockServerGroupManagers.get()) + .thenReturn(FakeGoogleComputeRequest.createWithResponse(baseInstanceGroupManager())); + InstanceTemplate instanceTemplate = baseInstanceTemplate(); + instanceTemplate + .getProperties() + .setDisks( + ImmutableList.of(new AttachedDisk().setBoot(true), new AttachedDisk().setBoot(true))); + when(mockInstanceTemplates.get(any())) + .thenReturn(FakeGoogleComputeRequest.createWithResponse(instanceTemplate)); + + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> operation.operate(ImmutableList.of())); + + assertThat(e).hasMessageContaining("one boot disk"); + } + + @Test + void success() throws IOException { + when(mockImages.get(any(), any())).thenReturn(image(new Image().setSelfLink(IMAGE_URL))); + when(mockServerGroupManagers.get()) + .thenReturn(FakeGoogleComputeRequest.createWithResponse(baseInstanceGroupManager())); + when(mockInstanceTemplates.get(any())) + .thenReturn(FakeGoogleComputeRequest.createWithResponse(baseInstanceTemplate())); + FakeGoogleComputeOperationRequest<Insert> insertOp = new FakeGoogleComputeOperationRequest<>(); + when(mockInstanceTemplates.insert(any())).thenReturn(insertOp); + FakeGoogleComputeOperationRequest<Delete> deleteOp = new FakeGoogleComputeOperationRequest<>(); + when(mockInstanceTemplates.delete(any())).thenReturn(deleteOp); + FakeGoogleComputeOperationRequest patchOp = new FakeGoogleComputeOperationRequest<>(); + when(mockServerGroupManagers.patch(any())).thenReturn(patchOp); + + operation.operate(ImmutableList.of()); + + ArgumentCaptor<InstanceTemplate> newTemplateCaptor = + ArgumentCaptor.forClass(InstanceTemplate.class); + verify(mockInstanceTemplates).insert(newTemplateCaptor.capture()); + InstanceTemplate newTemplate = newTemplateCaptor.getValue(); + + assertThat(newTemplate.getName()).matches(SERVER_GROUP + "-\\d{8}"); + AttachedDisk bootDisk = newTemplate.getProperties().getDisks().get(0); + assertThat(bootDisk.getInitializeParams().getSourceImage()).isEqualTo(IMAGE_URL); + assertThat(insertOp.waitedForCompletion()).isTrue(); + + ArgumentCaptor<InstanceGroupManager> patchedManagerCaptor = + ArgumentCaptor.forClass(InstanceGroupManager.class); + verify(mockServerGroupManagers).patch(patchedManagerCaptor.capture()); + InstanceGroupManager patchedManager = patchedManagerCaptor.getValue(); + + assertThat(patchedManager.getInstanceTemplate()).endsWith("/" + newTemplate.getName()); + assertThat(patchedManager.getVersions()).isEmpty(); + 
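+ // OPPORTUNISTIC means the MIG applies the new template only as instances are recreated, rather than proactively rolling the whole group.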
assertThat(patchedManager.getUpdatePolicy().getType()).isEqualTo("OPPORTUNISTIC"); + assertThat(patchOp.waitedForCompletion()).isTrue(); + + verify(mockInstanceTemplates).delete(INSTANCE_TEMPLATE_NAME); + assertThat(deleteOp.waitedForCompletion()).isTrue(); + } + + private static Image baseImage() { + return new Image().setName(IMAGE_NAME).setSelfLink(IMAGE_URL); + } + + private static FakeGoogleComputeRequest<Compute.Images.Get, Image> image(Image image) { + return FakeGoogleComputeRequest.createWithResponse(image, mock(Compute.Images.Get.class)); + } + + private static FakeGoogleComputeRequest<Compute.Images.Get, Image> status404() + throws IOException { + return FakeGoogleComputeRequest.createWithException( + GoogleJsonResponseExceptionFactoryTesting.newMock( + GsonFactory.getDefaultInstance(), HttpStatusCodes.STATUS_CODE_NOT_FOUND, "not found")); + } + + private static InstanceTemplate baseInstanceTemplate() { + return new InstanceTemplate() + .setName(INSTANCE_TEMPLATE_NAME) + .setSelfLink(INSTANCE_TEMPLATE_URL) + .setProperties( + new InstanceProperties() + .setDisks( + ImmutableList.of( + new AttachedDisk() + .setBoot(true) + .setInitializeParams( + new AttachedDiskInitializeParams().setSourceImage("centos")), + new AttachedDisk().setBoot(false)))); + } + + private static InstanceGroupManager baseInstanceGroupManager() { + return new InstanceGroupManager() + .setInstanceTemplate(INSTANCE_TEMPLATE_URL) + .setVersions( + ImmutableList.of( + new InstanceGroupManagerVersion().setInstanceTemplate(INSTANCE_TEMPLATE_URL))) + .setStatefulPolicy(new StatefulPolicy()); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy index 3f2d4ddfac4..cf9640ed0e1 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateAndDecrementGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -34,15 +34,15 @@ class TerminateAndDecrementGoogleServerGroupAtomicOperationUnitSpec extends Specification { private static final SERVER_GROUP_NAME = "my-server-group" private static final SERVER_GROUP_SELF_LINK = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instanceGroupManagers/$SERVER_GROUP_NAME" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instanceGroupManagers/$SERVER_GROUP_NAME" private static final REGION = "us-central1" private static final ZONE = "us-central1-f" private static final ACCOUNT_NAME = "auto" private static final PROJECT_NAME = "my_project" private static final INSTANCE_IDS = ["my-app7-dev-v000-1", "my-app7-dev-v000-2"] private static final INSTANCE_URLS = [ - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-1", - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-2" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-1", + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-2" ] def setupSpec() { diff --git 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateGoogleInstancesAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateGoogleInstancesAtomicOperationUnitSpec.groovy index dd97f4633eb..cee29775ede 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateGoogleInstancesAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/TerminateGoogleInstancesAtomicOperationUnitSpec.groovy @@ -35,7 +35,7 @@ import spock.lang.Subject class TerminateGoogleInstancesAtomicOperationUnitSpec extends Specification { private static final MANAGED_INSTANCE_GROUP_NAME = "my-app7-dev-v000" private static final MANAGED_INSTANCE_GROUP_SELF_LINK = - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instanceGroupManagers/$MANAGED_INSTANCE_GROUP_NAME" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instanceGroupManagers/$MANAGED_INSTANCE_GROUP_NAME" private static final REGION = "us-central1" private static final ZONE = "us-central1-b" private static final ACCOUNT_NAME = "auto" @@ -47,8 +47,8 @@ class TerminateGoogleInstancesAtomicOperationUnitSpec extends Specification { private static final ALL_INSTANCE_IDS = ["${ID_GOOD_PREFIX}1", "${ID_BAD_PREFIX}1", "${ID_GOOD_PREFIX}2", "${ID_BAD_PREFIX}2"] private static final GOOD_INSTANCE_URLS = [ - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/${ID_GOOD_PREFIX}1", - "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/${ID_GOOD_PREFIX}2" + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/${ID_GOOD_PREFIX}1", + "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/${ID_GOOD_PREFIX}2" ] def setupSpec() { @@ -115,12 +115,12 @@ class TerminateGoogleInstancesAtomicOperationUnitSpec extends Specification { GoogleApiTestUtils.makeOkId( registry, "compute.instances.delete", [scope: "zonal", zone: ZONE]) - ).count() == GOOD_INSTANCE_IDS.size + ).count() == GOOD_INSTANCE_IDS.size() registry.timer( GoogleApiTestUtils.makeId( registry, "compute.instances.delete", 404, [scope: "zonal", zone: ZONE]) - ).count() == BAD_INSTANCE_IDS.size + ).count() == BAD_INSTANCE_IDS.size() thrown IOException } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy index a06160e971a..de5ce525804 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec.groovy @@ -17,12 +17,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops import com.google.api.services.compute.Compute +import com.fasterxml.jackson.databind.ObjectMapper import com.google.api.services.compute.model.* +import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spectator.api.DefaultRegistry import 
com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.google.GoogleApiTestUtils import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy @@ -30,6 +33,8 @@ import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.Cu import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry +import com.netflix.spinnaker.clouddriver.orchestration.DefaultOrchestrationProcessor import spock.lang.Specification import spock.lang.Subject import spock.lang.Unroll @@ -63,10 +68,18 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification loadBalancingUtilization: LOAD_BALANCING_UTILIZATION, customMetricUtilizations: CUSTOM_METRIC_UTILIZATIONS) private static - final SELF_LINK = "https://www.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-1" + final SELF_LINK = "https://compute.googleapis.com/compute/v1/projects/shared-spinnaker/zones/us-central1-f/instances/my-app7-dev-v000-1" private static final REGION = "us-central1" private static final AUTOSCALER = GCEUtil.buildAutoscaler(SERVER_GROUP_NAME, SELF_LINK, GOOGLE_SCALING_POLICY) + GoogleClusterProvider googleClusterProviderMock = Mock(GoogleClusterProvider) + Compute computeMock = Mock(Compute) + GoogleOperationPoller operationPollerMock = Mock(GoogleOperationPoller) + AtomicOperationsRegistry atomicOperationsRegistryMock = Mock(AtomicOperationsRegistry) + DefaultOrchestrationProcessor orchestrationProcessorMock = Mock(DefaultOrchestrationProcessor) + Cache cacheView = Mock(Cache) + ObjectMapper objectMapper = Mock(ObjectMapper) + def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) } @@ -75,7 +88,6 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "can create zonal and regional scaling policies"() { setup: def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) def serverGroup = new GoogleServerGroup(zone: ZONE, regional: isRegional, selfLink: SELF_LINK).view def computeMock = Mock(Compute) @@ -99,9 +111,8 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def regionalTimerId = GoogleApiTestUtils.makeOkId(registry, "compute.regionAutoscalers.insert", [scope: "regional", region: REGION]) registry.timer(regionalTimerId) - @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock,atomicOperationsRegistryMock, orchestrationProcessorMock, cacheView, objectMapper]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.operate([]) @@ -113,11 +124,11 @@ class 
UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification if (isRegional) { 1 * computeMock.regionAutoscalers() >> regionAutoscalerMock 1 * regionAutoscalerMock.insert(PROJECT_NAME, location, AUTOSCALER) >> regionInsertMock - 1 * regionInsertMock.execute() + 1 * regionInsertMock.execute() >> [name: 'insertOp'] } else { 1 * computeMock.autoscalers() >> autoscalerMock 1 * autoscalerMock.insert(PROJECT_NAME, location, AUTOSCALER) >> insertMock - 1 * insertMock.execute() + 1 * insertMock.execute() >> [name: 'insertOp'] } registry.timer(regionalTimerId).count() == (isRegional ? 1 : 0) @@ -133,7 +144,6 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "can update zonal and regional scaling policies"() { given: def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) def computeMock = Mock(Compute) def autoscalingPolicy = new AutoscalingPolicy( minNumReplicas: 1, @@ -160,9 +170,8 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def regionUpdateMock = Mock(Compute.RegionAutoscalers.Update) def regionalTimerId = GoogleApiTestUtils.makeOkId(registry, "compute.regionAutoscalers.update", [scope: "regional", region: REGION]) - @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock,atomicOperationsRegistryMock, orchestrationProcessorMock, cacheView, objectMapper]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.operate([]) @@ -174,11 +183,11 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification if (isRegional) { 1 * computeMock.regionAutoscalers() >> regionAutoscalerMock 1 * regionAutoscalerMock.update(PROJECT_NAME, location, AUTOSCALER) >> regionUpdateMock - 1 * regionUpdateMock.execute() + 1 * regionUpdateMock.execute() >> [name: 'updateOp'] } else { 1 * computeMock.autoscalers() >> autoscalerMock 1 * autoscalerMock.update(PROJECT_NAME, location, AUTOSCALER) >> updateMock - 1 * updateMock.execute() + 1 * updateMock.execute() >> [name: 'updateOp'] } registry.timer(regionalTimerId).count() == (isRegional ? 1 : 0) registry.timer(zonalTimerId).count() == (isRegional ? 
0 : 1) @@ -193,22 +202,20 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "builds autoscaler based on ancestor autoscaling policy and input description: input overrides nothing"() { setup: def registry = new DefaultRegistry() - def ancestorPolicy = new AutoscalingPolicy( + def ancestorPolicy = new GoogleAutoscalingPolicy( minNumReplicas: MIN_NUM_REPLICAS, maxNumReplicas: MAX_NUM_REPLICAS, coolDownPeriodSec: COOL_DOWN_PERIOD_SEC, - cpuUtilization: new AutoscalingPolicyCpuUtilization(utilizationTarget: UTILIZATION_TARGET), - loadBalancingUtilization: new AutoscalingPolicyLoadBalancingUtilization(utilizationTarget: UTILIZATION_TARGET), - customMetricUtilizations: [new AutoscalingPolicyCustomMetricUtilization( + cpuUtilization: new GoogleAutoscalingPolicy.CpuUtilization(utilizationTarget: UTILIZATION_TARGET), + loadBalancingUtilization: new GoogleAutoscalingPolicy.LoadBalancingUtilization(utilizationTarget: UTILIZATION_TARGET), + customMetricUtilizations: [new GoogleAutoscalingPolicy.CustomMetricUtilization( metric: METRIC, utilizationTarget: UTILIZATION_TARGET, utilizationTargetType: "DELTA_PER_MINUTE")]); - def ancestorDescription = GCEUtil.buildAutoscalingPolicyDescriptionFromAutoscalingPolicy(ancestorPolicy) def updatePolicy = new GoogleAutoscalingPolicy() def expectedAutoscaler = GCEUtil.buildAutoscaler( - SERVER_GROUP_NAME, SELF_LINK, ancestorDescription) + SERVER_GROUP_NAME, SELF_LINK, ancestorPolicy) - def googleClusterProviderMock = Mock(GoogleClusterProvider) def computeMock = Mock(Compute) def serverGroup = new GoogleServerGroup( zone: ZONE, @@ -232,9 +239,8 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def regionAutoscalerMock = Mock(Compute.RegionAutoscalers) def regionUpdateMock = Mock(Compute.RegionAutoscalers.Update) - @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock,atomicOperationsRegistryMock, orchestrationProcessorMock, cacheView, objectMapper]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.operate([]) @@ -245,11 +251,11 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification if (isRegional) { 1 * computeMock.regionAutoscalers() >> regionAutoscalerMock 1 * regionAutoscalerMock.update(PROJECT_NAME, location, expectedAutoscaler) >> regionUpdateMock - 1 * regionUpdateMock.execute() + 1 * regionUpdateMock.execute() >> [name: 'updateOp'] } else { 1 * computeMock.autoscalers() >> autoscalerMock 1 * autoscalerMock.update(PROJECT_NAME, location, expectedAutoscaler) >> updateMock - 1 * updateMock.execute() + 1 * updateMock.execute() >> [name: 'updateOp'] } where: @@ -286,7 +292,6 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def expectedAutoscaler = GCEUtil.buildAutoscaler( SERVER_GROUP_NAME, SELF_LINK, updatePolicy) - def googleClusterProviderMock = Mock(GoogleClusterProvider) def computeMock = Mock(Compute) def serverGroup = new GoogleServerGroup(zone: ZONE, selfLink: SELF_LINK, @@ -309,9 +314,8 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def regionAutoscalerMock = Mock(Compute.RegionAutoscalers) def regionUpdateMock = Mock(Compute.RegionAutoscalers.Update) - @Subject def operation = 
Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock,atomicOperationsRegistryMock, orchestrationProcessorMock, cacheView, objectMapper]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.operate([]) @@ -322,11 +326,11 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification if (isRegional) { 1 * computeMock.regionAutoscalers() >> regionAutoscalerMock 1 * regionAutoscalerMock.update(PROJECT_NAME, location, expectedAutoscaler) >> regionUpdateMock - 1 * regionUpdateMock.execute() + 1 * regionUpdateMock.execute() >> [name: 'updateOp'] } else { 1 * computeMock.autoscalers() >> autoscalerMock 1 * autoscalerMock.update(PROJECT_NAME, location, expectedAutoscaler) >> updateMock - 1 * updateMock.execute() + 1 * updateMock.execute() >> [name: 'updateOp'] } where: @@ -395,7 +399,6 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification void "update the instance template when updatePolicyMetadata is called"() { given: def registry = new DefaultRegistry() - def googleClusterProviderMock = Mock(GoogleClusterProvider) def computeMock = Mock(Compute) def autoscaler = [:] @@ -419,13 +422,12 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification def template = new InstanceTemplate(properties: [ disks: [[getBoot: { return [initializeParams: [sourceImage: 'images/sourceImage']] }, initializeParams: [diskType: 'huge', diskSizeGb: 42], autoDelete: false]], name: 'template', - networkInterfaces: [[network: 'networks/my-network']], + networkInterfaces: [[network: "projects/$PROJECT_NAME/networks/my-network"]], serviceAccounts: [[email: 'serviceAccount@google.com']] ]) - @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description]) + @Subject def operation = Spy(UpsertGoogleAutoscalingPolicyAtomicOperation, constructorArgs: [description, googleClusterProviderMock, operationPollerMock,atomicOperationsRegistryMock, orchestrationProcessorMock, cacheView, objectMapper]) operation.registry = registry - operation.googleClusterProvider = googleClusterProviderMock when: operation.updatePolicyMetadata(computeMock, credentials, PROJECT_NAME, groupUrl, autoscaler) @@ -446,7 +448,7 @@ class UpsertGoogleAutoscalingPolicyAtomicOperationUnitSpec extends Specification where: isRegional | location | groupUrl - false | ZONE | "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/zones/us-central1-f/autoscalers/okra-auto-v005" - true | REGION | "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/regions/us-central1/autoscalers/okra-auto-v005" + false | ZONE | "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/zones/us-central1-f/autoscalers/okra-auto-v005" + true | REGION | "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/regions/us-central1/autoscalers/okra-auto-v005" } } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperationUnitSpec.groovy index 6a37d41d1ba..3dc131282cf 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleImageTagsAtomicOperationUnitSpec.groovy @@ -17,10 +17,9 @@ package com.netflix.spinnaker.clouddriver.google.deploy.ops import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.googleapis.batch.BatchRequest import com.google.api.client.googleapis.batch.json.JsonBatchCallback import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport -import com.google.api.client.json.jackson2.JacksonFactory +import com.google.api.client.json.gson.GsonFactory import com.google.api.services.compute.Compute import com.google.api.services.compute.model.Image import com.google.api.services.compute.model.ImageList @@ -32,6 +31,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleI import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.google.security.TestDefaults +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest import groovy.mock.interceptor.MockFor import spock.lang.Specification import spock.lang.Subject @@ -40,7 +40,7 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme private static final ACCOUNT_NAME = "auto" private static final PROJECT_NAME = "my-project" private static final IMAGE_NAME = "debian-7-wheezy-v20140415" - private static final IMAGE_SELF_LINK = "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images/spinnaker-rosco-all-20161229193556-precise" + private static final IMAGE_SELF_LINK = "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images/spinnaker-rosco-all-20161229193556-precise" private static final BASE_IMAGE_PROJECTS = ["centos-cloud", "ubuntu-os-cloud"] private static final TAGS = ['some-key-1': 'some-val-2'] private static final LABELS = ['some-existing-key-1': 'some-existing-val-2'] @@ -54,7 +54,7 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme void "should set labels on image with no existing labels"() { setup: def computeMock = new MockFor(Compute) - def batchMock = new MockFor(BatchRequest) + def googleBatchMock = new MockFor(GoogleBatchRequest) def imageProjects = [PROJECT_NAME] + BASE_IMAGE_PROJECTS def listMock = new MockFor(Compute.Images.List) @@ -63,28 +63,26 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme def globalSetLabelsRequest def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def images = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName("test").build().images() - computeMock.demand.batch { new BatchRequest(httpTransport, httpRequestInitializer) } - JsonBatchCallback callback = null for (def imageProject : imageProjects) { computeMock.demand.images { return images } listMock.demand.setFilter { } - listMock.demand.queue { imageListBatch, imageListCallback -> + googleBatchMock.demand.queue { imageList, imageListCallback -> callback = 
imageListCallback } } - batchMock.demand.size { return 1 } - batchMock.demand.execute { + googleBatchMock.demand.size() { return 1 } + googleBatchMock.demand.execute { def imageList = new ImageList( - selfLink: "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", + selfLink: "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", items: [new Image(name: IMAGE_NAME, selfLink: IMAGE_SELF_LINK)] ) callback.onSuccess(imageList, null) @@ -94,7 +92,7 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme computeMock.ignore('asBoolean') when: - batchMock.use { + googleBatchMock.use { computeMock.use { listMock.use { def compute = new Compute.Builder( @@ -119,15 +117,14 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme } then: - 1 * imagesMock.setLabels(PROJECT_NAME, IMAGE_NAME, { globalSetLabelsRequest = it }) >> setLabelsMock + 1 * imagesMock.setLabels(PROJECT_NAME, IMAGE_NAME, {it.labels == TAGS }) >> setLabelsMock 1 * setLabelsMock.execute() - globalSetLabelsRequest.labels == TAGS } void "should add to labels on image with existing labels"() { setup: def computeMock = new MockFor(Compute) - def batchMock = new MockFor(BatchRequest) + def googleBatchMock = new MockFor(GoogleBatchRequest) def imageProjects = [PROJECT_NAME] + BASE_IMAGE_PROJECTS def listMock = new MockFor(Compute.Images.List) @@ -136,28 +133,26 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme def globalSetLabelsRequest def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def images = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName("test").build().images() - computeMock.demand.batch { new BatchRequest(httpTransport, httpRequestInitializer) } - JsonBatchCallback callback = null for (def imageProject : imageProjects) { computeMock.demand.images { return images } listMock.demand.setFilter { } - listMock.demand.queue { imageListBatch, imageListCallback -> + googleBatchMock.demand.queue { imageList, imageListCallback -> callback = imageListCallback } } - batchMock.demand.size { return 1 } - batchMock.demand.execute { + googleBatchMock.demand.size() { return 1 } + googleBatchMock.demand.execute { def imageList = new ImageList( - selfLink: "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", + selfLink: "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", items: [new Image(name: IMAGE_NAME, selfLink: IMAGE_SELF_LINK, labels: LABELS)] ) callback.onSuccess(imageList, null) @@ -167,7 +162,7 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme computeMock.ignore('asBoolean') when: - batchMock.use { + googleBatchMock.use { computeMock.use { listMock.use { def compute = new Compute.Builder( @@ -192,51 +187,47 @@ class UpsertGoogleImageTagsAtomicOperationUnitSpec extends Specification impleme } then: - 1 * imagesMock.setLabels(PROJECT_NAME, IMAGE_NAME, { globalSetLabelsRequest = it }) >> setLabelsMock - 1 * setLabelsMock.execute() - globalSetLabelsRequest.labels == LABELS + TAGS + 1 * imagesMock.setLabels(PROJECT_NAME, IMAGE_NAME, {it.labels == LABELS + TAGS }) >> setLabelsMock + 1 * setLabelsMock.execute() } void "should fail to 
create instance because image is invalid"() { setup: def computeMock = new MockFor(Compute) - def batchMock = new MockFor(BatchRequest) + def googleBatchMock = new MockFor(GoogleBatchRequest) def imageProjects = [PROJECT_NAME] + BASE_IMAGE_PROJECTS def listMock = new MockFor(Compute.Images.List) def httpTransport = GoogleNetHttpTransport.newTrustedTransport() - def jsonFactory = JacksonFactory.defaultInstance + def jsonFactory = GsonFactory.defaultInstance def httpRequestInitializer = new GoogleCredential.Builder().setTransport(httpTransport).setJsonFactory(jsonFactory).build() def images = new Compute.Builder( httpTransport, jsonFactory, httpRequestInitializer).setApplicationName("test").build().images() def emptyImageList = new ImageList( - selfLink: "https://www.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", + selfLink: "https://compute.googleapis.com/compute/alpha/projects/$PROJECT_NAME/global/images", items: [] ) - batchMock.demand.size { return 1 } - computeMock.demand.batch { new BatchRequest(httpTransport, httpRequestInitializer) } - computeMock.ignore('asBoolean') - JsonBatchCallback callback = null for (def imageProject : imageProjects) { computeMock.demand.images { return images } listMock.demand.setFilter { } - listMock.demand.queue { imageListBatch, imageListCallback -> + googleBatchMock.demand.queue { imageList, imageListCallback -> callback = imageListCallback } } - batchMock.demand.execute { + googleBatchMock.demand.size() { return 1 } + googleBatchMock.demand.execute { callback.onSuccess(emptyImageList, null) callback.onSuccess(emptyImageList, null) callback.onSuccess(emptyImageList, null) } when: - batchMock.use { + googleBatchMock.use { computeMock.use { listMock.use { def compute = new Compute.Builder( diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleSecurityGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleSecurityGroupAtomicOperationUnitSpec.groovy index 51b340d3193..5cb7b38c0be 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleSecurityGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleSecurityGroupAtomicOperationUnitSpec.groovy @@ -64,7 +64,7 @@ class UpsertGoogleSecurityGroupAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() googleOperationPoller = new GoogleOperationPoller( googleConfigurationProperties: new GoogleConfigurationProperties(), threadSleeper: threadSleeperMock, diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperationUnitSpec.groovy index a6496e3f8e7..d410d811df0 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/UpsertGoogleServerGroupTagsAtomicOperationUnitSpec.groovy @@ -48,16 +48,16 @@ class 
UpsertGoogleServerGroupTagsAtomicOperationUnitSpec extends Specification { private static final TAGS = ["some-tag-1", "some-tag-2", "some-tag-3"] private static final ORIG_INSTANCE_TEMPLATE_NAME = "$SERVER_GROUP_NAME-123" private static final ORIG_INSTANCE_TEMPLATE_URL = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/instanceTemplates/$ORIG_INSTANCE_TEMPLATE_NAME" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/global/instanceTemplates/$ORIG_INSTANCE_TEMPLATE_NAME" private static final NEW_INSTANCE_TEMPLATE_NAME = "new-instance-template" private static final INSTANCE_TEMPLATE_INSERTION_OP_NAME = "instance-template-insertion-op" private static final SET_INSTANCE_TEMPLATE_OP_NAME = "set-instance-template-op" private static final INSTANCE_1_NAME = "instance-1" private static final INSTANCE_2_NAME = "instance-2" private static final INSTANCE_1_URL = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_1_NAME" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_1_NAME" private static final INSTANCE_2_URL = - "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_2_NAME" + "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$ZONE/instances/$INSTANCE_2_NAME" private static final DONE = "DONE" private static final INSTANCES_SET_TAGS_1_OP_NAME = "instances-set-tags-1-op" private static final INSTANCES_SET_TAGS_2_OP_NAME = "instances-set-tags-2-op" @@ -71,7 +71,7 @@ class UpsertGoogleServerGroupTagsAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should set tags on new instance template and on instances"() { @@ -150,7 +150,7 @@ class UpsertGoogleServerGroupTagsAtomicOperationUnitSpec extends Specification { 1 * computeMock.instanceGroupManagers() >> instanceGroupManagersMock 1 * instanceGroupManagersMock.get(PROJECT_NAME, ZONE, SERVER_GROUP_NAME) >> instanceGroupManagersGetMock 1 * instanceGroupManagersGetMock.execute() >> instanceGroupManagerReal - 1 * computeMock.instanceTemplates() >> instanceTemplatesMock + 2 * computeMock.instanceTemplates() >> instanceTemplatesMock 1 * instanceTemplatesMock.get(PROJECT_NAME, ORIG_INSTANCE_TEMPLATE_NAME) >> instanceTemplatesGetMock 1 * instanceTemplatesGetMock.execute() >> instanceTemplateReal @@ -264,7 +264,7 @@ class UpsertGoogleServerGroupTagsAtomicOperationUnitSpec extends Specification { 1 * computeMock.instanceGroupManagers() >> instanceGroupManagersMock 1 * instanceGroupManagersMock.get(PROJECT_NAME, ZONE, SERVER_GROUP_NAME) >> instanceGroupManagersGetMock 1 * instanceGroupManagersGetMock.execute() >> instanceGroupManagerReal - 1 * computeMock.instanceTemplates() >> instanceTemplatesMock + 2 * computeMock.instanceTemplates() >> instanceTemplatesMock 1 * instanceTemplatesMock.get(PROJECT_NAME, ORIG_INSTANCE_TEMPLATE_NAME) >> instanceTemplatesGetMock 1 * instanceTemplatesGetMock.execute() >> instanceTemplateReal diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy index 
34f0bb526a4..b5c3c811d12 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy @@ -64,7 +64,7 @@ class DeleteGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete Http Load Balancer with one backend service"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy new file mode 100644 index 00000000000..78d138700e7 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy @@ -0,0 +1,791 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer + +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.* +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry +import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationTimedOutException +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec extends Specification { + private static final BASE_PHASE = "test-phase" + private static final ACCOUNT_NAME = "auto" + private static final PROJECT_NAME = "my_project" + private static final HTTP_LOAD_BALANCER_NAME = "default" + private static final URL_MAP_NAME = "url-map" + private static final REGION = "us-central1" + private static final TARGET_HTTP_PROXY_URL = "projects/$PROJECT_NAME/global/targetHttpProxies/target-http-proxy" + private static final TARGET_HTTP_PROXY_NAME = "target-http-proxy" + private static final URL_MAP_URL = "project/url-map" + private static final BACKEND_SERVICE_URL = "project/backend-service" + private static final BACKEND_SERVICE_NAME = "backend-service" + private static final HEALTH_CHECK_URL = "project/health-check" + private static final HEALTH_CHECK_NAME = "health-check" + private static final FORWARDING_RULE_DELETE_OP_NAME = "delete-forwarding-rule" + private static final TARGET_HTTP_PROXY_DELETE_OP_NAME = "delete-target-http-proxy" + private static final URL_MAP_DELETE_OP_NAME = "delete-url-map" + private static final BACKEND_SERVICE_DELETE_OP_NAME = "delete-backend-service" + private static final HEALTH_CHECK_DELETE_OP_NAME = "delete-health-check" + private static final PENDING = "PENDING" + private static final DONE = "DONE" + + @Shared + def threadSleeperMock = Mock(GoogleOperationPoller.ThreadSleeper) + @Shared + def registry = new DefaultRegistry() + @Shared + SafeRetry safeRetry + + def setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + safeRetry = SafeRetry.withoutDelay() + } + + void "should delete Internal Http Load Balancer with one backend service"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + 
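+ // Deleting an Internal HTTP LB tears down the whole resource chain (forwarding rule, target proxy, URL map, backend service, health check), so the spec mocks each resource's get/list and delete calls.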
def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> 
targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + + 4 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + } + + void "should delete Internal Http Load Balancer with multiple backend services/health checks"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap( + name: URL_MAP_NAME, + defaultService: BACKEND_SERVICE_URL, + pathMatchers: [ + new PathMatcher(defaultService: BACKEND_SERVICE_URL + "2", + pathRules: [ + new PathRule(service: BACKEND_SERVICE_URL + "3"), new PathRule(service: BACKEND_SERVICE_URL) + ] + ) + ]) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendServicesGet2 = Mock(Compute.RegionBackendServices.Get) + def backendServicesGet3 = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def backendService2 = new BackendService(healthChecks: [HEALTH_CHECK_URL+"2"]) + def backendService3 = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + 
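+ // Every backend service referenced by the URL map is deleted, each returning its own completion Operation; the health check shared by backend services 1 and 3 is only deleted once.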
def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete2 = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp2 = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME+"2", + status: DONE) + def backendServicesDelete3 = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp3 = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME+"3", + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete2 = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp2 = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME+"2", + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet2 = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet3 = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet2 = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + + 6 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"2") >> backendServicesGet2 + 1 * backendServicesGet2.execute() >> backendService2 + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"3") >> backendServicesGet3 + 1 * backendServicesGet3.execute() >> backendService3 + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * 
targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"2") >> backendServicesDelete2 + 1 * backendServicesDelete2.execute() >> backendServicesDeleteOp2 + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"3") >> backendServicesDelete3 + 1 * backendServicesDelete3.execute() >> backendServicesDeleteOp3 + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME+"2") >> healthChecksDelete2 + 1 * healthChecksDelete2.execute() >> healthChecksDeleteOp2 + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME+"2") >> backendServicesOperationGet2 + 1 * backendServicesOperationGet2.execute() >> backendServicesDeleteOp2 + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME+"3") >> backendServicesOperationGet3 + 1 * backendServicesOperationGet3.execute() >> backendServicesDeleteOp3 + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME+"2") >> healthChecksOperationGet2 + 1 * healthChecksOperationGet2.execute() >> healthChecksDeleteOp2 + } + + void "should fail to delete an Internal Http Load Balancer that does not exist"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.registry = registry + + when: + operation.operate([]) + + then: + 1 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: []] + thrown GoogleResourceNotFoundException + } + + void "should fail to delete Internal 
Http Load Balancer if failed to delete a resource"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksPendingDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: PENDING) + def healthChecksFailingDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE, + error: new Operation.Error(errors: [new Operation.Error.Errors(message: "error")])) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> 
forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksPendingDeleteOp + + 4 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksFailingDeleteOp + thrown GoogleOperationException + } + + void "should fail to delete Internal Http Load Balancer if timed out while deleting a resource"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + + def regionOperations = Mock(Compute.RegionOperations) + def 
targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: PENDING) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + deleteOperationTimeoutSeconds: 0, + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + GCEUtil.deleteGlobalListener(computeMock, PROJECT_NAME, HTTP_LOAD_BALANCER_NAME, BASE_PHASE, safeRetry, operation) >> targetHttpProxiesDeleteOp + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 1 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 1 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, _) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, _) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + + 1 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + thrown GoogleOperationTimedOutException + } + + void "should wait on slow deletion of target HTTP proxy and successfully delete simple HTTP Load Balancer"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = 
Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOpPending = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: PENDING) + def targetHttpProxiesDeleteOpDone = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * 
targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOpPending + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + + 6 * computeMock.regionOperations() >> regionOperations + 3 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 2 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOpPending + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOpDone + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + } + + void "should not delete backend service in more than one url map"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new 
GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + def conflictingMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: "conflicting") + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap, conflictingMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> null + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + + 3 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + } + + void "should fail if server group still associated"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: 
"INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL], backends: [new Backend()]) + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 1 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 2 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 1 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 1 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + thrown IllegalStateException + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy index 8177949139a..3fa0f61610f 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy @@ -60,7 +60,7 @@ class DeleteGoogleInternalLoadBalancerAtomicOperationUnitSpec extends Specificat def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete an Internal Load Balancer with http health check"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleLoadBalancerAtomicOperationUnitSpec.groovy 
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleLoadBalancerAtomicOperationUnitSpec.groovy index 8d3bb5a5136..691f86221ed 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleLoadBalancerAtomicOperationUnitSpec.groovy @@ -61,7 +61,7 @@ class DeleteGoogleLoadBalancerAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete a Network Load Balancer with health checks"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy index fdd1b35c729..b0e20ac92ae 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy @@ -60,7 +60,7 @@ class DeleteGoogleSslLoadBalancerAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete ssl load balancer"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy index 2cd17d0e307..77b1bcf5de7 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy @@ -60,7 +60,7 @@ class DeleteGoogleTcpLoadBalancerAtomicOperationUnitSpec extends Specification { def setupSpec() { TaskRepository.threadLocalTask.set(Mock(Task)) - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should delete tcp load balancer"() { diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy index b8266edfed8..b84f2a1e2d6 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy +++ 
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec.groovy @@ -31,6 +31,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -62,20 +64,19 @@ class UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification "healthyThreshold" : 1, "unhealthyThreshold": 1 ] - safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0) + safeRetry = SafeRetry.withoutDelay() } void "should create an HTTP Load Balancer with host rule, path matcher, path rules, etc with no existing infrastructure"() { setup: def computeMock = Mock(Compute) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( - accountCredentialsProvider: credentialsProvider, - objectMapper: new ObjectMapper() + credentialsRepository: credentialsRepo ) def globalOperations = Mock(Compute.GlobalOperations) @@ -236,13 +237,12 @@ class UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification setup: def computeMock = Mock(Compute) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( - accountCredentialsProvider: credentialsProvider, - objectMapper: new ObjectMapper() + credentialsRepository: credentialsRepo ) def globalOperations = Mock(Compute.GlobalOperations) @@ -373,13 +373,12 @@ class UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification setup: def computeMock = Mock(Compute) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new 
GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( - accountCredentialsProvider: credentialsProvider, - objectMapper: new ObjectMapper() + credentialsRepository: credentialsRepo ) def globalOperations = Mock(Compute.GlobalOperations) @@ -510,13 +509,12 @@ class UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification setup: def computeMock = Mock(Compute) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( - accountCredentialsProvider: credentialsProvider, - objectMapper: new ObjectMapper() + credentialsRepository: credentialsRepo ) def globalOperations = Mock(Compute.GlobalOperations) @@ -681,13 +679,12 @@ class UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification setup: def computeMock = Mock(Compute) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( - accountCredentialsProvider: credentialsProvider, - objectMapper: new ObjectMapper() + credentialsRepository: credentialsRepo ) def globalOperations = Mock(Compute.GlobalOperations) @@ -861,13 +858,12 @@ class UpsertGoogleHttpLoadBalancerAtomicOperationUnitSpec extends Specification setup: def computeMock = Mock(Compute) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) + credentialsRepo.save(credentials) def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( - accountCredentialsProvider: credentialsProvider, - objectMapper: new ObjectMapper() + credentialsRepository: credentialsRepo ) def globalOperations = Mock(Compute.GlobalOperations) diff --git 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy
new file mode 100644
index 00000000000..784bae8e999
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy
@@ -0,0 +1,1100 @@
+/*
+ * Copyright 2014 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer
+
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.google.api.services.compute.Compute
+import com.google.api.services.compute.model.*
+import com.netflix.spectator.api.DefaultRegistry
+import com.netflix.spinnaker.clouddriver.data.task.Task
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
+import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties
+import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller
+import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry
+import com.netflix.spinnaker.clouddriver.google.deploy.converters.UpsertGoogleLoadBalancerAtomicOperationConverter
+import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck
+import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork
+import com.netflix.spinnaker.clouddriver.google.model.GoogleSubnet
+import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider
+import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider
+import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
+import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
+import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
+import spock.lang.Shared
+import spock.lang.Specification
+import spock.lang.Subject
+
+import static com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleHttpLoadBalancerTestConstants.*
+
+class UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec extends Specification {
+ private static final PROJECT_NAME = "my-project"
+ private static final HEALTH_CHECK_OP_NAME = "health-check-op"
+ private static final BACKEND_SERVICE_OP_NAME = "backend-service-op"
+ private static final URL_MAP_OP_NAME = "url-map-op"
+ private static final TARGET_HTTP_PROXY_OP_NAME = "target-http-proxy-op"
+ private static final DONE = "DONE"
+ private static final REGION = "us-central1"
+
+ @Shared GoogleHealthCheck hc
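+ // hc is populated in setupSpec() below; Groovy coerces the Map literal to a
+ // GoogleHealthCheck. The mocked ThreadSleeper that follows makes the
+ // GoogleOperationPoller's pauses between polls no-ops, keeping these specs fast.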
@Shared def threadSleeperMock = Mock(GoogleOperationPoller.ThreadSleeper) + @Shared def registry = new DefaultRegistry() + @Shared SafeRetry safeRetry + + def setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + hc = [ + "name" : "basic-check", + "requestPath" : "/", + "healthCheckType" : "HTTP", + "port" : 80, + "checkIntervalSec" : 1, + "timeoutSec" : 1, + "healthyThreshold" : 1, + "unhealthyThreshold": 1 + ] + safeRetry = SafeRetry.withoutDelay() + } + + void "should create an Internal HTTP Load Balancer with host rule, path matcher, path rules, etc with no existing infrastructure"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + credentialsRepository: credentialsRepo + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = 
new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "network" : "some-network", + "subnet" : "some-subnet", + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : [ + [ + "hostPatterns": [ + "host1.com", + "host2.com" + ], + "pathMatcher" : [ + "pathRules" : [ + [ + "paths" : [ + "/path", + "/path2/more" + ], + "backendService": [ + "name" : PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ], + "defaultService": [ + "name" : DEFAULT_PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ] + ] + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 4 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 3 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 3 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksInsertOp + 3 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 3 * backendServiceOperationGet.execute() >> healthChecksInsertOp + 1 * 
regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should create an Internal HTTP Load Balancer with minimal description"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + credentialsRepository: credentialsRepo + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: 
"forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : null, + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 1 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 1 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 5 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 1 * backendServiceOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * 
forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should create an Internal HTTPS Load Balancer when certificate specified"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + credentialsRepository: credentialsRepo + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpsProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpsProxies = Mock(Compute.RegionTargetHttpsProxies) + def targetHttpsProxiesInsert = Mock(Compute.RegionTargetHttpsProxies.Insert) + def targetHttpsProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "certificate" : "my-cert", + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "hostRules" : null, + "network" : "some-network", + "subnet" : 
"some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 1 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 1 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpsProxies() >> targetHttpsProxies + 1 * targetHttpsProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpsProxiesInsert + 1 * targetHttpsProxiesInsert.execute() >> targetHttpsProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 5 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 1 * backendServiceOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpsProxyOperationGet + 1 * targetHttpsProxyOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should update health check when it exists and needs updated"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) + def credentials = new 
+      2 * computeMock.regionUrlMaps() >> urlMaps
+      1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet
+      1 * urlMapsGet.execute() >> null
+      1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert
+      1 * urlMapsInsert.execute() >> urlMapsInsertOp
+
+      1 * computeMock.regionTargetHttpsProxies() >> targetHttpsProxies
+      1 * targetHttpsProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpsProxiesInsert
+      1 * targetHttpsProxiesInsert.execute() >> targetHttpsProxiesInsertOp
+
+      2 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert
+      1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet
+      1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp
+      1 * forwardingRulesGet.execute() >> null
+
+      5 * computeMock.regionOperations() >> regionOperations
+      1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet
+      1 * healthCheckOperationGet.execute() >> healthChecksInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet
+      1 * backendServiceOperationGet.execute() >> healthChecksInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet
+      1 * urlMapOperationGet.execute() >> healthChecksInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpsProxyOperationGet
+      1 * targetHttpsProxyOperationGet.execute() >> healthChecksInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet
+      1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
+  }
+
+  void "should update health check when it exists and needs updating"() {
+    setup:
+      def computeMock = Mock(Compute)
+
+      def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+        new NoopCredentialsLifecycleHandler<>())
+      def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
+      credentialsRepo.save(credentials)
+      def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
+        credentialsRepository: credentialsRepo
+      )
+
+      def regionOperations = Mock(Compute.RegionOperations)
+      def healthCheckOperationGet = Mock(Compute.RegionOperations.Get)
+      def backendServiceOperationGet = Mock(Compute.RegionOperations.Get)
+      def urlMapOperationGet = Mock(Compute.RegionOperations.Get)
+      def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get)
+      def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get)
+
+      def googleNetworkProviderMock = Mock(GoogleNetworkProvider)
+      def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global"
+      def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network")
+
+      def googleSubnetProviderMock = Mock(GoogleSubnetProvider)
+      def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION"
+      def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet")
+
+      def healthChecks = Mock(Compute.RegionHealthChecks)
+      def healthChecksList = Mock(Compute.RegionHealthChecks.List)
+      def healthCheckListReal = new HealthCheckList(items: [])
+      def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert)
+      def healthChecksInsertOp = new Operation(
+        targetLink: "health-check",
+        name: HEALTH_CHECK_OP_NAME,
+        status: DONE)
+      def healthChecksUpdateOp = new Operation(
+        targetLink: "health-check",
+        name: HEALTH_CHECK_OP_NAME,
+        status: DONE)
+
+      def backendServices = Mock(Compute.RegionBackendServices)
+      def backendServicesList = Mock(Compute.RegionBackendServices.List)
+      def bsListReal = new BackendServiceList(items: [])
+      def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert)
+      def backendServicesInsertOp = new Operation(
+        targetLink: "backend-service",
+        name: BACKEND_SERVICE_OP_NAME,
+        status: DONE)
+
+      def urlMaps = Mock(Compute.RegionUrlMaps)
+      def urlMapsGet = Mock(Compute.RegionUrlMaps.Get)
+      def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert)
+      def urlMapsInsertOp = new Operation(
+        targetLink: "url-map",
+        name: URL_MAP_OP_NAME,
+        status: DONE)
+
+      def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies)
+      def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert)
+      def targetHttpProxiesInsertOp = new Operation(
+        targetLink: "target-proxy",
+        name: TARGET_HTTP_PROXY_OP_NAME,
+        status: DONE)
+
+      def forwardingRules = Mock(Compute.ForwardingRules)
+      def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert)
+      def forwardingRulesGet = Mock(Compute.ForwardingRules.Get)
+      def forwardingRuleInsertOp = new Operation(
+        targetLink: "forwarding-rule",
+        name: LOAD_BALANCER_NAME,
+        status: DONE)
+
+      def input = [
+        accountName       : ACCOUNT_NAME,
+        "loadBalancerName": LOAD_BALANCER_NAME,
+        "portRange"       : PORT_RANGE,
+        "region"          : REGION,
+        "defaultService"  : [
+          "name"           : DEFAULT_SERVICE,
+          "backends"       : [],
+          "healthCheck"    : hc,
+          "sessionAffinity": "NONE",
+        ],
+        "certificate"     : "",
+        "hostRules"       : [
+          [
+            "hostPatterns": [
+              "host1.com",
+              "host2.com"
+            ],
+            "pathMatcher" : [
+              "pathRules"     : [
+                [
+                  "paths"         : [
+                    "/path",
+                    "/path2/more"
+                  ],
+                  "backendService": [
+                    "name"           : PM_SERVICE,
+                    "backends"       : [],
+                    "healthCheck"    : hc,
+                    "sessionAffinity": "NONE",
+                  ]
+                ]
+              ],
+              "defaultService": [
+                "name"           : DEFAULT_PM_SERVICE,
+                "backends"       : [],
+                "healthCheck"    : hc,
+                "sessionAffinity": "NONE",
+              ]
+            ]
+          ]
+        ],
+        "network"         : "some-network",
+        "subnet"          : "some-subnet",
+      ]
+      def description = converter.convertDescription(input)
+      @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description)
+      operation.googleOperationPoller =
+        new GoogleOperationPoller(
+          googleConfigurationProperties: new GoogleConfigurationProperties(),
+          threadSleeper: threadSleeperMock,
+          registry: registry,
+          safeRetry: safeRetry
+        )
+
+      operation.googleNetworkProvider = googleNetworkProviderMock
+      operation.googleSubnetProvider = googleSubnetProviderMock
+      operation.registry = registry
+      operation.safeRetry = safeRetry
+
+    when:
+      operation.operate([])
+
+    then:
+
+      1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork]
+      1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet]
+      2 * computeMock.regionHealthChecks() >> healthChecks
+      1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList
+      1 * healthChecksList.execute() >> healthCheckListReal
+      1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert
+      1 * healthChecksInsert.execute() >> healthChecksInsertOp
+
+      4 * computeMock.regionBackendServices() >> backendServices
+      1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList
+      1 * backendServicesList.execute() >> bsListReal
+      3 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert
+      3 * backendServicesInsert.execute() >> backendServicesInsertOp
+
+      2 * computeMock.regionUrlMaps() >> urlMaps
+      1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet
+      1 * urlMapsGet.execute() >> null
+      1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert
+      1 * urlMapsInsert.execute() >> urlMapsInsertOp
+
+      1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies
+      1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert
+      1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp
+
+      2 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert
+      1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet
+      1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp
+      1 * forwardingRulesGet.execute() >> null
+
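+      // The GoogleOperationPoller re-reads each pending region operation by name; a
+      // stubbed Operation with status DONE is all it needs to consider the step complete.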
+      7 * computeMock.regionOperations() >> regionOperations
+      1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet
+      1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp
+      3 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet
+      3 * backendServiceOperationGet.execute() >> backendServicesInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet
+      1 * urlMapOperationGet.execute() >> urlMapsInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet
+      1 * targetHttpProxyOperationGet.execute() >> targetHttpProxiesInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet
+      1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
+  }
+
+  void "should update backend service if it exists and needs updating"() {
+    setup:
+      def computeMock = Mock(Compute)
+
+      def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+        new NoopCredentialsLifecycleHandler<>())
+      def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
+      credentialsRepo.save(credentials)
+      def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
+        credentialsRepository: credentialsRepo
+      )
+
+      def regionOperations = Mock(Compute.RegionOperations)
+      def healthCheckOperationGet = Mock(Compute.RegionOperations.Get)
+      def backendServiceOperationGet = Mock(Compute.RegionOperations.Get)
+      def urlMapOperationGet = Mock(Compute.RegionOperations.Get)
+      def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get)
+      def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get)
+
+      def googleNetworkProviderMock = Mock(GoogleNetworkProvider)
+      def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global"
+      def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network")
+
+      def googleSubnetProviderMock = Mock(GoogleSubnetProvider)
+      def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION"
+      def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet")
+
+      def healthChecks = Mock(Compute.RegionHealthChecks)
+      def healthChecksList = Mock(Compute.RegionHealthChecks.List)
+      def healthCheckListReal = new HealthCheckList(items: [])
+      def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert)
+      def healthChecksInsertOp = new Operation(
+        targetLink: "health-check",
+        name: HEALTH_CHECK_OP_NAME,
+        status: DONE)
+      def healthChecksUpdateOp = new Operation(
+        targetLink: "health-check",
+        name: HEALTH_CHECK_OP_NAME,
+        status: DONE)
+
+      def backendServices = Mock(Compute.RegionBackendServices)
+      def backendServicesList = Mock(Compute.RegionBackendServices.List)
+      def bsListReal = new BackendServiceList(items: [new BackendService(name: PM_SERVICE, sessionAffinity: 'NONE')])
+      def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert)
+      def backendServicesInsertOp = new Operation(
+        targetLink: "backend-service",
+        name: BACKEND_SERVICE_OP_NAME,
+        status: DONE)
+      def backendServicesUpdate = Mock(Compute.RegionBackendServices.Update)
+      def backendServicesUpdateOp = new Operation(
+        targetLink: "backend-service",
+        name: BACKEND_SERVICE_OP_NAME + "update",
+        status: DONE)
+
+      def urlMaps = Mock(Compute.RegionUrlMaps)
+      def urlMapsGet = Mock(Compute.RegionUrlMaps.Get)
+      def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert)
+      def urlMapsInsertOp = new Operation(
+        targetLink: "url-map",
+        name: URL_MAP_OP_NAME,
+        status: DONE)
+
+      def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies)
+      def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert)
+      def targetHttpProxiesInsertOp = new Operation(
+        targetLink: "target-proxy",
+        name: TARGET_HTTP_PROXY_OP_NAME,
+        status: DONE)
+
+      def forwardingRules = Mock(Compute.ForwardingRules)
+      def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert)
+      def forwardingRulesGet = Mock(Compute.ForwardingRules.Get)
+      def forwardingRuleInsertOp = new Operation(
+        targetLink: "forwarding-rule",
+        name: LOAD_BALANCER_NAME,
+        status: DONE)
+
+      def input = [
+        accountName       : ACCOUNT_NAME,
+        "loadBalancerName": LOAD_BALANCER_NAME,
+        "portRange"       : PORT_RANGE,
+        "region"          : REGION,
+        "defaultService"  : [
+          "name"           : DEFAULT_SERVICE,
+          "backends"       : [],
+          "healthCheck"    : hc,
+          "sessionAffinity": "NONE",
+        ],
+        "certificate"     : "",
+        "hostRules"       : [
+          [
+            "hostPatterns": [
+              "host1.com",
+              "host2.com"
+            ],
+            "pathMatcher" : [
+              "pathRules"     : [
+                [
+                  "paths"         : [
+                    "/path",
+                    "/path2/more"
+                  ],
+                  "backendService": [
+                    "name"           : PM_SERVICE,
+                    "backends"       : [],
+                    "healthCheck"    : hc,
+                    "sessionAffinity": "NONE",
+                  ]
+                ]
+              ],
+              "defaultService": [
+                "name"           : DEFAULT_PM_SERVICE,
+                "backends"       : [],
+                "healthCheck"    : hc,
+                "sessionAffinity": "NONE",
+              ]
+            ]
+          ]
+        ],
+        "network"         : "some-network",
+        "subnet"          : "some-subnet",
+      ]
+      def description = converter.convertDescription(input)
+      @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description)
+      operation.googleOperationPoller =
+        new GoogleOperationPoller(
+          googleConfigurationProperties: new GoogleConfigurationProperties(),
+          threadSleeper: threadSleeperMock,
+          registry: registry,
+          safeRetry: safeRetry
+        )
+      operation.googleNetworkProvider = googleNetworkProviderMock
+      operation.googleSubnetProvider = googleSubnetProviderMock
+      operation.registry = registry
+      operation.safeRetry = safeRetry
+
+    when:
+      operation.operate([])
+
+    then:
+
+      1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork]
+      1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet]
+      2 * computeMock.regionHealthChecks() >> healthChecks
+      1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList
+      1 * healthChecksList.execute() >> healthCheckListReal
+      1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert
+      1 * healthChecksInsert.execute() >> healthChecksInsertOp
+
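+      // The backend service list below already contains PM_SERVICE, so the operation is
+      // expected to update() that one in place and insert() only the two missing services.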
"backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : [ + [ + "hostPatterns": [ + "host1.com", + "host2.com" + ], + "pathMatcher" : [ + "pathRules" : [ + [ + "paths" : [ + "/path", + "/path2/more" + ], + "backendService": [ + "name" : PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ], + "defaultService": [ + "name" : DEFAULT_PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ] + ], + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 4 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 2 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 2 * backendServicesInsert.execute() >> backendServicesInsertOp + 1 * backendServices.update(PROJECT_NAME, REGION, PM_SERVICE, _) >> backendServicesUpdate + 1 * backendServicesUpdate.execute() >> backendServicesUpdateOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp + 2 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 2 * backendServiceOperationGet.execute() >> backendServicesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME + "update") >> backendServiceOperationGet + 1 * 
+      7 * computeMock.regionOperations() >> regionOperations
+      1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet
+      1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp
+      2 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet
+      2 * backendServiceOperationGet.execute() >> backendServicesInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME + "update") >> backendServiceOperationGet
+      1 * backendServiceOperationGet.execute() >> backendServicesUpdateOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet
+      1 * urlMapOperationGet.execute() >> urlMapsInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet
+      1 * targetHttpProxyOperationGet.execute() >> targetHttpProxiesInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet
+      1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
+  }
+
+  void "should update url map if it exists and needs updating"() {
+    setup:
+      def computeMock = Mock(Compute)
+
+      def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+        new NoopCredentialsLifecycleHandler<>())
+      def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
+      credentialsRepo.save(credentials)
+      def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
+        credentialsRepository: credentialsRepo
+      )
+
+      def regionOperations = Mock(Compute.RegionOperations)
+      def healthCheckOperationGet = Mock(Compute.RegionOperations.Get)
+      def backendServiceOperationGet = Mock(Compute.RegionOperations.Get)
+      def urlMapOperationGet = Mock(Compute.RegionOperations.Get)
+      def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get)
+      def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get)
+
+      def googleNetworkProviderMock = Mock(GoogleNetworkProvider)
+      def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global"
+      def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network")
+
+      def googleSubnetProviderMock = Mock(GoogleSubnetProvider)
+      def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION"
+      def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet")
+
+      def healthChecks = Mock(Compute.RegionHealthChecks)
+      def healthChecksList = Mock(Compute.RegionHealthChecks.List)
+      def healthCheckListReal = new HealthCheckList(items: [])
+      def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert)
+      def healthChecksInsertOp = new Operation(
+        targetLink: "health-check",
+        name: HEALTH_CHECK_OP_NAME,
+        status: DONE)
+      def healthChecksUpdateOp = new Operation(
+        targetLink: "health-check",
+        name: HEALTH_CHECK_OP_NAME,
+        status: DONE)
+
+      def backendServices = Mock(Compute.RegionBackendServices)
+      def backendServicesList = Mock(Compute.RegionBackendServices.List)
+      def bsListReal = new BackendServiceList(items: [new BackendService(name: PM_SERVICE, sessionAffinity: 'NONE')])
+      def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert)
+      def backendServicesInsertOp = new Operation(
+        targetLink: "backend-service",
+        name: BACKEND_SERVICE_OP_NAME,
+        status: DONE)
+      def backendServicesUpdate = Mock(Compute.RegionBackendServices.Update)
+      def backendServicesUpdateOp = new Operation(
+        targetLink: "backend-service",
+        name: BACKEND_SERVICE_OP_NAME + "update",
+        status: DONE)
+
+      def urlMaps = Mock(Compute.RegionUrlMaps)
+      def urlMapsGet = Mock(Compute.RegionUrlMaps.Get)
+      def urlMapReal = new UrlMap(name: LOAD_BALANCER_NAME)
+      def urlMapsUpdate = Mock(Compute.RegionUrlMaps.Update)
+      def urlMapsUpdateOp = new Operation(
+        targetLink: "url-map",
+        name: URL_MAP_OP_NAME,
+        status: DONE)
+
+      def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies)
+      def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert)
+      def targetHttpProxiesInsertOp = new Operation(
+        targetLink: "target-proxy",
+        name: TARGET_HTTP_PROXY_OP_NAME,
+        status: DONE)
+
+      def forwardingRules = Mock(Compute.ForwardingRules)
+      def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert)
+      def forwardingRulesGet = Mock(Compute.ForwardingRules.Get)
+      def forwardingRuleInsertOp = new Operation(
+        targetLink: "forwarding-rule",
+        name: LOAD_BALANCER_NAME,
+        status: DONE)
+
+      def input = [
+        accountName       : ACCOUNT_NAME,
+        "loadBalancerName": LOAD_BALANCER_NAME,
+        "portRange"       : PORT_RANGE,
+        "region"          : REGION,
+        "defaultService"  : [
+          "name"           : DEFAULT_SERVICE,
+          "backends"       : [],
+          "healthCheck"    : hc,
+          "sessionAffinity": "NONE",
+        ],
+        "certificate"     : "",
+        "hostRules"       : [
+          [
+            "hostPatterns": [
+              "host1.com",
+              "host2.com"
+            ],
+            "pathMatcher" : [
+              "pathRules"     : [
+                [
+                  "paths"         : [
+                    "/path",
+                    "/path2/more"
+                  ],
+                  "backendService": [
+                    "name"           : PM_SERVICE,
+                    "backends"       : [],
+                    "healthCheck"    : hc,
+                    "sessionAffinity": "NONE",
+                  ]
+                ]
+              ],
+              "defaultService": [
+                "name"           : DEFAULT_PM_SERVICE,
+                "backends"       : [],
+                "healthCheck"    : hc,
+                "sessionAffinity": "NONE",
+              ]
+            ]
+          ]
+        ],
+        "network"         : "some-network",
+        "subnet"          : "some-subnet",
+      ]
+      def description = converter.convertDescription(input)
+      @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description)
+      operation.googleOperationPoller =
+        new GoogleOperationPoller(
+          googleConfigurationProperties: new GoogleConfigurationProperties(),
+          threadSleeper: threadSleeperMock,
+          registry: registry,
+          safeRetry: safeRetry
+        )
+      operation.googleNetworkProvider = googleNetworkProviderMock
+      operation.googleSubnetProvider = googleSubnetProviderMock
+      operation.registry = registry
+      operation.safeRetry = safeRetry
+
+    when:
+      operation.operate([])
+
+    then:
+
+      1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork]
+      1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet]
+      2 * computeMock.regionHealthChecks() >> healthChecks
+      1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList
+      1 * healthChecksList.execute() >> healthCheckListReal
+      1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert
+      1 * healthChecksInsert.execute() >> healthChecksInsertOp
+
+      4 * computeMock.regionBackendServices() >> backendServices
+      1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList
+      1 * backendServicesList.execute() >> bsListReal
+      2 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert
+      2 * backendServicesInsert.execute() >> backendServicesInsertOp
+      1 * backendServices.update(PROJECT_NAME, REGION, PM_SERVICE, _) >> backendServicesUpdate
+      1 * backendServicesUpdate.execute() >> backendServicesUpdateOp
+
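+      // Unlike the earlier tests, get() returns an existing UrlMap here, so the
+      // operation should go down the update() path instead of inserting a new map.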
+      2 * computeMock.regionUrlMaps() >> urlMaps
+      1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet
+      1 * urlMapsGet.execute() >> urlMapReal
+      1 * urlMaps.update(PROJECT_NAME, REGION, LOAD_BALANCER_NAME, _) >> urlMapsUpdate
+      1 * urlMapsUpdate.execute() >> urlMapsUpdateOp
+
+      1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies
+      1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsUpdateOp.targetLink}) >> targetHttpProxiesInsert
+      1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp
+
+      2 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert
+      1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet
+      1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp
+      1 * forwardingRulesGet.execute() >> null
+
+      7 * computeMock.regionOperations() >> regionOperations
+      1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet
+      1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp
+      2 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet
+      2 * backendServiceOperationGet.execute() >> backendServicesInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME + "update") >> backendServiceOperationGet
+      1 * backendServiceOperationGet.execute() >> backendServicesUpdateOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet
+      1 * urlMapOperationGet.execute() >> urlMapsUpdateOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet
+      1 * targetHttpProxyOperationGet.execute() >> targetHttpProxiesInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet
+      1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
+  }
+}
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy
index e7beef2c0b9..d89bb03d924 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec.groovy
@@ -35,6 +35,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
 import spock.lang.Subject
@@ -65,20 +67,18 @@ class UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec extends Specificat
       "healthyThreshold" : 1,
       "unhealthyThreshold": 1
     ]
-    safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0)
+    safeRetry = SafeRetry.withoutDelay()
   }

   void "should create Internal load balancer if no infrastructure present."() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
@@ -199,14 +199,12 @@ class UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec extends Specificat
   void "should update backend service if it exists."() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
@@ -327,14 +325,12 @@ class UpsertGoogleInternalLoadBalancerAtomicOperationUnitSpec extends Specificat
   void "should update health check if it exists."() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperationUnitSpec.groovy
index 722e6ec7163..a89d324f363 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperationUnitSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleLoadBalancerAtomicOperationUnitSpec.groovy
@@ -38,6 +38,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller
 import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry
 import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription
 import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException
+import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleSessionAffinity
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import spock.lang.Shared
 import spock.lang.Specification
@@ -55,11 +56,11 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
   private static final INSTANCE_2 = "instance-2"
   private static final INSTANCE_3 = "instance-3"
   private static final INSTANCE_1_URL =
-    "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-a/instances/$INSTANCE_1"
+    "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-a/instances/$INSTANCE_1"
   private static final INSTANCE_2_URL =
-    "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-a/instances/$INSTANCE_2"
+    "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-a/instances/$INSTANCE_2"
   private static final INSTANCE_3_URL =
-    "https://www.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-a/instances/$INSTANCE_3"
+    "https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/us-central1-a/instances/$INSTANCE_3"
   private static final IP_PROTOCOL_TCP = "TCP"
   private static final IP_PROTOCOL_UDP = "UDP"
   private static final IP_ADDRESS = "1.1.1.1"
@@ -81,7 +82,7 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {

   def setupSpec() {
     TaskRepository.threadLocalTask.set(Mock(Task))
-    safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0)
+    safeRetry = SafeRetry.withoutDelay()
   }

   void "should create a network load balancer with health checks"() {
@@ -418,6 +419,206 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     1 * regionForwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
   }

+  void "should create a network load balancer with the specified session affinity if it is provided"() {
+    setup:
+      def computeMock = Mock(Compute)
+      def regionOperations = Mock(Compute.RegionOperations)
+      def regionTargetPoolOperationGet = Mock(Compute.RegionOperations.Get)
+      def targetPools = Mock(Compute.TargetPools)
+      def targetPoolsInsert = Mock(Compute.TargetPools.Insert)
+      def targetPoolsInsertOp = new Operation(
+        targetLink: "target-pool",
+        name: TARGET_POOL_OP_NAME,
+        status: DONE)
+      def regions = Mock(Compute.Regions)
+      def regionsList = Mock(Compute.Regions.List)
+      def regionsListReal = new RegionList(
+        items: [new Region(name: REGION_US), new Region(name: REGION_ASIA), new Region(name: REGION_EUROPE)])
+      def forwardingRules = Mock(Compute.ForwardingRules)
+      def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert)
+      def regionForwardingRuleOperationGet = Mock(Compute.RegionOperations.Get)
+      def forwardingRuleInsertOp = new Operation(
+        targetLink: "forwarding-rule",
+        name: LOAD_BALANCER_NAME,
+        status: DONE)
+      def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build()
+      def description = new UpsertGoogleLoadBalancerDescription(
+        loadBalancerName: LOAD_BALANCER_NAME,
+        region: REGION_US,
+        accountName: ACCOUNT_NAME,
+        credentials: credentials,
+        sessionAffinity: GoogleSessionAffinity.CLIENT_IP
+      )
+      @Subject def operation = new UpsertGoogleLoadBalancerAtomicOperation(description)
+      operation.registry = registry
+      operation.safeRetry = safeRetry
+      operation.googleOperationPoller =
+        new GoogleOperationPoller(
+          googleConfigurationProperties: new GoogleConfigurationProperties(),
+          threadSleeper: threadSleeperMock,
+          registry: registry,
+          safeRetry: safeRetry
+        )
+
+    when:
+      operation.operate([])
+
+    then:
+      1 * computeMock.regions() >> regions
+      1 * regions.list(PROJECT_NAME) >> regionsList
+      1 * regionsList.execute() >> regionsListReal
+      3 * computeMock.forwardingRules() >> forwardingRules
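+      // Each regional get() below throws a 404, simulating that no forwarding rule
+      // with this name exists in any region and forcing the create path.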
+      1 * forwardingRules.get(PROJECT_NAME, REGION_US, LOAD_BALANCER_NAME) >>
+        { throw GoogleJsonResponseExceptionFactoryTesting.newMock(new MockJsonFactory(), 404, "not found") }
+      1 * forwardingRules.get(PROJECT_NAME, REGION_ASIA, LOAD_BALANCER_NAME) >>
+        { throw GoogleJsonResponseExceptionFactoryTesting.newMock(new MockJsonFactory(), 404, "not found") }
+      1 * forwardingRules.get(PROJECT_NAME, REGION_EUROPE, LOAD_BALANCER_NAME) >>
+        { throw GoogleJsonResponseExceptionFactoryTesting.newMock(new MockJsonFactory(), 404, "not found") }
+      0 * computeMock.httpHealthChecks()
+      1 * computeMock.targetPools() >> targetPools
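+      // The closure is a Spock argument constraint: the insert only matches when the
+      // TargetPool payload carries the requested session affinity.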
+      1 * targetPools.insert(PROJECT_NAME, REGION_US, { it.getSessionAffinity() == GoogleSessionAffinity.CLIENT_IP.toString() }) >> targetPoolsInsert
+      1 * targetPoolsInsert.execute() >> targetPoolsInsertOp
+      1 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.insert(PROJECT_NAME, REGION_US, {it.IPAddress == null && it.portRange == Constants.DEFAULT_PORT_RANGE}) >> forwardingRulesInsert
+      1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp
+
+      2 * computeMock.regionOperations() >> regionOperations
+      1 * regionOperations.get(PROJECT_NAME, REGION_US, TARGET_POOL_OP_NAME) >> regionTargetPoolOperationGet
+      1 * regionTargetPoolOperationGet.execute() >> targetPoolsInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION_US, LOAD_BALANCER_NAME) >> regionForwardingRuleOperationGet
+      1 * regionForwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
+  }
+
+  void "should create a network load balancer with no session affinity if a session affinity value was not provided"() {
+    setup:
+      def computeMock = Mock(Compute)
+      def regionOperations = Mock(Compute.RegionOperations)
+      def regionTargetPoolOperationGet = Mock(Compute.RegionOperations.Get)
+      def targetPools = Mock(Compute.TargetPools)
+      def targetPoolsInsert = Mock(Compute.TargetPools.Insert)
+      def targetPoolsInsertOp = new Operation(
+        targetLink: "target-pool",
+        name: TARGET_POOL_OP_NAME,
+        status: DONE)
+      def regions = Mock(Compute.Regions)
+      def regionsList = Mock(Compute.Regions.List)
+      def regionsListReal = new RegionList(
+        items: [new Region(name: REGION_US), new Region(name: REGION_ASIA), new Region(name: REGION_EUROPE)])
+      def forwardingRules = Mock(Compute.ForwardingRules)
+      def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert)
+      def regionForwardingRuleOperationGet = Mock(Compute.RegionOperations.Get)
+      def forwardingRuleInsertOp = new Operation(
+        targetLink: "forwarding-rule",
+        name: LOAD_BALANCER_NAME,
+        status: DONE)
+      def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build()
+      def description = new UpsertGoogleLoadBalancerDescription(
+        loadBalancerName: LOAD_BALANCER_NAME,
+        region: REGION_US,
+        accountName: ACCOUNT_NAME,
+        credentials: credentials
+      )
+      @Subject def operation = new UpsertGoogleLoadBalancerAtomicOperation(description)
+      operation.registry = registry
+      operation.safeRetry = safeRetry
+      operation.googleOperationPoller =
+        new GoogleOperationPoller(
+          googleConfigurationProperties: new GoogleConfigurationProperties(),
+          threadSleeper: threadSleeperMock,
+          registry: registry,
+          safeRetry: safeRetry
+        )
+
+    when:
+      operation.operate([])
+
+    then:
+      1 * computeMock.regions() >> regions
+      1 * regions.list(PROJECT_NAME) >> regionsList
+      1 * regionsList.execute() >> regionsListReal
+      3 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.get(PROJECT_NAME, REGION_US, LOAD_BALANCER_NAME) >>
+        { throw GoogleJsonResponseExceptionFactoryTesting.newMock(new MockJsonFactory(), 404, "not found") }
+      1 * forwardingRules.get(PROJECT_NAME, REGION_ASIA, LOAD_BALANCER_NAME) >>
+        { throw GoogleJsonResponseExceptionFactoryTesting.newMock(new MockJsonFactory(), 404, "not found") }
+      1 * forwardingRules.get(PROJECT_NAME, REGION_EUROPE, LOAD_BALANCER_NAME) >>
+        { throw GoogleJsonResponseExceptionFactoryTesting.newMock(new MockJsonFactory(), 404, "not found") }
+      0 * computeMock.httpHealthChecks()
+      1 * computeMock.targetPools() >> targetPools
+      1 * targetPools.insert(PROJECT_NAME, REGION_US, { it.getSessionAffinity() == null }) >> targetPoolsInsert
+      1 * targetPoolsInsert.execute() >> targetPoolsInsertOp
+      1 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.insert(PROJECT_NAME, REGION_US, {it.IPAddress == null && it.portRange == Constants.DEFAULT_PORT_RANGE}) >> forwardingRulesInsert
+      1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp
+
+      2 * computeMock.regionOperations() >> regionOperations
+      1 * regionOperations.get(PROJECT_NAME, REGION_US, TARGET_POOL_OP_NAME) >> regionTargetPoolOperationGet
+      1 * regionTargetPoolOperationGet.execute() >> targetPoolsInsertOp
+      1 * regionOperations.get(PROJECT_NAME, REGION_US, LOAD_BALANCER_NAME) >> regionForwardingRuleOperationGet
+      1 * regionForwardingRuleOperationGet.execute() >> forwardingRuleInsertOp
+  }
+
+  void "should throw an exception if changing session affinity for already existing target pool with instances"() {
+    setup:
+      def computeMock = Mock(Compute)
+      def regions = Mock(Compute.Regions)
+      def regionsList = Mock(Compute.Regions.List)
+      def regionsListReal = new RegionList(
+        items: [new Region(name: REGION_US), new Region(name: REGION_ASIA), new Region(name: REGION_EUROPE)])
+      def forwardingRules = Mock(Compute.ForwardingRules)
+      def forwardingRulesGet = Mock(Compute.ForwardingRules.Get)
+      def forwardingRuleReal = new ForwardingRule(
+        name: LOAD_BALANCER_NAME,
+        region: REGION_US,
+        target: TARGET_POOL_NAME,
+        IPProtocol: Constants.DEFAULT_IP_PROTOCOL,
+        portRange: Constants.DEFAULT_PORT_RANGE)
+      def targetPools = Mock(Compute.TargetPools)
+      def targetPoolsList = Mock(Compute.TargetPools.List)
+      def targetPoolsListReal = new TargetPoolList(items: [
+        new TargetPool(
+          name: TARGET_POOL_NAME,
+          sessionAffinity: GoogleSessionAffinity.NONE,
+          instances: ["instance1", "instance2", "instance3"]
+        )
+      ])
+      def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build()
+      def description = new UpsertGoogleLoadBalancerDescription(
+        loadBalancerName: LOAD_BALANCER_NAME,
+        region: REGION_US,
+        healthCheck: [:],
+        accountName: ACCOUNT_NAME,
+        sessionAffinity: GoogleSessionAffinity.CLIENT_IP,
+        credentials: credentials)
+      @Subject def operation = new UpsertGoogleLoadBalancerAtomicOperation(description)
+      operation.registry = registry
+      operation.safeRetry = safeRetry
+      operation.googleOperationPoller =
+        new GoogleOperationPoller(
+          googleConfigurationProperties: new GoogleConfigurationProperties(),
+          threadSleeper: threadSleeperMock,
+          registry: registry,
+          safeRetry: safeRetry
+        )
+
+    when:
+      operation.operate([])
+
+    then:
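+      // No insert or update interactions are declared below: the operation is expected
+      // to abort with an exception rather than change affinity on a target pool that
+      // already has instances.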
+      // Query existing forwarding rules.
+      1 * computeMock.regions() >> regions
+      1 * regions.list(PROJECT_NAME) >> regionsList
+      1 * regionsList.execute() >> regionsListReal
+      1 * computeMock.forwardingRules() >> forwardingRules
+      1 * forwardingRules.get(PROJECT_NAME, REGION_US, LOAD_BALANCER_NAME) >> forwardingRulesGet
+      1 * forwardingRulesGet.execute() >> forwardingRuleReal
+
+      // Query existing target pools.
+      1 * computeMock.targetPools() >> targetPools
+      1 * targetPools.list(PROJECT_NAME, REGION_US) >> targetPoolsList
+      1 * targetPoolsList.execute() >> targetPoolsListReal
+  }
+
   void "should neither create anything new, nor edit anything existing, if a forwarding rule with the same name already exists in the same region"() {
     setup:
     def computeMock = Mock(Compute)
@@ -437,7 +638,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     def targetPoolsList = Mock(Compute.TargetPools.List)
     def targetPoolsListReal = new TargetPoolList(items: [
       new TargetPool(
-        name: TARGET_POOL_NAME
+        name: TARGET_POOL_NAME,
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build()
@@ -532,7 +734,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     new TargetPoolList(items: [
       new TargetPool(
         name: TARGET_POOL_NAME,
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def httpHealthChecksList = Mock(Compute.HttpHealthChecks.List)
@@ -617,7 +820,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     def targetPoolsListReal = new TargetPoolList(items: [
       new TargetPool(
         name: TARGET_POOL_NAME,
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
      )
     ])
     def httpHealthChecks = Mock(Compute.HttpHealthChecks)
@@ -694,7 +898,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     def targetPoolsListReal = new TargetPoolList(items: [
       new TargetPool(
         name: TARGET_POOL_NAME,
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def regionForwardingRuleOperationGet = Mock(Compute.RegionOperations.Get)
@@ -810,7 +1015,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     def targetPoolsList = Mock(Compute.TargetPools.List)
     def targetPoolsListReal = new TargetPoolList(items: [
       new TargetPool(
-        name: TARGET_POOL_NAME
+        name: TARGET_POOL_NAME,
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def globalOperations = Mock(Compute.GlobalOperations)
@@ -907,7 +1113,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     def targetPoolsListReal = new TargetPoolList(items: [
       new TargetPool(
         name: TARGET_POOL_NAME,
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def httpHealthChecksList = Mock(Compute.HttpHealthChecks.List)
@@ -1018,7 +1225,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
       new TargetPool(
         name: TARGET_POOL_NAME,
         instances: [INSTANCE_2_URL],
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def httpHealthChecksList = Mock(Compute.HttpHealthChecks.List)
@@ -1077,7 +1285,7 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     // Add missing instances to target pool.
     1 * computeMock.targetPools() >> targetPools
     1 * targetPools.addInstance(PROJECT_NAME, REGION_US, TARGET_POOL_NAME,
-      {it.instances.size == 2 &&
+      {it.instances.size() == 2 &&
        it.instances[0].instance == INSTANCE_1_URL &&
        it.instances[1].instance == INSTANCE_3_URL}) >> targetPoolsAddInstance
     1 * targetPoolsAddInstance.execute()
@@ -1115,7 +1323,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
       new TargetPool(
         name: TARGET_POOL_NAME,
         instances: [INSTANCE_1_URL, INSTANCE_2_URL, INSTANCE_3_URL],
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def httpHealthChecksList = Mock(Compute.HttpHealthChecks.List)
@@ -1174,7 +1383,7 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     // Remove extraneous instances from target pool.
     1 * computeMock.targetPools() >> targetPools
     1 * targetPools.removeInstance(PROJECT_NAME, REGION_US, TARGET_POOL_NAME,
-      {it.instances.size == 2 &&
+      {it.instances.size() == 2 &&
       it.instances[0].instance == INSTANCE_1_URL &&
       it.instances[1].instance == INSTANCE_2_URL}) >> targetPoolsRemoveInstance
     1 * targetPoolsRemoveInstance.execute()
@@ -1212,7 +1421,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
       new TargetPool(
         name: TARGET_POOL_NAME,
         instances: [INSTANCE_1_URL, INSTANCE_2_URL],
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def httpHealthChecksList = Mock(Compute.HttpHealthChecks.List)
@@ -1272,13 +1482,13 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
     // Add missing instances to target pool.
     1 * computeMock.targetPools() >> targetPools
     1 * targetPools.addInstance(PROJECT_NAME, REGION_US, TARGET_POOL_NAME,
-      {it.instances.size == 1 && it.instances[0].instance == INSTANCE_3_URL}) >> targetPoolsAddInstance
+      {it.instances.size() == 1 && it.instances[0].instance == INSTANCE_3_URL}) >> targetPoolsAddInstance
     1 * targetPoolsAddInstance.execute()

     // Remove extraneous instances from target pool.
     1 * computeMock.targetPools() >> targetPools
     1 * targetPools.removeInstance(PROJECT_NAME, REGION_US, TARGET_POOL_NAME,
-      {it.instances.size == 1 && it.instances[0].instance == INSTANCE_1_URL}) >> targetPoolsRemoveInstance
+      {it.instances.size() == 1 && it.instances[0].instance == INSTANCE_1_URL}) >> targetPoolsRemoveInstance
     1 * targetPoolsRemoveInstance.execute()
   }
@@ -1314,7 +1524,8 @@ class UpsertGoogleLoadBalancerAtomicOperationUnitSpec extends Specification {
       new TargetPool(
         name: TARGET_POOL_NAME,
         instances: [INSTANCE_1_URL, INSTANCE_2_URL, INSTANCE_3_URL],
-        healthChecks: [HEALTH_CHECK_NAME]
+        healthChecks: [HEALTH_CHECK_NAME],
+        sessionAffinity: GoogleSessionAffinity.NONE
       )
     ])
     def httpHealthChecksList = Mock(Compute.HttpHealthChecks.List)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy
index 4a006df25ea..cf0dea0b2e1 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec.groovy
@@ -34,6 +34,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
 import spock.lang.Subject
@@ -66,20 +68,18 @@ class UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec extends Specification {
       "healthyThreshold" : 1,
       "unhealthyThreshold": 1
     ]
-    safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0)
+    safeRetry = SafeRetry.withoutDelay()
   }

   void "should create ssl load balancer if no infrastructure present."() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
@@ -186,14 +186,12 @@ class UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec extends Specification {
   void "should update backend service if it exists"() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
@@ -300,14 +298,12 @@ class UpsertGoogleSslLoadBalancerAtomicOperationUnitSpec extends Specification {
   void "should update health check if it exists"() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy
index 54916799f6e..608db3d1a8f 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec.groovy
@@ -34,6 +34,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
 import spock.lang.Subject
@@ -65,20 +67,18 @@ class UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec extends Specification {
       "healthyThreshold" : 1,
       "unhealthyThreshold": 1
     ]
-    safeRetry = new SafeRetry(maxRetries: 10, maxWaitInterval: 60000, retryIntervalBase: 0, jitterMultiplier: 0)
+    safeRetry = SafeRetry.withoutDelay()
   }

   void "should create tcp load balancer if no infrastructure present."() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
@@ -184,14 +184,12 @@ class UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec extends Specification {
   void "should update backend service if it exists"() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
@@ -297,14 +295,12 @@ class UpsertGoogleTcpLoadBalancerAtomicOperationUnitSpec extends Specification {
   void "should update health check if it exists"() {
     def computeMock = Mock(Compute)
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build()
-
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepo.save(credentials)
     def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )

     def globalOperations = Mock(Compute.GlobalOperations)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy
index 06deaf3d7e9..ee8e0f59812 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy @@ -16,13 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.AbandonAndDecrementGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.google.security.TestDefaults import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -40,16 +42,16 @@ class AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Speci void setupSpec() { validator = new AbandonAndDecrementGoogleServerGroupDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) credentials = new GoogleNamedAccountCredentials.Builder() .name(ACCOUNT_NAME) .credentials(new FakeGoogleCredentials()) .regionToZonesMap(REGION_TO_ZONES) .build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -60,7 +62,7 @@ class AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Speci instanceIds: INSTANCE_IDS, accountName: ACCOUNT_NAME, credentials: credentials) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -72,7 +74,7 @@ class AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Speci void "invalid instanceIds fail validation"() { setup: def description = new AbandonAndDecrementGoogleServerGroupDescription(instanceIds: [""], serverGroupName: SERVER_GROUP_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -84,7 +86,7 @@ class AbandonAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Speci void "null input fails validation"() { setup: def description = new AbandonAndDecrementGoogleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidatorSpec.groovy index 2ea91274e5a..c8f13c3f1ed 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidatorSpec.groovy +++ 
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/BasicGoogleDeployDescriptionValidatorSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleDisk @@ -24,7 +25,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll @@ -63,11 +65,11 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void setupSpec() { def googleDeployDefaults = new GoogleConfiguration.DeployDefaults(instanceTypeDisks: [INSTANCE_TYPE_DISK]) validator = new BasicGoogleDeployDescriptionValidator(googleDeployDefaults: googleDeployDefaults) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } @Unroll @@ -83,7 +85,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { region: region, zone: zone, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -108,7 +110,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { instanceType: INSTANCE_TYPE, zone: ZONE, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -128,7 +130,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { zone: ZONE, tags: TAGS, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -148,7 +150,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { zone: ZONE, tags: TAGS, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -169,7 +171,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { regional: true, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -190,7 +192,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "invalid targetSize fails 
validation"() { setup: def description = new BasicGoogleDeployDescription(targetSize: -1) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -203,15 +205,21 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "invalid capacity (min: #min, max: #max, desired: #desired) fails validation"() { setup: def description = new BasicGoogleDeployDescription(capacity: [min: min, max: max, desired: desired]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) + def matchingCalls = 0 when: validator.validate([], description, errors) then: - numErrors * errors.rejectValue( - { return it.startsWith("capacity.") }, - { return it.startsWith("basicGoogleDeployDescription.capacity") }) + errors.rejectValue(_,_) >> { arguments -> + String field = arguments.get(0) + String errorCode = arguments.get(1) + if (field.startsWith("capacity.") && errorCode.startsWith("basicGoogleDeployDescription.capacity")) { + matchingCalls += 1 + } + } + numErrors == matchingCalls where: min | max | desired | numErrors @@ -227,7 +235,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "invalid disk sizeGb fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(disks: [DISK_NO_SIZE]), errors) @@ -268,7 +276,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "missing disk type fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(disks: [DISK_NO_TYPE]), errors) @@ -279,7 +287,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "invalid number of persistent disks fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(disks: [DISK_LOCAL_SSD]), errors) @@ -300,7 +308,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "source image specified directly on boot persistent disk fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(disks: [DISK_PD_STANDARD_WITH_SOURCE_IMAGE]), errors) @@ -329,7 +337,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "missing source image on non-boot persistent disk fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(disks: [DISK_PD_STANDARD, DISK_PD_STANDARD_2]), errors) @@ -364,7 +372,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "invalid local ssd settings fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(disks: [DISK_LOCAL_SSD_NO_AUTO_DELETE]), errors) @@ -387,7 +395,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new BasicGoogleDeployDescription(regional: regional) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -411,7 +419,7 @@ class BasicGoogleDeployDescriptionValidatorSpec extends Specification { void "nonsensical autoscaling policy min, 
max or cooldown fails validation"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], new BasicGoogleDeployDescription(autoscalingPolicy: [minNumReplicas: -5]), errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidatorSpec.groovy index a2fa8a574a2..9691fb1a09b 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CopyLastGoogleServerGroupDescriptionValidatorSpec.groovy @@ -16,13 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -43,11 +45,11 @@ class CopyLastGoogleServerGroupDescriptionValidatorSpec extends Specification { void setupSpec() { def googleDeployDefaults = new GoogleConfiguration.DeployDefaults() validator = new CopyLastGoogleServerGroupDescriptionValidator(googleDeployDefaults: googleDeployDefaults) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with minimal description inputs"() { @@ -55,7 +57,7 @@ class CopyLastGoogleServerGroupDescriptionValidatorSpec extends Specification { def description = new BasicGoogleDeployDescription(source: [region: REGION, serverGroupName: ANCESTOR_SERVER_GROUP_NAME], accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -75,7 +77,7 @@ class CopyLastGoogleServerGroupDescriptionValidatorSpec extends Specification { source: [region: REGION, serverGroupName: ANCESTOR_SERVER_GROUP_NAME], accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -87,7 +89,7 @@ class CopyLastGoogleServerGroupDescriptionValidatorSpec extends Specification { void "null 
input fails validation"() { setup: def description = new BasicGoogleDeployDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidatorSpec.groovy index 3fe2baed8f7..8d8515332ac 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/CreateGoogleInstanceDescriptionValidatorSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.deploy.description.CreateGoogleInstanceDescription import com.netflix.spinnaker.clouddriver.google.model.GoogleDisk @@ -23,7 +24,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -42,11 +44,11 @@ class CreateGoogleInstanceDescriptionValidatorSpec extends Specification { void setupSpec() { def googleDeployDefaults = new GoogleConfiguration.DeployDefaults() validator = new CreateGoogleInstanceDescriptionValidator(googleDeployDefaults: googleDeployDefaults) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -57,7 +59,7 @@ class CreateGoogleInstanceDescriptionValidatorSpec extends Specification { disks: [DISK_PD_SSD, DISK_LOCAL_SSD], zone: ZONE, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -69,7 +71,7 @@ class CreateGoogleInstanceDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new CreateGoogleInstanceDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy index 0b5c4053e7d..284fe16354d 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -35,11 +37,11 @@ class DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio void setupSpec() { validator = new DeleteGoogleAutoscalingPolicyDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -47,7 +49,7 @@ class DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio def description = new DeleteGoogleAutoscalingPolicyDescription(serverGroupName: SERVER_GROUP_NAME, accountName: ACCOUNT_NAME, region: REGION) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +61,7 @@ class DeleteGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio void "null input fails validation"() { setup: def description = new DeleteGoogleAutoscalingPolicyDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidatorSpec.groovy index 648a0514acd..97b75a48a6b 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleLoadBalancerDescriptionValidatorSpec.groovy @@ -16,13 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -37,11 +39,11 @@ class DeleteGoogleLoadBalancerDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new DeleteGoogleLoadBalancerDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with full description input"() { @@ -52,7 +54,7 @@ class DeleteGoogleLoadBalancerDescriptionValidatorSpec extends Specification { region: REGION, loadBalancerType: GoogleLoadBalancerType.NETWORK, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -68,7 +70,7 @@ class DeleteGoogleLoadBalancerDescriptionValidatorSpec extends Specification { region: REGION, loadBalancerType: GoogleLoadBalancerType.NETWORK, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -85,7 +87,7 @@ class DeleteGoogleLoadBalancerDescriptionValidatorSpec extends Specification { region: null, loadBalancerType: GoogleLoadBalancerType.NETWORK, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -97,7 +99,7 @@ class DeleteGoogleLoadBalancerDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new DeleteGoogleLoadBalancerDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidatorSpec.groovy 
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidatorSpec.groovy index 0c36874f49f..d18125c3138 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeleteGoogleSecurityGroupDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleSecurityGroupDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -34,18 +36,18 @@ class DeleteGoogleSecurityGroupDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new DeleteGoogleSecurityGroupDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { setup: def description = new DeleteGoogleSecurityGroupDescription(securityGroupName: SECURITY_GROUP_NAME, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -57,7 +59,7 @@ class DeleteGoogleSecurityGroupDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new DeleteGoogleSecurityGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec.groovy index 7ef22fc7fe1..d3e4db6bada 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package 
com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.DeregisterInstancesFromGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -36,11 +38,11 @@ class DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec extends void setupSpec() { validator = new DeregisterInstancesFromGoogleLoadBalancerDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -50,7 +52,7 @@ class DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec extends instanceIds: INSTANCE_IDS, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -62,7 +64,7 @@ class DeregisterInstancesFromGoogleLoadBalancerDescriptionValidatorSpec extends void "null input fails validation"() { setup: def description = new DeregisterInstancesFromGoogleLoadBalancerDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidatorSpec.groovy index 067ca8f1d36..c4a43903d57 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DestroyGoogleServerGroupDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.DestroyGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import 
com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -35,11 +37,11 @@ class DestroyGoogleServerGroupDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new DestroyGoogleServerGroupDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -47,7 +49,7 @@ class DestroyGoogleServerGroupDescriptionValidatorSpec extends Specification { def description = new DestroyGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +61,7 @@ class DestroyGoogleServerGroupDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new DestroyGoogleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DisableGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DisableGoogleServerGroupDescriptionValidatorSpec.groovy index 821a19cb50a..05dcf935116 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DisableGoogleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/DisableGoogleServerGroupDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -35,11 +37,11 @@ class DisableGoogleServerGroupDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new 
DisableGoogleServerGroupDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -47,7 +49,7 @@ class DisableGoogleServerGroupDescriptionValidatorSpec extends Specification { def description = new EnableDisableGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +61,7 @@ class DisableGoogleServerGroupDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new EnableDisableGoogleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/EnableDisableGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/EnableDisableGoogleServerGroupDescriptionValidatorSpec.groovy index 5aab7c7ab82..30416ae3e64 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/EnableDisableGoogleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/EnableDisableGoogleServerGroupDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.EnableDisableGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -35,11 +37,11 @@ class EnableDisableGoogleServerGroupDescriptionValidatorSpec extends Specificati void setupSpec() { validator = new AbstractEnableDisableGoogleServerGroupDescriptionValidator() {} - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new 
FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -47,7 +49,7 @@ class EnableDisableGoogleServerGroupDescriptionValidatorSpec extends Specificati def description = new EnableDisableGoogleServerGroupDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +61,7 @@ class EnableDisableGoogleServerGroupDescriptionValidatorSpec extends Specificati void "null input fails validation"() { setup: def description = new EnableDisableGoogleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec.groovy index c420b7a8a67..a8ff3534fe3 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec.groovy @@ -16,13 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.config.GoogleConfiguration import com.netflix.spinnaker.clouddriver.google.deploy.description.ModifyGoogleServerGroupInstanceTemplateDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -45,11 +47,11 @@ class ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec extends Sp void setupSpec() { def googleDeployDefaults = new GoogleConfiguration.DeployDefaults() validator = new ModifyGoogleServerGroupInstanceTemplateDescriptionValidator(googleDeployDefaults: googleDeployDefaults) - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo 
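// The migration applied throughout these specs follows one pattern: the legacy
// MapBackedAccountCredentialsRepository/DefaultAccountCredentialsProvider pair is replaced
// by kork's typed repository, constructed from a credentials type and a lifecycle handler,
// which keys each saved credentials object by its own name. A minimal sketch of the new
// usage (getOne(name) is assumed from kork's CredentialsRepository interface):
//   def repo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
//       new NoopCredentialsLifecycleHandler<>())
//   repo.save(credentials)            // keyed by credentials.getName(); no separate account-name argument
//   def retrieved = repo.getOne(ACCOUNT_NAME)
// Validators are handed the repository directly (validator.credentialsRepository = repo) in
// place of an AccountCredentialsProvider, and their tests mock
// com.netflix.spinnaker.clouddriver.deploy.ValidationErrors instead of Spring's Errors.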
} void "pass validation with minimum proper description inputs"() { @@ -57,7 +59,7 @@ class ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec extends Sp def description = new ModifyGoogleServerGroupInstanceTemplateDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -76,7 +78,7 @@ class ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec extends Sp tags: TAGS, network: NETWORK, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -88,7 +90,7 @@ class ModifyGoogleServerGroupInstanceTemplateDescriptionValidatorSpec extends Sp void "null input fails validation"() { setup: def description = new ModifyGoogleServerGroupInstanceTemplateDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidatorSpec.groovy index 78dc22675fe..91d75fac2f3 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RebootGoogleInstancesDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.RebootGoogleInstancesDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -35,11 +37,11 @@ class RebootGoogleInstancesDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new RebootGoogleInstancesDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -47,7 +49,7 @@ class RebootGoogleInstancesDescriptionValidatorSpec extends Specification { def description = new 
RebootGoogleInstancesDescription(zone: ZONE, instanceIds: INSTANCE_IDS, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -59,7 +61,7 @@ class RebootGoogleInstancesDescriptionValidatorSpec extends Specification { void "invalid instanceIds fail validation"() { setup: def description = new RebootGoogleInstancesDescription(instanceIds: [""]) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -71,7 +73,7 @@ class RebootGoogleInstancesDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new RebootGoogleInstancesDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec.groovy index 55b0fb5e24e..fc49426114d 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec.groovy @@ -16,12 +16,14 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.RegisterInstancesWithGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -36,11 +38,11 @@ class RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec extends Sp void setupSpec() { validator = new RegisterInstancesWithGoogleLoadBalancerDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -50,7 +52,7 @@ class RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec extends Sp instanceIds: INSTANCE_IDS, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = 
Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -62,7 +64,7 @@ class RegisterInstancesWithGoogleLoadBalancerDescriptionValidatorSpec extends Sp void "null input fails validation"() { setup: def description = new RegisterInstancesWithGoogleLoadBalancerDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidatorSpec.groovy index 7bd0443eec5..a559358f9cb 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/ResizeGoogleServerGroupDescriptionValidatorSpec.groovy @@ -16,13 +16,15 @@ package com.netflix.spinnaker.clouddriver.google.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.google.deploy.description.ResizeGoogleServerGroupDescription import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification @@ -37,11 +39,11 @@ class ResizeGoogleServerGroupDescriptionValidatorSpec extends Specification { void setupSpec() { validator = new ResizeGoogleServerGroupDescriptionValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build() - credentialsRepo.save(ACCOUNT_NAME, credentials) - validator.accountCredentialsProvider = credentialsProvider + credentialsRepo.save(credentials) + validator.credentialsRepository = credentialsRepo } void "pass validation with proper description inputs"() { @@ -50,7 +52,7 @@ class ResizeGoogleServerGroupDescriptionValidatorSpec extends Specification { targetSize: TARGET_SIZE, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -62,7 +64,7 @@ class ResizeGoogleServerGroupDescriptionValidatorSpec extends Specification { void "invalid targetSize fails validation"() { setup: def description = new ResizeGoogleServerGroupDescription(targetSize: -1) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -74,7 +76,7 @@ class 
ResizeGoogleServerGroupDescriptionValidatorSpec extends Specification { void "null input fails validation"() { setup: def description = new ResizeGoogleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -92,7 +94,7 @@ class ResizeGoogleServerGroupDescriptionValidatorSpec extends Specification { region: REGION, accountName: ACCOUNT_NAME ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: // no description.capacity set. @@ -128,7 +130,7 @@ class ResizeGoogleServerGroupDescriptionValidatorSpec extends Specification { def description = new UpsertGoogleAutoscalingPolicyDescription(serverGroupName: SERVER_GROUP_NAME, region: REGION, accountName: ACCOUNT_NAME) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/SetStatefulDiskDescriptionValidatorTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/SetStatefulDiskDescriptionValidatorTest.java new file mode 100644 index 00000000000..f0312e322e5 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/SetStatefulDiskDescriptionValidatorTest.java @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.deploy.validators; + +import static org.assertj.core.api.Java6Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationErrors; +import com.netflix.spinnaker.clouddriver.google.deploy.description.SetStatefulDiskDescription; +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import org.assertj.core.api.Condition; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.springframework.validation.FieldError; +import org.springframework.validation.ObjectError; + +public class SetStatefulDiskDescriptionValidatorTest { + + private static final String ACCOUNT_NAME = "spintest"; + private static final String REGION = "us-central1"; + private GoogleNamedAccountCredentials CREDENTIALS = + new GoogleNamedAccountCredentials.Builder() + .name(ACCOUNT_NAME) + .credentials(new FakeGoogleCredentials()) + .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of("us-central1-b"))) + .build(); + + private SetStatefulDiskDescriptionValidator validator; + + @BeforeEach + public void setUp() { + validator = new SetStatefulDiskDescriptionValidator(); + } + + @Test + public void testNoErrors() { + SetStatefulDiskDescription description = new SetStatefulDiskDescription(); + description.setCredentials(CREDENTIALS); + description.setRegion(REGION); + description.setServerGroupName("testapp-v000"); + description.setDeviceName("testapp-v000-1"); + + DescriptionValidationErrors errors = new DescriptionValidationErrors(description); + + validator.validate(ImmutableList.of(), description, errors); + + assertThat(errors.hasErrors()).isFalse(); + } + + @Test + public void testNoFields() { + SetStatefulDiskDescription description = new SetStatefulDiskDescription(); + + DescriptionValidationErrors errors = new DescriptionValidationErrors(description); + + validator.validate(ImmutableList.of(), description, errors); + + assertThat(errors.getAllErrors()).hasSize(3); + assertThat(errors.getAllErrors()).haveAtLeastOne(errorOnField("region")); + assertThat(errors.getAllErrors()).haveAtLeastOne(errorOnField("serverGroupName")); + assertThat(errors.getAllErrors()).haveAtLeastOne(errorOnField("deviceName")); + } + + @Test + public void testInvalidRegion() { + SetStatefulDiskDescription description = new SetStatefulDiskDescription(); + description.setCredentials(CREDENTIALS); + description.setRegion("some-unknown-region"); + description.setServerGroupName("testapp-v000"); + description.setDeviceName("testapp-v000-1"); + + DescriptionValidationErrors errors = new DescriptionValidationErrors(description); + + validator.validate(ImmutableList.of(), description, errors); + + assertThat(errors.getAllErrors()).hasSize(1); + assertThat(errors.getAllErrors()).haveAtLeastOne(errorOnField("region")); + } + + private Condition errorOnField(String field) { + return new Condition<>( + e -> e instanceof FieldError && ((FieldError) e).getField().equals(field), + "has field named " + field); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidatorSpec.groovy index 2b98e19e3b0..c834d21ad39 
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidatorSpec.groovy
index 2b98e19e3b0..c834d21ad39 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/StandardGceAttributeValidatorSpec.groovy
@@ -16,6 +16,7 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.BaseGoogleInstanceDescription
 import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription
 import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy
@@ -25,8 +26,10 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
+import com.netflix.spinnaker.credentials.CredentialsRepository
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import org.springframework.validation.Errors
 import spock.lang.Shared
 import spock.lang.Specification
 import spock.lang.Unroll
@@ -80,14 +83,14 @@ class StandardGceAttributeValidatorSpec extends Specification {
   ]
 
   @Shared
-  DefaultAccountCredentialsProvider accountCredentialsProvider
+  CredentialsRepository credentialsRepository
 
   @Shared
   GoogleNamedAccountCredentials credentials
 
   void setupSpec() {
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    credentialsRepository = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
 
     credentials = new GoogleNamedAccountCredentials.Builder()
       .name(ACCOUNT_NAME)
@@ -95,12 +98,12 @@ class StandardGceAttributeValidatorSpec extends Specification {
       .locationToInstanceTypesMap(VCPU_MAX_BY_LOCATION)
      .regionToZonesMap(REGION_TO_ZONES)
       .build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
+    credentialsRepository.save(credentials)
   }
 
   void "generic non-empty ok"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def label = "testAttribute"
@@ -130,7 +133,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
   @Unroll
   void "expect non-empty ok with numeric values"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def label = "testAttribute"
@@ -145,7 +148,7 @@
 
   void "expect non-empty to fail with empty"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def label = "testAttribute"
@@ -164,7 +167,7 @@
 
   void "nonNegativeInt ok if non-negative"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def label = "testAttribute"
@@ -186,7 +189,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "nonNegativeInt invalid if negative"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def label = "testAttribute"
@@ -199,7 +202,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid generic name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -216,7 +219,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
   void "invalid generic name"() {
     setup:
       def label = "label"
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -228,28 +231,28 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "validate simple account name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
-      validator.validateCredentials(ACCOUNT_NAME, accountCredentialsProvider)
+      validator.validateCredentials(ACCOUNT_NAME, credentialsRepository)
     then:
       0 * errors._
   }
 
   void "empty account name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
-      validator.validateCredentials(null, accountCredentialsProvider)
+      validator.validateCredentials(null, credentialsRepository)
     then:
       1 * errors.rejectValue("credentials", "${DECORATOR}.credentials.empty")
       0 * errors._
 
     when:
-      validator.validateCredentials("", accountCredentialsProvider)
+      validator.validateCredentials("", credentialsRepository)
     then:
       1 * errors.rejectValue("credentials", "${DECORATOR}.credentials.empty")
       0 * errors._
@@ -257,11 +260,11 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "unknown account name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
-      validator.validateCredentials("Unknown", accountCredentialsProvider)
+      validator.validateCredentials("Unknown", credentialsRepository)
 
     then:
       1 * errors.rejectValue("credentials", "${DECORATOR}.credentials.invalid")
@@ -270,7 +273,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid server group name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -286,7 +289,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid server group name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -298,7 +301,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid region name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -309,7 +312,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid region name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -333,7 +336,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid zone name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -344,7 +347,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid zone name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -368,7 +371,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid network name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -384,7 +387,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid network name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -396,7 +399,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid image name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -417,7 +420,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid image name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -435,7 +438,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid image artifact"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def artifact = Artifact.ArtifactBuilder.newInstance().type("gce/image").build()
@@ -447,7 +450,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "missing image artifact"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -459,7 +462,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid image artifact type"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
       def artifact = Artifact.ArtifactBuilder.newInstance().type("github/file").build()
@@ -472,7 +475,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid instance name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -488,7 +491,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid instance name"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -500,7 +503,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "valid instance type"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
@@ -516,7 +519,7 @@ class StandardGceAttributeValidatorSpec extends Specification {
 
   void "invalid instance type"() {
     setup:
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
       def validator = new StandardGceAttributeValidator(DECORATOR, errors)
 
     when:
type"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -545,11 +548,21 @@ class StandardGceAttributeValidatorSpec extends Specification { validator.validateInstanceType("custom-24-29696", REGION, credentials) then: 0 * errors._ + + when: + validator.validateInstanceType("e2-custom-24-29696", REGION, credentials) + then: + 0 * errors._ + + when: + validator.validateInstanceType("n2-custom-32-122880", REGION, credentials) + then: + 0 * errors._ } void "invalid custom instance type"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -558,17 +571,17 @@ class StandardGceAttributeValidatorSpec extends Specification { validator.validateInstanceType("custom--1234", ZONE, credentials) validator.validateInstanceType("custom-1-2345678", ZONE, credentials) then: - 4 * errors.rejectValue("instanceType", "${DECORATOR}.instanceType.invalid", "Custom instance string must match pattern /custom-\\d{1,2}-\\d{4,6}/.") + 4 * errors.rejectValue("instanceType", "${DECORATOR}.instanceType.invalid", "Custom instance string must match pattern /(.*)-?custom-(\\d{1,2})-(\\d{3,6})(-ext)?/.") when: - validator.validateInstanceType("custom-1-6912", ZONE, credentials) + validator.validateInstanceType("custom-1-8448", ZONE, credentials) then: - 1 * errors.rejectValue("instanceType", "${DECORATOR}.instanceType.invalid", "Memory per vCPU must be less than 6.5GB.") + 1 * errors.rejectValue("instanceType", "${DECORATOR}.instanceType.invalid", "Memory per vCPU must be less than 8GB.") when: - validator.validateInstanceType("custom-2-1024", ZONE, credentials) + validator.validateInstanceType("custom-2-768", ZONE, credentials) then: - 1 * errors.rejectValue("instanceType", "${DECORATOR}.instanceType.invalid", "Memory per vCPU must be greater than 0.9GB.") + 1 * errors.rejectValue("instanceType", "${DECORATOR}.instanceType.invalid", "Memory per vCPU must be greater than 0.5GB.") when: validator.validateInstanceType("custom-1-1000", ZONE, credentials) @@ -598,7 +611,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "valid name list"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -619,7 +632,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "invalid name list"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -637,7 +650,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "mixed valid/invalid name list"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -656,7 +669,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "valid instance ids"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -677,7 +690,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "invalid instance ids"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -695,7 +708,7 @@ class StandardGceAttributeValidatorSpec extends Specification 
{ void "mixed valid/invalid instance ids"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -714,7 +727,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "valid in range exclusive"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) def label = "testAttribute" @@ -728,7 +741,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "invalid in range exclusive"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) def label = "testAttribute" @@ -742,7 +755,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "valid basic scaling policy"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) def scalingPolicy = new GoogleAutoscalingPolicy( minNumReplicas: 1, @@ -759,7 +772,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "valid complex scaling policy"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) def scalingPolicy = new GoogleAutoscalingPolicy( @@ -770,7 +783,8 @@ class StandardGceAttributeValidatorSpec extends Specification { loadBalancingUtilization: new GoogleAutoscalingPolicy.LoadBalancingUtilization(utilizationTarget: 0.7), customMetricUtilizations: [ new GoogleAutoscalingPolicy.CustomMetricUtilization(metric: "myMetric", utilizationTarget: 0.9, - utilizationTargetType: UtilizationTargetType.DELTA_PER_MINUTE) ]) + utilizationTargetType: UtilizationTargetType.DELTA_PER_MINUTE, + singleInstanceAssignment: 1.0) ]) when: validator.validateAutoscalingPolicy(scalingPolicy) @@ -781,7 +795,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "invalid autoscaler min, max or cooldown"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -817,7 +831,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "invalid autoscaler loadBalancingUtilization or cpuUtilization"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -841,7 +855,7 @@ class StandardGceAttributeValidatorSpec extends Specification { void "invalid autoscaler customMetricUtilizations" () { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardGceAttributeValidator(DECORATOR, errors) when: @@ -865,10 +879,32 @@ class StandardGceAttributeValidatorSpec extends Specification { } + void "valid autoscaler customMetricUtilizations"(){ + setup: + def errors = Mock(ValidationErrors) + def validator = new StandardGceAttributeValidator(DECORATOR, errors) + + when: + validator.validateAutoscalingPolicy(new GoogleAutoscalingPolicy( + customMetricUtilizations: [ new GoogleAutoscalingPolicy.CustomMetricUtilization(utilizationTarget: 5, + metric: "myMetric", utilizationTargetType: UtilizationTargetType.DELTA_PER_MINUTE,singleInstanceAssignment: null) ])) + + then: + 0 * errors._ + + when: + validator.validateAutoscalingPolicy(new GoogleAutoscalingPolicy( + customMetricUtilizations: [ new 
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy
index fc950fcf14a..a7800dfdc5f 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec.groovy
@@ -16,12 +16,14 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateAndDecrementGoogleServerGroupDescription
 import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
@@ -36,18 +38,18 @@ class TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Spe
 
   void setupSpec() {
     validator = new TerminateAndDecrementGoogleServerGroupDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
   }
 
   void "pass validation with proper description inputs"() {
     setup:
       def description = new TerminateAndDecrementGoogleServerGroupDescription(
        region: REGION, serverGroupName: SERVER_GROUP_NAME, instanceIds: INSTANCE_IDS, accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -59,7 +61,7 @@ class TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Spe
 
   void "invalid instanceIds fail validation"() {
     setup:
       def description = new TerminateAndDecrementGoogleServerGroupDescription(instanceIds: [""], serverGroupName: SERVER_GROUP_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -71,7 +73,7 @@ class TerminateAndDecrementGoogleServerGroupDescriptionValidatorSpec extends Spe
 
   void "null input fails validation"() {
     setup:
       def description = new TerminateAndDecrementGoogleServerGroupDescription()
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
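The same mechanical migration recurs in every spec in this patch, so it is worth stating once. The old clouddriver security types (a MapBackedAccountCredentialsRepository wrapped in a DefaultAccountCredentialsProvider, saved under an explicit account name) give way to the kork credentials API, where the repository is keyed by a credentials type and the account name comes from the credentials object itself. Distilled from the hunks above:

    // Before (removed):
    //   def credentialsRepo = new MapBackedAccountCredentialsRepository()
    //   def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
    //   credentialsRepo.save(ACCOUNT_NAME, credentials)
    //   validator.accountCredentialsProvider = credentialsProvider
    // After (added):
    def credentialsRepo = new MapBackedCredentialsRepository(
      GoogleNamedAccountCredentials.CREDENTIALS_TYPE,  // repository is scoped to one credentials type
      new NoopCredentialsLifecycleHandler<>())         // tests need no lifecycle side effects
    credentialsRepo.save(credentials)                  // keyed by the credentials' own name
    validator.credentialsRepository = credentialsRepo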
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidatorSpec.groovy
index d0ab8a20351..8fd96454ec1 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/TerminateGoogleInstancesDescriptionValidatorSpec.groovy
@@ -16,12 +16,14 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.TerminateGoogleInstancesDescription
 import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
@@ -37,11 +39,11 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
 
   void setupSpec() {
     validator = new TerminateGoogleInstancesDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
   }
 
   void "pass validation with proper description inputs without managed instance group"() {
@@ -49,7 +51,7 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
       def description = new TerminateGoogleInstancesDescription(zone: ZONE,
                                                                 instanceIds: INSTANCE_IDS,
                                                                 accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -64,7 +66,7 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
                                                                 serverGroupName: MANAGED_INSTANCE_GROUP_NAME,
                                                                 instanceIds: INSTANCE_IDS,
                                                                 accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -78,7 +80,7 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
       def description = new TerminateGoogleInstancesDescription(serverGroupName: MANAGED_INSTANCE_GROUP_NAME,
                                                                 instanceIds: INSTANCE_IDS,
                                                                 accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -90,7 +92,7 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
 
   void "fail validation without managed instance group and no zone"() {
     setup:
       def description = new TerminateGoogleInstancesDescription(instanceIds: INSTANCE_IDS, accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -102,7 +104,7 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
 
   void "invalid instanceIds fail validation"() {
     setup:
       def description = new TerminateGoogleInstancesDescription(instanceIds: [""])
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -114,7 +116,7 @@ class TerminateGoogleInstancesDescriptionValidatorSpec extends Specification {
 
   void "null input fails validation"() {
     setup:
       def description = new TerminateGoogleInstancesDescription()
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
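All of these specs lean on the same Spock mock-verification idiom: `1 * errors.rejectValue(...)` demands exactly one matching invocation during the `when:` block, and a closing `0 * errors._` fails the feature method if the mock saw any other interaction at all. A minimal self-contained illustration (runnable with Spock on the classpath; names are illustrative):

    import spock.lang.Specification

    class RejectValueIdiomSpec extends Specification {
      interface Errors { void rejectValue(String field, String code) }

      def "exactly one rejection and nothing else"() {
        def errors = Mock(Errors)

        when:
        errors.rejectValue("zone", "decorator.zone.empty")  // stands in for validator.validate(...)

        then:
        1 * errors.rejectValue("zone", _)  // exactly one call for field "zone"
        0 * errors._                       // and no other interaction with the mock
      }
    }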
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy
index d887af87b57..506add1c28b 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec.groovy
@@ -16,6 +16,7 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleAutoscalingPolicyDescription
 import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy
 import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy
@@ -24,7 +25,9 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.CredentialsRepository
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
@@ -39,7 +42,8 @@ class UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio
   private static final CUSTOM_METRIC_UTILIZATIONS = [new GoogleAutoscalingPolicy.CustomMetricUtilization(
     metric: METRIC,
     utilizationTargetType: UtilizationTargetType.DELTA_PER_MINUTE,
-    utilizationTarget: UTILIZATION_TARGET)]
+    utilizationTarget: UTILIZATION_TARGET,
+    singleInstanceAssignment: 1)]
   private static final MIN_NUM_REPLICAS = 1
   private static final MAX_NUM_REPLICAS = 10
   private static final COOL_DOWN_PERIOD_SEC = 60
@@ -61,11 +65,11 @@ class UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio
 
   void setupSpec() {
     validator = new UpsertGoogleAutoscalingPolicyDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
   }
 
   void "pass validation with proper description inputs"() {
@@ -76,7 +80,7 @@ class UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio
         autoscalingPolicy: GOOGLE_SCALING_POLICY,
         autoHealingPolicy: GOOGLE_AUTOHEALING_POLICY,
         accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -91,7 +95,7 @@ class UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio
         region: REGION,
         serverGroupName: SERVER_GROUP_NAME,
         accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -104,7 +108,7 @@ class UpsertGoogleAutoscalingPolicyDescriptionValidatorSpec extends Specificatio
 
   void "null input fails validation"() {
     setup:
       def description = new UpsertGoogleAutoscalingPolicyDescription()
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
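The `singleInstanceAssignment` field threaded through the autoscaling fixtures is, per the new "valid autoscaler customMetricUtilizations" test in StandardGceAttributeValidatorSpec above, accepted as an alternative to a utilization target: a custom-metric entry passes with a target plus target type, or with just a per-instance assignment. A standalone restatement of that either/or — field names come from the diff, semantics are inferred from the tests only:

    boolean validCustomMetric(Map m) {
      if (!m.metric) return false
      if (m.singleInstanceAssignment != null) return true              // per-instance assignment form
      m.utilizationTarget != null && m.utilizationTargetType != null   // utilization-target form
    }

    assert validCustomMetric(metric: "myMetric", utilizationTarget: 5,
                             utilizationTargetType: "DELTA_PER_MINUTE", singleInstanceAssignment: null)
    assert validCustomMetric(metric: "myMetric", utilizationTarget: null,
                             utilizationTargetType: null, singleInstanceAssignment: 1)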
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidatorSpec.groovy
index c57f9277a56..b13baf6f38f 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleImageTagsDescriptionValidatorSpec.groovy
@@ -16,12 +16,14 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleImageTagsDescription
 import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
@@ -35,11 +37,11 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
 
   void setupSpec() {
     validator = new UpsertGoogleImageTagsDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
  }
 
   void "pass validation with proper description inputs"() {
@@ -47,7 +49,7 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
       def description = new UpsertGoogleImageTagsDescription(imageName: IMAGE_NAME,
                                                              tags: TAGS,
                                                              accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -61,7 +63,7 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
       def description = new UpsertGoogleImageTagsDescription(imageName: IMAGE_NAME,
                                                              tags: [:],
                                                              accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -75,7 +77,7 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
       def description = new UpsertGoogleImageTagsDescription(imageName: IMAGE_NAME,
                                                              tags: TAGS + ['some-key-2': ''],
                                                              accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -88,7 +90,7 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
     setup:
       def description = new UpsertGoogleImageTagsDescription(imageName: IMAGE_NAME,
                                                              accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -102,7 +104,7 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
       def description = new UpsertGoogleImageTagsDescription(imageName: IMAGE_NAME,
                                                              tags: TAGS + ['': 'some-val-2'],
                                                              accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -114,7 +116,7 @@ class UpsertGoogleImageTagsDescriptionValidatorSpec extends Specification {
 
   void "null input fails validation"() {
     setup:
       def description = new UpsertGoogleImageTagsDescription()
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidatorSpec.groovy
index 3592dd1fb69..c1de477e407 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidatorSpec.groovy
@@ -17,6 +17,7 @@
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
 import com.fasterxml.jackson.databind.ObjectMapper
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.converters.UpsertGoogleLoadBalancerAtomicOperationConverter
 import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription
 import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck
@@ -25,7 +26,8 @@ import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
 import spock.lang.Unroll
@@ -49,14 +51,13 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
 
   void setupSpec() {
     validator = new UpsertGoogleLoadBalancerDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
     converter = new UpsertGoogleLoadBalancerAtomicOperationConverter(
-      accountCredentialsProvider: credentialsProvider,
-      objectMapper: new ObjectMapper()
+      credentialsRepository: credentialsRepo
     )
     hc = [
       "name" : "basic-check",
@@ -88,7 +89,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ipAddress: "1.1.1.1",
         ipProtocol: "TCP",
         portRange: "80-82")
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -105,7 +106,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         region: REGION,
         accountName: ACCOUNT_NAME,
         instances: [INSTANCE])
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -133,7 +134,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ipAddress: "1.1.1.1",
         ipProtocol: "ABC",
         portRange: "80-82")
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -145,7 +146,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
 
   void "null input fails validation"() {
     setup:
       def description = new UpsertGoogleLoadBalancerDescription(loadBalancerType: GoogleLoadBalancerType.NETWORK)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -199,7 +200,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ]
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -224,7 +225,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         "hostRules" : null,
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -276,7 +277,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ]
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -329,7 +330,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ]
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -359,7 +360,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -379,7 +380,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         "backendService" : null,
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -403,7 +404,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -427,7 +428,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -457,7 +458,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -481,7 +482,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -504,7 +505,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
      ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -523,7 +524,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         "backendService" : null,
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -546,7 +547,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -569,7 +570,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -598,7 +599,7 @@ class UpsertGoogleLoadBalancerDescriptionValidatorSpec extends Specification {
         ],
       ]
       def description = converter.convertDescription(input)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
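The heavy use of `converter.convertDescription(input)` above builds typed descriptions from plain maps; with the converter now wired only with a `credentialsRepository` (the explicit `objectMapper` property is gone), the map-to-description step is presumably an internal Jackson conversion. The general shape of that fixture style, as an assumption-labeled sketch with a hypothetical stand-in type:

    import com.fasterxml.jackson.databind.ObjectMapper

    class LbInput {                 // hypothetical stand-in for UpsertGoogleLoadBalancerDescription
      String loadBalancerName
      String region
      String portRange
    }

    def input = [loadBalancerName: "lb", region: "us-central1", portRange: "80-82"]
    def description = new ObjectMapper().convertValue(input, LbInput)
    assert description.portRange == "80-82"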
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidatorSpec.groovy
index ea8c0f17657..b7ae33987a7 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleSecurityGroupDescriptionValidatorSpec.groovy
@@ -16,12 +16,14 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleSecurityGroupDescription
 import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
@@ -41,11 +43,11 @@ class UpsertGoogleSecurityGroupDescriptionValidatorSpec extends Specification {
 
   void setupSpec() {
     validator = new UpsertGoogleSecurityGroupDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
   }
 
   void "pass validation with proper description inputs"() {
@@ -65,7 +67,7 @@ class UpsertGoogleSecurityGroupDescriptionValidatorSpec extends Specification {
         targetTags: [TARGET_TAG],
         accountName: ACCOUNT_NAME
       )
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -81,7 +83,7 @@ class UpsertGoogleSecurityGroupDescriptionValidatorSpec extends Specification {
         network: NETWORK_NAME,
         accountName: ACCOUNT_NAME
       )
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -93,7 +95,7 @@ class UpsertGoogleSecurityGroupDescriptionValidatorSpec extends Specification {
 
   void "null input fails validation"() {
     setup:
       def description = new UpsertGoogleSecurityGroupDescription(network: "")
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidatorSpec.groovy
index a70f54916ca..80320aff628 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidatorSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleServerGroupTagsDescriptionValidatorSpec.groovy
@@ -16,12 +16,14 @@
 
 package com.netflix.spinnaker.clouddriver.google.deploy.validators
 
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
 import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleServerGroupTagsDescription
 import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider
 import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository
-import org.springframework.validation.Errors
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository
+import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler
 import spock.lang.Shared
 import spock.lang.Specification
@@ -36,11 +38,11 @@ class UpsertGoogleServerGroupTagsDescriptionValidatorSpec extends Specification
 
   void setupSpec() {
     validator = new UpsertGoogleServerGroupTagsDescriptionValidator()
-    def credentialsRepo = new MapBackedAccountCredentialsRepository()
-    def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo)
+    def credentialsRepo = new MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE,
+      new NoopCredentialsLifecycleHandler<>())
     def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).credentials(new FakeGoogleCredentials()).build()
-    credentialsRepo.save(ACCOUNT_NAME, credentials)
-    validator.accountCredentialsProvider = credentialsProvider
+    credentialsRepo.save(credentials)
+    validator.credentialsRepository = credentialsRepo
   }
 
   void "pass validation with proper description inputs"() {
@@ -49,7 +51,7 @@ class UpsertGoogleServerGroupTagsDescriptionValidatorSpec extends Specification
        region: REGION,
        tags: TAGS,
        accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -64,7 +66,7 @@ class UpsertGoogleServerGroupTagsDescriptionValidatorSpec extends Specification
        region: REGION,
        tags: [],
        accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -79,7 +81,7 @@ class UpsertGoogleServerGroupTagsDescriptionValidatorSpec extends Specification
        region: REGION,
        tags: null,
        accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -94,7 +96,7 @@ class UpsertGoogleServerGroupTagsDescriptionValidatorSpec extends Specification
        region: REGION,
        tags: TAGS + "",
        accountName: ACCOUNT_NAME)
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
@@ -106,7 +108,7 @@ class UpsertGoogleServerGroupTagsDescriptionValidatorSpec extends Specification
 
   void "null input fails validation"() {
     setup:
       def description = new UpsertGoogleServerGroupTagsDescription()
-      def errors = Mock(Errors)
+      def errors = Mock(ValidationErrors)
 
     when:
       validator.validate([], description, errors)
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicatorSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicatorSpec.groovy
new file mode 100644
index 00000000000..cdb94a3e8ed
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/health/GoogleHealthIndicatorSpec.groovy
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2023 Armory, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.google.health
+
+
+import com.google.api.services.compute.model.Project
+import com.google.common.collect.ImmutableList
+import com.google.common.collect.ImmutableMap
+import com.netflix.spectator.api.NoopRegistry
+import com.netflix.spectator.api.Registry
+import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties
+import com.netflix.spinnaker.clouddriver.google.provider.agent.StubComputeFactory
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
+import com.netflix.spinnaker.credentials.CredentialsRepository
+import com.netflix.spinnaker.credentials.CredentialsTypeBaseConfiguration
+import org.springframework.boot.actuate.health.Status
+import org.springframework.context.ApplicationContext
+import spock.lang.Specification
+import spock.lang.Unroll
+
+class GoogleHealthIndicatorSpec extends Specification {
+
+  private static final String ACCOUNT_NAME = "partypups"
+  private static final String PROJECT = "myproject"
+  private static final String REGION = "myregion"
+  private static final String ZONE = REGION + "-myzone"
+  private static final Registry REGISTRY = new NoopRegistry()
+
+  @Unroll
+  def "health succeeds when google is reachable"() {
+    setup:
+    def applicationContext = Mock(ApplicationContext)
+    def project = new Project()
+    project.setName(PROJECT)
+
+    def compute = new StubComputeFactory()
+      .setProjects(project)
+      .create()
+
+    def googleNamedAccountCredentials =
+      new GoogleNamedAccountCredentials.Builder()
+        .project(PROJECT)
+        .name(ACCOUNT_NAME)
+        .compute(compute)
+        .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE)))
+        .build()
+
+    def credentials = [googleNamedAccountCredentials]
+    def credentialsRepository = Stub(CredentialsRepository) {
+      getAll() >> credentials
+    }
+
+    def credentialsTypeBaseConfiguration = new CredentialsTypeBaseConfiguration(applicationContext, null)
+    credentialsTypeBaseConfiguration.credentialsRepository = credentialsRepository
+
+    def indicator = new GoogleHealthIndicator(googleConfigurationProperties: new GoogleConfigurationProperties())
+    indicator.registry = REGISTRY
+    indicator.credentialsTypeBaseConfiguration = credentialsTypeBaseConfiguration
+
+    when:
+    indicator.checkHealth()
+    def health = indicator.health()
+
+    then:
+    health.status == Status.UP
+    health.details.isEmpty()
+  }
+
+  @Unroll
+  def "health throws exception when google appears unreachable"() {
+    setup:
+    def applicationContext = Mock(ApplicationContext)
+    def project = new Project()
+    project.setName(PROJECT)
+
+    def compute = new StubComputeFactory()
+      .setProjects(project)
+      .setProjectException(new IOException("Read timed out"))
+      .create()
+
+    def googleNamedAccountCredentials =
+      new GoogleNamedAccountCredentials.Builder()
+        .project(PROJECT)
+        .name(ACCOUNT_NAME)
+        .compute(compute)
+        .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE)))
+        .build()
+
+    def credentials = [googleNamedAccountCredentials]
+    def credentialsRepository = Stub(CredentialsRepository) {
+      getAll() >> credentials
+    }
+
+    def credentialsTypeBaseConfiguration = new CredentialsTypeBaseConfiguration(applicationContext, null)
+    credentialsTypeBaseConfiguration.credentialsRepository = credentialsRepository
+
+    def indicator = new GoogleHealthIndicator(googleConfigurationProperties: new GoogleConfigurationProperties())
+    indicator.registry = REGISTRY
+    indicator.credentialsTypeBaseConfiguration = credentialsTypeBaseConfiguration
+
+    when:
+    indicator.checkHealth()
+    def health = indicator.health()
+
+    then:
+    thrown(GoogleHealthIndicator.GoogleIOException)
+
+    health == null
+  }
+
+  @Unroll
+  def "health succeeds when google is unreachable and verifyAccountHealth is false"() {
+    setup:
+    def applicationContext = Mock(ApplicationContext)
+    def project = new Project()
+    project.setName(PROJECT)
+
+    def compute = new StubComputeFactory()
+      .setProjects(project)
+      .setProjectException(new IOException("Read timed out"))
+      .create()
+
+    def googleNamedAccountCredentials =
+      new GoogleNamedAccountCredentials.Builder()
+        .project(PROJECT)
+        .name(ACCOUNT_NAME)
+        .compute(compute)
+        .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE)))
+        .build()
+
+    def credentials = [googleNamedAccountCredentials]
+    def credentialsRepository = Stub(CredentialsRepository) {
+      getAll() >> credentials
+    }
+
+    def credentialsTypeBaseConfiguration = new CredentialsTypeBaseConfiguration(applicationContext, null)
+    credentialsTypeBaseConfiguration.credentialsRepository = credentialsRepository
+
+    def indicator = new GoogleHealthIndicator(googleConfigurationProperties: new GoogleConfigurationProperties())
+    indicator.googleConfigurationProperties.health.setVerifyAccountHealth(false)
+    indicator.registry = REGISTRY
+    indicator.credentialsTypeBaseConfiguration = credentialsTypeBaseConfiguration
+
+
+    when:
+    indicator.checkHealth()
+    def health = indicator.health()
+
+    then:
+    health.status == Status.UP
+    health.details.isEmpty()
+  }
+}
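Taken together, the three cases above pin the behavior down: with verifyAccountHealth enabled, checkHealth probes each account (via the Compute projects API, judging by StubComputeFactory.setProjectException) and surfaces an IOException as a wrapped exception; with the flag off, health reports UP without probing. A hypothetical standalone reduction of that logic — not the actual GoogleHealthIndicator:

    def checkHealth(List<String> accounts, boolean verifyAccountHealth, Closure probe) {
      if (verifyAccountHealth) {
        try {
          accounts.each { probe(it) }          // e.g. compute.projects().get(project).execute()
        } catch (IOException e) {
          throw new IllegalStateException(e)   // stands in for GoogleHealthIndicator.GoogleIOException
        }
      }
      return "UP"
    }

    assert checkHealth(["partypups"], false) { throw new IOException("Read timed out") } == "UP"
    assert checkHealth(["partypups"], true) { } == "UP"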
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/UtilsSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/UtilsSpec.groovy
index 8f0a2d75488..0de1a08c00e 100644
--- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/UtilsSpec.groovy
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/UtilsSpec.groovy
@@ -77,15 +77,22 @@ class UtilsSpec extends Specification {
       expected == Utils.getTargetProxyType(input)
 
     where:
-      input                                                                                                   | expected
-      "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetHttpsProxies/https-proxy" | GoogleTargetProxyType.HTTPS
-      "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetHttpProxies/http-proxy"   | GoogleTargetProxyType.HTTP
-      "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetSslProxies/ssl-proxy"     | GoogleTargetProxyType.SSL
-      "https://www.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetTcpProxies/tcp-proxy"     | GoogleTargetProxyType.TCP
-      "projects/spinnaker-jtk54/global/targetHttpsProxies/https-proxy"                                       | GoogleTargetProxyType.HTTPS
-      "projects/spinnaker-jtk54/global/targetHttpProxies/http-proxy"                                         | GoogleTargetProxyType.HTTP
-      "projects/spinnaker-jtk54/global/targetSslProxies/ssl-proxy"                                           | GoogleTargetProxyType.SSL
-      "projects/spinnaker-jtk54/global/targetTcpProxies/tcp-proxy"                                           | GoogleTargetProxyType.TCP
+      input                                                                                                       | expected
+      "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetHttpsProxies/https-proxy" | GoogleTargetProxyType.HTTPS
+      "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetHttpProxies/http-proxy"   | GoogleTargetProxyType.HTTP
+      "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetSslProxies/ssl-proxy"     | GoogleTargetProxyType.SSL
+      "https://compute.googleapis.com/compute/v1/projects/spinnaker-jtk54/global/targetTcpProxies/tcp-proxy"     | GoogleTargetProxyType.TCP
+      "projects/spinnaker-jtk54/global/targetHttpsProxies/https-proxy"                                           | GoogleTargetProxyType.HTTPS
+      "projects/spinnaker-jtk54/global/targetHttpProxies/http-proxy"                                             | GoogleTargetProxyType.HTTP
+      "projects/spinnaker-jtk54/global/targetSslProxies/ssl-proxy"                                               | GoogleTargetProxyType.SSL
+      "projects/spinnaker-jtk54/global/targetTcpProxies/tcp-proxy"                                               | GoogleTargetProxyType.TCP
+      "https://www.googleapis.com/compute/beta/projects/my-project/regions/us-west3/targetPools/zsvrgrptestvmgvalx06-gce-fe-tp-1597099154789" | GoogleTargetProxyType.UNKNOWN
+      null                                                                                                        | GoogleTargetProxyType.UNKNOWN
+      ""                                                                                                          | GoogleTargetProxyType.UNKNOWN
+      "/abc"                                                                                                      | GoogleTargetProxyType.UNKNOWN
+      "abc/"                                                                                                      | GoogleTargetProxyType.UNKNOWN
+      "abc"                                                                                                       | GoogleTargetProxyType.UNKNOWN
+      "abc//"                                                                                                     | GoogleTargetProxyType.UNKNOWN
   }
 
   def "should get region from a full group Url"() {
@@ -94,9 +101,9 @@ class UtilsSpec extends Specification {
 
     where:
       input                                                                                                         | expected
-      "https://www.googleapis.com/compute/v1/projects/PROJECT/zones/us-central1-f/instanceGroups/svg-stack-v000"    | "us-central1"
+      "https://compute.googleapis.com/compute/v1/projects/PROJECT/zones/us-central1-f/instanceGroups/svg-stack-v000" | "us-central1"
       "/projects/PROJECT/zones/us-central1-f/instanceGroups/svg-stack-v000"                                         | "us-central1"
-      "https://www.googleapis.com/compute/v1/projects/PROJECT/regions/us-central1/instanceGroups/svg-stack-v00"     | "us-central1"
+      "https://compute.googleapis.com/compute/v1/projects/PROJECT/regions/us-central1/instanceGroups/svg-stack-v00" | "us-central1"
       "projects/PROJECT/regions/us-central1/instanceGroups/svg-stack-v00"                                           | "us-central1"
   }
@@ -118,9 +125,9 @@ class UtilsSpec extends Specification {
 
     where:
       input | expected
"https://www.googleapis.com/compute/beta/projects/spinnaker-jtk54/global/healthChecks/jake-ilb" | "healthChecks" - "https://www.googleapis.com/compute/beta/projects/spinnaker-jtk54/global/httpHealthChecks/jake-ilb" | "httpHealthChecks" - "https://www.googleapis.com/compute/beta/projects/spinnaker-jtk54/global/httpsHealthChecks/jake-ilb" | "httpsHealthChecks" + "https://compute.googleapis.com/compute/beta/projects/spinnaker-jtk54/global/healthChecks/jake-ilb" | "healthChecks" + "https://compute.googleapis.com/compute/beta/projects/spinnaker-jtk54/global/httpHealthChecks/jake-ilb" | "httpHealthChecks" + "https://compute.googleapis.com/compute/beta/projects/spinnaker-jtk54/global/httpsHealthChecks/jake-ilb" | "httpsHealthChecks" } @Unroll diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/names/GoogleLabeledResourceNamerSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/names/GoogleLabeledResourceNamerSpec.groovy new file mode 100644 index 00000000000..3ab1b977bf0 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/names/GoogleLabeledResourceNamerSpec.groovy @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Schibsted ASA. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.names + +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup +import com.netflix.spinnaker.moniker.Namer +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import static com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer.APP +import static com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer.CLUSTER +import static com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer.STACK +import static com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer.DETAIL +import static com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer.SEQUENCE + +class GoogleLabeledResourceNamerSpec extends Specification { + + @Shared + Namer namer = new GoogleLabeledResourceNamer() + + @Unroll + def "should derive correct moniker"() { + given: + def resource = new GoogleServerGroup(name: name, instanceTemplateLabels: labels) + def moniker = namer.deriveMoniker(resource) + + expect: + with(moniker) { + app == expectedApp + cluster == expectedCluster + stack == expectedStack + detail == expectedDetail + sequence == expectedSequence + } + + where: + name | labels || expectedApp | expectedCluster | expectedStack | expectedDetail | expectedSequence + "cass-nccpintegration-random-junk-d0prod-z0useast1a-v003" | null || "cass" | "cass-nccpintegration-random-junk-d0prod-z0useast1a" | "nccpintegration" | "random-junk-d0prod-z0useast1a" | 3 + "cass-nccpintegration-random-junk-d0prod-z0useast1a-v003" | [:] || "cass" | "cass-nccpintegration-random-junk-d0prod-z0useast1a" | "nccpintegration" | "random-junk-d0prod-z0useast1a" | 3 + "cass-nccpintegration-random-junk-d0prod-z0useast1a-v003" | [(APP): "myApp"] || "myApp" | "cass-nccpintegration-random-junk-d0prod-z0useast1a" | "nccpintegration" | "random-junk-d0prod-z0useast1a" | 3 + "cass-nccpintegration-random-junk-v003" | [(CLUSTER): "myCluster"] || "cass" | "myCluster" | "nccpintegration" | "random-junk" | 3 + "cass-nccpintegration-random-junk-v003" | [(STACK): "myStack"] || "cass" | "cass-myStack" | "myStack" | "random-junk" | 3 + "cass-nccpintegration-random-junk-v003" | [(STACK): "myStack", (DETAIL): ""] || "cass" | "cass-myStack" | "myStack" | "" | 3 + "cass-nccpintegration-random-junk-v003" | [(DETAIL): "myDetail"] || "cass" | "cass--myDetail" | "nccpintegration" | "myDetail" | 3 + "cass-nccpintegration-random-junk-v003" | [(SEQUENCE): "42"] || "cass" | "cass-nccpintegration-random-junk" | "nccpintegration" | "random-junk" | 42 + "app" | [(STACK): "myStack", (SEQUENCE): "2"] || "app" | "app-myStack" | "myStack" | null | 2 + "app" | null || "app" | "app" | null | null | null + "app-cluster" | null || "app" | "app-cluster" | "cluster" | null | null + "app-cluster" | [(CLUSTER): "myCluster"] || "app" | "myCluster" | "cluster" | null | null + "app-v042" | [(SEQUENCE): "13"] || "app" | "app" | null | null | 13 + "app-v042" | [(DETAIL): "myDetail"] || "app" | "app--myDetail" | null | "myDetail" | 42 + "awesomeapp--my-detail" | null || "awesomeapp" | "awesomeapp--my-detail" | null | "my-detail" | null + "awesomeapp--my-detail" | getAllMonikerLabels(true) || "myApp" | "myCluster" | "myStack" | "myDetail" | 13 + "awesomeapp--my-detail" | getAllMonikerLabels(false) || "myApp" | "myApp-myStack-myDetail" | "myStack" | "myDetail" | 13 + } + + def getAllMonikerLabels(includeCluster = false) { + def labels = [(APP): "myApp", (STACK): "myStack", (DETAIL): "myDetail", (SEQUENCE): "13"] 
+ if (includeCluster) { + labels << [(CLUSTER): "myCluster"] + } + labels + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgentTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgentTest.java new file mode 100644 index 00000000000..4b2b768e5ab --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgentTest.java @@ -0,0 +1,1011 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.Iterables.getOnlyElement; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.IMAGES; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.AttachedDiskInitializeParams; +import com.google.api.services.compute.model.Autoscaler; +import com.google.api.services.compute.model.AutoscalerStatusDetails; +import com.google.api.services.compute.model.AutoscalingPolicy; +import com.google.api.services.compute.model.AutoscalingPolicyCpuUtilization; +import com.google.api.services.compute.model.AutoscalingPolicyCustomMetricUtilization; +import com.google.api.services.compute.model.AutoscalingPolicyLoadBalancingUtilization; +import com.google.api.services.compute.model.AutoscalingPolicyScaleInControl; +import com.google.api.services.compute.model.DistributionPolicy; +import com.google.api.services.compute.model.DistributionPolicyZoneConfiguration; +import com.google.api.services.compute.model.FixedOrPercent; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.api.services.compute.model.InstanceGroupManagerActionsSummary; +import com.google.api.services.compute.model.InstanceGroupManagerAutoHealingPolicy; +import com.google.api.services.compute.model.InstanceProperties; +import com.google.api.services.compute.model.InstanceTemplate; +import com.google.api.services.compute.model.Metadata; +import com.google.api.services.compute.model.Metadata.Items; +import com.google.api.services.compute.model.NamedPort; +import com.google.api.services.compute.model.NetworkInterface; +import com.google.api.services.compute.model.ServiceAccount; +import com.google.api.services.compute.model.StatefulPolicy; +import com.google.api.services.compute.model.StatefulPolicyPreservedState; 
+import com.google.api.services.compute.model.StatefulPolicyPreservedStateDiskDevice; +import com.google.api.services.compute.model.Tags; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.MoreExecutors; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.cats.provider.DefaultProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.AutoscalingMode; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy.CustomMetricUtilization; +import com.netflix.spinnaker.clouddriver.google.model.GoogleInstance; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.model.health.GoogleInstanceHealth; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.math.BigInteger; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.Executors; +import javax.annotation.ParametersAreNonnullByDefault; +import org.assertj.core.data.Offset; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class AbstractGoogleServerGroupCachingAgentTest { + + private static final String ACCOUNT_NAME = "partypups"; + private static final String PROJECT = "myproject"; + private static final String REGION = "myregion"; + private static final String REGION_URL = "http://compute/regions/" + REGION; + private static final String ZONE = REGION + "-myzone"; + private static final String ZONE_URL = "http://compute/zones/" + ZONE; + + private ObjectMapper objectMapper; + + @BeforeEach + void createTestObjects() { + objectMapper = new ObjectMapper(); + } + + @Test + void basicServerGroupProperties() { + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager() + .setName("myServerGroup") + .setZone(ZONE_URL) + .setSelfLink("http://my/fun/link") + .setNamedPorts( + ImmutableList.of( + new NamedPort().setName("first").setPort(10111), + new NamedPort().setName("second").setPort(20222))) + .setCurrentActions( + new InstanceGroupManagerActionsSummary().setCreating(2).setDeleting(4)) + .setTargetSize(3) + .setStatefulPolicy( + new StatefulPolicy() + .setPreservedState( + new StatefulPolicyPreservedState() + .setDisks( + ImmutableMap.of( + "myDisk", new StatefulPolicyPreservedStateDiskDevice())))) + .setAutoHealingPolicies( + ImmutableList.of( + new InstanceGroupManagerAutoHealingPolicy().setInitialDelaySec(92))); + + Compute compute = + new StubComputeFactory().setInstanceGroupManagers(instanceGroupManager).create(); + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent(compute, ImmutableList.of(instanceGroupManager)); + + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + + 
GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + + assertThat(serverGroup.getName()).isEqualTo(instanceGroupManager.getName()); + assertThat(serverGroup.getSelfLink()).isEqualTo(instanceGroupManager.getSelfLink()); + + assertThat(serverGroup.getCurrentActions()).isEqualTo(instanceGroupManager.getCurrentActions()); + + assertThat(serverGroup.getStatefulPolicy()).isEqualTo(instanceGroupManager.getStatefulPolicy()); + assertThat(serverGroup.getAutoHealingPolicy()) + .isEqualTo(instanceGroupManager.getAutoHealingPolicies().get(0)); + assertThat(serverGroup.getLaunchConfig()).containsKeys("createdTime"); + + assertThat(serverGroup.getAccount()).isEqualTo(ACCOUNT_NAME); + assertThat(serverGroup.getRegional()).isFalse(); + assertThat(serverGroup.getRegion()).isEqualTo(REGION); + assertThat(serverGroup.getZone()).isEqualTo(ZONE); + assertThat(serverGroup.getZones()).containsExactly(ZONE); + + assertThat(serverGroup.getNamedPorts()) + .containsOnly(entry("first", 10111), entry("second", 20222)); + assertThat(serverGroup.getAsg()) + .contains(entry("minSize", 3), entry("maxSize", 3), entry("desiredCapacity", 3)); + } + + @Test + void serverGroupPropertiesForZonalServerGroup() { + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager() + .setName("myServerGroup") + .setZone(ZONE_URL) + // This should be ignored for zonal server groups, but we'll set one to make sure + .setDistributionPolicy( + new DistributionPolicy() + .setZones( + ImmutableList.of( + new DistributionPolicyZoneConfiguration() + .setZone("http://compute/zones/fakezone1")))); + + Compute compute = + new StubComputeFactory().setInstanceGroupManagers(instanceGroupManager).create(); + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent(compute, ImmutableList.of(instanceGroupManager)); + + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + assertThat(serverGroup.getRegional()).isFalse(); + assertThat(serverGroup.getRegion()).isEqualTo(REGION); + assertThat(serverGroup.getZone()).isEqualTo(ZONE); + assertThat(serverGroup.getZones()).containsExactly(ZONE); + } + + @Test + void serverGroupPropertiesForRegionalServerGroup() { + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager() + .setName("myServerGroup") + .setRegion(REGION_URL) + .setDistributionPolicy( + new DistributionPolicy() + .setZones( + ImmutableList.of( + new DistributionPolicyZoneConfiguration() + .setZone("http://compute/zones/fakezone1"), + new DistributionPolicyZoneConfiguration() + .setZone("http://compute/zones/fakezone2"), + new DistributionPolicyZoneConfiguration() + .setZone("http://compute/zones/fakezone3"))) + .setTargetShape("ANY")); + + Compute compute = + new StubComputeFactory().setInstanceGroupManagers(instanceGroupManager).create(); + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent(compute, ImmutableList.of(instanceGroupManager)); + + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + assertThat(serverGroup.getRegional()).isTrue(); + assertThat(serverGroup.getRegion()).isEqualTo(REGION); + assertThat(serverGroup.getZone()).isNull(); + assertThat(serverGroup.getZones()) + .containsExactlyInAnyOrder("fakezone1", "fakezone2", "fakezone3"); + assertThat(serverGroup.getDistributionPolicy().getTargetShape()).isEqualTo("ANY"); + } + + @Test + void 
serverGroupPropertiesFromInstanceTemplate() { + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager() + .setInstanceTemplate("http://compute/global/instanceTemplates/myInstanceTemplate") + .setZone(ZONE_URL); + InstanceTemplate instanceTemplate = + new InstanceTemplate() + .setName("myInstanceTemplate") + .setProperties( + new InstanceProperties() + .setDisks( + ImmutableList.of( + new AttachedDisk() + .setBoot(true) + .setInitializeParams( + new AttachedDiskInitializeParams() + .setSourceImage("http://compute/global/images/myImage")))) + .setServiceAccounts( + ImmutableList.of(new ServiceAccount().setEmail("spinnaker@spinnaker.io"))) + .setMachineType("machineType") + .setMinCpuPlatform("minCpuPlatform") + .setCanIpForward(true) + .setNetworkInterfaces( + ImmutableList.of( + new NetworkInterface() + .setNetwork( + String.format( + "http://compute/network/projects/%s/myNetworkName", + PROJECT)))) + .setMetadata( + new Metadata() + .setItems( + ImmutableList.of( + new Items().setKey("load-balancer-names").setValue("one,two"), + new Items() + .setKey("global-load-balancer-names") + .setValue("three,four"), + new Items() + .setKey("backend-service-names") + .setValue("five,six"), + new Items() + .setKey("load-balancing-policy") + .setValue("{\"maxUtilization\": 1.3}")))) + .setLabels(ImmutableMap.of("label1", "value1", "label2", "value2")) + .setTags(new Tags().setItems(ImmutableList.of("tag1", "tag2")))); + + DefaultProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData( + IMAGES.getNs(), + new DefaultCacheData( + Keys.getImageKey(ACCOUNT_NAME, "myImage"), + ImmutableMap.of( + "image", + ImmutableMap.of( + "description", + "appversion: myapp-1.0.0-12345.h777/999/10111," + + "build_host: spin.nyc.corp," + + "build_info_url: http://jenkins/artifact/12345")), + ImmutableMap.of())); + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager) + .setInstanceTemplates(instanceTemplate) + .create(); + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent(compute, ImmutableList.of(instanceGroupManager)); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + + assertThat(serverGroup.getInstanceTemplateTags()) + .isEqualTo(ImmutableSet.copyOf(instanceTemplate.getProperties().getTags().getItems())); + assertThat(serverGroup.getInstanceTemplateServiceAccounts()) + .isEqualTo(ImmutableSet.copyOf(instanceTemplate.getProperties().getServiceAccounts())); + assertThat(serverGroup.getInstanceTemplateLabels()) + .isEqualTo(instanceTemplate.getProperties().getLabels()); + assertThat(serverGroup.getLaunchConfig()) + .contains( + entry("imageId", "myImage"), + entry("launchConfigurationName", instanceTemplate.getName()), + entry("instanceType", instanceTemplate.getProperties().getMachineType()), + entry("minCpuPlatform", instanceTemplate.getProperties().getMinCpuPlatform()), + entry("instanceTemplate", instanceTemplate)); + assertThat(serverGroup.getAsg()) + .contains( + entry("load-balancer-names", ImmutableList.of("one", "two")), + entry("global-load-balancer-names", ImmutableList.of("three", "four")), + entry("backend-service-names", ImmutableList.of("five", "six"))); + assertThat(serverGroup.getAsg()).containsKey("load-balancing-policy"); + assertThat( + ((Map) serverGroup.getAsg().get("load-balancing-policy")) + .get("maxUtilization")) + .isEqualTo(1.3f, Offset.offset(.0000001f)); + 
assertThat(serverGroup.getNetworkName()).isEqualTo("myNetworkName"); + assertThat(serverGroup.getBuildInfo()) + .containsOnly( + entry("package_name", "myapp"), + entry("version", "1.0.0"), + entry("commit", "12345"), + entry( + "jenkins", + ImmutableMap.of("name", "999", "number", "777", "host", "spin.nyc.corp")), + entry("buildInfoUrl", "http://jenkins/artifact/12345")); + } + + @Test + void minimalBuildInfo() { + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager() + .setInstanceTemplate("http://compute/global/instanceTemplates/myInstanceTemplate") + .setZone(ZONE_URL); + InstanceTemplate instanceTemplate = + new InstanceTemplate() + .setName("myInstanceTemplate") + .setProperties( + new InstanceProperties() + .setDisks( + ImmutableList.of( + new AttachedDisk() + .setBoot(true) + .setInitializeParams( + new AttachedDiskInitializeParams() + .setSourceImage("http://compute/global/images/myImage"))))); + + DefaultProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData( + IMAGES.getNs(), + new DefaultCacheData( + Keys.getImageKey(ACCOUNT_NAME, "myImage"), + ImmutableMap.of( + "image", ImmutableMap.of("description", "appversion: myapp-1.0.0-h123")), + ImmutableMap.of())); + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager) + .setInstanceTemplates(instanceTemplate) + .create(); + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent(compute, ImmutableList.of(instanceGroupManager)); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + + assertThat(serverGroup.getBuildInfo()) + .containsOnly(entry("package_name", "myapp"), entry("version", "1.0.0")); + } + + @Test + void serverGroupPropertiesFromInstances() { + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager().setBaseInstanceName("myServerGroup-").setZone(ZONE_URL); + Instance serverInstance = + new Instance() + .setName("myServerGroup-1234") + .setId(BigInteger.valueOf(10111)) + .setMachineType("http://compute/global/machineTypes/reallyBigComputer") + .setCpuPlatform("goog86") + .setZone(ZONE_URL) + .setNetworkInterfaces( + ImmutableList.of( + new NetworkInterface() + .setNetwork( + String.format( + "http://compute/network/projects/%s/myNetworkName", PROJECT)))) + .setMetadata( + new Metadata() + .setItems( + ImmutableList.of(new Items().setKey("itemKey").setValue("itemValue")))) + .setDisks(ImmutableList.of(new AttachedDisk().setType("myDiskType"))) + .setServiceAccounts( + ImmutableList.of(new ServiceAccount().setEmail("spinnaker@spinnaker.io"))) + .setSelfLink("http://my/fun/link") + .setTags(new Tags().setItems(ImmutableList.of("tag1", "tag2"))) + .setLabels(ImmutableMap.of("label1", "value1", "label2", "value2")) + .setStatus("RUNNING"); + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager) + .setInstances(serverInstance) + .create(); + + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent(compute, ImmutableList.of(instanceGroupManager)); + + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + + assertThat(serverGroup.getInstances()).hasSize(1); + GoogleInstance cacheInstance = getOnlyElement(serverGroup.getInstances()); + + assertThat(cacheInstance.getName()).isEqualTo(serverInstance.getName()); + assertThat(cacheInstance.getAccount()).isEqualTo(PROJECT); + 
assertThat(cacheInstance.getGceId()).isEqualTo("10111");
+    assertThat(cacheInstance.getInstanceType()).isEqualTo("reallyBigComputer");
+    assertThat(cacheInstance.getCpuPlatform()).isEqualTo("goog86");
+    assertThat(cacheInstance.getZone()).isEqualTo(ZONE);
+    assertThat(cacheInstance.getRegion()).isEqualTo(REGION);
+    assertThat(cacheInstance.getNetworkInterfaces())
+        .isEqualTo(serverInstance.getNetworkInterfaces());
+    assertThat(cacheInstance.getNetworkName()).isEqualTo("myNetworkName");
+    assertThat(cacheInstance.getMetadata()).isEqualTo(serverInstance.getMetadata());
+    AttachedDisk diskWithCorrectType = new AttachedDisk();
+    diskWithCorrectType.putAll(cacheInstance.getDisks().get(0));
+    assertThat(ImmutableList.of(diskWithCorrectType)).isEqualTo(serverInstance.getDisks());
+    assertThat(cacheInstance.getServiceAccounts()).isEqualTo(serverInstance.getServiceAccounts());
+    assertThat(cacheInstance.getSelfLink()).isEqualTo(serverInstance.getSelfLink());
+    assertThat(cacheInstance.getTags()).isEqualTo(serverInstance.getTags());
+    assertThat(cacheInstance.getLabels()).isEqualTo(serverInstance.getLabels());
+    assertThat(cacheInstance.getInstanceHealth())
+        .isEqualTo(new GoogleInstanceHealth(GoogleInstanceHealth.Status.RUNNING));
+  }
+
+  @Test
+  void serverGroupDisksAreSortedProperly() {
+
+    List<String> diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(true).setType("FLAKY").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2"),
+            new AttachedDisk().setBoot(false).setType("FLAKY").setDeviceName("disk3"),
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk4"),
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk5"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk6"));
+
+    // The first persistent boot disk is moved to the front, every other boot disk is
+    // removed, and non-boot disks (persistent or not) keep their relative order.
+    assertThat(diskNames).containsExactly("disk4", "disk2", "disk3", "disk6");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(true).setType("FLAKY").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2"),
+            new AttachedDisk().setBoot(false).setType("FLAKY").setDeviceName("disk3"),
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk4"),
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk5"),
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk6"),
+            new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk7"));
+
+    // Since the first disk is persistent and bootable, we leave the disks untouched.
+    assertThat(diskNames)
+        .containsExactly("disk0", "disk1", "disk2", "disk3", "disk4", "disk5", "disk6", "disk7");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(true).setType("FLAKY").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2"),
+            new AttachedDisk().setBoot(true).setType("FLAKY").setDeviceName("disk3"),
+            new AttachedDisk().setBoot(false).setType("FLAKY").setDeviceName("disk4"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk5"));
+
+    // Since there is no persistent boot disk, we remove all boot disks.
+ assertThat(diskNames).containsExactly("disk2", "disk4", "disk5"); + + // These are copied from the original test code + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk0")); + assertThat(diskNames).containsExactly("disk0"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1")); + assertThat(diskNames).containsExactly("disk0", "disk1"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk1")); + assertThat(diskNames).containsExactly("disk1", "disk0"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2")); + assertThat(diskNames).containsExactly("disk0", "disk1", "disk2"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk1"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2")); + assertThat(diskNames).containsExactly("disk1", "disk0", "disk2"); + + // Mix in a SCRATCH disk. + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk1")); + assertThat(diskNames).containsExactly("disk0", "disk1"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"), + new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk2")); + assertThat(diskNames).containsExactly("disk0", "disk1", "disk2"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk1"), + new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk2")); + assertThat(diskNames).containsExactly("disk1", "disk0", "disk2"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk0"), + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk1"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk3")); + assertThat(diskNames).containsExactly("disk0", "disk1", "disk2", "disk3"); + + diskNames = + retrieveCachedDiskNames( + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"), + new AttachedDisk().setBoot(true).setType("PERSISTENT").setDeviceName("disk1"), + new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk2"), + new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk3")); + assertThat(diskNames).containsExactly("disk1", "disk0", "disk2", "disk3"); + + // Boot disk missing (really shouldn't happen, but want to ensure we don't disturb the results). 
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"));
+    assertThat(diskNames).containsExactly("disk0");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"));
+    assertThat(diskNames).containsExactly("disk0", "disk1");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2"));
+    assertThat(diskNames).containsExactly("disk0", "disk1", "disk2");
+
+    // Mix in a SCRATCH disk while the boot disk is missing.
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk1"));
+    assertThat(diskNames).containsExactly("disk0", "disk1");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk2"));
+    assertThat(diskNames).containsExactly("disk0", "disk1", "disk2");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk2"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk3"));
+    assertThat(diskNames).containsExactly("disk0", "disk1", "disk2", "disk3");
+
+    diskNames =
+        retrieveCachedDiskNames(
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk0"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk1"),
+            new AttachedDisk().setBoot(false).setType("SCRATCH").setDeviceName("disk2"),
+            new AttachedDisk().setBoot(false).setType("PERSISTENT").setDeviceName("disk3"));
+    assertThat(diskNames).containsExactly("disk0", "disk1", "disk2", "disk3");
+  }
+
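+  // The expectations above all encode a single ordering rule in the caching agent. As a
+  // rough, hedged sketch only (method and helper names here are illustrative, not code
+  // from this change): leave the list alone when the first persistent disk is already a
+  // boot disk; otherwise promote the first persistent boot disk and drop every other
+  // boot disk, keeping non-boot disks in their original order.
+  //
+  //   static List<AttachedDisk> sortWithBootDiskFirst(List<AttachedDisk> disks) {
+  //     Optional<AttachedDisk> firstPersistent =
+  //         disks.stream().filter(d -> "PERSISTENT".equals(d.getType())).findFirst();
+  //     if (!firstPersistent.isPresent() || Boolean.TRUE.equals(firstPersistent.get().getBoot())) {
+  //       return disks; // nothing to reorder
+  //     }
+  //     List<AttachedDisk> result = new ArrayList<>();
+  //     disks.stream()
+  //         .filter(d -> Boolean.TRUE.equals(d.getBoot()) && "PERSISTENT".equals(d.getType()))
+  //         .findFirst()
+  //         .ifPresent(result::add); // the first persistent boot disk moves to the front
+  //     disks.stream().filter(d -> !Boolean.TRUE.equals(d.getBoot())).forEach(result::add);
+  //     return result; // every other boot disk is dropped
+  //   }
+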
+  private List<String> retrieveCachedDiskNames(AttachedDisk... inputDisks) {
+    InstanceGroupManager instanceGroupManager =
+        new InstanceGroupManager()
+            .setInstanceTemplate("http://compute/global/instanceTemplates/myInstanceTemplate")
+            .setZone(ZONE_URL);
+    InstanceTemplate instanceTemplate =
+        new InstanceTemplate()
+            .setName("myInstanceTemplate")
+            .setProperties(new InstanceProperties().setDisks(ImmutableList.copyOf(inputDisks)));
+
+    Compute compute =
+        new StubComputeFactory()
+            .setInstanceGroupManagers(instanceGroupManager)
+            .setInstanceTemplates(instanceTemplate)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(compute, ImmutableList.of(instanceGroupManager));
+
+    CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+
+    GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult);
+
+    return getDiskNames(serverGroup);
+  }
+
+  private static ImmutableList<String> getDiskNames(GoogleServerGroup serverGroup) {
+    Map<String, Object> launchConfig = serverGroup.getLaunchConfig();
+    Map<String, Object> instanceTemplate =
+        (Map<String, Object>) launchConfig.get("instanceTemplate");
+    Map<String, Object> properties = (Map<String, Object>) instanceTemplate.get("properties");
+    List<Map<String, Object>> disks = (List<Map<String, Object>>) properties.get("disks");
+    return disks.stream().map(disk -> (String) disk.get("deviceName")).collect(toImmutableList());
+  }
+
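+  // The next test pins down when a cached server group counts as disabled. The rule it
+  // encodes is small enough to state as a sketch (method name illustrative, not code from
+  // this change): a server group is disabled only when its instance template names load
+  // balancers but the group is no longer attached to any target pool.
+  //
+  //   static boolean isDisabled(List<String> loadBalancerNames, List<String> targetPools) {
+  //     return !loadBalancerNames.isEmpty() && (targetPools == null || targetPools.isEmpty());
+  //   }
+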
+  @Test
+  void serverGroupDisabledProperty() {
+
+    Items loadBalancerItem = new Items().setKey("load-balancer-names");
+
+    InstanceGroupManager instanceGroupManager =
+        new InstanceGroupManager()
+            .setInstanceTemplate("http://compute/global/instanceTemplates/myInstanceTemplate")
+            .setZone(ZONE_URL);
+    InstanceTemplate instanceTemplate =
+        new InstanceTemplate()
+            .setName("myInstanceTemplate")
+            .setProperties(
+                new InstanceProperties()
+                    .setMetadata(new Metadata().setItems(ImmutableList.of(loadBalancerItem))));
+
+    Compute compute =
+        new StubComputeFactory()
+            .setInstanceGroupManagers(instanceGroupManager)
+            .setInstanceTemplates(instanceTemplate)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(compute, ImmutableList.of(instanceGroupManager));
+
+    // If there are load balancers and target pools, then we are not disabled.
+    instanceGroupManager.setTargetPools(ImmutableList.of("targetPool1"));
+    loadBalancerItem.setValue("loadBalancer1");
+    CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult);
+    assertThat(serverGroup.getDisabled()).isFalse();
+
+    // If there are load balancers and no target pools, then we _are_ disabled.
+    instanceGroupManager.setTargetPools(null);
+    loadBalancerItem.setValue("loadBalancer1");
+    cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    serverGroup = getOnlyServerGroup(cacheResult);
+    assertThat(serverGroup.getDisabled()).isTrue();
+
+    // If there are no load balancers, then we are not disabled, regardless of the target pools.
+    instanceGroupManager.setTargetPools(ImmutableList.of("targetPool1"));
+    loadBalancerItem.setValue(null);
+    cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    serverGroup = getOnlyServerGroup(cacheResult);
+    assertThat(serverGroup.getDisabled()).isFalse();
+
+    instanceGroupManager.setTargetPools(null);
+    loadBalancerItem.setValue(null);
+    cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    serverGroup = getOnlyServerGroup(cacheResult);
+    assertThat(serverGroup.getDisabled()).isFalse();
+  }
+
+  @Test
+  void serverGroupAutoscalerProperties() {
+
+    InstanceGroupManager instanceGroupManager =
+        new InstanceGroupManager().setName("myServerGroup").setTargetSize(303).setZone(ZONE_URL);
+    Autoscaler autoscaler =
+        new Autoscaler()
+            .setZone(ZONE_URL)
+            .setTarget("myServerGroup")
+            .setAutoscalingPolicy(
+                new AutoscalingPolicy().setMinNumReplicas(101).setMaxNumReplicas(202))
+            .setStatusDetails(
+                ImmutableList.of(
+                    new AutoscalerStatusDetails().setMessage("message1"),
+                    new AutoscalerStatusDetails().setMessage("message2")));
+
+    Compute compute =
+        new StubComputeFactory()
+            .setInstanceGroupManagers(instanceGroupManager)
+            .setAutoscalers(autoscaler)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(
+            compute, ImmutableList.of(instanceGroupManager), ImmutableList.of(autoscaler));
+
+    // The autoscaler's min/max bounds and the manager's target size should show up in the
+    // cached asg map, along with the autoscaler's status messages.
+    CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult);
+    assertThat(serverGroup.getAsg())
+        .containsOnly(entry("minSize", 101), entry("maxSize", 202), entry("desiredCapacity", 303));
+    assertThat(serverGroup.getAutoscalingMessages()).containsExactly("message1", "message2");
+  }
+
+  @Test
+  void serverGroupAutoscalingPolicy_allFields() {
+
+    AutoscalingPolicy input =
+        new AutoscalingPolicy()
+            .setCoolDownPeriodSec(123)
+            .setCpuUtilization(
+                new AutoscalingPolicyCpuUtilization()
+                    .setUtilizationTarget(9.87)
+                    .setPredictiveMethod("OPTIMIZE_AVAILABILITY"))
+            .setLoadBalancingUtilization(
+                new AutoscalingPolicyLoadBalancingUtilization().setUtilizationTarget(6.54))
+            .setMaxNumReplicas(99)
+            .setMinNumReplicas(11)
+            .setMode("ON")
+            .setCustomMetricUtilizations(
+                ImmutableList.of(
+                    new AutoscalingPolicyCustomMetricUtilization()
+                        .setMetric("myMetric")
+                        .setUtilizationTarget(911.23)
+                        .setUtilizationTargetType("GAUGE")
+                        .setSingleInstanceAssignment(1.0),
+                    new AutoscalingPolicyCustomMetricUtilization()))
+            .setScaleInControl(
+                new AutoscalingPolicyScaleInControl()
+                    .setTimeWindowSec(10111)
+                    .setMaxScaledInReplicas(new FixedOrPercent().setFixed(123).setPercent(456)));
+
+    InstanceGroupManager instanceGroupManager =
+        new InstanceGroupManager().setName("myServerGroup").setZone(ZONE_URL);
+    Autoscaler autoscaler =
+        new Autoscaler().setZone(ZONE_URL).setTarget("myServerGroup").setAutoscalingPolicy(input);
+
+    Compute compute =
+        new StubComputeFactory()
+            .setInstanceGroupManagers(instanceGroupManager)
+            .setAutoscalers(autoscaler)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(
+            compute,
ImmutableList.of(instanceGroupManager), ImmutableList.of(autoscaler)); + + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + GoogleAutoscalingPolicy converted = serverGroup.getAutoscalingPolicy(); + + assertThat(converted.getCoolDownPeriodSec()).isEqualTo(input.getCoolDownPeriodSec()); + assertThat(converted.getCpuUtilization().getUtilizationTarget()) + .isEqualTo(input.getCpuUtilization().getUtilizationTarget()); + assertThat(converted.getCpuUtilization().getPredictiveMethod().toString()) + .isEqualTo(input.getCpuUtilization().getPredictiveMethod()); + assertThat(converted.getLoadBalancingUtilization().getUtilizationTarget()) + .isEqualTo(input.getLoadBalancingUtilization().getUtilizationTarget()); + assertThat(converted.getMaxNumReplicas()).isEqualTo(input.getMaxNumReplicas()); + assertThat(converted.getMinNumReplicas()).isEqualTo(input.getMinNumReplicas()); + assertThat(converted.getMode().toString()).isEqualTo(input.getMode()); + assertThat(converted.getScaleInControl().getTimeWindowSec()).isEqualTo(10111); + assertThat(converted.getScaleInControl().getMaxScaledInReplicas().getFixed()).isEqualTo(123); + assertThat(converted.getScaleInControl().getMaxScaledInReplicas().getPercent()).isEqualTo(456); + + assertThat(converted.getCustomMetricUtilizations()) + .hasSize(input.getCustomMetricUtilizations().size()); + for (int i = 0; i < converted.getCustomMetricUtilizations().size(); ++i) { + CustomMetricUtilization convertedCustomMetric = + converted.getCustomMetricUtilizations().get(i); + AutoscalingPolicyCustomMetricUtilization inputCustomMetric = + input.getCustomMetricUtilizations().get(i); + assertThat(convertedCustomMetric.getMetric()).isEqualTo(inputCustomMetric.getMetric()); + assertThat(convertedCustomMetric.getUtilizationTarget()) + .isEqualTo(inputCustomMetric.getUtilizationTarget()); + assertThat( + Optional.ofNullable(convertedCustomMetric.getUtilizationTargetType()) + .map(Object::toString) + .orElse(null)) + .isEqualTo(inputCustomMetric.getUtilizationTargetType()); + } + } + + @Test + void serverGroupAutoscalingPolicy_onlyUpIsTransformedToOnlyScaleOut() { + + AutoscalingPolicy input = new AutoscalingPolicy().setMode("ONLY_UP"); + + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager().setName("myServerGroup").setZone(ZONE_URL); + Autoscaler autoscaler = + new Autoscaler().setZone(ZONE_URL).setTarget("myServerGroup").setAutoscalingPolicy(input); + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager) + .setAutoscalers(autoscaler) + .create(); + AbstractGoogleServerGroupCachingAgent cachingAgent = + createCachingAgent( + compute, ImmutableList.of(instanceGroupManager), ImmutableList.of(autoscaler)); + + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult); + GoogleAutoscalingPolicy converted = serverGroup.getAutoscalingPolicy(); + + assertThat(converted.getMode()).isEqualTo(AutoscalingMode.ONLY_SCALE_OUT); + } + + @Test + void serverGroupAutoscalingPolicy_noFields() { + + InstanceGroupManager instanceGroupManager = + new InstanceGroupManager().setName("myServerGroup").setZone(ZONE_URL); + Autoscaler autoscaler = + new Autoscaler() + .setZone(ZONE_URL) + .setTarget("myServerGroup") + .setAutoscalingPolicy(new AutoscalingPolicy()); + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager) + 
.setAutoscalers(autoscaler)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(
+            compute, ImmutableList.of(instanceGroupManager), ImmutableList.of(autoscaler));
+
+    CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult);
+    GoogleAutoscalingPolicy converted = serverGroup.getAutoscalingPolicy();
+
+    assertThat(converted.getCoolDownPeriodSec()).isNull();
+    assertThat(converted.getCpuUtilization()).isNull();
+    assertThat(converted.getCustomMetricUtilizations()).isNull();
+    assertThat(converted.getLoadBalancingUtilization()).isNull();
+    assertThat(converted.getMaxNumReplicas()).isNull();
+    assertThat(converted.getMinNumReplicas()).isNull();
+    assertThat(converted.getMode()).isNull();
+    assertThat(converted.getScaleInControl()).isNull();
+  }
+
+  @Test
+  void serverGroupAutoscalingPolicy_unknownPredictiveAutoscalerMethod() {
+
+    InstanceGroupManager instanceGroupManager =
+        new InstanceGroupManager().setName("myServerGroup").setZone(ZONE_URL);
+    Autoscaler autoscaler =
+        new Autoscaler()
+            .setZone(ZONE_URL)
+            .setTarget("myServerGroup")
+            .setAutoscalingPolicy(
+                new AutoscalingPolicy()
+                    .setCpuUtilization(
+                        new AutoscalingPolicyCpuUtilization()
+                            .setPredictiveMethod("SOME THING THAT DOESN'T REALLY EXIST")));
+
+    Compute compute =
+        new StubComputeFactory()
+            .setInstanceGroupManagers(instanceGroupManager)
+            .setAutoscalers(autoscaler)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(
+            compute, ImmutableList.of(instanceGroupManager), ImmutableList.of(autoscaler));
+
+    CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult);
+    GoogleAutoscalingPolicy converted = serverGroup.getAutoscalingPolicy();
+
+    assertThat(converted.getCpuUtilization().getPredictiveMethod()).isNull();
+  }
+
+  @Test
+  void serverGroupAutoscalingPolicy_emptyPredictiveAutoscalerMethod() {
+
+    InstanceGroupManager instanceGroupManager =
+        new InstanceGroupManager().setName("myServerGroup").setZone(ZONE_URL);
+    Autoscaler autoscaler =
+        new Autoscaler()
+            .setZone(ZONE_URL)
+            .setTarget("myServerGroup")
+            .setAutoscalingPolicy(
+                new AutoscalingPolicy().setCpuUtilization(new AutoscalingPolicyCpuUtilization()));
+
+    Compute compute =
+        new StubComputeFactory()
+            .setInstanceGroupManagers(instanceGroupManager)
+            .setAutoscalers(autoscaler)
+            .create();
+    AbstractGoogleServerGroupCachingAgent cachingAgent =
+        createCachingAgent(
+            compute, ImmutableList.of(instanceGroupManager), ImmutableList.of(autoscaler));
+
+    CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache());
+    GoogleServerGroup serverGroup = getOnlyServerGroup(cacheResult);
+    GoogleAutoscalingPolicy converted = serverGroup.getAutoscalingPolicy();
+
+    assertThat(converted.getCpuUtilization().getPredictiveMethod()).isNull();
+  }
+
+  public static AbstractGoogleServerGroupCachingAgent createCachingAgent(
+      Compute compute, Collection<InstanceGroupManager> instanceGroupManagers) {
+    return createCachingAgent(
+        compute, instanceGroupManagers, /* autoscalers= */ ImmutableList.of());
+  }
+
+  private GoogleServerGroup getOnlyServerGroup(CacheResult cacheResult) {
+    Collection<CacheData> serverGroups = cacheResult.getCacheResults().get(SERVER_GROUPS.getNs());
+    assertThat(serverGroups).hasSize(1);
+    return objectMapper.convertValue(
+        getOnlyElement(serverGroups).getAttributes(), GoogleServerGroup.class);
+  }
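+
+  // Note: getOnlyServerGroup works because the agent stores the whole server group as the
+  // CacheData attribute map; ObjectMapper.convertValue just maps those attributes back onto
+  // GoogleServerGroup, so every test above asserts against a full round trip through the
+  // cache representation.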
+
+  private static DefaultProviderCache inMemoryProviderCache() {
+    return new DefaultProviderCache(new InMemoryCache());
+  }
+
+  public static AbstractGoogleServerGroupCachingAgent createCachingAgent(
+      Compute compute,
+      Collection<InstanceGroupManager> instanceGroupManagers,
+      Collection<Autoscaler> autoscalers) {
+    GoogleNamedAccountCredentials credentials =
+        new GoogleNamedAccountCredentials.Builder()
+            .project(PROJECT)
+            .name(ACCOUNT_NAME)
+            .compute(compute)
+            .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE)))
+            .build();
+    GoogleComputeApiFactory computeApiFactory =
+        new GoogleComputeApiFactory(
+            new GoogleOperationPoller(),
+            new DefaultRegistry(),
+            "user-agent",
+            MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()));
+    return new TestCachingAgent(credentials, computeApiFactory, instanceGroupManagers, autoscalers);
+  }
+
+  @ParametersAreNonnullByDefault
+  private static class TestCachingAgent extends AbstractGoogleServerGroupCachingAgent {
+
+    private final Collection<InstanceGroupManager> instanceGroupManagers;
+    private final Collection<Autoscaler> autoscalers;
+
+    TestCachingAgent(
+        GoogleNamedAccountCredentials credentials,
+        GoogleComputeApiFactory computeApiFactory,
+        Collection<InstanceGroupManager> instanceGroupManagers,
+        Collection<Autoscaler> autoscalers) {
+      super(credentials, computeApiFactory, new DefaultRegistry(), REGION, new ObjectMapper());
+      this.instanceGroupManagers = instanceGroupManagers;
+      this.autoscalers = autoscalers;
+    }
+
+    @Override
+    Collection<InstanceGroupManager> retrieveInstanceGroupManagers() {
+      return instanceGroupManagers;
+    }
+
+    @Override
+    Collection<Autoscaler> retrieveAutoscalers() {
+      return autoscalers;
+    }
+
+    @Override
+    String getBatchContextPrefix() {
+      return getClass().getSimpleName();
+    }
+
+    @Override
+    Collection<String> getOnDemandKeysToEvictForMissingServerGroup(
+        ProviderCache providerCache, String serverGroupName) {
+      throw new UnsupportedOperationException("#getOnDemandKeysToEvictForMissingServerGroup()");
+    }
+
+    @Override
+    boolean keyOwnedByThisAgent(Map<String, String> parsedKey) {
+      throw new UnsupportedOperationException("#keyOwnedByThisAgent()");
+    }
+
+    @Override
+    Optional<InstanceGroupManager> retrieveInstanceGroupManager(String name) {
+      throw new UnsupportedOperationException("#retrieveInstanceGroupManager()");
+    }
+
+    @Override
+    Optional<Autoscaler> retrieveAutoscaler(InstanceGroupManager manager) {
+      throw new UnsupportedOperationException("#retrieveAutoscaler()");
+    }
+
+    @Override
+    Collection<Instance> retrieveRelevantInstances(InstanceGroupManager manager) {
+      throw new UnsupportedOperationException("#retrieveRelevantInstances()");
+    }
+  }
+}
diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgentTest.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgentTest.groovy
new file mode 100644
index 00000000000..c2ad8b43fcc
--- /dev/null
+++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgentTest.groovy
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2019 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent + +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck + +import static org.assertj.core.api.Assertions.assertThat + +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.* +import com.google.common.collect.ImmutableList +import com.google.common.collect.ImmutableMap +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.Test + +class GoogleHealthCheckCachingAgentTest { + + private static final String ACCOUNT_NAME = "partypups" + private static final String PROJECT = "myproject" + private static final String REGION = "myregion" + private static final String REGION_URL = "http://compute/regions/" + REGION + private static final String ZONE = REGION + "-myzone" + private static final String ZONE_URL = "http://compute/zones/" + ZONE + + private ObjectMapper objectMapper + private GoogleHealthCheckCachingAgent healthCheckAgent + + @BeforeEach + void createTestObjects() { + objectMapper = new ObjectMapper() + + Compute compute = new StubComputeFactory().create() + GoogleNamedAccountCredentials credentials = + new GoogleNamedAccountCredentials.Builder() + .project(PROJECT) + .name(ACCOUNT_NAME) + .compute(compute) + .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE))) + .build() + healthCheckAgent = new GoogleHealthCheckCachingAgent( + "app-name", + credentials, + objectMapper, + new DefaultRegistry(), + ) + } + + private static HealthCheck buildBaseHealthCheck(String name, String region) { + HealthCheck hc = new HealthCheck() + hc.setName(name) + hc.setSelfLink("http://selflink") + hc.setRegion(region) + hc.setCheckIntervalSec(60) + hc.setTimeoutSec(10) + hc.setHealthyThreshold(1) + hc.setUnhealthyThreshold(3) + return hc + } + + @Test + void createsValidHttpHealthCheck() { + HTTPHealthCheck httpHealthCheck = new HTTPHealthCheck() + httpHealthCheck.setPort(1234) + httpHealthCheck.setRequestPath("/healthz") + + HealthCheck hc = buildBaseHealthCheck("valid", REGION) + hc.setHttpHealthCheck(httpHealthCheck) + hc.setType("HTTP") + + GoogleHealthCheck ghc = healthCheckAgent.toGoogleHealthCheck(hc, REGION) + assertThat(ghc.getPort()).isEqualTo(1234) + assertThat(ghc.getRegion()).isEqualTo(REGION) + assertThat(ghc.getRequestPath()).isEqualTo("/healthz") + assertThat(ghc.getHealthCheckType()).isEqualTo(GoogleHealthCheck.HealthCheckType.HTTP) + } + + @Test + void handlesHttpHealthCheckWithoutPort() { + HTTPHealthCheck httpHealthCheck = new HTTPHealthCheck() + httpHealthCheck.setRequestPath("/healthz") + + HealthCheck hc = buildBaseHealthCheck("no-port", REGION) + hc.setHttpHealthCheck(httpHealthCheck) + hc.setType("HTTP") + + GoogleHealthCheck ghc = healthCheckAgent.toGoogleHealthCheck(hc, REGION) + assertThat(ghc).isNull() + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgentSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgentSpec.groovy index 6024420975f..8edd1b6fcde 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgentSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleImageCachingAgentSpec.groovy @@ -30,7 +30,7 @@ class GoogleImageCachingAgentSpec extends Specification { def imagesCallback1 = new GoogleImageCachingAgent.AllImagesCallback(new GoogleImageCachingAgent()) imagesCallback1.imageProjectToNextPageTokenMap = [:] imagesCallback1.imageList = imageList - def imageListResult1 = new ImageList(selfLink: "https://www.googleapis.com/compute/alpha/projects/ubuntu-os-cloud/global/images") + def imageListResult1 = new ImageList(selfLink: "https://compute.googleapis.com/compute/alpha/projects/ubuntu-os-cloud/global/images") imageListResult1.setItems([new Image(name: "backports-debian-7-wheezy-v20141108"), new Image(name: "debian-7-wheezy-v20141108"), new Image(name: "someos-8-something-v20141108"), @@ -39,7 +39,7 @@ class GoogleImageCachingAgentSpec extends Specification { def imagesCallback2 = new GoogleImageCachingAgent.AllImagesCallback(new GoogleImageCachingAgent()) imagesCallback2.imageProjectToNextPageTokenMap = [:] imagesCallback2.imageList = imageList - def imageListResult2 = new ImageList(selfLink: "https://www.googleapis.com/compute/alpha/projects/ubuntu-os-cloud/global/images") + def imageListResult2 = new ImageList(selfLink: "https://compute.googleapis.com/compute/alpha/projects/ubuntu-os-cloud/global/images") imageListResult2.setItems([new Image(name: "ubuntu-1404-trusty-v20141028"), buildImage("ubuntu-1404-trusty-v20141029", true), new Image(name: "ubuntu-1404-trusty-v20141031a")]) @@ -63,7 +63,7 @@ class GoogleImageCachingAgentSpec extends Specification { def imagesCallback = new GoogleImageCachingAgent.AllImagesCallback(new GoogleImageCachingAgent()) imagesCallback.imageProjectToNextPageTokenMap = [:] imagesCallback.imageList = imageList - def imageListResult = new ImageList(selfLink: "https://www.googleapis.com/compute/alpha/projects/ubuntu-os-cloud/global/images") + def imageListResult = new ImageList(selfLink: "https://compute.googleapis.com/compute/alpha/projects/ubuntu-os-cloud/global/images") imageListResult.setItems([buildImage("my-image-1", false), buildImage("my-image-2", true), buildImage("my-image-3", false), diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgentTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgentTest.java new file mode 100644 index 00000000000..51f9b944230 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleRegionalServerGroupCachingAgentTest.java @@ -0,0 +1,677 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.google.provider.agent;
+
+import static com.google.common.collect.ImmutableSet.toImmutableSet;
+import static com.google.common.collect.Iterables.getOnlyElement;
+import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.APPLICATIONS;
+import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.CLUSTERS;
+import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.INSTANCES;
+import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.LOAD_BALANCERS;
+import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.ON_DEMAND;
+import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.entry;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.api.services.compute.Compute;
+import com.google.api.services.compute.model.Instance;
+import com.google.api.services.compute.model.InstanceGroupManager;
+import com.google.api.services.compute.model.InstanceProperties;
+import com.google.api.services.compute.model.InstanceTemplate;
+import com.google.api.services.compute.model.Metadata;
+import com.google.api.services.compute.model.Metadata.Items;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.netflix.spectator.api.DefaultRegistry;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.AgentDataType.Authority;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.cache.DefaultJsonCacheData;
+import com.netflix.spinnaker.cats.mem.InMemoryCache;
+import com.netflix.spinnaker.cats.provider.DefaultProviderCache;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandResult;
+import com.netflix.spinnaker.clouddriver.google.cache.Keys;
+import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory;
+import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller;
+import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource;
+import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup;
+import com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer;
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.names.NamingStrategy;
+import com.netflix.spinnaker.moniker.Moniker;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+final class GoogleRegionalServerGroupCachingAgentTest {
+
+  private static final NamingStrategy<GoogleLabeledResource> NAMER =
+      new GoogleLabeledResourceNamer();
+
+  private static final String ACCOUNT_NAME = "partypups";
+  private static final String PROJECT = "myproject";
+  private static final String REGION = "myregion";
+  private static final String REGION_URL = "http://compute/regions/" + REGION;
+  private static final String ZONE = REGION + "-myzone";
+
+  private ObjectMapper objectMapper;
+
+  @BeforeEach
+  public void createTestObjects() {
+    objectMapper = new ObjectMapper();
+  }
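+
+  // The loadData tests below assert on the relationship graph that the agent writes into
+  // the cache. Roughly (names taken from the first test):
+  //
+  //   application "myServerGroup"
+  //     -> cluster "myServerGroup-prod"
+  //       -> server group "myServerGroup-prod-v001"
+  //         -> instances myServerGroup-prod-v001-1111 and -2222
+  //         -> load balancers (regional and global, via instance-template metadata)
+  //
+  // Edges are generally stored in both directions as CacheData relationships keyed by
+  // namespace, which is what the key helpers used below construct.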
REGION; + private static final String ZONE = REGION + "-myzone"; + + private ObjectMapper objectMapper; + + @BeforeEach + public void createTestObjects() { + objectMapper = new ObjectMapper(); + } + + @Test + void loadData_attributesAndRelationships() { + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers( + instanceGroupManager("myServerGroup-prod-v001") + .setInstanceTemplate( + "http://compute/instanceTemplates/global/myInstanceTemplate"), + instanceGroupManager("myOtherServerGroup-v003")) + .setInstances( + instance("myServerGroup-prod-v001-1111"), + instance("myServerGroup-prod-v001-2222"), + instance("myOtherServerGroup-v003-3333"), + instance("myOtherServerGroup-v003-4444")) + .setInstanceTemplates( + new InstanceTemplate() + .setName("myInstanceTemplate") + .setProperties( + new InstanceProperties() + .setMetadata( + new Metadata() + .setItems( + ImmutableList.of( + new Items() + .setKey("load-balancer-names") + .setValue( + "regionalLoadBalancer1,regionalLoadBalancer2"), + new Items() + .setKey("global-load-balancer-names") + .setValue( + "globalLoadBalancer1,globalLoadBalancer2")))))) + .create(); + + Moniker moniker = moniker("myServerGroup-prod-v001"); + + GoogleRegionalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + CacheResult cacheResult = cachingAgent.loadData(inMemoryProviderCache()); + + Collection<CacheData> applications = cacheResult.getCacheResults().get(APPLICATIONS.getNs()); + assertThat(applications) + .extracting(app -> app.getAttributes().get("name")) + .containsExactlyInAnyOrder("myServerGroup", "myOtherServerGroup"); + + CacheData application = getNamedItem(applications, moniker.getApp()); + assertThat(application.getRelationships().get(CLUSTERS.getNs())) + .containsExactly(clusterKey(moniker)); + assertThat(application.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder( + instanceKey("myServerGroup-prod-v001-1111"), + instanceKey("myServerGroup-prod-v001-2222")); + + Collection<CacheData> clusters = cacheResult.getCacheResults().get(CLUSTERS.getNs()); + assertThat(clusters) + .extracting(cluster -> cluster.getAttributes().get("name")) + .containsExactlyInAnyOrder("myServerGroup-prod", "myOtherServerGroup"); + + CacheData cluster = getNamedItem(clusters, "myServerGroup-prod"); + assertThat(cluster.getAttributes()) + .containsOnly( + entry("name", "myServerGroup-prod"), + entry("accountName", ACCOUNT_NAME), + entry("moniker", moniker)); + assertThat(cluster.getRelationships().get(APPLICATIONS.getNs())) + .containsExactly(applicationKey(moniker)); + assertThat(cluster.getRelationships().get(SERVER_GROUPS.getNs())) + .containsExactly(serverGroupKey("myServerGroup-prod-v001")); + assertThat(application.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder( + instanceKey("myServerGroup-prod-v001-1111"), + instanceKey("myServerGroup-prod-v001-2222")); + + ImmutableList<String> expectedLoadBalancerKeys = + ImmutableList.of( + Keys.getLoadBalancerKey(REGION, ACCOUNT_NAME, "regionalLoadBalancer1"), + Keys.getLoadBalancerKey(REGION, ACCOUNT_NAME, "regionalLoadBalancer2"), + Keys.getLoadBalancerKey("global", ACCOUNT_NAME, "globalLoadBalancer1"), + Keys.getLoadBalancerKey("global", ACCOUNT_NAME, "globalLoadBalancer2")); + + Collection<CacheData> loadBalancers = cacheResult.getCacheResults().get(LOAD_BALANCERS.getNs()); + assertThat(loadBalancers) + .extracting(CacheData::getId) + .containsExactlyInAnyOrderElementsOf(expectedLoadBalancerKeys); + for (CacheData loadBalancer : loadBalancers) { +
assertThat(loadBalancer.getRelationships().get(SERVER_GROUPS.getNs())) + .containsExactly(serverGroupKey("myServerGroup-prod-v001")); + } + + Collection<CacheData> serverGroups = cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()); + assertThat(serverGroups) + .extracting(serverGroup -> serverGroup.getAttributes().get("name")) + .containsExactlyInAnyOrder("myServerGroup-prod-v001", "myOtherServerGroup-v003"); + CacheData serverGroup = getNamedItem(serverGroups, "myServerGroup-prod-v001"); + // serverGroup's attributes are tested in the variety of methods above, so we'll only test the + // relationships + assertThat(serverGroup.getRelationships().get(APPLICATIONS.getNs())) + .containsExactly(applicationKey(moniker)); + assertThat(serverGroup.getRelationships().get(CLUSTERS.getNs())) + .containsExactly(clusterKey(moniker)); + assertThat(serverGroup.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder( + instanceKey("myServerGroup-prod-v001-1111"), + instanceKey("myServerGroup-prod-v001-2222")); + assertThat(serverGroup.getRelationships().get(LOAD_BALANCERS.getNs())) + .containsExactlyInAnyOrderElementsOf(expectedLoadBalancerKeys); + } + + @Test + void loadData_existingOnDemandData() throws JsonProcessingException { + + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers( + instanceGroupManager("cachedInPastUnprocessed-v001"), + instanceGroupManager("cachedInPastProcessed-v002"), + instanceGroupManager("cachedInFutureUnprocessedNoData-v003"), + instanceGroupManager("cachedInFutureUnprocessedData-v004"), + instanceGroupManager("cachedInFutureProcessedNoData-v005"), + instanceGroupManager("cachedInFutureProcessedData-v006")) + .create(); + + long timeInPast = System.currentTimeMillis() - 100000; + long timeInFuture = System.currentTimeMillis() + 100000; + + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInPastUnprocessed-v001"), + ImmutableMap.of("cacheTime", timeInPast, "processedCount", 0), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInPastProcessed-v002"), + ImmutableMap.of("cacheTime", timeInPast, "processedCount", 1), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureUnprocessedNoData-v003"), + ImmutableMap.of("cacheTime", timeInFuture, "processedCount", 0, "cacheResults", "{}"), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureUnprocessedData-v004"), + ImmutableMap.of( + "cacheTime", + timeInFuture, + "processedCount", + 0, + "cacheResults", + serverGroupCacheData("cachedInFutureUnprocessedData-v004")), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureProcessedNoData-v005"), + ImmutableMap.of("cacheTime", timeInFuture, "processedCount", 1, "cacheResults", "{}"), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureProcessedData-v006"), + ImmutableMap.of( + "cacheTime", + timeInFuture, + "processedCount", + 1, + "cacheResults", + serverGroupCacheData("cachedInFutureProcessedData-v006")), + ImmutableMap.of())); + GoogleRegionalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); +
+ // The already-processed item that was still lying around from a previous run should get + // evicted. + assertThat(cacheResult.getEvictions().get(ON_DEMAND.getNs())) + .containsExactlyInAnyOrder(serverGroupKey("cachedInPastProcessed-v002")); + + // These things weren't handled. The first was ignored because it was created before our caching + // run started. The second and third were ignored because they didn't have any data attached. + // They'll both have their "processedCount" incremented and get cleared in the next caching run + // (since they will then be already-processed items from the past, as above). + Collection<CacheData> onDemandData = cacheResult.getCacheResults().get(ON_DEMAND.getNs()); + assertThat(onDemandData) + .extracting(CacheData::getId) + .containsExactlyInAnyOrder( + serverGroupKey("cachedInPastUnprocessed-v001"), + serverGroupKey("cachedInFutureUnprocessedNoData-v003"), + serverGroupKey("cachedInFutureProcessedNoData-v005")); + CacheData cachedInPastUnprocessed = + getKeyedItem(onDemandData, serverGroupKey("cachedInPastUnprocessed-v001")); + assertThat(cachedInPastUnprocessed.getAttributes()).contains(entry("processedCount", 1)); + CacheData cachedInFutureUnprocessedNoData = + getKeyedItem(onDemandData, serverGroupKey("cachedInFutureUnprocessedNoData-v003")); + assertThat(cachedInFutureUnprocessedNoData.getAttributes()) + .contains(entry("processedCount", 1)); + CacheData cachedInFutureProcessed = + getKeyedItem(onDemandData, serverGroupKey("cachedInFutureProcessedNoData-v005")); + assertThat(cachedInFutureProcessed.getAttributes()).contains(entry("processedCount", 2)); + + // Finally, these items, which contain on-demand data that was inserted in the middle of our + // caching run, should have their cache results copied from the on-demand data. Further + // validation of how this works is in the test below.
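+ // (A paraphrase of the rule exercised above, not the agent's actual code: an + // on-demand entry is evicted when processedCount > 0 and its cacheTime predates + // the current caching run; it is kept, with processedCount incremented, when it + // is unprocessed or carries no usable cacheResults; and its cacheResults are + // copied into the main results only when they are present and cacheTime is newer + // than the start of the run.)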
+ CacheData cachedInFutureUnprocessedData = + getKeyedItem( + cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()), + serverGroupKey("cachedInFutureUnprocessedData-v004")); + assertThat(cachedInFutureUnprocessedData.getAttributes()).containsKeys("copiedFromCacheData"); + CacheData cachedInFutureProcessedData = + getKeyedItem( + cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()), + serverGroupKey("cachedInFutureProcessedData-v006")); + assertThat(cachedInFutureProcessedData.getAttributes()).containsKeys("copiedFromCacheData"); + } + + private String serverGroupCacheData(String serverGroupName) throws JsonProcessingException { + return objectMapper.writeValueAsString( + ImmutableMap.of( + SERVER_GROUPS.getNs(), + ImmutableList.of( + new DefaultCacheData( + serverGroupKey(serverGroupName), + ImmutableMap.of("copiedFromCacheData", true), + ImmutableMap.of())))); + } + + @Test + void loadData_copyFromOnDemandBehavior() throws Exception { + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager("overwritten-v001")) + .setInstances(instance("overwritten-v001-abcd")) + .create(); + + long timeInFuture = System.currentTimeMillis() + 100000; + + ImmutableMap<String, ImmutableList<String>> instanceRelationshipFromOnDemandCache = + ImmutableMap.of(INSTANCES.getNs(), ImmutableList.of(instanceKey("overwritten-v001-efgh"))); + + Moniker moniker = moniker("overwritten-v001"); + + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("overwritten-v001"), + ImmutableMap.of( + "cacheTime", + timeInFuture, + "processedCount", + 0, + "cacheResults", + objectMapper.writeValueAsString( + ImmutableMap.of( + APPLICATIONS.getNs(), + ImmutableList.of( + new DefaultCacheData( + applicationKey(moniker), + ImmutableMap.of("onDemandAttribute", "application"), + instanceRelationshipFromOnDemandCache)), + SERVER_GROUPS.getNs(), + ImmutableList.of( + new DefaultCacheData( + serverGroupKey("overwritten-v001"), + ImmutableMap.of("onDemandAttribute", "serverGroup"), + instanceRelationshipFromOnDemandCache))))), + ImmutableMap.of())); + GoogleRegionalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + + // This item, which was put into on-demand during our caching run, and which contains some valid + // data, should get copied over to the main results. For application, cluster, and load balancer + // data, the previous attributes get replaced, but relationships get merged. + CacheData application = + getKeyedItem( + cacheResult.getCacheResults().get(APPLICATIONS.getNs()), applicationKey(moniker)); + // Verify that keys from the loaded application are wiped out in favor of the keys from the + // on-demand cache + assertThat(application.getAttributes()).doesNotContainKey("name"); + assertThat(application.getAttributes().get("onDemandAttribute")).isEqualTo("application"); + // -abcd comes from the original cache, -efgh comes from the on-demand cache + assertThat(application.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder( + instanceKey("overwritten-v001-abcd"), instanceKey("overwritten-v001-efgh")); + + // The cluster didn't have an entry in the on-demand cache, so it should just have the data we + // loaded from GCE.
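+ // (Illustrative of the merge rule: the application's INSTANCES relationship above + // is the union of the GCE-loaded key [-abcd] and the on-demand key [-efgh]; the + // cluster below, which has no on-demand entry, keeps only the GCE-loaded [-abcd].)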
+ CacheData cluster = + getKeyedItem(cacheResult.getCacheResults().get(CLUSTERS.getNs()), clusterKey(moniker)); + assertThat(cluster.getAttributes().get("name")).isEqualTo("overwritten"); + assertThat(cluster.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder(instanceKey("overwritten-v001-abcd")); + + // Unlike the application, cluster, and load balancers, the server group does NOT get its + // relationships merged. It just uses the relationships from the on-demand server group. + // But why, you ask? ¯\_(ツ)_/¯ + CacheData serverGroup = + getKeyedItem( + cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()), + serverGroupKey("overwritten-v001")); + // Verify that keys from the loaded server group are wiped out in favor of the keys from the + // on-demand cache + assertThat(serverGroup.getAttributes()).doesNotContainKey("name"); + assertThat(serverGroup.getAttributes().get("onDemandAttribute")).isEqualTo("serverGroup"); + assertThat(serverGroup.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder(instanceKey("overwritten-v001-efgh")); + } + + @Test + void pendingOnDemandRequests() { + ProviderCache providerCache = inMemoryProviderCache(); + String applicationKey = Keys.getApplicationKey("application"); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(applicationKey)); + String clusterKey = Keys.getClusterKey(ACCOUNT_NAME, "cluster", "cluster"); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(clusterKey)); + String loadBalancerKey = Keys.getLoadBalancerKey(REGION, ACCOUNT_NAME, "loadBalancer"); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(loadBalancerKey)); + String zonalServerGroupKey = + Keys.getServerGroupKey("mig1-v001", "mig1", ACCOUNT_NAME, REGION, ZONE); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(zonalServerGroupKey)); + String ownedServerGroupKey = Keys.getServerGroupKey("mig2-v002", "mig2", ACCOUNT_NAME, REGION); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(ownedServerGroupKey)); + String differentAccountServerGroupKey = + Keys.getServerGroupKey("mig1-v001", "mig1", "someOtherAccount", REGION, ZONE); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(differentAccountServerGroupKey)); + String differentRegionServerGroupKey = + Keys.getServerGroupKey("mig1-v001", "mig1", ACCOUNT_NAME, "someOtherRegion", ZONE); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(differentRegionServerGroupKey)); + + GoogleRegionalServerGroupCachingAgent cachingAgent = + createCachingAgent(new StubComputeFactory().create()); + Collection<Map<String, Object>> pendingRequests = + cachingAgent.pendingOnDemandRequests(providerCache); + + assertThat(pendingRequests).hasSize(1); + assertThat(getOnlyElement(pendingRequests)) + .contains(entry("details", Keys.parse(ownedServerGroupKey))); + } + + @Test + void pendingOnDemandRequests_attributes() { + ProviderCache providerCache = inMemoryProviderCache(); + String key = Keys.getServerGroupKey("mig1-v001", "mig1", ACCOUNT_NAME, REGION); + providerCache.putCacheData( + ON_DEMAND.getNs(), + cacheData( + key, + ImmutableMap.of( + "moniker", moniker("mig1-v001"), + "cacheTime", 12345, + "processedCount", 3, + "processedTime", 67890))); + + GoogleRegionalServerGroupCachingAgent cachingAgent = + createCachingAgent(new StubComputeFactory().create()); + Collection<Map<String, Object>> pendingRequests = + cachingAgent.pendingOnDemandRequests(providerCache); + + assertThat(pendingRequests).hasSize(1); + assertThat(getOnlyElement(pendingRequests)) + .containsOnly( + entry("details", Keys.parse(key)),
+ entry("moniker", moniker("mig1-v001")), + entry("cacheTime", 12345), + entry("processedCount", 3), + entry("processedTime", 67890)); + } + + @Test + void handle_serverGroupDoesNotExistAndIsNotInCache() { + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(serverGroupKey("myServerGroup"))); + + Compute compute = new StubComputeFactory().create(); + GoogleRegionalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + OnDemandResult result = + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myServerGroup", "account", ACCOUNT_NAME, "region", REGION)); + + assertThat(providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myServerGroup"))).isNull(); + + assertThat(result.getSourceAgentType()).isEqualTo(cachingAgent.getOnDemandAgentType()); + assertThat(result.getEvictions()) + .containsExactly( + entry(SERVER_GROUPS.getNs(), ImmutableList.of(serverGroupKey("myServerGroup")))); + assertThat(result.getAuthoritativeTypes()).isEmpty(); + assertThat(result.getCacheResult().getCacheResults().values()).allMatch(Collection::isEmpty); + assertThat(result.getCacheResult().getEvictions()).isEmpty(); + } + + @Test + void handle_serverGroupDoesNotExistButIsInCache() { + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData(SERVER_GROUPS.getNs(), cacheData(serverGroupKey("myServerGroup"))); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(serverGroupKey("myServerGroup"))); + + Compute compute = new StubComputeFactory().create(); + GoogleRegionalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + OnDemandResult result = + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myServerGroup", "account", ACCOUNT_NAME, "region", REGION)); + + // It evicts the server group from ON_DEMAND, but not from SERVER_GROUPS + assertThat(providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myServerGroup"))).isNull(); + assertThat(providerCache.get(SERVER_GROUPS.getNs(), serverGroupKey("myServerGroup"))) + .isNotNull(); + + assertThat(result.getSourceAgentType()).isEqualTo(cachingAgent.getOnDemandAgentType()); + assertThat(result.getEvictions()) + .containsExactly( + entry(SERVER_GROUPS.getNs(), ImmutableList.of(serverGroupKey("myServerGroup")))); + assertThat(result.getAuthoritativeTypes()).isEmpty(); + assertThat(result.getCacheResult().getCacheResults().values()).allMatch(Collection::isEmpty); + assertThat(result.getCacheResult().getEvictions()).isEmpty(); + } + + @Test + void handle_serverGroupExists() throws IOException { + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers( + instanceGroupManager("myservergroup-v001") + .setInstanceTemplate( + "http://compute/global/instanceTemplates/my-instance-template")) + .create(); + GoogleRegionalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + ProviderCache providerCache = inMemoryProviderCache(); + OnDemandResult result = + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myservergroup-v001", + "account", ACCOUNT_NAME, + "region", REGION)); + + CacheData cacheData = + providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myservergroup-v001")); + Map<String, Collection<DefaultJsonCacheData>> cacheResults = + objectMapper.readValue( + (String) cacheData.getAttributes().get("cacheResults"), + new TypeReference<Map<String, Collection<DefaultJsonCacheData>>>() {}); + assertThat(cacheResults.get(SERVER_GROUPS.getNs())) + .extracting(data -> data.getAttributes().get("name")) +
.containsExactly("myservergroup-v001"); + + assertThat(result.getSourceAgentType()).isEqualTo(cachingAgent.getOnDemandAgentType()); + assertThat(result.getEvictions().values()).allMatch(Collection::isEmpty); + assertThat(result.getAuthoritativeTypes()).isEmpty(); + assertThat(result.getCacheResult().getCacheResults().get(SERVER_GROUPS.getNs())) + .extracting(data -> data.getAttributes().get("name")) + .containsExactly("myservergroup-v001"); + assertThat(result.getCacheResult().getEvictions()).isEmpty(); + } + + @Test + void pendingOnDemandRequestsPersistAcrossOneCachingCycle() { + + GoogleRegionalServerGroupCachingAgent cachingAgent = + createCachingAgent( + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager("myservergroup-v001")) + .create()); + + ProviderCache providerCache = inMemoryProviderCache(); + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myservergroup-v001", "account", ACCOUNT_NAME, "region", REGION)); + + Collection<Map<String, Object>> pendingRequests = + cachingAgent.pendingOnDemandRequests(providerCache); + assertThat(pendingRequests).hasSize(1); + assertThat(pendingRequests).extracting(map -> map.get("processedCount")).containsExactly(0); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + storeResultInProviderCache(cacheResult, providerCache, cachingAgent); + + pendingRequests = cachingAgent.pendingOnDemandRequests(providerCache); + assertThat(pendingRequests).hasSize(1); + assertThat(pendingRequests).extracting(map -> map.get("processedCount")).containsExactly(1); + + cacheResult = cachingAgent.loadData(providerCache); + storeResultInProviderCache(cacheResult, providerCache, cachingAgent); + + pendingRequests = cachingAgent.pendingOnDemandRequests(providerCache); + assertThat(pendingRequests).isEmpty(); + } + + private static void storeResultInProviderCache( + CacheResult cacheResult, + ProviderCache providerCache, + GoogleRegionalServerGroupCachingAgent cachingAgent) { + + ImmutableSet<String> authoritativeTypes = + cachingAgent.getProvidedDataTypes().stream() + .filter(type -> type.getAuthority().equals(Authority.AUTHORITATIVE)) + .map(AgentDataType::getTypeName) + .collect(toImmutableSet()); + providerCache.putCacheResult(cachingAgent.getAgentType(), authoritativeTypes, cacheResult); + } + + private static CacheData cacheData(String key) { + // InMemoryCache will ignore this if it doesn't have at least one attribute + return new DefaultCacheData(key, ImmutableMap.of("attribute", "value"), ImmutableMap.of()); + } + + private static CacheData cacheData(String key, Map<String, Object> attributes) { + return new DefaultCacheData(key, attributes, ImmutableMap.of()); + } + + private static CacheData getNamedItem(Collection<CacheData> items, String name) { + return items.stream() + .filter(item -> item.getAttributes().get("name").equals(name)) + .findAny() + .orElseThrow( + () -> new AssertionError(String.format("Couldn't find item named '%s'", name))); + } + + private static CacheData getKeyedItem(Collection<CacheData> items, String key) { + return items.stream() + .filter(item -> item.getId().equals(key)) + .findAny() + .orElseThrow( + () -> new AssertionError(String.format("Couldn't find item with key '%s'", key))); + } + + public static GoogleRegionalServerGroupCachingAgent createCachingAgent(Compute compute) { + return new GoogleRegionalServerGroupCachingAgent( + new GoogleNamedAccountCredentials.Builder() + .project(PROJECT) + .name(ACCOUNT_NAME) + .compute(compute) + .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE))) + .build(), + new
GoogleComputeApiFactory( + new GoogleOperationPoller(), + new DefaultRegistry(), + "user-agent", + MoreExecutors.listeningDecorator(Executors.newCachedThreadPool())), + new DefaultRegistry(), + REGION, + new ObjectMapper()); + } + + private static InstanceGroupManager instanceGroupManager(String name) { + return new InstanceGroupManager() + .setName(name) + .setBaseInstanceName(name + "-") + .setRegion(REGION_URL); + } + + private static Instance instance(String name) { + return new Instance().setName(name).setZone(ZONE); + } + + private static Moniker moniker(String serverGroupName) { + return NAMER.deriveMoniker(new GoogleServerGroup(serverGroupName)); + } + + private static String applicationKey(Moniker moniker) { + return Keys.getApplicationKey(moniker.getApp()); + } + + private static String clusterKey(Moniker moniker) { + return Keys.getClusterKey(ACCOUNT_NAME, moniker.getApp(), moniker.getCluster()); + } + + private static String serverGroupKey(String serverGroupName) { + Moniker moniker = moniker(serverGroupName); + return Keys.getServerGroupKey(serverGroupName, moniker.getCluster(), ACCOUNT_NAME, REGION); + } + + private static String instanceKey(String instanceName) { + return Keys.getInstanceKey(ACCOUNT_NAME, REGION, instanceName); + } + + private static DefaultProviderCache inMemoryProviderCache() { + return new DefaultProviderCache(new InMemoryCache()); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgentSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgentSpec.groovy index 2b9fa1ac4b8..b1059464531 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgentSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSecurityGroupCachingAgentSpec.groovy @@ -42,9 +42,9 @@ class GoogleSecurityGroupCachingAgentSpec extends Specification { def firewallsMock = Mock(Compute.Firewalls) def firewallsListMock = Mock(Compute.Firewalls.List) def securityGroupA = new Firewall(name: 'name-a', - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a') + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a') def securityGroupB = new Firewall(name: 'name-b', - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-b') + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-b') def keyGroupA = Keys.getSecurityGroupKey(securityGroupA.name as String, securityGroupA.name as String, REGION, @@ -85,4 +85,44 @@ class GoogleSecurityGroupCachingAgentSpec extends Specification { 1 * providerCache.getAll("onDemand", [keyGroupA, keyGroupB]) 0 * _ } + + void "should cache project name as an attribute along with firewalls"(){ + setup: + def registry = new DefaultRegistry() + def computeMock = Mock(Compute) + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).name(ACCOUNT_NAME).compute(computeMock).build() + def firewallsMock = Mock(Compute.Firewalls) + def firewallsListMock = Mock(Compute.Firewalls.List) + def securityGroupA = new Firewall(name: 'name-a', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a') + def keyGroupA = Keys.getSecurityGroupKey(securityGroupA.name as 
String, + securityGroupA.name as String, + REGION, + ACCOUNT_NAME) + + def ProviderCache providerCache = Mock(ProviderCache) + @Subject GoogleSecurityGroupCachingAgent agent = new GoogleSecurityGroupCachingAgent("testApplicationName", + credentials, + new ObjectMapper(), + registry) + agent.registry = registry + + when: + def cache = agent.loadData(providerCache) + + then: + 1 * computeMock.firewalls() >> firewallsMock + 1 * firewallsMock.list(PROJECT_NAME) >> firewallsListMock + 1 * firewallsListMock.execute() >> new FirewallList(items: [securityGroupA]) + with(cache.cacheResults.get(Keys.Namespace.SECURITY_GROUPS.ns)) { Collection cd -> + cd.every { + Map attributes = it.getAttributes() + attributes.get("project") == "my-project" + } + } + 1 * providerCache.getAll("onDemand", [keyGroupA]) + 0 * _ + } + } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleServerGroupCachingAgentSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleServerGroupCachingAgentSpec.groovy deleted file mode 100644 index 4cb70cf86be..00000000000 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleServerGroupCachingAgentSpec.groovy +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.google.provider.agent - -import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup -import spock.lang.Specification -import spock.lang.Unroll - -class GoogleServerGroupCachingAgentSpec extends Specification { - private static final String BUILD_HOST = "http://some-jenkins-host:8080/" - - def "should not set build info if no image description is found"() { - setup: - GoogleServerGroup googleServerGroup = new GoogleServerGroup() - - when: - GoogleZonalServerGroupCachingAgent.extractBuildInfo(null, googleServerGroup) - - then: - !googleServerGroup.buildInfo - - when: - GoogleZonalServerGroupCachingAgent.extractBuildInfo("", googleServerGroup) - - then: - !googleServerGroup.buildInfo - } - - def "should not set build info if no relevant image description is found"() { - setup: - GoogleServerGroup googleServerGroup = new GoogleServerGroup() - - when: - GoogleZonalServerGroupCachingAgent.extractBuildInfo("Some non-appversion image description...", googleServerGroup) - - then: - !googleServerGroup.buildInfo - - when: - GoogleZonalServerGroupCachingAgent.extractBuildInfo("SomeKey1: SomeValue1, SomeKey2: SomeValue2", googleServerGroup) - - then: - !googleServerGroup.buildInfo - } - - def "should set build host if image description contains appversion and build_host"() { - setup: - GoogleServerGroup googleServerGroup = new GoogleServerGroup() - - when: - GoogleZonalServerGroupCachingAgent.extractBuildInfo( - "appversion: somepackage-1.0.0-586499.h150/WE-WAPP-somepackage/150, build_host: $BUILD_HOST", - googleServerGroup) - - then: - with(googleServerGroup.buildInfo) { - package_name == "somepackage" - version == "1.0.0" - commit == "586499" - jenkins == [ - name: "WE-WAPP-somepackage", - number: "150", - host: BUILD_HOST - ] - } - } - - @Unroll - def "should sort disks so boot disk is first persistent disk"() { - setup: - def launchConfig = [instanceTemplate: [properties: [disks: disks]]] - GoogleServerGroup googleServerGroup = new GoogleServerGroup(launchConfig: launchConfig) - - when: - GoogleZonalServerGroupCachingAgent.sortWithBootDiskFirst(googleServerGroup) - - then: - googleServerGroup.launchConfig.instanceTemplate.properties.disks == sortedWithBootFirst - - where: - disks || sortedWithBootFirst - [[boot: true, type: 'PERSISTENT']] || [[boot: true, type: 'PERSISTENT']] - [[boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: true, type: 'PERSISTENT', source: 'disk-url-2']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-1']] - [[boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - - // Mix in a SCRATCH 
disk. - [[boot: true, type: 'PERSISTENT'], [boot: false, type: 'SCRATCH']] || [[boot: true, type: 'PERSISTENT'], [boot: false, type: 'SCRATCH']] - [[boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'SCRATCH']] - [[boot: false, type: 'SCRATCH'], [boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: false, type: 'SCRATCH'], [boot: true, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: true, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'SCRATCH'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - - // Boot disk missing (really shouldn't happen, but want to ensure we don't disturb the results). - [[boot: false, type: 'PERSISTENT']] || [[boot: false, type: 'PERSISTENT']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2']] || [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - - // Mix in a SCRATCH disk and Boot disk missing. 
- [[boot: false, type: 'PERSISTENT'], [boot: false, type: 'SCRATCH']] || [[boot: false, type: 'PERSISTENT'], [boot: false, type: 'SCRATCH']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH']] || [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH']] - [[boot: false, type: 'SCRATCH'], [boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: false, type: 'SCRATCH'], [boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] || [[boot: false, type: 'PERSISTENT', source: 'disk-url-1'], [boot: false, type: 'PERSISTENT', source: 'disk-url-2'], [boot: false, type: 'SCRATCH'], [boot: false, type: 'PERSISTENT', source: 'disk-url-3']] - } - - @Unroll - def "malformed instance properties shouldn't break disk sorting logic"() { - setup: - def launchConfig = [instanceTemplate: instanceTemplate] - GoogleServerGroup googleServerGroup = new GoogleServerGroup(launchConfig: launchConfig) - - when: - GoogleZonalServerGroupCachingAgent.sortWithBootDiskFirst(googleServerGroup) - - then: - googleServerGroup.launchConfig.instanceTemplate == instanceTemplate - - where: - instanceTemplate << [ - null, - [properties: null], - [properties: [:]], - [properties: [disks: null]], - [properties: [disks: []]] - ] - } -} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgentSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgentSpec.groovy new file mode 100644 index 00000000000..cf33493aff0 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleSubnetCachingAgentSpec.groovy @@ -0,0 +1,56 @@ +package com.netflix.spinnaker.clouddriver.google.provider.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.Subnetwork +import com.google.api.services.compute.model.SubnetworkList +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.google.cache.Keys +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import spock.lang.Specification +import spock.lang.Subject + +class GoogleSubnetCachingAgentSpec extends Specification { + static final String PROJECT_NAME = "my-project" + static final String REGION = 'us-east1' + static final String ACCOUNT_NAME = 'some-account-name' + + void "should add subnets and cache project name as an attribute to cacheData"() { + setup: + def registry = new DefaultRegistry() + def computeMock = Mock(Compute) + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).name(ACCOUNT_NAME).compute(computeMock).build() + def subnetsMock = Mock(Compute.Subnetworks) + def 
subnetworksListMock = Mock(Compute.Subnetworks.List) + def subnetA = new Subnetwork(name: 'name-a', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/us-east1/subnetworks/name-a') + def keyGroupA = Keys.getSubnetKey(subnetA.name as String, + REGION, + ACCOUNT_NAME) + def subnetsListReal = new SubnetworkList(items: [subnetA]) + def ProviderCache providerCache = Mock(ProviderCache) + @Subject GoogleSubnetCachingAgent agent = new GoogleSubnetCachingAgent("testApplicationName", + credentials, + new ObjectMapper(), + registry, REGION) + + when: + def cache = agent.loadData(providerCache) + + then: + 1 * computeMock.subnetworks() >> subnetsMock + 1 * subnetsMock.list(PROJECT_NAME, REGION) >> subnetworksListMock + 1 * subnetworksListMock.execute() >> subnetsListReal + def cd = cache.cacheResults.get(Keys.Namespace.SUBNETS.ns) + cd.id.containsAll([keyGroupA]) + with(cd.asList().get(0)) { + def attributes = it.attributes + attributes.project == "my-project" + attributes.subnet.name == "name-a" + attributes.subnet.selfLink == "https://compute.googleapis.com/compute/v1/projects/my-project/us-east1/subnetworks/name-a" + } + } + +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgentTest.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgentTest.java new file mode 100644 index 00000000000..f9292340c24 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleZonalServerGroupCachingAgentTest.java @@ -0,0 +1,559 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.google.common.collect.Iterables.getOnlyElement; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.CLUSTERS; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.INSTANCES; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.ON_DEMAND; +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceGroupManager; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.MoreExecutors; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.AgentDataType.Authority; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.DefaultJsonCacheData; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.cats.provider.DefaultProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandResult; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.compute.GoogleComputeApiFactory; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.model.GoogleLabeledResource; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.names.GoogleLabeledResourceNamer; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.names.NamingStrategy; +import com.netflix.spinnaker.moniker.Moniker; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.Executors; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +final class GoogleZonalServerGroupCachingAgentTest { + + private static final NamingStrategy<GoogleLabeledResource> NAMER = + new GoogleLabeledResourceNamer(); + + private static final String ACCOUNT_NAME = "partypups"; + private static final String PROJECT = "myproject"; + private static final String REGION = "myregion"; + private static final String ZONE = REGION + "-myzone"; + private static final String ZONE_URL = "http://compute/zones/" + ZONE; + + private ObjectMapper objectMapper; + + @BeforeEach + public void createTestObjects() { + objectMapper = new ObjectMapper(); + } + + @Test + void loadData_existingOnDemandData() throws JsonProcessingException { + + Compute compute = + new StubComputeFactory()
.setInstanceGroupManagers( + instanceGroupManager("cachedInPastUnprocessed-v001"), + instanceGroupManager("cachedInPastProcessed-v002"), + instanceGroupManager("cachedInFutureUnprocessedNoData-v003"), + instanceGroupManager("cachedInFutureUnprocessedData-v004"), + instanceGroupManager("cachedInFutureProcessedNoData-v005"), + instanceGroupManager("cachedInFutureProcessedData-v006")) + .create(); + + long timeInPast = System.currentTimeMillis() - 100000; + long timeInFuture = System.currentTimeMillis() + 100000; + + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInPastUnprocessed-v001"), + ImmutableMap.of("cacheTime", timeInPast, "processedCount", 0), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInPastProcessed-v002"), + ImmutableMap.of("cacheTime", timeInPast, "processedCount", 1), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureUnprocessedNoData-v003"), + ImmutableMap.of("cacheTime", timeInFuture, "processedCount", 0, "cacheResults", "{}"), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureUnprocessedData-v004"), + ImmutableMap.of( + "cacheTime", + timeInFuture, + "processedCount", + 0, + "cacheResults", + serverGroupCacheData("cachedInFutureUnprocessedData-v004")), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureProcessedNoData-v005"), + ImmutableMap.of("cacheTime", timeInFuture, "processedCount", 1, "cacheResults", "{}"), + ImmutableMap.of())); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("cachedInFutureProcessedData-v006"), + ImmutableMap.of( + "cacheTime", + timeInFuture, + "processedCount", + 1, + "cacheResults", + serverGroupCacheData("cachedInFutureProcessedData-v006")), + ImmutableMap.of())); + GoogleZonalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + + // The already-processed item that was still lying around from a previous run should get + // evicted. + assertThat(cacheResult.getEvictions().get(ON_DEMAND.getNs())) + .containsExactlyInAnyOrder(serverGroupKey("cachedInPastProcessed-v002")); + + // These things weren't handled. The first was ignored because it was created before our caching + // run started. The second and third were ignored because they didn't have any data attached. + // They'll both have their "processedCount" incremented and get cleared in the next caching run + // (since they will then be already-processed items from the past, as above).
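+ // (This mirrors the regional agent's test earlier in this change; the zonal agent + // applies the same on-demand bookkeeping, just keyed by zone-qualified + // server-group keys.)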
+ Collection<CacheData> onDemandData = cacheResult.getCacheResults().get(ON_DEMAND.getNs()); + assertThat(onDemandData) + .extracting(CacheData::getId) + .containsExactlyInAnyOrder( + serverGroupKey("cachedInPastUnprocessed-v001"), + serverGroupKey("cachedInFutureUnprocessedNoData-v003"), + serverGroupKey("cachedInFutureProcessedNoData-v005")); + CacheData cachedInPastUnprocessed = + getKeyedItem(onDemandData, serverGroupKey("cachedInPastUnprocessed-v001")); + assertThat(cachedInPastUnprocessed.getAttributes()).contains(entry("processedCount", 1)); + CacheData cachedInFutureUnprocessedNoData = + getKeyedItem(onDemandData, serverGroupKey("cachedInFutureUnprocessedNoData-v003")); + assertThat(cachedInFutureUnprocessedNoData.getAttributes()) + .contains(entry("processedCount", 1)); + CacheData cachedInFutureProcessed = + getKeyedItem(onDemandData, serverGroupKey("cachedInFutureProcessedNoData-v005")); + assertThat(cachedInFutureProcessed.getAttributes()).contains(entry("processedCount", 2)); + + // Finally, these items, which contain on-demand data that was inserted in the middle of our + // caching run, should have their cache results copied from the on-demand data. Further + // validation of how this works is in the test below. + CacheData cachedInFutureUnprocessedData = + getKeyedItem( + cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()), + serverGroupKey("cachedInFutureUnprocessedData-v004")); + assertThat(cachedInFutureUnprocessedData.getAttributes()).containsKeys("copiedFromCacheData"); + CacheData cachedInFutureProcessedData = + getKeyedItem( + cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()), + serverGroupKey("cachedInFutureProcessedData-v006")); + assertThat(cachedInFutureProcessedData.getAttributes()).containsKeys("copiedFromCacheData"); + } + + private String serverGroupCacheData(String serverGroupName) throws JsonProcessingException { + return objectMapper.writeValueAsString( + ImmutableMap.of( + SERVER_GROUPS.getNs(), + ImmutableList.of( + new DefaultCacheData( + serverGroupKey(serverGroupName), + ImmutableMap.of("copiedFromCacheData", true), + ImmutableMap.of())))); + } + + @Test + void loadData_copyFromOnDemandBehavior() throws Exception { + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager("overwritten-v001")) + .setInstances(instance("overwritten-v001-abcd")) + .create(); + + long timeInFuture = System.currentTimeMillis() + 100000; + + ImmutableMap<String, ImmutableList<String>> instanceRelationshipFromOnDemandCache = + ImmutableMap.of(INSTANCES.getNs(), ImmutableList.of(instanceKey("overwritten-v001-efgh"))); + + Moniker moniker = moniker("overwritten-v001"); + + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData( + ON_DEMAND.getNs(), + new DefaultCacheData( + serverGroupKey("overwritten-v001"), + ImmutableMap.of( + "cacheTime", + timeInFuture, + "processedCount", + 0, + "cacheResults", + objectMapper.writeValueAsString( + ImmutableMap.of( + APPLICATIONS.getNs(), + ImmutableList.of( + new DefaultCacheData( + applicationKey(moniker), + ImmutableMap.of("onDemandAttribute", "application"), + instanceRelationshipFromOnDemandCache)), + SERVER_GROUPS.getNs(), + ImmutableList.of( + new DefaultCacheData( + serverGroupKey("overwritten-v001"), + ImmutableMap.of("onDemandAttribute", "serverGroup"), + instanceRelationshipFromOnDemandCache))))), + ImmutableMap.of())); + GoogleZonalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + + // This item, which
was put into on-demand during our caching run, and which contains some valid + // data, should get copied over to the main results. For application, cluster, and load balancer + // data, the previous attributes get replaced, but relationships get merged. + CacheData application = + getKeyedItem( + cacheResult.getCacheResults().get(APPLICATIONS.getNs()), applicationKey(moniker)); + // Verify that keys from the loaded application are wiped out in favor of the keys from the + // on-demand cache + assertThat(application.getAttributes()).doesNotContainKey("name"); + assertThat(application.getAttributes().get("onDemandAttribute")).isEqualTo("application"); + // -abcd comes from the original cache, -efgh comes from the on-demand cache + assertThat(application.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder( + instanceKey("overwritten-v001-abcd"), instanceKey("overwritten-v001-efgh")); + + // The cluster didn't have an entry in the on-demand cache, so it should just have the data we + // loaded from GCE. + CacheData cluster = + getKeyedItem(cacheResult.getCacheResults().get(CLUSTERS.getNs()), clusterKey(moniker)); + assertThat(cluster.getAttributes().get("name")).isEqualTo("overwritten"); + assertThat(cluster.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder(instanceKey("overwritten-v001-abcd")); + + // Unlike the application, cluster, and load balancers, the server group does NOT get its + // relationships merged. It just uses the relationships from the on-demand server group. + // But why, you ask? ¯\_(ツ)_/¯ + CacheData serverGroup = + getKeyedItem( + cacheResult.getCacheResults().get(SERVER_GROUPS.getNs()), + serverGroupKey("overwritten-v001")); + // Verify that keys from the loaded server group are wiped out in favor of the keys from the + // on-demand cache + assertThat(serverGroup.getAttributes()).doesNotContainKey("name"); + assertThat(serverGroup.getAttributes().get("onDemandAttribute")).isEqualTo("serverGroup"); + assertThat(serverGroup.getRelationships().get(INSTANCES.getNs())) + .containsExactlyInAnyOrder(instanceKey("overwritten-v001-efgh")); + } + + @Test + void pendingOnDemandRequests() { + ProviderCache providerCache = inMemoryProviderCache(); + String applicationKey = Keys.getApplicationKey("application"); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(applicationKey)); + String clusterKey = Keys.getClusterKey(ACCOUNT_NAME, "cluster", "cluster"); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(clusterKey)); + String loadBalancerKey = Keys.getLoadBalancerKey(REGION, ACCOUNT_NAME, "loadBalancer"); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(loadBalancerKey)); + String ownedServerGroupKey = + Keys.getServerGroupKey("mig1-v001", "mig1", ACCOUNT_NAME, REGION, ZONE); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(ownedServerGroupKey)); + String regionalServerGroupKey = + Keys.getServerGroupKey("mig2-v002", "mig2", ACCOUNT_NAME, REGION); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(regionalServerGroupKey)); + String differentAccountServerGroupKey = + Keys.getServerGroupKey("mig1-v001", "mig1", "someOtherAccount", REGION, ZONE); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(differentAccountServerGroupKey)); + String differentRegionServerGroupKey = + Keys.getServerGroupKey("mig1-v001", "mig1", ACCOUNT_NAME, "someOtherRegion", ZONE); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(differentRegionServerGroupKey)); + + GoogleZonalServerGroupCachingAgent cachingAgent = 
+ createCachingAgent(new StubComputeFactory().create()); + Collection<Map<String, Object>> pendingRequests = + cachingAgent.pendingOnDemandRequests(providerCache); + + assertThat(pendingRequests).hasSize(1); + assertThat(getOnlyElement(pendingRequests)) + .contains(entry("details", Keys.parse(ownedServerGroupKey))); + } + + @Test + void pendingOnDemandRequests_attributes() { + ProviderCache providerCache = inMemoryProviderCache(); + String key = Keys.getServerGroupKey("mig1-v001", "mig1", ACCOUNT_NAME, REGION, ZONE); + providerCache.putCacheData( + ON_DEMAND.getNs(), + cacheData( + key, + ImmutableMap.of( + "moniker", moniker("mig1-v001"), + "cacheTime", 12345, + "processedCount", 3, + "processedTime", 67890))); + + GoogleZonalServerGroupCachingAgent cachingAgent = + createCachingAgent(new StubComputeFactory().create()); + Collection<Map<String, Object>> pendingRequests = + cachingAgent.pendingOnDemandRequests(providerCache); + + assertThat(pendingRequests).hasSize(1); + assertThat(getOnlyElement(pendingRequests)) + .containsOnly( + entry("details", Keys.parse(key)), + entry("moniker", moniker("mig1-v001")), + entry("cacheTime", 12345), + entry("processedCount", 3), + entry("processedTime", 67890)); + } + + @Test + void handle_serverGroupDoesNotExistAndIsNotInCache() { + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(serverGroupKey("myServerGroup"))); + + Compute compute = new StubComputeFactory().create(); + GoogleZonalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + OnDemandResult result = + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myServerGroup", "account", ACCOUNT_NAME, "region", REGION)); + + // Since there wasn't a matching server group under the provider cache's SERVER_GROUPS key, we + // leave the ON_DEMAND server group here.
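+ // (Note the divergence from the regional agent's test earlier in this change, + // which evicts the ON_DEMAND entry and reports a SERVER_GROUPS eviction in this + // same situation.)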
+ assertThat(providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myServerGroup"))).isNotNull(); + + assertThat(result.getSourceAgentType()).isEqualTo(cachingAgent.getOnDemandAgentType()); + assertThat(result.getEvictions().values()).allMatch(Collection::isEmpty); + assertThat(result.getAuthoritativeTypes()).isEmpty(); + assertThat(result.getCacheResult().getCacheResults().values()).allMatch(Collection::isEmpty); + assertThat(result.getCacheResult().getEvictions()).isEmpty(); + } + + @Test + void handle_serverGroupDoesNotExistButIsInCache() { + ProviderCache providerCache = inMemoryProviderCache(); + providerCache.putCacheData(SERVER_GROUPS.getNs(), cacheData(serverGroupKey("myServerGroup"))); + providerCache.putCacheData(ON_DEMAND.getNs(), cacheData(serverGroupKey("myServerGroup"))); + + Compute compute = new StubComputeFactory().create(); + GoogleZonalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + OnDemandResult result = + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myServerGroup", "account", ACCOUNT_NAME, "region", REGION)); + + // It evicts the server group from ON_DEMAND, but not from SERVER_GROUPS + assertThat(providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myServerGroup"))).isNull(); + assertThat(providerCache.get(SERVER_GROUPS.getNs(), serverGroupKey("myServerGroup"))) + .isNotNull(); + + assertThat(result.getSourceAgentType()).isEqualTo(cachingAgent.getOnDemandAgentType()); + assertThat(result.getEvictions()) + .containsExactly( + entry(SERVER_GROUPS.getNs(), ImmutableList.of(serverGroupKey("myServerGroup")))); + assertThat(result.getAuthoritativeTypes()).isEmpty(); + assertThat(result.getCacheResult().getCacheResults().values()).allMatch(Collection::isEmpty); + assertThat(result.getCacheResult().getEvictions()).isEmpty(); + } + + @Test + void handle_serverGroupExists() throws IOException { + Compute compute = + new StubComputeFactory() + .setInstanceGroupManagers( + instanceGroupManager("myservergroup-v001") + .setInstanceTemplate( + "http://compute/global/instanceTemplates/my-instance-template")) + .create(); + GoogleZonalServerGroupCachingAgent cachingAgent = createCachingAgent(compute); + ProviderCache providerCache = inMemoryProviderCache(); + OnDemandResult result = + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myservergroup-v001", + "account", ACCOUNT_NAME, + "region", REGION)); + + CacheData cacheData = + providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myservergroup-v001")); + Map<String, Collection<DefaultJsonCacheData>> cacheResults = + objectMapper.readValue( + (String) cacheData.getAttributes().get("cacheResults"), + new TypeReference<Map<String, Collection<DefaultJsonCacheData>>>() {}); + assertThat(cacheResults.get(SERVER_GROUPS.getNs())) + .extracting(data -> data.getAttributes().get("name")) + .containsExactly("myservergroup-v001"); + + assertThat(result.getSourceAgentType()).isEqualTo(cachingAgent.getOnDemandAgentType()); + assertThat(result.getEvictions().values()).allMatch(Collection::isEmpty); + assertThat(result.getAuthoritativeTypes()).isEmpty(); + assertThat(result.getCacheResult().getCacheResults().get(SERVER_GROUPS.getNs())) + .extracting(data -> data.getAttributes().get("name")) + .containsExactly("myservergroup-v001"); + assertThat(result.getCacheResult().getEvictions()).isEmpty(); + } + + @Test + void pendingOnDemandRequestsPersistAcrossOneCachingCycle() { + + GoogleZonalServerGroupCachingAgent cachingAgent = + createCachingAgent( + new StubComputeFactory() + .setInstanceGroupManagers(instanceGroupManager("myservergroup-v001")) +
.create()); + + ProviderCache providerCache = inMemoryProviderCache(); + cachingAgent.handle( + providerCache, + ImmutableMap.of( + "serverGroupName", "myservergroup-v001", "account", ACCOUNT_NAME, "region", REGION)); + + Collection<Map<String, Object>> pendingRequests = + cachingAgent.pendingOnDemandRequests(providerCache); + assertThat(pendingRequests).hasSize(1); + assertThat(pendingRequests).extracting(map -> map.get("processedCount")).containsExactly(0); + + CacheResult cacheResult = cachingAgent.loadData(providerCache); + storeResultInProviderCache(cacheResult, providerCache, cachingAgent); + + pendingRequests = cachingAgent.pendingOnDemandRequests(providerCache); + assertThat(pendingRequests).hasSize(1); + assertThat(pendingRequests).extracting(map -> map.get("processedCount")).containsExactly(1); + + cacheResult = cachingAgent.loadData(providerCache); + storeResultInProviderCache(cacheResult, providerCache, cachingAgent); + + pendingRequests = cachingAgent.pendingOnDemandRequests(providerCache); + assertThat(pendingRequests).isEmpty(); + } + + private static void storeResultInProviderCache( + CacheResult cacheResult, + ProviderCache providerCache, + GoogleZonalServerGroupCachingAgent cachingAgent) { + + ImmutableSet<String> authoritativeTypes = + cachingAgent.getProvidedDataTypes().stream() + .filter(type -> type.getAuthority().equals(Authority.AUTHORITATIVE)) + .map(AgentDataType::getTypeName) + .collect(toImmutableSet()); + providerCache.putCacheResult(cachingAgent.getAgentType(), authoritativeTypes, cacheResult); + } + + private static CacheData cacheData(String key) { + // InMemoryCache will ignore this if it doesn't have at least one attribute + return new DefaultCacheData(key, ImmutableMap.of("attribute", "value"), ImmutableMap.of()); + } + + private static CacheData cacheData(String key, Map<String, Object> attributes) { + return new DefaultCacheData(key, attributes, ImmutableMap.of()); + } + + private static CacheData getKeyedItem(Collection<CacheData> items, String key) { + return items.stream() + .filter(item -> item.getId().equals(key)) + .findAny() + .orElseThrow( + () -> new AssertionError(String.format("Couldn't find item with key '%s'", key))); + } + + public static GoogleZonalServerGroupCachingAgent createCachingAgent(Compute compute) { + return new GoogleZonalServerGroupCachingAgent( + new GoogleNamedAccountCredentials.Builder() + .project(PROJECT) + .name(ACCOUNT_NAME) + .compute(compute) + .regionToZonesMap(ImmutableMap.of(REGION, ImmutableList.of(ZONE))) + .build(), + new GoogleComputeApiFactory( + new GoogleOperationPoller(), + new DefaultRegistry(), + "user-agent", + MoreExecutors.listeningDecorator(Executors.newCachedThreadPool())), + new DefaultRegistry(), + REGION, + new ObjectMapper()); + } + + private static InstanceGroupManager instanceGroupManager(String name) { + return new InstanceGroupManager() + .setName(name) + .setBaseInstanceName(name + "-") + .setZone(ZONE_URL); + } + + private static Instance instance(String name) { + return new Instance().setName(name).setZone(ZONE); + } + + private static Moniker moniker(String serverGroupName) { + return NAMER.deriveMoniker(new GoogleServerGroup(serverGroupName)); + } + + private static String applicationKey(Moniker moniker) { + return Keys.getApplicationKey(moniker.getApp()); + } + + private static String clusterKey(Moniker moniker) { + return Keys.getClusterKey(ACCOUNT_NAME, moniker.getApp(), moniker.getCluster()); + } + + private static String serverGroupKey(String serverGroupName) { + Moniker moniker = moniker(serverGroupName); + return
Keys.getServerGroupKey( + serverGroupName, moniker.getCluster(), ACCOUNT_NAME, REGION, ZONE); + } + + private static String instanceKey(String instanceName) { + return Keys.getInstanceKey(ACCOUNT_NAME, REGION, instanceName); + } + + private static DefaultProviderCache inMemoryProviderCache() { + return new DefaultProviderCache(new InMemoryCache()); + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/StubComputeFactory.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/StubComputeFactory.java new file mode 100644 index 00000000000..3cb1fdae1d2 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/StubComputeFactory.java @@ -0,0 +1,421 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableListMultimap.toImmutableListMultimap; +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static org.assertj.core.api.Assertions.entry; + +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.googleapis.json.GoogleJsonErrorContainer; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.LowLevelHttpRequest; +import com.google.api.client.json.GenericJson; +import com.google.api.client.json.JsonFactory; +import com.google.api.client.json.gson.GsonFactory; +import com.google.api.client.testing.http.MockLowLevelHttpRequest; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.*; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A factory for {@link Compute} instances that handle (a subset of) Google Compute Engine API + * requests with a pre-specified set of objects. + */ +// At some point this could be turned into a proper fake by supporting POST requests for adding +// data, etc. Then we could use it for testing a lot more things, like tasks. For now, just +// providing simpler methods like setInstances() and using it more like a stub seems easier. 
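+// A minimal usage sketch (illustrative only; "mig1-v001" and zoneUrl below are placeholders
+// mirroring the constants used by the tests in this package, not part of this class):
+//
+//   Compute compute =
+//       new StubComputeFactory()
+//           .setInstanceGroupManagers(
+//               new InstanceGroupManager().setName("mig1-v001").setZone(zoneUrl))
+//           .create();
+//
+// A GET for .../zones/{zone}/instanceGroupManagers/mig1-v001 issued through that Compute
+// instance is then answered with the stubbed object serialized as JSON, while unmatched
+// requests produce a 404 JSON error response.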
+final class StubComputeFactory { + + private static final JsonFactory JSON_FACTORY = GsonFactory.getDefaultInstance(); + + private static final String COMPUTE_PATH_PREFIX = "/compute/[-.a-zA-Z0-9]+"; + + private static final String COMPUTE_PROJECT_PATH_PREFIX = + COMPUTE_PATH_PREFIX + "/projects/[-.a-zA-Z0-9]+"; + + private static final Pattern BATCH_COMPUTE_PATTERN = + Pattern.compile("/batch/compute/[-.a-zA-Z0-9]+"); + + private static final Pattern GET_ZONAL_IGM_PATTERN = + Pattern.compile( + COMPUTE_PROJECT_PATH_PREFIX + + "/zones/([-a-z0-9]+)/instanceGroupManagers/([-a-zA-Z0-9]+)"); + private static final Pattern LIST_ZONAL_IGM_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/zones/([-a-z0-9]+)/instanceGroupManagers"); + + private static final Pattern GET_REGIONAL_IGM_PATTERN = + Pattern.compile( + COMPUTE_PROJECT_PATH_PREFIX + + "/regions/([-a-z0-9]+)/instanceGroupManagers/([-a-zA-Z0-9]+)"); + private static final Pattern LIST_REGIONAL_IGM_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/regions/([-a-z0-9]+)/instanceGroupManagers"); + + private static final Pattern GET_INSTANCE_TEMPLATE_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/global/instanceTemplates/([-a-zA-Z0-9]+)"); + private static final Pattern LIST_INSTANCE_TEMPLATES_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/global/instanceTemplates"); + + private static final Pattern LIST_ZONAL_INSTANCES_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/zones/([-a-z0-9]+)/instances"); + private static final Pattern AGGREGATED_INSTANCES_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/aggregated/instances"); + + private static final Pattern GET_ZONAL_AUTOSCALER_PATTERN = + Pattern.compile( + COMPUTE_PROJECT_PATH_PREFIX + "/zones/([-a-z0-9]+)/autoscalers/([-a-zA-Z0-9]+)"); + private static final Pattern LIST_ZONAL_AUTOSCALERS_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/zones/([-a-z0-9]+)/autoscalers"); + private static final Pattern GET_REGIONAL_AUTOSCALER_PATTERN = + Pattern.compile( + COMPUTE_PROJECT_PATH_PREFIX + "/regions/([-a-z0-9]+)/autoscalers/([-a-zA-Z0-9]+)"); + private static final Pattern LIST_REGIONAL_AUTOSCALERS_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/regions/([-a-z0-9]+)/autoscalers"); + private static final Pattern AGGREGATED_AUTOSCALERS_PATTERN = + Pattern.compile(COMPUTE_PROJECT_PATH_PREFIX + "/aggregated/autoscalers"); + private static final Pattern GET_PROJECT_PATTERN = + Pattern.compile(COMPUTE_PATH_PREFIX + "/projects/([-.a-zA-Z0-9]+)"); + + private List<InstanceGroupManager> instanceGroupManagers = new ArrayList<>(); + private List<InstanceTemplate> instanceTemplates = new ArrayList<>(); + private List<Instance> instances = new ArrayList<>(); + private List<Autoscaler> autoscalers = new ArrayList<>(); + private List<Project> projects = new ArrayList<>(); + private Exception projectException; + + StubComputeFactory setInstanceGroupManagers(InstanceGroupManager... instanceGroupManagers) { + this.instanceGroupManagers = ImmutableList.copyOf(instanceGroupManagers); + return this; + } + + StubComputeFactory setInstanceTemplates(InstanceTemplate... instanceTemplates) { + this.instanceTemplates = ImmutableList.copyOf(instanceTemplates); + return this; + } + + StubComputeFactory setInstances(Instance... instances) { + this.instances = ImmutableList.copyOf(instances); + return this; + } + + StubComputeFactory setAutoscalers(Autoscaler... autoscalers) { + this.autoscalers = ImmutableList.copyOf(autoscalers); + return this; + } + + StubComputeFactory setProjects(Project...
projects) { + this.projects = ImmutableList.copyOf(projects); + return this; + } + + StubComputeFactory setProjectException(Exception projectException) { + this.projectException = projectException; + return this; + } + + Compute create() { + HttpTransport httpTransport = + new StubHttpTransport() + .addBatchRequestHandlerForPath(BATCH_COMPUTE_PATTERN) + .addGetResponse(GET_ZONAL_IGM_PATTERN, this::getInstanceGroupManager) + .addGetResponse( + LIST_ZONAL_IGM_PATTERN, + new PathBasedJsonResponseGenerator(this::instanceGroupManagerList)) + .addGetResponse(GET_REGIONAL_IGM_PATTERN, this::getRegionInstanceGroupManager) + .addGetResponse( + LIST_REGIONAL_IGM_PATTERN, + new PathBasedJsonResponseGenerator(this::regionInstanceGroupManagerList)) + .addGetResponse(GET_INSTANCE_TEMPLATE_PATTERN, this::getInstanceTemplate) + .addGetResponse(LIST_INSTANCE_TEMPLATES_PATTERN, this::instanceTemplateList) + .addGetResponse( + LIST_ZONAL_INSTANCES_PATTERN, + new PathBasedJsonResponseGenerator(this::instanceList)) + .addGetResponse(AGGREGATED_INSTANCES_PATTERN, this::instanceAggregatedList) + .addGetResponse(GET_ZONAL_AUTOSCALER_PATTERN, this::getZonalAutoscaler) + .addGetResponse( + LIST_ZONAL_AUTOSCALERS_PATTERN, + new PathBasedJsonResponseGenerator(this::zonalAutoscalerList)) + .addGetResponse(GET_REGIONAL_AUTOSCALER_PATTERN, this::getRegionalAutoscaler) + .addGetResponse( + LIST_REGIONAL_AUTOSCALERS_PATTERN, + new PathBasedJsonResponseGenerator(this::regionalAutoscalerList)) + .addGetResponse(AGGREGATED_AUTOSCALERS_PATTERN, this::autoscalerAggregatedList) + .addGetResponse(GET_PROJECT_PATTERN, this::project); + return new Compute( + httpTransport, GsonFactory.getDefaultInstance(), /* httpRequestInitializer= */ null); + } + + private MockLowLevelHttpResponse getInstanceGroupManager(MockLowLevelHttpRequest request) { + Matcher matcher = GET_ZONAL_IGM_PATTERN.matcher(getPath(request)); + checkState(matcher.matches()); + String zone = matcher.group(1); + String name = matcher.group(2); + return instanceGroupManagers.stream() + .filter(igm -> name.equals(igm.getName())) + .filter(igm -> zone.equals(Utils.getLocalName(igm.getZone()))) + .findFirst() + .map(StubComputeFactory::jsonResponse) + .orElse(errorResponse(404)); + } + + private InstanceGroupManagerList instanceGroupManagerList(String path) { + Matcher matcher = LIST_ZONAL_IGM_PATTERN.matcher(path); + checkState(matcher.matches()); + String zone = matcher.group(1); + return new InstanceGroupManagerList() + .setItems( + instanceGroupManagers.stream() + .filter(igm -> zone.equals(Utils.getLocalName(igm.getZone()))) + .collect(toImmutableList())); + } + + private MockLowLevelHttpResponse getRegionInstanceGroupManager(MockLowLevelHttpRequest request) { + Matcher matcher = GET_REGIONAL_IGM_PATTERN.matcher(getPath(request)); + checkState(matcher.matches()); + String region = matcher.group(1); + String name = matcher.group(2); + return instanceGroupManagers.stream() + .filter(igm -> name.equals(igm.getName())) + .filter(igm -> region.equals(Utils.getLocalName(igm.getRegion()))) + .findFirst() + .map(StubComputeFactory::jsonResponse) + .orElse(errorResponse(404)); + } + + private RegionInstanceGroupManagerList regionInstanceGroupManagerList(String path) { + Matcher matcher = LIST_REGIONAL_IGM_PATTERN.matcher(path); + checkState(matcher.matches()); + String region = matcher.group(1); + return new RegionInstanceGroupManagerList() + .setItems( + instanceGroupManagers.stream() + .filter(igm -> region.equals(Utils.getLocalName(igm.getRegion()))) + 
.collect(toImmutableList())); + } + + private MockLowLevelHttpResponse getInstanceTemplate(MockLowLevelHttpRequest request) { + Matcher matcher = GET_INSTANCE_TEMPLATE_PATTERN.matcher(getPath(request)); + checkState(matcher.matches()); + String name = matcher.group(1); + return instanceTemplates.stream() + .filter(template -> name.equals(template.getName())) + .findFirst() + .map(StubComputeFactory::jsonResponse) + .orElse(errorResponse(404)); + } + + private MockLowLevelHttpResponse instanceTemplateList(LowLevelHttpRequest request) { + return jsonResponse(new InstanceTemplateList().setItems(instanceTemplates)); + } + + private InstanceList instanceList(String path) { + Matcher matcher = LIST_ZONAL_INSTANCES_PATTERN.matcher(path); + checkState(matcher.matches()); + String zone = matcher.group(1); + return new InstanceList() + .setItems( + instances.stream() + .filter(instance -> zone.equals(Utils.getLocalName(instance.getZone()))) + .collect(toImmutableList())); + } + + private MockLowLevelHttpResponse instanceAggregatedList(LowLevelHttpRequest request) { + ImmutableListMultimap<String, Instance> instancesMultimap = + aggregate(instances, Instance::getZone, /* regionFunction= */ instance -> null); + ImmutableMap<String, InstancesScopedList> instances = + instancesMultimap.asMap().entrySet().stream() + .collect( + toImmutableMap( + Map.Entry::getKey, + e -> + new InstancesScopedList() + .setInstances(ImmutableList.copyOf(e.getValue())))); + InstanceAggregatedList result = new InstanceAggregatedList().setItems(instances); + return jsonResponse(result); + } + + private MockLowLevelHttpResponse getZonalAutoscaler(MockLowLevelHttpRequest request) { + Matcher matcher = GET_ZONAL_AUTOSCALER_PATTERN.matcher(getPath(request)); + checkState(matcher.matches()); + String zone = matcher.group(1); + String name = matcher.group(2); + return autoscalers.stream() + .filter(autoscaler -> name.equals(autoscaler.getName())) + .filter(autoscaler -> zone.equals(Utils.getLocalName(autoscaler.getZone()))) + .findFirst() + .map(StubComputeFactory::jsonResponse) + .orElse(errorResponse(404)); + } + + private AutoscalerList zonalAutoscalerList(String path) { + Matcher matcher = LIST_ZONAL_AUTOSCALERS_PATTERN.matcher(path); + checkState(matcher.matches()); + String zone = matcher.group(1); + return new AutoscalerList() + .setItems( + autoscalers.stream() + .filter(autoscaler -> zone.equals(Utils.getLocalName(autoscaler.getZone()))) + .collect(toImmutableList())); + } + + private MockLowLevelHttpResponse getRegionalAutoscaler(MockLowLevelHttpRequest request) { + Matcher matcher = GET_REGIONAL_AUTOSCALER_PATTERN.matcher(getPath(request)); + checkState(matcher.matches()); + String region = matcher.group(1); + String name = matcher.group(2); + return autoscalers.stream() + .filter(autoscaler -> name.equals(autoscaler.getName())) + .filter(autoscaler -> region.equals(Utils.getLocalName(autoscaler.getRegion()))) + .findFirst() + .map(StubComputeFactory::jsonResponse) + .orElse(errorResponse(404)); + } + + private AutoscalerList regionalAutoscalerList(String path) { + Matcher matcher = LIST_REGIONAL_AUTOSCALERS_PATTERN.matcher(path); + checkState(matcher.matches()); + String region = matcher.group(1); + return new AutoscalerList() + .setItems( + autoscalers.stream() + .filter(autoscaler -> region.equals(Utils.getLocalName(autoscaler.getRegion()))) + .collect(toImmutableList())); + } + + private MockLowLevelHttpResponse autoscalerAggregatedList(LowLevelHttpRequest request) { + ImmutableListMultimap<String, Autoscaler> autoscalersMultimap = + aggregate(autoscalers, Autoscaler::getZone,
Autoscaler::getRegion); + ImmutableMap<String, AutoscalersScopedList> autoscalers = + autoscalersMultimap.asMap().entrySet().stream() + .collect( + toImmutableMap( + Map.Entry::getKey, + e -> + new AutoscalersScopedList() + .setAutoscalers(ImmutableList.copyOf(e.getValue())))); + return jsonResponse(new AutoscalerAggregatedList().setItems(autoscalers)); + } + + private MockLowLevelHttpResponse project(MockLowLevelHttpRequest request) { + if (projectException != null) { + return errorResponse(500, projectException); + } + + Matcher matcher = GET_PROJECT_PATTERN.matcher(getPath(request)); + checkState(matcher.matches()); + String name = matcher.group(1); + return projects.stream() + .filter(project -> name.equals(project.getName())) + .findFirst() + .map(StubComputeFactory::jsonResponse) + .orElse(errorResponse(404)); + } + + private static <T> ImmutableListMultimap<String, T> aggregate( + Collection<T> items, Function<T, String> zoneFunction, Function<T, String> regionFunction) { + return items.stream() + .map(item -> entry(getAggregateKey(item, zoneFunction, regionFunction), item)) + .filter(entry -> entry.getKey() != null) + .collect(toImmutableListMultimap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private static <T> String getAggregateKey( + T item, Function<T, String> zoneFunction, Function<T, String> regionFunction) { + + String zone = zoneFunction.apply(item); + if (zone != null) { + return "zones/" + Utils.getLocalName(zone); + } + String region = regionFunction.apply(item); + if (region != null) { + return "regions/" + Utils.getLocalName(region); + } + return null; + } + + private static MockLowLevelHttpResponse errorResponse(int statusCode) { + return errorResponse(statusCode, null); + } + + private static MockLowLevelHttpResponse errorResponse(int statusCode, Exception exception) { + GoogleJsonErrorContainer errorContainer = new GoogleJsonErrorContainer(); + GoogleJsonError error = new GoogleJsonError(); + error.setCode(statusCode); + + if (exception != null) { + error.setMessage(exception.getMessage()); + } + + errorContainer.setError(error); + return jsonResponse(statusCode, errorContainer); + } + + private static MockLowLevelHttpResponse jsonResponse(GenericJson jsonObject) { + return jsonResponse(200, jsonObject); + } + + private static MockLowLevelHttpResponse jsonResponse(int statusCode, GenericJson jsonObject) { + try { + return new MockLowLevelHttpResponse() + .setStatusCode(statusCode) + .setContent(JSON_FACTORY.toByteArray(jsonObject)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static class PathBasedJsonResponseGenerator + implements Function<MockLowLevelHttpRequest, MockLowLevelHttpResponse> { + + final Function<String, GenericJson> responseGenerator; + + private PathBasedJsonResponseGenerator(Function<String, GenericJson> responseGenerator) { + this.responseGenerator = responseGenerator; + } + + @Override + public MockLowLevelHttpResponse apply(MockLowLevelHttpRequest request) { + GenericJson output = responseGenerator.apply(getPath(request)); + return jsonResponse(output); + } + } + + private static String getPath(MockLowLevelHttpRequest request) { + try { + return new URL(request.getUrl()).getPath(); + } catch (MalformedURLException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/StubHttpTransport.java b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/StubHttpTransport.java new file mode 100644 index 00000000000..45285b4f04d --- /dev/null +++
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/StubHttpTransport.java @@ -0,0 +1,232 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.LowLevelHttpRequest; +import com.google.api.client.http.LowLevelHttpResponse; +import com.google.api.client.testing.http.MockLowLevelHttpRequest; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableList; +import com.google.common.io.ByteStreams; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.commons.fileupload.MultipartStream; +import org.apache.http.Header; +import org.apache.http.HttpException; +import org.apache.http.HttpRequest; +import org.apache.http.entity.mime.MultipartEntityBuilder; +import org.apache.http.impl.io.DefaultHttpRequestParser; +import org.apache.http.impl.io.HttpTransportMetricsImpl; +import org.apache.http.impl.io.SessionInputBufferImpl; + +/** + * A stub {@link HttpTransport} for Google API clients that generates user-supplied responses for + * requests. + * + * <p>A key feature of this class (and, in fact, its primary raison d'être) is that it can be easily + * configured to handle <a + * href="https://cloud.google.com/compute/docs/api/how-tos/batch">batch requests</a>. + */ +final class StubHttpTransport extends HttpTransport { + + // An ordered map of request handlers. Each request will be passed through the predicates in the + // keys. The first one that matches will call the function to generate a response. + private final Map< + Predicate<StubHttpTransportRequest>, + Function<MockLowLevelHttpRequest, MockLowLevelHttpResponse>> + responses = new LinkedHashMap<>(); + + StubHttpTransport addBatchRequestHandlerForPath(Pattern pathPattern) { + return addResponseGenerator(methodAndPath("POST", pathPattern), this::processBatchRequest); + } + + StubHttpTransport addGetResponse( + Pattern pathPattern, + Function<MockLowLevelHttpRequest, MockLowLevelHttpResponse> responseGenerator) { + + return addResponseGenerator(methodAndPath("GET", pathPattern), responseGenerator); + } + + private StubHttpTransport addResponseGenerator( + Predicate<StubHttpTransportRequest> requestPredicate, + Function<MockLowLevelHttpRequest, MockLowLevelHttpResponse> responseGenerator) { + + responses.put(requestPredicate, responseGenerator); + return this; + } + + @Override + protected LowLevelHttpRequest buildRequest(String method, String urlString) { + return new StubHttpTransportRequest(method, urlString); + } + + // This class only exists because MockLowLevelHttpRequest doesn't store the HTTP method, sadly. + final class StubHttpTransportRequest extends MockLowLevelHttpRequest { + + private final String method; + + StubHttpTransportRequest(String method, String urlString) { + super(urlString); + this.method = method; + } + + String getMethod() { + return method; + } + + @Override + public MockLowLevelHttpResponse getResponse() { + return responses.entrySet().stream() + .filter(e -> e.getKey().test(this)) + .findFirst() + .map(e -> e.getValue().apply(this)) + .orElseThrow( + () -> + new UnsupportedOperationException( + String.format( + "No response configured for %s request with URL %s", + this.getMethod(), this.getUrl()))); + } + + @Override + public LowLevelHttpResponse execute() { + return getResponse(); + } + } + + private static final Pattern REQUEST_BOUNDARY_PATTERN = + Pattern.compile("; boundary=([-_'()+,./:=?a-zA-Z0-9]+)"); + private static final String RESPONSE_BOUNDARY = "__batch_boundary__"; + + // Google APIs allow submitting multiple API requests in a single HTTP call. They do this by + // POSTing to a special URL. The content is a multipart/mixed message where each part is a full + // HTTP request, with status lines, headers, and body. The responses are similar.
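+ // As a rough sketch (illustrative, not copied from the API docs), one part of such a
+ // multipart/mixed batch body looks like:
+ //
+ //   --some_boundary
+ //   Content-Type: application/http
+ //
+ //   GET /compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/mig HTTP/1.1
+ //
+ //   --some_boundary--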
+ // See https://cloud.google.com/compute/docs/api/how-tos/batch + private MockLowLevelHttpResponse processBatchRequest(MockLowLevelHttpRequest request) { + MultipartEntityBuilder multipartResponse = + MultipartEntityBuilder.create().setBoundary(RESPONSE_BOUNDARY); + try { + for (byte[] part : parts(request)) { + HttpRequest httpRequest = parseRequest(part); + + MockLowLevelHttpRequest googleRequest = + (MockLowLevelHttpRequest) + buildRequest( + httpRequest.getRequestLine().getMethod(), + httpRequest.getRequestLine().getUri()); + for (Header header : httpRequest.getAllHeaders()) { + googleRequest.addHeader(header.getName(), header.getValue()); + } + + LowLevelHttpResponse response = googleRequest.execute(); + addResponse(multipartResponse, response); + } + + return new MockLowLevelHttpResponse() + .setContentType("multipart/mixed; boundary=" + RESPONSE_BOUNDARY) + .setStatusCode(200) + .setContent(new String(ByteStreams.toByteArray(multipartResponse.build().getContent()))); + } catch (IOException | HttpException e) { + throw new IllegalStateException(e); + } + } + + private Iterable<byte[]> parts(MockLowLevelHttpRequest request) throws IOException { + + String contentType = request.getContentType(); + Matcher boundaryMatcher = REQUEST_BOUNDARY_PATTERN.matcher(contentType); + if (!boundaryMatcher.find()) { + throw new IllegalStateException("Couldn't find boundary in " + contentType); + } + String boundary = boundaryMatcher.group(1); + MultipartStream multipartStream = + new MultipartStream( + new ByteArrayInputStream(request.getContentAsString().getBytes()), + boundary.getBytes(), + /* bufSize= */ 2048, + /* pNotifier= */ null); + boolean foundData = multipartStream.skipPreamble(); + if (!foundData) { + return ImmutableList.of(); + } + + return () -> + new AbstractIterator<byte[]>() { + private boolean needsBoundaryRead = false; + + @Override + protected byte[] computeNext() { + try { + if (needsBoundaryRead && !multipartStream.readBoundary()) { + return endOfData(); + } else { + needsBoundaryRead = true; + } + multipartStream.readHeaders(); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + multipartStream.readBodyData(output); + return output.toByteArray(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }; + } + + private HttpRequest parseRequest(byte[] part) throws IOException, HttpException { + ByteArrayInputStream input = new ByteArrayInputStream(part); + SessionInputBufferImpl buffer = + new SessionInputBufferImpl(new HttpTransportMetricsImpl(), 10240); + buffer.bind(input); + DefaultHttpRequestParser requestParser = new DefaultHttpRequestParser(buffer); + return requestParser.parse(); + } + + private void addResponse(MultipartEntityBuilder multipartResponse, LowLevelHttpResponse response) + throws IOException { + byte[] responseBytes = ByteStreams.toByteArray(response.getContent()); + multipartResponse.addTextBody( + "part", "HTTP/1.1 " + response.getStatusLine() + "\n\n" + new String(responseBytes)); + } + + private static Predicate<StubHttpTransportRequest> methodAndPath( + String method, Pattern pathPattern) { + return request -> + request.getMethod().equals(method) + && pathPattern.matcher(urlPath(request.getUrl())).matches(); + } + + private static String urlPath(String url) { + try { + return new URL(url).getPath(); + } catch (MalformedURLException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProviderSpec.groovy
b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProviderSpec.groovy new file mode 100644 index 00000000000..ba37c9ebf38 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProviderSpec.groovy @@ -0,0 +1,72 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.* +import spock.lang.Specification +import spock.lang.Subject + + +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.LOAD_BALANCERS +import static com.netflix.spinnaker.clouddriver.google.cache.Keys.Namespace.SERVER_GROUPS + +class GoogleLoadBalancerProviderSpec extends Specification { + private static final ACCOUNT_NAME = "auto" + private static final LOAD_BALANCER_NAME = "default" + private static final REGION_EUROPE = "europe-west1" + private static final SERVER_GROUP_IDS = ["server_group_identifier"] + + void "should return session affinity"() { + setup: + def cacheView = Mock(Cache) + def serverGroup = Mock(CacheData) + def serverGroupRelationships = [:] + serverGroupRelationships.put(LOAD_BALANCERS.ns, ["lb_name"]) + + def loadBalancerRelations = [:] + loadBalancerRelations.put(SERVER_GROUPS.ns, []) + + def googleLoadBalancerView = Mock(CacheData) + def attributes = [ + type: GoogleLoadBalancerType.NETWORK, + account: ACCOUNT_NAME, + region: REGION_EUROPE, + sessionAffinity: GoogleSessionAffinity.CLIENT_IP_PORT_PROTO + ] + googleLoadBalancerView.getAttributes() >> attributes + googleLoadBalancerView.getRelationships() >> loadBalancerRelations + + @Subject def provider = new GoogleLoadBalancerProvider() + provider.cacheView = cacheView + provider.objectMapper = new ObjectMapper() + + when: + def details = provider.byAccountAndRegionAndName(ACCOUNT_NAME, REGION_EUROPE, LOAD_BALANCER_NAME) + then: + _ * cacheView.filterIdentifiers(LOAD_BALANCERS.ns, _) >> ["lb_identifier"] + 1 * cacheView.getAll(LOAD_BALANCERS.ns, _, _) >> [googleLoadBalancerView] + 1 * cacheView.filterIdentifiers(SERVER_GROUPS.ns, _) >> SERVER_GROUP_IDS + 1 * cacheView.getAll(SERVER_GROUPS.ns, SERVER_GROUP_IDS) >> [serverGroup] + 1 * serverGroup.getRelationships() >> serverGroupRelationships + details.size() == 1 + details[0].sessionAffinity == "CLIENT_IP_PORT_PROTO" + } + +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleNetworkProviderSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleNetworkProviderSpec.groovy index 36b90717de2..613762d52f8 100644 --- 
a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleNetworkProviderSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleNetworkProviderSpec.groovy @@ -53,19 +53,19 @@ class GoogleNetworkProviderSpec extends Specification { [ id: 6614377178691015954, name: 'some-network', - selfLink: 'https://www.googleapis.com/compute/alpha/projects/some-project/global/networks/some-network', + selfLink: 'https://compute.googleapis.com/compute/alpha/projects/some-project/global/networks/some-network', autoCreateSubnets: Boolean.TRUE, - subnets: ['https://www.googleapis.com/compute/alpha/projects/some-project/regions/europe-west1/subnetworks/some-network', - 'https://www.googleapis.com/compute/alpha/projects/some-project/regions/europe-west2/subnetworks/some-network'], + subnets: ['https://compute.googleapis.com/compute/alpha/projects/some-project/regions/europe-west1/subnetworks/some-network', + 'https://compute.googleapis.com/compute/alpha/projects/some-project/regions/europe-west2/subnetworks/some-network'], routingConfig: [routingMode: 'GLOBAL'] ], [ id: 6614377178691015955, name: 'some-network-2', - selfLink: 'https://www.googleapis.com/compute/alpha/projects/some-project/global/networks/some-network02', + selfLink: 'https://compute.googleapis.com/compute/alpha/projects/some-project/global/networks/some-network02', autoCreateSubnets: Boolean.TRUE, - subnets: ['https://www.googleapis.com/compute/alpha/projects/some-project/regions/europe-west1/subnetworks/some-network-2', - 'https://www.googleapis.com/compute/alpha/projects/some-project/regions/europe-west2/subnetworks/some-network-2'], + subnets: ['https://compute.googleapis.com/compute/alpha/projects/some-project/regions/europe-west1/subnetworks/some-network-2', + 'https://compute.googleapis.com/compute/alpha/projects/some-project/regions/europe-west2/subnetworks/some-network-2'], routingConfig: [routingMode: 'GLOBAL'] ] ] diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProviderSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProviderSpec.groovy index 6e753283d3c..96a101c4c7a 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProviderSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSecurityGroupProviderSpec.groovy @@ -31,6 +31,8 @@ import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository +import com.netflix.spinnaker.credentials.NoopCredentialsLifecycleHandler import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject @@ -45,23 +47,19 @@ class GoogleSecurityGroupProviderSpec extends Specification { ObjectMapper mapper = new ObjectMapper() def setup() { - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) - credentialsRepo.save("test", - new GoogleNamedAccountCredentials - .Builder() + def credentialsRepo = new 
MapBackedCredentialsRepository(GoogleNamedAccountCredentials.CREDENTIALS_TYPE, + new NoopCredentialsLifecycleHandler<>()) + credentialsRepo.save(new GoogleNamedAccountCredentials.Builder() .name("test") .project("my-project") .credentials(new FakeGoogleCredentials()) .build()) - credentialsRepo.save("prod", - new GoogleNamedAccountCredentials - .Builder() + credentialsRepo.save(new GoogleNamedAccountCredentials.Builder() .name("prod") .project("my-project") .credentials(new FakeGoogleCredentials()) .build()) - provider = new GoogleSecurityGroupProvider(credentialsProvider, cache, mapper) + provider = new GoogleSecurityGroupProvider(credentialsRepo, cache, mapper) cache.mergeAll(Keys.Namespace.SECURITY_GROUPS.ns, getAllGroups()) } @@ -183,7 +181,7 @@ class GoogleSecurityGroupProviderSpec extends Specification { id: 'name-a', name: 'name-a', network: 'default', - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a', targetTags: ['tag-1', 'tag-2'], description: 'a', accountName: account, @@ -287,6 +285,8 @@ class GoogleSecurityGroupProviderSpec extends Specification { cachedValue.inboundRules[1].portRanges.endPort == [65535] } + + @Shared Map<String, Map<String, List<Firewall>>> firewallMap = [ prod: [ @@ -294,7 +294,7 @@ class GoogleSecurityGroupProviderSpec extends Specification { new Firewall( name: 'name-a', id: 6614377178691015951, - network: 'https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default', + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', targetTags: ['tag-1', 'tag-2'], description: 'a', sourceRanges: ['192.168.2.0/24'], @@ -303,19 +303,19 @@ new Firewall.Allowed(IPProtocol: 'udp', ports: ['4040-4042']), new Firewall.Allowed(IPProtocol: 'tcp', ports: ['9090']), ], - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a' + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-a' ), new Firewall( name: 'name-b', id: 6614377178691015952, - network: 'https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default', - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-b' + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/name-b' ), new Firewall( name: 'name-c', id: 6614377178691015954, - network: 'https://www.googleapis.com/compute/v1/projects/some-xpn-host-project/global/networks/default', - selfLink: 'https://www.googleapis.com/compute/v1/projects/some-xpn-host-project/global/firewalls/name-c' + network: 'https://compute.googleapis.com/compute/v1/projects/some-xpn-host-project/global/networks/default', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/some-xpn-host-project/global/firewalls/name-c' ), ] ], @@ -324,13 +324,14 @@ class GoogleSecurityGroupProviderSpec extends Specification { new Firewall( name: 'a', id: 6614377178691015953, - network: 'https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default', - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/a' + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', + targetServiceAccounts:
['user@test.iam.gserviceaccount.com'], + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/a' ), new Firewall( name: 'b', id: 123, - network: 'https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default', + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', description: 'description of b', sourceRanges: ['192.168.3.100'], allowed: [ @@ -338,19 +339,19 @@ class GoogleSecurityGroupProviderSpec extends Specification { new Firewall.Allowed(IPProtocol: 'tcp', ports: ['1', '2', '3-100']), new Firewall.Allowed(IPProtocol: 'udp', ports: ['5050']), ], - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/b' + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/b' ), new Firewall( name: 'c', id: 456, - network: 'https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default', + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', description: 'description of c', sourceRanges: ['192.168.4.100/32'], allowed: [ new Firewall.Allowed(IPProtocol: 'tcp'), new Firewall.Allowed(IPProtocol: 'udp', ports: []), ], - selfLink: 'https://www.googleapis.com/compute/v1/projects/my-project/global/firewalls/c' + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/firewalls/c' ), ] ] @@ -360,7 +361,7 @@ class GoogleSecurityGroupProviderSpec extends Specification { firewallMap.collect { String account, Map<String, List<Firewall>> regions -> regions.collect { String region, List<Firewall> firewalls -> firewalls.collect { Firewall firewall -> - Map attributes = [firewall: firewall] + Map attributes = [firewall: firewall, project: 'my-project'] new DefaultCacheData(Keys.getSecurityGroupKey(firewall.getName(), firewall.getName(), "global", account), attributes, [:]) } }.flatten() diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProviderSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProviderSpec.groovy new file mode 100644 index 00000000000..03532446d14 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProviderSpec.groovy @@ -0,0 +1,73 @@ +package com.netflix.spinnaker.clouddriver.google.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.Subnetwork +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.google.cache.Keys +import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class GoogleSubnetProviderSpec extends Specification { + @Subject + GoogleSubnetProvider provider + + WriteableCache cache = new InMemoryCache() + ObjectMapper mapper = new ObjectMapper() + + def setup() { + def accountCredentialsProvider = new DefaultAccountCredentialsProvider() + provider = new GoogleSubnetProvider(accountCredentialsProvider, cache, mapper) + cache.mergeAll(Keys.Namespace.SUBNETS.ns, getAllSubnets()) + } + + void "getAll lists all"() { + when: + def result =
provider.getAll() + + then: + result.size() == 2 + } + + @Shared + Map<String, List<Subnetwork>> subnetsMap = [ + 'us-central1': [ + new Subnetwork( + name: 'a', + gatewayAddress: '10.0.0.1', + id: 6614377178691015953, + ipCidrRange: '10.0.0.0/24', + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', + regionUrl: 'https://compute.googleapis.com/compute/v1/projects/my-project/regions/us-central1', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/regions/us-central1/subnetworks/a' + ) + ], + 'asia-south1': [ + new Subnetwork( + name: 'b', + gatewayAddress: '10.1.0.1', + id: 6614377178691015954, + ipCidrRange: '10.1.0.0/24', + network: 'https://compute.googleapis.com/compute/v1/projects/my-project/global/networks/default', + regionUrl: 'https://compute.googleapis.com/compute/v1/projects/my-project/regions/asia-south1', + selfLink: 'https://compute.googleapis.com/compute/v1/projects/my-project/regions/us-central1/subnetworks/b' + ), + ] + ] + + + private List<CacheData> getAllSubnets() { + String account = 'my-account' + subnetsMap.collect { String regions, List<Subnetwork> region -> + region.collect { Subnetwork subnet -> + Map attributes = [subnet: subnet, project: "my-project"] + new DefaultCacheData(Keys.getSubnetKey(subnet.getName(), "global", account), attributes, [:]) + } + }.flatten() + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/FakeGoogleCredentials.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/FakeGoogleCredentials.groovy index be3b594f0d0..635904df910 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/FakeGoogleCredentials.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/FakeGoogleCredentials.groovy @@ -16,13 +16,13 @@ package com.netflix.spinnaker.clouddriver.google.security -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential -import com.google.api.client.googleapis.testing.auth.oauth2.MockGoogleCredential import com.google.api.client.http.HttpTransport -import com.google.api.client.json.JsonFactory import com.google.api.client.testing.http.MockHttpTransport +import com.google.auth.oauth2.AccessToken import com.netflix.spinnaker.clouddriver.google.ComputeVersion +import java.time.LocalDate + class FakeGoogleCredentials extends GoogleCredentials { FakeGoogleCredentials() { @@ -39,7 +39,8 @@ class FakeGoogleCredentials extends GoogleCredentials { } @Override - GoogleCredential getCredential(HttpTransport httpTransport, JsonFactory _) { - return new MockGoogleCredential.Builder().setTransport(httpTransport).build() + com.google.auth.oauth2.GoogleCredentials getCredentials() { + LocalDate tomorrow = LocalDate.now().plusDays(1) + com.google.auth.oauth2.GoogleCredentials.create(new AccessToken("some-token", java.sql.Date.valueOf(tomorrow))) } } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsInitializerSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsInitializerSpec.groovy deleted file mode 100644 index db70520d331..00000000000 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/security/GoogleCredentialsInitializerSpec.groovy +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.google.security - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.config.GoogleConfiguration -import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties -import com.netflix.spinnaker.clouddriver.googlecommon.GoogleExecutor -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import spock.lang.Specification - -class GoogleCredentialsInitializerSpec extends Specification { - - def "load clouddriver when unable to connect to non-required accounts"() { - given: - GoogleExecutor.globalRegistry = new DefaultRegistry() - def init = new GoogleCredentialsInitializer() - def configProps = new GoogleConfigurationProperties( - accounts: [ - new GoogleConfigurationProperties.ManagedAccount( - name: "spec", - project: "a-project-that-doesnot-exist", - ) - ] - ) - def accountRepo = Mock(AccountCredentialsRepository) - def deployDefaults = new GoogleConfiguration.DeployDefaults() - - - when: - init.synchronizeGoogleAccounts("clouddriver", configProps, null, null, accountRepo, [], deployDefaults) - - then: - noExceptionThrown() - } - - def "do not load clouddriver when unable to connect to required accounts"() { - given: - GoogleExecutor.globalRegistry = new DefaultRegistry() - def init = new GoogleCredentialsInitializer() - def configProps = new GoogleConfigurationProperties( - accounts: [ - new GoogleConfigurationProperties.ManagedAccount( - name: "spec", - project: "a-project-that-doesnot-exist", - required: true, - ) - ] - ) - def accountRepo = Mock(AccountCredentialsRepository) - def deployDefaults = new GoogleConfiguration.DeployDefaults() - - - when: - init.synchronizeGoogleAccounts("clouddriver", configProps, null, null, accountRepo, [], deployDefaults) - - then: - thrown(IllegalArgumentException) - } -} diff --git a/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/config/GoogleCredentialsConfigurationTest.java b/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/config/GoogleCredentialsConfigurationTest.java new file mode 100644 index 00000000000..6292a3a1d4d --- /dev/null +++ b/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/config/GoogleCredentialsConfigurationTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2022 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.google.config; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.GoogleExecutor; +import com.netflix.spinnaker.clouddriver.google.deploy.converters.AbandonAndDecrementGoogleServerGroupAtomicOperationConverter; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.config.GoogleConfiguration; +import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import org.junit.jupiter.api.Test; +import org.springframework.boot.context.annotation.UserConfigurations; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.boot.test.context.runner.ApplicationContextRunner; +import org.springframework.context.annotation.Bean; + +public class GoogleCredentialsConfigurationTest { + + private final ApplicationContextRunner runner = + new ApplicationContextRunner() + .withConfiguration( + UserConfigurations.of( + GoogleCredentialsConfiguration.class, + AbandonAndDecrementGoogleServerGroupAtomicOperationConverter.class, + TestConfiguration.class)); + + @Test + void testCredentialsRepositoryBeanIsPresent() { + runner.run(ctx -> assertThat(ctx).hasSingleBean(CredentialsRepository.class)); + } + + static class TestConfiguration { + @Bean + ObjectMapper getObjectMapper() { + return new ObjectMapper(); + } + + @Bean + CredentialsLifecycleHandler getCredentialsLifecycleHandler() { + return mock(CredentialsLifecycleHandler.class); + } + + @Bean + NamerRegistry getNamerRegistry() { + return mock(NamerRegistry.class); + } + + @Bean + GoogleConfigurationProperties getGoogleConfigurationProperties() { + return mock(GoogleConfigurationProperties.class); + } + + @Bean + ConfigFileService getConfigFileService() { + return mock(ConfigFileService.class); + } + + @Bean + GoogleConfiguration.DeployDefaults getGoogleConfigurationDeployDefaults() { + return mock(GoogleConfiguration.DeployDefaults.class); + } + + @Bean + GoogleExecutor getGoogleExecutor() { + return mock(GoogleExecutor.class); + } + + @Bean + Registry getRegistry() { + return mock(Registry.class); + } + + @Bean + String getClouddriverUserAgentApplicationName() { + return "clouddriverUserAgentApplicationName"; + } + } +} diff --git a/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandlerTest.java b/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandlerTest.java new file mode 100644 index 00000000000..1c1b19bf428 --- /dev/null +++ b/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandlerTest.java @@ -0,0 +1,1925 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.handlers; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; +import static org.mockito.Mockito.contains; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.services.compute.model.*; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.ops.GoogleUserDataProvider; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoHealingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.GoogleAutoscalingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.GoogleDistributionPolicy; +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck; +import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork; +import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup; +import com.netflix.spinnaker.clouddriver.google.model.GoogleSubnet; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerView; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancingPolicy; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleNetworkLoadBalancer; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleSslLoadBalancer; +import 
com.netflix.spinnaker.clouddriver.google.provider.view.GoogleClusterProvider;
+import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleLoadBalancerProvider;
+import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider;
+import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider;
+import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.model.ServerGroup;
+import com.netflix.spinnaker.config.GoogleConfiguration;
+import java.io.IOException;
+import java.util.*;
+import org.apache.commons.lang3.StringUtils;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Answers;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+public class BasicGoogleDeployHandlerTest {
+  @Mock private GoogleConfigurationProperties googleConfigurationProperties;
+  @Mock private GoogleClusterProvider googleClusterProvider;
+  @Mock private GoogleConfiguration.DeployDefaults googleDeployDefaults;
+  @Mock private GoogleOperationPoller googleOperationPoller;
+  @Mock private GoogleUserDataProvider googleUserDataProvider;
+  @Mock private GoogleLoadBalancerProvider googleLoadBalancerProvider;
+  @Mock private GoogleNetworkProvider googleNetworkProvider;
+  @Mock private GoogleSubnetProvider googleSubnetProvider;
+  @Mock private Cache cacheView;
+  @Mock private ObjectMapper objectMapper;
+  @Mock private SafeRetry safeRetry;
+  @Mock private Registry registry;
+
+  @InjectMocks @Spy private BasicGoogleDeployHandler basicGoogleDeployHandler;
+
+  private BasicGoogleDeployDescription mockDescription;
+  private GoogleNamedAccountCredentials mockCredentials;
+  private Task mockTask;
+  GoogleAutoscalingPolicy mockAutoscalingPolicy;
+  private MockedStatic<GCEUtil> mockedGCEUtil;
+  private MockedStatic<Utils> mockedUtils;
+
+  @BeforeEach
+  void setUp() {
+    MockitoAnnotations.initMocks(this);
+    mockDescription = mock(BasicGoogleDeployDescription.class);
+    mockCredentials = mock(GoogleNamedAccountCredentials.class);
+    mockTask = mock(Task.class);
+    mockedGCEUtil = mockStatic(GCEUtil.class);
+    mockedUtils = mockStatic(Utils.class);
+    mockAutoscalingPolicy = mock(GoogleAutoscalingPolicy.class);
+  }
+
+  @AfterEach
+  void tearDown() {
+    mockedGCEUtil.close();
+    mockedUtils.close();
+  }
+
+  @Test
+  void testGetRegionFromInput_WithNonBlankRegion() {
+    when(mockDescription.getRegion()).thenReturn("us-central1");
+    String result = basicGoogleDeployHandler.getRegionFromInput(mockDescription);
+    assertEquals("us-central1", result);
+  }
+
+  @Test
+  void testGetRegionFromInput_WithBlankRegion() {
+    when(mockDescription.getRegion()).thenReturn(""); // Blank region
+    when(mockDescription.getZone()).thenReturn("us-central1-a");
+    when(mockDescription.getCredentials()).thenReturn(mockCredentials);
+    when(mockCredentials.regionFromZone("us-central1-a")).thenReturn("us-central1");
+
+    String result = basicGoogleDeployHandler.getRegionFromInput(mockDescription);
+    assertEquals("us-central1", result);
+  }
+
+  @Test
+  void testGetRegionFromInput_WithNullRegion() {
+    when(mockDescription.getRegion()).thenReturn(null); // Null region
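+    // With a null region, getRegionFromInput should fall back to deriving the region from the zone.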
when(mockDescription.getZone()).thenReturn("us-central1-a"); + when(mockDescription.getCredentials()).thenReturn(mockCredentials); + when(mockCredentials.regionFromZone("us-central1-a")).thenReturn("us-central1"); + + String result = basicGoogleDeployHandler.getRegionFromInput(mockDescription); + assertEquals("us-central1", result); + } + + @Test + void testGetLocationFromInput_RegionalTrue() { + String region = "us-central1"; + when(mockDescription.getRegional()).thenReturn(true); + + String result = basicGoogleDeployHandler.getLocationFromInput(mockDescription, region); + assertEquals(region, result); + } + + @Test + void testGetLocationFromInput_RegionalFalse() { + String zone = "us-central1-a"; + when(mockDescription.getRegional()).thenReturn(false); + when(mockDescription.getZone()).thenReturn(zone); + + String result = basicGoogleDeployHandler.getLocationFromInput(mockDescription, ""); + assertEquals(zone, result); + } + + @Test + void testGetMachineTypeNameFromInput_WithCustomInstanceType() { + String instanceType = "custom-4-16384"; + when(mockDescription.getInstanceType()).thenReturn(instanceType); + + String result = + basicGoogleDeployHandler.getMachineTypeNameFromInput(mockDescription, mockTask, "location"); + assertEquals(instanceType, result); + } + + @Test + void testGetMachineTypeNameFromInput_WithNonCustomInstanceType() { + String instanceType = "n1-standard-1"; + String location = "us-central1"; + String machineTypeName = "n1-standard-1-machine"; + + when(mockDescription.getInstanceType()).thenReturn(instanceType); + when(mockDescription.getCredentials()).thenReturn(mockCredentials); + + mockedGCEUtil + .when( + () -> + GCEUtil.queryMachineType( + eq(instanceType), + eq(location), + eq(mockCredentials), + eq(mockTask), + eq("DEPLOY"))) + .thenReturn(machineTypeName); + + String result = + basicGoogleDeployHandler.getMachineTypeNameFromInput(mockDescription, mockTask, location); + + assertEquals(machineTypeName, result); + mockedGCEUtil.verify( + () -> GCEUtil.queryMachineType(instanceType, location, mockCredentials, mockTask, "DEPLOY"), + times(1)); + } + + @Test + void testBuildNetworkFromInput_WithNonBlankNetworkName() { + String networkName = "custom-network"; + GoogleNetwork mockGoogleNetwork = mock(GoogleNetwork.class); + + when(mockGoogleNetwork.getName()).thenReturn(networkName); + when(mockDescription.getNetwork()).thenReturn(networkName); + when(mockDescription.getAccountName()).thenReturn("test-account"); + + mockedGCEUtil + .when( + () -> + GCEUtil.queryNetwork( + eq("test-account"), eq(networkName), eq(mockTask), eq("DEPLOY"), any())) + .thenReturn(mockGoogleNetwork); + + GoogleNetwork result = + basicGoogleDeployHandler.buildNetworkFromInput(mockDescription, mockTask); + + assertEquals(networkName, result.getName()); + } + + @Test + void testBuildNetworkFromInput_WithBlankNetworkName() { + String defaultNetworkName = "default"; + GoogleNetwork mockGoogleNetwork = mock(GoogleNetwork.class); + + when(mockDescription.getNetwork()).thenReturn(""); + when(mockDescription.getAccountName()).thenReturn("test-account"); + + mockedGCEUtil + .when( + () -> + GCEUtil.queryNetwork( + eq("test-account"), eq(defaultNetworkName), eq(mockTask), eq("DEPLOY"), any())) + .thenReturn(mockGoogleNetwork); + + GoogleNetwork result = + basicGoogleDeployHandler.buildNetworkFromInput(mockDescription, mockTask); + assertEquals(mockGoogleNetwork, result); + } + + @Test + void testBuildSubnetFromInput_WithNonBlankSubnetAndNoAutoCreateSubnets() { + String region = "us-central1"; + String 
subnetName = "custom-subnet"; + String networkId = "basic-network"; + + GoogleNetwork mockNetwork = mock(GoogleNetwork.class); + GoogleSubnet mockSubnet = mock(GoogleSubnet.class); + + when(mockDescription.getSubnet()).thenReturn(subnetName); + when(mockDescription.getAccountName()).thenReturn("test-account"); + when(mockNetwork.getId()).thenReturn(networkId); + + mockedGCEUtil + .when( + () -> + GCEUtil.querySubnet( + eq("test-account"), + eq(region), + eq(subnetName), + eq(mockTask), + eq("DEPLOY"), + any())) + .thenReturn(mockSubnet); + + GoogleSubnet result = + basicGoogleDeployHandler.buildSubnetFromInput( + mockDescription, mockTask, mockNetwork, region); + assertEquals(mockSubnet, result); + } + + @Test + void testBuildSubnetFromInput_WithNonBlankSubnetAndAutoCreateSubnets() { + String region = "us-central1"; + String subnetName = "custom-subnet"; + String networkId = "projects/test-network"; + + when(mockDescription.getSubnet()).thenReturn(subnetName); + when(mockDescription.getAccountName()).thenReturn("test-account"); + GoogleNetwork mockNetwork = mock(GoogleNetwork.class); + GoogleSubnet mockSubnet = mock(GoogleSubnet.class); + when(mockNetwork.getId()).thenReturn(networkId); + when(mockNetwork.getAutoCreateSubnets()).thenReturn(true); + + mockedGCEUtil + .when( + () -> + GCEUtil.querySubnet( + eq("test-account"), + eq(region), + eq(subnetName), + eq(mockTask), + eq("DEPLOY"), + any())) + .thenReturn(mockSubnet); + mockedGCEUtil + .when( + () -> + GCEUtil.querySubnet( + eq("test-account"), + eq(region), + eq(networkId), + eq(mockTask), + eq("DEPLOY"), + any())) + .thenReturn(mockSubnet); + + GoogleSubnet result = + basicGoogleDeployHandler.buildSubnetFromInput( + mockDescription, mockTask, mockNetwork, region); + assertEquals(mockSubnet, result); + mockedGCEUtil.verify( + () -> GCEUtil.querySubnet(any(), any(), any(), any(), any(), any()), times(2)); + } + + @Test + void testBuildSubnetFromInput_WithBlankSubnetAndNoAutoCreateSubnets() { + String region = "us-central1"; + + when(mockDescription.getSubnet()).thenReturn(""); // Blank subnet + GoogleNetwork mockNetwork = mock(GoogleNetwork.class); + + GoogleSubnet result = + basicGoogleDeployHandler.buildSubnetFromInput( + mockDescription, mockTask, mockNetwork, region); + assertNull(result); + mockedGCEUtil.verifyNoInteractions(); + } + + @Test + void testGetLoadBalancerToUpdateFromInput_WithEmptyLoadBalancers() { + when(mockDescription.getLoadBalancers()).thenReturn(Collections.emptyList()); + + BasicGoogleDeployHandler.LoadBalancerInfo result = + basicGoogleDeployHandler.getLoadBalancerToUpdateFromInput(mockDescription, mockTask); + + assertNotNull(result); + assertTrue(result.internalLoadBalancers.isEmpty()); + assertTrue(result.internalHttpLoadBalancers.isEmpty()); + assertTrue(result.sslLoadBalancers.isEmpty()); + assertTrue(result.tcpLoadBalancers.isEmpty()); + assertTrue(result.targetPools.isEmpty()); + + mockedGCEUtil.verifyNoInteractions(); + } + + @Test + void testGetLoadBalancerToUpdateFromInput_WithNonEmptyLoadBalancers_TrafficDisabled() { + List loadBalancerNames = Arrays.asList("lb1", "lb2"); + when(mockDescription.getLoadBalancers()).thenReturn(loadBalancerNames); + when(mockDescription.getDisableTraffic()).thenReturn(true); + + List foundLoadBalancers = + Arrays.asList( + mockLoadBalancer(GoogleLoadBalancerType.INTERNAL), + mockLoadBalancer(GoogleLoadBalancerType.INTERNAL_MANAGED), + mockLoadBalancer(GoogleLoadBalancerType.SSL), + mockLoadBalancer(GoogleLoadBalancerType.TCP), + 
mockLoadBalancer(GoogleLoadBalancerType.NETWORK));
+    GoogleLoadBalancerProvider mockGoogleLoadBalancerProvider =
+        mock(GoogleLoadBalancerProvider.class);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.queryAllLoadBalancers(
+                    any(), eq(loadBalancerNames), eq(mockTask), eq("DEPLOY")))
+        .thenReturn(foundLoadBalancers);
+
+    BasicGoogleDeployHandler.LoadBalancerInfo result =
+        basicGoogleDeployHandler.getLoadBalancerToUpdateFromInput(mockDescription, mockTask);
+
+    assertNotNull(result);
+    assertEquals(1, result.internalLoadBalancers.size());
+    assertEquals(1, result.internalHttpLoadBalancers.size());
+    assertEquals(1, result.sslLoadBalancers.size());
+    assertEquals(1, result.tcpLoadBalancers.size());
+    assertTrue(result.targetPools.isEmpty());
+
+    mockedGCEUtil.verify(
+        () ->
+            GCEUtil.queryAllLoadBalancers(any(), eq(loadBalancerNames), eq(mockTask), eq("DEPLOY")),
+        times(1));
+  }
+
+  @Test
+  void testGetLoadBalancerToUpdateFromInput_WithNonEmptyLoadBalancers_TrafficEnabled() {
+    List<String> loadBalancerNames = Arrays.asList("lb1", "lb2");
+    when(mockDescription.getLoadBalancers()).thenReturn(loadBalancerNames);
+    when(mockDescription.getDisableTraffic()).thenReturn(false);
+
+    GoogleNetworkLoadBalancer nlb = new GoogleNetworkLoadBalancer();
+    nlb.setTargetPool("target-pool");
+    GoogleLoadBalancerView lbv = nlb.getView();
+
+    List<GoogleLoadBalancerView> foundLoadBalancers =
+        Arrays.asList(
+            mockLoadBalancer(GoogleLoadBalancerType.INTERNAL),
+            mockLoadBalancer(GoogleLoadBalancerType.INTERNAL_MANAGED),
+            mockLoadBalancer(GoogleLoadBalancerType.SSL),
+            mockLoadBalancer(GoogleLoadBalancerType.TCP),
+            lbv);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.queryAllLoadBalancers(
+                    any(), eq(loadBalancerNames), eq(mockTask), eq("DEPLOY")))
+        .thenReturn(foundLoadBalancers);
+
+    BasicGoogleDeployHandler.LoadBalancerInfo result =
+        basicGoogleDeployHandler.getLoadBalancerToUpdateFromInput(mockDescription, mockTask);
+
+    assertNotNull(result);
+    assertEquals(1, result.internalLoadBalancers.size());
+    assertEquals(1, result.internalHttpLoadBalancers.size());
+    assertEquals(1, result.sslLoadBalancers.size());
+    assertEquals(1, result.tcpLoadBalancers.size());
+    assertEquals(1, result.targetPools.size());
+    assertEquals("target-pool", result.targetPools.get(0));
+
+    mockedGCEUtil.verify(
+        () ->
+            GCEUtil.queryAllLoadBalancers(any(), eq(loadBalancerNames), eq(mockTask), eq("DEPLOY")),
+        times(1));
+  }
+
+  @Test
+  void testBuildBootImage() {
+    when(googleConfigurationProperties.getBaseImageProjects())
+        .thenReturn(Arrays.asList("base-project-1", "base-project-2"));
+    Image mockedImage = mock(Image.class);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.getBootImage(
+                    eq(mockDescription),
+                    eq(mockTask),
+                    eq("DEPLOY"),
+                    any(),
+                    eq(Arrays.asList("base-project-1", "base-project-2")),
+                    eq(safeRetry),
+                    any()))
+        .thenReturn(mockedImage);
+
+    Image result = basicGoogleDeployHandler.buildBootImage(mockDescription, mockTask);
+
+    mockedGCEUtil.verify(
+        () ->
+            GCEUtil.getBootImage(
+                eq(mockDescription),
+                eq(mockTask),
+                eq("DEPLOY"),
+                any(),
+                eq(Arrays.asList("base-project-1", "base-project-2")),
+                eq(safeRetry),
+                any()));
+
+    assertEquals(mockedImage, result);
+  }
+
+  @Test
+  void testBuildAttachedDisks() {
+    when(googleConfigurationProperties.getBaseImageProjects())
+        .thenReturn(Arrays.asList("base-project-1", "base-project-2"));
+    List<AttachedDisk> attachedDisksMock =
+        Arrays.asList(mock(AttachedDisk.class), mock(AttachedDisk.class));
+    Image bootImageMock = mock(Image.class);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.buildAttachedDisks(
+                    eq(mockDescription),
+                    eq(null),
+                    eq(false),
+                    eq(googleDeployDefaults),
+                    eq(mockTask),
+                    eq("DEPLOY"),
+                    any(),
+                    eq(Arrays.asList("base-project-1", "base-project-2")),
+                    eq(bootImageMock),
+                    eq(safeRetry),
+                    any()))
+        .thenReturn(attachedDisksMock);
+
+    List<AttachedDisk> result =
+        basicGoogleDeployHandler.buildAttachedDisks(mockDescription, mockTask, bootImageMock);
+
+    // Verify that the static method was called with the correct arguments.
+    mockedGCEUtil.verify(
+        () ->
+            GCEUtil.buildAttachedDisks(
+                eq(mockDescription),
+                eq(null),
+                eq(false),
+                eq(googleDeployDefaults),
+                eq(mockTask),
+                eq("DEPLOY"),
+                any(),
+                eq(Arrays.asList("base-project-1", "base-project-2")),
+                eq(bootImageMock),
+                eq(safeRetry),
+                any()));
+
+    assertEquals(attachedDisksMock, result);
+  }
+
+  @Test
+  void testBuildNetworkInterface_AssociatePublicIpAddress() {
+    GoogleNetwork networkMock = mock(GoogleNetwork.class);
+    GoogleSubnet subnetMock = mock(GoogleSubnet.class);
+    NetworkInterface networkInterfaceMock = mock(NetworkInterface.class);
+
+    when(mockDescription.getAssociatePublicIpAddress()).thenReturn(null);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.buildNetworkInterface(
+                    eq(networkMock),
+                    eq(subnetMock),
+                    eq(true),
+                    eq("External NAT"),
+                    eq("ONE_TO_ONE_NAT")))
+        .thenReturn(networkInterfaceMock);
+
+    NetworkInterface result =
+        basicGoogleDeployHandler.buildNetworkInterface(mockDescription, networkMock, subnetMock);
+
+    mockedGCEUtil.verify(
+        () ->
+            GCEUtil.buildNetworkInterface(
+                eq(networkMock),
+                eq(subnetMock),
+                eq(true),
+                eq("External NAT"),
+                eq("ONE_TO_ONE_NAT")));
+
+    assertEquals(networkInterfaceMock, result);
+  }
+
+  @Test
+  void testBuildNetworkInterface_NoAssociatePublicIpAddress() {
+    GoogleNetwork networkMock = mock(GoogleNetwork.class);
+    GoogleSubnet subnetMock = mock(GoogleSubnet.class);
+    NetworkInterface networkInterfaceMock = mock(NetworkInterface.class);
+
+    when(mockDescription.getAssociatePublicIpAddress()).thenReturn(false);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.buildNetworkInterface(
+                    eq(networkMock),
+                    eq(subnetMock),
+                    eq(false),
+                    eq("External NAT"),
+                    eq("ONE_TO_ONE_NAT")))
+        .thenReturn(networkInterfaceMock);
+
+    NetworkInterface result =
+        basicGoogleDeployHandler.buildNetworkInterface(mockDescription, networkMock, subnetMock);
+
+    mockedGCEUtil.verify(
+        () ->
+            GCEUtil.buildNetworkInterface(
+                eq(networkMock),
+                eq(subnetMock),
+                eq(false),
+                eq("External NAT"),
+                eq("ONE_TO_ONE_NAT")));
+
+    assertEquals(networkInterfaceMock, result);
+  }
+
+  @Test
+  void testHasBackedServiceFromInput_WithBackendServiceInMetadata() {
+    BasicGoogleDeployHandler.LoadBalancerInfo loadBalancerInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+
+    Map<String, String> instanceMetadata = new HashMap<>();
+    instanceMetadata.put("backend-service-names", "some-backend-service");
+    when(mockDescription.getInstanceMetadata()).thenReturn(instanceMetadata);
+
+    boolean result =
+        basicGoogleDeployHandler.hasBackedServiceFromInput(mockDescription, loadBalancerInfoMock);
+
+    assertTrue(result);
+  }
+
+  @Test
+  void testHasBackedServiceFromInput_WithSslLoadBalancers() {
+    BasicGoogleDeployHandler.LoadBalancerInfo loadBalancerInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+    when(mockDescription.getInstanceMetadata()).thenReturn(Collections.emptyMap());
+
+    List<GoogleLoadBalancerView> sslLoadBalancers =
+        Arrays.asList(new GoogleLoadBalancerView() {}, new GoogleLoadBalancerView() {});
+    when(loadBalancerInfoMock.getSslLoadBalancers()).thenReturn(sslLoadBalancers);
+
+    boolean result =
basicGoogleDeployHandler.hasBackedServiceFromInput(mockDescription, loadBalancerInfoMock);
+    assertTrue(result);
+  }
+
+  @Test
+  void testHasBackedServiceFromInput_WithoutBackedService() {
+    BasicGoogleDeployHandler.LoadBalancerInfo loadBalancerInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+
+    when(mockDescription.getInstanceMetadata()).thenReturn(Collections.emptyMap());
+    when(loadBalancerInfoMock.getSslLoadBalancers()).thenReturn(Collections.emptyList());
+    when(loadBalancerInfoMock.getTcpLoadBalancers()).thenReturn(Collections.emptyList());
+
+    boolean result =
+        basicGoogleDeployHandler.hasBackedServiceFromInput(mockDescription, loadBalancerInfoMock);
+    assertFalse(result);
+  }
+
+  @Test
+  void testHasBackedServiceFromInput_WithTcpLoadBalancers() {
+    BasicGoogleDeployHandler.LoadBalancerInfo loadBalancerInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+
+    when(mockDescription.getInstanceMetadata()).thenReturn(Collections.emptyMap());
+
+    List<GoogleLoadBalancerView> tcpLoadBalancers =
+        Arrays.asList(new GoogleLoadBalancerView() {});
+    when(loadBalancerInfoMock.getSslLoadBalancers()).thenReturn(Collections.emptyList());
+    when(loadBalancerInfoMock.getTcpLoadBalancers()).thenReturn(tcpLoadBalancers);
+
+    boolean result =
+        basicGoogleDeployHandler.hasBackedServiceFromInput(mockDescription, loadBalancerInfoMock);
+    assertTrue(result);
+  }
+
+  @Test
+  void testBuildLoadBalancerPolicyFromInput_PolicyInDescription() throws Exception {
+    GoogleHttpLoadBalancingPolicy policyMock = mock(GoogleHttpLoadBalancingPolicy.class);
+    when(mockDescription.getLoadBalancingPolicy()).thenReturn(policyMock);
+    when(policyMock.getBalancingMode())
+        .thenReturn(GoogleLoadBalancingPolicy.BalancingMode.UTILIZATION);
+    when(mockDescription.getInstanceMetadata()).thenReturn(Collections.emptyMap());
+
+    GoogleHttpLoadBalancingPolicy result =
+        basicGoogleDeployHandler.buildLoadBalancerPolicyFromInput(mockDescription);
+    assertEquals(policyMock, result);
+  }
+
+  @Test
+  void testBuildLoadBalancerPolicyFromInput_PolicyInMetadata() throws Exception {
+    when(mockDescription.getLoadBalancingPolicy()).thenReturn(null);
+
+    Map<String, String> instanceMetadata = new HashMap<>();
+    String policyJson = "{\"balancingMode\": \"UTILIZATION\", \"maxUtilization\": 0.75}";
+    instanceMetadata.put("load-balancing-policy", policyJson);
+    when(mockDescription.getInstanceMetadata()).thenReturn(instanceMetadata);
+
+    GoogleHttpLoadBalancingPolicy deserializedPolicyMock =
+        mock(GoogleHttpLoadBalancingPolicy.class);
+    when(objectMapper.readValue(policyJson, GoogleHttpLoadBalancingPolicy.class))
+        .thenReturn(deserializedPolicyMock);
+
+    GoogleHttpLoadBalancingPolicy result =
+        basicGoogleDeployHandler.buildLoadBalancerPolicyFromInput(mockDescription);
+    assertEquals(deserializedPolicyMock, result);
+  }
+
+  @Test
+  void testBuildLoadBalancerPolicyFromInput_DefaultPolicy() throws Exception {
+    when(mockDescription.getLoadBalancingPolicy()).thenReturn(null);
+    when(mockDescription.getInstanceMetadata()).thenReturn(Collections.emptyMap());
+
+    GoogleHttpLoadBalancingPolicy result =
+        basicGoogleDeployHandler.buildLoadBalancerPolicyFromInput(mockDescription);
+
+    assertEquals(GoogleLoadBalancingPolicy.BalancingMode.UTILIZATION, result.getBalancingMode());
+    assertEquals(0.80f, result.getMaxUtilization());
+    assertEquals(1.0f, result.getCapacityScaler());
+    assertNotNull(result.getNamedPorts());
+    assertEquals(1, result.getNamedPorts().size());
+    assertEquals(
+        GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME,
result.getNamedPorts().get(0).getName());
+    assertEquals(
+        GoogleHttpLoadBalancingPolicy.getHTTP_DEFAULT_PORT(),
+        result.getNamedPorts().get(0).getPort());
+  }
+
+  @Test
+  void testGetBackendServiceToUpdate_NoBackendService() {
+    BasicGoogleDeployHandler.LoadBalancerInfo lbInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+    doReturn(false)
+        .when(basicGoogleDeployHandler)
+        .hasBackedServiceFromInput(mockDescription, lbInfoMock);
+
+    List<BackendService> result =
+        basicGoogleDeployHandler.getBackendServiceToUpdate(
+            mockDescription, "serverGroupName", lbInfoMock, null, "region");
+    assertTrue(result.isEmpty());
+  }
+
+  @Test
+  void testGetBackendServiceToUpdate_WithBackendService() throws Exception {
+    Map<String, String> instanceMetadata = new HashMap<>();
+    instanceMetadata.put("backend-service-names", "backend-service-1,backend-service-2");
+    when(mockDescription.getInstanceMetadata()).thenReturn(instanceMetadata);
+    when(mockDescription.getCredentials()).thenReturn(mock(GoogleNamedAccountCredentials.class));
+    when(mockDescription.getRegional()).thenReturn(true);
+
+    BasicGoogleDeployHandler.LoadBalancerInfo lbInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+    GoogleBackendService backendServiceMock = mock(GoogleBackendService.class);
+    when(backendServiceMock.getName()).thenReturn("backend-service-ssl");
+
+    List<GoogleLoadBalancerView> sslLB = new ArrayList<>();
+    GoogleSslLoadBalancer googleSslLB = new GoogleSslLoadBalancer();
+    googleSslLB.setBackendService(backendServiceMock);
+    sslLB.add(googleSslLB.getView());
+    when(lbInfoMock.getSslLoadBalancers()).thenReturn(sslLB);
+
+    GoogleHttpLoadBalancingPolicy policyMock = mock(GoogleHttpLoadBalancingPolicy.class);
+    Backend backendToAdd = mock(Backend.class);
+
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.resolveHttpLoadBalancerNamesMetadata(anyList(), any(), anyString(), any()))
+        .thenReturn(Arrays.asList("lb-1", "lb-2"));
+    doReturn(mock(BackendService.class))
+        .when(basicGoogleDeployHandler)
+        .getBackendServiceFromProvider(any(), anyString());
+    mockedGCEUtil
+        .when(() -> GCEUtil.updateMetadataWithLoadBalancingPolicy(any(), any(), any()))
+        .then(Answers.RETURNS_SMART_NULLS);
+    mockedGCEUtil
+        .when(() -> GCEUtil.backendFromLoadBalancingPolicy(any()))
+        .thenReturn(backendToAdd);
+    doReturn(true)
+        .when(basicGoogleDeployHandler)
+        .hasBackedServiceFromInput(mockDescription, lbInfoMock);
+
+    List<BackendService> result =
+        basicGoogleDeployHandler.getBackendServiceToUpdate(
+            mockDescription, "serverGroupName", lbInfoMock, policyMock, "region");
+    assertNotNull(result);
+    assertEquals(3, result.size());
+  }
+
+  @Test
+  void testGetRegionBackendServicesToUpdateWithNoLoadBalancers() {
+    GoogleHttpLoadBalancingPolicy policyMock = mock(GoogleHttpLoadBalancingPolicy.class);
+    BasicGoogleDeployHandler.LoadBalancerInfo lbInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
+    when(lbInfoMock.getInternalLoadBalancers()).thenReturn(Collections.emptyList());
+    when(lbInfoMock.getInternalHttpLoadBalancers()).thenReturn(Collections.emptyList());
+
+    List<BackendService> result =
+        basicGoogleDeployHandler.getRegionBackendServicesToUpdate(
+            mockDescription, "server-group-name", lbInfoMock, policyMock, "region");
+
+    assertNotNull(result);
+    assertTrue(result.isEmpty());
+  }
+
+  @Test
+  void testGetRegionBackendServicesToUpdateWithInternalLoadBalancers() throws IOException {
+    GoogleHttpLoadBalancingPolicy policyMock = mock(GoogleHttpLoadBalancingPolicy.class);
+    BasicGoogleDeployHandler.LoadBalancerInfo lbInfoMock =
+        mock(BasicGoogleDeployHandler.LoadBalancerInfo.class);
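+    // Build one internal (L4) and one internal HTTP (L7) load balancer, each tied to its own
+    // backend service, so both branches of the region backend service update are exercised.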
+    GoogleBackendService backendServiceMock = mock(GoogleBackendService.class);
+    when(backendServiceMock.getName()).thenReturn("backend-service-internal");
+
+    List<GoogleLoadBalancerView> internalLB = new ArrayList<>();
+    GoogleInternalLoadBalancer googleInternalLB = new GoogleInternalLoadBalancer();
+    googleInternalLB.setBackendService(backendServiceMock);
+    googleInternalLB.setName("internal-load-balancer");
+    internalLB.add(googleInternalLB.getView());
+    when(lbInfoMock.getInternalLoadBalancers()).thenReturn(internalLB);
+
+    List<GoogleLoadBalancerView> internalHttpLB = new ArrayList<>();
+    GoogleInternalHttpLoadBalancer googleInternalHttpLB = new GoogleInternalHttpLoadBalancer();
+    googleInternalHttpLB.setName("internal-http-load-balancer");
+    internalHttpLB.add(googleInternalHttpLB.getView());
+    when(lbInfoMock.getInternalHttpLoadBalancers()).thenReturn(internalHttpLB);
+
+    Map<String, String> instanceMetadata = new HashMap<>();
+    instanceMetadata.put("load-balancer-names", "load-balancer-1,load-balancer-2");
+    instanceMetadata.put("region-backend-service-names", "us-central1-backend");
+    when(mockDescription.getInstanceMetadata()).thenReturn(instanceMetadata);
+    when(mockDescription.getCredentials()).thenReturn(mockCredentials);
+    when(mockDescription.getZone()).thenReturn("us-central1-a");
+
+    String region = "us-central1";
+    doReturn(mock(BackendService.class))
+        .when(basicGoogleDeployHandler)
+        .getRegionBackendServiceFromProvider(any(), any(), any());
+
+    mockedGCEUtil
+        .when(() -> GCEUtil.buildZonalServerGroupUrl(any(), any(), any()))
+        .thenReturn("zonal-server-group-url");
+    GoogleBackendService googleBackendService = new GoogleBackendService();
+    googleBackendService.setName("google-backend-service");
+    mockedUtils
+        .when(() -> Utils.getBackendServicesFromInternalHttpLoadBalancerView(any()))
+        .thenReturn(List.of(googleBackendService));
+
+    List<BackendService> result =
+        basicGoogleDeployHandler.getRegionBackendServicesToUpdate(
+            mockDescription, "server-group-name", lbInfoMock, policyMock, region);
+    assertNotNull(result);
+    assertEquals(2, result.size());
+    assertEquals(
+        "load-balancer-1,load-balancer-2,internal-load-balancer,internal-http-load-balancer",
+        instanceMetadata.get("load-balancer-names"));
+  }
+
+  @Test
+  void testAddUserDataToInstanceMetadata_WithEmptyMetadata() {
+    String serverGroupName = "test-server-group";
+    String instanceTemplateName = "test-template";
+    Map<String, String> userDataMap = new HashMap<>();
+    userDataMap.put("key1", "value1");
+
+    when(mockDescription.getInstanceMetadata()).thenReturn(new HashMap<>());
+    doReturn(userDataMap)
+        .when(basicGoogleDeployHandler)
+        .getUserData(mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    basicGoogleDeployHandler.addUserDataToInstanceMetadata(
+        mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    verify(basicGoogleDeployHandler)
+        .getUserData(mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    ArgumentCaptor<Map<String, String>> captor = ArgumentCaptor.forClass(Map.class);
+    verify(mockDescription).setInstanceMetadata(captor.capture());
+    Map<String, String> updatedMetadata = captor.getValue();
+    assertEquals(1, updatedMetadata.size());
+    assertEquals("value1", updatedMetadata.get("key1"));
+  }
+
+  @Test
+  void testAddUserDataToInstanceMetadata_WithNonEmptyMetadata() {
+    String serverGroupName = "test-server-group";
+    String instanceTemplateName = "test-template";
+    Map<String, String> existingMetadata = new HashMap<>();
+    existingMetadata.put("existingKey", "existingValue");
+    Map<String, String> userDataMap = new HashMap<>();
+    userDataMap.put("key1", "value1");
+
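+    // The resolved user data should be merged into the existing metadata rather than replacing it.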
+    when(mockDescription.getInstanceMetadata()).thenReturn(existingMetadata);
+    doReturn(userDataMap)
+        .when(basicGoogleDeployHandler)
+        .getUserData(mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    basicGoogleDeployHandler.addUserDataToInstanceMetadata(
+        mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    verify(basicGoogleDeployHandler)
+        .getUserData(mockDescription, serverGroupName, instanceTemplateName, mockTask);
+    ArgumentCaptor<Map<String, String>> captor = ArgumentCaptor.forClass(Map.class);
+    verify(mockDescription).setInstanceMetadata(captor.capture());
+
+    Map<String, String> updatedMetadata = captor.getValue();
+    assertEquals(2, updatedMetadata.size());
+    assertEquals("existingValue", updatedMetadata.get("existingKey"));
+    assertEquals("value1", updatedMetadata.get("key1"));
+  }
+
+  @Test
+  void testGetUserData_WithCustomUserData() {
+    String serverGroupName = "test-server-group";
+    String instanceTemplateName = "test-template";
+    String customUserData = "custom-data";
+
+    when(mockDescription.getUserData()).thenReturn(customUserData);
+
+    Map<String, String> mockUserData = new HashMap<>();
+    mockUserData.put("key", "value");
+
+    when(googleUserDataProvider.getUserData(
+            serverGroupName,
+            instanceTemplateName,
+            mockDescription,
+            mockDescription.getCredentials(),
+            customUserData))
+        .thenReturn(mockUserData);
+
+    Map<String, String> result =
+        basicGoogleDeployHandler.getUserData(
+            mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    verify(googleUserDataProvider)
+        .getUserData(
+            serverGroupName,
+            instanceTemplateName,
+            mockDescription,
+            mockDescription.getCredentials(),
+            customUserData);
+    verify(mockTask).updateStatus("DEPLOY", "Resolved user data.");
+    assertEquals(mockUserData, result);
+  }
+
+  @Test
+  void testGetUserData_WithEmptyCustomUserData() {
+    String serverGroupName = "test-server-group";
+    String instanceTemplateName = "test-template";
+    String emptyUserData = "";
+
+    when(mockDescription.getUserData()).thenReturn(null);
+
+    Map<String, String> mockUserData = new HashMap<>();
+    mockUserData.put("key", "value");
+
+    when(googleUserDataProvider.getUserData(
+            serverGroupName,
+            instanceTemplateName,
+            mockDescription,
+            mockDescription.getCredentials(),
+            emptyUserData))
+        .thenReturn(mockUserData);
+
+    Map<String, String> result =
+        basicGoogleDeployHandler.getUserData(
+            mockDescription, serverGroupName, instanceTemplateName, mockTask);
+
+    verify(googleUserDataProvider)
+        .getUserData(
+            serverGroupName,
+            instanceTemplateName,
+            mockDescription,
+            mockDescription.getCredentials(),
+            emptyUserData);
+    verify(mockTask).updateStatus("DEPLOY", "Resolved user data.");
+    assertEquals(mockUserData, result);
+  }
+
+  @Test
+  void testAddSelectZonesToInstanceMetadata_RegionalAndSelectZonesTrue() {
+    when(mockDescription.getRegional()).thenReturn(true);
+    when(mockDescription.getSelectZones()).thenReturn(true);
+
+    Map<String, String> mockMetadata = new HashMap<>();
+    when(mockDescription.getInstanceMetadata()).thenReturn(mockMetadata);
+
+    basicGoogleDeployHandler.addSelectZonesToInstanceMetadata(mockDescription);
+
+    assertTrue(mockMetadata.containsKey("select-zones"));
+    assertEquals("true", mockMetadata.get("select-zones"));
+    verify(mockDescription).setInstanceMetadata(mockMetadata);
+  }
+
+  @Test
+  void testAddSelectZonesToInstanceMetadata_NonRegional() {
+    when(mockDescription.getRegional()).thenReturn(false);
+
+    basicGoogleDeployHandler.addSelectZonesToInstanceMetadata(mockDescription);
+
+    verify(mockDescription, never()).setInstanceMetadata(any());
+  }
+
+  @Test
+  void testAddSelectZonesToInstanceMetadata_SelectZonesFalse() {
+    when(mockDescription.getRegional()).thenReturn(true);
+    when(mockDescription.getSelectZones()).thenReturn(false);
+
+    basicGoogleDeployHandler.addSelectZonesToInstanceMetadata(mockDescription);
+
+    verify(mockDescription, never()).setInstanceMetadata(any());
+  }
+
+  @Test
+  void testBuildMetadataFromInstanceMetadata() {
+    Map<String, String> mockInstanceMetadata = new HashMap<>();
+    mockInstanceMetadata.put("key1", "value1");
+    mockInstanceMetadata.put("key2", "value2");
+
+    Metadata mockMetadata = new Metadata();
+    mockMetadata.setItems(new ArrayList<>());
+
+    when(mockDescription.getInstanceMetadata()).thenReturn(mockInstanceMetadata);
+    mockedGCEUtil
+        .when(() -> GCEUtil.buildMetadataFromMap(mockInstanceMetadata))
+        .thenReturn(mockMetadata);
+
+    Metadata result = basicGoogleDeployHandler.buildMetadataFromInstanceMetadata(mockDescription);
+
+    assertEquals(mockMetadata, result);
+  }
+
+  @Test
+  void testBuildTagsFromInput() {
+    List<String> inputTags = new ArrayList<>();
+    inputTags.add("tag1");
+    inputTags.add("tag2");
+
+    Tags mockTags = new Tags();
+    mockTags.setItems(inputTags);
+
+    when(mockDescription.getTags()).thenReturn(inputTags);
+    mockedGCEUtil.when(() -> GCEUtil.buildTagsFromList(inputTags)).thenReturn(mockTags);
+
+    Tags result = basicGoogleDeployHandler.buildTagsFromInput(mockDescription);
+
+    assertEquals(mockTags, result);
+  }
+
+  @Test
+  void testBuildServiceAccountFromInput_AuthScopesPresent_ServiceAccountEmailBlank() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    description.setAuthScopes(List.of("scope1", "scope2"));
+    description.setServiceAccountEmail("");
+
+    ServiceAccount account = new ServiceAccount();
+    account.setEmail("default");
+    account.setScopes(List.of("scope1", "scope2"));
+    List<ServiceAccount> mockServiceAccounts = List.of(account);
+    mockedGCEUtil
+        .when(() -> GCEUtil.buildServiceAccount(any(), any()))
+        .thenReturn(mockServiceAccounts);
+
+    List<ServiceAccount> result =
+        basicGoogleDeployHandler.buildServiceAccountFromInput(description);
+
+    assertEquals("default", description.getServiceAccountEmail());
+    assertNotNull(result);
+    assertEquals(mockServiceAccounts, result);
+    mockedGCEUtil.verify(
+        () -> GCEUtil.buildServiceAccount("default", List.of("scope1", "scope2")), times(1));
+  }
+
+  @Test
+  void testBuildServiceAccountFromInput_AuthScopesEmpty() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    description.setAuthScopes(List.of());
+    description.setServiceAccountEmail("custom-email");
+
+    ServiceAccount account = new ServiceAccount();
+    account.setEmail("custom-email");
+    account.setScopes(List.of());
+    List<ServiceAccount> mockServiceAccounts = List.of(account);
+    mockedGCEUtil
+        .when(() -> GCEUtil.buildServiceAccount(any(), any()))
+        .thenReturn(mockServiceAccounts);
+
+    List<ServiceAccount> result =
+        basicGoogleDeployHandler.buildServiceAccountFromInput(description);
+
+    assertEquals("custom-email", description.getServiceAccountEmail());
+    assertNotNull(result);
+    assertEquals(mockServiceAccounts, result);
+    mockedGCEUtil.verify(() -> GCEUtil.buildServiceAccount("custom-email", List.of()), times(1));
+  }
+
+  @Test
+  void testBuildSchedulingFromInput() {
+    mockedGCEUtil
+        .when(() -> GCEUtil.buildScheduling(mockDescription))
+        .thenReturn(mock(Scheduling.class));
+
+    basicGoogleDeployHandler.buildSchedulingFromInput(mockDescription);
+
+    mockedGCEUtil.verify(() -> GCEUtil.buildScheduling(mockDescription), times(1));
+  }
+
+  @Test
+  void testBuildLabelsFromInput_ExistingLabels() {
+    Map<String, String> existingLabels = new HashMap<>();
+    existingLabels.put("key1", "value1");
+
+    when(mockDescription.getLabels()).thenReturn(existingLabels);
+
+    Map<String, String> labels =
+        basicGoogleDeployHandler.buildLabelsFromInput(
+            mockDescription, "my-server-group", "us-central1");
+
+    assertEquals(3, labels.size());
+    assertEquals("us-central1", labels.get("spinnaker-region"));
+    assertEquals("my-server-group", labels.get("spinnaker-server-group"));
+    assertEquals("value1", labels.get("key1"));
+
+    verify(mockDescription).getLabels();
+  }
+
+  @Test
+  void testBuildLabelsFromInput_NullLabels() {
+    when(mockDescription.getLabels()).thenReturn(null);
+
+    Map<String, String> labels =
+        basicGoogleDeployHandler.buildLabelsFromInput(
+            mockDescription, "my-server-group", "us-central1");
+
+    assertEquals(2, labels.size());
+    assertEquals("us-central1", labels.get("spinnaker-region"));
+    assertEquals("my-server-group", labels.get("spinnaker-server-group"));
+
+    verify(mockDescription).getLabels();
+  }
+
+  @Test
+  void validateAcceleratorConfig_throwsExceptionForInvalidConfig() {
+    when(mockDescription.getAcceleratorConfigs()).thenReturn(List.of(new AcceleratorConfig()));
+    when(mockDescription.getRegional()).thenReturn(false);
+
+    IllegalArgumentException exception =
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> {
+              basicGoogleDeployHandler.validateAcceleratorConfig(mockDescription);
+            });
+
+    assertEquals(
+        "Accelerators are only supported with regional server groups if the zones are specified by the user.",
+        exception.getMessage());
+  }
+
+  @Test
+  void validateAcceleratorConfig_noExceptionForValidConfig() {
+    when(mockDescription.getAcceleratorConfigs()).thenReturn(List.of());
+    assertDoesNotThrow(() -> basicGoogleDeployHandler.validateAcceleratorConfig(mockDescription));
+  }
+
+  @Test
+  void validateAcceleratorConfig_noExceptionForNullConfig() {
+    when(mockDescription.getAcceleratorConfigs()).thenReturn(null);
+    assertDoesNotThrow(() -> basicGoogleDeployHandler.validateAcceleratorConfig(mockDescription));
+  }
+
+  @Test
+  void validateAcceleratorConfig_validRegionalWithZones() {
+    BasicGoogleDeployDescription description = mock(BasicGoogleDeployDescription.class);
+    when(description.getAcceleratorConfigs()).thenReturn(List.of(new AcceleratorConfig()));
+    when(description.getRegional()).thenReturn(true);
+    when(description.getSelectZones()).thenReturn(false);
+
+    assertDoesNotThrow(() -> basicGoogleDeployHandler.validateAcceleratorConfig(description));
+  }
+
+  @Test
+  void buildInstancePropertiesFromInput_validInputs_success() {
+    String machineTypeName = "n1-standard-1";
+    List<AttachedDisk> attachedDisks = List.of(mock(AttachedDisk.class));
+    NetworkInterface networkInterface = mock(NetworkInterface.class);
+    Metadata metadata = mock(Metadata.class);
+    Tags tags = mock(Tags.class);
+    List<ServiceAccount> serviceAccounts = List.of(mock(ServiceAccount.class));
+    Scheduling scheduling = mock(Scheduling.class);
+    Map<String, String> labels = Map.of("key1", "value1");
+
+    when(mockDescription.getAcceleratorConfigs())
+        .thenReturn(List.of(mock(AcceleratorConfig.class)));
+    when(mockDescription.getCanIpForward()).thenReturn(true);
+    when(mockDescription.getResourceManagerTags())
+        .thenReturn(Map.of("resource-tag-key", "resource-tag-value"));
+    when(mockDescription.getPartnerMetadata())
+        .thenReturn(
+            Map.of(
+                "partner-metadata-key",
+                new StructuredEntries().setEntries(Map.of("entries", new Object()))));
+
+    InstanceProperties result =
+        basicGoogleDeployHandler.buildInstancePropertiesFromInput(
+            mockDescription,
+            machineTypeName,
+            attachedDisks,
+            networkInterface,
+            metadata,
+            tags,
+            serviceAccounts,
+            scheduling,
+            labels);
+
+    assertEquals(machineTypeName, result.getMachineType());
+    assertEquals(attachedDisks, result.getDisks());
+    assertFalse(result.getGuestAccelerators().isEmpty());
+    assertEquals(1, result.getNetworkInterfaces().size());
+    assertEquals(networkInterface, result.getNetworkInterfaces().get(0));
+    assertTrue(result.getCanIpForward());
+    assertEquals(metadata, result.getMetadata());
+    assertEquals(tags, result.getTags());
+    assertEquals(labels, result.getLabels());
+    assertEquals(scheduling, result.getScheduling());
+    assertEquals(serviceAccounts, result.getServiceAccounts());
+    assertEquals(mockDescription.getResourceManagerTags(), result.getResourceManagerTags());
+    assertEquals(mockDescription.getPartnerMetadata(), result.getPartnerMetadata());
+  }
+
+  @Test
+  void buildInstancePropertiesFromInput_noAcceleratorConfigs_emptyGuestAccelerators() {
+    String machineTypeName = "n1-standard-1";
+    List<AttachedDisk> attachedDisks = List.of(mock(AttachedDisk.class));
+    NetworkInterface networkInterface = mock(NetworkInterface.class);
+    Metadata metadata = mock(Metadata.class);
+    Tags tags = mock(Tags.class);
+    List<ServiceAccount> serviceAccounts = List.of(mock(ServiceAccount.class));
+    Scheduling scheduling = mock(Scheduling.class);
+    Map<String, String> labels = Map.of("key1", "value1");
+
+    when(mockDescription.getAcceleratorConfigs()).thenReturn(Collections.emptyList());
+    when(mockDescription.getCanIpForward()).thenReturn(false);
+    when(mockDescription.getResourceManagerTags()).thenReturn(Collections.emptyMap());
+    when(mockDescription.getPartnerMetadata()).thenReturn(Collections.emptyMap());
+
+    InstanceProperties result =
+        basicGoogleDeployHandler.buildInstancePropertiesFromInput(
+            mockDescription,
+            machineTypeName,
+            attachedDisks,
+            networkInterface,
+            metadata,
+            tags,
+            serviceAccounts,
+            scheduling,
+            labels);
+
+    assertEquals(machineTypeName, result.getMachineType());
+    assertEquals(attachedDisks, result.getDisks());
+    assertTrue(result.getGuestAccelerators().isEmpty());
+    assertEquals(1, result.getNetworkInterfaces().size());
+    assertEquals(networkInterface, result.getNetworkInterfaces().get(0));
+    assertFalse(result.getCanIpForward());
+    assertEquals(metadata, result.getMetadata());
+    assertEquals(tags, result.getTags());
+    assertEquals(labels, result.getLabels());
+    assertEquals(scheduling, result.getScheduling());
+    assertEquals(serviceAccounts, result.getServiceAccounts());
+    assertTrue(result.getResourceManagerTags().isEmpty());
+    assertTrue(result.getPartnerMetadata().isEmpty());
+  }
+
+  @Test
+  void buildInstancePropertiesFromInput_nullAcceleratorConfigs_emptyGuestAccelerators() {
+    String machineTypeName = "n1-standard-1";
+    List<AttachedDisk> attachedDisks = List.of(mock(AttachedDisk.class));
+    NetworkInterface networkInterface = mock(NetworkInterface.class);
+    Metadata metadata = mock(Metadata.class);
+    Tags tags = mock(Tags.class);
+    List<ServiceAccount> serviceAccounts = List.of(mock(ServiceAccount.class));
+    Scheduling scheduling = mock(Scheduling.class);
+    Map<String, String> labels = Map.of("key1", "value1");
+
+    when(mockDescription.getAcceleratorConfigs()).thenReturn(null);
+    when(mockDescription.getCanIpForward()).thenReturn(false);
+    when(mockDescription.getResourceManagerTags()).thenReturn(Collections.emptyMap());
+    when(mockDescription.getPartnerMetadata()).thenReturn(Collections.emptyMap());
+
+    InstanceProperties result =
+        basicGoogleDeployHandler.buildInstancePropertiesFromInput(
+            mockDescription,
+            machineTypeName,
+            attachedDisks,
+            networkInterface,
+            metadata,
tags, + serviceAccounts, + scheduling, + labels); + + assertEquals(machineTypeName, result.getMachineType()); + assertEquals(attachedDisks, result.getDisks()); + assertTrue(result.getGuestAccelerators().isEmpty()); + assertEquals(1, result.getNetworkInterfaces().size()); + assertEquals(networkInterface, result.getNetworkInterfaces().get(0)); + assertFalse(result.getCanIpForward()); + assertEquals(metadata, result.getMetadata()); + assertEquals(tags, result.getTags()); + assertEquals(labels, result.getLabels()); + assertEquals(scheduling, result.getScheduling()); + assertEquals(serviceAccounts, result.getServiceAccounts()); + assertTrue(result.getResourceManagerTags().isEmpty()); + assertTrue(result.getPartnerMetadata().isEmpty()); + } + + @Test + void addShieldedVmConfigToInstanceProperties_shieldedVmCompatible_configAdded() { + InstanceProperties instanceProperties = new InstanceProperties(); + Image bootImage = mock(Image.class); + ShieldedVmConfig shieldedVmConfig = mock(ShieldedVmConfig.class); + + mockedGCEUtil.when(() -> GCEUtil.isShieldedVmCompatible(bootImage)).thenReturn(true); + mockedGCEUtil + .when(() -> GCEUtil.buildShieldedVmConfig(mockDescription)) + .thenReturn(shieldedVmConfig); + + basicGoogleDeployHandler.addShieldedVmConfigToInstanceProperties( + mockDescription, instanceProperties, bootImage); + assertEquals(shieldedVmConfig, instanceProperties.getShieldedVmConfig()); + } + + @Test + void addShieldedVmConfigToInstanceProperties_notShieldedVmCompatible_noConfigAdded() { + InstanceProperties instanceProperties = new InstanceProperties(); + Image bootImage = mock(Image.class); + + mockedGCEUtil.when(() -> GCEUtil.isShieldedVmCompatible(bootImage)).thenReturn(false); + + basicGoogleDeployHandler.addShieldedVmConfigToInstanceProperties( + mockDescription, instanceProperties, bootImage); + assertNull(instanceProperties.getShieldedVmConfig()); + } + + @Test + void addMinCpuPlatformToInstanceProperties_minCpuPlatformIsNotBlank_setMinCpuPlatform() { + InstanceProperties instanceProperties = new InstanceProperties(); + String minCpuPlatform = "Intel Skylake"; + when(mockDescription.getMinCpuPlatform()).thenReturn(minCpuPlatform); + + basicGoogleDeployHandler.addMinCpuPlatformToInstanceProperties( + mockDescription, instanceProperties); + assertEquals(minCpuPlatform, instanceProperties.getMinCpuPlatform()); + } + + @Test + void addMinCpuPlatformToInstanceProperties_minCpuPlatformIsBlank_doNotSetMinCpuPlatform() { + InstanceProperties instanceProperties = new InstanceProperties(); + String minCpuPlatform = ""; + when(mockDescription.getMinCpuPlatform()).thenReturn(minCpuPlatform); + + basicGoogleDeployHandler.addMinCpuPlatformToInstanceProperties( + mockDescription, instanceProperties); + + assertNull(instanceProperties.getMinCpuPlatform()); + } + + @Test + void buildInstanceTemplate_validInputs_returnsInstanceTemplate() { + String name = "test-instance-template"; + InstanceProperties instanceProperties = new InstanceProperties(); + + InstanceTemplate result = + basicGoogleDeployHandler.buildInstanceTemplate(name, instanceProperties); + + assertNotNull(result); + assertEquals(name, result.getName()); + assertEquals(instanceProperties, result.getProperties()); + } + + @Test + void setCapacityFromInput_withValidCapacity_setsTargetSize() { + BasicGoogleDeployDescription.Capacity capacity = new BasicGoogleDeployDescription.Capacity(); + capacity.setDesired(5); + Mockito.when(mockDescription.getCapacity()).thenReturn(capacity); + + 
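+    // The desired capacity (5) should be copied straight into the server group's target size.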
basicGoogleDeployHandler.setCapacityFromInput(mockDescription); + + Mockito.verify(mockDescription).setTargetSize(5); + } + + @Test + void setCapacityFromInput_withNullCapacity_doesNotSetTargetSize() { + Mockito.when(mockDescription.getCapacity()).thenReturn(null); + basicGoogleDeployHandler.setCapacityFromInput(mockDescription); + Mockito.verify(mockDescription, Mockito.never()).setTargetSize(Mockito.anyInt()); + } + + @Test + void setAutoscalerCapacityFromInput_withValidAutoscalerAndCapacity_updatesAutoscalingPolicy() { + BasicGoogleDeployDescription.Capacity capacity = new BasicGoogleDeployDescription.Capacity(); + capacity.setMin(2); + capacity.setMax(10); + + when(mockDescription.getCapacity()).thenReturn(capacity); + when(mockDescription.getAutoscalingPolicy()).thenReturn(mockAutoscalingPolicy); + when(mockDescription.getCapacity()).thenReturn(capacity); + doReturn(true).when(basicGoogleDeployHandler).autoscalerIsSpecified(mockDescription); + + basicGoogleDeployHandler.setAutoscalerCapacityFromInput(mockDescription); + + verify(mockAutoscalingPolicy).setMinNumReplicas(2); + verify(mockAutoscalingPolicy).setMaxNumReplicas(10); + verify(mockDescription, times(2)).getAutoscalingPolicy(); + verify(mockDescription, times(3)).getCapacity(); + mockedGCEUtil.verify( + () -> GCEUtil.calibrateTargetSizeWithAutoscaler(mockDescription), times(1)); + } + + @Test + void setAutoscalerCapacityFromInput_withAutoscalerNotSpecified_doesNothing() { + doReturn(false).when(basicGoogleDeployHandler).autoscalerIsSpecified(mockDescription); + + basicGoogleDeployHandler.setAutoscalerCapacityFromInput(mockDescription); + + verify(mockDescription, never()).getAutoscalingPolicy(); + verify(mockDescription, never()).getCapacity(); + mockedGCEUtil.verify( + () -> GCEUtil.calibrateTargetSizeWithAutoscaler(mockDescription), times(0)); + } + + @Test + void setAutoscalerCapacityFromInput_withNullCapacity_doesNotUpdateAutoscalingPolicy() { + when(mockDescription.getCapacity()).thenReturn(null); + doReturn(true).when(basicGoogleDeployHandler).autoscalerIsSpecified(mockDescription); + + basicGoogleDeployHandler.setAutoscalerCapacityFromInput(mockDescription); + + verify(mockAutoscalingPolicy, never()).setMinNumReplicas(anyInt()); + verify(mockAutoscalingPolicy, never()).setMaxNumReplicas(anyInt()); + mockedGCEUtil.verify( + () -> GCEUtil.calibrateTargetSizeWithAutoscaler(mockDescription), times(1)); + } + + @Test + void autoscalerIsSpecified_whenAutoscalingPolicyIsNull_returnsFalse() { + when(mockDescription.getAutoscalingPolicy()).thenReturn(null); + boolean result = basicGoogleDeployHandler.autoscalerIsSpecified(mockDescription); + assertFalse(result, "Expected false when AutoscalingPolicy is null"); + } + + @Test + void autoscalerIsSpecified_whenAllUtilizationsAndSchedulesAreNull_returnsFalse() { + when(mockDescription.getAutoscalingPolicy()).thenReturn(mockAutoscalingPolicy); + when(mockAutoscalingPolicy.getCpuUtilization()).thenReturn(null); + when(mockAutoscalingPolicy.getLoadBalancingUtilization()).thenReturn(null); + when(mockAutoscalingPolicy.getCustomMetricUtilizations()).thenReturn(null); + when(mockAutoscalingPolicy.getScalingSchedules()).thenReturn(null); + + boolean result = basicGoogleDeployHandler.autoscalerIsSpecified(mockDescription); + + assertFalse(result, "Expected false when all utilizations and schedules are null"); + } + + @Test + void autoscalerIsSpecified_whenCpuUtilizationIsNotNull_returnsTrue() { + when(mockDescription.getAutoscalingPolicy()).thenReturn(mockAutoscalingPolicy); + 
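+    // A non-null CPU utilization target alone should be enough for the autoscaler to count as specified.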
when(mockAutoscalingPolicy.getCpuUtilization()) + .thenReturn(new GoogleAutoscalingPolicy.CpuUtilization()); + + boolean result = basicGoogleDeployHandler.autoscalerIsSpecified(mockDescription); + assertTrue(result, "Expected true when CpuUtilization is not null"); + } + + @Test + void autoscalerIsSpecified_whenLoadBalancingUtilizationIsNotNull_returnsTrue() { + when(mockDescription.getAutoscalingPolicy()).thenReturn(mockAutoscalingPolicy); + when(mockAutoscalingPolicy.getLoadBalancingUtilization()) + .thenReturn(new GoogleAutoscalingPolicy.LoadBalancingUtilization()); + + boolean result = basicGoogleDeployHandler.autoscalerIsSpecified(mockDescription); + assertTrue(result, "Expected true when LoadBalancingUtilization is not null"); + } + + @Test + void autoscalerIsSpecified_whenCustomMetricUtilizationsIsNotNull_returnsTrue() { + when(mockDescription.getAutoscalingPolicy()).thenReturn(mockAutoscalingPolicy); + when(mockAutoscalingPolicy.getCustomMetricUtilizations()).thenReturn(new ArrayList<>()); + + boolean result = basicGoogleDeployHandler.autoscalerIsSpecified(mockDescription); + assertTrue(result, "Expected true when CustomMetricUtilizations is not null"); + } + + @Test + void autoscalerIsSpecified_whenScalingSchedulesIsNotNull_returnsTrue() { + when(mockDescription.getAutoscalingPolicy()).thenReturn(mockAutoscalingPolicy); + boolean result = basicGoogleDeployHandler.autoscalerIsSpecified(mockDescription); + assertTrue(result, "Expected true when ScalingSchedules is not null"); + } + + @Test + void setCapacityFromSource_whenSourceIsNull_doesNothing() { + BasicGoogleDeployDescription description = new BasicGoogleDeployDescription(); + description.setSource(null); + + basicGoogleDeployHandler.setCapacityFromSource(description, mockTask); + verify(mockTask, never()).updateStatus(anyString(), anyString()); + assertNull(description.getTargetSize()); + } + + @Test + void setCapacityFromSource_whenUseSourceCapacityIsFalse_doesNothing() { + BasicGoogleDeployDescription description = new BasicGoogleDeployDescription(); + BasicGoogleDeployDescription.Source mockSource = + mock(BasicGoogleDeployDescription.Source.class); + description.setSource(mockSource); + when(mockSource.getUseSourceCapacity()).thenReturn(false); + + basicGoogleDeployHandler.setCapacityFromSource(description, mockTask); + verify(mockTask, never()).updateStatus(anyString(), anyString()); + assertNull(description.getTargetSize()); + } + + @Test + void setCapacityFromSource_whenRegionOrServerGroupNameIsBlank_doesNothing() { + BasicGoogleDeployDescription description = new BasicGoogleDeployDescription(); + BasicGoogleDeployDescription.Source mockSource = + mock(BasicGoogleDeployDescription.Source.class); + description.setSource(mockSource); + when(mockSource.getUseSourceCapacity()).thenReturn(true); + when(mockSource.getRegion()).thenReturn(StringUtils.EMPTY); + + basicGoogleDeployHandler.setCapacityFromSource(description, mockTask); + verify(mockTask, never()).updateStatus(anyString(), anyString()); + assertNull(description.getTargetSize()); + } + + @Test + void setCapacityFromSource_whenUseSourceCapacityIsNull_doesNothing() { + BasicGoogleDeployDescription description = new BasicGoogleDeployDescription(); + BasicGoogleDeployDescription.Source mockSource = + mock(BasicGoogleDeployDescription.Source.class); + description.setSource(mockSource); + when(mockSource.getUseSourceCapacity()).thenReturn(null); + + basicGoogleDeployHandler.setCapacityFromSource(description, mockTask); + verify(mockTask, never()).updateStatus(anyString(), 
anyString());
+    assertNull(description.getTargetSize());
+  }
+
+  @Test
+  void setCapacityFromSource_whenValidSource_updatesDescriptionCapacityAndPolicy() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    BasicGoogleDeployDescription.Source mockSource =
+        mock(BasicGoogleDeployDescription.Source.class);
+    description.setSource(mockSource);
+    description.setAccountName("account-name");
+    GoogleServerGroup.View mockServerGroupView = mock(GoogleServerGroup.View.class);
+    ServerGroup.Capacity capacity = new ServerGroup.Capacity(1, 5, 3);
+    when(mockSource.getUseSourceCapacity()).thenReturn(true);
+    when(mockSource.getRegion()).thenReturn("us-central1");
+    when(mockSource.getServerGroupName()).thenReturn("test-server-group");
+    when(mockServerGroupView.getCapacity()).thenReturn(capacity);
+    when(mockServerGroupView.getAutoscalingPolicy()).thenReturn(new GoogleAutoscalingPolicy());
+
+    mockedGCEUtil
+        .when(() -> GCEUtil.queryServerGroup(any(), anyString(), anyString(), anyString()))
+        .thenReturn(mockServerGroupView);
+
+    basicGoogleDeployHandler.setCapacityFromSource(description, mockTask);
+
+    verify(mockTask).updateStatus(eq("DEPLOY"), contains("Looking up server group"));
+    assertEquals(3, description.getTargetSize()); // Target size is taken from the source's desired capacity (3).
+    assertNotNull(description.getAutoscalingPolicy());
+  }
+
+  @Test
+  void buildAutoHealingPolicyFromInput_whenNoAutoHealingPolicy_returnsNull() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    description.setAutoHealingPolicy(null);
+    List<InstanceGroupManagerAutoHealingPolicy> result =
+        basicGoogleDeployHandler.buildAutoHealingPolicyFromInput(description, mockTask);
+    assertNull(result);
+  }
+
+  @Test
+  void buildAutoHealingPolicyFromInput_whenHealthCheckIsValid_returnsPolicy() {
+    GoogleAutoHealingPolicy mockAutoHealingPolicy = mock(GoogleAutoHealingPolicy.class);
+    GoogleHealthCheck mockHealthCheck = mock(GoogleHealthCheck.class);
+    when(mockAutoHealingPolicy.getHealthCheck()).thenReturn("valid-health-check");
+    when(mockAutoHealingPolicy.getHealthCheckKind())
+        .thenReturn(GoogleHealthCheck.HealthCheckKind.healthCheck);
+    when(mockDescription.getCredentials()).thenReturn(mockCredentials);
+    when(mockDescription.getAccountName()).thenReturn("account-name");
+    when(mockDescription.getAutoHealingPolicy()).thenReturn(mockAutoHealingPolicy);
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.queryHealthCheck(
+                    any(), any(), any(), any(), any(), any(), any(), any(), any()))
+        .thenReturn(mockHealthCheck);
+
+    when(mockHealthCheck.getSelfLink()).thenReturn("health-check-link");
+    when(mockAutoHealingPolicy.getInitialDelaySec()).thenReturn(300);
+
+    List<InstanceGroupManagerAutoHealingPolicy> result =
+        basicGoogleDeployHandler.buildAutoHealingPolicyFromInput(mockDescription, mockTask);
+
+    assertNotNull(result);
+    assertEquals(1, result.size());
+    assertEquals("health-check-link", result.get(0).getHealthCheck());
+    assertEquals(300, result.get(0).getInitialDelaySec());
+  }
+
+  @Test
+  void buildAutoHealingPolicyFromInput_whenHealthCheckIsBlank_returnsNull() {
+    List<InstanceGroupManagerAutoHealingPolicy> result =
+        basicGoogleDeployHandler.buildAutoHealingPolicyFromInput(mockDescription, mockTask);
+    assertNull(result);
+  }
+
+  @Test
+  void buildAutoHealingPolicyFromInput_whenMaxUnavailableIsSet_updatesPolicy() {
+    GoogleAutoHealingPolicy mockAutoHealingPolicy = mock(GoogleAutoHealingPolicy.class);
+    GoogleHealthCheck mockHealthCheck = mock(GoogleHealthCheck.class);
+    when(mockAutoHealingPolicy.getHealthCheck()).thenReturn("valid-health-check");
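+    // Stub a resolvable health check first so a base policy is built before maxUnavailable is applied.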
+    when(mockAutoHealingPolicy.getHealthCheckKind())
+        .thenReturn(GoogleHealthCheck.HealthCheckKind.healthCheck);
+    when(mockDescription.getCredentials()).thenReturn(mockCredentials);
+    when(mockDescription.getAccountName()).thenReturn("account-name");
+    when(mockDescription.getAutoHealingPolicy()).thenReturn(mockAutoHealingPolicy);
+    mockedGCEUtil
+        .when(
+            () ->
+                GCEUtil.queryHealthCheck(
+                    any(), any(), any(), any(), any(), any(), any(), any(), any()))
+        .thenReturn(mockHealthCheck);
+
+    when(mockHealthCheck.getSelfLink()).thenReturn("health-check-link");
+    when(mockAutoHealingPolicy.getInitialDelaySec()).thenReturn(300);
+
+    GoogleAutoHealingPolicy.FixedOrPercent mockMaxUnavailable =
+        new GoogleAutoHealingPolicy.FixedOrPercent();
+    mockMaxUnavailable.setFixed(5.0);
+    mockMaxUnavailable.setPercent(10.0);
+    when(mockAutoHealingPolicy.getMaxUnavailable()).thenReturn(mockMaxUnavailable);
+
+    List<InstanceGroupManagerAutoHealingPolicy> result =
+        basicGoogleDeployHandler.buildAutoHealingPolicyFromInput(mockDescription, mockTask);
+
+    assertNotNull(result);
+    assertEquals(1, result.size());
+    assertEquals(5, ((FixedOrPercent) result.get(0).get("maxUnavailable")).getFixed());
+    assertEquals(10, ((FixedOrPercent) result.get(0).get("maxUnavailable")).getPercent());
+  }
+
+  @Test
+  void buildInstanceGroupFromInput_whenValidInput_returnsInstanceGroupManager() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    description.setTargetSize(3);
+    String serverGroupName = "test-server-group";
+    String instanceTemplateUrl = "http://instance-template-url";
+    List<String> targetPools = List.of("target-pool-1", "target-pool-2");
+    List<InstanceGroupManagerAutoHealingPolicy> autoHealingPolicies =
+        List.of(new InstanceGroupManagerAutoHealingPolicy());
+
+    InstanceGroupManager result =
+        basicGoogleDeployHandler.buildInstanceGroupFromInput(
+            description, serverGroupName, instanceTemplateUrl, targetPools, autoHealingPolicies);
+
+    assertNotNull(result);
+    assertEquals(serverGroupName, result.getName());
+    assertEquals(serverGroupName, result.getBaseInstanceName());
+    assertEquals(instanceTemplateUrl, result.getInstanceTemplate());
+    assertEquals(3, result.getTargetSize());
+    assertEquals(targetPools, result.getTargetPools());
+    assertEquals(autoHealingPolicies, result.getAutoHealingPolicies());
+  }
+
+  @Test
+  void
+      buildInstanceGroupFromInput_whenNullTargetPoolsAndAutoHealingPolicies_returnsInstanceGroupManager() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    description.setTargetSize(2);
+    String serverGroupName = "test-server-group";
+    String instanceTemplateUrl = "http://instance-template-url";
+
+    InstanceGroupManager result =
+        basicGoogleDeployHandler.buildInstanceGroupFromInput(
+            description, serverGroupName, instanceTemplateUrl, null, null);
+
+    assertNotNull(result);
+    assertEquals(serverGroupName, result.getName());
+    assertEquals(serverGroupName, result.getBaseInstanceName());
+    assertEquals(instanceTemplateUrl, result.getInstanceTemplate());
+    assertEquals(2, result.getTargetSize());
+    assertEquals(null, result.getTargetPools());
+    assertEquals(null, result.getAutoHealingPolicies());
+  }
+
+  @Test
+  void
+      buildInstanceGroupFromInput_whenEmptyTargetPoolsAndAutoHealingPolicies_returnsInstanceGroupManager() {
+    BasicGoogleDeployDescription description = new BasicGoogleDeployDescription();
+    description.setTargetSize(5);
+    String serverGroupName = "test-server-group";
+    String instanceTemplateUrl = "http://instance-template-url";
+    List<String> emptyTargetPools = Collections.emptyList();
+    List<InstanceGroupManagerAutoHealingPolicy> emptyAutoHealingPolicies =
= Collections.emptyList(); + + InstanceGroupManager result = + basicGoogleDeployHandler.buildInstanceGroupFromInput( + description, + serverGroupName, + instanceTemplateUrl, + emptyTargetPools, + emptyAutoHealingPolicies); + + assertNotNull(result); + assertEquals(serverGroupName, result.getName()); + assertEquals(serverGroupName, result.getBaseInstanceName()); + assertEquals(instanceTemplateUrl, result.getInstanceTemplate()); + assertEquals(5, result.getTargetSize()); + assertEquals(emptyTargetPools, result.getTargetPools()); + assertEquals(emptyAutoHealingPolicies, result.getAutoHealingPolicies()); + } + + @Test + void testSetNamedPortsToInstanceGroup_withLoadBalancingPolicyNamedPorts() { + List namedPorts = List.of(new NamedPort().setName("http").setPort(80)); + GoogleHttpLoadBalancingPolicy loadBalancingPolicy = new GoogleHttpLoadBalancingPolicy(); + loadBalancingPolicy.setNamedPorts(namedPorts); + BasicGoogleDeployHandler.LoadBalancerInfo mockLBInfo = + mock(BasicGoogleDeployHandler.LoadBalancerInfo.class); + InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class); + + BasicGoogleDeployDescription.Source source = new BasicGoogleDeployDescription.Source(); + source.setServerGroupName("server-group"); + source.setRegion("us-central1"); + + when(mockDescription.getSource()).thenReturn(source); + + when(mockDescription.getLoadBalancingPolicy()).thenReturn(loadBalancingPolicy); + doReturn(true) + .when(basicGoogleDeployHandler) + .hasBackedServiceFromInput(mockDescription, mockLBInfo); + + basicGoogleDeployHandler.setNamedPortsToInstanceGroup( + mockDescription, mockLBInfo, instanceGroupManager); + verify(instanceGroupManager).setNamedPorts(namedPorts); + } + + @Test + void testSetNamedPortsToInstanceGroup_withSourceServerGroupNamedPorts() { + Map sourceNamedPorts = Map.of("http", 80, "https", 443); + GoogleServerGroup sourceServerGroup = new GoogleServerGroup(); + sourceServerGroup.setNamedPorts(sourceNamedPorts); + + BasicGoogleDeployDescription.Source source = new BasicGoogleDeployDescription.Source(); + source.setServerGroupName("source-server-group"); + source.setRegion("us-central1"); + + BasicGoogleDeployHandler.LoadBalancerInfo mockLBInfo = + mock(BasicGoogleDeployHandler.LoadBalancerInfo.class); + InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class); + + when(mockDescription.getSource()).thenReturn(source); + when(mockDescription.getLoadBalancingPolicy()).thenReturn(null); + when(googleClusterProvider.getServerGroup(any(), anyString(), anyString())) + .thenReturn(sourceServerGroup.getView()); + doReturn(true) + .when(basicGoogleDeployHandler) + .hasBackedServiceFromInput(mockDescription, mockLBInfo); + + basicGoogleDeployHandler.setNamedPortsToInstanceGroup( + mockDescription, mockLBInfo, instanceGroupManager); + verify(instanceGroupManager) + .setNamedPorts( + argThat( + list -> + new HashSet<>( + List.of( + new NamedPort().setName("http").setPort(80), + new NamedPort().setName("https").setPort(443))) + .equals(new HashSet<>(list)))); + } + + @Test + void testSetNamedPortsToInstanceGroup_withNoNamedPortsOrSourceSetsDefault() { + BasicGoogleDeployHandler.LoadBalancerInfo mockLBInfo = + mock(BasicGoogleDeployHandler.LoadBalancerInfo.class); + InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class); + + BasicGoogleDeployDescription.Source source = new BasicGoogleDeployDescription.Source(); + source.setServerGroupName("source-server-group"); + source.setRegion("us-central1"); + + 
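+    // With no named ports on the load-balancing policy and none on the source server group,
+    // the handler is expected to fall back to the default HTTP named port (verified below).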
when(mockDescription.getLoadBalancingPolicy()).thenReturn(null); + when(mockDescription.getSource()).thenReturn(source); + doReturn(true) + .when(basicGoogleDeployHandler) + .hasBackedServiceFromInput(mockDescription, mockLBInfo); + + basicGoogleDeployHandler.setNamedPortsToInstanceGroup( + mockDescription, mockLBInfo, instanceGroupManager); + + verify(instanceGroupManager) + .setNamedPorts( + List.of( + new NamedPort() + .setName(GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME) + .setPort(GoogleHttpLoadBalancingPolicy.getHTTP_DEFAULT_PORT()))); + } + + @Test + void testSetNamedPortsToInstanceGroup_withLoadBalancingPolicyListeningPort() { + BasicGoogleDeployHandler.LoadBalancerInfo mockLBInfo = + mock(BasicGoogleDeployHandler.LoadBalancerInfo.class); + InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class); + GoogleHttpLoadBalancingPolicy loadBalancingPolicy = new GoogleHttpLoadBalancingPolicy(); + loadBalancingPolicy.setListeningPort(8080); + BasicGoogleDeployDescription.Source source = new BasicGoogleDeployDescription.Source(); + source.setServerGroupName(""); // empty serverGroupName + + when(mockDescription.getSource()).thenReturn(source); + when(mockDescription.getLoadBalancingPolicy()).thenReturn(loadBalancingPolicy); + doReturn(true) + .when(basicGoogleDeployHandler) + .hasBackedServiceFromInput(mockDescription, mockLBInfo); + + basicGoogleDeployHandler.setNamedPortsToInstanceGroup( + mockDescription, mockLBInfo, instanceGroupManager); + + verify(instanceGroupManager) + .setNamedPorts( + List.of( + new NamedPort() + .setName(GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME) + .setPort(8080))); + } + + @Test + void testCreateInstanceGroupManagerFromInput_whenRegional() throws IOException { + BasicGoogleDeployHandler.LoadBalancerInfo mockLBInfo = + mock(BasicGoogleDeployHandler.LoadBalancerInfo.class); + InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class); + + when(mockDescription.getRegional()).thenReturn(true); + String serverGroupName = "test-server-group"; + String region = "us-central1"; + + doNothing().when(basicGoogleDeployHandler).setDistributionPolicyToInstanceGroup(any(), any()); + doReturn("") + .when(basicGoogleDeployHandler) + .createRegionalInstanceGroupManagerAndWait(any(), any(), any(), anyString(), any(), any()); + doNothing() + .when(basicGoogleDeployHandler) + .createRegionalAutoscaler(any(), any(), any(), any(), any()); + + basicGoogleDeployHandler.createInstanceGroupManagerFromInput( + mockDescription, instanceGroupManager, mockLBInfo, serverGroupName, region, mockTask); + + verify(basicGoogleDeployHandler).setDistributionPolicyToInstanceGroup(any(), any()); + verify(basicGoogleDeployHandler) + .createRegionalInstanceGroupManagerAndWait(any(), any(), any(), any(), any(), any()); + verify(basicGoogleDeployHandler).createRegionalAutoscaler(any(), any(), any(), any(), any()); + } + + @Test + void testCreateInstanceGroupManagerFromInput_whenNotRegional() throws IOException { + BasicGoogleDeployHandler.LoadBalancerInfo mockLBInfo = + mock(BasicGoogleDeployHandler.LoadBalancerInfo.class); + InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class); + + when(mockDescription.getRegional()).thenReturn(false); + String serverGroupName = "test-server-group"; + String region = "us-central1"; + + doReturn("") + .when(basicGoogleDeployHandler) + .createInstanceGroupManagerAndWait(any(), any(), any(), any(), any()); + doNothing().when(basicGoogleDeployHandler).createAutoscaler(any(), any(), any(), any()); 
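+    // Zonal (non-regional) path: the zonal create-and-wait variant should be used and no
+    // distribution policy applied (verified below).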
+
+    basicGoogleDeployHandler.createInstanceGroupManagerFromInput(
+        mockDescription,
+        instanceGroupManager,
+        mockLBInfo,
+        serverGroupName,
+        "us-central1",
+        mockTask);
+
+    verify(basicGoogleDeployHandler, never()).setDistributionPolicyToInstanceGroup(any(), any());
+    verify(basicGoogleDeployHandler)
+        .createInstanceGroupManagerAndWait(any(), any(), any(), any(), any());
+    verify(basicGoogleDeployHandler).createAutoscaler(any(), any(), any(), any());
+  }
+
+  @Test
+  void testNoDistributionPolicySet() {
+    InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class);
+    when(mockDescription.getDistributionPolicy()).thenReturn(null);
+    basicGoogleDeployHandler.setDistributionPolicyToInstanceGroup(
+        mockDescription, instanceGroupManager);
+    verify(instanceGroupManager, never()).setDistributionPolicy(any());
+  }
+
+  @Test
+  void testSetDistributionPolicyWithZones() {
+    InstanceGroupManager instanceGroupManager = mock(InstanceGroupManager.class);
+    GoogleDistributionPolicy mockPolicy = mock(GoogleDistributionPolicy.class);
+    when(mockDescription.getDistributionPolicy()).thenReturn(mockPolicy);
+    when(mockDescription.getSelectZones()).thenReturn(true);
+
+    List<String> zones = List.of("zone-1", "zone-2");
+    when(mockPolicy.getZones()).thenReturn(zones);
+
+    when(mockDescription.getCredentials()).thenReturn(mockCredentials);
+    when(mockCredentials.getProject()).thenReturn("test-project");
+    when(mockPolicy.getTargetShape()).thenReturn("ANY_SHAPE");
+    mockedGCEUtil.when(() -> GCEUtil.buildZoneUrl(any(), any())).thenReturn("static-zone");
+
+    basicGoogleDeployHandler.setDistributionPolicyToInstanceGroup(
+        mockDescription, instanceGroupManager);
+
+    verify(instanceGroupManager)
+        .setDistributionPolicy(
+            argThat(
+                policy -> {
+                  var zonesConfig = policy.getZones();
+                  return zonesConfig.size() == 2
+                      && zonesConfig.get(0).getZone().equals("static-zone")
+                      && zonesConfig.get(1).getZone().equals("static-zone")
+                      && "ANY_SHAPE".equals(policy.getTargetShape());
+                }));
+  }
+
+  private GoogleLoadBalancerView mockLoadBalancer(GoogleLoadBalancerType loadBalancerType) {
+    GoogleLoadBalancerView mockLB = mock(GoogleLoadBalancerView.class);
+    when(mockLB.getLoadBalancerType()).thenReturn(loadBalancerType);
+    return mockLB;
+  }
+}
diff --git a/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskTest.java b/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskTest.java
new file mode 100644
index 00000000000..05fd52feb84
--- /dev/null
+++ b/clouddriver-google/src/test/java/com/netflix/spinnaker/clouddriver/google/model/GoogleDiskTest.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2024 Harness, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.google.model; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.junit.jupiter.api.Test; + +public class GoogleDiskTest { + + @Test + public void testSettingDiskTypePdSsd() { + GoogleDisk disk = new GoogleDisk(); + disk.setType("pd-ssd"); + + assertEquals(GoogleDiskType.PD_SSD, disk.getType()); + } + + @Test + public void testSettingDiskTypeHyperdiskBalanced() { + GoogleDisk disk = new GoogleDisk(); + disk.setType("hyperdisk-balanced"); + + assertEquals(GoogleDiskType.HYPERDISK_BALANCED, disk.getType()); + } + + @Test + public void testDefaultDiskTypeOnUnknown() { + GoogleDisk disk = new GoogleDisk(); + disk.setType("UNKNOWN"); + + assertEquals(GoogleDiskType.PD_STANDARD, disk.getType()); + } + + @Test + public void testPersistentDiskDetection() { + GoogleDisk disk = new GoogleDisk(); + disk.setType("pd-ssd"); + + assertTrue(disk.isPersistent()); + } +} diff --git a/clouddriver-huaweicloud/clouddriver-huaweicloud.gradle b/clouddriver-huaweicloud/clouddriver-huaweicloud.gradle new file mode 100644 index 00000000000..080d62c3aa4 --- /dev/null +++ b/clouddriver-huaweicloud/clouddriver-huaweicloud.gradle @@ -0,0 +1,36 @@ +dependencies { + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + + implementation "io.spinnaker.kork:kork-exceptions" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-moniker" + implementation "org.glassfish.jersey.inject:jersey-hk2:2.28" + implementation('com.huawei:openstack4j:1.0.17') { + // Use logback, and prevent warnings about multiple slf4j bindings + exclude group: 'org.slf4j', module: 'slf4j-simple' + } + implementation "org.glassfish.jersey.core:jersey-client:2.22.1" + implementation "org.glassfish.jersey.media:jersey-media-json-jackson:2.11" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation 'org.apache.commons:commons-lang3' + + testImplementation "cglib:cglib-nodep" + testImplementation "commons-fileupload:commons-fileupload:1.4" + testImplementation "org.apache.httpcomponents:httpmime" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.mockito:mockito-core" + testImplementation "org.mockito:mockito-junit-jupiter" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudOperation.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudOperation.java new file mode 100644 index 00000000000..47603d3cf4b --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudOperation.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target(ElementType.TYPE)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface HuaweiCloudOperation {
+  String value();
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudProvider.java
new file mode 100644
index 00000000000..d6691397328
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudProvider.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud;
+
+import com.netflix.spinnaker.clouddriver.core.CloudProvider;
+import java.lang.annotation.Annotation;
+import org.springframework.stereotype.Component;
+
+@Component
+public class HuaweiCloudProvider implements CloudProvider {
+  public static final String ID = "huaweicloud";
+
+  final String id = ID;
+  final String displayName = "HuaweiCloud";
+  final Class<? extends Annotation> operationAnnotationType = HuaweiCloudOperation.class;
+
+  @Override
+  public String getId() {
+    return id;
+  }
+
+  @Override
+  public String getDisplayName() {
+    return displayName;
+  }
+
+  @Override
+  public Class<? extends Annotation> getOperationAnnotationType() {
+    return operationAnnotationType;
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudUtils.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudUtils.java
new file mode 100644
index 00000000000..5ee70bcc0ea
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/HuaweiCloudUtils.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud;
+
+import java.util.Collection;
+import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.StringUtils;
+
+public class HuaweiCloudUtils {
+  public static boolean isEmptyStr(Object str) {
+    return StringUtils.isEmpty(str);
+  }
+
+  public static boolean isEmptyCollection(Collection<?> c) {
+    return c == null || c.isEmpty();
+  }
+
+  public static boolean isEmptyMap(Map<?, ?> m) {
+    return m == null || m.isEmpty();
+  }
+
+  public static Logger getLogger(Class<?> clazz) {
+    return LoggerFactory.getLogger(clazz);
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/cache/CacheResultBuilder.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/cache/CacheResultBuilder.java
new file mode 100644
index 00000000000..dbfa16a41fb
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/cache/CacheResultBuilder.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.cache;
+
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import lombok.Getter;
+
+public class CacheResultBuilder {
+
+  private final long startTime;
+
+  private final CacheMutation onDemand = new CacheMutation();
+
+  private final Map<String, NamespaceCache> namespaceBuilders = new HashMap<>();
+
+  public CacheResultBuilder(long startTime) {
+    this.startTime = startTime;
+  }
+
+  public long getStartTime() {
+    return startTime;
+  }
+
+  public CacheMutation getOnDemand() {
+    return this.onDemand;
+  }
+
+  public NamespaceCache getNamespaceCache(String ns) {
+    if (namespaceBuilders.containsKey(ns)) {
+      return namespaceBuilders.get(ns);
+    }
+    NamespaceCache cache = new NamespaceCache(ns);
+    namespaceBuilders.put(ns, cache);
+    return cache;
+  }
+
+  public DefaultCacheResult build() {
+    Map<String, Collection<String>> evict = new HashMap<>();
+    Map<String, Collection<CacheData>> keep = new HashMap<>();
+
+    if (!onDemand.getToKeep().isEmpty()) {
+      keep.put(Keys.Namespace.ON_DEMAND.ns, onDemand.getToKeep().values());
+    }
+
+    if (!onDemand.getToEvict().isEmpty()) {
+      evict.put(Keys.Namespace.ON_DEMAND.ns, onDemand.getToEvict());
+    }
+
+    namespaceBuilders.forEach(
+        (namespace, item) -> {
+          if (!item.getToKeep().isEmpty()) {
+            keep.put(namespace, item.getCacheDatas());
+          }
+
+          if (!item.getToEvict().isEmpty()) {
+            evict.put(namespace, item.getToEvict());
+          }
+        });
+
+    return new DefaultCacheResult(keep, evict);
+  }
+
+  @Getter
+  public static class CacheMutation {
+    private final List<String> toEvict = new ArrayList<>();
+
+    private final Map<String, CacheData> toKeep = new HashMap<>();
+  }
+
+  @Getter
+  public static class NamespaceCache {
+    private final String namespace;
+
+    private final List<String> toEvict = new ArrayList<>();
+
+    private final Map<String, CacheDataBuilder> toKeep = new HashMap<>();
+
+    public NamespaceCache(String namespace) {
+      this.namespace = namespace;
+    }
+
+    public CacheDataBuilder getCacheDataBuilder(String key) {
+      if (toKeep.containsKey(key)) {
+        return toKeep.get(key);
+      }
+
+      CacheDataBuilder builder = new CacheDataBuilder(key);
+      toKeep.put(key, builder);
+      return builder;
+    }
+
+    public Collection<CacheData> getCacheDatas() {
+      Collection<CacheData> result = new ArrayList<>(toKeep.size());
+
+      toKeep.forEach((k, item) -> result.add(item.build()));
+
+      return result;
+    }
+  }
+
+  public static class CacheDataBuilder {
+    private final String id;
+    private int ttlSeconds = -1;
+    private Map<String, Object> attributes = new HashMap<>();
+    private final Map<String, Collection<String>> relationships = new HashMap<>();
+
+    public CacheDataBuilder(String id) {
+      this.id = id;
+    }
+
+    public DefaultCacheData build() {
+      return new DefaultCacheData(id, ttlSeconds, attributes, relationships);
+    }
+
+    public void setTtlSeconds(int value) {
+      this.ttlSeconds = value;
+    }
+
+    public Map<String, Object> getAttributes() {
+      return this.attributes;
+    }
+
+    public void setAttributes(Map<String, Object> attributes) {
+      this.attributes = attributes;
+    }
+
+    public Map<String, Collection<String>> getRelationships() {
+      return relationships;
+    }
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/cache/Keys.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/cache/Keys.java
new file mode 100644
index 00000000000..99d77d2a073
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/cache/Keys.java
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.cache;
+
+import com.google.common.base.CaseFormat;
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.cache.KeyParser;
+import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider;
+import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@Component("HuaweiCloudKeys")
+public class Keys implements KeyParser {
+
+  public static enum Namespace {
+    IMAGES,
+    INSTANCE_TYPES,
+    NETWORKS,
+    SECURITY_GROUPS,
+    SUBNETS,
+    ON_DEMAND;
+
+    public final String ns;
+
+    private Namespace() {
+      ns = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, this.name());
+    }
+
+    @Override
+    public String toString() {
+      return ns;
+    }
+  }
+
+  @Override
+  public Map<String, String> parseKey(String key) {
+    return parse(key);
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return getCloudProviderId();
+  }
+
+  @Override
+  public Boolean canParseType(String type) {
+    for (Namespace key : Namespace.values()) {
+      if (key.toString().equals(type)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public Boolean canParseField(String field) {
+    return false;
+  }
+
+  private static final String SEPARATOR = ":";
+
+  private static String getCloudProviderId() {
+    return HuaweiCloudProvider.ID;
+  }
+
+  private static Map<String, String> emptyMap() {
+    return Collections.emptyMap();
+  }
+
+  public static Map<String, String> parse(String key, Namespace targetType) {
+    Map<String, String> keys = parse(key);
+    return (!keys.isEmpty() && !targetType.ns.equals(keys.get("type"))) ? emptyMap() : keys;
+  }
+
+  public static Map<String, String> parse(String key) {
+    if (HuaweiCloudUtils.isEmptyStr(key)) {
+      return emptyMap();
+    }
+
+    String[] parts = key.split(SEPARATOR);
+    if ((parts.length < 2) || (!getCloudProviderId().equals(parts[0]))) {
+      return emptyMap();
+    }
+
+    Namespace ns;
+    try {
+      ns = Namespace.valueOf(CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, parts[1]));
+    } catch (Exception e) {
+      return emptyMap();
+    }
+
+    Map<String, String> result;
+
+    switch (ns) {
+      case IMAGES:
+        result = parseSimpleKey(parts);
+        break;
+      case INSTANCE_TYPES:
+        result = parseSimpleKey(parts);
+        break;
+      case NETWORKS:
+        result = parseSimpleKey(parts);
+        break;
+      case SECURITY_GROUPS:
+        result = parseSecurityGroupKey(parts);
+        break;
+      case SUBNETS:
+        result = parseSimpleKey(parts);
+        break;
+      default:
+        return emptyMap();
+    }
+
+    if (result.isEmpty()) {
+      return result;
+    }
+
+    result.put("provider", parts[0]);
+    result.put("type", parts[1]);
+    return result;
+  }
+
+  public static String getImageKey(String imageId, String account, String region) {
+    return getSimpleKey(Namespace.IMAGES, imageId, account, region);
+  }
+
+  public static String getInstanceTypeKey(String instanceType, String account, String region) {
+    return getSimpleKey(Namespace.INSTANCE_TYPES, instanceType, account, region);
+  }
+
+  public static String getNetworkKey(String networkId, String account, String region) {
+    return getSimpleKey(Namespace.NETWORKS, networkId, account, region);
+  }
+
+  public static String getSecurityGroupKey(
+      String securityGroupName, String securityGroupId, String account, String region) {
+    String identifier = securityGroupName + SEPARATOR + securityGroupId;
+    return getSimpleKey(Namespace.SECURITY_GROUPS, identifier, account, region);
+  }
+
+  private static Map<String, String> parseSecurityGroupKey(String[] parts) {
+    if (parts.length != 6) {
+      return emptyMap();
+    }
+
+    Names names = Names.parseName(parts[4]);
+    if (HuaweiCloudUtils.isEmptyStr(names.getApp())) {
+      return emptyMap();
+    }
+
+    Map<String, String> result = new HashMap<>();
+    result.put("application", names.getApp());
+    result.put("account", parts[2]);
+    result.put("region", parts[3]);
+    result.put("name", parts[4]);
+    result.put("id", parts[5]);
+
+    return result;
+  }
+
+  public static String getSubnetKey(String subnetId, String account, String region) {
+    return getSimpleKey(Namespace.SUBNETS, subnetId, account, region);
+  }
+
+  private static String getSimpleKey(
+      Namespace namespace, String identifier, String account, String region) {
+    return getCloudProviderId()
+        + SEPARATOR
+        + namespace
+        + SEPARATOR
+        + account
+        + SEPARATOR
+        + region
+        + SEPARATOR
+        + identifier;
+  }
+
+  private static Map<String, String> parseSimpleKey(String[] parts) {
+    if (parts.length != 5) {
+      return emptyMap();
+    }
+
+    Map<String, String> result = new HashMap<>();
+    result.put("account", parts[2]);
+    result.put("region", parts[3]);
+    result.put("id", parts[4]);
+    return result;
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/AuthorizedClientProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/AuthorizedClientProvider.java
new file mode 100644
index 00000000000..b32dc732bb2
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/AuthorizedClientProvider.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.client;
+
+import com.huawei.openstack4j.api.OSClient;
+
+public interface AuthorizedClientProvider {
+  /**
+   * Get an authorized HuaweiCloud client.
+   *
+   * @return an authenticated client bound to this provider's credentials
+   */
+  OSClient getAuthClient();
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/HuaweiCloudClient.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/HuaweiCloudClient.java
new file mode 100644
index 00000000000..786abc800ea
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/HuaweiCloudClient.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.client;
+
+import com.huawei.openstack4j.model.compute.ext.AvailabilityZone;
+import com.huawei.openstack4j.openstack.ecs.v1.domain.Flavor;
+import com.huawei.openstack4j.openstack.ims.v2.domain.Image;
+import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroup;
+import com.huawei.openstack4j.openstack.vpc.v1.domain.Subnet;
+import com.huawei.openstack4j.openstack.vpc.v1.domain.Vpc;
+import com.netflix.spinnaker.clouddriver.huaweicloud.exception.HuaweiCloudException;
+import java.util.List;
+
+public interface HuaweiCloudClient {
+  /**
+   * List availability zones in a region.
+   *
+   * @param region the region name
+   * @return the availability zones of the region
+   */
+  List<? extends AvailabilityZone> getZones(String region) throws HuaweiCloudException;
+
+  /**
+   * List images in a region.
+   *
+   * @param region the region name
+   * @return the images of the region
+   */
+  List<Image> getImages(String region) throws HuaweiCloudException;
+
+  /**
+   * List instance types in a region.
+   *
+   * @param region the region name
+   * @param az the availability zone name
+   * @return the instance types available in the availability zone
+   */
+  List<Flavor> getInstanceTypes(String region, String az) throws HuaweiCloudException;
+
+  /**
+   * List security groups in a region.
+   *
+   * @param region the region name
+   * @return the security groups of the region
+   */
+  List<SecurityGroup> getSecurityGroups(String region) throws HuaweiCloudException;
+
+  /**
+   * List the available subnets in a region.
+   *
+   * @param region the region name
+   * @return the subnets of the region
+   */
+  List<Subnet> getSubnets(String region) throws HuaweiCloudException;
+
+  /**
+   * List the available vpcs in a region.
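+   * <p>Implementations should return an empty list rather than null when a region has no
+   * VPCs (HuaweiCloudClientImpl defaults to an empty list via handleInvoking).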
+   *
+   * @param region the region name
+   * @return the vpcs of the region
+   */
+  List<Vpc> getVpcs(String region) throws HuaweiCloudException;
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/HuaweiCloudClientImpl.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/HuaweiCloudClientImpl.java
new file mode 100644
index 00000000000..23b34315da6
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/client/HuaweiCloudClientImpl.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.client;
+
+import com.huawei.openstack4j.api.OSClient;
+import com.huawei.openstack4j.model.compute.ext.AvailabilityZone;
+import com.huawei.openstack4j.openstack.ecs.v1.domain.Flavor;
+import com.huawei.openstack4j.openstack.ims.v2.domain.Image;
+import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroup;
+import com.huawei.openstack4j.openstack.vpc.v1.domain.Subnet;
+import com.huawei.openstack4j.openstack.vpc.v1.domain.Vpc;
+import com.netflix.spinnaker.clouddriver.huaweicloud.exception.HuaweiCloudException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+public class HuaweiCloudClientImpl implements HuaweiCloudClient {
+  private final AuthorizedClientProvider provider;
+
+  public HuaweiCloudClientImpl(AuthorizedClientProvider provider) {
+    this.provider = provider;
+  }
+
+  private static <T> T handleInvoking(String doWhat, Callable<T> closure, T defaultResult) {
+    try {
+      T r = closure.call();
+      return r == null ? defaultResult : r;
+    } catch (Exception e) {
+      throw new HuaweiCloudException(doWhat, e);
+    }
+  }
+
+  private static <T> List<T> emptyList() {
+    return Collections.emptyList();
+  }
+
+  private OSClient getRegionClient(String region) {
+    return this.provider.getAuthClient().useRegion(region);
+  }
+
+  @Override
+  public List<? extends AvailabilityZone> getZones(String region) throws HuaweiCloudException {
+    return handleInvoking(
+        String.format("getting zones in region(%s)", region),
+        () -> getRegionClient(region).compute().zones().list(),
+        emptyList());
+  }
+
+  @Override
+  public List<Image> getImages(String region) throws HuaweiCloudException {
+    return handleInvoking(
+        String.format("getting images in region(%s)", region),
+        () ->
+            getRegionClient(region)
+                .imsV2()
+                .images()
+                .list(
+                    new HashMap() {
+                      {
+                        put("__imagetype", "gold");
+                        put("status", "active");
+                        put("virtual_env_type", "FusionCompute");
+                      }
+                    }),
+        emptyList());
+  }
+
+  @Override
+  public List<Flavor> getInstanceTypes(String region, String az) throws HuaweiCloudException {
+    return handleInvoking(
+        String.format("getting flavors in availability zone(%s) of region(%s)", az, region),
+        () -> getRegionClient(region).ecs().servers().getSpecifications(az),
+        emptyList());
+  }
+
+  @Override
+  public List<SecurityGroup> getSecurityGroups(String region) throws HuaweiCloudException {
+    return handleInvoking(
+        String.format("getting all security groups in region(%s)", region),
+        () -> getRegionClient(region).vpc().securityGroups().list(),
+        emptyList());
+  }
+
+  @Override
+  public List<Subnet> getSubnets(String region) throws HuaweiCloudException {
+    return handleInvoking(
+        String.format("getting all subnets in region(%s)", region),
+        () -> getRegionClient(region).vpc().subnets().list(),
+        emptyList());
+  }
+
+  @Override
+  public List<Vpc> getVpcs(String region) throws HuaweiCloudException {
+    return handleInvoking(
+        String.format("getting all vpcs in region(%s)", region),
+        () -> getRegionClient(region).vpc().vpcs().list(),
+        emptyList());
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/config/HuaweiCloudConfigurationProperties.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/config/HuaweiCloudConfigurationProperties.java
new file mode 100644
index 00000000000..3cc24c3de59
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/config/HuaweiCloudConfigurationProperties.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.config;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class HuaweiCloudConfigurationProperties {
+
+  @Data
+  public static class ManagedAccount {
+    private String name;
+    private String environment;
+    private String accountType;
+    private String authUrl;
+    private String username;
+    private String password;
+    private String projectName;
+    private String domainName;
+    private Boolean insecure;
+    private List<String> regions;
+  }
+
+  private List<ManagedAccount> accounts;
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/controller/HuaweiCloudImageController.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/controller/HuaweiCloudImageController.java
new file mode 100644
index 00000000000..0cdbdb0580f
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/controller/HuaweiCloudImageController.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.controller;
+
+import com.netflix.spinnaker.cats.mem.InMemoryCache;
+import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils;
+import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudImage;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+
+@RestController
+@RequestMapping("/huaweicloud/images")
+public class HuaweiCloudImageController {
+
+  private final ImageProvider imageProvider;
+
+  @Autowired
+  public HuaweiCloudImageController(ImageProvider imageProvider) {
+    this.imageProvider = imageProvider;
+  }
+
+  @RequestMapping(value = "/find", method = RequestMethod.GET)
+  public Set<HuaweiCloudImage> find(
+      @RequestParam(required = false) String account,
+      @RequestParam(required = false) String q,
+      @RequestParam(required = false) String region) {
+    Set<HuaweiCloudImage> result = this.imageProvider.getAll(account, region);
+    if (result.isEmpty()) {
+      return Collections.emptySet();
+    }
+
+    return result.stream()
+        .filter(getQueryFilter(q))
+        .sorted(Comparator.comparing(image -> image.getName()))
+        .collect(Collectors.toSet());
+  }
+
+  private Predicate<HuaweiCloudImage> getQueryFilter(String q) {
+    Predicate<HuaweiCloudImage> queryFilter = i -> true;
+
+    if ((!HuaweiCloudUtils.isEmptyStr(q)) && (q.trim().length() > 0)) {
+      String glob = q.trim();
+      if (!glob.contains("*")
+          && !glob.contains("?")
+          && !glob.contains("[")
+          && !glob.contains("\\")) {
+        glob = "*" + glob + "*";
+      }
+      Pattern pattern = new InMemoryCache.Glob(glob).toPattern();
+      queryFilter = i -> pattern.matcher(i.getName()).matches();
+    }
+
+    return queryFilter;
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/controller/ImageProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/controller/ImageProvider.java
new file mode 100644
index 00000000000..a9b890dd198
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/controller/ImageProvider.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.controller;
+
+import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudImage;
+import java.util.Set;
+
+public interface ImageProvider {
+  /** Get all images for the given account and region. */
+  Set<HuaweiCloudImage> getAll(String account, String region);
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/exception/HuaweiCloudException.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/exception/HuaweiCloudException.java
new file mode 100644
index 00000000000..df3c50ea803
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/exception/HuaweiCloudException.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.exception;
+
+import com.huawei.openstack4j.model.common.ActionResponse;
+import com.netflix.spinnaker.kork.exceptions.IntegrationException;
+
+public class HuaweiCloudException extends IntegrationException {
+
+  public HuaweiCloudException(String message) {
+    super(message);
+  }
+
+  public HuaweiCloudException(String doWhat, Exception e) {
+    super(String.format("Error %s, error is: %s", doWhat, e.getMessage()));
+  }
+
+  public HuaweiCloudException(String doWhat, ActionResponse actionResponse) {
+    super(
+        String.format(
+            "Error %s, error is: %s and error code is: %d",
+            doWhat, actionResponse.getFault(), actionResponse.getCode()));
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudImage.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudImage.java
new file mode 100644
index 00000000000..a29e0119fc4
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudImage.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.Image;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@Getter
+@AllArgsConstructor
+public class HuaweiCloudImage implements Image {
+  private String id;
+  private String name;
+  private String region;
+  private String account;
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudInstanceType.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudInstanceType.java
new file mode 100644
index 00000000000..553a17e6a9a
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudInstanceType.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.model; + +import com.netflix.spinnaker.clouddriver.model.InstanceType; +import lombok.AllArgsConstructor; +import lombok.Getter; + +@Getter +@AllArgsConstructor +public class HuaweiCloudInstanceType implements InstanceType { + private String name; + private String region; + private String account; +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudNetwork.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudNetwork.java new file mode 100644 index 00000000000..7cb78311ce2 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudNetwork.java @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.model; + +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider; +import com.netflix.spinnaker.clouddriver.model.Network; +import lombok.AllArgsConstructor; +import lombok.Getter; + +@Getter +@AllArgsConstructor +public class HuaweiCloudNetwork implements Network { + private final String cloudProvider = HuaweiCloudProvider.ID; + + private String id; + private String name; + private String region; + private String account; +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroup.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroup.java new file mode 100644 index 00000000000..5fd28d41287 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroup.java @@ -0,0 +1,47 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.model;
+
+import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider;
+import com.netflix.spinnaker.clouddriver.model.SecurityGroup;
+import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary;
+import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule;
+import java.util.Set;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@Getter
+@AllArgsConstructor
+public class HuaweiCloudSecurityGroup implements SecurityGroup {
+  private final String type = HuaweiCloudProvider.ID;
+  private final String cloudProvider = HuaweiCloudProvider.ID;
+
+  private String id;
+  private String name;
+  private String region;
+  private String accountName;
+  private String application;
+  private String vpcId;
+  private Set<Rule> inboundRules;
+  private Set<Rule> outboundRules;
+
+  @Override
+  public SecurityGroupSummary getSummary() {
+    return new HuaweiCloudSecurityGroupSummary(id, name, vpcId);
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroupCacheData.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroupCacheData.java
new file mode 100644
index 00000000000..7c7e30ade06
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroupCacheData.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.model;
+
+import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroup;
+import java.util.Map;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+public class HuaweiCloudSecurityGroupCacheData {
+  private SecurityGroup securityGroup;
+
+  /**
+   * The remote security groups referenced by the current security group's rules. The key is the
+   * remote security group id, and the value is the cache data id of that security group.
+   */
+  private Map<String, String> relevantSecurityGroups;
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroupSummary.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroupSummary.java
new file mode 100644
index 00000000000..80f5e095f51
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSecurityGroupSummary.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.model; + +import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary; +import lombok.AllArgsConstructor; +import lombok.Getter; + +@Getter +@AllArgsConstructor +public class HuaweiCloudSecurityGroupSummary implements SecurityGroupSummary { + private String id; + private String name; + private String vpcId; +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSubnet.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSubnet.java new file mode 100644 index 00000000000..eac9f27dd1d --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/model/HuaweiCloudSubnet.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.model; + +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider; +import com.netflix.spinnaker.clouddriver.model.Subnet; +import lombok.AllArgsConstructor; +import lombok.Getter; + +@Getter +@AllArgsConstructor +public class HuaweiCloudSubnet implements Subnet { + private final String type = HuaweiCloudProvider.ID; + + private String id; + private String name; + private String cidr; + private String vpcId; + private String region; + private String account; + private String purpose; +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/HuaweiCloudInfrastructureProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/HuaweiCloudInfrastructureProvider.java new file mode 100644 index 00000000000..321ec3eff4a --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/HuaweiCloudInfrastructureProvider.java @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.provider;
+
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.IMAGES;
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.INSTANCE_TYPES;
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.NETWORKS;
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.SECURITY_GROUPS;
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.SUBNETS;
+
+import com.netflix.spinnaker.cats.agent.Agent;
+import com.netflix.spinnaker.cats.agent.AgentSchedulerAware;
+import com.netflix.spinnaker.clouddriver.cache.SearchableProvider;
+import com.netflix.spinnaker.clouddriver.cache.SearchableProvider.SearchableResource;
+import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+
+@ConditionalOnProperty("huaweicloud.enabled")
+public class HuaweiCloudInfrastructureProvider extends AgentSchedulerAware
+    implements SearchableProvider {
+
+  private final Collection<Agent> agents;
+
+  private final Set<String> defaultCaches =
+      new HashSet<String>() {
+        {
+          add(IMAGES.ns);
+          add(INSTANCE_TYPES.ns);
+          add(NETWORKS.ns);
+          add(SECURITY_GROUPS.ns);
+          add(SUBNETS.ns);
+        }
+      };
+
+  private final Map<String, String> urlMappingTemplates = Collections.emptyMap();
+
+  private final Map<SearchableResource, SearchableProvider.SearchResultHydrator>
+      searchResultHydrators = Collections.emptyMap();
+
+  public HuaweiCloudInfrastructureProvider(Collection<Agent> agents) {
+    this.agents = agents;
+  }
+
+  @Override
+  public String getProviderName() {
+    return this.getClass().getName();
+  }
+
+  @Override
+  public Collection<Agent> getAgents() {
+    return agents;
+  }
+
+  @Override
+  public Set<String> getDefaultCaches() {
+    return defaultCaches;
+  }
+
+  @Override
+  public Map<String, String> getUrlMappingTemplates() {
+    return urlMappingTemplates;
+  }
+
+  @Override
+  public Map<SearchableResource, SearchableProvider.SearchResultHydrator>
+      getSearchResultHydrators() {
+    return searchResultHydrators;
+  }
+
+  @Override
+  public Map<String, String> parseKey(String key) {
+    return Keys.parse(key);
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/AbstractHuaweiCloudCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/AbstractHuaweiCloudCachingAgent.java
new file mode 100644
index 00000000000..9b79e75b9a7
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/AbstractHuaweiCloudCachingAgent.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient; +import com.netflix.spinnaker.clouddriver.huaweicloud.provider.HuaweiCloudInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials; + +public abstract class AbstractHuaweiCloudCachingAgent implements CachingAgent, AccountAware { + + final HuaweiCloudNamedAccountCredentials credentials; + final ObjectMapper objectMapper; + final String region; + + public AbstractHuaweiCloudCachingAgent( + HuaweiCloudNamedAccountCredentials credentials, ObjectMapper objectMapper, String region) { + this.credentials = credentials; + this.objectMapper = objectMapper; + this.region = region; + } + + @Override + public String getAccountName() { + return credentials.getName(); + } + + @Override + public String getProviderName() { + return HuaweiCloudInfrastructureProvider.class.getName(); + } + + @Override + public String getAgentType() { + return String.format("%s/%s/%s", getAccountName(), region, getAgentName()); + } + + HuaweiCloudClient getCloudClient() { + return credentials.getCloudClient(); + } + + abstract String getAgentName(); +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/AbstractOnDemandCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/AbstractOnDemandCachingAgent.java new file mode 100644 index 00000000000..d32030f453c --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/AbstractOnDemandCachingAgent.java @@ -0,0 +1,223 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent;
+
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.ON_DEMAND;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils;
+import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder;
+import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys;
+import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials;
+import java.time.Duration;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+
+public abstract class AbstractOnDemandCachingAgent extends AbstractHuaweiCloudCachingAgent
+    implements OnDemandAgent {
+
+  private static final Logger log = HuaweiCloudUtils.getLogger(AbstractOnDemandCachingAgent.class);
+
+  private final String namespace;
+
+  public AbstractOnDemandCachingAgent(
+      HuaweiCloudNamedAccountCredentials credentials,
+      ObjectMapper objectMapper,
+      String namespace,
+      String region) {
+    super(credentials, objectMapper, region);
+
+    this.namespace = namespace;
+  }
+
+  @Override
+  public String getOnDemandAgentType() {
+    return getAgentType() + "-OnDemand";
+  }
+
+  @Override
+  public Collection<Map<String, Object>> pendingOnDemandRequests(ProviderCache providerCache) {
+    Collection<CacheData> datas = providerCache.getAll(ON_DEMAND.ns);
+    if (HuaweiCloudUtils.isEmptyCollection(datas)) {
+      return Collections.emptyList();
+    }
+
+    return datas.stream()
+        .filter(
+            cacheData -> {
+              Map<String, String> parsedKey = Keys.parse(cacheData.getId());
+
+              return !parsedKey.isEmpty()
+                  && getAccountName().equals(parsedKey.get("account"))
+                  && region.equals(parsedKey.get("region"));
+            })
+        .map(
+            cacheData -> {
+              Map<String, String> details = Keys.parse(cacheData.getId());
+              Map<String, Object> attributes = cacheData.getAttributes();
+
+              return new HashMap<String, Object>() {
+                {
+                  put("details", details);
+                  put("moniker", convertOnDemandDetails(details));
+                  put("cacheTime", attributes.get("cacheTime"));
+                  put("processedTime", attributes.get("processedTime"));
+                  put("processedCount", attributes.get("processedCount"));
+                }
+              };
+            })
+        .collect(Collectors.toList());
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    long startTime = System.currentTimeMillis();
+    CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(startTime);
+
+    buildCurrentNamespaceCacheData(cacheResultBuilder);
+
+    Collection<String> keys =
+        cacheResultBuilder.getNamespaceCache(this.namespace).getToKeep().keySet();
+
+    Collection<CacheData> datas = providerCache.getAll(ON_DEMAND.ns, keys);
+    if (!HuaweiCloudUtils.isEmptyCollection(datas)) {
+      datas.forEach(
+          cacheData -> {
+            long cacheTime = 0; // The cache time of old cache data should be smaller than now.
+            if (cacheData.getAttributes().get("cacheTime") != null) {
+              cacheTime = (long) cacheData.getAttributes().get("cacheTime");
+            }
+
+            if (cacheTime < startTime) {
+              // The "processedCount" will be set below.
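+              // Keep-or-evict rule for stale on-demand entries: an entry that has
+              // already been processed at least once (processedCount > 0) has served
+              // its purpose and can be evicted, while an unprocessed entry is kept so
+              // its cached results are not lost before they are merged.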
+              int processedCount = 0;
+              if (cacheData.getAttributes().get("processedCount") != null) {
+                processedCount = (int) cacheData.getAttributes().get("processedCount");
+              }
+
+              if (processedCount > 0) {
+                cacheResultBuilder.getOnDemand().getToEvict().add(cacheData.getId());
+              } else {
+                cacheResultBuilder.getOnDemand().getToKeep().put(cacheData.getId(), cacheData);
+              }
+            } else {
+              // If the cache time is later than now, the on-demand cache data should be
+              // moved to the namespace cache. That should not normally happen, so log
+              // it for investigation.
+              log.warn(
+                  "The cache time ({}) of on-demand data (key={}) is later than now ({})",
+                  cacheTime,
+                  cacheData.getId(),
+                  startTime);
+            }
+          });
+    }
+
+    CacheResult result = cacheResultBuilder.build();
+
+    result
+        .getCacheResults()
+        .getOrDefault(ON_DEMAND.ns, Collections.emptyList())
+        .forEach(
+            cacheData -> {
+              cacheData.getAttributes().put("processedTime", System.currentTimeMillis());
+
+              int count = 0;
+              if (cacheData.getAttributes().get("processedCount") != null) {
+                count = (int) cacheData.getAttributes().get("processedCount");
+              }
+              cacheData.getAttributes().put("processedCount", count + 1);
+            });
+
+    return result;
+  }
+
+  protected OnDemandResult handle(ProviderCache providerCache, String name) {
+
+    Optional<Object> resource = getMetricsSupport().readData(() -> getResourceByName(name));
+
+    if (resource.isPresent()) {
+      CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(Long.MAX_VALUE);
+
+      buildSingleResourceCacheData(cacheResultBuilder, resource.get());
+
+      CacheResult cacheResult = getMetricsSupport().transformData(() -> cacheResultBuilder.build());
+
+      CacheData cacheData =
+          getMetricsSupport()
+              .onDemandStore(
+                  () -> {
+                    String cacheResults = "";
+                    try {
+                      cacheResults = objectMapper.writeValueAsString(cacheResult.getCacheResults());
+                    } catch (Exception e) {
+                      log.error("Error serializing cache results to string", e);
+                    }
+                    Map<String, Object> attributes = new HashMap<>();
+                    attributes.put("cacheTime", System.currentTimeMillis());
+                    attributes.put("cacheResults", cacheResults);
+                    attributes.put("processedCount", 0);
+
+                    return new DefaultCacheData(
+                        getResourceCacheDataId(resource.get()),
+                        (int) Duration.ofMinutes(10).getSeconds(),
+                        Collections.unmodifiableMap(attributes),
+                        Collections.emptyMap());
+                  });
+      providerCache.putCacheData(ON_DEMAND.ns, cacheData);
+
+      return new OnDemandResult(this.getOnDemandAgentType(), cacheResult, Collections.emptyMap());
+    }
+
+    Collection<String> identifiers = getOnDemandKeysToEvict(providerCache, name);
+    providerCache.evictDeletedItems(ON_DEMAND.ns, identifiers);
+
+    return new OnDemandResult(
+        this.getOnDemandAgentType(),
+        new DefaultCacheResult(Collections.emptyMap()),
+        new HashMap<String, Collection<String>>() {
+          {
+            put(namespace, identifiers);
+          }
+        });
+  }
+
+  /** Load the current resources in the cloud and build namespace cache data for them. */
+  abstract void buildCurrentNamespaceCacheData(CacheResultBuilder cacheResultBuilder);
+
+  /** Build namespace cache data for a single resource. */
+  abstract void buildSingleResourceCacheData(
+      CacheResultBuilder cacheResultBuilder, Object resource);
+
+  /** Load a single cloud resource (security group, server group, etc.) by name from the cloud. */
+  abstract Optional<Object> getResourceByName(String name);
+
+  abstract String getResourceCacheDataId(Object resource);
+
+  /**
+   * Build the identifiers to evict when the resource with the specified name is not found in the
+   * cloud.
+   */
+  abstract Collection<String> getOnDemandKeysToEvict(ProviderCache providerCache, String name);
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudImageCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudImageCachingAgent.java
new file mode 100644
index 00000000000..9b47ad8565b
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudImageCachingAgent.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.IMAGES;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.huawei.openstack4j.openstack.ims.v2.domain.Image;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder;
+import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder.NamespaceCache;
+import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys;
+import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+public class HuaweiCloudImageCachingAgent extends AbstractHuaweiCloudCachingAgent {
+
+  public HuaweiCloudImageCachingAgent(
+      HuaweiCloudNamedAccountCredentials credentials, ObjectMapper objectMapper, String region) {
+    super(credentials, objectMapper, region);
+  }
+
+  @Override
+  String getAgentName() {
+    return this.getClass().getSimpleName();
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return Collections.unmodifiableCollection(
+        new ArrayList<AgentDataType>() {
+          {
+            add(AUTHORITATIVE.forType(IMAGES.ns));
+          }
+        });
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    List<Image> images = getCloudClient().getImages(region);
+
+    return buildCacheResult(images);
+  }
+
+  private CacheResult buildCacheResult(List<Image> images) {
+    CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(0);
+    NamespaceCache nsCache = cacheResultBuilder.getNamespaceCache(IMAGES.ns);
+
+    TypeReference<Map<String, Object>> typeRef = new TypeReference<Map<String, Object>>() {};
+
+    images.forEach(
+        image -> {
+          nsCache
+              .getCacheDataBuilder(Keys.getImageKey(image.getId(), getAccountName(), region))
+              .setAttributes(objectMapper.convertValue(image, typeRef));
+        });
+
+    return cacheResultBuilder.build();
+  }
+}
diff --git
a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudInstanceTypeCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudInstanceTypeCachingAgent.java new file mode 100644 index 00000000000..c88a96ccf08 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudInstanceTypeCachingAgent.java @@ -0,0 +1,100 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.INSTANCE_TYPES; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.ecs.v1.domain.Flavor; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder.NamespaceCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.slf4j.Logger; + +public class HuaweiCloudInstanceTypeCachingAgent extends AbstractHuaweiCloudCachingAgent { + + private static final Logger log = + HuaweiCloudUtils.getLogger(HuaweiCloudInstanceTypeCachingAgent.class); + + public HuaweiCloudInstanceTypeCachingAgent( + HuaweiCloudNamedAccountCredentials credentials, ObjectMapper objectMapper, String region) { + super(credentials, objectMapper, region); + } + + @Override + String getAgentName() { + return this.getClass().getSimpleName(); + } + + @Override + public Collection getProvidedDataTypes() { + return Collections.unmodifiableCollection( + new ArrayList() { + { + add(AUTHORITATIVE.forType(INSTANCE_TYPES.ns)); + } + }); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + List zones = + credentials.getRegionToZones().getOrDefault(region, Collections.emptyList()); + if (zones.isEmpty()) { + log.warn("no availability zones for region({})", region); + return null; + } + + CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(0); + + zones.forEach( + zone -> { + List flavors = getCloudClient().getInstanceTypes(region, zone); + if (!flavors.isEmpty()) { + buildCacheData(cacheResultBuilder, flavors); + } + }); + + return cacheResultBuilder.build(); + 
} + + private void buildCacheData( + CacheResultBuilder cacheResultBuilder, List flavors) { + NamespaceCache nsCache = cacheResultBuilder.getNamespaceCache(INSTANCE_TYPES.ns); + TypeReference> typeRef = new TypeReference>() {}; + + flavors.forEach( + flavor -> { + nsCache + .getCacheDataBuilder( + Keys.getInstanceTypeKey(flavor.getId(), getAccountName(), region)) + .setAttributes(objectMapper.convertValue(flavor, typeRef)); + }); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudNetworkCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudNetworkCachingAgent.java new file mode 100644 index 00000000000..e4292bdc4c7 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudNetworkCachingAgent.java @@ -0,0 +1,82 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.NETWORKS; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.vpc.v1.domain.Vpc; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder.NamespaceCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class HuaweiCloudNetworkCachingAgent extends AbstractHuaweiCloudCachingAgent { + + public HuaweiCloudNetworkCachingAgent( + HuaweiCloudNamedAccountCredentials credentials, ObjectMapper objectMapper, String region) { + super(credentials, objectMapper, region); + } + + @Override + String getAgentName() { + return this.getClass().getSimpleName(); + } + + @Override + public Collection getProvidedDataTypes() { + return Collections.unmodifiableCollection( + new ArrayList() { + { + add(AUTHORITATIVE.forType(NETWORKS.ns)); + } + }); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + List vpcs = getCloudClient().getVpcs(region); + return buildCacheResult(vpcs); + } + + private CacheResult buildCacheResult(List vpcs) { + + CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(0); + NamespaceCache nsCache = cacheResultBuilder.getNamespaceCache(NETWORKS.ns); + + 
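+    // Each Vpc is flattened to a Map of attributes via Jackson so it can be stored
+    // as CacheData; HuaweiCloudNetworkProvider later rebuilds the object with
+    // objectMapper.convertValue(cacheData.getAttributes(), Vpc.class).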
TypeReference> typeRef = new TypeReference>() {}; + + vpcs.forEach( + vpc -> { + nsCache + .getCacheDataBuilder(Keys.getNetworkKey(vpc.getId(), getAccountName(), region)) + .setAttributes(objectMapper.convertValue(vpc, typeRef)); + }); + + return cacheResultBuilder.build(); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSecurityGroupCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSecurityGroupCachingAgent.java new file mode 100644 index 00000000000..c8c61e10916 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSecurityGroupCachingAgent.java @@ -0,0 +1,223 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider.ID; +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.SECURITY_GROUPS; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroup; +import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroupRule; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder.NamespaceCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudSecurityGroupCacheData; +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.slf4j.Logger; + +public class HuaweiCloudSecurityGroupCachingAgent extends AbstractOnDemandCachingAgent { + + private static final Logger log = + HuaweiCloudUtils.getLogger(HuaweiCloudSecurityGroupCachingAgent.class); + + private final OnDemandMetricsSupport onDemandMetricsSupport; + + public HuaweiCloudSecurityGroupCachingAgent( + HuaweiCloudNamedAccountCredentials credentials, + ObjectMapper objectMapper, + 
+      Registry registry,
+      String region) {
+
+    super(credentials, objectMapper, SECURITY_GROUPS.ns, region);
+
+    this.onDemandMetricsSupport =
+        new OnDemandMetricsSupport(registry, this, ID + ":" + OnDemandType.SecurityGroup);
+  }
+
+  @Override
+  String getAgentName() {
+    return this.getClass().getSimpleName();
+  }
+
+  @Override
+  public OnDemandMetricsSupport getMetricsSupport() {
+    return this.onDemandMetricsSupport;
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return Collections.unmodifiableCollection(
+        new ArrayList<AgentDataType>() {
+          {
+            add(AUTHORITATIVE.forType(SECURITY_GROUPS.ns));
+          }
+        });
+  }
+
+  @Override
+  public boolean handles(OnDemandType type, String cloudProvider) {
+    return OnDemandType.SecurityGroup.equals(type) && ID.equals(cloudProvider);
+  }
+
+  @Override
+  public OnDemandAgent.OnDemandResult handle(
+      ProviderCache providerCache, Map<String, ? extends Object> data) {
+
+    if (HuaweiCloudUtils.isEmptyStr(data.get("securityGroupName"))
+        || !this.getAccountName().equals(data.get("account"))
+        || !region.equals(data.get("region"))) {
+      return null;
+    }
+
+    return handle(providerCache, (String) data.get("securityGroupName"));
+  }
+
+  @Override
+  Optional<Object> getResourceByName(String name) {
+    if (HuaweiCloudUtils.isEmptyStr(name)) {
+      return Optional.empty();
+    }
+
+    List<SecurityGroup> groups = getCloudClient().getSecurityGroups(region);
+    if (groups.isEmpty()) {
+      return Optional.empty();
+    }
+
+    List<SecurityGroup> matches =
+        groups.stream().filter(it -> name.equals(it.getName())).collect(Collectors.toList());
+
+    if (matches.size() == 1) {
+      return Optional.of(matches.get(0));
+    }
+
+    log.warn(
+        "There is {} with name={} in region={}",
+        matches.isEmpty() ? "no security group" : "more than one security group",
+        name,
+        region);
+
+    return Optional.empty();
+  }
+
+  @Override
+  String getResourceCacheDataId(Object resource) {
+    SecurityGroup seg = (SecurityGroup) resource;
+    return Keys.getSecurityGroupKey(seg.getName(), seg.getId(), getAccountName(), region);
+  }
+
+  @Override
+  Collection<String> getOnDemandKeysToEvict(ProviderCache providerCache, String name) {
+    return providerCache.filterIdentifiers(
+        SECURITY_GROUPS.ns, Keys.getSecurityGroupKey(name, "*", getAccountName(), region));
+  }
+
+  @Override
+  void buildCurrentNamespaceCacheData(CacheResultBuilder cacheResultBuilder) {
+    List<SecurityGroup> securityGroups = getCloudClient().getSecurityGroups(region);
+    buildNamespaceCacheData(cacheResultBuilder, securityGroups, securityGroups);
+  }
+
+  @Override
+  void buildSingleResourceCacheData(CacheResultBuilder cacheResultBuilder, Object resource) {
+    List<SecurityGroup> securityGroups = new ArrayList<>(1);
+    securityGroups.add((SecurityGroup) resource);
+
+    List<SecurityGroup> allSecurityGroups = getCloudClient().getSecurityGroups(region);
+
+    buildNamespaceCacheData(cacheResultBuilder, securityGroups, allSecurityGroups);
+  }
+
+  private void buildNamespaceCacheData(
+      CacheResultBuilder cacheResultBuilder,
+      List<SecurityGroup> securityGroups,
+      List<SecurityGroup> allSecurityGroups) {
+
+    NamespaceCache nsCache = cacheResultBuilder.getNamespaceCache(SECURITY_GROUPS.ns);
+
+    TypeReference<Map<String, Object>> typeRef = new TypeReference<Map<String, Object>>() {};
+
+    Map<String, String> groupId2CacheIds =
+        allSecurityGroups.stream()
+            .collect(
+                Collectors.toMap(
+                    it -> it.getId(),
+                    it ->
+                        Keys.getSecurityGroupKey(
+                            it.getName(), it.getId(), getAccountName(), region)));
+
+    securityGroups.forEach(
+        item -> {
+          if (!groupId2CacheIds.containsKey(item.getId())) {
+            log.warn(
+                "Can't find the security group (id={}) in the current security groups",
+                item.getId());
+            return;
+          }
+
+          Map<String, String> relevantSecurityGroups = new HashMap<>();
+
+          List<SecurityGroupRule> rules = item.getSecurityGroupRules();
+          if (!HuaweiCloudUtils.isEmptyCollection(rules)) {
+            rules.forEach(
+                rule -> {
+                  String remoteGroupId = rule.getRemoteGroupId();
+
+                  if (!HuaweiCloudUtils.isEmptyStr(remoteGroupId)) {
+
+                    if (groupId2CacheIds.containsKey(remoteGroupId)) {
+                      relevantSecurityGroups.put(
+                          remoteGroupId, groupId2CacheIds.get(remoteGroupId));
+                    } else {
+                      log.warn(
+                          "Can't find the remote security group (id={}) for rule ({}) of security group (id={})",
+                          remoteGroupId,
+                          rule.getId(),
+                          item.getId());
+                    }
+                  }
+                });
+          }
+
+          nsCache
+              .getCacheDataBuilder(groupId2CacheIds.get(item.getId()))
+              .setAttributes(
+                  objectMapper.convertValue(
+                      new HuaweiCloudSecurityGroupCacheData(item, relevantSecurityGroups),
+                      typeRef));
+        });
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSubnetCachingAgent.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSubnetCachingAgent.java
new file mode 100644
index 00000000000..7ffbd8a5a8d
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSubnetCachingAgent.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.SUBNETS; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.vpc.v1.domain.Subnet; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.CacheResultBuilder.NamespaceCache; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.springframework.util.StringUtils; + +public class HuaweiCloudSubnetCachingAgent extends AbstractHuaweiCloudCachingAgent { + + public HuaweiCloudSubnetCachingAgent( + HuaweiCloudNamedAccountCredentials credentials, ObjectMapper objectMapper, String region) { + super(credentials, objectMapper, region); + } + + @Override + String getAgentName() { + return this.getClass().getSimpleName(); + } + + @Override + public Collection getProvidedDataTypes() { + return Collections.unmodifiableCollection( + new ArrayList() { + { + add(AUTHORITATIVE.forType(SUBNETS.ns)); + } + }); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + List subnets = getCloudClient().getSubnets(region); + return buildCacheResult(subnets); + } + + private CacheResult buildCacheResult(List subnets) { + CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(0); + NamespaceCache nsCache = cacheResultBuilder.getNamespaceCache(SUBNETS.ns); + + TypeReference> typeRef = new TypeReference>() {}; + + subnets.forEach( + subnet -> { + if (!(StringUtils.isEmpty(subnet.getVpcId()))) { + nsCache + .getCacheDataBuilder(Keys.getSubnetKey(subnet.getId(), getAccountName(), region)) + .setAttributes(objectMapper.convertValue(subnet, typeRef)); + } + }); + + return cacheResultBuilder.build(); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/config/HuaweiCloudInfrastructureProviderConfig.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/config/HuaweiCloudInfrastructureProviderConfig.java new file mode 100644 index 00000000000..8cb26f3116c --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/config/HuaweiCloudInfrastructureProviderConfig.java @@ -0,0 +1,112 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.provider.config;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.cats.agent.Agent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.provider.HuaweiCloudInfrastructureProvider;
+import com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent.HuaweiCloudImageCachingAgent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent.HuaweiCloudInstanceTypeCachingAgent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent.HuaweiCloudNetworkCachingAgent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent.HuaweiCloudSecurityGroupCachingAgent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent.HuaweiCloudSubnetCachingAgent;
+import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
+import com.netflix.spinnaker.clouddriver.security.ProviderUtils;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import org.springframework.beans.factory.config.ConfigurableBeanFactory;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.DependsOn;
+import org.springframework.context.annotation.Scope;
+
+@Configuration
+public class HuaweiCloudInfrastructureProviderConfig {
+
+  @Bean
+  @DependsOn("synchronizeHuaweiCloudNamedAccountCredentials")
+  public HuaweiCloudInfrastructureProvider huaweiCloudInfrastructureProvider(
+      AccountCredentialsRepository accountCredentialsRepository,
+      ObjectMapper objectMapper,
+      Registry registry) {
+
+    HuaweiCloudInfrastructureProvider provider =
+        new HuaweiCloudInfrastructureProvider(
+            Collections.newSetFromMap(new ConcurrentHashMap<Agent, Boolean>()));
+
+    synchronizeHuaweiCloudInfrastructureProvider(
+        provider, accountCredentialsRepository, objectMapper, registry);
+
+    return provider;
+  }
+
+  @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
+  @Bean
+  public HuaweiCloudInfrastructureProviderSynchronizer synchronizeHuaweiCloudInfrastructureProvider(
+      HuaweiCloudInfrastructureProvider infrastructureProvider,
+      AccountCredentialsRepository accountCredentialsRepository,
+      ObjectMapper objectMapper,
+      Registry registry) {
+
+    Set<String> scheduledAccounts = ProviderUtils.getScheduledAccounts(infrastructureProvider);
+
+    Set<HuaweiCloudNamedAccountCredentials> allAccounts =
+        ProviderUtils.buildThreadSafeSetOfAccounts(
+            accountCredentialsRepository, HuaweiCloudNamedAccountCredentials.class);
+
+    List<Agent> newlyAddedAgents = new ArrayList<>();
+
+    allAccounts.forEach(
+        credentials -> {
+          if (!scheduledAccounts.contains(credentials.getName())) {
+            credentials
+                .getRegions()
+                .forEach(
+                    region -> {
+                      newlyAddedAgents.add(
+                          new HuaweiCloudImageCachingAgent(credentials, objectMapper, region));
+                      newlyAddedAgents.add(
+                          new HuaweiCloudInstanceTypeCachingAgent(
+                              credentials, objectMapper, region));
+                      newlyAddedAgents.add(
+                          new HuaweiCloudNetworkCachingAgent(credentials, objectMapper, region));
+                      newlyAddedAgents.add(
+                          new HuaweiCloudSubnetCachingAgent(credentials, objectMapper, region));
+                      newlyAddedAgents.add(
+                          new HuaweiCloudSecurityGroupCachingAgent(
+                              credentials, objectMapper, registry, region));
+                    });
+          }
+        });
+
+    if (infrastructureProvider.getAgentScheduler() != null) {
+      ProviderUtils.rescheduleAgents(infrastructureProvider, newlyAddedAgents);
+    }
+
+    if (!newlyAddedAgents.isEmpty()) {
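+      // Track the new agents on the provider itself as well, so that
+      // ProviderUtils.getScheduledAccounts(...) sees these accounts as scheduled on
+      // the next synchronizer run and does not create duplicate agents.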
+      infrastructureProvider.getAgents().addAll(newlyAddedAgents);
+    }
+
+    return new HuaweiCloudInfrastructureProviderSynchronizer();
+  }
+
+  class HuaweiCloudInfrastructureProviderSynchronizer {}
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudImageProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudImageProvider.java
new file mode 100644
index 00000000000..5ad85fa6b64
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudImageProvider.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2020 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
"*" : region))); + + if (HuaweiCloudUtils.isEmptyCollection(data)) { + return Collections.emptySet(); + } + + return data.stream() + .map(cacheData -> this.fromCacheData(cacheData)) + .filter(it -> it != null) + .collect(Collectors.toSet()); + } + + HuaweiCloudImage fromCacheData(CacheData cacheData) { + Map parts = Keys.parse(cacheData.getId(), IMAGES); + if (parts.isEmpty()) { + return null; + } + + Image image = objectMapper.convertValue(cacheData.getAttributes(), Image.class); + + return new HuaweiCloudImage( + image.getId(), image.getName(), parts.get("region"), parts.get("account")); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudInstanceTypeProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudInstanceTypeProvider.java new file mode 100644 index 00000000000..5690a3781a7 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudInstanceTypeProvider.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view; + +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.INSTANCE_TYPES; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.ecs.v1.domain.Flavor; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudInstanceType; +import com.netflix.spinnaker.clouddriver.model.InstanceTypeProvider; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class HuaweiCloudInstanceTypeProvider + implements InstanceTypeProvider { + + private final Cache cacheView; + private final ObjectMapper objectMapper; + + @Autowired + public HuaweiCloudInstanceTypeProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.objectMapper = objectMapper; + } + + @Override + public Set getAll() { + Collection data = + cacheView.getAll( + INSTANCE_TYPES.ns, + cacheView.filterIdentifiers(INSTANCE_TYPES.ns, Keys.getInstanceTypeKey("*", "*", "*"))); + + if (HuaweiCloudUtils.isEmptyCollection(data)) { + return Collections.emptySet(); + } + + return data.stream() + .map(cacheData -> this.fromCacheData(cacheData)) + .filter(it -> it != null) + .collect(Collectors.toSet()); + } + + private HuaweiCloudInstanceType fromCacheData(CacheData cacheData) { + Map parts = 
Keys.parse(cacheData.getId(), INSTANCE_TYPES); + if (parts.isEmpty()) { + return null; + } + + Flavor flavor = objectMapper.convertValue(cacheData.getAttributes(), Flavor.class); + + return new HuaweiCloudInstanceType(flavor.getName(), parts.get("region"), parts.get("account")); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudNetworkProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudNetworkProvider.java new file mode 100644 index 00000000000..47ccf365b7b --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudNetworkProvider.java @@ -0,0 +1,82 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view; + +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.NETWORKS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.vpc.v1.domain.Vpc; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudNetwork; +import com.netflix.spinnaker.clouddriver.model.NetworkProvider; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class HuaweiCloudNetworkProvider implements NetworkProvider { + private final Cache cacheView; + private final ObjectMapper objectMapper; + + @Autowired + public HuaweiCloudNetworkProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.objectMapper = objectMapper; + } + + @Override + public String getCloudProvider() { + return HuaweiCloudProvider.ID; + } + + @Override + public Set getAll() { + Collection data = + cacheView.getAll( + NETWORKS.ns, + cacheView.filterIdentifiers(NETWORKS.ns, Keys.getNetworkKey("*", "*", "*"))); + + if (HuaweiCloudUtils.isEmptyCollection(data)) { + return Collections.emptySet(); + } + + return data.stream() + .map(cacheData -> this.fromCacheData(cacheData)) + .filter(it -> it != null) + .collect(Collectors.toSet()); + } + + private HuaweiCloudNetwork fromCacheData(CacheData cacheData) { + Map parts = Keys.parse(cacheData.getId(), NETWORKS); + if (parts.isEmpty()) { + return null; + } + + Vpc vpc = objectMapper.convertValue(cacheData.getAttributes(), Vpc.class); + + return new HuaweiCloudNetwork( + parts.get("id"), vpc.getName(), parts.get("region"), 
parts.get("account")); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSecurityGroupProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSecurityGroupProvider.java new file mode 100644 index 00000000000..8ab47f79b6b --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSecurityGroupProvider.java @@ -0,0 +1,323 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view; + +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.SECURITY_GROUPS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudSecurityGroup; +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudSecurityGroupCacheData; +import com.netflix.spinnaker.clouddriver.model.AddressableRange; +import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider; +import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule; +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; +import com.netflix.spinnaker.clouddriver.model.securitygroups.SecurityGroupRule; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class HuaweiCloudSecurityGroupProvider + implements SecurityGroupProvider { + + private static final Logger log = + HuaweiCloudUtils.getLogger(HuaweiCloudSecurityGroupProvider.class); + + private final Cache cacheView; + private final ObjectMapper objectMapper; + + @Autowired + public HuaweiCloudSecurityGroupProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.objectMapper = objectMapper; + } + + @Override + public String getCloudProvider() { + return HuaweiCloudProvider.ID; + } + + @Override + public Set getAll(boolean includeRules) { + Set result = + loadResults( + Keys.getSecurityGroupKey("*", "*", "*", "*"), + this.cacheView, + this.objectMapper, + includeRules); + + log.debug("get all security group, return {} groups", result.size()); + return result; + } + + @Override + public Set getAllByRegion(boolean includeRules, String region) { + Set result = + loadResults( + 
Keys.getSecurityGroupKey("*", "*", "*", region), + this.cacheView, + this.objectMapper, + includeRules); + + log.debug("get all security group by region={}, return {} groups", region, result.size()); + return result; + } + + @Override + public Set getAllByAccount(boolean includeRules, String account) { + Set result = + loadResults( + Keys.getSecurityGroupKey("*", "*", account, "*"), + this.cacheView, + this.objectMapper, + includeRules); + + log.debug("get all security group by account={}, return {} groups", account, result.size()); + return result; + } + + @Override + public Set getAllByAccountAndName( + boolean includeRules, String account, String name) { + + Set result = + loadResults( + Keys.getSecurityGroupKey(name, "*", account, "*"), + this.cacheView, + this.objectMapper, + includeRules); + + log.debug( + "get all security group by account={} and group name={}, return {} groups", + account, + name, + result.size()); + return result; + } + + @Override + public Set getAllByAccountAndRegion( + boolean includeRules, String account, String region) { + + Set result = + loadResults( + Keys.getSecurityGroupKey("*", "*", account, region), + this.cacheView, + this.objectMapper, + includeRules); + + log.debug( + "get all security group by account={} and region={}, return {} groups", + account, + region, + result.size()); + return result; + } + + @Override + public HuaweiCloudSecurityGroup get(String account, String region, String name, String vpcId) { + Set result = + loadResults( + Keys.getSecurityGroupKey(name, "*", account, region), + this.cacheView, + this.objectMapper, + true); + + log.debug( + "get all security group by account={}, region={}, group name={} and vpc id={}, return {} groups", + account, + region, + name, + vpcId, + result.size()); + + return result.stream() + .filter( + it -> { + boolean e1 = HuaweiCloudUtils.isEmptyStr(it.getVpcId()); + boolean e2 = HuaweiCloudUtils.isEmptyStr(vpcId); + + return (e1 == e2) && (e1 || vpcId.equals(it.getVpcId())); + }) + .findFirst() + .orElse(null); + } + + @Override + public HuaweiCloudSecurityGroup getById(String account, String region, String id, String vpcId) { + Set result = + loadResults( + Keys.getSecurityGroupKey("*", id, account, region), + this.cacheView, + this.objectMapper, + true); + + return result.stream() + .filter( + it -> { + boolean e1 = HuaweiCloudUtils.isEmptyStr(it.getVpcId()); + boolean e2 = HuaweiCloudUtils.isEmptyStr(vpcId); + + return (e1 == e2) && (e1 || vpcId.equals(it.getVpcId())); + }) + .findFirst() + .orElse(null); + } + + private static Set loadResults( + String pattern, Cache cacheView, ObjectMapper objectMapper, boolean includeRules) { + + Collection data = + cacheView.getAll( + SECURITY_GROUPS.ns, cacheView.filterIdentifiers(SECURITY_GROUPS.ns, pattern)); + + if (HuaweiCloudUtils.isEmptyCollection(data)) { + return Collections.emptySet(); + } + + return data.stream() + .map(cacheData -> fromCacheData(cacheData, objectMapper, includeRules)) + .filter(it -> it != null) + .collect(Collectors.toSet()); + } + + private static HuaweiCloudSecurityGroup fromCacheData( + CacheData cacheData, ObjectMapper objectMapper, boolean includeRules) { + + Map parts = Keys.parse(cacheData.getId(), SECURITY_GROUPS); + if (parts.isEmpty()) { + return null; + } + + HuaweiCloudSecurityGroupCacheData segCacheData = + objectMapper.convertValue( + cacheData.getAttributes(), HuaweiCloudSecurityGroupCacheData.class); + + return new HuaweiCloudSecurityGroup( + parts.get("id"), + parts.get("name"), + parts.get("region"), + 
parts.get("account"), + parts.get("application"), + segCacheData.getSecurityGroup().getVpcId(), + includeRules ? buildInboundRules(segCacheData) : Collections.emptySet(), + Collections.emptySet()); + } + + private static Set buildInboundRules(HuaweiCloudSecurityGroupCacheData segCacheData) { + if (HuaweiCloudUtils.isEmptyCollection( + segCacheData.getSecurityGroup().getSecurityGroupRules())) { + return Collections.emptySet(); + } + + return segCacheData.getSecurityGroup().getSecurityGroupRules().stream() + .filter(rule -> "ingress".equals(rule.getDirection())) + .map( + rule -> { + SortedSet portRanges = new TreeSet(); + portRanges.add(buildPortRange(rule)); + + if (!HuaweiCloudUtils.isEmptyStr(rule.getRemoteIpPrefix())) { + + String[] parts = rule.getRemoteIpPrefix().split("/"); + String ip = parts[0]; + String cidr = ""; + if (parts.length > 1 && Integer.parseInt(parts[1]) != 32) { + cidr = rule.getRemoteIpPrefix(); + ip = ""; + } + + return new IpRangeRule( + new AddressableRange(ip, cidr), + rule.getProtocol(), + portRanges, + rule.getDescription()); + + } else if (!HuaweiCloudUtils.isEmptyStr(rule.getRemoteGroupId())) { + + return SecurityGroupRule.builder() + .portRanges(portRanges) + .protocol(rule.getProtocol()) + .securityGroup( + buildRemoteSecurityGroup( + segCacheData.getRelevantSecurityGroups().get(rule.getRemoteGroupId()))) + .build(); + } + + return null; + }) + .filter(it -> it != null) + .collect(Collectors.toSet()); + } + + private static Rule.PortRange buildPortRange( + com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroupRule rule) { + + Rule.PortRange portRange = new Rule.PortRange(); + + if (HuaweiCloudUtils.isEmptyStr(rule.getProtocol()) + || !("icmp".equals(rule.getProtocol().toLowerCase()))) { + + portRange.setStartPort( + rule.getPortRangeMin() != null ? rule.getPortRangeMin() : new Integer(1)); + + portRange.setEndPort( + rule.getPortRangeMax() != null ? rule.getPortRangeMax() : new Integer(65535)); + + return portRange; + } + + // there are two cases for icmp: both min and max are null or not. + int startPort = rule.getPortRangeMin() != null ? rule.getPortRangeMin().intValue() : 0; + int endPort = rule.getPortRangeMax() != null ? rule.getPortRangeMax().intValue() : 255; + + portRange.setStartPort(new Integer(startPort <= endPort ? startPort : endPort)); + portRange.setEndPort(new Integer(startPort <= endPort ? endPort : startPort)); + + return portRange; + } + + private static HuaweiCloudSecurityGroup buildRemoteSecurityGroup(String cacheDataId) { + Map parts = Keys.parse(cacheDataId, SECURITY_GROUPS); + if (parts.isEmpty()) { + return null; + } + + return new HuaweiCloudSecurityGroup( + parts.get("id"), + parts.get("name"), + parts.get("region"), + parts.get("account"), + parts.get("application"), + "", + null, + null); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSubnetProvider.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSubnetProvider.java new file mode 100644 index 00000000000..85dd2b81ea0 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSubnetProvider.java @@ -0,0 +1,88 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view; + +import static com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys.Namespace.SUBNETS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.huawei.openstack4j.openstack.vpc.v1.domain.Subnet; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudUtils; +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys; +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudSubnet; +import com.netflix.spinnaker.clouddriver.model.SubnetProvider; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +class HuaweiCloudSubnetProvider implements SubnetProvider { + private final Cache cacheView; + private final ObjectMapper objectMapper; + + @Autowired + public HuaweiCloudSubnetProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheView = cacheView; + this.objectMapper = objectMapper; + } + + @Override + public String getCloudProvider() { + return HuaweiCloudProvider.ID; + } + + @Override + public Set getAll() { + Collection data = + cacheView.getAll( + SUBNETS.ns, cacheView.filterIdentifiers(SUBNETS.ns, Keys.getSubnetKey("*", "*", "*"))); + + if (HuaweiCloudUtils.isEmptyCollection(data)) { + return Collections.emptySet(); + } + + return data.stream() + .map(cacheData -> this.fromCacheData(cacheData)) + .filter(it -> it != null) + .collect(Collectors.toSet()); + } + + private HuaweiCloudSubnet fromCacheData(CacheData cacheData) { + Map parts = Keys.parse(cacheData.getId(), SUBNETS); + if (parts.isEmpty()) { + return null; + } + + Subnet subnet = objectMapper.convertValue(cacheData.getAttributes(), Subnet.class); + + return new HuaweiCloudSubnet( + parts.get("id"), + subnet.getName(), + subnet.getCidr(), + subnet.getVpcId(), + parts.get("region"), + parts.get("account"), + "n/a"); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudCredentials.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudCredentials.java new file mode 100644 index 00000000000..f76431460d0 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudCredentials.java @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.security;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.huawei.openstack4j.api.OSClient;
+import com.huawei.openstack4j.core.transport.Config;
+import com.huawei.openstack4j.model.common.Identifier;
+import com.huawei.openstack4j.model.identity.v3.Token;
+import com.huawei.openstack4j.openstack.OSFactory;
+import com.netflix.spinnaker.clouddriver.huaweicloud.client.AuthorizedClientProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HuaweiCloudCredentials implements AuthorizedClientProvider {
+  private final Logger log = LoggerFactory.getLogger(HuaweiCloudCredentials.class);
+
+  private final String authUrl;
+  private final String username;
+  @JsonIgnore private final String password;
+  private final String projectName;
+  private final String domainName;
+  private final Boolean insecure;
+
+  // volatile so the double-checked locking in getAuthClient() publishes the token safely
+  @JsonIgnore private volatile Token token = null;
+
+  public HuaweiCloudCredentials(
+      String authUrl,
+      String username,
+      String password,
+      String projectName,
+      String domainName,
+      Boolean insecure) {
+    this.authUrl = authUrl;
+    this.username = username;
+    this.password = password;
+    this.projectName = projectName;
+    this.domainName = domainName;
+    this.insecure = insecure;
+  }
+
+  /** Returns an authorized client, or null if authentication fails. */
+  public OSClient getAuthClient() {
+    Config config =
+        insecure ? Config.newConfig().withSSLVerificationDisabled() : Config.newConfig();
+    OSClient client = null;
+    try {
+      if (needRefreshToken()) {
+        synchronized (this) {
+          if (needRefreshToken()) {
+            token =
+                OSFactory.builderV3()
+                    .withConfig(config)
+                    .endpoint(authUrl)
+                    .credentials(username, password, Identifier.byName(domainName))
+                    .scopeToProject(Identifier.byName(projectName), Identifier.byName(domainName))
+                    .authenticate()
+                    .getToken();
+          }
+        }
+      }
+
+      client = OSFactory.clientFromToken(token, config);
+    } catch (Exception e) {
+      log.error("Error building authorized client", e);
+    }
+    return client;
+  }
+
+  private boolean needRefreshToken() {
+    if (token == null) {
+      return true;
+    }
+
+    long now = System.currentTimeMillis();
+    long expires = token.getExpires().getTime();
+    return now >= expires;
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudCredentialsInitializer.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudCredentialsInitializer.java
new file mode 100644
index 00000000000..bbd0915cf64
--- /dev/null
+++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudCredentialsInitializer.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Huawei Technologies Co.,Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.huaweicloud.security;
+
+import com.netflix.spinnaker.clouddriver.huaweicloud.config.HuaweiCloudConfigurationProperties;
+import com.netflix.spinnaker.clouddriver.huaweicloud.config.HuaweiCloudConfigurationProperties.ManagedAccount;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
+import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable;
+import com.netflix.spinnaker.clouddriver.security.ProviderUtils;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.config.ConfigurableBeanFactory;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Scope;
+
+@Configuration
+public class HuaweiCloudCredentialsInitializer implements CredentialsInitializerSynchronizable {
+
+  private final Logger log = LoggerFactory.getLogger(HuaweiCloudCredentialsInitializer.class);
+
+  @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
+  @Bean
+  @ConfigurationProperties("huaweicloud")
+  HuaweiCloudConfigurationProperties huaweiCloudConfigurationProperties() {
+    return new HuaweiCloudConfigurationProperties();
+  }
+
+  @Bean
+  List<HuaweiCloudNamedAccountCredentials> synchronizeHuaweiCloudNamedAccountCredentials(
+      HuaweiCloudConfigurationProperties huaweiCloudConfigurationProperties,
+      AccountCredentialsRepository accountCredentialsRepository) {
+
+    // calculateAccountDeltas returns a pair: the accounts to add and the names of the
+    // accounts to delete.
+    List result =
+        ProviderUtils.calculateAccountDeltas(
+            accountCredentialsRepository,
+            HuaweiCloudNamedAccountCredentials.class,
+            huaweiCloudConfigurationProperties.getAccounts());
+
+    List<ManagedAccount> accountsToAdd = (List<ManagedAccount>) result.get(0);
+    accountsToAdd.forEach(
+        managedAccount -> {
+          try {
+            HuaweiCloudNamedAccountCredentials account =
+                new HuaweiCloudNamedAccountCredentials(
+                    managedAccount.getName(),
+                    managedAccount.getEnvironment(),
+                    managedAccount.getAccountType(),
+                    managedAccount.getAuthUrl(),
+                    managedAccount.getUsername(),
+                    managedAccount.getPassword(),
+                    managedAccount.getProjectName(),
+                    managedAccount.getDomainName(),
+                    managedAccount.getInsecure(),
+                    managedAccount.getRegions());
+
+            accountCredentialsRepository.save(managedAccount.getName(), account);
+          } catch (Exception e) {
+            log.error("Could not load account: {} for huaweicloud", managedAccount.getName(), e);
+          }
+        });
+
+    List<String> accountNamesToDelete = (List<String>) result.get(1);
+    ProviderUtils.unscheduleAndDeregisterAgents(accountNamesToDelete, null);
+
+    return (List)
+        accountCredentialsRepository.getAll().stream()
+            .filter(it -> it instanceof HuaweiCloudNamedAccountCredentials)
+            .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudNamedAccountCredentials.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudNamedAccountCredentials.java
new file mode 100644
index
00000000000..7eb39f9785f --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/clouddriver/huaweicloud/security/HuaweiCloudNamedAccountCredentials.java @@ -0,0 +1,90 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.security; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.huawei.openstack4j.model.compute.ext.AvailabilityZone; +import com.netflix.spinnaker.clouddriver.huaweicloud.HuaweiCloudProvider; +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient; +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClientImpl; +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.Data; + +@Data +public class HuaweiCloudNamedAccountCredentials + extends AbstractAccountCredentials { + + private final String name; + private final String environment; + private final String accountType; + private final List regions; + private final Map> regionToZones; + private final HuaweiCloudCredentials credentials; + @JsonIgnore private final HuaweiCloudClient cloudClient; + + public HuaweiCloudNamedAccountCredentials( + String name, + String environment, + String accountType, + String authUrl, + String username, + String password, + String projectName, + String domainName, + Boolean insecure, + List regions) { + this.name = name; + this.environment = environment; + this.accountType = accountType; + this.regions = regions; + this.credentials = + new HuaweiCloudCredentials(authUrl, username, password, projectName, domainName, insecure); + this.cloudClient = new HuaweiCloudClientImpl(this.credentials); + + this.regionToZones = new HashMap(); + regions.forEach( + region -> { + List result = this.getZonesOfRegion(region); + if (!result.isEmpty()) { + regionToZones.put(region, result); + } + }); + } + + @Override + public String getCloudProvider() { + return HuaweiCloudProvider.ID; + } + + @Override + public List getRequiredGroupMembership() { + return new ArrayList(); + } + + private List getZonesOfRegion(String region) { + List zones = cloudClient.getZones(region); + return zones.stream() + .filter(zone -> zone.getZoneState().getAvailable()) + .map(zone -> zone.getZoneName()) + .collect(Collectors.toList()); + } +} diff --git a/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/config/HuaweiCloudConfiguration.java b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/config/HuaweiCloudConfiguration.java new file mode 100644 index 00000000000..5fa8b70fd34 --- /dev/null +++ b/clouddriver-huaweicloud/src/main/java/com/netflix/spinnaker/config/HuaweiCloudConfiguration.java @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Huawei Technologies Co.,Ltd. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.config; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ConditionalOnProperty("huaweicloud.enabled") +@ComponentScan("com.netflix.spinnaker.clouddriver.huaweicloud") +public class HuaweiCloudConfiguration {} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudImageCachingAgentSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudImageCachingAgentSpec.groovy new file mode 100644 index 00000000000..c6ff72aa956 --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudImageCachingAgentSpec.groovy @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.ims.v2.domain.Image +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudImageCachingAgentSpec extends Specification { + + static final String REGION = 'cn-north-1' + static final String ACCOUNT_NAME = 'some-account-name' + + void "should add images on initial run"() { + setup: + def registry = new DefaultRegistry() + def cloudClient = Mock(HuaweiCloudClient); + def credentials = Mock(HuaweiCloudNamedAccountCredentials) + credentials.cloudClient >> cloudClient + credentials.name >> ACCOUNT_NAME + def ProviderCache providerCache = Mock(ProviderCache) + + @Subject + HuaweiCloudImageCachingAgent agent = new HuaweiCloudImageCachingAgent( + credentials, new ObjectMapper(), REGION) + + def imageA = Image.builder() + .name('image-a') + .id('image-a') + .build() + + def imageB = Image.builder() + .name('image-b') + .id('image-b') + .build() + + def keyA = Keys.getImageKey(imageA.id, + ACCOUNT_NAME, + REGION) + + def keyB = Keys.getImageKey(imageB.id, + ACCOUNT_NAME, + REGION) + + when: + def cache = agent.loadData(providerCache) + + then: + 1 * cloudClient.getImages(REGION) >> [imageA, imageB] + with(cache.cacheResults.get(Keys.Namespace.IMAGES.ns)) { Collection cd -> + cd.size() == 2 + cd.id.containsAll([keyA, keyB]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudInstanceTypeCachingAgentSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudInstanceTypeCachingAgentSpec.groovy new file mode 100644 index 00000000000..cb7b8ee4fb6 --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudInstanceTypeCachingAgentSpec.groovy @@ -0,0 +1,79 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.ecs.v1.domain.Flavor +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudInstanceTypeCachingAgentSpec extends Specification { + + static final String AZ = 'cn-north-1a' + static final String REGION = 'cn-north-1' + static final String ACCOUNT_NAME = 'some-account-name' + + void "should add instance types on initial run"() { + setup: + def registry = new DefaultRegistry() + def cloudClient = Mock(HuaweiCloudClient); + def credentials = Mock(HuaweiCloudNamedAccountCredentials) + credentials.cloudClient >> cloudClient + credentials.name >> ACCOUNT_NAME + credentials.regionToZones >> [(REGION): [AZ]] + def ProviderCache providerCache = Mock(ProviderCache) + + @Subject + HuaweiCloudInstanceTypeCachingAgent agent = new HuaweiCloudInstanceTypeCachingAgent( + credentials, new ObjectMapper(), REGION) + + def flavorA = Flavor.builder() + .name('c1.medium.1') + .id('id-a') + .build() + + def flavorB = Flavor.builder() + .name('c1.medium.2') + .id('id-b') + .build() + + def keyA = Keys.getInstanceTypeKey(flavorA.id, + ACCOUNT_NAME, + REGION) + + def keyB = Keys.getInstanceTypeKey(flavorB.id, + ACCOUNT_NAME, + REGION) + + when: + def cache = agent.loadData(providerCache) + + then: + credentials.regionToZones[REGION] == [AZ] + 1 * cloudClient.getInstanceTypes(REGION, AZ) >> [flavorA, flavorB] + with(cache.cacheResults.get(Keys.Namespace.INSTANCE_TYPES.ns)) { Collection cd -> + cd.size() == 2 + cd.id.containsAll([keyA, keyB]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudNetworkCachingAgentSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudNetworkCachingAgentSpec.groovy new file mode 100644 index 00000000000..a03d2443e91 --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudNetworkCachingAgentSpec.groovy @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.vpc.v1.domain.Vpc +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudNetworkCachingAgentSpec extends Specification { + + static final String REGION = 'cn-north-1' + static final String ACCOUNT_NAME = 'some-account-name' + + void "should add networks on initial run"() { + setup: + def registry = new DefaultRegistry() + def cloudClient = Mock(HuaweiCloudClient); + def credentials = Mock(HuaweiCloudNamedAccountCredentials) + credentials.cloudClient >> cloudClient + credentials.name >> ACCOUNT_NAME + def ProviderCache providerCache = Mock(ProviderCache) + + @Subject + HuaweiCloudNetworkCachingAgent agent = new HuaweiCloudNetworkCachingAgent( + credentials, new ObjectMapper(), REGION) + + def vpcA = Vpc.builder() + .name('name-a') + .id('name-a') + .build() + + def vpcB = Vpc.builder() + .name('name-b') + .id('name-b') + .build() + + def keyGroupA = Keys.getNetworkKey(vpcA.id, + ACCOUNT_NAME, + REGION) + + def keyGroupB = Keys.getNetworkKey(vpcB.id, + ACCOUNT_NAME, + REGION) + + when: + def cache = agent.loadData(providerCache) + + then: + 1 * cloudClient.getVpcs(REGION) >> [vpcA, vpcB] + with(cache.cacheResults.get(Keys.Namespace.NETWORKS.ns)) { Collection cd -> + cd.size() == 2 + cd.id.containsAll([keyGroupA, keyGroupB]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSecurityGroupCachingAgentSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSecurityGroupCachingAgentSpec.groovy new file mode 100644 index 00000000000..bfcc60f2be6 --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSecurityGroupCachingAgentSpec.groovy @@ -0,0 +1,80 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent + +import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroup +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudSecurityGroupCachingAgentSpec extends Specification { + + static final String REGION = 'cn-north-1' + static final String ACCOUNT_NAME = 'some-account-name' + + void "should add security groups on initial run"() { + setup: + def registry = new DefaultRegistry() + def cloudClient = Mock(HuaweiCloudClient); + def credentials = Mock(HuaweiCloudNamedAccountCredentials) + credentials.cloudClient >> cloudClient + credentials.name >> ACCOUNT_NAME + def ProviderCache providerCache = Mock(ProviderCache) + + @Subject + HuaweiCloudSecurityGroupCachingAgent agent = new HuaweiCloudSecurityGroupCachingAgent( + credentials, new ObjectMapper(), registry, REGION) + + def securityGroupA = SecurityGroup.builder() + .name('name-a') + .id('name-a') + .build() + + def securityGroupB = SecurityGroup.builder() + .name('name-b') + .id('name-b') + .build() + + def keyGroupA = Keys.getSecurityGroupKey(securityGroupA.name, + securityGroupA.id, + ACCOUNT_NAME, + REGION) + + def keyGroupB = Keys.getSecurityGroupKey(securityGroupB.name, + securityGroupB.id, + ACCOUNT_NAME, + REGION) + + when: + def cache = agent.loadData(providerCache) + + then: + 1 * cloudClient.getSecurityGroups(REGION) >> [securityGroupA, securityGroupB] + with(cache.cacheResults.get(Keys.Namespace.SECURITY_GROUPS.ns)) { Collection cd -> + cd.size() == 2 + cd.id.containsAll([keyGroupA, keyGroupB]) + } + (0..1) * providerCache.getAll("onDemand", [keyGroupA, keyGroupB]) + (0..1) * providerCache.getAll("onDemand", [keyGroupB, keyGroupA]) + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSubnetCachingAgentSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSubnetCachingAgentSpec.groovy new file mode 100644 index 00000000000..b45229d1b8b --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/agent/HuaweiCloudSubnetCachingAgentSpec.groovy @@ -0,0 +1,78 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.vpc.v1.domain.Subnet +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.provider.ProviderCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import com.netflix.spinnaker.clouddriver.huaweicloud.client.HuaweiCloudClient +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudSubnetCachingAgentSpec extends Specification { + + static final String REGION = 'cn-north-1' + static final String ACCOUNT_NAME = 'some-account-name' + + void "should add subnets on initial run"() { + setup: + def registry = new DefaultRegistry() + def cloudClient = Mock(HuaweiCloudClient); + def credentials = Mock(HuaweiCloudNamedAccountCredentials) + credentials.cloudClient >> cloudClient + credentials.name >> ACCOUNT_NAME + def ProviderCache providerCache = Mock(ProviderCache) + + @Subject + HuaweiCloudSubnetCachingAgent agent = new HuaweiCloudSubnetCachingAgent( + credentials, new ObjectMapper(), REGION) + + def subnetA = Subnet.builder() + .name('name-a') + .id('name-a') + .vpcId("vpc") + .build() + + def subnetB = Subnet.builder() + .name('name-b') + .id('name-b') + .vpcId("vpc") + .build() + + def keyA = Keys.getSubnetKey(subnetA.id, + ACCOUNT_NAME, + REGION) + + def keyB = Keys.getSubnetKey(subnetB.id, + ACCOUNT_NAME, + REGION) + + when: + def cache = agent.loadData(providerCache) + + then: + 1 * cloudClient.getSubnets(REGION) >> [subnetA, subnetB] + with(cache.cacheResults.get(Keys.Namespace.SUBNETS.ns)) { Collection cd -> + cd.size() == 2 + cd.id.containsAll([keyA, keyB]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudImageProviderSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudImageProviderSpec.groovy new file mode 100644 index 00000000000..cdf67c98efc --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudImageProviderSpec.groovy @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.ims.v2.domain.Image +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudImageProviderSpec extends Specification { + + @Subject + HuaweiCloudImageProvider provider + + WriteableCache cache = new InMemoryCache() + ObjectMapper mapper = new ObjectMapper() + + def setup() { + provider = new HuaweiCloudImageProvider(cache, mapper) + cache.mergeAll(Keys.Namespace.IMAGES.ns, getAllImages()) + } + + void "getAll lists all and does not choke on deserializing routingConfig"() { + when: + def result = provider.getAll("", "") + def result1 = provider.getAll('global', 'cn-north-1') + + then: + result.size() == 2 + result1.size() == 2 + } + + @Shared + List imageList = [ + Image.builder() + .id('16c10a5d-572a-47bf-bf52-be3aacf15845') + .name('some-image') + .build(), + + Image.builder() + .id('3b5ceb06-3b8d-43ee-866a-dc0443b85deg') + .name('some-image-2') + .build() + ] + + private List getAllImages() { + imageList.collect { Image image -> + String cacheId = Keys.getImageKey(image.id, 'global', 'cn-north-1') + Map attributes = [ + 'id': image.id, + 'name': image.name + ] + return new DefaultCacheData(cacheId, attributes, [:]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudInstanceTypeProviderSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudInstanceTypeProviderSpec.groovy new file mode 100644 index 00000000000..54a6bd54828 --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudInstanceTypeProviderSpec.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.ecs.v1.domain.Flavor +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudInstanceTypeProviderSpec extends Specification { + + @Subject + HuaweiCloudInstanceTypeProvider provider + + WriteableCache cache = new InMemoryCache() + ObjectMapper mapper = new ObjectMapper() + + def setup() { + provider = new HuaweiCloudInstanceTypeProvider(cache, mapper) + cache.mergeAll(Keys.Namespace.INSTANCE_TYPES.ns, getAllInstanceTypes()) + } + + void "getAll lists all and does not choke on deserializing routingConfig"() { + when: + def result = provider.getAll() + + then: + result.size() == 2 + } + + @Shared + List flavorList = [ + Flavor.builder() + .id('16c10a5d-572a-47bf-bf52-be3aacf15845') + .name('c1.medium.1') + .build(), + + Flavor.builder() + .id('3b5ceb06-3b8d-43ee-866a-dc0443b85deg') + .name('c1.medium.2') + .build() + ] + + private List getAllInstanceTypes() { + flavorList.collect { Flavor flavor -> + String cacheId = Keys.getInstanceTypeKey(flavor.id, 'global', 'cn-north-1') + Map attributes = [ + 'id': flavor.id, + 'name': flavor.name + ] + return new DefaultCacheData(cacheId, attributes, [:]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudNetworkProviderSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudNetworkProviderSpec.groovy new file mode 100644 index 00000000000..44111fb0e81 --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudNetworkProviderSpec.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.vpc.v1.domain.Vpc +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudNetworkProviderSpec extends Specification { + + @Subject + HuaweiCloudNetworkProvider provider + + WriteableCache cache = new InMemoryCache() + ObjectMapper mapper = new ObjectMapper() + + def setup() { + provider = new HuaweiCloudNetworkProvider(cache, mapper) + cache.mergeAll(Keys.Namespace.NETWORKS.ns, getAllNetworks()) + } + + void "getAll lists all and does not choke on deserializing routingConfig"() { + when: + def result = provider.getAll() + + then: + result.size() == 2 + } + + @Shared + List networkList = [ + Vpc.builder() + .id('16c10a5d-572a-47bf-bf52-be3aacf15845') + .name('some-network') + .build(), + + Vpc.builder() + .id('3b5ceb06-3b8d-43ee-866a-dc0443b85deg') + .name('some-network-2') + .build() + ] + + private List getAllNetworks() { + networkList.collect { Vpc vpc -> + String cacheId = Keys.getNetworkKey(vpc.id, 'global', 'cn-north-1') + Map attributes = [ + 'id': vpc.id, + 'name': vpc.name + ] + return new DefaultCacheData(cacheId, attributes, [:]) + } + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSecurityGroupProviderSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSecurityGroupProviderSpec.groovy new file mode 100644 index 00000000000..af3a18f07ce --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSecurityGroupProviderSpec.groovy @@ -0,0 +1,258 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroup +import com.huawei.openstack4j.openstack.vpc.v1.domain.SecurityGroupRule +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import com.netflix.spinnaker.clouddriver.huaweicloud.model.HuaweiCloudSecurityGroup +import com.netflix.spinnaker.clouddriver.huaweicloud.security.HuaweiCloudNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.model.AddressableRange +import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class HuaweiCloudSecurityGroupProviderSpec extends Specification { + + @Subject + HuaweiCloudSecurityGroupProvider provider + + WriteableCache cache = new InMemoryCache() + ObjectMapper mapper = new ObjectMapper() + + def setup() { + provider = new HuaweiCloudSecurityGroupProvider(cache, mapper) + cache.mergeAll(Keys.Namespace.SECURITY_GROUPS.ns, getAllGroups()) + } + + void "getAll lists all"() { + when: + def result = provider.getAll(false) + + then: + result.size() == 2 + } + + void "getAllByRegion lists only those in supplied region"() { + setup: + def region = 'cn-north-1' + + when: + def result = provider.getAllByRegion(false, region) + + then: + result.size() == 1 + result.count { it.region == region } == 1 + } + + @Unroll + void "getAllByAccount lists only those in supplied #account"() { + when: + def result = provider.getAllByAccount(false, account) + + then: + result.size() == count + result.count { it.accountName == account } == count + + where: + account | count + 'prod' | 1 + 'test' | 1 + } + + void "getAllByAccountAndRegion lists only those in supplied account and region"() { + setup: + def account = 'prod' + def region = 'cn-north-1' + + when: + def result = provider.getAllByAccountAndRegion(false, account, region) + + then: + result.size() == 1 + result.count { + it.accountName == account + it.region == region + } == 1 + } + + void "getAllByAccountAndName lists only those in supplied account with supplied name"() { + setup: + def account = 'test' + def name = 'name-b' + + when: + def result = provider.getAllByAccountAndName(false, account, name) + + then: + result.size() == 1 + result.count { + it.accountName == account + it.name == name + } == 1 + } + + void "get returns match based on account, region, and name"() { + setup: + def account = 'test' + def region = 'cn-north-2' + def name = 'name-b' + + when: + def result = provider.get(account, region, name, null) + + then: + result != null + result.accountName == account + result.region == region + result.name == name + } + + void "should add ipRangeRules with different protocols"() { + setup: + def account = 'prod' + def region = 'cn-north-1' + def name = 'name-a' + def vpcId = 'default' + + when: + def sg = provider.get(account, region, name, vpcId) + + then: + sg != null + sg.accountName == account + sg.region == region + sg.name == name + + def rule = sg.inboundRules.find { it.protocol } + + def rule1 = new IpRangeRule( + range: new AddressableRange(ip: '', cidr: '0.0.0.0/0'), + 
portRanges: [ + new Rule.PortRange(startPort: 80, endPort: 80) + ] as SortedSet, + protocol: 'tcp' + ) + rule == rule1 + + def rule2 = sg.inboundRules.find { !it.protocol } + + def rule3 = new com.netflix.spinnaker.clouddriver.model.securitygroups.SecurityGroupRule( + portRanges: [ + new Rule.PortRange(startPort: 1, endPort: 65535) + ] as SortedSet, + securityGroup: new HuaweiCloudSecurityGroup( + sg.id, + name, + region, + account, + 'name', + '', + null, + null) + ) + rule2.portRanges == rule3.portRanges + rule2.securityGroup.id == rule3.securityGroup.id + rule2.securityGroup.name == rule3.securityGroup.name + rule2.securityGroup.region == rule3.securityGroup.region + rule2.securityGroup.accountName == rule3.securityGroup.accountName + rule2.securityGroup.application == rule3.securityGroup.application + rule2.securityGroup.vpcId == rule3.securityGroup.vpcId + rule2.securityGroup.inboundRules == rule3.securityGroup.inboundRules + rule2.securityGroup.outboundRules == rule3.securityGroup.outboundRules + } + + @Shared + Map>> firewallMap = [ + 'prod': [ + 'cn-north-1': [ + SecurityGroup.builder() + .name('name-a') + .id('3b5ceb06-3b8d-43ee-866a-dc0443b85def') + .vpcId('default') + .securityGroupRules([ + SecurityGroupRule.builder() + .direction('ingress') + .ethertype('IPv6') + .id('976d4696-865f-4fb4-ac1d-385e3b06fd74') + .remoteGroupId('3b5ceb06-3b8d-43ee-866a-dc0443b85def') + .build(), + + SecurityGroupRule.builder() + .direction('ingress') + .ethertype('IPv4') + .id('16c10a5d-572a-47bf-bf52-be3aacf15845') + .portRangeMax(80) + .portRangeMin(80) + .protocol('tcp') + .remoteIpPrefix('0.0.0.0/0') + .build(), + ]) + .build() + ] + ], + test: [ + 'cn-north-2': [ + SecurityGroup.builder() + .name('name-b') + .id('3b5ceb06-3b8d-43ee-866a-dc0443b85deg') + .securityGroupRules([ + SecurityGroupRule.builder() + .direction('ingress') + .ethertype('IPv6') + .id('976d4696-865f-4fb4-ac1d-385e3b06fd73') + .remoteGroupId('3b5ceb06-3b8d-43ee-866a-dc0443b85deg') + .build(), + + SecurityGroupRule.builder() + .direction('ingress') + .ethertype('IPv4') + .id('16c10a5d-572a-47bf-bf52-be3aacf15844') + .portRangeMax(80) + .portRangeMin(80) + .protocol('tcp') + .remoteIpPrefix('0.0.0.0/0') + .build(), + ]) + .build() + ] + ] + ] + + private List getAllGroups() { + firewallMap.collect { String account, Map> regions -> + regions.collect { String region, List firewalls -> + firewalls.collect { SecurityGroup firewall -> + String cacheId = Keys.getSecurityGroupKey(firewall.getName(), firewall.getId(), account, region) + Map attributes = [ + 'securityGroup': firewall, + 'relevantSecurityGroups': [(firewall.getId()): cacheId] + ] + return new DefaultCacheData(cacheId, attributes, [:]) + } + }.flatten() + }.flatten() + } +} diff --git a/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSubnetProviderSpec.groovy b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSubnetProviderSpec.groovy new file mode 100644 index 00000000000..c97714936ed --- /dev/null +++ b/clouddriver-huaweicloud/src/test/groovy/com/netflix/spinnaker/clouddriver/huaweicloud/provider/view/HuaweiCloudSubnetProviderSpec.groovy @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Huawei Technologies Co.,Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.huaweicloud.provider.view + +import com.fasterxml.jackson.databind.ObjectMapper +import com.huawei.openstack4j.openstack.vpc.v1.domain.Subnet +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.cats.cache.DefaultCacheData +import com.netflix.spinnaker.cats.cache.WriteableCache +import com.netflix.spinnaker.cats.mem.InMemoryCache +import com.netflix.spinnaker.clouddriver.huaweicloud.cache.Keys +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class HuaweiCloudSubnetProviderSpec extends Specification { + + @Subject + HuaweiCloudSubnetProvider provider + + WriteableCache cache = new InMemoryCache() + ObjectMapper mapper = new ObjectMapper() + + def setup() { + provider = new HuaweiCloudSubnetProvider(cache, mapper) + cache.mergeAll(Keys.Namespace.SUBNETS.ns, getAllSubnets()) + } + + void "getAll lists all and does not choke on deserializing routingConfig"() { + when: + def result = provider.getAll() + + then: + result.size() == 2 + } + + @Shared + List subnetList = [ + Subnet.builder() + .id('16c10a5d-572a-47bf-bf52-be3aacf15845') + .name('some-subnet') + .vpcId("vpc") + .build(), + + Subnet.builder() + .id('3b5ceb06-3b8d-43ee-866a-dc0443b85deg') + .name('some-subnet-2') + .vpcId("vpc") + .build() + ] + + private List getAllSubnets() { + subnetList.collect { Subnet subnet -> + String cacheId = Keys.getSubnetKey(subnet.id, 'global', 'cn-north-1') + Map attributes = [ + 'id': subnet.id, + 'name': subnet.name + ] + return new DefaultCacheData(cacheId, attributes, [:]) + } + } +} diff --git a/clouddriver-integration/clouddriver-integration.gradle b/clouddriver-integration/clouddriver-integration.gradle new file mode 100644 index 00000000000..81844d00413 --- /dev/null +++ b/clouddriver-integration/clouddriver-integration.gradle @@ -0,0 +1,28 @@ +dependencies { + testImplementation "com.fasterxml.jackson.core:jackson-databind" + testImplementation "com.mysql:mysql-connector-j" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.postgresql:postgresql" + testImplementation "org.slf4j:slf4j-api" + testImplementation "org.testcontainers:junit-jupiter" + testImplementation "org.testcontainers:mysql" + testImplementation "org.testcontainers:postgresql" + testImplementation "org.testcontainers:testcontainers" + testRuntimeOnly "ch.qos.logback:logback-classic" +} + +test.configure { + def fullDockerImageName = System.getenv('FULL_DOCKER_IMAGE_NAME') + onlyIf("there is a docker image to test") { + fullDockerImageName != null && fullDockerImageName.trim() != '' + } +} + +test { + // So stdout and stderr from the just-built container are available in CI + testLogging.showStandardStreams = true + + // Run the tests when the docker image changes + inputs.property 'fullDockerImageName', System.getenv('FULL_DOCKER_IMAGE_NAME') +} diff --git a/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/BaseContainerTest.java 
b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/BaseContainerTest.java new file mode 100644 index 00000000000..e81a4699aa0 --- /dev/null +++ b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/BaseContainerTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2024 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.time.Duration; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +public class BaseContainerTest { + + private static final Logger logger = LoggerFactory.getLogger(BaseContainerTest.class); + + protected final Network network = Network.newNetwork(); + + protected static final int CLOUDDRIVER_PORT = 7002; + + protected GenericContainer clouddriverContainer; + + private static DockerImageName dockerImageName; + + @BeforeAll + static void setupInit() { + String fullDockerImageName = System.getenv("FULL_DOCKER_IMAGE_NAME"); + // Skip the tests if there's no docker image. This allows gradlew build to work. 
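+ // (JUnit 5 reports a failed assumption as an aborted test rather than a failure,
+ // so a plain `./gradlew build` without FULL_DOCKER_IMAGE_NAME set still passes.)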
+ assumeTrue(fullDockerImageName != null); + dockerImageName = DockerImageName.parse(fullDockerImageName); + } + + @BeforeEach + void init(TestInfo testInfo) { + System.out.println("--------------- Test " + testInfo.getDisplayName()); + clouddriverContainer = + new GenericContainer(dockerImageName) + .withNetwork(network) + .withExposedPorts(CLOUDDRIVER_PORT) + .waitingFor(Wait.forHealthcheck().withStartupTimeout(Duration.ofSeconds(120))); + } + + void testHealthCheck() throws Exception { + // hit an arbitrary endpoint + HttpRequest request = + HttpRequest.newBuilder() + .uri( + new URI( + "http://" + + clouddriverContainer.getHost() + + ":" + + clouddriverContainer.getFirstMappedPort() + + "/health")) + .GET() + .build(); + + HttpClient client = HttpClient.newHttpClient(); + + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + assertThat(response).isNotNull(); + logger.info("response: {}, {}", response.statusCode(), response.body()); + assertThat(response.statusCode()).isEqualTo(200); + } +} diff --git a/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/MySqlContainerTest.java b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/MySqlContainerTest.java new file mode 100644 index 00000000000..04547faf22a --- /dev/null +++ b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/MySqlContainerTest.java @@ -0,0 +1,105 @@ +/* + * Copyright 2024 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.util.Map; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.MySQLContainer; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.junit.jupiter.Testcontainers; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@Testcontainers +public class MySqlContainerTest extends BaseContainerTest { + + private static final Logger logger = LoggerFactory.getLogger(MySqlContainerTest.class); + + private static final String MYSQL_NETWORK_ALIAS = "mysqlHost"; + + private static final int MYSQL_PORT = 3306; + + private MySQLContainer mysql; + + private String jdbcUrl = ""; + + @BeforeEach + void setup() throws Exception { + mysql = + new MySQLContainer<>("mysql:8.0.37") + .withDatabaseName("clouddriver") + .withUsername("root") + .withPassword("root") + .withNetwork(network) + .withNetworkAliases(MYSQL_NETWORK_ALIAS) + .withInitScript("mysql_init.sql"); + mysql.start(); + jdbcUrl = String.format("jdbc:mysql://%s:%d/clouddriver", MYSQL_NETWORK_ALIAS, MYSQL_PORT); + clouddriverContainer + .dependsOn(mysql) + .withEnv("SPRING_APPLICATION_JSON", getSpringApplicationJson()) + .start(); + + Slf4jLogConsumer logConsumer = new Slf4jLogConsumer(logger); + clouddriverContainer.followOutput(logConsumer); + } + + private String getSpringApplicationJson() throws JsonProcessingException { + logger.info("--------- jdbcUrl: '{}'", jdbcUrl); + Map connectionPool = + Map.of("jdbcUrl", jdbcUrl, "user", "clouddriver_service", "password", "c10uddriver"); + Map migration = + Map.of("jdbcUrl", jdbcUrl, "user", "clouddriver_migrate", "password", "c10uddriver"); + + Map properties = + Map.of( + "sql.enabled", + "true", + "services.fiat.baseUrl", + "http://nowhere", + "sql.connectionPool", + connectionPool, + "redis.enabled", + "false", + "sql.migration", + migration); + ObjectMapper mapper = new ObjectMapper(); + return mapper.writeValueAsString(properties); + } + + @AfterAll + void cleanupOnce() { + if (clouddriverContainer != null) { + clouddriverContainer.stop(); + } + + if (mysql != null) { + mysql.stop(); + } + } + + @Test + void testHealthCheckWithMySql() throws Exception { + super.testHealthCheck(); + } +} diff --git a/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/PostgresContainerTest.java b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/PostgresContainerTest.java new file mode 100644 index 00000000000..8b3554911eb --- /dev/null +++ b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/PostgresContainerTest.java @@ -0,0 +1,106 @@ +/* + * Copyright 2024 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.util.Map; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.PostgreSQLContainer; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.junit.jupiter.Testcontainers; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@Testcontainers +public class PostgresContainerTest extends BaseContainerTest { + + private static final Logger logger = LoggerFactory.getLogger(PostgresContainerTest.class); + + private static final String POSTGRES_NETWORK_ALIAS = "postgresHost"; + + private static final int POSTGRES_PORT = 5432; + + private PostgreSQLContainer postgres; + + private String jdbcUrl = ""; + + @BeforeEach + void setup() throws Exception { + postgres = + new PostgreSQLContainer<>("postgres:15") + .withDatabaseName("clouddriver") + .withUsername("postgres") + .withPassword("postgres") + .withNetwork(network) + .withNetworkAliases(POSTGRES_NETWORK_ALIAS) + .withInitScript("postgres_init.sql"); + postgres.start(); + jdbcUrl = + String.format("jdbc:postgresql://%s:%d/clouddriver", POSTGRES_NETWORK_ALIAS, POSTGRES_PORT); + clouddriverContainer + .dependsOn(postgres) + .withEnv("SPRING_APPLICATION_JSON", getSpringApplicationJson()) + .start(); + + Slf4jLogConsumer logConsumer = new Slf4jLogConsumer(logger); + clouddriverContainer.followOutput(logConsumer); + } + + private String getSpringApplicationJson() throws JsonProcessingException { + logger.info("----------- jdbcUrl: '{}'", jdbcUrl); + Map connectionPool = + Map.of("jdbcUrl", jdbcUrl, "user", "clouddriver_service", "password", "c10uddriver"); + Map migration = + Map.of("jdbcUrl", jdbcUrl, "user", "clouddriver_migrate", "password", "c10uddriver"); + + Map properties = + Map.of( + "sql.enabled", + "true", + "services.fiat.baseUrl", + "http://nowhere", + "sql.connectionPool", + connectionPool, + "redis.enabled", + "false", + "sql.migration", + migration); + ObjectMapper mapper = new ObjectMapper(); + return mapper.writeValueAsString(properties); + } + + @AfterAll + void cleanupOnce() { + if (clouddriverContainer != null) { + clouddriverContainer.stop(); + } + + if (postgres != null) { + postgres.stop(); + } + } + + @Test + void testHealthCheckWithPostgres() throws Exception { + super.testHealthCheck(); + } +} diff --git a/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/PostgresMigrationContainerTest.java b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/PostgresMigrationContainerTest.java new file mode 100644 index 00000000000..7ad1faadbb7 --- /dev/null +++ b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/PostgresMigrationContainerTest.java @@ -0,0 +1,136 @@ +/* + * Copyright 2024 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.time.Duration;
+import java.util.Map;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.PostgreSQLContainer;
+import org.testcontainers.containers.output.Slf4jLogConsumer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.utility.DockerImageName;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@Testcontainers
+public class PostgresMigrationContainerTest extends BaseContainerTest {
+
+  private static final Logger logger =
+      LoggerFactory.getLogger(PostgresMigrationContainerTest.class);
+
+  private static final String POSTGRES_NETWORK_ALIAS = "postgresHost";
+
+  private static final int POSTGRES_PORT = 5432;
+
+  private PostgreSQLContainer<?> postgres;
+
+  private GenericContainer<?> clouddriverInitialContainer;
+
+  // This is the latest image that still runs Liquibase 3.10.3. Starting from it
+  // recreates the real upgrade scenario, so the test detects when validCheckSums
+  // entries are missing in later clouddriver versions that use a newer Liquibase.
+  private static final DockerImageName previousDockerImageName =
+      DockerImageName.parse(
+          "us-docker.pkg.dev/spinnaker-community/docker/clouddriver:5.82.2-dev-release-1.32.x-7a8e6e8b3-202406051721-unvalidated");
+
+  private String jdbcUrl = "";
+
+  @BeforeEach
+  void setup() throws Exception {
+    postgres =
+        new PostgreSQLContainer<>("postgres:15")
+            .withDatabaseName("clouddriver")
+            .withUsername("postgres")
+            .withPassword("postgres")
+            .withNetwork(network)
+            .withNetworkAliases(POSTGRES_NETWORK_ALIAS)
+            .withInitScript("postgres_init.sql")
+            .withReuse(true);
+    postgres.start();
+    jdbcUrl =
+        String.format(
+            "jdbc:postgresql://%s:%d/clouddriver", POSTGRES_NETWORK_ALIAS, POSTGRES_PORT);
+
+    // Start the first clouddriver container (from the previous release) so that
+    // all the db changelog sets are executed
+    clouddriverInitialContainer =
+        new GenericContainer<>(previousDockerImageName)
+            .withNetwork(network)
+            .withExposedPorts(CLOUDDRIVER_PORT)
+            .waitingFor(Wait.forHealthcheck().withStartupTimeout(Duration.ofSeconds(120)))
+            .dependsOn(postgres)
+            .withEnv("SPRING_APPLICATION_JSON", getSpringApplicationJson());
+    clouddriverInitialContainer.start();
+    Slf4jLogConsumer logConsumer = new Slf4jLogConsumer(logger);
+    clouddriverInitialContainer.followOutput(logConsumer);
+    clouddriverInitialContainer.stop();
+
+    // Start the second (latest) clouddriver container to validate the migration
+    clouddriverContainer
+        .dependsOn(postgres)
+        .withEnv("SPRING_APPLICATION_JSON", getSpringApplicationJson())
+        .start();
+
+    clouddriverContainer.followOutput(logConsumer);
+  }
+
+  private String getSpringApplicationJson() throws JsonProcessingException {
+    logger.info("----------- jdbcUrl: '{}'", jdbcUrl);
+    Map<String, String> connectionPool =
+        Map.of("jdbcUrl", jdbcUrl, "user", "clouddriver_service", "password", "c10uddriver");
+    Map<String, String> migration =
+        Map.of("jdbcUrl", jdbcUrl, "user", "clouddriver_migrate", "password", "c10uddriver");
+
+    Map<String, Object> properties =
+        Map.of(
+            "sql.enabled",
+            "true",
+            "services.fiat.baseUrl",
+            "http://nowhere",
+            "sql.connectionPool",
+            connectionPool,
+            "redis.enabled",
+            "false",
+            "sql.migration",
+            migration);
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(properties);
+  }
+
+  @AfterAll
+  void cleanupOnce() {
+    if (clouddriverContainer != null) {
+      clouddriverContainer.stop();
+    }
+
+    if (postgres != null) {
+      postgres.stop();
+    }
+  }
+
+  @Test
+  void testHealthCheckWithPostgres() throws Exception {
+    super.testHealthCheck();
+  }
+}
diff --git a/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/RedisContainerTest.java b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/RedisContainerTest.java
new file mode 100644
index 00000000000..382a514c15b
--- /dev/null
+++ b/clouddriver-integration/src/test/java/com/netflix/spinnaker/clouddriver/RedisContainerTest.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2024 Salesforce, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.Map;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.output.Slf4jLogConsumer;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.utility.DockerImageName;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@Testcontainers
+public class RedisContainerTest extends BaseContainerTest {
+
+  private static final Logger logger = LoggerFactory.getLogger(RedisContainerTest.class);
+
+  private static final String REDIS_NETWORK_ALIAS = "redisHost";
+
+  private static final int REDIS_PORT = 6379;
+
+  private GenericContainer<?> redis;
+
+  @BeforeEach
+  void setup() throws Exception {
+    redis =
+        new GenericContainer<>(DockerImageName.parse("library/redis:5-alpine"))
+            .withNetwork(network)
+            .withNetworkAliases(REDIS_NETWORK_ALIAS)
+            .withExposedPorts(REDIS_PORT);
+    redis.start();
+    clouddriverContainer
+        .dependsOn(redis)
+        .withEnv("SPRING_APPLICATION_JSON", getSpringApplicationJson())
+        .start();
+
+    Slf4jLogConsumer logConsumer = new Slf4jLogConsumer(logger);
+    clouddriverContainer.followOutput(logConsumer);
+  }
+
+  private String getSpringApplicationJson() throws JsonProcessingException {
+    String redisUrl = "redis://" + REDIS_NETWORK_ALIAS + ":" + REDIS_PORT;
+    logger.info("----------- redisUrl: '{}'", redisUrl);
+    Map<String, String> properties =
+        Map.of("redis.connection", redisUrl, "services.fiat.baseUrl", "http://nowhere");
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(properties);
+  }
+
+  @AfterAll
+  void cleanupOnce() {
+    if (clouddriverContainer != null) {
+      clouddriverContainer.stop();
+    }
+
+    if (redis != null) {
+      redis.stop();
+    }
+  }
+
+  @Test
+  void testHealthCheckWithRedis() throws Exception {
+    super.testHealthCheck();
+  }
+}
diff --git a/clouddriver-integration/src/test/resources/logback.xml b/clouddriver-integration/src/test/resources/logback.xml
new file mode 100644
index 00000000000..6145d38780b
--- /dev/null
+++ b/clouddriver-integration/src/test/resources/logback.xml
@@ -0,0 +1,36 @@
+<configuration>
+
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+</configuration>
diff --git a/clouddriver-integration/src/test/resources/mysql_init.sql b/clouddriver-integration/src/test/resources/mysql_init.sql
new file mode 100644
index 00000000000..597096c5b5c
--- /dev/null
+++ b/clouddriver-integration/src/test/resources/mysql_init.sql
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2024 Salesforce, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE USER 'clouddriver_service'@'%' IDENTIFIED BY 'c10uddriver';
+CREATE USER 'clouddriver_migrate'@'%' IDENTIFIED BY 'c10uddriver';
+
+GRANT
+  SELECT, INSERT, UPDATE, DELETE, CREATE, EXECUTE, SHOW VIEW
+  ON *.*
+  TO 'clouddriver_service'@'%';
+
+GRANT
+  SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, REFERENCES, INDEX, ALTER, LOCK TABLES, EXECUTE, SHOW VIEW
+  ON *.*
+  TO 'clouddriver_migrate'@'%';
diff --git a/clouddriver-integration/src/test/resources/postgres_init.sql b/clouddriver-integration/src/test/resources/postgres_init.sql
new file mode 100644
index 00000000000..ebca7eb24ef
--- /dev/null
+++ b/clouddriver-integration/src/test/resources/postgres_init.sql
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2024 Salesforce, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +CREATE USER clouddriver_service with PASSWORD 'c10uddriver'; +CREATE USER clouddriver_migrate with PASSWORD 'c10uddriver'; + +grant create on schema public to clouddriver_service; +grant create on schema public to clouddriver_migrate; + +GRANT pg_read_all_data TO clouddriver_service; +GRANT pg_write_all_data TO clouddriver_service; + + diff --git a/clouddriver-kubernetes/clouddriver-kubernetes.gradle b/clouddriver-kubernetes/clouddriver-kubernetes.gradle index 7d0a5080267..f9f3a83b6d0 100644 --- a/clouddriver-kubernetes/clouddriver-kubernetes.gradle +++ b/clouddriver-kubernetes/clouddriver-kubernetes.gradle @@ -1,17 +1,142 @@ +plugins { + id("net.ltgt.errorprone") version "4.0.0" + id 'com.adarshr.test-logger' version '2.1.0' +} + +tasks.compileGroovy.enabled = false + +sourceSets { + main { + java.srcDirs = ['src/main/java'] + } + integration { + java.srcDirs = ["src/integration/java"] + resources.srcDirs = ["src/integration/resources"] + } +} + +configurations { + integrationImplementation.extendsFrom testImplementation + integrationRuntime.extendsFrom testRuntime +} + +tasks.withType(JavaCompile) { + options.compilerArgs += [ + '-Xlint:all', + + // This check ensures that all annotations are handled at compile time, but + // many of the annotations we use are not intended to be handled at compile + // time and are instead used at runtime. + '-Xlint:-processing', + // This check ensures that all classes implementing Serializable set the + // serialVersionUID. We don't use Java serialization, but some classes + // extend JDK classes implementing Serializable. Rather than add a + // meaningless serialVersionUID, just disable the warning. + '-Xlint:-serial', + + // Temporarily suppressed warnings. These are here only while we fix or + // suppress the warnings in these categories. + '-Xlint:-unchecked', + ] + + // Temporarily disable error-prone checks that are generating warnings. These + // are only here while we fix or suppress the warnings in these categories. 
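+  // (Each name below is an Error Prone bug pattern. Once a category is cleaned
+  // up, it can be re-enabled by removing it from the disable(...) call, or
+  // promoted to a build failure with, e.g., options.errorprone.error("EmptyCatch").)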
+ options.errorprone.disable( + "EmptyCatch", + "MissingOverride", + ) +} + dependencies { - spinnaker.group('kubernetes') - compile project(":clouddriver-artifacts") - compile project(":clouddriver-core") - compile project(":clouddriver-docker") - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') - compile spinnaker.dependency('frigga') - compile spinnaker.dependency('korkArtifacts') - compile spinnaker.dependency('lombok') - - // TODO(lwander) move to spinnaker-dependencies when library stabilizes - compile 'io.kubernetes:client-java:1.0.0-beta1' - compile 'com.github.fge:json-patch:1.9' - compile 'com.netflix.spinnaker.moniker:moniker:0.2.0' - compile 'com.jayway.jsonpath:json-path:2.3.0' + errorprone("com.google.errorprone:error_prone_core:2.28.0") + implementation project(":clouddriver-api") + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-core") + implementation project(":clouddriver-configserver") + implementation project(":cats:cats-core") + implementation project(":clouddriver-security") + + implementation "org.apache.groovy:groovy" + + implementation "com.google.code.findbugs:jsr305" + implementation "com.google.guava:guava" + implementation 'com.jayway.jsonpath:json-path:2.3.0' + implementation "com.github.ben-manes.caffeine:guava" + implementation "com.github.wnameless.json:json-flattener:0.11.1" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-annotations" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-credentials" + implementation "io.spinnaker.kork:kork-config" + implementation "io.spinnaker.kork:kork-cloud-config-server" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-moniker" + implementation "io.spinnaker.kork:kork-secrets" + implementation "io.spinnaker.kork:kork-security" + implementation "io.kubernetes:client-java" + implementation "io.kubernetes:client-java-api-fluent:13.0.2" + implementation "org.apache.commons:commons-lang3" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.springframework.security:spring-security-config" + implementation "org.springframework.cloud:spring-cloud-context" + implementation "org.springframework.cloud:spring-cloud-config-server" + implementation "io.github.resilience4j:resilience4j-retry" + implementation "io.github.resilience4j:resilience4j-micrometer" + implementation "io.swagger.core.v3:swagger-annotations" + + testImplementation "io.spinnaker.kork:kork-test" + testImplementation "org.apache.commons:commons-exec" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation "org.mockito:mockito-junit-jupiter" + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.apache.groovy:groovy-templates" + + integrationImplementation project(":clouddriver-web") + 
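+  // The integration suite below boots the fully assembled service, so it needs
+  // clouddriver-web plus Testcontainers' MySQL support and REST Assured for
+  // HTTP assertions against the running application.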
+  integrationImplementation "org.springframework.boot:spring-boot-starter-test"
+  integrationImplementation "org.testcontainers:testcontainers"
+  integrationImplementation "org.testcontainers:mysql"
+  integrationImplementation "org.testcontainers:junit-jupiter"
+  integrationImplementation "com.mysql:mysql-connector-j"
+  integrationImplementation "io.rest-assured:rest-assured"
+  integrationImplementation "org.yaml:snakeyaml"
+}
+
+testlogger {
+  // Don't show passed unit tests; it's difficult to spot the failures among hundreds of passing tests.
+  showPassed false
+  showPassedStandardStreams false
+}
+
+task integrationTest(type: Test) {
+  description = 'Runs kubernetes provider integration tests.'
+  group = 'verification'
+
+  environment "IT_BUILD_HOME", "$project.buildDir/it"
+  environment "IMAGE", project.getProperties().get("kubernetes-image")
+  environment "KUBECTL_VERSION", project.getProperties().get("kubectl-version")
+  environment "KUBERNETES_VERSION", project.getProperties().get("kubernetes-version")
+  useJUnitPlatform()
+
+  testClassesDirs = sourceSets.integration.output.classesDirs
+  classpath = sourceSets.integration.runtimeClasspath
+  shouldRunAfter test
+
+  testlogger {
+    theme 'standard'
+    showStandardStreams true
+    showPassedStandardStreams false
+    showFailedStandardStreams true
+    showPassed false
+  }
+}
diff --git a/clouddriver-kubernetes/src/integration/README.md b/clouddriver-kubernetes/src/integration/README.md
new file mode 100644
index 00000000000..7fe8423c770
--- /dev/null
+++ b/clouddriver-kubernetes/src/integration/README.md
@@ -0,0 +1,18 @@
+### Kubernetes provider integration tests
+
+#### Run
+
+From the command line
+```shell
+./gradlew :clouddriver-kubernetes:integrationTest
+```
+
+From IntelliJ: individual tests can be run or debugged by clicking the corresponding icon next to the test name in the IDE.
+
+
+#### How they work
+
+The tests use the Spring test framework to start clouddriver on a random port, reading configuration from the `clouddriver.yml` config file in the resources folder. They use the Testcontainers framework to start a real MySQL server in a docker container, and [kind](https://kind.sigs.k8s.io) to start a real Kubernetes cluster where the deployments happen.
+
+The kind and kubectl binaries are downloaded to the `clouddriver-kubernetes/build/it` folder, and the `kubeconfig` file for connecting to the test cluster is generated there as well. The cluster itself runs as a docker container started by kind.
+
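+A single test class can also be selected with Gradle's standard `--tests` filter
+(the class name here is just an example):
+```shell
+./gradlew :clouddriver-kubernetes:integrationTest --tests '*DeployManifestIT'
+```
+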
diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/BaseTest.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/BaseTest.java
new file mode 100644
index 00000000000..371a794d01b
--- /dev/null
+++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/BaseTest.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2020 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.it;
+
+import static io.restassured.RestAssured.get;
+import static org.hamcrest.Matchers.hasItems;
+
+import com.netflix.spinnaker.clouddriver.Main;
+import com.netflix.spinnaker.clouddriver.kubernetes.it.containers.KubernetesCluster;
+import com.netflix.spinnaker.clouddriver.kubernetes.it.utils.TestLifecycleListener;
+import io.restassured.RestAssured;
+import io.restassured.response.Response;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.boot.web.server.LocalServerPort;
+import org.springframework.test.context.TestPropertySource;
+
+@SpringBootTest(
+    classes = {Main.class},
+    webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
+@TestPropertySource(properties = {"spring.config.location = classpath:clouddriver.yml"})
+@ExtendWith(TestLifecycleListener.class)
+public abstract class BaseTest {
+
+  public static final String APP1_NAME = "testApp1";
+  public static final String APP2_NAME = "testApp2";
+  public static final String ACCOUNT1_NAME = "account1";
+  public static final String ACCOUNT2_NAME = "account2";
+  public static final String KUBERNETES_VERSION = System.getenv("KUBERNETES_VERSION");
+
+  @LocalServerPort int port;
+
+  public static final KubernetesCluster kubeCluster;
+
+  static {
+    kubeCluster = KubernetesCluster.getInstance();
+    kubeCluster.start();
+    RestAssured.enableLoggingOfRequestAndResponseIfValidationFails();
+  }
+
+  public String baseUrl() {
+    return "http://localhost:" + port;
+  }
+
+  @BeforeEach
+  void givenAccountsReady() {
+    Response response = get(baseUrl() + "/credentials");
+    response
+        .then()
+        .log()
+        .ifValidationFails()
+        .assertThat()
+        .statusCode(200)
+        .and()
+        .body("name", hasItems(ACCOUNT1_NAME));
+  }
+}
diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/DeleteManifestIT.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/DeleteManifestIT.java
new file mode 100644
index 00000000000..62de3f6d7e8
--- /dev/null
+++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/DeleteManifestIT.java
@@ -0,0 +1,546 @@
+/*
+ * Copyright 2020 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.it;
+
+import static io.restassured.RestAssured.given;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.it.utils.KubeTestUtils;
+import io.restassured.response.Response;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.springframework.http.HttpStatus;
+
+public class DeleteManifestIT extends BaseTest {
+
+  private static String account1Ns;
+
+  @BeforeAll
+  public static void setUpAll() throws IOException, InterruptedException {
+    account1Ns = kubeCluster.createNamespace(ACCOUNT1_NAME);
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a secret deployed outside of Spinnaker\n"
+          + "When sending a delete manifest operation with static target\n"
+          + "Then the secret is deleted\n===")
+  @Test
+  public void shouldDeleteByStaticTarget() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    final String kind = "secret";
+    final String name = "mysecret";
+    Map<String, Object> manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/secret.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", name)
+            .asMap();
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", manifest);
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> request =
+        buildStaticRequestBody(String.format("%s %s", kind, name), "true");
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), request, account1Ns);
+    // ------------------------- then ---------------------------
+    String exist =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, name));
+    assertTrue(
+        exist.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, name)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given two replicaSets deployed with Spinnaker\n"
+          + "When sending a delete manifest operation using the newest dynamic target criteria\n"
+          + "Then the newest replicaSet is deleted\n===")
+  @Test
+  public void shouldDeleteNewestByDynamicTarget() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "replicaSet";
+    String criteria = "newest";
+    String name = String.format("nginx-%s-test", criteria);
+    String nameToDelete = String.format("%s-v001", name);
+    List<Map<String, Object>> deployManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+            .withValue("metadata.name", name)
+            .asList();
+    List<Map<String, Object>> deployRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.namespaceOverride", account1Ns)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", deployManifest)
+            .asList();
+    for (byte i = 0; i < 2; i++) {
+      KubeTestUtils.deployAndWaitStable(
+          baseUrl(), deployRequest, account1Ns, String.format("replicaSet %s-v%03d", name, i));
+    }
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildDynamicRequestBody(
+            String.format("%s %s", kind, nameToDelete),
+            "true",
+            String.format("%s %s", kind, name),
+            criteria,
+            kind);
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, nameToDelete));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, nameToDelete)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given two replicaSets deployed with Spinnaker\n"
+          + "When sending a delete manifest operation using the second-newest dynamic target criteria\n"
+          + "Then the second newest replicaSet is deleted\n===")
+  @Test
+  public void shouldDeleteSecondNewestByDynamicTarget() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "replicaSet";
+    String criteria = "second-newest";
+    String name = String.format("nginx-%s-test", criteria);
+    String nameToDelete = String.format("%s-v000", name);
+    List<Map<String, Object>> deployManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+            .withValue("metadata.name", name)
+            .asList();
+    List<Map<String, Object>> deployRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.namespaceOverride", account1Ns)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", deployManifest)
+            .asList();
+    for (byte i = 0; i < 2; i++) {
+      KubeTestUtils.deployAndWaitStable(
+          baseUrl(), deployRequest, account1Ns, String.format("replicaSet %s-v%03d", name, i));
+    }
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildDynamicRequestBody(
+            String.format("%s %s", kind, nameToDelete),
+            "true",
+            String.format("%s %s", kind, name),
+            criteria,
+            kind);
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, nameToDelete));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, nameToDelete)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given two replicaSets deployed with Spinnaker\n"
+          + "When sending a delete manifest operation using the oldest dynamic target criteria\n"
+          + "Then the oldest replicaSet is deleted\n===")
+  @Test
+  public void shouldDeleteOldestByDynamicTarget() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "replicaSet";
+    String criteria = "oldest";
+    String name = String.format("nginx-%s-test", criteria);
+    String nameToDelete = String.format("%s-v000", name);
+    List<Map<String, Object>> deployManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+            .withValue("metadata.name", name)
+            .asList();
+    List<Map<String, Object>> deployRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.namespaceOverride", account1Ns)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", deployManifest)
+            .asList();
+    for (byte i = 0; i < 2; i++) {
+      KubeTestUtils.deployAndWaitStable(
+          baseUrl(), deployRequest, account1Ns, String.format("replicaSet %s-v%03d", name, i));
+    }
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildDynamicRequestBody(
+            String.format("%s %s", kind, nameToDelete),
+            "true",
+            String.format("%s %s", kind, name),
+            criteria,
+            kind);
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, nameToDelete));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, nameToDelete)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given two replicaSets deployed with Spinnaker\n"
+          + "When sending a delete manifest operation using the largest dynamic target criteria\n"
+          + "Then the replicaSet with the greater number of replicas is deleted\n===")
+  @Test
+  public void shouldDeleteLargestByDynamicTarget() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "replicaSet";
+    String criteria = "largest";
+    String name = String.format("nginx-%s-test", criteria);
+    String nameToDelete = String.format("%s-v001", name);
+    for (byte i = 0; i < 2; i++) {
+      List<Map<String, Object>> deployManifest =
+          KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+              .withValue("metadata.name", name)
+              .withValue("spec.replicas", i)
+              .asList();
+      List<Map<String, Object>> deployRequest =
+          KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+              .withValue("deployManifest.account", ACCOUNT1_NAME)
+              .withValue("deployManifest.namespaceOverride", account1Ns)
+              .withValue("deployManifest.moniker.app", APP1_NAME)
+              .withValue("deployManifest.manifests", deployManifest)
+              .asList();
+      KubeTestUtils.deployAndWaitStable(
+          baseUrl(), deployRequest, account1Ns, String.format("replicaSet %s-v%03d", name, i));
+    }
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildDynamicRequestBody(
+            String.format("%s %s", kind, nameToDelete),
+            "true",
+            String.format("%s %s", kind, name),
+            criteria,
+            kind);
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, nameToDelete));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, nameToDelete)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given two replicaSets deployed with Spinnaker\n"
+          + "When sending a delete manifest operation using the smallest dynamic target criteria\n"
+          + "Then the replicaSet with the smaller number of replicas is deleted\n===")
+  @Test
+  public void shouldDeleteSmallestByDynamicTarget() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "replicaSet";
+    String criteria = "smallest";
+    String name = String.format("nginx-%s-test", criteria);
+    String nameToDelete = String.format("%s-v000", name);
+    for (byte i = 0; i < 2; i++) {
+      List<Map<String, Object>> deployManifest =
+          KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+              .withValue("metadata.name", name)
+              .withValue("spec.replicas", i)
+              .asList();
+      List<Map<String, Object>> deployRequest =
+          KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+              .withValue("deployManifest.account", ACCOUNT1_NAME)
+              .withValue("deployManifest.namespaceOverride", account1Ns)
+              .withValue("deployManifest.moniker.app", APP1_NAME)
+              .withValue("deployManifest.manifests", deployManifest)
+              .asList();
+      KubeTestUtils.deployAndWaitStable(
+          baseUrl(), deployRequest, account1Ns, String.format("replicaSet %s-v%03d", name, i));
+    }
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildDynamicRequestBody(
+            String.format("%s %s", kind, nameToDelete),
+            "true",
+            String.format("%s %s", kind, name),
+            criteria,
+            kind);
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, nameToDelete));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, nameToDelete)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a deployment manifest with 3 replicas outside of Spinnaker\n"
+          + "When sending a delete manifest operation without the cascading option enabled\n"
+          + "Then just the deployment is removed, but the replicaSet/pods remain\n===")
+  @Test
+  public void shouldDeleteWithoutCascading() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "deployment";
+    String name = "myapp";
+    Map<String, Object> deployManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/deployment.yml")
+            .withValue("metadata.name", name)
+            .withValue("spec.replicas", 3)
+            .asMap();
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", deployManifest);
+    kubeCluster.execKubectl(
+        String.format(
+            "wait %s -n %s %s --for condition=Available=True --timeout=600s",
+            kind, account1Ns, name));
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildStaticRequestBody(String.format("%s %s", kind, name), "false");
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(String.format("-n %s get pods -l=app=%s", account1Ns, name));
+    assertTrue(
+        deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, name))
+            && exists.contains("Running"));
+  }
+
+  @ParameterizedTest(
+      name =
+          ".\n===\n"
+              + "Given a deployment\n"
+              + "When sending a delete manifest operation with cascading={0}\n"
+              + "Then the deployment is deleted\n===")
+  @ValueSource(strings = {"foreground", "background", "orphan"})
+  public void deleteWithValidCascadingValue(String cascadingValue)
+      throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "deployment";
+    String name = "myapp";
+    Map<String, Object> deployManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/deployment.yml")
+            .withValue("metadata.name", name)
+            .asMap();
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", deployManifest);
+    kubeCluster.execKubectl(
+        String.format(
+            "wait %s -n %s %s --for condition=Available=True --timeout=600s",
+            kind, account1Ns, name));
+
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> deleteRequest =
+        buildStaticRequestBody(String.format("%s %s", kind, name), cascadingValue);
+
+    // 30 seconds isn't long enough for delete with --cascade=foreground, so
+    // allow longer.
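+    // (With foreground cascading, the API server deletes dependent objects,
+    // e.g. the deployment's replicaSets and pods, before removing the owner
+    // itself, so the operation can exceed the default 30-second window.)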
+    List<String> deletions =
+        KubeTestUtils.sendOperation(baseUrl(), deleteRequest, account1Ns, 60, TimeUnit.SECONDS);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get deployment --ignore-not-found %s", account1Ns, name));
+    assertTrue(exists.isBlank());
+    assertEquals(1, deletions.size());
+    assertEquals(String.format("%s %s", kind, name), deletions.get(0));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a deployment\n"
+          + "When sending a delete manifest operation with an invalid cascading value\n"
+          + "Then the delete manifest operation fails and the deployment remains\n===")
+  @Test
+  public void deleteWithInvalidCascadingValue() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String kind = "deployment";
+    String name = "myapp";
+    Map<String, Object> deployManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/deployment.yml")
+            .withValue("metadata.name", name)
+            .asMap();
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", deployManifest);
+    kubeCluster.execKubectl(
+        String.format(
+            "wait %s -n %s %s --for condition=Available=True --timeout=600s",
+            kind, account1Ns, name));
+
+    // ------------------------- when ---------------------------
+    String invalidCascadingValue = "bogus";
+    List<Map<String, Object>> deleteRequest =
+        buildStaticRequestBody(String.format("%s %s", kind, name), invalidCascadingValue);
+
+    // KubeTestUtils.repeatUntilTrue waits in 5 second increments, and we need
+    // to wait at least 10 seconds to get it to try more than once. Even if the
+    // operation completes more quickly than that, it doesn't happen on the
+    // first attempt.
+    String status =
+        KubeTestUtils.sendOperationExpectFailure(
+            baseUrl(), deleteRequest, account1Ns, 10, TimeUnit.SECONDS);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format(
+                "-n %s get deployment %s -o jsonpath='{.metadata.name}'", account1Ns, name));
+    assertThat(status).contains("invalid cascade value (" + invalidCascadingValue + ")");
+    assertEquals(name, exists);
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a NOT existing static deployment manifest\n"
+          + "When sending a delete manifest request\n"
+          + "Then no deleted deployment is returned\n===")
+  @Test
+  public void shouldNotDeleteStaticTarget() throws InterruptedException {
+    // ------------------------- given --------------------------
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> request = buildStaticRequestBody("deployment notExists", "true");
+    // ------------------------- then ---------------------------
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), request, account1Ns);
+    assertEquals(0, deletions.size());
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a NOT existing dynamic replicaSet manifest\n"
+          + "When sending a delete manifest operation using the smallest dynamic target criteria\n"
+          + "Then it gets a 404 while fetching the manifest\n===")
+  @Test
+  public void shouldNotFoundDynamicTarget() throws InterruptedException {
+    // ------------------------- given --------------------------
+    // ------------------------- when ---------------------------
+    String kind = "replicaSet";
+    String criteria = "smallest";
+    String name = String.format("not-exists-%s-test", criteria);
+    String nameToDelete = String.format("%s-v000", name);
+    String url =
+        String.format(
+            "%s/manifests/%s/%s/%s %s", baseUrl(), ACCOUNT1_NAME, account1Ns, kind, nameToDelete);
+    Response response = given().queryParam("includeEvents", false).get(url);
+    // ------------------------- then ---------------------------
+    assertEquals(HttpStatus.NOT_FOUND.value(), response.statusCode());
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a CRD deployed outside of Spinnaker\n"
+          + "When sending a delete manifest operation with CRD static target\n"
+          + "Then the CRD is deleted\n===")
+  @Test
+  public void shouldDeleteCrd() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    final String kind = "customResourceDefinition";
+    final String crdName = "crontabs.stable.example.com";
+    final Map<String, Object> crdManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml")
+            .withValue("metadata.name", crdName)
+            .withValue("spec.scope", "Namespaced")
+            .asMap();
+    kubeCluster.execKubectl(" apply -f -", crdManifest);
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> request =
+        buildStaticRequestBody(String.format("%s %s", kind, crdName), "true");
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), request, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(String.format("get %s %s --ignore-not-found", kind, crdName));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, crdName)));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a CR deployed outside of Spinnaker\n"
+          + "When sending a delete manifest operation with CR static target\n"
+          + "Then the CR is deleted\n===")
+  @Test
+  public void shouldDeleteCr() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    final String kind = "crontab.stable.example.com";
+    final String crdName = "crontabs.stable.example.com";
+    final String crName = "my-new-cron-object";
+    final Map<String, Object> crdManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml")
+            .withValue("metadata.name", crdName)
+            .withValue("spec.scope", "Namespaced")
+            .asMap();
+    final Map<String, Object> crManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/cr_v1.yml")
+            .withValue("metadata.name", crName)
+            .asMap();
+    kubeCluster.execKubectl(" apply -f -", crdManifest);
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", crManifest);
+    // ------------------------- when ---------------------------
+    List<Map<String, Object>> request =
+        buildStaticRequestBody(String.format("%s %s", kind, crName), "true");
+    List<String> deletions = KubeTestUtils.sendOperation(baseUrl(), request, account1Ns);
+    // ------------------------- then ---------------------------
+    String exists =
+        kubeCluster.execKubectl(
+            String.format("-n %s get %s %s --ignore-not-found", account1Ns, kind, crName));
+    assertTrue(
+        exists.isBlank()
+            && deletions.size() == 1
+            && deletions.get(0).equals(String.format("%s %s", kind, crName)));
+  }
+
+  private List<Map<String, Object>> buildStaticRequestBody(String manifestName, String cascading) {
+    return KubeTestUtils.loadJson("classpath:requests/delete_manifest.json")
+        .withValue("deleteManifest.app", APP1_NAME)
+        .withValue("deleteManifest.mode", "static")
+        .withValue("deleteManifest.manifestName", manifestName)
+        .withValue("deleteManifest.options.cascading", cascading)
+        .withValue("deleteManifest.location", account1Ns)
+        .withValue("deleteManifest.account", ACCOUNT1_NAME)
+        .asList();
+  }
+
+  private List<Map<String, Object>> buildDynamicRequestBody(
+      String manifestName, String cascading, String cluster, String criteria, String kind) {
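+    // In "dynamic" mode the target is resolved from the cluster plus a criteria
+    // (newest, second-newest, oldest, largest, smallest) rather than being
+    // named directly, as it is in "static" mode above.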
+    return KubeTestUtils.loadJson("classpath:requests/delete_manifest.json")
+        .withValue("deleteManifest.app", APP1_NAME)
+        .withValue("deleteManifest.mode", "dynamic")
+        .withValue("deleteManifest.cluster", cluster)
+        .withValue("deleteManifest.criteria", criteria)
+        .withValue("deleteManifest.kind", kind)
+        .withValue("deleteManifest.manifestName", manifestName)
+        .withValue("deleteManifest.options.cascading", cascading)
+        .withValue("deleteManifest.location", account1Ns)
+        .withValue("deleteManifest.account", ACCOUNT1_NAME)
+        .asList();
+  }
+}
diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/DeployManifestIT.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/DeployManifestIT.java
new file mode 100644
index 00000000000..11254064452
--- /dev/null
+++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/DeployManifestIT.java
@@ -0,0 +1,1992 @@
+/*
+ * Copyright 2020 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.it;
+
+import static io.restassured.RestAssured.given;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.clouddriver.kubernetes.it.utils.KubeTestUtils;
+import io.restassured.response.Response;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.logging.log4j.util.Strings;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+public class DeployManifestIT extends BaseTest {
+
+  private static final String DEPLOYMENT_1_NAME = "deployment1";
+  private static final String REPLICASET_1_NAME = "rs1";
+  private static final String SERVICE_1_NAME = "service1";
+
+  private static final String SERVICE_2_NAME = "service2";
+  private static String account1Ns;
+
+  @BeforeAll
+  public static void setUpAll() throws IOException, InterruptedException {
+    account1Ns = kubeCluster.createNamespace(ACCOUNT1_NAME);
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a deployment manifest with no namespace set\n"
+          + " And a namespace override\n"
+          + "When sending deploy manifest request\n"
+          + " And waiting on manifest stable\n"
+          + "Then a pod is up and running in the overridden namespace\n===")
+  @Test
+  public void shouldDeployManifestFromText() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String appName = "deploy-from-text";
+    List<Map<String, Object>> manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/deployment.yml")
+            .withValue("metadata.name", DEPLOYMENT_1_NAME)
+            .asList();
+    System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName);
+
+    // ------------------------- when --------------------------
+    List<Map<String, Object>> body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.namespaceOverride", account1Ns)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", manifest)
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME);
+
+    // ------------------------- then --------------------------
+    String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods");
+    String readyPods =
+        kubeCluster.execKubectl(
+            "-n "
+                + account1Ns
+                + " get deployment "
+                + DEPLOYMENT_1_NAME
+                + " -o=jsonpath='{.status.readyReplicas}'");
+    assertEquals(
+        "1",
+        readyPods,
+        "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. Pods:\n" + pods);
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given multiple manifests\n"
+          + " where only one satisfies the given label selector\n"
+          + "When sending deploy manifest request\n"
+          + " And waiting on manifest stable\n"
+          + "Then only one manifest has been deployed\n===")
+  @Test
+  public void labelSelectors() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String appName = "deploy-from-text";
+    List<Map<String, Object>> manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/configmaps_with_selectors.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .asList();
+    Map<String, Object> labelSelectors =
+        Map.of(
+            "selectors",
+            List.of(
+                Map.of(
+                    "kind", "EQUALS",
+                    "key", "sample-configmap-selector",
+                    "values", List.of("one"))));
+    System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName);
+
+    // ------------------------- when --------------------------
+    List<Map<String, Object>> body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.labelSelectors", labelSelectors)
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", manifest)
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "configMap sample-config-map-with-selector-one-v000");
+
+    // ------------------------- then --------------------------
+    String configMaps =
+        kubeCluster.execKubectl("-n " + account1Ns + " get configmap -lselector-test=test -o name");
+    assertThat(configMaps).hasLineCount(1);
+  }
-------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.namespaceOverride", overrideNamespace) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + Response resp = + given() + .log() + .uri() + .contentType("application/json") + .body(body) + .post(baseUrl() + "/kubernetes/ops"); + + // ------------------------- then -------------------------- + resp.then().statusCode(400); + assertTrue(resp.body().asString().contains("wrongNamespace")); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest with no namespace set\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then a pod is up and running in the default namespace\n===") + @Test + public void shouldDeployManifestToDefaultNs() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "default-ns"; + System.out.println("> Using namespace: default, appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, "default", "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n default get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n default" + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + } + + @DisplayName( + ".\n===\n" + + "Given a document with multiple manifest definitions\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then a service and pod exist in the target cluster\n===") + @Test + public void shouldDeployMultidocManifest() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "deploy-multidoc"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/multi_deployment_service.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", appName) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + appName, "service " + appName); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + appName + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", readyPods, "Expected one ready pod for " + appName + " deployment. Pods:\n" + pods); + String services = kubeCluster.execKubectl("-n " + account1Ns + " get services"); + assertTrue( + Strings.isNotEmpty( + kubeCluster.execKubectl("-n " + account1Ns + " get services " + appName)), + "Expected service " + appName + " to exist. 
Services: " + services); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment deployed with spinnaker\n" + + " And it gets updated with a new tag version\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then old version is deleted and new version is available\n===") + @Test + public void shouldUpdateExistingDeployment() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "update-deploy"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String oldImage = "index.docker.io/library/alpine:3.11"; + String newImage = "index.docker.io/library/alpine:3.12"; + + List> oldManifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.containers[0].image", oldImage) + .asList(); + + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", oldManifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + String currentImage = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + oldImage, currentImage, "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + + List> newManifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.containers[0].image", newImage) + .asList(); + + // ------------------------- when -------------------------- + body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", newManifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + currentImage = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + newImage, currentImage, "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest without image tag\n" + + " And optional docker artifact present\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then the docker artifact is deployed\n===") + @Test + public void shouldBindOptionalDockerImage() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "bind-optional"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String imageNoTag = "index.docker.io/library/alpine"; + String imageWithTag = "index.docker.io/library/alpine:3.12"; + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .asList(); + Map artifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageNoTag) + .withValue("type", "docker/image") + .withValue("reference", imageWithTag) + .withValue("version", imageWithTag.substring(imageNoTag.length() + 1)) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.optionalArtifacts[0]", artifact) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + String imageDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + imageWithTag, + imageDeployed, + "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest without image tag\n" + + " And required docker artifact present\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then the docker artifact is deployed\n===") + @Test + public void shouldBindRequiredDockerImage() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "bind-required"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String imageNoTag = "index.docker.io/library/alpine"; + String imageWithTag = "index.docker.io/library/alpine:3.12"; + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .asList(); + Map artifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageNoTag) + .withValue("type", "docker/image") + .withValue("reference", imageWithTag) + .withValue("version", imageWithTag.substring(imageNoTag.length() + 1)) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.requiredArtifacts[0]", artifact) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + String imageDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + imageWithTag, + imageDeployed, + "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + } + + @DisplayName( + ".\n===\n" + + "Given a replicaSet manifest without image tag\n" + + " And required docker artifact present\n" + + "When sending deploy manifest request two times\n" + + "Then there are two replicaSet versions deployed\n===") + @Test + public void shouldStepReplicaSetVersion() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "step-rs"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String imageNoTag = "index.docker.io/library/alpine"; + String imageWithTag = "index.docker.io/library/alpine:3.12"; + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", REPLICASET_1_NAME) + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .withValue( + "spec.template.spec.containers[0].command", + ImmutableList.of("tail", "-f", "/dev/null")) + .asList(); + Map artifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageNoTag) + .withValue("type", "docker/image") + .withValue("reference", imageWithTag) + .withValue("version", imageWithTag.substring(imageNoTag.length() + 1)) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.requiredArtifacts[0]", artifact) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "replicaSet " + REPLICASET_1_NAME + "-v000"); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "replicaSet " + REPLICASET_1_NAME + "-v001"); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get rs " + + REPLICASET_1_NAME + + "-v001 -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + REPLICASET_1_NAME + "-v001 replicaSet. 
Pods:\n" + pods); + String imageDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get rs " + + REPLICASET_1_NAME + + "-v001 -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + imageWithTag, + imageDeployed, + "Expected correct " + REPLICASET_1_NAME + "-v001 image to be deployed"); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest without image tag\n" + + " And required docker artifact present\n" + + " And optional docker artifact present\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then required docker artifact is deployed\n===") + @Test + public void shouldBindRequiredOverOptionalDockerImage() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "bind-required-over-optional"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String imageNoTag = "index.docker.io/library/alpine"; + String requiredImage = "index.docker.io/library/alpine:3.11"; + String optionalImage = "index.docker.io/library/alpine:3.12"; + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .asList(); + Map requiredArtifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageNoTag) + .withValue("type", "docker/image") + .withValue("reference", requiredImage) + .withValue("version", requiredImage.substring(imageNoTag.length() + 1)) + .asMap(); + Map optionalArtifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageNoTag) + .withValue("type", "docker/image") + .withValue("reference", optionalImage) + .withValue("version", optionalImage.substring(imageNoTag.length() + 1)) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.requiredArtifacts[0]", requiredArtifact) + .withValue("deployManifest.optionalArtifacts[0]", optionalArtifact) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + String imageDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + requiredImage, + imageDeployed, + "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + } + + @DisplayName( + ".\n===\n" + + "Given a manifest referencing an unversioned configmap\n" + + " And versioned configmap deployed\n" + + " And versioned configmap artifact\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then the manifest is deployed mounting versioned configmap\n===") + @Test + public void shouldBindVersionedConfigMap() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "bind-config-map"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String cmName = generateManifestName("myconfig"); + String version = "v005"; + + // deploy versioned configmap + Map cm = + KubeTestUtils.loadYaml("classpath:manifests/configmap.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", cmName + "-" + version) + .asMap(); + kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", cm); + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment_with_vol.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.volumes[0].configMap.name", cmName) + .asList(); + Map artifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", cmName) + .withValue("type", "kubernetes/configMap") + .withValue("reference", cmName + "-" + version) + .withValue("location", account1Ns) + .withValue("version", version) + .withValue("metadata.account", ACCOUNT1_NAME) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.optionalArtifacts[0]", artifact) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + String cmNameDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.volumes[0].configMap.name}'"); + assertEquals( + cmName + "-" + version, cmNameDeployed, "Expected correct configmap to be referenced"); + } + + @DisplayName( + ".\n===\n" + + "Given a manifest referencing an unversioned secret\n" + + " And versioned secret deployed\n" + + " And versioned secret artifact\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then the manifest is deployed mounting versioned secret\n===") + @Test + public void shouldBindVersionedSecret() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "bind-secret"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String secretName = generateManifestName("mysecret"); + String version = "v009"; + + // deploy versioned secret + Map secret = + KubeTestUtils.loadYaml("classpath:manifests/secret.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", secretName + "-" + version) + .asMap(); + kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", secret); + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment_with_vol.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.volumes[0].secret.secretName", secretName) + .asList(); + Map artifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", secretName) + .withValue("type", "kubernetes/secret") + .withValue("reference", secretName + "-" + version) + .withValue("location", account1Ns) + .withValue("version", version) + .withValue("metadata.account", ACCOUNT1_NAME) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.optionalArtifacts[0]", artifact) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + String secretNameDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.volumes[0].secret.secretName}'"); + assertEquals( + secretName + "-" + version, secretNameDeployed, "Expected correct secret to be referenced"); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest with docker image tag\n" + + " And a required and optional docker artifacts\n" + + " And artifact binding disabled\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then the manifest is deployed with the original image tag in the manifest\n===") + @Test + public void shouldNotBindArtifacts() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "bind-disabled"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String imageInManifest = "index.docker.io/library/alpine:3.11"; + String requiredImage = "index.docker.io/library/alpine:3.12"; + String optionalImage = "index.docker.io/library/alpine:3.13"; + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("spec.template.spec.containers[0].image", imageInManifest) + .asList(); + Map requiredArtifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageInManifest.substring(0, imageInManifest.indexOf(':'))) + .withValue("type", "docker/image") + .withValue("reference", requiredImage) + .withValue("version", requiredImage.substring(requiredImage.indexOf(':') + 1)) + .asMap(); + Map optionalArtifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageInManifest.substring(0, imageInManifest.indexOf(':'))) + .withValue("type", "docker/image") + .withValue("reference", optionalImage) + .withValue("version", optionalImage.substring(optionalImage.indexOf(':') + 1)) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.requiredArtifacts[0]", requiredArtifact) + .withValue("deployManifest.optionalArtifacts[0]", optionalArtifact) + .withValue("deployManifest.enableArtifactBinding", false) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. 
Pods:\n" + pods); + String imageDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + imageInManifest, + imageDeployed, + "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + } + + @DisplayName( + ".\n===\n" + + "Given a configmap manifest\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then configmap is deployed with a version suffix name\n===") + @Test + public void shouldAddVersionToConfigmap() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "add-config-map-version"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String cmName = generateManifestName("myconfig"); + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/configmap.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", cmName) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable(baseUrl(), body, account1Ns, "configMap " + cmName + "-v000"); + + // ------------------------- then -------------------------- + String cm = kubeCluster.execKubectl("-n " + account1Ns + " get cm " + cmName + "-v000"); + assertTrue(cm.contains("v000"), "Expected configmap with name " + cmName + "-v000"); + } + + @DisplayName( + ".\n===\n" + + "Given a secret manifest\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then secret is deployed with a version suffix name\n===") + @Test + public void shouldAddVersionToSecret() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "add-secret-version"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String secretName = generateManifestName("mysecret"); + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/secret.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", secretName) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "secret " + secretName + "-v000"); + + // ------------------------- then -------------------------- + String cm = kubeCluster.execKubectl("-n " + account1Ns + " get secret " + secretName + "-v000"); + assertTrue(cm.contains("v000"), "Expected secret with name " + secretName + "-v000"); + } + + @DisplayName( + ".\n===\n" + + "Given a configmap deployed with spinnaker\n" + + " And configmap manifest changed\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then a new version of configmap is deployed\n" + + " And the previous version of configmap is not deleted or changed\n===") + @Test + public void shouldDeployNewConfigmapVersion() throws IOException, 
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a secret deployed with Spinnaker\n"
+          + " And secret manifest changed\n"
+          + "When sending deploy manifest request\n"
+          + " And waiting on manifest stable\n"
+          + "Then a new version of secret is deployed\n"
+          + " And the previous version of secret is not deleted or changed\n===")
+  @Test
+  public void shouldDeployNewSecretVersion() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String appName = "new-secret-version";
+    System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName);
+    String secretName = generateManifestName("mysecret");
+
+    List<Map<String, Object>> manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/secret.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", secretName)
+            .asList();
+    List<Map<String, Object>> body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", manifest)
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "secret " + secretName + "-v000");
+
+    manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/secret.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", secretName)
+            .withValue("data.newfile", "SGVsbG8gd29ybGQK")
+            .asList();
+
+    // ------------------------- when --------------------------
+    body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", manifest)
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "secret " + secretName + "-v001");
+
+    // ------------------------- then --------------------------
+    String secret =
+        kubeCluster.execKubectl("-n " + account1Ns + " get secret " + secretName + "-v001");
+    assertTrue(secret.contains("v001"), "Expected secret with name " + secretName + "-v001");
+    secret = kubeCluster.execKubectl("-n " + account1Ns + " get secret " + secretName + "-v000");
+    assertTrue(secret.contains("v000"), "Expected secret with name " + secretName + "-v000");
+  }
.withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "secret " + secretName + "-v001"); + + // ------------------------- then -------------------------- + String secret = + kubeCluster.execKubectl("-n " + account1Ns + " get secret " + secretName + "-v001"); + assertTrue(secret.contains("v001"), "Expected secret with name " + secretName + "-v001"); + secret = kubeCluster.execKubectl("-n " + account1Ns + " get secret " + secretName + "-v000"); + assertTrue(secret.contains("v000"), "Expected secret with name " + secretName + "-v000"); + } + + @DisplayName( + ".\n===\n" + + "Given a configmap manifest with special annotation to avoid being versioned\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then configmap is deployed without version\n===") + @Test + public void shouldNotAddVersionToConfigmap() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "unversioned-config-map"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String cmName = generateManifestName("myconfig"); + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/configmap.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", cmName) + .withValue( + "metadata.annotations", ImmutableMap.of("strategy.spinnaker.io/versioned", "false")) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable(baseUrl(), body, account1Ns, "configMap " + cmName); + + // ------------------------- then -------------------------- + String cm = kubeCluster.execKubectl("-n " + account1Ns + " get cm " + cmName); + assertFalse(cm.contains("v000"), "Expected configmap with name " + cmName); + } + + @DisplayName( + ".\n===\n" + + "Given a secret manifest with special annotation to avoid being versioned\n" + + "When sending deploy manifest request\n" + + " And waiting on manifest stable\n" + + "Then secret is deployed without version\n===") + @Test + public void shouldNotAddVersionToSecret() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "unversioned-secret"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String secretName = generateManifestName("mysecret"); + + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/secret.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", secretName) + .withValue( + "metadata.annotations", ImmutableMap.of("strategy.spinnaker.io/versioned", "false")) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable(baseUrl(), body, account1Ns, "secret " + secretName); + + // ------------------------- then -------------------------- + String cm = kubeCluster.execKubectl("-n " + account1Ns + " get secret " + 
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a multidoc yaml with a service and replicaset\n"
+          + " And red/black deployment traffic strategy\n"
+          + "When sending deploy manifest request two times\n"
+          + " And sending disable manifest one time\n"
+          + "Then there are two replicasets with only the last one receiving traffic\n===")
+  @Test
+  public void shouldDeployRedBlackMultidoc() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String appName = "red-black-multidoc";
+    System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName);
+    String selectorValue = appName + "traffichere";
+
+    Map<String, Object> replicaset =
+        KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", appName)
+            .withValue("spec.selector.matchLabels", ImmutableMap.of("label1", "value1"))
+            .withValue("spec.template.metadata.labels", ImmutableMap.of("label1", "value1"))
+            .asMap();
+    Map<String, Object> service =
+        KubeTestUtils.loadYaml("classpath:manifests/service.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", SERVICE_1_NAME)
+            .withValue("spec.selector", ImmutableMap.of("pointer", selectorValue))
+            .withValue("spec.type", "NodePort")
+            .asMap();
+
+    // ------------------------- when --------------------------
+    List<Map<String, Object>> body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", List.of(replicaset, service))
+            .withValue(
+                "deployManifest.services", Collections.singleton("service " + SERVICE_1_NAME))
+            .withValue("deployManifest.strategy", "RED_BLACK")
+            .withValue("deployManifest.trafficManagement.enabled", true)
+            .withValue("deployManifest.trafficManagement.options.strategy", "redblack")
+            .withValue("deployManifest.trafficManagement.options.enableTraffic", true)
+            .withValue("deployManifest.trafficManagement.options.namespace", account1Ns)
+            .withValue(
+                "deployManifest.trafficManagement.options.services",
+                Collections.singleton("service " + appName))
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(),
+        body,
+        account1Ns,
+        "service " + SERVICE_1_NAME,
+        "replicaSet " + appName + "-v000");
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(),
+        body,
+        account1Ns,
+        "service " + SERVICE_1_NAME,
+        "replicaSet " + appName + "-v001");
+    body =
+        KubeTestUtils.loadJson("classpath:requests/disable_manifest.json")
+            .withValue("disableManifest.app", appName)
+            .withValue("disableManifest.manifestName", "replicaSet " + appName + "-v000")
+            .withValue("disableManifest.location", account1Ns)
+            .withValue("disableManifest.account", ACCOUNT1_NAME)
+            .asList();
+    KubeTestUtils.disableManifest(baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000");
+
+    // ------------------------- then --------------------------
+    List<String> podNames =
+        Splitter.on(" ")
+            .splitToList(
+                kubeCluster.execKubectl(
+                    "-n " + account1Ns
+                        + " get pod -o=jsonpath='{.items[*].metadata.name}' -l=pointer="
+                        + selectorValue));
+    assertEquals(
+        1, podNames.size(), "Only one pod expected to have the label for traffic selection");
+  }
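+
+  // Red/black traffic management, roughly: Spinnaker attaches the declared services'
+  // selector labels to the pods of each ReplicaSet it deploys, and disableManifest
+  // detaches a version by removing those labels again, so after disabling -v000 only
+  // the -v001 pods should still carry the "pointer" label queried in the assertions.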
times\n" + + " And sending disable manifest one time\n" + + "Then there are two replicasets with only the last one receiving traffic\n===") + @Test + public void shouldDeployBlueGreenMultidoc() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "blue-green-multidoc"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + String selectorValue = appName + "traffichere"; + + Map replicaset = + KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", appName) + .withValue("spec.selector.matchLabels", ImmutableMap.of("label1", "value1")) + .withValue("spec.template.metadata.labels", ImmutableMap.of("label1", "value1")) + .asMap(); + Map service = + KubeTestUtils.loadYaml("classpath:manifests/service.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", SERVICE_2_NAME) + .withValue("spec.selector", ImmutableMap.of("pointer", selectorValue)) + .withValue("spec.type", "NodePort") + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", List.of(replicaset, service)) + .withValue( + "deployManifest.services", Collections.singleton("service " + SERVICE_2_NAME)) + .withValue("deployManifest.strategy", "BLUE_GREEN") + .withValue("deployManifest.trafficManagement.enabled", true) + .withValue("deployManifest.trafficManagement.options.strategy", "bluegreen") + .withValue("deployManifest.trafficManagement.options.enableTraffic", true) + .withValue("deployManifest.trafficManagement.options.namespace", account1Ns) + .withValue( + "deployManifest.trafficManagement.options.services", + Collections.singleton("service " + appName)) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), + body, + account1Ns, + "service " + SERVICE_2_NAME, + "replicaSet " + appName + "-v000"); + KubeTestUtils.deployAndWaitStable( + baseUrl(), + body, + account1Ns, + "service " + SERVICE_2_NAME, + "replicaSet " + appName + "-v001"); + body = + KubeTestUtils.loadJson("classpath:requests/disable_manifest.json") + .withValue("disableManifest.app", appName) + .withValue("disableManifest.manifestName", "replicaSet " + appName + "-v000") + .withValue("disableManifest.location", account1Ns) + .withValue("disableManifest.account", ACCOUNT1_NAME) + .asList(); + KubeTestUtils.disableManifest(baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000"); + + // ------------------------- then -------------------------- + List podNames = + Splitter.on(" ") + .splitToList( + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get pod -o=jsonpath='{.items[*].metadata.name}' -l=pointer=" + + selectorValue)); + assertEquals( + 1, podNames.size(), "Only one pod expected to have the label for traffic selection"); + } + + @DisplayName( + ".\n===\n" + + "Given a replicaset yaml with red/black deployment traffic strategy\n" + + " And an existing service\n" + + "When sending deploy manifest request two times\n" + + " And sending disable manifest one time\n" + + "Then there are two replicasets with only the last one receiving traffic\n===") + @Test + public void shouldDeployRedBlackReplicaSet() throws IOException, InterruptedException { + // ------------------------- given 
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a replicaset yaml with red/black deployment traffic strategy\n"
+          + " And an existing service\n"
+          + "When sending deploy manifest request two times\n"
+          + " And sending disable manifest one time\n"
+          + "Then there are two replicasets with only the last one receiving traffic\n===")
+  @Test
+  public void shouldDeployRedBlackReplicaSet() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String appName = "red-black";
+    System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName);
+    String selectorValue = appName + "traffichere";
+
+    Map<String, Object> service =
+        KubeTestUtils.loadYaml("classpath:manifests/service.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", SERVICE_1_NAME)
+            .withValue("spec.selector", ImmutableMap.of("pointer", selectorValue))
+            .withValue("spec.type", "NodePort")
+            .asMap();
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", service);
+
+    List<Map<String, Object>> manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", appName)
+            .withValue("spec.selector.matchLabels", ImmutableMap.of("label1", "value1"))
+            .withValue("spec.template.metadata.labels", ImmutableMap.of("label1", "value1"))
+            .asList();
+
+    // ------------------------- when --------------------------
+    List<Map<String, Object>> body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", manifest)
+            .withValue(
+                "deployManifest.services", Collections.singleton("service " + SERVICE_1_NAME))
+            .withValue("deployManifest.strategy", "RED_BLACK")
+            .withValue("deployManifest.trafficManagement.enabled", true)
+            .withValue("deployManifest.trafficManagement.options.strategy", "redblack")
+            .withValue("deployManifest.trafficManagement.options.enableTraffic", true)
+            .withValue("deployManifest.trafficManagement.options.namespace", account1Ns)
+            .withValue(
+                "deployManifest.trafficManagement.options.services",
+                Collections.singleton("service " + appName))
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000");
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "replicaSet " + appName + "-v001");
+    body =
+        KubeTestUtils.loadJson("classpath:requests/disable_manifest.json")
+            .withValue("disableManifest.app", appName)
+            .withValue("disableManifest.manifestName", "replicaSet " + appName + "-v000")
+            .withValue("disableManifest.location", account1Ns)
+            .withValue("disableManifest.account", ACCOUNT1_NAME)
+            .asList();
+    KubeTestUtils.disableManifest(baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000");
+
+    // ------------------------- then --------------------------
+    List<String> podNames =
+        Splitter.on(" ")
+            .splitToList(
+                kubeCluster.execKubectl(
+                    "-n " + account1Ns
+                        + " get pod -o=jsonpath='{.items[*].metadata.name}' -l=pointer="
+                        + selectorValue));
+    assertEquals(
+        1, podNames.size(), "Only one pod expected to have the label for traffic selection");
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a replicaset yaml with blue/green deployment traffic strategy\n"
+          + " And an existing service\n"
+          + "When sending deploy manifest request two times\n"
+          + " And sending disable manifest one time\n"
+          + "Then there are two replicasets with only the last one receiving traffic\n===")
+  @Test
+  public void shouldDeployBlueGreenReplicaSet() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    String appName = "blue-green";
+    System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName);
+    String selectorValue = appName + "traffichere";
+
+    Map<String, Object> service =
+        KubeTestUtils.loadYaml("classpath:manifests/service.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", SERVICE_2_NAME)
+            .withValue("spec.selector", ImmutableMap.of("pointer", selectorValue))
+            .withValue("spec.type", "NodePort")
+            .asMap();
+    kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", service);
+
+    List<Map<String, Object>> manifest =
+        KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml")
+            .withValue("metadata.namespace", account1Ns)
+            .withValue("metadata.name", appName)
+            .withValue("spec.selector.matchLabels", ImmutableMap.of("label1", "value1"))
+            .withValue("spec.template.metadata.labels", ImmutableMap.of("label1", "value1"))
+            .asList();
+
+    // ------------------------- when --------------------------
+    List<Map<String, Object>> body =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", appName)
+            .withValue("deployManifest.manifests", manifest)
+            .withValue(
+                "deployManifest.services", Collections.singleton("service " + SERVICE_2_NAME))
+            .withValue("deployManifest.strategy", "BLUE_GREEN")
+            .withValue("deployManifest.trafficManagement.enabled", true)
+            .withValue("deployManifest.trafficManagement.options.strategy", "bluegreen")
+            .withValue("deployManifest.trafficManagement.options.enableTraffic", true)
+            .withValue("deployManifest.trafficManagement.options.namespace", account1Ns)
+            .withValue(
+                "deployManifest.trafficManagement.options.services",
+                Collections.singleton("service " + appName))
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000");
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), body, account1Ns, "replicaSet " + appName + "-v001");
+    body =
+        KubeTestUtils.loadJson("classpath:requests/disable_manifest.json")
+            .withValue("disableManifest.app", appName)
+            .withValue("disableManifest.manifestName", "replicaSet " + appName + "-v000")
+            .withValue("disableManifest.location", account1Ns)
+            .withValue("disableManifest.account", ACCOUNT1_NAME)
+            .asList();
+    KubeTestUtils.disableManifest(baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000");
+
+    // ------------------------- then --------------------------
+    List<String> podNames =
+        Splitter.on(" ")
+            .splitToList(
+                kubeCluster.execKubectl(
+                    "-n " + account1Ns
+                        + " get pod -o=jsonpath='{.items[*].metadata.name}' -l=pointer="
+                        + selectorValue));
+    assertEquals(
+        1, podNames.size(), "Only one pod expected to have the label for traffic selection");
+  }
.withValue("apiVersion", apiVersion) + .asList(); + Map artifact = + KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", imageNoTag) + .withValue("type", "docker/image") + .withValue("reference", imageWithTag) + .withValue("version", imageWithTag.substring(imageNoTag.length() + 1)) + .asMap(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .withValue("deployManifest.requiredArtifacts[0]", artifact) + .asList(); + KubeTestUtils.deployAndWaitStable(baseUrl(), body, account1Ns, "cronJob " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String imageDeployed = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get cronjobs " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.jobTemplate.spec.template.spec.containers[0].image}'"); + assertEquals( + imageWithTag, + imageDeployed, + "Expected correct " + DEPLOYMENT_1_NAME + " image to be scheduled"); + } + + @DisplayName( + ".\n===\n" + + "Given k8s version < 1.22.0 and a v1beta1 CRD manifest\n" + + "When sending deploy manifest request\n" + + "Then a v1beta1 CRD is created\n===") + @Test + public void shouldDeployCrdV1beta1IfSupported() throws IOException, InterruptedException { + if (KubeTestUtils.compareVersion(KUBERNETES_VERSION, "v1.21") > 0) { + return; + } + // ------------------------- given -------------------------- + final String crdName = "crontabs.stable.example.com"; + final List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/crd_v1beta1.yml") + .withValue("metadata.name", crdName) + .asList(); + // ------------------------- when -------------------------- + final List> request = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", APP1_NAME) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), request, "", String.format("customResourceDefinition %s", crdName)); + // ------------------------- then -------------------------- + String exits = kubeCluster.execKubectl(String.format("get crd %s", crdName)); + assertTrue(exits.contains(crdName)); + } + + @DisplayName( + ".\n===\n" + + "Given a v1 CRD manifest\n" + + "When sending deploy manifest request\n" + + "Then a v1 CRD is created\n===") + @Test + public void shouldDeployCrdV1() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + final String crdName = "crontabs.stable.example.com"; + final List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml") + .withValue("metadata.name", crdName) + .asList(); + // ------------------------- when -------------------------- + final List> request = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", APP1_NAME) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), request, "", String.format("customResourceDefinition %s", crdName)); + // ------------------------- then -------------------------- + String exits = kubeCluster.execKubectl(String.format("get crd %s", crdName)); + 
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a CRD namespaced scope version v1\n"
+          + " And its associated CR manifest\n"
+          + "When sending deploy manifests request\n"
+          + "Then a CRD and its CR are created at namespaced level\n===")
+  @Test
+  public void shouldDeployCrCrdNamespacedV1() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    final String crdGroup = "stable.example.com";
+    final String crdName = String.format("crontabs.%s", crdGroup);
+    final String crName = "my-new-cron-object";
+    final List<Map<String, Object>> crdManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml")
+            .withValue("metadata.name", crdName)
+            .withValue("spec.scope", "Namespaced")
+            .asList();
+    final List<Map<String, Object>> crManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/cr_v1.yml")
+            .withValue("metadata.name", crName)
+            .asList();
+    // ------------------------- when --------------------------
+    final List<Map<String, Object>> crdRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", crdManifest)
+            .asList();
+    final List<Map<String, Object>> crRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", crManifest)
+            .withValue("deployManifest.namespaceOverride", account1Ns)
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), crdRequest, "", String.format("customResourceDefinition %s", crdName));
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), crRequest, account1Ns, String.format("CronTab.%s %s", crdGroup, crName));
+    // ------------------------- then --------------------------
+    String crdExists = kubeCluster.execKubectl(String.format("get crd %s", crdName));
+    String crNotExists =
+        kubeCluster.execKubectl(String.format("get %s %s --ignore-not-found", crdName, crName));
+    String crExists =
+        kubeCluster.execKubectl(String.format("-n %s get %s %s", account1Ns, crdName, crName));
+    assertTrue(
+        crdExists.contains(crdName) && crExists.contains(crName) && !crNotExists.contains(crName));
+  }
+
+  @DisplayName(
+      ".\n===\n"
+          + "Given a CRD cluster scope version v1\n"
+          + " And its associated CR manifest\n"
+          + "When sending deploy manifests request\n"
+          + "Then a CRD and its CR are created at cluster level\n===")
+  @Test
+  public void shouldDeployCrCrdClusterV1() throws IOException, InterruptedException {
+    // ------------------------- given --------------------------
+    final String crdGroup = "stable.cluster.com";
+    final String crdName = String.format("crontabs.%s", crdGroup);
+    final String crName = "my-new-cron-object";
+    kubeCluster.execKubectl(String.format("delete crd %s --ignore-not-found", crdName));
+    final List<Map<String, Object>> crdManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml")
+            .withValue("metadata.name", crdName)
+            .withValue("spec.scope", "Cluster")
+            .withValue("spec.group", crdGroup)
+            .asList();
+    final List<Map<String, Object>> crManifest =
+        KubeTestUtils.loadYaml("classpath:manifests/cr_v1.yml")
+            .withValue("apiVersion", String.format("%s/v1", crdGroup))
+            .withValue("metadata.name", crName)
+            .asList();
+    // ------------------------- when --------------------------
+    final List<Map<String, Object>> crdRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", crdManifest)
+            .asList();
+    final List<Map<String, Object>> crRequest =
+        KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json")
+            .withValue("deployManifest.account", ACCOUNT1_NAME)
+            .withValue("deployManifest.moniker.app", APP1_NAME)
+            .withValue("deployManifest.manifests", crManifest)
+            .asList();
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), crdRequest, "", String.format("customResourceDefinition %s", crdName));
+    KubeTestUtils.deployAndWaitStable(
+        baseUrl(), crRequest, "", String.format("CronTab.%s %s", crdGroup, crName));
+    // ------------------------- then --------------------------
+    String crdExists = kubeCluster.execKubectl(String.format("get crd %s", crdName));
+    String crExists = kubeCluster.execKubectl(String.format("get %s %s", crdName, crName));
+    String crExistsNamespaced =
+        kubeCluster.execKubectl(String.format("-n %s get %s %s", account1Ns, crdName, crName));
+    assertTrue(
+        crdExists.contains(crdName)
+            && crExists.contains(crName)
+            && crExistsNamespaced.contains(crName));
+  }
.withValue("deployManifest.moniker.app", APP1_NAME) + .withValue("deployManifest.manifests", crdManifest) + .asList(); + final List> crRequest = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", APP1_NAME) + .withValue("deployManifest.manifests", crManifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), crdRequest, "", String.format("customResourceDefinition %s", crdName)); + KubeTestUtils.deployAndWaitStable( + baseUrl(), crRequest, "", String.format("CronTab.%s %s", crdGroup, crName)); + // ------------------------- then -------------------------- + String crdExists = kubeCluster.execKubectl(String.format("get crd %s", crdName)); + String crExists = kubeCluster.execKubectl(String.format("get %s %s", crdName, crName)); + String crExistsNamespaced = + kubeCluster.execKubectl(String.format("-n %s get %s %s", account1Ns, crdName, crName)); + assertTrue( + crdExists.contains(crdName) + && crExists.contains(crName) + && crExistsNamespaced.contains(crName)); + } + + @DisplayName( + ".\n===\n" + + "Given a CRD already hardcoded in the account configuration\n" + + "When sending credentials request\n" + + "Then the credentials response contains the deployed CRD\n===") + @Test + public void shouldGetDeployedCrdsCredentials() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + // ------------------------- when -------------------------- + Response response = + given() + .log() + .body(false) + .queryParam("expand", true) + .get(String.format("%s/credentials", baseUrl())); + // ------------------------- then -------------------------- + System.out.println(response.prettyPrint()); + response + .then() + .statusCode(200) + .body( + "spinnakerKindMap.'crontab.stable.example.com'.findAll{ e -> e != null }", + hasSize(greaterThan(0))); + } + + private static String generateManifestName(String myconfig) { + return myconfig + Long.toHexString(UUID.randomUUID().getLeastSignificantBits()); + } + + @DisplayName( + ".\n===\n" + + "Given a deployed manifest with an special annotation to avoid being versioned\n" + + " And another annotation to avoid updating the replicas amount" + + "When sending an updated manifest\n" + + " With a a new env var\n" + + " And a different replica size\n" + + "The manifest is deployed with the new env var and the old replicas size\n===") + @Test + public void shouldUseSourceCapacityNonVersioned() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + int originalReplicasSize = 1; + int secondReplicasSize = 5; + ImmutableMap annotations = + ImmutableMap.of( + "strategy.spinnaker.io/versioned", + "false" // non-versioned + , + "strategy.spinnaker.io/use-source-capacity", + "true" // do not update replicas + ); + String appName = "unversionedsourcepacaity-deployment"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", appName) + .withValue("spec.selector.matchLabels.app", appName) + .withValue("spec.template.metadata.labels.app", appName) + .withValue("spec.replicas", originalReplicasSize) + .withValue("metadata.annotations", annotations) + .asList(); + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + 
.withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + KubeTestUtils.deployAndWaitStable(baseUrl(), body, account1Ns, "deployment " + appName); + + // ------------------------- when -------------------------- + List> secondManifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", appName) + .withValue("spec.selector.matchLabels.app", appName) + .withValue("spec.template.metadata.labels.app", appName) + .withValue("spec.replicas", secondReplicasSize) + .withValue( + "spec.template.spec.containers[0].env", + Collections.singletonList( + ImmutableMap.of( + "name", "test", + "value", "test"))) + .withValue("metadata.annotations", annotations) + .asList(); + List> secondBody = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", secondManifest) + .asList(); + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + KubeTestUtils.deployAndWaitStable(baseUrl(), secondBody, account1Ns, "deployment " + appName); + + // ------------------------- then -------------------------- + String currentReplicas = + kubeCluster.execKubectl( + "-n " + account1Ns + " get deployment " + appName + " -o=jsonpath='{.spec.replicas}'"); + assertEquals( + String.valueOf(originalReplicasSize), + currentReplicas, + "Expected " + + originalReplicasSize + + " replica for " + + appName + + " deployment. Pods:\n" + + currentReplicas); + + String envVarValue = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + appName + + " -o=jsonpath='{.spec.template.spec.containers[0].env[0].value}'"); + assertEquals("test", envVarValue, "Expected update env var for " + appName + " deployment.\n"); + } + + @DisplayName( + ".\n===\n" + + "Given a replicaset manifest with an special annotation for versioning\n" + + " And another annotation to avoid updating the replicas amount" + + "When sending an updated manifest\n" + + " With a a new env var\n" + + " And a different replica size\n" + + "The manifest is deployed with the new env var and the replicas value from the previous version\n===") + @Test + public void shouldUseSourceCapacityVersioned() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + int originalReplicasSize = 1; + int secondReplicasSize = 5; + ImmutableMap annotations = + ImmutableMap.of( + "strategy.spinnaker.io/versioned", + "true" // versioned + , + "strategy.spinnaker.io/use-source-capacity", + "true" // do not update replicas + ); + String appName = "unversionedsourcepacaity-replicaset"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", appName) + .withValue("spec.selector.matchLabels.app", appName) + .withValue("spec.template.metadata.labels.app", appName) + .withValue("spec.replicas", originalReplicasSize) + .withValue("metadata.annotations", annotations) + .asList(); + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + 
.withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "replicaSet " + appName + "-v000"); + + // ------------------------- when -------------------------- + List> secondManifest = + KubeTestUtils.loadYaml("classpath:manifests/replicaset.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", appName) + .withValue("spec.selector.matchLabels.app", appName) + .withValue("spec.template.metadata.labels.app", appName) + .withValue("spec.replicas", secondReplicasSize) + .withValue( + "spec.template.spec.containers[0].env", + Collections.singletonList( + ImmutableMap.of( + "name", "test", + "value", "test"))) + .withValue("metadata.annotations", annotations) + .asList(); + List> secondBody = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", secondManifest) + .asList(); + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + KubeTestUtils.deployAndWaitStable( + baseUrl(), secondBody, account1Ns, "replicaSet " + appName + "-v001"); + + // ------------------------- then -------------------------- + String currentReplicas = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get replicaSet " + + appName + + "-v001" + + " -o=jsonpath='{.spec.replicas}'"); + assertEquals( + String.valueOf(originalReplicasSize), + currentReplicas, + "Expected " + + originalReplicasSize + + " replica for " + + appName + + " replicaset. 
Pods:\n" + + currentReplicas); + + String envVarValue = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get replicaSet " + + appName + + "-v001" + + " -o=jsonpath='{.spec.template.spec.containers[0].env[0].value}'"); + assertEquals("test", envVarValue, "Expected update env var for " + appName + " replicaset.\n"); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest with server-side-apply strategy set\n" + + "When sending deploy manifest request\n" + + "Then a deployment is created using server-side apply\n===") + @Test + public void shouldDeployUsingServerSideApply() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "server-side-apply"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue( + "metadata.annotations", + ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "force-conflicts")) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + /* Expecting: + metadata: + managedFields: + - manager: kubectl + operation: Apply + fieldsType: FieldsV1 + */ + String managedFields = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.metadata.managedFields}'"); + assertTrue( + Strings.isNotEmpty(managedFields), + "Expected managedFields for " + + DEPLOYMENT_1_NAME + + " deployment to exist and be managed server-side. managedFields:\n" + + managedFields); + + String applyManager = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.metadata.managedFields[?(@.operation==\"Apply\")].manager}'"); + // kubectl v1.26+ adds a "kubectl-last-applied" manager as well. Remove it. The jsonpath + // implementation in kubectl is really limited, so we have to do this in java. + applyManager = applyManager.replaceAll("\\s?kubectl-last-applied\\s?", ""); + assertEquals( + "kubectl", + applyManager, + "Expected apply manager for " + + DEPLOYMENT_1_NAME + + " deployment to be managed server-side. 
managedFields:\n" + + managedFields); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest with server-side-apply disabled set\n" + + "When sending deploy manifest request\n" + + "Then a deployment is created using client-side apply\n===") + @Test + public void shouldDeployUsingApplyWithServerSideApplyDisabled() + throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "server-side-apply-disabled"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue( + "metadata.annotations", + ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "false")) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String lastAppliedConfiguration = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.metadata.annotations.kubectl\\.kubernetes\\.io/last-applied-configuration}'"); + assertTrue( + Strings.isNotEmpty(lastAppliedConfiguration), + "Expected last-applied-configuration for " + + DEPLOYMENT_1_NAME + + " deployment to exist and be managed client-side. fields:\n" + + lastAppliedConfiguration); + } + + @DisplayName( + ".\n===\n" + + "Given a deployment manifest without a strategy set\n" + + "When sending deploy manifest request\n" + + "Then a deployment is created using client-side apply\n===") + @Test + public void shouldDeployUsingClientApply() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String appName = "client-side-apply"; + System.out.println("> Using namespace: " + account1Ns + ", appName: " + appName); + List> manifest = + KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.namespace", account1Ns) + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .asList(); + + // ------------------------- when -------------------------- + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + // ------------------------- then -------------------------- + String lastAppliedConfiguration = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.metadata.annotations.kubectl\\.kubernetes\\.io/last-applied-configuration}'"); + assertTrue( + Strings.isNotEmpty(lastAppliedConfiguration), + "Expected last-applied-configuration for " + + DEPLOYMENT_1_NAME + + " deployment to exist and be managed client-side. 
fields:\n" + + lastAppliedConfiguration); + } +} diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/InfrastructureIT.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/InfrastructureIT.java new file mode 100644 index 00000000000..21b16b5010d --- /dev/null +++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/InfrastructureIT.java @@ -0,0 +1,1353 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.it; + +import static io.restassured.RestAssured.get; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.common.base.Splitter; +import com.netflix.spinnaker.clouddriver.kubernetes.it.utils.KubeTestUtils; +import io.restassured.response.Response; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +public class InfrastructureIT extends BaseTest { + + private static final int CACHE_TIMEOUT_MIN = 5; + private static final String DEPLOYMENT_1_NAME = "deployment1"; + private static final String NETWORK_POLICY_1_NAME = "default-deny-ingress"; + private static final String NETWORK_POLICY_2_NAME = "default-deny-ingress-second"; + private static final String SERVICE_1_NAME = "service1"; + private static final String APP_SECURITY_GROUPS = "security-groups"; + private static final String APP_SERVER_GROUP_MGRS = "server-group-managers"; + private static final String APP_LOAD_BALANCERS = "load-balancers"; + private static String account1Ns; + private static String account2Ns; + + @BeforeAll + public static void setUpAll() throws IOException, InterruptedException { + account1Ns = kubeCluster.createNamespace(ACCOUNT1_NAME); + account2Ns = kubeCluster.createNamespace(ACCOUNT2_NAME); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes deployment made by spinnaker to two accounts\n" + + "When sending get clusters request\n" + + "Then both deployments should be returned\n===") + @Test + public void shouldGetClusters() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT2_NAME, + account2Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + 
"/applications/" + APP_SERVER_GROUP_MGRS + "/clusters"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + resp.then().statusCode(200); + List clustersAcc1 = resp.jsonPath().getList(ACCOUNT1_NAME); + List clustersAcc2 = resp.jsonPath().getList(ACCOUNT1_NAME); + return clustersAcc1 != null + && clustersAcc1.contains("deployment " + DEPLOYMENT_1_NAME) + && clustersAcc2 != null + && clustersAcc2.contains("deployment " + DEPLOYMENT_1_NAME); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'deployment " + + DEPLOYMENT_1_NAME + + "' cluster to return from GET /applications/" + + APP_SERVER_GROUP_MGRS + + "/clusters"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes deployment made by spinnaker to two accounts\n" + + "When sending get clusters request for one account\n" + + "Then only the desired account deployment should be returned\n===") + @Test + public void shouldGetClustersByAccount() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT2_NAME, + account2Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + "/applications/" + APP_SERVER_GROUP_MGRS + "/clusters/" + ACCOUNT1_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + Map account1Map = + resp.jsonPath() + .getMap( + "find { it.account == '" + + ACCOUNT1_NAME + + "' && it.name == 'deployment " + + DEPLOYMENT_1_NAME + + "'}"); + Map account2Map = + resp.jsonPath().getMap("find { it.account == '" + ACCOUNT2_NAME + "'}"); + return (account1Map != null && !account1Map.isEmpty()) + && (account2Map == null || account2Map.isEmpty()); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'deployment " + + DEPLOYMENT_1_NAME + + "' cluster to return from GET /applications/" + + APP_SERVER_GROUP_MGRS + + "/clusters/" + + ACCOUNT1_NAME); + } + + @DisplayName( + ".\n===\n" + + "Given two kubernetes deployments made by spinnaker\n" + + "When sending get clusters request for the deployment name and account\n" + + "Then only the desired deployment should be returned\n===") + @Test + public void shouldGetClustersByAccountAndName() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + "other", + APP_SERVER_GROUP_MGRS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when 
-------------------------- + String url = + baseUrl() + + "/applications/" + + APP_SERVER_GROUP_MGRS + + "/clusters/" + + ACCOUNT1_NAME + + "/deployment " + + DEPLOYMENT_1_NAME + + "?expand=true"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + Map<String, Object> map = + resp.jsonPath() + .getMap( + "find { it.accountName == '" + + ACCOUNT1_NAME + + "' && it.name == 'deployment " + + DEPLOYMENT_1_NAME + + "' && it.application == '" + + APP_SERVER_GROUP_MGRS + + "'}"); + return map != null && !map.isEmpty() && resp.jsonPath().getList("$").size() == 1; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'deployment " + + DEPLOYMENT_1_NAME + + "' cluster to return from GET /applications/" + + APP_SERVER_GROUP_MGRS + + "/clusters/" + + ACCOUNT1_NAME); + }
+ + @DisplayName( + ".\n===\n" + + "Given two kubernetes deployments made by spinnaker\n" + + "When sending get clusters request for the deployment name, account and type\n" + + "Then only the desired deployment should be returned\n===") + @Test + public void shouldGetClustersByAccountNameAndType() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployAndWaitStable( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + "other", + APP_SERVER_GROUP_MGRS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/applications/" + + APP_SERVER_GROUP_MGRS + + "/clusters/" + + ACCOUNT1_NAME + + "/deployment " + + DEPLOYMENT_1_NAME + + "/kubernetes?expand=true"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + return resp.jsonPath().getString("accountName").equals(ACCOUNT1_NAME) + && resp.jsonPath().getString("name").equals("deployment " + DEPLOYMENT_1_NAME) + && resp.jsonPath().getString("application").equals(APP_SERVER_GROUP_MGRS); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'deployment " + + DEPLOYMENT_1_NAME + + "' cluster to return from GET /applications/" + + APP_SERVER_GROUP_MGRS + + "/clusters/" + + ACCOUNT1_NAME); + }
+ + @DisplayName( + ".\n===\n" + + "Given one deployment associated with two replicasets\n" + + "When sending get server groups request\n" + + "Then two server groups should be returned\n===") + @Test + public void shouldGetServerGroups() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + "index.docker.io/library/alpine:3.11", + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + 
DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + "index.docker.io/library/alpine:3.12", + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + "/applications/" + APP_SERVER_GROUP_MGRS + "/serverGroups?expand=true"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + resp.then().statusCode(200); + List<Object> list = + resp.jsonPath() + .getList( + "findAll { it.account == '" + + ACCOUNT1_NAME + + "' && it.region == '" + + account1Ns + + "' && it.cluster == 'deployment " + + DEPLOYMENT_1_NAME + + "'}"); + return list != null && list.size() > 1; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for at least two server groups to be returned from GET /applications/{app}/serverGroups"); + }
+ + @DisplayName( + ".\n===\n" + + "Given one deployment associated with two applications\n" + + "When sending get server groups request by the two applications\n" + + "Then two server groups should be returned\n===") + @Test + public void shouldGetServerGroupsForApplications() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP2_NAME, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + "/serverGroups?applications=" + APP_SERVER_GROUP_MGRS + "," + APP2_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + resp.then().statusCode(200); + List<Object> list = + resp.jsonPath() + .getList( + "findAll { it.account == '" + + ACCOUNT1_NAME + + "' && it.region == '" + + account1Ns + + "'}"); + return list != null && list.size() > 1; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for at least two server groups to be returned from GET /serverGroups?applications"); + }
+ + @DisplayName( + ".\n===\n" + + "Given one deployment associated with two replicasets\n" + + "When sending get server group request for account, region and name\n" + + "Then only one server group should be returned\n===") + @Test + public void shouldGetServerGroupByMoniker() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + "index.docker.io/library/alpine:3.11", + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + "index.docker.io/library/alpine:3.12", + kubeCluster); + + List<String> rsNames = + Splitter.on(" ") + .splitToList( + kubeCluster.execKubectl( + "get -n " + + account1Ns + + " rs 
-o=jsonpath='{.items[?(@.metadata.ownerReferences[*].name==\"" + + DEPLOYMENT_1_NAME + + "\")].metadata.name}'")); + assertTrue(rsNames.size() > 1, "Expected more than one replicaset deployed"); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/serverGroups/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/replicaSet " + + rsNames.get(0); + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + return resp.jsonPath().getString("name").equals("replicaSet " + rsNames.get(0)); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'replicaSet " + + rsNames.get(0) + + "' to return from GET /serverGroups/{account}/{region}/{name}"); + }
+ + @DisplayName( + ".\n===\n" + + "Given one deployment associated with two replicasets\n" + + "When sending get server group request for application, account, region and name\n" + + "Then only one server group should be returned\n===") + @Test + public void shouldGetServerGroupByApplication() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + "index.docker.io/library/alpine:3.11", + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + "index.docker.io/library/alpine:3.12", + kubeCluster); + + List<String> rsNames = + Splitter.on(" ") + .splitToList( + kubeCluster.execKubectl( + "get -n " + + account1Ns + + " rs -o=jsonpath='{.items[?(@.metadata.ownerReferences[*].name==\"" + + DEPLOYMENT_1_NAME + + "\")].metadata.name}'")); + assertTrue(rsNames.size() > 1, "Expected more than one replicaset deployed"); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/applications/" + + APP_SERVER_GROUP_MGRS + + "/serverGroups/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/replicaSet " + + rsNames.get(0); + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + return resp.jsonPath().getString("name").equals("replicaSet " + rsNames.get(0)); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'replicaSet " + + rsNames.get(0) + + "' to return from GET /applications/{application}/serverGroups/{account}/{region}/{name}/"); + }
+ + @DisplayName( + ".\n===\n" + + "Given one deployment of two pods\n" + + "When sending get instance request for account, region and name\n" + + "Then only one pod should be returned\n===") + @Test + public void shouldGetInstanceByAccountRegionId() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + List<Map<String, Object>> manifest = + 
KubeTestUtils.loadYaml("classpath:manifests/deployment.yml") + .withValue("metadata.name", DEPLOYMENT_1_NAME) + .withValue("metadata.namespace", account1Ns) + .withValue("spec.replicas", 2) + .asList(); + List> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", APP_SERVER_GROUP_MGRS) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), body, account1Ns, "deployment " + DEPLOYMENT_1_NAME); + + List allPodNames = + Splitter.on(" ") + .splitToList( + kubeCluster.execKubectl( + "get -n " + account1Ns + " pod -o=jsonpath='{.items[*].metadata.name}'")); + List podNames = new ArrayList<>(); + for (String name : allPodNames) { + if (name.startsWith(DEPLOYMENT_1_NAME)) { + podNames.add(name); + } + } + assertFalse(podNames.isEmpty()); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/instances/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/pod " + + podNames.get(0); + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + return resp.jsonPath().getString("displayName").equals(podNames.get(0)); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'pod " + + podNames.get(0) + + "' to return from GET /instances/{account}/{region}/{name}/"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes deployment\n" + + "When sending get instance logs request for application, region and name\n" + + "Then the pod logs should be returned\n===") + @Test + public void shouldGetInstanceLogs() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + + List allPodNames = + Splitter.on(" ") + .splitToList( + kubeCluster.execKubectl( + "get -n " + account1Ns + " pod -o=jsonpath='{.items[*].metadata.name}'")); + List podNames = new ArrayList<>(); + for (String name : allPodNames) { + if (name.startsWith(DEPLOYMENT_1_NAME)) { + podNames.add(name); + } + } + assertFalse(podNames.isEmpty()); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/instances/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/pod " + + podNames.get(0) + + "/console?provider=kubernetes"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + return resp.jsonPath().getString("output[0].output") != null; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for logs of pod " + + podNames.get(0) + + " to return from GET /instances/{account}/{region}/{name}/console"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes service deployed by spinnaker to two accounts\n" + + "When sending get load balancers 
request\n" + + "Then both services should be returned\n===") + @Test + public void shouldGetLoadBalancers() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_LOAD_BALANCERS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "service", + SERVICE_1_NAME, + APP_LOAD_BALANCERS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT2_NAME, + account2Ns, + "service", + SERVICE_1_NAME, + APP_LOAD_BALANCERS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + "/applications/" + APP_LOAD_BALANCERS + "/loadBalancers"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + resp.then().statusCode(200); + Map lbAcc1 = + resp.jsonPath() + .getMap( + "find { it.account == '" + + ACCOUNT1_NAME + + "' && it.name == 'service " + + SERVICE_1_NAME + + "' && it.namespace == '" + + account1Ns + + "'}"); + Map lbAcc2 = + resp.jsonPath() + .getMap( + "find { it.account == '" + + ACCOUNT2_NAME + + "' && it.name == 'service " + + SERVICE_1_NAME + + "' && it.namespace == '" + + account2Ns + + "'}"); + return lbAcc1 != null && !lbAcc1.isEmpty() && lbAcc2 != null && !lbAcc2.isEmpty(); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'service " + + SERVICE_1_NAME + + "' to return from GET /applications/" + + APP_LOAD_BALANCERS + + "/loadBalancers"); + } + + @DisplayName( + ".\n===\n" + + "Given two kubernetes services\n" + + "When sending get load balancers by account region and name request\n" + + "Then only one service should be returned\n===") + @Test + public void shouldGetLoadBalancerByAccountRegionAndName() + throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_LOAD_BALANCERS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "service", + SERVICE_1_NAME, + APP_LOAD_BALANCERS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), ACCOUNT1_NAME, account1Ns, "service", "other", APP_LOAD_BALANCERS, kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/kubernetes/loadBalancers/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/service " + + SERVICE_1_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List list = + resp.jsonPath() + .getList( + "findAll { it.account == '" + + ACCOUNT1_NAME + + "' && it.region == '" + + account1Ns + + "' && it.name == 'service " + + SERVICE_1_NAME + + "' && it.moniker.app == '" + + APP_LOAD_BALANCERS + + "'}"); + return list != null && !list.isEmpty(); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'service " + + SERVICE_1_NAME + + "' to return from GET /kubernetes/loadBalancers/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/service other"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes deployment\n" + + "When 
sending get manifest by account, location and name request\n" + + "Then only the desired manifest should be returned\n===") + @Test + public void shouldGetManifestByAccountLocationName() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_LOAD_BALANCERS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_LOAD_BALANCERS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/manifests/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/deployment " + + DEPLOYMENT_1_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + return resp.jsonPath().getString("account").equals(ACCOUNT1_NAME) + && resp.jsonPath().getString("location").equals(account1Ns) + && resp.jsonPath().getString("name").equals("deployment " + DEPLOYMENT_1_NAME); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /manifests/" + + ACCOUNT1_NAME + + "/" + + account1Ns + + "/deployment " + + DEPLOYMENT_1_NAME + + " to return valid data"); + }
+ + @DisplayName( + ".\n===\n" + + "Given a kubernetes configmap\n" + + "When sending get raw resources request\n" + + "Then the configmap resource should be returned\n===") + // TODO: Uncomment after fixing rawResources endpoint + // @Test + public void shouldGetRawResources() throws InterruptedException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns); + String appName = "getrawresources"; + List<Map<String, Object>> manifest = + KubeTestUtils.loadYaml("classpath:manifests/configmap.yml") + .withValue("metadata.name", "myconfig") + .withValue("metadata.namespace", account1Ns) + .asList(); + List<Map<String, Object>> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", ACCOUNT1_NAME) + .withValue("deployManifest.moniker.app", appName) + .withValue("deployManifest.manifests", manifest) + .asList(); + KubeTestUtils.deployAndWaitStable(baseUrl(), body, account1Ns, "configMap myconfig-v000"); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + System.out.println("> Sending get rawResources request"); + Response resp = get(baseUrl() + "/applications/" + appName + "/rawResources"); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List<Object> result = resp.jsonPath().getList("$"); + return result != null && !result.isEmpty(); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /applications/" + + appName + + "/rawResources to return valid data"); + }
+ + @DisplayName( + ".\n===\n" + + "Given two kubernetes network policies\n" + + "When sending get securityGroups\n" + + "Then the response should contain a security group list for each account\n===") + @Test + public void shouldListSecurityGroups() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + 
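+ // Response shape assumed by the jsonPath expressions below (an illustrative sketch only; the
+ // account keys come from BaseTest constants and real payloads carry more fields per entry):
+ // { "<ACCOUNT1_NAME>": { "kubernetes": { "<account1Ns>": [ {securityGroup}, ... ] } },
+ //   "<ACCOUNT2_NAME>": { "kubernetes": { "<account2Ns>": [ {securityGroup}, ... ] } } }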
System.out.println( + "> Using namespace " + + account1Ns + + " and " + + account2Ns + + ", appName: " + + APP_SECURITY_GROUPS); + + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT2_NAME, + account2Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + "/securityGroups"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + + resp.then().statusCode(200); + List<Object> list1 = resp.jsonPath().getList(ACCOUNT1_NAME + ".kubernetes." + account1Ns); + List<Object> list2 = resp.jsonPath().getList(ACCOUNT2_NAME + ".kubernetes." + account2Ns); + return list1 != null && !list1.isEmpty() && list2 != null && !list2.isEmpty(); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /securityGroups" + + " to return valid data"); + }
+ + @DisplayName( + ".\n===\n" + + "Given two kubernetes network policies for one account\n" + + "When sending get securityGroups/{account}\n" + + "Then response should contain a security group list of size 2\n===") + @Test + public void shouldListSecurityGroupsByAccount() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SECURITY_GROUPS); + + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_2_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + "/securityGroups/" + ACCOUNT1_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List<Object> securityGroupList = resp.jsonPath().getList("kubernetes." 
+ account1Ns); + return securityGroupList != null && securityGroupList.size() == 2; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /securityGroups/" + + ACCOUNT1_NAME + + " to return valid data"); + }
+ + @DisplayName( + ".\n===\n" + + "Given two kubernetes network policies for different namespaces\n" + + "When sending get securityGroups/{account}?region={region}\n" + + "Then response should contain a security group list of size 1\n===") + @Test + public void shouldListSecurityGroupsByAccountAndRegion() + throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SECURITY_GROUPS); + + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT2_NAME, + account2Ns, + "networkPolicy", + NETWORK_POLICY_2_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + "/securityGroups/" + ACCOUNT1_NAME + "?region=" + account1Ns; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List<Object> securityGroupList = resp.jsonPath().getList("kubernetes." + account1Ns); + return securityGroupList != null && securityGroupList.size() == 1; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /securityGroups/" + + ACCOUNT1_NAME + + "?region=" + + account1Ns + + " to return valid data"); + }
+ + @DisplayName( + ".\n===\n" + + "Given a kubernetes network policy for one account\n" + + "When sending get securityGroups/{account}/{cloudprovider}\n" + + "Then response should contain the securityGroup\n===") + @Test + public void shouldListSecurityGroupsByAccountAndCloudProvider() + throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SECURITY_GROUPS); + + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + "/securityGroups/" + ACCOUNT1_NAME + "/kubernetes"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List<Object> securityGroupList = resp.jsonPath().getList(account1Ns); + return securityGroupList != null && !securityGroupList.isEmpty(); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /securityGroups/" + + ACCOUNT1_NAME + + "/kubernetes" + + " to return valid data"); + }
+ + @DisplayName( + ".\n===\n" + + "Given two kubernetes network policies for one account\n" + + "When sending get securityGroups/{account}/{cloudprovider}/{name}\n" + + "Then response should contain the security group specified in 
name\n===") + @Test + public void shouldListSecurityGroupsByAccountAndCloudProviderAndName() + throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SECURITY_GROUPS); + + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_2_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/securityGroups/" + + ACCOUNT1_NAME + + "/kubernetes/networkpolicy " + + NETWORK_POLICY_1_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List securityGroupList = resp.jsonPath().getList(account1Ns); + return securityGroupList != null && securityGroupList.size() == 1; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /securityGroups/" + + ACCOUNT1_NAME + + "/kubernetes/networkpolicy " + + NETWORK_POLICY_1_NAME + + " to return valid data"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes network policy for one account\n" + + "When sending get securityGroups/{account}/{cloudProvider}/{region}/{securityGroupNameOrId}\n" + + "Then response should contain the security group specified in securityGroupNameOrId\n===") + @Test + public void shouldGetSecurityGroupByAccountAndName() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SECURITY_GROUPS); + + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "networkPolicy", + NETWORK_POLICY_1_NAME, + APP_SECURITY_GROUPS, + kubeCluster); + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + + "/securityGroups/" + + ACCOUNT1_NAME + + "/kubernetes/" + + account1Ns + + "/networkolicy " + + NETWORK_POLICY_1_NAME; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + var displayName = resp.jsonPath().getString("displayName"); + return displayName != null && displayName.equals(NETWORK_POLICY_1_NAME); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for GET /securityGroups/" + + ACCOUNT1_NAME + + "/kubernetes/" + + account1Ns + + "/networkolicy " + + NETWORK_POLICY_1_NAME + + " to return valid data"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes deployments\n" + + "When sending get clusters /applications/{appName}/serverGroupManagers\n" + + "Then the deployment should be present in serverGroups list of the response\n===") + @Test + public void shouldGetServerGroupManagerForApplication() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", 
appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = + baseUrl() + "/applications/" + APP_SERVER_GROUP_MGRS + "/serverGroupManagers"; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200); + List serverGroupList = + resp.jsonPath() + .getList( + "findAll { it.account == '" + + ACCOUNT1_NAME + + "' && it.namespace == '" + + account1Ns + + "' && it.name == 'deployment " + + DEPLOYMENT_1_NAME + + "'}"); + return serverGroupList != null && serverGroupList.size() == 1; + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'deployment " + + "deployment" + + "' cluster to return from GET /applications/" + + APP_SERVER_GROUP_MGRS + + "/serverGroupManagers"); + } + + @DisplayName( + ".\n===\n" + + "Given a kubernetes deployment of one application made by spinnaker\n" + + "When sending get /applications/{application} request\n" + + "Then an application object should be returned\n===") + @Test + public void shouldGetApplicationInCluster() throws InterruptedException, IOException { + // ------------------------- given -------------------------- + System.out.println("> Using namespace " + account1Ns + ", appName: " + APP_SERVER_GROUP_MGRS); + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + APP_SERVER_GROUP_MGRS, + kubeCluster); + + KubeTestUtils.repeatUntilTrue( + () -> { + // ------------------------- when -------------------------- + String url = baseUrl() + "/applications/" + APP_SERVER_GROUP_MGRS; + System.out.println("> GET " + url); + Response resp = get(url); + + // ------------------------- then -------------------------- + System.out.println(resp.asString()); + if (resp.statusCode() == 404) { + return false; + } + resp.then().statusCode(200).and(); + var appNameResp = resp.jsonPath().getString("name"); + return appNameResp != null && appNameResp.equals(APP_SERVER_GROUP_MGRS); + }, + CACHE_TIMEOUT_MIN, + TimeUnit.MINUTES, + "Waited " + + CACHE_TIMEOUT_MIN + + " minutes for 'deployment " + + "deployment" + + "' cluster to return from GET /applications/" + + APP_SERVER_GROUP_MGRS); + } +} diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/PatchManifestIT.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/PatchManifestIT.java new file mode 100644 index 00000000000..e9c45171432 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/PatchManifestIT.java @@ -0,0 +1,328 @@ +package com.netflix.spinnaker.clouddriver.kubernetes.it; + +import static org.junit.jupiter.api.Assertions.*; + +import com.netflix.spinnaker.clouddriver.kubernetes.it.utils.KubeTestUtils; +import java.io.IOException; +import java.util.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +public class PatchManifestIT extends BaseTest { + private static final String DEPLOYMENT_1_NAME = "deployment1"; + 
private static final String MANIFEST_NAME = "deployment " + DEPLOYMENT_1_NAME; + private static String account1Ns; + + @BeforeAll + public static void setUpAll() throws IOException, InterruptedException { + account1Ns = kubeCluster.createNamespace(ACCOUNT1_NAME); + } + + @BeforeEach + public void deployIfMissing() throws InterruptedException, IOException { + KubeTestUtils.deployIfMissing( + baseUrl(), + ACCOUNT1_NAME, + account1Ns, + "deployment", + DEPLOYMENT_1_NAME, + "patch-manifests", + null, + kubeCluster); + }
+ + @DisplayName( + ".\n===\n" + + "Given a patch manifest\n" + + " And a label to add\n" + + "When sending patch manifest request\n" + + " And waiting on manifest stable\n" + + "Then a pod is up and running and a label is added\n===") + @Test + public void shouldPatchManifestFromText() throws IOException, InterruptedException { + // ------------------------- when -------------------------- + Map<String, Object> patchManifest = + KubeTestUtils.loadYaml("classpath:manifests/patch.yml").asMap(); + List<Map<String, Object>> patchBody = + createPatchBody(patchManifest, Collections.emptyList(), Collections.emptyList()); + KubeTestUtils.deployAndWaitStable(baseUrl(), patchBody, account1Ns, MANIFEST_NAME); + // ------------------------- then -------------------------- + podsAreReady(); + String labels = + kubeCluster.execKubectl( + "-n " + + account1Ns + + " get deployment " + + DEPLOYMENT_1_NAME + + " -o=jsonpath='{.spec.template.metadata.labels}'"); + assertTrue( + labels.contains("\"testPatch\":\"success\""), + "Expected patch to add label 'testPatch' with value 'success' to " + + DEPLOYMENT_1_NAME + + " deployment. Labels:\n" + + labels); + }
+ + @DisplayName( + ".\n===\n" + + "Given a patch manifest without image tag\n" + + " And optional docker artifact present\n" + + "When sending patch manifest request\n" + + " And waiting on manifest stable\n" + + "Then the docker artifact is applied with the patch\n===") + @Test + public void shouldBindOptionalDockerImage() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String imageNoTag = "index.docker.io/library/nginx"; + String imageWithTag = "index.docker.io/library/nginx:1.18"; + + // ------------------------- when -------------------------- + Map<String, Object> patchManifest = + KubeTestUtils.loadYaml("classpath:manifests/patch_container.yml") + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .asMap(); + + KubeTestUtils.deployAndWaitStable( + baseUrl(), + createPatchBody( + patchManifest, + Collections.singletonList(createArtifact(imageNoTag, imageWithTag)), + Collections.emptyList()), + account1Ns, + MANIFEST_NAME); + + // ------------------------- then -------------------------- + podsAreReady(); + expectedImageIsDeployed(imageWithTag); + }
+ + @DisplayName( + ".\n===\n" + + "Given a patch manifest without image tag\n" + + " And required docker artifact present\n" + + "When sending patch manifest request\n" + + " And waiting on manifest stable\n" + + "Then the docker artifact is deployed\n===") + @Test + public void shouldBindRequiredDockerImage() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String imageNoTag = "index.docker.io/library/nginx"; + String imageWithTag = "index.docker.io/library/nginx:1.18"; + + // ------------------------- when -------------------------- + Map<String, Object> patchManifest = + KubeTestUtils.loadYaml("classpath:manifests/patch_container.yml") + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .asMap(); + List<Map<String, Object>> 
requiredArtifacts = + Collections.singletonList(createArtifact(imageNoTag, imageWithTag)); + KubeTestUtils.deployAndWaitStable( + baseUrl(), + createPatchBody(patchManifest, requiredArtifacts, requiredArtifacts), + account1Ns, + MANIFEST_NAME); + + // ------------------------- then -------------------------- + podsAreReady(); + expectedImageIsDeployed(imageWithTag); + }
+ + @DisplayName( + ".\n===\n" + + "Given a patch manifest without image tag\n" + + " And required docker artifact present\n" + + " And optional docker artifact present\n" + + "When sending patch manifest request\n" + + " And waiting on manifest stable\n" + + "Then required docker artifact is deployed\n===") + @Test + public void shouldBindRequiredOverOptionalDockerImage() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + String imageNoTag = "index.docker.io/library/nginx"; + String imageWithTag = "index.docker.io/library/nginx:1.18"; + String optionalImageWithTag = "index.docker.io/library/nginx:1.19"; + + // ------------------------- and -------------------------- + Map<String, Object> artifact = createArtifact(imageNoTag, imageWithTag); + Map<String, Object> optionalArtifact = createArtifact(imageNoTag, optionalImageWithTag); + + // ------------------------- when -------------------------- + Map<String, Object> patchManifest = + KubeTestUtils.loadYaml("classpath:manifests/patch_container.yml") + .withValue("spec.template.spec.containers[0].image", imageNoTag) + .asMap(); + + KubeTestUtils.deployAndWaitStable( + baseUrl(), + createPatchBody( + patchManifest, + Arrays.asList(optionalArtifact, artifact), + Collections.singletonList(artifact)), + account1Ns, + MANIFEST_NAME); + + // ------------------------- then -------------------------- + podsAreReady(); + expectedImageIsDeployed(imageWithTag); + }
+ + @DisplayName( + ".\n===\n" + + "Given a CRD with \"ct\" shortName deployed outside of Spinnaker\n" + + "When sending a patch manifest operation setting shortName to \"cntb\"\n" + + "Then the CRD shortName is changed to \"cntb\"\n===") + @Test + public void shouldPatchCrd() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + final String kind = "customResourceDefinition"; + final String crdName = "crontabs.stable.example.com"; + final String shortName = "cntb"; + Map<String, Object> crdManifest = + KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml") + .withValue("metadata.name", crdName) + .withValue("spec.scope", "Namespaced") + .withValue("spec.names.shortNames", new String[] {"ct"}) + .asMap(); + kubeCluster.execKubectl(" apply -f -", crdManifest); + // ------------------------- when --------------------------- + crdManifest = + KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml") + .withValue("metadata.name", crdName) + .withValue("spec.scope", "Namespaced") + .withValue("spec.names.shortNames", new String[] {shortName}) + .asMap(); + List<Map<String, Object>> request = + KubeTestUtils.loadJson("classpath:requests/patch_manifest.json") + .withValue("patchManifest.app", APP1_NAME) + .withValue("patchManifest.manifestName", String.format("%s %s", kind, crdName)) + .withValue("patchManifest.patchBody", crdManifest) + .withValue("patchManifest.allArtifacts", new Object[] {}) + .withValue("patchManifest.manifests", Arrays.asList(crdManifest)) + .withValue("patchManifest.trafficManagement", null) + .withValue("patchManifest.moniker", null) + .withValue("patchManifest.enableTraffic", null) + .withValue("patchManifest.location", account1Ns) + .withValue("patchManifest.account", ACCOUNT1_NAME) + 
.withValue("patchManifest.skipExpressionEvaluation", null) + .withValue("patchManifest.requiredArtifacts", new Object[] {}) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), request, account1Ns, String.format("%s %s", kind, crdName)); + // ------------------------- then --------------------------- + String patched = + kubeCluster.execKubectl( + String.format("get %s %s -o jsonpath=\"{.spec.names.shortNames}\"", kind, crdName)); + assertTrue(patched.contains(shortName)); + } + + @DisplayName( + ".\n===\n" + + "Given a CR with \"my-awesome-cron-image\" spec.image deployed outside of Spinnaker\n" + + "When sending a patch spec.image to \"cron-image\" manifest operation\n" + + "Then the CR spec.image is change to \"cron-image\"\n===") + @Test + public void shouldPatchCr() throws IOException, InterruptedException { + // ------------------------- given -------------------------- + final String kind = "crontab.stable.example.com"; + final String crdName = "crontabs.stable.example.com"; + final String crName = "my-new-cron-object"; + final String crImage = "cron-image"; + final Map crdManifest = + KubeTestUtils.loadYaml("classpath:manifests/crd_v1.yml") + .withValue("metadata.name", crdName) + .withValue("spec.scope", "Namespaced") + .asMap(); + Map crManifest = + KubeTestUtils.loadYaml("classpath:manifests/cr_v1.yml") + .withValue("metadata.name", crName) + .withValue("spec.image", "my-awesome-cron-image") + .asMap(); + kubeCluster.execKubectl(" apply -f -", crdManifest); + kubeCluster.execKubectl("-n " + account1Ns + " apply -f -", crManifest); + // ------------------------- when --------------------------- + crManifest = + KubeTestUtils.loadYaml("classpath:manifests/cr_v1.yml") + .withValue("metadata.name", crName) + .withValue("spec.image", crImage) + .asMap(); + List> request = + KubeTestUtils.loadJson("classpath:requests/patch_manifest.json") + .withValue("patchManifest.app", APP1_NAME) + .withValue("patchManifest.manifestName", String.format("%s %s", kind, crName)) + .withValue("patchManifest.patchBody", crManifest) + .withValue("patchManifest.allArtifacts", new Object[] {}) + .withValue("patchManifest.options.mergeStrategy", "merge") + .withValue("patchManifest.manifests", Arrays.asList(crManifest)) + .withValue("patchManifest.trafficManagement", null) + .withValue("patchManifest.moniker", null) + .withValue("patchManifest.enableTraffic", null) + .withValue("patchManifest.location", account1Ns) + .withValue("patchManifest.account", ACCOUNT1_NAME) + .withValue("patchManifest.skipExpressionEvaluation", null) + .withValue("patchManifest.requiredArtifacts", new Object[] {}) + .asList(); + KubeTestUtils.deployAndWaitStable( + baseUrl(), request, account1Ns, String.format("%s %s", kind, crName)); + // ------------------------- then --------------------------- + String patched = + kubeCluster.execKubectl( + String.format( + "-n %s get %s %s -o jsonpath=\"{.spec.image}\"", account1Ns, kind, crName)); + assertTrue(patched.contains(crImage)); + } + + private List> createPatchBody( + Map patchManifest, + List> allArtifacts, + List> requiredArtifacts) { + return KubeTestUtils.loadJson("classpath:requests/patch_manifest.json") + .withValue("patchManifest.account", ACCOUNT1_NAME) + .withValue("patchManifest.location", account1Ns) + .withValue("patchManifest.manifestName", MANIFEST_NAME) + .withValue("patchManifest.patchBody", patchManifest) + .withValue("patchManifest.allArtifacts", allArtifacts) + .withValue("patchManifest.requiredArtifacts", requiredArtifacts) + .asList(); + } + + private 
+ + private Map<String, Object> createArtifact(String name, String imageWithTag) { + return KubeTestUtils.loadJson("classpath:requests/artifact.json") + .withValue("name", name) + .withValue("type", "docker/image") + .withValue("reference", imageWithTag) + .withValue("version", imageWithTag.substring(name.length() + 1)) + .asMap(); + } + + private void podsAreReady() throws IOException, InterruptedException { + String pods = kubeCluster.execKubectl("-n " + account1Ns + " get pods"); + String readyPods = + kubeCluster.execKubectl( + "-n " + account1Ns + " get deployment " + DEPLOYMENT_1_NAME + " -o=jsonpath='{.status.readyReplicas}'"); + assertEquals( + "1", + readyPods, + "Expected one ready pod for " + DEPLOYMENT_1_NAME + " deployment. Pods:\n" + pods); + } + + private void expectedImageIsDeployed(String expectedImageTag) + throws IOException, InterruptedException { + String imageDeployed = + kubeCluster.execKubectl( + "-n " + account1Ns + " get deployment " + DEPLOYMENT_1_NAME + " -o=jsonpath='{.spec.template.spec.containers[0].image}'"); + assertEquals( + expectedImageTag, + imageDeployed, + "Expected correct " + DEPLOYMENT_1_NAME + " image to be deployed"); + } +} diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/containers/KubernetesCluster.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/containers/KubernetesCluster.java new file mode 100644 index 00000000000..d8d086a205b --- /dev/null +++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/containers/KubernetesCluster.java @@ -0,0 +1,211 @@
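The KubernetesCluster class added next is a singleton test fixture around a kind (Kubernetes-in-Docker) cluster. A rough sketch of the lifecycle a test suite drives, using only methods defined in this class (the account name is illustrative):

    KubernetesCluster cluster = KubernetesCluster.getInstance();
    cluster.start();                                  // downloads kind/kubectl, then creates the cluster
    String ns = cluster.createNamespace("account1");  // yields e.g. "account1-testns00"
    String pods = cluster.execKubectl("-n " + ns + " get pods");
    cluster.stop();                                   // deletes the kind cluster, ignoring failures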
+/* + * Copyright Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.it.containers; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.google.gson.Gson; +import java.io.BufferedWriter; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.net.URL; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import org.springframework.util.FileCopyUtils; + +public class KubernetesCluster { + + private static KubernetesCluster INSTANCE; + private static final String IMAGE = System.getenv("IMAGE"); + private static final String KIND_VERSION = "0.20.0"; + private static final String KUBECTL_VERSION = System.getenv("KUBECTL_VERSION"); + private static final Path IT_BUILD_HOME = Paths.get(System.getenv("IT_BUILD_HOME")); + private static final Path KUBECFG_PATH = Paths.get(IT_BUILD_HOME.toString(), "kubecfg.yml"); + private static final Path KUBECTL_PATH = Paths.get(IT_BUILD_HOME.toString(), "kubectl"); + + private final Map<String, List<String>> namespacesByAccount = new HashMap<>(); + + public static KubernetesCluster getInstance() { + if (INSTANCE == null) { + INSTANCE = new KubernetesCluster(); + } + return INSTANCE; + } + + private KubernetesCluster() {} + + public void start() { + try { + downloadDependencies(); + createCluster(); + } catch (Exception e) { + fail("Unable to start kubernetes cluster", e); + } + } + + public void stop() { + try { + runKindCmd("delete cluster --name=kube-int-tests"); + } catch (Exception e) { + System.out.println("Exception deleting test cluster: " + e.getMessage() + " ignoring"); + } + } + + public Path getKubecfgPath() { + return KUBECFG_PATH; + } + + public String createNamespace(String accountName) throws IOException, InterruptedException { + List<String> existing = + namespacesByAccount.computeIfAbsent(accountName, k -> new ArrayList<>()); + String newNamespace = String.format("%s-testns%02d", accountName, existing.size()); + List<String> allNamespaces = + Arrays.asList(execKubectl("get ns -o=jsonpath='{.items[*].metadata.name}'").split(" ")); + if (!allNamespaces.contains(newNamespace)) { + execKubectl("create ns " + newNamespace); + } + existing.add(newNamespace); + return newNamespace; + }
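+ // Namespaces are numbered per account: the first call for "account1" returns + // "account1-testns00", the next "account1-testns01", matching the namespaces + // pre-declared for account1 in clouddriver.yml further below.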
+ + public String execKubectl(String args) throws IOException, InterruptedException { + return execKubectl(args, null); + } + + public String execKubectl(String args, Map<String, Object> manifest) + throws IOException, InterruptedException { + String json = manifestToJson(manifest); + ProcessBuilder builder = new ProcessBuilder(); + List<String> cmd = new ArrayList<>(); + cmd.add("sh"); + cmd.add("-c"); + cmd.add(KUBECTL_PATH + " --kubeconfig=" + KUBECFG_PATH + " " + args); + builder.command(cmd); + builder.redirectErrorStream(true); + Process process = builder.start(); + if (json != null) { + OutputStream os = process.getOutputStream(); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(os, UTF_8)); + writer.write(json); + writer.flush(); + writer.close(); + } + Reader reader = new InputStreamReader(process.getInputStream(), UTF_8); + String output = FileCopyUtils.copyToString(reader); + if (!process.waitFor(1, TimeUnit.MINUTES)) { + fail("Command %s did not return after one minute", cmd); + } + assertThat(process.exitValue()) + .as("Running %s returned non-zero exit code. Output:\n%s", cmd, output) + .isEqualTo(0); + System.out.println("kubectl " + args + ":\n" + output); + return output.trim(); + } + + private String manifestToJson(Map<String, Object> contents) { + return Optional.ofNullable(contents).map(v -> new Gson().toJson(v)).orElse(null); + } + + private void downloadDependencies() throws IOException { + Files.createDirectories(IT_BUILD_HOME); + String os = "linux"; + String arch = "amd64"; + // TODO: Support running tests in other os/archs + if (System.getProperty("os.name").toLowerCase().contains("mac")) { + os = "darwin"; + } + System.out.println("Detected os: " + os + " arch: " + arch); + + Path kind = Paths.get(IT_BUILD_HOME.toString(), "kind"); + if (!kind.toFile().exists()) { + String url = + String.format( + "https://github.com/kubernetes-sigs/kind/releases/download/v%s/kind-%s-%s", + KIND_VERSION, os, arch); + System.out.println("Downloading kind from " + url); + downloadFile(kind, url); + } + + Path kubectl = Paths.get(IT_BUILD_HOME.toString(), "kubectl"); + if (!kubectl.toFile().exists()) { + String url = + String.format( + "https://cdn.dl.k8s.io/release/v%s/bin/%s/%s/kubectl", KUBECTL_VERSION, os, arch); + System.out.println("Downloading kubectl from " + url); + downloadFile(kubectl, url); + } + } + + private void downloadFile(Path binary, String url) throws IOException { + try (InputStream is = new URL(url).openStream(); + ReadableByteChannel rbc = Channels.newChannel(is); + FileOutputStream fos = new FileOutputStream(binary.toFile())) { + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.flush(); + assertThat(binary.toFile().setExecutable(true, false)).isEqualTo(true); + } + } + + private void createCluster() throws IOException, InterruptedException { + String clusters = runKindCmd("get clusters"); + if (clusters.contains("kube-int-tests")) { + System.out.println("Deleting old test cluster"); + runKindCmd("delete cluster --name=kube-int-tests"); + } + runKindCmd( + "create cluster --name=kube-int-tests --kubeconfig=" + KUBECFG_PATH + " --wait=10m --image=" + IMAGE); + } + + private String runKindCmd(String args) throws IOException, InterruptedException { + ProcessBuilder builder = new ProcessBuilder(); + List<String> cmd = new ArrayList<>(); + cmd.add("sh"); + cmd.add("-c"); + cmd.add(Paths.get(IT_BUILD_HOME.toString(), "kind") + " " + args); + builder.command(cmd); + builder.redirectErrorStream(true); + Process process = builder.start(); + Reader reader = new InputStreamReader(process.getInputStream(), UTF_8); + String output = FileCopyUtils.copyToString(reader); + System.out.println(output); + process.waitFor(); + assertThat(process.exitValue()) + .as("Running %s returned non-zero exit code. Output:\n%s", cmd, output) + .isEqualTo(0); + return output; + } +}
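A note on the piping variant of execKubectl above: when a manifest map is passed, it is serialized to JSON and written to kubectl's stdin, so tests can apply in-memory manifests without temp files. A sketch, reusing a fixture added later in this patch:

    Map<String, Object> secret = KubeTestUtils.loadYaml("classpath:manifests/secret.yml").asMap();
    kubeCluster.execKubectl("-n default apply -f -", secret);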
Output:\n%s", cmd, output) + .isEqualTo(0); + return output; + } +} diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/utils/KubeTestUtils.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/utils/KubeTestUtils.java new file mode 100644 index 00000000000..bcf1cc89980 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/utils/KubeTestUtils.java @@ -0,0 +1,509 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.it.utils; + +import static com.netflix.spinnaker.clouddriver.kubernetes.it.BaseTest.ACCOUNT1_NAME; +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.is; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.netflix.spinnaker.clouddriver.kubernetes.it.containers.KubernetesCluster; +import io.restassured.path.json.JsonPath; +import io.restassured.response.Response; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import org.springframework.core.io.DefaultResourceLoader; +import org.springframework.core.io.ResourceLoader; +import org.testcontainers.utility.ComparableVersion; +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.constructor.SafeConstructor; + +public abstract class KubeTestUtils { + + private static final int SLEEP_STEP_SECONDS = 5; + + public static TestResourceFile loadYaml(String file) { + ResourceLoader resourceLoader = new DefaultResourceLoader(); + try { + InputStream is = resourceLoader.getResource(file).getInputStream(); + Yaml yaml = new Yaml(new SafeConstructor()); + Iterable contentIterable = yaml.loadAll(is); + List> content = + StreamSupport.stream(contentIterable.spliterator(), false) + .filter(Objects::nonNull) + .map(KubeTestUtils::coerceManifestToList) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + return new TestResourceFile(content); + } catch (IOException e) { + throw new UncheckedIOException("Unable to load manifest from file " + file, e); + } + } + + private static List> coerceManifestToList(Object manifest) { + ObjectMapper objectMapper = new ObjectMapper(); + if (manifest instanceof List) { + return objectMapper.convertValue(manifest, new 
+ + private static List<Map<String, Object>> coerceManifestToList(Object manifest) { + ObjectMapper objectMapper = new ObjectMapper(); + if (manifest instanceof List) { + return objectMapper.convertValue(manifest, new TypeReference<>() {}); + } + Map<String, Object> singleManifest = + objectMapper.convertValue(manifest, new TypeReference<>() {}); + return Collections.singletonList(singleManifest); + } + + public static TestResourceFile loadJson(String file) { + ResourceLoader resourceLoader = new DefaultResourceLoader(); + try { + InputStream is = resourceLoader.getResource(file).getInputStream(); + ObjectMapper objectMapper = new ObjectMapper(); + JsonNode jsonNode = objectMapper.readTree(is); + List<Map<String, Object>> content; + if (jsonNode.isArray()) { + content = objectMapper.convertValue(jsonNode, new TypeReference<>() {}); + } else { + content = + Collections.singletonList( + objectMapper.convertValue(jsonNode, new TypeReference<>() {})); + } + return new TestResourceFile(content); + } catch (IOException e) { + throw new RuntimeException("Unable to load manifest from file " + file, e); + } + } + + public static void repeatUntilTrue( + BooleanSupplier func, long duration, TimeUnit unit, String errorMsg) + throws InterruptedException { + long durationSeconds = unit.toSeconds(duration); + for (int i = 0; i < (durationSeconds / SLEEP_STEP_SECONDS); i++) { + if (!func.getAsBoolean()) { + Thread.sleep(TimeUnit.SECONDS.toMillis(SLEEP_STEP_SECONDS)); + } else { + return; + } + } + fail(errorMsg); + } + + public static class TestResourceFile { + + private final List<Map<String, Object>> content; + + public TestResourceFile(List<Map<String, Object>> content) { + this.content = content; + } + + public List<Map<String, Object>> asList() { + return content; + } + + public Map<String, Object> asMap() { + return content.get(0); + } + + @SuppressWarnings("unchecked") + public TestResourceFile withValue(String path, Object value) { + List<String> parts = Splitter.on('.').splitToList(path); + + for (Map<String, Object> entry : content) { + for (int i = 0; i < parts.size(); i++) { + if (parts.get(i).matches("^.*\\[[0-9]*]$")) { + String key = parts.get(i).substring(0, parts.get(i).indexOf('[')); + int index = + Integer.parseInt( + parts.get(i).substring(parts.get(i).indexOf('[') + 1, parts.get(i).indexOf(']'))); + List<Map<String, Object>> list = (List<Map<String, Object>>) entry.get(key); + if (i == parts.size() - 1) { + list.add(index, (Map<String, Object>) value); + break; + } + entry = list.get(index); + } else if (i == parts.size() - 1) { + entry.put(parts.get(i), value); + break; + } else if (!entry.containsKey(parts.get(i))) { + entry.put(parts.get(i), new HashMap<>()); + entry = (Map<String, Object>) entry.get(parts.get(i)); + } else { + entry = (Map<String, Object>) entry.get(parts.get(i)); + } + } + } + + return this; + } + }
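+ + // withValue takes dot-separated paths with optional [index] segments, e.g. + //   loadYaml("classpath:manifests/deployment.yml") + //       .withValue("metadata.name", "myapp") + //       .withValue("spec.template.spec.containers[0].image", "nginx:1.16"); + // Missing intermediate maps are created on demand, but an indexed segment + // requires the list to already exist in the document.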
"default" : targetNs) + + "/" + + objectName; + System.out.println("GET " + url); + Response respWait = given().queryParam("includeEvents", false).get(url); + JsonPath jsonPath = respWait.jsonPath(); + System.out.println(jsonPath.getObject("status", Map.class)); + respWait.then().statusCode(200).body("status.failed.state", is(false)); + return jsonPath.getBoolean("status.stable.state"); + }, + 5, + TimeUnit.MINUTES, + "Waited 5 minutes on GET /manifest.. to return \"status.stable.state: true\""); + System.out.println( + "< Object \"" + + objectName + + "\" stable in " + + ((System.currentTimeMillis() - start) / 1000) + + " seconds"); + } + } + + public static void disableManifest( + String baseUrl, List> reqBody, String targetNs, String objectName) + throws InterruptedException { + System.out.println("> Sending disable manifest request for " + objectName); + sendOperation(baseUrl, reqBody, targetNs); + } + + public static List sendOperation( + String baseUrl, List> reqBody, String targetNs) + throws InterruptedException { + return sendOperation(baseUrl, reqBody, targetNs, 30, TimeUnit.SECONDS); + } + + public static List sendOperation( + String baseUrl, + List> reqBody, + String targetNs, + long duration, + TimeUnit unit) + throws InterruptedException { + + String taskId = submitOperation(baseUrl, reqBody); + + List deployedObjectNames = new ArrayList<>(); + processOperation( + baseUrl, + taskId, + duration, + unit, + (Response respTask) -> { + respTask.then().body("status.failed", is(false)); + deployedObjectNames.addAll( + respTask + .jsonPath() + .getList( + "resultObjects.manifestNamesByNamespace." + + (targetNs.isBlank() ? "''" : targetNs) + + ".flatten()", + String.class)); + }); + + return deployedObjectNames; + } + + public static String sendOperationExpectFailure( + String baseUrl, + List> reqBody, + String targetNs, + long duration, + TimeUnit unit) + throws InterruptedException { + String taskId = submitOperation(baseUrl, reqBody); + + // The last status is most interesting, but a single String isn't + // effectively final and so can't be used in a lambda. So, use a list. Any + // kind of wrapper object would do. + List status = new ArrayList<>(); + processOperation( + baseUrl, + taskId, + duration, + unit, + (Response respTask) -> { + respTask.then().body("status.failed", is(true)); + status.add(respTask.jsonPath().getString("status.status")); + }); + + // Return the status to the caller so it's possible to assert on it. + return Iterables.getOnlyElement(status); + } + + /** Wait until a task has completed, calling a consumer with the response once it has. */ + public static void processOperation( + String baseUrl, String taskId, long duration, TimeUnit unit, Consumer consumer) + throws InterruptedException { + System.out.println("> Waiting for task to complete"); + long start = System.currentTimeMillis(); + + // So it's possible to capture the Response from the lambda passed to + // repeatUntilTrue, uese a wrapper. List is an arbitrary choice. 
+ + /** Wait until a task has completed, calling a consumer with the response once it has. */ + public static void processOperation( + String baseUrl, String taskId, long duration, TimeUnit unit, Consumer<Response> consumer) + throws InterruptedException { + System.out.println("> Waiting for task to complete"); + long start = System.currentTimeMillis(); + + // So it's possible to capture the Response from the lambda passed to + // repeatUntilTrue, use a wrapper. List is an arbitrary choice. + List<Response> respList = new ArrayList<>(); + + KubeTestUtils.repeatUntilTrue( + () -> { + Response respTask = getTask(baseUrl, taskId); + if (respTask == null) { + return false; + } + respList.add(respTask); + return true; + }, + duration, + unit, + "Waited " + duration + " " + unit + " on GET /task/{id} to return \"status.completed: true\""); + System.out.println( + "< Task completed in " + ((System.currentTimeMillis() - start) / 1000) + " seconds"); + + consumer.accept(Iterables.getOnlyElement(respList)); + } + + /** + * Submit an operation to clouddriver + * + * @return the task id + */ + public static String submitOperation(String baseUrl, List<Map<String, Object>> reqBody) { + String url = baseUrl + "/kubernetes/ops"; + System.out.println("POST " + url); + Response resp = + given().log().body(false).contentType("application/json").body(reqBody).post(url); + System.out.println(resp.asString()); + resp.then().statusCode(200); + System.out.println("< Completed in " + resp.getTimeIn(TimeUnit.SECONDS) + " seconds"); + return resp.jsonPath().get("id"); + } + + /** + * Get a task from clouddriver + * + * @return the response from the get task method, if the task has completed, or null if the task + * was not found, or the task hasn't completed yet. + */ + public static Response getTask(String baseUrl, String taskId) { + String taskUrl = baseUrl + "/task/" + taskId; + System.out.println("GET " + taskUrl); + Response respTask = given().get(taskUrl); + if (respTask.statusCode() == 404) { + return null; + } + respTask.then().statusCode(200); + System.out.println(respTask.jsonPath().getObject("status", Map.class)); + + if (!respTask.jsonPath().getBoolean("status.completed")) { + return null; + } + + return respTask; + }
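+ + // getTask defines the polling contract processOperation relies on: both a 404 + // and an incomplete task map to null, so repeatUntilTrue keeps polling until + // the task reports status.completed.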
+ + @SuppressWarnings("unchecked") + public static void forceCacheRefresh(String baseUrl, String targetNs, String objectName) + throws InterruptedException { + System.out.println("> Sending force cache refresh request for object \"" + objectName + "\""); + Response resp = + given() + .log() + .uri() + .contentType("application/json") + .body( + ImmutableMap.of( + "account", ACCOUNT1_NAME, + "location", targetNs, + "name", objectName)) + .post(baseUrl + "/cache/kubernetes/manifest"); + resp.then().statusCode(anyOf(is(200), is(202))); + System.out.println("< Completed in " + resp.getTimeIn(TimeUnit.SECONDS) + " seconds"); + + if (resp.statusCode() == 202) { + System.out.println("> Waiting for cache to be refreshed for object \"" + objectName + "\""); + long start = System.currentTimeMillis(); + KubeTestUtils.repeatUntilTrue( + () -> { + Response fcrWaitResp = given().log().uri().get(baseUrl + "/cache/kubernetes/manifest"); + fcrWaitResp.then().log().body(false); + List<Map> list = + Stream.of(fcrWaitResp.as(Map[].class)) + .filter( + it -> { + Map<String, Object> details = (Map<String, Object>) it.get("details"); + String name = (String) details.get("name"); + String account = (String) details.get("account"); + String location = (String) details.get("location"); + Number processedTime = (Number) it.get("processedTime"); + return Objects.equals(ACCOUNT1_NAME, account) + && Objects.equals(targetNs, location) + && Objects.equals(objectName, name) + && processedTime != null + && processedTime.longValue() > -1; + }) + .collect(Collectors.toList()); + return !list.isEmpty(); + }, + 5, + TimeUnit.MINUTES, + "GET /cache/kubernetes/manifest did not return processedTime > -1 for object \"" + objectName + "\" after 5 minutes"); + System.out.println( + "< Force cache refresh for \"" + objectName + "\" completed in " + ((System.currentTimeMillis() - start) / 1000) + " seconds"); + } else { + System.out.println( + "< Force cache refresh for object \"" + objectName + "\" succeeded immediately"); + } + } + + public static void deployAndWaitStable( + String baseUrl, String account, String namespace, String kind, String name, String app) + throws InterruptedException { + deployAndWaitStable(baseUrl, account, namespace, kind, name, app, null); + } + + public static void deployAndWaitStable( + String baseUrl, + String account, + String namespace, + String kind, + String name, + String app, + String image) + throws InterruptedException { + + TestResourceFile manifest = + KubeTestUtils.loadYaml("classpath:manifests/" + kind + ".yml") + .withValue("metadata.name", name) + .withValue("metadata.namespace", namespace); + if (image != null) { + manifest = manifest.withValue("spec.template.spec.containers[0].image", image); + } + List<Map<String, Object>> body = + KubeTestUtils.loadJson("classpath:requests/deploy_manifest.json") + .withValue("deployManifest.account", account) + .withValue("deployManifest.moniker.app", app) + .withValue("deployManifest.manifests", manifest.asList()) + .asList(); + KubeTestUtils.deployAndWaitStable(baseUrl, body, namespace, kind + " " + name); + } + + public static void deployIfMissing( + String baseUrl, + String account, + String namespace, + String kind, + String name, + String app, + KubernetesCluster kubeCluster) + throws InterruptedException, IOException { + deployIfMissing(baseUrl, account, namespace, kind, name, app, null, kubeCluster); + } + + public static void deployIfMissing( + String baseUrl, + String account, + String namespace, + String kind, + String name, + String app, + String image, + KubernetesCluster kubeCluster) + throws InterruptedException, IOException { + + String path = ""; + if (image != null) { + path = ".spec.template.spec.containers[0].image"; + } + + String output = + kubeCluster.execKubectl( + "-n " + namespace + " get " + kind + " -l app.kubernetes.io/name=" + app + " -o=jsonpath='{.items[?(@.metadata.name==\"" + name + "\")]" + path + "}'"); + if (!output.isEmpty()) { + if (image == null || output.contains(image)) { + return; + } + } + + deployAndWaitStable(baseUrl, account, namespace, kind, name, app, image); + } + + public static int compareVersion(String sv1, String sv2) { + ComparableVersion v1 = new ComparableVersion(sv1.replace("v", "")); + ComparableVersion v2 = new ComparableVersion(sv2.replace("v", "")); + + return v1.compareTo(v2); + } +} diff --git a/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/utils/TestLifecycleListener.java b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/utils/TestLifecycleListener.java new file mode 100644 index 00000000000..fda4fd489e8 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/java/com/netflix/spinnaker/clouddriver/kubernetes/it/utils/TestLifecycleListener.java @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.it.utils; + +import com.netflix.spinnaker.clouddriver.kubernetes.it.BaseTest; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +public class TestLifecycleListener + implements BeforeAllCallback, ExtensionContext.Store.CloseableResource { + + @Override + public void beforeAll(ExtensionContext context) { + // initialize "after all test run hook" + context.getRoot().getStore(ExtensionContext.Namespace.GLOBAL).put("delete_cluster", this); + } + + @Override + public void close() { + BaseTest.kubeCluster.stop(); + } +} diff --git a/clouddriver-kubernetes/src/integration/resources/clouddriver.yml b/clouddriver-kubernetes/src/integration/resources/clouddriver.yml new file mode 100644 index 00000000000..ec5d624be49 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/clouddriver.yml @@ -0,0 +1,112 @@ +spring: + application: + name: clouddriver + +kubernetes: + kubectl: + executable: ${IT_BUILD_HOME}/kubectl + enabled: true + primaryAccount: account1 + accounts: + - name: account1 + cacheIntervalSeconds: 5 + requiredGroupMembership: [] + providerVersion: V2 + permissions: {} + dockerRegistries: [] + configureImagePullSecrets: true + cacheThreads: 3 + namespaces: + - default + - account1-testns00 + - account1-testns01 + - account1-testns02 + - account1-testns03 + - account1-testns04 + omitNamespaces: [] + kinds: + - pod + - replicaset + - deployment + - service + - configmap + - secret + - networkpolicy + - cronjob + - customresourcedefinition + - crontab.stable.example.com + omitKinds: [] + customResources: + - kubernetesKind: crontab.stable.example.com + versioned: false + cachingPolicies: [] + oauthScopes: [] + oAuthScopes: [] + onlySpinnakerManaged: true + metrics: false + kubeconfigFile: ${IT_BUILD_HOME}/kubecfg.yml # File is automatically created at runtime + - name: account2 + cacheIntervalSeconds: 5 + requiredGroupMembership: [] + providerVersion: V2 + permissions: {} + dockerRegistries: [] + configureImagePullSecrets: true + cacheThreads: 1 + namespaces: + - account2-testns00 + omitNamespaces: [] + kinds: + - pod + - replicaset + - deployment + - service + - configmap + - secret + - networkpolicy + - cronjob + omitKinds: [] + customResources: [] + cachingPolicies: [] + oauthScopes: [] + oAuthScopes: [] + onlySpinnakerManaged: true + metrics: false + kubeconfigFile: ${IT_BUILD_HOME}/kubecfg.yml # File is automatically created at runtime + +logging.level.com.netflix.spinnaker.cats.sql.cluster: INFO +logging.level.com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter: WARN + +sql: + enabled: true + taskRepository: + enabled: true + cache: + enabled: true + readBatchSize: 500 + writeBatchSize: 300 + scheduler: + enabled: true + connectionPools: + default: + default: true + jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=& + tasks: + jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=& + migration: + jdbcUrl: jdbc:tc:mysql:5.7.22://somehostname:someport/clouddriver?user=root?password=& + +redis: + enabled: false + cache: + enabled: false + scheduler: + enabled: false + taskRepository: + enabled: false + +services: + fiat: + baseUrl: http://fiat.net + front50: + baseUrl: http://front50.net diff --git 
a/clouddriver-kubernetes/src/integration/resources/manifests/configmap.yml b/clouddriver-kubernetes/src/integration/resources/manifests/configmap.yml new file mode 100644 index 00000000000..6dbda78c701 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/configmap.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: myconfig +data: + file.txt: | + Hello world! diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/configmaps_with_selectors.yml b/clouddriver-kubernetes/src/integration/resources/manifests/configmaps_with_selectors.yml new file mode 100644 index 00000000000..8d54ee39cf8 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/configmaps_with_selectors.yml @@ -0,0 +1,25 @@ +apiVersion: v1 +data: + samplefile.yaml: |- + settings: + enabled: true +kind: ConfigMap +metadata: + labels: + sample-configmap-selector: one + selector-test: test + name: sample-config-map-with-selector-one + namespace: default +--- +apiVersion: v1 +data: + samplefile2.yaml: |- + more-settings: + enabled: false +kind: ConfigMap +metadata: + labels: + sample-configmap-selector: two + selector-test: test + name: sample-config-map-with-selector-two + namespace: default diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/cr_v1.yml b/clouddriver-kubernetes/src/integration/resources/manifests/cr_v1.yml new file mode 100644 index 00000000000..6e66452e55e --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/cr_v1.yml @@ -0,0 +1,7 @@ +apiVersion: "stable.example.com/v1" +kind: CronTab +metadata: + name: my-new-cron-object +spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/crd_v1.yml b/clouddriver-kubernetes/src/integration/resources/manifests/crd_v1.yml new file mode 100644 index 00000000000..a59c573188f --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/crd_v1.yml @@ -0,0 +1,30 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crontabs.stable.example.com +spec: + group: stable.example.com + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + image: + type: string + replicas: + type: integer + scope: Namespaced + names: + plural: crontabs + singular: crontab + kind: CronTab + shortNames: + - ct diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/crd_v1beta1.yml b/clouddriver-kubernetes/src/integration/resources/manifests/crd_v1beta1.yml new file mode 100644 index 00000000000..cea9d959133 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/crd_v1beta1.yml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: crontabs.stable.example.com +spec: + group: stable.example.com + scope: Namespaced + names: + plural: crontabs + singular: crontab + kind: CronTab + shortNames: + - ct + versions: + - name: v1 + served: true + storage: true diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/cronJob.yml b/clouddriver-kubernetes/src/integration/resources/manifests/cronJob.yml new file mode 100644 index 00000000000..500e07ca962 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/cronJob.yml @@ -0,0 +1,19 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: myapp +spec: + 
schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: myapp + image: 'index.docker.io/library/alpine:3.12' + imagePullPolicy: IfNotPresent + command: + - tail + - -f + - /dev/null + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/deployment.yml b/clouddriver-kubernetes/src/integration/resources/manifests/deployment.yml new file mode 100644 index 00000000000..c6d810f1b4b --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/deployment.yml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp +spec: + replicas: 1 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - image: 'index.docker.io/library/alpine:3.12' + name: myapp + command: + - tail + - -f + - /dev/null diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/deployment_with_vol.yml b/clouddriver-kubernetes/src/integration/resources/manifests/deployment_with_vol.yml new file mode 100644 index 00000000000..f9c5618424e --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/deployment_with_vol.yml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp +spec: + replicas: 1 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - image: 'index.docker.io/library/alpine:3.12' + name: myapp + command: + - tail + - -f + - /dev/null + volumeMounts: + - mountPath: /tmp/mounted + name: mounted + volumes: + - name: mounted diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/multi_deployment_service.yml b/clouddriver-kubernetes/src/integration/resources/manifests/multi_deployment_service.yml new file mode 100644 index 00000000000..7a352644290 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/multi_deployment_service.yml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp +spec: + replicas: 1 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - image: 'index.docker.io/library/alpine:3.12' + name: myapp + command: + - tail + - -f + - /dev/null +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: myservice + name: myservice +spec: + type: ClusterIP + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: myapp + sessionAffinity: None diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/multi_replicaset_service.yml b/clouddriver-kubernetes/src/integration/resources/manifests/multi_replicaset_service.yml new file mode 100644 index 00000000000..2704731bcfd --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/multi_replicaset_service.yml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: nginx +spec: + replicas: 1 + selector: + matchLabels: + service: other + template: + metadata: + labels: + service: other + spec: + containers: + - image: 'index.docker.io/library/nginx:1.14.0' + name: nginx + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx +spec: + type: NodePort + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: nginx + sessionAffinity: None diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/networkPolicy.yml b/clouddriver-kubernetes/src/integration/resources/manifests/networkPolicy.yml new file mode 
100644 index 00000000000..8cba285ed09 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/networkPolicy.yml @@ -0,0 +1,8 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-ingress +spec: + podSelector: {} + policyTypes: + - Ingress diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/patch.yml b/clouddriver-kubernetes/src/integration/resources/manifests/patch.yml new file mode 100644 index 00000000000..e6799c9485f --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/patch.yml @@ -0,0 +1,5 @@ +spec: + template: + metadata: + labels: + testPatch: success diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/patch_container.yml b/clouddriver-kubernetes/src/integration/resources/manifests/patch_container.yml new file mode 100644 index 00000000000..5a543c55a7d --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/patch_container.yml @@ -0,0 +1,6 @@ +spec: + template: + spec: + containers: + - name: engine-x + image: index.docker.io/library/nginx:1.16 diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/replicaset.yml b/clouddriver-kubernetes/src/integration/resources/manifests/replicaset.yml new file mode 100644 index 00000000000..38410103dcb --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/replicaset.yml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: nginx +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: 'index.docker.io/library/nginx:1.14.0' + name: nginx + ports: + - containerPort: 80 diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/secret.yml b/clouddriver-kubernetes/src/integration/resources/manifests/secret.yml new file mode 100644 index 00000000000..15d91bc138a --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/secret.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: mysecret +data: + file.txt: SGVsbG8gd29ybGQK diff --git a/clouddriver-kubernetes/src/integration/resources/manifests/service.yml b/clouddriver-kubernetes/src/integration/resources/manifests/service.yml new file mode 100644 index 00000000000..08a77b6dc7a --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/manifests/service.yml @@ -0,0 +1,16 @@ +kind: Service +apiVersion: v1 +metadata: + name: myservice + labels: + app: myservice +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + selector: + app: myapp + sessionAffinity: None diff --git a/clouddriver-kubernetes/src/integration/resources/requests/artifact.json b/clouddriver-kubernetes/src/integration/resources/requests/artifact.json new file mode 100644 index 00000000000..24046747fc2 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/requests/artifact.json @@ -0,0 +1,9 @@ +{ + "name": "index.docker.io/library/nginx", + "type": "docker/image", + "reference": "index.docker.io/library/nginx:1.14.0", + "version": "1.14.0", + "location": "", + "metadata": {}, + "customKind": false +} diff --git a/clouddriver-kubernetes/src/integration/resources/requests/delete_manifest.json b/clouddriver-kubernetes/src/integration/resources/requests/delete_manifest.json new file mode 100644 index 00000000000..74d277472c2 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/requests/delete_manifest.json @@ 
-0,0 +1,15 @@ +[ + { + "deleteManifest": { + "app": "app", + "mode": "mode", + "cloudProvider": "kubernetes", + "manifestName": "kind name", + "options": { + "cascading": true + }, + "location": "namespace", + "account": "account" + } + } +] diff --git a/clouddriver-kubernetes/src/integration/resources/requests/deploy_manifest.json b/clouddriver-kubernetes/src/integration/resources/requests/deploy_manifest.json new file mode 100644 index 00000000000..8e96e67deb6 --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/requests/deploy_manifest.json @@ -0,0 +1,21 @@ +[ + { + "deployManifest": { + "enableTraffic": true, + "optionalArtifacts": [], + "cloudProvider": "kubernetes", + "trafficManagement": { + "options": { + "enableTraffic": false + }, + "enabled": false + }, + "moniker": {}, + "source": "text", + "account": "", + "skipExpressionEvaluation": false, + "requiredArtifacts": [], + "manifests": [] + } + } +] diff --git a/clouddriver-kubernetes/src/integration/resources/requests/disable_manifest.json b/clouddriver-kubernetes/src/integration/resources/requests/disable_manifest.json new file mode 100644 index 00000000000..47a039ffe2d --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/requests/disable_manifest.json @@ -0,0 +1,11 @@ +[ + { + "disableManifest": { + "app": "nginx", + "cloudProvider": "kubernetes", + "manifestName": "deployment nginx", + "location": "default", + "account": "account1" + } + } +] diff --git a/clouddriver-kubernetes/src/integration/resources/requests/patch_manifest.json b/clouddriver-kubernetes/src/integration/resources/requests/patch_manifest.json new file mode 100644 index 00000000000..14f7de166fd --- /dev/null +++ b/clouddriver-kubernetes/src/integration/resources/requests/patch_manifest.json @@ -0,0 +1,26 @@ +[ + { + "patchManifest": { + "enableTraffic": true, + "allArtifacts": [], + "cloudProvider": "kubernetes", + "options": { + "mergeStrategy": "strategic", + "record": true + }, + "trafficManagement": { + "options": { + "enableTraffic": false + }, + "enabled": false + }, + "moniker": {}, + "source": "text", + "account": "", + "skipExpressionEvaluation": false, + "requiredArtifacts": [], + "manifests": [], + "mode": "static" + } + } +] diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesCloudProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesCloudProvider.groovy deleted file mode 100644 index 5a34fd701a3..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesCloudProvider.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes - -import com.netflix.spinnaker.clouddriver.core.CloudProvider -import org.springframework.stereotype.Component - -import java.lang.annotation.Annotation - -/** - * Kubernetes declaration as a {@link CloudProvider}. - */ -@Component -class KubernetesCloudProvider implements CloudProvider { - static final String ID = "kubernetes" - final String id = ID - final String displayName = "Kubernetes" - final Class operationAnnotationType = KubernetesOperation -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesOperation.groovy deleted file mode 100644 index d0129f28717..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesOperation.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes - -import java.lang.annotation.ElementType -import java.lang.annotation.Retention -import java.lang.annotation.RetentionPolicy -import java.lang.annotation.Target - -/** - * {@code KubernetesOperation}s specify implementation classes of Spinnaker AtomicOperations for Kubernetes. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@interface KubernetesOperation { - String value() -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesCachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesCachingAgent.java deleted file mode 100644 index deaabe6fc56..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesCachingAgent.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.caching; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.AccountAware; -import com.netflix.spinnaker.cats.agent.CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import lombok.Getter; - -import java.util.List; -import java.util.stream.Collectors; - -public abstract class KubernetesCachingAgent implements CachingAgent, AccountAware { - @Getter final protected String accountName; - final protected Registry registry; - final protected C credentials; - final protected ObjectMapper objectMapper; - - final protected int agentIndex; - final protected int agentCount; - - protected List namespaces; - - protected KubernetesCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - this.accountName = namedAccountCredentials.getName(); - this.credentials = namedAccountCredentials.getCredentials(); - this.objectMapper = objectMapper; - this.registry = registry; - - this.agentIndex = agentIndex; - this.agentCount = agentCount; - - reloadNamespaces(); - } - - @Override - public String getAgentType() { - return String.format("%s/%s[%d/%d]", accountName, this.getClass().getSimpleName(), agentIndex + 1, agentCount); - } - - protected void reloadNamespaces() { - namespaces = credentials.getDeclaredNamespaces() - .stream() - .filter(n -> agentCount == 1 || Math.abs(n.hashCode() % agentCount) == agentIndex) - .collect(Collectors.toList()); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesCachingAgentDispatcher.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesCachingAgentDispatcher.java deleted file mode 100644 index f88196c2eb9..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesCachingAgentDispatcher.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.caching; - -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; - -import java.util.Collection; -import java.util.List; - -public interface KubernetesCachingAgentDispatcher { - Collection buildAllCachingAgents(KubernetesNamedAccountCredentials credentials); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesConfigurationProperties.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesConfigurationProperties.groovy deleted file mode 100644 index 572119eedeb..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesConfigurationProperties.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.config - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap -import com.netflix.spinnaker.clouddriver.security.ProviderVersion -import com.netflix.spinnaker.fiat.model.resources.Permissions -import groovy.transform.ToString - -@ToString(includeNames = true) -class KubernetesConfigurationProperties { - @ToString(includeNames = true) - static class ManagedAccount { - String name - ProviderVersion providerVersion - String environment - String accountType - String context - String cluster - String oAuthServiceAccount - List oAuthScopes - String user - String kubeconfigFile - String kubeconfigContents - String kubectlExecutable - Integer kubectlRequestTimeoutSeconds; - Boolean serviceAccount - Boolean configureImagePullSecrets - List namespaces - List omitNamespaces - String skin - Integer cacheThreads - List dockerRegistries - List requiredGroupMembership - Permissions.Builder permissions = new Permissions.Builder() - String namingStrategy = "kubernetesAnnotations" - Boolean debug = false - Boolean metrics = true - List customResources; - List cachingPolicies; - List kinds - List omitKinds - } - - List accounts = [] -} - -@ToString(includeNames = true) -class LinkedDockerRegistryConfiguration { - String accountName - List namespaces -} - -@ToString(includeNames = true) -class CustomKubernetesResource { - String kubernetesKind - String spinnakerKind = KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED.toString() - String deployPriority = "100" - boolean versioned = false -} - -@ToString(includeNames = true) -class KubernetesCachingPolicy { - String kubernetesKind - int maxEntriesPerAgent -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicator.groovy deleted file mode 100644 index 0ae5bee19f3..00000000000 --- 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicator.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.health - -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import groovy.transform.InheritConstructors -import org.slf4j.Logger -import org.slf4j.LoggerFactory -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.boot.actuate.health.Health -import org.springframework.boot.actuate.health.HealthIndicator -import org.springframework.http.HttpStatus -import org.springframework.scheduling.annotation.Scheduled -import org.springframework.stereotype.Component -import org.springframework.web.bind.annotation.ResponseStatus - -import java.util.concurrent.atomic.AtomicReference - -@Component -class KubernetesHealthIndicator implements HealthIndicator { - - private static final Logger LOG = LoggerFactory.getLogger(KubernetesHealthIndicator) - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - private final AtomicReference> warningMessages = new AtomicReference<>(null) - - @Override - Health health() { - def warnings = warningMessages.get() - - def resultBuilder = new Health.Builder().up() - - warnings.each { k, v -> resultBuilder.withDetail(k, v) } - - return resultBuilder.build() - } - - @Scheduled(fixedDelay = 300000L) - void checkHealth() { - def warnings = [:] - - Set kubernetesCredentialsSet = accountCredentialsProvider.all.findAll { - it instanceof KubernetesNamedAccountCredentials - } as Set - - for (KubernetesNamedAccountCredentials accountCredentials in kubernetesCredentialsSet) { - try { - KubernetesCredentials kubernetesCredentials = accountCredentials.credentials - kubernetesCredentials.getDeclaredNamespaces() - } catch (Exception ex) { - warnings.put("kubernetes:${accountCredentials.name}".toString(), ex.message) - } - } - - warningMessages.set(warnings) - } - - @ResponseStatus(value = HttpStatus.SERVICE_UNAVAILABLE, reason = "Problem communicating with Kubernetes.") - @InheritConstructors - static class KubernetesIOException extends RuntimeException {} -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/KubernetesModelUtil.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/KubernetesModelUtil.java deleted file mode 100644 index 2e3bc394ac9..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/KubernetesModelUtil.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2017 Google, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.provider; - -import com.netflix.spinnaker.clouddriver.model.HealthState; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -@Slf4j -public class KubernetesModelUtil { - public static long translateTime(String time) { - try { - return StringUtils.isNotEmpty(time) ? (new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX").parse(time)).getTime() : 0; - } catch (ParseException e) { - log.error("Failed to parse kubernetes timestamp", e); - return 0; - } - } - - public static HealthState getHealthState(List> health) { - return someUpRemainingUnknown(health) ? HealthState.Up : - someSucceededRemainingUnknown(health) ? HealthState.Succeeded : - anyStarting(health) ? HealthState.Starting : - anyDown(health) ? HealthState.Down : - anyFailed(health) ? HealthState.Failed : - anyOutOfService(health) ? HealthState.OutOfService : - HealthState.Unknown; - } - - private static boolean stateEquals(Map health, HealthState state) { - Object healthState = health.get("state"); - return healthState != null && healthState.equals(state.name()); - } - - private static boolean someUpRemainingUnknown(List> healthsList) { - List> knownHealthList = healthsList.stream() - .filter(h -> !stateEquals(h, HealthState.Unknown)) - .collect(Collectors.toList()); - - return !knownHealthList.isEmpty() && knownHealthList.stream().allMatch(h -> stateEquals(h, HealthState.Up)); - } - - private static boolean someSucceededRemainingUnknown(List> healthsList) { - List> knownHealthList = healthsList.stream() - .filter(h -> !stateEquals(h, HealthState.Unknown)) - .collect(Collectors.toList()); - - return !knownHealthList.isEmpty() && knownHealthList.stream().allMatch(h -> stateEquals(h, HealthState.Succeeded)); - } - - private static boolean anyDown(List> healthsList) { - return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.Down)); - } - - private static boolean anyStarting(List> healthsList) { - return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.Starting)); - } - - private static boolean anyFailed(List> healthsList) { - return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.Failed)); - } - - private static boolean anyOutOfService(List> healthsList) { - return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.OutOfService)); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesApiClientConfig.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesApiClientConfig.groovy deleted file mode 100644 index 2ffe708ac88..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesApiClientConfig.groovy +++ 
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesApiClientConfig.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesApiClientConfig.groovy
deleted file mode 100644
index 2ffe708ac88..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesApiClientConfig.groovy
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2017 Cisco, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.security
-
-import groovy.util.logging.Slf4j
-import io.kubernetes.client.ApiClient
-import io.kubernetes.client.util.Config
-import io.kubernetes.client.util.KubeConfig
-import org.apache.commons.lang3.StringUtils
-import org.yaml.snakeyaml.Yaml
-import org.yaml.snakeyaml.constructor.SafeConstructor
-
-import java.nio.file.Files
-
-@Slf4j
-public class KubernetesApiClientConfig extends Config {
-  String kubeconfigFile
-  String context
-  String cluster
-  String user
-  String userAgent
-  Boolean serviceAccount
-
-  public KubernetesApiClientConfig(String kubeconfigFile, String context, String cluster, String user, String userAgent, Boolean serviceAccount) {
-    this.kubeconfigFile = kubeconfigFile
-    this.context = context
-    this.cluster = cluster
-    this.user = user
-    this.userAgent = userAgent
-    this.serviceAccount = serviceAccount
-  }
-
-  public ApiClient getApiClient() throws Exception {
-    if (serviceAccount) {
-      return withServiceAccount()
-    } else {
-      return withKubeConfig()
-    }
-  }
-
-  ApiClient withServiceAccount() {
-    ApiClient client = new ApiClient()
-
-    try {
-      boolean serviceAccountCaCertExists = Files.isRegularFile(new File(io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH).toPath())
-      if (serviceAccountCaCertExists) {
-        client.setSslCaCert(new FileInputStream(io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH))
-      } else {
-        throw new IllegalStateException("Could not find CA cert for service account at $io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH")
-      }
-    } catch(IOException e) {
-      throw new IllegalStateException("Could not find CA cert for service account at $io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH", e)
-    }
-
-    try {
-      String serviceTokenCandidate = new String(Files.readAllBytes(new File(io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH).toPath()))
-      if (serviceTokenCandidate != null) {
-        client.setApiKey("Bearer " + serviceTokenCandidate)
-      } else {
-        throw new IllegalStateException("Did not find service account token at $io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH")
-      }
-    } catch (IOException e) {
-      throw new IllegalStateException("Could not read service account token at $io.fabric8.kubernetes.client.Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH", e)
-    }
-
-    return client
-  }
-
-  ApiClient withKubeConfig() {
-    KubeConfig kubeconfig
-
-    try {
-      if (StringUtils.isEmpty(kubeconfigFile)) {
-        kubeconfig = KubeConfig.loadDefaultKubeConfig()
-      } else {
-        kubeconfig = KubeConfig.loadKubeConfig(new FileReader(kubeconfigFile))
-      }
-    } catch (FileNotFoundException e) {
-      throw new RuntimeException("Unable to create credentials from kubeconfig file: " + e, e)
-    } catch (Exception e2) {
-      throw new RuntimeException("Missing required field(s) in kubernetes configuration file.")
-    }
-
-    InputStream is = new FileInputStream(kubeconfigFile)
-    Reader input = new InputStreamReader(is)
-    Yaml yaml = new Yaml(new SafeConstructor())
-    Object config = yaml.load(input)
-    Map configMap = (Map)config
-
-    //TODO: Need to validate cluster and user when client library exposes these api.
-    if (StringUtils.isEmpty(context) && !configMap.get("current-context")) {
-      throw new RuntimeException("Missing required field 'current-context' in kubeconfig file and no context specified in clouddriver configuration.")
-    }
-
-    if (!StringUtils.isEmpty(context)) {
-      kubeconfig.setContext(context);
-    }
-
-    ApiClient client = Config.fromConfig(kubeconfig);
-
-    if (!StringUtils.isEmpty(userAgent)) {
-      client.setUserAgent(userAgent);
-    }
-
-    is.close()
-    input.close()
-
-    return client
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentials.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentials.java
deleted file mode 100644
index 379e5c156ad..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentials.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.security;
-
-import java.util.List;
-
-public interface KubernetesCredentials {
-  List<String> getDeclaredNamespaces();
-}
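For reference, the in-cluster path that withServiceAccount() above implements reduces to two files Kubernetes mounts into every pod: a CA bundle for TLS and a bearer token for authentication. A stripped-down, hypothetical sketch of the same wiring against the official Java client (the constants are the standard service-account mount paths; production code should also handle missing files and token rotation):

import io.kubernetes.client.ApiClient;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

class InClusterClientSketch {
  // Standard service account mount points inside a pod.
  static final Path CA_CRT = Paths.get("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt");
  static final Path TOKEN = Paths.get("/var/run/secrets/kubernetes.io/serviceaccount/token");

  static ApiClient build() throws IOException {
    ApiClient client = new ApiClient();
    client.setSslCaCert(Files.newInputStream(CA_CRT)); // trust the cluster's CA
    client.setApiKey("Bearer " + new String(Files.readAllBytes(TOKEN)).trim()); // authenticate as the pod's service account
    return client;
  }
}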
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentials.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentials.java
deleted file mode 100644
index ec61e4e925c..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentials.java
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright 2016 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.security;
-
-import com.netflix.spectator.api.Registry;
-import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider;
-import com.netflix.spinnaker.clouddriver.kubernetes.config.CustomKubernetesResource;
-import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesCachingPolicy;
-import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration;
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials;
-import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
-import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
-import com.netflix.spinnaker.clouddriver.security.ProviderVersion;
-import com.netflix.spinnaker.fiat.model.resources.Permissions;
-import com.netflix.spinnaker.moniker.Namer;
-import org.apache.commons.lang3.StringUtils;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static com.netflix.spinnaker.clouddriver.security.ProviderVersion.v1;
-
-public class KubernetesNamedAccountCredentials<C extends KubernetesCredentials> implements AccountCredentials<C> {
-  final private String cloudProvider = "kubernetes";
-  final private String name;
-  final private ProviderVersion providerVersion;
-  final private String environment;
-  final private String accountType;
-  final private String context;
-  final private String cluster;
-  final private String user;
-  final private String userAgent;
-  final private String kubeconfigFile;
-  final private String kubectlExecutable;
-  final private Boolean serviceAccount;
-  final private Boolean metrics;
-  private List<String> namespaces;
-  private List<String> omitNamespaces;
-  private String skin;
-  final private int cacheThreads;
-  private C credentials;
-  private final List<String> requiredGroupMembership;
-  private final Permissions permissions;
-  private final List<LinkedDockerRegistryConfiguration> dockerRegistries;
-  private final Registry spectatorRegistry;
-  private final AccountCredentialsRepository accountCredentialsRepository;
-  private final KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap;
-
-  KubernetesNamedAccountCredentials(String name,
-                                    ProviderVersion providerVersion,
-                                    AccountCredentialsRepository accountCredentialsRepository,
-                                    String userAgent,
-                                    String environment,
-                                    String accountType,
-                                    String context,
-                                    String cluster,
-                                    String user,
-                                    String kubeconfigFile,
-                                    String kubectlExecutable,
-                                    Boolean serviceAccount,
-                                    Boolean metrics,
-                                    List<String> namespaces,
-                                    List<String> omitNamespaces,
-                                    String skin,
-                                    int cacheThreads,
-                                    List<LinkedDockerRegistryConfiguration> dockerRegistries,
-                                    List<String> requiredGroupMembership,
-                                    Permissions permissions,
-                                    Registry spectatorRegistry,
-                                    KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap,
-                                    C credentials) {
-    this.name = name;
-    this.providerVersion = providerVersion;
-    this.environment = environment;
-    this.accountType = accountType;
-    this.context = context;
-    this.cluster = cluster;
-    this.user = user;
-    this.userAgent = userAgent;
-    this.kubeconfigFile = kubeconfigFile;
-    this.kubectlExecutable = kubectlExecutable;
-    this.serviceAccount = serviceAccount;
-    this.metrics = metrics;
-    this.namespaces = namespaces;
-    this.omitNamespaces = omitNamespaces;
-    this.skin = skin;
-    this.cacheThreads = cacheThreads;
-    this.requiredGroupMembership = requiredGroupMembership;
-    this.permissions = permissions;
-    this.dockerRegistries = dockerRegistries;
-    this.accountCredentialsRepository = accountCredentialsRepository;
-    this.spectatorRegistry = spectatorRegistry;
-    this.credentials = credentials;
-    this.kubernetesSpinnakerKindMap = kubernetesSpinnakerKindMap;
-  }
-
-  public List<String> getNamespaces() {
-    return credentials.getDeclaredNamespaces();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public ProviderVersion getProviderVersion() {
-    return providerVersion;
-  }
-
-  @Override
-  public String getSkin() {
-    return skin != null ? skin : getProviderVersion().toString();
-  }
-
-  @Override
-  public String getEnvironment() {
-    return environment;
-  }
-
-  @Override
-  public String getAccountType() {
-    return accountType;
-  }
-
-  @Override
-  public C getCredentials() {
-    return credentials;
-  }
-
-  public String getKubectlExecutable() {
-    return kubectlExecutable;
-  }
-
-  @Override
-  public String getCloudProvider() {
-    return cloudProvider;
-  }
-
-  public int getCacheThreads() {
-    return cacheThreads;
-  }
-
-  public List<LinkedDockerRegistryConfiguration> getDockerRegistries() {
-    return dockerRegistries;
-  }
-
-  public Permissions getPermissions() {
-    return permissions;
-  }
-
-  public Map<String, String> getSpinnakerKindMap() {
-    if (kubernetesSpinnakerKindMap == null) {
-      return new HashMap<>();
-    }
-    Map<String, String> kindMap = new HashMap<>(kubernetesSpinnakerKindMap.kubernetesToSpinnakerKindStringMap());
-    C creds = getCredentials();
-    if (creds instanceof KubernetesV2Credentials) {
-      ((KubernetesV2Credentials) creds).getCustomResources().forEach(customResource -> {
-        kindMap.put(customResource.getKubernetesKind(), customResource.getSpinnakerKind());
-      });
-    }
-    return kindMap;
-  }
-
-  @Override
-  public List<String> getRequiredGroupMembership() {
-    return requiredGroupMembership;
-  }
-
-  static class Builder<C extends KubernetesCredentials> {
-    String name;
-    ProviderVersion providerVersion;
-    String environment;
-    String accountType;
-    String context;
-    String cluster;
-    String oAuthServiceAccount;
-    List<String> oAuthScopes;
-    String user;
-    String userAgent;
-    String kubeconfigFile;
-    String kubeconfigContents;
-    String kubectlExecutable;
-    Integer kubectlRequestTimeoutSeconds;
-    Boolean serviceAccount;
-    Boolean metrics;
-    Boolean configureImagePullSecrets;
-    List<String> namespaces;
-    List<String> omitNamespaces;
-    String skin;
-    int cacheThreads;
-    C credentials;
-    List<String> requiredGroupMembership;
-    Permissions permissions;
-    List<LinkedDockerRegistryConfiguration> dockerRegistries;
-    Registry spectatorRegistry;
-    AccountCredentialsRepository accountCredentialsRepository;
-    KubectlJobExecutor jobExecutor;
-    Namer namer;
-    List<CustomKubernetesResource> customResources;
-    List<KubernetesCachingPolicy> cachingPolicies;
-    List<String> kinds;
-    List<String> omitKinds;
-    boolean debug;
-    KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap;
-
-    Builder kubernetesSpinnakerKindMap(KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap) {
-      this.kubernetesSpinnakerKindMap = kubernetesSpinnakerKindMap;
-      return this;
-    }
-
-    Builder name(String name) {
-      this.name = name;
-      return this;
-    }
-
-    Builder providerVersion(ProviderVersion providerVersion) {
-      this.providerVersion = providerVersion;
-      return this;
-    }
-
-    Builder environment(String environment) {
-      this.environment = environment;
-      return this;
-    }
-
-    Builder
accountType(String accountType) { - this.accountType = accountType; - return this; - } - - Builder context(String context) { - this.context = context; - return this; - } - - Builder cluster(String cluster) { - this.cluster = cluster; - return this; - } - - Builder oAuthServiceAccount(String oAuthServiceAccount) { - this.oAuthServiceAccount = oAuthServiceAccount; - return this; - } - - Builder oAuthScopes(List oAuthScopes) { - this.oAuthScopes = oAuthScopes; - return this; - } - - Builder user(String user) { - this.user = user; - return this; - } - - Builder userAgent(String userAgent) { - this.userAgent = userAgent; - return this; - } - - Builder kubeconfigFile(String kubeconfigFile) { - this.kubeconfigFile = kubeconfigFile; - return this; - } - - Builder kubeconfigContents(String kubeconfigContents) { - this.kubeconfigContents = kubeconfigContents; - return this; - } - - Builder kubectlExecutable(String kubectlExecutable) { - this.kubectlExecutable = kubectlExecutable; - return this; - } - - Builder kubectlRequestTimeoutSeconds(Integer kubectlRequestTimeoutSeconds) { - this.kubectlRequestTimeoutSeconds = kubectlRequestTimeoutSeconds; - return this; - } - - Builder serviceAccount(Boolean serviceAccount) { - this.serviceAccount = serviceAccount; - return this; - } - - Builder metrics(Boolean metrics) { - this.metrics = metrics; - return this; - } - - Builder configureImagePullSecrets(Boolean configureImagePullSecrets) { - this.configureImagePullSecrets = configureImagePullSecrets; - return this; - } - - Builder requiredGroupMembership(List requiredGroupMembership) { - this.requiredGroupMembership = requiredGroupMembership; - return this; - } - - Builder permissions(Permissions permissions) { - if (permissions.isRestricted()) { - this.requiredGroupMembership = Collections.emptyList(); - this.permissions = permissions; - } - return this; - } - - Builder dockerRegistries(List dockerRegistries) { - this.dockerRegistries = dockerRegistries; - return this; - } - - Builder namespaces(List namespaces) { - this.namespaces = namespaces; - return this; - } - - Builder omitNamespaces(List omitNamespaces) { - this.omitNamespaces = omitNamespaces; - return this; - } - - Builder skin(String skin) { - this.skin = skin; - return this; - } - - Builder cacheThreads(int cacheThreads) { - this.cacheThreads = cacheThreads; - return this; - } - - Builder credentials(C credentials) { - this.credentials = credentials; - return this; - } - - Builder spectatorRegistry(Registry spectatorRegistry) { - this.spectatorRegistry = spectatorRegistry; - return this; - } - - Builder accountCredentialsRepository(AccountCredentialsRepository accountCredentialsRepository) { - this.accountCredentialsRepository = accountCredentialsRepository; - return this; - } - - Builder jobExecutor(KubectlJobExecutor jobExecutor) { - this.jobExecutor = jobExecutor; - return this; - } - - Builder debug(boolean debug) { - this.debug = debug; - return this; - } - - Builder namer(Namer namer) { - this.namer = namer; - return this; - } - - Builder cachingPolicies(List cachingPolicies) { - this.cachingPolicies = cachingPolicies; - return this; - } - - Builder customResources(List customResources) { - this.customResources = customResources; - return this; - } - - Builder kinds(List kinds) { - this.kinds = kinds; - return this; - } - - Builder omitKinds(List omitKinds) { - this.omitKinds = omitKinds; - return this; - } - - private C buildCredentials() { - switch (providerVersion) { - case v1: - return (C) new KubernetesV1Credentials( - name, - 
kubeconfigFile, - context, - cluster, - user, - userAgent, - serviceAccount, - configureImagePullSecrets, - namespaces, - omitNamespaces, - dockerRegistries, - spectatorRegistry, - accountCredentialsRepository - ); - case v2: - NamerRegistry.lookup() - .withProvider(KubernetesCloudProvider.getID()) - .withAccount(name) - .setNamer(KubernetesManifest.class, namer); - return (C) new KubernetesV2Credentials.Builder() - .accountName(name) - .kubeconfigFile(kubeconfigFile) - .kubectlExecutable(kubectlExecutable) - .kubectlRequestTimeoutSeconds(kubectlRequestTimeoutSeconds) - .context(context) - .oAuthServiceAccount(oAuthServiceAccount) - .oAuthScopes(oAuthScopes) - .serviceAccount(serviceAccount) - .userAgent(userAgent) - .namespaces(namespaces) - .omitNamespaces(omitNamespaces) - .registry(spectatorRegistry) - .customResources(customResources) - .cachingPolicies(cachingPolicies) - .kinds(kinds) - .omitKinds(omitKinds) - .metrics(metrics) - .debug(debug) - .jobExecutor(jobExecutor) - .build(); - default: - throw new IllegalArgumentException("Unknown provider type: " + providerVersion); - } - } - - KubernetesNamedAccountCredentials build() { - if (StringUtils.isEmpty(name)) { - throw new IllegalArgumentException("Account name for Kubernetes provider missing."); - } - - if ((omitNamespaces != null && !omitNamespaces.isEmpty()) && (namespaces != null && !namespaces.isEmpty())) { - throw new IllegalArgumentException("At most one of 'namespaces' and 'omitNamespaces' can be specified"); - } - - if ((omitKinds != null && !omitKinds.isEmpty()) && (kinds != null && !kinds.isEmpty())) { - throw new IllegalArgumentException("At most one of 'kinds' and 'omitKinds' can be specified"); - } - - if (cacheThreads == 0) { - cacheThreads = 1; - } - - if (providerVersion == null) { - providerVersion = v1; - } - - if (StringUtils.isEmpty(kubeconfigFile)){ - if (StringUtils.isEmpty(kubeconfigContents)) { - kubeconfigFile = System.getProperty("user.home") + "/.kube/config"; - } else { - try { - File temp = File.createTempFile("kube", "config"); - BufferedWriter writer = new BufferedWriter(new FileWriter(temp)); - writer.write(kubeconfigContents); - writer.close(); - kubeconfigFile = temp.getAbsolutePath(); - } catch (IOException e) { - throw new RuntimeException("Unable to persist 'kubeconfigContents' parameter to disk: " + e.getMessage(), e); - } - } - } - - if (requiredGroupMembership != null && !requiredGroupMembership.isEmpty()) { - requiredGroupMembership = Collections.unmodifiableList(requiredGroupMembership); - } else { - requiredGroupMembership = Collections.emptyList(); - } - - if (configureImagePullSecrets == null) { - configureImagePullSecrets = true; - } - - if (serviceAccount == null) { - serviceAccount = false; - } - - if (metrics == null) { - // on by default - metrics = true; - } - - if (credentials == null) { - credentials = buildCredentials(); - } - - return new KubernetesNamedAccountCredentials( - name, - providerVersion, - accountCredentialsRepository, - userAgent, - environment, - accountType, - context, - cluster, - user, - kubeconfigFile, - kubectlExecutable, - serviceAccount, - metrics, - namespaces, - omitNamespaces, - skin, - cacheThreads, - dockerRegistries, - requiredGroupMembership, - permissions, - spectatorRegistry, - kubernetesSpinnakerKindMap, - credentials - ); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentialsInitializer.groovy 
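One detail in build() above deserves a note: when an account supplies kubeconfigContents inline rather than a kubeconfigFile, the contents are written to a temporary file so everything downstream (including kubectl invocations) can keep assuming a path on disk. A hypothetical sketch of the same idea using try-with-resources, which closes the writer even when the write fails:

import java.io.IOException;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;

class KubeconfigMaterializer {
  /** Persists inline kubeconfig contents to a temp file and returns the file's path. */
  static String materialize(String kubeconfigContents) {
    try {
      Path temp = Files.createTempFile("kube", "config");
      try (Writer writer = Files.newBufferedWriter(temp)) {
        writer.write(kubeconfigContents);
      }
      return temp.toAbsolutePath().toString();
    } catch (IOException e) {
      throw new IllegalStateException("Unable to persist kubeconfig contents to disk", e);
    }
  }
}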
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentialsInitializer.groovy deleted file mode 100644 index 07252777b6a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentialsInitializer.groovy +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.security - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor -import com.netflix.spinnaker.clouddriver.names.NamerRegistry -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.ApplicationContext -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope - -@Slf4j -@Configuration -class KubernetesNamedAccountCredentialsInitializer implements CredentialsInitializerSynchronizable { - private static final Integer DEFAULT_CACHE_THREADS = 1 - - @Autowired Registry spectatorRegistry - @Autowired KubectlJobExecutor jobExecutor - @Autowired NamerRegistry namerRegistry - @Autowired KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap - - @Bean - List kubernetesNamedAccountCredentials( - String clouddriverUserAgentApplicationName, - KubernetesConfigurationProperties kubernetesConfigurationProperties, - ApplicationContext applicationContext, - AccountCredentialsRepository accountCredentialsRepository, - List providerSynchronizerTypeWrappers - ) { - synchronizeKubernetesAccounts(clouddriverUserAgentApplicationName, kubernetesConfigurationProperties, null, applicationContext, accountCredentialsRepository, providerSynchronizerTypeWrappers) - } - - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeKubernetesAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - List synchronizeKubernetesAccounts( - String clouddriverUserAgentApplicationName, - KubernetesConfigurationProperties kubernetesConfigurationProperties, - CatsModule catsModule, - ApplicationContext applicationContext, - AccountCredentialsRepository accountCredentialsRepository, - List 
providerSynchronizerTypeWrappers) { - def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = - ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, - KubernetesNamedAccountCredentials, - kubernetesConfigurationProperties.accounts) - - // TODO(lwander): Modify accounts when their dockerRegistries attribute is updated as well -- need to ask @duftler. - accountsToAdd.each { KubernetesConfigurationProperties.ManagedAccount managedAccount -> - try { - def kubernetesAccount = new KubernetesNamedAccountCredentials.Builder() - .accountCredentialsRepository(accountCredentialsRepository) - .userAgent(clouddriverUserAgentApplicationName) - .name(managedAccount.name) - .providerVersion(managedAccount.providerVersion) - .environment(managedAccount.environment ?: managedAccount.name) - .accountType(managedAccount.accountType ?: managedAccount.name) - .context(managedAccount.context) - .cluster(managedAccount.cluster) - .oAuthServiceAccount(managedAccount.oAuthServiceAccount) - .oAuthScopes(managedAccount.oAuthScopes) - .user(managedAccount.user) - .kubeconfigFile(managedAccount.kubeconfigFile) - .kubeconfigContents(managedAccount.kubeconfigContents) - .kubectlExecutable(managedAccount.kubectlExecutable) - .kubectlRequestTimeoutSeconds(managedAccount.kubectlRequestTimeoutSeconds) - .serviceAccount(managedAccount.serviceAccount) - .configureImagePullSecrets(managedAccount.configureImagePullSecrets) - .namespaces(managedAccount.namespaces) - .omitNamespaces(managedAccount.omitNamespaces) - .skin(managedAccount.skin) - .cacheThreads(managedAccount.cacheThreads ?: DEFAULT_CACHE_THREADS) - .dockerRegistries(managedAccount.dockerRegistries) - .requiredGroupMembership(managedAccount.requiredGroupMembership) - .permissions(managedAccount.permissions.build()) - .spectatorRegistry(spectatorRegistry) - .jobExecutor(jobExecutor) - .namer(namerRegistry.getNamingStrategy(managedAccount.namingStrategy)) - .customResources(managedAccount.customResources) - .cachingPolicies(managedAccount.cachingPolicies) - .kinds(managedAccount.kinds) - .omitKinds(managedAccount.omitKinds) - .metrics(managedAccount.metrics) - .debug(managedAccount.debug) - .kubernetesSpinnakerKindMap(kubernetesSpinnakerKindMap) - .build() - - accountCredentialsRepository.save(managedAccount.name, kubernetesAccount) - } catch (e) { - log.info "Could not load account ${managedAccount.name} for Kubernetes.", e - } - } - - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - - if (accountsToAdd && catsModule) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers) - } - - accountCredentialsRepository.all.findAll { - it instanceof KubernetesNamedAccountCredentials - } as List - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesApiAdaptor.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesApiAdaptor.groovy deleted file mode 100644 index 8966f0fdd2d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesApiAdaptor.groovy +++ /dev/null @@ -1,553 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
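The synchronizer above is at heart a reconciliation sweep: compute the delta between accounts already registered and accounts in configuration, register additions one at a time so a single bad kubeconfig cannot abort the sweep, and deregister deletions. A condensed, hypothetical Java sketch of that shape (the builder function stands in for the credentials construction above):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class AccountReconciler<C, A> {
  private final Map<String, A> repository = new ConcurrentHashMap<>();
  private final Function<C, A> builder; // stands in for credentials construction; may throw on bad config

  AccountReconciler(Function<C, A> builder) {
    this.builder = builder;
  }

  /** Reconciles registered accounts against the desired configuration. */
  void synchronize(Map<String, C> desired) {
    // Deregister accounts that disappeared from configuration.
    repository.keySet().removeIf(name -> !desired.containsKey(name));
    // Register additions and updates one at a time; one bad account must not abort the sweep.
    desired.forEach((name, config) -> {
      try {
        repository.put(name, builder.apply(config));
      } catch (RuntimeException e) {
        System.err.printf("Could not load account %s: %s%n", name, e.getMessage());
      }
    });
  }
}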
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.api - -import com.netflix.spectator.api.Clock -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.ConfigMap -import io.fabric8.kubernetes.api.model.DoneableHorizontalPodAutoscaler -import io.fabric8.kubernetes.api.model.DoneableSecret -import io.fabric8.kubernetes.api.model.Event -import io.fabric8.kubernetes.api.model.HasMetadata -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscaler -import io.fabric8.kubernetes.api.model.batch.Job -import io.fabric8.kubernetes.api.model.Namespace -import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.Secret -import io.fabric8.kubernetes.api.model.Service -import io.fabric8.kubernetes.api.model.ServiceAccount -import io.fabric8.kubernetes.api.model.apps.Deployment -import io.fabric8.kubernetes.api.model.apps.DoneableDeployment -import io.fabric8.kubernetes.api.model.extensions.Ingress -import io.fabric8.kubernetes.api.model.apps.ReplicaSet -import io.fabric8.kubernetes.client.DefaultKubernetesClient -import io.fabric8.kubernetes.client.KubernetesClient -import io.fabric8.kubernetes.client.KubernetesClientException - -import java.util.concurrent.TimeUnit - -@Slf4j -class KubernetesApiAdaptor { - io.fabric8.kubernetes.client.Config config - String account - - static final int RETRY_COUNT = 20 - static final long RETRY_MAX_WAIT_MILLIS = TimeUnit.SECONDS.toMillis(10) - static final long RETRY_INITIAL_WAIT_MILLIS = 100 - static final String DEPLOYMENT_ANNOTATION = "deployment.kubernetes.io" - final KubernetesClient client - final Registry spectatorRegistry - final Clock spectatorClock - - public spectatorRegistry() { return spectatorRegistry } - - KubernetesApiAdaptor(String account, io.fabric8.kubernetes.client.Config config, Registry spectatorRegistry) { - if (!config) { - throw new IllegalArgumentException("Config may not be null.") - } - this.config = config - this.account = account - this.client = new DefaultKubernetesClient(this.config) - this.spectatorRegistry = spectatorRegistry - this.spectatorClock = spectatorRegistry.clock() - } - - KubernetesOperationException formatException(String operation, String namespace, KubernetesClientException e) { - account ? new KubernetesOperationException(account, "$operation in $namespace", e) : - new KubernetesOperationException("$operation in $namespace", e) - } - - KubernetesOperationException formatException(String operation, KubernetesClientException e) { - account ? 
new KubernetesOperationException(account, "$operation", e) :
-      new KubernetesOperationException("$operation", e)
-  }
-
-  Boolean blockUntilResourceConsistent(Object desired, Closure getGeneration, Closure getResource) {
-    def current = getResource()
-
-    def wait = RETRY_INITIAL_WAIT_MILLIS
-    def attempts = 0
-    while (getGeneration(current) < getGeneration(desired)) {
-      attempts += 1
-      if (attempts > RETRY_COUNT) {
-        return false
-      }
-
-      sleep(wait)
-      wait = [wait * 2, RETRY_MAX_WAIT_MILLIS].min()
-
-      current = getResource()
-    }
-
-    return true
-  }
-
-  /*
-   * Atomically create a new client, and pass it to the given doOperation closure to operate against the kubernetes API
-   */
-  private <T> T exceptionWrapper(String methodName, String operationMessage, String namespace, Closure doOperation) {
-    T result = null
-    Exception failure
-    long startTime = spectatorClock.monotonicTime()
-
-    try {
-      result = doOperation()
-    } catch (KubernetesClientException e) {
-      if (namespace) {
-        failure = formatException(operationMessage, namespace, e)
-      } else {
-        failure = formatException(operationMessage, e)
-      }
-    } catch (Exception e) {
-      failure = e
-    } finally {
-
-      def tags = ["method": methodName,
-                  "account": account,
-                  "namespace" : namespace ? namespace : "none",
-                  "success": failure ? "false": "true"]
-      if (failure) {
-        tags["reason"] = failure.class.simpleName
-      }
-
-      spectatorRegistry.timer(
-          spectatorRegistry.createId("kubernetes.api", tags))
-          .record(spectatorClock.monotonicTime() - startTime, TimeUnit.NANOSECONDS)
-
-      if (failure) {
-        throw failure
-      } else {
-        return result
-      }
-    }
-  }
-
-  List getEvents(String namespace, HasMetadata object) {
-    exceptionWrapper("events.list", "Get Events", namespace) {
-      client.events().inNamespace(namespace).withField("involvedObject.uid", object.metadata.uid).list().items
-    }
-  }
-
-  Map<String, List<Event>> getEvents(String namespace, String type) {
-    exceptionWrapper("events.list", "Get Events", namespace) {
-      def events = client.events().inNamespace(namespace).withField("involvedObject.kind", type).list().items
-      def eventMap = [:].withDefault { _ -> [] }
-      events.each { Event event ->
-        eventMap[event.involvedObject.name] += [event]
-      }
-      return eventMap
-    }
-  }
-
-  Ingress createIngress(String namespace, Ingress ingress) {
-    exceptionWrapper("ingresses.create", "Create Ingress ${ingress?.metadata?.name}", namespace) {
-      client.extensions().ingresses().inNamespace(namespace).create(ingress)
-    }
-  }
-
-  Ingress replaceIngress(String namespace, String name, Ingress ingress) {
-    exceptionWrapper("ingresses.replace", "Replace Ingress ${name}", namespace) {
-      client.extensions().ingresses().inNamespace(namespace).withName(name).replace(ingress)
-    }
-  }
-
-  Ingress getIngress(String namespace, String name) {
-    exceptionWrapper("ingresses.get", "Get Ingress $name", namespace) {
-      client.extensions().ingresses().inNamespace(namespace).withName(name).get()
-    }
-  }
-
-  boolean deleteIngress(String namespace, String name) {
-    exceptionWrapper("ingresses.delete", "Delete Ingress $name", namespace) {
-      client.extensions().ingresses().inNamespace(namespace).withName(name).delete()
-    }
-  }
-
-  List getIngresses(String namespace) {
-    exceptionWrapper("ingresses.list", "Get Ingresses", namespace) {
-      client.extensions().ingresses().inNamespace(namespace).list().items
-    }
-  }
-
-  List getReplicaSets(String namespace) {
-    exceptionWrapper("replicaSets.list", "Get Replica Sets", namespace) {
-      client.extensions().replicaSets().inNamespace(namespace).list().items
-    }
-  }
-
-  List
getReplicaSets(String namespace, Map labels) { - exceptionWrapper("replicaSets.list", "Get Replica Sets", namespace) { - client.extensions().replicaSets().inNamespace(namespace).withLabels(labels).list().items - } - } - - boolean hardDestroyReplicaSet(String namespace, String name) { - exceptionWrapper("replicaSets.delete", "Hard Destroy Replica Set $name", namespace) { - client.extensions().replicaSets().inNamespace(namespace).withName(name).delete() - } - } - - List getReplicaSetPods(String namespace, String replicaSetName) { - exceptionWrapper("pods.list", "Get Replica Set Pods for $replicaSetName", namespace) { - client.pods().inNamespace(namespace).withLabel(KubernetesUtil.SERVER_GROUP_LABEL, replicaSetName).list().items - } - } - - ReplicaSet getReplicaSet(String namespace, String serverGroupName) { - exceptionWrapper("replicaSets.get", "Get Replica Set $serverGroupName", namespace) { - client.extensions().replicaSets().inNamespace(namespace).withName(serverGroupName).get() - } - } - - ReplicaSet resizeReplicaSet(String namespace, String name, int size) { - exceptionWrapper("replicaSets.scale", "Resize Replica Set $name to $size", namespace) { - client.extensions().replicaSets().inNamespace(namespace).withName(name).scale(size) - } - } - - ReplicaSet createReplicaSet(String namespace, ReplicaSet replicaSet) { - exceptionWrapper("replicaSets.create", "Create Replica Set ${replicaSet?.metadata?.name}", namespace) { - client.extensions().replicaSets().inNamespace(namespace).create(replicaSet) - } - } - - List getJobPods(String namespace, String jobName) { - exceptionWrapper("pods.list", "Get JobStatus Pods for $jobName", namespace) { - client.pods().inNamespace(namespace).withLabel(KubernetesUtil.JOB_LABEL, jobName).list().items - } - } - - Pod getPod(String namespace, String name) { - exceptionWrapper("pods.get", "Get Pod $name", namespace) { - client.pods().inNamespace(namespace).withName(name).get() - } - } - - List getPods(String namespace, Map labels) { - exceptionWrapper("pods.list", "Get Pods matching $labels", namespace) { - client.pods().inNamespace(namespace).withLabels(labels).list().items - } - } - - boolean deletePod(String namespace, String name) { - exceptionWrapper("pods.delete", "Delete Pod $name", namespace) { - client.pods().inNamespace(namespace).withName(name).delete() - } - } - - List getPods(String namespace) { - exceptionWrapper("pods.list", "Get Pods", namespace) { - client.pods().inNamespace(namespace).list().items - } - } - - String getLog(String namespace, String name, String containerId) { - exceptionWrapper("pod.logs", "Get Logs $name", namespace) { - client.pods().inNamespace(namespace).withName(name).inContainer(containerId).getLog() - } - } - - List getReplicationControllers(String namespace) { - exceptionWrapper("replicationControllers.list", "Get Replication Controllers", namespace) { - client.replicationControllers().inNamespace(namespace).list().items - } - } - - List getReplicationControllerPods(String namespace, String replicationControllerName) { - exceptionWrapper("pods.list", "Get Replication Controller Pods for $replicationControllerName", namespace) { - client.pods().inNamespace(namespace).withLabel(KubernetesUtil.SERVER_GROUP_LABEL, replicationControllerName).list().items - } - } - - ReplicationController getReplicationController(String namespace, String serverGroupName) { - exceptionWrapper("replicationControllers.get", "Get Replication Controller $serverGroupName", namespace) { - 
client.replicationControllers().inNamespace(namespace).withName(serverGroupName).get() - } - } - - ReplicationController createReplicationController(String namespace, ReplicationController replicationController) { - exceptionWrapper("replicationControllers.create", "Create Replication Controller ${replicationController?.metadata?.name}", namespace) { - client.replicationControllers().inNamespace(namespace).create(replicationController) - } - } - - ReplicationController resizeReplicationController(String namespace, String name, int size) { - exceptionWrapper("replicationControllers.scale", "Resize Replication Controller $name to $size", namespace) { - client.replicationControllers().inNamespace(namespace).withName(name).scale(size) - } - } - - boolean hardDestroyReplicationController(String namespace, String name) { - exceptionWrapper("replicationControllers.delete", "Hard Destroy Replication Controller $name", namespace) { - client.replicationControllers().inNamespace(namespace).withName(name).delete() - } - } - - void togglePodLabels(String namespace, String name, List keys, String value) { - exceptionWrapper("pods.edit", "Toggle Pod Labels to $value for $name", namespace) { - def edit = client.pods().inNamespace(namespace).withName(name).edit().editMetadata() - - keys.each { - edit.removeFromLabels(it.toString()) - edit.addToLabels(it.toString(), value.toString()) - } - - edit.endMetadata().done() - } - } - - ReplicationController toggleReplicationControllerSpecLabels(String namespace, String name, List keys, String value) { - exceptionWrapper("replicationControllers.edit", "Toggle Replication Controller Labels to $value for $name", namespace) { - def edit = client.replicationControllers().inNamespace(namespace).withName(name).cascading(false).edit().editSpec().editTemplate().editMetadata() - - keys.each { - edit.removeFromLabels(it.toString()) - edit.addToLabels(it.toString(), value.toString()) - } - - edit.endMetadata().endTemplate().endSpec().done() - } - } - - ReplicaSet toggleReplicaSetSpecLabels(String namespace, String name, List keys, String value) { - exceptionWrapper("replicaSets.edit", "Toggle Replica Set Labels to $value for $name", namespace) { - def edit = client.extensions().replicaSets().inNamespace(namespace).withName(name).cascading(false).edit().editSpec().editTemplate().editMetadata() - - keys.each { - edit.removeFromLabels(it.toString()) - edit.addToLabels(it.toString(), value.toString()) - } - - edit.endMetadata().endTemplate().endSpec().done() - } - } - - Service getService(String namespace, String service) { - exceptionWrapper("services.get", "Get Service $service", namespace) { - client.services().inNamespace(namespace).withName(service).get() - } - } - - Service createService(String namespace, Service service) { - exceptionWrapper("services.create", "Create Service $service", namespace) { - client.services().inNamespace(namespace).create(service) - } - } - - boolean deleteService(String namespace, String name) { - exceptionWrapper("services.delete","Delete Service $name", namespace) { - client.services().inNamespace(namespace).withName(name).delete() - } - } - - List getServices(String namespace) { - exceptionWrapper("services.list", "Get Services", namespace) { - client.services().inNamespace(namespace).list().items - } - } - - Service replaceService(String namespace, String name, Service service) { - exceptionWrapper("services.replace", "Replace Service $name", namespace) { - client.services().inNamespace(namespace).withName(name).replace(service) - } - } - - 
Secret getSecret(String namespace, String secret) { - exceptionWrapper("secrets.get", "Get Secret $secret", namespace) { - client.secrets().inNamespace(namespace).withName(secret).get() - } - } - - DoneableSecret editSecret(String namespace, String secret) { - exceptionWrapper("secrets.edit", "Edit Secret $secret", namespace) { - client.secrets().inNamespace(namespace).withName(secret).edit() - } - } - - Secret createSecret(String namespace, Secret secret) { - exceptionWrapper("secrets.create", "Create Secret $secret", namespace) { - client.secrets().inNamespace(namespace).create(secret) - } - } - - List getSecrets(String namespace) { - exceptionWrapper("secrets.list", "Get Secrets", namespace) { - client.secrets().inNamespace(namespace).list().items - } - } - - List getServiceAccounts(String namespace) { - exceptionWrapper("serviceAccounts.list", "Get Service Accounts", namespace) { - client.serviceAccounts().inNamespace(namespace).list().items - } - } - - List getConfigMaps(String namespace) { - exceptionWrapper("configMaps.list", "Get Config Maps", namespace) { - client.configMaps().inNamespace(namespace).list().items - } - } - - Namespace getNamespace(String namespace) { - exceptionWrapper("namespaces.get", "Get Namespace $namespace", null) { - client.namespaces().withName(namespace).get() - } - } - - List getNamespaces() { - exceptionWrapper("namespaces.list", "Get Namespaces", null) { - client.namespaces().list().items - } - } - - List getNamespacesByName() { - exceptionWrapper("namespaces.list", "Get Namespaces", null) { - client.namespaces().list().items.collect { - it.metadata.name - } - } - } - - Namespace createNamespace(Namespace namespace) { - exceptionWrapper("namespaces.create", "Create Namespace $namespace", null) { - client.namespaces().create(namespace) - } - } - - Pod createPod(String namespace, Pod pod) { - exceptionWrapper("pods.create", "Create Pod ${pod?.metadata?.name}", namespace) { - client.pods().inNamespace(namespace).create(pod) - } - } - - List getJobs(String namespace) { - exceptionWrapper("jobs.list", "Get Jobs", namespace) { - client.extensions().jobs().inNamespace(namespace).list().items - } - } - - Job getJob(String namespace, String name) { - exceptionWrapper("jobs.get", "Get JobStatus $name", namespace) { - client.extensions().jobs().inNamespace(namespace).withName(name).get() - } - } - - boolean hardDestroyPod(String namespace, String name) { - exceptionWrapper("pods.delete", "Hard Destroy Pod $name", namespace) { - client.pods().inNamespace(namespace).withName(name).delete() - } - } - - HorizontalPodAutoscaler createAutoscaler(String namespace, HorizontalPodAutoscaler autoscaler) { - exceptionWrapper("horizontalPodAutoscalers.create", "Create Autoscaler ${autoscaler?.metadata?.name}", namespace) { - client.autoscaling().horizontalPodAutoscalers().inNamespace(namespace).create(autoscaler) - } - } - - DoneableHorizontalPodAutoscaler editAutoscaler(String namespace, String name) { - exceptionWrapper("horizontalPodAutoscalers.edit", "Edit Autoscaler $name", namespace) { - client.autoscaling().horizontalPodAutoscalers().inNamespace(namespace).withName(name).edit() - } - } - - HorizontalPodAutoscaler getAutoscaler(String namespace, String name) { - exceptionWrapper("horizontalPodAutoscalers.get", "Get Autoscaler $name", namespace) { - client.autoscaling().horizontalPodAutoscalers().inNamespace(namespace).withName(name).get() - } - } - - Map getAutoscalers(String namespace, String kind) { - exceptionWrapper("horizontalPodAutoscalers.list", "Get Autoscalers", 
namespace) { - def items = client.autoscaling().horizontalPodAutoscalers().inNamespace(namespace).list().items ?: [] - items.collectEntries { def autoscaler -> - autoscaler.spec.scaleTargetRef.kind == kind ? [(autoscaler.metadata.name): autoscaler] : [:] - } - } - } - - boolean deleteAutoscaler(String namespace, String name) { - exceptionWrapper("horizontalPodAutoscalers.delete", "Destroy Autoscaler $name", namespace) { - client.autoscaling().horizontalPodAutoscalers().inNamespace(namespace).withName(name).delete() - } - } - - Deployment getDeployment(String namespace, String name) { - exceptionWrapper("deployments.get", "Get Deployment $name", namespace) { - client.extensions().deployments().inNamespace(namespace).withName(name).get() - } - } - - List getDeployments(String namespace) { - exceptionWrapper("deployments.list", "Get Deployments", namespace) { - client.extensions().deployments().inNamespace(namespace).list().items - } - } - - Deployment resizeDeployment(String namespace, String name, int size) { - exceptionWrapper("deployments.scale", "Resize Deployment $name to $size", namespace) { - client.extensions().deployments().inNamespace(namespace).withName(name).scale(size) - } - } - - Deployment createDeployment(String namespace, Deployment deployment) { - exceptionWrapper("deployments.create", "Create Deployment $deployment.metadata.name", namespace) { - client.extensions().deployments().inNamespace(namespace).create(deployment) - } - } - - DoneableDeployment editDeployment(String namespace, String name) { - exceptionWrapper("deployments.edit", "Edit deployment $name", namespace) { - client.extensions().deployments().inNamespace(namespace).withName(name).edit() - } - } - - ReplicaSet annotateReplicaSet(String namespace, String name, String key, String value) { - exceptionWrapper("replicaSets.annotate", "Annotate replica set $name", namespace) { - def rs = client.extensions().replicaSets().inNamespace(namespace).withName(name).cascading(false).edit() - return rs.editMetadata().addToAnnotations(key, value).endMetadata().done() - } - } - - ReplicationController annotateReplicationController(String namespace, String name, String key, String value) { - exceptionWrapper("replicationControllers.annotate", "Annotate replication controller $name", namespace) { - def rc = client.replicationControllers().inNamespace(namespace).withName(name).cascading(false).edit() - return rc.editMetadata().addToAnnotations(key, value).endMetadata().done() - } - } - - boolean deleteDeployment(String namespace, String name) { - exceptionWrapper("deployments.delete", "Delete Deployment $name", namespace) { - client.extensions().deployments().inNamespace(namespace).withName(name).delete() - } - } - - static boolean hasDeployment(ReplicaSet replicaSet) { - return replicaSet?.metadata?.annotations?.any { k, v -> k.startsWith(DEPLOYMENT_ANNOTATION) } - } - - static String getDeploymentRevision(Deployment deployment) { - return deployment?.metadata?.annotations?.get("$DEPLOYMENT_ANNOTATION/revision".toString()) - } - - static String getDeploymentRevision(ReplicaSet replicaSet) { - return replicaSet?.metadata?.annotations?.get("$DEPLOYMENT_ANNOTATION/revision".toString()) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesApiConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesApiConverter.groovy deleted file mode 100644 index 0700585d0a8..00000000000 --- 
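Every method in the adaptor above funnels through exceptionWrapper(), which does three jobs at once: it translates KubernetesClientException into a domain exception, times the call, and tags the resulting metric with method, account, namespace, and success so API latency and failure rates can be sliced per account. A hypothetical Java equivalent of that instrumentation shape, assuming a Spectator Registry:

import com.netflix.spectator.api.Registry;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

class TimedApiCalls {
  private final Registry registry;
  private final String account;

  TimedApiCalls(Registry registry, String account) {
    this.registry = registry;
    this.account = account;
  }

  <T> T call(String method, String namespace, Supplier<T> operation) {
    long start = registry.clock().monotonicTime();
    boolean success = true;
    try {
      return operation.get();
    } catch (RuntimeException e) {
      success = false;
      throw e; // rethrown after the metric is recorded in the finally block
    } finally {
      Map<String, String> tags = new HashMap<>();
      tags.put("method", method);
      tags.put("account", account);
      tags.put("namespace", namespace == null ? "none" : namespace);
      tags.put("success", Boolean.toString(success));
      registry.timer(registry.createId("kubernetes.api", tags))
          .record(registry.clock().monotonicTime() - start, TimeUnit.NANOSECONDS);
    }
  }
}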
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesApiConverter.groovy +++ /dev/null @@ -1,1213 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.api - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler.KubernetesAutoscalerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesNamedServicePort -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesHttpIngressPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesHttpIngressRuleValue -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressBackend -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressRule -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressRuleValue -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressTlS -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.Capacity -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KeyValuePair -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesAwsElasticBlockStoreVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesCapabilities -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesConfigMapEnvSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesConfigMapSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesConfigMapVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerPort -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesCpuUtilization -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesDeployment -import 
com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesEmptyDir -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesEnvFromSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesEnvVar -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesEnvVarSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesExecAction -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesFieldRefSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHandler -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHandlerType -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHostPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHttpGetAction -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesKeyToPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesLifecycle -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesNfsVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesPersistentVolumeClaim -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesProbe -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesPullPolicy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesResourceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesResourceFieldRefSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesRollingUpdate -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesScalingPolicy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSeLinuxOptions -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSecretEnvSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSecretSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSecretVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSecurityContext -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesStorageMediumType -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesStrategy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesTcpSocketAction -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesToleration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeMount -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSourceType -import 
io.fabric8.kubernetes.api.model.AWSElasticBlockStoreVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.Container -import io.fabric8.kubernetes.api.model.ContainerBuilder -import io.fabric8.kubernetes.api.model.EmptyDirVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.EnvFromSourceBuilder -import io.fabric8.kubernetes.api.model.EnvVarBuilder -import io.fabric8.kubernetes.api.model.ExecAction -import io.fabric8.kubernetes.api.model.ExecActionBuilder -import io.fabric8.kubernetes.api.model.HTTPGetAction -import io.fabric8.kubernetes.api.model.HTTPGetActionBuilder -import io.fabric8.kubernetes.api.model.HTTPHeaderBuilder -import io.fabric8.kubernetes.api.model.Handler -import io.fabric8.kubernetes.api.model.HandlerBuilder -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscaler -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscalerBuilder -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscalerFluentImpl -import io.fabric8.kubernetes.api.model.HostPathVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.IntOrString -import io.fabric8.kubernetes.api.model.KeyToPath -import io.fabric8.kubernetes.api.model.NFSVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.PersistentVolumeClaimVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.PodTemplateSpec -import io.fabric8.kubernetes.api.model.PodTemplateSpecBuilder -import io.fabric8.kubernetes.api.model.Probe -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder -import io.fabric8.kubernetes.api.model.Service -import io.fabric8.kubernetes.api.model.TCPSocketAction -import io.fabric8.kubernetes.api.model.TCPSocketActionBuilder -import io.fabric8.kubernetes.api.model.Toleration -import io.fabric8.kubernetes.api.model.Volume -import io.fabric8.kubernetes.api.model.VolumeMountBuilder -import io.fabric8.kubernetes.api.model.apps.Deployment -import io.fabric8.kubernetes.api.model.apps.DeploymentFluentImpl -import io.fabric8.kubernetes.api.model.extensions.Ingress -import io.fabric8.kubernetes.api.model.apps.ReplicaSet -import io.fabric8.kubernetes.api.model.apps.ReplicaSetBuilder - -class KubernetesApiConverter { - static KubernetesSecurityGroupDescription fromIngress(Ingress ingress) { - if (!ingress) { - return null - } - - def securityGroupDescription = new KubernetesSecurityGroupDescription() - - securityGroupDescription.securityGroupName = ingress.metadata.name - def parse = Names.parseName(securityGroupDescription.securityGroupName) - securityGroupDescription.app = parse.app - securityGroupDescription.stack = parse.stack - securityGroupDescription.detail = parse.detail - securityGroupDescription.namespace = ingress.metadata.namespace - securityGroupDescription.annotations = ingress.metadata.annotations - securityGroupDescription.labels = ingress.metadata.labels - - securityGroupDescription.ingress = new KubernetesIngressBackend() - securityGroupDescription.ingress.port = ingress.spec.backend?.servicePort?.intVal ?: 0 - securityGroupDescription.ingress.serviceName = ingress.spec.backend?.serviceName - - securityGroupDescription.rules = ingress.spec.rules.collect { rule -> - def resRule = new KubernetesIngressRule() - resRule.host = rule.host - if (rule.http) { - resRule.value = new KubernetesIngressRuleValue(http: new KubernetesHttpIngressRuleValue()) - resRule.value.http.paths = rule.http.paths?.collect { path -> - def resPath = new 
KubernetesHttpIngressPath() - resPath.path = path.path - if (path.backend) { - resPath.ingress = new KubernetesIngressBackend(port: path.backend.servicePort?.intVal ?: 0, - serviceName: path.backend.serviceName) - } - - return resPath - } - } - - return resRule - } - - securityGroupDescription.tls = ingress.spec.tls?.collect{ tlsSpecEntry -> - return new KubernetesIngressTlS(hosts: tlsSpecEntry.hosts, secretName: tlsSpecEntry.secretName) - } - - securityGroupDescription - } - - - static KubernetesLoadBalancerDescription fromService(Service service, String accountName) { - if (!service) { - return null - } - - def loadBalancerDescription = new KubernetesLoadBalancerDescription() - - loadBalancerDescription.account = accountName - loadBalancerDescription.name = service.metadata.name - def parse = Names.parseName(loadBalancerDescription.name) - loadBalancerDescription.app = parse.app - loadBalancerDescription.stack = parse.stack - loadBalancerDescription.detail = parse.detail - loadBalancerDescription.namespace = service.metadata.namespace - - loadBalancerDescription.clusterIp = service.spec.clusterIP - loadBalancerDescription.loadBalancerIp = service.spec.loadBalancerIP - loadBalancerDescription.sessionAffinity = service.spec.sessionAffinity - loadBalancerDescription.serviceType = service.spec.type - loadBalancerDescription.serviceAnnotations = service.metadata.annotations - loadBalancerDescription.serviceLabels = service.metadata.labels - - loadBalancerDescription.externalIps = service.spec.externalIPs ?: [] - loadBalancerDescription.ports = service.spec.ports?.collect { port -> - new KubernetesNamedServicePort( - name: port.name, - protocol: port.protocol, - port: port.port ?: 0, - targetPort: port.targetPort?.intVal ?: 0, - nodePort: port.nodePort ?: 0 - ) - } - - return loadBalancerDescription - } - - static Volume toVolumeSource(KubernetesVolumeSource volumeSource) { - Volume volume = new Volume(name: volumeSource.name) - - switch (volumeSource.type) { - case KubernetesVolumeSourceType.EmptyDir: - def res = new EmptyDirVolumeSourceBuilder() - - switch (volumeSource.emptyDir.medium) { - case KubernetesStorageMediumType.Memory: - res = res.withMedium("Memory") - break - - default: - res = res.withMedium("") // Empty string is default... 
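The fromIngress and fromService converters above recover Spinnaker's app/stack/detail fields from Kubernetes resource names via Frigga's Names.parseName. A minimal sketch of that naming round-trip; the server-group name and the expected field values are illustrative, not taken from this change:

    import com.netflix.frigga.Names

    // Hypothetical name following the <app>-<stack>-<detail>-v<sequence> convention.
    def parsed = Names.parseName("orca-main-canary-v003")
    assert parsed.app == "orca"
    assert parsed.stack == "main"
    assert parsed.detail == "canary"
    assert parsed.cluster == "orca-main-canary"
    assert parsed.sequence == 3
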
- } - - volume.emptyDir = res.build() - break - - case KubernetesVolumeSourceType.HostPath: - def res = new HostPathVolumeSourceBuilder().withPath(volumeSource.hostPath.path) - - volume.hostPath = res.build() - break - - case KubernetesVolumeSourceType.PersistentVolumeClaim: - def res = new PersistentVolumeClaimVolumeSourceBuilder() - .withClaimName(volumeSource.persistentVolumeClaim.claimName) - .withReadOnly(volumeSource.persistentVolumeClaim.readOnly) - - volume.persistentVolumeClaim = res.build() - break - - case KubernetesVolumeSourceType.Secret: - def res = new SecretVolumeSourceBuilder() - .withSecretName(volumeSource.secret.secretName) - - volume.secret = res.build() - break - - case KubernetesVolumeSourceType.ConfigMap: - def res = new ConfigMapVolumeSourceBuilder().withName(volumeSource.configMap.configMapName) - def items = volumeSource.configMap.items?.collect { KubernetesKeyToPath item -> - new KeyToPath(key: item.key, path: item.path) - } - - res = res.withItems(items) - volume.configMap = res.build() - break - - case KubernetesVolumeSourceType.AwsElasticBlockStore: - def res = new AWSElasticBlockStoreVolumeSourceBuilder().withVolumeID(volumeSource.awsElasticBlockStore.volumeId) - res = res.withFsType(volumeSource.awsElasticBlockStore.fsType) - - if (volumeSource.awsElasticBlockStore.partition) { - res = res.withPartition(volumeSource.awsElasticBlockStore.partition) - } - - volume.awsElasticBlockStore = res.build() - break - - case KubernetesVolumeSourceType.NFS: - def res = new NFSVolumeSourceBuilder() - .withServer(volumeSource.nfs.server) - .withPath(volumeSource.nfs.path) - .withReadOnly(volumeSource.nfs.readOnly) - - volume.nfs = res.build() - break - - default: - return null - } - - return volume - } - - static ExecAction toExecAction(KubernetesExecAction action) { - def execActionBuilder = new ExecActionBuilder() - execActionBuilder = execActionBuilder.withCommand(action.commands) - return execActionBuilder.build() - } - - static TCPSocketAction toTcpSocketAction(KubernetesTcpSocketAction action) { - def tcpActionBuilder = new TCPSocketActionBuilder() - tcpActionBuilder = tcpActionBuilder.withNewPort(action.port) - return tcpActionBuilder.build() - } - - static HTTPGetAction toHttpGetAction(KubernetesHttpGetAction action) { - def httpGetActionBuilder = new HTTPGetActionBuilder() - - if (action.host) { - httpGetActionBuilder = httpGetActionBuilder.withHost(action.host) - } - - if (action.path) { - httpGetActionBuilder = httpGetActionBuilder.withPath(action.path) - } - - httpGetActionBuilder = httpGetActionBuilder.withPort(new IntOrString(action.port)) - - if (action.uriScheme) { - httpGetActionBuilder = httpGetActionBuilder.withScheme(action.uriScheme) - } - - if (action.httpHeaders) { - def headers = action.httpHeaders.collect() { - def builder = new HTTPHeaderBuilder() - return builder.withName(it.name).withValue(it.value).build() - } - - httpGetActionBuilder.withHttpHeaders(headers) - } - - return httpGetActionBuilder.build() - } - - static Handler toHandler(KubernetesHandler handler) { - def handlerBuilder = new HandlerBuilder() - - switch (handler.type) { - case KubernetesHandlerType.EXEC: - handlerBuilder = handlerBuilder.withExec(toExecAction(handler.execAction)) - break - - case KubernetesHandlerType.TCP: - handlerBuilder = handlerBuilder.withTcpSocket(toTcpSocketAction(handler.tcpSocketAction)) - break - - case KubernetesHandlerType.HTTP: - handlerBuilder = handlerBuilder.withHttpGet(toHttpGetAction(handler.httpGetAction)) - break - } - - return 
handlerBuilder.build() - } - - static Container toContainer(KubernetesContainerDescription container) { - KubernetesUtil.normalizeImageDescription(container.imageDescription) - def imageId = KubernetesUtil.getImageId(container.imageDescription) - def containerBuilder = new ContainerBuilder().withName(container.name).withImage(imageId) - - if (container.imagePullPolicy) { - containerBuilder = containerBuilder.withImagePullPolicy(container.imagePullPolicy.toString()) - } else { - containerBuilder = containerBuilder.withImagePullPolicy("IfNotPresent") - } - - if (container.ports) { - container.ports.forEach { - containerBuilder = containerBuilder.addNewPort() - if (it.name) { - containerBuilder = containerBuilder.withName(it.name) - } - - if (it.containerPort) { - containerBuilder = containerBuilder.withContainerPort(it.containerPort) - } - - if (it.hostPort) { - containerBuilder = containerBuilder.withHostPort(it.hostPort) - } - - if (it.protocol) { - containerBuilder = containerBuilder.withProtocol(it.protocol) - } - - if (it.hostIp) { - containerBuilder = containerBuilder.withHostIP(it.hostIp) - } - containerBuilder = containerBuilder.endPort() - } - } - - if (container.securityContext) { - def securityContext = container.securityContext - - containerBuilder = containerBuilder.withNewSecurityContext() - - containerBuilder.withRunAsNonRoot(securityContext.runAsNonRoot) - .withRunAsUser(securityContext.runAsUser) - .withPrivileged(securityContext.privileged) - .withReadOnlyRootFilesystem(securityContext.readOnlyRootFilesystem) - - if (securityContext.seLinuxOptions) { - def seLinuxOptions = securityContext.seLinuxOptions - - containerBuilder = containerBuilder.withNewSeLinuxOptions() - .withUser(seLinuxOptions.user) - .withRole(seLinuxOptions.role) - .withType(seLinuxOptions.type) - .withLevel(seLinuxOptions.level) - .endSeLinuxOptions() - } - - if (securityContext.capabilities) { - def capabilities = securityContext.capabilities - - containerBuilder = containerBuilder.withNewCapabilities() - .withAdd(capabilities.add) - .withDrop(capabilities.drop) - .endCapabilities() - } - - containerBuilder = containerBuilder.endSecurityContext() - } - - [liveness: container.livenessProbe, readiness: container.readinessProbe].each { k, v -> - def probe = v - if (probe) { - switch (k) { - case 'liveness': - containerBuilder = containerBuilder.withNewLivenessProbe() - break - case 'readiness': - containerBuilder = containerBuilder.withNewReadinessProbe() - break - default: - throw new IllegalArgumentException("Probe type $k not supported") - } - - containerBuilder = containerBuilder.withInitialDelaySeconds(probe.initialDelaySeconds) - - if (probe.timeoutSeconds) { - containerBuilder = containerBuilder.withTimeoutSeconds(probe.timeoutSeconds) - } - - if (probe.failureThreshold) { - containerBuilder = containerBuilder.withFailureThreshold(probe.failureThreshold) - } - - if (probe.successThreshold) { - containerBuilder = containerBuilder.withSuccessThreshold(probe.successThreshold) - } - - if (probe.periodSeconds) { - containerBuilder = containerBuilder.withPeriodSeconds(probe.periodSeconds) - } - - switch (probe.handler.type) { - case KubernetesHandlerType.EXEC: - containerBuilder = containerBuilder.withExec(toExecAction(probe.handler.execAction)) - break - - case KubernetesHandlerType.TCP: - containerBuilder = containerBuilder.withTcpSocket(toTcpSocketAction(probe.handler.tcpSocketAction)) - break - - case KubernetesHandlerType.HTTP: - containerBuilder = 
containerBuilder.withHttpGet(toHttpGetAction(probe.handler.httpGetAction)) - break - } - - switch (k) { - case 'liveness': - containerBuilder = containerBuilder.endLivenessProbe() - break - case 'readiness': - containerBuilder = containerBuilder.endReadinessProbe() - break - default: - throw new IllegalArgumentException("Probe type $k not supported") - } - } - } - - if (container.lifecycle) { - containerBuilder = containerBuilder.withNewLifecycle() - if (container.lifecycle.postStart) { - containerBuilder = containerBuilder.withPostStart(toHandler(container.lifecycle.postStart)) - } - if (container.lifecycle.preStop) { - containerBuilder = containerBuilder.withPreStop(toHandler(container.lifecycle.preStop)) - } - containerBuilder = containerBuilder.endLifecycle() - } - - containerBuilder = containerBuilder.withNewResources() - if (container.requests) { - def requests = [:] - - if (container.requests.memory) { - requests.memory = container.requests.memory - } - - if (container.requests.cpu) { - requests.cpu = container.requests.cpu - } - containerBuilder = containerBuilder.withRequests(requests) - } - - if (container.limits) { - def limits = [:] - - if (container.limits.memory) { - limits.memory = container.limits.memory - } - - if (container.limits.cpu) { - limits.cpu = container.limits.cpu - } - - containerBuilder = containerBuilder.withLimits(limits) - } - - containerBuilder = containerBuilder.endResources() - - if (container.volumeMounts) { - def volumeMounts = container.volumeMounts.collect { mount -> - def res = new VolumeMountBuilder() - - return res.withMountPath(mount.mountPath) - .withName(mount.name) - .withReadOnly(mount.readOnly) - .withSubPath(mount.subPath) - .build() - } - - containerBuilder = containerBuilder.withVolumeMounts(volumeMounts) - } - - if (container.envVars) { - def envVars = container.envVars.collect { envVar -> - def res = (new EnvVarBuilder()).withName(envVar.name) - if (envVar.value) { - res = res.withValue(envVar.value) - } else if (envVar.envSource) { - res = res.withNewValueFrom() - if (envVar.envSource.configMapSource) { - def configMap = envVar.envSource.configMapSource - res = res.withNewConfigMapKeyRef(configMap.key, configMap.configMapName, configMap.optional) - } else if (envVar.envSource.secretSource) { - def secret = envVar.envSource.secretSource - res = res.withNewSecretKeyRef(secret.key, secret.secretName, secret.optional) - } else if (envVar.envSource.fieldRef) { - def fieldPath = envVar.envSource.fieldRef.fieldPath - res = res.withNewFieldRef().withFieldPath(fieldPath).endFieldRef() - } else if (envVar.envSource.resourceFieldRef) { - def resource = envVar.envSource.resourceFieldRef.resource - def containerName = envVar.envSource.resourceFieldRef.containerName - def divisor = envVar.envSource.resourceFieldRef.divisor - res = res.withNewResourceFieldRef().withResource(resource) - res = res.withContainerName(containerName) - res = res.withNewDivisor(divisor).endResourceFieldRef() - } else { - return null - } - res = res.endValueFrom() - } else { - return null - } - return res.build() - } - null - - containerBuilder = containerBuilder.withEnv(envVars) - } - - if (container.envFrom) { - def envFrom = container.envFrom.collect { envFrom -> - def res = (new EnvFromSourceBuilder()).withPrefix(envFrom.prefix ?: '') - if (envFrom.configMapRef) { - def configMapRef = envFrom.configMapRef - res = res.withNewConfigMapRef(configMapRef.name, configMapRef.optional) - } else if (envFrom.secretRef) { - def secretRef = envFrom.secretRef - res = 
res.withNewSecretRef(secretRef.name, secretRef.optional) - } else { - return null - } - return res.build() - } - null - - containerBuilder.withEnvFrom(envFrom) - } - - if (container.command) { - containerBuilder = containerBuilder.withCommand(container.command) - } - - if (container.args) { - containerBuilder = containerBuilder.withArgs(container.args) - } - - return containerBuilder.build() - } - - static KubernetesContainerDescription fromContainer(Container container) { - if (!container) { - return null - } - - def containerDescription = new KubernetesContainerDescription() - containerDescription.name = container.name - containerDescription.imageDescription = KubernetesUtil.buildImageDescription(container.image) - - if (container.imagePullPolicy) { - containerDescription.imagePullPolicy = KubernetesPullPolicy.valueOf(container.imagePullPolicy) - } - - container.resources?.with { - containerDescription.limits = limits?.cpu?.amount || limits?.memory?.amount ? - new KubernetesResourceDescription( - cpu: limits?.cpu?.amount, - memory: limits?.memory?.amount - ) : null - - containerDescription.requests = requests?.cpu?.amount || requests?.memory?.amount ? - new KubernetesResourceDescription( - cpu: requests?.cpu?.amount, - memory: requests?.memory?.amount - ) : null - } - - if (container.lifecycle) { - containerDescription.lifecycle = new KubernetesLifecycle() - if (container.lifecycle.postStart) { - containerDescription.lifecycle.postStart = fromHandler(container.lifecycle.postStart) - } - if (container.lifecycle.preStop) { - containerDescription.lifecycle.preStop = fromHandler(container.lifecycle.preStop) - } - } - - containerDescription.ports = container.ports?.collect { - def port = new KubernetesContainerPort() - port.hostIp = it?.hostIP - if (it?.hostPort) { - port.hostPort = it?.hostPort?.intValue() - } - if (it?.containerPort) { - port.containerPort = it?.containerPort?.intValue() - } - port.name = it?.name - port.protocol = it?.protocol - - return port - } - - if (container.securityContext) { - def securityContext = container.securityContext - - containerDescription.securityContext = new KubernetesSecurityContext(privileged: securityContext.privileged, - runAsNonRoot: securityContext.runAsNonRoot, - runAsUser: securityContext.runAsUser, - readOnlyRootFilesystem: securityContext.readOnlyRootFilesystem - ) - - if (securityContext.capabilities) { - def capabilities = securityContext.capabilities - - containerDescription.securityContext.capabilities = new KubernetesCapabilities(add: capabilities.add, drop: capabilities.drop) - } - - if (securityContext.seLinuxOptions) { - def seLinuxOptions = securityContext.seLinuxOptions - - containerDescription.securityContext.seLinuxOptions = new KubernetesSeLinuxOptions(user: seLinuxOptions.user, - role: seLinuxOptions.role, - type: seLinuxOptions.type, - level: seLinuxOptions.level - ) - } - } - - containerDescription.livenessProbe = fromProbe(container?.livenessProbe) - containerDescription.readinessProbe = fromProbe(container?.readinessProbe) - - containerDescription.envVars = container?.env?.collect { envVar -> - def result = new KubernetesEnvVar(name: envVar.name) - if (envVar.value) { - result.value = envVar.value - } else if (envVar.valueFrom) { - def source = new KubernetesEnvVarSource() - if (envVar.valueFrom.configMapKeyRef) { - def configMap = envVar.valueFrom.configMapKeyRef - source.configMapSource = new KubernetesConfigMapSource(key: configMap.key, configMapName: configMap.name) - } else if (envVar.valueFrom.secretKeyRef) { - def 
secret = envVar.valueFrom.secretKeyRef - source.secretSource = new KubernetesSecretSource(key: secret.key, secretName: secret.name) - } else if (envVar.valueFrom.fieldRef) { - def fieldPath = envVar.valueFrom.fieldRef.fieldPath; - source.fieldRef = new KubernetesFieldRefSource(fieldPath: fieldPath) - } else if (envVar.valueFrom.resourceFieldRef) { - def resource = envVar.valueFrom.resourceFieldRef.resource - def containerName = envVar.valueFrom.resourceFieldRef.containerName - def divisor = envVar.valueFrom.resourceFieldRef.divisor - source.resourceFieldRef = new KubernetesResourceFieldRefSource(resource: resource, - containerName: containerName, - divisor: divisor) - } else { - return null - } - result.envSource = source - } else { - return null - } - return result - } - null - - containerDescription.envFrom = container?.envFrom?.collect { envFrom -> - def result = new KubernetesEnvFromSource(prefix: envFrom.prefix) - if (envFrom.configMapRef) { - def source = envFrom.configMapRef - result.configMapRef = new KubernetesConfigMapEnvSource(name: source.name, optional: source.optional ?: false) - } else if (envFrom.secretRef) { - def source = envFrom.secretRef - result.secretRef = new KubernetesSecretEnvSource(name: source.name, optional: source.optional ?: false) - } else { - return null - } - return result - } - null - - containerDescription.volumeMounts = container?.volumeMounts?.collect { volumeMount -> - new KubernetesVolumeMount( - name: volumeMount.name, - readOnly: volumeMount.readOnly, - mountPath: volumeMount.mountPath, - subPath: volumeMount.subPath - ) - } - - containerDescription.args = container?.args ?: [] - containerDescription.command = container?.command ?: [] - - return containerDescription - } - - static KubernetesVolumeSource fromVolume(Volume volume) { - def res = new KubernetesVolumeSource(name: volume.name) - - if (volume.emptyDir) { - res.type = KubernetesVolumeSourceType.EmptyDir - def medium = volume.emptyDir.medium - def mediumType - - if (medium == "Memory") { - mediumType = KubernetesStorageMediumType.Memory - } else { - mediumType = KubernetesStorageMediumType.Default - } - - res.emptyDir = new KubernetesEmptyDir(medium: mediumType) - } else if (volume.hostPath) { - res.type = KubernetesVolumeSourceType.HostPath - res.hostPath = new KubernetesHostPath(path: volume.hostPath.path) - } else if (volume.persistentVolumeClaim) { - res.type = KubernetesVolumeSourceType.PersistentVolumeClaim - res.persistentVolumeClaim = new KubernetesPersistentVolumeClaim(claimName: volume.persistentVolumeClaim.claimName, - readOnly: volume.persistentVolumeClaim.readOnly) - } else if (volume.secret) { - res.type = KubernetesVolumeSourceType.Secret - res.secret = new KubernetesSecretVolumeSource(secretName: volume.secret.secretName) - } else if (volume.configMap) { - res.type = KubernetesVolumeSourceType.ConfigMap - def items = volume.configMap.items?.collect { KeyToPath item -> - new KubernetesKeyToPath(key: item.key, path: item.path) - } - res.configMap = new KubernetesConfigMapVolumeSource(configMapName: volume.configMap.name, items: items) - } else if (volume.awsElasticBlockStore) { - res.type = KubernetesVolumeSourceType.AwsElasticBlockStore - def ebs = volume.awsElasticBlockStore - res.awsElasticBlockStore = new KubernetesAwsElasticBlockStoreVolumeSource(volumeId: ebs.volumeID, - fsType: ebs.fsType, - partition: ebs.partition) - } else if (volume.nfs) { - res.type = KubernetesVolumeSourceType.NFS - def nfs = volume.nfs - res.nfs = new KubernetesNfsVolumeSource(server: nfs.server, 
path: nfs.path, readOnly: nfs.readOnly) - } else { - res.type = KubernetesVolumeSourceType.Unsupported - } - - return res - } - - static DeployKubernetesAtomicOperationDescription fromReplicaSet(ReplicaSet replicaSet) { - def deployDescription = new DeployKubernetesAtomicOperationDescription() - def parsedName = Names.parseName(replicaSet?.metadata?.name) - - deployDescription.application = parsedName?.app - deployDescription.stack = parsedName?.stack - deployDescription.freeFormDetails = parsedName?.detail - deployDescription.loadBalancers = KubernetesUtil?.getLoadBalancers(replicaSet) - deployDescription.namespace = replicaSet?.metadata?.namespace - deployDescription.targetSize = replicaSet?.spec?.replicas - deployDescription.securityGroups = [] - deployDescription.replicaSetAnnotations = replicaSet?.metadata?.annotations - deployDescription.podAnnotations = replicaSet?.spec?.template?.metadata?.annotations - - deployDescription.volumeSources = replicaSet?.spec?.template?.spec?.volumes?.collect { - fromVolume(it) - } ?: [] - - deployDescription.hostNetwork = replicaSet?.spec?.template?.spec?.hostNetwork - - deployDescription.containers = replicaSet?.spec?.template?.spec?.containers?.collect { - fromContainer(it) - } ?: [] - - deployDescription.initContainers = replicaSet?.spec?.template?.spec?.initContainers?.collect { - fromContainer(it) - } ?: [] - - deployDescription.terminationGracePeriodSeconds = replicaSet?.spec?.template?.spec?.terminationGracePeriodSeconds - deployDescription.serviceAccountName = replicaSet?.spec?.template?.spec?.serviceAccountName - - deployDescription.nodeSelector = replicaSet?.spec?.template?.spec?.nodeSelector - deployDescription.dnsPolicy = replicaSet?.spec?.template?.spec?.dnsPolicy - - deployDescription.tolerations = replicaSet?.spec?.template?.spec?.tolerations?.collect { - fromToleration(it) - } ?: [] - - return deployDescription - } - - static void attachAutoscaler(DeployKubernetesAtomicOperationDescription description, HorizontalPodAutoscaler autoscaler) { - description.capacity = new Capacity(min: autoscaler.spec.minReplicas, - max: autoscaler.spec.maxReplicas, - desired: description.targetSize) - def cpuUtilization = new KubernetesCpuUtilization(target: autoscaler.spec.targetCPUUtilizationPercentage) - description.scalingPolicy = new KubernetesScalingPolicy(cpuUtilization: cpuUtilization) - } - - static HorizontalPodAutoscalerFluentImpl toAutoscaler(HorizontalPodAutoscalerFluentImpl autoscalerBuilder, - KubernetesAutoscalerDescription description, - String resourceName, - String resourceKind, - String apiVersion) { - autoscalerBuilder.withNewMetadata() - .withName(resourceName) - .withNamespace(description.namespace) - .endMetadata() - .withNewSpec() - .withMinReplicas(description.capacity.min) - .withMaxReplicas(description.capacity.max) - .withTargetCPUUtilizationPercentage(description.scalingPolicy.cpuUtilization.target) - .withNewScaleTargetRef() - .withKind(resourceKind) - .withName(resourceName) - .withApiVersion(apiVersion) - .endScaleTargetRef() - .endSpec() - } - - static DeployKubernetesAtomicOperationDescription fromReplicationController(ReplicationController replicationController) { - def deployDescription = new DeployKubernetesAtomicOperationDescription() - def parsedName = Names.parseName(replicationController?.metadata?.name) - - deployDescription.application = parsedName?.app - deployDescription.stack = parsedName?.stack - deployDescription.freeFormDetails = parsedName?.detail - deployDescription.loadBalancers = 
KubernetesUtil?.getLoadBalancers(replicationController) - deployDescription.namespace = replicationController?.metadata?.namespace - deployDescription.targetSize = replicationController?.spec?.replicas - deployDescription.securityGroups = [] - - deployDescription.volumeSources = replicationController?.spec?.template?.spec?.volumes?.collect { - fromVolume(it) - } ?: [] - - deployDescription.containers = replicationController?.spec?.template?.spec?.containers?.collect { - fromContainer(it) - } ?: [] - - deployDescription.initContainers = replicationController?.spec?.template?.spec?.initContainers?.collect { - fromContainer(it) - } ?: [] - - deployDescription.terminationGracePeriodSeconds = replicationController?.spec?.template?.spec?.terminationGracePeriodSeconds - deployDescription.serviceAccountName = replicationController.spec?.template?.spec?.serviceAccountName - - deployDescription.nodeSelector = replicationController?.spec?.template?.spec?.nodeSelector - deployDescription.dnsPolicy = replicationController?.spec?.template?.spec?.dnsPolicy - - return deployDescription - } - - static KubernetesHandler fromHandler(Handler handler) { - def kubernetesHandler = new KubernetesHandler() - if (handler.exec) { - kubernetesHandler.execAction = fromExecAction(handler.exec) - kubernetesHandler.type = KubernetesHandlerType.EXEC - } - - if (handler.tcpSocket) { - kubernetesHandler.tcpSocketAction = fromTcpSocketAction(handler.tcpSocket) - kubernetesHandler.type = KubernetesHandlerType.TCP - } - - if (handler.httpGet) { - kubernetesHandler.httpGetAction = fromHttpGetAction(handler.httpGet) - kubernetesHandler.type = KubernetesHandlerType.HTTP - } - - return kubernetesHandler - } - - static KubernetesProbe fromProbe(Probe probe) { - if (!probe) { - return null - } - - def kubernetesProbe = new KubernetesProbe() - kubernetesProbe.failureThreshold = probe.failureThreshold ?: 0 - kubernetesProbe.successThreshold = probe.successThreshold ?: 0 - kubernetesProbe.timeoutSeconds = probe.timeoutSeconds ?: 0 - kubernetesProbe.periodSeconds = probe.periodSeconds ?: 0 - kubernetesProbe.initialDelaySeconds = probe.initialDelaySeconds ?: 0 - kubernetesProbe.handler = new KubernetesHandler() - - if (probe.exec) { - kubernetesProbe.handler.execAction = fromExecAction(probe.exec) - kubernetesProbe.handler.type = KubernetesHandlerType.EXEC - } - - if (probe.tcpSocket) { - kubernetesProbe.handler.tcpSocketAction = fromTcpSocketAction(probe.tcpSocket) - kubernetesProbe.handler.type = KubernetesHandlerType.TCP - } - - if (probe.httpGet) { - kubernetesProbe.handler.httpGetAction = fromHttpGetAction(probe.httpGet) - kubernetesProbe.handler.type = KubernetesHandlerType.HTTP - } - - return kubernetesProbe - } - - static KubernetesExecAction fromExecAction(ExecAction exec) { - if (!exec) { - return null - } - - def kubernetesExecAction = new KubernetesExecAction() - kubernetesExecAction.commands = exec.command - return kubernetesExecAction - } - - static KubernetesTcpSocketAction fromTcpSocketAction(TCPSocketAction tcpSocket) { - if (!tcpSocket) { - return null - } - - def kubernetesTcpSocketAction = new KubernetesTcpSocketAction() - kubernetesTcpSocketAction.port = tcpSocket.port?.intVal ?: 0 - return kubernetesTcpSocketAction - } - - static KubernetesHttpGetAction fromHttpGetAction(HTTPGetAction httpGet) { - if (!httpGet) { - return null - } - - def kubernetesHttpGetAction = new KubernetesHttpGetAction() - kubernetesHttpGetAction.host = httpGet.host - kubernetesHttpGetAction.path = httpGet.path - kubernetesHttpGetAction.port = 
httpGet.port?.intVal ?: 0 - kubernetesHttpGetAction.uriScheme = httpGet.scheme - kubernetesHttpGetAction.httpHeaders = httpGet.httpHeaders?.collect() { - new KeyValuePair(name: it.name, value: it.value) - } - return kubernetesHttpGetAction - } - - static ReplicaSet toReplicaSet(ReplicaSetBuilder serverGroupBuilder, - DeployKubernetesAtomicOperationDescription description, - String replicaSetName) { - - def targetSize - if (description.targetSize == 0) { - targetSize = description.targetSize - } - else { - targetSize = description.targetSize ?: description.capacity?.desired - } - - return serverGroupBuilder.withNewMetadata() - .withName(replicaSetName) - .withAnnotations(description.replicaSetAnnotations) - .endMetadata() - .withNewSpec() - .withNewSelector() - .withMatchLabels(baseServerGroupLabels(description, replicaSetName) + restrictedServerGroupLabels(replicaSetName)) - .endSelector() - .withReplicas(targetSize) - .withNewTemplateLike(toPodTemplateSpec(description, replicaSetName)) - .endTemplate() - .endSpec() - .build() - } - - static DeploymentFluentImpl toDeployment(DeploymentFluentImpl serverGroupBuilder, - DeployKubernetesAtomicOperationDescription description, - String replicaSetName) { - - def parsedName = Names.parseName(replicaSetName) - def targetSize - if (description.targetSize == 0) { - targetSize = description.targetSize - } - else { - targetSize = description.targetSize ?: description.capacity?.desired - } - - def builder = serverGroupBuilder.withNewMetadata() - .withName(parsedName.cluster) - .withAnnotations(description.replicaSetAnnotations) - .endMetadata() - .withNewSpec() - .withNewSelector() - .withMatchLabels(baseServerGroupLabels(description, replicaSetName)) - .endSelector() - .withReplicas(targetSize) - .withNewTemplateLike(toPodTemplateSpec(description, replicaSetName)) - .endTemplate() - .withMinReadySeconds(description.deployment.minReadySeconds) - .withRevisionHistoryLimit(description.deployment.revisionHistoryLimit) - - if (description.deployment.deploymentStrategy) { - def strategy = description.deployment.deploymentStrategy - builder = builder.withNewStrategy() - .withType(strategy.type.toString()) - - if (strategy.rollingUpdate) { - def rollingUpdate = strategy.rollingUpdate - - builder = builder.withNewRollingUpdate() - - if (rollingUpdate.maxSurge) { - def maxSurge = rollingUpdate.maxSurge - if (maxSurge.isInteger()) { - maxSurge = maxSurge as int - } - builder = builder.withNewMaxSurge(maxSurge) - } - - if (rollingUpdate.maxUnavailable) { - def maxUnavailable = rollingUpdate.maxUnavailable - if (maxUnavailable.isInteger()) { - maxUnavailable = maxUnavailable as int - } - builder = builder.withNewMaxUnavailable(maxUnavailable) - } - - builder = builder.endRollingUpdate() - } - - builder = builder.endStrategy() - } - - return builder.endSpec() - } - - static KubernetesDeployment fromDeployment(Deployment deployment) { - if (!deployment) { - return null - } - - def kubernetesDeployment = new KubernetesDeployment() - - kubernetesDeployment.enabled = true - kubernetesDeployment.minReadySeconds = deployment.spec.minReadySeconds ?: 0 - kubernetesDeployment.revisionHistoryLimit = deployment.spec.revisionHistoryLimit - - if (deployment.spec.strategy) { - def strategy = deployment.spec.strategy - def deploymentStrategy = new KubernetesStrategy() - - deploymentStrategy.type = strategy.type - - if (strategy.rollingUpdate) { - def update = strategy.rollingUpdate - def rollingUpdate = new KubernetesRollingUpdate() - - rollingUpdate.maxSurge = 
update.maxSurge.getStrVal() ?: update.maxSurge.getIntVal().toString() - rollingUpdate.maxUnavailable = update.maxUnavailable.getStrVal() ?: update.maxUnavailable.getIntVal().toString() - - deploymentStrategy.rollingUpdate = rollingUpdate - } - - kubernetesDeployment.deploymentStrategy = deploymentStrategy - } - - return kubernetesDeployment - } - - static KubernetesToleration fromToleration(Toleration toleration) { - def t = new KubernetesToleration() - t.effect = toleration.effect - t.key = toleration.key - t.operator = toleration.operator - t.tolerationSeconds = toleration.tolerationSeconds - t.value = toleration.value - return t - } - - static Toleration toToleration(KubernetesToleration toleration) { - return new Toleration( - toleration.effect.toString(), - toleration.key, - toleration.operator.toString(), - toleration.tolerationSeconds, - toleration.value - ) - } - - static PodTemplateSpec toPodTemplateSpec(DeployKubernetesAtomicOperationDescription description, String name) { - def podTemplateSpecBuilder = new PodTemplateSpecBuilder() - .withNewMetadata() - .addToLabels(baseServerGroupLabels(description, name) + restrictedServerGroupLabels(name)) - - for (def loadBalancer : description.loadBalancers) { - podTemplateSpecBuilder = podTemplateSpecBuilder.addToLabels(KubernetesUtil.loadBalancerKey(loadBalancer), "true") - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withAnnotations(description.podAnnotations) - .endMetadata() - .withNewSpec() - - if (description.restartPolicy) { - podTemplateSpecBuilder.withRestartPolicy(description.restartPolicy) - } - - if (description.dnsPolicy) { - podTemplateSpecBuilder.withDnsPolicy(description.dnsPolicy.name()) - } - - if (description.terminationGracePeriodSeconds) { - podTemplateSpecBuilder.withTerminationGracePeriodSeconds(description.terminationGracePeriodSeconds) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withImagePullSecrets() - - for (def imagePullSecret : description.imagePullSecrets) { - podTemplateSpecBuilder = podTemplateSpecBuilder.addNewImagePullSecret(imagePullSecret) - } - - if (description.serviceAccountName) { - podTemplateSpecBuilder = podTemplateSpecBuilder.withServiceAccountName(description.serviceAccountName) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withNodeSelector(description.nodeSelector) - - if (description.volumeSources) { - def volumeSources = description.volumeSources.findResults { volumeSource -> - toVolumeSource(volumeSource) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withVolumes(volumeSources) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withHostNetwork(description.hostNetwork) - - def containers = description.containers.collect { container -> - toContainer(container) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withContainers(containers) - - def initContainers = description.initContainers.collect { initContainer -> - toContainer(initContainer) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withInitContainers(initContainers) - - - def tolerations = description.tolerations.collect { toleration -> - toToleration(toleration) - } - - podTemplateSpecBuilder = podTemplateSpecBuilder.withTolerations(tolerations) - - return podTemplateSpecBuilder.endSpec().build() - } - - static boolean hasDeployment(DeployKubernetesAtomicOperationDescription description) { - return description.deployment?.enabled - } - - /* - * This represents the set of labels that ties deployments, replica sets, and pods together - */ - static Map 
baseServerGroupLabels(DeployKubernetesAtomicOperationDescription description, String name) { - def parsedName = Names.parseName(name) - return hasDeployment(description) ? [(parsedName.cluster): "true"] : [(name): "true"] - } - - /* - * This represents the set of labels that differentiate replica sets from deployments - these are needed so - * different replica sets under the same deployment don't apply to the same pods - */ - static Map restrictedServerGroupLabels(String name) { - def parsedName = Names.parseName(name) - def labels = [ - "version": parsedName.sequence?.toString() ?: "na", - "app": parsedName.app, - "cluster": parsedName.cluster, - ] - - if (parsedName.stack) { - labels += ["stack": parsedName.stack] - } - - if (parsedName.detail) { - labels += ["detail": parsedName.detail] - } - - labels += [(KubernetesUtil.SERVER_GROUP_LABEL): name] - - return labels - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesClientApiAdapter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesClientApiAdapter.groovy deleted file mode 100644 index f1ebd97970b..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesClientApiAdapter.groovy +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright 2017 Cisco, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
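The two label helpers that close KubernetesApiConverter above are what tie a deployment, its replica sets, and its pods together: baseServerGroupLabels names the cluster (or the server group itself), while restrictedServerGroupLabels pins pods to exactly one replica set. A sketch of the maps they would produce for a hypothetical server group outside a deployment; the "replication-controller" key is an assumption about the value of KubernetesUtil.SERVER_GROUP_LABEL, and the name is illustrative:

    // For name "orca-main-v003" with hasDeployment(description) == false,
    // assuming KubernetesUtil.SERVER_GROUP_LABEL == "replication-controller":
    def base = ["orca-main-v003": "true"]           // baseServerGroupLabels
    def restricted = [                              // restrictedServerGroupLabels
      version: "3",                                 // parsed sequence, or "na"
      app: "orca",
      cluster: "orca-main",
      stack: "main",                                // only added when present
      "replication-controller": "orca-main-v003",
    ]
    def matchLabels = base + restricted             // selector used by toReplicaSet
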
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.api - -import com.fasterxml.jackson.databind.JsonNode -import com.fasterxml.jackson.databind.ObjectMapper -import com.github.fge.jsonpatch.diff.JsonDiff -import com.netflix.spectator.api.Clock -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesApiClientConfig -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesClientOperationException -import groovy.util.logging.Slf4j -import io.kubernetes.client.ApiClient -import io.kubernetes.client.ApiException -import io.kubernetes.client.Configuration -import io.kubernetes.client.apis.AppsV1beta1Api -import io.kubernetes.client.apis.AutoscalingV1Api -import io.kubernetes.client.apis.CoreV1Api -import io.kubernetes.client.apis.ExtensionsV1beta1Api -import io.kubernetes.client.models.V1DeleteOptions -import io.kubernetes.client.models.V1HorizontalPodAutoscaler -import io.kubernetes.client.models.V1NamespaceList -import io.kubernetes.client.models.V1Pod -import io.kubernetes.client.models.V1PodList -import io.kubernetes.client.models.V1Status -import io.kubernetes.client.models.V1beta1DaemonSet -import io.kubernetes.client.models.V1beta1DaemonSetList -import io.kubernetes.client.models.V1beta1StatefulSet -import io.kubernetes.client.models.V1beta1StatefulSetList - -import java.util.concurrent.TimeUnit - -@Slf4j -class KubernetesClientApiAdapter { - - String account - - static final int RETRY_COUNT = 20 - static final long RETRY_MAX_WAIT_MILLIS = TimeUnit.SECONDS.toMillis(10) - static final long RETRY_INITIAL_WAIT_MILLIS = 100 - static final int API_CALL_TIMEOUT_SECONDS = 60 - static final int TERMINATION_GRACE_PERIOD_SECONDS = 30 - static final String API_CALL_RESULT_FORMAT = "" - static final int SHUTDOWN_ALL_PODS = 0 - static final String DEPLOYMENT_ANNOTATION = "deployment.kubernetes.io" - private final ObjectMapper mapper = new ObjectMapper(); - final Registry spectatorRegistry - final Clock spectatorClock - final ApiClient client - final AppsV1beta1Api apiInstance - final ExtensionsV1beta1Api extApi - final AutoscalingV1Api scalerApi - final CoreV1Api coreApi - - public spectatorRegistry() { return spectatorRegistry } - - KubernetesClientApiAdapter(String account, KubernetesApiClientConfig config, Registry spectatorRegistry) { - if (!config) { - throw new IllegalArgumentException("Config may not be null.") - } - - this.account = account - this.spectatorRegistry = spectatorRegistry - this.spectatorClock = spectatorRegistry.clock() - - client = config.getApiCient() - Configuration.setDefaultApiClient(client) - apiInstance = new AppsV1beta1Api() - extApi = new ExtensionsV1beta1Api() - scalerApi = new AutoscalingV1Api() - coreApi = new CoreV1Api() - } - - KubernetesClientOperationException formatException(String operation, String namespace, ApiException e) { - account ? new KubernetesClientOperationException(account, "$operation in $namespace", e) : - new KubernetesClientOperationException("$operation in $namespace", e) - } - - KubernetesClientOperationException formatException(String operation, ApiException e) { - account ? 
new KubernetesClientOperationException(account, "$operation", e) : - new KubernetesClientOperationException("$operation", e) - } - - Boolean blockUntilResourceConsistent(Closure getResource) { - Boolean isPodRunning = getResource() - - def wait = RETRY_INITIAL_WAIT_MILLIS - def attempts = 0 - while (!isPodRunning ) { - attempts += 1 - if (attempts > RETRY_COUNT) { - return false - } - - sleep(wait) - wait = [wait * 2, RETRY_MAX_WAIT_MILLIS].min() - - isPodRunning = getResource() - } - - return true - } - - private T exceptionWrapper(String methodName, String operationMessage, String namespace, Closure doOperation) { - T result = null - Exception failure - long startTime = spectatorClock.monotonicTime() - - try { - result = doOperation() - } catch (ApiException e) { - if (namespace) { - failure = formatException(operationMessage, namespace, e) - } else { - failure = formatException(operationMessage, e) - } - } catch (Exception e) { - failure = e - } finally { - - def tags = ["method": methodName, - "account": account, - "namespace" : namespace ? namespace : "none", - "success": failure ? "false": "true"] - if (failure) { - tags["reason"] = failure.class.simpleName - } - - spectatorRegistry.timer( - spectatorRegistry.createId("kubernetes.api", tags)) - .record(spectatorClock.monotonicTime() - startTime, TimeUnit.NANOSECONDS) - - if (failure) { - throw failure - } else { - return result - } - } - } - - List getStatefulSets(String namespace) { - exceptionWrapper("statefulSets.list", "Get Stateful Sets", namespace) { - try { - V1beta1StatefulSetList list = apiInstance.listNamespacedStatefulSet(namespace, null, null, null, true, null, null, null, API_CALL_TIMEOUT_SECONDS, false); - String apiVersion = list.getApiVersion(); - for (V1beta1StatefulSet item : list.getItems()) { - item.setApiVersion(apiVersion); - item.setKind("StatefulSet"); - } - - return list.items - } catch(Exception e){ - log.debug(e.message.toString()) - } - } - } - - List getDaemonSets(String namespace) { - exceptionWrapper("daemonSets.list", "Get Daemon Sets", namespace) { - try { - V1beta1DaemonSetList list = extApi.listNamespacedDaemonSet(namespace, null, null, null, null, null, null, null, API_CALL_TIMEOUT_SECONDS, false) - String apiVersion = list.getApiVersion(); - for (V1beta1DaemonSet item : list.getItems()) { - item.setApiVersion(apiVersion); - item.setKind("DaemonSet"); - } - - return list.items - } catch (ApiException e) { - log.debug(e.message.toString()) - } - } - } - - V1beta1StatefulSet createStatfulSet(String namespace, V1beta1StatefulSet statefulSet) { - exceptionWrapper("statefulSets.create", "Create Stateful Set ${statefulSet?.metadata?.name}", namespace) { - return apiInstance.createNamespacedStatefulSet(namespace, statefulSet, API_CALL_RESULT_FORMAT) - } - } - - V1beta1StatefulSet replaceStatfulSet(String name, String namespace, V1beta1StatefulSet statefulSet) { - exceptionWrapper("statefulSets.replace", "Replace Stateful Set ${name}", namespace) { - def deployedControllerSet = getStatefulSet(name, namespace) - deployedControllerSet.spec.replicas = statefulSet.spec.replicas - deployedControllerSet.spec.template = statefulSet.spec.template - deployedControllerSet.spec.updateStrategy = statefulSet.spec.updateStrategy - - return apiInstance.replaceNamespacedStatefulSet(name, namespace, deployedControllerSet, API_CALL_RESULT_FORMAT) - } - } - - private Map[] determineJsonPatch(Object current, Object desired) { - JsonNode desiredNode = mapper.convertValue(desired, JsonNode.class); - JsonNode currentNode = 
mapper.convertValue(current, JsonNode.class); - - return mapper.convertValue(JsonDiff.asJson(currentNode, desiredNode), Map[].class); - } - - V1beta1StatefulSet resizeStatefulSet(String name, String namespace, int targetSize) { - exceptionWrapper("statefulSets.resize", "Resize Stateful Set $name", namespace) { - V1beta1StatefulSet current = getStatefulSet(name, namespace) - V1beta1StatefulSet desired = getStatefulSet(name, namespace) - desired.spec.replicas = targetSize - - final Map[] jsonPatch = determineJsonPatch(current, desired); - V1beta1StatefulSet statefulSet = apiInstance.patchNamespacedStatefulSet(name, namespace, jsonPatch, null) - - return statefulSet - } - } - - void hardDestroyStatefulSet(String name, String namespace, V1DeleteOptions deleteOptions, Boolean orphanDependents, String propagationPolicy) { - exceptionWrapper("statefulSets.delete", "Delete Stateful Set $name", namespace) { - V1beta1StatefulSet statefulSet = getStatefulSet(name, namespace) - resizeStatefulSet(name, namespace, 0) - - getPods(namespace, statefulSet.metadata.labels).items.forEach({ item -> - deletePod(item.metadata.name, namespace, null, null, null, true) - }) - - try { - apiInstance.deleteNamespacedStatefulSet(name, namespace, deleteOptions ?: new V1DeleteOptions(), API_CALL_RESULT_FORMAT, TERMINATION_GRACE_PERIOD_SECONDS, orphanDependents, propagationPolicy) - } catch (Exception e) { - log.debug(e.message) - } - } - } - - V1HorizontalPodAutoscaler getAutoscaler(String namespace, String name) { - exceptionWrapper("horizontalPodAutoscalers.get", "Get Autoscaler $name", namespace) { - V1HorizontalPodAutoscaler result = null - - try { - result = scalerApi.readNamespacedHorizontalPodAutoscalerStatus(name, namespace, API_CALL_RESULT_FORMAT) - } catch (Exception ex) { - log.info "Unable to find autoscaler {$name in $namespace}: $ex." 
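blockUntilResourceConsistent above polls with capped exponential backoff rather than a fixed sleep. A self-contained sketch of the same loop, inlining the adapter's constants (RETRY_INITIAL_WAIT_MILLIS = 100, RETRY_MAX_WAIT_MILLIS = 10s, RETRY_COUNT = 20):

    // Poll until the closure reports the resource ready, doubling the wait
    // up to a cap; give up after 20 attempts.
    static boolean waitForResource(Closure<Boolean> resourceReady) {
      long wait = 100                    // RETRY_INITIAL_WAIT_MILLIS
      long maxWait = 10000               // RETRY_MAX_WAIT_MILLIS
      int attempts = 0
      while (!resourceReady()) {
        attempts += 1
        if (attempts > 20) {             // RETRY_COUNT
          return false
        }
        sleep(wait)                      // Groovy's Object.sleep(long)
        wait = Math.min(wait * 2, maxWait)
      }
      return true
    }
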
- } - - return result - } - } - - V1HorizontalPodAutoscaler createAutoscaler(String namespace, V1HorizontalPodAutoscaler autoscaler) { - exceptionWrapper("horizontalPodAutoscalers.create", "Create Autoscaler ${autoscaler?.metadata?.name}", namespace) { - return scalerApi.createNamespacedHorizontalPodAutoscaler(namespace, autoscaler, API_CALL_RESULT_FORMAT) - } - } - - V1beta1StatefulSet getStatefulSet(String statefulSetName, String namespace) { - exceptionWrapper("statefulSets.create", "Get Stateful Set ${statefulSetName}", namespace) { - try { - return apiInstance.readNamespacedStatefulSet(statefulSetName, namespace, API_CALL_RESULT_FORMAT, null, null) - } catch (Exception e) { - log.debug(e.message) - return null - } - } - } - - V1PodList getPods(String namespace, Map labels) { - exceptionWrapper("pods.list", "Get Pods matching $labels", namespace) { - String label - if (labels != null) { - Map.Entry entry = labels.entrySet().iterator().next() - String key = entry.getKey() - String value = entry.getValue() - label = key + "=" + value - } - coreApi.listNamespacedPod(namespace, null, null, null, false, label, null, null, API_CALL_TIMEOUT_SECONDS,false) - } - } - - boolean deleteAutoscaler(String namespace, String name, V1DeleteOptions deleteOptions, Boolean orphanDependents, String propagationPolicy) { - exceptionWrapper("horizontalPodAutoscalers.delete", "Destroy Autoscaler $name", namespace) { - return scalerApi.deleteNamespacedHorizontalPodAutoscaler(name, namespace, deleteOptions, API_CALL_RESULT_FORMAT, TERMINATION_GRACE_PERIOD_SECONDS, orphanDependents, propagationPolicy); - } - } - - V1beta1DaemonSet createDaemonSet(String namespace, V1beta1DaemonSet daemonSet) { - exceptionWrapper("DaemonSet.create", "Create Daemon Set ${daemonSet?.metadata?.name}", namespace) { - return extApi.createNamespacedDaemonSet(namespace, daemonSet, API_CALL_RESULT_FORMAT) - } - } - - V1beta1DaemonSet replaceDaemonSet(String name, String namespace, V1beta1DaemonSet daemonSet) { - exceptionWrapper("DaemonSet.replace", "Replace Daemon Set ${name}", namespace) { - def deployedControllerSet = getDaemonSet(name, namespace) - deployedControllerSet.spec.template = daemonSet.spec.template - deployedControllerSet.spec.updateStrategy = daemonSet.spec.updateStrategy - - return extApi.replaceNamespacedDaemonSet(name, namespace, deployedControllerSet, API_CALL_RESULT_FORMAT) - } - } - - V1beta1DaemonSet getDaemonSet(String name, String namespace) { - exceptionWrapper("DaemonSet.get", "Get Daemon Set ${name}", namespace) { - try { - return extApi.readNamespacedDaemonSet(name, namespace, API_CALL_RESULT_FORMAT, true, false) - } catch (Exception e) { - log.debug(e.message) - return null - } - } - } - - void hardDestroyDaemonSet(String name, String namespace, V1DeleteOptions deleteoptions, Boolean orphanDependents, String propagationPolicy) { - exceptionWrapper("daemonSets.delete", "Hard Destroy Daemon Set ${name}", namespace) { - def deployedControllerSet = getDaemonSet(name, namespace) - - V1Status status - try { - status = extApi.deleteNamespacedDaemonSet(name, namespace, deleteoptions ?: new V1DeleteOptions(), API_CALL_RESULT_FORMAT, TERMINATION_GRACE_PERIOD_SECONDS, orphanDependents, propagationPolicy); - } catch (Exception e) { - log.debug(e.message) - } - - getPods(namespace, deployedControllerSet.metadata.labels).items.forEach({ item -> - deletePod(item.metadata.name, namespace, null, null, null, true) - }) - } - } - - List getNamespacesByName() { - exceptionWrapper("namespaces.list", "Get Namespaces", null) { - 
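The resizeStatefulSet path above never sends the whole object back: determineJsonPatch diffs two Jackson trees with the json-patch library and the result is submitted via patchNamespacedStatefulSet. A minimal sketch of that diff step, with illustrative values:

    import com.fasterxml.jackson.databind.ObjectMapper
    import com.github.fge.jsonpatch.diff.JsonDiff

    def mapper = new ObjectMapper()
    def current = mapper.readTree('{"spec":{"replicas":3}}')
    def desired = mapper.readTree('{"spec":{"replicas":5}}')

    // Yields an RFC 6902 patch, e.g.
    //   [{"op":"replace","path":"/spec/replicas","value":5}]
    Map[] patch = mapper.convertValue(JsonDiff.asJson(current, desired), Map[].class)
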
V1NamespaceList result = coreApi.listNamespace(API_CALL_RESULT_FORMAT, null, null, null, null, null, null, 30, null) - return result.items.collect { n -> n.getMetadata().getName() } - } - } - - void deletePod(String name, String namespace, V1DeleteOptions deleteOptions, Boolean orphanDependents, String propagationPolicy, Boolean force) { - exceptionWrapper("pod.delete", "Destroy Pod ${name}", namespace) { - V1Status status - try { - if (force) { - deleteOptions = new V1DeleteOptions() - deleteOptions.kind = "DeleteOptions" - deleteOptions.apiVersion "v1" - deleteOptions.gracePeriodSeconds = 0 - } - - status = coreApi.deleteNamespacedPod(name, namespace, deleteOptions ?: new V1DeleteOptions(), API_CALL_RESULT_FORMAT, TERMINATION_GRACE_PERIOD_SECONDS, null, null) - } catch (Exception e) { - log.debug(e.message) - } - } - } - - V1Pod getPodStatus(String name, String namespace) { - exceptionWrapper("pods.status", "Get pod status ${name}", namespace) { - V1Pod pod - try { - pod = coreApi.readNamespacedPodStatus(name, namespace, API_CALL_RESULT_FORMAT) - } catch (Exception e) { - log.debug(e.message) - } - } - } - - String getControllerKind(String name, String namespace, String controllerKind) { - exceptionWrapper("getControllerType", "Get controller type ${name}", namespace) { - def deployedControllerSet - if (controllerKind == KubernetesUtil.CONTROLLERS_STATEFULSET_KIND) { - deployedControllerSet = getStatefulSet(name, namespace) - } else if (controllerKind == KubernetesUtil.CONTROLLERS_DAEMONSET_KIND) { - deployedControllerSet = getDaemonSet(name, namespace) - } else { - deployedControllerSet = getStatefulSet(name, namespace) - if (!deployedControllerSet) { - deployedControllerSet = getDaemonSet(name, namespace) - } - } - - return deployedControllerSet?.kind - } - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesClientApiConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesClientApiConverter.groovy deleted file mode 100644 index f6b1f670b38..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/KubernetesClientApiConverter.groovy +++ /dev/null @@ -1,1026 +0,0 @@ -/* - * Copyright 2017 Cisco, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
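Every call in KubernetesClientApiAdapter above funnels through exceptionWrapper, which times the call, tags the outcome, and rethrows any failure after recording it. A stripped-down sketch of that pattern; it assumes only a Spectator Registry in scope and omits the account/namespace exception translation:

    import com.netflix.spectator.api.Registry
    import java.util.concurrent.TimeUnit

    // Record a "kubernetes.api" timer around a call, tagging success/failure.
    static <T> T timedCall(Registry registry, String method, String namespace, Closure<T> call) {
      long start = registry.clock().monotonicTime()
      String success = "true"
      try {
        return call()
      } catch (Exception e) {
        success = "false"
        throw e
      } finally {
        registry.timer(registry.createId("kubernetes.api",
            "method", method,
            "namespace", namespace ?: "none",
            "success", success))
          .record(registry.clock().monotonicTime() - start, TimeUnit.NANOSECONDS)
      }
    }
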
- */ -package com.netflix.spinnaker.clouddriver.kubernetes.v1.api - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.dataformat.yaml.YAMLFactory -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler.KubernetesAutoscalerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KeyValuePair -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesAwsElasticBlockStoreVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesCapabilities -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesConfigMapVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerPort -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesEmptyDir -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesExecAction -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHandler -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHandlerType -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHostPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHttpGetAction -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesKeyToPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesLifecycle -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesPersistentVolumeClaim -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesProbe -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesPullPolicy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesResourceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSeLinuxOptions -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSecretVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesSecurityContext -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesStorageMediumType -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesTcpSocketAction -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeMount -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSourceType -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesControllerConverter -import 
groovy.util.logging.Slf4j -import io.kubernetes.client.models.V1Capabilities -import io.kubernetes.client.models.V1ConfigMapVolumeSource -import io.kubernetes.client.models.V1Container -import io.kubernetes.client.models.V1ContainerPort -import io.kubernetes.client.models.V1CrossVersionObjectReference -import io.kubernetes.client.models.V1EmptyDirVolumeSource -import io.kubernetes.client.models.V1EnvVar -import io.kubernetes.client.models.V1EnvVarSource -import io.kubernetes.client.models.V1ExecAction -import io.kubernetes.client.models.V1HTTPGetAction -import io.kubernetes.client.models.V1HTTPHeader -import io.kubernetes.client.models.V1Handler -import io.kubernetes.client.models.V1HorizontalPodAutoscaler -import io.kubernetes.client.models.V1HorizontalPodAutoscalerSpec -import io.kubernetes.client.models.V1HostPathVolumeSource -import io.kubernetes.client.models.V1KeyToPath -import io.kubernetes.client.models.V1LabelSelector -import io.kubernetes.client.models.V1Lifecycle -import io.kubernetes.client.models.V1ObjectFieldSelector -import io.kubernetes.client.models.V1ObjectMeta -import io.kubernetes.client.models.V1ObjectReference -import io.kubernetes.client.models.V1PersistentVolumeClaim -import io.kubernetes.client.models.V1PersistentVolumeClaimSpec -import io.kubernetes.client.models.V1PersistentVolumeClaimVolumeSource -import io.kubernetes.client.models.V1PodSpec -import io.kubernetes.client.models.V1PodTemplateSpec -import io.kubernetes.client.models.V1Probe -import io.kubernetes.client.models.V1ResourceFieldSelector -import io.kubernetes.client.models.V1ResourceRequirements -import io.kubernetes.client.models.V1SELinuxOptions -import io.kubernetes.client.models.V1SecretVolumeSource -import io.kubernetes.client.models.V1SecurityContext -import io.kubernetes.client.models.V1TCPSocketAction -import io.kubernetes.client.models.V1Volume -import io.kubernetes.client.models.V1VolumeMount -import io.kubernetes.client.models.V1beta1DaemonSet -import io.kubernetes.client.models.V1beta1DaemonSetSpec -import io.kubernetes.client.models.V1beta1DaemonSetUpdateStrategy -import io.kubernetes.client.models.V1beta1RollingUpdateDaemonSet -import io.kubernetes.client.models.V1beta1RollingUpdateStatefulSetStrategy -import io.kubernetes.client.models.V1beta1StatefulSet -import io.kubernetes.client.models.V1beta1StatefulSetSpec -import io.kubernetes.client.models.V1beta1StatefulSetUpdateStrategy -import org.slf4j.Logger -import org.slf4j.LoggerFactory - -/** - * Created by spinnaker on 20/8/17. 
- */ -@Slf4j -class KubernetesClientApiConverter { - private static final Logger LOG = LoggerFactory.getLogger(KubernetesClientApiConverter) - - static DeployKubernetesAtomicOperationDescription fromStatefulSet(V1beta1StatefulSet statefulSet) { - def deployDescription = new DeployKubernetesAtomicOperationDescription() - def parsedName = Names.parseName(statefulSet?.metadata?.name) - - deployDescription.application = parsedName?.app - deployDescription.stack = parsedName?.stack - deployDescription.freeFormDetails = parsedName?.detail - deployDescription.loadBalancers = KubernetesUtil?.getLoadBalancers(statefulSet.spec?.template?.metadata?.labels ?: [:]) - deployDescription.namespace = statefulSet?.metadata?.namespace - deployDescription.targetSize = statefulSet?.spec?.replicas - deployDescription.securityGroups = [] - deployDescription.controllerAnnotations = statefulSet?.metadata?.annotations - deployDescription.podAnnotations = statefulSet?.spec?.template?.metadata?.annotations - deployDescription.volumeClaims = statefulSet?.spec?.getVolumeClaimTemplates() - deployDescription.volumeSources = statefulSet?.spec?.template?.spec?.volumes?.collect { - fromVolume(it) - } ?: [] - deployDescription.hostNetwork = statefulSet?.spec?.template?.spec?.hostNetwork - deployDescription.containers = statefulSet?.spec?.template?.spec?.containers?.collect { - fromContainer(it) - } ?: [] - deployDescription.terminationGracePeriodSeconds = statefulSet?.spec?.template?.spec?.terminationGracePeriodSeconds - deployDescription.serviceAccountName = statefulSet?.spec?.template?.spec?.serviceAccountName - deployDescription.nodeSelector = statefulSet?.spec?.template?.spec?.nodeSelector - - return deployDescription - } - - static DeployKubernetesAtomicOperationDescription fromDaemonSet(V1beta1DaemonSet daemonSet) { - def deployDescription = new DeployKubernetesAtomicOperationDescription() - def parsedName = Names.parseName(daemonSet?.metadata?.name) - - deployDescription.application = parsedName?.app - deployDescription.stack = parsedName?.stack - deployDescription.freeFormDetails = parsedName?.detail - deployDescription.loadBalancers = KubernetesUtil?.getLoadBalancers(daemonSet.spec?.template?.metadata?.labels ?: [:]) - deployDescription.namespace = daemonSet?.metadata?.namespace - deployDescription.securityGroups = [] - deployDescription.podAnnotations = daemonSet?.spec?.template?.metadata?.annotations - deployDescription.volumeSources = daemonSet?.spec?.template?.spec?.volumes?.collect { - fromVolume(it) - } ?: [] - - deployDescription.hostNetwork = daemonSet?.spec?.template?.spec?.hostNetwork - - deployDescription.containers = daemonSet?.spec?.template?.spec?.containers?.collect { - fromContainer(it) - } ?: [] - - deployDescription.terminationGracePeriodSeconds = daemonSet?.spec?.template?.spec?.terminationGracePeriodSeconds - - deployDescription.nodeSelector = daemonSet?.spec?.template?.spec?.nodeSelector - - return deployDescription - } - - static KubernetesContainerDescription fromContainer(V1Container container) { - if (!container) { - return null - } - - def containerDescription = new KubernetesContainerDescription() - containerDescription.name = container.name - containerDescription.imageDescription = KubernetesUtil.buildImageDescription(container.image) - - if (container.imagePullPolicy) { - containerDescription.imagePullPolicy = KubernetesPullPolicy.valueOf(container.imagePullPolicy) - } - - container.resources?.with { - containerDescription.limits = limits?.cpu || limits?.memory ? 
- new KubernetesResourceDescription( - cpu: limits?.cpu, - memory: limits?.memory - ) : null - - containerDescription.requests = requests?.cpu || requests?.memory ? - new KubernetesResourceDescription( - cpu: requests?.cpu, - memory: requests?.memory - ) : null - } - - if (container.lifecycle) { - containerDescription.lifecycle = new KubernetesLifecycle() - if (container.lifecycle.postStart) { - containerDescription.lifecycle.postStart = fromHandler(container.lifecycle.postStart) - } - if (container.lifecycle.preStop) { - containerDescription.lifecycle.preStop = fromHandler(container.lifecycle.preStop) - } - } - - containerDescription.ports = container.ports?.collect { - def port = new KubernetesContainerPort() - port.hostIp = it?.hostIP - if (it?.hostPort) { - port.hostPort = it?.hostPort?.intValue() - } - if (it?.containerPort) { - port.containerPort = it?.containerPort?.intValue() - } - port.name = it?.name - port.protocol = it?.protocol - - return port - } - - if (container.securityContext) { - def securityContext = container.securityContext - - containerDescription.securityContext = new KubernetesSecurityContext(privileged: securityContext.privileged, - runAsNonRoot: securityContext.runAsNonRoot, - runAsUser: securityContext.runAsUser, - readOnlyRootFilesystem: securityContext.readOnlyRootFilesystem - ) - - if (securityContext.capabilities) { - def capabilities = securityContext.capabilities - - containerDescription.securityContext.capabilities = new KubernetesCapabilities(add: capabilities.add, drop: capabilities.drop) - } - - if (securityContext.seLinuxOptions) { - def seLinuxOptions = securityContext.seLinuxOptions - - containerDescription.securityContext.seLinuxOptions = new KubernetesSeLinuxOptions(user: seLinuxOptions.user, - role: seLinuxOptions.role, - type: seLinuxOptions.type, - level: seLinuxOptions.level - ) - } - } - - containerDescription.livenessProbe = fromV1Probe(container?.livenessProbe) - containerDescription.readinessProbe = fromV1Probe(container?.readinessProbe) - - containerDescription.volumeMounts = container?.volumeMounts?.collect { volumeMount -> - new KubernetesVolumeMount( - name: volumeMount.name, - readOnly: volumeMount.readOnly, - mountPath: volumeMount.mountPath, - subPath: volumeMount.subPath - ) - } - - containerDescription.args = container?.args ?: [] - containerDescription.command = container?.command ?: [] - - return containerDescription - } - - static KubernetesVolumeSource fromVolume(V1Volume volume) { - def res = new KubernetesVolumeSource(name: volume.name) - - if (volume.emptyDir) { - res.type = KubernetesVolumeSourceType.EmptyDir - def medium = volume.emptyDir.medium - def mediumType - - if (medium == "Memory") { - mediumType = KubernetesStorageMediumType.Memory - } else { - mediumType = KubernetesStorageMediumType.Default - } - - res.emptyDir = new KubernetesEmptyDir(medium: mediumType) - } else if (volume.hostPath) { - res.type = KubernetesVolumeSourceType.HostPath - res.hostPath = new KubernetesHostPath(path: volume.hostPath.path) - } else if (volume.persistentVolumeClaim) { - res.type = KubernetesVolumeSourceType.PersistentVolumeClaim - res.persistentVolumeClaim = new KubernetesPersistentVolumeClaim(claimName: volume.persistentVolumeClaim.claimName, - readOnly: volume.persistentVolumeClaim.readOnly) - } else if (volume.secret) { - res.type = KubernetesVolumeSourceType.Secret - res.secret = new KubernetesSecretVolumeSource(secretName: volume.secret.secretName) - } else if (volume.configMap) { - res.type = 
KubernetesVolumeSourceType.ConfigMap - def items = volume.configMap.items?.collect { V1KeyToPath item -> - new KubernetesKeyToPath(key: item.key, path: item.path) - } - res.configMap = new KubernetesConfigMapVolumeSource(configMapName: volume.configMap.name, items: items) - } else if (volume.awsElasticBlockStore) { - res.type = KubernetesVolumeSourceType.AwsElasticBlockStore - def ebs = volume.awsElasticBlockStore - res.awsElasticBlockStore = new KubernetesAwsElasticBlockStoreVolumeSource(volumeId: ebs.volumeID, - fsType: ebs.fsType, - partition: ebs.partition) - } else { - res.type = KubernetesVolumeSourceType.Unsupported - } - - return res - } - - static KubernetesExecAction fromExecAction(V1ExecAction exec) { - if (!exec) { - return null - } - - def kubernetesExecAction = new KubernetesExecAction() - kubernetesExecAction.commands = exec.command - return kubernetesExecAction - } - - static KubernetesHandler fromHandler(V1Handler handler) { - def kubernetesHandler = new KubernetesHandler() - if (handler.exec) { - kubernetesHandler.execAction = fromExecAction(handler.exec) - kubernetesHandler.type = KubernetesHandlerType.EXEC - } - - if (handler.tcpSocket) { - kubernetesHandler.tcpSocketAction = fromTcpSocketAction(handler.tcpSocket) - kubernetesHandler.type = KubernetesHandlerType.TCP - } - - if (handler.httpGet) { - kubernetesHandler.httpGetAction = fromHttpGetAction(handler.httpGet) - kubernetesHandler.type = KubernetesHandlerType.HTTP - } - - return kubernetesHandler - } - - static KubernetesHttpGetAction fromHttpGetAction(V1HTTPGetAction httpGet) { - if (!httpGet) { - return null - } - - def kubernetesHttpGetAction = new KubernetesHttpGetAction() - kubernetesHttpGetAction.host = httpGet.host - kubernetesHttpGetAction.path = httpGet.path - try { - kubernetesHttpGetAction.port = httpGet.port?.toInteger() ?: 0 - } catch (NumberFormatException ex) { - log.warn "Port number is not Integer", ex - } - kubernetesHttpGetAction.uriScheme = httpGet.scheme - kubernetesHttpGetAction.httpHeaders = httpGet.httpHeaders?.collect() { - new KeyValuePair(name: it.name, value: it.value) - } - return kubernetesHttpGetAction - } - - static KubernetesTcpSocketAction fromTcpSocketAction(V1TCPSocketAction tcpSocket) { - if (!tcpSocket) { - return null - } - - def kubernetesTcpSocketAction = new KubernetesTcpSocketAction() - try { - kubernetesTcpSocketAction.port = tcpSocket.port.toInteger() ?: 0 - } catch (NumberFormatException ex) { - log.warn "Port number is not Integer", ex - } - return kubernetesTcpSocketAction - } - - static KubernetesProbe fromV1Probe(V1Probe probe) { - if (!probe) { - return null - } - - def kubernetesProbe = new KubernetesProbe() - kubernetesProbe.failureThreshold = probe.failureThreshold ?: 0 - kubernetesProbe.successThreshold = probe.successThreshold ?: 0 - kubernetesProbe.timeoutSeconds = probe.timeoutSeconds ?: 0 - kubernetesProbe.periodSeconds = probe.periodSeconds ?: 0 - kubernetesProbe.initialDelaySeconds = probe.initialDelaySeconds ?: 0 - kubernetesProbe.handler = new KubernetesHandler() - - if (probe.exec) { - kubernetesProbe.handler.execAction = fromExecAction(probe.exec) - kubernetesProbe.handler.type = KubernetesHandlerType.EXEC - } - - if (probe.tcpSocket) { - kubernetesProbe.handler.tcpSocketAction = fromTcpSocketAction(probe.tcpSocket) - kubernetesProbe.handler.type = KubernetesHandlerType.TCP - } - - if (probe.httpGet) { - kubernetesProbe.handler.httpGetAction = fromHttpGetAction(probe.httpGet) - kubernetesProbe.handler.type = KubernetesHandlerType.HTTP - } - - return 
kubernetesProbe
-  }
-
-  /**
-   * Converts the given object to its YAML representation.
-   * @param obj the object to serialize
-   * @return the YAML string
-   */
-  static String getYaml(Object obj) {
-    ObjectMapper m = new ObjectMapper(new YAMLFactory());
-    return m.writeValueAsString(obj).replaceAll("\\\\", "");
-  }
-
-  static V1beta1StatefulSet toStatefulSet(DeployKubernetesAtomicOperationDescription description,
-                                          String statefulSetName) {
-    def targetSize = description.targetSize ?: description.capacity?.desired
-    def stateful = new V1beta1StatefulSet()
-    def spec = new V1beta1StatefulSetSpec()
-
-    def templateSpec = toPodTemplateSpec(description, statefulSetName)
-    spec.template = templateSpec
-
-    def metadata = new V1ObjectMeta()
-    def selector = new V1LabelSelector()
-    metadata.labels = genericLabels(description.application, statefulSetName, description.namespace)
-    if (description.controllerAnnotations) {
-      metadata.annotations = new HashMap()
-      description.controllerAnnotations.forEach({ k, v ->
-        metadata.annotations.put(k, v)
-      })
-    }
-    spec.template.metadata = metadata
-    selector.matchLabels = metadata.labels
-    spec.selector = selector
-    spec.serviceName = statefulSetName
-    spec.replicas = targetSize
-
-    if (description.podManagementPolicy) {
-      spec.podManagementPolicy = description.podManagementPolicy
-    }
-
-    def persistentVolumeClaims = toPersistentVolumeClaims(description, statefulSetName)
-    persistentVolumeClaims.forEach({ persistentVolumeClaim ->
-      spec.addVolumeClaimTemplatesItem(persistentVolumeClaim)
-    })
-
-    if (description.updateController) {
-      def updateController = description.updateController
-      def updateStrategy = new V1beta1StatefulSetUpdateStrategy()
-      def rollingUpdate = new V1beta1RollingUpdateStatefulSetStrategy()
-
-      if (updateController.updateStrategy.type.name() != "Recreate") {
-        updateStrategy.type = updateController.updateStrategy.type
-        if (updateController.updateStrategy.rollingUpdate) {
-          if (updateController.updateStrategy.rollingUpdate.partition) {
-            rollingUpdate.partition = updateController.updateStrategy.rollingUpdate.partition
-          }
-          updateStrategy.rollingUpdate = rollingUpdate
-        }
-        spec.updateStrategy = updateStrategy
-      }
-    }
-
-    metadata = new V1ObjectMeta()
-    metadata.name = statefulSetName
-    metadata.namespace = description.namespace
-    metadata.labels = genericLabels(description.application, statefulSetName, description.namespace)
-    metadata.deletionGracePeriodSeconds = description.terminationGracePeriodSeconds
-
-    stateful.metadata = metadata
-    stateful.spec = spec
-    stateful.apiVersion = description.apiVersion
-    stateful.kind = description.kind
-
-    return stateful
-  }
-
-  static List toPersistentVolumeClaims(DeployKubernetesAtomicOperationDescription description, String name) {
-    def persistentVolumeClaims = new ArrayList()
-    if (description.volumeClaims) {
-      description.volumeClaims.forEach({ claim ->
-        def spec = new V1PersistentVolumeClaimSpec()
-        def metadata = new V1ObjectMeta()
-
-        if (description.volumeAnnotations) {
-          metadata.annotations = new HashMap()
-          description.volumeAnnotations.forEach({ k, v ->
-            metadata.annotations.put(k, v)
-          })
-        }
-        metadata.name = claim.claimName
-
-        if (claim.accessModes) {
-          spec.accessModes = claim.accessModes
-        }
-
-        if (claim.requirements) {
-          def resources = new V1ResourceRequirements()
-          resources.limits = claim.requirements.limits
-          resources.requests = claim.requirements.requests
-
-          spec.resources = resources
-        }
-
-        if (claim.storageClassName) {
-          spec.storageClassName = claim.storageClassName
-        }
-
-        def volumeClaim = new V1PersistentVolumeClaim()
-        volumeClaim.spec = spec
-        volumeClaim.metadata = metadata
-
-        persistentVolumeClaims.add(volumeClaim)
-      })
-    }
-
-    return persistentVolumeClaims
-  }
-
-  static V1PodTemplateSpec toPodTemplateSpec(DeployKubernetesAtomicOperationDescription description, String name) {
-    def podTemplateSpec = new V1PodTemplateSpec()
-    def podSpec = new V1PodSpec()
-    def metadata = new V1ObjectMeta()
-    metadata.labels = [:]
-
-    for (def loadBalancer : description.loadBalancers) {
-      metadata.labels.put(KubernetesUtil.loadBalancerKey(loadBalancer), "true")
-    }
-
-    if (description.podAnnotations) {
-      metadata.annotations = new HashMap()
-      description.podAnnotations.forEach({ k, v ->
-        metadata.annotations.put(k, v)
-      })
-    }
-
-    podTemplateSpec.metadata = metadata
-
-    if (description.restartPolicy) {
-      podSpec.restartPolicy = description.restartPolicy
-    } else {
-      podSpec.restartPolicy = "Always"
-    }
-
-    if (description.terminationGracePeriodSeconds) {
-      podSpec.terminationGracePeriodSeconds = description.terminationGracePeriodSeconds
-    }
-
-    if (description.imagePullSecrets) {
-      podSpec.imagePullSecrets = new ArrayList()
-      for (def imagePullSecret : description.imagePullSecrets) {
-        def secret = new V1ObjectReference()
-        secret.name = imagePullSecret
-        secret.namespace = description.namespace
-        podSpec.imagePullSecrets.add(secret)
-      }
-    }
-
-    if (description.serviceAccountName) {
-      podSpec.serviceAccountName = description.serviceAccountName
-    }
-
-    podSpec.nodeSelector = description.nodeSelector
-
-    if (description.volumeSources) {
-      def volumeSources = description.volumeSources.findResults { volumeSource ->
-        toVolumeSource(volumeSource)
-      }
-      podSpec.volumes = volumeSources
-    }
-
-    podSpec.hostNetwork = description.hostNetwork
-    def containers = description.containers.collect { container ->
-      toContainer(container)
-    }
-
-    podSpec.dnsPolicy = "ClusterFirst"
-    podSpec.containers = containers
-    podTemplateSpec.spec = podSpec
-
-    return podTemplateSpec
-  }
-
-  static V1Volume toVolumeSource(KubernetesVolumeSource volumeSource) {
-    def volume = new V1Volume(name: volumeSource.name)
-    switch (volumeSource.type) {
-      case KubernetesVolumeSourceType.EmptyDir:
-        def res = new V1EmptyDirVolumeSource()
-        switch (volumeSource.emptyDir.medium) {
-          case KubernetesStorageMediumType.Memory:
-            res.medium = "Memory"
-            break
-
-          default:
-            res.medium = "" // Empty string is the default medium.
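-            // The Kubernetes API treats an unset or empty medium as "use the node's
-            // default storage"; only the value "Memory" requests a tmpfs-backed emptyDir.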
-        }
-        volume.emptyDir = res
-        break
-
-      case KubernetesVolumeSourceType.HostPath:
-        def res = new V1HostPathVolumeSource()
-        res.path = volumeSource.hostPath.path
-        volume.hostPath = res
-        break
-
-      case KubernetesVolumeSourceType.PersistentVolumeClaim:
-        def res = new V1PersistentVolumeClaimVolumeSource()
-        res.claimName = volumeSource.persistentVolumeClaim.claimName
-        res.readOnly = volumeSource.persistentVolumeClaim.readOnly
-        volume.persistentVolumeClaim = res
-        break
-
-      case KubernetesVolumeSourceType.Secret:
-        def res = new V1SecretVolumeSource()
-        res.secretName = volumeSource.secret.secretName
-        volume.secret = res
-        break
-
-      case KubernetesVolumeSourceType.ConfigMap:
-        def res = new V1ConfigMapVolumeSource()
-        res.name = volumeSource.configMap.configMapName
-
-        def items = volumeSource.configMap.items?.collect { KubernetesKeyToPath item ->
-          new V1KeyToPath(key: item.key, path: item.path)
-        }
-
-        res.items = items
-        volume.configMap = res
-        break
-
-      default:
-        LOG.warn "Unable to identify KubernetesVolumeSourceType ${volumeSource.type}".toString()
-        return null
-    }
-
-    return volume
-  }
-
-  static V1Container toContainer(KubernetesContainerDescription container) {
-    KubernetesUtil.normalizeImageDescription(container.imageDescription)
-    def imageId = KubernetesUtil.getImageId(container.imageDescription)
-    def v1container = new V1Container()
-
-    v1container.image = imageId
-
-    if (container.imagePullPolicy) {
-      v1container.imagePullPolicy = container.imagePullPolicy.toString()
-    } else {
-      v1container.imagePullPolicy = "Always"
-    }
-
-    v1container.name = container.name
-
-    if (container.ports) {
-      container.ports.forEach { it ->
-        def ports = new V1ContainerPort()
-        if (it.name) {
-          ports.name = it.name
-        }
-
-        if (it.containerPort) {
-          ports.containerPort = it.containerPort
-        }
-
-        if (it.hostPort) {
-          ports.hostPort = it.hostPort
-        }
-
-        if (it.protocol) {
-          ports.protocol = it.protocol
-        }
-
-        if (it.hostIp) {
-          ports.hostIP = it.hostIp
-        }
-
-        v1container.addPortsItem(ports)
-      }
-    }
-
-    if (container.securityContext) {
-      def securityContext = new V1SecurityContext()
-      securityContext.runAsNonRoot = container.securityContext.runAsNonRoot
-      securityContext.runAsUser = container.securityContext.runAsUser
-      securityContext.privileged = container.securityContext.privileged
-      securityContext.readOnlyRootFilesystem = container.securityContext.readOnlyRootFilesystem
-
-      if (container.securityContext.seLinuxOptions) {
-        def seLinuxOptions = new V1SELinuxOptions()
-        seLinuxOptions.user = container.securityContext.seLinuxOptions.user
-        seLinuxOptions.role = container.securityContext.seLinuxOptions.role
-        seLinuxOptions.type = container.securityContext.seLinuxOptions.type
-        seLinuxOptions.level = container.securityContext.seLinuxOptions.level
-
-        securityContext.seLinuxOptions = seLinuxOptions
-      }
-
-      if (container.securityContext.capabilities) {
-        def capabilities = new V1Capabilities()
-        capabilities.add = container.securityContext.capabilities.add
-        capabilities.drop = container.securityContext.capabilities.drop
-
-        securityContext.capabilities = capabilities
-      }
-
-      v1container.securityContext = securityContext
-    }
-
-    [liveness: container.livenessProbe, readiness: container.readinessProbe].each { k, v ->
-      def probe = v
-      def v1probe = new V1Probe()
-      if (probe) {
-        v1probe.initialDelaySeconds = probe.initialDelaySeconds
-
-        if (probe.timeoutSeconds) {
-          v1probe.timeoutSeconds = probe.timeoutSeconds
-        }
-
-        if (probe.failureThreshold) {
-          v1probe.failureThreshold = probe.failureThreshold
-        }
-
-        if (probe.successThreshold) {
-          v1probe.successThreshold = probe.successThreshold
-        }
-
-        if (probe.periodSeconds) {
-          v1probe.periodSeconds = probe.periodSeconds
-        }
-
-        switch (probe.handler.type) {
-          case KubernetesHandlerType.EXEC:
-            v1probe.exec = toExecAction(probe.handler.execAction)
-            break
-
-          case KubernetesHandlerType.TCP:
-            v1probe.tcpSocket = toTcpSocketAction(probe.handler.tcpSocketAction)
-            break
-
-          case KubernetesHandlerType.HTTP:
-            v1probe.httpGet = toHttpGetAction(probe.handler.httpGetAction)
-            break
-        }
-
-        switch (k) {
-          case 'liveness':
-            v1container.livenessProbe = v1probe
-            break
-          case 'readiness':
-            v1container.readinessProbe = v1probe
-            break
-          default:
-            throw new IllegalArgumentException("Probe type $k not supported")
-        }
-      }
-    }
-
-    if (container.lifecycle) {
-      def lifecycle = new V1Lifecycle()
-
-      if (container.lifecycle.postStart) {
-        lifecycle.postStart = toHandler(container.lifecycle.postStart)
-      }
-
-      if (container.lifecycle.preStop) {
-        lifecycle.preStop = toHandler(container.lifecycle.preStop)
-      }
-      v1container.lifecycle = lifecycle
-    }
-
-    def resources = new V1ResourceRequirements()
-    if (container.requests) {
-      def requests = [:]
-
-      if (container.requests.memory) {
-        requests.memory = container.requests.memory
-      }
-
-      if (container.requests.cpu) {
-        requests.cpu = container.requests.cpu
-      }
-      resources.requests = requests
-    }
-
-    if (container.limits) {
-      def limits = [:]
-
-      if (container.limits.memory) {
-        limits.memory = container.limits.memory
-      }
-
-      if (container.limits.cpu) {
-        limits.cpu = container.limits.cpu
-      }
-
-      resources.limits = limits
-    }
-    v1container.resources = resources
-
-    if (container.volumeMounts) {
-      def volumeMounts = new ArrayList()
-      container.volumeMounts.each { mount ->
-        def res = new V1VolumeMount()
-        res.name = mount.name
-        res.mountPath = mount.mountPath
-        res.readOnly = mount.readOnly
-        res.subPath = mount.subPath
-        volumeMounts.add(res)
-      }
-      v1container.volumeMounts = volumeMounts
-    }
-
-    if (container.envVars) {
-      def envVars = container.envVars.collect { envVar ->
-        def envVarRes = new V1EnvVar()
-        envVarRes.name = envVar.name
-        if (envVar.value) {
-          envVarRes.value = envVar.value
-        } else if (envVar.envSource) {
-          V1EnvVarSource envVarSource = new V1EnvVarSource()
-
-          if (envVar.envSource.configMapSource) {
-            def configMap = envVar.envSource.configMapSource
-            envVarSource.configMapKeyRef = configMap
-          } else if (envVar.envSource.secretSource) {
-            def secret = envVar.envSource.secretSource
-            envVarSource.secretKeyRef = secret
-          } else if (envVar.envSource.fieldRef) {
-            V1ObjectFieldSelector fieldRef = new V1ObjectFieldSelector()
-            fieldRef.fieldPath = envVar.envSource.fieldRef.fieldPath
-            envVarSource.fieldRef = fieldRef
-          } else if (envVar.envSource.resourceFieldRef) {
-            def resource = envVar.envSource.resourceFieldRef.resource
-            def containerName = envVar.envSource.resourceFieldRef.containerName
-            def divisor = envVar.envSource.resourceFieldRef.divisor
-            def resourceField = new V1ResourceFieldSelector()
-            resourceField.resource = resource
-            resourceField.containerName = containerName
-            resourceField.divisor = divisor
-
-            envVarSource.resourceFieldRef = resourceField
-          }
-
-          envVarRes.valueFrom = envVarSource
-        } else {
-          return null
-        }
-        return envVarRes
-      } - null
-      v1container.env = envVars
-    }
-
-    if (container.command) {
-      v1container.command = container.command
-    }
-
-    if (container.args) {
-      v1container.args = container.args
-    }
-
-    return v1container
-  }
-
-  static V1ExecAction toExecAction(KubernetesExecAction action) {
-    def execAction = new V1ExecAction()
-    execAction.command = action.commands
-
-    return execAction
-  }
-
-  static V1TCPSocketAction toTcpSocketAction(KubernetesTcpSocketAction action) {
-    def tcpAction = new V1TCPSocketAction()
-    tcpAction.port = action.port
-
-    return tcpAction
-  }
-
-  static V1HTTPGetAction toHttpGetAction(KubernetesHttpGetAction action) {
-    def httpGetAction = new V1HTTPGetAction()
-    if (action.host) {
-      httpGetAction.host = action.host
-    }
-
-    if (action.path) {
-      httpGetAction.path = action.path
-    }
-
-    httpGetAction.port = String.valueOf(action.port)
-
-    if (action.uriScheme) {
-      httpGetAction.scheme = action.uriScheme
-    }
-
-    if (action.httpHeaders) {
-      def headers = action.httpHeaders.collect {
-        V1HTTPHeader header = new V1HTTPHeader()
-        header.name = it.name
-        header.value = it.value
-        return header
-      }
-      httpGetAction.httpHeaders = headers
-    }
-
-    return httpGetAction
-  }
-
-  static V1Handler toHandler(KubernetesHandler handler) {
-    def handlerBuilder = new V1Handler()
-    switch (handler.type) {
-      case KubernetesHandlerType.EXEC:
-        handlerBuilder.exec = toExecAction(handler.execAction)
-        break
-
-      case KubernetesHandlerType.TCP:
-        handlerBuilder.tcpSocket = toTcpSocketAction(handler.tcpSocketAction)
-        break
-
-      case KubernetesHandlerType.HTTP:
-        handlerBuilder.httpGet = toHttpGetAction(handler.httpGetAction)
-        break
-    }
-
-    return handlerBuilder
-  }
-
-  static V1HorizontalPodAutoscaler toAutoscaler(KubernetesAutoscalerDescription description,
-                                                String resourceName,
-                                                String resourceKind) {
-    def autoscaler = new V1HorizontalPodAutoscaler()
-
-    V1ObjectMeta metadata = new V1ObjectMeta()
-    metadata.name = resourceName
-    metadata.namespace = description.namespace
-
-    autoscaler.metadata = metadata
-
-    def spec = new V1HorizontalPodAutoscalerSpec()
-    spec.minReplicas = description.capacity.min
-    spec.maxReplicas = description.capacity.max
-    spec.targetCPUUtilizationPercentage = description.scalingPolicy.cpuUtilization.target
-
-    def targetRef = new V1CrossVersionObjectReference()
-    targetRef.name = resourceName
-    targetRef.kind = resourceKind
-
-    spec.scaleTargetRef = targetRef
-    autoscaler.spec = spec
-
-    return autoscaler
-  }
-
-  static KubernetesControllerConverter toKubernetesController(V1beta1StatefulSet controllerSet) {
-    // FIXME: Use this method for k8s client API transforms to fabric8 objects until the code is fully k8s client API compliant.
-    return (new KubernetesControllerConverter(controllerSet.kind, controllerSet.apiVersion, controllerSet.metadata))
-  }
-
-  static KubernetesControllerConverter toKubernetesController(V1beta1DaemonSet controllerSet) {
-    // FIXME: Use this method for k8s client API transforms to fabric8 objects until the code is fully k8s client API compliant.
-    return (new KubernetesControllerConverter(controllerSet.kind, controllerSet.apiVersion, controllerSet.metadata))
-  }
-
-  /*
-   TODO: Create some general-purpose labels to help identify a controller. Feel free to extend or fix this function.
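-   For example, genericLabels("orders", "orders-main-v003", "prod") yields
-   [app: "orders", cluster: "orders-main-v003", namespace: "prod"] (illustrative values).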
-   */
-  static Map genericLabels(String appName, String name, String namespace) {
-    def labels = [
-      "app"      : appName,
-      "cluster"  : name,
-      "namespace": namespace,
-    ]
-
-    return labels
-  }
-
-  static V1beta1DaemonSet toDaemonSet(DeployKubernetesAtomicOperationDescription description,
-                                      String daemonsetName) {
-    def daemonset = new V1beta1DaemonSet()
-    def spec = new V1beta1DaemonSetSpec()
-    spec.template = toPodTemplateSpec(description, daemonsetName)
-
-    def metadata = new V1ObjectMeta()
-    def selector = new V1LabelSelector()
-    metadata.labels = genericLabels(description.application, daemonsetName, description.namespace)
-    if (description.controllerAnnotations) {
-      metadata.annotations = new HashMap()
-      description.controllerAnnotations.forEach({ k, v ->
-        metadata.annotations.put(k, v)
-      })
-    }
-    spec.template.metadata = metadata
-    selector.matchLabels = metadata.labels
-    spec.selector = selector
-
-    if (description.updateController) {
-      def updateController = description.updateController
-      def updateStrategy = new V1beta1DaemonSetUpdateStrategy()
-      def rollingUpdate = new V1beta1RollingUpdateDaemonSet()
-
-      // Note: OnDelete is not handled because it is the default.
-      if (updateController.updateStrategy.type.name() != "Recreate") {
-        updateStrategy.type = updateController.updateStrategy.type
-        if (updateController.updateStrategy.rollingUpdate) {
-          rollingUpdate.maxUnavailable = updateController.updateStrategy.rollingUpdate.maxUnavailable
-          updateStrategy.rollingUpdate = rollingUpdate
-        }
-        spec.updateStrategy = updateStrategy
-      }
-    }
-
-    metadata.name = daemonsetName
-    metadata.namespace = description.namespace
-    daemonset.metadata = metadata
-    daemonset.spec = spec
-    daemonset.apiVersion = description.apiVersion
-    daemonset.kind = description.kind
-
-    return daemonset
-  }
-
-  static Boolean isUpdateControllerEnabled(DeployKubernetesAtomicOperationDescription description) {
-    return description.updateController?.enabled
-  }
-
-  static Boolean validateSequence(DeployKubernetesAtomicOperationDescription description) {
-    return description.sequence?.intValue() >= 0
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/SharedMutex.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/SharedMutex.groovy
deleted file mode 100644
index ebe50c730ad..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/api/SharedMutex.groovy
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2016 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.api - -import java.util.concurrent.Semaphore - -/* - * TODO(lwander): Delete once https://github.com/fabric8io/kubernetes-client/issues/408 is resolved - * - * To ensure that once a k8s client has been created its config isn't overwritten by another thread, we briefly lock - * access to the client with this mutex. - * - * Note, this only needs to happen when accessing the `extensions` API (see above issue for details). - */ -class SharedMutex { - static Semaphore sem - - static void lock() { - if (sem == null) { - sem = new Semaphore(1) - } - - sem.acquire() - } - - static void unlock() { - if (sem == null) { - throw new IllegalStateException("Attempt made to unlock mutex that was never locked") - } - - sem.release() - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/caching/Keys.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/caching/Keys.groovy deleted file mode 100644 index a4ad9b097f3..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/caching/Keys.groovy +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.caching - -import com.netflix.frigga.Names - -class Keys { - static enum Namespace { - APPLICATIONS, - CLUSTERS, - SERVER_GROUPS, - INSTANCES, - LOAD_BALANCERS, - SECURITY_GROUPS, - EVENTS, - DEPLOYMENTS, - ON_DEMAND, - SERVICE_ACCOUNTS, - CONFIG_MAPS, - SECRETS, - - static String provider = "kubernetes" - - final String ns - - private Namespace() { - def parts = name().split('_') - - ns = parts.tail().inject(new StringBuilder(parts.head().toLowerCase())) { val, next -> val.append(next.charAt(0)).append(next.substring(1).toLowerCase()) } - } - - String toString() { - ns - } - } - - static Map parse(String key) { - def parts = key.split(':') - - if (parts.length < 2) { - return null - } - - def result = [provider: parts[0], type: parts[1]] - - if (result.provider != Namespace.provider) { - return null - } - - switch (result.type) { - case Namespace.APPLICATIONS.ns: - result << [ - application: parts[2] - ] - break - case Namespace.CLUSTERS.ns: - def names = Names.parseName(parts[4]) - result << [ - account: parts[2], - application: parts[3], - category: parts[4], // <- {`serverGroup`, `job`, `daemonSet`, etc...} - name: parts[5], - cluster: parts[5], - stack: names.stack, - detail: names.detail, - ] - break - case Namespace.SERVER_GROUPS.ns: - def names = Names.parseName(parts[4]) - result << [ - account: parts[2], - name: parts[4], - namespace: parts[3], - region: parts[3], - serverGroup: parts[4], - application: names.app, - stack: names.stack, - cluster: names.cluster, - detail: names.detail, - sequence: names.sequence?.toString(), - ] - break - case Namespace.LOAD_BALANCERS.ns: - def names = Names.parseName(parts[4]) - result << [ - account: parts[2], - namespace: parts[3], - name: parts[4], - loadBalancer: parts[4], - application: names.app, - stack: names.stack, - detail: names.detail - ] - break - case Namespace.INSTANCES.ns: - def names = Names.parseName(parts[4]) - result << [ - account: parts[2], - namespace: parts[3], - region: parts[3], - name: parts[4], - instanceId: parts[4], - application: names.app, - ] - break - case Namespace.SECURITY_GROUPS.ns: - def names = Names.parseName(parts[4]) - result << [ - account: parts[2], - namespace: parts[3], - region: parts[3], - name: parts[4], - id: parts[4], - application: names.app, - ] - break - case Namespace.DEPLOYMENTS.ns: - def names = Names.parseName(parts[4]) - result << [ - account: parts[2], - namespace: parts[3], - region: parts[3], - name: parts[4], - cluster: names.cluster, - application: names.app, - stack: names.stack, - detail: names.detail, - ] - break - case Namespace.SERVICE_ACCOUNTS.ns: - result << [ - account: parts[2], - namespace: parts[3], - region: parts[3], - name: parts[4], - serviceAccountName: parts[4], - ] - break - case Namespace.CONFIG_MAPS.ns: - result << [ - account: parts[2], - namespace: parts[3], - region: parts[3], - name: parts[4], - configMapName: parts[4], - ] - break - case Namespace.SECRETS.ns: - result << [ - account: parts[2], - namespace: parts[3], - region: parts[3], - name: parts[4], - secretName: parts[4], - ] - break - default: - return null - break - } - result - } - - static String getApplicationKey(String application) { - "${Namespace.provider}:${Namespace.APPLICATIONS}:${application}" - } - - static String getClusterKey(String account, String application, String category, String clusterName) { - "${Namespace.provider}:${Namespace.CLUSTERS}:${account}:${application}:${category}:${clusterName}" - } - - static String 
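-  // Example (hypothetical values): getServerGroupKey("my-k8s-account", "prod", "orders-main-v003")
-  // yields "kubernetes:serverGroups:my-k8s-account:prod:orders-main-v003", which parse() above
-  // maps back to [account: "my-k8s-account", namespace: "prod", serverGroup: "orders-main-v003",
-  // application: "orders", stack: "main", cluster: "orders-main", sequence: "3", ...].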
getServerGroupKey(String account, String namespace, String replicationControllerName) { - "${Namespace.provider}:${Namespace.SERVER_GROUPS}:${account}:${namespace}:${replicationControllerName}" - } - - static String getLoadBalancerKey(String account, String namespace, String serviceName) { - "${Namespace.provider}:${Namespace.LOAD_BALANCERS}:${account}:${namespace}:${serviceName}" - } - - static String getInstanceKey(String account, String namespace, String name) { - "${Namespace.provider}:${Namespace.INSTANCES}:${account}:${namespace}:${name}" - } - - static String getSecurityGroupKey(String account, String namespace, String ingressName) { - "${Namespace.provider}:${Namespace.SECURITY_GROUPS}:${account}:${namespace}:${ingressName}" - } - - static String getDeploymentKey(String account, String namespace, String deploymentName) { - "${Namespace.provider}:${Namespace.DEPLOYMENTS}:${account}:${namespace}:${deploymentName}" - } - - static String getServiceAccountKey(String account, String namespace, String serviceAccountName) { - "${Namespace.provider}:${Namespace.SERVICE_ACCOUNTS}:${account}:${namespace}:${serviceAccountName}" - } - - static String getConfigMapKey(String account, String namespace, String configMapName) { - "${Namespace.provider}:${Namespace.CONFIG_MAPS}:${account}:${namespace}:${configMapName}" - } - - static String getSecretKey(String account, String namespace, String secretName) { - "${Namespace.provider}:${Namespace.SECRETS}:${account}:${namespace}:${secretName}" - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesJobNameResolver.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesJobNameResolver.groovy deleted file mode 100644 index 5a049b93239..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesJobNameResolver.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy - -import com.netflix.frigga.NameBuilder - -class KubernetesJobNameResolver extends NameBuilder { - KubernetesJobNameResolver() { - - } - - String createJobName(String app, String stack, String detail) { - def prefix = super.combineAppStackDetail(app, stack, detail).toString() - def randString = Long.toHexString(Double.doubleToLongBits(Math.random())); - return "$prefix-$randString" - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesServerGroupNameResolver.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesServerGroupNameResolver.groovy deleted file mode 100644 index 73126be599d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesServerGroupNameResolver.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.apps.ReplicaSet - -class KubernetesServerGroupNameResolver extends AbstractServerGroupNameResolver { - - private static final String PHASE = "DEPLOY" - - private final String namespace - private final KubernetesV1Credentials credentials - - KubernetesServerGroupNameResolver(String namespace, KubernetesV1Credentials credentials) { - this.namespace = namespace - this.credentials = credentials - } - - @Override - String getPhase() { - return PHASE - } - - @Override - String getRegion() { - return namespace - } - - @Override - List getTakenSlots(String clusterName) { - def replicationControllers = credentials.apiAdaptor.getReplicationControllers(namespace) ?: [] - def replicaSets = credentials.apiAdaptor.getReplicaSets(namespace) ?: [] - - return replicationControllers.findResults { ReplicationController replicationController -> - def names = Names.parseName(replicationController.metadata.name) - - if (names.cluster == clusterName) { - return new AbstractServerGroupNameResolver.TakenSlot( - serverGroupName: replicationController.metadata.name, - sequence : names.sequence, - createdTime : new Date(KubernetesModelUtil.translateTime(replicationController.metadata.creationTimestamp)) - ) - } else { - return null - } - } + replicaSets.findResults { ReplicaSet replicaSet -> - def names = Names.parseName(replicaSet.metadata.name) - - if (names.cluster == clusterName) { - return new AbstractServerGroupNameResolver.TakenSlot( - serverGroupName: replicaSet.metadata.name, - sequence : names.sequence, - 
createdTime : new Date(KubernetesModelUtil.translateTime(replicaSet.metadata.creationTimestamp))
-        )
-      } else {
-        return null
-      }
-
-    }
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesUtil.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesUtil.groovy
deleted file mode 100644
index aaaa783ed6c..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/KubernetesUtil.groovy
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy
-
-import com.netflix.frigga.NameValidation
-import com.netflix.frigga.Names
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesImageDescription
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesIllegalArgumentException
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials
-import io.fabric8.kubernetes.api.model.batch.Job
-import io.fabric8.kubernetes.api.model.Pod
-import io.fabric8.kubernetes.api.model.ReplicationController
-import io.fabric8.kubernetes.api.model.apps.ReplicaSet
-import org.springframework.beans.factory.annotation.Value
-
-class KubernetesUtil {
-  static String SECURITY_GROUP_LABEL_PREFIX = "security-group-"
-  static String LOAD_BALANCER_LABEL_PREFIX = "load-balancer-"
-  static String SERVER_GROUP_LABEL = "replication-controller"
-  static String DEPRECATED_SERVER_GROUP_KIND = "ReplicationController"
-  static String SERVER_GROUP_KIND = "ReplicaSet"
-  static String DEPLOYMENT_KIND = "Deployment"
-  static String JOB_LABEL = "job"
-  static String CONTROLLERS_STATEFULSET_KIND = "StatefulSet"
-  static String CONTROLLERS_DAEMONSET_KIND = "DaemonSet"
-  @Value("\${kubernetes.defaultRegistry:gcr.io}")
-  static String DEFAULT_REGISTRY
-  private static int SECURITY_GROUP_LABEL_PREFIX_LENGTH = SECURITY_GROUP_LABEL_PREFIX.length()
-  private static int LOAD_BALANCER_LABEL_PREFIX_LENGTH = LOAD_BALANCER_LABEL_PREFIX.length()
-
-  static String ENABLE_DISABLE_ANNOTATION = "service.spinnaker.io/enabled"
-
-  static String getNextSequence(String clusterName, String namespace, KubernetesV1Credentials credentials) {
-    def maxSeqNumber = -1
-    def replicationControllers = credentials.apiAdaptor.getReplicationControllers(namespace)
-
-    replicationControllers.forEach({ replicationController ->
-      def names = Names.parseName(replicationController.getMetadata().getName())
-
-      if (names.cluster == clusterName) {
-        maxSeqNumber = Math.max(maxSeqNumber, names.sequence)
-      }
-    })
-
-    String.format("%03d", ++maxSeqNumber)
-  }
-
-  static List getImagePullSecrets(ReplicationController rc) {
-    rc.spec?.template?.spec?.imagePullSecrets?.collect({ it.name })
-  }
-
-  private static String extractRegistry(String image, KubernetesImageDescription
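-  // Example (hypothetical image): given "gcr.io/my-project/my-app:v1.2", the text before the
-  // first '/' contains a '.', so it is taken as the registry; the description ends up with
-  // registry "gcr.io", repository "my-project/my-app", and tag "v1.2".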
description) { - def index = image.indexOf('/') - // No slash means we only provided a repository name & optional tag. - if (index >= 0) { - def sPrefix = image.substring(0, index) - - // Check if the content before the slash is a registry (either localhost, or a URL) - if (sPrefix.startsWith('localhost') || sPrefix.contains('.')) { - description.registry = sPrefix - image = image.substring(index + 1) - } - } - image - } - - private static String extractDigestOrTag(String image, KubernetesImageDescription description) { - def digestIndex = image.indexOf('@') - if (digestIndex >= 0) { - description.digest = image.substring(digestIndex + 1) - image = image.substring(0, digestIndex) - } else { - def tagIndex = image.indexOf(':') - if (tagIndex >= 0) { - description.tag = image.substring(tagIndex + 1) - image = image.substring(0, tagIndex) - } - } - image - } - - private static void populateFieldsFromUri(KubernetesImageDescription image) { - def uri = image.uri - if (uri) { - uri = extractRegistry(uri, image) - uri = extractDigestOrTag(uri, image) - // The repository is what's left after extracting the registry, and digest/tag - image.repository = uri - } - } - - static KubernetesImageDescription buildImageDescription(String image) { - def result = new KubernetesImageDescription() - result.uri = image - normalizeImageDescription(result) - result - } - - static Void normalizeImageDescription(KubernetesImageDescription image) { - populateFieldsFromUri(image) - - if (!image.registry) { - image.registry = DEFAULT_REGISTRY - } - - if (!image.tag && !image.digest) { - image.tag = "latest" - } - - if (!image.repository) { - throw new IllegalArgumentException("Image descriptions must provide a repository.") - } - } - - static String getImageId(KubernetesImageDescription image) { - return getImageId(image.registry, image.repository, image.tag, image.digest) - } - - static String getImageId(String registry, String repository, String tag, String digest) { - def tagSuffix = digest ? "@$digest" : ":$tag" - if (registry) { - return "$registry/$repository$tagSuffix".toString() - } else { - return "$repository$tagSuffix".toString() - } - } - - static getImageIdWithoutRegistry(KubernetesImageDescription image) { - def tagSuffix = image.digest ? "@$image.digest" : ":$image.tag" - "$image.repository$tagSuffix".toString() - } - - static String validateNamespace(KubernetesV1Credentials credentials, String namespace) { - def resolvedNamespace = namespace ?: "default" - if (!credentials.isRegisteredNamespace(resolvedNamespace)) { - def error = "Registered namespaces are ${credentials.getDeclaredNamespaces()}." - if (namespace) { - error = "Namespace '$namespace' was not registered with provided credentials. $error" - } else { - error = "No provided namespace assumed to mean 'default' was not registered with provided credentials. 
$error" - } - throw new KubernetesIllegalArgumentException(error) - } - return resolvedNamespace - } - - static Map getPodLoadBalancerStates(Pod pod) { - pod.metadata?.labels?.collectEntries { key, val -> - if (isLoadBalancerLabel(key)) { - return [(key): val] - } else { - return [:] - } - } as Map // Groovy resolves [:] as type ?CaptureOf, which is odd since key/val are clearly strings - } - - static List getLoadBalancers(Map labels) { - labels.findResults { key, val -> - if (isLoadBalancerLabel(key)) { - return key.substring(LOAD_BALANCER_LABEL_PREFIX_LENGTH, key.length()) - } else { - return null - } - } - } - - static List getLoadBalancers(Pod pod) { - return getLoadBalancers(pod.metadata?.labels ?: [:]) - } - - static List getLoadBalancers(ReplicaSet rs) { - return getLoadBalancers(rs.spec?.template?.metadata?.labels ?: [:]) - } - - static List getLoadBalancers(ReplicationController rc) { - return getLoadBalancers(rc.spec?.template?.metadata?.labels ?: [:]) - } - - static List getLoadBalancers(Job job) { - return getLoadBalancers(job.spec?.template?.metadata?.labels ?: [:]) - } - - static Boolean isLoadBalancerLabel(String key) { - key.startsWith(LOAD_BALANCER_LABEL_PREFIX) - } - - static String loadBalancerKey(String loadBalancer) { - return String.format("$LOAD_BALANCER_LABEL_PREFIX%s".toString(), loadBalancer) - } - - static String combineAppStackDetail(String appName, String stack, String detail) { - NameValidation.notEmpty(appName, "appName"); - - // Use empty strings, not null references that output "null" - stack = stack != null ? stack : ""; - - if (detail != null && !detail.isEmpty()) { - return appName + "-" + stack + "-" + detail; - } - - if (!stack.isEmpty()) { - return appName + "-" + stack; - } - - return appName; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/KubernetesAtomicOperationConverterHelper.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/KubernetesAtomicOperationConverterHelper.groovy deleted file mode 100644 index ae32a223654..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/KubernetesAtomicOperationConverterHelper.groovy +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters - -import com.fasterxml.jackson.databind.DeserializationFeature -import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport - -class KubernetesAtomicOperationConverterHelper { - static Object convertDescription(Map input, - AbstractAtomicOperationsCredentialsSupport credentialsSupport, - Class targetDescriptionType) { - def account = input.account as String - def removedAccount = input.remove('credentials') - account = account ?: removedAccount - - // Save these to re-assign after ObjectMapper does its work. - def credentials = (KubernetesNamedAccountCredentials) credentialsSupport.getCredentialsObject(account as String) - - def converted = (KubernetesAtomicOperationDescription) credentialsSupport.objectMapper - .copy() - .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) - .convertValue(input, targetDescriptionType) - - // Re-assign the credentials. - converted.credentials = credentials - if (removedAccount) { - input.credentials = removedAccount - converted.account = removedAccount - } - - converted - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/autoscaler/UpsertKubernetesAutoscalerAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/autoscaler/UpsertKubernetesAutoscalerAtomicOperationConverter.groovy deleted file mode 100644 index e5b1e364f7f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/autoscaler/UpsertKubernetesAutoscalerAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.autoscaler - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler.KubernetesAutoscalerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.autoscaler.UpsertKubernetesAutoscalerAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.UPSERT_SCALING_POLICY) -@Component -class UpsertKubernetesAutoscalerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new UpsertKubernetesAutoscalerAtomicOperation(convertDescription(input)) - } - - @Override - KubernetesAutoscalerDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, KubernetesAutoscalerDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/DeregisterKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/DeregisterKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index 81c97d98bd1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/DeregisterKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance.DeregisterKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) -@Component -class DeregisterKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DeregisterKubernetesAtomicOperation(convertDescription(input)) - } - - @Override - AbstractRegistrationKubernetesAtomicOperationDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, AbstractRegistrationKubernetesAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/RegisterKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/RegisterKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index f20a317bc57..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/RegisterKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance.RegisterKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) -@Component -class RegisterKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new RegisterKubernetesAtomicOperation(convertDescription(input)) - } - - @Override - AbstractRegistrationKubernetesAtomicOperationDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, AbstractRegistrationKubernetesAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/TerminateKubernetesInstancesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/TerminateKubernetesInstancesAtomicOperationConverter.groovy deleted file mode 100644 index 7f58eac8072..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/instance/TerminateKubernetesInstancesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.KubernetesInstanceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance.TerminateKubernetesInstancesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.TERMINATE_INSTANCES) -@Component -class TerminateKubernetesInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new TerminateKubernetesInstancesAtomicOperation(convertDescription(input)) - } - - @Override - KubernetesInstanceDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, KubernetesInstanceDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/CloneKubernetesJobAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/CloneKubernetesJobAtomicOperationConverter.groovy deleted file mode 100644 index 240afe040fa..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/CloneKubernetesJobAtomicOperationConverter.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.job - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.CloneKubernetesJobAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.job.CloneKubernetesJobAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.CLONE_JOB) -@Component -class CloneKubernetesJobAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new CloneKubernetesJobAtomicOperation(convertDescription(input)) - } - - CloneKubernetesJobAtomicOperationDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, CloneKubernetesJobAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/DestroyKubernetesJobAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/DestroyKubernetesJobAtomicOperationConverter.groovy deleted file mode 100644 index 208843154bd..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/DestroyKubernetesJobAtomicOperationConverter.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.job - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.KubernetesJobDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.job.DestroyKubernetesJobAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.DESTROY_JOB) -@Component -class DestroyKubernetesJobAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new DestroyKubernetesJobAtomicOperation(convertDescription(input)) - } - - KubernetesJobDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, KubernetesJobDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/RunKubernetesJobAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/RunKubernetesJobAtomicOperationConverter.groovy deleted file mode 100644 index 180a82f3dcc..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/job/RunKubernetesJobAtomicOperationConverter.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.job - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.RunKubernetesJobDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.job.RunKubernetesJobAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.RUN_JOB) -@Component -class RunKubernetesJobAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new RunKubernetesJobAtomicOperation(convertDescription(input)) - } - - RunKubernetesJobDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, RunKubernetesJobDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationConverter.groovy deleted file mode 100644 index 5ab2aebb508..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationConverter.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.loadbalancer - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.DeleteKubernetesLoadBalancerAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.loadbalancer.DeleteKubernetesLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.DELETE_LOAD_BALANCER) -@Component -class DeleteKubernetesLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new DeleteKubernetesLoadBalancerAtomicOperation(convertDescription(input)) - } - - DeleteKubernetesLoadBalancerAtomicOperationDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, DeleteKubernetesLoadBalancerAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationConverter.groovy deleted file mode 100644 index 1d347100a92..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationConverter.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.loadbalancer - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.loadbalancer.UpsertKubernetesLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.UPSERT_LOAD_BALANCER) -@Component -class UpsertKubernetesLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new UpsertKubernetesLoadBalancerAtomicOperation(convertDescription(input)) - } - - KubernetesLoadBalancerDescription convertDescription(Map input) { - if (input.loadBalancerName) { - input.name = input.loadBalancerName - } - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, KubernetesLoadBalancerDescription) - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/securitygroup/DeleteKubernetesSecurityGroupAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/securitygroup/DeleteKubernetesSecurityGroupAtomicOperationConverter.groovy deleted file mode 100644 index 009c50e885f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/securitygroup/DeleteKubernetesSecurityGroupAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.securitygroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.DeleteKubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.securitygroup.DeleteKubernetesSecurityGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.DELETE_SECURITY_GROUP) -@Component -class DeleteKubernetesSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DeleteKubernetesSecurityGroupAtomicOperation(convertDescription(input)) - } - - @Override - DeleteKubernetesSecurityGroupDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, DeleteKubernetesSecurityGroupDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/securitygroup/UpsertKubernetesSecurityGroupConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/securitygroup/UpsertKubernetesSecurityGroupConverter.groovy deleted file mode 100644 index 82d1ba8dd29..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/securitygroup/UpsertKubernetesSecurityGroupConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.securitygroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.securitygroup.UpsertKubernetesSecurityGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.UPSERT_SECURITY_GROUP) -@Component -class UpsertKubernetesSecurityGroupConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new UpsertKubernetesSecurityGroupAtomicOperation(convertDescription(input)) - } - - @Override - KubernetesSecurityGroupDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, KubernetesSecurityGroupDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/CloneKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/CloneKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index 62c5221f95c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/CloneKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.CloneKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.CloneKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.CLONE_SERVER_GROUP) -@Component -class CloneKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new CloneKubernetesAtomicOperation(convertDescription(input)) - } - - CloneKubernetesAtomicOperationDescription convertDescription(Map input) { - def converted = KubernetesAtomicOperationConverterHelper.convertDescription(input, this, CloneKubernetesAtomicOperationDescription) - converted.sourceCredentials = (KubernetesNamedAccountCredentials) this.getCredentialsObject(converted.source.account) - return converted - - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DeployKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DeployKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index 153b16fb0f1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DeployKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.DeployKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.CREATE_SERVER_GROUP) -@Component("deployKubernetesDescription") -class DeployKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new DeployKubernetesAtomicOperation(convertDescription(input)) - } - - DeployKubernetesAtomicOperationDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, DeployKubernetesAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DestroyKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DestroyKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index ea307716d74..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DestroyKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.DestroyKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.DESTROY_SERVER_GROUP) -@Component -class DestroyKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DestroyKubernetesAtomicOperation(convertDescription(input)) - } - - @Override - KubernetesServerGroupDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, KubernetesServerGroupDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DisableKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DisableKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index 1586c04fb0a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DisableKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.EnableDisableKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.DisableKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.DISABLE_SERVER_GROUP) -@Component -class DisableKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new DisableKubernetesAtomicOperation(convertDescription(input)) - } - - KubernetesServerGroupDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, EnableDisableKubernetesAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/EnableKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/EnableKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index 3d6a3cd70c0..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/EnableKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.EnableDisableKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.EnableKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.ENABLE_SERVER_GROUP) -@Component -class EnableKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new EnableKubernetesAtomicOperation(convertDescription(input)) - } - - KubernetesServerGroupDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, EnableDisableKubernetesAtomicOperationDescription) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/ResizeKubernetesAtomicOperationConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/ResizeKubernetesAtomicOperationConverter.groovy deleted file mode 100644 index 7eede9e711c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/ResizeKubernetesAtomicOperationConverter.groovy +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.ResizeKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.ResizeKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@KubernetesOperation(AtomicOperations.RESIZE_SERVER_GROUP) -@Component -class ResizeKubernetesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - AtomicOperation convertOperation(Map input) { - new ResizeKubernetesAtomicOperation(convertDescription(input)) - } - - ResizeKubernetesAtomicOperationDescription convertDescription(Map input) { - KubernetesAtomicOperationConverterHelper.convertDescription(input, this, ResizeKubernetesAtomicOperationDescription) - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/KubernetesKindAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/KubernetesKindAtomicOperationDescription.groovy deleted file mode 100644 index d870075ba1d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/KubernetesKindAtomicOperationDescription.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description - -import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -// Describes an arbitrary Kubernetes resource, identified by its kind and apiVersion -@AutoClone -@Canonical -class KubernetesKindAtomicOperationDescription extends KubernetesAtomicOperationDescription { - String kind - String apiVersion -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/autoscaler/KubernetesAutoscalerDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/autoscaler/KubernetesAutoscalerDescription.groovy deleted file mode 100644 index 12636b5a91c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/autoscaler/KubernetesAutoscalerDescription.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesScalingPolicy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.ResizeKubernetesAtomicOperationDescription - -class KubernetesAutoscalerDescription extends ResizeKubernetesAtomicOperationDescription { - KubernetesScalingPolicy scalingPolicy - - KubernetesAutoscalerDescription() { } - - KubernetesAutoscalerDescription(String serverGroupName, DeployKubernetesAtomicOperationDescription description) { - this.capacity = description.capacity - this.scalingPolicy = description.scalingPolicy - this.namespace = description.namespace - this.serverGroupName = serverGroupName - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/instance/AbstractRegistrationKubernetesAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/instance/AbstractRegistrationKubernetesAtomicOperationDescription.groovy deleted file mode 100644 index 9db5033735c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/instance/AbstractRegistrationKubernetesAtomicOperationDescription.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance - -class AbstractRegistrationKubernetesAtomicOperationDescription extends KubernetesInstanceDescription { - List loadBalancerNames -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/instance/KubernetesInstanceDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/instance/KubernetesInstanceDescription.groovy deleted file mode 100644 index 48c79ff09da..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/instance/KubernetesInstanceDescription.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription - -class KubernetesInstanceDescription extends KubernetesKindAtomicOperationDescription { - List instanceIds - String namespace -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/CloneKubernetesJobAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/CloneKubernetesJobAtomicOperationDescription.groovy deleted file mode 100644 index 1b0a7db52ad..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/CloneKubernetesJobAtomicOperationDescription.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job - -import groovy.transform.Canonical - -class CloneKubernetesJobAtomicOperationDescription extends RunKubernetesJobDescription { - KubernetesCloneJobSource source -} - -@Canonical -class KubernetesCloneJobSource { - String jobName - String namespace -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/KubernetesJobDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/KubernetesJobDescription.groovy deleted file mode 100644 index 260b34df6ab..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/KubernetesJobDescription.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription - -class KubernetesJobDescription extends KubernetesKindAtomicOperationDescription { - String jobName - String namespace -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/RunKubernetesJobDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/RunKubernetesJobDescription.groovy deleted file mode 100644 index 9db17686ee8..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/job/RunKubernetesJobDescription.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesDnsPolicy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesToleration -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class RunKubernetesJobDescription extends KubernetesKindAtomicOperationDescription { - String application - String stack - String freeFormDetails - String namespace - Boolean hostNetwork = false - Map nodeSelector - // Legacy single-container field; should be deprecated at some point in favor of the 'containers' list below - KubernetesContainerDescription container - List containers - List volumeSources - Map labels - Map annotations - String serviceAccountName - KubernetesDnsPolicy dnsPolicy - List tolerations -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationDescription.groovy deleted file mode 100644 index 863022d3880..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationDescription.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription - -class DeleteKubernetesLoadBalancerAtomicOperationDescription extends KubernetesKindAtomicOperationDescription { - String loadBalancerName - String namespace -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/loadbalancer/KubernetesLoadBalancerDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/loadbalancer/KubernetesLoadBalancerDescription.groovy deleted file mode 100644 index 6bfdb8b1606..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/loadbalancer/KubernetesLoadBalancerDescription.groovy +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2016 Google, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer - -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class KubernetesLoadBalancerDescription extends KubernetesKindAtomicOperationDescription implements DeployDescription { - String name - // If `loadBalancerName` is given in the description, it will override `name`. - String loadBalancerName - String app - String stack - String detail - String namespace - - List ports - List externalIps - String clusterIp - String loadBalancerIp - String sessionAffinity - - String serviceType - - Map serviceAnnotations - Map serviceLabels -} - -@AutoClone -@Canonical -class KubernetesNamedServicePort { - String name - String protocol - int port - int targetPort - int nodePort -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/securitygroup/DeleteKubernetesSecurityGroupDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/securitygroup/DeleteKubernetesSecurityGroupDescription.groovy deleted file mode 100644 index 4f68b6fad04..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/securitygroup/DeleteKubernetesSecurityGroupDescription.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription - -class DeleteKubernetesSecurityGroupDescription extends KubernetesKindAtomicOperationDescription { - String securityGroupName - String namespace -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/securitygroup/KubernetesSecurityGroupDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/securitygroup/KubernetesSecurityGroupDescription.groovy deleted file mode 100644 index c47ac012daa..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/securitygroup/KubernetesSecurityGroupDescription.groovy +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class KubernetesSecurityGroupDescription extends KubernetesKindAtomicOperationDescription { - String securityGroupName - String app - String stack - String detail - String namespace - - KubernetesIngressBackend ingress - List tls - List rules - - Map annotations - Map labels -} - -@AutoClone -@Canonical -class KubernetesIngressBackend { - String serviceName - int port -} - -@AutoClone -@Canonical -class KubernetesIngressTlS { - List hosts - String secretName -} - -@AutoClone -@Canonical -class KubernetesIngressRule { - String host - KubernetesIngressRuleValue value -} - -@AutoClone -@Canonical -class KubernetesIngressRuleValue { - KubernetesHttpIngressRuleValue http -} - -@AutoClone -@Canonical -class KubernetesHttpIngressRuleValue { - List paths -} - -@AutoClone -@Canonical -class KubernetesHttpIngressPath { - String path - KubernetesIngressBackend ingress -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/CloneKubernetesAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/CloneKubernetesAtomicOperationDescription.groovy deleted file mode 100644 index 3537feca293..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/CloneKubernetesAtomicOperationDescription.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials - -class CloneKubernetesAtomicOperationDescription extends DeployKubernetesAtomicOperationDescription { - KubernetesNamedAccountCredentials sourceCredentials -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/DeployKubernetesAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/DeployKubernetesAtomicOperationDescription.groovy deleted file mode 100644 index 3b372d346d5..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/DeployKubernetesAtomicOperationDescription.groovy +++ /dev/null @@ -1,564 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.annotation.JsonProperty -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class DeployKubernetesAtomicOperationDescription extends KubernetesKindAtomicOperationDescription implements DeployDescription { - String application - String stack - String freeFormDetails - String namespace - String restartPolicy - Integer targetSize - Boolean hostNetwork - List loadBalancers - List securityGroups - List containers - List initContainers - List volumeSources - Capacity capacity - KubernetesScalingPolicy scalingPolicy - Map replicaSetAnnotations - Map controllerAnnotations - Map podAnnotations - Map volumeAnnotations - Map nodeSelector - KubernetesSecurityContext securityContext - KubernetesDeployment deployment - KubernetesUpdateController updateController - Long terminationGracePeriodSeconds - String serviceAccountName - Integer sequence - KubernetesPodSpecDescription podSpec - String podManagementPolicy - KubernetesDnsPolicy dnsPolicy - Source source - List volumeClaims - List tolerations - - @JsonIgnore - Set imagePullSecrets -} - -@AutoClone -@Canonical -class Capacity { - Integer min - Integer max - Integer desired -} - -@AutoClone -@Canonical -class KubernetesContainerPort { - String name - Integer containerPort - String protocol - String hostIp - Integer hostPort -} - -@AutoClone -@Canonical -class KubernetesImageDescription { - String uri - String registry - String repository - String tag - String digest -} - -@AutoClone -@Canonical -class KubernetesContainerDescription { - String name - KubernetesImageDescription imageDescription - KubernetesPullPolicy imagePullPolicy - - KubernetesResourceDescription requests - KubernetesResourceDescription limits - - List ports - - KubernetesProbe livenessProbe - KubernetesProbe readinessProbe - - KubernetesLifecycle lifecycle - - List volumeMounts - List envVars - List envFrom - - List command - List args - - KubernetesSecurityContext securityContext -} - -@AutoClone -@Canonical -class KubernetesDeployment { - boolean enabled - KubernetesStrategy deploymentStrategy - int minReadySeconds - Integer revisionHistoryLimit // May be null - boolean paused - Integer rollbackRevision // May be null - Integer progressRollbackSeconds // May be null -} - -@AutoClone -@Canonical -class KubernetesUpdateController { - boolean enabled - KubernetesStrategy updateStrategy - int minReadySeconds - Integer revisionHistoryLimit -} - -@AutoClone -@Canonical -class KubernetesStrategy { - KubernetesStrategyType type - KubernetesRollingUpdate rollingUpdate -} - -@AutoClone -@Canonical -class KubernetesRollingUpdate { - String maxUnavailable - String maxSurge - Integer partition -} - -enum KubernetesStrategyType { - Recreate, - RollingUpdate -} - -@AutoClone -@Canonical -class KubernetesLifecycle { - KubernetesHandler postStart - KubernetesHandler preStop -} - -@AutoClone -@Canonical -class KubernetesEnvVar { - String name - String value - KubernetesEnvVarSource envSource -} - -@AutoClone -@Canonical -class KubernetesEnvFromSource { - String prefix - KubernetesConfigMapEnvSource configMapRef - KubernetesSecretEnvSource secretRef -} - -@AutoClone -@Canonical -class 
KubernetesConfigMapEnvSource { - String name - boolean optional -} - -@AutoClone -@Canonical -class KubernetesSecretEnvSource { - String name - boolean optional -} - -@AutoClone -@Canonical -class KubernetesScalingPolicy { - KubernetesCpuUtilization cpuUtilization -} - -@AutoClone -@Canonical -class KubernetesCpuUtilization { - Integer target -} - -enum KubernetesPullPolicy { - @JsonProperty("IFNOTPRESENT") - IfNotPresent, - - @JsonProperty("ALWAYS") - Always, - - @JsonProperty("NEVER") - Never, -} - -@AutoClone -@Canonical -class KubernetesEnvVarSource { - KubernetesSecretSource secretSource - KubernetesConfigMapSource configMapSource - KubernetesFieldRefSource fieldRef - KubernetesResourceFieldRefSource resourceFieldRef -} - -@AutoClone -@Canonical -class KubernetesResourceFieldRefSource { - String resource - String containerName - String divisor -} - -@AutoClone -@Canonical -class KubernetesFieldRefSource { - String fieldPath -} - -@AutoClone -@Canonical -class KubernetesSecretSource { - String secretName - String key - Boolean optional = true -} - -@AutoClone -@Canonical -class KubernetesConfigMapSource { - String configMapName - String key - Boolean optional = true -} - -@AutoClone -@Canonical -class KubernetesVolumeMount { - String name - Boolean readOnly - String mountPath - String subPath -} - -enum KubernetesVolumeSourceType { - @JsonProperty("HOSTPATH") - HostPath, - - @JsonProperty("EMPTYDIR") - EmptyDir, - - @JsonProperty("PERSISTENTVOLUMECLAIM") - PersistentVolumeClaim, - - @JsonProperty("SECRET") - Secret, - - @JsonProperty("CONFIGMAP") - ConfigMap, - - @JsonProperty("AWSELASTICBLOCKSTORE") - AwsElasticBlockStore, - - @JsonProperty("NFS") - NFS, - - @JsonProperty("UNSUPPORTED") - Unsupported, -} - -enum KubernetesDnsPolicy { - @JsonProperty("ClusterFirst") - ClusterFirst, - - @JsonProperty("Default") - Default, - - @JsonProperty("ClusterFirstWithHostNet") - ClusterFirstWithHostNet, -} - -enum KubernetesStorageMediumType { - @JsonProperty("DEFAULT") - Default, - - @JsonProperty("MEMORY") - Memory, -} - -@AutoClone -@Canonical -class KubernetesVolumeSource { - String name - KubernetesVolumeSourceType type - KubernetesHostPath hostPath - KubernetesEmptyDir emptyDir - KubernetesPersistentVolumeClaim persistentVolumeClaim - KubernetesSecretVolumeSource secret - KubernetesConfigMapVolumeSource configMap - KubernetesAwsElasticBlockStoreVolumeSource awsElasticBlockStore - KubernetesNfsVolumeSource nfs -} - -@AutoClone -@Canonical -class KubernetesConfigMapVolumeSource { - String configMapName - List items - Integer defaultMode -} - -@AutoClone -@Canonical -class KubernetesAwsElasticBlockStoreVolumeSource { - String volumeId - String fsType - Integer partition -} - -@AutoClone -@Canonical -class KubernetesNfsVolumeSource { - String server - String path - Boolean readOnly -} - -@AutoClone -@Canonical -class KubernetesKeyToPath { - String key - String path - Integer defaultMode -} - -@AutoClone -@Canonical -class KubernetesSecretVolumeSource { - String secretName -} - -@AutoClone -@Canonical -class KubernetesHostPath { - String path -} - -@AutoClone -@Canonical -class KubernetesEmptyDir { - KubernetesStorageMediumType medium -} - -@AutoClone -@Canonical -class KubernetesPersistentVolumeClaim { - String claimName - Boolean readOnly -} - -@AutoClone -@Canonical -class KubernetesProbe { - KubernetesHandler handler - int initialDelaySeconds - int timeoutSeconds - int periodSeconds - int successThreshold - int failureThreshold -} - -enum KubernetesHandlerType { - EXEC, TCP, HTTP -} - 
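The probe model removed in this hunk pairs KubernetesProbe (above) with a KubernetesHandler (just below) that carries exactly one of the EXEC, TCP, or HTTP actions, discriminated by KubernetesHandlerType. A minimal sketch of an HTTP liveness probe built from these @Canonical models; the field values are illustrative, not taken from this diff:

    // Illustrative values only; the classes are the Groovy models in this hunk.
    def livenessProbe = new KubernetesProbe(
      handler: new KubernetesHandler(
        type: KubernetesHandlerType.HTTP,
        httpGetAction: new KubernetesHttpGetAction(path: '/healthz', port: 8080, uriScheme: 'HTTP')
      ),
      initialDelaySeconds: 30,
      periodSeconds: 10,
      failureThreshold: 3
    )

Only the action matching the declared type is expected to be consulted; the other two handler fields stay null.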
-@AutoClone -@Canonical -class KubernetesHandler { - KubernetesHandlerType type - KubernetesExecAction execAction - KubernetesHttpGetAction httpGetAction - KubernetesTcpSocketAction tcpSocketAction -} - -@AutoClone -@Canonical -class KubernetesExecAction { - List commands -} - -@AutoClone -@Canonical -class KubernetesHttpGetAction { - String path - int port - String host - String uriScheme - List httpHeaders -} - -@AutoClone -@Canonical -class KubernetesTcpSocketAction { - int port -} - -@AutoClone -@Canonical -class KubernetesResourceDescription { - String memory - String cpu -} - -@AutoClone -@Canonical -class KeyValuePair { - String name - String value -} - -@AutoClone -@Canonical -class KubernetesSecurityContext { - KubernetesCapabilities capabilities - Boolean privileged - KubernetesSeLinuxOptions seLinuxOptions - Long runAsUser - Boolean runAsNonRoot - Boolean readOnlyRootFilesystem -} - -@AutoClone -@Canonical -class KubernetesCapabilities { - List add - List drop -} - -@AutoClone -@Canonical -class KubernetesSeLinuxOptions { - String user - String role - String type - String level -} - -@AutoClone -@Canonical -//Base on model https://kubernetes.io/docs/resources-reference/v1.5/#podspec-v1 -//which will map to Kubernetes 1.5 API version -class KubernetesPodSpecDescription { - Long activeDeadlineSeconds - List containers - String dnsPolicy - Boolean hostIPC - Boolean hostNetwork - Boolean hostPID - String hostname - @JsonIgnore - Set imagePullSecrets - String nodeName - Map nodeSelector - String restartPolicy - KubernetesSecurityContext securityContext - String serviceAccountName - String subdomain - Long terminationGracePeriodSeconds - List volumeSources -} - -@AutoClone -@Canonical -class Source { - String serverGroupName - String region - String namespace - String account - Boolean useSourceCapacity -} - -class KubernetesPersistentVolumeClaimDescription { - String claimName - List accessModes - KubernetesPeristentResourceRequirement requirements - KubernetesPersistentLabelSelector selector - String storageClassName -} - -@AutoClone -@Canonical -class KubernetesPeristentResourceRequirement { - Map limits - Map requests -} - -@AutoClone -@Canonical -class KubernetesPersistentLabelSelector { - List matchExpressions - Map matchLabels -} - -@AutoClone -@Canonical -class KubernetesLabelSelectorRequirements { - String key - String operator - List values -} - -enum KubernetesTolerationEffect { - @JsonProperty("NoSchedule") - NoSchedule, - - @JsonProperty("PreferNoSchedule") - PreferNoSchedule, - - @JsonProperty("NoExecute") - NoExecute -} - -enum KubernetesTolerationOperator { - @JsonProperty("Exists") - Exists, - - @JsonProperty("Equal") - Equal -} - -@AutoClone -@Canonical -class KubernetesToleration { - KubernetesTolerationEffect effect - String key - KubernetesTolerationOperator operator - Long tolerationSeconds - String value -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/EnableDisableKubernetesAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/EnableDisableKubernetesAtomicOperationDescription.groovy deleted file mode 100644 index 1857ba60e22..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/EnableDisableKubernetesAtomicOperationDescription.groovy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2016 Google, 
Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait - -class EnableDisableKubernetesAtomicOperationDescription extends KubernetesServerGroupDescription implements EnableDisableDescriptionTrait { - Integer desiredPercentage -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/KubernetesServerGroupDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/KubernetesServerGroupDescription.groovy deleted file mode 100644 index 000b00b94f6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/KubernetesServerGroupDescription.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.KubernetesKindAtomicOperationDescription - -class KubernetesServerGroupDescription extends KubernetesKindAtomicOperationDescription { - String serverGroupName - String namespace - String region - - String getNamespace() { - namespace ?: region - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/ResizeKubernetesAtomicOperationDescription.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/ResizeKubernetesAtomicOperationDescription.groovy deleted file mode 100644 index f5411270a4f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/description/servergroup/ResizeKubernetesAtomicOperationDescription.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup - -class ResizeKubernetesAtomicOperationDescription extends KubernetesServerGroupDescription { - Capacity capacity -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesClientOperationException.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesClientOperationException.groovy deleted file mode 100644 index 35a1cfb39f1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesClientOperationException.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2017 Cisco, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception - -import groovy.transform.InheritConstructors -import io.kubernetes.client.ApiException - -@InheritConstructors -class KubernetesClientOperationException extends RuntimeException { - KubernetesClientOperationException(String operation, ApiException e) { - super("$operation failed: ${e.message ?: e.responseBody}".toString(), e) - } - - KubernetesClientOperationException(String account, String operation, ApiException e) { - super("$operation for account $account failed: ${e.message ?: e.responseBody}".toString(), e) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesIllegalArgumentException.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesIllegalArgumentException.groovy deleted file mode 100644 index 8207dc6abf8..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesIllegalArgumentException.groovy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception - -import groovy.transform.InheritConstructors - -@InheritConstructors -class KubernetesIllegalArgumentException extends IllegalArgumentException {} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesOperationException.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesOperationException.groovy deleted file mode 100644 index fe8f4d1b790..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesOperationException.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception - -import groovy.transform.InheritConstructors -import io.fabric8.kubernetes.client.KubernetesClientException - -@InheritConstructors -class KubernetesOperationException extends RuntimeException { - KubernetesOperationException(String operation, KubernetesClientException e) { - super("$operation failed: ${e.status?.message ?: e.message}".toString(), e) - } - - KubernetesOperationException(String account, String operation, KubernetesClientException e) { - super("$operation for account $account failed: ${e.status?.message ?: e.message}".toString(), e) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesResourceNotFoundException.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesResourceNotFoundException.groovy deleted file mode 100644 index f9aa0ed13a1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/exception/KubernetesResourceNotFoundException.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception - -import groovy.transform.InheritConstructors - -@InheritConstructors -class KubernetesResourceNotFoundException extends KubernetesOperationException {} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/autoscaler/UpsertKubernetesAutoscalerAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/autoscaler/UpsertKubernetesAutoscalerAtomicOperation.groovy deleted file mode 100644 index f9fa24cff7f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/autoscaler/UpsertKubernetesAutoscalerAtomicOperation.groovy +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.autoscaler - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler.KubernetesAutoscalerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.Capacity -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesCpuUtilization -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesScalingPolicy -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.DoneableHorizontalPodAutoscaler -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscalerBuilder - -class UpsertKubernetesAutoscalerAtomicOperation implements AtomicOperation { - KubernetesAutoscalerDescription description - String BASE_PHASE = "UPSERT_AUTOSCALER" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - UpsertKubernetesAutoscalerAtomicOperation(KubernetesAutoscalerDescription description) { - this.description = description - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertScalingPolicy": { "serverGroupName": "myapp-dev-v000", "capacity": { "min": 1, "max": 5 }, "scalingPolicy": { "cpuUtilization": { "target": 40 } }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertScalingPolicy": { "serverGroupName": "myapp-dev-v000", "scalingPolicy": { "cpuUtilization": { "target": 40 } }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: 
application/json" -d '[ { "upsertScalingPolicy": { "serverGroupName": "myapp-dev-v000", "capacity": { "min": 1, "max": 5 }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing upsert of autoscaler for server group $description.serverGroupName..." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - def serverGroupName = description.serverGroupName - def parsedName = Names.parseName(serverGroupName) - - def replicaSet = credentials.apiAdaptor.getReplicaSet(namespace, serverGroupName) - def hasDeployment = credentials.apiAdaptor.hasDeployment(replicaSet) - def name = hasDeployment ? parsedName.cluster : serverGroupName - def kind = hasDeployment ? KubernetesUtil.DEPLOYMENT_KIND : KubernetesUtil.SERVER_GROUP_KIND - def version = hasDeployment ? credentials.apiAdaptor.getDeployment(namespace, parsedName.cluster).getApiVersion() : replicaSet.getApiVersion() - - task.updateStatus BASE_PHASE, "Looking up existing autoscaler..." - - def autoscaler = credentials.apiAdaptor.getAutoscaler(namespace, name) - - if (autoscaler) { - task.updateStatus BASE_PHASE, "Updating autoscaler settings..." - description.capacity = description.capacity ?: new Capacity() - description.capacity.min = description.capacity.min != null ? - description.capacity.min : - autoscaler.spec.minReplicas - description.capacity.max = description.capacity.max != null ? - description.capacity.max : - autoscaler.spec.maxReplicas - - description.scalingPolicy = description.scalingPolicy ?: new KubernetesScalingPolicy() - description.scalingPolicy.cpuUtilization = description.scalingPolicy.cpuUtilization ?: new KubernetesCpuUtilization() - description.scalingPolicy.cpuUtilization.target = description.scalingPolicy.cpuUtilization.target != null ? - description.scalingPolicy.cpuUtilization.target : - autoscaler.spec.targetCPUUtilizationPercentage - - ((DoneableHorizontalPodAutoscaler) KubernetesApiConverter.toAutoscaler( - credentials.apiAdaptor.editAutoscaler(namespace, name), description, name, kind, version - )).done() - } else { - if (!description.scalingPolicy || !description.scalingPolicy.cpuUtilization || description.scalingPolicy.cpuUtilization.target == null) { - throw new KubernetesOperationException("Scaling policy must be specified when the target server group has no autoscaler.") - } - - if (!description.capacity || description.capacity.min == null || description.capacity.max == null) { - throw new KubernetesOperationException("Capacity min and max must be fully specified when the target server group has no autoscaler.") - } - - task.updateStatus BASE_PHASE, "Creating autoscaler..." 
- credentials.apiAdaptor.createAutoscaler(namespace, ((HorizontalPodAutoscalerBuilder) KubernetesApiConverter.toAutoscaler(new HorizontalPodAutoscalerBuilder(), description, name, kind, version)).build()) - } - - return null - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/AbstractRegistrationKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/AbstractRegistrationKubernetesAtomicOperation.groovy deleted file mode 100644 index 731aa0b6847..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/AbstractRegistrationKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -abstract class AbstractRegistrationKubernetesAtomicOperation implements AtomicOperation { - abstract String getBasePhase() // Either 'REGISTER' or 'DEREGISTER'. - abstract String getAction() // Either 'true' or 'false', for Register and Deregister respectively. - abstract String getVerb() // Either 'registering' or 'deregistering'. - - AbstractRegistrationKubernetesAtomicOperationDescription description - - AbstractRegistrationKubernetesAtomicOperation(AbstractRegistrationKubernetesAtomicOperationDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - Void operate(List priorOutputs) { - task.updateStatus basePhase, "Initializing ${basePhase.toLowerCase()} operation..." - task.updateStatus basePhase, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - def services = description.loadBalancerNames.collect { - KubernetesUtil.loadBalancerKey(it) - } - - task.updateStatus basePhase, "Setting new service labels from each pod..." - - description.instanceIds.each { - credentials.apiAdaptor.togglePodLabels(namespace, it, services, action) - } - - task.updateStatus basePhase, "Finished $verb all pods." 
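Registration here never edits the Service itself; it flips selector labels on each pod, so membership changes take effect as soon as the endpoints controller re-evaluates the selectors. A sketch of what the togglePodLabels call above amounts to, assuming loadBalancerKey yields the v1 provider's per-load-balancer label key; the adaptor call shapes below are assumptions, not the exact fabric8 plumbing:

    // Sketch only: set each load balancer's label to action ('true' registers, 'false' deregisters).
    description.instanceIds.each { podName ->
      def pod = credentials.apiAdaptor.getPod(namespace, podName)
      services.each { labelKey ->
        pod.metadata.labels[labelKey] = action
      }
      // the real adaptor persists the edited pod back to the API server
    }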
- } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/DeregisterKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/DeregisterKubernetesAtomicOperation.groovy deleted file mode 100644 index 7054e81eca4..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/DeregisterKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription - -/* - * curl -X POST -H "Content-Type: application/json" -d '[ { "deregisterInstancesFromLoadBalancer": { "loadBalancers": ["kub-test-lb"], "instanceIds": ["kub-test-v000-beef"], "namespace": "default", "credentials": "my-kubernetes-account" }} ]' localhost:7002/kubernetes/ops - */ -class DeregisterKubernetesAtomicOperation extends AbstractRegistrationKubernetesAtomicOperation { - String basePhase = 'DEREGISTER' - - String action = 'false' - - String verb = 'deregistering' - - DeregisterKubernetesAtomicOperation(AbstractRegistrationKubernetesAtomicOperationDescription description) { - super(description) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/RegisterKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/RegisterKubernetesAtomicOperation.groovy deleted file mode 100644 index e7b48543ce7..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/RegisterKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription - -/* - * curl -X POST -H "Content-Type: application/json" -d '[ { "registerInstancesWithLoadBalancer": { "loadBalancers": ["kub-test-lb"], "instanceIds": ["kub-test-v000-beef"], "namespace": "default", "credentials": "my-kubernetes-account" }} ]' localhost:7002/kubernetes/ops - */ -class RegisterKubernetesAtomicOperation extends AbstractRegistrationKubernetesAtomicOperation { - String basePhase = 'REGISTER' - - String action = 'true' - - String verb = 'registering' - - RegisterKubernetesAtomicOperation(AbstractRegistrationKubernetesAtomicOperationDescription description) { - super(description) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/TerminateKubernetesInstancesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/TerminateKubernetesInstancesAtomicOperation.groovy deleted file mode 100644 index 4d2a33d6edb..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/instance/TerminateKubernetesInstancesAtomicOperation.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.KubernetesInstanceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -class TerminateKubernetesInstancesAtomicOperation implements AtomicOperation { - private final String BASE_PHASE = "TERMINATE_INSTANCES" - KubernetesInstanceDescription description - - TerminateKubernetesInstancesAtomicOperation(KubernetesInstanceDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "terminateInstances": { "instanceIds": ["kub-test-v000-beef"], "namespace": "default", "credentials": "my-kubernetes-account" }} ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing terminate instances operation..." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." 
- - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - description.instanceIds.each { - if (!credentials.apiAdaptor.deletePod(namespace, it)) { - throw new KubernetesOperationException("Failed to delete pod $it in $namespace") - } - } - - task.updateStatus BASE_PHASE, "Successfully terminated provided instances." - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/CloneKubernetesJobAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/CloneKubernetesJobAtomicOperation.groovy deleted file mode 100644 index 71048e14b25..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/CloneKubernetesJobAtomicOperation.groovy +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.job - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.CloneKubernetesJobAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesResourceNotFoundException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.Pod - -class CloneKubernetesJobAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "CLONE_JOB" - - CloneKubernetesJobAtomicOperation(CloneKubernetesJobAtomicOperationDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - CloneKubernetesJobAtomicOperationDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "cloneJob": { "source": { "jobName": "kub-test-xdfasdf" }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - DeploymentResult operate(List priorOutputs) { - CloneKubernetesJobAtomicOperationDescription newDescription = cloneAndOverrideDescription() - - task.updateStatus BASE_PHASE, "Initializing copy of job for ${description.source.jobName}..." - - RunKubernetesJobAtomicOperation deployer = new RunKubernetesJobAtomicOperation(newDescription) - DeploymentResult deploymentResult = deployer.operate(priorOutputs) - - task.updateStatus BASE_PHASE, "Finished copying job for ${description.source.jobName}." - - task.updateStatus BASE_PHASE, "Finished copying job for ${description.source.jobName}. New job = ${deploymentResult.deployedNames[0]}." 
- - return deploymentResult - } - - CloneKubernetesJobAtomicOperationDescription cloneAndOverrideDescription() { - CloneKubernetesJobAtomicOperationDescription newDescription = description.clone() - - task.updateStatus BASE_PHASE, "Reading ancestor job ${description.source.jobName}..." - - def credentials = newDescription.credentials.credentials - - newDescription.source.namespace = description.source.namespace ?: "default" - Pod ancestorPod = credentials.apiAdaptor.getPod(newDescription.source.namespace, newDescription.source.jobName) - - if (!ancestorPod) { - throw new KubernetesResourceNotFoundException("Source job $newDescription.source.jobName does not exist.") - } - - def ancestorNames = Names.parseName(description.source.jobName) - - // Build description object from ancestor, override any values that were specified on the clone call - newDescription.application = description.application ?: ancestorNames.app - newDescription.stack = description.stack ?: ancestorNames.stack - newDescription.freeFormDetails = description.freeFormDetails ?: ancestorNames.detail - newDescription.namespace = description.namespace ?: description.source.namespace - if (!description.container) { - newDescription.container = KubernetesApiConverter.fromContainer(ancestorPod.spec?.containers?.get(0)) - } - - return newDescription - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/DestroyKubernetesJobAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/DestroyKubernetesJobAtomicOperation.groovy deleted file mode 100644 index da450bbfa89..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/DestroyKubernetesJobAtomicOperation.groovy +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
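cloneAndOverrideDescription above gives call-site values precedence over the ancestor: any field supplied on the clone request is kept, and gaps are filled from the ancestor pod's frigga-parsed name and first container. The Elvis chain is the whole policy; a sketch with a hypothetical source job name:

    // 'kub-test-xdfasdf' is a hypothetical ancestor, as in the curl example above.
    def ancestorNames = Names.parseName('kub-test-xdfasdf')
    newDescription.application     = description.application     ?: ancestorNames.app    // 'kub'
    newDescription.stack           = description.stack           ?: ancestorNames.stack  // 'test'
    newDescription.freeFormDetails = description.freeFormDetails ?: ancestorNames.detail // 'xdfasdf'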
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.job - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.KubernetesJobDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -class DestroyKubernetesJobAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "DESTROY" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - private final KubernetesJobDescription description - - DestroyKubernetesJobAtomicOperation(KubernetesJobDescription description) { - this.description = description - } - - /** - * curl -X POST -H "Content-Type: application/json" -d '[ { "destroyJob": { "jobName": "kub-test-xy8813", "namespace": "default", "credentials": "my-kubernetes-account" }} ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing destroy of job." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - task.updateStatus BASE_PHASE, "Destroying job..." - - if (!credentials.apiAdaptor.hardDestroyPod(namespace, description.jobName)) { - throw new KubernetesOperationException("Failed to delete $description.jobName in $namespace.") - } - - task.updateStatus BASE_PHASE, "Successfully destroyed job $description.jobName." - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/RunKubernetesJobAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/RunKubernetesJobAtomicOperation.groovy deleted file mode 100644 index eed1f624fd0..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/job/RunKubernetesJobAtomicOperation.groovy +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.job - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesJobNameResolver -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.RunKubernetesJobDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.api.model.PodBuilder -import io.fabric8.kubernetes.api.model.Volume - -class RunKubernetesJobAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "RUN_JOB" - - RunKubernetesJobAtomicOperation(RunKubernetesJobDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - RunKubernetesJobDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "runJob": { "application": "kub", "stack": "test", "loadBalancers": [], "container": { "name": "librarynginx", "imageDescription": { "repository": "library/nginx" } }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - DeploymentResult operate(List priorOutputs) { - Pod pod = podDescription() - return new DeploymentResult([ - deployedNames: [pod.metadata.name], - deployedNamesByLocation: [(pod.metadata.namespace): [pod.metadata.name]], - ]) - } - - Pod podDescription() { - task.updateStatus BASE_PHASE, "Initializing creation of job..." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - def podName = (new KubernetesJobNameResolver()).createJobName(description.application, description.stack, description.freeFormDetails) - task.updateStatus BASE_PHASE, "JobStatus name chosen to be ${podName}." 
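The resolver call above picks the job's pod name; assuming KubernetesJobNameResolver follows the frigga cluster-naming convention seen throughout these files (app, then stack, then an optional detail, plus a generated suffix), names come out like the 'kub-test-xdfasdf' used in the curl examples. A sketch under that assumption:

    // Assumed behaviour: '<app>-<stack>[-<detail>]-<random suffix>'.
    def podName = new KubernetesJobNameResolver().createJobName('kub', 'test', null)
    assert podName.startsWith('kub-test')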
- - def podLabels = description?.labels ?: [:] - def podAnnotations = description?.annotations ?: [:] - - def podBuilder = new PodBuilder().withNewMetadata().withNamespace(namespace).withName(podName).withLabels(podLabels).withAnnotations(podAnnotations).endMetadata().withNewSpec() - podBuilder.withRestartPolicy("Never") - if (description.volumeSources) { - List volumeSources = description.volumeSources.findResults { volumeSource -> - KubernetesApiConverter.toVolumeSource(volumeSource) - } - - podBuilder = podBuilder.withVolumes(volumeSources) - } - - for (def imagePullSecret : credentials.imagePullSecrets[namespace]) { - podBuilder = podBuilder.addNewImagePullSecret(imagePullSecret) - } - - if (description.serviceAccountName) { - podBuilder = podBuilder.withServiceAccountName(description.serviceAccountName) - } - - if (description.dnsPolicy) { - podBuilder = podBuilder.withDnsPolicy(description.dnsPolicy.name()) - } - - if (description.hostNetwork) { - podBuilder = podBuilder.withHostNetwork(description.hostNetwork) - } - - if (description.nodeSelector){ - podBuilder = podBuilder.withNodeSelector(description.nodeSelector) - } - - // if the description is still using the single container, convert to a list first - if (description.container) { - description.containers = [description.container] - } - - def containers = description.containers.collect { container -> - container.name = container.name ?: "job" - KubernetesApiConverter.toContainer(container) - } - - podBuilder = podBuilder.withContainers(containers) - - def tolerations = description.tolerations.collect { toleration -> - KubernetesApiConverter.toToleration(toleration) - } - - podBuilder = podBuilder.withTolerations(tolerations) - - podBuilder = podBuilder.endSpec() - - task.updateStatus BASE_PHASE, "Sending pod spec to the Kubernetes master." - Pod pod = credentials.apiAdaptor.createPod(namespace, podBuilder.build()) - - task.updateStatus BASE_PHASE, "Finished creating job ${pod.metadata.name}." - - return pod - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperation.groovy deleted file mode 100644 index 49fc69ebe9d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperation.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.DeleteKubernetesLoadBalancerAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -class DeleteKubernetesLoadBalancerAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "DESTROY_LOAD_BALANCER" - - DeleteKubernetesLoadBalancerAtomicOperationDescription description - - DeleteKubernetesLoadBalancerAtomicOperation(DeleteKubernetesLoadBalancerAtomicOperationDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "deleteLoadBalancer": { "loadBalancerName": "kub-lb", "namespace": "default", "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing delete of load balancer $description.loadBalancerName..." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - if (!credentials.apiAdaptor.deleteService(namespace, description.loadBalancerName)) { - throw new KubernetesOperationException("Failed to delete service $description.loadBalancerName in $namespace") - } - - task.updateStatus BASE_PHASE, "Successfully deleted load balancer $description.loadBalancerName." - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperation.groovy deleted file mode 100644 index 2b7377dd0a6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperation.groovy +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesNamedServicePort -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.ServiceBuilder -import io.fabric8.kubernetes.api.model.ServicePort - -class UpsertKubernetesLoadBalancerAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "UPSERT_LOAD_BALANCER" - - KubernetesLoadBalancerDescription description - - UpsertKubernetesLoadBalancerAtomicOperation(KubernetesLoadBalancerDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertLoadBalancer": { "name": "kub-lb", "ports": [ { "name": "http", "port": 80, "targetPort": 9376 } ], "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - Map operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing upsert of load balancer $description.name..." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - def name = description.name - - task.updateStatus BASE_PHASE, "Looking up existing load balancer..." - def existingService = credentials.apiAdaptor.getService(namespace, name) - - if (existingService) { - task.updateStatus BASE_PHASE, "Found existing load balancer with name $description.name." - } - - def serviceBuilder = new ServiceBuilder() - - task.updateStatus BASE_PHASE, "Setting name, label selectors, & annotations..." - - serviceBuilder = serviceBuilder.withNewMetadata().withName(name) - - def labels = description.serviceLabels == null ? existingService?.metadata?.labels : description.serviceLabels - - serviceBuilder = serviceBuilder.withLabels(labels) - - def annotations = description.serviceAnnotations == null ? existingService?.metadata?.annotations : description.serviceAnnotations - - serviceBuilder = serviceBuilder.withAnnotations(annotations) - - serviceBuilder = serviceBuilder.endMetadata().withNewSpec() - - serviceBuilder = serviceBuilder.addToSelector(KubernetesUtil.loadBalancerKey(name), 'true') - - task.updateStatus BASE_PHASE, "Adding ports..." - - List ports = [] - - for (ServicePort port : existingService?.spec?.ports) { - def namedPort = new KubernetesNamedServicePort() - port.name ? namedPort.name = port.name : null - port.nodePort ? namedPort.nodePort = port.nodePort : null - port.port ? namedPort.port = port.port : null - port.targetPort ? namedPort.targetPort = port.targetPort?.intVal : null - port.protocol ? namedPort.protocol = port.protocol : null - ports << namedPort - } - - ports = description.ports != null ? description.ports : ports - - for (def port : ports) { - serviceBuilder = serviceBuilder.addNewPort() - - serviceBuilder = port.name ? serviceBuilder.withName(port.name) : serviceBuilder - serviceBuilder = port.targetPort ? 
serviceBuilder.withNewTargetPort(port.targetPort) : serviceBuilder - serviceBuilder = port.port ? serviceBuilder.withPort(port.port) : serviceBuilder - serviceBuilder = port.nodePort ? serviceBuilder.withNodePort(port.nodePort) : serviceBuilder - serviceBuilder = port.protocol ? serviceBuilder.withProtocol(port.protocol) : serviceBuilder - - serviceBuilder = serviceBuilder.endPort() - } - - task.updateStatus BASE_PHASE, "Adding external IPs..." - - def externalIps = description.externalIps != null ? description.externalIps : existingService?.spec?.externalIPs - - for (def ip: externalIps) { - serviceBuilder = serviceBuilder.addToExternalIPs(ip) - } - - task.updateStatus BASE_PHASE, "Setting type..." - - def type = description.serviceType != null ? description.serviceType : existingService?.spec?.type - serviceBuilder = type ? serviceBuilder.withType(type) : serviceBuilder - - task.updateStatus BASE_PHASE, "Setting load balancer IP..." - - def loadBalancerIp = description.loadBalancerIp != null ? description.loadBalancerIp : existingService?.spec?.loadBalancerIP - serviceBuilder = loadBalancerIp ? serviceBuilder.withLoadBalancerIP(loadBalancerIp) : serviceBuilder - - task.updateStatus BASE_PHASE, "Setting cluster IP..." - - def clusterIp = description.clusterIp != null ? description.clusterIp : existingService?.spec?.clusterIP - serviceBuilder = clusterIp ? serviceBuilder.withClusterIP(clusterIp) : serviceBuilder - - task.updateStatus BASE_PHASE, "Setting session affinity..." - - def sessionAffinity = description.sessionAffinity != null ? description.sessionAffinity : existingService?.spec?.sessionAffinity - serviceBuilder = sessionAffinity ? serviceBuilder.withSessionAffinity(sessionAffinity) : serviceBuilder - - serviceBuilder = serviceBuilder.endSpec() - - def service = existingService ? - credentials.apiAdaptor.replaceService(namespace, name, serviceBuilder.build()) : - credentials.apiAdaptor.createService(namespace, serviceBuilder.build()) - - task.updateStatus BASE_PHASE, "Finished upserting load balancer $description.name." - - [loadBalancers: [(service.metadata.namespace): [name: service.metadata.name]]] - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/DeleteKubernetesSecurityGroupAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/DeleteKubernetesSecurityGroupAtomicOperation.groovy deleted file mode 100644 index 918ac1bf4ad..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/DeleteKubernetesSecurityGroupAtomicOperation.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
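(To make the removed builder chain concrete: for the curl example above, with name "kub-lb" and port 80 forwarding to 9376, the chain assembles approximately the Service below. A sketch only; the selector key mirrors what KubernetesUtil.loadBalancerKey is assumed to produce.)

    import io.fabric8.kubernetes.api.model.Service
    import io.fabric8.kubernetes.api.model.ServiceBuilder

    Service service = new ServiceBuilder()
        .withNewMetadata()
          .withName('kub-lb')
        .endMetadata()
        .withNewSpec()
          .addToSelector('load-balancer-kub-lb', 'true') // assumed loadBalancerKey format
          .addNewPort()
            .withName('http')
            .withPort(80)
            .withNewTargetPort(9376)
          .endPort()
        .endSpec()
        .build()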
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.DeleteKubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -class DeleteKubernetesSecurityGroupAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "UPSERT_SECURITY_GROUP" - - DeleteKubernetesSecurityGroupAtomicOperation(DeleteKubernetesSecurityGroupDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - DeleteKubernetesSecurityGroupDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "deleteSecurityGroup": { "securityGroupName": "kub-sg", "namespace": "default", "credentials": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing delete of ingress." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - if (!credentials.apiAdaptor.deleteIngress(namespace, description.securityGroupName)) { - throw new KubernetesOperationException("Failed to delete ingress $description.securityGroupName in $namespace") - } - - task.updateStatus BASE_PHASE, "Successfully deleted $description.securityGroupName in $namespace." - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/UpsertKubernetesSecurityGroupAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/UpsertKubernetesSecurityGroupAtomicOperation.groovy deleted file mode 100644 index 659b266da60..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/UpsertKubernetesSecurityGroupAtomicOperation.groovy +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesHttpIngressPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressRule -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressTlS -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.extensions.HTTPIngressPathBuilder -import io.fabric8.kubernetes.api.model.extensions.IngressBuilder -import io.fabric8.kubernetes.api.model.extensions.IngressRuleBuilder -import io.fabric8.kubernetes.api.model.extensions.IngressTLSBuilder - -class UpsertKubernetesSecurityGroupAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "UPSERT_SECURITY_GROUP" - - UpsertKubernetesSecurityGroupAtomicOperation(KubernetesSecurityGroupDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - KubernetesSecurityGroupDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertSecurityGroup": { "securityGroupName": "kub-sg", "namespace": "default", "credentials": "my-kubernetes-account", "ingress": { "serviceName": "kub-nginx", "port": 80 } } } ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing upsert of ingress." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - task.updateStatus BASE_PHASE, "Looking up old ingress..." - - def oldIngress = credentials.apiAdaptor.getIngress(namespace, description.securityGroupName) - - task.updateStatus BASE_PHASE, "Setting name, namespace, annotations & labels..." - def ingress = new IngressBuilder().withNewMetadata() - .withName(description.securityGroupName) - .withNamespace(namespace) - .withAnnotations(description.annotations) - .withLabels(description.labels) - .endMetadata().withNewSpec() - - task.updateStatus BASE_PHASE, "Attaching requested service..." - if (description.ingress?.serviceName) { - ingress = ingress.withNewBackend().withServiceName(description.ingress.serviceName).withNewServicePort(description.ingress.port).endBackend() - } - - task.updateStatus BASE_PHASE, "Setting requested rules..." 
- - def rules = description.rules?.collect { KubernetesIngressRule rule -> - def res = new IngressRuleBuilder().withHost(rule.host).withNewHttp() - - def paths = rule.value?.http?.paths?.collect { KubernetesHttpIngressPath path -> - return new HTTPIngressPathBuilder().withPath(path.path) - .withNewBackend() - .withServiceName(path.ingress?.serviceName) - .withNewServicePort(path.ingress?.port) - .endBackend() - .build() - } - - res = res.withPaths(paths) - - return res.endHttp().build() - } - - def tls = description.tls?.collect{ KubernetesIngressTlS tlsEntry -> - return new IngressTLSBuilder().withHosts(tlsEntry.hosts).withSecretName(tlsEntry.secretName).build() - } - - ingress = ingress.withRules(rules) - - ingress.withTls(tls) - - ingress = ingress.endSpec().build() - - oldIngress ? credentials.apiAdaptor.replaceIngress(namespace, description.securityGroupName, ingress) : - credentials.apiAdaptor.createIngress(namespace, ingress) - - null - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/AbstractEnableDisableKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/AbstractEnableDisableKubernetesAtomicOperation.groovy deleted file mode 100644 index a192854e259..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/AbstractEnableDisableKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
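(For reference, the metadata and default-backend portion of the removed ingress upsert reduces to a builder chain like the one below for the curl example, using the same fabric8 extensions model the file imports. Rules and TLS are omitted; values are illustrative.)

    import io.fabric8.kubernetes.api.model.extensions.Ingress
    import io.fabric8.kubernetes.api.model.extensions.IngressBuilder

    Ingress ingress = new IngressBuilder()
        .withNewMetadata()
          .withName('kub-sg')
          .withNamespace('default')
        .endMetadata()
        .withNewSpec()
          .withNewBackend()
            .withServiceName('kub-nginx')
            .withNewServicePort(80)
          .endBackend()
        .endSpec()
        .build()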
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.helpers.EnableDisablePercentageCategorizer -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.EnableDisableKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1ServerGroup -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.KubernetesV1ClusterProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.apps.ReplicaSet -import org.springframework.beans.factory.annotation.Autowired - -import java.util.concurrent.Executors -import java.util.concurrent.TimeUnit - -@Slf4j -abstract class AbstractEnableDisableKubernetesAtomicOperation implements AtomicOperation { - abstract String getBasePhase() // Either 'ENABLE' or 'DISABLE'. - abstract String getAction() // Either 'true' or 'false', for Enable or Disable respectively. - abstract String getVerb() // Either 'enabling' or 'disabling'. - EnableDisableKubernetesAtomicOperationDescription description - - AbstractEnableDisableKubernetesAtomicOperation(KubernetesServerGroupDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Autowired - KubernetesV1ClusterProvider clusterProviders - - @Override - Void operate(List priorOutputs) { - task.updateStatus basePhase, "Initializing ${basePhase.toLowerCase()} operation for ${description.serverGroupName}..." - task.updateStatus basePhase, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - def desiredPercentage = description.desiredPercentage ?: 100 - def pods = [] - - task.updateStatus basePhase, "Finding requisite server group..." - - def replicationController = credentials.apiAdaptor.getReplicationController(namespace, description.serverGroupName) - def replicaSet = credentials.apiAdaptor.getReplicaSet(namespace, description.serverGroupName) - - if (!replicationController && !replicaSet) { - if (!supportsEnableDisable(credentials, description.serverGroupName, namespace)) { - return - } - throw new KubernetesOperationException("Enable/disable is only supported for replication controllers and replica sets; $description.serverGroupName was not found in $namespace.") - } - - // If we edit the spec when disabling less than 100% of pods, we won't be able to handle autoscaling - // activity correctly. - if (desiredPercentage == 100 || action == "true") { - task.updateStatus basePhase, "Getting list of attached services..."
- - List services = KubernetesUtil.getLoadBalancers(replicationController ?: replicaSet) - services = services.collect { - KubernetesUtil.loadBalancerKey(it) - } - - task.updateStatus basePhase, "Resetting server group service template labels and selectors..." - - def getGeneration = null - def getResource = null - def desired = null - def disableAnnotation = null - if (replicationController) { - desired = credentials.apiAdaptor.toggleReplicationControllerSpecLabels(namespace, description.serverGroupName, services, action) - getGeneration = { ReplicationController rc -> - return rc.metadata.generation - } - getResource = { - return credentials.apiAdaptor.getReplicationController(namespace, description.serverGroupName) - } - disableAnnotation = { -> - return credentials.apiAdaptor.annotateReplicationController(namespace, description.serverGroupName, KubernetesUtil.ENABLE_DISABLE_ANNOTATION, action) - } - } else if (replicaSet) { - desired = credentials.apiAdaptor.toggleReplicaSetSpecLabels(namespace, description.serverGroupName, services, action) - getGeneration = { ReplicaSet rs -> - return rs.metadata.generation - } - getResource = { - return credentials.apiAdaptor.getReplicaSet(namespace, description.serverGroupName) - } - disableAnnotation = { -> - return credentials.apiAdaptor.annotateReplicaSet(namespace, description.serverGroupName, KubernetesUtil.ENABLE_DISABLE_ANNOTATION, action) - } - } else { - throw new KubernetesOperationException("No replication controller or replica set $description.serverGroupName in $namespace.") - } - - if (!credentials.apiAdaptor.blockUntilResourceConsistent(desired, getGeneration, getResource)) { - throw new KubernetesOperationException("Server group failed to reach a consistent state while waiting for label to be applied. This is likely a bug with Kubernetes itself.") - } - - if (!credentials.apiAdaptor.blockUntilResourceConsistent(disableAnnotation(), getGeneration, getResource)) { - throw new KubernetesOperationException("Server group failed to reach a consistent state while waiting for annotation to be applied. This is likely a bug with Kubernetes itself.") - } - } - - if (!replicationController && !replicaSet) { - throw new KubernetesOperationException("No replication controller or replica set $description.serverGroupName in $namespace.") - } - - KubernetesV1ServerGroup serverGroup = clusterProviders.getServerGroup(description.account, namespace, description.serverGroupName) - serverGroup.instances.forEach( { instance -> pods.add(instance.getPod())}) - - if (!pods) { - task.updateStatus basePhase, "No pods to ${basePhase.toLowerCase()}. Operation finished successfully." - return - } - - task.updateStatus basePhase, "Resetting service labels for each pod..."
- - def pool = Executors.newWorkStealingPool((int) (pods.size() / 2) + 1) - - if (desiredPercentage != null) { - task.updateStatus basePhase, "Operating on $desiredPercentage% of pods" - List modifiedPods = pods.findAll { pod -> - KubernetesUtil.getPodLoadBalancerStates(pod).every { it.value == action } - } - - List unmodifiedPods = pods.findAll { pod -> - KubernetesUtil.getPodLoadBalancerStates(pod).any { it.value != action } - } - - pods = EnableDisablePercentageCategorizer.getInstancesToModify(modifiedPods, unmodifiedPods, desiredPercentage) - } - - pods.each { Pod pod -> - pool.submit({ _ -> - List podServices = KubernetesUtil.getLoadBalancers(pod) - podServices = podServices.collect { - KubernetesUtil.loadBalancerKey(it) - } - credentials.apiAdaptor.togglePodLabels(namespace, pod.metadata.name, podServices, action) - }) - } - - pool.shutdown() - pool.awaitTermination(1, TimeUnit.HOURS) - - task.updateStatus basePhase, "Finished ${verb} server group ${description.serverGroupName}." - - null // Return nothing from void - } - - boolean supportsEnableDisable(KubernetesV1Credentials credentials, String serverGroupName, String namespace) { - def controllerKind = description.kind - if (!description.kind) { - controllerKind = credentials.clientApiAdaptor.getControllerKind(serverGroupName, namespace, null) - } - switch (controllerKind) { - // disable/enable operations are not supported for StatefulSet and DaemonSet server groups - case KubernetesUtil.CONTROLLERS_STATEFULSET_KIND: - log.info("Skipping disable/enable for StatefulSet server group $description.serverGroupName in $description.namespace because it is not applicable.") - task.updateStatus basePhase, "Skipping disable/enable for StatefulSet server group $description.serverGroupName in $description.namespace." - return false - case KubernetesUtil.CONTROLLERS_DAEMONSET_KIND: - log.info("Skipping disable/enable for DaemonSet server group $description.serverGroupName in $description.namespace because it is not applicable.") - task.updateStatus basePhase, "Skipping disable/enable for DaemonSet server group $description.serverGroupName in $description.namespace." - return false - case KubernetesUtil.SERVER_GROUP_KIND: - case KubernetesUtil.DEPRECATED_SERVER_GROUP_KIND: - default: - return true - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/CloneKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/CloneKubernetesAtomicOperation.groovy deleted file mode 100644 index 105ac23ea7f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/CloneKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
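(The enable/disable mechanism removed above works by flipping per-pod load-balancer labels so that Service selectors start or stop matching each pod. Per pod, the togglePodLabels call amounts to roughly the fabric8 edit below. A sketch under stated assumptions: the fabric8 4.x DoneablePod API, an illustrative pod name, and an assumed label key format.)

    import io.fabric8.kubernetes.client.DefaultKubernetesClient

    def client = new DefaultKubernetesClient()
    client.pods()
        .inNamespace('default')
        .withName('kub-test-v000-abcde') // illustrative pod name
        .edit()
        .editMetadata()
          // 'false' detaches the pod from the Service selector; 'true' re-attaches it
          .addToLabels('load-balancer-kub-lb', 'false')
        .endMetadata()
        .done()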
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.CloneKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesResourceNotFoundException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -class CloneKubernetesAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "CLONE_SERVER_GROUP" - - CloneKubernetesAtomicOperation(CloneKubernetesAtomicOperationDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - CloneKubernetesAtomicOperationDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "cloneServerGroup": { "source": { "serverGroupName": "kub-test-v000" }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "cloneServerGroup": { "stack": "prod", "freeFormDetails": "mdservice", "targetSize": "4", "source": { "serverGroupName": "kub-test-v000" }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - */ - @Override - DeploymentResult operate(List priorOutputs) { - description.source.namespace = description.source.namespace ?: description.source.region - - task.updateStatus BASE_PHASE, "Initializing copy of server group for " + - "${description.source.serverGroupName}..." - - CloneKubernetesAtomicOperationDescription newDescription = cloneAndOverrideDescription() - - DeployKubernetesAtomicOperation deployer = new DeployKubernetesAtomicOperation(newDescription) - DeploymentResult deploymentResult = deployer.operate(priorOutputs) - - task.updateStatus BASE_PHASE, "Finished copying server group for " + - "${description.source.serverGroupName}." - - task.updateStatus BASE_PHASE, "Finished copying server group for " + - "${description.source.serverGroupName}. " + - "New server group = ${deploymentResult.serverGroupNames[0]}." - - return deploymentResult - } - - CloneKubernetesAtomicOperationDescription cloneAndOverrideDescription() { - CloneKubernetesAtomicOperationDescription newDescription = description.clone() - - task.updateStatus BASE_PHASE, "Reading ancestor server group ${description.source.serverGroupName}..." 
- - def credentials = description.sourceCredentials.credentials - - description.source.namespace = description.source.namespace ?: "default" - def ancestorServerGroup = credentials.apiAdaptor.getReplicationController(description.source.namespace, description.source.serverGroupName) - if (!ancestorServerGroup) { - ancestorServerGroup = credentials.apiAdaptor.getReplicaSet(description.source.namespace, description.source.serverGroupName) - } - - if (!ancestorServerGroup) { - throw new KubernetesResourceNotFoundException("Source server group $description.source.serverGroupName does not exist.") - } - - def ancestorNames = Names.parseName(description.source.serverGroupName) - - // Build description object from ancestor, override any values that were specified on the clone call - newDescription.application = description.application ?: ancestorNames.app - newDescription.stack = description.stack ?: ancestorNames.stack - newDescription.freeFormDetails = description.freeFormDetails ?: ancestorNames.detail - newDescription.targetSize = description.targetSize ?: ancestorServerGroup.spec?.replicas - newDescription.namespace = description.namespace ?: description.source.namespace - newDescription.loadBalancers = description.loadBalancers != null ? description.loadBalancers : KubernetesUtil.getLoadBalancers(ancestorServerGroup) - newDescription.restartPolicy = description.restartPolicy ?: ancestorServerGroup.spec?.template?.spec?.restartPolicy - newDescription.nodeSelector = description.nodeSelector ?: ancestorServerGroup.spec?.template?.spec?.nodeSelector - newDescription.hostNetwork = description.hostNetwork ?: ancestorServerGroup.spec?.template?.spec?.hostNetwork - if (!description.containers) { - newDescription.containers = ancestorServerGroup.spec?.template?.spec?.containers?.collect { it -> - KubernetesApiConverter.fromContainer(it) - } - } - - return newDescription - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DeployKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DeployKubernetesAtomicOperation.groovy deleted file mode 100644 index cd1a4f4909c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DeployKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,288 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
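(The clone fallbacks above lean on Frigga's naming convention. Parsing the ancestor name from the curl example shows the pieces the override logic falls back to; frigga is already a clouddriver dependency, so this runs as-is.)

    import com.netflix.frigga.Names

    def names = Names.parseName('kub-test-v000')
    assert names.app == 'kub'
    assert names.stack == 'test'
    assert names.cluster == 'kub-test'
    assert names.sequence == 0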
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesClientApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler.KubernetesAutoscalerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesClientOperationException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesResourceNotFoundException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.HasMetadata -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscalerBuilder -import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder -import io.fabric8.kubernetes.api.model.apps.DeploymentFluentImpl -import io.fabric8.kubernetes.api.model.apps.DoneableDeployment -import io.fabric8.kubernetes.api.model.apps.ReplicaSetBuilder -import io.kubernetes.client.models.V1Pod - -class DeployKubernetesAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "DEPLOY" - - DeployKubernetesAtomicOperation(DeployKubernetesAtomicOperationDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - final DeployKubernetesAtomicOperationDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "kub", "stack": "test", "targetSize": "3", "securityGroups": [], "loadBalancers": [], "containers": [ { "name": "librarynginx", "imageDescription": { "repository": "library/nginx" } } ], "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "kub", "stack": "test", "targetSize": "3", "loadBalancers": ["frontend-lb"], "containers": [ { "name": "librarynginx", "imageDescription": { "repository": "library/nginx", "tag": "latest", "registry": "index.docker.io" }, "ports": [ { "containerPort": "80", "hostPort": "80", "name": "http", "protocol": "TCP", "hostIp": "10.239.18.11" } ] } ], "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "kub", "stack": "test", "targetSize": "3", "loadBalancers": [], "containers": [ { "name": "librarynginx", "imageDescription": { "repository": "library/nginx", "tag": "latest", "registry": "index.docker.io" }, "livenessProbe": { "handler": { "type": "EXEC", "execAction": { "commands": [ "ls" ] } } } } ], "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H 
"Content-Type: application/json" -d '[ { "createServerGroup": { "application": "kub", "stack": "test", "targetSize": "3", "loadBalancers": [], "volumeSources": [ { "name": "storage", "type": "EMPTYDIR", "emptyDir": {} } ], "containers": [ { "name": "librarynginx", "imageDescription": { "repository": "library/nginx", "tag": "latest", "registry": "index.docker.io" }, "volumeMounts": [ { "name": "storage", "mountPath": "/storage", "readOnly": false } ] } ], "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "kub", "stack": "test", "targetSize": "3", "securityGroups": [], "loadBalancers": [], "containers": [ { "name": "librarynginx", "imageDescription": { "repository": "library/nginx" } } ], "capacity": { "min": 1, "max": 5 }, "scalingPolicy": { "cpuUtilization": { "target": 40 } }, "account": "my-kubernetes-account" } } ]' localhost:7002/kubernetes/ops - * curl -X POST -H "Content-Type: application/json" -d '[ { "createServerGroup": { "application": "kub", "stack": "test", "targetSize": "3", "securityGroups": [], "loadBalancers": [], "containers": [ { "name": "librarynginx", "imageDescription": { "repository": "library/nginx" } } ], "account": "my-kubernetes-account", "deployment": { "enabled": "true" } } } ]' localhost:7002/kubernetes/ops - */ - - @Override - DeploymentResult operate(List priorOutputs) { - - HasMetadata serverGroup = deployDescription() - DeploymentResult deploymentResult = new DeploymentResult() - deploymentResult.serverGroupNames = Arrays.asList("${serverGroup.metadata.namespace}:${serverGroup.metadata.name}".toString()) - deploymentResult.serverGroupNameByRegion[serverGroup.metadata.namespace] = serverGroup.metadata.name - return deploymentResult - } - - HasMetadata deployDescription() { - task.updateStatus BASE_PHASE, "Initializing creation of replica set." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - - /* - * Prefer the source namespace when it is available - * Fall back on the current namespace and use 'default' if both are not set. - */ - def namespaceToValidate - if (description.source?.namespace) { - namespaceToValidate = description.source.namespace - } - else if (description.namespace) { - namespaceToValidate = description.namespace - } - else { namespaceToValidate = "default" } - - def namespace = KubernetesUtil.validateNamespace(credentials, namespaceToValidate) - description.imagePullSecrets = credentials.imagePullSecrets[namespace] - - def serverGroupNameResolver = new KubernetesServerGroupNameResolver(namespace, credentials) - def clusterName = serverGroupNameResolver.combineAppStackDetail(description.application, description.stack, description.freeFormDetails) - - if (description.kind) { - return deployController(credentials, serverGroupNameResolver, clusterName, namespace) - } - - task.updateStatus BASE_PHASE, "Looking up next sequence index for cluster ${clusterName}..." - - String replicaSetName - if (description.sequence) { - replicaSetName = serverGroupNameResolver.generateServerGroupName(description.application, description.stack, description.freeFormDetails, description.sequence, false) - } else { - replicaSetName = serverGroupNameResolver.resolveNextServerGroupName(description.application, description.stack, description.freeFormDetails, false) - } - - task.updateStatus BASE_PHASE, "Replica set name chosen to be ${replicaSetName}." 
- def hasDeployment = KubernetesApiConverter.hasDeployment(description) - def replicaSet - - if (description.source?.useSourceCapacity) { - task.updateStatus BASE_PHASE, "Searching for ancestor server group ${description.source.serverGroupName}..." - def ancestorServerGroup = credentials.apiAdaptor.getReplicationController(namespace, description.source.serverGroupName) - if (!ancestorServerGroup) { - ancestorServerGroup = credentials.apiAdaptor.getReplicaSet(namespace, description.source.serverGroupName) - } - if (!ancestorServerGroup) { - throw new KubernetesResourceNotFoundException("Source Server Group: $description.source.serverGroupName does not exist in Namespace: ${namespace}!") - } - task.updateStatus BASE_PHASE, "Ancestor Server Group Located: ${ancestorServerGroup}" - - description.targetSize = ancestorServerGroup.spec?.replicas - task.updateStatus BASE_PHASE, "Building replica set..." - replicaSet = KubernetesApiConverter.toReplicaSet(new ReplicaSetBuilder(), description, replicaSetName) - if (hasDeployment) { - replicaSet.spec.replicas = 0 - } - } - //User might set targetSize and useSourceCapacity to false - else { - task.updateStatus BASE_PHASE, "Building replica set..." - replicaSet = KubernetesApiConverter.toReplicaSet(new ReplicaSetBuilder(), description, replicaSetName) - - if (hasDeployment) { - replicaSet.spec.replicas = 0 - } - } - - replicaSet = credentials.apiAdaptor.createReplicaSet(namespace, replicaSet) - task.updateStatus BASE_PHASE, "Deployed replica set ${replicaSet.metadata.name}" - - if (hasDeployment) { - if (!credentials.apiAdaptor.getDeployment(namespace, clusterName)) { - task.updateStatus BASE_PHASE, "Building deployment..." - credentials.apiAdaptor.createDeployment(namespace, ((DeploymentBuilder) KubernetesApiConverter.toDeployment((DeploymentFluentImpl) new DeploymentBuilder(), description, replicaSetName)).build()) - } else { - task.updateStatus BASE_PHASE, "Updating deployment..." - ((DoneableDeployment) KubernetesApiConverter.toDeployment((DeploymentFluentImpl) credentials.apiAdaptor.editDeployment(namespace, clusterName), - description, - replicaSetName)).done() - } - task.updateStatus BASE_PHASE, "Configured deployment $clusterName" - } - - if (description.scalingPolicy) { - task.updateStatus BASE_PHASE, "Attaching a horizontal pod autoscaler..." - - def name = hasDeployment ? clusterName : replicaSetName - def kind = hasDeployment ? KubernetesUtil.DEPLOYMENT_KIND : KubernetesUtil.SERVER_GROUP_KIND - def version = hasDeployment ? 
credentials.apiAdaptor.getDeployment(namespace, clusterName).getApiVersion() : replicaSet.getApiVersion() - def autoscaler = ((HorizontalPodAutoscalerBuilder) KubernetesApiConverter.toAutoscaler(new HorizontalPodAutoscalerBuilder(), new KubernetesAutoscalerDescription(replicaSetName, description), name, kind, version)).build() - - if (credentials.apiAdaptor.getAutoscaler(namespace, name)) { - credentials.apiAdaptor.deleteAutoscaler(namespace, name) - } - - credentials.apiAdaptor.createAutoscaler(namespace, autoscaler) - } - - return replicaSet - } - - HasMetadata deployController(KubernetesV1Credentials credentials, def serverGroupNameResolver, String clusterName, String namespace) { - def controllerSet - def isUpdateControllerEnabled = KubernetesClientApiConverter.isUpdateControllerEnabled(description) - def controllerName - if (KubernetesClientApiConverter.validateSequence(description)) { - controllerName = serverGroupNameResolver.generateServerGroupName(description.application, description.stack, description.freeFormDetails, description.sequence.intValue(), false) - } else { - controllerName = clusterName - } - - switch(description.kind) { - case KubernetesUtil.CONTROLLERS_STATEFULSET_KIND: - controllerSet = deployStatefulSet(credentials, controllerName, namespace, isUpdateControllerEnabled) - break - case KubernetesUtil.CONTROLLERS_DAEMONSET_KIND: - controllerSet = deployDaemonSet(credentials, controllerName, namespace, isUpdateControllerEnabled) - break - default: - throw new KubernetesOperationException("Controller type $description.kind is not supported.") - } - - return KubernetesClientApiConverter.toKubernetesController(controllerSet) - } - - def deployStatefulSet(KubernetesV1Credentials credentials, String controllerName, String namespace, Boolean isUpdateControllerEnabled) { - task.updateStatus BASE_PHASE, "Building stateful set..." - def controllerSet = KubernetesClientApiConverter.toStatefulSet(description, controllerName) - if (isUpdateControllerEnabled) { - def deployedControllerSet = credentials.clientApiAdaptor.getStatefulSet(controllerName, namespace) - if (deployedControllerSet) { - task.updateStatus BASE_PHASE, "Updating stateful set ${controllerName}" - controllerSet = credentials.clientApiAdaptor.replaceStatfulSet(controllerName, namespace, controllerSet) - if (description.updateController?.updateStrategy?.type.name() == "Recreate") { - deletePods(credentials, namespace, controllerSet) - } - } else { - task.updateStatus BASE_PHASE, "Deployed stateful set ${controllerName}" - controllerSet = credentials.clientApiAdaptor.createStatfulSet(namespace, controllerSet) - } - } else { - task.updateStatus BASE_PHASE, "Deployed stateful set ${controllerName}" - controllerSet = credentials.clientApiAdaptor.createStatfulSet(namespace, controllerSet) - } - - if (description.scalingPolicy) { - task.updateStatus BASE_PHASE, "Attaching a horizontal pod autoscaler..."
- - def autoscaler = KubernetesClientApiConverter.toAutoscaler(new KubernetesAutoscalerDescription(controllerName, description), controllerName, description.kind) - - if (credentials.clientApiAdaptor.getAutoscaler(namespace, controllerName)) { - credentials.clientApiAdaptor.deleteAutoscaler(namespace, controllerName, null, null, null, true) - } - credentials.clientApiAdaptor.createAutoscaler(namespace, autoscaler) - } - - return controllerSet - } - - def deployDaemonSet(KubernetesV1Credentials credentials, String controllerName, String namespace, Boolean isUpdateControllerEnabled) { - task.updateStatus BASE_PHASE, "Building daemon set..." - def controllerSet = KubernetesClientApiConverter.toDaemonSet(description, controllerName) - if (isUpdateControllerEnabled) { - def deployedControllerSet = credentials.clientApiAdaptor.getDaemonSet(controllerName, namespace) - if (deployedControllerSet) { - task.updateStatus BASE_PHASE, "Update daemon set ${controllerName}" - controllerSet = credentials.clientApiAdaptor.replaceDaemonSet(controllerName, namespace, controllerSet) - if (description.updateController?.updateStrategy?.type.name() == "Recreate") { - deletePods(credentials, namespace, controllerSet) - } - } else { - task.updateStatus BASE_PHASE, "Deployed daemon set ${controllerName}" - controllerSet = credentials.clientApiAdaptor.createDaemonSet(namespace, controllerSet) - } - } else { - task.updateStatus BASE_PHASE, "Deployed daemon set ${controllerName}" - controllerSet = credentials.clientApiAdaptor.createDaemonSet(namespace, controllerSet) - } - - return controllerSet - } - - void deletePods(KubernetesV1Credentials credentials, String namespace, def controllerSet) { - Map podNameList = new LinkedHashMap() - - credentials.clientApiAdaptor.getPods(namespace, controllerSet.metadata.labels).items.forEach({ item -> - podNameList.put(item.metadata.name, item.metadata.uid) - }) - - def getPodState = null - podNameList.toSorted(Map.Entry.comparingByKey().reversed()).forEach ({ k, v -> - credentials.clientApiAdaptor.deletePod(k, namespace, null, null, null, false) - - getPodState = { - V1Pod pod = credentials.clientApiAdaptor.getPodStatus(k, namespace) - if (pod) { - if (v != pod.metadata?.uid && pod.status?.phase == "Running") { - return true - } - } else { - if (controllerSet.kind == KubernetesUtil.CONTROLLERS_DAEMONSET_KIND) { - return true - } - } - return false - } - - if (!credentials.clientApiAdaptor.blockUntilResourceConsistent(getPodState)) { - throw new KubernetesClientOperationException("Failed to launch a new pod($k) for ServerGroup $controllerSet.metadata.name in $namespace.") - } - }) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DestroyKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DestroyKubernetesAtomicOperation.groovy deleted file mode 100644 index e6678fab8f6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DestroyKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import groovy.util.logging.Slf4j - -@Slf4j -class DestroyKubernetesAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "DESTROY" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - private final KubernetesServerGroupDescription description - - DestroyKubernetesAtomicOperation(KubernetesServerGroupDescription description) { - this.description = description - } - - /** - * curl -X POST -H "Content-Type: application/json" -d '[ { "destroyServerGroup": { "serverGroupName": "kub-test-v000", "namespace": "default", "credentials": "my-kubernetes-account" }} ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing destroy of server group." - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - - def credentials = description.credentials.credentials - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - def autoscalerName = description.serverGroupName - def parsedName = Names.parseName(description.serverGroupName) - - def deploymentName = parsedName.cluster - def deployment = credentials.apiAdaptor.getDeployment(namespace, deploymentName) - def replicaSet = credentials.apiAdaptor.getReplicaSet(namespace, description.serverGroupName) - def destroyAutoscalerIfExists = true - - if (deployment && replicaSet) { - task.updateStatus BASE_PHASE, "Checking if deployment ${deploymentName} needs to be destroyed..." - // If we selected to delete the replica set in the currently active deployment, this will delete everything owned by the deployment. - if (credentials.apiAdaptor.getDeploymentRevision(deployment) == credentials.apiAdaptor.getDeploymentRevision(replicaSet)) { - task.updateStatus BASE_PHASE, "Destroying deployment ${deploymentName}..." - if (!credentials.apiAdaptor.deleteDeployment(namespace, deploymentName)) { - throw new KubernetesOperationException("Failed to delete deployment ${deploymentName} in $namespace") - } - - task.updateStatus BASE_PHASE, "Successfully destroyed deployment ${deploymentName}..." - } else { - destroyAutoscalerIfExists = false - } - } - - if (credentials.apiAdaptor.getAutoscaler(namespace, autoscalerName) && destroyAutoscalerIfExists) { - task.updateStatus BASE_PHASE, "Destroying autoscaler..." 
- if (!credentials.apiAdaptor.deleteAutoscaler(namespace, autoscalerName)) { - throw new KubernetesOperationException("Failed to delete associated autoscaler $autoscalerName in $namespace.") - } - } - - task.updateStatus BASE_PHASE, "Destroying server group..." - - if (credentials.apiAdaptor.getReplicationController(namespace, description.serverGroupName)) { - task.updateStatus BASE_PHASE, "Underlying kind is 'ReplicationController'..." - if (!credentials.apiAdaptor.hardDestroyReplicationController(namespace, description.serverGroupName)) { - throw new KubernetesOperationException("Failed to delete $description.serverGroupName in $namespace.") - } - } else if (replicaSet) { - task.updateStatus BASE_PHASE, "Underlying kind is 'ReplicaSet'..." - credentials.apiAdaptor.hardDestroyReplicaSet(namespace, description.serverGroupName) - } else { - if (description.kind) { - destroyController(credentials, namespace, description.serverGroupName, autoscalerName) - } else { - log.error("Unable to delete $description.serverGroupName in $description.namespace because no kind was provided in the request.") - } - } - - task.updateStatus BASE_PHASE, "Successfully destroyed server group $description.serverGroupName." - } - - void destroyController(KubernetesV1Credentials credentials, String namespace, String controllerName, String autoscalerName) { - def controllerKind = description.kind - - switch (controllerKind) { - case KubernetesUtil.CONTROLLERS_STATEFULSET_KIND: - if (credentials.apiClientAdaptor.getAutoscaler(namespace, autoscalerName)) { - task.updateStatus BASE_PHASE, "Destroying autoscaler..." - if (!credentials.clientApiAdaptor.deleteAutoscaler(namespace, autoscalerName, null, null, null)) { - throw new KubernetesOperationException("Failed to delete associated autoscaler $autoscalerName in $namespace.") - } - } - credentials.apiClientAdaptor.hardDestroyStatefulSet(controllerName, namespace, null, null, null) - break - case KubernetesUtil.CONTROLLERS_DAEMONSET_KIND: - credentials.apiClientAdaptor.hardDestroyDaemonSet(controllerName, namespace, null, null, null) - break - default: - break - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DisableKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DisableKubernetesAtomicOperation.groovy deleted file mode 100644 index 918d034f8e6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DisableKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription - -class DisableKubernetesAtomicOperation extends AbstractEnableDisableKubernetesAtomicOperation { - @Override - final String getBasePhase() { - 'DISABLE' - } - - @Override - final String getAction() { - 'false' - } - - @Override - final String getVerb() { - 'disabling' - } - - DisableKubernetesAtomicOperation(KubernetesServerGroupDescription description) { - super(description) - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "disableServerGroup": { "serverGroupName": "kub-test-v000", "account": "my-kubernetes-account", "namespace": "default" } } ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - super.operate(priorOutputs) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/EnableKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/EnableKubernetesAtomicOperation.groovy deleted file mode 100644 index 546485d9964..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/EnableKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription - -class EnableKubernetesAtomicOperation extends AbstractEnableDisableKubernetesAtomicOperation { - @Override - final String getBasePhase() { - 'ENABLE' - } - - @Override - final String getAction() { - 'true' - } - - @Override - final String getVerb() { - 'enabling' - } - - EnableKubernetesAtomicOperation(KubernetesServerGroupDescription description) { - super(description) - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "enableServerGroup": { "serverGroupName": "kub-test-v000", "account": "my-kubernetes-account", "namespace": "default" } } ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - super.operate(priorOutputs) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/ResizeKubernetesAtomicOperation.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/ResizeKubernetesAtomicOperation.groovy deleted file mode 100644 index 863e0237995..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/ResizeKubernetesAtomicOperation.groovy +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.ResizeKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesOperationException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.apps.Deployment -import io.fabric8.kubernetes.api.model.apps.ReplicaSet - -class ResizeKubernetesAtomicOperation implements AtomicOperation { - private static final String BASE_PHASE = "RESIZE" - - ResizeKubernetesAtomicOperation(ResizeKubernetesAtomicOperationDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - ResizeKubernetesAtomicOperationDescription description - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "resizeServerGroup": { "serverGroupName": "kub-test-v000", "capacity": { "desired": 7 }, "account": "my-kubernetes-account" }} ]' localhost:7002/kubernetes/ops - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing resize of server group $description.serverGroupName..." - - def credentials = description.credentials.credentials - - task.updateStatus BASE_PHASE, "Looking up provided namespace..." - def namespace = KubernetesUtil.validateNamespace(credentials, description.namespace) - - def size = description.capacity.desired - def name = description.serverGroupName - - task.updateStatus BASE_PHASE, "Setting size to $size..." - - if (resizeController(credentials, namespace, name, size)) { - return - } - - def desired = null - def getGeneration = null - def getResource = null - def replicationController = credentials.apiAdaptor.getReplicationController(namespace, name) - def replicaSet = credentials.apiAdaptor.getReplicaSet(namespace, name) - if (replicationController) { - task.updateStatus BASE_PHASE, "Resizing replication controller..." - desired = credentials.apiAdaptor.resizeReplicationController(namespace, name, size) - getGeneration = { ReplicationController rc -> - return rc.metadata.generation - } - getResource = { - return credentials.apiAdaptor.getReplicationController(namespace, name) - } - } else if (replicaSet) { - if (credentials.apiAdaptor.hasDeployment(replicaSet)) { - String clusterName = Names.parseName(name).cluster - task.updateStatus BASE_PHASE, "Resizing deployment..." - desired = credentials.apiAdaptor.resizeDeployment(namespace, clusterName, size) - getGeneration = { Deployment d -> - return d.metadata.generation - } - getResource = { - return credentials.apiAdaptor.getDeployment(namespace, clusterName) - } - } else { - task.updateStatus BASE_PHASE, "Resizing replica set..." 
- desired = credentials.apiAdaptor.resizeReplicaSet(namespace, name, size) - getGeneration = { ReplicaSet rs -> - return rs.metadata.generation - } - getResource = { - return credentials.apiAdaptor.getReplicaSet(namespace, name) - } - } - } else { - throw new KubernetesOperationException("Neither a replication controller nor a replica set could be found by that name.") - } - - if (!credentials.apiAdaptor.blockUntilResourceConsistent(desired, getGeneration, getResource)) { - throw new KubernetesOperationException("Failed waiting for server group to acknowledge its new size. This is likely a bug within Kubernetes itself.") - } - - task.updateStatus BASE_PHASE, "Completed resize operation." - } - - boolean resizeController(KubernetesV1Credentials credentials, String namespace, String serverGroupName, int size) { - boolean isStatefulSetOrDaemonSet = false - def controllerKind = description.kind - if (!description.kind) { - controllerKind = credentials.clientApiAdaptor.getControllerKind(serverGroupName, namespace, null) - } - - if (controllerKind == KubernetesUtil.CONTROLLERS_STATEFULSET_KIND) { - def deployedControllerSet = credentials.clientApiAdaptor.getStatefulSet(serverGroupName, namespace) - if (deployedControllerSet) { - credentials.apiClientAdaptor.resizeStatefulSet(serverGroupName, namespace, size) - } - isStatefulSetOrDaemonSet = true - } else if (controllerKind == KubernetesUtil.CONTROLLERS_DAEMONSET_KIND) { - throw new KubernetesOperationException("Does not support resizing DaemonSet.") - } - - task.updateStatus BASE_PHASE, "Completed resize operation." - return isStatefulSetOrDaemonSet - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/KubernetesContainerValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/KubernetesContainerValidator.groovy deleted file mode 100644 index e6a5085f5d7..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/KubernetesContainerValidator.groovy +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
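The resize path above hands apiAdaptor.blockUntilResourceConsistent the freshly resized object plus two closures: one extracting metadata.generation from a typed resource, and one re-fetching the live resource. The contract this implies is a generation-watch poll; a hedged sketch of such a loop follows (the timeout, poll interval, and method body are assumptions, not the adaptor's verbatim implementation):

// Returns true once the re-fetched resource's generation has caught up with
// the generation produced by the resize call, or false on timeout.
boolean blockUntilResourceConsistent(Object desired, Closure getGeneration, Closure getResource) {
  long deadline = System.currentTimeMillis() + 60_000   // assumed timeout
  long target = getGeneration(desired) as Long
  while (System.currentTimeMillis() < deadline) {
    def observed = getResource()
    if (observed != null && (getGeneration(observed) as Long) >= target) {
      return true
    }
    sleep(500)   // assumed poll interval
  }
  return false
}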
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesHandlerType -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesProbe - -class KubernetesContainerValidator { - static void validate(KubernetesContainerDescription description, StandardKubernetesAttributeValidator helper, String prefix) { - helper.validateName(description.name, "${prefix}.name") - - helper.validateNotEmpty(description.imageDescription, "${prefix}.imageDescription") - - if (description.limits) { - helper.validateCpu(description.limits.cpu, "${prefix}.limits.cpu") - helper.validateMemory(description.limits.memory, "${prefix}.limits.memory") - } - - if (description.requests) { - helper.validateCpu(description.requests.cpu, "${prefix}.requests.cpu") - helper.validateMemory(description.requests.memory, "${prefix}.requests.memory") - } - - description.ports?.eachWithIndex { port, i -> - if (port.name) { - helper.validateName(port.name, "${prefix}.ports[$i].name") - } - if (port.containerPort) { - helper.validatePort(port.containerPort, "${prefix}.ports[$i].containerPort") - } - if (port.hostPort) { - helper.validatePort(port.hostPort, "${prefix}.ports[$i].hostPort") - } - if (port.hostIp) { - helper.validateIpv4(port.hostIp, "${prefix}.ports[$i].hostIp") - } - if (port.protocol) { - helper.validateProtocol(port.protocol, "${prefix}.ports[$i].protocol") - } - } - - description.envVars?.eachWithIndex { envVar, i -> - helper.validateNotEmpty(envVar.name, "${prefix}.envVars[$i].name") - } - - description.volumeMounts?.eachWithIndex { mount, i -> - helper.validateName(mount.name, "${prefix}.mounts[$i].name") - helper.validatePath(mount.mountPath, "${prefix}.mounts[$i].mountPath") - } - - if (description.livenessProbe) { - validateProbe(description.livenessProbe, helper, "${prefix}.livenessProbe") - } - - if (description.readinessProbe) { - validateProbe(description.readinessProbe, helper, "${prefix}.readinessProbe") - } - - description.command?.eachWithIndex { command, i -> - helper.validateNotEmpty(command, "${prefix}.command[$i]") - } - - description.args?.eachWithIndex { arg, i -> - helper.validateNotEmpty(arg, "${prefix}.args[$i]") - } - } - - static void validateProbe(KubernetesProbe probe, StandardKubernetesAttributeValidator helper, String prefix) { - if (probe.initialDelaySeconds) { - helper.validateNonNegative(probe.initialDelaySeconds, "${prefix}.initialDelaySeconds") - } - - if (probe.timeoutSeconds) { - helper.validatePositive(probe.timeoutSeconds, "${prefix}.timeoutSeconds") - } - - if (probe.periodSeconds) { - helper.validatePositive(probe.periodSeconds, "${prefix}.periodSeconds") - } - - if (probe.successThreshold) { - helper.validatePositive(probe.successThreshold, "${prefix}.successThreshold") - } - - if (probe.failureThreshold) { - helper.validatePositive(probe.failureThreshold, "${prefix}.failureThreshold") - } - - helper.validateNotEmpty(probe.handler, "${prefix}.handler") - helper.validateNotEmpty(probe.handler?.type, "${prefix}.handler.type") - - if (probe.handler?.type == KubernetesHandlerType.EXEC) { - helper.validateNotEmpty(probe.handler?.execAction?.commands, "${prefix}.handler.execAction.commands") - } - - if (probe.handler?.type == KubernetesHandlerType.TCP) { - 
helper.validatePort(probe.handler?.tcpSocketAction?.port, "${prefix}.handler.tcpSocketAction.port") - } - - if (probe.handler?.type == KubernetesHandlerType.HTTP) { - helper.validatePort(probe.handler?.httpGetAction?.port, "${prefix}.handler.httpGetAction.port") - - if (probe.handler?.httpGetAction?.uriScheme) { - helper.validateUriScheme(probe.handler?.httpGetAction?.uriScheme, "${prefix}.handler.httpGetAction.uriScheme") - } - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/KubernetesVolumeSourceValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/KubernetesVolumeSourceValidator.groovy deleted file mode 100644 index c6ebc82fcb3..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/KubernetesVolumeSourceValidator.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSource -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesVolumeSourceType - -class KubernetesVolumeSourceValidator { - static void validate(KubernetesVolumeSource source, StandardKubernetesAttributeValidator helper, String prefix) { - helper.validateName(source.name, "${prefix}.name") - switch (source.type) { - case KubernetesVolumeSourceType.EmptyDir: - helper.validateNotEmpty(source.emptyDir, "${prefix}.emptyDir") - - break // Nothing else to validate, only property is an enum which is implicitly validated during deserialization - - case KubernetesVolumeSourceType.HostPath: - if (!helper.validateNotEmpty(source.hostPath, "${prefix}.hostPath")) { - break - } - helper.validatePath(source.hostPath.path, "${prefix}.hostPath.path") - - break - - case KubernetesVolumeSourceType.PersistentVolumeClaim: - if (!helper.validateNotEmpty(source.persistentVolumeClaim, "${prefix}.persistentVolumeClaim")) { - break - } - helper.validateName(source.persistentVolumeClaim.claimName, "${prefix}.persistentVolumeClaim.claimName") - - break - - case KubernetesVolumeSourceType.Secret: - if (!helper.validateNotEmpty(source.secret, "${prefix}.secret")) { - break - } - helper.validateSecretName(source.secret.secretName, "${prefix}.secret.secretName") - - break - - case KubernetesVolumeSourceType.ConfigMap: - if (! 
helper.validateNotEmpty(source.configMap, "${prefix}.configMap")) { - break - } - helper.validateNotEmpty(source.configMap.configMapName, "${prefix}.configMap.configMapName") - source.configMap.items.eachWithIndex { item, index -> - helper.validateRelativePath(item.path, "${prefix}.configMap.items[$index].path") - helper.validateNotEmpty(item.key, "${prefix}.configMap.items[$index].key") - } - break - - default: - helper.reject("${prefix}.type", "$source.type not supported") - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/StandardKubernetesAttributeValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/StandardKubernetesAttributeValidator.groovy deleted file mode 100644 index fe6950728b2..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/StandardKubernetesAttributeValidator.groovy +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.apache.http.conn.util.InetAddressUtils -import org.springframework.validation.Errors - -class StandardKubernetesAttributeValidator { - static final namePattern = /^[a-z0-9]+([-a-z0-9]*[a-z0-9])?$/ - static final dnsSubdomainPattern = /^[a-z0-9]+([-\.a-z0-9]*[a-z0-9])?$/ - static final credentialsPattern = /^[a-z0-9]+([-a-z0-9_]*[a-z0-9])?$/ - static final prefixPattern = /^[a-z0-9]+$/ - static final pathPattern = /^\/.*$/ - static final relativePathPattern = /^[^\/].*$/ - static final quantityPattern = /^([+-]?[0-9.]+)([eEimkKMGTP]*[-+]?[0-9]*)$/ - static final protocolList = ['TCP', 'UDP'] - static final serviceTypeList = ['ClusterIP', 'NodePort', 'LoadBalancer'] - static final sessionAffinityList = ['None', 'ClientIP'] - static final restartPolicyList = ['Always', 'OnFailure', 'Never'] - static final uriSchemeList = ['HTTP', 'HTTPS'] - static final maxPort = (1 << 16) - 1 - - String context - - Errors errors - - StandardKubernetesAttributeValidator(String context, Errors errors) { - this.context = context - this.errors = errors - } - - def validateByRegex(String value, String attribute, String regex) { - def result - if (value ==~ regex) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Must match ${regex})") - result = false - } - result - } - - def validateByContainment(Object value, String attribute, List list) { - def result - if (list.contains(value)) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Must be one of $list)") - result = false - } - result - } - - def reject(String 
attribute, String reason) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid ($reason)") - } - - def validateDetails(String value, String attribute) { - // Details are optional. - if (!value) { - return true - } else { - return validateByRegex(value, attribute, namePattern) - } - } - - def validateName(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByRegex(value, attribute, namePattern) - } else { - return false - } - } - - def validateSecretName(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByRegex(value, attribute, dnsSubdomainPattern) - } else { - return false - } - } - - def validatePath(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByRegex(value, attribute, pathPattern) - } else { - return false - } - } - - def validateRelativePath(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByRegex(value, attribute, relativePathPattern) - } else { - return false - } - } - - def validateProtocol(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByContainment(value, attribute, protocolList) - } else { - return false - } - } - - def validateSessionAffinity(String value, String attribute) { - value ? validateByContainment(value, attribute, sessionAffinityList) : null - } - - def validateUriScheme(String value, String attribute) { - value ? validateByContainment(value, attribute, uriSchemeList) : null - } - - def validateIpv4(String value, String attribute) { - def result = InetAddressUtils.isIPv4Address(value) - if (!result) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Not valid IPv4 address)") - } - result - } - - def validateServiceType(String value, String attribute) { - value ? validateByContainment(value, attribute, serviceTypeList) : true - } - - def validatePort(int port, String attribute) { - def result = (port >= 1 && port <= maxPort) - if (!result) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Must be in range [1, $maxPort])") - } - result - } - - def validateApplication(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByRegex(value, attribute, prefixPattern) - } else { - return false - } - } - - def validateStack(String value, String attribute) { - // Stack is optional - if (!value) { - return true - } else { - return validateByRegex(value, attribute, prefixPattern) - } - } - - def validateCpu(String value, String attribute) { - // CPU is optional. - if (!value) { - return true - } else { - return validateByRegex(value, attribute, quantityPattern) - } - } - - def validateMemory(String value, String attribute) { - // Memory is optional. - if (!value) { - return true - } else { - return validateByRegex(value, attribute, quantityPattern) - } - } - - def validateImagePullSecret(KubernetesV1Credentials credentials, String value, String namespace, String attribute) { - if (!credentials.isRegisteredImagePullSecret(value, namespace)) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notRegistered") - return false - } - return validateByRegex(value, attribute, namePattern) - } - - def validateNamespace(KubernetesV1Credentials credentials, String value, String attribute) { - // Namespace is optional, empty taken to mean 'default'. 
- if (!value) { - return true - } else { - if (!credentials.isRegisteredNamespace(value)) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notRegistered") - return false - } - return validateByRegex(value, attribute, namePattern) - } - } - - def validateNotEmpty(Object value, String attribute) { - def result - if (value != "" && value != null && value != []) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.empty") - result = false - } - result - } - - def validateCredentials(String credentials, AccountCredentialsProvider accountCredentialsProvider) { - def result = validateNotEmpty(credentials, "account") - if (result) { - def kubernetesCredentials = accountCredentialsProvider.getCredentials(credentials) - if (!(kubernetesCredentials?.credentials instanceof KubernetesV1Credentials)) { - errors.rejectValue("${context}.account", "${context}.account.notFound") - result = false - } - } - result - } - - def validateNonNegative(int value, String attribute) { - def result - if (value >= 0) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.negative") - result = false - } - result - } - - def validatePositive(int value, String attribute) { - def result - if (value > 0) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notPositive") - result = false - } - result - } - - def validateRestartPolicy(String value, String attribute) { - value ? validateByContainment(value, attribute, restartPolicyList) : null - } - - def validateJobCloneSource(Object value, String attribute) { - if (!value) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.empty") - return false - } else { - return validateNotEmpty(value.jobName, attribute) - } - } - - def validateServerGroupCloneSource(Object value, String attribute) { - if (!value) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.empty") - return false - } else { - return validateNotEmpty(value.serverGroupName, attribute) - } - } - - def validateNotLessThan(Integer value1, Integer value2, String attribute1, String attribute2) { - if (value1 < value2) { - errors.rejectValue("${context}.${attribute1}", "${context}.${attribute1}.lessThan ${context}.${attribute2}") - } - } - - def validateInRangeInclusive(Integer value, int min, int max, String attribute) { - if (min > value) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.greaterThan $min") - } - - if (max < value) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.lessThan $max") - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/autoscaler/UpsertKubernetesAutoscalerDescriptionValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/autoscaler/UpsertKubernetesAutoscalerDescriptionValidator.groovy deleted file mode 100644 index dc150ee7e33..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/autoscaler/UpsertKubernetesAutoscalerDescriptionValidator.groovy +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.autoscaler - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.autoscaler.KubernetesAutoscalerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.UPSERT_SCALING_POLICY) -@Component -class UpsertKubernetesAutoscalerDescriptionValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, KubernetesAutoscalerDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("KubernetesAutoscalerDescription", errors) - if (description.capacity?.min && description.capacity?.max) { - helper.validateNotLessThan(description.capacity.max, description.capacity.min, "description.capacity.max", "description.capacity.min") - } - - if (description.scalingPolicy?.cpuUtilization?.target != null) { - helper.validatePositive(description.scalingPolicy.cpuUtilization.target, "description.scalingPolicy.cpuUtilization.target") - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/AbstractKubernetesInstancesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/AbstractKubernetesInstancesAtomicOperationValidator.groovy deleted file mode 100644 index b7c1e3cdad1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/AbstractKubernetesInstancesAtomicOperationValidator.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
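Every validator in this package funnels failures through StandardKubernetesAttributeValidator into Spring's Errors abstraction, so each rejection carries a code of the form <context>.<attribute>.<reason>. A test-style sketch of that flow, using org.springframework.validation.MapBindingResult as a convenient concrete Errors implementation (the attribute values are invented):

import org.springframework.validation.MapBindingResult

def errors = new MapBindingResult([:], "description")
def helper = new StandardKubernetesAttributeValidator("KubernetesAutoscalerDescription", errors)

helper.validateNotLessThan(2, 5, "capacity.max", "capacity.min")   // max < min, rejected
helper.validatePositive(0, "scalingPolicy.cpuUtilization.target")  // must be > 0, rejected
helper.validateName("My_Server_Group", "serverGroupName")          // fails the lowercase namePattern

assert errors.errorCount == 3
errors.allErrors.each { println it.code }
// e.g. KubernetesAutoscalerDescription.scalingPolicy.cpuUtilization.target.notPositive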
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.KubernetesInstanceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider - -class AbstractKubernetesInstancesAtomicOperationValidator { - static void validate(KubernetesInstanceDescription description, StandardKubernetesAttributeValidator helper, AccountCredentialsProvider accountCredentialsProvider) { - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - helper.validateNotEmpty(description.instanceIds, "instanceIds") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/AbstractRegistrationKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/AbstractRegistrationKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 83c8cc38efa..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/AbstractRegistrationKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider - -class AbstractRegistrationKubernetesAtomicOperationValidator { - static void validate(AbstractRegistrationKubernetesAtomicOperationDescription description, StandardKubernetesAttributeValidator helper, AccountCredentialsProvider accountCredentialsProvider) { - AbstractKubernetesInstancesAtomicOperationValidator.validate(description, helper, accountCredentialsProvider) - helper.validateNotEmpty(description.loadBalancerNames, "loadBalancerNames") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/DeregisterKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/DeregisterKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 97e181023af..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/DeregisterKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) -@Component -class DeregisterKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, AbstractRegistrationKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("deregisterKubernetesAtomicOperationDescription", errors) - - AbstractRegistrationKubernetesAtomicOperationValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/RegisterKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/RegisterKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index f84b6197683..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/RegisterKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.AbstractRegistrationKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) -@Component -class RegisterKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, AbstractRegistrationKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("registerKubernetesAtomicOperationDescription", errors) - - AbstractRegistrationKubernetesAtomicOperationValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/TerminateKubernetesInstancesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/TerminateKubernetesInstancesAtomicOperationValidator.groovy deleted file mode 100644 index 2df3579a163..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/instance/TerminateKubernetesInstancesAtomicOperationValidator.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
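The Register and Deregister validators above are identical apart from their @KubernetesOperation key and context string; both delegate to AbstractRegistrationKubernetesAtomicOperationValidator, which layers a loadBalancerNames check on top of the shared credentials and instanceIds checks. Assuming the op key in the request payload mirrors the AtomicOperations constant, in the style of the curl samples embedded in the operation classes above, a request exercising this validator would look roughly like this (all values invented):

// Hypothetical payload, for illustration only:
// curl -X POST -H "Content-Type: application/json" -d '[ {
//   "registerInstancesWithLoadBalancer": {
//     "instanceIds": ["kub-test-v000-0123"],
//     "loadBalancerNames": ["kub-test-lb"],
//     "account": "my-kubernetes-account",
//     "namespace": "default"
//   } } ]' localhost:7002/kubernetes/ops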
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.instance.KubernetesInstanceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.TERMINATE_INSTANCES) -@Component -class TerminateKubernetesInstancesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, KubernetesInstanceDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("terminateKubernetesInstancesAtomicOperationDescription", errors) - - AbstractKubernetesInstancesAtomicOperationValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/CloneKubernetesJobAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/CloneKubernetesJobAtomicOperationValidator.groovy deleted file mode 100644 index 312078c7a6d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/CloneKubernetesJobAtomicOperationValidator.groovy +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.job - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.CloneKubernetesJobAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.KubernetesContainerValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.CLONE_JOB) -@Component -class CloneKubernetesJobAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, CloneKubernetesJobAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("cloneKubernetesJobAtomicOperationDescription", errors) - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - KubernetesV1Credentials credentials = (KubernetesV1Credentials) accountCredentialsProvider.getCredentials(description.account).credentials - - helper.validateJobCloneSource(description.source, "source") - if (description.application) { - helper.validateApplication(description.application, "application") - } - - if (description.stack) { - helper.validateStack(description.stack, "stack") - } - - if (description.freeFormDetails) { - helper.validateDetails(description.freeFormDetails, "details") - } - - if (description.namespace) { - helper.validateNamespace(credentials, description.namespace, "namespace") - } - - if (description.container) { - KubernetesContainerValidator.validate(description.container, helper, "container") - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/DestroyKubernetesJobAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/DestroyKubernetesJobAtomicOperationValidator.groovy deleted file mode 100644 index 8dd406c03f6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/DestroyKubernetesJobAtomicOperationValidator.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
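KubernetesContainerValidator, deleted earlier in this diff, checks container cpu and memory requests/limits against StandardKubernetesAttributeValidator.quantityPattern, which admits Kubernetes-style quantity strings. A quick illustration of what that regex accepts and rejects (example values only):

def quantityPattern = /^([+-]?[0-9.]+)([eEimkKMGTP]*[-+]?[0-9]*)$/

assert '100m'  ==~ quantityPattern    // millicores, i.e. 0.1 CPU
assert '512Mi' ==~ quantityPattern    // mebibytes of memory
assert '2'     ==~ quantityPattern    // bare numbers are fine
assert !('lots' ==~ quantityPattern)  // non-numeric values are rejected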
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.job - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.KubernetesJobDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.DESTROY_JOB) -@Component -class DestroyKubernetesJobAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, KubernetesJobDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("destroyKubernetesJobAtomicOperationDescription", errors) - - KubernetesJobDescriptionValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/KubernetesJobDescriptionValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/KubernetesJobDescriptionValidator.groovy deleted file mode 100644 index 02aa7ac1ef0..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/KubernetesJobDescriptionValidator.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.job - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.KubernetesJobDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider - -class KubernetesJobDescriptionValidator { - static void validate(KubernetesJobDescription description, StandardKubernetesAttributeValidator helper, AccountCredentialsProvider accountCredentialsProvider) { - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - helper.validateName(description.jobName, "jobName") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/RunKubernetesJobAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/RunKubernetesJobAtomicOperationValidator.groovy deleted file mode 100644 index 752448341e3..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/job/RunKubernetesJobAtomicOperationValidator.groovy +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.job - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.job.RunKubernetesJobDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.KubernetesContainerValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.KubernetesVolumeSourceValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.RUN_JOB) -@Component -class RunKubernetesJobAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, RunKubernetesJobDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("runKubernetesJobAtomicOperationDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - KubernetesV1Credentials credentials = (KubernetesV1Credentials) accountCredentialsProvider.getCredentials(description.account).credentials - - helper.validateApplication(description.application, "application") - helper.validateStack(description.stack, "stack") - helper.validateDetails(description.freeFormDetails, "details") - helper.validateNamespace(credentials, description.namespace, "namespace") - - description.volumeSources.eachWithIndex { source, idx -> - KubernetesVolumeSourceValidator.validate(source, helper, "volumeSources[${idx}]") - } - - if (description.container) { - description.containers = [description.container] - } - - helper.validateNotEmpty(description.containers, "containers") - - description.containers.eachWithIndex { container, idx -> - container.name = container.name ?: "job" - KubernetesContainerValidator.validate(container, helper, "containers[${idx}]") - } - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationValidator.groovy deleted file mode 100644 index 22cc852a13d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/DeleteKubernetesLoadBalancerAtomicOperationValidator.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.loadbalancer - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.DeleteKubernetesLoadBalancerAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.DELETE_LOAD_BALANCER) -@Component -class DeleteKubernetesLoadBalancerAtomicOperationValidator extends DescriptionValidator{ - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, DeleteKubernetesLoadBalancerAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("deleteKubernetesLoadBalancerAtomicOperationDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - helper.validateName(description.loadBalancerName, "loadBalancerName") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationValidator.groovy deleted file mode 100644 index cb0805f5d61..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationValidator.groovy +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
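RunKubernetesJobAtomicOperationValidator, deleted just above, quietly normalizes its input before validating: a legacy single container field is folded into the containers list, and any unnamed container is defaulted to "job". That normalization, isolated as a sketch (the method name is invented):

// Mirrors the defaulting done inline in RunKubernetesJobAtomicOperationValidator.validate.
def normalizeJobContainers(def description) {
  if (description.container) {
    description.containers = [description.container]   // single container wins
  }
  description.containers.each { it.name = it.name ?: 'job' }
  return description.containers
}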
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.loadbalancer - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.UPSERT_LOAD_BALANCER) -@Component -class UpsertKubernetesLoadBalancerAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, KubernetesLoadBalancerDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("upsertKubernetesLoadBalancerAtomicOperationDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - KubernetesV1Credentials credentials = (KubernetesV1Credentials) accountCredentialsProvider.getCredentials(description.account).credentials - - helper.validateName(description.name, "name") - helper.validateNamespace(credentials, description.namespace, "namespace") - - description.ports.eachWithIndex { port, idx -> - helper.validateName(port.name, "ports[$idx].name") - helper.validateProtocol(port.protocol, "ports[$idx].protocol") - port.nodePort ? helper.validatePort(port.nodePort, "ports[$idx].nodePort") : null - port.port ? helper.validatePort(port.port, "ports[$idx].port") : null - port.targetPort ? helper.validatePort(port.targetPort, "ports[$idx].targetPort") : null - } - - description.externalIps.eachWithIndex { ip, idx -> - helper.validateIpv4(ip, "externalIps[$idx]") - } - - if (description.clusterIp && description.clusterIp != "None") { - helper.validateIpv4(description.clusterIp, "clusterIp") - } - - description.loadBalancerIp ? helper.validateIpv4(description.loadBalancerIp, "loadBalancerIp") : null - - helper.validateSessionAffinity(description.sessionAffinity, "sessionAffinity") - - helper.validateServiceType(description.serviceType, "serviceType") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/securitygroup/DeleteKubernetesSecurityGroupAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/securitygroup/DeleteKubernetesSecurityGroupAtomicOperationValidator.groovy deleted file mode 100644 index 6949db87b8f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/securitygroup/DeleteKubernetesSecurityGroupAtomicOperationValidator.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.securitygroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.DeleteKubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.DELETE_SECURITY_GROUP) -@Component -class DeleteKubernetesSecurityGroupAtomicOperationValidator extends DescriptionValidator{ - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, DeleteKubernetesSecurityGroupDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("deleteKubernetesSecurityGroupDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - helper.validateName(description.securityGroupName, "securityGroupName") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/securitygroup/UpsertKubernetesSecurityGroupValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/securitygroup/UpsertKubernetesSecurityGroupValidator.groovy deleted file mode 100644 index de6ee7f56e3..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/securitygroup/UpsertKubernetesSecurityGroupValidator.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
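Port attributes throughout these validators (service ports, nodePorts, target ports, ingress ports) are all range-checked by validatePort against [1, maxPort], where StandardKubernetesAttributeValidator defines maxPort as (1 << 16) - 1. Spelled out, that is the top of the TCP/UDP port range:

// 1 << 16 == 65536, so maxPort == 65535, the largest legal port number.
assert ((1 << 16) - 1) == 65535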
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.securitygroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesHttpIngressPath -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressRule -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.validation.Errors - -class UpsertKubernetesSecurityGroupValidator { - class UpsertKubernetesLoadBalancerAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, KubernetesSecurityGroupDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("upsertKubernetesSecurityGroupDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - KubernetesV1Credentials credentials = (KubernetesV1Credentials) accountCredentialsProvider.getCredentials(description.account).credentials - - helper.validateName(description.securityGroupName, "securityGroupName") - helper.validateNamespace(credentials, description.namespace, "namespace") - - if (description.ingress) { - if (description.ingress.serviceName) { - helper.validateName(description.ingress.serviceName, "ingress.serviceName") - } - if (description.ingress.port) { - helper.validatePort(description.ingress.port, "ingress.port") - } - } - - if (description.rules) { - description.rules.eachWithIndex { KubernetesIngressRule rule, i -> - if (rule.host) { - helper.validateName(rule.host, "rules[$i].host") - } - rule.value?.http?.paths?.eachWithIndex{ KubernetesHttpIngressPath path, j -> - if (path.path) { - helper.validatePath(path.path, "rules[$i].value.http.paths[$j].path") - } - } - } - } - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/CloneKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/CloneKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index ff60ce53539..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/CloneKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.CloneKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.KubernetesContainerValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.CLONE_SERVER_GROUP) -@Component -class CloneKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, CloneKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("cloneKubernetesAtomicOperationDescription", errors) - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - KubernetesV1Credentials credentials = (KubernetesV1Credentials) accountCredentialsProvider.getCredentials(description.account).credentials - - helper.validateServerGroupCloneSource(description.source, "source") - if (description.application) { - helper.validateApplication(description.application, "application") - } - - if (description.stack) { - helper.validateStack(description.stack, "stack") - } - - if (description.freeFormDetails) { - helper.validateDetails(description.freeFormDetails, "details") - } - - if (description.targetSize != null) { - helper.validateNonNegative(description.targetSize, "targetSize") - } - - if (description.namespace) { - helper.validateNamespace(credentials, description.namespace, "namespace") - } - - if (description.restartPolicy) { - helper.validateRestartPolicy(description.restartPolicy, "restartPolicy") - } - - if (description.loadBalancers) { - description.loadBalancers.eachWithIndex { name, idx -> - helper.validateName(name, "loadBalancers[${idx}]") - } - } - - if (description.securityGroups) { - description.securityGroups.eachWithIndex { name, idx -> - helper.validateName(name, "securityGroups[${idx}]") - } - } - - if (description.containers) { - description.containers.eachWithIndex { container, idx -> - KubernetesContainerValidator.validate(container, helper, "container[${idx}]") - } - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DeployKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DeployKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 35ed8d3195e..00000000000 --- 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DeployKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.KubernetesContainerValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.KubernetesVolumeSourceValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.CREATE_SERVER_GROUP) -@Component("deployKubernetesAtomicOperationValidator") -class DeployKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, DeployKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("deployKubernetesAtomicOperationDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - KubernetesV1Credentials credentials = (KubernetesV1Credentials) accountCredentialsProvider.getCredentials(description.account).credentials - - helper.validateApplication(description.application, "application") - helper.validateStack(description.stack, "stack") - helper.validateDetails(description.freeFormDetails, "details") - helper.validateNonNegative(description.targetSize, "targetSize") - helper.validateNamespace(credentials, description.namespace, "namespace") - helper.validateRestartPolicy(description.restartPolicy, "restartPolicy") - - description.volumeSources.eachWithIndex { source, idx -> - KubernetesVolumeSourceValidator.validate(source, helper, "volumeSources[${idx}]") - } - - description.loadBalancers.eachWithIndex { name, idx -> - helper.validateName(name, "loadBalancers[${idx}]") - } - - description.securityGroups.eachWithIndex { name, idx -> - helper.validateName(name, "securityGroups[${idx}]") - } - - helper.validateNotEmpty(description.containers, "containers") - description.containers.eachWithIndex { container, idx -> - 
KubernetesContainerValidator.validate(container, helper, "container[${idx}]") - } - - if (description.scalingPolicy) { - helper.validateNotEmpty(description.scalingPolicy.cpuUtilization, "scalingPolicy.cpuUtilization") - helper.validatePositive(description.scalingPolicy.cpuUtilization.target, "scalingPolicy.cpuUtilization.target") - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DestroyKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DestroyKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 2fba31dcf76..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DestroyKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.DESTROY_SERVER_GROUP) -@Component -class DestroyKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, KubernetesServerGroupDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("destroyKubernetesAtomicOperationDescription", errors) - - KubernetesServerGroupDescriptionValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DisableKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DisableKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index b9140d626ed..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DisableKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Google, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.EnableDisableKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.DISABLE_SERVER_GROUP) -@Component -class DisableKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, EnableDisableKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("disableKubernetesAtomicOperationDescription", errors) - - EnableDisableKubernetesAtomicOperationValidator.validate(description, helper) - KubernetesServerGroupDescriptionValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/EnableDisableKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/EnableDisableKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 59dbfb103d7..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/EnableDisableKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.EnableDisableKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator - -class EnableDisableKubernetesAtomicOperationValidator { - static void validate(EnableDisableKubernetesAtomicOperationDescription description, StandardKubernetesAttributeValidator helper) { - if (description.desiredPercentage != null) { - helper.validateInRangeInclusive(description.desiredPercentage, 0, 100, "desiredPercent") - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/EnableKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/EnableKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 1249e285bd7..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/EnableKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.EnableDisableKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.ENABLE_SERVER_GROUP) -@Component -class EnableKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, EnableDisableKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("enableKubernetesAtomicOperationDescription", errors) - - EnableDisableKubernetesAtomicOperationValidator.validate(description, helper) - KubernetesServerGroupDescriptionValidator.validate(description, helper, accountCredentialsProvider) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/KubernetesServerGroupDescriptionValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/KubernetesServerGroupDescriptionValidator.groovy deleted file mode 100644 index d9ddc9b6d39..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/KubernetesServerGroupDescriptionValidator.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesServerGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider - -class KubernetesServerGroupDescriptionValidator { - static void validate(KubernetesServerGroupDescription description, StandardKubernetesAttributeValidator helper, AccountCredentialsProvider accountCredentialsProvider) { - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - helper.validateName(description.serverGroupName, "serverGroupName") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/ResizeKubernetesAtomicOperationValidator.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/ResizeKubernetesAtomicOperationValidator.groovy deleted file mode 100644 index 2fc46311f54..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/ResizeKubernetesAtomicOperationValidator.groovy +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
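
KubernetesServerGroupDescriptionValidator above is the shared static helper the destroy, enable, and disable validators all delegate to before layering on their own checks. A condensed, self-contained sketch of that composition (the functions below are simplified stand-ins, not the deleted classes):

    List<String> validateServerGroupCommon(String account, String serverGroupName) {
      def errors = []
      if (!account) { errors << "account: not provided" }
      if (!serverGroupName) { errors << "serverGroupName: not provided" }
      errors
    }

    List<String> validateDisable(String account, String serverGroupName, Integer desiredPercentage) {
      def errors = validateServerGroupCommon(account, serverGroupName)
      // Mirrors the inclusive 0..100 range check on desiredPercentage.
      if (desiredPercentage != null && !(0..100).contains(desiredPercentage)) {
        errors << "desiredPercentage: must be between 0 and 100"
      }
      errors
    }

    assert validateDisable("my-k8s-account", "app-v001", 50).isEmpty()
    assert validateDisable("my-k8s-account", "app-v001", 120) ==
        ["desiredPercentage: must be between 0 and 100"]
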
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.ResizeKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@KubernetesOperation(AtomicOperations.RESIZE_SERVER_GROUP) -@Component -class ResizeKubernetesAtomicOperationValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Override - void validate(List priorDescriptions, ResizeKubernetesAtomicOperationDescription description, Errors errors) { - def helper = new StandardKubernetesAttributeValidator("resizeKubernetesAtomicOperationDescription", errors) - - if (!helper.validateCredentials(description.account, accountCredentialsProvider)) { - return - } - - helper.validateName(description.serverGroupName, "serverGroupName") - helper.validateNotEmpty(description.capacity, "capacity") - helper.validateNonNegative(description.capacity.desired, "capacity.desired") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesAutoscalerStatus.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesAutoscalerStatus.groovy deleted file mode 100644 index e5c2302a6a6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesAutoscalerStatus.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscaler -import groovy.util.logging.Slf4j - -@Slf4j -class KubernetesAutoscalerStatus { - Integer currentCpuUtilization - Integer currentReplicas - Integer desiredReplicas - Long lastScaleTime - - KubernetesAutoscalerStatus() { } - - KubernetesAutoscalerStatus(HorizontalPodAutoscaler autoscaler) { - if (autoscaler.status == null) { - log.warn("Autoscaler on ${autoscaler.metadata.name} has a null status. 
The replicaset may be missing a CPU request.") - } else { - this.currentCpuUtilization = autoscaler.status.currentCPUUtilizationPercentage - this.currentReplicas = autoscaler.status.currentReplicas - this.desiredReplicas = autoscaler.status.desiredReplicas - this.lastScaleTime = KubernetesModelUtil.translateTime(autoscaler.status.lastScaleTime) - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesControllerConverter.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesControllerConverter.groovy deleted file mode 100644 index 62eaa827c91..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesControllerConverter.groovy +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2017 Cisco, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import io.fabric8.kubernetes.api.model.HasMetadata -import io.fabric8.kubernetes.api.model.ObjectMeta -import io.kubernetes.client.models.V1ObjectMeta - -class KubernetesControllerConverter implements HasMetadata { - String kind - String apiVersion - ObjectMeta metadata - - KubernetesControllerConverter(String kind, String apiVersion, V1ObjectMeta metadata) { - this.kind = kind - this.apiVersion = apiVersion - - this.metadata = new ObjectMeta() - this.metadata.name = metadata.name - this.metadata.namespace = metadata.namespace - setMetadata(this.metadata) - } - - @Override - ObjectMeta getMetadata() { - return this.metadata - } - - @Override - void setMetadata(ObjectMeta metadata) { - this.metadata = metadata - } - - @Override - String getKind() { - return this.kind - } - - @Override - String getApiVersion() { - return this.apiVersion - } -} - - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesDeploymentStatus.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesDeploymentStatus.groovy deleted file mode 100644 index 41189ab8000..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesDeploymentStatus.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
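
The deleted KubernetesControllerConverter exists solely to bridge two Kubernetes client libraries: it copies name and namespace from an io.kubernetes.client V1ObjectMeta into a fabric8 ObjectMeta so the object can satisfy fabric8's HasMetadata interface. A sketch of that bridging idea, with both classes below as stand-ins for the real client types:

    class SourceMeta { String name; String namespace }   // stands in for V1ObjectMeta
    class TargetMeta { String name; String namespace }   // stands in for fabric8 ObjectMeta

    TargetMeta bridgeMetadata(SourceMeta source) {
      // Only name and namespace are carried over, as in the deleted converter.
      new TargetMeta(name: source.name, namespace: source.namespace)
    }

    def meta = bridgeMetadata(new SourceMeta(name: 'web-v001', namespace: 'default'))
    assert meta.name == 'web-v001' && meta.namespace == 'default'
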
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import io.fabric8.kubernetes.api.model.apps.Deployment - - -class KubernetesDeploymentStatus { - Integer replicas - Integer availableReplicas - Integer unavailableReplicas - Integer updatedReplicas - String revision - - KubernetesDeploymentStatus(Deployment deployment) { - replicas = deployment.status.replicas - availableReplicas = deployment.status.availableReplicas - unavailableReplicas = deployment.status.unavailableReplicas - updatedReplicas = deployment.status.updatedReplicas - revision = KubernetesApiAdaptor.getDeploymentRevision(deployment) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesEvent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesEvent.groovy deleted file mode 100644 index 88b5a9f226a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesEvent.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Event - -@Slf4j -class KubernetesEvent { - String message - String reason - Severity type - Integer count - Long firstOccurrence - Long lastOccurrence - - KubernetesEvent() { } - - KubernetesEvent(Event event) { - this.message = event.message - this.count = event.count - this.reason = event.reason - - switch (event.type) { - case "Warning": - this.type = Severity.Warning - break - case "Normal": - this.type = Severity.Normal - break - default: - this.type = Severity.Unknown - log.info "Unknown event severity: ${event.type}" - break - } - - this.firstOccurrence = KubernetesModelUtil.translateTime(event.firstTimestamp) - this.lastOccurrence = KubernetesModelUtil.translateTime(event.lastTimestamp) - } -} - -enum Severity { - Warning, - Normal, - Unknown, -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesImageSummary.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesImageSummary.groovy deleted file mode 100644 index ecf273a6bdf..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesImageSummary.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.model.ServerGroup - -class KubernetesImageSummary implements ServerGroup.ImageSummary, Serializable { - String serverGroupName - String imageId - String imageName - Map image - Map buildInfo -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesJobStatus.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesJobStatus.groovy deleted file mode 100644 index 956d9bd9486..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesJobStatus.groovy +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import com.netflix.spinnaker.clouddriver.model.JobState -import com.netflix.spinnaker.clouddriver.model.JobStatus -import io.fabric8.kubernetes.api.model.Pod - -class KubernetesJobStatus implements JobStatus, Serializable { - String name - String cluster - String account - String id - String location - String provider = "kubernetes" - Long createdTime - Long completedTime - String message - String reason - Integer exitCode - Integer signal - Set loadBalancers - Set securityGroups - @JsonIgnore - Pod pod - String logs - - KubernetesJobStatus(Pod pod, String account) { - this.name = pod.metadata.name - this.cluster = Names.parseName(this.name).cluster - this.location = pod.metadata.namespace - this.account = account - this.createdTime = KubernetesModelUtil.translateTime(pod.metadata.creationTimestamp) - this.pod = pod - - } - - @Override - Map getCompletionDetails() { - [ - exitCode: exitCode?.toString(), - signal: signal?.toString(), - message: message?.toString(), - reason: reason?.toString(), - ] - } - - @Override - JobState getJobState() { - def state = pod?.status?.containerStatuses?.getAt(0)?.state - if (state?.getRunning()) { - return JobState.Running - } else if (state?.getWaiting()) { - def waiting = state.getWaiting() - if (waiting.reason in ["ImagePullBackoff", "RegistryUnavailable"] || waiting.reason.contains("Err")) { - message = waiting.getMessage() - reason = waiting.getReason() - return JobState.Failed - } - return JobState.Starting - } else if (state?.getTerminated()) { - def terminated = state.getTerminated() - completedTime = KubernetesModelUtil.translateTime(terminated.getFinishedAt()) - signal = terminated.getSignal() - exitCode = terminated.getExitCode() - message = terminated.getMessage() - reason = terminated.getReason() - - // Kind of a hack, seems that jobs can have exit code 0 even when being OOMKilled - if (reason.equalsIgnoreCase("oomkilled")) { - return JobState.Failed - } - - if (exitCode == 0) { - return JobState.Succeeded - } else { - return JobState.Failed - } - } else { - return JobState.Unknown - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Application.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Application.groovy deleted file mode 100644 index b57bf42c873..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Application.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
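
KubernetesJobStatus above derives a JobState from the first container status of the backing pod. One observable quirk: the kubelet spells its waiting reason "ImagePullBackOff", while the deleted check compared against "ImagePullBackoff", so image pull backoffs would have fallen through to Starting rather than Failed. A condensed, runnable sketch of the mapping, with maps standing in for the fabric8 pod model:

    enum JobState { Starting, Running, Succeeded, Failed, Unknown }

    JobState jobState(Map state) {
      if (state.running) return JobState.Running
      if (state.waiting) {
        def reason = state.waiting.reason ?: ""
        // Failures surface as waiting states with pull/registry errors.
        return (reason in ["ImagePullBackOff", "RegistryUnavailable"] || reason.contains("Err")) ?
            JobState.Failed : JobState.Starting
      }
      if (state.terminated) {
        def t = state.terminated
        // Jobs can report exit code 0 even when OOM-killed, so check the reason first.
        if ("oomkilled".equalsIgnoreCase(t.reason ?: "")) return JobState.Failed
        return t.exitCode == 0 ? JobState.Succeeded : JobState.Failed
      }
      JobState.Unknown
    }

    assert jobState([running: [startedAt: "2016-01-01T00:00:00Z"]]) == JobState.Running
    assert jobState([waiting: [reason: "ErrImagePull"]]) == JobState.Failed
    assert jobState([terminated: [reason: "OOMKilled", exitCode: 0]]) == JobState.Failed
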
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.fasterxml.jackson.core.type.TypeReference -import com.netflix.spinnaker.clouddriver.model.Application -import groovy.transform.CompileStatic -import groovy.transform.EqualsAndHashCode - -@CompileStatic -@EqualsAndHashCode(includes = ["name"]) -class KubernetesV1Application implements Application, Serializable { - public static final TypeReference> ATTRIBUTES = new TypeReference>() {} - final String name - final Map attributes - final Map> clusterNames - - KubernetesV1Application(String name, Map attributes, Map> clusterNames) { - this.name = name - this.attributes = attributes - this.clusterNames = clusterNames - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Cluster.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Cluster.groovy deleted file mode 100644 index f3a203eb842..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Cluster.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.model.Cluster -import groovy.transform.CompileStatic -import groovy.transform.EqualsAndHashCode - -@CompileStatic -@EqualsAndHashCode(includes = ["name", "accountName"]) -class KubernetesV1Cluster implements Cluster, Serializable { - String name - String type = Keys.Namespace.provider - String accountName - Set serverGroups = Collections.synchronizedSet(new HashSet()) - Set loadBalancers = Collections.synchronizedSet(new HashSet()) -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Health.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Health.groovy deleted file mode 100644 index f4d2824da7c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Health.groovy +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.model.Health -import com.netflix.spinnaker.clouddriver.model.HealthState -import io.fabric8.kubernetes.api.model.ContainerStatus -import io.fabric8.kubernetes.api.model.Pod - -class KubernetesV1Health implements Health { - HealthState state - final String source - final String type - final String healthClass = "platform" - String description = "" - - KubernetesV1Health(Pod pod) { - source = "Pod" - type = "KubernetesPod" - def phase = pod.status.phase - - state = HealthState.Unknown - if (phase == "Pending") { - if (!pod.status.containerStatuses) { - description = pod.status?.conditions?.getAt(0)?.reason ?: "No containers scheduled" - state = HealthState.Down - } else { - state = HealthState.Unknown - } - } else if (phase == "Running") { - state = HealthState.Up - } else if (phase == "Succeeded") { - state = HealthState.Succeeded - } else if (phase == "Failed") { - state = HealthState.Failed - } - } - - KubernetesV1Health(String service, String enabled) { - source = "Service $service" - type = "KubernetesService" - state = enabled == "true" ? HealthState.Up : - enabled == "false" ? HealthState.OutOfService : HealthState.Unknown - } - - KubernetesV1Health(String name, ContainerStatus containerStatus) { - source = "Container $name" - type = "KubernetesContainer" - - state = HealthState.Unknown - if (containerStatus.state.running) { - if (containerStatus.ready) { - state = HealthState.Up - } else { - description = "Readiness probe hasn't passed" - state = HealthState.Down - } - } else if (containerStatus.state.terminated) { - if (containerStatus.state.terminated.reason == "Completed") { - description = "Container terminated with code $containerStatus.state.terminated.exitCode" - if (containerStatus.state.terminated.exitCode == 0) { - state = HealthState.Succeeded - } else { - state = HealthState.Failed - } - } else { - state = HealthState.Down - } - } else if (containerStatus.state.waiting) { - if (!containerStatus.ready) { - state = HealthState.Down - } - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Instance.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Instance.groovy deleted file mode 100644 index cd7ba8f438f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1Instance.groovy +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
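
The pod constructor of the deleted KubernetesV1Health reduces to a small pod-phase to health-state mapping, with Pending treated as Down only when no containers have been scheduled. A minimal sketch (the enum stands in for clouddriver's HealthState):

    enum HealthState { Up, Down, Succeeded, Failed, Unknown }

    HealthState podHealth(String phase, boolean containersScheduled) {
      switch (phase) {
        case "Pending":   return containersScheduled ? HealthState.Unknown : HealthState.Down
        case "Running":   return HealthState.Up
        case "Succeeded": return HealthState.Succeeded
        case "Failed":    return HealthState.Failed
        default:          return HealthState.Unknown
      }
    }

    assert podHealth("Running", true) == HealthState.Up
    assert podHealth("Pending", false) == HealthState.Down
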
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.model.HealthState -import com.netflix.spinnaker.clouddriver.model.Instance -import io.fabric8.kubernetes.api.model.Event -import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.client.internal.SerializationUtils - -class KubernetesV1Instance implements Instance, Serializable { - String name - String location - String instanceId - Long launchTime - String zone - List> health - String controllerName - String controllerKind - Pod pod - List loadBalancers - List events - final String providerType = KubernetesCloudProvider.ID - final String cloudProvider = KubernetesCloudProvider.ID - String yaml - - boolean isAttached(String serviceName) { - KubernetesUtil.getPodLoadBalancerStates(pod)?.get(KubernetesUtil.loadBalancerKey(serviceName)) == "true" - } - - KubernetesV1Instance() { } - - KubernetesV1Instance(Pod pod) { - this(pod, []) - } - - KubernetesV1Instance(Pod pod, List events) { - this.name = pod.metadata?.name - this.location = pod.metadata?.namespace - this.instanceId = this.name - this.launchTime = KubernetesModelUtil.translateTime(pod.status?.startTime) - this.zone = pod.metadata?.namespace - this.pod = pod - this.yaml = SerializationUtils.dumpWithoutRuntimeStateAsYaml(pod) - this.events = events?.collect { event -> - new KubernetesEvent(event) - } - null - - def mapper = new ObjectMapper() - this.health = pod.status?.containerStatuses?.collect { - (Map) mapper.convertValue(new KubernetesV1Health(it.image, it), new TypeReference>() {}) - } ?: [] - - this.health.addAll(KubernetesUtil.getPodLoadBalancerStates(pod).collect { key, value -> - (Map) mapper.convertValue(new KubernetesV1Health(key, value), new TypeReference>() {}) - } ?: []) - - this.health << (Map) mapper.convertValue(new KubernetesV1Health(pod), new TypeReference>() {}) - - if (pod.metadata?.ownerReferences) { - this.controllerName = pod.metadata?.ownerReferences.get(0)?.getName() - this.controllerKind = pod.metadata?.ownerReferences.get(0)?.getKind() - } - } - - @Override - HealthState getHealthState() { - return KubernetesModelUtil.getHealthState(health) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1LoadBalancer.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1LoadBalancer.groovy deleted file mode 100644 index 25f9a0b015e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1LoadBalancer.groovy +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.model.LoadBalancer -import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import groovy.transform.EqualsAndHashCode -import io.fabric8.kubernetes.api.model.Service -import io.fabric8.kubernetes.client.internal.SerializationUtils - -@EqualsAndHashCode(includes = ["name", "namespace", "account"]) -class KubernetesV1LoadBalancer implements LoadBalancer, Serializable, LoadBalancerProvider.Item { - String name - final String type = KubernetesCloudProvider.ID - final String cloudProvider = KubernetesCloudProvider.ID - String region - String namespace - String account - Long createdTime - Service service - String yaml - // Set of server groups represented as maps of strings -> objects. - Set serverGroups = [] as Set - List securityGroups = [] - KubernetesLoadBalancerDescription description - - KubernetesV1LoadBalancer(String name, String namespace, String accountName) { - this.name = name - this.namespace = namespace - this.region = namespace - this.account = accountName - } - - KubernetesV1LoadBalancer(Service service, List serverGroupList, String accountName, List securityGroups) { - this.service = service - this.name = service.metadata.name - this.namespace = service.metadata.namespace - this.securityGroups = securityGroups - this.region = this.namespace - this.description = KubernetesApiConverter.fromService(service, accountName) - this.account = accountName - this.createdTime = KubernetesModelUtil.translateTime(service.metadata?.creationTimestamp) - this.yaml = SerializationUtils.dumpWithoutRuntimeStateAsYaml(service) - this.serverGroups = serverGroupList?.collect { serverGroup -> - new LoadBalancerServerGroup( - name: serverGroup?.name, - isDisabled: serverGroup?.isDisabled(), - instances: serverGroup?.instances?.findResults { instance -> - if (instance.isAttached(this.name)) { - return new LoadBalancerInstance( - id: instance.name, - zone: region, - health: [ - state: instance.healthState.toString() - ] - ) - } else { - return (LoadBalancerInstance) null // Groovy generics need to be convinced all control flow paths return the same object type - } - } as Set, - detachedInstances: serverGroup?.instances?.findResults { instance -> - if (!instance.isAttached(this.name)) { - return instance.name - } else { - return (String) null - } - } as Set) - } as Set - } - - @Override - @JsonIgnore - List getByAccounts() { - [new ByAccount(name: account)] - } - - static class ByAccount implements LoadBalancerProvider.ByAccount { - String name - List byRegions = [] - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1SecurityGroup.groovy 
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1SecurityGroup.groovy deleted file mode 100644 index bca7319b794..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1SecurityGroup.groovy +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressTlS -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.model.SecurityGroup -import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary -import com.netflix.spinnaker.clouddriver.model.securitygroups.HttpRule -import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule -import groovy.transform.EqualsAndHashCode -import io.fabric8.kubernetes.api.model.extensions.Ingress -import io.fabric8.kubernetes.client.internal.SerializationUtils - -@EqualsAndHashCode(includes = ["name", "namespace", "accountName"]) -class KubernetesV1SecurityGroup implements SecurityGroup, Serializable { - final String type = KubernetesCloudProvider.ID - final String cloudProvider = KubernetesCloudProvider.ID - - static final private HTTP_PORT = 80 - static final private HTTPS_PORT = 443 - - String id - String name - String application - String accountName - String region - String namespace - String yaml - - Map annotations - Map labels - - Set inboundRules - Set outboundRules - - Set loadBalancers = [] as Set - - List tls - - Ingress ingress - KubernetesSecurityGroupDescription description - - KubernetesV1SecurityGroup(String application, String account, Ingress ingress, boolean includeRules) { - this.ingress = ingress - - this.application = application - this.accountName = account - this.region = ingress.metadata.namespace - this.namespace = this.region - this.name = ingress.metadata.name - this.id = this.name - this.description = KubernetesApiConverter.fromIngress(ingress) - this.yaml = SerializationUtils.dumpWithoutRuntimeStateAsYaml(ingress) - - this.annotations = ingress.metadata.annotations - this.labels = ingress.metadata.labels - - if (ingress.spec?.backend?.serviceName) { - loadBalancers.add(ingress.spec.backend.serviceName) - } - - this.inboundRules = (includeRules ? 
(ingress.spec.rules?.collect { rule -> - def defaultPort = new Rule.PortRange(startPort: HTTP_PORT, endPort: HTTP_PORT) - def tlsPort = new Rule.PortRange(startPort: HTTPS_PORT, endPort: HTTPS_PORT) - - def paths = rule.http?.paths?.collect { path -> - loadBalancers.add(path.backend.serviceName) - path.path - } - - def host = rule.host - - return new HttpRule(portRanges: ([defaultPort, tlsPort] as SortedSet), - paths: paths, - host: host) - }) : []) as Set - - tls = ingress.spec.tls?.collect{ tlsSpecEntry -> - return new KubernetesIngressTlS(hosts: tlsSpecEntry.hosts, secretName: tlsSpecEntry.secretName) - } - } - - SecurityGroupSummary getSummary() { - return new KubernetesV1SecurityGroupSummary(name: name, id: id) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1SecurityGroupSummary.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1SecurityGroupSummary.groovy deleted file mode 100644 index c942e11fb18..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1SecurityGroupSummary.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary - -class KubernetesV1SecurityGroupSummary implements SecurityGroupSummary, Serializable { - String name - String id -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1ServerGroup.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1ServerGroup.groovy deleted file mode 100644 index e5db7c5a242..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesV1ServerGroup.groovy +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
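
KubernetesV1SecurityGroup above projects each Ingress rule into an inbound rule carrying the default HTTP (80) and TLS (443) port ranges plus the rule's declared paths. A runnable sketch of that projection, with maps standing in for the fabric8 Ingress model:

    class InboundRule {
      String host
      List<String> paths = []
      List<Integer> ports = [80, 443]   // the fixed HTTP/TLS port pair used above
    }

    List<InboundRule> inboundRules(Map ingressSpec) {
      (ingressSpec.rules ?: []).collect { rule ->
        new InboundRule(host: rule.host,
                        paths: (rule.http?.paths ?: []).collect { it.path })
      }
    }

    def rules = inboundRules([rules: [[host: "example.com",
                                       http: [paths: [[path: "/api"]]]]]])
    assert rules[0].host == "example.com"
    assert rules[0].paths == ["/api"] && rules[0].ports == [80, 443]
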
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesClientApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.model.HealthState -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import groovy.transform.CompileStatic -import groovy.transform.EqualsAndHashCode -import io.fabric8.kubernetes.api.model.Event -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscaler -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.apps.ReplicaSet -import io.fabric8.kubernetes.client.internal.SerializationUtils -import io.kubernetes.client.models.V1beta1DaemonSet -import io.kubernetes.client.models.V1beta1StatefulSet - -import static com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil.ENABLE_DISABLE_ANNOTATION - -@CompileStatic -@EqualsAndHashCode(includes = ["name", "namespace", "account"]) -class KubernetesV1ServerGroup implements ServerGroup, Serializable { - String name - final String type = KubernetesCloudProvider.ID - final String cloudProvider = KubernetesCloudProvider.ID - String region - String namespace - String account - Long createdTime - Integer replicas = 0 - Boolean hostNetwork = false - Set zones - Set instances - Set loadBalancers = [] as Set - Set securityGroups = [] as Set - Map launchConfig - Map labels = [:] - Map annotations = [:] - DeployKubernetesAtomicOperationDescription deployDescription - KubernetesAutoscalerStatus autoscalerStatus - KubernetesDeploymentStatus deploymentStatus - String kind // Kubernetes resource-type - String yaml - String revision - Map buildInfo - List events - - Map getBuildInfo() { - def imageList = [] - def buildInfo = [:] - /** - * I have added a null check as in statefullset deployDescription is null - */ - if (deployDescription != null) { - for (def container : this.deployDescription.containers) { - imageList.add(KubernetesUtil.getImageIdWithoutRegistry(container.imageDescription)) - } - - buildInfo.images = imageList - - def parsedName = Names.parseName(name) - - buildInfo.createdBy = this.deployDescription?.deployment?.enabled ? 
parsedName.cluster : null - } - return buildInfo - } - - Boolean isDisabled() { - if (replicas == 0) { - return true - } - - if (labels) { - def lbCount = labels.count { key, value -> KubernetesUtil.isLoadBalancerLabel(key) } - if (lbCount == 0) { - return annotations?.get(ENABLE_DISABLE_ANNOTATION) == "false" - } - - def enabledCount = labels.count { key, value -> KubernetesUtil.isLoadBalancerLabel(key) && value == "true" } - return enabledCount == 0 - } - - return false - } - - KubernetesV1ServerGroup() { } - - KubernetesV1ServerGroup(String name, String namespace) { - this.name = name - this.region = namespace - this.namespace = namespace - } - - KubernetesV1ServerGroup(V1beta1StatefulSet statefulSet, String account, List events) { - this.name = statefulSet.metadata?.name - this.account = account - this.region = statefulSet.metadata?.namespace - this.namespace = this.region - this.createdTime = statefulSet.metadata?.creationTimestamp?.getMillis() - this.zones = [this.region] as Set - this.securityGroups = [] - this.replicas = statefulSet.spec?.replicas ?: 0 - this.launchConfig = [:] - this.labels = statefulSet.spec?.template?.metadata?.labels - this.deployDescription = KubernetesClientApiConverter.fromStatefulSet(statefulSet) - this.yaml = KubernetesClientApiConverter.getYaml(statefulSet) - this.kind = statefulSet.kind - this.events = events?.collect { - new KubernetesEvent(it) - } - } - - KubernetesV1ServerGroup(V1beta1DaemonSet daemonSet, String account, List events) { - this.name = daemonSet.metadata?.name - this.account = account - this.region = daemonSet.metadata?.namespace - this.namespace = this.region - this.createdTime = daemonSet.metadata?.creationTimestamp?.getMillis() - this.zones = [this.region] as Set - this.securityGroups = [] - this.launchConfig = [:] - this.labels = daemonSet.spec?.template?.metadata?.labels - this.deployDescription = KubernetesClientApiConverter.fromDaemonSet(daemonSet) - this.yaml = KubernetesClientApiConverter.getYaml(daemonSet) - this.kind = daemonSet.kind - this.events = events?.collect { - new KubernetesEvent(it) - } - } - - KubernetesV1ServerGroup(ReplicaSet replicaSet, String account, List events, HorizontalPodAutoscaler autoscaler) { - this.name = replicaSet.metadata?.name - this.account = account - this.region = replicaSet.metadata?.namespace - this.namespace = this.region - this.createdTime = KubernetesModelUtil.translateTime(replicaSet.metadata?.creationTimestamp) - this.zones = [this.region] as Set - this.securityGroups = [] - this.replicas = replicaSet.spec?.replicas ?: 0 - this.loadBalancers = KubernetesUtil.getLoadBalancers(replicaSet) as Set - this.launchConfig = [:] - this.labels = replicaSet.spec?.template?.metadata?.labels - this.deployDescription = KubernetesApiConverter.fromReplicaSet(replicaSet) - this.yaml = SerializationUtils.dumpWithoutRuntimeStateAsYaml(replicaSet) - this.kind = replicaSet.kind - this.annotations = replicaSet.metadata?.annotations - this.events = events?.collect { - new KubernetesEvent(it) - } - if (autoscaler) { - KubernetesApiConverter.attachAutoscaler(this.deployDescription, autoscaler) - this.autoscalerStatus = new KubernetesAutoscalerStatus(autoscaler) - } - this.revision = KubernetesApiAdaptor.getDeploymentRevision(replicaSet) - } - - KubernetesV1ServerGroup(ReplicationController replicationController, String account, List events, HorizontalPodAutoscaler autoscaler) { - this.name = replicationController.metadata?.name - this.account = account - this.region = replicationController.metadata?.namespace - 
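For context on the convention encoded by isDisabled() above: a server group with zero replicas is always disabled; otherwise at least one load-balancer label must carry the value "true" for it to count as enabled, and when labels exist but none of them are load-balancer labels, the enable/disable annotation decides. A minimal sketch of those branches (illustrative values only; the "load-balancer-frontend" label name is hypothetical, and ENABLE_DISABLE_ANNOTATION is the constant statically imported by this file):

    // Illustrative sketch, not part of the deleted file.
    def sg = new KubernetesV1ServerGroup("app-main-v001", "default")
    sg.replicas = 1

    sg.labels = ["load-balancer-frontend": "true"]   // attached to an LB and serving traffic
    assert !sg.isDisabled()

    sg.labels = ["load-balancer-frontend": "false"]  // attached, but taken out of service
    assert sg.isDisabled()

    sg.labels = [app: "app"]                         // labels present, but none for load balancers,
    sg.annotations = [(ENABLE_DISABLE_ANNOTATION): "false"]  // so the annotation decides
    assert sg.isDisabled()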
this.namespace = this.region - this.createdTime = KubernetesModelUtil.translateTime(replicationController.metadata?.creationTimestamp) - this.zones = [this.region] as Set - this.securityGroups = [] - this.replicas = replicationController.spec?.replicas ?: 0 - this.loadBalancers = KubernetesUtil.getLoadBalancers(replicationController) as Set - this.launchConfig = [:] - this.labels = replicationController.spec?.template?.metadata?.labels - this.deployDescription = KubernetesApiConverter.fromReplicationController(replicationController) - this.yaml = SerializationUtils.dumpWithoutRuntimeStateAsYaml(replicationController) - this.kind = replicationController.kind - this.annotations = replicationController.metadata?.annotations - this.events = events?.collect { - new KubernetesEvent(it) - } - if (autoscaler) { - KubernetesApiConverter.attachAutoscaler(this.deployDescription, autoscaler) - this.autoscalerStatus = new KubernetesAutoscalerStatus(autoscaler) - } - } - - @Override - ServerGroup.InstanceCounts getInstanceCounts() { - new ServerGroup.InstanceCounts( - down: (Integer) instances?.count { it.healthState == HealthState.Down } ?: 0, - outOfService: (Integer) instances?.count { it.healthState == HealthState.OutOfService } ?: 0, - up: (Integer) instances?.count { it.healthState == HealthState.Up } ?: 0, - starting: (Integer) instances?.count { it.healthState == HealthState.Starting } ?: 0, - unknown: (Integer) instances?.count { it.healthState == HealthState.Unknown } ?: 0, - total: (Integer) instances?.size(), - ) - } - - @Override - ServerGroup.Capacity getCapacity() { - new ServerGroup.Capacity(min: replicas, max: replicas, desired: replicas) - } - - @Override - ServerGroup.ImagesSummary getImagesSummary() { - def bi = buildInfo - return new ServerGroup.ImagesSummary() { - @Override - List getSummaries () { - deployDescription.containers.collect({ KubernetesContainerDescription it -> - new ServerGroup.ImageSummary() { - String serverGroupName = name - String imageName = it.name - String imageId = KubernetesUtil.getImageId(it.imageDescription) - - @Override - Map getBuildInfo() { - return bi - } - - @Override - Map getImage() { - return (Map) [ - container: it.name, - registry: it.imageDescription.registry, - tag: it.imageDescription.tag, - repository: it.imageDescription.repository, - imageId: imageId - ] - } - } - }) - } - } - } - - @Override - ServerGroup.ImageSummary getImageSummary() { - imagesSummary?.summaries?.get(0) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/KubernetesV1Provider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/KubernetesV1Provider.groovy deleted file mode 100644 index 21734dd90ba..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/KubernetesV1Provider.groovy +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider - -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.cache.SearchableProvider -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import groovy.util.logging.Slf4j - -import static com.netflix.spinnaker.clouddriver.cache.SearchableProvider.SearchableResource - -@Slf4j -class KubernetesV1Provider extends AgentSchedulerAware implements SearchableProvider { - public static final String PROVIDER_NAME = KubernetesV1Provider.name - - final Map urlMappingTemplates = Collections.emptyMap() - - final Collection agents - final KubernetesCloudProvider cloudProvider - - KubernetesV1Provider(KubernetesCloudProvider cloudProvider, Collection agents) { - this.cloudProvider = cloudProvider - this.agents = agents - } - - final Set defaultCaches = [ - Keys.Namespace.LOAD_BALANCERS.ns, - Keys.Namespace.CLUSTERS.ns, - Keys.Namespace.SERVER_GROUPS.ns, - Keys.Namespace.INSTANCES.ns, - Keys.Namespace.SECURITY_GROUPS.ns, - Keys.Namespace.SERVICE_ACCOUNTS.ns, - Keys.Namespace.CONFIG_MAPS.ns, - Keys.Namespace.SECRETS.ns, - ].asImmutable() - - @Override - String getProviderName() { - return PROVIDER_NAME - } - - final Map searchResultHydrators = [ - (new KubernetesSearchableResource(Keys.Namespace.SERVICE_ACCOUNTS.ns)): new ServiceAccountResultHydrator(), - (new KubernetesSearchableResource(Keys.Namespace.CONFIG_MAPS.ns)): new ConfigMapResultHydrator(), - (new KubernetesSearchableResource(Keys.Namespace.SECRETS.ns)): new SecretResultHydrator(), - ] - - @Override - Map parseKey(String key) { - return Keys.parse(key) - } - - private static class KubernetesSearchableResource extends SearchableResource { - public KubernetesSearchableResource(String resourceType) { - this.resourceType = resourceType.toLowerCase() - this.platform = "kubernetes" - } - } - - private static class ServiceAccountResultHydrator implements SearchableProvider.SearchResultHydrator { - - @Override - Map hydrateResult(Cache cacheView, Map result, String id) { - CacheData sa = cacheView.get(Keys.Namespace.SERVICE_ACCOUNTS.ns, id) - return result + [ - name: sa.attributes.name as String, - namespace: sa.attributes.namespace as String - ] - } - } - - private static class ConfigMapResultHydrator implements SearchableProvider.SearchResultHydrator { - - @Override - Map hydrateResult(Cache cacheView, Map result, String id) { - CacheData cm = cacheView.get(Keys.Namespace.CONFIG_MAPS.ns, id) - return result + [ - name: cm.attributes.name as String, - namespace: cm.attributes.namespace as String - ] - } - } - - private static class SecretResultHydrator implements SearchableProvider.SearchResultHydrator { - - @Override - Map hydrateResult(Cache cacheView, Map result, String id) { - CacheData secret = cacheView.get(Keys.Namespace.SECRETS.ns, id) - return result + [ - name: secret.attributes.name as String, - namespace: secret.attributes.namespace as String - ] - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/KubernetesV1ProviderConfig.groovy 
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/KubernetesV1ProviderConfig.groovy deleted file mode 100644 index a1e92bdf515..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/KubernetesV1ProviderConfig.groovy +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider - -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.cats.thread.NamedThreadFactory -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent.KubernetesV1CachingAgentDispatcher -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import com.netflix.spinnaker.clouddriver.security.ProviderVersion -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope - -import java.util.concurrent.ConcurrentHashMap -import java.util.concurrent.Executors -import java.util.concurrent.ScheduledExecutorService -import java.util.concurrent.TimeUnit - -@Configuration -@Slf4j -class KubernetesV1ProviderConfig implements Runnable { - @Bean - @DependsOn('kubernetesNamedAccountCredentials') - KubernetesV1Provider kubernetesV1Provider(KubernetesCloudProvider kubernetesCloudProvider, - AccountCredentialsRepository accountCredentialsRepository, - KubernetesV1CachingAgentDispatcher kubernetesV1CachingAgentDispatcher) { - this.kubernetesV1Provider = new KubernetesV1Provider(kubernetesCloudProvider, Collections.newSetFromMap(new ConcurrentHashMap())) - this.kubernetesCloudProvider = kubernetesCloudProvider - this.accountCredentialsRepository = accountCredentialsRepository - this.kubernetesV1CachingAgentDispatcher = kubernetesV1CachingAgentDispatcher - - ScheduledExecutorService poller = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(KubernetesV1ProviderConfig.class.getSimpleName())) - - poller.scheduleAtFixedRate(this, 0, 30, TimeUnit.SECONDS) - - kubernetesV1Provider - } - - private KubernetesV1Provider kubernetesV1Provider - private KubernetesCloudProvider kubernetesCloudProvider - private AccountCredentialsRepository accountCredentialsRepository - private KubernetesV1CachingAgentDispatcher kubernetesV1CachingAgentDispatcher - - @Bean - KubernetesV1ProviderSynchronizerTypeWrapper kubernetesV1ProviderSynchronizerTypeWrapper() { - new 
KubernetesV1ProviderSynchronizerTypeWrapper() - } - - @Override - void run() { - synchronizeKubernetesV1Provider(kubernetesV1Provider, accountCredentialsRepository) - } - - class KubernetesV1ProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return KubernetesV1ProviderSynchronizer - } - } - - class KubernetesV1ProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - KubernetesV1ProviderSynchronizer synchronizeKubernetesV1Provider(KubernetesV1Provider kubernetesV1Provider, - AccountCredentialsRepository accountCredentialsRepository) { - def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, KubernetesNamedAccountCredentials, ProviderVersion.v1) - - kubernetesV1Provider.agents.clear() - - for (KubernetesNamedAccountCredentials credentials : allAccounts) { - def newlyAddedAgents = kubernetesV1CachingAgentDispatcher.buildAllCachingAgents(credentials) - - log.info "Adding ${newlyAddedAgents.size()} agents for account ${credentials.name}" - - // If there is an agent scheduler, then this provider has been through the AgentController in the past. - // In that case, we need to do the scheduling here (because accounts have been added to a running system). - if (kubernetesV1Provider.agentScheduler) { - ProviderUtils.rescheduleAgents(kubernetesV1Provider, newlyAddedAgents) - } - - kubernetesV1Provider.agents.addAll(newlyAddedAgents) - } - - new KubernetesV1ProviderSynchronizer() - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesConfigMapCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesConfigMapCachingAgent.groovy deleted file mode 100644 index e7a4e527efe..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesConfigMapCachingAgent.groovy +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2017 Skuid, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
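The poller wired up in KubernetesV1ProviderConfig above is what lets accounts added at runtime receive caching agents without a restart: a single-threaded scheduler re-runs the synchronizer every 30 seconds, rebuilding the provider's agent list from the credentials repository. Reduced to its shape (a sketch with placeholder names; provider, accountRepository and dispatcher stand in for the beans above):

    import java.util.concurrent.Executors
    import java.util.concurrent.TimeUnit

    // Sketch only: re-sync accounts on a fixed cadence.
    def poller = Executors.newSingleThreadScheduledExecutor()
    poller.scheduleAtFixedRate({
      provider.agents.clear()
      accountRepository.all.each { account ->            // placeholder repository accessor
        provider.agents.addAll(dispatcher.buildAllCachingAgents(account))
      }
    } as Runnable, 0, 30, TimeUnit.SECONDS)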
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.ConfigMap - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE - -@Slf4j -class KubernetesConfigMapCachingAgent extends KubernetesV1CachingAgent { - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.CONFIG_MAPS.ns), - ] as Set) - - KubernetesConfigMapCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Loading config maps in $agentType") - reloadNamespaces() - - def configMaps = namespaces.collect { String namespace -> - credentials.apiAdaptor.getConfigMaps(namespace) - }.flatten() - - buildCacheResult(configMaps) - } - - private CacheResult buildCacheResult(List configMaps) { - log.info("Describing items in ${agentType}") - - Map cachedConfigMaps = MutableCacheData.mutableCacheMap() - - for (ConfigMap cm : configMaps) { - if (!cm) { - continue - } - - def key = Keys.getConfigMapKey(accountName, cm.metadata.namespace, cm.metadata.name) - - cachedConfigMaps[key].with { - attributes.name = cm.metadata.name - attributes.namespace = cm.metadata.namespace - } - - } - - log.info("Caching ${cachedConfigMaps.size()} configmaps in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.CONFIG_MAPS.ns): cachedConfigMaps.values(), - ], [:]) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesControllersCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesControllersCachingAgent.groovy deleted file mode 100644 index 7464756028b..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesControllersCachingAgent.groovy +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright 2017 Cisco, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.frigga.Names -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1ServerGroup -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Event -import io.kubernetes.client.models.V1PodList -import io.kubernetes.client.models.V1beta1DaemonSet -import io.kubernetes.client.models.V1beta1StatefulSet - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE - -@Slf4j -class KubernetesControllersCachingAgent extends KubernetesV1CachingAgent implements OnDemandAgent { - final String category = 'serverGroup' - final OnDemandMetricsSupport metricsSupport - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.APPLICATIONS.ns), - AUTHORITATIVE.forType(Keys.Namespace.CLUSTERS.ns), - INFORMATIVE.forType(Keys.Namespace.LOAD_BALANCERS.ns), - AUTHORITATIVE.forType(Keys.Namespace.SERVER_GROUPS.ns), - INFORMATIVE.forType(Keys.Namespace.INSTANCES.ns), - ] as Set) - - KubernetesControllersCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "$KubernetesCloudProvider.ID:$OnDemandAgent.OnDemandType.ServerGroup") - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - String getOnDemandAgentType() { - "${getAgentType()}-OnDemand" - } - - @Override - OnDemandMetricsSupport getMetricsSupport() { - return null - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - OnDemandAgent.OnDemandType.ServerGroup == type && cloudProvider == KubernetesCloudProvider.ID - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - if (!data.containsKey("serverGroupName")) { - return null - } - - if (data.account != accountName) { - return null - } - - reloadNamespaces() - String namespace = data.region - if (!namespaces.contains(namespace)) { - return null - } - - def serverGroupName = data.serverGroupName.toString() - - V1beta1StatefulSet statefulSet = metricsSupport.readData { - 
loadStatefulSet(namespace, serverGroupName) - } - - V1beta1DaemonSet daemonSet = metricsSupport.readData { - loadDaemonSet(namespace, serverGroupName) - } - - CacheResult result = metricsSupport.transformData { - buildCacheResult([new KubernetesController(statefulController: statefulSet, daemonController: daemonSet)], [:], [], Long.MAX_VALUE) - } - - def jsonResult = objectMapper.writeValueAsString(result.cacheResults) - boolean isControllerSetCachingAgentType = true - if (result.cacheResults.values().flatten().isEmpty()) { - - // Determine if this is the correct agent to delete cache which can avoid double deletion - CacheData serverGroup = providerCache.get(Keys.Namespace.SERVER_GROUPS.ns, Keys.getServerGroupKey(accountName, namespace, serverGroupName)) - - if (serverGroup) { - String kind = serverGroup.attributes?.get("serverGroup")?.get("kind") - if (kind == "StatefulSet" || kind == "DaemonSet") { - // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed). - providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, [Keys.getServerGroupKey(accountName, namespace, serverGroupName)]) - }else{ - isControllerSetCachingAgentType = false - } - } - } else { - metricsSupport.onDemandStore { - def cacheData = new DefaultCacheData( - Keys.getServerGroupKey(accountName, namespace, serverGroupName), - 10 * 60, // ttl is 10 minutes - [ - cacheTime: System.currentTimeMillis(), - cacheResults: jsonResult, - processedCount: 0, - processedTime: null - ], - [:] - ) - providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, cacheData) - } - } - - // Evict this server group if it no longer exists. - Map> evictions - if (isControllerSetCachingAgentType) { - evictions = statefulSet || daemonSet ? [:] : [ - (Keys.Namespace.SERVER_GROUPS.ns): [ - Keys.getServerGroupKey(accountName, namespace, serverGroupName) - ] - ] - } - - log.info("On demand cache refresh (data: ${data}) succeeded.") - - return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: result, - evictions: evictions - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - def keys = providerCache.getIdentifiers(Keys.Namespace.ON_DEMAND.ns) - keys = keys.findResults { - def parse = Keys.parse(it) - if (parse && namespaces.contains(parse.namespace) && parse.account == accountName) { - return it - } else { - return null - } - } - - def keyCount = keys.size() - def be = keyCount == 1 ? "is" : "are" - def pluralize = keyCount == 1 ? "" : "s" - log.info("There $be $keyCount pending on demand request$pluralize") - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, keys).collect { - def details = Keys.parse(it.id) - - return [ - details : details, - moniker : convertOnDemandDetails(details), - cacheTime : it.attributes.cacheTime, - processedCount: it.attributes.processedCount, - processedTime : it.attributes.processedTime - ] - } - } - - /** - * Triggered by an AgentScheduler to tell this Agent to load its data. - * - * @param providerCache Cache associated with this Agent's provider - * @return the complete set of data for this Agent. - */ - @Override - CacheResult loadData(ProviderCache providerCache) { - reloadNamespaces() - Long start = System.currentTimeMillis() - List statefulSet = loadStatefulSets() - List daemonSet = loadDaemonSets() - List serverGroups = (statefulSet.collect { - it ? new KubernetesController(statefulController: it) : null - }+ daemonSet.collect { - it ? 
new KubernetesController(daemonController: it) : null - } - ) - null - List evictFromOnDemand = [] - List keepInOnDemand = [] - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, - serverGroups.collect { serverGroup -> - Keys.getServerGroupKey(accountName, serverGroup.namespace, serverGroup.name) - }) - .each { CacheData onDemandEntry -> - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // replication controllers. Furthermore, cache data that hasn't been processed needs to be updated in the ON_DEMAND - // cache, so don't evict data without a processedCount > 0. - if (onDemandEntry.attributes.cacheTime < start && onDemandEntry.attributes.processedCount > 0) { - evictFromOnDemand << onDemandEntry - } else { - keepInOnDemand << onDemandEntry - } - } - - def result = buildCacheResult(serverGroups, keepInOnDemand.collectEntries { CacheData onDemandEntry -> - [(onDemandEntry.id): onDemandEntry] - }, evictFromOnDemand*.id, start) - - result.cacheResults[Keys.Namespace.ON_DEMAND.ns].each { CacheData onDemandEntry -> - onDemandEntry.attributes.processedTime = System.currentTimeMillis() - onDemandEntry.attributes.processedCount = (onDemandEntry.attributes.processedCount ?: 0) + 1 - } - - return result - } - - List loadStatefulSets() { - namespaces.collect { String namespace -> - credentials.apiClientAdaptor.getStatefulSets(namespace) - }.flatten() - } - - List loadDaemonSets() { - namespaces.collect { String namespace -> - credentials.apiClientAdaptor.getDaemonSets(namespace) - }.flatten() - } - - V1PodList loadPods(KubernetesController serverGroup) { - credentials.apiClientAdaptor.getPods(serverGroup.namespace, serverGroup.selector) - } - - V1beta1StatefulSet loadStatefulSet(String namespace, String name) { - credentials.apiClientAdaptor.getStatefulSet(name, namespace) - } - - V1beta1DaemonSet loadDaemonSet(String namespace, String name) { - credentials.apiClientAdaptor.getDaemonSet(name, namespace) - } - - private CacheResult buildCacheResult(List serverGroups, Map onDemandKeep, List onDemandEvict, Long start) { - log.info("Describing items in ${agentType}") - - Map cachedApplications = MutableCacheData.mutableCacheMap() - Map cachedClusters = MutableCacheData.mutableCacheMap() - Map cachedServerGroups = MutableCacheData.mutableCacheMap() - Map cachedInstances = MutableCacheData.mutableCacheMap() - Map cachedLoadBalancers = MutableCacheData.mutableCacheMap() - - Map> stateFulsetEvents = [:].withDefault { _ -> [:] } - Map> daemonsetEvents = [:].withDefault { _ -> [:] } - - try { - namespaces.each { String namespace -> - stateFulsetEvents[namespace] = credentials.apiAdaptor.getEvents(namespace, "V1beta1StatefulSet") - daemonsetEvents[namespace] = credentials.apiAdaptor.getEvents(namespace, "V1beta1DaemonSet") - } - } catch (Exception e) { - log.warn "Failure fetching events for all server groups in $namespaces", e - } - - for (KubernetesController serverGroup: serverGroups) { - if (!serverGroup.exists()) { - continue - } - - def onDemandData = onDemandKeep ? 
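The evict/keep split above is the handshake between on-demand refreshes and the periodic caching cycle: an ON_DEMAND record may only be evicted once it is both older than the cycle's start time and has been processed at least once; anything newer, or not yet processed, must survive so the fresher on-demand data wins. As a small executable restatement (attribute names as used above):

    // Sketch of the keep/evict decision used by loadData above.
    def shouldEvict = { Map attrs, long cycleStart ->
      attrs.cacheTime < cycleStart && (attrs.processedCount ?: 0) > 0
    }

    assert  shouldEvict([cacheTime: 100L, processedCount: 2], 200L)  // stale and processed: evict
    assert !shouldEvict([cacheTime: 100L, processedCount: 0], 200L)  // never processed: keep
    assert !shouldEvict([cacheTime: 300L, processedCount: 2], 200L)  // newer than this cycle: keep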
onDemandKeep[Keys.getServerGroupKey(accountName, serverGroup.namespace, serverGroup.name)] : null - - if (onDemandData && onDemandData.attributes.cacheTime >= start) { - Map> cacheResults = objectMapper.readValue(onDemandData.attributes.cacheResults as String, - new TypeReference>>() { }) - cache(cacheResults, Keys.Namespace.APPLICATIONS.ns, cachedApplications) - cache(cacheResults, Keys.Namespace.CLUSTERS.ns, cachedClusters) - cache(cacheResults, Keys.Namespace.SERVER_GROUPS.ns, cachedServerGroups) - cache(cacheResults, Keys.Namespace.INSTANCES.ns, cachedInstances) - } else { - def serverGroupName = serverGroup.name - def pods = loadPods(serverGroup) - def names = Names.parseName(serverGroupName) - def applicationName = names.app - def clusterName = names.cluster - def serverGroupKey = Keys.getServerGroupKey(accountName, serverGroup.namespace, serverGroupName) - def applicationKey = Keys.getApplicationKey(applicationName) - def clusterKey = Keys.getClusterKey(accountName, applicationName, category, clusterName) - def instanceKeys = [] - def loadBalancerKeys = serverGroup.loadBalancers.collect({ - Keys.getLoadBalancerKey(accountName, serverGroup.namespace, it) - }) - cachedApplications[applicationKey].with { - attributes.name = applicationName - relationships[Keys.Namespace.CLUSTERS.ns].add(clusterKey) - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - - cachedClusters[clusterKey].with { - attributes.name = clusterName - relationships[Keys.Namespace.APPLICATIONS.ns].add(applicationKey) - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - pods?.getItems().forEach { pod -> - def key = Keys.getInstanceKey(accountName, pod.metadata.namespace, pod.metadata.name) - instanceKeys << key - cachedInstances[key].with { - relationships[Keys.Namespace.APPLICATIONS.ns].add(applicationKey) - relationships[Keys.Namespace.CLUSTERS.ns].add(clusterKey) - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - } - boolean isDaemonset - cachedServerGroups[serverGroupKey].with { - def events = null - attributes.name = serverGroupName - - if (serverGroup.statefulController instanceof V1beta1StatefulSet) { - events = stateFulsetEvents[serverGroup.namespace][serverGroupName] - } else if (serverGroup.daemonController instanceof V1beta1DaemonSet) { - events = daemonsetEvents[serverGroup.namespace][serverGroupName] - isDaemonset = true - } - attributes.serverGroup = new KubernetesV1ServerGroup(serverGroup.statefulController ?: serverGroup.daemonController, accountName, events) - if (isDaemonset) { - attributes.serverGroup.replicas = pods?.getItems().size() - } - - relationships[Keys.Namespace.APPLICATIONS.ns].add(applicationKey) - relationships[Keys.Namespace.CLUSTERS.ns].add(clusterKey) - relationships[Keys.Namespace.INSTANCES.ns].addAll(instanceKeys) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - } - } - - log.info("Caching ${cachedApplications.size()} applications in ${agentType}") - log.info("Caching ${cachedClusters.size()} clusters in ${agentType}") - log.info("Caching ${cachedServerGroups.size()} server groups in ${agentType}") - log.info("Caching ${cachedInstances.size()} instances in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.APPLICATIONS.ns): cachedApplications.values(), - 
(Keys.Namespace.LOAD_BALANCERS.ns): cachedLoadBalancers.values(), - (Keys.Namespace.CLUSTERS.ns): cachedClusters.values(), - (Keys.Namespace.SERVER_GROUPS.ns): cachedServerGroups.values(), - (Keys.Namespace.INSTANCES.ns): cachedInstances.values(), - (Keys.Namespace.ON_DEMAND.ns): onDemandKeep.values() - ],[ - (Keys.Namespace.ON_DEMAND.ns): onDemandEvict, - ]) - - } - - private static void cache(Map> cacheResults, String cacheNamespace, Map cacheDataById) { - cacheResults[cacheNamespace].each { - def existingCacheData = cacheDataById[it.id] - if (existingCacheData) { - existingCacheData.attributes.putAll(it.attributes) - it.relationships.each { String relationshipName, Collection relationships -> - existingCacheData.relationships[relationshipName].addAll(relationships) - } - } else { - cacheDataById[it.id] = it - } - } - } - - static class KubernetesController{ - def statefulController - def daemonController - - String getName() { - statefulController ? statefulController.metadata.name : daemonController.metadata.name - } - - String getNamespace() { - statefulController ? statefulController.metadata.namespace : daemonController.metadata.namespace - } - - Map getSelector() { - statefulController ? statefulController.spec.selector?.matchLabels : daemonController.spec.selector?.matchLabels - } - - boolean exists() { - statefulController ?: daemonController - } - - List getLoadBalancers() { - statefulController ? KubernetesUtil.getLoadBalancers(statefulController.spec?.template?.metadata?.labels ?: [:]) : - KubernetesUtil.getLoadBalancers(daemonController.spec?.template?.metadata?.labels ?: [:]) - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesDeploymentCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesDeploymentCachingAgent.groovy deleted file mode 100644 index 6f7ee19c5f8..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesDeploymentCachingAgent.groovy +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spectator.api.Registry
-import com.netflix.spinnaker.cats.agent.AgentDataType
-import com.netflix.spinnaker.cats.agent.CacheResult
-import com.netflix.spinnaker.cats.agent.DefaultCacheResult
-import com.netflix.spinnaker.cats.provider.ProviderCache
-import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials
-import groovy.util.logging.Slf4j
-import io.fabric8.kubernetes.api.model.apps.Deployment
-import io.fabric8.kubernetes.api.model.apps.ReplicaSet
-
-import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE
-import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE
-
-@Slf4j
-class KubernetesDeploymentCachingAgent extends KubernetesV1CachingAgent {
-  static final Set types = Collections.unmodifiableSet([
-    AUTHORITATIVE.forType(Keys.Namespace.DEPLOYMENTS.ns),
-    INFORMATIVE.forType(Keys.Namespace.SERVER_GROUPS.ns),
-  ] as Set)
-
-  KubernetesDeploymentCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials,
-                                   ObjectMapper objectMapper,
-                                   Registry registry,
-                                   int agentIndex,
-                                   int agentCount) {
-    super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount)
-  }
-
-  @Override
-  Collection getProvidedDataTypes() {
-    return types
-  }
-
-  @Override
-  CacheResult loadData(ProviderCache providerCache) {
-    log.info("Loading deployments in $agentType")
-    reloadNamespaces()
-
-    def deployments = namespaces.collect { String namespace ->
-      credentials.apiAdaptor.getDeployments(namespace)
-    }.flatten()
-
-    buildCacheResult(deployments)
-  }
-
-  private CacheResult buildCacheResult(List deployments) {
-    log.info("Describing items in ${agentType}")
-
-    Map cachedDeployments = MutableCacheData.mutableCacheMap()
-    Map cachedReplicaSets = MutableCacheData.mutableCacheMap()
-
-    for (Deployment deployment: deployments) {
-      if (!deployment) {
-        continue
-      }
-
-      def namespace = deployment.metadata.namespace
-      def name = deployment.metadata.name
-
-      // TODO(lwander) examine to see if this is a performance bottleneck at scale
-      def replicaSetKeys = credentials.apiAdaptor.getReplicaSets(namespace, [(name): "true"]).collect { ReplicaSet replicaSet ->
-        Keys.getServerGroupKey(accountName, namespace, replicaSet.metadata.name)
-      }
-
-      def key = Keys.getDeploymentKey(accountName, namespace, name)
-
-      replicaSetKeys.each { String rskey ->
-        cachedReplicaSets[rskey].with {
-          relationships[Keys.Namespace.DEPLOYMENTS.ns] = [key]
-        }
-      }
-
-      cachedDeployments[key].with {
-        attributes.name = deployment.metadata.name
-        attributes.deployment = deployment
-        relationships[Keys.Namespace.SERVER_GROUPS.ns] = replicaSetKeys
-      }
-    }
-
-    log.info("Caching ${cachedDeployments.size()} deployments in ${agentType}")
-
-    new DefaultCacheResult([
-      (Keys.Namespace.DEPLOYMENTS.ns): cachedDeployments.values(),
-      (Keys.Namespace.SERVER_GROUPS.ns): cachedReplicaSets.values(),
-    ], [:])
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesInstanceCachingAgent.groovy
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesInstanceCachingAgent.groovy deleted file mode 100644 index 5e1ada53e6d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesInstanceCachingAgent.groovy +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1Instance -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Event -import io.fabric8.kubernetes.api.model.Pod - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE - -@Slf4j -class KubernetesInstanceCachingAgent extends KubernetesV1CachingAgent { - static final String CACHE_TTL_ANNOTATION = "cache.spinnaker.io/ttl" - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.INSTANCES.ns), - ] as Set) - - KubernetesInstanceCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Loading pods in $agentType") - reloadNamespaces() - - def pods = namespaces.collect { String namespace -> - credentials.apiAdaptor.getPods(namespace) - }.flatten() - - buildCacheResult(pods) - } - - private CacheResult buildCacheResult(List pods) { - log.info("Describing items in ${agentType}") - - Map cachedInstances = MutableCacheData.mutableCacheMap() - - Map>> podEvents = [:].withDefault { _ -> [:] } - try { - namespaces.each { String namespace -> - podEvents[namespace] = credentials.apiAdaptor.getEvents(namespace, "Pod") - } - } catch (Exception e) { - log.warn "Failure fetching events for all pods in $namespaces", e - } - - for (Pod pod : pods) { - if (!pod) { - continue - } - - def events = podEvents[pod.metadata.namespace][pod.metadata.name] ?: [] - - def key = Keys.getInstanceKey(accountName, pod.metadata.namespace, 
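The CACHE_TTL_ANNOTATION handling just below copies a per-pod annotation onto the cached instance as cacheExpiry, letting individual pods opt into earlier cache expiry. What such a pod looks like to this agent, sketched as a plain map (hypothetical pod; the millisecond unit is an assumption, not stated in this file):

    // Illustrative pod metadata, not from this diff.
    def pod = [
      metadata: [
        name       : "app-main-v001-abcde",               // hypothetical pod name
        namespace  : "default",
        annotations: ["cache.spinnaker.io/ttl": "60000"]  // value lands in attributes.cacheExpiry
      ]
    ]
    assert pod.metadata.annotations["cache.spinnaker.io/ttl"] == "60000"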
pod.metadata.name) - cachedInstances[key].with { - if (pod.metadata?.annotations?.containsKey(CACHE_TTL_ANNOTATION)) { - attributes.cacheExpiry = pod.metadata.annotations[CACHE_TTL_ANNOTATION] - } - attributes.name = pod.metadata.name - attributes.instance = new KubernetesV1Instance(pod, events) - } - - } - - log.info("Caching ${cachedInstances.size()} instances in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.INSTANCES.ns): cachedInstances.values(), - ], [:]) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesLoadBalancerCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesLoadBalancerCachingAgent.groovy deleted file mode 100644 index 12976b9423d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesLoadBalancerCachingAgent.groovy +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Service - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE - -@Slf4j -class KubernetesLoadBalancerCachingAgent extends KubernetesV1CachingAgent implements OnDemandAgent { - - final OnDemandMetricsSupport metricsSupport - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.LOAD_BALANCERS.ns), - INFORMATIVE.forType(Keys.Namespace.INSTANCES.ns), - ] as Set) - - KubernetesLoadBalancerCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - 
super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount)
-    this.metricsSupport = new OnDemandMetricsSupport(registry, this, "$KubernetesCloudProvider.ID:$OnDemandAgent.OnDemandType.LoadBalancer")
-  }
-
-
-  @Override
-  Collection getProvidedDataTypes() {
-    return types
-  }
-
-  @Override
-  String getOnDemandAgentType() {
-    "${getAgentType()}-OnDemand"
-  }
-
-  @Override
-  OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) {
-    if (!data.containsKey("loadBalancerName")) {
-      return null
-    }
-
-    if (data.account != accountName) {
-      return null
-    }
-
-    reloadNamespaces()
-    String namespace = data.region
-    if (!this.namespaces.contains(namespace)) {
-      return null
-    }
-
-    def loadBalancerName = data.loadBalancerName.toString()
-
-    Service service = metricsSupport.readData {
-      loadService(namespace, loadBalancerName)
-    }
-
-    CacheResult result = metricsSupport.transformData {
-      buildCacheResult([service], [:], [], Long.MAX_VALUE)
-    }
-
-    def jsonResult = objectMapper.writeValueAsString(result.cacheResults)
-
-    if (result.cacheResults.values().flatten().isEmpty()) {
-      // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed).
-      providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, [Keys.getLoadBalancerKey(accountName, namespace, loadBalancerName)])
-    } else {
-      metricsSupport.onDemandStore {
-        def cacheData = new DefaultCacheData(
-          Keys.getLoadBalancerKey(accountName, namespace, loadBalancerName),
-          10 * 60, // ttl is 10 minutes
-          [
-            cacheTime: System.currentTimeMillis(),
-            cacheResults: jsonResult,
-            processedCount: 0,
-            processedTime: null
-          ],
-          [:]
-        )
-
-        providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, cacheData)
-      }
-    }
-
-    // Evict this load balancer if it no longer exists.
-    Map<String, List<String>> evictions = service ?
[:] : [ - (Keys.Namespace.LOAD_BALANCERS.ns): [ - Keys.getLoadBalancerKey(accountName, namespace, loadBalancerName) - ] - ] - - log.info("On demand cache refresh (data: ${data}) succeeded.") - - return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: result, - evictions: evictions - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - def keys = providerCache.getIdentifiers(Keys.Namespace.ON_DEMAND.ns) - keys = keys.findResults { - def parse = Keys.parse(it) - if (parse && namespaces.contains(parse.namespace) && parse.account == accountName) { - return it - } else { - return null - } - } - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, keys).collect { - def details = Keys.parse(it.id) - - [ - details : details, - moniker : convertOnDemandDetails(details), - cacheTime : it.attributes.cacheTime, - processedCount: it.attributes.processedCount, - processedTime : it.attributes.processedTime - ] - } - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - OnDemandAgent.OnDemandType.LoadBalancer == type && cloudProvider == KubernetesCloudProvider.ID - } - - List loadServices() { - namespaces.collect { String namespace -> - credentials.apiAdaptor.getServices(namespace) - }.flatten() - null - } - - Service loadService(String namespace, String name) { - credentials.apiAdaptor.getService(namespace, name) - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - Long start = System.currentTimeMillis() - List services = loadServices() - - def evictFromOnDemand = [] - def keepInOnDemand = [] - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, - services.collect { Keys.getLoadBalancerKey(accountName, it.metadata.namespace, it.metadata.name) }).each { - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // replication controllers. Furthermore, cache data that hasn't been processed needs to be updated in the ON_DEMAND - // cache, so don't evict data without a processedCount > 0. - if (it.attributes.cacheTime < start && it.attributes.processedCount > 0) { - evictFromOnDemand << it - } else { - keepInOnDemand << it - } - } - - def result = buildCacheResult(services, keepInOnDemand.collectEntries { [(it.id): it] }, evictFromOnDemand*.id, start) - - result.cacheResults[Keys.Namespace.ON_DEMAND.ns].each { - it.attributes.processedTime = System.currentTimeMillis() - it.attributes.processedCount = (it.attributes.processedCount ?: 0) + 1 - } - - return result - } - - private static void cache(Map> cacheResults, String namespace, Map cacheDataById) { - cacheResults[namespace].each { - def existingCacheData = cacheDataById[it.id] - if (!existingCacheData) { - cacheDataById[it.id] = it - } else { - existingCacheData.attributes.putAll(it.attributes) - it.relationships.each { String relationshipName, Collection relationships -> - existingCacheData.relationships[relationshipName].addAll(relationships) - } - } - } - } - - private CacheResult buildCacheResult(List services, Map onDemandKeep, List onDemandEvict, Long start) { - log.info("Describing items in ${agentType}") - - Map cachedLoadBalancers = MutableCacheData.mutableCacheMap() - - for (Service service : services) { - if (!service) { - continue - } - - def namespace = service.metadata.namespace - def onDemandData = onDemandKeep ? 
onDemandKeep[Keys.getLoadBalancerKey(accountName, namespace, service.metadata.name)] : null
-
-      if (onDemandData && onDemandData.attributes.cacheTime >= start) {
-        Map<String, List<MutableCacheData>> cacheResults = objectMapper.readValue(onDemandData.attributes.cacheResults as String, new TypeReference<Map<String, List<MutableCacheData>>>() { })
-        cache(cacheResults, Keys.Namespace.LOAD_BALANCERS.ns, cachedLoadBalancers)
-      } else {
-        def serviceName = service.metadata.name
-        def loadBalancerKey = Keys.getLoadBalancerKey(accountName, namespace, serviceName)
-
-        cachedLoadBalancers[loadBalancerKey].with {
-          attributes.name = serviceName
-          attributes.service = service
-          // Relationships are stored in KubernetesServerGroupCachingAgent.
-        }
-      }
-    }
-
-    log.info("Caching ${cachedLoadBalancers.size()} load balancers in ${agentType}")
-
-    new DefaultCacheResult([
-      (Keys.Namespace.LOAD_BALANCERS.ns): cachedLoadBalancers.values(),
-      (Keys.Namespace.ON_DEMAND.ns): onDemandKeep.values()
-    ],[
-      (Keys.Namespace.ON_DEMAND.ns): onDemandEvict,
-    ])
-  }
-}
-
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesSecretCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesSecretCachingAgent.groovy
deleted file mode 100644
index 4322e077587..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesSecretCachingAgent.groovy
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright 2017 Skuid, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Secret - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE - -@Slf4j -class KubernetesSecretCachingAgent extends KubernetesV1CachingAgent { - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.SECRETS.ns), - ] as Set) - - KubernetesSecretCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Loading secrets in $agentType") - reloadNamespaces() - - def secrets = namespaces.collect { String namespace -> - credentials.apiAdaptor.getSecrets(namespace) - }.flatten() - - buildCacheResult(secrets) - } - - private CacheResult buildCacheResult(List secrets) { - log.info("Describing items in ${agentType}") - - Map cachedSecrets = MutableCacheData.mutableCacheMap() - - for (Secret secret : secrets) { - if (!secret) { - continue - } - - def key = Keys.getSecretKey(accountName, secret.metadata.namespace, secret.metadata.name) - - cachedSecrets[key].with { - attributes.name = secret.metadata.name - attributes.namespace = secret.metadata.namespace - } - - } - - log.info("Caching ${cachedSecrets.size()} secrets in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.SECRETS.ns): cachedSecrets.values(), - ], [:]) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesSecurityGroupCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesSecurityGroupCachingAgent.groovy deleted file mode 100644 index 522bee87dfc..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesSecurityGroupCachingAgent.groovy +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.extensions.Ingress - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE - -@Slf4j -class KubernetesSecurityGroupCachingAgent extends KubernetesV1CachingAgent implements OnDemandAgent { - - private static final OnDemandAgent.OnDemandType ON_DEMAND_TYPE = OnDemandAgent.OnDemandType.SecurityGroup - - final OnDemandMetricsSupport metricsSupport - - static final Set types = Collections.unmodifiableSet([ - INFORMATIVE.forType(Keys.Namespace.LOAD_BALANCERS.ns), - AUTHORITATIVE.forType(Keys.Namespace.SECURITY_GROUPS.ns), - ] as Set) - - KubernetesSecurityGroupCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "$KubernetesCloudProvider.ID:$ON_DEMAND_TYPE") - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - String getOnDemandAgentType() { - "${getAgentType()}-OnDemand" - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - if (!data.containsKey("securityGroupName")) { - return null - } - - if (data.account != accountName) { - return null - } - - reloadNamespaces() - String namespace = data.region - if (!namespaces.contains(namespace)) { - return null - } - - def securityGroupName = data.securityGroupName.toString() - - Ingress ingress = metricsSupport.readData { - credentials.apiAdaptor.getIngress(namespace, securityGroupName) - } - - CacheResult result = metricsSupport.transformData { - buildCacheResult([ingress], [:], [], Long.MAX_VALUE) - } - - def jsonResult = objectMapper.writeValueAsString(result.cacheResults) - - if (result.cacheResults.values().flatten().isEmpty()) { - // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed).
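// [Editorial aside: not part of the original diff] The cacheResults map built above is
// serialized to a JSON string before being stored on the ON_DEMAND record, and
// buildCacheResult() later deserializes it with a Jackson TypeReference. A minimal,
// self-contained sketch of that round trip, using hypothetical data and assuming only
// Jackson on the classpath:
import com.fasterxml.jackson.core.type.TypeReference
import com.fasterxml.jackson.databind.ObjectMapper

def mapper = new ObjectMapper()
// namespace -> list of flattened CacheData-like maps, as a CacheResult would produce
def exampleResults = [securityGroups: [[id: 'kubernetes:securityGroups:my-account:default:my-ingress',
                                        attributes: [name: 'my-ingress']]]]
String json = mapper.writeValueAsString(exampleResults)               // stored as attributes.cacheResults
def restored = mapper.readValue(json,
    new TypeReference<Map<String, List<Map<String, Object>>>>() { })  // read back during loadData()
assert restored.securityGroups[0].attributes.name == 'my-ingress'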
- providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, [Keys.getSecurityGroupKey(accountName, namespace, securityGroupName)]) - } else { - metricsSupport.onDemandStore { - def cacheData = new DefaultCacheData( - Keys.getSecurityGroupKey(accountName, namespace, securityGroupName), - 10 * 60, // ttl is 10 minutes - [ - cacheTime: System.currentTimeMillis(), - cacheResults: jsonResult, - processedCount: 0, - processedTime: null - ], - [:] - ) - - providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, cacheData) - } - } - - // Evict this security group if it no longer exists. - Map<String, Collection<String>> evictions = ingress ? [:] : [ - (Keys.Namespace.SECURITY_GROUPS.ns): [ - Keys.getSecurityGroupKey(accountName, namespace, securityGroupName) - ] - ] - - log.info("On demand cache refresh (data: ${data}) succeeded.") - - return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: result, - evictions: evictions - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - def keys = providerCache.getIdentifiers(Keys.Namespace.ON_DEMAND.ns) - keys = keys.findResults { - def parse = Keys.parse(it) - if (parse && namespaces.contains(parse.namespace) && parse.account == accountName) { - return it - } else { - return null - } - } - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, keys).collect { - def details = Keys.parse(it.id) - - return [ - details : details, - moniker : convertOnDemandDetails(details), - cacheTime : it.attributes.cacheTime, - processedCount: it.attributes.processedCount, - processedTime : it.attributes.processedTime - ] - } - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - ON_DEMAND_TYPE == type && cloudProvider == KubernetesCloudProvider.ID - } - - List loadIngresses() { - namespaces.collect { String namespace -> - credentials.apiAdaptor.getIngresses(namespace) - }.flatten() - null - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - Long start = System.currentTimeMillis() - List ingresses = loadIngresses() - - def evictFromOnDemand = [] - def keepInOnDemand = [] - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, - ingresses.collect { Keys.getSecurityGroupKey(accountName, it.metadata.namespace, it.metadata.name) }).each { - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // ingresses. Furthermore, cache data that hasn't been processed needs to be updated in the ON_DEMAND - // cache, so don't evict data without a processedCount > 0.
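// [Editorial aside: not part of the original diff] The eviction test that follows encodes
// the on-demand bookkeeping rule shared by the caching agents in this file set: a record
// is evicted only once it both predates the current caching cycle and has been folded into
// at least one full loadData() pass. A standalone restatement of the same rule, with
// hypothetical names:
boolean shouldEvictOnDemandRecord(long cacheTime, int processedCount, long cycleStart) {
  // keep records written mid-cycle (cacheTime >= cycleStart) and records that no
  // loadData() pass has processed yet (processedCount == 0)
  cacheTime < cycleStart && processedCount > 0
}

long cycleStart = System.currentTimeMillis()
assert !shouldEvictOnDemandRecord(cycleStart + 10, 5, cycleStart)  // fresher than this cycle: keep
assert !shouldEvictOnDemandRecord(cycleStart - 10, 0, cycleStart)  // never processed: keep
assert  shouldEvictOnDemandRecord(cycleStart - 10, 1, cycleStart)  // old and already processed: evict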
- if (it.attributes.cacheTime < start && it.attributes.processedCount > 0) { - evictFromOnDemand << it - } else { - keepInOnDemand << it - } - } - - def result = buildCacheResult(ingresses, keepInOnDemand.collectEntries { [(it.id): it] }, evictFromOnDemand*.id, start) - - result.cacheResults[Keys.Namespace.ON_DEMAND.ns].each { - it.attributes.processedTime = System.currentTimeMillis() - it.attributes.processedCount = (it.attributes.processedCount ?: 0) + 1 - } - - return result - } - - private static void cache(Map<String, List<CacheData>> cacheResults, String namespace, Map<String, CacheData> cacheDataById) { - cacheResults[namespace].each { - def existingCacheData = cacheDataById[it.id] - if (!existingCacheData) { - cacheDataById[it.id] = it - } else { - existingCacheData.attributes.putAll(it.attributes) - it.relationships.each { String relationshipName, Collection relationships -> - existingCacheData.relationships[relationshipName].addAll(relationships) - } - } - } - } - - private CacheResult buildCacheResult(List ingresses, Map onDemandKeep, List onDemandEvict, Long start) { - log.info("Describing items in ${agentType}") - - Map cachedSecurityGroups = MutableCacheData.mutableCacheMap() - Map cachedLoadBalancers = MutableCacheData.mutableCacheMap() - - for (Ingress ingress : ingresses) { - if (!ingress) { - continue - } - - def namespace = ingress.metadata.namespace - - def onDemandData = onDemandKeep ? onDemandKeep[Keys.getSecurityGroupKey(accountName, namespace, ingress.metadata.name)] : null - - if (onDemandData && onDemandData.attributes.cacheTime >= start) { - Map<String, List<CacheData>> cacheResults = objectMapper.readValue(onDemandData.attributes.cacheResults as String, - new TypeReference<Map<String, List<CacheData>>>() { }) - cache(cacheResults, Keys.Namespace.SECURITY_GROUPS.ns, cachedSecurityGroups) - } else { - def ingressName = ingress.metadata.name - def securityGroupKey = Keys.getSecurityGroupKey(accountName, namespace, ingressName) - - List loadBalancerKeys = ingress.spec.backend?.serviceName ? - [Keys.getLoadBalancerKey(accountName, namespace, ingress.spec.backend.serviceName)] : [] - - loadBalancerKeys.addAll(ingress.spec.rules?.findResults { rule -> - rule.http?.paths?.findResults { path -> - path?.backend?.serviceName ?
Keys.getLoadBalancerKey(accountName, namespace, path.backend.serviceName) : null - } - }?.flatten() ?: []) - - cachedSecurityGroups[securityGroupKey].with { - attributes.name = ingressName - attributes.ingress = ingress - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - - loadBalancerKeys.each { - cachedLoadBalancers[it].with { - relationships[Keys.Namespace.SECURITY_GROUPS.ns].add(securityGroupKey) - } - } - } - } - - log.info("Caching ${cachedSecurityGroups.size()} security groups in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.SECURITY_GROUPS.ns): cachedSecurityGroups.values(), - (Keys.Namespace.LOAD_BALANCERS.ns): cachedLoadBalancers.values(), - (Keys.Namespace.ON_DEMAND.ns): onDemandKeep.values() - ],[ - (Keys.Namespace.ON_DEMAND.ns): onDemandEvict, - ]) - - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServerGroupCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServerGroupCachingAgent.groovy deleted file mode 100644 index a807553d00a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServerGroupCachingAgent.groovy +++ /dev/null @@ -1,480 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.frigga.Names -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1ServerGroup -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.Event -import io.fabric8.kubernetes.api.model.HorizontalPodAutoscaler -import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.apps.ReplicaSet - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE - -@Slf4j -class KubernetesServerGroupCachingAgent extends KubernetesV1CachingAgent implements OnDemandAgent { - final String category = 'serverGroup' - - final OnDemandMetricsSupport metricsSupport - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.APPLICATIONS.ns), - AUTHORITATIVE.forType(Keys.Namespace.CLUSTERS.ns), - INFORMATIVE.forType(Keys.Namespace.LOAD_BALANCERS.ns), - AUTHORITATIVE.forType(Keys.Namespace.SERVER_GROUPS.ns), - INFORMATIVE.forType(Keys.Namespace.INSTANCES.ns), - ] as Set) - - KubernetesServerGroupCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "$KubernetesCloudProvider.ID:$OnDemandAgent.OnDemandType.ServerGroup") - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - String getOnDemandAgentType() { - "${getAgentType()}-OnDemand" - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - if (!data.containsKey("serverGroupName")) { - return null - } - - if (data.account != accountName) { - return null - } - - reloadNamespaces() - String namespace = data.region - if (!namespaces.contains(namespace)) { - return null - } - - def serverGroupName = data.serverGroupName.toString() - - ReplicationController replicationController = metricsSupport.readData { - loadReplicationController(namespace, serverGroupName) - } - - ReplicaSet replicaSet = metricsSupport.readData { - loadReplicaSet(namespace, serverGroupName) - } - - CacheResult result = 
metricsSupport.transformData { - buildCacheResult([new ReplicaSetOrController(replicationController: replicationController, replicaSet: replicaSet)], [:], [], Long.MAX_VALUE) - } - - def jsonResult = objectMapper.writeValueAsString(result.cacheResults) - boolean isControllerSetCachingAgentType = false - if (result.cacheResults.values().flatten().isEmpty()) { - // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed). - providerCache.evictDeletedItems(Keys.Namespace.ON_DEMAND.ns, [Keys.getServerGroupKey(accountName, namespace, serverGroupName)]) - // Determine whether this is the correct agent to delete the cache entry, so we avoid double deletion. - CacheData serverGroup = providerCache.get(Keys.Namespace.SERVER_GROUPS.ns, Keys.getServerGroupKey(accountName, namespace, serverGroupName)) - if (serverGroup) { - String kind = serverGroup.attributes?.get("serverGroup")?.get("kind") - if (kind == "StatefulSet" || kind == "DaemonSet") { - isControllerSetCachingAgentType = true - } - } - } else { - metricsSupport.onDemandStore { - def cacheData = new DefaultCacheData( - Keys.getServerGroupKey(accountName, namespace, serverGroupName), - 10 * 60, // ttl is 10 minutes - [ - cacheTime: System.currentTimeMillis(), - cacheResults: jsonResult, - processedCount: 0, - processedTime: null - ], - [:] - ) - - providerCache.putCacheData(Keys.Namespace.ON_DEMAND.ns, cacheData) - } - } - - // Evict this server group if it no longer exists. - Map<String, Collection<String>> evictions - if (!isControllerSetCachingAgentType) { - evictions = replicationController || replicaSet ? [:] : [ - (Keys.Namespace.SERVER_GROUPS.ns): [ - Keys.getServerGroupKey(accountName, namespace, serverGroupName) - ] - ] - } - - - log.info("On demand cache refresh (data: ${data}) succeeded.") - - return new OnDemandAgent.OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: result, - evictions: evictions - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - def keys = providerCache.getIdentifiers(Keys.Namespace.ON_DEMAND.ns) - keys = keys.findResults { - def parse = Keys.parse(it) - if (parse && namespaces.contains(parse.namespace) && parse.account == accountName) { - return it - } else { - return null - } - } - - def keyCount = keys.size() - def be = keyCount == 1 ? "is" : "are" - def pluralize = keyCount == 1 ?
"" : "s" - log.info("There $be $keyCount pending on demand request$pluralize") - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, keys).collect { - def details = Keys.parse(it.id) - - return [ - details : details, - moniker : convertOnDemandDetails(details), - cacheTime : it.attributes.cacheTime, - processedCount: it.attributes.processedCount, - processedTime : it.attributes.processedTime - ] - } - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - OnDemandAgent.OnDemandType.ServerGroup == type && cloudProvider == KubernetesCloudProvider.ID - } - - List loadReplicationControllers() { - namespaces.collect { String namespace -> - credentials.apiAdaptor.getReplicationControllers(namespace) - }.flatten() - } - - List loadReplicaSets() { - namespaces.collect { String namespace -> - credentials.apiAdaptor.getReplicaSets(namespace) - }.flatten() - } - - ReplicaSet loadReplicaSet(String namespace, String name) { - credentials.apiAdaptor.getReplicaSet(namespace, name) - } - - ReplicationController loadReplicationController(String namespace, String name) { - credentials.apiAdaptor.getReplicationController(namespace, name) - } - - List loadPods(ReplicaSetOrController serverGroup) { - credentials.apiAdaptor.getPods(serverGroup.namespace, serverGroup.selector) - } - - /** - * loaddata() need to load all pod in providers. So we load all pod in one request. That will decrease - * network io overhead. - * @param namespace - * @return - */ - Map> loadAllPods() { - def podsMap = [:] - namespaces.each { String namespace -> - def pods = credentials.apiAdaptor.getPods(namespace) - if (pods){ - podsMap.put(namespace,pods) - } - } - return podsMap - } - - /** - * If this pod is belong to specify server group return true - * @return - */ - boolean isBelongToServerGroup(Map podLabel, Map serverGroupSelector) { - !serverGroupSelector.any { - def podLabelValue = podLabel.get(it.key) - if (!podLabelValue) { - return true - } else if(podLabelValue != it.value) { - return true - } - } - } - - - - @Override - CacheResult loadData(ProviderCache providerCache) { - reloadNamespaces() - Long start = System.currentTimeMillis() - List replicationControllerList = loadReplicationControllers() - List replicaSetList = loadReplicaSets() - List serverGroups = (replicationControllerList.collect { - it ? new ReplicaSetOrController(replicationController: it) : null - } + replicaSetList.collect { - it ? new ReplicaSetOrController(replicaSet: it) : null - }) - null - - List evictFromOnDemand = [] - List keepInOnDemand = [] - - providerCache.getAll(Keys.Namespace.ON_DEMAND.ns, - serverGroups.collect { serverGroup -> - Keys.getServerGroupKey(accountName, serverGroup.namespace, serverGroup.name) - }) - .each { CacheData onDemandEntry -> - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // replication controllers. Furthermore, cache data that hasn't been processed needs to be updated in the ON_DEMAND - // cache, so don't evict data without a processedCount > 0. 
- if (onDemandEntry.attributes.cacheTime < start && onDemandEntry.attributes.processedCount > 0) { - evictFromOnDemand << onDemandEntry - } else { - keepInOnDemand << onDemandEntry - } - } - - def result = buildCacheResult(serverGroups, keepInOnDemand.collectEntries { CacheData onDemandEntry -> - [(onDemandEntry.id): onDemandEntry] - }, evictFromOnDemand*.id, start) - - result.cacheResults[Keys.Namespace.ON_DEMAND.ns].each { CacheData onDemandEntry -> - onDemandEntry.attributes.processedTime = System.currentTimeMillis() - onDemandEntry.attributes.processedCount = (onDemandEntry.attributes.processedCount ?: 0) + 1 - } - - return result - } - - private static void cache(Map<String, List<CacheData>> cacheResults, String cacheNamespace, Map<String, CacheData> cacheDataById) { - cacheResults[cacheNamespace].each { - def existingCacheData = cacheDataById[it.id] - if (existingCacheData) { - existingCacheData.attributes.putAll(it.attributes) - it.relationships.each { String relationshipName, Collection relationships -> - existingCacheData.relationships[relationshipName].addAll(relationships) - } - } else { - cacheDataById[it.id] = it - } - } - } - - private CacheResult buildCacheResult(List serverGroups, Map onDemandKeep, List onDemandEvict, Long start) { - log.info("Describing items in ${agentType}") - - Map cachedApplications = MutableCacheData.mutableCacheMap() - Map cachedClusters = MutableCacheData.mutableCacheMap() - Map cachedServerGroups = MutableCacheData.mutableCacheMap() - Map cachedInstances = MutableCacheData.mutableCacheMap() - Map cachedLoadBalancers = MutableCacheData.mutableCacheMap() - - // Map namespace -> name -> event - Map<String, Map<String, List<Event>>> rcEvents = [:].withDefault { _ -> [:] } - Map<String, Map<String, List<Event>>> rsEvents = [:].withDefault { _ -> [:] } - try { - namespaces.each { String namespace -> - rcEvents[namespace] = credentials.apiAdaptor.getEvents(namespace, KubernetesUtil.DEPRECATED_SERVER_GROUP_KIND) - rsEvents[namespace] = credentials.apiAdaptor.getEvents(namespace, KubernetesUtil.SERVER_GROUP_KIND) - } - } catch (Exception e) { - log.warn "Failure fetching events for all server groups in $namespaces", e - } - - // Map namespace -> name -> autoscaler - Map<String, Map<String, HorizontalPodAutoscaler>> rcAutoscalers = [:].withDefault { _ -> [:] } - Map<String, Map<String, HorizontalPodAutoscaler>> rsAutoscalers = [:].withDefault { _ -> [:] } - Map<String, Map<String, HorizontalPodAutoscaler>> deployAutoscalers = [:].withDefault { _ -> [:] } - try { - namespaces.each { String namespace -> - rcAutoscalers[namespace] = credentials.apiAdaptor.getAutoscalers(namespace, KubernetesUtil.DEPRECATED_SERVER_GROUP_KIND) - rsAutoscalers[namespace] = credentials.apiAdaptor.getAutoscalers(namespace, KubernetesUtil.SERVER_GROUP_KIND) - deployAutoscalers[namespace] = credentials.apiAdaptor.getAutoscalers(namespace, KubernetesUtil.DEPLOYMENT_KIND) - } - } catch (Exception e) { - log.warn "Failure fetching autoscalers for all server groups in $namespaces", e - } - def podsInAllNamespace = loadAllPods() - for (ReplicaSetOrController serverGroup: serverGroups) { - if (!serverGroup.exists()) { - continue - } - - def onDemandData = onDemandKeep ?
onDemandKeep[Keys.getServerGroupKey(accountName, serverGroup.namespace, serverGroup.name)] : null - if (onDemandData && onDemandData.attributes.cacheTime >= start) { - Map<String, List<CacheData>> cacheResults = objectMapper.readValue(onDemandData.attributes.cacheResults as String, - new TypeReference<Map<String, List<CacheData>>>() { }) - cache(cacheResults, Keys.Namespace.APPLICATIONS.ns, cachedApplications) - cache(cacheResults, Keys.Namespace.CLUSTERS.ns, cachedClusters) - cache(cacheResults, Keys.Namespace.SERVER_GROUPS.ns, cachedServerGroups) - cache(cacheResults, Keys.Namespace.INSTANCES.ns, cachedInstances) - } else { - def serverGroupName = serverGroup.name - def pods = podsInAllNamespace.get(serverGroup.namespace)?.findAll { - if (it?.metadata?.labels && serverGroup.selector) { - if (isBelongToServerGroup(it.metadata.labels, serverGroup.selector)) { - return true - } - } - } - def names = Names.parseName(serverGroupName) - def applicationName = names.app - def clusterName = names.cluster - - def serverGroupKey = Keys.getServerGroupKey(accountName, serverGroup.namespace, serverGroupName) - def applicationKey = Keys.getApplicationKey(applicationName) - def clusterKey = Keys.getClusterKey(accountName, applicationName, category, clusterName) - def instanceKeys = [] - def loadBalancerKeys = serverGroup.loadBalancers.collect({ - Keys.getLoadBalancerKey(accountName, serverGroup.namespace, it) - }) - - cachedApplications[applicationKey].with { - attributes.name = applicationName - relationships[Keys.Namespace.CLUSTERS.ns].add(clusterKey) - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - - cachedClusters[clusterKey].with { - attributes.name = clusterName - relationships[Keys.Namespace.APPLICATIONS.ns].add(applicationKey) - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - - pods?.forEach { pod -> - def key = Keys.getInstanceKey(accountName, pod.metadata.namespace, pod.metadata.name) - instanceKeys << key - cachedInstances[key].with { - relationships[Keys.Namespace.APPLICATIONS.ns].add(applicationKey) - relationships[Keys.Namespace.CLUSTERS.ns].add(clusterKey) - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - } - } - - loadBalancerKeys?.forEach { loadBalancerKey -> - cachedLoadBalancers[loadBalancerKey].with { - relationships[Keys.Namespace.SERVER_GROUPS.ns].add(serverGroupKey) - relationships[Keys.Namespace.INSTANCES.ns].addAll(instanceKeys) - } - } - - cachedServerGroups[serverGroupKey].with { - def events = null - def autoscaler = null - attributes.name = serverGroupName - - if (serverGroup.replicaSet) { - if (credentials.apiAdaptor.hasDeployment(serverGroup.replicaSet)) { - autoscaler = deployAutoscalers[serverGroup.namespace][clusterName] - } else { - autoscaler = rsAutoscalers[serverGroup.namespace][serverGroupName] - } - events = rsEvents[serverGroup.namespace][serverGroupName] - } else { - autoscaler = rcAutoscalers[serverGroup.namespace][serverGroupName] - events = rcEvents[serverGroup.namespace][serverGroupName] - } - - attributes.serverGroup = new KubernetesV1ServerGroup(serverGroup.replicaSet ?: serverGroup.replicationController, accountName, events, autoscaler) - relationships[Keys.Namespace.APPLICATIONS.ns].add(applicationKey) - relationships[Keys.Namespace.CLUSTERS.ns].add(clusterKey) -
relationships[Keys.Namespace.LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - relationships[Keys.Namespace.INSTANCES.ns].addAll(instanceKeys) - } - } - } - - log.info("Caching ${cachedApplications.size()} applications in ${agentType}") - log.info("Caching ${cachedClusters.size()} clusters in ${agentType}") - log.info("Caching ${cachedServerGroups.size()} server groups in ${agentType}") - log.info("Caching ${cachedInstances.size()} instances in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.APPLICATIONS.ns): cachedApplications.values(), - (Keys.Namespace.LOAD_BALANCERS.ns): cachedLoadBalancers.values(), - (Keys.Namespace.CLUSTERS.ns): cachedClusters.values(), - (Keys.Namespace.SERVER_GROUPS.ns): cachedServerGroups.values(), - (Keys.Namespace.INSTANCES.ns): cachedInstances.values(), - (Keys.Namespace.ON_DEMAND.ns): onDemandKeep.values() - ],[ - (Keys.Namespace.ON_DEMAND.ns): onDemandEvict, - ]) - - } - - class ReplicaSetOrController { - ReplicationController replicationController - ReplicaSet replicaSet - - String getName() { - replicaSet ? replicaSet.metadata.name : replicationController.metadata.name - } - - String getNamespace() { - replicaSet ? replicaSet.metadata.namespace : replicationController.metadata.namespace - } - - Map getSelector() { - replicaSet ? replicaSet.spec.selector?.matchLabels : replicationController.spec.selector - } - - boolean exists() { - replicaSet || replicationController - } - - List getLoadBalancers() { - replicaSet ? KubernetesUtil.getLoadBalancers(replicaSet) : KubernetesUtil.getLoadBalancers(replicationController) - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServiceAccountCachingAgent.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServiceAccountCachingAgent.groovy deleted file mode 100644 index c14b9df718e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServiceAccountCachingAgent.groovy +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2017 Skuid, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import groovy.util.logging.Slf4j -import io.fabric8.kubernetes.api.model.ServiceAccount - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE - -@Slf4j -class KubernetesServiceAccountCachingAgent extends KubernetesV1CachingAgent { - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.SERVICE_ACCOUNTS.ns), - ] as Set) - - KubernetesServiceAccountCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount) - } - - @Override - Collection getProvidedDataTypes() { - return types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Loading service accounts in $agentType") - reloadNamespaces() - - def serviceAccounts = namespaces.collect { String namespace -> - credentials.apiAdaptor.getServiceAccounts(namespace) - }.flatten() - - buildCacheResult(serviceAccounts) - } - - private CacheResult buildCacheResult(List serviceAccounts) { - log.info("Describing items in ${agentType}") - - Map cachedServiceAccounts = MutableCacheData.mutableCacheMap() - - for (ServiceAccount sa : serviceAccounts) { - if (!sa) { - continue - } - - def key = Keys.getServiceAccountKey(accountName, sa.metadata.namespace, sa.metadata.name) - - cachedServiceAccounts[key].with { - attributes.name = sa.metadata.name - attributes.namespace = sa.metadata.namespace - } - - } - - log.info("Caching ${cachedServiceAccounts.size()} service accounts in ${agentType}") - - new DefaultCacheResult([ - (Keys.Namespace.SERVICE_ACCOUNTS.ns): cachedServiceAccounts.values(), - ], [:]) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1CachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1CachingAgent.java deleted file mode 100644 index d76d45deea2..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1CachingAgent.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.KubernetesV1Provider; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials; -import lombok.Getter; - -public abstract class KubernetesV1CachingAgent extends KubernetesCachingAgent { - @Getter - final protected String providerName = KubernetesV1Provider.PROVIDER_NAME; - - protected KubernetesV1CachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1CachingAgentDispatcher.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1CachingAgentDispatcher.groovy deleted file mode 100644 index ac5b39e2cee..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1CachingAgentDispatcher.groovy +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgent -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgentDispatcher -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -@Component -class KubernetesV1CachingAgentDispatcher implements KubernetesCachingAgentDispatcher { - @Autowired - ObjectMapper objectMapper - - @Autowired - Registry registry - - @Override - Collection buildAllCachingAgents(KubernetesNamedAccountCredentials credentials) { - def agents = [] - for (def index = 0; index < credentials.cacheThreads; index++) { - agents << new KubernetesInstanceCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesLoadBalancerCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesSecurityGroupCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesServerGroupCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesDeploymentCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesServiceAccountCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesConfigMapCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesSecretCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - agents << new KubernetesControllersCachingAgent(credentials, objectMapper, registry, index, credentials.cacheThreads) - } - - return agents - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesJobProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesJobProvider.groovy deleted file mode 100644 index ff71820976e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesJobProvider.groovy +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesJobStatus -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.model.JobProvider -import com.netflix.spinnaker.clouddriver.model.JobState -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -@Slf4j -@Component -class KubernetesJobProvider implements JobProvider { - String platform = "kubernetes" - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - KubernetesJobProvider() { } - - @Override - KubernetesJobStatus collectJob(String account, String location, String id) { - def credentials = accountCredentialsProvider.getCredentials(account) - if (!(credentials?.credentials instanceof KubernetesV1Credentials)) { - return null - } - def trueCredentials = (credentials as KubernetesNamedAccountCredentials).credentials - def pod = trueCredentials.apiAdaptor.getPod(location, id) - def status = new KubernetesJobStatus(pod, account) - - String podName = pod.getMetadata().getName() - StringBuilder logs = new StringBuilder() - - pod.getSpec().getContainers().collect { container -> - logs.append("===== ${container.getName()} =====\n\n") - try { - logs.append(trueCredentials.apiAdaptor.getLog(location, podName, container.getName())) - } catch(Exception e) { - logs.append(e.getMessage()) - } - logs.append("\n\n") - } - status.logs = logs.toString() - - if (status.jobState in [JobState.Failed, JobState.Succeeded]) { - trueCredentials.apiAdaptor.deletePod(location, id) - } - - return status - } - - @Override - Map getFileContents(String account, String location, String id, String fileName) { - return [:] - } - - @Override - void cancelJob(String account, String location, String id) { - def credentials = accountCredentialsProvider.getCredentials(account) - if (!(credentials?.credentials instanceof KubernetesV1Credentials)) { - return - } - - def trueCredentials = (KubernetesV1Credentials) (credentials as KubernetesNamedAccountCredentials).credentials - - try { - if (!trueCredentials.apiAdaptor.getPod(location, id)) { - return - } - - trueCredentials.apiAdaptor.deletePod(location, id) - } catch (Exception e) { - log.warn("Unable to delete $id in $location: $e.message", e); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesProviderUtils.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesProviderUtils.groovy deleted file mode 100644 index 558a79b2f99..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesProviderUtils.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.CacheFilter -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiConverter -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesDeploymentStatus -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1Instance -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1ServerGroup -import io.fabric8.kubernetes.api.model.apps.Deployment - -class KubernetesProviderUtils { - static Set getAllMatchingKeyPattern(Cache cacheView, String namespace, String pattern) { - loadResults(cacheView, namespace, cacheView.filterIdentifiers(namespace, pattern)) - } - - private static Set loadResults(Cache cacheView, String namespace, Collection identifiers) { - cacheView.getAll(namespace, identifiers, RelationshipCacheFilter.none()) - } - - static Collection resolveRelationshipData(Cache cacheView, CacheData source, String relationship) { - resolveRelationshipData(cacheView, source, relationship) { true } - } - - static Collection resolveRelationshipData(Cache cacheView, CacheData source, String relationship, Closure relFilter) { - Collection filteredRelationships = source?.relationships[relationship]?.findAll(relFilter) - filteredRelationships ? cacheView.getAll(relationship, filteredRelationships) : [] - } - - static Collection resolveRelationshipDataForCollection(Cache cacheView, Collection sources, String relationship, CacheFilter cacheFilter = null) { - Set relationships = sources.findResults { it.relationships[relationship]?: [] }.flatten() - relationships ? cacheView.getAll(relationship, relationships, cacheFilter) : [] - } - - static KubernetesV1Instance convertInstance(ObjectMapper objectMapper, CacheData instanceData) { - def instance = objectMapper.convertValue(instanceData.attributes.instance, KubernetesV1Instance) - def loadBalancers = instanceData.relationships[Keys.Namespace.LOAD_BALANCERS.ns].collect { - Keys.parse(it).name - } - instance.loadBalancers = loadBalancers - - return instance - } - - static Map<String, Set<KubernetesV1Instance>> controllerToInstanceMap(ObjectMapper objectMapper, Collection instances) { - Map<String, Set<KubernetesV1Instance>> instanceMap = [:].withDefault { _ -> [] as Set } - instances?.forEach { - def instance = convertInstance(objectMapper, it) - instanceMap[instance.controllerName].add(instance) - } - return instanceMap - } - - static KubernetesV1ServerGroup serverGroupFromCacheData(ObjectMapper objectMapper, CacheData cacheData, Set instances, Deployment deployment) { - KubernetesV1ServerGroup serverGroup = objectMapper.convertValue(cacheData.attributes.serverGroup, KubernetesV1ServerGroup) - serverGroup.instances = instances - serverGroup.deploymentStatus = deployment ?
new KubernetesDeploymentStatus(deployment) : null - serverGroup.deployDescription.deployment = KubernetesApiConverter.fromDeployment(deployment) - return serverGroup - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1ApplicationProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1ApplicationProvider.groovy deleted file mode 100644 index 47174675745..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1ApplicationProvider.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1Application -import com.netflix.spinnaker.clouddriver.model.Application -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys.Namespace.CLUSTERS - -@Component -class KubernetesV1ApplicationProvider implements ApplicationProvider { - private final KubernetesCloudProvider kubernetesCloudProvider - private final Cache cacheView - private final ObjectMapper objectMapper - - @Autowired - KubernetesV1ApplicationProvider(KubernetesCloudProvider kubernetesCloudProvider, Cache cacheView, ObjectMapper objectMapper) { - this.kubernetesCloudProvider = kubernetesCloudProvider - this.cacheView = cacheView - this.objectMapper = objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) - } - - @Override - Set getApplications(boolean expand) { - def relationships = expand ? 
RelationshipCacheFilter.include(CLUSTERS.ns) : RelationshipCacheFilter.none() - Collection applications = cacheView.getAll(APPLICATIONS.ns, cacheView.filterIdentifiers(APPLICATIONS.ns, "${kubernetesCloudProvider.id}:*"), relationships) - applications.collect this.&translate - } - - @Override - Application getApplication(String name) { - translate(cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(name))) - } - - Application translate(CacheData cacheData) { - if (cacheData == null) { - return null - } - - String name = Keys.parse(cacheData.id).application - Map attributes = objectMapper.convertValue(cacheData.attributes, KubernetesV1Application.ATTRIBUTES) - Map<String, Set<String>> clusterNames = [:].withDefault { new HashSet() } - for (String clusterId : cacheData.relationships[CLUSTERS.ns]) { - Map cluster = Keys.parse(clusterId) - if (cluster.account && cluster.name) { - clusterNames[cluster.account].add(cluster.name) - } - } - - new KubernetesV1Application(name, attributes, clusterNames) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1ClusterProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1ClusterProvider.groovy deleted file mode 100644 index 38088680900..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1ClusterProvider.groovy +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.frigga.Names -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.CacheFilter -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1Cluster -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1Instance -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1LoadBalancer -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1ServerGroup -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import io.fabric8.kubernetes.api.model.apps.Deployment -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -@Component -class KubernetesV1ClusterProvider implements ClusterProvider { - private final KubernetesCloudProvider kubernetesCloudProvider - private final Cache cacheView - private final ObjectMapper objectMapper - - @Autowired - KubernetesV1SecurityGroupProvider securityGroupProvider - - @Autowired - KubernetesV1ClusterProvider(KubernetesCloudProvider kubernetesCloudProvider, - Cache cacheView, - ObjectMapper objectMapper) { - this.kubernetesCloudProvider = kubernetesCloudProvider - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set getClusters(String applicationName, String account) { - CacheData application = cacheView.get(Keys.Namespace.APPLICATIONS.ns, Keys.getApplicationKey(applicationName), RelationshipCacheFilter.include(Keys.Namespace.CLUSTERS.ns)) - if (!application) { - return [] as Set - } - - Collection clusterKeys = application.relationships[Keys.Namespace.CLUSTERS.ns].findAll { Keys.parse(it).account == account } - Collection clusters = cacheView.getAll(Keys.Namespace.CLUSTERS.ns, clusterKeys) - translateClusters(clusters, true) as Set - } - - @Override - Map<String, Set<KubernetesV1Cluster>> getClusterSummaries(String applicationName) { - CacheData application = cacheView.get(Keys.Namespace.APPLICATIONS.ns, Keys.getApplicationKey(applicationName)) - application ? mapResponse(translateClusters(KubernetesProviderUtils.resolveRelationshipData(cacheView, application, Keys.Namespace.CLUSTERS.ns), false)) : null - } - - @Override - Map<String, Set<KubernetesV1Cluster>> getClusterDetails(String applicationName) { - CacheData application = cacheView.get(Keys.Namespace.APPLICATIONS.ns, Keys.getApplicationKey(applicationName)) - application ? mapResponse(translateClusters(KubernetesProviderUtils.resolveRelationshipData(cacheView, application, Keys.Namespace.CLUSTERS.ns), true)) : null - } - - @Override - KubernetesV1Cluster getCluster(String application, String account, String name, boolean includeDetails) { - CacheData serverGroupCluster = cacheView.get(Keys.Namespace.CLUSTERS.ns, Keys.getClusterKey(account, application, "serverGroup", name)) - List clusters = [serverGroupCluster] - null - return clusters ?
translateClusters(clusters, includeDetails).inject(new KubernetesV1Cluster()) { KubernetesV1Cluster acc, KubernetesV1Cluster val -> - acc.name = acc.name ?: val.name - acc.accountName = acc.accountName ?: val.accountName - acc.loadBalancers.addAll(val.loadBalancers) - acc.serverGroups.addAll(val.serverGroups) - return acc - } : null - } - - @Override - KubernetesV1Cluster getCluster(String applicationName, String account, String name) { - return getCluster(applicationName, account, name, true) - } - - static Collection resolveRelationshipDataForCollection(Cache cacheView, Collection sources, String relationship, CacheFilter cacheFilter = null) { - Collection relationships = sources?.findResults { it.relationships[relationship] ?: [] }?.flatten() ?: [] - relationships ? cacheView.getAll(relationship, relationships, cacheFilter) : [] - } - - static Map<String, Collection<CacheData>> preserveRelationshipDataForCollection(Cache cacheView, Collection sources, String relationship, CacheFilter cacheFilter = null) { - Map allData = resolveRelationshipDataForCollection(cacheView, sources, relationship, cacheFilter).collectEntries { cacheData -> - [(cacheData.id): cacheData] - } - - return sources.collectEntries { CacheData source -> - [(source.id): source.relationships[relationship].collect { String key -> allData[key] } - null] - } - } - - private Collection translateClusters(Collection clusterData, boolean includeDetails) { - Map<String, KubernetesV1LoadBalancer> loadBalancers - Map<String, Set<KubernetesV1ServerGroup>> serverGroups - - if (includeDetails) { - Collection allLoadBalancers = resolveRelationshipDataForCollection(cacheView, clusterData, Keys.Namespace.LOAD_BALANCERS.ns) - Collection allServerGroups = resolveRelationshipDataForCollection(cacheView, clusterData, Keys.Namespace.SERVER_GROUPS.ns, - RelationshipCacheFilter.include(Keys.Namespace.INSTANCES.ns, Keys.Namespace.LOAD_BALANCERS.ns, Keys.Namespace.DEPLOYMENTS.ns)) - loadBalancers = translateLoadBalancers(allLoadBalancers) - serverGroups = translateServerGroups(allServerGroups) - } - - Collection clusters = clusterData.collect { CacheData clusterDataEntry -> - Map clusterKey = Keys.parse(clusterDataEntry.id) - - def cluster = new KubernetesV1Cluster() - cluster.accountName = clusterKey.account - cluster.name = clusterKey.name - if (includeDetails) { - cluster.loadBalancers = clusterDataEntry.relationships[Keys.Namespace.LOAD_BALANCERS.ns]?.findResults { loadBalancers.get(it) } ?: [] - cluster.serverGroups = serverGroups[cluster.name]?.findAll { it.account == cluster.accountName } ?: [] - } else { - cluster.loadBalancers = clusterDataEntry.relationships[Keys.Namespace.LOAD_BALANCERS.ns]?.collect { loadBalancerKey -> - Map parts = Keys.parse(loadBalancerKey) - new KubernetesV1LoadBalancer(parts.name, parts.namespace, parts.account) - } - - cluster.serverGroups = clusterDataEntry.relationships[Keys.Namespace.SERVER_GROUPS.ns]?.collect { serverGroupKey -> - Map parts = Keys.parse(serverGroupKey) - new KubernetesV1ServerGroup(parts.name, parts.namespace) - } - } - cluster - } - - clusters - } - - private Map<String, Set<KubernetesV1ServerGroup>> translateServerGroups(Collection serverGroupData) { - Collection allLoadBalancers = resolveRelationshipDataForCollection(cacheView, serverGroupData, Keys.Namespace.LOAD_BALANCERS.ns, RelationshipCacheFilter.include(Keys.Namespace.SECURITY_GROUPS.ns)) - def securityGroups = loadBalancerToSecurityGroupMap(securityGroupProvider, cacheView, allLoadBalancers) - Map<String, Set<KubernetesV1Instance>> instances = [:] - preserveRelationshipDataForCollection(cacheView, serverGroupData, Keys.Namespace.INSTANCES.ns, RelationshipCacheFilter.none()).each { key, cacheData ->
- instances[key] = cacheData.collect { it -> KubernetesProviderUtils.convertInstance(objectMapper, it) } as Set - } - Map deployments = [:] - preserveRelationshipDataForCollection(cacheView, serverGroupData, Keys.Namespace.DEPLOYMENTS.ns, RelationshipCacheFilter.none()).each { key, cacheData -> - deployments[key] = cacheData.collect { it -> objectMapper.convertValue(it.attributes.deployment, Deployment.class) }[0] - } - - Map<String, Set<KubernetesV1ServerGroup>> serverGroups = [:].withDefault { _ -> [] as Set } - serverGroupData.forEach { cacheData -> - def serverGroup = KubernetesProviderUtils.serverGroupFromCacheData(objectMapper, cacheData, instances[cacheData.id], deployments[cacheData.id]) - - serverGroup.loadBalancers?.each { - serverGroup.securityGroups.addAll(securityGroups[it]) - } - - serverGroups[Names.parseName(serverGroup.name).cluster].add(serverGroup) - } - - serverGroups - } - - private static Map translateLoadBalancers(Collection loadBalancerData) { - loadBalancerData.collectEntries { loadBalancerEntry -> - Map parts = Keys.parse(loadBalancerEntry.id) - [(loadBalancerEntry.id) : new KubernetesV1LoadBalancer(parts.name, parts.namespace, parts.account)] - } - } - - @Override - Map<String, Set<KubernetesV1Cluster>> getClusters() { - Collection clusterData = cacheView.getAll(Keys.Namespace.CLUSTERS.ns) - Collection clusters = translateClusters(clusterData, true) - mapResponse(clusters) - } - - private static Map<String, Set<KubernetesV1Cluster>> mapResponse(Collection clusters) { - clusters.groupBy { it.accountName }.collectEntries { k, v -> [k, new HashSet(v)] } - } - - static loadBalancerToSecurityGroupMap(KubernetesV1SecurityGroupProvider securityGroupProvider, Cache cacheView, Collection loadBalancers) { - Collection allSecurityGroups = resolveRelationshipDataForCollection(cacheView, loadBalancers, Keys.Namespace.SECURITY_GROUPS.ns, RelationshipCacheFilter.none()) - - Map<String, Set<String>> securityGroups = [:].withDefault { _ -> [] as Set } - allSecurityGroups.each { securityGroup -> - def translated = securityGroupProvider.translateSecurityGroup(securityGroup, true) - - translated.loadBalancers.each { - securityGroups[it].add(translated.id) - } - } - - return securityGroups - } - - @Override - ServerGroup getServerGroup(String account, String namespace, String name, boolean includeDetails) { - String serverGroupKey = Keys.getServerGroupKey(account, namespace, name) - CacheData serverGroupData = cacheView.get(Keys.Namespace.SERVER_GROUPS.ns, serverGroupKey) - if (!serverGroupData) { - return null - } - - Collection allLoadBalancers = resolveRelationshipDataForCollection(cacheView, [serverGroupData], Keys.Namespace.LOAD_BALANCERS.ns, RelationshipCacheFilter.include(Keys.Namespace.SECURITY_GROUPS.ns)) - Deployment deployment = resolveRelationshipDataForCollection(cacheView, [serverGroupData], Keys.Namespace.DEPLOYMENTS.ns, RelationshipCacheFilter.none()).collect { cacheData -> - objectMapper.convertValue(cacheData.attributes.deployment, Deployment.class) - }[0] - Set instances = resolveRelationshipDataForCollection(cacheView, [serverGroupData], Keys.Namespace.INSTANCES.ns, RelationshipCacheFilter.none()).collect { - KubernetesProviderUtils.convertInstance(objectMapper, it) - } as Set - - def securityGroups = loadBalancerToSecurityGroupMap(securityGroupProvider, cacheView, allLoadBalancers) - - def serverGroup = KubernetesProviderUtils.serverGroupFromCacheData(objectMapper, serverGroupData, instances, deployment) - - serverGroup.loadBalancers?.each { - serverGroup.securityGroups.addAll(securityGroups[it]) - } - - return serverGroup - } - - @Override - ServerGroup getServerGroup(String
account, String namespace, String name) { - return getServerGroup(account, namespace, name, true) - } - - @Override - String getCloudProviderId() { - return kubernetesCloudProvider.id - } - - @Override - boolean supportsMinimalClusters() { - return false - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1InstanceProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1InstanceProvider.groovy deleted file mode 100644 index 7f7c60be1fd..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1InstanceProvider.groovy +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1Instance -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.model.InstanceProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.ProviderVersion -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -@Slf4j -@Component -class KubernetesV1InstanceProvider implements InstanceProvider { - private final Cache cacheView - private final ObjectMapper objectMapper - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Autowired - KubernetesV1InstanceProvider(Cache cacheView, ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - final String cloudProvider = KubernetesCloudProvider.ID - - @Override - KubernetesV1Instance getInstance(String account, String namespace, String name) { - Set<CacheData> instances = KubernetesProviderUtils.getAllMatchingKeyPattern(cacheView, Keys.Namespace.INSTANCES.ns, Keys.getInstanceKey(account, namespace, name)) - if (!instances || instances.size() == 0) { - return null - } - - if (instances.size() > 1) { - throw new IllegalStateException("Multiple kubernetes pods with name $name in namespace $namespace exist.") - } - - CacheData instanceData = (CacheData) instances.toArray()[0] - - if (!instanceData) { - return null - } - - def loadBalancers = instanceData.relationships[Keys.Namespace.LOAD_BALANCERS.ns].collect { - Keys.parse(it).name - } - - KubernetesV1Instance
instance = objectMapper.convertValue(instanceData.attributes.instance, KubernetesV1Instance) - instance.loadBalancers = loadBalancers - - return instance - } - - @Override - String getConsoleOutput(String account, String region, String id) { - KubernetesNamedAccountCredentials credentials; - try { - credentials = (KubernetesNamedAccountCredentials) accountCredentialsProvider.getCredentials(account) - } catch(Exception e) { - log.warn("Failure getting account credentials for ${account}") - return null - } - if (credentials?.getProviderVersion() != ProviderVersion.v1) { - return null - } - - def trueCredentials = credentials.credentials - def pod = trueCredentials.apiAdaptor.getPod(region, id) - if (pod == null ) { - return null - } - - String podName = pod.getMetadata().getName() - StringBuilder result = new StringBuilder() - - pod.getSpec().getContainers().collect { container -> - result.append("===== ${container.getName()} =====\n\n") - try { - String log = trueCredentials.apiAdaptor.getLog(region, podName, container.getName()) - result.append(log) - } catch(Exception e){ - result.append(e.getMessage()) - } - result.append("\n\n") - } - - return result.toString() - - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1LoadBalancerProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1LoadBalancerProvider.groovy deleted file mode 100644 index 68b626709fb..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1LoadBalancerProvider.groovy +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1LoadBalancer -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1ServerGroup -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider -import io.fabric8.kubernetes.api.model.Service -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import javax.naming.OperationNotSupportedException - -@Component -class KubernetesV1LoadBalancerProvider implements LoadBalancerProvider<KubernetesV1LoadBalancer> { - - final String cloudProvider = KubernetesCloudProvider.ID - - private final Cache cacheView - private final ObjectMapper objectMapper - - @Autowired - KubernetesV1LoadBalancerProvider(Cache cacheView, ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set<KubernetesV1LoadBalancer> getApplicationLoadBalancers(String applicationName) { - String applicationKey = Keys.getApplicationKey(applicationName) - - CacheData application = cacheView.get(Keys.Namespace.APPLICATIONS.ns, applicationKey) - Set<String> loadBalancerKeys = [] - Set<String> instanceKeys = [] - - - def applicationServerGroups = application ? KubernetesProviderUtils.resolveRelationshipData(cacheView, application, Keys.Namespace.SERVER_GROUPS.ns) : [] - applicationServerGroups.each { CacheData serverGroup -> - loadBalancerKeys.addAll(serverGroup.relationships[Keys.Namespace.LOAD_BALANCERS.ns] ?: []) - } - - loadBalancerKeys.addAll(cacheView.filterIdentifiers(Keys.Namespace.LOAD_BALANCERS.ns, - Keys.getLoadBalancerKey("*", "*", KubernetesUtil.combineAppStackDetail(applicationName, '*', null)))) - loadBalancerKeys.addAll(cacheView.filterIdentifiers(Keys.Namespace.LOAD_BALANCERS.ns, - Keys.getLoadBalancerKey("*", "*", KubernetesUtil.combineAppStackDetail(applicationName, null, null)))) - - def loadBalancers = cacheView.getAll(Keys.Namespace.LOAD_BALANCERS.ns, loadBalancerKeys) - Set<CacheData> allServerGroups = KubernetesProviderUtils.resolveRelationshipDataForCollection(cacheView, loadBalancers, Keys.Namespace.SERVER_GROUPS.ns) - allServerGroups.each { CacheData serverGroup -> - instanceKeys.addAll(serverGroup.relationships[Keys.Namespace.INSTANCES.ns] ?: []) - } - - def instances = cacheView.getAll(Keys.Namespace.INSTANCES.ns, instanceKeys) - - def instanceMap = KubernetesProviderUtils.controllerToInstanceMap(objectMapper, instances) - - Map<String, KubernetesV1ServerGroup> serverGroupMap = allServerGroups.collectEntries { serverGroupData -> - def ownedInstances = instanceMap[(String) serverGroupData.attributes.name] - def serverGroup = KubernetesProviderUtils.serverGroupFromCacheData(objectMapper, serverGroupData, ownedInstances, null) - return [(serverGroupData.id): serverGroup] - } - - return loadBalancers.collect { - translateLoadBalancer(it, serverGroupMap) - } as Set - } - - private KubernetesV1LoadBalancer translateLoadBalancer(CacheData loadBalancerEntry, Map<String, KubernetesV1ServerGroup> serverGroupMap) { - def parts = Keys.parse(loadBalancerEntry.id) - Service service = objectMapper.convertValue(loadBalancerEntry.attributes.service, Service) - List<KubernetesV1ServerGroup> serverGroups = [] - List<String> securityGroups -
loadBalancerEntry.relationships[Keys.Namespace.SERVER_GROUPS.ns]?.forEach { String serverGroupKey -> - KubernetesV1ServerGroup serverGroup = serverGroupMap[serverGroupKey] - if (serverGroup) { - serverGroups << serverGroup - } - return - } - - securityGroups = KubernetesProviderUtils.resolveRelationshipData(cacheView, loadBalancerEntry, Keys.Namespace.SECURITY_GROUPS.ns).findResults { cacheData -> - if (cacheData.id) { - def parse = Keys.parse(cacheData.id) - parse ? parse.name : null - } else { - null - } - } - - return new KubernetesV1LoadBalancer(service, serverGroups, parts.account, securityGroups) - } - - // TODO(lwander): Groovy allows this to compile just fine, even though KubernetesLoadBalancer does - // not implement the LoadBalancerProvider.list interface. - @Override - List<KubernetesV1LoadBalancer> list() { - Collection<String> loadBalancers = cacheView.getIdentifiers(Keys.Namespace.LOAD_BALANCERS.ns) - loadBalancers.findResults { - def parse = Keys.parse(it) - parse ? new KubernetesV1LoadBalancer(parse.name, parse.namespace, parse.account) : null - } - } - - // TODO(lwander): Implement if/when these methods are needed in Deck. - @Override - LoadBalancerProvider.Item get(String name) { - throw new OperationNotSupportedException("Kubernetes is a special snowflake.") - } - - @Override - List byAccountAndRegionAndName(String account, - String region, - String name) { - throw new OperationNotSupportedException("No balancers for you!") - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1SecurityGroupProvider.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1SecurityGroupProvider.groovy deleted file mode 100644 index 61263721eef..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/KubernetesV1SecurityGroupProvider.groovy +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.model.KubernetesV1SecurityGroup -import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider -import io.fabric8.kubernetes.api.model.extensions.Ingress -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -@Component -class KubernetesV1SecurityGroupProvider implements SecurityGroupProvider<KubernetesV1SecurityGroup> { - - final String cloudProvider = KubernetesCloudProvider.ID - private final Cache cacheView - private final ObjectMapper objectMapper - - @Autowired - KubernetesV1SecurityGroupProvider(Cache cacheView, ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set<KubernetesV1SecurityGroup> getAll(boolean includeRules) { - lookup("*", "*", "*", includeRules) - } - - @Override - Set<KubernetesV1SecurityGroup> getAllByRegion(boolean includeRules, String namespace) { - lookup("*", namespace, "*", includeRules) - } - - @Override - Set<KubernetesV1SecurityGroup> getAllByAccount(boolean includeRules, String account) { - lookup(account, "*", "*", includeRules) - } - - @Override - Set<KubernetesV1SecurityGroup> getAllByAccountAndName(boolean includeRules, String account, String name) { - lookup(account, "*", name, includeRules) - } - - @Override - Set<KubernetesV1SecurityGroup> getAllByAccountAndRegion(boolean includeRules, String account, String namespace) { - lookup(account, namespace, "*", includeRules) - } - - @Override - KubernetesV1SecurityGroup get(String account, String namespace, String name, String vpcId) { - lookup(account, namespace, name, true).getAt(0) - } - - Set<KubernetesV1SecurityGroup> lookup(String account, String namespace, String name, boolean includeRule) { - def keys = cacheView.filterIdentifiers(Keys.Namespace.SECURITY_GROUPS.ns, Keys.getSecurityGroupKey(account, namespace, name)) - cacheView.getAll(Keys.Namespace.SECURITY_GROUPS.ns, keys).collect { - translateSecurityGroup(it, includeRule) - } - } - - public KubernetesV1SecurityGroup translateSecurityGroup(CacheData securityGroupEntry, boolean includeRule) { - def parts = Keys.parse(securityGroupEntry.id) - Ingress ingress = objectMapper.convertValue(securityGroupEntry.attributes.ingress, Ingress) - return new KubernetesV1SecurityGroup(parts.application, parts.account, ingress, includeRule) - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/MutableCacheData.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/MutableCacheData.groovy deleted file mode 100644 index 6cb41c51d03..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/view/MutableCacheData.groovy +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.view - -import com.fasterxml.jackson.annotation.JsonCreator -import com.fasterxml.jackson.annotation.JsonProperty -import com.netflix.spinnaker.cats.cache.CacheData - -/* TODO(lwander) this was taken from the netflix cluster caching, and should probably be shared between all providers. */ -class MutableCacheData implements CacheData { - final String id - int ttlSeconds = -1 - final Map<String, Object> attributes = [:] - final Map<String, Collection<String>> relationships = [:].withDefault { [] as Set } - - public MutableCacheData(String id) { - this.id = id - } - - @JsonCreator - public MutableCacheData(@JsonProperty("id") String id, - @JsonProperty("attributes") Map<String, Object> attributes, - @JsonProperty("relationships") Map<String, Collection<String>> relationships) { - this(id); - this.attributes.putAll(attributes); - this.relationships.putAll(relationships); - } - - public static Map<String, CacheData> mutableCacheMap() { - return [:].withDefault { String id -> new MutableCacheData(id) } - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesConfigParser.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesConfigParser.groovy deleted file mode 100644 index 0b513deb8ca..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesConfigParser.groovy +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.security - -import io.fabric8.kubernetes.api.model.AuthInfo -import io.fabric8.kubernetes.api.model.Cluster -import io.fabric8.kubernetes.api.model.Context -import io.fabric8.kubernetes.api.model.NamedContext -import io.fabric8.kubernetes.client.Config -import io.fabric8.kubernetes.client.internal.KubeConfigUtils - -import java.nio.file.Files - -class KubernetesConfigParser { - static Config parse(String kubeconfigFile, String context, String cluster, String user, List<String> namespaces, Boolean serviceAccount) { - if (serviceAccount) { - return withServiceAccount() - } else { - return withKubeConfig(kubeconfigFile, context, cluster, user, namespaces) - } - } - - static Config withServiceAccount() { - Config config = new Config() - - boolean serviceAccountCaCertExists = Files.isRegularFile(new File(Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH).toPath()) - if (serviceAccountCaCertExists) { - config.setCaCertFile(Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH) - } else { - throw new IllegalStateException("Could not find CA cert for service account at $Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH") - } - - try { - String serviceTokenCandidate = new String(Files.readAllBytes(new File(Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH).toPath())) - if (serviceTokenCandidate != null) { - String error = "Configured service account doesn't have access. Service account may have been revoked." - config.setOauthToken(serviceTokenCandidate) - config.getErrorMessages().put(401, "Unauthorized! " + error) - config.getErrorMessages().put(403, "Forbidden! " + error) - } else { - throw new IllegalStateException("Did not find service account token at $Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH") - } - } catch (IOException e) { - throw new IllegalStateException("Could not read service account token at $Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH", e) - } - - return config - } - - static Config withKubeConfig(String kubeconfigFile, String context, String cluster, String user, List<String> namespaces) { - def kubeConfig = KubeConfigUtils.parseConfig(new File(kubeconfigFile)) - Config config = new Config() - - String resolvedContext = context ?: kubeConfig.currentContext - Context currentContext = kubeConfig.contexts.find { NamedContext it -> - it.name == resolvedContext - }?.getContext() - - if (!context && !currentContext) { - throw new IllegalArgumentException("Context $context was not found in $kubeconfigFile".toString()) - } - - currentContext.user = user ?: currentContext.user - currentContext.cluster = cluster ?: currentContext.cluster - if (namespaces) { - currentContext.namespace = namespaces[0] - } else if (!currentContext.namespace) { - currentContext.namespace = "default" - } - - Cluster currentCluster = KubeConfigUtils.getCluster(kubeConfig, currentContext) - config.setApiVersion("v1") // TODO(lwander) Make config parameter when new versions arrive.
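- // Note: the proxy handling below honors the standard environment variables; the upper-case form (HTTP_PROXY, HTTPS_PROXY, NO_PROXY) takes precedence over the lower-case fallback, and NO_PROXY is split on commas into a host list.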
- String httpProxy = System.getenv("HTTP_PROXY") ?: System.getenv("http_proxy") - String httpsProxy = System.getenv("HTTPS_PROXY") ?: System.getenv("https_proxy") - String noProxy = System.getenv("NO_PROXY") ?: System.getenv("no_proxy") - if (httpProxy != null && httpProxy != "") { - config.setHttpProxy(httpProxy) - } - if (httpsProxy != null && httpsProxy != "") { - config.setHttpsProxy(httpsProxy) - } - if (noProxy != null && noProxy != "") { - String[] noProxyList = noProxy.split(",") - config.setNoProxy(noProxyList) - } - if (currentCluster != null) { - config.setMasterUrl(currentCluster.getServer() + (currentCluster.getServer().endsWith("/") ? "": "/")) - - config.setNamespace(currentContext.getNamespace()) - config.setTrustCerts(currentCluster.getInsecureSkipTlsVerify() != null && currentCluster.getInsecureSkipTlsVerify()) - config.setCaCertFile(currentCluster.getCertificateAuthority()) - config.setCaCertData(currentCluster.getCertificateAuthorityData()) - - AuthInfo currentAuthInfo = KubeConfigUtils.getUserAuthInfo(kubeConfig, currentContext) - if (currentAuthInfo != null) { - config.setClientCertFile(currentAuthInfo.getClientCertificate()) - config.setClientCertData(currentAuthInfo.getClientCertificateData()) - config.setClientKeyFile(currentAuthInfo.getClientKey()) - config.setClientKeyData(currentAuthInfo.getClientKeyData()) - config.setOauthToken(currentAuthInfo.getToken()) - config.setUsername(currentAuthInfo.getUsername()) - config.setPassword(currentAuthInfo.getPassword()) - - config.getErrorMessages().put(401, "Unauthorized! Token may have expired! Please log-in again.") - config.getErrorMessages().put(403, "Forbidden! User ${currentContext.user} doesn't have permission.".toString()) - } - } - - return config - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesV1Credentials.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesV1Credentials.java deleted file mode 100644 index fe2cfbc9eb8..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesV1Credentials.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.security; - -import com.google.common.collect.Lists; -import com.netflix.servo.util.VisibleForTesting; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesApiClientConfig; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesClientApiAdapter; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; -import io.fabric8.kubernetes.api.model.Namespace; -import io.fabric8.kubernetes.api.model.NamespaceBuilder; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.SecretBuilder; -import io.fabric8.kubernetes.client.Config; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.validation.ConstraintViolationException; -import java.io.UnsupportedEncodingException; -import java.util.ArrayList; -import java.util.Base64; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class KubernetesV1Credentials implements KubernetesCredentials { - private final KubernetesApiAdaptor apiAdaptor; - private KubernetesClientApiAdapter apiClientAdaptor; - private final List<String> namespaces; - private final List<String> omitNamespaces; - private final List<LinkedDockerRegistryConfiguration> dockerRegistries; - private final HashMap<String, Set<String>> imagePullSecrets = new HashMap<>(); - private final Logger LOG; - private final AccountCredentialsRepository repository; - private final HashSet<String> dynamicRegistries = new HashSet<>(); - private final boolean configureImagePullSecrets; - private List<String> oldNamespaces; - - public KubernetesV1Credentials( - String name, - String kubeconfigFile, - String context, - String cluster, - String user, - String userAgent, - Boolean serviceAccount, - boolean configureImagePullSecrets, - List<String> namespaces, - List<String> omitNamespaces, - List<LinkedDockerRegistryConfiguration> dockerRegistries, - Registry spectatorRegistry, - AccountCredentialsRepository accountCredentialsRepository) { - if (dockerRegistries == null || dockerRegistries.size() == 0) { - throw new IllegalArgumentException("Docker registries for Kubernetes account " + name + " are required."); - } - - Config config = KubernetesConfigParser.parse(kubeconfigFile, context, cluster, user, namespaces, serviceAccount); - config.setUserAgent(userAgent); - - KubernetesApiClientConfig configClient = new KubernetesApiClientConfig(kubeconfigFile, context, cluster, user, userAgent, serviceAccount); - - this.apiAdaptor = new KubernetesApiAdaptor(name, config, spectatorRegistry); - this.apiClientAdaptor = new KubernetesClientApiAdapter(name, configClient, spectatorRegistry); - this.namespaces = namespaces != null ? namespaces : new ArrayList<>(); - this.omitNamespaces = omitNamespaces != null ?
omitNamespaces : new ArrayList<>(); - this.dockerRegistries = dockerRegistries; - this.repository = accountCredentialsRepository; - this.LOG = LoggerFactory.getLogger(KubernetesV1Credentials.class); - this.configureImagePullSecrets = configureImagePullSecrets; - - configureDockerRegistries(); - } - - @VisibleForTesting - private KubernetesV1Credentials( - KubernetesApiAdaptor apiAdaptor, - List<String> namespaces, - List<String> omitNamespaces, - List<LinkedDockerRegistryConfiguration> dockerRegistries, - AccountCredentialsRepository repository) { - this.apiAdaptor = apiAdaptor; - this.namespaces = namespaces != null ? namespaces : new ArrayList<>(); - this.omitNamespaces = omitNamespaces != null ? omitNamespaces : new ArrayList<>(); - this.dockerRegistries = dockerRegistries; - this.repository = repository; - this.LOG = LoggerFactory.getLogger(KubernetesV1Credentials.class); - this.configureImagePullSecrets = true; - - configureDockerRegistries(); - } - - private void configureDockerRegistries() { - oldNamespaces = namespaces; - - for (LinkedDockerRegistryConfiguration dockerRegistryConfiguration : dockerRegistries) { - if (dockerRegistryConfiguration.getNamespaces() == null || dockerRegistryConfiguration.getNamespaces().isEmpty()) { - dynamicRegistries.add(dockerRegistryConfiguration.getAccountName()); - } - } - - try { - List<String> knownNamespaces = !namespaces.isEmpty() ? namespaces : apiAdaptor.getNamespacesByName(); - reconfigureRegistries(knownNamespaces); - } catch (Exception e) { - LOG.warn("Could not determine kubernetes namespaces. Will try again later.", e); - } - - } - - public List<String> getDeclaredNamespaces() { - try { - if (namespaces != null && !namespaces.isEmpty()) { - // If namespaces are provided, use them - reconfigureRegistries(namespaces); - return namespaces; - } else { - List<String> addedNamespaces = apiAdaptor.getNamespacesByName(); - addedNamespaces.removeAll(omitNamespaces); - - List<String> resultNamespaces = new ArrayList<>(addedNamespaces); - - // Find the namespaces that were added, and add docker secrets to them. No need to track deleted - // namespaces since they delete their secrets automatically. - addedNamespaces.removeAll(oldNamespaces); - reconfigureRegistries(resultNamespaces); - oldNamespaces = resultNamespaces; - - return resultNamespaces; - } - } catch (Exception e) { - LOG.warn("Could not determine kubernetes namespaces. Will try again later.", e); - return Lists.newArrayList(); - } - } - - private void reconfigureRegistries(List<String> allNamespaces) { - List<String> affectedNamespaces = new ArrayList<>(allNamespaces); - if (!configureImagePullSecrets) { - return; - } - - // only initialize namespaces that haven't been initialized yet.
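- // A registry pinned to explicit namespaces only ever receives secrets in those namespaces; a "dynamic" registry (one configured without namespaces) is instead granted every namespace discovered above.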
- List<String> initializedNamespaces = new ArrayList<>(imagePullSecrets.keySet()); - affectedNamespaces.removeAll(initializedNamespaces); - - for (int i = 0; i < dockerRegistries.size(); i++) { - LinkedDockerRegistryConfiguration registry = dockerRegistries.get(i); - List<String> registryNamespaces = registry.getNamespaces(); - // If a registry was not initially configured with any namespace, it can deploy to any namespace, otherwise - // restrict the deploy to the registryNamespaces - if (!dynamicRegistries.contains(registry.getAccountName())) { - affectedNamespaces = registryNamespaces; - } else { - registry.setNamespaces(allNamespaces); - } - - if (affectedNamespaces != null && !affectedNamespaces.isEmpty()) { - LOG.debug("Adding secrets for docker registry {} in {}", registry.getAccountName(), affectedNamespaces); - } - - DockerRegistryNamedAccountCredentials account = (DockerRegistryNamedAccountCredentials) repository.getOne(registry.getAccountName()); - - if (account == null) { - LOG.warn("The account " + registry.getAccountName() + " was not yet loaded inside Clouddriver. If you are seeing this message repeatedly, it likely cannot be loaded."); - continue; - } - - for (String namespace : affectedNamespaces) { - Namespace res = apiAdaptor.getNamespace(namespace); - if (res == null) { - NamespaceBuilder namespaceBuilder = new NamespaceBuilder(); - Namespace newNamespace = namespaceBuilder.withNewMetadata().withName(namespace).endMetadata().build(); - apiAdaptor.createNamespace(newNamespace); - } - - SecretBuilder secretBuilder = new SecretBuilder(); - String secretName = registry.getAccountName(); - - secretBuilder = secretBuilder.withNewMetadata().withName(secretName).withNamespace(namespace).endMetadata(); - - HashMap<String, String> secretData = new HashMap<>(1); - String dockerCfg = String.format("{ \"%s\": { \"auth\": \"%s\", \"email\": \"%s\" } }", - account.getAddress(), - account.getBasicAuth(), - account.getEmail()); - - try { - dockerCfg = new String(Base64.getEncoder().encode(dockerCfg.getBytes("UTF-8")), "UTF-8"); - } catch (UnsupportedEncodingException uee) { - throw new IllegalStateException("Unable to encode docker config ", uee); - } - secretData.put(".dockercfg", dockerCfg); - - secretBuilder = secretBuilder.withData(secretData).withType("kubernetes.io/dockercfg"); - try { - Secret newSecret = secretBuilder.build(); - Secret oldSecret = apiAdaptor.getSecret(namespace, secretName); - if (oldSecret != null) { - if (oldSecret.getData().equals(newSecret.getData())) { - LOG.debug("Skipping creation of duplicate secret " + secretName + " in namespace " + namespace); - } else { - apiAdaptor.editSecret(namespace, secretName).addToData(newSecret.getData()).done(); - } - } else { - apiAdaptor.createSecret(namespace, secretBuilder.build()); - } - } catch (ConstraintViolationException cve) { - throw new IllegalStateException("Unable to build secret: " + cve.getMessage() + - " due to violations " + cve.getConstraintViolations(), - cve); - } - - Set<String> existingSecrets = imagePullSecrets.get(namespace); - existingSecrets = existingSecrets != null ?
existingSecrets : new HashSet<>(); - existingSecrets.add(secretName); - imagePullSecrets.put(namespace, existingSecrets); - } - } - } - - public KubernetesApiAdaptor getApiAdaptor() { - return apiAdaptor; - } - - public KubernetesClientApiAdapter getClientApiAdaptor() { - return apiClientAdaptor; - } - - - public List<LinkedDockerRegistryConfiguration> getDockerRegistries() { - return dockerRegistries; - } - - public Map<String, Set<String>> getImagePullSecrets() { - return imagePullSecrets; - } - - public Boolean isRegisteredNamespace(String namespace) { - return getDeclaredNamespaces().contains(namespace); - } - - public Boolean isRegisteredImagePullSecret(String secret, String namespace) { - Set<String> secrets = imagePullSecrets.get(namespace); - if (secrets == null) { - return false; - } - return secrets.contains(secret); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/README.md b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/README.md deleted file mode 100644 index 1e8232610bf..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Running the provider... - -For any account, add `providerVersion: v2` as a sibling to `name` and other account-level fields. e.g. - -```yaml -kubernetes: - enabled: true - accounts: - - name: k8s-v2 - context: my_context_in_the_kubeconfig - providerVersion: v2 -``` diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacer.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacer.java deleted file mode 100644 index 55af1ce38e6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacer.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ArrayNode; -import com.jayway.jsonpath.Configuration; -import com.jayway.jsonpath.DocumentContext; -import com.jayway.jsonpath.JsonPath; -import com.jayway.jsonpath.PathNotFoundException; -import com.jayway.jsonpath.spi.json.JacksonJsonNodeJsonProvider; -import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.function.Function; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -@Slf4j -public class ArtifactReplacer { - private static final ObjectMapper mapper = new ObjectMapper(); - private static final Configuration configuration = Configuration.builder() - .jsonProvider(new JacksonJsonNodeJsonProvider()) - .mappingProvider(new JacksonMappingProvider()) - .build(); - - List<Replacer> replacers = new ArrayList<>(); - - public ArtifactReplacer addReplacer(Replacer replacer) { - replacers.add(replacer); - return this; - } - - private static List<Artifact> filterKubernetesArtifactsByNamespaceAndAccount(String namespace, String account, List<Artifact> artifacts) { - return artifacts.stream() - // Keep artifacts that either aren't k8s, or are in the same namespace and account as our manifest - .filter(a -> { - String type = a.getType(); - if (StringUtils.isEmpty(type)) { - log.warn("Artifact {} without a type, ignoring", a); - return false; - } - - if (!type.startsWith("kubernetes/")) { - return true; - } - - boolean locationMatches; - String location = a.getLocation(); - if (StringUtils.isEmpty(location)) { - locationMatches = StringUtils.isEmpty(namespace); - } else { - locationMatches = location.equals(namespace); - } - - boolean accountMatches; - String artifactAccount = KubernetesArtifactConverter.getAccount(a); - // If the artifact fails to provide an account, we'll assume this was unintentional and match anyways - accountMatches = StringUtils.isEmpty(artifactAccount) || artifactAccount.equals(account); - - return accountMatches && locationMatches; - }) - .collect(Collectors.toList()); - } - - public ReplaceResult replaceAll(KubernetesManifest input, List<Artifact> artifacts, String namespace, String account) { - log.debug("Doing replacement on {} using {}", input, artifacts); - // final to use in below lambda - final List<Artifact> finalArtifacts = filterKubernetesArtifactsByNamespaceAndAccount(namespace, account, artifacts); - DocumentContext document; - try { - document = JsonPath.using(configuration).parse(mapper.writeValueAsString(input)); - } catch (JsonProcessingException e) { - log.error("Malformed manifest", e); - throw new RuntimeException(e); - } - - Set<Artifact> replacedArtifacts = replacers.stream() - .map(r -> finalArtifacts.stream() - .filter(a -> r.replaceIfPossible(document, a))
- .collect(Collectors.toSet())) - .flatMap(Collection::stream) - .collect(Collectors.toSet()); - - try { - return ReplaceResult.builder() - .manifest(mapper.readValue(document.jsonString(), KubernetesManifest.class)) - .boundArtifacts(replacedArtifacts) - .build(); - } catch (IOException e) { - log.error("Malformed Document Context", e); - throw new RuntimeException(e); - } - } - - public Set<Artifact> findAll(KubernetesManifest input) { - DocumentContext document; - try { - document = JsonPath.using(configuration).parse(mapper.writeValueAsString(input)); - } catch (JsonProcessingException e) { - throw new RuntimeException("Malformed manifest", e); - } - - return replacers.stream() - .map(r -> { - try { - return ((List<String>) mapper.convertValue(r.findAll(document), new TypeReference<List<String>>() { })) - .stream() - .map(s -> { - String nameFromReference = r.getNameFromReference(s); - String name = nameFromReference == null ? s : nameFromReference; - if (r.namePattern == null || nameFromReference != null) { - return Artifact.builder() - .type(r.getType().toString()) - .reference(s) - .name(name) - .build(); - } else { - return null; - } - } - ).filter(Objects::nonNull); - } catch (Exception e) { - // This happens when a manifest isn't fully defined (e.g. not all properties are there) - log.debug("Failure converting artifacts for {} using {} (skipping)", input.getFullResourceName(), r, e); - return Stream.<Artifact> empty(); - } - } - ).flatMap(x -> x) - .collect(Collectors.toSet()); - } - - @Slf4j - @Builder - @AllArgsConstructor - public static class Replacer { - private final String replacePath; - private final String findPath; - private final Pattern namePattern; // the first group should be the artifact name - private final Function<String, String> nameFromReference; - - @Getter - private final ArtifactTypes type; - - private static String substituteField(String result, String fieldName, String field) { - field = field == null ?
"" : field; - return result.replace("{%" + fieldName + "%}", field); - } - - private static String processPath(String path, Artifact artifact) { - String result = substituteField(path, "name", artifact.getName()); - result = substituteField(result, "type", artifact.getType()); - result = substituteField(result, "version", artifact.getVersion()); - result = substituteField(result, "reference", artifact.getReference()); - return result; - } - - ArrayNode findAll(DocumentContext obj) { - return obj.read(findPath); - } - - String getNameFromReference(String reference) { - if (nameFromReference != null) { - return nameFromReference.apply(reference); - } else if (namePattern != null) { - Matcher m = namePattern.matcher(reference); - if (m.find() && m.groupCount() > 0 && StringUtils.isNotEmpty(m.group(1))) { - return m.group(1); - } else { - return null; - } - } else { - return null; - } - } - - boolean replaceIfPossible(DocumentContext obj, Artifact artifact) { - if (artifact == null || StringUtils.isEmpty(artifact.getType())) { - throw new IllegalArgumentException("Artifact and artifact type must be set."); - } - - if (!artifact.getType().equals(type.toString())) { - return false; - } - - String jsonPath = processPath(replacePath, artifact); - - log.debug("Processed jsonPath == {}", jsonPath); - - Object get; - try { - get = obj.read(jsonPath); - } catch (PathNotFoundException e) { - return false; - } - if (get == null || (get instanceof ArrayNode && ((ArrayNode) get).size() == 0)) { - return false; - } - - log.info("Found valid swap for " + artifact + " using " + jsonPath + ": " + get); - obj.set(jsonPath, artifact.getReference()); - - return true; - } - } - - @Data - @NoArgsConstructor - @AllArgsConstructor - @Builder - public static class ReplaceResult { - private KubernetesManifest manifest; - private Set<Artifact> boundArtifacts = new HashSet<>(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacerFactory.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacerFactory.java deleted file mode 100644 index de992b12032..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacerFactory.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer.Replacer; - -import java.util.regex.Pattern; - -public class ArtifactReplacerFactory { - // The following was derived from - // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go - private final static String DOCKER_NAME_COMPONENT = "[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?"; - private final static String DOCKER_OPTIONAL_TAG = "(?::[\\w][\\w.-]{0,127})?"; - private final static String DOCKER_OPTIONAL_DIGEST = "(?:@[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][0-9A-Fa-f]{32,})?"; - private final static String DOCKER_DOMAIN = "(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?"; - private final static String DOCKER_OPTIONAL_PORT = "(?::[0-9]+)?"; - private final static String DOCKER_OPTIONAL_DOMAIN_AND_PORT = "(?:" + DOCKER_DOMAIN + DOCKER_OPTIONAL_PORT + "/)?"; - private final static String DOCKER_IMAGE_NAME = "(" + DOCKER_OPTIONAL_DOMAIN_AND_PORT + DOCKER_NAME_COMPONENT + "(?:/" + DOCKER_NAME_COMPONENT + ")*)"; - private final static String DOCKER_IMAGE_REFERENCE = DOCKER_IMAGE_NAME + "(" + DOCKER_OPTIONAL_TAG + "|"+ DOCKER_OPTIONAL_DIGEST + ")"; - - // the image reference pattern has two capture groups. - // - the first captures the image name - // - the second captures the image tag (including the leading ":") or digest (including the leading "@"). - public static final Pattern DOCKER_IMAGE_REFERENCE_PATTERN = Pattern.compile("^" + DOCKER_IMAGE_REFERENCE + "$"); - - public static Replacer dockerImageReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec['containers', 'initContainers'].[?( @.image == \"{%name%}\" )].image") - .findPath("$..spec.template.spec['containers', 'initContainers'].*.image") - .nameFromReference(ref -> { - int atIndex = ref.indexOf('@'); - // @ can only show up in image references denoting a digest - // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go#L70 - if (atIndex >= 0) { - return ref.substring(0, atIndex); - } - - // : can be used to denote a port, part of a digest (already matched) or a tag - // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go#L69 - int lastColonIndex = ref.lastIndexOf(':'); - - if (lastColonIndex < 0) { - return ref; - } - - // we don't need to check if this is a tag, or a port. 
ports will be matched lazily if they are numeric, and are treated as tags first: - // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go#L34 - return ref.substring(0, lastColonIndex); - }) - .type(ArtifactTypes.DOCKER_IMAGE) - .build(); - } - - public static Replacer configMapVolumeReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec.volumes.[?( @.configMap.name == \"{%name%}\" )].configMap.name") - .findPath("$..spec.template.spec.volumes.*.configMap.name") - .type(ArtifactTypes.KUBERNETES_CONFIG_MAP) - .build(); - } - - public static Replacer secretVolumeReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec.volumes.[?( @.secret.secretName == \"{%name%}\" )].secret.secretName") - .findPath("$..spec.template.spec.volumes.*.secret.secretName") - .type(ArtifactTypes.KUBERNETES_SECRET) - .build(); - } - - public static Replacer configMapKeyValueFromReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec['containers', 'initContainers'].*.env.[?( @.valueFrom.configMapKeyRef.name == \"{%name%}\" )].valueFrom.configMapKeyRef.name") - .findPath("$..spec.template.spec['containers', 'initContainers'].*.env.*.valueFrom.configMapKeyRef.name") - .type(ArtifactTypes.KUBERNETES_CONFIG_MAP) - .build(); - } - - public static Replacer secretKeyValueFromReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec['containers', 'initContainers'].*.env.[?( @.valueFrom.secretKeyRef.name == \"{%name%}\" )].valueFrom.secretKeyRef.name") - .findPath("$..spec.template.spec['containers', 'initContainers'].*.env.*.valueFrom.secretKeyRef.name") - .type(ArtifactTypes.KUBERNETES_SECRET) - .build(); - } - - public static Replacer configMapEnvFromReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec['containers', 'initContainers'].*.envFrom.[?( @.configMapRef.name == \"{%name%}\" )].configMapRef.name") - .findPath("$..spec.template.spec['containers', 'initContainers'].*.envFrom.*.configMapRef.name") - .type(ArtifactTypes.KUBERNETES_CONFIG_MAP) - .build(); - } - - public static Replacer secretEnvFromReplacer() { - return Replacer.builder() - .replacePath("$..spec.template.spec['containers', 'initContainers'].*.envFrom.[?( @.secretRef.name == \"{%name%}\" )].secretRef.name") - .findPath("$..spec.template.spec['containers', 'initContainers'].*.envFrom.*.secretRef.name") - .type(ArtifactTypes.KUBERNETES_SECRET) - .build(); - } - - public static Replacer hpaDeploymentReplacer() { - return Replacer.builder() - .replacePath("$[?( (@.spec.scaleTargetRef.kind == \"Deployment\" || @.spec.scaleTargetRef.kind == \"deployment\") && @.spec.scaleTargetRef.name == \"{%name%}\" )].spec.scaleTargetRef.name") - .findPath("$[?( @.spec.scaleTargetRef.kind == \"Deployment\" || @.spec.scaleTargetRef.kind == \"deployment\" )].spec.scaleTargetRef.name") - .type(ArtifactTypes.KUBERNETES_DEPLOYMENT) - .build(); - } - - public static Replacer hpaReplicaSetReplacer() { - return Replacer.builder() - .replacePath("$[?( (@.spec.scaleTargetRef.kind == \"ReplicaSet\" || @.spec.scaleTargetRef.kind == \"replicaSet\") && @.spec.scaleTargetRef.name == \"{%name%}\" )].spec.scaleTargetRef.name") - .findPath("$[?( @.spec.scaleTargetRef.kind == \"ReplicaSet\" || @.spec.scaleTargetRef.kind == \"replicaSet\" )].spec.scaleTargetRef.name") - .type(ArtifactTypes.KUBERNETES_REPLICA_SET) - .build(); - } -} diff --git 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactTypes.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactTypes.java deleted file mode 100644 index 0339036d63c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactTypes.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact; - -// TODO(lwander): move to clouddriver-artifacts when ready -public class ArtifactTypes { - public static final ArtifactTypes DOCKER_IMAGE = new ArtifactTypes("docker/image"); - public static final ArtifactTypes KUBERNETES_CONFIG_MAP = new ArtifactTypes("kubernetes/configMap"); - public static final ArtifactTypes KUBERNETES_DEPLOYMENT = new ArtifactTypes("kubernetes/deployment"); - public static final ArtifactTypes KUBERNETES_REPLICA_SET = new ArtifactTypes("kubernetes/replicaSet"); - public static final ArtifactTypes KUBERNETES_SECRET = new ArtifactTypes("kubernetes/secret"); - - final private String id; - - public ArtifactTypes(String id) { - this.id = id; - } - - @Override - public String toString() { - return id; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesArtifactConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesArtifactConverter.java deleted file mode 100644 index 1e941de1c75..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesArtifactConverter.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; - -import java.util.Map; - -public abstract class KubernetesArtifactConverter { - abstract public Artifact toArtifact(ArtifactProvider artifactProvider, KubernetesManifest manifest, String account); - abstract public KubernetesCoordinates toCoordinates(Artifact artifact); - abstract public String getDeployedName(Artifact artifact); - - protected String getType(KubernetesManifest manifest) { - return String.join("/", - KubernetesCloudProvider.getID(), - manifest.getKind().toString() - ); - } - - protected KubernetesKind getKind(Artifact artifact) { - String[] split = artifact.getType().split("/", -1); - if (split.length != 2) { - throw new IllegalArgumentException("Not a kubernetes artifact: " + artifact); - } - - if (!split[0].equals(KubernetesCloudProvider.getID())) { - throw new IllegalArgumentException("Not a kubernetes artifact: " + artifact); - } - - return KubernetesKind.fromString(split[1]); - } - - protected String getNamespace(Artifact artifact) { - return artifact.getLocation(); - } - - public static String getAccount(Artifact artifact) { - String account = ""; - Map<String, Object> metadata = artifact.getMetadata(); - if (metadata != null) { - account = (String) metadata.getOrDefault("account", ""); - } - - return account; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesUnversionedArtifactConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesUnversionedArtifactConverter.java deleted file mode 100644 index e22ea3ae201..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesUnversionedArtifactConverter.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; - -import java.util.HashMap; -import java.util.Map; - -public class KubernetesUnversionedArtifactConverter extends KubernetesArtifactConverter { - @Override - public Artifact toArtifact(ArtifactProvider provider, KubernetesManifest manifest, String account) { - String type = getType(manifest); - String name = manifest.getName(); - String location = manifest.getNamespace(); - Map<String, Object> metadata = new HashMap<>(); - metadata.put("account", account); - return Artifact.builder() - .type(type) - .name(name) - .location(location) - .reference(name) - .metadata(metadata) - .build(); - } - - @Override - public KubernetesCoordinates toCoordinates(Artifact artifact) { - return KubernetesCoordinates.builder() - .kind(getKind(artifact)) - .namespace(getNamespace(artifact)) - .name(artifact.getName()) - .build(); - } - - @Override - public String getDeployedName(Artifact artifact) { - return artifact.getName(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesVersionedArtifactConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesVersionedArtifactConverter.java deleted file mode 100644 index 532704b5af5..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesVersionedArtifactConverter.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import lombok.extern.slf4j.Slf4j; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.stream.Collectors; - -@Slf4j -public class KubernetesVersionedArtifactConverter extends KubernetesArtifactConverter { - final private static ObjectMapper objectMapper = new ObjectMapper(); - - @Override - public Artifact toArtifact(ArtifactProvider provider, KubernetesManifest manifest, String account) { - String type = getType(manifest); - String name = manifest.getName(); - String location = manifest.getNamespace(); - String version = getVersion(provider, type, name, location, manifest); - Map metadata = new HashMap<>(); - metadata.put("account", account); - return Artifact.builder() - .type(type) - .name(name) - .location(location) - .version(version) - .reference(getDeployedName(name, version)) - .metadata(metadata) - .build(); - } - - @Override - public KubernetesCoordinates toCoordinates(Artifact artifact) { - return KubernetesCoordinates.builder() - .kind(getKind(artifact)) - .name(getDeployedName(artifact)) - .namespace(getNamespace(artifact)) - .build(); - } - - @Override - public String getDeployedName(Artifact artifact) { - return getDeployedName(artifact.getName(), artifact.getVersion()); - } - - private String getDeployedName(String name, String version) { - return String.join("-", name, version); - } - - private String getVersion(ArtifactProvider provider, String type, String name, String location, KubernetesManifest manifest) { - List priorVersions = provider.getArtifacts(type, name, location); - - Optional maybeVersion = findMatchingVersion(priorVersions, manifest); - if (maybeVersion.isPresent()) { - String version = maybeVersion.get(); - log.info("Manifest {} was already deployed at version {} - reusing.", manifest, version); - return version; - } else { - return findGreatestUnusedVersion(priorVersions); - } - } - - private String findGreatestUnusedVersion(List priorVersions) { - List taken = priorVersions.stream() - .map(Artifact::getVersion) - .filter(Objects::nonNull) - .filter(v -> v.startsWith("v")) - .map(v -> v.substring(1)) - .map(v -> { - try { - return Integer.valueOf(v); - } catch (NumberFormatException e) { - return null; - } - } ) - .filter(Objects::nonNull) - .filter(i -> i >= 0) - .collect(Collectors.toList()); - - taken.sort(Integer::compareTo); - int sequence = 0; - if (!taken.isEmpty()) { - sequence = taken.get(taken.size() - 1) + 1; - } - - // Match vNNN pattern until impossible - if (sequence < 1000) { - return String.format("v%03d", sequence); - } else { - return String.format("v%d", sequence); - } - } - - private Optional findMatchingVersion(List priorVersions, KubernetesManifest manifest) { - return priorVersions.stream() - .filter(a -> getLastAppliedConfiguration(a) - .map(c -> c.nonMetadataEquals(manifest)) - .orElse(false)) - .findFirst() - .map(Artifact::getVersion); - } - - private Optional getLastAppliedConfiguration(Artifact artifact) { - if (artifact.getMetadata() == null) { - return Optional.empty(); - } - - Object 
rawLastAppliedConfiguration = artifact.getMetadata().get("lastAppliedConfiguration"); - - if (rawLastAppliedConfiguration == null) { - return Optional.empty(); - } - - try { - KubernetesManifest manifest = objectMapper.convertValue(rawLastAppliedConfiguration, KubernetesManifest.class); - return Optional.of(manifest); - } catch (Exception e) { - log.warn("Malformed lastAppliedConfiguration entry in {}: ", artifact, e); - return Optional.empty(); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/Keys.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/Keys.java deleted file mode 100644 index 692963fe527..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/Keys.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.extern.slf4j.Slf4j; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.Kind.KUBERNETES_METRIC; - -@Slf4j -public class Keys { - /** - * Keys are split into "logical" and "infrastructure" kinds. "logical" keys - * are for spinnaker groupings that exist by naming/moniker convention, whereas - * "infrastructure" keys correspond to real resources (e.g. replica set, service, ...). 
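Both key families described in this comment share one layout: the provider prefix, a kind discriminator, then kind-specific components, joined with ":" after escaping any ":" inside a component to ";". A minimal sketch with hypothetical account, namespace, and resource names:

class CacheKeySketch {
    static final String PROVIDER = "kubernetes.v2";

    static String createKey(Object... elems) {
        StringBuilder key = new StringBuilder(PROVIDER);
        for (Object elem : elems) {
            // ":" delimits components, so any ":" inside a component is escaped to ";"
            key.append(':').append(elem == null ? "" : elem.toString().replace(":", ";"));
        }
        return key.toString();
    }

    public static void main(String[] args) {
        // logical: a Spinnaker grouping that exists by naming convention
        System.out.println(createKey("logical", "applications", "myapp"));
        // -> kubernetes.v2:logical:applications:myapp

        // infrastructure: a concrete resource in a given account and namespace
        System.out.println(createKey("infrastructure", "replicaSet", "my-account", "default", "myapp-v003"));
        // -> kubernetes.v2:infrastructure:replicaSet:my-account:default:myapp-v003
    }
}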
- */ - public enum Kind { - LOGICAL, - ARTIFACT, - INFRASTRUCTURE, - KUBERNETES_METRIC; - - @Override - public String toString() { - return name().toLowerCase(); - } - - @JsonCreator - public static Kind fromString(String name) { - return Arrays.stream(values()) - .filter(k -> k.toString().equalsIgnoreCase(name)) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("No matching kind with name " + name + " exists")); - } - } - - public enum LogicalKind { - APPLICATIONS, - CLUSTERS; - - public static boolean isLogicalGroup(String group) { - return group.equals(APPLICATIONS.toString()) || group.equals(CLUSTERS.toString()); - } - - @Override - public String toString() { - return name().toLowerCase(); - } - - public String singular() { - String name = toString(); - return name.substring(0, name.length() - 1); - } - - @JsonCreator - public static LogicalKind fromString(String name) { - return Arrays.stream(values()) - .filter(k -> k.toString().equalsIgnoreCase(name)) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("No matching kind with name " + name + " exists")); - } - } - - private static final String provider = "kubernetes.v2"; - - private static String createKey(Object... elems) { - List<String> components = Arrays.stream(elems) - .map(s -> s == null ? "" : s.toString()) - .map(s -> s.replaceAll(":", ";")) - .collect(Collectors.toList()); - components.add(0, provider); - return String.join(":", components); - } - - public static String artifact(String type, String name, String location, String version) { - return createKey(Kind.ARTIFACT, type, name, location, version); - } - - public static String application(String name) { - return createKey(Kind.LOGICAL, LogicalKind.APPLICATIONS, name); - } - - public static String cluster(String account, String application, String name) { - return createKey(Kind.LOGICAL, LogicalKind.CLUSTERS, account, application, name); - } - - public static String infrastructure(KubernetesKind kind, String account, String namespace, String name) { - return createKey(Kind.INFRASTRUCTURE, kind, account, namespace, name); - } - - public static String infrastructure(KubernetesManifest manifest, String account) { - return infrastructure(manifest.getKind(), account, manifest.getNamespace(), manifest.getName()); - } - - public static String metric(KubernetesKind kind, String account, String namespace, String name) { - return createKey(KUBERNETES_METRIC, kind, account, namespace, name); - } - - public static Optional<CacheKey> parseKey(String key) { - String[] parts = key.split(":", -1); - - if (parts.length < 3 || !parts[0].equals(provider)) { - return Optional.empty(); - } - - for (int i = 0; i < parts.length; i++) { - parts[i] = parts[i].replace(";", ":"); - } - - try { - Kind kind = Kind.fromString(parts[1]); - switch (kind) { - case LOGICAL: - return Optional.of(parseLogicalKey(parts)); - case ARTIFACT: - return Optional.of(new ArtifactCacheKey(parts)); - case INFRASTRUCTURE: - return Optional.of(new InfrastructureCacheKey(parts)); - case KUBERNETES_METRIC: - return Optional.of(new MetricCacheKey(parts)); - default: - throw new IllegalArgumentException("Unknown kind " + kind); - } - } catch (IllegalArgumentException e) { - log.warn("Kubernetes owned kind with unknown key structure '{}': {} (perhaps try flushing all clouddriver:* redis keys)", key, parts, e); - return Optional.empty(); - } - } - - private static CacheKey parseLogicalKey(String[] parts) { - assert(parts.length >= 3); - - LogicalKind logicalKind = LogicalKind.fromString(parts[2]); - - switch (logicalKind) { - case APPLICATIONS: - return new ApplicationCacheKey(parts); - case CLUSTERS: - return new ClusterCacheKey(parts); - default: - throw new IllegalArgumentException("Unknown kind " + logicalKind); - } - } - - @Data - public static abstract class CacheKey { - private Kind kind; - private String provider = KubernetesCloudProvider.getID(); - private String type; - public abstract String getGroup(); - public abstract String getName(); - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static abstract class LogicalKey extends CacheKey { - private Kind kind = Kind.LOGICAL; - public abstract LogicalKind getLogicalKind(); - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static class ArtifactCacheKey extends CacheKey { - private Kind kind = Kind.ARTIFACT; - private String type; - private String name; - private String location; - private String version; - - public ArtifactCacheKey(String[] parts) { - if (parts.length != 6) { - throw new IllegalArgumentException("Malformed artifact key " + Arrays.toString(parts)); - } - - type = parts[2]; - name = parts[3]; - location = parts[4]; - version = parts[5]; - } - - @Override - public String toString() { - return createKey(kind, type, name, location, version); - } - - @Override - public String getGroup() { - return kind.toString(); - } - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static class ApplicationCacheKey extends LogicalKey { - private LogicalKind logicalKind = LogicalKind.APPLICATIONS; - private String name; - - public ApplicationCacheKey(String[] parts) { - if (parts.length != 4) { - throw new IllegalArgumentException("Malformed application key " + Arrays.toString(parts)); - } - - name = parts[3]; - } - - @Override - public String toString() { - return createKey(getKind(), logicalKind, name); - } - - @Override - public String getGroup() { - return logicalKind.toString(); - } - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static class ClusterCacheKey extends LogicalKey { - private LogicalKind logicalKind = LogicalKind.CLUSTERS; - private String account; - private String application; - private String name; - - public ClusterCacheKey(String[] parts) { - if (parts.length != 6) { - throw new IllegalArgumentException("Malformed cluster key " + Arrays.toString(parts)); - } - - account = parts[3]; - application = parts[4]; - name = parts[5]; - } - - @Override - public String toString() { - return createKey(getKind(), logicalKind, account, application, name); - } - - @Override - public String getGroup() { - return logicalKind.toString(); - } - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static class InfrastructureCacheKey extends CacheKey { - private Kind kind = Kind.INFRASTRUCTURE; - private KubernetesKind kubernetesKind; - private String account; - private String namespace; - private String name; - - public InfrastructureCacheKey(String[] parts) { - if (parts.length != 6) { - throw new IllegalArgumentException("Malformed infrastructure key " + Arrays.toString(parts)); - } - - kubernetesKind = KubernetesKind.fromString(parts[2]); - account = parts[3]; - namespace = parts[4]; - name = parts[5]; - } - - @Override - public String toString() { - return createKey(kind, kubernetesKind, account, namespace, name); - } - - @Override - public String getGroup() { - return kubernetesKind.toString(); - } - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static class MetricCacheKey extends CacheKey { - private Kind kind = KUBERNETES_METRIC; - private KubernetesKind kubernetesKind; - private String account; - private String namespace; - private String name; - - public MetricCacheKey(String[] parts) { - if (parts.length != 6) { - throw new IllegalArgumentException("Malformed metric key " + Arrays.toString(parts)); - } - - kubernetesKind = KubernetesKind.fromString(parts[2]); - account = parts[3]; - namespace = parts[4]; - name = parts[5]; - } - - @Override - public String toString() { - return createKey(kind, kubernetesKind, account, namespace, name); - } - - @Override - public String getGroup() { - return KUBERNETES_METRIC.toString(); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2Provider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2Provider.java deleted file mode 100644 index fea678c37a0..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2Provider.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching; - -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; -import com.netflix.spinnaker.cats.provider.Provider; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.extern.slf4j.Slf4j; - -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.ConcurrentHashMap; - -@EqualsAndHashCode(callSuper = true) -@Slf4j -@Data -class KubernetesV2Provider extends AgentSchedulerAware implements Provider { - public static final String PROVIDER_NAME = KubernetesCloudProvider.getID(); - - private Collection<Agent> agents = emptyAgentCollection(); - - private Collection<Agent> nextAgentSet = emptyAgentCollection(); - - private static Collection<Agent> emptyAgentCollection() { - return Collections.newSetFromMap(new ConcurrentHashMap<>()); - } - - public void addAllAgents(Collection<Agent> agents) { - nextAgentSet.addAll(agents); - } - - public void clearNewAgentSet() { - nextAgentSet.clear(); - } - - public void switchToNewAgents() { - Collection<Agent> nextAgentSetCopy = emptyAgentCollection(); - nextAgentSetCopy.addAll(nextAgentSet); - agents = nextAgentSetCopy; - clearNewAgentSet(); - } - - @Override - public String getProviderName() { - return PROVIDER_NAME; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2ProviderConfig.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2ProviderConfig.java deleted file mode 100644 index 7ce951bdd54..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2ProviderConfig.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2017 Google, Inc.
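The provider class just removed never mutates its live agent set; it fills a staging set and then swaps a fully built copy in with a single reference assignment. A stripped-down sketch of that pattern, with String standing in for Agent; the volatile qualifier is my addition for safe publication, the original field is not volatile:

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

class AgentSwapSketch {
    private volatile Collection<String> agents = emptyAgentCollection();
    private final Collection<String> nextAgentSet = emptyAgentCollection();

    private static Collection<String> emptyAgentCollection() {
        return Collections.newSetFromMap(new ConcurrentHashMap<>());
    }

    void addAllAgents(Collection<String> newAgents) {
        nextAgentSet.addAll(newAgents);
    }

    void switchToNewAgents() {
        // populate a fresh set first, then publish it in one reference write,
        // so readers never observe a half-built collection
        Collection<String> copy = emptyAgentCollection();
        copy.addAll(nextAgentSet);
        agents = copy;
        nextAgentSet.clear();
    }

    public static void main(String[] args) {
        AgentSwapSketch provider = new AgentSwapSketch();
        provider.addAllAgents(List.of("core-agent", "metric-agent"));
        provider.switchToNewAgents();
        System.out.println(provider.agents); // [core-agent, metric-agent] in some order
    }
}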
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching; - -import com.netflix.spinnaker.cats.agent.Agent; -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper; -import com.netflix.spinnaker.cats.thread.NamedThreadFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgentDispatcher; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; -import com.netflix.spinnaker.clouddriver.security.ProviderUtils; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.DependsOn; -import org.springframework.context.annotation.Scope; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.stream.Collectors; - -@Configuration -@Slf4j -class KubernetesV2ProviderConfig { - @Bean - @DependsOn("kubernetesNamedAccountCredentials") - KubernetesV2Provider kubernetesV2Provider(KubernetesCloudProvider kubernetesCloudProvider, - AccountCredentialsRepository accountCredentialsRepository, - KubernetesV2CachingAgentDispatcher kubernetesV2CachingAgentDispatcher, - KubernetesResourcePropertyRegistry kubernetesResourcePropertyRegistry - ) { - this.kubernetesV2Provider = new KubernetesV2Provider(); - this.accountCredentialsRepository = accountCredentialsRepository; - this.kubernetesV2CachingAgentDispatcher = kubernetesV2CachingAgentDispatcher; - this.kubernetesResourcePropertyRegistry = kubernetesResourcePropertyRegistry; - - ScheduledExecutorService poller = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(KubernetesV2ProviderConfig.class.getSimpleName())); - - synchronizeKubernetesV2Provider(kubernetesV2Provider, accountCredentialsRepository); - - return kubernetesV2Provider; - } - - private KubernetesV2Provider kubernetesV2Provider; - private AccountCredentialsRepository accountCredentialsRepository; - private KubernetesV2CachingAgentDispatcher kubernetesV2CachingAgentDispatcher; - private KubernetesResourcePropertyRegistry kubernetesResourcePropertyRegistry; - - @Bean - KubernetesV2ProviderSynchronizerTypeWrapper kubernetesV2ProviderSynchronizerTypeWrapper() { - 
return new KubernetesV2ProviderSynchronizerTypeWrapper(); - } - - class KubernetesV2ProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - public Class getSynchronizerType() { - return KubernetesV2ProviderSynchronizer.class; - } - } - - class KubernetesV2ProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - KubernetesV2ProviderSynchronizer synchronizeKubernetesV2Provider( - KubernetesV2Provider kubernetesV2Provider, - AccountCredentialsRepository accountCredentialsRepository - ) { - Set allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, KubernetesNamedAccountCredentials.class, ProviderVersion.v2); - - try { - for (KubernetesNamedAccountCredentials credentials : allAccounts) { - KubernetesV2Credentials v2Credentials = (KubernetesV2Credentials) credentials.getCredentials(); - v2Credentials.getCustomResources().forEach(cr -> { - try { - KubernetesResourceProperties properties = KubernetesResourceProperties.fromCustomResource(cr); - kubernetesResourcePropertyRegistry.registerAccountProperty(credentials.getName(), properties); - } catch (Exception e) { - log.warn("Error encountered registering {}: ", cr, e); - } - }); - - List newlyAddedAgents = kubernetesV2CachingAgentDispatcher.buildAllCachingAgents(credentials) - .stream() - .map(c -> (Agent) c) - .collect(Collectors.toList()); - - log.info("Adding {} agents for account {}", newlyAddedAgents.size(), credentials.getName()); - - kubernetesV2Provider.addAllAgents(newlyAddedAgents); - } - } catch (Exception e) { - log.warn("Error encountered scheduling new agents -- using old agent set instead", e); - kubernetesV2Provider.clearNewAgentSet(); - return new KubernetesV2ProviderSynchronizer(); - } - - // If there is an agent scheduler, then this provider has been through the AgentController in the past. - // In that case, we need to do the scheduling here (because accounts have been added to a running system). - if (kubernetesV2Provider.getAgentScheduler() != null) { - ProviderUtils.rescheduleAgents(kubernetesV2Provider, new ArrayList<>(kubernetesV2Provider.getNextAgentSet())); - } - - kubernetesV2Provider.switchToNewAgents(); - - return new KubernetesV2ProviderSynchronizer(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2SearchProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2SearchProvider.java deleted file mode 100644 index b252ff7faf6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KubernetesV2SearchProvider.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKey; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.search.SearchProvider; -import com.netflix.spinnaker.clouddriver.search.SearchResultSet; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -@Component -@Slf4j -public class KubernetesV2SearchProvider implements SearchProvider { - final private KubernetesCacheUtils cacheUtils; - final private ObjectMapper mapper; - final private KubernetesSpinnakerKindMap kindMap; - final private KubernetesResourcePropertyRegistry registry; - final private List defaultTypes; - final private Set logicalTypes; - final private Set allCaches; - - @Autowired - public KubernetesV2SearchProvider(KubernetesCacheUtils cacheUtils, - KubernetesSpinnakerKindMap kindMap, - ObjectMapper objectMapper, - KubernetesResourcePropertyRegistry registry) { - this.cacheUtils = cacheUtils; - this.mapper = objectMapper; - this.kindMap = kindMap; - this.registry = registry; - - this.defaultTypes = kindMap.allKubernetesKinds() - .stream() - .map(KubernetesKind::toString) - .collect(Collectors.toList()); - this.logicalTypes = Arrays.stream(LogicalKind.values()) - .map(LogicalKind::toString) - .collect(Collectors.toSet()); - - this.allCaches = new HashSet<>(defaultTypes); - this.allCaches.addAll(logicalTypes); - } - - @Override - public String getPlatform() { - return KubernetesCloudProvider.getID(); - } - - @Override - public SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { - return search(query, defaultTypes, pageNumber, pageSize); - } - - @Override - public SearchResultSet search(String query, Integer pageNumber, Integer pageSize, Map filters) { - return search(query, defaultTypes, pageNumber, pageSize, filters); - } - - @Override - public SearchResultSet search(String query, List types, Integer pageNumber, Integer pageSize) { - return search(query, types, pageNumber, pageSize, Collections.emptyMap()); - } - - @Override - public SearchResultSet search(String query, List types, Integer pageNumber, Integer pageSize, Map filters) { - log.info("Querying {} for term {}", types, query); - List> results = paginateResults(getMatches(query, types, filters), pageSize, 
pageNumber); - - return SearchResultSet.builder() - .pageNumber(pageNumber) - .pageSize(pageSize) - .platform(getPlatform()) - .query(query) - .totalMatches(results.size()) - .results(results) - .build(); - } - - private Map convertKeyToMap(String key) { - Optional optional = Keys.parseKey(key); - if (!optional.isPresent()) { - return null; - } - - Keys.CacheKey parsedKey = optional.get(); - Map result; - String type; - - if (parsedKey instanceof Keys.InfrastructureCacheKey) { - Keys.InfrastructureCacheKey infraKey = (Keys.InfrastructureCacheKey) parsedKey; - type = kindMap.translateKubernetesKind(infraKey.getKubernetesKind()).toString(); - - KubernetesResourceProperties properties = registry.get(infraKey.getAccount(), infraKey.getKubernetesKind()); - if (properties == null) { - log.warn("No hydrator for type {}, this is possibly a developer error", infraKey.getKubernetesKind()); - return null; - } - - result = properties.getHandler().hydrateSearchResult(infraKey, cacheUtils); - } else if (parsedKey instanceof Keys.LogicalKey) { - Keys.LogicalKey logicalKey = (Keys.LogicalKey) parsedKey; - - result = mapper.convertValue(parsedKey, new TypeReference>() {}); - result.put(logicalKey.getLogicalKind().singular(), logicalKey.getName()); - type = parsedKey.getGroup(); - } else { - log.warn("Unknown key type " + parsedKey + ", ignoring."); - return null; - } - - result.put("type", type); - return result; - } - - private Map> getKeysRelatedToLogicalMatches(String matchQuery) { - return logicalTypes.stream() - .map(type -> cacheUtils.getAllDataMatchingPattern(type, matchQuery) - .stream() - .map(e -> e.getRelationships() - .values() - .stream() - .flatMap(Collection::stream) - .filter(Objects::nonNull) - .map(k -> new ImmutablePair<>(k, e.getId())) - ).flatMap(x -> x) - ).flatMap(x -> x) - .collect( - Collectors.groupingBy(Pair::getLeft, - Collectors.reducing( - Collections.emptyList(), - i -> Collections.singletonList(i.getRight()), - (a, b) -> { - List res = new ArrayList<>(); - res.addAll(a); - res.addAll(b); - return res; - } - ) - ) - ); - } - - // TODO(lwander): use filters - private List> getMatches(String query, List types, Map filters) { - String matchQuery = String.format("*%s*", query.toLowerCase()); - Set typeSet = new HashSet<>(types); - - // We add k8s versions of Spinnaker types here to ensure that (for example) replica sets are returned when server groups are requested. 
- typeSet.addAll(types.stream() - .map(t -> { - try { - return KubernetesSpinnakerKindMap.SpinnakerKind.fromString(t); - } catch (IllegalArgumentException e) { - return null; - } - }).filter(Objects::nonNull) - .map(kindMap::translateSpinnakerKind) - .flatMap(Collection::stream) - .map(KubernetesKind::toString) - .collect(Collectors.toSet()) - ); - - // Remove caches that we can't search - typeSet.retainAll(allCaches); - - // Search caches directly - List> results = typeSet.stream() - .map(type -> cacheUtils.getAllKeysMatchingPattern(type, matchQuery)) - .flatMap(Collection::stream) - .map(this::convertKeyToMap) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - - // Search 'logical' caches (clusters, apps) for indirect matches - Map> keyToAllLogicalKeys = getKeysRelatedToLogicalMatches(matchQuery); - results.addAll(keyToAllLogicalKeys.entrySet().stream() - .map(kv -> { - Map result = convertKeyToMap(kv.getKey()); - if (result == null) { - return null; - } - - kv.getValue().stream() - .map(Keys::parseKey) - .filter(Optional::isPresent) - .map(Optional::get) - .filter(LogicalKey.class::isInstance) - .map(k -> (LogicalKey) k) - .forEach(k -> result.put(k.getLogicalKind().singular(), k.getName())); - - return result; - } - ) - .filter(Objects::nonNull) - .collect(Collectors.toList())); - - results = results.stream() - .filter(r -> typeSet.contains(r.get("type")) || typeSet.contains(r.get("group"))) - .collect(Collectors.toList()); - - return results; - } - - private static List paginateResults(List matches, Integer pageSize, Integer pageNumber) { - Integer startingIndex = pageSize * (pageNumber - 1); - Integer endIndex = Math.min(pageSize * pageNumber, matches.size()); - return startingIndex < endIndex ? matches.subList(startingIndex, endIndex) : new ArrayList<>(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/CustomKubernetesCachingAgentFactory.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/CustomKubernetesCachingAgentFactory.java deleted file mode 100644 index e0342a7d6df..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/CustomKubernetesCachingAgentFactory.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
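paginateResults in the search provider above is plain index arithmetic over the match list, with 1-indexed pages and an empty result past the end. A worked sketch, JDK only, hypothetical data:

import java.util.ArrayList;
import java.util.List;

class PaginationSketch {
    static <T> List<T> paginate(List<T> matches, int pageSize, int pageNumber) {
        int start = pageSize * (pageNumber - 1); // pages are 1-indexed
        int end = Math.min(pageSize * pageNumber, matches.size());
        return start < end ? matches.subList(start, end) : new ArrayList<>();
    }

    public static void main(String[] args) {
        List<Integer> matches = List.of(0, 1, 2, 3, 4, 5, 6);
        System.out.println(paginate(matches, 3, 1)); // [0, 1, 2]
        System.out.println(paginate(matches, 3, 3)); // [6]  (partial last page)
        System.out.println(paginate(matches, 3, 4)); // []   (past the end)
    }
}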
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; - -public class CustomKubernetesCachingAgentFactory { - public static KubernetesV2OnDemandCachingAgent create( - KubernetesKind kind, - KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount - ) { - return new Agent( - kind, - namedAccountCredentials, - propertyRegistry, - objectMapper, - registry, - agentIndex, - agentCount - ); - } - - private static class Agent extends KubernetesV2OnDemandCachingAgent { - private final KubernetesKind kind; - - Agent( - KubernetesKind kind, - KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount - ) { - super(namedAccountCredentials, propertyRegistry, objectMapper, registry, agentIndex, agentCount); - this.kind = kind; - } - - @Override - protected KubernetesKind primaryKind() { - return this.kind; - } - - @Override - final public Collection getProvidedDataTypes() { - return Collections.unmodifiableSet( - new HashSet<>(Collections.singletonList( - AUTHORITATIVE.forType(this.kind.toString()) - )) - ); - } - - @Override - public String getAgentType() { - return String.format("%s/CustomKubernetes(%s)[%d/%d]", accountName, kind, agentIndex + 1, agentCount); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCacheDataConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCacheDataConverter.java deleted file mode 100644 index 6793370538e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCacheDataConverter.java +++ /dev/null @@ -1,453 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
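The factory removed above stamps out one on-demand agent per custom resource kind, each authoritative for exactly its own kind, with a display name carrying the account, the kind, and a 1-based shard index. A tiny sketch of just the naming convention; the account and kind values are invented:

class CustomAgentNamingSketch {
    // one agent per (account, custom kind, shard); the index is 0-based internally
    // but rendered 1-based for operators
    static String agentType(String accountName, String kind, int agentIndex, int agentCount) {
        return String.format("%s/CustomKubernetes(%s)[%d/%d]", accountName, kind, agentIndex + 1, agentCount);
    }

    public static void main(String[] args) {
        System.out.println(agentType("prod-account", "MyCrd", 0, 2));
        // -> prod-account/CustomKubernetes(MyCrd)[1/2]
    }
}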
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.DefaultCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPodMetric; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesCachingProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestAnnotater; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestMetadata; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestSpinnakerRelationships; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.names.KubernetesManifestNamer; -import com.netflix.spinnaker.clouddriver.names.NamerRegistry; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.netflix.spinnaker.moniker.Moniker; -import com.netflix.spinnaker.moniker.Namer; -import io.kubernetes.client.JSON; -import lombok.Builder; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.Kind.ARTIFACT; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.APPLICATIONS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.CLUSTERS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.NAMESPACE; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.POD; -import static java.lang.Math.toIntExact; - -@Slf4j -public class KubernetesCacheDataConverter { - private static ObjectMapper mapper = new ObjectMapper(); - private static final JSON json = new JSON(); - // TODO(lwander): make configurable - private static final int logicalTtlSeconds = toIntExact(TimeUnit.MINUTES.toSeconds(10)); - private static final int infrastructureTtlSeconds = -1; - - public static CacheData convertAsArtifact(String account, KubernetesManifest manifest) { - KubernetesCachingProperties cachingProperties = KubernetesManifestAnnotater.getCachingProperties(manifest); - if (cachingProperties.isIgnore()) { - return null; - } - - logMalformedManifest(() -> "Converting " + manifest + " to a cached artifact", manifest); - - String namespace = manifest.getNamespace(); - Optional optional = KubernetesManifestAnnotater.getArtifact(manifest); - if 
(!optional.isPresent()) { - return null; - } - - Artifact artifact = optional.get(); - - try { - KubernetesManifest lastAppliedConfiguration = KubernetesManifestAnnotater.getLastAppliedConfiguration(manifest); - if (artifact.getMetadata() == null) { - artifact.setMetadata(new HashMap<>()); - } - artifact.getMetadata().put("lastAppliedConfiguration", lastAppliedConfiguration); - artifact.getMetadata().put("account", account); - } catch (Exception e) { - log.warn("Unable to get last applied configuration from {}: ", manifest, e); - } - - if (artifact.getType() == null) { - log.debug("No assigned artifact type for resource " + namespace + ":" + manifest.getFullResourceName()); - return null; - } - - Map attributes = new ImmutableMap.Builder() - .put("artifact", artifact) - .put("creationTimestamp", Optional.ofNullable(manifest.getCreationTimestamp()).orElse("")) - .build(); - - Map> cacheRelationships = new HashMap<>(); - - String key = Keys.artifact(artifact.getType(), artifact.getName(), artifact.getLocation(), artifact.getVersion()); - String owner = Keys.infrastructure(manifest, account); - cacheRelationships.put(manifest.getKind().toString(), Collections.singletonList(owner)); - - return new DefaultCacheData(key, logicalTtlSeconds, attributes, cacheRelationships); - } - - public static Collection dedupCacheData(Collection input) { - Map cacheDataById = new HashMap<>(); - for (CacheData cd : input) { - String id = cd.getId(); - if (cacheDataById.containsKey(id)) { - CacheData other = cacheDataById.get(id); - cd = mergeCacheData(cd, other); - } - - cacheDataById.put(id, cd); - } - - return cacheDataById.values(); - } - - public static CacheData mergeCacheData(CacheData current, CacheData added) { - String id = current.getId(); - Map attributes = new HashMap<>(current.getAttributes()); - attributes.putAll(added.getAttributes()); - // Behavior is: if no ttl is set on either, the merged key won't expire - int ttl = Math.min(current.getTtlSeconds(), added.getTtlSeconds()); - Map> relationships = new HashMap<>(current.getRelationships()); - - added.getRelationships() - .entrySet() - .forEach(entry -> relationships.merge(entry.getKey(), entry.getValue(), - (a, b) -> { - Collection res = new HashSet<>(Math.max(a.size(), b.size())); - res.addAll(a); - res.addAll(b); - return res; - })); - - return new DefaultCacheData(id, ttl, attributes, relationships); - } - - public static CacheData convertPodMetric(String account, - String namespace, - KubernetesPodMetric podMetric) { - String podName = podMetric.getPodName(); - Map attributes = new ImmutableMap.Builder() - .put("name", podName) - .put("namespace", namespace) - .put("metrics", podMetric.getContainerMetrics()) - .build(); - - Map> relationships = new ImmutableMap.Builder>() - .put(POD.toString(), Collections.singletonList(Keys.infrastructure(POD, account, namespace, podName))) - .build(); - - String id = Keys.metric(POD, account, namespace, podName); - - return new DefaultCacheData(id, infrastructureTtlSeconds, attributes, relationships); - } - - public static CacheData convertAsResource(String account, - KubernetesManifest manifest, - List resourceRelationships) { - KubernetesCachingProperties cachingProperties = KubernetesManifestAnnotater.getCachingProperties(manifest); - if (cachingProperties.isIgnore()) { - return null; - } - - logMalformedManifest(() -> "Converting " + manifest + " to a cached resource", manifest); - - KubernetesKind kind = manifest.getKind(); - boolean hasClusterRelationship = false; - boolean isNamespaced = true; - if 
(kind != null) { - hasClusterRelationship = kind.hasClusterRelationship(); - isNamespaced = kind.isNamespaced(); - } - - KubernetesApiVersion apiVersion = manifest.getApiVersion(); - String name = manifest.getName(); - String namespace = manifest.getNamespace(); - Namer namer = account == null - ? new KubernetesManifestNamer() - : NamerRegistry.lookup() - .withProvider(KubernetesCloudProvider.getID()) - .withAccount(account) - .withResource(KubernetesManifest.class); - Moniker moniker = namer.deriveMoniker(manifest); - - Map attributes = new ImmutableMap.Builder() - .put("kind", kind) - .put("apiVersion", apiVersion) - .put("name", name) - .put("namespace", namespace) - .put("fullResourceName", manifest.getFullResourceName()) - .put("manifest", manifest) - .put("moniker", moniker) - .build(); - - KubernetesManifestSpinnakerRelationships relationships = KubernetesManifestAnnotater.getManifestRelationships(manifest); - Optional optional = KubernetesManifestAnnotater.getArtifact(manifest); - KubernetesManifestMetadata metadata = KubernetesManifestMetadata.builder() - .relationships(relationships) - .moniker(moniker) - .artifact(optional) - .build(); - - Map> cacheRelationships = new HashMap<>(); - - String application = moniker.getApp(); - if (StringUtils.isEmpty(application)) { - log.debug("Encountered not-spinnaker-owned resource " + namespace + ":" + manifest.getFullResourceName()); - } else { - cacheRelationships.putAll(annotatedRelationships(account, metadata, hasClusterRelationship)); - } - - // TODO(lwander) avoid overwriting keys here - cacheRelationships.putAll(ownerReferenceRelationships(account, namespace, manifest.getOwnerReferences())); - cacheRelationships.putAll(implicitRelationships(manifest, account, resourceRelationships)); - - String key = Keys.infrastructure(kind, account, namespace, name); - return new DefaultCacheData(key, infrastructureTtlSeconds, attributes, cacheRelationships); - } - - public static List getMetrics(CacheData cacheData) { - return mapper.convertValue(cacheData.getAttributes().get("metrics"), new TypeReference>() { }); - } - - public static KubernetesManifest getManifest(CacheData cacheData) { - return mapper.convertValue(cacheData.getAttributes().get("manifest"), KubernetesManifest.class); - } - - public static Moniker getMoniker(CacheData cacheData) { - return mapper.convertValue(cacheData.getAttributes().get("moniker"), Moniker.class); - } - - public static KubernetesManifest convertToManifest(Object o) { - return mapper.convertValue(o, KubernetesManifest.class); - } - - public static T getResource(KubernetesManifest manifest, Class clazz) { - // A little hacky, but the only way to deserialize any timestamps using string constructors - return json.deserialize(json.serialize(manifest), clazz); - } - - static Map> annotatedRelationships(String account, - KubernetesManifestMetadata metadata, - boolean hasClusterRelationship) { - Moniker moniker = metadata.getMoniker(); - String application = moniker.getApp(); - Optional optional = metadata.getArtifact(); - Map> cacheRelationships = new HashMap<>(); - - if (optional.isPresent()) { - Artifact artifact = optional.get(); - cacheRelationships.put(ARTIFACT.toString(), Collections.singletonList(Keys.artifact(artifact.getType(), artifact.getName(), artifact.getLocation(), artifact.getVersion()))); - } - - if (hasClusterRelationship) { - cacheRelationships.put(APPLICATIONS.toString(), Collections.singletonList(Keys.application(application))); - String cluster = moniker.getCluster(); - if 
(StringUtils.isNotEmpty(cluster)) { - cacheRelationships.put(CLUSTERS.toString(), Collections.singletonList(Keys.cluster(account, application, cluster))); - } - } - - return cacheRelationships; - } - - static void addSingleRelationship(Map> relationships, String account, String namespace, String fullName) { - Pair triple = KubernetesManifest.fromFullResourceName(fullName); - KubernetesKind kind = triple.getLeft(); - String name = triple.getRight(); - - Collection keys = relationships.get(kind.toString()); - - if (keys == null) { - keys = new ArrayList<>(); - } - - keys.add(Keys.infrastructure(kind, account, namespace, name)); - - relationships.put(kind.toString(), keys); - } - - static Map> implicitRelationships(KubernetesManifest source, String account, List manifests) { - String namespace = source.getNamespace(); - Map> relationships = new HashMap<>(); - manifests = manifests == null ? new ArrayList<>() : manifests; - logMalformedManifests(() -> "Determining implicit relationships for " + source + " in " + account, manifests); - for (KubernetesManifest manifest : manifests) { - KubernetesKind kind = manifest.getKind(); - String name = manifest.getName(); - Collection keys = relationships.get(kind.toString()); - if (keys == null) { - keys = new ArrayList<>(); - } - - keys.add(Keys.infrastructure(kind, account, namespace, name)); - relationships.put(kind.toString(), keys); - } - - return relationships; - } - - static Map> ownerReferenceRelationships(String account, String namespace, List references) { - Map> relationships = new HashMap<>(); - references = references == null ? new ArrayList<>() : references; - for (KubernetesManifest.OwnerReference reference : references) { - KubernetesKind kind = reference.getKind(); - String name = reference.getName(); - Collection keys = relationships.get(kind.toString()); - if (keys == null) { - keys = new ArrayList<>(); - } - - keys.add(Keys.infrastructure(kind, account, namespace, name)); - relationships.put(kind.toString(), keys); - } - - return relationships; - } - - /** - * To ensure the entire relationship graph is bidirectional, invert any relationship entries here to point back at the - * resource being cached (key). 
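Concretely, inversion walks each outgoing relationship of a cached item and emits a reverse edge keyed by the related item, grouped under the source item's group. A self-contained sketch in plain JDK maps; the key strings are illustrative, shaped like the createKey output in Keys above:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class RelationshipInversionSketch {
    // For one cached item (its key and group) and its outgoing relationships,
    // emit the reverse edges: related key -> (source group -> [source key]).
    static Map<String, Map<String, List<String>>> invert(
            String group, String key, Map<String, Collection<String>> relationships) {
        Map<String, Map<String, List<String>>> inverted = new HashMap<>();
        for (Collection<String> related : relationships.values()) {
            for (String relatedKey : related) {
                inverted.computeIfAbsent(relatedKey, k -> new HashMap<>())
                        .computeIfAbsent(group, g -> new ArrayList<>())
                        .add(key);
            }
        }
        return inverted;
    }

    public static void main(String[] args) {
        String rsKey = "kubernetes.v2:infrastructure:replicaSet:acct:default:myapp-v003";
        Map<String, Collection<String>> rels =
            Map.of("applications", List.of("kubernetes.v2:logical:applications:myapp"));
        System.out.println(invert("replicaSet", rsKey, rels));
        // {kubernetes.v2:logical:applications:myapp={replicaSet=[kubernetes.v2:infrastructure:replicaSet:acct:default:myapp-v003]}}
    }
}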
- */ - static List invertRelationships(CacheData cacheData) { - String key = cacheData.getId(); - Keys.CacheKey parsedKey = Keys.parseKey(key).orElseThrow(() -> new IllegalStateException("Cache data produced with illegal key format " + key)); - String group = parsedKey.getGroup(); - Map> relationshipGroupings = cacheData.getRelationships(); - List result = new ArrayList<>(); - - for (Collection relationships : relationshipGroupings.values()) { - for (String relationship : relationships) { - invertSingleRelationship(group, key, relationship).flatMap(cd -> { - result.add(cd); - return Optional.empty(); - }); - } - } - - return result; - } - - static void logStratifiedCacheData(String agentType, Map> stratifiedCacheData) { - for (Map.Entry> entry : stratifiedCacheData.entrySet()) { - log.info(agentType + ": grouping " + entry.getKey() + " has " + entry.getValue().size() + " entries and " + relationshipCount(entry.getValue()) + " relationships"); - } - } - - static void logMalformedManifests(Supplier contextMessage, List relationships) { - for (KubernetesManifest relationship : relationships) { - logMalformedManifest(contextMessage, relationship); - } - } - - static void logMalformedManifest(Supplier contextMessage, KubernetesManifest manifest) { - if (manifest == null) { - log.warn("{}: manifest may not be null", contextMessage.get()); - return; - } - - if (manifest.getKind() == null) { - log.warn("{}: manifest kind may not be null, {}", contextMessage.get(), manifest); - } - - if (StringUtils.isEmpty(manifest.getName())) { - log.warn("{}: manifest name may not be null, {}", contextMessage.get(), manifest); - } - - if (StringUtils.isEmpty(manifest.getNamespace()) && manifest.getKind().isNamespaced()) { - log.warn("{}: manifest namespace may not be null, {}", contextMessage.get(), manifest); - } - } - - static int relationshipCount(Collection data) { - return data.stream() - .map(d -> relationshipCount(d)) - .reduce(0, (a, b) -> a + b); - } - - static int relationshipCount(CacheData data) { - return data.getRelationships().values() - .stream() - .map(Collection::size) - .reduce(0, (a, b) -> a + b); - } - - @Builder - private static class CacheDataKeyPair { - Keys.CacheKey key; - CacheData cacheData; - } - - static Map> stratifyCacheDataByGroup(Collection ungroupedCacheData) { - return ungroupedCacheData.stream().map(cd -> CacheDataKeyPair.builder() - .cacheData(cd) - .key(Keys.parseKey(cd.getId()).orElseThrow(() -> new IllegalStateException("Cache data produced with illegal key format " + cd.getId()))) - .build()) - .filter(kp -> { - // given that we now have large caching agents that are authoritative for huge chunks of the cache, - // it's possible that some resources (like events) still point to deleted resources. these won't have - // any attributes, but if we add a cache entry here, the deleted item will still be cached - if (kp.key instanceof Keys.InfrastructureCacheKey) { - return !(kp.cacheData.getAttributes() == null || kp.cacheData.getAttributes().isEmpty()); - } else { - return true; - } - }) - .collect(Collectors.groupingBy(kp -> kp.key.getGroup(), Collectors.mapping(kp -> kp.cacheData, Collectors.toCollection(ArrayList::new)))); - } - - /* - * Worth noting the strange behavior here. If we are inverting a relationship to create a cache data for - * either a cluster or an application we need to insert attributes to ensure the cache data gets entered into - * the cache. 
If we are caching anything else, we don't want competing agents to overwrite attributes, so - * we leave them blank. - */ - private static Optional<CacheData> invertSingleRelationship(String group, String key, String relationship) { - Map<String, Collection<String>> relationships = new HashMap<>(); - relationships.put(group, Collections.singletonList(key)); - return Keys.parseKey(relationship).map(k -> { - Map<String, Object> attributes; - int ttl; - if (Keys.LogicalKind.isLogicalGroup(k.getGroup())) { - ttl = logicalTtlSeconds; - attributes = new ImmutableMap.Builder<String, Object>() - .put("name", k.getName()) - .build(); - } else { - ttl = infrastructureTtlSeconds; - attributes = new HashMap<>(); - } - return new DefaultCacheData(relationship, ttl, attributes, relationships); - }); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCoreCachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCoreCachingAgent.java deleted file mode 100644 index d27c1e4736d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCoreCachingAgent.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
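The policy in that comment reduces to two per-entry decisions, the TTL and whether to attach attributes, both keyed off whether the target group is logical. A minimal sketch with the group names hard-coded; the deleted code consults Keys.LogicalKind and its configured TTL constants instead:

class InvertedEntryPolicySketch {
    static final int LOGICAL_TTL_SECONDS = 600;       // the ten-minute logical TTL above
    static final int INFRASTRUCTURE_TTL_SECONDS = -1; // never expires

    // hard-coded here; the deleted code asks Keys.LogicalKind.isLogicalGroup instead
    static boolean isLogicalGroup(String group) {
        return group.equals("applications") || group.equals("clusters");
    }

    static int ttlFor(String group) {
        return isLogicalGroup(group) ? LOGICAL_TTL_SECONDS : INFRASTRUCTURE_TTL_SECONDS;
    }

    static boolean carriesNameAttribute(String group) {
        // only logical entries get attributes, so they register as real cache data;
        // infrastructure entries stay blank to avoid clobbering other agents' attributes
        return isLogicalGroup(group);
    }

    public static void main(String[] args) {
        System.out.println(ttlFor("applications") + " " + carriesNameAttribute("applications")); // 600 true
        System.out.println(ttlFor("replicaSet") + " " + carriesNameAttribute("replicaSet"));     // -1 false
    }
}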
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; - -@Slf4j -public class KubernetesCoreCachingAgent extends KubernetesV2OnDemandCachingAgent { - KubernetesCoreCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, propertyRegistry, objectMapper, registry, agentIndex, agentCount); - } - - public Collection getProvidedDataTypes() { - List types = new ArrayList<>(); - types.add(AUTHORITATIVE.forType(Keys.LogicalKind.APPLICATIONS.toString())); - types.add(AUTHORITATIVE.forType(Keys.LogicalKind.CLUSTERS.toString())); - - types.addAll(primaryKinds().stream().map(k -> AUTHORITATIVE.forType(k.toString())).collect(Collectors.toList())); - - return Collections.unmodifiableSet(new HashSet<>(types)); - } - - @Override - protected List primaryKinds() { - synchronized (KubernetesKind.getValues()) { - return KubernetesKind.getValues().stream() - .filter(credentials::isValidKind) - .filter(k -> !k.isDynamic()) - .collect(Collectors.toList()); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesMetricCachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesMetricCachingAgent.java deleted file mode 100644 index 97ca9d33639..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesMetricCachingAgent.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.agent.DefaultCacheResult; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.Kind.KUBERNETES_METRIC; - -@Slf4j -public class KubernetesMetricCachingAgent extends KubernetesCachingAgent { - @Getter - protected String providerName = KubernetesCloudProvider.getID(); - - @Getter - protected Collection providedDataTypes = Collections.unmodifiableCollection( - Collections.singletonList(AUTHORITATIVE.forType(KUBERNETES_METRIC.toString())) - ); - - protected KubernetesMetricCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount); - } - - @Override - public CacheResult loadData(ProviderCache providerCache) { - log.info(getAgentType() + " is starting"); - reloadNamespaces(); - - List cacheData = namespaces.stream() - .map(n -> credentials.topPod(n).stream() - .map(m -> KubernetesCacheDataConverter.convertPodMetric(accountName, n, m)) - ).flatMap(x -> x) - .collect(Collectors.toList()); - - List invertedRelationships = cacheData.stream() - .map(KubernetesCacheDataConverter::invertRelationships) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - cacheData.addAll(invertedRelationships); - - Map> entries = KubernetesCacheDataConverter.stratifyCacheDataByGroup(KubernetesCacheDataConverter.dedupCacheData(cacheData)); - KubernetesCacheDataConverter.logStratifiedCacheData(getAgentType(), entries); - - return new DefaultCacheResult(entries); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesNamespaceCachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesNamespaceCachingAgent.java deleted file mode 100644 index 64b663d11df..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesNamespaceCachingAgent.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
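loadData above issues one topPod query per namespace and flattens the per-namespace streams into a single list before deduplication. The same stream shape with stand-in types (podMetricsFor stands in for credentials.topPod, the string concatenation for convertPodMetric):

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

class MetricFanOutSketch {
  static List<String> collectMetrics(List<String> namespaces,
                                     Function<String, List<String>> podMetricsFor) {
    return namespaces.stream()
        .map(ns -> podMetricsFor.apply(ns).stream()
            .map(metric -> ns + "/" + metric))  // stand-in for convertPodMetric(...)
        .flatMap(x -> x)                        // flatten Stream<Stream<T>>, as above
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    System.out.println(collectMetrics(
        Arrays.asList("default", "kube-system"),
        ns -> Arrays.asList("pod-a", "pod-b")));
    // [default/pod-a, default/pod-b, kube-system/pod-a, kube-system/pod-b]
  }
}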
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; - -@Slf4j -public class KubernetesNamespaceCachingAgent extends KubernetesV2CachingAgent { - KubernetesNamespaceCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, propertyRegistry, objectMapper, registry, agentIndex, agentCount); - } - - @Override - protected Map> loadPrimaryResourceList() { - reloadNamespaces(); - - // TODO perf: Only load desired namespaces rather than filter all. - Set desired = new HashSet<>(this.namespaces); - return Collections.singletonMap(KubernetesKind.NAMESPACE, credentials.list(KubernetesKind.NAMESPACE, "") - .stream() - .filter(ns -> desired.contains(ns.getName())) - .collect(Collectors.toList())); - } - - @Override - protected KubernetesKind primaryKind() { - return KubernetesKind.NAMESPACE; - } - - @Getter - final private Collection providedDataTypes = Collections.unmodifiableSet( - new HashSet<>(Arrays.asList( - INFORMATIVE.forType(Keys.LogicalKind.APPLICATIONS.toString()), - AUTHORITATIVE.forType(KubernetesKind.NAMESPACE.toString()) - )) - ); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesUnregisteredCustomResourceCachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesUnregisteredCustomResourceCachingAgent.java deleted file mode 100644 index f2b3a886b27..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesUnregisteredCustomResourceCachingAgent.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2018 Google, Inc. 
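The namespace agent's filter above copies the configured namespaces into a HashSet so each membership test is O(1) while the full cluster listing streams past it (the TODO notes the remaining cost of listing everything first); in isolation:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

class NamespaceFilterSketch {
  // Keep only the namespaces this agent is configured to cache.
  static List<String> desiredOnly(List<String> listed, List<String> configured) {
    Set<String> desired = new HashSet<>(configured);  // O(1) membership checks
    return listed.stream()
        .filter(desired::contains)
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    System.out.println(desiredOnly(
        Arrays.asList("default", "kube-system", "staging"),
        Arrays.asList("default", "staging")));  // [default, staging]
  }
}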
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Suppliers; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor.KubectlException; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; - -@Slf4j -public class KubernetesUnregisteredCustomResourceCachingAgent extends KubernetesV2OnDemandCachingAgent { - KubernetesUnregisteredCustomResourceCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, propertyRegistry, objectMapper, registry, agentIndex, agentCount); - - this.liveCrdSupplier = Suppliers.memoizeWithExpiration(() -> { - try { - return credentials.list(KubernetesKind.CUSTOM_RESOURCE_DEFINITION, "") - .stream() - .map(c -> { - Map spec = (Map) c.getOrDefault("spec", new HashMap<>()); - String scope = (String) spec.getOrDefault("scope", ""); - Map names = (Map) spec.getOrDefault("names", new HashMap<>()); - String name = names.get("kind"); - - return KubernetesKind.fromString(name, false, scope.equalsIgnoreCase("namespaced")); - }) - .collect(Collectors.toList()); - } catch (KubectlException e) { - // not logging here -- it will generate a lot of noise in cases where crds aren't available/registered in the first place - return new ArrayList<>(); - } - }, crdExpirySeconds, TimeUnit.SECONDS); - } - - // TODO(lwander) make configurable - private final static int crdExpirySeconds = 30; - - private final com.google.common.base.Supplier> liveCrdSupplier; - - public Collection getProvidedDataTypes() { - return Collections.unmodifiableSet( - primaryKinds().stream().map(k -> AUTHORITATIVE.forType(k.toString())).collect(Collectors.toSet()) - ); - } - - @Override - protected List primaryKinds() { - return liveCrdSupplier.get(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2CachingAgent.java 
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2CachingAgent.java deleted file mode 100644 index 9bf67f60617..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2CachingAgent.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.agent.DefaultCacheResult; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesCachingPolicy; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.RegistryUtils; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor.KubectlException; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.NotImplementedException; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; - -@Slf4j -public abstract class KubernetesV2CachingAgent extends KubernetesCachingAgent { - protected KubectlJobExecutor jobExecutor; - - @Getter - protected String providerName = KubernetesCloudProvider.getID(); - - private final KubernetesResourcePropertyRegistry propertyRegistry; - - protected KubernetesV2CachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, objectMapper, registry, agentIndex, agentCount); - this.propertyRegistry = propertyRegistry; - } - - protected KubernetesKind primaryKind() { - throw new NotImplementedException("No primary kind registered, this is an implementation error."); - } - - protected List primaryKinds() { - return 
Collections.singletonList(primaryKind()); - } - - protected Map> loadPrimaryResourceList() { - Map> result = namespaces.stream() - .map(n -> { - try { - return credentials.list(primaryKinds(), n); - } catch (KubectlException e) { - log.warn("Failed to read kind {} from namespace {}: {}", primaryKinds(), n, e.getMessage()); - return null; - } - }) - .filter(Objects::nonNull) - .flatMap(Collection::stream) - .collect(Collectors.groupingBy(KubernetesManifest::getKind)); - - for (KubernetesCachingPolicy policy : credentials.getCachingPolicies()) { - KubernetesKind policyKind = KubernetesKind.fromString(policy.getKubernetesKind()); - if (!result.containsKey(policyKind)) { - continue; - } - - List entries = result.get(policyKind); - if (entries == null) { - continue; - } - - if (entries.size() > policy.getMaxEntriesPerAgent()) { - log.warn("{}: Pruning {} entries from kind {}", getAgentType(), entries.size() - policy.getMaxEntriesPerAgent(), policyKind); - entries = entries.subList(0, policy.getMaxEntriesPerAgent()); - result.put(policyKind, entries); - } - } - - return result; - } - - protected KubernetesManifest loadPrimaryResource(KubernetesKind kind, String namespace, String name) { - return credentials.get(kind, namespace, name); - } - - @Override - public CacheResult loadData(ProviderCache providerCache) { - log.info(getAgentType() + " is starting"); - reloadNamespaces(); - - try { - return buildCacheResult(loadPrimaryResourceList()); - } catch (KubectlJobExecutor.NoResourceTypeException e) { - log.warn(getAgentType() + ": resource for this caching agent is not supported for this cluster"); - return new DefaultCacheResult(new HashMap<>()); - } - } - - protected CacheResult buildCacheResult(KubernetesManifest resource) { - return buildCacheResult(Collections.singletonMap(resource.getKind(), Collections.singletonList(resource))); - } - - protected CacheResult buildCacheResult(Map> resources) { - Map> relationships = loadSecondaryResourceRelationships(resources); - - List resourceData = resources.values() - .stream() - .flatMap(Collection::stream) - .peek(m -> RegistryUtils.removeSensitiveKeys(propertyRegistry, accountName, m)) - .map(rs -> { - try { - return KubernetesCacheDataConverter.convertAsResource(accountName, rs, relationships.get(rs)); - } catch (Exception e) { - log.warn("Failure converting {} as resource", rs, e); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - - List invertedRelationships = resourceData.stream() - .map(KubernetesCacheDataConverter::invertRelationships) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - resourceData.addAll(resources.values() - .stream() - .flatMap(Collection::stream) - .map(rs -> KubernetesCacheDataConverter.convertAsArtifact(accountName, rs)) - .filter(Objects::nonNull) - .collect(Collectors.toList())); - - resourceData.addAll(invertedRelationships); - - Map> entries = KubernetesCacheDataConverter.stratifyCacheDataByGroup(KubernetesCacheDataConverter.dedupCacheData(resourceData)); - KubernetesCacheDataConverter.logStratifiedCacheData(getAgentType(), entries); - - return new DefaultCacheResult(entries); - } - - protected Map> loadSecondaryResourceRelationships(Map> allResources) { - Map> result = new HashMap<>(); - allResources.keySet().forEach(k -> { - try { - RegistryUtils.addRelationships(propertyRegistry, accountName, k, allResources, result); - } catch (Exception e) { - log.warn("Failure adding relationships for {}", k, e); - } - }); - return result; - } -} diff --git 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2CachingAgentDispatcher.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2CachingAgentDispatcher.java deleted file mode 100644 index 458f95b070a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2CachingAgentDispatcher.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesCachingAgentDispatcher; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.NONE; - -@Component -@Slf4j -public class KubernetesV2CachingAgentDispatcher implements KubernetesCachingAgentDispatcher { - @Autowired - private ObjectMapper objectMapper; - - @Autowired - private Registry registry; - - @Autowired - private KubernetesResourcePropertyRegistry propertyRegistry; - - @Override - public Collection buildAllCachingAgents(KubernetesNamedAccountCredentials credentials) { - KubernetesV2Credentials v2Credentials = (KubernetesV2Credentials) credentials.getCredentials(); - List result = new ArrayList<>(); - IntStream.range(0, credentials.getCacheThreads()) - .boxed() - .forEach(i -> propertyRegistry.values() - .stream() - .map(KubernetesResourceProperties::getHandler) - .filter(Objects::nonNull) - .filter(h -> v2Credentials.isValidKind(h.kind()) || h.kind() == NONE) - .map(h -> h.buildCachingAgent(credentials, propertyRegistry, objectMapper, registry, i, credentials.getCacheThreads())) - .filter(Objects::nonNull) - .forEach(c -> result.add((KubernetesCachingAgent) c)) - ); - - if (v2Credentials.isMetrics()) { - IntStream.range(0, credentials.getCacheThreads()) - .boxed() - .forEach(i -> result.add(new KubernetesMetricCachingAgent(credentials, objectMapper, registry, i, credentials.getCacheThreads()))); - 
} - - return result.stream() - .collect(Collectors.toMap(KubernetesCachingAgent::getAgentType, c -> c, (a, b) -> b)) - .values(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2OnDemandCachingAgent.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2OnDemandCachingAgent.java deleted file mode 100644 index 7a754fa28ce..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesV2OnDemandCachingAgent.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.agent.DefaultCacheResult; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.DefaultCacheData; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.names.NamerRegistry; -import com.netflix.spinnaker.moniker.Namer; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandType.Manifest; - -@Slf4j -public abstract class KubernetesV2OnDemandCachingAgent extends KubernetesV2CachingAgent implements OnDemandAgent { - @Getter - 
protected final OnDemandMetricsSupport metricsSupport; - - protected final static String ON_DEMAND_TYPE = "onDemand"; - private final static String CACHE_TIME_KEY = "cacheTime"; - private final static String PROCESSED_COUNT_KEY = "processedCount"; - private final static String PROCESSED_TIME_KEY = "processedTime"; - private final static String CACHE_RESULTS_KEY = "cacheResults"; - private final static String MONIKER_KEY = "moniker"; - private final static String DETAILS_KEY = "details"; - private final Namer namer; - - protected KubernetesV2OnDemandCachingAgent(KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry resourcePropertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount) { - super(namedAccountCredentials, resourcePropertyRegistry, objectMapper, registry, agentIndex, agentCount); - namer = NamerRegistry.lookup() - .withProvider(KubernetesCloudProvider.getID()) - .withAccount(namedAccountCredentials.getName()) - .withResource(KubernetesManifest.class); - - metricsSupport = new OnDemandMetricsSupport(registry, this, KubernetesCloudProvider.getID() + ":" + Manifest); - } - - @Override - public CacheResult loadData(ProviderCache providerCache) { - log.info(getAgentType() + " is starting"); - reloadNamespaces(); - - Long start = System.currentTimeMillis(); - Map> primaryResource; - try { - primaryResource = loadPrimaryResourceList(); - } catch (KubectlJobExecutor.NoResourceTypeException e) { - log.warn(getAgentType() + ": resource for this caching agent is not supported for this cluster"); - return new DefaultCacheResult(new HashMap<>()); - } - - List primaryKeys = primaryResource.values() - .stream() - .flatMap(Collection::stream) - .map(rs -> objectMapper.convertValue(rs, KubernetesManifest.class)) - .map(mf -> Keys.infrastructure(mf, accountName)) - .collect(Collectors.toList()); - - List keepInOnDemand = new ArrayList<>(); - List evictFromOnDemand = new ArrayList<>(); - - Collection existingKeys = providerCache.existingIdentifiers(ON_DEMAND_TYPE, primaryKeys); - - providerCache.getAll(ON_DEMAND_TYPE, existingKeys).forEach(cd -> { - // can't be a ternary op due to restrictions on non-statement expressions in lambdas - if (shouldKeepInOnDemand(start, cd)) { - keepInOnDemand.add(cd); - } else { - evictFromOnDemand.add(cd); - } - processOnDemandEntry(cd); - }); - - // sort by increasing cache time to ensure newest entries are first - keepInOnDemand.sort(Comparator.comparing(a -> ((Long) a.getAttributes().get(CACHE_TIME_KEY)))); - - // first build the cache result, then decide which entries to overwrite with on demand data - CacheResult result = buildCacheResult(primaryResource); - Map> cacheResults = result.getCacheResults(); - - for (CacheData onDemandData : keepInOnDemand) { - if (!shouldOverwriteUsingOnDemand(start, onDemandData)) { - continue; - } - - String onDemandKey = onDemandData.getId(); - log.info("{}: On demand entry '{}' is overwriting load data entry", getAgentType(), onDemandKey); - - String onDemandResultsJson = (String) onDemandData.getAttributes().get(CACHE_RESULTS_KEY); - - log.debug("{}: On demand entry contents overwriting load data entry: {}", getAgentType(), onDemandResultsJson); - Map> onDemandResults; - try { - onDemandResults = objectMapper.readValue(onDemandResultsJson, new TypeReference>>() { }); - } catch (IOException e) { - log.error("Failure parsing stored on demand data for '{}'", onDemandKey, e); - continue; - } - - mergeCacheResults(cacheResults, onDemandResults); - } 
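Reading the stored cacheResults JSON back requires Jackson's TypeReference, since the generic target type is erased at runtime; a self-contained sketch with plain maps standing in for DefaultCacheData values:

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;

class OnDemandJsonSketch {
  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    String json = "{\"pods\":[{\"id\":\"some-infrastructure-key\"}]}";
    // The anonymous TypeReference subclass preserves the generic type at runtime,
    // which a bare Map.class cannot do.
    Map<String, Collection<Map<String, Object>>> onDemandResults =
        mapper.readValue(json,
            new TypeReference<Map<String, Collection<Map<String, Object>>>>() {});
    System.out.println(onDemandResults.get("pods"));
  }
}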
- - cacheResults.put(ON_DEMAND_TYPE, keepInOnDemand); - Map> evictionResults = new ImmutableMap.Builder>() - .put(ON_DEMAND_TYPE, evictFromOnDemand.stream().map(CacheData::getId).collect(Collectors.toList())) - .build(); - - return new DefaultCacheResult(cacheResults, evictionResults); - } - - protected void mergeCacheResults(Map> current, Map> added) { - for (String group : added.keySet()) { - Collection currentByGroup = current.get(group); - Collection addedByGroup = added.get(group); - - currentByGroup = currentByGroup == null ? new ArrayList<>() : currentByGroup; - addedByGroup = addedByGroup == null ? new ArrayList<>() : addedByGroup; - - for (CacheData addedCacheData : addedByGroup) { - CacheData mergedEntry = currentByGroup.stream() - .filter(cd -> cd.getId().equals(addedCacheData.getId())) - .findFirst() - .flatMap(cd -> Optional.of(KubernetesCacheDataConverter.mergeCacheData(cd, addedCacheData))) - .orElse(addedCacheData); - - currentByGroup.removeIf(cd -> cd.getId().equals(addedCacheData.getId())); - currentByGroup.add(mergedEntry); - } - - current.put(group, currentByGroup); - } - } - - private boolean shouldOverwriteUsingOnDemand(Long startTime, CacheData onDemandEntry) { - Map attributes = onDemandEntry.getAttributes(); - Long cacheTime = (Long) attributes.get(CACHE_TIME_KEY); - - return cacheTime != null && cacheTime >= startTime; - } - - private void processOnDemandEntry(CacheData onDemandEntry) { - Map attributes = onDemandEntry.getAttributes(); - Integer processedCount = (Integer) attributes.get(PROCESSED_COUNT_KEY); - Long processedTime = System.currentTimeMillis(); - - processedCount = processedCount == null ? 0 : processedCount; - processedCount += 1; - - attributes.put(PROCESSED_TIME_KEY, processedTime); - attributes.put(PROCESSED_COUNT_KEY, processedCount); - } - - private boolean shouldKeepInOnDemand(Long lastFullRefresh, CacheData onDemandEntry) { - Map attributes = onDemandEntry.getAttributes(); - Long cacheTime = (Long) attributes.get(CACHE_TIME_KEY); - Integer processedCount = (Integer) attributes.get(PROCESSED_COUNT_KEY); - - cacheTime = cacheTime == null ? 0L : cacheTime; - processedCount = processedCount == null ? 
0 : processedCount; - - return cacheTime >= lastFullRefresh || processedCount == 0; - } - - private OnDemandAgent.OnDemandResult evictEntry(ProviderCache providerCache, KubernetesKind kind, String key) { - Map> evictions = new HashMap<>(); - CacheResult cacheResult = new DefaultCacheResult(new HashMap<>()); - - log.info("Evicting on demand '{}'", key); - providerCache.evictDeletedItems(ON_DEMAND_TYPE, Collections.singletonList(key)); - evictions.put(kind.toString(), Collections.singletonList(key)); - - return new OnDemandAgent.OnDemandResult(getOnDemandAgentType(), cacheResult, evictions); - } - - private OnDemandAgent.OnDemandResult addEntry(ProviderCache providerCache, String key, KubernetesManifest manifest) throws JsonProcessingException { - Map> evictions = new HashMap<>(); - CacheResult cacheResult; - - log.info("{}: Storing on demand '{}'", getAgentType(), key); - cacheResult = buildCacheResult(manifest); - String jsonResult = objectMapper.writeValueAsString(cacheResult.getCacheResults()); - log.debug("{}: On demand entry being written: {}", getAgentType(), jsonResult); - - Map attributes = new ImmutableMap.Builder() - .put(CACHE_TIME_KEY, System.currentTimeMillis()) - .put(CACHE_RESULTS_KEY, jsonResult) - .put(PROCESSED_COUNT_KEY, 0) - .put(PROCESSED_TIME_KEY, -1) - .put(MONIKER_KEY, namer.deriveMoniker(manifest)) - .build(); - - Map> relationships = new HashMap<>(); - CacheData onDemandData = new DefaultCacheData(key, attributes, relationships); - providerCache.putCacheData(ON_DEMAND_TYPE, onDemandData); - - return new OnDemandAgent.OnDemandResult(getOnDemandAgentType(), cacheResult, evictions); - } - - @Override - public OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - String account = (String) data.get("account"); - String namespace = (String) data.get("location"); - String fullName = (String) data.get("name"); - String name; - KubernetesKind kind; - - // todo(lwander): this can be removed - log.debug("Queried for on demand cache refresh of '{}'", data); - - try { - Pair parsedName = KubernetesManifest.fromFullResourceName(fullName); - kind = parsedName.getLeft(); - if (!primaryKinds().contains(kind)) { - return null; - } - - name = parsedName.getRight(); - } catch (Exception e) { - // This is OK - the cache controller tries (w/o much info) to get every cache agent to handle each request - return null; - } - - reloadNamespaces(); - if ((StringUtils.isEmpty(account) || !account.equals(accountName)) - || StringUtils.isEmpty(name) - || (!StringUtils.isEmpty(namespace) && !namespaces.contains(namespace))) { - return null; - } - - log.info("Accepted on demand refresh of '{}'", data); - OnDemandAgent.OnDemandResult result; - KubernetesManifest manifest = loadPrimaryResource(kind, namespace, name); - String resourceKey = Keys.infrastructure(kind, account, namespace, name); - try { - result = manifest == null ? 
evictEntry(providerCache, kind, resourceKey) : addEntry(providerCache, resourceKey, manifest); - } catch (Exception e) { - log.error("Failed to process update of '{}'", resourceKey, e); - return null; - } - - log.info("On demand cache refresh of (data: {}) succeeded", data); - return result; - } - - @Override - public String getOnDemandAgentType() { - return getAgentType() + "-OnDemand"; - } - - @Override - public boolean handles(OnDemandType type, String cloudProvider) { - return type == Manifest && cloudProvider.equals(KubernetesCloudProvider.getID()); - } - - @Override - public Collection pendingOnDemandRequests(ProviderCache providerCache) { - Collection keys = providerCache.getIdentifiers(ON_DEMAND_TYPE); - List infraKeys = keys.stream() - .map(Keys::parseKey) - .flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty)) - .filter(k -> k instanceof Keys.InfrastructureCacheKey) - .map(i -> (Keys.InfrastructureCacheKey) i) - .collect(Collectors.toList()); - - List matchingKeys = infraKeys.stream() - .filter(i -> i.getAccount().equals(getAccountName()) - && (StringUtils.isEmpty(i.getNamespace())) || namespaces.contains(i.getNamespace()) - && primaryKinds().contains(i.getKubernetesKind())) - .map(Keys.InfrastructureCacheKey::toString) - .collect(Collectors.toList()); - - return providerCache.getAll(ON_DEMAND_TYPE, matchingKeys).stream() - .map(cd -> { - Keys.InfrastructureCacheKey parsedKey = (Keys.InfrastructureCacheKey) Keys.parseKey(cd.getId()).get(); - Map details = mapKeyToOnDemandResult(parsedKey); - Map attributes = cd.getAttributes(); - return new ImmutableMap.Builder() - .put(DETAILS_KEY, details) - .put(MONIKER_KEY, attributes.get(MONIKER_KEY)) - .put(CACHE_TIME_KEY, attributes.get(CACHE_TIME_KEY)) - .put(PROCESSED_COUNT_KEY, attributes.get(PROCESSED_COUNT_KEY)) - .put(PROCESSED_TIME_KEY, attributes.get(PROCESSED_TIME_KEY)) - .build(); - }) - .collect(Collectors.toList()); - } - - private Map mapKeyToOnDemandResult(Keys.InfrastructureCacheKey key) { - return new ImmutableMap.Builder() - .put("name", KubernetesManifest.getFullResourceName( - key.getKubernetesKind(), - key.getName() - )) - .put("account", key.getAccount()) - .put("location", key.getNamespace()) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Application.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Application.java deleted file mode 100644 index edc8177a16f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Application.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
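The flatMap step in pendingOnDemandRequests above is the pre-Java 9 idiom for dropping empty Optionals out of a stream (Java 9 and later can use Optional::stream instead); in isolation:

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class OptionalFlattenSketch {
  public static void main(String[] args) {
    List<Optional<String>> parsedKeys = Arrays.asList(
        Optional.of("infrastructure-key"), Optional.empty(), Optional.of("logical-key"));
    List<String> present = parsedKeys.stream()
        // Present values become one-element streams, empties become empty streams,
        // so flatMap silently discards them.
        .flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty))
        .collect(Collectors.toList());
    System.out.println(present);  // [infrastructure-key, logical-key]
  }
}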
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.google.common.collect.ImmutableMap; -import com.netflix.spinnaker.clouddriver.model.Application; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class KubernetesV2Application implements Application { - private String name; - private Map> clusterNames = new HashMap<>(); - - public Map getAttributes() { - return new ImmutableMap.Builder() - .put("name", name) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Cluster.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Cluster.java deleted file mode 100644 index 8f8d9e5ff8c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Cluster.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.model.Cluster; -import com.netflix.spinnaker.clouddriver.model.LoadBalancer; -import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.Data; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -@Data -public class KubernetesV2Cluster implements Cluster { - String name; - Moniker moniker; - String type = KubernetesCloudProvider.getID(); - String accountName; - Set serverGroups = new HashSet<>(); - Set loadBalancers = new HashSet<>(); - - public KubernetesV2Cluster(String rawKey) { - Keys.ClusterCacheKey key = (Keys.ClusterCacheKey) Keys.parseKey(rawKey).get(); - this.name = key.getName(); - this.accountName = key.getAccount(); - this.moniker = Moniker.builder().cluster(name).build(); // TODO(lwander) if it turns out that cluster -> app is important, enforce constraints here. 
- } - - public KubernetesV2Cluster(String rawKey, List<KubernetesV2ServerGroup> serverGroups, List<KubernetesV2LoadBalancer> loadBalancers) { - this(rawKey); - this.serverGroups = serverGroups.stream() - .map(sg -> (ServerGroup) sg) - .collect(Collectors.toSet()); - - this.loadBalancers = loadBalancers.stream() - .map(sg -> (LoadBalancer) sg) - .collect(Collectors.toSet()); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Health.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Health.java deleted file mode 100644 index 01ce4e41f28..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Health.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.google.common.collect.ImmutableMap; - -import com.netflix.spinnaker.clouddriver.model.Health; -import com.netflix.spinnaker.clouddriver.model.HealthState; -import io.kubernetes.client.models.V1ContainerStatus; -import io.kubernetes.client.models.V1Pod; -import io.kubernetes.client.models.V1PodStatus; -import lombok.Data; - -import java.util.Map; - -@Data -// TODO(lwander): match spec described here https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ -public class KubernetesV2Health implements Health { - private final HealthState state; - private final String source; - private final String type; - private final String healthClass = "platform"; - - public KubernetesV2Health(V1PodStatus status) { - String phase = status.getPhase(); - this.source = "Pod"; - this.type = "kubernetes/pod"; - - if (phase.equalsIgnoreCase("pending")) { - state = HealthState.Down; - } else if (phase.equalsIgnoreCase("running")) { - state = HealthState.Up; - } else { - state = HealthState.Unknown; - } - } - - public KubernetesV2Health(V1ContainerStatus status) { - this.source = "Container " + status.getName(); - this.type = "kubernetes/container"; - - if (!status.isReady()) { - state = HealthState.Down; - } else { - state = HealthState.Up; - } - } - - public Map<String, Object> toMap() { - return new ImmutableMap.Builder<String, Object>() - .put("state", state.toString()) - .put("source", source) - .put("type", type) - .put(healthClass, healthClass) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Instance.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Instance.java deleted file mode 100644 index c550ddc5d80..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Instance.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2017 Google, Inc.
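The pod-phase mapping above reduces to a small pure function; a sketch with an enum standing in for clouddriver's HealthState (the phase check is flipped here to be null-safe):

class PodPhaseHealthSketch {
  enum HealthState { Up, Down, Unknown }

  // Pending pods report Down, running pods Up, and any other phase
  // (Succeeded, Failed, or an unrecognized value) maps to Unknown,
  // matching the V1PodStatus constructor above.
  static HealthState fromPhase(String phase) {
    if ("pending".equalsIgnoreCase(phase)) {
      return HealthState.Down;
    }
    if ("running".equalsIgnoreCase(phase)) {
      return HealthState.Up;
    }
    return HealthState.Unknown;
  }
}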
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.HealthState; -import com.netflix.spinnaker.clouddriver.model.Instance; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; -import io.kubernetes.client.models.V1Pod; -import io.kubernetes.client.models.V1PodStatus; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.NoArgsConstructor; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -@EqualsAndHashCode(callSuper = true) -@Data -@NoArgsConstructor -@Slf4j -public class KubernetesV2Instance extends ManifestBasedModel implements Instance { - Long launchTime; - List> health = new ArrayList<>(); - KubernetesManifest manifest; - Keys.InfrastructureCacheKey key; - - private KubernetesV2Instance(KubernetesManifest manifest, String key) { - this.manifest = manifest; - this.key = (Keys.InfrastructureCacheKey) Keys.parseKey(key).get(); - - V1Pod pod = KubernetesCacheDataConverter.getResource(this.manifest, V1Pod.class); - V1PodStatus status = pod.getStatus(); - if (status != null) { - health.add(new KubernetesV2Health(status).toMap()); - if (status.getContainerStatuses() != null) { - health.addAll(status - .getContainerStatuses() - .stream() - .map(KubernetesV2Health::new) - .map(KubernetesV2Health::toMap) - .collect(Collectors.toList())); - } - } - } - - public static KubernetesV2Instance fromCacheData(CacheData cd) { - if (cd == null) { - return null; - } - - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); - - if (manifest == null) { - log.warn("Cache data {} inserted without a manifest", cd.getId()); - return null; - } - - return new KubernetesV2Instance(manifest, cd.getId()); - } - - public LoadBalancerInstance toLoadBalancerInstance() { - return LoadBalancerInstance.builder() - .health(health.stream().reduce(new HashMap<>(), (a, b) -> { - Map result = new HashMap<>(); - result.putAll(a); - result.putAll(b); - return result; - })) - .id(getName()) - .zone(getZone()) - .name(getHumanReadableName()) - .build(); - } - - public HealthState getHealthState() { - return KubernetesModelUtil.getHealthState(health); - } - - // An implementor of the Instance interface is implicitly expected to return a globally-unique ID - // as its name because InstanceViewModel serializes it as such for API responses and Deck then - // relies on it to disambiguate between instances. 
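toLoadBalancerInstance above folds the per-pod and per-container health maps into one map with reduce, copying at each step so no input is mutated and later maps win on key collisions; the same fold in isolation:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class HealthMergeSketch {
  static Map<String, Object> merge(List<Map<String, Object>> health) {
    return health.stream().reduce(new HashMap<>(), (a, b) -> {
      // Copy into a fresh map so neither input (nor the identity map) is mutated.
      Map<String, Object> result = new HashMap<>();
      result.putAll(a);
      result.putAll(b);
      return result;
    });
  }

  public static void main(String[] args) {
    Map<String, Object> pod = new HashMap<>();
    pod.put("state", "Up");
    Map<String, Object> container = new HashMap<>();
    container.put("state", "Down");
    System.out.println(merge(Arrays.asList(pod, container)));  // {state=Down}
  }
}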
- public String getName() { - return super.getUid(); - } - - public String getHumanReadableName() { - return super.getName(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2LoadBalancer.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2LoadBalancer.java deleted file mode 100644 index 2edeeed7fba..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2LoadBalancer.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.LoadBalancer; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -@EqualsAndHashCode(callSuper = true) -@Data -@Slf4j -public class KubernetesV2LoadBalancer extends ManifestBasedModel implements LoadBalancer, LoadBalancerProvider.Details { - Set serverGroups = new HashSet<>(); - KubernetesManifest manifest; - Keys.InfrastructureCacheKey key; - - private KubernetesV2LoadBalancer(KubernetesManifest manifest, String key, Set serverGroups) { - this.manifest = manifest; - this.key = (Keys.InfrastructureCacheKey) Keys.parseKey(key).get(); - this.serverGroups = serverGroups; - } - - public static KubernetesV2LoadBalancer fromCacheData(CacheData cd, List serverGroupData, Map> serverGroupToInstanceData) { - if (cd == null) { - return null; - } - - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); - - if (manifest == null) { - log.warn("Cache data {} inserted without a manifest", cd.getId()); - return null; - } - - Set serverGroups = serverGroupData.stream() - // ignoring load balancers here since they are discarded by ::toLoadBalancerServerGroup - .map(d -> KubernetesV2ServerGroup.fromCacheData( - KubernetesV2ServerGroupCacheData.builder() - .serverGroupData(d) - .instanceData(serverGroupToInstanceData.get(d.getId())) - .loadBalancerData(new ArrayList<>()) - .build())) - .filter(Objects::nonNull) - 
.map(KubernetesV2ServerGroup::toLoadBalancerServerGroup) - .collect(Collectors.toSet()); - - return new KubernetesV2LoadBalancer(manifest, cd.getId(), serverGroups); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Manifest.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Manifest.java deleted file mode 100644 index fbc9a74de3c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2Manifest.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class KubernetesV2Manifest implements Manifest { - private String account; - private String location; - private Moniker moniker; - private KubernetesManifest manifest; - private Status status; - private Set artifacts = new HashSet<>(); - private List events = new ArrayList<>(); - private List warnings = new ArrayList<>(); - private List metrics = new ArrayList<>(); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2SecurityGroup.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2SecurityGroup.java deleted file mode 100644 index b89af95c76d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2SecurityGroup.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
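The view models above all guard fromCacheData the same way: a null cache entry, or an entry whose manifest is missing, yields null instead of an exception, so one bad record cannot break a whole API response. The shared shape, with hypothetical stand-ins for CacheData and the manifest converter:

import java.util.Map;
import java.util.function.Function;

class FromCacheDataGuardSketch {
  // Stand-in for CacheData: just an id plus attributes.
  static class Entry {
    final String id;
    final Map<String, Object> attributes;
    Entry(String id, Map<String, Object> attributes) {
      this.id = id;
      this.attributes = attributes;
    }
  }

  // extractManifest stands in for KubernetesCacheDataConverter.getManifest; build stands
  // in for the model constructor. Returning null keeps malformed entries out of the view.
  static <M, T> T fromCacheData(Entry cd, Function<Entry, M> extractManifest,
                                Function<M, T> build) {
    if (cd == null) {
      return null;
    }
    M manifest = extractManifest.apply(cd);
    if (manifest == null) {
      // The real classes log a warning here: "Cache data {} inserted without a manifest".
      return null;
    }
    return build.apply(manifest);
  }
}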
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.SecurityGroup; -import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary; -import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; -import io.kubernetes.client.models.V1NetworkPolicy; -import io.kubernetes.client.models.V1NetworkPolicyPort; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.NoArgsConstructor; -import lombok.extern.slf4j.Slf4j; - -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.NETWORKING_K8S_IO_V1; - -@EqualsAndHashCode(callSuper = true) -@Data -@Slf4j -public class KubernetesV2SecurityGroup extends ManifestBasedModel implements SecurityGroup { - private KubernetesManifest manifest; - private Keys.InfrastructureCacheKey key; - private String id; - - private Set inboundRules; - private Set outboundRules; - - @Override - public String getApplication() { - return getMoniker().getApp(); - } - - @Override - public SecurityGroupSummary getSummary() { - return KubernetesV2SecurityGroupSummary.builder() - .id(id) - .name(id) - .build(); - } - - KubernetesV2SecurityGroup(KubernetesManifest manifest, String key, Set inboundRules, Set outboundRules) { - this.manifest = manifest; - this.id = manifest.getFullResourceName(); - this.key = (Keys.InfrastructureCacheKey) Keys.parseKey(key).get(); - this.inboundRules = inboundRules; - this.outboundRules = outboundRules; - } - - public static KubernetesV2SecurityGroup fromCacheData(CacheData cd) { - if (cd == null) { - return null; - } - - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); - - if (manifest == null) { - log.warn("Cache data {} inserted without a manifest", cd.getId()); - return null; - } - - Set inboundRules = new HashSet<>(); - Set outboundRules = new HashSet<>(); - - if (manifest.getKind() != KubernetesKind.NETWORK_POLICY) { - log.warn("Unknown security group kind " + manifest.getKind()); - } else { - if (manifest.getApiVersion().equals(NETWORKING_K8S_IO_V1)) { - V1NetworkPolicy v1beta1NetworkPolicy = KubernetesCacheDataConverter.getResource(manifest, V1NetworkPolicy.class); - inboundRules = inboundRules(v1beta1NetworkPolicy); - outboundRules = outboundRules(v1beta1NetworkPolicy); - } else { - log.warn("Could not determine (in)/(out)bound rules for " + manifest.getName() + " at version " + manifest.getApiVersion()); - } - } - - return new KubernetesV2SecurityGroup(manifest, cd.getId(), inboundRules, outboundRules); - } - - private static Set inboundRules(V1NetworkPolicy policy) { - return policy.getSpec().getIngress().stream() - .map(i -> i.getPorts().stream().map(KubernetesV2SecurityGroup::fromPolicyPort)) - .flatMap(s -> s) - .collect(Collectors.toSet()); - } - - private static Set outboundRules(V1NetworkPolicy policy) { - return 
policy.getSpec().getEgress().stream() - .map(i -> i.getPorts().stream().map(KubernetesV2SecurityGroup::fromPolicyPort)) - .flatMap(s -> s) - .collect(Collectors.toSet()); - } - - private static Rule fromPolicyPort(V1NetworkPolicyPort policyPort) { - String port = policyPort.getPort(); - return new PortRule() - .setProtocol(policyPort.getProtocol()) - .setPortRanges(new TreeSet<>(Collections.singletonList(new StringPortRange(port)))); - } - - @Data - @NoArgsConstructor - @AllArgsConstructor - @Builder - private static class KubernetesV2SecurityGroupSummary implements SecurityGroupSummary { - private String name; - private String id; - } - - @Data - private static class PortRule implements Rule { - private SortedSet portRanges; - private String protocol; - } - - @EqualsAndHashCode(callSuper = true) - @Data - public static class StringPortRange extends Rule.PortRange { - protected String startPortName; - protected String endPortName; - StringPortRange(String port) { - Integer numPort; - try { - numPort = Integer.parseInt(port); - this.startPort = numPort; - this.endPort = numPort; - } catch (Exception e) { - this.startPortName = port; - this.endPortName = port; - } - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroup.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroup.java deleted file mode 100644 index fabef18c79e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroup.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
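The KubernetesV2SecurityGroup removed above maps a Kubernetes NetworkPolicy onto Spinnaker's security-group model; the notable detail is StringPortRange, which accepts either a numeric port ("8080") or a named container port ("http") and falls back to storing the name when parsing fails. A self-contained sketch of that fallback:

```java
// Sketch of the StringPortRange fallback from the removed class: a
// NetworkPolicy port may be a number or a named container port.
public final class PortRangeDemo {
  static final class StringPortRange {
    Integer startPort, endPort;        // set when the port is numeric
    String startPortName, endPortName; // set when the port is a name

    StringPortRange(String port) {
      try {
        int numPort = Integer.parseInt(port);
        this.startPort = numPort;
        this.endPort = numPort;
      } catch (NumberFormatException e) {
        this.startPortName = port;
        this.endPortName = port;
      }
    }

    @Override
    public String toString() {
      return startPort != null ? startPort + "-" + endPort : startPortName;
    }
  }

  public static void main(String[] args) {
    System.out.println(new StringPortRange("8080")); // 8080-8080
    System.out.println(new StringPortRange("http")); // http
  }
}
```

Catching NumberFormatException (rather than the original's bare Exception) narrows the handler to the only failure parseInt can actually produce.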
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.common.collect.ImmutableMap; -import com.google.common.primitives.Ints; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.HealthState; -import com.netflix.spinnaker.clouddriver.model.Instance; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; -import com.netflix.spinnaker.clouddriver.model.ServerGroup; -import com.netflix.spinnaker.clouddriver.model.ServerGroupManager.ServerGroupManagerSummary; -import com.netflix.spinnaker.clouddriver.model.ServerGroupSummary; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -@EqualsAndHashCode(callSuper = true) -@Data -@Slf4j -public class KubernetesV2ServerGroup extends ManifestBasedModel implements ServerGroup { - Boolean disabled; - Set zones = new HashSet<>(); - Set instances = new HashSet<>(); - Set loadBalancers = new HashSet<>(); - Set securityGroups = new HashSet<>(); - List serverGroupManagers = new ArrayList<>(); - Map launchConfig = new HashMap<>(); - Capacity capacity = new Capacity(); - ImageSummary imageSummary; - ImagesSummary imagesSummary; - KubernetesManifest manifest; - Keys.InfrastructureCacheKey key; - - @JsonIgnore - private static final ArtifactReplacer dockerImageReplacer; - - static { - dockerImageReplacer = new ArtifactReplacer(); - dockerImageReplacer.addReplacer(ArtifactReplacerFactory.dockerImageReplacer()); - } - - @Override - public ServerGroup.InstanceCounts getInstanceCounts() { - return ServerGroup.InstanceCounts.builder() - .total(Ints.checkedCast(instances.size())) - .up(Ints.checkedCast(instances.stream().filter(i -> i.getHealthState().equals(HealthState.Up)).count())) - .down(Ints.checkedCast(instances.stream().filter(i -> i.getHealthState().equals(HealthState.Down)).count())) - .unknown(Ints.checkedCast(instances.stream().filter(i -> i.getHealthState().equals(HealthState.Unknown)).count())) - .outOfService(Ints.checkedCast(instances.stream().filter(i -> i.getHealthState().equals(HealthState.OutOfService)).count())) - .starting(Ints.checkedCast(instances.stream().filter(i -> i.getHealthState().equals(HealthState.Starting)).count())) - .build(); - } - - public Map getBuildInfo() { - return new ImmutableMap.Builder() - .put("images", dockerImageReplacer.findAll(getManifest()) - .stream() - .map(Artifact::getReference) - .collect(Collectors.toSet())) - .build(); - } - - @Override - public Boolean isDisabled() { - return disabled; - } - - protected KubernetesV2ServerGroup(KubernetesManifest manifest, 
String key, List instances, Set loadBalancers, List serverGroupManagers) { - this.manifest = manifest; - this.key = (Keys.InfrastructureCacheKey) Keys.parseKey(key).get(); - this.instances = new HashSet<>(instances); - this.loadBalancers = loadBalancers; - this.serverGroupManagers = serverGroupManagers; - - Object odesired = ((Map) manifest - .getOrDefault("spec", new HashMap())) - .getOrDefault("replicas", 0); - Integer desired = 0; - - if (odesired instanceof Number) { - desired = ((Number) odesired).intValue(); - } else { - log.warn("Unable to cast replica count from unexpected type: {}", odesired.getClass()); - } - - this.capacity = Capacity.builder() - .desired(desired) - .build(); - } - - private static KubernetesV2ServerGroup fromCacheData(CacheData cd, List instanceData, List loadBalancerData, List serverGroupManagerKeys) { - if (cd == null) { - return null; - } - - if (instanceData == null) { - instanceData = new ArrayList<>(); - } - - if (serverGroupManagerKeys == null) { - serverGroupManagerKeys = new ArrayList<>(); - } - - List serverGroupManagers = serverGroupManagerKeys.stream() - .map(k -> ServerGroupManagerSummary.builder() - .account(k.getAccount()) - .location(k.getNamespace()) - .name(k.getName()) - .build() - ).collect(Collectors.toList()); - - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); - - if (manifest == null) { - log.warn("Cache data {} inserted without a manifest", cd.getId()); - return null; - } - - List instances = instanceData.stream() - .map(KubernetesV2Instance::fromCacheData) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - - Set loadBalancers = loadBalancerData.stream() - .map(CacheData::getId) - .map(Keys::parseKey) - .filter(Optional::isPresent) - .map(Optional::get) - .map(k -> (Keys.InfrastructureCacheKey) k) - .map(k -> KubernetesManifest.getFullResourceName(k.getKubernetesKind(), k.getName())) - .collect(Collectors.toSet()); - - return new KubernetesV2ServerGroup(manifest, cd.getId(), instances, loadBalancers, serverGroupManagers); - } - - public static KubernetesV2ServerGroup fromCacheData(KubernetesV2ServerGroupCacheData cacheData) { - return fromCacheData(cacheData.getServerGroupData(), cacheData.getInstanceData(), cacheData.getLoadBalancerData(), cacheData.getServerGroupManagerKeys()); - } - - public ServerGroupSummary toServerGroupSummary() { - return KubernetesV2ServerGroupSummary.builder() - .name(getName()) - .account(getAccount()) - .namespace(getRegion()) - .moniker(getMoniker()) - .build(); - } - - public LoadBalancerServerGroup toLoadBalancerServerGroup() { - return LoadBalancerServerGroup.builder() - .account(getAccount()) - .detachedInstances(new HashSet<>()) - .instances(instances.stream() - .map(i -> ((KubernetesV2Instance) i).toLoadBalancerInstance()) - .collect(Collectors.toSet())) - .name(getName()) - .region(getRegion()) - .isDisabled(isDisabled()) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroupManager.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroupManager.java deleted file mode 100644 index 880c55203cf..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroupManager.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2017 Google, Inc. 
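The KubernetesV2ServerGroup removed above derives its desired capacity from the raw manifest map rather than a typed spec, because the cached manifest is schemaless JSON: spec.replicas may be absent, and when present may arrive as any Number subtype depending on the parser. A sketch of that extraction under the same assumptions:

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of how the removed constructor derived desired capacity:
// read spec.replicas from the raw manifest map, default to 0, and
// tolerate whatever Number subtype the JSON parser produced.
public final class ReplicaCountDemo {
  @SuppressWarnings("unchecked")
  static int desiredReplicas(Map<String, Object> manifest) {
    Object spec = manifest.getOrDefault("spec", new HashMap<String, Object>());
    Object replicas = ((Map<String, Object>) spec).getOrDefault("replicas", 0);
    if (replicas instanceof Number) {
      return ((Number) replicas).intValue();
    }
    // The original logged a warning here instead of throwing.
    return 0;
  }

  public static void main(String[] args) {
    Map<String, Object> manifest = new HashMap<>();
    manifest.put("spec", Map.of("replicas", 3L)); // e.g. parsed as a Long
    System.out.println(desiredReplicas(manifest));      // 3
    System.out.println(desiredReplicas(new HashMap<>())); // 0
  }
}
```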
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupManagerCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.ServerGroupManager; -import com.netflix.spinnaker.clouddriver.model.ServerGroupSummary; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -@EqualsAndHashCode(callSuper = true) -@Data -@Slf4j -public class KubernetesV2ServerGroupManager extends ManifestBasedModel implements ServerGroupManager { - KubernetesManifest manifest; - Keys.InfrastructureCacheKey key; - Set serverGroups; - - KubernetesV2ServerGroupManager(KubernetesManifest manifest, String key, Set serverGroups) { - this.manifest = manifest; - this.key = (Keys.InfrastructureCacheKey) Keys.parseKey(key).get(); - this.serverGroups = serverGroups; - } - - private static KubernetesV2ServerGroupManager fromCacheData(CacheData cd, List serverGroupData) { - if (cd == null) { - return null; - } - - if (serverGroupData == null) { - serverGroupData = new ArrayList<>(); - } - - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); - - if (manifest == null) { - log.warn("Cache data {} inserted without a manifest", cd.getId()); - return null; - } - - Set serverGroups = serverGroupData.stream() - .map(data -> KubernetesV2ServerGroup.fromCacheData( - KubernetesV2ServerGroupCacheData.builder() - .serverGroupData(data) - .instanceData(new ArrayList<>()) - .loadBalancerData(new ArrayList<>()) - .build())) - .filter(Objects::nonNull) - .map(KubernetesV2ServerGroup::toServerGroupSummary) - .collect(Collectors.toSet()); - - return new KubernetesV2ServerGroupManager(manifest, cd.getId(), serverGroups); - } - - public static KubernetesV2ServerGroupManager fromCacheData(KubernetesV2ServerGroupManagerCacheData data) { - return fromCacheData(data.getServerGroupManagerData(), data.getServerGroupData()); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroupSummary.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroupSummary.java deleted file mode 100644 index 60b82904dcd..00000000000 --- 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/KubernetesV2ServerGroupSummary.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.clouddriver.model.ServerGroupSummary; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -@Data -@NoArgsConstructor -@AllArgsConstructor -@Builder -public class KubernetesV2ServerGroupSummary implements ServerGroupSummary { - String name; - String account; - String namespace; - Moniker moniker; - - public String getRegion() { - return namespace; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/ManifestBasedModel.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/ManifestBasedModel.java deleted file mode 100644 index 09f620e8f53..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/model/ManifestBasedModel.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
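KubernetesV2ServerGroupSummary (removed above) shows the provider's "namespace hack" in its smallest form: Spinnaker's model expects region and zone fields, so the Kubernetes provider reports the namespace for both. A trivial sketch with illustrative names:

```java
// Sketch of the namespace-to-region mapping used by the removed summary
// and model classes. Names are illustrative.
public final class NamespaceRegionDemo {
  static final class ServerGroupSummary {
    final String name;
    final String namespace;

    ServerGroupSummary(String name, String namespace) {
      this.name = name;
      this.namespace = namespace;
    }

    // Same trick as the removed KubernetesV2ServerGroupSummary.getRegion().
    String getRegion() { return namespace; }
  }

  public static void main(String[] args) {
    ServerGroupSummary s = new ServerGroupSummary("replicaSet my-app-v001", "prod");
    System.out.println(s.getRegion()); // prod
  }
}
```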
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.names.NamerRegistry; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.HashMap; -import java.util.Map; - -@Slf4j -abstract public class ManifestBasedModel { - public String getName() { - return getManifest().getFullResourceName(); - } - - // Spinnaker namespace hacks - public String getZone() { - return getManifest().getNamespace(); - } - - // Spinnaker namespace hacks - public String getRegion() { - return getManifest().getNamespace(); - } - - public String getUid() { - return getManifest().getUid(); - } - - public String getType() { - return KubernetesCloudProvider.getID(); - } - - public String getCloudProvider() { - return KubernetesCloudProvider.getID(); - } - - public String getProviderType() { - return KubernetesCloudProvider.getID(); - } - - public Moniker getMoniker() { - return NamerRegistry.lookup() - .withProvider(KubernetesCloudProvider.getID()) - .withAccount(getAccountName()) - .withResource(KubernetesManifest.class) - .deriveMoniker(getManifest()); - } - - public String getAccountName() { - return getKey().getAccount(); - } - - public String getAccount() { - return getAccountName(); - } - - public Long getCreatedTime() { - Map metadata = (Map) getManifest().getOrDefault("metadata", new HashMap<>()); - String timestamp = metadata.get("creationTimestamp"); - try { - if (StringUtils.isNotEmpty(timestamp)) { - return (new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX").parse(timestamp)).getTime(); - } - } catch (ParseException e) { - log.warn("Failed to parse timestamp: ", e); - } - - return null; - } - - public KubernetesKind getKind() { - return getManifest().getKind(); - } - - abstract protected KubernetesManifest getManifest(); - abstract protected Keys.InfrastructureCacheKey getKey(); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesCacheUtils.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesCacheUtils.java deleted file mode 100644 index 6d9ef003fb0..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesCacheUtils.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
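ManifestBasedModel (removed above) parses metadata.creationTimestamp with SimpleDateFormat and the ISO-zone pattern letter X, returning null on anything unparseable. A self-contained sketch of that behavior; on Java 8+, java.time.Instant.parse would be the more robust choice:

```java
import java.text.ParseException;
import java.text.SimpleDateFormat;

// Sketch of getCreatedTime(): Kubernetes serializes creationTimestamp as
// ISO-8601 (e.g. "2017-05-01T12:00:00Z"); "X" parses the ISO zone suffix.
public final class CreatedTimeDemo {
  static Long createdTime(String timestamp) {
    if (timestamp == null || timestamp.isEmpty()) {
      return null;
    }
    try {
      return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX").parse(timestamp).getTime();
    } catch (ParseException e) {
      return null; // the original logged a warning and returned null
    }
  }

  public static void main(String[] args) {
    System.out.println(createdTime("2017-05-01T12:00:00Z")); // epoch millis
    System.out.println(createdTime("not-a-date"));           // null
  }
}
```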
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.cats.cache.Cache; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.ManifestBasedModel; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.ModelHandler; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.stream.Collectors; - -@Component -@Slf4j -public class KubernetesCacheUtils { - private final Cache cache; - private final KubernetesSpinnakerKindMap kindMap; - private final KubernetesResourcePropertyRegistry registry; - - @Autowired - public KubernetesCacheUtils( - Cache cache, - KubernetesSpinnakerKindMap kindMap, - KubernetesResourcePropertyRegistry resourcePropertyRegistry - ) { - this.cache = cache; - this.kindMap = kindMap; - this.registry = resourcePropertyRegistry; - } - - public Collection getAllKeys(String type) { - return cleanupCollection(cache.getAll(type)); - } - - public Collection getAllKeysMatchingPattern(String type, String key) { - return cleanupCollection(cache.filterIdentifiers(type, key)); - } - - public Collection getAllDataMatchingPattern(String type, String key) { - return cleanupCollection(cache.getAll(type, getAllKeysMatchingPattern(type, key))); - } - - public Optional getSingleEntry(String type, String key) { - CacheData result = cache.get(type, key); - return result == null ? Optional.empty() : Optional.of(result); - } - - public Optional getSingleEntryWithRelationships(String type, String key, String... 
to) { - CacheData result = cache.get(type, key, RelationshipCacheFilter.include(to)); - return Optional.ofNullable(result); - } - - public Collection aggregateRelationshipsBySpinnakerKind(CacheData source, SpinnakerKind kind) { - return kindMap.translateSpinnakerKind(kind) - .stream() - .map(g -> source.getRelationships().get(g.toString())) - .filter(Objects::nonNull) - .flatMap(Collection::stream) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - } - - public Collection getTransitiveRelationship(String from, List sourceKeys, String to) { - Collection sourceData = cleanupCollection(cache.getAll(from, sourceKeys, RelationshipCacheFilter.include(to))); - return cleanupCollection(cache.getAll(to, sourceData.stream() - .map(CacheData::getRelationships) - .filter(Objects::nonNull) - .map(r -> r.get(to)) - .filter(Objects::nonNull) - .flatMap(Collection::stream) - .collect(Collectors.toList()))); - } - - public Collection getAllRelationshipsOfSpinnakerKind(Collection cacheData, SpinnakerKind spinnakerKind) { - return kindMap.translateSpinnakerKind(spinnakerKind) - .stream() - .map(kind -> loadRelationshipsFromCache(cacheData, kind.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - } - - public Collection loadRelationshipsFromCache(Collection sources, String relationshipType) { - List keys = cleanupCollection(sources).stream() - .map(CacheData::getRelationships) - .filter(Objects::nonNull) - .map(r -> r.get(relationshipType)) - .filter(Objects::nonNull) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - return cleanupCollection(cache.getAll(relationshipType, keys)); - } - - private Collection cleanupCollection(Collection items) { - if (items == null) { - return new ArrayList<>(); - } - - return items.stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); - } - - /* - * Builds a map of all keys belonging to `sourceKind` that are related to any entries in `targetData` - */ - public Map> mapByRelationship(Collection targetData, SpinnakerKind sourceKind) { - Map> result = new HashMap<>(); - - for (CacheData datum : targetData) { - Collection sourceKeys = aggregateRelationshipsBySpinnakerKind(datum, sourceKind); - - for (String sourceKey : sourceKeys) { - List storedData = result.getOrDefault(sourceKey, new ArrayList<>()); - storedData.add(datum); - result.put(sourceKey, storedData); - } - } - - return result; - } - - @SuppressWarnings("unchecked") - public T resourceModelFromCacheData(KubernetesV2CacheData cacheData) { - Keys.InfrastructureCacheKey key = (Keys.InfrastructureCacheKey) Keys.parseKey(cacheData.primaryData().getId()).get(); - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cacheData.primaryData()); - - KubernetesResourceProperties properties = registry.get(key.getAccount(), manifest.getKind()); - KubernetesHandler handler = properties.getHandler(); - if (handler instanceof ModelHandler) { - return (T) ((ModelHandler) handler).fromCacheData(cacheData); - } else { - return null; - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ApplicationProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ApplicationProvider.java deleted file mode 100644 index ae3a7f22ced..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ApplicationProvider.java 
+++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.ClusterCacheKey; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2Application; -import com.netflix.spinnaker.clouddriver.model.Application; -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.CLUSTERS; - -@Component -public class KubernetesV2ApplicationProvider implements ApplicationProvider { - private final KubernetesCacheUtils cacheUtils; - - @Autowired - KubernetesV2ApplicationProvider(KubernetesCacheUtils cacheUtils) { - this.cacheUtils = cacheUtils; - } - - @Override - public Set getApplications(boolean expand) { - // TODO(lwander) performance optimization: rely on expand parameter to make a more cache-efficient call - String clusterGlobKey = Keys.cluster("*", "*", "*"); - Map> keysByApplication = cacheUtils.getAllKeysMatchingPattern(CLUSTERS.toString(), clusterGlobKey).stream() - .map(Keys::parseKey) - .filter(Optional::isPresent) - .map(Optional::get) - .filter(ClusterCacheKey.class::isInstance) - .map(k -> (ClusterCacheKey) k) - .collect(Collectors.groupingBy( - ClusterCacheKey::getApplication, Collectors.toSet()) - ); - - return keysByApplication.entrySet() - .stream() - .map(e -> KubernetesV2Application.builder() - .name(e.getKey()) - .clusterNames(groupClustersByAccount(e.getValue())).build()) - .collect(Collectors.toSet()); - } - - @Override - public Application getApplication(String name) { - String clusterGlobKey = Keys.cluster("*", name, "*"); - List keys = cacheUtils.getAllKeysMatchingPattern(CLUSTERS.toString(), clusterGlobKey) - .stream() - .map(Keys::parseKey) - .filter(Optional::isPresent) - .map(Optional::get) - .filter(ClusterCacheKey.class::isInstance) - .map(k -> (ClusterCacheKey) k) - .collect(Collectors.toList()); - - if (keys.isEmpty()) { - return null; - } - - return KubernetesV2Application.builder() - .name(name) - .clusterNames(groupClustersByAccount(keys)) - .build(); - } - - private Map> groupClustersByAccount(Collection keys) { - return keys.stream() - .collect(Collectors.groupingBy( - ClusterCacheKey::getAccount, Collectors.mapping(ClusterCacheKey::getName, Collectors.toSet()) - )); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ArtifactProvider.java 
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ArtifactProvider.java deleted file mode 100644 index 65900af9b3a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ArtifactProvider.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Comparator; -import java.util.List; -import java.util.stream.Collectors; - -@Component -public class KubernetesV2ArtifactProvider implements ArtifactProvider { - private final KubernetesCacheUtils cacheUtils; - private final ObjectMapper objectMapper; - - @Autowired - KubernetesV2ArtifactProvider(KubernetesCacheUtils cacheUtils, ObjectMapper objectMapper) { - this.cacheUtils = cacheUtils; - this.objectMapper = objectMapper; - } - - @Override - public List getArtifacts(String type, String name, String location) { - String key = Keys.artifact(type, name, location, "*"); - return cacheUtils.getAllDataMatchingPattern(Keys.Kind.ARTIFACT.toString(), key) - .stream() - .sorted(Comparator.comparing(cd -> (String) cd.getAttributes().getOrDefault("creationTimestamp", ""))) - .map(this::cacheDataToArtifact) - .collect(Collectors.toList()); - } - - private Artifact cacheDataToArtifact(CacheData cacheData) { - return objectMapper.convertValue(cacheData.getAttributes().get("artifact"), Artifact.class); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ClusterProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ClusterProvider.java deleted file mode 100644 index f825c9990e1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ClusterProvider.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
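The KubernetesV2ArtifactProvider removed above orders cached artifacts by their "creationTimestamp" attribute, exploiting the fact that ISO-8601 strings sort lexicographically in time order, then rehydrates the stored map with Jackson's ObjectMapper.convertValue. A sketch of that flow; ArtifactPojo is a stand-in for the real Artifact type:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Sketch: sort cache attributes by ISO-8601 timestamp, then convert the
// stored "artifact" map into a typed object via Jackson.
public final class ArtifactSortDemo {
  public static class ArtifactPojo {
    public String type;
    public String reference;
  }

  public static void main(String[] args) {
    ObjectMapper mapper = new ObjectMapper();
    List<Map<String, Object>> attributes = List.of(
        Map.of("creationTimestamp", "2018-02-01T00:00:00Z",
               "artifact", Map.of("type", "docker/image", "reference", "nginx:2")),
        Map.of("creationTimestamp", "2018-01-01T00:00:00Z",
               "artifact", Map.of("type", "docker/image", "reference", "nginx:1")));

    List<ArtifactPojo> artifacts = attributes.stream()
        .sorted(Comparator.comparing(a -> (String) a.getOrDefault("creationTimestamp", "")))
        .map(a -> mapper.convertValue(a.get("artifact"), ArtifactPojo.class))
        .collect(Collectors.toList());

    artifacts.forEach(a -> System.out.println(a.reference)); // nginx:1, nginx:2
  }
}
```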
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.InfrastructureCacheKey; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2Cluster; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2LoadBalancer; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2ServerGroup; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.ClusterProvider; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.tuple.Pair; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.APPLICATIONS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.CLUSTERS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.INSTANCES; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.LOAD_BALANCERS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUPS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUP_MANAGERS; - -@Component -@Slf4j -public class KubernetesV2ClusterProvider implements ClusterProvider { - private final KubernetesCacheUtils cacheUtils; - private final KubernetesSpinnakerKindMap kindMap; - - @Autowired - KubernetesV2ClusterProvider(KubernetesCacheUtils cacheUtils, - KubernetesSpinnakerKindMap kindMap) { - this.cacheUtils = cacheUtils; - this.kindMap = kindMap; - } - - @Override - public Map> getClusters() { - return groupByAccountName( - translateClustersWithRelationships(cacheUtils.getAllKeys(CLUSTERS.toString())) - ); - } - - @Override - public Map> getClusterSummaries(String application) { - String applicationKey = Keys.application(application); - return groupByAccountName( - translateClusters(cacheUtils.getTransitiveRelationship(APPLICATIONS.toString(), - Collections.singletonList(applicationKey), - CLUSTERS.toString())) - ); - } - - @Override - public Map> getClusterDetails(String application) { - String clusterGlobKey = Keys.cluster("*", application, "*"); - return groupByAccountName( - translateClustersWithRelationships( - 
cacheUtils.getAllDataMatchingPattern(CLUSTERS.toString(), clusterGlobKey)) - ); - } - - @Override - public Set getClusters(String application, String account) { - String globKey = Keys.cluster(account, application, "*"); - return translateClustersWithRelationships( - cacheUtils.getAllDataMatchingPattern(CLUSTERS.toString(), globKey) - ); - } - - @Override - public KubernetesV2Cluster getCluster(String application, String account, String name) { - return getCluster(application, account, name, true); - } - - @Override - public KubernetesV2Cluster getCluster(String application, String account, String name, boolean includeDetails) { - return cacheUtils.getSingleEntry(CLUSTERS.toString(), Keys.cluster(account, application, name)) - .map(entry -> { - Collection clusterData = Collections.singletonList(entry); - Set result = includeDetails ? translateClustersWithRelationships(clusterData) : translateClusters(clusterData); - return result.iterator().next(); - }).orElse(null); - } - - @Override - public KubernetesV2ServerGroup getServerGroup(String account, String namespace, String name, boolean includeDetails) { - Pair parsedName; - try { - parsedName = KubernetesManifest.fromFullResourceName(name); - } catch (IllegalArgumentException e) { - return null; - } - - KubernetesKind kind = parsedName.getLeft(); - String shortName = parsedName.getRight(); - String key = Keys.infrastructure(kind, account, namespace, shortName); - List relatedTypes = kindMap.translateSpinnakerKind(INSTANCES) - .stream() - .map(KubernetesKind::toString) - .collect(Collectors.toList()); - - relatedTypes.addAll(kindMap.translateSpinnakerKind(LOAD_BALANCERS) - .stream() - .map(KubernetesKind::toString) - .collect(Collectors.toList())); - - Optional serverGroupData = cacheUtils.getSingleEntryWithRelationships(kind.toString(), - key, - relatedTypes.toArray(new String[relatedTypes.size()])); - - return serverGroupData.map(cd -> { - List instanceData = kindMap.translateSpinnakerKind(INSTANCES) - .stream() - .map(k -> cacheUtils.loadRelationshipsFromCache(Collections.singletonList(cd), k.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - List loadBalancerData = kindMap.translateSpinnakerKind(LOAD_BALANCERS) - .stream() - .map(k -> cacheUtils.loadRelationshipsFromCache(Collections.singletonList(cd), k.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - return cacheUtils.resourceModelFromCacheData( - KubernetesV2ServerGroupCacheData.builder() - .serverGroupData(cd) - .instanceData(instanceData) - .loadBalancerData(loadBalancerData) - .build()); - }).orElse(null); - } - - @Override - public KubernetesV2ServerGroup getServerGroup(String account, String namespace, String name) { - return getServerGroup(account, namespace, name, true); - } - - @Override - public String getCloudProviderId() { - return KubernetesCloudProvider.getID(); - } - - @Override - public boolean supportsMinimalClusters() { - return true; - } - - private Map> groupByAccountName(Collection clusters) { - Map> result = new HashMap<>(); - for (KubernetesV2Cluster cluster : clusters) { - String accountName = cluster.getAccountName(); - Set grouping = result.get(accountName); - if (grouping == null) { - grouping = new HashSet<>(); - } - - grouping.add(cluster); - result.put(accountName, grouping); - } - - return result; - } - - private Set translateClusters(Collection clusterData) { - return clusterData.stream().map(this::translateCluster).filter(Objects::nonNull).collect(Collectors.toSet()); - } - - private Set 
translateClustersWithRelationships(Collection clusterData) { - // TODO(lwander) possible optimization: store lb relationships in cluster object to cut down on number of loads here. - List serverGroupData = kindMap.translateSpinnakerKind(SERVER_GROUPS) - .stream() - .map(kind -> cacheUtils.loadRelationshipsFromCache(clusterData, kind.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - List loadBalancerData = kindMap.translateSpinnakerKind(LOAD_BALANCERS) - .stream() - .map(kind -> cacheUtils.loadRelationshipsFromCache(serverGroupData, kind.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - List instanceData = kindMap.translateSpinnakerKind(INSTANCES) - .stream() - .map(kind -> cacheUtils.loadRelationshipsFromCache(serverGroupData, kind.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - Map> clusterToServerGroups = new HashMap<>(); - for (CacheData serverGroupDatum : serverGroupData) { - Collection clusterKeys = serverGroupDatum.getRelationships().get(CLUSTERS.toString()); - if (clusterKeys == null || clusterKeys.size() != 1) { - log.warn("Malformed cache, server group stored without cluster"); - continue; - } - - String clusterKey = clusterKeys.iterator().next(); - List storedData = clusterToServerGroups.getOrDefault(clusterKey, new ArrayList<>()); - storedData.add(serverGroupDatum); - clusterToServerGroups.put(clusterKey, storedData); - } - - Map> serverGroupToServerGroupManagerKeys = new HashMap<>(); - for (CacheData serverGroupDatum : serverGroupData) { - serverGroupToServerGroupManagerKeys.put( - serverGroupDatum.getId(), - kindMap.translateSpinnakerKind(SERVER_GROUP_MANAGERS) - .stream() - .map(kind -> serverGroupDatum.getRelationships().get(kind.toString())) - .filter(Objects::nonNull) - .flatMap(Collection::stream) - .map(Keys::parseKey) - .filter(Optional::isPresent) - .map(Optional::get) - .filter(k -> k instanceof Keys.InfrastructureCacheKey) - .map(k -> (Keys.InfrastructureCacheKey) k) - .collect(Collectors.toList()) - ); - } - - Map> serverGroupToLoadBalancers = cacheUtils.mapByRelationship(loadBalancerData, SERVER_GROUPS); - Map> serverGroupToInstances = cacheUtils.mapByRelationship(instanceData, SERVER_GROUPS); - Map> loadBalancerToServerGroups = cacheUtils.mapByRelationship(serverGroupData, LOAD_BALANCERS); - - Set result = new HashSet<>(); - for (CacheData clusterDatum : clusterData) { - List clusterServerGroups = clusterToServerGroups.getOrDefault(clusterDatum.getId(), new ArrayList<>()); - List clusterLoadBalancers = clusterServerGroups.stream() - .map(CacheData::getId) - .map(id -> serverGroupToLoadBalancers.getOrDefault(id, new ArrayList<>())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - result.add( - translateCluster( - clusterDatum, - clusterServerGroups, - clusterLoadBalancers, - serverGroupToInstances, - loadBalancerToServerGroups, - serverGroupToLoadBalancers, - serverGroupToServerGroupManagerKeys - ) - ); - } - - return result.stream() - .filter(Objects::nonNull) - .collect(Collectors.toSet()); - } - - private KubernetesV2Cluster translateCluster(CacheData clusterDatum) { - if (clusterDatum == null) { - return null; - } - - return new KubernetesV2Cluster(clusterDatum.getId()); - } - - private KubernetesV2Cluster translateCluster(CacheData clusterDatum, - List serverGroupData, - List loadBalancerData, - Map> instanceDataByServerGroup, - Map> serverGroupDataByLoadBalancer, - Map> loadBalancerDataByServerGroup, - Map> serverGroupToServerGroupManagerKeys) { - 
if (clusterDatum == null) { - return null; - } - - List serverGroups = serverGroupData.stream() - .map(cd -> cacheUtils.resourceModelFromCacheData( - KubernetesV2ServerGroupCacheData.builder() - .serverGroupData(cd) - .instanceData(instanceDataByServerGroup.getOrDefault(cd.getId(), new ArrayList<>())) - .loadBalancerData(loadBalancerDataByServerGroup.getOrDefault(cd.getId(), new ArrayList<>())) - .serverGroupManagerKeys(serverGroupToServerGroupManagerKeys.getOrDefault(cd.getId(), new ArrayList<>())) - .build()) - ) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - - List loadBalancers = loadBalancerData.stream() - .map(cd -> KubernetesV2LoadBalancer.fromCacheData(cd, - serverGroupDataByLoadBalancer.getOrDefault(cd.getId(), new ArrayList<>()), - instanceDataByServerGroup)) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - - return new KubernetesV2Cluster(clusterDatum.getId(), serverGroups, loadBalancers); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2InstanceProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2InstanceProvider.java deleted file mode 100644 index 6ef476eb4ff..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2InstanceProvider.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
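Much of the cluster provider removed above is plumbing around one inversion: mapByRelationship turns "item -> related source keys" into "source key -> items", so each cluster can look up its server groups (and each server group its load balancers and instances) without rescanning the cache. A simplified sketch, using computeIfAbsent in place of the original's getOrDefault/put sequence:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the mapByRelationship pattern: invert a relationship map so
// lookups by source key are O(1). Types are simplified to strings.
public final class RelationshipIndexDemo {
  static Map<String, List<String>> mapByRelationship(Map<String, List<String>> relationships) {
    Map<String, List<String>> index = new HashMap<>();
    relationships.forEach((item, sourceKeys) -> {
      for (String sourceKey : sourceKeys) {
        index.computeIfAbsent(sourceKey, k -> new ArrayList<>()).add(item);
      }
    });
    return index;
  }

  public static void main(String[] args) {
    Map<String, List<String>> serverGroupToLoadBalancers = Map.of(
        "replicaSet my-app-v001", List.of("service my-app"),
        "replicaSet my-app-v002", List.of("service my-app"));
    // service my-app -> [replicaSet my-app-v001, replicaSet my-app-v002]
    System.out.println(mapByRelationship(serverGroupToLoadBalancers));
  }
}
```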
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2Instance; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.model.InstanceProvider; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import io.kubernetes.client.models.V1Container; -import io.kubernetes.client.models.V1Pod; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.tuple.Pair; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Optional; - -@Component -@Slf4j -public class KubernetesV2InstanceProvider implements InstanceProvider { - private final KubernetesCacheUtils cacheUtils; - private final KubernetesSpinnakerKindMap kindMap; - private final AccountCredentialsRepository accountCredentialsRepository; - private final KubectlJobExecutor jobExecutor; - - @Autowired - KubernetesV2InstanceProvider(KubernetesCacheUtils cacheUtils, - KubernetesSpinnakerKindMap kindMap, - AccountCredentialsRepository accountCredentialsRepository, - KubectlJobExecutor jobExecutor) { - this.cacheUtils = cacheUtils; - this.kindMap = kindMap; - this.accountCredentialsRepository = accountCredentialsRepository; - this.jobExecutor = jobExecutor; - } - - @Override - public String getCloudProvider() { - return KubernetesCloudProvider.getID(); - } - - @Override - public KubernetesV2Instance getInstance(String account, String location, String fullName) { - Pair parsedName; - try { - parsedName = KubernetesManifest.fromFullResourceName(fullName); - } catch (Exception e) { - return null; - } - - KubernetesKind kind = parsedName.getLeft(); - String name = parsedName.getRight(); - String key = Keys.infrastructure(kind, account, location, name); - - Optional optionalInstanceData = cacheUtils.getSingleEntry(kind.toString(), key); - if (!optionalInstanceData.isPresent()) { - return null; - } - - CacheData instanceData = optionalInstanceData.get(); - - return KubernetesV2Instance.fromCacheData(instanceData); - } - - @Override - public String getConsoleOutput(String account, String location, String fullName) { - KubernetesNamedAccountCredentials credentials; - try { - credentials = (KubernetesNamedAccountCredentials) accountCredentialsRepository.getOne(account); - } catch (Exception e) { - log.warn("Failure getting account {}", account); - return null; - } - - if (credentials == null || credentials.getProviderVersion() != ProviderVersion.v2) { - return null; - } - - Pair parsedName; - try { - parsedName = 
KubernetesManifest.fromFullResourceName(fullName); - } catch (Exception e) { - return null; - } - - String name = parsedName.getRight(); - - V1Pod pod = KubernetesCacheDataConverter.getResource( - credentials.getCredentials().get(KubernetesKind.POD, location, name), - V1Pod.class - ); - - StringBuilder result = new StringBuilder(); - - // Make live calls rather than abuse the cache for storing all logs - for (V1Container container : pod.getSpec().getContainers()) { - result.append("====== " + container.getName() + " ======\n\n"); - try { - result.append(credentials.getCredentials().logs(location, name, container.getName())); - } catch (KubectlJobExecutor.KubectlException e) { - // Typically happens if the container/pod isn't running yet - result.append(e.getMessage()); - } - result.append("\n\n"); - } - - return result.toString(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2LoadBalancerProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2LoadBalancerProvider.java deleted file mode 100644 index 6e028809f49..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2LoadBalancerProvider.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
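getConsoleOutput in the KubernetesV2InstanceProvider removed above deliberately bypasses the cache and fetches logs live, one container at a time, appending kubectl's error text when a container is not yet running. A sketch of the concatenation, with a hypothetical in-memory log source standing in for the kubectl-backed call:

```java
import java.util.List;
import java.util.Map;

// Sketch of the per-container log concatenation. logsByContainer is a
// hypothetical stand-in for the live kubectl logs call.
public final class ConsoleOutputDemo {
  static String consoleOutput(List<String> containers, Map<String, String> logsByContainer) {
    StringBuilder result = new StringBuilder();
    for (String container : containers) {
      result.append("====== ").append(container).append(" ======\n\n");
      // The original appended the kubectl error message when the container
      // wasn't running yet, rather than failing the whole request.
      result.append(logsByContainer.getOrDefault(container, "container not running yet"));
      result.append("\n\n");
    }
    return result.toString();
  }

  public static void main(String[] args) {
    System.out.print(consoleOutput(
        List.of("app", "istio-proxy"),
        Map.of("app", "Server started on :8080")));
  }
}
```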
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2LoadBalancer; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; -import lombok.Data; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.NotImplementedException; -import org.apache.commons.lang3.tuple.Pair; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.APPLICATIONS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.INSTANCES; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.LOAD_BALANCERS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUPS; - -@Component -@Slf4j -public class KubernetesV2LoadBalancerProvider implements LoadBalancerProvider { - final private KubernetesCacheUtils cacheUtils; - final private KubernetesSpinnakerKindMap kindMap; - - @Autowired - KubernetesV2LoadBalancerProvider(KubernetesCacheUtils cacheUtils, KubernetesSpinnakerKindMap kindMap) { - this.cacheUtils = cacheUtils; - this.kindMap = kindMap; - } - - @Override - public String getCloudProvider() { - return KubernetesCloudProvider.getID(); - } - - @Override - public List list() { - return new ArrayList<>(); - } - - @Override - public LoadBalancerProvider.Item get(String name) { - throw new NotImplementedException("Not a valid operation"); - } - - @Override - public List byAccountAndRegionAndName(String account, String namespace, String fullName) { - Pair parsedName; - try { - parsedName = KubernetesManifest.fromFullResourceName(fullName); - } catch (Exception e) { - return null; - } - - KubernetesKind kind = parsedName.getLeft(); - String name = parsedName.getRight(); - String key = Keys.infrastructure(kind, account, name, name); - - Optional optionalLoadBalancerData = cacheUtils.getSingleEntry(kind.toString(), key); - if (!optionalLoadBalancerData.isPresent()) { - return null; - } - - CacheData loadBalancerData = optionalLoadBalancerData.get(); - - return new ArrayList<>(fromLoadBalancerCacheData(Collections.singletonList(loadBalancerData))); - } - - @Override - public Set getApplicationLoadBalancers(String application) { - List loadBalancerData = kindMap.translateSpinnakerKind(LOAD_BALANCERS) - .stream() - .map(kind -> cacheUtils.getTransitiveRelationship(APPLICATIONS.toString(), - Collections.singletonList(Keys.application(application)), - kind.toString()) - ) - .flatMap(Collection::stream) - 
.collect(Collectors.toList()); - - return fromLoadBalancerCacheData(loadBalancerData); - } - - private Set fromLoadBalancerCacheData(List loadBalancerData) { - List serverGroupData = kindMap.translateSpinnakerKind(SERVER_GROUPS) - .stream() - .map(kind -> cacheUtils.loadRelationshipsFromCache(loadBalancerData, kind.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - List instanceData = kindMap.translateSpinnakerKind(INSTANCES) - .stream() - .map(kind -> cacheUtils.loadRelationshipsFromCache(serverGroupData, kind.toString())) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - Map> loadBalancerToServerGroups = cacheUtils.mapByRelationship(serverGroupData, LOAD_BALANCERS); - Map> serverGroupToInstances = cacheUtils.mapByRelationship(instanceData, SERVER_GROUPS); - - return loadBalancerData.stream() - .map(cd -> KubernetesV2LoadBalancer.fromCacheData(cd, - loadBalancerToServerGroups.getOrDefault(cd.getId(), new ArrayList<>()), - serverGroupToInstances)) - .filter(Objects::nonNull) - .collect(Collectors.toSet()); - } - - @Data - private class Item implements LoadBalancerProvider.Item { - String name; - List byAccounts = new ArrayList<>(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ManifestProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ManifestProvider.java deleted file mode 100644 index cf31ab510b7..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ManifestProvider.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
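Every provider removed in this change funnels lookups through KubernetesManifest.fromFullResourceName, which splits Spinnaker's "<kind> <name>" addressing (e.g. "service my-app") and throws on malformed input; callers translate that into a null result. A sketch of the contract (the real implementation may differ):

```java
// Sketch of the fromFullResourceName contract as the removed providers
// used it: "<kind> <name>" in, [kind, name] out, exception on bad input.
public final class FullResourceNameDemo {
  static String[] fromFullResourceName(String fullName) {
    String[] parts = fullName.split(" ");
    if (parts.length != 2) {
      throw new IllegalArgumentException("Expected '<kind> <name>', got: " + fullName);
    }
    return parts; // [kind, name]
  }

  public static void main(String[] args) {
    String[] parsed = fromFullResourceName("service my-app");
    System.out.println(parsed[0] + " / " + parsed[1]); // service / my-app

    try {
      fromFullResourceName("garbage");
    } catch (IllegalArgumentException e) {
      // The providers caught this and returned null to the caller.
      System.out.println("null result in the providers: " + e.getMessage());
    }
  }
}
```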
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2Manifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPodMetric; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.model.ManifestProvider; -import com.netflix.spinnaker.moniker.Moniker; -import org.apache.commons.lang3.tuple.Pair; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; -import java.util.stream.Collectors; - -@Component -public class KubernetesV2ManifestProvider implements ManifestProvider { - private final KubernetesResourcePropertyRegistry registry; - private final KubernetesCacheUtils cacheUtils; - - @Autowired - public KubernetesV2ManifestProvider(KubernetesResourcePropertyRegistry registry, KubernetesCacheUtils cacheUtils) { - this.registry = registry; - this.cacheUtils = cacheUtils; - } - - @Override - public KubernetesV2Manifest getManifest(String account, String location, String name) { - Pair parsedName; - try { - parsedName = KubernetesManifest.fromFullResourceName(name); - } catch (Exception e) { - return null; - } - - KubernetesKind kind = parsedName.getLeft(); - String key = Keys.infrastructure( - kind, - account, - location, - parsedName.getRight() - ); - - Optional dataOptional = cacheUtils.getSingleEntry(kind.toString(), key); - if (!dataOptional.isPresent()) { - return null; - } - - CacheData data = dataOptional.get(); - KubernetesResourceProperties properties = registry.get(account, kind); - if (properties == null) { - return null; - } - - Function lastEventTimestamp = (m) -> (String) m.getOrDefault("lastTimestamp", m.getOrDefault("firstTimestamp", "n/a")); - - List events = cacheUtils.getTransitiveRelationship(kind.toString(), Collections.singletonList(key), KubernetesKind.EVENT.toString()) - .stream() - .map(KubernetesCacheDataConverter::getManifest) - .sorted(Comparator.comparing(lastEventTimestamp)) - .collect(Collectors.toList()); - - String metricKey = Keys.metric(kind, account, location, parsedName.getRight()); - List metrics = cacheUtils.getSingleEntry(Keys.Kind.KUBERNETES_METRIC.toString(), metricKey) - .map(KubernetesCacheDataConverter::getMetrics) - .orElse(Collections.emptyList()); - - KubernetesHandler handler = properties.getHandler(); - - KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(data); - Moniker moniker = KubernetesCacheDataConverter.getMoniker(data); - - return new KubernetesV2Manifest().builder() - .account(account) - .location(location) - .manifest(manifest) - 
.moniker(moniker) - .status(handler.status(manifest)) - .artifacts(handler.listArtifacts(manifest)) - .events(events) - .warnings(handler.listWarnings(manifest)) - .metrics(metrics) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2SecurityGroupProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2SecurityGroupProvider.java deleted file mode 100644 index 6a5edd9683e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2SecurityGroupProvider.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2SecurityGroup; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Collection; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -@Component -@Slf4j -public class KubernetesV2SecurityGroupProvider implements SecurityGroupProvider<KubernetesV2SecurityGroup> { - private final KubernetesCacheUtils cacheUtils; - private final KubernetesSpinnakerKindMap kindMap; - - @Autowired - KubernetesV2SecurityGroupProvider(KubernetesCacheUtils cacheUtils, KubernetesSpinnakerKindMap kindMap) { - this.cacheUtils = cacheUtils; - this.kindMap = kindMap; - } - - @Override - public String getCloudProvider() { - return KubernetesCloudProvider.getID(); - } - - @Override - public Set<KubernetesV2SecurityGroup> getAll(boolean includeRules) { - return kindMap.translateSpinnakerKind(KubernetesSpinnakerKindMap.SpinnakerKind.SECURITY_GROUPS) - .stream() - .map(KubernetesKind::toString) - .map(cacheUtils::getAllKeys) - .flatMap(Collection::stream) - .map(KubernetesV2SecurityGroup::fromCacheData) - .collect(Collectors.toSet()); - } - - @Override - public Set<KubernetesV2SecurityGroup> getAllByRegion(boolean includeRules, String namespace) { - return kindMap.translateSpinnakerKind(KubernetesSpinnakerKindMap.SpinnakerKind.SECURITY_GROUPS) - .stream() - .map(k -> { - String key = Keys.infrastructure(k, "*", namespace, "*"); - return cacheUtils.getAllDataMatchingPattern(k.toString(), key); - }) - .flatMap(Collection::stream) -
.map(KubernetesV2SecurityGroup::fromCacheData) - .collect(Collectors.toSet()); - } - - @Override - public Set<KubernetesV2SecurityGroup> getAllByAccount(boolean includeRules, String account) { - return kindMap.translateSpinnakerKind(KubernetesSpinnakerKindMap.SpinnakerKind.SECURITY_GROUPS) - .stream() - .map(k -> { - String key = Keys.infrastructure(k, account, "*", "*"); - return cacheUtils.getAllDataMatchingPattern(k.toString(), key); - }) - .flatMap(Collection::stream) - .map(KubernetesV2SecurityGroup::fromCacheData) - .collect(Collectors.toSet()); - } - - @Override - public Set<KubernetesV2SecurityGroup> getAllByAccountAndName(boolean includeRules, String account, String fullName) { - String name; - try { - name = KubernetesManifest.fromFullResourceName(fullName).getRight(); - } catch (Exception e) { - return null; - } - - return kindMap.translateSpinnakerKind(KubernetesSpinnakerKindMap.SpinnakerKind.SECURITY_GROUPS) - .stream() - .map(k -> { - String key = Keys.infrastructure(k, account, "*", name); - return cacheUtils.getAllDataMatchingPattern(k.toString(), key); - }) - .flatMap(Collection::stream) - .map(KubernetesV2SecurityGroup::fromCacheData) - .collect(Collectors.toSet()); - } - - @Override - public Set<KubernetesV2SecurityGroup> getAllByAccountAndRegion(boolean includeRule, String account, String namespace) { - return kindMap.translateSpinnakerKind(KubernetesSpinnakerKindMap.SpinnakerKind.SECURITY_GROUPS) - .stream() - .map(k -> { - String key = Keys.infrastructure(k, account, namespace, "*"); - return cacheUtils.getAllDataMatchingPattern(k.toString(), key); - }) - .flatMap(Collection::stream) - .map(KubernetesV2SecurityGroup::fromCacheData) - .collect(Collectors.toSet()); - } - - @Override - public KubernetesV2SecurityGroup get(String account, String namespace, String fullName, String _unused) { - String name; - try { - name = KubernetesManifest.fromFullResourceName(fullName).getRight(); - } catch (Exception e) { - return null; - } - - return kindMap.translateSpinnakerKind(KubernetesSpinnakerKindMap.SpinnakerKind.SECURITY_GROUPS) - .stream() - .map(k -> { - String key = Keys.infrastructure(k, account, namespace, name); - return cacheUtils.getSingleEntry(k.toString(), key).orElse(null); - }) - .filter(Objects::nonNull) - .map(KubernetesV2SecurityGroup::fromCacheData) - .findFirst() - .orElse(null); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ServerGroupManagerProvider.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ServerGroupManagerProvider.java deleted file mode 100644 index 6085aef9bbe..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/KubernetesV2ServerGroupManagerProvider.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2ServerGroupManager; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupManagerCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.model.ServerGroupManagerProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys.LogicalKind.APPLICATIONS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUPS; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUP_MANAGERS; - -@Component -public class KubernetesV2ServerGroupManagerProvider implements ServerGroupManagerProvider<KubernetesV2ServerGroupManager> { - private final KubernetesResourcePropertyRegistry registry; - private final KubernetesCacheUtils cacheUtils; - - @Autowired - public KubernetesV2ServerGroupManagerProvider(KubernetesResourcePropertyRegistry registry, KubernetesCacheUtils cacheUtils) { - this.registry = registry; - this.cacheUtils = cacheUtils; - } - - @Override - public Set<KubernetesV2ServerGroupManager> getServerGroupManagersByApplication(String application) { - CacheData applicationDatum = cacheUtils.getSingleEntry(APPLICATIONS.toString(), Keys.application(application)).orElse(null); - if (applicationDatum == null) { - return null; - } - - Collection<CacheData> serverGroupManagerData = cacheUtils.getAllRelationshipsOfSpinnakerKind(Collections.singletonList(applicationDatum), SERVER_GROUP_MANAGERS); - Collection<CacheData> serverGroupData = cacheUtils.getAllRelationshipsOfSpinnakerKind(serverGroupManagerData, SERVER_GROUPS); - - Map<String, List<CacheData>> managerToServerGroupMap = cacheUtils.mapByRelationship(serverGroupData, SERVER_GROUP_MANAGERS); - - return serverGroupManagerData.stream() - .map(cd -> - cacheUtils.resourceModelFromCacheData(KubernetesV2ServerGroupManagerCacheData.builder() - .serverGroupManagerData(cd) - .serverGroupData(managerToServerGroupMap.getOrDefault(cd.getId(), new ArrayList<>())) - .build())) - .collect(Collectors.toSet()); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2CacheData.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2CacheData.java deleted file mode 100644 index d9e3c631e15..00000000000 ---
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2CacheData.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data; - -import com.netflix.spinnaker.cats.cache.CacheData; - -public interface KubernetesV2CacheData { - CacheData primaryData(); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2ServerGroupCacheData.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2ServerGroupCacheData.java deleted file mode 100644 index 334067a8828..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2ServerGroupCacheData.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data; - -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.model.ServerGroupManager; -import com.netflix.spinnaker.clouddriver.model.ServerGroupManager.ServerGroupManagerSummary; -import lombok.Builder; -import lombok.Data; - -import java.util.List; - -@Data -@Builder -public class KubernetesV2ServerGroupCacheData implements KubernetesV2CacheData { - private CacheData serverGroupData; - private List<CacheData> instanceData; - private List<CacheData> loadBalancerData; - private List<String> serverGroupManagerKeys; - - public CacheData primaryData() { - return serverGroupData; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2ServerGroupManagerCacheData.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2ServerGroupManagerCacheData.java deleted file mode 100644 index 28bf72602ea..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/view/provider/data/KubernetesV2ServerGroupManagerCacheData.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2018 Google, Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data; - -import com.netflix.spinnaker.cats.cache.CacheData; -import lombok.Builder; -import lombok.Data; - -import java.util.List; - -@Data -@Builder -public class KubernetesV2ServerGroupManagerCacheData implements KubernetesV2CacheData { - private CacheData serverGroupManagerData; - private List<CacheData> serverGroupData; - - public CacheData primaryData() { - return serverGroupManagerData; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/artifact/KubernetesCleanupArtifactsConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/artifact/KubernetesCleanupArtifactsConverter.java deleted file mode 100644 index a7837506524..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/artifact/KubernetesCleanupArtifactsConverter.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.artifact; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.artifact.KubernetesCleanupArtifactsDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.artifact.KubernetesCleanupArtifactsOperation; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.CLEANUP_ARTIFACTS; - -@KubernetesOperation(CLEANUP_ARTIFACTS) -@Component -public class KubernetesCleanupArtifactsConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - ArtifactProvider artifactProvider; - - @Autowired - KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesCleanupArtifactsOperation(convertDescription(input), artifactProvider, registry); - } - - @Override - public KubernetesCleanupArtifactsDescription convertDescription(Map input) { - return (KubernetesCleanupArtifactsDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesCleanupArtifactsDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesDeleteManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesDeleteManifestConverter.java deleted file mode 100644 index dc89b97c88a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesDeleteManifestConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeleteManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesDeleteManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DELETE_MANIFEST; - -@KubernetesOperation(DELETE_MANIFEST) -@Component -public class KubernetesDeleteManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesDeleteManifestOperation(convertDescription(input), registry); - } - - @Override - public KubernetesDeleteManifestDescription convertDescription(Map input) { - return (KubernetesDeleteManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesDeleteManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesDeployManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesDeployManifestConverter.java deleted file mode 100644 index c6f1751618b..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesDeployManifestConverter.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesV2ArtifactProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeployManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesDeployManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DEPLOY_MANIFEST; - -@KubernetesOperation(DEPLOY_MANIFEST) -@Component -public class KubernetesDeployManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Autowired - private KubernetesV2ArtifactProvider artifactProvider; - - @Autowired - private ArtifactDownloader artifactDownloader; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesDeployManifestOperation(convertDescription(input), registry, artifactProvider); - } - - @Override - public KubernetesDeployManifestDescription convertDescription(Map input) { - return (KubernetesDeployManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesDeployManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesPatchManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesPatchManifestConverter.java deleted file mode 100644 index 2350139647d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesPatchManifestConverter.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PATCH_MANIFEST; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesPatchManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesPatchManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -@Component -@KubernetesOperation(PATCH_MANIFEST) -public class KubernetesPatchManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesPatchManifestOperation(convertDescription(input), registry); - } - - @Override - public KubernetesPatchManifestDescription convertDescription(Map input) { - return (KubernetesPatchManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesPatchManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesPauseRolloutManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesPauseRolloutManifestConverter.java deleted file mode 100644 index 0861dfe2c74..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesPauseRolloutManifestConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesPauseRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesPauseRolloutManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PAUSE_ROLLOUT_MANIFEST; - -@KubernetesOperation(PAUSE_ROLLOUT_MANIFEST) -@Component -public class KubernetesPauseRolloutManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesPauseRolloutManifestOperation(convertDescription(input), registry); - } - - @Override - public KubernetesPauseRolloutManifestDescription convertDescription(Map input) { - return (KubernetesPauseRolloutManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesPauseRolloutManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesResumeRolloutManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesResumeRolloutManifestConverter.java deleted file mode 100644 index 950feaa5c5d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesResumeRolloutManifestConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesResumeRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesResumeRolloutManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESUME_ROLLOUT_MANIFEST; - -@KubernetesOperation(RESUME_ROLLOUT_MANIFEST) -@Component -public class KubernetesResumeRolloutManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesResumeRolloutManifestOperation(convertDescription(input), registry); - } - - @Override - public KubernetesResumeRolloutManifestDescription convertDescription(Map input) { - return (KubernetesResumeRolloutManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesResumeRolloutManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesScaleManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesScaleManifestConverter.java deleted file mode 100644 index 08cbf266157..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesScaleManifestConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesScaleManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesScaleManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.SCALE_MANIFEST; - -@KubernetesOperation(SCALE_MANIFEST) -@Component -public class KubernetesScaleManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesScaleManifestOperation(convertDescription(input), registry); - } - - @Override - public KubernetesScaleManifestDescription convertDescription(Map input) { - return (KubernetesScaleManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesScaleManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesUndoRolloutManifestConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesUndoRolloutManifestConverter.java deleted file mode 100644 index 463a252b7ad..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/manifest/KubernetesUndoRolloutManifestConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.manifest; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesUndoRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesUndoRolloutManifestOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.UNDO_ROLLOUT_MANIFEST; - -@KubernetesOperation(UNDO_ROLLOUT_MANIFEST) -@Component -public class KubernetesUndoRolloutManifestConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesUndoRolloutManifestOperation(convertDescription(input), registry); - } - - @Override - public KubernetesUndoRolloutManifestDescription convertDescription(Map input) { - return (KubernetesUndoRolloutManifestDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesUndoRolloutManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/servergroup/KubernetesResizeServerGroupConverter.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/servergroup/KubernetesResizeServerGroupConverter.java deleted file mode 100644 index a896ba31c86..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/converter/servergroup/KubernetesResizeServerGroupConverter.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.converter.servergroup; - -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.KubernetesAtomicOperationConverterHelper; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeployManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.servergroup.KubernetesResizeServerGroupDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.servergroup.KubernetesResizeServerGroupOperation; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESIZE_SERVER_GROUP; - -@KubernetesOperation(RESIZE_SERVER_GROUP) -@Component -public class KubernetesResizeServerGroupConverter extends AbstractAtomicOperationsCredentialsSupport { - @Autowired - private KubernetesResourcePropertyRegistry registry; - - @Override - public AtomicOperation convertOperation(Map input) { - return new KubernetesResizeServerGroupOperation(convertDescription(input), registry); - } - - @Override - public KubernetesResizeServerGroupDescription convertDescription(Map input) { - return (KubernetesResizeServerGroupDescription) KubernetesAtomicOperationConverterHelper - .convertDescription(input, this, KubernetesDeployManifestDescription.class); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesCoordinates.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesCoordinates.java deleted file mode 100644 index 20e52b14b77..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesCoordinates.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -@Builder -@AllArgsConstructor -@NoArgsConstructor -@Data -public class KubernetesCoordinates { - KubernetesKind kind; - String namespace; - String name; -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesPatchOptions.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesPatchOptions.java deleted file mode 100644 index aed0624b01b..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesPatchOptions.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description; - -import lombok.Data; - -@Data -public class KubernetesPatchOptions { - private MergeStrategy mergeStrategy; - private boolean record; - - public enum MergeStrategy { - strategic, - json, - merge - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesPodMetric.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesPodMetric.java deleted file mode 100644 index 62414f6539c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesPodMetric.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description; - -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class KubernetesPodMetric { - String podName; - List containerMetrics = new ArrayList<>(); - - @Data - @Builder - @NoArgsConstructor - @AllArgsConstructor - public static class ContainerMetric { - String containerName; - Map metrics; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesResourceProperties.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesResourceProperties.java deleted file mode 100644 index 77a1fc2376d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesResourceProperties.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description; - -import com.netflix.spinnaker.clouddriver.kubernetes.config.CustomKubernetesResource; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.KubernetesUnversionedArtifactConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.KubernetesVersionedArtifactConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CustomKubernetesHandlerFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; -import org.apache.commons.lang.StringUtils; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class KubernetesResourceProperties { - KubernetesHandler handler; - boolean versioned; - KubernetesVersionedArtifactConverter versionedConverter; - KubernetesUnversionedArtifactConverter unversionedConverter; - - public static KubernetesResourceProperties fromCustomResource(CustomKubernetesResource customResource) { - String deployPriority = customResource.getDeployPriority(); - int deployPriorityValue; - if (StringUtils.isEmpty(deployPriority)) { - deployPriorityValue = WORKLOAD_CONTROLLER_PRIORITY.getValue(); - } else { - try { - deployPriorityValue = Integer.valueOf(deployPriority); - } catch (NumberFormatException e) { - deployPriorityValue = KubernetesHandler.DeployPriority.fromString(deployPriority).getValue(); - } - } - - KubernetesHandler handler = 
CustomKubernetesHandlerFactory.create(KubernetesKind.fromString(customResource.getKubernetesKind()), - KubernetesSpinnakerKindMap.SpinnakerKind.fromString(customResource.getSpinnakerKind()), - customResource.isVersioned(), - deployPriorityValue); - - return KubernetesResourceProperties.builder() - .handler(handler) - .versioned(customResource.isVersioned()) - .versionedConverter(new KubernetesVersionedArtifactConverter()) - .unversionedConverter(new KubernetesUnversionedArtifactConverter()) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesResourcePropertyRegistry.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesResourcePropertyRegistry.java deleted file mode 100644 index 44529fd56d1..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesResourcePropertyRegistry.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.KubernetesUnversionedArtifactConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.KubernetesVersionedArtifactConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; - -@Component -public class KubernetesResourcePropertyRegistry { - @Autowired - public KubernetesResourcePropertyRegistry(List handlers, - KubernetesSpinnakerKindMap kindMap - ) { - for (KubernetesHandler handler : handlers) { - KubernetesResourceProperties properties = KubernetesResourceProperties.builder() - .handler(handler) - .versioned(handler.versioned()) - .versionedConverter(new KubernetesVersionedArtifactConverter()) - .unversionedConverter(new KubernetesUnversionedArtifactConverter()) - .build(); - - kindMap.addRelationship(handler.spinnakerKind(), handler.kind()); - put(handler.kind(), properties); - } - } - - public KubernetesResourceProperties get(String account, KubernetesKind kind) { - ConcurrentHashMap propertyMap = accountProperties.get(account); - KubernetesResourceProperties properties = null; - - if (!kind.isRegistered()) { - return globalProperties.get(KubernetesKind.NONE); - } - - if (propertyMap != null) { - // account-level properties take precedence - properties = propertyMap.get(kind); - } - - if (properties == null) { - properties = globalProperties.get(kind); - } - - return properties; - } - - private void 
put(KubernetesKind kind, KubernetesResourceProperties properties) { - globalProperties.put(kind, properties); - } - - public void registerAccountProperty(String account, KubernetesResourceProperties properties) { - ConcurrentHashMap propertyMap = accountProperties.get(account); - if (propertyMap == null) { - propertyMap = new ConcurrentHashMap<>(); - } - - propertyMap.put(properties.getHandler().kind(), properties); - - accountProperties.put(account, propertyMap); - } - - public Collection values() { - Collection result = new ArrayList<>(globalProperties.values()); - result.addAll(accountProperties.values() - .stream() - .map(ConcurrentHashMap::values) - .flatMap(Collection::stream) - .collect(Collectors.toList()) - ); - - return result; - } - - private final ConcurrentHashMap globalProperties = new ConcurrentHashMap<>(); - - private final ConcurrentHashMap> accountProperties = new ConcurrentHashMap<>(); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesSpinnakerKindMap.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesSpinnakerKindMap.java deleted file mode 100644 index cd77e707d55..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesSpinnakerKindMap.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import java.util.stream.Collectors; -import org.springframework.stereotype.Component; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -@Component -public class KubernetesSpinnakerKindMap { - public enum SpinnakerKind { - INSTANCES("instances"), - CONFIGS("configs"), - SERVER_GROUPS("serverGroups"), - LOAD_BALANCERS("loadBalancers"), - SECURITY_GROUPS("securityGroups"), - SERVER_GROUP_MANAGERS("serverGroupManagers"), - UNCLASSIFIED("unclassified"); - - final private String id; - - SpinnakerKind(String id) { - this.id = id; - } - - @Override - public String toString() { - return id; - } - - @JsonCreator - public static SpinnakerKind fromString(String name) { - return Arrays.stream(values()) - .filter(k -> k.toString().equalsIgnoreCase(name)) - .findFirst() - .orElse(UNCLASSIFIED); - } - } - - private Map> spinnakerToKubernetes = new HashMap<>(); - private Map kubernetesToSpinnaker = new HashMap<>(); - - void addRelationship(SpinnakerKind spinnakerKind, KubernetesKind kubernetesKind) { - Set kinds = spinnakerToKubernetes.get(spinnakerKind); - if (kinds == null) { - kinds = new HashSet<>(); - } - - kinds.add(kubernetesKind); - spinnakerToKubernetes.put(spinnakerKind, kinds); - kubernetesToSpinnaker.put(kubernetesKind, spinnakerKind); - } - - public SpinnakerKind translateKubernetesKind(KubernetesKind kubernetesKind) { - return kubernetesToSpinnaker.getOrDefault(kubernetesKind, SpinnakerKind.UNCLASSIFIED); - } - - public Set translateSpinnakerKind(SpinnakerKind spinnakerKind) { - return spinnakerToKubernetes.get(spinnakerKind); - } - - public Set allKubernetesKinds() { - return kubernetesToSpinnaker.keySet(); - } - - public Map kubernetesToSpinnakerKindStringMap() { - return kubernetesToSpinnaker.entrySet().stream().filter( - x -> x.getValue() != SpinnakerKind.UNCLASSIFIED && x.getKey() != KubernetesKind.NONE - ).collect( - Collectors.toMap(x -> x.getKey().toString(), x -> x.getValue().toString())); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/RegistryUtils.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/RegistryUtils.java deleted file mode 100644 index f87029afafe..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/RegistryUtils.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler;
-import lombok.extern.slf4j.Slf4j;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-@Slf4j
-public class RegistryUtils {
-  private static Optional<KubernetesHandler> lookupHandler(KubernetesResourcePropertyRegistry propertyRegistry, String account, KubernetesKind kind) {
-    if (kind == null) {
-      return Optional.empty();
-    }
-
-    KubernetesResourceProperties properties = propertyRegistry.get(account, kind);
-    if (properties == null) {
-      return Optional.empty();
-    }
-
-    KubernetesHandler handler = properties.getHandler();
-
-    if (handler == null) {
-      return Optional.empty();
-    }
-
-    return Optional.of(handler);
-  }
-
-  static public void removeSensitiveKeys(KubernetesResourcePropertyRegistry propertyRegistry, String account, KubernetesManifest manifest) {
-    lookupHandler(propertyRegistry, account, manifest.getKind()).ifPresent(h -> h.removeSensitiveKeys(manifest));
-  }
-
-  static public void addRelationships(KubernetesResourcePropertyRegistry propertyRegistry, String account, KubernetesKind kind, Map<KubernetesKind, List<KubernetesManifest>> allResources, Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) {
-    lookupHandler(propertyRegistry, account, kind).ifPresent(h -> h.addRelationships(allResources, relationshipMap));
-  }
-}
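// Reviewer sketch (not part of this change): the lookup-then-dispatch pattern the deleted
// RegistryUtils implements. The Optional chain means kinds with no registered handler are
// silently a no-op, so callers never need null checks. The registry and manifest here are
// assumed to come from the surrounding clouddriver code.
package com.netflix.spinnaker.clouddriver.kubernetes.v2.description;

import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;

class RegistryUtilsSketch {
  static void scrub(KubernetesResourcePropertyRegistry registry, String account, KubernetesManifest manifest) {
    // Dispatches to the handler registered for manifest.getKind(), if any; otherwise does nothing.
    RegistryUtils.removeSensitiveKeys(registry, account, manifest);
  }
}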
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesApiVersion.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesApiVersion.java
deleted file mode 100644
index c9261c46f46..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesApiVersion.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonValue;
-import lombok.EqualsAndHashCode;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-
-@EqualsAndHashCode
-public class KubernetesApiVersion {
-  public static KubernetesApiVersion V1 = new KubernetesApiVersion("v1");
-  public static KubernetesApiVersion EXTENSIONS_V1BETA1 = new KubernetesApiVersion("extensions/v1beta1");
-  public static KubernetesApiVersion NETWORKING_K8S_IO_V1 = new KubernetesApiVersion("network.k8s.io/v1");
-  public static KubernetesApiVersion APPS_V1BETA1 = new KubernetesApiVersion("apps/v1beta1");
-  public static KubernetesApiVersion APPS_V1BETA2 = new KubernetesApiVersion("apps/v1beta2");
-  public static KubernetesApiVersion BATCH_V1 = new KubernetesApiVersion("batch/v1");
-
-  private final String name;
-
-  private static List<KubernetesApiVersion> values;
-
-  protected KubernetesApiVersion(String name) {
-    if (values == null) {
-      values = Collections.synchronizedList(new ArrayList<>());
-    }
-
-    this.name = name;
-    values.add(this);
-  }
-
-  @Override
-  @JsonValue
-  public String toString() {
-    return name;
-  }
-
-  @JsonCreator
-  public static KubernetesApiVersion fromString(String name) {
-    if (StringUtils.isEmpty(name)) {
-      return null;
-    }
-
-    synchronized (values) {
-      Optional<KubernetesApiVersion> versionOptional = values.stream()
-          .filter(v -> v.name.equalsIgnoreCase(name))
-          .findAny();
-
-      // separate from the above chain to avoid concurrent modification of the values list
-      return versionOptional.orElseGet(() -> new KubernetesApiVersion(name));
-    }
-  }
-}
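// Reviewer sketch (not part of this change): the interning behavior of the deleted
// KubernetesApiVersion.fromString. Versions are looked up case-insensitively in the
// shared values list, and an unknown version string mints (and caches) a new instance
// rather than failing. The "apps/v1" example is illustrative.
package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;

class ApiVersionSketch {
  public static void main(String[] args) {
    // Known versions resolve to the existing instance, regardless of case.
    KubernetesApiVersion known = KubernetesApiVersion.fromString("apps/v1beta2");
    System.out.println(known == KubernetesApiVersion.APPS_V1BETA2); // true

    // Unknown versions are registered on first use and reused afterwards.
    KubernetesApiVersion first = KubernetesApiVersion.fromString("apps/v1");
    KubernetesApiVersion second = KubernetesApiVersion.fromString("APPS/V1");
    System.out.println(first == second); // true
  }
}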
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesCachingProperties.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesCachingProperties.java
deleted file mode 100644
index 4e853902572..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesCachingProperties.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-@Data
-@Builder
-@NoArgsConstructor
-@AllArgsConstructor
-public class KubernetesCachingProperties {
-  boolean ignore;
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesDeleteManifestDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesDeleteManifestDescription.java
deleted file mode 100644
index 7275de2a18b..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesDeleteManifestDescription.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import io.kubernetes.client.models.V1DeleteOptions;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-
-@EqualsAndHashCode(callSuper = true)
-@Data
-public class KubernetesDeleteManifestDescription extends KubernetesMultiManifestOperationDescription {
-  V1DeleteOptions options;
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesDeployManifestDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesDeployManifestDescription.java
deleted file mode 100644
index e4ed1f02aed..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesDeployManifestDescription.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
-import com.netflix.spinnaker.kork.artifacts.model.Artifact;
-import com.netflix.spinnaker.moniker.Moniker;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-
-import java.util.List;
-
-@Data
-@EqualsAndHashCode(callSuper = true)
-public class KubernetesDeployManifestDescription extends KubernetesAtomicOperationDescription {
-  @Deprecated
-  KubernetesManifest manifest;
-  List<KubernetesManifest> manifests;
-  Moniker moniker;
-  KubernetesManifestSpinnakerRelationships relationships;
-  List<Artifact> requiredArtifacts;
-  List<Artifact> optionalArtifacts;
-  Boolean versioned;
-  Source source;
-  Artifact manifestArtifact;
-
-  public enum Source {
-    artifact,
-    text
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesKind.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesKind.java
deleted file mode 100644
index 98658b1b0a9..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesKind.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonValue; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - -@Slf4j -public class KubernetesKind { - public static KubernetesKind API_SERVICE = new KubernetesKind("apiService", false); - public static KubernetesKind CLUSTER_ROLE = new KubernetesKind("clusterRole", false); - public static KubernetesKind CLUSTER_ROLE_BINDING = new KubernetesKind("clusterRoleBinding", false); - public static KubernetesKind CONFIG_MAP = new KubernetesKind("configMap", "cm"); - public static KubernetesKind CONTROLLER_REVISION = new KubernetesKind("controllerRevision"); - public static KubernetesKind CUSTOM_RESOURCE_DEFINITION = new KubernetesKind("customResourceDefinition", "crd", false, false); - public static KubernetesKind CRON_JOB = new KubernetesKind("cronJob"); - public static KubernetesKind DAEMON_SET = new KubernetesKind("daemonSet", "ds", true, true); - public static KubernetesKind DEPLOYMENT = new KubernetesKind("deployment", "deploy", true, true); - public static KubernetesKind EVENT = new KubernetesKind("event"); - public static KubernetesKind HORIZONTAL_POD_AUTOSCALER = new KubernetesKind("horizontalpodautoscaler", "hpa"); - public static KubernetesKind INGRESS = new KubernetesKind("ingress", "ing"); - public static KubernetesKind JOB = new KubernetesKind("job"); - public static KubernetesKind POD = new KubernetesKind("pod", "po", true, true); - public static KubernetesKind POD_DISRUPTION_BUDGET = new KubernetesKind("podDisruptionBudget"); - public static KubernetesKind REPLICA_SET = new KubernetesKind("replicaSet", "rs", true, true); - public static KubernetesKind ROLE = new KubernetesKind("role", false); - public static KubernetesKind ROLE_BINDING = new KubernetesKind("roleBinding", false); - public static KubernetesKind NAMESPACE = new KubernetesKind("namespace", "ns", false, false); - public static KubernetesKind NETWORK_POLICY = new KubernetesKind("networkPolicy", "netpol", true, true); - public static KubernetesKind PERSISTENT_VOLUME = new KubernetesKind("persistentVolume", "pv", false, false); - public static KubernetesKind PERSISTENT_VOLUME_CLAIM = new KubernetesKind("persistentVolumeClaim", "pvc"); - public static KubernetesKind SECRET = new KubernetesKind("secret"); - public static KubernetesKind SERVICE = new KubernetesKind("service", "svc", true, true); - public static KubernetesKind SERVICE_ACCOUNT = new KubernetesKind("serviceAccount", "sa"); - public static KubernetesKind STATEFUL_SET = new KubernetesKind("statefulSet", null, true, true); - public static KubernetesKind STORAGE_CLASS = new KubernetesKind("storageClass", "sc", false, false); - - // special kind that should never be assigned to a manifest, used only to represent objects whose kind is not in spinnaker's registry - public static KubernetesKind NONE = new KubernetesKind("none", null, true, false); - - private final String name; - private final String alias; - private boolean isNamespaced; - // generally reserved for workloads, can be read as "does this belong to a spinnaker cluster?" - private final boolean hasClusterRelationship; - // was this kind found after spinnaker started? 
-  private boolean isDynamic;
-  // was this kind added by a user in their clouddriver.yml?
-  private boolean isRegistered;
-
-  @Getter
-  private static List<KubernetesKind> values;
-
-  protected KubernetesKind(String name, String alias, boolean isNamespaced, boolean hasClusterRelationship) {
-    if (values == null) {
-      values = Collections.synchronizedList(new ArrayList<>());
-    }
-
-    this.name = name;
-    this.alias = alias;
-    this.isNamespaced = isNamespaced;
-    this.hasClusterRelationship = hasClusterRelationship;
-    this.isDynamic = false;
-    this.isRegistered = true;
-    values.add(this);
-  }
-
-  protected KubernetesKind(String name) {
-    this(name, null, true, false);
-  }
-
-  protected KubernetesKind(String name, String alias) {
-    this(name, alias, true, false);
-  }
-
-  protected KubernetesKind(String name, boolean isNamespaced) {
-    this(name, null, isNamespaced, false);
-  }
-
-  public boolean isNamespaced() {
-    return this.isNamespaced;
-  }
-
-  public boolean hasClusterRelationship() {
-    return this.hasClusterRelationship;
-  }
-
-  public boolean isDynamic() {
-    return this.isDynamic;
-  }
-
-  public boolean isRegistered() {
-    return this.isRegistered;
-  }
-
-  @Override
-  @JsonValue
-  public String toString() {
-    return name;
-  }
-
-  @JsonCreator
-  public static KubernetesKind fromString(String name) {
-    return fromString(name, true, true);
-  }
-
-  public static KubernetesKind fromString(String name, boolean registered, boolean namespaced) {
-    if (StringUtils.isEmpty(name)) {
-      return null;
-    }
-
-    if (name.equalsIgnoreCase(KubernetesKind.NONE.toString())) {
-      throw new IllegalArgumentException("The 'NONE' kind cannot be read.");
-    }
-
-    synchronized (values) {
-      Optional<KubernetesKind> kindOptional = values.stream()
-          .filter(v -> v.name.equalsIgnoreCase(name) || (v.alias != null && v.alias.equalsIgnoreCase(name)))
-          .findAny();
-
-      // separate from the above chain to avoid concurrent modification of the values list
-      return kindOptional.orElseGet(() -> {
-        log.info("Dynamically registering {}, (namespaced: {}, registered: {})", name, namespaced, registered);
-        KubernetesKind result = new KubernetesKind(name);
-        result.isDynamic = true;
-        result.isRegistered = registered;
-        result.isNamespaced = namespaced;
-        return result;
-      });
-    }
-  }
-
-  public static List<KubernetesKind> registeredStringList(List<String> names) {
-    return names.stream()
-        .map(KubernetesKind::fromString)
-        .collect(Collectors.toList());
-  }
-}
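// Reviewer sketch (not part of this change): how the deleted KubernetesKind.fromString
// resolves names and kubectl-style aliases, and how it registers kinds it has never seen
// (e.g. CRD-backed kinds) on the fly. The "prometheusRule" kind is an illustrative example.
package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;

class KindLookupSketch {
  public static void main(String[] args) {
    // Lookup matches either the name or the alias, case-insensitively.
    System.out.println(KubernetesKind.fromString("deploy") == KubernetesKind.DEPLOYMENT);      // true
    System.out.println(KubernetesKind.fromString("ReplicaSet") == KubernetesKind.REPLICA_SET); // true

    // An unknown kind is minted and flagged as dynamic instead of failing.
    KubernetesKind crdKind = KubernetesKind.fromString("prometheusRule");
    System.out.println(crdKind.isDynamic()); // true

    // The sentinel NONE kind is rejected outright.
    try {
      KubernetesKind.fromString("none");
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage()); // The 'NONE' kind cannot be read.
    }
  }
}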
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifest.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifest.java
deleted file mode 100644
index 8298e5e6b5f..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifest.java
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import lombok.Data;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.lang.Double;
-
-public class KubernetesManifest extends HashMap<String, Object> {
-  private static ObjectMapper mapper = new ObjectMapper();
-
-  @Override
-  public KubernetesManifest clone() {
-    return (KubernetesManifest) super.clone();
-  }
-
-  private static <T> T getRequiredField(KubernetesManifest manifest, String field) {
-    T res = (T) manifest.get(field);
-    if (res == null) {
-      throw MalformedManifestException.missingField(manifest, field);
-    }
-
-    return res;
-  }
-
-  @JsonIgnore
-  public KubernetesKind getKind() {
-    return KubernetesKind.fromString(getRequiredField(this, "kind"));
-  }
-
-  @JsonIgnore
-  public void setKind(KubernetesKind kind) {
-    put("kind", kind.toString());
-  }
-
-  @JsonIgnore
-  public KubernetesApiVersion getApiVersion() {
-    return KubernetesApiVersion.fromString(getRequiredField(this, "apiVersion"));
-  }
-
-  @JsonIgnore
-  public void setApiVersion(KubernetesApiVersion apiVersion) {
-    put("apiVersion", apiVersion.toString());
-  }
-
-  @JsonIgnore
-  private Map<String, Object> getMetadata() {
-    return getRequiredField(this, "metadata");
-  }
-
-  @JsonIgnore
-  public String getName() {
-    return (String) getMetadata().get("name");
-  }
-
-  @JsonIgnore
-  public String getUid() {
-    return (String) getMetadata().get("uid");
-  }
-
-  @JsonIgnore
-  public void setName(String name) {
-    getMetadata().put("name", name);
-  }
-
-  @JsonIgnore
-  public String getNamespace() {
-    String namespace = (String) getMetadata().get("namespace");
-    return StringUtils.isEmpty(namespace) ? "" : namespace;
-  }
-
-  @JsonIgnore
-  public void setNamespace(String namespace) {
-    getMetadata().put("namespace", namespace);
-  }
-
-  @JsonIgnore
-  public String getCreationTimestamp() {
-    return getMetadata().containsKey("creationTimestamp")
-        ? getMetadata().get("creationTimestamp").toString()
-        : null;
-  }
-
-  @JsonIgnore
-  public List<OwnerReference> getOwnerReferences() {
-    Map<String, Object> metadata = getMetadata();
-    Object ownerReferences = metadata.get("ownerReferences");
-    if (ownerReferences == null) {
-      return new ArrayList<>();
-    }
-
-    return mapper.convertValue(ownerReferences, new TypeReference<List<OwnerReference>>() {});
-  }
-
-  @JsonIgnore
-  public Map<String, String> getLabels() {
-    Map<String, String> result = (Map<String, String>) getMetadata().get("labels");
-    if (result == null) {
-      result = new HashMap<>();
-      getMetadata().put("labels", result);
-    }
-
-    return result;
-  }
-
-  @JsonIgnore
-  public Map<String, String> getAnnotations() {
-    Map<String, String> result = (Map<String, String>) getMetadata().get("annotations");
-    if (result == null) {
-      result = new HashMap<>();
-      getMetadata().put("annotations", result);
-    }
-
-    return result;
-  }
-
-  @JsonIgnore
-  public Double getReplicas() {
-    if (!containsKey("spec")) {
-      return null;
-    }
-
-    Map<String, Object> spec = (Map<String, Object>) get("spec");
-    if (!spec.containsKey("replicas")) {
-      return null;
-    }
-    Double replicas = (Double) spec.get("replicas");
-    return replicas;
-  }
-
-  @JsonIgnore
-  public void setReplicas(Double replicas) {
-    if (!containsKey("spec")) {
-      return;
-    }
-
-    Map<String, Object> spec = (Map<String, Object>) get("spec");
-    if (!spec.containsKey("replicas")) {
-      return;
-    }
-    spec.put("replicas", replicas);
-  }
-
-  @JsonIgnore
-  public Optional<Map<String, String>> getSpecTemplateLabels() {
-    if (!containsKey("spec")) {
-      return Optional.empty();
-    }
-
-    Map<String, Object> spec = (Map<String, Object>) get("spec");
-    if (!spec.containsKey("template")) {
-      return Optional.empty();
-    }
-
-    Map<String, Object> template = (Map<String, Object>) spec.get("template");
-    if (!template.containsKey("metadata")) {
-      return Optional.empty();
-    }
-
-    Map<String, Object> metadata = (Map<String, Object>) template.get("metadata");
-    Map<String, String> result = (Map<String, String>) metadata.get("labels");
-    if (result == null) {
-      result = new HashMap<>();
-      metadata.put("labels", result);
-    }
-
-    return Optional.of(result);
-  }
-
-  @JsonIgnore
-  public Optional<Map<String, String>> getSpecTemplateAnnotations() {
-    if (!containsKey("spec")) {
-      return Optional.empty();
-    }
-
-    Map<String, Object> spec = (Map<String, Object>) get("spec");
-    if (!spec.containsKey("template")) {
-      return Optional.empty();
-    }
-
-    Map<String, Object> template = (Map<String, Object>) spec.get("template");
-    if (!template.containsKey("metadata")) {
-      return Optional.empty();
-    }
-
-    Map<String, Object> metadata = (Map<String, Object>) template.get("metadata");
-    Map<String, String> result = (Map<String, String>) metadata.get("annotations");
-    if (result == null) {
-      result = new HashMap<>();
-      metadata.put("annotations", result);
-    }
-
-    return Optional.of(result);
-  }
-
-  @JsonIgnore
-  public Object getStatus() {
-    return get("status");
-  }
-
-  @JsonIgnore
-  public String getFullResourceName() {
-    return getFullResourceName(getKind(), getName());
-  }
-
-  public static String getFullResourceName(KubernetesKind kind, String name) {
-    return String.join(" ", kind.toString(), name);
-  }
-
-  public boolean isNewerThanObservedGeneration() {
-    try {
-      Long generation = (Long) getMetadata().get("generation");
-      Long observedGeneration = ((Map<String, Long>) getStatus()).get("observedGeneration");
-      if (observedGeneration == null || (generation != null && generation > observedGeneration)) {
-        return false;
-      }
-    } catch (ClassCastException e) {
-    }
-    return true;
-  }
-
-  /*
-   * The reasoning behind removing metadata for comparison is that it shouldn't affect the runtime behavior
-   * of the resource we are creating.
-   */
-  public boolean nonMetadataEquals(KubernetesManifest other) {
-    if (other == null) {
-      return false;
-    }
-
-    KubernetesManifest cloneThis = this.clone();
-    KubernetesManifest cloneOther = other.clone();
-
-    cloneThis.remove("metadata");
-    cloneOther.remove("metadata");
-
-    return cloneThis.equals(cloneOther);
-  }
-
-  public static Pair<KubernetesKind, String> fromFullResourceName(String fullResourceName) {
-    String[] split = fullResourceName.split(" ");
-    if (split.length != 2) {
-      throw new IllegalArgumentException("Expected a full resource name of the form <kind> <name>");
-    }
-
-    KubernetesKind kind = KubernetesKind.fromString(split[0]);
-    String name = split[1];
-
-    return new ImmutablePair<>(kind, name);
-  }
-
-  @Data
-  public static class OwnerReference {
-    KubernetesApiVersion apiVersion;
-    KubernetesKind kind;
-    String name;
-    String uid;
-    boolean blockOwnerDeletion;
-    boolean controller;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestAnnotater.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestAnnotater.java
deleted file mode 100644
index 24526579b90..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestAnnotater.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.netflix.frigga.Names;
-import com.netflix.spinnaker.kork.artifacts.model.Artifact;
-import com.netflix.spinnaker.moniker.Moniker;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-@Slf4j
-public class KubernetesManifestAnnotater {
-  private static final String SPINNAKER_ANNOTATION = "spinnaker.io";
-  private static final String RELATIONSHIP_ANNOTATION_PREFIX = "relationships." + SPINNAKER_ANNOTATION;
-  private static final String ARTIFACT_ANNOTATION_PREFIX = "artifact." + SPINNAKER_ANNOTATION;
-  private static final String MONIKER_ANNOTATION_PREFIX = "moniker." + SPINNAKER_ANNOTATION;
-  private static final String CACHING_ANNOTATION_PREFIX = "caching." + SPINNAKER_ANNOTATION;
-  private static final String STRATEGY_ANNOTATION_PREFIX = "strategy."
+ SPINNAKER_ANNOTATION; - private static final String LOAD_BALANCERS = RELATIONSHIP_ANNOTATION_PREFIX + "/loadBalancers"; - private static final String SECURITY_GROUPS = RELATIONSHIP_ANNOTATION_PREFIX + "/securityGroups"; - private static final String CLUSTER = MONIKER_ANNOTATION_PREFIX + "/cluster"; - private static final String APPLICATION = MONIKER_ANNOTATION_PREFIX + "/application"; - private static final String STACK = MONIKER_ANNOTATION_PREFIX + "/stack"; - private static final String DETAIL = MONIKER_ANNOTATION_PREFIX + "/detail"; - private static final String SEQUENCE = MONIKER_ANNOTATION_PREFIX + "/sequence"; - private static final String TYPE = ARTIFACT_ANNOTATION_PREFIX + "/type"; - private static final String NAME = ARTIFACT_ANNOTATION_PREFIX + "/name"; - private static final String LOCATION = ARTIFACT_ANNOTATION_PREFIX + "/location"; - private static final String VERSION = ARTIFACT_ANNOTATION_PREFIX + "/version"; - private static final String IGNORE_CACHING = CACHING_ANNOTATION_PREFIX + "/ignore"; - private static final String VERSIONED = STRATEGY_ANNOTATION_PREFIX + "/versioned"; - private static final String MAX_VERSION_HISTORY = STRATEGY_ANNOTATION_PREFIX + "/max-version-history"; - private static final String USE_SOURCE_CAPACITY = STRATEGY_ANNOTATION_PREFIX + "/use-source-capacity"; - - private static final String KUBERNETES_ANNOTATION = "kubernetes.io"; - private static final String KUBECTL_ANNOTATION_PREFIX = "kubectl." + KUBERNETES_ANNOTATION; - private static final String DEPLOYMENT_ANNOTATION_PREFIX = "deployment." + KUBERNETES_ANNOTATION; - private static final String DEPLOYMENT_REVISION = DEPLOYMENT_ANNOTATION_PREFIX + "/revision"; - private static final String KUBECTL_LAST_APPLIED_CONFIGURATION = KUBECTL_ANNOTATION_PREFIX + "/last-applied-configuration"; - - private static ObjectMapper objectMapper = new ObjectMapper(); - - private static void storeAnnotation(Map annotations, String key, Object value) { - if (value == null) { - return; - } - - if (annotations.containsKey(key)) { - return; - } - - try { - if (value instanceof String) { - // The "write value as string" method will attach quotes which are ugly to read - annotations.put(key, (String) value); - } else { - annotations.put(key, objectMapper.writeValueAsString(value)); - } - } catch (JsonProcessingException e) { - throw new IllegalArgumentException("Illegal annotation value for '" + key + "': " + e); - } - } - - private static T getAnnotation(Map annotations, String key, TypeReference typeReference) { - return getAnnotation(annotations, key, typeReference, null); - } - - private static boolean stringTypeReference(TypeReference typeReference) { - if (typeReference.getType() == null || typeReference.getType().getTypeName() == null) { - log.warn("Malformed type reference {}", typeReference); - return false; - } - - return typeReference.getType().getTypeName().equals(String.class.getName()); - } - - // This is to read values that were annotated with the ObjectMapper with quotes, before we started ignoring the quotes - private static boolean looksLikeSerializedString(String value) { - if (StringUtils.isEmpty(value) || value.length() == 1) { - return false; - } - - return value.charAt(0) == '"' && value.charAt(value.length() - 1) == '"'; - } - - private static T getAnnotation(Map annotations, String key, TypeReference typeReference, T defaultValue) { - String value = annotations.get(key); - if (value == null) { - return defaultValue; - } - - try { - boolean wantsString = stringTypeReference(typeReference); - - 
if (wantsString && !looksLikeSerializedString(value)) { - return (T) value; - } else { - return objectMapper.readValue(value, typeReference); - } - } catch (Exception e) { - log.warn("Illegally annotated resource for '" + key + "': " + e); - return null; - } - } - - public static void annotateManifest(KubernetesManifest manifest, KubernetesManifestSpinnakerRelationships relationships) { - Map annotations = manifest.getAnnotations(); - storeAnnotations(annotations, relationships); - - manifest.getSpecTemplateAnnotations().flatMap(a -> { - storeAnnotations(a, relationships); - return Optional.empty(); - }); - } - - public static void annotateManifest(KubernetesManifest manifest, Moniker moniker) { - Map annotations = manifest.getAnnotations(); - storeAnnotations(annotations, moniker); - - manifest.getSpecTemplateAnnotations().flatMap(a -> { - storeAnnotations(a, moniker); - return Optional.empty(); - }); - } - - public static void annotateManifest(KubernetesManifest manifest, Artifact artifact) { - Map annotations = manifest.getAnnotations(); - storeAnnotations(annotations, artifact); - - manifest.getSpecTemplateAnnotations().flatMap(a -> { - storeAnnotations(a, artifact); - return Optional.empty(); - }); - } - - private static void storeAnnotations(Map annotations, Moniker moniker) { - if (moniker == null) { - throw new IllegalArgumentException("Every resource deployed via spinnaker must be assigned a moniker"); - } - - storeAnnotation(annotations, CLUSTER, moniker.getCluster()); - storeAnnotation(annotations, APPLICATION, moniker.getApp()); - storeAnnotation(annotations, STACK, moniker.getStack()); - storeAnnotation(annotations, DETAIL, moniker.getDetail()); - storeAnnotation(annotations, SEQUENCE, moniker.getSequence()); - } - - - private static void storeAnnotations(Map annotations, KubernetesManifestSpinnakerRelationships relationships) { - if (relationships == null) { - return; - } - - storeAnnotation(annotations, LOAD_BALANCERS, relationships.getLoadBalancers()); - storeAnnotation(annotations, SECURITY_GROUPS, relationships.getSecurityGroups()); - } - - private static void storeAnnotations(Map annotations, Artifact artifact) { - if (artifact == null) { - return; - } - - storeAnnotation(annotations, TYPE, artifact.getType()); - storeAnnotation(annotations, NAME, artifact.getName()); - storeAnnotation(annotations, LOCATION, artifact.getLocation()); - storeAnnotation(annotations, VERSION, artifact.getVersion()); - } - - public static KubernetesManifestSpinnakerRelationships getManifestRelationships(KubernetesManifest manifest) { - Map annotations = manifest.getAnnotations(); - - return new KubernetesManifestSpinnakerRelationships() - .setLoadBalancers(getAnnotation(annotations, LOAD_BALANCERS, new TypeReference>() {})) - .setSecurityGroups(getAnnotation(annotations, SECURITY_GROUPS, new TypeReference>() {})); - } - - public static Optional getArtifact(KubernetesManifest manifest) { - Map annotations = manifest.getAnnotations(); - String type = getAnnotation(annotations, TYPE, new TypeReference() {}); - if (StringUtils.isEmpty(type)) { - return Optional.empty(); - } - - return Optional.of(Artifact.builder() - .type(type) - .name(getAnnotation(annotations, NAME, new TypeReference() {})) - .location(getAnnotation(annotations, LOCATION, new TypeReference() {})) - .version(getAnnotation(annotations, VERSION, new TypeReference() {})) - .build()); - } - - public static Moniker getMoniker(KubernetesManifest manifest) { - Names parsed = Names.parseName(manifest.getName()); - Map annotations = 
manifest.getAnnotations(); - - return Moniker.builder() - .cluster(getAnnotation(annotations, CLUSTER, new TypeReference() {}, parsed.getCluster())) - .app(getAnnotation(annotations, APPLICATION, new TypeReference() {}, parsed.getApp())) - .stack(getAnnotation(annotations, STACK, new TypeReference() {}, null)) - .detail(getAnnotation(annotations, DETAIL, new TypeReference() {}, null)) - .sequence(getAnnotation(annotations, SEQUENCE, new TypeReference() {}, - manifest.getKind() == KubernetesKind.REPLICA_SET ? - getAnnotation(annotations, DEPLOYMENT_REVISION, new TypeReference() {}, null) : - null - )) - .build(); - } - - public static KubernetesCachingProperties getCachingProperties(KubernetesManifest manifest) { - Map annotations = manifest.getAnnotations(); - - return KubernetesCachingProperties.builder() - .ignore(getAnnotation(annotations, IGNORE_CACHING, new TypeReference() {}, false)) - .build(); - } - - public static KubernetesManifestStrategy getStrategy(KubernetesManifest manifest) { - Map annotations = manifest.getAnnotations(); - - return KubernetesManifestStrategy.builder() - .versioned(getAnnotation(annotations, VERSIONED, new TypeReference() {})) - .maxVersionHistory(getAnnotation(annotations, MAX_VERSION_HISTORY, new TypeReference() {})) - .useSourceCapacity(getAnnotation(annotations, USE_SOURCE_CAPACITY, new TypeReference() {})) - .build(); - } - - public static KubernetesManifest getLastAppliedConfiguration(KubernetesManifest manifest) { - Map annotations = manifest.getAnnotations(); - - return getAnnotation(annotations, KUBECTL_LAST_APPLIED_CONFIGURATION, new TypeReference() { }, null); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestLabeler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestLabeler.java deleted file mode 100644 index 1f61089084e..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestLabeler.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.extern.slf4j.Slf4j; - -import java.util.Map; -import java.util.Optional; - -@Slf4j -public class KubernetesManifestLabeler { - private static final String KUBERNETES_LABEL = "kubernetes.io"; - private static final String APP_LABEL_PREFIX = "app." 
+ KUBERNETES_LABEL; - private static final String APP_NAME = APP_LABEL_PREFIX + "/name"; - private static final String APP_VERSION = APP_LABEL_PREFIX + "/version"; - private static final String APP_COMPONENT = APP_LABEL_PREFIX + "/component"; - private static final String APP_PART_OF = APP_LABEL_PREFIX + "/part-of"; - private static final String APP_MANAGED_BY = APP_LABEL_PREFIX + "/managed-by"; - - private static ObjectMapper objectMapper = new ObjectMapper(); - - private static void storeLabelAndOverwrite(Map labels, String key, String value) { - if (value == null) { - return; - } - - labels.put(key, value); - } - - private static void storeLabel(Map labels, String key, String value) { - if (value == null) { - return; - } - - if (labels.containsKey(key)) { - return; - } - - labels.put(key, value); - } - - public static void labelManifest(KubernetesManifest manifest, Moniker moniker) { - Map labels = manifest.getLabels(); - storeLabels(labels, moniker); - - manifest.getSpecTemplateLabels().flatMap(l -> { - storeLabels(l, moniker); - return Optional.empty(); - }); - } - - private static void storeLabels(Map labels, Moniker moniker) { - if (moniker == null) { - return; - } - - // other properties aren't currently set by Spinnaker - storeLabel(labels, APP_NAME, moniker.getApp()); - storeLabelAndOverwrite(labels, APP_MANAGED_BY, "spinnaker"); - } - - public static KubernetesApplicationProperties getApplicationProperties(KubernetesManifest manifest) { - Map labels = manifest.getLabels(); - - return new KubernetesApplicationProperties() - .setName(labels.get(APP_NAME)) - .setVersion(labels.get(APP_VERSION)) - .setComponent(labels.get(APP_COMPONENT)) - .setPartOf(labels.get(APP_PART_OF)) - .setManagedBy(labels.get(APP_MANAGED_BY)); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestMetadata.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestMetadata.java deleted file mode 100644 index e2391583fb4..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestMetadata.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.netflix.spinnaker.moniker.Moniker; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.Optional; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class KubernetesManifestMetadata { - KubernetesManifestSpinnakerRelationships relationships; - Optional artifact; - Moniker moniker; -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestOperationDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestOperationDescription.java deleted file mode 100644 index 7b7f9509798..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestOperationDescription.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import lombok.Data; -import lombok.EqualsAndHashCode; -import org.apache.commons.lang3.tuple.Pair; - -@EqualsAndHashCode(callSuper = true) -@Data -public class KubernetesManifestOperationDescription extends KubernetesAtomicOperationDescription { - private String manifestName; - private String location; - - @JsonIgnore - public KubernetesCoordinates getPointCoordinates() { - Pair parsedName = KubernetesManifest.fromFullResourceName(manifestName); - - return KubernetesCoordinates.builder() - .namespace(location) - .kind(parsedName.getLeft()) - .name(parsedName.getRight()) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestSpinnakerRelationships.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestSpinnakerRelationships.java deleted file mode 100644 index 844bca5673f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestSpinnakerRelationships.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import lombok.Data;
-
-import java.util.ArrayList;
-import java.util.List;
-
-@Data
-public class KubernetesManifestSpinnakerRelationships {
-  List<String> loadBalancers = new ArrayList<>();
-  List<String> securityGroups = new ArrayList<>();
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestStrategy.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestStrategy.java
deleted file mode 100644
index 229edb467ec..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestStrategy.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-@Data
-@Builder
-@NoArgsConstructor
-@AllArgsConstructor
-public class KubernetesManifestStrategy {
-  Boolean versioned;
-  Integer maxVersionHistory;
-  Boolean useSourceCapacity;
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesMultiManifestOperationDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesMultiManifestOperationDescription.java
deleted file mode 100644
index 4fed3446d7f..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesMultiManifestOperationDescription.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesSelectorList;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang3.tuple.Pair;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-@EqualsAndHashCode(callSuper = true)
-@Data
-public class KubernetesMultiManifestOperationDescription extends KubernetesAtomicOperationDescription {
-  private String manifestName;
-  private String location;
-  private List<String> kinds = new ArrayList<>();
-  private KubernetesSelectorList labelSelectors = new KubernetesSelectorList();
-
-  @JsonIgnore
-  public boolean isDynamic() {
-    return StringUtils.isEmpty(manifestName);
-  }
-
-  public List<KubernetesCoordinates> getAllCoordinates() {
-    return kinds.stream()
-        .map(k -> KubernetesCoordinates.builder()
-            .namespace(location)
-            .kind(KubernetesKind.fromString(k))
-            .build())
-        .collect(Collectors.toList());
-  }
-
-  @JsonIgnore
-  @Deprecated
-  public KubernetesCoordinates getPointCoordinates() {
-    Pair<KubernetesKind, String> parsedName = KubernetesManifest.fromFullResourceName(manifestName);
-
-    return KubernetesCoordinates.builder()
-        .namespace(location)
-        .kind(parsedName.getLeft())
-        .name(parsedName.getRight())
-        .build();
-  }
-}
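// Reviewer sketch (not part of this change): the two addressing modes of the deleted
// KubernetesMultiManifestOperationDescription. With a manifestName like
// "replicaSet my-app-v003" it targets a single resource; without one it is "dynamic"
// and resolves coordinates per requested kind in the target namespace. The description
// instance is assumed to be populated by the surrounding operation plumbing.
package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;

class MultiManifestSketch {
  static void demo(KubernetesMultiManifestOperationDescription description) {
    if (description.isDynamic()) {
      // No manifestName given: one KubernetesCoordinates per kind, all in `location`.
      description.getAllCoordinates().forEach(System.out::println);
    } else {
      // "replicaSet my-app-v003" splits on the space into kind + name.
      System.out.println(description.getPointCoordinates());
    }
  }
}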
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesPatchManifestDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesPatchManifestDescription.java
deleted file mode 100644
index d7be0cd4d0b..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesPatchManifestDescription.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License")
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPatchOptions;
-import com.netflix.spinnaker.kork.artifacts.model.Artifact;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import org.apache.commons.lang3.tuple.Pair;
-
-import java.util.List;
-
-@Data
-@EqualsAndHashCode(callSuper = true)
-public class KubernetesPatchManifestDescription extends KubernetesAtomicOperationDescription {
-  private String manifestName;
-  private String location;
-
-  // This will only be a portion of a full manifest so calls to some required fields can fail.
-  // Using the KubernetesManifest type makes it simpler to reuse the ArtifactReplacement logic.
-  // TODO: change Orca to only send a single manifest.
-  private KubernetesManifest patchBody;
-  private List<Artifact> requiredArtifacts;
-  private List<Artifact> allArtifacts;
-  private Artifact manifestArtifact;
-  private KubernetesPatchOptions options;
-
-  @JsonIgnore
-  public KubernetesCoordinates getPointCoordinates() {
-    Pair<KubernetesKind, String> parsedName = KubernetesManifest.fromFullResourceName(manifestName);
-
-    return KubernetesCoordinates.builder()
-        .namespace(location)
-        .kind(parsedName.getLeft())
-        .name(parsedName.getRight())
-        .build();
-  }
-}
-
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesPauseRolloutManifestDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesPauseRolloutManifestDescription.java
deleted file mode 100644
index 39de52e5acd..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesPauseRolloutManifestDescription.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import lombok.Data; -import lombok.EqualsAndHashCode; - -@EqualsAndHashCode(callSuper = true) -@Data -public class KubernetesPauseRolloutManifestDescription extends KubernetesManifestOperationDescription { -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesResumeRolloutManifestDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesResumeRolloutManifestDescription.java deleted file mode 100644 index 642773b2d83..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesResumeRolloutManifestDescription.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import lombok.Data; -import lombok.EqualsAndHashCode; - -@EqualsAndHashCode(callSuper = true) -@Data -public class KubernetesResumeRolloutManifestDescription extends KubernetesManifestOperationDescription { -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesSourceCapacity.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesSourceCapacity.java deleted file mode 100644 index ffe9d97eb1f..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesSourceCapacity.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import lombok.AllArgsConstructor; -import lombok.Data; -import lombok.NoArgsConstructor; -import java.lang.Double; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -public class KubernetesSourceCapacity { - - public static Double getSourceCapacity(KubernetesManifest manifest, KubernetesV2Credentials credentials) { - KubernetesManifest currentManifest = credentials.get(manifest.getKind(), manifest.getNamespace(), manifest.getName()); - if (currentManifest != null) { - return currentManifest.getReplicas(); - } - return null; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/servergroup/KubernetesServerGroupOperationDescription.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/servergroup/KubernetesServerGroupOperationDescription.java deleted file mode 100644 index 317b504066d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/servergroup/KubernetesServerGroupOperationDescription.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.servergroup; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import lombok.Data; -import lombok.EqualsAndHashCode; -import org.apache.commons.lang3.tuple.Pair; - -@EqualsAndHashCode(callSuper = true) -@Data -public class KubernetesServerGroupOperationDescription extends KubernetesAtomicOperationDescription { - private String serverGroupName; - private String region; // :( - - @JsonIgnore - public KubernetesCoordinates getCoordinates() { - Pair parsedName = KubernetesManifest.fromFullResourceName(serverGroupName); - - return KubernetesCoordinates.builder() - .namespace(region) - .kind(parsedName.getLeft()) - .name(parsedName.getRight()) - .build(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/names/KubernetesManifestNamer.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/names/KubernetesManifestNamer.java deleted file mode 100644 index 4b3a30b898b..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/names/KubernetesManifestNamer.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/names/KubernetesManifestNamer.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/names/KubernetesManifestNamer.java deleted file mode 100644 index 4b3a30b898b..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/names/KubernetesManifestNamer.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.names; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestAnnotater; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestLabeler; -import com.netflix.spinnaker.clouddriver.names.NamingStrategy; -import com.netflix.spinnaker.moniker.Moniker; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.stereotype.Component; - -@Component -public class KubernetesManifestNamer implements NamingStrategy<KubernetesManifest> { - @Value("${kubernetes.v2.applyAppLabels:true}") - boolean applyAppLabels; - - @Override - public String getName() { - return "kubernetesAnnotations"; - } - - @Override - public void applyMoniker(KubernetesManifest obj, Moniker moniker) { - KubernetesManifestAnnotater.annotateManifest(obj, moniker); - if (applyAppLabels) { - KubernetesManifestLabeler.labelManifest(obj, moniker); - } - } - - @Override - public Moniker deriveMoniker(KubernetesManifest obj) { - return KubernetesManifestAnnotater.getMoniker(obj); - } -}
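The deleted namer above persists a Moniker by writing annotations (and, when applyAppLabels is set, app labels) onto the manifest. A rough sketch of the annotation half using plain maps; the moniker.spinnaker.io/* keys follow Spinnaker's documented convention, but the exact key set here is an assumption, not the deleted KubernetesManifestAnnotater implementation:

    import java.util.HashMap;
    import java.util.Map;

    class MonikerAnnotationSketch {
      // Records the moniker in metadata.annotations so it can be derived back out later.
      @SuppressWarnings("unchecked")
      static void applyMoniker(Map<String, Object> metadata, String application, String cluster) {
        Map<String, String> annotations = (Map<String, String>)
            metadata.computeIfAbsent("annotations", k -> new HashMap<String, String>());
        annotations.put("moniker.spinnaker.io/application", application); // assumed key
        annotations.put("moniker.spinnaker.io/cluster", cluster);         // assumed key
      }
    }

Deriving the moniker is then just a read of the same keys, which is why the namer can round-trip through deriveMoniker.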
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/OperationResult.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/OperationResult.java deleted file mode 100644 index c1c0a46cb55..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/OperationResult.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.RegistryUtils; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class OperationResult { - Map<String, Set<String>> manifestNamesByNamespace = new HashMap<>(); - Set<KubernetesManifest> manifests = new HashSet<>(); - Set<Artifact> createdArtifacts = new HashSet<>(); - Set<Artifact> boundArtifacts = new HashSet<>(); - - public void removeSensitiveKeys(KubernetesResourcePropertyRegistry propertyRegistry, String accountName) { - manifests.forEach(m -> RegistryUtils.removeSensitiveKeys(propertyRegistry, accountName, m)); - } - - public OperationResult addManifest(KubernetesManifest manifest) { - manifests.add(manifest); - - Set<String> addedNames = manifestNamesByNamespace.getOrDefault(manifest.getNamespace(), new HashSet<>()); - addedNames.add(manifest.getFullResourceName()); - manifestNamesByNamespace.put(manifest.getNamespace(), addedNames); - return this; - } - - public void merge(OperationResult other) { - for (Map.Entry<String, Set<String>> entry : other.manifestNamesByNamespace.entrySet()) { - Set<String> thisManifests = this.manifestNamesByNamespace.getOrDefault(entry.getKey(), new HashSet<>()); - thisManifests.addAll(entry.getValue()); - this.manifestNamesByNamespace.put(entry.getKey(), thisManifests); - } - - this.manifests.addAll(other.manifests); - this.createdArtifacts.addAll(other.createdArtifacts); - this.boundArtifacts.addAll(other.boundArtifacts); - } -}
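OperationResult above is a running union of everything an operation touched: merge folds another result in by unioning each namespace's set of full resource names rather than overwriting it. The same bookkeeping in isolation, with illustrative names:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class MergeSketch {
      // Union per-namespace name sets, mirroring the merge logic above.
      static void merge(Map<String, Set<String>> into, Map<String, Set<String>> from) {
        from.forEach((namespace, names) ->
            into.computeIfAbsent(namespace, k -> new HashSet<>()).addAll(names));
      }

      public static void main(String[] args) {
        Map<String, Set<String>> a = new HashMap<>();
        a.put("default", new HashSet<>(Set.of("deployment my-app")));
        merge(a, Map.of("default", Set.of("service my-app")));
        System.out.println(a); // {default=[deployment my-app, service my-app]} (order may vary)
      }
    }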
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/artifact/KubernetesCleanupArtifactsOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/artifact/KubernetesCleanupArtifactsOperation.java deleted file mode 100644 index 850e691ee5c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/artifact/KubernetesCleanupArtifactsOperation.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.artifact; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.artifact.KubernetesCleanupArtifactsDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestAnnotater; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestStrategy; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import io.kubernetes.client.models.V1DeleteOptions; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - -@Slf4j -public class KubernetesCleanupArtifactsOperation implements AtomicOperation<Void> { - private final KubernetesCleanupArtifactsDescription description; - private final KubernetesV2Credentials credentials; - private final String accountName; - private final ArtifactProvider artifactProvider; - private final KubernetesResourcePropertyRegistry registry; - private static final String OP_NAME = "CLEANUP_KUBERNETES_ARTIFACTS"; - - public KubernetesCleanupArtifactsOperation(KubernetesCleanupArtifactsDescription description, ArtifactProvider artifactProvider, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.artifactProvider = artifactProvider; - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public Void operate(List priorOutputs) { - List<Artifact> artifacts = description.getManifests().stream() - .map(this::artifactsToDelete) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - - artifacts.forEach(a -> { - String type = a.getType(); - if (!type.startsWith("kubernetes/")) { - log.warn("Non-kubernetes type deletion requested..."); - return; - } - String kind = type.substring("kubernetes/".length()); - KubernetesResourceProperties properties = registry.get(accountName, KubernetesKind.fromString(kind)); - if (properties == null) { - log.warn("No properties for artifact {}, ignoring", a); - return; - } - - getTask().updateStatus(OP_NAME, "Deleting artifact '" + a + "'"); - KubernetesHandler handler = properties.getHandler(); - String name = a.getName(); - if (StringUtils.isNotEmpty(a.getVersion())) { - name = String.join("-", name, a.getVersion()); - } - // todo add to outputs - handler.delete(credentials, a.getLocation(), name, null, new V1DeleteOptions()); - });
- - return null; - } - - private List<Artifact> artifactsToDelete(KubernetesManifest manifest) { - KubernetesManifestStrategy strategy = KubernetesManifestAnnotater.getStrategy(manifest); - if (strategy.getMaxVersionHistory() == null) { - return new ArrayList<>(); - } - - int maxVersionHistory = strategy.getMaxVersionHistory(); - Optional<Artifact> optional = KubernetesManifestAnnotater.getArtifact(manifest); - if (!optional.isPresent()) { - return new ArrayList<>(); - } - - Artifact artifact = optional.get(); - - List<Artifact> artifacts = artifactProvider.getArtifacts(artifact.getType(), artifact.getName(), artifact.getLocation()) - .stream() - .filter(a -> a.getMetadata() != null && accountName.equals(a.getMetadata().get("account"))) - .collect(Collectors.toList()); - - if (maxVersionHistory >= artifacts.size()) { - return new ArrayList<>(); - } else { - return artifacts.subList(0, artifacts.size() - maxVersionHistory); - } - } -}
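artifactsToDelete above keeps the newest maxVersionHistory versions of an artifact and returns the remainder for deletion, which only works because the artifact provider is assumed to return versions ordered oldest-first. The retention arithmetic on its own (names are illustrative):

    import java.util.List;

    class VersionPruneSketch {
      // Given versions ordered oldest-first, return those beyond the retention limit.
      static List<String> toDelete(List<String> versions, int maxVersionHistory) {
        if (maxVersionHistory >= versions.size()) {
          return List.of(); // nothing to prune
        }
        return versions.subList(0, versions.size() - maxVersionHistory);
      }

      public static void main(String[] args) {
        // Keep the 2 newest of 4: v000 and v001 are pruned.
        System.out.println(toDelete(List.of("v000", "v001", "v002", "v003"), 2));
      }
    }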
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanDelete.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanDelete.java deleted file mode 100644 index 97511efc9ba..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanDelete.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.OperationResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesSelectorList; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import io.kubernetes.client.models.V1DeleteOptions; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -public interface CanDelete { - KubernetesKind kind(); - - default OperationResult delete(KubernetesV2Credentials credentials, String namespace, String name, KubernetesSelectorList labelSelectors, V1DeleteOptions options) { - options = options == null ? new V1DeleteOptions() : options; - List<String> deletedNames = credentials.delete(kind(), namespace, name, labelSelectors, options); - OperationResult result = new OperationResult(); - Set<String> fullNames = deletedNames.stream().map(n -> KubernetesManifest.getFullResourceName(kind(), n)).collect(Collectors.toSet()); - - result.setManifestNamesByNamespace(new HashMap<>(Collections.singletonMap(namespace, fullNames))); - return result; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanDeploy.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanDeploy.java deleted file mode 100644 index af447055005..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanDeploy.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.OperationResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -public interface CanDeploy { - default OperationResult deploy(KubernetesV2Credentials credentials, KubernetesManifest manifest) { - credentials.deploy(manifest); - return new OperationResult().addManifest(manifest); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanPatch.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanPatch.java deleted file mode 100644 index 8b0d239ecf0..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanPatch.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPatchOptions; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.OperationResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import java.util.HashMap; - -public interface CanPatch { - KubernetesKind kind(); - - default OperationResult patch(KubernetesV2Credentials credentials, String namespace, String name, - KubernetesPatchOptions options, KubernetesManifest manifest) { - credentials.patch(kind(), namespace, name, options, manifest); - - KubernetesManifest patchedManifest = new KubernetesManifest(); - patchedManifest.putIfAbsent("metadata", new HashMap()); // Hack: Set mandatory field - patchedManifest.setNamespace(namespace); - patchedManifest.setName(name); - patchedManifest.setKind(kind()); - return new OperationResult().addManifest(patchedManifest); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanPauseRollout.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanPauseRollout.java deleted file mode 100644 index ce77bc3fec6..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanPauseRollout.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -public interface CanPauseRollout { - KubernetesKind kind(); - - default void pauseRollout(KubernetesV2Credentials credentials, String namespace, String name) { - credentials.pauseRollout(kind(), namespace, name); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanResize.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanResize.java deleted file mode 100644 index 78c983498fe..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanResize.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.model.ServerGroup.Capacity; - -public interface CanResize { - KubernetesKind kind(); - - default void resize(KubernetesV2Credentials credentials, String namespace, String name, Capacity capacity) { - credentials.scale(kind(), namespace, name, capacity.getDesired()); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanResumeRollout.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanResumeRollout.java deleted file mode 100644 index fb4399a2303..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanResumeRollout.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -public interface CanResumeRollout { - KubernetesKind kind(); - - default void resumeRollout(KubernetesV2Credentials credentials, String namespace, String name) { - credentials.resumeRollout(kind(), namespace, name); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanRollout.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanRollout.java deleted file mode 100644 index 6e3f6d75440..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanRollout.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -import java.util.List; - -public interface CanRollout { - KubernetesKind kind(); - - default List<Integer> historyRollout(KubernetesV2Credentials credentials, String namespace, String name) { - return credentials.historyRollout(kind(), namespace, name); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanScale.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanScale.java deleted file mode 100644 index ae473f277b7..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanScale.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -public interface CanScale { - KubernetesKind kind(); - - default void scale(KubernetesV2Credentials credentials, String namespace, String name, int replicas) { - credentials.scale(kind(), namespace, name, replicas); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanUndoRollout.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanUndoRollout.java deleted file mode 100644 index 3f9702c5b0c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CanUndoRollout.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; - -public interface CanUndoRollout extends CanRollout { - KubernetesKind kind(); - - default void undoRollout(KubernetesV2Credentials credentials, String namespace, String name, int revision) { - credentials.undoRollout(kind(), namespace, name, revision); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CustomKubernetesHandlerFactory.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CustomKubernetesHandlerFactory.java deleted file mode 100644 index e605fde4243..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/CustomKubernetesHandlerFactory.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.CustomKubernetesCachingAgentFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2ServerGroup; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2ServerGroupManager; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.ManifestBasedModel; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2CacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupManagerCacheData; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.model.Manifest; -import lombok.extern.slf4j.Slf4j; - -public class CustomKubernetesHandlerFactory { - public static KubernetesHandler create(KubernetesKind kubernetesKind, SpinnakerKind spinnakerKind, boolean versioned, int deployPriority) { - 
return new Handler(kubernetesKind, spinnakerKind, versioned, deployPriority); - } - - @Slf4j - private static class Handler extends KubernetesHandler implements ModelHandler { - private final KubernetesKind kubernetesKind; - private final SpinnakerKind spinnakerKind; - private final boolean versioned; - private final int deployPriority; - - Handler(KubernetesKind kubernetesKind, SpinnakerKind spinnakerKind, boolean versioned, int deployPriority) { - this.kubernetesKind = kubernetesKind; - this.spinnakerKind = spinnakerKind; - this.versioned = versioned; - this.deployPriority = deployPriority; - } - - @Override - public int deployPriority() { - return deployPriority; - } - - @Override - public KubernetesKind kind() { - return kubernetesKind; - } - - @Override - public boolean versioned() { - return versioned; - } - - @Override - public SpinnakerKind spinnakerKind() { - return spinnakerKind; - } - - @Override - public Manifest.Status status(KubernetesManifest manifest) { - return new Manifest.Status(); - } - - @Override - public KubernetesV2CachingAgent buildCachingAgent( - KubernetesNamedAccountCredentials namedAccountCredentials, - KubernetesResourcePropertyRegistry propertyRegistry, - ObjectMapper objectMapper, - Registry registry, - int agentIndex, - int agentCount - ) { - return CustomKubernetesCachingAgentFactory.create( - kubernetesKind, - namedAccountCredentials, - propertyRegistry, - objectMapper, - registry, - agentIndex, - agentCount - ); - } - - @Override - public ManifestBasedModel fromCacheData(KubernetesV2CacheData cacheData) { - switch (spinnakerKind()) { - case SERVER_GROUPS: - return KubernetesV2ServerGroup.fromCacheData((KubernetesV2ServerGroupCacheData) cacheData); - case SERVER_GROUP_MANAGERS: - return KubernetesV2ServerGroupManager.fromCacheData((KubernetesV2ServerGroupManagerCacheData) cacheData); - default: - // TODO(dpeach): finish implementing for other SpinnakerKinds. - log.warn("No default cache data model mapping for Spinnaker kind " + spinnakerKind()); - return null; - } - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesAPIServiceHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesAPIServiceHandler.java deleted file mode 100644 index 902b7a30ebc..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesAPIServiceHandler.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2018 Mirantis, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.API_SERVICE_PRIORITY; - -@Component -public class KubernetesAPIServiceHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return API_SERVICE_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.API_SERVICE; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { return new Status(); } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesClusterRoleBindingHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesClusterRoleBindingHandler.java deleted file mode 100644 index 3d2cab17d6c..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesClusterRoleBindingHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Joel Wilsson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.ROLE_BINDING_PRIORITY; - -@Component -public class KubernetesClusterRoleBindingHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return ROLE_BINDING_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.CLUSTER_ROLE_BINDING; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { return new Status(); } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesClusterRoleHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesClusterRoleHandler.java deleted file mode 100644 index 605805ab37a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesClusterRoleHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Joel Wilsson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.ROLE_PRIORITY; - -@Component -public class KubernetesClusterRoleHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return ROLE_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.CLUSTER_ROLE; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { return new Status(); } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesConfigMapHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesConfigMapHandler.java deleted file mode 100644 index b3dce0d4221..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesConfigMapHandler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_PRIORITY; - -@Component -public class KubernetesConfigMapHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return MOUNTABLE_DATA_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.CONFIG_MAP; - } - - @Override - public boolean versioned() { - return true; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.CONFIGS; - } - - @Override - public Status status(KubernetesManifest manifest) { - return new Status(); - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesControllerRevisionHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesControllerRevisionHandler.java deleted file mode 100644 index ecd2e06f2a9..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesControllerRevisionHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -@Component -public class KubernetesControllerRevisionHandler extends KubernetesHandler { - @Override - public int deployPriority() { - throw new IllegalStateException("Controller revisions cannot be deployed."); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.CONTROLLER_REVISION; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { - return new Status(); - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesCronJobHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesCronJobHandler.java deleted file mode 100644 index f31c5cf1416..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesCronJobHandler.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import io.kubernetes.client.models.V2alpha1CronJob; -import io.kubernetes.client.models.V2alpha1CronJobStatus; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; - -@Component -public class KubernetesCronJobHandler extends KubernetesHandler implements - CanDelete, - ServerGroupHandler { - - public KubernetesCronJobHandler() { - registerReplacer(ArtifactReplacerFactory.dockerImageReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.secretVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapKeyValueFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretKeyValueFromReplacer()); - } - - @Override - public int deployPriority() { - return WORKLOAD_CONTROLLER_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.CRON_JOB; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUPS; - } - - @Override - public Status status(KubernetesManifest manifest) { - V2alpha1CronJob v2alpha1CronJob = KubernetesCacheDataConverter.getResource(manifest, V2alpha1CronJob.class); - return status(v2alpha1CronJob); - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } - - private Status status(V2alpha1CronJob job) { - Status result = new Status(); - V2alpha1CronJobStatus status = job.getStatus(); - if (status == null) { - result.unstable("No status reported yet") - .unavailable("No availability reported"); - return result; - } - - if (status.getActive() != null) { - return result.unstable(String.format("%s job(s) in progress", status.getActive().size())); - } - - return result; - } - -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesCustomResourceDefinitionHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesCustomResourceDefinitionHandler.java deleted file mode 100644 index 858d5676aa8..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesCustomResourceDefinitionHandler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2018 Google, Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.RESOURCE_DEFINITION_PRIORITY; - -@Component -public class KubernetesCustomResourceDefinitionHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return RESOURCE_DEFINITION_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.CUSTOM_RESOURCE_DEFINITION; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { - return new Status(); - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDaemonSetHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDaemonSetHandler.java deleted file mode 100644 index 291cfe55271..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDaemonSetHandler.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import io.kubernetes.client.models.V1beta2DaemonSet; -import io.kubernetes.client.models.V1beta2DaemonSetStatus; -import org.springframework.stereotype.Component; - -import java.util.Map; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; - -@Component -public class KubernetesDaemonSetHandler extends KubernetesHandler implements - CanResize, - CanPauseRollout, - CanResumeRollout, - CanUndoRollout, - ServerGroupHandler { - - public KubernetesDaemonSetHandler() { - registerReplacer(ArtifactReplacerFactory.dockerImageReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.secretVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapKeyValueFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretKeyValueFromReplacer()); - } - - @Override - public int deployPriority() { - return WORKLOAD_CONTROLLER_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.DAEMON_SET; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.SERVER_GROUPS; - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } - - @Override - public Status status(KubernetesManifest manifest) { - if (!manifest.isNewerThanObservedGeneration()) { - return (new Status()).unknown(); - } - V1beta2DaemonSet v1beta2DaemonSet = KubernetesCacheDataConverter.getResource(manifest, V1beta2DaemonSet.class); - return status(v1beta2DaemonSet); - } - - @Override - public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) { - Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils); - result.put("serverGroup", result.get("name")); - - return result; - } - - private Status status(V1beta2DaemonSet daemonSet) { - Status result = new Status(); - - V1beta2DaemonSetStatus status = daemonSet.getStatus(); - if (status == null) { - result.unstable("No status reported yet") - .unavailable("No availability reported"); - return result; - } - - if (!daemonSet.getSpec().getUpdateStrategy().getType().equalsIgnoreCase("rollingupdate")) { - return result; - } - - int desiredReplicas = status.getDesiredNumberScheduled(); - Integer existing =
status.getCurrentNumberScheduled(); - if (existing == null || desiredReplicas > existing) { - return result.unstable("Waiting for all replicas to be scheduled"); - } - - existing = status.getUpdatedNumberScheduled(); - if (existing != null && desiredReplicas > existing) { - return result.unstable("Waiting for all updated replicas to be scheduled"); - } - - existing = status.getNumberAvailable(); - if (existing == null || desiredReplicas > existing) { - return result.unstable("Waiting for all replicas to be available"); - } - - existing = status.getNumberReady(); - if (existing == null || desiredReplicas > existing) { - return result.unstable("Waiting for all replicas to be ready"); - } - - return result; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDeploymentHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDeploymentHandler.java deleted file mode 100644 index 079e84b94aa..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDeploymentHandler.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import io.kubernetes.client.models.V1beta2Deployment; -import io.kubernetes.client.models.V1beta2DeploymentCondition; -import io.kubernetes.client.models.V1beta2DeploymentStatus; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.APPS_V1BETA1; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.APPS_V1BETA2; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.EXTENSIONS_V1BETA1; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; - -@Component -public class KubernetesDeploymentHandler extends KubernetesHandler implements - CanResize, - CanScale, - CanPauseRollout, - CanResumeRollout, - CanUndoRollout, - ServerGroupManagerHandler { - - public KubernetesDeploymentHandler() { - registerReplacer(ArtifactReplacerFactory.dockerImageReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.secretVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapKeyValueFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretKeyValueFromReplacer()); - } - - @Override - public int deployPriority() { - return WORKLOAD_CONTROLLER_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.DEPLOYMENT; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUP_MANAGERS; - } - - @Override - public Status status(KubernetesManifest manifest) { - if (manifest.getApiVersion().equals(EXTENSIONS_V1BETA1) - || manifest.getApiVersion().equals(APPS_V1BETA1) - || manifest.getApiVersion().equals(APPS_V1BETA2)) { - if (!manifest.isNewerThanObservedGeneration()) { - return (new Status()).unknown(); - } - V1beta2Deployment appsV1beta2Deployment = KubernetesCacheDataConverter.getResource(manifest, V1beta2Deployment.class); - return status(appsV1beta2Deployment); - } else { - throw new UnsupportedVersionException(manifest); - } - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } - - private Status status(V1beta2Deployment deployment) { - Status result = new Status(); - V1beta2DeploymentStatus status = deployment.getStatus(); - if (status == null) { - result.unstable("No
status reported yet") - .unavailable("No availability reported"); - return result; - } - - V1beta2DeploymentCondition paused = status.getConditions() - .stream() - .filter(c -> c.getReason().equalsIgnoreCase("deploymentpaused")) - .findAny() - .orElse(null); - - V1beta2DeploymentCondition available = status.getConditions() - .stream() - .filter(c -> c.getType().equalsIgnoreCase("available")) - .findAny() - .orElse(null); - - if (paused != null) { - result.paused(paused.getMessage()); - } - - if (available != null && available.getStatus().equalsIgnoreCase("false")) { - result.unavailable(available.getMessage()); - } - - V1beta2DeploymentCondition condition = status.getConditions() - .stream() - .filter(c -> c.getType().equalsIgnoreCase("progressing")) - .findAny() - .orElse(null); - if (condition != null && condition.getReason().equalsIgnoreCase("progressdeadlineexceeded")) { - return result.failed("Deployment exceeded its progress deadline"); - } - - Integer desiredReplicas = deployment.getSpec().getReplicas(); - Integer statusReplicas = status.getReplicas(); - if ((desiredReplicas == null || desiredReplicas == 0) && (statusReplicas == null || statusReplicas == 0)) { - return result; - } - - Integer updatedReplicas = status.getUpdatedReplicas(); - if (updatedReplicas == null || (desiredReplicas != null && desiredReplicas > updatedReplicas)) { - return result.unstable("Waiting for all replicas to be updated"); - } - - if (statusReplicas != null && statusReplicas > updatedReplicas) { - return result.unstable("Waiting for old replicas to finish termination"); - } - - Integer availableReplicas = status.getAvailableReplicas(); - if (availableReplicas == null || availableReplicas < updatedReplicas) { - return result.unstable("Waiting for all replicas to be available"); - } - - Integer readyReplicas = status.getReadyReplicas(); - if (readyReplicas == null || (desiredReplicas != null && desiredReplicas > readyReplicas)) { - return result.unstable("Waiting for all replicas to be ready"); - } - - return result; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesEventHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesEventHandler.java deleted file mode 100644 index 95ba02814fb..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesEventHandler.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest;
-import io.kubernetes.client.models.V1Event;
-import io.kubernetes.client.models.V1ObjectReference;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.springframework.stereotype.Component;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.EVENT;
-
-@Component
-public class KubernetesEventHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    throw new IllegalStateException("Events cannot be deployed.");
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return EVENT;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() {
-    return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED;
-  }
-
-  @Override
-  public Manifest.Status status(KubernetesManifest manifest) {
-    return new Manifest.Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-
-
-  @Override
-  public void addRelationships(Map<KubernetesKind, List<KubernetesManifest>> allResources, Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) {
-    relationshipMap.putAll(allResources.getOrDefault(EVENT, new ArrayList<>())
-        .stream()
-        .map(m -> ImmutablePair.of(m, involvedManifest(KubernetesCacheDataConverter.getResource(m, V1Event.class))))
-        .filter(p -> p.getRight() != null)
-        .collect(Collectors.toMap(ImmutablePair::getLeft, p -> Collections.singletonList(p.getRight()))));
-  }
-
-  private KubernetesManifest involvedManifest(V1Event event) {
-    if (event == null) {
-      return null;
-    }
-
-    V1ObjectReference ref = event.getInvolvedObject();
-
-    if (ref == null
-        || StringUtils.isEmpty(ref.getApiVersion())
-        || StringUtils.isEmpty(ref.getKind())
-        || StringUtils.isEmpty(ref.getNamespace())
-        || StringUtils.isEmpty(ref.getName())) {
-      return null;
-    }
-
-    KubernetesManifest result = new KubernetesManifest();
-    result.put("metadata", new HashMap<>());
-    result.setApiVersion(KubernetesApiVersion.fromString(ref.getApiVersion()));
-    result.setKind(KubernetesKind.fromString(ref.getKind()));
-    result.setNamespace(ref.getNamespace());
-    result.setName(ref.getName());
-    return result;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHandler.java
deleted file mode 100644
index dbec80ce174..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHandler.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.netflix.spectator.api.Registry;
-import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer.ReplaceResult;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Warning;
-import com.netflix.spinnaker.kork.artifacts.model.Artifact;
-import lombok.Getter;
-import lombok.extern.slf4j.Slf4j;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-@Slf4j
-public abstract class KubernetesHandler implements CanDeploy, CanDelete, CanPatch {
-  protected final static ObjectMapper objectMapper = new ObjectMapper();
-
-  private final ArtifactReplacer artifactReplacer = new ArtifactReplacer();
-
-  abstract public int deployPriority();
-  abstract public KubernetesKind kind();
-  abstract public boolean versioned();
-  abstract public SpinnakerKind spinnakerKind();
-  abstract public Status status(KubernetesManifest manifest);
-
-  public List<Warning> listWarnings(KubernetesManifest manifest) {
-    return new ArrayList<>();
-  }
-
-  public List<String> sensitiveKeys() {
-    return new ArrayList<>();
-  }
-
-  protected void registerReplacer(ArtifactReplacer.Replacer replacer) {
-    artifactReplacer.addReplacer(replacer);
-  }
-
-  public ReplaceResult replaceArtifacts(KubernetesManifest manifest, List<Artifact> artifacts, String account) {
-    return artifactReplacer.replaceAll(manifest, artifacts, manifest.getNamespace(), account);
-  }
-
-  public ReplaceResult replaceArtifacts(KubernetesManifest manifest, List<Artifact> artifacts, String namespace, String account) {
-    return artifactReplacer.replaceAll(manifest, artifacts, namespace, account);
-  }
-
-  protected Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return null;
-  }
-
-  public Set<Artifact> listArtifacts(KubernetesManifest manifest) {
-    return artifactReplacer.findAll(manifest);
-  }
-
-  public KubernetesV2CachingAgent buildCachingAgent(
-      KubernetesNamedAccountCredentials<KubernetesV2Credentials> namedAccountCredentials,
-      KubernetesResourcePropertyRegistry propertyRegistry,
-      ObjectMapper objectMapper,
-      Registry registry,
-      int agentIndex,
-      int agentCount
-  ) {
-    Constructor<?> constructor;
-    Class<? extends KubernetesV2CachingAgent> clazz = cachingAgentClass();
-
-    if (clazz == null) {
-      log.error("No caching agent was registered for {} -- no resources will be cached", kind());
-    }
-
-    try {
-      constructor = clazz.getDeclaredConstructor(
-          KubernetesNamedAccountCredentials.class,
-          KubernetesResourcePropertyRegistry.class,
-          ObjectMapper.class,
-          Registry.class,
-          int.class,
-          int.class
-      );
-    } catch (NoSuchMethodException e) {
-      log.warn("Missing canonical constructor for {} caching agent", kind(), e);
-      return null;
-    }
-
-    try {
-      constructor.setAccessible(true);
-      return (KubernetesV2CachingAgent) constructor.newInstance(
-          namedAccountCredentials,
-          propertyRegistry,
-          objectMapper,
-          registry,
-          agentIndex,
-          agentCount
-      );
-    } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
-      log.warn("Can't invoke caching agent constructor for {} caching agent", kind(), e);
-      return null;
-    }
-  }
-
-  // used for stripping sensitive values
-  public void removeSensitiveKeys(KubernetesManifest manifest) {
-    List<String> sensitiveKeys = sensitiveKeys();
-    sensitiveKeys.forEach(manifest::remove);
-  }
-
-  public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) {
-    Map<String, Object> result = objectMapper.convertValue(key, new TypeReference<Map<String, Object>>() {});
-    result.put("region", key.getNamespace());
-    result.put("name", KubernetesManifest.getFullResourceName(key.getKubernetesKind(), key.getName()));
-    return result;
-  }
-
-  public void addRelationships(Map<KubernetesKind, List<KubernetesManifest>> allResources, Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) { }
-
-  // lower "value" is deployed before higher "value"
-  public enum DeployPriority {
-    LOWEST_PRIORITY(1000),
-    WORKLOAD_ATTACHMENT_PRIORITY(110),
-    WORKLOAD_CONTROLLER_PRIORITY(100),
-    WORKLOAD_PRIORITY(100),
-    PDB_PRIORITY(90),
-    API_SERVICE_PRIORITY(80),
-    NETWORK_RESOURCE_PRIORITY(70),
-    MOUNTABLE_DATA_PRIORITY(50),
-    MOUNTABLE_DATA_BACKING_RESOURCE_PRIORITY(40),
-    SERVICE_ACCOUNT_PRIORITY(40),
-    STORAGE_CLASS_PRIORITY(40),
-    RESOURCE_DEFINITION_PRIORITY(30),
-    ROLE_BINDING_PRIORITY(30),
-    ROLE_PRIORITY(20),
-    NAMESPACE_PRIORITY(0);
-
-    @Getter
-    private final int value;
-
-    DeployPriority(int value) {
-      this.value = value;
-    }
-
-    public static DeployPriority fromString(String val) {
-      if (val == null) {
-        return null;
-      }
-
-      return Arrays.stream(values())
-          .filter(v -> v.toString().equalsIgnoreCase(val))
-          .findFirst()
-          .orElseThrow(() -> new IllegalArgumentException("No such priority '" + val + "'"));
-    }
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHorizontalPodAutoscalerHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHorizontalPodAutoscalerHandler.java
deleted file mode 100644
index 9c3bc316f01..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHorizontalPodAutoscalerHandler.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_ATTACHMENT_PRIORITY;
-
-@Component
-public class KubernetesHorizontalPodAutoscalerHandler extends KubernetesHandler {
-  public KubernetesHorizontalPodAutoscalerHandler() {
-    registerReplacer(ArtifactReplacerFactory.hpaDeploymentReplacer());
-    registerReplacer(ArtifactReplacerFactory.hpaReplicaSetReplacer());
-  }
-
-  @Override
-  public int deployPriority() {
-    return WORKLOAD_ATTACHMENT_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.HORIZONTAL_POD_AUTOSCALER;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.UNCLASSIFIED;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    return new Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesIngressHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesIngressHandler.java
deleted file mode 100644
index e1dc542f10e..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesIngressHandler.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import io.kubernetes.client.models.V1beta1HTTPIngressPath;
-import io.kubernetes.client.models.V1beta1Ingress;
-import io.kubernetes.client.models.V1beta1IngressBackend;
-import io.kubernetes.client.models.V1beta1IngressRule;
-import lombok.extern.slf4j.Slf4j;
-import org.springframework.stereotype.Component;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.function.BiFunction;
-import java.util.stream.Collectors;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.EXTENSIONS_V1BETA1;
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.INGRESS;
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.SERVICE;
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.NETWORK_RESOURCE_PRIORITY;
-
-@Component
-@Slf4j
-public class KubernetesIngressHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return NETWORK_RESOURCE_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.INGRESS;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.LOAD_BALANCERS;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    return new Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-
-  @Override
-  public void addRelationships(Map<KubernetesKind, List<KubernetesManifest>> allResources, Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) {
-    Map<KubernetesManifest, List<KubernetesManifest>> result;
-
-    BiFunction<String, String, String> manifestName = (namespace, name) -> namespace + ":" + name;
-
-    Map<String, KubernetesManifest> services = allResources.getOrDefault(SERVICE, new ArrayList<>())
-        .stream()
-        .collect(Collectors.toMap((m) -> manifestName.apply(m.getNamespace(), m.getName()), (m) -> m));
-
-    for (KubernetesManifest ingress : allResources.getOrDefault(INGRESS, new ArrayList<>())) {
-      List<KubernetesManifest> attachedServices = new ArrayList<>();
-      try {
-        attachedServices = KubernetesIngressHandler.attachedServices(ingress)
-            .stream()
-            .map(s -> services.get(manifestName.apply(ingress.getNamespace(), s)))
-            .filter(Objects::nonNull)
-            .collect(Collectors.toList());
-      } catch (Exception e) {
-        log.warn("Failure getting services attached to {}", ingress.getName(), e);
-      }
-
-      relationshipMap.put(ingress, attachedServices);
-    }
-  }
-
-  public static List<String> attachedServices(KubernetesManifest manifest) {
-    if (manifest.getApiVersion().equals(EXTENSIONS_V1BETA1)) {
-      V1beta1Ingress v1beta1Ingress = KubernetesCacheDataConverter.getResource(manifest, V1beta1Ingress.class);
-      return attachedServices(v1beta1Ingress);
-    } else {
-      throw new UnsupportedVersionException(manifest);
-    }
-  }
-
-  private static List<String> attachedServices(V1beta1Ingress ingress) {
-    Set<String> result = new HashSet<>();
-    V1beta1IngressBackend backend = ingress.getSpec().getBackend();
-    if (backend != null) {
-      result.add(backend.getServiceName());
-    }
-
-    List<V1beta1IngressRule> rules = ingress.getSpec().getRules();
-    rules = rules == null ? new ArrayList<>() : rules;
-    for (V1beta1IngressRule rule : rules) {
-      for (V1beta1HTTPIngressPath path : rule.getHttp().getPaths()) {
-        backend = path.getBackend();
-        if (backend != null) {
-          result.add(backend.getServiceName());
-        }
-      }
-    }
-
-    return new ArrayList<>(result);
-  }
-
-  @Override
-  public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) {
-    Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils);
-    result.put("loadBalancer", result.get("name"));
-
-    return result;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesJobHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesJobHandler.java
deleted file mode 100644
index 3f330898ebe..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesJobHandler.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2018 Joel Wilsson
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import io.kubernetes.client.models.V1Job;
-import io.kubernetes.client.models.V1JobCondition;
-import io.kubernetes.client.models.V1JobSpec;
-import io.kubernetes.client.models.V1JobStatus;
-import org.springframework.stereotype.Component;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
-
-@Component
-public class KubernetesJobHandler extends KubernetesHandler implements
-    ServerGroupHandler {
-
-  public KubernetesJobHandler() {
-    registerReplacer(ArtifactReplacerFactory.dockerImageReplacer());
-    registerReplacer(ArtifactReplacerFactory.configMapVolumeReplacer());
-    registerReplacer(ArtifactReplacerFactory.secretVolumeReplacer());
-    registerReplacer(ArtifactReplacerFactory.configMapEnvFromReplacer());
-    registerReplacer(ArtifactReplacerFactory.secretEnvFromReplacer());
-    registerReplacer(ArtifactReplacerFactory.configMapKeyValueFromReplacer());
-    registerReplacer(ArtifactReplacerFactory.secretKeyValueFromReplacer());
-  }
-
-  @Override
-  public int deployPriority() {
-    return WORKLOAD_CONTROLLER_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.JOB;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() {
-    return KubernetesSpinnakerKindMap.SpinnakerKind.SERVER_GROUPS;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    V1Job v1Job = KubernetesCacheDataConverter.getResource(manifest, V1Job.class);
-    return status(v1Job);
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-
-  private Status status(V1Job job) {
-    Status result = new Status();
-    V1JobStatus status = job.getStatus();
-    if (status == null) {
-      result.unstable("No status reported yet")
-          .unavailable("No availability reported");
-      return result;
-    }
-
-    int completions = 1;
-    V1JobSpec spec = job.getSpec();
-    if (spec != null && spec.getCompletions() != null) {
-      completions = spec.getCompletions();
-    }
-    int succeeded = 0;
-    if (status.getSucceeded() != null) {
-      succeeded = status.getSucceeded();
-    }
-
-    if (succeeded < completions) {
-      List<V1JobCondition> conditions = status.getConditions();
-      conditions = conditions != null ? conditions : Collections.emptyList();
-      Optional<V1JobCondition> condition = conditions.stream().filter(this::jobFailed).findAny();
-      if (condition.isPresent()) {
-        return result.failed(condition.get().getMessage());
-      } else {
-        return result.unstable("Waiting for jobs to finish");
-      }
-    }
-
-    return result;
-  }
-
-  private boolean jobFailed(V1JobCondition condition) {
-    return "Failed".equalsIgnoreCase(condition.getType()) && "True".equalsIgnoreCase(condition.getStatus());
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesNamespaceHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesNamespaceHandler.java
deleted file mode 100644
index e2538f44e95..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesNamespaceHandler.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesNamespaceCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.NAMESPACE_PRIORITY;
-
-@Component
-public class KubernetesNamespaceHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return NAMESPACE_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.NAMESPACE;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.UNCLASSIFIED;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    return new Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesNamespaceCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesNetworkPolicyHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesNetworkPolicyHandler.java
deleted file mode 100644
index c6077910120..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesNetworkPolicyHandler.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import java.util.Map;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.NETWORK_RESOURCE_PRIORITY;
-
-@Component
-public class KubernetesNetworkPolicyHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return NETWORK_RESOURCE_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.NETWORK_POLICY;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.SECURITY_GROUPS;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    return new Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-
-  @Override
-  public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) {
-    Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils);
-    result.put("id", result.get("name"));
-
-    return result;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPersistentVolumeClaimHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPersistentVolumeClaimHandler.java
deleted file mode 100644
index 833fd8685ff..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPersistentVolumeClaimHandler.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_PRIORITY;
-
-@Component
-public class KubernetesPersistentVolumeClaimHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return MOUNTABLE_DATA_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.PERSISTENT_VOLUME_CLAIM;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.CONFIGS;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    return new Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPersistentVolumeHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPersistentVolumeHandler.java
deleted file mode 100644
index 7e28546a3a1..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPersistentVolumeHandler.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_BACKING_RESOURCE_PRIORITY;
-
-@Component
-public class KubernetesPersistentVolumeHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return MOUNTABLE_DATA_BACKING_RESOURCE_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.PERSISTENT_VOLUME;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.CONFIGS;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    return new Status();
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPodDisruptionBudgetHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPodDisruptionBudgetHandler.java
deleted file mode 100644
index b6d7d90f321..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPodDisruptionBudgetHandler.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2017 Mirantis, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.PDB_PRIORITY;
-
-@Component
-public class KubernetesPodDisruptionBudgetHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return PDB_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.POD_DISRUPTION_BUDGET;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() {
-    return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) { return new Status(); }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPodHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPodHandler.java
deleted file mode 100644
index 5389f752b69..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesPodHandler.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactTypes;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import io.kubernetes.client.models.V1Pod;
-import io.kubernetes.client.models.V1PodStatus;
-import org.springframework.stereotype.Component;
-
-import java.util.Map;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_PRIORITY;
-
-@Component
-public class KubernetesPodHandler extends KubernetesHandler {
-  public KubernetesPodHandler() {
-    registerReplacer(
-        ArtifactReplacer.Replacer.builder()
-            .replacePath("$.spec.containers.[?( @.image == \"{%name%}\" )].image")
-            .findPath("$.spec.containers.*.image")
-            .type(ArtifactTypes.DOCKER_IMAGE)
-            .build()
-    );
-  }
-
-  @Override
-  public int deployPriority() {
-    return WORKLOAD_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.POD;
-  }
-
-  @Override
-  public boolean versioned() {
-    return true;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.INSTANCES;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    Status result = new Status();
-    V1Pod pod = KubernetesCacheDataConverter.getResource(manifest, V1Pod.class);
-    V1PodStatus status = pod.getStatus();
-
-    if (status == null) {
-      result.unstable("No status reported yet")
-          .unavailable("No availability reported");
-      return result;
-    }
-
-    // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/
-    String phase = status.getPhase();
-
-    if (phase == null) {
-      result.unstable("No phase reported yet")
-          .unavailable("No availability reported");
-    } else if (phase.equals("pending")) {
-      result.unstable("Pod is 'pending'")
-          .unavailable("Pod has not been scheduled yet");
-    } else if (phase.equals("unknown")) {
-      result.unstable("Pod has 'unknown' phase")
-          .unavailable("No availability reported");
-    } else if (phase.equals("failed")) {
-      result.failed("Pod has 'failed'")
-          .unavailable("Pod is not running");
-    }
-
-    return result;
-  }
-
-  @Override
-  public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) {
-    Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils);
-    result.put("instanceId", result.get("name"));
-
-    return result;
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesReplicaSetHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesReplicaSetHandler.java
deleted file mode 100644
index e92e479c842..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesReplicaSetHandler.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import io.kubernetes.client.models.V1beta1ReplicaSet;
-import io.kubernetes.client.models.V1beta2ReplicaSet;
-import io.kubernetes.client.models.V1beta2ReplicaSetStatus;
-import org.springframework.stereotype.Component;
-
-import java.util.Map;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.APPS_V1BETA2;
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.EXTENSIONS_V1BETA1;
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
-
-@Component
-public class KubernetesReplicaSetHandler extends KubernetesHandler implements
-    CanResize,
-    CanScale,
-    ServerGroupHandler {
-
-  public KubernetesReplicaSetHandler() {
-    registerReplacer(ArtifactReplacerFactory.dockerImageReplacer());
-    registerReplacer(ArtifactReplacerFactory.configMapVolumeReplacer());
-    registerReplacer(ArtifactReplacerFactory.secretVolumeReplacer());
-    registerReplacer(ArtifactReplacerFactory.configMapEnvFromReplacer());
-    registerReplacer(ArtifactReplacerFactory.secretEnvFromReplacer());
-    registerReplacer(ArtifactReplacerFactory.configMapKeyValueFromReplacer());
-    registerReplacer(ArtifactReplacerFactory.secretKeyValueFromReplacer());
-  }
-
-  @Override
-  public int deployPriority() {
-    return WORKLOAD_CONTROLLER_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.REPLICA_SET;
-  }
-
-  @Override
-  public boolean versioned() {
-    return true;
-  }
-
-  @Override
-  public SpinnakerKind spinnakerKind() {
-    return SpinnakerKind.SERVER_GROUPS;
-  }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) {
-    if (manifest.getApiVersion().equals(EXTENSIONS_V1BETA1) || manifest.getApiVersion().equals(APPS_V1BETA2)) {
-      V1beta2ReplicaSet v1beta2ReplicaSet = KubernetesCacheDataConverter.getResource(manifest, V1beta2ReplicaSet.class);
-      return status(v1beta2ReplicaSet);
-    } else {
-      throw new UnsupportedVersionException(manifest);
-    }
-  }
-
-  private Status status(V1beta2ReplicaSet replicaSet) {
-    Status result = new Status();
-    int desiredReplicas = replicaSet.getSpec().getReplicas();
-    V1beta2ReplicaSetStatus status = replicaSet.getStatus();
-    if (status == null) {
-      result.unstable("No status reported yet")
-          .unavailable("No availability reported");
-      return result;
-    }
-
-    Long observedGeneration = status.getObservedGeneration();
-    if (observedGeneration != null && observedGeneration != replicaSet.getMetadata().getGeneration()) {
-      result.unstable("Waiting for replicaset spec update to be observed");
-    }
-
-    Integer existing = status.getFullyLabeledReplicas();
-    if (existing == null || desiredReplicas > existing) {
-      return result.unstable("Waiting for all replicas to be fully-labeled")
-          .unavailable("Not all replicas have become labeled yet");
-    }
-
-    existing = status.getAvailableReplicas();
-    if (existing == null || desiredReplicas > existing) {
-      return result.unstable("Waiting for all replicas to be available")
-          .unavailable("Not all replicas have become available yet");
-    }
-
-    existing = status.getReadyReplicas();
-    if (existing == null || desiredReplicas > existing) {
-      return result.unstable("Waiting for all replicas to be ready");
-    }
-
-    return result;
-  }
-
-  public static Map<String, String> getPodTemplateLabels(KubernetesManifest manifest) {
-    if (manifest.getApiVersion().equals(EXTENSIONS_V1BETA1)) {
-      V1beta1ReplicaSet v1beta1ReplicaSet = KubernetesCacheDataConverter.getResource(manifest, V1beta1ReplicaSet.class);
-      return getPodTemplateLabels(v1beta1ReplicaSet);
-    } else if (manifest.getApiVersion().equals(APPS_V1BETA2)) {
-      V1beta2ReplicaSet v1beta2ReplicaSet = KubernetesCacheDataConverter.getResource(manifest, V1beta2ReplicaSet.class);
-      return getPodTemplateLabels(v1beta2ReplicaSet);
-    } else {
-      throw new UnsupportedVersionException(manifest);
-    }
-  }
-
-  private static Map<String, String> getPodTemplateLabels(V1beta1ReplicaSet replicaSet) {
-    return replicaSet.getSpec().getTemplate().getMetadata().getLabels();
-  }
-
-  private static Map<String, String> getPodTemplateLabels(V1beta2ReplicaSet replicaSet) {
-    return replicaSet.getSpec().getTemplate().getMetadata().getLabels();
-  }
-
-  @Override
-  public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) {
-    Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils);
-    result.put("serverGroup", result.get("name"));
-
-    return result;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesRoleBindingHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesRoleBindingHandler.java
deleted file mode 100644
index f441c073902..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesRoleBindingHandler.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2018 Joel Wilsson
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler;
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.model.Manifest.Status;
-import org.springframework.stereotype.Component;
-
-import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.ROLE_BINDING_PRIORITY;
-
-@Component
-public class KubernetesRoleBindingHandler extends KubernetesHandler {
-  @Override
-  public int deployPriority() {
-    return ROLE_BINDING_PRIORITY.getValue();
-  }
-
-  @Override
-  public KubernetesKind kind() {
-    return KubernetesKind.ROLE_BINDING;
-  }
-
-  @Override
-  public boolean versioned() {
-    return false;
-  }
-
-  @Override
-  public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() {
-    return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED;
-  }
-
-  @Override
-  public Status status(KubernetesManifest manifest) { return new Status(); }
-
-  @Override
-  public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() {
-    return KubernetesCoreCachingAgent.class;
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesRoleHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesRoleHandler.java
deleted file mode 100644
index 20125c07539..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesRoleHandler.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2018 Joel Wilsson
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.ROLE_PRIORITY; - -@Component -public class KubernetesRoleHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return ROLE_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.ROLE; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { return new Status(); } - - @Override - public Class cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesSecretHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesSecretHandler.java deleted file mode 100644 index 06f47d07b6a..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesSecretHandler.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import java.util.Collections; -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_PRIORITY; - -@Component -public class KubernetesSecretHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return MOUNTABLE_DATA_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.SECRET; - } - - @Override - public List sensitiveKeys() { - return Collections.singletonList("data"); - } - - @Override - public boolean versioned() { - return true; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.CONFIGS; - } - - @Override - public Status status(KubernetesManifest manifest) { - return new Status(); - } - - @Override - public Class cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesServiceAccountHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesServiceAccountHandler.java deleted file mode 100644 index dc2a9324e84..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesServiceAccountHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Joel Wilsson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
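`KubernetesSecretHandler` above is the only handler in this group that overrides `sensitiveKeys()`; returning `data` lets `removeSensitiveKeys` (called at the end of the deploy operation later in this diff) scrub secret payloads from operation results before they are persisted. A hedged standalone sketch of that redaction step, assuming a manifest is just a map:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative redaction of handler-declared sensitive keys; not the real implementation.
    public class RedactSketch {
      static void removeSensitiveKeys(Map<String, Object> manifest, List<String> sensitiveKeys) {
        sensitiveKeys.forEach(manifest::remove);
      }

      public static void main(String[] args) {
        Map<String, Object> secret = new HashMap<>();
        secret.put("kind", "Secret");
        secret.put("data", "cGFzc3dvcmQ=");
        removeSensitiveKeys(secret, Arrays.asList("data"));
        System.out.println(secret); // {kind=Secret}
      }
    }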
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.SERVICE_ACCOUNT_PRIORITY; - -@Component -public class KubernetesServiceAccountHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return SERVICE_ACCOUNT_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.SERVICE_ACCOUNT; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { return new Status(); } - - @Override - public Class cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesServiceHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesServiceHandler.java deleted file mode 100644 index 857d6aa1b2d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesServiceHandler.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
 - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import io.kubernetes.client.models.V1Service; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion.V1; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.REPLICA_SET; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.SERVICE; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.NETWORK_RESOURCE_PRIORITY; - -@Component -public class KubernetesServiceHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return NETWORK_RESOURCE_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.SERVICE; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.LOAD_BALANCERS; - } - - @Override - public Status status(KubernetesManifest manifest) { - return new Status(); - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } - - @Override - public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) { - Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils); - result.put("loadBalancer", result.get("name")); - - return result; - } - - @Override - public void addRelationships(Map<KubernetesKind, List<KubernetesManifest>> allResources, Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) { - Map<String, Set<KubernetesManifest>> mapLabelToManifest = new HashMap<>(); - - allResources.getOrDefault(REPLICA_SET, new ArrayList<>()) - .forEach(r -> addAllReplicaSetLabels(mapLabelToManifest, r)); - - for (KubernetesManifest service : allResources.getOrDefault(SERVICE, new ArrayList<>())) { - relationshipMap.put(service, getRelatedManifests(service, mapLabelToManifest)); - } - } - - private Map<String, String> getSelector(KubernetesManifest manifest) { - if (manifest.getApiVersion().equals(V1)) { - V1Service v1Service = KubernetesCacheDataConverter.getResource(manifest, V1Service.class); - return v1Service.getSpec().getSelector(); - } else { - throw new IllegalArgumentException("No services with version " + manifest.getApiVersion() + " supported"); - } - } - - private List<KubernetesManifest> getRelatedManifests(KubernetesManifest service, Map<String, Set<KubernetesManifest>> mapLabelToManifest) { - return new ArrayList<>(intersectLabels(service, mapLabelToManifest)); - } - - private Set<KubernetesManifest> intersectLabels(KubernetesManifest service, Map<String, Set<KubernetesManifest>> mapLabelToManifest) { - Map<String, String> selector = getSelector(service); - if (selector == null || selector.isEmpty()) { - return new HashSet<>(); - } - - Set<KubernetesManifest> result = null; - String namespace = service.getNamespace(); - for (Map.Entry<String, String> label : selector.entrySet()) { - String labelKey = podLabelKey(namespace, label); - Set<KubernetesManifest> manifests = mapLabelToManifest.get(labelKey); - manifests = manifests == null ? new HashSet<>() : manifests; - - if (result == null) { - result = manifests; - } else { - result.retainAll(manifests); - } - } - - return result; - } - - private void addAllReplicaSetLabels(Map<String, Set<KubernetesManifest>> entries, KubernetesManifest replicaSet) { - String namespace = replicaSet.getNamespace(); - Map<String, String> podLabels = KubernetesReplicaSetHandler.getPodTemplateLabels(replicaSet); - if (podLabels == null) { - return; - } - - for (Map.Entry<String, String> label : podLabels.entrySet()) { - String labelKey = podLabelKey(namespace, label); - enterManifest(entries, labelKey, KubernetesCacheDataConverter.convertToManifest(replicaSet)); - } - } - - private void enterManifest(Map<String, Set<KubernetesManifest>> entries, String label, KubernetesManifest manifest) { - Set<KubernetesManifest> pods = entries.get(label); - if (pods == null) { - pods = new HashSet<>(); - } - - pods.add(manifest); - - entries.put(label, pods); - } - - private String podLabelKey(String namespace, Map.Entry<String, String> label) { - // Space can't be used in any of the values, so it's a safe separator. - return namespace + " " + label.getKey() + " " + label.getValue(); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesStatefulSetHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesStatefulSetHandler.java deleted file mode 100644 index 66772a7ffc3..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesStatefulSetHandler.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
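The service handler above relates a Service to ReplicaSets by intersecting, one selector label at a time, the sets of manifests whose pod templates carry that label; `podLabelKey` namespace-scopes the lookup key and joins the parts with spaces, which are safe because none of the parts may contain one. A standalone sketch of the same intersection, with plain strings standing in for manifests:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Illustrative selector intersection; strings stand in for KubernetesManifest.
    public class SelectorSketch {
      static String podLabelKey(String ns, String key, String value) {
        return ns + " " + key + " " + value; // space-separated, like the handler above
      }

      public static void main(String[] args) {
        Map<String, Set<String>> byLabel = new HashMap<>();
        byLabel.put(podLabelKey("prod", "app", "web"), new HashSet<>(Arrays.asList("rs-web-v000", "rs-web-v001")));
        byLabel.put(podLabelKey("prod", "tier", "frontend"), new HashSet<>(Arrays.asList("rs-web-v001")));

        Set<String> result = null; // intersect across every selector entry
        for (String key : Arrays.asList(podLabelKey("prod", "app", "web"), podLabelKey("prod", "tier", "frontend"))) {
          Set<String> manifests = byLabel.getOrDefault(key, new HashSet<>());
          if (result == null) { result = manifests; } else { result.retainAll(manifests); }
        }
        System.out.println(result); // [rs-web-v001]
      }
    }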
 - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacerFactory; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesCacheUtils; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import io.kubernetes.client.models.V1beta2StatefulSet; -import io.kubernetes.client.models.V1beta2StatefulSetStatus; -import org.apache.commons.lang3.StringUtils; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.BiFunction; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.SERVICE; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind.STATEFUL_SET; -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; - -@Component -public class KubernetesStatefulSetHandler extends KubernetesHandler implements - CanResize, - CanScale, - CanPauseRollout, - CanResumeRollout, - CanUndoRollout, - ServerGroupHandler { - - public KubernetesStatefulSetHandler() { - registerReplacer(ArtifactReplacerFactory.dockerImageReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.secretVolumeReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretEnvFromReplacer()); - registerReplacer(ArtifactReplacerFactory.configMapKeyValueFromReplacer()); - registerReplacer(ArtifactReplacerFactory.secretKeyValueFromReplacer()); - } - - @Override - public int deployPriority() { - return WORKLOAD_CONTROLLER_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return STATEFUL_SET; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public SpinnakerKind spinnakerKind() { - return SpinnakerKind.SERVER_GROUPS; - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } - - @Override - public Status status(KubernetesManifest manifest) { - if (!manifest.isNewerThanObservedGeneration()) { - return (new Status()).unknown(); - } - V1beta2StatefulSet v1beta2StatefulSet = KubernetesCacheDataConverter.getResource(manifest, V1beta2StatefulSet.class); - return status(v1beta2StatefulSet); - } - - public static String serviceName(KubernetesManifest manifest) { - // TODO(lwander) perhaps switch on API version if this changes - Map<String, Object> spec = (Map<String, Object>) manifest.get("spec"); - return (String) spec.get("serviceName"); - } - - @Override - public Map<String, Object> hydrateSearchResult(Keys.InfrastructureCacheKey key, KubernetesCacheUtils cacheUtils) { - Map<String, Object> result = super.hydrateSearchResult(key, cacheUtils); - result.put("serverGroup", result.get("name")); - - return result; - } - - private Status status(V1beta2StatefulSet statefulSet) { - Status result = new Status(); - - if (statefulSet.getSpec().getUpdateStrategy().getType().equalsIgnoreCase("ondelete")) { - return result; - } - - V1beta2StatefulSetStatus status = statefulSet.getStatus(); - if (status == null) { - result.unstable("No status reported yet") - .unavailable("No availability reported"); - return result; - } - - Integer desiredReplicas = statefulSet.getSpec().getReplicas(); - Integer existing = status.getReplicas(); - if (existing == null || (desiredReplicas != null && desiredReplicas > existing)) { - return result.unstable("Waiting for at least the desired replica count to be met"); - } - - if (!status.getCurrentRevision().equals(status.getUpdateRevision())) { - return result.unstable("Waiting for the updated revision to match the current revision"); - } - - existing = status.getCurrentReplicas(); - if (existing == null || (desiredReplicas != null && desiredReplicas > existing)) { - return result.unstable("Waiting for all updated replicas to be scheduled"); - } - - existing = status.getReadyReplicas(); - if (existing == null || (desiredReplicas != null && desiredReplicas > existing)) { - return result.unstable("Waiting for all updated replicas to be ready"); - } - - return result; - } - - @Override - public void addRelationships(Map<KubernetesKind, List<KubernetesManifest>> allResources, Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) { - BiFunction<String, String, String> manifestName = (namespace, name) -> namespace + ":" + name; - - Map<String, KubernetesManifest> services = allResources.getOrDefault(SERVICE, new ArrayList<>()) - .stream() - .collect(Collectors.toMap((m) -> manifestName.apply(m.getNamespace(), m.getName()), (m) -> m)); - - for (KubernetesManifest manifest : allResources.getOrDefault(STATEFUL_SET, new ArrayList<>())) { - String serviceName = KubernetesStatefulSetHandler.serviceName(manifest); - if (StringUtils.isEmpty(serviceName)) { - continue; - } - - String key = manifestName.apply(manifest.getNamespace(), serviceName); - - if (!services.containsKey(key)) { - continue; - } - - KubernetesManifest service = services.get(key); - relationshipMap.put(manifest, Collections.singletonList(service)); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesStorageClassHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesStorageClassHandler.java deleted file mode 100644 index ffb21bb6014..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesStorageClassHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Joel Wilsson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
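`addRelationships` in the stateful-set handler above pairs each StatefulSet with its governing Service by a namespace-qualified `namespace:serviceName` key, so same-named services in different namespaces cannot collide. A minimal sketch of that keyed join:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.BiFunction;

    // Illustrative namespace-qualified lookup from spec.serviceName to a Service.
    public class ServiceLookupSketch {
      public static void main(String[] args) {
        BiFunction<String, String, String> manifestName = (namespace, name) -> namespace + ":" + name;

        Map<String, String> services = new HashMap<>();
        services.put(manifestName.apply("prod", "web"), "service prod/web");

        String serviceName = "web"; // read from the StatefulSet's spec.serviceName
        String key = manifestName.apply("prod", serviceName);
        System.out.println(services.getOrDefault(key, "no governing service found"));
      }
    }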
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCoreCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.STORAGE_CLASS_PRIORITY; - -@Component -public class KubernetesStorageClassHandler extends KubernetesHandler { - @Override - public int deployPriority() { - return STORAGE_CLASS_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.STORAGE_CLASS; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public KubernetesSpinnakerKindMap.SpinnakerKind spinnakerKind() { - return KubernetesSpinnakerKindMap.SpinnakerKind.UNCLASSIFIED; - } - - @Override - public Status status(KubernetesManifest manifest) { return new Status(); } - - @Override - public Class cachingAgentClass() { - return KubernetesCoreCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesUnregisteredCustomResourceHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesUnregisteredCustomResourceHandler.java deleted file mode 100644 index 965f8d4f44d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesUnregisteredCustomResourceHandler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
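Every handler removed in this diff declares a `deployPriority()`, and the deploy operation near the end of this diff sorts manifests with `Comparator.comparingInt` on that value, so that, say, a StorageClass lands before the workloads that depend on it. A standalone sketch of the ordering; the numeric priorities here are made up, since the real values live in `KubernetesHandler.DeployPriority`, which this diff does not show:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    // Illustrative priority sort; the numbers are assumptions, not Spinnaker's values.
    public class DeployOrderSketch {
      static class Manifest {
        final String kind;
        final int deployPriority;
        Manifest(String kind, int deployPriority) { this.kind = kind; this.deployPriority = deployPriority; }
      }

      public static void main(String[] args) {
        List<Manifest> manifests = new ArrayList<>(Arrays.asList(
            new Manifest("Deployment", 100),
            new Manifest("StorageClass", 20),
            new Manifest("Service", 50)));
        manifests.sort(Comparator.comparingInt(m -> m.deployPriority));
        manifests.forEach(m -> System.out.println(m.kind)); // StorageClass, Service, Deployment
      }
    }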
 - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesUnregisteredCustomResourceCachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap.SpinnakerKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.model.Manifest.Status; -import org.springframework.stereotype.Component; - -import static com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler.DeployPriority.LOWEST_PRIORITY; - -@Component -public class KubernetesUnregisteredCustomResourceHandler extends KubernetesHandler implements CanDelete { - @Override - public int deployPriority() { - return LOWEST_PRIORITY.getValue(); - } - - @Override - public KubernetesKind kind() { - return KubernetesKind.NONE; - } - - @Override - public boolean versioned() { - return false; - } - - @Override - public SpinnakerKind spinnakerKind() { - return null; - } - - @Override - public Status status(KubernetesManifest manifest) { - return new Status(); - } - - @Override - public Class<? extends KubernetesV2CachingAgent> cachingAgentClass() { - return KubernetesUnregisteredCustomResourceCachingAgent.class; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ModelHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ModelHandler.java deleted file mode 100644 index 5a60f75a950..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ModelHandler.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.ManifestBasedModel; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2CacheData; - -public interface ModelHandler<T extends KubernetesV2CacheData> { - ManifestBasedModel fromCacheData(T cacheData); -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ServerGroupHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ServerGroupHandler.java deleted file mode 100644 index e788dc65b31..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ServerGroupHandler.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2ServerGroup; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupCacheData; - -public interface ServerGroupHandler extends ModelHandler<KubernetesV2ServerGroupCacheData> { - default KubernetesV2ServerGroup fromCacheData(KubernetesV2ServerGroupCacheData cacheData) { - return KubernetesV2ServerGroup.fromCacheData(cacheData); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ServerGroupManagerHandler.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ServerGroupManagerHandler.java deleted file mode 100644 index 1e24fa9cb68..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/ServerGroupManagerHandler.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.model.KubernetesV2ServerGroupManager; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.data.KubernetesV2ServerGroupManagerCacheData; - -public interface ServerGroupManagerHandler extends ModelHandler<KubernetesV2ServerGroupManagerCacheData> { - default KubernetesV2ServerGroupManager fromCacheData(KubernetesV2ServerGroupManagerCacheData cacheData) { - return KubernetesV2ServerGroupManager.fromCacheData(cacheData); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/job/KubectlJobExecutor.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/job/KubectlJobExecutor.java deleted file mode 100644 index 1e64aa3fcef..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/job/KubectlJobExecutor.java +++ /dev/null @@ -1,658 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
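The `KubectlJobExecutor` whose removal begins here shells out to `kubectl` and polls each job with a capped exponential backoff: the sleep starts at `minSleepMillis`, doubles up to `maxSleepMillis`, and the wait aborts after `timeoutMillis` or `maxInterruptRetries` interruptions (see `backoffWait` below). A self-contained sketch of that polling shape, with a boolean supplier standing in for the job-status check:

    import java.util.function.BooleanSupplier;

    // Illustrative capped exponential backoff mirroring backoffWait; not the real class.
    public class BackoffSketch {
      static boolean backoffWait(BooleanSupplier done, long minSleepMillis, long maxSleepMillis, long timeoutMillis)
          throws InterruptedException {
        long nextSleep = minSleepMillis;
        long totalSleep = 0;
        while (totalSleep < timeoutMillis) {
          Thread.sleep(nextSleep);
          totalSleep += nextSleep;
          nextSleep = Math.min(nextSleep * 2, maxSleepMillis); // double the sleep, but cap it
          if (done.getAsBoolean()) {
            return true;
          }
        }
        return false; // the real executor throws "Job took too long to complete" here
      }

      public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        System.out.println(backoffWait(() -> System.currentTimeMillis() - start > 900, 200, 4000, 100000));
      }
    }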
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job; - -import com.google.gson.Gson; -import com.google.gson.JsonSyntaxException; -import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; -import com.netflix.spinnaker.clouddriver.jobs.JobRequest; -import com.netflix.spinnaker.clouddriver.jobs.JobStatus; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPatchOptions; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPodMetric; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPodMetric.ContainerMetric; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestList; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesSelectorList; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import io.kubernetes.client.models.V1DeleteOptions; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.stereotype.Component; - -import java.io.ByteArrayInputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -@Component -@Slf4j -public class KubectlJobExecutor { - @Value("${kubernetes.kubectl.poll.minSleepMillis:200}") - Long minSleepMillis; - - @Value("${kubernetes.kubectl.poll.maxSleepMillis:4000}") - Long maxSleepMillis; - - @Value("${kubernetes.kubectl.poll.timeoutMillis:100000}") - Long timeoutMillis; - - @Value("${kubernetes.kubectl.poll.maxInterruptRetries:10}") - Long maxInterruptRetries; - - @Value("${kubernetes.kubectl.executable:kubectl}") - String executable; - - @Value("${kubernetes.oAuth.executable:oauth2l}") - String oAuthExecutable; - - private final static String NO_RESOURCE_TYPE_ERROR = "doesn't have a resource type"; - - private final JobExecutor jobExecutor; - - private final Gson gson = new Gson(); - - @Autowired - KubectlJobExecutor(JobExecutor jobExecutor) { - this.jobExecutor = jobExecutor; - } - - public String configCurrentContext(KubernetesV2Credentials credentials) { - List command = kubectlAuthPrefix(credentials); - command.add("config"); - command.add("current-context"); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed get current configuration context"); - } - - return status.getStdOut(); - } - - public String defaultNamespace(KubernetesV2Credentials credentials) { - String 
configCurrentContext = configCurrentContext(credentials); - if (StringUtils.isEmpty(configCurrentContext)) { - return ""; - } - - List command = kubectlAuthPrefix(credentials); - command.add("config"); - command.add("view"); - command.add("-o"); - String jsonPath = "{.contexts[?(@.name==\"" + configCurrentContext + "\")].context.namespace}"; - command.add("\"jsonPath=" + jsonPath + "\""); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed get current configuration context"); - } - return status.getStdOut(); - } - - public String logs(KubernetesV2Credentials credentials, String namespace, String podName, String containerName) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - command.add("logs"); - command.add(podName); - command.add("-c=" + containerName); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed to get logs from " + podName + "/" + containerName + " in " + namespace + ": " + status.getStdErr()); - } - - return status.getStdOut(); - } - - public List delete(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name, KubernetesSelectorList labelSelectors, V1DeleteOptions deleteOptions) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("delete"); - - command = kubectlLookupInfo(command, kind, name, labelSelectors); - - // spinnaker generally accepts deletes of resources that don't exist - command.add("--ignore-not-found=true"); - - if (deleteOptions.isOrphanDependents() != null) { - command.add("--cascade=" + !deleteOptions.isOrphanDependents()); - } - - if (deleteOptions.getGracePeriodSeconds() != null) { - command.add("--grace-period=" + deleteOptions.getGracePeriodSeconds()); - } - - if (StringUtils.isNotEmpty(deleteOptions.getPropagationPolicy())) { - throw new IllegalArgumentException("Propagation policy is not yet supported as a delete option"); - } - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - String id; - if (StringUtils.isNotEmpty(name)) { - id = kind + "/" + name; - } else { - id = labelSelectors.toString(); - } - throw new KubectlException("Failed to delete " + id + " from " + namespace + ": " + status.getStdErr()); - } - - if (StringUtils.isEmpty(status.getStdOut()) || status.getStdOut().equals("No output from command.") || status.getStdOut().startsWith("No resources found")) { - return new ArrayList<>(); - } - - return Arrays.stream(status.getStdOut().split("\n")) - .map(m -> m.substring(m.indexOf("\"") + 1)) - .map(m -> m.substring(0, m.lastIndexOf("\""))) - .collect(Collectors.toList()); - } - - public Void scale(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name, int replicas) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("scale"); - command = kubectlLookupInfo(command, kind, name, null); - command.add("--replicas=" + replicas); - - 
String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed to scale " + kind + "/" + name + " from " + namespace + ": " + status.getStdErr()); - } - - return null; - } - - public List historyRollout(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("rollout"); - command.add("history"); - command.add(kind.toString() + "/" + name); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed to get rollout history of " + kind + "/" + name + " from " + namespace + ": " + status.getStdErr()); - } - - String stdout = status.getStdOut(); - if (StringUtils.isEmpty(stdout)) { - return new ArrayList<>(); - } - - // "name" - // REVISION CHANGE-CAUSE - // # - // # - // # - // ... - List splitOutput = Arrays.stream(stdout.split("\n")).collect(Collectors.toList()); - - if (splitOutput.size() <= 2) { - return new ArrayList<>(); - } - - splitOutput = splitOutput.subList(2, splitOutput.size()); - - return splitOutput.stream() - .map(l -> l.split("[ \t]")) - .filter(l -> l.length > 0) - .map(l -> l[0]) - .map(Integer::valueOf) - .collect(Collectors.toList()); - } - - public Void undoRollout(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name, int revision) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("rollout"); - command.add("undo"); - command.add(kind.toString() + "/" + name); - command.add("--to-revision=" + revision); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed to undo rollout " + kind + "/" + name + " from " + namespace + ": " + status.getStdErr()); - } - - return null; - } - - public Void pauseRollout(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("rollout"); - command.add("pause"); - command.add(kind.toString() + "/" + name); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed to pause rollout " + kind + "/" + name + " from " + namespace + ": " + status.getStdErr()); - } - - return null; - } - - public Void resumeRollout(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("rollout"); - command.add("resume"); - command.add(kind.toString() + "/" + name); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, 
credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Failed to resume rollout " + kind + "/" + name + " from " + namespace + ": " + status.getStdErr()); - } - - return null; - } - - - public KubernetesManifest get(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, String name) { - List command = kubectlNamespacedGet(credentials, Collections.singletonList(kind), namespace); - command.add(name); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - if (status.getStdErr().contains("(NotFound)")) { - return null; - } else if (status.getStdErr().contains(NO_RESOURCE_TYPE_ERROR)) { - throw new NoResourceTypeException(status.getStdErr()); - } - - throw new KubectlException("Failed to read " + kind + " from " + namespace + ": " + status.getStdErr()); - } - - try { - return gson.fromJson(status.getStdOut(), KubernetesManifest.class); - } catch (JsonSyntaxException e) { - throw new KubectlException("Failed to parse kubectl output: " + e.getMessage(), e); - } - } - - public List list(KubernetesV2Credentials credentials, List kinds, String namespace) { - String jobId = jobExecutor.startJob(new JobRequest(kubectlNamespacedGet(credentials, kinds, namespace)), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - if (status.getStdErr().contains(NO_RESOURCE_TYPE_ERROR)) { - throw new NoResourceTypeException(status.getStdErr()); - } else { - throw new KubectlException("Failed to read " + kinds + " from " + namespace + ": " + status.getStdErr()); - } - } - - if (status.getStdErr().contains("No resources found")) { - return new ArrayList<>(); - } - - try { - KubernetesManifestList list = gson.fromJson(status.getStdOut(), KubernetesManifestList.class); - return list.getItems(); - } catch (JsonSyntaxException e) { - throw new KubectlException("Failed to parse kubectl output: " + e.getMessage(), e); - } - } - - public Void deploy(KubernetesV2Credentials credentials, KubernetesManifest manifest) { - List command = kubectlAuthPrefix(credentials); - - String manifestAsJson = gson.toJson(manifest); - - // Read from stdin - command.add("apply"); - command.add("-f"); - command.add("-"); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(manifestAsJson.getBytes())); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Deploy failed: " + status.getStdErr()); - } - - return null; - } - - private JobStatus backoffWait(String jobId, boolean debug) { - long nextSleep = minSleepMillis; - long totalSleep = 0; - long interrupts = 0; - JobStatus jobStatus = null; - - while (totalSleep < timeoutMillis && interrupts < maxInterruptRetries) { - try { - Thread.sleep(nextSleep); - } catch (InterruptedException e) { - log.warn("{} was interrupted", jobId, e); - interrupts += 1; - } finally { - totalSleep += nextSleep; - nextSleep = Math.min(nextSleep * 2, maxSleepMillis); - } - - jobStatus = jobExecutor.updateJob(jobId); - if (jobStatus == null) { - log.warn("Job status couldn't be inferred from {}", jobId); - } else if (jobStatus.getState() == 
JobStatus.State.COMPLETED) { - if (debug) { - logDebugMessages(jobId, jobStatus); - } - return jobStatus; - } - } - - if (debug) { - logDebugMessages(jobId, jobStatus); - } - throw new KubectlException("Job took too long to complete"); - } - - private void logDebugMessages(String jobId, JobStatus jobStatus) { - if (jobStatus != null) { - log.info("{} stdout:\n{}", jobId, jobStatus.getStdOut()); - log.info("{} stderr:\n{}", jobId, jobStatus.getStdErr()); - } else { - log.info("{} job status not set"); - } - } - - private List kubectlAuthPrefix(KubernetesV2Credentials credentials) { - List command = new ArrayList<>(); - if (StringUtils.isNotEmpty(credentials.getKubectlExecutable())) { - command.add(credentials.getKubectlExecutable()); - } else { - command.add(executable); - } - - if (credentials.getKubectlRequestTimeoutSeconds() != null) { - command.add("--request-timeout=" + credentials.getKubectlRequestTimeoutSeconds()); - } - - if (credentials.isDebug()) { - command.add("-v"); - command.add("9"); - } - - if (!credentials.isServiceAccount()) { - if (credentials.getOAuthServiceAccount() != null && !credentials.getOAuthServiceAccount().isEmpty()) { - command.add("--token=" + getOAuthToken(credentials)); - } - - String kubeconfigFile = credentials.getKubeconfigFile(); - if (StringUtils.isNotEmpty(kubeconfigFile)) { - command.add("--kubeconfig=" + kubeconfigFile); - } - - String context = credentials.getContext(); - if (StringUtils.isNotEmpty(context)) { - command.add("--context=" + context); - } - } - - return command; - } - - private List kubectlLookupInfo(List command, KubernetesKind kind, String name, KubernetesSelectorList labelSelectors) { - if (StringUtils.isNotEmpty(name)) { - command.add(kind + "/" + name); - } else { - command.add(kind.toString()); - } - - if (labelSelectors != null && !labelSelectors.isEmpty()) { - command.add("-l=" + labelSelectors); - } - - return command; - } - - private List kubectlNamespacedAuthPrefix(KubernetesV2Credentials credentials, String namespace) { - List command = kubectlAuthPrefix(credentials); - if (StringUtils.isEmpty(namespace)) { - namespace = credentials.getDefaultNamespace(); - } - - if (StringUtils.isNotEmpty(namespace)) { - command.add("--namespace=" + namespace); - } - - return command; - } - - private List kubectlNamespacedGet(KubernetesV2Credentials credentials, List kind, String namespace) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - command.add("-o"); - command.add("json"); - - command.add("get"); - command.add(String.join(",", kind.stream().map(KubernetesKind::toString).collect(Collectors.toList()))); - - return command; - } - - private String getOAuthToken(KubernetesV2Credentials credentials) { - List command = new ArrayList<>(); - command.add(oAuthExecutable); - command.add("fetch"); - command.add("--json"); - command.add(credentials.getOAuthServiceAccount()); - command.addAll(credentials.getOAuthScopes()); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Could not fetch OAuth token: " + status.getStdErr()); - } - return status.getStdOut(); - } - - public Collection topPod(KubernetesV2Credentials credentials, String namespace) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - command.add("top"); - command.add("po"); - command.add("--containers"); - - - 
String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - throw new KubectlException("Could not read metrics: " + status.getStdErr()); - } - - Map result = new HashMap<>(); - - String output = status.getStdOut().trim(); - if (StringUtils.isEmpty(output)) { - log.warn("No output from `kubectl top` command, no metrics to report."); - return new ArrayList<>(); - } - - String[] lines = output.split("\n"); - if (lines.length <= 1) { - return new ArrayList<>(); - } - - // POD NAME CPU(cores) MEMORY(bytes) ... - String[] header = lines[0].trim().split("\\s+"); - - if (header.length <= 2) { - log.warn("Unexpected metric format -- no metrics to report based on table header {}.", header); - return new ArrayList<>(); - } - - // CPU(cores) MEMORY(bytes) - String[] metricKeys = Arrays.copyOfRange(header, 2, header.length); - for (int i = 1; i < lines.length; i++) { - String[] entry = lines[i].trim().split("\\s+"); - if (entry.length != header.length) { - log.warn("Entry {} does not match column width of {}, skipping", entry, header); - } - - String podName = entry[0]; - String containerName = entry[1]; - - Map metrics = new HashMap<>(); - for (int j = 0; j < metricKeys.length; j++) { - metrics.put(metricKeys[j], entry[j + 2]); - } - - ContainerMetric containerMetric = ContainerMetric.builder() - .containerName(containerName) - .metrics(metrics) - .build(); - - KubernetesPodMetric podMetric = result.getOrDefault(podName, KubernetesPodMetric.builder() - .podName(podName) - .containerMetrics(new ArrayList<>()) - .build()); - - podMetric.getContainerMetrics().add(containerMetric); - - result.put(podName, podMetric); - } - - return result.values(); - } - - - public Void patch(KubernetesV2Credentials credentials, KubernetesKind kind, String namespace, - String name, KubernetesPatchOptions options, KubernetesManifest manifest) { - List command = kubectlNamespacedAuthPrefix(credentials, namespace); - - command.add("patch"); - command.add(kind.toString()); - command.add(name); - - if (options.isRecord()) { - command.add("--record"); - } - - String mergeStrategy = options.getMergeStrategy().toString(); - if (StringUtils.isNotEmpty(mergeStrategy)) { - command.add("--type"); - command.add(mergeStrategy); - } - - command.add("--patch"); - command.add(gson.toJson(manifest)); - - String jobId = jobExecutor.startJob(new JobRequest(command), - System.getenv(), - new ByteArrayInputStream(new byte[0])); - - JobStatus status = backoffWait(jobId, credentials.isDebug()); - - if (status.getResult() != JobStatus.Result.SUCCESS) { - String errMsg = status.getStdErr(); - if (StringUtils.isEmpty(errMsg)) { - errMsg = status.getStdOut(); - } - throw new KubectlException("Patch failed: " + errMsg); - } - - return null; - } - - public static class NoResourceTypeException extends RuntimeException { - public NoResourceTypeException(String message) { - super(message); - } - } - - public static class KubectlException extends RuntimeException { - public KubectlException(String message) { - super(message); - } - - public KubectlException(String message, Throwable cause) { - super(message, cause); - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesDeleteManifestOperation.java 
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesDeleteManifestOperation.java deleted file mode 100644 index 5b7f3396d2d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesDeleteManifestOperation.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeleteManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.OperationResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanDelete; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - -import java.util.Collections; -import java.util.List; - -public class KubernetesDeleteManifestOperation implements AtomicOperation { - private final KubernetesDeleteManifestDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "DELETE_KUBERNETES_MANIFEST"; - - public KubernetesDeleteManifestOperation(KubernetesDeleteManifestDescription description, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public OperationResult operate(List priorOutputs) { - getTask().updateStatus(OP_NAME, "Starting delete operation..."); - List coordinates; - - if (description.isDynamic()) { - coordinates = description.getAllCoordinates(); - } else { - coordinates = Collections.singletonList(description.getPointCoordinates()); - } - - OperationResult result = new OperationResult(); - coordinates.forEach(c -> { - getTask().updateStatus(OP_NAME, "Looking up resource properties for " + c.getKind() + "..."); - KubernetesResourceProperties properties = registry.get(accountName, c.getKind()); - KubernetesHandler deployer = properties.getHandler(); - - if (!(deployer instanceof CanDelete)) { 
- throw new IllegalArgumentException("Resource with " + c + " does not support delete"); - } - - CanDelete canDelete = (CanDelete) deployer; - - getTask().updateStatus(OP_NAME, "Calling delete operation..."); - result.merge(canDelete.delete(credentials, - c.getNamespace(), - c.getName(), - description.getLabelSelectors(), - description.getOptions())); - }); - - return result; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesDeployManifestOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesDeployManifestOperation.java deleted file mode 100644 index df952610130..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesDeployManifestOperation.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer.ReplaceResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.KubernetesArtifactConverter; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeployManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestAnnotater; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestStrategy; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesSourceCapacity; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.OperationResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanScale; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.model.ArtifactProvider; -import com.netflix.spinnaker.clouddriver.names.NamerRegistry; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import com.netflix.spinnaker.moniker.Moniker; -import com.netflix.spinnaker.moniker.Namer; -import lombok.extern.slf4j.Slf4j; -import 
org.apache.commons.lang.StringUtils; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -@Slf4j -public class KubernetesDeployManifestOperation implements AtomicOperation { - private final KubernetesDeployManifestDescription description; - private final KubernetesV2Credentials credentials; - private final ArtifactProvider provider; - private final Namer namer; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "DEPLOY_KUBERNETES_MANIFEST"; - - public KubernetesDeployManifestOperation(KubernetesDeployManifestDescription description, KubernetesResourcePropertyRegistry registry, ArtifactProvider provider) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.registry = registry; - this.provider = provider; - this.accountName = description.getCredentials().getName(); - this.namer = NamerRegistry.lookup() - .withProvider(KubernetesCloudProvider.getID()) - .withAccount(accountName) - .withResource(KubernetesManifest.class); - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public OperationResult operate(List _unused) { - getTask().updateStatus(OP_NAME, "Beginning deployment of manifest..."); - - List inputManifests = description.getManifests(); - List deployManifests = new ArrayList<>(); - if (inputManifests == null || inputManifests.isEmpty()) { - log.warn("Relying on deprecated single manifest input: " + description.getManifest()); - inputManifests = Collections.singletonList(description.getManifest()); - } - - inputManifests = inputManifests.stream().filter(Objects::nonNull).collect(Collectors.toList()); - - List requiredArtifacts = description.getRequiredArtifacts(); - if (requiredArtifacts == null) { - requiredArtifacts = new ArrayList<>(); - } - - List optionalArtifacts = description.getOptionalArtifacts(); - if (optionalArtifacts == null) { - optionalArtifacts = new ArrayList<>(); - } - - List artifacts = new ArrayList<>(); - artifacts.addAll(requiredArtifacts); - artifacts.addAll(optionalArtifacts); - - Set boundArtifacts = new HashSet<>(); - - for (KubernetesManifest manifest : inputManifests) { - if (StringUtils.isEmpty(manifest.getNamespace()) && manifest.getKind().isNamespaced()) { - manifest.setNamespace(credentials.getDefaultNamespace()); - } - - KubernetesResourceProperties properties = findResourceProperties(manifest); - if (properties == null) { - throw new IllegalArgumentException("Unsupported Kubernetes object kind '" + manifest.getKind().toString() + "', unable to continue."); - } - KubernetesHandler deployer = properties.getHandler(); - if (deployer == null) { - throw new IllegalArgumentException("No deployer available for Kubernetes object kind '" + manifest.getKind().toString() + "', unable to continue."); - } - - getTask().updateStatus(OP_NAME, "Swapping out artifacts in " + manifest.getFullResourceName() + " from context..."); - ReplaceResult replaceResult = deployer.replaceArtifacts(manifest, artifacts, description.getAccount()); - deployManifests.add(replaceResult.getManifest()); - boundArtifacts.addAll(replaceResult.getBoundArtifacts()); - } - - Set unboundArtifacts = new HashSet<>(requiredArtifacts); - unboundArtifacts.removeAll(boundArtifacts); - - 
getTask().updateStatus(OP_NAME, "Checking if all requested artifacts were bound..."); - if (!unboundArtifacts.isEmpty()) { - throw new IllegalArgumentException("The following artifacts could not be bound: '" + unboundArtifacts + "' . Failing the stage as this is likely a configuration error."); - } - - getTask().updateStatus(OP_NAME, "Sorting manifests by priority..."); - deployManifests.sort(Comparator.comparingInt(m -> findResourceProperties(m).getHandler().deployPriority())); - getTask().updateStatus(OP_NAME, "Deploy order is: " + String.join(", ", deployManifests.stream().map(KubernetesManifest::getFullResourceName).collect(Collectors.toList()))); - - OperationResult result = new OperationResult(); - for (KubernetesManifest manifest : deployManifests) { - KubernetesResourceProperties properties = findResourceProperties(manifest); - KubernetesManifestStrategy strategy = KubernetesManifestAnnotater.getStrategy(manifest); - boolean versioned = isVersioned(properties, strategy); - boolean useSourceCapacity = isUseSourceCapacity(strategy); - - KubernetesArtifactConverter converter = versioned ? properties.getVersionedConverter() : properties.getUnversionedConverter(); - KubernetesHandler deployer = properties.getHandler(); - - Moniker moniker = cloneMoniker(description.getMoniker()); - if (StringUtils.isEmpty(moniker.getCluster())) { - moniker.setCluster(manifest.getFullResourceName()); - } - - Artifact artifact = converter.toArtifact(provider, manifest, description.getAccount()); - - String version = artifact.getVersion(); - if (StringUtils.isNotEmpty(version) && version.startsWith("v")) { - try { - moniker.setSequence(Integer.valueOf(version.substring(1))); - } catch (NumberFormatException e) { - log.warn("Malformed moniker version {}", version, e); - } - } - - getTask().updateStatus(OP_NAME, "Annotating manifest " + manifest.getFullResourceName() + " with artifact, relationships & moniker..."); - KubernetesManifestAnnotater.annotateManifest(manifest, artifact); - - if (useSourceCapacity && deployer instanceof CanScale) { - Double replicas = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials); - if (replicas != null) { - manifest.setReplicas(replicas); - } - } - - namer.applyMoniker(manifest, moniker); - manifest.setName(converter.getDeployedName(artifact)); - - getTask().updateStatus(OP_NAME, "Swapping out artifacts in " + manifest.getFullResourceName() + " from other deployments..."); - ReplaceResult replaceResult = deployer.replaceArtifacts(manifest, new ArrayList<>(result.getCreatedArtifacts()), description.getAccount()); - boundArtifacts.addAll(replaceResult.getBoundArtifacts()); - manifest = replaceResult.getManifest(); - - getTask().updateStatus(OP_NAME, "Submitting manifest " + manifest.getFullResourceName() + " to kubernetes master..."); - log.debug("Manifest in {} to be deployed: {}", accountName, manifest); - result.merge(deployer.deploy(credentials, manifest)); - - result.getCreatedArtifacts().add(artifact); - } - - result.getBoundArtifacts().addAll(boundArtifacts); - result.removeSensitiveKeys(registry, accountName); - - getTask().updateStatus(OP_NAME, "Deploy manifest task completed successfully."); - return result; - } - - private boolean isVersioned(KubernetesResourceProperties properties, KubernetesManifestStrategy strategy) { - if (strategy.getVersioned() != null) { - return strategy.getVersioned(); - } - - if (description.getVersioned() != null) { - return description.getVersioned(); - } - - return properties.isVersioned(); - } - - private boolean 
isUseSourceCapacity(KubernetesManifestStrategy strategy) { - if (strategy.getUseSourceCapacity() != null) { - return strategy.getUseSourceCapacity(); - } - - return false; - } - - // todo(lwander): move to kork - private static Moniker cloneMoniker(Moniker inp) { - return Moniker.builder() - .app(inp.getApp()) - .cluster(inp.getCluster()) - .stack(inp.getStack()) - .detail(inp.getDetail()) - .sequence(inp.getSequence()) - .build(); - } - - private KubernetesResourceProperties findResourceProperties(KubernetesManifest manifest) { - KubernetesKind kind = manifest.getKind(); - getTask().updateStatus(OP_NAME, "Finding deployer for " + kind + "..."); - return registry.get(accountName, kind); - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesPatchManifestOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesPatchManifestOperation.java deleted file mode 100644 index 0a45b7a5340..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesPatchManifestOperation.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
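The version-to-sequence parse in the deploy loop above maps a versioned artifact like "v3" to moniker sequence 3 and deliberately tolerates malformed versions. A minimal, self-contained sketch of just that parse:

```java
public class MonikerSequenceParse {
  // Mirrors the deleted deploy loop: "v3" -> 3; anything malformed is skipped
  // with a warning rather than failing the whole deploy.
  static Integer parseSequence(String version) {
    if (version != null && version.startsWith("v")) {
      try {
        return Integer.valueOf(version.substring(1));
      } catch (NumberFormatException e) {
        System.err.println("Malformed moniker version " + version); // deleted code logs via @Slf4j
      }
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(parseSequence("v3"));   // 3
    System.out.println(parseSequence("vOld")); // null, after a warning
  }
}
```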
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.google.common.collect.Sets; -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactReplacer.ReplaceResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesPatchManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.OperationResult; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import lombok.extern.slf4j.Slf4j; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -@Slf4j -public class KubernetesPatchManifestOperation implements AtomicOperation { - private final KubernetesPatchManifestDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "PATCH_KUBERNETES_MANIFEST"; - - public KubernetesPatchManifestOperation(KubernetesPatchManifestDescription description, - KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.registry = registry; - this.accountName = description.getCredentials().getName(); - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public OperationResult operate(List _unused) { - updateStatus("Beginning patching of manifest"); - KubernetesCoordinates objToPatch = description.getPointCoordinates(); - - updateStatus("Finding patch handler for " + objToPatch + "..."); - KubernetesHandler patchHandler = findPatchHandler(objToPatch); - - updateStatus("Swapping out artifacts in " + objToPatch + " from context..."); - ReplaceResult replaceResult = replaceArtifacts(objToPatch, patchHandler); - - updateStatus("Submitting manifest " + description.getManifestName() + " to Kubernetes master..."); - OperationResult result = new OperationResult(); - result.merge(patchHandler.patch(credentials, objToPatch.getNamespace(), objToPatch.getName(), - description.getOptions(), replaceResult.getManifest())); - - result.getBoundArtifacts().addAll(replaceResult.getBoundArtifacts()); - result.removeSensitiveKeys(registry, accountName); - return result; - } - - private void updateStatus(String status) { - getTask().updateStatus(OP_NAME, status); - } - - private ReplaceResult replaceArtifacts(KubernetesCoordinates objToPatch, - KubernetesHandler patchHandler) { - List allArtifacts = description.getAllArtifacts() == null ? 
new ArrayList<>() : - description.getAllArtifacts(); - - ReplaceResult replaceResult = patchHandler.replaceArtifacts(description.getPatchBody(), - allArtifacts, objToPatch.getNamespace(), description.getAccount()); - - if (description.getRequiredArtifacts() != null) { - Set unboundArtifacts = Sets.difference(new HashSet<>(description.getRequiredArtifacts()), - replaceResult.getBoundArtifacts()); - if (!unboundArtifacts.isEmpty()) { - throw new IllegalArgumentException("The following required artifacts could not be bound: '" + - unboundArtifacts + "' . Failing the stage as this is likely a configuration error."); - } - } - return replaceResult; - } - - private KubernetesHandler findPatchHandler(KubernetesCoordinates objToPatch) { - KubernetesResourceProperties properties = registry.get(accountName, objToPatch.getKind()); - if (properties == null) { - throw new IllegalArgumentException("Unsupported Kubernetes object kind '" + - objToPatch.getKind() + "', unable to continue"); - } - KubernetesHandler patchHandler = properties.getHandler(); - if (patchHandler == null) { - throw new IllegalArgumentException("No patch handler available for Kubernetes object kind ' " - + objToPatch.getKind() + "', unable to continue"); - } - return patchHandler; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesPauseRolloutManifestOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesPauseRolloutManifestOperation.java deleted file mode 100644 index a80da936acb..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesPauseRolloutManifestOperation.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
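The required-artifact guard in the patch operation above enforces the same contract as the deploy operation's, expressed with Guava's Sets.difference. A minimal sketch, again with hypothetical String ids in place of Artifact:

```java
import com.google.common.collect.Sets;
import java.util.Set;

public class PatchArtifactGuard {
  public static void main(String[] args) {
    Set<String> required = Set.of("docker:app-image");
    Set<String> bound = Set.of("http:patch-body");

    // Sets.difference returns a live view; non-empty means a configuration
    // error, and the deleted code throws IllegalArgumentException in that case.
    Set<String> unbound = Sets.difference(required, bound);
    System.out.println(unbound); // [docker:app-image] -> the stage would fail
  }
}
```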
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesPauseRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanPauseRollout; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - -import java.util.List; - -public class KubernetesPauseRolloutManifestOperation implements AtomicOperation { - private final KubernetesPauseRolloutManifestDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "PAUSE_ROLLOUT_KUBERNETES_MANIFEST"; - - public KubernetesPauseRolloutManifestOperation(KubernetesPauseRolloutManifestDescription description, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public Void operate(List priorOutputs) { - getTask().updateStatus(OP_NAME, "Starting pause rollout operation..."); - KubernetesCoordinates coordinates = description.getPointCoordinates(); - - getTask().updateStatus(OP_NAME, "Looking up resource properties..."); - KubernetesResourceProperties properties = registry.get(accountName, coordinates.getKind()); - KubernetesHandler deployer = properties.getHandler(); - - if (!(deployer instanceof CanPauseRollout)) { - throw new IllegalArgumentException("Resource with " + coordinates + " does not support pause rollout"); - } - - CanPauseRollout canPauseRollout = (CanPauseRollout) deployer; - - getTask().updateStatus(OP_NAME, "Calling pause rollout operation..."); - canPauseRollout.pauseRollout(credentials, - coordinates.getNamespace(), - coordinates.getName()); - - return null; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesResumeRolloutManifestOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesResumeRolloutManifestOperation.java deleted file mode 100644 index 3d248fcc979..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesResumeRolloutManifestOperation.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
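The pause operation above, like the resume, scale, undo-rollout, and resize operations that follow, uses a capability-interface dispatch: the registry hands back a generic handler, and the operation downcasts to a narrow interface, failing fast when the kind does not support the verb. A minimal sketch of the pattern, with all type names standing in for the deleted Spinnaker classes:

```java
interface Handler {}

interface CanPause extends Handler {
  void pauseRollout(String namespace, String name);
}

public class CapabilityDispatch {
  static void pause(Handler handler, String namespace, String name) {
    // Same fail-fast downcast as the deleted operations.
    if (!(handler instanceof CanPause)) {
      throw new IllegalArgumentException("Resource does not support pause rollout");
    }
    ((CanPause) handler).pauseRollout(namespace, name);
  }

  public static void main(String[] args) {
    CanPause deployment = (ns, name) -> System.out.println("paused " + ns + "/" + name);
    pause(deployment, "default", "my-deployment");
  }
}
```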
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesResumeRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanResumeRollout; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - -import java.util.List; - -public class KubernetesResumeRolloutManifestOperation implements AtomicOperation { - private final KubernetesResumeRolloutManifestDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "RESUME_ROLLOUT_KUBERNETES_MANIFEST"; - - public KubernetesResumeRolloutManifestOperation(KubernetesResumeRolloutManifestDescription description, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public Void operate(List priorOutputs) { - getTask().updateStatus(OP_NAME, "Starting resume rollout operation..."); - KubernetesCoordinates coordinates = description.getPointCoordinates(); - - getTask().updateStatus(OP_NAME, "Looking up resource properties..."); - KubernetesResourceProperties properties = registry.get(accountName, coordinates.getKind()); - KubernetesHandler deployer = properties.getHandler(); - - if (!(deployer instanceof CanResumeRollout)) { - throw new IllegalArgumentException("Resource with " + coordinates + " does not support resume rollout"); - } - - CanResumeRollout canResumeRollout = (CanResumeRollout) deployer; - - getTask().updateStatus(OP_NAME, "Calling resume rollout operation..."); - canResumeRollout.resumeRollout(credentials, - coordinates.getNamespace(), - coordinates.getName()); - - return null; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesScaleManifestOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesScaleManifestOperation.java deleted file mode 100644 index bd800d2496d..00000000000 --- 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesScaleManifestOperation.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesScaleManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanScale; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - -import java.util.List; - -public class KubernetesScaleManifestOperation implements AtomicOperation { - private final KubernetesScaleManifestDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "SCALE_KUBERNETES_MANIFEST"; - - public KubernetesScaleManifestOperation(KubernetesScaleManifestDescription description, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public Void operate(List priorOutputs) { - getTask().updateStatus(OP_NAME, "Starting scale operation..."); - KubernetesCoordinates coordinates = description.getPointCoordinates(); - - getTask().updateStatus(OP_NAME, "Looking up resource properties..."); - KubernetesResourceProperties properties = registry.get(accountName, coordinates.getKind()); - KubernetesHandler deployer = properties.getHandler(); - - if (!(deployer instanceof CanScale)) { - throw new IllegalArgumentException("Resource with " + coordinates + " does not support scale"); - } - - CanScale canScale = (CanScale) deployer; - - getTask().updateStatus(OP_NAME, "Calling scale operation..."); - canScale.scale(credentials, - coordinates.getNamespace(), - coordinates.getName(), - description.getReplicas()); - - return null; - } -} - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesUndoRolloutManifestOperation.java 
b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesUndoRolloutManifestOperation.java deleted file mode 100644 index 255a857ca8d..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/manifest/KubernetesUndoRolloutManifestOperation.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesUndoRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanUndoRollout; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - -import java.util.List; - -public class KubernetesUndoRolloutManifestOperation implements AtomicOperation { - private final KubernetesUndoRolloutManifestDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "UNDO_ROLLOUT_KUBERNETES_MANIFEST"; - - public KubernetesUndoRolloutManifestOperation(KubernetesUndoRolloutManifestDescription description, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public Void operate(List priorOutputs) { - getTask().updateStatus(OP_NAME, "Starting undo rollout operation..."); - KubernetesCoordinates coordinates = description.getPointCoordinates(); - - getTask().updateStatus(OP_NAME, "Looking up resource properties..."); - KubernetesResourceProperties properties = registry.get(accountName, coordinates.getKind()); - KubernetesHandler deployer = properties.getHandler(); - - if (!(deployer instanceof CanUndoRollout)) { - throw new IllegalArgumentException("Resource with " + coordinates + " does not support undo rollout"); - } - - CanUndoRollout canUndoRollout = (CanUndoRollout) deployer; - - Integer revision = description.getRevision(); - if (description.getNumRevisionsBack() != null) { - 
getTask().updateStatus(OP_NAME, "Looking up rollout history..."); - List revisions = canUndoRollout.historyRollout(credentials, - coordinates.getNamespace(), - coordinates.getName()); - - revisions.sort(Integer::compareTo); - int numRevisions = revisions.size(); - int targetRevisionIndex = numRevisions - description.getNumRevisionsBack() - 1; - if (targetRevisionIndex < 0) { - throw new IllegalArgumentException("There are " + numRevisions + " revision(s) in total, cannot rollback " + description.getNumRevisionsBack()); - } - - revision = revisions.get(targetRevisionIndex); - getTask().updateStatus(OP_NAME, "Picked revision " + revision + "..."); - } - - getTask().updateStatus(OP_NAME, "Calling undo rollout operation..."); - canUndoRollout.undoRollout(credentials, - coordinates.getNamespace(), - coordinates.getName(), - revision); - - return null; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/servergroup/KubernetesResizeServerGroupOperation.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/servergroup/KubernetesResizeServerGroupOperation.java deleted file mode 100644 index 404e246d9fa..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/servergroup/KubernetesResizeServerGroupOperation.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
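The revision selection in the undo-rollout operation above is an index into the sorted revision history: with revisions [1, 2, 4, 7] and numRevisionsBack = 2, the target index is 4 - 2 - 1 = 1, i.e. revision 2. A runnable sketch of just that arithmetic:

```java
import java.util.Arrays;
import java.util.List;

public class RevisionPick {
  static int pick(List<Integer> revisions, int numRevisionsBack) {
    revisions.sort(Integer::compareTo);
    int targetIndex = revisions.size() - numRevisionsBack - 1;
    if (targetIndex < 0) {
      throw new IllegalArgumentException("There are " + revisions.size()
          + " revision(s) in total, cannot rollback " + numRevisionsBack);
    }
    return revisions.get(targetIndex);
  }

  public static void main(String[] args) {
    System.out.println(pick(Arrays.asList(7, 1, 4, 2), 2)); // prints 2
  }
}
```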
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.servergroup; - -import com.netflix.spinnaker.clouddriver.data.task.Task; -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourceProperties; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.servergroup.KubernetesResizeServerGroupDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.CanResize; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesHandler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; - -import java.util.List; - -public class KubernetesResizeServerGroupOperation implements AtomicOperation { - private final KubernetesResizeServerGroupDescription description; - private final KubernetesV2Credentials credentials; - private final KubernetesResourcePropertyRegistry registry; - private final String accountName; - private static final String OP_NAME = "RESIZE_KUBERNETES_SERVER_GROUP"; - - public KubernetesResizeServerGroupOperation(KubernetesResizeServerGroupDescription description, KubernetesResourcePropertyRegistry registry) { - this.description = description; - this.credentials = (KubernetesV2Credentials) description.getCredentials().getCredentials(); - this.accountName = description.getCredentials().getName(); - this.registry = registry; - } - - private static Task getTask() { - return TaskRepository.threadLocalTask.get(); - } - - @Override - public Void operate(List priorOutputs) { - getTask().updateStatus(OP_NAME, "Starting resize operation..."); - KubernetesCoordinates coordinates = description.getCoordinates(); - - getTask().updateStatus(OP_NAME, "Looking up resource properties..."); - KubernetesResourceProperties properties = registry.get(accountName, coordinates.getKind()); - KubernetesHandler deployer = properties.getHandler(); - - if (!(deployer instanceof CanResize)) { - throw new IllegalArgumentException("Resource with " + coordinates + " does not support resize"); - } - - CanResize canResize = (CanResize) deployer; - - getTask().updateStatus(OP_NAME, "Calling resize operation..."); - canResize.resize(credentials, coordinates.getNamespace(), coordinates.getName(), description.getCapacity()); - - return null; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesApiException.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesApiException.java deleted file mode 100644 index fdcaf110088..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesApiException.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.security; - -import com.fasterxml.jackson.databind.ObjectMapper; -import io.kubernetes.client.ApiException; -import io.kubernetes.client.models.V1Status; -import lombok.extern.slf4j.Slf4j; - -import java.io.IOException; - -@Slf4j -public class KubernetesApiException extends RuntimeException { - private static final ObjectMapper mapper = new ObjectMapper(); - - public KubernetesApiException(String operation, Throwable e) { - super(String.format("%s failed: %s", operation, e.getMessage()), e); - } - - public KubernetesApiException(String operation, ApiException e) { - super(String.format("%s failed (%d %s): %s", operation, e.getCode(), e.getMessage(), message(e)), e); - } - - private static String message(ApiException e) { - String responseBody = e.getResponseBody(); - try { - V1Status status = mapper.readValue(responseBody, V1Status.class); - return status.getMessage(); - } catch (IOException ioe) { - log.warn("ApiException encountered that can't be parsed into a V1Status", e); - return responseBody; - } - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelector.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelector.java deleted file mode 100644 index 77069f3b9d5..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelector.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
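KubernetesApiException above prefers the human-readable message from the API server's Status payload and falls back to the raw response body when that payload does not parse. A minimal sketch of the extraction, with a local Status class standing in for io.kubernetes.client's V1Status:

```java
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.ObjectMapper;

public class StatusMessage {
  @JsonIgnoreProperties(ignoreUnknown = true)
  public static class Status {
    public String message;
  }

  static String message(String responseBody) {
    try {
      return new ObjectMapper().readValue(responseBody, Status.class).message;
    } catch (Exception e) {
      return responseBody; // not a Status payload: surface the raw body instead
    }
  }

  public static void main(String[] args) {
    System.out.println(message("{\"kind\":\"Status\",\"message\":\"pods \\\"web\\\" not found\"}"));
  }
}
```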
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.security;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import lombok.Data;
-import org.apache.commons.lang3.StringUtils;
-
-import javax.validation.constraints.NotNull;
-import java.util.Collections;
-import java.util.List;
-
-@Data
-public class KubernetesSelector {
-  private enum Kind {
-    ANY,
-    EQUALS,
-    NOT_EQUALS,
-    CONTAINS,
-    NOT_CONTAINS,
-    EXISTS,
-    NOT_EXISTS,
-  }
-
-  final private Kind kind;
-  final private String key;
-  final private List<String> values;
-
-  @JsonCreator
-  private KubernetesSelector(
-      @JsonProperty("kind") @NotNull Kind kind,
-      @JsonProperty("key") String key,
-      @JsonProperty("values") List<String> values
-  ) {
-    if (StringUtils.isEmpty(key) && kind != Kind.ANY) {
-      throw new IllegalArgumentException("Only an 'any' selector can have no key specified");
-    }
-
-    this.kind = kind;
-    this.key = key;
-    this.values = values;
-  }
-
-  @Override
-  public String toString() {
-    switch (kind) {
-      case ANY:
-        return "";
-      case EQUALS:
-        return String.format("%s = %s", key, values.get(0));
-      case NOT_EQUALS:
-        return String.format("%s != %s", key, values.get(0));
-      case CONTAINS:
-        return String.format("%s in (%s)", key, String.join(", ", values));
-      case NOT_CONTAINS:
-        return String.format("%s notin (%s)", key, String.join(", ", values));
-      case EXISTS:
-        return String.format("%s", key);
-      case NOT_EXISTS:
-        return String.format("!%s", key);
-      default:
-        throw new IllegalStateException("Unknown kind " + kind);
-    }
-  }
-
-  public static KubernetesSelector any() {
-    return new KubernetesSelector(Kind.ANY, null, null);
-  }
-
-  public static KubernetesSelector equals(String key, String value) {
-    return new KubernetesSelector(Kind.EQUALS, key, Collections.singletonList(value));
-  }
-
-  public static KubernetesSelector notEquals(String key, String value) {
-    return new KubernetesSelector(Kind.NOT_EQUALS, key, Collections.singletonList(value));
-  }
-
-  public static KubernetesSelector contains(String key, List<String> values) {
-    if (values == null || values.isEmpty()) {
-      throw new IllegalArgumentException("At least one value must be supplied to a 'contains' selector");
-    }
-
-    return new KubernetesSelector(Kind.CONTAINS, key, values);
-  }
-
-  public static KubernetesSelector notContains(String key, List<String> values) {
-    if (values == null || values.isEmpty()) {
-      throw new IllegalArgumentException("At least one value must be supplied to a 'notcontains' selector");
-    }
-
-    return new KubernetesSelector(Kind.NOT_CONTAINS, key, values);
-  }
-
-  public static KubernetesSelector exists(String key) {
-    return new KubernetesSelector(Kind.EXISTS, key, null);
-  }
-
-  public static KubernetesSelector notExists(String key) {
-    return new KubernetesSelector(Kind.NOT_EXISTS, key, null);
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelectorList.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelectorList.java
deleted file mode 100644
index 8c6b75fb36b..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelectorList.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.security;
-
-import lombok.Data;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.stream.Collectors;
-
-@Data
-public class KubernetesSelectorList {
-  private final List<KubernetesSelector> selectors = new ArrayList<>();
-
-  public KubernetesSelectorList() { }
-
-  public KubernetesSelectorList(KubernetesSelector... selectors) {
-    this.selectors.addAll(Arrays.asList(selectors));
-  }
-
-  public KubernetesSelectorList addSelector(KubernetesSelector selector) {
-    selectors.add(selector);
-    return this;
-  }
-
-  public boolean isEmpty() {
-    return selectors.isEmpty();
-  }
-
-  @Override
-  public String toString() {
-    return String.join(",", selectors.stream().map(KubernetesSelector::toString).collect(Collectors.toList()));
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesV2Credentials.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesV2Credentials.java
deleted file mode 100644
index fa41a7120f9..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesV2Credentials.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
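Taken together, the two selector classes above render the exact label-selector grammar kubectl accepts for -l/--selector. A usage sketch compiled against the deleted classes:

```java
import java.util.Arrays;

public class SelectorDemo {
  public static void main(String[] args) {
    KubernetesSelectorList selectors = new KubernetesSelectorList(
        KubernetesSelector.equals("app", "web"),
        KubernetesSelector.contains("tier", Arrays.asList("frontend", "canary")),
        KubernetesSelector.notExists("legacy"));

    // Prints: app = web,tier in (frontend, canary),!legacy
    System.out.println(selectors);
  }
}
```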
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.security;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Suppliers;
-import com.netflix.spectator.api.Clock;
-import com.netflix.spectator.api.Registry;
-import com.netflix.spinnaker.clouddriver.kubernetes.config.CustomKubernetesResource;
-import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesCachingPolicy;
-import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPatchOptions;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesPodMetric;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.job.KubectlJobExecutor.KubectlException;
-import io.kubernetes.client.models.V1DeleteOptions;
-import lombok.Getter;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import javax.validation.constraints.NotNull;
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-
-@Slf4j
-public class KubernetesV2Credentials implements KubernetesCredentials {
-  private final KubectlJobExecutor jobExecutor;
-  private final Registry registry;
-  private final Clock clock;
-  private final String accountName;
-  @Getter
-  private final List<String> namespaces;
-  @Getter
-  private final List<String> omitNamespaces;
-  private final List<KubernetesKind> kinds;
-  private final List<KubernetesKind> omitKinds;
-  @Getter private final boolean serviceAccount;
-  @Getter private boolean metrics;
-  @Getter private final List<KubernetesCachingPolicy> cachingPolicies;
-
-  // TODO(lwander) make configurable
-  private final static int namespaceExpirySeconds = 30;
-
-  private final com.google.common.base.Supplier<List<String>> liveNamespaceSupplier;
-
-  @Getter
-  private final List<CustomKubernetesResource> customResources;
-
-  // remove when kubectl is no longer a dependency
-  @Getter
-  private final String kubectlExecutable;
-
-  @Getter
-  private final Integer kubectlRequestTimeoutSeconds;
-
-  // remove when kubectl is no longer a dependency
-  @Getter
-  private final String kubeconfigFile;
-
-  // remove when kubectl is no longer a dependency
-  @Getter
-  private final String context;
-
-  @JsonIgnore
-  @Getter
-  private final String oAuthServiceAccount;
-
-  @JsonIgnore
-  @Getter
-  private final List<String> oAuthScopes;
-
-  private final String defaultNamespace = "default";
-  private String cachedDefaultNamespace;
-
-  private final Path serviceAccountNamespacePath = Paths.get("/var/run/secrets/kubernetes.io/serviceaccount/namespace");
-
-  public boolean isValidKind(KubernetesKind kind) {
-    if (kind == KubernetesKind.NONE) {
-      return false;
-    } else if (!this.kinds.isEmpty()) {
-      return kinds.contains(kind);
-    } else if (!this.omitKinds.isEmpty()) {
-      return !omitKinds.contains(kind);
-    } else {
-      return true;
-    }
-  }
-
-  public String getDefaultNamespace() {
-    if (StringUtils.isEmpty(cachedDefaultNamespace)) {
-      cachedDefaultNamespace = lookupDefaultNamespace();
-    }
-
-    return cachedDefaultNamespace;
-  }
-
-  public String lookupDefaultNamespace() {
-    String namespace = defaultNamespace;
-    try {
-      Optional<String> serviceAccountNamespace = Files.lines(serviceAccountNamespacePath, StandardCharsets.UTF_8).findFirst();
-      namespace = serviceAccountNamespace.orElse("");
-    } catch (IOException e) {
-      try {
-        namespace = jobExecutor.defaultNamespace(this);
-      } catch (KubectlException ke) {
-        log.debug("Failure looking up desired namespace, defaulting to {}", defaultNamespace, ke);
-      }
-    } catch (Exception e) {
-      log.debug("Error encountered looking up default namespace, defaulting to {}", defaultNamespace, e);
-    }
-    if (StringUtils.isEmpty(namespace)) {
-      namespace = defaultNamespace;
-    }
-    return namespace;
-  }
-
-  @Getter
-  private final boolean debug;
-
-  public static class Builder {
-    String accountName;
-    String kubeconfigFile;
-    String context;
-    String kubectlExecutable;
-    Integer kubectlRequestTimeoutSeconds;
-    String oAuthServiceAccount;
-    List<String> oAuthScopes;
-    String userAgent;
-    List<String> namespaces = new ArrayList<>();
-    List<String> omitNamespaces = new ArrayList<>();
-    Registry registry;
-    KubectlJobExecutor jobExecutor;
-    List<CustomKubernetesResource> customResources;
-    List<KubernetesCachingPolicy> cachingPolicies;
-    List<String> kinds;
-    List<String> omitKinds;
-    boolean debug;
-    boolean serviceAccount;
-    boolean metrics;
-
-    public Builder accountName(String accountName) {
-      this.accountName = accountName;
-      return this;
-    }
-
-    public Builder kubeconfigFile(String kubeconfigFile) {
-      this.kubeconfigFile = kubeconfigFile;
-      return this;
-    }
-
-    public Builder kubectlExecutable(String kubectlExecutable) {
-      this.kubectlExecutable = kubectlExecutable;
-      return this;
-    }
-
-    public Builder kubectlRequestTimeoutSeconds(Integer kubectlRequestTimeoutSeconds) {
-      this.kubectlRequestTimeoutSeconds = kubectlRequestTimeoutSeconds;
-      return this;
-    }
-
-    public Builder context(String context) {
-      this.context = context;
-      return this;
-    }
-
-    public Builder userAgent(String userAgent) {
-      this.userAgent = userAgent;
-      return this;
-    }
-
-    public Builder namespaces(List<String> namespaces) {
-      this.namespaces = namespaces;
-      return this;
-    }
-
-    public Builder omitNamespaces(List<String> omitNamespaces) {
-      this.omitNamespaces = omitNamespaces;
-      return this;
-    }
-
-    public Builder registry(Registry registry) {
-      this.registry = registry;
-      return this;
-    }
-
-    public Builder jobExecutor(KubectlJobExecutor jobExecutor) {
-      this.jobExecutor = jobExecutor;
-      return this;
-    }
-
-    public Builder cachingPolicies(List<KubernetesCachingPolicy> cachingPolicies) {
-      this.cachingPolicies = cachingPolicies;
-      return this;
-    }
-
-    public Builder customResources(List<CustomKubernetesResource> customResources) {
-      this.customResources = customResources;
-      return this;
-    }
-
-    public Builder debug(boolean debug) {
-      this.debug = debug;
-      return this;
-    }
-
-    public Builder serviceAccount(boolean serviceAccount) {
-      this.serviceAccount = serviceAccount;
-      return this;
-    }
-
-    public Builder oAuthServiceAccount(String oAuthServiceAccount) {
-      this.oAuthServiceAccount = oAuthServiceAccount;
-      return this;
-    }
-
-    public Builder oAuthScopes(List<String> oAuthScopes) {
-      this.oAuthScopes = oAuthScopes;
-      return this;
-    }
-
-    public Builder kinds(List<String> kinds) {
-      this.kinds = kinds;
-      return this;
-    }
-
-    public Builder omitKinds(List<String> omitKinds) {
-      this.omitKinds = omitKinds;
-      return this;
-    }
-
-    public Builder metrics(boolean metrics) {
-      this.metrics = metrics;
-      return this;
-    }
-
-    public KubernetesV2Credentials build() {
-      namespaces = namespaces == null ? new ArrayList<>() : namespaces;
-      omitNamespaces = omitNamespaces == null ? new ArrayList<>() : omitNamespaces;
-      customResources = customResources == null ? new ArrayList<>() : customResources;
-      kinds = kinds == null ? new ArrayList<>() : kinds;
-      omitKinds = omitKinds == null ? new ArrayList<>() : omitKinds;
-      cachingPolicies = cachingPolicies == null ? new ArrayList<>() : cachingPolicies;
-
-      return new KubernetesV2Credentials(
-          accountName,
-          jobExecutor,
-          namespaces,
-          omitNamespaces,
-          registry,
-          kubeconfigFile,
-          kubectlExecutable,
-          kubectlRequestTimeoutSeconds,
-          context,
-          oAuthServiceAccount,
-          oAuthScopes,
-          serviceAccount,
-          customResources,
-          cachingPolicies,
-          KubernetesKind.registeredStringList(kinds),
-          KubernetesKind.registeredStringList(omitKinds),
-          metrics,
-          debug
-      );
-    }
-  }
-
-  private KubernetesV2Credentials(@NotNull String accountName,
-                                  @NotNull KubectlJobExecutor jobExecutor,
-                                  @NotNull List<String> namespaces,
-                                  @NotNull List<String> omitNamespaces,
-                                  @NotNull Registry registry,
-                                  String kubeconfigFile,
-                                  String kubectlExecutable,
-                                  Integer kubectlRequestTimeoutSeconds,
-                                  String context,
-                                  String oAuthServiceAccount,
-                                  List<String> oAuthScopes,
-                                  boolean serviceAccount,
-                                  @NotNull List<CustomKubernetesResource> customResources,
-                                  @NotNull List<KubernetesCachingPolicy> cachingPolicies,
-                                  @NotNull List<KubernetesKind> kinds,
-                                  @NotNull List<KubernetesKind> omitKinds,
-                                  boolean metrics,
-                                  boolean debug) {
-    this.registry = registry;
-    this.clock = registry.clock();
-    this.accountName = accountName;
-    this.namespaces = namespaces;
-    this.omitNamespaces = omitNamespaces;
-    this.jobExecutor = jobExecutor;
-    this.debug = debug;
-    this.kubectlExecutable = kubectlExecutable;
-    this.kubectlRequestTimeoutSeconds = kubectlRequestTimeoutSeconds;
-    this.kubeconfigFile = kubeconfigFile;
-    this.context = context;
-    this.oAuthServiceAccount = oAuthServiceAccount;
-    this.oAuthScopes = oAuthScopes;
-    this.serviceAccount = serviceAccount;
-    this.customResources = customResources;
-    this.cachingPolicies = cachingPolicies;
-    this.kinds = kinds;
-    this.metrics = metrics;
-    this.omitKinds = omitKinds;
-
-    this.liveNamespaceSupplier = Suppliers.memoizeWithExpiration(() -> jobExecutor.list(this, Collections.singletonList(KubernetesKind.NAMESPACE), "")
-        .stream()
-        .map(KubernetesManifest::getName)
-        .collect(Collectors.toList()), namespaceExpirySeconds, TimeUnit.SECONDS);
-
-    determineOmitKinds();
-  }
-
-  @Override
-  public List<String> getDeclaredNamespaces() {
-    List<String> result;
-    if (!namespaces.isEmpty()) {
-      result = namespaces;
-    } else {
-      try {
-        result = liveNamespaceSupplier.get();
-
-      } catch (KubectlException e) {
-        log.warn("Could not list namespaces for account {}: {}", accountName, e.getMessage());
-        return new ArrayList<>();
-      }
-    }
-
-    if (!omitNamespaces.isEmpty()) {
-      result = result.stream()
-          .filter(n -> !omitNamespaces.contains(n))
-          .collect(Collectors.toList());
-    }
-
-    return result;
-  }
-
-  private void determineOmitKinds() {
-    List<String> namespaces = getDeclaredNamespaces();
-
-    if (namespaces.isEmpty()) {
-      log.warn("There are no namespaces configured (or loadable) -- please check that the list of 'omitNamespaces' for account '"
-          + accountName + "' doesn't prevent access from all namespaces in this cluster, or that the cluster is reachable.");
-      return;
-    }
-
-    // we are making the assumption that the roles granted to spinnaker for this account in all namespaces are identical.
-    // otherwise, checking all namespaces for all kinds is too expensive in large clusters (imagine a cluster with 100s of namespaces).
-    String checkNamespace = namespaces.get(0);
-    List<KubernetesKind> allKinds = KubernetesKind.getValues();
-
-    log.info("Checking permissions on configured kinds for account {}... {}", accountName, allKinds);
-    for (KubernetesKind kind : allKinds) {
-      if (kind == KubernetesKind.NONE || omitKinds.contains(kind)) {
-        continue;
-      }
-
-      try {
-        log.info("Checking if {} is readable...", kind);
-        if (kind.isNamespaced()) {
-          list(kind, checkNamespace);
-        } else {
-          list(kind, null);
-        }
-      } catch (Exception e) {
-        log.info("Kind '{}' will not be cached in account '{}' for reason: '{}'", kind, accountName, e.getMessage());
-        log.debug("Reading kind '{}' failed with exception: ", kind, e);
-        omitKinds.add(kind);
-      }
-    }
-
-    if (metrics) {
-      try {
-        log.info("Checking if pod metrics are readable...");
-        topPod(checkNamespace);
-      } catch (Exception e) {
-        log.warn("Could not read pod metrics in account '{}' for reason: {}", accountName, e.getMessage());
-        log.debug("Reading logs failed with exception: ", e);
-        metrics = false;
-      }
-    }
-  }
-
-  public KubernetesManifest get(KubernetesKind kind, String namespace, String name) {
-    return runAndRecordMetrics("get", kind, namespace, () -> jobExecutor.get(this, kind, namespace, name));
-  }
-
-  public List<KubernetesManifest> list(KubernetesKind kind, String namespace) {
-    return runAndRecordMetrics("list", kind, namespace, () -> jobExecutor.list(this, Collections.singletonList(kind), namespace));
-  }
-
-  public List<KubernetesManifest> list(List<KubernetesKind> kinds, String namespace) {
-    if (kinds.isEmpty()) {
-      return new ArrayList<>();
-    } else {
-      return runAndRecordMetrics("list", kinds, namespace, () -> jobExecutor.list(this, kinds, namespace));
-    }
-  }
-
-  public String logs(String namespace, String podName, String containerName) {
-    return runAndRecordMetrics("logs", KubernetesKind.POD, namespace, () -> jobExecutor.logs(this, namespace, podName, containerName));
-  }
-
-  public void scale(KubernetesKind kind, String namespace, String name, int replicas) {
-    runAndRecordMetrics("scale", kind, namespace, () -> jobExecutor.scale(this, kind, namespace, name, replicas));
-  }
-
-  public List<String> delete(KubernetesKind kind, String namespace, String name, KubernetesSelectorList labelSelectors, V1DeleteOptions options) {
-    return runAndRecordMetrics("delete", kind, namespace, () -> jobExecutor.delete(this, kind, namespace, name, labelSelectors, options));
-  }
-
-  public Collection<KubernetesPodMetric> topPod(String namespace) {
-    return runAndRecordMetrics("top", KubernetesKind.POD, namespace, () -> jobExecutor.topPod(this, namespace));
-  }
-
-  public void deploy(KubernetesManifest manifest) {
-    runAndRecordMetrics("deploy", manifest.getKind(), manifest.getNamespace(), () -> jobExecutor.deploy(this, manifest));
-  }
-
-  public List<Integer> historyRollout(KubernetesKind kind, String namespace, String name) {
-    return runAndRecordMetrics("historyRollout", kind, namespace, () -> jobExecutor.historyRollout(this, kind, namespace, name));
-  }
-
-  public void undoRollout(KubernetesKind kind, String namespace, String name, int revision) {
-    runAndRecordMetrics("undoRollout", kind, namespace, () -> jobExecutor.undoRollout(this, kind, namespace, name, revision));
-  }
-
-  public void pauseRollout(KubernetesKind kind, String namespace, String name) {
-    runAndRecordMetrics("pauseRollout", kind, namespace, () -> jobExecutor.pauseRollout(this, kind, namespace, name));
-  }
-
-  public void resumeRollout(KubernetesKind kind, String namespace, String name) {
-    runAndRecordMetrics("resumeRollout", kind, namespace, () -> jobExecutor.resumeRollout(this, kind, namespace, name));
-  }
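determineOmitKinds above probes one representative namespace per kind and quietly drops any kind the account cannot read, rather than failing account setup. A minimal sketch of that probe, with a BiConsumer standing in for the kubectl-backed list call:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiConsumer;

public class KindProbe {
  static List<String> probe(List<String> kinds, String namespace, BiConsumer<String, String> listFn) {
    List<String> omitted = new ArrayList<>();
    for (String kind : kinds) {
      try {
        listFn.accept(kind, namespace); // e.g. `kubectl get <kind> -n <namespace>`
      } catch (Exception e) {
        omitted.add(kind); // unreadable kind: never cache it for this account
      }
    }
    return omitted;
  }

  public static void main(String[] args) {
    List<String> omitted = probe(Arrays.asList("deployment", "secret"), "default",
        (kind, ns) -> { if (kind.equals("secret")) throw new RuntimeException("forbidden"); });
    System.out.println(omitted); // [secret]
  }
}
```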
-
-  public void patch(KubernetesKind kind, String namespace, String name, KubernetesPatchOptions options,
-                    KubernetesManifest manifest) {
-    runAndRecordMetrics("patch", kind, namespace, () -> jobExecutor.patch(this, kind, namespace, name, options, manifest));
-  }
-
-  private <T> T runAndRecordMetrics(String action, KubernetesKind kind, String namespace, Supplier<T> op) {
-    return runAndRecordMetrics(action, Collections.singletonList(kind), namespace, op);
-  }
-
-  private <T> T runAndRecordMetrics(String action, List<KubernetesKind> kinds, String namespace, Supplier<T> op) {
-    T result = null;
-    Throwable failure = null;
-    KubectlException apiException = null;
-    long startTime = clock.monotonicTime();
-    try {
-      result = op.get();
-    } catch (KubectlException e) {
-      apiException = e;
-    } catch (Exception e) {
-      failure = e;
-    } finally {
-      Map<String, String> tags = new HashMap<>();
-      tags.put("action", action);
-      if (kinds.size() == 1) {
-        tags.put("kind", kinds.get(0).toString());
-      } else {
-        tags.put("kinds", String.join(",", kinds.stream().map(KubernetesKind::toString).collect(Collectors.toList())));
-      }
-      tags.put("account", accountName);
-      tags.put("namespace", StringUtils.isEmpty(namespace) ? "none" : namespace);
-      if (failure == null) {
-        tags.put("success", "true");
-      } else {
-        tags.put("success", "false");
-        tags.put("reason", failure.getClass().getSimpleName() + ": " + failure.getMessage());
-      }
-
-      registry.timer(registry.createId("kubernetes.api", tags))
-          .record(clock.monotonicTime() - startTime, TimeUnit.NANOSECONDS);
-
-      if (failure != null) {
-        throw new KubectlJobExecutor.KubectlException("Failure running " + action + " on " + kinds + ": " + failure.getMessage(), failure);
-      } else if (apiException != null) {
-        throw apiException;
-      } else {
-        return result;
-      }
-    }
-  }
-}
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/KubernetesValidationUtil.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/KubernetesValidationUtil.java
deleted file mode 100644
index 626baf8e7ac..00000000000
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/KubernetesValidationUtil.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
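runAndRecordMetrics above wraps every kubectl-backed call in a tagged timer and rethrows after recording, so failures still surface to both metrics and the caller. A registry-free sketch of the same shape, with a println standing in for the Spectator timer:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class TimedCall {
  static <T> T timed(String action, String kind, Supplier<T> op) {
    long start = System.nanoTime();
    Throwable failure = null;
    try {
      return op.get();
    } catch (RuntimeException e) {
      failure = e;
      throw e; // recorded below, then rethrown to the caller
    } finally {
      Map<String, String> tags = new HashMap<>();
      tags.put("action", action);
      tags.put("kind", kind);
      tags.put("success", Boolean.toString(failure == null));
      System.out.println(tags + " took " + (System.nanoTime() - start) + "ns");
    }
  }

  public static void main(String[] args) {
    System.out.println(timed("list", "pod", () -> "ok"));
  }
}
```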
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator; - -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentials; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.springframework.validation.Errors; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -@Slf4j -public class KubernetesValidationUtil { - final private String context; - final private Errors errors; - - public KubernetesValidationUtil(String context, Errors errors) { - this.context = context; - this.errors = errors; - } - - private String joinAttributeChain(String... attributes) { - List chain = new ArrayList<>(); - chain.add(context); - Collections.addAll(chain, attributes); - return String.join(".", chain); - } - - public void reject(String errorName, String... attributes) { - String field = joinAttributeChain(attributes); - String error = joinAttributeChain(field, errorName); - errors.reject(field, error); - } - - public boolean validateNotEmpty(String attribute, Object value) { - if (value == null) { - reject("empty", attribute); - return false; - } - - return true; - } - - public boolean validateSizeEquals(String attribute, Collection items, int size) { - if (items.size() != size) { - reject("size!=" + size, attribute); - return false; - } - - return true; - } - - public boolean validateNotEmpty(String attribute, String value) { - if (StringUtils.isEmpty(value)) { - reject("empty", attribute); - return false; - } - - return true; - } - - public boolean validateV2Credentials(AccountCredentialsProvider provider, String accountName, String namespace) { - log.info("Validating credentials for {} {}", accountName, namespace); - if (!validateNotEmpty("account", accountName)) { - return false; - } - - if (StringUtils.isEmpty(namespace)) { - return true; - } - - AccountCredentials credentials = provider.getCredentials(accountName); - if (credentials == null) { - reject("notFound", "account"); - return false; - } - - if (!(credentials.getCredentials() instanceof KubernetesV2Credentials)) { - reject("wrongVersion", "account"); - return false; - } - - if (!validateNamespace(namespace, (KubernetesV2Credentials)credentials.getCredentials())) { - return false; - } - - return true; - } - - protected boolean validateNamespace(String namespace, KubernetesV2Credentials credentials) { - final List configuredNamespaces = credentials.getNamespaces(); - if (configuredNamespaces != null && !configuredNamespaces.isEmpty() && !configuredNamespaces.contains(namespace)) { - reject("wrongNamespace", namespace); - return false; - } - - final List omitNamespaces = credentials.getOmitNamespaces(); - if (omitNamespaces != null && omitNamespaces.contains(namespace)) { - reject("omittedNamespace", namespace); - return false; - } - return true; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/artifact/KubernetesArtifactCleanupValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/artifact/KubernetesArtifactCleanupValidator.java deleted file mode 100644 index e176aa441b7..00000000000 --- 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/artifact/KubernetesArtifactCleanupValidator.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.artifact; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.artifact.KubernetesCleanupArtifactsDescription; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.CLEANUP_ARTIFACTS; - -@KubernetesOperation(CLEANUP_ARTIFACTS) -@Component -public class KubernetesArtifactCleanupValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesCleanupArtifactsDescription description, Errors errors) { - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesDeleteManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesDeleteManifestValidator.java deleted file mode 100644 index 2b8bb2e35ce..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesDeleteManifestValidator.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
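To make the deleted KubernetesValidationUtil's error-code scheme concrete: reject(errorName, attributes...) derives both the rejected field and the error code from the validator's context string, and because joinAttributeChain always prepends the context, the context appears twice in the composed error code. A small sketch with no Spring Errors dependency, just printing what would be rejected:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    class ValidationCodes {
      public static void main(String[] args) {
        String context = "deleteKubernetesManifest";
        String field = join(context, "account");
        String error = join(context, field, "empty");
        // field: deleteKubernetesManifest.account
        // error: deleteKubernetesManifest.deleteKubernetesManifest.account.empty
        System.out.println(field + " -> " + error);
      }

      // Mirrors joinAttributeChain from the deleted class above.
      static String join(String context, String... attributes) {
        List<String> chain = new ArrayList<>();
        chain.add(context);
        Collections.addAll(chain, attributes);
        return String.join(".", chain);
      }
    }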
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesCoordinates; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeleteManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.Collections; -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DELETE_MANIFEST; - -@KubernetesOperation(DELETE_MANIFEST) -@Component -public class KubernetesDeleteManifestValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesDeleteManifestDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("deleteKubernetesManifest", errors); - List coordinates; - if (description.isDynamic()) { - coordinates = description.getAllCoordinates(); - } else { - coordinates = Collections.singletonList(description.getPointCoordinates()); - } - - for (KubernetesCoordinates coordinate : coordinates) { - if (!util.validateV2Credentials(provider, description.getAccount(), coordinate.getNamespace())) { - return; - } - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesDeployManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesDeployManifestValidator.java deleted file mode 100644 index 950a798d703..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesDeployManifestValidator.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
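The deleted delete-manifest validator above distinguishes "dynamic" deletes (resolved at runtime into many coordinates) from static ones (a single point coordinate), then validates account access against each namespace, aborting on the first failure. The shape of that control flow, with hypothetical stand-in types rather than the real description classes:

    import java.util.Collections;
    import java.util.List;

    class DeleteValidationSketch {
      // Hypothetical stand-in for KubernetesCoordinates.
      record Coordinates(String namespace, String name) {}

      static boolean validate(boolean dynamic, List<Coordinates> all, Coordinates point) {
        List<Coordinates> coordinates =
            dynamic ? all : Collections.singletonList(point);
        for (Coordinates c : coordinates) {
          if (!namespaceAllowed(c.namespace())) {
            return false; // first failure aborts validation, as in the deleted validator
          }
        }
        return true;
      }

      static boolean namespaceAllowed(String namespace) {
        return !namespace.startsWith("kube-"); // placeholder policy for the sketch
      }
    }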
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeployManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesDeployManifestOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DEPLOY_MANIFEST; - -@KubernetesOperation(DEPLOY_MANIFEST) -@Component -public class KubernetesDeployManifestValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesDeployManifestDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("deployKubernetesManifest", errors); - if (!util.validateNotEmpty("moniker", description)) { - return; - } - - for (KubernetesManifest manifest : description.getManifests()) { - // technically OK - sometimes manifest multi-docs are submitted with trailing `---` entries - if (manifest == null) { - continue; - } - - if (!util.validateV2Credentials(provider, description.getAccount(), manifest.getNamespace())) { - return; - } - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesPatchManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesPatchManifestValidator.java deleted file mode 100644 index 5d404287bbc..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesPatchManifestValidator.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2018 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
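The null check in the deploy validator above exists because a multi-document YAML ending in a trailing --- separator can parse to a list whose final entry is null. A quick way to observe this with SnakeYAML (assuming SnakeYAML is on the classpath; the exact null-vs-empty behavior depends on the parser in use):

    import org.yaml.snakeyaml.Yaml;

    class TrailingSeparator {
      public static void main(String[] args) {
        String multiDoc = "a: 1\n---\nb: 2\n---\n"; // note the trailing separator
        Yaml yaml = new Yaml();
        for (Object doc : yaml.loadAll(multiDoc)) {
          System.out.println(doc); // the final document may come back as null
        }
      }
    }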
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PATCH_MANIFEST; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesPatchManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import java.util.List; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -@KubernetesOperation(PATCH_MANIFEST) -@Component -public class KubernetesPatchManifestValidator extends DescriptionValidator { - - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesPatchManifestDescription description, - Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("patchKubernetesManifest", errors); - - if (!util.validateNotEmpty("patchBody", description.getPatchBody())) { - return; - } - - if (!util.validateNotEmpty("options.mergeStrategy", description.getOptions().getMergeStrategy())){ - return; - } - - if (!util.validateNotEmpty("options.record", description.getOptions().isRecord())) { - return; - } - - if (!util.validateV2Credentials(provider, description.getAccount(), - description.getPointCoordinates().getNamespace())) { - return; - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesPauseRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesPauseRolloutManifestValidator.java deleted file mode 100644 index d29ecb1ee94..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesPauseRolloutManifestValidator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
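For context on what those patch options feed into: the patch operation (see the deleted patch(...) method at the top of this hunk) ultimately shells out to kubectl, where the merge strategy maps onto kubectl patch --type. A hedged sketch of that command assembly (the argument layout here is illustrative, not the actual KubectlJobExecutor code):

    import java.util.ArrayList;
    import java.util.List;

    class PatchCommandSketch {
      static List<String> patchCommand(
          String kind, String name, String namespace,
          String mergeStrategy, boolean record, String patchBody) {
        List<String> cmd = new ArrayList<>(List.of(
            "kubectl", "patch", kind, name,
            "--namespace", namespace,
            "--type", mergeStrategy,   // e.g. "strategic", "merge", or "json"
            "--patch", patchBody));
        if (record) {
          cmd.add("--record");
        }
        return cmd;
      }
    }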
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesPauseRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PAUSE_ROLLOUT_MANIFEST; - -@KubernetesOperation(PAUSE_ROLLOUT_MANIFEST) -@Component -public class KubernetesPauseRolloutManifestValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesPauseRolloutManifestDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("pauseRolloutKubernetesManifest", errors); - if (!util.validateV2Credentials(provider, description.getAccount(), description.getPointCoordinates().getNamespace())) { - return; - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesResumeRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesResumeRolloutManifestValidator.java deleted file mode 100644 index 84117748374..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesResumeRolloutManifestValidator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesResumeRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESUME_ROLLOUT_MANIFEST; - -@KubernetesOperation(RESUME_ROLLOUT_MANIFEST) -@Component -public class KubernetesResumeRolloutManifestValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesResumeRolloutManifestDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("resumeRolloutKubernetesManifest", errors); - if (!util.validateV2Credentials(provider, description.getAccount(), description.getPointCoordinates().getNamespace())) { - return; - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesScaleManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesScaleManifestValidator.java deleted file mode 100644 index 96f77688c72..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesScaleManifestValidator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesScaleManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.SCALE_MANIFEST; - -@KubernetesOperation(SCALE_MANIFEST) -@Component -public class KubernetesScaleManifestValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesScaleManifestDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("scaleKubernetesManifest", errors); - if (!util.validateV2Credentials(provider, description.getAccount(), description.getPointCoordinates().getNamespace())) { - return; - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesUndoRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesUndoRolloutManifestValidator.java deleted file mode 100644 index 58bc751f2c9..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/manifest/KubernetesUndoRolloutManifestValidator.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.manifest; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesUndoRolloutManifestDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.UNDO_ROLLOUT_MANIFEST; - -@KubernetesOperation(UNDO_ROLLOUT_MANIFEST) -@Component -public class KubernetesUndoRolloutManifestValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesUndoRolloutManifestDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("undoRolloutKubernetesManifest", errors); - if (!util.validateV2Credentials(provider, description.getAccount(), description.getPointCoordinates().getNamespace())) { - return; - } - - if (description.getNumRevisionsBack() == null && description.getRevision() == null) { - util.reject("empty", "numRevisionsBack & revision"); - } - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/servergroup/KubernetesResizeServerGroupValidator.java b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/servergroup/KubernetesResizeServerGroupValidator.java deleted file mode 100644 index 8e5edfa1556..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/servergroup/KubernetesResizeServerGroupValidator.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
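The undo-rollout validator above insists on either numRevisionsBack or an explicit revision, because the underlying rollback ultimately needs one concrete target revision (as in kubectl rollout undo --to-revision). Roughly, as a sketch; the real revision arithmetic lives in the operation, not the validator:

    class UndoRolloutSketch {
      // Resolve the target revision: an explicit revision wins, otherwise count
      // back from the current revision. Either input may be null, but not both.
      static int targetRevision(Integer revision, Integer numRevisionsBack, int currentRevision) {
        if (revision == null && numRevisionsBack == null) {
          throw new IllegalArgumentException("numRevisionsBack & revision may not both be empty");
        }
        return revision != null ? revision : currentRevision - numRevisionsBack;
      }

      public static void main(String[] args) {
        System.out.println(targetRevision(null, 1, 5)); // 4
      }
    }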
- * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.servergroup; - -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.servergroup.KubernetesResizeServerGroupDescription; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.validator.KubernetesValidationUtil; -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; -import com.netflix.spinnaker.clouddriver.security.ProviderVersion; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; -import org.springframework.validation.Errors; - -import java.util.List; - -import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESIZE_SERVER_GROUP; - -@KubernetesOperation(RESIZE_SERVER_GROUP) -@Component -public class KubernetesResizeServerGroupValidator extends DescriptionValidator { - @Autowired - AccountCredentialsProvider provider; - - @Override - public void validate(List priorDescriptions, KubernetesResizeServerGroupDescription description, Errors errors) { - KubernetesValidationUtil util = new KubernetesValidationUtil("deployKubernetesManifest", errors); - if (!util.validateV2Credentials(provider, description.getAccount(), description.getCoordinates().getNamespace())) { - return; - } - - util.validateNotEmpty("capacity", description.getCapacity()); - } - - @Override - public boolean acceptsVersion(ProviderVersion version) { - return version == ProviderVersion.v2; - } -} - - diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/config/KubernetesConfiguration.groovy b/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/config/KubernetesConfiguration.groovy deleted file mode 100644 index cd470843ed8..00000000000 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/config/KubernetesConfiguration.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.config - -import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties -import com.netflix.spinnaker.clouddriver.kubernetes.health.KubernetesHealthIndicator -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentialsInitializer -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.ComponentScan -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Import -import org.springframework.context.annotation.Scope -import org.springframework.scheduling.annotation.EnableScheduling - -@Configuration -@EnableConfigurationProperties -@EnableScheduling -@ConditionalOnProperty('kubernetes.enabled') -@ComponentScan(["com.netflix.spinnaker.clouddriver.kubernetes"]) -@Import([ KubernetesNamedAccountCredentialsInitializer ]) -class KubernetesConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("kubernetes") - KubernetesConfigurationProperties kubernetesConfigurationProperties() { - new KubernetesConfigurationProperties() - } - - @Bean - KubernetesHealthIndicator kubernetesHealthIndicator() { - new KubernetesHealthIndicator() - } - - @Bean - KubernetesUtil kubernetesUtil() { - new KubernetesUtil() - } -} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesCloudProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesCloudProvider.java new file mode 100644 index 00000000000..e52090ba9f1 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesCloudProvider.java @@ -0,0 +1,42 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.kubernetes; + +import com.netflix.spinnaker.clouddriver.core.CloudProvider; +import java.lang.annotation.Annotation; +import org.springframework.stereotype.Component; + +/** Kubernetes declaration as a {@link CloudProvider}. 
*/ +@Component +public class KubernetesCloudProvider implements CloudProvider { + public static final String ID = "kubernetes"; + + @Override + public String getId() { + return ID; + } + + @Override + public String getDisplayName() { + return "Kubernetes"; + } + + @Override + public Class getOperationAnnotationType() { + return KubernetesOperation.class; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesOperation.java new file mode 100644 index 00000000000..75d5c38cc00 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/KubernetesOperation.java @@ -0,0 +1,32 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.kubernetes; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * {@code KubernetesOperation}s specify implementation classes of Spinnaker AtomicOperations for + * Kubernetes. + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface KubernetesOperation { + String value(); +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactConverter.java new file mode 100644 index 00000000000..2bca17eb448 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactConverter.java @@ -0,0 +1,44 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.OptionalInt; + +@NonnullByDefault +public final class ArtifactConverter { + // Static methods only; prevent instantiation. 
+ private ArtifactConverter() {} + + public static Artifact toArtifact( + KubernetesManifest manifest, String account, OptionalInt version) { + String name = manifest.getName(); + String versionString = version.isPresent() ? String.format("v%03d", version.getAsInt()) : ""; + String versionedName = versionString.isEmpty() ? name : String.join("-", name, versionString); + return Artifact.builder() + .type("kubernetes/" + manifest.getKind().toString()) + .name(name) + .location(manifest.getNamespace()) + .version(versionString) + .reference(versionedName) + .putMetadata("account", account) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactReplacer.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactReplacer.java new file mode 100644 index 00000000000..f93273d5732 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactReplacer.java @@ -0,0 +1,154 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableSet.toImmutableSet; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.jayway.jsonpath.Configuration; +import com.jayway.jsonpath.DocumentContext; +import com.jayway.jsonpath.JsonPath; +import com.jayway.jsonpath.spi.json.JacksonJsonNodeJsonProvider; +import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.List; +import java.util.function.Predicate; +import java.util.stream.Stream; +import javax.annotation.Nonnull; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ParametersAreNonnullByDefault +public class ArtifactReplacer { + private static final Logger log = LoggerFactory.getLogger(ArtifactReplacer.class); + private static final ObjectMapper mapper = new ObjectMapper(); + private static final Configuration configuration = + Configuration.builder() + .jsonProvider(new JacksonJsonNodeJsonProvider()) + .mappingProvider(new JacksonMappingProvider()) + .build(); + + private final ImmutableList replacers; + + public ArtifactReplacer(Collection replacers) { + this.replacers = ImmutableList.copyOf(replacers); + } + + private static ImmutableList 
filterArtifacts( + @Nonnull String namespace, @Nonnull String account, List artifacts) { + return artifacts.stream() + .filter(a -> !Strings.isNullOrEmpty(a.getType())) + .filter(nonKubernetes().or(namespaceMatches(namespace).and(accountMatches(account)))) + .collect(toImmutableList()); + } + + private static Predicate nonKubernetes() { + return a -> !a.getType().startsWith("kubernetes/"); + } + + private static Predicate namespaceMatches(@Nonnull String namespace) { + return a -> Strings.nullToEmpty(a.getLocation()).equals(namespace); + } + + private static Predicate accountMatches(@Nonnull String account) { + return a -> { + String artifactAccount = Strings.nullToEmpty((String) a.getMetadata("account")); + // If the artifact fails to provide an account, assume this was unintentional and match + // anyways + return artifactAccount.isEmpty() || artifactAccount.equals(account); + }; + } + + @Nonnull + public ReplaceResult replaceAll( + String dockerImageBinding, + KubernetesManifest input, + List artifacts, + @Nonnull String namespace, + @Nonnull String account) { + log.debug("Doing replacement on {} using {}", input, artifacts); + DocumentContext document; + try { + document = JsonPath.using(configuration).parse(mapper.writeValueAsString(input)); + } catch (JsonProcessingException e) { + throw new UncheckedIOException("Malformed manifest", e); + } + + ImmutableList filteredArtifacts = filterArtifacts(namespace, account, artifacts); + ImmutableSet.Builder replacedArtifacts = ImmutableSet.builder(); + for (Replacer replacer : replacers) { + ImmutableCollection replaced = + replacer.replaceArtifacts(dockerImageBinding, document, filteredArtifacts); + replacedArtifacts.addAll(replaced); + } + + try { + return new ReplaceResult( + mapper.readValue(document.jsonString(), KubernetesManifest.class), + replacedArtifacts.build()); + } catch (IOException e) { + throw new UncheckedIOException("Malformed manifest", e); + } + } + + @Nonnull + public ImmutableSet findAll(KubernetesManifest input) { + DocumentContext document; + try { + document = JsonPath.using(configuration).parse(mapper.writeValueAsString(input)); + } catch (JsonProcessingException e) { + throw new UncheckedIOException("Malformed manifest", e); + } + + return replacers.stream() + .flatMap( + r -> { + try { + return r.getArtifacts(document); + } catch (Exception e) { + // This happens when a manifest isn't fully defined (e.g. not all properties are + // there) + log.debug( + "Failure converting artifacts for {} using {} (skipping)", + input.getFullResourceName(), + r, + e); + return Stream.empty(); + } + }) + .collect(toImmutableSet()); + } + + @Value + public static class ReplaceResult { + private final KubernetesManifest manifest; + private final ImmutableSet boundArtifacts; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/Replacer.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/Replacer.java new file mode 100644 index 00000000000..8141a32f095 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/Replacer.java @@ -0,0 +1,334 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
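The predicate composition in filterArtifacts above is worth spelling out: non-Kubernetes artifacts always pass, while kubernetes/* artifacts must match the target namespace and, only if they declare one, the account. The same logic as a plain java.util.function.Predicate without Guava (the Art record is a stand-in for the kork Artifact type):

    import java.util.function.Predicate;

    class ArtifactFilterSketch {
      record Art(String type, String location, String account) {}

      static Predicate<Art> eligible(String namespace, String account) {
        Predicate<Art> nonKubernetes = a -> !a.type().startsWith("kubernetes/");
        Predicate<Art> namespaceMatches =
            a -> namespace.equals(a.location() == null ? "" : a.location());
        // An artifact with no account is assumed to match, mirroring the comment above.
        Predicate<Art> accountMatches =
            a -> a.account() == null || a.account().isEmpty() || a.account().equals(account);
        return nonKubernetes.or(namespaceMatches.and(accountMatches));
      }

      public static void main(String[] args) {
        Predicate<Art> p = eligible("default", "prod-account");
        System.out.println(p.test(new Art("docker/image", null, null)));              // true: not kubernetes/*
        System.out.println(p.test(new Art("kubernetes/configMap", "default", null))); // true: namespace matches, no account declared
        System.out.println(p.test(new Art("kubernetes/configMap", "other", "prod-account"))); // false: wrong namespace
      }
    }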
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import static com.jayway.jsonpath.Criteria.where; +import static com.jayway.jsonpath.Filter.filter; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Streams; +import com.jayway.jsonpath.DocumentContext; +import com.jayway.jsonpath.Filter; +import com.jayway.jsonpath.JsonPath; +import com.jayway.jsonpath.PathNotFoundException; +import com.jayway.jsonpath.Predicate; +import com.jayway.jsonpath.internal.filter.ValueNode; +import com.netflix.spinnaker.clouddriver.artifacts.kubernetes.KubernetesArtifactType; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.Collection; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Function; +import java.util.stream.Stream; +import javax.annotation.Nullable; +import lombok.AccessLevel; +import lombok.Builder; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@NonnullByDefault +public final class Replacer { + private static final Logger log = LoggerFactory.getLogger(Replacer.class); + + private final KubernetesArtifactType type; + private final JsonPath findPath; + private final Function replacePathSupplier; + private final Function legacyReplacePathSupplier; + private final Function nameFromReference; + + /** + * @param type the type of artifact this replacer handles + * @param path a string representing a JsonPath expression containing a single [?] placeholder + * representing a filter + * @param findFilter a filter that should be applied to the path when finding any artifacts in a + * manifest; defaults to a filter matching all nodes + * @param legacyReplaceFilter a function that takes an artifact and returns the filter that should + * be applied to the path when replacing artifacts; if a findFilter is supplied both the + * findFilter and replaceFilter must match for the artifact to be replaced + * @param replacePathFromPlaceholder a string that represents the path from the [?] placeholder to + * the replaced field. 
+ * @param nameFromReference a function to extract an artifact name from its reference; defaults to + * returning the reference + */ + @Builder(access = AccessLevel.PRIVATE) + private Replacer( + KubernetesArtifactType type, + String path, + @Nullable Filter findFilter, + Function legacyReplaceFilter, + String replacePathFromPlaceholder, + @Nullable Function nameFromReference) { + this.type = Objects.requireNonNull(type); + Objects.requireNonNull(path); + Objects.requireNonNull(replacePathFromPlaceholder); + this.nameFromReference = Optional.ofNullable(nameFromReference).orElse(a -> a); + Function replaceFilter = + a -> filter(createReplaceFilterPredicate(replacePathFromPlaceholder, a.getName())); + if (findFilter != null) { + this.findPath = JsonPath.compile(path, findFilter); + this.replacePathSupplier = + a -> JsonPath.compile(path, replaceFilter.apply(a).and(findFilter)); + this.legacyReplacePathSupplier = + a -> JsonPath.compile(path, legacyReplaceFilter.apply(a).and(findFilter)); + } else { + this.findPath = JsonPath.compile(path, filter(a -> true)); + this.replacePathSupplier = a -> JsonPath.compile(path, replaceFilter.apply(a)); + this.legacyReplacePathSupplier = a -> JsonPath.compile(path, legacyReplaceFilter.apply(a)); + } + } + + Stream getArtifacts(DocumentContext document) { + return Streams.stream(document.read(findPath).elements()) + .map(JsonNode::asText) + .map( + ref -> + Artifact.builder() + .type(type.getType()) + .reference(ref) + .name(nameFromReference.apply(ref)) + .build()); + } + + ImmutableCollection replaceArtifacts( + String dockerImageBinding, DocumentContext obj, Collection artifacts) { + ImmutableSet.Builder replacedArtifacts = ImmutableSet.builder(); + for (Artifact artifact : artifacts) { + boolean wasReplaced = replaceIfPossible(dockerImageBinding, obj, artifact); + if (wasReplaced) { + replacedArtifacts.add(artifact); + } + } + return replacedArtifacts.build(); + } + + private Predicate createReplaceFilterPredicate(String replacePath, String name) { + return ctx -> { + ValueNode node = ValueNode.toValueNode("@." + replacePath).asPathNode().evaluate(ctx); + if (!node.isStringNode()) { + return false; + } + String value = node.asStringNode().getString(); + return nameFromReference.apply(value).equals(name); + }; + } + + private boolean replaceIfPossible( + String dockerImageBinding, DocumentContext obj, Artifact artifact) { + if (!type.getType().equals(artifact.getType())) { + return false; + } + + JsonPath path; + if (!StringUtils.isBlank(dockerImageBinding) && dockerImageBinding.equals("match-name-only")) { + path = legacyReplacePathSupplier.apply(artifact); + } else { + path = replacePathSupplier.apply(artifact); + } + + log.debug("Processed jsonPath == {}", path.getPath()); + + Object get; + try { + get = obj.read(path); + } catch (PathNotFoundException e) { + return false; + } + if (get == null || (get instanceof ArrayNode && ((ArrayNode) get).size() == 0)) { + return false; + } + + log.info("Found valid swap for " + artifact + " using " + path.getPath() + ": " + get); + obj.set(path, artifact.getReference()); + + return true; + } + + private static final Replacer DOCKER_IMAGE = + builder() + // This matches not only resources where the path is + // e.g. .spec.template.spec.containers.[0].image (e.g. deployments and + // jobs), but also where the path is + // .spec.jobTemplate.spec.containers[0].image (e.g. cronjobs). The + // double dot at the beginning is a "descendant selector". 
See + // https://www.ietf.org/archive/id/draft-ietf-jsonpath-base-01.html#section-3.5.7. + .path("$..spec.template.spec['containers', 'initContainers'].[?].image") + .legacyReplaceFilter(a -> filter(where("image").is(a.getName()))) + .replacePathFromPlaceholder("image") + .nameFromReference( + ref -> { + // @ can only show up in image references denoting a digest + // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go#L70 + int atIndex = ref.indexOf('@'); + if (atIndex >= 0) { + return ref.substring(0, atIndex); + } + + // : can be used to denote a port, part of a digest (already matched) or a tag + // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go#L69 + int lastColonIndex = ref.lastIndexOf(':'); + if (lastColonIndex >= 0) { + // we don't need to check if this is a tag, or a port. ports will be matched + // lazily if they are numeric, and are treated as tags first: + // https://github.com/docker/distribution/blob/95daa793b83a21656fe6c13e6d5cf1c3999108c7/reference/regexp.go#L34 + return ref.substring(0, lastColonIndex); + } + return ref; + }) + .type(KubernetesArtifactType.DockerImage) + .build(); + private static final Replacer POD_DOCKER_IMAGE = + builder() + .path("$.spec.containers.[?].image") + .legacyReplaceFilter(a -> filter(where("image").is(a.getName()))) + .replacePathFromPlaceholder("image") + .type(KubernetesArtifactType.DockerImage) + .build(); + private static final Replacer CONFIG_MAP_VOLUME = + builder() + .path("$..spec.template.spec.volumes.[?].configMap.name") + .legacyReplaceFilter(a -> filter(where("configMap.name").is(a.getName()))) + .replacePathFromPlaceholder("configMap.name") + .type(KubernetesArtifactType.ConfigMap) + .build(); + private static final Replacer SECRET_VOLUME = + builder() + .path("$..spec.template.spec.volumes.[?].secret.secretName") + .legacyReplaceFilter(a -> filter(where("secret.secretName").is(a.getName()))) + .replacePathFromPlaceholder("secret.secretName") + .type(KubernetesArtifactType.Secret) + .build(); + private static final Replacer CONFIG_MAP_PROJECTED_VOLUME = + builder() + .path("$..spec.template.spec.volumes.*.projected.sources.[?].configMap.name") + .legacyReplaceFilter(a -> filter(where("configMap.name").is(a.getName()))) + .replacePathFromPlaceholder("configMap.name") + .type(KubernetesArtifactType.ConfigMap) + .build(); + private static final Replacer SECRET_PROJECTED_VOLUME = + builder() + .path("$..spec.template.spec.volumes.*.projected.sources.[?].secret.name") + .legacyReplaceFilter(a -> filter(where("secret.name").is(a.getName()))) + .replacePathFromPlaceholder("secret.name") + .type(KubernetesArtifactType.Secret) + .build(); + private static final Replacer CONFIG_MAP_KEY_VALUE = + builder() + .path( + "$..spec.template.spec['containers', 'initContainers'].*.env.[?].valueFrom.configMapKeyRef.name") + .legacyReplaceFilter(a -> filter(where("valueFrom.configMapKeyRef.name").is(a.getName()))) + .replacePathFromPlaceholder("valueFrom.configMapKeyRef.name") + .type(KubernetesArtifactType.ConfigMap) + .build(); + private static final Replacer SECRET_KEY_VALUE = + builder() + .path( + "$..spec.template.spec['containers', 'initContainers'].*.env.[?].valueFrom.secretKeyRef.name") + .legacyReplaceFilter(a -> filter(where("valueFrom.secretKeyRef.name").is(a.getName()))) + .replacePathFromPlaceholder("valueFrom.secretKeyRef.name") + .type(KubernetesArtifactType.Secret) + .build(); + private static final Replacer CONFIG_MAP_ENV 
= + builder() + .path( + "$..spec.template.spec['containers', 'initContainers'].*.envFrom.[?].configMapRef.name") + .legacyReplaceFilter(a -> filter(where("configMapRef.name").is(a.getName()))) + .replacePathFromPlaceholder("configMapRef.name") + .type(KubernetesArtifactType.ConfigMap) + .build(); + private static final Replacer SECRET_ENV = + builder() + .path( + "$..spec.template.spec['containers', 'initContainers'].*.envFrom.[?].secretRef.name") + .legacyReplaceFilter(a -> filter(where("secretRef.name").is(a.getName()))) + .replacePathFromPlaceholder("secretRef.name") + .type(KubernetesArtifactType.Secret) + .build(); + private static final Replacer HPA_DEPLOYMENT = + builder() + .path("$[?].spec.scaleTargetRef.name") + .findFilter( + filter(where("spec.scaleTargetRef.kind").is("Deployment")) + .or(where("spec.scaleTargetRef.kind").is("deployment"))) + .legacyReplaceFilter(a -> filter(where("spec.scaleTargetRef.name").is(a.getName()))) + .replacePathFromPlaceholder("spec.scaleTargetRef.name") + .type(KubernetesArtifactType.Deployment) + .build(); + private static final Replacer HPA_REPLICA_SET = + builder() + .path("$[?].spec.scaleTargetRef.name") + .findFilter( + filter(where("spec.scaleTargetRef.kind").is("ReplicaSet")) + .or(where("spec.scaleTargetRef.kind").is("replicaSet"))) + .legacyReplaceFilter(a -> filter(where("spec.scaleTargetRef.name").is(a.getName()))) + .replacePathFromPlaceholder("spec.scaleTargetRef.name") + .type(KubernetesArtifactType.ReplicaSet) + .build(); + + public static Replacer dockerImage() { + return DOCKER_IMAGE; + } + + public static Replacer podDockerImage() { + return POD_DOCKER_IMAGE; + } + + public static Replacer configMapVolume() { + return CONFIG_MAP_VOLUME; + } + + public static Replacer secretVolume() { + return SECRET_VOLUME; + } + + public static Replacer configMapProjectedVolume() { + return CONFIG_MAP_PROJECTED_VOLUME; + } + + public static Replacer secretProjectedVolume() { + return SECRET_PROJECTED_VOLUME; + } + + public static Replacer configMapKeyValue() { + return CONFIG_MAP_KEY_VALUE; + } + + public static Replacer secretKeyValue() { + return SECRET_KEY_VALUE; + } + + public static Replacer configMapEnv() { + return CONFIG_MAP_ENV; + } + + public static Replacer secretEnv() { + return SECRET_ENV; + } + + public static Replacer hpaDeployment() { + return HPA_DEPLOYMENT; + } + + public static Replacer hpaReplicaSet() { + return HPA_REPLICA_SET; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ResourceVersioner.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ResourceVersioner.java new file mode 100644 index 00000000000..39431fc1ee0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ResourceVersioner.java @@ -0,0 +1,139 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
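The nameFromReference lambda on the DOCKER_IMAGE replacer above strips a digest or tag so that references can be matched by repository name alone. Tracing the same substring logic on typical references (this restates the code from the diff, extracted into a runnable form):

    class ImageNameSketch {
      static String nameFromReference(String ref) {
        int atIndex = ref.indexOf('@'); // digest form: repo@sha256:...
        if (atIndex >= 0) {
          return ref.substring(0, atIndex);
        }
        int lastColonIndex = ref.lastIndexOf(':'); // tag separator
        if (lastColonIndex >= 0) {
          return ref.substring(0, lastColonIndex);
        }
        return ref;
      }

      public static void main(String[] args) {
        System.out.println(nameFromReference("gcr.io/app/web:1.2.3"));         // gcr.io/app/web
        System.out.println(nameFromReference("gcr.io/app/web@sha256:abc123")); // gcr.io/app/web
        System.out.println(nameFromReference("gcr.io/app/web"));               // gcr.io/app/web
      }
    }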
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.ArtifactProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +public final class ResourceVersioner { + private static final Logger log = LoggerFactory.getLogger(ResourceVersioner.class); + + private final ArtifactProvider artifactProvider; + private final ObjectMapper objectMapper = new ObjectMapper(); + + @Autowired + public ResourceVersioner(ArtifactProvider artifactProvider) { + this.artifactProvider = Objects.requireNonNull(artifactProvider); + } + + public OptionalInt getVersion(KubernetesManifest manifest, KubernetesCredentials credentials) { + ImmutableList priorVersions = + artifactProvider.getArtifacts( + manifest.getKind(), manifest.getName(), manifest.getNamespace(), credentials); + + OptionalInt maybeVersion = findMatchingVersion(priorVersions, manifest); + if (maybeVersion.isPresent()) { + log.info( + "Manifest {} was already deployed at version {} - reusing.", + manifest, + maybeVersion.getAsInt()); + return maybeVersion; + } else { + return OptionalInt.of(findGreatestUnusedVersion(priorVersions)); + } + } + + public OptionalInt getLatestVersion( + KubernetesManifest manifest, KubernetesCredentials credentials) { + ImmutableList priorVersions = + artifactProvider.getArtifacts( + manifest.getKind(), manifest.getName(), manifest.getNamespace(), credentials); + return findLatestVersion(priorVersions); + } + + private static OptionalInt parseVersion(String versionString) { + if (!versionString.startsWith("v")) { + return OptionalInt.empty(); + } + try { + return OptionalInt.of(Integer.parseInt(versionString.substring(1))); + } catch (NumberFormatException e) { + return OptionalInt.empty(); + } + } + + private int findGreatestUnusedVersion(List priorVersions) { + OptionalInt latestVersion = findLatestVersion(priorVersions); + if (latestVersion.isPresent()) { + return latestVersion.getAsInt() + 1; + } + return 0; + } + + private OptionalInt findLatestVersion(List priorVersions) { + return extractVersions(priorVersions.stream()).max(); + } + + private OptionalInt findMatchingVersion( + List priorVersions, KubernetesManifest manifest) { + Stream matchingArtifacts = + priorVersions.stream() + .filter( + a -> + getLastAppliedConfiguration(a) + .map(c -> c.nonMetadataEquals(manifest)) + .orElse(false)); + + return extractVersions(matchingArtifacts).findFirst(); + } + + private IntStream extractVersions(Stream artifacts) { + return artifacts + .map(Artifact::getVersion) + .map(Strings::nullToEmpty) + .map(ResourceVersioner::parseVersion) + .filter(OptionalInt::isPresent) + .mapToInt(OptionalInt::getAsInt) + .filter(i -> i >= 0); + } + + private Optional 
getLastAppliedConfiguration(Artifact artifact) { + Object rawLastAppliedConfiguration = artifact.getMetadata("lastAppliedConfiguration"); + + if (rawLastAppliedConfiguration == null) { + return Optional.empty(); + } + + try { + KubernetesManifest manifest = + objectMapper.convertValue(rawLastAppliedConfiguration, KubernetesManifest.class); + return Optional.of(manifest); + } catch (RuntimeException e) { + log.warn("Malformed lastAppliedConfiguration entry in {}: ", artifact, e); + return Optional.empty(); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/Keys.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/Keys.java new file mode 100644 index 00000000000..f068abc67c9 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/Keys.java @@ -0,0 +1,301 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Keys { + private static final Logger log = LoggerFactory.getLogger(Keys.class); + /** + * Keys are split into "logical" and "infrastructure" kinds. "logical" keys are for spinnaker + * groupings that exist by naming/moniker convention, whereas "infrastructure" keys correspond to + * real resources (e.g. replica set, service, ...). 
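To make the key scheme described here concrete: every key is colon-delimited with the provider prefix "kubernetes.v2", any ":" inside a component is escaped to ";", and logical vs. infrastructure keys differ in their middle segments. Expected shapes, derived from the createKey methods below (the account/app/namespace values are illustrative inputs, not taken from the source):

    class KeyShapes {
      public static void main(String[] args) {
        // Logical keys: provider : kind : logicalKind : components...
        System.out.println("kubernetes.v2:logical:applications:myapp");
        System.out.println("kubernetes.v2:logical:clusters:my-account:myapp:mycluster");
        // Infrastructure key: provider : kind : kubernetesKind : account : namespace : name
        System.out.println("kubernetes.v2:infrastructure:replicaSet:my-account:default:myapp-v003");
      }
    }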
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/Keys.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/Keys.java new file mode 100644 index 00000000000..f068abc67c9 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/Keys.java @@ -0,0 +1,301 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Keys { + private static final Logger log = LoggerFactory.getLogger(Keys.class); + /** + * Keys are split into "logical" and "infrastructure" kinds. "logical" keys are for spinnaker + * groupings that exist by naming/moniker convention, whereas "infrastructure" keys correspond to + * real resources (e.g. replica set, service, ...). + */ + public enum Kind { + LOGICAL, + @Deprecated + ARTIFACT, + INFRASTRUCTURE; + + private final String lcName; + + Kind() { + this.lcName = name().toLowerCase(); + } + + @Override + public String toString() { + return lcName; + } + + @JsonCreator + public static Kind fromString(String name) { + try { + return valueOf(name.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("No matching kind with name " + name + " exists"); + } + } + } + + public enum LogicalKind { + APPLICATIONS, + CLUSTERS; + + private final String lcName; + + LogicalKind() { + this.lcName = name().toLowerCase(); + } + + public static boolean isLogicalGroup(String group) { + return group.equals(APPLICATIONS.toString()) || group.equals(CLUSTERS.toString()); + } + + @Override + public String toString() { + return lcName; + } + + public String singular() { + String name = toString(); + return name.substring(0, name.length() - 1); + } + + @JsonCreator + public static LogicalKind fromString(String name) { + try { + return valueOf(name.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("No matching kind with name " + name + " exists"); + } + } + } + + private static final String provider = "kubernetes.v2"; + + private static String createKeyFromParts(Object... elems) { + List<String> components = + Arrays.stream(elems) + .map(s -> s == null ? "" : s.toString()) + .map(s -> s.contains(":") ? s.replaceAll(":", ";") : s) + .collect(Collectors.toList()); + components.add(0, provider); + return String.join(":", components); + } + + public static Optional<CacheKey> parseKey(String key) { + String[] parts = key.split(":", -1); + + if (parts.length < 3 || !parts[0].equals(provider)) { + return Optional.empty(); + } + + for (int i = 0; i < parts.length; i++) { + if (parts[i].contains(";")) { + parts[i] = parts[i].replaceAll(";", ":"); + } + } + + try { + Kind kind = Kind.fromString(parts[1]); + switch (kind) { + case LOGICAL: + return Optional.of(parseLogicalKey(parts)); + case ARTIFACT: + return Optional.empty(); + case INFRASTRUCTURE: + return Optional.of(new InfrastructureCacheKey(parts)); + default: + throw new IllegalArgumentException("Unknown kind " + kind); + } + } catch (IllegalArgumentException e) { + log.warn( + "Kubernetes owned kind with unknown key structure '{}': {} (perhaps try flushing all clouddriver:* redis keys)", + key, + parts, + e); + return Optional.empty(); + } + } + + private static CacheKey parseLogicalKey(String[] parts) { + assert (parts.length >= 3); + + LogicalKind logicalKind = LogicalKind.fromString(parts[2]); + + switch (logicalKind) { + case APPLICATIONS: + return new ApplicationCacheKey(parts); + case CLUSTERS: + return new ClusterCacheKey(parts); + default: + throw new IllegalArgumentException("Unknown kind " + logicalKind); + } + } + + @EqualsAndHashCode + public abstract static class CacheKey { + @Getter private final String provider = KubernetesCloudProvider.ID; + + public abstract String getGroup(); + + public abstract String getName(); + } + + @EqualsAndHashCode(callSuper = true) + @Getter + public abstract static class LogicalKey extends CacheKey { + @Getter private static final Kind kind = Kind.LOGICAL; + + public abstract LogicalKind getLogicalKind(); + + @Override + public final String getGroup() { + return getLogicalKind().toString(); + } + } + + @EqualsAndHashCode(callSuper = true) + @Getter + @RequiredArgsConstructor + public static class ApplicationCacheKey extends LogicalKey { + private static final LogicalKind
logicalKind = LogicalKind.APPLICATIONS; + private final String name; + + protected ApplicationCacheKey(String[] parts) { + if (parts.length != 4) { + throw new IllegalArgumentException("Malformed application key " + Arrays.toString(parts)); + } + + name = parts[3]; + } + + public static String createKey(String name) { + return createKeyFromParts(getKind(), logicalKind, name); + } + + @Override + public LogicalKind getLogicalKind() { + return logicalKind; + } + + @Override + public String toString() { + return createKeyFromParts(getKind(), logicalKind, name); + } + } + + @EqualsAndHashCode(callSuper = true) + @Getter + @RequiredArgsConstructor + public static class ClusterCacheKey extends LogicalKey { + private static final LogicalKind logicalKind = LogicalKind.CLUSTERS; + private final String account; + private final String application; + private final String name; + + public ClusterCacheKey(String[] parts) { + if (parts.length != 6) { + throw new IllegalArgumentException("Malformed cluster key " + Arrays.toString(parts)); + } + + account = parts[3]; + application = parts[4]; + name = parts[5]; + } + + public static String createKey(String account, String application, String name) { + return createKeyFromParts(getKind(), logicalKind, account, application, name); + } + + @Override + public LogicalKind getLogicalKind() { + return logicalKind; + } + + @Override + public String toString() { + return createKeyFromParts(getKind(), logicalKind, account, application, name); + } + } + + @EqualsAndHashCode(callSuper = true) + @Getter + @RequiredArgsConstructor + public static class InfrastructureCacheKey extends CacheKey { + @Getter private static final Kind kind = Kind.INFRASTRUCTURE; + private final KubernetesKind kubernetesKind; + private final String account; + private final String namespace; + private final String name; + + protected InfrastructureCacheKey(String[] parts) { + if (parts.length != 6) { + throw new IllegalArgumentException( + "Malformed infrastructure key " + Arrays.toString(parts)); + } + + kubernetesKind = KubernetesKind.fromString(parts[2]); + account = parts[3]; + namespace = parts[4]; + name = parts[5]; + } + + public InfrastructureCacheKey(KubernetesManifest manifest, String account) { + this(manifest.getKind(), account, manifest.getNamespace(), manifest.getName()); + } + + public static String createKey( + KubernetesKind kubernetesKind, String account, String namespace, String name) { + return createKeyFromParts(kind, kubernetesKind, account, namespace, name); + } + + public static String createKey(String account, KubernetesCoordinates coords) { + return createKeyFromParts( + kind, coords.getKind(), account, coords.getNamespace(), coords.getName()); + } + + public static String createKey(KubernetesManifest manifest, String account) { + return createKey(manifest.getKind(), account, manifest.getNamespace(), manifest.getName()); + } + + @Override + public String toString() { + return createKeyFromParts(kind, kubernetesKind, account, namespace, name); + } + + @Override + public String getGroup() { + return kubernetesKind.toString(); + } + } +}
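For reference, the cache keys built by createKeyFromParts above are plain colon-delimited strings with the provider prefixed, and any ':' inside a component is escaped to ';' so parseKey can split on ':' unambiguously. A minimal sketch of the layout (illustrative only; the KeyFormatSketch name and the sample account/namespace/name values are invented):

```java
import java.util.Arrays;
import java.util.stream.Collectors;

// Illustrative sketch (not part of this change): shows the key layout produced
// by Keys.createKeyFromParts and the ':' -> ';' escaping it applies.
public class KeyFormatSketch {
  private static final String PROVIDER = "kubernetes.v2";

  static String createKey(Object... elems) {
    return PROVIDER
        + ":"
        + Arrays.stream(elems)
            .map(e -> e == null ? "" : e.toString().replaceAll(":", ";"))
            .collect(Collectors.joining(":"));
  }

  public static void main(String[] args) {
    // An infrastructure key: key kind, kubernetes kind, account, namespace, name.
    System.out.println(createKey("infrastructure", "replicaSet", "prod", "default", "web-v003"));
    // -> kubernetes.v2:infrastructure:replicaSet:prod:default:web-v003

    // Colons inside a component are escaped so the key still splits into 6 parts.
    System.out.println(createKey("infrastructure", "replicaSet", "prod", "default", "web:canary"));
    // -> kubernetes.v2:infrastructure:replicaSet:prod:default:web;canary
  }
}
```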
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesProvider.java new file mode 100644 index 00000000000..889dad1c94c --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/KubernetesProvider.java @@ -0,0 +1,34 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching; + +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.security.BaseProvider; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesProvider extends BaseProvider { + public static final String PROVIDER_NAME = KubernetesCloudProvider.ID; + + @Override + public String getProviderName() { + return PROVIDER_NAME; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/CustomKubernetesCachingAgentFactory.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/CustomKubernetesCachingAgentFactory.java new file mode 100644 index 00000000000..380c66b9fc9 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/CustomKubernetesCachingAgentFactory.java @@ -0,0 +1,108 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import org.springframework.lang.Nullable; + +public class CustomKubernetesCachingAgentFactory { + public static KubernetesCachingAgent create( + KubernetesKind kind, + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + return new Agent( + kind, + namedAccountCredentials, + objectMapper, + registry, + agentIndex, + agentCount, + agentInterval, + configurationProperties, + kubernetesSpinnakerKindMap, + front50ApplicationLoader); + } + + /** + * Instances of this class cache kinds specified in the list + * "kubernetes.accounts[*].customResourceDefinitions" in config. + * + *

<p>There's one instance of this class for every kind in the list, and only the kinds that are + * allowed by the configuration in "kubernetes.cache.*" are cached. + */ + private static class Agent extends KubernetesCachingAgent { + private final KubernetesKind kind; + + Agent( + KubernetesKind kind, + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + super( + namedAccountCredentials, + objectMapper, + registry, + agentIndex, + agentCount, + agentInterval, + configurationProperties, + kubernetesSpinnakerKindMap, + front50ApplicationLoader); + this.kind = kind; + } + + @Override + protected ImmutableList<KubernetesKind> primaryKinds() { + return ImmutableList.of(this.kind); + } + + @Override + public final ImmutableSet<AgentDataType> getProvidedDataTypes() { + return ImmutableSet.of(AUTHORITATIVE.forType(this.kind.toString())); + } + + @Override + public String getAgentType() { + return String.format( + "%s/CustomKubernetes(%s)[%d/%d]", accountName, kind, agentIndex + 1, agentCount); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/Front50ApplicationLoader.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/Front50ApplicationLoader.java new file mode 100644 index 00000000000..4430d8b6607 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/Front50ApplicationLoader.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.clouddriver.model.Front50Application; +import com.netflix.spinnaker.security.AuthenticatedRequest; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.lang.Nullable; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +/** + * We could have a conditional on both the kubernetes.cache.checkApplicationInFront50 and + * services.front50.enabled properties. But if the former is enabled and the latter is not, that + * means the downstream clients relying on this will fail, since the cache will be empty. So we + * explicitly leave out the front50 conditional and log an error message in refreshCache() + * stating the same.
We could have logged the error message in the downstream clients if it couldn't + * find the front50ApplicationLoader bean, but that can be extremely noisy. + */ +@Slf4j +@Component +@ConditionalOnProperty("kubernetes.cache.checkApplicationInFront50") +public class Front50ApplicationLoader { + + @Nullable private final Front50Service front50Service; + private AtomicReference<Set<String>> cache; + + Front50ApplicationLoader(@Nullable Front50Service front50Service) { + this.front50Service = front50Service; + this.cache = new AtomicReference<>(Collections.emptySet()); + } + + public Set<String> getData() { + return cache.get(); + } + + @Scheduled( + fixedDelayString = "${kubernetes.cache.refreshFront50ApplicationsCacheIntervalInMs:60000}") + protected void refreshCache() { + try { + log.info("refreshing front50 applications cache"); + if (front50Service == null) { + log.info("front50 is disabled, cannot fetch applications"); + return; + } + Set<Front50Application> response = + AuthenticatedRequest.allowAnonymous(front50Service::getAllApplicationsUnrestricted); + Set<String> applicationsKnownToFront50 = + response.stream().map(Front50Application::getName).collect(Collectors.toSet()); + log.info("received {} applications from front50", applicationsKnownToFront50.size()); + cache.set(applicationsKnownToFront50); + } catch (Exception e) { + log.warn("failed to update application cache with new front50 data. Error: ", e); + } + } +}
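The loader above follows a simple snapshot-swap pattern: readers call getData() and always see the last complete set, while the scheduled refresh replaces the whole set atomically. A minimal sketch of that pattern (the SnapshotCacheSketch name and the sample application names are invented; this is not the Spinnaker API):

```java
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative sketch (not part of this change): the lock-free refresh pattern
// used above. Readers always see a complete snapshot; the refresher swaps in a
// new set atomically instead of mutating the old one.
public class SnapshotCacheSketch {
  private final AtomicReference<Set<String>> cache = new AtomicReference<>(Set.of());

  public Set<String> getData() {
    return cache.get(); // never null; empty until the first refresh succeeds
  }

  void refresh(Set<String> latest) {
    cache.set(Set.copyOf(latest)); // replace wholesale; no partial updates visible
  }

  public static void main(String[] args) {
    SnapshotCacheSketch sketch = new SnapshotCacheSketch();
    System.out.println(sketch.getData().contains("myapp")); // false
    sketch.refresh(Set.of("myapp", "otherapp"));
    System.out.println(sketch.getData().contains("myapp")); // true
  }
}
```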
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheData.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheData.java new file mode 100644 index 00000000000..138f11bd3e3 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheData.java @@ -0,0 +1,148 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.CacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import java.util.*; +import java.util.stream.Collectors; +import lombok.Value; + +/** + * A collection of CacheItem entries used when building up the items being cached by the Kubernetes + * caching agent. This class supports adding items as well as adding relationships between items. + * + * <p>Once all cache items and relationships have been added, calling toCacheData() will return a + * Collection of CacheData entries that represent all added items and their relationships. The + * operations supported on the class guarantee that the resulting Collection<CacheData> has + * the following properties: (1) Each CacheData has a unique cache key, (2) all relationships + * between CacheData items are bidirectional + */ +public class KubernetesCacheData { + private final Map<CacheKey, CacheItem> items = new HashMap<>(); + + /** + * Add an item to the cache with the specified key and attributes. If there is already an item + * with the given key, the attributes are merged into the existing item's attributes (with the + * input attributes taking priority). + */ + public void addItem(CacheKey key, Map<String, Object> attributes) { + CacheItem item = items.computeIfAbsent(key, CacheItem::new); + item.getAttributes().putAll(attributes); + } + + /** + * Add a bidirectional relationship between two keys. If either of the keys is not yet in the + * cache, an entry is created for that item with an empty map of attributes. + */ + public void addRelationship(CacheKey a, CacheKey b) { + items.computeIfAbsent(a, CacheItem::new).getRelationships().add(b); + items.computeIfAbsent(b, CacheItem::new).getRelationships().add(a); + } + + /** + * Add bidirectional relationships between key a and each of the keys in the Set b. If any of + * the encountered keys is not in the cache, an entry is created for that item with an empty map + * of attributes. + */ + public void addRelationships(CacheKey a, Set<CacheKey> b) { + items.computeIfAbsent(a, CacheItem::new).getRelationships().addAll(b); + b.forEach(k -> items.computeIfAbsent(k, CacheItem::new).getRelationships().add(a)); + } + + /** Return a List of CacheData entries representing the current items in the cache. */ + public List<CacheData> toCacheData() { + return items.values().stream() + .filter(item -> !item.omitItem()) + .map(CacheItem::toCacheData) + .collect(Collectors.toList()); + } + + /** + * Return a List of CacheData entries representing the current items in the cache, grouped by the + * item's group. + */ + public Map<String, Collection<CacheData>> toStratifiedCacheData() { + return items.values().stream() + .filter(item -> !item.omitItem()) + .collect( + Collectors.groupingBy( + item -> item.key.getGroup(), + Collectors.mapping( + CacheItem::toCacheData, Collectors.toCollection(ArrayList::new)))); + } + + /** + * An item being cached by the Kubernetes provider. This corresponds to a CacheData entry, but + * stores the information in a format that is more efficient to manipulate as we build up the + * cache data. + * + * <p>In particular: the cache key is stored as a Keys.CacheKey object (rather than serialized) so + * we can access properties of the key without re-parsing it, and the relationships are stored as + * a flat Set<Keys.CacheKey> instead of as a Map<String, Collection<String>> so + * that we can efficiently add relationships. + * + *

<p>A CacheItem can be converted to its corresponding CacheData by calling toCacheData() + */ + @Value + private static class CacheItem { + private final CacheKey key; + private final Map<String, Object> attributes = new HashMap<>(); + private final Set<CacheKey> relationships = new HashSet<>(); + + private Map<String, Set<String>> groupedRelationships() { + Map<String, Set<String>> groups = new HashMap<>(); + for (KubernetesKind kind : KubernetesCacheDataConverter.getStickyKinds()) { + groups.put(kind.toString(), new HashSet<>()); + } + for (CacheKey key : relationships) { + groups.computeIfAbsent(key.getGroup(), k -> new HashSet<>()).add(key.toString()); + } + return groups; + } + + /** + * Given that we now have large caching agents that are authoritative for huge chunks of the + * cache, it's possible that some resources (like events) still point to deleted resources. + * These won't have any attributes, but if we add a cache entry here, the deleted item will + * still be cached. + */ + public boolean omitItem() { + return key instanceof InfrastructureCacheKey && attributes.isEmpty(); + } + + /** Convert this CacheItem to its corresponding CacheData object */ + public CacheData toCacheData() { + int ttlSeconds; + if (Keys.LogicalKind.isLogicalGroup(key.getGroup())) { + // If we are inverting a relationship to create a cache data for either a cluster or an + // application, we need to insert attributes to ensure the cache data gets entered into + // the cache. + attributes.putIfAbsent("name", key.getName()); + ttlSeconds = KubernetesCacheDataConverter.getLogicalTtlSeconds(); + } else { + ttlSeconds = KubernetesCacheDataConverter.getInfrastructureTtlSeconds(); + } + return new DefaultCacheData(key.toString(), ttlSeconds, attributes, groupedRelationships()); + } + } +}
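The bidirectional guarantee is the important invariant here: a single addRelationship call records the edge from both endpoints, so either side can be traversed or evicted symmetrically. A stripped-down sketch of that behavior (the BidirectionalEdgesSketch name and the sample key strings are invented):

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative sketch (not part of this change): the bidirectional-relationship
// invariant that KubernetesCacheData maintains. Adding an edge in one direction
// always records the reverse edge too.
public class BidirectionalEdgesSketch {
  private final Map<String, Set<String>> relationships = new HashMap<>();

  void addRelationship(String a, String b) {
    relationships.computeIfAbsent(a, k -> new HashSet<>()).add(b);
    relationships.computeIfAbsent(b, k -> new HashSet<>()).add(a);
  }

  public static void main(String[] args) {
    BidirectionalEdgesSketch graph = new BidirectionalEdgesSketch();
    graph.addRelationship("application:myapp", "replicaSet:web-v003");
    // Both directions are present after a single call:
    System.out.println(graph.relationships.get("application:myapp"));   // [replicaSet:web-v003]
    System.out.println(graph.relationships.get("replicaSet:web-v003")); // [application:myapp]
  }
}
```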
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConverter.java new file mode 100644 index 00000000000..07e728ed217 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConverter.java @@ -0,0 +1,229 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SECURITY_GROUPS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUPS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUP_MANAGERS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.POD; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.SERVICE; +import static java.lang.Math.toIntExact; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.CacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.ClusterCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest.OwnerReference; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import io.kubernetes.client.openapi.JSON; +import java.util.*; +import java.util.concurrent.TimeUnit; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.Getter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KubernetesCacheDataConverter { + private static final Logger log = LoggerFactory.getLogger(KubernetesCacheDataConverter.class); + private static final ObjectMapper mapper = new ObjectMapper(); + private static final JSON json = new JSON(); + // TODO(lwander): make configurable + @Getter private static final int logicalTtlSeconds = toIntExact(TimeUnit.MINUTES.toSeconds(10)); + @Getter private static final int infrastructureTtlSeconds = -1; + // These are kinds which are frequently added/removed from other resources, and can sometimes + // persist in the cache when no relationships are found. + // todo(lwander) investigate if this can cause flapping in UI for on demand updates -- no + // consensus on this yet.
+ @Getter private static final List stickyKinds = Arrays.asList(SERVICE, POD); + + @Getter + private static final ImmutableSet logicalRelationshipKinds = + ImmutableSet.of(LOAD_BALANCERS, SECURITY_GROUPS, SERVER_GROUPS, SERVER_GROUP_MANAGERS); + + private static final ImmutableSet clusterRelationshipKinds = + ImmutableSet.of(SERVER_GROUPS, SERVER_GROUP_MANAGERS); + + @NonnullByDefault + public static CacheData mergeCacheData(CacheData current, CacheData added) { + String id = current.getId(); + Map attributes = new HashMap<>(current.getAttributes()); + attributes.putAll(added.getAttributes()); + // Behavior is: if no ttl is set on either, the merged key won't expire + int ttl = Math.min(current.getTtlSeconds(), added.getTtlSeconds()); + Map> relationships = new HashMap<>(current.getRelationships()); + + added + .getRelationships() + .forEach( + (key, value) -> + relationships.merge( + key, + value, + (a, b) -> { + Collection res = new HashSet<>(Math.max(a.size(), b.size())); + res.addAll(a); + res.addAll(b); + return res; + })); + + // when no relationship exists, and `null` is written in place of a value, the old value of the + // relationship (whatever was picked up the prior cache cycle) is persisted, leaving sticky + // relationship data in the cache. we don't zero out all non existing relationships because it + // winds up causing far more writes to redis. + stickyKinds.forEach(k -> relationships.computeIfAbsent(k.toString(), s -> new ArrayList<>())); + return new DefaultCacheData(id, ttl, attributes, relationships); + } + + @ParametersAreNonnullByDefault + public static void convertAsResource( + KubernetesCacheData kubernetesCacheData, + String account, + KubernetesSpinnakerKindMap kindMap, + Namer namer, + KubernetesManifest manifest, + List resourceRelationships, + boolean cacheAllRelationships) { + KubernetesKind kind = manifest.getKind(); + String name = manifest.getName(); + String namespace = manifest.getNamespace(); + Moniker moniker = namer.deriveMoniker(manifest); + + Map attributes = + new ImmutableMap.Builder() + .put("kind", kind) + .put("apiVersion", manifest.getApiVersion()) + .put("name", name) + .put("namespace", namespace) + .put("fullResourceName", manifest.getFullResourceName()) + .put("manifest", manifest) + .put("moniker", moniker) + .build(); + + Keys.CacheKey key = new Keys.InfrastructureCacheKey(kind, account, namespace, name); + kubernetesCacheData.addItem(key, attributes); + + SpinnakerKind spinnakerKind = kindMap.translateKubernetesKind(kind); + + if (cacheAllRelationships || logicalRelationshipKinds.contains(spinnakerKind)) { + addLogicalRelationships( + kubernetesCacheData, + key, + account, + moniker, + clusterRelationshipKinds.contains(spinnakerKind)); + } + kubernetesCacheData.addRelationships( + key, ownerReferenceRelationships(account, namespace, manifest.getOwnerReferences())); + kubernetesCacheData.addRelationships( + key, implicitRelationships(manifest, account, resourceRelationships)); + } + + public static KubernetesManifest getManifest(CacheData cacheData) { + return mapper.convertValue(cacheData.getAttributes().get("manifest"), KubernetesManifest.class); + } + + public static Moniker getMoniker(CacheData cacheData) { + return mapper.convertValue(cacheData.getAttributes().get("moniker"), Moniker.class); + } + + public static KubernetesManifest convertToManifest(Object o) { + return mapper.convertValue(o, KubernetesManifest.class); + } + + public static T getResource(Object manifest, Class clazz) { + // A little hacky, but the only way to 
deserialize any timestamps using string constructors + return json.deserialize(json.serialize(manifest), clazz); + } + + private static void addLogicalRelationships( + KubernetesCacheData kubernetesCacheData, + Keys.CacheKey infrastructureKey, + String account, + Moniker moniker, + boolean hasClusterRelationship) { + String application = moniker.getApp(); + if (Strings.isNullOrEmpty(application)) { + return; + } + Keys.CacheKey applicationKey = new Keys.ApplicationCacheKey(application); + kubernetesCacheData.addRelationship(infrastructureKey, applicationKey); + + String cluster = moniker.getCluster(); + if (hasClusterRelationship && !Strings.isNullOrEmpty(cluster)) { + CacheKey clusterKey = new ClusterCacheKey(account, application, cluster); + kubernetesCacheData.addRelationship(infrastructureKey, clusterKey); + kubernetesCacheData.addRelationship(applicationKey, clusterKey); + } + } + + @NonnullByDefault + private static ImmutableSet implicitRelationships( + KubernetesManifest source, String account, List manifests) { + return manifests.stream() + .map( + m -> + new Keys.InfrastructureCacheKey( + m.getKind(), account, source.getNamespace(), m.getName())) + .collect(toImmutableSet()); + } + + @NonnullByDefault + static ImmutableSet ownerReferenceRelationships( + String account, String namespace, List references) { + return references.stream() + .map( + r -> new Keys.InfrastructureCacheKey(r.computedKind(), account, namespace, r.getName())) + .collect(toImmutableSet()); + } + + static void logStratifiedCacheData( + String agentType, Map> stratifiedCacheData) { + for (Map.Entry> entry : stratifiedCacheData.entrySet()) { + log.info( + agentType + + ": grouping " + + entry.getKey() + + " has " + + entry.getValue().size() + + " entries and " + + relationshipCount(entry.getValue()) + + " relationships"); + } + } + + private static int relationshipCount(Collection data) { + return data.stream().mapToInt(KubernetesCacheDataConverter::relationshipCount).sum(); + } + + private static int relationshipCount(CacheData data) { + return data.getRelationships().values().stream().mapToInt(Collection::size).sum(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgent.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgent.java new file mode 100644 index 00000000000..4ee2c69c2bd --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgent.java @@ -0,0 +1,453 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSetMultimap; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentIntervalAware; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesCachingPolicy; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesCachingProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties.ResourceScope; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.annotation.Nonnull; +import lombok.Getter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.lang.Nullable; + +/** + * A kubernetes caching agent is a class that caches part of the kubernetes infrastructure. Every + * instance of a caching agent is responsible for caching only one account, and only some (but not + * all) kubernetes kinds of that account. 
+ */ +public abstract class KubernetesCachingAgent + implements AgentIntervalAware, CachingAgent, AccountAware { + private static final Logger log = LoggerFactory.getLogger(KubernetesCachingAgent.class); + + public static final List SPINNAKER_UI_KINDS = + Arrays.asList( + SpinnakerKind.SERVER_GROUP_MANAGERS, + SpinnakerKind.SERVER_GROUPS, + SpinnakerKind.INSTANCES, + SpinnakerKind.LOAD_BALANCERS, + SpinnakerKind.SECURITY_GROUPS); + + @Getter @Nonnull protected final String accountName; + protected final Registry registry; + protected final KubernetesCredentials credentials; + protected final ObjectMapper objectMapper; + + protected final int agentIndex; + protected final int agentCount; + protected KubectlJobExecutor jobExecutor; + + @Getter protected String providerName = KubernetesCloudProvider.ID; + + @Getter protected final Long agentInterval; + + protected final KubernetesConfigurationProperties configurationProperties; + + protected final KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap; + @Nullable private final Front50ApplicationLoader front50ApplicationLoader; + + protected KubernetesCachingAgent( + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + this.accountName = namedAccountCredentials.getName(); + this.credentials = namedAccountCredentials.getCredentials(); + this.objectMapper = objectMapper; + this.registry = registry; + this.agentIndex = agentIndex; + this.agentCount = agentCount; + this.agentInterval = agentInterval; + this.configurationProperties = configurationProperties; + this.kubernetesSpinnakerKindMap = kubernetesSpinnakerKindMap; + this.front50ApplicationLoader = front50ApplicationLoader; + } + + protected Map defaultIntrospectionDetails() { + Map result = new HashMap<>(); + result.put("namespaces", getNamespaces()); + result.put("kinds", filteredPrimaryKinds()); + return result; + } + + protected abstract List primaryKinds(); + + /** + * Filters the list of kinds returned from primaryKinds according to configuration. + * + * @return filtered list of primaryKinds. + */ + protected List filteredPrimaryKinds() { + List primaryKinds = primaryKinds(); + List filteredPrimaryKinds; + + if (configurationProperties.getCache().isCacheAll()) { + filteredPrimaryKinds = primaryKinds; + + } else if (configurationProperties.getCache().getCacheKinds() != null + && configurationProperties.getCache().getCacheKinds().size() > 0) { + // If provider config specifies what kinds to cache, use it + filteredPrimaryKinds = + configurationProperties.getCache().getCacheKinds().stream() + .map(KubernetesKind::fromString) + .filter(primaryKinds::contains) + .collect(Collectors.toList()); + + } else { + // Only cache kinds used in Spinnaker's classic infrastructure screens, which are the kinds + // mapped to Spinnaker kinds like ServerGroups, Instances, etc. 
filteredPrimaryKinds = + SPINNAKER_UI_KINDS.stream() + .map(kubernetesSpinnakerKindMap::translateSpinnakerKind) + .flatMap(Collection::stream) + .filter(primaryKinds::contains) + .collect(Collectors.toList()); + } + + // Filter out explicitly omitted kinds in provider config + if (configurationProperties.getCache().getCacheOmitKinds() != null + && configurationProperties.getCache().getCacheOmitKinds().size() > 0) { + List<KubernetesKind> omitKinds = + configurationProperties.getCache().getCacheOmitKinds().stream() + .map(KubernetesKind::fromString) + .collect(Collectors.toList()); + filteredPrimaryKinds = + filteredPrimaryKinds.stream() + .filter(k -> !omitKinds.contains(k)) + .collect(Collectors.toList()); + } + + return filteredPrimaryKinds; + } + + private ImmutableList<KubernetesManifest> loadResources( + @Nonnull Iterable<KubernetesKind> kubernetesKinds, Optional<String> optionalNamespace) { + String namespace = optionalNamespace.orElse(null); + return credentials.list(ImmutableList.copyOf(kubernetesKinds), namespace); + } + + @Nonnull + private ImmutableList<KubernetesManifest> loadNamespaceScopedResources( + @Nonnull Iterable<KubernetesKind> kubernetesKinds) { + return getNamespaces() + // Not using parallelStream. In ForkJoin.commonPool, the number of threads == (CPU cores - + // 1). Since we're already running in the AgentExecutionAction thread pool and the number of + // threads to compute namespaces is already configurable at account level, this is not + // needed and most importantly, avoids contention in the common pool, increasing + // performance. + .stream() + .map(n -> loadResources(kubernetesKinds, Optional.of(n))) + .flatMap(Collection::stream) + .collect(ImmutableList.toImmutableList()); + } + + @Nonnull + private ImmutableList<KubernetesManifest> loadClusterScopedResources( + @Nonnull Iterable<KubernetesKind> kubernetesKinds) { + if (handleClusterScopedResources()) { + return loadResources(kubernetesKinds, Optional.empty()); + } else { + return ImmutableList.of(); + } + } + + private ImmutableSetMultimap<ResourceScope, KubernetesKind> primaryKindsByScope() { + return filteredPrimaryKinds().stream() + .collect( + ImmutableSetMultimap.toImmutableSetMultimap( + k -> credentials.getKindProperties(k).getResourceScope(), Function.identity())); + } + + protected Map<KubernetesKind, List<KubernetesManifest>> loadPrimaryResourceList() { + ImmutableSetMultimap<ResourceScope, KubernetesKind> kindsByScope = primaryKindsByScope(); + + Map<KubernetesKind, List<KubernetesManifest>> result = + Stream.concat( + loadClusterScopedResources( + kindsByScope.get(KubernetesKindProperties.ResourceScope.CLUSTER)) + .stream(), + loadNamespaceScopedResources( + kindsByScope.get(KubernetesKindProperties.ResourceScope.NAMESPACE)) + .stream()) + .collect(Collectors.groupingBy(KubernetesManifest::getKind)); + + for (KubernetesCachingPolicy policy : credentials.getCachingPolicies()) { + KubernetesKind policyKind = KubernetesKind.fromString(policy.getKubernetesKind()); + if (!result.containsKey(policyKind)) { + continue; + } + + List<KubernetesManifest> entries = result.get(policyKind); + if (entries == null) { + continue; + } + + if (entries.size() > policy.getMaxEntriesPerAgent()) { + log.warn( + "{}: Pruning {} entries from kind {}", + getAgentType(), + entries.size() - policy.getMaxEntriesPerAgent(), + policyKind); + entries = entries.subList(0, policy.getMaxEntriesPerAgent()); + result.put(policyKind, entries); + } + } + + return result; + } + + /** + * Deprecated in favor of {@link KubernetesCachingAgent#loadPrimaryResource(KubernetesCoordinates)}.
+ */ + @Deprecated + protected KubernetesManifest loadPrimaryResource( + KubernetesKind kind, String namespace, String name) { + return loadPrimaryResource( + KubernetesCoordinates.builder().kind(kind).namespace(namespace).name(name).build()); + } + + protected KubernetesManifest loadPrimaryResource(KubernetesCoordinates coordinates) { + return credentials.get(coordinates); + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + log.info(getAgentType() + ": agent is starting"); + Map<String, Object> details = defaultIntrospectionDetails(); + + long start = System.currentTimeMillis(); + Map<KubernetesKind, List<KubernetesManifest>> primaryResourceList = loadPrimaryResourceList(); + details.put("timeSpentInKubectlMs", System.currentTimeMillis() - start); + return buildCacheResult(primaryResourceList); + } + + protected CacheResult buildCacheResult(KubernetesManifest resource) { + return buildCacheResult(ImmutableMap.of(resource.getKind(), ImmutableList.of(resource))); + } + + /** + * Method that determines if the provided manifest should be cached or not. It makes that + * determination based on the following rules: + * + * <p>- if a manifest's caching properties have ignore == true, then it will not be cached. + * + * <p>- Otherwise, if the account is configured to be "onlySpinnakerManaged", and the + * "moniker.spinnaker.io/application" annotation is empty, then it will not be cached. + * + * <p>- if {@link KubernetesConfigurationProperties.Cache#isCheckApplicationInFront50()} is true, + * and the application name obtained from the manifest is not known to front50, then the manifest + * will not be cached as long as it belongs to one of the logical relationship kinds specified in + * {@link KubernetesCacheDataConverter#getLogicalRelationshipKinds()}. + * + * <p>
- If none of the above criteria is satisfied, then the manifest will be cached. + * + * @param credentials account credentials + * @return true, if manifest should be cached, false otherwise + */ + private Predicate shouldCacheManifest(KubernetesCredentials credentials) { + return m -> { + KubernetesCachingProperties props = KubernetesManifestAnnotater.getCachingProperties(m); + if (props.isIgnore()) { + return false; + } + + if (credentials.isOnlySpinnakerManaged() && props.getApplication().isEmpty()) { + return false; + } + + if (configurationProperties.getCache().isCheckApplicationInFront50()) { + // only certain type of kinds are stored in cats_v1_applications table + SpinnakerKind spinnakerKind = + credentials.getKubernetesSpinnakerKindMap().translateKubernetesKind(m.getKind()); + log.debug( + "{}: manifest: {}, kind: {}, spinnakerKind: {}, logicalRelationshipKinds: {}", + getAgentType(), + m.getFullResourceName(), + m.getKind(), + spinnakerKind, + KubernetesCacheDataConverter.getLogicalRelationshipKinds()); + if (KubernetesCacheDataConverter.getLogicalRelationshipKinds().contains(spinnakerKind)) { + if (front50ApplicationLoader == null) { + return false; + } + + String appNameFromMoniker = credentials.getNamer().deriveMoniker(m).getApp(); + + boolean shouldCache = + front50ApplicationLoader.getData().stream() + .anyMatch(app -> app.equalsIgnoreCase(appNameFromMoniker)); + + log.debug( + "{}: manifest: {}, application name: {}, shouldCache: {}", + getAgentType(), + m.getFullResourceName(), + appNameFromMoniker, + shouldCache); + + return shouldCache; + } + } + return true; + }; + } + + protected CacheResult buildCacheResult(Map> resources) { + if (resources.isEmpty()) { + log.info("{} did not find anything to cache", getAgentType()); + return new DefaultCacheResult(Map.of()); + } + + KubernetesCacheData kubernetesCacheData = new KubernetesCacheData(); + Map> relationships = + loadSecondaryResourceRelationships(resources); + + AtomicInteger successfulCachedManifests = new AtomicInteger(); + AtomicInteger cachingFailures = new AtomicInteger(); + resources.values().stream() + .flatMap(Collection::stream) + .peek( + m -> + credentials + .getResourcePropertyRegistry() + .get(m.getKind()) + .getHandler() + .removeSensitiveKeys(m)) + .filter(shouldCacheManifest(credentials)) + .forEach( + rs -> { + try { + KubernetesCacheDataConverter.convertAsResource( + kubernetesCacheData, + accountName, + credentials.getKubernetesSpinnakerKindMap(), + credentials.getNamer(), + rs, + relationships.getOrDefault(rs, ImmutableList.of()), + credentials.isCacheAllApplicationRelationships()); + successfulCachedManifests.incrementAndGet(); + } catch (RuntimeException e) { + log.warn( + "{}: Failure converting manifest: {}. Error: ", + getAgentType(), + rs.getFullResourceName(), + e); + log.debug("{}: Failure converting {}. Error: ", getAgentType(), rs, e); + cachingFailures.incrementAndGet(); + } + }); + + Map> entries = kubernetesCacheData.toStratifiedCacheData(); + int total = resources.values().stream().mapToInt(List::size).sum(); + int cachedEntriesTotal = entries.values().stream().mapToInt(Collection::size).sum(); + log.info( + "{}: Results: Attempted to cache {} manifests, belonging to {} kinds." 
+ " Successful: {}, Failed: {}, Skipped: {}," + " Total Kubernetes caching groups: {}, containing: {} entries", + getAgentType(), + total, + resources.size(), + successfulCachedManifests.get(), + cachingFailures.get(), + total - (successfulCachedManifests.get() + cachingFailures.get()), + entries.size(), + cachedEntriesTotal); + KubernetesCacheDataConverter.logStratifiedCacheData(getAgentType(), entries); + + return new DefaultCacheResult(entries); + } + + protected Map<KubernetesManifest, List<KubernetesManifest>> loadSecondaryResourceRelationships( + Map<KubernetesKind, List<KubernetesManifest>> allResources) { + Map<KubernetesManifest, List<KubernetesManifest>> result = new HashMap<>(); + allResources + .keySet() + .forEach( + k -> { + try { + credentials + .getResourcePropertyRegistry() + .get(k) + .getHandler() + .addRelationships(allResources, result); + } catch (RuntimeException e) { + log.warn("{}: Failure adding relationships for {}", getAgentType(), k, e); + } + }); + return result; + } + + protected ImmutableList<String> getNamespaces() { + return credentials.getDeclaredNamespaces().stream() + .filter(n -> agentCount == 1 || Math.abs(n.hashCode() % agentCount) == agentIndex) + .collect(ImmutableList.toImmutableList()); + } + + /** + * Should this caching agent be responsible for caching cluster-scoped resources (ie, those that + * do not live in a particular namespace)? + */ + protected boolean handleClusterScopedResources() { + return agentIndex == 0; + } + + @Override + public String getAgentType() { + return String.format( + "%s/%s[%d/%d]", accountName, this.getClass().getSimpleName(), agentIndex + 1, agentCount); + } +}
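getNamespaces() above is what shards work across the configured number of caching agents for an account: each agent keeps only the namespaces whose hash maps to its index, so together the agents cover every declared namespace exactly once. A small sketch of that partitioning under the same filter expression (the NamespaceShardingSketch name and the namespace names are invented):

```java
import java.util.List;
import java.util.stream.Collectors;

// Illustrative sketch (not part of this change): how getNamespaces() shards
// namespaces across agents by hashCode. Each agent's filter keeps a disjoint
// subset, and the union over all agent indices covers the whole list.
public class NamespaceShardingSketch {
  public static void main(String[] args) {
    List<String> declared = List.of("default", "kube-system", "team-a", "team-b");
    int agentCount = 2;
    for (int agentIndex = 0; agentIndex < agentCount; agentIndex++) {
      final int idx = agentIndex;
      List<String> mine =
          declared.stream()
              .filter(n -> agentCount == 1 || Math.abs(n.hashCode() % agentCount) == idx)
              .collect(Collectors.toList());
      System.out.println("agent " + idx + " caches " + mine);
    }
  }
}
```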
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentDispatcher.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentDispatcher.java new file mode 100644 index 00000000000..77b59ee29ff --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentDispatcher.java @@ -0,0 +1,106 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.lang.Nullable; +import org.springframework.stereotype.Component; + +@Component +@Slf4j +public class KubernetesCachingAgentDispatcher { + private final ObjectMapper objectMapper; + private final Registry registry; + private final KubernetesConfigurationProperties configurationProperties; + private final KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap; + @Nullable private final Front50ApplicationLoader front50ApplicationLoader; + + @Autowired + public KubernetesCachingAgentDispatcher( + ObjectMapper objectMapper, + Registry registry, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + this.objectMapper = objectMapper; + this.registry = registry; + this.configurationProperties = configurationProperties; + this.kubernetesSpinnakerKindMap = kubernetesSpinnakerKindMap; + this.front50ApplicationLoader = front50ApplicationLoader; + } + + public Collection<KubernetesCachingAgent> buildAllCachingAgents( + KubernetesNamedAccountCredentials credentials) { + + if (!configurationProperties.getCache().isEnabled()) { + log.info("Caching is disabled by configuration ('kubernetes.cache.enabled')"); + return Collections.emptyList(); + } + + KubernetesCredentials kubernetesCredentials = credentials.getCredentials(); + List<KubernetesCachingAgent> result = new ArrayList<>(); + Long agentInterval = + Optional.ofNullable(credentials.getCacheIntervalSeconds()) + .map(TimeUnit.SECONDS::toMillis) + .orElse(null); + + ResourcePropertyRegistry propertyRegistry = kubernetesCredentials.getResourcePropertyRegistry(); + + IntStream.range(0, credentials.getCacheThreads()) + .forEach( + i -> + propertyRegistry.values().stream() + .map(KubernetesResourceProperties::getHandler) + .map( + h -> + h.buildCachingAgent( + credentials, + objectMapper, + registry, + i, + credentials.getCacheThreads(), + agentInterval, + configurationProperties, + kubernetesSpinnakerKindMap, + front50ApplicationLoader)) + .filter(Objects::nonNull) + .forEach(result::add)); + + return result.stream() + .collect(Collectors.toMap(KubernetesCachingAgent::getAgentType, c -> c, (a, b) -> b)) + .values(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentFactory.java
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentFactory.java new file mode 100644 index 00000000000..c9e621db1bf --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentFactory.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import org.springframework.lang.Nullable; + +@FunctionalInterface +public interface KubernetesCachingAgentFactory { + KubernetesCachingAgent buildCachingAgent( + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader); +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCoreCachingAgent.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCoreCachingAgent.java new file mode 100644 index 00000000000..66eb3f7be7b --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCoreCachingAgent.java @@ -0,0 +1,88 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; +import org.springframework.lang.Nullable; + +/** + * Instances of this class cache kubernetes core kinds for one particular account at regular + * intervals. + * + *

<p>Core kinds is a hardcoded, immutable list defined in {@link + * KubernetesKindProperties#getGlobalKindProperties()}. From this list, only the kinds to which + * clouddriver has access (kubectl get {kind}) and which are allowed by configuration are cached. + */ +public class KubernetesCoreCachingAgent extends KubernetesCachingAgent { + + public KubernetesCoreCachingAgent( + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + super( + namedAccountCredentials, + objectMapper, + registry, + agentIndex, + agentCount, + agentInterval, + configurationProperties, + kubernetesSpinnakerKindMap, + front50ApplicationLoader); + } + + @Override + public Collection<AgentDataType> getProvidedDataTypes() { + // The ARTIFACT kind is deprecated; no new entries of this type will be created. We are leaving + // it in the authoritative types for now so that existing entries get evicted. + @SuppressWarnings("deprecation") + Stream<String> logicalTypes = + Stream.of(Keys.LogicalKind.APPLICATIONS, Keys.LogicalKind.CLUSTERS, Keys.Kind.ARTIFACT) + .map(Enum::toString); + Stream<String> kubernetesTypes = filteredPrimaryKinds().stream().map(KubernetesKind::toString); + + return Stream.concat(logicalTypes, kubernetesTypes) + .map(AUTHORITATIVE::forType) + .collect(toImmutableSet()); + } + + @Override + protected List<KubernetesKind> primaryKinds() { + return credentials.getGlobalKinds(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesUnregisteredCustomResourceCachingAgent.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesUnregisteredCustomResourceCachingAgent.java new file mode 100644 index 00000000000..dd4557d7091 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesUnregisteredCustomResourceCachingAgent.java @@ -0,0 +1,78 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
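For orientation, a minimal standalone sketch (not part of the diff) of how the AUTHORITATIVE data types above are assembled; the kind names "applications" and "deployment" are hypothetical stand-ins for the real logical and kubernetes kind lists, while AgentDataType.Authority.AUTHORITATIVE.forType is the same cats factory the agent uses:

    import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;

    import com.google.common.collect.ImmutableSet;
    import com.netflix.spinnaker.cats.agent.AgentDataType;
    import java.util.stream.Stream;

    class ProvidedTypesSketch {
      // Turns each kind name into an AUTHORITATIVE AgentDataType, mirroring getProvidedDataTypes.
      static ImmutableSet<AgentDataType> providedTypes() {
        return Stream.of("applications", "deployment")
            .map(AUTHORITATIVE::forType)
            .collect(ImmutableSet.toImmutableSet());
      }
    }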
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import org.springframework.lang.Nullable; + +/** + * Instances of this class cache CRDs for one particular account at regular intervals. + * + *

<p>The list of CRDs to cache is the one dynamically returned from "kubectl get crd" calls in + * {@link KubernetesCredentials#getCrds()}, so the kinds cached by this class change dynamically if + * CRDs are added or deleted from the cluster of a particular account. From this list, only the + * kinds to which clouddriver has access (kubectl get {kind}) and which are allowed by configuration are + * cached. + */ +public class KubernetesUnregisteredCustomResourceCachingAgent extends KubernetesCachingAgent { + public KubernetesUnregisteredCustomResourceCachingAgent( + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + super( + namedAccountCredentials, + objectMapper, + registry, + agentIndex, + agentCount, + agentInterval, + configurationProperties, + kubernetesSpinnakerKindMap, + front50ApplicationLoader); + } + + @Override + public ImmutableSet<AgentDataType> getProvidedDataTypes() { + return filteredPrimaryKinds().stream() + .map(k -> AUTHORITATIVE.forType(k.toString())) + .collect(toImmutableSet()); + } + + @Override + protected ImmutableList<KubernetesKind> primaryKinds() { + return credentials.getCrds(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesApplication.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesApplication.java new file mode 100644 index 00000000000..fefabfd073a --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesApplication.java @@ -0,0 +1,35 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.model.Application; +import java.util.Map; +import java.util.Set; +import lombok.Value; + +@Value +public final class KubernetesApplication implements Application { + private final String name; + private final Map<String, Set<String>> clusterNames; + + @Override + public Map<String, String> getAttributes() { + return new ImmutableMap.Builder<String, String>().put("name", name).build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesCluster.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesCluster.java new file mode 100644 index 00000000000..191fac6eb76 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesCluster.java @@ -0,0 +1,56 @@ +/* + * Copyright 2017 Google, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.model.Cluster; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import lombok.Value; + +@Value +public final class KubernetesCluster implements Cluster { + private final String name; + private final Moniker moniker; + private final String type = KubernetesCloudProvider.ID; + private final String accountName; + private final Set<KubernetesServerGroup> serverGroups; + private final Set<KubernetesLoadBalancer> loadBalancers; + private final String application; + + public KubernetesCluster(String rawKey) { + this(rawKey, ImmutableList.of(), ImmutableList.of()); + } + + public KubernetesCluster( + String rawKey, + Collection<KubernetesServerGroup> serverGroups, + Collection<KubernetesLoadBalancer> loadBalancers) { + Keys.ClusterCacheKey key = (Keys.ClusterCacheKey) Keys.parseKey(rawKey).get(); + this.name = key.getName(); + this.accountName = key.getAccount(); + this.application = key.getApplication(); + this.moniker = Moniker.builder().cluster(name).app(application).build(); + this.serverGroups = new HashSet<>(serverGroups); + this.loadBalancers = new HashSet<>(loadBalancers); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesHealth.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesHealth.java new file mode 100644 index 00000000000..0ba4041325e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesHealth.java @@ -0,0 +1,70 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.model.Health; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import io.kubernetes.client.openapi.models.V1ContainerStatus; +import io.kubernetes.client.openapi.models.V1PodStatus; +import java.util.Map; +import lombok.Value; + +@Value +// TODO(lwander): match spec described here +// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ +public final class KubernetesHealth implements Health { + private final HealthState state; + private final String source; + private final String type; + private final String healthClass = "platform"; + + public KubernetesHealth(V1PodStatus status) { + String phase = status.getPhase(); + this.source = "Pod"; + this.type = "kubernetes/pod"; + + if (phase.equalsIgnoreCase("pending")) { + state = HealthState.Down; + } else if (phase.equalsIgnoreCase("running")) { + state = HealthState.Up; + } else { + state = HealthState.Unknown; + } + } + + public KubernetesHealth(V1ContainerStatus status) { + this.source = "Container " + status.getName(); + this.type = "kubernetes/container"; + + if (!status.getReady()) { + state = HealthState.Down; + } else { + state = HealthState.Up; + } + } + + public Map<String, Object> toMap() { + return new ImmutableMap.Builder<String, Object>() + .put("state", state.toString()) + .put("source", source) + .put("type", type) + .put("healthClass", healthClass) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesImageSummary.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesImageSummary.java new file mode 100644 index 00000000000..b7e6308537b --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesImageSummary.java @@ -0,0 +1,58 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
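A hedged usage sketch of the pod-phase mapping above (assuming the same package as KubernetesHealth; V1PodStatus and its fluent phase setter come from the kubernetes Java client the file already imports):

    import com.netflix.spinnaker.clouddriver.model.HealthState;
    import io.kubernetes.client.openapi.models.V1PodStatus;

    class HealthSketch {
      static HealthState stateFor(String phase) {
        // Per the constructor above: "Pending" -> Down, "Running" -> Up, anything else -> Unknown.
        return new KubernetesHealth(new V1PodStatus().phase(phase)).getState();
      }
    }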
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.model.ServerGroup.ImageSummary; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Map; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Getter; + +@NonnullByDefault +final class KubernetesImageSummary implements ImageSummary { + @Getter private final String serverGroupName; + @Getter private final ImmutableMap<String, ImmutableCollection<String>> buildInfo; + + @Builder + KubernetesImageSummary( + String serverGroupName, Map<String, ? extends ImmutableCollection<String>> buildInfo) { + this.serverGroupName = serverGroupName; + this.buildInfo = ImmutableMap.copyOf(buildInfo); + } + + @Nullable + @Override + public String getImageId() { + return null; + } + + @Nullable + @Override + public String getImageName() { + return null; + } + + @Nullable + @Override + public Map<String, Object> getImage() { + return null; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesInstance.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesInstance.java new file mode 100644 index 00000000000..0a4512314e2 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesInstance.java @@ -0,0 +1,143 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.provider.KubernetesModelUtil; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.model.Instance; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; +import com.netflix.spinnaker.moniker.Moniker; +import io.kubernetes.client.openapi.models.V1PodStatus; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import javax.validation.constraints.Null; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Value +public final class KubernetesInstance implements Instance, KubernetesResource { + private static final Logger log = LoggerFactory.getLogger(KubernetesInstance.class); + private final List<Map<String, Object>> health; + private final String account; + // An implementor of the Instance interface is implicitly expected to return a globally-unique ID + // as its name because InstanceViewModel serializes it as such for API responses and Deck then + // relies on it to disambiguate between instances.
+ private final String name; + private final String humanReadableName; + private final String namespace; + private final String displayName; + private final KubernetesApiVersion apiVersion; + private final KubernetesKind kind; + private final Map<String, String> labels; + private final Moniker moniker; + private final Long createdTime; + + @Null + @Override + public Long getLaunchTime() { + return null; + } + + private KubernetesInstance(KubernetesManifest manifest, String key, Moniker moniker) { + this.account = ((Keys.InfrastructureCacheKey) Keys.parseKey(key).get()).getAccount(); + this.name = manifest.getUid(); + this.humanReadableName = manifest.getFullResourceName(); + this.namespace = manifest.getNamespace(); + this.displayName = manifest.getName(); + this.apiVersion = manifest.getApiVersion(); + this.kind = manifest.getKind(); + this.labels = ImmutableMap.copyOf(manifest.getLabels()); + this.moniker = moniker; + this.createdTime = manifest.getCreationTimestampEpochMillis(); + + this.health = new ArrayList<>(); + V1PodStatus status = + KubernetesCacheDataConverter.getResource(manifest.getStatus(), V1PodStatus.class); + if (status != null) { + health.add(new KubernetesHealth(status).toMap()); + if (status.getContainerStatuses() != null) { + health.addAll( + status.getContainerStatuses().stream() + .map(KubernetesHealth::new) + .map(KubernetesHealth::toMap) + .collect(Collectors.toList())); + } + } + } + + public static KubernetesInstance fromCacheData(CacheData cd) { + if (cd == null) { + return null; + } + + KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); + + if (manifest == null) { + log.warn("Cache data {} inserted without a manifest", cd.getId()); + return null; + } + + Moniker moniker = KubernetesCacheDataConverter.getMoniker(cd); + return new KubernetesInstance(manifest, cd.getId(), moniker); + } + + public LoadBalancerInstance toLoadBalancerInstance() { + return LoadBalancerInstance.builder() + .health( + health.stream() + .reduce( + new HashMap<>(), + (a, b) -> { + Map<String, Object> result = new HashMap<>(); + result.putAll(a); + result.putAll(b); + return result; + })) + .id(getName()) + .zone(getZone()) + .name(getHumanReadableName()) + .build(); + } + + @Override + public HealthState getHealthState() { + return KubernetesModelUtil.getHealthState(health); + } + + @Override + public String getZone() { + return namespace; + } + + @Override + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesLoadBalancer.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesLoadBalancer.java new file mode 100644 index 00000000000..f48cf087fa7 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesLoadBalancer.java @@ -0,0 +1,93 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
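The reduce inside toLoadBalancerInstance above folds the per-pod and per-container health maps into a single map; the same fold in isolation (entries hypothetical; later maps win on key collisions, matching the putAll order):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class HealthMergeSketch {
      // Folds a list of health maps into one combined map, as toLoadBalancerInstance does.
      static Map<String, Object> merge(List<Map<String, Object>> health) {
        return health.stream()
            .reduce(
                new HashMap<>(),
                (a, b) -> {
                  Map<String, Object> result = new HashMap<>(a);
                  result.putAll(b);
                  return result;
                });
      }
    }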
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Value +public final class KubernetesLoadBalancer + implements KubernetesResource, LoadBalancer, LoadBalancerProvider.Details { + private static final Logger log = LoggerFactory.getLogger(KubernetesLoadBalancer.class); + private final Set<LoadBalancerServerGroup> serverGroups; + private final String account; + private final String name; + private final String namespace; + private final String displayName; + private final KubernetesApiVersion apiVersion; + private final KubernetesKind kind; + private final Map<String, String> labels; + private final Moniker moniker; + private final Long createdTime; + + private KubernetesLoadBalancer( + KubernetesManifest manifest, + String key, + Moniker moniker, + Set<LoadBalancerServerGroup> serverGroups) { + this.account = ((Keys.InfrastructureCacheKey) Keys.parseKey(key).get()).getAccount(); + this.name = manifest.getFullResourceName(); + this.displayName = manifest.getName(); + this.apiVersion = manifest.getApiVersion(); + this.kind = manifest.getKind(); + this.namespace = manifest.getNamespace(); + this.labels = ImmutableMap.copyOf(manifest.getLabels()); + this.moniker = moniker; + this.serverGroups = serverGroups; + this.createdTime = manifest.getCreationTimestampEpochMillis(); + } + + @Nullable + @ParametersAreNonnullByDefault + public static KubernetesLoadBalancer fromCacheData( + CacheData cd, Set<LoadBalancerServerGroup> loadBalancerServerGroups) { + KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); + if (manifest == null) { + log.warn("Cache data {} inserted without a manifest", cd.getId()); + return null; + } + Moniker moniker = KubernetesCacheDataConverter.getMoniker(cd); + return new KubernetesLoadBalancer(manifest, cd.getId(), moniker, loadBalancerServerGroups); + } + + public String getRegion() { + return namespace; + } + + @Override + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesManifestContainer.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesManifestContainer.java new file mode 100644 index 00000000000..632c195c866 --- /dev/null +++
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesManifestContainer.java @@ -0,0 +1,47 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import lombok.Builder; +import lombok.Value; + +@Value +@Builder +public class KubernetesManifestContainer implements Manifest { + private final String account; + private final String name; + private final String location; + private final Moniker moniker; + private final KubernetesManifest manifest; + private final Status status; + @Builder.Default private final Set<Artifact> artifacts = new HashSet<>(); + @Builder.Default private final List<KubernetesManifest> events = new ArrayList<>(); + @Builder.Default private final List<Warning> warnings = new ArrayList<>(); + + @Builder.Default + private final List<KubernetesPodMetric.ContainerMetric> metrics = new ArrayList<>(); +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesRawResource.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesRawResource.java new file mode 100644 index 00000000000..ff23e6a8932 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesRawResource.java @@ -0,0 +1,75 @@ +/* + * Copyright 2020 Coveo, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Map; +import javax.annotation.Nullable; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Value +public final class KubernetesRawResource implements KubernetesResource { + private static final Logger log = LoggerFactory.getLogger(KubernetesRawResource.class); + private final String account; + private final String name; + private final String namespace; + private final String displayName; + private final KubernetesApiVersion apiVersion; + private final KubernetesKind kind; + private final Map<String, String> labels; + private final Moniker moniker; + private final Long createdTime; + + private KubernetesRawResource(KubernetesManifest manifest, String key, Moniker moniker) { + this.account = ((Keys.InfrastructureCacheKey) Keys.parseKey(key).get()).getAccount(); + this.name = manifest.getFullResourceName(); + this.displayName = manifest.getName(); + this.apiVersion = manifest.getApiVersion(); + this.kind = manifest.getKind(); + this.namespace = manifest.getNamespace(); + this.labels = ImmutableMap.copyOf(manifest.getLabels()); + this.moniker = moniker; + this.createdTime = manifest.getCreationTimestampEpochMillis(); + } + + @Nullable + @ParametersAreNonnullByDefault + public static KubernetesRawResource fromCacheData(CacheData cd) { + KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); + if (manifest == null) { + log.warn("Cache data {} inserted without a manifest", cd.getId()); + return null; + } + Moniker moniker = KubernetesCacheDataConverter.getMoniker(cd); + return new KubernetesRawResource(manifest, cd.getId(), moniker); + } + + public String getRegion() { + return namespace; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesResource.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesResource.java new file mode 100644 index 00000000000..1fed3512d40 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesResource.java @@ -0,0 +1,37 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + +/** + * This interface defines fields that all models in the Kubernetes provider should have; it maps to + * the IKubernetesResource interface in deck. + */ +public interface KubernetesResource { + String getDisplayName(); + + KubernetesKind getKind(); + + KubernetesApiVersion getApiVersion(); + + String getNamespace(); + + Long getCreatedTime(); +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesSecurityGroup.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesSecurityGroup.java new file mode 100644 index 00000000000..ff640cc8d5a --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesSecurityGroup.java @@ -0,0 +1,228 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.EXTENSIONS_V1BETA1; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.NETWORKING_K8S_IO_V1; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.NETWORKING_K8S_IO_V1BETA1; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.model.SecurityGroup; +import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary; +import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule; +import com.netflix.spinnaker.moniker.Moniker; +import io.kubernetes.client.custom.IntOrString; +import io.kubernetes.client.openapi.models.V1NetworkPolicy; +import io.kubernetes.client.openapi.models.V1NetworkPolicyEgressRule; +import io.kubernetes.client.openapi.models.V1NetworkPolicyIngressRule; +import io.kubernetes.client.openapi.models.V1NetworkPolicyPort; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; 
+import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.NoArgsConstructor; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Value +public final class KubernetesSecurityGroup implements KubernetesResource, SecurityGroup { + private static final Logger log = LoggerFactory.getLogger(KubernetesSecurityGroup.class); + private static final ImmutableSet<KubernetesApiVersion> SUPPORTED_API_VERSIONS = + ImmutableSet.of(EXTENSIONS_V1BETA1, NETWORKING_K8S_IO_V1BETA1, NETWORKING_K8S_IO_V1); + + private final String account; + private final String id; + private final String namespace; + private final String displayName; + private final KubernetesApiVersion apiVersion; + private final KubernetesKind kind; + private final Map<String, String> labels; + private final Moniker moniker; + private final Long createdTime; + + private final Set<Rule> inboundRules; + private final Set<Rule> outboundRules; + + @Override + public String getAccountName() { + return account; + } + + @Override + public SecurityGroupSummary getSummary() { + return KubernetesSecurityGroupSummary.builder().id(id).name(id).build(); + } + + private KubernetesSecurityGroup( + KubernetesManifest manifest, + String key, + Moniker moniker, + Set<Rule> inboundRules, + Set<Rule> outboundRules) { + this.id = manifest.getFullResourceName(); + this.account = ((Keys.InfrastructureCacheKey) Keys.parseKey(key).get()).getAccount(); + this.kind = manifest.getKind(); + this.apiVersion = manifest.getApiVersion(); + this.displayName = manifest.getName(); + this.namespace = manifest.getNamespace(); + this.labels = ImmutableMap.copyOf(manifest.getLabels()); + this.moniker = moniker; + this.createdTime = manifest.getCreationTimestampEpochMillis(); + + this.inboundRules = inboundRules; + this.outboundRules = outboundRules; + } + + public static KubernetesSecurityGroup fromCacheData(CacheData cd) { + if (cd == null) { + return null; + } + + KubernetesManifest manifest = KubernetesCacheDataConverter.getManifest(cd); + + if (manifest == null) { + log.warn("Cache data {} inserted without a manifest", cd.getId()); + return null; + } + + Set<Rule> inboundRules = new HashSet<>(); + Set<Rule> outboundRules = new HashSet<>(); + + if (!manifest.getKind().equals(KubernetesKind.NETWORK_POLICY)) { + log.warn("Unknown security group kind " + manifest.getKind()); + } else { + if (SUPPORTED_API_VERSIONS.contains(manifest.getApiVersion())) { + V1NetworkPolicy v1beta1NetworkPolicy = + KubernetesCacheDataConverter.getResource(manifest, V1NetworkPolicy.class); + inboundRules = inboundRules(v1beta1NetworkPolicy); + outboundRules = outboundRules(v1beta1NetworkPolicy); + } else { + log.warn( + "Could not determine (in)/(out)bound rules for " + + manifest.getName() + + " at version " + + manifest.getApiVersion()); + } + } + + Moniker moniker = KubernetesCacheDataConverter.getMoniker(cd); + return new KubernetesSecurityGroup(manifest, cd.getId(), moniker, inboundRules, outboundRules); + } + + private static Set<Rule> inboundRules(V1NetworkPolicy policy) { + if (policy.getSpec().getIngress() == null) { + return ImmutableSet.of(); + } + return policy.getSpec().getIngress().stream() + .map(V1NetworkPolicyIngressRule::getPorts) + .filter(Objects::nonNull) + .flatMap(Collection::stream) + .map(KubernetesSecurityGroup::fromPolicyPort) + .collect(Collectors.toSet()); + } + + private
static Set<Rule> outboundRules(V1NetworkPolicy policy) { + if (policy.getSpec().getEgress() == null) { + return ImmutableSet.of(); + } + return policy.getSpec().getEgress().stream() + .map(V1NetworkPolicyEgressRule::getPorts) + .filter(Objects::nonNull) + .flatMap(Collection::stream) + .map(KubernetesSecurityGroup::fromPolicyPort) + .collect(Collectors.toSet()); + } + + private static Rule fromPolicyPort(V1NetworkPolicyPort policyPort) { + IntOrString port = policyPort.getPort(); + return new PortRule() + .setProtocol(policyPort.getProtocol()) + .setPortRanges( + port == null + ? null + : new TreeSet<>(ImmutableList.of(new StringPortRange(port.toString())))); + } + + @Override + public String getName() { + return id; + } + + @Override + public String getRegion() { + return namespace; + } + + @Override + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } + + @Data + @NoArgsConstructor + @AllArgsConstructor + @Builder + private static class KubernetesSecurityGroupSummary implements SecurityGroupSummary { + private String name; + private String id; + } + + @Data + private static class PortRule implements Rule { + @Nullable private SortedSet<PortRange> portRanges; + @Nullable private String protocol; + } + + @EqualsAndHashCode(callSuper = true) + @Data + public static class StringPortRange extends Rule.PortRange { + protected String startPortName; + protected String endPortName; + + StringPortRange(String port) { + Integer numPort; + try { + numPort = Integer.parseInt(port); + this.startPort = numPort; + this.endPort = numPort; + } catch (Exception e) { + this.startPortName = port; + this.endPortName = port; + } + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroup.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroup.java new file mode 100644 index 00000000000..532a634f285 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroup.java @@ -0,0 +1,278 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
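A same-package sketch of the two StringPortRange paths above (port values hypothetical; the constructor is package-private, so this only compiles alongside KubernetesSecurityGroup):

    class PortRangeSketch {
      static void demo() {
        // Numeric input parses into the inherited startPort/endPort fields (8080/8080).
        KubernetesSecurityGroup.StringPortRange numeric =
            new KubernetesSecurityGroup.StringPortRange("8080");
        // Named ports fail Integer.parseInt and land in startPortName/endPortName ("http"/"http").
        KubernetesSecurityGroup.StringPortRange named =
            new KubernetesSecurityGroup.StringPortRange("http");
      }
    }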
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import static com.google.common.collect.ImmutableList.toImmutableList; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.primitives.Ints; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestTraffic; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import com.netflix.spinnaker.clouddriver.model.ServerGroupManager.ServerGroupManagerSummary; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import javax.validation.constraints.Null; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Value +public final class KubernetesServerGroup implements KubernetesResource, ServerGroup { + private static final Logger log = LoggerFactory.getLogger(KubernetesServerGroup.class); + private final boolean disabled; + private final Set<KubernetesInstance> instances; + private final Set<String> loadBalancers; + private final List<ServerGroupManagerSummary> serverGroupManagers; + private final Capacity capacity; + private final String account; + private final String name; + private final String namespace; + private final String displayName; + private final KubernetesApiVersion apiVersion; + private final KubernetesKind kind; + private final Map<String, String> labels; + private final Moniker moniker; + private final Long createdTime; + private final ImmutableMap<String, ImmutableList<String>> buildInfo; + + private final Set<String> zones = ImmutableSet.of(); + private final Set<String> securityGroups = ImmutableSet.of(); + private final Map<String, Object> launchConfig = ImmutableMap.of(); + + @JsonIgnore + private static final ArtifactReplacer dockerImageReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + + @Override + public ServerGroup.InstanceCounts getInstanceCounts() { + return ServerGroup.InstanceCounts.builder() + .total(Ints.checkedCast(instances.size())) + .up( + Ints.checkedCast( + instances.stream().filter(i ->
i.getHealthState().equals(HealthState.Up)).count())) + .down( + Ints.checkedCast( + instances.stream() + .filter(i -> i.getHealthState().equals(HealthState.Down)) + .count())) + .unknown( + Ints.checkedCast( + instances.stream() + .filter(i -> i.getHealthState().equals(HealthState.Unknown)) + .count())) + .outOfService( + Ints.checkedCast( + instances.stream() + .filter(i -> i.getHealthState().equals(HealthState.OutOfService)) + .count())) + .starting( + Ints.checkedCast( + instances.stream() + .filter(i -> i.getHealthState().equals(HealthState.Starting)) + .count())) + .build(); + } + + @Override + public Boolean isDisabled() { + return disabled; + } + + private KubernetesServerGroup( + KubernetesManifest manifest, + String key, + Moniker moniker, + List<KubernetesInstance> instances, + Set<String> loadBalancers, + List<ServerGroupManagerSummary> serverGroupManagers, + Boolean disabled) { + this.account = ((Keys.InfrastructureCacheKey) Keys.parseKey(key).get()).getAccount(); + this.kind = manifest.getKind(); + this.apiVersion = manifest.getApiVersion(); + this.namespace = manifest.getNamespace(); + this.name = manifest.getFullResourceName(); + this.displayName = manifest.getName(); + this.labels = ImmutableMap.copyOf(manifest.getLabels()); + this.moniker = moniker; + this.createdTime = manifest.getCreationTimestampEpochMillis(); + this.buildInfo = + ImmutableMap.of( + "images", + dockerImageReplacer.findAll(manifest).stream() + .map(Artifact::getReference) + .distinct() + .collect(toImmutableList())); + this.instances = new HashSet<>(instances); + this.loadBalancers = loadBalancers; + this.serverGroupManagers = serverGroupManagers; + this.disabled = disabled; + + Object odesired = + ((Map<String, Object>) manifest.getOrDefault("spec", new HashMap<String, Object>())) + .getOrDefault("replicas", 0); + int desired = 0; + + if (odesired instanceof Number) { + desired = ((Number) odesired).intValue(); + } else { + log.warn("Unable to cast replica count from unexpected type: {}", odesired.getClass()); + } + + this.capacity = Capacity.builder().desired(desired).build(); + } + + public static KubernetesServerGroup fromCacheData(KubernetesServerGroupCacheData cacheData) { + List<ServerGroupManagerSummary> serverGroupManagers = + cacheData.getServerGroupManagerKeys().stream() + .map(Keys::parseKey) + .filter(Optional::isPresent) + .map(Optional::get) + .filter(k -> k instanceof InfrastructureCacheKey) + .map(k -> (InfrastructureCacheKey) k) + .map( + k -> + ServerGroupManagerSummary.builder() + .account(k.getAccount()) + .location(k.getNamespace()) + .name(k.getName()) + .build()) + .collect(Collectors.toList()); + + KubernetesManifest manifest = + KubernetesCacheDataConverter.getManifest(cacheData.getServerGroupData()); + + if (manifest == null) { + log.warn("Cache data {} inserted without a manifest", cacheData.getServerGroupData().getId()); + return null; + } + + List<KubernetesInstance> instances = + cacheData.getInstanceData().stream() + .map(KubernetesInstance::fromCacheData) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + KubernetesManifestTraffic traffic = KubernetesManifestAnnotater.getTraffic(manifest); + Set<String> explicitLoadBalancers = + traffic.getLoadBalancers().stream() + // TODO(ezimanyi): Leaving this logic and the comment below, but I'm not sure that we + // still need the logic to parse then re-serialize the name.
+ // this ensures the names are serialized correctly when they get merged below + .map(lb -> KubernetesCoordinates.builder().fullResourceName(lb).build()) + .map(c -> KubernetesManifest.getFullResourceName(c.getKind(), c.getName())) + .collect(Collectors.toSet()); + + Set<String> loadBalancers = + cacheData.getLoadBalancerKeys().stream() + .map(Keys::parseKey) + .filter(Optional::isPresent) + .map(Optional::get) + .map(k -> (InfrastructureCacheKey) k) + .map(k -> KubernetesManifest.getFullResourceName(k.getKubernetesKind(), k.getName())) + .collect(Collectors.toSet()); + + boolean disabled = loadBalancers.isEmpty() && !explicitLoadBalancers.isEmpty(); + loadBalancers.addAll(explicitLoadBalancers); + + Moniker moniker = KubernetesCacheDataConverter.getMoniker(cacheData.getServerGroupData()); + return new KubernetesServerGroup( + manifest, + cacheData.getServerGroupData().getId(), + moniker, + instances, + loadBalancers, + serverGroupManagers, + disabled); + } + + public KubernetesServerGroupSummary toServerGroupSummary() { + return KubernetesServerGroupSummary.builder() + .name(getName()) + .account(getAccount()) + .namespace(getRegion()) + .moniker(getMoniker()) + .build(); + } + + public LoadBalancerServerGroup toLoadBalancerServerGroup() { + return LoadBalancerServerGroup.builder() + .account(getAccount()) + .detachedInstances(new HashSet<>()) + .instances( + instances.stream() + .map(KubernetesInstance::toLoadBalancerInstance) + .collect(Collectors.toSet())) + .name(getName()) + .region(getRegion()) + .isDisabled(isDisabled()) + .cloudProvider(KubernetesCloudProvider.ID) + .build(); + } + + @Deprecated + @Null + @Override + public ImageSummary getImageSummary() { + return null; + } + + @Override + public ImagesSummary getImagesSummary() { + return () -> + ImmutableList.of( + KubernetesImageSummary.builder() + .serverGroupName(displayName) + .buildInfo(buildInfo) + .build()); + } + + @Override + public String getRegion() { + return namespace; + } + + @Override + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroupManager.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroupManager.java new file mode 100644 index 00000000000..603c0acab90 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroupManager.java @@ -0,0 +1,104 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
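For reference, the replica-count handling in the KubernetesServerGroup constructor above tolerates whatever numeric type the manifest parser produced; the same defensive cast in isolation (the sample map stands in for a parsed spec):

    import java.util.Map;

    class ReplicasSketch {
      static int desiredReplicas(Map<String, Object> spec) {
        // YAML/JSON parsing may yield an Integer, Long, or Double for "replicas".
        Object raw = spec.getOrDefault("replicas", 0);
        return raw instanceof Number ? ((Number) raw).intValue() : 0;
      }
    }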
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupManagerCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.model.ServerGroupManager; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import lombok.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Value +public final class KubernetesServerGroupManager implements KubernetesResource, ServerGroupManager { + private static final Logger log = LoggerFactory.getLogger(KubernetesServerGroupManager.class); + // private final KubernetesManifest manifest; + private final String account; + private final Set<KubernetesServerGroupSummary> serverGroups; + private final String name; + private final String namespace; + private final String displayName; + private final KubernetesApiVersion apiVersion; + private final KubernetesKind kind; + private final Map<String, String> labels; + private final Moniker moniker; + private final Long createdTime; + + private KubernetesServerGroupManager( + KubernetesManifest manifest, + String key, + Moniker moniker, + Set<KubernetesServerGroupSummary> serverGroups) { + this.account = ((Keys.InfrastructureCacheKey) Keys.parseKey(key).get()).getAccount(); + this.kind = manifest.getKind(); + this.apiVersion = manifest.getApiVersion(); + this.namespace = manifest.getNamespace(); + this.name = manifest.getFullResourceName(); + this.displayName = manifest.getName(); + this.labels = ImmutableMap.copyOf(manifest.getLabels()); + this.moniker = moniker; + this.serverGroups = serverGroups; + this.createdTime = manifest.getCreationTimestampEpochMillis(); + } + + public static KubernetesServerGroupManager fromCacheData( + KubernetesServerGroupManagerCacheData data) { + KubernetesManifest manifest = + KubernetesCacheDataConverter.getManifest(data.getServerGroupManagerData()); + if (manifest == null) { + log.warn( + "Cache data {} inserted without a manifest", data.getServerGroupManagerData().getId()); + return null; + } + + Set<KubernetesServerGroupSummary> serverGroups = + data.getServerGroupData().stream() + .map( + sg -> + KubernetesServerGroup.fromCacheData( + KubernetesServerGroupCacheData.builder().serverGroupData(sg).build())) + .filter(Objects::nonNull) + .map(KubernetesServerGroup::toServerGroupSummary) + .collect(Collectors.toSet()); + + Moniker moniker = KubernetesCacheDataConverter.getMoniker(data.getServerGroupManagerData()); + return new KubernetesServerGroupManager( + manifest, data.getServerGroupManagerData().getId(), moniker, serverGroups); + } + + @Override + public String getRegion() { + return namespace; + } + + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } +} diff --git
a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroupSummary.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroupSummary.java new file mode 100644 index 00000000000..785b9d19b59 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/model/KubernetesServerGroupSummary.java @@ -0,0 +1,36 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model; + +import com.netflix.spinnaker.clouddriver.model.ServerGroupSummary; +import com.netflix.spinnaker.moniker.Moniker; +import lombok.Builder; +import lombok.Value; + +@Builder +@Value +public final class KubernetesServerGroupSummary implements ServerGroupSummary { + private final String name; + private final String account; + private final String namespace; + private final Moniker moniker; + + public String getRegion() { + return namespace; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/ArtifactProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/ArtifactProvider.java new file mode 100644 index 00000000000..34f178ae5ed --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/ArtifactProvider.java @@ -0,0 +1,47 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.google.common.collect.ImmutableList.toImmutableList; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.Comparator; +import java.util.Optional; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +public class ArtifactProvider { + public ImmutableList<Artifact> getArtifacts( + KubernetesKind kind, String name, String location, KubernetesCredentials credentials) { + return credentials.list(kind, location).stream() + .sorted(Comparator.comparing(KubernetesManifest::getCreationTimestamp)) + .map(m -> KubernetesManifestAnnotater.getArtifact(m, credentials.getAccountName())) + .filter(Optional::isPresent) + .map(Optional::get) + .filter(a -> Strings.nullToEmpty(a.getName()).equals(name)) + .collect(toImmutableList()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesAccountResolver.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesAccountResolver.java new file mode 100644 index 00000000000..8691119ddf4 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesAccountResolver.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
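One detail of the getArtifacts pipeline above is the null-safe name comparison: Strings.nullToEmpty (the Guava helper the file imports) keeps artifacts with missing names from throwing; they simply never match. In isolation (names hypothetical):

    import com.google.common.base.Strings;

    class NameFilterSketch {
      static boolean nameMatches(String artifactName, String wanted) {
        // A null artifact name becomes "" and fails the comparison instead of throwing an NPE.
        return Strings.nullToEmpty(artifactName).equals(wanted);
      }
    }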
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Optional; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +class KubernetesAccountResolver { + private final CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository; + private final ResourcePropertyRegistry globalResourcePropertyRegistry; + + KubernetesAccountResolver( + CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository, + ResourcePropertyRegistry globalResourcePropertyRegistry) { + this.credentialsRepository = credentialsRepository; + this.globalResourcePropertyRegistry = globalResourcePropertyRegistry; + } + + Optional<KubernetesCredentials> getCredentials(String account) { + return Optional.ofNullable(credentialsRepository.getOne(account)) + .map(AccountCredentials::getCredentials); + } + + ResourcePropertyRegistry getResourcePropertyRegistry(String account) { + return getCredentials(account) + .map(KubernetesCredentials::getResourcePropertyRegistry) + .orElse(globalResourcePropertyRegistry); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesApplicationProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesApplicationProvider.java new file mode 100644 index 00000000000..6ff09966244 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesApplicationProvider.java @@ -0,0 +1,89 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
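One design note on the resolver above: an unknown account is not an error. getCredentials conveys absence through an empty Optional, while getResourcePropertyRegistry quietly falls back to the global registry. A brief sketch (account name invented):

// Falls back to the global registry instead of throwing:
ResourcePropertyRegistry registry =
    accountResolver.getResourcePropertyRegistry("no-such-account");
// Absence is explicit only on the credentials path:
Optional<KubernetesCredentials> credentials = accountResolver.getCredentials("no-such-account");
assert !credentials.isPresent();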
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.CLUSTERS; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.ClusterCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesApplication; +import com.netflix.spinnaker.clouddriver.model.ApplicationProvider; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesApplicationProvider implements ApplicationProvider { + private final KubernetesCacheUtils cacheUtils; + + @Autowired + KubernetesApplicationProvider(KubernetesCacheUtils cacheUtils) { + this.cacheUtils = cacheUtils; + } + + @Override + public Set<KubernetesApplication> getApplications(boolean expand) { + // TODO(lwander) performance optimization: rely on expand parameter to make a more + // cache-efficient call + String clusterGlobKey = ClusterCacheKey.createKey("*", "*", "*"); + Map<String, Set<ClusterCacheKey>> keysByApplication = + cacheUtils.getAllKeysMatchingPattern(CLUSTERS.toString(), clusterGlobKey).stream() + .map(Keys::parseKey) + .filter(Optional::isPresent) + .map(Optional::get) + .filter(ClusterCacheKey.class::isInstance) + .map(k -> (ClusterCacheKey) k) + .collect(Collectors.groupingBy(ClusterCacheKey::getApplication, Collectors.toSet())); + + return keysByApplication.entrySet().stream() + .map(e -> new KubernetesApplication(e.getKey(), groupClustersByAccount(e.getValue()))) + .collect(Collectors.toSet()); + } + + @Override + public KubernetesApplication getApplication(String name) { + String clusterGlobKey = ClusterCacheKey.createKey("*", name, "*"); + List<ClusterCacheKey> keys = + cacheUtils.getAllKeysMatchingPattern(CLUSTERS.toString(), clusterGlobKey).stream() + .map(Keys::parseKey) + .filter(Optional::isPresent) + .map(Optional::get) + .filter(ClusterCacheKey.class::isInstance) + .map(k -> (ClusterCacheKey) k) + .collect(Collectors.toList()); + + if (keys.isEmpty()) { + return null; + } + + return new KubernetesApplication(name, groupClustersByAccount(keys)); + } + + private Map<String, Set<String>> groupClustersByAccount(Collection<ClusterCacheKey> keys) { + return keys.stream() + .collect( + Collectors.groupingBy( + ClusterCacheKey::getAccount, + Collectors.mapping(ClusterCacheKey::getName, Collectors.toSet()))); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesCacheUtils.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesCacheUtils.java new file mode 100644 index 00000000000..fa7ca4c75a6 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesCacheUtils.java @@ -0,0 +1,243 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
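The groupClustersByAccount helper above leans on the groupingBy/mapping collector combination. A minimal, self-contained illustration of the same pattern over plain strings (sample data invented; set iteration order may vary):

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class GroupingSketch {
  public static void main(String[] args) {
    Map<String, Set<String>> clustersByAccount =
        Stream.of(
                Map.entry("account-a", "my-app-main"),
                Map.entry("account-a", "my-app-canary"),
                Map.entry("account-b", "my-app-main"))
            .collect(
                Collectors.groupingBy(
                    Map.Entry::getKey, // outer key: the account
                    Collectors.mapping(Map.Entry::getValue, Collectors.toSet())));
    // {account-a=[my-app-main, my-app-canary], account-b=[my-app-main]}
    System.out.println(clustersByAccount);
  }
}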
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.google.common.collect.ImmutableSetMultimap.flatteningToImmutableSetMultimap; + +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.Multimap; +import com.google.common.collect.Multimaps; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Collection; +import java.util.HashSet; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Stream; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +class KubernetesCacheUtils { + private final Cache cache; + private final KubernetesSpinnakerKindMap kindMap; + private final KubernetesAccountResolver resourcePropertyResolver; + + @Autowired + public KubernetesCacheUtils( + Cache cache, + KubernetesSpinnakerKindMap kindMap, + KubernetesAccountResolver resourcePropertyResolver) { + this.cache = cache; + this.kindMap = kindMap; + this.resourcePropertyResolver = resourcePropertyResolver; + } + + Collection<CacheData> getAllKeys(String type) { + return cache.getAll(type); + } + + Collection<String> getAllKeysMatchingPattern(String type, String key) { + return cache.filterIdentifiers(type, key); + } + + Collection<CacheData> getAllDataMatchingPattern(String type, String key) { + return cache.getAll(type, getAllKeysMatchingPattern(type, key)); + } + + Optional<CacheData> getSingleEntry(String type, String key) { + return Optional.ofNullable(cache.get(type, key)); + } + + /** + * Given an account, a namespace, and a resource name, returns the {@link CacheData} entry for + * that item. + * + *
<p>If the resource name cannot be parsed into a kind and a name, or if there is no entry in the + * cache for the requested item, returns an empty {@link Optional}. + * + * @param account the account of the requested item + * @param namespace the namespace for the requested item, which can be empty for a cluster-scoped + * resource + * @param name the full name of the requested item in the form "kind name" (ex: "pod my-pod-abcd") + * @return An {@link Optional} containing the requested {@link CacheData} item, or an empty {@link Optional} + * if the item is not found. + */ + Optional<CacheData> getSingleEntry(String account, String namespace, String name) { + KubernetesCoordinates coords; + try { + coords = KubernetesCoordinates.builder().namespace(namespace).fullResourceName(name).build(); + } catch (IllegalArgumentException e) { + return Optional.empty(); + } + return getSingleEntry( + coords.getKind().toString(), Keys.InfrastructureCacheKey.createKey(account, coords)); + } + + Optional<CacheData> getSingleEntryWithRelationships( + String type, String key, RelationshipCacheFilter cacheFilter) { + return Optional.ofNullable(cache.get(type, key, cacheFilter)); + } + + /** Gets the keys for all relationships of a given Spinnaker kind for a CacheData item. */ + ImmutableCollection<String> getRelationshipKeys( + CacheData cacheData, SpinnakerKind spinnakerKind) { + return relationshipTypes(spinnakerKind) + .flatMap(t -> getRelationshipKeys(cacheData, t)) + .collect(toImmutableSet()); + } + + /** Gets the keys for all relationships of a given type for a collection of CacheData items. */ + private ImmutableMultimap<String, String> getRelationshipKeys( + Collection<CacheData> cacheData, String type) { + return cacheData.stream() + .collect( + flatteningToImmutableSetMultimap( + CacheData::getId, cd -> getRelationshipKeys(cd, type))); + } + + /** Gets the data for all relationships of a given type for a CacheData item. */ + Collection<CacheData> getRelationships(CacheData cacheData, String relationshipType) { + return getRelationships( + cacheData, relationshipType, getRelationshipKeys(cacheData, relationshipType)); + } + + /** + * Gets the data for all relationships of a given type for a CacheData item and all its + * relationship keys. + */ + Collection<CacheData> getRelationships( + CacheData cacheData, String relationshipType, Stream<String> relationshipKeys) { + return cache.getAll(relationshipType, relationshipKeys.collect(toImmutableSet())); + } + + /** Gets the data for all relationships of a given Spinnaker kind for a single CacheData item. */ + ImmutableCollection<CacheData> getRelationships( + CacheData cacheData, SpinnakerKind spinnakerKind) { + return getRelationships(ImmutableList.of(cacheData), spinnakerKind).get(cacheData.getId()); + } + + /** Gets the data for all relationships for a single CacheData item. */ + ImmutableCollection<CacheData> getAllRelationships(CacheData cacheData) { + ImmutableList.Builder<CacheData> result = ImmutableList.builder(); + cacheData + .getRelationships() + .forEach( + (kind, relationships) -> + result.addAll(getRelationships(cacheData, kind, relationships.stream()))); + return result.build(); + } + + /** + * Gets the data for all relationships of a given Spinnaker kind for a collection of CacheData + * items.
+ */ + ImmutableMultimap<String, CacheData> getRelationships( + Collection<CacheData> cacheData, SpinnakerKind spinnakerKind) { + ImmutableListMultimap.Builder<String, CacheData> result = ImmutableListMultimap.builder(); + relationshipTypes(spinnakerKind) + .forEach(type -> result.putAll(getRelationships(cacheData, type))); + return result.build(); + } + + /** Gets the data for all relationships of a given type for a collection of CacheData items. */ + private Multimap<String, CacheData> getRelationships( + Collection<CacheData> cacheData, String type) { + ImmutableMultimap<String, String> relKeys = getRelationshipKeys(cacheData, type); + + // Prefetch the cache data for all relationships. This is to avoid making a separate call + // to the cache for each of the source items. + // Note that relKeys.values() is not deduplicated; we'll defer to the cache implementation + // to decide whether it's worth deduplicating before fetching data. In the event that we + // do get back duplicates, we'll just keep the first for each key. + ImmutableMap<String, CacheData> relData = + cache.getAll(type, relKeys.values()).stream() + .collect(toImmutableMap(CacheData::getId, cd -> cd, (cd1, cd2) -> cd1)); + + // Note that the filterValues here is important to handle race conditions where a relationship + // is deleted by the time we look it up; in that case, relData might not contain the data for + // a requested key. + return Multimaps.filterValues( + Multimaps.transformValues(relKeys, relData::get), Objects::nonNull); + } + + /** Returns a stream of all relationships of a given type for a given CacheData. */ + private Stream<String> getRelationshipKeys(CacheData cacheData, String type) { + Collection<String> relationships = cacheData.getRelationships().get(type); + // Avoid creating an Optional here, as this is deeply nested in performance-sensitive code. + if (relationships == null) { + return Stream.empty(); + } + return relationships.stream(); + } + + /** Given a Spinnaker kind, returns a stream of the relationship types representing that kind. */ + private Stream<String> relationshipTypes(SpinnakerKind spinnakerKind) { + return kindMap.translateSpinnakerKind(spinnakerKind).stream().map(KubernetesKind::toString); + } + + /** + * Given a collection of Spinnaker kinds, return a cache filter restricting relationships to those + * kinds. + */ + RelationshipCacheFilter getCacheFilter(Collection<SpinnakerKind> spinnakerKinds) { + return RelationshipCacheFilter.include( + spinnakerKinds.stream().flatMap(this::relationshipTypes).toArray(String[]::new)); + } + + /** + * Returns a Predicate that returns true the first time it sees a CacheData with a given id, and + * false all subsequent times. + */ + Predicate<CacheData> distinctById() { + Set<String> seen = new HashSet<>(); + return cd -> seen.add(cd.getId()); + } + + KubernetesHandler getHandler(KubernetesCacheData cacheData) { + Keys.InfrastructureCacheKey key = + (Keys.InfrastructureCacheKey) Keys.parseKey(cacheData.primaryData().getId()).get(); + // TODO(ezimanyi): The kind is also stored directly on the cache data; get it from there instead + // of reading it from the manifest.
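+ // (Illustrative aside.) Note that distinctById() above returns a stateful predicate: it
+ // closes over a HashSet, so each stream that needs de-duplication must obtain a fresh
+ // instance, e.g.
+ //   someCacheData.stream().filter(cacheUtils.distinctById())
+ // Sharing one instance across two streams would silently drop items already seen by the other.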
+ KubernetesKind kind = + KubernetesCacheDataConverter.getManifest(cacheData.primaryData()).getKind(); + return resourcePropertyResolver + .getResourcePropertyRegistry(key.getAccount()) + .get(kind) + .getHandler(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesClusterProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesClusterProvider.java new file mode 100644 index 00000000000..9439da0e05d --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesClusterProvider.java @@ -0,0 +1,221 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.CLUSTERS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.INSTANCES; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUPS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUP_MANAGERS; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toSet; + +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMultimap; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesCluster; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesLoadBalancer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroup; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupCacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.ServerGroupHandler; +import com.netflix.spinnaker.clouddriver.model.ClusterProvider; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import 
javax.annotation.Nonnull; +import javax.annotation.Nullable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesClusterProvider implements ClusterProvider { + private final KubernetesCacheUtils cacheUtils; + + @Autowired + KubernetesClusterProvider(KubernetesCacheUtils cacheUtils) { + this.cacheUtils = cacheUtils; + } + + @Override + public Map> getClusters() { + return groupByAccountName(loadClusters(cacheUtils.getAllKeys(CLUSTERS.toString()))); + } + + @Override + public Map> getClusterSummaries(String application) { + String applicationKey = Keys.ApplicationCacheKey.createKey(application); + return groupByAccountName( + loadClusterSummaries( + cacheUtils + .getSingleEntryWithRelationships( + APPLICATIONS.toString(), + applicationKey, + RelationshipCacheFilter.include(CLUSTERS.toString())) + .map(d -> cacheUtils.getRelationships(d, CLUSTERS.toString())) + .orElseGet(ImmutableList::of))); + } + + @Override + public Map> getClusterDetails(String application) { + String clusterGlobKey = Keys.ClusterCacheKey.createKey("*", application, "*"); + return groupByAccountName( + loadClusters(cacheUtils.getAllDataMatchingPattern(CLUSTERS.toString(), clusterGlobKey))); + } + + @Override + public Set getClusters(String application, String account) { + String globKey = Keys.ClusterCacheKey.createKey(account, application, "*"); + return loadClusters(cacheUtils.getAllDataMatchingPattern(CLUSTERS.toString(), globKey)); + } + + @Override + public KubernetesCluster getCluster(String application, String account, String name) { + return getCluster(application, account, name, true); + } + + @Override + public KubernetesCluster getCluster( + String application, String account, String name, boolean includeDetails) { + return cacheUtils + .getSingleEntry( + CLUSTERS.toString(), Keys.ClusterCacheKey.createKey(account, application, name)) + .map( + entry -> { + Collection clusterData = ImmutableList.of(entry); + Set result = + includeDetails ? 
loadClusters(clusterData) : loadClusterSummaries(clusterData); + return result.iterator().next(); + }) + .orElse(null); + } + + @Nullable + @Override + public KubernetesServerGroup getServerGroup( + String account, String namespace, String fullName, boolean includeDetails) { + return cacheUtils + .getSingleEntry(account, namespace, fullName) + .map( + serverGroupData -> + loadServerGroups(ImmutableList.of(serverGroupData)).get(serverGroupData.getId())) + .orElse(null); + } + + @Override + public KubernetesServerGroup getServerGroup(String account, String namespace, String name) { + return getServerGroup(account, namespace, name, true); + } + + @Override + public String getCloudProviderId() { + return KubernetesCloudProvider.ID; + } + + @Override + public boolean supportsMinimalClusters() { + return true; + } + + private Map> groupByAccountName( + Collection clusters) { + return clusters.stream().collect(groupingBy(KubernetesCluster::getAccountName, toSet())); + } + + private Set loadClusterSummaries(Collection clusterData) { + return clusterData.stream() + .map(clusterDatum -> new KubernetesCluster(clusterDatum.getId())) + .collect(toSet()); + } + + private Set loadClusters(Collection clusterData) { + ImmutableMultimap clusterToServerGroups = + cacheUtils.getRelationships(clusterData, SERVER_GROUPS); + + return clusterData.stream() + .map( + clusterDatum -> { + ImmutableCollection clusterServerGroups = + clusterToServerGroups.get(clusterDatum.getId()); + ImmutableMap serverGroups = + loadServerGroups(clusterServerGroups); + List loadBalancers = + cacheUtils.getRelationships(clusterServerGroups, LOAD_BALANCERS).values().stream() + .filter(cacheUtils.distinctById()) + .map( + cd -> + KubernetesLoadBalancer.fromCacheData( + cd, + cacheUtils.getRelationshipKeys(cd, SERVER_GROUPS).stream() + .map(serverGroups::get) + .filter(Objects::nonNull) + .map(KubernetesServerGroup::toLoadBalancerServerGroup) + .collect(toImmutableSet()))) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + return new KubernetesCluster( + clusterDatum.getId(), serverGroups.values(), loadBalancers); + }) + .collect(toSet()); + } + + private ImmutableMap loadServerGroups( + ImmutableCollection serverGroupData) { + ImmutableMultimap serverGroupToInstances = + cacheUtils.getRelationships(serverGroupData, INSTANCES); + return serverGroupData.stream() + .collect( + toImmutableMap( + CacheData::getId, + cd -> + serverGroupFromCacheData( + KubernetesServerGroupCacheData.builder() + .serverGroupData(cd) + .instanceData(serverGroupToInstances.get(cd.getId())) + .loadBalancerKeys(cacheUtils.getRelationshipKeys(cd, LOAD_BALANCERS)) + .serverGroupManagerKeys( + cacheUtils.getRelationshipKeys(cd, SERVER_GROUP_MANAGERS)) + .build()), + (sg1, sg2) -> sg1)); + } + + private final ServerGroupHandler DEFAULT_SERVER_GROUP_HANDLER = new ServerGroupHandler() {}; + + @Nonnull + private KubernetesServerGroup serverGroupFromCacheData( + @Nonnull KubernetesServerGroupCacheData cacheData) { + KubernetesHandler handler = cacheUtils.getHandler(cacheData); + ServerGroupHandler serverGroupHandler = + handler instanceof ServerGroupHandler + ? 
(ServerGroupHandler) handler + : DEFAULT_SERVER_GROUP_HANDLER; + return serverGroupHandler.fromCacheData(cacheData); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesInstanceProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesInstanceProvider.java new file mode 100644 index 00000000000..f5d18c9e9af --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesInstanceProvider.java @@ -0,0 +1,127 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesInstance; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.model.ContainerLog; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.model.InstanceProvider; +import io.kubernetes.client.openapi.models.V1Container; +import io.kubernetes.client.openapi.models.V1ObjectMeta; +import io.kubernetes.client.openapi.models.V1Pod; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.annotation.Nonnull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesInstanceProvider + implements InstanceProvider> { + private static final Logger log = LoggerFactory.getLogger(KubernetesInstanceProvider.class); + private final KubernetesCacheUtils cacheUtils; + private final KubernetesAccountResolver accountResolver; + + @Autowired + KubernetesInstanceProvider( + KubernetesCacheUtils cacheUtils, KubernetesAccountResolver accountResolver) { + this.cacheUtils = cacheUtils; + this.accountResolver = accountResolver; + } + + @Override + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } + + @Override + public KubernetesInstance getInstance(String account, String namespace, String fullName) { + return cacheUtils + .getSingleEntry(account, namespace, fullName) + .map(KubernetesInstance::fromCacheData) + .orElse(null); + } + + @Override + public List getConsoleOutput(String account, String namespace, String fullName) { + Optional optionalCredentials = accountResolver.getCredentials(account); + if 
(!optionalCredentials.isPresent()) { + log.warn("Failure getting account {}", account); + return null; + } + + KubernetesCredentials credentials = optionalCredentials.get(); + KubernetesCoordinates coords; + try { + coords = + KubernetesCoordinates.builder().namespace(namespace).fullResourceName(fullName).build(); + } catch (IllegalArgumentException e) { + return null; + } + + V1Pod pod = KubernetesCacheDataConverter.getResource(credentials.get(coords), V1Pod.class); + + // Short-circuit if pod cannot be found + if (pod == null) { + return ImmutableList.of( + new ContainerLog("Error", "Failed to retrieve pod data; pod may have been deleted.")); + } + + return getPodLogs(credentials, pod); + } + + @Nonnull + private List getPodLogs( + @Nonnull KubernetesCredentials credentials, @Nonnull V1Pod pod) { + List initContainers = + Optional.ofNullable(pod.getSpec().getInitContainers()).orElse(ImmutableList.of()); + List containers = pod.getSpec().getContainers(); + + return Stream.concat(initContainers.stream(), containers.stream()) + .map(container -> getContainerLog(credentials, pod, container)) + .collect(Collectors.toList()); + } + + @Nonnull + private ContainerLog getContainerLog( + @Nonnull KubernetesCredentials credentials, + @Nonnull V1Pod pod, + @Nonnull V1Container container) { + String containerName = container.getName(); + V1ObjectMeta metadata = pod.getMetadata(); + + try { + // Make live calls rather than abuse the cache for storing all logs + String containerLogs = + credentials.logs(metadata.getNamespace(), metadata.getName(), containerName); + return new ContainerLog(containerName, containerLogs); + } catch (KubectlJobExecutor.KubectlException e) { + // Typically happens if the container/pod isn't running yet + return new ContainerLog(containerName, e.getMessage()); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesLoadBalancerProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesLoadBalancerProvider.java new file mode 100644 index 00000000000..514750ab5a5 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesLoadBalancerProvider.java @@ -0,0 +1,119 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
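A hedged sketch of consuming the instance provider's console output above (names invented; it assumes ContainerLog exposes name and output getters). The result is one entry per init container and container, or null when the account or pod coordinates cannot be resolved:

List<ContainerLog> logs =
    instanceProvider.getConsoleOutput("my-k8s-account", "production", "pod my-app-abcd1");
if (logs != null) {
  for (ContainerLog containerLog : logs) {
    System.out.printf("--- %s ---%n%s%n", containerLog.getName(), containerLog.getOutput());
  }
}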
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.INSTANCES; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.LOAD_BALANCERS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUPS; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.ApplicationCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesLoadBalancer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroup; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupCacheData; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.commons.lang3.NotImplementedException; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesLoadBalancerProvider + implements LoadBalancerProvider { + private final KubernetesCacheUtils cacheUtils; + + @Autowired + KubernetesLoadBalancerProvider(KubernetesCacheUtils cacheUtils) { + this.cacheUtils = cacheUtils; + } + + @Override + public String getCloudProvider() { + return KubernetesCloudProvider.ID; + } + + @Override + public List list() { + return new ArrayList<>(); + } + + @Override + public LoadBalancerProvider.Item get(String name) { + throw new NotImplementedException("Not a valid operation"); + } + + @Override + public List byAccountAndRegionAndName( + String account, String namespace, String fullName) { + return cacheUtils + .getSingleEntry(account, namespace, fullName) + .map(loadBalancerData -> fromLoadBalancerCacheData(ImmutableList.of(loadBalancerData))) + .map(ImmutableList::copyOf) + .orElse(null); + } + + @Override + public Set getApplicationLoadBalancers(String application) { + return cacheUtils + .getSingleEntry(APPLICATIONS.toString(), ApplicationCacheKey.createKey(application)) + .map( + applicationData -> + fromLoadBalancerCacheData( + cacheUtils.getRelationships(applicationData, LOAD_BALANCERS))) + .orElseGet(ImmutableSet::of); + } + + private Set fromLoadBalancerCacheData( + Collection loadBalancerData) { + ImmutableMultimap loadBalancerToServerGroups = + cacheUtils.getRelationships(loadBalancerData, SERVER_GROUPS); + ImmutableMultimap serverGroupToInstances = + cacheUtils.getRelationships(loadBalancerToServerGroups.values(), INSTANCES); + + return loadBalancerData.stream() + .map( + lb -> + KubernetesLoadBalancer.fromCacheData( + lb, + loadBalancerToServerGroups.get(lb.getId()).stream() + .map( + sg -> + KubernetesServerGroup.fromCacheData( + KubernetesServerGroupCacheData.builder() + .serverGroupData(sg) + .instanceData(serverGroupToInstances.get(sg.getId())) + 
.loadBalancerKeys(ImmutableList.of(lb.getId())) + .build())) + .filter(Objects::nonNull) + .map(KubernetesServerGroup::toLoadBalancerServerGroup) + .collect(toImmutableSet()))) + .filter(Objects::nonNull) + .collect(Collectors.toSet()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesManifestContainerBuilder.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesManifestContainerBuilder.java new file mode 100644 index 00000000000..d7eabe8ab12 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesManifestContainerBuilder.java @@ -0,0 +1,74 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static java.util.Comparator.*; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesManifestContainer; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Comparator; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +@NonnullByDefault +final class KubernetesManifestContainerBuilder { + static KubernetesManifestContainer buildManifest( + KubernetesCredentials credentials, + KubernetesManifest manifest, + List events, + List metrics) { + String namespace = manifest.getNamespace(); + KubernetesKind kind = manifest.getKind(); + + KubernetesResourceProperties properties = credentials.getResourcePropertyRegistry().get(kind); + + Function lastEventTimestamp = + (m) -> (String) m.getOrDefault("lastTimestamp", m.getOrDefault("firstTimestamp", "n/a")); + + Comparator eventComparator = + nullsLast(comparing(lastEventTimestamp, nullsLast(naturalOrder()))); + + events = events.stream().sorted(eventComparator).collect(Collectors.toList()); + + Moniker moniker = KubernetesManifestAnnotater.getMoniker(manifest); + + KubernetesHandler handler = properties.getHandler(); + + return KubernetesManifestContainer.builder() + .account(credentials.getAccountName()) + .name(manifest.getFullResourceName()) + .location(namespace) + .manifest(manifest) + .moniker(moniker) + 
.status(handler.status(manifest)) + .artifacts(handler.listArtifacts(manifest)) + .events(events) + .warnings(handler.listWarnings(manifest)) + .metrics(metrics) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesManifestProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesManifestProvider.java new file mode 100644 index 00000000000..cc3d92427a2 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesManifestProvider.java @@ -0,0 +1,164 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.google.common.collect.ImmutableList.toImmutableList; + +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesManifestContainer; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric.ContainerMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesManifestProvider { + private static final Logger log = LoggerFactory.getLogger(KubernetesManifestProvider.class); + private final KubernetesAccountResolver accountResolver; + private final ExecutorService executorService = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat(getClass().getSimpleName() + "-%d").build()); + + @Autowired + public KubernetesManifestProvider(KubernetesAccountResolver accountResolver) { + this.accountResolver = accountResolver; + } + + @Nullable + public KubernetesManifestContainer getManifest( + String account, String location, String name, boolean includeEvents) { + Optional 
optionalCredentials = accountResolver.getCredentials(account); + if (!optionalCredentials.isPresent()) { + return null; + } + KubernetesCredentials credentials = optionalCredentials.get(); + + KubernetesCoordinates coords; + try { + coords = KubernetesCoordinates.builder().namespace(location).fullResourceName(name).build(); + } catch (IllegalArgumentException e) { + return null; + } + + Future> events = + includeEvents + ? executorService.submit(() -> credentials.eventsFor(coords)) + : Futures.immediateFuture(ImmutableList.of()); + + Future> metrics = + includeEvents + && coords.getKind().equals(KubernetesKind.POD) + && credentials.isMetricsEnabled() + ? executorService.submit(() -> getPodMetrics(credentials, coords)) + : Futures.immediateFuture(ImmutableList.of()); + + KubernetesManifest manifest = credentials.get(coords); + if (manifest == null) { + events.cancel(true); + metrics.cancel(true); + return null; + } + + try { + return KubernetesManifestContainerBuilder.buildManifest( + credentials, manifest, events.get(), metrics.get()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + events.cancel(true); + metrics.cancel(true); + log.warn("Interrupted while fetching manifest: {}", coords); + return null; + } catch (ExecutionException e) { + throw new RuntimeException(e.getCause()); + } + } + + private ImmutableList getPodMetrics( + KubernetesCredentials credentials, KubernetesCoordinates coords) { + return credentials.topPod(coords).stream() + .map(KubernetesPodMetric::getContainerMetrics) + .flatMap(Collection::stream) + .collect(toImmutableList()); + } + + public List getClusterAndSortAscending( + String account, String location, String kind, String cluster, String app, Sort sort) { + KubernetesKind kubernetesKind = KubernetesKind.fromString(kind); + return accountResolver + .getCredentials(account) + .map( + credentials -> + credentials.list(kubernetesKind, location).stream() + .filter( + m -> + cluster.equals(KubernetesManifestAnnotater.getManifestCluster(m)) + && app.equals( + KubernetesManifestAnnotater.getManifestApplication(m))) + .sorted( + (m1, m2) -> + credentials + .getResourcePropertyRegistry() + .get(kubernetesKind) + .getHandler() + .comparatorFor(sort) + .compare(m1, m2)) + .collect(Collectors.toList())) + .orElseThrow(() -> new IllegalArgumentException("Unable to resolve account: " + account)); + } + + public List getClusterManifestCoordinates( + String account, String location, String kind, String app, String cluster) { + KubernetesKind kubernetesKind = KubernetesKind.fromString(kind); + return accountResolver + .getCredentials(account) + .map( + credentials -> + credentials.list(kubernetesKind, location).stream() + .filter( + m -> + cluster.equals(KubernetesManifestAnnotater.getManifestCluster(m)) + && app.equals( + KubernetesManifestAnnotater.getManifestApplication(m))) + .map(KubernetesCoordinates::fromManifest) + .collect(Collectors.toList())) + .orElseThrow(() -> new IllegalArgumentException("Unable to resolve account: " + account)); + } + + public enum Sort { + AGE, + SIZE + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesRawResourceProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesRawResourceProvider.java new file mode 100644 index 00000000000..65c63090b5a --- /dev/null +++ 
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesRawResourceProvider.java @@ -0,0 +1,122 @@ +/* + * Copyright 2020 Coveo, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.APPLICATIONS; + +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.ApplicationCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesRawResource; +import com.netflix.spinnaker.clouddriver.kubernetes.config.RawResourcesEndpointConfig; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesRawResourceProvider { + private final KubernetesCacheUtils cacheUtils; + private final KubernetesAccountResolver accountResolver; + + private static final Logger log = LoggerFactory.getLogger(KubernetesRawResourceProvider.class); + + @Autowired + KubernetesRawResourceProvider( + KubernetesCacheUtils cacheUtils, KubernetesAccountResolver accountResolver) { + this.cacheUtils = cacheUtils; + this.accountResolver = accountResolver; + } + + public Set getApplicationRawResources(String application) { + return cacheUtils + .getSingleEntry(APPLICATIONS.toString(), ApplicationCacheKey.createKey(application)) + .map( + applicationData -> + fromRawResourceCacheData(cacheUtils.getAllRelationships(applicationData))) + .orElseGet(ImmutableSet::of); + } + + private Set fromRawResourceCacheData( + Collection rawResourceData) { + return rawResourceData.stream() + .map(KubernetesRawResource::fromCacheData) + .filter(Objects::nonNull) + .filter(resource -> includeInResponse(resource)) + .collect(Collectors.toSet()); + } + + private boolean includeInResponse(KubernetesRawResource resource) { + Optional optionalCredentials = + this.accountResolver.getCredentials(resource.getAccount()); + + if (!optionalCredentials.isPresent()) { + log.warn("Account {} has no credentials", resource.getAccount()); + return false; + } + + KubernetesCredentials credentials = optionalCredentials.get(); + ImmutableSet omitKinds = credentials.getOmitKinds(); + ImmutableSet kinds = credentials.getKinds(); + RawResourcesEndpointConfig epConfig = credentials.getRawResourcesEndpointConfig(); + List kindPatterns = 
epConfig.getKindPatterns(); + List omitKindPatterns = epConfig.getOmitKindPatterns(); + + log.debug( + "Kinds: {} OmitKinds: {} KindPatterns: {} OmitKindPatterns: {}", + kinds.size(), + omitKinds.size(), + kindPatterns.size(), + omitKindPatterns.size()); + + // check account level kinds and omitKinds first + if (!kinds.isEmpty() && !kinds.contains(resource.getKind())) { + return false; + } + if (omitKinds.contains(resource.getKind())) { + return false; + } + + // check kindPatterns + for (Pattern p : kindPatterns) { + Matcher m = p.matcher(resource.getKind().toString()); + if (m.matches()) { + return true; + } + } + // check omitKindPatterns + for (Pattern p : omitKindPatterns) { + Matcher m = p.matcher(resource.getKind().toString()); + if (m.matches()) { + return false; + } + } + // It didn't match any filters, default to include + return true; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSearchProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSearchProvider.java new file mode 100644 index 00000000000..9180f86a870 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSearchProvider.java @@ -0,0 +1,262 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
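To make the filtering precedence in includeInResponse above concrete: account-level kinds and omitKinds are consulted first, then kindPatterns (a match includes), then omitKindPatterns (a match excludes), and anything left unmatched is included by default. A toy walk-through with invented patterns:

List<Pattern> kindPatterns = List.of(Pattern.compile("Custom.*"));
List<Pattern> omitKindPatterns = List.of(Pattern.compile(".*Secret"));
// "CustomThing"  -> matches kindPatterns           -> included
// "PlainSecret"  -> matches only omitKindPatterns  -> excluded
// "CustomSecret" -> matches kindPatterns first     -> included (include patterns win)
// "Deployment"   -> matches neither                -> included by default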
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.search.SearchProvider; +import com.netflix.spinnaker.clouddriver.search.SearchResultSet; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Component; + +@Component +@ConditionalOnProperty(value = "kubernetes.search.enabled", matchIfMissing = true) +public class KubernetesSearchProvider implements SearchProvider { + private static final Logger log = LoggerFactory.getLogger(KubernetesSearchProvider.class); + private final KubernetesCacheUtils cacheUtils; + private final ObjectMapper mapper; + private final KubernetesSpinnakerKindMap kindMap; + private final KubernetesAccountResolver resourcePropertyResolver; + private final List defaultTypes; + private final Set logicalTypes; + private final Set allCaches; + + @Autowired + public KubernetesSearchProvider( + KubernetesCacheUtils cacheUtils, + KubernetesSpinnakerKindMap kindMap, + ObjectMapper objectMapper, + KubernetesAccountResolver resourcePropertyResolver) { + this.cacheUtils = cacheUtils; + this.mapper = objectMapper; + this.kindMap = kindMap; + this.resourcePropertyResolver = resourcePropertyResolver; + + this.defaultTypes = + kindMap.allKubernetesKinds().stream() + .map(KubernetesKind::toString) + .collect(Collectors.toList()); + this.logicalTypes = + Arrays.stream(LogicalKind.values()).map(LogicalKind::toString).collect(Collectors.toSet()); + + this.allCaches = new HashSet<>(defaultTypes); + this.allCaches.addAll(logicalTypes); + } + + @Override + public String getPlatform() { + return KubernetesCloudProvider.ID; + } + + @Override + public SearchResultSet search(String query, Integer pageNumber, Integer pageSize) { + return search(query, defaultTypes, pageNumber, pageSize); + } + + @Override + public SearchResultSet search( + String query, Integer pageNumber, Integer pageSize, Map filters) { + return search(query, defaultTypes, pageNumber, pageSize, filters); + } + + @Override + public SearchResultSet search( + String query, List types, Integer pageNumber, 
Integer pageSize) { + return search(query, types, pageNumber, pageSize, ImmutableMap.of()); + } + + // TODO: Use filters + @Override + public SearchResultSet search( + String query, + List types, + Integer pageNumber, + Integer pageSize, + Map filters) { + log.info("Querying {} for term {}", types, query); + List> results = + paginateResults(getMatches(query, types), pageSize, pageNumber); + + return SearchResultSet.builder() + .pageNumber(pageNumber) + .pageSize(pageSize) + .platform(getPlatform()) + .query(query) + .totalMatches(results.size()) + .results(results) + .build(); + } + + private Map convertKeyToMap(String key) { + Optional optional = Keys.parseKey(key); + if (!optional.isPresent()) { + return null; + } + + Keys.CacheKey parsedKey = optional.get(); + Map result; + String type; + + if (parsedKey instanceof Keys.InfrastructureCacheKey) { + Keys.InfrastructureCacheKey infraKey = (Keys.InfrastructureCacheKey) parsedKey; + type = kindMap.translateKubernetesKind(infraKey.getKubernetesKind()).toString(); + + KubernetesResourceProperties properties = + resourcePropertyResolver + .getResourcePropertyRegistry(infraKey.getAccount()) + .get(infraKey.getKubernetesKind()); + + result = properties.getHandler().hydrateSearchResult(infraKey); + } else if (parsedKey instanceof Keys.LogicalKey) { + Keys.LogicalKey logicalKey = (Keys.LogicalKey) parsedKey; + + result = mapper.convertValue(logicalKey, new TypeReference>() {}); + result.put(logicalKey.getLogicalKind().singular(), logicalKey.getName()); + type = logicalKey.getGroup(); + } else { + log.warn("Unknown key type " + parsedKey + ", ignoring."); + return null; + } + + result.put("type", type); + return result; + } + + private static Stream getMatchingRelationships( + CacheData cacheData, Set typesToSearch) { + Keys.CacheKey cacheKey = Keys.parseKey(cacheData.getId()).orElse(null); + if (!(cacheKey instanceof LogicalKey)) { + return Stream.empty(); + } + Map> relationships = cacheData.getRelationships(); + return typesToSearch.stream() + .map(relationships::get) + .filter(Objects::nonNull) + .flatMap(Collection::stream) + .filter(Objects::nonNull) + .map(k -> new KeyRelationship(k, (LogicalKey) cacheKey)); + } + + private Map> getKeysRelatedToLogicalMatches( + String matchQuery, Set typesToSearch) { + return logicalTypes.stream() + .map(type -> cacheUtils.getAllDataMatchingPattern(type, matchQuery)) + .flatMap(Collection::stream) + .flatMap(cd -> getMatchingRelationships(cd, typesToSearch)) + .collect( + Collectors.groupingBy( + KeyRelationship::getInfrastructureKey, + Collectors.mapping(KeyRelationship::getLogicalKey, Collectors.toList()))); + } + + @Getter + @RequiredArgsConstructor + private static class KeyRelationship { + private final String infrastructureKey; + private final Keys.LogicalKey logicalKey; + } + + private List> getMatches(String query, List types) { + String matchQuery = String.format("*%s*", query.toLowerCase()); + Set typesToSearch = new HashSet<>(types); + + // We add k8s versions of Spinnaker types here to ensure that (for example) replica sets are + // returned when server groups are requested. 
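+ // (With the default kind map, translateSpinnakerKind(SERVER_GROUPS) would contribute
+ // kinds such as replicaSet and statefulSet here, so a search for "serverGroups" also
+ // scans those caches.)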
+ typesToSearch.addAll( + types.stream() + .map( + t -> { + try { + return SpinnakerKind.fromString(t); + } catch (IllegalArgumentException e) { + return null; + } + }) + .filter(k -> k != null && k != SpinnakerKind.UNCLASSIFIED) + .map(kindMap::translateSpinnakerKind) + .flatMap(Collection::stream) + .map(KubernetesKind::toString) + .collect(Collectors.toSet())); + + // Remove caches that we can't search + typesToSearch.retainAll(allCaches); + + if (typesToSearch.isEmpty()) { + return ImmutableList.of(); + } + + // Search caches directly + Stream> directResults = + typesToSearch.stream() + .map(type -> cacheUtils.getAllKeysMatchingPattern(type, matchQuery)) + .flatMap(Collection::stream) + .map(this::convertKeyToMap); + + // Search 'logical' caches (clusters, apps) for indirect matches + Stream> relatedResults = + getKeysRelatedToLogicalMatches(matchQuery, typesToSearch).entrySet().stream() + .map( + kv -> { + Map result = convertKeyToMap(kv.getKey()); + if (result != null) { + kv.getValue() + .forEach(k -> result.put(k.getLogicalKind().singular(), k.getName())); + } + return result; + }); + + return Stream.concat(directResults, relatedResults) + .filter(Objects::nonNull) + .filter(result -> typesToSearch.contains(result.get("group"))) + .collect(Collectors.toList()); + } + + private static List paginateResults( + List matches, Integer pageSize, Integer pageNumber) { + Integer startingIndex = pageSize * (pageNumber - 1); + Integer endIndex = Math.min(pageSize * pageNumber, matches.size()); + return startingIndex < endIndex ? matches.subList(startingIndex, endIndex) : new ArrayList<>(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSecurityGroupProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSecurityGroupProvider.java new file mode 100644 index 00000000000..db42a10f601 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSecurityGroupProvider.java @@ -0,0 +1,150 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
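A note on the pagination contract implemented by paginateResults: pageNumber is 1-indexed, and an out-of-range page yields an empty list rather than throwing. A minimal standalone sketch of the same arithmetic (the class and method names below are illustrative only, not part of this change):

    import java.util.List;

    class PaginationSketch {
      // Mirrors paginateResults above: pages are 1-indexed, and an
      // out-of-range page returns an empty list instead of throwing.
      static <T> List<T> page(List<T> matches, int pageSize, int pageNumber) {
        int start = pageSize * (pageNumber - 1);
        int end = Math.min(pageSize * pageNumber, matches.size());
        return start < end ? matches.subList(start, end) : List.of();
      }

      public static void main(String[] args) {
        List<Integer> hits = List.of(1, 2, 3, 4, 5);
        System.out.println(page(hits, 2, 1)); // [1, 2]
        System.out.println(page(hits, 2, 3)); // [5]
        System.out.println(page(hits, 2, 4)); // []
      }
    }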
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSecurityGroupProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSecurityGroupProvider.java
new file mode 100644
index 00000000000..db42a10f601
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSecurityGroupProvider.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesSecurityGroup;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider;
+import java.util.Collection;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesSecurityGroupProvider
+    implements SecurityGroupProvider<KubernetesSecurityGroup> {
+  private final KubernetesCacheUtils cacheUtils;
+  private final KubernetesSpinnakerKindMap kindMap;
+
+  @Autowired
+  KubernetesSecurityGroupProvider(
+      KubernetesCacheUtils cacheUtils, KubernetesSpinnakerKindMap kindMap) {
+    this.cacheUtils = cacheUtils;
+    this.kindMap = kindMap;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return KubernetesCloudProvider.ID;
+  }
+
+  @Override
+  public Set<KubernetesSecurityGroup> getAll(boolean includeRules) {
+    return kindMap.translateSpinnakerKind(SpinnakerKind.SECURITY_GROUPS).stream()
+        .map(KubernetesKind::toString)
+        .map(cacheUtils::getAllKeys)
+        .flatMap(Collection::stream)
+        .map(KubernetesSecurityGroup::fromCacheData)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public Set<KubernetesSecurityGroup> getAllByRegion(boolean includeRules, String namespace) {
+    return kindMap.translateSpinnakerKind(SpinnakerKind.SECURITY_GROUPS).stream()
+        .map(
+            k -> {
+              String key = Keys.InfrastructureCacheKey.createKey(k, "*", namespace, "*");
+              return cacheUtils.getAllDataMatchingPattern(k.toString(), key);
+            })
+        .flatMap(Collection::stream)
+        .map(KubernetesSecurityGroup::fromCacheData)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public Set<KubernetesSecurityGroup> getAllByAccount(boolean includeRules, String account) {
+    return kindMap.translateSpinnakerKind(SpinnakerKind.SECURITY_GROUPS).stream()
+        .map(
+            k -> {
+              String key = Keys.InfrastructureCacheKey.createKey(k, account, "*", "*");
+              return cacheUtils.getAllDataMatchingPattern(k.toString(), key);
+            })
+        .flatMap(Collection::stream)
+        .map(KubernetesSecurityGroup::fromCacheData)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public Set<KubernetesSecurityGroup> getAllByAccountAndName(
+      boolean includeRules, String account, String fullName) {
+    String name;
+    try {
+      name = KubernetesCoordinates.builder().fullResourceName(fullName).build().getName();
+    } catch (IllegalArgumentException e) {
+      return null;
+    }
+
+    return kindMap.translateSpinnakerKind(SpinnakerKind.SECURITY_GROUPS).stream()
+        .map(
+            k -> {
+              String key = Keys.InfrastructureCacheKey.createKey(k, account, "*", name);
+              return cacheUtils.getAllDataMatchingPattern(k.toString(), key);
+            })
+        .flatMap(Collection::stream)
+        .map(KubernetesSecurityGroup::fromCacheData)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public Set<KubernetesSecurityGroup> getAllByAccountAndRegion(
+      boolean includeRule, String account, String namespace) {
+    return kindMap.translateSpinnakerKind(SpinnakerKind.SECURITY_GROUPS).stream()
+        .map(
+            k -> {
+              String key = Keys.InfrastructureCacheKey.createKey(k, account, namespace, "*");
+              return cacheUtils.getAllDataMatchingPattern(k.toString(), key);
+            })
+        .flatMap(Collection::stream)
+        .map(KubernetesSecurityGroup::fromCacheData)
+        .collect(Collectors.toSet());
+  }
+
+  @Override
+  public KubernetesSecurityGroup get(
+      String account, String namespace, String fullName, String _unused) {
+    String name;
+    try {
+      name = KubernetesCoordinates.builder().fullResourceName(fullName).build().getName();
+    } catch (IllegalArgumentException e) {
+      return null;
+    }
+
+    return kindMap.translateSpinnakerKind(SpinnakerKind.SECURITY_GROUPS).stream()
+        .map(
+            k -> {
+              String key = Keys.InfrastructureCacheKey.createKey(k, account, namespace, name);
+              return cacheUtils.getSingleEntry(k.toString(), key).orElse(null);
+            })
+        .filter(Objects::nonNull)
+        .map(KubernetesSecurityGroup::fromCacheData)
+        .findFirst()
+        .orElse(null);
+  }
+
+  @Override
+  public KubernetesSecurityGroup getById(String account, String region, String id, String vpcId) {
+    throw new UnsupportedOperationException("Not currently implemented.");
+  }
+}
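Both getAllByAccountAndName and get accept a "full resource name", which KubernetesCoordinates parses; the expected shape is a kind/name pair, which is why an unparseable input surfaces as IllegalArgumentException and the provider returns null. A standalone sketch of that format under this assumption (the real parsing lives in KubernetesCoordinates; the helper below is ours, for illustration only):

    class FullResourceNameSketch {
      // Assumed format: "<kind> <name>", e.g. "networkPolicy my-policy".
      // Anything without exactly two parts is rejected, matching the
      // IllegalArgumentException path in the provider above.
      static String[] parse(String fullName) {
        String[] parts = fullName.split(" ");
        if (parts.length != 2) {
          throw new IllegalArgumentException("Expected '<kind> <name>', got: " + fullName);
        }
        return parts;
      }

      public static void main(String[] args) {
        String[] kindAndName = parse("networkPolicy my-policy");
        System.out.println(kindAndName[0] + " / " + kindAndName[1]); // networkPolicy / my-policy
      }
    }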
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesServerGroupManagerProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesServerGroupManagerProvider.java
new file mode 100644
index 00000000000..4a15866f522
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesServerGroupManagerProvider.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.APPLICATIONS;
+import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUPS;
+import static com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind.SERVER_GROUP_MANAGERS;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableMultimap;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroupManager;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupManagerCacheData;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.ServerGroupManagerHandler;
+import com.netflix.spinnaker.clouddriver.model.ServerGroupManagerProvider;
+import java.util.Set;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesServerGroupManagerProvider
+    implements ServerGroupManagerProvider<KubernetesServerGroupManager> {
+  private final KubernetesCacheUtils cacheUtils;
+
+  @Autowired
+  public KubernetesServerGroupManagerProvider(KubernetesCacheUtils cacheUtils) {
+    this.cacheUtils = cacheUtils;
+  }
+
+  @Override
+  public Set<KubernetesServerGroupManager> getServerGroupManagersByApplication(
+      String application) {
+    CacheData applicationDatum =
+        cacheUtils
+            .getSingleEntry(
+                APPLICATIONS.toString(), Keys.ApplicationCacheKey.createKey(application))
+            .orElse(null);
+    if (applicationDatum == null) {
+      return null;
+    }
+
+    ImmutableCollection<CacheData> serverGroupManagerData =
+        cacheUtils.getRelationships(applicationDatum, SERVER_GROUP_MANAGERS);
+
+    ImmutableMultimap<String, CacheData> managerToServerGroupMap =
+        cacheUtils.getRelationships(serverGroupManagerData, SERVER_GROUPS);
+
+    return serverGroupManagerData.stream()
+        .map(
+            cd ->
+                serverGroupManagerFromCacheData(
+                    KubernetesServerGroupManagerCacheData.builder()
+                        .serverGroupManagerData(cd)
+                        .serverGroupData(managerToServerGroupMap.get(cd.getId()))
+                        .build()))
+        .collect(Collectors.toSet());
+  }
+
+  private final ServerGroupManagerHandler DEFAULT_SERVER_GROUP_MANAGER_HANDLER =
+      new ServerGroupManagerHandler() {};
+
+  @Nonnull
+  private KubernetesServerGroupManager serverGroupManagerFromCacheData(
+      @Nonnull KubernetesServerGroupManagerCacheData cacheData) {
+    KubernetesHandler handler = cacheUtils.getHandler(cacheData);
+    ServerGroupManagerHandler serverGroupManagerHandler =
+        handler instanceof ServerGroupManagerHandler
+            ? (ServerGroupManagerHandler) handler
+            : DEFAULT_SERVER_GROUP_MANAGER_HANDLER;
+    return serverGroupManagerHandler.fromCacheData(cacheData);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesCacheData.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesCacheData.java
new file mode 100644
index 00000000000..8a2771bc5e0
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesCacheData.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data;
+
+import com.netflix.spinnaker.cats.cache.CacheData;
+
+public interface KubernetesCacheData {
+  CacheData primaryData();
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesServerGroupCacheData.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesServerGroupCacheData.java
new file mode 100644
index 00000000000..ab9321d987b
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesServerGroupCacheData.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Collection;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nonnull;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Builder;
+import lombok.Value;
+
+@NonnullByDefault
+@Value
+public class KubernetesServerGroupCacheData implements KubernetesCacheData {
+  private final CacheData serverGroupData;
+  private final Collection<CacheData> instanceData;
+  private final Collection<String> loadBalancerKeys;
+  private final Collection<String> serverGroupManagerKeys;
+
+  @Builder
+  @ParametersAreNullableByDefault
+  private KubernetesServerGroupCacheData(
+      @Nonnull CacheData serverGroupData,
+      Collection<CacheData> instanceData,
+      Collection<String> loadBalancerKeys,
+      Collection<String> serverGroupManagerKeys) {
+    this.serverGroupData = Objects.requireNonNull(serverGroupData);
+    this.instanceData = Optional.ofNullable(instanceData).orElseGet(ImmutableList::of);
+    this.loadBalancerKeys = Optional.ofNullable(loadBalancerKeys).orElseGet(ImmutableList::of);
+    this.serverGroupManagerKeys =
+        Optional.ofNullable(serverGroupManagerKeys).orElseGet(ImmutableList::of);
+  }
+
+  @Override
+  public CacheData primaryData() {
+    return serverGroupData;
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesServerGroupManagerCacheData.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesServerGroupManagerCacheData.java
new file mode 100644
index 00000000000..4947f87d90c
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/data/KubernetesServerGroupManagerCacheData.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Collection;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nonnull;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Builder;
+import lombok.Value;
+
+@NonnullByDefault
+@Value
+public class KubernetesServerGroupManagerCacheData implements KubernetesCacheData {
+  private final CacheData serverGroupManagerData;
+  private final Collection<CacheData> serverGroupData;
+
+  @Builder
+  @ParametersAreNullableByDefault
+  private KubernetesServerGroupManagerCacheData(
+      @Nonnull CacheData serverGroupManagerData, Collection<CacheData> serverGroupData) {
+    this.serverGroupManagerData = Objects.requireNonNull(serverGroupManagerData);
+    this.serverGroupData = Optional.ofNullable(serverGroupData).orElseGet(ImmutableList::of);
+  }
+
+  @Override
+  public CacheData primaryData() {
+    return serverGroupManagerData;
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/CustomKubernetesResource.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/CustomKubernetesResource.java
new file mode 100644
index 00000000000..56e2ddcf850
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/CustomKubernetesResource.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import lombok.Data;
+
+@Data
+public class CustomKubernetesResource {
+  private String kubernetesKind;
+  private String spinnakerKind = SpinnakerKind.UNCLASSIFIED.toString();
+  private String deployPriority = "100";
+  private boolean versioned = false;
+  private boolean namespaced = true;
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesAccountProperties.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesAccountProperties.java
new file mode 100644
index 00000000000..3262ca70e55
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesAccountProperties.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2021 Salesforce, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.clouddriver.security.AccessControlledAccountDefinition;
+import com.netflix.spinnaker.fiat.model.resources.Permissions;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+
+/**
+ * Previously, accounts were stored in the {@link KubernetesConfigurationProperties} class. If a
+ * configuration properties file defines a large number of accounts, letting Spring Boot read and
+ * bind them is a fairly time-consuming process. For 1500 accounts, we observed that it took >10m
+ * to load them.
+ *
+ * <p>To speed this up, a feature-flagged change was introduced (see:
+ * https://github.com/spinnaker/clouddriver/pull/5125) to let us bind the properties manually
+ * instead of letting Spring Boot do it. This drops the load time to ~1-2s. The main drawback of
+ * manual binding is that we have to explicitly define every property we need to bind. For
+ * example, if accounts are defined in one configuration file and the other properties in a
+ * different file, those other properties are not loaded unless they are also explicitly bound to
+ * the target class.
+ *
+ * <p>By moving accounts out of the {@link KubernetesConfigurationProperties} class, we don't need
+ * to do any manual binding for those other properties; the manual binding is limited to accounts,
+ * which makes it more maintainable. This also leaves {@link KubernetesConfigurationProperties}
+ * free to cater to all the other configuration aspects of Kubernetes.
+ */
+@Data
+public class KubernetesAccountProperties {
+  private static final int DEFAULT_CACHE_THREADS = 1;
+
+  @Data
+  @JsonTypeName("kubernetes")
+  public static class ManagedAccount implements AccessControlledAccountDefinition {
+    private String name;
+    private String environment;
+    private String accountType;
+    private String context;
+    private String oAuthServiceAccount;
+    private List<String> oAuthScopes;
+    private String kubeconfigFile;
+    private String kubeconfigContents;
+    private String kubectlExecutable;
+    private Integer kubectlRequestTimeoutSeconds;
+    private boolean serviceAccount = false;
+    private List<String> namespaces = new ArrayList<>();
+    private List<String> omitNamespaces = new ArrayList<>();
+    private int cacheThreads = DEFAULT_CACHE_THREADS;
+    private List<String> requiredGroupMembership = new ArrayList<>();
+    private Permissions.Builder permissions = new Permissions.Builder();
+    private String namingStrategy = "kubernetesAnnotations";
+    private boolean debug = false;
+    private boolean metrics = true;
+    private boolean checkPermissionsOnStartup = true;
+    private List<CustomKubernetesResource> customResources = new ArrayList<>();
+    private List<KubernetesCachingPolicy> cachingPolicies = new ArrayList<>();
+    private List<String> kinds = new ArrayList<>();
+    private List<String> omitKinds = new ArrayList<>();
+    private boolean onlySpinnakerManaged = false;
+    private Long cacheIntervalSeconds;
+    private boolean cacheAllApplicationRelationships = false;
+    private RawResourcesEndpointConfig rawResourcesEndpointConfig =
+        new RawResourcesEndpointConfig();
+
+    public void validate() {
+      if (Strings.isNullOrEmpty(name)) {
+        throw new IllegalArgumentException("Account name for Kubernetes provider missing.");
+      }
+
+      if (!omitNamespaces.isEmpty() && !namespaces.isEmpty()) {
+        throw new IllegalArgumentException(
+            "At most one of 'namespaces' and 'omitNamespaces' can be specified");
+      }
+
+      if (!omitKinds.isEmpty() && !kinds.isEmpty()) {
+        throw new IllegalArgumentException(
+            "At most one of 'kinds' and 'omitKinds' can be specified");
+      }
+      rawResourcesEndpointConfig.validate();
+    }
+  }
+
+  private List<ManagedAccount> accounts = new ArrayList<>();
+}
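To illustrate the validate() contract: namespaces and omitNamespaces (and likewise kinds and omitKinds) are mutually exclusive. A small sketch using the ManagedAccount class added above (account values are hypothetical):

    import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount;
    import java.util.List;

    class AccountValidationSketch {
      public static void main(String[] args) {
        ManagedAccount account = new ManagedAccount();
        account.setName("prod"); // name is required
        account.setNamespaces(List.of("default")); // a whitelist...
        account.setOmitNamespaces(List.of("kube-system")); // ...and a blacklist together
        try {
          account.validate();
        } catch (IllegalArgumentException e) {
          // "At most one of 'namespaces' and 'omitNamespaces' can be specified"
          System.out.println(e.getMessage());
        }
      }
    }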
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesCachingPolicy.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesCachingPolicy.java
new file mode 100644
index 00000000000..2b1fe511372
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesCachingPolicy.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import lombok.Data;
+
+@Data
+public class KubernetesCachingPolicy {
+  private String kubernetesKind;
+  private int maxEntriesPerAgent;
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesConfigurationProperties.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesConfigurationProperties.java
new file mode 100644
index 00000000000..e8d88952aaf
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesConfigurationProperties.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class KubernetesConfigurationProperties {
+  private KubernetesJobExecutorProperties jobExecutor = new KubernetesJobExecutorProperties();
+
+  /**
+   * Flag to toggle loading namespaces for a k8s account. By default, it is enabled, i.e., set to
+   * true. Disabling it is meant primarily for making clouddriver start up faster, since no calls
+   * are made to the k8s cluster to load namespaces for newly added accounts.
+   */
+  private boolean loadNamespacesInAccount = true;
+
+  /** Flag to toggle account health checks. Defaults to true. */
+  private boolean verifyAccountHealth = true;
+
+  private Cache cache = new Cache();
+
+  private KubectlProperties kubectl = new KubectlProperties();
+  private OAuthProperties oAuth = new OAuthProperties();
+
+  public KubernetesConfigurationProperties kubernetesConfigurationProperties() {
+    return new KubernetesConfigurationProperties();
+  }
+
+  @Data
+  public static class KubernetesJobExecutorProperties {
+    private boolean persistTaskOutput = false;
+    private boolean enableTaskOutputForAllAccounts = false;
+
+    private Retries retries = new Retries();
+
+    @Data
+    public static class Retries {
+      // flag to turn the kubectl retry-on-error capability on or off
+      private boolean enabled = false;
+
+      // total number of attempts that are made to complete a kubectl call
+      int maxAttempts = 3;
+
+      // time in ms to wait before subsequent retry attempts
+      long backOffInMs = 5000;
+
+      // list of error strings on which to retry, since the kubectl binary returns textual error
+      // messages
+      List<String> retryableErrorMessages = List.of("TLS handshake timeout");
+
+      // flag to enable exponential backoff; only applies when retries are enabled
+      boolean exponentialBackoffEnabled = false;
+
+      // only applicable when exponentialBackoffEnabled = true
+      int exponentialBackoffMultiplier = 2;
+
+      // only applicable when exponentialBackoffEnabled = true
+      long exponentialBackOffIntervalMs = 10000;
+
+      private Metrics metrics = new Metrics();
+
+      @Data
+      public static class Metrics {
+        // flag to capture retry metrics. Turned off by default.
+        private boolean enabled;
+      }
+    }
+  }
+
+  @Data
+  public static class Cache {
+
+    /** Whether caching is enabled in the kubernetes provider. */
+    private boolean enabled = true;
+
+    /**
+     * Whether to cache all kubernetes kinds or not. If this value is "true", the setting
+     * "cacheKinds" is ignored.
+     */
+    private boolean cacheAll = false;
+
+    /**
+     * Only cache the kubernetes kinds in this list. If not configured, only the kinds that show in
+     * Spinnaker's classic infrastructure screens are cached, which are the ones mapped to the
+     * following Spinnaker kinds: <br>
+     * - SERVER_GROUP_MANAGERS <br>
+     * - SERVER_GROUPS <br>
+     * - INSTANCES <br>
+     * - LOAD_BALANCERS <br>
+     * - SECURITY_GROUPS
+     *
+     * <p>Names are in {kind.group} format, where the group is optional for core kinds. Example:
+     * <br>
+     * cacheKinds: <br>
+     * - deployment.apps <br>
+     * - replicaSet <br>
+     * - pod <br>
+     * - myCustomKind.my.group
+     *
+     * <p>If the setting {@link Cache#cacheAll} is true, this setting is ignored.
+     */
+    private List<String> cacheKinds = null;
+
+    /**
+     * Do not cache the kinds in this list. The format of the list is the same as {@link
+     * Cache#cacheKinds}.
+     */
+    private List<String> cacheOmitKinds = null;
+
+    /**
+     * Controls whether an application name obtained from a kubernetes manifest needs to be checked
+     * against front50. This can be needed in cases where we want front50 to be the definitive
+     * source of truth for applications. If you set this to true, please ensure that front50 is
+     * enabled.
+     */
+    boolean checkApplicationInFront50 = false;
+  }
+
+  /** kubectl configuration properties */
+  @Data
+  public static class KubectlProperties {
+    private String executable = "kubectl";
+  }
+
+  /** oAuth configuration properties */
+  @Data
+  public static class OAuthProperties {
+    private String executable = "oauth2l";
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesCustomAccountConfigurationProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesCustomAccountConfigurationProvider.java
new file mode 100644
index 00000000000..044dd7bdcf3
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/KubernetesCustomAccountConfigurationProvider.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import com.github.wnameless.json.unflattener.JsonUnflattener;
+import com.netflix.spinnaker.clouddriver.config.AbstractBootstrapCredentialsConfigurationProvider;
+import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService;
+import com.netflix.spinnaker.kork.secrets.SecretManager;
+import java.util.List;
+import java.util.Map;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.boot.context.properties.bind.BindResult;
+import org.springframework.context.ConfigurableApplicationContext;
+
+/**
+ * If a configuration properties file has a large number of kubernetes accounts, Spring Boot's
+ * default properties binding is inefficient. Hence, custom logic for binding just the {@link
+ * KubernetesAccountProperties} is provided here; it still uses Spring Boot's Binder class. {@link
+ * KubernetesCustomAccountConfigurationProvider} fetches the flattened kubernetes properties from
+ * Spring Cloud Config's BootstrapPropertySource and creates a {@link
+ * KubernetesAccountProperties} object.
+ */
+@Slf4j
+public class KubernetesCustomAccountConfigurationProvider
+    extends AbstractBootstrapCredentialsConfigurationProvider {
+  private final String FIRST_ACCOUNT_NAME_KEY = "kubernetes.accounts[0].name";
+
+  public KubernetesCustomAccountConfigurationProvider(
+      ConfigurableApplicationContext applicationContext,
+      CloudConfigResourceService configResourceService,
+      SecretManager secretManager) {
+    super(applicationContext, configResourceService, secretManager);
+  }
+
+  @Override
+  public KubernetesAccountProperties getConfigurationProperties() {
+    return getKubernetesAccounts(getPropertiesMap(FIRST_ACCOUNT_NAME_KEY));
+  }
+
+  @SuppressWarnings("unchecked")
+  private KubernetesAccountProperties getKubernetesAccounts(
+      Map<String, Object> kubernetesPropertiesMap) {
+    log.info("Started loading Kubernetes accounts");
+    KubernetesAccountProperties accounts = new KubernetesAccountProperties();
+    BindResult<?> result;
+
+    // unflatten
+    Map<String, Object> propertiesMap =
+        (Map<String, Object>)
+            JsonUnflattener.unflattenAsMap(kubernetesPropertiesMap).get("kubernetes");
+
+    // loop through each account and bind
+    for (Map<String, Object> unflattenedAcc :
+        ((List<Map<String, Object>>) propertiesMap.get("accounts"))) {
+      result = bind(getFlatMap(unflattenedAcc), KubernetesAccountProperties.ManagedAccount.class);
+      accounts.getAccounts().add((KubernetesAccountProperties.ManagedAccount) result.get());
+    }
+    log.info("Finished loading Kubernetes accounts");
+    return accounts;
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/LinkedDockerRegistryConfiguration.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/LinkedDockerRegistryConfiguration.java
new file mode 100644
index 00000000000..2fd876940ed
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/LinkedDockerRegistryConfiguration.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class LinkedDockerRegistryConfiguration {
+  private String accountName;
+  private List<String> namespaces;
+}
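To make the unflattening step in KubernetesCustomAccountConfigurationProvider concrete, here is a small sketch of what JsonUnflattener does with flattened account keys, mirroring the call made above (the property values are hypothetical):

    import com.github.wnameless.json.unflattener.JsonUnflattener;
    import java.util.Map;

    class UnflattenSketch {
      public static void main(String[] args) {
        // Flattened form, as exposed via Spring Cloud Config's property source.
        Map<String, ?> flat =
            Map.of(
                "kubernetes.accounts[0].name", "prod",
                "kubernetes.accounts[0].namespaces[0]", "default");

        // Nested form: {kubernetes={accounts=[{name=prod, namespaces=[default]}]}}
        System.out.println(JsonUnflattener.unflattenAsMap(flat));
      }
    }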
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/RawResourcesEndpointConfig.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/RawResourcesEndpointConfig.java
new file mode 100644
index 00000000000..e480c5acd41
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/config/RawResourcesEndpointConfig.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2020 Coveo, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.netflix.spinnaker.clouddriver.kubernetes.config;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import lombok.Data;
+import lombok.Getter;
+
+@Data
+public class RawResourcesEndpointConfig {
+  private Set<String> kindExpressions = new HashSet<>();
+  private Set<String> omitKindExpressions = new HashSet<>();
+  @Getter private List<Pattern> kindPatterns = new ArrayList<>();
+  @Getter private List<Pattern> omitKindPatterns = new ArrayList<>();
+
+  public void validate() {
+    if (!kindExpressions.isEmpty() && !omitKindExpressions.isEmpty()) {
+      throw new IllegalArgumentException(
+          "At most one of 'kindExpressions' and 'omitKindExpressions' can be specified");
+    }
+    for (String exp : kindExpressions) {
+      kindPatterns.add(Pattern.compile(exp));
+    }
+    for (String exp : omitKindExpressions) {
+      omitKindPatterns.add(Pattern.compile(exp));
+    }
+  }
+}
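Once validate() has compiled the expressions, matching is plain java.util.regex; a short sketch of how compiled kind expressions behave (the expression and kind values below are hypothetical):

    import java.util.List;
    import java.util.regex.Pattern;

    class KindExpressionSketch {
      public static void main(String[] args) {
        // e.g. expose every *.apps kind plus ConfigMaps on the raw-resources endpoint
        List<Pattern> kindPatterns =
            List.of(Pattern.compile(".*\\.apps"), Pattern.compile("configMap"));

        for (String kind : List.of("deployment.apps", "configMap", "secret")) {
          boolean exposed = kindPatterns.stream().anyMatch(p -> p.matcher(kind).matches());
          System.out.println(kind + " -> " + exposed); // secret -> false
        }
      }
    }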
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/ManifestController.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/ManifestController.java
new file mode 100644
index 00000000000..29d3df69644
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/ManifestController.java
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.controllers;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider.Sort;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest;
+import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue;
+import com.netflix.spinnaker.kork.exceptions.SpinnakerException;
+import com.netflix.spinnaker.kork.web.exceptions.NotFoundException;
+import java.util.List;
+import lombok.Getter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.security.access.prepost.PostAuthorize;
+import org.springframework.security.access.prepost.PreAuthorize;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+
+@RestController
+@RequestMapping("/manifests")
+public class ManifestController {
+  private static final Logger log = LoggerFactory.getLogger(ManifestController.class);
+  private final KubernetesManifestProvider manifestProvider;
+  private final RequestQueue requestQueue;
+
+  @Autowired
+  public ManifestController(
+      KubernetesManifestProvider manifestProvider, RequestQueue requestQueue) {
+    this.manifestProvider = manifestProvider;
+    this.requestQueue = requestQueue;
+  }
+
+  @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')")
+  @PostAuthorize("hasPermission(returnObject?.moniker?.app, 'APPLICATION', 'READ')")
+  @RequestMapping(value = "/{account:.+}/_/{name:.+}", method = RequestMethod.GET)
+  Manifest getForAccountAndName(
+      @PathVariable String account,
+      @PathVariable String name,
+      @RequestParam(value = "includeEvents", required = false, defaultValue = "true")
+          boolean includeEvents) {
+    return getForAccountLocationAndName(account, "", name, includeEvents);
+  }
+
+  @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')")
+  @PostAuthorize("hasPermission(returnObject?.moniker?.app, 'APPLICATION', 'READ')")
+  @RequestMapping(value = "/{account:.+}/{location:.+}/{name:.+}", method = RequestMethod.GET)
+  Manifest getForAccountLocationAndName(
+      @PathVariable String account,
+      @PathVariable String location,
+      @PathVariable String name,
+      @RequestParam(value = "includeEvents", required = false, defaultValue = "true")
+          boolean includeEvents) {
+
+    Manifest manifest;
+    String request =
+        String.format("(account: %s, location: %s, name: %s)", account, location, name);
+    try {
+      manifest =
+          requestQueue.execute(
+              account, () -> manifestProvider.getManifest(account, location, name, includeEvents));
+    } catch (Throwable t) {
+      String message = "Failed to read manifest: " + request;
+      log.warn(message, t);
+      throw new SpinnakerException(message, t);
+    }
+
+    if (manifest == null) {
+      throw new NotFoundException("Manifest " + request + " not found");
+    }
+
+    // Log here to make it easier to debug why the @PostAuthorize check failed.
+    if (manifest.getMoniker() == null || manifest.getMoniker().getApp() == null) {
+      log.error("could not derive a valid moniker with application for manifest: {}", request);
+    }
+
+    return manifest;
+  }
+
+  @RequestMapping(value = "/{account:.+}/{name:.+}", method = RequestMethod.GET)
+  Manifest getForAccountLocationAndName(
+      @PathVariable String account,
+      @PathVariable String name,
+      @RequestParam(value = "includeEvents", required = false, defaultValue = "true")
+          boolean includeEvents) {
+    return getForAccountLocationAndName(account, "", name, includeEvents);
+  }
+
+  @RequestMapping(
+      value =
+          "/{account:.+}/{location:.+}/{kind:.+}/cluster/{app:.+}/{cluster:.+}/dynamic/{criteria:.+}",
+      method = RequestMethod.GET)
+  KubernetesCoordinates getDynamicManifestFromCluster(
+      @PathVariable String account,
+      @PathVariable String location,
+      @PathVariable String kind,
+      @PathVariable String app,
+      @PathVariable String cluster,
+      @PathVariable Criteria criteria) {
+    final String request =
+        String.format(
+            "(account: %s, location: %s, kind: %s, app %s, cluster: %s, criteria: %s)",
+            account, location, kind, app, cluster, criteria);
+
+    List<KubernetesManifest> manifests;
+    try {
+      manifests =
+          requestQueue.execute(
+              account,
+              () ->
+                  manifestProvider.getClusterAndSortAscending(
+                      account, location, kind, cluster, app, criteria.getSort()));
+    } catch (Throwable t) {
+      log.warn("Failed to read {}", request, t);
+      return null;
+    }
+
+    try {
+      switch (criteria) {
+        case oldest:
+        case smallest:
+          return KubernetesCoordinates.fromManifest(manifests.get(0));
+        case newest:
+        case largest:
+          return KubernetesCoordinates.fromManifest(manifests.get(manifests.size() - 1));
+        case second_newest:
+          return KubernetesCoordinates.fromManifest(manifests.get(manifests.size() - 2));
+        default:
+          throw new IllegalArgumentException("Unknown criteria: " + criteria);
+      }
+    } catch (IndexOutOfBoundsException e) {
+      throw new NotFoundException("No manifests matching " + request + " found");
+    }
+  }
+
+  @RequestMapping(
+      value = "/{account:.+}/{location:.+}/{kind:.+}/cluster/{app:.+}/{cluster:.+}",
+      method = RequestMethod.GET)
+  List<KubernetesCoordinates> getClusterManifestCoordinates(
+      @PathVariable String account,
+      @PathVariable String location,
+      @PathVariable String kind,
+      @PathVariable String app,
+      @PathVariable String cluster) {
+    final String request =
+        String.format(
+            "(account: %s, location: %s, kind: %s, app %s, cluster: %s)",
+            account, location, kind, app, cluster);
+
+    List<KubernetesCoordinates> coordinates;
+    try {
+      coordinates =
+          requestQueue.execute(
+              account,
+              () ->
+                  manifestProvider.getClusterManifestCoordinates(
+                      account, location, kind, app, cluster));
+    } catch (Throwable t) {
+      log.warn("Failed to read {}", request, t);
+      return null;
+    }
+
+    return coordinates;
+  }
+
+  enum Criteria {
+    oldest(Sort.AGE),
+    newest(Sort.AGE),
+    second_newest(Sort.AGE),
+    largest(Sort.SIZE),
+    smallest(Sort.SIZE);
+
+    @Getter private final Sort sort;
+
+    Criteria(Sort sort) {
+      this.sort = sort;
+    }
+  }
+}
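The Criteria handling above picks an element out of a list that getClusterAndSortAscending has already sorted ascending by the criteria's sort key (age or size); a standalone sketch of the index arithmetic (the class and method names below are ours, for illustration only):

    import java.util.List;

    class CriteriaSketch {
      // The manifest list is sorted ascending (oldest/smallest first), so
      // "oldest" is index 0 and "newest" is the last index; fewer elements
      // than the criteria needs surfaces as IndexOutOfBoundsException,
      // which the controller converts into a NotFoundException.
      static String pick(List<String> sortedAscending, String criteria) {
        switch (criteria) {
          case "oldest":
          case "smallest":
            return sortedAscending.get(0);
          case "newest":
          case "largest":
            return sortedAscending.get(sortedAscending.size() - 1);
          case "second_newest":
            return sortedAscending.get(sortedAscending.size() - 2);
          default:
            throw new IllegalArgumentException("Unknown criteria: " + criteria);
        }
      }

      public static void main(String[] args) {
        List<String> rs = List.of("app-v001", "app-v002", "app-v003");
        System.out.println(pick(rs, "second_newest")); // app-v002
      }
    }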
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/PodController.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/PodController.java
new file mode 100644
index 00000000000..0d487a1c39a
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/PodController.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2022 Salesforce.com, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.controllers;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.provider.view.KubernetesJobProvider;
+import io.swagger.v3.oas.annotations.Operation;
+import io.swagger.v3.oas.annotations.Parameter;
+import java.util.Collections;
+import java.util.Map;
+import org.springframework.security.access.prepost.PreAuthorize;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RestController;
+
+@RestController
+@RequestMapping("/applications/{application}/kubernetes/pods")
+public class PodController {
+  private final KubernetesJobProvider kubernetesJobProvider;
+
+  public PodController(KubernetesJobProvider jobProvider) {
+    this.kubernetesJobProvider = jobProvider;
+  }
+
+  @PreAuthorize(
+      "hasPermission(#application, 'APPLICATION', 'READ') and hasPermission(#account, 'ACCOUNT', 'READ')")
+  @Operation(
+      summary = "Collect a file from a pod",
+      description = "Collects the file result of a pod.")
+  @RequestMapping(
+      value = "/{account}/{namespace}/{podName}/{fileName:.+}",
+      method = RequestMethod.GET)
+  Map<String, Object> getFileContents(
+      @Parameter(description = "Application name", required = true) @PathVariable
+          String application,
+      @Parameter(description = "Account the job was created by", required = true) @PathVariable
+          String account,
+      @Parameter(description = "Namespace in which the pod is running", required = true)
+          @PathVariable
+          String namespace,
+      @Parameter(description = "Unique identifier of pod being looked up", required = true)
+          @PathVariable
+          String podName,
+      @Parameter(description = "File name to look up", required = true) @PathVariable
+          String fileName) {
+    Map<String, Object> results =
+        kubernetesJobProvider.getFileContentsFromPod(account, namespace, podName, fileName);
+
+    if (results != null) {
+      return results;
+    }
+
+    return Collections.emptyMap();
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/RawResourceController.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/RawResourceController.java
new file mode 100644
index 00000000000..232f4923336
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/controllers/RawResourceController.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.controllers;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesRawResource;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesRawResourceProvider;
+import java.util.ArrayList;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.security.access.prepost.PostAuthorize;
+import org.springframework.security.access.prepost.PreAuthorize;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RestController;
+
+@RestController
+@RequestMapping(produces = MediaType.APPLICATION_JSON_VALUE)
+class RawResourceController {
+  private final KubernetesRawResourceProvider rawResourceProvider;
+
+  @Autowired
+  public RawResourceController(KubernetesRawResourceProvider rawResourceProvider) {
+    this.rawResourceProvider = rawResourceProvider;
+  }
+
+  @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ')")
+  @PostAuthorize("@authorizationSupport.filterForAccounts(returnObject)")
+  @RequestMapping(value = "/applications/{application}/rawResources", method = RequestMethod.GET)
+  List<KubernetesRawResource> list(@PathVariable String application) {
+    return new ArrayList<>(rawResourceProvider.getApplicationRawResources(application));
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/artifact/KubernetesCleanupArtifactsConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/artifact/KubernetesCleanupArtifactsConverter.java
new file mode 100644
index 00000000000..aceb1d4ef8f
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/artifact/KubernetesCleanupArtifactsConverter.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.artifact;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.CLEANUP_ARTIFACTS;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.ArtifactProvider;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.artifact.KubernetesCleanupArtifactsDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.artifact.KubernetesCleanupArtifactsOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(CLEANUP_ARTIFACTS)
+@Component
+public class KubernetesCleanupArtifactsConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Autowired ArtifactProvider artifactProvider;
+
+  @Override
+  public AtomicOperation<OperationResult> convertOperation(Map<String, Object> input) {
+    return new KubernetesCleanupArtifactsOperation(convertDescription(input), artifactProvider);
+  }
+
+  @Override
+  public KubernetesCleanupArtifactsDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesCleanupArtifactsDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/job/KubernetesRunJobOperationConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/job/KubernetesRunJobOperationConverter.java
new file mode 100644
index 00000000000..0473ea5a229
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/job/KubernetesRunJobOperationConverter.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.job;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RUN_JOB;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ResourceVersioner;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.job.KubernetesRunJobOperationDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubernetesRunJobDeploymentResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubernetesRunJobOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(RUN_JOB)
+@Component
+public class KubernetesRunJobOperationConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  private final ResourceVersioner resourceVersioner;
+
+  @Autowired
+  public KubernetesRunJobOperationConverter(ResourceVersioner resourceVersioner) {
+    this.resourceVersioner = resourceVersioner;
+  }
+
+  @Override
+  public AtomicOperation<KubernetesRunJobDeploymentResult> convertOperation(
+      Map<String, Object> input) {
+    return new KubernetesRunJobOperation(convertDescription(input), resourceVersioner);
+  }
+
+  @Override
+  public KubernetesRunJobOperationDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesRunJobOperationDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDeleteManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDeleteManifestConverter.java
new file mode 100644
index 00000000000..f79be96a05e
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDeleteManifestConverter.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DELETE_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeleteManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesDeleteManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(DELETE_MANIFEST)
+@Component
+public class KubernetesDeleteManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<OperationResult> convertOperation(Map<String, Object> input) {
+    return new KubernetesDeleteManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesDeleteManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesDeleteManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDeployManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDeployManifestConverter.java
new file mode 100644
index 00000000000..2165a7362ff
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDeployManifestConverter.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DEPLOY_MANIFEST;
+
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ResourceVersioner;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeployManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesDeployManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(DEPLOY_MANIFEST)
+@Component
+public class KubernetesDeployManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+
+  private static final String KIND_VALUE_LIST = "list";
+  private static final String KIND_LIST_ITEMS_KEY = "items";
+
+  private final ResourceVersioner resourceVersioner;
+
+  @Autowired
+  public KubernetesDeployManifestConverter(
+      CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository,
+      ResourceVersioner resourceVersioner) {
+    this.setCredentialsRepository(credentialsRepository);
+    this.resourceVersioner = resourceVersioner;
+  }
+
+  @Override
+  public AtomicOperation<OperationResult> convertOperation(Map<String, Object> input) {
+    return new KubernetesDeployManifestOperation(convertDescription(input), resourceVersioner);
+  }
+
+  @Override
+  public KubernetesDeployManifestDescription convertDescription(Map<String, Object> input) {
+    KubernetesDeployManifestDescription mainDescription =
+        KubernetesAtomicOperationConverterHelper.convertDescription(
+            input, this, KubernetesDeployManifestDescription.class);
+    return convertListDescription(mainDescription);
+  }
+
+  /**
+   * If present, converts a KubernetesManifest of kind List into a list of KubernetesManifest
+   * objects.
+   *
+   * @param mainDescription deploy manifest description as received.
+   * @return updated description.
+   */
+  @SuppressWarnings("unchecked")
+  private KubernetesDeployManifestDescription convertListDescription(
+      KubernetesDeployManifestDescription mainDescription) {
+
+    if (mainDescription.getManifests() == null) {
+      return mainDescription;
+    }
+
+    List<KubernetesManifest> updatedManifestList =
+        mainDescription.getManifests().stream()
+            .flatMap(
+                singleManifest -> {
+                  if (singleManifest == null
+                      || Strings.isNullOrEmpty(singleManifest.getKindName())) {
+                    return Stream.of(singleManifest);
+                  }
+
+                  if (!singleManifest.getKindName().equalsIgnoreCase(KIND_VALUE_LIST)) {
+                    return Stream.of(updateNamespace(mainDescription, singleManifest));
+                  }
+
+                  Collection<Object> items =
+                      (Collection<Object>) singleManifest.get(KIND_LIST_ITEMS_KEY);
+
+                  if (items == null) {
+                    return Stream.of();
+                  }
+
+                  return items.stream()
+                      .map(
+                          i -> {
+                            KubernetesManifest manifest =
+                                getObjectMapper().convertValue(i, KubernetesManifest.class);
+                            return updateNamespace(mainDescription, manifest);
+                          });
+                })
+            .collect(Collectors.toList());
+
+    mainDescription.setManifests(updatedManifestList);
+
+    return mainDescription;
+  }
+
+  private KubernetesManifest updateNamespace(
+      KubernetesDeployManifestDescription description, KubernetesManifest manifest) {
+    KubernetesCredentials credentials = description.getCredentials().getCredentials();
+    if (!StringUtils.isBlank(description.getNamespaceOverride())
+        && credentials.getKindProperties(manifest.getKind()).isNamespaced()) {
+      manifest.setNamespace(description.getNamespaceOverride());
+    }
+    return manifest;
+  }
+}
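An aside on the converter above, not part of the change set: a deploy description whose manifest list contains a "kind: List" wrapper is flattened to the wrapper's items before the operation runs. A minimal sketch of that contract, using hypothetical item maps:

// Hypothetical input: a single "List" manifest wrapping two items.
KubernetesManifest list = new KubernetesManifest();
list.put("kind", "List");
list.put("items", java.util.List.of(deploymentAsMap, serviceAsMap)); // made-up item maps

// After convertDescription(...), getManifests() holds one KubernetesManifest per
// item (the List wrapper itself is dropped), and updateNamespace(...) has applied
// any namespaceOverride to kinds that are namespaced.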
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDisableManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDisableManifestConverter.java
new file mode 100644
index 00000000000..a4d457b83f6
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesDisableManifestConverter.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DISABLE_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesDisableManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(DISABLE_MANIFEST)
+@Component
+public class KubernetesDisableManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<OperationResult> convertOperation(Map<String, Object> input) {
+    return new KubernetesDisableManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesEnableDisableManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesEnableDisableManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesEnableManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesEnableManifestConverter.java
new file mode 100644
index 00000000000..02e31fe7aee
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesEnableManifestConverter.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.ENABLE_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesEnableManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(ENABLE_MANIFEST)
+@Component
+public class KubernetesEnableManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<OperationResult> convertOperation(Map<String, Object> input) {
+    return new KubernetesEnableManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesEnableDisableManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesEnableDisableManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesPatchManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesPatchManifestConverter.java
new file mode 100644
index 00000000000..1fc16c417b3
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesPatchManifestConverter.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PATCH_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesPatchManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesPatchManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@Component
+@KubernetesOperation(PATCH_MANIFEST)
+public class KubernetesPatchManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<OperationResult> convertOperation(Map<String, Object> input) {
+    return new KubernetesPatchManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesPatchManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesPatchManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesPauseRolloutManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesPauseRolloutManifestConverter.java
new file mode 100644
index 00000000000..4015e2abd8f
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesPauseRolloutManifestConverter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PAUSE_ROLLOUT_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesPauseRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesPauseRolloutManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(PAUSE_ROLLOUT_MANIFEST)
+@Component
+public class KubernetesPauseRolloutManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<Void> convertOperation(Map<String, Object> input) {
+    return new KubernetesPauseRolloutManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesPauseRolloutManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesPauseRolloutManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesResumeRolloutManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesResumeRolloutManifestConverter.java
new file mode 100644
index 00000000000..17ef3322119
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesResumeRolloutManifestConverter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESUME_ROLLOUT_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesResumeRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesResumeRolloutManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(RESUME_ROLLOUT_MANIFEST)
+@Component
+public class KubernetesResumeRolloutManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<Void> convertOperation(Map<String, Object> input) {
+    return new KubernetesResumeRolloutManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesResumeRolloutManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesResumeRolloutManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesRollingRestartManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesRollingRestartManifestConverter.java
new file mode 100644
index 00000000000..87b909ddc7e
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesRollingRestartManifestConverter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.ROLLING_RESTART_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesRollingRestartManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesRollingRestartManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(ROLLING_RESTART_MANIFEST)
+@Component
+public class KubernetesRollingRestartManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<Void> convertOperation(Map<String, Object> input) {
+    return new KubernetesRollingRestartManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesRollingRestartManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesRollingRestartManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesScaleManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesScaleManifestConverter.java
new file mode 100644
index 00000000000..ac19d522081
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesScaleManifestConverter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.SCALE_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesScaleManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesScaleManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(SCALE_MANIFEST)
+@Component
+public class KubernetesScaleManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<Void> convertOperation(Map<String, Object> input) {
+    return new KubernetesScaleManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesScaleManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesScaleManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesUndoRolloutManifestConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesUndoRolloutManifestConverter.java
new file mode 100644
index 00000000000..9e7de4c2b97
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/manifest/KubernetesUndoRolloutManifestConverter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.UNDO_ROLLOUT_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesUndoRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesUndoRolloutManifestOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(UNDO_ROLLOUT_MANIFEST)
+@Component
+public class KubernetesUndoRolloutManifestConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+  @Override
+  public AtomicOperation<Void> convertOperation(Map<String, Object> input) {
+    return new KubernetesUndoRolloutManifestOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesUndoRolloutManifestDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesUndoRolloutManifestDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/servergroup/KubernetesResizeServerGroupConverter.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/servergroup/KubernetesResizeServerGroupConverter.java
new file mode 100644
index 00000000000..2d048aa545d
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/converter/servergroup/KubernetesResizeServerGroupConverter.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.converter.servergroup;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESIZE_SERVER_GROUP;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters.KubernetesAtomicOperationConverterHelper;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.servergroup.KubernetesResizeServerGroupDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.servergroup.KubernetesResizeServerGroupOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(RESIZE_SERVER_GROUP)
+@Component
+public class KubernetesResizeServerGroupConverter
+    extends AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials> {
+
+  @Override
+  public AtomicOperation<Void> convertOperation(Map<String, Object> input) {
+    return new KubernetesResizeServerGroupOperation(convertDescription(input));
+  }
+
+  @Override
+  public KubernetesResizeServerGroupDescription convertDescription(Map<String, Object> input) {
+    return KubernetesAtomicOperationConverterHelper.convertDescription(
+        input, this, KubernetesResizeServerGroupDescription.class);
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/deploy/converters/KubernetesAtomicOperationConverterHelper.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/deploy/converters/KubernetesAtomicOperationConverterHelper.java
new file mode 100644
index 00000000000..b8b009f3495
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/deploy/converters/KubernetesAtomicOperationConverterHelper.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.netflix.spinnaker.clouddriver.kubernetes.deploy.converters;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsConverter;
+import java.util.Map;
+import org.apache.commons.lang3.StringUtils;
+
+public class KubernetesAtomicOperationConverterHelper {
+  public static <T extends KubernetesAtomicOperationDescription> T convertDescription(
+      Map<String, Object> input,
+      AbstractAtomicOperationsCredentialsConverter<KubernetesNamedAccountCredentials>
+          credentialsSupport,
+      Class<T> targetDescriptionType) {
+    String account = (String) input.get("account");
+    String removedAccount = (String) input.remove("credentials");
+    account = StringUtils.isNotEmpty(account) ? account : removedAccount;
+
+    // Save these to re-assign after ObjectMapper does its work.
+    KubernetesNamedAccountCredentials credentials =
+        credentialsSupport.getCredentialsObject(account);
+
+    T converted =
+        credentialsSupport
+            .getObjectMapper()
+            .copy()
+            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+            .convertValue(input, targetDescriptionType);
+
+    // Re-assign the credentials.
+    converted.setCredentials(credentials);
+    if (StringUtils.isNotEmpty(removedAccount)) {
+      input.put("credentials", removedAccount);
+      converted.setAccount(removedAccount);
+    }
+
+    return converted;
+  }
+}
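As a reading aid (illustrative, with made-up values), the account/credentials round trip in the helper above amounts to:

// input = {"credentials": "my-k8s-account", "manifests": [...]}   // hypothetical payload
// 1. "credentials" is removed from the map and remembered; "account" falls back to it when unset.
// 2. The remaining map is bound to targetDescriptionType with FAIL_ON_UNKNOWN_PROPERTIES
//    disabled, so unrecognized keys are ignored rather than rejected.
// 3. The resolved KubernetesNamedAccountCredentials and account name are re-attached to the
//    description, and "credentials" is restored on the input map.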
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/AccountResourcePropertyRegistry.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/AccountResourcePropertyRegistry.java
new file mode 100644
index 00000000000..cfcfd3c0b89
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/AccountResourcePropertyRegistry.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import static com.google.common.collect.ImmutableMap.toImmutableMap;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import java.util.Collection;
+import java.util.function.Function;
+import javax.annotation.Nonnull;
+import javax.annotation.ParametersAreNonnullByDefault;
+import org.springframework.stereotype.Component;
+
+@ParametersAreNonnullByDefault
+public class AccountResourcePropertyRegistry implements ResourcePropertyRegistry {
+  private final GlobalResourcePropertyRegistry globalResourcePropertyRegistry;
+  private final ImmutableMap<KubernetesKind, KubernetesResourceProperties> propertyMap;
+
+  private AccountResourcePropertyRegistry(
+      GlobalResourcePropertyRegistry globalResourcePropertyRegistry,
+      Collection<KubernetesResourceProperties> resourceProperties) {
+    this.globalResourcePropertyRegistry = globalResourcePropertyRegistry;
+    this.propertyMap =
+        resourceProperties.stream()
+            .collect(toImmutableMap(p -> p.getHandler().kind(), Function.identity()));
+  }
+
+  @Override
+  @Nonnull
+  public KubernetesResourceProperties get(KubernetesKind kind) {
+    KubernetesResourceProperties accountResult = propertyMap.get(kind);
+    if (accountResult != null) {
+      return accountResult;
+    }
+
+    return globalResourcePropertyRegistry.get(kind);
+  }
+
+  @Override
+  @Nonnull
+  public ImmutableCollection<KubernetesResourceProperties> values() {
+    return new ImmutableList.Builder<KubernetesResourceProperties>()
+        .addAll(globalResourcePropertyRegistry.values())
+        .addAll(propertyMap.values())
+        .build();
+  }
+
+  @Component
+  public static class Factory {
+    private final GlobalResourcePropertyRegistry globalResourcePropertyRegistry;
+
+    public Factory(GlobalResourcePropertyRegistry globalResourcePropertyRegistry) {
+      this.globalResourcePropertyRegistry = globalResourcePropertyRegistry;
+    }
+
+    public AccountResourcePropertyRegistry create(
+        Collection<KubernetesResourceProperties> resourceProperties) {
+      return new AccountResourcePropertyRegistry(
+          globalResourcePropertyRegistry, resourceProperties);
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/GlobalResourcePropertyRegistry.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/GlobalResourcePropertyRegistry.java
new file mode 100644
index 00000000000..14608219d4d
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/GlobalResourcePropertyRegistry.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import static com.google.common.collect.ImmutableMap.toImmutableMap;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler;
+import java.util.List;
+import javax.annotation.Nonnull;
+import javax.annotation.ParametersAreNonnullByDefault;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+@ParametersAreNonnullByDefault
+public class GlobalResourcePropertyRegistry implements ResourcePropertyRegistry {
+  private final ImmutableMap<KubernetesKind, KubernetesResourceProperties> globalProperties;
+  private ImmutableMap<KubernetesKind, KubernetesResourceProperties> crdProperties =
+      ImmutableMap.of();
+  private KubernetesResourceProperties defaultProperties;
+
+  @Autowired
+  public GlobalResourcePropertyRegistry(
+      List<KubernetesHandler> handlers,
+      KubernetesUnregisteredCustomResourceHandler defaultHandler) {
+    this.globalProperties =
+        handlers.stream()
+            .collect(
+                toImmutableMap(
+                    KubernetesHandler::kind,
+                    h -> new KubernetesResourceProperties(h, h.versioned())));
+    this.defaultProperties =
+        new KubernetesResourceProperties(defaultHandler, defaultHandler.versioned());
+  }
+
+  public void setDefaultHandler(
+      @Nonnull KubernetesUnregisteredCustomResourceHandler defaultHandler) {
+    this.defaultProperties =
+        new KubernetesResourceProperties(defaultHandler, defaultHandler.versioned());
+  }
+
+  public void updateCrdProperties(List<KubernetesHandler> handlers) {
+    this.crdProperties =
+        handlers.stream()
+            .collect(
+                toImmutableMap(
+                    KubernetesHandler::kind,
+                    h -> new KubernetesResourceProperties(h, h.versioned())));
+  }
+
+  @Override
+  @Nonnull
+  public KubernetesResourceProperties get(KubernetesKind kind) {
+    KubernetesResourceProperties result = globalProperties.get(kind);
+    if (result != null) {
+      return result;
+    }
+
+    result = crdProperties.get(kind);
+    if (result != null) {
+      return result;
+    }
+
+    return defaultProperties;
+  }
+
+  @Override
+  @Nonnull
+  public ImmutableCollection<KubernetesResourceProperties> values() {
+    return globalProperties.values();
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/JsonPatch.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/JsonPatch.java
new file mode 100644
index 00000000000..b8b5637e2e9
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/JsonPatch.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@Builder
+@NoArgsConstructor
+@AllArgsConstructor
+public class JsonPatch {
+  Op op;
+  String path;
+  Object value;
+
+  public enum Op {
+    replace,
+    add,
+    remove
+  }
+
+  /**
+   * Returns an escaped JSON path node for use in a JSON pointer as defined in RFC6901
+   *
+   * <pre>
+   * ~ is replaced by ~0
+   * / is replaced by ~1
+   * </pre>
+   *
+   * @param node a node to be used as part of a JSON pointer
+   * @return the node with escaped characters
+   * @see <a href="https://tools.ietf.org/html/rfc6901#section-3">RFC6901, section 3</a>
+   */
+  public static String escapeNode(String node) {
+    return node.replace("~", "~0").replace("/", "~1");
+  }
+}
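Illustrative usage of the escaping above (not part of the diff): Kubernetes label keys routinely contain a slash, which must be escaped before the key can appear in a JSON pointer:

String path = "/metadata/labels/" + JsonPatch.escapeNode("app.kubernetes.io/name");
// path == "/metadata/labels/app.kubernetes.io~1name"
JsonPatch patch =
    JsonPatch.builder().op(JsonPatch.Op.replace).path(path).value("nginx").build();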
diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescription.java
similarity index 76%
rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescription.java
rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescription.java
index 9db6a9c5800..112ae25ea55 100644
--- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescription.java
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescription.java
@@ -5,7 +5,7 @@
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
- *    http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@
 
 package com.netflix.spinnaker.clouddriver.kubernetes.description;
 
+import com.fasterxml.jackson.annotation.JsonProperty;
 import com.netflix.spinnaker.clouddriver.deploy.DeployDescription;
 import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
 import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable;
@@ -27,7 +28,15 @@
 @Data
 @AllArgsConstructor
 @NoArgsConstructor
-public class KubernetesAtomicOperationDescription implements DeployDescription, CredentialsNameable {
+public class KubernetesAtomicOperationDescription
+    implements DeployDescription, CredentialsNameable {
+  @JsonProperty("account") String account;
+  KubernetesNamedAccountCredentials credentials;
+
+  @Override
+  public boolean requiresApplicationRestriction() {
+    return false;
+  }
 }
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesCoordinates.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesCoordinates.java
new file mode 100644
index 00000000000..088192326ed
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesCoordinates.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import com.google.common.base.Splitter;
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.kork.annotations.FieldsAreNullableByDefault;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.List;
+import java.util.Objects;
+import javax.annotation.Nonnull;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.Builder;
+import lombok.Value;
+
+@NonnullByDefault
+@Value
+public class KubernetesCoordinates {
+  private final KubernetesKind kind;
+  private final String namespace;
+  private final String name;
+
+  @Builder(toBuilder = true)
+  @ParametersAreNullableByDefault
+  private KubernetesCoordinates(@Nonnull KubernetesKind kind, String namespace, String name) {
+    this.kind = Objects.requireNonNull(kind);
+    this.namespace = Strings.nullToEmpty(namespace);
+    this.name = Strings.nullToEmpty(name);
+  }
+
+  @FieldsAreNullableByDefault
+  public static class KubernetesCoordinatesBuilder {
+    @Nonnull private static final Splitter splitter = Splitter.on(' ').limit(3);
+
+    /**
+     * Given a full resource name of the type "kind name" (ex: "pod my-rs-v003-mnop"), parses out
+     * the kind and the name, and sets the corresponding fields on the builder.
+     *
+     * @param fullResourceName the full resource name
+     * @return this KubernetesCoordinatesBuilder object
+     * @throws IllegalArgumentException if the input string does not contain exactly two tokens
+     *     separated by a space
+     */
+    public KubernetesCoordinatesBuilder fullResourceName(String fullResourceName) {
+      List<String> parts = splitter.splitToList(fullResourceName);
+      if (parts.size() != 2) {
+        throw new IllegalArgumentException(
+            String.format(
+                "Expected a full resource name of the form <kind> <name>. Got: %s",
+                fullResourceName));
+      }
+      this.kind = KubernetesKind.fromString(parts.get(0));
+      this.name = parts.get(1);
+      return this;
+    }
+  }
+
+  /**
+   * Given a full KubernetesManifest object, parses out the kind, namespace, and name to create a
+   * corresponding KubernetesCoordinates object.
+   *
+   * @param manifest the manifest to parse
+   * @return the KubernetesCoordinates object
+   */
+  public static KubernetesCoordinates fromManifest(KubernetesManifest manifest) {
+    return KubernetesCoordinates.builder()
+        .kind(manifest.getKind())
+        .namespace(manifest.getNamespace())
+        .name(manifest.getName())
+        .build();
+  }
+}
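Illustrative usage of the builder above (not part of the diff):

KubernetesCoordinates coords =
    KubernetesCoordinates.builder()
        .fullResourceName("pod my-rs-v003-mnop") // sets kind and name
        .namespace("default")
        .build();
// coords.getKind() == KubernetesKind.fromString("pod"); coords.getName() == "my-rs-v003-mnop".
// Any input without exactly two space-separated tokens throws IllegalArgumentException.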
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPatchOptions.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPatchOptions.java
new file mode 100644
index 00000000000..ec0837fa231
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPatchOptions.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions.MergeStrategy.json;
+
+import lombok.Data;
+
+@Data
+public class KubernetesPatchOptions {
+  private MergeStrategy mergeStrategy;
+  private boolean record;
+
+  public static KubernetesPatchOptions json() {
+    // Lombok's @Data generates void setters, so build the instance in two steps.
+    KubernetesPatchOptions options = new KubernetesPatchOptions();
+    options.setMergeStrategy(json);
+    return options;
+  }
+
+  public enum MergeStrategy {
+    strategic,
+    json,
+    merge
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPodMetric.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPodMetric.java
new file mode 100644
index 00000000000..d3d7485e718
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPodMetric.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Map;
+import java.util.Optional;
+import javax.annotation.ParametersAreNullableByDefault;
+import lombok.*;
+
+@NonnullByDefault
+@Value
+public class KubernetesPodMetric {
+  private final String podName;
+  private final String namespace;
+  private final ImmutableList<ContainerMetric> containerMetrics;
+
+  @Builder
+  @ParametersAreNullableByDefault
+  public KubernetesPodMetric(
+      String podName, String namespace, Iterable<ContainerMetric> containerMetrics) {
+    this.podName = Strings.nullToEmpty(podName);
+    this.namespace = Strings.nullToEmpty(namespace);
+    this.containerMetrics =
+        Optional.ofNullable(containerMetrics)
+            .map(ImmutableList::copyOf)
+            .orElseGet(ImmutableList::of);
+  }
+
+  @JsonIgnoreProperties(ignoreUnknown = true)
+  @Value
+  public static class ContainerMetric {
+    private final String containerName;
+    private final ImmutableMap<String, Double> metrics;
+
+    @JsonCreator
+    @ParametersAreNullableByDefault
+    public ContainerMetric(
+        @JsonProperty("containerName") String containerName,
+        @JsonProperty("metrics") Map<String, Double> metrics) {
+      this.containerName = Strings.nullToEmpty(containerName);
+      this.metrics =
+          Optional.ofNullable(metrics).map(ImmutableMap::copyOf).orElseGet(ImmutableMap::of);
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesResourceProperties.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesResourceProperties.java
new file mode 100644
index 00000000000..e8802a80452
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesResourceProperties.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
+
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.CustomKubernetesResource;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CustomKubernetesHandlerFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import java.util.Objects;
+import javax.annotation.Nonnull;
+import lombok.Getter;
+
+@Getter
+public class KubernetesResourceProperties {
+  @Nonnull private final KubernetesHandler handler;
+  private final boolean versioned;
+
+  public KubernetesResourceProperties(@Nonnull KubernetesHandler handler, boolean versioned) {
+    this.handler = Objects.requireNonNull(handler);
+    this.versioned = versioned;
+  }
+
+  public static KubernetesResourceProperties fromCustomResource(
+      CustomKubernetesResource customResource) {
+    String deployPriority = customResource.getDeployPriority();
+    int deployPriorityValue;
+    if (Strings.isNullOrEmpty(deployPriority)) {
+      deployPriorityValue = WORKLOAD_CONTROLLER_PRIORITY.getValue();
+    } else {
+      try {
+        deployPriorityValue = Integer.parseInt(deployPriority);
+      } catch (NumberFormatException e) {
+        deployPriorityValue =
+            KubernetesHandler.DeployPriority.fromString(deployPriority).getValue();
+      }
+    }
+
+    KubernetesHandler handler =
+        CustomKubernetesHandlerFactory.create(
+            KubernetesKind.fromString(customResource.getKubernetesKind()),
+            SpinnakerKind.fromString(customResource.getSpinnakerKind()),
+            customResource.isVersioned(),
+            deployPriorityValue);
+
+    return new KubernetesResourceProperties(handler, customResource.isVersioned());
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesSpinnakerKindMap.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesSpinnakerKindMap.java
new file mode 100644
index 00000000000..632ffdf0104
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesSpinnakerKindMap.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSetMultimap;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesSpinnakerKindMap {
+
+  private final ImmutableMap<KubernetesKind, SpinnakerKind> kubernetesToSpinnaker;
+  private final ImmutableSetMultimap<SpinnakerKind, KubernetesKind> spinnakerToKubernetes;
+
+  public KubernetesSpinnakerKindMap(List<KubernetesHandler> handlers) {
+    ImmutableMap.Builder<KubernetesKind, SpinnakerKind> kubernetesToSpinnakerBuilder =
+        new ImmutableMap.Builder<>();
+    ImmutableSetMultimap.Builder<SpinnakerKind, KubernetesKind> spinnakerToKubernetesBuilder =
+        new ImmutableSetMultimap.Builder<>();
+    for (KubernetesHandler handler : handlers) {
+      SpinnakerKind spinnakerKind = handler.spinnakerKind();
+      KubernetesKind kubernetesKind = handler.kind();
+      kubernetesToSpinnakerBuilder.put(kubernetesKind, spinnakerKind);
+      spinnakerToKubernetesBuilder.put(spinnakerKind, kubernetesKind);
+    }
+    this.kubernetesToSpinnaker = kubernetesToSpinnakerBuilder.build();
+    this.spinnakerToKubernetes = spinnakerToKubernetesBuilder.build();
+  }
+
+  public SpinnakerKind translateKubernetesKind(KubernetesKind kubernetesKind) {
+    return kubernetesToSpinnaker.getOrDefault(kubernetesKind, SpinnakerKind.UNCLASSIFIED);
+  }
+
+  public ImmutableSet<KubernetesKind> translateSpinnakerKind(SpinnakerKind spinnakerKind) {
+    return spinnakerToKubernetes.get(spinnakerKind);
+  }
+
+  public ImmutableSet<KubernetesKind> allKubernetesKinds() {
+    return kubernetesToSpinnaker.keySet();
+  }
+
+  public Map<String, String> kubernetesToSpinnakerKindStringMap() {
+    return kubernetesToSpinnaker.entrySet().stream()
+        .filter(x -> !x.getKey().equals(KubernetesKind.NONE))
+        .collect(Collectors.toMap(x -> x.getKey().toString(), x -> x.getValue().toString()));
+  }
+}
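A sketch of the resulting mappings (illustrative; the exact set depends on the handlers registered at runtime):

// kindMap.translateKubernetesKind(KubernetesKind.fromString("replicaSet"))
//     -> SpinnakerKind.SERVER_GROUPS (via the stock ReplicaSet handler)
// kindMap.translateKubernetesKind(someUnhandledKind) -> SpinnakerKind.UNCLASSIFIED
// kindMap.translateSpinnakerKind(SpinnakerKind.SERVER_GROUPS)
//     -> every KubernetesKind whose handler reports SERVER_GROUPS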
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description; + +import com.google.common.collect.ImmutableCollection; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import javax.annotation.Nonnull; +import javax.annotation.ParametersAreNonnullByDefault; + +@ParametersAreNonnullByDefault +public interface ResourcePropertyRegistry { + @Nonnull + KubernetesResourceProperties get(KubernetesKind kind); + + @Nonnull + ImmutableCollection values(); +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/SpinnakerKind.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/SpinnakerKind.java new file mode 100644 index 00000000000..a624dd6f84f --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/SpinnakerKind.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description; + +import com.fasterxml.jackson.annotation.JsonCreator; +import java.util.Arrays; + +public enum SpinnakerKind { + INSTANCES("instances"), + CONFIGS("configs"), + SERVER_GROUPS("serverGroups"), + LOAD_BALANCERS("loadBalancers"), + SECURITY_GROUPS("securityGroups"), + SERVER_GROUP_MANAGERS("serverGroupManagers"), + UNCLASSIFIED("unclassified"); + + private final String id; + + SpinnakerKind(String id) { + this.id = id; + } + + @Override + public String toString() { + return id; + } + + @JsonCreator + public static SpinnakerKind fromString(String name) { + return Arrays.stream(values()) + .filter(k -> k.toString().equalsIgnoreCase(name)) + .findFirst() + .orElse(UNCLASSIFIED); + } +} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/artifact/KubernetesCleanupArtifactsDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/artifact/KubernetesCleanupArtifactsDescription.java similarity index 77% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/artifact/KubernetesCleanupArtifactsDescription.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/artifact/KubernetesCleanupArtifactsDescription.java index e25c71300a1..5d2280ea159 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/artifact/KubernetesCleanupArtifactsDescription.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/artifact/KubernetesCleanupArtifactsDescription.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. 
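The `SpinnakerKind.fromString` contract above (case-insensitive, never throws) in one sketch, not part of this diff:

```java
SpinnakerKind a = SpinnakerKind.fromString("serverGroups");  // SERVER_GROUPS
SpinnakerKind b = SpinnakerKind.fromString("SERVERGROUPS");  // SERVER_GROUPS (case-insensitive)
SpinnakerKind c = SpinnakerKind.fromString("no-such-kind");  // UNCLASSIFIED, no exception
```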
 * You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,20 +15,21 @@
 *
 */
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.artifact;
+package com.netflix.spinnaker.clouddriver.kubernetes.description.artifact;
 
 import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import java.util.HashSet;
+import java.util.Set;
 import lombok.AllArgsConstructor;
 import lombok.Data;
+import lombok.EqualsAndHashCode;
 import lombok.NoArgsConstructor;
-
-import java.util.HashSet;
-import java.util.Set;
-
 @Data
 @AllArgsConstructor
 @NoArgsConstructor
+@EqualsAndHashCode(callSuper = true)
 public class KubernetesCleanupArtifactsDescription extends KubernetesAtomicOperationDescription {
   Set<KubernetesManifest> manifests = new HashSet<>();
 }
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/job/KubernetesRunJobOperationDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/job/KubernetesRunJobOperationDescription.java
new file mode 100644
index 00000000000..fa8c0114ccb
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/job/KubernetesRunJobOperationDescription.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.job;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class KubernetesRunJobOperationDescription extends KubernetesAtomicOperationDescription {
+  String application;
+  String namespace = "";
+  KubernetesManifest manifest;
+  List<Artifact> requiredArtifacts;
+  List<Artifact> optionalArtifacts;
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApiGroup.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApiGroup.java
new file mode 100644
index 00000000000..ac633807d89
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApiGroup.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2020 Google, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.collect.ImmutableSet; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode +public class KubernetesApiGroup { + // from https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/ + public static final KubernetesApiGroup NONE = new KubernetesApiGroup(""); + public static final KubernetesApiGroup CORE = new KubernetesApiGroup("core"); + public static final KubernetesApiGroup BATCH = new KubernetesApiGroup("batch"); + public static final KubernetesApiGroup APPS = new KubernetesApiGroup("apps"); + public static final KubernetesApiGroup EXTENSIONS = new KubernetesApiGroup("extensions"); + public static final KubernetesApiGroup STORAGE_K8S_IO = new KubernetesApiGroup("storage.k8s.io"); + public static final KubernetesApiGroup APIEXTENSIONS_K8S_IO = + new KubernetesApiGroup("apiextensions.k8s.io"); + public static final KubernetesApiGroup APIREGISTRATION_K8S_IO = + new KubernetesApiGroup("apiregistration.k8s.io"); + public static final KubernetesApiGroup AUTOSCALING = new KubernetesApiGroup("autoscaling"); + public static final KubernetesApiGroup ADMISSIONREGISTRATION_K8S_IO = + new KubernetesApiGroup("admissionregistration.k8s.io"); + public static final KubernetesApiGroup POLICY = new KubernetesApiGroup("policy"); + public static final KubernetesApiGroup SCHEDULING_K8S_IO = + new KubernetesApiGroup("scheduling.k8s.io"); + public static final KubernetesApiGroup SETTINGS_K8S_IO = + new KubernetesApiGroup("settings.k8s.io"); + public static final KubernetesApiGroup AUTHORIZATION_K8S_IO = + new KubernetesApiGroup("authorization.k8s.io"); + public static final KubernetesApiGroup AUTHENTICATION_K8S_IO = + new KubernetesApiGroup("authentication.k8s.io"); + public static final KubernetesApiGroup RBAC_AUTHORIZATION_K8S_IO = + new KubernetesApiGroup("rbac.authorization.k8s.io"); + public static final KubernetesApiGroup CERTIFICATES_K8S_IO = + new KubernetesApiGroup("certificates.k8s.io"); + public static final KubernetesApiGroup NETWORKING_K8S_IO = + new KubernetesApiGroup("networking.k8s.io"); + + @Nonnull private final String name; + + // including NONE since it seems like any resource without an api group would have to be native + private static final ImmutableSet NATIVE_GROUPS = + ImmutableSet.of( + CORE, + BATCH, + APPS, + EXTENSIONS, + STORAGE_K8S_IO, + APIEXTENSIONS_K8S_IO, + APIREGISTRATION_K8S_IO, + AUTOSCALING, + ADMISSIONREGISTRATION_K8S_IO, + POLICY, + SCHEDULING_K8S_IO, + SETTINGS_K8S_IO, + AUTHORIZATION_K8S_IO, + AUTHENTICATION_K8S_IO, + RBAC_AUTHORIZATION_K8S_IO, + CERTIFICATES_K8S_IO, + NETWORKING_K8S_IO, + NONE); + + private KubernetesApiGroup(@Nonnull String name) { + this.name = name.toLowerCase(); + } + + @Override + @JsonValue + 
public String toString() { + return name; + } + + public boolean isNativeGroup() { + return NATIVE_GROUPS.contains(this); + } + + @JsonCreator + @Nonnull + public static KubernetesApiGroup fromString(@Nullable String name) { + if (name == null) { + return KubernetesApiGroup.NONE; + } + return new KubernetesApiGroup(name); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApiVersion.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApiVersion.java new file mode 100644 index 00000000000..776b35875fd --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApiVersion.java @@ -0,0 +1,71 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@EqualsAndHashCode +public class KubernetesApiVersion { + public static final KubernetesApiVersion V1 = new KubernetesApiVersion("v1"); + public static final KubernetesApiVersion EXTENSIONS_V1BETA1 = + new KubernetesApiVersion("extensions/v1beta1"); + public static final KubernetesApiVersion NETWORKING_K8S_IO_V1 = + new KubernetesApiVersion("networking.k8s.io/v1"); + public static final KubernetesApiVersion NETWORKING_K8S_IO_V1BETA1 = + new KubernetesApiVersion("networking.k8s.io/v1beta1"); + public static final KubernetesApiVersion APPS_V1 = new KubernetesApiVersion("apps/v1"); + public static final KubernetesApiVersion BATCH_V1 = new KubernetesApiVersion("batch/v1"); + public static final KubernetesApiVersion NONE = new KubernetesApiVersion(""); + + @Nonnull private final String name; + @Getter @Nonnull @EqualsAndHashCode.Exclude private final KubernetesApiGroup apiGroup; + + private KubernetesApiVersion(@Nonnull String name) { + this.name = name.toLowerCase(); + this.apiGroup = parseApiGroup(this.name); + } + + @Override + @JsonValue + public String toString() { + return name; + } + + @Nonnull + private static KubernetesApiGroup parseApiGroup(@Nonnull String name) { + int index = name.indexOf('/'); + if (index > 0) { + return KubernetesApiGroup.fromString(name.substring(0, index)); + } + return KubernetesApiGroup.NONE; + } + + @JsonCreator + @Nonnull + public static KubernetesApiVersion fromString(@Nullable String name) { + if (name == null) { + return KubernetesApiVersion.NONE; + } + return new KubernetesApiVersion(name); + } +} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesApplicationProperties.java 
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApplicationProperties.java similarity index 87% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesApplicationProperties.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApplicationProperties.java index 30bebd80fd6..3842a6a2c92 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesApplicationProperties.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesApplicationProperties.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; import lombok.AllArgsConstructor; import lombok.Builder; diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesCachingProperties.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesCachingProperties.java new file mode 100644 index 00000000000..f60b0032134 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesCachingProperties.java @@ -0,0 +1,41 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
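How the `KubernetesApiVersion`/`KubernetesApiGroup` pair added above splits a version string, as a minimal sketch (not part of this diff):

```java
KubernetesApiVersion appsV1 = KubernetesApiVersion.fromString("apps/v1");
appsV1.getApiGroup();                 // the "apps" group
appsV1.getApiGroup().isNativeGroup(); // true: "apps" is in NATIVE_GROUPS

// No '/' means no group: core versions parse to KubernetesApiGroup.NONE.
KubernetesApiVersion v1 = KubernetesApiVersion.fromString("v1");
v1.getApiGroup(); // KubernetesApiGroup.NONE

// Anything not in NATIVE_GROUPS is treated as a custom (CRD) group.
KubernetesApiGroup calico = KubernetesApiGroup.fromString("crd.projectcalico.org");
calico.isNativeGroup(); // false
```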
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; + +@NonnullByDefault +@Value +public class KubernetesCachingProperties { + /** if true, then the kubernetes manifest will not be cached */ + private final boolean ignore; + + /** this stores the application name for a kubernetes manifest */ + private final String application; + + @Builder + @ParametersAreNullableByDefault + private KubernetesCachingProperties(boolean ignore, String application) { + this.ignore = ignore; + this.application = Strings.nullToEmpty(application); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesDeleteManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesDeleteManifestDescription.java new file mode 100644 index 00000000000..f9e7f63ce46 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesDeleteManifestDescription.java @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
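A quick sketch (not part of this diff) of the null-tolerant builder on `KubernetesCachingProperties` above:

```java
// The application name may come from a missing annotation; the builder
// normalizes null to "" so downstream code never sees a null value.
KubernetesCachingProperties props =
    KubernetesCachingProperties.builder().ignore(false).application(null).build();
props.getApplication(); // ""
props.isIgnore();       // false
```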
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesDeleteManifestDescription extends KubernetesAtomicOperationDescription { + private Map options; + private String manifestName; + private String location; + private List kinds = new ArrayList<>(); + private KubernetesSelectorList labelSelectors = new KubernetesSelectorList(); + + @JsonIgnore + public boolean isDynamic() { + return Strings.isNullOrEmpty(manifestName); + } + + public List getAllCoordinates() { + return kinds.stream() + .map( + k -> + KubernetesCoordinates.builder() + .namespace(location) + .kind(KubernetesKind.fromString(k)) + .build()) + .collect(Collectors.toList()); + } + + @JsonIgnore + public KubernetesCoordinates getPointCoordinates() { + return KubernetesCoordinates.builder() + .namespace(location) + .fullResourceName(manifestName) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesDeployManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesDeployManifestDescription.java new file mode 100644 index 00000000000..8a75f3cd480 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesDeployManifestDescription.java @@ -0,0 +1,70 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
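The two deletion modes of `KubernetesDeleteManifestDescription` above, sketched (not part of this diff):

```java
KubernetesDeleteManifestDescription description = new KubernetesDeleteManifestDescription();
description.setLocation("default");

// Static mode: a concrete resource is named, so isDynamic() is false and the
// operation targets a single point coordinate.
description.setManifestName("replicaSet my-app-v003");
description.isDynamic();           // false
description.getPointCoordinates(); // kind=replicaSet, name=my-app-v003, namespace=default

// Dynamic mode: no manifestName, so every listed kind becomes a coordinate.
description.setManifestName(null);
description.setKinds(java.util.List.of("replicaSet", "deployment"));
description.isDynamic();         // true
description.getAllCoordinates(); // one KubernetesCoordinates per kind
```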
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import com.netflix.spinnaker.moniker.Moniker;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class KubernetesDeployManifestDescription extends KubernetesAtomicOperationDescription {
+  @Deprecated private KubernetesManifest manifest;
+  private List<KubernetesManifest> manifests;
+  private Moniker moniker;
+  private List<Artifact> requiredArtifacts;
+  private List<Artifact> optionalArtifacts;
+  private Boolean versioned;
+  private Source source;
+  private Artifact manifestArtifact;
+  private String namespaceOverride;
+  private boolean enableArtifactBinding = true;
+
+  private boolean enableTraffic = true;
+  private List<String> services;
+  private Strategy strategy;
+  private KubernetesSelectorList labelSelectors = new KubernetesSelectorList();
+  private boolean skipSpecTemplateLabels = false;
+
+  /**
+   * If false, and using (non-empty) label selectors, fail if a deploy manifest operation doesn't
+   * deploy anything. If a particular deploy manifest stage intentionally specifies label selectors
+   * that none of the resources satisfy, set this to true to allow the stage to succeed.
+   */
+  private boolean allowNothingSelected = false;
+
+  public boolean isBlueGreen() {
+    return Strategy.RED_BLACK.equals(this.strategy) || Strategy.BLUE_GREEN.equals(this.strategy);
+  }
+
+  public enum Source {
+    artifact,
+    text
+  }
+
+  public enum Strategy {
+    RED_BLACK,
+    BLUE_GREEN,
+    HIGHLANDER,
+    NONE
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesEnableDisableManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesEnableDisableManifestDescription.java
new file mode 100644
index 00000000000..e8c9d3c27ed
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesEnableDisableManifestDescription.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
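Since `RED_BLACK` and `BLUE_GREEN` coexist in the `Strategy` enum above, a small sketch (not part of this diff) of how they converge:

```java
KubernetesDeployManifestDescription description = new KubernetesDeployManifestDescription();

// The legacy spelling and the new one drive the same blue/green path.
description.setStrategy(KubernetesDeployManifestDescription.Strategy.RED_BLACK);
description.isBlueGreen(); // true
description.setStrategy(KubernetesDeployManifestDescription.Strategy.BLUE_GREEN);
description.isBlueGreen(); // true

description.setStrategy(KubernetesDeployManifestDescription.Strategy.HIGHLANDER);
description.isBlueGreen(); // false
```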
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public final class KubernetesEnableDisableManifestDescription + extends KubernetesManifestOperationDescription { + private int targetPercentage = 100; + // optional: can be inferred from the annotations as well + @Nonnull private ImmutableList loadBalancers = ImmutableList.of(); + + @Nonnull + public KubernetesEnableDisableManifestDescription setLoadBalancers( + @Nullable List loadBalancers) { + this.loadBalancers = + Optional.ofNullable(loadBalancers).map(ImmutableList::copyOf).orElseGet(ImmutableList::of); + return this; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKind.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKind.java new file mode 100644 index 00000000000..f5d31a5d4cc --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKind.java @@ -0,0 +1,169 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
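The defensive setter on `KubernetesEnableDisableManifestDescription` above, sketched (not part of this diff):

```java
KubernetesEnableDisableManifestDescription description =
    new KubernetesEnableDisableManifestDescription();

// null is normalized to an empty ImmutableList, so getLoadBalancers() can be
// iterated without a null check; non-null input is defensively copied.
description.setLoadBalancers(null);
description.getLoadBalancers().isEmpty(); // true
description.getTargetPercentage();        // 100 by default
```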
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.base.Splitter; +import com.google.common.collect.Iterators; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinition; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import javax.annotation.Nullable; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@EqualsAndHashCode(onlyExplicitlyIncluded = true) +@NonnullByDefault +public class KubernetesKind { + private static final Splitter QUALIFIED_KIND_SPLITTER = Splitter.on('.').limit(2); + + private static final Map aliasMap = new ConcurrentHashMap<>(); + + public static final KubernetesKind API_SERVICE = + createWithAlias("apiService", null, KubernetesApiGroup.APIREGISTRATION_K8S_IO); + public static final KubernetesKind CLUSTER_ROLE = + createWithAlias("clusterRole", null, KubernetesApiGroup.RBAC_AUTHORIZATION_K8S_IO); + public static final KubernetesKind CLUSTER_ROLE_BINDING = + createWithAlias("clusterRoleBinding", null, KubernetesApiGroup.RBAC_AUTHORIZATION_K8S_IO); + public static final KubernetesKind CONFIG_MAP = + createWithAlias("configMap", "cm", KubernetesApiGroup.CORE); + public static final KubernetesKind CONTROLLER_REVISION = + createWithAlias("controllerRevision", null, KubernetesApiGroup.APPS); + public static final KubernetesKind CUSTOM_RESOURCE_DEFINITION = + createWithAlias("customResourceDefinition", "crd", KubernetesApiGroup.EXTENSIONS); + public static final KubernetesKind CRON_JOB = + createWithAlias("cronJob", null, KubernetesApiGroup.BATCH); + public static final KubernetesKind CSI_DRIVERS = + createWithAlias("csiDriver", null, KubernetesApiGroup.STORAGE_K8S_IO); + public static final KubernetesKind CSI_NODES = + createWithAlias("csiNode", null, KubernetesApiGroup.STORAGE_K8S_IO); + public static final KubernetesKind DAEMON_SET = + createWithAlias("daemonSet", "ds", KubernetesApiGroup.APPS); + public static final KubernetesKind DEPLOYMENT = + createWithAlias("deployment", "deploy", KubernetesApiGroup.APPS); + public static final KubernetesKind EVENT = + createWithAlias("event", null, KubernetesApiGroup.CORE); + public static final KubernetesKind HORIZONTAL_POD_AUTOSCALER = + createWithAlias("horizontalpodautoscaler", "hpa", KubernetesApiGroup.AUTOSCALING); + public static final KubernetesKind INGRESS = + createWithAlias("ingress", null, KubernetesApiGroup.NETWORKING_K8S_IO); + public static final KubernetesKind JOB = createWithAlias("job", null, KubernetesApiGroup.BATCH); + public static final KubernetesKind LIMIT_RANGE = + createWithAlias("limitRange", null, KubernetesApiGroup.NONE); + public static final KubernetesKind MUTATING_WEBHOOK_CONFIGURATION = + createWithAlias( + "mutatingWebhookConfiguration", null, KubernetesApiGroup.ADMISSIONREGISTRATION_K8S_IO); + public static final KubernetesKind NAMESPACE = + createWithAlias("namespace", "ns", KubernetesApiGroup.CORE); + public static final KubernetesKind NETWORK_POLICY = + createWithAlias("networkPolicy", "netpol", KubernetesApiGroup.NETWORKING_K8S_IO); + public static final KubernetesKind PERSISTENT_VOLUME = + createWithAlias("persistentVolume", "pv", KubernetesApiGroup.CORE); + public static final KubernetesKind PERSISTENT_VOLUME_CLAIM = + createWithAlias("persistentVolumeClaim", "pvc", 
KubernetesApiGroup.CORE); + public static final KubernetesKind POD = createWithAlias("pod", "po", KubernetesApiGroup.CORE); + public static final KubernetesKind POD_PRESET = + createWithAlias("podPreset", null, KubernetesApiGroup.SETTINGS_K8S_IO); + public static final KubernetesKind POD_SECURITY_POLICY = + createWithAlias("podSecurityPolicy", null, KubernetesApiGroup.POLICY); + public static final KubernetesKind POD_DISRUPTION_BUDGET = + createWithAlias("podDisruptionBudget", null, KubernetesApiGroup.POLICY); + public static final KubernetesKind REPLICA_SET = + createWithAlias("replicaSet", "rs", KubernetesApiGroup.APPS); + public static final KubernetesKind ROLE = + createWithAlias("role", null, KubernetesApiGroup.RBAC_AUTHORIZATION_K8S_IO); + public static final KubernetesKind ROLE_BINDING = + createWithAlias("roleBinding", null, KubernetesApiGroup.RBAC_AUTHORIZATION_K8S_IO); + public static final KubernetesKind SECRET = + createWithAlias("secret", null, KubernetesApiGroup.CORE); + public static final KubernetesKind SERVICE = + createWithAlias("service", "svc", KubernetesApiGroup.CORE); + public static final KubernetesKind SERVICE_ACCOUNT = + createWithAlias("serviceAccount", "sa", KubernetesApiGroup.CORE); + public static final KubernetesKind STATEFUL_SET = + createWithAlias("statefulSet", null, KubernetesApiGroup.APPS); + public static final KubernetesKind STORAGE_CLASS = + createWithAlias("storageClass", "sc", KubernetesApiGroup.STORAGE_K8S_IO); + public static final KubernetesKind VALIDATING_WEBHOOK_CONFIGURATION = + createWithAlias( + "validatingWebhookConfiguration", null, KubernetesApiGroup.ADMISSIONREGISTRATION_K8S_IO); + + // special kind that should never be assigned to a manifest, used only to represent objects whose + // kind is not in spinnaker's registry + public static final KubernetesKind NONE = createWithAlias("none", null, KubernetesApiGroup.NONE); + + private final String name; + @EqualsAndHashCode.Include private final String lcName; + @Getter private final KubernetesApiGroup apiGroup; + @EqualsAndHashCode.Include @Nullable private final KubernetesApiGroup customApiGroup; + + private KubernetesKind(String name, @Nullable KubernetesApiGroup apiGroup) { + this.name = name; + this.lcName = name.toLowerCase(); + this.apiGroup = apiGroup == null ? 
KubernetesApiGroup.NONE : apiGroup; + if (this.apiGroup.isNativeGroup()) { + this.customApiGroup = null; + } else { + this.customApiGroup = apiGroup; + } + } + + private static KubernetesKind createWithAlias( + String name, @Nullable String alias, @Nullable KubernetesApiGroup apiGroup) { + KubernetesKind kind = new KubernetesKind(name, apiGroup); + aliasMap.put(kind, kind); + if (alias != null) { + aliasMap.put(new KubernetesKind(alias, apiGroup), kind); + } + return kind; + } + + public static KubernetesKind from(@Nullable String name, @Nullable KubernetesApiGroup apiGroup) { + if (name == null || name.isEmpty()) { + return KubernetesKind.NONE; + } + KubernetesKind result = new KubernetesKind(name, apiGroup); + return aliasMap.getOrDefault(result, result); + } + + public static KubernetesKind fromCustomResourceDefinition(V1beta1CustomResourceDefinition crd) { + return from( + crd.getSpec().getNames().getKind(), + KubernetesApiGroup.fromString(crd.getSpec().getGroup())); + } + + @JsonCreator + public static KubernetesKind fromString(String qualifiedKind) { + Iterator parts = QUALIFIED_KIND_SPLITTER.split(qualifiedKind).iterator(); + String kindName = parts.next(); + String apiGroup = Iterators.getNext(parts, null); + return from(kindName, KubernetesApiGroup.fromString(apiGroup)); + } + + @Override + @JsonValue + public String toString() { + if (apiGroup.isNativeGroup()) { + return name; + } + return name + "." + apiGroup.toString(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindProperties.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindProperties.java new file mode 100644 index 00000000000..cf9e7808120 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindProperties.java @@ -0,0 +1,103 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
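Putting the `KubernetesKind` lookup rules above together, a minimal sketch (not part of this diff):

```java
// kubectl-style aliases and case differences resolve to the canonical kind.
KubernetesKind byAlias = KubernetesKind.fromString("rs");
KubernetesKind byName = KubernetesKind.fromString("ReplicaSet");
byAlias.equals(byName); // true: both resolve to replicaSet

// A qualified name keeps its (custom) API group, so a CRD named NetworkPolicy
// does not collide with the native networking.k8s.io kind.
KubernetesKind custom = KubernetesKind.fromString("NetworkPolicy.crd.projectcalico.org");
custom.equals(KubernetesKind.NETWORK_POLICY); // false
```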
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.google.common.collect.ImmutableList; +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinition; +import java.util.List; +import javax.annotation.Nonnull; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@EqualsAndHashCode +@ParametersAreNonnullByDefault +public class KubernetesKindProperties { + public static List getGlobalKindProperties() { + return ImmutableList.of( + new KubernetesKindProperties(KubernetesKind.API_SERVICE, false), + new KubernetesKindProperties(KubernetesKind.CLUSTER_ROLE, false), + new KubernetesKindProperties(KubernetesKind.CLUSTER_ROLE_BINDING, false), + new KubernetesKindProperties(KubernetesKind.CONFIG_MAP, true), + new KubernetesKindProperties(KubernetesKind.CONTROLLER_REVISION, true), + new KubernetesKindProperties(KubernetesKind.CUSTOM_RESOURCE_DEFINITION, false), + new KubernetesKindProperties(KubernetesKind.CRON_JOB, true), + new KubernetesKindProperties(KubernetesKind.DAEMON_SET, true), + new KubernetesKindProperties(KubernetesKind.DEPLOYMENT, true), + new KubernetesKindProperties(KubernetesKind.EVENT, true), + new KubernetesKindProperties(KubernetesKind.HORIZONTAL_POD_AUTOSCALER, true), + new KubernetesKindProperties(KubernetesKind.INGRESS, true), + new KubernetesKindProperties(KubernetesKind.JOB, true), + new KubernetesKindProperties(KubernetesKind.LIMIT_RANGE, true), + new KubernetesKindProperties(KubernetesKind.MUTATING_WEBHOOK_CONFIGURATION, false), + new KubernetesKindProperties(KubernetesKind.NAMESPACE, false), + new KubernetesKindProperties(KubernetesKind.NETWORK_POLICY, true), + new KubernetesKindProperties(KubernetesKind.PERSISTENT_VOLUME, false), + new KubernetesKindProperties(KubernetesKind.PERSISTENT_VOLUME_CLAIM, true), + new KubernetesKindProperties(KubernetesKind.POD, true), + new KubernetesKindProperties(KubernetesKind.POD_PRESET, true), + new KubernetesKindProperties(KubernetesKind.POD_SECURITY_POLICY, false), + new KubernetesKindProperties(KubernetesKind.POD_DISRUPTION_BUDGET, true), + new KubernetesKindProperties(KubernetesKind.REPLICA_SET, true), + new KubernetesKindProperties(KubernetesKind.ROLE, true), + new KubernetesKindProperties(KubernetesKind.ROLE_BINDING, true), + new KubernetesKindProperties(KubernetesKind.SECRET, true), + new KubernetesKindProperties(KubernetesKind.SERVICE, true), + new KubernetesKindProperties(KubernetesKind.SERVICE_ACCOUNT, true), + new KubernetesKindProperties(KubernetesKind.STATEFUL_SET, true), + new KubernetesKindProperties(KubernetesKind.STORAGE_CLASS, false), + new KubernetesKindProperties(KubernetesKind.VALIDATING_WEBHOOK_CONFIGURATION, false), + new KubernetesKindProperties(KubernetesKind.NONE, true)); + } + + @Nonnull @Getter private final KubernetesKind kubernetesKind; + @Getter private final boolean isNamespaced; + + private KubernetesKindProperties(KubernetesKind kubernetesKind, boolean isNamespaced) { + this.kubernetesKind = kubernetesKind; + this.isNamespaced = isNamespaced; + } + + @Nonnull + public static KubernetesKindProperties withDefaultProperties(KubernetesKind kubernetesKind) { + return new KubernetesKindProperties(kubernetesKind, true); + } + + @Nonnull + public static KubernetesKindProperties create( + KubernetesKind kubernetesKind, boolean isNamespaced) { + return new KubernetesKindProperties(kubernetesKind, isNamespaced); + } + + @Nonnull + public static KubernetesKindProperties 
fromCustomResourceDefinition( + V1beta1CustomResourceDefinition crd) { + return create( + KubernetesKind.fromCustomResourceDefinition(crd), + crd.getSpec().getScope().equalsIgnoreCase("namespaced")); + } + + public ResourceScope getResourceScope() { + return isNamespaced ? ResourceScope.NAMESPACE : ResourceScope.CLUSTER; + } + + public enum ResourceScope { + CLUSTER, + NAMESPACE + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifest.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifest.java new file mode 100644 index 00000000000..86ae89bb26e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifest.java @@ -0,0 +1,409 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import java.time.Instant; +import java.time.format.DateTimeParseException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Data; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Because this class maps the received Kubernetes manifest to an untyped map, it has no choice but + * to perform many unchecked casts when retrieving information. New logic should convert the + * manifest to an appropriate strongly-typed model object instead of adding more unchecked casts + * here. Methods that already perform unchecked casts are annotated to suppress them; please avoid + * adding more such methods if at all possible. 
+ */ +public class KubernetesManifest extends HashMap { + private static final Logger log = LoggerFactory.getLogger(KubernetesManifest.class); + private static final ObjectMapper mapper = new ObjectMapper(); + + @Nullable private transient KubernetesKind computedKind; + + @Override + public KubernetesManifest clone() { + return (KubernetesManifest) super.clone(); + } + + @JsonIgnore + @Nonnull + public KubernetesKind getKind() { + if (computedKind == null) { + computedKind = computeKind(); + } + return computedKind; + } + + @Nonnull + private KubernetesKind computeKind() { + // using ApiVersion here allows a translation from a kind of NetworkPolicy in the manifest to + // something + // like NetworkPolicy.crd.projectcalico.org for custom resources + String kindName = getKindName(); + KubernetesApiGroup kubernetesApiGroup; + if (this.containsKey("apiVersion")) { + kubernetesApiGroup = getApiVersion().getApiGroup(); + } else { + kubernetesApiGroup = null; + } + return KubernetesKind.from(kindName, kubernetesApiGroup); + } + + @JsonIgnore + public String getKindName() { + return Optional.ofNullable((String) get("kind")) + .orElseThrow(() -> MalformedManifestException.missingField(this, "kind")); + } + + @JsonIgnore + public void setKind(KubernetesKind kind) { + put("kind", kind.toString()); + computedKind = null; + } + + @JsonIgnore + public KubernetesApiVersion getApiVersion() { + return Optional.ofNullable((String) get("apiVersion")) + .map(KubernetesApiVersion::fromString) + .orElseThrow(() -> MalformedManifestException.missingField(this, "apiVersion")); + } + + @JsonIgnore + public void setApiVersion(KubernetesApiVersion apiVersion) { + put("apiVersion", apiVersion.toString()); + computedKind = null; + } + + @JsonIgnore + @SuppressWarnings("unchecked") + private Map getMetadata() { + return Optional.ofNullable((Map) get("metadata")) + .orElseThrow(() -> MalformedManifestException.missingField(this, "metadata")); + } + + @JsonIgnore + public String getName() { + return (String) getMetadata().get("name"); + } + + @JsonIgnore + public String getGenerateName() { + return (String) getMetadata().get("generateName"); + } + + @JsonIgnore + public boolean hasGenerateName() { + if (!Strings.isNullOrEmpty(this.getName())) { + // If a name is present, it will be used instead of a generateName + return false; + } + return !Strings.isNullOrEmpty(this.getGenerateName()); + } + + @JsonIgnore + public String getUid() { + return (String) getMetadata().get("uid"); + } + + @JsonIgnore + public void setName(String name) { + getMetadata().put("name", name); + } + + @JsonIgnore + public void setGenerateName(String name) { + getMetadata().put("generateName", name); + } + + @JsonIgnore + @Nonnull + public String getNamespace() { + String namespace = (String) getMetadata().get("namespace"); + return Strings.nullToEmpty(namespace); + } + + @JsonIgnore + public void setNamespace(String namespace) { + getMetadata().put("namespace", namespace); + } + + @JsonIgnore + @Nonnull + public String getCreationTimestamp() { + Object timestamp = getMetadata().get("creationTimestamp"); + if (timestamp == null) { + return ""; + } + return timestamp.toString(); + } + + @JsonIgnore + @Nullable + public Long getCreationTimestampEpochMillis() { + try { + return Instant.parse(getCreationTimestamp()).toEpochMilli(); + } catch (DateTimeParseException e) { + log.warn("Failed to parse timestamp: ", e); + } + return null; + } + + @JsonIgnore + @Nonnull + public List getOwnerReferences() { + Map metadata = getMetadata(); + return 
Optional.ofNullable(metadata.get("ownerReferences")) + .map(r -> mapper.convertValue(r, new TypeReference>() {})) + .orElseGet(ImmutableList::of); + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public KubernetesManifestSelector getManifestSelector() { + Map spec = getSpecAsMap(); + if (spec == null) { + return null; + } + + if (!spec.containsKey("selector")) { + return null; + } + + Map selector = (Map) spec.get("selector"); + if (!selector.containsKey("matchExpressions") && !selector.containsKey("matchLabels")) { + return new KubernetesManifestSelector() + .setMatchLabels((Map) spec.get("selector")); + } else { + return mapper.convertValue(selector, KubernetesManifestSelector.class); + } + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public Map getLabels() { + Map result = (Map) getMetadata().get("labels"); + if (result == null) { + result = new HashMap<>(); + getMetadata().put("labels", result); + } + + return result; + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public Map getAnnotations() { + Map result = (Map) getMetadata().get("annotations"); + if (result == null) { + result = new HashMap<>(); + getMetadata().put("annotations", result); + } + + return result; + } + + @JsonIgnore + private Map getSpecAsMap() { + if (!containsKey("spec")) { + return null; + } + + Object specObject = get("spec"); + if (!(specObject instanceof Map)) { + return null; + } + + return (Map) specObject; + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public Integer getReplicas() { + Map spec = getSpecAsMap(); + if (spec == null) { + return null; + } + if (!spec.containsKey("replicas")) { + return null; + } + + return ((Number) spec.get("replicas")).intValue(); + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public void setReplicas(Number replicas) { + Map spec = getSpecAsMap(); + if (spec == null) { + return; + } + spec.put("replicas", replicas.intValue()); + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public Optional> getSpecTemplateLabels() { + Map spec = getSpecAsMap(); + if (spec == null) { + return Optional.empty(); + } + if (!spec.containsKey("template")) { + return Optional.empty(); + } + + if (!(spec.get("template") instanceof Map)) { + return Optional.empty(); + } + + Map template = (Map) spec.get("template"); + if (!template.containsKey("metadata")) { + return Optional.empty(); + } + + Map metadata = (Map) template.get("metadata"); + if (metadata == null) { + return Optional.empty(); + } + + Map result = (Map) metadata.get("labels"); + if (result == null) { + result = new HashMap<>(); + metadata.put("labels", result); + } + + return Optional.of(result); + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public Optional> getSpecTemplateAnnotations() { + Map spec = getSpecAsMap(); + if (spec == null) { + return Optional.empty(); + } + + if (!spec.containsKey("template")) { + return Optional.empty(); + } + + if (!(spec.get("template") instanceof Map)) { + return Optional.empty(); + } + + Map template = (Map) spec.get("template"); + if (!template.containsKey("metadata")) { + return Optional.empty(); + } + + Map metadata = (Map) template.get("metadata"); + if (metadata == null) { + return Optional.empty(); + } + + Map result = (Map) metadata.get("annotations"); + if (result == null) { + result = new HashMap<>(); + metadata.put("annotations", result); + } + + return Optional.of(result); + } + + @JsonIgnore + public Object getStatus() { + return get("status"); + } + + @JsonIgnore + public String getFullResourceName() { + // To try to avoid "null" in the 
return value, use the generateName field if
+    // there's no name. With neither name nor generateName, the return value
+    // still contains "null".
+    String name = Strings.isNullOrEmpty(getName()) ? getGenerateName() : getName();
+    return getFullResourceName(getKind(), name);
+  }
+
+  public static String getFullResourceName(KubernetesKind kind, String name) {
+    return String.join(" ", kind.toString(), name);
+  }
+
+  /*
+   * The reasoning behind removing metadata for comparison is that it shouldn't affect the runtime
+   * behavior of the resource we are creating.
+   */
+  public boolean nonMetadataEquals(KubernetesManifest other) {
+    if (other == null) {
+      return false;
+    }
+
+    KubernetesManifest cloneThis = this.clone();
+    KubernetesManifest cloneOther = other.clone();
+
+    cloneThis.remove("metadata");
+    cloneOther.remove("metadata");
+
+    return cloneThis.equals(cloneOther);
+  }
+
+  /**
+   * This method is deprecated in favor of creating a {@link KubernetesCoordinates} object using
+   * {@link KubernetesCoordinates.KubernetesCoordinatesBuilder#fullResourceName}, which has more
+   * clearly identified names than {@link Pair#getLeft()} and {@link Pair#getRight()}.
+   */
+  @Deprecated
+  public static Pair<KubernetesKind, String> fromFullResourceName(String fullResourceName) {
+    KubernetesCoordinates coords =
+        KubernetesCoordinates.builder().fullResourceName(fullResourceName).build();
+    return new ImmutablePair<>(coords.getKind(), coords.getName());
+  }
+
+  @Data
+  public static class OwnerReference {
+    KubernetesApiVersion apiVersion;
+    String kind;
+    String name;
+    String uid;
+    boolean blockOwnerDeletion;
+    boolean controller;
+
+    public KubernetesKind computedKind() {
+      KubernetesApiGroup kubernetesApiGroup;
+      if (apiVersion != null) {
+        kubernetesApiGroup = getApiVersion().getApiGroup();
+      } else {
+        kubernetesApiGroup = null;
+      }
+      return KubernetesKind.from(kind, kubernetesApiGroup);
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestAnnotater.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestAnnotater.java
new file mode 100644
index 00000000000..5f573e825f1
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestAnnotater.java
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
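To make the `KubernetesManifest` accessors above concrete: a minimal sketch, not part of this diff, assuming Jackson's YAML module is on the classpath and with checked Jackson exceptions elided:

```java
// imports: com.fasterxml.jackson.databind.ObjectMapper,
//          com.fasterxml.jackson.dataformat.yaml.YAMLFactory
KubernetesManifest manifest =
    new ObjectMapper(new YAMLFactory())
        .readValue(
            "apiVersion: apps/v1\n"
                + "kind: Deployment\n"
                + "metadata:\n"
                + "  name: my-app\n"
                + "  namespace: prod\n",
            KubernetesManifest.class);

manifest.getKind();             // DEPLOYMENT: the kind name folded with the apps group
manifest.getNamespace();        // "prod"
manifest.getFullResourceName(); // "deployment my-app"
```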
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesResourceAwareNames; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.OptionalInt; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KubernetesManifestAnnotater { + private static final Logger log = LoggerFactory.getLogger(KubernetesManifestAnnotater.class); + + static final String SPINNAKER_ANNOTATION = "spinnaker.io"; + private static final String TRAFFIC_ANNOTATION_PREFIX = "traffic." + SPINNAKER_ANNOTATION; + private static final String ARTIFACT_ANNOTATION_PREFIX = "artifact." + SPINNAKER_ANNOTATION; + private static final String MONIKER_ANNOTATION_PREFIX = "moniker." + SPINNAKER_ANNOTATION; + private static final String CACHING_ANNOTATION_PREFIX = "caching." + SPINNAKER_ANNOTATION; + private static final String CLUSTER = MONIKER_ANNOTATION_PREFIX + "/cluster"; + private static final String APPLICATION = MONIKER_ANNOTATION_PREFIX + "/application"; + private static final String STACK = MONIKER_ANNOTATION_PREFIX + "/stack"; + private static final String DETAIL = MONIKER_ANNOTATION_PREFIX + "/detail"; + private static final String SEQUENCE = MONIKER_ANNOTATION_PREFIX + "/sequence"; + private static final String TYPE = ARTIFACT_ANNOTATION_PREFIX + "/type"; + private static final String NAME = ARTIFACT_ANNOTATION_PREFIX + "/name"; + private static final String LOCATION = ARTIFACT_ANNOTATION_PREFIX + "/location"; + private static final String VERSION = ARTIFACT_ANNOTATION_PREFIX + "/version"; + private static final String IGNORE_CACHING = CACHING_ANNOTATION_PREFIX + "/ignore"; + private static final String LOAD_BALANCERS = TRAFFIC_ANNOTATION_PREFIX + "/load-balancers"; + + private static final String KUBERNETES_ANNOTATION = "kubernetes.io"; + private static final String KUBECTL_ANNOTATION_PREFIX = "kubectl." + KUBERNETES_ANNOTATION; + private static final String DEPLOYMENT_ANNOTATION_PREFIX = "deployment." 
+ KUBERNETES_ANNOTATION; + private static final String DEPLOYMENT_REVISION = DEPLOYMENT_ANNOTATION_PREFIX + "/revision"; + private static final String KUBECTL_LAST_APPLIED_CONFIGURATION = + KUBECTL_ANNOTATION_PREFIX + "/last-applied-configuration"; + + private static final ObjectMapper objectMapper = new ObjectMapper(); + + private static void storeAnnotation(Map annotations, String key, Object value) { + if (value == null) { + return; + } + + if (annotations.containsKey(key)) { + return; + } + + try { + if (value instanceof String) { + // The "write value as string" method will attach quotes which are ugly to read + annotations.put(key, (String) value); + } else { + annotations.put(key, objectMapper.writeValueAsString(value)); + } + } catch (JsonProcessingException e) { + throw new IllegalArgumentException("Illegal annotation value for '" + key + "': " + e); + } + } + + private static T getAnnotation( + Map annotations, String key, TypeReference typeReference) { + return getAnnotation(annotations, key, typeReference, null); + } + + private static boolean stringTypeReference(TypeReference typeReference) { + if (typeReference.getType() == null || typeReference.getType().getTypeName() == null) { + log.warn("Malformed type reference {}", typeReference); + return false; + } + + return typeReference.getType().getTypeName().equals(String.class.getName()); + } + + // This is to read values that were annotated with the ObjectMapper with quotes, before we started + // ignoring the quotes + private static boolean looksLikeSerializedString(String value) { + if (Strings.isNullOrEmpty(value) || value.length() == 1) { + return false; + } + + return value.charAt(0) == '"' && value.charAt(value.length() - 1) == '"'; + } + + private static T getAnnotation( + Map annotations, String key, TypeReference typeReference, T defaultValue) { + String value = annotations.get(key); + if (value == null) { + return defaultValue; + } + + try { + boolean wantsString = stringTypeReference(typeReference); + + if (wantsString && !looksLikeSerializedString(value)) { + return (T) value; + } else { + return objectMapper.readValue(value, typeReference); + } + } catch (Exception e) { + log.warn("Illegally annotated resource for '" + key + "': " + e); + return null; + } + } + + public static void annotateManifest(KubernetesManifest manifest, Moniker moniker) { + Map annotations = manifest.getAnnotations(); + storeAnnotations(annotations, moniker); + + manifest.getSpecTemplateAnnotations().ifPresent(a -> storeAnnotations(a, moniker)); + } + + public static void annotateManifest(KubernetesManifest manifest, Artifact artifact) { + Map annotations = manifest.getAnnotations(); + storeAnnotations(annotations, artifact); + + manifest.getSpecTemplateAnnotations().ifPresent(a -> storeAnnotations(a, artifact)); + } + + private static void storeAnnotations(Map annotations, Moniker moniker) { + if (moniker == null) { + throw new IllegalArgumentException( + "Every resource deployed via spinnaker must be assigned a moniker"); + } + + storeAnnotation(annotations, CLUSTER, moniker.getCluster()); + storeAnnotation(annotations, APPLICATION, moniker.getApp()); + storeAnnotation(annotations, STACK, moniker.getStack()); + storeAnnotation(annotations, DETAIL, moniker.getDetail()); + storeAnnotation(annotations, SEQUENCE, moniker.getSequence()); + } + + private static void storeAnnotations(Map annotations, Artifact artifact) { + if (artifact == null) { + return; + } + + storeAnnotation(annotations, TYPE, artifact.getType()); + 
storeAnnotation(annotations, NAME, artifact.getName()); + storeAnnotation(annotations, LOCATION, artifact.getLocation()); + storeAnnotation(annotations, VERSION, artifact.getVersion()); + } + + public static Optional<Artifact> getArtifact(KubernetesManifest manifest, String account) { + Map<String, String> annotations = manifest.getAnnotations(); + String type = getAnnotation(annotations, TYPE, new TypeReference<String>() {}); + if (Strings.isNullOrEmpty(type)) { + return Optional.empty(); + } + + KubernetesManifest lastAppliedConfiguration = + KubernetesManifestAnnotater.getLastAppliedConfiguration(manifest); + + return Optional.of( + Artifact.builder() + .type(type) + .name(getAnnotation(annotations, NAME, new TypeReference<String>() {})) + .location(getAnnotation(annotations, LOCATION, new TypeReference<String>() {})) + .version(getAnnotation(annotations, VERSION, new TypeReference<String>() {})) + .putMetadata("lastAppliedConfiguration", lastAppliedConfiguration) + .putMetadata("account", account) + .build()); + } + + public static Moniker getMoniker(KubernetesManifest manifest) { + // first get the annotations + Map<String, String> annotations = manifest.getAnnotations(); + // attempt to get the names - this will be used in case there are no annotations + // use KubernetesResourceAwareNames so that it can handle special Kubernetes system resources. + // see KubernetesResourceAwareNames for more details + KubernetesResourceAwareNames parsed = + KubernetesResourceAwareNames.parseName(manifest.getName()); + Integer defaultSequence = parsed.getSequence(); + + return Moniker.builder() + .cluster( + getAnnotation( + annotations, CLUSTER, new TypeReference<String>() {}, parsed.getCluster())) + .app( + getAnnotation( + annotations, APPLICATION, new TypeReference<String>() {}, parsed.getApp())) + .stack(getAnnotation(annotations, STACK, new TypeReference<String>() {}, null)) + .detail(getAnnotation(annotations, DETAIL, new TypeReference<String>() {}, null)) + .sequence( + getAnnotation( + annotations, + SEQUENCE, + new TypeReference<Integer>() {}, + manifest.getKind().equals(KubernetesKind.REPLICA_SET) + ? getAnnotation( + annotations, + DEPLOYMENT_REVISION, + new TypeReference<Integer>() {}, + defaultSequence) + : defaultSequence)) + .build(); + } + + @NonnullByDefault + public static KubernetesManifestTraffic getTraffic(KubernetesManifest manifest) { + Map<String, String> annotations = manifest.getAnnotations(); + + List<String> loadBalancers = + getAnnotation( + annotations, LOAD_BALANCERS, new TypeReference<List<String>>() {}, new ArrayList<>()); + return new KubernetesManifestTraffic(loadBalancers); + } + + @NonnullByDefault + public static void setTraffic(KubernetesManifest manifest, KubernetesManifestTraffic traffic) { + Map<String, String> annotations = manifest.getAnnotations(); + ImmutableList<String> loadBalancers = traffic.getLoadBalancers(); + + if (annotations.containsKey(LOAD_BALANCERS)) { + KubernetesManifestTraffic currentTraffic = getTraffic(manifest); + if (currentTraffic.getLoadBalancers().equals(loadBalancers)) { + return; + } else { + throw new RuntimeException( + String.format( + "Manifest already has %s annotation set to %s. Failed attempting to set it to %s.", + LOAD_BALANCERS, currentTraffic.getLoadBalancers(), loadBalancers)); + } + } + storeAnnotation(annotations, LOAD_BALANCERS, loadBalancers); + } + + public static void validateAnnotationsForRolloutStrategies( + KubernetesManifest manifest, KubernetesDeployManifestDescription deployManifestDescription) { + OptionalInt maxVersionHistory = getStrategy(manifest).getMaxVersionHistory(); + if (deployManifestDescription.isBlueGreen() + && maxVersionHistory.isPresent() + && maxVersionHistory.getAsInt() < 2) { + throw new RuntimeException( + String.format( + "The max version history specified in your manifest conflicts with the behavior of the Red/Black rollout strategy. Please update your %s annotation to a value greater than or equal to 2.", + KubernetesManifestStrategy.MAX_VERSION_HISTORY)); + } + } + + public static KubernetesCachingProperties getCachingProperties(KubernetesManifest manifest) { + Map<String, String> annotations = manifest.getAnnotations(); + + return KubernetesCachingProperties.builder() + .ignore(getAnnotation(annotations, IGNORE_CACHING, new TypeReference<Boolean>() {}, false)) + .application(getAnnotation(annotations, APPLICATION, new TypeReference<String>() {}, "")) + .build(); + } + + public static KubernetesManifestStrategy getStrategy(KubernetesManifest manifest) { + return KubernetesManifestStrategy.fromAnnotations(manifest.getAnnotations()); + } + + public static void setDeploymentStrategy( + KubernetesManifest manifest, KubernetesManifestStrategy.DeployStrategy strategy) { + strategy.setAnnotations(manifest.getAnnotations()); + } + + public static KubernetesManifest getLastAppliedConfiguration(KubernetesManifest manifest) { + Map<String, String> annotations = manifest.getAnnotations(); + + return getAnnotation( + annotations, + KUBECTL_LAST_APPLIED_CONFIGURATION, + new TypeReference<KubernetesManifest>() {}, + null); + } + + public static String getManifestCluster(KubernetesManifest manifest) { + return Strings.nullToEmpty(manifest.getAnnotations().get(CLUSTER)); + } + + public static String getManifestApplication(KubernetesManifest manifest) { + return Strings.nullToEmpty(manifest.getAnnotations().get(APPLICATION)); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestLabeler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestLabeler.java new file mode 100644 index 00000000000..8f6a42d48bc --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestLabeler.java @@ -0,0 +1,107 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.Map; + +public class KubernetesManifestLabeler { + private static final String SPINNAKER_LABEL = "spinnaker.io"; + private static final String MONIKER_LABEL_PREFIX = "moniker." + SPINNAKER_LABEL; + private static final String SEQUENCE = MONIKER_LABEL_PREFIX + "/sequence"; + + private static final String KUBERNETES_LABEL = "kubernetes.io"; + private static final String APP_LABEL_PREFIX = "app." + KUBERNETES_LABEL; + private static final String APP_NAME = APP_LABEL_PREFIX + "/name"; + private static final String APP_VERSION = APP_LABEL_PREFIX + "/version"; + private static final String APP_COMPONENT = APP_LABEL_PREFIX + "/component"; + private static final String APP_PART_OF = APP_LABEL_PREFIX + "/part-of"; + private static final String APP_MANAGED_BY = APP_LABEL_PREFIX + "/managed-by"; + + private static void storeLabelAndOverwrite(Map<String, String> labels, String key, String value) { + if (value == null) { + return; + } + + labels.put(key, value); + } + + private static void storeLabel(Map<String, String> labels, String key, String value) { + if (value == null) { + return; + } + + if (labels.containsKey(key)) { + return; + } + + labels.put(key, value); + } + + public static void labelManifest( + String managedBySuffix, + KubernetesManifest manifest, + Moniker moniker, + Boolean skipSpecTemplateLabels) { + Map<String, String> labels = manifest.getLabels(); + storeLabels(managedBySuffix, labels, moniker); + + // Deployment fails for some Kubernetes resources (e.g. Karpenter NodePool) when + // the app.kubernetes.io/* labels are applied to the manifest's + // .spec.template.metadata.labels. If skipSpecTemplateLabels is + // set to true in the manifest description, Spinnaker won't apply + // the Kubernetes and Moniker labels + // to the .spec.template.metadata.labels of the manifest.
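For orientation, this is the label set the storeLabels method (just below) produces for a typical moniker. A sketch only, not part of the patch: the demo class, the moniker values, the suffix, and the preset label are all invented, and it assumes it compiles in the same package as the labeler:

import com.netflix.spinnaker.moniker.Moniker;
import java.util.HashMap;
import java.util.Map;

// Sketch with invented values: shows which labels are kept vs. overwritten.
public class LabelerDemo {
  public static void main(String[] args) {
    Moniker moniker = Moniker.builder().app("myapp").sequence(3).build();
    Map<String, String> labels = new HashMap<>();
    labels.put("app.kubernetes.io/name", "preset-name"); // storeLabel keeps existing values

    KubernetesManifestLabeler.storeLabels("team-a", labels, moniker);

    // app.kubernetes.io/name        = preset-name      (not overwritten)
    // app.kubernetes.io/managed-by  = spinnaker-team-a (always overwritten)
    // moniker.spinnaker.io/sequence = 3                (always overwritten)
    labels.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}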
+ if (!skipSpecTemplateLabels) { + manifest.getSpecTemplateLabels().ifPresent(l -> storeLabels(managedBySuffix, l, moniker)); + } + } + + public static void storeLabels( + String managedBySuffix, Map<String, String> labels, Moniker moniker) { + if (moniker == null) { + return; + } + + String appManagedByValue = "spinnaker"; + + if (!Strings.isNullOrEmpty(managedBySuffix)) { + appManagedByValue = appManagedByValue + "-" + managedBySuffix; + } + + // other properties aren't currently set by Spinnaker + storeLabel(labels, APP_NAME, moniker.getApp()); + storeLabelAndOverwrite(labels, APP_MANAGED_BY, appManagedByValue); + if (moniker.getSequence() != null) { + storeLabelAndOverwrite(labels, SEQUENCE, moniker.getSequence() + ""); + } + } + + public static KubernetesApplicationProperties getApplicationProperties( + KubernetesManifest manifest) { + Map<String, String> labels = manifest.getLabels(); + + return new KubernetesApplicationProperties() + .setName(labels.get(APP_NAME)) + .setVersion(labels.get(APP_VERSION)) + .setComponent(labels.get(APP_COMPONENT)) + .setPartOf(labels.get(APP_PART_OF)) + .setManagedBy(labels.get(APP_MANAGED_BY)); + } +} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestList.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestList.java similarity index 84% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestList.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestList.java index 41188c1feb0..81cfd70840c 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesManifestList.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestList.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +15,10 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; - -import lombok.Data; +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; import java.util.List; +import lombok.Data; @Data public class KubernetesManifestList { diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestOperationDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestOperationDescription.java new file mode 100644 index 00000000000..2ba04cb90f0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestOperationDescription.java @@ -0,0 +1,39 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesManifestOperationDescription extends KubernetesAtomicOperationDescription { + private String manifestName; + private String location; + + @JsonIgnore + public KubernetesCoordinates getPointCoordinates() { + return KubernetesCoordinates.builder() + .namespace(location) + .fullResourceName(manifestName) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestSelector.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestSelector.java new file mode 100644 index 00000000000..422b7e30f8a --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestSelector.java @@ -0,0 +1,41 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.clouddriver.kubernetes.security.MatchExpression; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.Data; + +@Data +public class KubernetesManifestSelector { + private Map matchLabels = new HashMap<>(); + private List matchExpressions = new ArrayList<>(); + + @JsonIgnore + public KubernetesSelectorList toSelectorList() { + KubernetesSelectorList list = KubernetesSelectorList.fromMatchLabels(matchLabels); + list.addSelectors(KubernetesSelectorList.fromMatchExpressions(matchExpressions)); + + return list; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestStrategy.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestStrategy.java new file mode 100644 index 00000000000..419ce154f59 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestStrategy.java @@ -0,0 +1,190 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.google.common.collect.ImmutableMap; +import com.google.common.primitives.Ints; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; +import javax.annotation.Nullable; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.Builder; +import lombok.Value; + +@Value +@NonnullByDefault +public final class KubernetesManifestStrategy { + private static final String STRATEGY_ANNOTATION_PREFIX = + "strategy." 
+ KubernetesManifestAnnotater.SPINNAKER_ANNOTATION; + private static final String VERSIONED = STRATEGY_ANNOTATION_PREFIX + "/versioned"; + static final String MAX_VERSION_HISTORY = STRATEGY_ANNOTATION_PREFIX + "/max-version-history"; + private static final String USE_SOURCE_CAPACITY = + STRATEGY_ANNOTATION_PREFIX + "/use-source-capacity"; + + private static final String SERVER_SIDE_APPLY_STRATEGY = + STRATEGY_ANNOTATION_PREFIX + "/server-side-apply"; + private static final String SERVER_SIDE_APPLY_FORCE_CONFLICTS = "force-conflicts"; + + private final DeployStrategy deployStrategy; + private final Versioned versioned; + private final OptionalInt maxVersionHistory; + private final boolean useSourceCapacity; + private final ServerSideApplyStrategy serverSideApplyStrategy; + + @Builder + @ParametersAreNullableByDefault + private KubernetesManifestStrategy( + DeployStrategy deployStrategy, + Versioned versioned, + Integer maxVersionHistory, + boolean useSourceCapacity, + ServerSideApplyStrategy serverSideApplyStrategy) { + this.deployStrategy = Optional.ofNullable(deployStrategy).orElse(DeployStrategy.APPLY); + this.versioned = Optional.ofNullable(versioned).orElse(Versioned.DEFAULT); + this.maxVersionHistory = + maxVersionHistory == null ? OptionalInt.empty() : OptionalInt.of(maxVersionHistory); + this.useSourceCapacity = useSourceCapacity; + this.serverSideApplyStrategy = + Optional.ofNullable(serverSideApplyStrategy).orElse(ServerSideApplyStrategy.DEFAULT); + } + + static KubernetesManifestStrategy fromAnnotations(Map annotations) { + return KubernetesManifestStrategy.builder() + .versioned(Versioned.fromAnnotations(annotations)) + .deployStrategy(DeployStrategy.fromAnnotations(annotations)) + .serverSideApplyStrategy(ServerSideApplyStrategy.fromAnnotations(annotations)) + .useSourceCapacity(Boolean.parseBoolean(annotations.get(USE_SOURCE_CAPACITY))) + .maxVersionHistory(Ints.tryParse(annotations.getOrDefault(MAX_VERSION_HISTORY, ""))) + .build(); + } + + ImmutableMap toAnnotations() { + ImmutableMap.Builder builder = ImmutableMap.builder(); + builder.putAll(deployStrategy.toAnnotations()); + builder.putAll(versioned.toAnnotations()); + builder.putAll(serverSideApplyStrategy.toAnnotations()); + if (maxVersionHistory.isPresent()) { + builder.put(MAX_VERSION_HISTORY, Integer.toString(maxVersionHistory.getAsInt())); + } + if (useSourceCapacity) { + builder.put(USE_SOURCE_CAPACITY, Boolean.TRUE.toString()); + } + return builder.build(); + } + + public enum Versioned { + TRUE(ImmutableMap.of(VERSIONED, Boolean.TRUE.toString())), + FALSE(ImmutableMap.of(VERSIONED, Boolean.FALSE.toString())), + DEFAULT(ImmutableMap.of()); + + private final ImmutableMap annotations; + + Versioned(ImmutableMap annotations) { + this.annotations = annotations; + } + + static Versioned fromAnnotations(Map annotations) { + if (annotations.containsKey(VERSIONED)) { + return Boolean.parseBoolean(annotations.get(VERSIONED)) ? 
TRUE : FALSE; + } + return DEFAULT; + } + + ImmutableMap toAnnotations() { + return annotations; + } + } + + public enum DeployStrategy { + APPLY(null), + RECREATE(STRATEGY_ANNOTATION_PREFIX + "/recreate"), + REPLACE(STRATEGY_ANNOTATION_PREFIX + "/replace"), + SERVER_SIDE_APPLY(SERVER_SIDE_APPLY_STRATEGY); + + @Nullable private final String annotation; + + DeployStrategy(@Nullable String annotation) { + this.annotation = annotation; + } + + static DeployStrategy fromAnnotations(Map annotations) { + if (Boolean.parseBoolean(annotations.get(RECREATE.annotation))) { + return RECREATE; + } + if (Boolean.parseBoolean(annotations.get(REPLACE.annotation))) { + return REPLACE; + } + if (annotations.containsKey(SERVER_SIDE_APPLY.annotation) + && ServerSideApplyStrategy.fromAnnotations(annotations) + != ServerSideApplyStrategy.DISABLED) { + return SERVER_SIDE_APPLY; + } + return APPLY; + } + + ImmutableMap toAnnotations() { + if (annotation == null) { + return ImmutableMap.of(); + } + return ImmutableMap.of(annotation, Boolean.TRUE.toString()); + } + + void setAnnotations(Map annotations) { + // First clear out any existing deploy strategy annotations, then apply the one appropriate to + // the current strategy + Arrays.stream(DeployStrategy.values()) + .map(s -> s.annotation) + .filter(Objects::nonNull) + .forEach(annotations::remove); + annotations.putAll(toAnnotations()); + } + } + + public enum ServerSideApplyStrategy { + FORCE_CONFLICTS(ImmutableMap.of(SERVER_SIDE_APPLY_STRATEGY, SERVER_SIDE_APPLY_FORCE_CONFLICTS)), + DISABLED(ImmutableMap.of(SERVER_SIDE_APPLY_STRATEGY, Boolean.FALSE.toString())), + DEFAULT(ImmutableMap.of()); + private final ImmutableMap annotations; + + ServerSideApplyStrategy(ImmutableMap annotations) { + this.annotations = annotations; + } + + static ServerSideApplyStrategy fromAnnotations(Map annotations) { + if (annotations.containsKey(SERVER_SIDE_APPLY_STRATEGY)) { + String strategy = annotations.get(SERVER_SIDE_APPLY_STRATEGY); + if (Boolean.parseBoolean(strategy)) { + return DEFAULT; + } + + if (strategy.equals(SERVER_SIDE_APPLY_FORCE_CONFLICTS)) { + return FORCE_CONFLICTS; + } + } + return DISABLED; + } + + ImmutableMap toAnnotations() { + return annotations; + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTraffic.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTraffic.java new file mode 100644 index 00000000000..fd4e34ed93e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTraffic.java @@ -0,0 +1,39 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.List; +import java.util.Optional; +import javax.annotation.ParametersAreNullableByDefault; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@EqualsAndHashCode +@Getter +@NonnullByDefault +public final class KubernetesManifestTraffic { + private final ImmutableList loadBalancers; + + @ParametersAreNullableByDefault + public KubernetesManifestTraffic(List loadBalancers) { + this.loadBalancers = + Optional.ofNullable(loadBalancers).map(ImmutableList::copyOf).orElseGet(ImmutableList::of); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesPatchManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesPatchManifestDescription.java new file mode 100644 index 00000000000..559ede835e0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesPatchManifestDescription.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.List; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class KubernetesPatchManifestDescription extends KubernetesAtomicOperationDescription { + private String manifestName; + private String location; + + // This will only be a portion of a full manifest so calls to some required fields can fail. + // Using the KubernetesManifest type makes it simpler to reuse the ArtifactReplacement logic. + // TODO: change Orca to only send a single manifest. 
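The KubernetesManifestTraffic constructor above is deliberately null-tolerant and defensive: a null input becomes an empty ImmutableList, and a non-null input is copied so later mutation of the caller's list cannot leak in. A small sketch, not part of the patch; the demo class and the load-balancer names are invented, and it assumes same-package access to the class above:

import java.util.Arrays;
import java.util.List;

// Sketch with invented values: the constructor never exposes null or the caller's list.
public class TrafficDemo {
  public static void main(String[] args) {
    KubernetesManifestTraffic none = new KubernetesManifestTraffic(null);
    System.out.println(none.getLoadBalancers()); // [] -- empty, never null

    List<String> input = Arrays.asList("service my-svc", "service my-canary");
    KubernetesManifestTraffic some = new KubernetesManifestTraffic(input);
    System.out.println(some.getLoadBalancers()); // immutable copy; edits to input don't leak in
  }
}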
+ private Object patchBody; + private List requiredArtifacts; + private List allArtifacts; + private Artifact manifestArtifact; + private KubernetesPatchOptions options; + + @JsonIgnore + public KubernetesCoordinates getPointCoordinates() { + return KubernetesCoordinates.builder() + .namespace(location) + .fullResourceName(manifestName) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesPauseRolloutManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesPauseRolloutManifestDescription.java new file mode 100644 index 00000000000..f886f7fd3f5 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesPauseRolloutManifestDescription.java @@ -0,0 +1,26 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesPauseRolloutManifestDescription + extends KubernetesManifestOperationDescription {} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesResumeRolloutManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesResumeRolloutManifestDescription.java new file mode 100644 index 00000000000..75c4f6a67e2 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesResumeRolloutManifestDescription.java @@ -0,0 +1,26 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesResumeRolloutManifestDescription + extends KubernetesManifestOperationDescription {} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesRollingRestartManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesRollingRestartManifestDescription.java new file mode 100644 index 00000000000..43ce0f7cd8b --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesRollingRestartManifestDescription.java @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesRollingRestartManifestDescription + extends KubernetesManifestOperationDescription {} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesScaleManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesScaleManifestDescription.java similarity index 85% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesScaleManifestDescription.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesScaleManifestDescription.java index 3c04ab23a42..5e8338ad935 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesScaleManifestDescription.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesScaleManifestDescription.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; import lombok.Data; import lombok.EqualsAndHashCode; diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesSourceCapacity.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesSourceCapacity.java new file mode 100644 index 00000000000..1da325fbd5d --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesSourceCapacity.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import java.util.OptionalInt; + +public class KubernetesSourceCapacity { + public static Integer getSourceCapacity( + KubernetesManifest manifest, KubernetesCredentials credentials, OptionalInt currentVersion) { + String name = currentManifestName(manifest, currentVersion); + KubernetesManifest currentManifest = + credentials.get( + KubernetesCoordinates.builder() + .kind(manifest.getKind()) + .namespace(manifest.getNamespace()) + .name(name) + .build()); + if (currentManifest != null) { + return currentManifest.getReplicas(); + } + return null; + } + + private static String currentManifestName( + KubernetesManifest manifest, OptionalInt currentVersion) { + if (currentVersion.isEmpty()) { + return manifest.getName(); + } + + int version = currentVersion.getAsInt(); + String versionString = String.format("v%03d", version); + return String.join("-", manifest.getName(), versionString); + } +} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesUndoRolloutManifestDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesUndoRolloutManifestDescription.java similarity index 75% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesUndoRolloutManifestDescription.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesUndoRolloutManifestDescription.java index a9383e2b5f2..88c547168e3 100644 --- 
a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/KubernetesUndoRolloutManifestDescription.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesUndoRolloutManifestDescription.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,15 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; import lombok.Data; import lombok.EqualsAndHashCode; @EqualsAndHashCode(callSuper = true) @Data -public class KubernetesUndoRolloutManifestDescription extends KubernetesManifestOperationDescription { +public class KubernetesUndoRolloutManifestDescription + extends KubernetesManifestOperationDescription { Integer revision; Integer numRevisionsBack; } diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/MalformedManifestException.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/MalformedManifestException.java similarity index 75% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/MalformedManifestException.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/MalformedManifestException.java index cf5dcb7e491..81af7e7d910 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/manifest/MalformedManifestException.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/MalformedManifestException.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,16 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest; +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; public class MalformedManifestException extends IllegalStateException { private MalformedManifestException(String msg) { super(msg); } - public static MalformedManifestException missingField(KubernetesManifest manifest, String fieldName) { + public static MalformedManifestException missingField( + KubernetesManifest manifest, String fieldName) { return new MalformedManifestException( - String.format("Missing required field '%s' on manifest:\n%s", fieldName, manifest) - ); + String.format("Missing required field '%s' on manifest:\n%s", fieldName, manifest)); } } diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/servergroup/KubernetesResizeServerGroupDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/servergroup/KubernetesResizeServerGroupDescription.java similarity index 75% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/servergroup/KubernetesResizeServerGroupDescription.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/servergroup/KubernetesResizeServerGroupDescription.java index f6b78f47a70..0579debf41b 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/servergroup/KubernetesResizeServerGroupDescription.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/servergroup/KubernetesResizeServerGroupDescription.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.description.servergroup; +package com.netflix.spinnaker.clouddriver.kubernetes.description.servergroup; import com.netflix.spinnaker.clouddriver.model.ServerGroup.Capacity; import lombok.Data; @@ -23,6 +23,7 @@ @EqualsAndHashCode(callSuper = true) @Data -public class KubernetesResizeServerGroupDescription extends KubernetesServerGroupOperationDescription { +public class KubernetesResizeServerGroupDescription + extends KubernetesServerGroupOperationDescription { Capacity capacity; } diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/servergroup/KubernetesServerGroupOperationDescription.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/servergroup/KubernetesServerGroupOperationDescription.java new file mode 100644 index 00000000000..5be4458c011 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/description/servergroup/KubernetesServerGroupOperationDescription.java @@ -0,0 +1,40 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.servergroup; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesAtomicOperationDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@EqualsAndHashCode(callSuper = true) +@Data +public class KubernetesServerGroupOperationDescription + extends KubernetesAtomicOperationDescription { + private String serverGroupName; + private String region; // :( + + @JsonIgnore + public KubernetesCoordinates getCoordinates() { + return KubernetesCoordinates.builder() + .namespace(region) + .fullResourceName(serverGroupName) + .build(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicator.java new file mode 100644 index 00000000000..dbc341600be --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicator.java @@ -0,0 +1,71 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.health; + +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.core.AccountHealthIndicator; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; + +@Slf4j +public class KubernetesHealthIndicator + extends AccountHealthIndicator { + private static final String ID = "kubernetes"; + private final CredentialsRepository credentialsRepository; + private final KubernetesConfigurationProperties kubernetesConfigurationProperties; + + @Autowired + public KubernetesHealthIndicator( + Registry registry, + CredentialsRepository credentialsRepository, + KubernetesConfigurationProperties kubernetesConfigurationProperties) { + super(ID, registry); + this.credentialsRepository = credentialsRepository; + this.kubernetesConfigurationProperties = kubernetesConfigurationProperties; + + if (kubernetesConfigurationProperties.isVerifyAccountHealth()) { + log.info( + "kubernetes.verifyAccountHealth flag is enabled - declared namespaces will be retrieved for all accounts"); + } else { + log.warn( + "kubernetes.verifyAccountHealth flag is disabled - declared namespaces will not be retrieved for any account"); + } + } + + @Override + protected ImmutableList getAccounts() { + return ImmutableList.copyOf(credentialsRepository.getAll()); + } + + @Override + protected Optional accountHealth(KubernetesNamedAccountCredentials accountCredentials) { + if (kubernetesConfigurationProperties.isVerifyAccountHealth()) { + try { + accountCredentials.getCredentials().getDeclaredNamespaces(); + } catch (RuntimeException e) { + return Optional.of(e.getMessage()); + } + } + return Optional.empty(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/ContainerLog.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/ContainerLog.java new file mode 100644 index 00000000000..5fb25f5c32a --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/ContainerLog.java @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Discovery, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.model; + +import lombok.AllArgsConstructor; +import lombok.Getter; + +@AllArgsConstructor +@Getter +public class ContainerLog { + private final String name; + private final String output; +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/KubernetesJobStatus.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/KubernetesJobStatus.java new file mode 100644 index 00000000000..c4adea00513 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/KubernetesJobStatus.java @@ -0,0 +1,263 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.model; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.model.JobState; +import com.netflix.spinnaker.clouddriver.model.JobStatus; +import io.kubernetes.client.openapi.models.V1ContainerState; +import io.kubernetes.client.openapi.models.V1ContainerStateTerminated; +import io.kubernetes.client.openapi.models.V1ContainerStateWaiting; +import io.kubernetes.client.openapi.models.V1ContainerStatus; +import io.kubernetes.client.openapi.models.V1Job; +import io.kubernetes.client.openapi.models.V1JobCondition; +import io.kubernetes.client.openapi.models.V1JobSpec; +import io.kubernetes.client.openapi.models.V1JobStatus; +import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodStatus; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.Data; + +@Data +public class KubernetesJobStatus implements JobStatus { + + String name; + String cluster; + String account; + String id; + String location; + String provider = "kubernetes"; + Long createdTime; + Long completedTime; + String message; + String reason; + Integer exitCode; + Integer signal; + String failureDetails; + String logs; + @JsonIgnore V1Job job; + List pods; + String mostRecentPodName; + + public KubernetesJobStatus(V1Job job, String account) { + this.job = job; + this.account = account; + this.name = job.getMetadata().getName(); + this.location = job.getMetadata().getNamespace(); + this.createdTime = job.getMetadata().getCreationTimestamp().toInstant().toEpochMilli(); + } + + @Override + public Map getCompletionDetails() { + Map details = new HashMap<>(); + details.put("exitCode", this.exitCode != null ? this.exitCode.toString() : ""); + details.put("signal", this.signal != null ? this.signal.toString() : ""); + details.put("message", this.message != null ? this.message : ""); + details.put("reason", this.reason != null ? 
this.reason : ""); + return details; + } + + @Override + public JobState getJobState() { + V1JobStatus status = job.getStatus(); + if (status == null) { + return JobState.Running; + } + int completions = Optional.of(job.getSpec()).map(V1JobSpec::getCompletions).orElse(1); + int succeeded = Optional.of(status).map(V1JobStatus::getSucceeded).orElse(0); + + if (succeeded < completions) { + Optional<V1JobCondition> condition = getFailedJobCondition(status); + return condition.isPresent() ? JobState.Failed : JobState.Running; + } + return JobState.Succeeded; + } + + private Optional<V1JobCondition> getFailedJobCondition(V1JobStatus status) { + List<V1JobCondition> conditions = status.getConditions(); + conditions = conditions != null ? conditions : ImmutableList.of(); + return conditions.stream().filter(this::jobFailed).findFirst(); + } + + private boolean jobFailed(V1JobCondition condition) { + return "Failed".equalsIgnoreCase(condition.getType()) + && "True".equalsIgnoreCase(condition.getStatus()); + } + + /** + * This function loops through all the pods in the job and finds the first container in a pod that + * has either terminated with a non-zero exit code or is stuck in the waiting stage. Using this + * container's execution details, the job's failureDetails field is set. + */ + public void captureFailureDetails() { + V1JobStatus status = this.job.getStatus(); + if (status != null) { + Optional<V1JobCondition> condition = getFailedJobCondition(status); + if (condition.isPresent()) { + this.setMessage(condition.get().getMessage()); + this.setReason(condition.get().getReason()); + } + // save all container outputs + this.getPods().forEach(KubernetesJobStatus.PodStatus::getAllContainerDetails); + + // construct a meaningful message to explain why the job failed. This will find the first pod + // that failed with an error + for (PodStatus pod : pods) { + // find the first container that failed with a non-zero exit code + Optional<ContainerExecutionDetails> failedContainerDetails = + pod.getContainerExecutionDetails().stream() + .filter( + containerExecutionDetails -> + containerExecutionDetails + .getState() + .equals(V1ContainerState.SERIALIZED_NAME_TERMINATED) + && containerExecutionDetails.getExitCode() != null + && !containerExecutionDetails.getExitCode().equals("0")) + .findFirst(); + + // if we didn't find any terminated container, find the first container stuck in waiting + if (failedContainerDetails.isEmpty()) { + failedContainerDetails = + pod.getContainerExecutionDetails().stream() + .filter( + containerExecutionDetails -> + containerExecutionDetails + .getState() + .equals(V1ContainerState.SERIALIZED_NAME_WAITING)) + .findFirst(); + } + + // construct the error if we found a failed container + if (failedContainerDetails.isPresent()) { + ContainerExecutionDetails failedContainer = failedContainerDetails.get(); + this.setFailureDetails( + String.format( + "Pod: '%s' had errors.\n Container: '%s' exited with code: %s.\n Status: %s.\n Logs: %s", + pod.getName(), + failedContainer.getName(), + failedContainer.getExitCode(), + failedContainer.getStatus(), + failedContainer.getLogs())); + break; + } + } + } + } + + @Data + public static class PodStatus { + private String name; + private V1PodStatus status; + /** + * containerExecutionDetails contains information about all the containers in the pod. + * + * <p>Since we make use of this info in constructing a failure message only, we can annotate it + * with {@link JsonIgnore} so that it doesn't show up in the execution context + */ + @JsonIgnore private Set<ContainerExecutionDetails> containerExecutionDetails = new HashSet<>(); + + public PodStatus(V1Pod pod) { + this.name = pod.getMetadata().getName(); + this.status = pod.getStatus(); + } + + /** captures details for all containers (i.e. init containers and app containers) in a pod */ + public void getAllContainerDetails() { + if (this.status != null) { + // capture all init container details + this.containerExecutionDetails.addAll( + getContainerDetails(this.status.getInitContainerStatuses())); + + // capture all app container details + this.containerExecutionDetails.addAll( + getContainerDetails(this.status.getContainerStatuses())); + } + } + + /** + * This function accepts a list of {@link V1ContainerStatus} as a parameter and for each + * container in the list, it captures all the additional metadata for it, as defined in {@link + * ContainerExecutionDetails} + * + * @param containerStatuses - list of containers + * @return - list of {@link ContainerExecutionDetails} for each container that is non-null in + * the parameter + */ + private List<ContainerExecutionDetails> getContainerDetails( + List<V1ContainerStatus> containerStatuses) { + return Optional.ofNullable(containerStatuses).orElseGet(Collections::emptyList).stream() + .filter(status -> status.getState() != null) + .map(status -> new ContainerExecutionDetails(status.getName(), status.getState())) + .collect(Collectors.toList()); + } + } + + /** + * This class captures details about a container execution. These containers can be either init + * containers or app containers. + */ + @Data + @AllArgsConstructor + public static class ContainerExecutionDetails { + private String name; + private String logs; + private String status; + private String exitCode; + private String state; + + public ContainerExecutionDetails() { + this.name = ""; + this.logs = ""; + this.status = ""; + this.exitCode = ""; + this.state = V1ContainerState.SERIALIZED_NAME_RUNNING; + } + + public ContainerExecutionDetails(String name, V1ContainerState containerState) { + this(); + + this.name = name; + if (containerState.getTerminated() != null) { + V1ContainerStateTerminated terminatedContainerState = containerState.getTerminated(); + this.logs = terminatedContainerState.getMessage(); + this.status = terminatedContainerState.getReason(); + this.exitCode = + terminatedContainerState.getExitCode() != null + ? terminatedContainerState.getExitCode().toString() + : ""; + this.state = V1ContainerState.SERIALIZED_NAME_TERMINATED; + } else if (containerState.getWaiting() != null) { + V1ContainerStateWaiting waitingContainerState = containerState.getWaiting(); + this.logs = waitingContainerState.getMessage(); + this.status = waitingContainerState.getReason(); + this.state = V1ContainerState.SERIALIZED_NAME_WAITING; + } else { + this.logs = "container is still in running state"; + } + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/Manifest.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/Manifest.java new file mode 100644 index 00000000000..776e6cccfaa --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/model/Manifest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2017 Google, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.model; + +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.List; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.ToString; +import lombok.Value; + +public interface Manifest { + Moniker getMoniker(); + + String getAccount(); + + String getName(); + + String getLocation(); + + Status getStatus(); + + List getWarnings(); + + @Getter + @EqualsAndHashCode + @NonnullByDefault + @ToString + class Status { + private Condition stable = Condition.withState(true); + private Condition paused = Condition.withState(false); + private Condition available = Condition.withState(true); + private Condition failed = Condition.withState(false); + + public static Status defaultStatus() { + return new Status(); + } + + public static Status noneReported() { + return defaultStatus() + .unstable("No status reported yet") + .unavailable("No availability reported"); + } + + public Status failed(@Nullable String message) { + failed = new Condition(true, message); + return this; + } + + public Status stable(@Nullable String message) { + stable = new Condition(true, message); + return this; + } + + public Status unstable(@Nullable String message) { + stable = new Condition(false, message); + return this; + } + + public Status paused(@Nullable String message) { + paused = new Condition(true, message); + return this; + } + + public Status unavailable(@Nullable String message) { + available = new Condition(false, message); + return this; + } + + @NonnullByDefault + @Value + public static final class Condition { + private static final Condition TRUE = new Condition(true, null); + private static final Condition FALSE = new Condition(false, null); + + private final boolean state; + @Nullable private final String message; + + private static Condition withState(boolean state) { + return state ? TRUE : FALSE; + } + + private Condition(boolean state, @Nullable String message) { + this.state = state; + this.message = message; + } + } + } + + @Data + @Builder + class Warning { + private String type; + private String message; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesManifestNamer.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesManifestNamer.java new file mode 100644 index 00000000000..de64ed780c3 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesManifestNamer.java @@ -0,0 +1,102 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.names; + + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeployManifestDescription; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestLabeler; + import com.netflix.spinnaker.clouddriver.names.NamingStrategy; + import com.netflix.spinnaker.moniker.Moniker; + import com.netflix.spinnaker.orchestration.OperationDescription; + import org.springframework.beans.factory.annotation.Autowired; + import org.springframework.beans.factory.annotation.Value; + import org.springframework.stereotype.Component; + + @Component + public class KubernetesManifestNamer implements NamingStrategy<KubernetesManifest> { + private final boolean applyAppLabels; + private final String managedBySuffix; + + @Autowired + public KubernetesManifestNamer( + @Value("${kubernetes.v2.apply-app-labels:true}") boolean applyAppLabels, + @Value("${kubernetes.v2.managed-by-suffix:}") String managedBySuffix) { + this.applyAppLabels = applyAppLabels; + this.managedBySuffix = managedBySuffix; + } + + public KubernetesManifestNamer() { + this(true, ""); + } + + @Override + public String getName() { + return "kubernetesAnnotations"; + } + + @Override + public void applyMoniker(KubernetesManifest obj, Moniker moniker) { + applyMoniker(obj, moniker, null); + } + + /** + * Applies the given Moniker to the specified KubernetesManifest. If the provided + * OperationDescription is an instance of KubernetesDeployManifestDescription, the method + * annotates and labels the manifest. When + * KubernetesDeployManifestDescription.isSkipSpecTemplateLabels() is true, the Kubernetes and + * Moniker labels are not applied to the manifest's spec.template.metadata.labels; when the + * OperationDescription is null or that flag is false, they are applied. + * + * @param obj the KubernetesManifest to which the moniker will be applied + * @param moniker the moniker to apply + * @param description a description expected to be of type KubernetesDeployManifestDescription + * that provides context for the operation. + */ + @Override + public void applyMoniker( + KubernetesManifest obj, Moniker moniker, OperationDescription description) { + // The OperationDescription passed to this method must + // always have the dynamic type of KubernetesDeployManifestDescription. + // If not, fail the operation. + if (description != null && !(description instanceof KubernetesDeployManifestDescription)) { + throw new IllegalArgumentException( + String.format( + "OperationDescription passed to Namer.applyMoniker() must be a KubernetesDeployManifestDescription for the KubernetesDeployManifestOperation. 
Provided description: %s", + description.getClass().getName())); + } + KubernetesManifestAnnotater.annotateManifest(obj, moniker); + if (applyAppLabels) { + KubernetesDeployManifestDescription kubernetesDeployManifestDescription = + (KubernetesDeployManifestDescription) description; + boolean skipSpecTemplateLabels = + (kubernetesDeployManifestDescription != null) + ? kubernetesDeployManifestDescription.isSkipSpecTemplateLabels() + : false; + KubernetesManifestLabeler.labelManifest( + managedBySuffix, obj, moniker, skipSpecTemplateLabels); + } + } + + @Override + public Moniker deriveMoniker(KubernetesManifest obj) { + return KubernetesManifestAnnotater.getMoniker(obj); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesNamerRegistry.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesNamerRegistry.java new file mode 100644 index 00000000000..5e8a8117f01 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesNamerRegistry.java @@ -0,0 +1,59 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.names; + +import static com.google.common.collect.ImmutableMap.toImmutableMap; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.names.NamingStrategy; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.List; +import java.util.Optional; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +/** + * This class handles registering any naming strategies for kubernetes manifests that are on the + * classpath, and supports looking these up by name. It is in principle possible for users to add + * additional namers in a custom build, but it is not clear how often this is used. The only namer + * that exists upstream is {@link KubernetesManifestNamer}. + */ +@Component +@NonnullByDefault +public class KubernetesNamerRegistry { + private final ImmutableMap> strategies; + + @Autowired + public KubernetesNamerRegistry(List> strategies) { + this.strategies = + strategies.stream().collect(toImmutableMap(s -> s.getName().toLowerCase(), s -> s)); + } + + /** + * Returns a registered strategy with the supplied name (ignoring case); throws an + * IllegalArgumentException if there is no strategy with that name. 
+ */ + public NamingStrategy<KubernetesManifest> get(String name) { + return Optional.ofNullable(strategies.get(name.toLowerCase())) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format("Could not find naming strategy '%s'", name))); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesResourceAwareNames.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesResourceAwareNames.java new file mode 100644 index 00000000000..171d7c895a5 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesResourceAwareNames.java @@ -0,0 +1,175 @@ +/* + * Copyright 2022 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.names; + + import com.netflix.frigga.NameConstants; + import com.netflix.frigga.Names; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import java.util.regex.Matcher; + import java.util.regex.Pattern; + import lombok.Getter; + + /** + * The {@link Names} class is used for deconstructing information about AWS Auto Scaling Groups, + * Load Balancers, Launch Configurations, and Security Groups created by Asgard based on their name. + * + *
+ * <p>While the above class is mainly used for AWS, it works for most Kubernetes resources as + * well, since Kubernetes resources follow a similar naming convention. But certain Kubernetes + * resources reserved for Kubernetes system use have the prefix "system:". + * See Referring + * to subjects for more details. + * + *
+ * <p>One such use of these resources is in {@link KubernetesKind#CLUSTER_ROLE} kind, which you can + * attempt to patch via Spinnaker. Because the {@link Names} class cannot parse this name, Spinnaker + * fails to return the manifest. This class is used to handle such resources in addition to all the + * other resources. + */ +@Getter +public class KubernetesResourceAwareNames { + + /** + * identifier for the special system resources. See Referring + * to subjects for more details + */ + private static final String KUBERNETES_SYSTEM_RESOURCE_PREFIX = "system:"; + + /** + * A regex pattern to figure out if the manifest name has a version present in it. Used to obtain + * the sequence number for those resources that have the {@link + * KubernetesResourceAwareNames#KUBERNETES_SYSTEM_RESOURCE_PREFIX}. + */ + private static final Pattern PUSH_PATTERN = + Pattern.compile( + "^([" + KUBERNETES_SYSTEM_RESOURCE_PREFIX + "].*)-(" + NameConstants.PUSH_FORMAT + ")$"); + + /** + * It gets the value from {@link Names#parseName(String)} for most of the Kubernetes resources. + * But for manifests with {@link + * KubernetesResourceAwareNames#KUBERNETES_SYSTEM_RESOURCE_PREFIX}, it gets it from {@link + * KubernetesResourceAwareNames#parseName(String)}. + * + *
+ * <p>For example, if manifest name = system:coredns, then cluster = system:coredns. + *
+ * <p>If manifest name = system:coredns-v003, then cluster = system:coredns. + *
+ * <p>If manifest name = test-abc, cluster = test-abc + *
+ * <p>If manifest name = test-abc-v003, cluster = test-abc + *
+ * <p>See + * ParseName Examples for more details. + */ + private final String cluster; + + /** + * The Spinnaker Application to which this resource belongs. + * + *
+ * <p>It gets the value from {@link Names#parseName(String)} for most of the Kubernetes resources. + * But for manifests with {@link KubernetesResourceAwareNames#KUBERNETES_SYSTEM_RESOURCE_PREFIX}, + * it gets it from {@link KubernetesResourceAwareNames#parseName(String)}. + * + *
+ * <p>For example, if manifest name = system:coredns, then app = system. + *
+ * <p>If manifest name = test-abc, app = test + *
+ * <p>See + * ParseName Examples for more details. + */ + private final String app; + + /** + * The versioned sequence number of this manifest. + * + *
+ * <p>It gets the value from {@link Names#parseName(String)} for most of the Kubernetes + * resources. But for manifests with {@link + * KubernetesResourceAwareNames#KUBERNETES_SYSTEM_RESOURCE_PREFIX}, it gets it from {@link + * KubernetesResourceAwareNames#parseName(String)}. + * + *
+ * <p>For example, if manifest name = system:coredns, then sequence = null. + *
+ * <p>If manifest name = system:coredns-v003, then sequence = 3. + *
+ * <p>If manifest name = test-abc, sequence = null. + *
+ * <p>If manifest name = test-abc-v003, sequence = 3. + *
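+ * <p>In code, an illustrative call would be {@code + * KubernetesResourceAwareNames.parseName("system:coredns-v003")}, which yields cluster = + * system:coredns, app = system, and sequence = 3. + *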
+ * <p>See + * ParseName Examples for more details. + */ + private final Integer sequence; + + public KubernetesResourceAwareNames(String cluster, String application, Integer sequence) { + this.cluster = cluster; + this.app = application; + this.sequence = sequence; + } + + /** + * parses the given manifestName into a {@link KubernetesResourceAwareNames} object. It handles + * all types of Kubernetes manifests. + * + * @param manifestName given manifest name + * @return {@link KubernetesResourceAwareNames} representation of the manifest name + */ + public static KubernetesResourceAwareNames parseName(String manifestName) { + if (manifestName != null && !manifestName.trim().isEmpty()) { + if (manifestName.startsWith(KUBERNETES_SYSTEM_RESOURCE_PREFIX)) { + return parseSystemResourceName(manifestName); + } + } + + Names parsed = Names.parseName(manifestName); + return new KubernetesResourceAwareNames( + parsed.getCluster(), parsed.getApp(), parsed.getSequence()); + } + + /** + * handles Kubernetes manifests that contain the prefix {@link + * KubernetesResourceAwareNames#KUBERNETES_SYSTEM_RESOURCE_PREFIX}. + * + * @param manifestName given manifest name + * @return {@link KubernetesResourceAwareNames} representation of the manifest name + */ + private static KubernetesResourceAwareNames parseSystemResourceName(String manifestName) { + String[] split = manifestName.split(":"); + Integer sequence = null; + Matcher pushMatcher = PUSH_PATTERN.matcher(manifestName); + boolean hasPush = pushMatcher.matches(); + + // if manifestName == "system:certificates.k8s.io:certificatesigningrequests:nodeclient-v003", + // then + // pushMatcher.group(0) = + // "system:certificates.k8s.io:certificatesigningrequests:nodeclient-v003", + // pushMatcher.group(1) = "system:certificates.k8s.io:certificatesigningrequests:nodeclient", + // pushMatcher.group(2) = "v003", + // pushMatcher.group(3) = "3" + String theCluster = hasPush ? pushMatcher.group(1) : manifestName; + String sequenceString = hasPush ? pushMatcher.group(3) : null; + if (sequenceString != null) { + sequence = Integer.parseInt(sequenceString); + } + // since this method is called only when the manifest name contains + // KUBERNETES_SYSTEM_RESOURCE_PREFIX, split[0] will always contain what we need, which + // is KUBERNETES_SYSTEM_RESOURCE_PREFIX without the ":" + return new KubernetesResourceAwareNames(theCluster, split[0], sequence); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/OperationResult.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/OperationResult.java new file mode 100644 index 00000000000..b51bc18a8cc --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/OperationResult.java @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op; + + import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.kork.artifacts.model.Artifact; + import java.util.HashMap; + import java.util.HashSet; + import java.util.Map; + import java.util.Set; + import lombok.Data; + import lombok.NoArgsConstructor; + + @Data + @NoArgsConstructor + public class OperationResult { + private Map<String, Set<String>> manifestNamesByNamespace = new HashMap<>(); + private Set<KubernetesManifest> manifests = new HashSet<>(); + private Set<Artifact> createdArtifacts = new HashSet<>(); + private Set<Artifact> boundArtifacts = new HashSet<>(); + + public void removeSensitiveKeys(ResourcePropertyRegistry propertyRegistry) { + manifests.forEach(m -> propertyRegistry.get(m.getKind()).getHandler().removeSensitiveKeys(m)); + } + + public OperationResult addManifest(KubernetesManifest manifest) { + manifests.add(manifest); + + Set<String> addedNames = + manifestNamesByNamespace.getOrDefault(manifest.getNamespace(), new HashSet<>()); + addedNames.add(manifest.getFullResourceName()); + manifestNamesByNamespace.put(manifest.getNamespace(), addedNames); + return this; + } + + public void merge(OperationResult other) { + for (Map.Entry<String, Set<String>> entry : other.manifestNamesByNamespace.entrySet()) { + Set<String> thisManifests = + this.manifestNamesByNamespace.getOrDefault(entry.getKey(), new HashSet<>()); + thisManifests.addAll(entry.getValue()); + this.manifestNamesByNamespace.put(entry.getKey(), thisManifests); + } + + this.manifests.addAll(other.manifests); + this.createdArtifacts.addAll(other.createdArtifacts); + this.boundArtifacts.addAll(other.boundArtifacts); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/artifact/KubernetesCleanupArtifactsOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/artifact/KubernetesCleanupArtifactsOperation.java new file mode 100644 index 00000000000..50ea30ebaa3 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/artifact/KubernetesCleanupArtifactsOperation.java @@ -0,0 +1,133 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.artifact; + + import com.google.common.base.Strings; + import com.google.common.collect.ImmutableList; + import com.netflix.spinnaker.clouddriver.data.task.Task; + import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; + import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.ArtifactProvider; + import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; + import com.netflix.spinnaker.clouddriver.kubernetes.description.artifact.KubernetesCleanupArtifactsDescription; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy; + import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; + import com.netflix.spinnaker.kork.artifacts.model.Artifact; + import io.kubernetes.client.openapi.models.V1DeleteOptions; + import java.util.Collection; + import java.util.List; + import java.util.Optional; + import java.util.OptionalInt; + import java.util.stream.Collectors; + import javax.annotation.Nonnull; + import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + public class KubernetesCleanupArtifactsOperation implements AtomicOperation<OperationResult> { + private static final Logger log = + LoggerFactory.getLogger(KubernetesCleanupArtifactsOperation.class); + private final KubernetesCleanupArtifactsDescription description; + private final KubernetesCredentials credentials; + @Nonnull private final String accountName; + private final ArtifactProvider artifactProvider; + private static final String OP_NAME = "CLEANUP_KUBERNETES_ARTIFACTS"; + + public KubernetesCleanupArtifactsOperation( + KubernetesCleanupArtifactsDescription description, ArtifactProvider artifactProvider) { + this.description = description; + this.credentials = description.getCredentials().getCredentials(); + this.accountName = description.getCredentials().getName(); + this.artifactProvider = artifactProvider; + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public OperationResult operate(List priorOutputs) { + OperationResult result = new OperationResult(); + + List<Artifact> artifacts = + description.getManifests().stream() + .map(this::artifactsToDelete) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + + artifacts.forEach( + a -> { + String type = a.getType(); + if (!type.startsWith("kubernetes/")) { + log.warn("Non-kubernetes type deletion requested..."); + return; + } + String kind = type.substring("kubernetes/".length()); + KubernetesResourceProperties properties = + credentials.getResourcePropertyRegistry().get(KubernetesKind.fromString(kind)); + + getTask().updateStatus(OP_NAME, "Deleting artifact '" + a + "'"); + String name = a.getName(); + if (!Strings.isNullOrEmpty(a.getVersion())) { + name = String.join("-", name, a.getVersion()); + } + result.merge( + properties + .getHandler() + .delete( + credentials, + a.getLocation(), + name, + null, + new V1DeleteOptions(), + getTask(), + OP_NAME)); + }); + + result.setManifests(null); + return result; + } + + private
ImmutableList<Artifact> artifactsToDelete(KubernetesManifest manifest) { + KubernetesManifestStrategy strategy = KubernetesManifestAnnotater.getStrategy(manifest); + OptionalInt optionalMaxVersionHistory = strategy.getMaxVersionHistory(); + if (!optionalMaxVersionHistory.isPresent()) { + return ImmutableList.of(); + } + + int maxVersionHistory = optionalMaxVersionHistory.getAsInt(); + Optional<Artifact> optional = KubernetesManifestAnnotater.getArtifact(manifest, accountName); + if (!optional.isPresent()) { + return ImmutableList.of(); + } + + Artifact artifact = optional.get(); + + ImmutableList<Artifact> artifacts = + artifactProvider.getArtifacts( + manifest.getKind(), artifact.getName(), artifact.getLocation(), credentials); + if (maxVersionHistory >= artifacts.size()) { + return ImmutableList.of(); + } else { + return artifacts.subList(0, artifacts.size() - maxVersionHistory); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDelete.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDelete.java new file mode 100644 index 00000000000..9f1d1d74ac3 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDelete.java @@ -0,0 +1,56 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.google.common.collect.ImmutableMap; + import com.netflix.spinnaker.clouddriver.data.task.Task; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; + import io.kubernetes.client.openapi.models.V1DeleteOptions; + import java.util.HashMap; + import java.util.List; + import java.util.Set; + import java.util.stream.Collectors; + + public interface CanDelete { + KubernetesKind kind(); + + default OperationResult delete( + KubernetesCredentials credentials, + String namespace, + String name, + KubernetesSelectorList labelSelectors, + V1DeleteOptions options, + Task task, + String opName) { + options = options == null ?
new V1DeleteOptions() : options; + List<String> deletedNames = + credentials.delete(kind(), namespace, name, labelSelectors, options, task, opName); + OperationResult result = new OperationResult(); + Set<String> fullNames = + deletedNames.stream() + .map(n -> KubernetesManifest.getFullResourceName(kind(), n)) + .collect(Collectors.toSet()); + + result.setManifestNamesByNamespace(new HashMap<>(ImmutableMap.of(namespace, fullNames))); + return result; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDeploy.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDeploy.java new file mode 100644 index 00000000000..2eab5894b02 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDeploy.java @@ -0,0 +1,97 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.netflix.spinnaker.clouddriver.data.task.Task; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy; + import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; + import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; + import io.kubernetes.client.openapi.models.V1DeleteOptions; + import java.util.ArrayList; + import java.util.List; + + public interface CanDeploy { + default OperationResult deploy( + KubernetesCredentials credentials, + KubernetesManifest manifest, + KubernetesManifestStrategy.DeployStrategy deployStrategy, + KubernetesManifestStrategy.ServerSideApplyStrategy serverSideApplyStrategy, + Task task, + String opName, + KubernetesSelectorList labelSelectors) { + // If the manifest has a generateName, we must apply with kubectl create as all other operations + // require looking up a manifest by name, which will fail.
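+ // (Illustrative note, not from the original change: a manifest that sets only + // metadata.generateName, e.g. generateName: my-job-, gets a server-generated name such as + // my-job-x7b2k at creation time, so there is no stable name to look up before the object + // exists.)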
+ if (manifest.hasGenerateName()) { + KubernetesManifest result = credentials.create(manifest, task, opName, labelSelectors); + OperationResult operationResult = new OperationResult(); + if (result != null) { + operationResult.addManifest(result); + } + return operationResult; + } + + KubernetesManifest deployedManifest; + switch (deployStrategy) { + case RECREATE: + try { + credentials.delete( + manifest.getKind(), + manifest.getNamespace(), + manifest.getName(), + labelSelectors, + new V1DeleteOptions(), + task, + opName); + } catch (KubectlJobExecutor.KubectlException ignored) { + } + deployedManifest = credentials.deploy(manifest, task, opName, labelSelectors); + break; + case REPLACE: + deployedManifest = credentials.createOrReplace(manifest, task, opName); + break; + case SERVER_SIDE_APPLY: + List<String> cmdArgs = new ArrayList<>(); + cmdArgs.add("--server-side=true"); + if (serverSideApplyStrategy.equals( + KubernetesManifestStrategy.ServerSideApplyStrategy.FORCE_CONFLICTS)) { + cmdArgs.add("--force-conflicts=true"); + } + deployedManifest = + credentials.deploy( + manifest, + task, + opName, + labelSelectors, + cmdArgs.toArray(new String[cmdArgs.size()])); + break; + case APPLY: + deployedManifest = credentials.deploy(manifest, task, opName, labelSelectors); + break; + default: + throw new AssertionError(String.format("Unknown deploy strategy: %s", deployStrategy)); + } + OperationResult operationResult = new OperationResult(); + if (deployedManifest != null) { + operationResult.addManifest(deployedManifest); + } + return operationResult; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanLoadBalance.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanLoadBalance.java new file mode 100644 index 00000000000..27d748e6118 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanLoadBalance.java @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; + import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; + import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import java.util.List; + import javax.annotation.ParametersAreNonnullByDefault; + + @ParametersAreNonnullByDefault + public interface CanLoadBalance { + void attach(KubernetesManifest loadBalancer, KubernetesManifest target); + + List<JsonPatch> detachPatch(KubernetesManifest loadBalancer, KubernetesManifest target); + + List<JsonPatch> attachPatch(KubernetesManifest loadBalancer, KubernetesManifest target); + + static CanLoadBalance lookupProperties( + ResourcePropertyRegistry registry, KubernetesCoordinates coords) { + KubernetesHandler loadBalancerHandler = registry.get(coords.getKind()).getHandler(); + if (!(loadBalancerHandler instanceof CanLoadBalance)) { + throw new IllegalArgumentException( + "No support for load balancing via " + coords.getKind() + " exists in Spinnaker"); + } + + return (CanLoadBalance) loadBalancerHandler; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanPatch.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanPatch.java new file mode 100644 index 00000000000..3e16029afb0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanPatch.java @@ -0,0 +1,66 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.netflix.spinnaker.clouddriver.data.task.Task; + import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; + import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + import java.util.HashMap; + import java.util.List; + + public interface CanPatch { + KubernetesKind kind(); + + default OperationResult patchWithManifest( + KubernetesCredentials credentials, + String namespace, + String name, + KubernetesPatchOptions options, + KubernetesManifest manifest, + Task task, + String opName) { + credentials.patch(kind(), namespace, name, options, manifest, task, opName); + return patch(namespace, name); + } + + default OperationResult patchWithJson( + KubernetesCredentials credentials, + String namespace, + String name, + KubernetesPatchOptions options, + List<JsonPatch> patches, + Task task, + String opName) { + credentials.patch(kind(), namespace, name, options, patches, task, opName); + return patch(namespace, name); + } + + default OperationResult patch(String namespace, String name) { + KubernetesManifest patchedManifest = new KubernetesManifest(); + patchedManifest.putIfAbsent( + "metadata", new HashMap<>()); // Hack: Set mandatory field + patchedManifest.setNamespace(namespace); + patchedManifest.setName(name); + patchedManifest.setKind(kind()); + return new OperationResult().addManifest(patchedManifest); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanPauseRollout.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanPauseRollout.java new file mode 100644 index 00000000000..213ac1635c8 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanPauseRollout.java @@ -0,0 +1,29 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + +public interface CanPauseRollout { + KubernetesKind kind(); + + default void pauseRollout(KubernetesCredentials credentials, String namespace, String name) { + credentials.pauseRollout(kind(), namespace, name); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanReceiveTraffic.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanReceiveTraffic.java new file mode 100644 index 00000000000..760002b2288 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanReceiveTraffic.java @@ -0,0 +1,19 @@ +/* + * Copyright 2021 J.P. Morgan Chase & Co. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +public interface CanReceiveTraffic {} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanResize.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanResize.java new file mode 100644 index 00000000000..d4fdc0afd48 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanResize.java @@ -0,0 +1,37 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.model.ServerGroup.Capacity; + +public interface CanResize { + KubernetesKind kind(); + + default void resize( + KubernetesCredentials credentials, + String namespace, + String name, + Capacity capacity, + Task task, + String opName) { + credentials.scale(kind(), namespace, name, capacity.getDesired(), task, opName); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanResumeRollout.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanResumeRollout.java new file mode 100644 index 00000000000..54f7fdbd619 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanResumeRollout.java @@ -0,0 +1,31 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + +public interface CanResumeRollout { + KubernetesKind kind(); + + default void resumeRollout( + KubernetesCredentials credentials, String namespace, String name, Task task, String opName) { + credentials.resumeRollout(kind(), namespace, name, task, opName); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanRollingRestart.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanRollingRestart.java new file mode 100644 index 00000000000..db07b080417 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanRollingRestart.java @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.netflix.spinnaker.clouddriver.data.task.Task; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + + public interface CanRollingRestart { + KubernetesKind kind(); + + default void rollingRestart( + KubernetesCredentials credentials, String namespace, String name, Task task, String opName) { + credentials.rollingRestart(kind(), namespace, name, task, opName); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanRollout.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanRollout.java new file mode 100644 index 00000000000..4d75c9847bc --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanRollout.java @@ -0,0 +1,31 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + import java.util.List; + + public interface CanRollout { + KubernetesKind kind(); + + default List<String> historyRollout( + KubernetesCredentials credentials, String namespace, String name) { + return credentials.historyRollout(kind(), namespace, name); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanScale.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanScale.java new file mode 100644 index 00000000000..dea0b8b3d70 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanScale.java @@ -0,0 +1,36 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + +public interface CanScale { + KubernetesKind kind(); + + default void scale( + KubernetesCredentials credentials, + String namespace, + String name, + int replicas, + Task task, + String opName) { + credentials.scale(kind(), namespace, name, replicas, task, opName); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanUndoRollout.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanUndoRollout.java new file mode 100644 index 00000000000..ef9163bac45 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanUndoRollout.java @@ -0,0 +1,31 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + +public interface CanUndoRollout extends CanRollout { + @Override + KubernetesKind kind(); + + default void undoRollout( + KubernetesCredentials credentials, String namespace, String name, int revision) { + credentials.undoRollout(kind(), namespace, name, revision); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CustomKubernetesHandlerFactory.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CustomKubernetesHandlerFactory.java new file mode 100644 index 00000000000..5f62209eeaf --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CustomKubernetesHandlerFactory.java @@ -0,0 +1,118 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.CustomKubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.Front50ApplicationLoader; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import javax.annotation.Nonnull; +import org.springframework.lang.Nullable; + +public class CustomKubernetesHandlerFactory { + public static KubernetesHandler create( + KubernetesKind kubernetesKind, + SpinnakerKind spinnakerKind, + boolean versioned, + int deployPriority) { + return new Handler(kubernetesKind, spinnakerKind, versioned, deployPriority); + } + + private static class Handler extends KubernetesHandler { + private final KubernetesKind kubernetesKind; + private final SpinnakerKind spinnakerKind; + private final boolean versioned; + private final int deployPriority; + + Handler( + KubernetesKind kubernetesKind, + SpinnakerKind spinnakerKind, + boolean versioned, + int deployPriority) { + this.kubernetesKind = kubernetesKind; + this.spinnakerKind = spinnakerKind; + this.versioned = versioned; + this.deployPriority = deployPriority; + } + + @Override + public int deployPriority() { + return deployPriority; + } + + @Nonnull + @Override + public KubernetesKind kind() { + return kubernetesKind; + } + + @Override + public boolean versioned() { + return versioned; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return spinnakerKind; + } + + @Override + public Manifest.Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return this::buildCustomCachingAgent; + } + + private KubernetesCachingAgent buildCustomCachingAgent( + KubernetesNamedAccountCredentials namedAccountCredentials, + ObjectMapper objectMapper, + Registry registry, + int agentIndex, + int agentCount, + Long agentInterval, + KubernetesConfigurationProperties configurationProperties, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + @Nullable Front50ApplicationLoader front50ApplicationLoader) { + return CustomKubernetesCachingAgentFactory.create( + kubernetesKind, + namedAccountCredentials, + objectMapper, + registry, + agentIndex, + agentCount, + agentInterval, + configurationProperties, + kubernetesSpinnakerKindMap, + front50ApplicationLoader); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/HasPods.java 
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/HasPods.java new file mode 100644 index 00000000000..a2987e0919f --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/HasPods.java @@ -0,0 +1,38 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + + package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + + import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; + import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; + import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; + import java.util.List; + + public interface HasPods { + List<KubernetesManifest> pods(KubernetesCredentials credentials, KubernetesManifest object); + + static HasPods lookupProperties(ResourcePropertyRegistry registry, KubernetesKind kind) { + KubernetesHandler hasPodsHandler = registry.get(kind).getHandler(); + if (!(hasPodsHandler instanceof HasPods)) { + throw new IllegalArgumentException( + "No support for pods via " + kind + " exists in Spinnaker"); + } + + return (HasPods) hasPodsHandler; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesAPIServiceHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesAPIServiceHandler.java new file mode 100644 index 00000000000..00eeba96794 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesAPIServiceHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Mirantis, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.API_SERVICE_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesAPIServiceHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return API_SERVICE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.API_SERVICE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesClusterRoleBindingHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesClusterRoleBindingHandler.java new file mode 100644 index 00000000000..34ff5f1a150 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesClusterRoleBindingHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Joel Wilsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.ROLE_BINDING_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesClusterRoleBindingHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return ROLE_BINDING_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.CLUSTER_ROLE_BINDING; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesClusterRoleHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesClusterRoleHandler.java new file mode 100644 index 00000000000..c8dd95c4c08 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesClusterRoleHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Joel Wilsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.ROLE_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesClusterRoleHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return ROLE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.CLUSTER_ROLE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesConfigMapHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesConfigMapHandler.java new file mode 100644 index 00000000000..e74055e3766 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesConfigMapHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesConfigMapHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return MOUNTABLE_DATA_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.CONFIG_MAP; + } + + @Override + public boolean versioned() { + return true; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.CONFIGS; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesControllerRevisionHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesControllerRevisionHandler.java new file mode 100644 index 00000000000..10cbc66b183 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesControllerRevisionHandler.java @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesControllerRevisionHandler extends KubernetesHandler { + @Override + public int deployPriority() { + throw new IllegalStateException("Controller revisions cannot be deployed."); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.CONTROLLER_REVISION; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCronJobHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCronJobHandler.java new file mode 100644 index 00000000000..b5c6555709e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCronJobHandler.java @@ -0,0 +1,97 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status;
+import io.kubernetes.client.openapi.models.V2alpha1CronJob;
+import io.kubernetes.client.openapi.models.V2alpha1CronJobStatus;
+import javax.annotation.Nonnull;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesCronJobHandler extends KubernetesHandler
+    implements CanDelete, ServerGroupHandler {
+
+  @Nonnull
+  @Override
+  protected ImmutableList<Replacer> artifactReplacers() {
+    return ImmutableList.of(
+        Replacer.dockerImage(),
+        Replacer.configMapVolume(),
+        Replacer.secretVolume(),
+        Replacer.configMapProjectedVolume(),
+        Replacer.secretProjectedVolume(),
+        Replacer.configMapEnv(),
+        Replacer.secretEnv(),
+        Replacer.configMapKeyValue(),
+        Replacer.secretKeyValue());
+  }
+
+  @Override
+  public int deployPriority() {
+    return WORKLOAD_CONTROLLER_PRIORITY.getValue();
+  }
+
+  @Nonnull
+  @Override
+  public KubernetesKind kind() {
+    return KubernetesKind.CRON_JOB;
+  }
+
+  @Override
+  public boolean versioned() {
+    return false;
+  }
+
+  @Nonnull
+  @Override
+  public SpinnakerKind spinnakerKind() {
+    return SpinnakerKind.SERVER_GROUPS;
+  }
+
+  @Override
+  public Status status(KubernetesManifest manifest) {
+    V2alpha1CronJob v2alpha1CronJob =
+        KubernetesCacheDataConverter.getResource(manifest, V2alpha1CronJob.class);
+    return status(v2alpha1CronJob);
+  }
+
+  @Override
+  protected KubernetesCachingAgentFactory cachingAgentFactory() {
+    return KubernetesCoreCachingAgent::new;
+  }
+
+  private Status status(V2alpha1CronJob job) {
+    V2alpha1CronJobStatus status = job.getStatus();
+    if (status == null) {
+      return Status.noneReported();
+    }
+
+    return Status.defaultStatus();
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCustomResourceDefinitionHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCustomResourceDefinitionHandler.java
new file mode 100644
index 00000000000..1b7b6eabe51
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCustomResourceDefinitionHandler.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.RESOURCE_DEFINITION_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesCustomResourceDefinitionHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return RESOURCE_DEFINITION_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.CUSTOM_RESOURCE_DEFINITION; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCustomResourceHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCustomResourceHandler.java new file mode 100644 index 00000000000..cfcdda21786 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCustomResourceHandler.java @@ -0,0 +1,69 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.LOWEST_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesUnregisteredCustomResourceCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import javax.annotation.Nonnull; + +public class KubernetesCustomResourceHandler extends KubernetesHandler implements CanDelete { + + private final KubernetesKind kind; + + public KubernetesCustomResourceHandler(KubernetesKind kind) { + this.kind = kind; + } + + @Override + public int deployPriority() { + return LOWEST_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return this.kind; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Manifest.Status status(KubernetesManifest manifest) { + return Manifest.Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesUnregisteredCustomResourceCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDaemonSetHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDaemonSetHandler.java new file mode 100644 index 00000000000..c4e77214813 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDaemonSetHandler.java @@ -0,0 +1,171 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.APPS_V1;
+import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status;
+import io.kubernetes.client.openapi.models.V1DaemonSet;
+import io.kubernetes.client.openapi.models.V1DaemonSetStatus;
+import io.kubernetes.client.openapi.models.V1ObjectMeta;
+import java.util.Map;
+import java.util.Optional;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesDaemonSetHandler extends KubernetesHandler
+    implements CanResize,
+        CanPauseRollout,
+        CanResumeRollout,
+        CanUndoRollout,
+        CanRollingRestart,
+        ServerGroupHandler {
+
+  private static final ImmutableSet<KubernetesApiVersion> SUPPORTED_API_VERSIONS =
+      ImmutableSet.of(APPS_V1);
+
+  @Nonnull
+  @Override
+  protected ImmutableList<Replacer> artifactReplacers() {
+    return ImmutableList.of(
+        Replacer.dockerImage(),
+        Replacer.configMapVolume(),
+        Replacer.secretVolume(),
+        Replacer.configMapProjectedVolume(),
+        Replacer.secretProjectedVolume(),
+        Replacer.configMapEnv(),
+        Replacer.secretEnv(),
+        Replacer.configMapKeyValue(),
+        Replacer.secretKeyValue());
+  }
+
+  @Override
+  public int deployPriority() {
+    return WORKLOAD_CONTROLLER_PRIORITY.getValue();
+  }
+
+  @Nonnull
+  @Override
+  public KubernetesKind kind() {
+    return KubernetesKind.DAEMON_SET;
+  }
+
+  @Override
+  public boolean versioned() {
+    return false;
+  }
+
+  @Nonnull
+  @Override
+  public SpinnakerKind spinnakerKind() {
+    return SpinnakerKind.SERVER_GROUPS;
+  }
+
+  @Override
+  protected KubernetesCachingAgentFactory cachingAgentFactory() {
+    return KubernetesCoreCachingAgent::new;
+  }
+
+  @Override
+  public Status status(KubernetesManifest manifest) {
+    if (!SUPPORTED_API_VERSIONS.contains(manifest.getApiVersion())) {
+      throw new UnsupportedVersionException(manifest);
+    }
+    V1DaemonSet v1DaemonSet = KubernetesCacheDataConverter.getResource(manifest, V1DaemonSet.class);
+    return status(v1DaemonSet);
+  }
+
+  @Override
+  public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) {
+    Map<String, Object> result = super.hydrateSearchResult(key);
+    result.put("serverGroup", result.get("name"));
+
+    return result;
+  }
+
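+  // The walk below derives rollout health step by step: a missing status block
+  // means the controller has reported nothing yet; a stale observedGeneration
+  // means the latest spec change has not been seen; and each scheduled,
+  // updated, available, and ready count is then compared against
+  // desiredNumberScheduled, with the first shortfall yielding an unstable
+  // status carrying a matching message.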
+  private Status status(V1DaemonSet daemonSet) {
+    V1DaemonSetStatus status = daemonSet.getStatus();
+    if (status == null) {
+      return Status.noneReported();
+    }
+
+    if (!generationMatches(daemonSet, status)) {
+      return Status.defaultStatus().unstable(UnstableReason.OLD_GENERATION.getMessage());
+    }
+
+    if (!daemonSet.getSpec().getUpdateStrategy().getType().equalsIgnoreCase("rollingupdate")) {
+      return Status.defaultStatus();
+    }
+
+    Long observedGeneration = status.getObservedGeneration();
+    if (observedGeneration != null
+        && !observedGeneration.equals(daemonSet.getMetadata().getGeneration())) {
+      return Status.defaultStatus().unstable("Waiting for daemonset spec update to be observed");
+    }
+
+    int desiredReplicas = defaultToZero(status.getDesiredNumberScheduled());
+    int existing = defaultToZero(status.getCurrentNumberScheduled());
+    if (desiredReplicas > existing) {
+      return Status.defaultStatus().unstable("Waiting for all replicas to be scheduled");
+    }
+
+    existing = defaultToZero(status.getUpdatedNumberScheduled());
+    if (desiredReplicas > existing) {
+      return Status.defaultStatus().unstable("Waiting for all updated replicas to be scheduled");
+    }
+
+    existing = defaultToZero(status.getNumberAvailable());
+    if (desiredReplicas > existing) {
+      return Status.defaultStatus().unstable("Waiting for all replicas to be available");
+    }
+
+    existing = defaultToZero(status.getNumberReady());
+    if (desiredReplicas > existing) {
+      return Status.defaultStatus().unstable("Waiting for all replicas to be ready");
+    }
+
+    return Status.defaultStatus();
+  }
+
+  private boolean generationMatches(V1DaemonSet daemonSet, V1DaemonSetStatus status) {
+    Optional<Long> metadataGeneration =
+        Optional.ofNullable(daemonSet.getMetadata()).map(V1ObjectMeta::getGeneration);
+    Optional<Long> statusGeneration = Optional.ofNullable(status.getObservedGeneration());
+
+    return statusGeneration.isPresent() && statusGeneration.equals(metadataGeneration);
+  }
+
+  // Unboxes an Integer, returning 0 if the input is null
+  private int defaultToZero(@Nullable Integer input) {
+    return input == null ? 0 : input;
+  }
+}
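Note: every workload handler in this change follows the same Status contract: Status.noneReported() when the controller has written no status yet, Status.defaultStatus().unstable(...) while a rollout condition is unmet, and a bare Status.defaultStatus() once steady. A handler for a custom workload kind would take the same shape; in the sketch below, Widget and its accessors are invented for illustration, while the Status and KubernetesCacheDataConverter calls are the ones used throughout this diff:

    @Override
    public Status status(KubernetesManifest manifest) {
      // Widget is a hypothetical model class; only the Status API is from this diff.
      Widget widget = KubernetesCacheDataConverter.getResource(manifest, Widget.class);
      if (widget == null || widget.getStatus() == null) {
        return Status.noneReported();
      }
      if (widget.getStatus().getReadyReplicas() < widget.getStatus().getDesiredReplicas()) {
        return Status.defaultStatus().unstable("Waiting for all replicas to be ready");
      }
      return Status.defaultStatus();
    }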
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandler.java
new file mode 100644
index 00000000000..997f6a69558
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandler.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.APPS_V1;
+import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import io.kubernetes.client.openapi.models.V1Deployment;
+import io.kubernetes.client.openapi.models.V1DeploymentCondition;
+import io.kubernetes.client.openapi.models.V1DeploymentStatus;
+import io.kubernetes.client.openapi.models.V1ObjectMeta;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import javax.annotation.Nullable;
+import org.springframework.stereotype.Component;
+
+@Component
+@NonnullByDefault
+public class KubernetesDeploymentHandler extends KubernetesHandler
+    implements CanResize,
+        CanScale,
+        CanPauseRollout,
+        CanResumeRollout,
+        CanUndoRollout,
+        CanRollingRestart,
+        ServerGroupManagerHandler {
+
+  private static final ImmutableSet<KubernetesApiVersion> SUPPORTED_API_VERSIONS =
+      ImmutableSet.of(APPS_V1);
+
+  @Override
+  protected ImmutableList<Replacer> artifactReplacers() {
+    return ImmutableList.of(
+        Replacer.dockerImage(),
+        Replacer.configMapVolume(),
+        Replacer.secretVolume(),
+        Replacer.configMapProjectedVolume(),
+        Replacer.secretProjectedVolume(),
+        Replacer.configMapEnv(),
+        Replacer.secretEnv(),
+        Replacer.configMapKeyValue(),
+        Replacer.secretKeyValue());
+  }
+
+  @Override
+  public int deployPriority() {
+    return WORKLOAD_CONTROLLER_PRIORITY.getValue();
+  }
+
+  @Override
+  public KubernetesKind kind() {
+    return KubernetesKind.DEPLOYMENT;
+  }
+
+  @Override
+  public boolean versioned() {
+    return false;
+  }
+
+  @Override
+  public SpinnakerKind spinnakerKind() {
+    return SpinnakerKind.SERVER_GROUP_MANAGERS;
+  }
+
+  @Override
+  public Status status(KubernetesManifest manifest) {
+    if (!SUPPORTED_API_VERSIONS.contains(manifest.getApiVersion())) {
+      throw new UnsupportedVersionException(manifest);
+    }
+    V1Deployment appsV1Deployment =
+        KubernetesCacheDataConverter.getResource(manifest, V1Deployment.class);
+    return status(appsV1Deployment);
+  }
+
+  @Override
+  protected KubernetesCachingAgentFactory cachingAgentFactory() {
+    return KubernetesCoreCachingAgent::new;
+  }
+
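+  // A deployment's health is assembled from its conditions rather than a
+  // single field: a "deploymentpaused" reason marks the result paused, a
+  // false "available" condition marks it unstable and unavailable, a
+  // "progressdeadlineexceeded" reason marks it failed, and any remaining
+  // replica-count shortfall (checked last) marks it unstable.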
+  private Status status(V1Deployment deployment) {
+    V1DeploymentStatus status = deployment.getStatus();
+    if (status == null) {
+      return Status.noneReported();
+    }
+
+    if (!generationMatches(deployment, status)) {
+      return Status.defaultStatus().unstable(UnstableReason.OLD_GENERATION.getMessage());
+    }
+
+    List<V1DeploymentCondition> conditions =
+        Optional.ofNullable(status.getConditions()).orElse(ImmutableList.of());
+
+    Status result = Status.defaultStatus();
+    getPausedReason(conditions).ifPresent(result::paused);
+    getUnavailableReason(conditions)
+        .ifPresent(reason -> result.unstable(reason).unavailable(reason));
+    getFailedReason(conditions).ifPresent(result::failed);
+    checkReplicaCounts(deployment, status)
+        .ifPresent(reason -> result.unstable(reason.getMessage()));
+    return result;
+  }
+
+  private static Optional<String> getUnavailableReason(
+      Collection<V1DeploymentCondition> conditions) {
+    return conditions.stream()
+        .filter(c -> c.getType().equalsIgnoreCase("available"))
+        .filter(c -> c.getStatus().equalsIgnoreCase("false"))
+        .map(V1DeploymentCondition::getMessage)
+        .findAny();
+  }
+
+  private static Optional<String> getPausedReason(Collection<V1DeploymentCondition> conditions) {
+    return conditions.stream()
+        .filter(c -> c.getReason() != null)
+        .filter(c -> c.getReason().equalsIgnoreCase("deploymentpaused"))
+        .map(V1DeploymentCondition::getMessage)
+        .findAny();
+  }
+
+  private static Optional<String> getFailedReason(Collection<V1DeploymentCondition> conditions) {
+    return conditions.stream()
+        .filter(c -> c.getType().equalsIgnoreCase("progressing"))
+        .filter(c -> c.getReason() != null)
+        .filter(c -> c.getReason().equalsIgnoreCase("progressdeadlineexceeded"))
+        .map(c -> "Deployment exceeded its progress deadline")
+        .findAny();
+  }
+
+  private boolean generationMatches(V1Deployment deployment, V1DeploymentStatus status) {
+    Optional<Long> metadataGeneration =
+        Optional.ofNullable(deployment.getMetadata()).map(V1ObjectMeta::getGeneration);
+    Optional<Long> statusGeneration = Optional.ofNullable(status.getObservedGeneration());
+
+    return statusGeneration.isPresent() && statusGeneration.equals(metadataGeneration);
+  }
+
+  // Unboxes an Integer, returning 0 if the input is null
+  private static int defaultToZero(@Nullable Integer input) {
+    return input == null ? 0 : input;
+  }
+
+  private static Optional<UnstableReason> checkReplicaCounts(
+      V1Deployment deployment, V1DeploymentStatus status) {
+    int desiredReplicas = defaultToZero(deployment.getSpec().getReplicas());
+    int updatedReplicas = defaultToZero(status.getUpdatedReplicas());
+    if (updatedReplicas < desiredReplicas) {
+      return Optional.of(UnstableReason.UPDATED_REPLICAS);
+    }
+
+    int statusReplicas = defaultToZero(status.getReplicas());
+    if (statusReplicas > updatedReplicas) {
+      return Optional.of(UnstableReason.OLD_REPLICAS);
+    }
+
+    int availableReplicas = defaultToZero(status.getAvailableReplicas());
+    if (availableReplicas < desiredReplicas) {
+      return Optional.of(UnstableReason.AVAILABLE_REPLICAS);
+    }
+
+    int readyReplicas = defaultToZero(status.getReadyReplicas());
+    if (readyReplicas < desiredReplicas) {
+      return Optional.of(UnstableReason.READY_REPLICAS);
+    }
+
+    return Optional.empty();
+  }
+}
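Note: checkReplicaCounts compares the counters in rollout order, so a single rolling update surfaces each phase in turn. With spec.replicas = 3, a deployment that has rolled one pod reports UPDATED_REPLICAS (1 < 3); once all three pods are updated but an old one still lingers, status.replicas = 4 > 3 reports OLD_REPLICAS; the deployment settles only after the available and ready counts both reach 3.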
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesEventHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesEventHandler.java
new file mode 100644
index 00000000000..0f05ea29c13
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesEventHandler.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.EVENT;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status;
+import io.kubernetes.client.openapi.models.CoreV1Event;
+import io.kubernetes.client.openapi.models.V1ObjectReference;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesEventHandler extends KubernetesHandler {
+  @Override
+  public int deployPriority() {
+    throw new IllegalStateException("Events cannot be deployed.");
+  }
+
+  @Nonnull
+  @Override
+  public KubernetesKind kind() {
+    return EVENT;
+  }
+
+  @Override
+  public boolean versioned() {
+    return false;
+  }
+
+  @Nonnull
+  @Override
+  public SpinnakerKind spinnakerKind() {
+    return SpinnakerKind.UNCLASSIFIED;
+  }
+
+  @Override
+  public Manifest.Status status(KubernetesManifest manifest) {
+    return Status.defaultStatus();
+  }
+
+  @Override
+  protected KubernetesCachingAgentFactory cachingAgentFactory() {
+    return KubernetesCoreCachingAgent::new;
+  }
+
+  @Override
+  public void addRelationships(
+      Map<KubernetesKind, List<KubernetesManifest>> allResources,
+      Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) {
+    relationshipMap.putAll(
+        allResources.getOrDefault(EVENT, new ArrayList<>()).stream()
+            .map(
+                m ->
+                    ImmutablePair.of(
+                        m,
+                        involvedManifest(
+                            KubernetesCacheDataConverter.getResource(m, CoreV1Event.class))))
+            .filter(p -> p.getRight() != null)
+            .collect(
+                Collectors.toMap(ImmutablePair::getLeft, p -> ImmutableList.of(p.getRight()))));
+  }
+
+  private KubernetesManifest involvedManifest(CoreV1Event event) {
+    if (event == null) {
+      return null;
+    }
+
+    V1ObjectReference ref = event.getInvolvedObject();
+
+    if (ref == null
+        || Strings.isNullOrEmpty(ref.getApiVersion())
+        || Strings.isNullOrEmpty(ref.getKind())
+        || Strings.isNullOrEmpty(ref.getName())) {
+      return null;
+    }
+
+    KubernetesManifest result = new KubernetesManifest();
+    result.put("metadata", new HashMap<String, Object>());
+    result.setApiVersion(KubernetesApiVersion.fromString(ref.getApiVersion()));
+
result.setKind(KubernetesKind.fromString(ref.getKind())); + result.setNamespace(ref.getNamespace()); + result.setName(ref.getName()); + return result; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHandler.java new file mode 100644 index 00000000000..6e24b7845f4 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHandler.java @@ -0,0 +1,207 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer.ReplaceResult; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.Front50ApplicationLoader; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Warning; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.*; +import javax.annotation.Nonnull; +import lombok.Getter; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.lang.Nullable; + +public abstract class KubernetesHandler implements CanDeploy, CanDelete, CanPatch { + protected static final ObjectMapper objectMapper = new ObjectMapper(); + + private final ArtifactReplacer artifactReplacer; + + @Value("${kubernetes.artifact-binding.docker-image:match-name-and-tag}") + protected String dockerImageBinding; + + protected 
KubernetesHandler() {
+    this.artifactReplacer = new ArtifactReplacer(artifactReplacers());
+  }
+
+  public abstract int deployPriority();
+
+  @Override
+  @Nonnull
+  public abstract KubernetesKind kind();
+
+  public abstract boolean versioned();
+
+  @Nonnull
+  public abstract SpinnakerKind spinnakerKind();
+
+  public abstract Status status(KubernetesManifest manifest);
+
+  public List<Warning> listWarnings(KubernetesManifest manifest) {
+    return new ArrayList<>();
+  }
+
+  protected List<String> sensitiveKeys() {
+    return new ArrayList<>();
+  }
+
+  @Nonnull
+  protected ImmutableList<Replacer> artifactReplacers() {
+    return ImmutableList.of();
+  }
+
+  public ReplaceResult replaceArtifacts(
+      KubernetesManifest manifest, List<Artifact> artifacts, @Nonnull String account) {
+    return artifactReplacer.replaceAll(
+        this.dockerImageBinding, manifest, artifacts, manifest.getNamespace(), account);
+  }
+
+  public ReplaceResult replaceArtifacts(
+      KubernetesManifest manifest,
+      List<Artifact> artifacts,
+      @Nonnull String namespace,
+      @Nonnull String account) {
+    return artifactReplacer.replaceAll(
+        this.dockerImageBinding, manifest, artifacts, namespace, account);
+  }
+
+  protected abstract KubernetesCachingAgentFactory cachingAgentFactory();
+
+  public ImmutableSet<Artifact> listArtifacts(KubernetesManifest manifest) {
+    return artifactReplacer.findAll(manifest);
+  }
+
+  public KubernetesCachingAgent buildCachingAgent(
+      KubernetesNamedAccountCredentials namedAccountCredentials,
+      ObjectMapper objectMapper,
+      Registry registry,
+      int agentIndex,
+      int agentCount,
+      Long agentInterval,
+      KubernetesConfigurationProperties configurationProperties,
+      KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap,
+      @Nullable Front50ApplicationLoader front50ApplicationLoader) {
+    return cachingAgentFactory()
+        .buildCachingAgent(
+            namedAccountCredentials,
+            objectMapper,
+            registry,
+            agentIndex,
+            agentCount,
+            agentInterval,
+            configurationProperties,
+            kubernetesSpinnakerKindMap,
+            front50ApplicationLoader);
+  }
+
+  // used for stripping sensitive values
+  public void removeSensitiveKeys(KubernetesManifest manifest) {
+    List<String> sensitiveKeys = sensitiveKeys();
+    sensitiveKeys.forEach(manifest::remove);
+  }
+
+  public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) {
+    Map<String, Object> result =
+        objectMapper.convertValue(key, new TypeReference<Map<String, Object>>() {});
+    result.put("region", key.getNamespace());
+    result.put(
+        "name", KubernetesManifest.getFullResourceName(key.getKubernetesKind(), key.getName()));
+    return result;
+  }
+
+  public void addRelationships(
+      Map<KubernetesKind, List<KubernetesManifest>> allResources,
+      Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) {}
+
+  // lower "value" is deployed before higher "value"
+  public enum DeployPriority {
+    LOWEST_PRIORITY(1000),
+    WORKLOAD_ATTACHMENT_PRIORITY(110),
+    WORKLOAD_CONTROLLER_PRIORITY(100),
+    WORKLOAD_PRIORITY(100),
+    WORKLOAD_MODIFIER_PRIORITY(90),
+    PDB_PRIORITY(90),
+    API_SERVICE_PRIORITY(80),
+    NETWORK_RESOURCE_PRIORITY(70),
+    MOUNTABLE_DATA_PRIORITY(50),
+    MOUNTABLE_DATA_BACKING_RESOURCE_PRIORITY(40),
+    SERVICE_ACCOUNT_PRIORITY(40),
+    STORAGE_CLASS_PRIORITY(40),
+    ADMISSION_PRIORITY(40),
+    RESOURCE_DEFINITION_PRIORITY(30),
+    ROLE_BINDING_PRIORITY(30),
+    ROLE_PRIORITY(20),
+    NAMESPACE_PRIORITY(0);
+
+    @Getter private final int value;
+
+    DeployPriority(int value) {
+      this.value = value;
+    }
+
+    public static DeployPriority fromString(String val) {
+      if (val == null) {
+        return null;
+      }
+
+      return Arrays.stream(values())
+          .filter(v -> v.toString().equalsIgnoreCase(val))
+          .findFirst()
+          .orElseThrow(() -> new IllegalArgumentException("No such priority '" + val + "'"));
+    }
+  }
+
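+  // Handlers are applied in ascending deployPriority() order (see the enum
+  // above), so namespaces (0) land before the role bindings (30) and workload
+  // controllers (100) that depend on them; ordering a list of handlers is a
+  // plain integer sort, e.g.
+  //   handlers.sort(Comparator.comparingInt(KubernetesHandler::deployPriority));
+  // The comparators below instead order manifests of one kind for sorted views.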
+  public Comparator<KubernetesManifest> comparatorFor(KubernetesManifestProvider.Sort sort) {
+    switch (sort) {
+      case AGE:
+        return ageComparator();
+      case SIZE:
+        return sizeComparator();
+      default:
+        throw new IllegalArgumentException("No comparator for " + sort + " found");
+    }
+  }
+
+  // can be overridden by each handler
+  protected Comparator<KubernetesManifest> ageComparator() {
+    return Comparator.comparing(KubernetesManifest::getCreationTimestamp);
+  }
+
+  // can be overridden by each handler
+  protected Comparator<KubernetesManifest> sizeComparator() {
+    return Comparator.comparing(m -> m.getReplicas() == null ? -1 : m.getReplicas());
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandler.java
new file mode 100644
index 00000000000..e7e0b07bd4d
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandler.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_ATTACHMENT_PRIORITY;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status;
+import io.kubernetes.client.openapi.models.V1HorizontalPodAutoscaler;
+import io.kubernetes.client.openapi.models.V1HorizontalPodAutoscalerStatus;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesHorizontalPodAutoscalerHandler extends KubernetesHandler {
+  @Nonnull
+  @Override
+  protected ImmutableList<Replacer> artifactReplacers() {
+    return ImmutableList.of(Replacer.hpaDeployment(), Replacer.hpaReplicaSet());
+  }
+
+  @Override
+  public int deployPriority() {
+    return WORKLOAD_ATTACHMENT_PRIORITY.getValue();
+  }
+
+  @Nonnull
+  @Override
+  public KubernetesKind kind() {
+    return KubernetesKind.HORIZONTAL_POD_AUTOSCALER;
+  }
+
+  @Override
+  public boolean versioned() {
+    return false;
+  }
+
+  @Nonnull
+  @Override
+  public SpinnakerKind spinnakerKind() {
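+    // An HPA attaches to a workload rather than being one itself (note the
+    // WORKLOAD_ATTACHMENT_PRIORITY above), so it maps to no core Spinnaker kind.
+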
return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + V1HorizontalPodAutoscaler hpa = + KubernetesCacheDataConverter.getResource(manifest, V1HorizontalPodAutoscaler.class); + return status(hpa); + } + + private Status status(V1HorizontalPodAutoscaler hpa) { + V1HorizontalPodAutoscalerStatus status = hpa.getStatus(); + if (status == null) { + return Status.noneReported(); + } + + int desiredReplicas = defaultToZero(status.getDesiredReplicas()); + int existing = defaultToZero(status.getCurrentReplicas()); + if (desiredReplicas > existing) { + return Status.defaultStatus() + .unstable( + String.format( + "Waiting for HPA to complete a scale up, current: %d desired: %d", + existing, desiredReplicas)); + } + + if (desiredReplicas < existing) { + return Status.defaultStatus() + .unstable( + String.format( + "Waiting for HPA to complete a scale down, current: %d desired: %d", + existing, desiredReplicas)); + } + // desiredReplicas == existing, this is now stable + return Status.defaultStatus(); + } + + // Unboxes an Integer, returning 0 if the input is null + private static int defaultToZero(@Nullable Integer input) { + return input == null ? 0 : input; + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesIngressHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesIngressHandler.java new file mode 100644 index 00000000000..817ba77f8ef --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesIngressHandler.java @@ -0,0 +1,223 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.EXTENSIONS_V1BETA1; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.NETWORKING_K8S_IO_V1; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.NETWORKING_K8S_IO_V1BETA1; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.INGRESS; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.SERVICE; +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.NETWORK_RESOURCE_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import io.kubernetes.client.openapi.models.ExtensionsV1beta1HTTPIngressPath; +import io.kubernetes.client.openapi.models.ExtensionsV1beta1HTTPIngressRuleValue; +import io.kubernetes.client.openapi.models.ExtensionsV1beta1Ingress; +import io.kubernetes.client.openapi.models.ExtensionsV1beta1IngressBackend; +import io.kubernetes.client.openapi.models.ExtensionsV1beta1IngressRule; +import io.kubernetes.client.openapi.models.NetworkingV1beta1HTTPIngressPath; +import io.kubernetes.client.openapi.models.NetworkingV1beta1HTTPIngressRuleValue; +import io.kubernetes.client.openapi.models.NetworkingV1beta1Ingress; +import io.kubernetes.client.openapi.models.NetworkingV1beta1IngressBackend; +import io.kubernetes.client.openapi.models.NetworkingV1beta1IngressRule; +import io.kubernetes.client.openapi.models.V1HTTPIngressPath; +import io.kubernetes.client.openapi.models.V1HTTPIngressRuleValue; +import io.kubernetes.client.openapi.models.V1Ingress; +import io.kubernetes.client.openapi.models.V1IngressBackend; +import io.kubernetes.client.openapi.models.V1IngressRule; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesIngressHandler extends KubernetesHandler { + private static final Logger log = LoggerFactory.getLogger(KubernetesIngressHandler.class); + + @Override + public int deployPriority() { + return NETWORK_RESOURCE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.INGRESS; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.LOAD_BALANCERS; + } + + @Override + public Status 
status(KubernetesManifest manifest) {
+    return Status.defaultStatus();
+  }
+
+  @Override
+  protected KubernetesCachingAgentFactory cachingAgentFactory() {
+    return KubernetesCoreCachingAgent::new;
+  }
+
+  @Override
+  public void addRelationships(
+      Map<KubernetesKind, List<KubernetesManifest>> allResources,
+      Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) {
+    BiFunction<String, String, String> manifestName = (namespace, name) -> namespace + ":" + name;
+
+    Map<String, KubernetesManifest> services =
+        allResources.getOrDefault(SERVICE, new ArrayList<>()).stream()
+            .collect(
+                Collectors.toMap(
+                    (m) -> manifestName.apply(m.getNamespace(), m.getName()), (m) -> m));
+
+    for (KubernetesManifest ingress : allResources.getOrDefault(INGRESS, new ArrayList<>())) {
+      List<KubernetesManifest> attachedServices = new ArrayList<>();
+      try {
+        attachedServices =
+            KubernetesIngressHandler.attachedServices(ingress).stream()
+                .map(s -> services.get(manifestName.apply(ingress.getNamespace(), s)))
+                .filter(Objects::nonNull)
+                .collect(Collectors.toList());
+      } catch (Exception e) {
+        log.warn("Failure getting services attached to {}", ingress.getName(), e);
+      }
+
+      relationshipMap.put(ingress, attachedServices);
+    }
+  }
+
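+  // Ingress has lived in three API groups, so service extraction dispatches on
+  // apiVersion: extensions/v1beta1 and networking.k8s.io/v1beta1 expose the
+  // backend service as backend.serviceName, while networking.k8s.io/v1 nests
+  // it under backend.service.name.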
+  private static List<String> attachedServices(KubernetesManifest manifest) {
+    if (manifest.getApiVersion().equals(EXTENSIONS_V1BETA1)) {
+      ExtensionsV1beta1Ingress v1beta1Ingress =
+          KubernetesCacheDataConverter.getResource(manifest, ExtensionsV1beta1Ingress.class);
+      return attachedServices(v1beta1Ingress);
+    } else if (manifest.getApiVersion().equals(NETWORKING_K8S_IO_V1BETA1)) {
+      NetworkingV1beta1Ingress v1beta1Ingress =
+          KubernetesCacheDataConverter.getResource(manifest, NetworkingV1beta1Ingress.class);
+      return attachedServices(v1beta1Ingress);
+    } else if (manifest.getApiVersion().equals(NETWORKING_K8S_IO_V1)) {
+      V1Ingress v1Ingress = KubernetesCacheDataConverter.getResource(manifest, V1Ingress.class);
+      return attachedServices(v1Ingress);
+    } else {
+      throw new UnsupportedVersionException(manifest);
+    }
+  }
+
+  private static List<String> attachedServices(ExtensionsV1beta1Ingress ingress) {
+    Set<String> result = new HashSet<>();
+    ExtensionsV1beta1IngressBackend backend = ingress.getSpec().getBackend();
+    if (backend != null) {
+      result.add(backend.getServiceName());
+    }
+
+    List<ExtensionsV1beta1IngressRule> rules = ingress.getSpec().getRules();
+    rules = rules == null ? new ArrayList<>() : rules;
+    for (ExtensionsV1beta1IngressRule rule : rules) {
+      ExtensionsV1beta1HTTPIngressRuleValue http = rule.getHttp();
+      if (http != null) {
+        for (ExtensionsV1beta1HTTPIngressPath path : http.getPaths()) {
+          backend = path.getBackend();
+          if (backend != null) {
+            result.add(backend.getServiceName());
+          }
+        }
+      }
+    }
+
+    return new ArrayList<>(result);
+  }
+
+  private static List<String> attachedServices(NetworkingV1beta1Ingress ingress) {
+    Set<String> result = new HashSet<>();
+    NetworkingV1beta1IngressBackend backend = ingress.getSpec().getBackend();
+    if (backend != null) {
+      result.add(backend.getServiceName());
+    }
+
+    List<NetworkingV1beta1IngressRule> rules = ingress.getSpec().getRules();
+    rules = rules == null ? new ArrayList<>() : rules;
+    for (NetworkingV1beta1IngressRule rule : rules) {
+      NetworkingV1beta1HTTPIngressRuleValue http = rule.getHttp();
+      if (http != null) {
+        for (NetworkingV1beta1HTTPIngressPath path : http.getPaths()) {
+          backend = path.getBackend();
+          if (backend != null) {
+            result.add(backend.getServiceName());
+          }
+        }
+      }
+    }
+
+    return new ArrayList<>(result);
+  }
+
+  private static List<String> attachedServices(V1Ingress ingress) {
+    Set<String> result = new HashSet<>();
+    V1IngressBackend backend = ingress.getSpec().getDefaultBackend();
+    if (backend != null) {
+      result.add(backend.getService().getName());
+    }
+
+    List<V1IngressRule> rules = ingress.getSpec().getRules();
+    rules = rules == null ? new ArrayList<>() : rules;
+    for (V1IngressRule rule : rules) {
+      V1HTTPIngressRuleValue http = rule.getHttp();
+      if (http != null) {
+        for (V1HTTPIngressPath path : http.getPaths()) {
+          backend = path.getBackend();
+          if (backend != null) {
+            result.add(backend.getService().getName());
+          }
+        }
+      }
+    }
+
+    return new ArrayList<>(result);
+  }
+
+  @Override
+  public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) {
+    Map<String, Object> result = super.hydrateSearchResult(key);
+    result.put("loadBalancer", result.get("name"));
+
+    return result;
+  }
+}
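Note: the relationship pass above keys every service by namespace:name and resolves each ingress's backend service names against that map, so an ingress in namespace prod referencing service web matches only prod:web. Backends that point at services missing from the cache are filtered out via Objects::nonNull, and a failure during extraction is logged and leaves that ingress with an empty service list.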
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesJobHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesJobHandler.java
new file mode 100644
index 00000000000..b1e021b8730
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesJobHandler.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2018 Joel Wilsson
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status;
+import io.kubernetes.client.openapi.models.V1Job;
+import io.kubernetes.client.openapi.models.V1JobCondition;
+import io.kubernetes.client.openapi.models.V1JobSpec;
+import io.kubernetes.client.openapi.models.V1JobStatus;
+import java.util.List;
+import java.util.Optional;
+import javax.annotation.Nonnull;
+import org.springframework.stereotype.Component;
+
+@Component
+public class KubernetesJobHandler extends KubernetesHandler implements ServerGroupHandler {
+  @Nonnull
+  @Override
+  protected ImmutableList<Replacer> artifactReplacers() {
+    return ImmutableList.of(
+        Replacer.dockerImage(),
+        Replacer.configMapVolume(),
+        Replacer.secretVolume(),
+        Replacer.configMapProjectedVolume(),
+        Replacer.secretProjectedVolume(),
+        Replacer.configMapEnv(),
+        Replacer.secretEnv(),
+        Replacer.configMapKeyValue(),
+        Replacer.secretKeyValue());
+  }
+
+  @Override
+  public int deployPriority() {
+    return WORKLOAD_CONTROLLER_PRIORITY.getValue();
+  }
+
+  @Nonnull
+  @Override
+  public KubernetesKind kind() {
+    return KubernetesKind.JOB;
+  }
+
+  @Override
+  public boolean versioned() {
+    return false;
+  }
+
+  @Nonnull
+  @Override
+  public SpinnakerKind spinnakerKind() {
+    return SpinnakerKind.SERVER_GROUPS;
+  }
+
+  @Override
+  public Status status(KubernetesManifest manifest) {
+    V1Job v1Job = KubernetesCacheDataConverter.getResource(manifest, V1Job.class);
+    return status(v1Job);
+  }
+
+  @Override
+  protected KubernetesCachingAgentFactory cachingAgentFactory() {
+    return KubernetesCoreCachingAgent::new;
+  }
+
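+  // A job is steady once status.succeeded reaches spec.completions (treated
+  // as 1 when unset). Until then, a condition of type "Failed" with status
+  // "True" marks the job failed with that condition's message; otherwise the
+  // job is merely unstable while its pods run to completion.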
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesLimitRangeHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesLimitRangeHandler.java new file mode 100644 index 00000000000..35b96e437c1 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesLimitRangeHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_MODIFIER_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesLimitRangeHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return WORKLOAD_MODIFIER_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.LIMIT_RANGE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesMutatingWebhookConfigurationHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesMutatingWebhookConfigurationHandler.java new file mode 100644 index 00000000000..a38b609f287 --- /dev/null +++
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesMutatingWebhookConfigurationHandler.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesMutatingWebhookConfigurationHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return DeployPriority.ADMISSION_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.MUTATING_WEBHOOK_CONFIGURATION; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Manifest.Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesNamespaceHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesNamespaceHandler.java new file mode 100644 index 00000000000..ff1b7281111 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesNamespaceHandler.java @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesNamespaceHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return DeployPriority.NAMESPACE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.NAMESPACE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesNetworkPolicyHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesNetworkPolicyHandler.java new file mode 100644 index 00000000000..5ecce926f64 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesNetworkPolicyHandler.java @@ -0,0 +1,74 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.NETWORK_RESOURCE_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import java.util.Map; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesNetworkPolicyHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return NETWORK_RESOURCE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.NETWORK_POLICY; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.SECURITY_GROUPS; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } + + @Override + public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) { + Map<String, Object> result = super.hydrateSearchResult(key); + result.put("id", result.get("name")); + + return result; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPersistentVolumeClaimHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPersistentVolumeClaimHandler.java new file mode 100644 index 00000000000..783e7ce05b9 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPersistentVolumeClaimHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesPersistentVolumeClaimHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return MOUNTABLE_DATA_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.PERSISTENT_VOLUME_CLAIM; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.CONFIGS; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPersistentVolumeHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPersistentVolumeHandler.java new file mode 100644 index 00000000000..a0e61aa24e4 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPersistentVolumeHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_BACKING_RESOURCE_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesPersistentVolumeHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return MOUNTABLE_DATA_BACKING_RESOURCE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.PERSISTENT_VOLUME; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.CONFIGS; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodDisruptionBudgetHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodDisruptionBudgetHandler.java new file mode 100644 index 00000000000..511db60c851 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodDisruptionBudgetHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Mirantis, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.PDB_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesPodDisruptionBudgetHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return PDB_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.POD_DISRUPTION_BUDGET; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodHandler.java new file mode 100644 index 00000000000..a2eb35650c0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodHandler.java @@ -0,0 +1,126 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_PRIORITY; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import io.kubernetes.client.openapi.models.V1PodStatus; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Getter; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesPodHandler extends KubernetesHandler implements CanReceiveTraffic { + @Nonnull + @Override + protected ImmutableList<Replacer> artifactReplacers() { + return ImmutableList.of(Replacer.podDockerImage()); + } + + @Override + public int deployPriority() { + return WORKLOAD_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.POD; + } + + @Override + public boolean versioned() { + return true; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.INSTANCES; + } + + @Override + public Status status(KubernetesManifest manifest) { + V1PodStatus status = + KubernetesCacheDataConverter.getResource(manifest.getStatus(), V1PodStatus.class); + + if (status == null) { + return Status.noneReported(); + } + + PodPhase phase = PodPhase.fromString(status.getPhase()); + if (phase.isUnstable()) { + return Status.defaultStatus().unstable(phase.getMessage()).unavailable(phase.getMessage()); + } + + return Status.defaultStatus(); + } + + private enum PodPhase { + // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ + Pending(true, "Pod is pending"), + Running(false, ""), + Succeeded(false, ""), + Failed(true, "Pod has failed"), + Unknown(true, "Pod phase is unknown"); + + @Getter private final String message; + @Getter private final boolean unstable; + + PodPhase(boolean unstable, String message) { + this.message = message; + this.unstable = unstable; + } + + static PodPhase fromString(@Nullable String phase) { + if (phase == null) { + return Unknown; + } + try { + return valueOf(phase); + } catch (IllegalArgumentException e) { + return Unknown; + } + } + } + + @Override + public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) { + Map<String, Object> result = super.hydrateSearchResult(key); + result.put("instanceId", result.get("name")); + + return result; + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +}
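The phase handling is deliberately defensive: a null or unrecognized phase string falls back to Unknown rather than letting valueOf throw. As a quick reference for the mapping above (phases per the linked pod-lifecycle docs):

    // "Pending"   -> unstable + unavailable ("Pod is pending")
    // "Running"   -> stable
    // "Succeeded" -> stable
    // "Failed"    -> unstable + unavailable ("Pod has failed")
    // null, or any string valueOf does not recognize -> Unknown ("Pod phase is unknown")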
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodPresetHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodPresetHandler.java new file mode 100644 index 00000000000..60c421adfa2 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodPresetHandler.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesPodPresetHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return DeployPriority.WORKLOAD_MODIFIER_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.POD_PRESET; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Manifest.Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodSecurityPolicyHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodSecurityPolicyHandler.java new file mode 100644 index 00000000000..cc7b1e3c195 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodSecurityPolicyHandler.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesPodSecurityPolicyHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return DeployPriority.WORKLOAD_MODIFIER_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.POD_SECURITY_POLICY; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Manifest.Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesReplicaSetHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesReplicaSetHandler.java new file mode 100644 index 00000000000..48b6cd0f73c --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesReplicaSetHandler.java @@ -0,0 +1,196 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.APPS_V1; +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestSelector; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import io.kubernetes.client.openapi.models.V1ObjectMeta; +import io.kubernetes.client.openapi.models.V1ReplicaSet; +import io.kubernetes.client.openapi.models.V1ReplicaSetSpec; +import io.kubernetes.client.openapi.models.V1ReplicaSetStatus; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.springframework.stereotype.Component; + +@Component +@NonnullByDefault +public class KubernetesReplicaSetHandler extends KubernetesHandler + implements CanResize, CanScale, HasPods, ServerGroupHandler { + private static final ImmutableSet<KubernetesApiVersion> SUPPORTED_API_VERSIONS = + ImmutableSet.of(APPS_V1); + + @Override + protected ImmutableList<Replacer> artifactReplacers() { + return ImmutableList.of( + Replacer.dockerImage(), + Replacer.configMapVolume(), + Replacer.secretVolume(), + Replacer.configMapProjectedVolume(), + Replacer.secretProjectedVolume(), + Replacer.configMapEnv(), + Replacer.secretEnv(), + Replacer.configMapKeyValue(), + Replacer.secretKeyValue()); + } + + @Override + public int deployPriority() { + return WORKLOAD_CONTROLLER_PRIORITY.getValue(); + } + + @Override + public KubernetesKind kind() { + return KubernetesKind.REPLICA_SET; + } + + @Override + public boolean versioned() { + return true; + } + + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.SERVER_GROUPS; + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } + + @Override + public Status status(KubernetesManifest manifest) { + if (!SUPPORTED_API_VERSIONS.contains(manifest.getApiVersion())) { + throw new UnsupportedVersionException(manifest); + } + V1ReplicaSet v1ReplicaSet = + KubernetesCacheDataConverter.getResource(manifest, V1ReplicaSet.class); + return status(v1ReplicaSet); + } + + private Status status(V1ReplicaSet replicaSet) { + V1ReplicaSetStatus status = replicaSet.getStatus(); + if (status == null)
{ + return Status.noneReported(); + } + + Optional<UnstableReason> unstableReason = checkReplicaCounts(replicaSet, status); + if (unstableReason.isPresent()) { + return Status.defaultStatus() + .unstable(unstableReason.get().getMessage()) + .unavailable(unstableReason.get().getMessage()); + } + + if (!generationMatches(replicaSet, status)) { + return Status.defaultStatus().unstable("Waiting for replicaset spec update to be observed"); + } + + return Status.defaultStatus(); + } + + private Optional<UnstableReason> checkReplicaCounts( + V1ReplicaSet replicaSet, V1ReplicaSetStatus status) { + int desired = + Optional.ofNullable(replicaSet.getSpec()).map(V1ReplicaSetSpec::getReplicas).orElse(0); + int fullyLabeled = defaultToZero(status.getFullyLabeledReplicas()); + int available = defaultToZero(status.getAvailableReplicas()); + int ready = defaultToZero(status.getReadyReplicas()); + + if (desired > fullyLabeled) { + return Optional.of(UnstableReason.FULLY_LABELED_REPLICAS); + } + + if (desired > ready) { + return Optional.of(UnstableReason.READY_REPLICAS); + } + + if (desired > available) { + return Optional.of(UnstableReason.AVAILABLE_REPLICAS); + } + + return Optional.empty(); + } + + // Unboxes an Integer, returning 0 if the input is null + private static int defaultToZero(@Nullable Integer input) { + return input == null ? 0 : input; + } + + private boolean generationMatches(V1ReplicaSet replicaSet, V1ReplicaSetStatus status) { + Optional<Long> metadataGeneration = + Optional.ofNullable(replicaSet.getMetadata()).map(V1ObjectMeta::getGeneration); + Optional<Long> statusGeneration = Optional.ofNullable(status.getObservedGeneration()); + + return statusGeneration.isPresent() && statusGeneration.equals(metadataGeneration); + } + + public static Map<String, String> getPodTemplateLabels(KubernetesManifest manifest) { + if (manifest.getApiVersion().equals(APPS_V1)) { + V1ReplicaSet v1ReplicaSet = + KubernetesCacheDataConverter.getResource(manifest, V1ReplicaSet.class); + return getPodTemplateLabels(v1ReplicaSet); + } else { + throw new UnsupportedVersionException(manifest); + } + } + + private static Map<String, String> getPodTemplateLabels(V1ReplicaSet replicaSet) { + return replicaSet.getSpec().getTemplate().getMetadata().getLabels(); + } + + @Override + public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) { + Map<String, Object> result = super.hydrateSearchResult(key); + result.put("serverGroup", result.get("name")); + + return result; + } + + @Override + public List<KubernetesManifest> pods( + KubernetesCredentials credentials, KubernetesManifest object) { + KubernetesManifestSelector selector = object.getManifestSelector(); + return credentials + .list(KubernetesKind.POD, object.getNamespace(), selector.toSelectorList()) + .stream() + .filter( + p -> + p.getOwnerReferences().stream() + .anyMatch(or -> or.getName().equals(object.getName()))) + .collect(Collectors.toList()); + } +}
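The three count checks above run in a fixed order (fully labeled, then ready, then available) and the first shortfall wins; the generation check only matters once the counts look healthy. A rough sketch with invented numbers (assumes the io.kubernetes.client.openapi.models imports):

    V1ReplicaSet rs =
        new V1ReplicaSet()
            .metadata(new V1ObjectMeta().generation(2L))
            .spec(new V1ReplicaSetSpec().replicas(3))
            .status(
                new V1ReplicaSetStatus()
                    .fullyLabeledReplicas(3)
                    .readyReplicas(2) // e.g. one pod still failing its readiness probe
                    .availableReplicas(2)
                    .observedGeneration(2L));
    // desired (3) > ready (2), so checkReplicaCounts returns READY_REPLICAS and the
    // handler reports that reason as both unstable and unavailable.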
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesRoleBindingHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesRoleBindingHandler.java new file mode 100644 index 00000000000..013960359e6 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesRoleBindingHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Joel Wilsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.ROLE_BINDING_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesRoleBindingHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return ROLE_BINDING_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.ROLE_BINDING; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesRoleHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesRoleHandler.java new file mode 100644 index 00000000000..e70e3c1b8fc --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesRoleHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Joel Wilsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.ROLE_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesRoleHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return ROLE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.ROLE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesSecretHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesSecretHandler.java new file mode 100644 index 00000000000..bbc9613b0c5 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesSecretHandler.java @@ -0,0 +1,71 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.MOUNTABLE_DATA_PRIORITY; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import java.util.List; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesSecretHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return MOUNTABLE_DATA_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.SECRET; + } + + @Override + public List<String> sensitiveKeys() { + return ImmutableList.of("data"); + } + + @Override + public boolean versioned() { + return true; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.CONFIGS; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceAccountHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceAccountHandler.java new file mode 100644 index 00000000000..139bb437a55 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceAccountHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Joel Wilsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.SERVICE_ACCOUNT_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesServiceAccountHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return SERVICE_ACCOUNT_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.SERVICE_ACCOUNT; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceHandler.java new file mode 100644 index 00000000000..7afdabef5b8 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceHandler.java @@ -0,0 +1,255 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch.Op.remove; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.V1; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.REPLICA_SET; +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind.SERVICE; +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.NETWORK_RESOURCE_PRIORITY; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch.Op; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import io.kubernetes.client.openapi.models.V1Service; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +@Component +@Slf4j +public class KubernetesServiceHandler extends KubernetesHandler implements CanLoadBalance { + @Override + public int deployPriority() { + return NETWORK_RESOURCE_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.SERVICE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.LOAD_BALANCERS; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } + + @Override + public Map<String, Object> hydrateSearchResult(InfrastructureCacheKey key) { + Map<String, Object> result = super.hydrateSearchResult(key); + result.put("loadBalancer", result.get("name")); + + return result; + } + + @Override + public void addRelationships( + Map<KubernetesKind, List<KubernetesManifest>> allResources, + Map<KubernetesManifest, List<KubernetesManifest>> relationshipMap) { + Map<String, Set<KubernetesManifest>> mapLabelToManifest = new HashMap<>(); + + allResources + .getOrDefault(REPLICA_SET, new ArrayList<>()) + .forEach(r -> addAllReplicaSetLabels(mapLabelToManifest, r)); + + for (KubernetesManifest service : allResources.getOrDefault(SERVICE, new ArrayList<>())) { + relationshipMap.put(service, getRelatedManifests(service, mapLabelToManifest)); + } + } + + @Nonnull + private ImmutableMap<String, String> getSelector(KubernetesManifest manifest) { + if
(manifest.getApiVersion().equals(V1)) { + V1Service v1Service = KubernetesCacheDataConverter.getResource(manifest, V1Service.class); + if (v1Service.getSpec() == null || v1Service.getSpec().getSelector() == null) { + return ImmutableMap.of(); + } + return ImmutableMap.copyOf(v1Service.getSpec().getSelector()); + } else { + throw new IllegalArgumentException( + "No services with version " + manifest.getApiVersion() + " supported"); + } + } + + private List<KubernetesManifest> getRelatedManifests( + KubernetesManifest service, Map<String, Set<KubernetesManifest>> mapLabelToManifest) { + return new ArrayList<>(intersectLabels(service, mapLabelToManifest)); + } + + private Set<KubernetesManifest> intersectLabels( + KubernetesManifest service, Map<String, Set<KubernetesManifest>> mapLabelToManifest) { + ImmutableMap<String, String> selector = getSelector(service); + if (selector.isEmpty()) { + return new HashSet<>(); + } + + Set<KubernetesManifest> result = null; + String namespace = service.getNamespace(); + for (Map.Entry<String, String> label : selector.entrySet()) { + String labelKey = podLabelKey(namespace, label); + Set<KubernetesManifest> manifests = mapLabelToManifest.get(labelKey); + manifests = manifests == null ? new HashSet<>() : manifests; + + if (result == null) { + result = manifests; + } else { + result.retainAll(manifests); + } + } + + return result; + } + + private void addAllReplicaSetLabels( + Map<String, Set<KubernetesManifest>> entries, KubernetesManifest replicaSet) { + String namespace = replicaSet.getNamespace(); + Map<String, String> podLabels = KubernetesReplicaSetHandler.getPodTemplateLabels(replicaSet); + if (podLabels == null) { + return; + } + + for (Map.Entry<String, String> label : podLabels.entrySet()) { + String labelKey = podLabelKey(namespace, label); + enterManifest(entries, labelKey, replicaSet); + } + } + + private void enterManifest( + Map<String, Set<KubernetesManifest>> entries, String label, KubernetesManifest manifest) { + Set<KubernetesManifest> pods = entries.get(label); + if (pods == null) { + pods = new HashSet<>(); + } + + pods.add(manifest); + + entries.put(label, pods); + } + + private String podLabelKey(String namespace, Map.Entry<String, String> label) { + // Space can't be used in any of the values, so it's a safe separator. + return namespace + " " + label.getKey() + " " + label.getValue(); + }
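+ + // Worked example: in namespace "prod", a selector {app=nginx, tier=web} yields the + // keys "prod app nginx" and "prod tier web"; intersectLabels then retains only the + // replica sets registered under every one of those keys.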
+ + @Override + @ParametersAreNonnullByDefault + public void attach(KubernetesManifest loadBalancer, KubernetesManifest target) { + KubernetesCoordinates loadBalancerCoords = KubernetesCoordinates.fromManifest(loadBalancer); + if (loadBalancerCoords.equals(KubernetesCoordinates.fromManifest(target))) { + log.warn( + "Adding traffic selection labels to service {}, which itself is the source load balancer. This may change in the future.", + loadBalancerCoords); + } + Map<String, String> labels = target.getSpecTemplateLabels().orElse(target.getLabels()); + ImmutableMap<String, String> selector = getSelector(loadBalancer); + if (selector.isEmpty()) { + throw new IllegalArgumentException( + "Service must have a non-empty selector in order to be attached to a workload"); + } + if (!Collections.disjoint(labels.keySet(), selector.keySet())) { + throw new IllegalArgumentException( + "Service selector must have no label keys in common with target workload"); + } + labels.putAll(selector); + } + + private String pathPrefix(KubernetesManifest target) { + if (target.getSpecTemplateLabels().isPresent()) { + return "/spec/template/metadata/labels"; + } else { + return "/metadata/labels"; + } + } + + private Map<String, String> labels(KubernetesManifest target) { + if (target.getSpecTemplateLabels().isPresent()) { + return target.getSpecTemplateLabels().get(); + } else { + return target.getLabels(); + } + } + + @Override + @ParametersAreNonnullByDefault + public List<JsonPatch> attachPatch(KubernetesManifest loadBalancer, KubernetesManifest target) { + String pathPrefix = pathPrefix(target); + Map<String, String> labels = labels(target); + + return getSelector(loadBalancer).entrySet().stream() + .map( + kv -> + JsonPatch.builder() + .op(labels.containsKey(kv.getKey()) ? Op.replace : Op.add) + .path(String.join("/", pathPrefix, JsonPatch.escapeNode(kv.getKey()))) + .value(kv.getValue()) + .build()) + .collect(Collectors.toList()); + } + + @Override + @ParametersAreNonnullByDefault + public List<JsonPatch> detachPatch(KubernetesManifest loadBalancer, KubernetesManifest target) { + String pathPrefix = pathPrefix(target); + Map<String, String> labels = labels(target); + + return getSelector(loadBalancer).keySet().stream() + .filter(labels::containsKey) + .map( + k -> + JsonPatch.builder() + .op(remove) + .path(String.join("/", pathPrefix, JsonPatch.escapeNode(k))) + .build()) + .collect(Collectors.toList()); + } +}
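attachPatch and detachPatch translate the service selector into RFC 6902 JSON Patch operations against whichever labels block pathPrefix selects. With illustrative values (not from the source): attaching a service whose selector is {app=nginx} to a workload whose pod template has no app label yet would produce

    // [{ "op": "add", "path": "/spec/template/metadata/labels/app", "value": "nginx" }]

and detaching it again would produce the inverse

    // [{ "op": "remove", "path": "/spec/template/metadata/labels/app" }]

while selector keys absent from the target's labels are simply skipped on detach.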
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStatefulSetHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStatefulSetHandler.java new file mode 100644 index 00000000000..1a6bf7f3abe --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStatefulSetHandler.java @@ -0,0 +1,225 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion.APPS_V1; +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.WORKLOAD_CONTROLLER_PRIORITY; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.Replacer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.InfrastructureCacheKey; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import io.kubernetes.client.openapi.models.V1ObjectMeta; +import io.kubernetes.client.openapi.models.V1RollingUpdateStatefulSetStrategy; +import io.kubernetes.client.openapi.models.V1StatefulSet; +import io.kubernetes.client.openapi.models.V1StatefulSetStatus; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiFunction; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesStatefulSetHandler extends KubernetesHandler + implements CanResize, + CanScale, + CanPauseRollout, + CanResumeRollout, + CanUndoRollout, + CanRollingRestart, + ServerGroupHandler { + + private static final ImmutableSet<KubernetesApiVersion> SUPPORTED_API_VERSIONS = + ImmutableSet.of(APPS_V1); + + @Nonnull + @Override + protected ImmutableList<Replacer> artifactReplacers() { + return ImmutableList.of( + Replacer.dockerImage(), + Replacer.configMapVolume(), + Replacer.secretVolume(), + Replacer.configMapProjectedVolume(), + Replacer.secretProjectedVolume(), + Replacer.configMapEnv(), + Replacer.secretEnv(), + Replacer.configMapKeyValue(), + Replacer.secretKeyValue()); + } + + @Override + public int deployPriority() { + return WORKLOAD_CONTROLLER_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.STATEFUL_SET; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.SERVER_GROUPS; + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } + + @Override + public Status status(KubernetesManifest manifest) { + if (!SUPPORTED_API_VERSIONS.contains(manifest.getApiVersion())) { + throw new UnsupportedVersionException(manifest); + } + V1StatefulSet v1StatefulSet = + KubernetesCacheDataConverter.getResource(manifest, V1StatefulSet.class); + return status(v1StatefulSet); + } + + public static String serviceName(KubernetesManifest manifest) { + // TODO(lwander) perhaps switch on API version if
this changes + Map spec = (Map) manifest.get("spec"); + return (String) spec.get("serviceName"); + } + + @Override + public Map hydrateSearchResult(InfrastructureCacheKey key) { + Map result = super.hydrateSearchResult(key); + result.put("serverGroup", result.get("name")); + + return result; + } + + private Status status(V1StatefulSet statefulSet) { + if (statefulSet.getSpec().getUpdateStrategy().getType().equalsIgnoreCase("ondelete")) { + return Status.defaultStatus(); + } + + V1StatefulSetStatus status = statefulSet.getStatus(); + if (status == null) { + return Status.noneReported(); + } + + if (!generationMatches(statefulSet, status)) { + return Status.defaultStatus().unstable(UnstableReason.OLD_GENERATION.getMessage()); + } + + int desiredReplicas = defaultToZero(statefulSet.getSpec().getReplicas()); + int existing = defaultToZero(status.getReplicas()); + if (desiredReplicas > existing) { + return Status.defaultStatus() + .unstable("Waiting for at least the desired replica count to be met"); + } + + existing = defaultToZero(status.getReadyReplicas()); + if (desiredReplicas > existing) { + return Status.defaultStatus().unstable("Waiting for all updated replicas to be ready"); + } + + String updateType = statefulSet.getSpec().getUpdateStrategy().getType(); + V1RollingUpdateStatefulSetStrategy rollingUpdate = + statefulSet.getSpec().getUpdateStrategy().getRollingUpdate(); + + Integer updated = status.getUpdatedReplicas(); + + if (updateType.equalsIgnoreCase("rollingupdate") && updated != null && rollingUpdate != null) { + Integer partition = rollingUpdate.getPartition(); + Integer replicas = status.getReplicas(); + if (replicas != null && partition != null && (updated < (replicas - partition))) { + return Status.defaultStatus().unstable("Waiting for partitioned roll out to finish"); + } + return Status.defaultStatus().stable("Partitioned roll out complete"); + } + + existing = defaultToZero(status.getCurrentReplicas()); + if (desiredReplicas > existing) { + return Status.defaultStatus().unstable("Waiting for all updated replicas to be scheduled"); + } + + if (!status.getCurrentRevision().equals(status.getUpdateRevision())) { + return Status.defaultStatus() + .unstable("Waiting for the updated revision to match the current revision"); + } + + return Status.defaultStatus(); + } + + private boolean generationMatches(V1StatefulSet statefulSet, V1StatefulSetStatus status) { + Optional metadataGeneration = + Optional.ofNullable(statefulSet.getMetadata()).map(V1ObjectMeta::getGeneration); + Optional statusGeneration = Optional.ofNullable(status.getObservedGeneration()); + + return statusGeneration.isPresent() && statusGeneration.equals(metadataGeneration); + } + + // Unboxes an Integer, returning 0 if the input is null + private static int defaultToZero(@Nullable Integer input) { + return input == null ? 
0 : input; + } + + @Override + public void addRelationships( + Map> allResources, + Map> relationshipMap) { + BiFunction manifestName = (namespace, name) -> namespace + ":" + name; + + Map services = + allResources.getOrDefault(KubernetesKind.SERVICE, new ArrayList<>()).stream() + .collect( + Collectors.toMap( + (m) -> manifestName.apply(m.getNamespace(), m.getName()), (m) -> m)); + + for (KubernetesManifest manifest : + allResources.getOrDefault(KubernetesKind.STATEFUL_SET, new ArrayList<>())) { + String serviceName = KubernetesStatefulSetHandler.serviceName(manifest); + if (Strings.isNullOrEmpty(serviceName)) { + continue; + } + + String key = manifestName.apply(manifest.getNamespace(), serviceName); + + if (!services.containsKey(key)) { + continue; + } + + KubernetesManifest service = services.get(key); + relationshipMap.put(manifest, ImmutableList.of(service)); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStorageClassHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStorageClassHandler.java new file mode 100644 index 00000000000..325cc7a68a0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStorageClassHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Joel Wilsson + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
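The subtlest branch in the StatefulSet status() logic above is the partitioned RollingUpdate: only pods whose ordinal is at or above the partition move to the new revision, so the handler declares stability once updatedReplicas reaches replicas - partition. A standalone sketch of that arithmetic with made-up counts:

```java
// Made-up counts; mirrors the partitioned RollingUpdate check in status().
class PartitionedRolloutSketch {
  public static void main(String[] args) {
    int replicas = 5;  // status.getReplicas()
    int partition = 2; // only pods with ordinal >= 2 roll to the new revision
    int updated = 3;   // status.getUpdatedReplicas()

    // Stable once every pod at or above the partition boundary is updated.
    if (updated < replicas - partition) {
      System.out.println("Waiting for partitioned roll out to finish");
    } else {
      System.out.println("Partitioned roll out complete");
    }
  }
}
```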
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.STORAGE_CLASS_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesStorageClassHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return STORAGE_CLASS_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.STORAGE_CLASS; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesUnregisteredCustomResourceHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesUnregisteredCustomResourceHandler.java new file mode 100644 index 00000000000..40b16668068 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesUnregisteredCustomResourceHandler.java @@ -0,0 +1,65 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
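STORAGE_CLASS_PRIORITY above sits below the workload priorities so that storage infrastructure is deployed before anything that consumes it. A sketch of how handlers' deployPriority() values order a deploy follows; the numeric values here are invented, only the relative ordering matters:

```java
// Invented numeric values; only the relative ordering matters. Lower
// deployPriority() values deploy first, so storage infrastructure precedes
// the workloads that consume it, and unregistered custom resources go last.
import java.util.LinkedHashMap;
import java.util.Map;

class DeployPrioritySketch {
  public static void main(String[] args) {
    Map<String, Integer> priorities = new LinkedHashMap<>();
    priorities.put("unregistered custom resource", 1000); // LOWEST_PRIORITY
    priorities.put("StatefulSet", 100); // WORKLOAD_CONTROLLER_PRIORITY
    priorities.put("StorageClass", 50); // STORAGE_CLASS_PRIORITY

    priorities.entrySet().stream()
        .sorted(Map.Entry.comparingByValue())
        .forEach(e -> System.out.println(e.getValue() + " -> " + e.getKey()));
  }
}
```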
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler.DeployPriority.LOWEST_PRIORITY; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesUnregisteredCustomResourceCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesUnregisteredCustomResourceHandler extends KubernetesHandler + implements CanDelete { + @Override + public int deployPriority() { + return LOWEST_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.NONE; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesUnregisteredCustomResourceCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesValidatingWebhookConfigurationHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesValidatingWebhookConfigurationHandler.java new file mode 100644 index 00000000000..eaa5770a192 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesValidatingWebhookConfigurationHandler.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentFactory; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCoreCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesValidatingWebhookConfigurationHandler extends KubernetesHandler { + @Override + public int deployPriority() { + return DeployPriority.ADMISSION_PRIORITY.getValue(); + } + + @Nonnull + @Override + public KubernetesKind kind() { + return KubernetesKind.VALIDATING_WEBHOOK_CONFIGURATION; + } + + @Override + public boolean versioned() { + return false; + } + + @Nonnull + @Override + public SpinnakerKind spinnakerKind() { + return SpinnakerKind.UNCLASSIFIED; + } + + @Override + public Manifest.Status status(KubernetesManifest manifest) { + return Status.defaultStatus(); + } + + @Override + protected KubernetesCachingAgentFactory cachingAgentFactory() { + return KubernetesCoreCachingAgent::new; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ServerGroupHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ServerGroupHandler.java new file mode 100644 index 00000000000..062a73db149 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ServerGroupHandler.java @@ -0,0 +1,27 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroup; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupCacheData; + +public interface ServerGroupHandler extends CanReceiveTraffic { + default KubernetesServerGroup fromCacheData(KubernetesServerGroupCacheData cacheData) { + return KubernetesServerGroup.fromCacheData(cacheData); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ServerGroupManagerHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ServerGroupManagerHandler.java new file mode 100644 index 00000000000..c0989a1c0af --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ServerGroupManagerHandler.java @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroupManager; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.data.KubernetesServerGroupManagerCacheData; + +public interface ServerGroupManagerHandler extends CanReceiveTraffic { + default KubernetesServerGroupManager fromCacheData( + KubernetesServerGroupManagerCacheData cacheData) { + return KubernetesServerGroupManager.fromCacheData(cacheData); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/UnstableReason.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/UnstableReason.java new file mode 100644 index 00000000000..f3f450d3d45 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/UnstableReason.java @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import lombok.Getter; + +enum UnstableReason { + AVAILABLE_REPLICAS("Waiting for all replicas to be available"), + FULLY_LABELED_REPLICAS("Waiting for all replicas to be fully-labeled"), + OLD_GENERATION("Waiting for status generation to match updated object generation"), + OLD_REPLICAS("Waiting for old replicas to finish termination"), + READY_REPLICAS("Waiting for all replicas to be ready"), + UPDATED_REPLICAS("Waiting for all replicas to be updated"); + + @Getter private final String message; + + UnstableReason(String message) { + this.message = message; + } +} diff --git a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/UnsupportedVersionException.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/UnsupportedVersionException.java similarity index 79% rename from clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/UnsupportedVersionException.java rename to clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/UnsupportedVersionException.java index 5f0a2924241..5301a9faaa6 100644 --- a/clouddriver-kubernetes/src/main/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/UnsupportedVersionException.java +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/UnsupportedVersionException.java @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,9 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler; +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; public class UnsupportedVersionException extends IllegalArgumentException { public UnsupportedVersionException(KubernetesManifest manifest) { diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubectlJobExecutor.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubectlJobExecutor.java new file mode 100644 index 00000000000..a912e64f29b --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubectlJobExecutor.java @@ -0,0 +1,1239 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.job; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSetMultimap; +import com.google.gson.Gson; +import com.google.gson.JsonSyntaxException; +import com.google.gson.stream.JsonReader; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import com.netflix.spinnaker.clouddriver.jobs.JobRequest; +import com.netflix.spinnaker.clouddriver.jobs.JobResult; +import com.netflix.spinnaker.clouddriver.jobs.local.ReaderConsumer; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.kork.annotations.VisibleForTesting; +import com.netflix.spinnaker.kork.resilience4j.Resilience4jHelper; +import io.github.resilience4j.core.IntervalFunction; +import io.github.resilience4j.micrometer.tagged.TaggedRetryMetrics; +import io.github.resilience4j.retry.Retry; +import io.github.resilience4j.retry.RetryConfig; +import io.github.resilience4j.retry.RetryRegistry; +import io.kubernetes.client.openapi.models.V1DeleteOptions; +import io.micrometer.core.instrument.MeterRegistry; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.EOFException; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.*; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.WillClose; +import lombok.Getter; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class KubectlJobExecutor { + private static final Logger log = LoggerFactory.getLogger(KubectlJobExecutor.class); + private static final String NOT_FOUND_STRING = "(NotFound)"; + private static final String NO_OBJECTS_PASSED_TO_STRING = "error: no objects passed to"; + private static final String NO_OBJECTS_PASSED_TO_APPLY_STRING = + NO_OBJECTS_PASSED_TO_STRING + " apply"; + private static final String NO_OBJECTS_PASSED_TO_CREATE_STRING = + NO_OBJECTS_PASSED_TO_STRING + " create"; + private static final String KUBECTL_COMMAND_OPTION_TOKEN = "--token="; + private static final String KUBECTL_COMMAND_OPTION_KUBECONFIG = "--kubeconfig="; + private static final String KUBECTL_COMMAND_OPTION_CONTEXT = "--context="; + + private final JobExecutor jobExecutor; + + private final Gson gson = new Gson(); + + private final KubernetesConfigurationProperties kubernetesConfigurationProperties; + + // @Getter is required so that this can be used in tests + @Getter private final Optional retryRegistry; + + private final MeterRegistry meterRegistry; + + @Autowired + public KubectlJobExecutor( + 
JobExecutor jobExecutor, + KubernetesConfigurationProperties kubernetesConfigurationProperties, + MeterRegistry meterRegistry) { + this.jobExecutor = jobExecutor; + this.kubernetesConfigurationProperties = kubernetesConfigurationProperties; + this.meterRegistry = meterRegistry; + + this.retryRegistry = + initializeRetryRegistry(kubernetesConfigurationProperties.getJobExecutor().getRetries()); + } + + /** + * This is used to initialize a RetryRegistry. RetryRegistry acts as a global store for all retry + * instances. The retry instances are shared for various kubectl actions. A retry instance is + * identified by the account name. + * + * @param retriesConfig - kubectl job retries configuration + * @return - If retries are enabled, it returns an Optional that contains a RetryRegistry, + * otherwise it returns an empty Optional + */ + private Optional initializeRetryRegistry( + KubernetesConfigurationProperties.KubernetesJobExecutorProperties.Retries retriesConfig) { + if (retriesConfig.isEnabled()) { + log.info("kubectl retries are enabled"); + + // this config will be applied to all retry instances created from the registry + RetryConfig.Builder retryConfig = + RetryConfig.custom().maxAttempts(retriesConfig.getMaxAttempts()); + if (retriesConfig.isExponentialBackoffEnabled()) { + retryConfig.intervalFunction( + IntervalFunction.ofExponentialBackoff( + Duration.ofMillis(retriesConfig.getExponentialBackOffIntervalMs()), + retriesConfig.getExponentialBackoffMultiplier())); + } else { + retryConfig.waitDuration(Duration.ofMillis(retriesConfig.getBackOffInMs())); + } + + // retry on all exceptions except NoRetryException + retryConfig.ignoreExceptions(NoRetryException.class); + + // create the retry registry + RetryRegistry retryRegistry = RetryRegistry.of(retryConfig.build()); + + Resilience4jHelper.configureLogging(retryRegistry, "Kubectl command", log); + + if (this.kubernetesConfigurationProperties + .getJobExecutor() + .getRetries() + .getMetrics() + .isEnabled()) { + TaggedRetryMetrics.ofRetryRegistry(retryRegistry).bindTo(meterRegistry); + } + + return Optional.of(retryRegistry); + } else { + log.info("kubectl retries are disabled"); + return Optional.empty(); + } + } + + public String logs( + KubernetesCredentials credentials, String namespace, String podName, String containerName) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + command.add("logs"); + command.add(podName); + command.add("-c=" + containerName); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to get logs from " + + podName + + "/" + + containerName + + " in " + + namespace + + ": " + + status.getError()); + } + + return status.getOutput(); + } + + public String jobLogs( + KubernetesCredentials credentials, String namespace, String jobName, String containerName) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + String resource = "job/" + jobName; + command.add("logs"); + command.add(resource); + command.add("-c=" + containerName); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to get logs from " + resource + " in " + namespace + ": " + status.getError()); + } + + return status.getOutput(); + } + + public List delete( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + KubernetesSelectorList 
labelSelectors, + V1DeleteOptions deleteOptions, + Task task, + String opName) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + + command.add("delete"); + + command = kubectlLookupInfo(command, kind, name, labelSelectors); + + // spinnaker generally accepts deletes of resources that don't exist + command.add("--ignore-not-found=true"); + + if (deleteOptions.getPropagationPolicy() != null) { + command.add("--cascade=" + deleteOptions.getPropagationPolicy()); + } + + if (deleteOptions.getGracePeriodSeconds() != null) { + command.add("--grace-period=" + deleteOptions.getGracePeriodSeconds()); + } + + String id; + if (!Strings.isNullOrEmpty(name)) { + id = kind + "/" + name; + } else { + id = labelSelectors.toString(); + } + + JobResult status = executeKubectlCommand(credentials, command); + + persistKubectlJobOutput(credentials, status, id, task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to delete " + id + " from " + namespace + ": " + status.getError()); + } + + if (Strings.isNullOrEmpty(status.getOutput()) + || status.getOutput().equals("No output from command.") + || status.getOutput().startsWith("No resources found")) { + return new ArrayList<>(); + } + + return Arrays.stream(status.getOutput().split("\n")) + .map(m -> m.substring(m.indexOf("\"") + 1)) + .map(m -> m.substring(0, m.lastIndexOf("\""))) + .collect(Collectors.toList()); + } + + public Void scale( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + int replicas, + Task task, + String opName) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + + command.add("scale"); + command = kubectlLookupInfo(command, kind, name, null); + command.add("--replicas=" + replicas); + + String resource = kind + "/" + name; + JobResult status = executeKubectlCommand(credentials, command); + persistKubectlJobOutput(credentials, status, resource, task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to scale " + resource + " from " + namespace + ": " + status.getError()); + } + + return null; + } + + public List historyRollout( + KubernetesCredentials credentials, KubernetesKind kind, String namespace, String name) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + String resource = kind + "/" + name; + command.add("rollout"); + command.add("history"); + command.add(resource); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to get rollout history of " + + resource + + " from " + + namespace + + ": " + + status.getError()); + } + + String stdout = status.getOutput(); + if (Strings.isNullOrEmpty(stdout)) { + return new ArrayList<>(); + } + + // "name" + // REVISION CHANGE-CAUSE + // # + // # + // # + // ... 
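The parsing that follows drops the two header rows sketched in the comment above, then takes the first whitespace-delimited token of each remaining row as a revision number. A self-contained sketch against sample output (the sample text is illustrative, not captured from a real run):

```java
// Self-contained sketch of the revision parsing in historyRollout.
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class RolloutHistorySketch {
  public static void main(String[] args) {
    String stdout =
        "deployment.apps/my-app\n"
            + "REVISION  CHANGE-CAUSE\n"
            + "1         <none>\n"
            + "2         kubectl set image deployment/my-app web=nginx:1.25\n";
    List<String> lines = Arrays.asList(stdout.split("\n"));
    List<Integer> revisions =
        lines.subList(2, lines.size()).stream() // drop the name row and the header row
            .map(l -> l.split("[ \t]"))
            .filter(l -> l.length > 0)
            .map(l -> l[0])
            .map(Integer::valueOf)
            .collect(Collectors.toList());
    System.out.println(revisions); // [1, 2]
  }
}
```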
+ List splitOutput = Arrays.stream(stdout.split("\n")).collect(Collectors.toList()); + + if (splitOutput.size() <= 2) { + return new ArrayList<>(); + } + + splitOutput = splitOutput.subList(2, splitOutput.size()); + + return splitOutput.stream() + .map(l -> l.split("[ \t]")) + .filter(l -> l.length > 0) + .map(l -> l[0]) + .map(Integer::valueOf) + .collect(Collectors.toList()); + } + + public Void undoRollout( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + int revision) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + + String resource = kind + "/" + name; + command.add("rollout"); + command.add("undo"); + command.add(resource); + command.add("--to-revision=" + revision); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to undo rollout " + resource + " from " + namespace + ": " + status.getError()); + } + + return null; + } + + public Void pauseRollout( + KubernetesCredentials credentials, KubernetesKind kind, String namespace, String name) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + + String resource = kind + "/" + name; + command.add("rollout"); + command.add("pause"); + command.add(resource); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to pause rollout " + resource + " from " + namespace + ": " + status.getError()); + } + + return null; + } + + public Void resumeRollout( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + Task task, + String opName) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + + String resource = kind + "/" + name; + command.add("rollout"); + command.add("resume"); + command.add(resource); + + JobResult status = executeKubectlCommand(credentials, command); + + persistKubectlJobOutput(credentials, status, resource, task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to resume rollout " + resource + " from " + namespace + ": " + status.getError()); + } + + return null; + } + + public Void rollingRestart( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + Task task, + String opName) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + + String resource = kind + "/" + name; + command.add("rollout"); + command.add("restart"); + command.add(resource); + + JobResult status = executeKubectlCommand(credentials, command); + + persistKubectlJobOutput(credentials, status, resource, task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to complete rolling restart of " + + resource + + " from " + + namespace + + ": " + + status.getError()); + } + + return null; + } + + @Nullable + public KubernetesManifest get( + KubernetesCredentials credentials, KubernetesKind kind, String namespace, String name) { + log.debug( + "Getting information for {} of Kind {} in namespace {}", name, kind.toString(), namespace); + List command = kubectlNamespacedGet(credentials, ImmutableList.of(kind), namespace); + command.add(name); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + if (status.getError().contains(NOT_FOUND_STRING)) { + 
return null; + } + + throw new KubectlException( + "Failed to get: " + + name + + " of kind: " + + kind + + " from namespace: " + + namespace + + ": " + + status.getError()); + } + + try { + return gson.fromJson(status.getOutput(), KubernetesManifest.class); + } catch (JsonSyntaxException e) { + throw new KubectlException( + "Failed to parse kubectl output for: " + + name + + " of kind: " + + kind + + " in namespace: " + + namespace + + ": " + + e.getMessage(), + e); + } + } + + @Nonnull + public ImmutableList eventsFor( + KubernetesCredentials credentials, KubernetesKind kind, String namespace, String name) { + log.debug("Getting events for {} of Kind {} in namespace {}", name, kind.toString(), namespace); + List command = + kubectlNamespacedGet(credentials, ImmutableList.of(KubernetesKind.EVENT), namespace); + command.add("--field-selector"); + command.add( + String.format( + "involvedObject.name=%s,involvedObject.kind=%s", + name, StringUtils.capitalize(kind.toString()))); + + JobResult> status = + executeKubectlCommand(credentials, command, parseManifestList()); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException( + "Failed to read events for: " + + kind + + "/" + + name + + " from " + + namespace + + ": " + + status.getError()); + } + + if (status.getError().contains("No resources found")) { + return ImmutableList.of(); + } + + return status.getOutput(); + } + + @Nonnull + public ImmutableList list( + KubernetesCredentials credentials, + List kinds, + String namespace, + KubernetesSelectorList selectors) { + log.debug("Getting list of kinds {} in namespace {}", kinds, namespace); + List command = kubectlNamespacedGet(credentials, kinds, namespace); + if (selectors.isNotEmpty()) { + log.debug("with selectors: {}", selectors.toString()); + command.add("-l=" + selectors.toString()); + } + + JobResult> status = + executeKubectlCommand(credentials, command, parseManifestList()); + + if (status.getResult() != JobResult.Result.SUCCESS) { + boolean permissionError = + org.apache.commons.lang3.StringUtils.containsIgnoreCase(status.getError(), "forbidden"); + if (permissionError) { + log.warn(status.getError()); + } else { + throw new KubectlException( + "Failed to read " + kinds + " from " + namespace + ": " + status.getError()); + } + } + + if (status.getError().contains("No resources found")) { + return ImmutableList.of(); + } + + return status.getOutput(); + } + + /** + * Invoke kubectl apply with the given manifest and (if present) label selectors. + * + * @param credentials k8s account credentials + * @param manifest the manifest to apply + * @param task the task performing this kubectl invocation + * @param opName the name of the operation performing this kubectl invocation + * @param labelSelectors label selectors + * @return the manifest parsed from stdout of the kubectl invocation, or null if a label selector + * is present and kubectl returned "no objects passed to apply" + */ + public KubernetesManifest deploy( + KubernetesCredentials credentials, + KubernetesManifest manifest, + Task task, + String opName, + KubernetesSelectorList labelSelectors, + String... 
cmdArgs) { + log.info("Deploying manifest {}", manifest.getFullResourceName()); + List command = kubectlAuthPrefix(credentials); + + // Read from stdin + command.add("apply"); + command.addAll(List.of(cmdArgs)); + command.add("-o"); + command.add("json"); + command.add("-f"); + command.add("-"); + addLabelSelectors(command, labelSelectors); + + JobResult status = executeKubectlCommand(credentials, command, Optional.of(manifest)); + + persistKubectlJobOutput(credentials, status, manifest.getFullResourceName(), task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + // If the caller provided a label selector, kubectl returns "no objects + // passed to apply" if none of the given objects satisfy the selector. + // Instead of throwing an exception, leave it to higher level logic to + // decide how to behave. + if (labelSelectors.isNotEmpty() + && status.getError().contains(NO_OBJECTS_PASSED_TO_APPLY_STRING)) { + return null; + } + + throw new KubectlException( + "Deploy failed for manifest: " + + manifest.getFullResourceName() + + ". Error: " + + status.getError()); + } + + return getKubernetesManifestFromJobResult(status, manifest); + } + + /** + * Invoke kubectl replace with the given manifest. Note that kubectl replace doesn't support label + * selectors. + * + * @param credentials k8s account credentials + * @param manifest the manifest to replace + * @param task the task performing this kubectl invocation + * @param opName the name of the operation performing this kubectl invocation + * @return the manifest parsed from stdout of the kubectl invocation + */ + public KubernetesManifest replace( + KubernetesCredentials credentials, KubernetesManifest manifest, Task task, String opName) { + log.info("Replacing manifest {}", manifest.getFullResourceName()); + List command = kubectlAuthPrefix(credentials); + + // Read from stdin + command.add("replace"); + command.add("-o"); + command.add("json"); + command.add("-f"); + command.add("-"); + + JobResult status = executeKubectlCommand(credentials, command, Optional.of(manifest)); + + persistKubectlJobOutput(credentials, status, manifest.getFullResourceName(), task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + if (status.getError().contains(NOT_FOUND_STRING)) { + throw new KubectlNotFoundException( + "Replace failed for manifest: " + + manifest.getFullResourceName() + + ". Error: " + + status.getError()); + } + throw new KubectlException( + "Replace failed for manifest: " + + manifest.getFullResourceName() + + ". Error: " + + status.getError()); + } + + return getKubernetesManifestFromJobResult(status, manifest); + } + + /** + * Invoke kubectl create with the given manifest and (if present) label selectors. 
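Both deploy() above and create() below share this contract: when label selectors filter out every object, kubectl's "no objects passed to ..." error becomes a null return instead of an exception. A hypothetical caller, assuming the surrounding clouddriver types, might handle it like this:

```java
// Hypothetical caller of deploy(); 'executor', 'credentials', 'manifest',
// 'task', and 'selectors' are assumed to be in scope.
KubernetesManifest applied =
    executor.deploy(credentials, manifest, task, "DEPLOY_K8S_MANIFEST", selectors);
if (applied == null) {
  // A selector was supplied and no object matched it; the caller decides
  // whether that is a failure or an acceptable no-op.
  task.updateStatus("DEPLOY_K8S_MANIFEST", "Nothing matched selectors " + selectors);
}
```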
+ * + * @param credentials k8s account credentials + * @param manifest the manifest to create + * @param task the task performing this kubectl invocation + * @param opName the name of the operation performing this kubectl invocation + * @param labelSelectors label selectors + * @return the manifest parsed from stdout of the kubectl invocation, or null if a label selector + * is present and kubectl returned "no objects passed to create" + */ + public KubernetesManifest create( + KubernetesCredentials credentials, + KubernetesManifest manifest, + Task task, + String opName, + KubernetesSelectorList labelSelectors) { + log.info("Creating manifest {}", manifest.getFullResourceName()); + List command = kubectlAuthPrefix(credentials); + + // Read from stdin + command.add("create"); + command.add("-o"); + command.add("json"); + command.add("-f"); + command.add("-"); + addLabelSelectors(command, labelSelectors); + + JobResult status = executeKubectlCommand(credentials, command, Optional.of(manifest)); + + persistKubectlJobOutput(credentials, status, manifest.getFullResourceName(), task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + // If the caller provided a label selector, kubectl returns "no objects + // passed to create" if none of the given objects satisfy the selector. + // Instead of throwing an exception, leave it to higher level logic to + // decide how to behave. + if (labelSelectors.isNotEmpty() + && status.getError().contains(NO_OBJECTS_PASSED_TO_CREATE_STRING)) { + return null; + } + + throw new KubectlException( + "Create failed for manifest: " + + manifest.getFullResourceName() + + ". Error: " + + status.getError()); + } + + return getKubernetesManifestFromJobResult(status, manifest); + } + + private KubernetesManifest getKubernetesManifestFromJobResult( + JobResult status, KubernetesManifest inputManifest) { + try { + return gson.fromJson(status.getOutput(), KubernetesManifest.class); + } catch (JsonSyntaxException e) { + throw new KubectlException( + "Failed to parse kubectl output for manifest: " + + inputManifest.getName() + + ". 
Error: " + + e.getMessage(), + e); + } + } + + private List kubectlAuthPrefix(KubernetesCredentials credentials) { + List command = new ArrayList<>(); + if (!Strings.isNullOrEmpty(credentials.getKubectlExecutable())) { + command.add(credentials.getKubectlExecutable()); + } else { + command.add(this.kubernetesConfigurationProperties.getKubectl().getExecutable()); + } + + if (credentials.getKubectlRequestTimeoutSeconds() != null) { + command.add("--request-timeout=" + credentials.getKubectlRequestTimeoutSeconds()); + } + + if (credentials.isDebug()) { + command.add("-v"); + command.add("9"); + } + + if (!credentials.isServiceAccount()) { + if (credentials.getOAuthServiceAccount() != null + && !credentials.getOAuthServiceAccount().isEmpty()) { + command.add(KUBECTL_COMMAND_OPTION_TOKEN + getOAuthToken(credentials)); + } + + String kubeconfigFile = credentials.getKubeconfigFile(); + if (!Strings.isNullOrEmpty(kubeconfigFile)) { + command.add(KUBECTL_COMMAND_OPTION_KUBECONFIG + kubeconfigFile); + } + + String context = credentials.getContext(); + if (!Strings.isNullOrEmpty(context)) { + command.add(KUBECTL_COMMAND_OPTION_CONTEXT + context); + } + } + + return command; + } + + private List kubectlLookupInfo( + List command, + KubernetesKind kind, + String name, + KubernetesSelectorList labelSelectors) { + if (!Strings.isNullOrEmpty(name)) { + command.add(kind + "/" + name); + } else { + command.add(kind.toString()); + } + addLabelSelectors(command, labelSelectors); + + return command; + } + + private List kubectlNamespacedAuthPrefix( + KubernetesCredentials credentials, String namespace) { + List command = kubectlAuthPrefix(credentials); + + if (!Strings.isNullOrEmpty(namespace)) { + command.add("--namespace=" + namespace); + } + + return command; + } + + private List kubectlNamespacedGet( + KubernetesCredentials credentials, List kind, String namespace) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + command.add("-o"); + command.add("json"); + + command.add("get"); + command.add(kind.stream().map(KubernetesKind::toString).collect(Collectors.joining(","))); + + return command; + } + + private String getOAuthToken(KubernetesCredentials credentials) { + List command = new ArrayList<>(); + command.add(this.kubernetesConfigurationProperties.getOAuth().getExecutable()); + command.add("fetch"); + command.add("--json"); + command.add(credentials.getOAuthServiceAccount()); + command.addAll(credentials.getOAuthScopes()); + + JobResult status = executeKubectlCommand(credentials, command); + + if (status.getResult() != JobResult.Result.SUCCESS) { + throw new KubectlException("Could not fetch OAuth token: " + status.getError()); + } + return status.getOutput(); + } + + public ImmutableList topPod( + KubernetesCredentials credentials, String namespace, @Nonnull String pod) { + List command = kubectlNamespacedAuthPrefix(credentials, namespace); + command.add("top"); + command.add("po"); + if (!pod.isEmpty()) { + command.add(pod); + } + command.add("--containers"); + + JobResult status = executeKubectlCommand(credentials, command); + if (status.getResult() != JobResult.Result.SUCCESS) { + if (status.getError().toLowerCase().contains("not available") + || status.getError().toLowerCase().contains("not found")) { + log.warn( + String.format( + "Error fetching metrics for account %s: %s", + credentials.getAccountName(), status.getError())); + return ImmutableList.of(); + } + throw new KubectlException("Could not read metrics: " + status.getError()); + } + + ImmutableSetMultimap metrics = 
+ MetricParser.parseMetrics(status.getOutput()); + return metrics.asMap().entrySet().stream() + .map( + podMetrics -> + KubernetesPodMetric.builder() + .podName(podMetrics.getKey()) + .namespace(namespace) + .containerMetrics(podMetrics.getValue()) + .build()) + .collect(ImmutableList.toImmutableList()); + } + + public Void patch( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + KubernetesPatchOptions options, + List<JsonPatch> patches, + Task task, + String opName) { + return patch(credentials, kind, namespace, name, options, gson.toJson(patches), task, opName); + } + + public Void patch( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + KubernetesPatchOptions options, + KubernetesManifest manifest, + Task task, + String opName) { + return patch(credentials, kind, namespace, name, options, gson.toJson(manifest), task, opName); + } + + private Void patch( + KubernetesCredentials credentials, + KubernetesKind kind, + String namespace, + String name, + KubernetesPatchOptions options, + String patchBody, + Task task, + String opName) { + List<String> command = kubectlNamespacedAuthPrefix(credentials, namespace); + + command.add("patch"); + command.add(kind.toString()); + command.add(name); + + if (options.isRecord()) { + command.add("--record"); + } + + String mergeStrategy = options.getMergeStrategy().toString(); + if (!Strings.isNullOrEmpty(mergeStrategy)) { + command.add("--type"); + command.add(mergeStrategy); + } + + command.add("--patch"); + command.add(patchBody); + + JobResult<String> status = executeKubectlCommand(credentials, command); + + persistKubectlJobOutput(credentials, status, kind + "/" + name, task, opName); + + if (status.getResult() != JobResult.Result.SUCCESS) { + String errMsg = status.getError(); + if (Strings.isNullOrEmpty(errMsg)) { + errMsg = status.getOutput(); + } + if (errMsg.contains("not patched")) { + log.warn("No change occurred after patching {} {}:{}, ignoring", kind, namespace, name); + return null; + } + + throw new KubectlException( + "Patch failed for: " + name + " in namespace: " + namespace + ": " + errMsg); + } + + return null; + } + + private ReaderConsumer<ImmutableList<KubernetesManifest>> parseManifestList() { + return (@WillClose BufferedReader r) -> { + try (JsonReader reader = new JsonReader(r)) { + try { + reader.beginObject(); + } catch (EOFException e) { + // If the stream we're parsing is empty, just return an empty list + return ImmutableList.of(); + } + ImmutableList.Builder<KubernetesManifest> manifestList = new ImmutableList.Builder<>(); + while (reader.hasNext()) { + if (reader.nextName().equals("items")) { + reader.beginArray(); + while (reader.hasNext()) { + KubernetesManifest manifest = gson.fromJson(reader, KubernetesManifest.class); + manifestList.add(manifest); + } + reader.endArray(); + } else { + reader.skipValue(); + } + } + reader.endObject(); + return manifestList.build(); + } catch (IllegalStateException | JsonSyntaxException e) { + // An IllegalStateException is thrown when we call beginObject, nextName(), etc. and the + // next token is not what we are asserting it to be. A JsonSyntaxException is thrown when + // gson.fromJson isn't able to map the next token to a KubernetesManifest. + // In both of these cases, the error is due to the output from kubectl being malformed (or + // at least malformed relative to our expectations) so we'll wrap the exception in a + // KubectlException.
+ throw new KubectlException("Failed to parse kubectl output: " + e.getMessage(), e); + } + }; + } + + /** + * This method executes the actual kubectl command and determines if retries are required, on + * failure. + * + * @param credentials k8s account credentials + * @param command the actual kubectl command to be performed + * @return - the result of the kubectl command + */ + private JobResult executeKubectlCommand( + KubernetesCredentials credentials, List command) { + return executeKubectlCommand(credentials, command, Optional.empty()); + } + + /** + * This method executes the actual kubectl command and determines if retries are required, on + * failure. + * + * @param credentials k8s account credentials + * @param command the actual kubectl command to be performed + * @param manifest the manifest supplied to the kubectl command + * @return - the result of the kubectl command + */ + private JobResult executeKubectlCommand( + KubernetesCredentials credentials, + List command, + Optional manifest) { + // retry registry is empty if retries are not enabled. + if (retryRegistry.isEmpty()) { + return jobExecutor.runJob(createJobRequest(command, manifest)); + } + + // capture the original result obtained from the jobExecutor.runJob(jobRequest) call. + JobResult.JobResultBuilder finalResult = JobResult.builder(); + + KubectlActionIdentifier identifier = + new KubectlActionIdentifier(credentials, command, manifest); + Retry retryContext = retryRegistry.get().retry(identifier.getRetryInstanceName()); + try { + return retryContext.executeSupplier( + () -> { + JobResult result = jobExecutor.runJob(createJobRequest(command, manifest)); + return processJobResult(identifier, result, finalResult); + }); + } catch (KubectlException | NoRetryException e) { + // the caller functions expect any failures to be defined in a JobResult object and not in + // the form of an exception. Hence, we need to translate the above exceptions back into a + // JobResult object - but we only need to do it for KubectlException and NoRetryException ( + // since these are the ones explicitly thrown above) and not for any other ones. + return finalResult.build(); + } + } + + /** + * This method executes the actual kubectl command and determines if retries are required, on + * failure. + * + * @param credentials k8s account credentials + * @param command the actual kubectl command to be performed + * @param readerConsumer A function that transforms the job's standard output + * @param return type of the JobResult output + * @return the result of the kubectl command + */ + private JobResult executeKubectlCommand( + KubernetesCredentials credentials, List command, ReaderConsumer readerConsumer) { + // retry registry is empty if retries are not enabled. + if (retryRegistry.isEmpty()) { + return jobExecutor.runJob(new JobRequest(command), readerConsumer); + } + + // capture the original result obtained from the jobExecutor.runJob(jobRequest, readerConsumer) + // call. 
+ JobResult.JobResultBuilder finalResult = JobResult.builder(); + KubectlActionIdentifier identifier = new KubectlActionIdentifier(credentials, command); + Retry retryContext = retryRegistry.get().retry(identifier.getRetryInstanceName()); + try { + return retryContext.executeSupplier( + () -> { + JobResult result = jobExecutor.runJob(new JobRequest(command), readerConsumer); + return processJobResult(identifier, result, finalResult); + }); + } catch (KubectlException | NoRetryException e) { + // the caller functions expect any failures to be defined in a JobResult object and not in + // the form of an exception. Hence, we need to translate the above exceptions back into a + // JobResult object - but we only need to do it for KubectlException and NoRetryException + // (since these are the ones explicitly thrown above) and not for any other ones. + return finalResult.build(); + } + } + + /** + * helper function to create a JobRequest using the input parameters + * + * @param command the command to be executed in the job request + * @param manifest the manifest to be used by the command. This is optional. + * @return a job request object + */ + @VisibleForTesting + JobRequest createJobRequest(List command, Optional manifest) { + // depending on the presence of the manifest, an appropriate job request is created + if (manifest.isPresent()) { + String manifestAsJson = gson.toJson(manifest.get()); + return new JobRequest( + command, new ByteArrayInputStream(manifestAsJson.getBytes(StandardCharsets.UTF_8))); + } + + return new JobRequest(command); + } + + /** + * helper function to handle a job result obtained after performing a job request. This either + * returns the result, if successful, or throws an exception on failure. + * + * @param identifier uniquely identifies the job in the logs + * @param result the job result to be processed + * @param finalResult a buffer that keeps track of the result. This ensures on retries, the + * original is not lost + * @param the return type of the JobResult output + * @return the result of the kubectl command, in the form of a JobResult object + */ + @VisibleForTesting + JobResult processJobResult( + KubectlActionIdentifier identifier, + JobResult result, + JobResult.JobResultBuilder finalResult) { + if (result.getResult() == JobResult.Result.SUCCESS) { + return result; + } + + // save the result as it'll be needed later on when we are done with retries + finalResult + .error(result.getError()) + .killed(result.isKilled()) + .output(result.getOutput()) + .result(result.getResult()); + + // if result is not successful, that means we need to determine if we should retry + // or not. + // + // Since Kubectl binary doesn't throw any exceptions by default, we need to + // check the result to see if retries are needed. Resilience.4j needs an exception to be + // thrown to decide if retries are needed and also, to capture retry metrics correctly. + throw convertKubectlJobResultToException(identifier.getKubectlAction(), result); + } + + /** + * this method is meant to be invoked only for those JobResults which are unsuccessful. It + * determines if the error contained in the JobResult should be retried or not. If the error needs + * to be retried, then KubectlException is returned. Otherwise, NoRetryException is returned. 
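This classification is what lets resilience4j drive retries: matching error substrings (and killed jobs) surface as KubectlException, which is retried, while everything else surfaces as NoRetryException, which the registry is configured to ignore. A standalone sketch of the rule, with a made-up retryable-message list (the real list comes from getRetries().getRetryableErrorMessages()):

```java
// Standalone sketch of the retry classification; the message list is made up.
import java.util.List;

class RetryClassificationSketch {
  public static void main(String[] args) {
    List<String> retryableMessages = List.of("TLS handshake timeout", "connection refused");
    String error = "Unable to connect to the server: net/http: TLS handshake timeout";
    boolean jobKilled = false;

    boolean retry = jobKilled || retryableMessages.stream().anyMatch(error::contains);
    // Retryable failures become KubectlException (retried); the rest become
    // NoRetryException, which the retry registry ignores.
    System.out.println(retry ? "KubectlException -> retry" : "NoRetryException -> give up");
  }
}
```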
+ * + * @param identifier used to log which action's job result is being processed + * @param result the job result which needs to be checked to see if it has an error that can be + * retried + * @param job result generic type + * @return - Either KubectlException or NoRetryException + */ + private RuntimeException convertKubectlJobResultToException( + String identifier, JobResult result) { + // the error matches the configured list of retryable errors. + if (this.kubernetesConfigurationProperties + .getJobExecutor() + .getRetries() + .getRetryableErrorMessages() + .stream() + .anyMatch(errorMessage -> result.getError().contains(errorMessage))) { + return new KubectlException(identifier + " failed. Error: " + result.getError()); + } + + // even though the error is not explicitly configured to be retryable, the job was killed - + // hence, we should retry + if (result.isKilled()) { + return new KubectlException( + "retrying " + identifier + " since the job " + result + " was killed"); + } + + String message = + "Not retrying " + + identifier + + " as retries are not enabled for error: " + + result.getError(); + log.warn(message); + // we want to let the retry library know that such errors should not be retried. + // Since we have configured the global retry registry to ignore errors of type + // NoRetryException, we return this here + return new NoRetryException(message); + } + + private void persistKubectlJobOutput( + KubernetesCredentials credentials, + JobResult status, + String manifestName, + Task task, + String taskName) { + if (kubernetesConfigurationProperties.getJobExecutor().isPersistTaskOutput()) { + if (kubernetesConfigurationProperties.getJobExecutor().isEnableTaskOutputForAllAccounts() + || credentials.isDebug()) { + task.updateOutput(manifestName, taskName, status.getOutput(), status.getError()); + } + } + } + + private void addLabelSelectors(List command, KubernetesSelectorList labelSelectors) { + if (labelSelectors != null && !labelSelectors.isEmpty()) { + command.add("-l=" + labelSelectors); + } + } + + public static class KubectlException extends RuntimeException { + public KubectlException(String message) { + super(message); + } + + public KubectlException(String message, Throwable cause) { + super(message, cause); + } + } + + public static class KubectlNotFoundException extends KubectlException { + public KubectlNotFoundException(String message) { + super(message); + } + } + + /** + * this exception is only meant to be used in cases where we want resilience4j to not retry + * kubectl calls. It should not be used anywhere else. 
+ */ + static class NoRetryException extends RuntimeException { + NoRetryException(String message) { + super(message); + } + } + + /** helper class to identify the kubectl command in logs and metrics when retries are enabled */ + static class KubectlActionIdentifier { + KubernetesCredentials credentials; + List command; + String namespace; + String resource; + + public KubectlActionIdentifier( + KubernetesCredentials credentials, + List command, + String namespace, + String resource) { + this.credentials = credentials; + this.command = command; + this.namespace = namespace; + this.resource = resource; + } + + public KubectlActionIdentifier(KubernetesCredentials credentials, List command) { + this(credentials, command, "", ""); + } + + public KubectlActionIdentifier( + KubernetesCredentials credentials, + List command, + Optional manifest) { + this(credentials, command); + if (manifest.isPresent()) { + this.namespace = manifest.get().getNamespace(); + this.resource = manifest.get().getFullResourceName(); + } + } + + /** + * this returns the sanitized kubectl command. This can be used to log the command during retry + * attempts, among other things. + * + * @return - the sanitized kubectl command + */ + public String getKubectlAction() { + // no need to display everything in a kubectl command + List commandToLog = + command.stream() + .filter( + s -> + !(s.contains(KUBECTL_COMMAND_OPTION_TOKEN) + || s.contains(KUBECTL_COMMAND_OPTION_KUBECONFIG) + || s.contains(KUBECTL_COMMAND_OPTION_CONTEXT))) + .collect(Collectors.toList()); + + String identifier = + "command: '" + + String.join(" ", commandToLog) + + "' in account: " + + this.credentials.getAccountName(); + + if (!namespace.isEmpty()) { + identifier += " in namespace: " + namespace; + } + + if (!resource.isEmpty()) { + identifier += " for resource: " + resource; + } + return identifier; + } + + /** + * this returns a name which uniquely identifies a retry instance. This name shows up in the + * logs when each retry event is logged. Also, when capturing the retry metrics, the 'name' tag + * in the metric corresponds to this. + * + * @return - the name to be used to uniquely identify a retry instance + */ + public String getRetryInstanceName() { + return this.credentials.getAccountName(); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobDeploymentResult.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobDeploymentResult.java new file mode 100644 index 00000000000..2654bfc546f --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobDeploymentResult.java @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
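Worth noting in KubectlActionIdentifier above: before a retried command is logged, arguments carrying credentials or per-call noise (--token=, --kubeconfig=, --context=) are filtered out. A self-contained sketch of that sanitization:

```java
// Self-contained sketch of the sanitization in getKubectlAction(): flags
// carrying credentials are dropped before the command is logged.
import java.util.List;
import java.util.stream.Collectors;

class SanitizeCommandSketch {
  public static void main(String[] args) {
    List<String> command =
        List.of(
            "kubectl", "--token=s3cr3t", "--kubeconfig=/path/kube.yml",
            "--context=prod", "--namespace=default", "get", "pods");
    String logged =
        command.stream()
            .filter(
                s ->
                    !(s.contains("--token=")
                        || s.contains("--kubeconfig=")
                        || s.contains("--context=")))
            .collect(Collectors.joining(" "));
    System.out.println(logged); // kubectl --namespace=default get pods
  }
}
```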
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.job; + +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class KubernetesRunJobDeploymentResult extends OperationResult { + Map<String, List<String>> deployedNamesByLocation = new HashMap<>(); + + public KubernetesRunJobDeploymentResult(OperationResult result) { + this.setManifestNamesByNamespace(result.getManifestNamesByNamespace()); + this.setManifests(result.getManifests()); + this.setCreatedArtifacts(result.getCreatedArtifacts()); + this.setBoundArtifacts(result.getBoundArtifacts()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobOperation.java new file mode 100644 index 00000000000..6f513d7957d --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobOperation.java @@ -0,0 +1,107 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.job; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ResourceVersioner; +import com.netflix.spinnaker.clouddriver.kubernetes.description.job.KubernetesRunJobOperationDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeployManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.DeployStrategy; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesDeployManifestOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.*; + +public class KubernetesRunJobOperation + implements AtomicOperation<KubernetesRunJobDeploymentResult> { + private static final String OP_NAME = "RUN_KUBERNETES_JOB"; + private final KubernetesRunJobOperationDescription description; + private final ResourceVersioner resourceVersioner; + + public KubernetesRunJobOperation( + KubernetesRunJobOperationDescription description, ResourceVersioner resourceVersioner) { + this.description = description; + this.resourceVersioner = resourceVersioner; + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public KubernetesRunJobDeploymentResult operate(List _unused) { + getTask().updateStatus(OP_NAME, "Running Kubernetes job..."); + KubernetesManifest jobSpec = this.description.getManifest(); + KubernetesKind kind = jobSpec.getKind(); + if (!kind.equals(KubernetesKind.JOB)) { + throw new IllegalArgumentException("Only kind of Job is accepted for the Run Job operation."); + } + + jobSpec.computeIfAbsent("metadata", k -> new HashMap<>()); + + if (!this.description.getNamespace().isEmpty()) { + jobSpec.setNamespace(this.description.getNamespace()); + } + + // Jobs that don't specify 'generateName' require the recreate strategy because jobs are + // immutable and trying to re-run a job with apply will either: + // (1) succeed and leave the job unchanged (but will not trigger a re-run) + // (2) fail if we try to change anything + // As the purpose of a run job stage is to ensure that each execution causes a job to run, + // we'll force a new job to be created each time. + // + // The deployment strategy logic handles the generateName case, so don't add + // any additional logic here.
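+ // + // For example (illustrative manifest): a Job with metadata.generateName: my-job- produces a + // freshly named Job (my-job-x7k2q, say) on every execution, whereas a fixed metadata.name + // would make a second apply of the unchanged spec a no-op.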
+ KubernetesManifestAnnotater.setDeploymentStrategy(jobSpec, DeployStrategy.RECREATE); + + KubernetesDeployManifestDescription deployManifestDescription = + new KubernetesDeployManifestDescription(); + // setup description + List<KubernetesManifest> manifests = new ArrayList<>(); + manifests.add(jobSpec); + + Moniker moniker = new Moniker(); + moniker.setApp(description.getApplication()); + + deployManifestDescription.setManifests(manifests); + deployManifestDescription.setRequiredArtifacts(description.getRequiredArtifacts()); + deployManifestDescription.setOptionalArtifacts(description.getOptionalArtifacts()); + deployManifestDescription.setSource(KubernetesDeployManifestDescription.Source.text); + deployManifestDescription.setCredentials(description.getCredentials()); + deployManifestDescription.setAccount(description.getAccount()); + deployManifestDescription.setMoniker(moniker); + + KubernetesDeployManifestOperation deployManifestOperation = + new KubernetesDeployManifestOperation(deployManifestDescription, resourceVersioner); + OperationResult operationResult = deployManifestOperation.operate(new ArrayList<>()); + KubernetesRunJobDeploymentResult deploymentResult = + new KubernetesRunJobDeploymentResult(operationResult); + Map<String, List<String>> deployedNames = deploymentResult.getDeployedNamesByLocation(); + for (Map.Entry<String, Set<String>> e : + operationResult.getManifestNamesByNamespace().entrySet()) { + deployedNames.put(e.getKey(), new ArrayList<>(e.getValue())); + } + deploymentResult.setDeployedNamesByLocation(deployedNames); + return deploymentResult; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/MetricParser.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/MetricParser.java new file mode 100644 index 00000000000..442f25f1fe6 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/MetricParser.java @@ -0,0 +1,136 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.job; + +import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSetMultimap; +import com.google.common.collect.Streams; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric.ContainerMetric; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.regex.Pattern; +import lombok.Getter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@NonnullByDefault +final class MetricParser { + private static final Splitter lineSplitter = Splitter.on('\n').trimResults().omitEmptyStrings(); + + /** + * Given the output of a kubectl top command, parses the metrics returning a MetricLine for each + * line successfully parsed.
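+ * + * <p>For example, typical {@code kubectl top pod --containers} output looks roughly like this + * (illustrative values): + * + * <pre> + * POD NAME CPU(cores) MEMORY(bytes) + * myapp-pod-1 main 10m 50Mi + * </pre>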
+ * + * <p>
If the output is empty or is in an unrecognized format, returns an empty list. + * + * @param kubectlOutput the output from kubectl top + * @return The parsed metrics + */ + static ImmutableSetMultimap<String, ContainerMetric> parseMetrics(String kubectlOutput) { + Iterator<String> lines = lineSplitter.split(kubectlOutput.trim()).iterator(); + if (!lines.hasNext()) { + return ImmutableSetMultimap.of(); + } + + Optional<MetricParser.LineParser> optionalParser = + MetricParser.LineParser.withHeader(lines.next()); + if (!optionalParser.isPresent()) { + return ImmutableSetMultimap.of(); + } + MetricParser.LineParser parser = optionalParser.get(); + return Streams.stream(lines) + .map(parser::readLine) + .filter(Optional::isPresent) + .map(Optional::get) + .collect( + ImmutableSetMultimap.toImmutableSetMultimap( + MetricParser.MetricLine::getPod, MetricParser.MetricLine::toContainerMetric)); + } + + private static final class LineParser { + private static final Logger log = LoggerFactory.getLogger(LineParser.class); + private static final Splitter columnSplitter = + Splitter.on(Pattern.compile("\\s+")).trimResults(); + private final ImmutableList<String> headers; + + private LineParser(Iterable<String> header) throws IllegalArgumentException { + ImmutableList<String> headers = ImmutableList.copyOf(header); + if (headers.size() <= 2) { + throw new IllegalArgumentException( + String.format( + "Unexpected metric format -- no metrics to report based on table header %s.", + headers)); + } + this.headers = headers; + } + + /** + * Returns a metric parser that parses metrics from an ASCII table with the input string as the + * header. If the header is not in the expected format, logs a warning and returns an empty + * optional. + * + * @param header header of the ASCII table + * @return An optional containing a metric parser if the header was in the expected format; an + * empty optional otherwise + */ + static Optional<LineParser> withHeader(String header) { + try { + return Optional.of(new LineParser(columnSplitter.split(header))); + } catch (IllegalArgumentException e) { + log.warn(e.getMessage()); + return Optional.empty(); + } + } + + private Optional<MetricLine> readLine(String line) { + List<String> entry = columnSplitter.splitToList(line); + if (entry.size() != headers.size()) { + log.warn("Entry {} does not match column width of {}, skipping", entry, headers); + return Optional.empty(); + } + String podName = entry.get(0); + String containerName = entry.get(1); + ImmutableMap.Builder<String, String> metrics = ImmutableMap.builder(); + for (int j = 2; j < headers.size(); j++) { + metrics.put(headers.get(j), entry.get(j)); + } + return Optional.of(new MetricLine(podName, containerName, metrics.build())); + } + } + + private static final class MetricLine { + @Getter private final String pod; + private final String container; + private final ImmutableMap<String, String> metrics; + + private MetricLine(String pod, String container, Map<String, String> metrics) { + this.pod = pod; + this.container = container; + this.metrics = ImmutableMap.copyOf(metrics); + } + + ContainerMetric toContainerMetric() { + return new ContainerMetric(container, metrics); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/AbstractKubernetesEnableDisableManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/AbstractKubernetesEnableDisableManifestOperation.java new file mode 100644 index 00000000000..719664f78f0 --- /dev/null +++
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/AbstractKubernetesEnableDisableManifestOperation.java @@ -0,0 +1,169 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestTraffic; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanLoadBalance; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.HasPods; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nonnull; +import javax.annotation.ParametersAreNonnullByDefault; +import org.apache.commons.text.WordUtils; + +@ParametersAreNonnullByDefault +public abstract class AbstractKubernetesEnableDisableManifestOperation + implements AtomicOperation<OperationResult> { + private final KubernetesEnableDisableManifestDescription description; + private final KubernetesCredentials credentials; + private final String OP_NAME = getVerbName().toUpperCase() + "_MANIFEST"; + + protected abstract String getVerbName(); + + protected abstract List<JsonPatch> patchResource( + CanLoadBalance loadBalancerHandler, + KubernetesManifest loadBalancer, + KubernetesManifest target); + + protected AbstractKubernetesEnableDisableManifestOperation( + KubernetesEnableDisableManifestDescription description) { + this.description = description; + this.credentials = description.getCredentials().getCredentials(); + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Nonnull + private List<String> determineLoadBalancers(@Nonnull KubernetesManifest target) { + getTask().updateStatus(OP_NAME, "Getting load balancer list to " + getVerbName() + "..."); + ImmutableList<String> result = description.getLoadBalancers(); + if (!result.isEmpty()) { + getTask().updateStatus(OP_NAME, "Using supplied list [" + String.join(", ", result) + "]"); + } else { + KubernetesManifestTraffic traffic =
KubernetesManifestAnnotater.getTraffic(target); + result = traffic.getLoadBalancers(); + getTask().updateStatus(OP_NAME, "Using annotated list [" + String.join(", ", result) + "]"); + } + + return result; + } + + private void op(String loadBalancerName, KubernetesManifest target) { + KubernetesCoordinates coords; + try { + coords = + KubernetesCoordinates.builder() + .namespace(target.getNamespace()) + .fullResourceName(loadBalancerName) + .build(); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + String.format( + "Failed to perform operation with load balancer '%s'. Load balancers must be specified in the form '{kind} {name}', e.g. 'service my-service'", + loadBalancerName), + e); + } + + CanLoadBalance loadBalancerHandler = + CanLoadBalance.lookupProperties(credentials.getResourcePropertyRegistry(), coords); + KubernetesManifest loadBalancer = + Optional.ofNullable(credentials.get(coords)) + .orElseThrow( + () -> + new IllegalStateException( + String.format("Could not find load balancer: %s.", coords))); + + List<JsonPatch> patch = patchResource(loadBalancerHandler, loadBalancer, target); + + getTask().updateStatus(OP_NAME, "Patching target for '" + loadBalancerName + "'"); + credentials.patch( + target.getKind(), + target.getNamespace(), + target.getName(), + KubernetesPatchOptions.json(), + patch, + getTask(), + OP_NAME); + + HasPods podHandler = null; + try { + podHandler = + HasPods.lookupProperties(credentials.getResourcePropertyRegistry(), target.getKind()); + } catch (IllegalArgumentException e) { + // this is OK, the workload might not have pods + } + + if (podHandler != null) { + getTask().updateStatus(OP_NAME, "Patching pods for '" + loadBalancerName + "'"); + List<KubernetesManifest> pods = podHandler.pods(credentials, target); + // todo(lwander) parallelize, this will get slow for large workloads + for (KubernetesManifest pod : pods) { + patch = patchResource(loadBalancerHandler, loadBalancer, pod); + credentials.patch( + pod.getKind(), + pod.getNamespace(), + pod.getName(), + KubernetesPatchOptions.json(), + patch, + getTask(), + OP_NAME); + } + } + } + + @Override + public OperationResult operate(List priorOutputs) { + getTask() + .updateStatus( + OP_NAME, + "Starting " + + getVerbName() + + " operation in account " + + credentials.getAccountName() + + "..."); + KubernetesCoordinates coordinates = description.getPointCoordinates(); + KubernetesManifest target = + Optional.ofNullable(credentials.get(coordinates)) + .orElseThrow( + () -> + new IllegalStateException( + String.format( + "Could not find kubernetes manifest: %s", coordinates.toString()))); + determineLoadBalancers(target).forEach(l -> op(l, target)); + + getTask() + .updateStatus( + OP_NAME, + WordUtils.capitalize(getVerbName()) + " operation for " + coordinates + " succeeded"); + return null; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/ArtifactKey.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/ArtifactKey.java new file mode 100644 index 00000000000..e50775f14ff --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/ArtifactKey.java @@ -0,0 +1,76 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; + +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.Collection; +import java.util.Objects; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.EqualsAndHashCode; +import lombok.ToString; + +/** + * When determining whether the deploy and patch manifest stages bound all required artifacts, the + * artifacts in the list of required artifacts have an artifact account set while those in the list + * we're trying to bind don't. + * + *
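+ * <p>For example, the required-artifacts list may carry an artifact like {type: docker/image, + * name: nginx, version: 1.19.1, artifactAccount: docker-registry}, while the corresponding + * artifact we try to bind from the manifest has no account field set (illustrative values).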

<p>As the .equals function of Artifact includes the account in its comparison, this means that we + * don't recognize the replaced artifacts as the ones we expected to replace and fail the stage. + * + * <p>
As a temporary fix until we can refactor the artifact passing code to consistently include (or + * not) account, or decide that account should always be excluded from Artifact.equals(), create a + * class to hold the fields of Artifact that these two stages should use when deciding whether + * artifacts are equal. + */ +@EqualsAndHashCode +@ToString +class ArtifactKey { + private final String type; + private final String name; + private final String version; + private final String location; + private final String reference; + + private ArtifactKey(Artifact artifact) { + this.type = artifact.getType(); + this.name = artifact.getName(); + this.version = artifact.getVersion(); + this.location = artifact.getLocation(); + this.reference = artifact.getReference(); + } + + @Nonnull + static ArtifactKey fromArtifact(@Nonnull Artifact artifact) { + return new ArtifactKey(artifact); + } + + @Nonnull + static ImmutableSet<ArtifactKey> fromArtifacts(@Nullable Collection<Artifact> artifacts) { + if (artifacts == null) { + return ImmutableSet.of(); + } + return artifacts.stream() + .filter(Objects::nonNull) + .map(ArtifactKey::fromArtifact) + .collect(toImmutableSet()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDeleteManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDeleteManifestOperation.java new file mode 100644 index 00000000000..d71dec4b83e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDeleteManifestOperation.java @@ -0,0 +1,134 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeleteManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import io.kubernetes.client.openapi.models.V1DeleteOptions; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KubernetesDeleteManifestOperation implements AtomicOperation<OperationResult> { + private static final Logger log = + LoggerFactory.getLogger(KubernetesDeleteManifestOperation.class); + private final KubernetesDeleteManifestDescription description; + private final KubernetesCredentials credentials; + private static final String OP_NAME = "DELETE_KUBERNETES_MANIFEST"; + + public KubernetesDeleteManifestOperation(KubernetesDeleteManifestDescription description) { + this.description = description; + this.credentials = description.getCredentials().getCredentials(); + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public OperationResult operate(List priorOutputs) { + getTask() + .updateStatus( + OP_NAME, + "Starting delete operation in account " + credentials.getAccountName() + "..."); + List<KubernetesCoordinates> coordinates; + + if (description.isDynamic()) { + coordinates = description.getAllCoordinates(); + } else { + coordinates = ImmutableList.of(description.getPointCoordinates()); + } + + // If "orphanDependents" is explicitly set by the stage, then the cascade flag of kubectl + // delete will honor the setting. + // + // If orphanDependents isn't set, then look at the value of the delete + // option. The "Cascading" delete checkbox in the UI sets it to true/false, + // but support other values (e.g. foreground/background/orphan) if set + // directly in the pipeline json. + V1DeleteOptions deleteOptions = new V1DeleteOptions(); + Map<String, String> options = + description.getOptions() == null ? new HashMap<>() : description.getOptions(); + if (options.containsKey("orphanDependents")) { + deleteOptions.setPropagationPolicy( + options.get("orphanDependents").equalsIgnoreCase("true") ? "orphan" : "background"); + } else if (options.containsKey("cascading")) { + // For compatibility with pipelines that specify cascading as true/false, + // map to the appropriate propagation policy. Clouddriver currently uses + // kubectl 1.22.17, where --cascade=true/false works, but generates a + // warning.
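+ // + // For example, options {"cascading": "false"} maps to propagation policy "orphan", which is + // what kubectl delete --cascade=orphan does on newer kubectl versions.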
+ // + // See + // https://github.com/kubernetes/kubernetes/blob/v1.22.17/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go#L243-L249 + // + // --cascade=false --> orphan + // --cascade=true --> background + String propagationPolicy = null; + if (options.get("cascading").equalsIgnoreCase("false")) { + propagationPolicy = "orphan"; + } else if (options.get("cascading").equalsIgnoreCase("true")) { + propagationPolicy = "background"; + } else { + propagationPolicy = options.get("cascading"); + } + deleteOptions.setPropagationPolicy(propagationPolicy); + } + + if (options.containsKey("gracePeriodSeconds")) { + try { + deleteOptions.setGracePeriodSeconds(Long.parseLong(options.get("gracePeriodSeconds"))); + } catch (NumberFormatException nfe) { + log.warn("Unable to parse gracePeriodSeconds; {}", nfe.getMessage()); + } + } + OperationResult result = new OperationResult(); + coordinates.forEach( + c -> { + getTask() + .updateStatus(OP_NAME, "Looking up resource properties for " + c.getKind() + "..."); + KubernetesHandler deployer = + credentials.getResourcePropertyRegistry().get(c.getKind()).getHandler(); + getTask().updateStatus(OP_NAME, "Calling delete operation for resource " + c + "..."); + result.merge( + deployer.delete( + credentials, + c.getNamespace(), + c.getName(), + description.getLabelSelectors(), + deleteOptions, + getTask(), + OP_NAME)); + getTask() + .updateStatus(OP_NAME, "Delete operation completed successfully for " + c.getName()); + }); + + getTask() + .updateStatus( + OP_NAME, "Delete operation completed successfully for all applicable resources"); + return result; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDeployManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDeployManifestOperation.java new file mode 100644 index 00000000000..2e559728815 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDeployManifestOperation.java @@ -0,0 +1,435 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer.ReplaceResult; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ResourceVersioner; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.*; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.Versioned; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.*; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.*; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import lombok.Data; +import lombok.RequiredArgsConstructor; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KubernetesDeployManifestOperation implements AtomicOperation<OperationResult> { + private static final Logger log = + LoggerFactory.getLogger(KubernetesDeployManifestOperation.class); + private final KubernetesDeployManifestDescription description; + private final KubernetesCredentials credentials; + private final ResourceVersioner resourceVersioner; + @Nonnull private final String accountName; + private static final String OP_NAME = "DEPLOY_KUBERNETES_MANIFEST"; + + public KubernetesDeployManifestOperation( + KubernetesDeployManifestDescription description, ResourceVersioner resourceVersioner) { + this.description = description; + this.credentials = description.getCredentials().getCredentials(); + this.resourceVersioner = resourceVersioner; + this.accountName = description.getCredentials().getName(); + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public OperationResult operate(List _unused) { + getTask() + .updateStatus( + OP_NAME, "Beginning deployment of manifests in account " + accountName + " ..."); + + final List<KubernetesManifest> inputManifests = getManifestsFromDescription(); + sortManifests(inputManifests); + Map<String, Artifact> allArtifacts = initializeArtifacts(); + + OperationResult result = new OperationResult(); + List<ManifestArtifactHolder> toDeploy = + inputManifests.stream() + .map( + manifest -> { + KubernetesManifestAnnotater.validateAnnotationsForRolloutStrategies( + manifest, description); + + // Bind artifacts + manifest = bindArtifacts(manifest, allArtifacts.values(), result); + + KubernetesResourceProperties properties = findResourceProperties(manifest); + KubernetesManifestStrategy strategy = + KubernetesManifestAnnotater.getStrategy(manifest); + + OptionalInt version = + isVersioned(properties, strategy) + ?
resourceVersioner.getVersion(manifest, credentials) + : OptionalInt.empty(); + + Moniker moniker = cloneMoniker(description.getMoniker()); + version.ifPresent(moniker::setSequence); + if (Strings.isNullOrEmpty(moniker.getCluster())) { + moniker.setCluster(manifest.getFullResourceName()); + } + + Artifact artifact = + ArtifactConverter.toArtifact(manifest, description.getAccount(), version); + // Artifacts generated in this stage replace any required or optional artifacts + // coming from the request + allArtifacts.put(getArtifactKey(artifact), artifact); + + getTask() + .updateStatus( + OP_NAME, + "Annotating manifest " + + manifest.getFullResourceName() + + " with artifact, relationships & moniker..."); + KubernetesManifestAnnotater.annotateManifest(manifest, artifact); + + KubernetesHandler deployer = properties.getHandler(); + if (strategy.isUseSourceCapacity() && deployer instanceof CanScale) { + OptionalInt latestVersion = latestVersion(manifest, version); + Integer replicas = + KubernetesSourceCapacity.getSourceCapacity( + manifest, credentials, latestVersion); + if (replicas != null) { + manifest.setReplicas(replicas); + } + } + + if (deployer instanceof CanReceiveTraffic) { + setTrafficAnnotation(description.getServices(), manifest); + + if (description.isEnableTraffic()) { + KubernetesManifestTraffic traffic = + KubernetesManifestAnnotater.getTraffic(manifest); + applyTraffic(traffic, manifest, inputManifests); + } + } + + credentials.getNamer().applyMoniker(manifest, moniker, description); + manifest.setName(artifact.getReference()); + + return new ManifestArtifactHolder(manifest, artifact, strategy); + }) + .collect(Collectors.toList()); + + checkIfArtifactsBound(result); + + KubernetesSelectorList labelSelectors = this.description.getLabelSelectors(); + + // kubectl replace doesn't support selectors, so fail if any manifest uses + // the replace strategy + if (labelSelectors.isNotEmpty() + && toDeploy.stream() + .map((holder) -> holder.getStrategy().getDeployStrategy()) + .anyMatch( + (strategy) -> strategy == KubernetesManifestStrategy.DeployStrategy.REPLACE)) { + throw new IllegalArgumentException( + "label selectors not supported with replace strategy, not deploying"); + } + + toDeploy.forEach( + holder -> { + KubernetesResourceProperties properties = findResourceProperties(holder.manifest); + KubernetesManifestStrategy strategy = holder.strategy; + KubernetesHandler deployer = properties.getHandler(); + getTask() + .updateStatus( + OP_NAME, + "Submitting manifest " + + holder.manifest.getFullResourceName() + + " to kubernetes master..."); + result.merge( + deployer.deploy( + credentials, + holder.manifest, + strategy.getDeployStrategy(), + strategy.getServerSideApplyStrategy(), + getTask(), + OP_NAME, + labelSelectors)); + + result.getCreatedArtifacts().add(holder.artifact); + getTask() + .updateStatus( + OP_NAME, + "Deploy manifest task completed successfully for manifest " + + holder.manifest.getFullResourceName() + + " in account " + + accountName); + }); + + // If a label selector was specified and nothing has been deployed, throw an + // exception to fail the task if configured to do so. 
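+ // For example, deploying with the selector app=my-app when no candidate manifest carries + // that label would otherwise report success while deploying nothing.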
+ if (!description.isAllowNothingSelected() + && labelSelectors.isNotEmpty() + && result.getManifests().isEmpty()) { + throw new IllegalStateException( + "nothing deployed to account " + + accountName + + " with label selector(s) " + + labelSelectors.toString()); + } + result.removeSensitiveKeys(credentials.getResourcePropertyRegistry()); + + getTask() + .updateStatus( + OP_NAME, + "Deploy manifest task completed successfully for all manifests in account " + + accountName); + return result; + } + + @NotNull + private OptionalInt latestVersion(KubernetesManifest manifest, OptionalInt version) { + if (version.isEmpty()) { + return OptionalInt.empty(); + } + return resourceVersioner.getLatestVersion(manifest, credentials); + } + + @NotNull + private List<KubernetesManifest> getManifestsFromDescription() { + List<KubernetesManifest> inputManifests = description.getManifests(); + if (inputManifests == null || inputManifests.isEmpty()) { + // The stage currently only supports using the `manifests` field but we need to continue to + // check `manifest` for backwards compatibility until all existing stages have been updated. + @SuppressWarnings("deprecation") + KubernetesManifest manifest = description.getManifest(); + + // manifest may be null as well, so check + if (manifest != null) { + log.warn( + "Relying on deprecated single manifest input (account: {}, kind: {}, name: {})", + accountName, + manifest.getKind(), + manifest.getName()); + inputManifests = ImmutableList.of(manifest); + } + } + inputManifests = inputManifests.stream().filter(Objects::nonNull).collect(Collectors.toList()); + return inputManifests; + } + + @NotNull + private Map<String, Artifact> initializeArtifacts() { + Map<String, Artifact> allArtifacts = new HashMap<>(); + if (!description.isEnableArtifactBinding()) { + return allArtifacts; + } + // Required artifacts are explicitly set in stage configuration + if (description.getRequiredArtifacts() != null) { + description + .getRequiredArtifacts() + .forEach(a -> allArtifacts.putIfAbsent(getArtifactKey(a), a)); + } + // Optional artifacts are taken from the pipeline trigger or pipeline execution context + if (description.getOptionalArtifacts() != null) { + description + .getOptionalArtifacts() + .forEach(a -> allArtifacts.putIfAbsent(getArtifactKey(a), a)); + } + return allArtifacts; + } + + private KubernetesManifest bindArtifacts( + KubernetesManifest manifest, + Collection<Artifact> artifacts, + OperationResult operationResult) { + getTask() + .updateStatus(OP_NAME, "Binding artifacts in " + manifest.getFullResourceName() + "..."); + + ReplaceResult replaceResult = + findResourceProperties(manifest) + .getHandler() + .replaceArtifacts(manifest, List.copyOf(artifacts), description.getAccount()); + + getTask() + .updateStatus(OP_NAME, "Bound artifacts: " + replaceResult.getBoundArtifacts() + "..."); + + operationResult.getBoundArtifacts().addAll(replaceResult.getBoundArtifacts()); + return replaceResult.getManifest(); + } + + private void checkIfArtifactsBound(OperationResult operationResult) { + getTask().updateStatus(OP_NAME, "Checking if all requested artifacts were bound..."); + if (description.isEnableArtifactBinding()) { + Set<ArtifactKey> unboundArtifacts = + Sets.difference( + ArtifactKey.fromArtifacts(description.getRequiredArtifacts()), + ArtifactKey.fromArtifacts(operationResult.getBoundArtifacts())); + + if (!unboundArtifacts.isEmpty()) { + throw new IllegalArgumentException( + String.format( + "The following required artifacts could not be bound: '%s'. 
" + + "Check that the Docker image name above matches the name used in the image field of your manifest. " + + "Failing the stage as this is likely a configuration error.", + unboundArtifacts)); + } + } + } + + private void sortManifests(List manifests) { + getTask().updateStatus(OP_NAME, "Sorting manifests by priority..."); + manifests.sort( + Comparator.comparingInt(m -> findResourceProperties(m).getHandler().deployPriority())); + getTask() + .updateStatus( + OP_NAME, + "Deploy order is: " + + manifests.stream() + .map(KubernetesManifest::getFullResourceName) + .collect(Collectors.joining(", "))); + } + + private String getArtifactKey(Artifact artifact) { + return String.format( + "[%s]-[%s]-[%s]", + artifact.getType(), + artifact.getName(), + artifact.getLocation() != null ? artifact.getLocation() : ""); + } + + private void setTrafficAnnotation(List services, KubernetesManifest manifest) { + if (services == null || services.isEmpty()) { + return; + } + KubernetesManifestTraffic traffic = new KubernetesManifestTraffic(services); + KubernetesManifestAnnotater.setTraffic(manifest, traffic); + } + + private void applyTraffic( + KubernetesManifestTraffic traffic, + KubernetesManifest target, + Collection manifestsFromRequest) { + traffic.getLoadBalancers().forEach(l -> attachLoadBalancer(l, target, manifestsFromRequest)); + } + + private void attachLoadBalancer( + String loadBalancerName, + KubernetesManifest target, + Collection manifestsFromRequest) { + + KubernetesCoordinates coords; + try { + coords = + KubernetesCoordinates.builder() + .namespace(target.getNamespace()) + .fullResourceName(loadBalancerName) + .build(); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + String.format( + "Failed to attach load balancer '%s'. Load balancers must be specified in the form '{kind} {name}', e.g. 
'service my-service'.", + loadBalancerName), + e); + } + + KubernetesManifest loadBalancer = getLoadBalancer(coords, manifestsFromRequest); + + CanLoadBalance handler = + CanLoadBalance.lookupProperties(credentials.getResourcePropertyRegistry(), coords); + + getTask() + .updateStatus( + OP_NAME, + "Attaching load balancer " + + loadBalancer.getFullResourceName() + + " to " + + target.getFullResourceName()); + + handler.attach(loadBalancer, target); + } + + private KubernetesManifest getLoadBalancer( + KubernetesCoordinates coords, Collection<KubernetesManifest> manifestsFromRequest) { + Optional<KubernetesManifest> loadBalancer = + manifestsFromRequest.stream() + .filter(m -> KubernetesCoordinates.fromManifest(m).equals(coords)) + .findFirst(); + + return loadBalancer.orElseGet( + () -> + Optional.ofNullable(credentials.get(coords)) + .orElseThrow( + () -> + new IllegalArgumentException( + "Load balancer " + + coords.getKind().toString() + + " " + + coords.getName() + + " does not exist"))); + } + + private boolean isVersioned( + KubernetesResourceProperties properties, KubernetesManifestStrategy strategy) { + if (strategy.getVersioned() != Versioned.DEFAULT) { + return strategy.getVersioned() == Versioned.TRUE; + } + + if (description.getVersioned() != null) { + return description.getVersioned(); + } + + return properties.isVersioned(); + } + + // todo(lwander): move to kork + private static Moniker cloneMoniker(Moniker inp) { + return Moniker.builder() + .app(inp.getApp()) + .cluster(inp.getCluster()) + .stack(inp.getStack()) + .detail(inp.getDetail()) + .sequence(inp.getSequence()) + .build(); + } + + @Nonnull + private KubernetesResourceProperties findResourceProperties(KubernetesManifest manifest) { + KubernetesKind kind = manifest.getKind(); + getTask().updateStatus(OP_NAME, "Finding deployer for " + kind + "..."); + return credentials.getResourcePropertyRegistry().get(kind); + } + + @Data + @RequiredArgsConstructor + private static class ManifestArtifactHolder { + @Nonnull private KubernetesManifest manifest; + @Nonnull private Artifact artifact; + @Nonnull private KubernetesManifestStrategy strategy; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDisableManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDisableManifestOperation.java new file mode 100644 index 00000000000..23092b1e62a --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesDisableManifestOperation.java @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanLoadBalance; +import java.util.List; + +public class KubernetesDisableManifestOperation + extends AbstractKubernetesEnableDisableManifestOperation { + public KubernetesDisableManifestOperation( + KubernetesEnableDisableManifestDescription description) { + super(description); + } + + @Override + protected String getVerbName() { + return "disable"; + } + + @Override + protected List<JsonPatch> patchResource( + CanLoadBalance loadBalancerHandler, + KubernetesManifest loadBalancer, + KubernetesManifest target) { + return loadBalancerHandler.detachPatch(loadBalancer, target); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesEnableManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesEnableManifestOperation.java new file mode 100644 index 00000000000..1f0dc46360f --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesEnableManifestOperation.java @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanLoadBalance; +import java.util.List; + +public class KubernetesEnableManifestOperation + extends AbstractKubernetesEnableDisableManifestOperation { + public KubernetesEnableManifestOperation(KubernetesEnableDisableManifestDescription description) { + super(description); + } + + @Override + protected String getVerbName() { + return "enable"; + } + + @Override + protected List<JsonPatch> patchResource( + CanLoadBalance loadBalancerHandler, + KubernetesManifest loadBalancer, + KubernetesManifest target) { + return loadBalancerHandler.attachPatch(loadBalancer, target); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesPatchManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesPatchManifestOperation.java new file mode 100644 index 00000000000..9d2ff60147c --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesPatchManifestOperation.java @@ -0,0 +1,148 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer.ReplaceResult; +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions.MergeStrategy; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesPatchManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +public class KubernetesPatchManifestOperation implements AtomicOperation<OperationResult> { + private final KubernetesPatchManifestDescription description; + private final KubernetesCredentials credentials; + private static final String OP_NAME = "PATCH_KUBERNETES_MANIFEST"; + + private static final ObjectMapper objectMapper = new ObjectMapper(); + + public KubernetesPatchManifestOperation(KubernetesPatchManifestDescription description) { + this.description = description; + this.credentials = description.getCredentials().getCredentials(); + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public OperationResult operate(List _unused) { + updateStatus( + "Beginning patching of manifest in account " + credentials.getAccountName() + "..."); + KubernetesCoordinates objToPatch = description.getPointCoordinates(); + + updateStatus("Finding patch handler for " + objToPatch + "..."); + KubernetesHandler patchHandler = findPatchHandler(objToPatch); + + OperationResult result = new OperationResult(); + + MergeStrategy mergeStrategy = description.getOptions().getMergeStrategy(); + + if (mergeStrategy == MergeStrategy.json) { + // Skip artifact replacement for json patches + updateStatus( + "Submitting manifest " + description.getManifestName() + " to Kubernetes master..."); + List<JsonPatch> jsonPatches = + objectMapper.convertValue( + description.getPatchBody(), new TypeReference<List<JsonPatch>>() {}); + result.merge( + patchHandler.patchWithJson( + credentials, + objToPatch.getNamespace(), + objToPatch.getName(), + description.getOptions(), + jsonPatches, + getTask(), + OP_NAME)); + } else { + updateStatus("Swapping out artifacts in " + objToPatch + " from context..."); + ReplaceResult replaceResult = replaceArtifacts(objToPatch, patchHandler); + + updateStatus( + "Submitting manifest " + description.getManifestName() + " to Kubernetes master..."); + result.merge( + patchHandler.patchWithManifest( + credentials, + objToPatch.getNamespace(), + objToPatch.getName(), + description.getOptions(), + replaceResult.getManifest(), + getTask(), + OP_NAME)); +
result.getBoundArtifacts().addAll(replaceResult.getBoundArtifacts()); + } + + result.removeSensitiveKeys(credentials.getResourcePropertyRegistry()); + + getTask().updateStatus(OP_NAME, "Patch manifest operation completed successfully"); + return result; + } + + private void updateStatus(String status) { + getTask().updateStatus(OP_NAME, status); + } + + private ReplaceResult replaceArtifacts( + KubernetesCoordinates objToPatch, KubernetesHandler patchHandler) { + List<Artifact> allArtifacts = + description.getAllArtifacts() == null ? new ArrayList<>() : description.getAllArtifacts(); + + KubernetesManifest manifest = + objectMapper.convertValue(description.getPatchBody(), KubernetesManifest.class); + ReplaceResult replaceResult = + patchHandler.replaceArtifacts( + manifest, + allArtifacts, + Strings.nullToEmpty(objToPatch.getNamespace()), + description.getAccount()); + + if (description.getRequiredArtifacts() != null) { + Set<ArtifactKey> unboundArtifacts = + Sets.difference( + ArtifactKey.fromArtifacts(description.getRequiredArtifacts()), + ArtifactKey.fromArtifacts(replaceResult.getBoundArtifacts())); + if (!unboundArtifacts.isEmpty()) { + throw new IllegalArgumentException( + String.format( + "The following required artifacts could not be bound: '%s'. " + + "Check that the Docker image name above matches the name used in the image field of your manifest. " + + "Failing the stage as this is likely a configuration error.", + unboundArtifacts)); + } + } + return replaceResult; + } + + private KubernetesHandler findPatchHandler(KubernetesCoordinates objToPatch) { + return credentials.getResourcePropertyRegistry().get(objToPatch.getKind()).getHandler(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesPauseRolloutManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesPauseRolloutManifestOperation.java new file mode 100644 index 00000000000..9d8a21460a5 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesPauseRolloutManifestOperation.java @@ -0,0 +1,77 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesPauseRolloutManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanPauseRollout; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class KubernetesPauseRolloutManifestOperation implements AtomicOperation<Void> { + private final KubernetesPauseRolloutManifestDescription description; + private final KubernetesCredentials credentials; + private static final String OP_NAME = "PAUSE_ROLLOUT_KUBERNETES_MANIFEST"; + + public KubernetesPauseRolloutManifestOperation( + KubernetesPauseRolloutManifestDescription description) { + this.description = description; + this.credentials = description.getCredentials().getCredentials(); + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + OP_NAME, + "Starting pause rollout operation in account " + credentials.getAccountName() + "..."); + KubernetesCoordinates coordinates = description.getPointCoordinates(); + + getTask().updateStatus(OP_NAME, "Looking up resource properties..."); + KubernetesHandler deployer = + credentials.getResourcePropertyRegistry().get(coordinates.getKind()).getHandler(); + + if (!(deployer instanceof CanPauseRollout)) { + throw new IllegalArgumentException( + "Resource with " + coordinates + " does not support pause rollout"); + } + + CanPauseRollout canPauseRollout = (CanPauseRollout) deployer; + + getTask() + .updateStatus( + OP_NAME, + "Calling pause rollout operation for " + + coordinates.getName() + + " in namespace " + + coordinates.getNamespace() + + "..."); + canPauseRollout.pauseRollout(credentials, coordinates.getNamespace(), coordinates.getName()); + getTask().updateStatus(OP_NAME, "Pause rollout operation completed successfully"); + + return null; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesResumeRolloutManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesResumeRolloutManifestOperation.java new file mode 100644 index 00000000000..72ad67fed9a --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesResumeRolloutManifestOperation.java @@ -0,0 +1,79 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesResumeRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanResumeRollout;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+
+public class KubernetesResumeRolloutManifestOperation implements AtomicOperation<Void> {
+ private final KubernetesResumeRolloutManifestDescription description;
+ private final KubernetesCredentials credentials;
+ private static final String OP_NAME = "RESUME_ROLLOUT_KUBERNETES_MANIFEST";
+
+ public KubernetesResumeRolloutManifestOperation(
+ KubernetesResumeRolloutManifestDescription description) {
+ this.description = description;
+ this.credentials = description.getCredentials().getCredentials();
+ }
+
+ private static Task getTask() {
+ return TaskRepository.threadLocalTask.get();
+ }
+
+ @Override
+ public Void operate(List priorOutputs) {
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Starting resume rollout operation in account "
+ + credentials.getAccountName()
+ + "...");
+ KubernetesCoordinates coordinates = description.getPointCoordinates();
+
+ getTask().updateStatus(OP_NAME, "Looking up resource properties...");
+ KubernetesHandler deployer =
+ credentials.getResourcePropertyRegistry().get(coordinates.getKind()).getHandler();
+
+ if (!(deployer instanceof CanResumeRollout)) {
+ throw new IllegalArgumentException(
+ "Resource with " + coordinates + " does not support resume rollout");
+ }
+
+ CanResumeRollout canResumeRollout = (CanResumeRollout) deployer;
+
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Calling resume rollout operation for "
+ + coordinates.getName()
+ + " in namespace "
+ + coordinates.getNamespace()
+ + "...");
+ canResumeRollout.resumeRollout(
+ credentials, coordinates.getNamespace(), coordinates.getName(), getTask(), OP_NAME);
+ getTask().updateStatus(OP_NAME, "Resume rollout operation completed successfully");
+ return null;
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesRollingRestartManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesRollingRestartManifestOperation.java
new file mode 100644
index 00000000000..89a43521ff8
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesRollingRestartManifestOperation.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesRollingRestartManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanRollingRestart;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+
+public class KubernetesRollingRestartManifestOperation implements AtomicOperation<Void> {
+ private final KubernetesRollingRestartManifestDescription description;
+ private final KubernetesCredentials credentials;
+ private static final String OP_NAME = "ROLLING_RESTART_KUBERNETES_MANIFEST";
+
+ public KubernetesRollingRestartManifestOperation(
+ KubernetesRollingRestartManifestDescription description) {
+ this.description = description;
+ this.credentials = description.getCredentials().getCredentials();
+ }
+
+ private static Task getTask() {
+ return TaskRepository.threadLocalTask.get();
+ }
+
+ @Override
+ public Void operate(List priorOutputs) {
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Starting rolling restart operation in account "
+ + credentials.getAccountName()
+ + "...");
+ KubernetesCoordinates coordinates = description.getPointCoordinates();
+
+ getTask().updateStatus(OP_NAME, "Looking up resource properties...");
+ KubernetesHandler deployer =
+ credentials.getResourcePropertyRegistry().get(coordinates.getKind()).getHandler();
+
+ if (!(deployer instanceof CanRollingRestart)) {
+ throw new IllegalArgumentException(
+ "Resource with " + coordinates + " does not support rolling restart");
+ }
+
+ CanRollingRestart canRollingRestart = (CanRollingRestart) deployer;
+
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Calling rolling restart operation for "
+ + coordinates.getName()
+ + " in namespace "
+ + coordinates.getNamespace()
+ + "...");
+ canRollingRestart.rollingRestart(
+ credentials, coordinates.getNamespace(), coordinates.getName(), getTask(), OP_NAME);
+ getTask().updateStatus(OP_NAME, "Rolling restart operation completed successfully");
+
+ return null;
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesScaleManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesScaleManifestOperation.java
new file mode 100644
index 00000000000..58449d9ffb5
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesScaleManifestOperation.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesScaleManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanScale;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+
+public class KubernetesScaleManifestOperation implements AtomicOperation<Void> {
+ private final KubernetesScaleManifestDescription description;
+ private final KubernetesCredentials credentials;
+ private static final String OP_NAME = "SCALE_KUBERNETES_MANIFEST";
+
+ public KubernetesScaleManifestOperation(KubernetesScaleManifestDescription description) {
+ this.description = description;
+ this.credentials = description.getCredentials().getCredentials();
+ }
+
+ private static Task getTask() {
+ return TaskRepository.threadLocalTask.get();
+ }
+
+ @Override
+ public Void operate(List priorOutputs) {
+ getTask()
+ .updateStatus(
+ OP_NAME, "Starting scale operation in account " + credentials.getAccountName() + "...");
+ KubernetesCoordinates coordinates = description.getPointCoordinates();
+
+ getTask().updateStatus(OP_NAME, "Looking up resource properties...");
+ KubernetesHandler deployer =
+ credentials.getResourcePropertyRegistry().get(coordinates.getKind()).getHandler();
+
+ if (!(deployer instanceof CanScale)) {
+ throw new IllegalArgumentException(
+ "Resource with " + coordinates + " does not support scale");
+ }
+
+ CanScale canScale = (CanScale) deployer;
+
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Calling scale operation for "
+ + coordinates.getName()
+ + " in namespace "
+ + coordinates.getNamespace()
+ + " with replicas "
+ + description.getReplicas()
+ + "...");
+
+ canScale.scale(
+ credentials,
+ coordinates.getNamespace(),
+ coordinates.getName(),
+ description.getReplicas(),
+ getTask(),
+ OP_NAME);
+
+ getTask().updateStatus(OP_NAME, "Scale operation completed successfully");
+
+ return null;
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesUndoRolloutManifestOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesUndoRolloutManifestOperation.java
new file mode 100644
index 00000000000..01a0dfa1f78
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/KubernetesUndoRolloutManifestOperation.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesUndoRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanUndoRollout;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+
+public class KubernetesUndoRolloutManifestOperation implements AtomicOperation<Void> {
+ private final KubernetesUndoRolloutManifestDescription description;
+ private final KubernetesCredentials credentials;
+ private static final String OP_NAME = "UNDO_ROLLOUT_KUBERNETES_MANIFEST";
+
+ public KubernetesUndoRolloutManifestOperation(
+ KubernetesUndoRolloutManifestDescription description) {
+ this.description = description;
+ this.credentials = description.getCredentials().getCredentials();
+ }
+
+ private static Task getTask() {
+ return TaskRepository.threadLocalTask.get();
+ }
+
+ @Override
+ public Void operate(List priorOutputs) {
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Starting undo rollout operation in account " + credentials.getAccountName() + "...");
+ KubernetesCoordinates coordinates = description.getPointCoordinates();
+
+ getTask().updateStatus(OP_NAME, "Looking up resource properties...");
+ KubernetesHandler deployer =
+ credentials.getResourcePropertyRegistry().get(coordinates.getKind()).getHandler();
+
+ if (!(deployer instanceof CanUndoRollout)) {
+ throw new IllegalArgumentException(
+ "Resource with " + coordinates + " does not support undo rollout");
+ }
+
+ CanUndoRollout canUndoRollout = (CanUndoRollout) deployer;
+
+ Integer revision = description.getRevision();
+ if (description.getNumRevisionsBack() != null) {
+ getTask().updateStatus(OP_NAME, "Looking up rollout history...");
+ List<Integer> revisions =
+ canUndoRollout.historyRollout(
+ credentials, coordinates.getNamespace(), coordinates.getName());
+
+ revisions.sort(Integer::compareTo);
+ int numRevisions = revisions.size();
+ // Revisions are sorted ascending, so the newest revision is last; e.g. with revisions
+ // [1, 3, 7] and numRevisionsBack = 1, targetRevisionIndex = 3 - 1 - 1 = 1, picking
+ // revision 3 (the one immediately before the current revision).
+ int targetRevisionIndex = numRevisions - description.getNumRevisionsBack() - 1;
+ if (targetRevisionIndex < 0) {
+ throw new IllegalArgumentException(
+ "There are "
+ + numRevisions
+ + " revision(s) in total, cannot roll back "
+ + description.getNumRevisionsBack());
+ }
+
+ revision = revisions.get(targetRevisionIndex);
+ getTask().updateStatus(OP_NAME, "Picked revision " + revision + "...");
+ }
+
+ getTask()
+ .updateStatus(
+ OP_NAME,
+ "Calling undo rollout operation for "
+ + coordinates.getName()
+ + " in namespace "
+ + coordinates.getNamespace()
+ + " with revision "
+ + revision
+ + "...");
+
+ canUndoRollout.undoRollout(
+ credentials, coordinates.getNamespace(), coordinates.getName(), revision);
+
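+ // The call above is roughly equivalent to
+ // `kubectl -n <namespace> rollout undo <kind>/<name> --to-revision=<revision>`; the exact
+ // invocation is delegated to the account's kubectl job executor and may vary by kind.
+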
+ getTask().updateStatus(OP_NAME, "Undo rollout operation completed successfully");
+ return null;
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/servergroup/KubernetesResizeServerGroupOperation.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/servergroup/KubernetesResizeServerGroupOperation.java
new file mode 100644
index 00000000000..1ffdf5a0f15
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/op/servergroup/KubernetesResizeServerGroupOperation.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.op.servergroup;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.servergroup.KubernetesResizeServerGroupDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.CanResize;
+import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+
+public class KubernetesResizeServerGroupOperation implements AtomicOperation<Void> {
+ private final KubernetesResizeServerGroupDescription description;
+ private final KubernetesCredentials credentials;
+ private static final String OP_NAME = "RESIZE_KUBERNETES_SERVER_GROUP";
+
+ public KubernetesResizeServerGroupOperation(KubernetesResizeServerGroupDescription description) {
+ this.description = description;
+ this.credentials = description.getCredentials().getCredentials();
+ }
+
+ private static Task getTask() {
+ return TaskRepository.threadLocalTask.get();
+ }
+
+ @Override
+ public Void operate(List priorOutputs) {
+ getTask().updateStatus(OP_NAME, "Starting resize operation...");
+ KubernetesCoordinates coordinates = description.getCoordinates();
+
+ getTask().updateStatus(OP_NAME, "Looking up resource properties...");
+ KubernetesHandler deployer =
+ credentials.getResourcePropertyRegistry().get(coordinates.getKind()).getHandler();
+
+ if (!(deployer instanceof CanResize)) {
+ throw new IllegalArgumentException(
+ "Resource with " + coordinates + " does not support resize");
+ }
+
+ CanResize canResize = (CanResize) deployer;
+
+ getTask().updateStatus(OP_NAME, "Calling resize operation...");
+ canResize.resize(
+ credentials,
+ coordinates.getNamespace(),
+ coordinates.getName(),
+ description.getCapacity(),
+ getTask(),
+ OP_NAME);
+
+ return null;
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/KubernetesModelUtil.java
b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/KubernetesModelUtil.java
new file mode 100644
index 00000000000..f5aeb6aff04
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/KubernetesModelUtil.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.provider;
+
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class KubernetesModelUtil {
+ public static HealthState getHealthState(List<Map<String, Object>> health) {
+ return someUpRemainingUnknown(health)
+ ? HealthState.Up
+ : someSucceededRemainingUnknown(health)
+ ? HealthState.Succeeded
+ : anyStarting(health)
+ ? HealthState.Starting
+ : anyDown(health)
+ ? HealthState.Down
+ : anyFailed(health)
+ ? HealthState.Failed
+ : anyOutOfService(health) ? HealthState.OutOfService : HealthState.Unknown;
+ }
+
+ private static boolean stateEquals(Map<String, Object> health, HealthState state) {
+ Object healthState = health.get("state");
+ return healthState != null && healthState.equals(state.name());
+ }
+
+ private static boolean someUpRemainingUnknown(List<Map<String, Object>> healthsList) {
+ List<Map<String, Object>> knownHealthList =
+ healthsList.stream()
+ .filter(h -> !stateEquals(h, HealthState.Unknown))
+ .collect(Collectors.toList());
+
+ return !knownHealthList.isEmpty()
+ && knownHealthList.stream().allMatch(h -> stateEquals(h, HealthState.Up));
+ }
+
+ private static boolean someSucceededRemainingUnknown(List<Map<String, Object>> healthsList) {
+ List<Map<String, Object>> knownHealthList =
+ healthsList.stream()
+ .filter(h -> !stateEquals(h, HealthState.Unknown))
+ .collect(Collectors.toList());
+
+ return !knownHealthList.isEmpty()
+ && knownHealthList.stream().allMatch(h -> stateEquals(h, HealthState.Succeeded));
+ }
+
+ private static boolean anyDown(List<Map<String, Object>> healthsList) {
+ return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.Down));
+ }
+
+ private static boolean anyStarting(List<Map<String, Object>> healthsList) {
+ return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.Starting));
+ }
+
+ private static boolean anyFailed(List<Map<String, Object>> healthsList) {
+ return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.Failed));
+ }
+
+ private static boolean anyOutOfService(List<Map<String, Object>> healthsList) {
+ return healthsList.stream().anyMatch(h -> stateEquals(h, HealthState.OutOfService));
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProvider.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProvider.java
new file mode 100644
index 00000000000..cd401def01a
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProvider.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2019 Armory
+ *
+ * Licensed under
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.provider.view; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesManifestContainer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.KubernetesJobStatus; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.clouddriver.model.JobProvider; +import com.netflix.spinnaker.clouddriver.model.JobState; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import io.kubernetes.client.openapi.models.V1Job; +import io.kubernetes.client.openapi.models.V1Pod; +import java.time.OffsetDateTime; +import java.util.*; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import lombok.Getter; +import org.apache.commons.lang3.NotImplementedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +@Component +public class KubernetesJobProvider implements JobProvider { + private static final Logger log = LoggerFactory.getLogger(KubernetesJobProvider.class); + @Getter private final String platform = "kubernetes"; + private final AccountCredentialsProvider accountCredentialsProvider; + private final KubernetesManifestProvider manifestProvider; + private final boolean detailedPodStatus; + + KubernetesJobProvider( + AccountCredentialsProvider accountCredentialsProvider, + KubernetesManifestProvider manifestProvider, + @Value("${kubernetes.jobs.detailed-pod-status:true}") boolean detailedPodStatus) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.manifestProvider = manifestProvider; + this.detailedPodStatus = detailedPodStatus; + } + + @Override + @Nullable + public KubernetesJobStatus collectJob(String account, String location, String id) { + Optional optionalJob = getKubernetesJob(account, location, id); + if (!optionalJob.isPresent()) { + return null; + } + V1Job job = optionalJob.get(); + KubernetesJobStatus jobStatus = new KubernetesJobStatus(job, account); + KubernetesCredentials credentials = + (KubernetesCredentials) accountCredentialsProvider.getCredentials(account).getCredentials(); + + Map selector = job.getSpec().getSelector().getMatchLabels(); + List pods = + credentials.list( + KubernetesKind.POD, + jobStatus.getLocation(), + KubernetesSelectorList.fromMatchLabels(selector)); + + List typedPods = + pods.stream() + .map(m -> 
KubernetesCacheDataConverter.getResource(m, V1Pod.class)) + .sorted( + (p1, p2) -> { + OffsetDateTime dtDefault = OffsetDateTime.now(); + OffsetDateTime time1 = + p1.getStatus() != null + ? Optional.ofNullable(p1.getStatus().getStartTime()).orElse(dtDefault) + : dtDefault; + OffsetDateTime time2 = + p2.getStatus() != null + ? Optional.ofNullable(p2.getStatus().getStartTime()).orElse(dtDefault) + : dtDefault; + return time1.compareTo(time2); + }) + .collect(Collectors.toList()); + + // Handle an edge case where a Job may not have any pods, for example + // if a webhook explicitly denies the creation of a pod + if (typedPods.size() != 0) { + V1Pod mostRecentPod = typedPods.get(typedPods.size() - 1); + jobStatus.setMostRecentPodName( + mostRecentPod.getMetadata() != null ? mostRecentPod.getMetadata().getName() : ""); + } + + jobStatus.setPods( + typedPods.stream().map(KubernetesJobStatus.PodStatus::new).collect(Collectors.toList())); + + if (jobStatus.getJobState() == JobState.Failed) { + jobStatus.captureFailureDetails(); + } + + // if detailedPodStatus is not needed, then remove all the pod related information + if (!detailedPodStatus) { + jobStatus.setPods(List.of()); + } + + return jobStatus; + } + + @Override + @Nullable + public Map getFileContents( + String account, String location, String id, String containerName) { + KubernetesCredentials credentials = + (KubernetesCredentials) accountCredentialsProvider.getCredentials(account).getCredentials(); + return getKubernetesJob(account, location, id) + .map( + job -> { + String logContents; + try { + logContents = + credentials.jobLogs(location, job.getMetadata().getName(), containerName); + } catch (Exception jobLogsException) { + log.error( + "Failed to get logs from job: {}, container: {} in namespace: {} for account: {}. Error: ", + id, + containerName, + location, + account, + jobLogsException); + return null; + } + try { + if (logContents != null) { + return PropertyParser.extractPropertiesFromLog(logContents); + } + } catch (Exception e) { + log.error( + "Couldn't parse properties for job: {}, container: {} in namespace: {} for account: {}. Error: ", + id, + containerName, + location, + account, + e); + } + return null; + }) + .orElse(null); + } + + /** + * This method queries a pod for logs, from which it extracts properties which it returns as a map + * to the caller. This is needed in cases where a pod needs to be queried directly for logs, and + * getFileContents() doesn't give us all the required information. + * + * @param account - account to which the pod belongs + * @param namespace - namespace in which the pod runs in + * @param podName - pod to query the logs + * @param containerName - containerName in the pod from which logs should be queried + * @return map of property file contents + */ + @Nullable + public Map getFileContentsFromPod( + String account, String namespace, String podName, String containerName) { + Map props = null; + String logContents = null; + KubernetesCredentials credentials = + (KubernetesCredentials) accountCredentialsProvider.getCredentials(account).getCredentials(); + try { + logContents = credentials.logs(namespace, podName, containerName); + } catch (Exception podLogsException) { + log.error( + "Failed to get logs from pod: {}, container: {} in namespace: {} for account: {}. 
Error: ", + podName, + containerName, + namespace, + account, + podLogsException); + } + + try { + if (logContents != null) { + props = PropertyParser.extractPropertiesFromLog(logContents); + } + } catch (Exception e) { + log.error( + "Couldn't parse properties from pod: {}, container: {} in namespace: {} for account: {}. Error: ", + podName, + containerName, + namespace, + account, + e); + } + + return props; + } + + @Override + public void cancelJob(String account, String location, String id) { + throw new NotImplementedException("cancelJob is not implemented for the Kubernetes provider"); + } + + private Optional getKubernetesJob(String account, String location, String id) { + log.debug("Getting kubernetesJob for account {} at {} with id {}", account, location, id); + return Optional.ofNullable(manifestProvider.getManifest(account, location, id, false)) + .map(KubernetesManifestContainer::getManifest) + .map(m -> KubernetesCacheDataConverter.getResource(m, V1Job.class)); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/PropertyParser.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/PropertyParser.java new file mode 100644 index 00000000000..1aeee6139d0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/PropertyParser.java @@ -0,0 +1,79 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.provider.view;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Splitter;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PropertyParser {
+ private static final Logger log = LoggerFactory.getLogger(PropertyParser.class);
+
+ private static final String MAGIC_SEARCH_STRING = "SPINNAKER_PROPERTY_";
+ private static final Pattern MAGIC_SEARCH_PATTERN = Pattern.compile(MAGIC_SEARCH_STRING);
+ private static final String MAGIC_JSON_SEARCH_STRING = "SPINNAKER_CONFIG_JSON=";
+ private static final Pattern MAGIC_JSON_SEARCH_PATTERN =
+ Pattern.compile("^\\s*" + MAGIC_JSON_SEARCH_STRING);
+
+ private static final Splitter lineSplitter = Splitter.on("\n").omitEmptyStrings().trimResults();
+ private static final Splitter equalsSplitter = Splitter.on("=").omitEmptyStrings().trimResults();
+
+ public static Map<String, Object> extractPropertiesFromLog(String buildLog) throws IOException {
+ final Map<String, Object> map = new HashMap<>();
+
+ for (String line : lineSplitter.split(buildLog)) {
+ if (MAGIC_SEARCH_PATTERN.matcher(line).find()) {
+ log.debug("Identified: " + line);
+ List<String> splittedLine = equalsSplitter.splitToList(line);
+ // if the split line doesn't match our expected length it cannot
+ // be parsed so we should skip it.
+ if (splittedLine.size() != 2) {
+ continue;
+ }
+ final String key = splittedLine.get(0).replaceFirst(MAGIC_SEARCH_STRING, "").toLowerCase();
+ final String value = splittedLine.get(1);
+ log.debug(key + ":" + value);
+ map.put(key, value);
+ }
+
+ if (MAGIC_JSON_SEARCH_PATTERN.matcher(line).find()) {
+ log.debug("Identified Spinnaker JSON properties magic string: " + line);
+ final String jsonContent = line.replaceFirst(MAGIC_JSON_SEARCH_STRING, "");
+ ObjectMapper objectMapper = new ObjectMapper();
+ try {
+ map.putAll(
+ objectMapper.readValue(jsonContent, new TypeReference<Map<String, Object>>() {}));
+ } catch (IOException e) {
+ log.error(
+ "Unable to parse content from {}. Content is: {}",
+ MAGIC_JSON_SEARCH_STRING,
+ jsonContent);
+ throw e;
+ }
+ }
+ }
+ return map;
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/GlobalKubernetesKindRegistry.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/GlobalKubernetesKindRegistry.java
new file mode 100644
index 00000000000..7c5ce869a6b
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/GlobalKubernetesKindRegistry.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.security;
+
+import static com.google.common.collect.ImmutableMap.toImmutableMap;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Optional;
+import java.util.stream.StreamSupport;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+/**
+ * A class representing the Kubernetes kind properties that are built into Spinnaker. By design,
+ * this class does not support updating any of the properties in the registry; by making instances
+ * immutable, they can be shared across threads without need for further synchronization.
+ */
+@Component
+@NonnullByDefault
+public final class GlobalKubernetesKindRegistry {
+ private final ImmutableMap<KubernetesKind, KubernetesKindProperties> nameMap;
+
+ /**
+ * Creates a {@link GlobalKubernetesKindRegistry} populated with default {@link
+ * KubernetesKindProperties}.
+ */
+ @Autowired
+ public GlobalKubernetesKindRegistry() {
+ this(KubernetesKindProperties.getGlobalKindProperties());
+ }
+
+ /**
+ * Creates a {@link GlobalKubernetesKindRegistry} populated with the supplied {@link
+ * KubernetesKindProperties}.
+ */
+ public GlobalKubernetesKindRegistry(
+ Iterable<KubernetesKindProperties> kubernetesKindProperties) {
+ this.nameMap =
+ StreamSupport.stream(kubernetesKindProperties.spliterator(), false)
+ .collect(toImmutableMap(KubernetesKindProperties::getKubernetesKind, p -> p));
+ }
+
+ /**
+ * Searches the registry for a {@link KubernetesKindProperties} with the supplied {@link
+ * KubernetesKind}. If the kind has been registered, returns the {@link KubernetesKindProperties}
+ * that were registered for the kind; otherwise, returns an empty {@link Optional}.
+ */
+ Optional<KubernetesKindProperties> getKindProperties(KubernetesKind kind) {
+ return Optional.ofNullable(nameMap.get(kind));
+ }
+
+ /** Returns a list of all registered kinds */
+ ImmutableSet<KubernetesKind> getRegisteredKinds() {
+ return nameMap.keySet();
+ }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubeconfigFileHasher.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubeconfigFileHasher.java
new file mode 100644
index 00000000000..201f29396ea
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubeconfigFileHasher.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import com.google.common.hash.Hashing; +import java.nio.file.Files; +import java.nio.file.Paths; +import javax.annotation.Nonnull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KubeconfigFileHasher { + private static final Logger log = LoggerFactory.getLogger(KubeconfigFileHasher.class); + + public static String hashKubeconfigFile(@Nonnull String filepath) { + if (filepath.isEmpty()) { + return ""; + } + try { + byte[] contents = Files.readAllBytes(Paths.get(filepath)); + return Hashing.sha256().hashBytes(contents).toString(); + } catch (Exception e) { + log.warn("failed to hash kubeconfig file at {}: {}", filepath, e); + return ""; + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentials.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentials.java new file mode 100644 index 00000000000..e74d15cf037 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentials.java @@ -0,0 +1,849 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static lombok.EqualsAndHashCode.Include; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spectator.api.Clock; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.config.CustomKubernetesResource; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesCachingPolicy; +import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration; +import com.netflix.spinnaker.clouddriver.kubernetes.config.RawResourcesEndpointConfig; +import com.netflix.spinnaker.clouddriver.kubernetes.description.AccountResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPatchOptions; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesNamerRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesCustomResourceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor.KubectlException; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor.KubectlNotFoundException; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import com.netflix.spinnaker.moniker.Namer; +import io.kubernetes.client.openapi.models.V1DeleteOptions; +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinition; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Supplier; 
+import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Component; + +@EqualsAndHashCode(onlyExplicitlyIncluded = true) +public class KubernetesCredentials { + private static final Logger log = LoggerFactory.getLogger(KubernetesCredentials.class); + private static final int CRD_EXPIRY_SECONDS = 30; + private static final int NAMESPACE_EXPIRY_SECONDS = 30; + + private final Registry registry; + private final Clock clock; + private final KubectlJobExecutor jobExecutor; + private final GlobalResourcePropertyRegistry globalResourcePropertyRegistry; + + @Include @Getter @Nonnull private final String accountName; + + @Include @Getter private final ImmutableList namespaces; + + @Include @Getter private final ImmutableList omitNamespaces; + + @Include @Getter private final ImmutableSet kinds; + + @Include @Getter private final ImmutableSet omitKinds; + + @Include @Getter private final List customResources; + + @Include @Getter private final String kubectlExecutable; + + @Include @Getter private final Integer kubectlRequestTimeoutSeconds; + + @Getter private final String kubeconfigFile; + + @Include private final String kubeconfigFileHash; + + @Include @Getter private final boolean serviceAccount; + + @Include @Getter private final String context; + + @Include @Getter private final boolean onlySpinnakerManaged; + + @Include @Getter private final boolean cacheAllApplicationRelationships; + + @Include @Getter private final RawResourcesEndpointConfig rawResourcesEndpointConfig; + + @Include private final boolean checkPermissionsOnStartup; + + @Include @Getter private final List cachingPolicies; + + @Include @JsonIgnore @Getter private final String oAuthServiceAccount; + + @Include @JsonIgnore @Getter private final List oAuthScopes; + + @Include private boolean metrics; + + @Include @Getter private final boolean debug; + + @Getter private final ResourcePropertyRegistry resourcePropertyRegistry; + private final KubernetesKindRegistry kindRegistry; + @Getter private final KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap; + private final PermissionValidator permissionValidator; + private final Supplier> crdSupplier = + Suppliers.memoizeWithExpiration(this::crdSupplier, CRD_EXPIRY_SECONDS, TimeUnit.SECONDS); + private final Memoizer> liveNamespaceSupplier = + Memoizer.memoizeWithExpiration( + this::namespaceSupplier, NAMESPACE_EXPIRY_SECONDS, TimeUnit.SECONDS); + @Getter private final Namer namer; + + public KubernetesCredentials( + Registry registry, + KubectlJobExecutor jobExecutor, + ManagedAccount managedAccount, + AccountResourcePropertyRegistry.Factory resourcePropertyRegistryFactory, + KubernetesKindRegistry.Factory kindRegistryFactory, + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap, + String kubeconfigFile, + Namer manifestNamer, + GlobalResourcePropertyRegistry globalResourcePropertyRegistry) { + this.registry = registry; + this.clock = registry.clock(); + this.jobExecutor = jobExecutor; + this.kindRegistry = + kindRegistryFactory.create( + this::getCrdProperties, + managedAccount.getCustomResources().stream() + .map( + cr -> + KubernetesKindProperties.create( + KubernetesKind.fromString(cr.getKubernetesKind()), cr.isNamespaced())) + .collect(toImmutableList())); + + this.accountName = 
Objects.requireNonNull(managedAccount.getName());
+ this.namespaces = ImmutableList.copyOf(managedAccount.getNamespaces());
+ this.omitNamespaces = ImmutableList.copyOf(managedAccount.getOmitNamespaces());
+ this.kinds =
+ managedAccount.getKinds().stream()
+ .map(KubernetesKind::fromString)
+ .collect(toImmutableSet());
+ this.omitKinds =
+ managedAccount.getOmitKinds().stream()
+ .map(KubernetesKind::fromString)
+ .collect(toImmutableSet());
+ this.permissionValidator = new PermissionValidator();
+
+ this.customResources = managedAccount.getCustomResources();
+ this.resourcePropertyRegistry =
+ resourcePropertyRegistryFactory.create(
+ managedAccount.getCustomResources().stream()
+ .map(KubernetesResourceProperties::fromCustomResource)
+ .collect(toImmutableList()));
+ this.kubernetesSpinnakerKindMap = kubernetesSpinnakerKindMap;
+
+ this.kubectlExecutable = managedAccount.getKubectlExecutable();
+ this.kubectlRequestTimeoutSeconds = managedAccount.getKubectlRequestTimeoutSeconds();
+ this.kubeconfigFile = kubeconfigFile;
+ this.kubeconfigFileHash = KubeconfigFileHasher.hashKubeconfigFile(kubeconfigFile);
+ this.serviceAccount = managedAccount.isServiceAccount();
+ this.context = managedAccount.getContext();
+
+ this.onlySpinnakerManaged = managedAccount.isOnlySpinnakerManaged();
+ this.checkPermissionsOnStartup = managedAccount.isCheckPermissionsOnStartup();
+ this.cachingPolicies = managedAccount.getCachingPolicies();
+
+ this.oAuthServiceAccount = managedAccount.getOAuthServiceAccount();
+ this.oAuthScopes = managedAccount.getOAuthScopes();
+
+ this.metrics = managedAccount.isMetrics();
+
+ this.debug = managedAccount.isDebug();
+ this.namer = manifestNamer;
+ this.cacheAllApplicationRelationships = managedAccount.isCacheAllApplicationRelationships();
+ this.rawResourcesEndpointConfig = managedAccount.getRawResourcesEndpointConfig();
+ this.globalResourcePropertyRegistry = globalResourcePropertyRegistry;
+ }
+
+ /**
+ * Thin wrapper around a Caffeine cache that handles memoizing a supplier function with expiration
+ */
+ private static class Memoizer<T> implements Supplier<T> {
+ private static final String CACHE_KEY = "key";
+ private final LoadingCache<String, T> cache;
+
+ private Memoizer(Supplier<T> supplier, long expirySeconds, TimeUnit timeUnit) {
+ this.cache =
+ Caffeine.newBuilder()
+ .refreshAfterWrite(expirySeconds, timeUnit)
+ .build(key -> supplier.get());
+ }
+
+ @Override
+ public T get() {
+ return cache.get(CACHE_KEY);
+ }
+
+ /** Return the value from the cache or null if there is no cached value */
+ @Nullable
+ public T getIfPresent() {
+ return cache.getIfPresent(CACHE_KEY);
+ }
+
+ public static <U> Memoizer<U> memoizeWithExpiration(
+ Supplier<U> supplier, long expirySeconds, TimeUnit timeUnit) {
+ return new Memoizer<>(supplier, expirySeconds, timeUnit);
+ }
+ }
+
+ public enum KubernetesKindStatus {
+ VALID("Kind [%s] is a valid kind"),
+ KIND_NONE("Kind [%s] is invalid"),
+ EXPLICITLY_OMITTED_BY_CONFIGURATION(
+ "Kind [%s] included in 'omitKinds' of kubernetes account configuration"),
+ MISSING_FROM_ALLOWED_KINDS("Kind [%s] missing in 'kinds' of kubernetes account configuration"),
+ UNKNOWN("Kind [%s] has not been registered and is not a valid CRD installed in the cluster"),
+ READ_ERROR(
+ "Error reading kind [%s].
Please check connectivity and access permissions to the cluster"); + + private final String errorMessage; + + KubernetesKindStatus(String errorMessage) { + this.errorMessage = errorMessage; + } + + public String getErrorMessage(KubernetesKind kind) { + return String.format(this.errorMessage, kind); + } + } + + public boolean isValidKind(@Nonnull KubernetesKind kind) { + return getKindStatus(kind) == KubernetesKindStatus.VALID; + } + + /** + * Returns the status of a given kubernetes kind with respect to the current account. Checks of + * whether a kind is readable are cached for the lifetime of the process (and are only performed + * when a kind is otherwise considered valid for the account). + */ + @Nonnull + public KubernetesKindStatus getKindStatus(@Nonnull KubernetesKind kind) { + if (kind.equals(KubernetesKind.NONE)) { + return KubernetesKindStatus.KIND_NONE; + } + + if (!kinds.isEmpty()) { + return kinds.contains(kind) + ? KubernetesKindStatus.VALID + : KubernetesKindStatus.MISSING_FROM_ALLOWED_KINDS; + } + + if (omitKinds.contains(kind)) { + return KubernetesKindStatus.EXPLICITLY_OMITTED_BY_CONFIGURATION; + } + + if (!kindRegistry.isKindRegistered(kind)) { + return KubernetesKindStatus.UNKNOWN; + } + + if (!permissionValidator.isKindReadable(kind)) { + return KubernetesKindStatus.READ_ERROR; + } + + return KubernetesKindStatus.VALID; + } + + private Optional getCrdProperties( + @Nonnull KubernetesKind kubernetesKind) { + return Optional.ofNullable(crdSupplier.get().get(kubernetesKind)); + } + + @Nonnull + public ImmutableList getGlobalKinds() { + return kindRegistry.getGlobalKinds().stream() + .filter(this::isValidKind) + .collect(toImmutableList()); + } + + @Nonnull + public KubernetesKindProperties getKindProperties(@Nonnull KubernetesKind kind) { + return kindRegistry.getKindPropertiesOrDefault(kind); + } + + @Nonnull + public ImmutableList getCrds() { + return crdSupplier.get().keySet().stream().filter(this::isValidKind).collect(toImmutableList()); + } + + @Nonnull + private ImmutableMap crdSupplier() { + // Short-circuit if the account is not configured (or does not have permission) to read CRDs + if (!isValidKind(KubernetesKind.CUSTOM_RESOURCE_DEFINITION)) { + return ImmutableMap.of(); + } + try { + ImmutableMap crds = + list(KubernetesKind.CUSTOM_RESOURCE_DEFINITION, "").stream() + .map( + manifest -> + KubernetesCacheDataConverter.getResource( + manifest, V1beta1CustomResourceDefinition.class)) + .map(KubernetesKindProperties::fromCustomResourceDefinition) + .collect( + toImmutableMap(KubernetesKindProperties::getKubernetesKind, Function.identity())); + + List crdHandlers = + crds.keySet().stream() + .map(KubernetesCustomResourceHandler::new) + .collect(toImmutableList()); + this.globalResourcePropertyRegistry.updateCrdProperties(crdHandlers); + + return crds; + } catch (KubectlException e) { + // not logging here -- it will generate a lot of noise in cases where crds aren't + // available/registered in the first place + return ImmutableMap.of(); + } + } + + @Nonnull + private ImmutableList namespaceSupplier() { + try { + return jobExecutor + .list(this, ImmutableList.of(KubernetesKind.NAMESPACE), "", new KubernetesSelectorList()) + .stream() + .map(KubernetesManifest::getName) + .collect(toImmutableList()); + } catch (KubectlException e) { + log.error("Could not list namespaces for account {}: {}", accountName, e.getMessage()); + return ImmutableList.of(); + } + } + + @Nonnull + public ImmutableList filterNamespaces(@Nonnull ImmutableList namespaces) { + ImmutableList 
result = namespaces; + if (!omitNamespaces.isEmpty()) { + result = + result.stream() + .filter(n -> !omitNamespaces.contains(n)) + .collect(ImmutableList.toImmutableList()); + } + + return result; + } + + /** Get declared namespaces without making a call to the kubernetes cluster */ + @Nonnull + public ImmutableList getDeclaredNamespacesFromCache() { + ImmutableList result; + if (!namespaces.isEmpty()) { + result = namespaces; + } else { + result = liveNamespaceSupplier.getIfPresent(); + if (result == null) { + // There's nothing in the cache, so return an empty list + log.warn("No cached namespaces for account {}", accountName); + result = ImmutableList.of(); + } + } + + return filterNamespaces(result); + } + + /** + * Get declared namespaces, making a call to the kubernetes cluster if there's no cached value, or + * the cache is stale. Note that this is a best-effort call. If there's an error communicating to + * the kubernetes cluster, this routine may return an empty list. + */ + @Nonnull + public ImmutableList getDeclaredNamespaces() { + ImmutableList result; + if (!namespaces.isEmpty()) { + result = namespaces; + } else { + result = liveNamespaceSupplier.get(); + } + + return filterNamespaces(result); + } + + public boolean isMetricsEnabled() { + return metrics && permissionValidator.isMetricsReadable(); + } + + public Map getSpinnakerKindMap() { + Map kindMap = + new HashMap<>(kubernetesSpinnakerKindMap.kubernetesToSpinnakerKindStringMap()); + getCustomResources() + .forEach( + customResource -> + kindMap.put(customResource.getKubernetesKind(), customResource.getSpinnakerKind())); + return kindMap; + } + + public ImmutableList getDockerRegistries() { + return ImmutableList.of(); + } + + /** Deprecated in favor of {@link KubernetesCredentials#get(KubernetesCoordinates)}. */ + @Deprecated + @Nullable + public KubernetesManifest get(KubernetesKind kind, String namespace, String name) { + return get(KubernetesCoordinates.builder().kind(kind).namespace(namespace).name(name).build()); + } + + @Nullable + public KubernetesManifest get(KubernetesCoordinates coords) { + return runAndRecordMetrics( + "get", + coords.getKind(), + coords.getNamespace(), + () -> jobExecutor.get(this, coords.getKind(), coords.getNamespace(), coords.getName())); + } + + @Nonnull + public ImmutableList list(KubernetesKind kind, String namespace) { + return runAndRecordMetrics( + "list", + kind, + namespace, + () -> + jobExecutor.list( + this, ImmutableList.of(kind), namespace, new KubernetesSelectorList())); + } + + @Nonnull + public ImmutableList list( + KubernetesKind kind, String namespace, KubernetesSelectorList selectors) { + return runAndRecordMetrics( + "list", + kind, + namespace, + () -> jobExecutor.list(this, ImmutableList.of(kind), namespace, selectors)); + } + + @Nonnull + public ImmutableList list(List kinds, String namespace) { + if (kinds.isEmpty()) { + return ImmutableList.of(); + } else { + return runAndRecordMetrics( + "list", + kinds, + namespace, + () -> jobExecutor.list(this, kinds, namespace, new KubernetesSelectorList())); + } + } + + /** Deprecated in favor of {@link KubernetesCredentials#eventsFor(KubernetesCoordinates)}. 
*/ + @Deprecated + @Nonnull + public ImmutableList eventsFor( + KubernetesKind kind, String namespace, String name) { + return eventsFor( + KubernetesCoordinates.builder().kind(kind).namespace(namespace).name(name).build()); + } + + @Nonnull + public ImmutableList eventsFor(KubernetesCoordinates coords) { + return runAndRecordMetrics( + "list", + KubernetesKind.EVENT, + coords.getNamespace(), + () -> + jobExecutor.eventsFor(this, coords.getKind(), coords.getNamespace(), coords.getName())); + } + + public String logs(String namespace, String podName, String containerName) { + return runAndRecordMetrics( + "logs", + KubernetesKind.POD, + namespace, + () -> jobExecutor.logs(this, namespace, podName, containerName)); + } + + public String jobLogs(String namespace, String jobName, String containerName) { + return runAndRecordMetrics( + "logs", + KubernetesKind.JOB, + namespace, + () -> jobExecutor.jobLogs(this, namespace, jobName, containerName)); + } + + public void scale( + KubernetesKind kind, String namespace, String name, int replicas, Task task, String opName) { + runAndRecordMetrics( + "scale", + kind, + namespace, + () -> jobExecutor.scale(this, kind, namespace, name, replicas, task, opName)); + } + + public List delete( + KubernetesKind kind, + String namespace, + String name, + KubernetesSelectorList labelSelectors, + V1DeleteOptions options, + Task task, + String opName) { + return runAndRecordMetrics( + "delete", + kind, + namespace, + () -> + jobExecutor.delete(this, kind, namespace, name, labelSelectors, options, task, opName)); + } + + /** Deprecated in favor of {@link KubernetesCredentials#topPod(KubernetesCoordinates)} */ + @Deprecated + public Collection topPod(String namespace, String pod) { + return topPod( + KubernetesCoordinates.builder() + .kind(KubernetesKind.POD) + .namespace(namespace) + .name(pod) + .build()); + } + + public Collection topPod(KubernetesCoordinates coords) { + Preconditions.checkState( + coords.getKind().equals(KubernetesKind.POD), "Metrics are only available for pods."); + return runAndRecordMetrics( + "top", + KubernetesKind.POD, + coords.getNamespace(), + () -> jobExecutor.topPod(this, coords.getNamespace(), coords.getName())); + } + + public KubernetesManifest deploy( + KubernetesManifest manifest, + Task task, + String opName, + KubernetesSelectorList selectorList, + String... cmdArgs) { + return runAndRecordMetrics( + "deploy", + manifest.getKind(), + manifest.getNamespace(), + () -> jobExecutor.deploy(this, manifest, task, opName, selectorList, cmdArgs)); + } + + private KubernetesManifest replace(KubernetesManifest manifest, Task task, String opName) { + return runAndRecordMetrics( + "replace", + manifest.getKind(), + manifest.getNamespace(), + () -> jobExecutor.replace(this, manifest, task, opName)); + } + + public KubernetesManifest createOrReplace(KubernetesManifest manifest, Task task, String opName) { + try { + return replace(manifest, task, opName); + } catch (KubectlNotFoundException e) { + // Although create supports label selectors, replace doesn't. Assume that + // some higher-level logic prevents this operation in combination with + // label selectors. 
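+ // In kubectl terms this is roughly `kubectl replace` with a fallback to `kubectl create`
+ // when the resource does not yet exist; unlike `kubectl apply`, `replace` fails on a
+ // missing resource, which is why the not-found exception is caught here.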
+  public KubernetesManifest createOrReplace(KubernetesManifest manifest, Task task, String opName) {
+    try {
+      return replace(manifest, task, opName);
+    } catch (KubectlNotFoundException e) {
+      // Although create supports label selectors, replace doesn't. Assume that
+      // some higher-level logic prevents this operation in combination with
+      // label selectors.
+      return create(manifest, task, opName, new KubernetesSelectorList());
+    }
+  }
+
+  public KubernetesManifest create(
+      KubernetesManifest manifest, Task task, String opName, KubernetesSelectorList selectorList) {
+    return runAndRecordMetrics(
+        "create",
+        manifest.getKind(),
+        manifest.getNamespace(),
+        () -> jobExecutor.create(this, manifest, task, opName, selectorList));
+  }
+
+  public List<Integer> historyRollout(KubernetesKind kind, String namespace, String name) {
+    return runAndRecordMetrics(
+        "historyRollout",
+        kind,
+        namespace,
+        () -> jobExecutor.historyRollout(this, kind, namespace, name));
+  }
+
+  public void undoRollout(KubernetesKind kind, String namespace, String name, int revision) {
+    runAndRecordMetrics(
+        "undoRollout",
+        kind,
+        namespace,
+        () -> jobExecutor.undoRollout(this, kind, namespace, name, revision));
+  }
+
+  public void pauseRollout(KubernetesKind kind, String namespace, String name) {
+    runAndRecordMetrics(
+        "pauseRollout",
+        kind,
+        namespace,
+        () -> jobExecutor.pauseRollout(this, kind, namespace, name));
+  }
+
+  public void resumeRollout(
+      KubernetesKind kind, String namespace, String name, Task task, String opName) {
+    runAndRecordMetrics(
+        "resumeRollout",
+        kind,
+        namespace,
+        () -> jobExecutor.resumeRollout(this, kind, namespace, name, task, opName));
+  }
+
+  public void rollingRestart(
+      KubernetesKind kind, String namespace, String name, Task task, String opName) {
+    runAndRecordMetrics(
+        "rollingRestart",
+        kind,
+        namespace,
+        () -> jobExecutor.rollingRestart(this, kind, namespace, name, task, opName));
+  }
+
+  public void patch(
+      KubernetesKind kind,
+      String namespace,
+      String name,
+      KubernetesPatchOptions options,
+      KubernetesManifest manifest,
+      Task task,
+      String opName) {
+    runAndRecordMetrics(
+        "patch",
+        kind,
+        namespace,
+        () -> jobExecutor.patch(this, kind, namespace, name, options, manifest, task, opName));
+  }
+
+  public void patch(
+      KubernetesKind kind,
+      String namespace,
+      String name,
+      KubernetesPatchOptions options,
+      List<JsonPatch> patches,
+      Task task,
+      String opName) {
+    runAndRecordMetrics(
+        "patch",
+        kind,
+        namespace,
+        () -> jobExecutor.patch(this, kind, namespace, name, options, patches, task, opName));
+  }
+
+  private <T> T runAndRecordMetrics(
+      String action, KubernetesKind kind, String namespace, Supplier<T> op) {
+    return runAndRecordMetrics(action, ImmutableList.of(kind), namespace, op);
+  }
+
+  private <T> T runAndRecordMetrics(
+      String action, List<KubernetesKind> kinds, String namespace, Supplier<T> op) {
+    Map<String, String> tags = new HashMap<>();
+    tags.put("action", action);
+    tags.put(
+        "kinds",
+        kinds.stream().map(KubernetesKind::toString).sorted().collect(Collectors.joining(",")));
+    tags.put("account", accountName);
+    tags.put("namespace", Strings.isNullOrEmpty(namespace) ? "none" : namespace);
+    tags.put("success", "true");
+    long startTime = clock.monotonicTime();
+    try {
+      return op.get();
+    } catch (RuntimeException e) {
+      tags.put("success", "false");
+      tags.put("reason", e.getClass().getSimpleName());
+      throw e;
+    } finally {
+      registry
+          .timer(registry.createId("kubernetes.api", tags))
+          .record(clock.monotonicTime() - startTime, TimeUnit.NANOSECONDS);
+    }
+  }
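Every kubectl-backed operation in this class has the same shape: name the action, pass the kind(s) and namespace so the timer is tagged consistently, and hand the actual work to `runAndRecordMetrics` as a lambda. A sketch of what a hypothetical new operation would look like; `annotate` and `jobExecutor.annotate` are not part of this change and are assumed here to return kubectl's output as a `String`:

```java
// Hypothetical operation, shown only to illustrate the wrapping pattern.
public String annotate(KubernetesKind kind, String namespace, String name, String annotation) {
  return runAndRecordMetrics(
      "annotate",
      kind,
      namespace,
      () -> jobExecutor.annotate(this, kind, namespace, name, annotation));
}
```

The resulting timer is published under the id `kubernetes.api` with `action`, `kinds`, `account`, `namespace`, `success` and (on failure) `reason` tags, so dashboards can slice latency and error rates per account and kind.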
+  /**
+   * Handles validating which kubernetes kinds the current account has permission to read, as well
+   * as whether the current account has permission to read pod metrics.
+   */
+  private class PermissionValidator {
+    private final Supplier<String> checkNamespace = Suppliers.memoize(this::computeCheckNamespace);
+    private final Map<KubernetesKind, Boolean> readableKinds = new ConcurrentHashMap<>();
+    private final Supplier<Boolean> metricsReadable = Suppliers.memoize(this::checkMetricsReadable);
+
+    private String getCheckNamespace() {
+      return checkNamespace.get();
+    }
+
+    private String computeCheckNamespace() {
+      List<String> namespaces = getDeclaredNamespaces();
+
+      if (namespaces.isEmpty()) {
+        log.warn(
+            "There are no namespaces configured (or loadable) -- please check that the list of"
+                + " 'omitNamespaces' for account '{}' doesn't prevent access from all namespaces"
+                + " in this cluster, or that the cluster is reachable.",
+            accountName);
+        return null;
+      }
+
+      // We assume that the roles granted to spinnaker for this account are identical in all
+      // namespaces; otherwise, checking all namespaces for all kinds is too expensive in large
+      // clusters (imagine a cluster with 100s of namespaces).
+      return namespaces.get(0);
+    }
+
+    private boolean skipPermissionChecks() {
+      // checkPermissionsOnStartup exists from when permission checks were done at startup (and took
+      // a long time); this flag was added to skip the checks and assume all kinds were readable.
+      // Now that permissions are checked on-the-fly, this flag is probably not necessary, but for
+      // now we'll continue to support the prior behavior, which is to short-circuit and assume all
+      // kinds are readable before checking.
+      // Before removing this flag, we'll need to check that nobody is depending on Spinnaker
+      // skipping permission checks for reasons other than performance. (For example, users may
+      // be relying on the skipped permission checks because of differences in permissions between
+      // namespaces.)
+      return !checkPermissionsOnStartup;
+    }
+
+    private boolean canReadKind(KubernetesKind kind) {
+      if (skipPermissionChecks()) {
+        return true;
+      }
+      log.info("Checking if {} is readable in account '{}'...", kind, accountName);
+      try {
+        if (kindRegistry.getKindPropertiesOrDefault(kind).isNamespaced()) {
+          list(kind, checkNamespace.get());
+        } else {
+          list(kind, null);
+        }
+        return true;
+      } catch (Exception e) {
+        log.info(
+            "Kind {} will not be cached in account '{}' because it cannot be listed.",
+            kind,
+            accountName);
+        return false;
+      }
+    }
+
+    private boolean checkMetricsReadable() {
+      if (skipPermissionChecks()) {
+        return true;
+      }
+      try {
+        log.info("Checking if pod metrics are readable for account {}...", accountName);
+        topPod(getCheckNamespace(), null);
+        return true;
+      } catch (Exception e) {
+        log.warn(
+            "Could not read pod metrics in account '{}' for reason: {}",
+            accountName,
+            e.getMessage());
+        log.debug("Reading pod metrics for account '{}' failed with exception: ", accountName, e);
+        return false;
+      }
+    }
+
+    /**
+     * Returns whether the given kind is readable for the current kubernetes account. This check is
+     * cached for each kind for the lifetime of the process, and subsequent calls return the cached
+     * value.
+     */
+    boolean isKindReadable(@Nonnull KubernetesKind kind) {
+      return readableKinds.computeIfAbsent(kind, this::canReadKind);
+    }
+
+    /**
+     * Returns whether metrics are readable for the current kubernetes account. This check is
+     * cached for the lifetime of the process, and subsequent calls return the cached value.
+     */
+    boolean isMetricsReadable() {
+      return metricsReadable.get();
+    }
+  }
+
+  @Component
+  @RequiredArgsConstructor
+  public static class Factory {
+    private final Registry spectatorRegistry;
+    private final KubernetesNamerRegistry kubernetesNamerRegistry;
+    private final KubectlJobExecutor jobExecutor;
+    private final ConfigFileService configFileService;
+    private final AccountResourcePropertyRegistry.Factory resourcePropertyRegistryFactory;
+    private final KubernetesKindRegistry.Factory kindRegistryFactory;
+    private final KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap;
+    private final GlobalResourcePropertyRegistry globalResourcePropertyRegistry;
+
+    public KubernetesCredentials build(ManagedAccount managedAccount) {
+      Namer<KubernetesManifest> manifestNamer =
+          kubernetesNamerRegistry.get(managedAccount.getNamingStrategy());
+      return new KubernetesCredentials(
+          spectatorRegistry,
+          jobExecutor,
+          managedAccount,
+          resourcePropertyRegistryFactory,
+          kindRegistryFactory,
+          kubernetesSpinnakerKindMap,
+          getKubeconfigFile(configFileService, managedAccount),
+          manifestNamer,
+          globalResourcePropertyRegistry);
+    }
+
+    private String getKubeconfigFile(
+        ConfigFileService configFileService, ManagedAccount managedAccount) {
+      if (StringUtils.isNotEmpty(managedAccount.getKubeconfigFile())) {
+        return configFileService.getLocalPath(managedAccount.getKubeconfigFile());
+      }
+
+      if (StringUtils.isNotEmpty(managedAccount.getKubeconfigContents())) {
+        return configFileService.getLocalPathForContents(
+            managedAccount.getKubeconfigContents(), managedAccount.getName());
+      }
+
+      return "";
+    }
+  }
+}
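One detail of `Factory.getKubeconfigFile` worth calling out: an explicit `kubeconfigFile` path always takes precedence over inline `kubeconfigContents`, and an account that supplies neither resolves to the empty string. A sketch under the assumption that `ManagedAccount` exposes Lombok-style setters for these fields; the account name and paths are illustrative:

```java
// Illustrative only: when both fields are set, the file path wins and the
// inline contents are ignored.
ManagedAccount account = new ManagedAccount();
account.setName("demo");
account.setKubeconfigFile("/home/spinnaker/.kube/demo.config");
account.setKubeconfigContents("apiVersion: v1\nkind: Config\n# ...");
// getKubeconfigFile(configFileService, account) resolves the local path for
// /home/spinnaker/.kube/demo.config; kubeconfigContents is never consulted.
```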
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsLifecycleHandler.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsLifecycleHandler.java
new file mode 100644
index 00000000000..164dff0d391
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsLifecycleHandler.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2020 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.security;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesProvider;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgent;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentDispatcher;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties;
+import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import lombok.RequiredArgsConstructor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Component;
+
+@Component
+@RequiredArgsConstructor
+public class KubernetesCredentialsLifecycleHandler
+    implements CredentialsLifecycleHandler<KubernetesNamedAccountCredentials> {
+  private static final Logger log =
+      LoggerFactory.getLogger(KubernetesCredentialsLifecycleHandler.class);
+  private final KubernetesProvider provider;
+  private final KubernetesCachingAgentDispatcher cachingAgentDispatcher;
+  private final KubernetesConfigurationProperties kubernetesConfigurationProperties;
+
+  @Override
+  public void credentialsAdded(KubernetesNamedAccountCredentials credentials) {
+    if (kubernetesConfigurationProperties.isLoadNamespacesInAccount()) {
+      // Attempt to get namespaces to resolve any connectivity error without blocking /credentials
+      log.info(
+          "kubernetes.loadNamespacesInAccount flag is set to true - loading all namespaces for new account: {}",
+          credentials.getName());
+      List<String> namespaces = credentials.getCredentials().getDeclaredNamespaces();
+      if (namespaces.isEmpty()) {
+        log.warn(
+            "New account {} did not return any namespaces and could be unreachable or misconfigured",
+            credentials.getName());
+      }
+    } else {
+      log.info(
+          "kubernetes.loadNamespacesInAccount flag is disabled - new account: {} is unverified",
+          credentials.getName());
+    }
+
+    Collection<KubernetesCachingAgent> newlyAddedAgents =
+        cachingAgentDispatcher.buildAllCachingAgents(credentials);
+
+    log.info("Adding {} agents for new account {}", newlyAddedAgents.size(), credentials.getName());
+    provider.addAgents(newlyAddedAgents);
+  }
+
+  @Override
+  public void credentialsUpdated(KubernetesNamedAccountCredentials credentials) {
+    // Attempt to get namespaces to resolve any connectivity error without blocking /credentials
+    List<String> namespaces = credentials.getCredentials().getDeclaredNamespaces();
+    if (namespaces.isEmpty()) {
+      log.warn(
+          "Modified account {} did not return any namespaces and could be unreachable or misconfigured",
+          credentials.getName());
+    }
+
+    Collection<KubernetesCachingAgent> updatedAgents =
+        cachingAgentDispatcher.buildAllCachingAgents(credentials);
+
+    log.info(
+        "Scheduling {} agents for updated account {}", updatedAgents.size(), credentials.getName());
+    // Remove existing agents belonging to changed accounts
+    provider.removeAgentsForAccounts(Collections.singleton(credentials.getName()));
+    provider.addAgents(updatedAgents);
+  }
+
+  @Override
+  public void credentialsDeleted(KubernetesNamedAccountCredentials credentials) {
+    provider.removeAgentsForAccounts(Collections.singleton(credentials.getName()));
+  }
+}
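The handler above treats an update as "rebuild, then swap": agents are rebuilt from the new credentials first, the account's old agents are removed, and the rebuilt ones are added. The delete path is the simplest to pin down in a test; a sketch assuming Mockito, with all names illustrative:

```java
// import static org.mockito.Mockito.*;
KubernetesProvider provider = mock(KubernetesProvider.class);
KubernetesCachingAgentDispatcher dispatcher = mock(KubernetesCachingAgentDispatcher.class);
KubernetesConfigurationProperties properties = new KubernetesConfigurationProperties();

KubernetesCredentialsLifecycleHandler handler =
    new KubernetesCredentialsLifecycleHandler(provider, dispatcher, properties);

KubernetesNamedAccountCredentials credentials = mock(KubernetesNamedAccountCredentials.class);
when(credentials.getName()).thenReturn("demo");

handler.credentialsDeleted(credentials);
verify(provider).removeAgentsForAccounts(Collections.singleton("demo"));
```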
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsParser.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsParser.java
new file mode 100644
index 00000000000..3e9da020d11
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsParser.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2022 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.security;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount;
+import com.netflix.spinnaker.credentials.definition.CredentialsParser;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+
+/**
+ * Custom CredentialsParser for Kubernetes credentials to handle configuration errors when parsing
+ * account credentials. As account credentials can be created by users through the credentials API,
+ * this parser is provided for more robust protection from user error.
+ */
+@RequiredArgsConstructor
+@Log4j2
+public class KubernetesCredentialsParser
+    implements CredentialsParser<ManagedAccount, KubernetesNamedAccountCredentials> {
+  private final KubernetesCredentials.Factory credentialFactory;
+
+  @Nullable
+  @Override
+  public KubernetesNamedAccountCredentials parse(@Nonnull ManagedAccount managedAccount) {
+    try {
+      return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory);
+    } catch (RuntimeException e) {
+      log.warn("Skipping invalid account definition account={}", managedAccount.getName(), e);
+      return null;
+    }
+  }
+}
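The `return null` above is the load-bearing part of this class: `CredentialsParser` implementations signal "skip this definition" by returning null instead of throwing, so one malformed account no longer aborts loading of the rest. A sketch of the contract, assuming a `credentialFactory` is available and that `ManagedAccount.validate()` rejects an account with no name:

```java
KubernetesCredentialsParser parser = new KubernetesCredentialsParser(credentialFactory);

ManagedAccount broken = new ManagedAccount(); // name never set, so validate() fails
KubernetesNamedAccountCredentials parsed = parser.parse(broken);
// parsed == null: the bad definition is logged and skipped, not propagated.
```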
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesKindRegistry.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesKindRegistry.java
new file mode 100644
index 00000000000..7d959500d5f
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesKindRegistry.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.security;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties;
+import com.netflix.spinnaker.kork.annotations.NonnullByDefault;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+import lombok.AccessLevel;
+import lombok.RequiredArgsConstructor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Component;
+
+@NonnullByDefault
+@RequiredArgsConstructor(access = AccessLevel.PRIVATE)
+public final class KubernetesKindRegistry {
+  private static final Logger log = LoggerFactory.getLogger(KubernetesKindRegistry.class);
+  private final Map<KubernetesKind, KubernetesKindProperties> kindMap = new ConcurrentHashMap<>();
+  private final GlobalKubernetesKindRegistry globalKindRegistry;
+  private final Function<KubernetesKind, Optional<KubernetesKindProperties>> crdLookup;
+
+  private KubernetesKindRegistry(
+      GlobalKubernetesKindRegistry globalKindRegistry,
+      Function<KubernetesKind, Optional<KubernetesKindProperties>> crdLookup,
+      Iterable<KubernetesKindProperties> customProperties) {
+    this.globalKindRegistry = globalKindRegistry;
+    this.crdLookup = crdLookup;
+    customProperties.forEach(this::registerKind);
+  }
+
+  /** Registers a given {@link KubernetesKindProperties} into the registry */
+  private KubernetesKindProperties registerKind(KubernetesKindProperties kindProperties) {
+    return kindMap.computeIfAbsent(
+        kindProperties.getKubernetesKind(),
+        k -> {
+          log.info(
+              "Dynamically registering {} (namespaced: {})",
+              kindProperties.getKubernetesKind().toString(),
+              kindProperties.isNamespaced());
+          return kindProperties;
+        });
+  }
+
+  /**
+   * Searches the registry for a {@link KubernetesKindProperties} with the supplied {@link
+   * KubernetesKind}. If the kind has been registered, returns the {@link KubernetesKindProperties}
+   * that were registered for the kind. If the kind is not registered, tries to look up the
+   * properties using the registry's CRD lookup function. If the lookup returns properties,
+   * registers them for this kind and returns them; otherwise returns a {@link
+   * KubernetesKindProperties} with default properties.
+   */
+  KubernetesKindProperties getKindPropertiesOrDefault(KubernetesKind kind) {
+    return getKindProperties(kind)
+        .orElseGet(() -> KubernetesKindProperties.withDefaultProperties(kind));
+  }
+
+  private Optional<KubernetesKindProperties> getKindProperties(KubernetesKind kind) {
+    Optional<KubernetesKindProperties> globalResult = globalKindRegistry.getKindProperties(kind);
+    if (globalResult.isPresent()) {
+      return globalResult;
+    }
+
+    KubernetesKindProperties result = kindMap.get(kind);
+    if (result != null) {
+      return Optional.of(result);
+    }
+
+    return crdLookup.apply(kind).map(this::registerKind);
+  }
+
+  /**
+   * Returns true if the supplied {@link KubernetesKind} is registered. If the kind is not
+   * registered, tries to register the kind properties using the registry's CRD lookup function, and
+   * returns true if the kind was successfully registered.
+ * + * @param kind The kind whose registration status will be queried + * @return true if the kind was registered or was successfully registered using the CRD lookup + */ + boolean isKindRegistered(KubernetesKind kind) { + return getKindProperties(kind).isPresent(); + } + + /** Returns a list of all global kinds */ + ImmutableSet getGlobalKinds() { + return globalKindRegistry.getRegisteredKinds(); + } + + @Component + public static class Factory { + private final GlobalKubernetesKindRegistry globalKindRegistry; + + public Factory(GlobalKubernetesKindRegistry globalKindRegistry) { + this.globalKindRegistry = globalKindRegistry; + } + + KubernetesKindRegistry create( + Function> crdLookup, + Iterable customProperties) { + return new KubernetesKindRegistry(globalKindRegistry, crdLookup, customProperties); + } + + KubernetesKindRegistry create() { + return new KubernetesKindRegistry( + globalKindRegistry, k -> Optional.empty(), ImmutableList.of()); + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentials.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentials.java new file mode 100644 index 00000000000..9d0a217c591 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentials.java @@ -0,0 +1,107 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import static lombok.EqualsAndHashCode.Include; + +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration; +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.*; +import javax.annotation.Nonnull; +import javax.annotation.ParametersAreNonnullByDefault; +import lombok.EqualsAndHashCode; +import lombok.Getter; + +@Getter +@EqualsAndHashCode(onlyExplicitlyIncluded = true, callSuper = false) +@ParametersAreNonnullByDefault +public class KubernetesNamedAccountCredentials + extends AbstractAccountCredentials { + private final String cloudProvider = "kubernetes"; + + @Nonnull @Include private final String name; + + @Include private final String environment; + + @Include private final String accountType; + + @Include private final int cacheThreads; + + @Include private final KubernetesCredentials credentials; + + @Include private final List requiredGroupMembership; + + @Include private final Permissions permissions; + + @Include private final Long cacheIntervalSeconds; + + public KubernetesNamedAccountCredentials( + ManagedAccount managedAccount, KubernetesCredentials.Factory credentialFactory) { + managedAccount.validate(); + this.name = Objects.requireNonNull(managedAccount.getName()); + this.environment = + Optional.ofNullable(managedAccount.getEnvironment()).orElse(managedAccount.getName()); + this.accountType = + Optional.ofNullable(managedAccount.getAccountType()).orElse(managedAccount.getName()); + this.cacheThreads = managedAccount.getCacheThreads(); + this.cacheIntervalSeconds = managedAccount.getCacheIntervalSeconds(); + + Permissions permissions = managedAccount.getPermissions().build(); + if (permissions.isRestricted()) { + this.permissions = permissions; + this.requiredGroupMembership = Collections.emptyList(); + } else { + this.permissions = null; + this.requiredGroupMembership = + Collections.unmodifiableList(managedAccount.getRequiredGroupMembership()); + } + this.credentials = credentialFactory.build(managedAccount); + } + + /** + * This method is deprecated and users should instead supply {@link + * KubernetesNamedAccountCredentials#permissions}. In order to continue to support users who have + * `requiredGroupMembership` in their account config, we still need to override this method. We'll + * need to either communicate the backwards-incompatible change or translate the supplied + * `requiredGroupMembership` into {@link KubernetesNamedAccountCredentials#permissions} before + * removing this override. + */ + @Override + @SuppressWarnings("deprecation") + public List getRequiredGroupMembership() { + return requiredGroupMembership; + } + + /** + * Get the namespaces without making a call to the kubernetes cluster. If the cache is empty, + * return an empty list. 
+ */ + public List getNamespaces() { + return credentials.getDeclaredNamespacesFromCache(); + } + + public Map getSpinnakerKindMap() { + return credentials.getSpinnakerKindMap(); + } + + public List getDockerRegistries() { + return credentials.getDockerRegistries(); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelector.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelector.java new file mode 100644 index 00000000000..dd483419b15 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelector.java @@ -0,0 +1,117 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import java.util.List; +import javax.annotation.Nonnull; +import lombok.Data; + +@Data +public class KubernetesSelector { + public enum Kind { + ANY, + EQUALS, + NOT_EQUALS, + CONTAINS, + NOT_CONTAINS, + EXISTS, + NOT_EXISTS, + } + + private final Kind kind; + private final String key; + private final List values; + + @JsonCreator + public KubernetesSelector( + @JsonProperty("kind") @Nonnull Kind kind, + @JsonProperty("key") String key, + @JsonProperty("values") List values) { + if (Strings.isNullOrEmpty(key) && kind != Kind.ANY) { + throw new IllegalArgumentException("Only an 'any' selector can have no key specified"); + } + + this.kind = kind; + this.key = key; + this.values = values; + } + + @Override + public String toString() { + switch (kind) { + case ANY: + return ""; + case EQUALS: + return String.format("%s = %s", key, values.get(0)); + case NOT_EQUALS: + return String.format("%s != %s", key, values.get(0)); + case CONTAINS: + return String.format("%s in (%s)", key, String.join(", ", values)); + case NOT_CONTAINS: + return String.format("%s notin (%s)", key, String.join(", ", values)); + case EXISTS: + return String.format("%s", key); + case NOT_EXISTS: + return String.format("!%s", key); + default: + throw new IllegalStateException("Unknown kind " + kind); + } + } + + public static KubernetesSelector any() { + return new KubernetesSelector(Kind.ANY, null, null); + } + + public static KubernetesSelector equals(String key, String value) { + return new KubernetesSelector(Kind.EQUALS, key, ImmutableList.of(value)); + } + + public static KubernetesSelector notEquals(String key, String value) { + return new KubernetesSelector(Kind.NOT_EQUALS, key, ImmutableList.of(value)); + } + + public static KubernetesSelector contains(String key, List values) { + if (values == null || values.isEmpty()) { + throw new IllegalArgumentException( + "At least one value must be supplied to a 'contains' selector"); + } + + return new 
KubernetesSelector(Kind.CONTAINS, key, values); + } + + public static KubernetesSelector notContains(String key, List values) { + if (values == null || values.isEmpty()) { + throw new IllegalArgumentException( + "At least one value must be supplied to a 'notcontains' selector"); + } + + return new KubernetesSelector(Kind.NOT_CONTAINS, key, values); + } + + public static KubernetesSelector exists(String key) { + return new KubernetesSelector(Kind.EXISTS, key, null); + } + + public static KubernetesSelector notExists(String key) { + return new KubernetesSelector(Kind.NOT_EXISTS, key, null); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorList.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorList.java new file mode 100644 index 00000000000..8582db1e614 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorList.java @@ -0,0 +1,105 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelector.Kind; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.Data; + +@Data +public class KubernetesSelectorList { + private final List selectors = new ArrayList<>(); + + public KubernetesSelectorList() {} + + private KubernetesSelectorList(List selectors) { + this.selectors.addAll(selectors); + } + + public KubernetesSelectorList(KubernetesSelector... 
selectors) { + this.selectors.addAll(Arrays.asList(selectors)); + } + + public boolean isNotEmpty() { + return !selectors.isEmpty(); + } + + public KubernetesSelectorList addSelector(KubernetesSelector selector) { + selectors.add(selector); + return this; + } + + public KubernetesSelectorList addSelectors(KubernetesSelectorList selectors) { + this.selectors.addAll(selectors.selectors); + return this; + } + + public boolean isEmpty() { + return selectors.isEmpty(); + } + + @Override + public String toString() { + return selectors.stream().map(KubernetesSelector::toString).collect(Collectors.joining(",")); + } + + public static KubernetesSelectorList fromMatchLabels(Map matchLabels) { + return new KubernetesSelectorList( + matchLabels.entrySet().stream() + .map( + kv -> + new KubernetesSelector( + Kind.EQUALS, kv.getKey(), ImmutableList.of(kv.getValue()))) + .collect(Collectors.toList())); + } + + public static KubernetesSelectorList fromMatchExpressions( + List matchExpressions) { + return new KubernetesSelectorList( + matchExpressions.stream() + .map(KubernetesSelectorList::fromMatchExpression) + .collect(Collectors.toList())); + } + + private static KubernetesSelector fromMatchExpression(MatchExpression matchExpression) { + KubernetesSelector.Kind kind; + switch (matchExpression.getOperator()) { + case In: + kind = KubernetesSelector.Kind.CONTAINS; + break; + case NotIn: + kind = KubernetesSelector.Kind.NOT_CONTAINS; + break; + case Exists: + kind = KubernetesSelector.Kind.EXISTS; + break; + case DoesNotExist: + kind = KubernetesSelector.Kind.NOT_EXISTS; + break; + default: + throw new IllegalArgumentException("Unknown operator: " + matchExpression.getOperator()); + } + + return new KubernetesSelector(kind, matchExpression.getKey(), matchExpression.getValues()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/MatchExpression.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/MatchExpression.java new file mode 100644 index 00000000000..139e02f0612 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/security/MatchExpression.java @@ -0,0 +1,35 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import java.util.List; +import lombok.Data; + +@Data +public class MatchExpression { + String key; + Operator operator; + List values; + + public enum Operator { + In, + NotIn, + Exists, + DoesNotExist + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/KubernetesValidationUtil.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/KubernetesValidationUtil.java new file mode 100644 index 00000000000..3c61cbc5bda --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/KubernetesValidationUtil.java @@ -0,0 +1,112 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class KubernetesValidationUtil { + private static final Logger log = LoggerFactory.getLogger(KubernetesValidationUtil.class); + private final String context; + private final ValidationErrors errors; + + public KubernetesValidationUtil(String context, ValidationErrors errors) { + this.context = context; + this.errors = errors; + } + + private String joinAttributeChain(String... attributes) { + List chain = new ArrayList<>(); + chain.add(context); + Collections.addAll(chain, attributes); + return String.join(".", chain); + } + + public void reject(String errorName, String... 
attributes) { + String field = joinAttributeChain(attributes); + String error = joinAttributeChain(field, errorName); + errors.reject(field, error); + } + + public boolean validateNotEmpty(String attribute, Object value) { + if (value == null) { + reject("empty", attribute); + return false; + } + + return true; + } + + public boolean validateCredentials( + AccountCredentialsProvider provider, String accountName, KubernetesManifest manifest) { + String namespace = manifest.getNamespace(); + return validateCredentials(provider, accountName, namespace); + } + + public boolean validateCredentials( + AccountCredentialsProvider provider, String accountName, String namespace) { + log.info("Validating credentials for {} {}", accountName, namespace); + if (Strings.isNullOrEmpty(accountName)) { + reject("empty", "account"); + return false; + } + + if (Strings.isNullOrEmpty(namespace)) { + return true; + } + + AccountCredentials credentials = provider.getCredentials(accountName); + if (credentials == null) { + reject("notFound", "account"); + return false; + } + + if (!(credentials.getCredentials() instanceof KubernetesCredentials)) { + reject("wrongVersion", "account"); + return false; + } + + return validateNamespace(namespace, (KubernetesCredentials) credentials.getCredentials()); + } + + protected boolean validateNamespace(String namespace, KubernetesCredentials credentials) { + final List configuredNamespaces = credentials.getNamespaces(); + if (configuredNamespaces != null + && !configuredNamespaces.isEmpty() + && !configuredNamespaces.contains(namespace)) { + reject("wrongNamespace", namespace); + return false; + } + + final List omitNamespaces = credentials.getOmitNamespaces(); + if (omitNamespaces != null && omitNamespaces.contains(namespace)) { + reject("omittedNamespace", namespace); + return false; + } + return true; + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/artifact/KubernetesArtifactCleanupValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/artifact/KubernetesArtifactCleanupValidator.java new file mode 100644 index 00000000000..16264b99a4e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/artifact/KubernetesArtifactCleanupValidator.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.artifact; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.CLEANUP_ARTIFACTS; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.artifact.KubernetesCleanupArtifactsDescription; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(CLEANUP_ARTIFACTS) +@Component +public class KubernetesArtifactCleanupValidator + extends DescriptionValidator { + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesCleanupArtifactsDescription description, + ValidationErrors errors) {} +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDeleteManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDeleteManifestValidator.java new file mode 100644 index 00000000000..d22422f6dc8 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDeleteManifestValidator.java @@ -0,0 +1,61 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DELETE_MANIFEST; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeleteManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(DELETE_MANIFEST) +@Component +public class KubernetesDeleteManifestValidator + extends DescriptionValidator { + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesDeleteManifestDescription description, + ValidationErrors errors) { + KubernetesValidationUtil util = + new KubernetesValidationUtil("deleteKubernetesManifest", errors); + List coordinates; + if (description.isDynamic()) { + coordinates = description.getAllCoordinates(); + } else { + coordinates = ImmutableList.of(description.getPointCoordinates()); + } + + for (KubernetesCoordinates coordinate : coordinates) { + if (!util.validateCredentials( + provider, description.getAccount(), coordinate.getNamespace())) { + return; + } + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDeployManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDeployManifestValidator.java new file mode 100644 index 00000000000..42ad890444c --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDeployManifestValidator.java @@ -0,0 +1,61 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DEPLOY_MANIFEST; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeployManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(DEPLOY_MANIFEST) +@Component +public class KubernetesDeployManifestValidator + extends DescriptionValidator { + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesDeployManifestDescription description, + ValidationErrors errors) { + KubernetesValidationUtil util = + new KubernetesValidationUtil("deployKubernetesManifest", errors); + if (!util.validateNotEmpty("moniker", description)) { + return; + } + + for (KubernetesManifest manifest : description.getManifests()) { + // technically OK - sometimes manifest multi-docs are submitted with trailing `---` entries + if (manifest == null) { + continue; + } + + if (!util.validateCredentials(provider, description.getAccount(), manifest)) { + return; + } + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDisableManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDisableManifestValidator.java new file mode 100644 index 00000000000..70686894ba3 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesDisableManifestValidator.java @@ -0,0 +1,50 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.DISABLE_MANIFEST; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(DISABLE_MANIFEST) +@Component +public class KubernetesDisableManifestValidator + extends DescriptionValidator { + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesEnableDisableManifestDescription description, + ValidationErrors errors) { + KubernetesValidationUtil util = + new KubernetesValidationUtil("disableKubernetesManifest", errors); + if (!util.validateCredentials( + provider, description.getAccount(), description.getPointCoordinates().getNamespace())) { + return; + } + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesEnableManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesEnableManifestValidator.java new file mode 100644 index 00000000000..1e01e46c1b7 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesEnableManifestValidator.java @@ -0,0 +1,48 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.ENABLE_MANIFEST; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesEnableDisableManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(ENABLE_MANIFEST) +@Component +public class KubernetesEnableManifestValidator + extends DescriptionValidator { + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesEnableDisableManifestDescription description, + ValidationErrors errors) { + KubernetesValidationUtil util = + new KubernetesValidationUtil("enableKubernetesManifest", errors); + util.validateCredentials( + provider, description.getAccount(), description.getPointCoordinates().getNamespace()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesPatchManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesPatchManifestValidator.java new file mode 100644 index 00000000000..865e7cc35e0 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesPatchManifestValidator.java @@ -0,0 +1,62 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PATCH_MANIFEST; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesPatchManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(PATCH_MANIFEST) +@Component +public class KubernetesPatchManifestValidator + extends DescriptionValidator { + + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesPatchManifestDescription description, + ValidationErrors errors) { + KubernetesValidationUtil util = new KubernetesValidationUtil("patchKubernetesManifest", errors); + + if (!util.validateNotEmpty("patchBody", description.getPatchBody())) { + return; + } + + if (!util.validateNotEmpty( + "options.mergeStrategy", description.getOptions().getMergeStrategy())) { + return; + } + + if (!util.validateNotEmpty("options.record", description.getOptions().isRecord())) { + return; + } + + util.validateCredentials( + provider, description.getAccount(), description.getPointCoordinates().getNamespace()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesPauseRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesPauseRolloutManifestValidator.java new file mode 100644 index 00000000000..bef8fb0f50e --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesPauseRolloutManifestValidator.java @@ -0,0 +1,48 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest; + +import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.PAUSE_ROLLOUT_MANIFEST; + +import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator; +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesPauseRolloutManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@KubernetesOperation(PAUSE_ROLLOUT_MANIFEST) +@Component +public class KubernetesPauseRolloutManifestValidator + extends DescriptionValidator { + @Autowired AccountCredentialsProvider provider; + + @Override + public void validate( + List priorDescriptions, + KubernetesPauseRolloutManifestDescription description, + ValidationErrors errors) { + KubernetesValidationUtil util = + new KubernetesValidationUtil("pauseRolloutKubernetesManifest", errors); + util.validateCredentials( + provider, description.getAccount(), description.getPointCoordinates().getNamespace()); + } +} diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesResumeRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesResumeRolloutManifestValidator.java new file mode 100644 index 00000000000..1e741131950 --- /dev/null +++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesResumeRolloutManifestValidator.java @@ -0,0 +1,48 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesResumeRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesResumeRolloutManifestValidator.java
new file mode 100644
index 00000000000..1e741131950
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesResumeRolloutManifestValidator.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESUME_ROLLOUT_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesResumeRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(RESUME_ROLLOUT_MANIFEST)
+@Component
+public class KubernetesResumeRolloutManifestValidator
+    extends DescriptionValidator<KubernetesResumeRolloutManifestDescription> {
+  @Autowired AccountCredentialsProvider provider;
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      KubernetesResumeRolloutManifestDescription description,
+      ValidationErrors errors) {
+    KubernetesValidationUtil util =
+        new KubernetesValidationUtil("resumeRolloutKubernetesManifest", errors);
+    util.validateCredentials(
+        provider, description.getAccount(), description.getPointCoordinates().getNamespace());
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesRollingRestartManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesRollingRestartManifestValidator.java
new file mode 100644
index 00000000000..76271ed679a
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesRollingRestartManifestValidator.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.ROLLING_RESTART_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesRollingRestartManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(ROLLING_RESTART_MANIFEST)
+@Component
+public class KubernetesRollingRestartManifestValidator
+    extends DescriptionValidator<KubernetesRollingRestartManifestDescription> {
+  private final AccountCredentialsProvider provider;
+
+  @Autowired
+  public KubernetesRollingRestartManifestValidator(AccountCredentialsProvider provider) {
+    this.provider = provider;
+  }
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      KubernetesRollingRestartManifestDescription description,
+      ValidationErrors errors) {
+    KubernetesValidationUtil util =
+        new KubernetesValidationUtil("rollingRestartKubernetesManifest", errors);
+    util.validateCredentials(
+        provider, description.getAccount(), description.getPointCoordinates().getNamespace());
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesScaleManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesScaleManifestValidator.java
new file mode 100644
index 00000000000..27be4086c44
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesScaleManifestValidator.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.SCALE_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesScaleManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(SCALE_MANIFEST)
+@Component
+public class KubernetesScaleManifestValidator
+    extends DescriptionValidator<KubernetesScaleManifestDescription> {
+  @Autowired AccountCredentialsProvider provider;
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      KubernetesScaleManifestDescription description,
+      ValidationErrors errors) {
+    KubernetesValidationUtil util = new KubernetesValidationUtil("scaleKubernetesManifest", errors);
+    util.validateCredentials(
+        provider, description.getAccount(), description.getPointCoordinates().getNamespace());
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesUndoRolloutManifestValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesUndoRolloutManifestValidator.java
new file mode 100644
index 00000000000..0d439e380ea
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/manifest/KubernetesUndoRolloutManifestValidator.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.validator.manifest;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.UNDO_ROLLOUT_MANIFEST;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesUndoRolloutManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(UNDO_ROLLOUT_MANIFEST)
+@Component
+public class KubernetesUndoRolloutManifestValidator
+    extends DescriptionValidator<KubernetesUndoRolloutManifestDescription> {
+  @Autowired AccountCredentialsProvider provider;
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      KubernetesUndoRolloutManifestDescription description,
+      ValidationErrors errors) {
+    KubernetesValidationUtil util =
+        new KubernetesValidationUtil("undoRolloutKubernetesManifest", errors);
+    if (!util.validateCredentials(
+        provider, description.getAccount(), description.getPointCoordinates().getNamespace())) {
+      return;
+    }
+
+    if (description.getNumRevisionsBack() == null && description.getRevision() == null) {
+      util.reject("empty", "numRevisionsBack & revision");
+    }
+  }
+}
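A hedged illustration of the rule above (editorial sketch, not part of this change): an undoRolloutManifest description must carry at least one of numRevisionsBack or revision, otherwise the validator rejects on the combined field name "numRevisionsBack & revision". Only the field names come from the validator; values and JSON shape are assumptions.

// Hypothetical payloads: the first passes the revision check, the second is rejected.
String undoRolloutOk =
    """
    {"account": "my-k8s-account", "numRevisionsBack": 1}
    """;
String undoRolloutRejected =
    """
    {"account": "my-k8s-account"}
    """; // neither numRevisionsBack nor revision -> util.reject("empty", ...)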
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/servergroup/KubernetesResizeServerGroupValidator.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/servergroup/KubernetesResizeServerGroupValidator.java
new file mode 100644
index 00000000000..8b013408606
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/clouddriver/kubernetes/validator/servergroup/KubernetesResizeServerGroupValidator.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.validator.servergroup;
+
+import static com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations.RESIZE_SERVER_GROUP;
+
+import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator;
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesOperation;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.servergroup.KubernetesResizeServerGroupDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.validator.KubernetesValidationUtil;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@KubernetesOperation(RESIZE_SERVER_GROUP)
+@Component
+public class KubernetesResizeServerGroupValidator
+    extends DescriptionValidator<KubernetesResizeServerGroupDescription> {
+  @Autowired AccountCredentialsProvider provider;
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      KubernetesResizeServerGroupDescription description,
+      ValidationErrors errors) {
+    KubernetesValidationUtil util =
+        new KubernetesValidationUtil("deployKubernetesManifest", errors);
+    if (!util.validateCredentials(
+        provider, description.getAccount(), description.getCoordinates().getNamespace())) {
+      return;
+    }
+
+    util.validateNotEmpty("capacity", description.getCapacity());
+  }
+}
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesAccountDefinitionSourceConfiguration.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesAccountDefinitionSourceConfiguration.java
new file mode 100644
index 00000000000..74e5a17a1d0
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesAccountDefinitionSourceConfiguration.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Apple Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.config;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties;
+import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository;
+import com.netflix.spinnaker.clouddriver.security.AccountDefinitionSource;
+import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource;
+import java.util.List;
+import java.util.Optional;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Primary;
+
+@Configuration
+@ConditionalOnProperty({"account.storage.enabled", "account.storage.kubernetes.enabled"})
+public class KubernetesAccountDefinitionSourceConfiguration {
+  @Bean
+  @Primary
+  public CredentialsDefinitionSource<KubernetesAccountProperties.ManagedAccount>
+      kubernetesAccountSource(
+          AccountDefinitionRepository repository,
+          Optional<List<CredentialsDefinitionSource<KubernetesAccountProperties.ManagedAccount>>>
+              additionalSources,
+          KubernetesAccountProperties accountProperties) {
+    return new AccountDefinitionSource<>(
+        repository,
+        KubernetesAccountProperties.ManagedAccount.class,
+        additionalSources.orElseGet(() -> List.of(accountProperties::getAccounts)));
+  }
+}
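A note on the additionalSources hook above: the @Primary source merges repository-stored account definitions with any other CredentialsDefinitionSource beans, falling back to the plain kubernetes.accounts properties when none are defined. A minimal hedged sketch of such an extra source follows; the bean name, account name, and setName setter are assumptions, and the functional-interface shape is inferred only from the accountProperties::getAccounts method reference above.

// Hypothetical additional account source, picked up via the Optional<List<...>> parameter.
@Bean
public CredentialsDefinitionSource<KubernetesAccountProperties.ManagedAccount>
    staticKubernetesAccountSource() {
  var account = new KubernetesAccountProperties.ManagedAccount();
  account.setName("static-demo-account"); // assumed setter
  return () -> java.util.List.of(account);
}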
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesConfiguration.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesConfiguration.java
new file mode 100644
index 00000000000..c030ce440fc
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesConfiguration.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2015 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.config;
+
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesProvider;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties;
+import com.netflix.spinnaker.clouddriver.kubernetes.health.KubernetesHealthIndicator;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentialsParser;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable;
+import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository;
+import com.netflix.spinnaker.credentials.definition.AbstractCredentialsLoader;
+import com.netflix.spinnaker.credentials.definition.BasicCredentialsLoader;
+import com.netflix.spinnaker.credentials.definition.CredentialsDefinitionSource;
+import com.netflix.spinnaker.credentials.definition.CredentialsParser;
+import com.netflix.spinnaker.credentials.poller.Poller;
+import javax.annotation.Nullable;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.cloud.context.config.annotation.RefreshScope;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.scheduling.annotation.EnableScheduling;
+
+@Configuration
+@EnableConfigurationProperties
+@EnableScheduling
+@ConditionalOnProperty("kubernetes.enabled")
+@ComponentScan({"com.netflix.spinnaker.clouddriver.kubernetes"})
+public class KubernetesConfiguration {
+  @Bean
+  @RefreshScope
+  @ConfigurationProperties("kubernetes")
+  public KubernetesConfigurationProperties kubernetesConfigurationProperties() {
+    return new KubernetesConfigurationProperties();
+  }
+
+  @Bean
+  @RefreshScope
+  @ConfigurationProperties("kubernetes")
+  public KubernetesAccountProperties kubernetesAccountProperties() {
+    return new KubernetesAccountProperties();
+  }
+
+  @Bean
+  public KubernetesHealthIndicator kubernetesHealthIndicator(
+      Registry registry,
+      CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository,
+      KubernetesConfigurationProperties kubernetesConfigurationProperties) {
+    return new KubernetesHealthIndicator(
+        registry, credentialsRepository, kubernetesConfigurationProperties);
+  }
+
+  @Bean
+  public KubernetesProvider kubernetesProvider() {
+    return new KubernetesProvider();
+  }
+
+  @Bean
+  public CredentialsParser<ManagedAccount, KubernetesNamedAccountCredentials>
+      kubernetesCredentialsParser(KubernetesCredentials.Factory credentialFactory) {
+    return new KubernetesCredentialsParser(credentialFactory);
+  }
+
+  @Bean
+  @ConditionalOnMissingBean(
+      value = KubernetesNamedAccountCredentials.class,
+      parameterizedContainer = AbstractCredentialsLoader.class)
+  public AbstractCredentialsLoader<KubernetesNamedAccountCredentials> kubernetesCredentialsLoader(
+      @Nullable CredentialsDefinitionSource<ManagedAccount> kubernetesCredentialSource,
+      KubernetesAccountProperties accountProperties,
+      CredentialsParser<ManagedAccount, KubernetesNamedAccountCredentials> credentialsParser,
+      CredentialsRepository<KubernetesNamedAccountCredentials> kubernetesCredentialsRepository) {
+
+    if (kubernetesCredentialSource == null) {
+      kubernetesCredentialSource = accountProperties::getAccounts;
+    }
+    return new BasicCredentialsLoader<>(
+        kubernetesCredentialSource, credentialsParser, kubernetesCredentialsRepository);
+  }
+
+  @Bean
+  @ConditionalOnMissingBean(
+      value = KubernetesNamedAccountCredentials.class,
+      parameterizedContainer = CredentialsRepository.class)
+  public CredentialsRepository<KubernetesNamedAccountCredentials> kubernetesCredentialsRepository(
+      CredentialsLifecycleHandler<KubernetesNamedAccountCredentials> eventHandler) {
+    return new MapBackedCredentialsRepository<>(KubernetesProvider.PROVIDER_NAME, eventHandler);
+  }
+
+  @Bean
+  @ConditionalOnMissingBean(
+      value = ManagedAccount.class,
+      parameterizedContainer = CredentialsDefinitionSource.class)
+  public CredentialsInitializerSynchronizable kubernetesCredentialsInitializerSynchronizable(
+      AbstractCredentialsLoader<KubernetesNamedAccountCredentials> loader) {
+    final Poller<KubernetesNamedAccountCredentials> poller = new Poller<>(loader);
+    return new CredentialsInitializerSynchronizable() {
+      @Override
+      public void synchronize() {
+        poller.run();
+      }
+    };
+  }
+}
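Because the loader and repository beans above are declared with @ConditionalOnMissingBean(parameterizedContainer = ...), a deployment can replace either one by defining its own bean of the same parameterized type. A hedged override sketch, reusing only types that appear in this configuration (the bean name and the empty account source are invented):

// Hypothetical override: defining this bean suppresses the default
// BasicCredentialsLoader-backed kubernetesCredentialsLoader above.
@Bean
public AbstractCredentialsLoader<KubernetesNamedAccountCredentials> customKubernetesCredentialsLoader(
    CredentialsParser<ManagedAccount, KubernetesNamedAccountCredentials> parser,
    CredentialsRepository<KubernetesNamedAccountCredentials> repository) {
  return new BasicCredentialsLoader<>(java.util.List::of, parser, repository); // no accounts
}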
diff --git a/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesCustomBinderConfiguration.java b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesCustomBinderConfiguration.java
new file mode 100644
index 00000000000..4d1daa5f2cf
--- /dev/null
+++ b/clouddriver-kubernetes/src/main/java/com/netflix/spinnaker/config/KubernetesCustomBinderConfiguration.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.config;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesCustomAccountConfigurationProvider;
+import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService;
+import com.netflix.spinnaker.kork.secrets.SecretManager;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.cloud.context.config.annotation.RefreshScope;
+import org.springframework.context.ConfigurableApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@ConditionalOnProperty({"kubernetes.enabled", "kubernetes.custom-property-binding-enabled"})
+public class KubernetesCustomBinderConfiguration {
+
+  @Bean
+  public KubernetesCustomAccountConfigurationProvider kubernetesCustomAccountConfigurationProvider(
+      ConfigurableApplicationContext context,
+      CloudConfigResourceService configResourceService,
+      SecretManager secretManager) {
+    return new KubernetesCustomAccountConfigurationProvider(
+        context, configResourceService, secretManager);
+  }
+
+  @Bean
+  @RefreshScope
+  public KubernetesAccountProperties kubernetesAccountProperties(
+      KubernetesCustomAccountConfigurationProvider kubernetesCustomAccountConfigurationProvider) {
+    return kubernetesCustomAccountConfigurationProvider.getConfigurationProperties();
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KeysSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KeysSpec.groovy
new file mode 100644
index 00000000000..58ec3583460
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/KeysSpec.groovy
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching
+
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind
+import spock.lang.Specification
+import spock.lang.Unroll
+
+/**
+ * WARNING: if you're modifying these tests due to a key format change, you're likely
+ * breaking all users' infrastructure caches. If this is intentional, keep in mind
+ * that every user will have to flush Redis to get clouddriver to run correctly
+ */
+class KeysSpec extends Specification {
+  @Unroll
+  def "produces correct app keys #key"() {
+    expect:
+    Keys.ApplicationCacheKey.createKey(application) == key
+
+    where:
+    application || key
+    "app"       || "kubernetes.v2:logical:applications:app"
+    ""          || "kubernetes.v2:logical:applications:"
+  }
+
+  @Unroll
+  def "produces correct cluster keys #key"() {
+    expect:
+    Keys.ClusterCacheKey.createKey(account, application, cluster) == key
+
+    where:
+    account | application | cluster   || key
+    "ac"    | "app"       | "cluster" || "kubernetes.v2:logical:clusters:ac:app:cluster"
+    ""      | ""          | ""        || "kubernetes.v2:logical:clusters:::"
+  }
+
+  @Unroll
+  def "produces correct infra keys #key"() {
+    expect:
+    Keys.InfrastructureCacheKey.createKey(kind, account, namespace, name) == key
+
+    where:
+    kind                       | apiVersion                   | account | namespace   | name      || key
+    KubernetesKind.REPLICA_SET | KubernetesApiVersion.APPS_V1 | "ac"    | "namespace" | "v1-v000" || "kubernetes.v2:infrastructure:replicaSet:ac:namespace:v1-v000"
+    KubernetesKind.SERVICE     | KubernetesApiVersion.V1      | "ac"    | "namespace" | "v1"      || "kubernetes.v2:infrastructure:service:ac:namespace:v1"
+    KubernetesKind.DEPLOYMENT  | KubernetesApiVersion.APPS_V1 | "ac"    | "namespace" | "v1"      || "kubernetes.v2:infrastructure:deployment:ac:namespace:v1"
+  }
+
+  @Unroll
+  def "unpacks application key for #name"() {
+    when:
+    def key = "kubernetes.v2:logical:applications:$name"
+    def parsed = Keys.parseKey(key).get()
+
+    then:
+    parsed instanceof Keys.ApplicationCacheKey
+    def parsedApplicationKey = (Keys.ApplicationCacheKey) parsed
+    parsedApplicationKey.name == name
+
+    where:
+    name  | unused
+    "app" | ""
+    ""    | ""
+  }
+
+  @Unroll
+  def "unpacks cluster key for '#name' and '#account'"() {
+    when:
+    def key = "kubernetes.v2:logical:clusters:$account:$application:$name"
+    def parsed = Keys.parseKey(key).get()
+
+    then:
+    parsed instanceof Keys.ClusterCacheKey
+    def parsedClusterKey = (Keys.ClusterCacheKey) parsed
+    parsedClusterKey.account == account
+    parsedClusterKey.application == application
+    parsedClusterKey.name == name
+
+    where:
+    account | application | name
+    "ac"    | ""          | "name"
+    ""      | "asdf"      | "sdf"
+    "ac"    | "ll"        | ""
+    ""      | ""          | ""
+  }
+
+  @Unroll
+  def "unpacks infrastructure key for '#kind' and '#version'"() {
+    when:
+    def key = "kubernetes.v2:infrastructure:$kind:$account:$namespace:$name"
+    def parsed = Keys.parseKey(key).get()
+
+    then:
+    parsed instanceof Keys.InfrastructureCacheKey
+    def parsedInfrastructureKey = (Keys.InfrastructureCacheKey) parsed
+    parsedInfrastructureKey.kubernetesKind == kind
+    parsedInfrastructureKey.account == account
+    parsedInfrastructureKey.namespace == namespace
+    parsedInfrastructureKey.name == name
+
+    where:
+    kind                       | version                                   | account   | namespace   | name
+    KubernetesKind.DEPLOYMENT  | KubernetesApiVersion.APPS_V1              | "ac"      | "name"      | "nameer"
+    KubernetesKind.REPLICA_SET | KubernetesApiVersion.APPS_V1              | ""        | ""          | ""
+    KubernetesKind.SERVICE     | KubernetesApiVersion.V1                   | "account" | "namespace" | ""
+    KubernetesKind.INGRESS     | KubernetesApiVersion.EXTENSIONS_V1BETA1   | "ac"      | ""          | "nameer"
+    KubernetesKind.INGRESS     | KubernetesApiVersion.NETWORKING_K8S_IO_V1 | "ac"      | ""          | "nameer"
+  }
+
+  def "correctly unpacks resource names containing a ';' character"() {
+    when:
+    def key = "kubernetes.v2:infrastructure:clusterRole:k8s::system;controller;resourcequota-controller"
+    def parsed = Keys.parseKey(key).get()
+
+    then:
+    parsed instanceof Keys.InfrastructureCacheKey
+    def parsedInfrastructureKey = (Keys.InfrastructureCacheKey) parsed
+    parsedInfrastructureKey.kubernetesKind == KubernetesKind.CLUSTER_ROLE
+    parsedInfrastructureKey.account == "k8s"
+    parsedInfrastructureKey.namespace == ""
+    parsedInfrastructureKey.name == "system:controller:resourcequota-controller"
+  }
+
+  @Unroll
+  def "Kind fromString returns the correct kind"() {
+    expect:
+    result == Keys.Kind.fromString(input)
+
+    where:
+    input            | result
+    "logical"        | Keys.Kind.LOGICAL
+    "LOGICAL"        | Keys.Kind.LOGICAL
+    "lOgiCAl"        | Keys.Kind.LOGICAL
+    "artifacT"       | Keys.Kind.ARTIFACT
+    "InfraStructurE" | Keys.Kind.INFRASTRUCTURE
+  }
+
+  @Unroll
+  def "Kind toString correctly serializes the kind to lowercase"() {
+    expect:
+    result == input.toString()
+
+    where:
+    input                    | result
+    Keys.Kind.LOGICAL        | "logical"
+    Keys.Kind.ARTIFACT       | "artifact"
+    Keys.Kind.INFRASTRUCTURE | "infrastructure"
+  }
+
+  @Unroll
+  def "LogicalKind fromString returns the correct kind"() {
+    expect:
+    result == Keys.LogicalKind.fromString(input)
+
+    where:
+    input          | result
+    "applications" | Keys.LogicalKind.APPLICATIONS
+    "APPLICATIONS" | Keys.LogicalKind.APPLICATIONS
+    "appliCatiOns" | Keys.LogicalKind.APPLICATIONS
+    "clusters"     | Keys.LogicalKind.CLUSTERS
+    "CLUSTERS"     | Keys.LogicalKind.CLUSTERS
+    "clUsTerS"     | Keys.LogicalKind.CLUSTERS
+  }
+
+  @Unroll
+  def "LogicalKind toString correctly serializes the logical kind to lowercase"() {
+    expect:
+    result == input.toString()
+
+    where:
+    input                         | result
+    Keys.LogicalKind.APPLICATIONS | "applications"
+    Keys.LogicalKind.CLUSTERS     | "clusters"
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConvertSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConvertSpec.groovy
new file mode 100644
index 00000000000..00deacb3a05
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConvertSpec.groovy
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent + +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.common.collect.ImmutableList +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesDeploymentHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesReplicaSetHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesServiceHandler +import com.netflix.spinnaker.clouddriver.names.NamerRegistry +import com.netflix.spinnaker.moniker.Moniker +import org.apache.commons.lang3.tuple.Pair +import org.yaml.snakeyaml.Yaml +import org.yaml.snakeyaml.constructor.SafeConstructor +import spock.lang.Specification +import spock.lang.Unroll + +class KubernetesCacheDataConvertSpec extends Specification { + def mapper = new ObjectMapper() + def yaml = new Yaml(new SafeConstructor()) + + KubernetesManifest stringToManifest(String input) { + return mapper.convertValue(yaml.load(input), KubernetesManifest.class) + } + + @Unroll + def "given a correctly annotated manifest, build attributes & infer relationships"() { + setup: + def rawManifest = """ +apiVersion: $apiVersion +kind: $kind +metadata: + name: $name + namespace: $namespace +""" + def moniker = Moniker.builder() + .app(application) + .cluster(cluster) + .build() + + if (account != null) { + NamerRegistry.lookup() + .withProvider(KubernetesCloudProvider.ID) + .withAccount(account) + .setNamer(KubernetesManifest, new KubernetesManifestNamer()) + } + + def manifest = stringToManifest(rawManifest) + KubernetesManifestAnnotater.annotateManifest(manifest, moniker) + + when: + KubernetesCacheData kubernetesCacheData = new KubernetesCacheData() + KubernetesCacheDataConverter.convertAsResource( + kubernetesCacheData, + account, + new KubernetesSpinnakerKindMap(ImmutableList.of(new KubernetesDeploymentHandler(), new KubernetesReplicaSetHandler(), new KubernetesServiceHandler())), + new KubernetesManifestNamer(), + manifest, + [], + false) + def optional = kubernetesCacheData.toCacheData().stream().filter({ + cd -> cd.id == Keys.InfrastructureCacheKey.createKey(kind, account, namespace, name) + }).findFirst() + + then: + if (application == null) { + true + } else { + optional.isPresent() + def cacheData = optional.get() + cacheData.relationships.get(Keys.LogicalKind.APPLICATIONS.toString()) == [Keys.ApplicationCacheKey.createKey(application)] + if (cluster) { + cacheData.relationships.get(Keys.LogicalKind.CLUSTERS.toString()) == [Keys.ClusterCacheKey.createKey(account, application, cluster)] + } else { + cacheData.relationships.get(Keys.LogicalKind.CLUSTERS.toString()) == null + } + 
cacheData.attributes.get("name") == name + cacheData.attributes.get("namespace") == namespace + cacheData.attributes.get("kind") == kind + cacheData.id == Keys.InfrastructureCacheKey.createKey(kind, account, namespace, name) + } + + where: + kind | apiVersion | account | application | cluster | namespace | name + KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "one-app" | "the-cluster" | "some-namespace" | "a-name-v000" + KubernetesKind.DEPLOYMENT | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "one-app" | "the-cluster" | "some-namespace" | "a-name" + KubernetesKind.SERVICE | KubernetesApiVersion.V1 | "another-account" | "your-app" | null | "some-namespace" | "what-name" + } + + @Unroll + void "given an unclassified resource, application relationships are only cached if `cacheAllRelationships` is set: #cacheAllRelationships"() { + setup: + def apiGroup = "any.resource.com" + def kind = "MyCRD" + def qualifiedKind = KubernetesKind.from("MyCRD", KubernetesApiGroup.fromString(apiGroup)) + def name = "my-crd" + def namespace = "my-namespace" + def application = "one-app" + def cluster = "the-cluster" + def account = "my-account" + def rawManifest = """ +apiVersion: ${apiGroup}/v1 +kind: $kind +metadata: + name: $name + namespace: $namespace +""" + + def moniker = Moniker.builder() + .app(application) + .cluster(cluster) + .build() + + NamerRegistry.lookup() + .withProvider(KubernetesCloudProvider.ID) + .withAccount(account) + .setNamer(KubernetesManifest, new KubernetesManifestNamer()) + + def manifest = stringToManifest(rawManifest) + KubernetesManifestAnnotater.annotateManifest(manifest, moniker) + + when: + KubernetesCacheData kubernetesCacheData = new KubernetesCacheData() + KubernetesCacheDataConverter.convertAsResource( + kubernetesCacheData, + account, + new KubernetesSpinnakerKindMap(ImmutableList.of(new KubernetesDeploymentHandler(), new KubernetesReplicaSetHandler(), new KubernetesServiceHandler())), + new KubernetesManifestNamer(), + manifest, + [], + cacheAllRelationships + ) + def optional = kubernetesCacheData.toCacheData().stream().filter({ + cd -> cd.id == Keys.InfrastructureCacheKey.createKey(qualifiedKind, account, namespace, name) + }).findFirst() + + then: + optional.isPresent() + def relationships = optional.get().relationships + def applicationRelationships = relationships.get(Keys.LogicalKind.APPLICATIONS.toString()) + applicationRelationships.equals(cacheAllRelationships ? 
[Keys.ApplicationCacheKey.createKey(application)].toSet() : null);
+
+    where:
+    cacheAllRelationships << [false, true]
+  }
+
+  @Unroll
+  def "given a single owner reference, correctly build relationships"() {
+    setup:
+    def ownerRefs = [new KubernetesManifest.OwnerReference(kind: kind, apiVersion: apiVersion, name: name)]
+
+    when:
+    def result = KubernetesCacheDataConverter.ownerReferenceRelationships(account, namespace, ownerRefs)
+
+    then:
+    result.contains(new Keys.InfrastructureCacheKey(kind, account, namespace, name))
+
+    where:
+    kind                       | apiVersion                              | account           | cluster       | namespace        | name
+    KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account"      | "another-clu" | "some-namespace" | "a-name-v000"
+    KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account"      | "the-cluster" | "some-namespace" | "a-name-v000"
+    KubernetesKind.DEPLOYMENT  | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account"      | "the-cluster" | "some-namespace" | "a-name"
+    KubernetesKind.SERVICE     | KubernetesApiVersion.V1                 | "another-account" | "cluster"     | "some-namespace" | "what-name"
+  }
+
+  def containerMetric(String containerName) {
+    return new KubernetesPodMetric.ContainerMetric(containerName, [
+      "CPU(cores)": "10m",
+      "MEMORY(bytes)": "2Mi"
+    ])
+  }
+
+  def filterRelationships(Collection<String> keys, List<Pair<KubernetesKind, String>> existingResources) {
+    return keys.findAll { sk ->
+      def key = (Keys.InfrastructureCacheKey) Keys.parseKey(sk).get()
+      return existingResources.find { Pair lb ->
+        return lb.getLeft() == key.getKubernetesKind() && lb.getRight() == key.getName()
+      } != null
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataSpec.groovy
new file mode 100644
index 00000000000..6fe28dd8174
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataSpec.groovy
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent
+
+import com.google.common.collect.ImmutableMap
+import com.netflix.spinnaker.cats.cache.CacheData
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind
+import spock.lang.Specification
+
+class KubernetesCacheDataSpec extends Specification {
+  private static final String ACCOUNT = "my-account"
+  private static final String NAMESPACE = "my-namespace"
+  private static final Keys.CacheKey REPLICA_SET_KEY = new Keys.InfrastructureCacheKey(KubernetesKind.REPLICA_SET, ACCOUNT, NAMESPACE, "testing")
+  private static final Keys.CacheKey OTHER_REPLICA_SET_KEY = new Keys.InfrastructureCacheKey(KubernetesKind.REPLICA_SET, ACCOUNT, NAMESPACE, "other-key")
+  private static final Keys.CacheKey APPLICATION_KEY = new Keys.ApplicationCacheKey("app")
+
+  def "returns an empty collection when no entries are added"() {
+    given:
+    KubernetesCacheData kubernetesCacheData = new KubernetesCacheData()
+
+    when:
+    Collection<CacheData> cacheData = kubernetesCacheData.toCacheData()
+
+    then:
+    cacheData.isEmpty()
+  }
+
+  def "correctly caches a single item"() {
+    given:
+    KubernetesCacheData kubernetesCacheData = new KubernetesCacheData()
+    Map<String, Object> attributes = new ImmutableMap.Builder<String, Object>().put("key", "value").build();
+
+    when:
+    kubernetesCacheData.addItem(REPLICA_SET_KEY, attributes)
+    Collection<CacheData> cacheData = kubernetesCacheData.toCacheData()
+
+    then:
+    cacheData.size() == 1
+
+    def optionalData = cacheData.stream().filter({cd -> cd.id == REPLICA_SET_KEY.toString()}).findFirst()
+    optionalData.isPresent()
+    optionalData.get().attributes == attributes
+
+    // Ensure that we have explicitly added an empty list of relationships for "sticky" kinds
+    def relationships = optionalData.get().getRelationships()
+    KubernetesCacheDataConverter.getStickyKinds().forEach({kind ->
+      relationships.get(kind.toString()) == []
+    })
+  }
+
+  def "correctly merges new attributes when adding the same key twice"() {
+    given:
+    KubernetesCacheData kubernetesCacheData = new KubernetesCacheData()
+    Map<String, Object> oldAttributes = new ImmutableMap.Builder<String, Object>()
+      .put("key1", "oldvalue1")
+      .put("key2", "oldvalue2")
+      .build();
+    Map<String, Object> newAttributes = new ImmutableMap.Builder<String, Object>()
+      .put("key2", "newvalue2")
+      .put("key3", "newvalue3")
+      .build();
+
+    when:
+    kubernetesCacheData.addItem(REPLICA_SET_KEY, oldAttributes)
+    kubernetesCacheData.addItem(REPLICA_SET_KEY, newAttributes)
+    Collection<CacheData> cacheData = kubernetesCacheData.toCacheData()
+
+    then:
+    cacheData.size() == 1
+
+    def optionalData = cacheData.stream().filter({cd -> cd.id == REPLICA_SET_KEY.toString()}).findFirst()
+    optionalData.isPresent()
+    optionalData.get().attributes == [
+      "key1" : "oldvalue1",
+      "key2" : "newvalue2",
+      "key3" : "newvalue3"
+    ]
+  }
+
+  def "correctly creates a bidirectional application relationship"() {
+    given:
+    KubernetesCacheData kubernetesCacheData = new KubernetesCacheData()
+    Map<String, Object> attributes = new ImmutableMap.Builder<String, Object>().put("key", "value").build();
+
+    when:
+    kubernetesCacheData.addItem(REPLICA_SET_KEY, attributes)
+    kubernetesCacheData.addRelationship(REPLICA_SET_KEY, APPLICATION_KEY)
+    Collection<CacheData> cacheData = kubernetesCacheData.toCacheData()
+
+    then:
+    cacheData.size() == 2
+
+    def replicaSet = cacheData.stream().filter({cd -> cd.id == REPLICA_SET_KEY.toString()}).findFirst().get()
+    replicaSet.attributes == attributes
+    def replicaSetRelationships = replicaSet.relationships.get("applications") as Collection<String>
+    replicaSetRelationships.size() == 1
+    replicaSetRelationships.contains(APPLICATION_KEY.toString())
+
+    def application = cacheData.stream().filter({cd -> cd.id == APPLICATION_KEY.toString()}).findFirst().get()
+    // Ensure that the default "name" key was added to the logical key
+    application.attributes.get("name") == "app"
+    def applicationRelationships = application.relationships.get("replicaSet") as Collection<String>
+    applicationRelationships.size() == 1
+    applicationRelationships.contains(REPLICA_SET_KEY.toString())
+  }
+
+  def "correctly groups cache data items"() {
+    given:
+    KubernetesCacheData kubernetesCacheData = new KubernetesCacheData()
+    Map<String, Object> attributes = new ImmutableMap.Builder<String, Object>().put("key", "value").build();
+    Map<String, Object> otherAttributes = new ImmutableMap.Builder<String, Object>().put("otherKey", "otherValue").build();
+
+    when:
+    kubernetesCacheData.addItem(REPLICA_SET_KEY, attributes)
+    kubernetesCacheData.addItem(OTHER_REPLICA_SET_KEY, otherAttributes)
+    kubernetesCacheData.addRelationship(REPLICA_SET_KEY, APPLICATION_KEY)
+    Map<String, Collection<CacheData>> cacheData = kubernetesCacheData.toStratifiedCacheData()
+    def replicaSetData = cacheData.get(REPLICA_SET_KEY.getGroup())
+    def applicationData = cacheData.get(APPLICATION_KEY.getGroup())
+
+    then:
+    replicaSetData.size() == 2
+    def replicaSet = replicaSetData.stream().filter({cd -> cd.id == REPLICA_SET_KEY.toString()}).findFirst().get()
+    replicaSet.attributes == attributes
+    def otherReplicaSet = replicaSetData.stream().filter({cd -> cd.id == OTHER_REPLICA_SET_KEY.toString()}).findFirst().get()
+    otherReplicaSet.attributes == otherAttributes
+
+    def application = applicationData.stream().filter({cd -> cd.id == APPLICATION_KEY.toString()}).findFirst().get()
+    application.attributes.get("name") == "app"
+  }
+
+  def "omits infrastructure keys without attributes from returned cache data"() {
+    given:
+    KubernetesCacheData kubernetesCacheData = new KubernetesCacheData()
+    Map<String, Object> attributes = new ImmutableMap.Builder<String, Object>().put("key", "value").build();
+    Map<String, Object> emptyAttributes = new ImmutableMap.Builder<String, Object>().build();
+
+    when:
+    kubernetesCacheData.addItem(REPLICA_SET_KEY, attributes)
+    kubernetesCacheData.addItem(OTHER_REPLICA_SET_KEY, emptyAttributes)
+    Collection<CacheData> cacheData = kubernetesCacheData.toCacheData()
+    Map<String, Collection<CacheData>> stratifiedCacheData = kubernetesCacheData.toStratifiedCacheData()
+
+    then:
+    cacheData.size() == 1
+    def replicaSet = cacheData.stream().filter({cd -> cd.id == REPLICA_SET_KEY.toString()}).findFirst().get()
+    replicaSet.attributes == attributes
+
+    def replicaSetData = stratifiedCacheData.get(REPLICA_SET_KEY.getGroup())
+    replicaSetData.size() == 1
+    def groupedReplicaSet = replicaSetData.stream().filter({cd -> cd.id == REPLICA_SET_KEY.toString()}).findFirst().get()
+    groupedReplicaSet.attributes == attributes
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesReplicaSetCachingAgentSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesReplicaSetCachingAgentSpec.groovy
similarity index 89%
rename from clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesReplicaSetCachingAgentSpec.groovy
rename to clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesReplicaSetCachingAgentSpec.groovy
index 2b1d0233916..ca7fc62830d 100644
--- 
a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesReplicaSetCachingAgentSpec.groovy +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesReplicaSetCachingAgentSpec.groovy @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +15,12 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials import spock.lang.Specification import spock.lang.Unroll @@ -34,7 +34,7 @@ class KubernetesReplicaSetCachingAgentSpec extends Specification { @Unroll void "merges two cache data"() { when: - def credentials = Mock(KubernetesV2Credentials) + def credentials = Mock(KubernetesCredentials) credentials.getDeclaredNamespaces() >> [NAMESPACE] def namedAccountCredentials = Mock(KubernetesNamedAccountCredentials) diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesAccountResolverSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesAccountResolverSpec.groovy new file mode 100644 index 00000000000..ea367788069 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesAccountResolverSpec.groovy @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider + + +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.credentials.CredentialsRepository +import spock.lang.Specification + +class KubernetesAccountResolverSpec extends Specification { + String ACCOUNT_NAME = "test" + CredentialsRepository credentialsRepository = Mock(CredentialsRepository) + ResourcePropertyRegistry globalResourcePropertyRegistry = Mock(GlobalResourcePropertyRegistry) + + void "returns an account in the repository if and only if it is a kubernetes account"() { + given: + KubernetesAccountResolver accountResolver = new KubernetesAccountResolver(credentialsRepository, globalResourcePropertyRegistry) + KubernetesCredentials kubernetesCredentials = Mock(KubernetesCredentials) + Optional credentials + + when: + credentials = accountResolver.getCredentials(ACCOUNT_NAME) + + then: + 1 * credentialsRepository.getOne(ACCOUNT_NAME) >> Mock(KubernetesNamedAccountCredentials) { + getCredentials() >> kubernetesCredentials + } + credentials.isPresent() + credentials.get() == kubernetesCredentials + + when: + credentials = accountResolver.getCredentials(ACCOUNT_NAME) + + then: + 1 * credentialsRepository.getOne(ACCOUNT_NAME) >> Mock(KubernetesNamedAccountCredentials) + !credentials.isPresent() + + when: + credentials = accountResolver.getCredentials(ACCOUNT_NAME) + + then: + 1 * credentialsRepository.getOne(ACCOUNT_NAME) >> null + !credentials.isPresent() + } + + void "returns the account's property registry, falling back to the global registry"() { + given: + KubernetesAccountResolver accountResolver = new KubernetesAccountResolver(credentialsRepository, globalResourcePropertyRegistry) + ResourcePropertyRegistry resourcePropertyRegistry = Mock(ResourcePropertyRegistry) + ResourcePropertyRegistry registry + + when: + registry = accountResolver.getResourcePropertyRegistry(ACCOUNT_NAME) + + then: + 1 * credentialsRepository.getOne(ACCOUNT_NAME) >> Mock(KubernetesNamedAccountCredentials) { + getCredentials() >> Mock(KubernetesCredentials) { + getResourcePropertyRegistry() >> resourcePropertyRegistry + } + } + registry == resourcePropertyRegistry + + when: + registry = accountResolver.getResourcePropertyRegistry(ACCOUNT_NAME) + + then: + 1 * credentialsRepository.getOne(ACCOUNT_NAME) >> Mock(KubernetesNamedAccountCredentials) { + getCredentials() >> Mock(KubernetesCredentials) + } + registry == globalResourcePropertyRegistry + + when: + registry = accountResolver.getResourcePropertyRegistry(ACCOUNT_NAME) + + then: + 1 * credentialsRepository.getOne(ACCOUNT_NAME) >> null + registry == globalResourcePropertyRegistry + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/AccountResourcePropertyRegistrySpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/AccountResourcePropertyRegistrySpec.groovy new file mode 100644 index 00000000000..538e0f0a125 --- /dev/null +++ 
b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/AccountResourcePropertyRegistrySpec.groovy @@ -0,0 +1,68 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description + +import com.google.common.collect.ImmutableList +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesReplicaSetHandler +import spock.lang.Specification + +class AccountResourcePropertyRegistrySpec extends Specification { + void "returns account-specific properties when defined"() { + given: + def replicaSetProperties = new KubernetesResourceProperties(new KubernetesReplicaSetHandler(), true) + def globalResourcePropertyRegistry = Mock(GlobalResourcePropertyRegistry) { + values() >> ImmutableList.of() + get(_ as KubernetesKind) >> KubernetesKind.NONE + } + def factory = new AccountResourcePropertyRegistry.Factory(globalResourcePropertyRegistry) + + when: + AccountResourcePropertyRegistry registry = factory.create([]) + + then: + registry instanceof AccountResourcePropertyRegistry + registry.values().isEmpty() + + when: + registry = factory.create([replicaSetProperties]) + + then: + registry.values().size() == 1 + registry.get(KubernetesKind.REPLICA_SET) == replicaSetProperties + } + + void "returns global properties when account-specific properties are not defined"() { + given: + def properties = new KubernetesResourceProperties(Mock(KubernetesHandler), true) + def globalResourcePropertyRegistry = Mock(GlobalResourcePropertyRegistry) { + values() >> ImmutableList.of(properties) + get(KubernetesKind.DEPLOYMENT) >> properties + } + def factory = new AccountResourcePropertyRegistry.Factory(globalResourcePropertyRegistry) + + when: + AccountResourcePropertyRegistry registry = factory.create([]) + + then: + registry instanceof AccountResourcePropertyRegistry + registry.values().size() == 1 + registry.get(KubernetesKind.DEPLOYMENT) == properties + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/GlobalResourcePropertyRegistrySpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/GlobalResourcePropertyRegistrySpec.groovy new file mode 100644 index 00000000000..0fe03471ca4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/GlobalResourcePropertyRegistrySpec.groovy @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description + +import com.google.common.collect.ImmutableList +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesCustomResourceHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesReplicaSetHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler +import spock.lang.Specification + +class GlobalResourcePropertyRegistrySpec extends Specification { + KubernetesUnregisteredCustomResourceHandler defaultHandler = new KubernetesUnregisteredCustomResourceHandler() + void "creates an empty resource map"() { + given: + def replicaSetHandler = new KubernetesReplicaSetHandler() + + when: + GlobalResourcePropertyRegistry registry = new GlobalResourcePropertyRegistry(ImmutableList.of(), defaultHandler) + + then: + registry instanceof GlobalResourcePropertyRegistry + registry.values().isEmpty() + + when: + registry = new GlobalResourcePropertyRegistry([replicaSetHandler], defaultHandler) + + then: + registry.values().size() == 1 + registry.get(KubernetesKind.REPLICA_SET).getHandler() == replicaSetHandler + registry.get(KubernetesKind.REPLICA_SET).isVersioned() == replicaSetHandler.versioned() + } + + void "defaults to the default handler if no handler is specified"() { + when: + GlobalResourcePropertyRegistry registry = new GlobalResourcePropertyRegistry([], defaultHandler) + + then: + registry.values().isEmpty() + registry.get(KubernetesKind.REPLICA_SET).getHandler() == defaultHandler + registry.get(KubernetesKind.REPLICA_SET).isVersioned() == defaultHandler.versioned() + } + + void "registers handlers passed to the constructor"() { + given: + def unregisteredHandler = new KubernetesUnregisteredCustomResourceHandler() + def replicaSetHandler = new KubernetesReplicaSetHandler() + + when: + GlobalResourcePropertyRegistry registry = new GlobalResourcePropertyRegistry([unregisteredHandler, replicaSetHandler], defaultHandler) + + then: + registry.values().size() == 2 + registry.get(KubernetesKind.NONE).getHandler() == unregisteredHandler + registry.get(KubernetesKind.REPLICA_SET).getHandler() == replicaSetHandler + } + + void "is aware of custom resources only after updateCrdProperties has been called"() { + given: + KubernetesKind customResource = KubernetesKind.from("MyCRD", KubernetesApiGroup.fromString("foo.com")) // arbitrary custom/non-native kind + KubernetesHandler customResourceHandler = new KubernetesCustomResourceHandler(customResource) + + when: + GlobalResourcePropertyRegistry registry = new GlobalResourcePropertyRegistry([], defaultHandler) + + then: + registry.get(customResource).getHandler() == defaultHandler + + when: + registry.updateCrdProperties([customResourceHandler]) + + then: + registry.get(customResource).getHandler() == 
customResourceHandler + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesApiGroupSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesApiGroupSpec.groovy new file mode 100644 index 00000000000..dd51f8c3b60 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesApiGroupSpec.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup +import spock.lang.Specification +import spock.lang.Unroll + +class KubernetesApiGroupSpec extends Specification { + @Unroll + void "creates built-in API groups by name"() { + when: + def apiGroup = KubernetesApiGroup.fromString(name) + + then: + apiGroup.equals(expectedApiGroup) + + where: + name | expectedApiGroup + null | KubernetesApiGroup.NONE + "" | KubernetesApiGroup.NONE + "batch" | KubernetesApiGroup.BATCH + "BATCH" | KubernetesApiGroup.BATCH + "settings.k8s.io" | KubernetesApiGroup.SETTINGS_K8S_IO + "seTtiNgs.k8S.IO" | KubernetesApiGroup.SETTINGS_K8S_IO + } + + @Unroll + void "creates custom API groups"() { + when: + def apiGroup = KubernetesApiGroup.fromString(name) + + then: + noExceptionThrown() + apiGroup.toString() == expectedName + + where: + name | expectedName + "test.api.group" | "test.api.group" + "TEST.api.Group" | "test.api.group" + } + + @Unroll + void "returns whether an API group is a native group"() { + when: + def apiGroup = KubernetesApiGroup.fromString(name) + + then: + apiGroup.isNativeGroup() == isNative + + where: + name | isNative + "test.api.group" | false + "batch" | true + "apps" | true + "" | true + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesApiVersionSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesApiVersionSpec.groovy new file mode 100644 index 00000000000..ed89018c309 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesApiVersionSpec.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description
+
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion
+import spock.lang.Specification
+import spock.lang.Unroll
+
+class KubernetesApiVersionSpec extends Specification {
+ @Unroll
+ void "creates built-in API versions by name"() {
+ when:
+ def apiVersion = KubernetesApiVersion.fromString(name)
+
+ then:
+ apiVersion.equals(expectedApiVersion)
+
+ where:
+ name | expectedApiVersion
+ null | KubernetesApiVersion.NONE
+ "" | KubernetesApiVersion.NONE
+ "v1" | KubernetesApiVersion.V1
+ "networking.k8s.io/v1beta1" | KubernetesApiVersion.NETWORKING_K8S_IO_V1BETA1
+ "neTwoRkiNG.k8s.io/v1beTA1" | KubernetesApiVersion.NETWORKING_K8S_IO_V1BETA1
+ }
+
+ @Unroll
+ void "creates custom API versions"() {
+ when:
+ def apiVersion = KubernetesApiVersion.fromString(name)
+
+ then:
+ noExceptionThrown()
+ apiVersion.toString() == expectedName
+
+ where:
+ name | expectedName
+ "test.api.group" | "test.api.group"
+ "test.api.group/version" | "test.api.group/version"
+ }
+
+ @Unroll
+ void "correctly parses the group from the version"() {
+ when:
+ def apiVersion = KubernetesApiVersion.fromString(name)
+
+ then:
+ apiVersion.getApiGroup().equals(expectedGroup)
+
+ where:
+ name | expectedGroup
+ null | KubernetesApiGroup.NONE
+ "" | KubernetesApiGroup.NONE
+ "test.api.group" | KubernetesApiGroup.NONE
+ "test.api.group/version" | KubernetesApiGroup.fromString("test.api.group")
+ "apps/v1" | KubernetesApiGroup.APPS
+ }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescriptionSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescriptionSpec.groovy
new file mode 100644
index 00000000000..0b6a58397a0
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesAtomicOperationDescriptionSpec.groovy
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description
+
+import com.fasterxml.jackson.databind.DeserializationFeature
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.databind.SerializationFeature
+import spock.lang.Specification
+
+class KubernetesAtomicOperationDescriptionSpec extends Specification {
+ // Lenient mapper: empty beans serialize without error and unknown JSON properties are ignored.
+ def objectMapper = new ObjectMapper()
+ .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+
+ def "correctly deserializes the account field"() {
+ when:
+ def accountName = "my-k8s-account"
+ def input = [
+ account: accountName
+ ]
+ def output = objectMapper.convertValue(input, KubernetesAtomicOperationDescription.class)
+
+ then:
+ output.getAccount() == accountName
+ }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesManifestAnnotatorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesManifestAnnotatorSpec.groovy
new file mode 100644
index 00000000000..bd26630e842
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesManifestAnnotatorSpec.groovy
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2017 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestTraffic +import com.netflix.spinnaker.moniker.Moniker +import spock.lang.Specification +import spock.lang.Unroll + +class KubernetesManifestAnnotatorSpec extends Specification { + def clusterKey = "moniker.spinnaker.io/cluster" + def applicationKey = "moniker.spinnaker.io/application" + + private KubernetesManifest freshManifest() { + def result = new KubernetesManifest() + result.put("kind", "replicaSet") + result.put("apiVersion", KubernetesApiVersion.V1.toString()) + result.put("metadata", ["annotations": [:]]) + return result + } + + @Unroll + void "manifests are annotated and deannotated symmetrically"() { + expect: + def manifest = freshManifest() + def moniker = Moniker.builder() + .cluster(cluster) + .app(application) + .build() + + KubernetesManifestAnnotater.annotateManifest(manifest, moniker) + moniker == KubernetesManifestAnnotater.getMoniker(manifest) + + where: + loadBalancers | securityGroups | cluster | application + [] | [] | "" | "" + [] | [] | " " | "" + null | null | null | null + [] | null | "" | null + ["lb"] | ["sg"] | "" | null + ["lb1", "lb2"] | ["sg"] | "x" | "my app" + ["lb1", "lb2"] | null | null | null + null | ["x1, x2", "x3"] | null | null + ["1"] | ["1"] | "1" | "1" + } + + @Unroll + void "manifests are annotated with the expected prefix"() { + expect: + def manifest = freshManifest() + def moniker = Moniker.builder() + .cluster(cluster) + .app(application) + .build() + + KubernetesManifestAnnotater.annotateManifest(manifest, moniker) + manifest.getAnnotations().get(clusterKey) == cluster + manifest.getAnnotations().get(applicationKey) == application + + where: + cluster | application + "" | "" + "c" | "a" + "" | "a" + + } + + void "setTraffic correctly sets traffic on a manifest without traffic defined"() { + given: + def manifest = freshManifest() + def traffic = new KubernetesManifestTraffic(["service my-service"]) + + when: + KubernetesManifestAnnotater.setTraffic(manifest, traffic) + + then: + KubernetesManifestAnnotater.getTraffic(manifest) == traffic + } + + void "setTraffic is a no-op if the new traffic is equal to the existing traffic"() { + given: + def manifest + def traffic = new KubernetesManifestTraffic(loadBalancers) + + when: + manifest = freshManifest() + KubernetesManifestAnnotater.setTraffic(manifest, traffic) + KubernetesManifestAnnotater.setTraffic(manifest, traffic) + + then: + KubernetesManifestAnnotater.getTraffic(manifest) == traffic + + where: + loadBalancers << [ + [], + ["service my-service"], + ["service my-service", "service my-other-service"] + ] + } + + void "setTraffic fails if the new traffic is not equal to the existing traffic"() { + given: + def manifest + def existingTraffic = new KubernetesManifestTraffic(existingLoadBalancers) + def newTraffic = new KubernetesManifestTraffic(newLoadBalancers) + + when: + manifest = freshManifest() + KubernetesManifestAnnotater.setTraffic(manifest, existingTraffic) + KubernetesManifestAnnotater.setTraffic(manifest, newTraffic) + + then: + thrown(Exception) + + where: + existingLoadBalancers | newLoadBalancers + [] | ["service 
my-service"] + ["service my-service"] | [] + ["service my-service"] | ["service my-other-service"] + ["service my-service"] | ["service my-service", "service my-other-service"] + ["service my-service", "service my-other-service"] | ["service my-other-service", "service my-service"] + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesManifestSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesManifestSpec.groovy new file mode 100644 index 00000000000..754574411b3 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesManifestSpec.groovy @@ -0,0 +1,132 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description + +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.gson.Gson +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest +import groovy.text.SimpleTemplateEngine +import spock.lang.Specification + +class KubernetesManifestSpec extends Specification { + def objectMapper = new ObjectMapper() + + def gsonObj = new Gson() + def NAME = "my-name" + def NAMESPACE = "my-namespace" + def KIND = KubernetesKind.REPLICA_SET + def API_VERSION = KubernetesApiVersion.EXTENSIONS_V1BETA1 + def KEY = "hi" + def VALUE = "there" + def CRD_NAME = "default-custom1" + def CRD_KIND = "Custom1" + def CRD_API_VERSION = "test.example/v1alpha1" + + String basicManifestSource() { + def sourceJson = KubernetesManifest.class.getResource("manifest.json").getText("utf-8") + def templateEngine = new SimpleTemplateEngine() + def binding = [ + "name": getNAME(), + "namespace": getNAMESPACE(), + "api_version": getAPI_VERSION(), + "key": getKEY(), + "value": getVALUE(), + "kind": getKIND() + ] + def template = templateEngine.createTemplate(sourceJson).make(binding) + return template.toString() + } + + + KubernetesManifest objectToManifest(Object input) { + return objectMapper.convertValue(input, KubernetesManifest) + } + + void "correctly reads fields from basic manifest definition"() { + when: + def testPayload = gsonObj.fromJson(basicManifestSource(), Object) + KubernetesManifest manifest = objectToManifest(testPayload) + + then: + manifest.getName() == NAME + manifest.getNamespace() == NAMESPACE + manifest.getKind() == KIND + manifest.getApiVersion() == API_VERSION + manifest.getSpecTemplateAnnotations().get().get(KEY) == VALUE + } + + void "correctly handles a change to the manifest's kind"() { + when: + def testPayload = gsonObj.fromJson(basicManifestSource(), Object) + KubernetesManifest manifest = objectToManifest(testPayload) + + then: + manifest.getKind() == 
KIND
+
+ when:
+ manifest.setKind(KubernetesKind.DEPLOYMENT)
+
+ then:
+ manifest.getKind() == KubernetesKind.DEPLOYMENT
+ }
+
+ void "correctly handles a change to the manifest's API group"() {
+ when:
+ def testPayload = gsonObj.fromJson(basicManifestSource(), Object)
+ KubernetesManifest manifest = objectToManifest(testPayload)
+
+ then:
+ manifest.getApiVersion() == KubernetesApiVersion.EXTENSIONS_V1BETA1
+
+ when:
+ manifest.setApiVersion(KubernetesApiVersion.NETWORKING_K8S_IO_V1)
+
+ then:
+ manifest.getApiVersion() == KubernetesApiVersion.NETWORKING_K8S_IO_V1
+ }
+
+ void "correctly handles a crd with custom cases"() {
+ when:
+ def sourceJson = KubernetesManifest.class.getResource("crd-manifest-spec.json").getText("utf-8")
+ def testPayload = gsonObj.fromJson(sourceJson, Object)
+ KubernetesManifest manifest = objectToManifest(testPayload)
+
+ then:
+ manifest.getName() == CRD_NAME
+ manifest.getKindName() == CRD_KIND
+ manifest.getApiVersion().toString() == CRD_API_VERSION
+ manifest.getSpecTemplateAnnotations() == Optional.empty()
+ }
+
+ void "correctly reads fields from a custom resource where spec is a list"() {
+ when:
+ def sourceJson = KubernetesManifest.class.getResource("crd-manifest-spec-is-list.json").getText("utf-8")
+ def testPayload = gsonObj.fromJson(sourceJson, Object)
+ KubernetesManifest manifest = objectToManifest(testPayload)
+
+ then:
+ manifest.getName() == CRD_NAME
+ manifest.getKindName() == CRD_KIND
+ manifest.getApiVersion().toString() == CRD_API_VERSION
+ manifest.getReplicas() == null
+ manifest.getSpecTemplateLabels() == Optional.empty()
+ manifest.getSpecTemplateAnnotations() == Optional.empty()
+ }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesSpinnakerKindMapSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesSpinnakerKindMapSpec.groovy
new file mode 100644
index 00000000000..bc5687309ca
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesSpinnakerKindMapSpec.groovy
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description + +import com.google.common.collect.ImmutableSet +import com.netflix.spinnaker.clouddriver.kubernetes.description.SpinnakerKind +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler +import spock.lang.Specification + +class KubernetesSpinnakerKindMapSpec extends Specification { + void "the kind map is properly initialized"() { + given: + def mockHandler = Mock(KubernetesHandler) { + spinnakerKind() >> SpinnakerKind.INSTANCES + kind() >> KubernetesKind.REPLICA_SET + } + + when: + def kindMap = new KubernetesSpinnakerKindMap([mockHandler]) + + then: + kindMap.translateSpinnakerKind(SpinnakerKind.INSTANCES) == ImmutableSet.of(KubernetesKind.REPLICA_SET) + kindMap.translateKubernetesKind(KubernetesKind.REPLICA_SET) == SpinnakerKind.INSTANCES + } + + void "the kind map properly groups kinds"() { + when: + def kindMap = new KubernetesSpinnakerKindMap([ + Mock(KubernetesHandler) { + spinnakerKind() >> SpinnakerKind.INSTANCES + kind() >> KubernetesKind.REPLICA_SET + }, + Mock(KubernetesHandler) { + spinnakerKind() >> SpinnakerKind.INSTANCES + kind() >> KubernetesKind.DEPLOYMENT + }, + Mock(KubernetesHandler) { + spinnakerKind() >> SpinnakerKind.LOAD_BALANCERS + kind() >> KubernetesKind.SERVICE + } + ]) + + then: + kindMap.translateSpinnakerKind(SpinnakerKind.INSTANCES) == ImmutableSet.of(KubernetesKind.REPLICA_SET, KubernetesKind.DEPLOYMENT) + kindMap.translateSpinnakerKind(SpinnakerKind.LOAD_BALANCERS) == ImmutableSet.of(KubernetesKind.SERVICE) + kindMap.translateKubernetesKind(KubernetesKind.REPLICA_SET) == SpinnakerKind.INSTANCES + kindMap.translateKubernetesKind(KubernetesKind.DEPLOYMENT) == SpinnakerKind.INSTANCES + kindMap.translateKubernetesKind(KubernetesKind.SERVICE) == SpinnakerKind.LOAD_BALANCERS + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindPropertiesSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindPropertiesSpec.groovy new file mode 100644 index 00000000000..61df6860bcc --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindPropertiesSpec.groovy @@ -0,0 +1,105 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest + + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinition +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionBuilder +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionNamesBuilder +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionSpecBuilder +import spock.lang.Specification +import spock.lang.Unroll + +class KubernetesKindPropertiesSpec extends Specification { + def "creates and returns the supplied properties"() { + when: + def properties = KubernetesKindProperties.create(KubernetesKind.REPLICA_SET, true) + + then: + properties.getKubernetesKind() == KubernetesKind.REPLICA_SET + properties.isNamespaced() + + when: + properties = KubernetesKindProperties.create(KubernetesKind.REPLICA_SET, false) + + then: + properties.getKubernetesKind() == KubernetesKind.REPLICA_SET + !properties.isNamespaced() + } + + def "sets default properties to the expected values"() { + when: + def properties = KubernetesKindProperties.withDefaultProperties(KubernetesKind.REPLICA_SET) + + then: + properties.isNamespaced() + } + + def "returns expected results for built-in kinds"() { + when: + def defaultProperties = KubernetesKindProperties.getGlobalKindProperties() + def replicaSetProperties = defaultProperties.stream() + .filter({p -> p.getKubernetesKind().equals(KubernetesKind.REPLICA_SET)}) + .findFirst() + def namespaceProperties = defaultProperties.stream() + .filter({p -> p.getKubernetesKind().equals(KubernetesKind.NAMESPACE)}) + .findFirst() + + then: + replicaSetProperties.isPresent() + replicaSetProperties.get().isNamespaced() + + namespaceProperties.isPresent() + !namespaceProperties.get().isNamespaced() + } + + @Unroll + void "creates properties from a custom resource definition spec"() { + when: + def kind = "TestKind" + def group = "stable.example.com" + V1beta1CustomResourceDefinition crd = + new V1beta1CustomResourceDefinitionBuilder() + .withSpec( + new V1beta1CustomResourceDefinitionSpecBuilder() + .withNames( + new V1beta1CustomResourceDefinitionNamesBuilder().withKind(kind).build()) + .withGroup(group) + .withScope(scope) + .build()) + .build() + def kindProperties = KubernetesKindProperties.fromCustomResourceDefinition(crd) + + then: + kindProperties.getKubernetesKind().equals(KubernetesKind.from(kind, KubernetesApiGroup.fromString(group))) + kindProperties.isNamespaced() == expectedNamespaced + + where: + scope | expectedNamespaced + "namespaced" | true + "Namespaced" | true + "NAMESPACED" | true + "nAmESpaceD" | true + "" | false + "cluster" | false + "Cluster" | false + "hello" | false + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindSpec.groovy new file mode 100644 index 00000000000..b9c53ebcb90 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesKindSpec.groovy @@ -0,0 +1,216 @@ +/* + * Copyright 2019 Google, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinition +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionBuilder +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionNames +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionNamesBuilder +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionSpec +import io.kubernetes.client.openapi.models.V1beta1CustomResourceDefinitionSpecBuilder +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +class KubernetesKindSpec extends Specification { + @Shared ObjectMapper objectMapper = new ObjectMapper() + @Shared KubernetesKind CUSTOM_RESOURCE_KIND = KubernetesKind.from("deployment", KubernetesApiGroup.fromString("stable.example.com")) + + @Unroll + void "creates built-in API kinds by name"() { + when: + def kind = KubernetesKind.fromString(name) + + then: + kind.equals(expectedKind) + + where: + name | expectedKind + "" | KubernetesKind.NONE + "replicaSet" | KubernetesKind.REPLICA_SET + "replicaSet.extensions" | KubernetesKind.REPLICA_SET + "replicaSet.apps" | KubernetesKind.REPLICA_SET + "networkPolicy" | KubernetesKind.NETWORK_POLICY + "NETWORKPOLICY" | KubernetesKind.NETWORK_POLICY + "networkpolicy" | KubernetesKind.NETWORK_POLICY + } + + @Unroll + void "kinds are serialized using the Spinnaker-canonical form"() { + when: + def kind = KubernetesKind.fromString(name) + + then: + kind.toString().equals("replicaSet") + + where: + name << [ + "replicaSet", + "replicaset", + "ReplicaSet", + "REPLICASET", + ] + } + + @Unroll + void "kinds from core API groups are returned if any core API group is input"() { + when: + def kind = KubernetesKind.from(name, apiGroup) + + then: + result == kind + + where: + name | apiGroup | result + "replicaSet" | null | KubernetesKind.REPLICA_SET + "replicaSet" | KubernetesApiGroup.APPS | KubernetesKind.REPLICA_SET + "replicaSet" | KubernetesApiGroup.EXTENSIONS | KubernetesKind.REPLICA_SET + "rs" | null | KubernetesKind.REPLICA_SET + "rs" | KubernetesApiGroup.APPS | KubernetesKind.REPLICA_SET + + } + + void "kinds from custom API groups do not return core Kubernetes kinds"() { + when: + def kind = KubernetesKind.from("replicaSet", KubernetesApiGroup.fromString("custom")) + + then: + kind != KubernetesKind.REPLICA_SET + } + + @Unroll + void "kinds from core API groups are equal regardless of group"() { + when: + def kind1 = KubernetesKind.fromString(name1) + def kind2 = KubernetesKind.fromString(name2) + + then: + kind1.equals(kind2) == shouldEqual + + where: + name1 | name2 | shouldEqual + "deployment.extensions" 
| "deployment" | true + "deployment" | "deployment.extensions" | true + "deployment" | "deployment" | true + "replicaSet.extensions" | "replicaSet.apps" | true + "replicaSet.apps" | "replicaSet.extensions" | true + "replicaSet.apps" | "deployment.extensions" | false + "replicaSet" | "deployment.extensions" | false + "replicaSet.apps" | "deployment" | false + "replicaSet" | "deployment" | false + } + + @Unroll + void "kinds from custom API groups are not equal unless the group is equal"() { + when: + def kind1 = KubernetesKind.fromString(name1) + def kind2 = KubernetesKind.fromString(name2) + + then: + kind1.equals(kind2) == shouldEqual + + where: + name1 | name2 | shouldEqual + "deployment.stable.example.com" | "deployment.stable.example.com" | true + "deployment.stable.example.com" | "deployment" | false + "deployment" | "deployment.stable.example.com" | false + "deployment.stable.example.com" | "something.stable.example.com" | false + "deployment.stable.example.com" | "deployment.other.example.com" | false + "deployment.extensions" | "deployment.stable.example.com" | false + } + + void "kinds from core API groups are serialized without the group name"() { + when: + def kind = KubernetesKind.fromString(name) + def string = kind.toString() + + then: + string == expectedString + + where: + name | expectedString + "replicaSet" | "replicaSet" + "replicaSet.apps" | "replicaSet" + "deployment.extensions" | "deployment" + } + + void "kinds from custom API groups are serialized with the group name"() { + when: + def kind = KubernetesKind.fromString(name) + def string = kind.toString() + + then: + string == expectedString + + where: + name | expectedString + "deployment.stable.example.com" | "deployment.stable.example.com" + } + + void "deserializes kinds from their string representation"() { + when: + def kind = objectMapper.convertValue(input, KubernetesKind.class) + + then: + kind == expectedKind + + where: + input | expectedKind + "replicaSet" | KubernetesKind.REPLICA_SET + "ReplicaSet" | KubernetesKind.REPLICA_SET + "replicaSet" | KubernetesKind.REPLICA_SET + "service" | KubernetesKind.SERVICE + "deployment.stable.example.com" | CUSTOM_RESOURCE_KIND + } + + void "serializes kinds to their string representation"() { + when: + def serialized = objectMapper.writeValueAsString(kind) + + then: + serialized == '"' + expectedSerialized + '"' + + where: + kind | expectedSerialized + KubernetesKind.REPLICA_SET | "replicaSet" + KubernetesKind.SERVICE | "service" + CUSTOM_RESOURCE_KIND | "deployment.stable.example.com" + } + + void "creates a kind from a custom resource definition spec"() { + when: + def kind = "TestKind" + def group = "stable.example.com" + V1beta1CustomResourceDefinition crd = + new V1beta1CustomResourceDefinitionBuilder() + .withSpec( + new V1beta1CustomResourceDefinitionSpecBuilder() + .withNames( + new V1beta1CustomResourceDefinitionNamesBuilder().withKind(kind).build()) + .withGroup(group) + .build()) + .build() + def kubernetesKind = KubernetesKind.fromCustomResourceDefinition(crd) + + then: + kubernetesKind == kubernetesKind.fromString("TestKind.stable.example.com") + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestLabelerSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestLabelerSpec.groovy new file mode 100644 index 00000000000..a81e6a61c0b --- /dev/null +++ 
b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestLabelerSpec.groovy
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest
+
+import com.netflix.spinnaker.moniker.Moniker
+import spock.lang.Specification
+import spock.lang.Unroll
+
+class KubernetesManifestLabelerSpec extends Specification {
+
+ @Unroll
+ void "storeLabels applies the managed-by label with the expected suffix"() {
+ given:
+ def moniker = Moniker.builder().build()
+ def labels = ["some-key": "some-value"]
+
+ when:
+ KubernetesManifestLabeler.storeLabels(managedBySuffix, labels, moniker)
+
+ then:
+ labels["some-key"] == "some-value"
+ labels["app.kubernetes.io/managed-by"] == expectedManagedByLabel
+
+ where:
+ managedBySuffix || expectedManagedByLabel
+ null || "spinnaker"
+ "" || "spinnaker"
+ "custom" || "spinnaker-custom"
+ }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/HandlerPrioritySpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/HandlerPrioritySpec.groovy
similarity index 94%
rename from clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/HandlerPrioritySpec.groovy
rename to clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/HandlerPrioritySpec.groovy
index ebc4275cb2e..867a706bc0f 100644
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/HandlerPrioritySpec.groovy
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/HandlerPrioritySpec.groovy
@@ -5,7 +5,7 @@
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@
 *
 */
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler
+package com.netflix.spinnaker.clouddriver.kubernetes.op.handler
 import spock.lang.Specification
 import spock.lang.Unroll
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandlerSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandlerSpec.groovy
new file mode 100644
index 00000000000..a82ad7573f7
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandlerSpec.groovy
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2017 Google, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.artifacts.kubernetes.KubernetesArtifactType +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest +import com.netflix.spinnaker.kork.artifacts.model.Artifact +import org.yaml.snakeyaml.Yaml +import org.yaml.snakeyaml.constructor.SafeConstructor +import spock.lang.Specification + +class KubernetesDeploymentHandlerSpec extends Specification { + def objectMapper = new ObjectMapper() + def yaml = new Yaml(new SafeConstructor()) + def handler = new KubernetesDeploymentHandler() + + def IMAGE = "gcr.io/project/image" + def CONFIG_MAP_VOLUME = "my-config-map" + def SECRET_ENV = "my-secret-env" + def CONFIG_MAP_ENV_KEY = "my-config-map-env" + def PROJECTED_CONFIG_MAP_VOLUME = "my-projected-config-map" + def PROJECTED_SECRET_VOLUME = "my-projected-secret" + def ACCOUNT = "my-account" + + def BASIC_DEPLOYMENT = """ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: $IMAGE + ports: + - containerPort: 80 + envFrom: + - secretRef: + name: $SECRET_ENV + env: + - name: KEY + valueFrom: + configMapKeyRef: + name: $CONFIG_MAP_ENV_KEY + key: value + volumeMounts: + - name: all-in-one + mountPath: "/projected-volume" + readOnly: true + volumes: + - configMap: + name: $CONFIG_MAP_VOLUME + - name: all-in-one + projected: + sources: + - configMap: + name: $PROJECTED_CONFIG_MAP_VOLUME + - secret: + name: $PROJECTED_SECRET_VOLUME +""" + + KubernetesManifest stringToManifest(String input) { + return objectMapper.convertValue(yaml.load(input), KubernetesManifest) + } + + void "check that image is replaced by the artifact replacer"() { + when: + def target = "$IMAGE:version-1.2.3" + def artifact = Artifact.builder() + .type(KubernetesArtifactType.DockerImage.type) + .name(IMAGE) + .reference(target) + .build() + + def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT) + + then: + result.manifest.spec.template.spec.containers[0].image == target + result.boundArtifacts.size() == 1 + result.boundArtifacts.contains(artifact) == true + } + + void "check that image isn't replaced by the artifact replacer"() { + when: + def target = "$IMAGE:version-bad" + def artifact = Artifact.builder() + .type(KubernetesArtifactType.DockerImage.type) + .name("not-$IMAGE") + .reference(target) + .build() + + def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT) + + then: + result.manifest.spec.template.spec.containers[0].image == IMAGE + result.boundArtifacts.isEmpty() == true + } + + void "check that image is found"() { + when: + def result = 
handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
+
+ then:
+ result.findAll { a -> a.getReference() == IMAGE && a.getType() == KubernetesArtifactType.DockerImage.type }.size() == 1
+ }
+
+ void "check that configmap volume is replaced by the artifact replacer without an account specified"() {
+ when:
+ def target = "$CONFIG_MAP_VOLUME-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name(CONFIG_MAP_VOLUME)
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[0].configMap.name == target
+ }
+
+ void "check that configmap volume is replaced by the artifact replacer"() {
+ when:
+ def target = "$CONFIG_MAP_VOLUME-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name(CONFIG_MAP_VOLUME)
+ .reference(target)
+ .metadata(["account": ACCOUNT])
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[0].configMap.name == target
+ }
+
+ void "check that configmap volume is not replaced by the artifact replacer"() {
+ when:
+ def target = "$CONFIG_MAP_VOLUME:version-bad"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name("not-$CONFIG_MAP_VOLUME")
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[0].configMap.name == CONFIG_MAP_VOLUME
+ }
+
+ void "check that configmap volume is not replaced by the artifact replacer in the wrong account"() {
+ when:
+ def target = "$CONFIG_MAP_VOLUME:version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name("$CONFIG_MAP_VOLUME")
+ .reference(target)
+ .metadata(["account": "not-$ACCOUNT".toString()])
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[0].configMap.name != target
+ }
+
+ void "check that configmap volume is found"() {
+ when:
+ def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
+
+ then:
+ result.findAll { a -> a.getReference() == CONFIG_MAP_VOLUME && a.getType() == KubernetesArtifactType.ConfigMap.type}.size() == 1
+ }
+
+
+ void "check that only secret ref is replaced by the artifact replacer"() {
+ when:
+ def target = "$SECRET_ENV-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.Secret.type)
+ .name(SECRET_ENV)
+ .reference(target)
+ .metadata(["account": ACCOUNT])
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.containers[0].envFrom[0].secretRef.name == target
+ }
+
+ void "check that secret ref is not replaced by the artifact replacer"() {
+ when:
+ def target = "$SECRET_ENV:version-bad"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.Secret.type)
+ .name("not-$SECRET_ENV")
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.containers[0].envFrom[0].secretRef.name == SECRET_ENV
+ }
+
+ void "check that secret ref is found"() {
+ when:
+ def result =
handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
+
+ then:
+ result.findAll { a -> a.getReference() == SECRET_ENV && a.getType() == KubernetesArtifactType.Secret.type}.size() == 1
+ }
+
+ void "check that only configmap value ref is replaced by the artifact replacer"() {
+ when:
+ def target = "$CONFIG_MAP_ENV_KEY-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name(CONFIG_MAP_ENV_KEY)
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.containers[0].env[0].valueFrom.configMapKeyRef.name == target
+ }
+
+ void "check that configmap value ref is not replaced by the artifact replacer"() {
+ when:
+ def target = "$CONFIG_MAP_ENV_KEY:version-bad"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name("not-$CONFIG_MAP_ENV_KEY")
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.containers[0].env[0].valueFrom.configMapKeyRef.name == CONFIG_MAP_ENV_KEY
+ }
+
+ void "check that configmap value ref is found"() {
+ when:
+ def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
+
+ then:
+ result.findAll { a -> a.getReference() == CONFIG_MAP_ENV_KEY && a.getType() == KubernetesArtifactType.ConfigMap.type}.size() == 1
+ }
+
+ void "check that projected configmap volume is replaced by the artifact replacer without an account specified"() {
+ when:
+ def target = "$PROJECTED_CONFIG_MAP_VOLUME-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name(PROJECTED_CONFIG_MAP_VOLUME)
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[0].configMap.name == target
+ }
+
+ void "check that projected configmap volume is replaced by the artifact replacer"() {
+ when:
+ def target = "$PROJECTED_CONFIG_MAP_VOLUME-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name(PROJECTED_CONFIG_MAP_VOLUME)
+ .reference(target)
+ .metadata(["account": ACCOUNT])
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[0].configMap.name == target
+ }
+
+ void "check that projected configmap volume is not replaced by the artifact replacer"() {
+ when:
+ def target = "$PROJECTED_CONFIG_MAP_VOLUME:version-bad"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name("not-$PROJECTED_CONFIG_MAP_VOLUME")
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[0].configMap.name == PROJECTED_CONFIG_MAP_VOLUME
+ }
+
+ void "check that projected configmap volume is not replaced by the artifact replacer in the wrong account"() {
+ when:
+ def target = "$PROJECTED_CONFIG_MAP_VOLUME:version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.ConfigMap.type)
+ .name("$PROJECTED_CONFIG_MAP_VOLUME")
+ .reference(target)
+ .metadata(["account": "not-$ACCOUNT".toString()])
+ .build()
+
+ def result =
handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[0].configMap.name != target
+ }
+
+ void "check that projected configmap volume is found"() {
+ when:
+ def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
+
+ then:
+ result.findAll { a -> a.getReference() == PROJECTED_CONFIG_MAP_VOLUME && a.getType() == KubernetesArtifactType.ConfigMap.type}.size() == 1
+ }
+
+ void "check that projected secret volume is replaced by the artifact replacer without an account specified"() {
+ when:
+ def target = "$PROJECTED_SECRET_VOLUME-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.Secret.type)
+ .name(PROJECTED_SECRET_VOLUME)
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[1].secret.name == target
+ }
+
+ void "check that projected secret volume is replaced by the artifact replacer"() {
+ when:
+ def target = "$PROJECTED_SECRET_VOLUME-version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.Secret.type)
+ .name(PROJECTED_SECRET_VOLUME)
+ .reference(target)
+ .metadata(["account": ACCOUNT])
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[1].secret.name == target
+ }
+
+ void "check that projected secret volume is not replaced by the artifact replacer"() {
+ when:
+ def target = "$PROJECTED_SECRET_VOLUME:version-bad"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.Secret.type)
+ .name("not-$PROJECTED_SECRET_VOLUME")
+ .reference(target)
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[1].secret.name == PROJECTED_SECRET_VOLUME
+ }
+
+ void "check that projected secret volume is not replaced by the artifact replacer in the wrong account"() {
+ when:
+ def target = "$PROJECTED_SECRET_VOLUME:version-1.2.3"
+ def artifact = Artifact.builder()
+ .type(KubernetesArtifactType.Secret.type)
+ .name("$PROJECTED_SECRET_VOLUME")
+ .reference(target)
+ .metadata(["account": "not-$ACCOUNT".toString()])
+ .build()
+
+ def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
+
+ then:
+ result.manifest.spec.template.spec.volumes[1].projected.sources[1].secret.name != target
+ }
+
+ void "check that projected secret volume is found"() {
+ when:
+ def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
+
+ then:
+ result.findAll { a -> a.getReference() == PROJECTED_SECRET_VOLUME && a.getType() == KubernetesArtifactType.Secret.type}.size() == 1
+ }
+}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHorizontalPodAutoscalerHandlerSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandlerSpec.groovy
similarity index 76%
rename from
clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandlerSpec.groovy index 881b35c118e..f5bae2aa37e 100644 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesHorizontalPodAutoscalerHandlerSpec.groovy +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandlerSpec.groovy @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +15,11 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactTypes -import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest +import com.netflix.spinnaker.clouddriver.artifacts.kubernetes.KubernetesArtifactType +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest import com.netflix.spinnaker.kork.artifacts.model.Artifact import org.yaml.snakeyaml.Yaml import org.yaml.snakeyaml.constructor.SafeConstructor @@ -56,7 +56,7 @@ spec: void "check that the #kind #name is replaced by the artifact replacer"() { expect: def artifact = Artifact.builder() - .type(type.toString()) + .type(type.type) .name(name) .reference(reference) .location(namespace) @@ -70,10 +70,10 @@ spec: where: kind | name | reference | type - "deployment" | "abc" | "abc-v000" | ArtifactTypes.KUBERNETES_DEPLOYMENT - "Deployment" | "abc" | "abc-v000" | ArtifactTypes.KUBERNETES_DEPLOYMENT - "replicaSet" | "xyz" | "xyz-v000" | ArtifactTypes.KUBERNETES_REPLICA_SET - "ReplicaSet" | "xyz" | "xyz-v000" | ArtifactTypes.KUBERNETES_REPLICA_SET + "deployment" | "abc" | "abc-v000" | KubernetesArtifactType.Deployment + "Deployment" | "abc" | "abc-v000" | KubernetesArtifactType.Deployment + "replicaSet" | "xyz" | "xyz-v000" | KubernetesArtifactType.ReplicaSet + "ReplicaSet" | "xyz" | "xyz-v000" | KubernetesArtifactType.ReplicaSet } @Unroll @@ -92,7 +92,7 @@ spec: where: kind | name | location | type - "deployment" | "abc" | namespace | ArtifactTypes.KUBERNETES_REPLICA_SET - "Deployment" | "abc" | "$namespace-" | ArtifactTypes.KUBERNETES_DEPLOYMENT + "deployment" | "abc" | namespace | KubernetesArtifactType.ReplicaSet + "Deployment" | "abc" | "$namespace-" | KubernetesArtifactType.Deployment } } diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceHandlerSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceHandlerSpec.groovy new file mode 100644 index 00000000000..cef1e08c81b --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesServiceHandlerSpec.groovy @@ -0,0 +1,133 @@ +/* + * Copyright 2019 Air France-KLM Group + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest +import com.netflix.spinnaker.clouddriver.kubernetes.description.JsonPatch.Op; +import org.yaml.snakeyaml.Yaml +import org.yaml.snakeyaml.constructor.SafeConstructor +import spock.lang.Specification + +class KubernetesServiceHandlerSpec extends Specification { + def objectMapper = new ObjectMapper() + def yaml = new Yaml(new SafeConstructor()) + def handler = new KubernetesServiceHandler() + + def BASIC_SERVICE = """ +apiVersion: v1 +kind: Service +metadata: + name: test-service +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + load-balancer-test-app: 'true' +""" + + def SERVICE_WITH_SLASH = """ +apiVersion: v1 +kind: Service +metadata: + name: test-service +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + load-balancer/test-app: 'true' +status: + loadBalancer: {} +""" + + def SERVICE_WITH_NAME_LABEL = """ +apiVersion: v1 +kind: Service +metadata: + name: test-service +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/name: test-app +status: + loadBalancer: {} +""" + + def BASIC_REPLICASET = """ +apiVersion: extensions/v1beta1 +kind: ReplicaSet +metadata: + name: app-replicaset +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: app-instance + app.kubernetes.io/name: test-app + template: + metadata: + labels: + app.kubernetes.io/instance: app-instance + app.kubernetes.io/name: test-app +""" + + KubernetesManifest stringToManifest(String input) { + return objectMapper.convertValue(yaml.load(input), KubernetesManifest) + } + + void "check that loadbalancer label is added"() { + when: + def service = stringToManifest(BASIC_SERVICE) + def target = stringToManifest(BASIC_REPLICASET) + + def result = handler.attachPatch(service, target) + + then: + result[0].op == Op.add && result[0].path == "/spec/template/metadata/labels/load-balancer-test-app" + } + + void "check that loadbalancer label with slash is escaped"() { + when: + def service = stringToManifest(SERVICE_WITH_SLASH) + def target = stringToManifest(BASIC_REPLICASET) + + def result = handler.attachPatch(service, target) + + then: + result[0].op == Op.add && result[0].path == "/spec/template/metadata/labels/load-balancer~1test-app" + } + + void "check that loadbalancer label with slash is escaped when removing"() { + when: + def service = stringToManifest(SERVICE_WITH_NAME_LABEL) + def target = stringToManifest(BASIC_REPLICASET) + + def result = handler.detachPatch(service, target) + + then: + result[0].op == Op.remove && result[0].path == "/spec/template/metadata/labels/app.kubernetes.io~1name" + } + +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/ArtifactKeyTest.java b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/ArtifactKeyTest.java new file mode 100644 index 
00000000000..cc902b1cdcb --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/op/manifest/ArtifactKeyTest.java @@ -0,0 +1,130 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.manifest; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.artifacts.model.Artifact.ArtifactBuilder; +import java.util.Collection; +import org.junit.jupiter.api.Test; + +final class ArtifactKeyTest { + private static String TYPE = "docker/image"; + private static String NAME = "gcr.io/test/test-image"; + private static String VERSION = "latest"; + private static String REFERENCE = "gcr.io/test/test-image:latest"; + private static String ACCOUNT = "docker-registry"; + + private static ArtifactBuilder defaultArtifactBuilder() { + return Artifact.builder().type(TYPE).name(NAME).version(VERSION).reference(REFERENCE); + } + + @Test + public void equalsTest() { + Artifact artifact1 = defaultArtifactBuilder().build(); + Artifact artifact2 = defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)).isEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void equalsWithDifferentAccountsTest() { + Artifact artifact1 = defaultArtifactBuilder().artifactAccount(ACCOUNT).build(); + Artifact artifact2 = defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)).isEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void differentTypeTest() { + Artifact artifact1 = defaultArtifactBuilder().type("gcs/file").build(); + Artifact artifact2 = defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)) + .isNotEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void differentNameTest() { + Artifact artifact1 = defaultArtifactBuilder().name("aaa").build(); + Artifact artifact2 = defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)) + .isNotEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void differentVersionTest() { + Artifact artifact1 = defaultArtifactBuilder().version("oldest").build(); + Artifact artifact2 = defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)) + .isNotEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void differentLocationTest() { + Artifact artifact1 = defaultArtifactBuilder().location("test").build(); + Artifact artifact2 = defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)) + .isNotEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void differentReferenceTest() { + Artifact artifact1 = defaultArtifactBuilder().reference("zzz").build(); + Artifact artifact2 = 
defaultArtifactBuilder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)) + .isNotEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void nullSafetyTest() { + Artifact artifact1 = defaultArtifactBuilder().build(); + Artifact artifact2 = Artifact.builder().build(); + assertThat(ArtifactKey.fromArtifact(artifact1)) + .isNotEqualTo(ArtifactKey.fromArtifact(artifact2)); + } + + @Test + public void fromArtifactsTest() { + Collection<Artifact> artifacts = + ImmutableList.of( + defaultArtifactBuilder().build(), + defaultArtifactBuilder().build(), // duplicate of above entry + defaultArtifactBuilder().version("oldest").build(), + Artifact.builder().build()); + ImmutableSet<ArtifactKey> keys = ArtifactKey.fromArtifacts(artifacts); + assertThat(keys.size()).isEqualTo(3); + assertThat(keys) + .containsOnly( + ArtifactKey.fromArtifact(defaultArtifactBuilder().build()), + ArtifactKey.fromArtifact(defaultArtifactBuilder().version("oldest").build()), + ArtifactKey.fromArtifact(Artifact.builder().build())); + } + + @Test + public void fromArtifactsNullSafety() { + ImmutableSet<ArtifactKey> keys = ArtifactKey.fromArtifacts(null); + assertThat(keys.size()).isEqualTo(0); + } + + @Test + public void toStringTest() { + ArtifactKey key = ArtifactKey.fromArtifact(defaultArtifactBuilder().build()); + assertThat(key.toString()).contains(TYPE, NAME, REFERENCE); + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProviderSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProviderSpec.groovy new file mode 100644 index 00000000000..644d12f89b6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProviderSpec.groovy @@ -0,0 +1,172 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.provider.view + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesManifestContainer +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import spock.lang.Specification + +class KubernetesJobProviderSpec extends Specification { + + def "getFileContents returns a map with properties"() { + given: + def mockCredentials = Mock(KubernetesCredentials) { + jobLogs(*_) >> logs + } + + def mockAccountCredentialsProvider = Mock(AccountCredentialsProvider) { + getCredentials(*_) >> Mock(AccountCredentials) { + getCredentials(*_) >> mockCredentials + } + } + + def testManifest = new KubernetesManifest() + testManifest.putAll([ + apiVersion: 'batch/v1', + kind: 'Job', + metadata: [ + name: 'a', + namespace: 'b', + ] + ]) + + def mockManifestProvider = Mock(KubernetesManifestProvider) { + getManifest(*_) >> KubernetesManifestContainer.builder() + .account("a") + .name("a") + .manifest(testManifest) + .build() + } + + when: + def provider = new KubernetesJobProvider(mockAccountCredentialsProvider, mockManifestProvider, true) + def logResult = provider.getFileContents("a", "b", "c", "d") + + then: + logResult == result + + where: + logs | result + "SPINNAKER_PROPERTY_a=b" | [a: 'b'] + "Spinnaker_Property_a=b" | [:] + 'SPINNAKER_CONFIG_JSON={"a": "b"}' | [a: 'b'] + 'SPINNAKER_CONFIG_JSON={"a": "b}' | null + 'SPINNAKER_CONFIG_JSON=syntax error' | null + "doesn't contain any magic strings" | [:] + } + + def "if getFileContents throws an exception, then logResult == null"() { + given: + def mockCredentials = Mock(KubernetesCredentials) { + jobLogs(*_) >> { + throw new KubectlJobExecutor.KubectlException("some exception while getting logs", new Exception()) + } + } + + def mockAccountCredentialsProvider = Mock(AccountCredentialsProvider) { + getCredentials(*_) >> Mock(AccountCredentials) { + getCredentials(*_) >> mockCredentials + } + } + + def testManifest = new KubernetesManifest() + testManifest.putAll([ + apiVersion: 'batch/v1', + kind: 'Job', + metadata: [ + name: 'a', + namespace: 'b', + ] + ]) + + def mockManifestProvider = Mock(KubernetesManifestProvider) { + getManifest(*_) >> KubernetesManifestContainer.builder() + .account("a") + .name("a") + .manifest(testManifest) + .build() + } + + when: + def provider = new KubernetesJobProvider(mockAccountCredentialsProvider, mockManifestProvider, true) + def logResult = provider.getFileContents("a", "b", "c", "d") + + then: + logResult == null + } + + def "getFileContentsFromPod should return a map with properties if there are no errors"() { + given: + def mockCredentials = Mock(KubernetesCredentials) { + logs(*_) >> podLogs + } + + def mockAccountCredentialsProvider = Mock(AccountCredentialsProvider) { + getCredentials(*_) >> Mock(AccountCredentials) { + getCredentials(*_) >> mockCredentials + } + } + + def mockManifestProvider = Mock(KubernetesManifestProvider) + + when: + def provider = new KubernetesJobProvider(mockAccountCredentialsProvider, mockManifestProvider, true) + def logResult =
provider.getFileContentsFromPod("a", "b", "c", "d") + + then: + logResult == result + + where: + podLogs | result + "SPINNAKER_PROPERTY_a=b" | [a: 'b'] + "Spinnaker_Property_a=b" | [:] + 'SPINNAKER_CONFIG_JSON={"a": "b"}' | [a: 'b'] + 'SPINNAKER_CONFIG_JSON={"a": "b}' | null + 'SPINNAKER_CONFIG_JSON=syntax error' | null + "doesn't contain any magic strings" | [:] + } + + def "if getFileContentsFromPod throws an exception, then logResult == null"() { + given: + def mockCredentials = Mock(KubernetesCredentials) { + logs(*_) >> { + throw new KubectlJobExecutor.KubectlException("some exception while getting logs", new Exception()) + } + } + + def mockAccountCredentialsProvider = Mock(AccountCredentialsProvider) { + getCredentials(*_) >> Mock(AccountCredentials) { + getCredentials(*_) >> mockCredentials + } + } + + def mockManifestProvider = Mock(KubernetesManifestProvider) + + when: + def provider = new KubernetesJobProvider(mockAccountCredentialsProvider, mockManifestProvider, true) + def logResult = provider.getFileContentsFromPod("a", "b", "c", "d") + + then: + logResult == null + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/PropertyParserSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/PropertyParserSpec.groovy new file mode 100644 index 00000000000..b4629f85572 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/PropertyParserSpec.groovy @@ -0,0 +1,94 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.provider.view + +import com.fasterxml.jackson.core.JsonParseException +import spock.lang.Specification + +class PropertyParserSpec extends Specification { + def "ExtractPropertiesFromLog"() { + String buildLog = "[Thread 0] Uploading artifact: https://foo.host/artifactory/debian-local/some/nice/path/some-package_0.0.7_amd64.deb;deb.distribution=trusty;deb.component=main;deb.architecture=amd64\n" + + "[Thread 0] Artifactory response: 201 Created" + when: + Map properties = PropertyParser.extractPropertiesFromLog(buildLog) + + then: + properties.size() == 0 + } + + def "ExtractPropertiesFromLog works"() { + String buildLog = "SPINNAKER_PROPERTY_MY_PROPERTY=MYVALUE\r" + + when: + Map properties = PropertyParser.extractPropertiesFromLog(buildLog) + + then: + properties.size() == 1 + } + + def "ExtractPropertiesFromLog with JSON"() { + String buildLog = "SPINNAKER_CONFIG_JSON={\"key1\":\"value1\"}\r" + + when: + Map properties = PropertyParser.extractPropertiesFromLog(buildLog) + + then: + properties.size() == 1 + } + + def "ExtractPropertiesFromLog with JSON and 1 property works"() { + String buildLog = "SPINNAKER_PROPERTY_MY_PROPERTY=MYVALUE\n" + + "SPINNAKER_CONFIG_JSON={\"key1\":\"value1\"}\r" + + when: + Map properties = PropertyParser.extractPropertiesFromLog(buildLog) + + then: + properties.size() == 2 + } + + def "ExtractPropertiesFromLog with malformed JSON throws exception"() { + String buildLog = "SPINNAKER_CONFIG_JSON={\"key1\";\"value1\"}\r" + + when: + PropertyParser.extractPropertiesFromLog(buildLog) + + then: + thrown(JsonParseException) + } + + def "Do not detect json magic string if it is not first non-whitespace substring in the line"() { + String buildLog = "some log SPINNAKER_CONFIG_JSON={\"key1\":\"value1\"}\r" + + when: + Map properties = PropertyParser.extractPropertiesFromLog(buildLog) + + then: + properties.size() == 0 + } + + def "Does not attempt to parse properties with empty values"() { + String buildLog = "SPINNAKER_PROPERTY_FOO=" + + when: + Map properties = PropertyParser.extractPropertiesFromLog(buildLog) + + then: + properties.size() == 0 + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/GlobalKubernetesKindRegistrySpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/GlobalKubernetesKindRegistrySpec.groovy new file mode 100644 index 00000000000..ade1b2d7ae1 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/GlobalKubernetesKindRegistrySpec.groovy @@ -0,0 +1,84 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.* +import com.netflix.spinnaker.clouddriver.kubernetes.security.GlobalKubernetesKindRegistry +import spock.lang.Specification +import spock.lang.Subject + +class GlobalKubernetesKindRegistrySpec extends Specification { + static final KubernetesApiGroup CUSTOM_API_GROUP = KubernetesApiGroup.fromString("test") + static final KubernetesKindProperties REPLICA_SET = KubernetesKindProperties.create(KubernetesKind.REPLICA_SET, true) + static final KubernetesKindProperties CUSTOM_KIND = KubernetesKindProperties.create(KubernetesKind.from("customKind", CUSTOM_API_GROUP), true) + + void "an empty registry returns no kinds"() { + given: + @Subject GlobalKubernetesKindRegistry kindRegistry = new GlobalKubernetesKindRegistry([]) + + when: + def kinds = kindRegistry.getRegisteredKinds() + + then: + kinds.isEmpty() + } + + void "getRegisteredKinds returns all registered kinds"() { + given: + @Subject GlobalKubernetesKindRegistry kindRegistry = new GlobalKubernetesKindRegistry([ + REPLICA_SET, + CUSTOM_KIND + ]) + + when: + def kinds = kindRegistry.getRegisteredKinds() + + then: + kinds.size() == 2 + kinds.contains(KubernetesKind.REPLICA_SET) + kinds.contains(KubernetesKind.from("customKind", CUSTOM_API_GROUP)) + } + + void "getRegisteredKind returns kinds that have been registered"() { + given: + @Subject GlobalKubernetesKindRegistry kindRegistry = new GlobalKubernetesKindRegistry([ + REPLICA_SET, + CUSTOM_KIND + ]) + + when: + def properties = kindRegistry.getKindProperties(KubernetesKind.from("customKind", CUSTOM_API_GROUP)) + + then: + properties.get() == CUSTOM_KIND + } + + void "getRegisteredKind returns an empty optional for kinds that have not been registered"() { + given: + @Subject GlobalKubernetesKindRegistry kindRegistry = new GlobalKubernetesKindRegistry([ + REPLICA_SET, + CUSTOM_KIND + ]) + + when: + def properties = kindRegistry.getKindProperties(KubernetesKind.from("otherKind", CUSTOM_API_GROUP)) + + then: + !properties.isPresent() + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsSpec.groovy new file mode 100644 index 00000000000..dae204ea975 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsSpec.groovy @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security + +import com.google.common.collect.ImmutableList +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount +import com.netflix.spinnaker.clouddriver.kubernetes.description.AccountResourcePropertyRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesNamerRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials.KubernetesKindStatus +import com.netflix.spinnaker.kork.configserver.ConfigFileService +import spock.lang.Specification + +class KubernetesCredentialsSpec extends Specification { + Registry registry = Stub(Registry) + KubectlJobExecutor kubectlJobExecutor = Stub(KubectlJobExecutor) + String NAMESPACE = "my-namespace" + AccountResourcePropertyRegistry.Factory resourcePropertyRegistryFactory = Mock(AccountResourcePropertyRegistry.Factory) + KubernetesKindRegistry.Factory kindRegistryFactory = new KubernetesKindRegistry.Factory( + new GlobalKubernetesKindRegistry(KubernetesKindProperties.getGlobalKindProperties()) + ) + KubernetesNamerRegistry namerRegistry = new KubernetesNamerRegistry([new KubernetesManifestNamer()]) + ConfigFileService configFileService = new ConfigFileService() + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap = new KubernetesSpinnakerKindMap(ImmutableList.of()) + GlobalResourcePropertyRegistry globalResourcePropertyRegistry = new GlobalResourcePropertyRegistry(ImmutableList.of(), new KubernetesUnregisteredCustomResourceHandler()) + + KubernetesCredentials.Factory credentialFactory = new KubernetesCredentials.Factory( + new NoopRegistry(), + namerRegistry, + kubectlJobExecutor, + configFileService, + resourcePropertyRegistryFactory, + kindRegistryFactory, + kubernetesSpinnakerKindMap, + globalResourcePropertyRegistry + ) + + + + void "Built-in Kubernetes kinds are considered valid by default"() { + when: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: false, + )) + + then: + credentials.getKindStatus(KubernetesKind.DEPLOYMENT) == KubernetesKindStatus.VALID + credentials.getKindStatus(KubernetesKind.REPLICA_SET) == KubernetesKindStatus.VALID + } + + void "Built-in Kubernetes kinds are considered valid by default when kinds is empty"() { + when: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: false, + kinds: [] + )) + + then: + credentials.getKindStatus(KubernetesKind.DEPLOYMENT) == KubernetesKindStatus.VALID + credentials.getKindStatus(KubernetesKind.REPLICA_SET) == 
KubernetesKindStatus.VALID + } + + void "Only explicitly listed kinds are valid when kinds is not empty"() { + when: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: false, + kinds: ["deployment"] + )) + + then: + credentials.getKindStatus(KubernetesKind.DEPLOYMENT) == KubernetesKindStatus.VALID + credentials.getKindStatus(KubernetesKind.REPLICA_SET) == KubernetesKindStatus.MISSING_FROM_ALLOWED_KINDS + } + + void "Explicitly omitted kinds are not valid"() { + when: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: false, + omitKinds: ["deployment"] + )) + + then: + credentials.getKindStatus(KubernetesKind.DEPLOYMENT) == KubernetesKindStatus.EXPLICITLY_OMITTED_BY_CONFIGURATION + credentials.getKindStatus(KubernetesKind.REPLICA_SET) == KubernetesKindStatus.VALID + } + + void "CRDs that are not installed return unknown"() { + given: + KubernetesApiGroup customGroup = KubernetesApiGroup.fromString("deployment.stable.example.com") + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: true, + )) + + expect: + credentials.getKindStatus(KubernetesKind.from("my-kind", customGroup)) == KubernetesKindStatus.UNKNOWN + } + + void "Kinds that are not readable are considered invalid"() { + given: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: true, + )) + kubectlJobExecutor.list(_ as KubernetesCredentials, ImmutableList.of(KubernetesKind.DEPLOYMENT), NAMESPACE, _ as KubernetesSelectorList) >> { + throw new KubectlJobExecutor.KubectlException("Error", new Exception()) + } + kubectlJobExecutor.list(_ as KubernetesCredentials, ImmutableList.of(KubernetesKind.REPLICA_SET), NAMESPACE, _ as KubernetesSelectorList) >> { + return ImmutableList.of() + } + + expect: + credentials.getKindStatus(KubernetesKind.DEPLOYMENT) == KubernetesKindStatus.READ_ERROR + credentials.getKindStatus(KubernetesKind.REPLICA_SET) == KubernetesKindStatus.VALID + } + + void "Metrics are properly set on the account when not checking permissions"() { + given: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: false, + metrics: metrics + )) + + expect: + credentials.isMetricsEnabled() == metrics + + where: + metrics << [true, false] + } + + void "Metrics are properly enabled when readable"() { + given: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: true, + metrics: true + )) + kubectlJobExecutor.topPod(_ as KubernetesCredentials, NAMESPACE, _) >> ImmutableList.of() + + expect: + credentials.isMetricsEnabled() == true + } + + void "Metrics are properly disabled when not readable"() { + given: + KubernetesCredentials credentials = credentialFactory.build(new ManagedAccount( + name: "k8s", + namespaces: [NAMESPACE], + checkPermissionsOnStartup: true, + metrics: true + )) + kubectlJobExecutor.topPod(_ as KubernetesCredentials, NAMESPACE, _) >> { + throw new KubectlJobExecutor.KubectlException("Error", new Exception()) + } + + expect: + credentials.isMetricsEnabled() == false + } +} diff --git 
a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentialsSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentialsSpec.groovy new file mode 100644 index 00000000000..a6a86f667e3 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesNamedAccountCredentialsSpec.groovy @@ -0,0 +1,106 @@ +/* + * Copyright 2019 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security + +import com.google.common.collect.ImmutableList +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount +import com.netflix.spinnaker.clouddriver.kubernetes.description.AccountResourcePropertyRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesNamerRegistry +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor +import com.netflix.spinnaker.fiat.model.Authorization +import com.netflix.spinnaker.kork.configserver.ConfigFileService +import spock.lang.Specification + +import java.nio.file.Files + +class KubernetesNamedAccountCredentialsSpec extends Specification { + KubernetesNamerRegistry namerRegistry = new KubernetesNamerRegistry([new KubernetesManifestNamer()]) + ConfigFileService configFileService = new ConfigFileService() + AccountResourcePropertyRegistry.Factory resourcePropertyRegistryFactory = Mock(AccountResourcePropertyRegistry.Factory) + KubernetesKindRegistry.Factory kindRegistryFactory = Mock(KubernetesKindRegistry.Factory) + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap = new KubernetesSpinnakerKindMap(ImmutableList.of()) + GlobalResourcePropertyRegistry globalResourcePropertyRegistry = new GlobalResourcePropertyRegistry(ImmutableList.of(), new KubernetesUnregisteredCustomResourceHandler()) + + KubectlJobExecutor mockKubectlJobExecutor = Mock(KubectlJobExecutor) + + KubernetesCredentials.Factory credentialFactory = new KubernetesCredentials.Factory( + new NoopRegistry(), + namerRegistry, + mockKubectlJobExecutor, + configFileService, + resourcePropertyRegistryFactory, + kindRegistryFactory, + kubernetesSpinnakerKindMap, + globalResourcePropertyRegistry + ) + + + void "should equal 2 Kubernetes accounts with same kubeconfig content"() { + setup: + def file1 = Files.createTempFile("test", "") + file1.toFile().append("some content") + def account1Def = new ManagedAccount() + account1Def.setName("test") 
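+ // the two account definitions below differ only in the kubeconfig file's path, not its content, so they are expected to compare equal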
+ account1Def.setCacheThreads(1) + account1Def.getPermissions().add(Authorization.READ, "test@test.com") + account1Def.setNamespaces(["ns1", "ns2"]) + account1Def.setKubeconfigFile(file1.toString()) + + def file2 = Files.createTempFile("other", "") + file2.toFile().append("some content") + def account2Def = new ManagedAccount() + account2Def.setName("test") + account2Def.setCacheThreads(1) + account2Def.getPermissions().add(Authorization.READ, "test@test.com") + account2Def.setNamespaces(["ns1", "ns2"]) + account2Def.setKubeconfigFile(file2.toString()) + + + when: + def account1 = new KubernetesNamedAccountCredentials(account1Def, credentialFactory) + def account2 = new KubernetesNamedAccountCredentials(account2Def, credentialFactory) + + then: + account1.equals(account2) + + cleanup: + Files.delete(file1) + Files.delete(file2) + } + + void 'getting namespaces makes no calls to kubernetes'() { + given: 'an account that does not specify namespaces' + def account1Def = new ManagedAccount() + account1Def.setName("test") + account1Def.setCacheThreads(1) + account1Def.getPermissions().add(Authorization.READ, "test@test.com") + account1Def.setServiceAccount(true); + def account1 = new KubernetesNamedAccountCredentials(account1Def, credentialFactory) + + when: 'retrieving namespaces for the account' + account1.getNamespaces() + + then: 'no calls to kubernetes occurred' + 0 * mockKubectlJobExecutor._ + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorListSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorListSpec.groovy new file mode 100644 index 00000000000..be6cfc48193 --- /dev/null +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorListSpec.groovy @@ -0,0 +1,61 @@ +/* + * Copyright 2018 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security + +import com.fasterxml.jackson.core.type.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import org.yaml.snakeyaml.Yaml +import org.yaml.snakeyaml.constructor.SafeConstructor +import spock.lang.Specification +import spock.lang.Unroll + +class KubernetesSelectorListSpec extends Specification { + def objectMapper = new ObjectMapper() + def yaml = new Yaml(new SafeConstructor()) + + List matchExpressionsFromYaml(String input) { + return objectMapper.convertValue(yaml.load(input), new TypeReference<List>() {}) + } + + Map<String, String> matchLabelsFromYaml(String input) { + return objectMapper.convertValue(yaml.load(input), new TypeReference<Map<String, String>>() {}) + } + + @Unroll + def "renders well-formed match expressions for #selectorQuery"() { + when: + def matchExpressions = matchExpressionsFromYaml(matchExpressionsYaml) + def matchLabels = matchLabelsFromYaml(matchLabelsYaml) + KubernetesSelectorList list = KubernetesSelectorList.fromMatchExpressions(matchExpressions) + list.addSelectors(KubernetesSelectorList.fromMatchLabels(matchLabels)) + + then: + list.toString() == selectorQuery + + where: + matchExpressionsYaml | matchLabelsYaml | selectorQuery + "[{ key: tier, operator: In, values: [cache] }]" | "{}" | "tier in (cache)" + "[]" | "{load: balancer}" | "load = balancer" + "[{ key: tier, operator: In, values: [cache] }]" | "{load: balancer}" | "tier in (cache),load = balancer" + "[{ key: stack, operator: NotIn, values: [canary, backend] }]" | "{}" | "stack notin (canary, backend)" + "[{ key: stack, operator: NotIn, values: [canary, backend] }, { key: production, operator: Exists }]" | "{}" | "stack notin (canary, backend),production" + "[]" | "{load: balancer, balance: loader}" | "load = balancer,balance = loader" + "[{ key: stack, operator: NotIn, values: [canary, backend] }, { key: production, operator: Exists }]" | "{load: balancer, balance: loader}" | "stack notin (canary, backend),production,load = balancer,balance = loader" + "[{ key: production, operator: DoesNotExist }]" | "{}" | "!production" + } +} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelectorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorSpec.groovy similarity index 94% rename from clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelectorSpec.groovy rename to clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorSpec.groovy index de9cb5dc2fa..cda7726439a 100644 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/security/KubernetesSelectorSpec.groovy +++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesSelectorSpec.groovy @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ * */ -package com.netflix.spinnaker.clouddriver.kubernetes.v2.security +package com.netflix.spinnaker.clouddriver.kubernetes.security import spock.lang.Specification import spock.lang.Unroll diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationConverterSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationConverterSpec.groovy deleted file mode 100644 index c6bf11d30f6..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationConverterSpec.groovy +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.loadbalancer - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.loadbalancer.UpsertKubernetesLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import spock.lang.Shared -import spock.lang.Specification - -class UpsertKubernetesLoadBalancerAtomicOperationConverterSpec extends Specification { - private static final ACCOUNT = "my-test-account" - private static final NAME = "johanson" - - @Shared - ObjectMapper mapper = new ObjectMapper() - - @Shared - UpsertKubernetesLoadBalancerAtomicOperationConverter converter - - def mockCredentials - - def setupSpec() { - converter = new UpsertKubernetesLoadBalancerAtomicOperationConverter(objectMapper: mapper) - } - - def setup() { - mockCredentials = Mock(KubernetesNamedAccountCredentials) - converter.accountCredentialsProvider = Mock(AccountCredentialsProvider) - } - - void "UpsertKubernetesLoadBalancerAtomicOperationSpec matches type signature of parent method"() { - setup: - def input = [name: NAME, - account: ACCOUNT] - when: - def description = converter.convertDescription(input) - - then: - 1 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - description instanceof KubernetesLoadBalancerDescription - - when: - def operation = converter.convertOperation(input) - - then: - 1 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - operation instanceof 
UpsertKubernetesLoadBalancerAtomicOperation - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/CloneKubernetesAtomicOperationConverterSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/CloneKubernetesAtomicOperationConverterSpec.groovy deleted file mode 100644 index f5221fbb33b..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/CloneKubernetesAtomicOperationConverterSpec.groovy +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.CloneKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.CloneKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import spock.lang.Shared -import spock.lang.Specification - -class CloneKubernetesAtomicOperationConverterSpec extends Specification { - private static final ACCOUNT = "my-test-account" - private static final APPLICATION = "app" - private static final STACK = "stack" - private static final DETAILS = "details" - - @Shared - ObjectMapper mapper = new ObjectMapper() - - @Shared - CloneKubernetesAtomicOperationConverter converter - - def mockCredentials - - def setupSpec() { - converter = new CloneKubernetesAtomicOperationConverter(objectMapper: mapper) - } - - def setup() { - mockCredentials = Mock(KubernetesNamedAccountCredentials) - converter.accountCredentialsProvider = Mock(AccountCredentialsProvider) - } - - void "CloneKubernetesAtomicOperationConverter type returns CloneKubernetesAtomicOperation and DeployKubernetesAtomicOperationDescription"() { - setup: - def input = [app: APPLICATION, - stack: STACK, - freeFormDetails: DETAILS, - account: ACCOUNT, - source: [ - account: ACCOUNT - ]] - when: - def description = converter.convertDescription(input) - - then: - 2 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - description instanceof CloneKubernetesAtomicOperationDescription - - when: - def operation = converter.convertOperation(input) - - then: - 2 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - operation instanceof CloneKubernetesAtomicOperation - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DeployKubernetesAtomicOperationConverterSpec.groovy 
b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DeployKubernetesAtomicOperationConverterSpec.groovy deleted file mode 100644 index 589823d11be..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/converters/servergroup/DeployKubernetesAtomicOperationConverterSpec.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.DeployKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup.DeployKubernetesAtomicOperation -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.fasterxml.jackson.databind.ObjectMapper -import spock.lang.Shared -import spock.lang.Specification - -class DeployKubernetesAtomicOperationConverterSpec extends Specification { - private static final ACCOUNT = "my-test-account" - private static final APPLICATION = "app" - private static final STACK = "stack" - private static final DETAILS = "details" - - @Shared - ObjectMapper mapper = new ObjectMapper() - - @Shared - DeployKubernetesAtomicOperationConverter converter - - def mockCredentials - - def setupSpec() { - converter = new DeployKubernetesAtomicOperationConverter(objectMapper: mapper) - } - - def setup() { - mockCredentials = Mock(KubernetesNamedAccountCredentials) - converter.accountCredentialsProvider = Mock(AccountCredentialsProvider) - } - - void "DeployKubernetesAtomicOperationConverter type returns DeployKubernetesAtomicOperation and DeployKubernetesAtomicOperationDescription"() { - setup: - def input = [app: APPLICATION, - stack: STACK, - freeFormDetails: DETAILS, - account: ACCOUNT] - when: - def description = converter.convertDescription(input) - - then: - 1 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - description instanceof DeployKubernetesAtomicOperationDescription - - when: - def operation = converter.convertOperation(input) - - then: - 1 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - operation instanceof DeployKubernetesAtomicOperation - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/KubernetesUtilSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/KubernetesUtilSpec.groovy deleted file mode 100644 index 7b4d51d81e9..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/KubernetesUtilSpec.groovy +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2016 Google, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import spock.lang.Specification -import spock.lang.Unroll - -class KubernetesUtilSpec extends Specification { - private static final String REGISTRY1 = 'gcr.io' - private static final String REGISTRY2 = 'localhost:5000' - private static final String REPOSITORY1 = 'ubuntu' - private static final String REPOSITORY2 = 'library/nginx' - private static final String TAG1 = '1.0' - private static final String TAG2 = 'mytag' - private static final String DIGEST1 = 'sha256:1b0a6c01c29ff911bf5c9857e29b8847a98f80b2b1b785622d78e317d25503dd' - private static final String DIGEST2 = 'sha256:c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31' - - @Unroll - void "should correctly build an image description"() { - when: - def imageDescription = KubernetesUtil.buildImageDescription("$registry/$repository:$tag") - - then: - imageDescription.registry == registry - imageDescription.repository == repository - imageDescription.tag == tag - imageDescription.digest == null - - where: - registry | repository | tag - REGISTRY1 | REPOSITORY1 | TAG1 - REGISTRY1 | REPOSITORY1 | TAG2 - REGISTRY1 | REPOSITORY2 | TAG1 - REGISTRY1 | REPOSITORY2 | TAG2 - REGISTRY2 | REPOSITORY1 | TAG1 - REGISTRY2 | REPOSITORY1 | TAG2 - REGISTRY2 | REPOSITORY2 | TAG1 - REGISTRY2 | REPOSITORY2 | TAG2 - } - - @Unroll - void "should correctly build an image description from a digest"() { - when: - def imageDescription = KubernetesUtil.buildImageDescription("$registry/$repository@$digest") - - then: - imageDescription.registry == registry - imageDescription.repository == repository - imageDescription.tag == null - imageDescription.digest == digest - - where: - registry | repository | digest - REGISTRY1 | REPOSITORY1 | DIGEST1 - REGISTRY1 | REPOSITORY1 | DIGEST2 - REGISTRY1 | REPOSITORY2 | DIGEST1 - REGISTRY1 | REPOSITORY2 | DIGEST2 - REGISTRY2 | REPOSITORY1 | DIGEST1 - REGISTRY2 | REPOSITORY1 | DIGEST2 - REGISTRY2 | REPOSITORY2 | DIGEST1 - REGISTRY2 | REPOSITORY2 | DIGEST2 - } - -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationSpec.groovy deleted file mode 100644 index ec80dac81d6..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationSpec.groovy +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.loadbalancer - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesNamedServicePort -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import io.fabric8.kubernetes.api.model.ObjectMeta -import io.fabric8.kubernetes.api.model.Service -import io.fabric8.kubernetes.api.model.ServicePort -import io.fabric8.kubernetes.api.model.ServiceSpec -import spock.lang.Specification -import spock.lang.Subject - -class UpsertKubernetesLoadBalancerAtomicOperationSpec extends Specification { - final static List NAMESPACES = ['default', 'prod'] - final static String NAMESPACE = 'prod' - final static int VALID_PORT1 = 80 - final static int VALID_PORT2 = 7002 - final static int INVALID_PORT = 0 - final static String VALID_PROTOCOL1 = "TCP" - final static String VALID_PROTOCOL2 = "UDP" - final static String INVALID_PROTOCOL = "PCT" - final static String VALID_NAME1 = "name" - final static String VALID_NAME2 = "eman" - final static String INVALID_NAME = "bad name ?" 
- final static String VALID_IP1 = "127.0.0.1" - final static Map VALID_LABELS = ["foo": "bar", "bar": "baz"] - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def apiMock - def accountCredentialsRepositoryMock - def credentials - def namedAccountCredentials - def dockerRegistry - def dockerRegistries - def spectatorRegistry - KubernetesNamedServicePort namedPort1 - - def setup() { - apiMock = Mock(KubernetesApiAdaptor) - - spectatorRegistry = new DefaultRegistry() - dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - dockerRegistries = [dockerRegistry] - accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - credentials = new KubernetesV1Credentials(apiMock, NAMESPACES, [], [], accountCredentialsRepositoryMock) - namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name("accountName") - .credentials(credentials) - .dockerRegistries(dockerRegistries) - .spectatorRegistry(spectatorRegistry) - .build() - - namedPort1 = new KubernetesNamedServicePort(name: VALID_NAME1, port: VALID_PORT1, targetPort: VALID_PORT1, nodePort: VALID_PORT1, protocol: VALID_PROTOCOL1) - } - - void "should upsert a new loadbalancer"() { - setup: - def description = new KubernetesLoadBalancerDescription( - name: VALID_NAME1, - externalIps: [VALID_IP1], - ports: [namedPort1], - credentials: namedAccountCredentials, - namespace: NAMESPACE - ) - def resultServiceMock = Mock(Service) - - @Subject def operation = new UpsertKubernetesLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getService(NAMESPACE, VALID_NAME1) >> null - 1 * apiMock.createService(NAMESPACE, { service -> - service.metadata.name == description.name - service.spec.externalIPs.eachWithIndex { ip, idx -> - ip == description.externalIps[idx] - } - def port = service.spec.ports[0] - port.port == namedPort1.port - port.name == namedPort1.name - port.targetPort.intVal == namedPort1.targetPort - port.nodePort == namedPort1.nodePort - port.protocol == namedPort1.protocol - }) >> resultServiceMock - resultServiceMock.getMetadata() >> [name: '', namespace: ''] - } - - - void "should upsert a new loadbalancer, and overwrite port data"() { - setup: - def description = new KubernetesLoadBalancerDescription( - name: VALID_NAME1, - externalIps: [VALID_IP1], - ports: [namedPort1], - credentials: namedAccountCredentials, - namespace: NAMESPACE - ) - def resultServiceMock = Mock(Service) - def existingServiceMock = Mock(Service) - def servicePortMock = Mock(ServicePort) - def serviceSpecMock = Mock(ServiceSpec) - - existingServiceMock.getSpec() >> serviceSpecMock - serviceSpecMock.getPorts() >> [servicePortMock] - servicePortMock.getPort() >> VALID_PORT2 - servicePortMock.getName() >> VALID_NAME2 - servicePortMock.getNodePort() >> VALID_PORT2 - servicePortMock.getProtocol() >> VALID_PROTOCOL2 - - @Subject def operation = new UpsertKubernetesLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getService(NAMESPACE, VALID_NAME1) >> existingServiceMock - 1 * apiMock.replaceService(NAMESPACE, VALID_NAME1, { service -> - service.metadata.name == description.name - service.spec.externalIPs.eachWithIndex { ip, idx -> - ip == description.externalIps[idx] - } - def port = service.spec.ports[0] - port.port == namedPort1.port - port.name == namedPort1.name - port.targetPort.intVal == namedPort1.targetPort - port.nodePort == namedPort1.nodePort - port.protocol == namedPort1.protocol - }) >> resultServiceMock - 
resultServiceMock.getMetadata() >> [name: '', namespace: ''] - } - - void "should upsert a new loadbalancer, and insert port data"() { - setup: - def description = new KubernetesLoadBalancerDescription( - name: VALID_NAME1, - externalIps: [VALID_IP1], - credentials: namedAccountCredentials, - namespace: NAMESPACE - ) - def resultServiceMock = Mock(Service) - def existingServiceMock = Mock(Service) - def servicePortMock = Mock(ServicePort) - def serviceSpecMock = Mock(ServiceSpec) - - existingServiceMock.getSpec() >> serviceSpecMock - serviceSpecMock.getPorts() >> [servicePortMock] - servicePortMock.getPort() >> VALID_PORT2 - servicePortMock.getName() >> VALID_NAME2 - servicePortMock.getNodePort() >> VALID_PORT2 - servicePortMock.getProtocol() >> VALID_PROTOCOL2 - - @Subject def operation = new UpsertKubernetesLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getService(NAMESPACE, VALID_NAME1) >> existingServiceMock - 1 * apiMock.replaceService(NAMESPACE, VALID_NAME1, { service -> - service.metadata.name == description.name - service.spec.externalIPs.eachWithIndex { ip, idx -> - ip == description.externalIps[idx] - } - def port = service.spec.ports[0] - port.port == VALID_PORT2 - port.name == VALID_NAME2 - port.nodePort == VALID_PORT2 - port.protocol == VALID_PROTOCOL2 - }) >> resultServiceMock - resultServiceMock.getMetadata() >> [name: '', namespace: ''] - } - - void "should upsert a new loadbalancer, and insert ip data"() { - setup: - def description = new KubernetesLoadBalancerDescription( - name: VALID_NAME1, - credentials: namedAccountCredentials, - namespace: NAMESPACE - ) - def resultServiceMock = Mock(Service) - def existingServiceMock = Mock(Service) - def serviceSpecMock = Mock(ServiceSpec) - - existingServiceMock.getSpec() >> serviceSpecMock - serviceSpecMock.getExternalIPs() >> [VALID_IP1] - - @Subject def operation = new UpsertKubernetesLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getService(NAMESPACE, VALID_NAME1) >> existingServiceMock - 1 * apiMock.replaceService(NAMESPACE, VALID_NAME1, { service -> - service.metadata.name == description.name - service.spec.externalIPs[0] = VALID_IP1 - }) >> resultServiceMock - resultServiceMock.getMetadata() >> [name: '', namespace: ''] - } - - void "should upsert a new loadbalancer, and set labels"() { - setup: - def description = new KubernetesLoadBalancerDescription( - name: VALID_NAME1, - externalIps: [VALID_IP1], - credentials: namedAccountCredentials, - namespace: NAMESPACE, - serviceLabels: VALID_LABELS - ) - def resultServiceMock = Mock(Service) - def mockMetaData = Mock(ObjectMeta) - resultServiceMock.metadata >> mockMetaData - - @Subject def operation = new UpsertKubernetesLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getService(NAMESPACE, VALID_NAME1) >> null - 1 * apiMock.createService(NAMESPACE, { service -> - service.metadata.name == description.name - service.metadata.labels == VALID_LABELS - }) >> resultServiceMock - } - - void "should upsert a new loadbalancer, and copy labels over"() { - setup: - def description = new KubernetesLoadBalancerDescription( - name: VALID_NAME1, - externalIps: [VALID_IP1], - credentials: namedAccountCredentials, - namespace: NAMESPACE - ) - def resultServiceMock = Mock(Service) - def existingServiceMock = Mock(Service) - def metadataMock = Mock(ObjectMeta) - - existingServiceMock.getMetadata() >> metadataMock - metadataMock.getLabels() >> 
VALID_LABELS - - @Subject def operation = new UpsertKubernetesLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getService(NAMESPACE, VALID_NAME1) >> existingServiceMock - 1 * apiMock.replaceService(NAMESPACE, VALID_NAME1, { service -> - service.metadata.name == description.name - service.metadata.labels == VALID_LABELS - }) >> resultServiceMock - resultServiceMock.getMetadata() >> [name: '', namespace: ''] - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/UpsertKubernetesV1SecurityGroupAtomicOperationSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/UpsertKubernetesV1SecurityGroupAtomicOperationSpec.groovy deleted file mode 100644 index fdcb5736312..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/securitygroup/UpsertKubernetesV1SecurityGroupAtomicOperationSpec.groovy +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.securitygroup - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesIngressTlS -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.securitygroup.KubernetesSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import io.fabric8.kubernetes.api.model.extensions.Ingress -import io.fabric8.kubernetes.api.model.extensions.IngressTLS -import spock.lang.Specification -import spock.lang.Subject - -class UpsertKubernetesV1SecurityGroupAtomicOperationSpec extends Specification { - final static List NAMESPACES = ['default', 'prod'] - final static String NAMESPACE = 'prod' - final static String INGRESS_NAME = "fooingress" - final static String TLS_HOST = "supersecure.com" - final static String TLS_SECRET = "mumstheword" - final static Map ANNOTATIONS = ["foo": "bar", "bar": "baz"] - final static Map LABELS = ["can_you": "kick_it", "yes": "you_can"] - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - KubernetesApiAdaptor apiMock - def accountCredentialsRepositoryMock - def credentials - def namedAccountCredentials - def dockerRegistry - def dockerRegistries - def spectatorRegistry - def testTLS, resultTLS 
- - def setup() { - apiMock = Mock(KubernetesApiAdaptor) - - spectatorRegistry = new DefaultRegistry() - dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - dockerRegistries = [dockerRegistry] - accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - credentials = new KubernetesV1Credentials(apiMock, NAMESPACES, [], [], accountCredentialsRepositoryMock) - namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name("accountName") - .credentials(credentials) - .dockerRegistries(dockerRegistries) - .spectatorRegistry(spectatorRegistry) - .build() - - testTLS = [new KubernetesIngressTlS([TLS_HOST], TLS_SECRET)].asList() - resultTLS = [new IngressTLS([TLS_HOST], TLS_SECRET)].asList() - - } - - void "should upsert a new SecurityGroup with labels and annotations"() { - setup: - def description = new KubernetesSecurityGroupDescription( - securityGroupName: INGRESS_NAME, - namespace: NAMESPACE, - annotations: ANNOTATIONS, - labels: LABELS, - credentials: namedAccountCredentials, - tls: testTLS, - ) - def resultIngressMock = Mock(Ingress) - - @Subject def operation = new UpsertKubernetesSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getIngress(NAMESPACE, INGRESS_NAME) >> null - 1 * apiMock.createIngress(NAMESPACE, { ingress -> - ingress.metadata.name == description.securityGroupName - ingress.metadata.annotations == description.annotations - ingress.metadata.labels == description.labels - ingress.spec.tls == resultTLS - }) >> resultIngressMock - resultIngressMock.getMetadata() >> [name: '', namespace: ''] - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/CloneKubernetesAtomicOperationSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/CloneKubernetesAtomicOperationSpec.groovy deleted file mode 100644 index 57ee2e8e420..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/CloneKubernetesAtomicOperationSpec.groovy +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
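The security-group spec above drives the create branch by stubbing `getIngress(...) >> null`, while the load-balancer spec before it exercised the replace branch by returning an existing service. A condensed, self-contained sketch of that upsert dispatch and the two-branch test shape, under hypothetical `IngressApi`/`Upserter` names (not the deleted operation classes):

```groovy
import spock.lang.Specification

// Hypothetical API and upsert helper, shown only to illustrate the branch
// coverage pattern used by the deleted upsert specs.
interface IngressApi {
  Object getIngress(String namespace, String name)
  Object createIngress(String namespace, Object ingress)
  Object replaceIngress(String namespace, String name, Object ingress)
}

class Upserter {
  IngressApi api

  Object upsert(String namespace, String name, Object ingress) {
    api.getIngress(namespace, name) == null ?
        api.createIngress(namespace, ingress) :
        api.replaceIngress(namespace, name, ingress)
  }
}

class UpsertBranchSpec extends Specification {
  IngressApi api = Mock()
  Upserter upserter = new Upserter(api: api)

  void "creates the ingress when none exists yet"() {
    when:
    upserter.upsert('prod', 'fooingress', [:])

    then:
    1 * api.getIngress('prod', 'fooingress') >> null  // lookup miss -> create
    1 * api.createIngress('prod', _)
    0 * api.replaceIngress(*_)
  }

  void "replaces the ingress when one already exists"() {
    when:
    upserter.upsert('prod', 'fooingress', [:])

    then:
    1 * api.getIngress('prod', 'fooingress') >> [existing: true]  // hit -> replace
    1 * api.replaceIngress('prod', 'fooingress', _)
    0 * api.createIngress(*_)
  }
}
```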
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.CloneKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesResourceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import io.fabric8.kubernetes.api.model.* -import spock.lang.Specification -import spock.lang.Subject - -class CloneKubernetesAtomicOperationSpec extends Specification { - private static final APPLICATION = "myapp" - private static final STACK = "test" - private static final DETAIL = "mdservice" - private static final NAMESPACE1 = "default" - private static final NAMESPACE2 = "nondefault" - private static final SEQUENCE = "v000" - private static final TARGET_SIZE = 2 - private static final LOAD_BALANCER_NAMES = ["lb1", "lb2"] - private static final LABELS = ["load-balancer-lb1": true, "load-balancer-lb2": true] - private static final CONTAINER_NAMES = ["c1", "c2"] - private static final REGISTRY = 'index.docker.io' - private static final TAG = 'latest' - private static final REPOSITORY = 'library/nginx' - private static final REQUEST_CPU = ["100m", null] - private static final REQUEST_MEMORY = ["100Mi", "200Mi"] - private static final LIMIT_CPU = ["120m", "200m"] - private static final LIMIT_MEMORY = ["200Mi", "300Mi"] - private static final ANCESTOR_SERVER_GROUP_NAME = "$APPLICATION-$STACK-$DETAIL-$SEQUENCE" - - def containers - def ancestorNames - def expectedResultDescription - def replicationController - def replicationControllerSpec - def podTemplateSpec - def objectMetadata - def podSpec - def replicationControllerContainers - def apiMock - def dockerRegistry - def dockerRegistries - def credentials - def namedAccountCredentials - def sourceNamedAccountCredentials - def accountCredentialsRepositoryMock - def spectatorRegistry - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - apiMock = Mock(KubernetesApiAdaptor) - - def imageId = KubernetesUtil.getImageId(REGISTRY, REPOSITORY, TAG, null) - def imageDescription = KubernetesUtil.buildImageDescription(imageId) - - containers = [] - CONTAINER_NAMES.eachWithIndex { name, idx -> - def requests = new KubernetesResourceDescription(cpu: REQUEST_CPU[idx], memory: REQUEST_MEMORY[idx]) - def limits = new KubernetesResourceDescription(cpu: LIMIT_CPU[idx], memory: LIMIT_MEMORY[idx]) - containers = containers << new KubernetesContainerDescription(name: name, imageDescription: imageDescription, requests: requests, limits: limits) - } - - ancestorNames = [ - "app": APPLICATION, - "stack": STACK, - "detail": DETAIL - ] - - expectedResultDescription = new 
CloneKubernetesAtomicOperationDescription( - application: APPLICATION, - stack: STACK, - freeFormDetails: DETAIL, - targetSize: TARGET_SIZE, - loadBalancers: LOAD_BALANCER_NAMES, - containers: containers, - namespace: NAMESPACE1 - ) - - spectatorRegistry = new DefaultRegistry() - replicationController = new ReplicationController() - replicationControllerSpec = new ReplicationControllerSpec() - podTemplateSpec= new PodTemplateSpec() - objectMetadata = new ObjectMeta() - podSpec = new PodSpec() - accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - dockerRegistries = [dockerRegistry] - credentials = new KubernetesV1Credentials(apiMock, [], [], [], accountCredentialsRepositoryMock) - namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name("name") - .dockerRegistries(dockerRegistries) - .spectatorRegistry(spectatorRegistry) - .credentials(credentials) - .build() - - sourceNamedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name("name") - .dockerRegistries(dockerRegistries) - .spectatorRegistry(spectatorRegistry) - .credentials(credentials) - .build() - - objectMetadata.setLabels(LABELS) - podTemplateSpec.setMetadata(objectMetadata) - replicationControllerSpec.setTemplate(podTemplateSpec) - - replicationControllerContainers = [] - containers = [] - def l = CONTAINER_NAMES.size() - CONTAINER_NAMES.eachWithIndex { name, idx -> - def container = new Container() - container.setName(name) - container.setImage(name) - - def requestsBuilder = new ResourceRequirementsBuilder() - // Rotate indices to ensure they are overwritten by request - requestsBuilder = requestsBuilder.addToLimits([cpu: new Quantity(LIMIT_CPU[l - idx]), memory: new Quantity(LIMIT_MEMORY[l - idx])]) - requestsBuilder = requestsBuilder.addToRequests([cpu: new Quantity(REQUEST_CPU[l - idx]), memory: new Quantity(REQUEST_MEMORY[l - idx])]) - container.setResources(requestsBuilder.build()) - replicationControllerContainers = replicationControllerContainers << container - - def requests = new KubernetesResourceDescription(cpu: REQUEST_CPU[l - idx], memory: REQUEST_MEMORY[l - idx]) - def limits = new KubernetesResourceDescription(cpu: LIMIT_CPU[l - idx], memory: LIMIT_MEMORY[l - idx]) - containers = containers << new KubernetesContainerDescription(name: name, imageDescription: imageDescription, requests: requests, limits: limits) - } - - podSpec.setContainers(replicationControllerContainers) - podTemplateSpec.setSpec(podSpec) - replicationControllerSpec.setReplicas(TARGET_SIZE) - replicationController.setSpec(replicationControllerSpec) - } - - void "builds a description based on ancestor server group, overrides nothing"() { - setup: - def inputDescription = new CloneKubernetesAtomicOperationDescription( - source: [serverGroupName: ANCESTOR_SERVER_GROUP_NAME, namespace: NAMESPACE1], - credentials: namedAccountCredentials, - sourceCredentials: sourceNamedAccountCredentials - ) - - @Subject def operation = new CloneKubernetesAtomicOperation(inputDescription) - - apiMock.getReplicationController(NAMESPACE1, inputDescription.source.serverGroupName) >> replicationController - - when: - def resultDescription = operation.cloneAndOverrideDescription() - - then: - resultDescription.application == expectedResultDescription.application - resultDescription.stack == expectedResultDescription.stack - resultDescription.freeFormDetails == expectedResultDescription.freeFormDetails - resultDescription.targetSize == 
expectedResultDescription.targetSize - resultDescription.loadBalancers == expectedResultDescription.loadBalancers - resultDescription.namespace == expectedResultDescription.namespace - resultDescription.containers.eachWithIndex { c, idx -> - c.imageDescription.registry == expectedResultDescription.containers[idx].imageDescription.registry - c.imageDescription.tag == expectedResultDescription.containers[idx].imageDescription.tag - c.imageDescription.repository == expectedResultDescription.containers[idx].imageDescription.repository - c.name == expectedResultDescription.containers[idx].name - c.requests?.cpu == expectedResultDescription.containers[idx].requests?.cpu - c.requests?.memory == expectedResultDescription.containers[idx].requests?.memory - c.limits?.cpu == expectedResultDescription.containers[idx].limits?.cpu - c.limits?.memory == expectedResultDescription.containers[idx].limits?.memory - } - } - - void "operation builds a description based on ancestor server group, overrides everything"() { - setup: - def inputDescription = new CloneKubernetesAtomicOperationDescription( - application: APPLICATION, - stack: STACK, - namespace: NAMESPACE1, - freeFormDetails: DETAIL, - targetSize: TARGET_SIZE, - loadBalancers: LOAD_BALANCER_NAMES, - containers: containers, - credentials: namedAccountCredentials, - sourceCredentials: sourceNamedAccountCredentials, - source: [serverGroupName: ANCESTOR_SERVER_GROUP_NAME, namespace: NAMESPACE2] - ) - - @Subject def operation = new CloneKubernetesAtomicOperation(inputDescription) - - apiMock.getReplicationController(NAMESPACE2, inputDescription.source.serverGroupName) >> replicationController - - when: - def resultDescription = operation.cloneAndOverrideDescription() - - then: - resultDescription.application == expectedResultDescription.application - resultDescription.stack == expectedResultDescription.stack - resultDescription.freeFormDetails == expectedResultDescription.freeFormDetails - resultDescription.targetSize == expectedResultDescription.targetSize - resultDescription.loadBalancers == expectedResultDescription.loadBalancers - resultDescription.namespace == expectedResultDescription.namespace - resultDescription.containers.eachWithIndex { c, idx -> - c.imageDescription.registry == expectedResultDescription.containers[idx].imageDescription.registry - c.imageDescription.tag == expectedResultDescription.containers[idx].imageDescription.tag - c.imageDescription.repository == expectedResultDescription.containers[idx].imageDescription.repository - c.name == expectedResultDescription.containers[idx].name - c.requests.cpu == expectedResultDescription.containers[idx].requests.cpu - c.requests.memory == expectedResultDescription.containers[idx].requests.memory - c.limits.cpu == expectedResultDescription.containers[idx].limits.cpu - c.limits.memory == expectedResultDescription.containers[idx].limits.memory - } - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DeployKubernetesAtomicOperationSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DeployKubernetesAtomicOperationSpec.groovy deleted file mode 100644 index ed50a43cb03..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/ops/servergroup/DeployKubernetesAtomicOperationSpec.groovy +++ /dev/null @@ -1,285 +0,0 @@ -/* - * Copyright 2016 Google, Inc. 
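The two clone feature methods that close above ("overrides nothing" / "overrides everything") exercise a single precedence rule: any field left unset on the incoming description falls back to the ancestor server group's value. A rough, hypothetical sketch of that rule over plain maps (the real `CloneKubernetesAtomicOperation` works on typed descriptions, not maps):

```groovy
// Hypothetical illustration of the fallback rule the clone tests verify:
// unset (null) request fields inherit the ancestor's values.
static Map cloneAndOverride(Map ancestor, Map request) {
  (ancestor.keySet() + request.keySet()).collectEntries { key ->
    [(key): request[key] != null ? request[key] : ancestor[key]]
  }
}

def ancestor = [application: 'myapp', stack: 'test', detail: 'mdservice', targetSize: 2]

// Empty request: every field falls back to the ancestor ("overrides nothing").
assert cloneAndOverride(ancestor, [:]) == ancestor

// Fully specified request: the ancestor contributes nothing ("overrides everything").
assert cloneAndOverride(ancestor, [stack: 'prod', targetSize: 5]).targetSize == 5
```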
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.ops.servergroup - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.* -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.exception.KubernetesResourceNotFoundException -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import io.fabric8.kubernetes.api.model.* -import io.fabric8.kubernetes.api.model.apps.ReplicaSet -import io.fabric8.kubernetes.client.dsl.internal.ReplicationControllerOperationsImpl -import io.fabric8.kubernetes.client.dsl.internal.ServiceOperationsImpl -import spock.lang.Specification -import spock.lang.Subject - -class DeployKubernetesAtomicOperationSpec extends Specification { - private static final NAMESPACE = "default" - private static final APPLICATION = "app" - private static final SERVER_GROUP_NAME = "serverGroup" - private static final STACK = "stack" - private static final DETAILS = "details" - private static final SEQUENCE = "v000" - private static final TARGET_SIZE = 3 - private static final REGISTRY = 'index.docker.io' - private static final TAG = 'latest' - private static final REPOSITORY = 'library/nginx' - private static final LOAD_BALANCER_NAMES = ["lb1", "lb2"] - private static final CONTAINER_NAMES = ["c1", "c2"] - private static final REQUEST_CPU = ["100m", null] - private static final REQUEST_MEMORY = ["100Mi", "200Mi"] - private static final LIMIT_CPU = ["120m", "200m"] - private static final LIMIT_MEMORY = ["200Mi", "300Mi"] - private static final DOCKER_REGISTRY_ACCOUNTS = [new LinkedDockerRegistryConfiguration(accountName: "my-docker-account")] - private static final PORT = 80 - private static final PERIOD_SECONDS = 20 - private static final SOURCE_CAPACITY = 10 - private static final SOURCE = new Source(account: "account", region: "region", namespace: NAMESPACE, serverGroupName: SERVER_GROUP_NAME, useSourceCapacity: true) - - def spectatorRegistry - def apiMock - def credentials - def namedAccountCredentials - def dockerRegistry - def dockerRegistries - def containers - def description - def replicationControllerOperationsMock - def 
replicationControllerListMock - def replicationControllerMock - def replicationControllerSpecMock - def replicaSetMock - - def serviceOperationsMock - def serviceListMock - def serviceMock - def serviceSpecMock - def servicePortMock - def metadataMock - - def intOrStringMock - - def clusterName - def replicationControllerName - def imageId - - def accountCredentialsRepositoryMock - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - spectatorRegistry = new DefaultRegistry() - apiMock = Mock(KubernetesApiAdaptor) - replicationControllerOperationsMock = Mock(ReplicationControllerOperationsImpl) - replicationControllerListMock = Mock(ReplicationControllerList) - replicaSetMock = Mock(ReplicaSet) - serviceOperationsMock = Mock(ServiceOperationsImpl) - serviceListMock = Mock(ServiceList) - serviceMock = Mock(Service) - serviceSpecMock = Mock(ServiceSpec) - servicePortMock = Mock(ServicePort) - metadataMock = Mock(ObjectMeta) - intOrStringMock = Mock(IntOrString) - accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - replicationControllerMock = Mock(ReplicationController) - replicationControllerSpecMock = Mock(ReplicationControllerSpec) - - def livenessProbe = new KubernetesProbe([ - periodSeconds: PERIOD_SECONDS, - handler: new KubernetesHandler([ - type: KubernetesHandlerType.TCP, - tcpSocketAction: new KubernetesTcpSocketAction([ - port: PORT - ]) - ]) - ]) - - imageId = KubernetesUtil.getImageId(REGISTRY, REPOSITORY, TAG, null) - def imageDescription = KubernetesUtil.buildImageDescription(imageId) - - DOCKER_REGISTRY_ACCOUNTS.forEach({ account -> - def dockerRegistryAccountMock = Mock(DockerRegistryNamedAccountCredentials) - accountCredentialsRepositoryMock.getOne(account.accountName) >> dockerRegistryAccountMock - dockerRegistryAccountMock.getAccountName() >> account - apiMock.getSecret(NAMESPACE, account.accountName) >> null - apiMock.createSecret(NAMESPACE, _) >> null - }) - - dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - dockerRegistries = [dockerRegistry] - credentials = new KubernetesV1Credentials(apiMock, [NAMESPACE], [], DOCKER_REGISTRY_ACCOUNTS, accountCredentialsRepositoryMock,) - namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name("name") - .dockerRegistries(dockerRegistries) - .credentials(credentials) - .spectatorRegistry(spectatorRegistry) - .build() - clusterName = KubernetesUtil.combineAppStackDetail(APPLICATION, STACK, DETAILS) - replicationControllerName = String.format("%s-v%s", clusterName, SEQUENCE) - - replicationControllerSpecMock.replicas >> SOURCE_CAPACITY - replicationControllerMock.spec >> replicationControllerSpecMock - apiMock.getReplicationController(NAMESPACE, SERVER_GROUP_NAME) >> replicationControllerMock - - containers = [] - CONTAINER_NAMES.eachWithIndex { name, idx -> - def requests = new KubernetesResourceDescription(cpu: REQUEST_CPU[idx], memory: REQUEST_MEMORY[idx]) - def limits = new KubernetesResourceDescription(cpu: LIMIT_CPU[idx], memory: LIMIT_MEMORY[idx]) - containers = containers << new KubernetesContainerDescription(name: name, - imageDescription: imageDescription, - requests: requests, - limits: limits, - livenessProbe: livenessProbe - ) - } - } - - void "should deploy a replication controller"() { - setup: - description = new DeployKubernetesAtomicOperationDescription( - application: APPLICATION, - stack: STACK, - freeFormDetails: DETAILS, - targetSize: TARGET_SIZE, - loadBalancers: LOAD_BALANCER_NAMES, - containers: containers, - 
credentials: namedAccountCredentials - ) - - @Subject def operation = new DeployKubernetesAtomicOperation(description) - - when: - operation.operate([]) - - then: - - 1 * apiMock.getReplicationControllers(NAMESPACE) >> [] - 1 * apiMock.getReplicaSets(NAMESPACE) >> [] - 5 * replicaSetMock.getMetadata() >> metadataMock - 3 * metadataMock.getName() >> replicationControllerName - 1 * apiMock.createReplicaSet(NAMESPACE, { ReplicaSet rs -> - LOAD_BALANCER_NAMES.each { name -> - assert(rs.spec.template.metadata.labels[KubernetesUtil.loadBalancerKey(name)]) - } - - assert(rs.spec.replicas == TARGET_SIZE) - - CONTAINER_NAMES.eachWithIndex { name, idx -> - assert(rs.spec.template.spec.containers[idx].name == name) - assert(rs.spec.template.spec.containers[idx].image == imageId) - assert(rs.spec.template.spec.containers[idx].resources.requests.cpu == REQUEST_CPU[idx]) - assert(rs.spec.template.spec.containers[idx].resources.requests.memory == REQUEST_MEMORY[idx]) - assert(rs.spec.template.spec.containers[idx].resources.limits.cpu == LIMIT_CPU[idx]) - assert(rs.spec.template.spec.containers[idx].resources.limits.memory == LIMIT_MEMORY[idx]) - assert(rs.spec.template.spec.containers[idx].livenessProbe.periodSeconds == PERIOD_SECONDS) - assert(rs.spec.template.spec.containers[idx].livenessProbe.tcpSocket.port.intVal == PORT) - } - }) >> replicaSetMock - } - - void "should error when source capacity specified but no source exists"() { - setup: - description = new DeployKubernetesAtomicOperationDescription( - application: APPLICATION, - stack: STACK, - freeFormDetails: DETAILS, - targetSize: TARGET_SIZE, - loadBalancers: LOAD_BALANCER_NAMES, - containers: containers, - credentials: namedAccountCredentials, - source: SOURCE - ) - - @Subject def operation = new DeployKubernetesAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * apiMock.getReplicationController(NAMESPACE, SERVER_GROUP_NAME) >> null - 1 * apiMock.getReplicaSet(NAMESPACE, SERVER_GROUP_NAME) >> null - thrown(KubernetesResourceNotFoundException) - } - - void "should copy source capacity when specified"() { - setup: - description = new DeployKubernetesAtomicOperationDescription( - application: APPLICATION, - stack: STACK, - freeFormDetails: DETAILS, - targetSize: TARGET_SIZE, - loadBalancers: LOAD_BALANCER_NAMES, - containers: containers, - credentials: namedAccountCredentials, - source: SOURCE - ) - - @Subject def operation = new DeployKubernetesAtomicOperation(description) - - when: - operation.operate([]) - - then: - - 1 * apiMock.getReplicationControllers(NAMESPACE) >> [] - 1 * apiMock.getReplicaSets(NAMESPACE) >> [] - 5 * replicaSetMock.getMetadata() >> metadataMock - 3 * metadataMock.getName() >> replicationControllerName - 1 * apiMock.createReplicaSet(NAMESPACE, { ReplicaSet rs -> - assert(rs.spec.replicas == SOURCE_CAPACITY) - true - }) >> replicaSetMock - } - - void "should favor sequence when specified"() { - setup: - description = new DeployKubernetesAtomicOperationDescription( - application: APPLICATION, - sequence: 10, - targetSize: TARGET_SIZE, - containers: containers, - credentials: namedAccountCredentials - ) - def replicaSetName = new KubernetesServerGroupNameResolver(NAMESPACE, null) - .generateServerGroupName(APPLICATION, null, null, description.sequence, false) - def operation = new DeployKubernetesAtomicOperation(description) - - when: - operation.operate([]) - - then: - 0 * apiMock.getReplicationControllers(NAMESPACE) >> [] - 0 * apiMock.getReplicaSets(NAMESPACE) >> [] - 5 * 
replicaSetMock.getMetadata() >> metadataMock - 3 * metadataMock.getName() >> replicationControllerName - 1 * apiMock.createReplicaSet(NAMESPACE, { ReplicaSet rs -> - assert rs.metadata.name == replicaSetName - true - }) >> replicaSetMock - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/StandardKubernetesAttributeValidatorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/StandardKubernetesAttributeValidatorSpec.groovy deleted file mode 100644 index ff2e2af9755..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/StandardKubernetesAttributeValidatorSpec.groovy +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators - -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class StandardKubernetesAttributeValidatorSpec extends Specification { - private static final ACCOUNT_NAME = "auto" - private static final DECORATOR = "decorator" - private static final List NAMESPACES = ["default", "prod"] - private static final List DOCKER_REGISTRY_ACCOUNTS = [ - new LinkedDockerRegistryConfiguration(accountName: "my-docker-account"), - new LinkedDockerRegistryConfiguration(accountName: "restricted-docker-account", namespaces: ["prod"])] - - @Shared - KubernetesV1Credentials credentials - - @Shared - DefaultAccountCredentialsProvider accountCredentialsProvider - - void setupSpec() { - def credentialsRepo = new MapBackedAccountCredentialsRepository() - accountCredentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) - def apiMock = Mock(KubernetesApiAdaptor) - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - DOCKER_REGISTRY_ACCOUNTS.forEach({ account -> - def dockerRegistryAccountMock = Mock(DockerRegistryNamedAccountCredentials) - accountCredentialsRepositoryMock.getOne(account.accountName) >> dockerRegistryAccountMock - dockerRegistryAccountMock.getAccountName() >> account - NAMESPACES.forEach({ namespace -> - 
apiMock.getSecret(namespace, account.accountName) >> null - apiMock.createSecret(namespace, _) >> null - }) - }) - - def dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - def dockerRegistries = [dockerRegistry] - credentials = new KubernetesV1Credentials(apiMock, NAMESPACES, [], DOCKER_REGISTRY_ACCOUNTS, accountCredentialsRepositoryMock) - def namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name(ACCOUNT_NAME) - .dockerRegistries(dockerRegistries) - .credentials(credentials) - .build() - credentialsRepo.save(ACCOUNT_NAME, namedAccountCredentials) - } - - void "notEmpty accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateNotEmpty("not-empty", label) - then: - 0 * errorsMock._ - - when: - validator.validateNotEmpty(" ", label) - then: - 0 * errorsMock._ - - when: - validator.validateNotEmpty([[]], label) - then: - 0 * errorsMock._ - - when: - validator.validateNotEmpty([null], label) - then: - 0 * errorsMock._ - - when: - validator.validateNotEmpty(0, label) - then: - 0 * errorsMock._ - } - - @Unroll - void "notEmpty reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateNotEmpty(null, label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - - when: - validator.validateNotEmpty("", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - - when: - validator.validateNotEmpty([], label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - } - - void "nonNegative accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateNonNegative(0, label) - then: - 0 * errorsMock._ - - when: - validator.validateNonNegative(1, label) - then: - 0 * errorsMock._ - - when: - validator.validateNonNegative(1 << 30, label) - then: - 0 * errorsMock._ - } - - void "nonNegative reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateNonNegative(-1, label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.negative") - 0 * errorsMock._ - } - - void "byRegex accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - def pattern = /^[a-z0-9A-Z_-]{2,10}$/ - - when: - validator.validateByRegex("check-me", label, pattern) - then: - 0 * errorsMock._ - - when: - validator.validateByRegex("1-2_3-f", label, pattern) - then: - 0 * errorsMock._ - } - - void "byRegex reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - def pattern = /^[a-z0-9A-Z_-]{2,10}$/ - - when: - validator.validateByRegex("too-big-to-fail", label, pattern) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${pattern})") - 0 * errorsMock._ - - when: - validator.validateByRegex("1", label, pattern) - then: - 1 * 
errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${pattern})") - 0 * errorsMock._ - - when: - validator.validateByRegex("a space", label, pattern) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${pattern})") - 0 * errorsMock._ - } - - void "credentials reject (empty)"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - - when: - validator.validateCredentials(null, accountCredentialsProvider) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.account", "${DECORATOR}.account.empty") - 0 * errorsMock._ - - when: - validator.validateCredentials("", accountCredentialsProvider) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.account", "${DECORATOR}.account.empty") - 0 * errorsMock._ - } - - void "credentials reject (unknown)"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - - when: - validator.validateCredentials("You-don't-know-me", accountCredentialsProvider) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.account", "${DECORATOR}.account.notFound") - 0 * errorsMock._ - } - - void "credentials accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - - when: - validator.validateCredentials(ACCOUNT_NAME, accountCredentialsProvider) - then: - 0 * errorsMock._ - } - - void "details accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateDetails("valid", label) - then: - 0 * errorsMock._ - - when: - validator.validateDetails("also-valid", label) - then: - 0 * errorsMock._ - - when: - validator.validateDetails("123-456-789", label) - then: - 0 * errorsMock._ - - when: - validator.validateDetails("", label) - then: - 0 * errorsMock._ - } - - void "details reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateDetails("-", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - - when: - validator.validateDetails("a space", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - - when: - validator.validateDetails("bad*details", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - - when: - validator.validateDetails("-k-e-b-a-b-", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - } - - void "name accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateName("valid", label) - then: - 0 * errorsMock._ - - when: - validator.validateName("mega-valid-name", label) - then: - 0 * errorsMock._ - - when: - validator.validateName("call-me-123-456-7890", label) - 
then: - 0 * errorsMock._ - } - - void "name reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateName("-", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - - when: - validator.validateName("an_underscore", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - - when: - validator.validateName("?name", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - - when: - validator.validateName("", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - } - - void "secretName accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateSecretName("valid", label) - then: - 0 * errorsMock._ - - when: - validator.validateSecretName("mega-valid-name", label) - then: - 0 * errorsMock._ - - when: - validator.validateSecretName("call-me-123-456-7890", label) - then: - 0 * errorsMock._ - - when: - validator.validateSecretName("dots.are.valid-too", label) - then: - 0 * errorsMock._ - } - - void "secretName reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateSecretName("-", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.dnsSubdomainPattern})") - 0 * errorsMock._ - - when: - validator.validateSecretName("an_underscore", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.dnsSubdomainPattern})") - 0 * errorsMock._ - - when: - validator.validateSecretName("?name", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.dnsSubdomainPattern})") - 0 * errorsMock._ - - when: - validator.validateSecretName("", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - } - - - void "application accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateApplication("valid", label) - then: - 0 * errorsMock._ - - when: - validator.validateApplication("application", label) - then: - 0 * errorsMock._ - - when: - validator.validateApplication("7890", label) - then: - 0 * errorsMock._ - } - - void "application reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateApplication("l-l", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * 
errorsMock._ - - when: - validator.validateApplication("?application", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - - when: - validator.validateApplication("", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - } - - void "stack accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateStack("valid", label) - then: - 0 * errorsMock._ - - when: - validator.validateStack("stack", label) - then: - 0 * errorsMock._ - - when: - validator.validateStack("7890", label) - then: - 0 * errorsMock._ - } - - void "stack reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateStack("l-l", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - - when: - validator.validateStack("?stack", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - } - - void "memory accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateMemory("", label) - then: - 0 * errorsMock._ - - when: - validator.validateMemory("100Mi", label) - then: - 0 * errorsMock._ - - when: - validator.validateMemory("1Gi", label) - then: - 0 * errorsMock._ - } - - void "memory reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateMemory(" 100", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - - when: - validator.validateMemory("x100Gi", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - - when: - validator.validateMemory("1Tt!i", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - } - - void "cpu accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateCpu("", label) - then: - 0 * errorsMock._ - - when: - validator.validateCpu("100m", label) - then: - 0 * errorsMock._ - - when: - validator.validateCpu("2m", label) - then: - 0 * errorsMock._ - } - - void "cpu reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateCpu("100z", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match 
${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - - when: - validator.validateCpu("?", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - - when: - validator.validateCpu("- ", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - } - - void "namespace accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateNamespace(credentials, "", label) - then: - 0 * errorsMock._ - - when: - validator.validateNamespace(credentials, NAMESPACES[0], label) - then: - 0 * errorsMock._ - - when: - validator.validateNamespace(credentials, NAMESPACES[1], label) - then: - 0 * errorsMock._ - } - - void "namespace reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateNamespace(credentials, " .-100z", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.notRegistered") - 0 * errorsMock._ - - when: - validator.validateNamespace(credentials, "?", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.notRegistered") - 0 * errorsMock._ - - when: - validator.validateNamespace(credentials, "- ", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.notRegistered") - 0 * errorsMock._ - } - - void "image pull secret accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateImagePullSecret(credentials, DOCKER_REGISTRY_ACCOUNTS[0].accountName, NAMESPACES[0], label) - then: - 0 * errorsMock._ - - when: - validator.validateImagePullSecret(credentials, DOCKER_REGISTRY_ACCOUNTS[1].accountName, NAMESPACES[1], label) - then: - 0 * errorsMock._ - } - - void "image pull secret reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateImagePullSecret(credentials, DOCKER_REGISTRY_ACCOUNTS[1].accountName, NAMESPACES[0], label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.notRegistered") - 0 * errorsMock._ - - when: - validator.validateImagePullSecret(credentials, "?", NAMESPACES[0], label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.notRegistered") - 0 * errorsMock._ - - when: - validator.validateImagePullSecret(credentials, DOCKER_REGISTRY_ACCOUNTS[0].accountName, "not a namespace", label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.notRegistered") - 0 * errorsMock._ - } - - void "port accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validatePort(80, label) - then: - 0 * errorsMock._ - - when: - validator.validatePort(111, label) - then: - 0 * errorsMock._ - - when: - validator.validatePort(65535, label) - then: - 0 * errorsMock._ - } - - void "port reject"() 
{ - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validatePort(0, label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must be in range [1, $StandardKubernetesAttributeValidator.maxPort])") - 0 * errorsMock._ - - when: - validator.validatePort(-1, label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must be in range [1, $StandardKubernetesAttributeValidator.maxPort])") - 0 * errorsMock._ - - when: - validator.validatePort(65536, label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must be in range [1, $StandardKubernetesAttributeValidator.maxPort])") - 0 * errorsMock._ - } - - void "protocol accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateProtocol('TCP', label) - then: - 0 * errorsMock._ - - when: - validator.validateProtocol('UDP', label) - then: - 0 * errorsMock._ - } - - void "protocol reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validateProtocol('', label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - - when: - validator.validateProtocol('UPD', label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must be one of $StandardKubernetesAttributeValidator.protocolList)") - 0 * errorsMock._ - } - - void "path accept"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validatePath('/', label) - then: - 0 * errorsMock._ - - when: - validator.validatePath('/path-to/dir12\\ 3/4', label) - then: - 0 * errorsMock._ - } - - void "path reject"() { - setup: - def errorsMock = Mock(Errors) - def validator = new StandardKubernetesAttributeValidator(DECORATOR, errorsMock) - def label = "label" - - when: - validator.validatePath('', label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.empty") - 0 * errorsMock._ - - when: - validator.validatePath('path-to/dir12\\ 3/4', label) - then: - 1 * errorsMock.rejectValue("${DECORATOR}.${label}", "${DECORATOR}.${label}.invalid (Must match ${StandardKubernetesAttributeValidator.pathPattern})") - 0 * errorsMock._ - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationValidatorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationValidatorSpec.groovy deleted file mode 100644 index 218b70d590e..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/loadbalancer/UpsertKubernetesLoadBalancerAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
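All of the attribute-validator feature methods deleted above share one shape: build the validator with a decorator string and a mocked Spring `Errors`, call a `validate*` method, and expect either no interactions at all or exactly one `rejectValue` whose code is derived from the decorator and label. A condensed, self-contained miniature of that shape, using a hypothetical `NameValidator` in place of the removed `StandardKubernetesAttributeValidator` (the regex here is illustrative, not the real `namePattern`):

```groovy
import org.springframework.validation.Errors
import spock.lang.Specification

// Hypothetical miniature of the deleted validator pattern; the real class
// carried many more validate* methods and patterns.
class NameValidator {
  static final def namePattern = /^[a-z0-9]+([-a-z0-9]*[a-z0-9])?$/
  final String decorator
  final Errors errors

  NameValidator(String decorator, Errors errors) {
    this.decorator = decorator
    this.errors = errors
  }

  void validateName(String value, String label) {
    if (!value) {
      errors.rejectValue("${decorator}.${label}".toString(),
          "${decorator}.${label}.empty".toString())
    } else if (!(value ==~ namePattern)) {
      errors.rejectValue("${decorator}.${label}".toString(),
          "${decorator}.${label}.invalid (Must match ${namePattern})".toString())
    }
  }
}

class NameValidatorSpec extends Specification {
  def errorsMock = Mock(Errors)
  def validator = new NameValidator("decorator", errorsMock)

  void "name accept"() {
    when:
    validator.validateName("mega-valid-name", "label")
    then:
    0 * errorsMock._   // valid input triggers no rejection at all
  }

  void "name reject"() {
    when:
    validator.validateName("an_underscore", "label")
    then:
    1 * errorsMock.rejectValue("decorator.label", { it.startsWith("decorator.label.invalid") })
    0 * errorsMock._   // and nothing else
  }
}
```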
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.loadbalancer - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.loadbalancer.KubernetesNamedServicePort -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors -import spock.lang.Specification - -class UpsertKubernetesLoadBalancerAtomicOperationValidatorSpec extends Specification { - final static DESCRIPTION = "upsertKubernetesLoadBalancerAtomicOperationDescription" - final static List NAMESPACES = ['default', 'prod'] - final static String NAMESPACE = 'prod' - final static int VALID_PORT = 80 - final static int INVALID_PORT = 104729 - final static String VALID_PROTOCOL = "TCP" - final static String INVALID_PROTOCOL = "PCT" - final static String VALID_NAME = "name" - final static String INVALID_NAME = "bad name ?" 
- final static String VALID_IP = "127.0.0.1" - final static String INVALID_IP = "0.127.0.0.1" - final static String VALID_CLUSTER_IP_NONE = "None" - final static String VALID_ACCOUNT = "my-kubernetes-account" - - UpsertKubernetesLoadBalancerAtomicOperationValidator validator - - def spectatorRegistry - def dockerRegistry - def dockerRegistries - def credentials - def namedAccountCredentials - def validPort - def invalidPortPort - def invalidNamePort - def invalidProtocolPort - - void setup() { - validator = new UpsertKubernetesLoadBalancerAtomicOperationValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) - - def apiMock = Mock(KubernetesApiAdaptor) - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - spectatorRegistry = new DefaultRegistry() - dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - dockerRegistries = [dockerRegistry] - credentials = new KubernetesV1Credentials(apiMock, NAMESPACES, [], [], accountCredentialsRepositoryMock) - namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name(VALID_ACCOUNT) - .dockerRegistries(dockerRegistries) - .spectatorRegistry(spectatorRegistry) - .credentials(credentials) - .build() - credentialsRepo.save(VALID_ACCOUNT, namedAccountCredentials) - validator.accountCredentialsProvider = credentialsProvider - - validPort = new KubernetesNamedServicePort(name: VALID_NAME, port: VALID_PORT, protocol: VALID_PROTOCOL) - invalidNamePort = new KubernetesNamedServicePort(name: INVALID_NAME, protocol: VALID_PROTOCOL, port: VALID_PORT) - invalidPortPort = new KubernetesNamedServicePort(name: VALID_NAME, port: INVALID_PORT, protocol: VALID_PROTOCOL) - invalidProtocolPort = new KubernetesNamedServicePort(name: VALID_NAME, protocol: INVALID_PROTOCOL, port: VALID_PORT) - } - - void "validation accept (all fields filled)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - externalIps: [VALID_IP], - ports: [validPort], - account: VALID_ACCOUNT, - namespace: NAMESPACE) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 0 * errorsMock._ - } - - void "validation accept (some fields filled)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - ports: [validPort], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 0 * errorsMock._ - } - - void "validation accept (none cluster ip)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - ports: [validPort], - externalIps: [VALID_IP], - account: VALID_ACCOUNT, - clusterIp: VALID_CLUSTER_IP_NONE) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 0 * errorsMock._ - } - - void "validation reject (bad protocol)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - ports: [invalidProtocolPort], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 1 * errorsMock.rejectValue(_, "${DESCRIPTION}.ports[0].protocol.invalid (Must be one of $StandardKubernetesAttributeValidator.protocolList)") - } - - void "validation reject (bad port name)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - ports: [invalidNamePort], - 
account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 1 * errorsMock.rejectValue(_, "${DESCRIPTION}.ports[0].name.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - } - - void "validation reject (bad port value)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - ports: [invalidPortPort], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 1 * errorsMock.rejectValue(_, "${DESCRIPTION}.ports[0].port.invalid (Must be in range [1, $StandardKubernetesAttributeValidator.maxPort])") - } - - void "validation reject (bad ip value)"() { - setup: - def description = new KubernetesLoadBalancerDescription(name: VALID_NAME, - ports: [validPort], - externalIps: [INVALID_IP], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - - then: - 1 * errorsMock.rejectValue(_, "${DESCRIPTION}.externalIps[0].invalid (Not valid IPv4 address)") - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/CloneKubernetesAtomicOperationValidatorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/CloneKubernetesAtomicOperationValidatorSpec.groovy deleted file mode 100644 index d2785d26149..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/CloneKubernetesAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,387 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
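The "bad ip value" case that closes the load-balancer validator spec above rejects `0.127.0.0.1`, which fails IPv4 validation for the simple reason that it has five octets. A rough, hypothetical sketch of such a check, shown only to make the rejection concrete (the validator's actual implementation is not part of this diff):

```groovy
// Hypothetical IPv4 check; a rough sketch, not the deleted validator's logic.
static boolean isValidIpv4(String address) {
  def octets = address.tokenize('.')
  octets.size() == 4 && octets.every { it.isInteger() && (it as int) in 0..255 }
}

assert isValidIpv4('127.0.0.1')      // VALID_IP
assert !isValidIpv4('0.127.0.0.1')   // INVALID_IP: five octets
assert !isValidIpv4('256.1.1.1')     // octet out of range
```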
- */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.CloneKubernetesAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesContainerDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.KubernetesResourceDescription -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification - -class CloneKubernetesAtomicOperationValidatorSpec extends Specification { - private static final DESCRIPTION = "cloneKubernetesAtomicOperationDescription" - private static final List NAMESPACES = ["default", "prod"] - private static final List DOCKER_REGISTRY_ACCOUNTS = [ - new LinkedDockerRegistryConfiguration(accountName: "my-docker-account"), - new LinkedDockerRegistryConfiguration(accountName: "restricted-docker-account", namespaces: ["prod"])] - - private static final VALID_APPLICATION = "app" - private static final VALID_STACK = "stack" - private static final VALID_DETAILS = "the-details" - private static final VALID_TARGET_SIZE = 3 - private static final VALID_IMAGE = "container-image" - private static final VALID_NAME = "a-name" - private static final VALID_MEMORY1 = "200" - private static final VALID_MEMORY2 = "200Mi" - private static final VALID_CPU1 = "200" - private static final VALID_CPU2 = "200m" - private static final VALID_ACCOUNT = "auto" - private static final VALID_LOAD_BALANCERS = ["x", "y"] - private static final VALID_SECURITY_GROUPS = ["a-1", "b-2"] - private static final VALID_SOURCE_SERVER_GROUP_NAME = "myapp-test-v000" - private static final VALID_SECRET = DOCKER_REGISTRY_ACCOUNTS[0].accountName - - private static final INVALID_APPLICATION = "-app-" - private static final INVALID_STACK = " stack" - private static final INVALID_DETAILS = "the details" - private static final INVALID_TARGET_SIZE = -7 - private static final INVALID_IMAGE = "" - private static final INVALID_NAME = "a?name" - private static final INVALID_MEMORY = "200asdf" - private static final INVALID_CPU = "-1_" - private static final INVALID_ACCOUNT = "valid" - private static final INVALID_LOAD_BALANCERS = [" ", "--"] - private static final INVALID_SECURITY_GROUPS = [" ", "--"] - - @Shared - CloneKubernetesAtomicOperationValidator validator - - void setupSpec() { - validator = new CloneKubernetesAtomicOperationValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def 
credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) - - def spectatorRegistry = new DefaultRegistry() - def apiMock = Mock(KubernetesApiAdaptor) - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - DOCKER_REGISTRY_ACCOUNTS.forEach({ account -> - def dockerRegistryAccountMock = Mock(DockerRegistryNamedAccountCredentials) - accountCredentialsRepositoryMock.getOne(account.accountName) >> dockerRegistryAccountMock - dockerRegistryAccountMock.getAccountName() >> account - NAMESPACES.forEach({ namespace -> - apiMock.getSecret(namespace, account.accountName) >> null - apiMock.createSecret(namespace, _) >> null - }) - }) - - def dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - def dockerRegistries = [dockerRegistry] - def credentials = new KubernetesV1Credentials(apiMock, NAMESPACES, [], DOCKER_REGISTRY_ACCOUNTS, accountCredentialsRepositoryMock) - def namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name(VALID_ACCOUNT) - .dockerRegistries(dockerRegistries) - .credentials(credentials) - .spectatorRegistry(spectatorRegistry) - .build() - credentialsRepo.save(VALID_ACCOUNT, namedAccountCredentials) - validator.accountCredentialsProvider = credentialsProvider - } - - KubernetesContainerDescription fullValidContainerDescription1 - KubernetesContainerDescription fullValidContainerDescription2 - KubernetesContainerDescription partialValidContainerDescription - KubernetesResourceDescription fullValidResourceDescription1 - KubernetesResourceDescription fullValidResourceDescription2 - KubernetesResourceDescription partialValidResourceDescription - - KubernetesContainerDescription fullInvalidContainerDescription - KubernetesContainerDescription partialInvalidContainerDescription - KubernetesResourceDescription fullInvalidResourceDescription - - void setup() { - def imageDescription = KubernetesUtil.buildImageDescription(VALID_IMAGE) - - fullValidResourceDescription1 = new KubernetesResourceDescription(memory: VALID_MEMORY1, cpu: VALID_CPU1) - fullValidResourceDescription2 = new KubernetesResourceDescription(memory: VALID_MEMORY2, cpu: VALID_CPU2) - partialValidResourceDescription = new KubernetesResourceDescription(memory: VALID_MEMORY1) - fullValidContainerDescription1 = new KubernetesContainerDescription(name: VALID_NAME, imageDescription: imageDescription, limits: fullValidResourceDescription1, requests: fullValidResourceDescription1) - fullValidContainerDescription2 = new KubernetesContainerDescription(name: VALID_NAME, imageDescription: imageDescription, limits: fullValidResourceDescription2, requests: fullValidResourceDescription2) - partialValidContainerDescription = new KubernetesContainerDescription(name: VALID_NAME, imageDescription: imageDescription, limits: partialValidResourceDescription) - - fullInvalidResourceDescription = new KubernetesResourceDescription(memory: INVALID_MEMORY, cpu: INVALID_CPU) - fullInvalidContainerDescription = new KubernetesContainerDescription(name: INVALID_NAME, limits: fullInvalidResourceDescription, requests: fullInvalidResourceDescription) - partialInvalidContainerDescription = new KubernetesContainerDescription(name: INVALID_NAME) - } - - void "validation accept (all fields filled)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - freeFormDetails: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - fullValidContainerDescription1, - fullValidContainerDescription2 - ], - 
loadBalancers: VALID_LOAD_BALANCERS, - securityGroups: VALID_SECURITY_GROUPS, - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 0 * errorsMock._ - } - - void "validation accept (minimal fields filled)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 0 * errorsMock._ - } - - void "validation reject (missing credentials)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.account", "${DESCRIPTION}.account.empty") - 0 * errorsMock._ - } - - void "validation reject (invalid stack)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: INVALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.stack", "${DESCRIPTION}.stack.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid application)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: INVALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.application", "${DESCRIPTION}.application.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid target size)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: INVALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.targetSize", "${DESCRIPTION}.targetSize.negative") - 0 * errorsMock._ - } - - void "validation reject (invalid partial container)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialInvalidContainerDescription - ], - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, 
errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].name", "${DESCRIPTION}.container[0].name.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].imageDescription", "${DESCRIPTION}.container[0].imageDescription.empty") - 0 * errorsMock._ - } - - void "validation reject (invalid full container)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - fullInvalidContainerDescription - ], - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].name", "${DESCRIPTION}.container[0].name.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].imageDescription", "${DESCRIPTION}.container[0].imageDescription.empty") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].requests.memory", "${DESCRIPTION}.container[0].requests.memory.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].limits.memory", "${DESCRIPTION}.container[0].limits.memory.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].requests.cpu", "${DESCRIPTION}.container[0].requests.cpu.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].limits.cpu", "${DESCRIPTION}.container[0].limits.cpu.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid load balancers)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - loadBalancers: INVALID_LOAD_BALANCERS, - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.loadBalancers[0]", "${DESCRIPTION}.loadBalancers[0].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.loadBalancers[1]", "${DESCRIPTION}.loadBalancers[1].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid security groups)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - securityGroups: INVALID_SECURITY_GROUPS, - account: VALID_ACCOUNT, - source: [ - serverGroupName: VALID_SOURCE_SERVER_GROUP_NAME - ]) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.securityGroups[0]", "${DESCRIPTION}.securityGroups[0].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.securityGroups[1]", 
"${DESCRIPTION}.securityGroups[1].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - } - - void "validation reject (empty source)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - freeFormDetails: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - fullValidContainerDescription1, - fullValidContainerDescription2 - ], - loadBalancers: VALID_LOAD_BALANCERS, - securityGroups: VALID_SECURITY_GROUPS, - account: VALID_ACCOUNT - ) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.source", "${DESCRIPTION}.source.empty") - 0 * errorsMock._ - } - - void "validation reject (empty source server group name)"() { - setup: - def description = new CloneKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - freeFormDetails: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - fullValidContainerDescription1, - fullValidContainerDescription2 - ], - loadBalancers: VALID_LOAD_BALANCERS, - securityGroups: VALID_SECURITY_GROUPS, - account: VALID_ACCOUNT, - source: [ - serverGroupName: "" - ] - ) - def errorsMock = Mock(Errors) - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.source", "${DESCRIPTION}.source.empty") - 0 * errorsMock._ - } - -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DeployKubernetesAtomicOperationValidatorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DeployKubernetesAtomicOperationValidatorSpec.groovy deleted file mode 100644 index 56834a538b5..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/deploy/validators/servergroup/DeployKubernetesAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.servergroup - -import com.netflix.spectator.api.DefaultRegistry -import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.description.servergroup.* -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.validators.StandardKubernetesAttributeValidator -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification - -class DeployKubernetesAtomicOperationValidatorSpec extends Specification { - private static final DESCRIPTION = "deployKubernetesAtomicOperationDescription" - private static final List NAMESPACES = ["default", "prod"] - private static final List DOCKER_REGISTRY_ACCOUNTS = [ - new LinkedDockerRegistryConfiguration(accountName: "my-docker-account"), - new LinkedDockerRegistryConfiguration(accountName: "restricted-docker-account", namespaces: ["prod"])] - - private static final VALID_APPLICATION = "app" - private static final VALID_STACK = "stack" - private static final VALID_DETAILS = "the-details" - private static final VALID_TARGET_SIZE = 3 - private static final VALID_IMAGE = "container-image" - private static final VALID_NAME = "a-name" - private static final VALID_MEMORY1 = "200" - private static final VALID_MEMORY2 = "200Mi" - private static final VALID_CPU1 = "200" - private static final VALID_CPU2 = "200m" - private static final VALID_ACCOUNT = "auto" - private static final VALID_LOAD_BALANCERS = ["x", "y"] - private static final VALID_SECURITY_GROUPS = ["a-1", "b-2"] - private static final VALID_NAMESPACE = NAMESPACES[0] - private static final VALID_SECRET = DOCKER_REGISTRY_ACCOUNTS[0].accountName - private static final VALID_PATH = "a/b/c" - private static final VALID_PORT = 80 - private static final VALID_SCHEME = "HTTPS" - private static final POSITIVE_NUMBER = 100 - - private static final INVALID_APPLICATION = "-app-" - private static final INVALID_STACK = " stack" - private static final INVALID_DETAILS = "the details" - private static final INVALID_TARGET_SIZE = -7 - private static final INVALID_IMAGE = "" - private static final INVALID_NAME = "a?name" - private static final INVALID_MEMORY = "200?" 
- private static final INVALID_CPU = "9z" - private static final INVALID_ACCOUNT = "valid" - private static final INVALID_LOAD_BALANCERS = [" ", "--"] - private static final INVALID_SECURITY_GROUPS = [" ", "--"] - private static final INVALID_NAMESPACE = "!default" - private static final INVALID_SCHEME = "tcp" - private static final NEGATIVE_NUMBER = -100 - - @Shared - DeployKubernetesAtomicOperationValidator validator - - void setupSpec() { - validator = new DeployKubernetesAtomicOperationValidator() - def credentialsRepo = new MapBackedAccountCredentialsRepository() - def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) - - def spectatorRegistry = new DefaultRegistry() - def apiMock = Mock(KubernetesApiAdaptor) - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - DOCKER_REGISTRY_ACCOUNTS.forEach({ account -> - def dockerRegistryAccountMock = Mock(DockerRegistryNamedAccountCredentials) - accountCredentialsRepositoryMock.getOne(account.accountName) >> dockerRegistryAccountMock - dockerRegistryAccountMock.getAccountName() >> account - NAMESPACES.forEach({ namespace -> - apiMock.getSecret(namespace, account.accountName) >> null - apiMock.createSecret(namespace, _) >> null - }) - }) - - def dockerRegistry = Mock(LinkedDockerRegistryConfiguration) - def dockerRegistries = [dockerRegistry] - def credentials = new KubernetesV1Credentials(apiMock, NAMESPACES, [], DOCKER_REGISTRY_ACCOUNTS, accountCredentialsRepositoryMock) - def namedAccountCredentials = new KubernetesNamedAccountCredentials.Builder() - .name(VALID_ACCOUNT) - .dockerRegistries(dockerRegistries) - .spectatorRegistry(spectatorRegistry) - .credentials(credentials) - .build() - credentialsRepo.save(VALID_ACCOUNT, namedAccountCredentials) - validator.accountCredentialsProvider = credentialsProvider - } - - KubernetesContainerDescription fullValidContainerDescription1 - KubernetesContainerDescription fullValidContainerDescription2 - KubernetesContainerDescription partialValidContainerDescription - KubernetesResourceDescription fullValidResourceDescription1 - KubernetesResourceDescription fullValidResourceDescription2 - KubernetesResourceDescription partialValidResourceDescription - KubernetesProbe fullValidProbe - KubernetesHttpGetAction fullValidHttpGetAction - - KubernetesContainerDescription fullInvalidContainerDescription - KubernetesContainerDescription partialInvalidContainerDescription - KubernetesResourceDescription fullInvalidResourceDescription - KubernetesProbe partialInvalidProbe - KubernetesProbe fullInvalidProbe - KubernetesHttpGetAction partialInvalidHttpGetAction - - void setup() { - def imageDescription = KubernetesUtil.buildImageDescription(VALID_IMAGE) - - fullValidResourceDescription1 = new KubernetesResourceDescription(memory: VALID_MEMORY1, cpu: VALID_CPU1) - - fullValidResourceDescription2 = new KubernetesResourceDescription(memory: VALID_MEMORY2, cpu: VALID_CPU2) - - fullValidHttpGetAction = new KubernetesHttpGetAction( - path: VALID_PATH, - uriScheme: VALID_SCHEME, - port: VALID_PORT, - ) - - fullValidProbe = new KubernetesProbe( - periodSeconds: POSITIVE_NUMBER, - timeoutSeconds: POSITIVE_NUMBER, - initialDelaySeconds: POSITIVE_NUMBER, - successThreshold: POSITIVE_NUMBER, - failureThreshold: POSITIVE_NUMBER, - handler: new KubernetesHandler( - type: KubernetesHandlerType.HTTP, - httpGetAction: fullValidHttpGetAction - ) - ) - - partialValidResourceDescription = new KubernetesResourceDescription(memory: VALID_MEMORY1) - - fullValidContainerDescription1 = new 
KubernetesContainerDescription(name: VALID_NAME, - imageDescription: imageDescription, - limits: fullValidResourceDescription1, - requests: fullValidResourceDescription1, - livenessProbe: fullValidProbe - ) - - fullValidContainerDescription2 = new KubernetesContainerDescription(name: VALID_NAME, - imageDescription: imageDescription, - limits: fullValidResourceDescription2, - requests: fullValidResourceDescription2, - readinessProbe: fullValidProbe - ) - - partialValidContainerDescription = new KubernetesContainerDescription(name: VALID_NAME, - imageDescription: imageDescription, - limits: partialValidResourceDescription - ) - - partialInvalidHttpGetAction = new KubernetesHttpGetAction( - port: VALID_PORT, - uriScheme: INVALID_SCHEME - ) - - partialInvalidProbe = new KubernetesProbe( - handler: new KubernetesHandler() - ) - - fullInvalidProbe = new KubernetesProbe( - periodSeconds: NEGATIVE_NUMBER, - timeoutSeconds: NEGATIVE_NUMBER, - initialDelaySeconds: NEGATIVE_NUMBER, - successThreshold: NEGATIVE_NUMBER, - failureThreshold: NEGATIVE_NUMBER, - handler: new KubernetesHandler( - type: KubernetesHandlerType.HTTP, - httpGetAction: partialInvalidHttpGetAction - ) - ) - - fullInvalidResourceDescription = new KubernetesResourceDescription(memory: INVALID_MEMORY, cpu: INVALID_CPU) - - fullInvalidContainerDescription = new KubernetesContainerDescription(name: INVALID_NAME, - limits: fullInvalidResourceDescription, - requests: fullInvalidResourceDescription, - readinessProbe: partialInvalidProbe, - livenessProbe: fullInvalidProbe, - ) - - partialInvalidContainerDescription = new KubernetesContainerDescription(name: INVALID_NAME) - } - - void "validation accept (all fields filled)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - namespace: VALID_NAMESPACE, - freeFormDetails: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - fullValidContainerDescription1, - fullValidContainerDescription2 - ], - loadBalancers: VALID_LOAD_BALANCERS, - securityGroups: VALID_SECURITY_GROUPS, - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 0 * errorsMock._ - } - - void "validation accept (minimal fields filled)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 0 * errorsMock._ - } - - void "validation reject (missing credentials)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ]) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.account", "${DESCRIPTION}.account.empty") - 0 * errorsMock._ - } - - void "validation reject (missing application)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * 
errorsMock.rejectValue("${DESCRIPTION}.application", "${DESCRIPTION}.application.empty") - 0 * errorsMock._ - } - - void "validation reject (invalid stack)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: INVALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.stack", "${DESCRIPTION}.stack.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid application)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: INVALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.application", "${DESCRIPTION}.application.invalid (Must match ${StandardKubernetesAttributeValidator.prefixPattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid namespace)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - namespace: INVALID_NAMESPACE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.namespace", "${DESCRIPTION}.namespace.notRegistered") - 0 * errorsMock._ - } - - void "validation reject (invalid target size)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: INVALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.targetSize", "${DESCRIPTION}.targetSize.negative") - 0 * errorsMock._ - } - - void "validation reject (invalid partial container)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialInvalidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].name", "${DESCRIPTION}.container[0].name.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].imageDescription", "${DESCRIPTION}.container[0].imageDescription.empty") - 0 * errorsMock._ - } - - void "validation reject (invalid full container)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - fullInvalidContainerDescription - ], - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].name", 
"${DESCRIPTION}.container[0].name.invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].imageDescription", "${DESCRIPTION}.container[0].imageDescription.empty") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].requests.memory", "${DESCRIPTION}.container[0].requests.memory.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].limits.memory", "${DESCRIPTION}.container[0].limits.memory.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].requests.cpu", "${DESCRIPTION}.container[0].requests.cpu.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].limits.cpu", "${DESCRIPTION}.container[0].limits.cpu.invalid (Must match ${StandardKubernetesAttributeValidator.quantityPattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].livenessProbe.periodSeconds", "${DESCRIPTION}.container[0].livenessProbe.periodSeconds.notPositive") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].livenessProbe.timeoutSeconds", "${DESCRIPTION}.container[0].livenessProbe.timeoutSeconds.notPositive") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].livenessProbe.initialDelaySeconds", "${DESCRIPTION}.container[0].livenessProbe.initialDelaySeconds.negative") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].livenessProbe.successThreshold", "${DESCRIPTION}.container[0].livenessProbe.successThreshold.notPositive") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].livenessProbe.failureThreshold", "${DESCRIPTION}.container[0].livenessProbe.failureThreshold.notPositive") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].livenessProbe.handler.httpGetAction.uriScheme", "${DESCRIPTION}.container[0].livenessProbe.handler.httpGetAction.uriScheme.invalid (Must be one of ${StandardKubernetesAttributeValidator.uriSchemeList})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.container[0].readinessProbe.handler.type", "${DESCRIPTION}.container[0].readinessProbe.handler.type.empty") - 0 * errorsMock._ - } - - void "validation reject (invalid load balancers)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - loadBalancers: INVALID_LOAD_BALANCERS, - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * errorsMock.rejectValue("${DESCRIPTION}.loadBalancers[0]", "${DESCRIPTION}.loadBalancers[0].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.loadBalancers[1]", "${DESCRIPTION}.loadBalancers[1].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - } - - void "validation reject (invalid security groups)"() { - setup: - def description = new DeployKubernetesAtomicOperationDescription(application: VALID_APPLICATION, - stack: VALID_STACK, - targetSize: VALID_TARGET_SIZE, - containers: [ - partialValidContainerDescription - ], - securityGroups: INVALID_SECURITY_GROUPS, - account: VALID_ACCOUNT) - def errorsMock = Mock(Errors) - - when: - validator.validate([], description, errorsMock) - then: - 1 * 
errorsMock.rejectValue("${DESCRIPTION}.securityGroups[0]", "${DESCRIPTION}.securityGroups[0].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 1 * errorsMock.rejectValue("${DESCRIPTION}.securityGroups[1]", "${DESCRIPTION}.securityGroups[1].invalid (Must match ${StandardKubernetesAttributeValidator.namePattern})") - 0 * errorsMock._ - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesInstanceSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesInstanceSpec.groovy deleted file mode 100644 index dc53d7b3ad8..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesInstanceSpec.groovy +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.model.HealthState -import io.fabric8.kubernetes.api.model.* -import spock.lang.Specification - -class KubernetesInstanceSpec extends Specification { - private final static String REPLICATION_CONTROLLER = "arcim" - private static final List ownerReferences = [ - new OwnerReference(name: REPLICATION_CONTROLLER)] - - ContainerState containerStateAsRunningMock - ContainerState containerStateAsTerminatedMock - ContainerState containerStateAsWaitingMock - ContainerState containerStateAsNoneMock - - ContainerStatus containerStatusAsRunningMock - ContainerStatus containerStatusAsTerminatedMock - ContainerStatus containerStatusAsWaitingMock - ContainerStatus containerStatusAsNoneMock - - PodStatus podStatusMock - ObjectMeta metadataMock - Pod podMock - - def setup() { - - containerStateAsRunningMock = Mock(ContainerState) - containerStateAsRunningMock.getRunning() >> new ContainerStateRunning() - containerStateAsRunningMock.getTerminated() >> null - containerStateAsRunningMock.getWaiting() >> null - containerStatusAsRunningMock = Mock(ContainerStatus) - containerStatusAsRunningMock.getReady() >> true - containerStatusAsRunningMock.getState() >> containerStateAsRunningMock - - containerStateAsTerminatedMock = Mock(ContainerState) - containerStateAsTerminatedMock.getRunning() >> null - containerStateAsTerminatedMock.getTerminated() >> new ContainerStateTerminated() - containerStateAsTerminatedMock.getWaiting() >> null - containerStatusAsTerminatedMock = Mock(ContainerStatus) - containerStatusAsTerminatedMock.getReady() >> false - containerStatusAsTerminatedMock.getState() >> containerStateAsTerminatedMock - - containerStateAsWaitingMock = Mock(ContainerState) - containerStateAsWaitingMock.getRunning() >> null - containerStateAsWaitingMock.getTerminated() >> null - containerStateAsWaitingMock.getWaiting() >> new ContainerStateWaiting() - containerStatusAsWaitingMock = Mock(ContainerStatus) - containerStatusAsWaitingMock.getReady() >> false - 
containerStatusAsWaitingMock.getState() >> containerStateAsWaitingMock - - containerStateAsNoneMock = Mock(ContainerState) - containerStateAsNoneMock.getRunning() >> null - containerStateAsNoneMock.getTerminated() >> null - containerStateAsNoneMock.getWaiting() >> null - containerStatusAsNoneMock = Mock(ContainerStatus) - containerStatusAsNoneMock.getReady() >> false - containerStatusAsNoneMock.getState() >> containerStateAsNoneMock - - podStatusMock = Mock(PodStatus) - metadataMock = Mock(ObjectMeta) - podMock = Mock(Pod) - - podMock.getStatus() >> podStatusMock - podMock.getMetadata() >> metadataMock - // There is nothing interesting to test here, it is already handled by the - // convertContainerState(..) tests. - podStatusMock.getContainerStatuses() >> [] - - } - - void "Should report state as Down"() { - when: - def state = (new KubernetesV1Health('', containerStatusAsTerminatedMock)).state - - then: - state == HealthState.Down - - when: - state = (new KubernetesV1Health('', containerStatusAsWaitingMock)).state - - then: - state == HealthState.Down - } - - void "Should report state as Up"() { - when: - def state = (new KubernetesV1Health('', containerStatusAsRunningMock)).state - - then: - state == HealthState.Up - } - - void "Should report state as Unknown"() { - when: - def state = (new KubernetesV1Health('', containerStatusAsNoneMock)).state - - then: - state == HealthState.Unknown - } - - void "Should report pod state as Up"() { - setup: - podStatusMock.getPhase() >> "Running" - - when: - def instance = new KubernetesV1Instance(podMock, []) - - then: - instance.healthState == HealthState.Up - } - - void "Should report pod state as Unscheduled"() { - when: - podStatusMock.getPhase() >> "Pending" - def instance = new KubernetesV1Instance(podMock, []) - - then: - instance.healthState == HealthState.Down - } - - void "Should report pod state as Unknown"() { - setup: - podStatusMock.getPhase() >> "floof" - - when: - def instance = new KubernetesV1Instance(podMock, []) - - then: - instance.healthState == HealthState.Unknown - - when: - podStatusMock.getPhase() >> "Failed" - instance = new KubernetesV1Instance(podMock, []) - - then: - instance.healthState == HealthState.Unknown - - } - - void "Should report pod controller"() { - setup: - metadataMock.getOwnerReferences() >> ownerReferences - - when: - - def instance = new KubernetesV1Instance(podMock, []) - - then: - instance.controllerName == REPLICATION_CONTROLLER - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesServerGroupSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesServerGroupSpec.groovy deleted file mode 100644 index c7b34f2571e..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/model/KubernetesServerGroupSpec.groovy +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.model - -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.model.HealthState -import io.fabric8.kubernetes.api.model.ReplicationController -import spock.lang.Specification - -class KubernetesServerGroupSpec extends Specification { - final private String ACCOUNT = "account" - KubernetesV1Instance upInstanceMock - KubernetesV1Instance downInstanceMock - KubernetesV1Instance startingInstanceMock - KubernetesV1Instance unknownInstanceMock - KubernetesV1Instance outOfServiceInstanceMock - - def setup() { - upInstanceMock = Mock(KubernetesV1Instance) - downInstanceMock = Mock(KubernetesV1Instance) - startingInstanceMock = Mock(KubernetesV1Instance) - unknownInstanceMock = Mock(KubernetesV1Instance) - outOfServiceInstanceMock = Mock(KubernetesV1Instance) - - upInstanceMock.getHealthState() >> HealthState.Up - downInstanceMock.getHealthState() >> HealthState.Down - startingInstanceMock.getHealthState() >> HealthState.Starting - unknownInstanceMock.getHealthState() >> HealthState.Unknown - outOfServiceInstanceMock.getHealthState() >> HealthState.OutOfService - } - - void "Should return 1 up instances"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [upInstanceMock] as Set - - then: - serverGroup.instanceCounts.up == 1 - serverGroup.instanceCounts.down == 0 - serverGroup.instanceCounts.unknown == 0 - serverGroup.instanceCounts.outOfService == 0 - serverGroup.instanceCounts.starting == 0 - } - - void "Should return 1 up, 1 down, 1 starting, 1 oos, 1 unknown instances"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [upInstanceMock, downInstanceMock, startingInstanceMock, unknownInstanceMock, outOfServiceInstanceMock] as Set - - then: - serverGroup.instanceCounts.up == 1 - serverGroup.instanceCounts.down == 1 - serverGroup.instanceCounts.unknown == 1 - serverGroup.instanceCounts.outOfService == 1 - serverGroup.instanceCounts.starting == 1 - } - - void "Should list servergroup with no load balancers as enabled"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [] as Set - serverGroup.replicas = 1 - serverGroup.labels = ["hi": "there"] - - then: - !serverGroup.isDisabled() - } - - void "Should list servergroup with no enabled load balancers as disabled"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [] as Set - serverGroup.replicas = 1 - serverGroup.labels = [(KubernetesUtil.loadBalancerKey("1")): "false"] - - then: - serverGroup.isDisabled() - } - - void "Should list servergroup with enabled load balancers as enabled"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [] as Set - serverGroup.replicas = 1 - serverGroup.labels = [(KubernetesUtil.loadBalancerKey("1")): "true"] - - then: - !serverGroup.isDisabled() - } - - void "Should list servergroup with mix of load balancers as enabled"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [] as Set - 
serverGroup.replicas = 1 - serverGroup.labels = [(KubernetesUtil.loadBalancerKey("1")): "true", (KubernetesUtil.loadBalancerKey("2")): "false"] - - then: - !serverGroup.isDisabled() - } - - void "Should list servergroup with enabled load balancers but no instances as disabled"() { - when: - def serverGroup = new KubernetesV1ServerGroup(new ReplicationController(), ACCOUNT, [], null) - serverGroup.instances = [] as Set - serverGroup.replicas = 0 - serverGroup.labels = [(KubernetesUtil.loadBalancerKey("1")): "true"] - - then: - serverGroup.isDisabled() - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesInstanceCachingAgentSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesInstanceCachingAgentSpec.groovy deleted file mode 100644 index 33542d0324d..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesInstanceCachingAgentSpec.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import io.fabric8.kubernetes.api.model.ObjectMeta -import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.api.model.PodSpec -import io.fabric8.kubernetes.api.model.PodStatus -import spock.lang.Specification - -class KubernetesInstanceCachingAgentSpec extends Specification { - static final String accountName = "account" - static final String namespace = "namespace" - static final ObjectMapper mapper = new ObjectMapper() - - KubernetesV1Credentials credentials - KubernetesApiAdaptor apiAdaptor - ProviderCache providerCache - KubernetesInstanceCachingAgent agent - - def setup() { - apiAdaptor = Mock(KubernetesApiAdaptor) - credentials = Mock(KubernetesV1Credentials) { - getApiAdaptor() >> apiAdaptor - } - def namedCredentialsMock = Mock(KubernetesNamedAccountCredentials) - namedCredentialsMock.getCredentials() >> credentials - namedCredentialsMock.getName() >> accountName - providerCache = Mock(ProviderCache) - credentials.getDeclaredNamespaces() >> [namespace] - agent = new KubernetesInstanceCachingAgent(namedCredentialsMock, mapper, null, 0, 1) - } - - void "should apply cache-ttl annotation to pod"() { - setup: - def cacheExpiry = "1000" - def metadata = new ObjectMeta() - metadata.annotations = [(KubernetesInstanceCachingAgent.CACHE_TTL_ANNOTATION): cacheExpiry] - def pod = 
new Pod("v1", "Pod", metadata, new PodSpec(), new PodStatus()) - - when: - def data = agent.loadData(providerCache) - - then: - 1 * apiAdaptor.getPods(namespace) >> [pod] - data.cacheResults[Keys.Namespace.INSTANCES.ns].size() == 1 - data.cacheResults[Keys.Namespace.INSTANCES.ns][0].attributes.containsKey("cacheExpiry") - data.cacheResults[Keys.Namespace.INSTANCES.ns][0].attributes["cacheExpiry"] == cacheExpiry - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesLoadBalancerCachingAgentSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesLoadBalancerCachingAgentSpec.groovy deleted file mode 100644 index 3fb8a4e393e..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesLoadBalancerCachingAgentSpec.groovy +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import spock.lang.Specification -import spock.lang.Unroll - -class KubernetesLoadBalancerCachingAgentSpec extends Specification { - static final private String NAMESPACE = "default" - static final private String ACCOUNT_NAME = "account1" - - KubernetesLoadBalancerCachingAgent cachingAgent - KubernetesApiAdaptor apiMock - Registry registryMock - KubernetesV1Credentials kubernetesCredentials - - def setup() { - registryMock = Mock(Registry) - registryMock.get('id') >> 'id' - registryMock.timer(_) >> null - - apiMock = Mock(KubernetesApiAdaptor) - - apiMock.getNamespacesByName() >> [NAMESPACE] - - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - kubernetesCredentials = new KubernetesV1Credentials(apiMock, [], [], [], accountCredentialsRepositoryMock) - - def namedCrededentialsMock = Mock(KubernetesNamedAccountCredentials) - namedCrededentialsMock.getCredentials() >> kubernetesCredentials - namedCrededentialsMock.getName() >> ACCOUNT_NAME - - cachingAgent = new KubernetesLoadBalancerCachingAgent(namedCrededentialsMock, new ObjectMapper(), registryMock, 0, 1) - } - - - @Unroll - void "correctly reports #type/#provider is handled by the load balancer caching agent (#result)"() { - expect: - cachingAgent.handles(type, provider) == result - - where: - type | provider || 
result - OnDemandAgent.OnDemandType.LoadBalancer | KubernetesCloudProvider.ID || true - OnDemandAgent.OnDemandType.ServerGroup | KubernetesCloudProvider.ID || false - OnDemandAgent.OnDemandType.SecurityGroup | KubernetesCloudProvider.ID || false - OnDemandAgent.OnDemandType.ServerGroup | "google " || false - OnDemandAgent.OnDemandType.LoadBalancer | "" || false - null | "" || false - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServerGroupCachingAgentSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServerGroupCachingAgentSpec.groovy deleted file mode 100644 index bf15ef6b077..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesServerGroupCachingAgentSpec.groovy +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.v1.deploy.KubernetesUtil -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.caching.Keys -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import io.fabric8.kubernetes.api.model.ObjectMeta -import io.fabric8.kubernetes.api.model.PodList -import io.fabric8.kubernetes.api.model.ReplicationController -import io.fabric8.kubernetes.api.model.ReplicationControllerList -import io.fabric8.kubernetes.api.model.ReplicationControllerSpec -import spock.lang.Specification -import spock.lang.Unroll - -class KubernetesServerGroupCachingAgentSpec extends Specification { - static final private String NAMESPACE = "default" - static final private String ACCOUNT_NAME = "account1" - static final private String APP = "app" - static final private String CLUSTER = "$APP-cluster" - static final private String REPLICATION_CONTROLLER = "$CLUSTER-v000" - static final private String POD = "$REPLICATION_CONTROLLER-instance" - - KubernetesServerGroupCachingAgent cachingAgent - ReplicationControllerList replicationControllerList - PodList podList - KubernetesApiAdaptor apiMock - - Registry registryMock - KubernetesV1Credentials kubernetesCredentials - - String applicationKey - String clusterKey - String serverGroupKey - String instanceKey - - def setup() { - registryMock = Mock(Registry) - 
registryMock.get('id') >> 'id' - registryMock.timer(_) >> null - - replicationControllerList = Mock(ReplicationControllerList) - podList = Mock(PodList) - apiMock = Mock(KubernetesApiAdaptor) - - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - kubernetesCredentials = new KubernetesV1Credentials(apiMock, [], [], [], accountCredentialsRepositoryMock) - - def namedCredentialsMock = Mock(KubernetesNamedAccountCredentials) - namedCredentialsMock.getCredentials() >> kubernetesCredentials - namedCredentialsMock.getName() >> ACCOUNT_NAME - - applicationKey = Keys.getApplicationKey(APP) - clusterKey = Keys.getClusterKey(ACCOUNT_NAME, APP, 'serverGroup', CLUSTER) - serverGroupKey = Keys.getServerGroupKey(ACCOUNT_NAME, NAMESPACE, REPLICATION_CONTROLLER) - instanceKey = Keys.getInstanceKey(ACCOUNT_NAME, NAMESPACE, POD) - - cachingAgent = new KubernetesServerGroupCachingAgent(namedCredentialsMock, new ObjectMapper(), registryMock, 0, 1) - } - - void "Should store a single replication controller object and relationships"() { - setup: - def replicationControllerMock = Mock(ReplicationController) - def replicationControllerMetadataMock = Mock(ObjectMeta) - def replicationControllerSpecMock = Mock(ReplicationControllerSpec) - def selector = ['replicationController': REPLICATION_CONTROLLER.toString()] - replicationControllerSpecMock.getSelector() >> selector - replicationControllerMetadataMock.getName() >> REPLICATION_CONTROLLER - replicationControllerMetadataMock.getNamespace() >> NAMESPACE - replicationControllerMock.getMetadata() >> replicationControllerMetadataMock - replicationControllerMock.getSpec() >> replicationControllerSpecMock - - def podMock = Mock(ReplicationController) - def podMetadataMock = Mock(ObjectMeta) - podMetadataMock.getLabels() >> selector - podMetadataMock.getName() >> POD - podMetadataMock.getNamespace() >> NAMESPACE - podMock.getMetadata() >> podMetadataMock - apiMock.getReplicationControllers(NAMESPACE) >> [replicationControllerMock] - apiMock.getEvents(NAMESPACE, KubernetesUtil.DEPRECATED_SERVER_GROUP_KIND) >> [:].withDefault { _ -> [] } - apiMock.getAutoscalers(NAMESPACE, KubernetesUtil.DEPRECATED_SERVER_GROUP_KIND) >> [:] - apiMock.getPods(NAMESPACE) >> [podMock] - apiMock.getNamespacesByName() >> [NAMESPACE] - - def providerCacheMock = Mock(ProviderCache) - providerCacheMock.getAll(_, _) >> [] - - when: - def result = cachingAgent.loadData(providerCacheMock) - - then: - result.cacheResults.applications.attributes.name == [APP] - result.cacheResults.applications.relationships.serverGroups[0][0] == serverGroupKey - result.cacheResults.applications.relationships.clusters[0][0] == clusterKey - - result.cacheResults.clusters.attributes.name == [CLUSTER] - result.cacheResults.clusters.relationships.serverGroups[0][0] == serverGroupKey - result.cacheResults.clusters.relationships.applications[0][0] == applicationKey - - result.cacheResults.serverGroups.attributes.name == [REPLICATION_CONTROLLER] - result.cacheResults.serverGroups.relationships.clusters[0][0] == clusterKey - result.cacheResults.serverGroups.relationships.applications[0][0] == applicationKey - result.cacheResults.serverGroups.relationships.instances[0][0] == instanceKey - - result.cacheResults.instances.relationships.clusters[0][0] == clusterKey - result.cacheResults.instances.relationships.applications[0][0] == applicationKey - result.cacheResults.instances.relationships.serverGroups[0][0] == serverGroupKey - } - - @Unroll - void "correctly reports #type/#provider is handled by 
the server group caching agent (#result)"() { - expect: - cachingAgent.handles(type, provider) == result - - where: - type | provider || result - OnDemandAgent.OnDemandType.ServerGroup | KubernetesCloudProvider.ID || true - OnDemandAgent.OnDemandType.LoadBalancer | KubernetesCloudProvider.ID || false - OnDemandAgent.OnDemandType.SecurityGroup | KubernetesCloudProvider.ID || false - OnDemandAgent.OnDemandType.ServerGroup | "google " || false - OnDemandAgent.OnDemandType.LoadBalancer | "" || false - null | "" || false - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1SecurityGroupCachingAgentSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1SecurityGroupCachingAgentSpec.groovy deleted file mode 100644 index 1eb71d72695..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/provider/agent/KubernetesV1SecurityGroupCachingAgentSpec.groovy +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider -import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor -import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.kubernetes.v1.security.KubernetesV1Credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import spock.lang.Specification -import spock.lang.Unroll - -class KubernetesV1SecurityGroupCachingAgentSpec extends Specification { - static final private String NAMESPACE = "default" - static final private String ACCOUNT_NAME = "account1" - - KubernetesSecurityGroupCachingAgent cachingAgent - KubernetesApiAdaptor apiMock - Registry registryMock - KubernetesV1Credentials kubernetesCredentials - - def setup() { - registryMock = Mock(Registry) - registryMock.get('id') >> 'id' - registryMock.timer(_) >> null - - apiMock = Mock(KubernetesApiAdaptor) - - apiMock.getNamespacesByName() >> [NAMESPACE] - - def accountCredentialsRepositoryMock = Mock(AccountCredentialsRepository) - - kubernetesCredentials = new KubernetesV1Credentials(apiMock, [], [], [], accountCredentialsRepositoryMock) - - def namedCredentialsMock = Mock(KubernetesNamedAccountCredentials) - namedCredentialsMock.getCredentials() >> kubernetesCredentials - namedCredentialsMock.getName() >> ACCOUNT_NAME - - cachingAgent = new KubernetesSecurityGroupCachingAgent(namedCredentialsMock, new ObjectMapper(), registryMock, 0, 1) - } - - @Unroll - void "correctly reports #type/#provider is handled by the security group 
caching agent (#result)"() { - expect: - cachingAgent.handles(type, provider) == result - - where: - type | provider || result - OnDemandAgent.OnDemandType.SecurityGroup | KubernetesCloudProvider.ID || true - OnDemandAgent.OnDemandType.ServerGroup | KubernetesCloudProvider.ID || false - OnDemandAgent.OnDemandType.LoadBalancer | KubernetesCloudProvider.ID || false - OnDemandAgent.OnDemandType.ServerGroup | "google " || false - OnDemandAgent.OnDemandType.ServerGroup | "" || false - null | "" || false - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesConfigParserSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesConfigParserSpec.groovy deleted file mode 100644 index 246e08c870d..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesConfigParserSpec.groovy +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2018 Bol.com - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.kubernetes.v1.security - -import spock.lang.Specification - -import java.nio.file.Files - -class KubernetesConfigParserSpec extends Specification { - - void "master url should be set irregardless of trailing slash"() { - setup: - def f = Files.createTempFile("kubeconfig", "tmp") - f.setText(""" -apiVersion: v1 -clusters: -- cluster: - certificate-authority: /opt/spinnaker/config/ca.crt - server: https://1.2.3.4/ - name: tst -contexts: -- context: - cluster: tst - user: tst - name: tst -current-context: tst -kind: Config -preferences: {} -users: -- name: tst - user: - client-certificate: /opt/spinnaker/config/client.crt - client-key: /opt/spinnaker/config/client.key -""") - - when: - def result = KubernetesConfigParser.withKubeConfig(f.toFile().getAbsolutePath(), "tst", "tst", "tst", ["default"]) - - then: - result.getMasterUrl() == "https://1.2.3.4/" - - cleanup: - Files.delete(f) - } -} diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesV1CredentialsSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesV1CredentialsSpec.groovy deleted file mode 100644 index 6b1492f3f40..00000000000 --- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v1/security/KubernetesV1CredentialsSpec.groovy +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v1.security
-
-import com.netflix.spinnaker.clouddriver.docker.registry.security.DockerRegistryNamedAccountCredentials
-import com.netflix.spinnaker.clouddriver.kubernetes.v1.api.KubernetesApiAdaptor
-import com.netflix.spinnaker.clouddriver.kubernetes.config.LinkedDockerRegistryConfiguration
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository
-import spock.lang.Specification
-
-class KubernetesV1CredentialsSpec extends Specification {
- List NAMESPACES1 = ['default', 'kube-system']
- List NAMESPACES2 = ['default', 'spacename']
-
- String ACCOUNT1 = 'account-default'
-
- // These aren't pertinent to the test, so they can be reused for each account
- String ADDRESS = 'gcr.io'
- String BASIC_AUTH = 'lwander:hunter2'
- String EMAIL = 'lwander@google.com'
-
- List REGISTRIES1 = [
- new LinkedDockerRegistryConfiguration(accountName: ACCOUNT1, namespaces: NAMESPACES1)
- ]
-
- List REGISTRIES2 = [
- new LinkedDockerRegistryConfiguration(accountName: ACCOUNT1, namespaces: null)
- ]
-
- DockerRegistryNamedAccountCredentials mockCredentials(String accountName) {
- DockerRegistryNamedAccountCredentials registryAccountMock = Mock(DockerRegistryNamedAccountCredentials)
- registryAccountMock.getAccountName() >> ACCOUNT1
- registryAccountMock.getAddress() >> ADDRESS
- registryAccountMock.getEmail() >> EMAIL
-
- return registryAccountMock
- }
-
- void "should ignore kubernetes namespaces"() {
- setup:
- KubernetesApiAdaptor adaptorMock = Mock(KubernetesApiAdaptor)
- adaptorMock.getNamespacesByName() >> NAMESPACES2
-
- AccountCredentialsRepository repositoryMock = Mock(AccountCredentialsRepository)
- DockerRegistryNamedAccountCredentials registryAccountMock = mockCredentials(ACCOUNT1)
- repositoryMock.getOne(ACCOUNT1) >> registryAccountMock
-
- when:
- def result = new KubernetesV1Credentials(adaptorMock, NAMESPACES1, [], REGISTRIES1, repositoryMock)
-
- then:
- result.getDeclaredNamespaces() == NAMESPACES1
- result.dockerRegistries.get(0).namespaces == NAMESPACES1
- }
-
- void "should use kubernetes namespaces"() {
- setup:
- KubernetesApiAdaptor adaptorMock = Mock(KubernetesApiAdaptor)
- adaptorMock.getNamespacesByName() >> NAMESPACES2
-
- AccountCredentialsRepository repositoryMock = Mock(AccountCredentialsRepository)
- DockerRegistryNamedAccountCredentials registryAccountMock = mockCredentials(ACCOUNT1)
- repositoryMock.getOne(ACCOUNT1) >> registryAccountMock
-
- when:
- def result = new KubernetesV1Credentials(adaptorMock, null, [], REGISTRIES2, repositoryMock)
-
- then:
- result.getDeclaredNamespaces() == NAMESPACES2
- result.dockerRegistries.get(0).namespaces == NAMESPACES2
- }
-
- void "should omit kubernetes namespaces"() {
- setup:
- KubernetesApiAdaptor adaptorMock = Mock(KubernetesApiAdaptor)
- adaptorMock.getNamespacesByName() >> NAMESPACES2
-
- AccountCredentialsRepository repositoryMock = Mock(AccountCredentialsRepository)
- DockerRegistryNamedAccountCredentials registryAccountMock = mockCredentials(ACCOUNT1)
- repositoryMock.getOne(ACCOUNT1) >> registryAccountMock
-
- when:
- def result = new KubernetesV1Credentials(adaptorMock, null, NAMESPACES2, REGISTRIES2, repositoryMock)
-
- then:
- result.getDeclaredNamespaces() == []
- result.dockerRegistries.get(0).namespaces == NAMESPACES2
- }
-
- void "should not use kubernetes namespaces only in registry"() {
- setup:
- KubernetesApiAdaptor adaptorMock = Mock(KubernetesApiAdaptor)
- adaptorMock.getNamespacesByName() >> NAMESPACES2
-
- AccountCredentialsRepository repositoryMock = Mock(AccountCredentialsRepository)
- DockerRegistryNamedAccountCredentials registryAccountMock = mockCredentials(ACCOUNT1)
- repositoryMock.getOne(ACCOUNT1) >> registryAccountMock
-
- when:
- def result = new KubernetesV1Credentials(adaptorMock, null, [], REGISTRIES1, repositoryMock)
-
- then:
- result.getDeclaredNamespaces() == NAMESPACES2
- result.dockerRegistries.get(0).namespaces == NAMESPACES1
- }
-
- void "shouldn't destroy itself when api server is dead"() {
- setup:
- KubernetesApiAdaptor adaptorMock = Stub() {
- getNamespacesByName() >> { throw new RuntimeException() }
- }
-
- AccountCredentialsRepository repositoryMock = Mock(AccountCredentialsRepository)
- DockerRegistryNamedAccountCredentials registryAccountMock = mockCredentials(ACCOUNT1)
- repositoryMock.getOne(ACCOUNT1) >> registryAccountMock
-
- when:
- def namespaces = new KubernetesV1Credentials(adaptorMock, null, [], REGISTRIES1, repositoryMock).getDeclaredNamespaces()
-
- then:
- namespaces == []
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacerSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacerSpec.groovy
deleted file mode 100644
index 33b77d2cba7..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/ArtifactReplacerSpec.groovy
+++ /dev/null
@@ -1,176 +0,0 @@
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import org.yaml.snakeyaml.Yaml
-import org.yaml.snakeyaml.constructor.SafeConstructor
-import spock.lang.Specification
-import spock.lang.Unroll
-
-
-class ArtifactReplacerSpec extends Specification {
- def objectMapper = new ObjectMapper()
- def yaml = new Yaml(new SafeConstructor())
-
- KubernetesManifest stringToManifest(String input) {
- return objectMapper.convertValue(yaml.load(input), KubernetesManifest)
- }
-
- def "correctly extracts deployment name from hpa"() {
- when:
- def name = "my-deployment"
- def hpaManifest = """
-apiVersion: autoscaling/v2beta1
-kind: HorizontalPodAutoscaler
-metadata:
- name: my-hpa
- namespace: default
-spec:
- scaleTargetRef:
- apiVersion: apps/v1
- kind: Deployment
- name: $name
-"""
- def artifactReplacer = new ArtifactReplacer()
- artifactReplacer.addReplacer(ArtifactReplacerFactory.hpaDeploymentReplacer())
- def manifest = stringToManifest(hpaManifest)
- def artifacts = artifactReplacer.findAll(manifest)
-
- then:
- artifacts.size() == 1
- Artifact artifact = artifacts.toList().get(0)
- artifact.getType() == ArtifactTypes.KUBERNETES_DEPLOYMENT.toString()
- artifact.getName() == name
- }
-
- def "doesn't extract bad kind from hpa"() {
- when:
- def name = "my-deployment"
- def hpaManifest = """
-apiVersion: autoscaling/v2beta1
-kind: HorizontalPodAutoscaler
-metadata:
- name: my-hpa
- namespace: default
-spec:
- scaleTargetRef:
- apiVersion: apps/v1
- kind: UNKNOWN
- name: $name
-"""
- def artifactReplacer = new ArtifactReplacer()
- artifactReplacer.addReplacer(ArtifactReplacerFactory.hpaDeploymentReplacer())
- def manifest = stringToManifest(hpaManifest)
- def artifacts = artifactReplacer.findAll(manifest)
-
- then:
- artifacts.size() == 0
- }
-
- @Unroll
- def "correctly extracts Docker artifacts from image names"() {
- expect:
- def deploymentManifest = """
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: my-app-deployment
- labels:
- app: my-app
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: my-app
- template:
- metadata:
- labels:
- app: my-app
- spec:
- containers:
- - name: container
- image: $image
- ports:
- - containerPort: 80
-"""
- def artifactReplacer = new ArtifactReplacer()
- artifactReplacer.addReplacer(ArtifactReplacerFactory.dockerImageReplacer())
- def manifest = stringToManifest(deploymentManifest)
- def artifacts = artifactReplacer.findAll(manifest)
-
- artifacts.size() == 1
- Artifact artifact = artifacts.toList().get(0)
- artifact.getType() == ArtifactTypes.DOCKER_IMAGE.toString()
- artifact.getName() == name
- artifact.getReference() == image
-
- where:
- image || name
- "nginx:112" || "nginx"
- "nginx:1.12-alpine" || "nginx"
- "my-nginx:100000" || "my-nginx"
- "my.nginx:100000" || "my.nginx"
- "reg/repo:1.2.3" || "reg/repo"
- "reg.repo:123@sha256:13" || "reg.repo:123"
- "reg.default.svc/r/j:485fabc" || "reg.default.svc/r/j"
- "reg:5000/r/j:485fabc" || "reg:5000/r/j"
- "reg:5000/r__j:485fabc" || "reg:5000/r__j"
- "clouddriver" || "clouddriver"
- "clouddriver@sha256:9145" || "clouddriver"
- "localhost:5000/test/busybox@sha256:cbbf22" || "localhost:5000/test/busybox"
- }
-
- @Unroll
- def "correctly extracts Docker artifacts from image names in initContainers"() {
- expect:
- def deploymentManifest = """
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: my-app-deployment
- labels:
- app: my-app
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: my-app
- template:
- metadata:
- labels:
- app: my-app
- spec:
- initContainers:
- - name: container
- image: $image
- ports:
- - containerPort: 80
-"""
- def artifactReplacer = new ArtifactReplacer()
- artifactReplacer.addReplacer(ArtifactReplacerFactory.dockerImageReplacer())
- def manifest = stringToManifest(deploymentManifest)
- def artifacts = artifactReplacer.findAll(manifest)
-
- artifacts.size() == 1
- Artifact artifact = artifacts.toList().get(0)
- artifact.getType() == ArtifactTypes.DOCKER_IMAGE.toString()
- artifact.getName() == name
- artifact.getReference() == image
-
- where:
- image || name
- "nginx:112" || "nginx"
- "nginx:1.12-alpine" || "nginx"
- "my-nginx:100000" || "my-nginx"
- "my.nginx:100000" || "my.nginx"
- "reg/repo:1.2.3" || "reg/repo"
- "reg.repo:123@sha256:13" || "reg.repo:123"
- "reg.default.svc/r/j:485fabc" || "reg.default.svc/r/j"
- "reg:5000/r/j:485fabc" || "reg:5000/r/j"
- "reg:5000/r__j:485fabc" || "reg:5000/r__j"
- "clouddriver" || "clouddriver"
- "clouddriver@sha256:9145" || "clouddriver"
- "localhost:5000/test/busybox@sha256:cbbf22" || "localhost:5000/test/busybox"
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesUnversionedArtifactConverterSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesUnversionedArtifactConverterSpec.groovy
deleted file mode 100644
index 06b68e9012a..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesUnversionedArtifactConverterSpec.groovy
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Unversion 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind
-import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class KubernetesUnversionedArtifactConverterSpec extends Specification {
- @Unroll
- def "correctly infer unversioned artifact properties"() {
- expect:
- def type = "kubernetes/$kind"
-
- def artifact = Artifact.builder()
- .type(type)
- .name(name)
- .build()
-
- def converter = new KubernetesUnversionedArtifactConverter()
- converter.getKind(artifact) == kind
- converter.getDeployedName(artifact) == "$name"
-
- where:
- apiVersion | kind | name
- KubernetesApiVersion.APPS_V1BETA1 | KubernetesKind.DEPLOYMENT | "my-deploy"
- KubernetesApiVersion.V1 | KubernetesKind.SERVICE | "my-other-rs-_-"
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesVersionedArtifactConverterSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesVersionedArtifactConverterSpec.groovy
deleted file mode 100644
index 22bb45657c2..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/artifact/KubernetesVersionedArtifactConverterSpec.groovy
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesV2ArtifactProvider
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class KubernetesVersionedArtifactConverterSpec extends Specification {
- @Unroll
- def "correctly infer versioned artifact properties"() {
- expect:
- def type = "kubernetes/$kind"
-
- def artifact = Artifact.builder()
- .type(type)
- .name(name)
- .version(version)
- .build()
-
- def converter = new KubernetesVersionedArtifactConverter()
- converter.getKind(artifact) == kind
- converter.getDeployedName(artifact) == "$name-$version"
-
- where:
- apiVersion | kind | name | version
- KubernetesApiVersion.EXTENSIONS_V1BETA1 | KubernetesKind.REPLICA_SET | "my-rs" | "v000"
- KubernetesApiVersion.EXTENSIONS_V1BETA1 | KubernetesKind.REPLICA_SET | "my-other-rs-_-" | "v010"
- }
-
- @Unroll
- def "correctly pick next version"() {
- when:
- def artifacts = versions.collect { v -> Artifact.builder().version("v$v").build() }
- def artifactProvider = Mock(KubernetesV2ArtifactProvider)
- def type = "type"
- def name = "name"
- def location = "location"
-
- artifactProvider.getArtifacts(type, name, location) >> artifacts
-
- def converter = new KubernetesVersionedArtifactConverter()
-
- then:
- converter.getVersion(artifactProvider, type, name, location, null) == expected
-
- where:
- versions | expected
- [0, 1, 2] | "v003"
- [0] | "v001"
- [] | "v000"
- [1] | "v002"
- [1, 2, 3] | "v004"
- [0, 2, 3] | "v004"
- [2, 0, 1] | "v003"
- [0, 1, 3] | "v004"
- [1, 0, 3] | "v004"
- [1000] | "v1001"
- }
-
- def "find a matching version by equality"() {
- when:
- def manifest1 = new KubernetesManifest()
- def manifest2 = new KubernetesManifest()
- manifest1.put("data", ["key": 1, "value": 2])
- manifest2.put("data", ["key": 3, "value": 2])
-
- def version1 = "v001"
- def version2 = "v002"
-
- def artifact1 = new Artifact(version: version1, metadata: [lastAppliedConfiguration: manifest1])
- def artifact2 = new Artifact(version: version2, metadata: [lastAppliedConfiguration: manifest2])
- def artifacts = [artifact1, artifact2]
-
- def artifactProvider = Mock(KubernetesV2ArtifactProvider)
- def type = "type"
- def name = "name"
- def location = "location"
-
- artifactProvider.getArtifacts(type, name, location) >> artifacts
-
- def converter = new KubernetesVersionedArtifactConverter()
-
- then:
- converter.getVersion(artifactProvider, type, name, location, manifest1) == version1
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KeysSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KeysSpec.groovy
deleted file mode 100644
index 41ac916337a..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/KeysSpec.groovy
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind
-import spock.lang.Specification
-import spock.lang.Unroll
-
-/**
- * WARNING: if you're modifying these tests due to a key format change, you're likely
- * breaking all user's infrastructure caches. if this is intentional, keep in mind
- * that every user will have to flush redis to get clouddriver to run correctly
- */
-class KeysSpec extends Specification {
- @Unroll
- def "produces correct app keys #key"() {
- expect:
- Keys.application(application) == key
-
- where:
- application || key
- "app" || "kubernetes.v2:logical:applications:app"
- "" || "kubernetes.v2:logical:applications:"
- }
-
- @Unroll
- def "produces correct cluster keys #key"() {
- expect:
- Keys.cluster(account, application, cluster) == key
-
- where:
- account | application | cluster || key
- "ac" | "app" | "cluster" || "kubernetes.v2:logical:clusters:ac:app:cluster"
- "" | "" | "" || "kubernetes.v2:logical:clusters:::"
- }
-
- @Unroll
- def "produces correct infra keys #key"() {
- expect:
- Keys.infrastructure(kind, account, namespace, name) == key
-
- where:
- kind | apiVersion | account | namespace | name || key
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "ac" | "namespace" | "v1-v000" || "kubernetes.v2:infrastructure:replicaSet:ac:namespace:v1-v000"
- KubernetesKind.SERVICE | KubernetesApiVersion.V1 | "ac" | "namespace" | "v1" || "kubernetes.v2:infrastructure:service:ac:namespace:v1"
- KubernetesKind.DEPLOYMENT | KubernetesApiVersion.APPS_V1BETA1 | "ac" | "namespace" | "v1" || "kubernetes.v2:infrastructure:deployment:ac:namespace:v1"
- }
-
- @Unroll
- def "unpacks application key for #name"() {
- when:
- def key = "kubernetes.v2:logical:applications:$name"
- def parsed = Keys.parseKey(key).get()
-
- then:
- parsed instanceof Keys.ApplicationCacheKey
- def parsedApplicationKey = (Keys.ApplicationCacheKey) parsed
- parsedApplicationKey.name == name
-
- where:
- name | unused
- "app" | ""
- "" | ""
- }
-
- @Unroll
- def "unpacks cluster key for '#name' and '#account'"() {
- when:
- def key = "kubernetes.v2:logical:clusters:$account:$application:$name"
- def parsed = Keys.parseKey(key).get()
-
- then:
- parsed instanceof Keys.ClusterCacheKey
- def parsedClusterKey = (Keys.ClusterCacheKey) parsed
- parsedClusterKey.account == account
- parsedClusterKey.application == application
- parsedClusterKey.name == name
-
- where:
- account | application | name
- "ac" | "" | "name"
- "" | "asdf" | "sdf"
- "ac" | "ll" | ""
- "" | "" | ""
- }
-
- @Unroll
- def "unpacks infrastructure key for '#kind' and '#version'"() {
- when:
- def key = "kubernetes.v2:infrastructure:$kind:$account:$namespace:$name"
- def parsed = Keys.parseKey(key).get()
-
- then:
- parsed instanceof Keys.InfrastructureCacheKey
- def parsedInfrastructureKey = (Keys.InfrastructureCacheKey) parsed
- parsedInfrastructureKey.kubernetesKind == kind
- parsedInfrastructureKey.account == account
- parsedInfrastructureKey.namespace == namespace
- parsedInfrastructureKey.name == name
-
- where:
- kind | version | account | namespace | name
- KubernetesKind.DEPLOYMENT | KubernetesApiVersion.APPS_V1BETA1 | "ac" | "name" | "nameer"
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "" | "" | ""
- KubernetesKind.SERVICE | KubernetesApiVersion.V1 | "account" | "namespace" | ""
- KubernetesKind.INGRESS | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "ac" | "" | "nameer"
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCacheDataConvertSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCacheDataConvertSpec.groovy
deleted file mode 100644
index aadfc434d0b..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/caching/agent/KubernetesCacheDataConvertSpec.groovy
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spinnaker.cats.cache.DefaultCacheData
-import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.Keys
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestAnnotater
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestMetadata
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestSpinnakerRelationships
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.names.KubernetesManifestNamer
-import com.netflix.spinnaker.clouddriver.names.NamerRegistry
-import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import com.netflix.spinnaker.moniker.Moniker
-import org.apache.commons.lang3.tuple.Pair
-import org.yaml.snakeyaml.Yaml
-import org.yaml.snakeyaml.constructor.SafeConstructor
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class KubernetesCacheDataConvertSpec extends Specification {
- def mapper = new ObjectMapper()
- def yaml = new Yaml(new SafeConstructor())
- def ACCOUNT = "my-account"
- def NAMESPACE = "spinnaker"
-
- KubernetesManifest stringToManifest(String input) {
- return mapper.convertValue(yaml.load(input), KubernetesManifest.class)
- }
-
- @Unroll
- def "given a correctly annotated manifest, build attributes & infer relationships"() {
- setup:
- def rawManifest = """
-apiVersion: $apiVersion
-kind: $kind
-metadata:
- name: $name
- namespace: $namespace
-"""
- def moniker = Moniker.builder()
- .app(application)
- .cluster(cluster)
- .build()
-
- if (account != null) {
- NamerRegistry.lookup()
- .withProvider(KubernetesCloudProvider.ID)
- .withAccount(account)
- .setNamer(KubernetesManifest, new KubernetesManifestNamer())
- }
-
- def manifest = stringToManifest(rawManifest)
- KubernetesManifestAnnotater.annotateManifest(manifest, moniker)
-
- when:
- def cacheData = KubernetesCacheDataConverter.convertAsResource(account, manifest, [])
-
- then:
- if (application == null) {
- true
- } else {
- cacheData.relationships.get(Keys.LogicalKind.APPLICATIONS.toString()) == [Keys.application(application)]
- if (cluster) {
- cacheData.relationships.get(Keys.LogicalKind.CLUSTERS.toString()) == [Keys.cluster(account, application, cluster)]
- } else {
- cacheData.relationships.get(Keys.LogicalKind.CLUSTERS.toString()) == null
- }
- cacheData.attributes.get("name") == name
- cacheData.attributes.get("namespace") == namespace
- cacheData.attributes.get("kind") == kind
- cacheData.id == Keys.infrastructure(kind, account, namespace, name)
- }
-
- where:
- kind | apiVersion | account | application | cluster | namespace | name
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | null | null | null | "some-namespace" | "a-name-v000"
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "one-app" | "the-cluster" | "some-namespace" | "a-name-v000"
- KubernetesKind.DEPLOYMENT | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "one-app" | "the-cluster" | "some-namespace" | "a-name"
- KubernetesKind.SERVICE | KubernetesApiVersion.V1 | "another-account" | "your-app" | null | "some-namespace" | "what-name"
- }
-
- @Unroll
- def "given a single owner reference, correctly build relationships"() {
- setup:
- def ownerRefs = [new KubernetesManifest.OwnerReference(kind: kind, apiVersion: apiVersion, name: name)]
-
- when:
- def result = KubernetesCacheDataConverter.ownerReferenceRelationships(account, namespace, ownerRefs)
-
- then:
- result.get(kind.toString()) == [Keys.infrastructure(kind, account, namespace, name)]
-
- where:
- kind | apiVersion | account | cluster | namespace | name
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "another-clu" | "some-namespace" | "a-name-v000"
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "the-cluster" | "some-namespace" | "a-name-v000"
- KubernetesKind.DEPLOYMENT | KubernetesApiVersion.EXTENSIONS_V1BETA1 | "my-account" | "the-cluster" | "some-namespace" | "a-name"
- KubernetesKind.SERVICE | KubernetesApiVersion.V1 | "another-account" | "cluster" | "some-namespace" | "what-name"
- }
-
- @Unroll
- def "given a cache data entry, invert its relationships"() {
- setup:
- def id = Keys.infrastructure(kind, "account", "namespace", "version")
- def cacheData = new DefaultCacheData(id, null, relationships)
-
- when:
- def result = KubernetesCacheDataConverter.invertRelationships(cacheData)
-
- then:
- relationships.collect {
- group, keys -> keys.collect {
- key -> result.find {
- data -> data.id == key && data.relationships.get(kind.toString()) == [id]
- } != null
- }.inject true, { a, b -> a && b }
- }.inject true, { a, b -> a && b }
-
- where:
- kind | version | relationships
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.APPS_V1BETA1 | ["application": [Keys.application("app")]]
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.APPS_V1BETA1 | ["application": []]
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.APPS_V1BETA1 | [:]
- KubernetesKind.REPLICA_SET | KubernetesApiVersion.APPS_V1BETA1 | ["deployment": [Keys.infrastructure(KubernetesKind.DEPLOYMENT, "account", "namespace", "a-name")]]
- KubernetesKind.SERVICE | KubernetesApiVersion.V1 | ["cluster": [Keys.cluster("account", "app", "name")], "application": [Keys.application("blarg")]]
- KubernetesKind.SERVICE | KubernetesApiVersion.V1 | ["cluster": [Keys.cluster("account", "app", "name")], "application": [Keys.application("blarg"), Keys.application("asdfasdf")]]
- }
-
- def filterRelationships(Collection keys, List> existingResources) {
- return keys.findAll { sk ->
- def key = (Keys.InfrastructureCacheKey) Keys.parseKey(sk).get()
- return existingResources.find { Pair lb ->
- return lb.getLeft() == key.getKubernetesKind() && lb.getRight() == key.getName()
- } != null
- }
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesManifestAnnotatorSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesManifestAnnotatorSpec.groovy
deleted file mode 100644
index 5d03af7b239..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesManifestAnnotatorSpec.groovy
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestAnnotater
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestSpinnakerRelationships
-import com.netflix.spinnaker.moniker.Moniker
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class KubernetesManifestAnnotatorSpec extends Specification {
- def clusterKey = "moniker.spinnaker.io/cluster"
- def applicationKey = "moniker.spinnaker.io/application"
-
- private KubernetesManifest freshManifest() {
- def result = new KubernetesManifest()
- result.put("kind", "replicaSet")
- result.put("metadata", ["annotations": [:]])
- return result
- }
-
- @Unroll
- void "manifests are annotated and deannotated symmetrically"() {
- expect:
- def manifest = freshManifest()
- def relationships = new KubernetesManifestSpinnakerRelationships()
- .setLoadBalancers(loadBalancers)
- .setSecurityGroups(securityGroups)
- def moniker = Moniker.builder()
- .cluster(cluster)
- .app(application)
- .build()
-
- KubernetesManifestAnnotater.annotateManifest(manifest, relationships)
- KubernetesManifestAnnotater.annotateManifest(manifest, moniker)
- relationships == KubernetesManifestAnnotater.getManifestRelationships(manifest)
- moniker == KubernetesManifestAnnotater.getMoniker(manifest)
-
- where:
- loadBalancers | securityGroups | cluster | application
- [] | [] | "" | ""
- [] | [] | " " | ""
- null | null | null | null
- [] | null | "" | null
- ["lb"] | ["sg"] | "" | null
- ["lb1", "lb2"] | ["sg"] | "x" | "my app"
- ["lb1", "lb2"] | null | null | null
- null | ["x1, x2", "x3"] | null | null
- ["1"] | ["1"] | "1" | "1"
- }
-
- @Unroll
- void "manifests are annotated with the expected prefix"() {
- expect:
- def manifest = freshManifest()
- def moniker = Moniker.builder()
- .cluster(cluster)
- .app(application)
- .build()
-
- KubernetesManifestAnnotater.annotateManifest(manifest, moniker)
- manifest.getAnnotations().get(clusterKey) == cluster
- manifest.getAnnotations().get(applicationKey) == application
-
- where:
- cluster | application
- "" | ""
- "c" | "a"
- "" | "a"
-
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesManifestSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesManifestSpec.groovy
deleted file mode 100644
index 8776d2b8941..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/description/KubernetesManifestSpec.groovy
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.description
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import org.yaml.snakeyaml.Yaml
-import org.yaml.snakeyaml.constructor.SafeConstructor
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class KubernetesManifestSpec extends Specification {
- def objectMapper = new ObjectMapper()
- def yaml = new Yaml(new SafeConstructor())
-
- def NAME = "my-name"
- def NAMESPACE = "my-namespace"
- def KIND = KubernetesKind.REPLICA_SET
- def API_VERSION = KubernetesApiVersion.EXTENSIONS_V1BETA1
- def KEY = "hi"
- def VALUE = "there"
-
- def BASIC_REPLICA_SET = """
-apiVersion: $API_VERSION
-kind: $KIND
-metadata:
- name: $NAME
- namespace: $NAMESPACE
-spec:
- template:
- metadata:
- annotations:
- $KEY: $VALUE
-"""
-
- KubernetesManifest stringToManifest(String input) {
- return objectMapper.convertValue(yaml.load(input), KubernetesManifest)
- }
-
- void "correctly reads fields from basic manifest definition"() {
- when:
- KubernetesManifest manifest = stringToManifest(BASIC_REPLICA_SET)
-
- then:
- manifest.getName() == NAME
- manifest.getNamespace() == NAMESPACE
- manifest.getKind() == KIND
- manifest.getApiVersion() == API_VERSION
- manifest.getSpecTemplateAnnotations().get().get(KEY) == VALUE
- }
-
- @Unroll
- void "correctly parses a fully qualified resource name #kind/#name"() {
- expect:
- def pair = KubernetesManifest.fromFullResourceName(fullResourceName)
- pair.getRight() == name
- pair.getLeft() == kind
-
- where:
- fullResourceName || kind | name
- "replicaSet abc" || KubernetesKind.REPLICA_SET | "abc"
- "rs abc" || KubernetesKind.REPLICA_SET | "abc"
- "service abc" || KubernetesKind.SERVICE | "abc"
- "SERVICE abc" || KubernetesKind.SERVICE | "abc"
- "ingress abc" || KubernetesKind.INGRESS | "abc"
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/KubernetesDeployManifestOperationSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/KubernetesDeployManifestOperationSpec.groovy
deleted file mode 100644
index 5a019483521..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/KubernetesDeployManifestOperationSpec.groovy
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spinnaker.clouddriver.data.task.Task
-import com.netflix.spinnaker.clouddriver.data.task.TaskRepository
-import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider
-import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.KubernetesVersionedArtifactConverter
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesResourcePropertyRegistry
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.KubernetesSpinnakerKindMap
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesApiVersion
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesDeployManifestDescription
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesKind
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifestSpinnakerRelationships
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.names.KubernetesManifestNamer
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler.KubernetesReplicaSetHandler
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.op.manifest.KubernetesDeployManifestOperation
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials
-import com.netflix.spinnaker.clouddriver.names.NamerRegistry
-import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import com.netflix.spinnaker.moniker.Moniker
-import org.yaml.snakeyaml.Yaml
-import org.yaml.snakeyaml.constructor.SafeConstructor
-import spock.lang.Specification
-
-class KubernetesDeployManifestOperationSpec extends Specification {
- def objectMapper = new ObjectMapper()
- def yaml = new Yaml(new SafeConstructor())
-
- def ACCOUNT = "account"
- def NAME = "my-name"
- def VERSION = "version"
- def NAMESPACE = "my-namespace"
- def DEFAULT_NAMESPACE = "default"
- def IMAGE = "gcr.io/project/image"
- def KIND = KubernetesKind.REPLICA_SET
- def API_VERSION = KubernetesApiVersion.EXTENSIONS_V1BETA1
-
- def BASIC_REPLICA_SET = """
-apiVersion: $API_VERSION
-kind: $KIND
-metadata:
- name: $NAME
- namespace: $NAMESPACE
-"""
-
- def BASIC_REPLICA_SET_NO_NAMESPACE = """
-apiVersion: $API_VERSION
-kind: $KIND
-metadata:
- name: $NAME
-"""
-
- def setupSpec() {
- TaskRepository.threadLocalTask.set(Mock(Task))
- }
-
- KubernetesManifest stringToManifest(String input) {
- return objectMapper.convertValue(yaml.load(input), KubernetesManifest)
- }
-
- KubernetesDeployManifestOperation createMockDeployer(KubernetesV2Credentials credentials, String manifest) {
- def deployDescription = new KubernetesDeployManifestDescription()
- .setManifest(stringToManifest(manifest))
- .setMoniker(new Moniker())
- .setRelationships(new KubernetesManifestSpinnakerRelationships())
- .setSource(KubernetesDeployManifestDescription.Source.text)
-
- def namedCredentialsMock = Mock(KubernetesNamedAccountCredentials)
- namedCredentialsMock.getCredentials() >> credentials
- namedCredentialsMock.getName() >> ACCOUNT
- deployDescription.setCredentials(namedCredentialsMock)
-
- credentials.deploy(_, _) >> null
-
- def replicaSetDeployer = new KubernetesReplicaSetHandler()
- replicaSetDeployer.versioned() >> true
- replicaSetDeployer.kind() >> KIND
- def versionedArtifactConverterMock = Mock(KubernetesVersionedArtifactConverter)
- versionedArtifactConverterMock.getDeployedName(_) >> "$NAME-$VERSION"
- versionedArtifactConverterMock.toArtifact(_, _, _) >> new Artifact()
- def registry = new KubernetesResourcePropertyRegistry(Collections.singletonList(replicaSetDeployer),
- new KubernetesSpinnakerKindMap())
-
- NamerRegistry.lookup().withProvider(KubernetesCloudProvider.ID)
- .withAccount(ACCOUNT)
- .setNamer(KubernetesManifest.class, new KubernetesManifestNamer())
-
- registry.get("any", KubernetesKind.REPLICA_SET).versionedConverter = versionedArtifactConverterMock
-
- def deployOp = new KubernetesDeployManifestOperation(deployDescription, registry, null)
-
- return deployOp
- }
-
- void "replica set deployer is correctly invoked"() {
- setup:
- def credentialsMock = Mock(KubernetesV2Credentials)
- credentialsMock.getDefaultNamespace() >> NAMESPACE
- def deployOp = createMockDeployer(credentialsMock, BASIC_REPLICA_SET)
-
- when:
- def result = deployOp.operate([])
- then:
- result.manifestNamesByNamespace[NAMESPACE].size() == 1
- result.manifestNamesByNamespace[NAMESPACE][0] == "$KIND $NAME-$VERSION"
- }
-
- void "replica set deployer uses backup namespace"() {
- setup:
- def credentialsMock = Mock(KubernetesV2Credentials)
- credentialsMock.getDefaultNamespace() >> DEFAULT_NAMESPACE
- def deployOp = createMockDeployer(credentialsMock, BASIC_REPLICA_SET_NO_NAMESPACE)
-
- when:
- def result = deployOp.operate([])
-
- then:
- result.manifestNamesByNamespace[DEFAULT_NAMESPACE].size() == 1
- result.manifestNamesByNamespace[DEFAULT_NAMESPACE][0] == "$KIND $NAME-$VERSION"
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDeploymentHandlerSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDeploymentHandlerSpec.groovy
deleted file mode 100644
index 978a7efeb4f..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/op/handler/KubernetesDeploymentHandlerSpec.groovy
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright 2017 Google, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.op.handler
-
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.artifact.ArtifactTypes
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.description.manifest.KubernetesManifest
-import com.netflix.spinnaker.kork.artifacts.model.Artifact
-import org.yaml.snakeyaml.Yaml
-import org.yaml.snakeyaml.constructor.SafeConstructor
-import spock.lang.Specification
-
-class KubernetesDeploymentHandlerSpec extends Specification {
- def objectMapper = new ObjectMapper()
- def yaml = new Yaml(new SafeConstructor())
- def handler = new KubernetesDeploymentHandler()
-
- def IMAGE = "gcr.io/project/image"
- def CONFIG_MAP_VOLUME = "my-config-map"
- def SECRET_ENV = "my-secret-env"
- def CONFIG_MAP_ENV_KEY = "my-config-map-env"
- def ACCOUNT = "my-account"
-
- def BASIC_DEPLOYMENT = """
-apiVersion: apps/v1beta2
-kind: Deployment
-metadata:
- name: nginx-deployment
- labels:
- app: nginx
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: nginx
- template:
- metadata:
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: $IMAGE
- ports:
- - containerPort: 80
- envFrom:
- - secretRef:
- name: $SECRET_ENV
- env:
- - name: KEY
- valueFrom:
- configMapKeyRef:
- name: $CONFIG_MAP_ENV_KEY
- key: value
- volumes:
- - configMap:
- name: $CONFIG_MAP_VOLUME
-"""
-
- KubernetesManifest stringToManifest(String input) {
- return objectMapper.convertValue(yaml.load(input), KubernetesManifest)
- }
-
- void "check that image is replaced by the artifact replacer"() {
- when:
- def target = "$IMAGE:version-1.2.3"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.DOCKER_IMAGE.toString())
- .name(IMAGE)
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.containers[0].image == target
- result.boundArtifacts.size() == 1
- result.boundArtifacts.contains(artifact) == true
- }
-
- void "check that image isn't replaced by the artifact replacer"() {
- when:
- def target = "$IMAGE:version-bad"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.DOCKER_IMAGE.toString())
- .name("not-$IMAGE")
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.containers[0].image == IMAGE
- result.boundArtifacts.isEmpty() == true
- }
-
- void "check that image is found"() {
- when:
- def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
-
- then:
- result.findAll { a -> a.getReference() == IMAGE && a.getType() == ArtifactTypes.DOCKER_IMAGE.toString() }.size() == 1
- }
-
- void "check that configmap volume is replaced by the artifact replacer without an account specified"() {
- when:
- def target = "$CONFIG_MAP_VOLUME-version-1.2.3"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_CONFIG_MAP.toString())
- .name(CONFIG_MAP_VOLUME)
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.volumes[0].configMap.name == target
- }
-
- void "check that configmap volume is replaced by the artifact replacer"() {
- when:
- def target = "$CONFIG_MAP_VOLUME-version-1.2.3"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_CONFIG_MAP.toString())
- .name(CONFIG_MAP_VOLUME)
- .reference(target)
- .metadata(["account": ACCOUNT])
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.volumes[0].configMap.name == target
- }
-
- void "check that configmap volume replaced by the artifact replacer"() {
- when:
- def target = "$CONFIG_MAP_VOLUME:version-bad"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_CONFIG_MAP.toString())
- .name("not-$CONFIG_MAP_VOLUME")
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.volumes[0].configMap.name == CONFIG_MAP_VOLUME
- }
-
- void "check that configmap volume is not replaced by the artifact replacer in the wrong account"() {
- when:
- def target = "$CONFIG_MAP_VOLUME:version-1.2.3"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_CONFIG_MAP.toString())
- .name("$CONFIG_MAP_VOLUME")
- .reference(target)
- .metadata(["account": "not-$ACCOUNT".toString()])
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.volumes[0].configMap.name != target
- }
-
- void "check that configmap volume is found"() {
- when:
- def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
-
- then:
- result.findAll { a -> a.getReference() == CONFIG_MAP_VOLUME && a.getType() == ArtifactTypes.KUBERNETES_CONFIG_MAP.toString()}.size() == 1
- }
-
-
- void "check that only secret ref is replaced by the artifact replacer"() {
- when:
- def target = "$SECRET_ENV-version-1.2.3"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_SECRET.toString())
- .name(SECRET_ENV)
- .reference(target)
- .metadata(["account": ACCOUNT])
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.containers[0].envFrom[0].secretRef.name == target
- }
-
- void "check that secret ref is not replaced by the artifact replacer"() {
- when:
- def target = "$SECRET_ENV:version-bad"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_SECRET.toString())
- .name("not-$SECRET_ENV")
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.containers[0].envFrom[0].secretRef.name == SECRET_ENV
- }
-
- void "check that secret ref is found"() {
- when:
- def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
-
- then:
- result.findAll { a -> a.getReference() == SECRET_ENV && a.getType() == ArtifactTypes.KUBERNETES_SECRET.toString()}.size() == 1
- }
-
- void "check that only configmap value ref is replaced by the artifact replacer"() {
- when:
- def target = "$CONFIG_MAP_ENV_KEY-version-1.2.3"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_CONFIG_MAP.toString())
- .name(CONFIG_MAP_ENV_KEY)
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.containers[0].env[0].valueFrom.configMapKeyRef.name == target
- }
-
- void "check that configmap value ref is not replaced by the artifact replacer"() {
- when:
- def target = "$CONFIG_MAP_ENV_KEY:version-bad"
- def artifact = Artifact.builder()
- .type(ArtifactTypes.KUBERNETES_CONFIG_MAP.toString())
- .name("not-$CONFIG_MAP_ENV_KEY")
- .reference(target)
- .build()
-
- def result = handler.replaceArtifacts(stringToManifest(BASIC_DEPLOYMENT), [artifact], ACCOUNT)
-
- then:
- result.manifest.spec.template.spec.containers[0].env[0].valueFrom.configMapKeyRef.name == CONFIG_MAP_ENV_KEY
- }
-
- void "check that configmap value ref is found"() {
- when:
- def result = handler.listArtifacts(stringToManifest(BASIC_DEPLOYMENT))
-
- then:
- result.findAll { a -> a.getReference() == CONFIG_MAP_ENV_KEY && a.getType() == ArtifactTypes.KUBERNETES_CONFIG_MAP.toString()}.size() == 1
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/KubernetesValidationUtilSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/KubernetesValidationUtilSpec.groovy
deleted file mode 100644
index 8eb8357764c..00000000000
--- a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/v2/validator/KubernetesValidationUtilSpec.groovy
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2018 Schibsted ASA.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.kubernetes.v2.validator
-
-import com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials
-import com.netflix.spinnaker.clouddriver.security.AccountCredentials
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
-import org.springframework.validation.Errors
-import spock.lang.Specification
-import spock.lang.Unroll
-
-class KubernetesValidationUtilSpec extends Specification {
- @Unroll
- void "wiring of namespace validation"() {
- given:
- Errors errors = Mock(Errors)
- String kubernetesAccount = "testAccount"
- def namespaces = ["test-namespace"]
- def omitNamespaces = ["omit-namespace"]
- AccountCredentials accountCredentials = Mock(AccountCredentials)
- KubernetesV2Credentials credentials = Mock(KubernetesV2Credentials)
- KubernetesValidationUtil kubernetesValidationUtil = new KubernetesValidationUtil("currentContext", errors);
- AccountCredentialsProvider accountCredentialsProvider = Mock(AccountCredentialsProvider)
-
- when:
- def judgement = kubernetesValidationUtil.validateV2Credentials(accountCredentialsProvider, kubernetesAccount, testNamespace)
-
- then:
- accountCredentialsProvider.getCredentials(kubernetesAccount) >> accountCredentials
- accountCredentials.getCredentials() >> credentials
- credentials.getOmitNamespaces() >> omitNamespaces
- credentials.namespaces >> namespaces
- judgement == expectedResult
-
- where:
- testNamespace || expectedResult
- null || true
- "" || true
- "test-namespace" || true
- "omit-namespace" || false
- "unknown-namespace" || false
- }
-
- @Unroll
- void "validation of namespaces"() {
- given:
- Errors errors = Mock(Errors)
- KubernetesV2Credentials credentials = Mock(KubernetesV2Credentials)
- KubernetesValidationUtil kubernetesValidationUtil = new KubernetesValidationUtil("currentContext", errors);
-
- when:
- def judgement = kubernetesValidationUtil.validateNamespace(testNamespace, credentials)
-
- then:
- credentials.getOmitNamespaces() >> omitNamespaces
- credentials.namespaces >> namespaces
- judgement == allowedNamespace
-
- where:
- namespaces | omitNamespaces | testNamespace || allowedNamespace
- ["test-namespace"] | ["omit-namespace"] | "test-namespace" || true
- null | ["omit-namespace"] | "test-namespace" || true
- ["test-namespace"] | null | "test-namespace" || true
- ["test-namespace"] | ["omit-namespace"] | "omit-namespace" || false
- null | ["omit-namespace"] | "omit-namespace" || false
- ["test-namespace"] | ["omit-namespace"] | "unknown-namespace" || false
- null | null | "unknown-namespace" || true
- [] | [] | "unknown-namespace" || true
- }
-}
diff --git a/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/validator/KubernetesValidationUtilSpec.groovy b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/validator/KubernetesValidationUtilSpec.groovy
new file mode 100644
index 00000000000..5d3df06a0d2
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/groovy/com/netflix/spinnaker/clouddriver/kubernetes/validator/KubernetesValidationUtilSpec.groovy
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2018 Schibsted ASA.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.validator
+
+import com.google.common.collect.ImmutableList
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials
+import com.netflix.spinnaker.clouddriver.security.AccountCredentials
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
+import spock.lang.Specification
+import spock.lang.Unroll
+
+import javax.annotation.Nullable
+
+class KubernetesValidationUtilSpec extends Specification {
+ @Unroll
+ void "wiring of kind/namespace validation"() {
+ given:
+ ValidationErrors errors = Mock(ValidationErrors)
+ String kubernetesAccount = "testAccount"
+ def namespaces = ImmutableList.of("test-namespace")
+ def omitNamespaces = ImmutableList.of("omit-namespace")
+ def kind = KubernetesKind.DEPLOYMENT
+ AccountCredentials accountCredentials = Mock(AccountCredentials)
+ KubernetesCredentials credentials = Mock(KubernetesCredentials)
+ KubernetesValidationUtil kubernetesValidationUtil = new KubernetesValidationUtil("currentContext", errors);
+ AccountCredentialsProvider accountCredentialsProvider = Mock(AccountCredentialsProvider)
+ KubernetesManifest manifest = Mock(KubernetesManifest)
+
+ when:
+ def judgement = kubernetesValidationUtil.validateCredentials(accountCredentialsProvider, kubernetesAccount, manifest)
+
+ then:
+ accountCredentialsProvider.getCredentials(kubernetesAccount) >> accountCredentials
+ accountCredentials.getCredentials() >> credentials
+ credentials.getOmitNamespaces() >> omitNamespaces
+ credentials.namespaces >> namespaces
+ manifest.getNamespace() >> testNamespace
+ manifest.getKind() >> kind
+ credentials.isValidKind(kind) >> true
+ credentials.getKindStatus(kind) >> KubernetesCredentials.KubernetesKindStatus.VALID
+ judgement == expectedResult
+
+ where:
+ testNamespace || expectedResult
+ null || true
+ "" || true
+ "test-namespace" || true
+ "omit-namespace" || false
+ "unknown-namespace" || false
+ }
+
+ @Unroll
+ void "validation of namespaces"() {
+ given:
+ ValidationErrors errors = Mock(ValidationErrors)
+ KubernetesCredentials credentials = Mock(KubernetesCredentials)
+ KubernetesValidationUtil kubernetesValidationUtil = new KubernetesValidationUtil("currentContext", errors);
+
+ when:
+ def judgement = kubernetesValidationUtil.validateNamespace(testNamespace, credentials)
+
+ then:
+ credentials.getOmitNamespaces() >> toImmutableList(omitNamespaces)
+ credentials.namespaces >> toImmutableList(namespaces)
+ judgement == allowedNamespace
+
+ where:
+ namespaces | omitNamespaces | testNamespace || allowedNamespace
+ ["test-namespace"] | ["omit-namespace"] | "test-namespace" || true
+ null | ["omit-namespace"] | "test-namespace" || true
+ ["test-namespace"] | null | "test-namespace" || true
+ ["test-namespace"] | ["omit-namespace"] | "omit-namespace" || false
+ null | ["omit-namespace"] | "omit-namespace" || false
+ ["test-namespace"] | ["omit-namespace"] | "unknown-namespace" || false
+ null | null | "unknown-namespace" || true
+ // When namespaces is not specified (and we rely on dynamic discovery) we need to treat an unknown namespace as
+ // allowed. This is because we might be adding the namespace as part of the same deploy operation, so can't rely
This is because we might be adding the namespace as part of the same deploy operation, so can't rely + // on looking in the namespace cache for the unknown namespace. + [] | [] | "unknown-namespace" || true + [] | ["omit-namespace"] | "unknown-namespace" || true + } + + @Nullable + private static ImmutableList toImmutableList(@Nullable Iterable list) { + if (list == null) { + return null; + } + return ImmutableList.copyOf(list); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactConverterTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactConverterTest.java new file mode 100644 index 00000000000..1c83718d757 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactConverterTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.OptionalInt; +import org.junit.jupiter.api.Test; + +final class ArtifactConverterTest { + private static final String ACCOUNT = "my-account"; + private static final String NAMESPACE = "my-namespace"; + private static final String NAME = "my-name"; + private static final String KIND = "Pod"; + + private static final ObjectMapper mapper = new ObjectMapper(); + + @Test + void artifactWithoutVersion() { + KubernetesManifest manifest = getStubManifest(); + Artifact artifact = ArtifactConverter.toArtifact(manifest, ACCOUNT, OptionalInt.empty()); + + assertThat(artifact.getType()).isEqualTo("kubernetes/pod"); + assertThat(artifact.getName()).isEqualTo(NAME); + assertThat(artifact.getLocation()).isEqualTo(NAMESPACE); + assertThat(artifact.getVersion()).isNullOrEmpty(); + assertThat(artifact.getReference()).isEqualTo(NAME); + assertThat(artifact.getMetadata("account")).isEqualTo(ACCOUNT); + } + + private static KubernetesManifest getStubManifest() { + return mapper.convertValue( + ImmutableMap.of( + "kind", KIND, "metadata", ImmutableMap.of("name", NAME, "namespace", NAMESPACE)), + KubernetesManifest.class); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactReplacerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactReplacerTest.java new file mode 100644 index 00000000000..3c85de4ceca --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ArtifactReplacerTest.java @@ -0,0 +1,512 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.artifact;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.gson.Gson;
+import com.netflix.spinnaker.clouddriver.artifacts.kubernetes.KubernetesArtifactType;
+import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer.ReplaceResult;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.kork.artifacts.model.Artifact;
+import io.kubernetes.client.openapi.JSON;
+import io.kubernetes.client.openapi.models.V1ConfigMapEnvSource;
+import io.kubernetes.client.openapi.models.V1Container;
+import io.kubernetes.client.openapi.models.V1ContainerBuilder;
+import io.kubernetes.client.openapi.models.V1Deployment;
+import io.kubernetes.client.openapi.models.V1DeploymentBuilder;
+import io.kubernetes.client.openapi.models.V1DeploymentSpec;
+import io.kubernetes.client.openapi.models.V1EnvFromSource;
+import io.kubernetes.client.openapi.models.V1HorizontalPodAutoscalerBuilder;
+import io.kubernetes.client.openapi.models.V1PodSpec;
+import io.kubernetes.client.openapi.models.V1PodTemplateSpec;
+import io.kubernetes.client.openapi.models.V1ReplicaSet;
+import io.kubernetes.client.openapi.models.V1ReplicaSetBuilder;
+import io.kubernetes.client.openapi.models.V1ReplicaSetSpec;
+import java.util.Collection;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+import lombok.Value;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+final class ArtifactReplacerTest {
+  // We serialize generated Kubernetes metadata objects with io.kubernetes.client.openapi.JSON
+  // so that they match what we get back from kubectl. We then use Gson to convert to a
+  // KubernetesManifest because that's what we currently use to parse the result from kubectl,
+  // and we want this test to be realistic.
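The round trip that comment describes is worth seeing in isolation. A minimal editorial sketch (not part of the patch; the helper name toManifest is hypothetical) using only the JSON and Gson types imported above:

  // Serialize a typed client object with the client's own codec (kubectl-compatible JSON),
  // then parse the string back as the untyped KubernetesManifest, mirroring the helper
  // methods at the bottom of this class.
  private static KubernetesManifest toManifest(Object kubernetesObject) {
    String wireJson = new JSON().serialize(kubernetesObject);
    return new Gson().fromJson(wireJson, KubernetesManifest.class);
  }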
+ private static final JSON json = new JSON(); + private static final Gson gson = new Gson(); + + private static final String NAMESPACE = "ns"; + private static final String ACCOUNT = "my-account"; + private static final String DEFAULT_BINDING = "match-name-and-tag"; + + @Test + void extractsDeploymentNameFromHpa() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaDeployment())); + KubernetesManifest hpa = getHpa("Deployment", "my-deployment"); + Set artifacts = artifactReplacer.findAll(hpa); + + assertThat(artifacts).hasSize(1); + Artifact artifact = Iterables.getOnlyElement(artifacts); + assertThat(artifact.getName()).isEqualTo("my-deployment"); + assertThat(artifact.getType()).isEqualTo(KubernetesArtifactType.Deployment.getType()); + } + + @Test + void skipsHpaWithUnknownKind() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaDeployment())); + KubernetesManifest hpa = getHpa("Unknown", "my-deployment"); + Set artifacts = artifactReplacer.findAll(hpa); + + assertThat(artifacts).isEmpty(); + } + + @ParameterizedTest + @MethodSource("imageArtifactTestCases") + void extractsDockerImageArtifacts(ImageTestCase testCase) { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest deployment = getDeploymentWithContainer(getContainer(testCase.getImage())); + Set artifacts = artifactReplacer.findAll(deployment); + + assertThat(artifacts).hasSize(1); + Artifact artifact = Iterables.getOnlyElement(artifacts); + assertThat(artifact.getType()).isEqualTo(KubernetesArtifactType.DockerImage.getType()); + assertThat(artifact.getName()).isEqualTo(testCase.getName()); + assertThat(artifact.getReference()).isEqualTo(testCase.getImage()); + } + + @ParameterizedTest + @MethodSource("imageArtifactTestCases") + void extractsDockerImageArtifactsFromInitContainers(ImageTestCase testCase) { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest deployment = + getDeploymentWithInitContainer(getContainer(testCase.getImage())); + Set artifacts = artifactReplacer.findAll(deployment); + + assertThat(artifacts).hasSize(1); + Artifact artifact = Iterables.getOnlyElement(artifacts); + assertThat(artifact.getType()).isEqualTo(KubernetesArtifactType.DockerImage.getType()); + assertThat(artifact.getName()).isEqualTo(testCase.getName()); + assertThat(artifact.getReference()).isEqualTo(testCase.getImage()); + } + + // Called by @MethodSource which error-prone does not detect. 
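The comment above refers to the imageArtifactTestCases() factory that follows. For readers unfamiliar with the JUnit 5 wiring: @MethodSource resolves its string argument by name to a static factory method at runtime, which is why static analysis (error-prone) cannot see the call. A self-contained editorial sketch, with hypothetical names:

  @ParameterizedTest
  @MethodSource("lowercaseWords") // resolved by name at runtime, not by a compile-time reference
  void acceptsLowercaseWords(String word) {
    assertThat(word).isLowerCase();
  }

  @SuppressWarnings("unused") // only ever invoked reflectively by @MethodSource
  private static Stream<String> lowercaseWords() {
    return Stream.of("alpha", "beta");
  }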
+  @SuppressWarnings("unused")
+  private static Stream<ImageTestCase> imageArtifactTestCases() {
+    return Stream.of(
+        ImageTestCase.of("nginx:112", "nginx"),
+        ImageTestCase.of("nginx:1.12-alpine", "nginx"),
+        ImageTestCase.of("my-nginx:100000", "my-nginx"),
+        ImageTestCase.of("my.nginx:100000", "my.nginx"),
+        ImageTestCase.of("reg/repo:1.2.3", "reg/repo"),
+        ImageTestCase.of("reg.repo:123@sha256:13", "reg.repo:123"),
+        ImageTestCase.of("reg.default.svc/r/j:485fabc", "reg.default.svc/r/j"),
+        ImageTestCase.of("reg:5000/r/j:485fabc", "reg:5000/r/j"),
+        ImageTestCase.of("reg:5000/r__j:485fabc", "reg:5000/r__j"),
+        ImageTestCase.of("clouddriver", "clouddriver"),
+        ImageTestCase.of("clouddriver@sha256:9145", "clouddriver"),
+        ImageTestCase.of(
+            "localhost:5000/test/busybox@sha256:cbbf22", "localhost:5000/test/busybox"));
+  }
+
+  @RequiredArgsConstructor
+  @Value
+  private static class ImageTestCase {
+    final String image;
+    final String name;
+
+    static ImageTestCase of(String image, String name) {
+      return new ImageTestCase(image, name);
+    }
+  }
+
+  @Test
+  void emptyReplace() {
+    ArtifactReplacer artifactReplacer =
+        new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage()));
+    KubernetesManifest deployment = getDeploymentWithContainer(getContainer("nginx:112"));
+
+    ReplaceResult result =
+        artifactReplacer.replaceAll(
+            DEFAULT_BINDING, deployment, ImmutableList.of(), NAMESPACE, ACCOUNT);
+
+    assertThat(result.getManifest()).isEqualTo(deployment);
+    assertThat(result.getBoundArtifacts()).isEmpty();
+  }
+
+  @Test
+  void replacesDockerImage() {
+    ArtifactReplacer artifactReplacer =
+        new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage()));
+    KubernetesManifest deployment = getDeploymentWithContainer(getContainer("nginx"));
+
+    Artifact inputArtifact =
+        Artifact.builder().type("docker/image").name("nginx").reference("nginx:1.19.1").build();
+    ReplaceResult result =
+        artifactReplacer.replaceAll(
+            DEFAULT_BINDING, deployment, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT);
+
+    assertThat(extractImage(result.getManifest())).contains("nginx:1.19.1");
+    assertThat(result.getBoundArtifacts()).hasSize(1);
+    assertThat(Iterables.getOnlyElement(result.getBoundArtifacts())).isEqualTo(inputArtifact);
+  }
+
+  @Test
+  void replacesDockerImageWithTag() {
+    ArtifactReplacer artifactReplacer =
+        new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage()));
+    KubernetesManifest deployment = getDeploymentWithContainer(getContainer("nginx:1.18.0"));
+
+    Artifact inputArtifact =
+        Artifact.builder().type("docker/image").name("nginx").reference("nginx:1.19.1").build();
+    ReplaceResult result =
+        artifactReplacer.replaceAll(
+            DEFAULT_BINDING, deployment, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT);
+
+    assertThat(extractImage(result.getManifest())).contains("nginx:1.19.1");
+    assertThat(result.getBoundArtifacts()).hasSize(1);
+    assertThat(Iterables.getOnlyElement(result.getBoundArtifacts())).isEqualTo(inputArtifact);
+  }
+
+  /**
+   * This supports a legacy behavior: it is disabled by default and enabled by setting
+   * kubernetes.artifact-binding.docker-image to 'match-name-only'. If there is already a
+   * tag on the image in the manifest, we do not replace it.
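   * <p>A sketch of where that toggle would live in clouddriver's configuration (the YAML layout
   * here is an assumption inferred from the property name above, not taken from this patch):
   *
   * <pre>
   * kubernetes:
   *   artifact-binding:
   *     docker-image: match-name-only  # default: match-name-and-tag
   * </pre>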
+ */ + @Test + void doesNotReplaceImageWithTag() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest deployment = getDeploymentWithContainer(getContainer("nginx:1.18.0")); + + Artifact inputArtifact = + Artifact.builder().type("docker/image").name("nginx").reference("nginx:1.19.1").build(); + ReplaceResult result = + artifactReplacer.replaceAll( + "match-name-only", deployment, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(result.getManifest()).isEqualTo(deployment); + assertThat(result.getBoundArtifacts()).isEmpty(); + } + + /** + * Only artifacts of type kubernetes/* need to have the same account as the manifest to be + * replaced. + */ + @Test + void nonKubernetesArtifactIgnoresDifferentAccount() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest deployment = getDeploymentWithContainer(getContainer("nginx")); + + Artifact inputArtifact = + Artifact.builder() + .type("docker/image") + .name("nginx") + .putMetadata("account", "another-account") + .reference("nginx:1.19.1") + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, deployment, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(extractImage(result.getManifest())).contains("nginx:1.19.1"); + assertThat(result.getBoundArtifacts()).hasSize(1); + assertThat(Iterables.getOnlyElement(result.getBoundArtifacts())).isEqualTo(inputArtifact); + } + + /** + * Only artifacts of type kubernetes/* need to have the same namespace as the manifest to be + * replaced. + */ + @Test + void nonKubernetesArtifactIgnoresDifferentNamespace() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest deployment = getDeploymentWithContainer(getContainer("nginx")); + + Artifact inputArtifact = + Artifact.builder() + .type("docker/image") + .name("nginx") + .location("another-namespace") + .reference("nginx:1.19.1") + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, deployment, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(extractImage(result.getManifest())).contains("nginx:1.19.1"); + assertThat(result.getBoundArtifacts()).hasSize(1); + assertThat(Iterables.getOnlyElement(result.getBoundArtifacts())).isEqualTo(inputArtifact); + } + + @Test + void replacesConfigMap() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom("my-config-map"); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("my-config-map") + .location(NAMESPACE) + .version("v003") + .reference("my-config-map-v003") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, replicaSet, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(extractEnvRef(result.getManifest())).contains("my-config-map-v003"); + assertThat(result.getBoundArtifacts()).hasSize(1); + + Artifact replacedArtifact = Iterables.getOnlyElement(result.getBoundArtifacts()); + assertThat(replacedArtifact).isEqualTo(inputArtifact); + } + + @Test + void replacesConfigMapArtifactMissingAccount() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = 
getReplicaSetWithEnvFrom("my-config-map"); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("my-config-map") + .location(NAMESPACE) + .version("v003") + .reference("my-config-map-v003") + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, replicaSet, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(extractEnvRef(result.getManifest())).contains("my-config-map-v003"); + assertThat(result.getBoundArtifacts()).hasSize(1); + + Artifact replacedArtifact = Iterables.getOnlyElement(result.getBoundArtifacts()); + assertThat(replacedArtifact).isEqualTo(inputArtifact); + } + + @Test + void doesNotReplaceConfigmapWrongAccount() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom("my-config-map"); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("my-config-map") + .location(NAMESPACE) + .version("v003") + .reference("my-config-map-v003") + .putMetadata("account", "other-account") + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, replicaSet, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(extractEnvRef(result.getManifest())).contains("my-config-map"); + assertThat(result.getBoundArtifacts()).hasSize(0); + } + + @Test + void doesNotReplaceConfigmapWrongNamespace() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom("my-config-map"); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("my-config-map") + .location("other-namespace") + .version("v003") + .reference("my-config-map-v003") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, replicaSet, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + assertThat(extractEnvRef(result.getManifest())).contains("my-config-map"); + assertThat(result.getBoundArtifacts()).hasSize(0); + } + + @Test + void replacesConfigMapNoNamespace() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom("my-config-map"); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("my-config-map") + .version("v003") + .reference("my-config-map-v003") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult result = + artifactReplacer.replaceAll( + DEFAULT_BINDING, replicaSet, ImmutableList.of(inputArtifact), "", ACCOUNT); + + assertThat(extractEnvRef(result.getManifest())).contains("my-config-map-v003"); + assertThat(result.getBoundArtifacts()).hasSize(1); + + Artifact replacedArtifact = Iterables.getOnlyElement(result.getBoundArtifacts()); + assertThat(replacedArtifact).isEqualTo(inputArtifact); + } + + // Extracts the first container image from a Kubernetes manifest representing a deployment + private Optional extractImage(KubernetesManifest manifest) { + // We want to use the Kubernetes-supported json deserializer so need to first serialize + // the manifest to a string. 
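  // (Editorial note: KubernetesManifest is an untyped, map-based representation, so the round
  // trip through a JSON string is what bridges it to the typed client model.)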
+ V1Deployment deployment = json.deserialize(json.serialize(manifest), V1Deployment.class); + return Optional.ofNullable(deployment.getSpec()) + .map(V1DeploymentSpec::getTemplate) + .map(V1PodTemplateSpec::getSpec) + .map(V1PodSpec::getContainers) + .map(c -> c.get(0)) + .map(V1Container::getImage); + } + + // Extracts the config map ref for the first env ref for the first container in a Kubernetes + // manifest representing a deployment. + private Optional extractEnvRef(KubernetesManifest manifest) { + // We want to use the Kubernetes-supported json deserializer so need to first serialize + // the manifest to a string. + V1ReplicaSet replicaSet = json.deserialize(json.serialize(manifest), V1ReplicaSet.class); + return Optional.ofNullable(replicaSet.getSpec()) + .map(V1ReplicaSetSpec::getTemplate) + .map(V1PodTemplateSpec::getSpec) + .map(V1PodSpec::getContainers) + .map(c -> c.get(0)) + .map(V1Container::getEnvFrom) + .map(e -> e.get(0)) + .map(V1EnvFromSource::getConfigMapRef) + .map(V1ConfigMapEnvSource::getName); + } + + private KubernetesManifest getHpa(String kind, String name) { + String hpa = + json.serialize( + new V1HorizontalPodAutoscalerBuilder() + .withNewMetadata() + .withName("my-hpa") + .withNamespace("default") + .endMetadata() + .withNewSpec() + .withNewScaleTargetRef() + .withApiVersion("apps/v1") + .withKind(kind) + .withName(name) + .endScaleTargetRef() + .endSpec() + .build()); + return gson.fromJson(hpa, KubernetesManifest.class); + } + + private V1Container getContainer(String image) { + return new V1ContainerBuilder() + .withName("container") + .withImage(image) + .addNewPort() + .withContainerPort(80) + .endPort() + .build(); + } + + private KubernetesManifest getDeploymentWithContainer(V1Container container) { + return getDeployment(ImmutableList.of(container), ImmutableList.of()); + } + + private KubernetesManifest getDeploymentWithInitContainer(V1Container container) { + return getDeployment(ImmutableList.of(), ImmutableList.of(container)); + } + + private KubernetesManifest getDeployment( + Collection containers, Collection initContainers) { + String deployment = + json.serialize( + new V1DeploymentBuilder() + .withNewMetadata() + .withName("my-app-deployment") + .withLabels(ImmutableMap.of("app", "my-app")) + .endMetadata() + .withNewSpec() + .withReplicas(3) + .withNewSelector() + .withMatchLabels(ImmutableMap.of("app", "my-app")) + .endSelector() + .withNewTemplate() + .withNewMetadata() + .withLabels(ImmutableMap.of("app", "my-app")) + .endMetadata() + .withNewSpec() + .addAllToContainers(containers) + .addAllToInitContainers(initContainers) + .endSpec() + .endTemplate() + .endSpec() + .build()); + return gson.fromJson(deployment, KubernetesManifest.class); + } + + private KubernetesManifest getReplicaSetWithEnvFrom(String configMapRef) { + String deployment = + json.serialize( + new V1ReplicaSetBuilder() + .withNewMetadata() + .withName("my-app-deployment") + .endMetadata() + .withNewSpec() + .withReplicas(3) + .withNewTemplate() + .withNewSpec() + .addNewContainer() + .addNewEnvFrom() + .withNewConfigMapRef() + .withNewName(configMapRef) + .endConfigMapRef() + .endEnvFrom() + .endContainer() + .endSpec() + .endTemplate() + .endSpec() + .build()); + return gson.fromJson(deployment, KubernetesManifest.class); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ReplacerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ReplacerTest.java new file 
mode 100644 index 00000000000..278a7a9190b --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ReplacerTest.java @@ -0,0 +1,1436 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.gson.Gson; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ArtifactReplacer.ReplaceResult; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import io.kubernetes.client.openapi.JSON; +import io.kubernetes.client.openapi.models.*; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +/** + * The goal of this class is to do a test on each of the statically-defined replacers in {@link + * Replacer}. Given {@link Replacer} has only package-private functions and users would only be + * consuming these wrapped in an {@link ArtifactReplacer} we will do the same here; for each {@link + * Replacer}, we wrap it in an {@link ArtifactReplacer} and check that it can find and replace the + * expected artifacts on a Kubernetes object. + * + *

While {@link ArtifactReplacerTest} is focused more on the logic of {@link ArtifactReplacer}
+ * (ex: do we properly filter artifacts by namespace/account), this class focuses on ensuring that
+ * each static replacer works as expected.
+ */
+final class ReplacerTest {
+  // We serialize generated Kubernetes metadata objects with io.kubernetes.client.openapi.JSON
+  // so that they match what we get back from kubectl. We then use Gson to convert to a
+  // KubernetesManifest because that's what we currently use to parse the result from kubectl,
+  // and we want this test to be realistic.
+  private static final JSON json = new JSON();
+  private static final Gson gson = new Gson();
+
+  private static final String NAMESPACE = "ns";
+  private static final String ACCOUNT = "my-account";
+  private static final String DEFAULT_DOCKER_IMAGE_BINDING = "match-name-and-tag";
+  private static final String MATCH_NAME_ONLY_ARTIFACT_BINDING_STRATEGY = "match-name-only";
+
+  @Test
+  void findReplicaSetDockerImages() {
+    ArtifactReplacer artifactReplacer =
+        new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage()));
+    KubernetesManifest replicaSet = getReplicaSetWithContainers();
+
+    Set<Artifact> artifacts = artifactReplacer.findAll(replicaSet);
+    assertThat(artifacts).hasSize(2);
+
+    Map<String, Artifact> byReference =
+        artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a));
+
+    assertThat(byReference.get("gcr.io/my-repository/my-image:my-tag"))
+        .satisfies(
+            artifact -> {
+              assertThat(artifact).isNotNull();
+              assertThat(artifact.getType()).isEqualTo("docker/image");
+              assertThat(artifact.getName()).isEqualTo("gcr.io/my-repository/my-image");
+              assertThat(artifact.getReference()).isEqualTo("gcr.io/my-repository/my-image:my-tag");
+            });
+
+    assertThat(byReference.get("gcr.io/my-other-repository/some-image"))
+        .satisfies(
+            artifact -> {
+              assertThat(artifact).isNotNull();
+              assertThat(artifact.getType()).isEqualTo("docker/image");
+              assertThat(artifact.getName()).isEqualTo("gcr.io/my-other-repository/some-image");
+              assertThat(artifact.getReference())
+                  .isEqualTo("gcr.io/my-other-repository/some-image");
+            });
+  }
+
+  @Test
+  void replaceReplicaSetDockerImages() {
+    ArtifactReplacer artifactReplacer =
+        new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage()));
+    KubernetesManifest replicaSet = getReplicaSetWithContainers();
+
+    Artifact inputArtifact =
+        Artifact.builder()
+            .type("docker/image")
+            .name("gcr.io/my-other-repository/some-image")
+            .reference("gcr.io/my-other-repository/some-image:some-tag")
+            .build();
+    ReplaceResult replaceResult =
+        artifactReplacer.replaceAll(
+            DEFAULT_DOCKER_IMAGE_BINDING,
+            replicaSet,
+            ImmutableList.of(inputArtifact),
+            NAMESPACE,
+            ACCOUNT);
+
+    V1ReplicaSet replacedReplicaSet =
+        KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class);
+    assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers())
+        .extracting(V1Container::getImage)
+        .containsExactly(
+            // Only the second image should have been replaced.
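            // (Editorial note: the only artifact supplied is named
            // gcr.io/my-other-repository/some-image, so the first container, whose image has a
            // different name, is left untouched.)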
+ "gcr.io/my-repository/my-image:my-tag", + "gcr.io/my-other-repository/some-image:some-tag"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + private KubernetesManifest getReplicaSetWithContainers() { + String replicaSet = + json.serialize( + new V1ReplicaSetBuilder() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addToContainers( + new V1ContainerBuilder() + .withName("my-image-with-tag") + .withImage("gcr.io/my-repository/my-image:my-tag") + .build()) + .addToContainers( + new V1ContainerBuilder() + .withName("my-image-without-tag") + .withImage("gcr.io/my-other-repository/some-image") + .build()) + .endSpec() + .endTemplate() + .endSpec() + .build()); + return gson.fromJson(replicaSet, KubernetesManifest.class); + } + + @Test + void findPodDockerImages() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.podDockerImage())); + KubernetesManifest pod = getPod(); + + Set artifacts = artifactReplacer.findAll(pod); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("gcr.io/my-repository/my-image:my-tag")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("docker/image"); + assertThat(artifact.getName()).isEqualTo("gcr.io/my-repository/my-image:my-tag"); + assertThat(artifact.getReference()).isEqualTo("gcr.io/my-repository/my-image:my-tag"); + }); + + assertThat(byReference.get("gcr.io/my-other-repository/some-image")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("docker/image"); + assertThat(artifact.getName()).isEqualTo("gcr.io/my-other-repository/some-image"); + assertThat(artifact.getReference()) + .isEqualTo("gcr.io/my-other-repository/some-image"); + }); + } + + @Test + void replacePodDockerImages() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.podDockerImage())); + KubernetesManifest pod = getPod(); + + Artifact inputArtifact = + Artifact.builder() + .type("docker/image") + .name("gcr.io/my-other-repository/some-image") + .reference("gcr.io/my-other-repository/some-image:some-tag") + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, pod, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + V1Pod replacedPod = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1Pod.class); + assertThat(replacedPod.getSpec().getContainers()) + .extracting(V1Container::getImage) + .containsExactly( + // Only the second image should have been replaced. 
+ "gcr.io/my-repository/my-image:my-tag", + "gcr.io/my-other-repository/some-image:some-tag"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + private KubernetesManifest getPod() { + String pod = + json.serialize( + new V1PodBuilder() + .withNewSpec() + .addNewContainer() + .withName("my-image-with-tag") + .withImage("gcr.io/my-repository/my-image:my-tag") + .endContainer() + .addNewContainer() + .withName("my-image-without-tag") + .withImage("gcr.io/my-other-repository/some-image") + .endContainer() + .endSpec() + .build()); + return gson.fromJson(pod, KubernetesManifest.class); + } + + @Test + void findConfigMapVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapVolume())); + KubernetesManifest replicaSet = getReplicaSetWithVolumes(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("first-config-map")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("first-config-map"); + assertThat(artifact.getReference()).isEqualTo("first-config-map"); + }); + + assertThat(byReference.get("second-config-map")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("second-config-map"); + assertThat(artifact.getReference()).isEqualTo("second-config-map"); + }); + } + + @Test + void replaceConfigMapVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapVolume())); + KubernetesManifest replicaSet = getReplicaSetWithVolumes(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("second-config-map") + .location(NAMESPACE) + .version("v003") + .reference("second-config-map-v003") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getConfigMap) + .filteredOn(Objects::nonNull) + .extracting(V1ConfigMapVolumeSource::getName) + .containsExactly( + // Only the second config map should have been replaced. + "first-config-map", "second-config-map-v003"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getSecret) + .filteredOn(Objects::nonNull) + .extracting(V1SecretVolumeSource::getSecretName) + .containsExactly( + // No secrets should have been replaced. 
+ "first-secret", "second-secret"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @Test + void findSecretVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretVolume())); + KubernetesManifest replicaSet = getReplicaSetWithVolumes(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("first-secret")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("first-secret"); + assertThat(artifact.getReference()).isEqualTo("first-secret"); + }); + + assertThat(byReference.get("second-secret")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("second-secret"); + assertThat(artifact.getReference()).isEqualTo("second-secret"); + }); + } + + @Test + void replaceSecretVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretVolume())); + KubernetesManifest replicaSet = getReplicaSetWithVolumes(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/secret") + .name("first-secret") + .location(NAMESPACE) + .version("v007") + .reference("first-secret-v007") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getConfigMap) + .filteredOn(Objects::nonNull) + .extracting(V1ConfigMapVolumeSource::getName) + .containsExactly( + // No config maps should have been replaced. + "first-config-map", "second-config-map"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getSecret) + .filteredOn(Objects::nonNull) + .extracting(V1SecretVolumeSource::getSecretName) + .containsExactly( + // Only the first secret should have been replaced. 
+ "first-secret-v007", "second-secret"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + private KubernetesManifest getReplicaSetWithVolumes() { + String replicaSet = + json.serialize( + new V1ReplicaSetBuilder() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addToVolumes( + new V1VolumeBuilder() + .withConfigMap( + new V1ConfigMapVolumeSourceBuilder() + .withName("first-config-map") + .build()) + .build()) + .addToVolumes( + new V1VolumeBuilder() + .withConfigMap( + new V1ConfigMapVolumeSourceBuilder() + .withName("second-config-map") + .build()) + .build()) + .addToVolumes( + new V1VolumeBuilder() + .withSecret( + new V1SecretVolumeSourceBuilder() + .withSecretName("first-secret") + .build()) + .build()) + .addToVolumes( + new V1VolumeBuilder() + .withSecret( + new V1SecretVolumeSourceBuilder() + .withSecretName("second-secret") + .build()) + .build()) + .endSpec() + .endTemplate() + .endSpec() + .build()); + return gson.fromJson(replicaSet, KubernetesManifest.class); + } + + @Test + void findProjectedConfigMapVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapProjectedVolume())); + KubernetesManifest replicaSet = getReplicaSetWithProjectedVolumes(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("first-config-map")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("first-config-map"); + assertThat(artifact.getReference()).isEqualTo("first-config-map"); + }); + + assertThat(byReference.get("second-config-map")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("second-config-map"); + assertThat(artifact.getReference()).isEqualTo("second-config-map"); + }); + } + + @Test + void replaceProjectedConfigMapVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapProjectedVolume())); + KubernetesManifest replicaSet = getReplicaSetWithProjectedVolumes(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("second-config-map") + .location(NAMESPACE) + .version("v003") + .reference("second-config-map-v003") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getProjected) + .filteredOn(Objects::nonNull) + .extracting(V1ProjectedVolumeSource::getSources) + .flatExtracting(list -> list) + .extracting("configMap") + .filteredOn(Objects::nonNull) + .extracting("name") + .containsExactly( + // Only the second config map should have been replaced. 
+ "first-config-map", "second-config-map-v003"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getProjected) + .filteredOn(Objects::nonNull) + .extracting(V1ProjectedVolumeSource::getSources) + .flatExtracting(list -> list) + .extracting("secret") + .filteredOn(Objects::nonNull) + .extracting("name") + .containsExactly( + // No secrets should have been replaced. + "first-secret", "second-secret"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @Test + void findProjectedSecretVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretProjectedVolume())); + KubernetesManifest replicaSet = getReplicaSetWithProjectedVolumes(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("first-secret")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("first-secret"); + assertThat(artifact.getReference()).isEqualTo("first-secret"); + }); + + assertThat(byReference.get("second-secret")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("second-secret"); + assertThat(artifact.getReference()).isEqualTo("second-secret"); + }); + } + + @Test + void replaceProjectedSecretVolume() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretProjectedVolume())); + KubernetesManifest replicaSet = getReplicaSetWithProjectedVolumes(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/secret") + .name("first-secret") + .location(NAMESPACE) + .version("v007") + .reference("first-secret-v007") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getProjected) + .filteredOn(Objects::nonNull) + .extracting(V1ProjectedVolumeSource::getSources) + .flatExtracting(list -> list) + .extracting("configMap") + .filteredOn(Objects::nonNull) + .extracting("name") + .containsExactly( + // No config maps should have been replaced. + "first-config-map", "second-config-map"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getVolumes()) + .extracting(V1Volume::getProjected) + .filteredOn(Objects::nonNull) + .extracting(V1ProjectedVolumeSource::getSources) + .flatExtracting(list -> list) + .extracting("secret") + .filteredOn(Objects::nonNull) + .extracting("name") + .containsExactly( + // Only the first secret should have been replaced. 
+ "first-secret-v007", "second-secret"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + private KubernetesManifest getReplicaSetWithProjectedVolumes() { + String replicaSet = + json.serialize( + new V1ReplicaSetBuilder() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addToVolumes( + new V1VolumeBuilder() + .withName("first-projected-volume") + .withProjected( + new V1ProjectedVolumeSourceBuilder() + .build() + .addSourcesItem( + new V1VolumeProjectionBuilder() + .withConfigMap( + new V1ConfigMapProjectionBuilder() + .withName("first-config-map") + .build()) + .build()) + .addSourcesItem( + new V1VolumeProjectionBuilder() + .withConfigMap( + new V1ConfigMapProjectionBuilder() + .withName("second-config-map") + .build()) + .build()) + .addSourcesItem( + new V1VolumeProjectionBuilder() + .withSecret( + new V1SecretProjectionBuilder() + .withName("first-secret") + .build()) + .build()) + .addSourcesItem( + new V1VolumeProjectionBuilder() + .withSecret( + new V1SecretProjectionBuilder() + .withName("second-secret") + .build()) + .build())) + .build()) + .endSpec() + .endTemplate() + .endSpec() + .build()); + KubernetesManifest kubernetesManifest = gson.fromJson(replicaSet, KubernetesManifest.class); + return kubernetesManifest; + } + + @Test + void findConfigMapKeyValue() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapKeyValue())); + KubernetesManifest replicaSet = getReplicaSetWithKeyRefs(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("first-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("first-name"); + assertThat(artifact.getReference()).isEqualTo("first-name"); + }); + + assertThat(byReference.get("second-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("second-name"); + assertThat(artifact.getReference()).isEqualTo("second-name"); + }); + } + + @Test + void replaceConfigMapKeyValue() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapKeyValue())); + KubernetesManifest replicaSet = getReplicaSetWithKeyRefs(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("first-name") + .location(NAMESPACE) + .version("v006") + .reference("first-name-v006") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnv) + .extracting(V1EnvVar::getValueFrom) + .extracting(V1EnvVarSource::getConfigMapKeyRef) + .filteredOn(Objects::nonNull) + .extracting(V1ConfigMapKeySelector::getName) + .containsExactly( + // We should have replaced both references to the first name. 
+ "first-name-v006", "first-name-v006", "second-name"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnv) + .extracting(V1EnvVar::getValueFrom) + .extracting(V1EnvVarSource::getSecretKeyRef) + .filteredOn(Objects::nonNull) + .extracting(V1SecretKeySelector::getName) + .containsExactly( + // We should not have replaced any secret references. + "first-name", "second-name", "second-name"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @Test + void findSecretKeyValue() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretKeyValue())); + KubernetesManifest replicaSet = getReplicaSetWithKeyRefs(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("first-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("first-name"); + assertThat(artifact.getReference()).isEqualTo("first-name"); + }); + + assertThat(byReference.get("second-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("second-name"); + assertThat(artifact.getReference()).isEqualTo("second-name"); + }); + } + + @Test + void replaceSecretKeyValue() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretKeyValue())); + KubernetesManifest replicaSet = getReplicaSetWithKeyRefs(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/secret") + .name("second-name") + .location(NAMESPACE) + .version("v009") + .reference("second-name-v009") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnv) + .extracting(V1EnvVar::getValueFrom) + .extracting(V1EnvVarSource::getConfigMapKeyRef) + .filteredOn(Objects::nonNull) + .extracting(V1ConfigMapKeySelector::getName) + .containsExactly( + // We should not have replaced any config map references. + "first-name", "first-name", "second-name"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnv) + .extracting(V1EnvVar::getValueFrom) + .extracting(V1EnvVarSource::getSecretKeyRef) + .filteredOn(Objects::nonNull) + .extracting(V1SecretKeySelector::getName) + .containsExactly( + // We should have replaced both references to second-name. 
+ "first-name", "second-name-v009", "second-name-v009"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + private KubernetesManifest getReplicaSetWithKeyRefs() { + String replicaSet = + json.serialize( + new V1ReplicaSetBuilder() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addToContainers( + new V1ContainerBuilder() + .withName("my-image-with-tag") + .withEnv( + new V1EnvVarBuilder() + .withValueFrom( + new V1EnvVarSourceBuilder() + .withConfigMapKeyRef( + new V1ConfigMapKeySelectorBuilder() + .withName("first-name") + .withKey("first-key") + .build()) + .build()) + .build(), + new V1EnvVarBuilder() + .withValueFrom( + new V1EnvVarSourceBuilder() + .withConfigMapKeyRef( + new V1ConfigMapKeySelectorBuilder() + // Second key also from the first config map + .withName("first-name") + .withKey("second-key") + .build()) + .build()) + .build(), + new V1EnvVarBuilder() + .withValueFrom( + new V1EnvVarSourceBuilder() + .withConfigMapKeyRef( + new V1ConfigMapKeySelectorBuilder() + .withName("second-name") + .withKey("third-key") + .build()) + .build()) + .build(), + new V1EnvVarBuilder() + .withValueFrom( + new V1EnvVarSourceBuilder() + .withSecretKeyRef( + new V1SecretKeySelectorBuilder() + .withName("first-name") + .withKey("first-key") + .build()) + .build()) + .build(), + new V1EnvVarBuilder() + .withValueFrom( + new V1EnvVarSourceBuilder() + .withSecretKeyRef( + new V1SecretKeySelectorBuilder() + .withName("second-name") + .withKey("second-key") + .build()) + .build()) + .build(), + new V1EnvVarBuilder() + .withValueFrom( + new V1EnvVarSourceBuilder() + .withSecretKeyRef( + new V1SecretKeySelectorBuilder() + // Third key also from the second secret + .withName("second-name") + .withKey("third-key") + .build()) + .build()) + .build()) + .build()) + .endSpec() + .endTemplate() + .endSpec() + .build()); + return gson.fromJson(replicaSet, KubernetesManifest.class); + } + + @Test + void findConfigMapEnvFrom() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("config-map-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("config-map-name"); + assertThat(artifact.getReference()).isEqualTo("config-map-name"); + }); + + assertThat(byReference.get("shared-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/configMap"); + assertThat(artifact.getName()).isEqualTo("shared-name"); + assertThat(artifact.getReference()).isEqualTo("shared-name"); + }); + } + + @Test + void replaceConfigMapEnvFrom() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.configMapEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/configMap") + .name("shared-name") + .location(NAMESPACE) + .version("v020") + .reference("shared-name-v020") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + 
artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnvFrom) + .extracting(V1EnvFromSource::getConfigMapRef) + .filteredOn(Objects::nonNull) + .extracting(V1ConfigMapEnvSource::getName) + .containsExactly( + // We should have replaced only shared-name. + "config-map-name", "shared-name-v020"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnvFrom) + .extracting(V1EnvFromSource::getSecretRef) + .filteredOn(Objects::nonNull) + .extracting(V1SecretEnvSource::getName) + .containsExactly( + // We should not have replaced any secret references, even the one with the same name as + // the artifact. + "secret-name", "shared-name"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @Test + void findSecretEnvFrom() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom(); + + Set artifacts = artifactReplacer.findAll(replicaSet); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("secret-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("secret-name"); + assertThat(artifact.getReference()).isEqualTo("secret-name"); + }); + + assertThat(byReference.get("shared-name")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/secret"); + assertThat(artifact.getName()).isEqualTo("shared-name"); + assertThat(artifact.getReference()).isEqualTo("shared-name"); + }); + } + + @Test + void replaceSecretEnvFrom() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.secretEnv())); + KubernetesManifest replicaSet = getReplicaSetWithEnvFrom(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/secret") + .name("shared-name") + .location(NAMESPACE) + .version("v987") + .reference("shared-name-v987") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + replicaSet, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1ReplicaSet replacedReplicaSet = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1ReplicaSet.class); + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnvFrom) + .extracting(V1EnvFromSource::getConfigMapRef) + .filteredOn(Objects::nonNull) + .extracting(V1ConfigMapEnvSource::getName) + .containsExactly( + // We should not have replaced any config map references, even the one with the same + // name as the artifact. 
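            // (Editorial note: the fixture deliberately reuses "shared-name" for both a
            // configMapRef and a secretRef; Replacer.secretEnv() is scoped to secrets, so only
            // the secretRef is rebound.)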
+ "config-map-name", "shared-name"); + + assertThat(replacedReplicaSet.getSpec().getTemplate().getSpec().getContainers()) + .flatExtracting(V1Container::getEnvFrom) + .extracting(V1EnvFromSource::getSecretRef) + .filteredOn(Objects::nonNull) + .extracting(V1SecretEnvSource::getName) + .containsExactly( + // We should not have replaced any secret references, even the one with the same name as + // the artifact. + "secret-name", "shared-name-v987"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + private KubernetesManifest getReplicaSetWithEnvFrom() { + String replicaSet = + json.serialize( + new V1ReplicaSetBuilder() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addToContainers( + new V1ContainerBuilder() + .withName("my-image-with-tag") + .withEnvFrom( + // Give them both the same name so we can ensure we don't mix + // secrets/configMaps + new V1EnvFromSourceBuilder() + .withConfigMapRef( + new V1ConfigMapEnvSourceBuilder() + .withName("config-map-name") + .build()) + .build(), + new V1EnvFromSourceBuilder() + .withConfigMapRef( + new V1ConfigMapEnvSourceBuilder() + .withName("shared-name") + .build()) + .build(), + new V1EnvFromSourceBuilder() + .withSecretRef( + new V1SecretEnvSourceBuilder().withName("secret-name").build()) + .build(), + new V1EnvFromSourceBuilder() + .withSecretRef( + new V1SecretEnvSourceBuilder().withName("shared-name").build()) + .build()) + .build()) + .endSpec() + .endTemplate() + .endSpec() + .build()); + return gson.fromJson(replicaSet, KubernetesManifest.class); + } + + @Test + void findHpaDeployment() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaDeployment())); + KubernetesManifest hpa = getHpaForDeployment(); + + Set artifacts = artifactReplacer.findAll(hpa); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/deployment"); + assertThat(artifact.getName()).isEqualTo("my-deployment"); + assertThat(artifact.getReference()).isEqualTo("my-deployment"); + }); + } + + @Test + void findHpaDeploymentIgnoresReplicaSet() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaDeployment())); + KubernetesManifest hpa = getHpaForReplicaSet(); + + Set artifacts = artifactReplacer.findAll(hpa); + assertThat(artifacts).isEmpty(); + } + + @Test + void replaceHpaDeployment() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaDeployment())); + KubernetesManifest hpa = getHpaForDeployment(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/deployment") + .name("my-deployment") + .location(NAMESPACE) + .version("v020") + .reference("my-deployment-v020") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, hpa, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + V1HorizontalPodAutoscaler replacedHpa = + KubernetesCacheDataConverter.getResource( + replaceResult.getManifest(), V1HorizontalPodAutoscaler.class); + assertThat(replacedHpa.getSpec().getScaleTargetRef().getName()).isEqualTo("my-deployment-v020"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + 
assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @Test + void findHpaReplicaSet() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaReplicaSet())); + KubernetesManifest hpa = getHpaForReplicaSet(); + + Set artifacts = artifactReplacer.findAll(hpa); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("kubernetes/replicaSet"); + assertThat(artifact.getName()).isEqualTo("my-replica-set"); + assertThat(artifact.getReference()).isEqualTo("my-replica-set"); + }); + } + + @Test + void findHpaReplicaSetIgnoresDeployment() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaReplicaSet())); + KubernetesManifest hpa = getHpaForDeployment(); + + Set artifacts = artifactReplacer.findAll(hpa); + assertThat(artifacts).isEmpty(); + } + + @Test + void replaceHpaReplicaSet() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaReplicaSet())); + KubernetesManifest hpa = getHpaForReplicaSet(); + + Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/replicaSet") + .name("my-replica-set") + .location(NAMESPACE) + .version("v020") + .reference("my-replica-set-v013") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, hpa, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + V1HorizontalPodAutoscaler replacedHpa = + KubernetesCacheDataConverter.getResource( + replaceResult.getManifest(), V1HorizontalPodAutoscaler.class); + assertThat(replacedHpa.getSpec().getScaleTargetRef().getName()) + .isEqualTo("my-replica-set-v013"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @Test + void replaceHpaWrongArtifact() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.hpaReplicaSet())); + KubernetesManifest hpa = getHpaForReplicaSet(); + + // The input artifact has the correct name but is of type deployment; we should not replace + // the reference. 
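+    // (Replacer.hpaReplicaSet matches on the artifact's type as well as its name.)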
+ Artifact inputArtifact = + Artifact.builder() + .type("kubernetes/deployment") + .name("my-replica-set") + .location(NAMESPACE) + .version("v020") + .reference("my-replica-set-v013") + .putMetadata("account", ACCOUNT) + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, hpa, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + V1HorizontalPodAutoscaler replacedHpa = + KubernetesCacheDataConverter.getResource( + replaceResult.getManifest(), V1HorizontalPodAutoscaler.class); + assertThat(replacedHpa.getSpec().getScaleTargetRef().getName()).isEqualTo("my-replica-set"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).isEmpty(); + } + + private KubernetesManifest getHpaForDeployment() { + String hpa = + json.serialize( + new V1HorizontalPodAutoscalerBuilder() + .withNewSpec() + .withScaleTargetRef( + new V1CrossVersionObjectReferenceBuilder() + .withKind("deployment") + .withName("my-deployment") + .build()) + .endSpec() + .build()); + return gson.fromJson(hpa, KubernetesManifest.class); + } + + private KubernetesManifest getHpaForReplicaSet() { + String hpa = + json.serialize( + new V1HorizontalPodAutoscalerBuilder() + .withNewSpec() + .withScaleTargetRef( + new V1CrossVersionObjectReferenceBuilder() + .withKind("replicaSet") + .withName("my-replica-set") + .build()) + .endSpec() + .build()); + return gson.fromJson(hpa, KubernetesManifest.class); + } + + @Test + void findCronJobDockerImages() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest cronJob = getCronJob(); + + Set artifacts = artifactReplacer.findAll(cronJob); + assertThat(artifacts).hasSize(2); + + Map byReference = + artifacts.stream().collect(toImmutableMap(Artifact::getReference, a -> a)); + + assertThat(byReference.get("gcr.io/my-repository/my-image:my-tag")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("docker/image"); + assertThat(artifact.getName()).isEqualTo("gcr.io/my-repository/my-image"); + assertThat(artifact.getReference()).isEqualTo("gcr.io/my-repository/my-image:my-tag"); + }); + + assertThat(byReference.get("gcr.io/my-other-repository/some-image")) + .satisfies( + artifact -> { + assertThat(artifact).isNotNull(); + assertThat(artifact.getType()).isEqualTo("docker/image"); + assertThat(artifact.getName()).isEqualTo("gcr.io/my-other-repository/some-image"); + assertThat(artifact.getReference()) + .isEqualTo("gcr.io/my-other-repository/some-image"); + }); + } + + @Test + void replaceCronJobDockerImages() { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest cronJob = getCronJob(); + + Artifact inputArtifact = + Artifact.builder() + .type("docker/image") + .name("gcr.io/my-other-repository/some-image") + .reference("gcr.io/my-other-repository/some-image:some-tag") + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + DEFAULT_DOCKER_IMAGE_BINDING, + cronJob, + ImmutableList.of(inputArtifact), + NAMESPACE, + ACCOUNT); + + V1beta1CronJob replacedCronJob = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1beta1CronJob.class); + assertThat( + replacedCronJob + .getSpec() + .getJobTemplate() + .getSpec() + .getTemplate() + .getSpec() + .getContainers()) + .extracting(V1Container::getImage) + .containsExactly( + // Only the second image should have been replaced. 
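+            // The first image's name doesn't match the input artifact, so it keeps its tag.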
+ "gcr.io/my-repository/my-image:my-tag", + "gcr.io/my-other-repository/some-image:some-tag"); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(1); + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + + @ParameterizedTest(name = "{index} ==> with docker image artifact binding strategy = {0}") + @ValueSource(strings = {DEFAULT_DOCKER_IMAGE_BINDING, MATCH_NAME_ONLY_ARTIFACT_BINDING_STRATEGY}) + void replaceCronJobDockerImageWithArtifactBindingStrategyUsingDockerImageReplacer( + String artifactBindingStrategy) { + ArtifactReplacer artifactReplacer = + new ArtifactReplacer(ImmutableList.of(Replacer.dockerImage())); + KubernetesManifest cronJob = getCronJobWithOneContainerWithImageTag(); + Artifact inputArtifact = + Artifact.builder() + .type("docker/image") + .name("gcr.io/my-repository/my-image") + .reference("gcr.io/my-repository/my-image:expected-tag") + .build(); + ReplaceResult replaceResult = + artifactReplacer.replaceAll( + artifactBindingStrategy, cronJob, ImmutableList.of(inputArtifact), NAMESPACE, ACCOUNT); + + int expectedBoundArtifacts = 0; + String expectedImageAndTag = "gcr.io/my-repository/my-image:original-tag"; + if (artifactBindingStrategy.equals(DEFAULT_DOCKER_IMAGE_BINDING)) { + expectedBoundArtifacts = 1; + expectedImageAndTag = "gcr.io/my-repository/my-image:expected-tag"; + } + + V1beta1CronJob replacedCronJob = + KubernetesCacheDataConverter.getResource(replaceResult.getManifest(), V1beta1CronJob.class); + assertThat( + replacedCronJob + .getSpec() + .getJobTemplate() + .getSpec() + .getTemplate() + .getSpec() + .getContainers()) + .extracting(V1Container::getImage) + .containsExactly(expectedImageAndTag); + + Set artifacts = replaceResult.getBoundArtifacts(); + assertThat(artifacts).hasSize(expectedBoundArtifacts); + if (expectedBoundArtifacts > 0) { + assertThat(Iterables.getOnlyElement(artifacts)).isEqualTo(inputArtifact); + } + } + + private KubernetesManifest getCronJob() { + String cronJob = + json.serialize( + new V1beta1CronJobBuilder() + .withNewSpec() + .withNewJobTemplate() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addNewContainer() + .withName("my-image-with-tag") + .withImage("gcr.io/my-repository/my-image:my-tag") + .endContainer() + .addNewContainer() + .withName("my-image-without-tag") + .withImage("gcr.io/my-other-repository/some-image") + .endContainer() + .endSpec() + .endTemplate() + .endSpec() + .endJobTemplate() + .endSpec() + .build()); + + return gson.fromJson(cronJob, KubernetesManifest.class); + } + + private KubernetesManifest getCronJobWithOneContainerWithImageTag() { + String cronJob = + json.serialize( + new V1beta1CronJobBuilder() + .withNewSpec() + .withNewJobTemplate() + .withNewSpec() + .withNewTemplate() + .withNewSpec() + .addNewContainer() + .withName("my-image-with-tag") + .withImage("gcr.io/my-repository/my-image:original-tag") + .endContainer() + .endSpec() + .endTemplate() + .endSpec() + .endJobTemplate() + .endSpec() + .build()); + + return gson.fromJson(cronJob, KubernetesManifest.class); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ResourceVersionerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ResourceVersionerTest.java new file mode 100644 index 00000000000..08bb97ac3f7 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/artifact/ResourceVersionerTest.java @@ -0,0 +1,139 @@ 
+/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.artifact; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.ArtifactProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.util.OptionalInt; +import java.util.stream.Stream; +import lombok.RequiredArgsConstructor; +import lombok.Value; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +final class ResourceVersionerTest { + private static final ObjectMapper mapper = new ObjectMapper(); + + private static final String ACCOUNT = "my-account"; + private static final String NAMESPACE = "ns"; + private static final String NAME = "name"; + private static final String KIND = "Pod"; + + @Mock private KubernetesCredentials mockCredentials; + @Mock private ArtifactProvider artifactProvider; + private ResourceVersioner versioner; + + @BeforeEach + void setUp() { + versioner = new ResourceVersioner(artifactProvider); + } + + @Test + void findsMatchingVersionByEquality() { + KubernetesManifest manifest1 = getStubManifest(); + KubernetesManifest manifest2 = getStubManifest(); + // Add some random data so that the two manifests are different. 
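+    // getVersion then matches by equality against each artifact's lastAppliedConfiguration.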
+ manifest1.put("data", ImmutableMap.of("key", 1)); + manifest2.put("data", ImmutableMap.of("key", 3)); + + when(artifactProvider.getArtifacts( + KubernetesKind.fromString(KIND), NAME, NAMESPACE, mockCredentials)) + .thenReturn( + ImmutableList.of( + Artifact.builder() + .putMetadata("lastAppliedConfiguration", manifest1) + .putMetadata("account", ACCOUNT) + .version("v001") + .build(), + Artifact.builder() + .putMetadata("lastAppliedConfiguration", manifest2) + .putMetadata("account", ACCOUNT) + .version("v002") + .build())); + + OptionalInt version = versioner.getVersion(manifest1, mockCredentials); + assertThat(version).hasValue(1); + } + + @ParameterizedTest + @MethodSource("versionTestCases") + void correctlyPicksNextVersion(VersionTestCase testCase) { + when(artifactProvider.getArtifacts( + KubernetesKind.fromString(KIND), NAME, NAMESPACE, mockCredentials)) + .thenReturn( + testCase.getExistingVersions().stream() + .map(v -> Artifact.builder().putMetadata("account", ACCOUNT).version(v).build()) + .collect(toImmutableList())); + + OptionalInt version = versioner.getVersion(getStubManifest(), mockCredentials); + assertThat(version).hasValue(testCase.getNextVersion()); + } + + // Called by @MethodSource which error-prone does not detect. + @SuppressWarnings("unused") + private static Stream versionTestCases() { + return Stream.of( + new VersionTestCase(ImmutableList.of("v000", "v001", "v002"), 3), + new VersionTestCase(ImmutableList.of("v000"), 1), + new VersionTestCase(ImmutableList.of(), 0), + new VersionTestCase(ImmutableList.of("v001"), 2), + // Unparseable version should be ignored + new VersionTestCase(ImmutableList.of("v0abcde", "v000"), 1), + // Version that somehow ended up negative should be ignored + new VersionTestCase(ImmutableList.of("v-20", "v000"), 1), + new VersionTestCase(ImmutableList.of("abc", "", "v001"), 2), + new VersionTestCase(ImmutableList.of("v001", "v002", "v003"), 4), + new VersionTestCase(ImmutableList.of("v000", "v002", "v003"), 4), + new VersionTestCase(ImmutableList.of("v002", "v000", "v001"), 3), + new VersionTestCase(ImmutableList.of("v000", "v001", "v003"), 4), + new VersionTestCase(ImmutableList.of("v001", "v000", "v003"), 4), + new VersionTestCase(ImmutableList.of("v999"), 1000), + new VersionTestCase(ImmutableList.of("v1000"), 1001), + new VersionTestCase(ImmutableList.of("v12345", "v98765"), 98766)); + } + + @RequiredArgsConstructor + @Value + private static class VersionTestCase { + private final ImmutableCollection existingVersions; + private final int nextVersion; + } + + private static KubernetesManifest getStubManifest() { + return mapper.convertValue( + ImmutableMap.of( + "kind", KIND, "metadata", ImmutableMap.of("name", NAME, "namespace", NAMESPACE)), + KubernetesManifest.class); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/KeysTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/KeysTest.java new file mode 100644 index 00000000000..878da9e3158 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/KeysTest.java @@ -0,0 +1,41 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.CacheKey; +import java.util.Optional; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +final class KeysTest { + @ParameterizedTest + @ValueSource( + strings = { + "kubernetes.v2:infrastructure:secret:k8s:spin:spinnaker", + "kubernetes.v2:logical:applications:spinnaker", + "kubernetes.v2:logical:clusters:k8s:docs:docs-site" + }) + void roundTripParse(String key) { + Optional parsed = Keys.parseKey(key); + + assertThat(parsed).isPresent(); + assertThat(parsed.get().toString()).isEqualTo(key); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConverterTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConverterTest.java new file mode 100644 index 00000000000..362f9a3b850 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCacheDataConverterTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import java.io.IOException; +import java.io.InputStream; +import java.util.Set; +import org.junit.jupiter.api.Test; + +public class KubernetesCacheDataConverterTest { + + @Test + public void testOwnerRefUnregisteredKind() throws IOException { + try (InputStream stream = KubernetesManifest.class.getResourceAsStream("owned-manifest.json")) { + ObjectMapper objectMapper = new ObjectMapper(); + KubernetesManifest manifest = objectMapper.readValue(stream, KubernetesManifest.class); + Set ownerKeys = + KubernetesCacheDataConverter.ownerReferenceRelationships( + "account", "ns", manifest.getOwnerReferences()); + + assertThat(ownerKeys).hasSize(1); + ownerKeys.stream() + .findFirst() + .ifPresent(key -> assertThat(key.getGroup()).isEqualTo("Owner.group")); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentDispatcherTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentDispatcherTest.java new file mode 100644 index 00000000000..3d8846a78a5 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCachingAgentDispatcherTest.java @@ -0,0 +1,104 @@ +/* + * Copyright 2022 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesResourceProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesDeploymentHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Collection; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.Test; + +public class KubernetesCachingAgentDispatcherTest { + + @Test + public void buildAllCachingAgentsOneThread() { + KubernetesCachingAgentDispatcher dispatcher = + new KubernetesCachingAgentDispatcher( + new ObjectMapper(), + null, + new KubernetesConfigurationProperties(), + new KubernetesSpinnakerKindMap(new ArrayList<>()), + null); + KubernetesNamedAccountCredentials creds = mockCredentials(1); + Collection agents = dispatcher.buildAllCachingAgents(creds); + + assertNotNull(agents); + assertEquals(1, agents.size()); + } + + @Test + public void buildAllCachingAgentsTwoThreads() { + KubernetesCachingAgentDispatcher dispatcher = + new KubernetesCachingAgentDispatcher( + new ObjectMapper(), + null, + new KubernetesConfigurationProperties(), + new KubernetesSpinnakerKindMap(new ArrayList<>()), + null); + KubernetesNamedAccountCredentials creds = mockCredentials(2); + Collection agents = dispatcher.buildAllCachingAgents(creds); + + assertNotNull(agents); + assertEquals(2, agents.size()); + } + + @Test + public void buildAllCachingAgentsCacheDisabled() { + KubernetesConfigurationProperties configProperties = new KubernetesConfigurationProperties(); + configProperties.getCache().setEnabled(false); + KubernetesCachingAgentDispatcher dispatcher = + new KubernetesCachingAgentDispatcher( + new ObjectMapper(), + null, + configProperties, + new KubernetesSpinnakerKindMap(new ArrayList<>()), + null); + KubernetesNamedAccountCredentials creds = mockCredentials(2); + Collection agents = dispatcher.buildAllCachingAgents(creds); + + assertNotNull(agents); + assertEquals(0, agents.size()); + } + + @NotNull + private KubernetesNamedAccountCredentials mockCredentials(int threads) { + ResourcePropertyRegistry propertyRegistry = mock(ResourcePropertyRegistry.class); + when(propertyRegistry.values()) + .thenReturn( + ImmutableList.of( + new KubernetesResourceProperties(new KubernetesDeploymentHandler(), false))); + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + when(credentials.getResourcePropertyRegistry()).thenReturn(propertyRegistry); + KubernetesNamedAccountCredentials namedCredentials = + mock(KubernetesNamedAccountCredentials.class); + when(namedCredentials.getCredentials()).thenReturn(credentials); + when(namedCredentials.getCacheThreads()).thenReturn(threads); + return namedCredentials; + } +} diff --git 
a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCoreCachingAgentTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCoreCachingAgentTest.java new file mode 100644 index 00000000000..4a1dc24982f --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesCoreCachingAgentTest.java @@ -0,0 +1,606 @@ +/* + * Copyright 2019 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.ImmutableSetMultimap; +import com.google.common.io.Resources; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultJsonCacheData; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.cats.provider.DefaultProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.core.services.Front50Service; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.*; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.*; +import 
com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.model.Front50Application; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.util.*; +import java.util.stream.IntStream; +import lombok.Value; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.stubbing.Answer; + +final class KubernetesCoreCachingAgentTest { + private static final String ACCOUNT = "my-account"; + private static final String NAMESPACE1 = "test-namespace"; + private static final String NAMESPACE2 = "test-namespace2"; + private static final String DEPLOYMENT_NAME = "my-deployment"; + private static final String STORAGE_CLASS_NAME = "my-storage-class"; + + private static final String DEPLOYMENT_KIND = KubernetesKind.DEPLOYMENT.toString(); + private static final String STORAGE_CLASS_KIND = KubernetesKind.STORAGE_CLASS.toString(); + + private static final ImmutableMap kindProperties = + ImmutableMap.builder() + .put( + KubernetesKind.DEPLOYMENT, + KubernetesKindProperties.create(KubernetesKind.DEPLOYMENT, true)) + .put( + KubernetesKind.STORAGE_CLASS, + KubernetesKindProperties.create(KubernetesKind.STORAGE_CLASS, false)) + .put( + KubernetesKind.NAMESPACE, + KubernetesKindProperties.create(KubernetesKind.NAMESPACE, false)) + .put(KubernetesKind.POD, KubernetesKindProperties.create(KubernetesKind.POD, true)) + .put( + KubernetesKind.REPLICA_SET, + KubernetesKindProperties.create(KubernetesKind.REPLICA_SET, true)) + .build(); + + private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final ResourcePropertyRegistry resourcePropertyRegistry = + new GlobalResourcePropertyRegistry( + ImmutableList.of(), new KubernetesUnregisteredCustomResourceHandler()); + private static final ImmutableList handlers = + ImmutableList.of( + new KubernetesDeploymentHandler(), + new KubernetesReplicaSetHandler(), + new KubernetesServiceHandler(), + new KubernetesPodHandler(), + new KubernetesConfigMapHandler()); + private static final KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap = + new KubernetesSpinnakerKindMap(handlers); + + /** A test Deployment manifest */ + private static KubernetesManifest deploymentManifest(String deploymentName) { + KubernetesManifest deployment = new KubernetesManifest(); + deployment.put("metadata", new HashMap<>()); + deployment.setNamespace(NAMESPACE1); + deployment.setKind(KubernetesKind.DEPLOYMENT); + deployment.setApiVersion(KubernetesApiVersion.APPS_V1); + deployment.setName(deploymentName); + return deployment; + } + + /** A test StorageClass manifest object */ + private static KubernetesManifest storageClassManifest() { + KubernetesManifest storageClass = new KubernetesManifest(); + storageClass.put("metadata", new HashMap<>()); + storageClass.setKind(KubernetesKind.STORAGE_CLASS); + storageClass.setApiVersion(KubernetesApiVersion.fromString("storage.k8s.io/v1")); + storageClass.setName(STORAGE_CLASS_NAME); + return storageClass; + } + + /** Returns a mock KubernetesCredentials object */ + private static KubernetesCredentials mockKubernetesCredentials(String deploymentName) { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + when(credentials.getGlobalKinds()).thenReturn(kindProperties.keySet().asList()); + 
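// Answer kind-property lookups from the static kindProperties table above. +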
when(credentials.getKindProperties(any(KubernetesKind.class)))
+        .thenAnswer(invocation -> kindProperties.get(invocation.getArgument(0)));
+    when(credentials.getDeclaredNamespaces()).thenReturn(ImmutableList.of(NAMESPACE1, NAMESPACE2));
+    when(credentials.getResourcePropertyRegistry()).thenReturn(resourcePropertyRegistry);
+    when(credentials.get(
+            KubernetesCoordinates.builder()
+                .kind(KubernetesKind.DEPLOYMENT)
+                .namespace(NAMESPACE1)
+                .name(deploymentName)
+                .build()))
+        .thenReturn(deploymentManifest(deploymentName));
+    when(credentials.get(
+            KubernetesCoordinates.builder()
+                .kind(KubernetesKind.STORAGE_CLASS)
+                .name(STORAGE_CLASS_NAME)
+                .build()))
+        .thenReturn(storageClassManifest());
+    when(credentials.list(any(List.class), any()))
+        .thenAnswer(
+            (Answer<ImmutableList<KubernetesManifest>>)
+                invocation -> {
+                  Object[] args = invocation.getArguments();
+                  ImmutableSet<KubernetesKind> kinds =
+                      ImmutableSet.copyOf((List<KubernetesKind>) args[0]);
+                  String namespace = (String) args[1];
+                  ImmutableList.Builder<KubernetesManifest> result = new ImmutableList.Builder<>();
+                  if (kinds.contains(KubernetesKind.DEPLOYMENT) && NAMESPACE1.equals(namespace)) {
+                    result.add(deploymentManifest(deploymentName));
+                  }
+                  if (kinds.contains(KubernetesKind.STORAGE_CLASS)) {
+                    result.add(storageClassManifest());
+                  }
+                  return result.build();
+                });
+    when(credentials.getNamer()).thenReturn(new KubernetesManifestNamer());
+    when(credentials.isValidKind(any(KubernetesKind.class))).thenReturn(true);
+    when(credentials.getKubernetesSpinnakerKindMap())
+        .thenReturn(
+            new KubernetesSpinnakerKindMap(
+                List.of(new KubernetesDeploymentHandler(), new KubernetesStorageClassHandler())));
+    return credentials;
+  }
+
+  /**
+   * Returns a KubernetesNamedAccountCredentials that contains a mock KubernetesCredentials object
+   */
+  private static KubernetesNamedAccountCredentials getNamedAccountCredentials() {
+    return getNamedAccountCredentials(DEPLOYMENT_NAME);
+  }
+
+  /**
+   * Returns a KubernetesNamedAccountCredentials with a custom deployment name that contains a mock
+   * KubernetesCredentials object
+   */
+  private static KubernetesNamedAccountCredentials getNamedAccountCredentials(
+      String deploymentName) {
+    ManagedAccount managedAccount = new ManagedAccount();
+    managedAccount.setName(ACCOUNT);
+
+    KubernetesCredentials mockCredentials = mockKubernetesCredentials(deploymentName);
+    KubernetesCredentials.Factory credentialFactory = mock(KubernetesCredentials.Factory.class);
+    when(credentialFactory.build(managedAccount)).thenReturn(mockCredentials);
+    return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory);
+  }
+
+  /**
+   * Given a KubernetesNamedAccountCredentials object and the number of caching agents to build,
+   * builds a set of caching agents responsible for caching the account's data and returns a
+   * collection of those agents.
+   */
+  private static ImmutableCollection<KubernetesCoreCachingAgent> createCachingAgents(
+      KubernetesNamedAccountCredentials credentials,
+      int agentCount,
+      KubernetesConfigurationProperties configurationProperties) {
+    return IntStream.range(0, agentCount)
+        .mapToObj(
+            i ->
+                new KubernetesCoreCachingAgent(
+                    credentials,
+                    objectMapper,
+                    new NoopRegistry(),
+                    i,
+                    agentCount,
+                    10L,
+                    configurationProperties,
+                    kubernetesSpinnakerKindMap,
+                    null))
+        .collect(toImmutableList());
+  }
+
+  /**
+   * Given a KubernetesNamedAccountCredentials object, the number of caching agents to build and
+   * whether front50 needs to be queried for presence of an application, builds a set of caching
+   * agents responsible for caching the account's data and returns a collection of those agents
+   */
+  private static ImmutableCollection<KubernetesCoreCachingAgent> createCachingAgents(
+      KubernetesNamedAccountCredentials credentials,
+      int agentCount,
+      Front50ApplicationLoader front50ApplicationLoader,
+      boolean checkApplicationInFront50) {
+    KubernetesConfigurationProperties kubernetesConfigurationProperties =
+        new KubernetesConfigurationProperties();
+    if (!checkApplicationInFront50) {
+      return createCachingAgents(credentials, agentCount, kubernetesConfigurationProperties);
+    }
+
+    kubernetesConfigurationProperties.getCache().setCheckApplicationInFront50(true);
+    return IntStream.range(0, agentCount)
+        .mapToObj(
+            i ->
+                new KubernetesCoreCachingAgent(
+                    credentials,
+                    objectMapper,
+                    new NoopRegistry(),
+                    i,
+                    agentCount,
+                    10L,
+                    kubernetesConfigurationProperties,
+                    kubernetesSpinnakerKindMap,
+                    front50ApplicationLoader))
+        .collect(toImmutableList());
+  }
+
+  /** Given a collection of CacheResult, return all cache data in these results, keyed by type. */
+  private static ImmutableMap<String, Collection<CacheData>> extractCacheResults(
+      Collection<CacheResult> loadDataResults) {
+    return loadDataResults.stream()
+        .map(result -> result.getCacheResults().entrySet())
+        .flatMap(Collection::stream)
+        .collect(
+            ImmutableSetMultimap.flatteningToImmutableSetMultimap(
+                Map.Entry::getKey, e -> e.getValue().stream()))
+        .asMap();
+  }
+
+  /** Given a collection of ProviderCache, return all on-demand entries in these caches. */
+  private static ImmutableMap<String, Collection<CacheData>> extractCacheEntries(
+      Collection<ProviderCache> providerCaches) {
+    return providerCaches.stream()
+        .map(providerCache -> providerCache.getAll("onDemand"))
+        .flatMap(Collection::stream)
+        .filter(Objects::nonNull)
+        .map(
+            cacheData -> {
+              try {
+                return objectMapper.readValue(
+                    (String) cacheData.getAttributes().get("cacheResults"),
+                    new TypeReference<Map<String, Collection<DefaultJsonCacheData>>>() {});
+              } catch (IOException e) {
+                throw new RuntimeException(e);
+              }
+            })
+        .map(Map::entrySet)
+        .flatMap(Collection::stream)
+        .collect(
+            ImmutableSetMultimap.flatteningToImmutableSetMultimap(
+                Map.Entry::getKey, e -> e.getValue().stream().map(c -> (CacheData) c)))
+        .asMap();
+  }
+
+  @ParameterizedTest
+  @ValueSource(ints = {1, 2, 10})
+  public void loadData(int numAgents) {
+    String deploymentKey =
+        Keys.InfrastructureCacheKey.createKey(
+            KubernetesKind.DEPLOYMENT, ACCOUNT, NAMESPACE1, DEPLOYMENT_NAME);
+
+    String storageClassKey =
+        Keys.InfrastructureCacheKey.createKey(
+            KubernetesKind.STORAGE_CLASS, ACCOUNT, "", STORAGE_CLASS_NAME);
+
+    KubernetesConfigurationProperties configurationProperties =
+        new KubernetesConfigurationProperties();
+    configurationProperties.getCache().setCacheAll(true);
+
+    ImmutableCollection<KubernetesCoreCachingAgent> cachingAgents =
+        createCachingAgents(getNamedAccountCredentials(), numAgents, configurationProperties);
+    LoadDataResult loadDataResult = processLoadData(cachingAgents, ImmutableMap.of());
+
+    assertThat(loadDataResult.getResults()).containsKey(DEPLOYMENT_KIND);
+    Collection<CacheData> deployments = loadDataResult.getResults().get(DEPLOYMENT_KIND);
+    assertThat(deployments).extracting(CacheData::getId).containsExactly(deploymentKey);
+    assertThat(deployments)
+        .extracting(deployment -> deployment.getAttributes().get("name"))
+        .containsExactly(DEPLOYMENT_NAME);
+
+    // storage class kind should be cached
+    validateStorageClassInCacheResult(storageClassKey, loadDataResult.getResults());
+  }
+
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testCheckingOfApplicationsInFront50ForLoadData(boolean checkApplicationInFront50)
+      throws JsonProcessingException {
+    // setup:
+    String deploymentKey =
+        Keys.InfrastructureCacheKey.createKey(
+            KubernetesKind.DEPLOYMENT, ACCOUNT, NAMESPACE1, DEPLOYMENT_NAME);
+
+    Front50Service front50Service = mock(Front50Service.class);
+    Front50ApplicationLoader front50ApplicationLoader =
+        new Front50ApplicationLoader(front50Service);
+
+    ImmutableCollection<KubernetesCoreCachingAgent> cachingAgents =
+        createCachingAgents(
+            getNamedAccountCredentials(), 1, front50ApplicationLoader, checkApplicationInFront50);
+
+    when(front50Service.getAllApplicationsUnrestricted())
+        .thenReturn(getApplicationsFromFront50("applications-response-from-front50.json"));
+    front50ApplicationLoader.refreshCache();
+    verify(front50Service).getAllApplicationsUnrestricted();
+
+    // when:
+    LoadDataResult loadDataResult = processLoadData(cachingAgents, ImmutableMap.of());
+
+    // then:
+    verifyNoMoreInteractions(front50Service);
+
+    assertThat(loadDataResult.getResults()).containsKey(DEPLOYMENT_KIND);
+    Collection<CacheData> deployments = loadDataResult.getResults().get(DEPLOYMENT_KIND);
+    assertThat(deployments).extracting(CacheData::getId).containsExactly(deploymentKey);
+    assertThat(deployments)
+        .extracting(deployment -> deployment.getAttributes().get("name"))
+        .containsExactly(DEPLOYMENT_NAME);
+  }
+
+  @Test
+  public void testK8sManifestWithNoApplicationInFront50ShouldNotBeCachedInLoadData()
+      throws JsonProcessingException {
+    // setup:
+    String deploymentName = "some-name-not-in-front50";
+
+    Front50Service front50Service = mock(Front50Service.class);
+    Front50ApplicationLoader front50ApplicationLoader =
+        new Front50ApplicationLoader(front50Service);
+
+    ImmutableCollection<KubernetesCoreCachingAgent> cachingAgents =
+        createCachingAgents(
+            getNamedAccountCredentials(deploymentName), 1, front50ApplicationLoader, true);
+
+    when(front50Service.getAllApplicationsUnrestricted())
+        .thenReturn(getApplicationsFromFront50("applications-response-from-front50.json"));
+
+    front50ApplicationLoader.refreshCache();
+    verify(front50Service).getAllApplicationsUnrestricted();
+
+    // when:
+    LoadDataResult loadDataResult = processLoadData(cachingAgents, ImmutableMap.of());
+
+    // then:
+    verifyNoMoreInteractions(front50Service);
+
+    // the deployment should not be cached as its application is not known to front50
+    assertThat(loadDataResult.getResults()).doesNotContainKey(DEPLOYMENT_KIND);
+    Collection<CacheData> deployments = loadDataResult.getResults().get(DEPLOYMENT_KIND);
+    assertThat(deployments).isNullOrEmpty();
+  }
+
+  /**
+   * Calls loadData on each of the given caching agents, returning a collection of all non-null
+   * results along with each agent's backing cache. Any cache entries in primeCacheData will be
+   * added to each agent's backing cache before the agent runs.
+   */
+  private static LoadDataResult processLoadData(
+      Collection<KubernetesCoreCachingAgent> cachingAgents,
+      Map<String, Collection<CacheData>> primeCacheData) {
+    ImmutableList.Builder<CacheResult> resultBuilder = new ImmutableList.Builder<>();
+    ImmutableList.Builder<ProviderCache> providerCacheBuilder = new ImmutableList.Builder<>();
+    cachingAgents.forEach(
+        cachingAgent -> {
+          ProviderCache providerCache = new DefaultProviderCache(new InMemoryCache());
+          providerCacheBuilder.add(providerCache);
+          for (String type : primeCacheData.keySet()) {
+            for (CacheData cacheData : primeCacheData.get(type)) {
+              providerCache.putCacheData(type, cacheData);
+            }
+          }
+          CacheResult result = cachingAgent.loadData(providerCache);
+          if (result != null) {
+            resultBuilder.add(result);
+          }
+        });
+    return new LoadDataResult(resultBuilder.build(), providerCacheBuilder.build());
+  }
+
+  @Value
+  private static class LoadDataResult {
+    Map<String, Collection<CacheData>> results;
+    Map<String, Collection<CacheData>> cacheEntries;
+
+    LoadDataResult(
+        Collection<CacheResult> loadDataResults, Collection<ProviderCache> providerCaches) {
+      this.results = extractCacheResults(loadDataResults);
+      this.cacheEntries = extractCacheEntries(providerCaches);
+    }
+  }
+
+  /**
+   * See comment in {@link KubernetesCoreCachingAgent#getProvidedDataTypes()} for why we are
+   * continuing to use the deprecated Keys.Kind.ARTIFACT.
+ */ + @SuppressWarnings("deprecation") + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void authoritativeForLogicalTypes(int numAgents) { + KubernetesConfigurationProperties configurationProperties = + new KubernetesConfigurationProperties(); + configurationProperties.getCache().setCacheAll(true); + + ImmutableCollection cachingAgents = + createCachingAgents(getNamedAccountCredentials(), numAgents, configurationProperties); + cachingAgents.forEach( + cachingAgent -> + assertThat(getAuthoritativeTypes(cachingAgent.getProvidedDataTypes())) + .containsAll( + ImmutableList.of( + Keys.LogicalKind.APPLICATIONS.toString(), + Keys.LogicalKind.CLUSTERS.toString(), + Keys.Kind.ARTIFACT.toString()))); + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void authoritativeForKubernetesKinds(int numAgents) { + KubernetesConfigurationProperties configurationProperties = + new KubernetesConfigurationProperties(); + configurationProperties.getCache().setCacheAll(true); + + ImmutableCollection cachingAgents = + createCachingAgents(getNamedAccountCredentials(), numAgents, configurationProperties); + cachingAgents.forEach( + cachingAgent -> + assertThat(getAuthoritativeTypes(cachingAgent.getProvidedDataTypes())) + .containsAll( + ImmutableList.of( + KubernetesKind.NAMESPACE.toString(), + KubernetesKind.POD.toString(), + KubernetesKind.REPLICA_SET.toString()))); + } + + /** + * filteredPrimaryKinds returns all registered core kinds, coming from {@link + * KubernetesCoreCachingAgentTest#kindProperties} + */ + @Test + public void filteredPrimaryKindsAll() { + KubernetesConfigurationProperties configurationProperties = + new KubernetesConfigurationProperties(); + configurationProperties.getCache().setCacheAll(true); + KubernetesNamedAccountCredentials namedAccountCredentials = getNamedAccountCredentials(); + KubernetesCoreCachingAgent cachingAgent = + createCachingAgents(namedAccountCredentials, 1, configurationProperties).asList().get(0); + + List filteredPrimaryKinds = cachingAgent.filteredPrimaryKinds(); + + KubernetesKind[] expected = kindProperties.keySet().toArray(new KubernetesKind[0]); + assertThat(filteredPrimaryKinds) + .containsExactlyInAnyOrder(expected); // has everything in global kinds + } + + /** + * filteredPrimaryKinds returns only core kinds specified in {@link + * KubernetesConfigurationProperties.Cache#getCacheKinds()} + */ + @Test + public void filteredPrimaryKindsFromConfig() { + KubernetesConfigurationProperties configurationProperties = + new KubernetesConfigurationProperties(); + configurationProperties.getCache().setCacheAll(false); + configurationProperties + .getCache() + .setCacheKinds(Arrays.asList("deployment", "myCustomKind.my.group")); + KubernetesCoreCachingAgent cachingAgent = + createCachingAgents(getNamedAccountCredentials(), 1, configurationProperties) + .asList() + .get(0); + + List filteredPrimaryKinds = cachingAgent.filteredPrimaryKinds(); + + assertThat(filteredPrimaryKinds) + .containsExactlyInAnyOrder(KubernetesKind.fromString("deployment")); // only has core kinds + } + + /** + * filteredPrimaryKinds returns only core kinds mapped to SpinnakerKinds that show in classic + * infrastructure screens {@link KubernetesCachingAgent#SPINNAKER_UI_KINDS} + */ + @Test + public void filteredPrimaryKindsSpinnakerUI() { + KubernetesConfigurationProperties configurationProperties = + new KubernetesConfigurationProperties(); + KubernetesCoreCachingAgent cachingAgent = + createCachingAgents(getNamedAccountCredentials(), 1, configurationProperties) + 
.asList()
+            .get(0);
+
+    List<KubernetesKind> filteredPrimaryKinds = cachingAgent.filteredPrimaryKinds();
+
+    KubernetesKind[] expected =
+        KubernetesCachingAgent.SPINNAKER_UI_KINDS.stream()
+            .map(kubernetesSpinnakerKindMap::translateSpinnakerKind)
+            .flatMap(Collection::stream)
+            .filter(kindProperties::containsKey)
+            .toArray(KubernetesKind[]::new);
+    assertThat(filteredPrimaryKinds).containsExactlyInAnyOrder(expected); // only has UI kinds
+  }
+
+  /**
+   * filteredPrimaryKinds doesn't include kinds specified in {@link
+   * KubernetesConfigurationProperties.Cache#getCacheOmitKinds()}
+   */
+  @Test
+  public void filteredPrimaryKindsOmitKind() {
+    KubernetesConfigurationProperties configurationProperties =
+        new KubernetesConfigurationProperties();
+    configurationProperties.getCache().setCacheOmitKinds(Collections.singletonList("deployment"));
+    KubernetesCoreCachingAgent cachingAgent =
+        createCachingAgents(getNamedAccountCredentials(), 1, configurationProperties)
+            .asList()
+            .get(0);
+
+    List<KubernetesKind> filteredPrimaryKinds = cachingAgent.filteredPrimaryKinds();
+
+    KubernetesKind[] expected =
+        KubernetesCachingAgent.SPINNAKER_UI_KINDS.stream()
+            .map(kubernetesSpinnakerKindMap::translateSpinnakerKind)
+            .flatMap(Collection::stream)
+            .filter(kindProperties::containsKey)
+            .filter(k -> !k.equals(KubernetesKind.DEPLOYMENT))
+            .toArray(KubernetesKind[]::new);
+    assertThat(filteredPrimaryKinds).containsExactlyInAnyOrder(expected); // excludes Deployment
+  }
+
+  private static ImmutableList<String> getAuthoritativeTypes(
+      Collection<AgentDataType> agentDataTypes) {
+    return agentDataTypes.stream()
+        .filter(dataType -> dataType.getAuthority() == AUTHORITATIVE)
+        .map(AgentDataType::getTypeName)
+        .collect(toImmutableList());
+  }
+
+  private void validateStorageClassInCacheResult(
+      String storageClassKey, Map<String, Collection<CacheData>> cacheResults) {
+    assertThat(cacheResults).containsKey(STORAGE_CLASS_KIND);
+    Collection<CacheData> storageClasses = cacheResults.get(STORAGE_CLASS_KIND);
+    assertThat(storageClasses).extracting(CacheData::getId).contains(storageClassKey);
+    assertThat(storageClasses)
+        .extracting(storageClass -> storageClass.getAttributes().get("name"))
+        .containsExactly(STORAGE_CLASS_NAME);
+  }
+
+  private Set<Front50Application> getApplicationsFromFront50(String fileName)
+      throws JsonProcessingException {
+    return objectMapper.readValue(
+        getResource(fileName), new TypeReference<Set<Front50Application>>() {});
+  }
+
+  private String getResource(String name) {
+    try {
+      return Resources.toString(
+          KubernetesCoreCachingAgentTest.class.getResource(name), StandardCharsets.UTF_8);
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesUnregisteredCustomResourceCachingAgentTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesUnregisteredCustomResourceCachingAgentTest.java
new file mode 100644
index 00000000000..97f67ced9b5
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/KubernetesUnregisteredCustomResourceCachingAgentTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2022 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.agent; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.springframework.lang.Nullable; + +public class KubernetesUnregisteredCustomResourceCachingAgentTest { + + private static final String ACCOUNT = "my-account"; + private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final String CRD_NAME = "myCustomKind.my.group"; + + /** + * filteredPrimaryKinds returns only non-core kinds specified in {@link + * KubernetesConfigurationProperties.Cache#getCacheKinds()} + */ + @Test + public void filteredPrimaryKindsFromConfig() { + KubernetesConfigurationProperties configurationProperties = + new KubernetesConfigurationProperties(); + configurationProperties.getCache().setCacheAll(false); + configurationProperties.getCache().setCacheKinds(Arrays.asList("deployment", CRD_NAME)); + KubernetesUnregisteredCustomResourceCachingAgent cachingAgent = + createCachingAgent(getNamedAccountCredentials(), configurationProperties, null); + + List filteredPrimaryKinds = cachingAgent.filteredPrimaryKinds(); + + assertThat(filteredPrimaryKinds) + .containsExactlyInAnyOrder(KubernetesKind.fromString(CRD_NAME)); // only has CRD kinds + } + + /** + * Returns a KubernetesNamedAccountCredentials that contains a mock KubernetesCredentials object + */ + private static KubernetesNamedAccountCredentials getNamedAccountCredentials() { + KubernetesAccountProperties.ManagedAccount managedAccount = + new KubernetesAccountProperties.ManagedAccount(); + managedAccount.setName(ACCOUNT); + + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + when(credentials.getCrds()).thenReturn(ImmutableList.of(KubernetesKind.fromString(CRD_NAME))); + KubernetesCredentials.Factory credentialFactory = mock(KubernetesCredentials.Factory.class); + when(credentialFactory.build(managedAccount)).thenReturn(credentials); + return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory); + } + + private static KubernetesUnregisteredCustomResourceCachingAgent createCachingAgent( + KubernetesNamedAccountCredentials credentials, + KubernetesConfigurationProperties configurationProperties, + @Nullable 
Front50ApplicationLoader front50ApplicationLoader) { + return new KubernetesUnregisteredCustomResourceCachingAgent( + credentials, + objectMapper, + new NoopRegistry(), + 0, + 1, + 10L, + configurationProperties, + new KubernetesSpinnakerKindMap(new ArrayList<>()), + front50ApplicationLoader); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesDataProviderIntegrationTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesDataProviderIntegrationTest.java new file mode 100644 index 00000000000..53b402549c6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesDataProviderIntegrationTest.java @@ -0,0 +1,1157 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.ImmutableSetMultimap; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.mem.InMemoryNamedCacheFactory; +import com.netflix.spinnaker.cats.provider.DefaultProviderRegistry; +import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentDispatcher; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesApplication; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesCluster; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesInstance; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesLoadBalancer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroup; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroupManager; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesServerGroupSummary; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider.Sort; +import 
com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.AccountResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesNamerRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesDeploymentHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesPodHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesReplicaSetHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesServiceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.ManifestFetcher; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; +import com.netflix.spinnaker.clouddriver.kubernetes.security.*; +import com.netflix.spinnaker.clouddriver.model.Application; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.model.Instance; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import com.netflix.spinnaker.clouddriver.model.ServerGroupManager.ServerGroupManagerSummary; +import com.netflix.spinnaker.clouddriver.model.ServerGroupSummary; +import com.netflix.spinnaker.clouddriver.search.SearchResultSet; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import org.assertj.core.api.SoftAssertions; +import org.assertj.core.api.junit.jupiter.SoftAssertionsExtension; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.internal.stubbing.defaultanswers.ReturnsSmartNulls; + +@ExtendWith(SoftAssertionsExtension.class) +final class KubernetesDataProviderIntegrationTest { + private static final String ACCOUNT_NAME = "my-account"; + private static final Registry registry = new NoopRegistry(); + private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final KubernetesProvider kubernetesProvider = new KubernetesProvider(); + private static final ImmutableList<KubernetesHandler> handlers = + ImmutableList.of( + new
KubernetesDeploymentHandler(), + new KubernetesReplicaSetHandler(), + new KubernetesServiceHandler(), + new KubernetesPodHandler()); + private static final KubernetesSpinnakerKindMap kindMap = + new KubernetesSpinnakerKindMap(handlers); + private static final KubernetesCachingAgentDispatcher dispatcher = + new KubernetesCachingAgentDispatcher( + objectMapper, registry, new KubernetesConfigurationProperties(), kindMap, null); + private static final GlobalResourcePropertyRegistry resourcePropertyRegistry = + new GlobalResourcePropertyRegistry( + handlers, new KubernetesUnregisteredCustomResourceHandler()); + private static final CredentialsRepository<KubernetesNamedAccountCredentials> + credentialsRepository = + new MapBackedCredentialsRepository<>( + KubernetesProvider.PROVIDER_NAME, new NoopCredentialsLifecycleHandler()); + private static final KubernetesAccountResolver accountResolver = + new KubernetesAccountResolver(credentialsRepository, resourcePropertyRegistry); + private static final ProviderRegistry providerRegistry = + new DefaultProviderRegistry( + ImmutableList.of(kubernetesProvider), new InMemoryNamedCacheFactory()); + private static final KubernetesCacheUtils cacheUtils = + new KubernetesCacheUtils( + providerRegistry.getProviderCache(kubernetesProvider.getProviderName()), + kindMap, + accountResolver); + private static final ImmutableSetMultimap<String, String> manifestsByNamespace = + ImmutableSetMultimap.<String, String>builder() + .putAll( + "backend-ns", + ImmutableSet.of( + "backend-service.yml", + "backend-rs-014.yml", + "backend-pod-014.yml", + "backend-rs-015.yml", + "backend-pod-015.yml")) + .putAll( + "frontend-ns", + ImmutableSet.of( + "frontend-service.yml", + "frontend-deployment.yml", + "frontend-rs-old.yml", + "frontend-rs-new.yml", + "frontend-pod-1.yml", + "frontend-pod-2.yml")) + .build(); + + private static KubernetesApplicationProvider applicationProvider = + new KubernetesApplicationProvider(cacheUtils); + private static KubernetesClusterProvider clusterProvider = + new KubernetesClusterProvider(cacheUtils); + private static KubernetesInstanceProvider instanceProvider = + new KubernetesInstanceProvider(cacheUtils, accountResolver); + private static KubernetesLoadBalancerProvider loadBalancerProvider = + new KubernetesLoadBalancerProvider(cacheUtils); + private static KubernetesSearchProvider searchProvider = + new KubernetesSearchProvider(cacheUtils, kindMap, objectMapper, accountResolver); + private static KubernetesServerGroupManagerProvider serverGroupManagerProvider = + new KubernetesServerGroupManagerProvider(cacheUtils); + private static ArtifactProvider artifactProvider = new ArtifactProvider(); + private static KubernetesManifestProvider manifestProvider = + new KubernetesManifestProvider(accountResolver); + + private static KubernetesNamedAccountCredentials credentials = getNamedAccountCredentials(); + + @BeforeAll + static void prepareCache() { + credentialsRepository.save(credentials); + dispatcher + .buildAllCachingAgents(credentials) + .forEach(agent -> agent.getAgentExecution(providerRegistry).executeAgent(agent)); + } + + @Test + void getClusters(SoftAssertions softly) { + Map<String, Set<KubernetesCluster>> results = clusterProvider.getClusters(); + assertThat(results).hasSize(1); + assertThat(results).containsKey(ACCOUNT_NAME); + + Set<KubernetesCluster> clusters = results.get(ACCOUNT_NAME); + assertThat(clusters).hasSize(2); + + assertThat(clusters) + .extracting(KubernetesCluster::getName) + .containsExactlyInAnyOrder("deployment frontend", "replicaSet backend"); + + Map<String, KubernetesCluster> clusterLookup =
clusters.stream().collect(toImmutableMap(KubernetesCluster::getName, c -> c)); + + assertFrontendCluster(softly, clusterLookup.get("deployment frontend"), true); + assertBackendCluster(softly, clusterLookup.get("replicaSet backend"), true); + } + + @Test + void getClustersForApplication(SoftAssertions softly) { + Map<String, Set<KubernetesCluster>> results = clusterProvider.getClusterDetails("backendapp"); + assertThat(results).hasSize(1); + assertThat(results).containsKey(ACCOUNT_NAME); + + Set<KubernetesCluster> clusters = results.get(ACCOUNT_NAME); + assertThat(clusters).hasSize(1); + + assertThat(clusters) + .extracting(KubernetesCluster::getName) + .containsExactlyInAnyOrder("replicaSet backend"); + + Map<String, KubernetesCluster> clusterLookup = + clusters.stream().collect(toImmutableMap(KubernetesCluster::getName, c -> c)); + + assertBackendCluster(softly, clusterLookup.get("replicaSet backend"), true); + } + + @Test + void getClustersForApplicationAndAccount(SoftAssertions softly) { + Set<KubernetesCluster> clusters = clusterProvider.getClusters("backendapp", ACCOUNT_NAME); + assertThat(clusters).hasSize(1); + + assertThat(clusters) + .extracting(KubernetesCluster::getName) + .containsExactlyInAnyOrder("replicaSet backend"); + + Map<String, KubernetesCluster> clusterLookup = + clusters.stream().collect(toImmutableMap(KubernetesCluster::getName, c -> c)); + + assertBackendCluster(softly, clusterLookup.get("replicaSet backend"), true); + } + + @Test + void getClustersForApplicationAndWrongAccount(SoftAssertions softly) { + Set<KubernetesCluster> clusters = clusterProvider.getClusters("backendapp", "non-existent"); + assertThat(clusters).hasSize(0); + } + + @Test + void getSingleCluster(SoftAssertions softly) { + // When not explicitly passing the includeDetails flag, it should default to true. + KubernetesCluster cluster = + clusterProvider.getCluster("frontendapp", ACCOUNT_NAME, "deployment frontend"); + assertThat(cluster).isNotNull(); + assertFrontendCluster(softly, cluster, true); + } + + @Test + void getSingleClusterWithDetails(SoftAssertions softly) { + KubernetesCluster cluster = + clusterProvider.getCluster("frontendapp", ACCOUNT_NAME, "deployment frontend", true); + assertThat(cluster).isNotNull(); + assertFrontendCluster(softly, cluster, true); + } + + @Test + void getSingleClusterWithoutDetails(SoftAssertions softly) { + KubernetesCluster cluster = + clusterProvider.getCluster("frontendapp", ACCOUNT_NAME, "deployment frontend", false); + assertThat(cluster).isNotNull(); + assertFrontendCluster(softly, cluster, false); + } + + @Test + void getSingleClusterWrongApp(SoftAssertions softly) { + KubernetesCluster cluster = + clusterProvider.getCluster("backendapp", ACCOUNT_NAME, "deployment frontend"); + assertThat(cluster).isNull(); + } + + @Test + void getClusterSummaries(SoftAssertions softly) { + Map<String, Set<KubernetesCluster>> results = clusterProvider.getClusterSummaries("backendapp"); + assertThat(results).hasSize(1); + assertThat(results).containsKey(ACCOUNT_NAME); + + Set<KubernetesCluster> clusters = results.get(ACCOUNT_NAME); + assertThat(clusters).hasSize(1); + + assertThat(clusters) + .extracting(KubernetesCluster::getName) + .containsExactlyInAnyOrder("replicaSet backend"); + + Map<String, KubernetesCluster> clusterLookup = + clusters.stream().collect(toImmutableMap(KubernetesCluster::getName, c -> c)); + + assertBackendCluster(softly, clusterLookup.get("replicaSet backend"), false); + } + + @Test + void getServerGroup(SoftAssertions softly) { + KubernetesServerGroup serverGroup = + clusterProvider.getServerGroup(ACCOUNT_NAME, "backend-ns", "replicaSet backend-v014"); + assertThat(serverGroup).isNotNull(); + assertBackendPriorServerGroup(softly, serverGroup); + } + + @Test +
void getServerGroupWithManager(SoftAssertions softly) { + KubernetesServerGroup serverGroup = + clusterProvider.getServerGroup( + ACCOUNT_NAME, "frontend-ns", "replicaSet frontend-5c6559f75f"); + assertThat(serverGroup).isNotNull(); + assertFrontendCurrentServerGroup(softly, serverGroup); + } + + @Test + void getServerGroupWrongNamespace(SoftAssertions softly) { + KubernetesServerGroup serverGroup = + clusterProvider.getServerGroup(ACCOUNT_NAME, "frontend-ns", "replicaSet backend-v014"); + assertThat(serverGroup).isNull(); + } + + @Test + void getServerGroupWithDetails(SoftAssertions softly) { + KubernetesServerGroup serverGroup = + clusterProvider.getServerGroup(ACCOUNT_NAME, "backend-ns", "replicaSet backend-v014", true); + assertThat(serverGroup).isNotNull(); + assertBackendPriorServerGroup(softly, serverGroup); + } + + @Test + void getServerGroupWithoutDetails(SoftAssertions softly) { + KubernetesServerGroup serverGroup = + clusterProvider.getServerGroup( + ACCOUNT_NAME, "backend-ns", "replicaSet backend-v014", false); + assertThat(serverGroup).isNotNull(); + // Looks like we ignore the includeDetails flag, so this is the same serverGroup as when we do + // include details. + assertBackendPriorServerGroup(softly, serverGroup); + } + + @Test + void getApplicationsUnexpanded(SoftAssertions softly) { + Set<KubernetesApplication> result = applicationProvider.getApplications(false); + softly.assertThat(result).hasSize(2); + + Map<String, KubernetesApplication> applicationLookup = + result.stream().collect(toImmutableMap(Application::getName, a -> a)); + + KubernetesApplication frontendApplication = applicationLookup.get("frontendapp"); + softly.assertThat(frontendApplication).isNotNull(); + if (frontendApplication != null) { + assertFrontendApplication(softly, frontendApplication); + } + + KubernetesApplication backendApplication = applicationLookup.get("backendapp"); + softly.assertThat(backendApplication).isNotNull(); + if (backendApplication != null) { + assertBackendApplication(softly, backendApplication); + } + } + + @Test + void getApplicationsExpanded(SoftAssertions softly) { + // This is the same as the unexpanded test, as it seems like we ignore the flag.
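+ // (If a future change makes the provider honor the expand flag, the expectations below + // will need to diverge from the unexpanded test.)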
+ Set<KubernetesApplication> result = applicationProvider.getApplications(true); + softly.assertThat(result).hasSize(2); + + Map<String, KubernetesApplication> applicationLookup = + result.stream().collect(toImmutableMap(Application::getName, a -> a)); + + KubernetesApplication frontendApplication = applicationLookup.get("frontendapp"); + softly.assertThat(frontendApplication).isNotNull(); + if (frontendApplication != null) { + assertFrontendApplication(softly, frontendApplication); + } + + KubernetesApplication backendApplication = applicationLookup.get("backendapp"); + softly.assertThat(backendApplication).isNotNull(); + if (backendApplication != null) { + assertBackendApplication(softly, backendApplication); + } + } + + @Test + void getApplication(SoftAssertions softly) { + KubernetesApplication result = applicationProvider.getApplication("backendapp"); + assertThat(result).isNotNull(); + assertBackendApplication(softly, result); + } + + @Test + void getInstance(SoftAssertions softly) { + KubernetesInstance result = + instanceProvider.getInstance(ACCOUNT_NAME, "backend-ns", "pod backend-v015-vhglj"); + assertThat(result).isNotNull(); + assertBackendCurrentServerGroupInstance(softly, result); + } + + @Test + void getServerGroupManagers(SoftAssertions softly) { + Set<KubernetesServerGroupManager> results = + serverGroupManagerProvider.getServerGroupManagersByApplication("frontendapp"); + assertThat(results).hasSize(1); + if (!results.isEmpty()) { + assertFrontEndServerGroupManager(softly, results.iterator().next()); + } + } + + @Test + void getApplicationLoadBalancers(SoftAssertions softly) { + Set<KubernetesLoadBalancer> results = + loadBalancerProvider.getApplicationLoadBalancers("frontendapp"); + assertThat(results).hasSize(1); + assertFrontendLoadBalancer(softly, results.iterator().next()); + } + + @Test + void getLoadBalancersByName(SoftAssertions softly) { + List<KubernetesLoadBalancer> results = + loadBalancerProvider.byAccountAndRegionAndName( + ACCOUNT_NAME, "frontend-ns", "service frontend"); + assertThat(results).hasSize(1); + assertFrontendLoadBalancer(softly, results.iterator().next()); + } + + @Test + void searchBackendReplicaSet(SoftAssertions softly) { + SearchResultSet resultSet = searchProvider.search("backend-v014", 1, 100); + + softly.assertThat(resultSet.getQuery()).isEqualTo("backend-v014"); + softly.assertThat(resultSet.getTotalMatches()).isEqualTo(2); + + List<Map<String, Object>> results = resultSet.getResults(); + softly.assertThat(results).hasSize(2); + + Optional<Map<String, Object>> optionalRs = + results.stream().filter(r -> r.get("name").equals("replicaSet backend-v014")).findFirst(); + softly.assertThat(optionalRs).isPresent(); + optionalRs.ifPresent( + rs -> + softly + .assertThat(rs) + .containsAllEntriesOf( + ImmutableMap.<String, String>builder() + .put("account", ACCOUNT_NAME) + .put("group", "replicaSet") + .put("kubernetesKind", "replicaSet") + .put("name", "replicaSet backend-v014") + .put("namespace", "backend-ns") + .put("provider", "kubernetes") + .put("region", "backend-ns") + .put("serverGroup", "replicaSet backend-v014") + .put("type", "serverGroups") + .build())); + + Optional<Map<String, Object>> optionalPod = + results.stream().filter(r -> r.get("name").equals("pod backend-v014-xkvwh")).findFirst(); + softly.assertThat(optionalPod).isPresent(); + optionalPod.ifPresent( + pod -> + softly + .assertThat(pod) + .containsAllEntriesOf( + ImmutableMap.<String, String>builder() + .put("account", ACCOUNT_NAME) + .put("group", "pod") + .put("instanceId", "pod backend-v014-xkvwh") + .put("kubernetesKind", "pod") + .put("name", "pod backend-v014-xkvwh") + .put("namespace", "backend-ns") + .put("provider", "kubernetes") + .put("region", "backend-ns") + .put("type",
"instances") + .build())); + } + + @Test + void getArtifacts(SoftAssertions softly) { + List artifacts = + artifactProvider.getArtifacts( + KubernetesKind.REPLICA_SET, "backend", "backend-ns", credentials.getCredentials()); + softly.assertThat(artifacts).hasSize(2); + softly + .assertThat(artifacts) + .allSatisfy( + artifact -> { + softly.assertThat(artifact.getType()).isEqualTo("kubernetes/replicaSet"); + softly.assertThat(artifact.getName()).isEqualTo("backend"); + softly.assertThat(artifact.getLocation()).isEqualTo("backend-ns"); + softly + .assertThat(Optional.ofNullable((String) artifact.getMetadata("account"))) + .contains(ACCOUNT_NAME); + }); + // Order matters here because we're expecting to get the artifacts back in the order they were + // created. + softly.assertThat(artifacts).extracting(Artifact::getVersion).containsExactly("v014", "v015"); + } + + @Test + void getArtifactsWrongType(SoftAssertions softly) { + List artifacts = + artifactProvider.getArtifacts( + KubernetesKind.DEPLOYMENT, "backend", "backend-ns", credentials.getCredentials()); + softly.assertThat(artifacts).isEmpty(); + } + + @Test + void getArtifactsWrongNamespace(SoftAssertions softly) { + List artifacts = + artifactProvider.getArtifacts( + KubernetesKind.REPLICA_SET, "backend", "frontend-ns", credentials.getCredentials()); + softly.assertThat(artifacts).isEmpty(); + } + + @Test + void getClusterAndSortAscending(SoftAssertions softly) { + List manifests = + manifestProvider.getClusterAndSortAscending( + ACCOUNT_NAME, "backend-ns", "replicaSet", "replicaSet backend", "backendapp", Sort.AGE); + assertThat(manifests).isNotNull(); + softly + .assertThat( + manifests.stream() + .map(KubernetesManifest::getFullResourceName) + .collect(toImmutableList())) + .containsExactly("replicaSet backend-v014", "replicaSet backend-v015"); + } + + @Test + void getClusterAndSortAscendingBadAccount(SoftAssertions softly) { + assertThrows( + IllegalArgumentException.class, + () -> + manifestProvider.getClusterAndSortAscending( + "not-an-account", + "backend-ns", + "replicaSet", + "replicaSet backend", + "backendapp", + Sort.AGE)); + } + + @Test + void getClusterManifestCoordinates(SoftAssertions softly) { + List coordinates = + manifestProvider.getClusterManifestCoordinates( + ACCOUNT_NAME, "backend-ns", "replicaSet", "backendapp", "replicaSet backend"); + assertThat(coordinates).isNotNull(); + softly + .assertThat(coordinates.stream().collect(toImmutableList())) + .containsExactlyInAnyOrder( + KubernetesCoordinates.builder() + .kind(KubernetesKind.REPLICA_SET) + .name("backend-v014") + .namespace("backend-ns") + .build(), + KubernetesCoordinates.builder() + .kind(KubernetesKind.REPLICA_SET) + .name("backend-v015") + .namespace("backend-ns") + .build()); + } + + @Test + void getClusterManifestCoordinatesBadAccount(SoftAssertions softly) { + assertThrows( + IllegalArgumentException.class, + () -> + manifestProvider.getClusterManifestCoordinates( + "not-an-account", "backend-ns", "replicaSet", "backendapp", "replicaSet backend")); + } + + @Test + void getClusterManifestCoordinatesEmptyNamespace(SoftAssertions softly) { + List coordinates = + manifestProvider.getClusterManifestCoordinates( + ACCOUNT_NAME, "empty", "replicaSet", "backendapp", "replicaSet backend"); + softly.assertThat(coordinates).isEmpty(); + } + + @Test + void getClusterManifestCoordinatesEmptyCluster(SoftAssertions softly) { + List coordinates = + manifestProvider.getClusterManifestCoordinates( + ACCOUNT_NAME, "empty-namespace", "replicaSet", "backendapp", 
"replicaSet empty"); + softly.assertThat(coordinates).isEmpty(); + } + + private static KubectlJobExecutor getJobExecutor() { + KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class, new ReturnsSmartNulls()); + when(jobExecutor.list( + any(KubernetesCredentials.class), + anyList(), + any(String.class), + any(KubernetesSelectorList.class))) + .thenAnswer( + invocation -> + manifestsByNamespace.get(invocation.getArgument(2, String.class)).stream() + .map( + file -> + ManifestFetcher.getManifest( + KubernetesDataProviderIntegrationTest.class, file) + .get(0)) + .filter(m -> invocation.getArgument(1, List.class).contains(m.getKind())) + .collect(toImmutableList())); + return jobExecutor; + } + + private static KubernetesNamedAccountCredentials getNamedAccountCredentials() { + ManagedAccount managedAccount = new ManagedAccount(); + managedAccount.setName(ACCOUNT_NAME); + managedAccount.setNamespaces(manifestsByNamespace.keySet().asList()); + managedAccount.setKinds(ImmutableList.of("deployment", "replicaSet", "service", "pod")); + managedAccount.setMetrics(false); + + KubernetesCredentials.Factory credentialFactory = + new KubernetesCredentials.Factory( + new NoopRegistry(), + new KubernetesNamerRegistry(ImmutableList.of(new KubernetesManifestNamer())), + getJobExecutor(), + new ConfigFileService(new CloudConfigResourceService()), + new AccountResourcePropertyRegistry.Factory(resourcePropertyRegistry), + new KubernetesKindRegistry.Factory(new GlobalKubernetesKindRegistry()), + kindMap, + new GlobalResourcePropertyRegistry( + ImmutableList.of(), new KubernetesUnregisteredCustomResourceHandler())); + return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory); + } + + private void assertFrontendLoadBalancer( + SoftAssertions softly, KubernetesLoadBalancer loadBalancer) { + softly.assertThat(loadBalancer.getRegion()).isEqualTo("frontend-ns"); + softly.assertThat(loadBalancer.getAccount()).isEqualTo(ACCOUNT_NAME); + softly + .assertThat(loadBalancer.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "frontendapp", + "app.kubernetes.io/managed-by", "spinnaker")); + softly.assertThat(loadBalancer.getKind()).isEqualTo(KubernetesKind.SERVICE); + softly.assertThat(loadBalancer.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(loadBalancer.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(loadBalancer.getMoniker().getCluster()).isEqualTo("service frontend"); + softly.assertThat(loadBalancer.getName()).isEqualTo("service frontend"); + assertFrontendLoadBalancerServerGroups(softly, loadBalancer.getServerGroups()); + } + + private void assertFrontendCluster( + SoftAssertions softly, KubernetesCluster cluster, boolean includeDetails) { + softly.assertThat(cluster.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(cluster.getMoniker().getCluster()).isEqualTo("deployment frontend"); + softly.assertThat(cluster.getType()).isEqualTo("kubernetes"); + softly.assertThat(cluster.getAccountName()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(cluster.getName()).isEqualTo("deployment frontend"); + softly.assertThat(cluster.getApplication()).isEqualTo("frontendapp"); + + if (includeDetails) { + assertFrontendServerGroups(softly, cluster.getServerGroups()); + softly.assertThat(cluster.getLoadBalancers()).hasSize(1); + if (!cluster.getLoadBalancers().isEmpty()) { + assertFrontendLoadBalancer(softly, cluster.getLoadBalancers().iterator().next()); + } + } else { + 
softly.assertThat(cluster.getServerGroups()).isEmpty(); + softly.assertThat(cluster.getLoadBalancers()).isEmpty(); + } + } + + private void assertFrontendServerGroups( + SoftAssertions softly, Collection<KubernetesServerGroup> serverGroups) { + softly.assertThat(serverGroups).hasSize(2); + softly + .assertThat(serverGroups) + .extracting(ServerGroup::getName) + .containsExactlyInAnyOrder( + "replicaSet frontend-5c6559f75f", "replicaSet frontend-64545c4c54"); + Map<String, KubernetesServerGroup> serverGroupLookup = + serverGroups.stream().collect(toImmutableMap(ServerGroup::getName, sg -> sg)); + + KubernetesServerGroup currentServerGroup = + serverGroupLookup.get("replicaSet frontend-5c6559f75f"); + softly.assertThat(currentServerGroup).isNotNull(); + // If the soft assertion already failed, don't NPE trying to validate further. + if (currentServerGroup != null) { + assertFrontendCurrentServerGroup(softly, currentServerGroup); + } + + KubernetesServerGroup priorServerGroup = + serverGroupLookup.get("replicaSet frontend-64545c4c54"); + softly.assertThat(priorServerGroup).isNotNull(); + // If the soft assertion already failed, don't NPE trying to validate further. + if (priorServerGroup != null) { + assertFrontendPriorServerGroup(softly, priorServerGroup); + } + } + + private void assertFrontendPriorServerGroup( + SoftAssertions softly, KubernetesServerGroup serverGroup) { + softly.assertThat(serverGroup.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(serverGroup.getMoniker().getCluster()).isEqualTo("deployment frontend"); + softly.assertThat(serverGroup.getMoniker().getSequence()).isEqualTo(1); + softly.assertThat(serverGroup.getCapacity().getDesired()).isEqualTo(0); + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getKind()).isEqualTo(KubernetesKind.REPLICA_SET); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet frontend-64545c4c54"); + softly.assertThat(serverGroup.getInstanceCounts().getUp()).isEqualTo(0); + softly.assertThat(serverGroup.getInstanceCounts().getTotal()).isEqualTo(0); + softly.assertThat(serverGroup.getLoadBalancers()).containsExactly("service frontend"); + // When using a deployment, the prior server group is not disabled as labels aren't changed; + // instead this server group is scaled down to 0 instances.
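+ // (The zero-replica replica set is still attached to "service frontend" above, which is + // presumably why isDisabled() stays false.)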
softly.assertThat(serverGroup.isDisabled()).isFalse(); + softly.assertThat(serverGroup.getRegion()).isEqualTo("frontend-ns"); + softly.assertThat(serverGroup.getServerGroupManagers()).hasSize(1); + if (!serverGroup.getServerGroupManagers().isEmpty()) { + assertFrontEndServerGroupManagerSummary( + softly, serverGroup.getServerGroupManagers().iterator().next()); + } + softly + .assertThat(serverGroup.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "frontendapp", + "app.kubernetes.io/managed-by", "spinnaker")); + softly + .assertThat((Collection<String>) serverGroup.getBuildInfo().get("images")) + .containsExactly("nginx:1.19.0"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroup.getInstances()).isEmpty(); + } + + private void assertFrontendCurrentServerGroup( + SoftAssertions softly, KubernetesServerGroup serverGroup) { + softly.assertThat(serverGroup.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(serverGroup.getMoniker().getCluster()).isEqualTo("deployment frontend"); + softly.assertThat(serverGroup.getMoniker().getSequence()).isEqualTo(2); + softly.assertThat(serverGroup.getCapacity().getDesired()).isEqualTo(2); + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getKind()).isEqualTo(KubernetesKind.REPLICA_SET); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet frontend-5c6559f75f"); + softly.assertThat(serverGroup.getInstanceCounts().getUp()).isEqualTo(2); + softly.assertThat(serverGroup.getInstanceCounts().getTotal()).isEqualTo(2); + softly.assertThat(serverGroup.getLoadBalancers()).containsExactly("service frontend"); + softly.assertThat(serverGroup.isDisabled()).isFalse(); + softly.assertThat(serverGroup.getRegion()).isEqualTo("frontend-ns"); + softly.assertThat(serverGroup.getServerGroupManagers()).hasSize(1); + if (!serverGroup.getServerGroupManagers().isEmpty()) { + assertFrontEndServerGroupManagerSummary( + softly, serverGroup.getServerGroupManagers().iterator().next()); + } + softly + .assertThat(serverGroup.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "frontendapp", + "app.kubernetes.io/managed-by", "spinnaker")); + softly + .assertThat((Collection<String>) serverGroup.getBuildInfo().get("images")) + .containsExactly("nginx:1.19.1"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + assertFrontendCurrentServerGroupInstances(softly, serverGroup.getInstances()); + } + + private void assertFrontendCurrentServerGroupInstances( + SoftAssertions softly, Collection<KubernetesInstance> instances) { + softly.assertThat(instances).hasSize(2); + Map<String, KubernetesInstance> instanceLookup = + instances.stream().collect(toImmutableMap(Instance::getName, i -> i)); + + KubernetesInstance firstInstance = instanceLookup.get("477dcf19-be44-4853-88fd-1d9aedfcddba"); + softly.assertThat(firstInstance).isNotNull(); + if (firstInstance != null) { + assertFrontendFirstInstance(softly, firstInstance); + } + + KubernetesInstance secondInstance = instanceLookup.get("a2280982-e745-468f-9176-21ff1642fa8d"); + softly.assertThat(secondInstance).isNotNull(); + if (secondInstance != null) { + assertFrontendSecondInstance(softly, secondInstance); + } + } + + private void assertFrontendFirstInstance(SoftAssertions softly, KubernetesInstance instance) { + softly.assertThat(instance.getAccount()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(instance.getZone()).isEqualTo("frontend-ns"); +
softly.assertThat(instance.getKind()).isEqualTo(KubernetesKind.POD); + softly.assertThat(instance.getHealthState()).isEqualTo(HealthState.Up); + softly.assertThat(instance.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(instance.getHumanReadableName()).isEqualTo("pod frontend-5c6559f75f-4ml8h"); + softly.assertThat(instance.getName()).isEqualTo("477dcf19-be44-4853-88fd-1d9aedfcddba"); + softly + .assertThat(instance.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "load-balancer", "frontend", + "app.kubernetes.io/name", "frontendapp", + "app.kubernetes.io/managed-by", "spinnaker", + "app", "nginx")); + softly.assertThat(instance.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(instance.getMoniker().getCluster()).isEqualTo("deployment frontend"); + } + + private void assertFrontendSecondInstance(SoftAssertions softly, KubernetesInstance instance) { + softly.assertThat(instance.getAccount()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(instance.getZone()).isEqualTo("frontend-ns"); + softly.assertThat(instance.getKind()).isEqualTo(KubernetesKind.POD); + softly.assertThat(instance.getHealthState()).isEqualTo(HealthState.Up); + softly.assertThat(instance.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(instance.getHumanReadableName()).isEqualTo("pod frontend-5c6559f75f-6fdmt"); + softly.assertThat(instance.getName()).isEqualTo("a2280982-e745-468f-9176-21ff1642fa8d"); + softly + .assertThat(instance.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "load-balancer", "frontend", + "app.kubernetes.io/name", "frontendapp", + "app.kubernetes.io/managed-by", "spinnaker", + "app", "nginx")); + softly.assertThat(instance.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(instance.getMoniker().getCluster()).isEqualTo("deployment frontend"); + } + + private void assertFrontEndServerGroupManagerSummary( + SoftAssertions softly, ServerGroupManagerSummary summary) { + softly.assertThat(summary.getAccount()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(summary.getLocation()).isEqualTo("frontend-ns"); + softly.assertThat(summary.getName()).isEqualTo("frontend"); + } + + private void assertFrontEndServerGroupManager( + SoftAssertions softly, KubernetesServerGroupManager serverGroupManager) { + softly.assertThat(serverGroupManager.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroupManager.getRegion()).isEqualTo("frontend-ns"); + softly.assertThat(serverGroupManager.getAccount()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(serverGroupManager.getName()).isEqualTo("deployment frontend"); + softly.assertThat(serverGroupManager.getKind()).isEqualTo(KubernetesKind.DEPLOYMENT); + softly.assertThat(serverGroupManager.getMoniker().getApp()).isEqualTo("frontendapp"); + softly + .assertThat(serverGroupManager.getMoniker().getCluster()) + .isEqualTo("deployment frontend"); + softly + .assertThat(serverGroupManager.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "frontendapp", + "app.kubernetes.io/managed-by", "spinnaker")); + assertFrontendServerGroupSummaries(softly, serverGroupManager.getServerGroups()); + } + + private void assertFrontendLoadBalancerServerGroups( + SoftAssertions softly, Collection<LoadBalancerServerGroup> serverGroups) { + softly.assertThat(serverGroups).hasSize(2); + Map<String, LoadBalancerServerGroup> serverGroupLookup = + serverGroups.stream().collect(toImmutableMap(LoadBalancerServerGroup::getName, sg -> sg)); + + LoadBalancerServerGroup priorServerGroup = + serverGroupLookup.get("replicaSet frontend-64545c4c54"); + softly.assertThat(priorServerGroup).isNotNull(); + if (priorServerGroup != null) { + assertFrontendPriorLoadBalancerServerGroup(softly, priorServerGroup); + } + + LoadBalancerServerGroup currentServerGroup = + serverGroupLookup.get("replicaSet frontend-5c6559f75f"); + softly.assertThat(currentServerGroup).isNotNull(); + if (currentServerGroup != null) { + assertFrontendCurrentLoadBalancerServerGroup(softly, currentServerGroup); + } + } + + private void assertFrontendPriorLoadBalancerServerGroup( + SoftAssertions softly, LoadBalancerServerGroup serverGroup) { + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet frontend-64545c4c54"); + softly.assertThat(serverGroup.getRegion()).isEqualTo("frontend-ns"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroup.getIsDisabled()).isFalse(); + softly.assertThat(serverGroup.getDetachedInstances()).isEmpty(); + softly.assertThat(serverGroup.getInstances()).isEmpty(); + } + + private void assertFrontendCurrentLoadBalancerServerGroup( + SoftAssertions softly, LoadBalancerServerGroup serverGroup) { + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet frontend-5c6559f75f"); + softly.assertThat(serverGroup.getRegion()).isEqualTo("frontend-ns"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroup.getIsDisabled()).isFalse(); + softly.assertThat(serverGroup.getDetachedInstances()).isEmpty(); + softly.assertThat(serverGroup.getInstances()).hasSize(2); + softly + .assertThat(serverGroup.getInstances()) + .extracting(LoadBalancerInstance::getName) + .containsExactlyInAnyOrder( + "pod frontend-5c6559f75f-4ml8h", "pod frontend-5c6559f75f-6fdmt"); + softly + .assertThat(serverGroup.getInstances()) + .allMatch(sg -> sg.getZone().equals("frontend-ns")); + } + + private void assertFrontendServerGroupSummaries( + SoftAssertions softly, Collection<ServerGroupSummary> serverGroups) { + softly.assertThat(serverGroups).hasSize(2); + Map<String, ServerGroupSummary> serverGroupLookup = + serverGroups.stream().collect(toImmutableMap(ServerGroupSummary::getName, sg -> sg)); + + ServerGroupSummary priorServerGroup = serverGroupLookup.get("replicaSet frontend-64545c4c54"); + softly.assertThat(priorServerGroup).isNotNull(); + if (priorServerGroup != null) { + assertFrontendPriorServerGroupSummary(softly, priorServerGroup); + } + + ServerGroupSummary currentServerGroup = serverGroupLookup.get("replicaSet frontend-5c6559f75f"); + softly.assertThat(currentServerGroup).isNotNull(); + if (currentServerGroup != null) { + assertFrontendCurrentServerGroupSummary(softly, currentServerGroup); + } + } + + private void assertFrontendPriorServerGroupSummary( + SoftAssertions softly, ServerGroupSummary serverGroup) { + softly.assertThat(serverGroup.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(serverGroup.getMoniker().getCluster()).isEqualTo("deployment frontend"); + softly.assertThat(serverGroup.getMoniker().getSequence()).isEqualTo(1); + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet frontend-64545c4c54"); + softly.assertThat(serverGroup.getRegion()).isEqualTo("frontend-ns"); + } + + private void assertFrontendCurrentServerGroupSummary( + SoftAssertions softly, ServerGroupSummary serverGroup) { +
softly.assertThat(serverGroup.getMoniker().getApp()).isEqualTo("frontendapp"); + softly.assertThat(serverGroup.getMoniker().getCluster()).isEqualTo("deployment frontend"); + softly.assertThat(serverGroup.getMoniker().getSequence()).isEqualTo(2); + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet frontend-5c6559f75f"); + softly.assertThat(serverGroup.getRegion()).isEqualTo("frontend-ns"); + } + + private void assertBackendLoadBalancer( + SoftAssertions softly, KubernetesLoadBalancer loadBalancer) { + softly.assertThat(loadBalancer.getRegion()).isEqualTo("backend-ns"); + softly.assertThat(loadBalancer.getAccount()).isEqualTo(ACCOUNT_NAME); + softly + .assertThat(loadBalancer.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "backendapp", + "app.kubernetes.io/managed-by", "spinnaker")); + softly.assertThat(loadBalancer.getKind()).isEqualTo(KubernetesKind.SERVICE); + softly.assertThat(loadBalancer.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(loadBalancer.getMoniker().getApp()).isEqualTo("backendapp"); + softly.assertThat(loadBalancer.getMoniker().getCluster()).isEqualTo("service backendlb"); + softly.assertThat(loadBalancer.getName()).isEqualTo("service backendlb"); + assertBackendLoadBalancerServerGroups(softly, loadBalancer.getServerGroups()); + } + + private void assertBackendLoadBalancerServerGroups( + SoftAssertions softly, Collection<LoadBalancerServerGroup> serverGroups) { + softly.assertThat(serverGroups).hasSize(1); + + if (!serverGroups.isEmpty()) { + LoadBalancerServerGroup serverGroup = serverGroups.iterator().next(); + assertBackendLoadBalancerServerGroup(softly, serverGroup); + } + } + + private void assertBackendLoadBalancerServerGroup( + SoftAssertions softly, LoadBalancerServerGroup serverGroup) { + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet backend-v015"); + softly.assertThat(serverGroup.getRegion()).isEqualTo("backend-ns"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroup.getIsDisabled()).isFalse(); + softly.assertThat(serverGroup.getDetachedInstances()).isEmpty(); + softly.assertThat(serverGroup.getInstances()).hasSize(1); + if (!serverGroup.getInstances().isEmpty()) { + assertBackendLoadBalancerInstance(softly, serverGroup.getInstances().iterator().next()); + } + } + + private void assertBackendLoadBalancerInstance( + SoftAssertions softly, LoadBalancerInstance instance) { + softly.assertThat(instance.getName()).isEqualTo("pod backend-v015-vhglj"); + softly.assertThat(instance.getZone()).isEqualTo("backend-ns"); + } + + private void assertBackendCluster( + SoftAssertions softly, KubernetesCluster cluster, boolean includeDetails) { + softly.assertThat(cluster.getMoniker().getApp()).isEqualTo("backendapp"); + softly.assertThat(cluster.getMoniker().getCluster()).isEqualTo("replicaSet backend"); + softly.assertThat(cluster.getType()).isEqualTo("kubernetes"); + softly.assertThat(cluster.getAccountName()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(cluster.getName()).isEqualTo("replicaSet backend"); + softly.assertThat(cluster.getApplication()).isEqualTo("backendapp"); + + if (includeDetails) { + assertBackendServerGroups(softly, cluster.getServerGroups()); + softly.assertThat(cluster.getLoadBalancers()).hasSize(1); + // If soft assertion above already failed, don't try to further validate.
+ if (!cluster.getLoadBalancers().isEmpty()) { + assertBackendLoadBalancer(softly, cluster.getLoadBalancers().iterator().next()); + } + } else { + softly.assertThat(cluster.getServerGroups()).isEmpty(); + softly.assertThat(cluster.getLoadBalancers()).isEmpty(); + } + } + + private void assertBackendServerGroups( + SoftAssertions softly, Collection<KubernetesServerGroup> serverGroups) { + softly.assertThat(serverGroups).hasSize(2); + softly + .assertThat(serverGroups) + .extracting(ServerGroup::getName) + .containsExactlyInAnyOrder("replicaSet backend-v014", "replicaSet backend-v015"); + Map<String, KubernetesServerGroup> serverGroupLookup = + serverGroups.stream().collect(toImmutableMap(ServerGroup::getName, sg -> sg)); + + KubernetesServerGroup currentServerGroup = serverGroupLookup.get("replicaSet backend-v015"); + softly.assertThat(currentServerGroup).isNotNull(); + // If the soft assertion already failed, don't NPE trying to validate further. + if (currentServerGroup != null) { + assertBackendCurrentServerGroup(softly, currentServerGroup); + } + + KubernetesServerGroup priorServerGroup = serverGroupLookup.get("replicaSet backend-v014"); + softly.assertThat(priorServerGroup).isNotNull(); + // If the soft assertion already failed, don't NPE trying to validate further. + if (priorServerGroup != null) { + assertBackendPriorServerGroup(softly, priorServerGroup); + } + } + + private void assertBackendPriorServerGroup( + SoftAssertions softly, KubernetesServerGroup serverGroup) { + softly.assertThat(serverGroup.getMoniker().getApp()).isEqualTo("backendapp"); + softly.assertThat(serverGroup.getMoniker().getCluster()).isEqualTo("replicaSet backend"); + softly.assertThat(serverGroup.getMoniker().getSequence()).isEqualTo(14); + softly.assertThat(serverGroup.getCapacity().getDesired()).isEqualTo(1); + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getKind()).isEqualTo(KubernetesKind.REPLICA_SET); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet backend-v014"); + softly.assertThat(serverGroup.getInstanceCounts().getUp()).isEqualTo(1); + softly.assertThat(serverGroup.getInstanceCounts().getTotal()).isEqualTo(1); + softly.assertThat(serverGroup.getLoadBalancers()).containsExactly("service backendlb"); + // When using a replica set with traffic management, the prior server group is disabled.
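+ // (Disabled appears to be derived from traffic rather than replica count: the prior pods + // lack the "load-balancer" label, as asserted in assertBackendPriorServerGroupInstance.)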
softly.assertThat(serverGroup.isDisabled()).isTrue(); + softly.assertThat(serverGroup.getRegion()).isEqualTo("backend-ns"); + softly.assertThat(serverGroup.getServerGroupManagers()).isEmpty(); + softly + .assertThat(serverGroup.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "backendapp", + "moniker.spinnaker.io/sequence", "14", + "app.kubernetes.io/managed-by", "spinnaker")); + softly + .assertThat((Collection<String>) serverGroup.getBuildInfo().get("images")) + .containsExactly( + "gcr.io/my-gcr-repository/backend-service@sha256:2eefbb528a4619311555f92ea9b781af101c62f4c70b73c4a5e93d15624ba94c"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroup.getInstances()).hasSize(1); + if (!serverGroup.getInstances().isEmpty()) { + assertBackendPriorServerGroupInstance(softly, serverGroup.getInstances().iterator().next()); + } + } + + void assertBackendCurrentServerGroup(SoftAssertions softly, KubernetesServerGroup serverGroup) { + softly.assertThat(serverGroup.getMoniker().getApp()).isEqualTo("backendapp"); + softly.assertThat(serverGroup.getMoniker().getCluster()).isEqualTo("replicaSet backend"); + softly.assertThat(serverGroup.getMoniker().getSequence()).isEqualTo(15); + softly.assertThat(serverGroup.getCapacity().getDesired()).isEqualTo(1); + softly.assertThat(serverGroup.getAccount()).isEqualTo("my-account"); + softly.assertThat(serverGroup.getKind()).isEqualTo(KubernetesKind.REPLICA_SET); + softly.assertThat(serverGroup.getName()).isEqualTo("replicaSet backend-v015"); + softly.assertThat(serverGroup.getInstanceCounts().getUp()).isEqualTo(1); + softly.assertThat(serverGroup.getInstanceCounts().getTotal()).isEqualTo(1); + softly.assertThat(serverGroup.getLoadBalancers()).containsExactly("service backendlb"); + softly.assertThat(serverGroup.isDisabled()).isFalse(); + softly.assertThat(serverGroup.getRegion()).isEqualTo("backend-ns"); + softly.assertThat(serverGroup.getServerGroupManagers()).isEmpty(); + softly + .assertThat(serverGroup.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "backendapp", + "moniker.spinnaker.io/sequence", "15", + "app.kubernetes.io/managed-by", "spinnaker")); + softly + .assertThat((Collection<String>) serverGroup.getBuildInfo().get("images")) + .containsExactly( + "gcr.io/my-gcr-repository/backend-service@sha256:51f29a570a484fbae4da912199ff27ed21f91b1caf51564a9d3afe3a201c1f32"); + softly.assertThat(serverGroup.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(serverGroup.getInstances()).hasSize(1); + if (!serverGroup.getInstances().isEmpty()) { + assertBackendCurrentServerGroupInstance(softly, serverGroup.getInstances().iterator().next()); + } + } + + private void assertBackendPriorServerGroupInstance( + SoftAssertions softly, KubernetesInstance instance) { + softly.assertThat(instance.getAccount()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(instance.getZone()).isEqualTo("backend-ns"); + softly.assertThat(instance.getKind()).isEqualTo(KubernetesKind.POD); + softly.assertThat(instance.getHealthState()).isEqualTo(HealthState.Up); + softly.assertThat(instance.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(instance.getHumanReadableName()).isEqualTo("pod backend-v014-xkvwh"); + softly.assertThat(instance.getName()).isEqualTo("d05606fe-aa69-4f16-b56a-371c2313fe9c"); + softly + .assertThat(instance.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "backendapp", +
"app.kubernetes.io/managed-by", "spinnaker", + "app", "nginx")); + softly.assertThat(instance.getLabels()).doesNotContainEntry("load-balancer", "backend"); + softly.assertThat(instance.getMoniker().getApp()).isEqualTo("backendapp"); + softly.assertThat(instance.getMoniker().getCluster()).isEqualTo("replicaSet backend"); + } + + private void assertBackendCurrentServerGroupInstance( + SoftAssertions softly, KubernetesInstance instance) { + softly.assertThat(instance.getAccount()).isEqualTo(ACCOUNT_NAME); + softly.assertThat(instance.getZone()).isEqualTo("backend-ns"); + softly.assertThat(instance.getKind()).isEqualTo(KubernetesKind.POD); + softly.assertThat(instance.getHealthState()).isEqualTo(HealthState.Up); + softly.assertThat(instance.getCloudProvider()).isEqualTo("kubernetes"); + softly.assertThat(instance.getHumanReadableName()).isEqualTo("pod backend-v015-vhglj"); + softly.assertThat(instance.getName()).isEqualTo("45db7673-e3d2-4746-9ecd-38f868f853e5"); + softly + .assertThat(instance.getLabels()) + .containsAllEntriesOf( + ImmutableMap.of( + "app.kubernetes.io/name", "backendapp", + "app.kubernetes.io/managed-by", "spinnaker", + "app", "nginx")); + softly.assertThat(instance.getLabels()).containsEntry("load-balancer", "backend"); + softly.assertThat(instance.getMoniker().getApp()).isEqualTo("backendapp"); + softly.assertThat(instance.getMoniker().getCluster()).isEqualTo("replicaSet backend"); + } + + void assertFrontendApplication(SoftAssertions softly, KubernetesApplication application) { + softly.assertThat(application.getName()).isEqualTo("frontendapp"); + softly + .assertThat(application.getAttributes()) + .containsExactlyInAnyOrderEntriesOf(ImmutableMap.of("name", "frontendapp")); + softly.assertThat(application.getClusterNames()).hasSize(1); + + Set clusterNames = application.getClusterNames().get(ACCOUNT_NAME); + softly.assertThat(clusterNames).isNotNull(); + if (clusterNames != null) { + softly.assertThat(clusterNames).containsExactlyInAnyOrder("deployment frontend"); + } + } + + void assertBackendApplication(SoftAssertions softly, KubernetesApplication application) { + softly.assertThat(application.getName()).isEqualTo("backendapp"); + softly + .assertThat(application.getAttributes()) + .containsExactlyInAnyOrderEntriesOf(ImmutableMap.of("name", "backendapp")); + softly.assertThat(application.getClusterNames()).hasSize(1); + + Set clusterNames = application.getClusterNames().get(ACCOUNT_NAME); + softly.assertThat(clusterNames).isNotNull(); + if (clusterNames != null) { + softly.assertThat(clusterNames).containsExactlyInAnyOrder("replicaSet backend"); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesInstanceProviderTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesInstanceProviderTest.java new file mode 100644 index 00000000000..5ddd7b6b0ac --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesInstanceProviderTest.java @@ -0,0 +1,245 @@ +/* + * Copyright 2020 Discovery, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.Lists; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesInstance; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.ContainerLog; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import io.kubernetes.client.openapi.JSON; +import io.kubernetes.client.openapi.models.V1Container; +import io.kubernetes.client.openapi.models.V1ObjectMeta; +import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodSpec; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +final class KubernetesInstanceProviderTest { + + private KubernetesInstanceProvider provider; + private KubernetesCredentials credentials; + private KubernetesAccountResolver accountResolver; + private KubernetesCacheUtils cacheUtils; + + private static final JSON json = new JSON(); + private static final KubernetesKind KIND = KubernetesKind.POD; + private static final String ACCOUNT = "account"; + private static final String NAMESPACE = "namespace"; + private static final String POD_NAME = "mypod"; + private static final String POD_FULL_NAME = KIND + " " + POD_NAME; + private static final String CONTAINER = "container"; + private static final String INIT_CONTAINER = "initContainer"; + private static final String LOG_OUTPUT = "logs"; + private static final String CACHE_KEY = + Keys.InfrastructureCacheKey.createKey(KIND, ACCOUNT, NAMESPACE, POD_NAME); + + @BeforeEach + public void setup() { + accountResolver = mock(KubernetesAccountResolver.class); + cacheUtils = mock(KubernetesCacheUtils.class); + credentials = mock(KubernetesCredentials.class); + provider = new KubernetesInstanceProvider(cacheUtils, accountResolver); + when(accountResolver.getCredentials(ACCOUNT)).thenReturn(Optional.of(credentials)); + } + + @Test + void getCloudProvider() { + assertThat(provider.getCloudProvider()).isEqualTo(KubernetesCloudProvider.ID); + } + + @Test + void getInstanceSuccess() { + CacheData cacheData = mock(CacheData.class); + Map<String, Object> attributes = new HashMap<>(); + KubernetesManifest manifest = getKubernetesManifest();
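+ // The provider is expected to read the pod manifest back out of the cache entry's + // "manifest" attribute, so the test stores it under that key.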
attributes.put("manifest", manifest); + when(cacheData.getAttributes()).thenReturn(attributes); + when(cacheUtils.getSingleEntry(ACCOUNT, NAMESPACE, POD_FULL_NAME)) + .thenReturn(Optional.of(cacheData)); + when(cacheData.getId()).thenReturn(CACHE_KEY); + + KubernetesInstance instance = provider.getInstance(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(instance.getName()).isEqualTo(manifest.getUid()); + assertThat(instance.getDisplayName()).isEqualTo(manifest.getName()); + assertThat(instance.getZone()).isEqualTo(manifest.getNamespace()); + assertThat(instance.getKind()).isEqualTo(manifest.getKind()); + assertThat(instance.getApiVersion()).isEqualTo(manifest.getApiVersion()); + } + + @Test + void getInstanceBadPodNameShouldReturnNull() { + KubernetesInstance instance = provider.getInstance(ACCOUNT, NAMESPACE, "badname"); + + assertThat(instance).isNull(); + } + + @Test + void getInstancePodNotFoundShouldReturnNull() { + when(cacheUtils.getSingleEntry(KIND.toString(), CACHE_KEY)).thenReturn(Optional.empty()); + + KubernetesInstance instance = provider.getInstance(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(instance).isNull(); + } + + @Test + void getConsoleOutputSuccess() { + KubernetesManifest manifest = getKubernetesManifest(); + when(credentials.get( + KubernetesCoordinates.builder() + .kind(KubernetesKind.POD) + .namespace(NAMESPACE) + .name(POD_NAME) + .build())) + .thenReturn(manifest); + when(credentials.logs(anyString(), anyString(), anyString())).thenReturn(LOG_OUTPUT); + + List logs = provider.getConsoleOutput(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(logs).isNotEmpty(); + assertThat(logs).hasSize(2); + assertThat(logs.get(0).getName()).isEqualTo(INIT_CONTAINER); + assertThat(logs.get(0).getOutput()).isEqualTo(LOG_OUTPUT); + assertThat(logs.get(1).getName()).isEqualTo(CONTAINER); + assertThat(logs.get(1).getOutput()).isEqualTo(LOG_OUTPUT); + } + + @Test + void getConsoleOutputNoInitContainer() { + V1Pod pod = getPod(); + pod.getSpec().setInitContainers(null); + KubernetesManifest manifest = json.deserialize(json.serialize(pod), KubernetesManifest.class); + + when(credentials.get( + KubernetesCoordinates.builder() + .kind(KubernetesKind.POD) + .namespace(NAMESPACE) + .name(POD_NAME) + .build())) + .thenReturn(manifest); + when(credentials.logs(anyString(), anyString(), anyString())).thenReturn(LOG_OUTPUT); + + List logs = provider.getConsoleOutput(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(logs).isNotEmpty(); + assertThat(logs).hasSize(1); + assertThat(logs.get(0).getName()).isEqualTo(CONTAINER); + assertThat(logs.get(0).getOutput()).isEqualTo(LOG_OUTPUT); + } + + @Test + void getConsoleOutputKubectlException() { + KubernetesManifest manifest = getKubernetesManifest(); + when(credentials.get( + KubernetesCoordinates.builder() + .kind(KubernetesKind.POD) + .namespace(NAMESPACE) + .name(POD_NAME) + .build())) + .thenReturn(manifest); + when(credentials.logs(anyString(), anyString(), anyString())) + .thenThrow(new KubectlJobExecutor.KubectlException(LOG_OUTPUT, null)); + + List logs = provider.getConsoleOutput(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(logs).isNotEmpty(); + assertThat(logs).hasSize(2); + assertThat(logs.get(0).getName()).isEqualTo(INIT_CONTAINER); + assertThat(logs.get(0).getOutput()).isEqualTo(LOG_OUTPUT); + assertThat(logs.get(1).getName()).isEqualTo(CONTAINER); + assertThat(logs.get(1).getOutput()).isEqualTo(LOG_OUTPUT); + } + + private V1Pod getPod() { + V1Pod pod = new V1Pod(); + pod.setApiVersion("v1"); + pod.setKind("Pod"); + 
V1PodSpec podSpec = new V1PodSpec(); + V1ObjectMeta metadata = new V1ObjectMeta(); + V1Container container = new V1Container(); + V1Container initContainer = new V1Container(); + + metadata.setName(POD_NAME); + metadata.setNamespace(NAMESPACE); + container.setName(CONTAINER); + initContainer.setName(INIT_CONTAINER); + pod.setMetadata(metadata); + pod.setSpec(podSpec); + podSpec.setContainers(Lists.newArrayList(container)); + podSpec.setInitContainers(Lists.newArrayList(initContainer)); + + return pod; + } + + private KubernetesManifest getKubernetesManifest() { + V1Pod pod = getPod(); + return json.deserialize(json.serialize(pod), KubernetesManifest.class); + } + + @Test + void getConsoleOutputAccountNotFoundShouldReturnNull() { + when(accountResolver.getCredentials(ACCOUNT)).thenReturn(Optional.empty()); + + List<ContainerLog> logs = provider.getConsoleOutput(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(logs).isNull(); + } + + @Test + void getConsoleOutputBadPodNameShouldReturnNull() { + List<ContainerLog> logs = provider.getConsoleOutput(ACCOUNT, NAMESPACE, "badname"); + + assertThat(logs).isNull(); + } + + @Test + void getConsoleOutputPodNotFoundShouldReturnErrorContainerLog() { + when(credentials.get( + KubernetesCoordinates.builder() + .kind(KubernetesKind.POD) + .namespace(NAMESPACE) + .name(POD_NAME) + .build())) + .thenReturn(null); + + List<ContainerLog> logs = provider.getConsoleOutput(ACCOUNT, NAMESPACE, POD_FULL_NAME); + + assertThat(logs).isNotEmpty(); + assertThat(logs).hasSize(1); + assertThat(logs.get(0).getName()).isEqualTo("Error"); + assertThat(logs.get(0).getOutput()) + .isEqualTo("Failed to retrieve pod data; pod may have been deleted."); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesRawResourceProviderTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesRawResourceProviderTest.java new file mode 100644 index 00000000000..e86b5122023 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesRawResourceProviderTest.java @@ -0,0 +1,220 @@ +/* + * Copyright 2020 Coveo, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider;
+
+import static com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys.LogicalKind.APPLICATIONS;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.Keys;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesRawResource;
+import com.netflix.spinnaker.clouddriver.kubernetes.config.RawResourcesEndpointConfig;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.moniker.Moniker;
+import java.time.Instant;
+import java.util.*;
+import java.util.regex.Pattern;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+final class KubernetesRawResourceProviderTest {
+
+  private KubernetesRawResourceProvider provider;
+  private KubernetesCredentials credentials;
+  private KubernetesAccountResolver accountResolver;
+
+  private static final String APPLICATION = "application";
+  private static final String ACCOUNT = "account";
+  private static final String NAMESPACE = "namespace";
+
+  private static final String SECRET_NAME = "mysecret";
+  private static final KubernetesKind SECRET_KIND = KubernetesKind.SECRET;
+  private static final String SECRET_FULL_NAME = SECRET_KIND + " " + SECRET_NAME;
+
+  private static final String POD_NAME = "mypod";
+  private static final KubernetesKind POD_KIND = KubernetesKind.POD;
+  private static final String POD_FULL_NAME = POD_KIND + " " + POD_NAME;
+
+  @BeforeEach
+  public void setup() {
+    KubernetesCacheUtils cacheUtils = mock(KubernetesCacheUtils.class);
+    accountResolver = mock(KubernetesAccountResolver.class);
+    credentials = mock(KubernetesCredentials.class);
+    provider = new KubernetesRawResourceProvider(cacheUtils, accountResolver);
+
+    ImmutableList.Builder<CacheData> cacheDataBuilder = ImmutableList.builder();
+    cacheDataBuilder.add(getResourceCacheData(POD_NAME, POD_KIND));
+    cacheDataBuilder.add(getResourceCacheData(SECRET_NAME, SECRET_KIND));
+
+    CacheData cacheData = mock(CacheData.class);
+    when(cacheUtils.getSingleEntry(
+            APPLICATIONS.toString(), Keys.ApplicationCacheKey.createKey(APPLICATION)))
+        .thenReturn(Optional.of(cacheData));
+    when(cacheUtils.getAllRelationships(cacheData)).thenReturn(cacheDataBuilder.build());
+    when(accountResolver.getCredentials(ACCOUNT)).thenReturn(Optional.of(credentials));
+  }
+
+  @Test
+  void getKubernetesResources() {
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(new RawResourcesEndpointConfig());
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of());
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of());
+
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(2);
+    assertThat(rawResources).anyMatch(item -> item.getName().equals(SECRET_FULL_NAME));
+    assertThat(rawResources).anyMatch(item -> item.getName().equals(POD_FULL_NAME));
+  }
+
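+  // When the account configures an explicit kind list (credentials.getKinds),
+  // only resources of those kinds are returned.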
+  @Test
+  void getKubernetesResourcesWithSpecificKind() {
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(new RawResourcesEndpointConfig());
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of(POD_KIND));
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of());
+
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(1);
+    assertThat(rawResources.iterator().next().getName()).isEqualTo(POD_FULL_NAME);
+  }
+
+  @Test
+  void getKubernetesResourcesOmittingAKind() {
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(new RawResourcesEndpointConfig());
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of());
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of(POD_KIND));
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(1);
+    assertThat(rawResources.iterator().next().getName()).isEqualTo(SECRET_FULL_NAME);
+  }
+
+  @Test
+  void getKubernetesResourcesFilteringARawResource() {
+    RawResourcesEndpointConfig epConfig = mock(RawResourcesEndpointConfig.class);
+    List<String> omitKindExpressions = new ArrayList<>();
+    omitKindExpressions.add("^" + POD_KIND.toString() + "$");
+    List<Pattern> omitKindPatterns = new ArrayList<>();
+    for (String exp : omitKindExpressions) {
+      omitKindPatterns.add(Pattern.compile(exp));
+    }
+    when(epConfig.getOmitKindPatterns()).thenReturn(omitKindPatterns);
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(epConfig);
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of());
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of());
+
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(1);
+    assertThat(rawResources.iterator().next().getName()).isEqualTo(SECRET_FULL_NAME);
+  }
+
+  @Test
+  void getKubernetesResourcesIncludingThenFilteringAKind() {
+    RawResourcesEndpointConfig epConfig = mock(RawResourcesEndpointConfig.class);
+    List<String> omitKindExpressions = new ArrayList<>();
+    omitKindExpressions.add("^" + POD_KIND.toString() + "$");
+    List<Pattern> omitKindPatterns = new ArrayList<>();
+    for (String exp : omitKindExpressions) {
+      omitKindPatterns.add(Pattern.compile(exp));
+    }
+    when(epConfig.getOmitKindPatterns()).thenReturn(omitKindPatterns);
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(epConfig);
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of(POD_KIND, SECRET_KIND));
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of());
+
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(1);
+  }
+
+  @Test
+  void getKubernetesResourcesFilteringAllResources() {
+    RawResourcesEndpointConfig epConfig = mock(RawResourcesEndpointConfig.class);
+    List<String> omitKindExpressions = new ArrayList<>();
+    omitKindExpressions.add(".*");
+    List<Pattern> omitKindPatterns = new ArrayList<>();
+    for (String exp : omitKindExpressions) {
+      omitKindPatterns.add(Pattern.compile(exp));
+    }
+    when(epConfig.getOmitKindPatterns()).thenReturn(omitKindPatterns);
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(epConfig);
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of());
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of());
+
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(0);
+  }
+
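+  // kindPatterns act as an allow-list that takes precedence over omitKindPatterns:
+  // the next test omits everything via ".*" yet the pod, matched by a kind
+  // pattern, is still returned.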
+  @Test
+  void getKubernetesResourcesFilteringAllExceptOne() {
+    RawResourcesEndpointConfig epConfig = mock(RawResourcesEndpointConfig.class);
+    List<String> kindExpressions = new ArrayList<>();
+    kindExpressions.add("^" + POD_KIND.toString() + "$");
+    List<Pattern> kindPatterns = new ArrayList<>();
+    for (String exp : kindExpressions) {
+      kindPatterns.add(Pattern.compile(exp));
+    }
+    List<String> omitKindExpressions = new ArrayList<>();
+    omitKindExpressions.add(".*");
+    List<Pattern> omitKindPatterns = new ArrayList<>();
+    for (String exp : omitKindExpressions) {
+      omitKindPatterns.add(Pattern.compile(exp));
+    }
+    when(epConfig.getOmitKindPatterns()).thenReturn(omitKindPatterns);
+    when(epConfig.getKindPatterns()).thenReturn(kindPatterns);
+    when(credentials.getRawResourcesEndpointConfig()).thenReturn(epConfig);
+    when(credentials.getKinds()).thenReturn(ImmutableSet.of());
+    when(credentials.getOmitKinds()).thenReturn(ImmutableSet.of());
+
+    Set<KubernetesRawResource> rawResources = provider.getApplicationRawResources(APPLICATION);
+
+    assertThat(rawResources.size()).isEqualTo(1);
+    assertThat(rawResources.iterator().next().getName()).isEqualTo(POD_FULL_NAME);
+  }
+
+  private CacheData getResourceCacheData(String name, KubernetesKind kind) {
+    String cacheKey = Keys.InfrastructureCacheKey.createKey(kind, ACCOUNT, NAMESPACE, name);
+    CacheData cacheData = mock(CacheData.class);
+    when(cacheData.getId()).thenReturn(cacheKey);
+
+    Map<String, Object> attributes = new HashMap<>();
+    KubernetesManifest manifest = new KubernetesManifest();
+    Map<String, Object> metadata = new HashMap<>();
+    metadata.put("creationTimestamp", Instant.now().toString());
+    manifest.put("metadata", metadata);
+    manifest.setApiVersion(KubernetesApiVersion.V1);
+    manifest.setKind(kind);
+    manifest.setName(name);
+    manifest.setNamespace(NAMESPACE);
+    attributes.put("manifest", manifest);
+
+    attributes.put("moniker", Moniker.builder().app(APPLICATION).cluster(ACCOUNT).build());
+    when(cacheData.getAttributes()).thenReturn(attributes);
+    return cacheData;
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSearchProviderTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSearchProviderTest.java
new file mode 100644
index 00000000000..60948ceb245
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/KubernetesSearchProviderTest.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021 Salesforce.com, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.springframework.boot.context.annotation.UserConfigurations; +import org.springframework.boot.test.context.runner.ApplicationContextRunner; +import org.springframework.context.annotation.Bean; + +public class KubernetesSearchProviderTest { + private final ApplicationContextRunner runner = + new ApplicationContextRunner() + .withConfiguration( + UserConfigurations.of(KubernetesSearchProvider.class, TestConfiguration.class)); + + @Test + void testKubernetesSearchProviderBeanIsPresentByDefault() { + runner.run(ctx -> assertThat(ctx).hasSingleBean(KubernetesSearchProvider.class)); + } + + @Test + void testKubernetesSearchProviderBeanIsPresentWhenConfiguredInSuchAWay() { + runner + .withPropertyValues("kubernetes.search.enabled=true") + .run(ctx -> assertThat(ctx).hasSingleBean(KubernetesSearchProvider.class)); + } + + @Test + void testKubernetesSearchProviderBeanIsNotPresentWhenConfiguredInSuchAWay() { + runner + .withPropertyValues("kubernetes.search.enabled=false") + .run(ctx -> assertThat(ctx).doesNotHaveBean(KubernetesSearchProvider.class)); + } + + /** test class that supplies beans needed to autowire the KubernetesSearchProvider bean */ + static class TestConfiguration { + @Bean + ObjectMapper getObjectMapper() { + return new ObjectMapper(); + } + + @Bean + KubernetesCacheUtils getKubernetesCacheUtils() { + return mock(KubernetesCacheUtils.class); + } + + @Bean + KubernetesSpinnakerKindMap kubernetesSpinnakerKindMap() { + return new KubernetesSpinnakerKindMap(List.of()); + } + + @Bean + KubernetesAccountResolver kubernetesAccountResolver() { + return mock(KubernetesAccountResolver.class); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesCoordinatesTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesCoordinatesTest.java new file mode 100644 index 00000000000..fc20275da82 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesCoordinatesTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+final class KubernetesCoordinatesTest {
+  @ParameterizedTest
+  @MethodSource("parseNameCases")
+  void parseName(ParseTestCase testCase) {
+    KubernetesCoordinates coordinates =
+        KubernetesCoordinates.builder().fullResourceName(testCase.fullResourceName).build();
+
+    assertThat(coordinates.getKind()).isEqualTo(testCase.expectedKind);
+    assertThat(coordinates.getName()).isEqualTo(testCase.expectedName);
+  }
+
+  static Stream<ParseTestCase> parseNameCases() {
+    return Stream.of(
+        new ParseTestCase("replicaSet abc", KubernetesKind.REPLICA_SET, "abc"),
+        new ParseTestCase("ReplicaSet abc", KubernetesKind.REPLICA_SET, "abc"),
+        new ParseTestCase("rs abc", KubernetesKind.REPLICA_SET, "abc"),
+        new ParseTestCase("service abc", KubernetesKind.SERVICE, "abc"),
+        new ParseTestCase("SERVICE abc", KubernetesKind.SERVICE, "abc"),
+        new ParseTestCase("ingress abc", KubernetesKind.INGRESS, "abc"));
+  }
+
+  @RequiredArgsConstructor
+  private static class ParseTestCase {
+    final String fullResourceName;
+    final KubernetesKind expectedKind;
+    final String expectedName;
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPodMetricTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPodMetricTest.java
new file mode 100644
index 00000000000..a1318972c35
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/KubernetesPodMetricTest.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2020 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.Resources; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric.ContainerMetric; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import org.assertj.core.api.AssertionsForClassTypes; +import org.junit.jupiter.api.Test; + +final class KubernetesPodMetricTest { + private static final ObjectMapper objectMapper = new ObjectMapper(); + + @Test + public void deserializeContainerMetric() throws IOException { + String json = + Resources.toString( + KubernetesPodMetricTest.class.getResource("pod-metric.json"), StandardCharsets.UTF_8); + + KubernetesPodMetric.ContainerMetric containerMetric = + objectMapper.readValue(json, KubernetesPodMetric.ContainerMetric.class); + assertThat(containerMetric.getContainerName()).isEqualTo("istio-proxy"); + assertThat(containerMetric.getMetrics()) + .containsOnly(entry("MEMORY(bytes)", "27Mi"), entry("CPU(cores)", "3m")); + } + + @Test + public void deserializeContainerMetricWithUnknownField() throws IOException { + String json = + Resources.toString( + KubernetesPodMetricTest.class.getResource("pod-metric-extra-property.json"), + StandardCharsets.UTF_8); + + KubernetesPodMetric.ContainerMetric containerMetric = + objectMapper.readValue(json, KubernetesPodMetric.ContainerMetric.class); + assertThat(containerMetric.getContainerName()).isEqualTo("istio-proxy"); + assertThat(containerMetric.getMetrics()) + .containsOnly(entry("MEMORY(bytes)", "27Mi"), entry("CPU(cores)", "3m")); + } + + @Test + public void serializeContainerMetric() throws IOException { + String expectedResult = + Resources.toString( + KubernetesPodMetricTest.class.getResource("pod-metric.json"), StandardCharsets.UTF_8); + + KubernetesPodMetric.ContainerMetric metric = + new ContainerMetric( + "istio-proxy", + ImmutableMap.of( + "MEMORY(bytes)", "27Mi", + "CPU(cores)", "3m")); + String result = objectMapper.writeValueAsString(metric); + + // Compare the parsed trees of the two results, which is agnostic to key order + AssertionsForClassTypes.assertThat(objectMapper.readTree(result)) + .isEqualTo(objectMapper.readTree(expectedResult)); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesEnableDisableManifestDescriptionTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesEnableDisableManifestDescriptionTest.java new file mode 100644 index 00000000000..c5b85927e4d --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesEnableDisableManifestDescriptionTest.java @@ -0,0 +1,101 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.junit.jupiter.api.Test;
+
+final class KubernetesEnableDisableManifestDescriptionTest {
+  private static final JsonNodeFactory jsonFactory = JsonNodeFactory.instance;
+  private static final ObjectMapper objectMapper = new ObjectMapper();
+  private static final int DEFAULT_TARGET_PERCENTAGE = 100;
+
+  @Test
+  void deserializeEmpty() throws Exception {
+    String serialized = jsonFactory.objectNode().toString();
+    KubernetesEnableDisableManifestDescription description =
+        objectMapper.readValue(serialized, KubernetesEnableDisableManifestDescription.class);
+    assertThat(description.getLoadBalancers()).isNotNull();
+    assertThat(description.getLoadBalancers()).isEmpty();
+    assertThat(description.getTargetPercentage()).isEqualTo(DEFAULT_TARGET_PERCENTAGE);
+  }
+
+  @Test
+  void deserializeNullLoadBalancers() throws Exception {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .set("loadBalancers", jsonFactory.nullNode())
+            .toString();
+    KubernetesEnableDisableManifestDescription description =
+        objectMapper.readValue(serialized, KubernetesEnableDisableManifestDescription.class);
+    assertThat(description.getLoadBalancers()).isNotNull();
+    assertThat(description.getLoadBalancers()).isEmpty();
+  }
+
+  @Test
+  void deserializeEmptyLoadBalancers() throws Exception {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .set("loadBalancers", jsonFactory.arrayNode())
+            .toString();
+    KubernetesEnableDisableManifestDescription description =
+        objectMapper.readValue(serialized, KubernetesEnableDisableManifestDescription.class);
+    assertThat(description.getLoadBalancers()).isNotNull();
+    assertThat(description.getLoadBalancers()).isEmpty();
+  }
+
+  @Test
+  void deserializeNonEmptyLoadBalancers() throws Exception {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .set("loadBalancers", jsonFactory.arrayNode().add("abc").add("def"))
+            .toString();
+    KubernetesEnableDisableManifestDescription description =
+        objectMapper.readValue(serialized, KubernetesEnableDisableManifestDescription.class);
+    assertThat(description.getLoadBalancers()).isNotNull();
+    assertThat(description.getLoadBalancers()).containsExactly("abc", "def");
+  }
+
+  @Test
+  void deserializeTargetPercentage() throws Exception {
+    String serialized = jsonFactory.objectNode().put("targetPercentage", 50).toString();
+    KubernetesEnableDisableManifestDescription description =
+        objectMapper.readValue(serialized, KubernetesEnableDisableManifestDescription.class);
+    assertThat(description.getTargetPercentage()).isEqualTo(50);
+  }
+
+  @Test
+  void deserializeManifestNameLocation() throws Exception {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .put("manifestName", "replicaSet my-rs")
+            .put("location", "my-namespace")
+            .toString();
+    KubernetesEnableDisableManifestDescription description =
+        objectMapper.readValue(serialized, KubernetesEnableDisableManifestDescription.class);
+    assertThat(description.getManifestName()).isEqualTo("replicaSet my-rs");
+    assertThat(description.getLocation()).isEqualTo("my-namespace");
+  }
+}
diff --git 
a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestAnnotatorTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestAnnotatorTest.java new file mode 100644 index 00000000000..f0f3904109c --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestAnnotatorTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2022 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import com.netflix.spinnaker.moniker.Moniker; +import java.util.HashMap; +import org.junit.jupiter.api.Test; + +public class KubernetesManifestAnnotatorTest { + @Test + public void testDeriveMonikerForAnUnversionedManifest() { + // when: + Moniker moniker = + KubernetesManifestAnnotater.getMoniker(manifest("testapp-abc", KubernetesKind.DEPLOYMENT)); + + // then: + assertThat(moniker.getApp()).isEqualTo("testapp"); + assertThat(moniker.getCluster()).isEqualTo("testapp-abc"); + assertThat(moniker.getStack()).isNull(); + assertThat(moniker.getDetail()).isNull(); + assertThat(moniker.getSequence()).isNull(); + } + + @Test + public void testDeriveMonikerForAVersionedManifestName() { + // when: + Moniker moniker = + KubernetesManifestAnnotater.getMoniker( + manifest("testapp-abc-v003", KubernetesKind.DEPLOYMENT)); + + // then: + assertThat(moniker.getApp()).isEqualTo("testapp"); + assertThat(moniker.getCluster()).isEqualTo("testapp-abc"); + assertThat(moniker.getStack()).isNull(); + assertThat(moniker.getDetail()).isNull(); + assertThat(moniker.getSequence()).isEqualTo(3); + } + + @Test + public void testDeriveMonikerForAnUnversionedKubernetesSystemResource() { + // when: + Moniker moniker = + KubernetesManifestAnnotater.getMoniker( + manifest("system:coredns", KubernetesKind.DEPLOYMENT)); + + // then: + assertThat(moniker).isNotNull(); + assertThat(moniker.getApp()).isEqualTo("system"); + assertThat(moniker.getCluster()).isEqualTo("system:coredns"); + assertThat(moniker.getStack()).isNull(); + assertThat(moniker.getDetail()).isNull(); + assertThat(moniker.getSequence()).isNull(); + } + + @Test + public void testDeriveMonikerForAnUnversionedKubernetesSystemResourceWithMultipleColons() { + // when: + Moniker moniker = + KubernetesManifestAnnotater.getMoniker( + manifest( + "system:certificates.k8s.io:certificatesigningrequests:nodeclient", + KubernetesKind.CLUSTER_ROLE)); + + // then: + assertThat(moniker).isNotNull(); + assertThat(moniker.getApp()).isEqualTo("system"); + assertThat(moniker.getCluster()) + .isEqualTo("system:certificates.k8s.io:certificatesigningrequests:nodeclient"); + assertThat(moniker.getStack()).isNull(); + assertThat(moniker.getDetail()).isNull(); + assertThat(moniker.getSequence()).isNull(); + } + + @Test + public 
void testDeriveMonikerForAVersionedKubernetesSystemResource() { + // when: + Moniker moniker = + KubernetesManifestAnnotater.getMoniker( + manifest( + "system:certificates.k8s.io:certificatesigningrequests:nodeclient-v003", + KubernetesKind.CLUSTER_ROLE)); + + // then: + assertThat(moniker).isNotNull(); + assertThat(moniker.getApp()).isEqualTo("system"); + assertThat(moniker.getCluster()) + .isEqualTo("system:certificates.k8s.io:certificatesigningrequests:nodeclient"); + assertThat(moniker.getStack()).isNull(); + assertThat(moniker.getDetail()).isNull(); + assertThat(moniker.getSequence()).isEqualTo(3); + } + + /** A test manifest */ + private static KubernetesManifest manifest(String deploymentName, KubernetesKind kind) { + KubernetesManifest deployment = new KubernetesManifest(); + deployment.put("metadata", new HashMap<>()); + deployment.setNamespace("namespace"); + deployment.setKind(kind); + deployment.setApiVersion(KubernetesApiVersion.APPS_V1); + deployment.setName(deploymentName); + return deployment; + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestOwnerRefTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestOwnerRefTest.java new file mode 100644 index 00000000000..03e0e16dcd5 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestOwnerRefTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +public class KubernetesManifestOwnerRefTest { + + @ParameterizedTest + @CsvSource( + value = { + "{\"kind\":\"Pod\"}|pod", + "{\"kind\":\"Deployment\",\"apiVersion\":\"apps/v1\"}|deployment", + "{\"kind\":\"Custom\",\"apiVersion\":\"mygroup/v1\"}|Custom.mygroup", + }, + delimiter = '|') + public void testOwnerRef(String referenceAsJson, String computedKind) + throws JsonProcessingException { + ObjectMapper objectMapper = new ObjectMapper(); + KubernetesManifest.OwnerReference ref = + objectMapper.readValue(referenceAsJson, KubernetesManifest.OwnerReference.class); + assertThat(ref).isNotNull(); + assertThat(ref.computedKind().toString()).isEqualTo(computedKind); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestReplicasTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestReplicasTest.java new file mode 100644 index 00000000000..149a21898df --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestReplicasTest.java @@ -0,0 +1,102 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.junit.jupiter.api.Test;
+
+public class KubernetesManifestReplicasTest {
+  private static final JsonNodeFactory jsonFactory = JsonNodeFactory.instance;
+  private static final ObjectMapper objectMapper = new ObjectMapper();
+
+  @Test
+  public void replicasValueShouldBeNullWithoutSpecDefined() throws JsonProcessingException {
+    String serialized = jsonFactory.objectNode().toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+    assertThat(manifest.getReplicas()).isNull();
+  }
+
+  @Test
+  public void replicasValueShouldBeNullWithoutReplicasDefined() throws JsonProcessingException {
+    String serialized =
+        jsonFactory.objectNode().set("spec", jsonFactory.objectNode()).toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+    assertThat(manifest.getReplicas()).isNull();
+  }
+
+  @Test
+  public void shouldGetReplicasDoubleValue() throws JsonProcessingException {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .set("spec", jsonFactory.objectNode().put("replicas", 2.0))
+            .toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+    assertThat(manifest.getReplicas()).isEqualTo(2);
+  }
+
+  @Test
+  public void shouldGetReplicasIntegerValue() throws JsonProcessingException {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .set("spec", jsonFactory.objectNode().put("replicas", 2))
+            .toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+    assertThat(manifest.getReplicas()).isEqualTo(2);
+  }
+
+  @Test
+  public void replicasValueShouldRemainUnsetWithoutSpecDefined() throws JsonProcessingException {
+    String serialized = jsonFactory.objectNode().toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+
+    manifest.setReplicas(2.0);
+
+    assertThat(manifest.getReplicas()).isNull();
+  }
+
+  @Test
+  public void replicasValueShouldBeSetWithReplicasDefined() throws JsonProcessingException {
+    String serialized =
+        jsonFactory
+            .objectNode()
+            .set("spec", jsonFactory.objectNode().put("replicas", 1))
+            .toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+
+    manifest.setReplicas(2.0);
+
+    assertThat(manifest.getReplicas()).isEqualTo(2);
+  }
+
+  @Test
+  public void replicasValueShouldBeSetWithReplicasUndefined() throws JsonProcessingException {
+    String serialized =
+        jsonFactory.objectNode().set("spec", jsonFactory.objectNode()).toString();
+    KubernetesManifest manifest = objectMapper.readValue(serialized, KubernetesManifest.class);
+
+    manifest.setReplicas(2.0);
+
+    assertThat(manifest.getReplicas()).isEqualTo(2);
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestStrategyTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestStrategyTest.java
new file mode 100644
index 00000000000..d4b89a8b946
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestStrategyTest.java
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2020 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.entry;
+
+import com.google.common.collect.ImmutableMap;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.DeployStrategy;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.ServerSideApplyStrategy;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.Versioned;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.OptionalInt;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+
+final class KubernetesManifestStrategyTest {
+  @Test
+  void deployStrategyDefaultsToApply() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(ImmutableMap.of());
+    assertThat(strategy).isEqualTo(DeployStrategy.APPLY);
+  }
+
+  @Test
+  void otherStrategiesFalse() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of(
+                "strategy.spinnaker.io/recreate", "false",
+                "strategy.spinnaker.io/replace", "false"));
+    assertThat(strategy).isEqualTo(DeployStrategy.APPLY);
+  }
+
+  @Test
+  void recreateStrategy() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/recreate", "true"));
+    assertThat(strategy).isEqualTo(DeployStrategy.RECREATE);
+  }
+
+  @Test
+  void replaceStrategy() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/replace", "true"));
+    assertThat(strategy).isEqualTo(DeployStrategy.REPLACE);
+  }
+
+  @Test
+  void deployStrategyServerSideApplyForce() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "force-conflicts"));
+    assertThat(strategy).isEqualTo(DeployStrategy.SERVER_SIDE_APPLY);
+  }
+
+  @Test
+  void deployStrategyServerSideApplyDefault() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "true"));
+    assertThat(strategy).isEqualTo(DeployStrategy.SERVER_SIDE_APPLY);
+  }
+
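+  // Both "true" and "force-conflicts" opt in to SERVER_SIDE_APPLY; "false" keeps
+  // the default APPLY strategy.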
+  @Test
+  void deployStrategyServerSideApplyDisabled() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "false"));
+    assertThat(strategy).isEqualTo(DeployStrategy.APPLY);
+  }
+
+  @Test
+  void serverSideApplyStrategyForceConflict() {
+    KubernetesManifestStrategy.ServerSideApplyStrategy conflictResolution =
+        KubernetesManifestStrategy.ServerSideApplyStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "force-conflicts"));
+    assertThat(conflictResolution)
+        .isEqualTo(KubernetesManifestStrategy.ServerSideApplyStrategy.FORCE_CONFLICTS);
+  }
+
+  @Test
+  void serverSideApplyStrategyDefault() {
+    KubernetesManifestStrategy.ServerSideApplyStrategy conflictResolution =
+        KubernetesManifestStrategy.ServerSideApplyStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "true"));
+    assertThat(conflictResolution).isEqualTo(ServerSideApplyStrategy.DEFAULT);
+  }
+
+  @Test
+  void serverSideApplyStrategyDisabled() {
+    KubernetesManifestStrategy.ServerSideApplyStrategy conflictResolution =
+        KubernetesManifestStrategy.ServerSideApplyStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "false"));
+    assertThat(conflictResolution).isEqualTo(ServerSideApplyStrategy.DISABLED);
+  }
+
+  @Test
+  void serverSideApplyStrategyInvalidValue() {
+    KubernetesManifestStrategy.ServerSideApplyStrategy conflictResolution =
+        KubernetesManifestStrategy.ServerSideApplyStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/server-side-apply", "zzzz"));
+    assertThat(conflictResolution).isEqualTo(ServerSideApplyStrategy.DISABLED);
+  }
+
+  @Test
+  void nonBooleanValue() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of("strategy.spinnaker.io/replace", "zzzz"));
+    assertThat(strategy).isEqualTo(DeployStrategy.APPLY);
+  }
+
+  @Test
+  void recreatePreferredOverReplace() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of(
+                "strategy.spinnaker.io/replace", "true",
+                "strategy.spinnaker.io/recreate", "true"));
+    assertThat(strategy).isEqualTo(DeployStrategy.RECREATE);
+  }
+
+  @Test
+  void replacePreferredOverServerSideApply() {
+    KubernetesManifestStrategy.DeployStrategy strategy =
+        KubernetesManifestStrategy.DeployStrategy.fromAnnotations(
+            ImmutableMap.of(
+                "strategy.spinnaker.io/replace", "true",
+                "strategy.spinnaker.io/server-side-apply", "true"));
+    assertThat(strategy).isEqualTo(DeployStrategy.REPLACE);
+  }
+
+  @Test
+  void applyToAnnotations() {
+    Map<String, String> annotations = DeployStrategy.APPLY.toAnnotations();
+    assertThat(annotations).isEmpty();
+  }
+
+  @Test
+  void recreateToAnnotations() {
+    Map<String, String> annotations = DeployStrategy.RECREATE.toAnnotations();
+    assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/recreate", "true"));
+  }
+
+  @Test
+  void replaceToAnnotations() {
+    Map<String, String> annotations = DeployStrategy.REPLACE.toAnnotations();
+    assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/replace", "true"));
+  }
+
+  @Test
+  void versionedDefaultsToDefault() {
+    KubernetesManifestStrategy.Versioned versioned =
+        KubernetesManifestStrategy.Versioned.fromAnnotations(ImmutableMap.of());
+    assertThat(versioned).isEqualTo(Versioned.DEFAULT);
+  }
+
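+  // Versioned is tri-state: with no annotation it stays DEFAULT rather than
+  // resolving to TRUE or FALSE.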
ImmutableMap.of("strategy.spinnaker.io/versioned", "true")); + assertThat(versioned).isEqualTo(Versioned.TRUE); + } + + @Test + void versionedFalse() { + KubernetesManifestStrategy.Versioned versioned = + KubernetesManifestStrategy.Versioned.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/versioned", "false")); + assertThat(versioned).isEqualTo(Versioned.FALSE); + } + + @Test + void versionedNonsense() { + KubernetesManifestStrategy.Versioned versioned = + KubernetesManifestStrategy.Versioned.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/versioned", "zzz")); + assertThat(versioned).isEqualTo(Versioned.FALSE); + } + + @Test + void versionedDefaultToAnnotations() { + Map annotations = Versioned.DEFAULT.toAnnotations(); + assertThat(annotations).isEmpty(); + } + + @Test + void versionedTrueToAnnotations() { + Map annotations = Versioned.TRUE.toAnnotations(); + assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/versioned", "true")); + } + + @Test + void versionedFalseToAnnotations() { + Map annotations = Versioned.FALSE.toAnnotations(); + assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/versioned", "false")); + } + + @Test + void fromEmptyAnnotations() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations(ImmutableMap.of()); + assertThat(strategy.getDeployStrategy()).isEqualTo(DeployStrategy.APPLY); + assertThat(strategy.getVersioned()).isEqualTo(Versioned.DEFAULT); + assertThat(strategy.getMaxVersionHistory()).isEqualTo(OptionalInt.empty()); + assertThat(strategy.isUseSourceCapacity()).isFalse(); + } + + @Test + void fromDeployStrategyAnnotation() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/replace", "true")); + assertThat(strategy.getDeployStrategy()).isEqualTo(DeployStrategy.REPLACE); + } + + @Test + void fromVersionedAnnotation() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/versioned", "true")); + assertThat(strategy.getVersioned()).isEqualTo(Versioned.TRUE); + } + + @Test + void fromMaxVersionHistoryAnnotation() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/max-version-history", "10")); + assertThat(strategy.getMaxVersionHistory()).isEqualTo(OptionalInt.of(10)); + } + + @Test + void fromNonIntegerMaxVersionHistoryAnnotation() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/max-version-history", "zz")); + assertThat(strategy.getMaxVersionHistory()).isEqualTo(OptionalInt.empty()); + } + + @Test + void fromUseSourceCapacityAnnotation() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/use-source-capacity", "true")); + assertThat(strategy.isUseSourceCapacity()).isTrue(); + } + + @Test + void fromUseSourceCapacityAnnotationFalse() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/use-source-capacity", "false")); + assertThat(strategy.isUseSourceCapacity()).isFalse(); + } + + @Test + void fromUseSourceCapacityAnnotationNonsense() { + KubernetesManifestStrategy strategy = + KubernetesManifestStrategy.fromAnnotations( + ImmutableMap.of("strategy.spinnaker.io/use-source-capacity", "zzz")); + 
+    assertThat(strategy.isUseSourceCapacity()).isFalse();
+  }
+
+  @Test
+  void allAnnotationsPresent() {
+    KubernetesManifestStrategy strategy =
+        KubernetesManifestStrategy.fromAnnotations(
+            ImmutableMap.of(
+                "strategy.spinnaker.io/replace", "true",
+                "strategy.spinnaker.io/versioned", "true",
+                "strategy.spinnaker.io/max-version-history", "20",
+                "strategy.spinnaker.io/use-source-capacity", "true",
+                "strategy.spinnaker.io/random-annotation", "abc"));
+
+    assertThat(strategy.getDeployStrategy()).isEqualTo(DeployStrategy.REPLACE);
+    assertThat(strategy.getVersioned()).isEqualTo(Versioned.TRUE);
+    assertThat(strategy.getMaxVersionHistory()).isEqualTo(OptionalInt.of(20));
+    assertThat(strategy.isUseSourceCapacity()).isTrue();
+  }
+
+  @Test
+  void builderDefaults() {
+    KubernetesManifestStrategy strategy = KubernetesManifestStrategy.builder().build();
+    assertThat(strategy.getDeployStrategy()).isEqualTo(DeployStrategy.APPLY);
+    assertThat(strategy.getVersioned()).isEqualTo(Versioned.DEFAULT);
+    assertThat(strategy.getMaxVersionHistory()).isEqualTo(OptionalInt.empty());
+    assertThat(strategy.isUseSourceCapacity()).isFalse();
+  }
+
+  @Test
+  void emptyAnnotations() {
+    Map<String, String> annotations = KubernetesManifestStrategy.builder().build().toAnnotations();
+    assertThat(annotations).isEmpty();
+  }
+
+  @Test
+  void deployStrategyRecreateToAnnotations() {
+    Map<String, String> annotations =
+        KubernetesManifestStrategy.builder()
+            .deployStrategy(DeployStrategy.RECREATE)
+            .build()
+            .toAnnotations();
+    assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/recreate", "true"));
+  }
+
+  @Test
+  void deployStrategyReplaceToAnnotations() {
+    Map<String, String> annotations =
+        KubernetesManifestStrategy.builder()
+            .deployStrategy(DeployStrategy.REPLACE)
+            .build()
+            .toAnnotations();
+    assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/replace", "true"));
+  }
+
+  @Test
+  void versionedToAnnotations() {
+    Map<String, String> annotations =
+        KubernetesManifestStrategy.builder().versioned(Versioned.FALSE).build().toAnnotations();
+    assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/versioned", "false"));
+  }
+
+  @Test
+  void maxVersionHistoryToAnnotations() {
+    Map<String, String> annotations =
+        KubernetesManifestStrategy.builder().maxVersionHistory(10).build().toAnnotations();
+    assertThat(annotations).containsOnly(entry("strategy.spinnaker.io/max-version-history", "10"));
+  }
+
+  @Test
+  void useSourceCapacityToAnnotations() {
+    Map<String, String> annotations =
+        KubernetesManifestStrategy.builder().useSourceCapacity(true).build().toAnnotations();
+    assertThat(annotations)
+        .containsOnly(entry("strategy.spinnaker.io/use-source-capacity", "true"));
+  }
+
+  @ParameterizedTest
+  @EnumSource(DeployStrategy.class)
+  void deploymentStrategySetsAnnotations(DeployStrategy deployStrategy) {
+    Map<String, String> annotations = new HashMap<>();
+    deployStrategy.setAnnotations(annotations);
+    assertThat(annotations).isEqualTo(deployStrategy.toAnnotations());
+  }
+
+  @ParameterizedTest
+  @EnumSource(DeployStrategy.class)
+  void deploymentStrategyOverwritesAnnotations(DeployStrategy deployStrategy) {
+    Map<String, String> annotations = new HashMap<>(DeployStrategy.RECREATE.toAnnotations());
+    deployStrategy.setAnnotations(annotations);
+    assertThat(annotations).isEqualTo(deployStrategy.toAnnotations());
+  }
+
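+  // setAnnotations only replaces the deploy-strategy annotations; unrelated
+  // annotations must survive, as the next case verifies for every strategy.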
"v001", + "my-custom-annotation", "my-custom-value"); + Map annotations = new HashMap<>(irrelevantAnnotations); + deployStrategy.setAnnotations(annotations); + assertThat(annotations).containsAllEntriesOf(irrelevantAnnotations); + } + + @Test + void toAnnotationsMultipleAnnotations() { + Map annotations = + KubernetesManifestStrategy.builder() + .deployStrategy(DeployStrategy.RECREATE) + .versioned(Versioned.TRUE) + .maxVersionHistory(30) + .useSourceCapacity(true) + .build() + .toAnnotations(); + assertThat(annotations) + .containsOnly( + entry("strategy.spinnaker.io/recreate", "true"), + entry("strategy.spinnaker.io/versioned", "true"), + entry("strategy.spinnaker.io/max-version-history", "30"), + entry("strategy.spinnaker.io/use-source-capacity", "true")); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTest.java new file mode 100644 index 00000000000..9e89b5efd8c --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertNull; + +import java.util.HashMap; +import org.junit.jupiter.api.Test; + +final class KubernetesManifestTest { + + private static final String GENERATE_NAME = "my-generate-name"; + + @Test + void fullResourceNameConsidersGenerateName() { + KubernetesManifest manifest = new KubernetesManifest(); + + // Job is an arbitrary choice since kubernetes supports generateName in + // other resources. But it's often used with jobs so it's possible to + // run the same job multiple times. + manifest.setKind(KubernetesKind.JOB); + + manifest.put("metadata", new HashMap<>()); + manifest.setGenerateName(GENERATE_NAME); + + // To be explicit, make sure the name is null + assertNull(manifest.getName()); + + assertThat(manifest.getFullResourceName()).isEqualTo("job " + GENERATE_NAME); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTrafficTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTrafficTest.java new file mode 100644 index 00000000000..7acf5890e8b --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesManifestTrafficTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2020 Netflix, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.google.common.collect.ImmutableList;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+final class KubernetesManifestTrafficTest {
+  @Test
+  final void createNullTraffic() {
+    KubernetesManifestTraffic traffic = new KubernetesManifestTraffic(null);
+    assertThat(traffic.getLoadBalancers()).isNotNull();
+    assertThat(traffic.getLoadBalancers()).isEmpty();
+  }
+
+  @Test
+  final void createEmptyTraffic() {
+    KubernetesManifestTraffic traffic = new KubernetesManifestTraffic(ImmutableList.of());
+    assertThat(traffic.getLoadBalancers()).isEmpty();
+  }
+
+  @Test
+  final void createNonEmptyTraffic() {
+    KubernetesManifestTraffic traffic =
+        new KubernetesManifestTraffic(ImmutableList.of("abc", "def"));
+    assertThat(traffic.getLoadBalancers()).containsExactly("abc", "def");
+  }
+
+  @Test
+  final void listIsImmutable() {
+    List<String> loadBalancers = new ArrayList<>();
+    loadBalancers.add("abc");
+    KubernetesManifestTraffic traffic = new KubernetesManifestTraffic(loadBalancers);
+    assertThat(traffic.getLoadBalancers()).containsExactly("abc");
+
+    loadBalancers.add("def");
+    assertThat(traffic.getLoadBalancers()).containsExactly("abc");
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesSourceCapacityTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesSourceCapacityTest.java
new file mode 100644
index 00000000000..262f1ae9b01
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/KubernetesSourceCapacityTest.java
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2023 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.kubernetes.description.manifest; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import java.util.HashMap; +import java.util.OptionalInt; +import org.assertj.core.api.Assertions; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class KubernetesSourceCapacityTest { + + public static final String MANIFEST_NAME = "my-manifest"; + public static final String NAMESPACE = "my-namespace"; + + @Test + public void testInitialSourceCapacityNonVersioned() { + // given: + OptionalInt currentVersion = OptionalInt.empty(); // non-versioned manifest + KubernetesManifest manifest = getKubernetesManifest(KubernetesKind.REPLICA_SET); // any manifest + + // no previous manifest is found + KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class); + Mockito.doReturn(null) + .when(credentials) + .get(matchCoords(KubernetesKind.REPLICA_SET, MANIFEST_NAME)); + + // when: + Integer ret = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials, currentVersion); + + // then: + Assertions.assertThat(ret).isNull(); + Mockito.verify(credentials, Mockito.only()) + .get(matchCoords(KubernetesKind.REPLICA_SET, MANIFEST_NAME)); + } + + @Test + public void testInitialSourceCapacityVersioned() { + // given: + String manifestName = MANIFEST_NAME + "-v000"; + OptionalInt currentVersion = OptionalInt.of(0); // versioned manifest + KubernetesManifest manifest = getKubernetesManifest(KubernetesKind.REPLICA_SET); // any manifest + + // no previous manifest is found + KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class); + Mockito.doReturn(null) + .when(credentials) + .get(matchCoords(KubernetesKind.REPLICA_SET, manifestName)); + + // when: + Integer ret = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials, currentVersion); + + // then: + Assertions.assertThat(ret).isNull(); + Mockito.verify(credentials, Mockito.only()) + .get(matchCoords(KubernetesKind.REPLICA_SET, manifestName)); + } + + @Test + public void testSubsequentSourceCapacityNonVersioned() { + // given: + int previousCapacity = 5; + String previousManifestName = MANIFEST_NAME; + OptionalInt currentVersion = OptionalInt.empty(); // non-versioned + KubernetesManifest manifest = getKubernetesManifest(KubernetesKind.REPLICA_SET); // any manifest + KubernetesManifest previousManifest = manifest.clone(); + previousManifest.setReplicas(5); + + // previous manifest is found + KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class); + Mockito.doReturn(previousManifest) + .when(credentials) + .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName)); + + // when: + Integer ret = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials, currentVersion); + + // then: + Assertions.assertThat(ret).isEqualTo(previousCapacity); + Mockito.verify(credentials, Mockito.only()) + .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName)); + } + + @Test + public void testSubsequentSourceCapacityVersioned() { + // given: + int previousCapacity = 5; + int previousVersion = 2; + String previousManifestName = MANIFEST_NAME + "-v002"; + OptionalInt currentVersion = OptionalInt.of(previousVersion); // versioned manifest + KubernetesManifest manifest = getKubernetesManifest(KubernetesKind.REPLICA_SET); // any manifest + KubernetesManifest 
previousManifest = manifest.clone();
+    previousManifest.setReplicas(5);
+
+    // previous manifest is found
+    KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class);
+    Mockito.doReturn(previousManifest)
+        .when(credentials)
+        .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName));
+
+    // when:
+    Integer ret = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials, currentVersion);
+
+    // then:
+    Assertions.assertThat(ret).isEqualTo(previousCapacity);
+    Mockito.verify(credentials, Mockito.only())
+        .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName));
+  }
+
+  @Test
+  public void testVersionedNotFound() {
+    // given:
+    int previousVersion = 2;
+    String previousManifestName = MANIFEST_NAME + "-v002";
+    OptionalInt currentVersion = OptionalInt.of(previousVersion); // versioned manifest
+    KubernetesManifest manifest = getKubernetesManifest(KubernetesKind.REPLICA_SET); // any manifest
+
+    // no previous manifest is found
+    KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class);
+    Mockito.doReturn(null)
+        .when(credentials)
+        .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName));
+
+    // when:
+    Integer ret = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials, currentVersion);
+
+    // then:
+    Assertions.assertThat(ret).isNull();
+    Mockito.verify(credentials, Mockito.only())
+        .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName));
+  }
+
+  @Test
+  public void testNonVersionedNotFound() {
+    // given:
+    String previousManifestName = MANIFEST_NAME;
+    OptionalInt currentVersion = OptionalInt.empty(); // non-versioned manifest
+    KubernetesManifest manifest = getKubernetesManifest(KubernetesKind.REPLICA_SET); // any manifest
+
+    // no previous manifest is found
+    KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class);
+    Mockito.doReturn(null)
+        .when(credentials)
+        .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName));
+
+    // when:
+    Integer ret = KubernetesSourceCapacity.getSourceCapacity(manifest, credentials, currentVersion);
+
+    // then:
+    Assertions.assertThat(ret).isNull();
+    Mockito.verify(credentials, Mockito.only())
+        .get(matchCoords(KubernetesKind.REPLICA_SET, previousManifestName));
+  }
+
+  @NotNull
+  private static KubernetesManifest getKubernetesManifest(KubernetesKind kind) {
+    KubernetesManifest manifest = new KubernetesManifest(); // any manifest
+    manifest.put("metadata", new HashMap());
+    manifest.put("spec", new HashMap());
+    manifest.setKind(kind);
+    manifest.setName(MANIFEST_NAME);
+    manifest.setNamespace(NAMESPACE);
+    return manifest;
+  }
+
+  private static KubernetesCoordinates matchCoords(KubernetesKind kind, String manifestName) {
+    return Mockito.argThat(
+        a ->
+            a.getKind().equals(kind)
+                && a.getName().equals(manifestName)
+                && a.getNamespace().equals(NAMESPACE));
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicatorTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicatorTest.java
new file mode 100644
index 00000000000..f62667790b5
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/health/KubernetesHealthIndicatorTest.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2020 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.health; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.credentials.MapBackedCredentialsRepository; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.springframework.boot.actuate.health.Health; +import org.springframework.boot.actuate.health.Status; + +@ExtendWith(MockitoExtension.class) +final class KubernetesHealthIndicatorTest { + private static final String ERROR_MESSAGE = "Failed to get namespaces"; + private static final Registry REGISTRY = new NoopRegistry(); + private static final String HEALTHY_ACCOUNT_NAME = "healthy"; + private static final String UNHEALTHY_ACCOUNT_NAME_FIRST = "unhealthy1"; + private static final String UNHEALTHY_ACCOUNT_NAME_SECOND = "unhealthy2"; + + private KubernetesNamedAccountCredentials healthyNamedCredentials; + private KubernetesNamedAccountCredentials unhealthyNamedCredentialsFirst; + private KubernetesNamedAccountCredentials unhealthyNamedCredentialsSecond; + private KubernetesConfigurationProperties kubernetesConfigurationProperties; + + @Mock private KubernetesCredentials.Factory healthyCredentialsFactory; + @Mock private KubernetesCredentials.Factory unhealthyCredentialsFactory; + @Mock private KubernetesCredentials healthyCredentials; + @Mock private KubernetesCredentials unhealthyCredentials; + + @BeforeEach + void setup() { + when(healthyCredentialsFactory.build(any())).thenReturn(healthyCredentials); + when(unhealthyCredentialsFactory.build(any())).thenReturn(unhealthyCredentials); + lenient() + .when(unhealthyCredentials.getDeclaredNamespaces()) + .thenThrow(new RuntimeException(ERROR_MESSAGE)); + + healthyNamedCredentials = + new KubernetesNamedAccountCredentials( + getManagedAccount(HEALTHY_ACCOUNT_NAME), healthyCredentialsFactory); + unhealthyNamedCredentialsFirst = + new KubernetesNamedAccountCredentials( + getManagedAccount(UNHEALTHY_ACCOUNT_NAME_FIRST), 
unhealthyCredentialsFactory);
+    unhealthyNamedCredentialsSecond =
+        new KubernetesNamedAccountCredentials(
+            getManagedAccount(UNHEALTHY_ACCOUNT_NAME_SECOND), unhealthyCredentialsFactory);
+
+    kubernetesConfigurationProperties = new KubernetesConfigurationProperties();
+  }
+
+  @Test
+  void healthyWithNoAccounts() {
+    CredentialsRepository<KubernetesNamedAccountCredentials> repository =
+        stubCredentialsRepository(ImmutableList.of());
+
+    KubernetesHealthIndicator healthIndicator =
+        new KubernetesHealthIndicator(REGISTRY, repository, kubernetesConfigurationProperties);
+
+    healthIndicator.checkHealth();
+    Health result = healthIndicator.getHealth(true);
+
+    assertThat(result.getStatus()).isEqualTo(Status.UP);
+    assertThat(result.getDetails()).isEmpty();
+  }
+
+  @Test
+  void healthyWithOnlyHealthyAccounts() {
+    CredentialsRepository<KubernetesNamedAccountCredentials> repository =
+        stubCredentialsRepository(ImmutableList.of(healthyNamedCredentials));
+
+    KubernetesHealthIndicator healthIndicator =
+        new KubernetesHealthIndicator(REGISTRY, repository, kubernetesConfigurationProperties);
+
+    healthIndicator.checkHealth();
+    Health result = healthIndicator.getHealth(true);
+
+    assertThat(result.getStatus()).isEqualTo(Status.UP);
+    assertThat(result.getDetails()).isEmpty();
+  }
+
+  @DisplayName(
+      "parameterized test to see how errors are reported based on the verifyAccountHealth flag")
+  @ParameterizedTest(name = "{index} => verifyAccountHealth = {0}")
+  @ValueSource(booleans = {true, false})
+  void reportsErrorForUnhealthyAccount(boolean verifyAccountHealth) {
+    CredentialsRepository<KubernetesNamedAccountCredentials> repository =
+        stubCredentialsRepository(ImmutableList.of(unhealthyNamedCredentialsFirst));
+    kubernetesConfigurationProperties.setVerifyAccountHealth(verifyAccountHealth);
+    KubernetesHealthIndicator healthIndicator =
+        new KubernetesHealthIndicator(REGISTRY, repository, kubernetesConfigurationProperties);
+
+    healthIndicator.checkHealth();
+    Health result = healthIndicator.getHealth(true);
+
+    assertThat(result.getStatus()).isEqualTo(Status.UP);
+    if (verifyAccountHealth) {
+      assertThat(result.getDetails())
+          .containsOnly(entry(UNHEALTHY_ACCOUNT_NAME_FIRST, ERROR_MESSAGE));
+    } else {
+      assertThat(result.getDetails()).isEmpty();
+    }
+  }
+
+  @Test
+  void reportsMultipleErrors() {
+    CredentialsRepository<KubernetesNamedAccountCredentials> repository =
+        stubCredentialsRepository(
+            ImmutableList.of(
+                healthyNamedCredentials,
+                unhealthyNamedCredentialsFirst,
+                unhealthyNamedCredentialsSecond));
+
+    KubernetesHealthIndicator healthIndicator =
+        new KubernetesHealthIndicator(REGISTRY, repository, kubernetesConfigurationProperties);
+
+    healthIndicator.checkHealth();
+    Health result = healthIndicator.getHealth(true);
+
+    assertThat(result.getStatus()).isEqualTo(Status.UP);
+    assertThat(result.getDetails())
+        .containsOnly(
+            entry(UNHEALTHY_ACCOUNT_NAME_FIRST, ERROR_MESSAGE),
+            entry(UNHEALTHY_ACCOUNT_NAME_SECOND, ERROR_MESSAGE));
+  }
+
+  private static ManagedAccount getManagedAccount(String name) {
+    ManagedAccount managedAccount = new ManagedAccount();
+    managedAccount.setName(name);
+    return managedAccount;
+  }
+
+  private static CredentialsRepository<KubernetesNamedAccountCredentials> stubCredentialsRepository(
+      Iterable<KubernetesNamedAccountCredentials> accounts) {
+    CredentialsRepository<KubernetesNamedAccountCredentials> repository =
+        new MapBackedCredentialsRepository<>(KubernetesCloudProvider.ID, null);
+    for (KubernetesNamedAccountCredentials account : accounts) {
+      repository.save(account);
+    }
+    return repository;
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/manifest/KubernetesDeployManifestConverterTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/manifest/KubernetesDeployManifestConverterTest.java
new file mode 100644
index 00000000000..83e5e7980df
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/manifest/KubernetesDeployManifestConverterTest.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2020 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.manifest;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.MapType;
+import com.google.common.io.CharStreams;
+import com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest.KubernetesDeployManifestConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeployManifestDescription;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials;
+import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials;
+import com.netflix.spinnaker.credentials.CredentialsRepository;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.Charset;
+import java.util.*;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+public class KubernetesDeployManifestConverterTest {
+  private static KubernetesDeployManifestConverter converter;
+  private static ObjectMapper mapper;
+  private static MapType mapType;
+
+  @BeforeAll
+  static void setup() {
+    CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository =
+        Mockito.mock(CredentialsRepository.class);
+    Mockito.when(credentialsRepository.getOne("kubernetes"))
+        .thenReturn(Mockito.mock(KubernetesNamedAccountCredentials.class));
+    converter = new KubernetesDeployManifestConverter(credentialsRepository, null);
+    mapper = converter.getObjectMapper();
+    mapType = mapper.getTypeFactory().constructMapType(Map.class, String.class, Object.class);
+  }
+
+  @Test
+  public void manifestListDeserialized() throws IOException {
+    String deploymentJson = getResourceAsString("deployment-manifest.json");
+    String serviceJson = getResourceAsString("service-manifest.json");
+
+    Map<String, Object> deploymentMap = mapper.readValue(deploymentJson, mapType);
+    Map<String, Object> serviceMap = mapper.readValue(serviceJson, mapType);
+    List<Map<String, Object>> manifestList = Arrays.asList(deploymentMap, serviceMap);
+    Map<String, Object> inputMap =
+        new HashMap<>(Map.of("account", "kubernetes", "manifests", manifestList));
+    KubernetesDeployManifestDescription description = converter.convertDescription(inputMap);
+
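+    // then: the converter should produce one typed manifest per input map, in order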
+    assertThat(description.getManifests()).hasSize(2);
+    assertThat(description.getManifests().get(0).getKindName()).isEqualTo("Deployment");
+    assertThat(description.getManifests().get(1).getKindName()).isEqualTo("Service");
+  }
+
+  @Test
+  public void splitManifestList() throws IOException {
+    String listTemplate = getResourceAsString("list-manifest.json");
+    String deploymentJson = getResourceAsString("deployment-manifest.json");
+    String serviceJson = getResourceAsString("service-manifest.json");
+
+    String listJson = String.format(listTemplate, deploymentJson, serviceJson);
+    Map<String, Object> listMap = mapper.readValue(listJson, mapType);
+
+    Map<String, Object> inputMap =
+        new HashMap<>(
+            Map.of("account", "kubernetes", "manifests", Collections.singletonList(listMap)));
+    KubernetesDeployManifestDescription description = converter.convertDescription(inputMap);
+
+    assertThat(description.getManifests()).hasSize(2);
+    assertThat(description.getManifests().get(0).getKindName()).isEqualTo("Deployment");
+    assertThat(description.getManifests().get(1).getKindName()).isEqualTo("Service");
+  }
+
+  @Test
+  public void noInput() {
+    Map<String, Object> inputMap = new HashMap<>(Map.of("account", "kubernetes"));
+    KubernetesDeployManifestDescription description = converter.convertDescription(inputMap);
+    assertThat(description.getManifests()).isNull();
+  }
+
+  @Test
+  public void inputWithCustomResource() throws IOException {
+    String crdJson = getResourceAsString("crd-manifest.json");
+    Map<String, Object> crdMap = mapper.readValue(crdJson, mapType);
+
+    Map<String, Object> inputMap =
+        new HashMap<>(
+            Map.of("account", "kubernetes", "manifests", Collections.singletonList(crdMap)));
+    KubernetesDeployManifestDescription description = converter.convertDescription(inputMap);
+
+    assertThat(description.getManifests()).hasSize(1);
+    assertThat(description.getManifests().get(0).getKindName()).isEqualTo("Custom1");
+  }
+
+  @Test
+  public void splitListManifestWithCustomResource() throws IOException {
+    String listTemplate = getResourceAsString("list-manifest.json");
+    String deploymentJson = getResourceAsString("deployment-manifest.json");
+    String crdJson = getResourceAsString("crd-manifest.json");
+
+    String listJson = String.format(listTemplate, deploymentJson, crdJson);
+    Map<String, Object> listMap = mapper.readValue(listJson, mapType);
+
+    Map<String, Object> inputMap =
+        new HashMap<>(
+            Map.of("account", "kubernetes", "manifests", Collections.singletonList(listMap)));
+    KubernetesDeployManifestDescription description = converter.convertDescription(inputMap);
+
+    assertThat(description.getManifests()).hasSize(2);
+    assertThat(description.getManifests().get(0).getKindName()).isEqualTo("Deployment");
+    assertThat(description.getManifests().get(1).getKindName()).isEqualTo("Custom1");
+  }
+
+  @Test
+  public void splitListManifestWithNamespaceOverride() throws IOException {
+    KubernetesKindProperties prop1 = Mockito.mock(KubernetesKindProperties.class);
+    Mockito.when(prop1.isNamespaced()).thenReturn(true, false, true);
+    KubernetesCredentials credentials = Mockito.mock(KubernetesCredentials.class);
+    Mockito.when(credentials.getKindProperties(Mockito.any())).thenReturn(prop1);
+    KubernetesNamedAccountCredentials accountCredentials =
+        Mockito.mock(KubernetesNamedAccountCredentials.class);
+    Mockito.when(accountCredentials.getCredentials()).thenReturn(credentials);
+
+    CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository =
+        Mockito.mock(CredentialsRepository.class);
+    Mockito.when(credentialsRepository.getOne("kubernetes")).thenReturn(accountCredentials);
+    converter = new KubernetesDeployManifestConverter(credentialsRepository, null);
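+    // isNamespaced() is stubbed with consecutive values (true, false, true), so the second
+    // manifest is treated as cluster-scoped and is skipped by the namespace override below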
+
+    String listTemplate = getResourceAsString("list-manifest.json");
+    String deploymentJson = getResourceAsString("deployment-manifest.json");
+    String crdJson = getResourceAsString("crd-manifest.json");
+
+    String listJson = String.format(listTemplate, deploymentJson, crdJson);
+    Map<String, Object> listMap = mapper.readValue(listJson, mapType);
+    Map<String, Object> deploymentMap = mapper.readValue(deploymentJson, mapType);
+
+    Map<String, Object> inputMap =
+        new HashMap<>(
+            Map.of(
+                "account",
+                "kubernetes",
+                "manifests",
+                Arrays.asList(listMap, deploymentMap),
+                "namespaceOverride",
+                "testNamespace"));
+    KubernetesDeployManifestDescription description = converter.convertDescription(inputMap);
+    assertThat(description.getManifests()).hasSize(3);
+    assertThat(description.getManifests().get(0).getNamespace()).isEqualTo("testNamespace");
+    assertThat(description.getManifests().get(1).getNamespace()).isEqualTo("");
+    assertThat(description.getManifests().get(2).getNamespace()).isEqualTo("testNamespace");
+  }
+
+  protected String getResourceAsString(String name) throws IOException {
+    try (InputStreamReader reader =
+        new InputStreamReader(
+            KubernetesManifest.class.getResourceAsStream(name), Charset.defaultCharset())) {
+      return CharStreams.toString(reader);
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/model/KubernetesJobStatusTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/model/KubernetesJobStatusTest.java
new file mode 100644
index 00000000000..a85cb2dbd8f
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/model/KubernetesJobStatusTest.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2021 Salesforce.com, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.model;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.mockito.Mockito.spy;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Resources;
+import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCacheDataConverter;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import io.kubernetes.client.openapi.models.V1Job;
+import io.kubernetes.client.openapi.models.V1Pod;
+import io.kubernetes.client.util.Yaml;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.junit.jupiter.api.Test;
+
+class KubernetesJobStatusTest {
+  @Test
+  void testCompletionDetailsForSuccessfulJobCompletion() {
+    // setup
+    KubernetesJobStatus kubernetesJobStatus = spy(getKubernetesJobStatus("successful-job.yml"));
+
+    // when
+    kubernetesJobStatus.captureFailureDetails();
+
+    // then
+    Map<String, String> result = kubernetesJobStatus.getCompletionDetails();
+    assertFalse(result.isEmpty());
+    assertThat(result.get("message")).isNullOrEmpty();
+    assertThat(result.get("exitCode")).isNullOrEmpty();
+    assertThat(result.get("signal")).isNullOrEmpty();
+    assertThat(result.get("reason")).isNullOrEmpty();
+
+    // we shouldn't see any failure details if the job was successful
+    assertNull(kubernetesJobStatus.getFailureDetails());
+  }
+
+  @Test
+  void testJobFailureDetailsForJobWithSinglePodWithFailedInitContainers() {
+    // setup
+    KubernetesJobStatus jobStatus = getKubernetesJobStatus("failed-job-init-container-error.yml");
+
+    // when
+    jobStatus.captureFailureDetails();
+
+    // then
+    assertThat(jobStatus.getMessage()).isEqualTo("Job has reached the specified backoff limit");
+    assertThat(jobStatus.getReason()).isEqualTo("BackoffLimitExceeded");
+    assertThat(jobStatus.getPods().size()).isEqualTo(1);
+    KubernetesJobStatus.PodStatus podStatus = jobStatus.getPods().get(0);
+    assertThat(podStatus.getContainerExecutionDetails().size()).isEqualTo(2);
+    Optional<KubernetesJobStatus.ContainerExecutionDetails> failedContainer =
+        podStatus.getContainerExecutionDetails().stream()
+            .filter(c -> c.getName().equals("init-myservice"))
+            .findFirst();
+
+    assertFalse(failedContainer.isEmpty());
+    assertThat(failedContainer.get().getLogs()).isEqualTo("foo");
+    assertThat(failedContainer.get().getStatus()).isEqualTo("Error");
+    assertThat(failedContainer.get().getExitCode()).isEqualTo("1");
+
+    assertThat(jobStatus.getFailureDetails())
+        .isEqualTo(
+            "Pod: 'hello' had errors.\n"
+                + " Container: 'init-myservice' exited with code: 1.\n"
+                + " Status: Error.\n"
+                + " Logs: foo");
+  }
+
+  @Test
+  void testJobFailureDetailsForJobWithSinglePodWithFailedAppContainers() {
+    // setup
+    KubernetesJobStatus jobStatus = getKubernetesJobStatus("failed-job.yml");
+
+    // when
+    jobStatus.captureFailureDetails();
+
+    // then
+    assertThat(jobStatus.getMessage()).isEqualTo("Job has reached the specified backoff limit");
+    assertThat(jobStatus.getReason()).isEqualTo("BackoffLimitExceeded");
+    assertThat(jobStatus.getPods().size()).isEqualTo(1);
+    KubernetesJobStatus.PodStatus podStatus = jobStatus.getPods().get(0);
+    assertThat(podStatus.getContainerExecutionDetails().size()).isEqualTo(2);
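+    // find the failed application container among the pod's two containers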
+    Optional<KubernetesJobStatus.ContainerExecutionDetails> failedContainer =
+        podStatus.getContainerExecutionDetails().stream()
+            .filter(c -> c.getName().equals("some-container-name"))
+            .findFirst();
+
+    assertFalse(failedContainer.isEmpty());
+    assertThat(failedContainer.get().getLogs())
+        .isEqualTo(
+            "Failed to download the file: foo.\n"
+                + "GET Request failed with status code', 404, 'Expected', )\n");
+    assertThat(failedContainer.get().getStatus()).isEqualTo("Error");
+    assertThat(failedContainer.get().getExitCode()).isEqualTo("1");
+
+    assertThat(jobStatus.getFailureDetails())
+        .isEqualTo(
+            "Pod: 'hello' had errors.\n"
+                + " Container: 'some-container-name' exited with code: 1.\n"
+                + " Status: Error.\n"
+                + " Logs: Failed to download the file: foo.\n"
+                + "GET Request failed with status code', 404, 'Expected', )\n");
+  }
+
+  @Test
+  void testJobFailureDetailsForJobFailureOnlyWithNoContainerLogs() {
+    // setup
+    KubernetesManifest testManifest =
+        Yaml.loadAs(getResource("runjob-deadline-exceeded.yml"), KubernetesManifest.class);
+    V1Job job = KubernetesCacheDataConverter.getResource(testManifest, V1Job.class);
+
+    KubernetesJobStatus jobStatus = new KubernetesJobStatus(job, "mock-account");
+
+    List<KubernetesManifest> pods = ImmutableList.of(testManifest);
+    jobStatus.setPods(
+        pods.stream()
+            .map(
+                p -> {
+                  V1Pod pod = KubernetesCacheDataConverter.getResource(p, V1Pod.class);
+                  return new KubernetesJobStatus.PodStatus(pod);
+                })
+            .collect(Collectors.toList()));
+
+    // when
+    jobStatus.captureFailureDetails();
+
+    // then
+    assertThat(jobStatus.getMessage()).isEqualTo("Job was active longer than specified deadline");
+    assertThat(jobStatus.getReason()).isEqualTo("DeadlineExceeded");
+    assertNull(jobStatus.getFailureDetails());
+  }
+
+  private String getResource(String name) {
+    try {
+      return Resources.toString(
+          KubernetesJobStatusTest.class.getResource(name), StandardCharsets.UTF_8);
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  private KubernetesJobStatus getKubernetesJobStatus(String manifestPath) {
+    KubernetesManifest testManifest =
+        Yaml.loadAs(getResource("base.yml"), KubernetesManifest.class);
+
+    KubernetesManifest overlay = Yaml.loadAs(getResource(manifestPath), KubernetesManifest.class);
+    testManifest.putAll(overlay);
+
+    V1Job job = KubernetesCacheDataConverter.getResource(testManifest, V1Job.class);
+    KubernetesJobStatus kubernetesJobStatus = new KubernetesJobStatus(job, "mock-account");
+
+    List<KubernetesManifest> pods = ImmutableList.of(testManifest);
+    kubernetesJobStatus.setPods(
+        pods.stream()
+            .map(
+                p -> {
+                  V1Pod pod = KubernetesCacheDataConverter.getResource(p, V1Pod.class);
+                  return new KubernetesJobStatus.PodStatus(pod);
+                })
+            .collect(Collectors.toList()));
+    return kubernetesJobStatus;
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesManifestNamerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesManifestNamerTest.java
new file mode 100644
index 00000000000..f9854411aa7
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesManifestNamerTest.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 Salesforce.com, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.names; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiVersion; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.HashMap; +import org.junit.jupiter.api.Test; + +public class KubernetesManifestNamerTest { + @Test + public void testDeriveMoniker() { + // setup: + KubernetesManifestNamer kubernetesManifestNamer = new KubernetesManifestNamer(); + + // when: + Moniker moniker = kubernetesManifestNamer.deriveMoniker(deploymentManifest("testapp-abc")); + + // then: + assertThat(moniker.getApp()).isEqualTo("testapp"); + } + + /** A test Deployment manifest */ + private static KubernetesManifest deploymentManifest(String deploymentName) { + KubernetesManifest deployment = new KubernetesManifest(); + deployment.put("metadata", new HashMap<>()); + deployment.setNamespace("namespace"); + deployment.setKind(KubernetesKind.DEPLOYMENT); + deployment.setApiVersion(KubernetesApiVersion.APPS_V1); + deployment.setName(deploymentName); + return deployment; + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesNamerRegistryTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesNamerRegistryTest.java new file mode 100644 index 00000000000..d7ef20b22b7 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/names/KubernetesNamerRegistryTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.names;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest;
+import com.netflix.spinnaker.clouddriver.names.NamingStrategy;
+import com.netflix.spinnaker.moniker.Moniker;
+import org.junit.jupiter.api.Test;
+
+final class KubernetesNamerRegistryTest {
+  private static final NamingStrategy<KubernetesManifest> DEFAULT_NAMER =
+      new KubernetesManifestNamer();
+  private static final NamingStrategy<KubernetesManifest> CUSTOM_NAMER = new CustomNamer();
+
+  private static final KubernetesNamerRegistry registry =
+      new KubernetesNamerRegistry(ImmutableList.of(DEFAULT_NAMER, CUSTOM_NAMER));
+
+  @Test
+  void returnsDefaultNamer() {
+    assertThat(registry.get("kubernetesAnnotations")).isSameAs(DEFAULT_NAMER);
+  }
+
+  @Test
+  void returnsDefaultNamerCaseInsensitive() {
+    assertThat(registry.get("KubeRneteSannotaTions")).isSameAs(DEFAULT_NAMER);
+  }
+
+  @Test
+  void throwsOnMissingNamer() {
+    assertThatThrownBy(() -> registry.get("missing")).isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  void returnsCustomNamer() {
+    assertThat(registry.get("customNamer")).isSameAs(CUSTOM_NAMER);
+  }
+
+  @Test
+  void returnsCustomNamerCaseInsensitive() {
+    assertThat(registry.get("CUSTOMNAmeR")).isSameAs(CUSTOM_NAMER);
+  }
+
+  private static class CustomNamer implements NamingStrategy<KubernetesManifest> {
+    @Override
+    public String getName() {
+      return "customNamer";
+    }
+
+    @Override
+    public void applyMoniker(KubernetesManifest obj, Moniker moniker) {}
+
+    @Override
+    public Moniker deriveMoniker(KubernetesManifest obj) {
+      return null;
+    }
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/KubernetesDeleteManifestOperationTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/KubernetesDeleteManifestOperationTest.java
new file mode 100644
index 00000000000..1a17fbb5bf8
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/KubernetesDeleteManifestOperationTest.java
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2021 Salesforce, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.type.MapType; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.converter.manifest.KubernetesDeleteManifestConverter; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeleteManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesCustomResourceDefinitionHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesCustomResourceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesReplicaSetHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesServiceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesDeleteManifestOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import io.kubernetes.client.openapi.models.V1DeleteOptions; +import java.io.IOException; +import java.util.Map; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +/** Test the deleteManifest stage. 
*/
+public class KubernetesDeleteManifestOperationTest {
+  private static final GlobalResourcePropertyRegistry resourcePropertyRegistry =
+      new GlobalResourcePropertyRegistry(
+          ImmutableList.of(
+              new KubernetesReplicaSetHandler(),
+              new KubernetesServiceHandler(),
+              new KubernetesCustomResourceDefinitionHandler()),
+          new KubernetesUnregisteredCustomResourceHandler());
+  private static final KubernetesKind customResource =
+      KubernetesKind.from(
+          "MyCRD", KubernetesApiGroup.fromString("foo.com")); // arbitrary custom/non-native kind
+  private static final KubernetesHandler customResourceHandler =
+      new KubernetesCustomResourceHandler(customResource);
+  private static KubernetesDeleteManifestConverter converter;
+  private static ObjectMapper mapper;
+  private static MapType mapType;
+
+  @BeforeAll
+  static void setup() {
+    CredentialsRepository<KubernetesNamedAccountCredentials> credentialsRepository =
+        Mockito.mock(CredentialsRepository.class);
+    when(credentialsRepository.getOne(any(String.class)))
+        .thenReturn(Mockito.mock(KubernetesNamedAccountCredentials.class));
+
+    converter = new KubernetesDeleteManifestConverter();
+    converter.setCredentialsRepository(credentialsRepository);
+
+    mapper = converter.getObjectMapper();
+    mapType = mapper.getTypeFactory().constructMapType(Map.class, String.class, Object.class);
+
+    // Add a handler for custom resources to demonstrate that it's possible to delete them.
+    resourcePropertyRegistry.updateCrdProperties(ImmutableList.of(customResourceHandler));
+  }
+
+  @BeforeEach
+  void setupTest() {
+    // Store a Mock Task in a thread before each test
+    // so TaskRepository will be able to get it during delete to update the Task
+    TaskRepository.threadLocalTask.set(new DefaultTask("task-id"));
+  }
+
+  @Test
+  public void deleteUnregisteredCRD() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"manifestName\": \"customResourceDefinition mycrd.foo.com\""
+            + " }";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    KubernetesCredentials mockKubernetesCreds = description.getCredentials().getCredentials();
+
+    new KubernetesDeleteManifestOperation(description).operate(ImmutableList.of());
+
+    ArgumentCaptor<KubernetesKind> kindCaptor = ArgumentCaptor.forClass(KubernetesKind.class);
+    ArgumentCaptor<String> namespaceCaptor = ArgumentCaptor.forClass(String.class);
+    verify(mockKubernetesCreds)
+        .delete(
+            kindCaptor.capture(),
+            namespaceCaptor.capture(),
+            anyString(),
+            any(KubernetesSelectorList.class),
+            any(V1DeleteOptions.class),
+            any(Task.class),
+            anyString());
+
+    assertEquals(KubernetesKind.CUSTOM_RESOURCE_DEFINITION, kindCaptor.getValue());
+  }
+
+  @ParameterizedTest(name = "deleteUnregisteredCustomResource useNamespace = {0}")
+  @ValueSource(booleans = {true, false})
+  public void deleteUnregisteredCustomResource(boolean useNamespace) throws IOException {
+    String namespace = "";
+    String namespaceJson = "";
+
+    if (useNamespace) {
+      namespace = "test-namespace";
+      namespaceJson = ", \"location\": \"" + namespace + "\"";
+    }
+
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"manifestName\": \""
+            + customResource.toString()
+            + " my-custom-resource\""
+            + namespaceJson
+            + " }";
+
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    KubernetesCredentials mockKubernetesCreds = description.getCredentials().getCredentials();
+
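+    // run the operation, then capture the kind and namespace passed to delete()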
+    new KubernetesDeleteManifestOperation(description).operate(ImmutableList.of());
+
+    ArgumentCaptor<KubernetesKind> kindCaptor = ArgumentCaptor.forClass(KubernetesKind.class);
+    ArgumentCaptor<String> namespaceCaptor = ArgumentCaptor.forClass(String.class);
+    verify(mockKubernetesCreds)
+        .delete(
+            kindCaptor.capture(),
+            namespaceCaptor.capture(),
+            anyString(),
+            any(KubernetesSelectorList.class),
+            any(V1DeleteOptions.class),
+            any(Task.class),
+            anyString());
+
+    assertEquals(customResource, kindCaptor.getValue());
+    assertEquals(namespace, namespaceCaptor.getValue());
+  }
+
+  @Test
+  public void deleteUnregisteredCustomResourceViaLabelSelector() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \""
+            + customResource.toString()
+            + "\" ],"
+            + " \"labelSelectors\": {"
+            + " \"selectors\": ["
+            + " {"
+            + " \"key\": \"foo\","
+            + " \"kind\": \"EQUALS\","
+            + " \"values\": ["
+            + " \"bar\""
+            + " ]"
+            + " }"
+            + " ]"
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    KubernetesCredentials mockKubernetesCreds = description.getCredentials().getCredentials();
+
+    new KubernetesDeleteManifestOperation(description).operate(ImmutableList.of());
+
+    ArgumentCaptor<KubernetesKind> kindCaptor = ArgumentCaptor.forClass(KubernetesKind.class);
+    ArgumentCaptor<KubernetesSelectorList> labelSelectorsCaptor =
+        ArgumentCaptor.forClass(KubernetesSelectorList.class);
+    verify(mockKubernetesCreds)
+        .delete(
+            kindCaptor.capture(),
+            anyString(),
+            anyString(),
+            labelSelectorsCaptor.capture(),
+            any(V1DeleteOptions.class),
+            any(Task.class),
+            anyString());
+
+    assertEquals(customResource, kindCaptor.getValue());
+    assertEquals(
+        KubernetesSelectorList.fromMatchLabels(Map.of("foo", "bar")),
+        labelSelectorsCaptor.getValue());
+  }
+
+  @Test
+  public void orphanDependentsTrue() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": ["
+            + " \"deployment\""
+            + " ],"
+            + " \"options\": {"
+            + " \"orphanDependents\": \"true\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals("orphan", deleteOptions.getPropagationPolicy());
+  }
+
+  @Test
+  public void orphanDependentsFalse() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \"deployment\" ],"
+            + " \"options\": {"
+            + " \"orphanDependents\": \"false\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals("background", deleteOptions.getPropagationPolicy());
+  }
+
+  @Test
+  public void noOptionsDefault() throws IOException {
+    String pipelineJSON =
+        "{ \"account\": \"kubernetes-account\"," + " \"kinds\": [ \"deployment\" ]" + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertNull(deleteOptions.getOrphanDependents());
+  }
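+
+  // The 'cascading' tests below document the mapping to a propagation policy:
+  // "true" maps to "background", "false" maps to "orphan", and any other literal
+  // string is passed through unchanged; orphanDependents, when present, wins.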
+
+  @Test
+  public void cascadingTrue() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \"deployment\" ],"
+            + " \"options\": {"
+            + " \"cascading\": \"true\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals("background", deleteOptions.getPropagationPolicy());
+  }
+
+  @Test
+  public void cascadingFalse() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \"deployment\" ],"
+            + " \"options\": {"
+            + " \"cascading\": \"false\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals("orphan", deleteOptions.getPropagationPolicy());
+  }
+
+  @ParameterizedTest(name = "cascadingLiteralValues {0}")
+  @ValueSource(strings = {"foreground", "background", "orphan", "bogus"})
+  public void cascadingStringValues(String cascadingValue) throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \"deployment\" ],"
+            + " \"options\": {"
+            + " \"cascading\": \""
+            + cascadingValue
+            + "\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals(cascadingValue, deleteOptions.getPropagationPolicy());
+  }
+
+  @Test
+  // Set both the orphanDependents and cascading options and show that
+  // the orphanDependents option takes precedence
+  public void cascadingTrueOrphanDependentsPrecedenceTest() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \"deployment\" ],"
+            + " \"options\": {"
+            + " \"cascading\": \"true\","
+            + " \"orphanDependents\": \"true\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals("orphan", deleteOptions.getPropagationPolicy());
+  }
+
+  @Test
+  public void cascadingFalseOrphanDependentsPrecedenceTest() throws IOException {
+    String pipelineJSON =
+        "{ "
+            + " \"account\": \"kubernetes-account\","
+            + " \"kinds\": [ \"deployment\" ],"
+            + " \"options\": {"
+            + " \"cascading\": \"false\","
+            + " \"orphanDependents\": \"false\""
+            + " }"
+            + "}";
+    Map<String, Object> pipeline = mapper.readValue(pipelineJSON, mapType);
+
+    KubernetesDeleteManifestDescription description = buildDeleteManifestDescription(pipeline);
+
+    V1DeleteOptions deleteOptions = deleteAndCaptureDeleteOptions(description);
+    assertEquals("background", deleteOptions.getPropagationPolicy());
+  }
+
+  private static V1DeleteOptions deleteAndCaptureDeleteOptions(
+      KubernetesDeleteManifestDescription description) {
+    KubernetesCredentials mockKubernetesCreds = description.getCredentials().getCredentials();
+
+    new KubernetesDeleteManifestOperation(description).operate(ImmutableList.of());
+    ArgumentCaptor<V1DeleteOptions> deleteOptionsCaptor =
+        ArgumentCaptor.forClass(V1DeleteOptions.class);
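+    // capture the V1DeleteOptions handed to delete() so each test can assert on
+    // the resulting propagation policy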
+    verify(mockKubernetesCreds)
+        .delete(
+            any(KubernetesKind.class),
+            anyString(),
+            anyString(),
+            any(KubernetesSelectorList.class),
+            deleteOptionsCaptor.capture(),
+            any(Task.class),
+            anyString());
+
+    return deleteOptionsCaptor.getValue();
+  }
+
+  private static KubernetesDeleteManifestDescription buildDeleteManifestDescription(
+      Map<String, Object> inputMap) {
+    KubernetesDeleteManifestDescription description = converter.convertDescription(inputMap);
+    description.setCredentials(getNamedAccountCredentials());
+    return description;
+  }
+
+  private static KubernetesNamedAccountCredentials getNamedAccountCredentials() {
+    ManagedAccount managedAccount = new ManagedAccount();
+    managedAccount.setName("my-account");
+
+    KubernetesCredentials mockCredentials = mock(KubernetesCredentials.class);
+    when(mockCredentials.getResourcePropertyRegistry()).thenReturn(resourcePropertyRegistry);
+
+    KubernetesCredentials.Factory credentialFactory = mock(KubernetesCredentials.Factory.class);
+    when(credentialFactory.build(managedAccount)).thenReturn(mockCredentials);
+
+    return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory);
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/KubernetesDeployManifestOperationTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/KubernetesDeployManifestOperationTest.java
new file mode 100644
index 00000000000..5b05816d088
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/KubernetesDeployManifestOperationTest.java
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2020 Google, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ResourceVersioner; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.ArtifactProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesCoordinates; +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesDeployManifestDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestAnnotater; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestTraffic; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.*; +import com.netflix.spinnaker.clouddriver.kubernetes.op.manifest.KubernetesDeployManifestOperation; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelector; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.moniker.Moniker; +import com.netflix.spinnaker.moniker.Namer; +import io.kubernetes.client.openapi.models.V1DeleteOptions; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +final class KubernetesDeployManifestOperationTest { + private static final String DEFAULT_NAMESPACE = "default-namespace"; + private static final ResourcePropertyRegistry resourcePropertyRegistry = + new 
GlobalResourcePropertyRegistry(
+          ImmutableList.of(
+              new KubernetesReplicaSetHandler(),
+              new KubernetesServiceHandler(),
+              new KubernetesConfigMapHandler()),
+          new KubernetesUnregisteredCustomResourceHandler());
+  private static final Namer<KubernetesManifest> NAMER = new KubernetesManifestNamer();
+  private static final String ACCOUNT = "my-account";
+
+  @BeforeEach
+  void setTask() {
+    TaskRepository.threadLocalTask.set(new DefaultTask("task-id"));
+  }
+
+  @Test
+  void replicaSetDeployerInvoked() {
+    String namespace = "my-namespace";
+    KubernetesDeployManifestDescription deployManifestDescription =
+        baseDeployDescription("deploy/replicaset.yml");
+    OperationResult result = deploy(deployManifestDescription);
+
+    assertThat(result.getManifestNamesByNamespace()).containsOnlyKeys(namespace);
+    assertThat(result.getManifestNamesByNamespace().get(namespace))
+        .containsExactlyInAnyOrder("replicaSet my-name-v000");
+  }
+
+  @Test
+  void replicaSetDeployerUsesDefaultNamespace() {
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset-no-namespace.yml");
+    OperationResult result = deploy(description);
+
+    assertThat(result.getManifestNamesByNamespace()).containsOnlyKeys(DEFAULT_NAMESPACE);
+    assertThat(result.getManifestNamesByNamespace().get(DEFAULT_NAMESPACE))
+        .containsExactlyInAnyOrder("replicaSet my-name-v000");
+  }
+
+  @Test
+  void sendsTrafficWhenEnabledTrafficTrue() {
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset.yml")
+            .setServices(ImmutableList.of("service my-service"))
+            .setEnableTraffic(true);
+    OperationResult result = deploy(description);
+
+    KubernetesManifest manifest = Iterables.getOnlyElement(result.getManifests());
+    assertThat(manifest.getSpecTemplateLabels().orElse(manifest.getLabels()))
+        .contains(entry("selector-key", "selector-value"));
+
+    KubernetesManifestTraffic traffic = KubernetesManifestAnnotater.getTraffic(manifest);
+    assertThat(traffic.getLoadBalancers()).containsExactly("service my-service");
+  }
+
+  @Test
+  void doesNotSendTrafficWhenEnableTrafficFalse() {
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset.yml")
+            .setServices(ImmutableList.of("service my-service"))
+            .setEnableTraffic(false);
+    OperationResult result = deploy(description);
+
+    KubernetesManifest manifest = Iterables.getOnlyElement(result.getManifests());
+    assertThat(manifest.getSpecTemplateLabels().orElse(manifest.getLabels()))
+        .doesNotContain(entry("selector-key", "selector-value"));
+
+    KubernetesManifestTraffic traffic = KubernetesManifestAnnotater.getTraffic(manifest);
+    assertThat(traffic.getLoadBalancers()).containsExactly("service my-service");
+  }
+
+  @Test
+  void doesNotSendTrafficWhenEnableTrafficTrueAndCantHandleTraffic() {
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/configmap.yml")
+            .setServices(ImmutableList.of("service my-service"))
+            .setEnableTraffic(true);
+    OperationResult result = deploy(description);
+
+    KubernetesManifest manifest = Iterables.getOnlyElement(result.getManifests());
+    assertThat(manifest.getSpecTemplateLabels().orElse(manifest.getLabels()))
+        .doesNotContain(entry("selector-key", "selector-value"));
+
+    KubernetesManifestTraffic traffic = KubernetesManifestAnnotater.getTraffic(manifest);
+    assertThat(traffic.getLoadBalancers()).doesNotContain("service my-service");
+  }
+
+  @Test
+  void failsWhenServiceHasNoSelector() {
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset.yml")
+            .setServices(ImmutableList.of("service my-service-no-selector"))
+            .setEnableTraffic(true);
+    assertThatThrownBy(() -> deploy(description)).isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  void failsWhenServiceSelectorOverlapsWithTargetLabels() {
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset-overlapping-selector.yml")
+            .setServices(ImmutableList.of("service my-service"))
+            .setEnableTraffic(true);
+    assertThatThrownBy(() -> deploy(description)).isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  void appliesSpecTemplateLabelsWhenSkipSpecTemplateLabelsFalse() {
+    // When skipSpecTemplateLabels is false, defaults to the standard flow,
+    // where applyMoniker() applies the Kubernetes and Moniker labels to both the
+    // manifest's metadata.labels and the spec.template.metadata.labels.
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset-no-namespace.yml")
+            .setSkipSpecTemplateLabels(false);
+    OperationResult result = deploy(description);
+
+    KubernetesManifest manifest = Iterables.getOnlyElement(result.getManifests());
+    KubernetesCredentials credentials = description.getCredentials().getCredentials();
+
+    // Verifying that the getNamer() method is called only once
+    // (credentials.getNamer().applyMoniker()).
+    verify(credentials, times(1)).getNamer();
+    // Assert that the Kubernetes labels are also applied to the spec template labels. The test
+    // manifest only has "app: nginx" within its spec.template.metadata.labels.
+    assertThat(manifest.getSpecTemplateLabels()).isPresent();
+    manifest
+        .getSpecTemplateLabels()
+        .ifPresent(
+            l ->
+                assertThat(l)
+                    .contains(
+                        entry("app", "nginx"), entry("app.kubernetes.io/managed-by", "spinnaker")));
+    // Verify that the Kubernetes and Moniker labels are applied to the metadata labels.
+    assertThat(manifest.getLabels()).contains(entry("app.kubernetes.io/managed-by", "spinnaker"));
+  }
+
+  @Test
+  void doesNotApplySpecTemplateLabelsWhenSkipSpecTemplateLabelsTrue() {
+    // When skipSpecTemplateLabels is true, applyMoniker() skips applying
+    // the Kubernetes and Moniker labels to the manifest's spec.template.metadata.labels.
+    KubernetesDeployManifestDescription description =
+        baseDeployDescription("deploy/replicaset-no-namespace.yml").setSkipSpecTemplateLabels(true);
+    OperationResult result = deploy(description);
+
+    KubernetesManifest manifest = Iterables.getOnlyElement(result.getManifests());
+    KubernetesCredentials credentials = description.getCredentials().getCredentials();
+
+    // Verifying that the getNamer() method is called only once
+    // (credentials.getNamer().applyMoniker()).
+    verify(credentials, times(1)).getNamer();
+    // Assert that the spec template labels only has the "app: nginx" from the manifest and that no
+    // other labels were applied.
+    assertThat(manifest.getSpecTemplateLabels()).isPresent();
+    manifest
+        .getSpecTemplateLabels()
+        .ifPresent(l -> assertThat(l).containsExactly(entry("app", "nginx")));
+    // Verify that the Kubernetes and Moniker labels are still applied to the metadata labels.
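+    // (only spec.template labeling is skipped; metadata labeling is unconditional)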
+ assertThat(manifest.getLabels()).contains(entry("app.kubernetes.io/managed-by", "spinnaker")); + } + + @Test + void deploysWithArtifactBindingDisabled() { + KubernetesDeployManifestDescription description = + baseDeployDescription("deploy/replicaset-configmap.yml"); + description.setEnableArtifactBinding(false); + description.setRequiredArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:required") + .build())); + description.setOptionalArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:optional") + .build())); + OperationResult result = deploy(description); + + assertThat(result.getBoundArtifacts().size()).isEqualTo(1); + assertThat(result.getBoundArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000"); + assertThat(result.getCreatedArtifacts().size()).isEqualTo(2); + assertThat(result.getCreatedArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "my-name-v000"); + } + + @Test + void deploysWithArtifactBindingUnspecified() { + KubernetesDeployManifestDescription description = + baseDeployDescription("deploy/replicaset-configmap.yml"); + description.setRequiredArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:required") + .build())); + description.setOptionalArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:optional") + .build())); + OperationResult result = deploy(description); + + assertThat(result.getBoundArtifacts().size()).isEqualTo(2); + assertThat(result.getBoundArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "index.docker.io/library/nginx:required"); + assertThat(result.getCreatedArtifacts().size()).isEqualTo(2); + assertThat(result.getCreatedArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "my-name-v000"); + } + + @Test + void deploysBindingRequiredArtifact() { + KubernetesDeployManifestDescription description = + baseDeployDescription("deploy/replicaset-configmap.yml"); + description.setEnableArtifactBinding(true); + description.setRequiredArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:required") + .build())); + description.setOptionalArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:optional") + .build())); + OperationResult result = deploy(description); + + assertThat(result.getBoundArtifacts().size()).isEqualTo(2); + assertThat(result.getBoundArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "index.docker.io/library/nginx:required"); + assertThat(result.getCreatedArtifacts().size()).isEqualTo(2); + assertThat(result.getCreatedArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "my-name-v000"); + } + + @Test + void deploysBindingOptionalArtifact() { + KubernetesDeployManifestDescription description = + baseDeployDescription("deploy/replicaset-configmap.yml"); 
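+ // With binding enabled, even an artifact supplied only as optional gets bound, so the + // expected bound artifacts below include the optional nginx image.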
+ description.setEnableArtifactBinding(true); + description.setOptionalArtifacts( + ImmutableList.of( + Artifact.builder() + .name("index.docker.io/library/nginx") + .type("docker/image") + .reference("index.docker.io/library/nginx:optional") + .build())); + OperationResult result = deploy(description); + + assertThat(result.getBoundArtifacts().size()).isEqualTo(2); + assertThat(result.getBoundArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "index.docker.io/library/nginx:optional"); + assertThat(result.getCreatedArtifacts().size()).isEqualTo(2); + assertThat(result.getCreatedArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v000", "my-name-v000"); + } + + @Test + void deploysBindingOptionalArtifactMultiNamespace() { + KubernetesDeployManifestDescription description = + baseDeployDescription("deploy/replicaset-volumes.yml"); + description.setEnableArtifactBinding(true); + description.setOptionalArtifacts( + ImmutableList.of( + Artifact.builder() + .name("myconfig") + .type("kubernetes/configMap") + .location("other-namespace") + .reference("myconfig-v002") + .build(), + Artifact.builder() + .name("myconfig") + .type("kubernetes/configMap") + .location("my-namespace") + .reference("myconfig-v001") + .build())); + OperationResult result = deploy(description); + + assertThat(result.getBoundArtifacts().size()).isEqualTo(1); + assertThat(result.getBoundArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v001"); + assertThat(result.getCreatedArtifacts().size()).isEqualTo(1); + assertThat(result.getCreatedArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("my-name-v000"); + } + + @Test + void deploysBindingUnmodifiedConfigMap() { + String manifestFile = "deploy/replicaset-configmap.yml"; + KubernetesDeployManifestDescription description = baseDeployDescription(manifestFile); + KubernetesManifest existingConfigMap = + ManifestFetcher.getManifest(KubernetesDeployManifestOperationTest.class, manifestFile) + .get(1); + existingConfigMap.setName("myconfig-v001"); + Map<KubernetesKind, Artifact> existingArtifacts = + ImmutableMap.of( + KubernetesKind.CONFIG_MAP, + Artifact.builder() + .type("kubernetes/configMap") + .name("myconfig") + .version("v001") + .reference("myconfig-v001") + .metadata(ImmutableMap.of("lastAppliedConfiguration", existingConfigMap)) + .build()); + OperationResult result = deploy(description, existingArtifacts); + + assertThat(result.getBoundArtifacts().size()).isEqualTo(1); + assertThat(result.getBoundArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("myconfig-v001"); + assertThat(result.getCreatedArtifacts().size()).isEqualTo(2); + assertThat(result.getCreatedArtifacts().stream().map(Artifact::getReference)) + .containsExactlyInAnyOrder("my-name-v000", "myconfig-v001"); + } + + @Test + void deploysCrdWhereSpecIsList() { + KubernetesDeployManifestDescription deployManifestDescription = + baseDeployDescription("deploy/crd-manifest-spec-is-list.yml"); + deploy(deployManifestDescription); + } + + @ParameterizedTest( + name = "{index} ==> deployWhenNothingDeploys: useLabelSelector {0}, allowNothingSelected {1}") + @CsvSource({"false,false", "false,true", "true,false", "true,true"}) + void deployWhenNothingDeploys(boolean useLabelSelector, boolean allowNothingSelected) { + // Testing the label selector logic is tough. Pass true to + // baseDeployDescription so KubernetesCredentials.deploy returns + // null.
That's what happens when kubectl apply doesn't actually apply + // anything, i.e. when the label selector doesn't match any of the + // input manifests. + // + // Since we depend on kubectl's logic to do the filtering, really all + // we need to test here is that we throw an exception if kubectl never + // deploys anything. + + // The manifest to deploy is arbitrary. + KubernetesDeployManifestDescription deployManifestDescription = + baseDeployDescription("deploy/configmaps-with-selectors.yaml", true); + deployManifestDescription.setAllowNothingSelected(allowNothingSelected); + + KubernetesSelectorList selectorList = deployManifestDescription.getLabelSelectors(); + if (useLabelSelector) { + // Because we're mocking the return value of KubernetesCredentials.deploy (to + // null), the actual contents of the label selector list are arbitrary as + // long as there is at least one selector. + selectorList.addSelector( + new KubernetesSelector( + KubernetesSelector.Kind.EQUALS, "sample-configmap-selector", List.of("one"))); + } + + // With a label selector, and therefore no KubernetesManifest objects returned from deploy, + // expect an exception that nothing has been deployed if allowNothingSelected is false. + if (useLabelSelector && !allowNothingSelected) { + assertThatThrownBy(() -> deploy(deployManifestDescription)) + .hasMessage( + "nothing deployed to account " + + ACCOUNT + + " with label selector(s) " + + selectorList.toString()); + } else { + // Without a label selector (or if allowNothingSelected is true), we'd + // normally expect a KubectlException because kubectl fails, except we're + // using a mock KubernetesCredentials class, so KubectlJobExecutor isn't + // involved, and so expect no manifests. + OperationResult result = deploy(deployManifestDescription); + assertThat(result.getManifests()).isEmpty(); + } + + // Although we're really only responsible for verifying that + // KubernetesDeployManifestOperation calls the appropriate method on the + // relevant KubernetesHandler, since we're testing with real handler objects + // and not mocks, let's verify that the appropriate method on + // KubernetesCredentials gets called. + KubernetesCredentials credentials = deployManifestDescription.getCredentials().getCredentials(); + + // two calls because there are two objects in deploy/configmaps-with-selectors.yaml + verify(credentials, times(2)) + .deploy(any(KubernetesManifest.class), any(Task.class), anyString(), eq(selectorList)); + } + + @Test + void replaceStrategyWithLabelSelector() { + // The manifest to deploy is arbitrary, as long as it has a + // strategy.spinnaker.io/replace: "true" annotation. It's helpful that + // there are multiple manifests, with the replace-strategy one coming last. + // That way we can more strongly assert that the earlier ones didn't get + // deployed. + KubernetesDeployManifestDescription deployManifestDescription = + baseDeployDescription("deploy/replace-strategy.yaml"); + + KubernetesSelectorList selectorList = deployManifestDescription.getLabelSelectors(); + selectorList.addSelector( + new KubernetesSelector( + KubernetesSelector.Kind.EQUALS, "sample-configmap-selector", List.of("one"))); + + assertThatThrownBy(() -> deploy(deployManifestDescription)) + .hasMessage("label selectors not supported with replace strategy, not deploying"); + + // Make sure no methods to actually deploy anything got called. The most direct + // approach would be to grab all the relevant KubernetesHandler objects from + // deployManifestDescription. Trouble is, those aren't mocks.
So, go one + // level deeper to the KubernetesCredentials level where we do have mocks + // and couple the tests to implementation details of CanDeploy.deploy. + KubernetesCredentials credentials = deployManifestDescription.getCredentials().getCredentials(); + + verify(credentials, never()) + .deploy( + any(KubernetesManifest.class), + any(Task.class), + anyString(), + any(KubernetesSelectorList.class)); + verify(credentials, never()) + .createOrReplace(any(KubernetesManifest.class), any(Task.class), anyString()); + verify(credentials, never()) + .delete( + any(KubernetesKind.class), + anyString(), + anyString(), + any(KubernetesSelectorList.class), + any(V1DeleteOptions.class), + any(Task.class), + anyString()); + } + + @Test + void deployEmptyResource() { + KubernetesDeployManifestDescription deployManifestDescription = + baseDeployDescription("deploy/empty-resource.yml"); + deploy(deployManifestDescription); + } + + private static KubernetesDeployManifestDescription baseDeployDescription(String manifest) { + return baseDeployDescription(manifest, false); + } + + private static KubernetesDeployManifestDescription baseDeployDescription( + String manifest, boolean deployReturnsNull) { + KubernetesDeployManifestDescription deployManifestDescription = + new KubernetesDeployManifestDescription() + .setManifests( + ManifestFetcher.getManifest(KubernetesDeployManifestOperationTest.class, manifest)) + .setMoniker(new Moniker()) + .setSource(KubernetesDeployManifestDescription.Source.text); + deployManifestDescription.setAccount(ACCOUNT); + deployManifestDescription.setCredentials(getNamedAccountCredentials(deployReturnsNull)); + return deployManifestDescription; + } + + private static KubernetesNamedAccountCredentials getNamedAccountCredentials( + boolean deployReturnsNull) { + ManagedAccount managedAccount = new ManagedAccount(); + managedAccount.setName("my-account"); + + NamerRegistry.lookup() + .withProvider(KubernetesCloudProvider.ID) + .withAccount(managedAccount.getName()) + .setNamer(KubernetesManifest.class, new KubernetesManifestNamer()); + + KubernetesCredentials mockCredentials = getMockKubernetesCredentials(deployReturnsNull); + KubernetesCredentials.Factory credentialFactory = mock(KubernetesCredentials.Factory.class); + when(credentialFactory.build(managedAccount)).thenReturn(mockCredentials); + return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory); + } + + private static KubernetesCredentials getMockKubernetesCredentials(boolean deployReturnsNull) { + KubernetesCredentials credentialsMock = mock(KubernetesCredentials.class); + when(credentialsMock.getKindProperties(any(KubernetesKind.class))) + .thenAnswer( + invocation -> + KubernetesKindProperties.withDefaultProperties( + invocation.getArgument(0, KubernetesKind.class))); + when(credentialsMock.getResourcePropertyRegistry()).thenReturn(resourcePropertyRegistry); + when(credentialsMock.get( + KubernetesCoordinates.builder() + .kind(KubernetesKind.SERVICE) + .namespace("my-namespace") + .name("my-service") + .build())) + .thenReturn( + ManifestFetcher.getManifest( + KubernetesDeployManifestOperationTest.class, "deploy/service.yml") + .get(0)); + when(credentialsMock.get( + KubernetesCoordinates.builder() + .kind(KubernetesKind.SERVICE) + .namespace("my-namespace") + .name("my-service-no-selector") + .build())) + .thenReturn( + ManifestFetcher.getManifest( + KubernetesDeployManifestOperationTest.class, "deploy/service-no-selector.yml") + .get(0)); + if (!deployReturnsNull) { + when(credentialsMock.deploy( 
+ any(KubernetesManifest.class), + any(Task.class), + anyString(), + any(KubernetesSelectorList.class))) + .thenAnswer( + invocation -> { + // This simulates the fact that the Kubernetes API will add the default namespace if + // none is supplied on the manifest. + KubernetesManifest result = + invocation.getArgument(0, KubernetesManifest.class).clone(); + if (Strings.isNullOrEmpty(result.getNamespace())) { + result.setNamespace(DEFAULT_NAMESPACE); + } + return result; + }); + } + when(credentialsMock.getNamer()).thenReturn(NAMER); + return credentialsMock; + } + + private static OperationResult deploy(KubernetesDeployManifestDescription description) { + ArtifactProvider artifactProvider = mock(ArtifactProvider.class); + when(artifactProvider.getArtifacts( + any(KubernetesKind.class), + any(String.class), + any(String.class), + any(KubernetesCredentials.class))) + .thenReturn(ImmutableList.of()); + ResourceVersioner resourceVersioner = new ResourceVersioner(artifactProvider); + return new KubernetesDeployManifestOperation(description, resourceVersioner) + .operate(ImmutableList.of()); + } + + private static OperationResult deploy( + KubernetesDeployManifestDescription description, + Map<KubernetesKind, Artifact> artifactsByKind) { + ArtifactProvider artifactProvider = mock(ArtifactProvider.class); + when(artifactProvider.getArtifacts( + any(KubernetesKind.class), + any(String.class), + any(String.class), + any(KubernetesCredentials.class))) + .thenReturn(ImmutableList.of()); + for (Map.Entry<KubernetesKind, Artifact> entry : artifactsByKind.entrySet()) { + when(artifactProvider.getArtifacts( + eq(entry.getKey()), + any(String.class), + any(String.class), + any(KubernetesCredentials.class))) + .thenReturn(ImmutableList.of(entry.getValue())); + } + ResourceVersioner resourceVersioner = new ResourceVersioner(artifactProvider); + return new KubernetesDeployManifestOperation(description, resourceVersioner) + .operate(ImmutableList.of()); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDeployTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDeployTest.java new file mode 100644 index 00000000000..80344494b76 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/CanDeployTest.java @@ -0,0 +1,296 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.DeployStrategy; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifestStrategy.ServerSideApplyStrategy; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import io.kubernetes.client.openapi.models.V1DeleteOptions; +import org.junit.jupiter.api.Test; + +final class CanDeployTest { + private final CanDeploy handler = new CanDeploy() {}; + private final String OP_NAME = "Can Deploy Test"; + private final Task task = new DefaultTask("task-id"); + + @Test + void applyMutations() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.deploy(manifest, task, OP_NAME, selectorList)).thenReturn(manifest); + handler.deploy( + credentials, + manifest, + DeployStrategy.APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + verify(credentials).deploy(manifest, task, OP_NAME, selectorList); + verifyNoMoreInteractions(credentials); + } + + @Test + void applyReturnValue() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.deploy(manifest, task, OP_NAME, selectorList)).thenReturn(manifest); + OperationResult result = + handler.deploy( + credentials, + manifest, + DeployStrategy.APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + verify(credentials).deploy(manifest, task, OP_NAME, selectorList); + assertThat(result.getManifests()).containsExactlyInAnyOrder(manifest); + } + + @Test + void applyServerSideMutations() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.deploy(manifest, task, OP_NAME, selectorList, "--server-side=true")) + .thenReturn(manifest); + handler.deploy( + credentials, + manifest, + DeployStrategy.SERVER_SIDE_APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + verify(credentials).deploy(manifest, task, OP_NAME, selectorList, "--server-side=true"); + verifyNoMoreInteractions(credentials); + } + + @Test + void applyServerSideForceConflictMutations() { + KubernetesCredentials credentials = 
mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.deploy( + manifest, task, OP_NAME, selectorList, "--server-side=true", "--force-conflicts=true")) + .thenReturn(manifest); + handler.deploy( + credentials, + manifest, + DeployStrategy.SERVER_SIDE_APPLY, + ServerSideApplyStrategy.FORCE_CONFLICTS, + task, + OP_NAME, + selectorList); + verify(credentials) + .deploy( + manifest, task, OP_NAME, selectorList, "--server-side=true", "--force-conflicts=true"); + verifyNoMoreInteractions(credentials); + } + + @Test + void replaceMutations() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.createOrReplace(manifest, task, OP_NAME)).thenReturn(manifest); + handler.deploy( + credentials, + manifest, + DeployStrategy.REPLACE, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + verify(credentials).createOrReplace(manifest, task, OP_NAME); + verifyNoMoreInteractions(credentials); + } + + @Test + void replaceReturnValue() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.createOrReplace(manifest, task, OP_NAME)).thenReturn(manifest); + OperationResult result = + handler.deploy( + credentials, + manifest, + DeployStrategy.REPLACE, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + assertThat(result.getManifests()).containsExactlyInAnyOrder(manifest); + } + + @Test + void recreateMutations() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.deploy(manifest, task, OP_NAME, selectorList)).thenReturn(manifest); + handler.deploy( + credentials, + manifest, + DeployStrategy.RECREATE, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + verify(credentials).deploy(manifest, task, OP_NAME, selectorList); + verify(credentials) + .delete( + eq(manifest.getKind()), + eq(manifest.getNamespace()), + eq(manifest.getName()), + eq(selectorList), + any(V1DeleteOptions.class), + any(Task.class), + anyString()); + verifyNoMoreInteractions(credentials); + } + + @Test + void recreateReturnValue() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.deploy(manifest, task, OP_NAME, selectorList)).thenReturn(manifest); + OperationResult result = + handler.deploy( + credentials, + manifest, + DeployStrategy.RECREATE, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + assertThat(result.getManifests()).containsExactlyInAnyOrder(manifest); + } + + @Test + void createMutation() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = + ManifestFetcher.getManifest("candeploy/deployment-generate-name.yml"); + KubernetesManifest createResult = + 
ManifestFetcher.getManifest("candeploy/deployment-generate-name-result.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.create(manifest, task, OP_NAME, selectorList)).thenReturn(createResult); + handler.deploy( + credentials, + manifest, + DeployStrategy.APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + verify(credentials).create(manifest, task, OP_NAME, selectorList); + verifyNoMoreInteractions(credentials); + } + + @Test + void createReturnValue() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = + ManifestFetcher.getManifest("candeploy/deployment-generate-name.yml"); + KubernetesManifest createResult = + ManifestFetcher.getManifest("candeploy/deployment-generate-name-result.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.create(manifest, task, OP_NAME, selectorList)).thenReturn(createResult); + OperationResult result = + handler.deploy( + credentials, + manifest, + DeployStrategy.APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + assertThat(result.getManifests()).containsExactlyInAnyOrder(createResult); + } + + @Test + void nullManifest() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = ManifestFetcher.getManifest("candeploy/deployment.yml"); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + + // arguments to deploy are arbitrary since we're mocking the return value. + when(credentials.deploy(manifest, task, OP_NAME, selectorList)).thenReturn(null); + + // DeployStrategy.APPLY is arbitrary too since the code to handle null + // manifests works for all strategies. + OperationResult result = + handler.deploy( + credentials, + manifest, + DeployStrategy.APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + + verify(credentials).deploy(manifest, task, OP_NAME, selectorList); + assertThat(result.getManifests()).isEmpty(); + } + + @Test + void nullManifestWithGenerateName() { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + KubernetesManifest manifest = + ManifestFetcher.getManifest("candeploy/deployment-generate-name.yml"); + assertThat(manifest.getGenerateName()).isNotBlank(); + + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(credentials.create(manifest, task, OP_NAME, selectorList)).thenReturn(null); + + // DeployStrategy.APPLY and ServerSideApplyStrategy.DEFAULT are arbitrary + // too since they're ignored for manifests with generateName. 
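+ // (manifests with generateName go through create() rather than apply, as verified below)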
+ OperationResult result = + handler.deploy( + credentials, + manifest, + DeployStrategy.APPLY, + ServerSideApplyStrategy.DEFAULT, + task, + OP_NAME, + selectorList); + + verify(credentials).create(manifest, task, OP_NAME, selectorList); + assertThat(result.getManifests()).isEmpty(); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCronJobHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCronJobHandlerTest.java new file mode 100644 index 00000000000..57d9eb61f0d --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesCronJobHandlerTest.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesCronJobHandlerTest { + private KubernetesCronJobHandler handler = new KubernetesCronJobHandler(); + + @Test + void noStatus() { + KubernetesManifest cronJob = ManifestFetcher.getManifest("cronjob/base.yml"); + Status status = handler.status(cronJob); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void emptyStatus() { + KubernetesManifest cronJob = + ManifestFetcher.getManifest("cronjob/base.yml", "cronjob/empty-status.yml"); + Status status = handler.status(cronJob); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stableWithNoReplicas() { + KubernetesManifest cronJob = ManifestFetcher.getManifest("cronjob/scheduled-status.yml"); + Status status = handler.status(cronJob); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDaemonSetHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDaemonSetHandlerTest.java new file mode 100644 index 00000000000..822017ea6cb --- /dev/null +++ 
b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDaemonSetHandlerTest.java @@ -0,0 +1,147 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesDaemonSetHandlerTest { + private KubernetesDaemonSetHandler handler = new KubernetesDaemonSetHandler(); + + @Test + void noStatus() { + KubernetesManifest daemonSet = ManifestFetcher.getManifest("daemonset/base.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void unstableWhenUnavailable() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base.yml", "daemonset/unavailable.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingScheduled() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base.yml", "daemonset/awaiting-scheduled.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be scheduled"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingUpdatedScheduled() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest( + "daemonset/base.yml", "daemonset/awaiting-updated-scheduled.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all updated replicas to be scheduled"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingReady() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base.yml", 
"daemonset/awaiting-ready.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be ready"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void oldGeneration() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base.yml", "daemonset/old-generation.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for status generation to match updated object generation"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stableWhenAllAvailable() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base.yml", "daemonset/available.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stableWhenNoneDesired() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base.yml", "daemonset/none-desired.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stableWhenOnDelete() { + KubernetesManifest daemonSet = + ManifestFetcher.getManifest("daemonset/base-on-delete.yml", "daemonset/unavailable.yml"); + Status status = handler.status(daemonSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandlerTest.java new file mode 100644 index 00000000000..6ab236a4cf0 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesDeploymentHandlerTest.java @@ -0,0 +1,265 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesDeploymentHandlerTest { + private KubernetesDeploymentHandler handler = new KubernetesDeploymentHandler(); + + @Test + void noStatus() { + KubernetesManifest deployment = ManifestFetcher.getManifest("deployment/base.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stable() { + KubernetesManifest deployment = + ManifestFetcher.getManifest("deployment/base.yml", "deployment/stable.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicas() { + KubernetesManifest deployment = + ManifestFetcher.getManifest("deployment/base.yml", "deployment/no-replicas.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be updated"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicasWhenNoneDesired() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base-no-replicas.yml", "deployment/no-replicas.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void unknownWithOldGeneration() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/stable-with-old-generation.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for status generation to match updated object generation"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void conditionReportsUnavailable() { + KubernetesManifest deployment = + ManifestFetcher.getManifest("deployment/base.yml", "deployment/condition-unavailable.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()) + .isEqualTo("Deployment does not have minimum availability."); + 
assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void progressDeadlineExceededUnavailable() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/progress-deadline-exceeded-unavailable.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()) + .isEqualTo("Deployment does not have minimum availability."); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isTrue(); + assertThat(status.getFailed().getMessage()) + .isEqualTo("Deployment exceeded its progress deadline"); + } + + @Test + void progressDeadlineExceeded() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/progress-deadline-exceeded.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isTrue(); + assertThat(status.getFailed().getMessage()) + .isEqualTo("Deployment exceeded its progress deadline"); + } + + @Test + void awaitingUpdatedReplicas() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-updated-replicas.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be updated"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingUpdatedReplicasPaused() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-updated-replicas-paused.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be updated"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isTrue(); + assertThat(status.getPaused().getMessage()).isEqualTo("Deployment is paused"); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingTermination() { + KubernetesManifest deployment = + ManifestFetcher.getManifest("deployment/base.yml", "deployment/awaiting-termination.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for old replicas to finish termination"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingTerminationPaused() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-termination-paused.yml"); + Status status = handler.status(deployment); + + 
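+ // A paused deployment still reports why it isn't stable, alongside the paused condition.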
assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for old replicas to finish termination"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isTrue(); + assertThat(status.getPaused().getMessage()).isEqualTo("Deployment is paused"); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingAvailableReplicas() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-available-replicas.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingAvailableReplicasPaused() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-available-replicas-paused.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isTrue(); + assertThat(status.getPaused().getMessage()).isEqualTo("Deployment is paused"); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingReadyReplicas() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-ready-replicas.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be ready"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingReadyReplicasPaused() { + KubernetesManifest deployment = + ManifestFetcher.getManifest( + "deployment/base.yml", "deployment/awaiting-ready-replicas-paused.yml"); + Status status = handler.status(deployment); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be ready"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isTrue(); + assertThat(status.getPaused().getMessage()).isEqualTo("Deployment is paused"); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandlerTest.java new file mode 100644 index 00000000000..5a79ff6d2c4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesHorizontalPodAutoscalerHandlerTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2020 Snap Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesHorizontalPodAutoscalerHandlerTest { + private KubernetesHorizontalPodAutoscalerHandler handler = + new KubernetesHorizontalPodAutoscalerHandler(); + + @Test + void noStatus() { + KubernetesManifest hpa = ManifestFetcher.getManifest("horizontalpodautoscaler/base.yml"); + Status status = handler.status(hpa); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void waitingForScaleup() { + KubernetesManifest hpa = + ManifestFetcher.getManifest( + "horizontalpodautoscaler/base.yml", "horizontalpodautoscaler/waiting-for-scaleup.yml"); + Status status = handler.status(hpa); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for HPA to complete a scale up, current: 2 desired: 3"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void waitingForScaledown() { + KubernetesManifest hpa = + ManifestFetcher.getManifest( + "horizontalpodautoscaler/base.yml", + "horizontalpodautoscaler/waiting-for-scaledown.yml"); + Status status = handler.status(hpa); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for HPA to complete a scale down, current: 5 desired: 2"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicas() { + KubernetesManifest hpa = + ManifestFetcher.getManifest( + "horizontalpodautoscaler/base.yml", "horizontalpodautoscaler/no-replicas.yml"); + Status status = handler.status(hpa); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stable() { + KubernetesManifest hpa = + ManifestFetcher.getManifest( + "horizontalpodautoscaler/base.yml", "horizontalpodautoscaler/stable.yml"); + Status status = handler.status(hpa); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git 
a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesJobHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesJobHandlerTest.java new file mode 100644 index 00000000000..3f7633d018a --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesJobHandlerTest.java @@ -0,0 +1,118 @@ +/* + * Copyright 2020 Google, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesJobHandlerTest { + private KubernetesJobHandler handler = new KubernetesJobHandler(); + + @Test + void noStatus() { + KubernetesManifest job = ManifestFetcher.getManifest("job/base.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void activeJob() { + KubernetesManifest job = ManifestFetcher.getManifest("job/base.yml", "job/active-job.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for jobs to finish"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void completedJob() { + KubernetesManifest job = ManifestFetcher.getManifest("job/base.yml", "job/completed-job.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void multipleCompletionsInProgress() { + KubernetesManifest job = + ManifestFetcher.getManifest("job/base-with-completions.yml", "job/active-job.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for jobs to finish"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void multipleCompletionsSomeCompleted() { + KubernetesManifest job = + 
ManifestFetcher.getManifest("job/base-with-completions.yml", "job/completed-job.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for jobs to finish"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void multipleCompletionsSomeFailed() { + KubernetesManifest job = + ManifestFetcher.getManifest( + "job/base-with-completions.yml", "job/in-progress-some-failed.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for jobs to finish"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void multipleCompletionsFailed() { + KubernetesManifest job = + ManifestFetcher.getManifest("job/base-with-completions.yml", "job/failed-job.yml"); + Status status = handler.status(job); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isTrue(); + assertThat(status.getFailed().getMessage()) + .isEqualTo("Job has reached the specified backoff limit"); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodHandlerTest.java new file mode 100644 index 00000000000..3132fa006d7 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesPodHandlerTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesPodHandlerTest { + private KubernetesPodHandler handler = new KubernetesPodHandler(); + + @Test + void noStatus() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void nullPhase() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml", "pod/null-phase.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Pod phase is unknown"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("Pod phase is unknown"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void pendingPhase() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml", "pod/pending-phase.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Pod is pending"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("Pod is pending"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void runningPhase() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml", "pod/running-phase.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void succeededPhase() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml", "pod/succeeded-phase.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void failedPhase() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml", "pod/failed-phase.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Pod has failed"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("Pod has failed"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void unknownPhase() { + KubernetesManifest pod = ManifestFetcher.getManifest("pod/base.yml", 
"pod/unknown-phase.yml"); + Status status = handler.status(pod); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Pod phase is unknown"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("Pod phase is unknown"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesReplicaSetHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesReplicaSetHandlerTest.java new file mode 100644 index 00000000000..e9cc9453ff4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesReplicaSetHandlerTest.java @@ -0,0 +1,143 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesReplicaSetHandlerTest { + private KubernetesReplicaSetHandler handler = new KubernetesReplicaSetHandler(); + + @Test + void noStatus() { + KubernetesManifest replicaSet = ManifestFetcher.getManifest("replicaset/base.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stable() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest("replicaset/base.yml", "replicaset/stable.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void oldGeneration() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest("replicaset/base.yml", "replicaset/old-generation.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for replicaset spec update to be observed"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + 
void awaitingFullyLabeled() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest("replicaset/base.yml", "replicaset/awaiting-fully-labeled.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be fully-labeled"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()) + .isEqualTo("Waiting for all replicas to be fully-labeled"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingAvailable() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest("replicaset/base.yml", "replicaset/awaiting-available.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()) + .isEqualTo("Waiting for all replicas to be available"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingReady() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest("replicaset/base.yml", "replicaset/awaiting-ready.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("Waiting for all replicas to be ready"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()) + .isEqualTo("Waiting for all replicas to be ready"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicas() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest("replicaset/base.yml", "replicaset/no-replicas.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all replicas to be fully-labeled"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()) + .isEqualTo("Waiting for all replicas to be fully-labeled"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicasWhenNoneDesired() { + KubernetesManifest replicaSet = + ManifestFetcher.getManifest( + "replicaset/base-no-replicas.yml", "replicaset/no-replicas.yml"); + Status status = handler.status(replicaSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStatefulSetHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStatefulSetHandlerTest.java new file mode 100644 index 00000000000..d30cf47eda1 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/KubernetesStatefulSetHandlerTest.java @@ -0,0 +1,181 @@ +/* + * Copyright 2020 Google, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.Manifest.Status; +import org.junit.jupiter.api.Test; + +final class KubernetesStatefulSetHandlerTest { + private KubernetesStatefulSetHandler handler = new KubernetesStatefulSetHandler(); + + @Test + void noStatus() { + KubernetesManifest statefulSet = ManifestFetcher.getManifest("statefulset/base.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()).isEqualTo("No status reported yet"); + assertThat(status.getAvailable().isState()).isFalse(); + assertThat(status.getAvailable().getMessage()).isEqualTo("No availability reported"); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void oldGeneration() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest("statefulset/base.yml", "statefulset/old-generation.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for status generation to match updated object generation"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingReplicas() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest("statefulset/base.yml", "statefulset/awaiting-replicas.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for at least the desired replica count to be met"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingReadyReplicas() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest( + "statefulset/base.yml", "statefulset/awaiting-ready-replicas.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all updated replicas to be ready"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void awaitingPartitionedRollout() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest( + "statefulset/base-with-partition.yml", "statefulset/awaiting-partitioned-rollout.yml"); + Status status = handler.status(statefulSet); + + 
assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for partitioned roll out to finish"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void partitionedRolloutComplete() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest( + "statefulset/base-with-partition.yml", "statefulset/partitioned-rollout-complete.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getStable().getMessage()).isEqualTo("Partitioned roll out complete"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void waitingForReplicas() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest("statefulset/base.yml", "statefulset/waiting-for-replicas.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for all updated replicas to be scheduled"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void waitingForUpdatedRevision() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest( + "statefulset/base.yml", "statefulset/waiting-for-updated-revision.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for the updated revision to match the current revision"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicas() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest("statefulset/base.yml", "statefulset/no-replicas.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isFalse(); + assertThat(status.getStable().getMessage()) + .isEqualTo("Waiting for at least the desired replica count to be met"); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void noReplicasWhenNoneDesired() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest( + "statefulset/base-no-desired-replicas.yml", "statefulset/no-replicas.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } + + @Test + void stable() { + KubernetesManifest statefulSet = + ManifestFetcher.getManifest("statefulset/base.yml", "statefulset/stable.yml"); + Status status = handler.status(statefulSet); + + assertThat(status.getStable().isState()).isTrue(); + assertThat(status.getAvailable().isState()).isTrue(); + assertThat(status.getPaused().isState()).isFalse(); + assertThat(status.getFailed().isState()).isFalse(); + } +} diff --git 
a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ManifestFetcher.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ManifestFetcher.java new file mode 100644 index 00000000000..4ea833af313 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/ManifestFetcher.java @@ -0,0 +1,66 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.handler; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.io.Resources; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.kork.annotations.NonnullByDefault; +import io.kubernetes.client.util.Yaml; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.util.Objects; +import java.util.stream.StreamSupport; + +/** + * Helper class to fetch Kubernetes manifest objects stored as resources on the classpath. Only + * intended for use in tests. + */ +@NonnullByDefault +public final class ManifestFetcher { + static KubernetesManifest getManifest(String basePath, String overlayPath) { + KubernetesManifest base = getManifest(basePath); + KubernetesManifest overlay = getManifest(overlayPath); + base.putAll(overlay); + return base; + } + + public static KubernetesManifest getManifest(String basePath) { + return getManifest(ManifestFetcher.class, basePath).get(0); + } + + public static ImmutableList<KubernetesManifest> getManifest( + Class<?> referenceClass, String basePath) { + ObjectMapper mapper = new ObjectMapper(); + return StreamSupport.stream( + Yaml.getSnakeYaml().loadAll(getResource(referenceClass, basePath)).spliterator(), false) + .filter(Objects::nonNull) + .map(o -> mapper.convertValue(o, KubernetesManifest.class)) + .collect(ImmutableList.toImmutableList()); + } + + public static String getResource(Class<?> referenceClass, String name) { + try { + return Resources.toString(referenceClass.getResource(name), StandardCharsets.UTF_8); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubectlJobExecutorTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubectlJobExecutorTest.java new file mode 100644 index 00000000000..6b0eaddf567 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubectlJobExecutorTest.java @@ -0,0 +1,832 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.job; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.LoggerContext; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.read.ListAppender; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSetMultimap; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.gson.Gson; +import com.netflix.spinnaker.clouddriver.data.task.InMemoryTaskRepository; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutionException; +import com.netflix.spinnaker.clouddriver.jobs.JobExecutor; +import com.netflix.spinnaker.clouddriver.jobs.JobRequest; +import com.netflix.spinnaker.clouddriver.jobs.JobResult; +import com.netflix.spinnaker.clouddriver.jobs.JobResult.Result; +import com.netflix.spinnaker.clouddriver.jobs.local.JobExecutorLocal; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesPodMetric.ContainerMetric; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.ManifestFetcher; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelector; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.kork.test.log.MemoryAppender; +import io.github.resilience4j.retry.Retry; +import io.github.resilience4j.retry.RetryRegistry; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.stream.Collectors; +import org.apache.commons.exec.CommandLine; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.slf4j.LoggerFactory; + +final class KubectlJobExecutorTest { + private static final String NAMESPACE = "test-namespace"; + JobExecutor jobExecutor; + KubernetesConfigurationProperties kubernetesConfigurationProperties; + + @BeforeEach + public void setup() { + jobExecutor = mock(JobExecutor.class); + kubernetesConfigurationProperties = new KubernetesConfigurationProperties(); + kubernetesConfigurationProperties.getJobExecutor().getRetries().setBackOffInMs(500); + } + + @ParameterizedTest(name = "{index} ==> retries enabled = {0}") + @ValueSource(booleans = {true, false}) + void topPodEmptyOutput(boolean retriesEnabled) { + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder().result(Result.SUCCESS).output("").error("").build()); + + KubernetesConfigurationProperties kubernetesConfigurationProperties = + new KubernetesConfigurationProperties(); + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(retriesEnabled); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + Collection<KubernetesPodMetric> podMetrics = + kubectlJobExecutor.topPod(mockKubernetesCredentials(), "test", ""); + assertThat(podMetrics).isEmpty(); + + // should only be called once as no retries are performed + verify(jobExecutor).runJob(any(JobRequest.class)); + + if (retriesEnabled) { + // verify retry registry + assertTrue(kubectlJobExecutor.getRetryRegistry().isPresent()); + RetryRegistry retryRegistry = kubectlJobExecutor.getRetryRegistry().get(); + assertThat(retryRegistry.getAllRetries().size()).isEqualTo(1); + assertThat(retryRegistry.getAllRetries().get(0).getName()).isEqualTo("mock-account"); + + // verify retry metrics + Retry.Metrics retryMetrics = retryRegistry.getAllRetries().get(0).getMetrics(); + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(0); + // in this test, the action succeeded without retries. So number of unique calls == 1.
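+ // (For context: Resilience4j buckets each decorated call into success/failure, with/without a retry attempt; a call that succeeds on its first attempt increments only the "successful without retry" counter asserted below.)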
+ assertThat(retryMetrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(1); + assertThat(retryMetrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(0); + assertThat(retryMetrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(0); + } + } + + @Test + void topPodMultipleContainers() { + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.SUCCESS) + .output(ManifestFetcher.getResource(KubectlJobExecutorTest.class, "top-pod.txt")) + .error("") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, new KubernetesConfigurationProperties(), new SimpleMeterRegistry()); + Collection<KubernetesPodMetric> podMetrics = + kubectlJobExecutor.topPod(mockKubernetesCredentials(), NAMESPACE, ""); + assertThat(podMetrics).hasSize(2); + + ImmutableSetMultimap<String, ContainerMetric> expectedMetrics = + ImmutableSetMultimap.<String, ContainerMetric>builder() + .putAll( + "spinnaker-io-nginx-v000-42gnq", + ImmutableList.of( + new ContainerMetric( + "spinnaker-github-io", + ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "2Mi")), + new ContainerMetric( + "istio-proxy", + ImmutableMap.of("CPU(cores)", "3m", "MEMORY(bytes)", "28Mi")), + new ContainerMetric( + "istio-init", ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "0Mi")))) + .putAll( + "spinnaker-io-nginx-v001-jvkgb", + ImmutableList.of( + new ContainerMetric( + "spinnaker-github-io", + ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "2Mi")), + new ContainerMetric( + "istio-proxy", + ImmutableMap.of("CPU(cores)", "32m", "MEMORY(bytes)", "30Mi")), + new ContainerMetric( + "istio-init", ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "0Mi")))) + .build(); + + for (String pod : expectedMetrics.keys()) { + Optional<KubernetesPodMetric> podMetric = + podMetrics.stream() + .filter(metric -> metric.getPodName().equals(pod)) + .filter(metric -> metric.getNamespace().equals(NAMESPACE)) + .findAny(); + assertThat(podMetric.isPresent()).isTrue(); + assertThat(podMetric.get().getContainerMetrics()) + .containsExactlyInAnyOrderElementsOf(expectedMetrics.get(pod)); + } + } + + @DisplayName("test to verify how kubectl errors are handled when retries are disabled") + @Test + void kubectlJobExecutorErrorHandlingWhenRetriesAreDisabled() { + // when + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("some error") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + // then + KubectlJobExecutor.KubectlException thrown = + assertThrows( + KubectlJobExecutor.KubectlException.class, + () -> kubectlJobExecutor.topPod(mockKubernetesCredentials(), "test", "")); + + assertTrue(thrown.getMessage().contains("some error")); + // should only be called once as no retries are performed for this error + verify(jobExecutor).runJob(any(JobRequest.class)); + } + + @DisplayName( + "parameterized test to verify retry behavior for configured retryable errors that fail even after all " + + "attempts are exhausted") + @ParameterizedTest( + name = "{index} ==> number of simultaneous executions of the action under test = {0}") + @ValueSource(ints = {1, 10}) + void kubectlRetryHandlingForConfiguredErrorsThatContinueFailingAfterMaxRetryAttempts( + int numberOfThreads) { + // setup + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(true); + + // to test log messages + MemoryAppender memoryAppender = new
MemoryAppender(KubectlJobExecutor.class); + + final ExecutorService executor = + Executors.newFixedThreadPool( + numberOfThreads, + new ThreadFactoryBuilder() + .setNameFormat(KubectlJobExecutorTest.class.getSimpleName() + "-%d") + .build()); + + final ArrayList<Future<Collection<KubernetesPodMetric>>> futures = + new ArrayList<>(numberOfThreads); + + // when + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("Unable to connect to the server: net/http: TLS handshake timeout") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + for (int i = 1; i <= numberOfThreads; i++) { + futures.add( + executor.submit( + () -> kubectlJobExecutor.topPod(mockKubernetesCredentials(), NAMESPACE, "test-pod"))); + } + + // then + for (Future<Collection<KubernetesPodMetric>> future : futures) { + try { + future.get(); + } catch (final ExecutionException e) { + assertTrue(e.getCause() instanceof KubectlJobExecutor.KubectlException); + assertTrue( + e.getMessage() + .contains("Unable to connect to the server: net/http: TLS handshake timeout")); + } catch (final InterruptedException ignored) { + } + } + + executor.shutdown(); + + // verify that the kubectl job executor made max configured attempts per thread to execute the + // action + verify( + jobExecutor, + times( + kubernetesConfigurationProperties.getJobExecutor().getRetries().getMaxAttempts() + * numberOfThreads)) + .runJob(any(JobRequest.class)); + + // verify retry registry + assertTrue(kubectlJobExecutor.getRetryRegistry().isPresent()); + RetryRegistry retryRegistry = kubectlJobExecutor.getRetryRegistry().get(); + assertThat(retryRegistry.getAllRetries().size()).isEqualTo(1); + assertThat(retryRegistry.getAllRetries().get(0).getName()).isEqualTo("mock-account"); + + // verify retry metrics + Retry.Metrics retryMetrics = retryRegistry.getAllRetries().get(0).getMetrics(); + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(0); + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(0); + // in this test, all threads failed. So number of unique failed calls == 1 per thread. + assertThat(retryMetrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(numberOfThreads); + assertThat(retryMetrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(0); + + // verify that no duplicate messages are shown in the logs + List<String> numberOfFailedRetryAttemptLogMessages = + memoryAppender.search( + "Kubectl command for mock-account failed after " + + kubernetesConfigurationProperties.getJobExecutor().getRetries().getMaxAttempts() + + " attempts. Exception: com.netflix.spinnaker.clouddriver.kubernetes.op." + + "job.KubectlJobExecutor$KubectlException: command: 'kubectl " + + "--request-timeout=0 --namespace=test-namespace top po test-pod " + + "--containers' in account: mock-account failed. 
Error: Unable to " + + "connect to the server: net/http: TLS handshake timeout", + Level.ERROR); + + // we should only see 1 failed retry attempt message per thread + assertThat(numberOfFailedRetryAttemptLogMessages.size()).isEqualTo(numberOfThreads); + } + + @DisplayName( + "parameterized test to verify retry behavior for errors that are not configured to be retryable") + @ParameterizedTest( + name = "{index} ==> number of simultaneous executions of the action under test = {0}") + @ValueSource(ints = {1, 10}) + void kubectlMultiThreadedRetryHandlingForErrorsThatAreNotConfiguredToBeRetryable( + int numberOfThreads) { + // setup + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(true); + + // to test log messages + Logger logger = (Logger) LoggerFactory.getLogger(KubectlJobExecutor.class); + ListAppender<ILoggingEvent> listAppender = new ListAppender<>(); + listAppender.setContext((LoggerContext) LoggerFactory.getILoggerFactory()); + logger.addAppender(listAppender); + listAppender.start(); + + final ExecutorService executor = + Executors.newFixedThreadPool( + numberOfThreads, + new ThreadFactoryBuilder() + .setNameFormat(KubectlJobExecutorTest.class.getSimpleName() + "-%d") + .build()); + + final ArrayList<Future<Collection<KubernetesPodMetric>>> futures = + new ArrayList<>(numberOfThreads); + + // when + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("un-retryable error") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + for (int i = 1; i <= numberOfThreads; i++) { + futures.add( + executor.submit( + () -> kubectlJobExecutor.topPod(mockKubernetesCredentials(), NAMESPACE, "test-pod"))); + } + + // then + for (Future<Collection<KubernetesPodMetric>> future : futures) { + try { + future.get(); + } catch (final ExecutionException e) { + assertTrue(e.getCause() instanceof KubectlJobExecutor.KubectlException); + assertTrue(e.getMessage().contains("un-retryable error")); + } catch (final InterruptedException ignored) { + } + } + + executor.shutdown(); + + // verify that the kubectl job executor tried to execute the action only once per thread + verify(jobExecutor, times(numberOfThreads)).runJob(any(JobRequest.class)); + + // verify retry registry + assertTrue(kubectlJobExecutor.getRetryRegistry().isPresent()); + RetryRegistry retryRegistry = kubectlJobExecutor.getRetryRegistry().get(); + assertThat(retryRegistry.getAllRetries().size()).isEqualTo(1); + assertThat(retryRegistry.getAllRetries().get(0).getName()).isEqualTo("mock-account"); + + // verify retry metrics + Retry.Metrics retryMetrics = retryRegistry.getAllRetries().get(0).getMetrics(); + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(0); + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(0); + assertThat(retryMetrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(0); + // in this test, all threads failed without retrying. So number of unique failed calls == 1 per + // thread.
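+ // (Note: the resilience4j counters here track unique decorated calls, not individual kubectl invocations; the invocation count itself is asserted separately via the jobExecutor mock above.)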
+ assertThat(retryMetrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(numberOfThreads); + + // verify that no duplicate messages are shown in the logs + List<ILoggingEvent> logsList = listAppender.list; + List<ILoggingEvent> numberOfFailedRetryAttemptLogMessages = + logsList.stream() + .filter( + iLoggingEvent -> + iLoggingEvent + .getFormattedMessage() + .contains( + "Not retrying command: 'kubectl --request-timeout=0 --namespace=test-namespace" + + " top po test-pod --containers' in account: mock-account as retries are not" + + " enabled for error: un-retryable error")) + .collect(Collectors.toList()); + + // we should only see 1 failed retry attempt message per thread + assertThat(numberOfFailedRetryAttemptLogMessages.size()).isEqualTo(numberOfThreads); + } + + @Test + void kubectlRetryHandlingForConfiguredErrorsThatSucceedAfterAFewRetries() { + // setup + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(true); + + // to test log messages + Logger logger = (Logger) LoggerFactory.getLogger(KubectlJobExecutor.class); + ListAppender<ILoggingEvent> listAppender = new ListAppender<>(); + listAppender.setContext((LoggerContext) LoggerFactory.getILoggerFactory()); + logger.addAppender(listAppender); + listAppender.start(); + + // when + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("Unable to connect to the server: net/http: TLS handshake timeout") + .build()) + .thenReturn( + JobResult.<String>builder() + .result(Result.SUCCESS) + .output(ManifestFetcher.getResource(KubectlJobExecutorTest.class, "top-pod.txt")) + .error("") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + Collection<KubernetesPodMetric> podMetrics = + kubectlJobExecutor.topPod(mockKubernetesCredentials(), NAMESPACE, "test-pod"); + + // then + + // job executor should be called twice - as it failed on the first call but succeeded + // in the second one + verify(jobExecutor, times(2)).runJob(any(JobRequest.class)); + + // verify retry registry + assertTrue(kubectlJobExecutor.getRetryRegistry().isPresent()); + RetryRegistry retryRegistry = kubectlJobExecutor.getRetryRegistry().get(); + assertThat(retryRegistry.getAllRetries().size()).isEqualTo(1); + assertThat(retryRegistry.getAllRetries().get(0).getName()).isEqualTo("mock-account"); + + // verify retry metrics + Retry.Metrics retryMetrics = retryRegistry.getAllRetries().get(0).getMetrics(); + // in this test, the action succeeded eventually. So number of unique calls == 1. + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1); + assertThat(retryMetrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(0); + assertThat(retryMetrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(0); + assertThat(retryMetrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(0); + + // verify that no duplicate messages are shown in the logs + List<ILoggingEvent> logsList = listAppender.list; + List<ILoggingEvent> numberOfSucceededRetryAttemptsLogMessages = + logsList.stream() + .filter( + iLoggingEvent -> + iLoggingEvent + .getFormattedMessage() + .contains( + "Kubectl command for mock-account is now successful in attempt #2. Last " + + "attempt had failed with exception: com.netflix.spinnaker.clouddriver" + + ".kubernetes.op.job.KubectlJobExecutor$KubectlException: command: " + + "'kubectl --request-timeout=0 --namespace=test-namespace top po test-pod" + + " --containers' in account: mock-account failed. 
Error: Unable to connect to" + + " the server: net/http: TLS handshake timeout")) + .collect(Collectors.toList()); + + // we should only see 1 succeeded retry attempt message + assertThat(numberOfSucceededRetryAttemptsLogMessages.size()).isEqualTo(1); + + // verify output of the command + assertThat(podMetrics).hasSize(2); + ImmutableSetMultimap<String, ContainerMetric> expectedMetrics = + ImmutableSetMultimap.<String, ContainerMetric>builder() + .putAll( + "spinnaker-io-nginx-v000-42gnq", + ImmutableList.of( + new ContainerMetric( + "spinnaker-github-io", + ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "2Mi")), + new ContainerMetric( + "istio-proxy", + ImmutableMap.of("CPU(cores)", "3m", "MEMORY(bytes)", "28Mi")), + new ContainerMetric( + "istio-init", ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "0Mi")))) + .putAll( + "spinnaker-io-nginx-v001-jvkgb", + ImmutableList.of( + new ContainerMetric( + "spinnaker-github-io", + ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "2Mi")), + new ContainerMetric( + "istio-proxy", + ImmutableMap.of("CPU(cores)", "32m", "MEMORY(bytes)", "30Mi")), + new ContainerMetric( + "istio-init", ImmutableMap.of("CPU(cores)", "0m", "MEMORY(bytes)", "0Mi")))) + .build(); + + for (String pod : expectedMetrics.keys()) { + Optional<KubernetesPodMetric> podMetric = + podMetrics.stream() + .filter(metric -> metric.getPodName().equals(pod)) + .filter(metric -> metric.getNamespace().equals(NAMESPACE)) + .findAny(); + assertThat(podMetric.isPresent()).isTrue(); + assertThat(podMetric.get().getContainerMetrics()) + .containsExactlyInAnyOrderElementsOf(expectedMetrics.get(pod)); + } + } + + @ParameterizedTest(name = "{index} ==> retries enabled = {0}") + @ValueSource(booleans = {true, false}) + void kubectlJobExecutorRaisesException(boolean retriesEnabled) { + when(jobExecutor.runJob(any(JobRequest.class))) + .thenThrow(new JobExecutionException("unknown exception", new IOException())); + + if (retriesEnabled) { + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(true); + } + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + JobExecutionException thrown = + assertThrows( + JobExecutionException.class, + () -> kubectlJobExecutor.topPod(mockKubernetesCredentials(), "test", "test-pod")); + + if (retriesEnabled) { + // should be called 3 times as there were max 3 attempts made + verify(jobExecutor, times(3)).runJob(any(JobRequest.class)); + } else { + verify(jobExecutor).runJob(any(JobRequest.class)); + } + + // at the end, with or without retries, the exception should still be thrown + assertTrue(thrown.getMessage().contains("unknown exception")); + } + + @DisplayName( + "test to verify that kubectl commands that read data from stdin can succeed in subsequent retry attempts") + @Test + void kubectlRetryHandlingForKubectlCallsThatUseStdinWhichSucceedAfterAFewRetries() { + // setup + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(true); + + // fetch a test manifest + KubernetesManifest inputManifest = + ManifestFetcher.getManifest(KubectlJobExecutorTest.class, "job.yml").get(0); + + KubectlJobExecutor kubectlJobExecutor = + new TestScriptJobExecutor( + new JobExecutorLocal(/* timeoutMinutes */ 1), + kubernetesConfigurationProperties, + new SimpleMeterRegistry(), + TestScriptJobExecutor.RetryBehavior.SUCCESS_AFTER_INITIAL_FAILURE); + + // We are using a real job executor. 
Therefore, we can simulate the call `kubectl apply -f -` + // by substituting kubectl with a test script that accepts stdin + KubernetesManifest returnedManifest = + kubectlJobExecutor.deploy( + mockKubernetesCredentials( + "src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/mock-kubectl-stdin-command.sh"), + inputManifest, + new InMemoryTaskRepository().create("starting", "starting"), + "starting", + new KubernetesSelectorList()); + + // even after retries occur, the inputStream should not be empty. This is verified by + // checking the stdout generated from the script + assertThat(returnedManifest.getFullResourceName()) + .isEqualTo(inputManifest.getFullResourceName()); + } + + @DisplayName( + "test to verify that kubectl commands that read data from stdin fail after all retries. In each retry attempt," + + " the input stream data should still be made available to the call") + @Test + void kubectlRetryHandlingForKubectlCallsThatUseStdinWhichContinueFailingAfterAllRetries() { + // setup + kubernetesConfigurationProperties.getJobExecutor().getRetries().setEnabled(true); + + // fetch a test manifest + KubernetesManifest inputManifest = + ManifestFetcher.getManifest(KubectlJobExecutorTest.class, "job.yml").get(0); + + KubectlJobExecutor kubectlJobExecutor = + new TestScriptJobExecutor( + new JobExecutorLocal(/* timeoutMinutes */ 1), + kubernetesConfigurationProperties, + new SimpleMeterRegistry(), + TestScriptJobExecutor.RetryBehavior.FAILED); + + // We are using a real job executor. Therefore, we can simulate the call `kubectl apply -f -` + // by substituting kubectl with a test script that accepts stdin + KubectlJobExecutor.KubectlException thrown = + assertThrows( + KubectlJobExecutor.KubectlException.class, + () -> + kubectlJobExecutor.deploy( + mockKubernetesCredentials( + "src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/mock-kubectl-stdin-command.sh"), + inputManifest, + new InMemoryTaskRepository().create("starting", "starting"), + "starting", + new KubernetesSelectorList())); + + assertThat(thrown.getMessage()).contains("Deploy failed for manifest: job my-job"); + // verify that the final error contained stdin data + assertThat(thrown.getMessage()).contains(new Gson().toJson(inputManifest)); + } + + @Test + void testDeployNoObjectsPassedToApplyNoLabelSelectors() { + // given + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("error: no objects passed to apply") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + KubernetesManifest manifest = new KubernetesManifest(); + manifest.putAll( + Map.of( + "kind", + "Job", // arbitrary kind + "metadata", + Map.of("name", "my-name"))); + + KubernetesSelectorList labelSelectors = new KubernetesSelectorList(); + assertThat(labelSelectors.isEmpty()).isTrue(); + + // With no label selectors, expect deploy to throw a KubectlException because kubectl has failed + // (i.e. returned a non-zero exit code).
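+ // (AssertJ's assertThatThrownBy is used below, rather than the assertThrows pattern seen earlier in this class, so the exception type and message can be verified in a single fluent chain.)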
+ assertThatThrownBy( + () -> + kubectlJobExecutor.deploy( + mockKubernetesCredentials(), + manifest, + new InMemoryTaskRepository().create("task", "task"), + "operation", + labelSelectors)) + .isInstanceOf(KubectlJobExecutor.KubectlException.class) + .hasMessageContaining("Deploy failed for manifest:"); + } + + @Test + void testDeployNoObjectsPassedToApplyWithLabelSelectors() { + // given + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("error: no objects passed to apply") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + KubernetesManifest manifest = new KubernetesManifest(); + manifest.putAll( + Map.of( + "kind", + "Job", // arbitrary kind + "metadata", + Map.of("name", "my-name"))); + + KubernetesSelectorList labelSelectors = new KubernetesSelectorList(); + KubernetesSelector selector = + new KubernetesSelector(KubernetesSelector.Kind.EQUALS, "some-key", List.of("some-value")); + labelSelectors.addSelector(selector); + assertThat(labelSelectors.isNotEmpty()).isTrue(); + + // With label selectors, expect deploy to return null with the expectation + // that higher level code (e.g. KubernetesDeployManifestOperation) raises an + // exception if none of the deploy calls it makes result in kubectl actually + // deploying a manifest. + KubernetesManifest returnedManifest = + kubectlJobExecutor.deploy( + mockKubernetesCredentials(), + manifest, + new InMemoryTaskRepository().create("task", "task"), + "operation", + labelSelectors); + assertThat(returnedManifest).isNull(); + } + + @Test + void testCreateNoObjectsPassedToCreateWithLabelSelectors() { + // given + when(jobExecutor.runJob(any(JobRequest.class))) + .thenReturn( + JobResult.<String>builder() + .result(Result.FAILURE) + .output("") + .error("error: no objects passed to create") + .build()); + + KubectlJobExecutor kubectlJobExecutor = + new KubectlJobExecutor( + jobExecutor, kubernetesConfigurationProperties, new SimpleMeterRegistry()); + + KubernetesManifest manifest = new KubernetesManifest(); + manifest.putAll( + Map.of( + "kind", + "Job", // arbitrary kind + "metadata", + Map.of("name", "my-name"))); + + KubernetesSelectorList labelSelectors = new KubernetesSelectorList(); + KubernetesSelector selector = + new KubernetesSelector(KubernetesSelector.Kind.EQUALS, "some-key", List.of("some-value")); + labelSelectors.addSelector(selector); + assertThat(labelSelectors.isNotEmpty()).isTrue(); + + // With label selectors, expect create to return null with the expectation + // that higher level code (e.g. KubernetesDeployManifestOperation) raises an + // exception if none of the deploy calls it makes result in kubectl actually + // deploying a manifest.
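+ // This mirrors the deploy case above: "no objects passed to create" combined with label selectors is treated as a benign no-op rather than surfaced as a KubectlException.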
+ KubernetesManifest returnedManifest = + kubectlJobExecutor.create( + mockKubernetesCredentials(), + manifest, + new InMemoryTaskRepository().create("task", "task"), + "operation", + labelSelectors); + assertThat(returnedManifest).isNull(); + } + + /** Returns a mock KubernetesCredentials object */ + private static KubernetesCredentials mockKubernetesCredentials() { + return mockKubernetesCredentials(""); + } + + /** + * Returns a mock KubernetesCredentials object which has a custom path set for the kubectl + * executable + */ + private static KubernetesCredentials mockKubernetesCredentials(String pathToExecutable) { + KubernetesCredentials credentials = mock(KubernetesCredentials.class); + when(credentials.getAccountName()).thenReturn("mock-account"); + when(credentials.getKubectlExecutable()).thenReturn(pathToExecutable); + return credentials; + } + + /** + * This is a helper class that is meant to execute a custom command instead of kubectl commands. + * Only meant to be used in tests where mocking certain kubectl calls proves to be tricky. This is + * currently used in tests that verify retry behavior for such calls. + */ + private static class TestScriptJobExecutor extends KubectlJobExecutor { + /** + * Depending on the custom script provided, to simulate retry attempts we need to let the + * script know when to emit an error message and when to emit a success message. These enum + * values govern that. + */ + private enum RetryBehavior { + SUCCESS_AFTER_INITIAL_FAILURE, + FAILED + } + + private final RetryBehavior retryBehavior; + + // this keeps track of how many times createJobRequest() has been invoked + private int createJobRequestInvokedCounter; + + TestScriptJobExecutor( + JobExecutor jobExecutor, + KubernetesConfigurationProperties kubernetesConfigurationProperties, + MeterRegistry meterRegistry, + RetryBehavior retryBehavior) { + super(jobExecutor, kubernetesConfigurationProperties, meterRegistry); + this.retryBehavior = retryBehavior; + this.createJobRequestInvokedCounter = 1; + } + + @Override + JobRequest createJobRequest(List<String> command, Optional<KubernetesManifest> manifest) { + // command[0] contains the path to the custom script. This path is read from the credentials + // object used for running the command. + // Note: CommandLine requires a File object containing the path to the script to be able to + // execute these scripts. This is different from running executables like kubectl. + CommandLine commandLine = new CommandLine(new File(command.get(0))); + + // this adds a special argument to the test script. The script can use this to decide at + // runtime if it needs to exit successfully or with a failure.
+ // This will be the first argument to the script + if (createJobRequestInvokedCounter > 1 + && retryBehavior == RetryBehavior.SUCCESS_AFTER_INITIAL_FAILURE) { + commandLine.addArgument("success"); + } + + createJobRequestInvokedCounter++; + + // update the command line to include all the other arguments to the script + for (int i = 1; i < command.size(); i++) { + commandLine.addArgument(command.get(i)); + } + + // depending on the presence of the manifest, an appropriate job request is created + if (manifest.isPresent()) { + String manifestAsJson = new Gson().toJson(manifest.get()); + return new JobRequest( + commandLine, new ByteArrayInputStream(manifestAsJson.getBytes(StandardCharsets.UTF_8))); + } + + return new JobRequest(commandLine, new ByteArrayInputStream(new byte[0])); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobOperationTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobOperationTest.java new file mode 100644 index 00000000000..5ff1f1a00e3 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/op/job/KubernetesRunJobOperationTest.java @@ -0,0 +1,192 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.op.job; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterators; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.kubernetes.KubernetesCloudProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.artifact.ResourceVersioner; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.ArtifactProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.ResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.job.KubernetesRunJobOperationDescription; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer; +import com.netflix.spinnaker.clouddriver.kubernetes.op.OperationResult; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesReplicaSetHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesServiceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.ManifestFetcher; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesSelectorList; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.moniker.Namer; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +final class KubernetesRunJobOperationTest { + private static final String NAMESPACE = "my-namespace"; + private static final String GENERATE_SUFFIX = "-abcd"; + private static final String DEPLOYED_JOB = "job my-job"; + private static final ResourcePropertyRegistry resourcePropertyRegistry = + new GlobalResourcePropertyRegistry( + ImmutableList.of(new KubernetesReplicaSetHandler(), new KubernetesServiceHandler()), + new KubernetesUnregisteredCustomResourceHandler()); + private static final Namer<KubernetesManifest> NAMER = new KubernetesManifestNamer(); + + @BeforeEach + void setTask() { + TaskRepository.threadLocalTask.set(new DefaultTask("task-id")); + } + + @Test + void deploysJobWithName() { + KubernetesRunJobOperationDescription runJobDescription = baseJobDescription("job.yml"); + OperationResult result = operate(runJobDescription); + + assertThat(result.getManifestNamesByNamespace()).containsOnlyKeys(NAMESPACE);
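+ // DEPLOYED_JOB ("job my-job") is the kind-plus-name form in which OperationResult records deployed manifests per namespace.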
assertThat(result.getManifestNamesByNamespace().get(NAMESPACE)) + .containsExactlyInAnyOrder(DEPLOYED_JOB); + } + + @Test + void deploysJobWithGenerateName() { + KubernetesRunJobOperationDescription runJobDescription = + baseJobDescription("job-generate-name.yml"); + OperationResult result = operate(runJobDescription); + + assertThat(result.getManifestNamesByNamespace()).containsOnlyKeys(NAMESPACE); + assertThat(result.getManifestNamesByNamespace().get(NAMESPACE)) + .containsExactlyInAnyOrder(DEPLOYED_JOB + GENERATE_SUFFIX); + } + + @Test + void overridesNamespace() { + String overrideNamespace = "override-namespace"; + KubernetesRunJobOperationDescription runJobDescription = + baseJobDescription("job.yml").setNamespace(overrideNamespace); + OperationResult result = operate(runJobDescription); + + assertThat(result.getManifestNamesByNamespace()).containsOnlyKeys(overrideNamespace); + assertThat(result.getManifestNamesByNamespace().get(overrideNamespace)).hasSize(1); + String job = + Iterators.getOnlyElement( + result.getManifestNamesByNamespace().get(overrideNamespace).iterator()); + // In this test, we don't care whether a suffix was added, we're just checking that the job + // ended up in the right namespace, so we only check that the entry starts with the expected + // job name. + assertThat(job).startsWith(DEPLOYED_JOB); + } + + private static KubernetesRunJobOperationDescription baseJobDescription(String manifest) { + KubernetesRunJobOperationDescription runJobDescription = + new KubernetesRunJobOperationDescription() + .setManifest( + ManifestFetcher.getManifest(KubernetesRunJobOperationTest.class, manifest).get(0)); + runJobDescription.setCredentials(getNamedAccountCredentials()); + return runJobDescription; + } + + private static KubernetesNamedAccountCredentials getNamedAccountCredentials() { + ManagedAccount managedAccount = new ManagedAccount(); + managedAccount.setName("my-account"); + + NamerRegistry.lookup() + .withProvider(KubernetesCloudProvider.ID) + .withAccount(managedAccount.getName()) + .setNamer(KubernetesManifest.class, new KubernetesManifestNamer()); + + KubernetesCredentials mockCredentials = getMockKubernetesCredential(); + KubernetesCredentials.Factory credentialFactory = mock(KubernetesCredentials.Factory.class); + when(credentialFactory.build(managedAccount)).thenReturn(mockCredentials); + return new KubernetesNamedAccountCredentials(managedAccount, credentialFactory); + } + + private static KubernetesCredentials getMockKubernetesCredential() { + KubernetesCredentials credentialsMock = mock(KubernetesCredentials.class); + when(credentialsMock.getKindProperties(any(KubernetesKind.class))) + .thenAnswer( + invocation -> + KubernetesKindProperties.withDefaultProperties( + invocation.getArgument(0, KubernetesKind.class))); + when(credentialsMock.getResourcePropertyRegistry()).thenReturn(resourcePropertyRegistry); + when(credentialsMock.deploy( + any(KubernetesManifest.class), + any(Task.class), + anyString(), + any(KubernetesSelectorList.class))) + .thenAnswer( + invocation -> { + KubernetesManifest result = + invocation.getArgument(0, KubernetesManifest.class).clone(); + if (Strings.isNullOrEmpty(result.getName())) { + // We can't apply if there is no name; throw an exception here + throw new KubectlJobExecutor.KubectlException( + "error: error when retrieving current configuration"); + } + return result; + }); + when(credentialsMock.create( + any(KubernetesManifest.class), + any(Task.class), + anyString(), + any(KubernetesSelectorList.class))) + .thenAnswer( + 
invocation -> { + // This simulates the fact that the Kubernetes API will add a suffix to a generated + // name. + KubernetesManifest result = + invocation.getArgument(0, KubernetesManifest.class).clone(); + if (Strings.isNullOrEmpty(result.getName())) { + Map metadata = (Map) result.get("metadata"); + metadata.put("name", metadata.get("generateName") + GENERATE_SUFFIX); + } + return result; + }); + when(credentialsMock.getNamer()).thenReturn(NAMER); + return credentialsMock; + } + + private static OperationResult operate(KubernetesRunJobOperationDescription description) { + ArtifactProvider artifactProvider = mock(ArtifactProvider.class); + when(artifactProvider.getArtifacts( + any(KubernetesKind.class), + any(String.class), + any(String.class), + any(KubernetesCredentials.class))) + .thenReturn(ImmutableList.of()); + ResourceVersioner resourceVersioner = new ResourceVersioner(artifactProvider); + return new KubernetesRunJobOperation(description, resourceVersioner) + .operate(ImmutableList.of()); + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProviderTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProviderTest.java new file mode 100644 index 00000000000..5d31683f274 --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/KubernetesJobProviderTest.java @@ -0,0 +1,179 @@ +/* + * Copyright 2021 Salesforce.com, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.provider.view; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.io.Resources; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.model.KubernetesManifestContainer; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.view.provider.KubernetesManifestProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.model.KubernetesJobStatus; +import com.netflix.spinnaker.clouddriver.kubernetes.security.KubernetesCredentials; +import com.netflix.spinnaker.clouddriver.model.JobState; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import io.kubernetes.client.util.Yaml; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +class KubernetesJobProviderTest { + KubernetesManifestProvider mockManifestProvider; + AccountCredentialsProvider credentialsProvider; + AccountCredentials accountCredentials; + KubernetesCredentials mockCredentials; + + @BeforeEach + public void setup() { + mockManifestProvider = mock(KubernetesManifestProvider.class); + credentialsProvider = mock(AccountCredentialsProvider.class); + accountCredentials = mock(AccountCredentials.class); + mockCredentials = mock(KubernetesCredentials.class); + + doReturn(mockCredentials).when(accountCredentials).getCredentials(); + doReturn(accountCredentials).when(credentialsProvider).getCredentials(anyString()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testFailedJobWithContainerLogsAvailable(boolean detailedPodStatus) { + // setup + KubernetesManifest testManifest = + Yaml.loadAs(getResource("base-with-completions.yml"), KubernetesManifest.class); + KubernetesManifest overlay = + Yaml.loadAs(getResource("failed-job.yml"), KubernetesManifest.class); + testManifest.putAll(overlay); + + doReturn( + KubernetesManifestContainer.builder() + .account("mock_account") + .name("a") + .manifest(testManifest) + .build()) + .when(mockManifestProvider) + .getManifest(anyString(), anyString(), anyString(), anyBoolean()); + + doReturn(ImmutableList.of(testManifest)).when(mockCredentials).list(any(), isNull(), any()); + + // when + KubernetesJobProvider kubernetesJobProvider = + new KubernetesJobProvider(credentialsProvider, mockManifestProvider, detailedPodStatus); + KubernetesJobStatus jobStatus = kubernetesJobProvider.collectJob("mock_account", "a", "b"); + + // then + assertNotNull(jobStatus.getJobState()); + assertEquals(/* expected= */ JobState.Failed, /* actual= */ jobStatus.getJobState()); + + 
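+    // The status message and reason should surface the failed Job condition from the fixture
+    // manifest.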
assertThat(jobStatus.getMessage()).isEqualTo("Job has reached the specified backoff limit"); + assertThat(jobStatus.getReason()).isEqualTo("BackoffLimitExceeded"); + + if (detailedPodStatus) { + assertThat(jobStatus.getPods().size()).isEqualTo(1); + } else { + assertThat(jobStatus.getPods()).isEmpty(); + } + + assertThat(jobStatus.getFailureDetails()) + .isEqualTo( + "Pod: 'hello' had errors.\n" + + " Container: 'some-container-name' exited with code: 1.\n" + + " Status: Error.\n" + + " Logs: Failed to download the file: foo.\n" + + "GET Request failed with status code', 404, 'Expected', )\n"); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testFailedJobWithoutContainerLogs(boolean detailedPodStatus) { + // setup + KubernetesManifest testManifest = + Yaml.loadAs(getResource("base-with-completions.yml"), KubernetesManifest.class); + KubernetesManifest overlay = + Yaml.loadAs(getResource("runjob-deadline-exceeded.yml"), KubernetesManifest.class); + testManifest.putAll(overlay); + doReturn( + KubernetesManifestContainer.builder() + .account("mock_account") + .name("a") + .manifest(testManifest) + .build()) + .when(mockManifestProvider) + .getManifest(anyString(), anyString(), anyString(), anyBoolean()); + doReturn(ImmutableList.of(testManifest)).when(mockCredentials).list(any(), isNull(), any()); + + // when + KubernetesJobProvider kubernetesJobProvider = + new KubernetesJobProvider(credentialsProvider, mockManifestProvider, detailedPodStatus); + KubernetesJobStatus jobStatus = kubernetesJobProvider.collectJob("mock_account", "a", "b"); + + // then + assertNotNull(jobStatus.getJobState()); + assertEquals(/* expected= */ JobState.Failed, /* actual= */ jobStatus.getJobState()); + + assertThat(jobStatus.getMessage()).isEqualTo("Job was active longer than specified deadline"); + assertThat(jobStatus.getReason()).isEqualTo("DeadlineExceeded"); + + assertNull(jobStatus.getFailureDetails()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testCollectJobWithoutPod(boolean detailedPodStatus) { + // setup + KubernetesManifest testManifest = + Yaml.loadAs(getResource("base-with-completions.yml"), KubernetesManifest.class); + doReturn( + KubernetesManifestContainer.builder() + .account("mock_account") + .name("a") + .manifest(testManifest) + .build()) + .when(mockManifestProvider) + .getManifest(anyString(), anyString(), anyString(), anyBoolean()); + doReturn(ImmutableList.of()).when(mockCredentials).list(any(), isNull(), any()); + + KubernetesJobProvider kubernetesJobProvider = + new KubernetesJobProvider(credentialsProvider, mockManifestProvider, detailedPodStatus); + + assertDoesNotThrow(() -> kubernetesJobProvider.collectJob("mock_account", "location", "id")); + } + + private String getResource(String name) { + try { + return Resources.toString( + KubernetesJobProviderTest.class.getResource(name), StandardCharsets.UTF_8); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsLifecycleHandlerTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsLifecycleHandlerTest.java new file mode 100644 index 00000000000..3bbda9be0ff --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsLifecycleHandlerTest.java @@ -0,0 +1,123 @@ +/* + * Copyright 2020 Armory + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.KubernetesProvider; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgent; +import com.netflix.spinnaker.clouddriver.kubernetes.caching.agent.KubernetesCachingAgentDispatcher; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesConfigurationProperties; +import java.util.Collections; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.ArgumentMatchers; + +public class KubernetesCredentialsLifecycleHandlerTest { + KubernetesProvider provider; + KubernetesCachingAgentDispatcher cachingAgentDispatcher; + KubernetesNamedAccountCredentials namedCredentials; + KubernetesCredentials kubernetesCredentials; + KubernetesConfigurationProperties kubernetesConfigurationProperties; + + @BeforeEach + void setup() { + provider = new KubernetesProvider(); + cachingAgentDispatcher = mock(KubernetesCachingAgentDispatcher.class); + namedCredentials = mock(KubernetesNamedAccountCredentials.class); + kubernetesCredentials = mock(KubernetesCredentials.class); + kubernetesConfigurationProperties = new KubernetesConfigurationProperties(); + + when(namedCredentials.getCredentials()).thenReturn(kubernetesCredentials); + } + + @DisplayName( + "parameterized test to see how loadNamespacesInAccount config property works when adding credentials") + @ParameterizedTest(name = "{index} => loadNamespacesInAccount = {0}") + @ValueSource(booleans = {true, false}) + public void testAddCredentials(boolean loadNamespacesInAccount) { + // setup: + when(cachingAgentDispatcher.buildAllCachingAgents(ArgumentMatchers.any())) + .thenAnswer(d -> Collections.singleton(mock(KubernetesCachingAgent.class))); + kubernetesConfigurationProperties.setLoadNamespacesInAccount(loadNamespacesInAccount); + KubernetesCredentialsLifecycleHandler handler = + new KubernetesCredentialsLifecycleHandler( + provider, cachingAgentDispatcher, kubernetesConfigurationProperties); + + // Check we start with no agents + assertThat(provider.getAgents()).isEmpty(); + when(kubernetesCredentials.getDeclaredNamespaces()).thenReturn(ImmutableList.of()); + + // when: + handler.credentialsAdded(namedCredentials); + + // then: + if (loadNamespacesInAccount) { + verify(kubernetesCredentials, times(1)).getDeclaredNamespaces(); + } else { + verify(kubernetesCredentials, never()).getDeclaredNamespaces(); + } + 
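+    // Namespaces should only be fetched eagerly when loadNamespacesInAccount is enabled.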
// We should have added an agent
+    assertThat(provider.getAgents()).hasSize(1);
+
+    // when:
+    handler.credentialsAdded(namedCredentials);
+
+    // then:
+    // A second agent should have been added for the same credentials
+    assertThat(provider.getAgents()).hasSize(2);
+  }
+
+  @Test
+  public void testRemoveCredentials() {
+    String ACCOUNT1 = "account1";
+    String ACCOUNT2 = "account2";
+
+    KubernetesCachingAgent agent1 = mock(KubernetesCachingAgent.class);
+    when(agent1.handlesAccount(ACCOUNT1)).thenReturn(true);
+
+    KubernetesCachingAgent agent2 = mock(KubernetesCachingAgent.class);
+    when(agent2.handlesAccount(ACCOUNT2)).thenReturn(true);
+
+    provider.addAgents(List.of(agent1, agent2));
+
+    KubernetesCredentialsLifecycleHandler handler =
+        new KubernetesCredentialsLifecycleHandler(
+            provider, null, kubernetesConfigurationProperties);
+
+    assertThat(provider.getAgents()).hasSize(2);
+
+    KubernetesNamedAccountCredentials cred1 = mock(KubernetesNamedAccountCredentials.class);
+    when(cred1.getName()).thenReturn(ACCOUNT1);
+    handler.credentialsDeleted(cred1);
+
+    // We removed account1 so only agent2 should remain
+    assertThat(provider.getAgents()).hasSize(1);
+    assertThat(provider.getAgents()).contains(agent2);
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsTest.java
new file mode 100644
index 00000000000..d87c5747845
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesCredentialsTest.java
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2020 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.netflix.spinnaker.clouddriver.kubernetes.security; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.google.gson.JsonSyntaxException; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.ManualClock; +import com.netflix.spectator.api.NoopRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.Tag; +import com.netflix.spectator.api.Timer; +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.kubernetes.config.KubernetesAccountProperties.ManagedAccount; +import com.netflix.spinnaker.clouddriver.kubernetes.description.AccountResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.GlobalResourcePropertyRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.description.KubernetesSpinnakerKindMap; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind; +import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesManifest; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesManifestNamer; +import com.netflix.spinnaker.clouddriver.kubernetes.names.KubernetesNamerRegistry; +import com.netflix.spinnaker.clouddriver.kubernetes.op.handler.KubernetesUnregisteredCustomResourceHandler; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor.KubectlException; +import com.netflix.spinnaker.clouddriver.kubernetes.op.job.KubectlJobExecutor.KubectlNotFoundException; +import com.netflix.spinnaker.kork.configserver.CloudConfigResourceService; +import com.netflix.spinnaker.kork.configserver.ConfigFileService; +import java.util.HashMap; +import org.junit.jupiter.api.Test; + +final class KubernetesCredentialsTest { + private static final String ACCOUNT_NAME = "my-account"; + private static final String DEPLOYMENT_NAME = "my-deployment"; + private static final String NAMESPACE = "my-namespace"; + private final String OP_NAME = "KubernetesCredentialsTest"; + private final Task task = new DefaultTask("task-id"); + + private KubernetesCredentials getCredentials(Registry registry, KubectlJobExecutor jobExecutor) { + KubernetesCredentials.Factory factory = + new KubernetesCredentials.Factory( + registry, + new KubernetesNamerRegistry(ImmutableList.of(new KubernetesManifestNamer())), + jobExecutor, + new ConfigFileService(new CloudConfigResourceService()), + new AccountResourcePropertyRegistry.Factory( + new GlobalResourcePropertyRegistry( + ImmutableList.of(), new KubernetesUnregisteredCustomResourceHandler())), + new KubernetesKindRegistry.Factory( + new GlobalKubernetesKindRegistry(ImmutableList.of())), + new KubernetesSpinnakerKindMap(ImmutableList.of()), + new GlobalResourcePropertyRegistry( + ImmutableList.of(), new KubernetesUnregisteredCustomResourceHandler())); + ManagedAccount managedAccount = new ManagedAccount(); + managedAccount.setName("my-account"); + return factory.build(managedAccount); + } + + private 
KubernetesManifest getManifest() {
+    KubernetesManifest manifest = new KubernetesManifest();
+    manifest.put("metadata", new HashMap<>());
+    manifest.setName(DEPLOYMENT_NAME);
+    manifest.setNamespace(NAMESPACE);
+    manifest.setKind(KubernetesKind.DEPLOYMENT);
+    return manifest;
+  }
+
+  @Test
+  void metricTagsForSuccessfulDeploy() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+    credentials.deploy(getManifest(), task, OP_NAME, new KubernetesSelectorList());
+
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+    assertThat(timer.id().tags())
+        .containsExactlyInAnyOrder(
+            Tag.of("account", ACCOUNT_NAME),
+            Tag.of("action", "deploy"),
+            Tag.of("kinds", KubernetesKind.DEPLOYMENT.toString()),
+            Tag.of("namespace", NAMESPACE),
+            Tag.of("success", "true"));
+  }
+
+  @Test
+  void metricTagsForSuccessfulList() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+    credentials.list(
+        ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET), NAMESPACE);
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+
+    assertThat(timer.id().tags())
+        .containsExactlyInAnyOrder(
+            Tag.of("account", ACCOUNT_NAME),
+            Tag.of("action", "list"),
+            Tag.of("kinds", "deployment,replicaSet"),
+            Tag.of("namespace", NAMESPACE),
+            Tag.of("success", "true"));
+  }
+
+  @Test
+  void metricTagsForSuccessfulListNoNamespace() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+    credentials.list(ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET), null);
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+
+    assertThat(timer.id().tags()).contains(Tag.of("namespace", "none"));
+  }
+
+  @Test
+  void metricTagsForSuccessfulListEmptyNamespace() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+    credentials.list(ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET), "");
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+
+    assertThat(timer.id().tags()).contains(Tag.of("namespace", "none"));
+  }
+
+  @Test
+  void returnValueForSuccessfulList() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    KubernetesManifest manifest = getManifest();
+    when(jobExecutor.list(eq(credentials), any(), any(), any()))
+        .thenReturn(ImmutableList.of(manifest));
+    ImmutableList<KubernetesManifest> result =
+        credentials.list(
+            ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET), NAMESPACE);
+    assertThat(result).containsExactly(manifest);
+  }
+
+  @Test
+  void timeRecordedForSuccessfulList() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+
+    ManualClock clock = new ManualClock();
+    Registry registry = new DefaultRegistry(clock);
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    clock.setMonotonicTime(1000);
+    when(jobExecutor.list(eq(credentials), any(), any(), any()))
+        .then(
+            call -> {
+              clock.setMonotonicTime(1500);
+              return ImmutableList.of();
+            });
+    credentials.list(
+        ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET), NAMESPACE);
+
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+    assertThat(timer.totalTime()).isEqualTo(500);
+  }
+
+  @Test
+  void metricTagsForListThrowingKubectlException() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    when(jobExecutor.list(eq(credentials), any(), any(), any()))
+        .thenThrow(
+            new KubectlException(
+                "Failed to parse kubectl output: failure", new JsonSyntaxException("failure")));
+
+    assertThatThrownBy(
+            () ->
+                credentials.list(
+                    ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET),
+                    NAMESPACE))
+        .isInstanceOf(KubectlException.class);
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+
+    assertThat(timer.id().tags())
+        .containsExactlyInAnyOrder(
+            Tag.of("account", ACCOUNT_NAME),
+            Tag.of("action", "list"),
+            Tag.of("kinds", "deployment,replicaSet"),
+            Tag.of("namespace", NAMESPACE),
+            Tag.of("success", "false"),
+            Tag.of("reason", "KubectlException"));
+  }
+
+  @Test
+  void propagatedExceptionForListThrowingKubectlException() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    KubectlException exception =
+        new KubectlException(
+            "Failed to parse kubectl output: failure", new JsonSyntaxException("failure"));
+    when(jobExecutor.list(eq(credentials), any(), any(), any())).thenThrow(exception);
+
+    // Assert that a KubectlException is passed through without modification
+    assertThatThrownBy(
+            () ->
+                credentials.list(
+                    ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET),
+                    NAMESPACE))
+        .isEqualTo(exception);
+  }
+
+  @Test
+  void timeRecordedForListThrowingKubectlException() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+
+    ManualClock clock = new ManualClock();
+    Registry registry = new DefaultRegistry(clock);
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    clock.setMonotonicTime(1000);
+    when(jobExecutor.list(eq(credentials), any(), any(), any()))
+        .then(
+            call -> {
+              clock.setMonotonicTime(1500);
+              throw new KubectlException(
+                  "Failed to parse kubectl output: failure", new JsonSyntaxException("failure"));
+            });
+    assertThatThrownBy(
+            () ->
+                credentials.list(
+                    ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET),
+                    NAMESPACE))
+        .isInstanceOf(KubectlException.class);
+
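+    // The elapsed time should still be recorded on the timer even though the call failed.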
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+    assertThat(timer.totalTime()).isEqualTo(500);
+  }
+
+  @Test
+  void metricTagsForListThrowingOtherException() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    when(jobExecutor.list(eq(credentials), any(), any(), any()))
+        .thenThrow(new CustomException("Kubernetes error"));
+
+    assertThatThrownBy(
+            () ->
+                credentials.list(
+                    ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET),
+                    NAMESPACE))
+        .isInstanceOf(CustomException.class);
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+
+    assertThat(timer.id().tags())
+        .containsExactlyInAnyOrder(
+            Tag.of("account", ACCOUNT_NAME),
+            Tag.of("action", "list"),
+            Tag.of("kinds", "deployment,replicaSet"),
+            Tag.of("namespace", NAMESPACE),
+            Tag.of("success", "false"),
+            Tag.of("reason", "CustomException"));
+  }
+
+  @Test
+  void timeRecordedForListThrowingOtherException() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+
+    ManualClock clock = new ManualClock();
+    Registry registry = new DefaultRegistry(clock);
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    clock.setMonotonicTime(1000);
+    when(jobExecutor.list(eq(credentials), any(), any(), any()))
+        .then(
+            call -> {
+              clock.setMonotonicTime(1500);
+              throw new CustomException("Kubernetes error");
+            });
+    assertThatThrownBy(
+            () ->
+                credentials.list(
+                    ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET),
+                    NAMESPACE))
+        .isInstanceOf(CustomException.class);
+
+    ImmutableList<Timer> timers = registry.timers().collect(toImmutableList());
+    assertThat(timers).hasSize(1);
+
+    Timer timer = timers.get(0);
+    assertThat(timer.id().name()).isEqualTo("kubernetes.api");
+    assertThat(timer.totalTime()).isEqualTo(500);
+  }
+
+  @Test
+  void propagatedExceptionForListThrowingOtherException() {
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    Registry registry = new DefaultRegistry();
+    KubernetesCredentials credentials = getCredentials(registry, jobExecutor);
+
+    Exception cause = new CustomException("Kubernetes error");
+    when(jobExecutor.list(eq(credentials), any(), any(), any())).thenThrow(cause);
+
+    // Assert that the exception from the executor is propagated to the caller without
+    // modification
+    assertThatThrownBy(
+            () ->
+                credentials.list(
+                    ImmutableList.of(KubernetesKind.DEPLOYMENT, KubernetesKind.REPLICA_SET),
+                    NAMESPACE))
+        .isEqualTo(cause);
+  }
+
+  @Test
+  void replaceWhenResourceExists() {
+    KubernetesManifest manifest = getManifest();
+    KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class);
+    KubernetesCredentials credentials = getCredentials(new NoopRegistry(), jobExecutor);
+    KubernetesSelectorList selectorList = new KubernetesSelectorList();
+    when(jobExecutor.create(credentials, manifest, task, OP_NAME, selectorList))
+        .thenThrow(new KubectlException("Create failed: Error from server (AlreadyExists)"));
+    when(jobExecutor.replace(credentials, manifest, task, OP_NAME)).thenReturn(manifest);
+
+    KubernetesManifest result = credentials.createOrReplace(getManifest(), task, 
OP_NAME); + assertThat(result).isEqualTo(manifest); + } + + @Test + void replaceWhenResourceDoesNotExist() { + KubernetesManifest manifest = getManifest(); + KubectlJobExecutor jobExecutor = mock(KubectlJobExecutor.class); + KubernetesCredentials credentials = getCredentials(new NoopRegistry(), jobExecutor); + KubernetesSelectorList selectorList = new KubernetesSelectorList(); + when(jobExecutor.replace(credentials, manifest, task, OP_NAME)) + .thenThrow(new KubectlNotFoundException("Not found")); + when(jobExecutor.create(credentials, manifest, task, OP_NAME, selectorList)) + .thenReturn(manifest); + + KubernetesManifest result = credentials.createOrReplace(getManifest(), task, OP_NAME); + assertThat(result).isEqualTo(manifest); + } + + // This is an error type that will only ever be thrown by stubs in this test; that way we can + // assert that it is thrown and be sure that we aren't accidentally passing due to an unrelated + // exception. + private static class CustomException extends RuntimeException { + CustomException(String message) { + super(message); + } + } +} diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesKindRegistryTest.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesKindRegistryTest.java new file mode 100644 index 00000000000..a1c8c1b7f6c --- /dev/null +++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/KubernetesKindRegistryTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2020 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.security;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesApiGroup;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKind;
+import com.netflix.spinnaker.clouddriver.kubernetes.description.manifest.KubernetesKindProperties;
+import java.util.Collection;
+import java.util.Optional;
+import org.junit.jupiter.api.Test;
+
+final class KubernetesKindRegistryTest {
+  private static final KubernetesApiGroup CUSTOM_API_GROUP = KubernetesApiGroup.fromString("test");
+  private static final KubernetesKind CUSTOM_KIND =
+      KubernetesKind.from("customKind", CUSTOM_API_GROUP);
+  private static final KubernetesKindProperties CUSTOM_KIND_PROPERTIES =
+      KubernetesKindProperties.create(CUSTOM_KIND, true);
+  private static final KubernetesKindProperties REPLICA_SET_PROPERTIES =
+      KubernetesKindProperties.create(KubernetesKind.REPLICA_SET, true);
+
+  private KubernetesKindRegistry.Factory getFactory(
+      Collection<KubernetesKindProperties> globalKinds) {
+    return new KubernetesKindRegistry.Factory(new GlobalKubernetesKindRegistry(globalKinds));
+  }
+
+  @Test
+  void getKindProperties() {
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of())
+            .create(k -> Optional.empty(), ImmutableList.of(CUSTOM_KIND_PROPERTIES));
+    assertThat(kindRegistry.getKindPropertiesOrDefault(CUSTOM_KIND))
+        .isEqualTo(CUSTOM_KIND_PROPERTIES);
+  }
+
+  @Test
+  void getKindPropertiesFallsBackToGlobal() {
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of(CUSTOM_KIND_PROPERTIES)).create();
+    assertThat(kindRegistry.getKindPropertiesOrDefault(CUSTOM_KIND))
+        .isEqualTo(CUSTOM_KIND_PROPERTIES);
+  }
+
+  @Test
+  void getKindPropertiesFallsBackToDefault() {
+    KubernetesKindRegistry kindRegistry = getFactory(ImmutableList.of()).create();
+    assertThat(kindRegistry.getKindPropertiesOrDefault(CUSTOM_KIND))
+        .isEqualTo(KubernetesKindProperties.withDefaultProperties(CUSTOM_KIND));
+  }
+
+  @Test
+  void getKindPropertiesLooksUpCrd() {
+    KubernetesKindProperties customProperties = KubernetesKindProperties.create(CUSTOM_KIND, false);
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of())
+            .create(k -> Optional.of(customProperties), ImmutableList.of());
+    assertThat(kindRegistry.getKindPropertiesOrDefault(CUSTOM_KIND)).isEqualTo(customProperties);
+  }
+
+  @Test
+  void emptyCRDLookupFallsBackToDefault() {
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of()).create(k -> Optional.empty(), ImmutableList.of());
+    assertThat(kindRegistry.getKindPropertiesOrDefault(CUSTOM_KIND))
+        .isEqualTo(KubernetesKindProperties.withDefaultProperties(CUSTOM_KIND));
+  }
+
+  @Test
+  void isKindRegisteredFalseForUnregisteredKind() {
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of()).create(k -> Optional.empty(), ImmutableList.of());
+    assertThat(kindRegistry.isKindRegistered(CUSTOM_KIND)).isFalse();
+  }
+
+  @Test
+  void isKindRegisteredTrueForGlobalKind() {
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of(REPLICA_SET_PROPERTIES))
+            .create(k -> Optional.empty(), ImmutableList.of());
+    assertThat(kindRegistry.isKindRegistered(KubernetesKind.REPLICA_SET)).isTrue();
+  }
+
+  @Test
+  void isKindRegisteredTrueForRegisteredKind() {
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of())
+            .create(k -> Optional.empty(), ImmutableList.of(CUSTOM_KIND_PROPERTIES));
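+    // Kinds registered directly on the account-level registry are visible without a CRD lookup.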
+    assertThat(kindRegistry.isKindRegistered(CUSTOM_KIND)).isTrue();
+  }
+
+  @Test
+  void isKindRegisteredTrueForSuccessfulCRDLookup() {
+    KubernetesKindProperties customProperties = KubernetesKindProperties.create(CUSTOM_KIND, false);
+    KubernetesKindRegistry kindRegistry =
+        getFactory(ImmutableList.of())
+            .create(k -> Optional.of(customProperties), ImmutableList.of());
+    assertThat(kindRegistry.isKindRegistered(CUSTOM_KIND)).isTrue();
+  }
+}
diff --git a/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/NoopCredentialsLifecycleHandler.java b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/NoopCredentialsLifecycleHandler.java
new file mode 100644
index 00000000000..04ea71a1c91
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/java/com/netflix/spinnaker/clouddriver/kubernetes/security/NoopCredentialsLifecycleHandler.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2020 Armory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.spinnaker.clouddriver.kubernetes.security;
+
+import com.netflix.spinnaker.credentials.CredentialsLifecycleHandler;
+
+public class NoopCredentialsLifecycleHandler
+    implements CredentialsLifecycleHandler<KubernetesNamedAccountCredentials> {
+  @Override
+  public void credentialsAdded(KubernetesNamedAccountCredentials credentials) {}
+
+  @Override
+  public void credentialsUpdated(KubernetesNamedAccountCredentials credentials) {}
+
+  @Override
+  public void credentialsDeleted(KubernetesNamedAccountCredentials credentials) {}
+}
diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/applications-response-from-front50.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/applications-response-from-front50.json
new file mode 100644
index 00000000000..6e32c3551c3
--- /dev/null
+++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/agent/applications-response-from-front50.json
@@ -0,0 +1,26 @@
+[
+  {
+    "cloudProviders": null,
+    "createTs": null,
+    "description": null,
+    "email": "test@abc.com",
+    "instancePort": 80,
+    "lastModifiedBy": "test@abc.com",
+    "name": "MY",
+    "permissions": {},
+    "trafficGuards": [],
+    "updateTs": "1627312672356"
+  },
+  {
+    "cloudProviders": null,
+    "createTs": null,
+    "description": null,
+    "email": "test1@abc.com",
+    "instancePort": 80,
+    "lastModifiedBy": "test1@abc.com",
+    "name": "APP2",
+    "permissions": {},
+    "trafficGuards": [],
+    "updateTs": "1627312672359"
+  }
+]
diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-pod-014.yml
b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-pod-014.yml new file mode 100644 index 00000000000..ed9bbce1750 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-pod-014.yml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backend + artifact.spinnaker.io/type: kubernetes/replicaSet + artifact.spinnaker.io/version: v014 + moniker.spinnaker.io/application: backendapp + moniker.spinnaker.io/cluster: replicaSet backend + moniker.spinnaker.io/sequence: "14" + creationTimestamp: "2020-07-24T14:08:00Z" + generateName: backend-v014- + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: backendapp + moniker.spinnaker.io/sequence: "14" + name: backend-v014-xkvwh + namespace: backend-ns + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: backend-v014 + uid: ded56bd9-2034-4196-a7e4-b6b736c997ba + resourceVersion: "83985048" + selfLink: /api/v1/namespaces/backend-ns/pods/backend-v014-xkvwh + uid: d05606fe-aa69-4f16-b56a-371c2313fe9c +spec: + containers: + - image: gcr.io/my-gcr-repository/backend-service@sha256:2eefbb528a4619311555f92ea9b781af101c62f4c70b73c4a5e93d15624ba94c + imagePullPolicy: IfNotPresent + name: backend-service + ports: + - containerPort: 4000 + protocol: TCP + resources: + requests: + cpu: 10m + memory: 8Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: [] + dnsPolicy: ClusterFirst + enableServiceLinks: true + initContainers: [] + nodeName: gke-spinnaker-e2-small-c528c905-f1ub + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: [] + volumes: [] +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2020-07-24T14:08:11Z" + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: "2020-07-24T14:08:25Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: "2020-07-24T14:08:25Z" + status: "True" + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: "2020-07-24T14:08:00Z" + status: "True" + type: PodScheduled + containerStatuses: + - containerID: containerd://ab3d6b767a3dbb4524897ff8f6af035e2cfed8a58aa1451869e4377ee0489fa9 + image: sha256:6146cbec26fd547a5975fb6a48c860455a13a50bc9a61c398c8bd0b41af8dbe7 + imageID: gcr.io/my-gcr-repository/backend-service@sha256:2eefbb528a4619311555f92ea9b781af101c62f4c70b73c4a5e93d15624ba94c + lastState: {} + name: backend-service + ready: true + restartCount: 0 + started: true + state: + running: + startedAt: "2020-07-24T14:08:23Z" + hostIP: 10.128.0.25 + initContainerStatuses: [] + phase: Running + podIP: 10.52.2.9 + podIPs: + - ip: 10.52.2.9 + qosClass: Burstable + startTime: "2020-07-24T14:08:00Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-pod-015.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-pod-015.yml new file mode 100644 index 00000000000..cf83215a503 --- /dev/null +++ 
b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-pod-015.yml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backend + artifact.spinnaker.io/type: kubernetes/replicaSet + artifact.spinnaker.io/version: v015 + moniker.spinnaker.io/application: backendapp + moniker.spinnaker.io/cluster: replicaSet backend + moniker.spinnaker.io/sequence: "15" + creationTimestamp: "2020-07-24T17:59:52Z" + generateName: backend-v015- + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: backendapp + load-balancer: backend + moniker.spinnaker.io/sequence: "15" + name: backend-v015-vhglj + namespace: backend-ns + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: backend-v015 + uid: 518fdd80-8949-47c4-806e-1fd3ac1e1d3c + resourceVersion: "83984595" + selfLink: /api/v1/namespaces/backend-ns/pods/backend-v015-vhglj + uid: 45db7673-e3d2-4746-9ecd-38f868f853e5 +spec: + containers: + - image: gcr.io/my-gcr-repository/backend-service@sha256:51f29a570a484fbae4da912199ff27ed21f91b1caf51564a9d3afe3a201c1f32 + imagePullPolicy: IfNotPresent + name: backend-service + ports: + - containerPort: 4000 + protocol: TCP + resources: + requests: + cpu: 10m + memory: 8Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: [] + dnsPolicy: ClusterFirst + enableServiceLinks: true + initContainers: [] + nodeName: gke-spinnaker-e2-small-c528c905-w20h + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: [] + volumes: [] +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2020-07-24T17:59:56Z" + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:00:07Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:00:07Z" + status: "True" + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: "2020-07-24T17:59:52Z" + status: "True" + type: PodScheduled + containerStatuses: + - containerID: containerd://a003e133b2b7d9e72dc2276776274f299e2267ce718d7493e5710bcfe68040dc + image: sha256:8dc352f819381bfb316dc470a30515e8538aace729e456c63eba775da7c5edf6 + imageID: gcr.io/my-gcr-repository/backend-service@sha256:51f29a570a484fbae4da912199ff27ed21f91b1caf51564a9d3afe3a201c1f32 + lastState: {} + name: backend-service + ready: true + restartCount: 0 + started: true + state: + running: + startedAt: "2020-07-24T18:00:05Z" + hostIP: 10.128.0.14 + initContainerStatuses: [] + phase: Running + podIP: 10.52.1.15 + podIPs: + - ip: 10.52.1.15 + qosClass: Burstable + startTime: "2020-07-24T17:59:53Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-rs-014.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-rs-014.yml new file mode 100644 index 00000000000..45bcb72d137 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-rs-014.yml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backend + 
artifact.spinnaker.io/type: kubernetes/replicaSet + artifact.spinnaker.io/version: v014 + moniker.spinnaker.io/application: backendapp + moniker.spinnaker.io/cluster: replicaSet backend + moniker.spinnaker.io/sequence: "14" + traffic.spinnaker.io/load-balancers: '["service backendlb"]' + creationTimestamp: "2020-07-15T01:39:59Z" + generation: 2 + labels: + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: backendapp + moniker.spinnaker.io/sequence: "14" + name: backend-v014 + namespace: backend-ns + resourceVersion: "83985046" + selfLink: /apis/apps/v1/namespaces/backend-ns/replicasets/backend-v014 + uid: ded56bd9-2034-4196-a7e4-b6b736c997ba +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backend + artifact.spinnaker.io/type: kubernetes/replicaSet + artifact.spinnaker.io/version: v014 + moniker.spinnaker.io/application: kubernetes + moniker.spinnaker.io/cluster: replicaSet backend + moniker.spinnaker.io/sequence: "14" + creationTimestamp: null + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: kubernetes + moniker.spinnaker.io/sequence: "14" + spec: + containers: + - image: gcr.io/my-gcr-repository/backend-service@sha256:2eefbb528a4619311555f92ea9b781af101c62f4c70b73c4a5e93d15624ba94c + imagePullPolicy: IfNotPresent + name: backend-service + ports: + - containerPort: 4000 + protocol: TCP + resources: + requests: + cpu: 10m + memory: 8Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + fullyLabeledReplicas: 1 + observedGeneration: 2 + readyReplicas: 1 + replicas: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-rs-015.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-rs-015.yml new file mode 100644 index 00000000000..4ca30231407 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-rs-015.yml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backend + artifact.spinnaker.io/type: kubernetes/replicaSet + artifact.spinnaker.io/version: v015 + moniker.spinnaker.io/application: backendapp + moniker.spinnaker.io/cluster: replicaSet backend + moniker.spinnaker.io/sequence: "15" + traffic.spinnaker.io/load-balancers: '["service backendlb"]' + creationTimestamp: "2020-07-24T17:59:52Z" + generation: 1 + labels: + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: backendapp + moniker.spinnaker.io/sequence: "15" + name: backend-v015 + namespace: backend-ns + resourceVersion: "83984596" + selfLink: /apis/apps/v1/namespaces/backend-ns/replicasets/backend-v015 + uid: 518fdd80-8949-47c4-806e-1fd3ac1e1d3c +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backend + artifact.spinnaker.io/type: kubernetes/replicaSet + artifact.spinnaker.io/version: v015 + moniker.spinnaker.io/application: kubernetes + moniker.spinnaker.io/cluster: replicaSet backend 
+ moniker.spinnaker.io/sequence: "15" + creationTimestamp: null + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: kubernetes + load-balancer: backend + moniker.spinnaker.io/sequence: "15" + spec: + containers: + - image: gcr.io/my-gcr-repository/backend-service@sha256:51f29a570a484fbae4da912199ff27ed21f91b1caf51564a9d3afe3a201c1f32 + imagePullPolicy: IfNotPresent + name: backend-service + ports: + - containerPort: 4000 + protocol: TCP + resources: + requests: + cpu: 10m + memory: 8Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + fullyLabeledReplicas: 1 + observedGeneration: 1 + readyReplicas: 1 + replicas: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-service.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-service.yml new file mode 100644 index 00000000000..6ee7dfe57fa --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/backend-service.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + artifact.spinnaker.io/location: backend-ns + artifact.spinnaker.io/name: backendlb + artifact.spinnaker.io/type: kubernetes/service + moniker.spinnaker.io/application: backendapp + moniker.spinnaker.io/cluster: service backendlb + creationTimestamp: "2020-03-08T02:21:22Z" + labels: + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: backendapp + name: backendlb + namespace: backend-ns + resourceVersion: "76025043" + selfLink: /api/v1/namespaces/backend-ns/services/backendlb + uid: 7caf0eac-4850-4c12-a2f9-0bba063da35e +spec: + clusterIP: 10.117.0.129 + ports: + - port: 4000 + protocol: TCP + targetPort: 4000 + selector: + load-balancer: backend + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-deployment.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-deployment.yml new file mode 100644 index 00000000000..2b6c8f92c7f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-deployment.yml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + deployment.kubernetes.io/revision: "2" + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + creationTimestamp: "2020-07-24T18:45:00Z" + generation: 2 + labels: + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + name: frontend + namespace: frontend-ns + resourceVersion: "84008208" + selfLink: /apis/apps/v1/namespaces/frontend-ns/deployments/frontend + uid: 4060c028-521b-4a81-ad44-b06ebee25f2e +spec: + progressDeadlineSeconds: 600 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + 
template: + metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + creationTimestamp: null + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + spec: + containers: + - image: nginx:1.19.1 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 2 + conditions: + - lastTransitionTime: "2020-07-24T18:45:20Z" + lastUpdateTime: "2020-07-24T18:45:20Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-07-24T18:45:00Z" + lastUpdateTime: "2020-07-24T18:47:23Z" + message: ReplicaSet "frontend-5c6559f75f" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 2 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-pod-1.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-pod-1.yml new file mode 100644 index 00000000000..6abfb6cc188 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-pod-1.yml @@ -0,0 +1,106 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + sidecar.istio.io/inject: "false" + creationTimestamp: "2020-07-24T18:47:12Z" + generateName: frontend-5c6559f75f- + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + pod-template-hash: 5c6559f75f + name: frontend-5c6559f75f-4ml8h + namespace: frontend-ns + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: frontend-5c6559f75f + uid: 29630998-bdee-4586-ac64-45223d7ef7d5 + resourceVersion: "84008199" + selfLink: /api/v1/namespaces/frontend-ns/pods/frontend-5c6559f75f-4ml8h + uid: 477dcf19-be44-4853-88fd-1d9aedfcddba +spec: + containers: + - image: nginx:1.19.1 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-fhqgl + readOnly: true + dnsPolicy: ClusterFirst + enableServiceLinks: true + nodeName: gke-spinnaker-e2-small-c528c905-f1ub + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + 
tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-fhqgl + secret: + defaultMode: 420 + secretName: default-token-fhqgl +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:12Z" + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:23Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:23Z" + status: "True" + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:12Z" + status: "True" + type: PodScheduled + containerStatuses: + - containerID: containerd://fd37d96ba9d422884f9d83d7da37b6dd0e004b5090d92d2775e793544c53e58a + image: docker.io/library/nginx:1.19.1 + imageID: docker.io/library/nginx@sha256:0e188877aa60537d1a1c6484b8c3929cfe09988145327ee47e8e91ddf6f76f5c + lastState: {} + name: nginx + ready: true + restartCount: 0 + started: true + state: + running: + startedAt: "2020-07-24T18:47:22Z" + hostIP: 10.128.0.25 + phase: Running + podIP: 10.52.2.25 + podIPs: + - ip: 10.52.2.25 + qosClass: BestEffort + startTime: "2020-07-24T18:47:12Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-pod-2.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-pod-2.yml new file mode 100644 index 00000000000..8d573586347 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-pod-2.yml @@ -0,0 +1,106 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + sidecar.istio.io/inject: "false" + creationTimestamp: "2020-07-24T18:47:01Z" + generateName: frontend-5c6559f75f- + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + pod-template-hash: 5c6559f75f + name: frontend-5c6559f75f-6fdmt + namespace: frontend-ns + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: frontend-5c6559f75f + uid: 29630998-bdee-4586-ac64-45223d7ef7d5 + resourceVersion: "84008090" + selfLink: /api/v1/namespaces/frontend-ns/pods/frontend-5c6559f75f-6fdmt + uid: a2280982-e745-468f-9176-21ff1642fa8d +spec: + containers: + - image: nginx:1.19.1 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-fhqgl + readOnly: true + dnsPolicy: ClusterFirst + enableServiceLinks: true + nodeName: gke-spinnaker-e2-small-c528c905-w20h + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: 
default-token-fhqgl + secret: + defaultMode: 420 + secretName: default-token-fhqgl +status: + conditions: + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:01Z" + status: "True" + type: Initialized + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:12Z" + status: "True" + type: Ready + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:12Z" + status: "True" + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: "2020-07-24T18:47:01Z" + status: "True" + type: PodScheduled + containerStatuses: + - containerID: containerd://4156e8c60407052d85005381140b9417348a7a796119c93cf29f06701aca52f0 + image: docker.io/library/nginx:1.19.1 + imageID: docker.io/library/nginx@sha256:0e188877aa60537d1a1c6484b8c3929cfe09988145327ee47e8e91ddf6f76f5c + lastState: {} + name: nginx + ready: true + restartCount: 0 + started: true + state: + running: + startedAt: "2020-07-24T18:47:11Z" + hostIP: 10.128.0.14 + phase: Running + podIP: 10.52.1.17 + podIPs: + - ip: 10.52.1.17 + qosClass: BestEffort + startTime: "2020-07-24T18:47:01Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-rs-new.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-rs-new.yml new file mode 100644 index 00000000000..8702ef1e1be --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-rs-new.yml @@ -0,0 +1,76 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + deployment.kubernetes.io/desired-replicas: "2" + deployment.kubernetes.io/max-replicas: "3" + deployment.kubernetes.io/revision: "2" + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + creationTimestamp: "2020-07-24T18:47:01Z" + generation: 2 + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + pod-template-hash: 5c6559f75f + name: frontend-5c6559f75f + namespace: frontend-ns + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: Deployment + name: frontend + uid: 4060c028-521b-4a81-ad44-b06ebee25f2e + resourceVersion: "84008200" + selfLink: /apis/apps/v1/namespaces/frontend-ns/replicasets/frontend-5c6559f75f + uid: 29630998-bdee-4586-ac64-45223d7ef7d5 +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + pod-template-hash: 5c6559f75f + template: + metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + sidecar.istio.io/inject: "false" + creationTimestamp: null + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + pod-template-hash: 5c6559f75f + spec: + containers: + - image: nginx:1.19.1 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + 
terminationGracePeriodSeconds: 30 +status: + availableReplicas: 2 + fullyLabeledReplicas: 2 + observedGeneration: 2 + readyReplicas: 2 + replicas: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-rs-old.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-rs-old.yml new file mode 100644 index 00000000000..eb8718d956f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-rs-old.yml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + deployment.kubernetes.io/desired-replicas: "2" + deployment.kubernetes.io/max-replicas: "3" + deployment.kubernetes.io/revision: "1" + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + creationTimestamp: "2020-07-24T18:45:00Z" + generation: 3 + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + pod-template-hash: 64545c4c54 + name: frontend-64545c4c54 + namespace: frontend-ns + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: Deployment + name: frontend + uid: 4060c028-521b-4a81-ad44-b06ebee25f2e + resourceVersion: "84008207" + selfLink: /apis/apps/v1/namespaces/frontend-ns/replicasets/frontend-64545c4c54 + uid: 13939207-d970-4e19-8f8d-ffcd353016ff +spec: + replicas: 0 + selector: + matchLabels: + app: nginx + pod-template-hash: 64545c4c54 + template: + metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/deployment + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: deployment frontend + creationTimestamp: null + labels: + app: nginx + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: frontendapp + load-balancer: frontend + pod-template-hash: 64545c4c54 + spec: + containers: + - image: nginx:1.19.0 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + observedGeneration: 3 + replicas: 0 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-service.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-service.yml new file mode 100644 index 00000000000..c3d979206bf --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/caching/view/provider/frontend-service.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + artifact.spinnaker.io/location: frontend-ns + artifact.spinnaker.io/name: frontend + artifact.spinnaker.io/type: kubernetes/service + moniker.spinnaker.io/application: frontendapp + moniker.spinnaker.io/cluster: service frontend + creationTimestamp: "2020-07-24T18:41:17Z" + labels: + app.kubernetes.io/managed-by: spinnaker + 
app.kubernetes.io/name: frontendapp + name: frontend + namespace: frontend-ns + resourceVersion: "84005141" + selfLink: /api/v1/namespaces/frontend-ns/services/frontend + uid: d7045409-18dc-4889-9b07-6126b0357435 +spec: + clusterIP: 10.117.5.201 + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + load-balancer: frontend + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest-spec-is-list.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest-spec-is-list.json new file mode 100644 index 00000000000..deb5c0eb815 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest-spec-is-list.json @@ -0,0 +1,16 @@ +{ + "apiVersion": "test.example/v1alpha1", + "kind": "Custom1", + "metadata": { + "labels": { + "app": "app1" + }, + "name": "default-custom1" + }, + "spec": [ + { + "id": "my-id", + "description": "my-description" + } + ] +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest-spec.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest-spec.json new file mode 100644 index 00000000000..159201e5453 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest-spec.json @@ -0,0 +1,16 @@ +{ + "apiVersion": "test.example/v1alpha1", + "kind": "Custom1", + "metadata": { + "labels": { + "app": "app1" + }, + "name": "default-custom1" + }, + "spec": { + "template": "template1", + "params": { + "value": "1" + } + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest.json new file mode 100644 index 00000000000..9f2e3cd8d60 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/crd-manifest.json @@ -0,0 +1,12 @@ +{ + "apiVersion": "test.example/v1alpha1", + "kind": "Custom1", + "metadata": { + "labels": { + "app": "app1" + }, + "name": "default-custom1" + }, + "spec": { + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/deployment-manifest.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/deployment-manifest.json new file mode 100644 index 00000000000..28b577034b6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/deployment-manifest.json @@ -0,0 +1,38 @@ +{ + "apiVersion": "extensions/v1beta1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deployment" + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.12.0", + "imagePullPolicy": "IfNotPresent", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ], + "resources": {} + } + ] + } + } + } +} diff --git 
a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/list-manifest.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/list-manifest.json new file mode 100644 index 00000000000..1ccd9691b37 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/list-manifest.json @@ -0,0 +1,11 @@ +{ + "metadata": { + "name": "list-test" + }, + "apiVersion": "v1", + "kind": "List", + "items": [ + %s, + %s + ] +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/manifest.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/manifest.json new file mode 100644 index 00000000000..9a33314e7b4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/manifest.json @@ -0,0 +1,139 @@ +{ + "apiVersion": "$api_version", + "kind": "$kind", + "metadata": { + "annotations": { + "deployment.kubernetes.io/desired-replicas": "3", + "deployment.kubernetes.io/max-replicas": "4", + "deployment.kubernetes.io/revision": "3", + "$key": "$value", + "time": "2018-10-29 15:40:52.062718 -0400 EDT m=+0.047437104" + }, + "creationTimestamp": "2018-10-29T19:40:53Z", + "generation": 3, + "labels": { + "app": "$name-$name", + "name": "$name", + "pod-template-hash": "2746006312" + }, + "name": "$name", + "namespace": "$namespace", + "ownerReferences": [ + { + "apiVersion": "extensions/v1beta1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "Deployment", + "name": "$name", + "uid": "423e3217-dbb1-11e8-bcfd-02ff462358c8" + } + ], + "resourceVersion": "38075718", + "selfLink": "/apis/extensions/v1beta1/namespaces/$namespace/replicasets/$name-6c8b44b756", + "uid": "8bfb57ad-dbb2-11e8-9989-0efba1451dfa" + }, + "spec": { + "replicas": 3, + "selector": { + "matchLabels": { + "app": "$name-$name", + "name": "$name", + "pod-template-hash": "2746006312" + } + }, + "template": { + "metadata": { + "annotations": { + "time": "2018-10-29 15:40:52.062811 -0400 EDT m=+0.047529510", + "$key": "$value" + }, + "creationTimestamp": null, + "labels": { + "app": "$name-$name", + "chartVersion": "0.0.6", + "name": "$name", + "pod-template-hash": "2746006312" + } + }, + "spec": { + "affinity": { + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "node-role.kubernetes.io/$name", + "operator": "Exists" + } + ] + } + ] + } + } + }, + "containers": [ + { + "image": "$name/test:0.1", + "imagePullPolicy": "IfNotPresent", + "livenessProbe": { + "failureThreshold": 3, + "httpGet": { + "path": "/", + "port": 5000, + "scheme": "HTTP" + }, + "initialDelaySeconds": 20, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 2 + }, + "name": "$name", + "ports": [ + { + "containerPort": 5000, + "name": "http", + "protocol": "TCP" + } + ], + "readinessProbe": { + "failureThreshold": 3, + "httpGet": { + "path": "/", + "port": 5000, + "scheme": "HTTP" + }, + "initialDelaySeconds": 20, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 2 + }, + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File" + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + 
"securityContext": {}, + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoSchedule", + "key": "node", + "operator": "Equal", + "value": "$name" + } + ] + } + } + }, + "status": { + "availableReplicas": 3, + "fullyLabeledReplicas": 3, + "observedGeneration": 3, + "readyReplicas": 3, + "replicas": 3 + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/owned-manifest.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/owned-manifest.json new file mode 100644 index 00000000000..f8c7847bb22 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/owned-manifest.json @@ -0,0 +1,20 @@ +{ + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "metadata": { + "name": "test-rs-7f4557b6b", + "namespace": "ns1", + "ownerReferences": [ + { + "apiVersion": "group/v1alpha1", + "blockOwnerDeletion": true, + "kind": "Owner", + "name": "test-owner", + "uid": "d4a5c8da-5f4e-485c-be7a-28ffdff13437" + } + ] + }, + "spec": { + "replicas": 2 + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/service-manifest.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/service-manifest.json new file mode 100644 index 00000000000..a836d93e3e6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/manifest/service-manifest.json @@ -0,0 +1,23 @@ +{ + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "labels": { + "app": "nginx" + }, + "name": "nginx-service" + }, + "spec": { + "ports": [ + { + "port": 80, + "protocol": "TCP", + "targetPort": 80 + } + ], + "selector": { + "app": "nginx" + }, + "type": "ClusterIP" + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/pod-metric-extra-property.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/pod-metric-extra-property.json new file mode 100644 index 00000000000..d198a3031b7 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/pod-metric-extra-property.json @@ -0,0 +1,8 @@ +{ + "containerName" : "istio-proxy", + "something": "what-am-i", + "metrics" : { + "MEMORY(bytes)" : "27Mi", + "CPU(cores)" : "3m" + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/pod-metric.json b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/pod-metric.json new file mode 100644 index 00000000000..bed46378c29 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/description/pod-metric.json @@ -0,0 +1,7 @@ +{ + "containerName" : "istio-proxy", + "metrics" : { + "MEMORY(bytes)" : "27Mi", + "CPU(cores)" : "3m" + } +} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/base.yml new file mode 100644 index 00000000000..6789f24907f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/base.yml @@ -0,0 +1,21 
@@ +# Base job without completions set +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: "2020-01-31T16:43:33Z" + labels: + job-name: hello + name: hello +spec: + template: + metadata: + labels: + job-name: hello + spec: + containers: + - command: + - echo + - Hello world + image: busybox + name: hello + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/failed-job-init-container-error.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/failed-job-init-container-error.yml new file mode 100644 index 00000000000..9b634205cfc --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/failed-job-init-container-error.yml @@ -0,0 +1,41 @@ +# Job has failed +status: + conditions: + - lastProbeTime: "2020-01-31T19:13:13Z" + lastTransitionTime: "2020-01-31T19:13:13Z" + message: Job has reached the specified backoff limit + reason: BackoffLimitExceeded + status: "True" + type: Failed + failed: 2 + startTime: "2020-01-31T19:13:02Z" + hostIP: "0.0.0.0" + containerStatuses: + - image: some-image:test2 + imageID: "" + lastState: {} + name: some-image-container + ready: false + restartCount: 0 + state: + waiting: + reason: PodInitializing + initContainerStatuses: + - containerID: "some-init-container-id" + image: busybox:1.28 + imageID: "some-init-container-image-id" + lastState: {} + name: "init-myservice" + ready: true + restartCount: 0 + state: + terminated: + containerID: "some-init-container-id" + exitCode: 1 + finishedAt: "2020-01-31T13:56:20Z" + reason: Error + message: foo + startedAt: "2020-01-31T23:53:20Z" + phase: Failed + podIP: "1.1.1.1" + qosClass: BestEffort diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/failed-job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/failed-job.yml new file mode 100644 index 00000000000..2b173bc2df2 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/failed-job.yml @@ -0,0 +1,48 @@ +# Job has failed +status: + conditions: + - lastProbeTime: "2020-01-31T19:13:13Z" + lastTransitionTime: "2020-01-31T19:13:13Z" + message: Job has reached the specified backoff limit + reason: BackoffLimitExceeded + status: "True" + type: Failed + failed: 2 + startTime: "2020-01-31T19:13:02Z" + containerStatuses: + - containerID: "some-container-id" + image: "some-image:test" + imageID: "some-image-id" + lastState: {} + name: "some-container-name" + ready: false + restartCount: 0 + state: + terminated: + containerID: "some-container-id" + exitCode: 1 + finishedAt: "2020-01-31T19:14:32Z" + message: | + Failed to download the file: foo. 
+ GET Request failed with status code', 404, 'Expected', ) + reason: Error + startedAt: "2020-01-31T19:14:02Z" + hostIP: "0.0.0.0" + initContainerStatuses: + - containerID: "some-init-container-id" + image: busybox:1.28 + imageID: "some-init-container-image-id" + lastState: {} + name: "init-myservice" + ready: true + restartCount: 0 + state: + terminated: + containerID: "some-init-container-id" + exitCode: 0 + finishedAt: "2020-01-31T13:56:20Z" + reason: Completed + startedAt: "2020-01-31T23:53:20Z" + phase: Failed + podIP: "1.1.1.1" + qosClass: BestEffort diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/runjob-deadline-exceeded.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/runjob-deadline-exceeded.yml new file mode 100644 index 00000000000..786632c26b5 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/runjob-deadline-exceeded.yml @@ -0,0 +1,66 @@ +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + artifact.spinnaker.io/location: test + artifact.spinnaker.io/name: test-runjob + artifact.spinnaker.io/type: kubernetes/job + artifact.spinnaker.io/version: "" + moniker.spinnaker.io/application: amahajantest + moniker.spinnaker.io/cluster: job test-runjob + strategy.spinnaker.io/recreate: "true" + creationTimestamp: "2021-01-29T00:00:20Z" + labels: + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: amahajantest + job-name: test-runjob + name: test-runjob + namespace: test +spec: + activeDeadlineSeconds: 900 + backoffLimit: 0 + completions: 1 + parallelism: 1 + selector: + matchLabels: + controller-uid: 5e9d1a7c-6a9d-443b-8629-da6dcb73fb7c + template: + metadata: + annotations: + artifact.spinnaker.io/location: test + artifact.spinnaker.io/name: test-runjob + artifact.spinnaker.io/type: kubernetes/job + artifact.spinnaker.io/version: "" + moniker.spinnaker.io/application: amahajantest + moniker.spinnaker.io/cluster: job test-runjob + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: spinnaker + app.kubernetes.io/name: amahajantest + controller-uid: 5e9d1a7c-6a9d-443b-8629-da6dcb73fb7c + job-name: test-runjob + spec: + containers: + - env: + - name: LEVEL + value: invalid + image: image:invalid-tag + imagePullPolicy: IfNotPresent + name: test-runjob + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + dnsPolicy: ClusterFirst + restartPolicy: Never + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + conditions: + - lastProbeTime: "2021-01-29T00:15:20Z" + lastTransitionTime: "2021-01-29T00:15:20Z" + message: Job was active longer than specified deadline + reason: DeadlineExceeded + status: "True" + type: Failed + startTime: "2021-01-29T00:00:20Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/successful-job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/successful-job.yml new file mode 100644 index 00000000000..d9de0961b5e --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/model/successful-job.yml @@ -0,0 +1,44 @@ +# Job has succeeded +status: + completionTime: "2020-01-31T19:44:10Z" + conditions: + - lastProbeTime: "2020-01-31T19:44:10Z" + lastTransitionTime: "2020-01-31T19:44:10Z" + status: "True" + type: 
Complete + startTime: "2020-01-31T19:43:33Z" + succeeded: 1 + containerStatuses: + - containerID: "some-container-id" + image: "some-image:test" + imageID: "some-image-id" + lastState: {} + name: "some-container-name" + ready: false + restartCount: 0 + state: + terminated: + containerID: "some-container-id" + exitCode: 0 + finishedAt: "2020-01-31T19:14:32Z" + reason: Completed + startedAt: "2020-01-31T19:14:02Z" + hostIP: "0.0.0.0" + initContainerStatuses: + - containerID: "some-init-container-id" + image: busybox:1.28 + imageID: "some-init-container-image-id" + lastState: {} + name: "init-myservice" + ready: true + restartCount: 0 + state: + terminated: + containerID: "some-init-container-id" + exitCode: 0 + finishedAt: "2020-01-31T19:12:20Z" + reason: Completed + startedAt: "2020-01-31T19:12:20Z" + phase: Succeeded + podIP: "1.1.1.1" + qosClass: BestEffort diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/configmap.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/configmap.yml new file mode 100644 index 00000000000..c72055a251a --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/configmap.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-configmap +data: + test.properties: | + enabled=true diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/configmaps-with-selectors.yaml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/configmaps-with-selectors.yaml new file mode 100644 index 00000000000..8d54ee39cf8 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/configmaps-with-selectors.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +data: + samplefile.yaml: |- + settings: + enabled: true +kind: ConfigMap +metadata: + labels: + sample-configmap-selector: one + selector-test: test + name: sample-config-map-with-selector-one + namespace: default +--- +apiVersion: v1 +data: + samplefile2.yaml: |- + more-settings: + enabled: false +kind: ConfigMap +metadata: + labels: + sample-configmap-selector: two + selector-test: test + name: sample-config-map-with-selector-two + namespace: default diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/crd-manifest-spec-is-list.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/crd-manifest-spec-is-list.yml new file mode 100644 index 00000000000..355a10dcced --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/crd-manifest-spec-is-list.yml @@ -0,0 +1,9 @@ +apiVersion: test.example/v1alpha1 +kind: Custom1 +metadata: + name: default-custom1 + labels: + app: app1 +spec: + - id: my-id + description: my-description diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/empty-resource.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/empty-resource.yml new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/empty-resource.yml @@ -0,0 +1 @@ + diff --git
a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replace-strategy.yaml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replace-strategy.yaml new file mode 100644 index 00000000000..1854b71779f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replace-strategy.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +data: + samplefile.yaml: |- + settings: + enabled: true +kind: ConfigMap +metadata: + labels: + sample-configmap-selector: one + selector-test: test + name: sample-config-map-with-selector-one + namespace: default +--- +apiVersion: v1 +data: + samplefile2.yaml: |- + more-settings: + enabled: false +kind: ConfigMap +metadata: + annotations: + strategy.spinnaker.io/replace: "true" + labels: + sample-configmap-selector: two + selector-test: test + name: config-map-replace-strategy + namespace: default diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-configmap.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-configmap.yml new file mode 100644 index 00000000000..215071932ea --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-configmap.yml @@ -0,0 +1,32 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: my-name +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: 'index.docker.io/library/nginx' + name: nginx + volumeMounts: + - mountPath: /tmp/mounted + name: myconfig + volumes: + - name: myconfig + configMap: + name: myconfig +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: myconfig +data: + file.txt: | + Hello world! 
diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-no-namespace.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-no-namespace.yml new file mode 100644 index 00000000000..b871f41e4c2 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-no-namespace.yml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: my-name +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: 'index.docker.io/library/nginx' + name: nginx diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-overlapping-selector.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-overlapping-selector.yml new file mode 100644 index 00000000000..a5e50785072 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-overlapping-selector.yml @@ -0,0 +1,13 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: my-name + namespace: my-namespace +spec: + selector: + matchLabels: + selector-key: selector-value + template: + metadata: + labels: + selector-key: selector-value diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-volumes.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-volumes.yml new file mode 100644 index 00000000000..dd82b6b72a9 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset-volumes.yml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: my-name + namespace: my-namespace +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: 'index.docker.io/library/nginx' + name: nginx + volumeMounts: + - mountPath: /tmp/mounted + name: myconfig + volumes: + - name: myconfig + configMap: + name: myconfig diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset.yml new file mode 100644 index 00000000000..9bf9ee64356 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/replicaset.yml @@ -0,0 +1,18 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: my-name + namespace: my-namespace +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: 'index.docker.io/library/nginx' + name: nginx diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/service-no-selector.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/service-no-selector.yml new file mode 100644 index 00000000000..6974169058d --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/service-no-selector.yml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: Service +metadata: + 
name: my-service + namespace: my-namespace diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/service.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/service.yml new file mode 100644 index 00000000000..d3ca4314069 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/deploy/service.yml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: my-namespace +spec: + selector: + selector-key: selector-value diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment-generate-name-result.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment-generate-name-result.yml new file mode 100644 index 00000000000..a3cbc37e0f8 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment-generate-name-result.yml @@ -0,0 +1,17 @@ +# Base deployment spec +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-cknv6 + generateName: nginx- +spec: + replicas: 5 + template: + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment-generate-name.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment-generate-name.yml new file mode 100644 index 00000000000..bbf4d147618 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment-generate-name.yml @@ -0,0 +1,16 @@ +# Base deployment spec +apiVersion: apps/v1 +kind: Deployment +metadata: + generateName: nginx- +spec: + replicas: 5 + template: + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment.yml new file mode 100644 index 00000000000..d32b5555541 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/candeploy/deployment.yml @@ -0,0 +1,16 @@ +# Base deployment spec +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + replicas: 5 + template: + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/base.yml new file mode 100644 index 00000000000..74e9d7d5a0b --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/base.yml @@ -0,0 +1,23 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + creationTimestamp: "2020-01-31T17:53:10Z" + name: my-cron +spec: + jobTemplate: + spec: + 
template: + spec: + containers: + - args: + - /bin/sh + - -c + - date; echo Hello world + image: busybox + imagePullPolicy: Always + name: hello + dnsPolicy: ClusterFirst + restartPolicy: OnFailure + schedulerName: default-scheduler + terminationGracePeriodSeconds: 30 + schedule: '*/10 * * * *' diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/empty-status.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/empty-status.yml new file mode 100644 index 00000000000..121d004ef97 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/empty-status.yml @@ -0,0 +1 @@ +status: {} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/scheduled-status.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/scheduled-status.yml new file mode 100644 index 00000000000..57a47332ebf --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/cronjob/scheduled-status.yml @@ -0,0 +1,2 @@ +status: + lastScheduleTime: "2020-01-31T17:57:00Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/available.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/available.yml new file mode 100644 index 00000000000..deefc4a0d09 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/available.yml @@ -0,0 +1,9 @@ +# All are available and ready +status: + currentNumberScheduled: 1 + desiredNumberScheduled: 1 + numberAvailable: 1 + numberMisscheduled: 0 + numberReady: 1 + observedGeneration: 2 + updatedNumberScheduled: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-ready.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-ready.yml new file mode 100644 index 00000000000..60dd70df8a1 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-ready.yml @@ -0,0 +1,9 @@ +# All are scheduled and available, but not ready +status: + currentNumberScheduled: 1 + desiredNumberScheduled: 1 + numberAvailable: 1 + numberMisscheduled: 0 + numberReady: 0 + observedGeneration: 2 + updatedNumberScheduled: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-scheduled.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-scheduled.yml new file mode 100644 index 00000000000..cfe86decb7e --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-scheduled.yml @@ -0,0 +1,9 @@ +# Current number scheduled is less than desired +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 1 + numberMisscheduled: 0 + numberReady: 0 + numberUnavailable: 1 + observedGeneration: 2 + updatedNumberScheduled: 0 diff --git 
a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-updated-scheduled.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-updated-scheduled.yml new file mode 100644 index 00000000000..b0871826ae1 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/awaiting-updated-scheduled.yml @@ -0,0 +1,9 @@ +# Current number scheduled is as desired, but updated number scheduled is less than desired +status: + currentNumberScheduled: 1 + desiredNumberScheduled: 1 + numberMisscheduled: 0 + numberReady: 0 + numberUnavailable: 1 + observedGeneration: 2 + updatedNumberScheduled: 0 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/base-on-delete.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/base-on-delete.yml new file mode 100644 index 00000000000..32468146b99 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/base-on-delete.yml @@ -0,0 +1,26 @@ +# Base daemonset with update strategy set to OnDelete +apiVersion: apps/v1 +kind: DaemonSet +metadata: + generation: 2 + name: my-daemonset +spec: + selector: + matchLabels: + name: my-daemonset + template: + metadata: + labels: + name: my-daemonset + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: my-daemonset + dnsPolicy: ClusterFirst + nodeSelector: + type: prod + restartPolicy: Always + templateGeneration: 1 + updateStrategy: + type: OnDelete diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/base.yml new file mode 100644 index 00000000000..c3d4b403fd1 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/base.yml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + generation: 2 + name: my-daemonset +spec: + selector: + matchLabels: + name: my-daemonset + template: + metadata: + labels: + name: my-daemonset + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: my-daemonset + dnsPolicy: ClusterFirst + nodeSelector: + type: prod + restartPolicy: Always + templateGeneration: 1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/none-desired.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/none-desired.yml new file mode 100644 index 00000000000..34a6c4cda9b --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/none-desired.yml @@ -0,0 +1,7 @@ +# No replicas desired (perhaps because no node selectors match) +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 + observedGeneration: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/old-generation.yml
b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/old-generation.yml new file mode 100644 index 00000000000..e4cdf5cd677 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/old-generation.yml @@ -0,0 +1,9 @@ +# All are available and ready but generation is older than spec +status: + currentNumberScheduled: 1 + desiredNumberScheduled: 1 + numberAvailable: 1 + numberMisscheduled: 0 + numberReady: 1 + observedGeneration: 1 + updatedNumberScheduled: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/unavailable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/unavailable.yml new file mode 100644 index 00000000000..742652f330c --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/daemonset/unavailable.yml @@ -0,0 +1,9 @@ +# Scheduled is as desired, but a replica is unavailable +status: + currentNumberScheduled: 1 + desiredNumberScheduled: 1 + numberMisscheduled: 0 + numberReady: 0 + numberUnavailable: 1 + observedGeneration: 2 + updatedNumberScheduled: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-available-replicas-paused.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-available-replicas-paused.yml new file mode 100644 index 00000000000..3575122e43a --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-available-replicas-paused.yml @@ -0,0 +1,21 @@ +# Paused when status reports minimum availability, but replica counts show not all replicas available. +status: + availableReplicas: 62 + conditions: + - lastTransitionTime: "2020-01-31T16:34:44Z" + lastUpdateTime: "2020-01-31T16:34:44Z" + message: Deployment is paused + reason: DeploymentPaused + status: Unknown + type: Progressing + - lastTransitionTime: "2020-01-31T16:20:03Z" + lastUpdateTime: "2020-01-31T16:20:03Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 3 + readyReplicas: 62 + replicas: 100 + unavailableReplicas: 38 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-available-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-available-replicas.yml new file mode 100644 index 00000000000..9577dd0cbb4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-available-replicas.yml @@ -0,0 +1,21 @@ +# Status reports minimum availability, but replica counts show not all replicas available +status: + availableReplicas: 62 + conditions: + - lastTransitionTime: "2020-01-31T00:01:24Z" + lastUpdateTime: "2020-01-31T16:16:55Z" + message: ReplicaSet "nginx-deployment-6685876674" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-01-31T16:20:03Z" + lastUpdateTime: "2020-01-31T16:20:03Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 3 + readyReplicas: 62 + replicas: 100 + unavailableReplicas: 38 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-ready-replicas-paused.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-ready-replicas-paused.yml new file mode 100644 index 00000000000..1975f85985a --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-ready-replicas-paused.yml @@ -0,0 +1,21 @@ +# Paused when status reports minimum availability, but replica counts show not all replicas ready +status: + availableReplicas: 100 + conditions: + - lastTransitionTime: "2020-01-31T16:34:44Z" + lastUpdateTime: "2020-01-31T16:34:44Z" + message: Deployment is paused + reason: DeploymentPaused + status: Unknown + type: Progressing + - lastTransitionTime: "2020-01-31T16:20:03Z" + lastUpdateTime: "2020-01-31T16:20:03Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 3 + readyReplicas: 62 + replicas: 100 + unavailableReplicas: 100 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-ready-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-ready-replicas.yml new file mode 100644 index 00000000000..32a27b34aa7 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-ready-replicas.yml @@ -0,0 +1,21 @@ +# Status reports minimum availability, but replica counts show not all replicas ready +status: + availableReplicas: 100 + conditions: + - lastTransitionTime: "2020-01-31T00:01:24Z" + lastUpdateTime: "2020-01-31T16:16:55Z" + message: ReplicaSet "nginx-deployment-6685876674" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-01-31T16:20:03Z" + lastUpdateTime: "2020-01-31T16:20:03Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 3 + readyReplicas: 62 + replicas: 100 + unavailableReplicas: 100 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-termination-paused.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-termination-paused.yml new file mode 100644 index 00000000000..7b11af48d78 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-termination-paused.yml @@ -0,0 +1,21 @@ +# Paused when awaiting termination of old replicas +status: + availableReplicas: 75 + conditions: + - lastTransitionTime: "2020-01-31T16:35:32Z" + lastUpdateTime: "2020-01-31T16:35:32Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T16:34:44Z" + lastUpdateTime: "2020-01-31T16:34:44Z" + message: Deployment is paused + reason: DeploymentPaused + status: Unknown + type: Progressing + observedGeneration: 3 + readyReplicas: 75 + replicas: 114 + unavailableReplicas: 39 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-termination.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-termination.yml new file mode 100644 index 00000000000..994f05b1d32 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-termination.yml @@ -0,0 +1,21 @@ +# Awaiting termination of old replicas +status: + availableReplicas: 75 + conditions: + - lastTransitionTime: "2020-01-31T16:35:32Z" + lastUpdateTime: "2020-01-31T16:35:32Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T16:35:30Z" + lastUpdateTime: "2020-01-31T16:38:57Z" + message: ReplicaSet "nginx-deployment-7cc75c7b9d" is progressing. + reason: ReplicaSetUpdated + status: "True" + type: Progressing + observedGeneration: 3 + readyReplicas: 75 + replicas: 114 + unavailableReplicas: 39 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-updated-replicas-paused.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-updated-replicas-paused.yml new file mode 100644 index 00000000000..0a5748ddde4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-updated-replicas-paused.yml @@ -0,0 +1,21 @@ +# Paused while rollout is in progress; waiting for all replicas to be updated to new spec +status: + availableReplicas: 75 + conditions: + - lastTransitionTime: "2020-01-31T16:32:31Z" + lastUpdateTime: "2020-01-31T16:32:31Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T16:34:44Z" + lastUpdateTime: "2020-01-31T16:34:44Z" + message: Deployment is paused + reason: DeploymentPaused + status: Unknown + type: Progressing + observedGeneration: 3 + readyReplicas: 75 + replicas: 125 + unavailableReplicas: 50 + updatedReplicas: 50 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-updated-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-updated-replicas.yml new file mode 100644 index 00000000000..dc60c840240 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/awaiting-updated-replicas.yml @@ -0,0 +1,21 @@ +# Rollout in progress, waiting for all replicas to be updated to new spec +status: + availableReplicas: 75 + conditions: + - lastTransitionTime: "2020-01-31T16:02:59Z" + lastUpdateTime: "2020-01-31T16:02:59Z" + message: Deployment has minimum availability.
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T00:01:24Z" + lastUpdateTime: "2020-01-31T16:14:39Z" + message: ReplicaSet "nginx-deployment-6685876674" is progressing. + reason: ReplicaSetUpdated + status: "True" + type: Progressing + observedGeneration: 3 + readyReplicas: 75 + replicas: 125 + unavailableReplicas: 50 + updatedReplicas: 50 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/base-no-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/base-no-replicas.yml new file mode 100644 index 00000000000..fa31d7d7f2a --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/base-no-replicas.yml @@ -0,0 +1,18 @@ +# Deployment with spec of 0 replicas +apiVersion: apps/v1 +kind: Deployment +metadata: + generation: 3 + name: nginx-deployment +spec: + progressDeadlineSeconds: 600 + replicas: 0 + template: + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/base.yml new file mode 100644 index 00000000000..a89d1e503f6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/base.yml @@ -0,0 +1,18 @@ +# Base deployment spec +apiVersion: apps/v1 +kind: Deployment +metadata: + generation: 3 + name: nginx-deployment +spec: + progressDeadlineSeconds: 600 + replicas: 100 + template: + spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/condition-unavailable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/condition-unavailable.yml new file mode 100644 index 00000000000..1b5df2429c4 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/condition-unavailable.yml @@ -0,0 +1,21 @@ +# Status reports MinimumReplicasUnavailable +status: + availableReplicas: 20 + conditions: + - lastTransitionTime: "2020-01-31T00:01:24Z" + lastUpdateTime: "2020-01-31T16:01:27Z" + message: ReplicaSet "nginx-deployment-5754944d6c" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2020-01-31T16:02:23Z" + lastUpdateTime: "2020-01-31T16:02:23Z" + message: Deployment does not have minimum availability. 
+ reason: MinimumReplicasUnavailable + status: "False" + type: Available + observedGeneration: 3 + readyReplicas: 20 + replicas: 100 + unavailableReplicas: 80 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/no-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/no-replicas.yml new file mode 100644 index 00000000000..528244fd0a6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/no-replicas.yml @@ -0,0 +1,4 @@ +# Status reporting no replicas and no conditions +status: + conditions: [] + observedGeneration: 3 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/progress-deadline-exceeded-unavailable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/progress-deadline-exceeded-unavailable.yml new file mode 100644 index 00000000000..40b2268b842 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/progress-deadline-exceeded-unavailable.yml @@ -0,0 +1,21 @@ +# Condition reports progress deadline exceeded and deployment is not available +status: + availableReplicas: 98 + conditions: + - lastTransitionTime: "2020-01-31T16:02:23Z" + lastUpdateTime: "2020-01-31T16:02:23Z" + message: Deployment does not have minimum availability. + reason: MinimumReplicasUnavailable + status: "False" + type: Available + - lastTransitionTime: "2020-01-31T17:27:49Z" + lastUpdateTime: "2020-01-31T17:27:49Z" + message: ReplicaSet "nginx-deployment-5754944d6c" has timed out progressing. + reason: ProgressDeadlineExceeded + status: "False" + type: Progressing + observedGeneration: 3 + readyReplicas: 98 + replicas: 100 + unavailableReplicas: 2 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/progress-deadline-exceeded.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/progress-deadline-exceeded.yml new file mode 100644 index 00000000000..ac751e9d5db --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/progress-deadline-exceeded.yml @@ -0,0 +1,21 @@ +# Condition reports progress deadline exceeded +status: + availableReplicas: 98 + conditions: + - lastTransitionTime: "2020-01-31T17:27:28Z" + lastUpdateTime: "2020-01-31T17:27:28Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T17:27:49Z" + lastUpdateTime: "2020-01-31T17:27:49Z" + message: ReplicaSet "nginx-deployment-5754944d6c" has timed out progressing. 
+ reason: ProgressDeadlineExceeded + status: "False" + type: Progressing + observedGeneration: 3 + readyReplicas: 98 + replicas: 100 + unavailableReplicas: 2 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/stable-with-old-generation.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/stable-with-old-generation.yml new file mode 100644 index 00000000000..4ce4628e88f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/stable-with-old-generation.yml @@ -0,0 +1,20 @@ +# Conditions report available and replica counts match spec, but generation is older than spec +status: + availableReplicas: 100 + conditions: + - lastTransitionTime: "2020-01-31T00:01:32Z" + lastUpdateTime: "2020-01-31T00:01:32Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T00:01:24Z" + lastUpdateTime: "2020-01-31T00:01:32Z" + message: ReplicaSet "nginx-deployment-5754944d6c" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 2 + readyReplicas: 100 + replicas: 100 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/stable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/stable.yml new file mode 100644 index 00000000000..ca759726b73 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/deployment/stable.yml @@ -0,0 +1,20 @@ +# Conditions report available and replica counts match spec +status: + availableReplicas: 100 + conditions: + - lastTransitionTime: "2020-01-31T00:01:32Z" + lastUpdateTime: "2020-01-31T00:01:32Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2020-01-31T00:01:24Z" + lastUpdateTime: "2020-01-31T00:01:32Z" + message: ReplicaSet "nginx-deployment-5754944d6c" has successfully progressed. 
+ reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 3 + readyReplicas: 100 + replicas: 100 + updatedReplicas: 100 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/base.yml new file mode 100644 index 00000000000..b0d9e031e2c --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/base.yml @@ -0,0 +1,20 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + annotations: {} + creationTimestamp: '2020-02-10T21:45:07Z' + labels: {} + name: test-hpa + namespace: default + resourceVersion: '169474131' + selfLink: >- + /apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers/test-hpa + uid: 00000000-0000-0000-0000-000000000000 +spec: + maxReplicas: 5 + minReplicas: 2 + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: test-deployment + targetCPUUtilizationPercentage: 40 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/no-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/no-replicas.yml new file mode 100644 index 00000000000..248c018d0e5 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/no-replicas.yml @@ -0,0 +1,4 @@ +status: + currentReplicas: 0 + desiredReplicas: 0 + lastScaleTime: '2020-02-25T20:32:16Z' diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/stable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/stable.yml new file mode 100644 index 00000000000..eff76aa8338 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/stable.yml @@ -0,0 +1,4 @@ +status: + currentReplicas: 2 + desiredReplicas: 2 + lastScaleTime: '2020-02-25T20:32:16Z' diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/waiting-for-scaledown.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/waiting-for-scaledown.yml new file mode 100644 index 00000000000..4cb1ead7053 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/waiting-for-scaledown.yml @@ -0,0 +1,4 @@ +status: + currentReplicas: 5 + desiredReplicas: 2 + lastScaleTime: '2020-02-25T20:32:16Z' diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/waiting-for-scaleup.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/waiting-for-scaleup.yml new file mode 100644 index 00000000000..1de30d65bad --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/horizontalpodautoscaler/waiting-for-scaleup.yml @@ -0,0 +1,4 @@ 
+status: + currentReplicas: 2 + desiredReplicas: 3 + lastScaleTime: '2020-02-25T20:32:16Z' diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/active-job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/active-job.yml new file mode 100644 index 00000000000..02b5b45e0ad --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/active-job.yml @@ -0,0 +1,4 @@ +# One job is actively running and none have completed +status: + active: 1 + startTime: "2020-01-31T17:43:33Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/base-with-completions.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/base-with-completions.yml new file mode 100644 index 00000000000..e3ee5fcb37f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/base-with-completions.yml @@ -0,0 +1,22 @@ +# Base job with >1 completions set +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: "2020-01-31T16:43:33Z" + labels: + job-name: hello + name: hello +spec: + completions: 5 + template: + metadata: + labels: + job-name: hello + spec: + containers: + - command: + - echo + - Hello world + image: busybox + name: hello + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/base.yml new file mode 100644 index 00000000000..6789f24907f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/base.yml @@ -0,0 +1,21 @@ +# Base job without completions set +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: "2020-01-31T16:43:33Z" + labels: + job-name: hello + name: hello +spec: + template: + metadata: + labels: + job-name: hello + spec: + containers: + - command: + - echo + - Hello world + image: busybox + name: hello + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/completed-job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/completed-job.yml new file mode 100644 index 00000000000..6ca328238bd --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/completed-job.yml @@ -0,0 +1,10 @@ +# Job with 1 completion has succeeded +status: + completionTime: "2020-01-31T18:44:10Z" + conditions: + - lastProbeTime: "2020-01-31T18:44:10Z" + lastTransitionTime: "2020-01-31T18:44:10Z" + status: "True" + type: Complete + startTime: "2020-01-31T18:43:33Z" + succeeded: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/failed-job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/failed-job.yml new file mode 100644 index 00000000000..51419dc3863 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/failed-job.yml @@ -0,0 +1,11 @@ +# Job has failed +status: + conditions: + - lastProbeTime: "2020-01-31T19:13:13Z" 
+ lastTransitionTime: "2020-01-31T19:13:13Z" + message: Job has reached the specified backoff limit + reason: BackoffLimitExceeded + status: "True" + type: Failed + failed: 2 + startTime: "2020-01-31T19:13:02Z" diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/in-progress-some-failed.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/in-progress-some-failed.yml new file mode 100644 index 00000000000..cf9c81daf28 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/job/in-progress-some-failed.yml @@ -0,0 +1,6 @@ +# Job with >2 completions has some succeeded and some failed, still trying +status: + active: 1 + failed: 3 + startTime: "2020-01-31T19:08:58Z" + succeeded: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/base.yml new file mode 100644 index 00000000000..773fd053221 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/base.yml @@ -0,0 +1,13 @@ +# Base pod +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx:1.7.9 + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + protocol: TCP diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/failed-phase.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/failed-phase.yml new file mode 100644 index 00000000000..f5bf90a4fbc --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/failed-phase.yml @@ -0,0 +1,2 @@ +status: + phase: Failed diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/null-phase.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/null-phase.yml new file mode 100644 index 00000000000..121d004ef97 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/null-phase.yml @@ -0,0 +1 @@ +status: {} diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/pending-phase.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/pending-phase.yml new file mode 100644 index 00000000000..8cebb41ce97 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/pending-phase.yml @@ -0,0 +1,2 @@ +status: + phase: Pending diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/running-phase.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/running-phase.yml new file mode 100644 index 00000000000..b90b96e466a --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/running-phase.yml @@ -0,0 +1,2 @@ +status: + phase: Running diff --git 
a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/succeeded-phase.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/succeeded-phase.yml new file mode 100644 index 00000000000..94f435d4388 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/succeeded-phase.yml @@ -0,0 +1,2 @@ +status: + phase: Succeeded diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/unknown-phase.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/unknown-phase.yml new file mode 100644 index 00000000000..a8eeeb9979e --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/pod/unknown-phase.yml @@ -0,0 +1,2 @@ +status: + phase: Unknown diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-available.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-available.yml new file mode 100644 index 00000000000..a56760f0d97 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-available.yml @@ -0,0 +1,6 @@ +status: + availableReplicas: 3 + fullyLabeledReplicas: 4 + observedGeneration: 2 + readyReplicas: 4 + replicas: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-fully-labeled.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-fully-labeled.yml new file mode 100644 index 00000000000..2ef589971cd --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-fully-labeled.yml @@ -0,0 +1,6 @@ +status: + availableReplicas: 0 + fullyLabeledReplicas: 2 + observedGeneration: 2 + readyReplicas: 0 + replicas: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-ready.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-ready.yml new file mode 100644 index 00000000000..1367b361beb --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/awaiting-ready.yml @@ -0,0 +1,6 @@ +status: + availableReplicas: 3 + fullyLabeledReplicas: 4 + observedGeneration: 2 + readyReplicas: 3 + replicas: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/base-no-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/base-no-replicas.yml new file mode 100644 index 00000000000..10ac90f22c5 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/base-no-replicas.yml @@ -0,0 +1,26 @@ +# Base replicaset with no desired replicas +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + creationTimestamp: "2020-01-30T16:34:22Z" + generation: 2 + name: my-rs +spec: + replicas: 0 + selector: + matchLabels: + app: nginx + template: + 
metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 4000 + protocol: TCP + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/base.yml new file mode 100644 index 00000000000..3e753efe0ca --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/base.yml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + creationTimestamp: "2020-01-30T16:34:22Z" + generation: 2 + name: my-rs +spec: + replicas: 4 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 4000 + protocol: TCP + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/no-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/no-replicas.yml new file mode 100644 index 00000000000..d4fea28e20e --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/no-replicas.yml @@ -0,0 +1,3 @@ +status: + observedGeneration: 2 + replicas: 0 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/old-generation.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/old-generation.yml new file mode 100644 index 00000000000..a3161f4521f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/old-generation.yml @@ -0,0 +1,6 @@ +status: + availableReplicas: 4 + fullyLabeledReplicas: 4 + observedGeneration: 1 + readyReplicas: 4 + replicas: 4 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/stable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/stable.yml new file mode 100644 index 00000000000..3f7d7d2c36c --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/replicaset/stable.yml @@ -0,0 +1,6 @@ +status: + availableReplicas: 4 + fullyLabeledReplicas: 4 + observedGeneration: 2 + readyReplicas: 4 + replicas: 4 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-partitioned-rollout.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-partitioned-rollout.yml new file mode 100644 index 00000000000..42f1fea198f --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-partitioned-rollout.yml @@ -0,0 +1,10 @@ +# Partitioned rollout in progress (where partition = 2) +status: + collisionCount: 0 + currentReplicas: 3 + currentRevision: web-b46f789c4 + observedGeneration: 2 + readyReplicas: 4 + replicas: 4 + updateRevision: 
web-c5f4bdc97 + updatedReplicas: 1 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-ready-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-ready-replicas.yml new file mode 100644 index 00000000000..73f0c07f923 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-ready-replicas.yml @@ -0,0 +1,9 @@ +# Fewer than desired number of replicas ready +status: + collisionCount: 0 + currentRevision: web-b46f789c4 + observedGeneration: 2 + readyReplicas: 3 + replicas: 4 + updateRevision: web-c5f4bdc97 + updatedReplicas: 4 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-replicas.yml new file mode 100644 index 00000000000..bac8aee9596 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/awaiting-replicas.yml @@ -0,0 +1,9 @@ +# All replicas ready, but fewer than desired number +status: + collisionCount: 0 + currentReplicas: 3 + currentRevision: web-b486b4959 + observedGeneration: 2 + readyReplicas: 3 + replicas: 3 + updateRevision: web-849bd7f5f9 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base-no-desired-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base-no-desired-replicas.yml new file mode 100644 index 00000000000..181039c4b5c --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base-no-desired-replicas.yml @@ -0,0 +1,30 @@ +# Stateful set with no desired replicas +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: "2020-01-31T19:49:29Z" + generation: 2 + name: my-stateful-set +spec: + replicas: 0 + selector: + matchLabels: + app: my-app + serviceName: my-service + template: + metadata: + labels: + app: my-app + spec: + containers: + - image: nginx + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: web + protocol: TCP + dnsPolicy: ClusterFirst + restartPolicy: Always + updateStrategy: + type: RollingUpdate diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base-with-partition.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base-with-partition.yml new file mode 100644 index 00000000000..327932ad299 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base-with-partition.yml @@ -0,0 +1,32 @@ +# Base statefulset with partition set to 1 +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: "2020-01-31T19:49:29Z" + generation: 2 + name: my-stateful-set +spec: + replicas: 4 + selector: + matchLabels: + app: my-app + serviceName: my-service + template: + metadata: + labels: + app: my-app + spec: + containers: + - image: nginx + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: web + protocol: TCP + dnsPolicy: ClusterFirst + 
restartPolicy: Always + updateStrategy: + rollingUpdate: + partition: 2 + type: RollingUpdate diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base.yml new file mode 100644 index 00000000000..947977baa47 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/base.yml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: "2020-01-31T19:49:29Z" + generation: 2 + name: my-stateful-set +spec: + replicas: 4 + selector: + matchLabels: + app: my-app + serviceName: my-service + template: + metadata: + labels: + app: my-app + spec: + containers: + - image: nginx + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 + name: web + protocol: TCP + dnsPolicy: ClusterFirst + restartPolicy: Always + updateStrategy: + type: RollingUpdate diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/no-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/no-replicas.yml new file mode 100644 index 00000000000..2b639c32116 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/no-replicas.yml @@ -0,0 +1,6 @@ +status: + collisionCount: 0 + currentRevision: web-c5f4bdc97 + observedGeneration: 2 + replicas: 0 + updateRevision: web-c5f4bdc97 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/old-generation.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/old-generation.yml new file mode 100644 index 00000000000..29a41e4a9b6 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/old-generation.yml @@ -0,0 +1,10 @@ +# All replicas up to date but observed generation is old +status: + collisionCount: 0 + currentReplicas: 4 + currentRevision: web-b486b4959 + observedGeneration: 1 + readyReplicas: 4 + replicas: 4 + updateRevision: web-b486b4959 + updatedReplicas: 4 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/partitioned-rollout-complete.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/partitioned-rollout-complete.yml new file mode 100644 index 00000000000..4519041a14b --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/partitioned-rollout-complete.yml @@ -0,0 +1,10 @@ +# Partitioned rollout complete (where partition = 2) +status: + collisionCount: 0 + currentReplicas: 2 + currentRevision: web-b46f789c4 + observedGeneration: 2 + readyReplicas: 4 + replicas: 4 + updateRevision: web-c5f4bdc97 + updatedReplicas: 2 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/stable.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/stable.yml new file mode 100644 index 00000000000..56bb693930e --- +++ 
b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/stable.yml @@ -0,0 +1,9 @@ +status: + collisionCount: 0 + currentReplicas: 4 + currentRevision: web-c5f4bdc97 + observedGeneration: 2 + readyReplicas: 4 + replicas: 4 + updateRevision: web-c5f4bdc97 + updatedReplicas: 4 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/waiting-for-replicas.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/waiting-for-replicas.yml new file mode 100644 index 00000000000..5be8c832adb --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/waiting-for-replicas.yml @@ -0,0 +1,8 @@ +status: + collisionCount: 0 + currentRevision: web-c5f4bdc97 + observedGeneration: 2 + readyReplicas: 4 + replicas: 4 + updateRevision: web-b46f789c4 + updatedReplicas: 3 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/waiting-for-updated-revision.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/waiting-for-updated-revision.yml new file mode 100644 index 00000000000..e48638ad51b --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/handler/statefulset/waiting-for-updated-revision.yml @@ -0,0 +1,8 @@ +status: + collisionCount: 0 + currentRevision: web-c5f4bdc97 + observedGeneration: 2 + readyReplicas: 4 + replicas: 4 + currentReplicas: 4 + updateRevision: web-b46f789c4 diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/job-generate-name.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/job-generate-name.yml new file mode 100644 index 00000000000..6a9119d5a0d --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/job-generate-name.yml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: my-job + namespace: my-namespace +spec: + backoffLimit: 2 + template: + spec: + containers: + - command: + - start + image: my-job + name: my-job + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/job.yml new file mode 100644 index 00000000000..b0c1bde2b3c --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/job.yml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: my-job + namespace: my-namespace +spec: + backoffLimit: 2 + template: + spec: + containers: + - command: + - start + image: my-job + name: my-job + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/mock-kubectl-stdin-command.sh b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/mock-kubectl-stdin-command.sh new file mode 100755 index 00000000000..9149300fd00 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/mock-kubectl-stdin-command.sh @@ -0,0 +1,34 @@ +#!/bin/sh +# +# Copyright 2022 Salesforce, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +# This script is meant to mock kubectl apply -f - commands in unit tests. The functionality +# being tested is simply the fact that the retry attempts for such calls continue to read +# data from stdin. To simulate retries, we exit the script with an error that is configured +# to be retryable as long as $1 != "success" +input=$(cat -) +# simulate error case +if [ "$1" != "success" ] +then + echo "\n########################" >&2 + echo "data received from stdin: $input" >&2 + echo "Error: TLS handshake timeout" >&2 + echo "########################" >&2 + exit 1 +else + echo "$input" +fi diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/top-pod.txt b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/top-pod.txt new file mode 100644 index 00000000000..6d0de4b6826 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/op/job/top-pod.txt @@ -0,0 +1,7 @@ +POD NAME CPU(cores) MEMORY(bytes) +spinnaker-io-nginx-v000-42gnq spinnaker-github-io 0m 2Mi +spinnaker-io-nginx-v000-42gnq istio-proxy 3m 28Mi +spinnaker-io-nginx-v000-42gnq istio-init 0m 0Mi +spinnaker-io-nginx-v001-jvkgb spinnaker-github-io 0m 2Mi +spinnaker-io-nginx-v001-jvkgb istio-proxy 32m 30Mi +spinnaker-io-nginx-v001-jvkgb istio-init 0m 0Mi diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/base-with-completions.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/base-with-completions.yml new file mode 100644 index 00000000000..10001a9a0ca --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/base-with-completions.yml @@ -0,0 +1,26 @@ +# Base job with >1 completions set +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: "2020-01-31T16:43:33Z" + labels: + job-name: hello + name: hello +spec: + selector: + matchLabels: + job-name: hello + completions: 5 + template: + metadata: + namespace: mock-namespace + labels: + job-name: hello + spec: + containers: + - command: + - echo + - Hello world + image: busybox + name: hello + restartPolicy: Never diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/failed-job.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/failed-job.yml new file mode 100644 index 00000000000..2b173bc2df2 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/failed-job.yml @@ -0,0 +1,48 @@ +# Job has failed +status: + conditions: + - lastProbeTime: "2020-01-31T19:13:13Z" + lastTransitionTime: "2020-01-31T19:13:13Z" + message: Job has reached the specified backoff limit + reason: BackoffLimitExceeded + status: "True" + type: Failed + failed: 2 
+ startTime: "2020-01-31T19:13:02Z" + containerStatuses: + - containerID: "some-container-id" + image: "some-image:test" + imageID: "some-image-id" + lastState: {} + name: "some-container-name" + ready: false + restartCount: 0 + state: + terminated: + containerID: "some-container-id" + exitCode: 1 + finishedAt: "2020-01-31T19:14:32Z" + message: | + Failed to download the file: foo. + GET Request failed with status code', 404, 'Expected', ) + reason: Error + startedAt: "2020-01-31T19:14:02Z" + hostIP: "0.0.0.0" + initContainerStatuses: + - containerID: "some-init-container-id" + image: busybox:1.28 + imageID: "some-init-container-image-id" + lastState: {} + name: "init-myservice" + ready: true + restartCount: 0 + state: + terminated: + containerID: "some-init-container-id" + exitCode: 0 + finishedAt: "2020-01-31T13:56:20Z" + reason: Completed + startedAt: "2020-01-31T23:53:20Z" + phase: Failed + podIP: "1.1.1.1" + qosClass: BestEffort diff --git a/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/runjob-deadline-exceeded.yml b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/runjob-deadline-exceeded.yml new file mode 100644 index 00000000000..c6db911a905 --- /dev/null +++ b/clouddriver-kubernetes/src/test/resources/com/netflix/spinnaker/clouddriver/kubernetes/provider/view/runjob-deadline-exceeded.yml @@ -0,0 +1,9 @@ +status: + conditions: + - lastProbeTime: "2021-01-29T00:15:20Z" + lastTransitionTime: "2021-01-29T00:15:20Z" + message: Job was active longer than specified deadline + reason: DeadlineExceeded + status: "True" + type: Failed + startTime: "2021-01-29T00:00:20Z" diff --git a/clouddriver-lambda/README.md b/clouddriver-lambda/README.md new file mode 100644 index 00000000000..2302bfc0edc --- /dev/null +++ b/clouddriver-lambda/README.md @@ -0,0 +1,530 @@ +# AWS Lambda Support + +### **Background ** + +Spinnaker CloudDriver has been enhanced to add support for AWS Lambda. Below lists the API contract input that have been coded in this repository. + +## clouddriver.yml override ## + +```yaml +aws: + features: + lambda: + enabled: true + accounts: + - name: test + lambdaEnabled: true +``` + +_Deprecation Notice_: + +We are deprecating the following configuration in favor of unifying configuration of AWS services and features: + +```yaml +aws: + lambda: + enabled: true +``` + +# Controller calls + +## Get all lambda functions + +### Purpose + +Retrieves all cached lambda functions. 
+ +***Sample Request*** + +``` +curl -X GET --header 'Accept: application/json' +'http://localhost:7002/functions' +``` + +***Sample Response*** + +``` +[ + { + "account": "mylambda", + "codeSha256": "rHHd9Lk3j7h6MMZKqb3lQzAHKO1eWrmW8Wh/QP1+KuE=", + "codeSize": 7011514, + "description": "sample", + "eventSourceMappings": [], + "functionArn": "arn:aws:lambda:us-west-2::function:mylambdafunctiontwo", + "functionName": "mylambdafunctiontwo", + "functionname": "aws:lambdaFunctions:mylambda:us-west-2:mylambdafunctiontwo", + "handler": "lambda_function.lambda_handler", + "lastModified": "2019-03-29T15:52:33.054+0000", + "layers": [], + "memorySize": 512, + "region": "us-west-2", + "revisionId": "58cb0acc-4a20-4e57-b935-cc97ae1769fd", + "revisions": { + "58cb0acc-4a20-4e57-b935-cc97ae1769fd": "$LATEST", + "ee17b471-d6e3-47a3-9e7b-8cae9b92c626": "2" + }, + "role": "arn:aws:iam:::role/service-role/test", + "runtime": "python3.6", + "timeout": 60, + "tracingConfig": { + "mode": "PassThrough" + }, + "version": "$LATEST" + }, + { + "account": "mylambda", + "codeSha256": "rHHd9Lk3j7h6MMZKqb3lQzAHKO1eWrmW8Wh/QP1+KuE=", + "codeSize": 7011514, + "description": "sample", + "eventSourceMappings": [], + "functionArn": "arn:aws:lambda:us-west-2::function:mylambdafunctionone", + "functionName": "mylambdafunctionone", + "functionname": "aws:lambdaFunctions:mylambda:us-west-2:mylambdafunctionone", + "handler": "lambda_function.lambda_handler", + "lastModified": "2019-03-29T15:46:04.995+0000", + "layers": [], + "memorySize": 512, + "region": "us-west-2", + "revisionId": "73e5500a-3751-4073-adc0-877dfc3c720d", + "revisions": { + "1e280c63-1bcd-4840-92dc-bef5f1b46028": "1", + "73e5500a-3751-4073-adc0-877dfc3c720d": "$LATEST" + }, + "role": "arn:aws:iam:::role/service-role/test", + "runtime": "python3.6", + "timeout": 68, + "tracingConfig": { + "mode": "PassThrough" + }, + "version": "$LATEST" + } + ] +``` + +### Purpose + +Retrieves details corresponding to a single lambda function. + +***Sample Request*** + +``` +curl -X GET --header 'Accept: application/json' +'http://localhost:7002/functions?functionName=mylambdafunctionone&region=us-west-2&account=mylambda' +``` + +***Sample Response*** + +``` +[ + { + "account": "mylambda", + "codeSha256": "rHHd9Lk3j7h6MMZKqb3lQzAHKO1eWrmW8Wh/QP1+KuE=", + "codeSize": 7011514, + "description": "sample", + "eventSourceMappings": [], + "functionArn": "arn:aws:lambda:us-west-2::function:mylambdafunctionone", + "functionName": "mylambdafunctionone", + "functionname": "aws:lambdaFunctions:mylambda:us-west-2:mylambdafunctionone", + "handler": "lambda_function.lambda_handler", + "lastModified": "2019-03-29T15:46:04.995+0000", + "layers": [], + "memorySize": 512, + "region": "us-west-2", + "revisionId": "73e5500a-3751-4073-adc0-877dfc3c720d", + "revisions": { + "1e280c63-1bcd-4840-92dc-bef5f1b46028": "1", + "73e5500a-3751-4073-adc0-877dfc3c720d": "$LATEST" + }, + "role": "arn:aws:iam::481090335964:role/service-role/test", + "runtime": "python3.6", + "timeout": 68, + "tracingConfig": { + "mode": "PassThrough" + }, + "version": "$LATEST" + } +] + +``` +### Purpose + +Create a lambda function.
+ +***Sample Request*** + +``` +curl -X POST \ + http://localhost:7002/aws/ops/createLambdaFunction \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctiontwo", + "description": "sample", + "credentials": "mylambda", + "handler": "lambda_function.lambda_handler", + "s3bucket": "my_s3_bucket_name", + "s3key": "my_s3_object_key", + "memory": 512, + "publish": "true", + "role": "arn:aws:iam:::role/service-role/test", + "runtime": "python3.6", + "timeout": "60", + "tags": { + "key":"value" + } +}' +``` + +***Sample Response*** + +``` +{ + "id": "c3bd961d-c951-423e-aad6-918f29e78ccb", + "resourceUri": "/task/c3bd961d-c951-423e-aad6-918f29e78ccb" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/c3bd961d-c951-423e-aad6-918f29e78ccb. So, I'll have to navigate to +http://localhost:7002/task/c3bd961d-c951-423e-aad6-918f29e78ccb for orchestration details +``` + +### Purpose + +Update lambda function configuration. + +***Sample Request*** + +``` +curl -X POST \ + http://localhost:7002/aws/ops/updateLambdaFunctionConfiguration \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctionone", + "description": "sample", + "credentials": "mylambda", + "handler": "lambda_function.lambda_handler", + "memory": 512, + "role": "arn:aws:iam:::role/service-role/test", + "runtime": "python3.6", + "timeout": "68", + "tags": { + "key":"value" + } +}' +``` +Note: I've changed the timeout from 60 to 68. Navigate to the AWS console to verify +that the change is reflected. + +***Sample Response*** + +``` +{ + "id": "bfdb1201-1c31-4a83-84bb-a807d69291fc", + "resourceUri": "/task/bfdb1201-1c31-4a83-84bb-a807d69291fc" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/bfdb1201-1c31-4a83-84bb-a807d69291fc. So, I'll have to navigate to +http://localhost:7002/task/bfdb1201-1c31-4a83-84bb-a807d69291fc for orchestration details +``` + +### Purpose + +Delete a lambda function. + +***Sample Request*** + +``` +curl -X POST \ + http://localhost:7002/aws/ops/deleteLambdaFunction \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctiontwo", + "credentials": "mylambda" +}' +``` + +***Sample Response*** + +``` +{ + "id": "4c316ba9-7db8-4675-82d9-5adf118c541c", + "resourceUri": "/task/4c316ba9-7db8-4675-82d9-5adf118c541c" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/4c316ba9-7db8-4675-82d9-5adf118c541c. So, I'll have to navigate to +http://localhost:7002/task/4c316ba9-7db8-4675-82d9-5adf118c541c for orchestration details +``` + +### Purpose + +Invoke a lambda function.
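+ +As with the other operations above, the response is a task reference rather than the function's output, so results are read back from the task endpoint afterwards. The snippet below sketches that polling step; it is illustrative only, assumes `jq` is installed, and uses a hypothetical `invoke-payload.json` file holding the JSON body from the sample request that follows: + +``` +# invoke-payload.json is a hypothetical file containing the JSON body +# from the sample request below. +TASK_URI=$(curl -s -X POST http://localhost:7002/aws/ops/invokeLambdaFunction \ + -H 'Content-Type: application/json' \ + -d @invoke-payload.json | jq -r '.resourceUri') + +# Poll the orchestration task until clouddriver reports completion. +until curl -s "http://localhost:7002${TASK_URI}" | jq -e '.status.completed' > /dev/null; do + sleep 2 +done +```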
+ +***Sample Request*** + +``` + +curl -X POST \ + http://localhost:7002/aws/ops/invokeLambdaFunction \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctionone", + "credentials": "mylambda", + "description": "sample", + "Invocation-Type": "RequestResponse", + "log-type": "Tail", + "qualifier": "$LATEST", + "outfile": "out.txt" +}' +``` + +***Sample Response*** + +``` +{ + "id": "e4dfdfa1-0b3c-4980-a745-413eb9806332", + "resourceUri": "/task/e4dfdfa1-0b3c-4980-a745-413eb9806332" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/e4dfdfa1-0b3c-4980-a745-413eb9806332. So, I'll have to navigate to +http://localhost:7002/task/e4dfdfa1-0b3c-4980-a745-413eb9806332 for orchestration details +``` + + +### Purpose + +Update lambda function code. + +***Sample Request*** + +``` + +curl -X POST \ + http://localhost:7002/aws/ops/updateLambdaFunctionCode \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctiontwo", + "credentials": "mylambda", + "s3Bucket": "my_s3_bucket_name", + "s3Key": "my_s3_object_key", + "publish": "true" +}' +``` + +***Sample Response*** + +``` +{ + "id": "3a43157d-7f5d-4077-bc8d-8a21381eb6b7", + "resourceUri": "/task/3a43157d-7f5d-4077-bc8d-8a21381eb6b7" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/3a43157d-7f5d-4077-bc8d-8a21381eb6b7. So, I'll have to navigate to +http://localhost:7002/task/3a43157d-7f5d-4077-bc8d-8a21381eb6b7 for orchestration details +``` + + +### Purpose + +Upsert Event Mapping. + +***Sample Request*** + +``` +curl -X POST \ + http://localhost:7002/aws/ops/upsertLambdaFunctionEventMapping \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctiontwo", + "credentials": "mylambda", + "batchsize" : "10", + "majorFunctionVersion": "1", + "enabled": "false", + "eventSourceArn" : "arn:aws:kinesis:us-west-2::stream/myteststream" +}' +``` + +***Sample Response*** + +``` +{ + "id": "451b5171-7050-43b7-9176-483790e77bb6", + "resourceUri": "/task/50540cf6-5859-44f6-9f13-9c4944386666" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/50540cf6-5859-44f6-9f13-9c4944386666. So, I'll have to navigate to +http://localhost:7002/task/50540cf6-5859-44f6-9f13-9c4944386666 for orchestration details +It is important to capture the UUID from the orchestration details +if you plan to delete the event mapping later + +{ + "history": [ + { + "phase": "ORCHESTRATION", + "status": "Initializing Orchestration Task..." + }, + { + "phase": "ORCHESTRATION", + "status": "Processing op: UpsertLambdaEventSourceAtomicOperation" + }, + { + "phase": "UPSERT_LAMBDA_FUNCTION_EVENT_MAPPING", + "status": "Initializing Creation of AWS Lambda Function Event Source Mapping..." + }, + { + "phase": "UPSERT_LAMBDA_FUNCTION_EVENT_MAPPING", + "status": "Finished Creation of AWS Lambda Function Event Mapping Operation..." + }, + { + "phase": "ORCHESTRATION", + "status": "Orchestration completed."
+ } + ], + "id": "50540cf6-5859-44f6-9f13-9c4944386666", + "ownerId": "831f24c7-a083-40fa-9b42-c106e6d5edb0@spin-clouddriver-d66d9f79b-tq8mw", + "resultObjects": [ + { + "batchSize": 10, + "eventSourceArn": "arn:aws:kinesis:us-west-2::stream/mytest", + "functionArn": "arn:aws:lambda:us-west-2::function:mylambdafunctiontwo", + "lastModified": 1554382614013, + "lastProcessingResult": "No records processed", + "sdkHttpMetadata": { + "httpHeaders": { + "Connection": "keep-alive", + "Content-Length": "352", + "Content-Type": "application/json", + "Date": "Thu, 04 Apr 2019 12:56:54 GMT", + "x-amzn-RequestId": "1ef75be6-56d9-11e9-8874-479d47ecf826" + }, + "httpStatusCode": 202 + }, + "sdkResponseMetadata": { + "requestId": "1ef75be6-56d9-11e9-8874-479d47ecf826" + }, + "state": "Creating", + "stateTransitionReason": "User action", + "uuid": "4101b421-f0fb-4c89-8f99-6c2c153ec8d3" + } + ], + "startTimeMs": 1554382613881, + "status": { + "complete": true, + "completed": true, + "failed": false, + "phase": "ORCHESTRATION", + "status": "Orchestration completed." + } +} + +In my case the UUID is 4101b421-f0fb-4c89-8f99-6c2c153ec8d3 +``` + +### Purpose + +Delete lambda function event mapping + +***Sample Request*** + +``` +curl -X POST \ + http://localhost:7002/aws/ops/deleteLambdaFunctionEventMapping \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctiontwo", + "application": "LAMBDA-PRINT-FUNCTION", + "credentials": "mylambda", + "UUID": "0ee2253a-737d-4863-9f19-84627785e85e" +}' +``` + +***Sample Response*** + +``` +{ + "id": "0a01d76c-7942-46f0-810f-0f879f22e498", + "resourceUri": "/task/0a01d76c-7942-46f0-810f-0f879f22e498" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/0a01d76c-7942-46f0-810f-0f879f22e498. So, I'll have to navigate to +http://localhost:7002/task/0a01d76c-7942-46f0-810f-0f879f22e498 for orchestration details +``` + +### Purpose + +Upsert Lambda Function alias + +***Sample Request*** + +``` +curl -X POST \ + http://localhost:7002/aws/ops/upsertLambdaFunctionAlias \ + -H 'Accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "region": "us-west-2", + "functionName": "mylambdafunctionone", + "credentials": "mylambda", + "aliasDescription" : "description for alias 1", + "majorFunctionVersion": "1", + "aliasName": "spinnaker-alias-2", + "minorFunctionVersion" : "2", + "weightToMinorFunctionVersion" : "0.3" +}' +``` + +***Sample Response*** + +``` +{ + "id": "0a01d76c-7942-46f0-810f-0f879f22e498", + "resourceUri": "/task/0a01d76c-7942-46f0-810f-0f879f22e498" +} + +You may navigate to +http://localhost:7002/$resourceUri to see +the orchestration details. +In this case, resourceUri generated for my post request is +/task/0a01d76c-7942-46f0-810f-0f879f22e498.
So, I'll have to navigate to +http://localhost:7002/task/0a01d76c-7942-46f0-810f-0f879f22e498 for orchestration details +``` diff --git a/clouddriver-lambda/clouddriver-lambda.gradle b/clouddriver-lambda/clouddriver-lambda.gradle new file mode 100644 index 00000000000..f9318a202fb --- /dev/null +++ b/clouddriver-lambda/clouddriver-lambda.gradle @@ -0,0 +1,36 @@ +dependencies { + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-aws") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + + implementation "commons-io:commons-io" + implementation "com.amazonaws:aws-java-sdk" + implementation "com.github.ben-manes.caffeine:guava" + implementation "com.netflix.awsobjectmapper:awsobjectmapper" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-moniker" + implementation "org.apache.commons:commons-lang3" + implementation "org.apache.httpcomponents:httpclient" + implementation "org.apache.httpcomponents:httpcore" + implementation "org.apache.commons:commons-compress:1.20" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "com.squareup.retrofit:converter-jackson" + implementation "com.squareup.retrofit:retrofit" + implementation "com.netflix.spectator:spectator-api" + implementation "com.netflix.frigga:frigga" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/Keys.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/Keys.java new file mode 100644 index 00000000000..e7026118994 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/Keys.java @@ -0,0 +1,111 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.cache;
+
+import static com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider.ID;
+
+import com.google.common.base.CaseFormat;
+import com.netflix.spinnaker.clouddriver.cache.KeyParser;
+import java.util.HashMap;
+import java.util.Map;
+
+public class Keys implements KeyParser {
+  public enum Namespace {
+    IAM_ROLE,
+    LAMBDA_FUNCTIONS;
+
+    public final String ns;
+
+    Namespace() {
+      ns = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, this.name());
+    }
+
+    public String toString() {
+      return ns;
+    }
+  }
+
+  public static final String SEPARATOR = ":";
+
+  @Override
+  public String getCloudProvider() {
+    return ID;
+  }
+
+  @Override
+  public Map<String, String> parseKey(String key) {
+    return parse(key);
+  }
+
+  @Override
+  public Boolean canParseType(String type) {
+    return canParse(type);
+  }
+
+  private static Boolean canParse(String type) {
+    for (Namespace key : Namespace.values()) {
+      if (key.toString().equals(type)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  public static Map<String, String> parse(String key) {
+    String[] parts = key.split(SEPARATOR);
+
+    if (parts.length < 3 || !parts[0].equals(ID)) {
+      return null;
+    }
+
+    Map<String, String> result = new HashMap<>();
+    result.put("provider", parts[0]);
+    result.put("type", parts[1]);
+    result.put("account", parts[2]);
+
+    Namespace namespace =
+        Namespace.valueOf(CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, parts[1]));
+
+    switch (namespace) {
+      case LAMBDA_FUNCTIONS:
+        result.put("region", parts[3]);
+        result.put("AwsLambdaName", parts[4]);
+        break;
+      case IAM_ROLE:
+        result.put("roleName", parts[3]);
+        break;
+      default:
+        break;
+    }
+
+    return result;
+  }
+
+  @Override
+  public Boolean canParseField(String type) {
+    return false;
+  }
+
+  public static String getLambdaFunctionKey(String account, String region, String functionName) {
+    return String.format(
+        "%s:%s:%s:%s:%s", ID, Namespace.LAMBDA_FUNCTIONS, account, region, functionName);
+  }
+
+  public static String getIamRoleKey(String account, String iamRoleName) {
+    return String.format("%s:%s:%s:%s", ID, Namespace.IAM_ROLE, account, iamRoleName);
+  }
+}
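As a quick illustration, a round trip through `Keys` with made-up values; function keys take
the form `aws:lambdaFunctions:<account>:<region>:<functionName>`:

```
import com.netflix.spinnaker.clouddriver.lambda.cache.Keys;
import java.util.Map;

class KeysRoundTripExample {
  public static void main(String[] args) {
    // Hypothetical values; any account/region/function name works.
    String key = Keys.getLambdaFunctionKey("mylambda", "us-west-2", "mylambdafunctionone");
    // key == "aws:lambdaFunctions:mylambda:us-west-2:mylambdafunctionone"
    Map<String, String> parts = Keys.parse(key);
    System.out.println(parts.get("account"));       // mylambda
    System.out.println(parts.get("region"));        // us-west-2
    System.out.println(parts.get("AwsLambdaName")); // mylambdafunctionone
  }
}
```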
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/client/AbstractCacheClient.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/client/AbstractCacheClient.java
new file mode 100644
index 00000000000..49621165d13
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/client/AbstractCacheClient.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.cache.client;
+
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.lambda.cache.Keys;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+abstract class AbstractCacheClient<T> {
+
+  private final String keyNamespace;
+  protected final Cache cacheView;
+
+  /**
+   * @param cacheView The Cache that the client will query.
+   * @param keyNamespace The key namespace that the client is responsible for.
+   */
+  AbstractCacheClient(Cache cacheView, String keyNamespace) {
+    this.cacheView = cacheView;
+    this.keyNamespace = keyNamespace;
+  }
+
+  /**
+   * @param cacheData CacheData that will be converted into an object.
+   * @return An object of the generic type.
+   */
+  protected abstract T convert(CacheData cacheData);
+
+  /** @return A list of all generic type objects belonging to the key namespace. */
+  public Collection<T> getAll() {
+    Collection<CacheData> allData = cacheView.getAll(keyNamespace);
+    return convertAll(allData);
+  }
+
+  /**
+   * @param account name of the AWS account, as defined in clouddriver.yml
+   * @param region region of the AWS account, as defined in clouddriver.yml
+   * @return A list of all generic type objects belonging to the account and region in the key
+   *     namespace.
+   */
+  public Collection<T> getAll(String account, String region) {
+    Collection<CacheData> data = fetchFromCache(account, region);
+    return convertAll(data);
+  }
+
+  /**
+   * @param key A key within the key namespace that will be used to retrieve the object.
+   * @return An object of the generic type that is associated to the key.
+   */
+  public T get(String key) {
+    CacheData cacheData = cacheView.get(keyNamespace, key);
+    if (cacheData != null) {
+      return convert(cacheData);
+    }
+    return null;
+  }
+
+  /**
+   * @param cacheData A collection of CacheData that will be converted into a collection of generic
+   *     type objects.
+   * @return A collection of generic type objects.
+   */
+  private Collection<T> convertAll(Collection<CacheData> cacheData) {
+    return cacheData.stream().map(this::convert).collect(Collectors.toList());
+  }
+
+  /**
+   * @param account name of the AWS account, as defined in clouddriver.yml
+   * @param region region of the AWS account, as defined in clouddriver.yml
+   * @return All cache data in the key namespace that matches the account and region filters.
+   */
+  private Collection<CacheData> fetchFromCache(String account, String region) {
+    String accountFilter = account != null ? account + Keys.SEPARATOR : "*" + Keys.SEPARATOR;
+    String regionFilter = region != null ? region + Keys.SEPARATOR : "*" + Keys.SEPARATOR;
+    Set<String> keys = new HashSet<>();
+    String pattern =
+        "aws" + Keys.SEPARATOR + keyNamespace + Keys.SEPARATOR + accountFilter + regionFilter + "*";
+    Collection<String> nameMatches = cacheView.filterIdentifiers(keyNamespace, pattern);
+
+    keys.addAll(nameMatches);
+
+    Collection<CacheData> allData = cacheView.getAll(keyNamespace, keys);
+
+    if (allData == null) {
+      return Collections.emptyList();
+    }
+
+    return allData;
+  }
+}
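The base class leaves only two decisions to subclasses: which key namespace to query, and how
to turn raw CacheData attributes into a model object. A minimal sketch of a concrete client
(hypothetical class, shown in the same package because the base class is package-private):

```
import com.netflix.spinnaker.cats.cache.Cache;
import com.netflix.spinnaker.cats.cache.CacheData;
import com.netflix.spinnaker.clouddriver.lambda.cache.model.IamRole;

// Hypothetical subclass for illustration: pins the namespace in the constructor
// and implements convert() for its model type.
class ExampleIamRoleCacheClient extends AbstractCacheClient<IamRole> {
  ExampleIamRoleCacheClient(Cache cacheView) {
    super(cacheView, "iamRole"); // Keys.Namespace.IAM_ROLE.ns
  }

  @Override
  protected IamRole convert(CacheData cacheData) {
    IamRole role = new IamRole();
    role.setName((String) cacheData.getAttributes().get("name"));
    return role;
  }
}
```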
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/client/LambdaCacheClient.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/client/LambdaCacheClient.java
new file mode 100644
index 00000000000..0680eef0f8c
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/client/LambdaCacheClient.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.cache.client;
+
+import static com.netflix.spinnaker.clouddriver.lambda.cache.Keys.Namespace.LAMBDA_FUNCTIONS;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction;
+import java.util.*;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class LambdaCacheClient extends AbstractCacheClient<LambdaFunction> {
+  private final ObjectMapper objectMapper = AmazonObjectMapperConfigurer.createConfigured();
+
+  @Autowired
+  public LambdaCacheClient(Cache cacheView) {
+    super(cacheView, LAMBDA_FUNCTIONS.ns);
+  }
+
+  @Override
+  protected LambdaFunction convert(CacheData cacheData) {
+    Map<String, Object> attributes = cacheData.getAttributes();
+    LambdaFunction lambdaFunction = objectMapper.convertValue(attributes, LambdaFunction.class);
+    // Fix broken translation of uuid fields. Perhaps this is better fixed by configuring the
+    // objectMapper right.
+    List<Map> eventSourceMappings = (List<Map>) attributes.get("eventSourceMappings");
+    if (eventSourceMappings == null) {
+      return lambdaFunction;
+    }
+    Map<String, String> arnUuidMap = new HashMap<>();
+    eventSourceMappings.stream()
+        .forEach(
+            xx -> {
+              arnUuidMap.put((String) xx.get("eventSourceArn"), (String) xx.get("uuid"));
+            });
+    lambdaFunction
+        .getEventSourceMappings()
+        .forEach(
+            currMapping -> {
+              currMapping.setUUID(arnUuidMap.get(currMapping.getEventSourceArn()));
+            });
+    return lambdaFunction;
+  }
+}
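Once a function is cached, consumers look it up by key and read the repaired event source
mapping UUIDs off the model. A rough usage sketch (hypothetical wrapper class; the Cache
instance comes from the running clouddriver context):

```
import com.netflix.spinnaker.cats.cache.Cache;
import com.netflix.spinnaker.clouddriver.lambda.cache.Keys;
import com.netflix.spinnaker.clouddriver.lambda.cache.client.LambdaCacheClient;
import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction;

class LambdaLookupExample {
  // Prints each event source ARN with the UUID re-attached by convert().
  static void printEventSourceUuids(Cache cacheView) {
    LambdaCacheClient client = new LambdaCacheClient(cacheView);
    LambdaFunction fn =
        client.get(Keys.getLambdaFunctionKey("mylambda", "us-west-2", "mylambdafunctiontwo"));
    if (fn != null && fn.getEventSourceMappings() != null) {
      fn.getEventSourceMappings()
          .forEach(m -> System.out.println(m.getEventSourceArn() + " -> " + m.getUUID()));
    }
  }
}
```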
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/model/IamRole.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/model/IamRole.java
new file mode 100644
index 00000000000..aac863628d7
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/model/IamRole.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.cache.model;
+
+import com.netflix.spinnaker.clouddriver.aws.model.Role;
+import com.netflix.spinnaker.clouddriver.aws.model.TrustRelationship;
+import java.util.Set;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@AllArgsConstructor
+@NoArgsConstructor
+public class IamRole implements Role {
+
+  /*
+   The ID is the AWS ARN, in the format arn:aws:iam::account-id:role/role-name
+  */
+  String id;
+
+  String name;
+  String accountName;
+  Set<TrustRelationship> trustRelationships;
+}
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/model/LambdaFunction.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/model/LambdaFunction.java
new file mode 100644
index 00000000000..f76f5265e8f
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/cache/model/LambdaFunction.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.cache.model;
+
+import com.amazonaws.services.lambda.model.AliasConfiguration;
+import com.amazonaws.services.lambda.model.EventSourceMappingConfiguration;
+import com.amazonaws.services.lambda.model.FunctionCodeLocation;
+import com.amazonaws.services.lambda.model.FunctionConfiguration;
+import com.netflix.spinnaker.clouddriver.model.Function;
+import java.util.List;
+import java.util.Map;
+import lombok.Data;
+
+@Data
+public class LambdaFunction extends FunctionConfiguration implements Function {
+  private String cloudProvider;
+  private String account;
+  private String region;
+
+  private Map<String, String> revisions;
+  private List<AliasConfiguration> aliasConfigurations;
+  private List<EventSourceMappingConfiguration> eventSourceMappings;
+  private FunctionCodeLocation code;
+  private Map<String, String> tags;
+  private List<String> targetGroups;
+}
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/CreateLambdaFunctionAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/CreateLambdaFunctionAtomicOperationConverter.java
new file mode 100644
index 00000000000..1054ad37f9b
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/CreateLambdaFunctionAtomicOperationConverter.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.CreateLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.CreateLambdaAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("createLambdaFunction") +public class CreateLambdaFunctionAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new CreateLambdaAtomicOperation(convertDescription(input)); + } + + @Override + public CreateLambdaFunctionDescription convertDescription(Map input) { + CreateLambdaFunctionDescription converted = + getObjectMapper().convertValue(input, CreateLambdaFunctionDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaFunctionAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaFunctionAtomicOperationConverter.java new file mode 100644 index 00000000000..1f3b7d38b2c --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaFunctionAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.DeleteLambdaAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("deleteLambdaFunction") +public class DeleteLambdaFunctionAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + public AtomicOperation convertOperation(Map input) { + return new DeleteLambdaAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteLambdaFunctionDescription convertDescription(Map input) { + DeleteLambdaFunctionDescription converted = + getObjectMapper().convertValue(input, DeleteLambdaFunctionDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaFunctionEventMappingAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaFunctionEventMappingAtomicOperationConverter.java new file mode 100644 index 00000000000..9096756e7b1 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaFunctionEventMappingAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionEventMappingDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.DeleteLambdaEventSourceAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("deleteLambdaFunctionEventMapping") +public class DeleteLambdaFunctionEventMappingAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteLambdaEventSourceAtomicOperation(convertDescription(input)); + } + + @Override + public UpsertLambdaFunctionEventMappingDescription convertDescription(Map input) { + UpsertLambdaFunctionEventMappingDescription converted = + getObjectMapper().convertValue(input, UpsertLambdaFunctionEventMappingDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaProvisionedConcurrencyAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaProvisionedConcurrencyAtomicOperationConverter.java new file mode 100644 index 00000000000..0c6e08dd81e --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaProvisionedConcurrencyAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaProvisionedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.DeleteLambdaProvisionedConcurrencyAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("deleteLambdaProvisionedConcurrency") +public class DeleteLambdaProvisionedConcurrencyAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteLambdaProvisionedConcurrencyAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteLambdaProvisionedConcurrencyDescription convertDescription(Map input) { + DeleteLambdaProvisionedConcurrencyDescription converted = + getObjectMapper().convertValue(input, DeleteLambdaProvisionedConcurrencyDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaReservedConcurrencyAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaReservedConcurrencyAtomicOperationConverter.java new file mode 100644 index 00000000000..95bc30f00e8 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/DeleteLambdaReservedConcurrencyAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaReservedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.DeleteLambdaReservedConcurrencyAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("deleteLambdaReservedConcurrency") +public class DeleteLambdaReservedConcurrencyAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteLambdaReservedConcurrencyAtomicOperation(convertDescription(input)); + } + + @Override + public DeleteLambdaReservedConcurrencyDescription convertDescription(Map input) { + DeleteLambdaReservedConcurrencyDescription converted = + getObjectMapper().convertValue(input, DeleteLambdaReservedConcurrencyDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/InvokeLambdaFunctionAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/InvokeLambdaFunctionAtomicOperationConverter.java new file mode 100644 index 00000000000..0ea9ba5a27f --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/InvokeLambdaFunctionAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.InvokeLambdaAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("invokeLambdaFunction") +public class InvokeLambdaFunctionAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new InvokeLambdaAtomicOperation(convertDescription(input)); + } + + @Override + public InvokeLambdaFunctionDescription convertDescription(Map input) { + InvokeLambdaFunctionDescription converted = + getObjectMapper().convertValue(input, InvokeLambdaFunctionDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PublishLambdaFunctionVersionAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PublishLambdaFunctionVersionAtomicOperationConverter.java new file mode 100644 index 00000000000..3457abe6d41 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PublishLambdaFunctionVersionAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PublishLambdaFunctionVersionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.PublishLambdaAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("publishLambdaFunctionVersion") +public class PublishLambdaFunctionVersionAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @Override + public AtomicOperation convertOperation(Map input) { + return new PublishLambdaAtomicOperation(convertDescription(input)); + } + + @Override + public PublishLambdaFunctionVersionDescription convertDescription(Map input) { + PublishLambdaFunctionVersionDescription converted = + getObjectMapper().convertValue(input, PublishLambdaFunctionVersionDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PutLambdaProvisionedConcurrencyAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PutLambdaProvisionedConcurrencyAtomicOperationConverter.java new file mode 100644 index 00000000000..8a10c12a64d --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PutLambdaProvisionedConcurrencyAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PutLambdaProvisionedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.PutLambdaProvisionedConcurrencyAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("putLambdaProvisionedConcurrency") +public class PutLambdaProvisionedConcurrencyAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @Override + public AtomicOperation convertOperation(Map input) { + return new PutLambdaProvisionedConcurrencyAtomicOperation(convertDescription(input)); + } + + @Override + public PutLambdaProvisionedConcurrencyDescription convertDescription(Map input) { + PutLambdaProvisionedConcurrencyDescription converted = + getObjectMapper().convertValue(input, PutLambdaProvisionedConcurrencyDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PutLambdaReservedConcurrencyAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PutLambdaReservedConcurrencyAtomicOperationConverter.java new file mode 100644 index 00000000000..f86a180010f --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/PutLambdaReservedConcurrencyAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PutLambdaReservedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.PutLambdaReservedConcurrencyAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("putLambdaReservedConcurrency") +public class PutLambdaReservedConcurrencyAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @Override + public AtomicOperation convertOperation(Map input) { + return new PutLambdaReservedConcurrencyAtomicOperation(convertDescription(input)); + } + + @Override + public PutLambdaReservedConcurrencyDescription convertDescription(Map input) { + PutLambdaReservedConcurrencyDescription converted = + getObjectMapper().convertValue(input, PutLambdaReservedConcurrencyDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpdateLambdaFunctionCodeAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpdateLambdaFunctionCodeAtomicOperationConverter.java new file mode 100644 index 00000000000..fbf4aca4fd8 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpdateLambdaFunctionCodeAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpdateLambdaFunctionCodeDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.UpdateLambdaCodeAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("updateLambdaFunctionCode") +public class UpdateLambdaFunctionCodeAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new UpdateLambdaCodeAtomicOperation(convertDescription(input)); + } + + @Override + public UpdateLambdaFunctionCodeDescription convertDescription(Map input) { + UpdateLambdaFunctionCodeDescription converted = + getObjectMapper().convertValue(input, UpdateLambdaFunctionCodeDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpdateLambdaFunctionConfigurationAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpdateLambdaFunctionConfigurationAtomicOperationConverter.java new file mode 100644 index 00000000000..27b4a70c04d --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpdateLambdaFunctionConfigurationAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.CreateLambdaFunctionConfigurationDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.UpdateLambdaConfigurationAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("updateLambdaFunctionConfiguration") +public class UpdateLambdaFunctionConfigurationAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new UpdateLambdaConfigurationAtomicOperation(convertDescription(input)); + } + + @Override + public CreateLambdaFunctionConfigurationDescription convertDescription(Map input) { + CreateLambdaFunctionConfigurationDescription converted = + getObjectMapper().convertValue(input, CreateLambdaFunctionConfigurationDescription.class); + + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpsertLambdaFunctionAliasAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpsertLambdaFunctionAliasAtomicOperationConverter.java new file mode 100644 index 00000000000..83e3b0c35f6 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpsertLambdaFunctionAliasAtomicOperationConverter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionAliasDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.UpsertLambdaAliasAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("upsertLambdaFunctionAlias") +public class UpsertLambdaFunctionAliasAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new UpsertLambdaAliasAtomicOperation(convertDescription(input)); + } + + @Override + public UpsertLambdaFunctionAliasDescription convertDescription(Map input) { + UpsertLambdaFunctionAliasDescription converted = + getObjectMapper().convertValue(input, UpsertLambdaFunctionAliasDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpsertLambdaFunctionEventMappingAtomicOperationConverter.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpsertLambdaFunctionEventMappingAtomicOperationConverter.java new file mode 100644 index 00000000000..18d9a2c0909 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/converters/UpsertLambdaFunctionEventMappingAtomicOperationConverter.java @@ -0,0 +1,43 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.converters; + +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionEventMappingDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.UpsertLambdaEventSourceAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Component("upsertLambdaFunctionEventMapping") +public class UpsertLambdaFunctionEventMappingAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + @Override + public AtomicOperation convertOperation(Map input) { + return new UpsertLambdaEventSourceAtomicOperation(convertDescription(input)); + } + + @Override + public UpsertLambdaFunctionEventMappingDescription convertDescription(Map input) { + UpsertLambdaFunctionEventMappingDescription converted = + getObjectMapper().convertValue(input, UpsertLambdaFunctionEventMappingDescription.class); + + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + + return converted; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/AbstractLambdaFunctionDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/AbstractLambdaFunctionDescription.java new file mode 100644 index 00000000000..ac3f1e2308c --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/AbstractLambdaFunctionDescription.java @@ -0,0 +1,41 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.deploy.description;
+
+import com.netflix.spinnaker.clouddriver.aws.deploy.description.AbstractAmazonCredentialsDescription;
+import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = false)
+public abstract class AbstractLambdaFunctionDescription extends AbstractAmazonCredentialsDescription
+    implements ApplicationNameable {
+  String region;
+  String appName;
+
+  @Override
+  public Collection<String> getApplications() {
+    if (appName == null || appName.isEmpty()) {
+      return Collections.emptyList();
+    }
+    return List.of(getAppName());
+  }
+}
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/CreateLambdaFunctionConfigurationDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/CreateLambdaFunctionConfigurationDescription.java
new file mode 100644
index 00000000000..47a57db6524
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/CreateLambdaFunctionConfigurationDescription.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.deploy.description;
+
+import com.amazonaws.services.lambda.model.DeadLetterConfig;
+import com.amazonaws.services.lambda.model.TracingConfig;
+import java.util.List;
+import java.util.Map;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = false)
+public class CreateLambdaFunctionConfigurationDescription
+    extends AbstractLambdaFunctionDescription {
+  String functionName;
+  String description;
+  String handler;
+  Integer memorySize;
+  String role;
+  String runtime;
+  Integer timeout;
+  List<String> subnetIds;
+  List<String> securityGroupIds;
+  List<String> layers;
+  Map<String, String> envVariables;
+  Map<String, String> tags;
+  DeadLetterConfig deadLetterConfig;
+  String kmskeyArn;
+  TracingConfig tracingConfig;
+  String targetGroups;
+  String runTime;
+}
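All the converters above follow one pattern: Jackson binds the incoming operation map onto a
description class, then credentials are resolved by name. A hedged sketch of just the binding
step, with invented field values and default Jackson settings assumed:

```
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spinnaker.clouddriver.lambda.deploy.description.CreateLambdaFunctionConfigurationDescription;
import java.util.List;
import java.util.Map;

class DescriptionBindingExample {
  public static void main(String[] args) {
    ObjectMapper mapper = new ObjectMapper();
    // Invented payload; keys mirror fields on the description class.
    Map<String, Object> input =
        Map.of(
            "region", "us-west-2",
            "functionName", "mylambdafunctionone",
            "memorySize", 512,
            "timeout", 60,
            "subnetIds", List.of("subnet-1234"),
            "envVariables", Map.of("STAGE", "test"));
    CreateLambdaFunctionConfigurationDescription desc =
        mapper.convertValue(input, CreateLambdaFunctionConfigurationDescription.class);
    System.out.println(desc.getFunctionName() + " memory=" + desc.getMemorySize());
  }
}
```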
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/CreateLambdaFunctionDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/CreateLambdaFunctionDescription.java
new file mode 100644
index 00000000000..bfbbe0888b4
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/CreateLambdaFunctionDescription.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.deploy.description;
+
+import com.amazonaws.services.lambda.model.DeadLetterConfig;
+import com.amazonaws.services.lambda.model.TracingConfig;
+import java.util.List;
+import java.util.Map;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = false)
+public class CreateLambdaFunctionDescription extends AbstractLambdaFunctionDescription {
+  String functionName;
+  String description;
+  String s3bucket;
+  String s3key;
+  String handler;
+  String role;
+  String runtime;
+  List<String> layers;
+
+  Integer memorySize;
+  Integer timeout;
+
+  Map<String, String> tags;
+
+  Boolean publish;
+
+  Map<String, String> envVariables;
+  List<String> subnetIds;
+  List<String> securityGroupIds;
+
+  String targetGroups;
+
+  DeadLetterConfig deadLetterConfig;
+  TracingConfig tracingConfig;
+  String kmskeyArn;
+}
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaFunctionDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaFunctionDescription.java
new file mode 100644
index 00000000000..8daf7307142
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaFunctionDescription.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.deploy.description;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = false)
+public class DeleteLambdaFunctionDescription extends AbstractLambdaFunctionDescription {
+  String functionName;
+  String qualifier;
+  // TODO : Stub, implement later
+}
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaProvisionedConcurrencyDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaProvisionedConcurrencyDescription.java
new file mode 100644
index 00000000000..bc5210cde7e
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaProvisionedConcurrencyDescription.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class DeleteLambdaProvisionedConcurrencyDescription + extends AbstractLambdaFunctionDescription { + String functionName; + String qualifier; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaReservedConcurrencyDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaReservedConcurrencyDescription.java new file mode 100644 index 00000000000..c2d86d966f2 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/DeleteLambdaReservedConcurrencyDescription.java @@ -0,0 +1,26 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class DeleteLambdaReservedConcurrencyDescription extends AbstractLambdaFunctionDescription { + String functionName; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/InvokeLambdaFunctionDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/InvokeLambdaFunctionDescription.java new file mode 100644 index 00000000000..1ed7dbb1399 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/InvokeLambdaFunctionDescription.java @@ -0,0 +1,34 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class InvokeLambdaFunctionDescription extends AbstractLambdaFunctionDescription { + String functionName; + String qualifier; + + String payload; + Artifact payloadArtifact; + // -1 disables the timeout which is the default behavior - default to the ClientConfiguration + // timeout + int timeout = -1; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/InvokeLambdaFunctionOutputDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/InvokeLambdaFunctionOutputDescription.java new file mode 100644 index 00000000000..3ef20b67713 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/InvokeLambdaFunctionOutputDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import com.amazonaws.services.lambda.model.InvokeResult; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class InvokeLambdaFunctionOutputDescription { + InvokeResult invokeResult; + private String responseString; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PublishLambdaFunctionVersionDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PublishLambdaFunctionVersionDescription.java new file mode 100644 index 00000000000..6098c09e221 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PublishLambdaFunctionVersionDescription.java @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class PublishLambdaFunctionVersionDescription extends AbstractLambdaFunctionDescription { + String functionName; + String description; + String revisionId; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PutLambdaProvisionedConcurrencyDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PutLambdaProvisionedConcurrencyDescription.java new file mode 100644 index 00000000000..9edee94df95 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PutLambdaProvisionedConcurrencyDescription.java @@ -0,0 +1,27 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class PutLambdaProvisionedConcurrencyDescription extends AbstractLambdaFunctionDescription { + String functionName; + String qualifier; + int provisionedConcurrentExecutions; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PutLambdaReservedConcurrencyDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PutLambdaReservedConcurrencyDescription.java new file mode 100644 index 00000000000..29cf48791af --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/PutLambdaReservedConcurrencyDescription.java @@ -0,0 +1,27 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class PutLambdaReservedConcurrencyDescription extends AbstractLambdaFunctionDescription { + String functionName; + int reservedConcurrentExecutions; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpdateLambdaFunctionCodeDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpdateLambdaFunctionCodeDescription.java new file mode 100644 index 00000000000..cca4e4757e7 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpdateLambdaFunctionCodeDescription.java @@ -0,0 +1,30 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class UpdateLambdaFunctionCodeDescription extends AbstractLambdaFunctionDescription { + String functionName; + + String s3bucket; + String s3key; + Boolean publish; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpsertLambdaFunctionAliasDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpsertLambdaFunctionAliasDescription.java new file mode 100644 index 00000000000..7908292ccfc --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpsertLambdaFunctionAliasDescription.java @@ -0,0 +1,33 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class UpsertLambdaFunctionAliasDescription extends AbstractLambdaFunctionDescription { + String functionName; + + String majorFunctionVersion; + String minorFunctionVersion; + Double weightToMinorFunctionVersion; + + String aliasDescription; + String aliasName; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpsertLambdaFunctionEventMappingDescription.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpsertLambdaFunctionEventMappingDescription.java new file mode 100644 index 00000000000..2306b812a21 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/description/UpsertLambdaFunctionEventMappingDescription.java @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.description; + +import com.amazonaws.services.lambda.model.DestinationConfig; +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = false) +public class UpsertLambdaFunctionEventMappingDescription extends AbstractLambdaFunctionDescription { + String functionName; + String qualifier; + Integer batchsize = 1; + Boolean bisectBatchOnError = null; + Integer maxBatchingWindowSecs = null; + Integer maxRecordAgeSecs = null; + Integer maxRetryAttempts = null; + Integer parallelizationFactor = null; + Integer tumblingWindowSecs = null; + Boolean enabled = false; + String eventSourceArn = null; + String uuid = null; + String startingPosition = null; + DestinationConfig destinationConfig = null; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/exception/InvalidAccountException.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/exception/InvalidAccountException.java new file mode 100644 index 00000000000..64da377e844 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/exception/InvalidAccountException.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
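[Editor's note: a sketch, under assumed values, of populating the event-mapping description above; the ARN and names are hypothetical. Note the defaults declared in the class: batch size 1 and a disabled mapping unless overridden.]

import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionEventMappingDescription;

public class EventMappingExample {
  public static UpsertLambdaFunctionEventMappingDescription example() {
    UpsertLambdaFunctionEventMappingDescription mapping =
        new UpsertLambdaFunctionEventMappingDescription();
    mapping.setFunctionName("myapp-handler"); // hypothetical
    mapping.setEventSourceArn("arn:aws:sqs:us-west-2:123456789012:my-queue"); // hypothetical ARN
    mapping.setBatchsize(10); // overrides the default batch size of 1
    mapping.setEnabled(true); // mappings are created disabled by default
    return mapping;
  }
}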
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.exception; + +public class InvalidAccountException extends IllegalArgumentException { + public InvalidAccountException(String message) { + super(message); + } + + public InvalidAccountException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/exception/LambdaOperationException.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/exception/LambdaOperationException.java new file mode 100644 index 00000000000..710444476b5 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/exception/LambdaOperationException.java @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.exception; + +public class LambdaOperationException extends RuntimeException { + public LambdaOperationException(String message) { + super(message); + } + + public LambdaOperationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/AbstractLambdaAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/AbstractLambdaAtomicOperation.java new file mode 100644 index 00000000000..9686f6c80c5 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/AbstractLambdaAtomicOperation.java @@ -0,0 +1,48 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.AbstractLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import org.springframework.beans.factory.annotation.Autowired; + +public abstract class AbstractLambdaAtomicOperation< + T extends AbstractLambdaFunctionDescription, K> + extends LambdaClientProvider implements AtomicOperation<K> { + + @Autowired LambdaFunctionProvider lambdaFunctionProvider; + + private final String basePhase; + + T description; + + AbstractLambdaAtomicOperation(T description, String basePhase) { + super(description.getRegion(), description.getCredentials()); + this.description = description; + this.basePhase = basePhase; + } + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + void updateTaskStatus(String status) { + getTask().updateStatus(basePhase, status); + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/CreateLambdaAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/CreateLambdaAtomicOperation.java new file mode 100644 index 00000000000..d9ac8e222d8 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/CreateLambdaAtomicOperation.java @@ -0,0 +1,186 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
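[Editor's note: a minimal hypothetical subclass, sketching the shape every concrete operation below follows: the description supplies region and credentials to LambdaClientProvider, and basePhase tags every task-status update. It must live in the same package, since updateTaskStatus is package-private.]

package com.netflix.spinnaker.clouddriver.lambda.deploy.ops;

import com.netflix.spinnaker.clouddriver.lambda.deploy.description.AbstractLambdaFunctionDescription;
import java.util.List;

public class NoopLambdaAtomicOperation
    extends AbstractLambdaAtomicOperation<AbstractLambdaFunctionDescription, Void> {

  public NoopLambdaAtomicOperation(AbstractLambdaFunctionDescription description) {
    super(description, "NOOP_LAMBDA_FUNCTION"); // hypothetical phase name
  }

  @Override
  public Void operate(List priorOutputs) {
    updateTaskStatus("Doing nothing..."); // recorded against the NOOP_LAMBDA_FUNCTION phase
    return null;
  }
}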
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsRequest; +import com.amazonaws.services.elasticloadbalancingv2.model.DescribeTargetGroupsResult; +import com.amazonaws.services.elasticloadbalancingv2.model.RegisterTargetsRequest; +import com.amazonaws.services.elasticloadbalancingv2.model.RegisterTargetsResult; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetDescription; +import com.amazonaws.services.elasticloadbalancingv2.model.TargetGroup; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.AddPermissionRequest; +import com.amazonaws.services.lambda.model.CreateFunctionRequest; +import com.amazonaws.services.lambda.model.CreateFunctionResult; +import com.amazonaws.services.lambda.model.Environment; +import com.amazonaws.services.lambda.model.FunctionCode; +import com.amazonaws.services.lambda.model.VpcConfig; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.CreateLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.UUID; + +public class CreateLambdaAtomicOperation + extends AbstractLambdaAtomicOperation<CreateLambdaFunctionDescription, CreateFunctionResult> + implements AtomicOperation<CreateFunctionResult> { + + public CreateLambdaAtomicOperation(CreateLambdaFunctionDescription description) { + super(description, "CREATE_LAMBDA_FUNCTION"); + } + + @Override + public CreateFunctionResult operate(List priorOutputs) { + updateTaskStatus("Initializing Creation of AWS Lambda Function Operation..."); + return createFunction(); + } + + private CreateFunctionResult createFunction() { + FunctionCode code = + new FunctionCode() + .withS3Bucket(description.getProperty("s3bucket").toString()) + .withS3Key(description.getProperty("s3key").toString()); + + Map<String, String> objTag = new HashMap<>(); + if (null != description.getTags()) { + + for (Entry<String, String> entry : description.getTags().entrySet()) { + objTag.put(entry.getKey(), entry.getValue()); + } + } + + AWSLambda client = getLambdaClient(); + + CreateFunctionRequest request = new CreateFunctionRequest(); + request.setFunctionName( + combineAppDetail(description.getAppName(), description.getFunctionName())); + request.setDescription(description.getDescription()); + request.setHandler(description.getHandler()); + request.setMemorySize(description.getMemorySize()); + request.setPublish(description.getPublish()); + request.setRole(description.getRole()); + request.setRuntime(description.getRuntime()); + request.setTimeout(description.getTimeout()); + request.setLayers(description.getLayers()); + + request.setCode(code); + request.setTags(objTag); + + Map<String, String> envVariables = description.getEnvVariables(); + if (null != envVariables) { + request.setEnvironment(new Environment().withVariables(envVariables)); + } + + if (null != description.getSecurityGroupIds() || null != description.getSubnetIds()) { + request.setVpcConfig( + new VpcConfig() + .withSecurityGroupIds(description.getSecurityGroupIds()) + .withSubnetIds(description.getSubnetIds())); + } + if (!description.getDeadLetterConfig().getTargetArn().isEmpty()) { + request.setDeadLetterConfig(description.getDeadLetterConfig()); + } + request.setKMSKeyArn(description.getKmskeyArn()); +
request.setTracingConfig(description.getTracingConfig()); + + CreateFunctionResult result = client.createFunction(request); + updateTaskStatus("Finished Creation of AWS Lambda Function Operation..."); + if (description.getTargetGroups() != null && !description.getTargetGroups().isEmpty()) { + + updateTaskStatus( + String.format( + "Started registering lambda to targetGroup (%s)", description.getTargetGroups())); + String functionArn = result.getFunctionArn(); + registerTargetGroup(functionArn, client); + } + + return result; + } + + protected String combineAppDetail(String appName, String functionName) { + Names functionAppName = Names.parseName(functionName); + if (null != functionAppName) { + return functionAppName.getApp().equals(appName) + ? functionName + : (appName + "-" + functionName); + } else { + throw new IllegalArgumentException( + String.format("Function name {%s} contains invalid characters", functionName)); + } + } + + private RegisterTargetsResult registerTargetGroup(String functionArn, AWSLambda lambdaClient) { + + AmazonElasticLoadBalancing loadBalancingV2 = getAmazonElasticLoadBalancingClient(); + TargetGroup targetGroup = retrieveTargetGroup(loadBalancingV2); + + AddPermissionRequest addPermissionRequest = + new AddPermissionRequest() + .withFunctionName(functionArn) + .withAction("lambda:InvokeFunction") + .withSourceArn(targetGroup.getTargetGroupArn()) + .withPrincipal("elasticloadbalancing.amazonaws.com") + .withStatementId(UUID.randomUUID().toString()); + + lambdaClient.addPermission(addPermissionRequest); + + updateTaskStatus( + String.format( + "Lambda (%s) invoke permissions added to Target group (%s).", + functionArn, targetGroup.getTargetGroupArn())); + + RegisterTargetsResult result = + loadBalancingV2.registerTargets( + new RegisterTargetsRequest() + .withTargets(new TargetDescription().withId(functionArn)) + .withTargetGroupArn(targetGroup.getTargetGroupArn())); + + updateTaskStatus( + String.format( + "Registered the Lambda (%s) with Target group (%s).", + functionArn, targetGroup.getTargetGroupArn())); + return result; + } + + private TargetGroup retrieveTargetGroup(AmazonElasticLoadBalancing loadBalancingV2) { + + DescribeTargetGroupsRequest request = + new DescribeTargetGroupsRequest().withNames(description.getTargetGroups()); + DescribeTargetGroupsResult describeTargetGroupsResult = + loadBalancingV2.describeTargetGroups(request); + + if (describeTargetGroupsResult.getTargetGroups().size() == 1) { + return describeTargetGroupsResult.getTargetGroups().get(0); + } else if (describeTargetGroupsResult.getTargetGroups().size() > 1) { + throw new IllegalArgumentException( + "There are multiple target groups with the name " + description.getTargetGroups() + "."); + } else { + throw new IllegalArgumentException( + "There is no target group with the name " + description.getTargetGroups() + "."); + } + } + + private AmazonElasticLoadBalancing getAmazonElasticLoadBalancingClient() { + + return getAmazonClientProvider() + .getAmazonElasticLoadBalancingV2(description.getCredentials(), getRegion(), false); + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaAtomicOperation.java new file mode 100644 index 00000000000..045a4fa597b --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaAtomicOperation.java @@ -0,0 +1,66 @@
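[Editor's note: a worked sketch of the naming rule in combineAppDetail above, i.e. the expected behavior given Frigga's Names parsing; these are illustrative calls, not a test shipped with this change:
  combineAppDetail("myapp", "myapp-handler") -> "myapp-handler" (name already parses to app "myapp", kept as-is)
  combineAppDetail("myapp", "handler")       -> "myapp-handler" (name parses to app "handler", so the app prefix is added)]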
+/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteFunctionRequest; +import com.amazonaws.services.lambda.model.DeleteFunctionResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class DeleteLambdaAtomicOperation + extends AbstractLambdaAtomicOperation<DeleteLambdaFunctionDescription, DeleteFunctionResult> + implements AtomicOperation<DeleteFunctionResult> { + + public DeleteLambdaAtomicOperation(DeleteLambdaFunctionDescription description) { + super(description, "DELETE_LAMBDA_FUNCTION_CODE"); + } + + @Override + public DeleteFunctionResult operate(List priorOutputs) { + updateTaskStatus("Initializing deletion of AWS Lambda Function Operation..."); + return deleteFunctionResult(); + } + + private DeleteFunctionResult deleteFunctionResult() { + String functionName = description.getFunctionName(); + String region = description.getRegion(); + String account = description.getAccount(); + + LambdaFunction cache = + (LambdaFunction) lambdaFunctionProvider.getFunction(account, region, functionName); + + AWSLambda client = getLambdaClient(); + + if (cache != null && client != null) { + DeleteFunctionRequest request = + new DeleteFunctionRequest().withFunctionName(cache.getFunctionArn()); + + request.withQualifier(description.getQualifier()); + + DeleteFunctionResult result = client.deleteFunction(request); + + updateTaskStatus("Finished deletion of AWS Lambda Function Operation..."); + + return result; + } + + return null; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaEventSourceAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaEventSourceAtomicOperation.java new file mode 100644 index 00000000000..179ea44854e --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaEventSourceAtomicOperation.java @@ -0,0 +1,70 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.*; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionEventMappingDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class DeleteLambdaEventSourceAtomicOperation + extends AbstractLambdaAtomicOperation<UpsertLambdaFunctionEventMappingDescription, Object> + implements AtomicOperation<Object> { + + public DeleteLambdaEventSourceAtomicOperation( + UpsertLambdaFunctionEventMappingDescription description) { + super(description, "DELETE_LAMBDA_FUNCTION_EVENT_MAPPING"); + } + + @Override + public Object operate(List priorOutputs) { + LambdaFunction lambdaFunction = + (LambdaFunction) + lambdaFunctionProvider.getFunction( + description.getAccount(), description.getRegion(), description.getFunctionName()); + + List<EventSourceMappingConfiguration> eventSourceMappingConfigurations = + lambdaFunction.getEventSourceMappings(); + + for (EventSourceMappingConfiguration eventSourceMappingConfiguration : + eventSourceMappingConfigurations) { + if (eventSourceMappingConfiguration + .getEventSourceArn() + .equalsIgnoreCase(description.getEventSourceArn())) { + description.setUuid(eventSourceMappingConfiguration.getUUID()); + return deleteEventSourceMappingResult(); + } + } + + return null; + } + + private DeleteEventSourceMappingResult deleteEventSourceMappingResult() { + updateTaskStatus("Initializing Deleting of AWS Lambda Function Event Mapping Operation..."); + + AWSLambda client = getLambdaClient(); + DeleteEventSourceMappingRequest request = + new DeleteEventSourceMappingRequest().withUUID(description.getUuid()); + + DeleteEventSourceMappingResult result = client.deleteEventSourceMapping(request); + updateTaskStatus("Finished Deleting of AWS Lambda Function Event Mapping Operation..."); + + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaProvisionedConcurrencyAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaProvisionedConcurrencyAtomicOperation.java new file mode 100644 index 00000000000..265b9b4b86c --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaProvisionedConcurrencyAtomicOperation.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteProvisionedConcurrencyConfigRequest; +import com.amazonaws.services.lambda.model.DeleteProvisionedConcurrencyConfigResult; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaProvisionedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class DeleteLambdaProvisionedConcurrencyAtomicOperation + extends AbstractLambdaAtomicOperation< + DeleteLambdaProvisionedConcurrencyDescription, DeleteProvisionedConcurrencyConfigResult> + implements AtomicOperation<DeleteProvisionedConcurrencyConfigResult> { + + public DeleteLambdaProvisionedConcurrencyAtomicOperation( + DeleteLambdaProvisionedConcurrencyDescription description) { + super(description, "DELETE_LAMBDA_FUNCTION_PROVISIONED_CONCURRENCY"); + } + + @Override + public DeleteProvisionedConcurrencyConfigResult operate(List priorOutputs) { + updateTaskStatus( + "Initializing Atomic Operation AWS Lambda for DeleteProvisionedConcurrency..."); + return deleteProvisionedFunctionConcurrency( + description.getFunctionName(), description.getQualifier()); + } + + private DeleteProvisionedConcurrencyConfigResult deleteProvisionedFunctionConcurrency( + String functionName, String qualifier) { + AWSLambda client = getLambdaClient(); + DeleteProvisionedConcurrencyConfigRequest req = + new DeleteProvisionedConcurrencyConfigRequest() + .withFunctionName(functionName) + .withQualifier(qualifier); + + DeleteProvisionedConcurrencyConfigResult result = + client.deleteProvisionedConcurrencyConfig(req); + updateTaskStatus("Finished Atomic Operation AWS Lambda for DeleteProvisionedConcurrency..."); + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaReservedConcurrencyAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaReservedConcurrencyAtomicOperation.java new file mode 100644 index 00000000000..7b52e6fea27 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaReservedConcurrencyAtomicOperation.java @@ -0,0 +1,51 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteFunctionConcurrencyRequest; +import com.amazonaws.services.lambda.model.DeleteFunctionConcurrencyResult; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaReservedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class DeleteLambdaReservedConcurrencyAtomicOperation + extends AbstractLambdaAtomicOperation< + DeleteLambdaReservedConcurrencyDescription, DeleteFunctionConcurrencyResult> + implements AtomicOperation<DeleteFunctionConcurrencyResult> { + + public DeleteLambdaReservedConcurrencyAtomicOperation( + DeleteLambdaReservedConcurrencyDescription description) { + super(description, "DELETE_LAMBDA_FUNCTION_RESERVED_CONCURRENCY"); + } + + @Override + public DeleteFunctionConcurrencyResult operate(List priorOutputs) { + updateTaskStatus("Initializing Atomic Operation AWS Lambda for DeleteReservedConcurrency..."); + return deleteReservedFunctionConcurrency(description.getFunctionName()); + } + + private DeleteFunctionConcurrencyResult deleteReservedFunctionConcurrency(String functionName) { + AWSLambda client = getLambdaClient(); + DeleteFunctionConcurrencyRequest req = + new DeleteFunctionConcurrencyRequest().withFunctionName(functionName); + + DeleteFunctionConcurrencyResult result = client.deleteFunctionConcurrency(req); + updateTaskStatus("Finished Atomic Operation AWS Lambda for DeleteReservedConcurrency..."); + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/InvokeLambdaAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/InvokeLambdaAtomicOperation.java new file mode 100644 index 00000000000..a4afcd69492 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/InvokeLambdaAtomicOperation.java @@ -0,0 +1,155 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.amazonaws.services.lambda.model.InvokeResult; +import com.amazonaws.services.lambda.model.LogType; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionOutputDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.exception.LambdaOperationException; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import lombok.extern.log4j.Log4j2; +import org.apache.commons.compress.utils.IOUtils; +import org.apache.commons.io.FileUtils; +import org.springframework.beans.factory.annotation.Autowired; + +@Log4j2 +public class InvokeLambdaAtomicOperation + extends AbstractLambdaAtomicOperation< + InvokeLambdaFunctionDescription, InvokeLambdaFunctionOutputDescription> + implements AtomicOperation<InvokeLambdaFunctionOutputDescription> { + + @Autowired private ArtifactDownloader artifactDownloader; + + public InvokeLambdaAtomicOperation(InvokeLambdaFunctionDescription description) { + super(description, "INVOKE_LAMBDA_FUNCTION"); + } + + @Override + public InvokeLambdaFunctionOutputDescription operate(List priorOutputs) { + updateTaskStatus("Initializing Invoking AWS Lambda Function Operation..."); + + if (description.getPayloadArtifact() != null) { + String payload = getPayloadFromArtifact(description.getPayloadArtifact()); + return invokeFunction(description.getFunctionName(), payload); + } else if (description.getPayload() != null) { + return invokeFunction(description.getFunctionName(), description.getPayload()); + } + + return null; + } + + private InvokeLambdaFunctionOutputDescription invokeFunction( + String functionName, String payload) { + AWSLambda client = getLambdaClient(); + InvokeRequest req = + new InvokeRequest() + .withFunctionName(functionName) + .withLogType(LogType.Tail) + .withPayload(payload); + + String qualifierRegex = "|[a-zA-Z0-9$_-]+"; + // Guard against a null qualifier (none configured) before matching against the regex. + if (description.getQualifier() != null + && description.getQualifier().matches(qualifierRegex)) { + req.setQualifier(description.getQualifier()); + } + + if (description.getTimeout() != -1) { + // UI & API are in seconds, SDK is in MS.
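+ // e.g. a timeout of 30 configured upstream (seconds) becomes 30 * 1000 = 30000 ms on the
+ // SDK request below, while the default of -1 skips this block entirely and leaves the
+ // client-level ClientConfiguration timeout in effect.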
+ req.setSdkRequestTimeout(description.getTimeout() * 1000); + } + log.info("Invoking Lambda function {} and waiting for it to complete", functionName); + InvokeResult result = client.invoke(req); + String ans = byteBuffer2String(result.getPayload(), Charset.forName("UTF-8")); + InvokeLambdaFunctionOutputDescription is = new InvokeLambdaFunctionOutputDescription(); + is.setInvokeResult(result); + is.setResponseString(ans); + updateTaskStatus("Finished Invoking of AWS Lambda Function Operation..."); + return is; + } + + public static String byteBuffer2String(ByteBuffer buf, Charset charset) { + if (buf == null) { + return null; + } + byte[] bytes; + if (buf.hasArray()) { + bytes = buf.array(); + } else { + buf.rewind(); + bytes = new byte[buf.remaining()]; + // Copy the contents out of buffers that are not array-backed (e.g. direct buffers); + // without this read the array would stay zero-filled. + buf.get(bytes); + } + return new String(bytes, charset); + } + + private String getPayloadFromArtifact(Artifact artifact) { + Path directory = createEmptyDirectory(); + File payloadFile = downloadFileToDirectory(artifact, directory); + String payloadString; + + try { + payloadString = FileUtils.readFileToString(payloadFile, "UTF8"); + } catch (IOException e) { + throw new LambdaOperationException("Unable to read Artifact file to string."); + } finally { + try { + FileUtils.cleanDirectory(directory.toFile()); + FileUtils.forceDelete(directory.toFile()); + } catch (Exception e) { + throw new LambdaOperationException("Unable to clean up and delete directory."); + } + } + + return payloadString; + } + + private Path createEmptyDirectory() { + Path path; + try { + path = Files.createTempDirectory("awslambdainvoke-"); + FileUtils.cleanDirectory(path.toFile()); + } catch (IOException ex) { + throw new LambdaOperationException( + "Unable to create empty directory for AWS Lambda Invocation."); + } + return path; + } + + private File downloadFileToDirectory(Artifact artifact, Path directory) { + File targetFile; + try { + InputStream inStream = artifactDownloader.download(artifact); + targetFile = new File(directory + "/ARTIFACT.yaml"); + FileUtils.copyInputStreamToFile(inStream, targetFile); + IOUtils.closeQuietly(inStream); + } catch (IOException e) { + throw new LambdaOperationException("Failed to load payload Artifact."); + } + return targetFile; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/LambdaClientProvider.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/LambdaClientProvider.java new file mode 100644 index 00000000000..ea90450c3c7 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/LambdaClientProvider.java @@ -0,0 +1,86 @@ +/* + * Copyright 2023 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
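[Editor's note: a quick sketch of the payload-decoding helper above, illustrative only and assuming the copy fix for non-array-backed buffers; byteBuffer2String is public static, so it can be exercised directly. Direct buffers have no backing array, so they take the copy branch.]

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ByteBufferDecodeExample {
  public static void main(String[] args) {
    ByteBuffer direct = ByteBuffer.allocateDirect(5);
    direct.put("hello".getBytes(StandardCharsets.UTF_8)).flip();
    // Expected to print "hello"; without the buf.get(bytes) copy it would print an empty string.
    System.out.println(
        InvokeLambdaAtomicOperation.byteBuffer2String(direct, StandardCharsets.UTF_8));
  }
}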
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.retry.PredefinedBackoffStrategies; +import com.amazonaws.retry.RetryPolicy; +import com.amazonaws.services.lambda.AWSLambda; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.lambda.deploy.exception.InvalidAccountException; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import org.springframework.beans.factory.annotation.Autowired; + +public class LambdaClientProvider { + @Autowired protected AmazonClientProvider amazonClientProvider; + + @Autowired protected LambdaServiceConfig operationsConfig; + private String region; + private NetflixAmazonCredentials credentials; + + public LambdaClientProvider(String region, NetflixAmazonCredentials credentials) { + this.region = region; + this.credentials = credentials; + } + + // See AwsSdkClientSupplier, in particular its "load" method: client creation is a mostly + // cached operation. Timeouts could eventually be configured per region, so API calls to a + // different region could use different timeouts; a more intelligent selector pattern could + // then chain timeouts by precedence (e.g. if configured, and no account overrides exist, + // fall back to region timeouts). That remains future work. + protected AWSLambda getLambdaClient() { + + ClientConfiguration clientConfiguration = new ClientConfiguration(); + clientConfiguration.setSocketTimeout(operationsConfig.getInvokeTimeoutMs()); + // Only override the retry policy when a non-negative retry count is configured; the SDK + // does not accept the negative default. + if (operationsConfig.getRetry().getRetries() >= 0) { + clientConfiguration.setRetryPolicy( + RetryPolicy.builder() + .withBackoffStrategy( + new PredefinedBackoffStrategies.SDKDefaultBackoffStrategy( + 100, 500, operationsConfig.getRetry().getTimeout() * 1000)) + .withMaxErrorRetry(operationsConfig.getRetry().getRetries()) + .build()); + // Set it on the client configuration as well as in the retry policy, to be safe. + clientConfiguration.setMaxErrorRetry(operationsConfig.getRetry().getRetries()); + } + + if (!credentials.getLambdaEnabled()) { + throw new InvalidAccountException("AWS Lambda is not enabled for the provided account."); + } + // Note: the returned client is cached, so this clientConfiguration only takes effect when + // the client is first created; subsequent requests reuse the cached client unchanged. + return amazonClientProvider.getAmazonLambda(credentials, clientConfiguration, region); + } + + protected String getRegion() { + return region; + } + + protected NetflixAmazonCredentials getCredentials() { + return credentials; + } + + public AmazonClientProvider getAmazonClientProvider() { + return amazonClientProvider; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PublishLambdaAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PublishLambdaAtomicOperation.java new file mode 100644 index 00000000000..b021a4ebcb2 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PublishLambdaAtomicOperation.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License").
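[Editor's note: the client configuration assembled in getLambdaClient above, shown in isolation with hypothetical values (a 50 s socket timeout and 3 retries, with the SDK default backoff capped at a 30 s retry timeout). Because the provider caches clients, this configuration is only applied when the client is first created.]

import com.amazonaws.ClientConfiguration;
import com.amazonaws.retry.PredefinedBackoffStrategies;
import com.amazonaws.retry.RetryPolicy;

public class LambdaClientConfigExample {
  public static ClientConfiguration example() {
    ClientConfiguration cfg = new ClientConfiguration();
    cfg.setSocketTimeout(50000); // e.g. invokeTimeoutMs = 50000
    cfg.setRetryPolicy(
        RetryPolicy.builder()
            .withBackoffStrategy(
                new PredefinedBackoffStrategies.SDKDefaultBackoffStrategy(100, 500, 30 * 1000))
            .withMaxErrorRetry(3) // e.g. retry.retries = 3
            .build());
    cfg.setMaxErrorRetry(3); // mirrored outside the retry policy, as above
    return cfg;
  }
}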
+ * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.*; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PublishLambdaFunctionVersionDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class PublishLambdaAtomicOperation + extends AbstractLambdaAtomicOperation< + PublishLambdaFunctionVersionDescription, PublishVersionResult> + implements AtomicOperation<PublishVersionResult> { + + public PublishLambdaAtomicOperation(PublishLambdaFunctionVersionDescription description) { + super(description, "PUBLISH_LAMBDA_FUNCTION_VERSION"); + } + + @Override + public PublishVersionResult operate(List priorOutputs) { + updateTaskStatus("Initializing Atomic Operation AWS Lambda for PublishVersion..."); + return publishFunctionVersion( + description.getFunctionName(), description.getDescription(), description.getRevisionId()); + } + + private PublishVersionResult publishFunctionVersion( + String functionName, String description, String revisionId) { + AWSLambda client = getLambdaClient(); + PublishVersionRequest req = + new PublishVersionRequest() + .withFunctionName(functionName) + .withDescription(description) + .withRevisionId(revisionId); + + PublishVersionResult result = client.publishVersion(req); + updateTaskStatus("Finished Atomic Operation AWS Lambda for PublishVersion..."); + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutLambdaProvisionedConcurrencyAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutLambdaProvisionedConcurrencyAtomicOperation.java new file mode 100644 index 00000000000..2abd069a97a --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutLambdaProvisionedConcurrencyAtomicOperation.java @@ -0,0 +1,58 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.PutProvisionedConcurrencyConfigRequest; +import com.amazonaws.services.lambda.model.PutProvisionedConcurrencyConfigResult; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PutLambdaProvisionedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class PutLambdaProvisionedConcurrencyAtomicOperation + extends AbstractLambdaAtomicOperation< + PutLambdaProvisionedConcurrencyDescription, PutProvisionedConcurrencyConfigResult> + implements AtomicOperation<PutProvisionedConcurrencyConfigResult> { + + public PutLambdaProvisionedConcurrencyAtomicOperation( + PutLambdaProvisionedConcurrencyDescription description) { + super(description, "PUT_LAMBDA_FUNCTION_PROVISIONED_CONCURRENCY"); + } + + @Override + public PutProvisionedConcurrencyConfigResult operate(List priorOutputs) { + updateTaskStatus("Initializing Atomic Operation AWS Lambda for PutProvisionedConcurrency..."); + return putProvisionedFunctionConcurrency( + description.getFunctionName(), + description.getQualifier(), + description.getProvisionedConcurrentExecutions()); + } + + private PutProvisionedConcurrencyConfigResult putProvisionedFunctionConcurrency( + String functionName, String qualifier, int provisionedConcurrentExecutions) { + AWSLambda client = getLambdaClient(); + PutProvisionedConcurrencyConfigRequest req = + new PutProvisionedConcurrencyConfigRequest() + .withFunctionName(functionName) + .withQualifier(qualifier) + .withProvisionedConcurrentExecutions(provisionedConcurrentExecutions); + + PutProvisionedConcurrencyConfigResult result = client.putProvisionedConcurrencyConfig(req); + updateTaskStatus("Finished Atomic Operation AWS Lambda for PutProvisionedConcurrency..."); + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutLambdaReservedConcurrencyAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutLambdaReservedConcurrencyAtomicOperation.java new file mode 100644 index 00000000000..45971dfa08a --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutLambdaReservedConcurrencyAtomicOperation.java @@ -0,0 +1,55 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.PutFunctionConcurrencyRequest; +import com.amazonaws.services.lambda.model.PutFunctionConcurrencyResult; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PutLambdaReservedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class PutLambdaReservedConcurrencyAtomicOperation + extends AbstractLambdaAtomicOperation< + PutLambdaReservedConcurrencyDescription, PutFunctionConcurrencyResult> + implements AtomicOperation<PutFunctionConcurrencyResult> { + + public PutLambdaReservedConcurrencyAtomicOperation( + PutLambdaReservedConcurrencyDescription description) { + super(description, "PUT_LAMBDA_FUNCTION_RESERVED_CONCURRENCY"); + } + + @Override + public PutFunctionConcurrencyResult operate(List priorOutputs) { + updateTaskStatus("Initializing Atomic Operation AWS Lambda for PutReservedConcurrency..."); + return putReservedFunctionConcurrency( + description.getFunctionName(), description.getReservedConcurrentExecutions()); + } + + private PutFunctionConcurrencyResult putReservedFunctionConcurrency( + String functionName, int reservedConcurrentExecutions) { + AWSLambda client = getLambdaClient(); + PutFunctionConcurrencyRequest req = + new PutFunctionConcurrencyRequest() + .withFunctionName(functionName) + .withReservedConcurrentExecutions(reservedConcurrentExecutions); + + PutFunctionConcurrencyResult result = client.putFunctionConcurrency(req); + updateTaskStatus("Finished Atomic Operation AWS Lambda for PutReservedConcurrency..."); + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaCodeAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaCodeAtomicOperation.java new file mode 100644 index 00000000000..1bf46c30699 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaCodeAtomicOperation.java @@ -0,0 +1,62 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.UpdateFunctionCodeRequest; +import com.amazonaws.services.lambda.model.UpdateFunctionCodeResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpdateLambdaFunctionCodeDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.List; + +public class UpdateLambdaCodeAtomicOperation + extends AbstractLambdaAtomicOperation< + UpdateLambdaFunctionCodeDescription, UpdateFunctionCodeResult> + implements AtomicOperation<UpdateFunctionCodeResult> { + + public UpdateLambdaCodeAtomicOperation(UpdateLambdaFunctionCodeDescription description) { + super(description, "UPDATE_LAMBDA_FUNCTION_CODE"); + } + + @Override + public UpdateFunctionCodeResult operate(List priorOutputs) { + updateTaskStatus("Initializing Updating of AWS Lambda Function Code Operation..."); + return updateFunctionConfigurationResult(); + } + + private UpdateFunctionCodeResult updateFunctionConfigurationResult() { + LambdaFunction lambdaFunction = + (LambdaFunction) + lambdaFunctionProvider.getFunction( + description.getAccount(), description.getRegion(), description.getFunctionName()); + + AWSLambda client = getLambdaClient(); + + UpdateFunctionCodeRequest request = + new UpdateFunctionCodeRequest() + .withFunctionName(lambdaFunction.getFunctionArn()) + .withPublish(description.getPublish()) + .withS3Bucket(description.getS3bucket()) + .withS3Key(description.getS3key()); + + UpdateFunctionCodeResult result = client.updateFunctionCode(request); + updateTaskStatus("Finished Updating of AWS Lambda Function Code Operation..."); + + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaConfigurationAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaConfigurationAtomicOperation.java new file mode 100644 index 00000000000..6fb237eb49c --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaConfigurationAtomicOperation.java @@ -0,0 +1,188 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.elasticloadbalancingv2.AmazonElasticLoadBalancing; +import com.amazonaws.services.elasticloadbalancingv2.model.*; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.*; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.CreateLambdaFunctionConfigurationDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.springframework.util.StringUtils; + +public class UpdateLambdaConfigurationAtomicOperation + extends AbstractLambdaAtomicOperation< + CreateLambdaFunctionConfigurationDescription, UpdateFunctionConfigurationResult> + implements AtomicOperation<UpdateFunctionConfigurationResult> { + + public UpdateLambdaConfigurationAtomicOperation( + CreateLambdaFunctionConfigurationDescription description) { + super(description, "UPDATE_LAMBDA_FUNCTION_CONFIGURATION"); + } + + @Override + public UpdateFunctionConfigurationResult operate(List priorOutputs) { + updateTaskStatus("Initializing Updating of AWS Lambda Function Configuration Operation..."); + return updateFunctionConfigurationResult(); + } + + private UpdateFunctionConfigurationResult updateFunctionConfigurationResult() { + LambdaFunction cache = + (LambdaFunction) + lambdaFunctionProvider.getFunction( + description.getAccount(), description.getRegion(), description.getFunctionName()); + + AWSLambda client = getLambdaClient(); + + UpdateFunctionConfigurationRequest request = + new UpdateFunctionConfigurationRequest() + .withFunctionName(cache.getFunctionArn()) + .withDescription(description.getDescription()) + .withHandler(description.getHandler()) + .withMemorySize(description.getMemorySize()) + .withRole(description.getRole()) + .withTimeout(description.getTimeout()) + .withDeadLetterConfig(description.getDeadLetterConfig()) + .withLayers(description.getLayers()) + .withVpcConfig( + new VpcConfig() + .withSecurityGroupIds(description.getSecurityGroupIds()) + .withSubnetIds(description.getSubnetIds())) + .withKMSKeyArn(description.getKmskeyArn()) + .withTracingConfig(description.getTracingConfig()) + .withRuntime(description.getRuntime()); + + if (null != description.getEnvVariables()) { + request.setEnvironment(new Environment().withVariables(description.getEnvVariables())); + } + + UpdateFunctionConfigurationResult result = client.updateFunctionConfiguration(request); + TagResourceRequest tagResourceRequest = new TagResourceRequest(); + Map<String, String> objTag = new HashMap<>(); + if (null != description.getTags()) { + + for (Map.Entry<String, String> entry : description.getTags().entrySet()) { + objTag.put(entry.getKey(), entry.getValue()); + } + } + if (!objTag.isEmpty()) { + + UntagResourceRequest untagResourceRequest = + new UntagResourceRequest().withResource(result.getFunctionArn()); + ListTagsResult existingTags = + client.listTags(new ListTagsRequest().withResource(result.getFunctionArn())); + for (Map.Entry<String, String> entry : existingTags.getTags().entrySet()) { + untagResourceRequest.getTagKeys().add(entry.getKey()); + } + if (!untagResourceRequest.getTagKeys().isEmpty()) { + client.untagResource(untagResourceRequest); + } + for (Map.Entry<String, String> entry : objTag.entrySet()) { + tagResourceRequest.addTagsEntry(entry.getKey(),
entry.getValue()); + } + tagResourceRequest.setResource(result.getFunctionArn()); + client.tagResource(tagResourceRequest); + } + updateTaskStatus("Finished Updating of AWS Lambda Function Configuration Operation..."); + if (StringUtils.isEmpty(description.getTargetGroups())) { + if (cache.getTargetGroups() != null && !cache.getTargetGroups().isEmpty()) { + AmazonElasticLoadBalancing loadBalancingV2 = getAmazonElasticLoadBalancingClient(); + for (String groupName : cache.getTargetGroups()) { + deregisterTarget( + loadBalancingV2, + cache.getFunctionArn(), + retrieveTargetGroup(loadBalancingV2, groupName).getTargetGroupArn()); + updateTaskStatus("De-registered the target group..."); + } + } + + } else { + AmazonElasticLoadBalancing loadBalancingV2 = getAmazonElasticLoadBalancingClient(); + if (cache.getTargetGroups().isEmpty()) { + registerTarget( + loadBalancingV2, + cache.getFunctionArn(), + retrieveTargetGroup(loadBalancingV2, description.getTargetGroups()) + .getTargetGroupArn()); + updateTaskStatus("Registered the target group..."); + } else { + for (String groupName : cache.getTargetGroups()) { + if (!groupName.equals(description.getTargetGroups())) { + registerTarget( + loadBalancingV2, + cache.getFunctionArn(), + retrieveTargetGroup(loadBalancingV2, description.getTargetGroups()) + .getTargetGroupArn()); + updateTaskStatus("Registered the target group..."); + } + } + } + } + return result; + } + + private TargetGroup retrieveTargetGroup( + AmazonElasticLoadBalancing loadBalancingV2, String targetGroupName) { + + DescribeTargetGroupsRequest request = + new DescribeTargetGroupsRequest().withNames(targetGroupName); + DescribeTargetGroupsResult describeTargetGroupsResult = + loadBalancingV2.describeTargetGroups(request); + + if (describeTargetGroupsResult.getTargetGroups().size() == 1) { + return describeTargetGroupsResult.getTargetGroups().get(0); + } else if (describeTargetGroupsResult.getTargetGroups().size() > 1) { + throw new IllegalArgumentException( + "There are multiple target groups with the name " + targetGroupName + "."); + } else { + throw new IllegalArgumentException( + "There is no target group with the name " + targetGroupName + "."); + } + } + + private AmazonElasticLoadBalancing getAmazonElasticLoadBalancingClient() { + AWSCredentialsProvider credentialsProvider = getCredentials().getCredentialsProvider(); + NetflixAmazonCredentials credentialAccount = description.getCredentials(); + + return getAmazonClientProvider() + .getAmazonElasticLoadBalancingV2(credentialAccount, getRegion(), false); + } + + private void registerTarget( + AmazonElasticLoadBalancing loadBalancingV2, String functionArn, String targetGroupArn) { + RegisterTargetsResult result = + loadBalancingV2.registerTargets( + new RegisterTargetsRequest() + .withTargets(new TargetDescription().withId(functionArn)) + .withTargetGroupArn(targetGroupArn)); + } + + private void deregisterTarget( + AmazonElasticLoadBalancing loadBalancingV2, String functionArn, String targetGroupArn) { + DeregisterTargetsResult result = + loadBalancingV2.deregisterTargets( + new DeregisterTargetsRequest() + .withTargetGroupArn(targetGroupArn) + .withTargets(new TargetDescription().withId(functionArn))); + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpsertLambdaAliasAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpsertLambdaAliasAtomicOperation.java new file mode 100644 index 00000000000..ead0255aeae --- /dev/null 
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpsertLambdaAliasAtomicOperation.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.deploy.ops;
+
+import com.amazonaws.services.lambda.AWSLambda;
+import com.amazonaws.services.lambda.model.AliasConfiguration;
+import com.amazonaws.services.lambda.model.AliasRoutingConfiguration;
+import com.amazonaws.services.lambda.model.CreateAliasRequest;
+import com.amazonaws.services.lambda.model.CreateAliasResult;
+import com.amazonaws.services.lambda.model.UpdateAliasRequest;
+import com.amazonaws.services.lambda.model.UpdateAliasResult;
+import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction;
+import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionAliasDescription;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.*;
+import org.apache.commons.lang3.StringUtils;
+
+public class UpsertLambdaAliasAtomicOperation
+    extends AbstractLambdaAtomicOperation<UpsertLambdaFunctionAliasDescription, Object>
+    implements AtomicOperation<Object> {
+
+  public UpsertLambdaAliasAtomicOperation(UpsertLambdaFunctionAliasDescription description) {
+    super(description, "UPSERT_LAMBDA_FUNCTION_ALIAS");
+  }
+
+  @Override
+  public Object operate(List priorOutputs) {
+
+    String functionName = description.getFunctionName();
+    String region = description.getRegion();
+    String account = description.getAccount();
+    LambdaFunction cache =
+        (LambdaFunction) lambdaFunctionProvider.getFunction(account, region, functionName);
+    List<AliasConfiguration> aliasConfigurations = cache.getAliasConfigurations();
+    boolean aliasExists = false;
+
+    for (AliasConfiguration aliasConfiguration : aliasConfigurations) {
+      if (aliasConfiguration.getName().equalsIgnoreCase(description.getAliasName())) {
+        aliasExists = true;
+      }
+    }
+
+    return aliasExists ?
updateAliasResult(cache) : createAliasResult(cache); + } + + private UpdateAliasResult updateAliasResult(LambdaFunction cache) { + updateTaskStatus("Initializing Updating of AWS Lambda Function Alias Operation..."); + + Map routingConfig = new LinkedHashMap<>(); + String minorFunctionVersion = description.getMinorFunctionVersion(); + Double weightToMinorFunctionVersion = description.getWeightToMinorFunctionVersion(); + + if (StringUtils.isNotEmpty(minorFunctionVersion) && weightToMinorFunctionVersion != null) { + routingConfig.put( + description.getMinorFunctionVersion(), description.getWeightToMinorFunctionVersion()); + } + + AWSLambda client = getLambdaClient(); + UpdateAliasRequest request = + new UpdateAliasRequest() + .withFunctionName(cache.getFunctionArn()) + .withDescription(description.getAliasDescription()) + .withFunctionVersion(description.getMajorFunctionVersion()) + .withName(description.getAliasName()) + .withRoutingConfig( + new AliasRoutingConfiguration().withAdditionalVersionWeights(routingConfig)); + + UpdateAliasResult result = client.updateAlias(request); + updateTaskStatus("Finished Updating of AWS Lambda Function Alias Operation..."); + + return result; + } + + private CreateAliasResult createAliasResult(LambdaFunction cache) { + updateTaskStatus("Initializing Creation of AWS Lambda Function Alias Operation..."); + + Map routingConfig = new LinkedHashMap<>(); + String minorFunctionVersion = description.getMinorFunctionVersion(); + Double weightToMinorFunctionVersion = description.getWeightToMinorFunctionVersion(); + + if (StringUtils.isNotEmpty(minorFunctionVersion) && weightToMinorFunctionVersion != null) { + routingConfig.put( + description.getMinorFunctionVersion(), description.getWeightToMinorFunctionVersion()); + } + + AWSLambda client = getLambdaClient(); + CreateAliasRequest request = + new CreateAliasRequest() + .withFunctionName(cache.getFunctionArn()) + .withDescription(description.getAliasDescription()) + .withFunctionVersion(description.getMajorFunctionVersion()) + .withName(description.getAliasName()) + .withRoutingConfig( + new AliasRoutingConfiguration().withAdditionalVersionWeights(routingConfig)); + + CreateAliasResult result = client.createAlias(request); + updateTaskStatus("Finished Creation of AWS Lambda Function Alias Operation..."); + + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpsertLambdaEventSourceAtomicOperation.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpsertLambdaEventSourceAtomicOperation.java new file mode 100644 index 00000000000..716f103c63a --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpsertLambdaEventSourceAtomicOperation.java @@ -0,0 +1,120 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
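The routing map assembled in both branches of the alias upsert above is Lambda's weighted-alias traffic shifting: the alias tracks the major version, and each additionalVersionWeights entry diverts that fraction of invocations to another version. A hedged sketch with hypothetical names and weights:

import com.amazonaws.services.lambda.model.AliasRoutingConfiguration;
import com.amazonaws.services.lambda.model.UpdateAliasRequest;
import java.util.Map;

public class AliasRoutingSketch {
  // 90% of "live" traffic stays on version 5; 10% is shifted to version 6.
  public static UpdateAliasRequest canaryTenPercent() {
    return new UpdateAliasRequest()
        .withFunctionName("my-function") // hypothetical
        .withName("live")
        .withFunctionVersion("5")
        .withRoutingConfig(
            new AliasRoutingConfiguration().withAdditionalVersionWeights(Map.of("6", 0.10)));
  }
}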
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.deploy.ops;
+
+import com.amazonaws.services.lambda.AWSLambda;
+import com.amazonaws.services.lambda.model.*;
+import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction;
+import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionEventMappingDescription;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import java.util.List;
+import org.pf4j.util.StringUtils;
+
+public class UpsertLambdaEventSourceAtomicOperation
+    extends AbstractLambdaAtomicOperation<UpsertLambdaFunctionEventMappingDescription, Object>
+    implements AtomicOperation<Object> {
+
+  public UpsertLambdaEventSourceAtomicOperation(
+      UpsertLambdaFunctionEventMappingDescription description) {
+    super(description, "UPSERT_LAMBDA_FUNCTION_EVENT_MAPPING");
+  }
+
+  @Override
+  public Object operate(List priorOutputs) {
+    String functionName = description.getFunctionName();
+    String region = description.getRegion();
+    String account = description.getAccount();
+
+    LambdaFunction cache =
+        (LambdaFunction) lambdaFunctionProvider.getFunction(account, region, functionName);
+
+    List<EventSourceMappingConfiguration> eventSourceMappingConfigurations =
+        cache.getEventSourceMappings();
+    for (EventSourceMappingConfiguration eventSourceMappingConfiguration :
+        eventSourceMappingConfigurations) {
+      if (eventSourceMappingConfiguration
+          .getEventSourceArn()
+          .equalsIgnoreCase(description.getEventSourceArn())) {
+        description.setProperty("uuid", eventSourceMappingConfiguration.getUUID());
+        return updateEventSourceMappingResult(cache);
+      }
+    }
+
+    return createEventSourceMapping(cache);
+  }
+
+  private UpdateEventSourceMappingResult updateEventSourceMappingResult(LambdaFunction cache) {
+    updateTaskStatus("Initializing Updating of AWS Lambda Function Event Mapping Operation...");
+
+    AWSLambda client = getLambdaClient();
+    UpdateEventSourceMappingRequest request =
+        new UpdateEventSourceMappingRequest()
+            .withFunctionName(cache.getFunctionArn())
+            .withBatchSize(description.getBatchsize())
+            .withBisectBatchOnFunctionError(description.getBisectBatchOnError())
+            .withMaximumBatchingWindowInSeconds(description.getMaxBatchingWindowSecs())
+            .withMaximumRecordAgeInSeconds(description.getMaxRecordAgeSecs())
+            .withMaximumRetryAttempts(description.getMaxRetryAttempts())
+            .withParallelizationFactor(description.getParallelizationFactor())
+            .withTumblingWindowInSeconds(description.getTumblingWindowSecs())
+            .withDestinationConfig(description.getDestinationConfig())
+            .withEnabled(description.getEnabled())
+            .withUUID(description.getUuid());
+
+    if (StringUtils.isNotNullOrEmpty(description.getQualifier())) {
+      String fullArnWithQualifier =
+          String.format("%s:%s", cache.getFunctionArn(), description.getQualifier());
+      request.setFunctionName(fullArnWithQualifier);
+    }
+
+    UpdateEventSourceMappingResult result = client.updateEventSourceMapping(request);
+    updateTaskStatus("Finished Updating of AWS Lambda Function Event Mapping Operation...");
+
+    return result;
+  }
+
+  private CreateEventSourceMappingResult createEventSourceMapping(LambdaFunction cache) {
+    updateTaskStatus("Initializing Creation of AWS Lambda Function Event Source Mapping...");
+
+    AWSLambda client = getLambdaClient();
+    CreateEventSourceMappingRequest request =
+        new CreateEventSourceMappingRequest()
+            .withFunctionName(cache.getFunctionArn())
+            .withBatchSize(description.getBatchsize())
+            .withBisectBatchOnFunctionError(description.getBisectBatchOnError())
+            .withMaximumBatchingWindowInSeconds(description.getMaxBatchingWindowSecs())
.withMaximumRecordAgeInSeconds(description.getMaxRecordAgeSecs()) + .withMaximumRetryAttempts(description.getMaxRetryAttempts()) + .withParallelizationFactor(description.getParallelizationFactor()) + .withTumblingWindowInSeconds(description.getTumblingWindowSecs()) + .withDestinationConfig(description.getDestinationConfig()) + .withEnabled(description.getEnabled()) + .withStartingPosition(description.getStartingPosition()) + .withEventSourceArn(description.getEventSourceArn()); + + if (StringUtils.isNotNullOrEmpty(description.getQualifier())) { + String fullArnWithQualifier = + String.format("%s:%s", cache.getFunctionArn(), description.getQualifier()); + request.setFunctionName(fullArnWithQualifier); + } + + CreateEventSourceMappingResult result = client.createEventSourceMapping(request); + updateTaskStatus("Finished Creation of AWS Lambda Function Event Mapping Operation..."); + + return result; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/IamRoleCachingAgent.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/IamRoleCachingAgent.java new file mode 100644 index 00000000000..9cd8f293f7c --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/IamRoleCachingAgent.java @@ -0,0 +1,250 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
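The upsert above keys on the event source ARN: an existing mapping for that ARN is updated via its UUID, anything else is created fresh, and a qualifier (alias or version) is targeted by appending it to the function ARN. A minimal sketch of the create path against a hypothetical SQS queue (ARNs and account ID are illustrative):

import com.amazonaws.services.lambda.AWSLambda;
import com.amazonaws.services.lambda.AWSLambdaClientBuilder;
import com.amazonaws.services.lambda.model.CreateEventSourceMappingRequest;

public class EventSourceMappingSketch {
  public static void main(String[] args) {
    AWSLambda lambda = AWSLambdaClientBuilder.standard().withRegion("us-west-2").build();

    // Target the "live" alias by appending the qualifier to the function ARN.
    String functionArn =
        "arn:aws:lambda:us-west-2:123456789012:function:my-function"; // hypothetical
    lambda.createEventSourceMapping(
        new CreateEventSourceMappingRequest()
            .withFunctionName(functionArn + ":live")
            .withEventSourceArn("arn:aws:sqs:us-west-2:123456789012:my-queue") // hypothetical
            .withBatchSize(10)
            .withEnabled(true));
  }
}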
+ */
+
+package com.netflix.spinnaker.clouddriver.lambda.provider.agent;
+
+import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE;
+import static com.netflix.spinnaker.clouddriver.lambda.cache.Keys.Namespace.IAM_ROLE;
+
+import com.amazonaws.regions.Regions;
+import com.amazonaws.services.identitymanagement.AmazonIdentityManagement;
+import com.amazonaws.services.identitymanagement.model.ListRolesRequest;
+import com.amazonaws.services.identitymanagement.model.ListRolesResult;
+import com.amazonaws.services.identitymanagement.model.Role;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.agent.CachingAgent;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider;
+import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
+import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
+import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent;
+import com.netflix.spinnaker.clouddriver.lambda.cache.Keys;
+import com.netflix.spinnaker.clouddriver.lambda.cache.model.IamRole;
+import java.io.IOException;
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class IamRoleCachingAgent implements CachingAgent, CustomScheduledAgent {
+  private static final long POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(30);
+  private static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(5);
+
+  private final Logger log = LoggerFactory.getLogger(getClass());
+  private final Collection<AgentDataType> types =
+      Collections.singletonList(AUTHORITATIVE.forType(IAM_ROLE.toString()));
+
+  private final ObjectMapper objectMapper;
+
+  private AmazonClientProvider amazonClientProvider;
+  private NetflixAmazonCredentials account;
+  private String accountName;
+
+  IamRoleCachingAgent(
+      ObjectMapper objectMapper,
+      NetflixAmazonCredentials account,
+      AmazonClientProvider amazonClientProvider) {
+    this.objectMapper = objectMapper;
+
+    this.account = account;
+    this.accountName = account.getName();
+    this.amazonClientProvider = amazonClientProvider;
+  }
+
+  @Override
+  public String getAgentType() {
+    return accountName + "/" + getClass().getSimpleName();
+  }
+
+  @Override
+  public String getProviderName() {
+    return AwsProvider.PROVIDER_NAME;
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return types;
+  }
+
+  @Override
+  public long getPollIntervalMillis() {
+    return POLL_INTERVAL_MILLIS;
+  }
+
+  @Override
+  public long getTimeoutMillis() {
+    return DEFAULT_TIMEOUT_MILLIS;
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    AmazonIdentityManagement iam =
+        amazonClientProvider.getIam(account, Regions.DEFAULT_REGION.getName(), false);
+
+    Set<IamRole> cacheableRoles = fetchIamRoles(iam, accountName);
+    Map<String, Collection<CacheData>> newDataMap = generateFreshData(cacheableRoles);
+    Collection<CacheData> newData = newDataMap.get(IAM_ROLE.toString());
+
+    Set<String> oldKeys =
+        providerCache.getAll(IAM_ROLE.toString()).stream()
+            .map(CacheData::getId)
+            .filter(this::keyAccountFilter)
+            .collect(Collectors.toSet());
+    Map<String, Collection<String>> evictionsByKey = computeEvictableData(newData, oldKeys);
+
+    logUpcomingActions(newDataMap, evictionsByKey);
+
+    return new DefaultCacheResult(newDataMap, evictionsByKey);
+  }
+
+  private void logUpcomingActions(
+      Map<String, Collection<CacheData>> newDataMap,
+      Map<String, Collection<String>> evictionsByKey) {
+    log.info(
+        String.format(
+            "Caching %s IAM roles in %s for account %s",
+            newDataMap.get(IAM_ROLE.toString()).size(), getAgentType(), accountName));
+
+    if (evictionsByKey.get(IAM_ROLE.toString()).size() > 0) {
+      log.info(
+          String.format(
+              "Evicting %s IAM roles in %s for account %s",
+              evictionsByKey.get(IAM_ROLE.toString()).size(), getAgentType(), accountName));
+    }
+  }
+
+  private Map<String, Collection<String>> computeEvictableData(
+      Collection<CacheData> newData, Collection<String> oldKeys) {
+
+    Set<String> newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet());
+
+    Set<String> evictedKeys = new HashSet<>();
+    for (String oldKey : oldKeys) {
+      if (!newKeys.contains(oldKey)) {
+        evictedKeys.add(oldKey);
+      }
+    }
+    Map<String, Collection<String>> evictionsByKey = new HashMap<>();
+    evictionsByKey.put(IAM_ROLE.toString(), evictedKeys);
+    return evictionsByKey;
+  }
+
+  private Map<String, Collection<CacheData>> generateFreshData(Set<IamRole> cacheableRoles) {
+    Collection<CacheData> dataPoints = new HashSet<>();
+    Map<String, Collection<CacheData>> newDataMap = new HashMap<>();
+
+    for (IamRole iamRole : cacheableRoles) {
+      String key = Keys.getIamRoleKey(accountName, iamRole.getName());
+      Map<String, Object> attributes = convertIamRoleToAttributes(iamRole);
+
+      CacheData data = new DefaultCacheData(key, attributes, Collections.emptyMap());
+      dataPoints.add(data);
+    }
+
+    newDataMap.put(IAM_ROLE.toString(), dataPoints);
+    return newDataMap;
+  }
+
+  private Set<IamRole> fetchIamRoles(AmazonIdentityManagement iam, String accountName) {
+    Set<IamRole> cacheableRoles = new HashSet<>();
+    String marker = null;
+    do {
+      ListRolesRequest request = new ListRolesRequest();
+      if (marker != null) {
+        request.setMarker(marker);
+      }
+
+      ListRolesResult listRolesResult = iam.listRoles(request);
+      List<Role> roles = listRolesResult.getRoles();
+      for (Role role : roles) {
+        cacheableRoles.add(
+            new IamRole(
+                role.getArn(),
+                role.getRoleName(),
+                accountName,
+                getTrustedEntities(role.getAssumeRolePolicyDocument())));
+      }
+
+      if (listRolesResult.isTruncated()) {
+        marker = listRolesResult.getMarker();
+      } else {
+        marker = null;
+      }
+
+    } while (marker != null && marker.length() != 0);
+
+    return cacheableRoles;
+  }
+
+  private boolean keyAccountFilter(String key) {
+    Map<String, String> keyParts = Keys.parse(key);
+    return keyParts != null && keyParts.get("account").equals(accountName);
+  }
+
+  private Set<IamTrustRelationship> getTrustedEntities(String urlEncodedPolicyDocument) {
+    Set<IamTrustRelationship> trustedEntities = new HashSet<>();
+
+    String decodedPolicyDocument =
+        URLDecoder.decode(urlEncodedPolicyDocument, StandardCharsets.UTF_8);
+
+    Map<String, Object> policyDocument;
+    try {
+      policyDocument = objectMapper.readValue(decodedPolicyDocument, Map.class);
+      List<Map<String, Object>> statementItems =
+          (List<Map<String, Object>>) policyDocument.get("Statement");
+      for (Map<String, Object> statementItem : statementItems) {
+        if ("sts:AssumeRole".equals(statementItem.get("Action"))) {
+          Map<String, Object> principal = (Map<String, Object>) statementItem.get("Principal");
+
+          for (Map.Entry<String, Object> principalEntry : principal.entrySet()) {
+            if (principalEntry.getValue() instanceof List) {
+              ((List) principalEntry.getValue())
+                  .stream()
+                      .forEach(
+                          o ->
+                              trustedEntities.add(
+                                  new IamTrustRelationship(
+                                      principalEntry.getKey(), o.toString())));
+            } else {
+              trustedEntities.add(
+                  new IamTrustRelationship(
+                      principalEntry.getKey(), principalEntry.getValue().toString()));
+            }
+          }
+        }
+      }
+    } catch (IOException e) {
+      log.error(
+          "Unable to extract trusted entities (policyDocument: {})",
urlEncodedPolicyDocument, e); + } + + return trustedEntities; + } + + private static Map convertIamRoleToAttributes(IamRole iamRole) { + Map attributes = new HashMap<>(); + attributes.put("name", iamRole.getName()); + attributes.put("accountName", iamRole.getAccountName()); + attributes.put("arn", iamRole.getId()); + attributes.put("trustRelationships", iamRole.getTrustRelationships()); + return attributes; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/IamTrustRelationship.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/IamTrustRelationship.java new file mode 100644 index 00000000000..8b433c08aa3 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/IamTrustRelationship.java @@ -0,0 +1,30 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.provider.agent; + +import com.netflix.spinnaker.clouddriver.aws.model.TrustRelationship; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class IamTrustRelationship implements TrustRelationship { + String type; + String value; +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaAgentProvider.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaAgentProvider.java new file mode 100644 index 00000000000..ef8330774f8 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaAgentProvider.java @@ -0,0 +1,80 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
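To make the trust-entity extraction above concrete: ListRoles returns each role's assume-role policy URL-encoded, and once decoded it is an ordinary IAM policy document. A sketch of the input shape and the relationship the agent derives from it (document contents are illustrative only):

public class TrustPolicySketch {
  // Decoded AssumeRolePolicyDocument of the shape parsed by getTrustedEntities above.
  static final String POLICY =
      "{"
          + "\"Version\": \"2012-10-17\","
          + "\"Statement\": [{"
          + "  \"Effect\": \"Allow\","
          + "  \"Action\": \"sts:AssumeRole\","
          + "  \"Principal\": {\"Service\": \"lambda.amazonaws.com\"}"
          + "}]"
          + "}";
  // Parsing this yields a single IamTrustRelationship with
  // type = "Service" and value = "lambda.amazonaws.com".
}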
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.provider.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.awsobjectmapper.AmazonObjectMapperConfigurer; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import com.netflix.spinnaker.credentials.Credentials; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class LambdaAgentProvider implements AgentProvider { + private final ObjectMapper objectMapper; + private final AmazonClientProvider amazonClientProvider; + private final LambdaServiceConfig lambdaServiceConfig; + private final ServiceLimitConfiguration serviceLimitConfiguration; + + @Autowired + public LambdaAgentProvider( + AmazonClientProvider amazonClientProvider, + LambdaServiceConfig lambdaServiceConfig, + ServiceLimitConfiguration serviceLimitConfiguration) { + this.objectMapper = AmazonObjectMapperConfigurer.createConfigured(); + this.amazonClientProvider = amazonClientProvider; + this.lambdaServiceConfig = lambdaServiceConfig; + this.serviceLimitConfiguration = serviceLimitConfiguration; + } + + @Override + public boolean supports(String providerName) { + return providerName.equalsIgnoreCase(AwsProvider.PROVIDER_NAME); + } + + @Override + public Collection agents(Credentials credentials) { + List agents = new ArrayList<>(); + NetflixAmazonCredentials netflixAmazonCredentials = (NetflixAmazonCredentials) credentials; + if (netflixAmazonCredentials.getLambdaEnabled()) { + agents.add( + new IamRoleCachingAgent(objectMapper, netflixAmazonCredentials, amazonClientProvider)); + + for (AmazonCredentials.AWSRegion region : netflixAmazonCredentials.getRegions()) { + agents.add( + new LambdaCachingAgent( + objectMapper, + amazonClientProvider, + netflixAmazonCredentials, + region.getName(), + lambdaServiceConfig, + serviceLimitConfiguration)); + } + } + return agents; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaCachingAgent.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaCachingAgent.java new file mode 100644 index 00000000000..e532906310d --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaCachingAgent.java @@ -0,0 +1,444 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.provider.agent; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND; +import static com.netflix.spinnaker.clouddriver.lambda.cache.Keys.Namespace.LAMBDA_FUNCTIONS; +import static java.util.stream.Collectors.toSet; + +import com.amazonaws.services.lambda.model.*; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.CaseFormat; +import com.netflix.frigga.Names; +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.AccountAware; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider; +import com.netflix.spinnaker.clouddriver.aws.provider.AwsProvider; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; +import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; +import com.netflix.spinnaker.clouddriver.cache.OnDemandType; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; +import com.netflix.spinnaker.clouddriver.lambda.cache.Keys; +import com.netflix.spinnaker.clouddriver.lambda.service.LambdaService; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import com.netflix.spinnaker.kork.exceptions.SpinnakerException; +import java.time.Clock; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class LambdaCachingAgent implements CachingAgent, AccountAware, OnDemandAgent { + private static final Set types = + new HashSet<>() { + { + add(AUTHORITATIVE.forType(LAMBDA_FUNCTIONS.ns)); + add(INFORMATIVE.forType(APPLICATIONS.ns)); + } + }; + + private final NetflixAmazonCredentials account; + private final String region; + private OnDemandMetricsSupport metricsSupport; + private final Registry registry; + private final Clock clock = Clock.systemDefaultZone(); + private LambdaService lambdaService; + + LambdaCachingAgent( + ObjectMapper objectMapper, + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + String region, + LambdaServiceConfig lambdaServiceConfig, + ServiceLimitConfiguration serviceLimitConfiguration) { + this.account = account; + this.region = region; + this.registry = new DefaultRegistry(); + this.metricsSupport = + new OnDemandMetricsSupport( + registry, + this, + AmazonCloudProvider.ID + ":" + AmazonCloudProvider.ID + ":" + 
OnDemandType.Function);
+    this.lambdaService =
+        new LambdaService(amazonClientProvider, account, region, objectMapper, lambdaServiceConfig);
+  }
+
+  @Override
+  public String getProviderName() {
+    return AwsProvider.PROVIDER_NAME;
+  }
+
+  @Override
+  public String getAgentType() {
+    return account.getName() + "/" + region + "/" + LambdaCachingAgent.class.getSimpleName();
+  }
+
+  @Override
+  public String getAccountName() {
+    return account.getName();
+  }
+
+  public String getRegion() {
+    return region;
+  }
+
+  @Override
+  public Collection<AgentDataType> getProvidedDataTypes() {
+    return types;
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    long loadDataStart = clock.instant().toEpochMilli();
+    log.info("Describing items in {}", getAgentType());
+
+    Map<String, CacheData> lambdaCacheData = new ConcurrentHashMap<>();
+    Map<String, Collection<String>> appLambdaRelationships = new ConcurrentHashMap<>();
+
+    // Get all Lambdas
+    List<Map<String, Object>> allLambdas;
+    try {
+      allLambdas = lambdaService.getAllFunctions();
+    } catch (Exception e) {
+      throw new SpinnakerException(
+          "Failed to populate the lambda cache for account '"
+              + account.getName()
+              + "' and region '"
+              + region
+              + "' because: "
+              + e.getMessage());
+    }
+
+    buildCacheData(lambdaCacheData, appLambdaRelationships, allLambdas);
+
+    Collection<CacheData> processedOnDemandCache = new ArrayList<>();
+
+    // Process on demand cache
+    Collection<CacheData> onDemandCacheData =
+        providerCache
+            .getAll(
+                ON_DEMAND.getNs(),
+                providerCache.filterIdentifiers(
+                    ON_DEMAND.getNs(),
+                    Keys.getLambdaFunctionKey(getAccountName(), getRegion(), "*")))
+            .stream()
+            .filter(d -> (int) d.getAttributes().get("processedCount") == 0)
+            .collect(Collectors.toList());
+
+    for (CacheData onDemandItem : onDemandCacheData) {
+      try {
+        long cachedAt = (long) onDemandItem.getAttributes().get("cacheTime");
+        if (cachedAt > loadDataStart) {
+          CacheData currentLambda = lambdaCacheData.get(onDemandItem.getId());
+          if (currentLambda != null) {
+            DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
+            LocalDateTime onDemandLastModified =
+                LocalDateTime.parse(
+                    (String) onDemandItem.getAttributes().get("lastModified"), formatter);
+            LocalDateTime currentLambdaLastModified =
+                LocalDateTime.parse(
+                    (String) currentLambda.getAttributes().get("lastModified"), formatter);
+            if (onDemandLastModified.isAfter(currentLambdaLastModified)) {
+              lambdaCacheData.put(onDemandItem.getId(), onDemandItem);
+              String appKey =
+                  onDemandItem.getRelationships().get(APPLICATIONS.ns).stream().findFirst().get();
+              Collection<String> functionkeys =
+                  appLambdaRelationships.getOrDefault(appKey, new ArrayList<>());
+              functionkeys.add(onDemandItem.getId());
+              appLambdaRelationships.put(appKey, functionkeys);
+            }
+          } else {
+            lambdaCacheData.put(onDemandItem.getId(), onDemandItem);
+            String appKey =
+                onDemandItem.getRelationships().get(APPLICATIONS.ns).stream().findFirst().get();
+            Collection<String> functionkeys =
+                appLambdaRelationships.getOrDefault(appKey, new ArrayList<>());
+            functionkeys.add(onDemandItem.getId());
+            appLambdaRelationships.put(appKey, functionkeys);
+          }
+        }
+        Map<String, Object> attr = onDemandItem.getAttributes();
+        attr.put("processedCount", 1);
+        processedOnDemandCache.add(
+            new DefaultCacheData(onDemandItem.getId(), attr, Collections.emptyMap()));
+      } catch (Exception e) {
+        log.warn("Failed to process onDemandCache for Lambdas: " + e.getMessage());
+      }
+    }
+
+    // Create the INFORMATIVE spinnaker application cache with lambda relationships
+    Collection<CacheData> appCacheData = new LinkedList<>();
+    for (String appKey : appLambdaRelationships.keySet()) {
+      appCacheData.add(
+          new DefaultCacheData(
+              appKey,
+              Collections.emptyMap(),
+              Collections.singletonMap(LAMBDA_FUNCTIONS.ns, appLambdaRelationships.get(appKey))));
+    }
+
+    Map<String, Collection<CacheData>> cacheResults = new HashMap<>();
+
+    cacheResults.put(LAMBDA_FUNCTIONS.ns, lambdaCacheData.values());
+    cacheResults.put(APPLICATIONS.ns, appCacheData);
+    cacheResults.put(ON_DEMAND.ns, processedOnDemandCache);
+
+    Map<String, Collection<String>> evictions =
+        computeEvictableData(lambdaCacheData.values(), providerCache);
+
+    log.info("Caching {} items in {}", lambdaCacheData.size(), getAgentType());
+    return new DefaultCacheResult(cacheResults, evictions);
+  }
+
+  void buildCacheData(
+      Map<String, CacheData> lambdaCacheData,
+      Map<String, Collection<String>> appLambdaRelationships,
+      List<Map<String, Object>> allLambdas) {
+    allLambdas.stream()
+        .forEach(
+            lf -> {
+              String functionName = (String) lf.get("functionName");
+              String functionKey =
+                  Keys.getLambdaFunctionKey(getAccountName(), getRegion(), functionName);
+
+              /* TODO: If the functionName follows frigga by chance (i.e. somename-someothername), it will try to store the
+              lambda as a relationship with the app name (somename), even if it wasn't deployed by spinnaker!
+              */
+              // Add the spinnaker application relationship and store it
+              Names names = Names.parseName(functionName);
+              if (names.getApp() != null) {
+                String appKey =
+                    com.netflix.spinnaker.clouddriver.aws.data.Keys.getApplicationKey(
+                        names.getApp());
+                appLambdaRelationships.compute(
+                    appKey,
+                    (k, v) -> {
+                      Collection<String> fKeys = v;
+                      if (fKeys == null) fKeys = new ArrayList<>();
+                      fKeys.add(functionKey);
+                      return fKeys;
+                    });
+                // No other thread should be putting the same function in this map, so it's safe
+                // to use put
+                lambdaCacheData.put(
+                    functionKey,
+                    new DefaultCacheData(
+                        functionKey,
+                        lf,
+                        Collections.singletonMap(
+                            APPLICATIONS.ns, Collections.singletonList(appKey))));
+              } else {
+                // TODO: Do we care about non-Spinnaker-deployed lambdas?
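+                // A hedged illustration of the frigga caveat in the TODO above, with
+                // hypothetical names: Names.parseName("myapp-ratelimiter").getApp() returns
+                // "myapp", so that function is attached to application "myapp" purely on the
+                // strength of its name, whether or not Spinnaker deployed it. Only a name
+                // frigga cannot parse at all (getApp() == null) falls through to this branch.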
+                lambdaCacheData.put(
+                    functionKey, new DefaultCacheData(functionKey, lf, Collections.emptyMap()));
+              }
+            });
+  }
+
+  @Override
+  public boolean handles(OnDemandType type, String cloudProvider) {
+    return type.equals(OnDemandType.Function) && cloudProvider.equals(AmazonCloudProvider.ID);
+  }
+
+  @Override
+  public OnDemandResult handle(ProviderCache providerCache, Map data) {
+    if (!validKeys(data)
+        || !data.get("account").equals(getAccountName())
+        || !data.get("region").equals(region)) {
+      return null;
+    }
+
+    String appName = (String) data.get("appName");
+    String functionName = combineAppDetail(appName, (String) data.get("functionName"));
+
+    String functionKey =
+        Keys.getLambdaFunctionKey(
+            (String) data.get("credentials"), (String) data.get("region"), functionName);
+
+    String appKey = com.netflix.spinnaker.clouddriver.aws.data.Keys.getApplicationKey(appName);
+
+    Map<String, Object> lambdaAttributes = null;
+    try {
+      lambdaAttributes = lambdaService.getFunctionByName(functionName);
+    } catch (Exception e) {
+      if (e instanceof ResourceNotFoundException) {
+        // do nothing, the lambda was deleted
+      } else {
+        throw new SpinnakerException(
+            "Failed to populate the onDemandCache for lambda '" + functionName + "'");
+      }
+    }
+
+    DefaultCacheResult defaultCacheResult;
+    Map<String, Collection<String>> evictions;
+
+    if (lambdaAttributes != null && !lambdaAttributes.isEmpty()) {
+      lambdaAttributes.put("cacheTime", clock.instant().toEpochMilli());
+      lambdaAttributes.put("processedCount", 0);
+      DefaultCacheData lambdaCacheData =
+          new DefaultCacheData(
+              functionKey,
+              lambdaAttributes,
+              Collections.singletonMap(APPLICATIONS.ns, Collections.singletonList(appKey)));
+
+      defaultCacheResult =
+          new DefaultCacheResult(
+              Collections.singletonMap(ON_DEMAND.ns, Collections.singletonList(lambdaCacheData)));
+
+      evictions = Collections.emptyMap();
+    } else {
+      defaultCacheResult =
+          new DefaultCacheResult(
+              Collections.singletonMap(LAMBDA_FUNCTIONS.ns, Collections.emptyList()));
+
+      evictions =
+          Collections.singletonMap(
+              LAMBDA_FUNCTIONS.ns,
+              providerCache.filterIdentifiers(LAMBDA_FUNCTIONS.ns, functionKey));
+    }
+
+    return new OnDemandAgent.OnDemandResult(getAgentType(), defaultCacheResult, evictions);
+  }
+
+  @Override
+  public Collection<Map<String, Object>> pendingOnDemandRequests(ProviderCache providerCache) {
+    Collection<String> keys =
+        providerCache.filterIdentifiers(
+            ON_DEMAND.getNs(), Keys.getLambdaFunctionKey(account.getName(), getRegion(), "*"));
+    return providerCache.getAll(ON_DEMAND.getNs(), keys, RelationshipCacheFilter.none()).stream()
+        .map(
+            it -> {
+              String lambdaId = it.getId();
+              Map<String, String> details = Keys.parse(lambdaId);
+              Map<String, Object> attributes = it.getAttributes();
+              Map<String, Object> resp = new HashMap<>();
+              resp.put("id", lambdaId);
+              resp.put("details", details);
+              resp.put("attributes", it.getAttributes());
+              resp.put("cacheTime", attributes.get("cacheTime"));
+              resp.put("processedCount", attributes.get("processedCount"));
+              resp.put("processedTime", attributes.getOrDefault("processedTime", null));
+              return resp;
+            })
+        .collect(toSet());
+  }
+
+  @Override
+  public String getOnDemandAgentType() {
+    return getAgentType() + "-OnDemand";
+  }
+
+  @Override
+  public OnDemandMetricsSupport getMetricsSupport() {
+    return metricsSupport;
+  }
+
+  private Boolean validKeys(Map data) {
+    return (data.containsKey("functionName")
+        && data.containsKey("credentials")
+        && data.containsKey("region"));
+  }
+
+  protected String combineAppDetail(String appName, String functionName) {
+    Names functionAppName = Names.parseName(functionName);
+    if (null != functionAppName) {
+      return functionAppName.getApp().equals(appName)
+          ? functionName
+          : (appName + "-" + functionName);
+    } else {
+      throw new IllegalArgumentException(
+          String.format("Function name {%s} contains invalid characters", functionName));
+    }
+  }
+
+  /**
+   * Provides the key namespace that the caching agent is authoritative for. Currently only
+   * supports the caching agent being authoritative over one key namespace. Taken from
+   * AbstractEcsCachingAgent
+   *
+   * @return Key namespace.
+   */
+  String getAuthoritativeKeyName() {
+    Collection<AgentDataType> authoritativeNamespaces =
+        getProvidedDataTypes().stream()
+            .filter(agentDataType -> agentDataType.getAuthority().equals(AUTHORITATIVE))
+            .collect(Collectors.toSet());
+
+    if (authoritativeNamespaces.size() != 1) {
+      throw new RuntimeException(
+          "LambdaCachingAgent supports only one authoritative key namespace. "
+              + authoritativeNamespaces.size()
+              + " authoritative key namespaces were given.");
+    }
+
+    return authoritativeNamespaces.iterator().next().getTypeName();
+  }
+
+  Map<String, Collection<String>> computeEvictableData(
+      Collection<CacheData> newData, ProviderCache providerCache) {
+
+    // Get all old keys from the cache for the region and account
+    String authoritativeKeyName = getAuthoritativeKeyName();
+    Set<String> oldKeys =
+        providerCache.getIdentifiers(authoritativeKeyName).stream()
+            .filter(
+                key -> {
+                  Map<String, String> keyParts = Keys.parse(key);
+                  return keyParts.get("account").equalsIgnoreCase(account.getName())
+                      && keyParts.get("region").equalsIgnoreCase(region);
+                })
+            .collect(Collectors.toSet());
+
+    // New data can only come from the current account and region, no need to filter.
+    Set<String> newKeys = newData.stream().map(CacheData::getId).collect(Collectors.toSet());
+
+    Set<String> evictedKeys =
+        oldKeys.stream().filter(oldKey -> !newKeys.contains(oldKey)).collect(Collectors.toSet());
+
+    Map<String, Collection<String>> evictionsByKey = new HashMap<>();
+    evictionsByKey.put(getAuthoritativeKeyName(), evictedKeys);
+    String prettyKeyName =
+        CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, getAuthoritativeKeyName());
+
+    log.info(
+        "Evicting "
+            + evictedKeys.size()
+            + " "
+            + prettyKeyName
+            + (evictedKeys.size() > 1 ? "s" : "")
+            + " in "
+            + getAgentType());
+
+    return evictionsByKey;
+  }
+}
diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/view/LambdaFunctionProvider.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/view/LambdaFunctionProvider.java
new file mode 100644
index 00000000000..a7592ec8956
--- /dev/null
+++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/provider/view/LambdaFunctionProvider.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Amazon.com, Inc. or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
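A note on the on-demand reconciliation in loadData above: it hinges on parsing Lambda's lastModified strings with the pattern shown and keeping whichever record is fresher. A small self-contained sketch using the same pattern string (timestamps are hypothetical; the +0000 offset matches what the AWS API returns):

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class LastModifiedSketch {
  public static void main(String[] args) {
    DateTimeFormatter f = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
    LocalDateTime onDemand = LocalDateTime.parse("2021-06-01T12:05:00.000+0000", f);
    LocalDateTime polled = LocalDateTime.parse("2021-06-01T12:00:00.000+0000", f);
    // The fresher on-demand record wins and replaces the record from the regular poll.
    System.out.println(onDemand.isAfter(polled)); // true
  }
}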
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.provider.view; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.lambda.cache.Keys.Namespace.LAMBDA_FUNCTIONS; + +import com.amazonaws.services.lambda.model.FunctionConfiguration; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.clouddriver.lambda.cache.Keys; +import com.netflix.spinnaker.clouddriver.lambda.cache.client.LambdaCacheClient; +import com.netflix.spinnaker.clouddriver.model.Function; +import com.netflix.spinnaker.clouddriver.model.FunctionProvider; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class LambdaFunctionProvider implements FunctionProvider { + private LambdaCacheClient awsLambdaCacheClient; + private final Cache cacheView; + + @Autowired + public LambdaFunctionProvider(Cache cacheView) { + this.awsLambdaCacheClient = new LambdaCacheClient(cacheView); + this.cacheView = cacheView; + } + + @Override + public Collection getAllFunctions() { + return new ArrayList<>(awsLambdaCacheClient.getAll()); + } + + public Function getFunction(String account, String region, String functionName) { + String key = Keys.getLambdaFunctionKey(account, region, functionName); + return awsLambdaCacheClient.get(key); + } + + public Set getApplicationFunctions(String applicationName) { + + CacheData application = + cacheView.get( + APPLICATIONS.ns, + com.netflix.spinnaker.clouddriver.aws.data.Keys.getApplicationKey(applicationName)); + + Set appFunctions = new HashSet<>(); + if (null != application && null != application.getRelationships()) { + Collection functionRel = application.getRelationships().get(LAMBDA_FUNCTIONS.ns); + if (null != functionRel && !functionRel.isEmpty()) { + functionRel.forEach( + functionKey -> { + Function function = awsLambdaCacheClient.get(functionKey); + if (null != function) { + appFunctions.add(function); + } + }); + } + } else { + getAllFunctions().stream() + .filter(f -> f instanceof FunctionConfiguration) + .map(f -> (FunctionConfiguration) f) + .filter(f -> f.getFunctionName() != null) + .filter(f -> f.getFunctionName().startsWith(applicationName)) + .forEach(f -> appFunctions.add((Function) f)); + } + return appFunctions; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/service/LambdaService.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/service/LambdaService.java new file mode 100644 index 00000000000..1bc3559b852 --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/clouddriver/lambda/service/LambdaService.java @@ -0,0 +1,282 @@ +/* + * Copyright 2021 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
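Every list call in the service below follows the same SDK v1 pagination idiom: feed the previous page's marker back into the next request until it comes back null or empty. A condensed sketch of that loop for ListFunctions (a null marker simply requests the first page):

import com.amazonaws.services.lambda.AWSLambda;
import com.amazonaws.services.lambda.model.FunctionConfiguration;
import com.amazonaws.services.lambda.model.ListFunctionsRequest;
import com.amazonaws.services.lambda.model.ListFunctionsResult;
import java.util.ArrayList;
import java.util.List;

public class PaginationSketch {
  static List<FunctionConfiguration> listAll(AWSLambda lambda) {
    List<FunctionConfiguration> all = new ArrayList<>();
    String marker = null;
    do {
      ListFunctionsResult page =
          lambda.listFunctions(new ListFunctionsRequest().withMarker(marker));
      all.addAll(page.getFunctions());
      marker = page.getNextMarker();
    } while (marker != null && !marker.isEmpty());
    return all;
  }
}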
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.service; + +import com.amazonaws.auth.policy.Policy; +import com.amazonaws.auth.policy.Statement; +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.*; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.data.ArnUtils; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.LambdaClientProvider; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import java.util.*; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import lombok.extern.log4j.Log4j2; + +@Log4j2 +public class LambdaService extends LambdaClientProvider { + + private final ObjectMapper mapper; + + public LambdaService( + AmazonClientProvider amazonClientProvider, + NetflixAmazonCredentials account, + String region, + ObjectMapper mapper, + LambdaServiceConfig lambdaServiceConfig) { + super(region, account); + super.operationsConfig = lambdaServiceConfig; + super.amazonClientProvider = amazonClientProvider; + this.mapper = mapper; + } + + public List> getAllFunctions() { + List functions = listAllFunctionConfigurations(); + List> hydratedFunctionList = + Collections.synchronizedList(new ArrayList<>()); + functions.stream() + .forEach( + f -> { + Map functionAttributes = new ConcurrentHashMap<>(); + addBaseAttributes(functionAttributes, f.getFunctionName()); + addRevisionsAttributes(functionAttributes, f.getFunctionName()); + addAliasAndEventSourceMappingConfigurationAttributes( + functionAttributes, f.getFunctionName()); + addTargetGroupAttributes(functionAttributes, f.getFunctionName()); + hydratedFunctionList.add(functionAttributes); + }); + + // if addBaseAttributes returned null, the name won't be included. 
There is a chance other + // resources still have + // associations to the deleted lambda + return hydratedFunctionList.stream() + .filter(lf -> lf.get("functionName") != null) + .collect(Collectors.toList()); + } + + public Map getFunctionByName(String functionName) throws InterruptedException { + List> functionTasks = Collections.synchronizedList(new ArrayList<>()); + Map functionAttributes = new ConcurrentHashMap<>(); + addBaseAttributes(functionAttributes, functionName); + if (functionAttributes.isEmpty()) { + // return quick so we don't make extra api calls for a delete lambda + return null; + } + addRevisionsAttributes(functionAttributes, functionName); + addAliasAndEventSourceMappingConfigurationAttributes(functionAttributes, functionName); + addTargetGroupAttributes(functionAttributes, functionName); + return functionAttributes; + } + + public List listAllFunctionConfigurations() { + AWSLambda lambda = getLambdaClient(); + String nextMarker = null; + List lstFunction = new ArrayList<>(); + do { + ListFunctionsRequest listFunctionsRequest = new ListFunctionsRequest(); + if (nextMarker != null) { + listFunctionsRequest.setMarker(nextMarker); + } + + ListFunctionsResult listFunctionsResult = lambda.listFunctions(listFunctionsRequest); + + if (listFunctionsResult == null) { + break; + } + + lstFunction.addAll(listFunctionsResult.getFunctions()); + nextMarker = listFunctionsResult.getNextMarker(); + + } while (nextMarker != null && nextMarker.length() != 0); + return lstFunction; + } + + private Void addBaseAttributes(Map functionAttributes, String functionName) { + GetFunctionResult result = + getLambdaClient().getFunction(new GetFunctionRequest().withFunctionName(functionName)); + if (result == null) { + return null; + } + Map attr = mapper.convertValue(result.getConfiguration(), Map.class); + attr.put("account", getCredentials().getName()); + attr.put("region", getRegion()); + attr.put("code", result.getCode()); + attr.put("tags", result.getTags()); + attr.put("concurrency", result.getConcurrency()); + attr.values().removeAll(Collections.singleton(null)); + functionAttributes.putAll(attr); + return null; + } + + private Void addRevisionsAttributes(Map functionAttributes, String functionName) { + Map revisions = listFunctionRevisions(functionName); + functionAttributes.put("revisions", revisions); + return null; + } + + private Map listFunctionRevisions(String functionName) { + AWSLambda lambda = getLambdaClient(); + String nextMarker = null; + Map listRevionIds = new HashMap<>(); + do { + ListVersionsByFunctionRequest listVersionsByFunctionRequest = + new ListVersionsByFunctionRequest(); + listVersionsByFunctionRequest.setFunctionName(functionName); + if (nextMarker != null) { + listVersionsByFunctionRequest.setMarker(nextMarker); + } + + ListVersionsByFunctionResult listVersionsByFunctionResult = + lambda.listVersionsByFunction(listVersionsByFunctionRequest); + if (listVersionsByFunctionResult == null) { + return listRevionIds; + } + for (FunctionConfiguration x : listVersionsByFunctionResult.getVersions()) { + listRevionIds.put(x.getRevisionId(), x.getVersion()); + } + nextMarker = listVersionsByFunctionResult.getNextMarker(); + + } while (nextMarker != null && nextMarker.length() != 0); + return listRevionIds; + } + + private Void addAliasAndEventSourceMappingConfigurationAttributes( + Map functionAttributes, String functionName) { + List aliasConfigurationList = listAliasConfiguration(functionName); + functionAttributes.put("aliasConfigurations", aliasConfigurationList); + + // 
TODO: should we also process these concurrently? + List eventSourceMappingConfigurationsList = + listEventSourceMappingConfiguration(functionName); + for (AliasConfiguration currAlias : aliasConfigurationList) { + List currAliasEvents = + listEventSourceMappingConfiguration(currAlias.getAliasArn()); + eventSourceMappingConfigurationsList.addAll(currAliasEvents); + } + functionAttributes.put("eventSourceMappings", eventSourceMappingConfigurationsList); + return null; + } + + private List listAliasConfiguration(String functionName) { + AWSLambda lambda = getLambdaClient(); + String nextMarker = null; + List aliasConfigurations = new ArrayList<>(); + do { + ListAliasesRequest listAliasesRequest = new ListAliasesRequest(); + listAliasesRequest.setFunctionName(functionName); + if (nextMarker != null) { + listAliasesRequest.setMarker(nextMarker); + } + + ListAliasesResult listAliasesResult = lambda.listAliases(listAliasesRequest); + if (listAliasesResult == null) { + return aliasConfigurations; + } + for (AliasConfiguration x : listAliasesResult.getAliases()) { + aliasConfigurations.add(x); + } + nextMarker = listAliasesResult.getNextMarker(); + + } while (nextMarker != null && nextMarker.length() != 0); + return aliasConfigurations; + } + + private List listEventSourceMappingConfiguration( + String functionName) { + List eventSourceMappingConfigurations = new ArrayList<>(); + AWSLambda lambda = getLambdaClient(); + String nextMarker = null; + do { + ListEventSourceMappingsRequest listEventSourceMappingsRequest = + new ListEventSourceMappingsRequest(); + listEventSourceMappingsRequest.setFunctionName(functionName); + + if (nextMarker != null) { + listEventSourceMappingsRequest.setMarker(nextMarker); + } + + ListEventSourceMappingsResult listEventSourceMappingsResult = + lambda.listEventSourceMappings(listEventSourceMappingsRequest); + if (listEventSourceMappingsResult == null) { + return eventSourceMappingConfigurations; + } + + for (EventSourceMappingConfiguration x : + listEventSourceMappingsResult.getEventSourceMappings()) { + eventSourceMappingConfigurations.add(x); + } + nextMarker = listEventSourceMappingsResult.getNextMarker(); + + } while (nextMarker != null && nextMarker.length() != 0); + + return eventSourceMappingConfigurations; + } + + private Void addTargetGroupAttributes( + Map functionAttributes, String functionName) { + List targetGroups = getTargetGroupNames(functionName); + functionAttributes.put("targetGroups", targetGroups); + return null; + } + + private static final Predicate isLambdaInvokeAction = + statement -> + statement.getActions().stream() + .anyMatch(action -> "lambda:InvokeFunction".equals(action.getActionName())); + private static final Predicate isElbPrincipal = + statement -> + statement.getPrincipals().stream() + .anyMatch( + principal -> "elasticloadbalancing.amazonaws.com".equals(principal.getId())); + + private List getTargetGroupNames(String functionName) { + List targetGroupNames = new ArrayList<>(); + Predicate isAllowStatement = + statement -> statement.getEffect().toString().equals(Statement.Effect.Allow.toString()); + + try { + AWSLambda lambda = getLambdaClient(); + GetPolicyResult result = + lambda.getPolicy(new GetPolicyRequest().withFunctionName(functionName)); + Policy policy = Policy.fromJson(result.getPolicy()); + + targetGroupNames = + policy.getStatements().stream() + .filter(isAllowStatement.and(isLambdaInvokeAction).and(isElbPrincipal)) + .flatMap(statement -> statement.getConditions().stream()) + .filter( + condition -> + 
"ArnLike".equals(condition.getType()) + && "AWS:SourceArn".equals(condition.getConditionKey())) + .flatMap(condition -> condition.getValues().stream()) + .flatMap(value -> ArnUtils.extractTargetGroupName(value).stream()) + .collect(Collectors.toList()); + + } catch (NullPointerException | ResourceNotFoundException e) { + // ignore the exception. Log it + log.info("Unable to find target group names for {}", functionName); + } + + return targetGroupNames; + } +} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/config/LambdaConfiguration.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/config/LambdaConfiguration.java new file mode 100644 index 00000000000..810a6f4a51f --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/config/LambdaConfiguration.java @@ -0,0 +1,27 @@ +/* + * Copyright 2018 Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.config; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; + +@Configuration +@ComponentScan("com.netflix.spinnaker.clouddriver.lambda") +@ConditionalOnExpression( + "${aws.enabled:false} and (${aws.lambda.enabled:false} or ${aws.features.lambda.enabled:false})") +public class LambdaConfiguration {} diff --git a/clouddriver-lambda/src/main/java/com/netflix/spinnaker/config/LambdaServiceConfig.java b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/config/LambdaServiceConfig.java new file mode 100644 index 00000000000..61ad7f5392e --- /dev/null +++ b/clouddriver-lambda/src/main/java/com/netflix/spinnaker/config/LambdaServiceConfig.java @@ -0,0 +1,69 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.config; + +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import lombok.Data; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.stereotype.Component; + +/** + * The "defaults" here apply to ALL AWS Lambda operations. Several settings on AWS operations have + * interesting precedents. Examples below: + * + *

IF a timeout is set on the pipeline and is BELOW the default, that pipeline timeout wins; if + * it's greater than the global default, the default applies. Whoever is the MOST restrictive + * wins on timeouts. + + *
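For example (a worked illustration of the rule above, not new behavior): with the default + * invokeTimeoutMs of 50000, a pipeline timeout of 30 seconds is honored because it is more + * restrictive, while a pipeline timeout of 120 seconds is capped at the 50-second default. + + *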

Retry policy is different. The default of -1 delegates to the SDK's RETRY policy, which tries + * to make intelligent decisions about whether a call SHOULD be retried (500s or timeouts are + * generally retry situations). IF instead you set this to a fixed value, that value determines + * how many retry attempts are made, WITH the same policy restrictions. See the {@link + * AmazonClientProvider.Builder#buildPolicy()} build policy method for the default policy. + + *
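A hypothetical clouddriver.yml fragment exercising these settings (the values are illustrative + * only; the property names come from the {@code @Value} bindings in this class and the + * {@code LambdaConfiguration} conditional above): + * + * <pre> + * aws: + *   lambda: + *     enabled: true            # gates LambdaConfiguration + *     invokeTimeoutMs: 600000 + *     retries: 5 + *     retry: + *       timeout: 15            # seconds + * </pre> + + *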

Per-request overrides come from the + * {@link com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionDescription} + * operation. These are JUST defaults, nominally set high so you can lower them PER request. + */ +@Component +@ConfigurationProperties(prefix = "aws.lambda") +@Data +public class LambdaServiceConfig { + + // Matches the AWS SDK default value & folds the old LambdaOperationsConfig settings in here. + // The newer "ops" timeout property is still honored as a fallback, though that concept is + // effectively dead on arrival given how the SDK and Lambda code work. + @Value("#{'${aws.lambda.invokeTimeoutMs:${aws.lambda.ops.invokeTimeoutMs:50000}}'}") + private int invokeTimeoutMs = 50000; + + private Retry retry = new Retry(); + + /** + * Duplicated by the {@link + * com.netflix.spinnaker.clouddriver.aws.AwsConfigurationProperties.ClientConfig} class and the + * native SDK Retry handling. + */ + @Data + public static class Retry { + // The SDK default is 20 seconds; this is a touch lower. + private int timeout = 15; + // Defaults to the AWS client max error retries if NOT set. + @Value("#{'${aws.lambda.retries:${aws.client.maxErrorRetry}}'}") + private int retries = 3; + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/AbstractLambdaAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/AbstractLambdaAtomicOperationTest.java new file mode 100644 index 00000000000..621c2ccd198 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/AbstractLambdaAtomicOperationTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2023 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.*; + +import com.amazonaws.ClientConfiguration; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionOutputDescription; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; + +class AbstractLambdaAtomicOperationTest { + + @Test + public void verifyLambdaClientGetsDefaultConfigPassed() { + InvokeLambdaFunctionDescription desc = new InvokeLambdaFunctionDescription(); + desc.setRegion("someplace"); + NetflixAmazonCredentials creds = mock(NetflixAmazonCredentials.class); + when(creds.getLambdaEnabled()).thenReturn(true); + desc.setCredentials(creds); + AbstractLambdaAtomicOperation< + InvokeLambdaFunctionDescription, InvokeLambdaFunctionOutputDescription> + operation = + new AbstractLambdaAtomicOperation<>(desc, null) { + + @Override + public InvokeLambdaFunctionOutputDescription operate( + List priorOutputs) { + return null; + } + }; + operation.operationsConfig = new LambdaServiceConfig(); + operation.amazonClientProvider = mock(AmazonClientProvider.class); + ArgumentCaptor captureclientConfig = + ArgumentCaptor.forClass(ClientConfiguration.class); + operation.getLambdaClient(); + verify(operation.amazonClientProvider) + .getAmazonLambda(any(), captureclientConfig.capture(), eq("someplace")); + assertEquals(3, captureclientConfig.getValue().getMaxErrorRetry()); + assertEquals(50000, captureclientConfig.getValue().getSocketTimeout()); + } + + @Test + public void verifyLambdaClientSetsTimeouts() { + InvokeLambdaFunctionDescription desc = new InvokeLambdaFunctionDescription(); + desc.setRegion("someplace"); + NetflixAmazonCredentials creds = mock(NetflixAmazonCredentials.class); + when(creds.getLambdaEnabled()).thenReturn(true); + desc.setCredentials(creds); + AbstractLambdaAtomicOperation< + InvokeLambdaFunctionDescription, InvokeLambdaFunctionOutputDescription> + operation = + new AbstractLambdaAtomicOperation<>(desc, null) { + + @Override + public InvokeLambdaFunctionOutputDescription operate( + List priorOutputs) { + return null; + } + }; + operation.operationsConfig = new LambdaServiceConfig(); + operation.operationsConfig.setInvokeTimeoutMs(300 * 1000); + operation.operationsConfig.getRetry().setRetries(0); + operation.amazonClientProvider = mock(AmazonClientProvider.class); + ArgumentCaptor captureclientConfig = + ArgumentCaptor.forClass(ClientConfiguration.class); + operation.getLambdaClient(); + verify(operation.amazonClientProvider) + .getAmazonLambda(any(), captureclientConfig.capture(), eq("someplace")); + // assertEquals(4, captureclientConfig.getValue().getMaxErrorRetry()); + assertEquals(0, captureclientConfig.getValue().getMaxErrorRetry()); + assertEquals(300000, captureclientConfig.getValue().getSocketTimeout()); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/CreateLambdaAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/CreateLambdaAtomicOperationTest.java new file mode 
100644 index 00000000000..8aef2ff0c66 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/CreateLambdaAtomicOperationTest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.CreateFunctionRequest; +import com.amazonaws.services.lambda.model.CreateFunctionResult; +import com.amazonaws.services.lambda.model.DeadLetterConfig; +import com.amazonaws.services.lambda.model.FunctionCode; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.CreateLambdaFunctionDescription; +import org.junit.jupiter.api.Test; + +public class CreateLambdaAtomicOperationTest { + @Test + void testPublishLambda() { + // given + CreateLambdaFunctionDescription b = + new CreateLambdaFunctionDescription() + .setS3bucket("s3://bucket") + .setS3key("key/key/path") + .setFunctionName("funcName") + .setDeadLetterConfig(new DeadLetterConfig().withTargetArn("")); + b.setAppName("appName"); + CreateLambdaAtomicOperation clao = spy(new CreateLambdaAtomicOperation(b)); + doNothing().when(clao).updateTaskStatus(anyString()); + AWSLambda lambdaClient = mock(AWSLambda.class); + doReturn(lambdaClient).when(clao).getLambdaClient(); + CreateFunctionRequest createRequest = + new CreateFunctionRequest() + .withFunctionName("appName-funcName") + .withCode(new FunctionCode().withS3Bucket("s3://bucket").withS3Key("key/key/path")); + CreateFunctionResult createLambdaResult = + new CreateFunctionResult() + .withFunctionName("appName-funcName") + .withCodeSha256("abc123def456"); + doReturn(createLambdaResult).when(lambdaClient).createFunction(createRequest); + // when + CreateFunctionResult output = clao.operate(null); + // then + verify(clao, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(createLambdaResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaAtomicOperationTest.java new file mode 100644 index 00000000000..304daf7f1ba --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaAtomicOperationTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteFunctionRequest; +import com.amazonaws.services.lambda.model.DeleteFunctionResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class DeleteLambdaAtomicOperationTest implements LambdaTestingDefaults { + @Test + void testDeleteLambda() { + DeleteLambdaFunctionDescription deleteDesc = new DeleteLambdaFunctionDescription(); + deleteDesc.setFunctionName(fName).setQualifier(version).setRegion(region).setAccount(account); + + DeleteLambdaAtomicOperation deleteOperation = spy(new DeleteLambdaAtomicOperation(deleteDesc)); + doNothing().when(deleteOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + ReflectionTestUtils.setField(deleteOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + doReturn(lambdaClient).when(deleteOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + DeleteFunctionRequest deleteRequest = new DeleteFunctionRequest(); + deleteRequest.withQualifier(version).withFunctionName(functionArn); + DeleteFunctionResult mockDeleteResult = new DeleteFunctionResult(); + doReturn(mockDeleteResult).when(lambdaClient).deleteFunction(deleteRequest); + + DeleteFunctionResult output = deleteOperation.operate(null); + verify(deleteOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockDeleteResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaEventSourceAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaEventSourceAtomicOperationTest.java new file mode 100644 index 00000000000..63dcd4d4075 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaEventSourceAtomicOperationTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteEventSourceMappingRequest; +import com.amazonaws.services.lambda.model.DeleteEventSourceMappingResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpsertLambdaFunctionEventMappingDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class DeleteLambdaEventSourceAtomicOperationTest implements LambdaTestingDefaults { + @Test + void testUpdateLambdaEventMapping() { + UpsertLambdaFunctionEventMappingDescription eventMappingDesc = + new UpsertLambdaFunctionEventMappingDescription(); + eventMappingDesc + .setFunctionName(fName) + .setBatchsize(1) + .setEnabled(true) + .setEventSourceArn(eventArn) + .setUuid(eventUuid) + .setRegion(region) + .setAccount(account); + ; + DeleteLambdaEventSourceAtomicOperation deleteEventSourceOperation = + spy(new DeleteLambdaEventSourceAtomicOperation(eventMappingDesc)); + doNothing().when(deleteEventSourceOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + + ReflectionTestUtils.setField( + deleteEventSourceOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + doReturn(lambdaClient).when(deleteEventSourceOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + DeleteEventSourceMappingRequest testRequest = new DeleteEventSourceMappingRequest(); + testRequest.setUUID(eventUuid); + + DeleteEventSourceMappingResult mockDeleteEventResult = new DeleteEventSourceMappingResult(); + doReturn(mockDeleteEventResult).when(lambdaClient).deleteEventSourceMapping(testRequest); + + Object output = deleteEventSourceOperation.operate(null); + verify(deleteEventSourceOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockDeleteEventResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaProvisionedConcurrencyAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaProvisionedConcurrencyAtomicOperationTest.java new file mode 100644 index 00000000000..c8f74e1dbc8 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaProvisionedConcurrencyAtomicOperationTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2022 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteProvisionedConcurrencyConfigRequest; +import com.amazonaws.services.lambda.model.DeleteProvisionedConcurrencyConfigResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaProvisionedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class DeleteLambdaProvisionedConcurrencyAtomicOperationTest + implements LambdaTestingDefaults { + @Test + void testDeleteProvisionedConcurrency() { + DeleteLambdaProvisionedConcurrencyDescription deleteDesc = + new DeleteLambdaProvisionedConcurrencyDescription(); + deleteDesc.setFunctionName(fName).setQualifier(version); + + DeleteLambdaProvisionedConcurrencyAtomicOperation deleteOperation = + spy(new DeleteLambdaProvisionedConcurrencyAtomicOperation(deleteDesc)); + doNothing().when(deleteOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + ReflectionTestUtils.setField(deleteOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + doReturn(lambdaClient).when(deleteOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + DeleteProvisionedConcurrencyConfigRequest deleteRequest = + new DeleteProvisionedConcurrencyConfigRequest(); + deleteRequest.withQualifier(version).withFunctionName(fName); + DeleteProvisionedConcurrencyConfigResult mockDeleteResult = + new DeleteProvisionedConcurrencyConfigResult(); + doReturn(mockDeleteResult).when(lambdaClient).deleteProvisionedConcurrencyConfig(deleteRequest); + + DeleteProvisionedConcurrencyConfigResult output = deleteOperation.operate(null); + verify(deleteOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockDeleteResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaReservedConcurrencyAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaReservedConcurrencyAtomicOperationTest.java new file mode 100644 index 00000000000..fe2a782aaf9 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/DeleteLambdaReservedConcurrencyAtomicOperationTest.java @@ -0,0 +1,63 @@ +/* + 
* Copyright 2022 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.DeleteFunctionConcurrencyRequest; +import com.amazonaws.services.lambda.model.DeleteFunctionConcurrencyResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.DeleteLambdaReservedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class DeleteLambdaReservedConcurrencyAtomicOperationTest implements LambdaTestingDefaults { + @Test + void testDeleteReservedConcurrency() { + DeleteLambdaReservedConcurrencyDescription deleteDesc = + new DeleteLambdaReservedConcurrencyDescription(); + deleteDesc.setFunctionName(fName); + + DeleteLambdaReservedConcurrencyAtomicOperation deleteOperation = + spy(new DeleteLambdaReservedConcurrencyAtomicOperation(deleteDesc)); + doNothing().when(deleteOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + ReflectionTestUtils.setField(deleteOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + doReturn(lambdaClient).when(deleteOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + DeleteFunctionConcurrencyRequest deleteRequest = new DeleteFunctionConcurrencyRequest(); + deleteRequest.withFunctionName(fName); + DeleteFunctionConcurrencyResult mockDeleteResult = new DeleteFunctionConcurrencyResult(); + doReturn(mockDeleteResult).when(lambdaClient).deleteFunctionConcurrency(deleteRequest); + + DeleteFunctionConcurrencyResult output = deleteOperation.operate(null); + verify(deleteOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockDeleteResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/InvokeLambdaAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/InvokeLambdaAtomicOperationTest.java new file mode 100644 index 00000000000..f943ffecf9b --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/InvokeLambdaAtomicOperationTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2020 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.InvokeRequest; +import com.amazonaws.services.lambda.model.InvokeResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.InvokeLambdaFunctionOutputDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.springframework.test.util.ReflectionTestUtils; + +/* +* To test against clouddriver, you can verify results using the following example CURL. Note this +* is all "sample" data. The return type will be a callback that can be queried to know the status +* of the invoke operation. 
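+ * Note: the JSON fields mirror the InvokeLambdaFunctionDescription exercised by these tests, and + * the description's "timeout" is applied as the SDK request timeout in milliseconds (a timeout of + * 55 becomes a 55000 ms SDK request timeout in verifyTimeoutIsSet below).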
curl -XPOST -k -H "X-SPINNAKER-USER: jason.mcintosh@armory.io" -H + "Accept: application/json" -H "Content-Type: application/json" + https://spin-clouddriver:7002/aws/ops/invokeLambdaFunction -d ' { "appName":"simple", + "functionName": "simple-hello-world", "qualifier":"$LATEST", "region": "us-west-2", + "credentials": "aws-internal-dev", "account": "aws-internal-dev", "timeout": 600000 }' +*/ +public class InvokeLambdaAtomicOperationTest implements LambdaTestingDefaults { + InvokeLambdaAtomicOperation invokeOperation; + InvokeLambdaFunctionDescription invokeDesc; + + @BeforeEach + public void setup() { + invokeDesc = new InvokeLambdaFunctionDescription(); + invokeDesc.setFunctionName(fName).setQualifier(version).setRegion(region).setAccount(account); + invokeDesc.setPayload("example"); + invokeOperation = spy(new InvokeLambdaAtomicOperation(invokeDesc)); + + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + ReflectionTestUtils.setField(invokeOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + doNothing().when(invokeOperation).updateTaskStatus(anyString()); + } + + @Test + void testInvokeLambda() { + + AWSLambda lambdaClient = mock(AWSLambda.class); + doReturn(lambdaClient).when(invokeOperation).getLambdaClient(); + + ArgumentCaptor captor = ArgumentCaptor.forClass(InvokeRequest.class); + InvokeResult result = new InvokeResult(); + doReturn(result).when(lambdaClient).invoke(any(InvokeRequest.class)); + + InvokeLambdaFunctionOutputDescription output = invokeOperation.operate(null); + assertEquals(result, output.getInvokeResult()); + verify(lambdaClient).invoke(captor.capture()); + verify(invokeOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertEquals(fName, captor.getValue().getFunctionName()); + assertNull(captor.getValue().getSdkRequestTimeout()); + } + + @Test + void verifyTimeoutIsSet() { + // Allows a base timeout for all operations of 100,000 then short it to 55 seconds for a + // specific request per invoked request + invokeDesc.setTimeout(55); + + AWSLambda lambdaClient = mock(AWSLambda.class); + doReturn(lambdaClient).when(invokeOperation).getLambdaClient(); + + ArgumentCaptor invokeCaptor = ArgumentCaptor.forClass(InvokeRequest.class); + doReturn(new InvokeResult()).when(lambdaClient).invoke(invokeCaptor.capture()); + invokeOperation.operate(null); + assertEquals(55000, invokeCaptor.getValue().getSdkRequestTimeout().intValue()); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/LambdaTestingDefaults.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/LambdaTestingDefaults.java new file mode 100644 index 00000000000..2ad5b8884e6 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/LambdaTestingDefaults.java @@ -0,0 +1,70 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import com.amazonaws.services.lambda.model.AliasConfiguration; +import com.amazonaws.services.lambda.model.EventSourceMappingConfiguration; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import java.util.ArrayList; +import java.util.List; + +public interface LambdaTestingDefaults { + + String fName = "app1-stack1-detail1-function1"; + String functionArn = "function1:arn"; + String region = "us-west-2"; + String account = "account-1"; + String version = "1"; + String eventArn = "arn-1"; + String eventUuid = "uuid-1"; + String revisionId = "1"; + String revisionDesc = "Revision Desc"; + String aliasName = "fAlias"; + String aliasArn = "alias-arn-1"; + String aliasDesc = "Alias Description"; + + default LambdaFunction getMockedFunctionDefintion() { + LambdaFunction cachedFunction = new LambdaFunction(); + cachedFunction.setFunctionName(fName); + cachedFunction.setFunctionArn(functionArn); + cachedFunction.setRegion(region); + cachedFunction.setAccount(account); + cachedFunction.setAliasConfigurations(getMockAliases()); + cachedFunction.setEventSourceMappings(getMockEventSourceList()); + return cachedFunction; + } + + default List getMockAliases() { + AliasConfiguration es = new AliasConfiguration(); + es.setAliasArn(aliasArn); + es.setDescription(aliasDesc); + es.setName(aliasName); + es.setRevisionId(revisionId); + List le = new ArrayList(); + le.add(es); + return le; + } + + default List getMockEventSourceList() { + EventSourceMappingConfiguration es = new EventSourceMappingConfiguration(); + es.setUUID(eventUuid); + es.setEventSourceArn(eventArn); + List le = new ArrayList(); + le.add(es); + return le; + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PublishLambdaAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PublishLambdaAtomicOperationTest.java new file mode 100644 index 00000000000..ec6a57f0444 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PublishLambdaAtomicOperationTest.java @@ -0,0 +1,74 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.PublishVersionRequest; +import com.amazonaws.services.lambda.model.PublishVersionResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PublishLambdaFunctionVersionDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class PublishLambdaAtomicOperationTest implements LambdaTestingDefaults { + + @Test + void testPublishLambda() { + PublishLambdaFunctionVersionDescription publishDesc = + new PublishLambdaFunctionVersionDescription(); + publishDesc + .setFunctionName(fName) + .setDescription(revisionDesc) + .setRevisionId(revisionId) + .setRegion(region) + .setAccount(account); + ; + + PublishLambdaAtomicOperation publishOperation = + spy(new PublishLambdaAtomicOperation(publishDesc)); + doNothing().when(publishOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + + ReflectionTestUtils.setField( + publishOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + doReturn(lambdaClient).when(publishOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + PublishVersionRequest testRequest = new PublishVersionRequest(); + testRequest.setFunctionName(fName); + testRequest.setRevisionId(revisionId); + testRequest.setDescription(revisionDesc); + + PublishVersionResult mockPublishResult = new PublishVersionResult(); + doReturn(mockPublishResult).when(lambdaClient).publishVersion(testRequest); + + PublishVersionResult output = publishOperation.operate(null); + verify(publishOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockPublishResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutConcurrencyAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutConcurrencyAtomicOperationTest.java new file mode 100644 index 00000000000..cad4d0a87d5 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/PutConcurrencyAtomicOperationTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.PutFunctionConcurrencyRequest; +import com.amazonaws.services.lambda.model.PutFunctionConcurrencyResult; +import com.amazonaws.services.lambda.model.PutProvisionedConcurrencyConfigRequest; +import com.amazonaws.services.lambda.model.PutProvisionedConcurrencyConfigResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PutLambdaProvisionedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.PutLambdaReservedConcurrencyDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class PutConcurrencyAtomicOperationTest implements LambdaTestingDefaults { + + @Test + void testProvisionedConcurrency() { + PutLambdaProvisionedConcurrencyDescription provisionedConcurrencyDescription = + new PutLambdaProvisionedConcurrencyDescription(); + provisionedConcurrencyDescription + .setFunctionName(fName) + .setQualifier(version) + .setProvisionedConcurrentExecutions(2) + .setRegion(region) + .setAccount(account); + ; + + PutLambdaProvisionedConcurrencyAtomicOperation putConcurrencyOperation = + spy(new PutLambdaProvisionedConcurrencyAtomicOperation(provisionedConcurrencyDescription)); + doNothing().when(putConcurrencyOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + + ReflectionTestUtils.setField( + putConcurrencyOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + doReturn(lambdaClient).when(putConcurrencyOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + PutProvisionedConcurrencyConfigRequest testRequest = + new PutProvisionedConcurrencyConfigRequest(); + testRequest.setFunctionName(fName); + testRequest.setQualifier(version); + testRequest.setProvisionedConcurrentExecutions(2); + + PutProvisionedConcurrencyConfigResult mockProvisionResult = + new PutProvisionedConcurrencyConfigResult(); + mockProvisionResult.setAllocatedProvisionedConcurrentExecutions(2); + mockProvisionResult.setAvailableProvisionedConcurrentExecutions(4); + mockProvisionResult.setStatus("provisioned"); + doReturn(mockProvisionResult).when(lambdaClient).putProvisionedConcurrencyConfig(testRequest); + + PutProvisionedConcurrencyConfigResult output = putConcurrencyOperation.operate(null); + verify(putConcurrencyOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockProvisionResult); + } + + @Test + void testReservedConcurrency() { + PutLambdaReservedConcurrencyDescription provisionedConcurrencyDescription = + new PutLambdaReservedConcurrencyDescription(); + provisionedConcurrencyDescription + .setFunctionName(fName) + .setReservedConcurrentExecutions(2) + .setRegion(region) + .setAccount(account); + ; + + PutLambdaReservedConcurrencyAtomicOperation putConcurrencyOperation = + spy(new 
PutLambdaReservedConcurrencyAtomicOperation(provisionedConcurrencyDescription)); + doNothing().when(putConcurrencyOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + + ReflectionTestUtils.setField( + putConcurrencyOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + doReturn(lambdaClient).when(putConcurrencyOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + PutFunctionConcurrencyRequest testRequest = new PutFunctionConcurrencyRequest(); + testRequest.setReservedConcurrentExecutions(2); + testRequest.setFunctionName(fName); + + PutFunctionConcurrencyResult mockProvisionResult = new PutFunctionConcurrencyResult(); + doReturn(mockProvisionResult).when(lambdaClient).putFunctionConcurrency(testRequest); + + PutFunctionConcurrencyResult output = putConcurrencyOperation.operate(null); + verify(putConcurrencyOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockProvisionResult); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaAtomicOperationTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaAtomicOperationTest.java new file mode 100644 index 00000000000..c5b38e36e67 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/deploy/ops/UpdateLambdaAtomicOperationTest.java @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.deploy.ops; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.*; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.UpdateFunctionCodeRequest; +import com.amazonaws.services.lambda.model.UpdateFunctionCodeResult; +import com.netflix.spinnaker.clouddriver.lambda.cache.model.LambdaFunction; +import com.netflix.spinnaker.clouddriver.lambda.deploy.description.UpdateLambdaFunctionCodeDescription; +import com.netflix.spinnaker.clouddriver.lambda.provider.view.LambdaFunctionProvider; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +public class UpdateLambdaAtomicOperationTest implements LambdaTestingDefaults { + + @Test + void testUpdateLambdaCode() { + UpdateLambdaFunctionCodeDescription updateCodeDesc = new UpdateLambdaFunctionCodeDescription(); + updateCodeDesc.setFunctionName(fName).setRegion(region).setAccount(account); + + UpdateLambdaCodeAtomicOperation updateCodeOperation = + spy(new UpdateLambdaCodeAtomicOperation(updateCodeDesc)); + doNothing().when(updateCodeOperation).updateTaskStatus(anyString()); + + AWSLambda lambdaClient = mock(AWSLambda.class); + LambdaFunction cachedFunction = getMockedFunctionDefintion(); + + LambdaFunctionProvider lambdaFunctionProvider = mock(LambdaFunctionProvider.class); + ReflectionTestUtils.setField( + updateCodeOperation, "lambdaFunctionProvider", lambdaFunctionProvider); + + doReturn(lambdaClient).when(updateCodeOperation).getLambdaClient(); + doReturn(cachedFunction) + .when(lambdaFunctionProvider) + .getFunction(anyString(), anyString(), anyString()); + + UpdateFunctionCodeRequest updateCodeRequest = new UpdateFunctionCodeRequest(); + updateCodeRequest.withFunctionName(functionArn); + UpdateFunctionCodeResult mockCodeResult = new UpdateFunctionCodeResult(); + doReturn(mockCodeResult).when(lambdaClient).updateFunctionCode(updateCodeRequest); + + UpdateFunctionCodeResult output = updateCodeOperation.operate(null); + verify(updateCodeOperation, atLeastOnce()).updateTaskStatus(anyString()); + assertThat(output).isEqualTo(mockCodeResult); + } + + @Test + void testUpdateLambdaConfig() { + // TODO + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaCachingAgentTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaCachingAgentTest.java new file mode 100644 index 00000000000..1ecce046ab6 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/provider/agent/LambdaCachingAgentTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2021 Armory, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.lambda.provider.agent; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; +import com.netflix.spinnaker.clouddriver.lambda.cache.Keys; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class LambdaCachingAgentTest { + private ObjectMapper objectMapper = new ObjectMapper(); + private AmazonClientProvider clientProvider = mock(AmazonClientProvider.class); + private String REGION = "us-west-2"; + private NetflixAmazonCredentials netflixAmazonCredentials = mock(NetflixAmazonCredentials.class); + private LambdaServiceConfig config = mock(LambdaServiceConfig.class); + private ServiceLimitConfiguration serviceLimitConfiguration = + mock(ServiceLimitConfiguration.class); + private LambdaCachingAgent lambdaCachingAgent; + private final ProviderCache cache = mock(ProviderCache.class); + + @BeforeEach + public void setup() { + when(serviceLimitConfiguration.getLimit(any(), any(), any(), any(), any())).thenReturn(1.0); + lambdaCachingAgent = + new LambdaCachingAgent( + objectMapper, + clientProvider, + netflixAmazonCredentials, + REGION, + config, + serviceLimitConfiguration); + } + + @Test + public void shouldGetAuthoritativeName() { + assertThat(lambdaCachingAgent.getAuthoritativeKeyName()).isEqualTo("lambdaFunctions"); + } + + @Test + public void shouldReturnEvictions() { + when(netflixAmazonCredentials.getName()).thenReturn("test-account"); + + Map attributes = new HashMap<>(); + attributes.put("functionName", "function-3"); + Collection data = new HashSet<>(); + data.add( + new DefaultCacheData( + Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-3"), + attributes, + Collections.emptyMap())); + + HashSet oldKeys = new HashSet<>(); + oldKeys.add( + Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-1")); + oldKeys.add( + Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-2")); + + when(cache.getIdentifiers(any())).thenReturn(oldKeys); + + Map> evictions = + lambdaCachingAgent.computeEvictableData(data, cache); + + assertThat(evictions.get(lambdaCachingAgent.getAuthoritativeKeyName()).size()).isEqualTo(2); + assertThat(evictions.get(lambdaCachingAgent.getAuthoritativeKeyName())).isEqualTo(oldKeys); + } + + @Test + public void shouldNotEvictionNewData() { + when(netflixAmazonCredentials.getName()).thenReturn("test-account"); + + Map attributes = new HashMap<>(); + attributes.put("functionName", "function-1"); + Collection data = new HashSet<>(); + data.add( + new DefaultCacheData( + Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-1"), + attributes, + Collections.emptyMap())); + + Collection oldKeys = + List.of( + 
Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-1"), + Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-2")); + + when(cache.getIdentifiers(any())).thenReturn(oldKeys); + + Map> evictions = + lambdaCachingAgent.computeEvictableData(data, cache); + + assertThat(evictions.get(lambdaCachingAgent.getAuthoritativeKeyName()).size()).isEqualTo(1); + assertThat(evictions.get(lambdaCachingAgent.getAuthoritativeKeyName()).stream().findAny().get()) + .isNotEqualTo( + Keys.getLambdaFunctionKey(netflixAmazonCredentials.getName(), REGION, "function-1")); + } + + @Test + public void buildCacheDataShouldAddInfo() { + ConcurrentHashMap lambdaCacheData = new ConcurrentHashMap<>(); + ConcurrentHashMap> appLambdaRelationships = + new ConcurrentHashMap<>(); + List> allLambdas = + List.of( + Map.of("functionName", "appName-functionName-something"), + Map.of("functionName", "appName2-functionName2-something2")); + + lambdaCachingAgent.buildCacheData(lambdaCacheData, appLambdaRelationships, allLambdas); + + assertThat(lambdaCacheData.size()).isEqualTo(2); + assertThat(appLambdaRelationships.size()).isEqualTo(2); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/provider/view/LambdaFunctionProviderTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/provider/view/LambdaFunctionProviderTest.java new file mode 100644 index 00000000000..9c30e109796 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/provider/view/LambdaFunctionProviderTest.java @@ -0,0 +1,72 @@ +package com.netflix.spinnaker.clouddriver.lambda.provider.view; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.lambda.cache.Keys.Namespace.LAMBDA_FUNCTIONS; +import static java.util.Collections.emptyMap; +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import com.netflix.spinnaker.clouddriver.lambda.deploy.ops.LambdaTestingDefaults; +import com.netflix.spinnaker.clouddriver.model.Function; +import java.util.List; +import java.util.Set; +import org.junit.jupiter.api.Test; + +class LambdaFunctionProviderTest implements LambdaTestingDefaults { + + @Test + void getApplicationFunctionsWithApp() { + String applicationName = "lambdaTest"; + String appKey = + com.netflix.spinnaker.clouddriver.aws.data.Keys.getApplicationKey(applicationName); + String functionKey = "functionKey"; + + Cache cache = mock(Cache.class); + when(cache.get(APPLICATIONS.ns, appKey)) + .thenReturn( + new DefaultCacheData( + appKey, emptyMap(), ImmutableMap.of(LAMBDA_FUNCTIONS.ns, List.of(functionKey)))); + + when(cache.get(LAMBDA_FUNCTIONS.ns, functionKey)) + .thenReturn( + new DefaultCacheData( + appKey, ImmutableMap.of(LAMBDA_FUNCTIONS.ns, functionKey), emptyMap())); + + Set applicationFunctions = + new LambdaFunctionProvider(cache).getApplicationFunctions(applicationName); + + assertEquals(1, applicationFunctions.size()); + verify(cache, times(1)).get(APPLICATIONS.ns, appKey); + verify(cache, times(1)).get(LAMBDA_FUNCTIONS.ns, functionKey); + } + + @Test + void getApplicationFunctionsWithoutApp() { + String applicationName = "lambdaTest"; + String appKey = + 
com.netflix.spinnaker.clouddriver.aws.data.Keys.getApplicationKey(applicationName); + String functionKey = "functionKey"; + + Cache cache = mock(Cache.class); + when(cache.get(APPLICATIONS.ns, appKey)).thenReturn(null); + + when(cache.getAll(LAMBDA_FUNCTIONS.ns)) + .thenReturn( + List.of( + new DefaultCacheData( + appKey, + ImmutableMap.of("functionName", applicationName + "-" + functionKey), + ImmutableMap.of(LAMBDA_FUNCTIONS.ns, List.of(functionKey))))); + + Set applicationFunctions = + new LambdaFunctionProvider(cache).getApplicationFunctions(applicationName); + + assertEquals(1, applicationFunctions.size()); + verify(cache, times(1)).get(APPLICATIONS.ns, appKey); + verify(cache, times(1)).getAll(LAMBDA_FUNCTIONS.ns); + verify(cache, times(0)).get(LAMBDA_FUNCTIONS.ns, functionKey); + } +} diff --git a/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/service/LambdaServiceTest.java b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/service/LambdaServiceTest.java new file mode 100644 index 00000000000..a02c61a4fc4 --- /dev/null +++ b/clouddriver-lambda/src/test/java/com/netflix/spinnaker/clouddriver/lambda/service/LambdaServiceTest.java @@ -0,0 +1,158 @@ +package com.netflix.spinnaker.clouddriver.lambda.service; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.amazonaws.services.lambda.AWSLambda; +import com.amazonaws.services.lambda.model.FunctionConfiguration; +import com.amazonaws.services.lambda.model.GetFunctionResult; +import com.amazonaws.services.lambda.model.GetPolicyResult; +import com.amazonaws.services.lambda.model.ListFunctionsResult; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.core.limits.ServiceLimitConfiguration; +import com.netflix.spinnaker.config.LambdaServiceConfig; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class LambdaServiceTest { + + private ObjectMapper objectMapper = new ObjectMapper(); + private AmazonClientProvider clientProvider = mock(AmazonClientProvider.class); + private LambdaServiceConfig lambdaServiceConfig = mock(LambdaServiceConfig.class); + private ServiceLimitConfiguration serviceLimitConfiguration = + mock(ServiceLimitConfiguration.class); + private String REGION = "us-west-2"; + private NetflixAmazonCredentials netflixAmazonCredentials = mock(NetflixAmazonCredentials.class); + + @BeforeEach + public void makeSureBaseSettings() { + when(netflixAmazonCredentials.getLambdaEnabled()).thenReturn(true); + } + + @Test + void getAllFunctionsWhenFunctionsResultIsNullExpectEmpty() throws InterruptedException { + when(lambdaServiceConfig.getRetry()).thenReturn(new LambdaServiceConfig.Retry()); + when(serviceLimitConfiguration.getLimit(any(), any(), any(), any(), any())).thenReturn(1.0); + AWSLambda lambda = mock(AWSLambda.class); // returns null by default + when(clientProvider.getAmazonLambda(any(), any(), any())).thenReturn(lambda); + + LambdaService lambdaService = + new LambdaService( + clientProvider, netflixAmazonCredentials, REGION, objectMapper, lambdaServiceConfig); + + List> allFunctions = lambdaService.getAllFunctions(); + + assertEquals(0, allFunctions.size()); + } 
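+ /* + * Editorial note (a sketch, assuming getAllFunctions pages the same way as + * listAllFunctionConfigurations in LambdaService): the bare AWSLambda mock above returns null + * from listFunctions, so the marker-paging loop bails out before hydrating anything: + * + *   ListFunctionsResult result = lambda.listFunctions(request); + *   if (result == null) { break; } // nothing stubbed -> null result -> no functions + */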
+ + @Test + void getAllFunctionsWhenFunctionsResultIsEmptyExpectEmpty() throws InterruptedException { + when(lambdaServiceConfig.getRetry()).thenReturn(new LambdaServiceConfig.Retry()); + when(serviceLimitConfiguration.getLimit(any(), any(), any(), any(), any())).thenReturn(1.0); + + ListFunctionsResult functionsResult = mock(ListFunctionsResult.class); + when(functionsResult.getFunctions()).thenReturn(List.of()); // Empty list + + AWSLambda lambda = mock(AWSLambda.class); + when(lambda.listFunctions()).thenReturn(functionsResult); + when(clientProvider.getAmazonLambda(any(), any(), any())).thenReturn(lambda); + + LambdaService lambdaService = + new LambdaService( + clientProvider, netflixAmazonCredentials, REGION, objectMapper, lambdaServiceConfig); + + List<Map<String, Object>> allFunctions = lambdaService.getAllFunctions(); + + assertEquals(0, allFunctions.size()); + } + + @Test + void getAllFunctionsWhenFunctionNameIsEmptyExpectEmpty() throws InterruptedException { + when(lambdaServiceConfig.getRetry()).thenReturn(new LambdaServiceConfig.Retry()); + when(serviceLimitConfiguration.getLimit(any(), any(), any(), any(), any())).thenReturn(1.0); + + ListFunctionsResult functionsResult = mock(ListFunctionsResult.class); + when(functionsResult.getFunctions()).thenReturn(List.of(new FunctionConfiguration())); + + AWSLambda lambda = mock(AWSLambda.class); + when(lambda.listFunctions(any())).thenReturn(functionsResult); + when(clientProvider.getAmazonLambda(any(), any(), any())).thenReturn(lambda); + + LambdaService lambdaService = + new LambdaService( + clientProvider, netflixAmazonCredentials, REGION, objectMapper, lambdaServiceConfig); + + List<Map<String, Object>> allFunctions = lambdaService.getAllFunctions(); + + assertEquals(0, allFunctions.size()); + } + + @Test + void getAllFunctionsWhenFunctionNameIsNotEmptyExpectNotEmpty() throws InterruptedException { + when(lambdaServiceConfig.getRetry()).thenReturn(new LambdaServiceConfig.Retry()); + when(serviceLimitConfiguration.getLimit(any(), any(), any(), any(), any())).thenReturn(1.0); + + ListFunctionsResult functionsResult = mock(ListFunctionsResult.class); + FunctionConfiguration functionConfiguration = new FunctionConfiguration(); + functionConfiguration.setFunctionName("testFunction"); + when(functionsResult.getFunctions()).thenReturn(List.of(functionConfiguration)); + + AWSLambda lambda = mock(AWSLambda.class); + when(lambda.listFunctions(any())).thenReturn(functionsResult); + GetFunctionResult functionResult = new GetFunctionResult(); + functionResult.setConfiguration(functionConfiguration); + when(lambda.getFunction(any())).thenReturn(functionResult); + GetPolicyResult getPolicyResult = new GetPolicyResult(); + getPolicyResult.setPolicy( + "{\n" + + " \"Version\": \"2012-10-17\",\n" + + " \"Statement\": [\n" + + " {\n" + + " \"Sid\": \"FirstStatement\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": [\"iam:ChangePassword\"],\n" + + " \"Resource\": \"*\"\n" + + " },\n" + + " {\n" + + " \"Sid\": \"SecondStatement\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": \"s3:ListAllMyBuckets\",\n" + + " \"Resource\": \"*\"\n" + + " },\n" + + " {\n" + + " \"Sid\": \"ThirdStatement\",\n" + + " \"Effect\": \"Allow\",\n" + + " \"Principal\": {\"AWS\":[ \"elasticloadbalancing.amazonaws.com\"]},\n" + + " \"Action\": [\n" + + " \"lambda:InvokeFunction\",\n" + + " \"s3:List*\",\n" + + " \"s3:Get*\"\n" + + " ],\n" + + " \"Resource\": [\n" + + " \"arn:aws:s3:::confidential-data\",\n" + + " \"arn:aws:s3:::confidential-data/*\"\n" + + " ],\n" + + " \"Condition\": {\"ArnLike\":{
\"AWS:SourceArn\": \"arn:aws:elasticloadbalancing:something:something:targetgroup/targetGroupName/abc\"}}\n" + + " }\n" + + " ]\n" + + "}"); + when(lambda.getPolicy(any())).thenReturn(getPolicyResult); + when(clientProvider.getAmazonLambda(any(), any(), any())).thenReturn(lambda); + + LambdaService lambdaService = + new LambdaService( + clientProvider, netflixAmazonCredentials, REGION, objectMapper, lambdaServiceConfig); + + List> allFunctions = lambdaService.getAllFunctions(); + + assertEquals(1, allFunctions.size()); + Map function = allFunctions.get(0); + assertEquals("testFunction", function.get("functionName")); + } +} diff --git a/clouddriver-openstack/clouddriver-openstack.gradle b/clouddriver-openstack/clouddriver-openstack.gradle deleted file mode 100644 index 7e3a132ae59..00000000000 --- a/clouddriver-openstack/clouddriver-openstack.gradle +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -ext { - openstack4jVersion = '3.1.0' - commonsNetVersion = '3.5' - commonsIOVersion = '1.3.2' -} - -dependencies { - compile project(":clouddriver-core") - compile project(":clouddriver-consul") - - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') - - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${spinnaker.version('jackson')}" - compile "org.pacesys:openstack4j-core:$openstack4jVersion" - compile "org.pacesys.openstack4j.connectors:openstack4j-jersey2:$openstack4jVersion" - compile "commons-net:commons-net:$commonsNetVersion" - compile "org.apache.commons:commons-io:$commonsIOVersion" -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackCloudProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackCloudProvider.groovy deleted file mode 100644 index 87cc4004390..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackCloudProvider.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack - -import com.netflix.spinnaker.clouddriver.core.CloudProvider -import org.springframework.stereotype.Component - -import java.lang.annotation.Annotation - -/** - * Openstack declaration as a {@link CloudProvider}. 
- */ -@Component -class OpenstackCloudProvider implements CloudProvider { - static final String ID = "openstack" - final String id = ID - final String displayName = "Openstack" - final Class<? extends Annotation> operationAnnotationType = OpenstackOperation -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackOperation.groovy deleted file mode 100644 index eac94a5ccfa..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackOperation.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack - -import java.lang.annotation.ElementType -import java.lang.annotation.Retention -import java.lang.annotation.RetentionPolicy -import java.lang.annotation.Target - - -/** - * {@code OpenstackOperation}s specify implementation classes of Spinnaker AtomicOperations for Openstack. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@interface OpenstackOperation { - String value() -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/CacheResultBuilder.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/CacheResultBuilder.groovy deleted file mode 100644 index 349b29b7688..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/CacheResultBuilder.groovy +++ /dev/null @@ -1,106 +0,0 @@ -/* - * - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.cache - -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import groovy.util.logging.Slf4j - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND - -//TODO - Move into core.
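-
- // A hypothetical usage sketch (names are illustrative): a caching agent gathers
- // keep/evict mutations per namespace, then emits a single result:
- //   def builder = new CacheResultBuilder(startTime: System.currentTimeMillis())
- //   builder.namespace('instances').keep('some-key').attributes.foo = 'bar'
- //   DefaultCacheResult result = builder.build()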
-@Slf4j -class CacheResultBuilder { - - Long startTime - - CacheMutation onDemand = new CacheMutation() - - Map<String, NamespaceBuilder> namespaceBuilders = [:].withDefault { - String ns -> new NamespaceBuilder(namespace: ns) - } - - NamespaceBuilder namespace(String ns) { - namespaceBuilders.get(ns) - } - - DefaultCacheResult build() { - Map<String, Collection<CacheData>> keep = [:] - Map<String, Collection<String>> evict = [:] - - if (!onDemand.toKeep.empty) { - keep += [(ON_DEMAND.ns): onDemand.toKeep.values()] - } - if (!onDemand.toEvict.empty) { - evict += [(ON_DEMAND.ns): onDemand.toEvict] - } - namespaceBuilders.each { String namespace, NamespaceBuilder nsBuilder -> - def buildResult = nsBuilder.build() - if (!buildResult.toKeep.empty) { - keep += [(namespace): buildResult.toKeep.values()] - } - if (!buildResult.toEvict.empty) { - evict += [(namespace): buildResult.toEvict] - } - } - - new DefaultCacheResult(keep, evict) - } - - class NamespaceBuilder { - String namespace - - private Map<String, CacheDataBuilder> toKeep = [:].withDefault { - String id -> new CacheDataBuilder(id: id) - } - - private List<String> toEvict = [] - - CacheDataBuilder keep(String key) { - toKeep.get(key) - } - - int keepSize() { - toKeep.size() - } - - CacheMutation build() { - def keepers = toKeep.collectEntries { k, b -> [(k): b.build()] } - new CacheMutation(toKeep: keepers, toEvict: toEvict) - } - } - - class CacheMutation { - // CacheData.id -> CacheData - Map<String, CacheData> toKeep = [:] - List<String> toEvict = [] - } - - class CacheDataBuilder { - String id = '' - int ttlSeconds = -1 - Map<String, Object> attributes = [:] - Map<String, Collection<String>> relationships = [:].withDefault({ _ -> [] as Set }) - - public DefaultCacheData build() { - new DefaultCacheData(id, ttlSeconds, attributes, relationships) - } - } -} - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/Keys.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/Keys.groovy deleted file mode 100644 index 10787e13e77..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/Keys.groovy +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.cache - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import groovy.transform.CompileStatic - -import static com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider.ID - -/** - * Defines custom namespaces, keys for Openstack caching ... Encapsulates parsing logic for keys across - * providers.
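- *
- * For example (hypothetical values), the instance key built by getInstanceKey below
- * has the form openstack:instances:{account}:{region}:{instanceId}, and parse()
- * recovers the account, region and instanceId segments from such a key.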
- */ -@CompileStatic -class Keys { - - static enum Namespace { - NETWORKS, - SUBNETS, - INSTANCES, - INSTANCE_TYPES, - APPLICATIONS, - CLUSTERS, - SERVER_GROUPS, - SECURITY_GROUPS, - LOAD_BALANCERS, - VIPS, - FLOATING_IPS, - PORTS, - IMAGES, - ON_DEMAND - - final String ns - - private Namespace() { - def parts = name().split('_') - - ns = parts.tail().inject(new StringBuilder(parts.head().toLowerCase())) { val, next -> val.append(next.charAt(0)).append(next.substring(1).toLowerCase()) } - } - - String toString() { - ns - } - } - - static Map parse(String key) { - def result = [:] - - def parts = key.split(':') - - if (parts.length > 2) { - String provider = parts[0] - if (provider == OpenstackCloudProvider.ID) { - String type = parts[1] - - switch (type) { - case Namespace.INSTANCES.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], instanceId: parts[4]] - } - break - case Namespace.INSTANCE_TYPES.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], instanceTypeId: parts[4]] - } - break - case Namespace.APPLICATIONS.ns: - if (parts.length == 3) { - result << [application: parts[2].toLowerCase()] - } - break - case Namespace.CLUSTERS.ns: - if (parts.length == 5) { - def names = Names.parseName(parts[4]) - result << [application: parts[3].toLowerCase(), account: parts[2], cluster: parts[4], stack: names.stack, detail: names.detail] - } - break - case Namespace.SUBNETS.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], id: parts[4]] - } - break - case Namespace.NETWORKS.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], id: parts[4]] - } - break - case Namespace.SECURITY_GROUPS.ns: - if (parts.length == 6) { - def names = Names.parseName(parts[2]) - result << [application: names.app, name: parts[2], id: parts[3], region: parts[4], account: parts[5]] - } - break - case Namespace.IMAGES.ns: - if (parts.length == 5) - result << [account: parts[2], region: parts[3], imageId: parts[4]] - break - case Namespace.SERVER_GROUPS.ns: - def names = Names.parseName(parts[5]) - if (parts.length == 6) { - result << [application: names.app.toLowerCase(), cluster: parts[2], account: parts[3], region: parts[4], serverGroup: parts[5], stack: names.stack, detail: names.detail, sequence: names.sequence?.toString()] - } - break - case Namespace.LOAD_BALANCERS.ns: - if (parts.length == 6) { - result << [account: parts[2], region: parts[3], id: parts[4], name: parts[5]] - } - break - case Namespace.VIPS.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], id: parts[4]] - } - break - case Namespace.FLOATING_IPS.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], id: parts[4]] - } - break - case Namespace.PORTS.ns: - if (parts.length == 5) { - result << [account: parts[2], region: parts[3], id: parts[4]] - } - break - } - - if (!result.isEmpty()) { - result << [provider: provider, type: type] - } - } - } - result.isEmpty() ? 
null : result - } - - static String getInstanceKey(String instanceId, String account, String region) { - "${ID}:${Namespace.INSTANCES}:${account}:${region}:${instanceId}" - } - - static String getSubnetKey(String subnetId, String account, String region) { - "${ID}:${Namespace.SUBNETS}:${account}:${region}:${subnetId}" - } - - static String getApplicationKey(String application) { - "${ID}:${Namespace.APPLICATIONS}:${application.toLowerCase()}" - } - - static String getServerGroupKey(String serverGroupName, String account, String region) { - Names names = Names.parseName(serverGroupName) - "${ID}:${Namespace.SERVER_GROUPS}:${names.cluster}:${account}:${region}:${names.group}" - } - - //this one works with wildcards - static String getServerGroupKey(String cluster, String serverGroupName, String account, String region) { - "${ID}:${Namespace.SERVER_GROUPS}:${cluster}:${account}:${region}:${serverGroupName}" - } - - static String getClusterKey(String account, String application, String clusterName) { - "${ID}:${Namespace.CLUSTERS}:${account}:${application}:${clusterName}" - } - - static String getNetworkKey(String networkId, String account, String region) { - "${ID}:${Namespace.NETWORKS}:${account}:${region}:${networkId}" - } - - static String getSecurityGroupKey(String securityGroupName, String securityGroupId, String account, String region) { - "${ID}:${Namespace.SECURITY_GROUPS}:${securityGroupName}:${securityGroupId}:${region}:${account}" - } - - //loadBalancerName = appname or appname-stack or appname-stack-lbdescription - static String getLoadBalancerKey(String loadBalancerName, String loadBalancerId, String account, String region) { - "${ID}:${Namespace.LOAD_BALANCERS}:${account}:${region}:${loadBalancerId}:${loadBalancerName}" - } - - static String getVipKey(String vipId, String account, String region) { - "${ID}:${Namespace.VIPS}:${account}:${region}:${vipId}" - } - - static String getFloatingIPKey(String ipId, String account, String region) { - "${ID}:${Namespace.FLOATING_IPS}:${account}:${region}:${ipId}" - } - - static String getPortKey(String portId, String account, String region) { - "${ID}:${Namespace.PORTS}:${account}:${region}:${portId}" - } - - static String getImageKey(String imageId, String account, String region) { - "${ID}:${Namespace.IMAGES}:${account}:${region}:${imageId}" - } - - static String getInstanceTypeKey(String instanceType, String account, String region) { - "${ID}:${Namespace.INSTANCE_TYPES}:${account}:${region}:${instanceType}" - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OnDemandAware.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OnDemandAware.groovy deleted file mode 100644 index 8021434edb9..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OnDemandAware.groovy +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.cache - -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandResult -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.openstack.provider.view.MutableCacheData - -import java.util.concurrent.TimeUnit - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.ON_DEMAND - - -trait OnDemandAware { - - /** - * Helper method to inspect onDemand 'toKeep' cache to see if the cacheData should be used - * instead of recreating it. - * @param cacheResultBuilder - * @param key - * @return - */ - boolean shouldUseOnDemandData(CacheResultBuilder cacheResultBuilder, String key) { - CacheData cacheData = cacheResultBuilder.onDemand.toKeep[key] - cacheData ? cacheData.attributes.cacheTime >= cacheResultBuilder.startTime : false - } - - /** - * Generically will move cached data from onDemand to resource (Server Group, Load Balancer, Security Group) namespace. - * @param objectMapper - * @param cacheResultBuilder - * @param serverGroupKey - */ - void moveOnDemandDataToNamespace(ObjectMapper objectMapper, TypeReference<Map<String, List<MutableCacheData>>> typeReference, CacheResultBuilder cacheResultBuilder, String key) { - String cacheResults = cacheResultBuilder.onDemand.toKeep[key].attributes.cacheResults as String - Map<String, List<MutableCacheData>> onDemandData = objectMapper.readValue(cacheResults, typeReference) - - onDemandData.each { String namespace, List<MutableCacheData> cacheDatas -> - cacheDatas.each { MutableCacheData cacheData -> - cacheResultBuilder.namespace(namespace).keep(cacheData.id).with { - it.attributes = cacheData.attributes - it.relationships = cacheData.relationships - } - cacheResultBuilder.onDemand.toKeep.remove(cacheData.id) - } - } - } - - /** - * Finds all on demand cache data for a given account and region. - * @param providerCache - * @param accountName - * @param region - * @return - */ - Collection getAllOnDemandCacheByRegionAndAccount(ProviderCache providerCache, String accountName, String region) { - Collection<String> keys = providerCache.getIdentifiers(ON_DEMAND.ns).findAll { String key -> - Map parsedKey = Keys.parse(key) - parsedKey && parsedKey.account == accountName && parsedKey.region == region - } - - providerCache.getAll(ON_DEMAND.ns, keys).collect { CacheData cacheData -> - [ - details : Keys.parse(cacheData.id), - cacheTime : cacheData.attributes.cacheTime, - processedCount: cacheData.attributes.processedCount, - processedTime : cacheData.attributes.processedTime - ] - } - } - - /** - * Builds on demand cache result based upon the cache result. - * @param onDemandAgentType - * @param cacheResult - * @param namespace - * @param key - * @param providerCache - * @return - */ - OnDemandResult buildOnDemandCache(Object cachedItem, String onDemandAgentType, CacheResult cacheResult, String namespace, String key) { - OnDemandResult result = new OnDemandResult(sourceAgentType: onDemandAgentType, cacheResult: cacheResult, evictions: [:].withDefault { _ -> [] }) - - if (!cachedItem && key) { - // Evict this cached item if it no longer exists.
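- // (e.g. a server group deleted out-of-band: callers pass cachedItem == null and
- // the stale on-demand key is queued for eviction below)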
- result.evictions[namespace] << key - } - - result - } - - /** - * Resolves a key that contains a wildcard but still returns a unique result. - * @param providerCache - * @param namespace - * @param key - * @return - */ - String resolveKey(ProviderCache providerCache, String namespace, String key) { - String result = key - if (key.contains('*')) { - Collection identifiers = providerCache.filterIdentifiers(namespace, key) - if (identifiers && identifiers.size() == 1) { - result = identifiers.first() - } else { - throw new UnresolvableKeyException("Unable to resolve ${key}") - } - } - result - } - - /** - * Processes the on-demand cache by removing the record if the cache result is empty or - * by adding the cache record to ON_DEMAND namespace. - * @param cacheResult - * @param objectMapper - * @param metricsSupport - * @param providerCache - * @param namespace - * @param key - * @return - */ - void processOnDemandCache(CacheResult cacheResult, ObjectMapper objectMapper, OnDemandMetricsSupport metricsSupport, ProviderCache providerCache, String key) { - - if (cacheResult.cacheResults.values().flatten().isEmpty()) { - // Avoid writing an empty onDemand cache record (instead delete any that may have previously existed). - providerCache.evictDeletedItems(ON_DEMAND.ns, [key]) - } else { - metricsSupport.onDemandStore { - CacheData cacheData = new DefaultCacheData( - key, - TimeUnit.MINUTES.toSeconds(10) as Integer, // ttl - [ - cacheTime : System.currentTimeMillis(), - cacheResults : objectMapper.writeValueAsString(cacheResult.cacheResults), - processedCount: 0, - processedTime : null - ], - [:] - ) - - providerCache.putCacheData(ON_DEMAND.ns, cacheData) - } - } - } - - /** - * Method is a template for handling ON_DEMAND cache normal load data scenarios. It will - * check to see that ON_DEMAND data is evicted or keep based upon timestamp and process count. At the end of processing - * it will set processing time and increment count by 1. - * @param providerCache - * @param keys - * @param cacheResultClosure - * @return - */ - CacheResult buildLoadDataCache(ProviderCache providerCache, List keys, Closure cacheResultClosure) { - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(startTime: System.currentTimeMillis()) - - providerCache.getAll(ON_DEMAND.ns, keys).each { CacheData cacheData -> - // Ensure that we don't overwrite data that was inserted by the `handle` method while we retrieved the - // replication controllers. Furthermore, cache data that hasn't been processed needs to be updated in the ON_DEMAND - // cache, so don't evict data without a processedCount > 0. 
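- // Illustrative timestamps: a record cached at t=100 with processedCount 1 is evicted
- // by a load cycle that starts at t=200, while a record with processedCount 0 is kept
- // so this cycle can still process it.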
- if (cacheData.attributes.cacheTime < cacheResultBuilder.startTime && cacheData.attributes.processedCount > 0) { - cacheResultBuilder.onDemand.toEvict << cacheData.id - } else { - cacheResultBuilder.onDemand.toKeep[cacheData.id] = cacheData - } - } - - CacheResult result = cacheResultClosure.call(cacheResultBuilder) - - result.cacheResults[ON_DEMAND.ns].each { - it.attributes.processedTime = System.currentTimeMillis() - it.attributes.processedCount = (it.attributes.processedCount ?: 0) + 1 - } - - result - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/UnresolvableKeyException.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/UnresolvableKeyException.groovy deleted file mode 100644 index 69fccdd0164..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/UnresolvableKeyException.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.cache - -class UnresolvableKeyException extends RuntimeException { - - public UnresolvableKeyException(String message) { - super(message) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/BlockingStatusChecker.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/BlockingStatusChecker.groovy deleted file mode 100644 index dccb544c231..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/BlockingStatusChecker.groovy +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import groovy.transform.PackageScope -import lombok.SneakyThrows - -import java.util.concurrent.TimeUnit - -/** - * This class abstracts the algorithm to continually poll until a status is obtained or a timeout occurs. Openstack - * API requires that the load balancer be in an ACTIVE state for it to create associated relationships (i.e. listeners, pools, - * monitors). Each modification will cause the load balancer to go into a PENDING state and back to ACTIVE once the change - * has been made. 
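- *
- * A hypothetical usage sketch (poll settings and the readiness test are illustrative):
- *   def checker = BlockingStatusChecker.from(60, 5, { lb -> lb?.provisioningStatus?.name() == 'ACTIVE' } as StatusChecker)
- *   def lb = checker.execute { client.networking().lbaasV2().loadbalancer().get(loadBalancerId) }
- *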
Depending on your implementation, the timeout and polling intervals would need to be tweaked. - */ -class BlockingStatusChecker<T> { - final long timeout - final long pollInterval - final StatusChecker<T> statusChecker - - private BlockingStatusChecker(StatusChecker<T> statusChecker, long timeout, long pollInterval) { - this.statusChecker = statusChecker - this.timeout = timeout - this.pollInterval = pollInterval - } - - /** - * Creation method. - * @param pollTimeout - defined in seconds - * @param pollInterval - defined in seconds - * @param s - * @return - */ - static <T> BlockingStatusChecker<T> from(long pollTimeout, long pollInterval, StatusChecker<T> s) { - new BlockingStatusChecker(s, TimeUnit.SECONDS.toMillis(pollTimeout), TimeUnit.SECONDS.toMillis(pollInterval)) - } - - @PackageScope - @SneakyThrows // used for the Thread.sleep(pollInterval) - T execute(Closure<T> closure) { - long startTime = System.currentTimeMillis() - T result - - while(true) { - result = closure.call() - if (statusChecker.isReady(result)) { - return result - } - if ((System.currentTimeMillis() - startTime) > timeout) { - throw new OpenstackProviderException('Operation timed out') - } - sleep(pollInterval) - } - } - - @PackageScope - void execute() { - execute ( { null } ) - } - - static interface StatusChecker<T> { - boolean isReady(T input) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackClientProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackClientProvider.groovy deleted file mode 100644 index 2e42892341c..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackClientProvider.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -/** - * Provides access to the Openstack API.
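- * Each sub-provider below is exposed through Groovy's {@code @Delegate}, so callers
- * see one flat client API rather than seven separate providers.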
- */ -class OpenstackClientProvider { - - @Delegate - OpenstackIdentityProvider identityProvider - - @Delegate - OpenstackComputeV2Provider computeProvider - - @Delegate - OpenstackNetworkingProvider networkingProvider - - @Delegate - OpenstackOrchestrationProvider orchestrationProvider - - @Delegate - OpenstackImageProvider imageProvider - - @Delegate - OpenstackLoadBalancerProvider loadBalancerProvider - - @Delegate - OpenstackSwiftProvider swiftProvider - - public OpenstackClientProvider(OpenstackIdentityProvider identityProvider, - OpenstackComputeV2Provider computeProvider, - OpenstackNetworkingProvider networkingProvider, - OpenstackOrchestrationProvider orchestrationProvider, - OpenstackImageProvider imageProvider, - OpenstackLoadBalancerProvider loadBalancerProvider, - OpenstackSwiftProvider swiftProvider) { - this.identityProvider = identityProvider - this.computeProvider = computeProvider - this.networkingProvider = networkingProvider - this.orchestrationProvider = orchestrationProvider - this.imageProvider = imageProvider - this.loadBalancerProvider = loadBalancerProvider - this.swiftProvider = swiftProvider - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeProvider.groovy deleted file mode 100644 index 57f1763839e..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeProvider.groovy +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import org.openstack4j.model.compute.Address -import org.openstack4j.model.compute.Flavor -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.compute.IPProtocol -import org.openstack4j.model.compute.RebootType -import org.openstack4j.model.compute.SecGroupExtension -import org.openstack4j.model.compute.Server -import org.openstack4j.model.compute.ext.AvailabilityZone - -/** - * Methods for interacting with the current compute api. - */ -interface OpenstackComputeProvider { - - /** - * Requests a list of the availability zones in a given region. - * @param region - * @return - */ - List getZones(String region) - - /** - * Returns a list of instances in a given region. - * @param region - * @return - */ - List getInstances(String region) - - /** - * Returns all of the console output for a given server and region. - * @param region - * @param serverId - * @return - */ - String getConsoleOutput(String region, String serverId) - - /** - * Delete an instance. - * @param instanceId - * @return - */ - void deleteInstance(String region, String instanceId) - - /** - * Reboot an instance. 
- * @param region - * @param instanceId - * @param rebootType - */ - void rebootInstance(String region, String instanceId, RebootType rebootType) - - /** - * Reboot an instance ... Default to SOFT reboot. - * @param region - * @param instanceId - */ - void rebootInstance(String region, String instanceId) - - /** - * Get an unallocated IP from the network, or if none are found, try to create a new floating IP in the network. - * @param region - * @param networkName - * @return - */ - FloatingIP getOrCreateFloatingIp(final String region, final String networkName) - - /** - * List all floating ips in the region. - * @param region - * @return - */ - List listFloatingIps(final String region) - - /** - * Deletes a security group. - * - * @param region the region the security group is in - * @param securityGroupId id of the security group - */ - void deleteSecurityGroup(String region, String securityGroupId) - - /** - * Deletes a security group rule - * @param region the region to delete the rule from - * @param id id of the rule to delete - */ - void deleteSecurityGroupRule(String region, String id) - /** - * Creates a security group rule. - * - * If the rule is for TCP or UDP, the fromPort and toPort are used. For ICMP rules, the icmpType and icmpCode are used instead. - * - * @param region the region to create the rule in - * @param securityGroupId id of the security group which this rule belongs to - * @param protocol the protocol of the rule - * @param cidr the cidr for the rule - * @param remoteSecurityGroupId id of security group referenced by this rule - * @param fromPort the fromPort for the rule - * @param toPort the toPort for the rule - * @param icmpType the type of the ICMP control message - * @param icmpCode the code or subtype of the ICMP control message - * @return the created rule - */ - SecGroupExtension.Rule createSecurityGroupRule(String region, - String securityGroupId, - IPProtocol protocol, - String cidr, - String remoteSecurityGroupId, - Integer fromPort, - Integer toPort, - Integer icmpType, - Integer icmpCode) - - /** - * Updates a security group with the new name and description - * @param region the region the security group is in - * @param id the id of the security group to update - * @param name the new name for the security group - * @param description the new description for the security group - * @return the updated security group - */ - SecGroupExtension updateSecurityGroup(String region, String id, String name, String description) - - /** - * Creates a security group with the given name and description - * @return the created security group - */ - SecGroupExtension createSecurityGroup(String region, String name, String description) - - /** - * Returns the security group for the given id. - * @param region the region to look up the security group in - * @param id id of the security group. - */ - SecGroupExtension getSecurityGroup(String region, String id) - - /** - * Returns the list of all security groups for the given region - */ - List getSecurityGroups(String region) - - /** - * Get a compute server based on id. - * @param instanceId - * @return - */ - Server getServerInstance(String region, String instanceId) - - /** - * Returns a list of flavors by region. - * @param region - * @return - */ - List listFlavors(String region) - - /** - * Get the first v4 IP address from a server. - * @param server - * @return - */ - String getIpForInstance(String region, String instanceId) - - /** - * Get all addresses for a server instance.
- * @param region - * @param instanceId - * @return - */ - List getIpsForInstance(String region, String instanceId) -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeV2Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeV2Provider.groovy deleted file mode 100644 index becdfc6f690..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeV2Provider.groovy +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import org.apache.commons.lang.StringUtils -import org.openstack4j.api.Builders -import org.openstack4j.model.compute.Address -import org.openstack4j.model.compute.Flavor -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.compute.IPProtocol -import org.openstack4j.model.compute.RebootType -import org.openstack4j.model.compute.SecGroupExtension -import org.openstack4j.model.compute.Server -import org.openstack4j.model.compute.ext.AvailabilityZone - -class OpenstackComputeV2Provider implements OpenstackComputeProvider, OpenstackRequestHandler, OpenstackIdentityAware { - - OpenstackIdentityProvider identityProvider - - OpenstackComputeV2Provider(OpenstackIdentityProvider identityProvider) { - this.identityProvider = identityProvider - } - - @Override - List getZones(String region) { - handleRequest { - getRegionClient(region).compute().zones().list() - } - } - - @Override - List getInstances(String region) { - handleRequest { - getRegionClient(region).compute().servers().list() - } - } - - @Override - String getConsoleOutput(String region, String serverId) { - handleRequest { - getRegionClient(region).compute().servers().getConsoleOutput(serverId, -1) - } - } - - @Override - void deleteInstance(String region, String instanceId) { - handleRequest { - getRegionClient(region).compute().servers().delete(instanceId) - } - } - - @Override - void rebootInstance(String region, String instanceId, RebootType rebootType) { - handleRequest { - getRegionClient(region).compute().servers().reboot(instanceId, rebootType) - } - } - - @Override - void rebootInstance(String region, String instanceId) { - handleRequest { - getRegionClient(region).compute().servers().reboot(instanceId, RebootType.SOFT) - } - } - - @Override - FloatingIP getOrCreateFloatingIp(final String region, final String networkName) { - handleRequest { - FloatingIP ip = listFloatingIps(region).find { !it.fixedIpAddress } - if (!ip) { - ip = client.useRegion(region).compute().floatingIps().allocateIP(networkName) - if (!ip) { - throw new OpenstackProviderException("Unable to 
allocate new IP address on network $networkName") - } - } - ip - } - } - - @Override - List listFloatingIps(final String region) { - handleRequest { - getRegionClient(region).compute().floatingIps().list() - } - } - - @Override - void deleteSecurityGroup(String region, String securityGroupId) { - handleRequest { - getRegionClient(region).compute().securityGroups().delete(securityGroupId) - } - } - - @Override - void deleteSecurityGroupRule(String region, String id) { - handleRequest { - client.useRegion(region).compute().securityGroups().deleteRule(id) - } - } - - @Override - SecGroupExtension.Rule createSecurityGroupRule(String region, - String securityGroupId, - IPProtocol protocol, - String cidr, - String remoteSecurityGroupId, - Integer fromPort, - Integer toPort, - Integer icmpType, - Integer icmpCode) { - - def builder = Builders.secGroupRule() - .parentGroupId(securityGroupId) - .protocol(protocol) - - /* - * Openstack/Openstack4J overload the port range to indicate ICMP type and code. This isn't immediately - * obvious and was found through testing and inferring things from the Openstack documentation. - */ - if (protocol == IPProtocol.ICMP) { - builder.range(icmpType, icmpCode) - } else { - builder.range(fromPort, toPort) - } - - if (remoteSecurityGroupId) { - builder.groupId(remoteSecurityGroupId) - } else { - builder.cidr(cidr) - } - - handleRequest { - client.useRegion(region).compute().securityGroups().createRule(builder.build()) - } - } - - @Override - SecGroupExtension updateSecurityGroup(String region, String id, String name, String description) { - handleRequest { - client.useRegion(region).compute().securityGroups().update(id, name, description) - } - } - - @Override - SecGroupExtension createSecurityGroup(String region, String name, String description) { - handleRequest { - client.useRegion(region).compute().securityGroups().create(name, description) - } - } - - @Override - SecGroupExtension getSecurityGroup(String region, String id) { - handleRequest { - client.useRegion(region).compute().securityGroups().get(id) - } - } - - @Override - List getSecurityGroups(String region) { - handleRequest { - getRegionClient(region).compute().securityGroups().list() - } - } - - @Override - Server getServerInstance(String region, String instanceId) { - handleRequest { - client.useRegion(region).compute().servers().get(instanceId) - } - } - - @Override - List listFlavors(String region) { - handleRequest { - this.getRegionClient(region).compute().flavors().list() - } - } - - @Override - String getIpForInstance(String region, String instanceId) { - Server server = getServerInstance(region, instanceId) - if (!server) { - throw new OpenstackResourceNotFoundException("unable to find instance: $instanceId in region: $region") - } - /* TODO - For now just get the first ipv4 address found. Openstack does not associate an instance id - with load balancer membership, just an ip address. An instance can have multiple IP addresses. - perhaps we just look for the first 192.* address found. It would also help to know the network name - from which to choose the IP list. I am not sure if we will have that. We can certainly add that into - the api later on when we know what data deck will have access to. 
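- * Illustrative example: for addresses like [private: [an IPv6 entry, 192.168.1.5]],
- * the expression below resolves to 192.168.1.5, the first version-4 address of the
- * first network returned.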
- */ - String ip = server.addresses?.addresses?.collect { n -> n.value }?.find()?.find { it.version == 4 }?.addr - if (StringUtils.isEmpty(ip)) { - throw new OpenstackProviderException("Instance ${instanceId} has no IP address") - } - ip - } - - @Override - List getIpsForInstance(String region, String instanceId) { - Server server = getServerInstance(region, instanceId) - if (!server) { - throw new OpenstackResourceNotFoundException("unable to find instance: $instanceId in region: $region") - } - server.addresses?.addresses?.collect { n -> n.value }?.flatten() - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityAware.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityAware.groovy deleted file mode 100644 index 35c4e0d5314..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityAware.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import org.openstack4j.api.OSClient - -/** - * Allow sub-providers access to necessary identity methods. - * These are defined once here, instead of repeated in each sub-provider. - */ -trait OpenstackIdentityAware { - - OSClient getRegionClient(String region) { - identityProvider.getRegionClient(region) - } - - OSClient getClient() { - identityProvider.client - } - - abstract OpenstackIdentityProvider getIdentityProvider() - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityProvider.groovy deleted file mode 100644 index ef8b276841c..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityProvider.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import org.openstack4j.api.OSClient - -interface OpenstackIdentityProvider { - - OSClient buildClient() - - /** - * Returns a list of regions. - * @return - */ - List getAllRegions() - - /** - * Thread-safe way to get client. 
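- * Implementations are expected to re-authenticate transparently when the cached
- * token expires; the V3 provider below does this with double-checked locking.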
- * @return - */ - OSClient getClient() - - /** - * Get a new token id. - * @return - */ - String getTokenId() - - /** - * Check if a token is expired - * @return - */ - boolean isTokenExpired() - - /** - * Helper method to get region based thread-safe OS client. - * @param region - * @return - */ - OSClient getRegionClient(String region) - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityV3Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityV3Provider.groovy deleted file mode 100644 index 238587e0f07..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityV3Provider.groovy +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.api.OSClient -import org.openstack4j.core.transport.Config -import org.openstack4j.model.common.Identifier -import org.openstack4j.model.identity.v3.Token -import org.openstack4j.openstack.OSFactory - -class OpenstackIdentityV3Provider implements OpenstackIdentityProvider, OpenstackRequestHandler { - - OpenstackNamedAccountCredentials credentials - Token token = null - Config config = null - - OpenstackIdentityV3Provider(OpenstackNamedAccountCredentials credentials) { - this.credentials = credentials - this.config = credentials?.insecure ? Config.newConfig().withSSLVerificationDisabled() : Config.newConfig() - } - - @Override - OSClient buildClient() { - handleRequest { - OSFactory.builderV3() - .withConfig(config) - .endpoint(credentials.authUrl) - .credentials(credentials.username, credentials.password, Identifier.byName(credentials.domainName)) - .scopeToProject(Identifier.byName(credentials.projectName), Identifier.byName(credentials.domainName)) - .authenticate() - } - } - - @Override - OSClient getClient() { - if (!token || tokenExpired) { - synchronized (this) { - if (!token || tokenExpired) { - token = buildClient().token - } - } - } - OSFactory.clientFromToken(token, config) - } - - @Override - String getTokenId() { - token?.id - } - - @Override - boolean isTokenExpired() { - long now = System.currentTimeMillis() - long expires = token.expires.time - now >= expires - } - - /** - * Returns configuration based regions if provided, otherwise will use the - * API to look up regions and return a list. 
- * @return - */ - @Override - List getAllRegions() { - credentials.regions ?: handleRequest { - client.identity().regions().list()?.collect { it.id } - } - } - - @Override - OSClient getRegionClient(String region) { - client.useRegion(region) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageProvider.groovy deleted file mode 100644 index 58fab60d70f..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageProvider.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage - -/** - * - */ -interface OpenstackImageProvider { - - /** - * Returns a list of images. - * @param region - * @param filters - * @return - */ - List listImages(String region, Map filters) - - /** - * Returns a list of images. - * @param region - * @return - */ - List listImages(String region) - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageV2Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageV2Provider.groovy deleted file mode 100644 index e898cd75276..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageV2Provider.groovy +++ /dev/null @@ -1,43 +0,0 @@ -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import org.openstack4j.model.image.v2.Image - -class OpenstackImageV2Provider implements OpenstackImageProvider, OpenstackRequestHandler, OpenstackIdentityAware { - - OpenstackIdentityProvider identityProvider; - - OpenstackImageV2Provider(OpenstackIdentityProvider identityProvider) { - this.identityProvider = identityProvider; - } - - @Override - List listImages(String region, Map filters) { - handleRequest { - getRegionClient(region).imagesV2().list(filters)?.collect { buildImage(it, region) } - } - } - - @Override - List listImages(String region) { - handleRequest { - getRegionClient(region).imagesV2().list()?.collect { buildImage(it, region) } - } - } - - static OpenstackImage buildImage(Image image, String region) { - def properties = new HashMap() - image.properties.each { properties[it.key] = it.value.toString() } - OpenstackImage.builder() - .id(image.id) - .status(image.status?.value()) - .size(image.size) - .location(image.locations?.get(0)?.toString()) - .createdAt(image.createdAt?.time) - .updatedAt(image.updatedAt?.time) - .properties(properties) - .name(image.name) - .region(region) - .build() - } -} diff --git 
a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadBalancerProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadBalancerProvider.groovy deleted file mode 100644 index a3f88ae77bb..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadBalancerProvider.groovy +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import org.openstack4j.model.network.ext.MemberV2 - -/** - * Operations associated to load balancer and relevant building blocks. - */ -interface OpenstackLoadBalancerProvider { - - /** - * Get all load balancers in a region. - * @param region - * @return - */ - List getLoadBalancers(final String region) - - /** - * Creates new openstack load balancer. - * @param region - * @param name - * @param description - * @param subnetId - * @return - */ - LoadBalancerV2 createLoadBalancer(final String region, final String name, final String description, final String subnetId) - - /** - * Retrieves load balancer by id. - * @param region - * @param id - * @return - */ - LoadBalancerV2 getLoadBalancer(final String region, final String id) - - /** - * Removes load balancer by id. - * @param region - * @param id - * @return - */ - ActionResponse deleteLoadBalancer(final String region, final String id) - - /** - * Retrieves load balancer by name. - * @param region - * @param id - * @return - */ - LoadBalancerV2 getLoadBalancerByName(final String region, final String id) - - /** - * Get all load balancer listeners in a region. - * @param region - * @return - */ - List getListeners(final String region) - - /** - * Creates listener associated to an existing load balancer. - * @param region - * @param name - * @param externalProtocol - * @param externalPort - * @param description - * @param loadBalancerId - * @return - */ - ListenerV2 createListener(final String region, final String name, final String externalProtocol, final Integer externalPort, final String description, final String loadBalancerId) - - /** - * Retrieves listener by id. - * @param region - * @param id - * @return - */ - ListenerV2 getListener(final String region, final String id) - - /** - * Removes listener by id. - * @param region - * @param id - * @return - */ - ActionResponse deleteListener(final String region, final String id) - - /** - * Get a list of all load balancer pools in a region.
- * @param region - * @return - */ - List getPools(final String region) - - /** - * Creates load balancer pool for a given listener. - * @param region - * @param name - * @param internalProtocol - * @param algorithm - * @param listenerId - * @return - */ - LbPoolV2 createPool(final String region, final String name, final String internalProtocol, final String algorithm, final String listenerId) - - /** - * Retrieves pool by id. - * @param region - * @param id - * @return - */ - LbPoolV2 getPool(final String region, final String id) - - /** - * Updates pool by id. - * @param region - * @param id - * @param method - * @return - */ - LbPoolV2 updatePool(final String region, final String id, final String method) - - /** - * Removes pool by id. - * @param region - * @param lbPoolId - * @return - */ - ActionResponse deletePool(final String region, final String lbPoolId) - - /** - * List all health monitors in a region. - * @param region - * @return - */ - List getHealthMonitors(final String region) - - /** - * Creates monitor for an existing pool. - * @param region - * @param poolId - * @param healthMonitor - * @return - */ - HealthMonitorV2 createMonitor(final String region, final String poolId, final HealthMonitor healthMonitor) - - /** - * Retrieves monitor by id. - * @param region - * @param id - * @return - */ - HealthMonitorV2 getMonitor(final String region, final String id) - - /** - * Updates monitor by id. - * @param region - * @param id - * @param healthMonitor - * @return - */ - HealthMonitorV2 updateMonitor(final String region, final String id, final HealthMonitor healthMonitor) - - /** - * Removes monitor by id. - * @param region - * @param id - * @return - */ - ActionResponse deleteMonitor(final String region, final String id) - - /** - * Get port from load balancer listener description. Openstack load balancers have no native concept of internal port, - * so we store it in the description field of the load balancer. - * - * This may be changed in a future version. - * - * @param region - * @param listenerId - * @return - */ - Integer getInternalLoadBalancerPort(String region, String listenerId) - - /** - * Get the member id for an instance. - * @param region - * @param ip - * @param lbPoolId - */ - String getMemberIdForInstance(String region, String ip, String lbPoolId) - - /** - * Add a member to a pool. - * @param region - * @param ip - * @param lbPoolId - * @param subnetId - * @param internalPort - * @param weight - */ - MemberV2 addMemberToLoadBalancerPool(String region, String ip, String lbPoolId, String subnetId, Integer internalPort, int weight) - - /** - * Remove a member from a pool. - * @param region - * @param lbPoolId - * @param memberId - * @return - */ - ActionResponse removeMemberFromLoadBalancerPool(String region, String lbPoolId, String memberId) - - - /** - * Returns the current status of the entire load balancer tree (lb, listeners, pool, etc). - * @param region - * @param id - * @return - */ - LoadBalancerV2StatusTree getLoadBalancerStatusTree(final String region, final String id) - - /** - * Updates a pool member status. 
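- * Toggling the status flips the member's admin state, which controls whether the member may receive traffic.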
- * @param region - * @param poolId - * @param memberId - * @param status true means the pool member is enabled, false means it's disabled and can receive no traffic - */ - MemberV2 updatePoolMemberStatus(final String region, final String poolId, final String memberId, final boolean status) -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadBalancerV2Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadBalancerV2Provider.groovy deleted file mode 100644 index 737532349bf..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadBalancerV2Provider.groovy +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import com.netflix.spinnaker.clouddriver.openstack.domain.LoadBalancerResolver -import org.apache.commons.lang.StringUtils -import org.openstack4j.api.Builders -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.network.ext.HealthMonitorType -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbMethod -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerProtocol -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import org.openstack4j.model.network.ext.MemberV2 -import org.openstack4j.model.network.ext.MemberV2Update -import org.openstack4j.model.network.ext.Protocol - -class OpenstackLoadBalancerV2Provider implements OpenstackLoadBalancerProvider, OpenstackRequestHandler, OpenstackIdentityAware, LoadBalancerResolver { - - final int minPort = 1 - final int maxPort = (1 << 16) - 1 - - OpenstackIdentityProvider identityProvider - - OpenstackLoadBalancerV2Provider(OpenstackIdentityProvider identityProvider) { - this.identityProvider = identityProvider - } - - @Override - List getLoadBalancers(final String region) { - handleRequest { - getRegionClient(region).networking().lbaasV2().loadbalancer().list() - } - } - - @Override - LoadBalancerV2 createLoadBalancer(final String region, final String name, final String description, final String subnetId) { - handleRequest { - getRegionClient(region).networking().lbaasV2().loadbalancer().create(Builders.loadbalancerV2() - .name(name) - .description(description) - .subnetId(subnetId) - .build()) - } - } - - @Override - LoadBalancerV2 getLoadBalancer(final String region, final String id) { - handleRequest 
{ - getRegionClient(region).networking().lbaasV2().loadbalancer().get(id) - } - } - - @Override - ActionResponse deleteLoadBalancer(String region, String id) { - handleRequest { - getRegionClient(region).networking().lbaasV2().loadbalancer().delete(id) - } - } - - @Override - LoadBalancerV2 getLoadBalancerByName(final String region, final String name) { - handleRequest { - List lbs = getRegionClient(region).networking().lbaasV2().loadbalancer().list(['name':name]) - lbs.size() > 0 ? lbs.first() : null - } - } - - @Override - List getListeners(final String region) { - handleRequest { - getRegionClient(region).networking().lbaasV2().listener().list() - } - } - - @Override - ListenerV2 createListener(final String region, final String name, final String externalProtocol, final Integer externalPort, final String description, final String loadBalancerId) { - handleRequest { - getRegionClient(region).networking().lbaasV2().listener().create(Builders.listenerV2() - .name(name) - .description(description) - .loadBalancerId(loadBalancerId) - .protocolPort(externalPort) - .protocol(ListenerProtocol.forValue(externalProtocol)) - .adminStateUp(Boolean.TRUE) - .build()) - } - } - - @Override - ListenerV2 getListener(final String region, final String id) { - ListenerV2 result = handleRequest { - getRegionClient(region).networking().lbaasV2().listener().get(id) - } - - if (!result) { - throw new OpenstackResourceNotFoundException("Unable to find listener ${id} in ${region}") - } - result - } - - @Override - ActionResponse deleteListener(final String region, final String id) { - handleRequest { - getRegionClient(region).networking().lbaasV2().listener().delete(id) - } - } - - @Override - List getPools(final String region) { - handleRequest { - getRegionClient(region).networking().lbaasV2().lbPool().list() - } - } - - @Override - LbPoolV2 createPool(final String region, final String name, final String internalProtocol, final String method, final String listenerId) { - handleRequest { - getRegionClient(region).networking().lbaasV2().lbPool().create(Builders.lbpoolV2() - .name(name) - .lbMethod(LbMethod.forValue(method)) - .listenerId(listenerId) - .protocol(Protocol.forValue(internalProtocol)) - .adminStateUp(Boolean.TRUE) - .build()) - } - } - - @Override - LbPoolV2 getPool(final String region, final String id) { - LbPoolV2 result = handleRequest { - getRegionClient(region).networking().lbaasV2().lbPool().get(id) - } - - if (!result) { - throw new OpenstackResourceNotFoundException("Unable to find pool ${id} in ${region}") - } - result - } - - @Override - LbPoolV2 updatePool(final String region, final String id, final String method) { - handleRequest { - getRegionClient(region).networking().lbaasV2().lbPool().update(id, Builders.lbPoolV2Update() - .lbMethod(LbMethod.forValue(method)) - .adminStateUp(Boolean.TRUE) - .build()) - } - } - - @Override - ActionResponse deletePool(final String region, final String id) { - handleRequest { - getRegionClient(region).networking().lbaasV2().lbPool().delete(id) - } - } - - @Override - List getHealthMonitors(final String region) { - handleRequest { - getRegionClient(region).networking().lbaasV2().healthMonitor().list() - } - } - - @Override - ActionResponse deleteMonitor(final String region, final String id) { - handleRequest { - getRegionClient(region).networking().lbaasV2().healthMonitor().delete(id) - } - } - - @Override - HealthMonitorV2 getMonitor(final String region, final String id) { - handleRequest { - 
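- // resolve a region-scoped client before making the call, as every method in this provider does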
getRegionClient(region).networking().lbaasV2().healthMonitor().get(id) - } - } - - @Override - HealthMonitorV2 createMonitor(final String region, final String poolId, final HealthMonitor healthMonitor) { - handleRequest { - getRegionClient(region).networking().lbaasV2().healthMonitor().create(Builders.healthmonitorV2() - .poolId(poolId) - .type(HealthMonitorType.forValue(healthMonitor.type?.name())) - .delay(healthMonitor.delay) - .expectedCodes(healthMonitor.expectedCodes?.join(',')) - .httpMethod(healthMonitor.httpMethod) - .maxRetries(healthMonitor.maxRetries) - .timeout(healthMonitor.timeout) - .urlPath(healthMonitor.url) - .adminStateUp(Boolean.TRUE) - .build()) - } - } - - @Override - HealthMonitorV2 updateMonitor(final String region, final String id, final HealthMonitor healthMonitor) { - handleRequest { - getRegionClient(region).networking().lbaasV2().healthMonitor().update(id, Builders.healthMonitorV2Update() - .delay(healthMonitor.delay) - .expectedCodes(healthMonitor.expectedCodes?.join(',')) - .httpMethod(healthMonitor.httpMethod) - .maxRetries(healthMonitor.maxRetries) - .timeout(healthMonitor.timeout) - .urlPath(healthMonitor.url) - .adminStateUp(Boolean.TRUE) - .build()) - } - } - - @Override - Integer getInternalLoadBalancerPort(String region, String listenerId) { - handleRequest { - ListenerV2 listener = getListener(region, listenerId) - Integer internalPort = parseListenerKey(listener.description)?.get('internalPort')?.toInteger() - if (!internalPort || internalPort < minPort || internalPort > maxPort) { - throw new OpenstackProviderException("Internal pool port $internalPort is outside of the valid range.") - } - internalPort - } - } - - @Override - String getMemberIdForInstance(String region, String ip, String lbPoolId) { - String memberId = handleRequest { - client.useRegion(region).networking().lbaasV2().lbPool().listMembers(lbPoolId)?.find { m -> m.address == ip }?.id - } - if (StringUtils.isEmpty(memberId)) { - throw new OpenstackProviderException("Instance with ip ${ip} is not associated with any load balancer memberships") - } - memberId - } - - @Override - MemberV2 addMemberToLoadBalancerPool(String region, String ip, String lbPoolId, String subnetId, Integer internalPort, int weight) { - MemberV2 member = handleRequest { - client.useRegion(region).networking().lbaasV2().lbPool().createMember( - lbPoolId, - Builders.memberV2().address(ip).subnetId(subnetId).protocolPort(internalPort).weight(weight).build() - ) - } - if (!member) { - throw new OpenstackProviderException("Unable to add ip $ip to load balancer ${lbPoolId}") - } - member - } - - @Override - ActionResponse removeMemberFromLoadBalancerPool(String region, String lbPoolId, String memberId) { - handleRequest { - client.useRegion(region).networking().lbaasV2().lbPool().deleteMember(lbPoolId, memberId) - } - } - - @Override - LoadBalancerV2StatusTree getLoadBalancerStatusTree(final String region, final String id) { - handleRequest { - getRegionClient(region).networking().lbaasV2().loadbalancer().statusTree(id) - } - } - - @Override - MemberV2 updatePoolMemberStatus(final String region, final String poolId, final String memberId, final boolean status) { - handleRequest { - MemberV2Update update = Builders.memberV2Update().adminStateUp(status).build() - getRegionClient(region).networking().lbaasV2().lbPool().updateMember(poolId, memberId, update) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingProvider.groovy 
b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingProvider.groovy deleted file mode 100644 index 156bd43813e..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingProvider.groovy +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import org.openstack4j.model.network.NetFloatingIP -import org.openstack4j.model.network.Network -import org.openstack4j.model.network.Port -import org.openstack4j.model.network.Subnet - -interface OpenstackNetworkingProvider { - -/** - * Get a network from the network id. - * @param region - * @param networkId - * @return - */ - Network getNetwork(final String region, final String networkId) - - /** - * Associate an already known floating IP address to a VIP in the specified region. - * @param region - * @param floatingIpId - * @param vipId - * @return - */ - NetFloatingIP associateFloatingIpToVip(final String region, final String floatingIpId, final String vipId) - - /** - * Remove port associated with floating IP. - * @param region - * @param floatingIpId - * @return - */ - NetFloatingIP disassociateFloatingIp(final String region, final String floatingIpId) - - /** - * List all ports in the region. - * @param region - * @return - */ - List listPorts(final String region) - - /** - * List the available networks in a region. These will be both internal and external networks. - * @param region - * @return - */ - List listNetworks(String region) - - /** - * Returns a list of available subnets by region. - * @param region - * @return - */ - List listSubnets(String region) - - /** - * Get the subnet in a region. - * @param region - * @param subnetId - * @return the subnet - */ - Subnet getSubnet(final String region, final String subnetId) - - /** - * Internal helper to look up the port associated with a vip. - * @param region - * @param vipId - * @return - */ - Port getPortForVip(final String region, final String vipId) - - /** - * Helper to get the floating ip bound to a port. - * @param region - * @param portId - * @return - */ - NetFloatingIP getFloatingIpForPort(final String region, final String portId) - - /** - * List network floating ips. - * @param region - * @return - */ - List listNetFloatingIps(final String region) - - /** - * Retrieves port by id. - * @param region - * @param portId - * @return - */ - Port getPort(final String region, final String portId) - - /** - * Updates port by id. - * @param region - * @param portId - * @param securityGroups - * @return - */ - Port updatePort(final String region, final String portId, final List securityGroups) - - /** - * Associates floating ip address to port. 
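- * The floating ip then serves as the externally reachable address for the device attached to the port.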
- * @param region - * @param floatingIpId - * @param portId - * @return - */ - NetFloatingIP associateFloatingIpToPort(final String region, final String floatingIpId, final String portId) - - /** - * Disassociates floating ip address from port. - * @param region - * @param floatingIpId - * @return - */ - NetFloatingIP disassociateFloatingIpFromPort(final String region, final String floatingIpId) -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingV2Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingV2Provider.groovy deleted file mode 100644 index 04aee1144e6..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingV2Provider.groovy +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.domain.LoadBalancerResolver -import org.openstack4j.api.Builders -import org.openstack4j.model.network.NetFloatingIP -import org.openstack4j.model.network.Network -import org.openstack4j.model.network.Port -import org.openstack4j.model.network.Subnet -import org.openstack4j.model.network.builder.PortBuilder - -class OpenstackNetworkingV2Provider implements OpenstackNetworkingProvider, OpenstackRequestHandler, OpenstackIdentityAware, LoadBalancerResolver { - - OpenstackIdentityProvider identityProvider - - OpenstackNetworkingV2Provider(OpenstackIdentityProvider identityProvider) { - this.identityProvider = identityProvider - } - - @Override - Network getNetwork(final String region, final String networkId) { - handleRequest { - getRegionClient(region).networking().network().list().find { it.id == networkId } - } - } - - @Override - NetFloatingIP associateFloatingIpToVip(final String region, final String floatingIpId, final String vipId) { - Port port = getPortForVip(region, vipId) - if (!port) { - throw new OpenstackProviderException("Unable to find port for vip ${vipId}") - } else { - handleRequest { - getRegionClient(region).networking().floatingip().associateToPort(floatingIpId, port.id) - } - } - } - - @Override - NetFloatingIP disassociateFloatingIp(final String region, final String floatingIpId) { - handleRequest { - getRegionClient(region).networking().floatingip().disassociateFromPort(floatingIpId) - } - } - - @Override - List listPorts(final String region) { - handleRequest { - getRegionClient(region).networking().port().list() - } - } - - @Override - List listNetworks(String region) { - handleRequest { - getRegionClient(region).networking().network().list() - } - } - - @Override - List listSubnets(String region) { - handleRequest { - getRegionClient(region).networking().subnet().list() - } - } - - @Override 
- Subnet getSubnet(final String region, final String subnetId) { - handleRequest { - getRegionClient(region).networking().subnet().get(subnetId) - } - } - - @Override - Port getPortForVip(final String region, final String vipId) { - handleRequest { - getRegionClient(region).networking().port().list()?.find { it.name == "vip-${vipId}".toString() } - } - } - - @Override - NetFloatingIP getFloatingIpForPort(final String region, final String portId) { - handleRequest { - listNetFloatingIps(region)?.find { it.portId == portId } - } - } - - @Override - List listNetFloatingIps(final String region) { - handleRequest { - getRegionClient(region).networking().floatingip().list() - } - } - - @Override - Port getPort(final String region, final String portId) { - handleRequest { - getRegionClient(region).networking().port().get(portId) - } - } - - @Override - Port updatePort(final String region, final String portId, final List securityGroups) { - handleRequest { - // Builder doesn't take in list of security groups and doesn't allow you to set the ID so, adding some ugly code :) - PortBuilder portBuilder = Builders.port() - securityGroups.each { portBuilder = portBuilder.securityGroup(it) } - Port changedPort = portBuilder.build() - changedPort.setId(portId) - getRegionClient(region).networking().port().update(changedPort) - } - } - - @Override - NetFloatingIP associateFloatingIpToPort(final String region, final String floatingIpId, final String portId) { - handleRequest { - getRegionClient(region).networking().floatingip().associateToPort(floatingIpId, portId) - } - } - - @Override - NetFloatingIP disassociateFloatingIpFromPort(final String region, final String floatingIpId) { - handleRequest { - getRegionClient(region).networking().floatingip().disassociateFromPort(floatingIpId) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationProvider.groovy deleted file mode 100644 index 1978b2c3fb4..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationProvider.groovy +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import org.openstack4j.model.heat.Resource -import org.openstack4j.model.heat.ResourceHealth -import org.openstack4j.model.heat.Stack - -interface OpenstackOrchestrationProvider { - - /** - * TODO: Handle heat autoscaling migration to senlin in versions > Mitaka - * Create a Spinnaker Server Group (Openstack Heat Stack). 
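- * The stack is created from the main heat template plus any referenced subtemplates, with the given parameters substituted in.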
- * @param region the openstack region - * @param stackName the openstack stack name - * @param template the main heat template - * @param subtemplate a map of subtemplate files referenced by the template - * @param parameters the parameters substituted into the heat template - * @param disableRollback if true, resources are not removed upon stack create failure - * @param timeoutMins stack create timeout, after which the operation will fail - * @param tags tags to pass to stack - */ - void deploy(String region, String stackName, String template, Map subtemplate, - ServerGroupParameters parameters, boolean disableRollback, Long timeoutMins, List tags) - - /** - * TODO: Handle heat autoscaling migration to senlin in versions > Mitaka - * Updates a Spinnaker Server Group (Openstack Heat Stack). - * @param region the openstack region - * @param stackName the openstack stack name - * @param stackId the openstack stack id - * @param template the main heat template - * @param subtemplate a map of subtemplate files referenced by the template - * @param parameters the parameters substituted into the heat template - * @param tags the tags to pass to the stack. These replace existing tags. - */ - void updateStack(String region, String stackName, String stackId, String template, Map subtemplate, - ServerGroupParameters parameters, List tags) - - /** - * Get a heat template from an existing Openstack Heat Stack. - * @param region - * @param stackName - * @param stackId - * @return - */ - String getHeatTemplate(String region, String stackName, String stackId) - - /** - * List existing heat stacks (server groups) - * @return List<? extends Stack> stacks - */ - List listStacks(String region) - - /** - * List stacks associated with these load balancers. - * @param region - * @param loadBalancerIds - * @return - */ - List listStacksWithLoadBalancers(String region, List loadBalancerIds) - - /** - * Get a stack in a specific region. - * @param stackName - * @return - */ - Stack getStack(String region, String stackName) - - /** - * Delete a stack in a specific region. - * @param stack - */ - void destroy(String region, Stack stack) - - /** - * Get all instance ids of server resources associated with a stack. - * @param region - * @param stackName - */ - List getInstanceIdsForStack(String region, String stackName) - - /** - * Get the resource associated with an instance. - * @param region - * @param stack - * @param instanceId - * @return - */ - Resource getInstanceResourceForStack(String region, Stack stack, String instanceId) - - /** - * Get the autoscaling resource for a stack. - * @param region - * @param stack - * @return - */ - Resource getAsgResourceForStack(String region, Stack stack) - - /** - * Mark a stack resource as unhealthy. - * @param region - * @param stackName - * @param stackId - * @param resource - * @param resourceHealth - */ - void markStackResourceUnhealthy(String region, String stackName, String stackId, String resource, ResourceHealth resourceHealth) - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationV1Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationV1Provider.groovy deleted file mode 100644 index 894ca0f6b09..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationV1Provider.groovy +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.ServerGroupConstants -import org.openstack4j.api.Builders -import org.openstack4j.model.heat.Resource -import org.openstack4j.model.heat.ResourceHealth -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.heat.StackCreate -import org.openstack4j.model.heat.StackUpdate - -class OpenstackOrchestrationV1Provider implements OpenstackOrchestrationProvider, OpenstackRequestHandler, OpenstackIdentityAware { - - OpenstackIdentityProvider identityProvider - - OpenstackOrchestrationV1Provider(OpenstackIdentityProvider identityProvider) { - this.identityProvider = identityProvider - } - - @Override - void deploy(String region, String stackName, String template, Map subtemplate, - ServerGroupParameters parameters, boolean disableRollback, Long timeoutMins, List tags) { - handleRequest { - Map params = parameters.toParamsMap() - StackCreate create = Builders.stack() - .name(stackName) - .template(template) - .parameters(params) - .files(subtemplate) - .disableRollback(disableRollback) - .timeoutMins(timeoutMins) - .tags(tags ? tags.join(",") : null) - .build() - getRegionClient(region).heat().stacks().create(create) - } - } - - @Override - void updateStack(String region, String stackName, String stackId, String template, Map subtemplate, - ServerGroupParameters parameters, List tags) { - handleRequest { - Map params = parameters.toParamsMap() - StackUpdate update = Builders.stackUpdate() - .template(template) - .files(subtemplate) - .parameters(params) - .tags(tags ? 
tags.join(",") : null) - .build() - getRegionClient(region).heat().stacks().update(stackName, stackId, update) - } - } - - @Override - String getHeatTemplate(String region, String stackName, String stackId) { - handleRequest { - client.useRegion(region).heat().templates().getTemplateAsString(stackName, stackId) - } - } - - @Override - List listStacks(String region) { - handleRequest { - getRegionClient(region).heat().stacks().list() - } - } - - @Override - List listStacksWithLoadBalancers(String region, List loadBalancerIds) { - handleRequest { - getRegionClient(region).heat().stacks().list([tags:loadBalancerIds.join(",")]) - } - } - - @Override - Stack getStack(String region, String stackName) { - handleRequest { - getRegionClient(region).heat().stacks().getStackByName(stackName) - } - } - - @Override - void destroy(String region, Stack stack) { - handleRequest { - getRegionClient(region).heat().stacks().delete(stack.name, stack.id) - } - } - - @Override - List getInstanceIdsForStack(String region, String stackName) { - List resources = handleRequest { - //this means it has the ability to list resources 10 levels deep in the heat template hierarchy. - //provide a depth of 10 for insurance - the default template has a depth of 4. - getRegionClient(region).heat().resources().list(stackName, 10) - } - resources?.findResults { - it.type == ServerGroupConstants.HEAT_SERVER_RESOURCE ? it.physicalResourceId : null - } - } - - @Override - Resource getInstanceResourceForStack(String region, Stack stack, String instanceName) { - List resources = handleRequest { - //this means it has the ability to list resources 10 levels deep in the heat template hierarchy. - //provide a depth of 10 for insurance - the default template has a depth of 4. - getRegionClient(region).heat().resources().list(stack.name, 10) - } - List parts = instanceName.split("-")?.toList() - if (parts && parts.size() > 2) { - String instanceResourceId = parts.get(2) - resources?.find { - it.type == ServerGroupParameters.resolveResourceFilename(stack.parameters) && it.resourceName == instanceResourceId - } - } else { - null - } - } - - @Override - Resource getAsgResourceForStack(String region, Stack stack) { - List resources = handleRequest { - //this means it has the ability to list resources 10 levels deep in the heat template hierarchy. - //provide a depth of 10 for insurance - the default template has a depth of 4. - getRegionClient(region).heat().resources().list(stack.name, 10) - } - resources?.find { - it.type == ServerGroupConstants.HEAT_ASG_RESOURCE && it.resourceName == ServerGroupConstants.SERVERGROUP_RESOURCE_NAME - } - } - - @Override - void markStackResourceUnhealthy(String region, String stackName, String stackId, String resource, ResourceHealth resourceHealth) { - handleRequest { - getRegionClient(region).heat().resources().markUnhealthy(stackName, stackId, resource, resourceHealth) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackProviderFactory.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackProviderFactory.groovy deleted file mode 100644 index 2648e8b042f..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackProviderFactory.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials - -/** - * Builds the appropriate {@link OpenstackClientProvider} based on the configuration. - */ -class OpenstackProviderFactory { - - static OpenstackClientProvider createProvider(OpenstackNamedAccountCredentials credentials) { - OpenstackIdentityProvider identityProvider = new OpenstackIdentityV3Provider(credentials) - OpenstackComputeV2Provider computeProvider = new OpenstackComputeV2Provider(identityProvider) - OpenstackNetworkingProvider networkingProvider = new OpenstackNetworkingV2Provider(identityProvider) - OpenstackOrchestrationProvider orchestrationProvider = new OpenstackOrchestrationV1Provider(identityProvider) - OpenstackImageProvider imageProvider = new OpenstackImageV2Provider(identityProvider) - OpenstackLoadBalancerProvider loadBalancerProvider = new OpenstackLoadBalancerV2Provider(identityProvider) - OpenstackSwiftProvider swiftProvider = new OpenstackSwiftV1Provider(identityProvider) - new OpenstackClientProvider(identityProvider, computeProvider, networkingProvider, orchestrationProvider, imageProvider, loadBalancerProvider, swiftProvider) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackRequestHandler.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackRequestHandler.groovy deleted file mode 100644 index ffc990fa2c7..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackRequestHandler.groovy +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import org.openstack4j.model.common.ActionResponse - -import java.lang.reflect.UndeclaredThrowableException - -trait OpenstackRequestHandler { - - /** - * Handler for an Openstack4J request with common error handling. 
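- * Any exception thrown by the closure is wrapped in an OpenstackProviderException, and an unsuccessful ActionResponse is converted into one as well.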
- * @param closure makes the needed Openstack4J request - * @return the result of the closure - */ - static <T> T handleRequest(Closure<T> closure) { - T result - try { - result = closure() - } catch (UndeclaredThrowableException e) { - throw new OpenstackProviderException('Unable to process request', e.cause) - } catch (OpenstackProviderException e) { //allows nested calls to handleRequest - throw e - } catch (Exception e) { - throw new OpenstackProviderException('Unable to process request', e) - } - if (result instanceof ActionResponse && !result.isSuccess()) { - throw new OpenstackProviderException(result) - } - result - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftProvider.groovy deleted file mode 100644 index 8a91d4eb7f3..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftProvider.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -/** - * Methods for interacting with the Openstack Swift API. - */ -interface OpenstackSwiftProvider { - - /** - * Returns the content of a Swift object. - * @param region the region that holds the container - * @param container the container that holds the object - * @param name the name of the object within the container - * @return contents of the object - */ - String readSwiftObject(final String region, final String container, final String name) -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftV1Provider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftV1Provider.groovy deleted file mode 100644 index 99a50cdb5e1..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftV1Provider.groovy +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import org.apache.http.protocol.HTTP -import org.openstack4j.core.transport.HttpResponse -import org.openstack4j.model.common.DLPayload -import org.springframework.http.HttpStatus - -class OpenstackSwiftV1Provider implements OpenstackSwiftProvider, OpenstackRequestHandler, OpenstackIdentityAware { - - OpenstackIdentityProvider identityProvider - - OpenstackSwiftV1Provider(OpenstackIdentityProvider identityProvider) { - this.identityProvider = identityProvider - } - - @Override - String readSwiftObject(String region, String container, String name) { - handleRequest { - DLPayload payload = getRegionClient(region).objectStorage().objects().download(container, name) - HttpResponse response = payload?.httpResponse - if (!response) { - throw new OpenstackResourceNotFoundException("Unable to find Swift object ${container}/${name} in region ${region}") - } - - // Testing against HTTP OK is a bit limited, but we want to actually read the response - if (response.status != HttpStatus.OK.value()) { - throw new OpenstackProviderException("Failed to read the Swift object ${container}/${name} in region ${region}; status=${response.status}") - } - - // TODO consider checking content type before reading the response to ensure it is text - return response.getEntity(String) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/config/OpenstackConfigurationProperties.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/config/OpenstackConfigurationProperties.groovy deleted file mode 100644 index 37789e421ab..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/config/OpenstackConfigurationProperties.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.config - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import groovy.transform.ToString - -import java.util.concurrent.TimeUnit - -@ToString(includeNames = true) -class OpenstackConfigurationProperties { - - @ToString(includeNames = true, excludes = "password") - static class ManagedAccount { - String name - String environment - String accountType - String username - String password - String projectName - String domainName - String authUrl - List regions - Boolean insecure - String heatTemplatePath - LbaasConfig lbaas = new LbaasConfig() - StackConfig stack = new StackConfig() - ConsulConfig consul - String userDataFile - } - - static class LbaasConfig { - int pollTimeout = 60 // seconds - int pollInterval = 5 // seconds - } - - static class StackConfig { - int pollTimeout = TimeUnit.MINUTES.toSeconds(10).toInteger() - int pollInterval = 5 // seconds - } - - List accounts = [] -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/controllers/OpenstackImageLookupController.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/controllers/OpenstackImageLookupController.groovy deleted file mode 100644 index a9f4e3036e2..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/controllers/OpenstackImageLookupController.groovy +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.controllers - -import com.netflix.spinnaker.cats.mem.InMemoryCache -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import com.netflix.spinnaker.clouddriver.openstack.provider.ImageProvider -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.web.bind.annotation.RequestMapping -import org.springframework.web.bind.annotation.RequestMethod -import org.springframework.web.bind.annotation.RequestParam -import org.springframework.web.bind.annotation.RestController - -import java.util.regex.Pattern -import java.util.stream.Collectors - -@Slf4j -@RestController -@RequestMapping("/openstack/images") -class OpenstackImageLookupController { - - final ImageProvider imageProvider - - @Autowired - OpenstackImageLookupController(final ImageProvider imageProvider) { - this.imageProvider = imageProvider - } - - @RequestMapping(value = '/find', method = RequestMethod.GET) - Set<OpenstackImage> find(@RequestParam(required = false) String account, @RequestParam(required = false) String q, @RequestParam(required = false) String region) { - Set<OpenstackImage> result - Map<String, Set<OpenstackImage>> imageMap = this.imageProvider.listImagesByAccount() - if (!imageMap) { - result = Collections.emptySet() - } else { - if (account) { - result = imageMap.get(account) - } else { - result = imageMap.entrySet().stream() - .map{it.value} - .flatMap{it.stream()} - .collect(Collectors.toSet()) - .sort { OpenstackImage a, OpenstackImage b -> a.name <=> b.name } - } - - if (region) { - result = result.findAll { it.region == region } - } - - Pattern pattern = resolveQueryToPattern(q) - log.info('filtering images using pattern {}', pattern) - result = result.findAll { it.name != null && pattern.matcher(it.name).matches() } - } - - result - } - - Pattern resolveQueryToPattern(String query) { - String glob = query?.trim() ?: '*' - // Wrap in '*' if there are no glob-style characters in the query string. - if (!glob.contains('*') && !glob.contains('?') && !glob.contains('[') && !glob.contains('\\')) { - glob = "*${glob}*" - } - new InMemoryCache.Glob(glob).toPattern() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/OpenstackServerGroupNameResolver.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/OpenstackServerGroupNameResolver.groovy deleted file mode 100644 index 56272397234..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/OpenstackServerGroupNameResolver.groovy +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import org.openstack4j.model.heat.Stack - -import java.text.SimpleDateFormat - -class OpenstackServerGroupNameResolver extends AbstractServerGroupNameResolver { - private static final String PHASE = "DEPLOY" - - private final String region - private final OpenstackCredentials credentials - - OpenstackServerGroupNameResolver(OpenstackCredentials credentials, String region) { - this.credentials = credentials - this.region = region - } - - @Override - String getPhase() { - return PHASE - } - - @Override - String getRegion() { - return region - } - - @Override - List getTakenSlots(String clusterName) { - def stacks = credentials.provider.listStacks(region) - - return stacks.findResults { Stack stack -> - def names = Names.parseName(stack.name) - - if (names.cluster == clusterName) { - return new AbstractServerGroupNameResolver.TakenSlot( - serverGroupName: stack.name, - sequence: names.sequence, - createdTime: new Date(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss").parse(stack.creationTime).getTime()) - ) - } else { - return null - } - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/CloneOpenstackAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/CloneOpenstackAtomicOperationConverter.groovy deleted file mode 100644 index ed72bec4fe8..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/CloneOpenstackAtomicOperationConverter.groovy +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.CloneOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.CloneOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.CLONE_SERVER_GROUP) -@Component -class CloneOpenstackAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport{ - @Override - AtomicOperation convertOperation(Map input) { - new CloneOpenstackAtomicOperation(convertDescription(input)) - } - - @Override - CloneOpenstackAtomicOperationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, CloneOpenstackAtomicOperationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/OpenstackAtomicOperationConverterHelper.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/OpenstackAtomicOperationConverterHelper.groovy deleted file mode 100644 index aa9546c227a..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/OpenstackAtomicOperationConverterHelper.groovy +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters - -import com.fasterxml.jackson.databind.DeserializationFeature -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport - -class OpenstackAtomicOperationConverterHelper { - - static <T extends OpenstackAtomicOperationDescription> T convertDescription(Map input, - AbstractAtomicOperationsCredentialsSupport credentialsSupport, - Class<T> targetDescriptionType) { - - // Deck sends in the account name as 'credentials', but that name means something else here, - // so remap it to 'account' before converting - if (!input.account) { - input.account = input.credentials - } - // Remove this so it is not confused with the actual credentials object - input.remove('credentials') - - // Save the credentials off to re-assign after ObjectMapper does its work - def credentials = credentialsSupport.getCredentialsObject(input.account as String).getCredentials() - - T converted = credentialsSupport.getObjectMapper() - .copy() - .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) - .convertValue(input, targetDescriptionType) - - converted.credentials = (OpenstackCredentials) credentials - - converted - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy deleted file mode 100644 index 808f7567cb0..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/discovery/DisableInstancesInDiscoveryConverter.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.discovery - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.discovery.DisableInstancesInDiscoveryOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.DISABLE_INSTANCES_IN_DISCOVERY) -@Component -class DisableInstancesInDiscoveryConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new DisableInstancesInDiscoveryOperation(convertDescription(input)) - } - - @Override - OpenstackInstancesDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackInstancesDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy deleted file mode 100644 index c5b5bde6547..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/discovery/EnableInstancesInDiscoveryConverter.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.discovery - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.discovery.EnableInstancesInDiscoveryOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.ENABLE_INSTANCES_IN_DISCOVERY) -@Component -class EnableInstancesInDiscoveryConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new EnableInstancesInDiscoveryOperation(convertDescription(input)) - } - - @Override - OpenstackInstancesDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackInstancesDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/DeregisterOpenstackInstancesAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/DeregisterOpenstackInstancesAtomicOperationConverter.groovy deleted file mode 100644 index 5fd0e295d2c..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/DeregisterOpenstackInstancesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance.DeregisterOpenstackInstancesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) -@Component("deregisterOpenstackInstancesDescription") -class DeregisterOpenstackInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DeregisterOpenstackInstancesAtomicOperation(convertDescription(input)) - } - - @Override - OpenstackInstancesRegistrationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackInstancesRegistrationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/RebootOpenstackInstancesAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/RebootOpenstackInstancesAtomicOperationConverter.groovy deleted file mode 100644 index c574977c1bf..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/RebootOpenstackInstancesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance.RebootOpenstackInstancesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.REBOOT_INSTANCES) -@Component("rebootOpenstackInstancesDescription") -class RebootOpenstackInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new RebootOpenstackInstancesAtomicOperation(convertDescription(input)) - } - - @Override - OpenstackInstancesDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackInstancesDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/RegisterOpenstackInstancesAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/RegisterOpenstackInstancesAtomicOperationConverter.groovy deleted file mode 100644 index 924594a50ff..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/RegisterOpenstackInstancesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance.RegisterOpenstackInstancesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) -@Component("registerOpenstackInstancesDescription") -class RegisterOpenstackInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new RegisterOpenstackInstancesAtomicOperation(convertDescription(input)) - } - - @Override - OpenstackInstancesRegistrationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackInstancesRegistrationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/TerminateOpenstackInstancesAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/TerminateOpenstackInstancesAtomicOperationConverter.groovy deleted file mode 100644 index 2f88bc0a19c..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/instance/TerminateOpenstackInstancesAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance.TerminateOpenstackInstancesAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.TERMINATE_INSTANCES) -@Component("terminateOpenstackInstancesDescription") -class TerminateOpenstackInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new TerminateOpenstackInstancesAtomicOperation(convertDescription(input)) - } - - @Override - OpenstackInstancesDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackInstancesDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/loadbalancer/DeleteOpenstackLoadbalancerAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/loadbalancer/DeleteOpenstackLoadbalancerAtomicOperationConverter.groovy deleted file mode 100644 index a7f1c2be9b1..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/loadbalancer/DeleteOpenstackLoadbalancerAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.DeleteOpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer.DeleteOpenstackLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.DELETE_LOAD_BALANCER) -@Component("deleteOpenstackLoadBalancerDescription") -class DeleteOpenstackLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DeleteOpenstackLoadBalancerAtomicOperation(convertDescription(input)) - } - - @Override - DeleteOpenstackLoadBalancerDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, DeleteOpenstackLoadBalancerDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationConverter.groovy deleted file mode 100644 index c8c8b721214..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationConverter.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer.UpsertOpenstackLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.UPSERT_LOAD_BALANCER) -@Component -class UpsertOpenstackLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new UpsertOpenstackLoadBalancerAtomicOperation(convertDescription(input)) - } - - @Override - OpenstackLoadBalancerDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackLoadBalancerDescription) - } -} - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/securitygroup/DeleteOpenstackSecurityGroupAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/securitygroup/DeleteOpenstackSecurityGroupAtomicOperationConverter.groovy deleted file mode 100644 index 4fac7bba21e..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/securitygroup/DeleteOpenstackSecurityGroupAtomicOperationConverter.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.DeleteOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup.DeleteOpenstackSecurityGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - - -@OpenstackOperation(AtomicOperations.DELETE_SECURITY_GROUP) -@Component -class DeleteOpenstackSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DeleteOpenstackSecurityGroupAtomicOperation(convertDescription(input)) - } - - @Override - Object convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, DeleteOpenstackSecurityGroupDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/securitygroup/UpsertOpenstackSecurityGroupAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/securitygroup/UpsertOpenstackSecurityGroupAtomicOperationConverter.groovy deleted file mode 100644 index a60a7642668..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/securitygroup/UpsertOpenstackSecurityGroupAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup.UpsertOpenstackSecurityGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.UPSERT_SECURITY_GROUP) -@Component -class UpsertOpenstackSecurityGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new UpsertOpenstackSecurityGroupAtomicOperation(convertDescription(input)) - } - - @Override - UpsertOpenstackSecurityGroupDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, UpsertOpenstackSecurityGroupDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DeployOpenstackAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DeployOpenstackAtomicOperationConverter.groovy deleted file mode 100644 index b17da6f2760..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DeployOpenstackAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.DeployOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.CREATE_SERVER_GROUP) -@Component("deployOpenstackDescription") -class DeployOpenstackAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DeployOpenstackAtomicOperation(convertDescription(input)) - } - - @Override - DeployOpenstackAtomicOperationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, DeployOpenstackAtomicOperationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DestroyOpenstackAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DestroyOpenstackAtomicOperationConverter.groovy deleted file mode 100644 index 7f86cbcac74..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DestroyOpenstackAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.DestroyOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.DESTROY_SERVER_GROUP) -@Component -class DestroyOpenstackAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new DestroyOpenstackAtomicOperation(convertDescription(input)) - } - - @Override - OpenstackServerGroupAtomicOperationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, OpenstackServerGroupAtomicOperationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DisableOpenstackAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DisableOpenstackAtomicOperationConverter.groovy deleted file mode 100644 index 62682c09b70..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DisableOpenstackAtomicOperationConverter.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.DisableOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.DISABLE_SERVER_GROUP) -@Component -class DisableOpenstackAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new DisableOpenstackAtomicOperation(convertDescription(input)) - } - - @Override - EnableDisableAtomicOperationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, EnableDisableAtomicOperationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/EnableOpenstackAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/EnableOpenstackAtomicOperationConverter.groovy deleted file mode 100644 index 389894c9f03..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/EnableOpenstackAtomicOperationConverter.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.EnableOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.ENABLE_SERVER_GROUP) -@Component -class EnableOpenstackAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new EnableOpenstackAtomicOperation(convertDescription(input)) - } - - @Override - EnableDisableAtomicOperationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, EnableDisableAtomicOperationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/ResizeOpenstackAtomicOperationConverter.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/ResizeOpenstackAtomicOperationConverter.groovy deleted file mode 100644 index 83f7303b1c6..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/ResizeOpenstackAtomicOperationConverter.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.converters.OpenstackAtomicOperationConverterHelper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ResizeOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.ResizeOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.RESIZE_SERVER_GROUP) -@Component -class ResizeOpenstackAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - @Override - AtomicOperation convertOperation(Map input) { - new ResizeOpenstackAtomicOperation(convertDescription(input)) - } - - @Override - ResizeOpenstackAtomicOperationDescription convertDescription(Map input) { - OpenstackAtomicOperationConverterHelper.convertDescription(input, this, ResizeOpenstackAtomicOperationDescription) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/OpenstackAtomicOperationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/OpenstackAtomicOperationDescription.groovy deleted file mode 100644 index 235a7015263..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/OpenstackAtomicOperationDescription.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description - -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import groovy.transform.AutoClone -import groovy.transform.Canonical - -// Region, credentials name and associated openstack account -@AutoClone -@Canonical -class OpenstackAtomicOperationDescription implements DeployDescription { - String region - String account - OpenstackCredentials credentials -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/instance/OpenstackInstancesDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/instance/OpenstackInstancesDescription.groovy deleted file mode 100644 index 425d0178039..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/instance/OpenstackInstancesDescription.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription - -class OpenstackInstancesDescription extends OpenstackServerGroupAtomicOperationDescription { - List instanceIds - - String getInstances() { - instanceIds?.join(", ") - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/instance/OpenstackInstancesRegistrationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/instance/OpenstackInstancesRegistrationDescription.groovy deleted file mode 100644 index c948a69fab0..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/instance/OpenstackInstancesRegistrationDescription.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
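A note on the `@AutoClone`/`@Canonical` pair used on the description classes above; a minimal sketch of what the two transforms generate (the class here is illustrative):

```groovy
import groovy.transform.AutoClone
import groovy.transform.Canonical

@AutoClone
@Canonical
class Desc {
  String region
  String account
}

def original = new Desc('east', 'my-account')  // positional constructor from @Canonical
def copy = original.clone()                    // property-copying clone() from @AutoClone
assert original == copy    // equals/hashCode from @Canonical
assert !original.is(copy)  // but a distinct instance
```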
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance - -class OpenstackInstancesRegistrationDescription extends OpenstackInstancesDescription { - int weight = 1 - List loadBalancerIds -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/loadbalancer/DeleteOpenstackLoadBalancerDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/loadbalancer/DeleteOpenstackLoadBalancerDescription.groovy deleted file mode 100644 index 5879a5fc967..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/loadbalancer/DeleteOpenstackLoadBalancerDescription.groovy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription - -class DeleteOpenstackLoadBalancerDescription extends OpenstackAtomicOperationDescription { - String id -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/loadbalancer/OpenstackLoadBalancerDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/loadbalancer/OpenstackLoadBalancerDescription.groovy deleted file mode 100644 index ea178255141..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/loadbalancer/OpenstackLoadBalancerDescription.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class OpenstackLoadBalancerDescription extends OpenstackAtomicOperationDescription { - String id - String name - String subnetId - Algorithm algorithm - String networkId - List securityGroups - List listeners - HealthMonitor healthMonitor - - enum Algorithm { - ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP - } - - static class Listener { - enum ListenerType { - HTTP('HTTP'), - TERMINATED_HTTPS('HTTP'), - TCP('TCP') - - String internalProtocol - - ListenerType(String protocol) { - this.internalProtocol = protocol - } - } - - Integer externalPort - ListenerType externalProtocol - Integer internalPort - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/securitygroup/DeleteOpenstackSecurityGroupDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/securitygroup/DeleteOpenstackSecurityGroupDescription.groovy deleted file mode 100644 index e71af9bed64..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/securitygroup/DeleteOpenstackSecurityGroupDescription.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription - -/** - * Description for deleting security groups. - */ -class DeleteOpenstackSecurityGroupDescription extends OpenstackAtomicOperationDescription { - String id -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/securitygroup/UpsertOpenstackSecurityGroupDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/securitygroup/UpsertOpenstackSecurityGroupDescription.groovy deleted file mode 100644 index f86553614ad..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/securitygroup/UpsertOpenstackSecurityGroupDescription.groovy +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
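The `ListenerType` enum removed above encodes one non-obvious mapping: TLS is terminated at the load balancer, so a `TERMINATED_HTTPS` listener talks plain HTTP to its pool members. A reduced sketch of just that mapping:

```groovy
enum ListenerType {
  HTTP('HTTP'),
  TERMINATED_HTTPS('HTTP'),  // TLS ends at the load balancer; members speak HTTP
  TCP('TCP')

  String internalProtocol

  ListenerType(String protocol) {
    this.internalProtocol = protocol
  }
}

assert ListenerType.TERMINATED_HTTPS.internalProtocol == 'HTTP'
```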
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription - -/** - * Description for creating security groups with rules - */ -class UpsertOpenstackSecurityGroupDescription extends OpenstackAtomicOperationDescription { - - String id - String name - String description - List rules - - static class Rule { - // Expected values: TCP, UDP, & ICMP - String ruleType - - // Used by TCP and UDP rules - Integer fromPort - Integer toPort - - // Used by ICMP rules - Integer icmpType - Integer icmpCode - - // Rule applies to either the given CIDR or another security group - String remoteSecurityGroupId - String cidr - } - - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/CloneOpenstackAtomicOperationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/CloneOpenstackAtomicOperationDescription.groovy deleted file mode 100644 index d30f0b3a1d3..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/CloneOpenstackAtomicOperationDescription.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import groovy.transform.Canonical - -class CloneOpenstackAtomicOperationDescription extends DeployOpenstackAtomicOperationDescription { - OpenstackCloneSource source - - @Canonical - static class OpenstackCloneSource { - String serverGroupName - String region - } - -} - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/DeployOpenstackAtomicOperationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/DeployOpenstackAtomicOperationDescription.groovy deleted file mode 100644 index 49c83766c88..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/DeployOpenstackAtomicOperationDescription.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
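The `Rule` fields in the deleted upsert description above combine in three patterns: TCP/UDP rules carry a port range, ICMP rules carry a type and code, and every rule is scoped to either a CIDR or another security group. A sketch with illustrative values:

```groovy
class Rule {
  String ruleType               // TCP, UDP, or ICMP
  Integer fromPort, toPort      // TCP/UDP rules only
  Integer icmpType, icmpCode    // ICMP rules only
  String remoteSecurityGroupId  // scope: either another security group...
  String cidr                   // ...or a CIDR block
}

def ssh = new Rule(ruleType: 'TCP', fromPort: 22, toPort: 22, cidr: '10.0.0.0/8')
def ping = new Rule(ruleType: 'ICMP', icmpType: 8, icmpCode: 0,   // ICMP type 8 is echo request
                    remoteSecurityGroupId: 'some-security-group-id')
assert ssh.cidr && !ping.cidr
```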
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class DeployOpenstackAtomicOperationDescription extends OpenstackAtomicOperationDescription implements DeployDescription { - String application - String stack - String freeFormDetails - ServerGroupParameters serverGroupParameters - Boolean disableRollback = false - Integer timeoutMins = 5 - String userDataType - String userData -} - - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/EnableDisableAtomicOperationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/EnableDisableAtomicOperationDescription.groovy deleted file mode 100644 index 33edb0ffd60..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/EnableDisableAtomicOperationDescription.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class EnableDisableAtomicOperationDescription extends OpenstackAtomicOperationDescription implements EnableDisableDescriptionTrait, DeployDescription { -} - - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/MemberData.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/MemberData.groovy deleted file mode 100644 index 6aa75f0a5b2..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/MemberData.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import groovy.transform.Canonical - -/** - * This is used to represent an OS::Neutron::LBaaS::PoolMember resource. These resources - * are dynamically added to the heat template servergroup_resource_member.yaml. - */ -@Canonical -class MemberData { - String loadBalancerName - String listenerShortId - String poolId - String externalPort - String internalPort - String subnetId -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/OpenstackServerGroupAtomicOperationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/OpenstackServerGroupAtomicOperationDescription.groovy deleted file mode 100644 index 01fec855800..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/OpenstackServerGroupAtomicOperationDescription.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import groovy.transform.AutoClone -import groovy.transform.Canonical - -@AutoClone -@Canonical -class OpenstackServerGroupAtomicOperationDescription extends OpenstackAtomicOperationDescription implements DeployDescription { - String serverGroupName -} - - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ResizeOpenstackAtomicOperationDescription.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ResizeOpenstackAtomicOperationDescription.groovy deleted file mode 100644 index 36d9335d7d2..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ResizeOpenstackAtomicOperationDescription.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
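For context on `MemberData` above: each entry is rendered into the dynamically built heat template as one `OS::Neutron::LBaaS::PoolMember` resource. A rough sketch of that mapping; the property names follow the Neutron LBaaS v2 heat resource and should be treated as an assumption rather than the template's exact contents:

```groovy
// One MemberData entry, using the field names from the deleted class.
def member = [loadBalancerName: 'lb1', listenerShortId: 'abc123',
              poolId: 'pool-123', externalPort: '80',
              internalPort: '8080', subnetId: 'subnet-abc']

// Approximate heat resource the template would declare for this member.
def poolMemberResource = [
  type      : 'OS::Neutron::LBaaS::PoolMember',
  properties: [
    pool         : member.poolId,
    protocol_port: member.internalPort as Integer,
    subnet       : member.subnetId,
    // The member address is resolved per server inside the template,
    // e.g. via get_attr on the server resource.
    address      : '{ get_attr: [server, first_address] }',
  ],
]

assert poolMemberResource.properties.protocol_port == 8080
```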
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import groovy.transform.Canonical - -@Canonical -class ResizeOpenstackAtomicOperationDescription extends OpenstackServerGroupAtomicOperationDescription { - - Capacity capacity = new Capacity() - - static class Capacity { - int min - int max - int desired - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ServerGroupParameters.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ServerGroupParameters.groovy deleted file mode 100644 index dd8f858e752..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ServerGroupParameters.groovy +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import com.fasterxml.jackson.annotation.JsonCreator -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.ServerGroupConstants -import groovy.transform.AutoClone -import groovy.transform.Canonical - -/** - * This class is a wrapper for parameters that are passed to an openstack heat template - * when auto scaling groups are created. - * - * This class only contains values that are directly sent to the heat templates as parameters. 
- */ -@AutoClone -@Canonical -class ServerGroupParameters { - - String instanceType - String image - Integer internalPort - Integer maxSize - Integer minSize - Integer desiredSize - String networkId - String subnetId - List loadBalancers - List securityGroups - AutoscalingType autoscalingType - Scaler scaleup - Scaler scaledown - String rawUserData - String sourceUserDataType - String sourceUserData - Map tags - String floatingNetworkId - List zones - Map schedulerHints - - // This is only used when migrating a stack from a previous version of clouddriver - static String resolveResourceFilename(Map paramsMap) { - return paramsMap.get(ServerGroupConstants.LEGACY_RESOURCE_FILENAME_KEY) ?: ServerGroupConstants.SUBTEMPLATE_FILE - } - String resourceFilename - - static final ObjectMapper objectMapper = new ObjectMapper() - - Map toParamsMap() { - def params = [ - flavor : instanceType, - image : image, - max_size : maxSize?.toString() ?: null, - min_size : minSize?.toString() ?: null, - desired_size : desiredSize?.toString() ?: null, - network_id : networkId, - subnet_id : subnetId, - load_balancers : loadBalancers?.join(',') ?: null, - security_groups : securityGroups?.join(',') ?: null, - autoscaling_type : autoscalingType?.toString() ?: null, - scaleup_cooldown : scaleup?.cooldown?.toString() ?: null, - scaleup_adjustment : scaleup?.adjustment?.toString() ?: null, - scaleup_period : scaleup?.period?.toString() ?: null, - scaleup_threshold : scaleup?.threshold?.toString() ?: null, - scaledown_cooldown : scaledown?.cooldown?.toString() ?: null, - scaledown_adjustment : scaledown?.adjustment?.toString() ?: null, - scaledown_period : scaledown?.period?.toString() ?: null, - scaledown_threshold : scaledown?.threshold?.toString() ?: null, - source_user_data_type: sourceUserDataType ?: null, - source_user_data : sourceUserData ?: null, - tags : objectMapper.writeValueAsString(tags ?: [:]) ?: null, - user_data : rawUserData ?: null, - ] - if (floatingNetworkId) { - params << [floating_network_id: floatingNetworkId] - } - - // This is only used when migrating a stack from a previous version of clouddriver - if (resourceFilename) { - params << [resource_filename: resourceFilename] - } - - // These are new properties. We include them conditionally so as not to mess up resize operations on older, pre-existing stacks. - if (zones) { - params << [zones: zones.join(',')] - } - if (schedulerHints) { - params << [scheduler_hints: objectMapper.writeValueAsString(schedulerHints ?: [:])] - } - - params - } - - static ServerGroupParameters fromParamsMap(Map params) { - new ServerGroupParameters( - instanceType: params.get('flavor'), - image: params.get('image'), - maxSize: params.get('max_size')?.toInteger(), - minSize: params.get('min_size')?.toInteger(), - desiredSize: params.get('desired_size')?.toInteger(), - floatingNetworkId: params.get('floating_network_id'), - networkId: params.get('network_id'), - subnetId: params.get('subnet_id'), - loadBalancers: unescapePythonUnicodeJsonList(params.get('load_balancers')), - securityGroups: unescapePythonUnicodeJsonList(params.get('security_groups')), - autoscalingType: params.get('autoscaling_type') ? 
AutoscalingType.fromString(params.get('autoscaling_type')) : null, - scaleup: new Scaler( - cooldown: params.get('scaleup_cooldown')?.toInteger(), - adjustment: params.get('scaleup_adjustment')?.toInteger(), - period: params.get('scaleup_period')?.toInteger(), - threshold: params.get('scaleup_threshold')?.toInteger() - ), - scaledown: new Scaler( - cooldown: params.get('scaledown_cooldown')?.toInteger(), - adjustment: params.get('scaledown_adjustment')?.toInteger(), - period: params.get('scaledown_period')?.toInteger(), - threshold: params.get('scaledown_threshold')?.toInteger() - ), - rawUserData: params.get('user_data'), - tags: unescapePythonUnicodeJsonMap(params.get('tags') ?: '{}'), - sourceUserDataType: params.get('source_user_data_type'), - sourceUserData: params.get('source_user_data'), - zones: unescapePythonUnicodeJsonList(params.get('zones') ), - schedulerHints: unescapePythonUnicodeJsonMap(params.get('scheduler_hints') ?: '{}'), - resourceFilename: params.get('resource_filename') - ) - } - - /** - * Stack parameters of type 'comma_delimited_list' come back as a unicode json string. We need to split that up. - * - * TODO See https://bugs.launchpad.net/heat/+bug/1613415 - * - * @param string - * @return - */ - static List unescapePythonUnicodeJsonList(String string) { - List result = string?.split(",")?.collect { s -> - s.replace("u'", "").replace("'", "").replace("[", "").replace("]", "").replaceAll("([ ][ ]*)", "") - } ?: [] - return result - } - - /** - * Some stack parameters of type 'json' come back as a unicode json string. We need to split that up. - * - * TODO See https://bugs.launchpad.net/heat/+bug/1613415 - * - * @param string - * @return - */ - static Map unescapePythonUnicodeJsonMap(String string) { - String parsed = string - ?.replaceAll(':\\p{javaWhitespace}*None\\p{javaWhitespace}*([,}])', ': null$1') // first replace python None with json null - ?.replaceAll("u'(.*?)'", '"$1"') // replace u'python strings' with "python strings" (actually json strings) - ?.replaceAll('u"(.*?\'.*?)"', '"$1"') // replace u"python strings containing a ' char" with "python strings containing a ' char" (actually json) - def m = objectMapper.readValue(parsed, Map) - def result = m.collectEntries { k, v -> - if (v instanceof Collection || v instanceof Map) { - return [(k): objectMapper.writeValueAsString(v)] - } - [(k): v] - } - return result - } - - /** - * Scaleup/scaledown parameters for a server group - */ - @AutoClone - @Canonical - static class Scaler { - Integer cooldown - Integer adjustment - Integer period - Integer threshold - } - - /** - * CPU: average cpu utilization across server group. meter name is cpu_util. - * NETWORK_INCOMING: average incoming bytes/second across server group. meter name is network.incoming.bytes.rate - * NETWORK_OUTGOING: average outgoing bytes/second across server group. 
meter name is network.outgoing.bytes.rate - */ - static enum AutoscalingType { - CPU('cpu_util'), NETWORK_INCOMING('network.incoming.bytes.rate'), NETWORK_OUTGOING('network.outgoing.bytes.rate') - - String meterName - - AutoscalingType(String meterName) { - this.meterName = meterName - } - - @Override - String toString() { - meterName - } - - String jsonValue() { - fromMeter(meterName) - } - - @JsonCreator - static String fromMeter(String meter) { - switch (meter) { - case CPU.meterName: - CPU.name().toLowerCase() - break - case NETWORK_INCOMING.meterName: - NETWORK_INCOMING.name().toLowerCase() - break - case NETWORK_OUTGOING.meterName: - NETWORK_OUTGOING.name().toLowerCase() - break - default: - throw new IllegalArgumentException("Invalid enum meter name: $meter") - } - } - - static AutoscalingType fromString(String value) { - switch (value) { - case CPU.toString(): - CPU - break - case NETWORK_INCOMING.toString(): - NETWORK_INCOMING - break - case NETWORK_OUTGOING.toString(): - NETWORK_OUTGOING - break - default: - throw new IllegalArgumentException("Invalid enum meter name: $value") - } - } - - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/UserDataType.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/UserDataType.groovy deleted file mode 100644 index a947a59e304..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/UserDataType.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import com.fasterxml.jackson.annotation.JsonCreator - -enum UserDataType { - URL('URL'), TEXT('Text'), SWIFT('Swift') - - String type - - UserDataType(String type) { - this.type = type - } - - @Override - String toString() { - type - } - - @JsonCreator - static String fromType(String type) { - switch (type) { - case URL.type: - URL.name().toLowerCase() - break - case TEXT.type: - TEXT.name().toLowerCase() - break - case SWIFT.type: - SWIFT.name().toLowerCase() - break - default: - throw new IllegalArgumentException("Invalid enum type: $type") - } - } - - static UserDataType fromString(String value) { - switch (value) { - case URL.toString(): - URL - break - case TEXT.toString(): - TEXT - break - case SWIFT.toString(): - SWIFT - break - default: - throw new IllegalArgumentException("Invalid enum type: $value") - } - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackOperationException.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackOperationException.groovy deleted file mode 100644 index 88da0a44610..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackOperationException.groovy +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.exception - -import groovy.transform.InheritConstructors -import org.openstack4j.model.common.ActionResponse - -@InheritConstructors -class OpenstackOperationException extends RuntimeException { - OpenstackOperationException(String operation, String message) { - super("$operation failed: ${message}") - } - - OpenstackOperationException(String operation, Exception e) { - super("$operation failed: ${e.message}", e) - } - - OpenstackOperationException(ActionResponse actionResponse, String operation) { - super("$operation failed: fault $actionResponse.fault with code $actionResponse.code") - } - - OpenstackOperationException(String account, String operation, Exception e) { - super("$operation for account $account failed: ${e.message}", e) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackProviderException.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackProviderException.groovy deleted file mode 100644 index 08eb9eccb25..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackProviderException.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.exception - -import groovy.transform.InheritConstructors -import org.openstack4j.model.common.ActionResponse - -@InheritConstructors -class OpenstackProviderException extends RuntimeException { - OpenstackProviderException(ActionResponse actionResponse) { - super("Action request failed with fault $actionResponse.fault and code $actionResponse.code") - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackResourceNotFoundException.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackResourceNotFoundException.groovy deleted file mode 100644 index 563a2fdffe6..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/exception/OpenstackResourceNotFoundException.groovy +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.exception - -import groovy.transform.InheritConstructors - -@InheritConstructors -class OpenstackResourceNotFoundException extends OpenstackProviderException {} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/OpenstackUserDataProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/OpenstackUserDataProvider.groovy deleted file mode 100644 index a51b4561209..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/OpenstackUserDataProvider.groovy +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.transform.PackageScope -import groovy.util.logging.Slf4j - -// TODO (jshimek) Refactor the LocalFileUserDataProvider the AWS driver uses to be more driver agnostic -// See https://github.com/spinnaker/spinnaker/issues/1274 -/** - * Provides the common user data from a local file to be applied to all OpenStack deployments. - * - * Any custom user data specified for each deployment will be appended to common user data, allowing custom user data - * to override the common user data. - */ -@Slf4j -public class OpenstackUserDataProvider { - - final OpenstackNamedAccountCredentials credentials - - OpenstackUserDataProvider(OpenstackNamedAccountCredentials credentials) { - this.credentials = credentials - } - - /** - * Returns the combined common and custom user data, or an empty string if none is found. - */ - String getUserData(final String serverGroupName, final String region, final String customUserData) { - - String userDataFile = credentials.getUserDataFile() - String rawUserData = getFileContents(userDataFile) - String commonUserData = replaceTokens(rawUserData, serverGroupName, region) - - - StringBuilder userData = new StringBuilder(); - if (commonUserData) { - userData.append(commonUserData) - userData.append('\n') - } - if (customUserData) { - userData.append(customUserData) - } - - userData.toString() - } - - /** - * Returns the contents of a file or an empty string if the file doesn't exist. - */ - @PackageScope - String getFileContents(String filename) { - - if (!filename) { - return '' - } - - try { - File file = new File(filename) - String contents = file.getText('UTF-8') - if (contents.length() && !contents.endsWith("\n")) { - contents = contents + '\n' - } - return contents - } catch (IOException e) { - log.warn("Failed to read user data file ${filename}; ${e.message}") - return '' - } - } - /** - * Returns the user data with the tokens replaced.
- * - * Currently supports the following tokens: - * - * %%account%% the name of the account - * %%accounttype%% the accountType of the account - * %%env%% the environment of the account - * %%app%% the name of the app - * %%region%% the deployment region - * %%group%% the name of the server group - * %%autogrp%% the name of the server group - * %%cluster%% the name of the cluster - * %%stack%% the stack component of the cluster name - * %%detail%% the detail component of the cluster name - * %%launchconfig%% the name of the launch configuration (server group name) - */ - private String replaceTokens(String rawUserData, String serverGroupName, String region) { - - if (!rawUserData) { - return '' - } - - Names names = Names.parseName(serverGroupName) - - // Replace the tokens & return the result - String result = rawUserData - .replace('%%account%%', credentials.name ?: '') - .replace('%%accounttype%%', credentials.accountType ?: '') - .replace('%%env%%', credentials.environment ?: '') - .replace('%%app%%', names.app ?: '') - .replace('%%region%%', region ?: '') - .replace('%%group%%', names.group ?: '') - .replace('%%autogrp%%', names.group ?: '') - .replace('%%cluster%%', names.cluster ?: '') - .replace('%%stack%%', names.stack ?: '') - .replace('%%detail%%', names.detail ?: '') - .replace('%%launchconfig%%', serverGroupName ?: '') - - result - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/StackPoolMemberAware.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/StackPoolMemberAware.groovy deleted file mode 100644 index f4b0f8b98a0..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/StackPoolMemberAware.groovy +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.MemberData -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 - -trait StackPoolMemberAware { - - /** - * Build pool member resources for the given load balancers. 
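[Editor's note: a small, hypothetical illustration of the %%token%% expansion replaceTokens performs above; the server group name follows Frigga naming, which supplies the app/stack/cluster components:

  // given serverGroupName = 'myapp-teststack-v001' and region = 'RegionOne',
  // a common user data line such as
  //   echo "%%app%% / %%stack%% / %%cluster%% in %%region%%"
  // is rewritten to
  //   echo "myapp / teststack / myapp-teststack in RegionOne"
]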
- * @param credentials - * @param region - * @param subnetId - * @param lbIds - * @param portParser - * @return - */ - List buildMemberData(OpenstackCredentials credentials, String region, String subnetId, List lbIds, Closure portParser) { - lbIds.collectMany { loadBalancerId -> - LoadBalancerV2 loadBalancer = credentials.provider.getLoadBalancer(region, loadBalancerId) - if (!loadBalancer) { - throw new OpenstackResourceNotFoundException("Could not find load balancer: $loadBalancerId in region: $region") - } - - loadBalancer.listeners.collect { item -> - ListenerV2 listener = credentials.provider.getListener(region, item.id) - String listenerShortId - try { - listenerShortId = listener.id[0, listener.id.indexOf("-")] - } catch (StringIndexOutOfBoundsException e) { - throw new RuntimeException("Listener ID: ${listener.id}", e) - } - String internalPort = portParser(listener.description).internalPort - String poolId = listener.defaultPoolId - new MemberData(loadBalancerName: loadBalancer.name, listenerShortId: listenerShortId, subnetId: subnetId ?: loadBalancer.vipSubnetId, externalPort: listener.protocolPort.toString(), internalPort: internalPort, poolId: poolId) - } - } - } - - /** - * Build pool member resources for the given load balancers. - * @param credentials - * @param region - * @param lbIds - * @param portParser - * @return - */ - List buildMemberData(OpenstackCredentials credentials, String region, List lbIds, Closure portParser) { - buildMemberData(credentials, region, null, lbIds, portParser) - } - - /** - * Convert a list of pool members to an embeddable heat template. - * @param memberData - * @return - */ - Map buildPoolMemberTemplate(List memberData) { - Map parameters = [address: [type: "string", description: "Server address for autoscaling group resource"]] - Map resources = memberData.collectEntries { - [ - ("member-$it.loadBalancerName-$it.listenerShortId-$it.externalPort-$it.internalPort".toString()): [ - type : "OS::Neutron::LBaaS::PoolMember", - properties: [ - address : [get_param: "address"], - pool : it.poolId, - protocol_port: it.internalPort, - subnet : it.subnetId - ] - ] - ] - } - Map memberTemplate = [ - heat_template_version: "2016-04-08", - description : "Pool members for autoscaling group resource", - parameters : parameters, - resources : resources] - return memberTemplate - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/AbstractEnableDisableInstancesInDiscoveryOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/AbstractEnableDisableInstancesInDiscoveryOperation.groovy deleted file mode 100644 index f09c7d13093..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/AbstractEnableDisableInstancesInDiscoveryOperation.groovy +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
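[Editor's note: roughly the map that buildPoolMemberTemplate above emits for one member, before it is attached to the stack as the member subtemplate; every identifier here is illustrative:

  [heat_template_version: "2016-04-08",
   description          : "Pool members for autoscaling group resource",
   parameters           : [address: [type: "string", description: "Server address for autoscaling group resource"]],
   resources            : ["member-mylb-abc123-80-8080": [
     type      : "OS::Neutron::LBaaS::PoolMember",
     properties: [address: [get_param: "address"], pool: "pool-uuid", protocol_port: "8080", subnet: "subnet-uuid"]]]]
]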
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.discovery - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.task.TaskStatusAware -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.consul.deploy.ops.EnableDisableConsulInstance - -abstract class AbstractEnableDisableInstancesInDiscoveryOperation implements AtomicOperation, TaskStatusAware { - - OpenstackInstancesDescription description - - AbstractEnableDisableInstancesInDiscoveryOperation(OpenstackInstancesDescription description) { - this.description = description - } - - @Override - Void operate(List priorOutputs) { - def credentials = description.credentials.credentials - def instances = description.instanceIds - String verb = disable ? 'disable' : 'enable' - String presentParticipling = disable ? 'Disabling' : 'Enabling' - - task.updateStatus phaseName, "Initializing $verb server group operation for instances $instances in $description.region..." - - if (!credentials.consulConfig?.enabled) { - throw new IllegalArgumentException("Consul isn't enabled for account $credentials.name.") - } - - instances.each { String instance -> - //TODO - Need to functionally test yet. - String ipAddress = clientProvider.getIpForInstance(description.region, instance) - if (ipAddress) { - task.updateStatus phaseName, "$presentParticipling instance $instance at $ipAddress..." - EnableDisableConsulInstance.operate(credentials.consulConfig, - instance, - disable - ? EnableDisableConsulInstance.State.disable - : EnableDisableConsulInstance.State.enable) - } - } - - return null - } - - /** - * Operations must indicate if they are disabling the instance from service discovery. - * @return - */ - abstract boolean isDisable() - - /** - * Phase name associated to operation. - * @return - */ - abstract String getPhaseName() - - /** - * Helper method to access client provider via account credentials. - * @return - */ - OpenstackClientProvider getClientProvider() { - description.credentials.provider - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/DisableInstancesInDiscoveryOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/DisableInstancesInDiscoveryOperation.groovy deleted file mode 100644 index 42e4b7b3e3d..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/DisableInstancesInDiscoveryOperation.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.discovery - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription - -/** - * curl -X POST -H "Content-Type: application/json" -d '[ { "disableInstancesInDiscovery": { "instanceIds": ["155e68a7-a7dd-433a-b2c1-c8d6d38fb89a"], "region": "RegionOne", "account": "my-openstack-account" }} ]' localhost:7002/openstack/ops - */ -class DisableInstancesInDiscoveryOperation extends AbstractEnableDisableInstancesInDiscoveryOperation { - boolean disable = true - String phaseName = 'DISABLE_INSTANCES_IN_DISCOVERY' - - DisableInstancesInDiscoveryOperation(OpenstackInstancesDescription description) { - super(description) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/EnableInstancesInDiscoveryOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/EnableInstancesInDiscoveryOperation.groovy deleted file mode 100644 index 9e74c0f4940..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/EnableInstancesInDiscoveryOperation.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.discovery - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription - -/** - * curl -X POST -H "Content-Type: application/json" -d '[ { "enableInstancesInDiscovery": { "instanceIds": ["155e68a7-a7dd-433a-b2c1-c8d6d38fb89a"], "region": "RegionOne", "account": "my-openstack-account" }} ]' localhost:7002/openstack/ops - */ -class EnableInstancesInDiscoveryOperation extends AbstractEnableDisableInstancesInDiscoveryOperation { - boolean disable = false - String phaseName = 'ENABLE_INSTANCES_IN_DISCOVERY' - - EnableInstancesInDiscoveryOperation(OpenstackInstancesDescription description) { - super(description) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/AbstractRegistrationOpenstackInstancesAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/AbstractRegistrationOpenstackInstancesAtomicOperation.groovy deleted file mode 100644 index 1747a2b70ec..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/AbstractRegistrationOpenstackInstancesAtomicOperation.groovy +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer.LoadBalancerChecker -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 - -/** - * Base class that will handle both load balancer registration and deregistration. - */ -abstract class AbstractRegistrationOpenstackInstancesAtomicOperation implements AtomicOperation { - - abstract String getBasePhase() // Either 'REGISTER' or 'DEREGISTER'. - abstract Boolean getAction() // Either 'true' or 'false', for Register and Deregister respectively. - abstract String getVerb() // Either 'registering' or 'deregistering'. - abstract String getPreposition() // Either 'with' or 'from' - - OpenstackInstancesRegistrationDescription description - - AbstractRegistrationOpenstackInstancesAtomicOperation(OpenstackInstancesRegistrationDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - //TODO we should be able to get all the instance ips once, instead of refetching for each load balancer - //TODO we should also not refetch listeners for each instance, that should only happen once per balancer - @Override - Void operate(List priorOutputs) { - try { - task.updateStatus basePhase, "Start $verb all instances $preposition load balancers..." - OpenstackClientProvider provider = description.credentials.provider - description.loadBalancerIds.each { lb -> - task.updateStatus basePhase, "Getting details for load balancer $lb..." - LoadBalancerV2 loadBalancer = provider.getLoadBalancer(description.region, lb) - if (!loadBalancer) { - throw new OpenstackResourceNotFoundException("Could not find load balancer: $lb in region: $description.region") - } - - description.instanceIds.each { id -> - task.updateStatus basePhase, "Getting ip address for service instance $id..." - String ip = provider.getIpForInstance(description.region, id) - loadBalancer.listeners.each { listenerItem -> - task.updateStatus basePhase, "Getting listener details for listener $listenerItem.id..." 
- ListenerV2 listener = provider.getListener(description.region, listenerItem.id) - if (action) { - task.updateStatus basePhase, "Getting internal port from load balancer $loadBalancer.name for listener $listenerItem.id..." - int internalPort = provider.getInternalLoadBalancerPort(description.region, listenerItem.id) - task.updateStatus basePhase, "Adding member with ip $ip to load balancer $loadBalancer.name on internal port $internalPort with weight $description.weight..." - provider.addMemberToLoadBalancerPool(description.region, ip, listener.defaultPoolId, loadBalancer.vipSubnetId, internalPort, description.weight) - task.updateStatus basePhase, "Waiting on member add status with ip $ip to load balancer $loadBalancer.name on internal port $internalPort with weight $description.weight..." - LoadBalancerChecker.from(description.credentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(description.region, lb) - } - } else { - task.updateStatus basePhase, "Getting member id for server instance $id and ip $ip on load balancer $loadBalancer.name..." - String memberId = provider.getMemberIdForInstance(description.region, ip, listener.defaultPoolId) - task.updateStatus basePhase, "Removing member with ip $ip from load balancer $loadBalancer.name..." - provider.removeMemberFromLoadBalancerPool(description.region, listener.defaultPoolId, memberId) - task.updateStatus basePhase, "Waiting on remove status for member with ip $ip from load balancer $loadBalancer.name..." - LoadBalancerChecker.from(description.credentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(description.region, lb) - } - } - } - task.updateStatus basePhase, "Completed $verb instance $id $preposition load balancer $lb." - } - } - task.updateStatus basePhase, "Completed $verb instances $preposition load balancers." - } catch (OpenstackProviderException e) { - throw new OpenstackOperationException(e) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/DeregisterOpenstackInstancesAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/DeregisterOpenstackInstancesAtomicOperation.groovy deleted file mode 100644 index a9beff74672..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/DeregisterOpenstackInstancesAtomicOperation.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import groovy.util.logging.Slf4j - -/** - * Each instance in the set of instances will be deregistered from each load balancer in the set of load balancers.
- */ -@Slf4j -class DeregisterOpenstackInstancesAtomicOperation extends AbstractRegistrationOpenstackInstancesAtomicOperation { - - String basePhase = 'DEREGISTER' - Boolean action = Boolean.FALSE - String verb = 'deregistering' - String preposition = 'from' - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "deregisterInstancesFromLoadBalancer": { "loadBalancerIds": ["2112e340-4714-492c-b9db-e45e1b1102c5"], "instanceIds": ["155e68a7-a7dd-433a-b2c1-c8d6d38fb89a"], "account": "test", "region": "region" }} ]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - DeregisterOpenstackInstancesAtomicOperation(OpenstackInstancesRegistrationDescription description) { - super(description) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RebootOpenstackInstancesAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RebootOpenstackInstancesAtomicOperation.groovy deleted file mode 100644 index 30e98f532df..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RebootOpenstackInstancesAtomicOperation.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription - -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import groovy.util.logging.Slf4j - -/** - * Reboots an Openstack instance. - */ -@Slf4j -class RebootOpenstackInstancesAtomicOperation implements AtomicOperation { - - private final String BASE_PHASE = "REBOOT_INSTANCES" - OpenstackInstancesDescription description - - RebootOpenstackInstancesAtomicOperation(OpenstackInstancesDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "rebootInstances": { "instanceIds": ["os-test-v000-beef"], "account": "test", "region": "region1" }} ]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - - @Override - Void operate(List priorOutputs) { - String instances = description.instances - task.updateStatus BASE_PHASE, "Initializing Reboot Instances Operation for ${instances}..." - - description.instanceIds.each { - task.updateStatus BASE_PHASE, "Rebooting $it" - description.credentials.provider.rebootInstance(description.region, it) - task.updateStatus BASE_PHASE, "Rebooted $it" - } - - task.updateStatus BASE_PHASE, "Done rebooting instances ${instances}." 
- } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RegisterOpenstackInstancesAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RegisterOpenstackInstancesAtomicOperation.groovy deleted file mode 100644 index 317089bd4e7..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RegisterOpenstackInstancesAtomicOperation.groovy +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import groovy.util.logging.Slf4j - -/** - * Each instance in the set of instances will be registered with each load balancer in the set of load balancers. - */ -@Slf4j -class RegisterOpenstackInstancesAtomicOperation extends AbstractRegistrationOpenstackInstancesAtomicOperation { - - String basePhase = 'REGISTER' - Boolean action = Boolean.TRUE - String verb = 'registering' - String preposition = 'with' - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "registerInstancesWithLoadBalancer": { "loadBalancerIds": ["2112e340-4714-492c-b9db-e45e1b1102c5"], "instanceIds": ["155e68a7-a7dd-433a-b2c1-c8d6d38fb89a"], "account": "test", "region": "region" }} ]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - RegisterOpenstackInstancesAtomicOperation(OpenstackInstancesRegistrationDescription description) { - super(description) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/TerminateOpenstackInstancesAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/TerminateOpenstackInstancesAtomicOperation.groovy deleted file mode 100644 index 4d9b5d64383..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/TerminateOpenstackInstancesAtomicOperation.groovy +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.AbstractStackUpdateOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.openstack.domain.LoadBalancerResolver -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import groovy.util.logging.Slf4j -import org.openstack4j.api.Builders -import org.openstack4j.model.compute.Server -import org.openstack4j.model.heat.Resource -import org.openstack4j.model.heat.Stack - -/** - * Terminates an Openstack instance by marking the stack resource unhealthy and doing a stack update. This will - * recreate instances until the stack reaches the correct size. - * - * TODO test upsert load balancer - */ -@Slf4j -class TerminateOpenstackInstancesAtomicOperation extends AbstractStackUpdateOpenstackAtomicOperation implements LoadBalancerResolver { - - final String phaseName = "TERMINATE_INSTANCES" - - final String operation = AtomicOperations.TERMINATE_INSTANCES - - - TerminateOpenstackInstancesAtomicOperation(OpenstackInstancesDescription description) { - super(description) - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "terminateInstances": { "instanceIds": ["os-test-v000-beef"], "account": "test", "region": "region1" }} ]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - - @Override - String getServerGroupName() { - String instanceId = description.instanceIds?.find() ?: null - String serverGroupName = "" - if (instanceId) { - task.updateStatus phaseName, "Getting server group name from instance $instanceId ..." - Server server = provider.getServerInstance(description.region, instanceId) - if (!server) { - throw new OpenstackResourceNotFoundException("Could not find server: $instanceId in region: $description.region") - } - serverGroupName = server.metadata?.get("metering.stack.name") ?: provider.getStack(description.region, server.metadata?.get("metering.stack"))?.name - if (!serverGroupName) { - throw new OpenstackResourceNotFoundException("Could not find server group name for server: $instanceId") - } - task.updateStatus phaseName, "Found server group name $serverGroupName from instance $instanceId." - } - serverGroupName - } - - @Override - void preUpdate(Stack stack) { - - //get asg_resource stack id and name - task.updateStatus phaseName, "Finding asg resource for $stack.name ..." - Resource asg = provider.getAsgResourceForStack(description.region, stack) - task.updateStatus phaseName, "Finding nested stack for resource $asg.type ..." - Stack nested = provider.getStack(description.region, asg.physicalResourceId) - if (!nested) { - throw new OpenstackResourceNotFoundException("Could not find stack $asg.physicalResourceId in region: $description.region") - } - - description.instanceIds.each { id -> - - //get server name - task.updateStatus phaseName, "Getting server details for $id ..." 
- Server server = provider.getServerInstance(description.region, id) - if (!server) { - throw new OpenstackResourceNotFoundException("Could not find server: $id in region: $description.region") - } - - //get resource - task.updateStatus phaseName, "Finding server group resource for $id ..." - //for some reason it only works to look up the resource from the parent stack, not the nested stack - Resource instance = provider.getInstanceResourceForStack(description.region, stack, server.name) - - //mark unhealthy - subsequent stack update will delete and recreate the resource - task.updateStatus phaseName, "Marking server group resource $instance.resourceName unhealthy ..." - provider.markStackResourceUnhealthy(description.region, nested.name, nested.id, instance.resourceName, - Builders.resourceHealth().markUnhealthy(true).resourceStatusReason("Deleted instance $id").build()) - - } - } - - OpenstackClientProvider getProvider() { - description.credentials.provider - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/AbstractOpenstackLoadBalancerAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/AbstractOpenstackLoadBalancerAtomicOperation.groovy deleted file mode 100644 index 8c4fff2a9cc..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/AbstractOpenstackLoadBalancerAtomicOperation.groovy +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.StackPoolMemberAware -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.ServerGroupConstants -import com.netflix.spinnaker.clouddriver.openstack.domain.LoadBalancerResolver -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.task.TaskStatusAware -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.openstack.networking.domain.ext.ListItem - -abstract class AbstractOpenstackLoadBalancerAtomicOperation implements TaskStatusAware, StackPoolMemberAware, LoadBalancerResolver { - - OpenstackCredentials openstackCredentials - - AbstractOpenstackLoadBalancerAtomicOperation(OpenstackCredentials openstackCredentials) { - this.openstackCredentials = openstackCredentials - } - - /** - * Update the server group to remove the given load balancer. - * @param loadBalancerId - */ - void updateServerGroup(String operation, String region, String loadBalancerId, List loadBalancersToDelete = []) { - task.updateStatus operation, "Updating server groups that reference load balancer $loadBalancerId..." - provider.listStacksWithLoadBalancers(region, [loadBalancerId]).each { stackSummary -> - //get stack details - task.updateStatus operation, "Fetching stack details for server group $stackSummary.name..." - Stack stack = provider.getStack(region, stackSummary.name) - if (!stack) { - throw new OpenstackResourceNotFoundException("Could not find stack $stackSummary.name in region: $region") - } - task.updateStatus operation, "Fetched stack details for server group $stackSummary.name." - - //update parameters - ServerGroupParameters newParams = ServerGroupParameters.fromParamsMap(stack.parameters) - if (loadBalancersToDelete) { - newParams.loadBalancers.removeAll(loadBalancersToDelete) - } - - //get the current template from the stack - task.updateStatus operation, "Fetching current template for server group $stack.name..." - String template = provider.getHeatTemplate(region, stack.name, stack.id) - task.updateStatus operation, "Successfully fetched current template for server group $stack.name." - - //we need to store subtemplate in server group output from create, as it is required to do an update and there is no native way of - //obtaining it from a stack - task.updateStatus operation, "Fetching subtemplates for server group $stack.name..." 
- List> outputs = stack.outputs - String subtemplate = outputs.find { m -> m.get("output_key") == ServerGroupConstants.SUBTEMPLATE_OUTPUT }.get("output_value") - - //rebuild memberTemplate - String memberTemplate = buildPoolMemberTemplate( - buildMemberData(openstackCredentials, region, newParams.loadBalancers, this.&parseListenerKey) - ) - task.updateStatus operation, "Fetched subtemplates for server group $stack.name." - - //update stack - task.updateStatus operation, "Updating server group $stack.name..." - // TODO: this should be wrapped with a checker - provider.updateStack(region, stack.name, stack.id, template, [(ServerGroupConstants.SUBTEMPLATE_FILE): subtemplate, (ServerGroupConstants.MEMBERTEMPLATE_FILE): memberTemplate], newParams, newParams.loadBalancers) - task.updateStatus operation, "Successfully updated server group $stack.name." - } - - task.updateStatus operation, "Updated server groups that reference load balancer $loadBalancerId." - } - - /** - * Removes load balancer listeners/pools/monitor associated with load balancer. - * @param operation - * @param region - * @param loadbalancerId - * @param listenerStatuses - */ - protected void deleteLoadBalancerPeripherals(String operation, String region, String loadBalancerId, Collection listeners) { - //remove elements - listeners?.each { ListenerV2 currentListener -> - try { - LbPoolV2 lbPool = provider.getPool(region, currentListener.defaultPoolId) - if (lbPool.healthMonitorId) { - removeHealthMonitor(operation, region, loadBalancerId, lbPool.healthMonitorId) - } - //delete pool - task.updateStatus operation, "Deleting pool $lbPool.id on listener $currentListener.id in $region ..." - provider.deletePool(region, lbPool.id) - task.updateStatus operation, "Waiting for delete on pool $lbPool.id on listener $currentListener.id in $region ..." - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus operation, "Deleted pool $lbPool.id on listener $currentListener.id in $region." - - } catch (OpenstackResourceNotFoundException ope) { - // Do nothing. - } - - //delete listener - task.updateStatus operation, "Deleting listener $currentListener.id on load balancer $loadBalancerId in $region..." - provider.deleteListener(region, currentListener.id) - task.updateStatus operation, "Waiting for delete on listener $currentListener.id in $region ..." - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus operation, "Deleted listener $currentListener.id on load balancer $loadBalancerId in $region." - } - } - - /** - * Shared method to remove a health monitor given its ID. - * @param operation - * @param region - * @param id - */ - protected void removeHealthMonitor(String operation, String region, String loadBalancerId, String id) { - task.updateStatus operation, "Removing existing monitor ${id} in ${region}..." - provider.deleteMonitor(region, id) - task.updateStatus operation, "Waiting on remove of monitor ${id} in ${region}..." - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus operation, "Removed existing monitor ${id} in ${region}." - } - - /** - * Checks to see if the load balancer is in a pending state. 
- * @param loadBalancer - */ - protected void checkPendingLoadBalancerState(LoadBalancerV2 loadBalancer) { - if (loadBalancer.provisioningStatus.name().contains('PENDING')) { - throw new OpenstackOperationException(AtomicOperations.DELETE_LOAD_BALANCER, "Load balancer $loadBalancer.id must not be in PENDING provisioning status to be deleted. Current status is $loadBalancer.provisioningStatus") - } - } - - /** - * Helper method to lookup listeners associated to load balancers into a map by listener key. - * @param region - * @param loadBalancer - * @return - */ - protected Map buildListenerMap(String region, LoadBalancerV2 loadBalancer) { - loadBalancer?.listeners?.collectEntries([:]) { ListItem item -> - ListenerV2 listenerV2 = provider.getListener(region, item.id) - [(listenerV2.description): listenerV2] - } - } - - /** - * Utility method to get provider - * @return - */ - OpenstackClientProvider getProvider() { - openstackCredentials.provider - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/DeleteOpenstackLoadBalancerAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/DeleteOpenstackLoadBalancerAtomicOperation.groovy deleted file mode 100644 index 95e4e334396..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/DeleteOpenstackLoadBalancerAtomicOperation.groovy +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.DeleteOpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.StackPoolMemberAware -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import groovy.util.logging.Slf4j -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 - -/** - * Removes an openstack load balancer. 
- */ -@Slf4j -class DeleteOpenstackLoadBalancerAtomicOperation extends AbstractOpenstackLoadBalancerAtomicOperation implements AtomicOperation, StackPoolMemberAware { - - static final String BASE_PHASE = 'DELETE_LOAD_BALANCER' - DeleteOpenstackLoadBalancerDescription description - - DeleteOpenstackLoadBalancerAtomicOperation(DeleteOpenstackLoadBalancerDescription description) { - super(description.credentials) - this.description = description - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "deleteLoadBalancer": { "id": "6adc02a8-7b01-4f90-9e6f-9a4c3411e7ad", "region": "RegionOne", "account": "test" } } ]' localhost:7002/openstack/ops - */ - - @Override - Void operate(List priorOutputs) { - String region = description.region - String loadBalancerId = description.id - OpenstackClientProvider provider = description.credentials.provider - - try { - task.updateStatus BASE_PHASE, "Deleting load balancer ${loadBalancerId} in region ${region}..." - - task.updateStatus BASE_PHASE, "Fetching status tree..." - LoadBalancerV2 loadBalancer = provider.getLoadBalancer(region, loadBalancerId) - task.updateStatus BASE_PHASE, "Fetched status tree." - - if (loadBalancer) { - checkPendingLoadBalancerState(loadBalancer) - - //step 1 - delete load balancer - deleteLoadBalancer(region, loadBalancer) - - //step 2 - update stack(s) that reference load balancer - updateServerGroup(BASE_PHASE, region, loadBalancerId, [loadBalancerId]) - } - } catch (OpenstackProviderException e) { - task.updateStatus BASE_PHASE, "Failed deleting load balancer ${e.message}." - throw new OpenstackOperationException(AtomicOperations.DELETE_LOAD_BALANCER, e) - } - - task.updateStatus BASE_PHASE, "Finished deleting load balancer ${loadBalancerId}." - } - - /** - * Delete the load balancer and all sub-elements. - * @param loadBalancerStatus - */ - void deleteLoadBalancer(String region, LoadBalancerV2 loadBalancer) { - Map listenerMap = buildListenerMap(region, loadBalancer) - - this.deleteLoadBalancerPeripherals(BASE_PHASE, region, loadBalancer.id, listenerMap.values()) - - //delete load balancer - task.updateStatus BASE_PHASE, "Deleting load balancer $loadBalancer.id in $region ..." - provider.deleteLoadBalancer(region, loadBalancer.id) - task.updateStatus BASE_PHASE, "Waiting on delete of load balancer $loadBalancer.id in $region ..." - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.DELETE).execute { - provider.getLoadBalancer(region, loadBalancer.id) - } - task.updateStatus BASE_PHASE, "Deleted load balancer $loadBalancer.id in $region." - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/LoadBalancerChecker.java b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/LoadBalancerChecker.java deleted file mode 100644 index e5e43fdf0cd..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/LoadBalancerChecker.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2018 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer; - -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker; -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.LbaasConfig; -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException; -import org.openstack4j.model.common.ActionResponse; -import org.openstack4j.model.network.ext.LbProvisioningStatus; -import org.openstack4j.model.network.ext.LoadBalancerV2; - -class LoadBalancerChecker implements BlockingStatusChecker.StatusChecker { - Operation operation; - - enum Operation { - CREATE, - UPDATE, - DELETE - } - - LoadBalancerChecker(Operation operation) { - this.operation = operation; - } - - @Override - public boolean isReady(LoadBalancerV2 loadBalancer) { - if (loadBalancer == null) { - if (operation == Operation.DELETE) { - return true; - } - ActionResponse actionResponse = ActionResponse.actionFailed("Cannot get status for null loadbalancer", 404); - throw new OpenstackProviderException(actionResponse); - } - LbProvisioningStatus status = loadBalancer.getProvisioningStatus(); - if (status == LbProvisioningStatus.ERROR) { - String failureMessage = String.format("Error in load balancer provision: %s, %s", loadBalancer.getName(), loadBalancer.getId()); - ActionResponse actionResponse = ActionResponse.actionFailed(failureMessage, 500); - throw new OpenstackProviderException(actionResponse); - } - return status == LbProvisioningStatus.ACTIVE; - } - - static BlockingStatusChecker from(LbaasConfig lbaasConfig, Operation operation) { - LoadBalancerChecker checker = new LoadBalancerChecker(operation); - return BlockingStatusChecker.from(lbaasConfig.getPollTimeout(), lbaasConfig.getPollInterval(), checker); - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperation.groovy deleted file mode 100644 index 8ddd33e0422..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperation.groovy +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer - -import com.google.common.collect.Sets -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Algorithm -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Listener -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import com.netflix.spinnaker.clouddriver.openstack.task.TaskStatusAware -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.network.NetFloatingIP -import org.openstack4j.model.network.Network -import org.openstack4j.model.network.Port -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 - -class UpsertOpenstackLoadBalancerAtomicOperation extends AbstractOpenstackLoadBalancerAtomicOperation implements AtomicOperation, TaskStatusAware { - OpenstackLoadBalancerDescription description - - UpsertOpenstackLoadBalancerAtomicOperation(OpenstackLoadBalancerDescription description) { - super(description.credentials) - this.description = description - } - - /* - * Create: - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertLoadBalancer": { "region": "RegionOne", "account": "test", "name": "stack-test", "subnetId": "8802895b-c46f-4074-b494-0a992b38e8c5", "networkId": "bcfdcd2f-57ec-4153-b145-139c81fa698e", "algorithm": "ROUND_ROBIN", "securityGroups": ["3c213029-f4f1-46ad-823b-d27dead4bf3f"], "healthMonitor": { "type": "PING", "delay": 10, "timeout": 10, "maxRetries": 10 }, "listeners": [ { "externalPort": 80, "externalProtocol":"HTTP", "internalPort": 8181 }] } } ]' localhost:7002/openstack/ops - * - * Update: - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertLoadBalancer": { "region": "RegionOne", "account": "test", "id": "413910e0-ec00-448a-9427-228450c78bf0", "name": "stack-test", "subnetId": "8802895b-c46f-4074-b494-0a992b38e8c5", "networkId": "bcfdcd2f-57ec-4153-b145-139c81fa698e", "algorithm": "ROUND_ROBIN", "securityGroups": ["3c213029-f4f1-46ad-823b-d27dead4bf3f"], "healthMonitor": { "type": "PING", "delay": 10, "timeout": 10, "maxRetries": 10 }, "listeners": [ { "externalPort": 80, "externalProtocol":"HTTP", "internalPort": 8282 }] } } ]' localhost:7002/openstack/ops - */ - - @Override - Map operate(List priorOutputs) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Initializing upsert of load balancer ${description.id ?: description.name} in ${description.region}..." 
- - String region = description.region - LoadBalancerV2 resultLoadBalancer - - try { - if (!this.description.id) { - validatePeripherals(region, description.subnetId, description.networkId, description.securityGroups) - resultLoadBalancer = createLoadBalancer(region, description.name, description.subnetId) - } else { - resultLoadBalancer = provider.getLoadBalancer(region, description.id) - if (!resultLoadBalancer) { - throw new OpenstackResourceNotFoundException("Could not find load balancer: $description.id in region: $region") - } - checkPendingLoadBalancerState(resultLoadBalancer) - } - - Map<String, ListenerV2> existingListenerMap = buildListenerMap(region, resultLoadBalancer) - Map<String, Listener> listenerMap = description.listeners.collectEntries([:]) { Listener current -> - [(getListenerKey(current.externalProtocol.name(), current.externalPort, current.internalPort)): current] - } - Map<String, ListenerV2> listenersToUpdate = [:] - Map<String, Listener> listenersToAdd = [:] - - listenerMap.entrySet()?.each { Map.Entry<String, Listener> entry -> - ListenerV2 foundListener = existingListenerMap.get(entry.key) - if (foundListener) { - listenersToUpdate.put(entry.key, foundListener) - } else { - listenersToAdd.put(entry.key, entry.value) - } - } - - Set<String> listenersToDelete = Sets.difference(existingListenerMap.keySet(), Sets.union(listenersToAdd.keySet(), listenersToUpdate.keySet())) - - if (listenersToDelete) { - List<ListenerV2> deleteValues = existingListenerMap.findAll { listenersToDelete.contains(it.key) }.collect { - it.value - } - deleteLoadBalancerPeripherals(UPSERT_LOADBALANCER_PHASE, region, resultLoadBalancer.id, deleteValues) - } - - if (listenersToAdd) { - addListenersAndPools(region, resultLoadBalancer.id, description.name, description.algorithm, listenersToAdd, description.healthMonitor) - } - - if (listenersToUpdate) { - updateListenersAndPools(region, resultLoadBalancer.id, description.algorithm, listenersToUpdate.values(), description.healthMonitor) - } - - updateFloatingIp(region, description.networkId, resultLoadBalancer.vipPortId) - updateSecurityGroups(region, resultLoadBalancer.vipPortId, description.securityGroups) - - // Add members to newly created pools through an existing stack only - if (description.id && (!listenersToAdd.isEmpty() || !listenersToDelete.isEmpty())) { - updateServerGroup(UPSERT_LOADBALANCER_PHASE, region, resultLoadBalancer.id) - } - } catch (OpenstackProviderException ope) { - throw new OpenstackOperationException(AtomicOperations.UPSERT_LOAD_BALANCER, ope) - } - - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Done upserting load balancer ${resultLoadBalancer?.name} in ${region}" - return [(region): [id: resultLoadBalancer?.id]] - } - - /** - * Validates that the load balancer's subnet, network, and security groups actually exist. - * @param region - * @param subnetId - * @param networkId - * @param securityGroups - */ - protected void validatePeripherals(String region, String subnetId, String networkId, List<String> securityGroups) { - if (!provider.getSubnet(region, subnetId)) { - throw new OpenstackResourceNotFoundException("Subnet provided is invalid ${subnetId}") - } - - if (networkId && !provider.getNetwork(region, networkId)) { - throw new OpenstackResourceNotFoundException("Network provided is invalid ${networkId}") - } - - securityGroups?.each { - if (!provider.getSecurityGroup(region, it)) { - throw new OpenstackResourceNotFoundException("Could not find securityGroup: $it in region: $region") - } - } - } - - /** - * Creates a load balancer in a given subnet.
- * @param region - * @param name - * @param subnetId - * @return - */ - protected LoadBalancerV2 createLoadBalancer(String region, String name, String subnetId) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Creating load balancer $name in ${region} ..." - String createdTime = generateCreatedTime(System.currentTimeMillis()) - - LoadBalancerV2 result = provider.createLoadBalancer(region, name, createdTime, subnetId) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on creation of load balancer $name in ${region} ..." - result = LoadBalancerChecker.from(description.credentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.CREATE).execute { - provider.getLoadBalancer(region, result.id) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Created load balancer $name in ${region}." - result - } - - /** - * Updates the security groups on the VIP port associated with the load balancer. - * @param region - * @param portId - * @param securityGroups - */ - protected void updateSecurityGroups(String region, String portId, List<String> securityGroups) { - Port port = provider.getPort(region, portId) - if (securityGroups && port.securityGroups != securityGroups) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Updating port ${portId} with security groups ${securityGroups} in ${region}..." - provider.updatePort(region, portId, securityGroups) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Updated port ${portId} with security groups ${securityGroups} in ${region}." - } - } - - /** - * Adds or removes the floating IP address associated with the load balancer. - * @param region - * @param networkId - * @param portId - */ - protected void updateFloatingIp(String region, String networkId, String portId) { - NetFloatingIP existingFloatingIp = provider.getFloatingIpForPort(region, portId) - if (networkId) { - Network network = provider.getNetwork(region, networkId) - FloatingIP ip = provider.getOrCreateFloatingIp(region, network.name) - if (!existingFloatingIp) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Associating floating IP ${ip.floatingIpAddress} with ${portId}..." - provider.associateFloatingIpToPort(region, ip.id, portId) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Associated floating IP ${ip.floatingIpAddress} with ${portId}." - } else { - if (networkId != existingFloatingIp.floatingNetworkId) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Disassociating ip ${existingFloatingIp.floatingIpAddress} and associating ip ${ip.floatingIpAddress} with vip ${portId}..." - provider.disassociateFloatingIpFromPort(region, existingFloatingIp.id) - provider.associateFloatingIpToPort(region, ip.id, portId) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Disassociated ip ${existingFloatingIp.id} and associated ip ${ip.floatingIpAddress} with vip ${portId}." - } - } - } else { - if (existingFloatingIp) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Disassociating ip ${existingFloatingIp.floatingIpAddress} from vip ${portId}..." - provider.disassociateFloatingIpFromPort(region, existingFloatingIp.id) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Disassociated ip ${existingFloatingIp.floatingIpAddress} from vip ${portId}." - } - } - } - - /** - * Adds listeners and pools to an existing load balancer.
- * @param region - * @param loadBalancerId - * @param name - * @param algorithm - * @param listeners - * @param healthMonitor - */ - protected void addListenersAndPools(String region, String loadBalancerId, String name, Algorithm algorithm, Map<String, Listener> listeners, HealthMonitor healthMonitor) { - listeners?.each { String key, Listener currentListener -> - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Creating listener $name in ${region}" - ListenerV2 listener = provider.createListener(region, name, currentListener.externalProtocol.name(), currentListener.externalPort, key, loadBalancerId) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on creation of listener $name in ${region}" - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Created listener $name in ${region}" - - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Creating pool $name in ${region}" - LbPoolV2 pool = provider.createPool(region, name, currentListener.externalProtocol.internalProtocol, algorithm.name(), listener.id) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on creation of pool $name in ${region}" - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Created pool $name in ${region}" - updateHealthMonitor(region, loadBalancerId, pool, healthMonitor) - } - } - - /** - * Updates an existing load balancer's listeners and pools. - * @param region - * @param loadBalancerId - * @param algorithm - * @param listeners - * @param healthMonitor - */ - protected void updateListenersAndPools(String region, String loadBalancerId, Algorithm algorithm, Collection<ListenerV2> listeners, HealthMonitor healthMonitor) { - listeners?.each { ListenerV2 currentListener -> - LbPoolV2 lbPool = provider.getPool(region, currentListener.defaultPoolId) - if (lbPool.lbMethod.name() != algorithm.name()) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Updating pool $lbPool.name in ${region} ..." - provider.updatePool(region, lbPool.id, algorithm.name()) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on update for pool $lbPool.name in ${region} ..." - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Updated pool $lbPool.name in ${region}." - } - - updateHealthMonitor(region, loadBalancerId, lbPool, healthMonitor) - } - } - - /** - * Adds, removes, or updates a health monitor on a given load balancer and pool. - * @param region - * @param loadBalancerId - * @param lbPool - * @param healthMonitor - */ - protected void updateHealthMonitor(String region, String loadBalancerId, LbPoolV2 lbPool, HealthMonitor healthMonitor) { - if (lbPool.healthMonitorId) { - if (healthMonitor) { - HealthMonitorV2 existingMonitor = provider.getMonitor(region, lbPool.healthMonitorId) - if (existingMonitor.type.name() == healthMonitor.type.name()) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Updating health monitor $lbPool.name in ${region} ..." - provider.updateMonitor(region, lbPool.healthMonitorId, healthMonitor) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on update to health monitor $lbPool.name in ${region} ..."
- LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Updated health monitor $lbPool.name in ${region}." - } else { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Removing existing monitor ${existingMonitor.id} and creating health monitor for ${lbPool.name} in ${region}..." - provider.deleteMonitor(region, lbPool.healthMonitorId) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on remove of existing monitor ${existingMonitor.id} in ${region}" - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on creation of health monitor for ${lbPool.name} in ${region}..." - provider.createMonitor(region, lbPool.id, healthMonitor) - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Removed existing monitor ${existingMonitor.id} and created health monitor for ${lbPool.name} in ${region}." - } - } else { - removeHealthMonitor(UPSERT_LOADBALANCER_PHASE, region, loadBalancerId, lbPool.healthMonitorId) - } - } else { - if (healthMonitor) { - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Creating health monitor for pool $lbPool.name in ${region} ..." - provider.createMonitor(region, lbPool.id, healthMonitor) - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Waiting on creation of health monitor for pool $lbPool.name in ${region} ..." - LoadBalancerChecker.from(openstackCredentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(region, loadBalancerId) - } - task.updateStatus UPSERT_LOADBALANCER_PHASE, "Created health monitor for pool $lbPool.name in ${region}." - } - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/DeleteOpenstackSecurityGroupAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/DeleteOpenstackSecurityGroupAtomicOperation.groovy deleted file mode 100644 index 7788da00d9d..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/DeleteOpenstackSecurityGroupAtomicOperation.groovy +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
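The upsert operation above reconciles the desired listeners against the live ones by a composite key (external protocol, external port, internal port) and derives three buckets from plain set arithmetic. A self-contained Groovy sketch of that bookkeeping, with hypothetical key strings (the real keys come from getListenerKey and the difference is computed with Guava's Sets):

def desired  = ['HTTP:80:8181', 'HTTPS:443:8443'] as Set  // keys built from the description
def existing = ['HTTP:80:8181', 'TCP:22:22'] as Set       // keys read back from the balancer

def toUpdate = desired.intersect(existing)  // present on both sides: refresh pool and monitor in place
def toAdd    = desired - existing           // new: create listener, pool, then monitor
def toDelete = existing - desired           // orphaned: tear down monitor, pool, then listener

assert toUpdate == ['HTTP:80:8181'] as Set
assert toAdd    == ['HTTPS:443:8443'] as Set
assert toDelete == ['TCP:22:22'] as Set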
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.DeleteOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation - -/** - * Deletes an Openstack security group. - * - * Delete will fail in Openstack if the security group is associated with an instance. - */ -class DeleteOpenstackSecurityGroupAtomicOperation implements AtomicOperation<Void> { - - private final String BASE_PHASE = 'DELETE_SECURITY_GROUP' - DeleteOpenstackSecurityGroupDescription description - - DeleteOpenstackSecurityGroupAtomicOperation(DeleteOpenstackSecurityGroupDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * Delete: - * curl -X POST -H "Content-Type: application/json" -d '[ { "deleteSecurityGroup": { "account": "test", "region": "west", "id": "ee411748-88b5-4825-a9d4-ec549d1a1276" } } ]' localhost:7002/openstack/ops - * Task status: - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - @Override - Void operate(List priorOutputs) { - task.updateStatus(BASE_PHASE, "Deleting security group ${description.id}") - - // TODO: Check if Openstack failure gives a decent error - description.credentials.provider.deleteSecurityGroup(description.region, description.id) - - task.updateStatus(BASE_PHASE, "Finished deleting security group ${description.id}") - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/UpsertOpenstackSecurityGroupAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/UpsertOpenstackSecurityGroupAtomicOperation.groovy deleted file mode 100644 index 96d52eb9dde..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/UpsertOpenstackSecurityGroupAtomicOperation.groovy +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
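Nearly every mutating call in these deleted operations is bracketed by a LoadBalancerChecker.from(...).execute { provider.getLoadBalancer(...) } call that blocks until the balancer leaves its PENDING provisioning state. A minimal sketch of that poll-until-ready contract, assuming timeout and interval values in seconds (the deleted BlockingStatusChecker plumbing differs in detail):

def pollUntilReady(long timeoutSecs, long intervalSecs, Closure<Boolean> ready, Closure fetch) {
  long deadline = System.currentTimeMillis() + timeoutSecs * 1000
  while (true) {
    def current = fetch()                 // e.g. provider.getLoadBalancer(region, loadBalancerId)
    if (ready(current)) {
      return current                      // reached ACTIVE (or already gone, for deletes)
    }
    if (System.currentTimeMillis() >= deadline) {
      throw new IllegalStateException('Timed out waiting for load balancer to settle')
    }
    sleep(intervalSecs * 1000)            // back off between polls
  }
}

// Hypothetical usage mirroring the calls above:
// pollUntilReady(300, 5, { lb -> lb?.provisioningStatus?.name() == 'ACTIVE' },
//                { provider.getLoadBalancer(region, loadBalancerId) })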
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import groovy.util.logging.Slf4j -import org.apache.commons.lang.StringUtils -import org.openstack4j.model.compute.IPProtocol -import org.openstack4j.model.compute.SecGroupExtension - -/** - * Creates or updates an Openstack security group. - * - * Note that this can only manage ingress rules for the security groups. It appears that this is a limitation of the - * Openstack API itself. Egress rules can be created as part of default rules for security groups, but that needs to - * be managed in Openstack itself, not through Spinnaker. - */ -@Slf4j -class UpsertOpenstackSecurityGroupAtomicOperation implements AtomicOperation { - - private final String BASE_PHASE = 'UPSERT_SECURITY_GROUP' - static final String SELF_REFERENCIAL_RULE = 'SELF' - UpsertOpenstackSecurityGroupDescription description - - UpsertOpenstackSecurityGroupAtomicOperation(UpsertOpenstackSecurityGroupDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * Create: - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertSecurityGroup": { "region": "west", "name": "sg-test-1", "description": "test", "account": "test", "rules": [ { "ruleType": "TCP", "fromPort": 80, "toPort": 90, "cidr": "0.0.0.0/0" } ] } } ]' localhost:7002/openstack/ops - * Update: - * curl -X POST -H "Content-Type: application/json" -d '[ { "upsertSecurityGroup": { "region": "west", "id": "e56fa7eb-550d-42d4-8d3f-f658fbacd496", "name": "sg-test-1", "description": "test", "account": "test", "rules": [ { "ruleType": "TCP", "fromPort": 80, "toPort": 90, "cidr": "0.0.0.0/0" } ] } } ]' localhost:7002/openstack/ops - * Task status: - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Upserting security group ${description.name} in region ${description.region}..." 
- - OpenstackClientProvider provider = description.credentials.provider - - try { - - // Try getting existing security group, update if needed - SecGroupExtension securityGroup - if (StringUtils.isNotEmpty(description.id)) { - task.updateStatus BASE_PHASE, "Looking up existing security group with id ${description.id}" - securityGroup = provider.getSecurityGroup(description.region, description.id) - if (!securityGroup) { - throw new OpenstackResourceNotFoundException("Could not find securityGroup: $description.id in region: $description.region") - } - task.updateStatus BASE_PHASE, "Updating security group with name ${description.name} and description '${description.description}'" - securityGroup = provider.updateSecurityGroup(description.region, description.id, description.name, description.description) - } else { - task.updateStatus BASE_PHASE, "Creating new security group with name ${description.name}" - securityGroup = provider.createSecurityGroup(description.region, description.name, description.description) - } - - // TODO: Find the difference between the existing and desired rules and apply only that, instead of deleting and re-creating all the rules - securityGroup.rules.each { rule -> - task.updateStatus BASE_PHASE, "Deleting rule ${rule.id}" - provider.deleteSecurityGroupRule(description.region, rule.id) - } - - description.rules.each { rule -> - task.updateStatus BASE_PHASE, "Creating rule for ${rule.cidr} from port ${rule.fromPort} to port ${rule.toPort}" - String remoteSecurityGroupId = rule.remoteSecurityGroupId == SELF_REFERENCIAL_RULE ? securityGroup.id : rule.remoteSecurityGroupId - provider.createSecurityGroupRule(description.region, - securityGroup.id, - IPProtocol.value(rule.ruleType), - rule.cidr, - remoteSecurityGroupId, - rule.fromPort, - rule.toPort, - rule.icmpType, - rule.icmpCode - ) - } - task.updateStatus BASE_PHASE, "Finished upserting security group ${description.name}." - } catch (OpenstackProviderException e) { - throw new OpenstackOperationException(AtomicOperations.UPSERT_SECURITY_GROUP, e) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractEnableDisableOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractEnableDisableOpenstackAtomicOperation.groovy deleted file mode 100644 index 5c0aa0041f6..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractEnableDisableOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
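The TODO in the security group upsert above flags that every rule is deleted and re-created on each run. A hypothetical sketch of the diff-based alternative it suggests, comparing rules by their identifying fields so only actual changes would touch the API:

def existingRules = [[ruleType: 'TCP', fromPort: 80, toPort: 90, cidr: '0.0.0.0/0']]
def desiredRules  = [[ruleType: 'TCP', fromPort: 80, toPort: 90, cidr: '0.0.0.0/0'],
                     [ruleType: 'TCP', fromPort: 22, toPort: 22, cidr: '10.0.0.0/8']]

def stale   = existingRules.findAll { !(it in desiredRules) }  // rules that would be deleted
def missing = desiredRules.findAll { !(it in existingRules) }  // rules that would be created

assert stale.isEmpty()
assert missing == [[ruleType: 'TCP', fromPort: 22, toPort: 22, cidr: '10.0.0.0/8']]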
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.consul.deploy.ops.EnableDisableConsulInstance -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer.LoadBalancerChecker -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import groovy.util.logging.Slf4j -import org.openstack4j.model.compute.Server -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import retrofit.RetrofitError - -import java.util.concurrent.CompletableFuture -import java.util.concurrent.Future -import java.util.function.Supplier - -@Slf4j -abstract class AbstractEnableDisableOpenstackAtomicOperation implements AtomicOperation { - abstract boolean isDisable() - - abstract String getPhaseName() - - abstract String getOperation() - - static final int DEFAULT_WEIGHT = 1 - - EnableDisableAtomicOperationDescription description - - AbstractEnableDisableOpenstackAtomicOperation(EnableDisableAtomicOperationDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - Void operate(List priorOutputs) { - String verb = disable ? 'disable' : 'enable' - String gerund = disable ? 'Disabling' : 'Enabling' - - task.updateStatus phaseName, "Initializing $verb server group operation for $description.serverGroupName in $description.region..." - def credentials = description.credentials - - if (credentials.credentials.consulConfig?.enabled) { - task.updateStatus phaseName, "$gerund server group in Consul..." - - List instanceIds = provider.getInstanceIdsForStack(description.region, description.serverGroupName) - instanceIds.each { String instanceId -> - Server instance = provider.getServerInstance(description.region, instanceId) - if (!instance) { - throw new OpenstackResourceNotFoundException("Could not find server: $instanceId in region: $description.region") - } - try { - EnableDisableConsulInstance.operate(credentials.credentials.consulConfig, - instance.name, - disable - ? EnableDisableConsulInstance.State.disable - : EnableDisableConsulInstance.State.enable) - } catch (RetrofitError e) { - // Consul isn't running - log.warn(e.message) - } - } - } - - try { - task.updateStatus phaseName, "Getting stack details for $description.serverGroupName..." 
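Note how the Consul block above deliberately swallows RetrofitError: registration state in Consul is best-effort, and an unreachable agent must not abort the enable/disable of the whole server group. The shape of that pattern in isolation (a hypothetical toggle closure stands in for EnableDisableConsulInstance):

def toggleInstance = { String name -> throw new IOException('consul agent unreachable') }  // simulate an outage

['myapp-v000-a', 'myapp-v000-b'].each { name ->
  try {
    toggleInstance(name)
  } catch (IOException e) {
    println "WARN: skipping consul update for $name: ${e.message}"  // log and continue with the next instance
  }
}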
- List<String> instanceIds = provider.getInstanceIdsForStack(description.region, description.serverGroupName) - if (instanceIds?.size() > 0) { - Stack stack = provider.getStack(description.region, description.serverGroupName) - if (!stack) { - throw new OpenstackResourceNotFoundException("Could not find stack $description.serverGroupName in region: $description.region") - } - if (stack.tags?.size() > 0) { - enableDisableLoadBalancerMembers(instanceIds, stack.tags) - task.updateStatus phaseName, "Done ${gerund.toLowerCase()} server group $description.serverGroupName in $description.region." - } else { - task.updateStatus phaseName, "Did not find any load balancers associated with $description.serverGroupName, nothing to do." - } - } else { - task.updateStatus phaseName, "Did not find any instances for $description.serverGroupName, nothing to do." - } - } catch (Exception e) { - throw new OpenstackOperationException(operation, e) - } - } - - void enableDisableLoadBalancerMembers(List<String> instanceIds, List<String> loadBalancerIds) { - String gerund = disable ? 'Disabling' : 'Enabling' - task.updateStatus phaseName, "$gerund instances in load balancers..." - Map<String, CompletableFuture<LoadBalancerV2StatusTree>> statusTrees = [:] - Map<String, CompletableFuture<List<String>>> ips = instanceIds.collectEntries { instanceId -> - task.updateStatus phaseName, "Getting ip for instance $instanceId..." - [(instanceId): CompletableFuture.supplyAsync({ - provider.getIpsForInstance(description.region, instanceId).collect { it.addr } - } as Supplier<List<String>>).exceptionally { t -> - null - }] - } - loadBalancerIds.each { lbId -> - task.updateStatus phaseName, "Getting load balancer tree for $lbId..." - statusTrees << [(lbId): CompletableFuture.supplyAsync({ - provider.getLoadBalancerStatusTree(description.region, lbId) - } as Supplier<LoadBalancerV2StatusTree>)] - } - CompletableFuture.allOf([statusTrees.values(), ips.values()].flatten() as CompletableFuture[]).join() - for (String id : instanceIds) { - List<String> ip = ips[(id)].get() - if (!ip) { - task.updateStatus phaseName, "Could not find floating ip for instance $id, continuing with next instance" - } else { - loadBalancerIds.each { lbId -> - LoadBalancerV2StatusTree status = statusTrees[(lbId)].get() - status.loadBalancerV2Status?.listenerStatuses?.each { listenerStatus -> - listenerStatus.lbPoolV2Statuses?.each { poolStatus -> - poolStatus.memberStatuses?.each { memberStatus -> - if (memberStatus.address && ip.contains(memberStatus.address)) { - task.updateStatus phaseName, "$gerund member instance $id with ip $memberStatus.address on load balancer $lbId with listener ${listenerStatus.id} and pool ${poolStatus.id}..." - provider.updatePoolMemberStatus(description.region, poolStatus.id, memberStatus.id, !disable) - task.updateStatus phaseName, "Waiting on ${gerund.toLowerCase()} member instance $id with ip $memberStatus.address on load balancer $lbId with listener ${listenerStatus.id} and pool ${poolStatus.id}..." - LoadBalancerChecker.from(description.credentials.credentials.lbaasConfig, LoadBalancerChecker.Operation.UPDATE).execute { - provider.getLoadBalancer(description.region, lbId) - } - } - } - } - } - } - } - task.updateStatus phaseName, "Finished updating instance membership in load balancers."
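enableDisableLoadBalancerMembers above fans out one CompletableFuture per instance (IP lookup) and per load balancer (status tree), joins them all, and only then walks the nested listener/pool/member statuses. The fan-out/join skeleton on its own, with illustrative values only:

import java.util.concurrent.CompletableFuture
import java.util.function.Supplier

def ips = ['instance-1', 'instance-2'].collectEntries { id ->
  [(id): CompletableFuture.supplyAsync({ "10.0.0.${id[-1]}".toString() } as Supplier)]
}
CompletableFuture.allOf(ips.values() as CompletableFuture[]).join()  // block until every lookup settles
ips.each { id, future -> println "$id resolved to ${future.get()}" }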
- } - - OpenstackClientProvider getProvider() { - description.credentials.provider - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractStackUpdateOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractStackUpdateOpenstackAtomicOperation.groovy deleted file mode 100644 index 3a3153763eb..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractStackUpdateOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.openstack4j.model.heat.Stack - -abstract class AbstractStackUpdateOpenstackAtomicOperation implements AtomicOperation<Void> { - - OpenstackServerGroupAtomicOperationDescription description - - AbstractStackUpdateOpenstackAtomicOperation(OpenstackServerGroupAtomicOperationDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /** - * Return the phase name. - * @return - */ - abstract String getPhaseName() - - /** - * Return the operation. - * @return - */ - abstract String getOperation() - - /** - * Get the server group name to operate on. Defaults to what was passed in. - * If server group name was not passed in, you can override to find an alternate server group to work with. - * @return - */ - String getServerGroupName() { - description.serverGroupName - } - - /** - * Return the new parameters that you want to apply to the stack. - * Defaults to returning the existing parameters. - * @param stack - * @return - */ - ServerGroupParameters buildServerGroupParameters(Stack stack) { - ServerGroupParameters.fromParamsMap(stack.parameters) - } - - /** - * Defaults to noop. - * @param stack - */ - void preUpdate(Stack stack) { - } - - /** - * Defaults to noop.
- * @param stack - */ - void postUpdate(Stack stack) { - } - - @Override - Void operate(List priorOutputs) { - try { - task.updateStatus phaseName, "Initializing $operation" - OpenstackClientProvider provider = description.credentials.provider - - //get stack from server group - String foundServerGroupName = serverGroupName - task.updateStatus phaseName, "Fetching server group $foundServerGroupName" - Stack stack = provider.getStack(description.region, foundServerGroupName) - if (!stack) { - throw new OpenstackResourceNotFoundException("Could not find stack $foundServerGroupName in region: $description.region") - } - - //pre update ops - preUpdate(stack) - - String resourceFileName = ServerGroupConstants.SUBTEMPLATE_FILE - - List<Map<String, Object>> outputs = stack.outputs - String resourceSubtemplate = outputs.find { m -> m.get("output_key") == ServerGroupConstants.SUBTEMPLATE_OUTPUT }.get("output_value") - String memberTemplate = outputs.find { m -> m.get("output_key") == ServerGroupConstants.MEMBERTEMPLATE_OUTPUT }?.get("output_value") - task.updateStatus phaseName, "Successfully fetched server group $foundServerGroupName" - - //get the current template from the stack - task.updateStatus phaseName, "Fetching current template for server group $foundServerGroupName" - String template = provider.getHeatTemplate(description.region, stack.name, stack.id) - task.updateStatus phaseName, "Successfully fetched current template for server group $foundServerGroupName" - - Map<String, String> templateMap = [(resourceFileName): resourceSubtemplate] - if (memberTemplate) { - templateMap << [(ServerGroupConstants.MEMBERTEMPLATE_FILE): memberTemplate] - } - - //update stack - task.updateStatus phaseName, "Updating server group $stack.name" - provider.updateStack(description.region, stack.name, stack.id, template, templateMap, buildServerGroupParameters(stack), stack.tags) - - task.updateStatus phaseName, "Waiting on heat stack update status ${stack.name}..." - def config = description.credentials.credentials.stackConfig - StackChecker stackChecker = new StackChecker(StackChecker.Operation.UPDATE) - BlockingStatusChecker statusChecker = BlockingStatusChecker.from(config.pollTimeout, config.pollInterval, stackChecker) - statusChecker.execute { - provider.getStack(description.region, description.serverGroupName) - } - task.updateStatus phaseName, "Successfully updated server group $stack.name" - - //post update ops - postUpdate(stack) - - task.updateStatus phaseName, "Successfully completed $operation." - } catch (Exception e) { - throw new OpenstackOperationException(operation, e) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/CloneOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/CloneOpenstackAtomicOperation.groovy deleted file mode 100644 index a1a22ab94e0..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/CloneOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.CloneOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.openstack4j.model.heat.Stack - -class CloneOpenstackAtomicOperation implements AtomicOperation<DeploymentResult> { - private static final String BASE_PHASE = "CLONE_SERVER_GROUP" - - CloneOpenstackAtomicOperationDescription description - - CloneOpenstackAtomicOperation(CloneOpenstackAtomicOperationDescription description) { - this.description = description - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[{"cloneServerGroup": {"source": {"serverGroupName": "myapp-teststack-v000", "region": "RegionOne"}, "region": "RegionTwo", "account": "test"}}]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - @Override - DeploymentResult operate(List priorOutputs) { - DeploymentResult deploymentResult - try { - DeployOpenstackAtomicOperationDescription newDescription = cloneAndOverrideDescription() - - task.updateStatus BASE_PHASE, "Initializing cloning of server group ${description.source.serverGroupName}" - - DeployOpenstackAtomicOperation deployer = new DeployOpenstackAtomicOperation(newDescription) - deploymentResult = deployer.operate(priorOutputs) - - task.updateStatus BASE_PHASE, "Finished cloning server group ${description.source.serverGroupName}" - } catch (Exception e) { - throw new OpenstackOperationException(AtomicOperations.CLONE_SERVER_GROUP, e) - } - deploymentResult - } - - DeployOpenstackAtomicOperationDescription cloneAndOverrideDescription() { - DeployOpenstackAtomicOperationDescription deployDescription = description.clone() - - task.updateStatus BASE_PHASE, "Reading ancestor stack name ${description.source.serverGroupName}" - - Stack ancestorStack = description.credentials.provider.getStack(description.source.region, description.source.serverGroupName) - if (!ancestorStack) { - throw new OpenstackOperationException(AtomicOperations.CLONE_SERVER_GROUP, "Source stack ${description.source.serverGroupName} does not exist") - } - ServerGroupParameters ancestorParams = ServerGroupParameters.fromParamsMap(ancestorStack.parameters) - Names ancestorNames = Names.parseName(description.source.serverGroupName) - - task.updateStatus BASE_PHASE,
"Done reading ancestor stack name ${description.source.serverGroupName}" - - task.updateStatus BASE_PHASE, "Creating new server group description" - - deployDescription.application = description.application ?: ancestorNames.app - deployDescription.stack = description.stack ?: ancestorNames.stack - deployDescription.freeFormDetails = description.freeFormDetails ?: ancestorNames.detail - deployDescription.serverGroupParameters = description.serverGroupParameters ?: new ServerGroupParameters() - deployDescription.serverGroupParameters.with { - image = description.serverGroupParameters?.image ?: ancestorParams.image - instanceType = description.serverGroupParameters?.instanceType ?: ancestorParams.instanceType - maxSize = description.serverGroupParameters?.maxSize ?: ancestorParams.maxSize - minSize = description.serverGroupParameters?.minSize ?: ancestorParams.minSize - desiredSize = description.serverGroupParameters?.desiredSize ?: ancestorParams.desiredSize - subnetId = description.serverGroupParameters?.subnetId ?: ancestorParams.subnetId - loadBalancers = description.serverGroupParameters?.loadBalancers ?: ancestorParams.loadBalancers - securityGroups = description.serverGroupParameters?.securityGroups ?: ancestorParams.securityGroups - autoscalingType = description.serverGroupParameters?.autoscalingType ?: ancestorParams.autoscalingType - scaleup = description.serverGroupParameters?.scaleup ?: ancestorParams.scaleup - scaledown = description.serverGroupParameters?.scaledown ?: ancestorParams.scaledown - tags = description.serverGroupParameters?.tags ?: ancestorParams.tags - - // Lack of floatingNetworkId means to not set one, so can't pull this value from the ancestorParams - floatingNetworkId = description.serverGroupParameters?.floatingNetworkId - } - deployDescription.userDataType = description.userDataType ?: ancestorParams.sourceUserDataType - deployDescription.userData = description.userData ?: ancestorParams.sourceUserData - deployDescription.disableRollback = description.disableRollback ?: false - deployDescription.timeoutMins = description.timeoutMins ?: ancestorStack.timeoutMins - deployDescription.region = description.region - - task.updateStatus BASE_PHASE, "Finished creating new server group description" - - deployDescription - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DeployOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DeployOpenstackAtomicOperation.groovy deleted file mode 100644 index fe539010e7c..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DeployOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Copyright 2016 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.dataformat.yaml.YAMLFactory -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.OpenstackServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.MemberData -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.UserDataType -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.StackPoolMemberAware -import com.netflix.spinnaker.clouddriver.openstack.domain.LoadBalancerResolver -import com.netflix.spinnaker.clouddriver.openstack.task.TaskStatusAware -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.apache.commons.io.FileUtils -import org.apache.commons.io.IOUtils -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.Subnet - -import java.util.concurrent.ConcurrentHashMap - -/** - * For now, we want to provide 'the standard' way of being able to configure an autoscaling group in much the same way - * as it is done with other providers, albeit with the hardcoded templates. - * Later on we should consider adding in the feature to provide custom templates. - * - * Overriding the default via configuration is a good idea, as long as people do their diligence to honor - * the properties that the template can expect to be given to it. The Openstack API is finicky when properties - * are provided but not used, and doesn't work at all when properties are not provided but expected. - * - * Being able to pass in the template via free-form text is also a good idea, - * but again it would need to honor the expected parameters. - * We could use the freeform details field to store the template string. 
- */ -class DeployOpenstackAtomicOperation implements TaskStatusAware, AtomicOperation<DeploymentResult>, StackPoolMemberAware, LoadBalancerResolver { - - private final String BASE_PHASE = "DEPLOY" - - DeployOpenstackAtomicOperationDescription description - - static final Map<String, String> templateMap = new ConcurrentHashMap<>() - - private ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory()) - - DeployOpenstackAtomicOperation(DeployOpenstackAtomicOperationDescription description) { - this.description = description - } - - /* - curl -X POST -H "Content-Type: application/json" -d '[{ - "createServerGroup": { - "stack": "teststack", - "application": "myapp", - "serverGroupParameters": { - "instanceType": "m1.medium", - "image": "4e0d0b4b-8089-4703-af99-b6a0c90fbbc7", - "maxSize": 5, - "minSize": 3, - "desiredSize": 4, - "subnetId": "77bb3aeb-c1e2-4ce5-8d8f-b8e9128af651", - "floatingNetworkId": "99bb3aeb-c1e2-4ce5-8d8f-b8e9128af699", - "loadBalancers": ["87077f97-83e7-4ea1-9ca9-40dc691846db"], - "securityGroups": ["e56fa7eb-550d-42d4-8d3f-f658fbacd496"], - "scaleup": { - "cooldown": 60, - "adjustment": 1, - "period": 60, - "threshold": 50 - }, - "scaledown": { - "cooldown": 60, - "adjustment": -1, - "period": 600, - "threshold": 15 - }, - "tags": { - "foo": "bar", - "bar": "foo" - } - }, - "userDataType": "URL", - "userData": "http://foobar.com", - "region": "REGION1", - "disableRollback": false, - "timeoutMins": 5, - "account": "test" - } - }]' localhost:7002/openstack/ops - */ - @Override - DeploymentResult operate(List priorOutputs) { - DeploymentResult deploymentResult = new DeploymentResult() - try { - task.updateStatus BASE_PHASE, "Initializing creation of server group..." - OpenstackClientProvider provider = description.credentials.provider - - def serverGroupNameResolver = new OpenstackServerGroupNameResolver(description.credentials, description.region) - def groupName = serverGroupNameResolver.combineAppStackDetail(description.application, description.stack, description.freeFormDetails) - - task.updateStatus BASE_PHASE, "Looking up next sequence index for cluster ${groupName}..." - def stackName = serverGroupNameResolver.resolveNextServerGroupName(description.application, description.stack, description.freeFormDetails, false) - task.updateStatus BASE_PHASE, "Heat stack name chosen to be ${stackName}." - - Map<String, Map> templates = [ - main: objectMapper.readValue(getTemplateFile(ServerGroupConstants.TEMPLATE_FILE), Map) - ] - - if (description.serverGroupParameters.floatingNetworkId) { - templates.main.parameters.floating_network_id = [type: "string", description: "Network used to allocate a floating IP for each server."] - templates.main.resources.servergroup.properties.resource.properties.floating_network_id = [get_param: "floating_network_id"] - - } - if (description.serverGroupParameters.loadBalancers && !description.serverGroupParameters.loadBalancers.isEmpty()) { - //look up all load balancer listeners -> pool ids and internal ports - task.updateStatus BASE_PHASE, "Getting load balancer details for load balancers $description.serverGroupParameters.loadBalancers..." - List<MemberData> memberDataList = buildMemberData(description.credentials, description.region, description.serverGroupParameters.subnetId, description.serverGroupParameters.loadBalancers, this.&parseListenerKey) - task.updateStatus BASE_PHASE, "Finished getting load balancer details for load balancers $description.serverGroupParameters.loadBalancers."
- - templates[ServerGroupConstants.SUBTEMPLATE_FILE] = objectMapper.readValue(getTemplateFile(ServerGroupConstants.SUBTEMPLATE_FILE), Map) - //check for floating ip - if (description.serverGroupParameters.floatingNetworkId) { - templates[ServerGroupConstants.SUBTEMPLATE_FILE].parameters.floating_network_id = [type: "string", description: "Network used to allocate a floating IP for each server."] - templates[ServerGroupConstants.SUBTEMPLATE_FILE].resources.server_floating_ip = [ - type: "OS::Neutron::FloatingIP", - properties: [ - floating_network_id: [get_param: "floating_network_id"], - port_id: [get_attr: ["server", "addresses", [get_param: "network_id"], 0, "port"]] - ] - ] - } - - task.updateStatus BASE_PHASE, "Loading lbaas subtemplates..." - if (objectMapper.writeValueAsString(templates[ServerGroupConstants.SUBTEMPLATE_FILE]).contains(ServerGroupConstants.MEMBERTEMPLATE_FILE)) { - templates[ServerGroupConstants.MEMBERTEMPLATE_FILE] = buildPoolMemberTemplate(memberDataList) - } - task.updateStatus BASE_PHASE, "Finished loading lbaas templates." - } else { - task.updateStatus BASE_PHASE, "Loading subtemplates..." - - //check for floating ip - templates[ServerGroupConstants.SUBTEMPLATE_FILE] = objectMapper.readValue(getTemplateFile(ServerGroupConstants.SUBTEMPLATE_SERVER_FILE), Map) - if (description.serverGroupParameters.floatingNetworkId) { - templates[ServerGroupConstants.SUBTEMPLATE_FILE].parameters.floating_network_id = [type: "string", description: "Network used to allocate a floating IP for each server."] - templates[ServerGroupConstants.SUBTEMPLATE_FILE].resources.server_floating_ip = [ - type: "OS::Neutron::FloatingIP", - properties: [ - floating_network_id: [get_param: "floating_network_id"], - port_id: [get_attr: ["server", "addresses", [get_param: "network_id"], 0, "port"]] - ] - ] - } - task.updateStatus BASE_PHASE, "Finished loading templates." - } - - String subnetId = description.serverGroupParameters.subnetId - task.updateStatus BASE_PHASE, "Getting network id from subnet $subnetId..." - Subnet subnet = provider.getSubnet(description.region, subnetId) - task.updateStatus BASE_PHASE, "Found network id $subnet.networkId from subnet $subnetId." - - String userData = getUserData(provider, stackName) - - if (description.serverGroupParameters.zones) { - task.updateStatus BASE_PHASE, "Creating zone policy for ${description.serverGroupParameters.zones.size()} zones" - addZonePlacementPolicy(description.serverGroupParameters.zones, templates.main, templates[ServerGroupConstants.SUBTEMPLATE_FILE]) - } - - task.updateStatus BASE_PHASE, "Creating heat stack $stackName..." - ServerGroupParameters params = description.serverGroupParameters.identity { - it.networkId = subnet.networkId - it.rawUserData = userData - it.sourceUserDataType = description.userDataType - it.sourceUserData = description.userData - it - } - - def template = objectMapper.writeValueAsString(templates.main) - //drop the primary template and convert everything to string - def subtemplates = (Map) templates.findAll { it.key != "main"}.collectEntries {k, v -> [(k): objectMapper.writeValueAsString(v)]} - - provider.deploy(description.region, stackName, template, subtemplates, params, - description.disableRollback, description.timeoutMins, description.serverGroupParameters.loadBalancers) - - task.updateStatus BASE_PHASE, "Waiting on heat stack creation status $stackName..." 
- // create a status checker for the stack creation status - def config = description.credentials.credentials.stackConfig - StackChecker stackChecker = new StackChecker(StackChecker.Operation.CREATE) - BlockingStatusChecker statusChecker = BlockingStatusChecker.from(config.pollTimeout, config.pollInterval, stackChecker) - statusChecker.execute { - provider.getStack(description.region, stackName) - } - - task.updateStatus BASE_PHASE, "Finished creating heat stack $stackName." - - task.updateStatus BASE_PHASE, "Successfully created server group." - - deploymentResult.serverGroupNames = ["$description.region:$stackName".toString()] //stupid GString - deploymentResult.serverGroupNameByRegion = [(description.region): stackName] - } catch (Exception e) { - throw new OpenstackOperationException(AtomicOperations.CREATE_SERVER_GROUP, e) - } - deploymentResult - } - - String getUserData(OpenstackClientProvider provider, String serverGroupName) { - String customUserData = '' - if (description.userDataType && description.userData) { - if (UserDataType.fromString(description.userDataType) == UserDataType.URL) { - task.updateStatus BASE_PHASE, "Resolving user data from url $description.userData..." - customUserData = description.userData.toURL()?.text - } else if (UserDataType.fromString(description.userDataType) == UserDataType.SWIFT) { - String[] parts = description.userData.split(":") - if (parts?.length == 2) { - customUserData = provider.readSwiftObject(description.region, parts[0], parts[1]) - if (!customUserData) { - throw new OpenstackResourceNotFoundException("Failed to read the Swift object ${parts[0]}/${parts[1]} in region ${description.region}") - } - } - } else { - customUserData = description.userData - } - } - - String userData = description.credentials.userDataProvider.getUserData(serverGroupName, description.region, customUserData) - task.updateStatus BASE_PHASE, "Resolved user data." - userData - } - - /** - * Return the file contents of a template, either from the account config location or from the classpath. 
- * @param filename - * @return - */ - String getTemplateFile(String filename) { - Optional.ofNullable(templateMap.get(filename)).orElseGet { - String template - String tmplDir = description.credentials.credentials.heatTemplateLocation - if (tmplDir && new File("$tmplDir/${filename}").exists()) { - template = FileUtils.readFileToString(new File("$tmplDir/${filename}")) - } else { - template = IOUtils.toString(this.class.classLoader.getResourceAsStream(filename)) - } - templateMap.put(filename, template) - template ?: "" - } - } - - private static void addZonePlacementPolicy(List zones, Map mainTemplate, Map resourceTemplate) { - def placementList = zones.collect { zone -> - [ - name: zone, - weight: 100 - ] - } - mainTemplate.resources.zone_policy = [ - type: "OS::Senlin::Policy", - properties: [ - type: "senlin.policy.zone_placement", - version: "1.0", - properties: [ - regions: placementList - ] - ] - ] - mainTemplate.resources.zone_policy_group = [ - type: "OS::Nova::ServerGroup", - properties: [ - policies: [ - [ - get_resource: "zone_policy" - ] - ] - ] - ] - resourceTemplate.resources.server.properties.scheduler_hints = [ - group: [ - get_resource: "zone_policy_group" - ] - ] - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DestroyOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DestroyOpenstackAtomicOperation.groovy deleted file mode 100644 index a5e25ee4a34..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DestroyOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import groovy.util.logging.Slf4j -import org.openstack4j.model.heat.Stack - -@Slf4j -class DestroyOpenstackAtomicOperation implements AtomicOperation { - private final String BASE_PHASE = "DESTROY" - OpenstackServerGroupAtomicOperationDescription description; - - DestroyOpenstackAtomicOperation(OpenstackServerGroupAtomicOperationDescription description) { - this.description = description - } - - protected static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "destroyServerGroup": { "serverGroupName": "drmaastestapp-drmaasteststack-v000", "region": "region", "account": "test" }} ]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - @Override - Void operate(List priorOutputs) { - try { - OpenstackClientProvider provider = description.credentials.provider - - task.updateStatus BASE_PHASE, "Initializing destruction of server group" - - task.updateStatus BASE_PHASE, "Looking up heat stack ${description.serverGroupName}..." - Stack stack = provider.getStack(description.region, description.serverGroupName) - if (!stack) { - throw new OpenstackResourceNotFoundException("Could not find stack $description.serverGroupName in region: $description.region") - } - task.updateStatus BASE_PHASE, "Found heat stack ${description.serverGroupName}..." - - task.updateStatus BASE_PHASE, "Destroying heat stack ${stack.name} with id ${stack.id}..." - - provider.destroy(description.region, stack) - - task.updateStatus BASE_PHASE, "Waiting on heat stack deletion status ${description.serverGroupName}..." - def config = description.credentials.credentials.stackConfig - StackChecker stackChecker = new StackChecker(StackChecker.Operation.DELETE) - BlockingStatusChecker statusChecker = BlockingStatusChecker.from(config.pollTimeout, config.pollInterval, stackChecker) - statusChecker.execute { - provider.getStack(description.region, description.serverGroupName) - } - - task.updateStatus BASE_PHASE, "Destroyed heat stack ${stack.name} with id ${stack.id}..." 
- - task.updateStatus BASE_PHASE, "Successfully destroyed server group" - } catch (OpenstackProviderException e) { - throw new OpenstackOperationException("Failed to destroy server group $description.serverGroupName in region $description.region", e) - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DisableOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DisableOpenstackAtomicOperation.groovy deleted file mode 100644 index 81f7dc57b40..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DisableOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations - -/** - * curl -X POST -H "Content-Type: application/json" -d '[ { "disableServerGroup": { "serverGroupName": "myapp-teststack-v006", "region": "RegionOne", "account": "test" }} ]' localhost:7002/openstack/ops - */ -class DisableOpenstackAtomicOperation extends AbstractEnableDisableOpenstackAtomicOperation { - final String phaseName = "DISABLE_SERVER_GROUP" - final String operation = AtomicOperations.DISABLE_SERVER_GROUP - DisableOpenstackAtomicOperation(EnableDisableAtomicOperationDescription description) { - super(description) - } - - @Override - boolean isDisable() { - true - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/EnableOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/EnableOpenstackAtomicOperation.groovy deleted file mode 100644 index 6ccb6e485a3..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/EnableOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations - -/** - * curl -X POST -H "Content-Type: application/json" -d '[ { "enableServerGroup": { "serverGroupName": "myapp-test-v000", "region": "RegionOne", "account": "test" }} ]' localhost:7002/openstack/ops - */ -class EnableOpenstackAtomicOperation extends AbstractEnableDisableOpenstackAtomicOperation { - final String phaseName = "ENABLE_SERVER_GROUP" - final String operation = AtomicOperations.ENABLE_SERVER_GROUP - EnableOpenstackAtomicOperation(EnableDisableAtomicOperationDescription description) { - super(description) - } - - @Override - boolean isDisable() { - false - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ResizeOpenstackAtomicOperation.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ResizeOpenstackAtomicOperation.groovy deleted file mode 100644 index 57c8f9fd44b..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ResizeOpenstackAtomicOperation.groovy +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ResizeOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.openstack4j.model.heat.Stack - -class ResizeOpenstackAtomicOperation extends AbstractStackUpdateOpenstackAtomicOperation { - - final String phaseName = "RESIZE" - - final String operation = AtomicOperations.RESIZE_SERVER_GROUP - - ResizeOpenstackAtomicOperation(ResizeOpenstackAtomicOperationDescription description) { - super(description) - } - - /* - * curl -X POST -H "Content-Type: application/json" -d '[ { "resizeServerGroup": { "serverGroupName": "myapp-teststack-v000", "capacity": { "min": 1, "desired": 2, "max": 3 }, "account": "test", "region": "REGION1" }} ]' localhost:7002/openstack/ops - * curl -X GET -H "Accept: application/json" localhost:7002/task/1 - */ - - @Override - ServerGroupParameters buildServerGroupParameters(Stack stack) { - ServerGroupParameters params = ServerGroupParameters.fromParamsMap(stack.parameters) - ServerGroupParameters newParams = params.clone() - newParams.with { - minSize = description.capacity.min - maxSize = description.capacity.max - desiredSize = description.capacity.desired - it - } - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ServerGroupConstants.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ServerGroupConstants.groovy deleted file mode 100644 index 4596a332878..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ServerGroupConstants.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup
-
-class ServerGroupConstants {
-  public static final String HEAT_SERVER_RESOURCE = "OS::Nova::Server"
-  public static final String HEAT_ASG_RESOURCE = "OS::Heat::AutoScalingGroup"
-  final static String SERVERGROUP_RESOURCE_NAME = 'servergroup'
-
-  final static String SUBTEMPLATE_OUTPUT = 'servergroup_resource'
-  final static String MEMBERTEMPLATE_OUTPUT = 'servergroup_resource_member'
-
-  //this is the file name of the heat template used to create the auto scaling group,
-  //and needs to be loaded into memory as a String
-  final static String TEMPLATE_FILE = 'servergroup.yaml'
-
-  //this is the name of the subtemplate referenced by the template,
-  //and needs to be loaded into memory as a String
-  final static String SUBTEMPLATE_SERVER_FILE = "servergroup_server.yaml"
-
-  //this is the name of the subtemplate referenced by the template,
-  //and needs to be loaded into memory as a String
-  final static String SUBTEMPLATE_FILE = "${SUBTEMPLATE_OUTPUT}.yaml".toString()
-
-  //this is the name of the member template referenced by the subtemplate,
-  //and is constructed on the fly
-  final static String MEMBERTEMPLATE_FILE = "${MEMBERTEMPLATE_OUTPUT}.yaml".toString()
-
-  // Only used for backward compatibility when interacting with heat stacks from before clouddriver v1.772.0
-  final static String LEGACY_RESOURCE_FILENAME_KEY = 'resource_filename'
-}
diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/StackChecker.java b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/StackChecker.java
deleted file mode 100644
index 1b64b79e1d0..00000000000
--- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/StackChecker.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2018 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup;
-
-import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker;
-import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException;
-import org.openstack4j.model.common.ActionResponse;
-import org.openstack4j.model.heat.Stack;
-
-/**
- * This class checks if an OpenStack stack is in a ready state.
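- *
- * <p>Usage sketch, mirroring the Groovy deploy and destroy operations above (config, provider, region and
- * stackName are placeholders taken from that calling code): poll Heat until the stack settles.
- *
- * <pre>
- *   StackChecker stackChecker = new StackChecker(StackChecker.Operation.CREATE)
- *   BlockingStatusChecker statusChecker = BlockingStatusChecker.from(config.pollTimeout, config.pollInterval, stackChecker)
- *   statusChecker.execute { provider.getStack(region, stackName) }
- * </pre>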
- */
-public class StackChecker implements BlockingStatusChecker.StatusChecker<Stack> {
-  Operation operation;
-
-  enum Operation {
-    CREATE,
-    UPDATE,
-    DELETE
-  }
-
-  StackChecker(Operation operation) {
-    this.operation = operation;
-  }
-
-  @Override
-  public boolean isReady(Stack stack) {
-    if (stack == null) {
-      if (operation == Operation.DELETE) {
-        return true;
-      }
-      ActionResponse actionResponse = ActionResponse.actionFailed("Cannot get state for null stack", 404);
-      throw new OpenstackProviderException(actionResponse);
-    }
-
-    String status = stack.getStatus();
-    String operationString = operation.toString();
-    if ((operationString + "_IN_PROGRESS").equals(status)) {
-      return false;
-    } else if ((operationString + "_FAILED").equals(status)) {
-      String message = String.format("Failed to %s stack %s: %s", operation.toString().toLowerCase(), stack.getName(), stack.getStackStatusReason());
-      ActionResponse actionResponse = ActionResponse.actionFailed(message, 500);
-      throw new OpenstackProviderException(actionResponse);
-    } else if ((operationString + "_COMPLETE").equals(status)) {
-      return true;
-    } else {
-      String message = String.format("Unknown status for stack %s: %s %s", stack.getName(), stack.getStatus(), stack.getStackStatusReason());
-      ActionResponse actionResponse = ActionResponse.actionFailed(message, 500);
-      throw new OpenstackProviderException(actionResponse);
-    }
-  }
-}
diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/AbstractOpenstackDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/AbstractOpenstackDescriptionValidator.groovy
deleted file mode 100644
index 54eb23d4637..00000000000
--- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/AbstractOpenstackDescriptionValidator.groovy
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.deploy.validators
-
-import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator
-import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
-import org.springframework.beans.factory.annotation.Autowired
-import org.springframework.validation.Errors
-
-/**
- * This class serves as base class for all openstack atomic operation validators.
- * It validates region and account information, which are common to all atomic operations.
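- *
- * A concrete subclass only supplies the operation-specific checks plus a context string; the account and
- * region checks here always run first and short-circuit on failure. A sketch, with FooDescription and the
- * context value as hypothetical stand-ins:
- *
- *   class FooDescriptionValidator extends AbstractOpenstackDescriptionValidator<FooDescription> {
- *     String context = 'fooDescription'
- *
- *     void validate(OpenstackAttributeValidator validator, List priorDescriptions, FooDescription description, Errors errors) {
- *       validator.validateUUID(description.id, 'id')
- *     }
- *   }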
- * @param <T> the type of operation description being validated
- */
-abstract class AbstractOpenstackDescriptionValidator<T extends OpenstackAtomicOperationDescription> extends DescriptionValidator<T> {
-
-  @Autowired
-  AccountCredentialsProvider accountCredentialsProvider
-
-  @Override
-  void validate(List priorDescriptions, T description, Errors errors) {
-    def validator = new OpenstackAttributeValidator(context, errors)
-    if (!validator.validateCredentials(description.account, accountCredentialsProvider)) {
-      return
-    }
-    if (!validator.validateRegion(description.region, description.credentials.provider)) {
-      return
-    }
-    validate(validator, priorDescriptions, description, errors)
-  }
-
-  /**
-   * Subclasses will implement this to provide operation-specific validation
-   * @param validator
-   * @param priorDescriptions
-   * @param description
-   * @param errors
-   */
-  abstract void validate(OpenstackAttributeValidator validator, List priorDescriptions, T description, Errors errors)
-
-  /**
-   * String description of this validation
-   * @return
-   */
-  abstract String getContext()
-}
diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackAttributeValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackAttributeValidator.groovy
deleted file mode 100644
index ef82db47893..00000000000
--- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackAttributeValidator.groovy
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.deploy.validators
-
-import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider
-import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription
-import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials
-import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider
-import org.apache.commons.net.util.SubnetUtils
-import org.openstack4j.model.compute.IPProtocol
-import org.springframework.http.HttpMethod
-import org.springframework.http.HttpStatus
-import org.springframework.validation.Errors
-
-import static UpsertOpenstackSecurityGroupDescription.Rule
-
-/**
- * TODO most of the validate methods can be moved into a base class,
- * since other drivers are doing the same thing.
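- *
- * Usage sketch (the context and attribute names are illustrative): one instance wraps the Spring Errors
- * object for a single operation, and every failed check registers an error under a
- * "context.attribute.reason" code:
- *
- *   def validator = new OpenstackAttributeValidator('createServerGroupDescription', errors)
- *   validator.validateNotEmpty(description.application, 'application')   // rejects '...application.empty' when blank
- *   validator.validatePortRange(description.internalPort, 'internalPort')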
- */ -class OpenstackAttributeValidator { - static final namePattern = /^[a-z0-9]+([-a-z0-9]*[a-z0-9])?$/ - static final prefixPattern = /^[a-z0-9]+$/ - static final int MIN_PORT = -1 - static final int MAX_PORT = (1 << 16) - 1 - - String context - Errors errors - - OpenstackAttributeValidator(String context, Errors errors) { - this.context = context - this.errors = errors - } - - boolean validateByRegex(String value, String attribute, String regex) { - def result - if (value ==~ regex) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Must match ${regex})") - result = false - } - result - } - - boolean validateByContainment(Object value, String attribute, List list) { - def result - if (list.contains(value)) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Must be one of $list)") - result = false - } - result - } - - void reject(String attribute, String reason) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid ($reason)") - } - - def validateRange(Integer value, int min, int max, String attribute) { - def result = validateNotEmpty(value, attribute) - if (result) { - result = value >= min && value <= max - if (!result) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notInRange (Must be in range [${min}, ${max}])") - } - } - result - } - - def validatePortRange(Integer value, String attribute) { - validateRange(value, MIN_PORT, MAX_PORT, attribute) - } - - boolean validateNotNull(Object obj, String attribute) { - boolean result = true - if (!obj) { - result = false - reject(attribute, 'null') - } - result - } - - boolean validateNotEmpty(Object value, String attribute) { - def result - if (value != "" && value != null && value != []) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.empty") - result = false - } - result - } - - boolean validateNotEmpty(List value, String attribute) { - def result - if (value != null && value.size() > 0) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.empty") - result = false - } - result - } - - boolean validateNonNegative(Integer value, String attribute) { - def result - if (value != null && value >= 0) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.negative") - result = false - } - result - } - - boolean validatePositive(Integer value, String attribute) { - def result - if (value != null && value > 0) { - result = true - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notPositive") - result = false - } - result - } - - boolean validateGreaterThan(Integer subject, Integer other, String attribute) { - def result - if (subject != null && other != null && subject > other) { - result = true - } - else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notGreaterThan") - result = false - } - result - } - - boolean validateGreaterThanEqual(Integer subject, Integer other, String attribute) { - def result - if (subject != null && other != null && subject >= other) { - result = true - } - else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notGreaterThan") - result = false - } - result - } - - boolean validateLessThanEqual(Integer subject, Integer other, String attribute) { - def result - if (subject != null && other != null && subject <= 
other) { - result = true - } - else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notLessThan") - result = false - } - result - } - - def validateApplication(String value, String attribute) { - if (validateNotEmpty(value, attribute)) { - return validateByRegex(value, attribute, prefixPattern) - } else { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalid (Must match ${prefixPattern})") - return false - } - } - - def validateStack(String value, String attribute) { - // Stack is optional - if (!value) { - return true - } else { - return validateByRegex(value, attribute, prefixPattern) - } - } - - def validateDetails(String value, String attribute) { - // Details are optional. - if (!value) { - return true - } else { - return validateByRegex(value, attribute, namePattern) - } - } - - /** - * Validate credentials. - * @param credentials - * @param accountCredentialsProvider - * @return - */ - def validateCredentials(String account, AccountCredentialsProvider accountCredentialsProvider) { - def result = validateNotEmpty(account, "account") - if (result) { - def openstackCredentials = accountCredentialsProvider.getCredentials(account) - if (!(openstackCredentials?.credentials instanceof OpenstackCredentials)) { - errors.rejectValue("${context}.account", "${context}.account.notFound") - result = false - } - } - result - } - - /** - * Validate string is in UUID format. - * @param value - * @param attribute - * @return - */ - def validateUUID(String value, String attribute) { - boolean result = validateNotEmpty(value, attribute) - if (result) { - try { - UUID.fromString(value) - result = true - } catch (IllegalArgumentException e) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.notUUID") - result = false - } - } - result - } - - /** - * Validate string is in CIDR format. - * @param value - * @param attribute - * @return - */ - def validateCIDR(String value, String attribute) { - boolean result = validateNotEmpty(value, attribute) - if (result) { - try { - new SubnetUtils(value) - } catch (IllegalArgumentException e) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalidCIDR") - result = false - } - } - result - } - - /** - * Validates a security rule type. Should be TCP, UDP, or ICMP. 
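- * For example, validateRuleType('TCP', 'ruleType') passes, while an unrecognized value such as 'FOO' maps
- * to IPProtocol.UNRECOGNIZED and is rejected with an invalidSecurityGroupRuleType error.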
- */ - def validateRuleType(String value, String attribute) { - boolean result = validateNotEmpty(value, attribute) - if (result) { - def type = IPProtocol.value(value) - if (type == IPProtocol.UNRECOGNIZED) { - errors.rejectValue("${context}.${attribute}", "${context}.${attribute}.invalidSecurityGroupRuleType") - result = false - } - } - result - } - - /** - * Validate integer is an HTTP status code - * @param value - * @param attribute - * @return - */ - def validateHttpStatusCode(Integer value, String attribute) { - boolean result = validateNotEmpty(value, attribute) - if (result) { - try { - HttpStatus.valueOf(value) - } catch (IllegalArgumentException e) { - reject(attribute, 'invalid Http Status Code') - result = false - } - } - result - } - - /** - * Validate string is an HTTP method - * @param value - * @param attribute - * @return - */ - def validateHttpMethod(String value, String attribute) { - boolean result = validateNotEmpty(value, attribute) - if (result) { - try { - HttpMethod.valueOf(value) - } catch (IllegalArgumentException e) { - reject(attribute, 'invalid Http Method') - result = false - } - } - result - } - - /** - * Validate string is a URI - * @param value - * @param attribute - * @return - */ - def validateURI(String value, String attribute) { - boolean result = validateNotEmpty(value, attribute) - - try { - new URI(value) - } catch (MalformedURLException | URISyntaxException | IllegalArgumentException e) { - result = false - reject(attribute, 'invalid URL') - } - result - } - - /** - * Validate the region - * @param region - * @param credentials - * @return - */ - def validateRegion(String region, OpenstackClientProvider provider) { - boolean result = validateNotEmpty(region, 'region') - if (result) { - result = provider?.allRegions?.contains(region) - if (!result) { - reject('region', 'invalid region') - } - } - result - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy deleted file mode 100644 index 2ed9c2d9fed..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidator.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.discovery - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import org.springframework.validation.Errors - -abstract class AbstractEnableDisableInstancesInDiscoveryDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackInstancesDescription description, Errors errors) { - validator.validateNotEmpty(description.instanceIds, 'instanceIds') - validator.validateRegion(description.region, description.credentials.provider) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/DisableInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/DisableInstancesInDiscoveryDescriptionValidator.groovy deleted file mode 100644 index 2313f9e9534..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/DisableInstancesInDiscoveryDescriptionValidator.groovy +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.discovery - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations - -@OpenstackOperation(AtomicOperations.DISABLE_INSTANCES_IN_DISCOVERY) -class DisableInstancesInDiscoveryDescriptionValidator extends AbstractEnableDisableInstancesInDiscoveryDescriptionValidator { - String context = 'disableInstancesInDiscoveryDescriptionValidator' -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/EnableInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/EnableInstancesInDiscoveryDescriptionValidator.groovy deleted file mode 100644 index 49256133fd5..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/EnableInstancesInDiscoveryDescriptionValidator.groovy +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.discovery - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations - -@OpenstackOperation(AtomicOperations.ENABLE_INSTANCES_IN_DISCOVERY) -class EnableInstancesInDiscoveryDescriptionValidator extends AbstractEnableDisableInstancesInDiscoveryDescriptionValidator { - String context = 'enableInstancesInDiscoveryDescriptionValidator' -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/AbstractRegistrationOpenstackInstancesDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/AbstractRegistrationOpenstackInstancesDescriptionValidator.groovy deleted file mode 100644 index 14fc08ac5a4..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/AbstractRegistrationOpenstackInstancesDescriptionValidator.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import org.springframework.validation.Errors - -abstract class AbstractRegistrationOpenstackInstancesDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackInstancesRegistrationDescription description, Errors errors) { - validator.validateNotEmpty(description.instanceIds, "instanceIds") - validator.validateNotEmpty(description.loadBalancerIds, "loadBalancerIds") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/DeregisterOpenstackInstancesDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/DeregisterOpenstackInstancesDescriptionValidator.groovy deleted file mode 100644 index dbbe5b581c7..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/DeregisterOpenstackInstancesDescriptionValidator.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.DEREGISTER_INSTANCES_FROM_LOAD_BALANCER) -@Component -class DeregisterOpenstackInstancesDescriptionValidator extends AbstractRegistrationOpenstackInstancesDescriptionValidator { - String context = "deregisterOpenstackRegistrationAtomicOperationDescription" -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RebootOpenstackInstancesDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RebootOpenstackInstancesDescriptionValidator.groovy deleted file mode 100644 index 51bf66ddf0d..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RebootOpenstackInstancesDescriptionValidator.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.REBOOT_INSTANCES) -@Component -class RebootOpenstackInstancesDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - String context = "rebootOpenstackRegistrationAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackInstancesDescription description, Errors errors) { - validator.validateNotEmpty(description.instanceIds, "instanceIds") - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RegisterOpenstackInstancesDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RegisterOpenstackInstancesDescriptionValidator.groovy deleted file mode 100644 index 2fc5925ff73..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RegisterOpenstackInstancesDescriptionValidator.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component - -@OpenstackOperation(AtomicOperations.REGISTER_INSTANCES_WITH_LOAD_BALANCER) -@Component -class RegisterOpenstackInstancesDescriptionValidator extends AbstractRegistrationOpenstackInstancesDescriptionValidator { - String context = "registerOpenstackRegistrationAtomicOperationDescription" -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/TerminateOpenstackInstancesDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/TerminateOpenstackInstancesDescriptionValidator.groovy deleted file mode 100644 index 290a10e14ca..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/TerminateOpenstackInstancesDescriptionValidator.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.TERMINATE_INSTANCES) -@Component -class TerminateOpenstackInstancesDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - String context = "terminateOpenstackRegistrationAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackInstancesDescription description, Errors errors) { - validator.validateNotEmpty(description.instanceIds, "instanceIds") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/DeleteOpenstackLoadBalancerDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/DeleteOpenstackLoadBalancerDescriptionValidator.groovy deleted file mode 100644 index 8984220980a..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/DeleteOpenstackLoadBalancerDescriptionValidator.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Target, 
Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.DeleteOpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.DELETE_LOAD_BALANCER) -@Component -class DeleteOpenstackLoadBalancerDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - String context = "deleteOpenstackLoadBalancerAtomicOperationDescription" - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, DeleteOpenstackLoadBalancerDescription description, Errors errors) { - validator.validateUUID(description.id, 'id') - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationValidator.groovy deleted file mode 100644 index acbb98e4056..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationValidator.groovy +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.loadbalancer
-
-import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation
-import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor
-import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription
-import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator
-import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator
-import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations
-import org.springframework.stereotype.Component
-import org.springframework.validation.Errors
-
-@OpenstackOperation(AtomicOperations.UPSERT_LOAD_BALANCER)
-@Component
-class UpsertOpenstackLoadBalancerAtomicOperationValidator extends AbstractOpenstackDescriptionValidator<OpenstackLoadBalancerDescription> {
-
-  String context = "upsertOpenstackLoadBalancerAtomicOperationDescription"
-
-  @Override
-  void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackLoadBalancerDescription description, Errors errors) {
-    if (description.id) {
-      validator.validateUUID(description.id, 'id')
-    }
-
-    validator.validateNotEmpty(description.name, 'name')
-    validator.validateUUID(description.subnetId, 'subnetId')
-    validator.validateNotEmpty(description.algorithm, 'algorithm')
-
-    if (description.networkId) {
-      validator.validateUUID(description.networkId, 'networkId')
-    }
-
-    description.securityGroups.each {
-      validator.validateUUID(it, 'securityGroup')
-    }
-
-    validator.validateNotEmpty(description.listeners, 'listeners')
-    description.listeners.each {
-      validator.validatePortRange(it.externalPort, 'externalPort')
-      validator.validatePortRange(it.internalPort, 'internalPort')
-      validator.validateNotNull(it.externalProtocol, 'externalProtocol')
-    }
-
-    validateHealthMonitor(validator, description.healthMonitor)
-  }
-
-  /**
-   * Helper method to validate the health monitor, if one is provided
-   * @param validator
-   * @param healthMonitor
-   */
-  protected void validateHealthMonitor(OpenstackAttributeValidator validator, HealthMonitor healthMonitor) {
-    if (healthMonitor) {
-      validator.validateNotNull(healthMonitor.type, 'type')
-      validator.validatePositive(healthMonitor.delay, 'delay')
-      validator.validatePositive(healthMonitor.timeout, 'timeout')
-      validator.validatePositive(healthMonitor.maxRetries, 'maxRetries')
-      if (healthMonitor.httpMethod) {
-        validator.validateHttpMethod(healthMonitor.httpMethod, 'httpMethod')
-      }
-      if (healthMonitor.expectedCodes) {
-        healthMonitor.expectedCodes.each {
-          validator.validateHttpStatusCode(it, 'expectedCodes')
-        }
-      }
-      if (healthMonitor.url) {
-        validator.validateURI(healthMonitor.url, 'url')
-      }
-    }
-  }
-}
diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/DeleteOpenstackSecurityGroupDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/DeleteOpenstackSecurityGroupDescriptionValidator.groovy
deleted file mode 100644
index 23b32da7b75..00000000000
--- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/DeleteOpenstackSecurityGroupDescriptionValidator.groovy
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.DeleteOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -/** - * Validates the delete security group operation description. - */ -@OpenstackOperation(AtomicOperations.DELETE_SECURITY_GROUP) -@Component -class DeleteOpenstackSecurityGroupDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - String context = "deleteOpenstackSecurityGroupAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, DeleteOpenstackSecurityGroupDescription description, Errors errors) { - validator.validateUUID(description.id, "id") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/UpsertOpenstackSecurityGroupDescriptionValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/UpsertOpenstackSecurityGroupDescriptionValidator.groovy deleted file mode 100644 index d4d430a491c..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/UpsertOpenstackSecurityGroupDescriptionValidator.groovy +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup.UpsertOpenstackSecurityGroupAtomicOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.apache.commons.lang.StringUtils -import org.openstack4j.model.compute.IPProtocol -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -/** - * Validates the upsert security group operation description. - */ -@OpenstackOperation(AtomicOperations.UPSERT_SECURITY_GROUP) -@Component -class UpsertOpenstackSecurityGroupDescriptionValidator extends AbstractOpenstackDescriptionValidator { - - static final int MIN_PORT = -1 - static final int MAX_PORT = (1 << 16) - 1 - static final int ICMP_MIN = -1 - static final int ICMP_MAX = 255 - - String context = "upsertOpenstackSecurityGroupAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, UpsertOpenstackSecurityGroupDescription description, Errors errors) { - if (StringUtils.isNotEmpty(description.id)) { - validator.validateUUID(description.id, 'id') - } - - if (!description.rules?.isEmpty()) { - description.rules.each { r -> - validator.validateRuleType(r.ruleType, 'ruleType') - - // Either the remote security group id or cidr must be provided - if (r.remoteSecurityGroupId) { - /* Remote Security Group ID can be either 'self' or a UUID. Self means the rule references the security group - * being upserted. Since the ID may not exist yet, a placeholder of 'self' is used. - */ - if (r.remoteSecurityGroupId != UpsertOpenstackSecurityGroupAtomicOperation.SELF_REFERENCIAL_RULE) { - validator.validateUUID(r.remoteSecurityGroupId, 'remoteSecurityGroupId') - } - } else { - validator.validateCIDR(r.cidr, 'cidr') - } - - if (IPProtocol.value(r.ruleType) == IPProtocol.ICMP) { - validator.validateRange(r.icmpCode, ICMP_MIN, ICMP_MAX, 'icmpCode') - validator.validateRange(r.icmpType, ICMP_MIN, ICMP_MAX, 'icmpType') - } else { - validator.validateRange(r.fromPort, MIN_PORT, MAX_PORT, 'fromPort') - validator.validateRange(r.toPort, MIN_PORT, MAX_PORT, 'toPort') - } - } - } - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/AbstractServergroupOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/AbstractServergroupOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index eba7026b188..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/AbstractServergroupOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters.Scaler -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.UserDataType -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator - -/** - * This class adds validation for creating and cloning server groups. - */ -abstract class AbstractServergroupOpenstackAtomicOperationValidator extends AbstractOpenstackDescriptionValidator { - - final String prefix = "serverGroupParameters" - final String scaleupPrefix = "scaleup" - final String scaledownPrefix = "scaledown" - - /** - * Validate server group parameters. - * @param validator - * @param parameters - * @return - */ - def validateServerGroup(OpenstackAttributeValidator validator, ServerGroupParameters parameters) { - parameters.with { - validator.validateNotEmpty(instanceType, "${prefix}.instanceType") - validator.validateNotEmpty(image, "${prefix}.image") - validator.validateNonNegative(minSize, "${prefix}.minSize") - validator.validateGreaterThanEqual(desiredSize, minSize, "${prefix}.desiredSize") - validator.validateGreaterThanEqual(maxSize, desiredSize, "${prefix}.maxSize") - validator.validateNotEmpty(subnetId, "${prefix}.subnetId") - validator.validateNotEmpty(securityGroups, "${prefix}.securityGroups") - int maxAdjustment = (maxSize && minSize) ? 
maxSize - minSize : 0 - [(scaleupPrefix):scaleup, (scaledownPrefix):scaledown].each { e -> validateScaler(validator, maxAdjustment, e.key, e.value) } - } - } - - def validateScaler(OpenstackAttributeValidator validator, int maxAdjustment, String type, Scaler scaler) { - scaler?.with { - if (adjustment) { - validator.validateLessThanEqual(Math.abs(adjustment), maxAdjustment, "${prefix}.${type}.adjustment") - if (scaleupPrefix == type) validator.validateGreaterThanEqual(adjustment, 0, "${prefix}.${type}.adjustment") - if (scaledownPrefix == type) validator.validateLessThanEqual(adjustment, 0, "${prefix}.${type}.adjustment") - } - if (cooldown) validator.validatePositive(cooldown, "${prefix}.${type}.cooldown") - if (period) validator.validatePositive(period, "${prefix}.${type}.period") - if (threshold) validator.validatePositive(threshold, "${prefix}.${type}.threshold") - } - } - - def validateUserData(OpenstackAttributeValidator validator, String userDataType, String userData) { - if (userDataType && userData) { - validator.validateByContainment(UserDataType.fromString(userDataType), "userDataType", UserDataType.values().toList()) - if (UserDataType.URL == UserDataType.fromString(userDataType)) { - validator.validateURI(userData, "userData") - } else if (UserDataType.SWIFT == UserDataType.fromString(userDataType)) { - validator.validateByRegex(userData, "userData", "(.+:.+)") - } - } - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/CloneOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/CloneOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index 1134728f675..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/CloneOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.CloneOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.CLONE_SERVER_GROUP) -@Component -class CloneOpenstackAtomicOperationValidator extends AbstractServergroupOpenstackAtomicOperationValidator { - - String context = "cloneOpenstackAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, CloneOpenstackAtomicOperationDescription description, Errors errors) { - - if (!validator.validateNotNull(description.source, "source")) { - return - } - validator.validateNotEmpty(description.source.serverGroupName, "serverGroupName") - if (description.application) { - validator.validateApplication(description.application, "application") - } - validator.validateStack(description.stack, "stack") - validator.validateDetails(description.freeFormDetails, "details") - if (description.serverGroupParameters) { - validateServerGroup(validator, description.serverGroupParameters) - } - if (description.timeoutMins) { - validator.validateNonNegative(description.timeoutMins, "timeoutMins") - } - validateUserData(validator, description.userDataType, description.userData) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DeployOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DeployOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index 5de41b550c0..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DeployOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.CREATE_SERVER_GROUP) -@Component -class DeployOpenstackAtomicOperationValidator extends AbstractServergroupOpenstackAtomicOperationValidator { - - String context = "deployOpenstackAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, DeployOpenstackAtomicOperationDescription description, Errors errors) { - validator.validateApplication(description.application, "application") - validator.validateStack(description.stack, "stack") - validator.validateDetails(description.freeFormDetails, "details") - validator.validateNonNegative(description.timeoutMins, "timeoutMins") - validateServerGroup(validator, description.serverGroupParameters) - validateUserData(validator, description.userDataType, description.userData) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DestroyOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DestroyOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index 63ca12e72b9..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DestroyOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.DESTROY_SERVER_GROUP) -@Component -class DestroyOpenstackAtomicOperationValidator extends AbstractOpenstackDescriptionValidator { - - String context = "destroyOpenstackServerGroupAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackServerGroupAtomicOperationDescription description, Errors errors) { - validator.validateNotEmpty(description.serverGroupName, "serverGroupName") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DisableOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DisableOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index 10b05ca9c3f..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DisableOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.DISABLE_SERVER_GROUP) -@Component -class DisableOpenstackAtomicOperationValidator extends AbstractOpenstackDescriptionValidator { - - String context = "disableOpenstackServerGroupAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, EnableDisableAtomicOperationDescription description, Errors errors) { - validator.validateNotEmpty(description.serverGroupName, "serverGroupName") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/EnableOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/EnableOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index ee87d633d81..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/EnableOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.ENABLE_SERVER_GROUP) -@Component -class EnableOpenstackAtomicOperationValidator extends AbstractOpenstackDescriptionValidator { - - String context = "enableOpenstackServerGroupAtomicOperationDescription" - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, EnableDisableAtomicOperationDescription description, Errors errors) { - validator.validateNotEmpty(description.serverGroupName, "serverGroupName") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/ResizeOpenstackAtomicOperationValidator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/ResizeOpenstackAtomicOperationValidator.groovy deleted file mode 100644 index 475a8e46305..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/ResizeOpenstackAtomicOperationValidator.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.OpenstackOperation -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ResizeOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.AbstractOpenstackDescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.springframework.stereotype.Component -import org.springframework.validation.Errors - -@OpenstackOperation(AtomicOperations.RESIZE_SERVER_GROUP) -@Component -class ResizeOpenstackAtomicOperationValidator extends AbstractOpenstackDescriptionValidator { - - String context = "resizeOpenstackServerGroupAtomicOperationDescription" - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, ResizeOpenstackAtomicOperationDescription description, Errors errors) { - validator.validateNotEmpty(description.serverGroupName, "serverGroupName") - validator.validateNotNull(description.capacity, "capacity") - validator.validateNonNegative(description.capacity.min, "capacity.min") - validator.validateGreaterThanEqual(description.capacity.desired, description.capacity.min, "capacity.desired") - validator.validateGreaterThanEqual(description.capacity.max, description.capacity.desired, "capacity.max") - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/HealthMonitor.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/HealthMonitor.groovy deleted file mode 100644 index bd6430e36b4..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/HealthMonitor.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.domain - -import com.fasterxml.jackson.annotation.JsonCreator - -class HealthMonitor { - enum HealthMonitorType { - PING, TCP, HTTP, HTTPS - - @JsonCreator - public static HealthMonitorType forValue(String value) { - HealthMonitorType result = null - - if (value) { - result = values().find { it.name().equalsIgnoreCase(value) } - } - - result ?: HTTP - } - } - - String id - HealthMonitorType type - Integer delay - Integer timeout - Integer maxRetries - String httpMethod - String url - List expectedCodes -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/LoadBalancerResolver.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/LoadBalancerResolver.groovy deleted file mode 100644 index 3cf4c1a9ab6..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/LoadBalancerResolver.groovy +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.domain - -import java.util.regex.Matcher -import java.util.regex.Pattern - -/** - * Load balancer descriptions are used to store the created_time for a load balancer. - * - * It is a key value pair. - * - * For example: - * - * {@code created_time=12345678} - * - * Load balancer listener descriptions are used to store external protocol and ports. - * - * For example: - * - * {@code HTTP:80:8080} - */ -trait LoadBalancerResolver { - - final Pattern createdPattern = Pattern.compile(".*created_time=([0-9]+).*") - - /** - * Generate key=value port string, e.g. internal_port - * @param port - * @return - */ - String getListenerKey(String externalProtocol, int externalPort, int port) { - "${externalProtocol}:${externalPort}:${port}" - } - - Map parseListenerKey(String key) { - Map result = [:] - - String[] parts = key?.split(':') - - if (parts?.length == 3) { - result << [externalProtocol: parts[0], externalPort: parts[1], internalPort: parts[2]] - } - - result - } - - - - /** - * Parse the created time from a load balancer description in the following format. - *
- * {@code - * ...,created_time=12345678,... - * } - * @param description - * @return the created time value - */ - Long parseCreatedTime(final String description) { - String s = match(description, createdPattern) - s ? s.toLong() : null - } - - /** - * Generate key=value createdTime string, e.g. created_time=12345678 - * @param time - * @return - */ - String generateCreatedTime(long time) { - "created_time=${time}" - } - - /** - * Match a pattern in the comma-separated fields of the description - * @param description - * @param pattern - * @return - */ - String match(final String description, final Pattern pattern) { - String result = null - for (String s : description?.split(',')) { - Matcher matcher = pattern.matcher(s) - if (matcher.matches() && matcher.groupCount() == 1) { - result = matcher.group(1) - break - } - } - result - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/health/OpenstackHealthIndicator.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/health/OpenstackHealthIndicator.groovy deleted file mode 100644 index 53b5629dff4..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/health/OpenstackHealthIndicator.groovy +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.openstack.health - -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import groovy.transform.InheritConstructors -import org.slf4j.Logger -import org.slf4j.LoggerFactory -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.boot.actuate.health.Health -import org.springframework.boot.actuate.health.HealthIndicator -import org.springframework.http.HttpStatus -import org.springframework.scheduling.annotation.Scheduled -import org.springframework.stereotype.Component -import org.springframework.web.bind.annotation.ResponseStatus - -import java.util.concurrent.atomic.AtomicReference - -@Component -class OpenstackHealthIndicator implements HealthIndicator { - private static final Logger LOG = LoggerFactory.getLogger(OpenstackHealthIndicator) - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - private final AtomicReference lastException = new AtomicReference<>(null) - - @Override - Health health() { - def ex = lastException.get() - - if (ex) throw ex - - new Health.Builder().up().build() - } - - @Scheduled(fixedDelay = 300000L) - void checkHealth() { - try { - Set openstackCredentialsSet = accountCredentialsProvider.all.findAll { - it instanceof OpenstackNamedAccountCredentials - } as Set - - for (OpenstackNamedAccountCredentials accountCredentials in openstackCredentialsSet) { - OpenstackCredentials openstackCredentials = accountCredentials.credentials - openstackCredentials.provider.tokenId - } - - lastException.set(null) - } catch (Exception ex) { - LOG.warn "Unhealthy", ex - lastException.set(ex) - } - } - - @ResponseStatus(value = HttpStatus.SERVICE_UNAVAILABLE, reason = "Problem communicating with Openstack.") - @InheritConstructors - static class OpenstackIOException extends RuntimeException {} -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackApplication.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackApplication.groovy deleted file mode 100644 index 9a7e35f13f8..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackApplication.groovy +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.Application -import groovy.transform.EqualsAndHashCode - -@EqualsAndHashCode -class OpenstackApplication implements Application { - final String name - final Map<String, String> attributes - final Map<String, Set<String>> clusterNames - - OpenstackApplication(String name, Map<String, String> attributes, Map<String, Set<String>> clusterNames) { - this.name = name - this.attributes = attributes - this.clusterNames = clusterNames - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackCluster.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackCluster.groovy deleted file mode 100644 index 3161b9539d3..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackCluster.groovy +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import groovy.transform.Canonical - -@Canonical -class OpenstackCluster { - String accountName - String name - Set serverGroups = [].toSet() - Set loadBalancers = [].toSet() - String type = OpenstackCloudProvider.ID - - @JsonIgnore - View getView() { - new View() - } - - @Canonical - class View implements Cluster { - String accountName = OpenstackCluster.this.accountName - String name = OpenstackCluster.this.name - Set serverGroups = OpenstackCluster.this.serverGroups.collect { it.view } - Set loadBalancers = OpenstackCluster.this.loadBalancers.collect { it.view } - String type = OpenstackCluster.this.type - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackFloatingIP.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackFloatingIP.groovy deleted file mode 100644 index ea67118ac28..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackFloatingIP.groovy +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import groovy.transform.Canonical -import org.openstack4j.model.network.NetFloatingIP - -@Canonical -class OpenstackFloatingIP { - String id - String portId - String networkId - String fixedIpAddress - String floatingIpAddress - String account - String region - - /** - * Produce a domain specific floating IP from an openstack floating IP. - * @param ip - * @return - */ - static OpenstackFloatingIP from(NetFloatingIP ip, String account, String region) { - new OpenstackFloatingIP(id: ip.id, portId: ip.portId, networkId: ip.floatingNetworkId, fixedIpAddress: ip.fixedIpAddress, - floatingIpAddress: ip.floatingIpAddress, account: account, region: region) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackHealth.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackHealth.groovy deleted file mode 100644 index 7c5605f4d1b..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackHealth.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.Health - -abstract class OpenstackHealth implements Health, Serializable { - - abstract OpenstackHealthType getType() - - enum HealthClass { - platform - } - - static enum OpenstackHealthType { - Openstack, LoadBalancer - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackImage.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackImage.groovy deleted file mode 100644 index 085c0f00b40..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackImage.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.Image -import groovy.transform.builder.Builder - -@Builder -class OpenstackImage implements Image { - String id - String name - String region - String status - Long size - String location - Long createdAt - Long updatedAt - Long deletedAt - Map properties -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstance.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstance.groovy deleted file mode 100644 index 84630e5425b..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstance.groovy +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.consul.model.ConsulNode -import com.netflix.spinnaker.clouddriver.model.HealthState -import com.netflix.spinnaker.clouddriver.model.Instance -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import groovy.transform.Canonical -import org.openstack4j.model.compute.Address -import org.openstack4j.model.compute.Server - -@Canonical -class OpenstackInstance { - String name - String instanceId - String instanceType - Long launchTime - String zone - String region - String keyName - Map metadata - String account - String ipv4 - String ipv6 - String floatingIp - List loadBalancerHealths = [] - OpenstackInstanceHealth instanceHealth - ConsulNode consulNode - List securityGroups = [] - - static OpenstackInstance from(Server server, ConsulNode consulNode, String account, String region) { - //find first fixed v4 address - Address fixedIpAddressV4 = server?.addresses?.addresses?.collectMany { it.value }?.find { it.type == 'fixed' && it.version == 4 } - //find first fixed v6 address - Address fixedIpAddressV6 = server?.addresses?.addresses?.collectMany { it.value }?.find { it.type == 'fixed' && it.version == 6 } - //floating ip - Address floatingIpAddress = server?.addresses?.addresses?.collectMany { it.value }?.find { it.type == 'floating' && it.version == 4 } - - new OpenstackInstance(name: server.name - , region: region - , account: account - , zone: server.availabilityZone - , instanceId: server.id - , instanceType: server.flavor?.name - , launchTime: server.launchedAt?.time - , metadata: server.metadata - , instanceHealth: new OpenstackInstanceHealth(status: server.status) - , keyName: server.keyName - , ipv4: fixedIpAddressV4?.addr - , ipv6: fixedIpAddressV6?.addr - , floatingIp: floatingIpAddress?.addr - , securityGroups: server.securityGroups?.collect { it.name } - , 
consulNode: consulNode) - } - - @JsonIgnore - View getView() { - new View() - } - - @Canonical - class View implements Instance { - - final String providerType = OpenstackCloudProvider.ID //expected by deck - final String cloudProvider = OpenstackCloudProvider.ID //expected by deck - - String name = OpenstackInstance.this.instanceId //expected by deck - String instanceId = OpenstackInstance.this.instanceId - String instanceName = OpenstackInstance.this.name - String instanceType = OpenstackInstance.this.instanceType //expected by deck - Long launchTime = OpenstackInstance.this.launchTime - String zone = OpenstackInstance.this.zone - String region = OpenstackInstance.this.region - Map placement = ["availabilityZone": OpenstackInstance.this.zone] //expected by deck - String keyName = OpenstackInstance.this.keyName - Map metadata = OpenstackInstance.this.metadata - String account = OpenstackInstance.this.account - String ipv4 = OpenstackInstance.this.ipv4 - String ipv6 = OpenstackInstance.this.ipv6 - String floatingIp = OpenstackInstance.this.floatingIp - - List<Map<String, String>> getSecurityGroups() { - OpenstackInstance.this.securityGroups.collect { - ["groupName": it, "groupId": it] - } - } - ConsulNode consulNode = OpenstackInstance.this.consulNode - - @Override - List<Map<String, Object>> getHealth() { - ObjectMapper mapper = new ObjectMapper() - List<Map<String, Object>> healths = [] - - // load balancer health - loadBalancerHealths?.each { - healths << mapper.convertValue(it.view, OpenstackInfrastructureProvider.ATTRIBUTES) - } - - //instance health - healths << mapper.convertValue(instanceHealth?.view, OpenstackInfrastructureProvider.ATTRIBUTES) - - //consul health - consulNode?.healths?.each { - healths << mapper.convertValue(it, OpenstackInfrastructureProvider.ATTRIBUTES) - } - - healths - } - - @JsonIgnore - List allHealths() { - def allHealths = [] - - loadBalancerHealths?.each { - allHealths << it.view - } - if (instanceHealth) { - allHealths << instanceHealth.view - } - consulNode?.healths?.each { - allHealths << it - } - - allHealths - } - - @Override - HealthState getHealthState() { - def allHealths = allHealths() - someUpRemainingUnknown(allHealths) ? HealthState.Up : - anyStarting(allHealths) ? HealthState.Starting : - anyDown(allHealths) ? HealthState.Down : - anyOutOfService(allHealths) ? HealthState.OutOfService : - HealthState.Unknown - } - - private static boolean anyDown(List healthsList) { - healthsList.any { it.state == HealthState.Down } - } - - private static boolean someUpRemainingUnknown(List healthsList) { - List knownHealthList = healthsList.findAll { it.state != HealthState.Unknown } - knownHealthList ? knownHealthList.every { it.state == HealthState.Up } : false - } - - private static boolean anyStarting(List healthsList) { - healthsList.any { it.state == HealthState.Starting } - } - - private static boolean anyOutOfService(List healthsList) { - healthsList.any { it.state == HealthState.OutOfService } - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstanceHealth.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstanceHealth.groovy deleted file mode 100644 index 53f3351d2d7..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstanceHealth.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2016 Target Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.model.Health -import com.netflix.spinnaker.clouddriver.model.HealthState - -import static org.openstack4j.model.compute.Server.Status - -class OpenstackInstanceHealth { - Status status - - HealthState toHealthState() { - switch (status) { - case Status.ACTIVE: - HealthState.Unknown - break - case Status.BUILD: - HealthState.Starting - break - case Status.ERROR: - HealthState.Failed - break - case Status.UNKNOWN: - HealthState.Unknown - break - default: - HealthState.Unknown - } - } - - @JsonIgnore - View getView() { - new View() - } - - class View extends OpenstackHealth implements Health { - - final OpenstackHealthType type = OpenstackHealthType.Openstack - final HealthClass healthClass = HealthClass.platform - - HealthState getState() { - OpenstackInstanceHealth.this.toHealthState() - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstanceType.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstanceType.groovy deleted file mode 100644 index 7c37b100622..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackInstanceType.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.InstanceType -import groovy.transform.builder.Builder - -@Builder -class OpenstackInstanceType implements InstanceType, Serializable { - String account - String region - String id - String name - int ram - int vcpus - int disk - int swap - int ephemeral - boolean available - boolean disabled -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLaunchConfig.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLaunchConfig.groovy deleted file mode 100644 index 74a045ed225..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLaunchConfig.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import groovy.transform.builder.Builder - -@Builder -class OpenstackLaunchConfig { - String instanceType - String image - String networkId - String loadBalancerId - List securityGroups - boolean associatePublicIpAddress - String floatingNetworkId -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancer.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancer.groovy deleted file mode 100644 index 73032a5505f..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancer.groovy +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.annotation.JsonIgnoreProperties -import com.netflix.spinnaker.clouddriver.model.LoadBalancer -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.domain.LoadBalancerResolver -import com.netflix.spinnaker.moniker.Moniker -import groovy.transform.Canonical -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 - -@Canonical -@JsonIgnoreProperties(['createdRegex', 'createdPattern']) -class OpenstackLoadBalancer implements LoadBalancerResolver, LoadBalancer { - - final String type = OpenstackCloudProvider.ID - final String cloudProvider = OpenstackCloudProvider.ID - String account - String region - String id - String name - String description - String status - String algorithm - Set listeners - OpenstackHealthMonitor healthMonitor - Set healths - Set serverGroups - OpenstackFloatingIP floatingIP - OpenstackNetwork network - OpenstackSubnet subnet - Set securityGroups - String vipAddress - - void setMoniker(Moniker _ignored) {} - - static OpenstackLoadBalancer from(LoadBalancerV2 loadBalancer, Set listeners, LbPoolV2 pool, - HealthMonitorV2 healthMonitor, String account, String region) { - if (!loadBalancer) { - throw new IllegalArgumentException("Load balancer must not be null.") - } - Set openstackListeners = listeners?.collect { listener -> - new OpenstackLoadBalancerListener(externalProtocol: listener.protocol.toString(), - externalPort: listener.protocolPort, - description: listener.description) - }?.toSet() ?: [].toSet() - OpenstackHealthMonitor openstackHealthMonitor = healthMonitor ? 
new OpenstackHealthMonitor(id: healthMonitor.id, - adminStateUp: healthMonitor.adminStateUp, delay: healthMonitor.delay, maxRetries: healthMonitor.maxRetries, - expectedCodes: healthMonitor.expectedCodes?.split(',')?.collect { - it?.toInteger() - }, httpMethod: healthMonitor.httpMethod, - timeout: healthMonitor.timeout, type: healthMonitor.type.toString(), url: healthMonitor.urlPath) : null - new OpenstackLoadBalancer(account: account, region: region, id: loadBalancer.id, name: loadBalancer.name, - description: loadBalancer.description, status: loadBalancer.operatingStatus, - algorithm: pool?.lbMethod?.toString(), listeners: openstackListeners, healthMonitor: openstackHealthMonitor, - vipAddress: loadBalancer.vipAddress) - } - - Long getCreatedTime() { - parseCreatedTime(description) - } - - @JsonIgnore - View getView() { - new View(account: account, region: region, id: id, name: name, - description: description, status: status, algorithm: algorithm, - listeners: listeners, healthMonitor: healthMonitor, ip: floatingIP?.floatingIpAddress ?: vipAddress, - subnetId: subnet?.id, subnetName: subnet?.name, healths: healths, - networkId: network?.id, networkName: network?.name, serverGroups: serverGroups ?: [].toSet(), securityGroups: securityGroups ?: [].toSet()) - } - - @Canonical - @JsonIgnoreProperties(['createdRegex', 'createdPattern']) - static class OpenstackLoadBalancerListener implements LoadBalancerResolver { - String description - String externalProtocol - Integer externalPort - - Integer getInternalPort() { - parseListenerKey(description)?.get('internalPort')?.toInteger() - } - } - - @Canonical - static class OpenstackHealthMonitor { - String id - boolean adminStateUp - Integer delay - Integer maxRetries - Integer timeout - List expectedCodes - String httpMethod - String type - String url - } - - @Canonical - @JsonIgnoreProperties(['createdRegex', 'createdPattern']) - static class View extends OpenstackLoadBalancer implements LoadBalancer, LoadBalancerProvider.Details { - String ip = "" - String subnetId = "" - String subnetName = "" - String networkId = "" - String networkName = "" - - void setMoniker(Moniker _ignored) {} - - //oh groovy asts are fun - they bring insanity for everyone - //we need this for creating sets - @Override - boolean equals(Object other) { - View view = (View) other - ip == view.ip && subnetId == view.subnetId && subnetName == view.subnetName && - networkId == view.networkId && networkName == view.networkName && super.equals((OpenstackLoadBalancer) view) - } - - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancerHealth.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancerHealth.groovy deleted file mode 100644 index 5836a9dbc27..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancerHealth.groovy +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.model.Health -import com.netflix.spinnaker.clouddriver.model.HealthState -import groovy.transform.Canonical - -@Canonical -class OpenstackLoadBalancerHealth { - String instanceId - - List lbHealthSummaries - PlatformStatus status - - enum PlatformStatus { - ONLINE, - OFFLINE, - DISABLED - - HealthState toHealthState() { - this == ONLINE ? HealthState.Up : HealthState.Down - } - - LBHealthSummary.ServiceStatus toServiceStatus() { - this == ONLINE ? LBHealthSummary.ServiceStatus.InService : LBHealthSummary.ServiceStatus.OutOfService - } - } - - static class LBHealthSummary { - // These aren't the most descriptive names, but it's what's expected in Deck. - String loadBalancerName - String instanceId - ServiceStatus state - - String getDescription() { - state == ServiceStatus.OutOfService ? - "Instance has failed at least the Unhealthy Threshold number of health checks consecutively." : - "Healthy" - } - - /** - * This seems to be needed by deck - * @return - */ - ServiceStatus getHealthState() { - state - } - - enum ServiceStatus { - InService, - OutOfService - } - } - - @JsonIgnore - View getView() { - new View() - } - - class View extends OpenstackHealth implements Health { - final OpenstackHealthType type = OpenstackHealthType.LoadBalancer - final HealthClass healthClass = null - - List loadBalancers = OpenstackLoadBalancerHealth.this.lbHealthSummaries - - HealthState getState() { - OpenstackLoadBalancerHealth.this.status?.toHealthState() - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancerSummary.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancerSummary.groovy deleted file mode 100644 index bb523f02174..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackLoadBalancerSummary.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import groovy.transform.Canonical - -@Canonical -class OpenstackLoadBalancerSummary { - - String account - String region - String id - String name - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackNetwork.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackNetwork.groovy deleted file mode 100644 index eb6636c0d68..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackNetwork.groovy +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.Network -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import groovy.transform.Canonical - -@Canonical -class OpenstackNetwork implements Network { - - //core attributes - String cloudProvider = OpenstackCloudProvider.ID - String id - String name - String account - String region - - //openstack attribute extensions - Boolean external - - /** - * Convert an openstack4j object into a spinnaker openstack domain object. - * @param network - * @param account - * @param region - * @return - */ - static OpenstackNetwork from(org.openstack4j.model.network.Network network, String account, String region) { - new OpenstackNetwork(id: network.id, name: network.name, external: network.routerExternal, account: account, region: region) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSecurityGroup.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSecurityGroup.groovy deleted file mode 100644 index 1f1562c8fb3..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSecurityGroup.groovy +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
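OpenstackNetwork.from above shows the conversion pattern that every model class in this hunk repeats: a static factory that copies only the fields Spinnaker cares about off the openstack4j type and stamps on the account and region. The same shape, sketched self-contained (UpstreamNetwork is a stand-in for org.openstack4j.model.network.Network so the snippet runs without openstack4j on the classpath):

class UpstreamNetwork {            // stand-in for the openstack4j interface
  String id
  String name
  boolean routerExternal
}

class NetworkModel {
  String id
  String name
  Boolean external
  String account
  String region

  // Static factory: translate the SDK's shape into the cache-friendly model.
  static NetworkModel from(UpstreamNetwork network, String account, String region) {
    new NetworkModel(id: network.id, name: network.name,
      external: network.routerExternal, account: account, region: region)
  }
}

def model = NetworkModel.from(new UpstreamNetwork(id: 'net-1', name: 'public', routerExternal: true), 'my-account', 'east')
assert model.external && model.region == 'east'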
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.annotation.JsonInclude -import com.netflix.spinnaker.clouddriver.model.SecurityGroup -import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary -import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.moniker.Moniker -import groovy.transform.Immutable - -@Immutable -@JsonInclude(JsonInclude.Include.NON_EMPTY) -class OpenstackSecurityGroup implements SecurityGroup { - final String type = OpenstackCloudProvider.ID - final String cloudProvider = OpenstackCloudProvider.ID - final String id - final String name - final String description - final String application - final String accountName - final String region - final Set inboundRules - final Set outboundRules - - void setMoniker(Moniker _ignored) {} - - @JsonIgnore - @Override - SecurityGroupSummary getSummary() { - new OpenstackSecurityGroupSummary(name: name, id: id) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSecurityGroupSummary.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSecurityGroupSummary.groovy deleted file mode 100644 index 57d49fcab55..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSecurityGroupSummary.groovy +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary -import groovy.transform.EqualsAndHashCode -import groovy.transform.Immutable - -@Immutable -@EqualsAndHashCode(includes = ['id'], cache = true) -class OpenstackSecurityGroupSummary implements SecurityGroupSummary { - final String name - final String id -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackServerGroup.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackServerGroup.groovy deleted file mode 100644 index faf849222da..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackServerGroup.groovy +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.annotation.JsonInclude -import com.netflix.spinnaker.clouddriver.model.HealthState -import com.netflix.spinnaker.clouddriver.model.Instance -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.model.ServerGroup.Capacity -import com.netflix.spinnaker.clouddriver.model.ServerGroup.ImageSummary -import com.netflix.spinnaker.clouddriver.model.ServerGroup.ImagesSummary -import com.netflix.spinnaker.clouddriver.model.ServerGroup.InstanceCounts -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import groovy.transform.Canonical -import groovy.transform.builder.Builder - -@Builder -@Canonical -class OpenstackServerGroup { - String account - String name - String region - Set zones - Set instances - Set health - Map image // Represented as map instead of OpenstackImage for convenience. - Map launchConfig - Map scalingConfig = [:] - Long createdTime - Set loadBalancers - Map buildInfo - Boolean disabled - String type = OpenstackCloudProvider.ID - String subnetId - Map advancedConfig = [:] - Map tags - boolean discovery - - @JsonIgnore - View getView() { - new View() - } - - - @JsonInclude(JsonInclude.Include.NON_NULL) - @Canonical - class View implements ServerGroup { - - String account = OpenstackServerGroup.this.account - String name = OpenstackServerGroup.this.name - String region = OpenstackServerGroup.this.region - Set zones = OpenstackServerGroup.this.zones - Set instances = OpenstackServerGroup.this.instances.collect { it?.view } - Set health = OpenstackServerGroup.this.health - Map image = OpenstackServerGroup.this.image // Represented as map instead of OpenstackImage for convenience. - Map launchConfig = OpenstackServerGroup.this.launchConfig - Map scalingConfig = OpenstackServerGroup.this.scalingConfig - Long createdTime = OpenstackServerGroup.this.createdTime - Set loadBalancers = OpenstackServerGroup.this.loadBalancers - Map buildInfo = OpenstackServerGroup.this.buildInfo - Boolean disabled = OpenstackServerGroup.this.disabled - String type = OpenstackServerGroup.this.type - String cloudProvider = OpenstackServerGroup.this.type - String subnetId = OpenstackServerGroup.this.subnetId - Map advancedConfig = OpenstackServerGroup.this.advancedConfig - Map tags = OpenstackServerGroup.this.tags - boolean discovery = OpenstackServerGroup.this.discovery - - @Override - Boolean isDisabled() { // Because groovy isn't smart enough to generate this method :-( - disabled - } - - @Override - Set getSecurityGroups() { - (launchConfig && launchConfig.containsKey('securityGroups')) ? (Set) launchConfig.securityGroups : [] - } - - @Override - InstanceCounts getInstanceCounts() { - new InstanceCounts(total: instances ? instances.size() : 0, - up: filterInstancesByHealthState(instances, HealthState.Up)?.size() ?: 0, - down: filterInstancesByHealthState(instances, HealthState.Down)?.size() ?: 0, - unknown: filterInstancesByHealthState(instances, HealthState.Unknown)?.size() ?: 0, - starting: filterInstancesByHealthState(instances, HealthState.Starting)?.size() ?: 0, - outOfService: filterInstancesByHealthState(instances, HealthState.OutOfService)?.size() ?: 0) - } - - @Override - Capacity getCapacity() { - scalingConfig ? - new Capacity( - min: scalingConfig.minSize ? 
scalingConfig.minSize as Integer : 0, - max: scalingConfig.maxSize ? scalingConfig.maxSize as Integer : 0, - desired: scalingConfig.desiredSize ? scalingConfig.desiredSize as Integer : 0) - : null - } - - @Override - ImagesSummary getImagesSummary() { - new DefaultImagesSummary(summaries: [new DefaultImageSummary(serverGroupName: name, imageName: image?.name, imageId: image?.id, buildInfo: buildInfo, image: image)]) - } - - @Override - ImageSummary getImageSummary() { - imagesSummary?.summaries?.getAt(0) - } - - static Collection filterInstancesByHealthState(Set instances, HealthState healthState) { - instances.findAll { Instance it -> it.getHealthState() == healthState } - } - - static class DefaultImageSummary implements ImageSummary { - String serverGroupName - String imageId - String imageName - Map image - Map buildInfo - } - - static class DefaultImagesSummary implements ImagesSummary { - List summaries - } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSubnet.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSubnet.groovy deleted file mode 100644 index 3de00aecfc8..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/model/OpenstackSubnet.groovy +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
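getCapacity above has to tolerate partially populated scaling configs: any of minSize, maxSize and desiredSize may be absent and falls back to 0, while a missing scalingConfig yields no Capacity at all. The same null-safe defaulting, sketched with a plain Map standing in for ServerGroup.Capacity so it runs standalone:

// Stand-in sketch: a Map plays the role of ServerGroup.Capacity.
Map capacityFrom(Map scalingConfig) {
  if (!scalingConfig) return null
  [min    : (scalingConfig.minSize ?: 0) as Integer,
   max    : (scalingConfig.maxSize ?: 0) as Integer,
   desired: (scalingConfig.desiredSize ?: 0) as Integer]
}

assert capacityFrom([minSize: 1, maxSize: 5]) == [min: 1, max: 5, desired: 0]
assert capacityFrom(null) == null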
- */ - -package com.netflix.spinnaker.clouddriver.openstack.model - -import com.netflix.spinnaker.clouddriver.model.Subnet -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import groovy.transform.Canonical -import org.openstack4j.model.network.Subnet as DomainSubnet - -@Canonical -class OpenstackSubnet implements Subnet { - String type - String name - String id - String cidrBlock - List allocationPools - List dnsNameservers - boolean enableDhcp - String gatewayIp - Integer ipVersion - String networkId - String account - String region - String purpose = 'n/a' - - static class Range { - String start - String end - } - - static OpenstackSubnet from(DomainSubnet subnet, String account, String region) { - new OpenstackSubnet( - type: OpenstackCloudProvider.ID, - account: account, - region: region, - id: subnet.id, - name: subnet.name, - cidrBlock: subnet.cidr, - allocationPools: subnet.allocationPools?.collect { it -> new Range(start: it.start, end: it.end) }, - dnsNameservers: subnet.dnsNames, - enableDhcp: subnet.DHCPEnabled, - gatewayIp: subnet.gateway, - ipVersion: subnet.ipVersion?.version, - networkId: subnet.networkId) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/ImageProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/ImageProvider.groovy deleted file mode 100644 index 164b1bc3f12..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/ImageProvider.groovy +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider - -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage - -/** - * Provider image interface to be promoted to core. - * //TODO - Promote to core. - */ -interface ImageProvider { - Map> listImagesByAccount() -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/OpenstackInfrastructureProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/OpenstackInfrastructureProvider.groovy deleted file mode 100644 index df6c39710d7..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/OpenstackInfrastructureProvider.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider - -import com.fasterxml.jackson.core.type.TypeReference -import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.agent.AgentSchedulerAware -import com.netflix.spinnaker.clouddriver.cache.SearchableProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCE_TYPES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS - -import static com.netflix.spinnaker.clouddriver.cache.SearchableProvider.SearchableResource - -@ConditionalOnProperty('openstack.enabled') -class OpenstackInfrastructureProvider extends AgentSchedulerAware implements SearchableProvider { - public static final TypeReference> ATTRIBUTES = new TypeReference>() {} - - public static final String PROVIDER_NAME = OpenstackInfrastructureProvider.name - - private final Collection agents - - OpenstackInfrastructureProvider(Collection agents) { - this.agents = agents - } - - final Set defaultCaches = [ - APPLICATIONS.ns, - CLUSTERS.ns, - INSTANCES.ns, - SECURITY_GROUPS.ns, - SERVER_GROUPS.ns, - INSTANCE_TYPES.ns, - LOAD_BALANCERS.ns - ].asImmutable() - - //TODO - Need to define urlMappingTemplates - final Map urlMappingTemplates = Collections.emptyMap() - //TODO - Need to define (if applicable) - final Map searchResultHydrators = Collections.emptyMap() - - @Override - Map parseKey(String key) { - Keys.parse(key) - } - - @Override - String getProviderName() { - PROVIDER_NAME - } - - @Override - Collection getAgents() { - agents - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/AbstractOpenstackCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/AbstractOpenstackCachingAgent.groovy deleted file mode 100644 index 3fc8ab2fed5..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/AbstractOpenstackCachingAgent.groovy +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
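The provider above is searchable because parseKey can decompose any cache identifier back into its parts via Keys.parse (the Keys class is deleted elsewhere in this change). The general codec pattern looks roughly like the following sketch; the colon-delimited field order is illustrative, not the provider's actual key format:

// Hypothetical key codec in the style of clouddriver Keys classes.
String instanceKey(String instanceId, String account, String region) {
  "openstack:instances:${account}:${region}:${instanceId}"
}

Map parseKey(String key) {
  def parts = key.split(':')
  // Reject keys from other providers or with an unexpected shape.
  if (parts.length != 5 || parts[0] != 'openstack') return null
  [provider: parts[0], type: parts[1], account: parts[2], region: parts[3], id: parts[4]]
}

assert parseKey(instanceKey('abc-123', 'my-account', 'east')).id == 'abc-123'
assert parseKey('aws:instances:whatever') == null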
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.core.type.TypeReference -import com.netflix.spinnaker.cats.agent.AccountAware -import com.netflix.spinnaker.cats.agent.CachingAgent -import com.netflix.spinnaker.clouddriver.openstack.cache.OnDemandAware -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.provider.view.MutableCacheData -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials - -/** - * Base agent that implements common logic for all agents. - */ -abstract class AbstractOpenstackCachingAgent implements CachingAgent, AccountAware, OnDemandAware { - - final TypeReference>> typeReference = new TypeReference>>() {} - - final OpenstackNamedAccountCredentials account - final String region - final String providerName = OpenstackInfrastructureProvider.PROVIDER_NAME - - AbstractOpenstackCachingAgent(OpenstackNamedAccountCredentials account, String region) { - this.account = account - this.region = region - } - - @Override - String getAccountName() { - account.name - } - - OpenstackClientProvider getClientProvider() { - account?.credentials?.provider - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackFloatingIPCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackFloatingIPCachingAgent.groovy deleted file mode 100644 index 1269ea61fdd..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackFloatingIPCachingAgent.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
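AbstractOpenstackCachingAgent above is a classic template base: the constructor pins an account/region pair, accountName and the Openstack client fall out of the credentials, and each concrete agent below only contributes its provided data types and a loadData implementation. Stripped of the clouddriver/cats interfaces so it runs standalone, the shape is roughly:

// Self-contained sketch; the real base also implements CachingAgent,
// AccountAware and OnDemandAware.
abstract class SketchAgent {
  final String accountName
  final String region

  SketchAgent(String accountName, String region) {
    this.accountName = accountName
    this.region = region
  }

  // Every agent logs and keys its work by this derived identifier.
  String getAgentType() { "${accountName}/${region}/${getClass().simpleName}" }

  abstract Map loadData()
}

class FloatingIpSketchAgent extends SketchAgent {
  FloatingIpSketchAgent(String accountName, String region) { super(accountName, region) }
  Map loadData() { [floatingIps: []] }   // real agents call the Openstack client here
}

assert new FloatingIpSketchAgent('my-account', 'east').agentType == 'my-account/east/FloatingIpSketchAgent'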
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackFloatingIP -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.network.NetFloatingIP - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackFloatingIPCachingAgent extends AbstractOpenstackCachingAgent { - - final ObjectMapper objectMapper - - Collection providedDataTypes = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(FLOATING_IPS.ns) - ] as Set) - - String agentType = "${accountName}/${region}/${OpenstackFloatingIPCachingAgent.simpleName}" - - OpenstackFloatingIPCachingAgent(OpenstackNamedAccountCredentials account, String region, ObjectMapper objectMapper) { - super(account, region) - this.objectMapper = objectMapper - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - List ips = clientProvider.listNetFloatingIps(region) - buildCacheResult(ips) - } - - private CacheResult buildCacheResult(List ips) { - log.info("Describing items in ${agentType}") - - def cacheResultBuilder = new CacheResultBuilder() - - ips.each { NetFloatingIP ip -> - String ipKey = Keys.getFloatingIPKey(ip.id, accountName, region) - - Map ipAttributes = objectMapper.convertValue(OpenstackFloatingIP.from(ip, accountName, region), ATTRIBUTES) - - cacheResultBuilder.namespace(FLOATING_IPS.ns).keep(ipKey).with { - attributes = ipAttributes - } - } - - log.info("Caching ${cacheResultBuilder.namespace(FLOATING_IPS.ns).keepSize()} floating ips in ${agentType}") - - cacheResultBuilder.build() - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackImageCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackImageCachingAgent.groovy deleted file mode 100644 index 8d7e37dfc6d..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackImageCachingAgent.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
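Note the caching idiom in buildCacheResult above, repeated by every agent in this hunk: models are not serialized to JSON strings but converted to attribute Maps with ObjectMapper.convertValue, so the cache layer can store them and later rehydrate the typed model. A small demonstration with a stand-in Groovy POGO, assuming jackson-databind on the classpath (the ATTRIBUTES constant in the provider is the same kind of TypeReference as the one inlined here):

import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.core.type.TypeReference
import com.fasterxml.jackson.databind.ObjectMapper

@JsonIgnoreProperties(['metaClass'])   // hide Groovy's synthetic property from Jackson
class FloatingIpSketch {               // stand-in for OpenstackFloatingIP
  String id
  String fixedIpAddress
}

def mapper = new ObjectMapper()
def attributesType = new TypeReference<Map<String, Object>>() {}

Map attributes = mapper.convertValue(
  new FloatingIpSketch(id: 'ip-1', fixedIpAddress: '10.0.0.5'), attributesType)
assert attributes == [id: 'ip-1', fixedIpAddress: '10.0.0.5']

// Reading back out of the cache is the mirror image:
FloatingIpSketch restored = mapper.convertValue(attributes, FloatingIpSketch)
assert restored.fixedIpAddress == '10.0.0.5'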
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.image.Image - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.IMAGES -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackImageCachingAgent extends AbstractOpenstackCachingAgent { - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(IMAGES.ns) - ] as Set) - - final ObjectMapper objectMapper - - OpenstackImageCachingAgent( - final OpenstackNamedAccountCredentials account, final String region, final ObjectMapper objectMapper) { - super(account, region) - this.objectMapper = objectMapper - } - - @Override - String getAgentType() { - "${account.name}/${region}/${OpenstackImageCachingAgent.simpleName}" - } - - @Override - Collection getProvidedDataTypes() { - types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Describing items in ${agentType}") - - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() - - List images = this.clientProvider.listImages(region) - - images?.each { OpenstackImage image -> - cacheResultBuilder.namespace(IMAGES.ns).keep(Keys.getImageKey(image.id, accountName, region)).with { - attributes = objectMapper.convertValue(image, ATTRIBUTES) - } - } - - log.info("Caching ${cacheResultBuilder.namespace(IMAGES.ns).keepSize()} items in ${agentType}") - - cacheResultBuilder.build() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceCachingAgent.groovy deleted file mode 100644 index 6004fbd0752..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceCachingAgent.groovy +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.consul.model.ConsulNode -import com.netflix.spinnaker.clouddriver.consul.provider.ConsulProviderUtils -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstance -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import retrofit.RetrofitError - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackInstanceCachingAgent extends AbstractOpenstackCachingAgent { - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(INSTANCES.ns) - ] as Set) - - final ObjectMapper objectMapper - - OpenstackInstanceCachingAgent( - final OpenstackNamedAccountCredentials account, final String region, final ObjectMapper objectMapper) { - super(account, region) - this.objectMapper = objectMapper - } - - @Override - String getAgentType() { - "${account.name}/${region}/${OpenstackInstanceCachingAgent.simpleName}" - } - - @Override - Collection getProvidedDataTypes() { - types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() - - clientProvider.getInstances(region)?.each { server -> - String instanceKey = Keys.getInstanceKey(server.id, accountName, region) - - ConsulNode consulNode = null - if (account?.consulConfig?.enabled) { - try{ - consulNode = ConsulProviderUtils.getHealths(account.consulConfig, server.name) - } catch (RetrofitError e){ - log.warn(e.message) - } - } - - Map instanceAttributes = objectMapper.convertValue(OpenstackInstance.from(server, consulNode, accountName, region), ATTRIBUTES) - - cacheResultBuilder.namespace(INSTANCES.ns).keep(instanceKey).with { - attributes = instanceAttributes - } - } - - log.info("Caching ${cacheResultBuilder.namespace(INSTANCES.ns).keepSize()} items in ${agentType}") - - cacheResultBuilder.build() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceTypeCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceTypeCachingAgent.groovy deleted file mode 100644 index 694d2ad215a..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceTypeCachingAgent.groovy +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
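The instance agent above only calls Consul when the account opts in (account?.consulConfig?.enabled) and deliberately treats a failed lookup as non-fatal: it logs a warning and caches the instance without health rather than failing the whole caching run. That guard-and-degrade shape in isolation (lookup is a hypothetical stand-in for ConsulProviderUtils.getHealths, and IOException stands in for RetrofitError):

// Returns health data, or null when the feature is off or the lookup fails.
def healthFor(boolean consulEnabled, String serverName, Closure lookup) {
  if (!consulEnabled) {
    return null                        // account did not opt in to Consul
  }
  try {
    lookup(serverName)
  } catch (IOException e) {
    null                               // degrade gracefully: no health, but keep caching
  }
}

def failingLookup = { String name -> throw new IOException("consul unreachable") }
assert healthFor(false, 'app-v000', failingLookup) == null
assert healthFor(true, 'app-v000', failingLookup) == null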
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstanceType -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.compute.Flavor - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCE_TYPES -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackInstanceTypeCachingAgent extends AbstractOpenstackCachingAgent { - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(INSTANCE_TYPES.ns) - ] as Set) - - final ObjectMapper objectMapper - - OpenstackInstanceTypeCachingAgent( - final OpenstackNamedAccountCredentials account, final String region, final ObjectMapper objectMapper) { - super(account, region) - this.objectMapper = objectMapper - } - - @Override - String getAgentType() { - "${account.name}/${region}/${OpenstackInstanceTypeCachingAgent.simpleName}" - } - - @Override - Collection getProvidedDataTypes() { - types - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() - - clientProvider.listFlavors(region)?.each { Flavor flavor -> - String instanceTypeKey = Keys.getInstanceTypeKey(flavor.id, accountName, region) - Map instanceTypeAttributes = objectMapper.convertValue(buildInstanceType(flavor), ATTRIBUTES) - - cacheResultBuilder.namespace(INSTANCE_TYPES.ns).keep(instanceTypeKey).with { - attributes = instanceTypeAttributes - } - } - - log.info("Caching ${cacheResultBuilder.namespace(INSTANCE_TYPES.ns).keepSize()} items in ${agentType}") - - cacheResultBuilder.build() - } - - OpenstackInstanceType buildInstanceType(Flavor flavor) { - OpenstackInstanceType.builder() - .account(accountName) - .region(region) - .id(flavor.id) - .name(flavor.name) - .available(flavor.isPublic()) - .disabled(flavor.isDisabled()) - .disk(flavor.disk) - .ephemeral(flavor.ephemeral) - .ram(flavor.ram) - .swap(flavor.swap) - .vcpus(flavor.vcpus) - .build() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackLoadBalancerCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackLoadBalancerCachingAgent.groovy deleted file mode 100644 index f9ae8eb3594..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackLoadBalancerCachingAgent.groovy 
+++ /dev/null @@ -1,316 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandResult -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.cache.UnresolvableKeyException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancerHealth -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancerHealth.PlatformStatus -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.network.Port -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import org.openstack4j.model.network.ext.status.LbPoolV2Status -import org.openstack4j.model.network.ext.status.ListenerV2Status -import org.openstack4j.model.network.ext.status.MemberV2Status - -import java.util.concurrent.CompletableFuture -import java.util.concurrent.Future - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandType.LoadBalancer -import static com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider.ID -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS -import static 
com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -/* - TODO drmaas - we could cache the load balancer status tree with each load balancer too, which would be used - in the server group caching agent instead of re-querying openstack for the status trees - */ -@Slf4j -class OpenstackLoadBalancerCachingAgent extends AbstractOpenstackCachingAgent implements OnDemandAgent { - - final ObjectMapper objectMapper - final OnDemandMetricsSupport metricsSupport - - Collection providedDataTypes = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(LOAD_BALANCERS.ns), - INFORMATIVE.forType(INSTANCES.ns) - ] as Set) - - String agentType = "${accountName}/${region}/${OpenstackLoadBalancerCachingAgent.simpleName}" - String onDemandAgentType = "${agentType}-OnDemand" - - OpenstackLoadBalancerCachingAgent(final OpenstackNamedAccountCredentials account, - final String region, - final ObjectMapper objectMapper, - final Registry registry) { - super(account, region) - this.objectMapper = objectMapper - this.metricsSupport = new OnDemandMetricsSupport( - registry, - this, - "${ID}:${LoadBalancer}") - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Describing items in ${agentType}") - - //Get all data in parallel to cut down on processing time - Future> loadBalancers = CompletableFuture.supplyAsync { - clientProvider.getLoadBalancers(region)?.toSet() - } - Future> statusTrees = loadBalancers.thenApplyAsync { lbs -> - lbs.collectEntries { lb -> - [(lb.id): clientProvider.getLoadBalancerStatusTree(region, lb.id)] - } - } - Future> listeners = CompletableFuture.supplyAsync { - clientProvider.getListeners(region)?.toSet() - } - Future> pools = CompletableFuture.supplyAsync { - clientProvider.getPools(region)?.toSet() - } - Future> healthMonitors = CompletableFuture.supplyAsync { - clientProvider.getHealthMonitors(region)?.toSet() - } - Future> ports = CompletableFuture.supplyAsync { - clientProvider.listPorts(region)?.collectEntries { [it.id, it] } - } - - CompletableFuture.allOf(loadBalancers, listeners, pools, healthMonitors, statusTrees, ports).join() - - List loadBalancerKeys = loadBalancers.get().collect { - Keys.getLoadBalancerKey(it.name, it.id, accountName, region) - } - - buildLoadDataCache(providerCache, loadBalancerKeys) { CacheResultBuilder cacheResultBuilder -> - buildCacheResult(providerCache, loadBalancers.get(), listeners.get(), pools.get(), healthMonitors.get(), statusTrees.get(), ports.get(), cacheResultBuilder) - } - } - - CacheResult buildCacheResult(ProviderCache providerCache, - Set loadBalancers, - Set listeners, - Set pools, - Set healthMonitors, - Map statusTreeMap, - Map portMap, - CacheResultBuilder cacheResultBuilder) { - loadBalancers?.each { loadBalancer -> - String loadBalancerKey = Keys.getLoadBalancerKey(loadBalancer.name, loadBalancer.id, accountName, region) - if (shouldUseOnDemandData(cacheResultBuilder, loadBalancerKey)) { - moveOnDemandDataToNamespace(objectMapper, typeReference, cacheResultBuilder, loadBalancerKey) - } else { - Set resultlisteners = [].toSet() - LbPoolV2 pool = null - HealthMonitorV2 healthMonitor = null - if (listeners) { - resultlisteners = loadBalancer.listeners.collect { lblistener -> - listeners.find { listener -> listener.id == lblistener.id } - } - if (resultlisteners) { - pool = resultlisteners.collect { listener -> pools.find { p -> p.id == listener.defaultPoolId } }.first() - if (pool) { - healthMonitor = healthMonitors.find { hm -> hm.id == pool.healthMonitorId } - } - 
} - } - //create load balancer. Server group relationships are not cached here as they are cached in the server group caching agent. - OpenstackLoadBalancer openstackLoadBalancer = OpenstackLoadBalancer.from(loadBalancer, resultlisteners, pool, healthMonitor, accountName, region) - - // Populate load balancer healths and find instance ids which are members of the current lb via membership - Set healths = [] - Set instanceKeys = [] - - Map memberStatusMap = statusTreeMap?.get(loadBalancer.id)?.loadBalancerV2Status?.listenerStatuses?.collectEntries { ListenerV2Status listenerStatus -> - listenerStatus.lbPoolV2Statuses?.collectEntries { LbPoolV2Status poolStatus -> - poolStatus.memberStatuses?.collectEntries { MemberV2Status memberStatus -> - [memberStatus.address, memberStatus.operatingStatus] - } - } - } - - // Read instances from cache and create a map indexed by ipv6 address to compare to load balancer member status - Collection instanceFilters = providerCache.filterIdentifiers(INSTANCES.ns, Keys.getInstanceKey('*', accountName, region)) - Collection instancesData = providerCache.getAll(INSTANCES.ns, instanceFilters, RelationshipCacheFilter.none()) - Map addressCacheDataMap = instancesData.collectEntries { data -> - [(data.attributes.ipv4): data, (data.attributes.ipv6): data] - } - - // Find corresponding instance id, save key for caching below, and add new lb health based upon current member status - memberStatusMap.each { String key, String value -> - CacheData instanceData = addressCacheDataMap[key] ?: null - if (instanceData) { - String instanceId = instanceData.attributes.instanceId - instanceKeys << Keys.getInstanceKey(instanceId, accountName, region) - PlatformStatus status = PlatformStatus.valueOf(value) - healths << new OpenstackLoadBalancerHealth( - instanceId: instanceId, - status: status, - lbHealthSummaries: [new OpenstackLoadBalancerHealth.LBHealthSummary( - loadBalancerName: loadBalancer.name - , instanceId: instanceId - , state: status?.toServiceStatus())]) - } - } - openstackLoadBalancer.healths = healths - - //ips cached - Collection ipFilters = providerCache.filterIdentifiers(FLOATING_IPS.ns, Keys.getFloatingIPKey('*', accountName, region)) - Collection ipsData = providerCache.getAll(FLOATING_IPS.ns, ipFilters, RelationshipCacheFilter.none()) - CacheData ipCacheData = ipsData.find { i -> i.attributes?.fixedIpAddress == loadBalancer.vipAddress } - String floatingIpKey = ipCacheData?.id - - //subnets cached - String subnetKey = Keys.getSubnetKey(loadBalancer.vipSubnetId, accountName, region) - - //networks cached - String networkKey = ipCacheData ? 
Keys.getNetworkKey(ipCacheData.attributes.networkId.toString(), accountName, region) : null - - //instances cached - instanceKeys.each { String instanceKey -> - cacheResultBuilder.namespace(INSTANCES.ns).keep(instanceKey).with { - relationships[LOAD_BALANCERS.ns].add(loadBalancerKey) - } - } - - // security groups cached - Port vipPort = portMap.get(loadBalancer.vipPortId) - Set securityGroupKeys = [] - if (vipPort) { - vipPort.securityGroups?.each { - securityGroupKeys << Keys.getSecurityGroupKey('*', it, accountName, region) - } - } - - cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keep(loadBalancerKey).with { - attributes = objectMapper.convertValue(openstackLoadBalancer, ATTRIBUTES) - relationships[FLOATING_IPS.ns] = [floatingIpKey] - relationships[NETWORKS.ns] = [networkKey] - relationships[SUBNETS.ns] = [subnetKey] - relationships[SECURITY_GROUPS.ns] = securityGroupKeys - } - } - } - - log.info("Caching ${cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keepSize()} load balancers in ${agentType}") - log.info "Caching ${cacheResultBuilder.namespace(INSTANCES.ns).keepSize()} instance relationships in ${agentType}" - log.info("Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}") - log.info("Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}") - - cacheResultBuilder.build() - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == LoadBalancer && cloudProvider == ID - } - - @Override - OnDemandResult handle(ProviderCache providerCache, Map data) { - OnDemandResult result = null - - if (data.containsKey("loadBalancerName") && data.account == accountName && data.region == region) { - String loadBalancerName = data.loadBalancerName.toString() - - LoadBalancerV2 loadBalancer = metricsSupport.readData { - try { - clientProvider.getLoadBalancerByName(region, loadBalancerName) - } catch (OpenstackProviderException e) { - } - } - - Set loadBalancers = [].toSet() - Set listeners = [].toSet() - Set pools = [].toSet() - Set healthMonitors = [].toSet() - Map statusMap = [:] - Map portMap = [:] - String loadBalancerKey = Keys.getLoadBalancerKey(loadBalancerName, '*', accountName, region) - - if (loadBalancer) { - loadBalancers << loadBalancer - loadBalancer.listeners.each { listenerItem -> - ListenerV2 listener = clientProvider.getListener(region, listenerItem.id) - if (listener) { - LbPoolV2 pool = clientProvider.getPool(region, listener.defaultPoolId) - if (pool) { - HealthMonitorV2 healthMonitor = clientProvider.getMonitor(region, pool.healthMonitorId) - if (healthMonitor) { - healthMonitors << healthMonitor - } - pools << pool - } - listeners << listener - } - } - loadBalancerKey = Keys.getLoadBalancerKey(loadBalancerName, loadBalancer.id, accountName, region) - statusMap[loadBalancer.id] = clientProvider.getLoadBalancerStatusTree(region, loadBalancer.id) - portMap[loadBalancer.vipPortId] = clientProvider.getPort(region, loadBalancer.vipPortId) - } - - CacheResult cacheResult = metricsSupport.transformData { - buildCacheResult(providerCache, loadBalancers, listeners, pools, healthMonitors, statusMap, portMap, new CacheResultBuilder(startTime: Long.MAX_VALUE)) - } - - String namespace = LOAD_BALANCERS.ns - String resolvedKey = null - try { - resolvedKey = resolveKey(providerCache, namespace, loadBalancerKey) - processOnDemandCache(cacheResult, objectMapper, metricsSupport, providerCache, resolvedKey) - } catch (UnresolvableKeyException uke) { - log.info("Load balancer 
${loadBalancerName} is not resolvable", uke) - } - - result = buildOnDemandCache(loadBalancer, onDemandAgentType, cacheResult, namespace, resolvedKey) - } - - log.info("On demand cache refresh (data: ${data}) succeeded.") - - result - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - getAllOnDemandCacheByRegionAndAccount(providerCache, accountName, region) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackNetworkCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackNetworkCachingAgent.groovy deleted file mode 100644 index cee75247681..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackNetworkCachingAgent.groovy +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackNetwork -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.network.Network - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackNetworkCachingAgent extends AbstractOpenstackCachingAgent { - - Collection providedDataTypes = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(Keys.Namespace.NETWORKS.ns) - ] as Set) - - final ObjectMapper objectMapper - - String agentType = "${accountName}/${region}/${OpenstackNetworkCachingAgent.simpleName}" - - OpenstackNetworkCachingAgent(OpenstackNamedAccountCredentials account, String region, final ObjectMapper objectMapper) { - super(account, region) - this.objectMapper = objectMapper - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - List networkList = clientProvider.listNetworks(region) - buildCacheResult(networkList) - } - - private CacheResult buildCacheResult(List networkList) { - log.info("Describing items in ${agentType}") - - def cacheResultBuilder = new CacheResultBuilder() - - networkList.each { Network network -> - String networkKey = Keys.getNetworkKey(network.id, accountName, region) - - Map networkAttributes = objectMapper.convertValue(OpenstackNetwork.from(network, accountName, region), ATTRIBUTES) - - 
cacheResultBuilder.namespace(Keys.Namespace.NETWORKS.ns).keep(networkKey).with { - attributes = networkAttributes - } - } - - log.info("Caching ${cacheResultBuilder.namespace(Keys.Namespace.NETWORKS.ns).keepSize()} networks in ${agentType}") - - cacheResultBuilder.build() - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSecurityGroupCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSecurityGroupCachingAgent.groovy deleted file mode 100644 index f31b9a0a4ab..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSecurityGroupCachingAgent.groovy +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.model.AddressableRange -import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule -import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule -import com.netflix.spinnaker.clouddriver.model.securitygroups.SecurityGroupRule -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.cache.UnresolvableKeyException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSecurityGroup -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.compute.SecGroupExtension - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES -import static com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider.ID -import static com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandType.SecurityGroup - - -@Slf4j -class OpenstackSecurityGroupCachingAgent extends AbstractOpenstackCachingAgent implements OnDemandAgent { - - final Set providedDataTypes = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(SECURITY_GROUPS.ns) - ] as Set) - - final String agentType = 
"${account.name}/${region}/${OpenstackSecurityGroupCachingAgent.simpleName}" - final String onDemandAgentType = "${agentType}-OnDemand" - final OnDemandMetricsSupport metricsSupport - final ObjectMapper objectMapper - - OpenstackSecurityGroupCachingAgent(final OpenstackNamedAccountCredentials account, - final String region, - final ObjectMapper objectMapper, - final Registry registry) { - super(account, region) - this.objectMapper = objectMapper - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${ID}:${SecurityGroup}") - } - - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == SecurityGroup && cloudProvider == ID - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - getAllOnDemandCacheByRegionAndAccount(providerCache, accountName, region) - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - /* - * Get security groups and map the names to the ids for a later lookup. Since there is a possibility - * that there are duplicate security groups by name, the lookup is to a list of ids. - */ - List securityGroups = clientProvider.getSecurityGroups(region) - List keys = securityGroups.collect{ Keys.getSecurityGroupKey(it.name, it.id, accountName, region) } - - buildLoadDataCache(providerCache, keys) { CacheResultBuilder cacheResultBuilder -> - buildCacheResult(cacheResultBuilder, securityGroups) - } - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - log.debug("Handling on-demand cache update; account=${account}, region=${region}, data=${data}") - - if (data.account != accountName) { - return null - } - - if (data.region != region) { - return null - } - - if (!data.containsKey('securityGroupName')) { - return null - } - - String name = data.securityGroupName as String - - SecGroupExtension securityGroup = metricsSupport.readData { - SecGroupExtension group = null - try { - /* - * Since we only have a name, we need to get all groups and filter by name. Also, since name is unique, - * ensure there is only security group by this name. - */ - List groups = clientProvider.getSecurityGroups(region).findAll { it.name == name } - if (groups.size() == 1) { - group = groups.first() - } else { - log.warn("Failed to find unique security group with name ${name} in region ${region}") - } - } catch (OpenstackProviderException e) { - //Do nothing ... Exception is thrown if a security group isn't found - log.debug("Unable to find security group to add to OnDemand cache", e) - } - return group - } - - List securityGroups = [] - String key = Keys.getSecurityGroupKey(name, '*', accountName, region) - if (securityGroup) { - securityGroups = [securityGroup] - key = Keys.getSecurityGroupKey(name, securityGroup.id, accountName, region) - } - - CacheResult cacheResult = metricsSupport.transformData { - buildCacheResult(new CacheResultBuilder(startTime: Long.MAX_VALUE), securityGroups) - } - - String namespace = SECURITY_GROUPS.ns - String resolvedKey = null - try { - resolvedKey = resolveKey(providerCache, namespace, key) - processOnDemandCache(cacheResult, objectMapper, metricsSupport, providerCache, resolvedKey) - } catch(UnresolvableKeyException e) { - log.info("Security group ${name} is not resolvable", e) - } - - log.info("On demand cache refresh succeeded. 
Data: ${data}") - - buildOnDemandCache(securityGroup, onDemandAgentType, cacheResult, namespace, resolvedKey) - } - - protected CacheResult buildCacheResult(CacheResultBuilder cacheResultBuilder, List securityGroups) { - Map> namesToIds = [:].withDefault {[]} - securityGroups.each { namesToIds[it.name] << it.id } - - securityGroups.each { securityGroup -> - log.debug("Caching security group for account $accountName in region $region: $securityGroup") - - List inboundRules = securityGroup.rules.collect { rule -> - // The Openstack4J library doesn't put a type on the rule, instead, it includes a range object with a null cidr - rule.range?.cidr ? buildIpRangeRule(rule) : buildSecurityGroupRule(rule, namesToIds.get(rule.group.name)) - } - - OpenstackSecurityGroup openstackSecurityGroup = new OpenstackSecurityGroup(id: securityGroup.id, - accountName: accountName, - region: region, - name: securityGroup.name, - description: securityGroup.description, - inboundRules: inboundRules - ) - - String key = Keys.getSecurityGroupKey(securityGroup.name, securityGroup.id, accountName, region) - - if (shouldUseOnDemandData(cacheResultBuilder, key)) { - moveOnDemandDataToNamespace(objectMapper, typeReference, cacheResultBuilder, key) - } else { - cacheResultBuilder.namespace(SECURITY_GROUPS.ns).keep(key).with { - attributes = objectMapper.convertValue(openstackSecurityGroup, ATTRIBUTES) - } - } - } - - log.info("Caching ${cacheResultBuilder.namespace(SECURITY_GROUPS.ns).keepSize()} items in ${agentType}") - log.info("Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}") - log.info("Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}") - - cacheResultBuilder.build() - } - - /** - * Build a security group rule the references another security group. - * - * This will Look up the referenced security group by name. This lookup may fail if multiple security groups - * by name are found or no security groups with that name can be found. - */ - private SecurityGroupRule buildSecurityGroupRule(SecGroupExtension.Rule rule, List possibleSecurityGroupReferences) { - - String id = null - if (possibleSecurityGroupReferences.isEmpty()) { - log.warn("Could not find any security groups by name ${rule.group.name} in account ${accountName}") - } else if (possibleSecurityGroupReferences.size() > 1) { - log.warn("Found too many security groups by name ${rule.group.name} in account ${accountName}") - } else { - id = possibleSecurityGroupReferences[0] - } - - def portRange = new Rule.PortRange(startPort: rule.fromPort, endPort: rule.toPort) - def securityGroup = new OpenstackSecurityGroup( - name: rule.group.name, - type: ID, - accountName: accountName, - region: region, - id: id - ) - new SecurityGroupRule(protocol: rule.IPProtocol.value(), - portRanges: [portRange] as SortedSet, - securityGroup: securityGroup - ) - } - - /** - * Build a security group based on a IP range (cidr) - */ - private IpRangeRule buildIpRangeRule(SecGroupExtension.Rule rule) { - def portRange = new Rule.PortRange(startPort: rule.fromPort, endPort: rule.toPort) - def addressableRange = buildAddressableRangeFromCidr(rule.range.cidr) - new IpRangeRule(protocol: rule.IPProtocol.value(), - portRanges: [portRange] as SortedSet, - range: addressableRange - ) - } - - /** - * Builds an {@link AddressableRange} from a CIDR string. 
- - /** - * Builds an {@link AddressableRange} from a CIDR string. - */ - private AddressableRange buildAddressableRangeFromCidr(String cidr) { - if (!cidr) { - return null - } - - def rangeParts = cidr.split('/') as List - - // If the cidr is just a single IP address, use 32 as the mask - if (rangeParts.size() == 1) { - rangeParts << "32" - } - - new AddressableRange(ip: rangeParts[0], cidr: "/${rangeParts[1]}") - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackServerGroupCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackServerGroupCachingAgent.groovy deleted file mode 100644 index df1c387a889..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackServerGroupCachingAgent.groovy +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Maps -import com.netflix.frigga.Names -import com.netflix.frigga.ami.AppVersion -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.cache.OnDemandAware -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLaunchConfig -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackServerGroup -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.openstack.utils.DateUtils -import groovy.util.logging.Slf4j -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.ext.status.LoadBalancerV2Status - -import java.util.concurrent.CompletableFuture -import java.util.concurrent.Future - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandType.ServerGroup -import static com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider.ID -import static 
com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.IMAGES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackServerGroupCachingAgent extends AbstractOpenstackCachingAgent implements OnDemandAgent, OnDemandAware { - - final Set providedDataTypes = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(SERVER_GROUPS.ns), - INFORMATIVE.forType(APPLICATIONS.ns), - INFORMATIVE.forType(CLUSTERS.ns), - INFORMATIVE.forType(INSTANCES.ns), - INFORMATIVE.forType(LOAD_BALANCERS.ns), - ] as Set) - - final ObjectMapper objectMapper - final OnDemandMetricsSupport metricsSupport - final String agentType = "${account.name}/${region}/${OpenstackServerGroupCachingAgent.simpleName}" - final String onDemandAgentType = "${agentType}-OnDemand" - - OpenstackServerGroupCachingAgent(final OpenstackNamedAccountCredentials account, final String region, - final ObjectMapper objectMapper, final Registry registry) { - super(account, region) - this.objectMapper = objectMapper - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${ID}:${ServerGroup}") - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - List stacks = clientProvider.listStacks(region) - List serverGroupKeys = stacks.collect { Keys.getServerGroupKey(it.name, accountName, region) } - - buildLoadDataCache(providerCache, serverGroupKeys) { CacheResultBuilder cacheResultBuilder -> - buildCacheResult(providerCache, cacheResultBuilder, stacks) - } - } - - protected CacheResult buildCacheResult(ProviderCache providerCache, CacheResultBuilder cacheResultBuilder, List stacks) { - // Look up all instances and group them by stack id - Map<String, List<String>> instancesByStackId = getInstanceIdsByStack(region, stacks) - - stacks?.each { Stack stack -> - try { - String serverGroupName = stack.name - Names names = Names.parseName(serverGroupName) - if (!names || !names.app || !names.cluster) { - log.info("Skipping server group ${serverGroupName}") - } else { - String applicationName = names.app - String clusterName = names.cluster - - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, accountName, region) - String clusterKey = Keys.getClusterKey(accountName, applicationName, clusterName) - String appKey = Keys.getApplicationKey(applicationName) - - cacheResultBuilder.namespace(APPLICATIONS.ns).keep(appKey).with { - attributes.name = applicationName - relationships[CLUSTERS.ns].add(clusterKey) - } - - cacheResultBuilder.namespace(CLUSTERS.ns).keep(clusterKey).with { - attributes.name = clusterName - attributes.accountName = accountName - relationships[APPLICATIONS.ns].add(appKey) - relationships[SERVER_GROUPS.ns].add(serverGroupKey) - } - - Stack detail = clientProvider.getStack(region, stack.name) - Set loadBalancerKeys = [].toSet() - Set statuses = [].toSet() - if (detail && detail.parameters) { - statuses = ServerGroupParameters.fromParamsMap(detail.parameters).loadBalancers?.collect { loadBalancerId -> - LoadBalancerV2Status status = null - try { - status = 
clientProvider.getLoadBalancerStatusTree(region, loadBalancerId)?.loadBalancerV2Status - if (status) { - String loadBalancerKey = Keys.getLoadBalancerKey(status.name, status.id, accountName, region) - cacheResultBuilder.namespace(LOAD_BALANCERS.ns).keep(loadBalancerKey).with { - relationships[SERVER_GROUPS.ns].add(serverGroupKey) - } - loadBalancerKeys << loadBalancerKey - } - } catch (OpenstackProviderException e) { - //Do nothing ... Load balancer not found. - } - status - }?.findAll()?.toSet() - } - - List instanceKeys = [] - instancesByStackId[stack.id]?.each { String id -> - String instanceKey = Keys.getInstanceKey(id, accountName, region) - cacheResultBuilder.namespace(INSTANCES.ns).keep(instanceKey).relationships[SERVER_GROUPS.ns].add(serverGroupKey) - instanceKeys.add(instanceKey) - } - - OpenstackServerGroup openstackServerGroup = buildServerGroup(providerCache, detail, statuses, instanceKeys) - - if (shouldUseOnDemandData(cacheResultBuilder, serverGroupKey)) { - moveOnDemandDataToNamespace(objectMapper, typeReference, cacheResultBuilder, serverGroupKey) - } else { - cacheResultBuilder.namespace(SERVER_GROUPS.ns).keep(serverGroupKey).with { - attributes = objectMapper.convertValue(openstackServerGroup, ATTRIBUTES) - relationships[APPLICATIONS.ns].add(appKey) - relationships[CLUSTERS.ns].add(clusterKey) - relationships[LOAD_BALANCERS.ns].addAll(loadBalancerKeys) - relationships[INSTANCES.ns].addAll(instanceKeys) - } - } - } - } catch (Exception e) { - log.error("Error building cache for stack ${stack}", e) - } - } - - cacheResultBuilder.namespaceBuilders.keySet().each { String namespace -> - log.info("Caching ${cacheResultBuilder.namespace(namespace).keepSize()} ${namespace} in ${agentType}") - } - - log.info("Caching ${cacheResultBuilder.onDemand.toKeep.size()} onDemand entries in ${agentType}") - log.info("Evicting ${cacheResultBuilder.onDemand.toEvict.size()} onDemand entries in ${agentType}") - - cacheResultBuilder.build() - } - - /** - * Transform stacks into a map of instance ids keyed by stack id. - * @param region - * @param stacks - * @return - */ - Map<String, List<String>> getInstanceIdsByStack(String region, List stacks) { - Map<String, CompletableFuture<List<String>>> resourceMap = stacks.collectEntries { - String name = it.name - String id = it.id - [(id) : CompletableFuture.supplyAsync { - clientProvider.getInstanceIdsForStack(region, name) - }.exceptionally { t -> [] } ] - } - - CompletableFuture.allOf(resourceMap.values().flatten() as CompletableFuture[]).join() - - resourceMap.collectEntries([:]) { [it.key, it.value.get()] } - }
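  // --- Editor's note: illustrative sketch, not original agent code ---
  // getInstanceIdsByStack fans out one async Heat query per stack and then joins them all,
  // so a slow or failing stack lookup degrades to an empty list instead of failing the whole
  // caching run. The same pattern in isolation (fetchIdsSomehow and the stack names are
  // hypothetical):
  //
  //   Map<String, CompletableFuture<List<String>>> futures = ['stack-1', 'stack-2'].collectEntries {
  //     [(it): CompletableFuture.supplyAsync { fetchIdsSomehow(it) }   // may throw
  //              .exceptionally { t -> [] }]                           // fall back to an empty list
  //   }
  //   CompletableFuture.allOf(futures.values() as CompletableFuture[]).join()
  //   Map<String, List<String>> ids = futures.collectEntries { [it.key, it.value.get()] }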
- - /** - * Helper method for creating a server group. - * @param providerCache - * @param stack - * @param statuses - * @param instanceKeys - * @return - */ - OpenstackServerGroup buildServerGroup(ProviderCache providerCache, Stack stack, Set statuses, List instanceKeys) { - ServerGroupParameters params = ServerGroupParameters.fromParamsMap(stack?.parameters ?: [:]) - Map launchConfig = buildLaunchConfig(params) - Map openstackImage = buildImage(providerCache, (String) launchConfig?.image) - Map advancedConfig = buildAdvancedConfig(params) - Set loadbalancerIds = statuses.collect { status -> Keys.getLoadBalancerKey(status.name, status.id, accountName, region) } - - OpenstackServerGroup.builder() - .account(accountName) - .region(region) - .name(stack?.name) - .createdTime(stack == null ? null : DateUtils.parseZonedDateTime(stack.creationTime).toInstant().toEpochMilli()) - .scalingConfig(buildScalingConfig(params)) - .launchConfig(launchConfig) - .loadBalancers(loadbalancerIds) - .image(openstackImage) - .buildInfo(buildInfo((Map) openstackImage?.properties)) - .disabled(calculateServerGroupStatus(providerCache, statuses, instanceKeys)) - .subnetId(params.subnetId) - .advancedConfig(advancedConfig) - .tags(params.tags ?: [:]) - .build() - } - - /** - * Creates build info from image definition. - * @param properties - * @return - */ - Map buildInfo(Map properties) { - Map result = [:] - - if (properties) { - String appVersionKey = properties.get('appversion') - - if (appVersionKey) { - AppVersion appVersion = AppVersion.parseName(appVersionKey) - - if (appVersion) { - result.packageName = appVersion.packageName - result.version = appVersion.version - result.commit = appVersion.commit - } - - String buildHost = properties.get('build_host') - String buildInfoUrl = properties.get('build_info_url') - - if (appVersion && appVersion.buildJobName) { - Map jenkinsMap = [name: appVersion.buildJobName, number: appVersion.buildNumber] - if (buildHost) { - jenkinsMap.put('host', buildHost) - } - result.jenkins = jenkinsMap - } - - if (buildInfoUrl) { - result.buildInfoUrl = buildInfoUrl - } - } - } - - result - } - - /** - * Builds the scaling config map from the server group parameters. - * @param parameters - * @return - */ - Map buildScalingConfig(ServerGroupParameters parameters) { - Map result = Maps.newHashMap() - - if (parameters) { - // Using a default value of 0 for min, max, & desired size - result.put('minSize', parameters.minSize ?: 0) - result.put('maxSize', parameters.maxSize ?: 0) - result.put('desiredSize', parameters.desiredSize ?: 0) - result.put('autoscalingType', parameters.autoscalingType ? parameters.autoscalingType.jsonValue() : "") - [up:parameters.scaleup, down:parameters.scaledown].each { - result.put("scale${it.key}".toString(), objectMapper.convertValue(it.value, ATTRIBUTES)) - } - } - - result - } - - /** - * Builds a new launch config based upon template parameters. - * @param parameters - * @return - */ - Map buildLaunchConfig(ServerGroupParameters parameters) { - Map result = Collections.emptyMap() - - if (parameters) { - OpenstackLaunchConfig launchConfig = OpenstackLaunchConfig.builder() - .image(parameters.image) - .instanceType(parameters.instanceType) - .networkId(parameters.networkId) - .loadBalancerId(parameters.loadBalancers?.join(",")) - .securityGroups(parameters.securityGroups) - .associatePublicIpAddress(parameters.floatingNetworkId != null) - .floatingNetworkId(parameters.floatingNetworkId) - .build() - - result = ((Map)objectMapper.convertValue(launchConfig, ATTRIBUTES)).findAll { it.value } - } - - result - } - - /** - * Builds advanced config from the advanced server group inputs. - * @param parameters - * @return - */ - Map buildAdvancedConfig(ServerGroupParameters parameters) { - Map params = [:] - if (parameters.sourceUserDataType) { - params << [userDataType:parameters.sourceUserDataType] - } - if (parameters.sourceUserData) { - params << [userData:parameters.sourceUserData] - } - params - }
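  // --- Editor's note: illustrative sketch, not original agent code ---
  // buildAdvancedConfig simply copies the optional user-data fields through when present.
  // Assuming ServerGroupParameters exposes these as settable properties, and with hypothetical
  // values:
  //
  //   def params = new ServerGroupParameters(sourceUserDataType: 'Text', sourceUserData: '#!/bin/bash ...')
  //   assert buildAdvancedConfig(params) == [userDataType: 'Text', userData: '#!/bin/bash ...']
  //
  // and it returns an empty map when neither field is set.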
- - /** - * Builds an image from cache; the image is needed for build info, so it is looked up here. - * @param image - * @return - */ - Map buildImage(ProviderCache providerCache, String image) { - Map result = null - - CacheData cacheData = providerCache.get(IMAGES.ns, Keys.getImageKey(image, accountName, region)) - if (cacheData) { - result = cacheData.attributes - } - - result - } - - // TODO drmaas if we cache the load balancer status tree for each load balancer, we can do this calculation - // in the OpenstackClusterProvider instead. - /** - * Calculates the server group health status. - * - * If no instances match the load balancer members, or if there are no instance keys or load balancer - * statuses, this will return true (disabled). - * @param statuses - * @return - */ - boolean calculateServerGroupStatus(ProviderCache providerCache, Set statuses, List instanceKeys) { - - // When all members for this server group are disabled, the server group is disabled, otherwise it is enabled. - Map<String, String> memberStatusMap = statuses?.collectEntries { lbStatus -> - lbStatus.listenerStatuses?.collectEntries { listenerStatus -> - listenerStatus.lbPoolV2Statuses?.collectEntries { poolStatus -> - poolStatus.memberStatuses?.collectEntries { memberStatus -> - [(memberStatus.address): memberStatus.operatingStatus.toString()] - } - } - } - } - - // Read instances from cache and create a map indexed by ipv4/ipv6 address to compare to load balancer member status - Collection instancesData = providerCache.getAll(INSTANCES.ns, instanceKeys, RelationshipCacheFilter.none()) - Map addressCacheDataMap = instancesData.collectEntries { data -> - [(data.attributes.ipv4): data, (data.attributes.ipv6): data] - } - - // Find corresponding instance id, save key for caching below, and add new lb health based upon current member status - memberStatusMap - .findAll { key, value -> - key == addressCacheDataMap[key]?.attributes?.ipv4?.toString() || - key == addressCacheDataMap[key]?.attributes?.ipv6?.toString() - } - .every { key, value -> value == "DISABLED" } - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - type == ServerGroup && cloudProvider == ID - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - OnDemandAgent.OnDemandResult result = null - - if (data.containsKey("serverGroupName") && data.account == accountName && data.region == region) { - String serverGroupName = data.serverGroupName.toString() - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, accountName, region) - - Stack stack = metricsSupport.readData { - clientProvider.getStack(region, serverGroupName) - } - - CacheResult cacheResult = metricsSupport.transformData { - buildCacheResult(providerCache, new CacheResultBuilder(startTime: Long.MAX_VALUE), stack ? [stack] : []) - } - - processOnDemandCache(cacheResult, objectMapper, metricsSupport, providerCache, serverGroupKey) - result = buildOnDemandCache(stack, onDemandAgentType, cacheResult, SERVER_GROUPS.ns, serverGroupKey) - - log.info("On demand cache refresh succeeded. Data: ${data}. Added ${stack ? 
1 : 0} items to the cache.") - } - - result - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - getAllOnDemandCacheByRegionAndAccount(providerCache, accountName, region) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSubnetCachingAgent.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSubnetCachingAgent.groovy deleted file mode 100644 index a88a0570531..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSubnetCachingAgent.groovy +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSubnet -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import groovy.util.logging.Slf4j -import org.openstack4j.model.network.Subnet - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS -import static com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider.ATTRIBUTES - -@Slf4j -class OpenstackSubnetCachingAgent extends AbstractOpenstackCachingAgent { - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(SUBNETS.ns) - ] as Set) - - final ObjectMapper objectMapper - - OpenstackSubnetCachingAgent( - final OpenstackNamedAccountCredentials account, final String region, final ObjectMapper objectMapper) { - super(account, region) - - this.objectMapper = objectMapper - } - - @Override - Collection getProvidedDataTypes() { - types - } - - @Override - String getAgentType() { - "${account.name}/${region}/${OpenstackSubnetCachingAgent.simpleName}" - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - log.info("Describing items in ${agentType}") - - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() - - clientProvider.listSubnets(region)?.each { Subnet subnet -> - String subnetKey = Keys.getSubnetKey(subnet.id, accountName, region) - - Map subnetAttributes = objectMapper.convertValue(OpenstackSubnet.from(subnet, accountName, region), ATTRIBUTES) - - cacheResultBuilder.namespace(SUBNETS.ns).keep(subnetKey).with { - attributes = subnetAttributes - attributes.account = accountName - attributes.region = region - } - } - - log.info("Caching 
${cacheResultBuilder.namespace(SUBNETS.ns).keepSize()} items in ${agentType}") - - cacheResultBuilder.build() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/config/OpenstackInfrastructureProviderConfig.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/config/OpenstackInfrastructureProviderConfig.groovy deleted file mode 100644 index 2d8796c5dac..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/config/OpenstackInfrastructureProviderConfig.groovy +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.config - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.CachingAgent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackFloatingIPCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackImageCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackInstanceCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackInstanceTypeCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackLoadBalancerCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackNetworkCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackSecurityGroupCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackServerGroupCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.OpenstackSubnetCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.annotation.Qualifier -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.DependsOn -import org.springframework.context.annotation.Scope - -@Configuration -class OpenstackInfrastructureProviderConfig { - - @Bean - ObjectMapper infraObjectMapper() { - new ObjectMapper() - } - - @Bean - @DependsOn('openstackNamedAccountCredentials') - OpenstackInfrastructureProvider openstackInfastructureProvider(AccountCredentialsRepository accountCredentialsRepository, - @Qualifier('infraObjectMapper') ObjectMapper objectMapper, 
Registry registry) { - OpenstackInfrastructureProvider provider = new OpenstackInfrastructureProvider(Sets.newConcurrentHashSet()) - synchronizeOpenstackProvider(provider, accountCredentialsRepository, objectMapper, registry) - provider - } - - @Bean - OpenstackProviderSynchronizerTypeWrapper openstackProviderSynchronizerTypeWrapper() { - new OpenstackProviderSynchronizerTypeWrapper() - } - - class OpenstackProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - @Override - Class getSynchronizerType() { - return OpenstackProviderSynchronizer - } - } - - class OpenstackProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - OpenstackProviderSynchronizer synchronizeOpenstackProvider(OpenstackInfrastructureProvider openstackInfastructureProvider, - AccountCredentialsRepository accountCredentialsRepository, - @Qualifier('infraObjectMapper') ObjectMapper objectMapper, - Registry registry) { - def scheduledAccounts = ProviderUtils.getScheduledAccounts(openstackInfastructureProvider) - def allAccounts = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, OpenstackNamedAccountCredentials) - - List newlyAddedAgents = [] - - allAccounts.each { OpenstackNamedAccountCredentials credentials -> - if (!scheduledAccounts.contains(credentials.name)) { - credentials.credentials.provider.allRegions.each { String region -> - newlyAddedAgents << new OpenstackInstanceCachingAgent(credentials, region, objectMapper) - newlyAddedAgents << new OpenstackServerGroupCachingAgent(credentials, region, objectMapper, registry) - newlyAddedAgents << new OpenstackSubnetCachingAgent(credentials, region, objectMapper) - newlyAddedAgents << new OpenstackNetworkCachingAgent(credentials, region, objectMapper) - newlyAddedAgents << new OpenstackImageCachingAgent(credentials, region, objectMapper) - newlyAddedAgents << new OpenstackSecurityGroupCachingAgent(credentials, region, objectMapper, registry) - newlyAddedAgents << new OpenstackFloatingIPCachingAgent(credentials, region, objectMapper) - newlyAddedAgents << new OpenstackLoadBalancerCachingAgent(credentials, region, objectMapper, registry) - newlyAddedAgents << new OpenstackInstanceTypeCachingAgent(credentials, region, objectMapper) - } - } - } - - if (!newlyAddedAgents.isEmpty()) { - openstackInfastructureProvider.agents.addAll(newlyAddedAgents) - } - - new OpenstackProviderSynchronizer() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/MutableCacheData.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/MutableCacheData.groovy deleted file mode 100644 index b9fc4db1471..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/MutableCacheData.groovy +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.annotation.JsonCreator -import com.fasterxml.jackson.annotation.JsonProperty -import com.netflix.spinnaker.cats.cache.CacheData - -/* TODO(lwander) this was taken from the netflix cluster caching, and should probably be shared between all providers. */ - -class MutableCacheData implements CacheData { - final String id - int ttlSeconds = -1 - final Map attributes = [:] - final Map<String, Collection<String>> relationships = [:].withDefault { [] as Set } - - public MutableCacheData(String id) { - this.id = id - } - - @JsonCreator - public MutableCacheData(@JsonProperty("id") String id, - @JsonProperty("attributes") Map attributes, - @JsonProperty("relationships") Map<String, Collection<String>> relationships) { - this(id); - this.attributes.putAll(attributes); - this.relationships.putAll(relationships); - } - - public static Map mutableCacheMap() { - return [:].withDefault { String id -> new MutableCacheData(id) } - } -}
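// --- Editor's note: illustrative sketch, not original code ---
// The withDefault closures above make the cache map self-populating: indexing by an unknown
// id creates the entry, and indexing an entry's relationships by an unknown namespace creates
// the backing Set. For example (key names hypothetical):
//
//   Map<String, CacheData> cacheMap = MutableCacheData.mutableCacheMap()
//   cacheMap['sg-key'].attributes.name = 'example'          // entry created on first access
//   cacheMap['sg-key'].relationships['instances'] << 'i-1'  // relationship set created on demand
//   assert cacheMap.size() == 1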
diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackApplicationProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackApplicationProvider.groovy deleted file mode 100644 index 366dc9231cb..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackApplicationProvider.groovy +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackApplication -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS - -@Component -class OpenstackApplicationProvider implements ApplicationProvider { - final Cache cacheView - final ObjectMapper objectMapper - - @Autowired - OpenstackApplicationProvider(final Cache cacheView, final ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set getApplications(boolean expand) { - RelationshipCacheFilter relationships = expand ? RelationshipCacheFilter.include(CLUSTERS.ns) : RelationshipCacheFilter.none() - Collection applications = cacheView.getAll( - APPLICATIONS.ns, cacheView.filterIdentifiers(APPLICATIONS.ns, "${OpenstackCloudProvider.ID}:*"), relationships - ) - applications.collect(this.&translate) - } - - @Override - OpenstackApplication getApplication(String name) { - translate(cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(name))) - } - - OpenstackApplication translate(CacheData cacheData) { - OpenstackApplication result = null - if (cacheData) { - String name = Keys.parse(cacheData.id).application - Map attributes = objectMapper.convertValue(cacheData.attributes, OpenstackInfrastructureProvider.ATTRIBUTES) - Map<String, Set<String>> clusterNames = [:].withDefault { new HashSet() } - for (String clusterId : cacheData.relationships[CLUSTERS.ns]) { - Map cluster = Keys.parse(clusterId) - if (cluster.account && cluster.cluster) { - clusterNames[cluster.account].add(cluster.cluster) - } - } - result = new OpenstackApplication(name, attributes, clusterNames) - } - result - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackClusterProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackClusterProvider.groovy deleted file mode 100644 index e4ce32c85ec..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackClusterProvider.groovy +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.consul.provider.ConsulProviderUtils -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackCluster -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackServerGroup -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import java.util.stream.Collectors - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.SERVER_GROUPS - -@Component -class OpenstackClusterProvider implements ClusterProvider { - final OpenstackCloudProvider openstackCloudProvider - final Cache cacheView - final ObjectMapper objectMapper - final Closure clusterAccountMapper = { Cluster it -> it.accountName } - final OpenstackInstanceProvider instanceProvider - - @Autowired - OpenstackClusterProvider(final OpenstackCloudProvider openstackCloudProvider, - final Cache cacheView, - final ObjectMapper objectMapper, - final OpenstackInstanceProvider instanceProvider) { - this.openstackCloudProvider = openstackCloudProvider - this.cacheView = cacheView - this.objectMapper = objectMapper - this.instanceProvider = instanceProvider - } - - @Override - Map<String, Set<OpenstackCluster.View>> getClusters() { - Map<String, Set<OpenstackCluster.View>> result = Collections.emptyMap() - - final Collection cacheResults = cacheView.getAll(CLUSTERS.ns) - - if (cacheResults) { - result = cacheResults.stream().map { CacheData cacheData -> objectMapper.convertValue(cacheData.attributes, OpenstackCluster)?.view } - .collect(Collectors.groupingBy(this.&clusterAccountMapper, Collectors.toSet())) - } - - result - } - - @Override - Map<String, Set<OpenstackCluster.View>> getClusterSummaries(final String application) { - getClustersInternal(application, false) - } - - @Override - Map<String, Set<OpenstackCluster.View>> getClusterDetails(final String application) { - getClustersInternal(application, true) - } - - @Override - Set getClusters(final String application, final String account) { - getClusterDetails(application)?.get(account) - } - - @Override - OpenstackCluster.View getCluster(String application, String account, String name, boolean includeDetails) { - getClusters(application, account)?.find { it.name == name } - } - - @Override - OpenstackCluster.View getCluster(String application, String account, String name) { - return getCluster(application, account, name, true) - } - - @Override - OpenstackServerGroup.View getServerGroup(final String account, final String region, final String name, final boolean includeDetails) { - ServerGroup result = null - CacheData cacheData = 
cacheView.get(SERVER_GROUPS.ns, Keys.getServerGroupKey(name, account, region), - RelationshipCacheFilter.include(INSTANCES.ns, LOAD_BALANCERS.ns)) - - if (cacheData) { - result = serverGroupFromCacheData(cacheData) - } - - result - } - - @Override - OpenstackServerGroup.View getServerGroup(final String account, final String region, final String name) { - return getServerGroup(account, region, name, true) - } - - @Override - String getCloudProviderId() { - return openstackCloudProvider.id - } - - @Override - boolean supportsMinimalClusters() { - return false - } - - protected Map<String, Set<OpenstackCluster.View>> getClustersInternal( - final String applicationName, final boolean includeInstanceDetails) { - Map<String, Set<OpenstackCluster.View>> result = null - - CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName)) - if (application) { - Collection clusterKeys = application.relationships[CLUSTERS.ns] - Collection clusters = cacheView.getAll(CLUSTERS.ns, clusterKeys, RelationshipCacheFilter.include(SERVER_GROUPS.ns)) - - result = clusters.stream() - .map { this.clusterFromCacheData(it, includeInstanceDetails) } - .collect(Collectors.groupingBy(this.&clusterAccountMapper, Collectors.toSet())) - } - result - } - - protected OpenstackCluster.View clusterFromCacheData(final CacheData cacheData, final boolean includeDetails = false) { - OpenstackCluster.View openstackCluster = objectMapper.convertValue(cacheData.attributes, OpenstackCluster)?.view - - Collection serverGroupKeys = cacheData.relationships[SERVER_GROUPS.ns] - if (serverGroupKeys) { - RelationshipCacheFilter filter = includeDetails ? - RelationshipCacheFilter.include(LOAD_BALANCERS.ns, INSTANCES.ns) : - RelationshipCacheFilter.include(LOAD_BALANCERS.ns) - cacheView.getAll(SERVER_GROUPS.ns, serverGroupKeys, filter).each { CacheData serverGroupCacheData -> - openstackCluster.serverGroups << serverGroupFromCacheData(serverGroupCacheData) - openstackCluster.loadBalancers.addAll(loadBalancersFromCacheData(serverGroupCacheData)) - } - } - openstackCluster - } - - protected OpenstackServerGroup.View serverGroupFromCacheData(final CacheData cacheData) { - OpenstackServerGroup.View serverGroup = objectMapper.convertValue(cacheData.attributes, OpenstackServerGroup)?.view - - Collection instanceKeys = cacheData.relationships[INSTANCES.ns] - if (instanceKeys) { - serverGroup.instances = instanceProvider.getInstances(instanceKeys) - - // Add zones from instances to server group - serverGroup.zones = serverGroup?.instances?.collect { it.zone }?.toSet() - } - - // Disabled status for Consul. - def consulNodes = serverGroup.instances?.collect { it.consulNode } ?: [] - def consulDiscoverable = ConsulProviderUtils.consulServerGroupDiscoverable(consulNodes) - if (consulDiscoverable) { - // If the server group is disabled (members are disabled or there are no load balancers), but Consul isn't, - // we say the server group is disabled and discoverable. - // If the server group isn't disabled, but Consul is, we say the server group is not disabled (can be reached via load balancer). - // If the server group and Consul are both disabled, the server group remains disabled. - // If the server group and Consul are both not disabled, the server group is not disabled. 
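      // --- Editor's note: illustrative truth table, not original code ---
      // The '&=' on the next line implements the four cases above: the final disabled flag
      // stays true only when BOTH the load-balancer-derived status and Consul report disabled.
      //
      //   lb-derived disabled | Consul disabled | resulting disabled
      //   --------------------+-----------------+-------------------
      //   true                | true            | true
      //   true                | false           | false
      //   false               | true            | false
      //   false               | false           | false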
- serverGroup.disabled &= ConsulProviderUtils.serverGroupDisabled(consulNodes) - serverGroup.discovery = true - } - - serverGroup - } - - protected Set loadBalancersFromCacheData(final CacheData cacheData) { - List result = [] - Collection loadBalancerKeys = cacheData.relationships[LOAD_BALANCERS.ns] - if (loadBalancerKeys) { - cacheView.getAll(LOAD_BALANCERS.ns, loadBalancerKeys).collect { - result << objectMapper.convertValue(it.attributes, OpenstackLoadBalancer) - } - } - result.findResults { it.view } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackImageProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackImageProvider.groovy deleted file mode 100644 index ef1e9615bd0..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackImageProvider.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import com.netflix.spinnaker.clouddriver.openstack.provider.ImageProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.IMAGES - -@Component -class OpenstackImageProvider implements ImageProvider { - - final Cache cacheView - final ObjectMapper objectMapper - - @Autowired - OpenstackImageProvider(final Cache cacheView, final ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Map<String, Set<OpenstackImage>> listImagesByAccount() { - Map<String, Set<OpenstackImage>> result = [:].withDefault { _ -> Sets.newHashSet() } - Collection filter = cacheView.filterIdentifiers(IMAGES.ns, "$OpenstackCloudProvider.ID:*") - - cacheView.getAll(IMAGES.ns, filter).each { CacheData cacheData -> - String account = Keys.parse(cacheData.id).account - result[account] << objectMapper.convertValue(cacheData.attributes, OpenstackImage) - } - - result - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceProvider.groovy deleted file mode 100644 index 6c570db1de6..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceProvider.groovy +++ 
/dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.InstanceProvider -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstance -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancerHealth -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS - -@Component -class OpenstackInstanceProvider implements InstanceProvider { - final String cloudProvider = OpenstackCloudProvider.ID - final Cache cacheView - final AccountCredentialsProvider accountCredentialsProvider - final ObjectMapper objectMapper - - @Autowired - OpenstackInstanceProvider(Cache cacheView, AccountCredentialsProvider accountCredentialsProvider, ObjectMapper objectMapper) { - this.cacheView = cacheView - this.accountCredentialsProvider = accountCredentialsProvider - this.objectMapper = objectMapper - } - - Set getInstances(Collection cacheKeys) { - cacheKeys.findResults(this.&getInstanceInternal).collect { it.view }.toSet() - } - - @Override - OpenstackInstance.View getInstance(String account, String region, String id) { - getInstanceInternal(Keys.getInstanceKey(id, account, region))?.view - } - - /** - * Shared logic between getInstance and getInstances - * @param cacheKey - * @return - */ - protected OpenstackInstance getInstanceInternal(String cacheKey) { - OpenstackInstance result = null - - CacheData instanceEntry = cacheView.get(INSTANCES.ns, cacheKey, RelationshipCacheFilter.include(LOAD_BALANCERS.ns)) - if (instanceEntry) { - result = objectMapper.convertValue(instanceEntry.attributes, OpenstackInstance) - - def loadBalancerKeys = instanceEntry.relationships[LOAD_BALANCERS.ns] - if (loadBalancerKeys) { - cacheView.getAll(LOAD_BALANCERS.ns, loadBalancerKeys).each { CacheData loadBalancerCacheData -> - OpenstackLoadBalancer loadBalancer = objectMapper.convertValue(loadBalancerCacheData.attributes, OpenstackLoadBalancer) - def foundHealths = loadBalancer.healths.findAll { OpenstackLoadBalancerHealth health -> - health.instanceId == result.instanceId - } - if 
(foundHealths) { - result.loadBalancerHealths?.addAll(foundHealths) - } - } - } - } - result - } - - @Override - String getConsoleOutput(String account, String region, String id) { - String result - OpenstackNamedAccountCredentials namedAccountCredentials = (OpenstackNamedAccountCredentials) this.accountCredentialsProvider.getCredentials(account) - if (!namedAccountCredentials) { - throw new IllegalArgumentException("Invalid credentials: ${account}:${region}") - } else { - result = namedAccountCredentials.credentials.provider.getConsoleOutput(region, id) - } - result - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceTypeProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceTypeProvider.groovy deleted file mode 100644 index eaf06f91c62..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceTypeProvider.groovy +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2016 Target Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.InstanceTypeProvider -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstanceType -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCE_TYPES - -@Component -class OpenstackInstanceTypeProvider implements InstanceTypeProvider { - - private final Cache cacheView - private final ObjectMapper objectMapper - - @Autowired - OpenstackInstanceTypeProvider(Cache cacheView, ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set getAll() { - cacheView.getAll(INSTANCE_TYPES.ns, RelationshipCacheFilter.none()) - .collect { objectMapper.convertValue(it.attributes, OpenstackInstanceType) } - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackLoadBalancerProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackLoadBalancerProvider.groovy deleted file mode 100644 index aabab568a3e..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackLoadBalancerProvider.groovy +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.annotation.JsonProperty -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance -import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackFloatingIP -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackNetwork -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSubnet -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component -import groovy.util.logging.Slf4j -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS - -@Slf4j -@Component -class OpenstackLoadBalancerProvider implements LoadBalancerProvider { - - final String cloudProvider = OpenstackCloudProvider.ID - - final Cache cacheView - final ObjectMapper objectMapper - final OpenstackClusterProvider clusterProvider - - @Autowired - OpenstackLoadBalancerProvider( - final Cache cacheView, final ObjectMapper objectMapper, final OpenstackClusterProvider clusterProvider) { - this.cacheView = cacheView - this.objectMapper = objectMapper - this.clusterProvider = clusterProvider - } - - /** - * Find all load balancers associated with all clusters that are a part of the application. 
- * @param application - * @return - */ - @Override - Set getApplicationLoadBalancers(String application) { - //get all load balancers tied to this app (via their name) - Collection identifiers = cacheView.filterIdentifiers(LOAD_BALANCERS.ns, Keys.getLoadBalancerKey(application, '*', '*', '*')) - identifiers.addAll(cacheView.filterIdentifiers(LOAD_BALANCERS.ns, Keys.getLoadBalancerKey("$application-*", '*', '*', '*'))) - Collection data = cacheView.getAll(LOAD_BALANCERS.ns, identifiers, RelationshipCacheFilter.include(SERVER_GROUPS.ns, FLOATING_IPS.ns, NETWORKS.ns, SUBNETS.ns, SECURITY_GROUPS.ns)) - !data ? Sets.newHashSet() : data.collect(this.&fromCacheData) - } - - /** - * Get load balancer(s) by account, region, and id. - * @param account - * @param region - * @param id - * @return - */ - Set getLoadBalancers(String account, String region, String id) { - String pattern = Keys.getLoadBalancerKey('*', id, account, region) - Collection identifiers = cacheView.filterIdentifiers(LOAD_BALANCERS.ns, pattern) - Collection data = cacheView.getAll(LOAD_BALANCERS.ns, identifiers, RelationshipCacheFilter.include(SERVER_GROUPS.ns, FLOATING_IPS.ns, NETWORKS.ns, SUBNETS.ns, SECURITY_GROUPS.ns)) - !data ? Sets.newHashSet() : data.collect(this.&fromCacheData) - } - - /** - * Convert load balancer cache data to a load balancer domain item. - * @param cacheData - * @return - */ - OpenstackLoadBalancer.View fromCacheData(CacheData cacheData) { - //get relationship data - OpenstackFloatingIP ip = getRelationshipData(cacheData, FLOATING_IPS.ns, OpenstackFloatingIP) - OpenstackNetwork network = getRelationshipData(cacheData, NETWORKS.ns, OpenstackNetwork) - OpenstackSubnet subnet = getRelationshipData(cacheData, SUBNETS.ns, OpenstackSubnet) - Set securityGroups = cacheData.relationships[SECURITY_GROUPS.ns]?.collect { Keys.parse(it)?.id }?.toSet() - - //build load balancer - OpenstackLoadBalancer loadBalancer = objectMapper.convertValue(cacheData.attributes, OpenstackLoadBalancer) - loadBalancer.with { - it.floatingIP = ip - it.network = network - it.subnet = subnet - it.securityGroups = securityGroups ?: [].toSet() - } - - //build load balancer server groups - Set serverGroups = cacheData.relationships[SERVER_GROUPS.ns]?.findResults { key -> - LoadBalancerServerGroup loadBalancerServerGroup = null - ServerGroup serverGroup = clusterProvider.getServerGroup(loadBalancer.account, loadBalancer.region, Keys.parse(key)['serverGroup']) - if (serverGroup) { - loadBalancerServerGroup = new LoadBalancerServerGroup(name: serverGroup.name, isDisabled: serverGroup.isDisabled()) - loadBalancerServerGroup.instances = serverGroup.instances?.collect { instance -> - new LoadBalancerInstance(id: instance.name, health: [state: instance.healthState?.toString()]) - }?.toSet() - } - loadBalancerServerGroup - }?.toSet() - loadBalancer.serverGroups = serverGroups ?: [].toSet() - - //construct view - loadBalancer.view - } - - private T getRelationshipData(CacheData parent, String type, Class clazz) { - CacheData cacheData = cacheView.getAll(type, parent.relationships[type] ?: [])?.find() - objectMapper.convertValue(cacheData?.attributes, clazz) - } - - List list() { - def searchKey = Keys.getLoadBalancerKey('*', '*', '*', '*'); - Collection identifiers = cacheView.filterIdentifiers(LOAD_BALANCERS.ns, searchKey) - def result = getSummaryForLoadBalancers(identifiers).values() as List - result - } - - LoadBalancerProvider.Item get(String name) { - throw new UnsupportedOperationException("TODO: Support a single getter") - } - - List 
byAccountAndRegionAndName(String account, - String region, - String name) { - getLoadBalancers(account, region, name) as List - } - - private Map getSummaryForLoadBalancers(Collection loadBalancerKeys) { - Map map = [:] - Map loadBalancers = cacheView.getAll(LOAD_BALANCERS.ns, loadBalancerKeys, RelationshipCacheFilter.include(SERVER_GROUPS.ns, FLOATING_IPS.ns, NETWORKS.ns, SUBNETS.ns, SECURITY_GROUPS.ns)).collectEntries { [(it.id): it] } - - - for (lb in loadBalancerKeys) { - CacheData loadBalancerFromCache = loadBalancers[lb] - if (loadBalancerFromCache) { - def parts = Keys.parse(lb) - String name = parts.name - String region = parts.region - String account = parts.account - def summary = map.get(name) - if (!summary) { - summary = new OpenstackLoadBalancerSummary(name: name) - map.put name, summary - } - def loadBalancer = new OpenstackLoadBalancerDetail() - loadBalancer.account = parts.account - loadBalancer.region = parts.region - loadBalancer.name = parts.name - loadBalancer.id = parts.id - loadBalancer.securityGroups = loadBalancerFromCache.attributes.securityGroups - loadBalancer.loadBalancerType = parts.type - if (loadBalancer.loadBalancerType == null) { - loadBalancer.loadBalancerType = "classic" - } - - // Add target group list to the load balancer. At time of implementation, this is only used - // to get the list of available target groups to deploy a server group into. Since target - // groups only exist within load balancers (in clouddriver, Openstack allows them to exist - // independently), this was an easy way to get them into deck without creating a whole new - // provider type. - if (loadBalancerFromCache.relationships[TARGET_GROUPS.ns]) { - loadBalancer.targetGroups = loadBalancerFromCache.relationships[TARGET_GROUPS.ns].collect { - Keys.parse(it).targetGroup - } - } - - summary.getOrCreateAccount(account).getOrCreateRegion(region).loadBalancers << loadBalancer - } - } - map - } - - - // view models... 
- - static class OpenstackLoadBalancerSummary implements LoadBalancerProvider.Item { - private Map mappedAccounts = [:] - String name - - OpenstackLoadBalancerAccount getOrCreateAccount(String name) { - if (!mappedAccounts.containsKey(name)) { - mappedAccounts.put(name, new OpenstackLoadBalancerAccount(name: name)) - } - mappedAccounts[name] - } - - @JsonProperty("accounts") - List getByAccounts() { - mappedAccounts.values() as List - } - } - - static class OpenstackLoadBalancerAccount implements LoadBalancerProvider.ByAccount { - private Map mappedRegions = [:] - String name - - OpenstackLoadBalancerByRegion getOrCreateRegion(String name) { - if (!mappedRegions.containsKey(name)) { - mappedRegions.put(name, new OpenstackLoadBalancerByRegion(name: name, loadBalancers: [])) - } - mappedRegions[name] - } - - @JsonProperty("regions") - List getByRegions() { - mappedRegions.values() as List - } - } - - static class OpenstackLoadBalancerByRegion implements LoadBalancerProvider.ByRegion { - String name - List loadBalancers - } - - static class OpenstackLoadBalancerDetail implements LoadBalancerProvider.Details { - String account - String region - String name - String id - String type = 'openstack' - String loadBalancerType - List securityGroups = [] - List targetGroups = [] - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackNetworkProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackNetworkProvider.groovy deleted file mode 100644 index e93116b29f7..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackNetworkProvider.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
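The summary classes above show how the deleted load balancer provider shaped its list() response: a name-keyed summary fanning out to accounts, then regions, then detail records. A hedged sketch of the resulting structure as a Groovy map literal, with placeholder values; field names follow the @JsonProperty annotations and OpenstackLoadBalancerDetail fields:

  def summaryShape = [
    name    : 'myapp-lb',
    accounts: [[
      name   : 'my-account',
      regions: [[
        name         : 'my-region',
        loadBalancers: [[
          account: 'my-account', region: 'my-region', name: 'myapp-lb', id: 'lb-uuid',
          type: 'openstack', loadBalancerType: 'classic', securityGroups: [], targetGroups: []
        ]]
      ]]
    ]]
  ]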
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.NetworkProvider -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackNetwork -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS - -@Component -class OpenstackNetworkProvider implements NetworkProvider { - - final Cache cacheView - final ObjectMapper objectMapper - - String cloudProvider = OpenstackCloudProvider.ID - - @Autowired - OpenstackNetworkProvider(final Cache cacheView, final ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set getAll() { - Collection filters = cacheView.filterIdentifiers(NETWORKS.ns, Keys.getNetworkKey('*', '*', '*')) - Collection data = cacheView.getAll(NETWORKS.ns, filters, RelationshipCacheFilter.none()) - !data ? Sets.newHashSet() : data.collect(this.&fromCacheData) - } - - OpenstackNetwork fromCacheData(CacheData cacheData) { - objectMapper.convertValue(cacheData.attributes, OpenstackNetwork) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSecurityGroupProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSecurityGroupProvider.groovy deleted file mode 100644 index 0affc5f9627..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSecurityGroupProvider.groovy +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.SecurityGroupProvider -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSecurityGroup -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS - -/** - * Provides a view of existing Openstack security groups in all configured Openstack accounts. - */ -@Slf4j -@Component -class OpenstackSecurityGroupProvider implements SecurityGroupProvider { - - final String cloudProvider = OpenstackCloudProvider.ID - final Cache cacheView - final ObjectMapper objectMapper - - @Autowired - OpenstackSecurityGroupProvider(Cache cacheView, ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set getAll(boolean includeRules) { - getAllMatchingKeyPattern(Keys.getSecurityGroupKey('*', '*', '*', '*'), includeRules) - } - - @Override - Set getAllByRegion(boolean includeRules, String region) { - getAllMatchingKeyPattern(Keys.getSecurityGroupKey('*', '*', '*', region), includeRules) - } - - @Override - Set getAllByAccount(boolean includeRules, String account) { - getAllMatchingKeyPattern(Keys.getSecurityGroupKey('*', '*', account, '*'), includeRules) - } - - @Override - Set getAllByAccountAndName(boolean includeRules, String account, String name) { - getAllMatchingKeyPattern(Keys.getSecurityGroupKey(name, '*', account, '*'), includeRules) - } - - @Override - Set getAllByAccountAndRegion(boolean includeRules, String account, String region) { - getAllMatchingKeyPattern(Keys.getSecurityGroupKey('*', '*', account, region), includeRules) - } - - @Override - OpenstackSecurityGroup get(String account, String region, String name, String vpcId) { - getAllMatchingKeyPattern(Keys.getSecurityGroupKey(name, '*', account, region), true)[0] - } - - private Set getAllMatchingKeyPattern(String pattern, boolean includeRules) { - loadResults(includeRules, cacheView.filterIdentifiers(SECURITY_GROUPS.ns, pattern)) - } - - private Set loadResults(boolean includeRules, Collection identifiers) { - Closure handleRules = includeRules ? 
{x -> x} : this.&stripRules - Collection data = cacheView.getAll(SECURITY_GROUPS.ns, identifiers, RelationshipCacheFilter.none()) - data.collect(this.&fromCacheData).collect(handleRules) - } - - private OpenstackSecurityGroup fromCacheData(CacheData cacheData) { - objectMapper.convertValue(cacheData.attributes, OpenstackSecurityGroup) - } - - private OpenstackSecurityGroup stripRules(OpenstackSecurityGroup securityGroup) { - new OpenstackSecurityGroup(id: securityGroup.id, - accountName: securityGroup.accountName, - region: securityGroup.region, - name: securityGroup.name, - description: securityGroup.description, - inboundRules: [] - ) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSubnetProvider.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSubnetProvider.groovy deleted file mode 100644 index 29084449565..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSubnetProvider.groovy +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.SubnetProvider -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSubnet -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS - -@Component -class OpenstackSubnetProvider implements SubnetProvider { - final Cache cacheView - final ObjectMapper objectMapper - - final String cloudProvider = OpenstackCloudProvider.ID - - @Autowired - OpenstackSubnetProvider(final Cache cacheView, final ObjectMapper objectMapper) { - this.cacheView = cacheView - this.objectMapper = objectMapper - } - - @Override - Set getAll() { - Collection filters = cacheView.filterIdentifiers(SUBNETS.ns, Keys.getSubnetKey('*', '*', '*')) - Collection data = cacheView.getAll(SUBNETS.ns, filters, RelationshipCacheFilter.none()) - !data ? 
Sets.newHashSet() : data.collect(this.&fromCacheData) - } - - OpenstackSubnet fromCacheData(CacheData cacheData) { - objectMapper.convertValue(cacheData.attributes, OpenstackSubnet) - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackCredentials.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackCredentials.groovy deleted file mode 100644 index 136c8012f45..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackCredentials.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.security - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.OpenstackUserDataProvider - -public class OpenstackCredentials { - - final OpenstackClientProvider provider - final OpenstackNamedAccountCredentials credentials - final OpenstackUserDataProvider userDataProvider - - OpenstackCredentials(OpenstackNamedAccountCredentials accountCredentials) { - this.provider = OpenstackProviderFactory.createProvider(accountCredentials) - this.credentials = accountCredentials - this.userDataProvider = new OpenstackUserDataProvider(this.credentials) - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackCredentialsInitializer.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackCredentialsInitializer.groovy deleted file mode 100644 index 7860ac4a7fb..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackCredentialsInitializer.groovy +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.security - -import com.netflix.spinnaker.cats.module.CatsModule -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.apache.log4j.Logger -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.context.ApplicationContext -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope -import org.springframework.stereotype.Component - -@Component -@Configuration -class OpenstackCredentialsInitializer implements CredentialsInitializerSynchronizable { - private static final Logger LOG = Logger.getLogger(this.class.simpleName) - - @Bean - List openstackNamedAccountCredentials(OpenstackConfigurationProperties openstackConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - synchronizeOpenstackAccounts(openstackConfigurationProperties, accountCredentialsRepository, null, applicationContext, providerSynchronizerTypeWrappers) - } - - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeOpenstackAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - List synchronizeOpenstackAccounts(OpenstackConfigurationProperties openstackConfigurationProperties, - AccountCredentialsRepository accountCredentialsRepository, - CatsModule catsModule, - ApplicationContext applicationContext, - List providerSynchronizerTypeWrappers) { - def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = - ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, - OpenstackNamedAccountCredentials, - openstackConfigurationProperties.accounts) - - accountsToAdd.each { OpenstackConfigurationProperties.ManagedAccount managedAccount -> - LOG.info("Found openstack managed account $managedAccount") - try { - def openstackAccount = new OpenstackNamedAccountCredentials.Builder() - .name(managedAccount.name) - .environment(managedAccount.environment ?: managedAccount.name) - .accountType(managedAccount.accountType ?: managedAccount.name) - .username(managedAccount.username) - .password(managedAccount.password) - .projectName(managedAccount.projectName) - .domainName(managedAccount.domainName) - .authUrl(managedAccount.authUrl) - .regions(managedAccount.regions) - .insecure(managedAccount.insecure) - .heatTemplateLocation(managedAccount.heatTemplatePath) - .lbaasConfig(managedAccount.lbaas) - .stackConfig(managedAccount.stack) - .consulConfig(managedAccount.consul) - .userDataFile(managedAccount.userDataFile) - .build() - LOG.info("Saving openstack account $openstackAccount") - accountCredentialsRepository.save(managedAccount.name, openstackAccount) - } catch (e) { - LOG.info "Could not load account ${managedAccount.name} for Openstack.", e - } - } - ProviderUtils.unscheduleAndDeregisterAgents(namesOfDeletedAccounts, catsModule) - - if ((namesOfDeletedAccounts || accountsToAdd) && catsModule) { - ProviderUtils.synchronizeAgentProviders(applicationContext, providerSynchronizerTypeWrappers) - } - - 
accountCredentialsRepository.all.findAll { - it instanceof OpenstackNamedAccountCredentials - } as List - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackNamedAccountCredentials.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackNamedAccountCredentials.groovy deleted file mode 100644 index 6099a1779af..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackNamedAccountCredentials.groovy +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.security - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.LbaasConfig -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.StackConfig -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import groovy.transform.ToString - -@ToString(includeNames = true, excludes = "password") -class OpenstackNamedAccountCredentials implements AccountCredentials { - static final String CLOUD_PROVIDER = "openstack" - final String name - final String environment - final String accountType - final String username - @JsonIgnore - final String password - final String projectName - final String domainName - final String authUrl - final List requiredGroupMembership - final OpenstackCredentials credentials - List regions - final Boolean insecure - final String heatTemplateLocation - final LbaasConfig lbaasConfig - final StackConfig stackConfig - final ConsulConfig consulConfig - final String userDataFile - Map> regionToZones - - - - OpenstackNamedAccountCredentials(String accountName, - String environment, - String accountType, - String username, - String password, - String projectName, - String domainName, - String authUrl, - List regions, - Boolean insecure, - String heatTemplateLocation, - LbaasConfig lbaasConfig, - StackConfig stackConfig, - ConsulConfig consulConfig, - String userDataFile) { - this(accountName, environment, accountType, username, password, null, projectName, domainName, authUrl, regions, insecure, heatTemplateLocation, lbaasConfig, stackConfig, consulConfig, userDataFile) - } - - // Explicit getter so that we can mock - LbaasConfig getLbaasConfig() { - return lbaasConfig - } - - // Explicit getter so that we can mock - StackConfig getStackConfig() { - return stackConfig - } - - OpenstackNamedAccountCredentials(String accountName, - String environment, - String accountType, - String username, - String password, - List requiredGroupMembership, - String projectName, - String domainName, - String authUrl, - List regions, - Boolean insecure, - String heatTemplateLocation, - LbaasConfig lbaasConfig, - StackConfig 
stackConfig, - ConsulConfig consulConfig, - String userDataFile) { - this.name = accountName - this.environment = environment - this.accountType = accountType - this.username = username - this.password = password - this.projectName = projectName - this.domainName = domainName - this.authUrl = authUrl - this.requiredGroupMembership = requiredGroupMembership - this.regions = regions - this.insecure = insecure - this.heatTemplateLocation = heatTemplateLocation - this.lbaasConfig = lbaasConfig - this.stackConfig = stackConfig - this.consulConfig = consulConfig - this.userDataFile = userDataFile - if (this.consulConfig?.enabled) { - this.consulConfig.applyDefaults() - } - this.credentials = buildCredentials() - } - - private OpenstackCredentials buildCredentials() { - new OpenstackCredentials(this) - } - - static class Builder { - String name - String environment - String accountType - String username - String password - String projectName - String domainName - String authUrl - List requiredGroupMembership - OpenstackCredentials credentials - List regions - Boolean insecure - String heatTemplateLocation - LbaasConfig lbaasConfig - StackConfig stackConfig - ConsulConfig consulConfig - String userDataFile - - Builder() {} - - Builder name(String name) { - this.name = name - return this - } - - Builder environment(String environment) { - this.environment = environment - return this - } - - Builder accountType(String accountType) { - this.accountType = accountType - return this - } - - Builder username(String username) { - this.username = username - return this - } - - Builder password(String password) { - this.password = password - return this - } - - Builder projectName(String projectName) { - this.projectName = projectName - return this - } - - Builder domainName(String domainName) { - this.domainName = domainName - return this - } - - Builder authUrl(String authUrl) { - this.authUrl = authUrl - return this - } - - Builder requiredGroupMembership(List requiredGroupMembership) { - this.requiredGroupMembership = requiredGroupMembership - return this - } - - Builder credentials(OpenstackCredentials credentials) { - this.credentials = credentials - return this - } - - Builder regions(List regions) { - this.regions = regions - return this - } - - Builder insecure(Boolean insecure) { - this.insecure = insecure - return this - } - - Builder heatTemplateLocation(String heatTemplateLocation) { - this.heatTemplateLocation = heatTemplateLocation - return this - } - - Builder lbaasConfig(LbaasConfig lbaasConfig) { - this.lbaasConfig = lbaasConfig - return this - } - - Builder stackConfig(StackConfig stackConfig) { - this.stackConfig = stackConfig - return this - } - - Builder consulConfig(ConsulConfig consulConfig) { - this.consulConfig = consulConfig - return this - } - - Builder userDataFile(String userDataFile) { - this.userDataFile = userDataFile - return this - } - - public OpenstackNamedAccountCredentials build() { - def account = new OpenstackNamedAccountCredentials(name, - environment, - accountType, - username, - password, - projectName, - domainName, - authUrl, - regions, - insecure, - heatTemplateLocation, - lbaasConfig, - stackConfig, - consulConfig, - userDataFile) - def provider = account.credentials.provider - def regionToZoneMap = regions.collectEntries { region -> - [(region): provider.getZones(region).findAll { zone -> zone.zoneState.available }.collect { zone -> zone.zoneName}] - } - account.regionToZones = regionToZoneMap - return account - } - } - - @Override - String getCloudProvider() 
{ - CLOUD_PROVIDER - } - - /** - * Note: this is needed because there is an interface method of this name that should be called - * in lieu of the synthetic getter for the credentials instance variable. - * @return - */ - @Override - OpenstackCredentials getCredentials() { - credentials - } - -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/task/TaskStatusAware.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/task/TaskStatusAware.groovy deleted file mode 100644 index 99aa444fa9b..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/task/TaskStatusAware.groovy +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.spinnaker.clouddriver.openstack.task - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository - -/** - * TODO - Refactor operations to use this trait and remove boilerplate logic. - */ -trait TaskStatusAware { - final String UPSERT_LOADBALANCER_PHASE = 'UPSERT_LOAD_BALANCER' - - Task getTask() { - TaskRepository.threadLocalTask.get() - } -} diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/utils/DateUtils.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/utils/DateUtils.groovy deleted file mode 100644 index 3e588d64de5..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/clouddriver/openstack/utils/DateUtils.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.utils - -import groovy.util.logging.Slf4j - -import java.time.LocalDateTime -import java.time.ZoneId -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter -import java.time.format.DateTimeParseException - -@Slf4j -class DateUtils { - - /** - * Parses a date time string. - * - * It tries the following time formats: - * - * ISO_LOCAL_DATE_TIME - * ISO_OFFSET_DATE_TIME - * - * @param time the date time string to parse - * @param defaultTime a default time to use if the given date time is null, defaults to now - * @return a parsed date time object - */ - static ZonedDateTime parseZonedDateTime(String time, ZonedDateTime defaultTime = null) { - - if (time) { - // Try a couple formats because OpenStack keeps changing formats. Sigh.
- - try { - // For date time strings that are the local time without a timezone - return LocalDateTime.parse(time, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) - } catch (DateTimeParseException e) { - log.info("Failed to parse datetime ${time} as ISO_LOCAL_DATE_TIME; ${e.message}") - } - - try { - // For date time strings that include an offset (or Z which is no offset) - return ZonedDateTime.parse(time, DateTimeFormatter.ISO_OFFSET_DATE_TIME) - } catch (DateTimeParseException e) { - log.info("Failed to parse datetime ${time} as ISO_OFFSET_DATE_TIME") - - // This is the last attempt, rethrow the exception - throw(e) - } - } else { - return defaultTime ?: ZonedDateTime.now() - } - - } -} - diff --git a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/config/OpenstackConfiguration.groovy b/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/config/OpenstackConfiguration.groovy deleted file mode 100644 index 94ef44d014d..00000000000 --- a/clouddriver-openstack/src/main/groovy/com/netflix/spinnaker/config/OpenstackConfiguration.groovy +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.config - -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.health.OpenstackHealthIndicator -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentialsInitializer -import org.springframework.beans.factory.config.ConfigurableBeanFactory -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.ConfigurationProperties -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.ComponentScan -import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Import -import org.springframework.context.annotation.Scope -import org.springframework.scheduling.annotation.EnableScheduling - - -@Configuration -@EnableConfigurationProperties -@EnableScheduling -@ConditionalOnProperty('openstack.enabled') -@ComponentScan(["com.netflix.spinnaker.clouddriver.openstack"]) -@Import([ OpenstackCredentialsInitializer ]) -class OpenstackConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - @ConfigurationProperties("openstack") - OpenstackConfigurationProperties openstackConfigurationProperties() { - new OpenstackConfigurationProperties() - } - - @Bean - OpenstackHealthIndicator openstackHealthIndicator() { - new OpenstackHealthIndicator() - } -} diff --git a/clouddriver-openstack/src/main/resources/servergroup.yaml b/clouddriver-openstack/src/main/resources/servergroup.yaml deleted file mode 100644 index 5e4b042bc76..00000000000 --- a/clouddriver-openstack/src/main/resources/servergroup.yaml +++ /dev/null 
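The DateUtils helper deleted above tries ISO_LOCAL_DATE_TIME first (applying the system zone), falls back to ISO_OFFSET_DATE_TIME, and substitutes a default (or now) for null input. A small behavioral sketch under those rules; the timestamp literals are illustrative:

  import java.time.ZoneId
  import java.time.ZonedDateTime

  def a = DateUtils.parseZonedDateTime('2016-04-08T12:30:00')       // ISO_LOCAL_DATE_TIME, system default zone applied
  def b = DateUtils.parseZonedDateTime('2016-04-08T12:30:00Z')      // ISO_OFFSET_DATE_TIME, offset (Z) preserved
  def c = DateUtils.parseZonedDateTime(null)                        // null input falls back to ZonedDateTime.now()
  def d = DateUtils.parseZonedDateTime(null, ZonedDateTime.now(ZoneId.of('UTC')))  // null input, supplied default wins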
@@ -1,184 +0,0 @@ -heat_template_version: 2016-04-08 -description: Auto scaling group for Spinnaker -parameters: - flavor: - type: string - description: Flavor used by the web servers - image: - type: string - description: Image used for servers - max_size: - type: number - description: Maximum cluster size - min_size: - type: number - description: Minimum cluster size - desired_size: - type: number - description: Desired cluster size - network_id: - type: string - description: Network used by the servers. Retained for auditing purposes. - load_balancers: - type: comma_delimited_list - description: Comma-separated string of load balancers to associate to the stack. This is not used in the stack and is defined for auditing purposes. - default: [] - zones: - type: comma_delimited_list - description: Comma-separated string of availability zones - default: [] - security_groups: - type: comma_delimited_list - description: Comma-separated string of security groups to use - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server - autoscaling_type: - type: string - description: Type of autoscaling to perform. can be cpu_util, network.incoming.bytes.rate, or network.outgoing.bytes.rate - default: cpu_util - scaleup_cooldown: - type: number - description: Minimum amount of time (in seconds) between scaleup operations - default: 60 - scaleup_adjustment: - type: number - description: Amount by which to change the instance count. Must be positive - default: 1 - scaleup_period: - type: number - description: Amount of time (in seconds) before the scaleup action is taken - default: 60 - scaleup_threshold: - type: number - description: Threshold that causes the scaleup action to occur, if held for scaleup_period seconds - default: 50 - scaledown_cooldown: - type: number - description: Minimum amount of time (in seconds) between scaledown operations - default: 60 - scaledown_adjustment: - type: number - description: Amount by which to change the instance count. 
Must be negative - default: -1 - scaledown_period: - type: number - description: Amount of time (in seconds) before the scaledown action is taken - default: 600 - scaledown_threshold: - type: number - description: Threshold that causes the scaledown action to occur, if held for scaledown_period seconds - default: 15 - source_user_data_type: - type: string - description: The source user data type (Swift, URL, Text), retained for auditing purposes - default: "" - source_user_data: - type: string - description: The unencoded source user data, retained for auditing purposes - default: "" - tags: - type: json - description: Map of key-value pairs to store in instance metadata - default: {} - user_data: - type: string - description: Raw base64-encoded string that will execute upon server boot, if cloud-init is installed - default: "" - scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - servergroup: - type: OS::Heat::AutoScalingGroup - properties: - min_size: {get_param: min_size} - max_size: {get_param: max_size} - desired_capacity: {get_param: desired_size} - resource: - type: servergroup_resource.yaml - properties: - flavor: {get_param: flavor} - image: {get_param: image} - # metering.stack is used by ceilometer to autoscale against instances that are part of this stack - # the others are user-specified - metadata: - map_merge: - - {"metering.stack": {get_param: "OS::stack_id"}} - - {"metering.stack.name": {get_param: "OS::stack_name"}} - - {get_param: tags} - network_id: {get_param: network_id} - security_groups: {get_param: security_groups} - subnet_id: {get_param: subnet_id} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - web_server_scaleup_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: servergroup} - cooldown: {get_param: scaleup_cooldown} - scaling_adjustment: {get_param: scaleup_adjustment} - web_server_scaledown_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: servergroup} - cooldown: {get_param: scaledown_cooldown} - scaling_adjustment: {get_param: scaledown_adjustment} - meter_alarm_high: - type: OS::Ceilometer::Alarm - properties: - description: Scale up if the average meter_name > scaleup_threshold for scaleup_period seconds - meter_name: {get_param: autoscaling_type} - statistic: avg - period: {get_param: scaleup_period} - evaluation_periods: 1 - threshold: {get_param: scaleup_threshold} - alarm_actions: - - {get_attr: [web_server_scaleup_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: gt - meter_alarm_low: - type: OS::Ceilometer::Alarm - properties: - description: Scale up if the average meter_name < scaledown_threshold for scaledown_period seconds - meter_name: {get_param: autoscaling_type} - statistic: avg - period: {get_param: scaledown_period} - evaluation_periods: 1 - threshold: {get_param: scaledown_threshold} - alarm_actions: - - {get_attr: [web_server_scaledown_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: lt -outputs: - OS::stack_id: - value: {get_resource: servergroup} - # we need to store subtemplate in servergroup output from create, as it is required to do an update and there is no native way - # of obtaining it from a stack - 
servergroup_resource: - description: servergroup_resource.yaml template value - value: {get_file: servergroup_resource.yaml } - # we need to store subtemplate in servergroup output from create, as it is required to do an update and there is no native way - # of obtaining it from a stack - servergroup_resource_member: - description: servergroup_resource_member.yaml template value - value: {get_file: servergroup_resource_member.yaml} - scale_up_url: - description: > - This URL is the webhook to scale up the autoscaling group. You - can invoke the scale-up operation by doing an HTTP POST to this - URL; no body nor extra headers are needed. - value: {get_attr: [web_server_scaleup_policy, alarm_url]} - scale_dn_url: - description: > - This URL is the webhook to scale down the autoscaling group. - You can invoke the scale-down operation by doing an HTTP POST to - this URL; no body nor extra headers are needed. - value: {get_attr: [web_server_scaledown_policy, alarm_url]} - servergroup_size: - description: > - This is the current size of the auto scaling group. - value: {get_attr: [servergroup, current_size]} diff --git a/clouddriver-openstack/src/main/resources/servergroup_resource.yaml b/clouddriver-openstack/src/main/resources/servergroup_resource.yaml deleted file mode 100644 index cb914ee1996..00000000000 --- a/clouddriver-openstack/src/main/resources/servergroup_resource.yaml +++ /dev/null @@ -1,56 +0,0 @@ -heat_template_version: 2016-04-08 -description: A load balanced server for Spinnaker. -parameters: - flavor: - type: string - description: Flavor used by the servers. - image: - type: string - description: Image used for servers. - metadata: - type: json - description: Server instance metadata. - network_id: - type: string - description: Network used by each server. Retained for auditing purposes. - security_groups: - type: comma_delimited_list - description: Security groups to associate with each server. - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server. - user_data: - type: string - description: String that will execute upon server boot, if cloud-init is installed. - scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - server: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - metadata: {get_param: metadata} - networks: - - subnet: {get_param: subnet_id} - security_groups: {get_param: security_groups} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - user_data_format: RAW - member: - type: OS::Heat::ResourceGroup - properties: - resource_def: - # this is dynamically generated to associate a load balancer pool member from each listener to each server - type: servergroup_resource_member.yaml - properties: - address: {get_attr: [server, first_address]} -outputs: - server_ip: - description: IP address of the load-balanced server - value: { get_attr: [server, first_address] } - lb_member: - description: LB member details - value: { get_attr: [member, show] } diff --git a/clouddriver-openstack/src/main/resources/servergroup_server.yaml b/clouddriver-openstack/src/main/resources/servergroup_server.yaml deleted file mode 100644 index 970ca798fb4..00000000000 --- a/clouddriver-openstack/src/main/resources/servergroup_server.yaml +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: 2016-04-08 -description: An auto-scaled server for Spinnaker without any load balancer association.
-parameters: - flavor: - type: string - description: Flavor used by the servers. - image: - type: string - description: Image used for servers. - metadata: - type: json - description: Server instance metadata. - network_id: - type: string - description: Network used by each server. Retained for auditing purposes. - security_groups: - type: comma_delimited_list - description: Security groups to associate with each server. - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server. - user_data: - type: string - description: Raw base64-encoded string that will execute upon server boot, if cloud-init is installed. - scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - server: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - metadata: {get_param: metadata} - networks: - - subnet: {get_param: subnet_id} - security_groups: {get_param: security_groups} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - user_data_format: RAW - -outputs: - server_ip: - description: IP address of the server - value: { get_attr: [server, first_address] } diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackCloudProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackCloudProviderSpec.groovy deleted file mode 100644 index 37432ef257c..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/OpenstackCloudProviderSpec.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack - -import spock.lang.Specification - -class OpenstackCloudProviderSpec extends Specification { - def openstackCloudProvider - - def setup() { - openstackCloudProvider = new OpenstackCloudProvider() - } - - def "Testing default values of OpenstackCloudProvider"() { - expect: - openstackCloudProvider.id == "openstack" - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/KeysSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/KeysSpec.groovy deleted file mode 100644 index b04d5c16f2c..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/KeysSpec.groovy +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
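The scale_up_url and scale_dn_url outputs in the deleted servergroup.yaml above are bare Heat webhooks: per the template's own descriptions, an HTTP POST with no body and no extra headers triggers the policy. A hedged Groovy sketch of invoking one such webhook; the URL is a placeholder, not a value from this diff:

  // Fire a scaling-policy webhook; Heat expects an empty-bodied POST with no auth headers.
  def webhook = new URL('https://heat.example.com/v1/signal/placeholder')  // stands in for the alarm_url output
  def conn = (HttpURLConnection) webhook.openConnection()
  conn.requestMethod = 'POST'
  conn.doOutput = true
  conn.outputStream.close()   // send the empty body
  assert conn.responseCode in 200..299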
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.cache - -import spock.lang.Specification -import spock.lang.Unroll - -import static com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider.ID -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.PORTS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.VIPS - -@Unroll -class KeysSpec extends Specification { - - void "test parse key format - #testCase"() { - when: - Map result = Keys.parse(value) - - then: - result == expected - - where: - testCase | value | expected - 'no delimiter' | 'test' | null - 'less than 5 parts' | 'test:test' | null - 'more than 5 parts' | 't:t:t:t:t:t' | null - } - - void "test invalid parts - #testCase"() { - when: - Map result = Keys.parse(value) - - then: - result == expected - - where: - testCase | value | expected - 'provider' | 'openstackprovider' | null - 'namespace' | 'stuff' | null - } - - void "test instance map"() { - given: - String instanceId = 'testInstance' - String account = 'testAccount' - String region = 'testRegion' - String key = Keys.getInstanceKey(instanceId, account, region) - - when: - Map result = Keys.parse(key) - - then: - result == [account: account, region: region, instanceId: instanceId, provider: ID, type: INSTANCES.ns] - } - - void "test application map"() { - given: - String application = 'application' - String key = Keys.getApplicationKey(application) - - when: - Map result = Keys.parse(key) - - then: - result == [application: application, provider: ID, type: APPLICATIONS.ns] - } - - void "test cluster map"() { - given: - String application = 'myapp' - String stack = 'stack' - String detail = 'detail' - String cluster = "$application-$stack-$detail-v000" - String account = 'account' - String key = Keys.getClusterKey(account, application, cluster) - - when: - Map result = Keys.parse(key) - - then: - result == [application: application, account: account, cluster: cluster, stack: stack, detail: detail, provider: ID, type: CLUSTERS.ns] - } - - void "test subnet map"() { - given: - String subnetId = UUID.randomUUID().toString() - String region = 'region' - String account = 'account' - String subnetKey = Keys.getSubnetKey(subnetId, account, region) - - when: - Map result = 
Keys.parse(subnetKey) - - then: - result == [region: region, id: subnetId, account: account, provider: ID, type: SUBNETS.ns] - } - - void "test get instance key"() { - given: - String instanceId = UUID.randomUUID().toString() - String account = 'account' - String region = 'region' - - when: - String result = Keys.getInstanceKey(instanceId, account, region) - - then: - result == "${ID}:${INSTANCES}:${account}:${region}:${instanceId}" as String - } - - void "test get application key"() { - given: - String application = 'application' - - when: - String result = Keys.getApplicationKey(application) - - then: - result == "${ID}:${APPLICATIONS}:${application}" as String - } - - void "test get server group key"() { - given: - String cluster = 'myapp-teststack' - String serverGroupName = "$cluster-v000" - String account = 'account' - String region = 'region' - - when: - String result = Keys.getServerGroupKey(serverGroupName, account, region) - - then: - result == "${ID}:${SERVER_GROUPS}:${cluster}:${account}:${region}:${serverGroupName}" as String - } - - void "test get server group key by cluster and server group name"() { - given: - String cluster = 'myapp-teststack' - String serverGroupName = "$cluster-v000" - String account = 'account' - String region = 'region' - - when: - String result = Keys.getServerGroupKey(cluster, serverGroupName, account, region) - - then: - result == "${ID}:${SERVER_GROUPS}:${cluster}:${account}:${region}:${serverGroupName}" as String - } - - void "test get cluster key"() { - given: - String application = 'myapp' - String cluster = 'cluster' - String account = 'account' - - when: - String result = Keys.getClusterKey(account, application, cluster) - - then: - result == "${ID}:${CLUSTERS}:${account}:${application}:${cluster}" as String - } - - void "test get subnet key"() { - given: - String subnetId = UUID.randomUUID().toString() - String region = 'region' - String account = 'account' - - when: - String result = Keys.getSubnetKey(subnetId, account, region) - - then: - result == "${ID}:${SUBNETS}:${account}:${region}:${subnetId}" as String - } - - void "test get network key"() { - given: - String networkId = UUID.randomUUID().toString() - String region = 'region' - String account = 'account' - - when: - String result = Keys.getNetworkKey(networkId, account, region) - - then: - result == "${ID}:${NETWORKS}:${account}:${region}:${networkId}" as String - } - - def "test get security group key"() { - given: - def id = UUID.randomUUID().toString() - def name = 'name' - def region = 'region' - def account = 'account' - - when: - def result = Keys.getSecurityGroupKey(name, id, account, region) - - then: - result == "${ID}:${SECURITY_GROUPS}:${name}:${id}:${region}:${account}" as String - } - - def "test security group map"() { - given: - def id = UUID.randomUUID().toString() - def name = 'name' - def region = 'region' - def account = 'account' - def key = Keys.getSecurityGroupKey(name, id, account, region) - - when: - Map result = Keys.parse(key) - - then: - result == [application: name, account: account, region: region, id: id, name: name, provider: ID, type: SECURITY_GROUPS.ns] - } - - void "test get lb key"() { - given: - String lbId = UUID.randomUUID().toString() - String lbName = 'myapp-lb' - String region = 'region' - String account = 'account' - - when: - String result = Keys.getLoadBalancerKey(lbName, lbId, account, region) - - then: - result == "${ID}:${LOAD_BALANCERS}:${account}:${region}:${lbId}:${lbName}" as String - } - - void "test get vip key"() { - given: - 
String vipId = UUID.randomUUID().toString() - String region = 'region' - String account = 'account' - - when: - String result = Keys.getVipKey(vipId, account, region) - - then: - result == "${ID}:${VIPS}:${account}:${region}:${vipId}" as String - } - - void "test get ip key"() { - given: - String ipId = UUID.randomUUID().toString() - String region = 'region' - String account = 'account' - - when: - String result = Keys.getFloatingIPKey(ipId, account, region) - - then: - result == "${ID}:${FLOATING_IPS}:${account}:${region}:${ipId}" as String - } - - void "test get port key"() { - given: - String portId = UUID.randomUUID().toString() - String region = 'region' - String account = 'account' - - when: - String result = Keys.getPortKey(portId, account, region) - - then: - result == "${ID}:${PORTS}:${account}:${region}:${portId}" as String - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OnDemandAwareSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OnDemandAwareSpec.groovy deleted file mode 100644 index 57e0ad94733..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OnDemandAwareSpec.groovy +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -package com.netflix.spinnaker.clouddriver.openstack.cache - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spectator.api.Timer -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.openstack.provider.agent.AbstractOpenstackCachingAgent -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -import static com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandType.LoadBalancer -import static com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider.getID -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.ON_DEMAND - -@Unroll -class OnDemandAwareSpec extends Specification { - - OnDemandAware onDemandAware - ObjectMapper objectMapper - OpenstackNamedAccountCredentials namedAccountCredentials - - @Shared - String region = 'region' - @Shared - String account = 'account' - - void 'setup'() { - namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - objectMapper = new ObjectMapper() - onDemandAware = new DefaultOnDemandAware(namedAccountCredentials, region) - } - - void 'should use on demand data empty'() { - given: - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(startTime: Long.MAX_VALUE) - String serverGroupKey = UUID.randomUUID().toString() - - when: - boolean result = onDemandAware.shouldUseOnDemandData(cacheResultBuilder, serverGroupKey) - - then: - !result - } - - void 'should use on demand data - #testCase'() { - given: - String serverGroupKey = UUID.randomUUID().toString() - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(startTime: 5) - cacheResultBuilder.onDemand.toKeep[serverGroupKey] = new DefaultCacheData('id', attributes, [:]) - - when: - boolean result = onDemandAware.shouldUseOnDemandData(cacheResultBuilder, serverGroupKey) - - then: - result == expectedResult - - where: - testCase | attributes | expectedResult - 'cache time greater' | [cacheTime: 6] | true - 'cache time equal' | [cacheTime: 5] | true - 'cache time less than' | [cacheTime: 4] | false - } - - void 'move on demand data to namespace'() { - given: - String serverGroupKey = UUID.randomUUID().toString() - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() - Map cacheData = ['test': [[id: serverGroupKey, attributes: ['key1': 'value1'], relationships: ['key2': ['value2']]]]] - String cacheDataString = objectMapper.writeValueAsString(cacheData) - cacheResultBuilder.onDemand.toKeep[serverGroupKey] = [attributes: [cacheResults: cacheDataString]] - - when: - onDemandAware.moveOnDemandDataToNamespace(objectMapper, onDemandAware.typeReference, cacheResultBuilder, serverGroupKey) - - then: - cacheResultBuilder.onDemand.toKeep[serverGroupKey] == null - cacheResultBuilder.namespace('test').keep(serverGroupKey).id == serverGroupKey - cacheResultBuilder.namespace('test').keep(serverGroupKey).attributes == cacheData['test'].first().attributes - cacheResultBuilder.namespace('test').keep(serverGroupKey).relationships == 
cacheData['test'].first().relationships - } - - void 'get all on demand cache by region and account'() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String lbKey = Keys.getLoadBalancerKey('name', 'id', account, region) - Collection keys = [lbKey] - Map attributes = [cacheTime: System.currentTimeMillis(), processedCount: 10, processedTime: System.currentTimeMillis()] - CacheData cacheData = new DefaultCacheData(lbKey, attributes, [:]) - - when: - Collection result = onDemandAware.getAllOnDemandCacheByRegionAndAccount(providerCache, account, region) - - then: - 1 * providerCache.getIdentifiers(ON_DEMAND.ns) >> keys - 1 * providerCache.getAll(ON_DEMAND.ns, keys) >> [cacheData] - - and: - result == [[details: Keys.parse(lbKey), cacheTime: attributes.cacheTime, processedCount: attributes.processedCount, processedTime: attributes.processedTime]] - } - - void 'get all on demand cache by region and account - empty'() { - given: - ProviderCache providerCache = Mock(ProviderCache) - - when: - Collection result = onDemandAware.getAllOnDemandCacheByRegionAndAccount(providerCache, account, region) - - then: - 1 * providerCache.getIdentifiers(ON_DEMAND.ns) >> [] - 1 * providerCache.getAll(ON_DEMAND.ns, []) >> [] - - and: - result == [] - } - - void 'build On Demand Cache - #testCase'() { - given: - Object object = Mock(Object) - String onDemandType = 'type' - CacheResult cacheResult = Mock(CacheResult) - String namespace = 'namespace' - - when: - OnDemandAgent.OnDemandResult result = onDemandAware.buildOnDemandCache(object, onDemandType, cacheResult, namespace, key) - - then: - result.sourceAgentType == onDemandType - result.cacheResult == cacheResult - result.evictions.isEmpty() - - where: - testCase | key - 'with key' | 'key' - 'without key' | null - } - - void 'build On Demand Cache with evictions'() { - given: - Object object = null - String onDemandType = 'type' - CacheResult cacheResult = Mock(CacheResult) - String namespace = 'namespace' - String key = 'key' - - when: - OnDemandAgent.OnDemandResult result = onDemandAware.buildOnDemandCache(object, onDemandType, cacheResult, namespace, key) - - then: - result.sourceAgentType == onDemandType - result.cacheResult == cacheResult - result.evictions[namespace] == [key] - } - - void 'resolve key - #testCase'() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String namespace = 'namespace' - - when: - String result = onDemandAware.resolveKey(providerCache, namespace, key) - - then: - calls * providerCache.filterIdentifiers(namespace, key) >> lookupKeys - - and: - result == expected - noExceptionThrown() - - where: - testCase | key | calls | lookupKeys | expected - 'no asterisk' | 'key' | 0 | [] | 'key' - 'asterisk - one result' | 'key*' | 1 | ['keykey'] | 'keykey' - } - - void 'resolve key exception - #testCase'() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String namespace = 'namespace' - - when: - onDemandAware.resolveKey(providerCache, namespace, key) - - then: - calls * providerCache.filterIdentifiers(namespace, key) >> lookupKeys - - and: - thrown(UnresolvableKeyException) - - where: - testCase | key | calls | lookupKeys - 'asterisk - empty' | 'key*' | 1 | [] - 'asterisk - multiple' | 'key*' | 1 | ['keya', 'keyb'] - } - - void 'process on demand cache - evict deleted items'() { - given: - CacheResult cacheResult = Mock(CacheResult) - OnDemandMetricsSupport onDemandMetricsSupport = Mock(OnDemandMetricsSupport) - ProviderCache providerCache = Mock(ProviderCache) - String key = 'key' - - when: -
onDemandAware.processOnDemandCache(cacheResult, objectMapper, onDemandMetricsSupport, providerCache, key) - - then: - 1 * cacheResult.cacheResults >> [:] - 1 * providerCache.evictDeletedItems(ON_DEMAND.ns, [key]) - } - - void 'process on demand cache - put cache data'() { - given: - CacheResult cacheResult = Mock(CacheResult) - Registry registry = Stub(Registry) { - timer(_, _) >> Mock(Timer) - } - - OnDemandMetricsSupport onDemandMetricsSupport = new OnDemandMetricsSupport(registry, onDemandAware, "${ID}:${LoadBalancer}") - ProviderCache providerCache = Mock(ProviderCache) - String key = 'key' - CacheData cacheData = new DefaultCacheData('id', [:], [:]) - Map<String, List<CacheData>> results = ['test': [cacheData]] - - when: - onDemandAware.processOnDemandCache(cacheResult, objectMapper, onDemandMetricsSupport, providerCache, key) - - then: - _ * cacheResult.cacheResults >> results - 1 * providerCache.putCacheData(ON_DEMAND.ns, _) - - } - - void 'build Load Data Cache - keep cache'() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String key = 'key' - List keys = [key] - Closure closure = { CacheResultBuilder builder -> builder.build() } - CacheData cacheData = Mock(CacheData) - Map attributes = Mock(Map) - - when: - CacheResult result = onDemandAware.buildLoadDataCache(providerCache, keys, closure) - - then: - 1 * providerCache.getAll(ON_DEMAND.ns, keys) >> [cacheData] - _ * cacheData.attributes >> attributes - 1 * attributes.get('cacheTime') >> System.currentTimeMillis() + 100 - 1 * attributes.get('processedCount') >> 0 - - and: - !result.cacheResults[ON_DEMAND.ns].isEmpty() - result.evictions.isEmpty() - } - - void 'build Load Data Cache - evict cache'() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String key = 'key' - List keys = [key] - Closure closure = { CacheResultBuilder builder -> builder.build() } - CacheData cacheData = Mock(CacheData) - Map attributes = Mock(Map) - - when: - CacheResult result = onDemandAware.buildLoadDataCache(providerCache, keys, closure) - - then: - 1 * providerCache.getAll(ON_DEMAND.ns, keys) >> [cacheData] - _ * cacheData.attributes >> attributes - 1 * attributes.get('cacheTime') >> System.currentTimeMillis() - 100 - 1 * attributes.get('processedCount') >> 1 - - and: - result.cacheResults[ON_DEMAND.ns].isEmpty() - !result.evictions.isEmpty() - } - - public class DefaultOnDemandAware extends AbstractOpenstackCachingAgent implements OnDemandAgent { - - Collection<AgentDataType> providedDataTypes = Collections.emptyList() - String agentType = 'agentType' - String onDemandAgentType = 'agentTypeOndemand' - - DefaultOnDemandAware(OpenstackNamedAccountCredentials account, String region) { - super(account, region) - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - return null - } - - @Override - OnDemandMetricsSupport getMetricsSupport() { - return null - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - return false - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - return null - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - return null - } - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OpenstackOnDemandCacheIntgSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OpenstackOnDemandCacheIntgSpec.groovy deleted file mode 100644 index 434088a3d57..00000000000 ---
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/cache/OpenstackOnDemandCacheIntgSpec.groovy +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.cache - -import com.squareup.okhttp.MediaType -import com.squareup.okhttp.OkHttpClient -import com.squareup.okhttp.Request -import com.squareup.okhttp.RequestBody -import com.squareup.okhttp.Response -import com.sun.xml.internal.ws.util.CompletedFuture -import org.springframework.http.HttpStatus -import spock.lang.Ignore -import spock.lang.Specification - -import java.util.concurrent.CompletableFuture -import java.util.concurrent.ExecutorService -import java.util.concurrent.Executors -import java.util.stream.Collectors -import java.util.stream.IntStream - -class OpenstackOnDemandCacheIntgSpec extends Specification { - public static final MediaType JSON = MediaType.parse("application/json; charset=utf-8") - OkHttpClient client - - void 'setup'() { - client = new OkHttpClient() - } - - // Use for local testing - @Ignore - void 'parallel on-demand server group' () { - given: - ExecutorService executor = Executors.newFixedThreadPool(20) - - and: - String requestBody = '{"serverGroupName": "myapp-teststack-v002", "account": "test", "region": "east"}' - RequestBody body = RequestBody.create(JSON, requestBody) - Request request = new Request.Builder() - .url('http://localhost:7002/cache/openstack/serverGroup') - .post(body) - .build() - - when: - List<CompletableFuture<Response>> completedFutureList = IntStream.rangeClosed(0, 20) - .boxed() - .map { index -> CompletableFuture.supplyAsync ({ client.newCall(request).execute() }, executor) } - .collect(Collectors.toList()) - - then: - completedFutureList.every { - it.get().code() == HttpStatus.ACCEPTED.value() - } - } - - @Ignore - void 'parallel on-demand load balancer' () { - given: - ExecutorService executor = Executors.newFixedThreadPool(20) - - and: - String requestBody = '{"loadBalancerName": "test", "account": "test", "region": "east"}' - RequestBody body = RequestBody.create(JSON, requestBody) - Request request = new Request.Builder() - .url('http://localhost:7002/cache/openstack/loadBalancer') - .post(body) - .build() - - when: - List<CompletableFuture<Response>> completedFutureList = IntStream.rangeClosed(0, 20) - .boxed() - .map { index -> CompletableFuture.supplyAsync ({ client.newCall(request).execute() }, executor) } - .collect(Collectors.toList()) - - then: - completedFutureList.every { - it.get().code() == HttpStatus.ACCEPTED.value() - } - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/BlockingStatusCheckerSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/BlockingStatusCheckerSpec.groovy deleted file mode 100644 index fbb1c36c661..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/BlockingStatusCheckerSpec.groovy +++
/dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import org.openstack4j.model.network.ext.LoadBalancerV2 -import spock.lang.Specification - -class BlockingStatusCheckerSpec extends Specification { - - void 'test execute success' () { - given: - BlockingStatusChecker adapter = BlockingStatusChecker.from(60, 5) { true } - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - - when: - LoadBalancerV2 result = adapter.execute { loadBalancer } - - then: - result == loadBalancer - } - - void 'test execute timeout' () { - given: - BlockingStatusChecker adapter = BlockingStatusChecker.from (1, 3) { - false - } - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - - when: - adapter.execute { - loadBalancer - } - - then: - thrown(OpenstackProviderException) - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackClientProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackClientProviderSpec.groovy deleted file mode 100644 index 58ece12c76c..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackClientProviderSpec.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.api.OSClient -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class OpenstackClientProviderSpec extends Specification { - - OpenstackClientProvider provider - OSClient mockClient - String region = 'region1' - - def setup() { - mockClient = Mock(OSClient) - mockClient.useRegion(region) >> mockClient - OpenstackNamedAccountCredentials credentials = Mock(OpenstackNamedAccountCredentials) - OpenstackIdentityProvider identityProvider = Spy(OpenstackIdentityV3Provider, constructorArgs:[credentials]) { - getClient() >> { mockClient } - getTokenId() >> { null } - getRegionClient(_ as String) >> { mockClient } - getAllRegions() >> { [region] } - } - OpenstackComputeProvider computeProvider = new OpenstackComputeV2Provider(identityProvider) - OpenstackNetworkingProvider networkingProvider = new OpenstackNetworkingV2Provider(identityProvider) - OpenstackOrchestrationProvider orchestrationProvider = new OpenstackOrchestrationV1Provider(identityProvider) - OpenstackImageProvider imageProvider = new OpenstackImageV2Provider(identityProvider) - OpenstackLoadBalancerProvider loadBalancerProvider = new OpenstackLoadBalancerV2Provider(identityProvider) - OpenstackSwiftProvider swiftProvider = new OpenstackSwiftV1Provider(identityProvider) - provider = new OpenstackClientProvider(identityProvider, - computeProvider, - networkingProvider, - orchestrationProvider, - imageProvider, - loadBalancerProvider, - swiftProvider) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeV2ProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeV2ProviderSpec.groovy deleted file mode 100644 index afb877ef648..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackComputeV2ProviderSpec.groovy +++ /dev/null @@ -1,464 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.model.SecurityGroup -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import org.openstack4j.api.compute.ComputeFloatingIPService -import org.openstack4j.api.compute.ComputeSecurityGroupService -import org.openstack4j.api.compute.ComputeService -import org.openstack4j.api.compute.ServerService -import org.openstack4j.api.exceptions.ServerResponseException -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.compute.Address -import org.openstack4j.model.compute.Addresses -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.compute.IPProtocol -import org.openstack4j.model.compute.SecGroupExtension -import org.openstack4j.model.compute.Server -import org.openstack4j.openstack.compute.domain.NovaSecGroupExtension -import org.springframework.http.HttpStatus - -class OpenstackComputeV2ProviderSpec extends OpenstackClientProviderSpec { - - def "create security group rule"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - - when: - provider.createSecurityGroupRule(region, id, protocol, cidr, remoteSecurityGroupId, fromPort, toPort, icmpType, icmpCode) - - then: - 1 * securityGroupService.createRule({ r -> - def from = protocol == IPProtocol.ICMP ? icmpType : fromPort - def to = protocol == IPProtocol.ICMP ? icmpCode : toPort - r.parentGroupId == id && r.ipProtocol == protocol && r.cidr == cidr && r.fromPort == from && r.toPort == to - }) - - where: - protocol | cidr | remoteSecurityGroupId | fromPort | toPort | icmpType | icmpCode - IPProtocol.TCP | '0.0.0.0/0' | null | 80 | 81 | null | null - IPProtocol.UDP | null | UUID.randomUUID().toString() | 80 | 81 | null | null - IPProtocol.ICMP | '0.0.0.0/0' | null | null | null | 2 | 3 - - } - - def "create security group rule throws exception"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - def protocol = IPProtocol.TCP - def cidr = '0.0.0.0/0' - def fromPort = 80 - def toPort = 8080 - - when: - provider.createSecurityGroupRule(region, id, protocol, cidr, null, fromPort, toPort, null, null) - - then: - 1 * securityGroupService.createRule(_) >> { throw new RuntimeException('foo') } - thrown(OpenstackProviderException) - } - - def "delete security group rule"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - - when: - provider.deleteSecurityGroupRule(region, id) - - then: - 1 * securityGroupService.deleteRule(id) - } - - def "delete security group rule throws exception"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - - when: - provider.deleteSecurityGroupRule(region, id) - - then: - 1 * 
securityGroupService.deleteRule(id) >> { throw new RuntimeException('foo') } - thrown(OpenstackProviderException) - } - - def "delete security group"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - def id = UUID.randomUUID().toString() - def success = ActionResponse.actionSuccess() - - when: - provider.deleteSecurityGroup(region, id) - - then: - 1 * securityGroupService.delete(id) >> success - } - - def "delete security group handles failure"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - def id = UUID.randomUUID().toString() - def failure = ActionResponse.actionFailed('foo', 500) - - when: - provider.deleteSecurityGroup(region, id) - - then: - 1 * securityGroupService.delete(id) >> failure - thrown(OpenstackProviderException) - } - - def "create security group"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def name = 'security-group' - def description = 'description 1' - - when: - provider.createSecurityGroup(region, name, description) - - then: - 1 * securityGroupService.create(name, description) - } - - def "create security group throws exception"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def name = 'security-group' - def description = 'description 1' - - when: - provider.createSecurityGroup(region, name, description) - - then: - 1 * securityGroupService.create(name, description) >> { throw new RuntimeException('foo') } - thrown(OpenstackProviderException) - } - - def "update security group"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - def name = 'security-group' - def description = 'description 1' - - when: - provider.updateSecurityGroup(region, id, name, description) - - then: - 1 * securityGroupService.update(id, name, description) - } - - def "update security group throws exception"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - def name = 'security-group' - def description = 'description 1' - - when: - provider.updateSecurityGroup(region, id, name, description) - - then: - 1 * securityGroupService.update(id, name, description) >> { throw new RuntimeException('foo') } - thrown(OpenstackProviderException) - } - - def "get security group"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - SecGroupExtension securityGroup = new NovaSecGroupExtension() - - when: - def actual = provider.getSecurityGroup(region, id) - - then: - actual == securityGroup - 1 * securityGroupService.get(id) >> securityGroup - } - - def "get security group returns null
when not found"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - - when: - SecurityGroup actual = provider.getSecurityGroup(region, id) - - then: - !actual - noExceptionThrown() - } - - def "get security group throws exception"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - - def id = UUID.randomUUID().toString() - - when: - provider.getSecurityGroup(region, id) - - then: - 1 * securityGroupService.get(id) >> { throw new RuntimeException('foo') } - thrown(OpenstackProviderException) - } - - def "get all security groups by region"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - def expected = [new NovaSecGroupExtension()] - - when: - def actual = provider.getSecurityGroups(region) - - then: - 1 * securityGroupService.list() >> expected - expected == actual - } - - def "get all security groups by region throws exception"() { - setup: - ComputeService compute = Mock() - ComputeSecurityGroupService securityGroupService = Mock() - mockClient.compute() >> compute - compute.securityGroups() >> securityGroupService - def exception = new RuntimeException('foo') - - when: - provider.getSecurityGroups(region) - - then: - 1 * securityGroupService.list() >> { throw exception } - def e = thrown(OpenstackProviderException) - e.cause == exception - } - - def "get instances success"() { - setup: - ComputeService computeService = Mock() - ServerService serversService = Mock() - List servers = Mock() - - when: - List result = provider.getInstances(region) - - then: - 1 * mockClient.compute() >> computeService - 1 * computeService.servers() >> serversService - 1 * serversService.list() >> servers - result == servers - noExceptionThrown() - } - - def "get instances exception"() { - setup: - ComputeService computeService = Mock() - ServerService serversService = Mock() - Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value()) - - when: - provider.getInstances(region) - - then: - 1 * mockClient.compute() >> computeService - 1 * computeService.servers() >> serversService - 1 * serversService.list() >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "get console output exception"() { - setup: - String serverId = UUID.randomUUID().toString() - ComputeService computeService = Mock() - ServerService serversService = Mock() - Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value()) - - when: - provider.getConsoleOutput(region, serverId) - - then: - 1 * mockClient.compute() >> computeService - 1 * computeService.servers() >> serversService - 1 * serversService.getConsoleOutput(serverId, -1) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "get console output success"() { - setup: - String serverId = UUID.randomUUID().toString() - ComputeService computeService = Mock() - ServerService serversService =
Mock() - String output = 'output' - - when: - String result = provider.getConsoleOutput(region, serverId) - - then: - 1 * mockClient.compute() >> computeService - 1 * computeService.servers() >> serversService - 1 * serversService.getConsoleOutput(serverId, -1) >> output - result == output - noExceptionThrown() - } - - def "test get ip address for instance succeeds"() { - setup: - String id = UUID.randomUUID().toString() - ComputeService computeService = Mock(ComputeService) - mockClient.compute() >> computeService - ServerService serverService = Mock(ServerService) - computeService.servers() >> serverService - Server server = Mock(Server) - Addresses addresses = Mock(Addresses) - server.addresses >> addresses - Address address = Mock(Address) - addresses.addresses >> ['test': [address]] - address.version >> 4 - address.addr >> '1.2.3.4' - - when: - String ip = provider.getIpForInstance(region, id) - - then: - 1 * serverService.get(id) >> server - ip == '1.2.3.4' - } - - def "test get ip address for instance throws exception"() { - setup: - String id = UUID.randomUUID().toString() - ComputeService computeService = Mock(ComputeService) - mockClient.compute() >> computeService - ServerService serverService = Mock(ServerService) - computeService.servers() >> serverService - Server server = Mock(Server) - Addresses addresses = Mock(Addresses) - server.addresses >> addresses - addresses.addresses >> [:] - - when: - provider.getIpForInstance(region, id) - - then: - 1 * serverService.get(id) >> server - Exception e = thrown(OpenstackProviderException) - e.message == "Instance ${id} has no IP address".toString() - } - - def "list floating ips success"() { - setup: - ComputeService computeService = Mock() - ComputeFloatingIPService ipService = Mock() - List ips = Mock() - - when: - List result = provider.listFloatingIps(region) - - then: - 1 * mockClient.compute() >> computeService - 1 * computeService.floatingIps() >> ipService - 1 * ipService.list() >> ips - result == ips - noExceptionThrown() - } - - def "list floating ips exception"() { - setup: - ComputeService computeService = Mock() - ComputeFloatingIPService ipService = Mock() - Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value()) - - when: - provider.listFloatingIps(region) - - then: - 1 * mockClient.compute() >> computeService - 1 * computeService.floatingIps() >> ipService - 1 * ipService.list() >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityV3ProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityV3ProviderSpec.groovy deleted file mode 100644 index db03ac86e1c..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackIdentityV3ProviderSpec.groovy +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.LbaasConfig -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.StackConfig -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.api.OSClient -import org.openstack4j.api.exceptions.ServerResponseException -import org.openstack4j.api.identity.v3.IdentityService -import org.openstack4j.api.identity.v3.RegionService -import org.openstack4j.model.identity.v3.Region -import org.openstack4j.model.identity.v3.Token -import org.springframework.http.HttpStatus -import spock.lang.Specification -import spock.lang.Unroll - - -@Unroll -class OpenstackIdentityV3ProviderSpec extends Specification { - OpenstackNamedAccountCredentials credentials - OpenstackIdentityV3Provider provider - OSClient.OSClientV3 mockClient - - def "setup"() { - String accountName = 'test' - String environment = 'env' - String accountType = 'main' - String username = 'foo' - String password = 'bar' - String projectName = 'demo' - String domainName = 'domain' - String authUrl = 'http://fake.com' - Boolean insecure = true - LbaasConfig lbassConfig = new LbaasConfig(pollInterval: 5, pollTimeout: 60) - StackConfig stackConfig = new StackConfig(pollInterval: 5, pollTimeout: 60) - ConsulConfig consulConfig = new ConsulConfig() - credentials = new OpenstackNamedAccountCredentials(accountName, environment, accountType, username, password, projectName, domainName, authUrl, [], insecure, "", lbassConfig, stackConfig, consulConfig, null) - mockClient = Mock(OSClient.OSClientV3) { - getToken() >> { Mock(Token) } - } - //IOSClientBuilder.V3.metaClass.authenticate = { mockClient } - provider = Spy(OpenstackIdentityV3Provider, constructorArgs:[credentials]) { - buildClient() >> { mockClient } - getClient() >> { mockClient } - getRegionClient(_ as String) >> { mockClient } - } - } - - def "test get regions lookup"() { - given: - IdentityService identityService = Mock(IdentityService) - RegionService regionService = Mock(RegionService) - Region region = Mock(Region) - String regionId = UUID.randomUUID().toString() - List regions = [region] - - when: - List result = provider.allRegions - - then: - 1 * mockClient.identity() >> identityService - 1 * identityService.regions() >> regionService - 1 * regionService.list() >> regions - 1 * region.id >> regionId - result == [regionId] - noExceptionThrown() - } - - def "test get regions lookup exception"() { - given: - IdentityService identityService = Mock(IdentityService) - RegionService regionService = Mock(RegionService) - Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value()) - - when: - provider.getAllRegions() - - then: - 1 * mockClient.identity() >> identityService - 1 * identityService.regions() >> regionService - 
1 * regionService.list() >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageV2ClientProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageV2ClientProviderSpec.groovy deleted file mode 100644 index c4c47d3bfef..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackImageV2ClientProviderSpec.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import org.openstack4j.api.exceptions.ServerResponseException -import org.openstack4j.api.image.v2.ImageService -import org.openstack4j.model.image.v2.Image -import org.springframework.http.HttpStatus - -class OpenstackImageV2ClientProviderSpec extends OpenstackClientProviderSpec { - - def "list images succeeds"() { - setup: - Map filters = null - ImageService imageService = Mock(ImageService) - def imageLocation = "http://example.com/image.iso" - Image image = Mock(Image) { - getLocations() >> [imageLocation] - } - - when: - List result = provider.listImages(region, filters) - - then: - 1 * mockClient.imagesV2() >> imageService - 1 * imageService.list(filters) >> [image] - - and: - result[0] instanceof OpenstackImage - result[0].location == imageLocation - noExceptionThrown() - } - - def "list images exception"() { - setup: - Map filters = null - ImageService imageService = Mock(ImageService) - Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value()) - - when: - provider.listImages(region, filters) - - then: - 1 * mockClient.imagesV2() >> imageService - 1 * imageService.list(filters) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadbalancerV2ClientProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadbalancerV2ClientProviderSpec.groovy deleted file mode 100644 index d3827833891..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackLoadbalancerV2ClientProviderSpec.groovy +++ /dev/null @@ -1,925 +0,0 @@ -/* - * Copyright 2016 Target, Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.client - -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Algorithm -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Listener -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Listener.ListenerType -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import org.openstack4j.api.exceptions.ServerResponseException -import org.openstack4j.api.networking.NetworkingService -import org.openstack4j.api.networking.ext.HealthMonitorV2Service -import org.openstack4j.api.networking.ext.LbPoolV2Service -import org.openstack4j.api.networking.ext.LbaasV2Service -import org.openstack4j.api.networking.ext.ListenerV2Service -import org.openstack4j.api.networking.ext.LoadBalancerV2Service -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.HealthMonitorV2Update -import org.openstack4j.model.network.ext.LbMethod -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.LbPoolV2Update -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.Member -import org.openstack4j.model.network.ext.MemberV2 -import org.springframework.http.HttpStatus -import spock.lang.Shared - -class OpenstackLoadbalancerV2ClientProviderSpec extends OpenstackClientProviderSpec { - - @Shared - Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value()) - - def "create load balancer success"() { - setup: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LoadBalancerV2Service loadBalancerV2Service = Mock(LoadBalancerV2Service) - LoadBalancerV2 expected = Mock(LoadBalancerV2) - - when: - LoadBalancerV2 result = provider.createLoadBalancer(region, 'name', 'desc', UUID.randomUUID().toString()) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.loadbalancer() >> loadBalancerV2Service - 1 * loadBalancerV2Service.create(_ as LoadBalancerV2) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "create load balancer exception"() { - setup: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LoadBalancerV2Service loadBalancerV2Service = Mock(LoadBalancerV2Service) - - when: - provider.createLoadBalancer(region, 'name', 'desc', UUID.randomUUID().toString()) - - then: - 1 * mockClient.networking() >> 
networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.loadbalancer() >> loadBalancerV2Service - 1 * loadBalancerV2Service.create(_ as LoadBalancerV2) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "get load balancer success"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LoadBalancerV2Service loadBalancerV2Service = Mock(LoadBalancerV2Service) - LoadBalancerV2 expected = Mock(LoadBalancerV2) - - when: - LoadBalancerV2 result = provider.getLoadBalancer(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.loadbalancer() >> loadBalancerV2Service - 1 * loadBalancerV2Service.get(id) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "get load balancer not found"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LoadBalancerV2Service loadBalancerV2Service = Mock(LoadBalancerV2Service) - LoadBalancerV2 expected = null - - when: - LoadBalancerV2 actual = provider.getLoadBalancer(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.loadbalancer() >> loadBalancerV2Service - 1 * loadBalancerV2Service.get(id) >> expected - - and: - !actual - noExceptionThrown() - } - - def "get load balancer exception"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LoadBalancerV2Service loadBalancerV2Service = Mock(LoadBalancerV2Service) - - when: - provider.getLoadBalancer(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.loadbalancer() >> loadBalancerV2Service - 1 * loadBalancerV2Service.get(id) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "create listener success"() { - setup: - String name = 'name' - String externalProtocol = Listener.ListenerType.HTTP.toString() - Integer externalPort = 80 - String description = 'HTTP:80:HTTP:8080' - String loadBalancerId = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - ListenerV2 expected = Mock(ListenerV2) - - when: - ListenerV2 result = provider.createListener(region, name, externalProtocol, externalPort, description, loadBalancerId) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.create(_ as ListenerV2) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "create listener exception"() { - setup: - String name = 'name' - String externalProtocol = Listener.ListenerType.HTTP.toString() - Integer externalPort = 80 - String 
description = 'HTTP:80:HTTP:8080' - String loadBalancerId = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - - when: - provider.createListener(region, name, externalProtocol, externalPort, description, loadBalancerId) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.create(_ as ListenerV2) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "get listener success"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - ListenerV2 expected = Mock(ListenerV2) - - when: - ListenerV2 result = provider.getListener(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.get(id) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "get listener not found"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - ListenerV2 expected = null - - when: - provider.getListener(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.get(id) >> expected - - and: - thrown(OpenstackProviderException) - } - - def "get listener exception"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - - when: - provider.getListener(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.get(id) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "delete listener success"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - ActionResponse expected = ActionResponse.actionSuccess() - - when: - ActionResponse result = provider.deleteListener(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.delete(id) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "delete listener exception"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService 
networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - ListenerV2Service listenerV2Service = Mock(ListenerV2Service) - ActionResponse expected = ActionResponse.actionFailed('failed', 404) - - when: - provider.deleteListener(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.listener() >> listenerV2Service - 1 * listenerV2Service.delete(id) >> expected - - and: - Exception e = thrown(OpenstackProviderException) - [String.valueOf(expected.code), expected.fault].every { e.message.contains(it) } - } - - def "create pool success"() { - setup: - String name = 'name' - String internalProtocol = ListenerType.HTTP.toString() - String listenerId = UUID.randomUUID().toString() - String algorithm = Algorithm.ROUND_ROBIN.name() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - LbPoolV2 expected = Mock(LbPoolV2) - - when: - LbPoolV2 result = provider.createPool(region, name, internalProtocol, algorithm, listenerId) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.create(_ as LbPoolV2) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "create pool exception"() { - setup: - String name = 'name' - String internalProtocol = ListenerType.HTTP.toString() - String listenerId = UUID.randomUUID().toString() - String algorithm = Algorithm.ROUND_ROBIN.name() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - - when: - provider.createPool(region, name, internalProtocol, algorithm, listenerId) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.create(_ as LbPoolV2) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "get pool success"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - LbPoolV2 expected = Mock(LbPoolV2) - - when: - LbPoolV2 result = provider.getPool(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.get(id) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "get pool not found"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - LbPoolV2 expected = null - - when: - provider.getPool(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.get(id) >> expected - - and: - 
thrown(OpenstackProviderException) - } - - def "get pool exception"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - - when: - provider.getPool(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.get(id) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "update pool success"() { - setup: - String id = UUID.randomUUID().toString() - String method = LbMethod.ROUND_ROBIN.name() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - LbPoolV2 expected = Mock(LbPoolV2) - - when: - LbPoolV2 result = provider.updatePool(region, id, method) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.update(id, _ as LbPoolV2Update) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "update pool exception"() { - setup: - String id = UUID.randomUUID().toString() - String method = LbMethod.ROUND_ROBIN.name() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - - when: - provider.updatePool(region, id, method) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.update(id, _ as LbPoolV2Update) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException.cause == throwable - } - - def "delete pool success"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - ActionResponse expected = ActionResponse.actionSuccess() - - when: - ActionResponse result = provider.deletePool(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.delete(id) >> expected - - and: - result == expected - noExceptionThrown() - } - - def "delete pool exception"() { - setup: - String id = UUID.randomUUID().toString() - - and: - NetworkingService networkingService = Mock(NetworkingService) - LbaasV2Service lbaasV2Service = Mock(LbaasV2Service) - LbPoolV2Service lbPoolV2Service = Mock(LbPoolV2Service) - ActionResponse expected = ActionResponse.actionFailed('failed', 404) - - when: - provider.deletePool(region, id) - - then: - 1 * mockClient.networking() >> networkingService - 1 * networkingService.lbaasV2() >> lbaasV2Service - 1 * lbaasV2Service.lbPool() >> lbPoolV2Service - 1 * lbPoolV2Service.delete(id) >> expected - - and: - Exception e = thrown(OpenstackProviderException) - [String.valueOf(expected.code), 
-      [String.valueOf(expected.code), expected.fault].every { e.message.contains(it) }
-  }
-
-  def "create monitor success"() {
-    setup:
-      String poolId = UUID.randomUUID().toString()
-      HealthMonitor healthMonitor = Mock(HealthMonitor)
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-      HealthMonitorV2 expected = Mock(HealthMonitorV2)
-
-    when:
-      HealthMonitorV2 result = provider.createMonitor(region, poolId, healthMonitor)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.create(_ as HealthMonitorV2) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "create monitor exception"() {
-    setup:
-      String poolId = UUID.randomUUID().toString()
-      HealthMonitor healthMonitor = Mock(HealthMonitor)
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-
-    when:
-      provider.createMonitor(region, poolId, healthMonitor)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.create(_ as HealthMonitorV2) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "get monitor success"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-      HealthMonitorV2 expected = Mock(HealthMonitorV2)
-
-    when:
-      HealthMonitorV2 result = provider.getMonitor(region, id)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.get(id) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "get monitor exception"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-
-    when:
-      provider.getMonitor(region, id)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.get(id) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "update monitor success"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-      HealthMonitor healthMonitor = Mock(HealthMonitor)
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-      HealthMonitorV2 expected = Mock(HealthMonitorV2)
-
-    when:
-      HealthMonitorV2 result = provider.updateMonitor(region, id, healthMonitor)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.update(id, _ as HealthMonitorV2Update) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "update monitor exception"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-      HealthMonitor healthMonitor = Mock(HealthMonitor)
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-
-    when:
-      provider.updateMonitor(region, id, healthMonitor)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.update(id, _ as HealthMonitorV2Update) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "delete monitor success"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-      ActionResponse expected = ActionResponse.actionSuccess()
-
-    when:
-      ActionResponse result = provider.deleteMonitor(region, id)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.delete(id) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "delete monitor exception"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      LbaasV2Service lbaasV2Service = Mock(LbaasV2Service)
-      HealthMonitorV2Service healthMonitorV2Service = Mock(HealthMonitorV2Service)
-      ActionResponse expected = ActionResponse.actionFailed('failed', 404)
-
-    when:
-      provider.deleteMonitor(region, id)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.lbaasV2() >> lbaasV2Service
-      1 * lbaasV2Service.healthMonitor() >> healthMonitorV2Service
-      1 * healthMonitorV2Service.delete(id) >> expected
-
-    and:
-      Exception e = thrown(OpenstackProviderException)
-      [String.valueOf(expected.code), expected.fault].every { e.message.contains(it) }
-  }
-
-  def "test add member to load balancer pool succeeds"() {
-    setup:
-      String ip = '1.2.3.4'
-      int port = 8100
-      int weight = 1
-      String lbPoolId = UUID.randomUUID().toString()
-      String subnetId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-      MemberV2 mockMember = Mock(MemberV2)
-
-    when:
-      MemberV2 actual = provider.addMemberToLoadBalancerPool(region, ip, lbPoolId, subnetId, port, weight)
-
-    then:
-      1 * poolService.createMember(lbPoolId, _ as MemberV2) >> mockMember
-      mockMember == actual
-  }
-
-  def "test add member to load balancer pool throws exception"() {
-    setup:
-      String ip = '1.2.3.4'
-      int port = 8100
-      int weight = 1
-      String lbPoolId = UUID.randomUUID().toString()
-      String subnetId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-
-    when:
-      provider.addMemberToLoadBalancerPool(region, ip, lbPoolId, subnetId, port, weight)
-
-    then:
-      1 * poolService.createMember(lbPoolId, _ as MemberV2) >> { throw new Exception("foobar") }
-      Exception e = thrown(OpenstackProviderException)
-      e.message == "Unable to process request"
-  }
-
-  def "test remove member from load balancer pool succeeds"() {
-    setup:
-      def success = ActionResponse.actionSuccess()
-      String lbPoolId = UUID.randomUUID().toString()
-      String memberId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-
-    when:
-      ActionResponse response = provider.removeMemberFromLoadBalancerPool(region, lbPoolId, memberId)
-
-    then:
-      1 * poolService.deleteMember(lbPoolId, memberId) >> success
-      response != null
-      response.code == 200
-      response.success
-      response == success
-  }
-
-  def "test remove member from load balancer pool fails"() {
-    setup:
-      def failure = ActionResponse.actionFailed('failed', 404)
-      String lbPoolId = UUID.randomUUID().toString()
-      String memberId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-
-    when:
-      provider.removeMemberFromLoadBalancerPool(region, lbPoolId, memberId)
-
-    then:
-      1 * poolService.deleteMember(lbPoolId, memberId) >> failure
-      Exception e = thrown(OpenstackProviderException)
-      e.message.contains('failed')
-      e.message.contains('404')
-  }
-
-  def "test remove member from load balancer pool throws exception"() {
-    setup:
-      String lbPoolId = UUID.randomUUID().toString()
-      String memberId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-
-    when:
-      provider.removeMemberFromLoadBalancerPool(region, lbPoolId, memberId)
-
-    then:
-      1 * poolService.deleteMember(lbPoolId, memberId) >> { throw new Exception('foobar') }
-      Exception e = thrown(OpenstackProviderException)
-      e.message == "Unable to process request"
-  }
-
-  def "test get member id for instance succeeds"() {
-    setup:
-      String lbPoolId = UUID.randomUUID().toString()
-      String ip = '1.2.3.4'
-      String memberId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-      Member member = Mock(Member)
-      member.id >> memberId
-      member.address >> ip
-
-    when:
-      String actual = provider.getMemberIdForInstance(region, ip, lbPoolId)
-
-    then:
-      1 * poolService.listMembers(lbPoolId) >> [member]
-      actual == memberId
-  }
-
-  def "test get member id for instance, member not found, throws exception"() {
-    setup:
-      String lbPoolId = UUID.randomUUID().toString()
-      String ip = '1.2.3.4'
-      String memberId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-      Member member = Mock(Member)
-      member.id >> memberId
-      member.address >> ip
-
-    when:
-      provider.getMemberIdForInstance(region, ip, lbPoolId)
-
-    then:
-      1 * poolService.listMembers(lbPoolId) >> []
-      Exception e = thrown(OpenstackProviderException)
-      e.message == "Instance with ip ${ip} is not associated with any load balancer memberships".toString()
-  }
-
-  def "test get member id for instance throws exception"() {
-    setup:
-      String lbPoolId = UUID.randomUUID().toString()
-      String ip = '1.2.3.4'
-      NetworkingService networkingService = Mock(NetworkingService)
-      mockClient.networking() >> networkingService
-      LbaasV2Service lbService = Mock(LbaasV2Service)
-      networkingService.lbaasV2() >> lbService
-      LbPoolV2Service poolService = Mock(LbPoolV2Service)
-      lbService.lbPool() >> poolService
-
-    when:
-      provider.getMemberIdForInstance(region, ip, lbPoolId)
-
-    then:
-      1 * poolService.listMembers(lbPoolId) >> { throw new Exception('foobar') }
-      Exception e = thrown(OpenstackProviderException)
-      e.message == "Unable to process request"
-  }
-
-}
diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingV2ClientProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingV2ClientProviderSpec.groovy
deleted file mode 100644
index 6a5b469a376..00000000000
--- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackNetworkingV2ClientProviderSpec.groovy
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.client
-
-import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException
-import org.openstack4j.api.exceptions.ServerResponseException
-import org.openstack4j.api.networking.NetFloatingIPService
-import org.openstack4j.api.networking.NetworkingService
-import org.openstack4j.api.networking.PortService
-import org.openstack4j.api.networking.SubnetService
-import org.openstack4j.model.network.NetFloatingIP
-import org.openstack4j.model.network.Port
-import org.openstack4j.model.network.Subnet
-import org.openstack4j.openstack.networking.domain.NeutronPort
-import org.springframework.http.HttpStatus
-import spock.lang.Shared
-
-class OpenstackNetworkingV2ClientProviderSpec extends OpenstackClientProviderSpec {
-
-  @Shared
-  Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value())
-
-  def "get subnet - #testCase"() {
-    setup:
-      String region = 'region1'
-      String subnetId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      SubnetService subnetService = Mock()
-
-    when:
-      Subnet subnet = provider.getSubnet(region, subnetId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.subnet() >> subnetService
-      1 * subnetService.get(subnetId) >> expected
-      subnet == expected
-      noExceptionThrown()
-
-    where:
-      testCase           | expected
-      'Subnet found'     | Mock(Subnet)
-      'Subnet not found' | null
-  }
-
-  def "get subnet - exception"() {
-    setup:
-      String region = 'region1'
-      String subnetId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      SubnetService subnetService = Mock()
-
-    when:
-      provider.getSubnet(region, subnetId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.subnet() >> subnetService
-      1 * subnetService.get(subnetId) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "associate floating ip to vip success"() {
-    setup:
-      String floatingIp = UUID.randomUUID().toString()
-      String vipId = UUID.randomUUID().toString()
-      String portId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      Port port = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-      NetFloatingIP netFloatingIP = Mock()
-
-    when:
-      NetFloatingIP result = provider.associateFloatingIpToVip(region, floatingIp, vipId)
-
-    then:
-      2 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> [port]
-      1 * port.name >> "vip-${vipId}"
-      1 * port.id >> portId
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.associateToPort(floatingIp, portId) >> netFloatingIP
-      result == netFloatingIP
-      noExceptionThrown()
-  }
-
-  def "associate floating ip to vip - not found"() {
-    setup:
-      String floatingIp = UUID.randomUUID().toString()
-      String vipId = UUID.randomUUID().toString()
-      String portId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      Port port = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-      NetFloatingIP netFloatingIP = Mock()
-
-    when:
-      provider.associateFloatingIpToVip(region, floatingIp, vipId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> [port]
-      1 * port.name >> "vip"
-      0 * port.id >> portId
-      0 * networkingService.floatingip() >> floatingIPService
-      0 * floatingIPService.associateToPort(floatingIp, portId) >> netFloatingIP
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.message.contains(vipId)
-  }
-
-  def "associate floating ip to vip - exception"() {
-    setup:
-      String floatingIp = UUID.randomUUID().toString()
-      String vipId = UUID.randomUUID().toString()
-      String portId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      Port port = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-
-    when:
-      provider.associateFloatingIpToVip(region, floatingIp, vipId)
-
-    then:
-      2 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> [port]
-      1 * port.name >> "vip-${vipId}"
-      1 * port.id >> portId
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.associateToPort(floatingIp, portId) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "disassociate floating ip success"() {
-    setup:
-      String floatingIp = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-      NetFloatingIP netFloatingIP = Mock()
-
-    when:
-      NetFloatingIP result = provider.disassociateFloatingIp(region, floatingIp)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.disassociateFromPort(floatingIp) >> netFloatingIP
-      result == netFloatingIP
-      noExceptionThrown()
-  }
-
-  def "disassociate floating ip exception"() {
-    setup:
-      String floatingIp = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-
-
-    when:
-      provider.disassociateFloatingIp(region, floatingIp)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.disassociateFromPort(floatingIp) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "get port for vip found"() {
-    setup:
-      String vipId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      Port port = Stub(Port) {
-        getName() >> "vip-$vipId"
-      }
-
-    when:
-      Port result = provider.getPortForVip(region, vipId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> [port]
-      result == port
-      noExceptionThrown()
-  }
-
-  def "get port for vip not found"() {
-    setup:
-      String vipId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      Port port = Stub(Port) {
-        getName() >> "vip-${UUID.randomUUID().toString()}"
-      }
-
-    when:
-      Port result = provider.getPortForVip(region, vipId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> [port]
-      result == null
-      noExceptionThrown()
-  }
-
-  def "get port for vip not found empty list"() {
-    setup:
-      String vipId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      Port port = Stub(Port) {
-        getName() >> "vip-${UUID.randomUUID().toString()}"
-      }
-
-    when:
-      Port result = provider.getPortForVip(region, vipId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> []
-      result == null
-      noExceptionThrown()
-  }
-
-  def "get port for vip exception"() {
-    setup:
-      String vipId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-
-
-    when:
-      provider.getPortForVip(region, vipId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "list subnets succeeds"() {
-    setup:
-      NetworkingService networkingService = Mock(NetworkingService)
-      SubnetService subnetService = Mock(SubnetService)
-      List mockSubnets = [Mock(Subnet)]
-
-    when:
-      List result = provider.listSubnets(region)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.subnet() >> subnetService
-      1 * subnetService.list() >> mockSubnets
-
-    and:
-      result == mockSubnets
-      noExceptionThrown()
-  }
-
-  def "list subnets exception"() {
-    setup:
-      NetworkingService networkingService = Mock(NetworkingService)
-      SubnetService subnetService = Mock(SubnetService)
-
-    when:
-      provider.listSubnets(region)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.subnet() >> subnetService
-      1 * subnetService.list() >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "list ports success"() {
-    setup:
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-      List ports = Mock()
-
-    when:
-      List result = provider.listPorts(region)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> ports
-      result == ports
-      noExceptionThrown()
-  }
-
-  def "list ports exception"() {
-    setup:
-      NetworkingService networkingService = Mock()
-      PortService portService = Mock()
-
-    when:
-      provider.listPorts(region)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.list() >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "get associated floating ip success"() {
-    setup:
-      String portId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-      NetFloatingIP floatingIP = Stub() {
-        getPortId() >> portId
-      }
-
-    when:
-      NetFloatingIP result = provider.getFloatingIpForPort(region, portId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.list() >> [floatingIP]
-      result == floatingIP
-      noExceptionThrown()
-  }
-
-  def "get associated floating ip - exception"() {
-    setup:
-      String portId = UUID.randomUUID().toString()
-      NetworkingService networkingService = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-      NetFloatingIP floatingIP = Stub() {
-        getPortId() >> portId
-      }
-
-    when:
-      provider.getFloatingIpForPort(region, portId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.list() >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "list floating ip success"() {
-    setup:
-      List ips = [Mock(NetFloatingIP)]
-      NetworkingService networkingService = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-
-    when:
-      List result = provider.listNetFloatingIps(region)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.list() >> ips
-      result == ips
-      noExceptionThrown()
-  }
-
-  def "list floating ip - exception"() {
-    setup:
-      NetworkingService networkingService = Mock()
-      NetFloatingIPService floatingIPService = Mock()
-
-
-    when:
-      provider.listNetFloatingIps(region)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> floatingIPService
-      1 * floatingIPService.list() >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-
-  def "get port success"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      PortService portService = Mock(PortService)
-      Port expected = Mock(Port)
-
-    when:
-      Port result = provider.getPort(region, id)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.get(id) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "get port exception"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      PortService portService = Mock(PortService)
-
-    when:
-      provider.getPort(region, id)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.port() >> portService
-      1 * portService.get(id) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "update port success"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-      List groups = ['test']
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      PortService portService = Mock(PortService)
-      NeutronPort expected = Mock(NeutronPort)
-
-    when:
-      Port result = provider.updatePort(region, id, groups)
-
-    then:
-      _ * mockClient.networking() >> networkingService
-      _ * networkingService.port() >> portService
-      1 * portService.update(_) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "update port exception"() {
-    setup:
-      String id = UUID.randomUUID().toString()
-      List groups = ['test']
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      PortService portService = Mock(PortService)
-      NeutronPort expected = Mock(NeutronPort)
-
-    when:
-      provider.updatePort(region, id, groups)
-
-    then:
-      _ * mockClient.networking() >> networkingService
-      _ * networkingService.port() >> portService
-      1 * portService.update(_) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "associate floating ip to port success"() {
-    setup:
-      String floatingIpId = UUID.randomUUID().toString()
-      String portId = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      NetFloatingIPService netFloatingIPService = Mock(NetFloatingIPService)
-      NetFloatingIP expected = Mock(NetFloatingIP)
-      Port port = Mock(Port) { getId() >> portId }
-
-    when:
-      NetFloatingIP result = provider.associateFloatingIpToPort(region, floatingIpId, portId)
-
-    then:
-      _ * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> netFloatingIPService
-      1 * netFloatingIPService.associateToPort(floatingIpId, port.id) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "associate floating ip to port exception"() {
-    setup:
-      String floatingIpId = UUID.randomUUID().toString()
-      String portId = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      NetFloatingIPService netFloatingIPService = Mock(NetFloatingIPService)
-      Port port = Mock(Port) { getId() >> portId }
-
-    when:
-      provider.associateFloatingIpToPort(region, floatingIpId, portId)
-
-    then:
-      _ * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> netFloatingIPService
-      1 * netFloatingIPService.associateToPort(floatingIpId, port.id) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "disassociate floating ip to port success"() {
-    setup:
-      String floatingIpId = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      NetFloatingIPService netFloatingIPService = Mock(NetFloatingIPService)
-      NetFloatingIP expected = Mock(NetFloatingIP)
-
-    when:
-      NetFloatingIP result = provider.disassociateFloatingIpFromPort(region, floatingIpId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> netFloatingIPService
-      1 * netFloatingIPService.disassociateFromPort(floatingIpId) >> expected
-
-    and:
-      result == expected
-      noExceptionThrown()
-  }
-
-  def "disassociate floating ip to port exception"() {
-    setup:
-      String floatingIpId = UUID.randomUUID().toString()
-
-    and:
-      NetworkingService networkingService = Mock(NetworkingService)
-      NetFloatingIPService netFloatingIPService = Mock(NetFloatingIPService)
-
-    when:
-      provider.disassociateFloatingIpFromPort(region, floatingIpId)
-
-    then:
-      1 * mockClient.networking() >> networkingService
-      1 * networkingService.floatingip() >> netFloatingIPService
-      1 * netFloatingIPService.disassociateFromPort(floatingIpId) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-}
diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationV1ClientProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationV1ClientProviderSpec.groovy
deleted file mode 100644
index aea3054232d..00000000000
--- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackOrchestrationV1ClientProviderSpec.groovy
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.client
-
-import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters
-import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException
-import org.openstack4j.api.Builders
-import org.openstack4j.api.exceptions.ServerResponseException
-import org.openstack4j.api.heat.HeatService
-import org.openstack4j.api.heat.ResourcesService
-import org.openstack4j.api.heat.StackService
-import org.openstack4j.api.heat.TemplateService
-import org.openstack4j.model.common.ActionResponse
-import org.openstack4j.model.heat.Resource
-import org.openstack4j.model.heat.Stack
-import org.openstack4j.model.heat.StackCreate
-import org.openstack4j.model.heat.StackUpdate
-import org.openstack4j.openstack.heat.domain.HeatStack
-import org.springframework.http.HttpStatus
-
-class OpenstackOrchestrationV1ClientProviderSpec extends OpenstackClientProviderSpec {
-
-  def "deploy heat stack succeeds"() {
-    setup:
-      Stack stack = Mock(Stack)
-      HeatService heat = Mock(HeatService)
-      StackService stackApi = Mock(StackService)
-      mockClient.heat() >> heat
-      heat.stacks() >> stackApi
-      String stackName = "mystack"
-      String tmpl = "foo: bar"
-      Map subtmpl = [sub: "foo: bar"]
-      String region = 'region'
-      boolean disableRollback = false
-      int timeoutMins = 5
-      String instanceType = 'm1.small'
-      String image = 'ubuntu-latest'
-      int maxSize = 5
-      int minSize = 3
-      int desiredSize = 4
-      String subnetId = '9999'
-      String networkId = '1234'
-      List loadBalancerIds = ['5678']
-      List securityGroups = ['sg1']
-      ServerGroupParameters parameters = new ServerGroupParameters(instanceType: instanceType, image: image,
-        maxSize: maxSize, minSize: minSize, desiredSize: desiredSize,
-        subnetId: subnetId, networkId: networkId, loadBalancers: loadBalancerIds, securityGroups: securityGroups,
-        autoscalingType: ServerGroupParameters.AutoscalingType.CPU,
-        scaleup: new ServerGroupParameters.Scaler(cooldown: 60, period: 60, adjustment: 1, threshold: 50),
-        scaledown: new ServerGroupParameters.Scaler(cooldown: 60, period: 600, adjustment: -1, threshold: 15),
-        rawUserData: 'echo foobar',
-        tags: ['foo': 'bar'],
-        sourceUserDataType: 'Text',
-        sourceUserData: 'echo foobar',
-        zones: ["az1","az2"],
-        schedulerHints: ["key": "value"]
-      )
-      Map params = [
-        flavor               : parameters.instanceType,
-        image                : parameters.image,
-        max_size             : "$parameters.maxSize".toString(),
-        min_size             : "$parameters.minSize".toString(),
-        desired_size         : "$parameters.desiredSize".toString(),
-        network_id           : parameters.networkId,
-        subnet_id            : "$parameters.subnetId".toString(),
-        load_balancers       : parameters.loadBalancers.join(','),
-        security_groups      : parameters.securityGroups.join(','),
-        autoscaling_type     : 'cpu_util',
-        scaleup_cooldown     : 60,
-        scaleup_adjustment   : 1,
-        scaleup_period       : 60,
-        scaleup_threshold    : 50,
-        scaledown_cooldown   : 60,
-        scaledown_adjustment : -1,
-        scaledown_period     : 600,
-        scaledown_threshold  : 15,
-        source_user_data_type: 'Text',
-        source_user_data     : 'echo foobar',
-        tags                 : '{"foo":"bar"}',
-        user_data            : parameters.rawUserData,
-        zones                : 'az1,az2',
-        scheduler_hints      : '{"key":"value"}'
-      ]
-      List tags = loadBalancerIds.collect { "lb-${it}" }
-      StackCreate stackCreate = Builders.stack().disableRollback(disableRollback).files(subtmpl).name(stackName).parameters(params).template(tmpl).timeoutMins(timeoutMins).tags(tags.join(',')).build()
-
-    when:
-      provider.deploy(region, stackName, tmpl, subtmpl, parameters, disableRollback, timeoutMins, tags)
-
-    then:
-      1 * stackApi.create(_ as StackCreate) >> { StackCreate sc ->
-        assert sc.disableRollback == stackCreate.disableRollback
-        assert sc.name == stackCreate.name
-        assert sc.parameters.toString() == stackCreate.parameters.toString()
-        assert sc.template == stackCreate.template
-        stack
-      }
-      noExceptionThrown()
-  }
-
-  def "deploy heat stack fails - exception"() {
-    setup:
-      HeatService heat = Mock(HeatService)
-      StackService stackApi = Mock(StackService)
-      mockClient.heat() >> heat
-      heat.stacks() >> stackApi
-      String stackName = "mystack"
-      String tmpl = "foo: bar"
-      Map subtmpl = [sub: "foo: bar"]
-      String region = 'region'
-      boolean disableRollback = false
-      int timeoutMins = 5
-      String instanceType = 'm1.small'
-      String image = 'ubuntu-latest'
-      int maxSize = 5
-      int minSize = 3
-      String networkId = '1234'
-      List loadBalancerIds = ['5678']
-      List securityGroups = ['sg1']
-      List tags = loadBalancerIds.collect { "lb-${it}" }
-      ServerGroupParameters parameters = new ServerGroupParameters(instanceType: instanceType, image: image, maxSize: maxSize, minSize: minSize, networkId: networkId, loadBalancers: loadBalancerIds, securityGroups: securityGroups)
-
-    when:
-      provider.deploy(region, stackName, tmpl, subtmpl, parameters, disableRollback, timeoutMins, tags)
-
-    then:
-      1 * stackApi.create(_ as StackCreate) >> { throw new Exception('foobar') }
-      Exception e = thrown(OpenstackProviderException)
-      e.message == 'Unable to process request'
-      e.cause.message == 'foobar'
-  }
-
-  def "get instance ids for stack succeeds"() {
-    setup:
-      HeatService heat = Mock()
-      ResourcesService resourcesService = Mock()
-      String id1 = UUID.randomUUID().toString()
-      String id2 = UUID.randomUUID().toString()
-      Resource r1 = Stub() {
-        getPhysicalResourceId() >> id1
-        getType() >> "OS::Nova::Server"
-      }
-      Resource r2 = Stub() {
-        getPhysicalResourceId() >> id2
-        getType() >> "not:a:server"
-      }
-      List resources = [r1, r2]
-
-    when:
-      List result = provider.getInstanceIdsForStack(region, "mystack")
-
-    then:
-      1 * mockClient.heat() >> heat
-      1 * heat.resources() >> resourcesService
-      1 * resourcesService.list("mystack", 10) >> resources
-      result == [id1]
-      noExceptionThrown()
-  }
-
-  def "get instance ids for stack throws exception"() {
-    setup:
-      HeatService heat = Mock()
-      ResourcesService resourcesService = Mock()
-      Throwable throwable = new ServerResponseException('foo', HttpStatus.INTERNAL_SERVER_ERROR.value())
-
-    when:
-      provider.getInstanceIdsForStack(region, "mystack")
-
-    then:
-      1 * mockClient.heat() >> heat
-      1 * heat.resources() >> resourcesService
-      1 * resourcesService.list("mystack", 10) >> { throw throwable }
-
-    and:
-      OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException)
-      openstackProviderException.cause == throwable
-  }
-
-  def "get heat template succeeds"() {
-    setup:
-      HeatService heat = Mock()
-      TemplateService templateApi = Mock()
-      mockClient.useRegion(_ as String).heat() >> heat
-      heat.templates() >> templateApi
-
-    when:
-      provider.getHeatTemplate("myregion", "mystack", "mystackid")
-
-    then:
-      1 * mockClient.useRegion("myregion") >> mockClient
-      1 * mockClient.heat() >> heat
-      1 * templateApi.getTemplateAsString("mystack", "mystackid")
-      noExceptionThrown()
-  }
-
-  def "get stack succeeds"() {
-    setup:
-      String stackName = 'stackofpancakesyumyum'
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-
-    when:
-      Stack actual = provider.getStack('region1', stackName)
-
-    then:
-      1 * stackService.getStackByName(stackName) >> mockStack
-      actual == mockStack
-  }
-
-  def "get stack does not find stack"() {
-    given:
-      HeatService heat = Mock()
-      StackService stackApi = Mock()
-      mockClient.heat() >> heat
-      heat.stacks() >> stackApi
-      def name = 'mystack'
-
-    when:
-      Stack actual = provider.getStack(region, name)
-
-    then:
-      1 * stackApi.getStackByName(name) >> null
-      !actual
-      noExceptionThrown()
-  }
-
-  def "get stack fails - exception"() {
-    setup:
-      String stackName = 'stackofpancakesyumyum'
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-
-    when:
-      provider.getStack('region1', stackName)
-
-    then:
-      1 * stackService.getStackByName(stackName) >> { throw new Exception('foo') }
-      Exception e = thrown(OpenstackProviderException)
-      e.cause.message == 'foo'
-  }
-
-  def "delete stack succeeds"() {
-    setup:
-      def success = ActionResponse.actionSuccess()
-      String stackName = 'stackofpancakesyumyum'
-      String stackId = UUID.randomUUID().toString()
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-      mockStack.name >> stackName
-      mockStack.id >> stackId
-
-    when:
-      provider.destroy('region1', mockStack)
-
-    then:
-      1 * stackService.delete(stackName, stackId) >> success
-  }
-
-  def "delete stack fails - exception"() {
-    setup:
-      String stackName = 'stackofpancakesyumyum'
-      String stackId = UUID.randomUUID().toString()
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-      mockStack.name >> stackName
-      mockStack.id >> stackId
-
-    when:
-      provider.destroy('region1', mockStack)
-
-    then:
-      1 * stackService.delete(stackName, stackId) >> { throw new Exception('foo') }
-      Exception e = thrown(OpenstackProviderException)
-      e.cause.message == 'foo'
-  }
-
-  def "delete stack fails - failed status"() {
-    setup:
-      ActionResponse failed = ActionResponse.actionFailed('foo', 400)
-      String stackName = 'stackofpancakesyumyum'
-      String stackId = UUID.randomUUID().toString()
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-      mockStack.name >> stackName
-      mockStack.id >> stackId
-
-    when:
-      provider.destroy('region1', mockStack)
-
-    then:
-      1 * stackService.delete(stackName, stackId) >> failed
-      Exception e = thrown(OpenstackProviderException)
-      e.message == "Action request failed with fault foo and code 400"
-  }
-
-  def "resize stack succeeds"() {
-    setup:
-      ActionResponse success = ActionResponse.actionSuccess()
-      String stackName = 'stackofpancakesyumyum'
-      String stackId = UUID.randomUUID().toString()
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-      mockStack.name >> stackName
-      mockStack.id >> stackId
-      String region = 'r1'
-      String instanceType = 'm1.small'
-      String image = 'ubuntu-latest'
-      int maxSize = 5
-      int minSize = 3
-      int desiredSize = 4
-      String networkId = '1234'
-      String subnetId = '9999'
-      List loadBalancerIds = ['5678']
-      List securityGroups = ['sg1']
-      String resourceFileName = 'servergroup_resource'
-      ServerGroupParameters parameters = new ServerGroupParameters(instanceType: instanceType, image: image,
-        maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, networkId: networkId, subnetId: subnetId,
-        loadBalancers: loadBalancerIds, securityGroups: securityGroups, rawUserData: 'echo foobar', tags: ['foo': 'bar'],
-        sourceUserDataType: 'Text', sourceUserData: 'echo foobar', zones: ["az1","az2"], schedulerHints: ["key": "value"])
-      Map params = [
-        flavor               : parameters.instanceType,
-        image                : parameters.image,
-        max_size             : "$parameters.maxSize".toString(),
-        min_size             : "$parameters.minSize".toString(),
-        desired_size         : "$parameters.desiredSize".toString(),
-        network_id           : parameters.networkId,
-        subnet_id            : parameters.subnetId,
-        load_balancers       : parameters.loadBalancers.join(','),
-        security_groups      : parameters.securityGroups.join(','),
-        autoscaling_type     : null,
-        scaleup_cooldown     : null,
-        scaleup_adjustment   : null,
-        scaleup_period       : null,
-        scaleup_threshold    : null,
-        scaledown_cooldown   : null,
-        scaledown_adjustment : null,
-        scaledown_period     : null,
-        scaledown_threshold  : null,
-        source_user_data_type: 'Text',
-        source_user_data     : 'echo foobar',
-        tags                 : '{"foo":"bar"}',
-        user_data            : parameters.rawUserData,
-        zones                : 'az1,az2',
-        scheduler_hints      : '{"key":"value"}'
-      ]
-      String template = "foo: bar"
-      Map subtmpl = [sub: "foo: bar"]
-      List tags = loadBalancerIds.collect { "lb-${it}" }
-
-    when:
-      provider.updateStack(region, stackName, stackId, template, subtmpl, parameters, tags)
-
-    then:
-      1 * stackService.update(stackName, stackId, _ as StackUpdate) >> { String name, String id, StackUpdate su ->
-        assert name == stackName
-        assert id == stackId
-        assert su.parameters.toString() == params.toString()
-        success
-      }
-      noExceptionThrown()
-  }
-
-  def "resize stack failed - failed status"() {
-    setup:
-      ActionResponse failed = ActionResponse.actionFailed('ERROR', 500)
-      String stackName = 'stackofpancakesyumyum'
-      String stackId = UUID.randomUUID().toString()
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-      mockStack.name >> stackName
-      mockStack.id >> stackId
-      String region = 'r1'
-      String instanceType = 'm1.small'
-      String image = 'ubuntu-latest'
-      int maxSize = 5
-      int minSize = 3
-      String networkId = '1234'
-      List loadBalancerIds = ['5678']
-      List securityGroups = ['sg1']
-      ServerGroupParameters parameters = new ServerGroupParameters(instanceType: instanceType, image: image, maxSize: maxSize, minSize: minSize, networkId: networkId, loadBalancers: loadBalancerIds, securityGroups: securityGroups)
-      String template = "foo: bar"
-      Map subtmpl = [sub: "foo: bar"]
-      List tags = loadBalancerIds.collect { "lb-${it}" }
-
-    when:
-      provider.updateStack(region, stackName, stackId, template, subtmpl, parameters, tags)
-
-    then:
-      1 * stackService.update(stackName, stackId, _ as StackUpdate) >> { String name, String id, StackUpdate su ->
-        failed
-      }
-      thrown(OpenstackProviderException)
-  }
-
-  def "resize stack failed - exception thrown"() {
-    setup:
-      String stackName = 'stackofpancakesyumyum'
-      String stackId = UUID.randomUUID().toString()
-      HeatService heatService = Mock(HeatService)
-      StackService stackService = Mock(StackService)
-      mockClient.heat() >> heatService
-      heatService.stacks() >> stackService
-      Stack mockStack = Mock(Stack)
-      mockStack.name >> stackName
-      mockStack.id >> stackId
-      String region = 'r1'
-      String instanceType = 'm1.small'
-      String image = 'ubuntu-latest'
-      int maxSize = 5
-      int minSize = 3
-      String networkId = '1234'
-      List loadBalancerIds = ['5678']
-      List securityGroups = ['sg1']
-      ServerGroupParameters parameters = new ServerGroupParameters(instanceType: instanceType, image: image, maxSize: maxSize, minSize: minSize, networkId: networkId, loadBalancers: loadBalancerIds, securityGroups: securityGroups)
-      String template = "foo: bar"
-      Map subtmpl = [sub: "foo: bar"]
-      List tags = loadBalancerIds.collect { "lb-${it}" }
-
-    when:
-      provider.updateStack(region, stackName, stackId, template, subtmpl, parameters, tags)
-
-    then:
-      1 * stackService.update(stackName, stackId, _ as StackUpdate) >> { throw new Exception('foo') }
-      thrown(OpenstackProviderException)
-  }
-
-}
diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackRequestHandlerSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackRequestHandlerSpec.groovy
deleted file mode 100644
index 20391c191bb..00000000000
--- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackRequestHandlerSpec.groovy
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.client
-
-import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException
-import org.openstack4j.model.common.ActionResponse
-import spock.lang.Specification
-
-/**
- *
- */
-class OpenstackRequestHandlerSpec extends Specification {
-
-  OpenstackRequestHandler provider = new Provider()
-
-  def "handle request succeeds"() {
-    setup:
-      def success = ActionResponse.actionSuccess()
-
-    when:
-      def response = provider.handleRequest { success }
-
-    then:
-      success == response
-      noExceptionThrown()
-  }
-
-  def "handle request fails with failed action request"() {
-    setup:
-      def failed = ActionResponse.actionFailed("foo", 500)
-
-    when:
-      provider.handleRequest { failed }
-
-    then:
-      Exception ex = thrown(OpenstackProviderException)
-      ex.message.contains("foo")
-      ex.message.contains("500")
-  }
-
-  def "handle request fails with closure throwing exception"() {
-    setup:
-      def exception = new Exception("foo")
-
-    when:
-      provider.handleRequest { throw exception }
-
-    then:
-      Exception ex = thrown(OpenstackProviderException)
-      ex.cause == exception
-      ex.cause.message.contains("foo")
-  }
-
-  def "handle request non-action response"() {
-    setup:
-      def object = new Object()
-
-    when:
-      def response = provider.handleRequest { object }
-
-    then:
-      object == response
-      noExceptionThrown()
-  }
-
-  def "handle request null response"() {
-    when:
-      def response = provider.handleRequest { null }
-
-    then:
-      response == null
-      noExceptionThrown()
-  }
-
-  static class Provider implements OpenstackRequestHandler {}
-
-}
diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftV1ProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftV1ProviderSpec.groovy
deleted file mode 100644
index b406c712ff5..00000000000
--- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/client/OpenstackSwiftV1ProviderSpec.groovy
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2016 Target, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.client
-
-import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException
-import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException
-import org.openstack4j.api.storage.ObjectStorageObjectService
-import org.openstack4j.api.storage.ObjectStorageService
-import org.openstack4j.core.transport.HttpResponse
-import org.openstack4j.model.common.DLPayload
-import org.springframework.http.HttpStatus
-import spock.lang.Unroll
-
-class OpenstackSwiftV1ProviderSpec extends OpenstackClientProviderSpec {
-
-  def swift
-  def objectsService
-  def payload
-  def response
-  def container = 'test-container'
-  def name = 'some/path/to/object'
-  def userData = '#!/bin/sh...some user data script'
-
-  def setup() {
-    swift = Mock(ObjectStorageService)
-    mockClient.objectStorage() >> swift
-    objectsService = Mock(ObjectStorageObjectService)
-    swift.objects() >> objectsService
-    payload = Mock(DLPayload)
-    response = Mock(HttpResponse)
-  }
-
-  def "reads object from swift"() {
-    when:
-      def object = provider.readSwiftObject(region, container, name)
-
-    then:
-      object == userData
-      1 * objectsService.download(container, name) >> payload
-      1 * payload.getHttpResponse() >> response
-      1 * response.getStatus() >> 200
-      1 * response.getEntity(String) >> userData
-  }
-
-  def "did not get a payload for swift object"() {
-    when:
-      provider.readSwiftObject(region, container, name)
-
-    then:
-      thrown(OpenstackResourceNotFoundException)
-      1 * objectsService.download(container, name) >> null
-  }
-
-  @Unroll
-  def "bad status from response #status"() {
-    when:
-      provider.readSwiftObject(region, container, name)
-
-    then:
-      thrown(OpenstackProviderException)
-      1 * objectsService.download(container, name) >> payload
-      1 * payload.getHttpResponse() >> response
-      2 * response.getStatus() >> status
-
-    where:
-      status << [100, 201, 204, 302, 400, 418, 500]
-  }
-
-  def "unable to get entity from response"() {
-    when:
-      String actual = provider.readSwiftObject(region, container, name)
-
-    then:
-      !actual
-      noExceptionThrown()
-      1 * objectsService.download(container, name) >> payload
-      1 * payload.getHttpResponse() >> response
-      1 * response.getStatus() >> 200
-      1 * response.getEntity(String) >> null
-  }
-}
diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/controllers/OpenstackImageLookupControllerSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/controllers/OpenstackImageLookupControllerSpec.groovy
deleted file mode 100644
index 667385c4a1f..00000000000
--- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/controllers/OpenstackImageLookupControllerSpec.groovy
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Copyright 2016 Target Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.netflix.spinnaker.clouddriver.openstack.controllers
-
-import com.google.common.collect.Sets
-import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage
-import com.netflix.spinnaker.clouddriver.openstack.provider.ImageProvider
-import redis.clients.jedis.exceptions.JedisException
-import spock.lang.Specification
-import spock.lang.Unroll
-
-import java.lang.Void as Should
-import java.util.regex.Pattern
-
-class OpenstackImageLookupControllerSpec extends Specification {
-
-  OpenstackImageLookupController controller
-  ImageProvider imageProvider
-
-  def setup() {
-    imageProvider = Mock(ImageProvider)
-    controller = new OpenstackImageLookupController(imageProvider)
-  }
-
-  Should 'search for all images'() {
-    given:
-      String account = null
-      String query = null
-      String region = null
-
-      OpenstackImage imageA = Mock(OpenstackImage)
-      OpenstackImage imageB = Mock(OpenstackImage)
-      Set<OpenstackImage> imageSetA = Sets.newHashSet(imageA)
-      Set<OpenstackImage> imageSetB = Sets.newHashSet(imageB)
-      Map<String, Set<OpenstackImage>> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB]
-
-    when:
-      Set<OpenstackImage> result = controller.find(account, query, region)
-
-    then:
-      1 * imageProvider.listImagesByAccount() >> imagesByAccounts
-      3 * imageA.name >> 'image'
-      3 * imageB.name >> 'image'
-      result == Sets.union(imageSetA, imageSetB)
-      noExceptionThrown()
-  }
-
-  Should 'search for all images - no images'() {
-    given:
-      String account = null
-      String query = null
-      String region = null
-
-    when:
-      Set<OpenstackImage> result = controller.find(account, query, region)
-
-    then:
-      1 * imageProvider.listImagesByAccount() >> [:]
-      result == [] as Set
-      noExceptionThrown()
-  }
-
-  Should 'search for images by account only'() {
-    given:
-      String account = 'test'
-      String query = null
-      String region = null
-
-      OpenstackImage image = Mock(OpenstackImage)
-      Set<OpenstackImage> imageSet = Sets.newHashSet(image)
-      Map<String, Set<OpenstackImage>> imagesByAccounts = [(account): imageSet]
-
-    when:
-      Set<OpenstackImage> result = controller.find(account, query, region)
-
-    then:
-      1 * imageProvider.listImagesByAccount() >> imagesByAccounts
-      2 * image.name >> 'image'
-      result == imageSet
-      noExceptionThrown()
-  }
-
-  Should 'search for images by account only - not found'() {
-    given:
-      String account = 'test'
-      String query = null
-      String region = null
-
-      OpenstackImage image = Mock(OpenstackImage)
-      Set<OpenstackImage> imageSet = Sets.newHashSet(image)
-      Map<String, Set<OpenstackImage>> imagesByAccounts = ['stage': imageSet]
-
-    when:
-      Set<OpenstackImage> result = controller.find(account, query, region)
-
-    then:
-      1 * imageProvider.listImagesByAccount() >> imagesByAccounts
-      0 * image.name >> 'image'
-      result == [] as Set
-      noExceptionThrown()
-  }
-
-  Should 'search for images query only'() {
-    given:
-      String account = null
-      String query = 'im'
-      String region = null
-
-      OpenstackImage imageA = Mock(OpenstackImage) { getName() >> 'imageA' }
-      OpenstackImage imageB = Mock(OpenstackImage) { getName() >> 'mock' }
-      Set<OpenstackImage> imageSetA = Sets.newHashSet(imageA)
-      Set<OpenstackImage> imageSetB = Sets.newHashSet(imageB)
-      Map<String, Set<OpenstackImage>> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB]
-
-    when:
-      Set<OpenstackImage> result = controller.find(account, query, region)
-
-    then:
-      1 * imageProvider.listImagesByAccount() >> imagesByAccounts
-      result == imageSetA
-      noExceptionThrown()
-  }
-
-  Should 'search for images query only - not found'() {
-    given:
-      String account = null
-      String query = 'tes'
-      String region = null
-
-      OpenstackImage imageA = Mock(OpenstackImage) { getName() >> 'imageA' }
-      OpenstackImage imageB = Mock(OpenstackImage) { getName() >> 'mock' }
-      Set<OpenstackImage> imageSetA = Sets.newHashSet(imageA)
imageSetB = Sets.newHashSet(imageB) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> imagesByAccounts - result == [] as Set - noExceptionThrown() - } - - Should 'search for images account and query'() { - given: - String account = 'accountA' - String query = 'im' - String region = null - - OpenstackImage imageA = Mock(OpenstackImage) { getName() >> 'imageA' } - OpenstackImage imageB = Mock(OpenstackImage) { getName() >> 'mock' } - Set imageSetA = Sets.newHashSet(imageA, imageB) - Set imageSetB = Sets.newHashSet(imageB) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> imagesByAccounts - result == Sets.newHashSet(imageA) - noExceptionThrown() - } - - Should 'search for images account and query - not found'() { - given: - String account = 'accountA' - String query = 'tes' - String region = null - - OpenstackImage imageA = Mock(OpenstackImage) { getName() >> 'imageA' } - OpenstackImage imageB = Mock(OpenstackImage) { getName() >> 'mock' } - Set imageSetA = Sets.newHashSet(imageA, imageB) - Set imageSetB = Sets.newHashSet(imageB) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> imagesByAccounts - result == [] as Set - noExceptionThrown() - } - - Should 'search for images throw exception'() { - given: - String account = 'accountA' - String query = 'tes' - String region = null - - Throwable throwable = new JedisException('exception') - - when: - controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> { throw throwable } - Throwable thrownException = thrown(JedisException) - thrownException == throwable - } - - Should 'search for images account, query, and region'() { - given: - String account = 'accountA' - String query = 'im' - String region = 'east' - - OpenstackImage imageA = Mock(OpenstackImage) { - getName() >> 'imageA' - getRegion() >> 'east' - } - OpenstackImage imageB = Mock(OpenstackImage) { - getName() >> 'mock' - getRegion() >> 'central' - } - OpenstackImage imageC = Mock(OpenstackImage) { - getName() >> 'imageC' - getRegion() >> 'east' - } - - Set imageSetA = Sets.newHashSet(imageA, imageB) - Set imageSetB = Sets.newHashSet(imageC) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> imagesByAccounts - result == Sets.newHashSet(imageA) - noExceptionThrown() - } - - Should 'search for images account, query, and region - not found'() { - given: - String account = 'accountA' - String query = 'im' - String region = 'east' - - OpenstackImage imageA = Mock(OpenstackImage) { - getName() >> 'imageA' - getRegion() >> 'central' - } - OpenstackImage imageB = Mock(OpenstackImage) { - getName() >> 'mock' - getRegion() >> 'east' - } - OpenstackImage imageC = Mock(OpenstackImage) { - getName() >> 'imageC' - getRegion() >> 'east' - } - Set imageSetA = Sets.newHashSet(imageA, imageB) - Set imageSetB = Sets.newHashSet(imageC) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> 
imagesByAccounts - result == [] as Set - noExceptionThrown() - } - - Should 'search for images query and region'() { - given: - String account = null - String query = 'im' - String region = 'east' - - OpenstackImage imageA = Mock(OpenstackImage) { - getName() >> 'imageA' - getRegion() >> 'east' - } - OpenstackImage imageB = Mock(OpenstackImage) { - getName() >> 'imageB' - getRegion() >> 'central' - } - Set imageSetA = Sets.newHashSet(imageA) - Set imageSetB = Sets.newHashSet(imageB) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> imagesByAccounts - result == imageSetA - noExceptionThrown() - } - - Should 'search for images query and region - not found'() { - given: - String account = null - String query = 'im' - String region = 'east' - - OpenstackImage imageA = Mock(OpenstackImage) { - getName() >> 'imageA' - getRegion() >> 'central' - } - OpenstackImage imageB = Mock(OpenstackImage) { - getName() >> 'mock' - getRegion() >> 'central' - } - Set imageSetA = Sets.newHashSet(imageA) - Set imageSetB = Sets.newHashSet(imageB) - Map> imagesByAccounts = [accountA: imageSetA, accountB: imageSetB] - - when: - Set result = controller.find(account, query, region) - - then: - 1 * imageProvider.listImagesByAccount() >> imagesByAccounts - result == [] as Set - noExceptionThrown() - } - - @Unroll - Should 'resolve to pattern - #testCase'() { - when: - Pattern result = controller.resolveQueryToPattern(query) - - then: - result.pattern() == expected - - where: - testCase | query | expected - 'default' | null | '.*' - 'normal' | 'ubuntu' | '.*\\Qubuntu\\E.*' - 'wildcard 1' | 'ub*' | '\\Qub\\E.*' - 'wildcard 2' | '*test' | '.*\\Qtest\\E' - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/OpenstackAtomicOperationConverterHelperSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/OpenstackAtomicOperationConverterHelperSpec.groovy deleted file mode 100644 index 145c2cb1ea6..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/OpenstackAtomicOperationConverterHelperSpec.groovy +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import spock.lang.Shared -import spock.lang.Specification - -class OpenstackAtomicOperationConverterHelperSpec extends Specification { - - def "handles the account name"() { - given: - def openstackCredentials = Mock(OpenstackCredentials) - def creds = Mock(OpenstackNamedAccountCredentials) { - 1 * getCredentials() >> openstackCredentials - } - def credentialsSupport = Mock(AbstractAtomicOperationsCredentialsSupport) { - 1 * getCredentialsObject('os-account') >> creds - 1 * getObjectMapper() >> new ObjectMapper() - } - - when: - def description = OpenstackAtomicOperationConverterHelper.convertDescription(input, credentialsSupport, OpenstackAtomicOperationDescription) - - then: - description - description.account == 'os-account' - description.region == 'west' - description.credentials == openstackCredentials - - where: - input << [ - [account: 'os-account', region: 'west'], - [credentials: 'os-account', region: 'west'], - [account: 'os-account', credentials: 'something else', region: 'west'] - ] - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DeployOpenstackAtomicOperationConverterSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DeployOpenstackAtomicOperationConverterSpec.groovy deleted file mode 100644 index 417f3255c12..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/converters/servergroup/DeployOpenstackAtomicOperationConverterSpec.groovy +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.converters.servergroup - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.DeployOpenstackAtomicOperation -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import spock.lang.Shared -import spock.lang.Specification - -class DeployOpenstackAtomicOperationConverterSpec extends Specification { - private static final String ACCOUNT_NAME = 'myaccount' - private static final APPLICATION = "app" - private static final STACK = "stack" - private static final REGION = "region" - private static final DETAILS = "details" - private static final String HEAT_TEMPLATE = '{"heat_template_version":"2013-05-23",' + - '"description":"Simple template to test heat commands",' + - '"parameters":{"flavor":{' + - '"default":"1 GB General Purpose v1","type":"string"}},' + - '"resources":{"hello_world":{"type":"OS::Nova::Server",' + - '"properties":{"key_name":"heat_key","flavor":{"get_param":"flavor"},' + - '"image":"Ubuntu 12.04 LTS (Precise Pangolin) (PV)",' + - '"user_data":"#!/bin/bash -xv\\necho \\"hello world\\" > /root/hello-world.txt\\n"}}}}' - private static final Integer TIMEOUT_MINS = 5 - private static final Map PARAMS_MAP = Collections.emptyMap() - private static final Boolean DISABLE_ROLLBACK = false - - @Shared - ObjectMapper mapper = new ObjectMapper() - - @Shared - DeployOpenstackAtomicOperationConverter converter - - def mockCredentials - - def setupSpec() { - converter = new DeployOpenstackAtomicOperationConverter(objectMapper: mapper) - } - - def setup() { - mockCredentials = Mock(OpenstackNamedAccountCredentials) - converter.accountCredentialsProvider = Mock(AccountCredentialsProvider) - } - - void "DeployOpenstackAtomicOperationConverter type returns DeployOpenstackAtomicOperation and DeployOpenstackAtomicOperationDescription"() { - setup: - def input = [stack: STACK, - application: APPLICATION, - freeFormDetails: DETAILS, - region: REGION, - heatTemplate: HEAT_TEMPLATE, - parameters: PARAMS_MAP, - disableRollback: DISABLE_ROLLBACK, - timeoutMins: TIMEOUT_MINS, - account: ACCOUNT_NAME] - when: - def description = converter.convertDescription(input) - - then: - 1 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - description instanceof DeployOpenstackAtomicOperationDescription - - when: - def operation = converter.convertOperation(input) - - then: - 1 * converter.accountCredentialsProvider.getCredentials(_) >> mockCredentials - operation instanceof DeployOpenstackAtomicOperation - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ServerGroupParametersSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ServerGroupParametersSpec.groovy deleted file mode 100644 index f6ad31ac399..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/description/servergroup/ServerGroupParametersSpec.groovy +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.ServerGroupConstants -import spock.lang.Ignore -import spock.lang.Specification -import spock.lang.Unroll - -class ServerGroupParametersSpec extends Specification { - - @Unroll - def "resolves resource filename: #description"() { - expect: - ServerGroupParameters.resolveResourceFilename(params) == expected - - where: - description | params | expected - 'from stack params if present' | ['resource_filename': 'valueinparam'] | 'valueinparam' - 'from constants if not in stack params' | [:] | ServerGroupConstants.SUBTEMPLATE_FILE - } - - def "converts params object to map"() { - given: - ServerGroupParameters params = createServerGroupParams() - Map expected = getMap() - - when: - Map result = params.toParamsMap() - - then: - //need to compare string values due to some map formatting issue - result.sort{ a, b -> a.key <=> b.key }.toString() == expected.sort{ a, b -> a.key <=> b.key }.toString() - } - - @Unroll - def "toParamsMap - output excludes #mapKey when null in the input"() { - given: - ServerGroupParameters params = createServerGroupParams() - params[serverGroupProperty] = null - - when: - Map result = params.toParamsMap() - - then: - //need to compare string values due to some map formatting issue - ! result.containsKey(mapKey) - - where: - description | mapKey | serverGroupProperty - "no zones present" | "zones" | "zones" - "no scheduler hints present" | "scheduler_hints" | "schedulerHints" - "no resource filename present" | "resource_filename" | "resourceFilename" - } - - def "converts map to params object"() { - given: - ServerGroupParameters expected = createServerGroupParams() - Map params = getMap() - - when: - def result = ServerGroupParameters.fromParamsMap(params) - - then: - result == expected - } - - @Unroll - def "converts unicode list to list: #input"() { - when: - List result = ServerGroupParameters.unescapePythonUnicodeJsonList(input) - - then: - result == expected - - where: - input | expected - 'test' | ['test'] - 'test, test2' | ['test', 'test2'] - '[test]' | ['test'] - '[u\'test\']' | ['test'] - 'u\'test\'' | ['test'] - '[u\'test\',u\'test\',u\'test\']' | ['test', 'test', 'test'] - "[u\'\',u'']" | ["", ""] -// Is this really how it should behave? - null | [] - "[]" | [""] -// Shouldn't these work, too? -// '["test"]' | ["test"] -// '["test", "test"]' | ["test", "test"] - } - - @Unroll - def "converts #inType to map: #description"() { - // Older versions of OpenStack return python-encoded dictionaries for several fields. Newer ones have converted them - // to JSON. For forward and backward compatibility, we need to handle either. 
- - when: - Map result = ServerGroupParameters.unescapePythonUnicodeJsonMap(input) - - then: - result == expected - - where: - inType | description | input | expected - 'python' | 'one entry' | '{u\'test\':u\'test\'}' | ['test': 'test'] - 'python' | 'multiple entries' | '{u\'test\':u\'test\',u\'a\':u\'a\'}' | ['test': 'test', 'a': 'a'] - 'python' | 'spaces in value' | '{u\'test\': u\'this is a string\'}' | ['test': "this is a string"] - 'python' | 'comma in value' | '{u\'test\':u\'test1,test2\'}' | ['test': 'test1,test2'] - 'python' | 'colon in value' | '{u\'url\':u\'http://localhost:8080\'}' | ['url': 'http://localhost:8080'] - 'python' | 'value is Empty' | '{u\'test\':u\'\'}' | ['test': ''] - 'python' | 'value is None' | '{u\'test\': None}' | ['test': null] - 'python' | 'multiple None values' | "{u'test': \t None \t \n, u'test2': None\n}" | ['test': null, 'test2': null] - 'python' | 'string contains "None"' | '{u\'test\': u\'And None Either\'}' | ['test': "And None Either"] - 'python' | 'integer value' | '{u\'port\': 1337}' | ['port': 1337] - 'python' | 'single quotes in value' | '{u\'test\': u"\'this is a string\'"}' | ['test': "'this is a string'"] - 'python' | '1 single quote in value' | '{u\'test\': u"Surf\'s up!"}' | ['test': "Surf\'s up!"] - 'python' | 'string ends in "u"' | '{u\'SuperValu\':u\'test\',u\'a\':u\'a\'}' | ['SuperValu': 'test', 'a': 'a'] - 'python' | 'json object in value' | '{u\'health\':{u\'http\':u\'http://lh:80\',u\'a\':u\'b\'}}' | ['health': '{"http":"http://lh:80","a":"b"}'] - 'python' | 'layers of objects' | '{u\'a\': {u\'b\': {u\'c\': u\'d\', u\'e\': u\'f\'}}}' | ['a': '{"b":{"c":"d","e":"f"}}'] - 'json' | 'empty map' | '{}' | [:] - 'json' | 'one entry' | '{"test":"test"}' | ['test': 'test'] - 'json' | 'multiple entries' | '{"test":"test","a": "a"}' | ['test': 'test', 'a': 'a'] - 'json' | 'spaces in value' | '{"test": "this is a string"}' | ['test': "this is a string"] - 'json' | 'comma in value' | '{"test":"test1,test2"}' | ['test': 'test1,test2'] - 'json' | 'colon in value' | '{"url":"http://lh:80"}' | ['url': 'http://lh:80'] - 'json' | 'value is Empty' | '{"test": ""}' | ['test': ''] - 'json' | 'value is null' | '{"test": null}' | ['test': null] - 'json' | 'multiple null values' | '{"test": \t null \t, "test2":\tnull\n}' | ['test': null, 'test2': null] - 'json' | 'string contains "None"' | '{"test": "And None Either"}' | ['test': "And None Either"] - 'json' | 'integer value' | '{"port": 1337}' | ['port': 1337] - 'json' | 'single quotes in value' | '{"test": "\'this is a string\'"}' | ['test': "'this is a string'"] - 'json' | '1 single quote in value' | '{"test": "Surf\'s up!"}' | ['test': "Surf\'s up!"] - 'json' | 'string ends in "u"' | '{"SuperValu":"test","a":"a"}' | ['SuperValu': 'test', 'a': 'a'] - 'json' | 'json object in value' | '{"health":{"http":"http://lh:80", "a": "b"}}' | ['health': '{"http":"http://lh:80","a":"b"}'] - 'json' | 'layers of objects' | '{"a": {"b": {"c": "d", "e": "f"}}}' | ['a': '{"b":{"c":"d","e":"f"}}'] - } - - @Ignore - def createServerGroupParams() { - ServerGroupParameters.Scaler scaleup = new ServerGroupParameters.Scaler(cooldown: 60, period: 60, adjustment: 1, threshold: 50) - ServerGroupParameters.Scaler scaledown = new ServerGroupParameters.Scaler(cooldown: 60, period: 600, adjustment: -1, threshold: 15) - new ServerGroupParameters(instanceType: "m1.medium", - image: "image", - maxSize: 5, minSize: 3, desiredSize: 4, - networkId: "net", - subnetId: "sub", - loadBalancers: ["poop"], - securityGroups: ["sg1"], - 
autoscalingType: ServerGroupParameters.AutoscalingType.CPU, - scaleup: scaleup, - scaledown: scaledown, - rawUserData: "echo foobar", - tags: ["foo": "bar"], - sourceUserDataType: 'Text', - sourceUserData: 'echo foobar', - zones: ["az1", "az2"], - schedulerHints: ["key": "value"], - resourceFilename: "fileMcFileface") - } - - @Ignore - def getMap() { - [flavor: 'm1.medium', - image: 'image', - max_size: 5, - min_size: 3, - desired_size: 4, - network_id: 'net', - subnet_id: 'sub', - load_balancers: 'poop', - security_groups: 'sg1', - autoscaling_type: 'cpu_util', - scaleup_cooldown: 60, - scaleup_adjustment: 1, - scaleup_period: 60, - scaleup_threshold: 50, - scaledown_cooldown: 60, - scaledown_adjustment: -1, - scaledown_period: 600, - scaledown_threshold: 15, - source_user_data_type: 'Text', - source_user_data: 'echo foobar', - tags: '{"foo":"bar"}', - user_data: "echo foobar", - zones: "az1,az2", - scheduler_hints: '{"key":"value"}', - resource_filename: "fileMcFileface" - ] - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/OpenstackUserDataProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/OpenstackUserDataProviderSpec.groovy deleted file mode 100644 index f87510153e6..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/OpenstackUserDataProviderSpec.groovy +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops - -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class OpenstackUserDataProviderSpec extends Specification { - - def "combines common and custom user data"() { - given: - def credentials = new OpenstackNamedAccountCredentials('account', 'test', 'main', 'user', 'pw', 'project', 'domain', - 'endpoint', [], false, '', null, null, null, '/some/user/data/file/udf') - def provider = Spy(OpenstackUserDataProvider, constructorArgs: [credentials]) { - it.getFileContents(_) >> commonUserData - } - def serverGroupName = 'app-stack-detail-v001' - def region = 'west' - - when: - def userData = provider.getUserData(serverGroupName, region, customUserData) - - then: - userData == expectedUserData - noExceptionThrown() - - where: - customUserData | commonUserData || expectedUserData - null | null | '' - '' | null | '' - null | '' | '' - null | 'echo "common user data"' | 'echo "common user data"\n' - '' | 'echo "common user data"' | 'echo "common user data"\n' - 'echo "custom user data"' | null | 'echo "custom user data"' - 'echo "custom user data"' | '' | 'echo "custom user data"' - 'echo "custom user data"' | 'echo "common user data"' | 'echo "common user data"\necho "custom user data"' - '%%account%%' | '%%region%%' | 'west\n%%account%%' - } - - - def "handles unreadable user data file"() { - given: - def credentials = new OpenstackNamedAccountCredentials('account', 'test', 'main', 'user', 'pw', 'project', 'domain', - 'endpoint', [], false, '', null, null, null, userDataFile) - def provider = new OpenstackUserDataProvider(credentials) - def serverGroupName = 'app-stack-detail-v001' - def region = 'west' - - when: - def userData = provider.getUserData(serverGroupName, region, 'custom user data') - - then: - userData == 'custom user data' - noExceptionThrown() - - where: - userDataFile << [null, '', '/a/non/existent/file/udf'] - } - - def "ensure replace tokens works"() { - given: - def credentials = new OpenstackNamedAccountCredentials('my-account', 'test', 'main', 'user', 'pw', 'project', 'domain', - 'endpoint', [], false, '', null, null, null, '/user/data/file/udf') - def provider = Spy(OpenstackUserDataProvider, constructorArgs: [credentials]) { - it.getFileContents(_) >> rawUserData - } - def serverGroupName = 'myapp-dev-green-v001' - def region = 'west' - - when: - def userData = provider.getUserData(serverGroupName, region, '') - - then: - userData == expectedUserData - - where: - rawUserData | expectedUserData - '' | '' - null | '' - '%%account%%' | 'my-account\n' - '%account%' | '%account%\n' - '%%accounttype%%' | 'main\n' - '%%env%%' | 'test\n' - '%%region%%' | 'west\n' - '%%env%%\n%%region%%' | 'test\nwest\n' - '%%app%%' | 'myapp\n' - '%%stack%%' | 'dev\n' - '%%detail%%' | 'green\n' - '%%cluster%%' | 'myapp-dev-green\n' - '%%group%%' | 'myapp-dev-green-v001\n' - '%%autogrp%%' | 'myapp-dev-green-v001\n' - '%%launchconfig%%' | 'myapp-dev-green-v001\n' - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/AbstractEnableDisableInstancesInDiscoveryAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/AbstractEnableDisableInstancesInDiscoveryAtomicOperationSpec.groovy deleted file mode 100644 index 9042ba5cf1c..00000000000 ---
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/discovery/AbstractEnableDisableInstancesInDiscoveryAtomicOperationSpec.groovy +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.discovery - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.consul.deploy.ops.EnableDisableConsulInstance -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -@Unroll -class AbstractEnableDisableInstancesInDiscoveryAtomicOperationSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - private static final INSTANCE_IDS = ['instance1', 'instance2', 'instance3'] - - def credentials - def description - String region = 'region1' - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - initDescription() - } - - def initDescription(boolean consulEnabled = true) { - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - ConsulConfig consulConfig = Mock(ConsulConfig) { - getEnabled() >> consulEnabled - applyDefaults() >> {} - } - OpenstackNamedAccountCredentials credz = new OpenstackNamedAccountCredentials("name", "test", "main", "user", "pw", "tenant", "domain", "endpoint", [], false, "", new OpenstackConfigurationProperties.LbaasConfig(pollTimeout: 60, pollInterval: 5), new OpenstackConfigurationProperties.StackConfig(pollTimeout: 600, pollInterval: 5), consulConfig, null) - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new OpenstackInstancesDescription(credentials: credentials, region: region, instanceIds: INSTANCE_IDS, account: ACCOUNT_NAME) - } - - def "should perform #opClass"() { - given: - GroovyMock(EnableDisableConsulInstance, global: true) - @Subject def operation = opClass.newInstance(description) - - when: - operation.operate([]) - - then: - INSTANCE_IDS.each { - 1 * credentials.provider.getIpForInstance(region, it) >> '10.0.0.0' - 1 * EnableDisableConsulInstance.operate(_, _, _) >> {} - } - - where: - opClass << [EnableInstancesInDiscoveryOperation, 
DisableInstancesInDiscoveryOperation] - } - - def "should not perform with missing address #opClass"() { - given: - GroovyMock(EnableDisableConsulInstance, global: true) - @Subject def operation = opClass.newInstance(description) - - when: - operation.operate([]) - - then: - INSTANCE_IDS.each { - 1 * credentials.provider.getIpForInstance(region, it) >> '' - 0 * EnableDisableConsulInstance.operate(_, _, _) >> {} - } - - where: - opClass << [EnableInstancesInDiscoveryOperation, DisableInstancesInDiscoveryOperation] - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/AbstractRegistrationOpenstackInstancesAtomicOperationUnitSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/AbstractRegistrationOpenstackInstancesAtomicOperationUnitSpec.groovy deleted file mode 100644 index a0799f43f55..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/AbstractRegistrationOpenstackInstancesAtomicOperationUnitSpec.groovy +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.network.ext.LbProvisioningStatus -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.openstack.networking.domain.ext.ListItem -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -@Unroll -class AbstractRegistrationOpenstackInstancesAtomicOperationUnitSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - private static final INSTANCE_IDS = ['instance1','instance2','instance3'] - private static final LB_IDS = ['lb1','lb2','lb3'] - - def credentials - def description - LoadBalancerV2 loadBalancer - LoadBalancerV2 mockLB - Map listenerMap - List listeners = [new ListItem(id: 
UUID.randomUUID().toString()), new ListItem(id: UUID.randomUUID().toString())] - String ip = '1.2.3.4' - Integer port = 8100 - String subnetId = UUID.randomUUID().toString() - String memberId = UUID.randomUUID().toString() - String region = 'region1' - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = new OpenstackNamedAccountCredentials("name", "test", "main", "user", "pw", "tenant", "domain", "endpoint", [], false, "", new OpenstackConfigurationProperties.LbaasConfig(pollTimeout: 60, pollInterval: 5), new OpenstackConfigurationProperties.StackConfig(pollTimeout: 600, pollInterval: 5), new ConsulConfig(), null) - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new OpenstackInstancesRegistrationDescription(region: region, loadBalancerIds: LB_IDS, instanceIds: INSTANCE_IDS, weight: 1, account: ACCOUNT_NAME, credentials: credentials) - loadBalancer = Mock(LoadBalancerV2) { - it.listeners >> { listeners } - it.vipSubnetId >> { subnetId } - it.provisioningStatus >> { LbProvisioningStatus.ACTIVE} - } - mockLB = Mock(LoadBalancerV2) { - it.id >> { _ } - it.provisioningStatus >> { LbProvisioningStatus.ACTIVE } - } - listenerMap = (0..1).collectEntries { i -> - [(listeners[i].id):Mock(ListenerV2) { - it.defaultPoolId >> { 'poo' } - }] - } - } - - def "should perform #method"() { - given: - @Subject def operation = opClass.newInstance(description) - OpenstackClientProvider provider = credentials.provider - - when: - operation.operate([]) - - then: - LB_IDS.each { lbid -> - 1 * provider.getLoadBalancer(region, lbid) >> loadBalancer - INSTANCE_IDS.each { id -> - 1 * provider.getIpForInstance(region, id) >> ip - loadBalancer.listeners.each { listItem -> - 1 * provider.getListener(region, listItem.id) >> listenerMap[listItem.id] - if (method == 'registerInstancesWithLoadBalancer') { - 1 * provider.getInternalLoadBalancerPort(region, listItem.id) >> port - 1 * provider.addMemberToLoadBalancerPool(region, ip, listenerMap[listItem.id].defaultPoolId, subnetId, port, description.weight) - _ * provider.getLoadBalancer(region, lbid) >> mockLB - } else { - 1 * provider.getMemberIdForInstance(region, ip, listenerMap[listItem.id].defaultPoolId) >> memberId - 1 * provider.removeMemberFromLoadBalancerPool(region, listenerMap[listItem.id].defaultPoolId, memberId) - _ * provider.getLoadBalancer(region, lbid) >> mockLB - } - } - } - } - noExceptionThrown() - - where: - opClass << [RegisterOpenstackInstancesAtomicOperation, DeregisterOpenstackInstancesAtomicOperation] - method << ['registerInstancesWithLoadBalancer','deregisterInstancesFromLoadBalancer'] - } - - def "should throw exception when load balancer not found"() { - given: - @Subject def operation = opClass.newInstance(description) - OpenstackClientProvider provider = credentials.provider - - when: - operation.operate([]) - - then: - LB_IDS.each { lbid -> - provider.getLoadBalancer(region, lbid) >> { throw new OpenstackProviderException("foobar") } - } - Exception ex = thrown(OpenstackOperationException) - ex.cause.message == "foobar" - - where: - opClass << [RegisterOpenstackInstancesAtomicOperation, DeregisterOpenstackInstancesAtomicOperation] - } - - def "should throw exception when server has no IP"() { - given: - @Subject def operation = opClass.newInstance(description) - 
OpenstackClientProvider provider = credentials.provider - - when: - operation.operate([]) - - then: - LB_IDS.each { lbid -> - provider.getLoadBalancer(region, lbid) >> loadBalancer - INSTANCE_IDS.each { id -> - provider.getIpForInstance(region, id) >> { throw new OpenstackProviderException("foobar") } - } - } - Exception ex = thrown(OpenstackOperationException) - ex.cause.message == "foobar" - - where: - opClass << [RegisterOpenstackInstancesAtomicOperation, DeregisterOpenstackInstancesAtomicOperation] - } - - def "should throw exception when internal port is not found"() { - given: - @Subject def operation = opClass.newInstance(description) - OpenstackClientProvider provider = credentials.provider - - when: - operation.operate([]) - - then: - LB_IDS.each { lbid -> - provider.getLoadBalancer(region, lbid) >> loadBalancer - INSTANCE_IDS.each { id -> - provider.getIpForInstance(region, id) >> ip - loadBalancer.listeners.each { listItem -> - provider.getInternalLoadBalancerPort(region, listItem.id) >> { throw new OpenstackProviderException("foobar") } - } - } - } - Exception ex = thrown(OpenstackOperationException) - ex.cause.message == "foobar" - - where: - opClass << [RegisterOpenstackInstancesAtomicOperation] - } - - def "should throw exception when failing to add member"() { - given: - @Subject def operation = opClass.newInstance(description) - OpenstackClientProvider provider = credentials.provider - - when: - operation.operate([]) - - then: - LB_IDS.each { lbid -> - provider.getLoadBalancer(region, lbid) >> loadBalancer - INSTANCE_IDS.each { id -> - provider.getIpForInstance(region, id) >> ip - loadBalancer.listeners.each { listItem -> - provider.getListener(region, listItem.id) >> listenerMap[listItem.id] - provider.getInternalLoadBalancerPort(region, listItem.id) >> port - provider.addMemberToLoadBalancerPool(region, ip, listenerMap[listItem.id].defaultPoolId, subnetId, port, description.weight) >> { throw new OpenstackProviderException("foobar") } - } - } - } - Exception ex = thrown(OpenstackOperationException) - ex.cause.message == "foobar" - - where: - opClass << [RegisterOpenstackInstancesAtomicOperation] - } - - def "should throw exception when member id cannot be found for server instance"() { - given: - @Subject def operation = opClass.newInstance(description) - OpenstackClientProvider provider = credentials.provider - - when: - operation.operate([]) - - then: - LB_IDS.each { lbid -> - provider.getLoadBalancer(region, lbid) >> loadBalancer - INSTANCE_IDS.each { id -> - provider.getIpForInstance(region, id) >> ip - loadBalancer.listeners.each { listItem -> - provider.getListener(region, listItem.id) >> listenerMap[listItem.id] - provider.getMemberIdForInstance(region, ip, listenerMap[listItem.id].defaultPoolId) >> { - throw new OpenstackProviderException("foobar") - } - } - } - } - Exception ex = thrown(OpenstackOperationException) - ex.cause.message == "foobar" - - where: - opClass << [DeregisterOpenstackInstancesAtomicOperation] - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RebootOpenstackInstancesAtomicOperationUnitSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RebootOpenstackInstancesAtomicOperationUnitSpec.groovy deleted file mode 100644 index a9b10fb8cef..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/RebootOpenstackInstancesAtomicOperationUnitSpec.groovy 
+++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import spock.lang.Specification -import spock.lang.Subject - -class RebootOpenstackInstancesAtomicOperationUnitSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - private static final INSTANCE_IDS = ['instance1', 'instance2', 'instance3'] - - def credentials - def description - - String region = 'r1' - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = Mock(OpenstackNamedAccountCredentials) - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new OpenstackInstancesDescription(instanceIds: INSTANCE_IDS, account: ACCOUNT_NAME, credentials: credentials, region: region) - } - - def "should reboot instances"() { - given: - @Subject def operation = new RebootOpenstackInstancesAtomicOperation(description) - - when: - operation.operate([]) - - then: - INSTANCE_IDS.each { - 1 * credentials.provider.rebootInstance(region, it) - } - noExceptionThrown() - } - - def "should throw exception"() { - given: - @Subject def operation = new RebootOpenstackInstancesAtomicOperation(description) - - when: - operation.operate([]) - - then: - INSTANCE_IDS.each { - credentials.provider.rebootInstance(region, it) >> { throw new OpenstackOperationException("foobar") } - } - OpenstackOperationException ex = thrown(OpenstackOperationException) - ex.message == "foobar" - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/TerminateOpenstackInstancesAtomicOperationUnitSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/TerminateOpenstackInstancesAtomicOperationUnitSpec.groovy deleted file mode 100644 index fc595d5ccea..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/instance/TerminateOpenstackInstancesAtomicOperationUnitSpec.groovy +++ /dev/null @@ 
-1,120 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.instance - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.compute.Server -import org.openstack4j.model.heat.Resource -import org.openstack4j.model.heat.ResourceHealth -import org.openstack4j.model.heat.Stack -import spock.lang.Specification -import spock.lang.Subject - -class TerminateOpenstackInstancesAtomicOperationUnitSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - private static final INSTANCE_IDS = ['1-2-3-4','2-3-4-5','3-4-5-6'] - - def credentials - def description - - String region = 'r1' - String serverGroupName = 'asg1' - - Map servers - Stack stack - Stack asgStack - Resource asg - Resource instance - String asgId = 'asgId' - String resourceName = 'r' - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = Mock(OpenstackNamedAccountCredentials) - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new OpenstackInstancesDescription(serverGroupName: serverGroupName, instanceIds: INSTANCE_IDS, account: ACCOUNT_NAME, credentials: credentials, region: region) - servers = INSTANCE_IDS.collectEntries { id -> - [(id): Mock(Server) { it.id >> { id } ; it.name >> { id } }] - } - stack = Mock(Stack) { - it.name >> { 'stack' } - } - asg = Mock(Resource) { - it.physicalResourceId >> { asgId } - it.type >> { "OS::Heat::AutoScalingGroup" } - } - asgStack = Mock(Stack) { - it.id >> { 'id' } - it.name >> { 'name' } - } - instance = Mock(Resource) { - it.resourceName >> { resourceName } - } - } - - def "test pre update"() { - given: - @Subject def operation = new TerminateOpenstackInstancesAtomicOperation(description) - - when: - operation.preUpdate(stack) - - then: - 1 * credentials.provider.getAsgResourceForStack(region, stack) >> asg - 1 * credentials.provider.getStack(region, asgId) >> asgStack - INSTANCE_IDS.each { - 1 * credentials.provider.getServerInstance(region, it) >> servers[it] - 1 * credentials.provider.getInstanceResourceForStack(region, stack, 
servers[it].name) >> instance - 1 * credentials.provider.markStackResourceUnhealthy(region, 'name', 'id', resourceName, _ as ResourceHealth) - } - noExceptionThrown() - } - - def "should throw exception"() { - given: - @Subject def operation = new TerminateOpenstackInstancesAtomicOperation(description) - - when: - operation.preUpdate(stack) - - then: - 1 * credentials.provider.getAsgResourceForStack(region, stack) >> asg - 1 * credentials.provider.getStack(region, asgId) >> asgStack - INSTANCE_IDS.findAll { it == INSTANCE_IDS[0] }.each { - 1 * credentials.provider.getServerInstance(region, it) >> servers[it] - 1 * credentials.provider.getInstanceResourceForStack(region, stack, servers[it].name) >> instance - 1 * credentials.provider.markStackResourceUnhealthy(region, 'name', 'id', resourceName, _ as ResourceHealth) >> { throw new OpenstackOperationException("foobar") } - } - OpenstackOperationException ex = thrown(OpenstackOperationException) - ex.message == "foobar" - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/AbstractOpenstackLoadBalancerAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/AbstractOpenstackLoadBalancerAtomicOperationSpec.groovy deleted file mode 100644 index 5c8c617946d..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/AbstractOpenstackLoadBalancerAtomicOperationSpec.groovy +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup.ServerGroupConstants -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.LbProvisioningStatus -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.openstack.networking.domain.ext.ListItem -import spock.lang.Shared -import spock.lang.Specification - -class AbstractOpenstackLoadBalancerAtomicOperationSpec extends Specification { - OpenstackClientProvider provider - OpenstackCredentials credentials - OpenstackAtomicOperationDescription description - @Shared - String region = 'region' - @Shared - String account = 'test' - @Shared - Throwable openstackProviderException = new OpenstackProviderException('foo') - @Shared - String opName = 'TEST_PHASE' - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = new OpenstackNamedAccountCredentials("name", "test", "main", "user", "pw", "project", "domain", "endpoint", [], false, "", new OpenstackConfigurationProperties.LbaasConfig(pollTimeout: 60, pollInterval: 5), new OpenstackConfigurationProperties.StackConfig(pollTimeout: 60, pollInterval: 5), new ConsulConfig(), null) - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - - description = new OpenstackAtomicOperationDescription(credentials: credentials, region: region, account: account) - } - - def "remove listeners and pools"() { - given: - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - String loadBalancerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) { - getId() >> '123' - } - String poolId = UUID.randomUUID() - String healthMonitorId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) { - getId() >> poolId - } - - and: - def operation = Spy(ObjectUnderTest, constructorArgs: [description]) - - when: - operation.deleteLoadBalancerPeripherals(opName, region, loadBalancerId, [listener]) - - then: - _ * listener.defaultPoolId >> poolId - 1 * provider.getPool(region, poolId) >> 
lbPool - 2 * lbPool.getHealthMonitorId() >> healthMonitorId - 1 * provider.deleteMonitor(region, healthMonitorId) >> ActionResponse.actionSuccess() - 1 * provider.deletePool(region, poolId) >> ActionResponse.actionSuccess() - 1 * provider.deleteListener(region, listener.id) >> ActionResponse.actionSuccess() - 3 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - 3 * loadBalancer.getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - noExceptionThrown() - } - - def "remove listeners and pools - no health monitor"() { - given: - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - String loadBalancerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) { - getId() >> '123' - } - String poolId = UUID.randomUUID() - String healthMonitorId = null - LbPoolV2 lbPool = Mock(LbPoolV2) { - getId() >> poolId - } - - and: - def operation = Spy(ObjectUnderTest, constructorArgs: [description]) - - when: - operation.deleteLoadBalancerPeripherals(opName, region, loadBalancerId, [listener]) - - then: - _ * listener.defaultPoolId >> poolId - 1 * provider.getPool(region, poolId) >> lbPool - _ * lbPool.healthMonitorId >> healthMonitorId - 0 * operation.removeHealthMonitor(opName, region, loadBalancerId, healthMonitorId) >> {} - 1 * provider.deletePool(region, poolId) >> ActionResponse.actionSuccess() - 1 * provider.deleteListener(region, listener.id) >> ActionResponse.actionSuccess() - 2 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - 2 * loadBalancer.getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - def "remove listeners and pools - no pool"() { - given: - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - String loadBalancerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) { - getId() >> '123' - } - String poolId = null - String healthMonitorId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) { - getId() >> poolId - } - - and: - def operation = Spy(ObjectUnderTest, constructorArgs: [description]) - - when: - operation.deleteLoadBalancerPeripherals(opName, region, loadBalancerId, [listener]) - - then: - _ * listener.defaultPoolId >> poolId - 1 * provider.getPool(region, poolId) >> { throw new OpenstackResourceNotFoundException('test') } - _ * lbPool.healthMonitorId >> healthMonitorId - 0 * operation.removeHealthMonitor(opName, region, loadBalancerId, healthMonitorId) >> {} - 0 * provider.deletePool(region, poolId) >> ActionResponse.actionSuccess() - 1 * provider.deleteListener(region, listener.id) >> ActionResponse.actionSuccess() - 1 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - 1 * loadBalancer.getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - def "update server group success"() { - given: - String loadBalancerId = UUID.randomUUID() - String stackId = UUID.randomUUID() - String createdStackName = 'test-stack' - String template = "foo: bar" - Stack summary = Mock(Stack) { - getName() >> createdStackName - } - Stack detail = Mock(Stack) - Map sub = ['servergroup_resource.yaml': 'foo: bar', 'servergroup_resource_member.yaml': 'foo: bar'] - List tags = [loadBalancerId] - ServerGroupParameters serverGroupParams = new ServerGroupParameters(loadBalancers: tags) - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) { - getName() >> "lb" - } - ListItem listItem = Mock(ListItem) - String listenerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) { - getId() >> listenerId - getDescription() >> 'HTTP:80:HTTP:8080' - getDefaultPoolId() >> UUID.randomUUID() - } - - and: - def operation = 
Spy(ObjectUnderTest, constructorArgs: [description]) - - when: - operation.updateServerGroup(opName, region, loadBalancerId) - - then: - 1 * provider.listStacksWithLoadBalancers(region, [loadBalancerId]) >> [summary] - 1 * provider.getStack(region, createdStackName) >> detail - 1 * provider.getHeatTemplate(region, createdStackName, stackId) >> template - 1 * detail.getOutputs() >> [[output_key: ServerGroupConstants.SUBTEMPLATE_OUTPUT, output_value: sub['servergroup_resource.yaml']], [output_key: ServerGroupConstants.MEMBERTEMPLATE_OUTPUT, output_value: sub['servergroup_resource_member.yaml']]] - 1 * detail.getParameters() >> serverGroupParams.toParamsMap() - _ * detail.getId() >> stackId - _ * detail.getName() >> createdStackName - _ * detail.getTags() >> tags - 1 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - 1 * loadBalancer.listeners >> [listItem] - _ * listItem.id >> listenerId - 1 * provider.getListener(region, listenerId) >> listener - 1 * provider.updateStack(region, createdStackName, stackId, template, _ as Map, _ as ServerGroupParameters, tags) - } - - static class ObjectUnderTest extends AbstractOpenstackLoadBalancerAtomicOperation { - OpenstackAtomicOperationDescription description - - ObjectUnderTest(OpenstackAtomicOperationDescription description) { - super(description.credentials) - this.description = description - } - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/DeleteOpenstackLoadBalancerAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/DeleteOpenstackLoadBalancerAtomicOperationSpec.groovy deleted file mode 100644 index 8cbc22a99a5..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/DeleteOpenstackLoadBalancerAtomicOperationSpec.groovy +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.DeleteOpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.network.ext.LbProvisioningStatus -import org.openstack4j.model.network.ext.LoadBalancerV2 -import spock.lang.Specification -import spock.lang.Subject - -class DeleteOpenstackLoadBalancerAtomicOperationSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - - OpenstackCredentials credentials - DeleteOpenstackLoadBalancerDescription description - String lbId = UUID.randomUUID() - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = Mock(OpenstackNamedAccountCredentials) { - it.lbaasConfig >> new OpenstackConfigurationProperties.LbaasConfig(pollInterval: 0, pollTimeout: 1) - } - - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new DeleteOpenstackLoadBalancerDescription(region: 'region1', id: lbId, account: ACCOUNT_NAME, credentials: credentials) - } - - def "should not delete load balancer"() { - given: - def operation = new DeleteOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * credentials.provider.getLoadBalancer(description.region, description.id) >> null - 0 * operation.deleteLoadBalancer(description.region, _) >> {} - 0 * operation.updateServerGroup(DeleteOpenstackLoadBalancerAtomicOperation.BASE_PHASE, description.region, description.id) >> { - } - noExceptionThrown() - - } - - def "should not delete in pending state"() { - given: - def operation = new DeleteOpenstackLoadBalancerAtomicOperation(description) - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - - when: - operation.operate([]) - - then: - 1 * credentials.provider.getLoadBalancer(description.region, description.id) >> loadBalancer - 2 * loadBalancer.provisioningStatus >> LbProvisioningStatus.PENDING_UPDATE - - and: - thrown(OpenstackOperationException) - } - - def "should throw exception"() { - given: - def operation = new DeleteOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * credentials.provider.getLoadBalancer(description.region, description.id) >> { - throw new OpenstackProviderException('foobar') - } - OpenstackOperationException ex = thrown(OpenstackOperationException) - ex.message == 'deleteLoadBalancer failed: foobar' - } - - def "should delete load balancer"() { - given: - def operation = 
Spy(DeleteOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) { - getId() >> lbId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - Map listenerMap = [:] - - when: - operation.operate([]) - - then: - 2 * credentials.provider.getLoadBalancer(description.region, lbId) >>> [loadBalancer, null] - 1 * operation.buildListenerMap(description.region, loadBalancer) >> listenerMap - 1 * credentials.provider.deleteLoadBalancer(description.region, loadBalancer.id) - noExceptionThrown() - } - - def "should delete load balancer exception"() { - given: - def operation = Spy(DeleteOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) { - getId() >> UUID.randomUUID() - } - Map listenerMap = Mock(Map) - Collection values = Mock(Collection) - - when: - operation.deleteLoadBalancer(description.region, loadBalancer) - - then: - 1 * operation.buildListenerMap(description.region, loadBalancer) >> listenerMap - 1 * listenerMap.values() >> values - 1 * operation.deleteLoadBalancerPeripherals(DeleteOpenstackLoadBalancerAtomicOperation.BASE_PHASE, description.region, loadBalancer.id, values) >> { - throw new OpenstackProviderException('test') - } - 0 * credentials.provider.deleteLoadBalancer(description.region, loadBalancer.id) - - and: - thrown(OpenstackProviderException) - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationSpec.groovy deleted file mode 100644 index 2f044f4686f..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/loadbalancer/UpsertOpenstackLoadBalancerAtomicOperationSpec.groovy +++ /dev/null @@ -1,686 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.loadbalancer - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.BlockingStatusChecker -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.LbaasConfig -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.StackConfig -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Algorithm -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Listener -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.openstack.task.TaskStatusAware -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.compute.SecGroupExtension -import org.openstack4j.model.network.NetFloatingIP -import org.openstack4j.model.network.Network -import org.openstack4j.model.network.Port -import org.openstack4j.model.network.Subnet -import org.openstack4j.model.network.ext.HealthMonitorType -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbMethod -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.LbProvisioningStatus -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.openstack.networking.domain.ext.ListItem -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class UpsertOpenstackLoadBalancerAtomicOperationSpec extends Specification implements TaskStatusAware { - OpenstackClientProvider provider - OpenstackCredentials credentials - OpenstackLoadBalancerDescription description - - @Shared - String region = 'region' - @Shared - String account = 'test' - @Shared - String opName = UPSERT_LOADBALANCER_PHASE - @Shared - Throwable openstackProviderException = new OpenstackProviderException('foo') - @Shared - BlockingStatusChecker blockingClientAdapter = BlockingStatusChecker.from(60, 5) { true } - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = new OpenstackNamedAccountCredentials("name", "test", "main", "user", "pw", "tenant", "domain", "endpoint", [], false, "", new LbaasConfig(pollTimeout: 60, pollInterval: 5), new StackConfig(pollTimeout: 60, 
pollInterval: 5), new ConsulConfig(), null) - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new OpenstackLoadBalancerDescription(credentials: credentials, region: region, account: account) - } - - def "operate - create load balancer"() { - given: - description.with { - name = 'name' - subnetId = UUID.randomUUID() - algorithm = Algorithm.ROUND_ROBIN - listeners = [new Listener(externalPort: 80, externalProtocol: 'HTTP', internalPort: 8080)] - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> '123' - getVipPortId() >> '321' - } - - when: - Map result = operation.operate([]) - - then: - 1 * operation.validatePeripherals(region, description.subnetId, description.networkId, description.securityGroups) >> { - } - 1 * operation.createLoadBalancer(region, description.name, description.subnetId) >> loadBalancer - 1 * operation.buildListenerMap(region, loadBalancer) >> [:] - 1 * operation.addListenersAndPools(region, loadBalancer.id, description.name, description.algorithm, _, description.healthMonitor) >> { - } - 1 * operation.updateFloatingIp(region, description.networkId, loadBalancer.vipPortId) - 1 * operation.updateSecurityGroups(region, loadBalancer.vipPortId, description.securityGroups) - 0 * operation.updateServerGroup(opName, region, loadBalancer.id) - - and: - result == [(description.region): [id: loadBalancer.id]] - } - - def "operate - add / remove load balancer listener pools"() { - given: - description.with { - name = 'name' - id = UUID.randomUUID() - subnetId = UUID.randomUUID() - algorithm = Algorithm.ROUND_ROBIN - listeners = [new Listener(externalPort: 80, externalProtocol: 'HTTP', internalPort: 8080)] - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> '123' - getVipPortId() >> '321' - } - ListenerV2 listenerV2 = Mock(ListenerV2) - - when: - Map result = operation.operate([]) - - then: - 0 * operation.validatePeripherals(region, description.subnetId, description.networkId, description.securityGroups) >> { - } - 1 * provider.getLoadBalancer(region, description.id) >> loadBalancer - 1 * operation.buildListenerMap(region, loadBalancer) >> ['HTTPS:443:HTTPS:8181': listenerV2] - 1 * operation.addListenersAndPools(region, loadBalancer.id, description.name, description.algorithm, _, description.healthMonitor) >> { - } - 1 * operation.deleteLoadBalancerPeripherals(opName, region, loadBalancer.id, _ as Collection) >> {} - 1 * operation.updateFloatingIp(region, description.networkId, loadBalancer.vipPortId) - 1 * operation.updateSecurityGroups(region, loadBalancer.vipPortId, description.securityGroups) - 1 * operation.updateServerGroup(_ as String, region, loadBalancer.id) - - and: - result == [(description.region): [id: loadBalancer.id]] - } - - def "operate - update load balancer listener pools"() { - given: - description.with { - name = 'name' - id = UUID.randomUUID() - subnetId = UUID.randomUUID() - listeners = [new Listener(externalPort: 80, externalProtocol: 'HTTP', internalPort: 8080)] - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> '123' - getVipPortId() >> '321' - } - ListenerV2 listenerV2 = Mock(ListenerV2) - - when: - Map 
result = operation.operate([]) - - then: - 0 * operation.validatePeripherals(region, description.subnetId, description.networkId, description.securityGroups) >> { - } - 1 * provider.getLoadBalancer(region, description.id) >> loadBalancer - 1 * operation.buildListenerMap(region, loadBalancer) >> ['HTTP:80:8080': listenerV2] - 1 * operation.updateListenersAndPools(region, loadBalancer.id, description.algorithm, _, description.healthMonitor) >> { - } - 1 * operation.updateFloatingIp(region, description.networkId, loadBalancer.vipPortId) - 1 * operation.updateSecurityGroups(region, loadBalancer.vipPortId, description.securityGroups) - 0 * operation.updateServerGroup(opName, region, loadBalancer.id) - - and: - result == [(description.region): [id: loadBalancer.id]] - } - - def "operate - throw exception"() { - given: - description.with { - name = 'name' - id = UUID.randomUUID() - subnetId = UUID.randomUUID() - listeners = [new Listener(externalPort: 80, externalProtocol: 'HTTP', internalPort: 8080)] - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> '123' - getVipPortId() >> '321' - } - ListenerV2 listenerV2 = Mock(ListenerV2) - - when: - operation.operate([]) - - then: - 1 * provider.getLoadBalancer(region, description.id) >> loadBalancer - 1 * operation.buildListenerMap(region, loadBalancer) >> ['HTTP:80:8080': listenerV2] - 1 * operation.updateListenersAndPools(region, loadBalancer.id, description.algorithm, _, description.healthMonitor) >> { - throw openstackProviderException - } - - and: - OpenstackOperationException exception = thrown(OpenstackOperationException) - exception.cause == openstackProviderException - } - - def "create load balancer"() { - given: - String name = 'name' - String subnetId = UUID.randomUUID() - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) { - getId() >> '123' - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - when: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - LoadBalancerV2 result = operation.createLoadBalancer(region, name, subnetId) - - then: - 1 * loadBalancer.provisioningStatus >> LbProvisioningStatus.ACTIVE - 1 * provider.createLoadBalancer(region, name, _ as String, subnetId) >> loadBalancer - 1 * provider.getLoadBalancer(region, "123") >> loadBalancer - - and: - result == loadBalancer - } - - def "create load balancer exception"() { - given: - String name = 'name' - String subnetId = UUID.randomUUID() - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.createLoadBalancer(region, name, subnetId) - - then: - 1 * provider.createLoadBalancer(region, name, _ as String, subnetId) >> { throw openstackProviderException } - - and: - thrown(OpenstackProviderException) - } - - def "no update security groups - #testCase"() { - given: - String id = UUID.randomUUID() - Port port = Mock(Port) - - and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.updateSecurityGroups(region, id, [securityGroup]) - - then: - 1 * provider.getPort(region, id) >> port - 1 * port.getSecurityGroups() >> [securityGroup] - - where: - testCase | groups | securityGroup - 'empty' | [] | '123' - 'equal' | ['123'] | '123' - } - - def "update security groups"() { - given: - String id = UUID.randomUUID() - String securityGroup = UUID.randomUUID() - Port port = Mock(Port) - - 
and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.updateSecurityGroups(region, id, [securityGroup]) - - then: - 1 * provider.getPort(region, id) >> port - 1 * port.getSecurityGroups() >> [] - 1 * provider.updatePort(region, id, [securityGroup]) - } - - def "update floating ip - create new floating ip"() { - given: - String networkId = UUID.randomUUID() - String portId = UUID.randomUUID() - NetFloatingIP netFloatingIP = null - Network network = Mock(Network) - FloatingIP floatingIp = Mock(FloatingIP) - - and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.updateFloatingIp(region, networkId, portId) - - then: - 1 * provider.getFloatingIpForPort(region, portId) >> netFloatingIP - 1 * provider.getNetwork(region, networkId) >> network - 1 * provider.getOrCreateFloatingIp(region, network.name) >> floatingIp - 1 * provider.associateFloatingIpToPort(region, floatingIp.id, portId) - } - - def "update floating ip - remove floating ip"() { - given: - String networkId = null - String portId = UUID.randomUUID() - NetFloatingIP netFloatingIP = Mock(NetFloatingIP) - - and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.updateFloatingIp(region, networkId, portId) - - then: - 1 * provider.getFloatingIpForPort(region, portId) >> netFloatingIP - 1 * provider.disassociateFloatingIpFromPort(region, netFloatingIP.id) - } - - def "update floating ip - already exists"() { - given: - String networkId = UUID.randomUUID() - String portId = UUID.randomUUID() - NetFloatingIP netFloatingIP = Mock(NetFloatingIP) - Network network = Mock(Network) - FloatingIP floatingIp = Mock(FloatingIP) - - and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.updateFloatingIp(region, networkId, portId) - - then: - 1 * provider.getFloatingIpForPort(region, portId) >> netFloatingIP - 1 * provider.getNetwork(region, networkId) >> network - 1 * provider.getOrCreateFloatingIp(region, network.name) >> floatingIp - } - - def "update floating ip - network changed"() { - given: - String networkId = UUID.randomUUID() - String portId = UUID.randomUUID() - NetFloatingIP netFloatingIP = Mock(NetFloatingIP) { - getFloatingNetworkId() >> { UUID.randomUUID() } - } - Network network = Mock(Network) - FloatingIP floatingIp = Mock(FloatingIP) - - and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.updateFloatingIp(region, networkId, portId) - - then: - 1 * provider.getFloatingIpForPort(region, portId) >> netFloatingIP - 1 * provider.getNetwork(region, networkId) >> network - 1 * provider.getOrCreateFloatingIp(region, network.name) >> floatingIp - 1 * provider.disassociateFloatingIpFromPort(region, netFloatingIP.id) - 1 * provider.associateFloatingIpToPort(region, floatingIp.id, portId) - } - - def "add listeners and pools"() { - given: - String name = 'name' - Algorithm algorithm = Algorithm.ROUND_ROBIN - String loadBalancerId = UUID.randomUUID() - String key = 'HTTP:80:8080' - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - Listener listener = new Listener(externalProtocol: 'HTTP', externalPort: 80, internalPort: 8080) - ListenerV2 newListener = Mock(ListenerV2) - LbPoolV2 newLbPool = Mock(LbPoolV2) - HealthMonitor healthMonitor = Mock(HealthMonitor) - - and: - def operation = 
Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.addListenersAndPools(region, loadBalancerId, name, algorithm, [(key): listener], healthMonitor) - - then: - 2 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - 1 * provider.createListener(region, name, listener.externalProtocol.name(), listener.externalPort, key, loadBalancerId) >> newListener - // TODO: verify that deriving the pool protocol from listener.externalProtocol.name() is correct - 1 * provider.createPool(region, name, listener.externalProtocol.name(), algorithm.name(), newListener.id) >> newLbPool - 1 * operation.updateHealthMonitor(region, loadBalancerId, newLbPool, healthMonitor) >> {} - } - - def "update listeners and pools - change algorithm"() { - given: - Algorithm algorithm = Algorithm.ROUND_ROBIN - String loadBalancerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) { - getId() >> '123' - } - HealthMonitor healthMonitor = Mock(HealthMonitor) - String poolId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.updateListenersAndPools(region, loadBalancerId, algorithm, [listener], healthMonitor) - - then: - 2 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - _ * listener.defaultPoolId >> poolId - 1 * provider.getPool(region, poolId) >> lbPool - _ * lbPool.lbMethod >> LbMethod.LEAST_CONNECTIONS - 1 * provider.updatePool(region, lbPool.id, algorithm.name()) >> lbPool - } - - def "update listeners and pools - no updates"() { - given: - Algorithm algorithm = Algorithm.ROUND_ROBIN - String loadBalancerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) { - getId() >> '123' - } - HealthMonitor healthMonitor = Mock(HealthMonitor) - String poolId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.updateListenersAndPools(region, loadBalancerId, algorithm, [listener], healthMonitor) - - then: - _ * listener.defaultPoolId >> poolId - 1 * provider.getPool(region, poolId) >> lbPool - _ * lbPool.lbMethod >> LbMethod.ROUND_ROBIN - 0 * provider.updatePool(region, lbPool.id, algorithm.name()) >> lbPool - 1 * operation.updateHealthMonitor(region, loadBalancerId, lbPool, healthMonitor) >> {} - } - - def "update health monitor"() { - given: - String loadBalancerId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) - HealthMonitor healthMonitor = Mock(HealthMonitor) - String healthMonitorId = UUID.randomUUID() - HealthMonitorV2 healthMonitorV2 = Mock(HealthMonitorV2) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.updateHealthMonitor(region, loadBalancerId, lbPool, healthMonitor) - - then: - 1 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - _ * lbPool.healthMonitorId >> healthMonitorId - 1 * provider.getMonitor(region, healthMonitorId) >> healthMonitorV2 - 1 * healthMonitorV2.type >> HealthMonitorType.PING - 1 * healthMonitor.type >> HealthMonitor.HealthMonitorType.PING - 1 * provider.updateMonitor(region, 
healthMonitorId, healthMonitor) - } - - def "update health monitor - delete/add"() { - given: - String loadBalancerId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) - HealthMonitor healthMonitor = Mock(HealthMonitor) - String healthMonitorId = UUID.randomUUID() - HealthMonitorV2 healthMonitorV2 = Mock(HealthMonitorV2) - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.updateHealthMonitor(region, loadBalancerId, lbPool, healthMonitor) - - then: - 2 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - _ * lbPool.healthMonitorId >> healthMonitorId - 1 * provider.getMonitor(region, healthMonitorId) >> healthMonitorV2 - 1 * healthMonitorV2.type >> HealthMonitorType.PING - 1 * healthMonitor.type >> HealthMonitor.HealthMonitorType.TCP - 1 * provider.deleteMonitor(region, healthMonitorId) - 1 * provider.createMonitor(region, lbPool.id, healthMonitor) - } - - def "update health monitor - no monitor"() { - given: - String loadBalancerId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) - HealthMonitor healthMonitor = null - String healthMonitorId = UUID.randomUUID() - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.updateHealthMonitor(region, loadBalancerId, lbPool, healthMonitor) - - then: - 1 * provider.deleteMonitor(region, healthMonitorId) - 1 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - _ * lbPool.healthMonitorId >> healthMonitorId - } - - def "update health monitor - add monitor no existing"() { - given: - String loadBalancerId = UUID.randomUUID() - LbPoolV2 lbPool = Mock(LbPoolV2) - HealthMonitor healthMonitor = Mock(HealthMonitor) - String healthMonitorId = null - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.updateHealthMonitor(region, loadBalancerId, lbPool, healthMonitor) - - then: - 1 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - _ * lbPool.healthMonitorId >> healthMonitorId - 1 * provider.createMonitor(region, lbPool.id, healthMonitor) - } - - def "remove health monitor"() { - given: - String id = UUID.randomUUID() - String loadBalancerId = UUID.randomUUID() - LoadBalancerV2 loadBalancer = Stub(LoadBalancerV2) { - getId() >> loadBalancerId - getProvisioningStatus() >> LbProvisioningStatus.ACTIVE - } - - and: - def operation = Spy(UpsertOpenstackLoadBalancerAtomicOperation, constructorArgs: [description]) - - when: - operation.removeHealthMonitor(opName, region, loadBalancerId, id) - - then: - 1 * provider.getLoadBalancer(region, loadBalancerId) >> loadBalancer - 1 * provider.deleteMonitor(region, id) - } - - def "build listener map"() { - given: - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) - ListItem listItem = Mock(ListItem) - String listenerId = UUID.randomUUID() - ListenerV2 listener = Mock(ListenerV2) - String desc = 'test' - - and: - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - Map result = 
operation.buildListenerMap(region, loadBalancer) - - then: - 1 * loadBalancer.listeners >> [listItem] - 1 * listItem.id >> listenerId - 1 * provider.getListener(region, listenerId) >> listener - 1 * listener.description >> desc - - and: - result == [(desc): listener] - } - - def "validatePeripherals success"() { - given: - String subnetId = UUID.randomUUID() - String networkId = UUID.randomUUID() - String securityGroup = UUID.randomUUID() - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.validatePeripherals(region, subnetId, networkId, [securityGroup]) - - then: - 1 * provider.getSubnet(region, subnetId) >> Mock(Subnet) - 1 * provider.getNetwork(region, networkId) >> Mock(Network) - 1 * provider.getSecurityGroup(region, securityGroup) >> Mock(SecGroupExtension) - noExceptionThrown() - } - - def "validatePeripherals subnet"() { - given: - String subnetId = UUID.randomUUID() - String networkId = UUID.randomUUID() - String securityGroup = UUID.randomUUID() - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.validatePeripherals(region, subnetId, networkId, [securityGroup]) - - then: - 1 * provider.getSubnet(region, subnetId) >> null - 0 * provider.getNetwork(region, networkId) >> Mock(Network) - 0 * provider.getSecurityGroup(region, securityGroup) - - and: - thrown(OpenstackResourceNotFoundException) - } - - def "validatePeripherals network"() { - given: - String subnetId = UUID.randomUUID() - String networkId = UUID.randomUUID() - String securityGroup = UUID.randomUUID() - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.validatePeripherals(region, subnetId, networkId, [securityGroup]) - - then: - 1 * provider.getSubnet(region, subnetId) >> Mock(Subnet) - 1 * provider.getNetwork(region, networkId) >> null - 0 * provider.getSecurityGroup(region, securityGroup) - - and: - thrown(OpenstackResourceNotFoundException) - } - - def "validatePeripherals security groups"() { - given: - String subnetId = UUID.randomUUID() - String networkId = UUID.randomUUID() - String securityGroup = UUID.randomUUID() - def operation = new UpsertOpenstackLoadBalancerAtomicOperation(description) - - when: - operation.validatePeripherals(region, subnetId, networkId, [securityGroup]) - - then: - 1 * provider.getSubnet(region, subnetId) >> Mock(Subnet) - 1 * provider.getNetwork(region, networkId) >> Mock(Network) - 1 * provider.getSecurityGroup(region, securityGroup) >> { throw new OpenstackResourceNotFoundException('test') } - - and: - thrown(OpenstackResourceNotFoundException) - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/DeleteOpenstackSecurityGroupAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/DeleteOpenstackSecurityGroupAtomicOperationSpec.groovy deleted file mode 100644 index 5af59505d14..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/DeleteOpenstackSecurityGroupAtomicOperationSpec.groovy +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.DeleteOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import spock.lang.Specification - -class DeleteOpenstackSecurityGroupAtomicOperationSpec extends Specification { - private static final String ACCOUNT_NAME = 'account' - private static final String REGION = 'west' - def credentials - def provider - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - OpenstackProviderFactory.createProvider(namedAccountCredentials) >> { provider } - credentials = new OpenstackCredentials(namedAccountCredentials) - } - - def "delete a security group"() { - setup: - def id = UUID.randomUUID().toString() - def description = new DeleteOpenstackSecurityGroupDescription(account: ACCOUNT_NAME, region: REGION, credentials: credentials, id: id) - def operation = new DeleteOpenstackSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.deleteSecurityGroup(REGION, id) - noExceptionThrown() - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/UpsertOpenstackSecurityGroupAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/UpsertOpenstackSecurityGroupAtomicOperationSpec.groovy deleted file mode 100644 index d6d23763b93..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/securitygroup/UpsertOpenstackSecurityGroupAtomicOperationSpec.groovy +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.securitygroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.compute.IPProtocol -import org.openstack4j.model.compute.SecGroupExtension -import org.openstack4j.openstack.compute.domain.NovaSecGroupExtension -import spock.lang.Specification - -class UpsertOpenstackSecurityGroupAtomicOperationSpec extends Specification { - - private static final String ACCOUNT_NAME = 'account' - private static final String REGION = 'west' - OpenstackCredentials credentials - OpenstackClientProvider provider - - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - OpenstackProviderFactory.createProvider(namedAccountCredentials) >> { provider } - credentials = new OpenstackCredentials(namedAccountCredentials) - } - - def "create security group without rules"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'name' - def desc = 'description' - SecGroupExtension securityGroup = new NovaSecGroupExtension(id: id, name: name, description: desc) - def description = new UpsertOpenstackSecurityGroupDescription(account: ACCOUNT_NAME, region: REGION, credentials: credentials, name: name, description: desc, rules: []) - def operation = new UpsertOpenstackSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.createSecurityGroup(REGION, name, desc) >> securityGroup - 0 * provider.getSecurityGroup(_, _) - 0 * provider.updateSecurityGroup(_, _, _, _) - 0 * provider.deleteSecurityGroupRule(_, _) - 0 * provider.createSecurityGroupRule(_, _, _, _, _, _, _, _, _) - noExceptionThrown() - } - - def "create security group with rules"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'sec-group-1' - def desc = 'A description' - SecGroupExtension securityGroup = new NovaSecGroupExtension(id: id, name: name, description: desc) - def rules = [ - new UpsertOpenstackSecurityGroupDescription.Rule(ruleType: 'TCP', fromPort: 80, toPort: 80, cidr: '0.0.0.0/0'), - new UpsertOpenstackSecurityGroupDescription.Rule(ruleType: 'TCP', fromPort: 443, toPort: 443, cidr: '0.0.0.0/0'), - new UpsertOpenstackSecurityGroupDescription.Rule(ruleType: 'ICMP', icmpType: 3, icmpCode: 4, remoteSecurityGroupId: 'abc') - ] - - def description = new UpsertOpenstackSecurityGroupDescription(account: ACCOUNT_NAME, region: REGION, credentials: credentials, name: name, description: desc, rules: rules) - def operation = new UpsertOpenstackSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.createSecurityGroup(REGION, name, desc) >> securityGroup - 0 * provider.getSecurityGroup(_, 
_) - 0 * provider.updateSecurityGroup(_, _, _, _) - 0 * provider.deleteSecurityGroupRule(_, _) - rules.each { rule -> - 1 * provider.createSecurityGroupRule(REGION, id, IPProtocol.value(rule.ruleType), rule.cidr, rule.remoteSecurityGroupId, rule.fromPort, rule.toPort, rule.icmpType, rule.icmpCode) - } - noExceptionThrown() - } - - def "create security group with self referential rule"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'sec-group-1' - def desc = 'A description' - SecGroupExtension securityGroup = new NovaSecGroupExtension(id: id, name: name, description: desc) - def rule = new UpsertOpenstackSecurityGroupDescription.Rule(ruleType: 'TCP', fromPort: 80, toPort: 80, remoteSecurityGroupId: 'SELF') - - def description = new UpsertOpenstackSecurityGroupDescription(account: ACCOUNT_NAME, region: REGION, credentials: credentials, name: name, description: desc, rules: [rule]) - def operation = new UpsertOpenstackSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.createSecurityGroup(REGION, name, desc) >> securityGroup - 0 * provider.getSecurityGroup(_, _) - 0 * provider.updateSecurityGroup(_, _, _, _) - 0 * provider.deleteSecurityGroupRule(_, _) - 1 * provider.createSecurityGroupRule(REGION, id, IPProtocol.value(rule.ruleType), null, id, rule.fromPort, rule.toPort, null, null) - noExceptionThrown() - } - - def "update security group"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'sec-group-2' - def desc = 'A description 2' - - def existingRules = [ - new NovaSecGroupExtension.SecurityGroupRule(id: '1', fromPort: 80, toPort: 8080, cidr: '192.1.68.1/24'), - new NovaSecGroupExtension.SecurityGroupRule(id: '2', fromPort: 443, toPort: 443, cidr: '0.0.0.0/0') - ] - def existingSecurityGroup = new NovaSecGroupExtension(id: id, name: name, description: desc, rules: existingRules) - - def newRules = [ - new UpsertOpenstackSecurityGroupDescription.Rule(ruleType: 'TCP', fromPort: 80, toPort: 80, cidr: '0.0.0.0/0'), - new UpsertOpenstackSecurityGroupDescription.Rule(ruleType: 'TCP', fromPort: 443, toPort: 443, cidr: '0.0.0.0/0') - ] - - def description = new UpsertOpenstackSecurityGroupDescription(account: ACCOUNT_NAME, region: REGION, id: id, credentials: credentials, name: name, description: desc, rules: newRules) - def operation = new UpsertOpenstackSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.getSecurityGroup(REGION, id) >> existingSecurityGroup - 1 * provider.updateSecurityGroup(REGION, id, name, desc) >> existingSecurityGroup - existingRules.each { rule -> - 1 * provider.deleteSecurityGroupRule(REGION, rule.id) - } - newRules.each { rule -> - 1 * provider.createSecurityGroupRule(REGION, id, IPProtocol.TCP, rule.cidr, rule.remoteSecurityGroupId, rule.fromPort, rule.toPort, rule.icmpType, rule.icmpCode) - } - 0 * provider.createSecurityGroup(_, _, _) - noExceptionThrown() - } - - def "upsert security group handles exceptions"() { - setup: - def name = 'name' - def desc = 'desc' - def description = new UpsertOpenstackSecurityGroupDescription(account: ACCOUNT_NAME, region: REGION, credentials: credentials, name: name, description: desc, rules: []) - def operation = new UpsertOpenstackSecurityGroupAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.createSecurityGroup(REGION, name, desc) >> { throw new OpenstackOperationException('foo') } - thrown(OpenstackOperationException) - } -} diff --git 
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractStackUpdateOpenstackAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractStackUpdateOpenstackAtomicOperationSpec.groovy deleted file mode 100644 index f2f4a0d0bf4..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/AbstractStackUpdateOpenstackAtomicOperationSpec.groovy +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.compute.Server -import org.openstack4j.model.heat.Stack -import spock.lang.Specification -import spock.lang.Subject - -class AbstractStackUpdateOpenstackAtomicOperationSpec extends Specification { - - String ACCOUNT_NAME = 'myaccount' - - def credentials - def description - - String region = 'r1' - String serverGroupName = 'asg1' - - Server server - Stack stack - - String yaml = "foo: bar" - List tags = ["t1","t2"] - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials credz = Mock(OpenstackNamedAccountCredentials) - credz.getStackConfig() >> new OpenstackConfigurationProperties.StackConfig(pollInterval: 0, pollTimeout: 1) - - OpenstackProviderFactory.createProvider(credz) >> { provider } - credentials = new OpenstackCredentials(credz) - description = new OpenstackServerGroupAtomicOperationDescription(serverGroupName: serverGroupName, account: ACCOUNT_NAME, credentials: credentials, region: region) - stack = Mock(Stack) { - it.id >> { serverGroupName } - it.name >> { serverGroupName } - it.parameters >> { [:] } - it.outputs >> { [[output_key: ServerGroupConstants.SUBTEMPLATE_OUTPUT, output_value: yaml], [output_key: 
ServerGroupConstants.MEMBERTEMPLATE_OUTPUT, output_value: yaml]] } - it.tags >> { tags } - it.status >> "UPDATE_COMPLETE" - } - } - - def "should update stack"() { - given: - @Subject def operation = new SampleAbstractStackUpdateOpenstackAtomicOperation(description) - - when: - operation.operate([]) - - then: - 2 * credentials.provider.getStack(region, serverGroupName) >> stack - 1 * credentials.provider.getHeatTemplate(region, serverGroupName, serverGroupName) >> yaml - 1 * credentials.provider.updateStack(region, serverGroupName, serverGroupName, yaml, _ as Map, _ as ServerGroupParameters, stack.tags) - noExceptionThrown() - } - - def "should throw exception"() { - given: - @Subject def operation = new SampleAbstractStackUpdateOpenstackAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * credentials.provider.getStack(region, serverGroupName) >> stack - 1 * credentials.provider.getHeatTemplate(region, serverGroupName, serverGroupName) >> yaml - 1 * credentials.provider.updateStack(region, serverGroupName, serverGroupName, yaml, _ as Map, _ as ServerGroupParameters, stack.tags) >> { throw new OpenstackOperationException("foobar") } - OpenstackOperationException ex = thrown(OpenstackOperationException) - ex.message == "operation failed: foobar" - } - - static class SampleAbstractStackUpdateOpenstackAtomicOperation extends AbstractStackUpdateOpenstackAtomicOperation { - String phaseName = 'phase' - String operation = 'operation' - SampleAbstractStackUpdateOpenstackAtomicOperation(OpenstackServerGroupAtomicOperationDescription description) { - super(description) - } - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/CloneOpenstackAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/CloneOpenstackAtomicOperationSpec.groovy deleted file mode 100644 index 0ae597072fe..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/CloneOpenstackAtomicOperationSpec.groovy +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.CloneOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import org.openstack4j.model.heat.Stack -import spock.lang.Specification -import spock.lang.Subject - -class CloneOpenstackAtomicOperationSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - - private static final STACK = "stack" - private static final APPLICATION = "app" - private static final DETAILS = "details" - private static final REGION = "region" - private static final Integer TIMEOUT_MINS = 5 - private static final Boolean DISABLE_ROLLBACK = false - private static final String INSTANCE_TYPE = 'm1.medium' - private static final String IMAGE = 'ubuntu-latest-orig' - private static final int MAX_SIZE = 6 - private static final int MIN_SIZE = 4 - private static final String SUBNET_ID = '12356' - private static final String POOL_ID = '47890' - private static final List SECURITY_GROUPS = ['sg99','sg3434'] - - private static final SEQUENCE = "v000" - private static final ANCESTOR_STACK_NAME = "$APPLICATION-$STACK-$DETAILS-$SEQUENCE" - - // Changed Parameters - private static final STACK_N = "stackn" - private static final APPLICATION_N = "appn" - private static final DETAILS_N = "detailn" - private static final REGION_N = "regionn" - private static final Integer TIMEOUT_MINS_N = 6 - private static final Boolean DISABLE_ROLLBACK_N = true - private static final String INSTANCE_TYPE_N = 'm1.small' - private static final String IMAGE_N = 'ubuntu-latest' - private static final int MAX_SIZE_N = 5 - private static final int MIN_SIZE_N = 3 - private static final String SUBNET_ID_N = '1234' - private static final String POOL_ID_N = '5678' - private static final List SECURITY_GROUPS_N = ['sg1'] - - def credentials - def provider - - DeployOpenstackAtomicOperationDescription createAncestorDeployAtomicOperationDescription() { - def scaleup = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: 1, period: 60, threshold: 50) - def scaledown = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: -1, period: 600, threshold: 15) - def params = new ServerGroupParameters( - instanceType: INSTANCE_TYPE, - image:IMAGE, - maxSize: MAX_SIZE, - minSize: MIN_SIZE, - subnetId: SUBNET_ID, - loadBalancers: [POOL_ID], - securityGroups: SECURITY_GROUPS, - autoscalingType: ServerGroupParameters.AutoscalingType.CPU, - scaleup: scaleup, - scaledown: scaledown, - tags: ['foo':'bar'], - ) - new 
DeployOpenstackAtomicOperationDescription( - stack: STACK, - application: APPLICATION, - freeFormDetails: DETAILS, - region: REGION, - serverGroupParameters: params, - timeoutMins: TIMEOUT_MINS, - disableRollback: DISABLE_ROLLBACK, - account: ACCOUNT_NAME, - credentials: credentials, - userData: 'foo' - ) - } - - DeployOpenstackAtomicOperationDescription createNewDeployAtomicOperationDescription() { - def scaleup = new ServerGroupParameters.Scaler(cooldown: 61, adjustment: 2, period: 61, threshold: 51) - def scaledown = new ServerGroupParameters.Scaler(cooldown: 61, adjustment: -2, period: 601, threshold: 16) - def params = new ServerGroupParameters( - instanceType: INSTANCE_TYPE_N, - image:IMAGE_N, - maxSize: MAX_SIZE_N, - minSize: MIN_SIZE_N, - subnetId: SUBNET_ID_N, - loadBalancers: [POOL_ID_N], - securityGroups: SECURITY_GROUPS_N, - autoscalingType: ServerGroupParameters.AutoscalingType.NETWORK_INCOMING, - scaleup: scaleup, - scaledown: scaledown, - tags: ["foo":"barbar"], - ) - new DeployOpenstackAtomicOperationDescription( - stack: STACK_N, - application: APPLICATION_N, - freeFormDetails: DETAILS_N, - region: REGION_N, - serverGroupParameters: params, - timeoutMins: TIMEOUT_MINS_N, - disableRollback: DISABLE_ROLLBACK_N, - account: ACCOUNT_NAME, - credentials: credentials, - userData: 'foo' - ) - } - - def ancestorDeployAtomicOperationDescription - def newDeployAtomicOperationDescription - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global : true) - OpenstackNamedAccountCredentials creds = Mock(OpenstackNamedAccountCredentials) - OpenstackProviderFactory.createProvider(creds) >> { provider } - credentials = new OpenstackCredentials(creds) - ancestorDeployAtomicOperationDescription = createAncestorDeployAtomicOperationDescription() - newDeployAtomicOperationDescription = createNewDeployAtomicOperationDescription() - } - - def "builds a description based on ancestor server group, overrides nothing"() { - given: - def inputDescription = new CloneOpenstackAtomicOperationDescription( - source: new CloneOpenstackAtomicOperationDescription.OpenstackCloneSource( - serverGroupName: ANCESTOR_STACK_NAME, - region: REGION - ), - region: REGION, - account: ACCOUNT_NAME, - credentials: credentials - ) - Stack mockStack = Mock(Stack) - mockStack.parameters >> { ancestorDeployAtomicOperationDescription.serverGroupParameters.toParamsMap() } - mockStack.timeoutMins >> { ancestorDeployAtomicOperationDescription.timeoutMins } - - @Subject def operation = new CloneOpenstackAtomicOperation(inputDescription) - - when: - def resultDescription = operation.cloneAndOverrideDescription() - - then: - 1 * inputDescription.credentials.provider.getStack(REGION, ANCESTOR_STACK_NAME) >> mockStack - resultDescription.application == ancestorDeployAtomicOperationDescription.application - resultDescription.stack == ancestorDeployAtomicOperationDescription.stack - resultDescription.timeoutMins == ancestorDeployAtomicOperationDescription.timeoutMins - resultDescription.serverGroupParameters == ancestorDeployAtomicOperationDescription.serverGroupParameters - resultDescription.freeFormDetails == ancestorDeployAtomicOperationDescription.freeFormDetails - resultDescription.disableRollback == ancestorDeployAtomicOperationDescription.disableRollback - resultDescription.account == ancestorDeployAtomicOperationDescription.account - resultDescription.region == 
ancestorDeployAtomicOperationDescription.region - } - - def "builds a description based on ancestor server group, overrides everything"() { - given: - def scaleup = new ServerGroupParameters.Scaler(cooldown: 61, adjustment: 2, period: 61, threshold: 51) - def scaledown = new ServerGroupParameters.Scaler(cooldown: 61, adjustment: -2, period: 601, threshold: 16) - def params = new ServerGroupParameters( - instanceType: INSTANCE_TYPE_N, - image:IMAGE_N, - maxSize: MAX_SIZE_N, - minSize: MIN_SIZE_N, - subnetId: SUBNET_ID_N, - loadBalancers: [POOL_ID_N], - securityGroups: SECURITY_GROUPS_N, - autoscalingType: ServerGroupParameters.AutoscalingType.NETWORK_INCOMING, - scaleup: scaleup, - scaledown: scaledown, - tags: ["foo":"barbar"] - ) - def inputDescription = new CloneOpenstackAtomicOperationDescription( - stack: STACK_N, - application: APPLICATION_N, - freeFormDetails: DETAILS_N, - region: REGION_N, - timeoutMins: TIMEOUT_MINS_N, - disableRollback: DISABLE_ROLLBACK_N, - source: new CloneOpenstackAtomicOperationDescription.OpenstackCloneSource( - serverGroupName: ANCESTOR_STACK_NAME, - region: REGION - ), - credentials: credentials, - account: ACCOUNT_NAME, - serverGroupParameters: params - ) - Stack mockStack = Mock(Stack) - mockStack.parameters >> { ancestorDeployAtomicOperationDescription.serverGroupParameters.toParamsMap() } - mockStack.timeoutMins >> { ancestorDeployAtomicOperationDescription.timeoutMins } - - @Subject def operation = new CloneOpenstackAtomicOperation(inputDescription) - - when: - def resultDescription = operation.cloneAndOverrideDescription() - - then: - 1 * inputDescription.credentials.provider.getStack(REGION, ANCESTOR_STACK_NAME) >> mockStack - - resultDescription.application == newDeployAtomicOperationDescription.application - resultDescription.stack == newDeployAtomicOperationDescription.stack - resultDescription.serverGroupParameters == newDeployAtomicOperationDescription.serverGroupParameters - resultDescription.timeoutMins == newDeployAtomicOperationDescription.timeoutMins - resultDescription.freeFormDetails == newDeployAtomicOperationDescription.freeFormDetails - resultDescription.disableRollback == newDeployAtomicOperationDescription.disableRollback - resultDescription.account == newDeployAtomicOperationDescription.account - resultDescription.region == newDeployAtomicOperationDescription.region - } - - def "builds a description based on ancestor server group, overrides floating network id"() { - given: - - def scaleup = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: 1, period: 60, threshold: 50) - def scaledown = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: -1, period: 600, threshold: 15) - def ancestorParams = new ServerGroupParameters( - instanceType: INSTANCE_TYPE, - image:IMAGE, - maxSize: MAX_SIZE, - minSize: MIN_SIZE, - subnetId: SUBNET_ID, - loadBalancers: [POOL_ID], - securityGroups: SECURITY_GROUPS, - autoscalingType: ServerGroupParameters.AutoscalingType.CPU, - scaleup: scaleup, - scaledown: scaledown, - tags: ['foo':'bar'], - floatingNetworkId: UUID.randomUUID().toString(), - ) - def ancestor = new DeployOpenstackAtomicOperationDescription( - stack: STACK, - application: APPLICATION, - freeFormDetails: DETAILS, - region: REGION, - serverGroupParameters: ancestorParams, - timeoutMins: TIMEOUT_MINS, - disableRollback: DISABLE_ROLLBACK, - account: ACCOUNT_NAME, - credentials: credentials, - userData: 'foo' - ) - - def inputDescription = new CloneOpenstackAtomicOperationDescription( - source: new 
CloneOpenstackAtomicOperationDescription.OpenstackCloneSource( - serverGroupName: ANCESTOR_STACK_NAME, - region: REGION - ), - region: REGION, - account: ACCOUNT_NAME, - credentials: credentials - ) - Stack mockStack = Mock(Stack) - mockStack.parameters >> { ancestor.serverGroupParameters.toParamsMap() } - mockStack.timeoutMins >> { ancestor.timeoutMins } - - @Subject def operation = new CloneOpenstackAtomicOperation(inputDescription) - - when: - def resultDescription = operation.cloneAndOverrideDescription() - - then: - 1 * inputDescription.credentials.provider.getStack(REGION, ANCESTOR_STACK_NAME) >> mockStack - - resultDescription.serverGroupParameters.floatingNetworkId == null - resultDescription.application == ancestor.application - resultDescription.stack == ancestor.stack - resultDescription.timeoutMins == ancestor.timeoutMins - resultDescription.freeFormDetails == ancestor.freeFormDetails - resultDescription.disableRollback == ancestor.disableRollback - resultDescription.account == ancestor.account - resultDescription.region == ancestor.region - } - - def "ancestor stack not found throws operation exception"() { - given: - def stackName = 'app-stack-details-v000' - def notFound = new OpenstackProviderException("foo") - def inputDescription = new CloneOpenstackAtomicOperationDescription( - source: new CloneOpenstackAtomicOperationDescription.OpenstackCloneSource(serverGroupName: stackName, region: REGION), - region: REGION, - account: ACCOUNT_NAME, - credentials: credentials - ) - - @Subject def operation = new CloneOpenstackAtomicOperation(inputDescription) - - when: - operation.operate([]) - - then: - 1 * provider.getStack(REGION, stackName) >> { throw notFound } - def ex = thrown(OpenstackOperationException) - ex.message.contains(AtomicOperations.CLONE_SERVER_GROUP) - ex.cause == notFound - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DeployOpenstackAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DeployOpenstackAtomicOperationSpec.groovy deleted file mode 100644 index 603ee2b3019..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DeployOpenstackAtomicOperationSpec.groovy +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright 2016 The original authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.dataformat.yaml.YAMLFactory -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.UserDataType -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.Subnet -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.openstack.heat.domain.HeatStack -import org.openstack4j.openstack.networking.domain.ext.ListItem -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -class DeployOpenstackAtomicOperationSpec extends Specification { - String accountName = 'myaccount' - String application = "app" - String stack = "stack" - String details = "details" - String region = "region" - Integer timeoutMins = 5 - Boolean disableRollback = false - String instanceType = 'm1.small' - int externalPort = 80 - int internalPort = 8100 - String image = 'ubuntu-latest' - int maxSize = 5 - int minSize = 3 - String subnetId = '1234' - String lbId = '5678' - String listenerId = '9999' - String poolId = '8888' - List securityGroups = ['sg1'] - - def credentials - def serverGroupParams - def expectedServerGroupParams - def description - def provider - def mockLb - def mockListener - def mockItem - def mockSubnet - def tags - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global : true) - OpenstackNamedAccountCredentials creds = Mock(OpenstackNamedAccountCredentials) - creds.getStackConfig() >> new OpenstackConfigurationProperties.StackConfig(pollInterval: 0, pollTimeout: 1) - OpenstackProviderFactory.createProvider(creds) >> { provider } - credentials = new OpenstackCredentials(creds) - - serverGroupParams = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, subnetId: subnetId, loadBalancers: [lbId], securityGroups: securityGroups) - description = new DeployOpenstackAtomicOperationDescription(stack: stack, application: application, freeFormDetails: details, region: region, serverGroupParameters: serverGroupParams.clone(), timeoutMins: timeoutMins, disableRollback: disableRollback, account: accountName, credentials: credentials) - - // Add the computed parts to the server group params - expectedServerGroupParams = serverGroupParams.clone() - 
expectedServerGroupParams.with { - it.networkId = '1234' - it.rawUserData = '' - } - - mockItem = Mock(ListItem) - mockItem.id >> { listenerId } - mockLb = Mock(LoadBalancerV2) - mockLb.name >> { 'mockpool' } - mockLb.listeners >> {[mockItem]} - mockListener = Mock(ListenerV2) - mockListener.id >> { listenerId } - mockListener.defaultPoolId >> { poolId } - mockListener.description >> { "HTTP:$externalPort:HTTP:$internalPort" } - mockSubnet = Mock(Subnet) - mockSubnet.networkId >> { '1234' } - tags = [lbId] - } - - def "should deploy a heat stack"() { - given: - @Subject def operation = new DeployOpenstackAtomicOperation(description) - String createdStackName = 'app-stack-details-v000' - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, _ as String, _ as Map, expectedServerGroupParams, _ as Boolean, _ as Long, tags) - 1 * provider.getStack(region, createdStackName) >> new HeatStack(status: "CREATE_COMPLETE") - noExceptionThrown() - } - - def "should deploy a heat stack even when stack exists"() { - given: - @Subject def operation = new DeployOpenstackAtomicOperation(description) - Stack stack = Mock(Stack) - String createdStackName = 'app-stack-details-v000' - stack.name >> { createdStackName } - stack.creationTime >> { '2014-06-03T20:59:46Z' } - String newStackName = 'app-stack-details-v001' - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(_) >> [stack] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, newStackName, _ as String, _ as Map, expectedServerGroupParams, _ as Boolean, _ as Long, tags) - 1 * provider.getStack(region, newStackName) >> new HeatStack(status: "CREATE_COMPLETE") - noExceptionThrown() - } - - def "should deploy a heat stack with scaleup and scaledown"() { - given: - def scaledServerGroupParams = serverGroupParams.clone() - scaledServerGroupParams.with { - it.autoscalingType = ServerGroupParameters.AutoscalingType.CPU - it.scaleup = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: 1, period: 60, threshold: 50) - it.scaledown = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: -1, period: 600, threshold: 15) - } - - def scaledDescription = description.clone() - scaledDescription.with { - it.serverGroupParameters = scaledServerGroupParams.clone() - } - - def expected = expectedServerGroupParams.clone() - expected.with { - it.autoscalingType = ServerGroupParameters.AutoscalingType.CPU - it.scaleup = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: 1, period: 60, threshold: 50) - it.scaledown = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: -1, period: 600, threshold: 15) - } - - @Subject def operation = new DeployOpenstackAtomicOperation(scaledDescription) - String createdStackName = 'app-stack-details-v000' - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, _ as String, _ as Map, expected, _ as Boolean, _ as Long, tags) - 1 * provider.getStack(region, 
createdStackName) >> new HeatStack(status: "CREATE_COMPLETE") - noExceptionThrown() - } - - def "ensure user data is resolved correctly"() { - def userData = '#!/bin/bash\necho "userdata" >> /etc/userdata' - def expected = expectedServerGroupParams.clone() - expected.with { - it.rawUserData = userData - it.sourceUserDataType = UserDataType.TEXT.toString() - it.sourceUserData = userData - } - - def userDataDescription = description.clone() - userDataDescription.with { - it.userData = userData - it.userDataType = UserDataType.TEXT - } - @Subject def operation = new DeployOpenstackAtomicOperation(userDataDescription) - String createdStackName = 'app-stack-details-v000' - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, _ as String, _ as Map, expected, _ as Boolean, _ as Long, tags) - 1 * provider.getStack(region, createdStackName) >> new HeatStack(status: "CREATE_COMPLETE") - noExceptionThrown() - } - - def "should not deploy a stack when exception thrown"() { - given: - @Subject def operation = new DeployOpenstackAtomicOperation(description) - String createdStackName = 'app-stack-details-v000' - Throwable throwable = new OpenstackProviderException('foo') - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, _ as String, _ as Map, expectedServerGroupParams, _ as Boolean, _ as Long, tags) >> { throw throwable } - 0 * provider.getStack(region, createdStackName) - Throwable actual = thrown(OpenstackOperationException) - actual.cause == throwable - } - - def "should throw an exception when stack creation fails"() { - given: - @Subject def operation = new DeployOpenstackAtomicOperation(description) - String createdStackName = 'app-stack-details-v000' - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, _ as String, _ as Map, expectedServerGroupParams, _ as Boolean, _ as Long, tags) - 1 * provider.getStack(region, createdStackName) >> new HeatStack(status: "CREATE_FAILED") - thrown(OpenstackOperationException) - } - - def "should retry when the stack is pending"() { - given: - @Subject def operation = new DeployOpenstackAtomicOperation(description) - String createdStackName = 'app-stack-details-v000' - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, _ as String, _ as Map, expectedServerGroupParams, _ as Boolean, _ as Long, tags) - 2 * provider.getStack(region, createdStackName) >>> [new HeatStack(status: "CREATE_IN_PROGRESS"), new HeatStack(status: "CREATE_COMPLETE")] - noExceptionThrown() - } - - @Unroll - def "creates HEAT template: #type"() { - given: - def mapper = new ObjectMapper(new 
YAMLFactory()) - @Subject def operation = new DeployOpenstackAtomicOperation(description) - String createdStackName = 'app-stack-details-v000' - if (fip) { - description.serverGroupParameters.floatingNetworkId = "net-9876" - } - - if (!loadBalancers) { - description.serverGroupParameters.loadBalancers = [] - tags = [] - } - - when: - operation.operate([]) - - then: - 1 * provider.listStacks(region) >> [] - if (loadBalancers) { - 1 * provider.getLoadBalancer(region, lbId) >> mockLb - 1 * provider.getListener(region, listenerId) >> mockListener - } - 1 * provider.getSubnet(region, subnetId) >> mockSubnet - 1 * provider.deploy(region, createdStackName, { assertTemplate(it, mainTemplate) }, { assertTemplates(it, subtemplates)}, { params(it) }, _ as Boolean, _ as Long, tags) - 1 * provider.getStack(region, createdStackName) >> new HeatStack(status: "CREATE_COMPLETE") - noExceptionThrown() - - where: - type | fip | loadBalancers || mainTemplate | subtemplates | params - "no fip, no load balancers" | false | false || exampleTemplate("servergroup.yaml") | ["servergroup_resource.yaml": exampleTemplate("servergroup_server.yaml")] | { ServerGroupParameters params -> true } - "fip, no load balancers" | true | false || exampleTemplate("servergroup_float.yaml") | ["servergroup_resource.yaml": exampleTemplate("servergroup_server_float.yaml")] | { ServerGroupParameters params -> true } - "no fip, load balancers" | false | true || exampleTemplate("servergroup.yaml") | ["servergroup_resource.yaml": exampleTemplate("servergroup_resource.yaml"), "servergroup_resource_member.yaml": memberDataTemplate()] | { ServerGroupParameters params -> true } - "fip, load balancers" | true | true || exampleTemplate("servergroup_float.yaml") | ["servergroup_resource.yaml": exampleTemplate("servergroup_resource_float.yaml"), "servergroup_resource_member.yaml": memberDataTemplate()] | { ServerGroupParameters params -> true } - } - - private boolean assertTemplate(String actual, String expected) { - def mapper = new ObjectMapper(new YAMLFactory()) - return mapper.readValue(actual, Map) == mapper.readValue(expected, Map) - } - - private boolean assertTemplates(Map actual, Map expected) { - def mapper = new ObjectMapper(new YAMLFactory()) - return actual.collectEntries {k, v -> [(k): mapper.readValue(v, Map)]} == expected.collectEntries { k, v -> [(k): mapper.readValue(v, Map)] } - } - - private String exampleTemplate(String name) { - DeployOpenstackAtomicOperationSpec.class.getResource(name).getText("utf-8") - } - - private String memberDataTemplate() { - return """\ ---- -heat_template_version: "2016-04-08" -description: "Pool members for autoscaling group resource" -parameters: - address: - type: "string" - description: "Server address for autoscaling group resource" -resources: - member-mockpool-99-null-null: - type: "OS::Neutron::LBaaS::PoolMember" - properties: - address: - get_param: "address" - pool: "8888" - protocol_port: null - subnet: "1234" -""" - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DestroyOpenstackAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DestroyOpenstackAtomicOperationSpec.groovy deleted file mode 100644 index ab9df7e93a4..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/DestroyOpenstackAtomicOperationSpec.groovy +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 
2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.heat.Stack -import spock.lang.Specification -import spock.lang.Subject - -class DestroyOpenstackAtomicOperationSpec extends Specification { - - private static final String ACCOUNT_NAME = 'myaccount' - private static final STACK = "stack" - private static final REGION = "region" - - def credentials - def description - def provider - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials creds = Mock(OpenstackNamedAccountCredentials) - creds.getStackConfig() >> new OpenstackConfigurationProperties.StackConfig(pollInterval: 0, pollTimeout: 1) - - OpenstackProviderFactory.createProvider(creds) >> { provider } - credentials = new OpenstackCredentials(creds) - description = new OpenstackServerGroupAtomicOperationDescription(serverGroupName: STACK, region: REGION, credentials: credentials) - } - - def "destroy stack succeeds"() { - given: - @Subject def operation = new DestroyOpenstackAtomicOperation(description) - Stack mockStack = Mock(Stack) - - when: - operation.operate([]) - - then: - 2 * provider.getStack(description.region, description.serverGroupName) >>> [mockStack, null] - 1 * provider.destroy(description.region, mockStack) - noExceptionThrown() - } - - def "destroy stack throws an exception when unable to delete stack"() { - given: - @Subject def operation = new DestroyOpenstackAtomicOperation(description) - Stack mockStack = Mock(Stack) - - when: - operation.operate([]) - - then: - 1 * provider.getStack(description.region, description.serverGroupName) >> mockStack - 1 * provider.destroy(description.region, mockStack) >> { throw new OpenstackProviderException('foo') } - Exception e = thrown(OpenstackOperationException) - e.cause.message == 'foo' - } - -} diff --git 
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/EnableDisableOpenstackAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/EnableDisableOpenstackAtomicOperationSpec.groovy deleted file mode 100644 index bad8ad1e4ed..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/EnableDisableOpenstackAtomicOperationSpec.groovy +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Copyright 2016 Veritas Technologies LLC. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.EnableDisableAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackResourceNotFoundException -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import org.openstack4j.model.compute.Address -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.ext.LbProvisioningStatus -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import org.openstack4j.model.network.ext.status.LbPoolV2Status -import org.openstack4j.model.network.ext.status.ListenerV2Status -import org.openstack4j.model.network.ext.status.LoadBalancerV2Status -import org.openstack4j.model.network.ext.status.MemberV2Status -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class EnableDisableOpenstackAtomicOperationSpec extends Specification { - - private static final STACK = "stack" - private static final REGION = "region" - - def credentials - def description - def provider - def stack - - List ids = ['foo', 'bar'] - List lbIds = ['lb1','lb2'] - String memberId = '42' - String poolId = '1' - String subnet = '2' - String listenerId = '3' - Integer port = 8080 - String ip = '1.2.3.4' - - def 
setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials creds = new OpenstackNamedAccountCredentials("name", "test", "main", "user", "pw", "tenant", "domain", "endpoint", [], false, "", new OpenstackConfigurationProperties.LbaasConfig(pollTimeout: 60, pollInterval: 5), new OpenstackConfigurationProperties.StackConfig(pollTimeout: 600, pollInterval: 5), new ConsulConfig(), null) - OpenstackProviderFactory.createProvider(creds) >> { provider } - credentials = new OpenstackCredentials(creds) - description = new EnableDisableAtomicOperationDescription(serverGroupName: STACK, region: REGION, credentials: credentials) - stack = Mock(Stack) { - it.tags >> { lbIds } - } - } - - def "#testcase stack #testcase instances on load balancer"() { - given: - MemberV2Status mstatus = Mock(MemberV2Status) { - it.id >> { memberId } - it.address >> { ip } - } - LbPoolV2Status pstatus = Mock(LbPoolV2Status) { - it.id >> { poolId } - it.memberStatuses >> { [mstatus] } - } - ListenerV2Status lstatus = Mock(ListenerV2Status) { - it.id >> { listenerId } - it.lbPoolV2Statuses >> { [pstatus] } - } - LoadBalancerV2Status status = Mock(LoadBalancerV2Status) { - it.listenerStatuses >> { [lstatus] } - } - LoadBalancerV2StatusTree tree = Mock(LoadBalancerV2StatusTree) { - it.loadBalancerV2Status >> { status } - } - Address address = Mock(Address) { - it.addr >> { ip } - } - LoadBalancerV2 mockLB = Mock(LoadBalancerV2) { - it.provisioningStatus >> { LbProvisioningStatus.ACTIVE } - } - - when: - AtomicOperation operation = operationClazz.newInstance([description].toArray()) - operation.operate([]) - - then: - _ * provider.getLoadBalancer(description.region, _) >> mockLB - 1 * provider.getInstanceIdsForStack(description.region, description.serverGroupName) >> ids - 1 * provider.getStack(description.region, description.serverGroupName) >> stack - ids.each { id -> - 1 * provider.getIpsForInstance(description.region, id) >> [address] - } - lbIds.each { lbId -> - 1 * provider.getLoadBalancerStatusTree(description.region, lbId) >> tree - 2 * provider.updatePoolMemberStatus(description.region, poolId, memberId, memberStatus) - } - noExceptionThrown() - - where: - operationClazz | memberStatus | testcase - DisableOpenstackAtomicOperation | false | 'disable' - EnableOpenstackAtomicOperation | true | 'enable' - } - - def "#testcase stack does nothing when stack has no instances"() { - when: - AtomicOperation operation = operationClazz.newInstance([description].toArray()) - operation.operate([]) - - then: - 1 * provider.getInstanceIdsForStack(description.region, description.serverGroupName) >> [] - 0 * provider.getStack(description.region, description.serverGroupName) - 0 * provider.getIpsForInstance(description.region, _ as String) - 0 * provider.getLoadBalancerStatusTree(description.region, _ as String) - 0 * provider.updatePoolMemberStatus(description.region, poolId, memberId) - noExceptionThrown() - - where: - operationClazz | memberStatus | testcase - DisableOpenstackAtomicOperation | false | 'disable' - EnableOpenstackAtomicOperation | true | 'enable' - } - - def "#testcase stack does nothing when stack has no load balancers"() { - given: - Stack emptyStack = Mock(Stack) { - it.tags >> { [] } - } - - when: - AtomicOperation operation = operationClazz.newInstance([description].toArray()) - operation.operate([]) - - then: - 1 * 
provider.getInstanceIdsForStack(description.region, description.serverGroupName) >> ['1','2','3'] - 1 * provider.getStack(description.region, description.serverGroupName) >> emptyStack - 0 * provider.getIpsForInstance(description.region, _ as String) - 0 * provider.getLoadBalancerStatusTree(description.region, _ as String) - 0 * provider.updatePoolMemberStatus(description.region, poolId, memberId, false) - noExceptionThrown() - - where: - operationClazz | memberStatus | testcase - DisableOpenstackAtomicOperation | false | 'disable' - EnableOpenstackAtomicOperation | true | 'enable' - } - - def "stack not found - #testcase"() { - given: - Throwable throwable = new OpenstackProviderException("Unable to find stack $description.serverGroupName in region $description.region") - - when: - AtomicOperation operation = operationClazz.newInstance([description].toArray()) - operation.operate([]) - - then: - 1 * provider.getInstanceIdsForStack(description.region, description.serverGroupName) >> ['1','2','3'] - 1 * provider.getStack(description.region, description.serverGroupName) >> { throw throwable } - 0 * provider.getIpsForInstance(description.region, _ as String) - 0 * provider.getLoadBalancerStatusTree(description.region, _ as String) - 0 * provider.updatePoolMemberStatus(description.region, poolId, memberId, false) - Throwable actual = thrown(OpenstackOperationException) - actual.cause == throwable - - where: - operationClazz | memberStatus | testcase - DisableOpenstackAtomicOperation | false | 'disable' - EnableOpenstackAtomicOperation | true | 'enable' - } - - def "load balancer not found - #testcase"() { - given: - Address address = Mock(Address) { - it.addr >> { ip } - } - Throwable throwable = new OpenstackResourceNotFoundException("Unable to find load balancer lb1 in ${description.region}") - - when: - AtomicOperation operation = operationClazz.newInstance([description].toArray()) - operation.operate([]) - - then: - 1 * provider.getInstanceIdsForStack(description.region, description.serverGroupName) >> ids - 1 * provider.getStack(description.region, description.serverGroupName) >> stack - ids.each { id -> - 1 * provider.getIpsForInstance(description.region, id) >> [address] - } - lbIds.each { lbId -> - 1 * provider.getLoadBalancerStatusTree(description.region, lbId) >> { throw throwable } - 0 * provider.updatePoolMemberStatus(description.region, poolId, memberId, false) - } - Throwable actual = thrown(OpenstackOperationException) - actual.cause.cause == throwable - - where: - operationClazz | memberStatus | testcase - DisableOpenstackAtomicOperation | false | 'disable' - EnableOpenstackAtomicOperation | true | 'enable' - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ResizeOpenstackAtomicOperationSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ResizeOpenstackAtomicOperationSpec.groovy deleted file mode 100644 index be0336986d2..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/ResizeOpenstackAtomicOperationSpec.groovy +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ResizeOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackOperationException -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.heat.Stack -import spock.lang.Specification -import spock.lang.Subject - -class ResizeOpenstackAtomicOperationSpec extends Specification { - String accountName = 'myaccount' - String application = "app" - String stack = "stack" - String region = "r1" - int maxSize = 5 - int minSize = 3 - int desiredSize = 4 - String createdStackName = 'app-stack-details-v000' - String stackId = UUID.randomUUID().toString() - - - def credentials - def serverGroupParams - def description - def provider - - def setupSpec() { - TaskRepository.threadLocalTask.set(Mock(Task)) - } - - def setup() { - provider = Mock(OpenstackClientProvider) - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackNamedAccountCredentials creds = Mock(OpenstackNamedAccountCredentials) - creds.getStackConfig() >> new OpenstackConfigurationProperties.StackConfig(pollInterval: 0, pollTimeout: 1) - - OpenstackProviderFactory.createProvider(creds) >> { provider } - credentials = new OpenstackCredentials(creds) - serverGroupParams = new ServerGroupParameters(maxSize: maxSize, minSize: minSize, desiredSize: desiredSize) - description = new ResizeOpenstackAtomicOperationDescription(region: region, account: accountName, credentials: credentials, serverGroupName: createdStackName, capacity: new ResizeOpenstackAtomicOperationDescription.Capacity(min: 3, desired: 4, max: 5)) - } - - def "should resize a heat stack"() { - given: - @Subject def operation = new ResizeOpenstackAtomicOperation(description) - Stack stack = Mock(Stack) - String template = "foo: bar" - Map sub = ['servergroup_resource.yaml': 'foo: bar', 'servergroup_resource_member.yaml': 'foo: bar'] - List tags = ['lb123'] - - when: - operation.operate([]) - - then: - 2 * provider.getStack(region, createdStackName) >> stack - 1 * provider.getHeatTemplate(region, createdStackName, stackId) >> template - 1 * stack.getOutputs() >> [[output_key: ServerGroupConstants.SUBTEMPLATE_OUTPUT, output_value: sub['servergroup_resource.yaml']], [output_key: 
ServerGroupConstants.MEMBERTEMPLATE_OUTPUT, output_value: sub['servergroup_resource_member.yaml']]] - _ * stack.getParameters() >> serverGroupParams.toParamsMap() - _ * stack.getId() >> stackId - _ * stack.getName() >> createdStackName - _ * stack.getTags() >> tags - _ * stack.getStatus() >> "UPDATE_COMPLETE" - 1 * provider.updateStack(region, createdStackName, stackId, template, sub, _ as ServerGroupParameters, tags) - noExceptionThrown() - } - - def "should not resize a heat stack if the stack is missing"() { - given: - @Subject def operation = new ResizeOpenstackAtomicOperation(description) - - when: - operation.operate([]) - - then: - 1 * provider.getStack(region, createdStackName) >> null - thrown(OpenstackOperationException) - } - - def "should not resize a stack if exception is thrown"() { - given: - @Subject def operation = new ResizeOpenstackAtomicOperation(description) - Stack stack = Mock(Stack) - String template = "foo: bar" - Map sub = ['servergroup_resource.yaml': 'foo: bar', 'servergroup_resource_member.yaml': 'foo: bar'] - List tags = ['lb123'] - - when: - operation.operate([]) - - then: - 1 * provider.getStack(region, createdStackName) >> stack - 1 * provider.getHeatTemplate(region, createdStackName, stackId) >> template - 1 * stack.getOutputs() >> [[output_key: ServerGroupConstants.SUBTEMPLATE_OUTPUT, output_value: sub['servergroup_resource.yaml']], [output_key: ServerGroupConstants.MEMBERTEMPLATE_OUTPUT, output_value: sub['servergroup_resource_member.yaml']]] - _ * stack.getParameters() >> serverGroupParams.toParamsMap() - _ * stack.getId() >> stackId - _ * stack.getName() >> createdStackName - _ * stack.getTags() >> tags - 1 * provider.updateStack(region, createdStackName, stackId, template, sub, _ as ServerGroupParameters, tags) >> { - throw new OpenstackProviderException('foo') - } - thrown(OpenstackOperationException) - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/StackCheckerTest.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/StackCheckerTest.groovy deleted file mode 100644 index 70e6807231d..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/StackCheckerTest.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2018 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.ops.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import org.openstack4j.openstack.heat.domain.HeatStack -import spock.lang.Specification - -class StackCheckerTest extends Specification { - - def "should return true when heat stack status is CREATE_COMPLETE"() { - given: - def checker = new StackChecker(StackChecker.Operation.CREATE) - when: - def ready = checker.isReady(new HeatStack(status: "CREATE_COMPLETE")) - then: - ready - } - - def "should return false when heat stack status is CREATE_IN_PROGRESS"() { - given: - def checker = new StackChecker(StackChecker.Operation.CREATE) - when: - def ready = checker.isReady(new HeatStack(status: "CREATE_IN_PROGRESS")) - then: - !ready - } - - def "should thrown an exception when heat stack status is CREATE_FAILED"() { - given: - def checker = new StackChecker(StackChecker.Operation.CREATE) - when: - checker.isReady(new HeatStack(status: "CREATE_FAILED")) - then: - thrown(OpenstackProviderException) - } - - def "should thrown an exception when heat stack status is unknown"() { - given: - def checker = new StackChecker(StackChecker.Operation.CREATE) - when: - checker.isReady(new HeatStack(status: "UNKNOWN_STATUS")) - then: - thrown(OpenstackProviderException) - } - - def "should thrown an exception when stack is null"() { - given: - def checker = new StackChecker(StackChecker.Operation.CREATE) - when: - checker.isReady(null) - then: - thrown(OpenstackProviderException) - } - - def "should return true when stack is null but operation is delete"() { - given: - def checker = new StackChecker(StackChecker.Operation.DELETE) - when: - def ready = checker.isReady(null) - then: - ready - noExceptionThrown() - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackAttributeValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackAttributeValidatorSpec.groovy deleted file mode 100644 index 5df44395840..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackAttributeValidatorSpec.groovy +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackIdentityV3Provider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.openstack4j.model.network.ext.LbMethod -import org.springframework.validation.Errors -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class OpenstackAttributeValidatorSpec extends Specification { - - OpenstackAttributeValidator validator - def errors - def accountProvider - - void setup() { - errors = Mock(Errors) - validator = new OpenstackAttributeValidator('context', errors) - accountProvider = Mock(AccountCredentialsProvider) - } - - def "ValidateByRegex"() { - when: - boolean actual = validator.validateByRegex(value, 'test', regex) - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', 'context.test.invalid (Must match [A-Z]+)') - } - - where: - value | regex | result - 'foo' | '[A-Za-z]+' | true - '123' | '[A-Z]+' | false - - } - - def "ValidateByContainment"() { - when: - boolean actual = validator.validateByContainment(value, 'test', list) - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', 'context.test.invalid (Must be one of [1234])') - } - - where: - value | list | result - 'foo' | ['foo', 'bar'] | true - '123' | ['1234'] | false - - } - - def "Reject"() { - when: - validator.reject('foo', 'reason') - - then: - 1 * errors.rejectValue('context.foo', 'context.foo.invalid (reason)') - } - - def "validate range"() { - when: - boolean actual = validator.validateRange(value, min, max, 'foo') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.foo', "context.foo.notInRange (Must be in range [${min}, ${max}])") - } - - where: - value | min | max | result - 80 | 0 | 100 | true - 0 | 0 | 10 | true - -1 | 0 | 5 | false - 5 | 0 | 5 | true - 6 | 0 | 5 | false - } - - def "validate port range - #value"() { - when: - boolean actual = validator.validatePortRange(value, 'foo') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.foo', "context.foo.notInRange (Must be in range [-1, 65535])") - } - - where: - value | result - 65535 | true - -1 | true - -2 | false - 65536 | false - } - - def "ValidateNotEmpty"() { - when: - boolean actual = validator.validateNotEmpty(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', 'context.test.empty') - } - - where: - value | result - 'foo' | true - '' | false - null | false - } - - def "ValidateNotNull"() { - when: - boolean actual = validator.validateNotNull(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', _) - } - - where: - value | result - LbMethod.LEAST_CONNECTIONS | true - null | false - } - - def "ValidateNonNegative"() { - when: - boolean actual = validator.validateNonNegative(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', 'context.test.negative') - } - - where: - value | result - 1 | true - 0 | true - -1 | false - } - - def "ValidatePositive"() { - when: - boolean actual = 
validator.validatePositive(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', 'context.test.notPositive') - } - - where: - value | result - 1 | true - 0 | false - -1 | false - } - - def "valid account and credentials"() { - given: - def named = Mock(OpenstackNamedAccountCredentials) - def cred = Mock(OpenstackCredentials) - String account = 'account' - - when: - boolean actual = validator.validateCredentials(account, accountProvider) - - then: - actual - 1 * accountProvider.getCredentials(account) >> named - 1 * named.credentials >> cred - } - - def "empty account"() { - given: - def named = Mock(OpenstackNamedAccountCredentials) - def cred = Mock(OpenstackCredentials) - String account = '' - - when: - boolean actual = validator.validateCredentials(account, accountProvider) - - then: - !actual - 0 * accountProvider.getCredentials(account) >> named - 0 * named.credentials >> cred - 1 * errors.rejectValue('context.account', 'context.account.empty') - } - - def "valid account, invalid credentials"() { - given: - def named = Mock(AccountCredentials) - def cred = new Object() - String account = 'account' - - when: - boolean actual = validator.validateCredentials(account, accountProvider) - - then: - !actual - 1 * accountProvider.getCredentials(account) >> named - 1 * named.credentials >> cred - 1 * errors.rejectValue('context.account', 'context.account.notFound') - } - - def "ValidateUUID"() { - when: - boolean actual = validator.validateUUID(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', msg) - } - - where: - value | result | msg - '62e3b610-281a-11e6-bdf4-0800200c9a66' | true | 'context.test.notUUID' - '123' | false | 'context.test.notUUID' - '' | false | 'context.test.empty' - null | false | 'context.test.empty' - } - - def "ValidateCIDR"() { - when: - boolean actual = validator.validateCIDR(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', 'context.test.invalidCIDR') - } - - where: - value | result - '0.0.0.0/0' | true - '0.0.:0' | false - } - - def "ValidateCIDR - Empty"() { - when: - boolean actual = validator.validateCIDR('', 'test') - - then: - !actual - 1 * errors.rejectValue('context.test', 'context.test.empty') - } - - def "ValidateRuleType"() { - when: - boolean actual = validator.validateRuleType(value, 'test') - - then: - actual == result - if (!result) { - 1 * errors.rejectValue('context.test', _) - } - - where: - value | result - '' | false - 'SSH' | false - 'ICMP' | true - 'UDP' | true - 'TCP' | true - 'tcp' | true - } - - def "ValidateHttpMethod"() { - when: - boolean actual = validator.validateHttpMethod(value, 'test') - - then: - actual == result - if (!result) { - validator.errors.getFieldError('context.test')?.rejectedValue == expectedRejectedValue - } - - where: - value | result | expectedRejectedValue - 'GET' | true | '' - 'GETTER' | false | 'context.test.invalidHttpMethod' - '' | false | 'context.test.empty' - } - - def "ValidateHttpStatus"() { - when: - boolean actual = validator.validateHttpStatusCode(value, 'test') - - then: - actual == result - if (!result) { - validator.errors.getFieldError('context.test')?.rejectedValue == expectedRejectedValue - } - - where: - value | result | expectedRejectedValue - 200 | true | '' - 199 | false | 'context.test.invalidHttpStatusCode' - null | false | 'context.test.invalidHttpStatusCode' - } - - def "ValidateURL"() { - when: - boolean actual = validator.validateURI(value, 
'test') - - then: - actual == result - if (!result) { - validator.errors.getFieldError('context.test')?.rejectedValue == expectedRejectedValue - } - - where: - value | result | expectedRejectedValue - 'http://www.goggle.com' | true | '' - '/test' | true | '' - '' | false | 'context.test.empty' - } - - def "ValidateGreaterThan"() { - when: - boolean actual = validator.validateGreaterThan(subject, other, "test") - - then: - actual == result - if (!result) { - validator.errors.getFieldError('context.test')?.rejectedValue == expectedRejectedValue - } - - where: - subject | other | result | expectedRejectedValue - 1 | 0 | true | '' - 2 | 2 | false | '' - 3 | 4 | false | 'context.test.empty' - } - - def "ValidateGreaterThanEqual"() { - when: - boolean actual = validator.validateGreaterThanEqual(subject, other, "test") - - then: - actual == result - if (!result) { - validator.errors.getFieldError('context.test')?.rejectedValue == expectedRejectedValue - } - - where: - subject | other | result | expectedRejectedValue - 1 | 0 | true | '' - 2 | 2 | true | '' - 3 | 4 | false | 'context.test.empty' - } - - def "ValidateRegion"() { - given: - String region = 'region1' - def v3 = Mock(OpenstackIdentityV3Provider) - def v3provider = new OpenstackClientProvider(v3, null, null, null, null, null, null) - - when: - boolean actual = validator.validateRegion(region, v3provider) - - then: - _ * v3.allRegions >> result - actual == expected - - when: - actual = validator.validateRegion('', v3provider) - - then: - 0 * v3.allRegions - !actual - - where: - result | expected - ['region1'] | true - [] | false - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackDescriptionValidatorSpec.groovy deleted file mode 100644 index 7052528c13c..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/OpenstackDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.OpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification - -class OpenstackDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - FooValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def "Validate no exception"() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> { ['r1'] } - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - 1 * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - 1 * getCredentials(_) >> credentials - } - validator = new FooValidator<>(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'test', instanceIds: ['1','2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - def "Validate empty account exception"() { - given: - credz = Mock(OpenstackCredentials) - credentials = Mock(OpenstackNamedAccountCredentials) { - 0 * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - 0 * getCredentials(_) >> credentials - } - errors = Mock(Errors) - validator = new FooValidator<>(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: '', instanceIds: ['1','2']) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - } - - def "Validate invalid region exception"() { - clientProvider = Mock(OpenstackClientProvider) - clientProvider.getProperty('allRegions') >> ['r1'] - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - 1 * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - 1 * getCredentials(_) >> credentials - } - validator = new FooValidator<>(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'test', instanceIds: ['1','2'], credentials: credz, region: 'r2') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - } - - def "Validate empty region exception"() { - clientProvider = Mock(OpenstackClientProvider) - clientProvider.getProperty('allRegions') >> ['r1'] - GroovyMock(OpenstackProviderFactory, global: true) - 
OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - 1 * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - 1 * getCredentials(_) >> credentials - } - validator = new FooValidator<>(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'test', instanceIds: ['1','2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - } - - static class FooValidator extends AbstractOpenstackDescriptionValidator { - - @Override - void validate(OpenstackAttributeValidator validator, List priorDescriptions, OpenstackAtomicOperationDescription description, Errors errors) { - } - - @Override - String getContext() { - "foo" - } - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidatorSpec.groovy deleted file mode 100644 index c7517ab2899..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/discovery/AbstractEnableDisableInstancesInDiscoveryDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.discovery - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class AbstractEnableDisableInstancesInDiscoveryDescriptionValidatorSpec extends Specification { - - Errors errors - @Shared - AccountCredentialsProvider provider - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - AbstractEnableDisableInstancesInDiscoveryDescriptionValidator validator - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - } - - def "Validate no exception - #type"() { - given: - validator = type.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'foo', instanceIds: ['1', '2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - - where: - type << [EnableInstancesInDiscoveryDescriptionValidator, DisableInstancesInDiscoveryDescriptionValidator] - } - - def "Validate empty account exception - #type"() { - given: - validator = type.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: '', instanceIds: ['1', '2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_, _) - - where: - type << [EnableInstancesInDiscoveryDescriptionValidator, DisableInstancesInDiscoveryDescriptionValidator] - } - - def "Validate empty instance list exception - #type"() { - given: - validator = type.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'foo', instanceIds: [], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_, _) - - where: - type << [EnableInstancesInDiscoveryDescriptionValidator, DisableInstancesInDiscoveryDescriptionValidator] - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/AbstractRegistrationOpenstackInstancesDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/AbstractRegistrationOpenstackInstancesDescriptionValidatorSpec.groovy deleted file mode 100644 index a20e5a7fa1c..00000000000 --- 
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/AbstractRegistrationOpenstackInstancesDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesRegistrationDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -/** - * - */ -@Unroll -class AbstractRegistrationOpenstackInstancesDescriptionValidatorSpec extends Specification { - - Errors errors - @Shared - AccountCredentialsProvider provider - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - } - - def "Validate no exception"() { - given: - def validator = validatorClass.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesRegistrationDescription description = new OpenstackInstancesRegistrationDescription(account: 'foo', instanceIds: ['1','2'], loadBalancerIds: ['lb1','lb2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - - where: - validatorClass << [RegisterOpenstackInstancesDescriptionValidator, DeregisterOpenstackInstancesDescriptionValidator] - } - - def "Validate empty account exception"() { - given: - def validator = validatorClass.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesRegistrationDescription description = new OpenstackInstancesRegistrationDescription(account: '', instanceIds: ['1','2'], loadBalancerIds: ['lb1','lb2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - - where: - validatorClass << [RegisterOpenstackInstancesDescriptionValidator, DeregisterOpenstackInstancesDescriptionValidator] - } - - def "Validate empty instance list 
exception"() { - given: - def validator = validatorClass.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesRegistrationDescription description = new OpenstackInstancesRegistrationDescription(account: 'foo', instanceIds: [], loadBalancerIds: ['lb1','lb2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - - where: - validatorClass << [RegisterOpenstackInstancesDescriptionValidator, DeregisterOpenstackInstancesDescriptionValidator] - } - - def "Validate empty load balancer list exception"() { - given: - def validator = validatorClass.newInstance(accountCredentialsProvider: provider) - OpenstackInstancesRegistrationDescription description = new OpenstackInstancesRegistrationDescription(account: 'foo', instanceIds: ['1','2'], loadBalancerIds: [], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - - where: - validatorClass << [RegisterOpenstackInstancesDescriptionValidator, DeregisterOpenstackInstancesDescriptionValidator] - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RebootOpenstackInstancesDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RebootOpenstackInstancesDescriptionValidatorSpec.groovy deleted file mode 100644 index ce27efe07f0..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/RebootOpenstackInstancesDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification - -class RebootOpenstackInstancesDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - RebootOpenstackInstancesDescriptionValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - } - - def "Validate no exception"() { - given: - validator = new RebootOpenstackInstancesDescriptionValidator(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'foo', instanceIds: ['1', '2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - def "Validate empty account exception"() { - given: - validator = new RebootOpenstackInstancesDescriptionValidator(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: '', instanceIds: ['1', '2'], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_, _) - } - - def "Validate empty instance list exception"() { - given: - validator = new RebootOpenstackInstancesDescriptionValidator(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'foo', instanceIds: [], credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_, _) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/TerminateOpenstackInstancesDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/TerminateOpenstackInstancesDescriptionValidatorSpec.groovy deleted file mode 100644 index ae4f6c255f2..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/instance/TerminateOpenstackInstancesDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
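The assertions throughout these specs are interaction counts on the mocked Errors object rather than checks on a return value: 0 * errors.rejectValue(_, _) fails the feature if the validator rejects anything at all, while 1 * demands exactly one rejection. A self-contained sketch of that style, assuming a toy validator and a hypothetical Errors interface in place of Spring's:

    import spock.lang.Specification

    interface Errors { void rejectValue(String field, String code) }   // stand-in for Spring's Errors
    class AccountValidator {
      void validate(String account, Errors errors) {
        if (!account) errors.rejectValue('account', 'account.empty')
      }
    }

    class RejectionCardinalitySpec extends Specification {
      Errors errors = Mock()
      AccountValidator validator = new AccountValidator()

      def "a valid account triggers no rejections"() {
        when:
        validator.validate('foo', errors)

        then: 'any rejectValue call at all would fail this feature'
        0 * errors.rejectValue(_, _)
      }

      def "an empty account is rejected exactly once"() {
        when:
        validator.validate('', errors)

        then:
        1 * errors.rejectValue('account', 'account.empty')
      }
    }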
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.instance - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.instance.OpenstackInstancesDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification - -/** - * - */ -class TerminateOpenstackInstancesDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - TerminateOpenstackInstancesDescriptionValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - } - - def "Validate no exception"() { - given: - validator = new TerminateOpenstackInstancesDescriptionValidator(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'foo', instanceIds: ['1','2'], region: 'r1', credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - def "Validate empty instance list exception"() { - given: - validator = new TerminateOpenstackInstancesDescriptionValidator(accountCredentialsProvider: provider) - OpenstackInstancesDescription description = new OpenstackInstancesDescription(account: 'foo', instanceIds: [], region: 'r1', credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/DeleteOpenstackLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/DeleteOpenstackLoadBalancerDescriptionValidatorSpec.groovy deleted file mode 100644 index d92984d261b..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/DeleteOpenstackLoadBalancerDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2016 Target, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.DeleteOpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class DeleteOpenstackLoadBalancerDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - DeleteOpenstackLoadBalancerDescriptionValidator validator - OpenstackNamedAccountCredentials credentials - @Shared - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - } - - def "Validate no exception"() { - given: - validator = new DeleteOpenstackLoadBalancerDescriptionValidator(accountCredentialsProvider: provider) - DeleteOpenstackLoadBalancerDescription description = new DeleteOpenstackLoadBalancerDescription(account: 'foo', region: 'r1', id: UUID.randomUUID().toString(), credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - @Unroll - def "Validate empty required fields exception"() { - given: - validator = new DeleteOpenstackLoadBalancerDescriptionValidator(accountCredentialsProvider: provider) - DeleteOpenstackLoadBalancerDescription description = new DeleteOpenstackLoadBalancerDescription(account: 'a', region: 'r1', credentials: credz, id: '') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/UpsertOpenstackLoadBalancerDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/UpsertOpenstackLoadBalancerDescriptionValidatorSpec.groovy deleted file mode 100644 index 8801fd70e02..00000000000 --- 
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/loadbalancer/UpsertOpenstackLoadBalancerDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.loadbalancer - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor.HealthMonitorType -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Listener -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.loadbalancer.OpenstackLoadBalancerDescription.Listener.ListenerType -import com.netflix.spinnaker.clouddriver.openstack.deploy.validators.OpenstackAttributeValidator -import com.netflix.spinnaker.clouddriver.openstack.domain.HealthMonitor -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification -import spock.lang.Unroll - - - -@Unroll -class UpsertOpenstackLoadBalancerDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - UpsertOpenstackLoadBalancerAtomicOperationValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - } - - def "Validate create load balancer no exceptions"() { - given: - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator(accountCredentialsProvider: provider) - OpenstackLoadBalancerDescription description = new OpenstackLoadBalancerDescription(account: 'foo' - , region: 'r1' - , name: 'name' - , subnetId: UUID.randomUUID().toString() - , algorithm: OpenstackLoadBalancerDescription.Algorithm.ROUND_ROBIN - , securityGroups: [UUID.randomUUID().toString()] - , listeners: [ new Listener(externalPort: 80, externalProtocol: ListenerType.HTTP, internalPort: 8080)] - , healthMonitor: new HealthMonitor(type: HealthMonitorType.PING, delay: 1, 
timeout: 1, maxRetries: 1) - , credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - def "Validate update load balancer no exceptions"() { - given: - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator(accountCredentialsProvider: provider) - OpenstackLoadBalancerDescription description = new OpenstackLoadBalancerDescription(account: 'foo' - , region: 'r1' - , id : UUID.randomUUID().toString() - , name: 'name' - , subnetId: UUID.randomUUID().toString() - , algorithm: OpenstackLoadBalancerDescription.Algorithm.ROUND_ROBIN - , securityGroups: [UUID.randomUUID().toString()] - , listeners: [ new Listener(externalPort: 80, externalProtocol: ListenerType.HTTP, internalPort: 8080)] - , healthMonitor: new HealthMonitor(type: HealthMonitorType.PING, delay: 1, timeout: 1, maxRetries: 1) - , credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - def "Validate missing required field - #attribute"() { - given: - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator(accountCredentialsProvider: provider) - Map inputMap = [account : 'foo' - , region: 'r1' - , name: 'name' - , subnetId: UUID.randomUUID().toString() - , algorithm: OpenstackLoadBalancerDescription.Algorithm.ROUND_ROBIN - , securityGroups: [UUID.randomUUID().toString()] - , listeners: [ new Listener(externalPort: 80, externalProtocol: ListenerType.HTTP, internalPort: 8080)] - , credentials: credz] - inputMap.remove(attribute) - OpenstackLoadBalancerDescription description = new OpenstackLoadBalancerDescription(inputMap) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue("${validator.context}.${attribute}", _) - - where: - attribute << ['name', 'region', 'subnetId', 'algorithm', 'listeners'] - } - - def "Validate invalid field - #attribute"() { - given: - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator(accountCredentialsProvider: provider) - Map inputMap = ['account' : 'foo' - , region: 'r1' - , name: 'name' - , id : UUID.randomUUID().toString() - , subnetId: UUID.randomUUID().toString() - , algorithm: OpenstackLoadBalancerDescription.Algorithm.ROUND_ROBIN - , securityGroups: [UUID.randomUUID().toString()] - , listeners: [ new Listener(externalPort: 80, externalProtocol: ListenerType.HTTP, internalPort: 8080)] - , credentials: credz] - inputMap.put(attribute.key, attribute.value) - OpenstackLoadBalancerDescription description = new OpenstackLoadBalancerDescription(inputMap) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue("${validator.context}.${attribute.key}", _) - - where: - attribute << [name: '', region: '', id : 'abc', subnetId : null, algorithm: null, listeners: []] - } - - def "Validate health monitor values - #attribute"() { - given: - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator(accountCredentialsProvider: provider) - OpenstackAttributeValidator attributeValidator = new OpenstackAttributeValidator(validator.context, errors) - Map inputMap = ['delay': 5 - , 'timeout': 5 - , 'maxRetries': 5 - , 'httpMethod': 'GET' - , 'expectedCodes': [200] - , 'url' : 'http://www.google.com'] - inputMap.put(attribute.key, attribute.value) - - when: - validator.validateHealthMonitor(attributeValidator, new HealthMonitor(inputMap)) - - then: - 1 * errors.rejectValue("${validator.context}.${attribute.key}", _) - - where: - attribute << [ 'type': null,
'delay': -1, 'timeout': -1, 'maxRetries': -1, 'httpMethod': 'test', 'expectedCodes': [20], 'url': '\\backslash'] - } - - def "Validate health monitor success"() { - given: - String URL = 'URL' - List expectedCodes = [100] - int delay, timeout, maxRetries = 2 - String method = 'GET' - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator() - OpenstackAttributeValidator attributeValidator = Mock() - - when: - validator.validateHealthMonitor(attributeValidator, new HealthMonitor(delay: delay, timeout: timeout, maxRetries: maxRetries, httpMethod: method, url: URL, expectedCodes: expectedCodes)) - - then: - 1 * attributeValidator.validatePositive(delay, _) - 1 * attributeValidator.validatePositive(timeout, _) - 1 * attributeValidator.validatePositive(maxRetries, _) - 1 * attributeValidator.validateHttpMethod(method, _) - expectedCodes.size() * attributeValidator.validateHttpStatusCode(_, _) - 1 * attributeValidator.validateURI(URL, _) - } - - def "Validate health monitor success without options"() { - given: - String URL = null - List expectedCodes = null - int delay, timeout, maxRetries = 2 - String method = null - validator = new UpsertOpenstackLoadBalancerAtomicOperationValidator() - OpenstackAttributeValidator attributeValidator = Mock() - - when: - validator.validateHealthMonitor(attributeValidator, new HealthMonitor(delay: delay, timeout: timeout, maxRetries: maxRetries, httpMethod: method, url: URL, expectedCodes: expectedCodes)) - - then: - 1 * attributeValidator.validatePositive(delay, _) - 1 * attributeValidator.validatePositive(timeout, _) - 1 * attributeValidator.validatePositive(maxRetries, _) - 0 * attributeValidator.validateHttpMethod(method, _) - 0 * attributeValidator.validateHttpStatusCode(_, _) - 0 * attributeValidator.validateURI(URL, _) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/DeleteOpenstackSecurityGroupDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/DeleteOpenstackSecurityGroupDescriptionValidatorSpec.groovy deleted file mode 100644 index 14c8ab6d1f4..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/DeleteOpenstackSecurityGroupDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
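The two "Validate health monitor success" features above verify delegation rather than outcomes: a Mock() OpenstackAttributeValidator is injected and the test counts which checks ran, so the optional attributes (httpMethod, url, expectedCodes) can be shown to be skipped when null. Incidentally, the declaration int delay, timeout, maxRetries = 2 initializes only maxRetries; Groovy defaults delay and timeout to 0, and the interaction still matches because the same zero flows into validatePositive. A condensed sketch of the delegation-counting idea, with hypothetical MonitorChecker/AttrValidator types:

    import spock.lang.Specification

    // Hypothetical collaborator playing the OpenstackAttributeValidator role.
    class AttrValidator {
      void validatePositive(Integer value, String field) { }
      void validateURI(String uri, String field) { }
    }
    class MonitorChecker {
      void check(AttrValidator v, Map monitor) {
        v.validatePositive(monitor.delay as Integer, 'delay')
        if (monitor.url) v.validateURI(monitor.url as String, 'url')   // optional attribute
      }
    }

    class DelegationSpec extends Specification {
      def "optional attributes are validated only when present"() {
        given:
        AttrValidator attrValidator = Mock()

        when:
        new MonitorChecker().check(attrValidator, [delay: 2])

        then: 'the mandatory check ran once, the optional one never'
        1 * attrValidator.validatePositive(2, 'delay')
        0 * attrValidator.validateURI(_, _)
      }
    }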
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.DeleteOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class DeleteOpenstackSecurityGroupDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - @Shared - DeleteOpenstackSecurityGroupDescriptionValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - validator = new DeleteOpenstackSecurityGroupDescriptionValidator(accountCredentialsProvider: provider) - } - - def "valid id"() { - given: - def id = UUID.randomUUID().toString() - def description = new DeleteOpenstackSecurityGroupDescription(account: 'foo', region: 'r1', id: id, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - @Unroll - def "invalid ids"() { - given: - def description = new DeleteOpenstackSecurityGroupDescription(account: 'foo', id: id, credentials: credz, region: 'r1') - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_, msg) - - where: - id | expected | msg - null | false | validator.context + '.id.empty' - '' | false | validator.context + '.id.empty' - '1234' | false | validator.context + '.id.notUUID' - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/UpsertOpenstackSecurityGroupDescriptionValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/UpsertOpenstackSecurityGroupDescriptionValidatorSpec.groovy deleted file mode 100644 index b37230c9af3..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/securitygroup/UpsertOpenstackSecurityGroupDescriptionValidatorSpec.groovy +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
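The id checks above distinguish two reject codes: blank ids map to .id.empty and non-UUID strings to .id.notUUID. A runnable sketch of that distinction (idErrorCode is a hypothetical helper; the real checks live in OpenstackAttributeValidator):

    // Returns the reject-code suffix the spec expects, or null for a valid id.
    String idErrorCode(String id) {
      if (!id) return 'id.empty'
      try {
        UUID.fromString(id)
        return null
      } catch (IllegalArgumentException ignored) {
        return 'id.notUUID'
      }
    }

    assert idErrorCode(null) == 'id.empty'
    assert idErrorCode('') == 'id.empty'
    assert idErrorCode('1234') == 'id.notUUID'
    assert idErrorCode(UUID.randomUUID().toString()) == null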
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.securitygroup - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.securitygroup.UpsertOpenstackSecurityGroupDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class UpsertOpenstackSecurityGroupDescriptionValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - OpenstackNamedAccountCredentials credentials - OpenstackClientProvider clientProvider - @Shared - OpenstackCredentials credz - @Shared - UpsertOpenstackSecurityGroupDescriptionValidator validator - - - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - validator = new UpsertOpenstackSecurityGroupDescriptionValidator(accountCredentialsProvider: provider) - } - - def "validate no rules"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'name' - def desc = 'description' - def description = new UpsertOpenstackSecurityGroupDescription(account: 'foo', region: 'r1', id: id, name: name, description: desc, rules: [], credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - def "validate with rules"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'name' - def desc = 'description' - def rules = [ - new UpsertOpenstackSecurityGroupDescription.Rule(fromPort: 80, toPort: 80, cidr: '0.0.0.0/0', ruleType: 'TCP'), - new UpsertOpenstackSecurityGroupDescription.Rule(fromPort: 443, toPort: 443, remoteSecurityGroupId: UUID.randomUUID().toString(), ruleType: 'UDP'), - new UpsertOpenstackSecurityGroupDescription.Rule(icmpType: 2, icmpCode: 3, cidr: '0.0.0.0/0', ruleType: 'ICMP'), - new UpsertOpenstackSecurityGroupDescription.Rule(fromPort: 22, toPort: 22, remoteSecurityGroupId: 'SELF', ruleType: 'TCP'), - ] - def description = new UpsertOpenstackSecurityGroupDescription(account: 'foo', region: 'r1', id: id, name: name, description: desc, rules: rules, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - def "validate with invalid id"() { - setup: - def id = 'not a uuid' - def name = 'name' - def desc 
= 'description' - def description = new UpsertOpenstackSecurityGroupDescription(account: 'foo', region: 'r1', id: id, name: name, description: desc, rules: [], credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue("${validator.context}.id", "${validator.context}.id.notUUID") - } - - def "validate without id is valid"() { - setup: - def name = 'name' - def desc = 'description' - def description = new UpsertOpenstackSecurityGroupDescription(account: 'foo', region: 'r1', id: null, name: name, description: desc, rules: [], credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_, _) - } - - @Unroll - def "validate with invalid rule"() { - setup: - def id = UUID.randomUUID().toString() - def name = 'name' - def desc = 'description' - def rules = [ - new UpsertOpenstackSecurityGroupDescription.Rule( - ruleType: ruleType, - fromPort: fromPort, - toPort: toPort, - icmpType: icmpType, - icmpCode: icmpCode, - cidr: cidr, - remoteSecurityGroupId: remoteGroupId - ) - ] - def description = new UpsertOpenstackSecurityGroupDescription(account: 'foo', region: 'r1', id: id, name: name, description: desc, rules: rules, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_, { it.endsWith(rejectValue) }) - - where: - ruleType | fromPort | toPort | icmpType | icmpCode | cidr | remoteGroupId | rejectValue - 'TCP' | 80 | 80 | null | null | '0.0.0.0' | null | '.cidr.invalidCIDR' - 'TCP' | 80 | 80 | null | null | null | null | '.cidr.empty' - 'TCP' | -2 | 80 | null | null | '0.0.0.0/0' | null | '.fromPort.notInRange (Must be in range [-1, 65535])' - 'TCP' | 80 | -2 | null | null | '0.0.0.0/0' | null | '.toPort.notInRange (Must be in range [-1, 65535])' - 'TCP' | 80 | 80 | null | null | null | 'abc' | '.remoteSecurityGroupId.notUUID' - 'ICMP' | null | null | -2 | 4 | '0.0.0.0/0' | null | '.notInRange (Must be in range [-1, 255])' - 'ICMP' | null | null | 8 | 256 | '0.0.0.0/0' | null | '.notInRange (Must be in range [-1, 255])' - 'SSH' | 2 | 2 | null | null | '0.0.0.0/0' | null | '.invalidSecurityGroupRuleType' - - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/CloneOpenstackAtomicOperationValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/CloneOpenstackAtomicOperationValidatorSpec.groovy deleted file mode 100644 index 64d4444808f..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/CloneOpenstackAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
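The rule table above encodes the bounds the validator enforces: TCP/UDP ports must fall in [-1, 65535], ICMP type and code in [-1, 255], a CIDR needs a /prefix, and remote security group ids must be UUIDs (or the literal SELF seen in the valid-rules feature). A standalone sketch of the range and CIDR checks, using hypothetical helpers that are deliberately simplistic about the CIDR grammar:

    boolean inRange(Integer value, int min, int max) {
      value != null && value >= min && value <= max
    }
    boolean validCidr(String cidr) {
      // requires the /prefix that the rejected '0.0.0.0' case lacks
      cidr ==~ /\d{1,3}(\.\d{1,3}){3}\/\d{1,2}/
    }

    assert inRange(80, -1, 65535)
    assert !inRange(-2, -1, 65535)   // '.fromPort.notInRange (Must be in range [-1, 65535])'
    assert !inRange(256, -1, 255)    // ICMP code out of range
    assert validCidr('0.0.0.0/0')
    assert !validCidr('0.0.0.0')     // '.cidr.invalidCIDR'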
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.CloneOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ResizeOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification - -class CloneOpenstackAtomicOperationValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - ResizeOpenstackAtomicOperationValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - String account = 'foo' - String application = 'app1' - String region = 'r1' - String stack = 'stack1' - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - validator = new ResizeOpenstackAtomicOperationValidator(accountCredentialsProvider: provider) - } - - def "Validate - no error"() { - given: - ResizeOpenstackAtomicOperationDescription description = new ResizeOpenstackAtomicOperationDescription(serverGroupName: 'from', region: 'r1', credentials: credz, account: account, capacity: new ResizeOpenstackAtomicOperationDescription.Capacity(max: 5, desired: 4, min: 3)) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - def "Validate invalid sizing"() { - given: - ResizeOpenstackAtomicOperationDescription description = new ResizeOpenstackAtomicOperationDescription(serverGroupName: 'from', region: 'r1', credentials: credz, account: account, capacity: new ResizeOpenstackAtomicOperationDescription.Capacity(max: 3, min: 4, desired: 5)) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DeployOpenstackAtomicOperationValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DeployOpenstackAtomicOperationValidatorSpec.groovy deleted file mode 100644 index 43176c17f05..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DeployOpenstackAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
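CloneOpenstackAtomicOperationValidatorSpec, despite its name, exercises ResizeOpenstackAtomicOperationValidator, and the sizing rule it pins down is the ordering constraint min <= desired <= max (3/4/5 passes; max 3 with min 4 fails). A one-function sketch of that constraint, assuming non-negative sizes are also required, as the deploy specs' three-rejection case suggests:

    boolean validCapacity(int min, int desired, int max) {
      min >= 0 && min <= desired && desired <= max
    }

    assert validCapacity(3, 4, 5)      // the passing case above
    assert !validCapacity(4, 5, 3)     // max below min: rejected
    assert !validCapacity(-1, -2, -3)  // negative sizing: rejected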
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.UserDataType -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification -import spock.lang.Unroll - -class DeployOpenstackAtomicOperationValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - DeployOpenstackAtomicOperationValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - String account = 'foo' - String application = 'app1' - String region = 'r1' - String stack = 'stack1' - String freeFormDetails = 'test' - boolean disableRollback = false - int timeoutMins = 5 - String instanceType = 'm1.small' - String image = 'ubuntu-latest' - int maxSize = 5 - int minSize = 3 - int desiredSize = 4 - String subnetId = '1234' - List loadBalancerIds = ['5678'] - List securityGroups = ['sg1'] - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - validator = new DeployOpenstackAtomicOperationValidator(accountCredentialsProvider: provider) - } - - def "Validate no error"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - def "Validate with scaling parameters"() { - given: - ServerGroupParameters.Scaler scaleup = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: 1, period: 60, 
threshold: 50) - ServerGroupParameters.Scaler scaledown = new ServerGroupParameters.Scaler(cooldown: 60, adjustment: -1, period: 600, threshold: 15) - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups, autoscalingType: ServerGroupParameters.AutoscalingType.CPU, scaleup: scaleup, scaledown: scaledown) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - def "Validate with invalid scaling parameters"() { - given: - ServerGroupParameters.Scaler scaleup = new ServerGroupParameters.Scaler(cooldown: -1, adjustment: 10, period: -1, threshold: -1) - ServerGroupParameters.Scaler scaledown = new ServerGroupParameters.Scaler(cooldown: -1, adjustment: -15, period: -1, threshold: -1) - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups, autoscalingType: ServerGroupParameters.AutoscalingType.CPU, scaleup: scaleup, scaledown: scaledown) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 8 * errors.rejectValue(_,_) - } - - @Unroll - def "Validate create missing required core field - #attribute"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - if (attribute != 'stack') { - description."$attribute" = '' - } else { - description."$attribute" = '1-2-3' - } - - when: - validator.validate([], description, errors) - - then: - times * errors.rejectValue(_,_) - - where: - attribute << ['application', 'stack'] - times << [2,1] - } - - @Unroll - def "Validate create missing required template field - #attribute"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, 
timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - description.serverGroupParameters."$attribute" = null - - when: - validator.validate([], description, errors) - - then: - times * errors.rejectValue(_,_) - - where: - attribute << ['instanceType', 'image', 'maxSize', 'minSize', 'desiredSize', 'subnetId', 'securityGroups'] - times << [1,1,1,2,2,1,1] - } - - def "Validate sizing - error"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: -3, minSize: -1, desiredSize: -2, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 3 * errors.rejectValue(_,_) - } - - @Unroll - def "validate userData"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, userDataType: userDataType.toString(), userData: userData, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - times * errors.rejectValue(_,_) - - where: - userDataType | userData | times - UserDataType.URL | 'http://foobar.com' | 0 - UserDataType.URL | 'http$$$asdfdfadf' | 0 - UserDataType.TEXT | '#!/bin/bash' | 0 - UserDataType.SWIFT | 'container:my/object/file' | 0 - UserDataType.SWIFT | 'my/object/file' | 1 - UserDataType.SWIFT | 'container:' | 1 - null | null | 0 - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DestroyOpenstackAtomicOperationValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DestroyOpenstackAtomicOperationValidatorSpec.groovy deleted file mode 100644 index 9dd0cb7dd92..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/DestroyOpenstackAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
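The userData table above implies that the only format actually parsed is the SWIFT one: a reference must look like container:object with both halves non-empty ('container:my/object/file' passes; 'my/object/file' and 'container:' are each rejected once), while URL- and TEXT-typed payloads pass through even when malformed. A sketch of that Swift-reference shape check, with a hypothetical helper name:

    boolean validSwiftReference(String userData) {
      if (!userData) return false
      int i = userData.indexOf(':')
      i > 0 && i < userData.length() - 1   // non-empty container and object parts
    }

    assert validSwiftReference('container:my/object/file')
    assert !validSwiftReference('my/object/file')   // missing container prefix
    assert !validSwiftReference('container:')       // missing object path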
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.OpenstackServerGroupAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification - -class DestroyOpenstackAtomicOperationValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - DestroyOpenstackAtomicOperationValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - validator = new DestroyOpenstackAtomicOperationValidator(accountCredentialsProvider: provider) - } - - def "Validate no exception"() { - given: - OpenstackServerGroupAtomicOperationDescription description = new OpenstackServerGroupAtomicOperationDescription(account: 'foo', serverGroupName: 'foo', region: 'r1', credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - def "Validate empty server group name exception"() { - given: - OpenstackServerGroupAtomicOperationDescription description = new OpenstackServerGroupAtomicOperationDescription(account: 'foo', serverGroupName: '', region: 'r1', credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 1 * errors.rejectValue(_,_) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/ResizeOpenstackAtomicOperationValidatorSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/ResizeOpenstackAtomicOperationValidatorSpec.groovy deleted file mode 100644 index 5160849ee24..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/deploy/validators/servergroup/ResizeOpenstackAtomicOperationValidatorSpec.groovy +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.deploy.validators.servergroup - -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackProviderFactory -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.DeployOpenstackAtomicOperationDescription -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.springframework.validation.Errors -import spock.lang.Specification -import spock.lang.Unroll - -class ResizeOpenstackAtomicOperationValidatorSpec extends Specification { - - Errors errors - AccountCredentialsProvider provider - DeployOpenstackAtomicOperationValidator validator - OpenstackNamedAccountCredentials credentials - OpenstackCredentials credz - OpenstackClientProvider clientProvider - - String account = 'foo' - String application = 'app1' - String region = 'r1' - String stack = 'stack1' - String freeFormDetails = 'test' - boolean disableRollback = false - int timeoutMins = 5 - String instanceType = 'm1.small' - String image = 'ubuntu-latest' - int maxSize = 5 - int minSize = 3 - int desiredSize = 4 - String subnetId = '1234' - List loadBalancerIds = ['5678'] - List securityGroups = ['sg1'] - - def setup() { - clientProvider = Mock(OpenstackClientProvider) { - getAllRegions() >> ['r1'] - } - GroovyMock(OpenstackProviderFactory, global: true) - OpenstackProviderFactory.createProvider(credentials) >> clientProvider - credz = new OpenstackCredentials(credentials) - errors = Mock(Errors) - credentials = Mock(OpenstackNamedAccountCredentials) { - _ * getCredentials() >> credz - } - provider = Mock(AccountCredentialsProvider) { - _ * getCredentials(_) >> credentials - } - validator = new DeployOpenstackAtomicOperationValidator(accountCredentialsProvider: provider) - } - - def "Validate no error"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 0 * errors.rejectValue(_,_) - } - - - @Unroll - def "Validate create missing required core field - #attribute"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - description."$attribute" = value 
- - when: - validator.validate([], description, errors) - - then: - times * errors.rejectValue(_,_) - - where: - attribute | times | value - 'application' | 2 | '' - 'stack' | 1 | '1-2-3' - - } - - @Unroll - def "Validate create missing required template field - #attribute"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: maxSize, minSize: minSize, desiredSize: desiredSize, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - description.serverGroupParameters."$attribute" = null - - when: - validator.validate([], description, errors) - - then: - times * errors.rejectValue(_,_) - - where: - attribute | times - 'instanceType' | 1 - 'image' | 1 - 'maxSize' | 1 - 'minSize' | 2 - 'desiredSize' | 2 - 'subnetId' | 1 - 'securityGroups' | 1 - } - - def "Validate sizing - error"() { - given: - ServerGroupParameters params = new ServerGroupParameters(instanceType: instanceType, image:image, maxSize: -3, minSize: -1, desiredSize: -2, subnetId: subnetId, loadBalancers: loadBalancerIds, securityGroups: securityGroups) - DeployOpenstackAtomicOperationDescription description = new DeployOpenstackAtomicOperationDescription(account: account, application: application, region: region, stack: stack, freeFormDetails: freeFormDetails, disableRollback: disableRollback, timeoutMins: timeoutMins, serverGroupParameters: params, credentials: credz) - - when: - validator.validate([], description, errors) - - then: - 3 * errors.rejectValue(_,_) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/LoadBalancerResolverSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/LoadBalancerResolverSpec.groovy deleted file mode 100644 index 46fd5cab4f9..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/domain/LoadBalancerResolverSpec.groovy +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.domain - -import spock.lang.Specification -import spock.lang.Unroll - -@Unroll -class LoadBalancerResolverSpec extends Specification { - - LoadBalancerResolver resolver = new MyLoadBalancerResolver() - - def "get internal port - #testCase"() { - when: - Map result = resolver.parseListenerKey(description) - - then: - result.toString() == expected.toString() - - where: - testCase | description | expected - 'not found' | 'test' | [:] - 'found' | 'HTTP:80:8080' | [externalProtocol: 'HTTP', externalPort: '80', internalPort: 8080] - 'null' | null | [:] - } - - - def "get created time - #testCase"() { - when: - Long result = resolver.parseCreatedTime(description) - - then: - result == expected - - where: - testCase | description | expected - 'not found' | 'test' | null - 'found' | 'created_time=42' | 42l - 'found' | 'internal_port=20,created_time=42' | 42l - 'found' | 'created_time=42,internal_port=20' | 42l - 'null' | null | null - } - - class MyLoadBalancerResolver implements LoadBalancerResolver {} -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackFloatingIPCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackFloatingIPCachingAgentSpec.groovy deleted file mode 100644 index 547cdb9afec..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackFloatingIPCachingAgentSpec.groovy +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.compute.FloatingIP -import org.openstack4j.model.network.NetFloatingIP -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS - -class OpenstackFloatingIPCachingAgentSpec extends Specification { - - OpenstackFloatingIPCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - OpenstackCredentials credentials - ObjectMapper objectMapper - final String region = 'east' - final String account = 'account' - - void "setup"() { - credentials = GroovyMock(OpenstackCredentials) - namedAccountCredentials = GroovyMock(OpenstackNamedAccountCredentials) { - it.credentials >> { credentials } - } - objectMapper = Mock(ObjectMapper) - cachingAgent = Spy(OpenstackFloatingIPCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper]) { - it.accountName >> { account } - } - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - String ipId = UUID.randomUUID().toString() - NetFloatingIP floatingIP = Mock(NetFloatingIP) { - it.id >> { ipId } - } - Map ipAttributes = new HashMap<>() - String ipKey = Keys.getFloatingIPKey(ipId, account, region) - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * credentials.provider >> provider - 1 * provider.listNetFloatingIps(region) >> [floatingIP] - 1 * objectMapper.convertValue(_, OpenstackInfrastructureProvider.ATTRIBUTES) >> ipAttributes - - and: - result.cacheResults.get(FLOATING_IPS.ns).first().id == ipKey - result.cacheResults.get(FLOATING_IPS.ns).first().attributes == ipAttributes - noExceptionThrown() - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * credentials.provider >> provider - 1 * provider.listNetFloatingIps(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackImageCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackImageCachingAgentSpec.groovy deleted file mode 100644 index bb2c8e07887..00000000000 --- 
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackImageCachingAgentSpec.groovy +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.IMAGES - -class OpenstackImageCachingAgentSpec extends Specification { - - OpenstackImageCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - ObjectMapper objectMapper - String region = 'east' - String account = 'test' - - void "setup"() { - namedAccountCredentials = GroovyMock(OpenstackNamedAccountCredentials) - objectMapper = Mock(ObjectMapper) - cachingAgent = Spy(OpenstackImageCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper]) - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock() - OpenstackClientProvider provider = Mock() - OpenstackImage image = Mock(OpenstackImage) - String id = UUID.randomUUID().toString() - String imageKey = Keys.getImageKey(id, account, region) - Map imageAttributes = new HashMap<>() - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - _ * cachingAgent.getAccountName() >> account - 1 * credentials.provider >> provider - 1 * provider.listImages(region) >> [image] - _ * image.id >> id - 1 * objectMapper.convertValue(_, OpenstackInfrastructureProvider.ATTRIBUTES) >> imageAttributes - - and: - result.cacheResults.get(IMAGES.ns).first().id == imageKey - result.cacheResults.get(IMAGES.ns).first().attributes == imageAttributes - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock() - OpenstackClientProvider provider = Mock() - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * 
namedAccountCredentials.credentials >> credentials - 1 * credentials.provider >> provider - 1 * provider.listImages(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceCachingAgentSpec.groovy deleted file mode 100644 index a7d8db903f0..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceCachingAgentSpec.groovy +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.compute.Server -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES - -class OpenstackInstanceCachingAgentSpec extends Specification { - - OpenstackInstanceCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - ObjectMapper objectMapper - String region = 'east' - String account = 'test' - - void "setup"() { - namedAccountCredentials = GroovyMock(OpenstackNamedAccountCredentials) - objectMapper = Mock(ObjectMapper) - cachingAgent = Spy(OpenstackInstanceCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper]) - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock() - OpenstackClientProvider provider = Mock() - Server server = Mock(Server) - String id = UUID.randomUUID().toString() - String name = 'foobar' - String instanceKey = Keys.getInstanceKey(id, account, region) - Map instanceAttributes = new HashMap<>() - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - _ * cachingAgent.getAccountName() >> account - 1 * credentials.provider >> provider - 1 * provider.getInstances(region) >> 
[server] - _ * server.id >> id - _ * server.name >> name - 1 * objectMapper.convertValue(_, OpenstackInfrastructureProvider.ATTRIBUTES) >> instanceAttributes - - and: - result.cacheResults.get(INSTANCES.ns).first().id == instanceKey - result.cacheResults.get(INSTANCES.ns).first().attributes == instanceAttributes - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock() - OpenstackClientProvider provider = Mock() - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - 1 * credentials.provider >> provider - 1 * provider.getInstances(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceTypeCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceTypeCachingAgentSpec.groovy deleted file mode 100644 index 7e81694ffd7..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackInstanceTypeCachingAgentSpec.groovy +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstanceType -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.compute.Flavor -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCE_TYPES - -class OpenstackInstanceTypeCachingAgentSpec extends Specification { - - OpenstackInstanceTypeCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - OpenstackClientProvider provider - String region = 'east' - String account = 'test' - ObjectMapper objectMapper - - void "setup"() { - namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - provider = Mock(OpenstackClientProvider) - objectMapper = new ObjectMapper() - cachingAgent = Spy(OpenstackInstanceTypeCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper]) { - getAccountName() >> account - getClientProvider() >> provider - } - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String flavorId = UUID.randomUUID().toString() - - and: - Flavor flavor = Mock(Flavor) { - getId() >> { flavorId } - isPublic() >> { false } - isDisabled() >> { false } - } - OpenstackInstanceType openstackInstanceType = OpenstackInstanceType.builder().region(region).account(account).id(flavorId).build() - Map instanceTypesAttributes = objectMapper.convertValue(openstackInstanceType, OpenstackInfrastructureProvider.ATTRIBUTES) - - and: - String instanceTypeKey = Keys.getInstanceTypeKey(flavorId, account, region) - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * provider.listFlavors(region) >> [flavor] - - and: - result.cacheResults != null - noExceptionThrown() - - and: - Collection instanceTypesData = result.cacheResults.get(INSTANCE_TYPES.ns) - instanceTypesData.size() == 1 - instanceTypesData.first().id == instanceTypeKey - instanceTypesData.first().attributes == instanceTypesAttributes - instanceTypesData.first().relationships.isEmpty() - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * provider.listFlavors(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackLoadBalancerCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackLoadBalancerCachingAgentSpec.groovy deleted file mode 
100644 index 999ec7765ee..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackLoadBalancerCachingAgentSpec.groovy +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spectator.api.Timer -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent.OnDemandResult -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.network.Port -import org.openstack4j.model.network.ext.HealthMonitorV2 -import org.openstack4j.model.network.ext.LbPoolV2 -import org.openstack4j.model.network.ext.ListenerProtocol -import org.openstack4j.model.network.ext.ListenerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import org.openstack4j.model.network.ext.status.LbPoolV2Status -import org.openstack4j.model.network.ext.status.ListenerV2Status -import org.openstack4j.model.network.ext.status.LoadBalancerV2Status -import org.openstack4j.model.network.ext.status.MemberV2Status -import org.openstack4j.openstack.networking.domain.ext.ListItem -import spock.lang.Shared -import spock.lang.Specification - -import java.util.concurrent.CompletableFuture - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS - -class OpenstackLoadBalancerCachingAgentSpec extends Specification { - - OpenstackLoadBalancerCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - OpenstackCredentials credentials - ObjectMapper objectMapper - 
OpenstackClientProvider provider - Registry registry - - @Shared - String region = 'east' - @Shared - String account = 'account' - final String serverGroupName = 'myapp-test-v000' - - void "setup"() { - provider = Mock(OpenstackClientProvider) - credentials = GroovyMock(OpenstackCredentials) { - it.provider >> { provider } - } - namedAccountCredentials = GroovyMock(OpenstackNamedAccountCredentials) { - it.credentials >> { credentials } - } - objectMapper = Mock(ObjectMapper) - registry = Stub(Registry) { - timer(_, _) >> Mock(Timer) - } - cachingAgent = Spy(OpenstackLoadBalancerCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper, registry]) { - it.accountName >> { account } - it.clientProvider >> { provider } - } - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - CacheResult cacheResult = Mock(CacheResult) - GroovyMock(CompletableFuture, global: true) - CompletableFuture.supplyAsync(_) >> Mock(CompletableFuture) { - thenApplyAsync(_) >> Mock(CompletableFuture) - } - CompletableFuture.allOf(_ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture) >> Mock(CompletableFuture) - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * cachingAgent.buildLoadDataCache(providerCache, [], _ as Closure) >> cacheResult - - and: - result == cacheResult - noExceptionThrown() - } - - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - CompletableFuture f = Mock(CompletableFuture) { - thenApplyAsync(_) >> Mock(CompletableFuture) - } - GroovyMock(CompletableFuture, global: true) - CompletableFuture.supplyAsync(_) >> f - CompletableFuture.allOf(_ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture, _ as CompletableFuture) >> Mock(CompletableFuture) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * f.get() >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } - - void "test build cache"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String loadBalancerId = UUID.randomUUID().toString() - String listenerId = UUID.randomUUID().toString() - String poolId = UUID.randomUUID().toString() - String healthId = UUID.randomUUID().toString() - String ipId = UUID.randomUUID().toString() - String vipPortId = UUID.randomUUID().toString() - String lbName = 'myapp-lb' - String subnetId = UUID.randomUUID().toString() - String ipv6 = 'fd16:3966:18cc:0:f816:3eff:fe88:9004' - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) { - it.id >> { loadBalancerId } - it.name >> { lbName } - it.vipPortId >> { vipPortId } - it.vipSubnetId >> { subnetId } - it.listeners >> [new ListItem(id: listenerId)] - } - ListenerV2 listener = Mock(ListenerV2) { - it.id >> { listenerId } - it.protocol >> { ListenerProtocol.HTTP } - it.protocolPort >> { 80 } - it.description >> { "HTTP:80:HTTP:8080" } - it.defaultPoolId >> { poolId } - } - LbPoolV2 pool = Mock(LbPoolV2) { - it.id >> { poolId } - it.healthMonitorId >> { healthId } - } - HealthMonitorV2 healthMonitor = Mock(HealthMonitorV2) { - it.id >> { 'id' } - } - Map lbAttributes = new HashMap<>() - String lbKey = Keys.getLoadBalancerKey(lbName, loadBalancerId, 
account, region) - MemberV2Status memberV2Status = Mock(MemberV2Status) { - it.address >> { ipv6 } - it.operatingStatus >> { 'ONLINE' } - } - LbPoolV2Status lbPoolV2Status = Mock(LbPoolV2Status) { - it.memberStatuses >> { [memberV2Status] } - } - ListenerV2Status listenerV2Status = Mock(ListenerV2Status) { - it.lbPoolV2Statuses >> { [lbPoolV2Status] } - } - LoadBalancerV2Status loadBalancerV2Status = Mock(LoadBalancerV2Status) { - it.listenerStatuses >> { [listenerV2Status] } - } - LoadBalancerV2StatusTree loadBalancerV2StatusTree = Mock(LoadBalancerV2StatusTree) { - it.loadBalancerV2Status >> { loadBalancerV2Status } - } - Port port = Mock(Port) { - it.securityGroups >> { [] } - } - - and: - List instanceKeys = [Keys.getInstanceKey(ipId, account, region)] - Map ipAttributes = [instanceId: ipId] - CacheData ipCacheData = Mock(CacheData) { - it.attributes >> { ipAttributes } - } - Collection ipCacheDataList = [ipCacheData] - - and: - List ipKeys = [Keys.getFloatingIPKey(ipId, account, region)] - Map instanceAttributes = [instanceId: ipId, ipv6: ipv6] - CacheData instanceCacheData = Mock(CacheData) { - it.attributes >> { instanceAttributes } - } - Collection instanceCacheDataList = [instanceCacheData] - - and: - OpenstackLoadBalancer openstackLoadBalancer = Mock(OpenstackLoadBalancer) - OpenstackLoadBalancer.metaClass.static.from = { LoadBalancerV2 lb, - Set listeners, - LbPoolV2 pools, - HealthMonitorV2 hm, - String a, String r -> openstackLoadBalancer - } - - when: - CacheResult result = cachingAgent.buildCacheResult(providerCache, [loadBalancer].toSet(), - [listener].toSet(), [pool].toSet(), [healthMonitor].toSet(), [loadBalancerId: loadBalancerV2StatusTree], [vipPortId: port], - new CacheResultBuilder(startTime: System.currentTimeMillis())) - - then: - 1 * providerCache.filterIdentifiers(INSTANCES.ns, Keys.getInstanceKey('*', account, region)) >> instanceKeys - 1 * providerCache.getAll(INSTANCES.ns, instanceKeys, _ as RelationshipCacheFilter) >> instanceCacheDataList - 1 * providerCache.filterIdentifiers(FLOATING_IPS.ns, Keys.getFloatingIPKey('*', account, region)) >> ipKeys - 1 * providerCache.getAll(FLOATING_IPS.ns, ipKeys, _ as RelationshipCacheFilter) >> ipCacheDataList - - and: - 1 * objectMapper.convertValue(openstackLoadBalancer, OpenstackInfrastructureProvider.ATTRIBUTES) >> lbAttributes - - and: - result.cacheResults.get(LOAD_BALANCERS.ns).first().id == lbKey - result.cacheResults.get(LOAD_BALANCERS.ns).first().attributes == lbAttributes - noExceptionThrown() - } - - void "test handles - #testCase"() { - when: - boolean result = cachingAgent.handles(type, cloudProvider) - - then: - result == expected - - where: - testCase | type | cloudProvider | expected - 'wrong type' | OnDemandAgent.OnDemandType.ServerGroup | OpenstackCloudProvider.ID | false - 'wrong provider' | OnDemandAgent.OnDemandType.LoadBalancer | 'aws' | false - 'success' | OnDemandAgent.OnDemandType.LoadBalancer | OpenstackCloudProvider.ID | true - } - - void "test handle on demand no result - #testCase"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - - when: - OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - result == null - - where: - testCase | data - 'empty data' | [:] - 'missing loadBalancerName' | [account: account, region: region] - 'wrong account' | [loadBalancerName: 'name', account: 'abc', region: region] - 'wrong region' | [loadBalancerName: 'name', account: account, region: 'abc'] - } - - void "test handle on demand no resource"() { - given: - ProviderCache 
providerCache = Mock(ProviderCache) - String loadbalancerName = "test" - String loadBalancerKey = Keys.getLoadBalancerKey(loadbalancerName, '*', account, region) - Map data = [loadBalancerName: loadbalancerName, account: account, region: region] - CacheResult cacheResult = new CacheResultBuilder(startTime: Long.MAX_VALUE).build() - - when: - OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getLoadBalancerByName(region, loadbalancerName) >> { throw new OpenstackProviderException('test') } - 1 * cachingAgent.buildCacheResult(providerCache, [].toSet(), [].toSet(), [].toSet(), [].toSet(), [:] as Map, [:] as Map, _) >> cacheResult - 1 * cachingAgent.resolveKey(providerCache, LOAD_BALANCERS.ns, loadBalancerKey) >> loadBalancerKey - 1 * cachingAgent.processOnDemandCache(cacheResult, objectMapper, _, providerCache, loadBalancerKey) - - and: - result.cacheResult == cacheResult - result.evictions.get(LOAD_BALANCERS.ns) == [loadBalancerKey] - } - - void "test handle on demand"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - CacheResult cacheResult = new CacheResultBuilder(startTime: Long.MAX_VALUE).build() - String loadBalancerId = UUID.randomUUID().toString() - String listenerId = UUID.randomUUID().toString() - String poolId = UUID.randomUUID().toString() - String healthId = UUID.randomUUID().toString() - String lbName = 'myapp-lb' - String subnetId = UUID.randomUUID().toString() - String vipPortId = UUID.randomUUID().toString() - String loadbalancerName = "test" - String loadBalancerKey = Keys.getLoadBalancerKey(loadbalancerName, loadBalancerId, account, region) - Map data = [loadBalancerName: loadbalancerName, account: account, region: region] - LoadBalancerV2 loadBalancer = Mock(LoadBalancerV2) { - it.id >> { loadBalancerId } - it.name >> { lbName } - it.vipPortId >> { vipPortId } - it.vipSubnetId >> { subnetId } - it.listeners >> [new ListItem(id: listenerId)] - } - ListenerV2 listener = Mock(ListenerV2) { - it.id >> { listenerId } - it.protocol >> { ListenerProtocol.HTTP } - it.protocolPort >> { 80 } - it.description >> { "HTTP:80:HTTP:8080" } - it.defaultPoolId >> { poolId } - } - LbPoolV2 pool = Mock(LbPoolV2) { - it.id >> { poolId } - it.healthMonitorId >> { healthId } - } - Port port = Mock(Port) { - it.securityGroups >> { [] } - } - HealthMonitorV2 healthMonitor = Mock(HealthMonitorV2) - LoadBalancerV2StatusTree loadBalancerStatusTree = Mock(LoadBalancerV2StatusTree) - when: - OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getLoadBalancerByName(region, loadbalancerName) >> loadBalancer - 1 * provider.getListener(region, listenerId) >> listener - 1 * provider.getPool(region, poolId) >> pool - 1 * provider.getMonitor(region, healthId) >> healthMonitor - 1 * provider.getLoadBalancerStatusTree(region, loadBalancerId) >> loadBalancerStatusTree - 1 * provider.getPort(region, vipPortId) >> port - 1 * cachingAgent.buildCacheResult(providerCache, [loadBalancer].toSet(), [listener].toSet(), [pool].toSet(), [healthMonitor].toSet(), [(loadBalancerId) : loadBalancerStatusTree], [(vipPortId): port], _) >> cacheResult - 1 * cachingAgent.resolveKey(providerCache, LOAD_BALANCERS.ns, loadBalancerKey) >> loadBalancerKey - 1 * cachingAgent.processOnDemandCache(cacheResult, objectMapper, _, providerCache, loadBalancerKey) - - and: - result.cacheResult == cacheResult - result.evictions.get(LOAD_BALANCERS.ns).isEmpty() - } - - void 'test pending on demand requests'() { - given: - ProviderCache providerCache = 
Mock(ProviderCache) - Collection maps = Mock(Collection) - - when: - Collection result = cachingAgent.pendingOnDemandRequests(providerCache) - - then: - 1 * cachingAgent.getAllOnDemandCacheByRegionAndAccount(providerCache, account, region) >> maps - - and: - result == maps - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackNetworkCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackNetworkCachingAgentSpec.groovy deleted file mode 100644 index 6cade8e0b93..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackNetworkCachingAgentSpec.groovy +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.network.Network -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS - -class OpenstackNetworkCachingAgentSpec extends Specification { - - OpenstackNetworkCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - ObjectMapper objectMapper - final String region = 'east' - final String account = 'account' - - void "setup"() { - namedAccountCredentials = GroovyMock(OpenstackNamedAccountCredentials) - objectMapper = Mock(ObjectMapper) - cachingAgent = Spy(OpenstackNetworkCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper]) - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock(OpenstackCredentials) - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - Network network = Mock(Network) - String networkId = UUID.randomUUID().toString() - Map networkAttributes = new HashMap<>() - String networkKey = Keys.getNetworkKey(networkId, account, region) - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - _ * cachingAgent.getAccountName() >> account - 1 * credentials.provider >> provider - 1 * 
provider.listNetworks(region) >> [network] - _ * network.id >> networkId - 1 * objectMapper.convertValue(_, OpenstackInfrastructureProvider.ATTRIBUTES) >> networkAttributes - - and: - result.cacheResults.get(NETWORKS.ns).first().id == networkKey - result.cacheResults.get(NETWORKS.ns).first().attributes == networkAttributes - noExceptionThrown() - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock(OpenstackCredentials) - OpenstackClientProvider provider = Mock(OpenstackClientProvider) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - 1 * credentials.provider >> provider - 1 * provider.listNetworks(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSecurityGroupCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSecurityGroupCachingAgentSpec.groovy deleted file mode 100644 index 8489277ff10..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSecurityGroupCachingAgentSpec.groovy +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spectator.api.Registry -import com.netflix.spectator.api.Timer -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.model.AddressableRange -import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule -import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule -import com.netflix.spinnaker.clouddriver.model.securitygroups.SecurityGroupRule -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSecurityGroup -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.compute.IPProtocol -import org.openstack4j.openstack.compute.domain.NovaSecGroupExtension -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.ON_DEMAND -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SECURITY_GROUPS - -class OpenstackSecurityGroupCachingAgentSpec extends Specification { - - @Subject - OpenstackSecurityGroupCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - OpenstackCredentials credentials - OpenstackClientProvider provider - ObjectMapper objectMapper - ProviderCache providerCache - String region = 'east' - String accountName = 'os-account' - - def setup() { - providerCache = Mock(ProviderCache) - provider = Mock(OpenstackClientProvider) - credentials = GroovyMock(OpenstackCredentials) - credentials.provider >> provider - namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - namedAccountCredentials.credentials >> credentials - objectMapper = Mock(ObjectMapper) - Registry registry = Mock(Registry) { - _ * timer(*_) >> Mock(Timer) - } - cachingAgent = Spy(OpenstackSecurityGroupCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper, registry]) - _ * cachingAgent.getAccountName() >> accountName - } - - def "should load data without inbound rules"() { - given: - def id = UUID.randomUUID().toString() - def name = 'a-security-group' - def desc = 'a description' - def key = Keys.getSecurityGroupKey(name, id, accountName, region) - def instanceAttributes = new HashMap<>() - def novaSecurityGroup = new NovaSecGroupExtension(name: name, description: desc, id: id) - def securityGroup = new OpenstackSecurityGroup(id: id, - accountName: accountName, - region: region, - name: name, - description: desc, - inboundRules: [] - ) - - when: - CacheResult cacheResult = cachingAgent.loadData(providerCache) - - 
then: - 1 * provider.getSecurityGroups(region) >> [novaSecurityGroup] - 1 * objectMapper.convertValue(securityGroup, OpenstackInfrastructureProvider.ATTRIBUTES) >> instanceAttributes - - and: - def cacheData = cacheResult.cacheResults.get(SECURITY_GROUPS.ns).first() - cacheData.id == key - cacheData.attributes == instanceAttributes - } - - def "should load data with cidr inbound rules"() { - given: - def id = UUID.randomUUID().toString() - def name = 'a-security-group' - def desc = 'a description' - def key = Keys.getSecurityGroupKey(name, id, accountName, region) - def instanceAttributes = new HashMap<>() - - def novaSecurityGroup = new NovaSecGroupExtension(name: name, description: desc, id: id, rules: [ - new NovaSecGroupExtension.SecurityGroupRule(fromPort: 80, toPort: 80, ipProtocol: IPProtocol.TCP, - ipRange: new NovaSecGroupExtension.SecurityGroupRule.RuleIpRange(cidr: '10.10.0.0/24') - ), - new NovaSecGroupExtension.SecurityGroupRule(fromPort: 22, toPort: 22, ipProtocol: IPProtocol.TCP, - ipRange: new NovaSecGroupExtension.SecurityGroupRule.RuleIpRange(cidr: '10.10.0.0') - ) - ]) - def securityGroup = new OpenstackSecurityGroup(id: id, - accountName: accountName, - region: region, - name: name, - description: desc, - inboundRules: [ - new IpRangeRule(protocol: IPProtocol.TCP.value(), - portRanges: [new Rule.PortRange(startPort: 80, endPort: 80)] as SortedSet, - range: new AddressableRange(ip: '10.10.0.0', cidr: '/24') - ), - new IpRangeRule(protocol: IPProtocol.TCP.value(), - portRanges: [new Rule.PortRange(startPort: 22, endPort: 22)] as SortedSet, - range: new AddressableRange(ip: '10.10.0.0', cidr: '/32') - ) - ] - ) - - when: - CacheResult cacheResult = cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> [novaSecurityGroup] - 1 * objectMapper.convertValue(securityGroup, OpenstackInfrastructureProvider.ATTRIBUTES) >> instanceAttributes - - and: - def cacheData = cacheResult.cacheResults.get(SECURITY_GROUPS.ns).first() - cacheData.id == key - cacheData.attributes == instanceAttributes - } - - def "should load data with referencing security group inbound rule missing referenced security group"() { - given: - def securityGroupId = UUID.randomUUID().toString() - def name = 'a-security-group' - def desc = 'a description' - def instanceAttributes = [:] - - def novaSecurityGroup = new NovaSecGroupExtension(name: name, description: desc, id: securityGroupId, rules: [ - new NovaSecGroupExtension.SecurityGroupRule(fromPort: 80, toPort: 80, ipProtocol: IPProtocol.TCP, - ipRange: new NovaSecGroupExtension.SecurityGroupRule.RuleIpRange(cidr: null), - group: new NovaSecGroupExtension.SecurityGroupRule.RuleGroup(name: 'ref', tenantId: 'tenant') - ) - ]) - - def securityGroup = new OpenstackSecurityGroup(id: securityGroupId, - accountName: accountName, - region: region, - name: name, - description: desc, - inboundRules: [ - new SecurityGroupRule(protocol: IPProtocol.TCP.value(), - portRanges: [new Rule.PortRange(startPort: 80, endPort: 80)] as SortedSet, - securityGroup: new OpenstackSecurityGroup(name: 'ref', type: OpenstackCloudProvider.ID, accountName: accountName, region: region) - ) - ] - ) - - when: - CacheResult cacheResult = cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> [novaSecurityGroup] - 1 * objectMapper.convertValue(securityGroup, OpenstackInfrastructureProvider.ATTRIBUTES) >> instanceAttributes - - and: - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).size() == 1 - 
cacheResult.cacheResults.get(SECURITY_GROUPS.ns).find { it.id == Keys.getSecurityGroupKey(name, securityGroupId, accountName, region) } - } - - def "should load data with referencing security group inbound rule with referenced security group"() { - given: - def securityGroupId = UUID.randomUUID().toString() - def referencedSecurityGroupId = UUID.randomUUID().toString() - def name = 'a-security-group' - def desc = 'a description' - def instanceAttributes = [:] - - def novaSecurityGroups = [ - new NovaSecGroupExtension(name: name, description: desc, id: securityGroupId, rules: [ - new NovaSecGroupExtension.SecurityGroupRule(fromPort: 80, toPort: 80, ipProtocol: IPProtocol.TCP, - ipRange: new NovaSecGroupExtension.SecurityGroupRule.RuleIpRange(cidr: null), - group: new NovaSecGroupExtension.SecurityGroupRule.RuleGroup(name: 'ref', tenantId: 'tenant') - ) - ]), - new NovaSecGroupExtension(name: 'ref', description: desc, id: referencedSecurityGroupId, rules: []) - ] - - def securityGroup = new OpenstackSecurityGroup(id: securityGroupId, - accountName: accountName, - region: region, - name: name, - description: desc, - inboundRules: [ - new SecurityGroupRule(protocol: IPProtocol.TCP.value(), - portRanges: [new Rule.PortRange(startPort: 80, endPort: 80)] as SortedSet, - securityGroup: new OpenstackSecurityGroup(name: 'ref', type: OpenstackCloudProvider.ID, accountName: accountName, region: region, id: referencedSecurityGroupId) - ) - ] - ) - - when: - CacheResult cacheResult = cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> novaSecurityGroups - 1 * objectMapper.convertValue(securityGroup, OpenstackInfrastructureProvider.ATTRIBUTES) >> instanceAttributes - - and: - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).size() == 2 - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).find { it.id == Keys.getSecurityGroupKey(name, securityGroupId, accountName, region) } - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).find { it.id == Keys.getSecurityGroupKey('ref', referencedSecurityGroupId, accountName, region) } - } - - def "should load data with referencing security group inbound rule duplicate referenced security group"() { - given: - def securityGroupId = UUID.randomUUID().toString() - def referencedSecurityGroupId = UUID.randomUUID().toString() - def thirdSecurityGroupid = UUID.randomUUID().toString() - def name = 'a-security-group' - def desc = 'a description' - def instanceAttributes = [:] - - def novaSecurityGroups = [ - new NovaSecGroupExtension(name: name, description: desc, id: securityGroupId, rules: [ - new NovaSecGroupExtension.SecurityGroupRule(fromPort: 80, toPort: 80, ipProtocol: IPProtocol.TCP, - ipRange: new NovaSecGroupExtension.SecurityGroupRule.RuleIpRange(cidr: null), - group: new NovaSecGroupExtension.SecurityGroupRule.RuleGroup(name: 'ref', tenantId: 'tenant') - ) - ]), - new NovaSecGroupExtension(name: 'ref', description: desc, id: referencedSecurityGroupId, rules: []), - new NovaSecGroupExtension(name: 'ref', description: desc, id: thirdSecurityGroupid, rules: []) - ] - - def securityGroup = new OpenstackSecurityGroup(id: securityGroupId, - accountName: accountName, - region: region, - name: name, - description: desc, - inboundRules: [ - new SecurityGroupRule(protocol: IPProtocol.TCP.value(), - portRanges: [new Rule.PortRange(startPort: 80, endPort: 80)] as SortedSet, - securityGroup: new OpenstackSecurityGroup(name: 'ref', type: OpenstackCloudProvider.ID, accountName: accountName, region: region) - ) - ] - ) - - when: - CacheResult 
cacheResult = cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> novaSecurityGroups - 1 * objectMapper.convertValue(securityGroup, OpenstackInfrastructureProvider.ATTRIBUTES) >> instanceAttributes - - and: - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).size() == 3 - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).find { it.id == Keys.getSecurityGroupKey(name, securityGroupId, accountName, region) } - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).find { it.id == Keys.getSecurityGroupKey('ref', referencedSecurityGroupId, accountName, region) } - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).find { it.id == Keys.getSecurityGroupKey('ref', thirdSecurityGroupid, accountName, region) } - } - - - def "load data finds no security groups"() { - when: - CacheResult cacheResult = cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> [] - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).empty - } - - def "get security groups handles null"() { - when: - CacheResult cacheResult = cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> null - cacheResult.cacheResults.get(SECURITY_GROUPS.ns).empty - } - - def "load data lets exception bubble up"() { - given: - Throwable exception = new OpenstackProviderException() - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * provider.getSecurityGroups(region) >> { throw exception } - def ex = thrown(Exception) - exception == ex - } - - @Unroll - def "on demand caching with invalid data"() { - when: - def result = cachingAgent.handle(providerCache, data) - - then: - result == null - - where: - data << [ - [account: 'os-account', region: 'east'], - [securityGroupName: 'sg', account: 'other-account', region: 'east'], - [securityGroupName: 'sg', account: 'os-account', region: 'west'] - ] - } - - def "handle on demand store"() { - given: - def id = UUID.randomUUID().toString() - def secGroupExt = new NovaSecGroupExtension(name: 'sg', id: id) - def data = [ - securityGroupName: secGroupExt.name, - account: accountName, - region: region - ] - CacheData cacheData = new DefaultCacheData(UUID.randomUUID().toString(), [:], [:]) - CacheResult cacheResult = new DefaultCacheResult([(SECURITY_GROUPS.ns): [cacheData]]) - - when: - def result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getSecurityGroups(region) >> [secGroupExt] - 1 * cachingAgent.buildCacheResult(_, [secGroupExt]) >> cacheResult - 1 * providerCache.putCacheData(ON_DEMAND.ns, _) - - and: - result.cacheResult.cacheResults[SECURITY_GROUPS.ns].size() == 1 - result.cacheResult.cacheResults[SECURITY_GROUPS.ns].first() == cacheData - } - - def "handle on demand unable to find security group"() { - given: - String unresolvedKey = Keys.getSecurityGroupKey('sg', '*', accountName, region) - String key = Keys.getSecurityGroupKey('sg', UUID.randomUUID().toString(), accountName, region) - def data = [securityGroupName: 'sg', account: accountName, region: region] - - when: - def result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getSecurityGroups(region) >> [] - 1 * providerCache.filterIdentifiers(SECURITY_GROUPS.ns, unresolvedKey) >> [key] - - and: - result.cacheResult.cacheResults[ON_DEMAND.ns].isEmpty() - result.evictions[SECURITY_GROUPS.ns] == [key] - } - - def "handle on demand no cache results built"() { - given: - String id = UUID.randomUUID().toString() - String name = 'sg' - String key = Keys.getSecurityGroupKey(name, id, accountName, 
region) - def data = [securityGroupName: name, account: accountName, region: region] - def securityGroup = new NovaSecGroupExtension(name: name, id: id) - - when: - def result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getSecurityGroups(region) >> [securityGroup] - 1 * cachingAgent.buildCacheResult(_, [securityGroup]) >> { builder, groups -> builder.build() } - 1 * providerCache.evictDeletedItems(ON_DEMAND.ns, [key]) - - and: - result.cacheResult.cacheResults[ON_DEMAND.ns].isEmpty() - result.evictions.isEmpty() - } - - def "handles proper type - #testCase"() { - when: - def result = cachingAgent.handles(type, cloudProvider) - - then: - result == expected - - where: - testCase | type | cloudProvider | expected - 'wrong type' | OnDemandAgent.OnDemandType.LoadBalancer | OpenstackCloudProvider.ID | false - 'wrong provider' | OnDemandAgent.OnDemandType.SecurityGroup | 'aws' | false - 'success' | OnDemandAgent.OnDemandType.SecurityGroup | OpenstackCloudProvider.ID | true - } - - def "pending on demand requests"() { - given: - def id = UUID.randomUUID().toString() - def name = 'sg' - def key = Keys.getSecurityGroupKey(name, id, accountName, region) - def cacheData = new DefaultCacheData(key, [cacheTime: System.currentTimeMillis(), processedCount: 1, processedTime: System.currentTimeMillis()], [:]) - - when: - def result = cachingAgent.pendingOnDemandRequests(providerCache) - - then: - 1 * providerCache.getIdentifiers(ON_DEMAND.ns) >> [key] - 1 * providerCache.getAll(ON_DEMAND.ns, [key]) >> [cacheData] - - and: - result.first() == [details: Keys.parse(key), cacheTime: cacheData.attributes.cacheTime, processedCount: cacheData.attributes.processedCount, processedTime: cacheData.attributes.processedTime] - } - - def "pending on demand requests with exception"() { - given: - Throwable throwable = new JedisException('test') - - when: - cachingAgent.pendingOnDemandRequests(providerCache) - - then: - 1 * providerCache.getIdentifiers(ON_DEMAND.ns) >> { throw throwable } - - and: - def exception = thrown(JedisException) - exception == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackServerGroupCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackServerGroupCachingAgentSpec.groovy deleted file mode 100644 index 087d3865fb1..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackServerGroupCachingAgentSpec.groovy +++ /dev/null @@ -1,647 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spectator.api.Registry -import com.netflix.spectator.api.Timer -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.CacheResultBuilder -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.description.servergroup.ServerGroupParameters -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackServerGroup -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.heat.Stack -import org.openstack4j.model.network.ext.LoadBalancerV2 -import org.openstack4j.model.network.ext.LoadBalancerV2StatusTree -import org.openstack4j.model.network.ext.status.LbPoolV2Status -import org.openstack4j.model.network.ext.status.ListenerV2Status -import org.openstack4j.model.network.ext.status.LoadBalancerV2Status -import org.openstack4j.model.network.ext.status.MemberV2Status -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Ignore -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -import java.time.LocalDateTime -import java.time.ZoneId -import java.time.format.DateTimeFormatter - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.IMAGES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.ON_DEMAND -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS - -@Unroll -class OpenstackServerGroupCachingAgentSpec extends Specification { - - OpenstackServerGroupCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - OpenstackClientProvider provider - @Shared - String region = 'east' - @Shared - String account = 'test' - ObjectMapper objectMapper - Registry registry - @Shared - String en = 'ENABLED' - @Shared - String dis = 'DISABLED' - - void "setup"() { - namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - provider = Mock(OpenstackClientProvider) - objectMapper = new ObjectMapper() - registry = Stub(Registry) { - timer(_, _) >> Mock(Timer) - } - cachingAgent = Spy(OpenstackServerGroupCachingAgent, constructorArgs: [namedAccountCredentials, 
region, objectMapper, registry]) { - getAccountName() >> account - getClientProvider() >> provider - } - } - - void "test load data"() { - given: - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - - and: - ProviderCache providerCache = Mock(ProviderCache) - Stack stack = Mock(Stack) { - getName() >> serverGroupName - } - List stacks = [stack] - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * provider.listStacks(region) >> stacks - 1 * providerCache.getAll(ON_DEMAND.ns, [serverGroupKey]) >> [] - 1 * cachingAgent.buildCacheResult(providerCache, _, stacks) >> { cache, builder, stackz -> builder.build() } - - and: - result.cacheResults[ON_DEMAND.ns].isEmpty() - result.evictions.isEmpty() - } - - void "test load data demand to keep"() { - given: - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - String cacheId = UUID.randomUUID().toString() - - and: - ProviderCache providerCache = Mock(ProviderCache) - Stack stack = Mock(Stack) { - getName() >> serverGroupName - } - CacheData cacheData = new DefaultCacheData(cacheId, ['cacheTime': System.currentTimeMillis()], [:]) - List stacks = [stack] - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * provider.listStacks(region) >> stacks - 1 * providerCache.getAll(ON_DEMAND.ns, [serverGroupKey]) >> [cacheData] - 1 * cachingAgent.buildCacheResult(providerCache, _, stacks) >> { cache, builder, stackz -> builder.build() } - - and: - result.cacheResults[ON_DEMAND.ns].first() == cacheData - result.evictions.isEmpty() - } - - void "test load data evict data"() { - given: - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - String cacheId = UUID.randomUUID().toString() - - and: - ProviderCache providerCache = Mock(ProviderCache) - Stack stack = Mock(Stack) { - getName() >> serverGroupName - } - CacheData cacheData = new DefaultCacheData(cacheId, ['cacheTime': 1, 'processedCount': 1], [:]) - List stacks = [stack] - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * provider.listStacks(region) >> stacks - 1 * providerCache.getAll(ON_DEMAND.ns, [serverGroupKey]) >> [cacheData] - 1 * cachingAgent.buildCacheResult(providerCache, _, stacks) >> { cache, builder, stackz -> builder.build() } - - and: - result.cacheResults[ON_DEMAND.ns].isEmpty() - result.evictions[ON_DEMAND.ns] == [cacheId] - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * provider.listStacks(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } - - void "test build cache result"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String stackId = UUID.randomUUID().toString() - String serverId = UUID.randomUUID().toString() - String appName = 'testapp' - String clusterName = 
"${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String loadBalancerId = UUID.randomUUID().toString() - String loadBalancerName = "$appName-lb" - - and: - Stack stack = Mock(Stack) { - getId() >> { stackId } - getName() >> { serverGroupName } - } - LoadBalancerV2StatusTree lb = Mock(LoadBalancerV2StatusTree) { - it.loadBalancerV2Status >> { - Mock(LoadBalancerV2Status) { - getId() >> { loadBalancerId } - getName() >> { loadBalancerName } - } - } - } - Stack stackDetail = Mock(Stack) { getParameters() >> ['load_balancers': loadBalancerId] } - OpenstackServerGroup openstackServerGroup = OpenstackServerGroup.builder().account(account).name(serverGroupName).tags([foo:'the bar', port:'5050']).build() - Map serverGroupAttributes = objectMapper.convertValue(openstackServerGroup, OpenstackInfrastructureProvider.ATTRIBUTES) - CacheResultBuilder cacheResultBuilder = new CacheResultBuilder() - - and: - String clusterKey = Keys.getClusterKey(account, appName, clusterName) - String appKey = Keys.getApplicationKey(appName) - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - String loadBalancerKey = Keys.getLoadBalancerKey(loadBalancerName, loadBalancerId, account, region) - String instanceKey = Keys.getInstanceKey(serverId, account, region) - List stacks = [stack] - - when: - cachingAgent.buildCacheResult(providerCache, cacheResultBuilder, stacks) - - then: - 1 * cachingAgent.getInstanceIdsByStack(region, stacks) >> [(stackId): [serverId]] - 1 * provider.getStack(region, stack.name) >> stackDetail - 1 * provider.getLoadBalancerStatusTree(region, loadBalancerId) >> lb - 1 * cachingAgent.buildServerGroup(providerCache, stackDetail, _, _) >> openstackServerGroup - - and: - CacheResult result = cacheResultBuilder.build() - noExceptionThrown() - - and: - Collection applicationData = result.cacheResults.get(APPLICATIONS.ns) - applicationData.size() == 1 - applicationData.first().id == appKey - applicationData.first().attributes == ['name': appName] - applicationData.first().relationships == [(CLUSTERS.ns): Sets.newHashSet(clusterKey)] - - and: - Collection clusterData = result.cacheResults.get(CLUSTERS.ns) - clusterData.size() == 1 - clusterData.first().attributes == ['name': clusterName, 'accountName': account] - clusterData.first().id == clusterKey - clusterData.first().relationships == [(APPLICATIONS.ns): Sets.newHashSet(appKey), (SERVER_GROUPS.ns): Sets.newHashSet(serverGroupKey)] - - and: - Collection serverGroupData = result.cacheResults.get(SERVER_GROUPS.ns) - serverGroupData.size() == 1 - serverGroupData.first().id == serverGroupKey - serverGroupData.first().attributes == serverGroupAttributes - serverGroupData.first().relationships == [(APPLICATIONS.ns) : Sets.newHashSet(appKey), (CLUSTERS.ns): Sets.newHashSet(clusterKey) - , (LOAD_BALANCERS.ns): Sets.newHashSet(loadBalancerKey), (INSTANCES.ns): Sets.newHashSet(instanceKey)] - } - - void "test build cache result exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - List stacks = [Mock(Stack)] - when: - cachingAgent.buildCacheResult(providerCache, Mock(CacheResultBuilder), stacks) - - then: - 1 * cachingAgent.getInstanceIdsByStack(region, stacks) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } - - void "test build server group"() { - given: - boolean 
disabled = false - String loadBalancerId = 'loadBalancerId' - ProviderCache providerCache = Mock(ProviderCache) - LoadBalancerV2Status status = Mock(LoadBalancerV2Status) { - it.id >> { loadBalancerId } - it.name >> { 'loadBalancerName' } - } - Set loadBalancerIds = [Keys.getLoadBalancerKey('loadBalancerName','loadBalancerId', account, region)].toSet() - Set statuses = [status].toSet() - LocalDateTime createdTime = LocalDateTime.now() - String subnetId = UUID.randomUUID().toString() - Stack stack = Mock(Stack) { - it.name >> { 'name' } - it.parameters >> { [subnet_id: subnetId, tags:'{"foo": "the bar","port":"5050"}'] } - it.creationTime >> { DateTimeFormatter.ISO_LOCAL_DATE_TIME.format(createdTime) } - } - - and: - Map launchConfig = [image:'foo'] - Map openstackImage = [properties:[:]] - Map scalingConfig = [:] - Map buildInfo = [:] - Map advancedConfig = [:] - - and: - OpenstackServerGroup expected = OpenstackServerGroup.builder() - .account(account) - .region(region) - .name(stack.name) - .createdTime(createdTime.atZone(ZoneId.systemDefault()).toInstant().toEpochMilli()) - .scalingConfig(scalingConfig) - .launchConfig(launchConfig) - .advancedConfig(advancedConfig) - .loadBalancers(loadBalancerIds) - .image(openstackImage) - .buildInfo(buildInfo) - .disabled(disabled) - .subnetId(subnetId) - .tags([foo:'the bar', port:'5050']) - .build() - - when: - OpenstackServerGroup result = cachingAgent.buildServerGroup(providerCache, stack, statuses, []) - - then: - 1 * cachingAgent.buildLaunchConfig(_ as ServerGroupParameters) >> launchConfig - 1 * cachingAgent.buildImage(providerCache, 'foo') >> openstackImage - 1 * cachingAgent.buildScalingConfig(_ as ServerGroupParameters) >> scalingConfig - 1 * cachingAgent.buildInfo(_ as Map) >> buildInfo - 1 * cachingAgent.buildAdvancedConfig(_ as ServerGroupParameters) >> advancedConfig - 1 * cachingAgent.calculateServerGroupStatus(providerCache, statuses, []) >> disabled - - and: - - expected == result - noExceptionThrown() - } - - void "test build launch config - #testCase"() { - when: - Map result = cachingAgent.buildLaunchConfig(parameters) - - then: - result == expected - noExceptionThrown() - - where: - testCase | parameters | expected - 'empty' | new ServerGroupParameters() | [:] - 'normal' | new ServerGroupParameters(image: 'image', instanceType: 'flavor', networkId: 'network', loadBalancers: ['lb'], securityGroups: ['a','b','c']) | [instanceType: 'flavor', image: 'image', networkId: 'network', loadBalancerId: 'lb', securityGroups: ['a', 'b', 'c']] - } - - void "test build image config"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - CacheData cacheData = Mock(CacheData) - Map attributes = Mock(Map) - String image = UUID.randomUUID().toString() - String imagekey = Keys.getImageKey(image, account, region) - - when: - Map result = cachingAgent.buildImage(providerCache, image) - - then: - 1 * providerCache.get(IMAGES.ns, imagekey) >> cacheData - 1 * cacheData.attributes >> attributes - - and: - result == attributes - noExceptionThrown() - } - - void "test build image config - not found"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String image = UUID.randomUUID().toString() - String imagekey = Keys.getImageKey(image, account, region) - - when: - Map result = cachingAgent.buildImage(providerCache, image) - - then: - 1 * providerCache.get(IMAGES.ns, imagekey) >> null - - and: - result == null - noExceptionThrown() - } - - void "test build image config - exception"() { - given: - ProviderCache providerCache = 
Mock(ProviderCache) - Throwable throwable = new JedisException('test') - String image = UUID.randomUUID().toString() - String imagekey = Keys.getImageKey(image, account, region) - - when: - cachingAgent.buildImage(providerCache, image) - - then: - 1 * providerCache.get(IMAGES.ns, imagekey) >> { throw throwable } - - and: - Throwable exception = thrown(JedisException) - exception == throwable - } - - void "test build scaling config - #testCase"() { - when: - Map result = cachingAgent.buildScalingConfig(params).sort { it.key } - - then: - result == expected - noExceptionThrown() - - where: - testCase | params | expected - 'empty' | null | [:] - 'normal' | buildParams(1, 5, 3) | [minSize: 1, maxSize: 5, desiredSize: 3, autoscalingType: 'cpu', scaleup:[cooldown: 60, period: 60, adjustment: 1, threshold: 50], scaledown:[cooldown: 60, period: 600, adjustment: -1, threshold: 15]].sort { it.key } - 'missing' | buildParams() | [minSize: 0, maxSize: 0, desiredSize: 0, autoscalingType: 'cpu', scaleup: [cooldown:null, period:null, adjustment:null, threshold:null], scaledown: [cooldown:null, period:null, adjustment:null, threshold:null]].sort { it.key } - } - - void "test build info config - #testCase"() { - when: - Map result = cachingAgent.buildInfo(properties) - - then: - result == expected - noExceptionThrown() - - where: - testCase | properties | expected - 'null' | null | [:] - 'empty' | [:] | [:] - 'appversion only' | ['appversion': 'helloworld-1.4.0-1140443.h420/build-huxtable/420'] | [packageName: 'helloworld', version: '1.4.0', commit: '1140443', jenkins: [name: 'build-huxtable', number: '420']] - 'appversion and host' | [appversion: 'helloworld-1.4.0-1140443.h420/build-huxtable/420', build_host: 'host'] | [packageName: 'helloworld', version: '1.4.0', commit: '1140443', jenkins: [name: 'build-huxtable', number: '420', host: 'host']] - 'appversion, host, and buildinfo' | [appversion: 'helloworld-1.4.0-1140443.h420/build-huxtable/420', build_host: 'host', build_info_url: 'url'] | [packageName: 'helloworld', version: '1.4.0', commit: '1140443', buildInfoUrl: 'url', jenkins: [name: 'build-huxtable', number: '420', host: 'host']] - } - - void "test build advanced config - #testCase"() { - when: - Map result = cachingAgent.buildAdvancedConfig(parameters) - - then: - result == expected - noExceptionThrown() - - where: - testCase | parameters | expected - 'empty' | new ServerGroupParameters() | [:] - 'normal' | new ServerGroupParameters(sourceUserDataType: 'Text', sourceUserData: 'echo foobar') | [userDataType: 'Text', userData: 'echo foobar'] - } - - void "test calculateServerGroupStatus - #testCase"() { - given: - List addresses = ['addr1', 'addr2'] - ProviderCache providerCache = Mock(ProviderCache) - MemberV2Status memberStatus1 = Mock(MemberV2Status) { - it.address >> { addresses[0] } - it.operatingStatus >> { status1 } - } - MemberV2Status memberStatus2 = Mock(MemberV2Status) { - it.address >> { addresses[1] } - it.operatingStatus >> { status2 } - } - LbPoolV2Status poolStatus = Mock(LbPoolV2Status) { - it.memberStatuses >> { [memberStatus1, memberStatus2] } - } - ListenerV2Status listenerStatus = Mock(ListenerV2Status) { - it.lbPoolV2Statuses >> { [poolStatus] } - } - LoadBalancerV2Status status = Mock(LoadBalancerV2Status) { - it.id >> { 'loadBalancerId' } - it.name >> { 'loadBalancerName' } - it.listenerStatuses >> { [listenerStatus] } - } - Set statuses = [status].toSet() - Collection cacheData = addresses.collect { addr -> Mock(CacheData) { it.attributes >> [ipv6:addr] } } - List 
instanceKeys = ['id1','id2'].collect { Keys.getInstanceKey(it, account, region) } - - when: - boolean disabled = cachingAgent.calculateServerGroupStatus(providerCache, statuses, instanceKeys) - - then: - 1 * providerCache.getAll(INSTANCES.ns, instanceKeys, _ as RelationshipCacheFilter) >> cacheData - disabled == expected - - where: - testCase | status1 | status2 | expected - "all instances up" | en | en | false - "all instances down" | dis | dis | true - "some up some down" | en | dis | false - } - - void "test handle on demand no result - #testCase"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - - when: - OnDemandAgent.OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - result == null - - where: - testCase | data - 'empty data' | [:] - 'missing serverGroupName' | [account: account, region: region] - 'wrong account' | [serverGroupName: 'name', account: 'abc', region: region] - 'wrong region' | [serverGroupName: 'name', account: account, region: 'abc'] - } - - void "test handle on demand no stack"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - Map data = [serverGroupName: serverGroupName, account: account, region: region] - - when: - OnDemandAgent.OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getStack(region, serverGroupName) >> null - - and: - result.cacheResult.cacheResults[ON_DEMAND.ns].isEmpty() - result.evictions.get(SERVER_GROUPS.ns) == [serverGroupKey] - } - - void "test handle on demand no cache results built"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - Map data = [serverGroupName: serverGroupName, account: account, region: region] - Stack stack = Mock(Stack) - - when: - OnDemandAgent.OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getStack(region, serverGroupName) >> stack - 1 * cachingAgent.buildCacheResult(providerCache, _, [stack]) >> { cache, builder, stackz -> builder.build() } - 1 * providerCache.evictDeletedItems(ON_DEMAND.ns, [serverGroupKey]) - - and: - result.cacheResult.cacheResults.get(ON_DEMAND.ns).isEmpty() - result.evictions == [:] - } - - void "test handle on demand store"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - Map data = [serverGroupName: serverGroupName, account: account, region: region] - Stack stack = Mock(Stack) - CacheData serverGroupCacheData = new DefaultCacheData(UUID.randomUUID().toString(), [:], [:]) - CacheResult cacheResult = new DefaultCacheResult([(SERVER_GROUPS.ns): [serverGroupCacheData]]) - - when: - OnDemandAgent.OnDemandResult result = cachingAgent.handle(providerCache, data) - - then: - 1 * provider.getStack(region, serverGroupName) >> stack - 1 * cachingAgent.buildCacheResult(providerCache, _, [stack]) >> cacheResult - 1 * providerCache.putCacheData(ON_DEMAND.ns, _) - - and: - result.cacheResult.cacheResults.get(SERVER_GROUPS.ns).first() == serverGroupCacheData - result.evictions == [:] - } - - void "test 
handles - #testCase"() { - when: - boolean result = cachingAgent.handles(type, cloudProvider) - - then: - result == expected - - where: - testCase | type | cloudProvider | expected - 'wrong type' | OnDemandAgent.OnDemandType.LoadBalancer | OpenstackCloudProvider.ID | false - 'wrong provider' | OnDemandAgent.OnDemandType.ServerGroup | 'aws' | false - 'success' | OnDemandAgent.OnDemandType.ServerGroup | OpenstackCloudProvider.ID | true - } - - void "test pending on demand requests"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - String appName = 'testapp' - String clusterName = "${appName}-stack-detail" - String serverGroupName = "${clusterName}-v000" - String serverGroupKey = Keys.getServerGroupKey(serverGroupName, account, region) - Collection keys = [serverGroupKey] - CacheData cacheData = new DefaultCacheData(serverGroupKey, [cacheTime: System.currentTimeMillis(), processedCount: 1, processedTime: System.currentTimeMillis()], [:]) - - when: - Collection result = cachingAgent.pendingOnDemandRequests(providerCache) - - then: - 1 * providerCache.getIdentifiers(ON_DEMAND.ns) >> keys - 1 * providerCache.getAll(ON_DEMAND.ns, keys) >> [cacheData] - - and: - result.first() == [details: Keys.parse(serverGroupKey), cacheTime: cacheData.attributes.cacheTime, processedCount: cacheData.attributes.processedCount, processedTime: cacheData.attributes.processedTime] - } - - void "test pending on demand requests - exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - Throwable throwable = new JedisException('test') - - when: - cachingAgent.pendingOnDemandRequests(providerCache) - - then: - 1 * providerCache.getIdentifiers(ON_DEMAND.ns) >> { throw throwable } - - and: - Throwable exception = thrown(JedisException) - exception == throwable - } - - @Ignore - protected ServerGroupParameters buildParams(Integer minSize = null, Integer maxSize = null, Integer desiredSize = null) { - if (minSize && maxSize && desiredSize) { - new ServerGroupParameters(minSize: minSize, maxSize: maxSize, desiredSize: desiredSize, autoscalingType: ServerGroupParameters.AutoscalingType.CPU, - scaleup: new ServerGroupParameters.Scaler(cooldown: 60, adjustment: 1, period: 60, threshold: 50), - scaledown: new ServerGroupParameters.Scaler(cooldown: 60, adjustment: -1, period: 600, threshold: 15)) - } else { - new ServerGroupParameters(autoscalingType: ServerGroupParameters.AutoscalingType.CPU, scaleup: new ServerGroupParameters.Scaler(), scaledown: new ServerGroupParameters.Scaler()) - } - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSubnetCachingAgentSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSubnetCachingAgentSpec.groovy deleted file mode 100644 index 49c7d5a46df..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/agent/OpenstackSubnetCachingAgentSpec.groovy +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.agent - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import org.openstack4j.model.common.ActionResponse -import org.openstack4j.model.network.Subnet -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS - -class OpenstackSubnetCachingAgentSpec extends Specification { - - OpenstackSubnetCachingAgent cachingAgent - OpenstackNamedAccountCredentials namedAccountCredentials - ObjectMapper objectMapper - final String region = 'east' - final String account = 'account' - - void "setup"() { - namedAccountCredentials = GroovyMock(OpenstackNamedAccountCredentials) - objectMapper = Mock(ObjectMapper) - cachingAgent = Spy(OpenstackSubnetCachingAgent, constructorArgs: [namedAccountCredentials, region, objectMapper]) - } - - void "test load data"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock() - OpenstackClientProvider provider = Mock() - Subnet subnet = Mock(Subnet) - String subnetId = UUID.randomUUID().toString() - Map subnetAttributes = new HashMap<>() - String subnetKey = Keys.getSubnetKey(subnetId, account, region) - - when: - CacheResult result = cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - _ * cachingAgent.getAccountName() >> account - 1 * credentials.provider >> provider - 1 * provider.listSubnets(region) >> [subnet] - _ * subnet.id >> subnetId - 1 * objectMapper.convertValue(_, OpenstackInfrastructureProvider.ATTRIBUTES) >> subnetAttributes - - and: - result.cacheResults.get(SUBNETS.ns).first().id == subnetKey - result.cacheResults.get(SUBNETS.ns).first().attributes == subnetAttributes - noExceptionThrown() - } - - void "test load data exception"() { - given: - ProviderCache providerCache = Mock(ProviderCache) - OpenstackCredentials credentials = GroovyMock() - OpenstackClientProvider provider = Mock() - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - cachingAgent.loadData(providerCache) - - then: - 1 * namedAccountCredentials.credentials >> credentials - 1 * credentials.provider >> provider - 1 * provider.listSubnets(region) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackApplicationProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackApplicationProviderSpec.groovy deleted file mode 100644 index d9f9c75b4bb..00000000000 --- 
a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackApplicationProviderSpec.groovy +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackApplication -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import org.mockito.internal.util.collections.Sets -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS - -class OpenstackApplicationProviderSpec extends Specification { - - String account = 'test' - - OpenstackApplicationProvider provider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - objectMapper = Mock(ObjectMapper) - cache = Mock(Cache) - provider = new OpenstackApplicationProvider(cache, objectMapper) - } - - void "test get applications"() { - given: - String appName = 'app' - String cluster = "$appName-stack-detail-v000" - String clusterKey = Keys.getClusterKey(account, appName, cluster) - String dataKey = Keys.getApplicationKey(appName) - Map relationships = [(CLUSTERS.ns): [clusterKey]] - Map attributes = [application: appName] - CacheData mockData = Mock(CacheData) - Collection cacheData = [mockData] - OpenstackApplication expected = new OpenstackApplication(appName, attributes, [(account): Sets.newSet(cluster)]) - Collection filters = Mock(Collection) - - when: - Set result = provider.getApplications(false) - - then: - 1 * cache.filterIdentifiers(APPLICATIONS.ns, "${OpenstackCloudProvider.ID}:*") >> filters - 1 * cache.getAll(APPLICATIONS.ns, filters, _) >> cacheData - 1 * mockData.id >> dataKey - 1 * mockData.attributes >> attributes - 1 * mockData.getRelationships() >> relationships - 1 * objectMapper.convertValue(attributes, OpenstackInfrastructureProvider.ATTRIBUTES) >> attributes - result?.first() == expected - noExceptionThrown() - } - - void "test get applications no results"() { - given: - Collection filters = Mock(Collection) - - when: - Set result = provider.getApplications(false) - - then: - 1 * cache.filterIdentifiers(APPLICATIONS.ns, "${OpenstackCloudProvider.ID}:*") >> filters - 1 * cache.getAll(APPLICATIONS.ns, filters, _) >> [] - 0 * _ - result.isEmpty() - noExceptionThrown() - } - - void "test get applications exception"() { - given: - Throwable throwable = new JedisException('test') - Collection filters = Mock(Collection) - - when: - 
provider.getApplications(false) - - then: - 1 * cache.filterIdentifiers(APPLICATIONS.ns, "${OpenstackCloudProvider.ID}:*") >> filters - 1 * cache.getAll(APPLICATIONS.ns, filters, _) >> { throw throwable } - JedisException exception = thrown(JedisException) - exception == throwable - } - - void "test get application"() { - given: - String appName = 'app' - String cluster = "$appName-stack-detail-v000" - String clusterKey = Keys.getClusterKey(account, appName, cluster) - String dataKey = Keys.getApplicationKey(appName) - Map relationships = [(CLUSTERS.ns): [clusterKey]] - Map attributes = [application: appName] - CacheData cacheData = Mock(CacheData) - OpenstackApplication expected = new OpenstackApplication(appName, attributes, [(account): Sets.newSet(cluster)]) - - when: - OpenstackApplication result = provider.getApplication(appName) - - then: - 1 * cache.get(APPLICATIONS.ns, dataKey) >> cacheData - 1 * cacheData.id >> dataKey - 1 * cacheData.attributes >> attributes - 1 * objectMapper.convertValue(attributes, OpenstackInfrastructureProvider.ATTRIBUTES) >> attributes - 1 * cacheData.getRelationships() >> relationships - result == expected - noExceptionThrown() - } - - void "test get application no result"() { - given: - String appName = 'appName' - String appKey = Keys.getApplicationKey(appName) - - when: - OpenstackApplication result = provider.getApplication(appName) - - then: - 1 * cache.get(APPLICATIONS.ns, appKey) >> null - 0 * _ - result == null - noExceptionThrown() - } - - void "test get application exception"() { - given: - String appName = 'appName' - String appKey = Keys.getApplicationKey(appName) - Throwable throwable = new JedisException('test') - - when: - provider.getApplication(appName) - - then: - 1 * cache.get(APPLICATIONS.ns, appKey) >> { throw throwable } - JedisException exception = thrown(JedisException) - exception == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackClusterProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackClusterProviderSpec.groovy deleted file mode 100644 index 6327afc1132..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackClusterProviderSpec.groovy +++ /dev/null @@ -1,440 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackCluster -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstance -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackServerGroup -import org.mockito.internal.util.collections.Sets -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.CLUSTERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS - -@Unroll -class OpenstackClusterProviderSpec extends Specification { - - @Shared - String account = 'test' - - OpenstackClusterProvider provider - OpenstackInstanceProvider instanceProvider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - objectMapper = new ObjectMapper() - cache = Mock(Cache) - instanceProvider = Mock(OpenstackInstanceProvider) - provider = Spy(OpenstackClusterProvider, constructorArgs: [ - new OpenstackCloudProvider(), cache, objectMapper, instanceProvider - ]) - } - - void "test all get clusters"() { - given: - CacheData cacheData = Mock(CacheData) - Map attributes = [name: 'name', accountName: account] - - when: - Map> result = provider.clusters - - then: - 1 * cache.getAll(CLUSTERS.ns) >> [cacheData] - 1 * cacheData.attributes >> attributes - result == [(account): [new OpenstackCluster(attributes).view].toSet()] - noExceptionThrown() - } - - void "test get clusters exception"() { - given: - Throwable throwable = new JedisException('test') - - when: - provider.getClusters() - - then: - 1 * cache.getAll(CLUSTERS.ns) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - void "test get clusters internal"() { - given: - boolean details = false - String appName = 'app' - String appKey = Keys.getApplicationKey(appName) - CacheData appCache = Mock(CacheData) - Map> relationships = Mock(Map) - Collection clusterKeys = Mock(Collection) - CacheData clusterData = Mock(CacheData) - Collection clusters = [clusterData] - OpenstackCluster.View cluster = Mock(OpenstackCluster.View) - - when: - Map> result = provider.getClustersInternal(appName, details) - - then: - 1 * cache.get(APPLICATIONS.ns, appKey) >> appCache - 1 * appCache.relationships >> relationships - 1 * relationships.get(CLUSTERS.ns) >> clusterKeys - 1 * cache.getAll(CLUSTERS.ns, clusterKeys, _) >> clusters - 1 * provider.clusterFromCacheData(_, _) >> cluster - 1 * cluster.accountName >> account - - and: - result == [(account): [cluster].toSet()] - } - - void "test get clusters internal exception"() { - given: - boolean details = false - String appName = 'app' - String appKey = 
Keys.getApplicationKey(appName) - Throwable throwable = new JedisException('test') - - when: - provider.getClustersInternal(appName, details) - - then: - 1 * cache.get(APPLICATIONS.ns, appKey) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - void "test get cluster summaries by app"() { - given: - String appName = 'app' - Map> attributes = Mock(Map) - - when: - Map> result = provider.getClusterSummaries(appName) - - then: - 1 * provider.getClustersInternal(appName, false) >> attributes - result == attributes - noExceptionThrown() - } - - void "test get cluster details by app"() { - given: - String appName = 'app' - Map> attributes = Mock(Map) - - when: - Map> result = provider.getClusterDetails(appName) - - then: - 1 * provider.getClustersInternal(appName, true) >> attributes - result == attributes - noExceptionThrown() - } - - void "test get clusters by app and account - #testCase"() { - given: - String appName = 'app' - - when: - Set result = provider.getClusters(appName, account) - - then: - 1 * provider.getClusterDetails(appName) >> details - result == expected - noExceptionThrown() - - where: - testCase | details | expected - 'normal' | [(account): Sets.newSet(new OpenstackCluster())] | Sets.newSet(new OpenstackCluster()) - 'empty' | [(account): Sets.newSet()] | Sets.newSet() - 'null' | [(account): null] | null - 'missing' | [:] | null - 'null details' | null | null - } - - void "test get cluster by app, account, name - #testCase"() { - given: - String appName = 'app' - String name = 'name' - - when: - OpenstackCluster.View result = provider.getCluster(appName, account, name) - - then: - 1 * provider.getClusters(appName, account) >> details - result == expected - noExceptionThrown() - - where: - testCase | details | expected - 'normal' | Sets.newSet(new OpenstackCluster(name: 'name').view) | new OpenstackCluster(name: 'name').view - 'missing' | Sets.newSet(new OpenstackCluster(name: 'namez').view) | null - 'empty set' | Sets.newSet() | null - 'null set' | null | null - } - - void "test server group - #testCase"() { - given: - String name = 'name' - String region = 'region' - String serverGroupKey = Keys.getServerGroupKey(name, account, region) - - when: - OpenstackServerGroup.View result = provider.getServerGroup(account, region, name) - - then: - 1 * cache.get(SERVER_GROUPS.ns, serverGroupKey, _) >> cacheData - if (cacheData) { - 1 * provider.serverGroupFromCacheData(cacheData) >> expected - } - result == expected - noExceptionThrown() - - where: - testCase | cacheData | expected - 'normal' | Mock(CacheData) | Mock(OpenstackServerGroup.View) - 'no data' | null | null - } - - void "test server group exception"() { - given: - String name = 'name' - String region = 'region' - String serverGroupKey = Keys.getServerGroupKey(name, account, region) - Throwable throwable = new JedisException('test') - - when: - provider.getServerGroup(account, region, name) - - then: - 1 * cache.get(SERVER_GROUPS.ns, serverGroupKey, _) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - void "test cluster from cache data"() { - given: - boolean details = true - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection serverGroupKeys = Mock(Collection) - Map attributes = [accountName: account, name: 'name'] - CacheData serverGroupCache = Mock(CacheData) - OpenstackServerGroup.View openstackServerGroup = Mock(OpenstackServerGroup.View) - 
OpenstackLoadBalancer.View openstackLoadBalancer = Mock(OpenstackLoadBalancer.View) - - when: - OpenstackCluster.View result = provider.clusterFromCacheData(cacheData, details) - - then: - 1 * cacheData.attributes >> attributes - 1 * cacheData.relationships >> relationships - 1 * relationships.get(SERVER_GROUPS.ns) >> serverGroupKeys - 1 * cache.getAll(SERVER_GROUPS.ns, serverGroupKeys, _) >> [serverGroupCache] - 1 * provider.serverGroupFromCacheData(serverGroupCache) >> openstackServerGroup - 1 * provider.loadBalancersFromCacheData(serverGroupCache) >> [openstackLoadBalancer] - - and: - result.with { - assert accountName == account - assert name == 'name' - assert serverGroups.size() == 1 - assert loadBalancers.size() == 1 - it - } - noExceptionThrown() - } - - void "test cluster from cache data - no server groups"() { - given: - boolean details = true - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection serverGroupKeys = null - Map attributes = [accountName: account, name: 'name'] - - when: - OpenstackCluster.View result = provider.clusterFromCacheData(cacheData, details) - - then: - 1 * cacheData.attributes >> attributes - 1 * cacheData.relationships >> relationships - 1 * relationships.get(SERVER_GROUPS.ns) >> serverGroupKeys - - and: - result == new OpenstackCluster(accountName: account, name: 'name').view - noExceptionThrown() - } - - void "test cluster from cache data - exception"() { - given: - boolean details = true - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection serverGroupKeys = Mock(Collection) - Map attributes = [accountName: account, name: 'name'] - Throwable throwable = new JedisException('test') - - when: - provider.clusterFromCacheData(cacheData, details) - - then: - 1 * cacheData.attributes >> attributes - 1 * cacheData.relationships >> relationships - 1 * relationships.get(SERVER_GROUPS.ns) >> serverGroupKeys - 1 * cache.getAll(SERVER_GROUPS.ns, serverGroupKeys, _) >> { throw throwable } - - and: - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - void "test server group from cache data no keys"() { - given: - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection instanceKeys = null - Map attributes = [account: account, name: 'name', region: 'region'] - - when: - OpenstackServerGroup.View result = provider.serverGroupFromCacheData(cacheData) - - then: - 1 * cacheData.attributes >> attributes - 1 * cacheData.relationships >> relationships - 1 * relationships.get(INSTANCES.ns) >> instanceKeys - 0 * instanceProvider.getInstances(instanceKeys) - - and: - result == new OpenstackServerGroup(attributes).view - noExceptionThrown() - } - - void "test server group from cache data"() { - given: - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection instanceKeys = Mock(Collection) - Map attributes = [account: account, name: 'name', region: 'region'] - OpenstackInstance.View instance = Mock(OpenstackInstance.View) - Set instances = [instance].toSet() - String zone = 'zone1' - - when: - OpenstackServerGroup.View result = provider.serverGroupFromCacheData(cacheData) - - then: - 1 * cacheData.attributes >> attributes - 1 * cacheData.relationships >> relationships - 1 * relationships.get(INSTANCES.ns) >> instanceKeys - 1 * instanceProvider.getInstances(instanceKeys) >> instances - 1 * instance.zone >> zone - - and: - result.with { - assert account == account - assert name == 'name' - assert region == 'region' - assert 
instances.size() == 1 - assert zones == [zone].toSet() - it - } - noExceptionThrown() - } - - void "test server group from cache data exception"() { - given: - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection instanceKeys = Mock(Collection) - Map attributes = [account: account, name: 'name', region: 'region'] - Throwable throwable = new JedisException('test') - - when: - provider.serverGroupFromCacheData(cacheData) - - then: - 1 * cacheData.attributes >> attributes - 1 * cacheData.relationships >> relationships - 1 * relationships.get(INSTANCES.ns) >> instanceKeys - 1 * instanceProvider.getInstances(instanceKeys) >> { throw throwable } - - and: - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - void "test load balancer from cache data - #testCase"() { - given: - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - CacheData loadBalancerCache = Mock(CacheData) - Map attributes = [account: account, name: 'name', region: 'region'] - - when: - Set result = provider.loadBalancersFromCacheData(cacheData) - - then: - 1 * cacheData.relationships >> relationships - 1 * relationships.get(LOAD_BALANCERS.ns) >> loadbalancerKeys - if (loadbalancerKeys) { - 1 * cache.getAll(LOAD_BALANCERS.ns, loadbalancerKeys) >> [loadBalancerCache] - 1 * loadBalancerCache.attributes >> attributes - } - - and: - result == expected - noExceptionThrown() - - where: - testCase | loadbalancerKeys | expected - 'no instances' | null | [].toSet() - 'some' | Mock(Collection) | [new OpenstackLoadBalancer(account: account, name: 'name', region: 'region').view].toSet() - } - - void "test load balancer from cache data"() { - given: - CacheData cacheData = Mock(CacheData) - Map> relationships = Mock(Map) - Collection loadbalancerKeys = Mock(Collection) - Throwable throwable = new JedisException('test') - - when: - provider.loadBalancersFromCacheData(cacheData) - - then: - 1 * cacheData.relationships >> relationships - 1 * relationships.get(LOAD_BALANCERS.ns) >> loadbalancerKeys - 1 * cache.getAll(LOAD_BALANCERS.ns, loadbalancerKeys) >> { throw throwable } - - and: - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackImageProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackImageProviderSpec.groovy deleted file mode 100644 index 3e1e4c88965..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackImageProviderSpec.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.common.collect.Sets -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackImage -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.IMAGES - -class OpenstackImageProviderSpec extends Specification { - - String account = 'test' - String region = 'region' - - OpenstackImageProvider imageProvider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - cache = Mock(Cache) - objectMapper = Mock(ObjectMapper) - imageProvider = new OpenstackImageProvider(cache, objectMapper) - } - - void "test get images by account"() { - given: - String id = UUID.randomUUID().toString() - Collection filters = Mock(Collection) - CacheData cacheData = Mock(CacheData) - Map attributes = Mock(Map) - OpenstackImage openstackImage = Mock(OpenstackImage) - - when: - Map> result = imageProvider.listImagesByAccount() - - then: - 1 * cache.filterIdentifiers(IMAGES.ns, "$OpenstackCloudProvider.ID:*") >> filters - 1 * cache.getAll(IMAGES.ns, filters) >> [cacheData] - 1 * cacheData.id >> Keys.getImageKey(id, account, region) - 1 * cacheData.attributes >> attributes - 1 * objectMapper.convertValue(attributes, OpenstackImage) >> openstackImage - result == [(account): Sets.newHashSet(openstackImage)] - noExceptionThrown() - } - - void "test get images by account - exception thrown"() { - given: - Collection filters = Mock(Collection) - Throwable throwable = new JedisException('test') - - when: - imageProvider.listImagesByAccount() - - then: - 1 * cache.filterIdentifiers(IMAGES.ns, "$OpenstackCloudProvider.ID:*") >> filters - 1 * cache.getAll(IMAGES.ns, filters) >> { throw throwable } - thrown(JedisException) - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceProviderSpec.groovy deleted file mode 100644 index 767bd76872c..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceProviderSpec.groovy +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.deploy.exception.OpenstackProviderException -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstance -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancerHealth -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackCredentials -import com.netflix.spinnaker.clouddriver.openstack.security.OpenstackNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import org.openstack4j.model.common.ActionResponse -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCES -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS - -class OpenstackInstanceProviderSpec extends Specification { - - String account = 'test' - String region = 'east' - - OpenstackInstanceProvider instanceProvider - Cache cache - ObjectMapper objectMapper - AccountCredentialsProvider accountCredentialsProvider - - void "setup"() { - accountCredentialsProvider = Mock(AccountCredentialsProvider) - cache = Mock(Cache) - objectMapper = Mock(ObjectMapper) - instanceProvider = new OpenstackInstanceProvider(cache, accountCredentialsProvider, objectMapper) - } - - void "test get instance"() { - given: - String id = 'instance' - CacheData cacheData = Mock(CacheData) - Map attributes = Mock(Map) - String instanceKey = Keys.getInstanceKey(id, account, region) - OpenstackInstance openstackInstance = Mock(OpenstackInstance) - - and: - Collection lbKeys = ['key'] - Map> relationshipMap = [(LOAD_BALANCERS.ns): lbKeys] - CacheData lbCacheData = Mock(CacheData) - OpenstackLoadBalancer openstackLoadBalancer = Mock(OpenstackLoadBalancer) - OpenstackLoadBalancerHealth openstackLoadBalancerHealth = Mock(OpenstackLoadBalancerHealth) - OpenstackInstance.View view = Mock(OpenstackInstance.View) - - when: - OpenstackInstance.View result = instanceProvider.getInstance(account, region, id) - - then: - 1 * cache.get(INSTANCES.ns, instanceKey, _ as RelationshipCacheFilter) >> cacheData - 1 * cacheData.attributes >> attributes - 1 * objectMapper.convertValue(attributes, OpenstackInstance) >> openstackInstance - 1 * cacheData.relationships >> relationshipMap - 1 * cache.getAll(LOAD_BALANCERS.ns, lbKeys) >> [lbCacheData] - 1 * objectMapper.convertValue(_, _) >> openstackLoadBalancer - 1 * openstackLoadBalancer.healths >> [openstackLoadBalancerHealth] - 1 * openstackLoadBalancerHealth.instanceId >> id - 1 * openstackInstance.instanceId >> id - 1 * openstackInstance.view >> view - - and: - result == view - noExceptionThrown() - } - - void "test get instance - nothing found"() { - given: - String id = 'instance' - - when: - OpenstackInstance.View result = instanceProvider.getInstance(account, region, id) - - then: - 1 * cache.get(INSTANCES.ns, Keys.getInstanceKey(id, account, region), _ as RelationshipCacheFilter) >> null - 0 * _ - result == null - noExceptionThrown() - } - - void "test get 
instance exception thrown"() { - given: - String id = 'instance' - Throwable throwable = new RuntimeException('test') - - when: - instanceProvider.getInstance(account, region, id) - - then: - 1 * cache.get(INSTANCES.ns, Keys.getInstanceKey(id, account, region), _ as RelationshipCacheFilter) >> { - throw throwable - } - thrown(RuntimeException) - } - - void "test get console output"() { - given: - String id = 'instance' - OpenstackNamedAccountCredentials namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - OpenstackCredentials openstackCredentials = GroovyMock(OpenstackCredentials) - OpenstackClientProvider openstackClientProvider = Mock(OpenstackClientProvider) - String output = 'output' - - when: - String result = instanceProvider.getConsoleOutput(account, region, id) - - then: - 1 * accountCredentialsProvider.getCredentials(account) >> namedAccountCredentials - 1 * namedAccountCredentials.credentials >> openstackCredentials - 1 * openstackCredentials.provider >> openstackClientProvider - 1 * openstackClientProvider.getConsoleOutput(region, id) >> output - result == output - noExceptionThrown() - } - - void "test get console output - illegal argument"() { - given: - String id = 'instance' - - when: - instanceProvider.getConsoleOutput(account, region, id) - - then: - 1 * accountCredentialsProvider.getCredentials(account) >> null - - and: - IllegalArgumentException exception = thrown(IllegalArgumentException) - [account, region].every { - exception.message.contains(it) - } - } - - void "test get console output - exception"() { - given: - String id = 'instance' - OpenstackNamedAccountCredentials namedAccountCredentials = Mock(OpenstackNamedAccountCredentials) - OpenstackCredentials openstackCredentials = GroovyMock(OpenstackCredentials) - OpenstackClientProvider openstackClientProvider = Mock(OpenstackClientProvider) - Throwable throwable = new OpenstackProviderException(ActionResponse.actionFailed('test', 1)) - - when: - instanceProvider.getConsoleOutput(account, region, id) - - then: - 1 * accountCredentialsProvider.getCredentials(account) >> namedAccountCredentials - 1 * namedAccountCredentials.credentials >> openstackCredentials - 1 * openstackCredentials.provider >> openstackClientProvider - 1 * openstackClientProvider.getConsoleOutput(region, id) >> { throw throwable } - - and: - OpenstackProviderException openstackProviderException = thrown(OpenstackProviderException) - openstackProviderException == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceTypeProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceTypeProviderSpec.groovy deleted file mode 100644 index 2204cdcf2e1..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackInstanceTypeProviderSpec.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackInstanceType -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.INSTANCE_TYPES - -class OpenstackInstanceTypeProviderSpec extends Specification { - - String account = 'test' - String region = 'east' - - OpenstackInstanceTypeProvider instanceProvider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - cache = Mock(Cache) - objectMapper = Mock(ObjectMapper) - instanceProvider = new OpenstackInstanceTypeProvider(cache, objectMapper) - } - - void "test get all"() { - given: - CacheData cacheData = Mock(CacheData) - Map attributes = Mock(Map) - OpenstackInstanceType openstackInstanceType = Mock(OpenstackInstanceType) - - when: - Set result = instanceProvider.getAll() - - then: - 1 * cache.getAll(INSTANCE_TYPES.ns, _) >> [cacheData] - 1 * cacheData.attributes >> attributes - 1 * objectMapper.convertValue(attributes, OpenstackInstanceType) >> openstackInstanceType - result == [openstackInstanceType].toSet() - noExceptionThrown() - } - - void "test get all exception thrown"() { - given: - Throwable throwable = new RuntimeException('test') - - when: - instanceProvider.getAll() - - then: - 1 * cache.getAll(INSTANCE_TYPES.ns, _) >> { throw throwable } - thrown(RuntimeException) - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackLoadBalancerProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackLoadBalancerProviderSpec.groovy deleted file mode 100644 index 370da398449..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackLoadBalancerProviderSpec.groovy +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackFloatingIP -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancer -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackLoadBalancerSummary -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackNetwork -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackServerGroup -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSubnet -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Ignore -import spock.lang.Specification - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.FLOATING_IPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.LOAD_BALANCERS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SERVER_GROUPS -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS - -class OpenstackLoadBalancerProviderSpec extends Specification { - - String account = 'test' - String region = 'east' - - OpenstackClusterProvider clusterProvider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - cache = Mock(Cache) - objectMapper = Mock(ObjectMapper) - clusterProvider = Mock(OpenstackClusterProvider) - } - - void "test get all load balancers"() { - given: - String app = 'myapp' - String cluster = "$app-teststack" - String lbid = 'lb1' - String lbName = "$app-lb" - String name = "$cluster-v002" - String lbKey = Keys.getLoadBalancerKey(lbName, lbid, account, region) - CacheData cacheData = Mock(CacheData) - Collection cacheDataList = [cacheData] - OpenstackLoadBalancer loadBalancer = Mock(OpenstackLoadBalancer) { - it.id >> { lbid } - it.name >> { lbName } - it.account >> { account } - it.region >> { region } - it.serverGroups >> { [new LoadBalancerServerGroup(name: name)] } - } - OpenstackFloatingIP floatingIP = Stub(OpenstackFloatingIP) - OpenstackNetwork network = Stub(OpenstackNetwork) - OpenstackSubnet subnet = Stub(OpenstackSubnet) - OpenstackLoadBalancer.View view = buildLoadBalancerView(loadBalancer, floatingIP, network, subnet) - OpenstackLoadBalancerProvider loadBalancerProvider = Spy(OpenstackLoadBalancerProvider, constructorArgs: [cache, objectMapper, clusterProvider]) { - fromCacheData(cacheData) >> { view } - } - - when: - Set result = loadBalancerProvider.getApplicationLoadBalancers(app) - - then: - 1 * cache.filterIdentifiers(LOAD_BALANCERS.ns, Keys.getLoadBalancerKey(app, '*', '*', '*')) >> [lbKey].toSet() - 1 * cache.filterIdentifiers(LOAD_BALANCERS.ns, Keys.getLoadBalancerKey("$app-*", '*', '*', '*')) >> [lbKey].toSet() - 1 * cache.getAll(LOAD_BALANCERS.ns, [lbKey].toSet(), _ as RelationshipCacheFilter) >> cacheDataList - result.size() == 1 - result[0] == view - noExceptionThrown() - } - - void "test get all load balancers - throw exception"() { - given: - String app 
= 'myapp' - String lbid = 'lb1' - String lbName = "$app-lb" - String lbKey = Keys.getLoadBalancerKey(lbName, lbid, account, region) - Throwable throwable = new JedisException('test') - OpenstackLoadBalancerProvider loadBalancerProvider = Spy(OpenstackLoadBalancerProvider, constructorArgs: [cache, objectMapper, clusterProvider]) - - when: - loadBalancerProvider.getApplicationLoadBalancers(app) - - then: - 1 * cache.filterIdentifiers(LOAD_BALANCERS.ns, Keys.getLoadBalancerKey(app, '*', '*', '*')) >> [lbKey].toSet() - 1 * cache.filterIdentifiers(LOAD_BALANCERS.ns, Keys.getLoadBalancerKey("$app-*", '*', '*', '*')) >> [lbKey].toSet() - 1 * cache.getAll(LOAD_BALANCERS.ns, [lbKey].toSet(), _ as RelationshipCacheFilter) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - void 'test get load balancer by account, region, and name'() { - given: - String lbid = 'lb1' - String name = 'myapp-teststack-v002' - CacheData cacheData = Mock(CacheData) - Collection cacheDataList = [cacheData] - String lbKey = Keys.getLoadBalancerKey('*', lbid, account, region) - OpenstackLoadBalancer loadBalancer = Mock(OpenstackLoadBalancer) { - it.id >> { lbid } - it.account >> { account } - it.region >> { region } - it.serverGroups >> { [new LoadBalancerServerGroup(name: name)] } - } - OpenstackFloatingIP floatingIP = Stub(OpenstackFloatingIP) - OpenstackNetwork network = Stub(OpenstackNetwork) - OpenstackSubnet subnet = Stub(OpenstackSubnet) - OpenstackLoadBalancer.View view = buildLoadBalancerView(loadBalancer, floatingIP, network, subnet) - OpenstackLoadBalancerProvider loadBalancerProvider = Spy(OpenstackLoadBalancerProvider, constructorArgs: [cache, objectMapper, clusterProvider]) { - fromCacheData(cacheData) >> { view } - } - List filter = ['filter'] - - when: - Set result = loadBalancerProvider.getLoadBalancers(account, region, lbid) - - then: - 1 * cache.filterIdentifiers(LOAD_BALANCERS.ns, lbKey) >> filter - 1 * cache.getAll(LOAD_BALANCERS.ns, filter, _ as RelationshipCacheFilter) >> cacheDataList - result.size() == 1 - result[0] == view - noExceptionThrown() - } - - void "test get load balancer by account, region, and name - exception"() { - given: - String lbid = 'lb1' - CacheData cacheData = Mock(CacheData) - Collection cacheDataList = [cacheData] - String lbKey = Keys.getLoadBalancerKey('*', lbid, account, region) - List filter = ['filter'] - Throwable throwable = new JedisException('test') - OpenstackLoadBalancerProvider loadBalancerProvider = Spy(OpenstackLoadBalancerProvider, constructorArgs: [cache, objectMapper, clusterProvider]) - - when: - loadBalancerProvider.getLoadBalancers(account, region, lbid) - - then: - 1 * cache.filterIdentifiers(LOAD_BALANCERS.ns, lbKey) >> filter - 1 * cache.getAll(LOAD_BALANCERS.ns, filter, _ as RelationshipCacheFilter) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - def "test convert cache data to load balancer"() { - given: - String lbid = 'lb1' - String name = 'myapp-teststack-v002' - Map attributes = Mock(Map) - CacheData ipCacheData = Mock(CacheData) - ipCacheData.attributes >> Mock(Map) - CacheData networkCacheData = Mock(CacheData) - networkCacheData.attributes >> Mock(Map) - CacheData subnetCacheData = Mock(CacheData) - subnetCacheData.attributes >> Mock(Map) - String sgKey = Keys.getServerGroupKey(name, account, region) - String ipId = UUID.randomUUID().toString() - String ipKey = Keys.getFloatingIPKey(ipId, account, region) - String 
subnetId = UUID.randomUUID().toString() - String subnetKey = Keys.getSubnetKey(subnetId, account, region) - String networkId = UUID.randomUUID().toString() - String networkKey = Keys.getNetworkKey(networkId, account, region) - CacheData cacheData = Mock(CacheData) - cacheData.relationships >> [(SERVER_GROUPS.ns): [sgKey], (FLOATING_IPS.ns): [ipKey], (SUBNETS.ns): [subnetKey], (NETWORKS.ns): [networkKey]] - cacheData.attributes >> attributes - OpenstackFloatingIP floatingIP = Stub(OpenstackFloatingIP) - OpenstackNetwork network = Stub(OpenstackNetwork) - OpenstackSubnet subnet = Stub(OpenstackSubnet) - OpenstackServerGroup.View serverGroup = Mock(OpenstackServerGroup.View) { - getName() >> { name } - isDisabled() >> { false } - getInstances() >> { [] } - } - OpenstackLoadBalancer loadBalancer = new OpenstackLoadBalancer(id: lbid, account: account, region: region, floatingIP: floatingIP, subnet: subnet, network: network, serverGroups: [new LoadBalancerServerGroup(name: name)] ) - OpenstackLoadBalancerProvider loadBalancerProvider = new OpenstackLoadBalancerProvider(cache, objectMapper, clusterProvider) - - when: - OpenstackLoadBalancer.View result = loadBalancerProvider.fromCacheData(cacheData) - - then: - 1 * cache.getAll(FLOATING_IPS.ns, cacheData.relationships[(FLOATING_IPS.ns)] ?: []) >> [ipCacheData] - 1 * objectMapper.convertValue(_ as Map, OpenstackFloatingIP) >> floatingIP - 1 * cache.getAll(NETWORKS.ns, cacheData.relationships[(NETWORKS.ns)] ?: []) >> [networkCacheData] - 1 * objectMapper.convertValue(_ as Map, OpenstackNetwork) >> network - 1 * cache.getAll(SUBNETS.ns, cacheData.relationships[(SUBNETS.ns)] ?: []) >> [subnetCacheData] - 1 * objectMapper.convertValue(_ as Map, OpenstackSubnet) >> subnet - 1 * objectMapper.convertValue(attributes, OpenstackLoadBalancer) >> loadBalancer - 1 * clusterProvider.getServerGroup(account, region, name) >> serverGroup - result == loadBalancer.view - noExceptionThrown() - } - - def "test convert cache data to load balancer - exception"() { - given: - Throwable throwable = new JedisException('test') - String lbid = 'lb1' - String name = 'myapp-teststack-v002' - Map attributes = Mock(Map) - CacheData ipCacheData = Mock(CacheData) - ipCacheData.attributes >> Mock(Map) - CacheData networkCacheData = Mock(CacheData) - networkCacheData.attributes >> Mock(Map) - CacheData subnetCacheData = Mock(CacheData) - subnetCacheData.attributes >> Mock(Map) - String sgKey = Keys.getServerGroupKey(name, account, region) - String ipId = UUID.randomUUID().toString() - String ipKey = Keys.getFloatingIPKey(ipId, account, region) - String subnetId = UUID.randomUUID().toString() - String subnetKey = Keys.getSubnetKey(subnetId, account, region) - String networkId = UUID.randomUUID().toString() - String networkKey = Keys.getNetworkKey(networkId, account, region) - CacheData cacheData = Mock(CacheData) - cacheData.relationships >> [(SERVER_GROUPS.ns): [sgKey], (FLOATING_IPS.ns): [ipKey], (SUBNETS.ns): [subnetKey], (NETWORKS.ns): [networkKey]] - cacheData.attributes >> attributes - OpenstackFloatingIP floatingIP = Stub(OpenstackFloatingIP) - OpenstackNetwork network = Stub(OpenstackNetwork) - OpenstackSubnet subnet = Stub(OpenstackSubnet) - OpenstackLoadBalancer.View loadBalancer = Mock(OpenstackLoadBalancer.View) { - it.id >> { lbid } - it.account >> { account } - it.region >> { region } - it.ip >> { floatingIP.id } - it.subnetId >> { subnet.id } - it.subnetName >> { subnet.name } - it.networkId >> { network.id } - it.networkName >> { network.name } - it.serverGroups >> { 
[new LoadBalancerServerGroup(name: name)] } - } - OpenstackLoadBalancerProvider loadBalancerProvider = new OpenstackLoadBalancerProvider(cache, objectMapper, clusterProvider) - - when: - loadBalancerProvider.fromCacheData(cacheData) - - then: - 1 * cache.getAll(FLOATING_IPS.ns, cacheData.relationships[(FLOATING_IPS.ns)] ?: []) >> [ipCacheData] - 1 * objectMapper.convertValue(_ as Map, OpenstackFloatingIP) >> floatingIP - 1 * cache.getAll(NETWORKS.ns, cacheData.relationships[(NETWORKS.ns)] ?: []) >> [networkCacheData] - 1 * objectMapper.convertValue(_ as Map, OpenstackNetwork) >> network - 1 * cache.getAll(SUBNETS.ns, cacheData.relationships[(SUBNETS.ns)] ?: []) >> [subnetCacheData] - 1 * objectMapper.convertValue(_ as Map, OpenstackSubnet) >> subnet - 1 * objectMapper.convertValue(attributes, OpenstackLoadBalancer) >> loadBalancer - 1 * clusterProvider.getServerGroup(account, region, name) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - throwable == thrownException - } - - @Ignore - OpenstackLoadBalancer.View buildLoadBalancerView(OpenstackLoadBalancer loadBalancer, OpenstackFloatingIP floatingIP, OpenstackNetwork network, OpenstackSubnet subnet) { - new OpenstackLoadBalancer.View(id: loadBalancer.id, name: loadBalancer.name, description: loadBalancer.description, - account: account, region: region, - ip: floatingIP.id, subnetId: subnet.id, - subnetName: subnet.name, networkId: network.id, networkName: network.name, - serverGroups: [new LoadBalancerServerGroup(name: 'myapp-teststack-v002')]) - - } - - - def 'get load balancer by account, region, name'() { - given: - def provider = Spy(OpenstackLoadBalancerProvider, constructorArgs: [cache, objectMapper, clusterProvider]) - String name = 'id0' - - when: - List result = provider.byAccountAndRegionAndName(account, region, name) - - then: - 1 * provider.getLoadBalancers(account, region, name) >> lbs - result.size() == lbs.size() - if (result.size() > 0) { - assert result[0] == lbs[0] - } - noExceptionThrown() - - where: - lbs << [[create(0)].toSet(), [].toSet()] - } - - def 'get load balancer by account, region, name - throw exception'() { - given: - def provider = Spy(OpenstackLoadBalancerProvider, constructorArgs: [cache, objectMapper, clusterProvider]) - String name = 'id0' - Throwable throwable = new JedisException('exception') - - when: - provider.byAccountAndRegionAndName(account, region, name) - - then: - 1 * provider.getLoadBalancers(account, region, name) >> { throw throwable } - Throwable thrownException = thrown(JedisException) - thrownException == throwable - } - - OpenstackLoadBalancer.View create(int i) { - String account = 'test' - String region = 'r1' - String id = "id$i" - String name = "name$i" - String description = 'internal_port=8100' - String status = 'up' - String protocol = 'http' - String algorithm = 'round_robin' - String ip = '1.2.3.4' - Integer externalPort = 80 - String subnet = "subnet$i" - String network = "network$i" - def healthMonitor = new OpenstackLoadBalancer.OpenstackHealthMonitor(id: "health$i", httpMethod: 'GET', - maxRetries: 5, adminStateUp: 'UP', delay: 5, expectedCodes: [200]) - def serverGroups = [new LoadBalancerServerGroup(name: 'sg1', isDisabled: false, - instances: [new LoadBalancerInstance(id: 'id', zone: "zone$i", health: [state:'up', zone: "zone$i"])])] - new OpenstackLoadBalancer.View(account: account, region: region, id: id, name: name, description: description, - status: status, algorithm: algorithm, ip: ip, subnetId: subnet, subnetName: subnet, networkId: 
network, networkName: network, - healthMonitor: healthMonitor, serverGroups: serverGroups) - } - -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackNetworkProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackNetworkProviderSpec.groovy deleted file mode 100644 index 3af769eddca..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackNetworkProviderSpec.groovy +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackNetwork -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Specification -import spock.lang.Unroll - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.NETWORKS - -class OpenstackNetworkProviderSpec extends Specification { - - String account = 'test' - String region = 'west' - - OpenstackNetworkProvider provider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - objectMapper = Mock(ObjectMapper) - cache = Mock(Cache) - provider = new OpenstackNetworkProvider(cache, objectMapper) - } - - void "test get all"() { - given: - Map attributes = [:] - CacheData mockData = Mock(CacheData) - Collection cacheData = [mockData] - Collection filters = Mock(Collection) - OpenstackNetwork network = Mock(OpenstackNetwork) - - when: - Set result = provider.getAll() - - then: - 1 * cache.filterIdentifiers(NETWORKS.ns, "${OpenstackCloudProvider.ID}:${NETWORKS.ns}:*:*:*") >> filters - 1 * cache.getAll(NETWORKS.ns, filters, _) >> cacheData - 1 * mockData.attributes >> attributes - 1 * objectMapper.convertValue(attributes, OpenstackNetwork) >> network - result?.first() == network - noExceptionThrown() - } - - @Unroll - void "test get all - #testCase"() { - given: - Collection filters = Mock(Collection) - - when: - Set result = provider.getAll() - - then: - 1 * cache.filterIdentifiers(NETWORKS.ns, "${OpenstackCloudProvider.ID}:${NETWORKS.ns}:*:*:*") >> filters - 1 * cache.getAll(NETWORKS.ns, filters, _) >> queryResult - - and: - result != null - result.isEmpty() - noExceptionThrown() - - where: - testCase | queryResult - 'empty' | [] - 'null' | null - } - - void "test get all - exception"() { - given: - Collection filters = Mock(Collection) - Throwable throwable = new JedisException('test') - - when: - provider.getAll() - - then: - 1 * cache.filterIdentifiers(NETWORKS.ns, "${OpenstackCloudProvider.ID}:${NETWORKS.ns}:*:*:*") >> filters - 1 * cache.getAll(NETWORKS.ns, filters, _) >> { throw throwable } - 
JedisException exception = thrown(JedisException) - exception == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSecurityGroupProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSecurityGroupProviderSpec.groovy deleted file mode 100644 index 12f9270c19f..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSecurityGroupProviderSpec.groovy +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.cache.WriteableCache -import com.netflix.spinnaker.cats.mem.InMemoryCache -import com.netflix.spinnaker.clouddriver.model.AddressableRange -import com.netflix.spinnaker.clouddriver.model.securitygroups.IpRangeRule -import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.cache.Keys -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSecurityGroup -import com.netflix.spinnaker.clouddriver.openstack.provider.OpenstackInfrastructureProvider -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Subject -import spock.lang.Unroll - -@Unroll -class OpenstackSecurityGroupProviderSpec extends Specification { - - @Subject - OpenstackSecurityGroupProvider provider - - WriteableCache cache = new InMemoryCache() - ObjectMapper mapper = new ObjectMapper() - - @Shared - Set account1East = [1, 2].collect { createSecurityGroup('account1', 'east') } - @Shared - Set account1West = [1, 2].collect { createSecurityGroup('account1', 'west') } - @Shared - Set account2East = [1, 2].collect { createSecurityGroup('account2', 'east') } - @Shared - Set account2West = [1, 2].collect { createSecurityGroup('account2', 'west') } - @Shared - Set allSecurityGroups = account1East.plus(account1West).plus(account2East).plus(account2West) - - def setup() { - provider = new OpenstackSecurityGroupProvider(cache, mapper) - cache.mergeAll(Keys.Namespace.SECURITY_GROUPS.ns, getAllCacheData()) - } - - def "cloudProvider is openstack"() { - when: - def type = provider.getCloudProvider() - - then: - type == OpenstackCloudProvider.ID - } - - def "get all security groups"() { - when: - def securityGroups = provider.getAll(true) - - then: - allSecurityGroups == securityGroups - } - - def "get all security groups without rules"() { - given: - def securityGroupsWithoutRules = allSecurityGroups.collect { sg -> - new OpenstackSecurityGroup(id: sg.id, - accountName: sg.accountName, - region: sg.region, - name: 
sg.name, - description: sg.description, - inboundRules: [] - ) - } as Set - - when: - def securityGroups = provider.getAll(false) - - then: - securityGroups == securityGroupsWithoutRules - } - - def "get all by region"() { - when: - def securityGroups = provider.getAllByRegion(true, region) - - then: - expected == securityGroups - - where: - region | expected - 'mid' | [] as Set - 'west' | account1West.plus(account2West) - 'east' | account2East.plus(account1East) - } - - def "get all by account"() { - when: - def securityGroups = provider.getAllByAccount(true, account) - - then: - expected == securityGroups - - where: - account | expected - 'account3' | [] as Set - 'account1' | account1West.plus(account1East) - 'account2' | account2West.plus(account2East) - } - - def "get all by account and name"() { - when: - def securityGroups = provider.getAllByAccountAndName(true, account, name) - - then: - expected == securityGroups - - where: - account | name | expected - 'account1' | 'invalid' | [] as Set - 'invalid' | 'name-west' | [] as Set - 'account1' | 'name-west' | account1West.findAll { it.name == 'name-west' } - 'account2' | 'name-west' | account2West.findAll { it.name == 'name-west' } - } - - def "get all by account and region"() { - when: - def securityGroups = provider.getAllByAccountAndRegion(true, account, region) - - then: - expected == securityGroups - - where: - account | region | expected - 'invalid' | 'west' | [] as Set - 'account2' | 'invalid' | [] as Set - 'account1' | 'west' | account1West - 'account2' | 'west' | account2West - } - - def "get security group"() { - when: - def securityGroup = provider.get(account, region, name, null) - - then: - if (expected) { - // Security groups are not guaranteed to be unique by account, region, and name - // Just ensuring the found security group has those attributes correct - expected.accountName == securityGroup.accountName - expected.region == securityGroup.region - expected.name == securityGroup.name - expected.inboundRules == securityGroup.inboundRules - } else { - securityGroup == null - } - - where: - account | region | name | expected - 'account1' | 'west' | 'name-east' | null - 'account1' | 'west' | 'name-west' | account1West[0] - 'account2' | 'west' | 'name-west' | account2West[0] - } - - def "get all with an empty cache"() { - given: - // Recreate the provider with an empty cache - cache = new InMemoryCache() - provider = new OpenstackSecurityGroupProvider(cache, mapper) - - when: - def securityGroups = provider.getAll(false) - - then: - securityGroups.empty - } - - void "get all throws an exception"() { - given: - // Recreate the provider with a mock cache to enable throwing an exception - cache = Mock(WriteableCache) - provider = new OpenstackSecurityGroupProvider(cache, mapper) - def filters = [] - def throwable = new JedisException('test') - - when: - provider.getAll(false) - - then: - 1 * cache.filterIdentifiers(Keys.Namespace.SECURITY_GROUPS.ns, _) >> filters - 1 * cache.getAll(Keys.Namespace.SECURITY_GROUPS.ns, filters, _) >> { throw throwable } - Throwable exception = thrown(JedisException) - exception == throwable - } - - - def createSecurityGroup(String account, String region) { - new OpenstackSecurityGroup(id: UUID.randomUUID().toString(), - accountName: account, - region: region, - name: "name-$region", - description: "Description", - inboundRules: [ - new IpRangeRule(protocol: 'tcp', - portRanges: [new Rule.PortRange(startPort: 3272, endPort: 3272)] as SortedSet, - range: new AddressableRange(ip: '10.10.0.0', cidr: 
'/24') - ) - ] - ) - } - - def getAllCacheData() { - allSecurityGroups.collect { sg -> - def key = Keys.getSecurityGroupKey(sg.name, sg.id, sg.accountName, sg.region) - Map attributes = mapper.convertValue(sg, OpenstackInfrastructureProvider.ATTRIBUTES) - new DefaultCacheData(key, attributes, [:]) - } - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSubnetProviderSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSubnetProviderSpec.groovy deleted file mode 100644 index 369e7adfce9..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/provider/view/OpenstackSubnetProviderSpec.groovy +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.provider.view - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.clouddriver.openstack.OpenstackCloudProvider -import com.netflix.spinnaker.clouddriver.openstack.model.OpenstackSubnet -import redis.clients.jedis.exceptions.JedisException -import spock.lang.Specification -import spock.lang.Unroll - -import static com.netflix.spinnaker.clouddriver.openstack.cache.Keys.Namespace.SUBNETS - -class OpenstackSubnetProviderSpec extends Specification { - - String account = 'test' - String region = 'west' - - OpenstackSubnetProvider provider - Cache cache - ObjectMapper objectMapper - - void "setup"() { - objectMapper = Mock(ObjectMapper) - cache = Mock(Cache) - provider = new OpenstackSubnetProvider(cache, objectMapper) - } - - void "test get all"() { - given: - Map attributes = [:] - CacheData mockData = Mock(CacheData) - Collection cacheData = [mockData] - Collection filters = Mock(Collection) - OpenstackSubnet subnet = Mock(OpenstackSubnet) - - when: - Set result = provider.getAll() - - then: - 1 * cache.filterIdentifiers(SUBNETS.ns, "${OpenstackCloudProvider.ID}:${SUBNETS.ns}:*:*:*") >> filters - 1 * cache.getAll(SUBNETS.ns, filters, _) >> cacheData - 1 * mockData.attributes >> attributes - 1 * objectMapper.convertValue(attributes, OpenstackSubnet) >> subnet - result?.first() == subnet - noExceptionThrown() - } - - @Unroll - void "test get all - #testCase"() { - given: - Collection filters = Mock(Collection) - - when: - Set result = provider.getAll() - - then: - 1 * cache.filterIdentifiers(SUBNETS.ns, "${OpenstackCloudProvider.ID}:${SUBNETS.ns}:*:*:*") >> filters - 1 * cache.getAll(SUBNETS.ns, filters, _) >> queryResult - - and: - result != null - result.isEmpty() - noExceptionThrown() - - where: - testCase | queryResult - 'empty' | [] - 'null' | null - } - - void "test get all - exception"() { - given: - Collection filters = Mock(Collection) - Throwable throwable = new JedisException('test') - - when: - 
provider.getAll() - - then: - 1 * cache.filterIdentifiers(SUBNETS.ns, "${OpenstackCloudProvider.ID}:${SUBNETS.ns}:*:*:*") >> filters - 1 * cache.getAll(SUBNETS.ns, filters, _) >> { throw throwable } - JedisException exception = thrown(JedisException) - exception == throwable - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackNamedAccountCredentialsSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackNamedAccountCredentialsSpec.groovy deleted file mode 100644 index fdf1b662772..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/security/OpenstackNamedAccountCredentialsSpec.groovy +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.security - -import com.netflix.spinnaker.clouddriver.consul.config.ConsulConfig -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackClientProvider -import com.netflix.spinnaker.clouddriver.openstack.client.OpenstackIdentityV3Provider -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.LbaasConfig -import com.netflix.spinnaker.clouddriver.openstack.config.OpenstackConfigurationProperties.StackConfig -import org.openstack4j.api.OSClient -import org.openstack4j.api.client.IOSClientBuilder -import org.openstack4j.model.identity.v3.Token -import org.openstack4j.openstack.compute.domain.ext.ExtAvailabilityZone -import spock.lang.Specification -import spock.lang.Unroll - -class OpenstackNamedAccountCredentialsSpec extends Specification { - - List regions - - def "setup"() { - regions = ['east'] - } - - def "Provider factory returns v3 provider"() { - setup: - // Mock out the authenticate call within Openstack4J - OSClient.OSClientV3 mockClient = Mock(OSClient.OSClientV3) - IOSClientBuilder.V3.metaClass.authenticate = { mockClient } - - when: - def credentials = new OpenstackNamedAccountCredentials("name", "test", "main", "user", "pw", "project", "domain", "endpoint", [], false, "", new LbaasConfig(pollTimeout: 60, pollInterval: 5),new StackConfig(pollTimeout: 60, pollInterval: 5), new ConsulConfig(), null) - def client = credentials.credentials.provider.client - - then: - 1 * mockClient.token >> Mock(Token) - credentials.credentials.provider.identityProvider instanceof OpenstackIdentityV3Provider - credentials.credentials.provider.identityProvider.token instanceof Token - client instanceof OSClient.OSClientV3 - } - - static def azA = new ExtAvailabilityZone(zoneName: "azA", zoneState: new ExtAvailabilityZone.ExtZoneState(available: true)) - static def azB = new ExtAvailabilityZone(zoneName: "azB", zoneState: new ExtAvailabilityZone.ExtZoneState(available: true)) - static def azUnavailable = new 
ExtAvailabilityZone(zoneName: "azC", zoneState: new ExtAvailabilityZone.ExtZoneState(available: false)) - - @Unroll() - def "Builder populates region-to-zone map: #description"() { - setup: - OpenstackClientProvider mockProvider = Mock(OpenstackClientProvider) - OpenstackCredentials.metaClass.getProvider = { mockProvider } - - when: - def builder = new OpenstackNamedAccountCredentials.Builder() - builder.regions = regions - def account = builder.build() - - then: - 1 * mockProvider.getZones("r1") >> r1_zones - _ * mockProvider.getZones("r2") >> r2_zones - account.regionToZones == expected - - where: - description | regions | r1_zones | r2_zones | expected - "simple case" | ["r1"] | [azA] | null | ["r1": ["azA"]] - "multiple regions" | ["r1", "r2"] | [azA] | [azB] | ["r1": ["azA"], "r2": ["azB"]] - "multiple zones" | ["r1"] | [azA, azB] | null | ["r1": ["azA", "azB"]] - "skips unavailable zones" | ["r1"] | [azA, azUnavailable, azB] | null | ["r1": ["azA", "azB"]] - "empty region" | ["r1", "r2"] | null | [azA, azB] | ["r1": [], "r2": ["azA", "azB"]] - } -} diff --git a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/utils/DateUtilsSpec.groovy b/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/utils/DateUtilsSpec.groovy deleted file mode 100644 index 6cd5a2abe6e..00000000000 --- a/clouddriver-openstack/src/test/groovy/com/netflix/spinnaker/clouddriver/openstack/utils/DateUtilsSpec.groovy +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2016 Target, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.openstack.utils - -import spock.lang.Specification - -import java.time.LocalDateTime -import java.time.ZoneId -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter -import java.time.format.DateTimeParseException - -class DateUtilsSpec extends Specification { - - - def 'parse local date time'() { - given: - def dateTime = '2011-12-03T10:15:30' - def expected = LocalDateTime.parse(dateTime, DateTimeFormatter.ISO_LOCAL_DATE_TIME)?.atZone(ZoneId.systemDefault()) - - when: - ZonedDateTime result = DateUtils.parseZonedDateTime(dateTime) - - then: - result == expected - noExceptionThrown() - } - - def 'default date time'() { - given: - def defaultDateTime = ZonedDateTime.now() - - when: - ZonedDateTime result = DateUtils.parseZonedDateTime(null, defaultDateTime) - - then: - result == defaultDateTime - noExceptionThrown() - } - - def 'the default has a default'() { - when: - ZonedDateTime result = DateUtils.parseZonedDateTime(null) - - then: - /* - * Can't really verify the actual time of the default's default, which is now; it could be off by a second, - * or an hour with daylight savings. Let's just ensure we got back an object without any exceptions. 
- */ - result - noExceptionThrown() - } - - def 'parse zoned date time format'() { - given: - def time = '2011-12-03T10:15:30+01:00' - def expected = ZonedDateTime.parse(time, DateTimeFormatter.ISO_OFFSET_DATE_TIME) - - when: - def actual = DateUtils.parseZonedDateTime(time) - - then: - actual == expected - noExceptionThrown() - } - - def 'parse UTC date time'() { - given: - def time = '2017-01-18T01:38:53Z' - def expected = ZonedDateTime.parse(time, DateTimeFormatter.ISO_OFFSET_DATE_TIME) - - when: - def actual = DateUtils.parseZonedDateTime(time) - - then: - actual == expected - noExceptionThrown() - } - - def 'throws exception with unknown format'() { - when: - DateUtils.parseZonedDateTime('10:15:30+01:00') - - then: - thrown(DateTimeParseException) - } -} diff --git a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup.yaml b/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup.yaml deleted file mode 100644 index 5e4b042bc76..00000000000 --- a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup.yaml +++ /dev/null @@ -1,184 +0,0 @@ -heat_template_version: 2016-04-08 -description: Auto scaling group for Spinnaker -parameters: - flavor: - type: string - description: Flavor used by the web servers - image: - type: string - description: Image used for servers - max_size: - type: number - description: Maximum cluster size - min_size: - type: number - description: Minimum cluster size - desired_size: - type: number - description: Desired cluster size - network_id: - type: string - description: Network used by the servers. Retained for auditing purposes. - load_balancers: - type: comma_delimited_list - description: Comma-separated string of load balancers to associate with the stack. This is not used in the stack and is defined for auditing purposes. - default: [] - zones: - type: comma_delimited_list - description: Comma-separated string of availability zones - default: [] - security_groups: - type: comma_delimited_list - description: Comma-separated string of security groups to use - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server - autoscaling_type: - type: string - description: Type of autoscaling to perform. Can be cpu_util, network.incoming.bytes.rate, or network.outgoing.bytes.rate - default: cpu_util - scaleup_cooldown: - type: number - description: Minimum amount of time (in seconds) between scaleup operations - default: 60 - scaleup_adjustment: - type: number - description: Amount by which to change the instance count. Must be positive - default: 1 - scaleup_period: - type: number - description: Amount of time (in seconds) before the scaleup action is taken - default: 60 - scaleup_threshold: - type: number - description: Threshold that causes the scaleup action to occur, if held for scaleup_period seconds - default: 50 - scaledown_cooldown: - type: number - description: Minimum amount of time (in seconds) between scaledown operations - default: 60 - scaledown_adjustment: - type: number - description: Amount by which to change the instance count. Must be negative - default: -1 - scaledown_period: - type: number - description: Amount of time (in seconds) before the scaledown action is taken - default: 600 - scaledown_threshold: - type: number - description: Threshold that causes the scaledown action to occur, if held for scaledown_period seconds - default: 15 - source_user_data_type: - type: string - description: The source user data type (Swift, URL, Text), retained for auditing purposes - default: "" - source_user_data: - type: string - description: The unencoded source user data, retained for auditing purposes - default: "" - tags: - type: json - description: Map of key-value pairs to store in instance metadata - default: {} - user_data: - type: string - description: Raw base64-encoded string that will execute upon server boot, if cloud-init is installed - default: "" - scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - servergroup: - type: OS::Heat::AutoScalingGroup - properties: - min_size: {get_param: min_size} - max_size: {get_param: max_size} - desired_capacity: {get_param: desired_size} - resource: - type: servergroup_resource.yaml - properties: - flavor: {get_param: flavor} - image: {get_param: image} - # metering.stack is used by ceilometer to autoscale against instances that are part of this stack - # the others are user-specified - metadata: - map_merge: - - {"metering.stack": {get_param: "OS::stack_id"}} - - {"metering.stack.name": {get_param: "OS::stack_name"}} - - {get_param: tags} - network_id: {get_param: network_id} - security_groups: {get_param: security_groups} - subnet_id: {get_param: subnet_id} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - web_server_scaleup_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: servergroup} - cooldown: {get_param: scaleup_cooldown} - scaling_adjustment: {get_param: scaleup_adjustment} - web_server_scaledown_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: servergroup} - cooldown: {get_param: scaledown_cooldown} - scaling_adjustment: {get_param: scaledown_adjustment} - meter_alarm_high: - type: OS::Ceilometer::Alarm - properties: - description: Scale up if the average meter_name > scaleup_threshold for scaleup_period seconds - meter_name: {get_param: autoscaling_type} - statistic: avg - period: {get_param: scaleup_period} - evaluation_periods: 1 - threshold: {get_param: scaleup_threshold} - alarm_actions: - - {get_attr: [web_server_scaleup_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: gt - meter_alarm_low: - type: OS::Ceilometer::Alarm - properties: - description: Scale down if the average meter_name < scaledown_threshold for scaledown_period seconds - meter_name: {get_param: autoscaling_type} - statistic: avg - period: {get_param: scaledown_period} - evaluation_periods: 1 - threshold: {get_param: scaledown_threshold} - alarm_actions: - - {get_attr: [web_server_scaledown_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: lt -outputs: - OS::stack_id: - value: {get_resource: servergroup} - # we need to store subtemplate in servergroup output from create, as it is required to do an update and there is no native way - # of obtaining it from a stack - 
servergroup_resource: - description: servergroup_resource.yaml template value - value: {get_file: servergroup_resource.yaml } - # we need to store subtemplate in servergroup output from create, as it is required to do an update and there is no native way - # of obtaining it from a stack - servergroup_resource_member: - description: servergroup_resource_member.yaml template value - value: {get_file: servergroup_resource_member.yaml} - scale_up_url: - description: > - This URL is the webhook to scale up the autoscaling group. You - can invoke the scale-up operation by doing an HTTP POST to this - URL; no body nor extra headers are needed. - value: {get_attr: [web_server_scaleup_policy, alarm_url]} - scale_dn_url: - description: > - This URL is the webhook to scale down the autoscaling group. - You can invoke the scale-down operation by doing an HTTP POST to - this URL; no body nor extra headers are needed. - value: {get_attr: [web_server_scaledown_policy, alarm_url]} - servergroup_size: - description: > - This is the current size of the auto scaling group. - value: {get_attr: [servergroup, current_size]} diff --git a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_float.yaml b/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_float.yaml deleted file mode 100644 index 6b6ede124a2..00000000000 --- a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_float.yaml +++ /dev/null @@ -1,188 +0,0 @@ -heat_template_version: 2016-04-08 -description: Auto scaling group for Spinnaker -parameters: - flavor: - type: string - description: Flavor used by the web servers - floating_network_id: - type: string - description: Network used to allocate a floating IP for each server. - image: - type: string - description: Image used for servers - max_size: - type: number - description: Maximum cluster size - min_size: - type: number - description: Minimum cluster size - desired_size: - type: number - description: Desired cluster size - network_id: - type: string - description: Network used by the servers. Retained for auditing purposes. - load_balancers: - type: comma_delimited_list - description: Comma-separated string of load balancers to associate with the stack. This is not used in the stack and is defined for auditing purposes. - default: [] - zones: - type: comma_delimited_list - description: Comma-separated string of availability zones - default: [] - security_groups: - type: comma_delimited_list - description: Comma-separated string of security groups to use - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server - autoscaling_type: - type: string - description: Type of autoscaling to perform. Can be cpu_util, network.incoming.bytes.rate, or network.outgoing.bytes.rate - default: cpu_util - scaleup_cooldown: - type: number - description: Minimum amount of time (in seconds) between scaleup operations - default: 60 - scaleup_adjustment: - type: number - description: Amount by which to change the instance count. Must be positive - default: 1 - scaleup_period: - type: number - description: Amount of time (in seconds) before the scaleup action is taken - default: 60 - scaleup_threshold: - type: number - description: Threshold that causes the scaleup action to occur, if held for scaleup_period seconds - default: 50 - scaledown_cooldown: - type: number - description: Minimum amount of time (in seconds) between scaledown operations - default: 60 - scaledown_adjustment: - type: number - description: Amount by which to change the instance count. Must be negative - default: -1 - scaledown_period: - type: number - description: Amount of time (in seconds) before the scaledown action is taken - default: 600 - scaledown_threshold: - type: number - description: Threshold that causes the scaledown action to occur, if held for scaledown_period seconds - default: 15 - source_user_data_type: - type: string - description: The source user data type (Swift, URL, Text), retained for auditing purposes - default: "" - source_user_data: - type: string - description: The unencoded source user data, retained for auditing purposes - default: "" - tags: - type: json - description: Map of key-value pairs to store in instance metadata - default: {} - user_data: - type: string - description: Raw base64-encoded string that will execute upon server boot, if cloud-init is installed - default: "" - scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - servergroup: - type: OS::Heat::AutoScalingGroup - properties: - min_size: {get_param: min_size} - max_size: {get_param: max_size} - desired_capacity: {get_param: desired_size} - resource: - type: servergroup_resource.yaml - properties: - flavor: {get_param: flavor} - floating_network_id: {get_param: floating_network_id} - image: {get_param: image} - # metering.stack is used by ceilometer to autoscale against instances that are part of this stack - # the others are user-specified - metadata: - map_merge: - - {"metering.stack": {get_param: "OS::stack_id"}} - - {"metering.stack.name": {get_param: "OS::stack_name"}} - - {get_param: tags} - network_id: {get_param: network_id} - security_groups: {get_param: security_groups} - subnet_id: {get_param: subnet_id} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - web_server_scaleup_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: servergroup} - cooldown: {get_param: scaleup_cooldown} - scaling_adjustment: {get_param: scaleup_adjustment} - web_server_scaledown_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: servergroup} - cooldown: {get_param: scaledown_cooldown} - scaling_adjustment: {get_param: scaledown_adjustment} - meter_alarm_high: - type: OS::Ceilometer::Alarm - properties: - description: Scale up if the average meter_name > scaleup_threshold for scaleup_period seconds - meter_name: {get_param: autoscaling_type} - statistic: avg - period: {get_param: scaleup_period} - evaluation_periods: 1 - threshold: {get_param: scaleup_threshold} - alarm_actions: - - {get_attr: [web_server_scaleup_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: gt - meter_alarm_low: - type: OS::Ceilometer::Alarm - properties: - description: Scale down if the average meter_name < scaledown_threshold for scaledown_period seconds 
- meter_name: {get_param: autoscaling_type} - statistic: avg - period: {get_param: scaledown_period} - evaluation_periods: 1 - threshold: {get_param: scaledown_threshold} - alarm_actions: - - {get_attr: [web_server_scaledown_policy, alarm_url]} - matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}} - comparison_operator: lt -outputs: - OS::stack_id: - value: {get_resource: servergroup} - # we need to store subtemplate in servergroup output from create, as it is required to do an update and there is no native way - # of obtaining it from a stack - servergroup_resource: - description: servergroup_resource.yaml template value - value: {get_file: "servergroup_resource.yaml" } - # we need to store subtemplate in servergroup output from create, as it is required to do an update and there is no native way - # of obtaining it from a stack - servergroup_resource_member: - description: servergroup_resource_member.yaml template value - value: {get_file: servergroup_resource_member.yaml} - scale_up_url: - description: > - This URL is the webhook to scale up the autoscaling group. You - can invoke the scale-up operation by doing an HTTP POST to this - URL; no body nor extra headers are needed. - value: {get_attr: [web_server_scaleup_policy, alarm_url]} - scale_dn_url: - description: > - This URL is the webhook to scale down the autoscaling group. - You can invoke the scale-down operation by doing an HTTP POST to - this URL; no body nor extra headers are needed. - value: {get_attr: [web_server_scaledown_policy, alarm_url]} - servergroup_size: - description: > - This is the current size of the auto scaling group. - value: {get_attr: [servergroup, current_size]} diff --git a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_resource.yaml b/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_resource.yaml deleted file mode 100644 index cb914ee1996..00000000000 --- a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_resource.yaml +++ /dev/null @@ -1,56 +0,0 @@ -heat_template_version: 2016-04-08 -description: A load balanced server for Spinnaker. -parameters: - flavor: - type: string - description: Flavor used by the servers. - image: - type: string - description: Image used for servers. - metadata: - type: json - description: Server instance metadata. - network_id: - type: string - description: Network used by each server. Retained for auditing purposes. - security_groups: - type: comma_delimited_list - description: Security groups associate to each server. - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server. - user_data: - type: string - description: String that will execute upon server boot, if cloud-init is installed. 
- scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - server: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - metadata: {get_param: metadata} - networks: - - subnet: {get_param: subnet_id} - security_groups: {get_param: security_groups} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - user_data_format: RAW - member: - type: OS::Heat::ResourceGroup - properties: - resource_def: - # this is dynamically generated to associate a load balancer pool member from each listener to each server - type: servergroup_resource_member.yaml - properties: - address: {get_attr: [server, first_address]} -outputs: - server_ip: - description: IP Address of the load-balanced server - value: { get_attr: [server, first_address] } - lb_member: - description: LB member details - value: { get_attr: [member, show] } diff --git a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_resource_float.yaml b/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_resource_float.yaml deleted file mode 100644 index bb451fecbbf..00000000000 --- a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_resource_float.yaml +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2016-04-08 -description: A load balanced server for Spinnaker. -parameters: - flavor: - type: string - description: Flavor used by the servers. - floating_network_id: - type: string - description: Network used to allocate a floating IP for each server. - image: - type: string - description: Image used for servers. - metadata: - type: json - description: Server instance metadata. - network_id: - type: string - description: Network used by each server. Retained for auditing purposes. - security_groups: - type: comma_delimited_list - description: Security groups associate to each server. - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server. - user_data: - type: string - description: String that will execute upon server boot, if cloud-init is installed. 
- scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - server: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - metadata: {get_param: metadata} - networks: - - subnet: {get_param: subnet_id} - security_groups: {get_param: security_groups} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - user_data_format: RAW - server_floating_ip: - type: OS::Neutron::FloatingIP - properties: - floating_network_id: {get_param: floating_network_id} - port_id: {get_attr: [server, addresses, {get_param: network_id}, 0, port]} - member: - type: OS::Heat::ResourceGroup - properties: - resource_def: - # this is dynamically generated to associate a load balancer pool member from each listener to each server - type: servergroup_resource_member.yaml - properties: - address: {get_attr: [server, first_address]} -outputs: - server_ip: - description: IP Address of the load-balanced server - value: { get_attr: [server, first_address] } - lb_member: - description: LB member details - value: { get_attr: [member, show] } diff --git a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_server.yaml b/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_server.yaml deleted file mode 100644 index 970ca798fb4..00000000000 --- a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_server.yaml +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: 2016-04-08 -description: An auto-scaled server for Spinnaker without any load balancer association. -parameters: - flavor: - type: string - description: Flavor used by the servers. - image: - type: string - description: Image used for servers. - metadata: - type: json - description: Server instance metadata. - network_id: - type: string - description: Network used by each server. Retained for auditing purposes. - security_groups: - type: comma_delimited_list - description: Security groups associate to each server. - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server. - user_data: - type: string - description: Raw base64-encoded string that will execute upon server boot, if cloud-init is installed. 
- scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - server: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - metadata: {get_param: metadata} - networks: - - subnet: {get_param: subnet_id} - security_groups: {get_param: security_groups} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - user_data_format: RAW - -outputs: - server_ip: - description: IP Address of the load-balanced server - value: { get_attr: [server, first_address] } diff --git a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_server_float.yaml b/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_server_float.yaml deleted file mode 100644 index 785beedfce1..00000000000 --- a/clouddriver-openstack/src/test/resources/com/netflix/spinnaker/clouddriver/openstack/deploy/ops/servergroup/servergroup_server_float.yaml +++ /dev/null @@ -1,54 +0,0 @@ -heat_template_version: 2016-04-08 -description: An auto-scaled server for Spinnaker without any load balancer association. -parameters: - flavor: - type: string - description: Flavor used by the servers. - floating_network_id: - type: string - description: Network used to allocate a floating IP for each server. - image: - type: string - description: Image used for servers. - metadata: - type: json - description: Server instance metadata. - network_id: - type: string - description: Network used by each server. Retained for auditing purposes. - security_groups: - type: comma_delimited_list - description: Security groups associated with each server. - subnet_id: - type: string - description: Subnet used to allocate a fixed IP for each server. - user_data: - type: string - description: Raw base64-encoded string that will execute upon server boot, if cloud-init is installed.
- scheduler_hints: - type: json - description: Key/Value pairs in json format for scheduler_hints - default: {} -resources: - server: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - metadata: {get_param: metadata} - networks: - - subnet: {get_param: subnet_id} - security_groups: {get_param: security_groups} - user_data: {get_param: user_data} - scheduler_hints: {get_param: scheduler_hints} - user_data_format: RAW - server_floating_ip: - type: OS::Neutron::FloatingIP - properties: - floating_network_id: {get_param: floating_network_id} - port_id: {get_attr: [server, addresses, {get_param: network_id}, 0, port]} - -outputs: - server_ip: - description: IP Address of the load-balanced server - value: { get_attr: [server, first_address] } diff --git a/clouddriver-oracle/clouddriver-oracle.gradle b/clouddriver-oracle/clouddriver-oracle.gradle index 8de18a77b8f..02f8edfaf06 100644 --- a/clouddriver-oracle/clouddriver-oracle.gradle +++ b/clouddriver-oracle/clouddriver-oracle.gradle @@ -1,73 +1,39 @@ - -class DownloadTask extends DefaultTask { - @Input - String sourceUrl - - @OutputFile - File target - - @TaskAction - void download() { - ant.get(src: sourceUrl, dest: target) - } -} - -final File sdkDownloadLocation = project.file('build/sdkdownload') -final File sdkLocation = project.file('build/oci-java-sdk') - -// Oracle BMCS SDK isn't published to any maven repo (yet!), so we manually download, unpack and add to compile/runtime deps -// https://github.com/oracle/oci-java-sdk/issues/25 -task fetchSdk(type: DownloadTask) { - sourceUrl = 'https://github.com/oracle/oci-java-sdk/releases/download/v1.2.44/oci-java-sdk.zip' - target = sdkDownloadLocation -} - -task unpackSdk(type: Sync) { - dependsOn('fetchSdk') - from zipTree(tasks.fetchSdk.target) - into sdkLocation - include "**/*.jar" - exclude "**/*-sources.jar" - exclude "**/*-javadoc.jar" - exclude "apidocs/**" - exclude "examples/**" - - // Scary but works. 
I think clouddriver deps in general need cleaning at some point - // Even without the oracle bmc sdk 3rd party deps there's still multiple javax.inject and commons-X JARs - exclude "**/*jackson*.jar" - exclude "**/*jersey*.jar" - exclude "**/hk2*.jar" - exclude "**/*guava*.jar" - exclude "**/commons*.jar" - exclude "**/aopalliance*.jar" - exclude "**/javassist*.jar" - exclude "**/slf*.jar" - exclude "**/osgi*.jar" - exclude "**/validation*.jar" - exclude "**/jsr305*.jar" - exclude "**/json-smart*.jar" -} - -task cleanSdk(type: Delete) { - delete sdkLocation, sdkDownloadLocation -} - -tasks.clean.dependsOn('cleanSdk') -tasks.compileJava.dependsOn('unpackSdk') - dependencies { - compile project(":clouddriver-core") - compile spinnaker.dependency('frigga') - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') - compile fileTree(sdkLocation) + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + + implementation "com.github.ben-manes.caffeine:guava" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-moniker" + implementation "com.oracle.oci.sdk:oci-java-sdk-core" + implementation "com.oracle.oci.sdk:oci-java-sdk-identity" + implementation "com.oracle.oci.sdk:oci-java-sdk-loadbalancer" + implementation "com.oracle.oci.sdk:oci-java-sdk-objectstorage" + implementation "org.apache.groovy:groovy" + implementation "org.springframework.boot:spring-boot-starter-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.apache.groovy:groovy-console" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testRuntimeOnly "net.bytebuddy:byte-buddy" } def allSourceSets = sourceSets -license { - header project.file('oracle-source-header') - includes(["**/*.groovy", "**/*.java", "**/*.properties"]) - strictCheck true - skipExistingHeaders false - sourceSets = allSourceSets + +def licenseExtension = project.extensions.findByName('license') +if (licenseExtension != null) { + licenseExtension.header project.file('oracle-source-header') + licenseExtension.includes(["**/*.groovy", "**/*.java", "**/*.properties"]) + licenseExtension.strictCheck true + licenseExtension.skipExistingHeaders false + licenseExtension.sourceSets = allSourceSets } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/config/OracleConfigurationProperties.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/config/OracleConfigurationProperties.groovy index c432e6222b1..883e8f236d6 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/config/OracleConfigurationProperties.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/config/OracleConfigurationProperties.groovy @@ -24,6 +24,7 @@ class OracleConfigurationProperties { String userId String fingerprint String sshPrivateKeyFilePath + String privateKeyPassphrase String tenancyId String region } diff --git 
a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/OracleWorkRequestPoller.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/OracleWorkRequestPoller.groovy index 8d9dfd55354..5c4a65c9efd 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/OracleWorkRequestPoller.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/OracleWorkRequestPoller.groovy @@ -13,9 +13,11 @@ import com.oracle.bmc.loadbalancer.LoadBalancerClient import com.oracle.bmc.loadbalancer.model.WorkRequest import com.oracle.bmc.loadbalancer.requests.GetWorkRequestRequest -class OracleWorkRequestPoller { - - public static WorkRequest poll(String workRequestId, String phase, Task task, LoadBalancerClient loadBalancerClient) { +public class OracleWorkRequestPoller { + + static OracleWorkRequestPoller poller = new OracleWorkRequestPoller(); + + public WorkRequest wait(String workRequestId, String phase, Task task, LoadBalancerClient loadBalancerClient) { def wr = GetWorkRequestRequest.builder().workRequestId(workRequestId).build() task.updateStatus(phase, "Waiting for WorkRequest to finish: $workRequestId") @@ -33,4 +35,8 @@ class OracleWorkRequestPoller { } return finalWorkRequestResult } + + public static WorkRequest poll(String workRequestId, String phase, Task task, LoadBalancerClient loadBalancerClient) { + return poller.wait(workRequestId, phase, task, loadBalancerClient); + } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/CreateOracleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/CreateOracleLoadBalancerAtomicOperationConverter.groovy deleted file mode 100644 index 4ffa2a779a9..00000000000 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/CreateOracleLoadBalancerAtomicOperationConverter.groovy +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2017 Oracle America, Inc. - * - * The contents of this file are subject to the Apache License Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * If a copy of the Apache License Version 2.0 was not distributed with this file, - * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html - */ -package com.netflix.spinnaker.clouddriver.oracle.deploy.converter - -import com.netflix.spinnaker.clouddriver.oracle.OracleOperation -import com.netflix.spinnaker.clouddriver.oracle.deploy.description.CreateLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.oracle.deploy.op.CreateOracleLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import groovy.util.logging.Slf4j -import org.springframework.stereotype.Component - -@Slf4j -@OracleOperation(AtomicOperations.UPSERT_LOAD_BALANCER) -@Component("createOracleLoadBalancerDescription") -class CreateOracleLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - @Override - AtomicOperation convertOperation(Map input) { - new CreateOracleLoadBalancerAtomicOperation(convertDescription(input)) - } - - @Override - CreateLoadBalancerDescription convertDescription(Map input) { - OracleAtomicOperationConverterHelper.convertDescription(input, this, CreateLoadBalancerDescription) - } -} diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/NoOpOracleAtomicOperationConverter.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/NoOpOracleAtomicOperationConverter.groovy index f2543e72f8d..8b218f3b6da 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/NoOpOracleAtomicOperationConverter.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/NoOpOracleAtomicOperationConverter.groovy @@ -11,6 +11,7 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.converter import com.netflix.spinnaker.clouddriver.oracle.OracleOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations +import com.netflix.spinnaker.orchestration.OperationDescription import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport import org.springframework.stereotype.Component @@ -28,7 +29,7 @@ class NoOpOracleAtomicOperationConverter extends AbstractAtomicOperationsCredent } @Override - Object convertDescription(Map input) { + OperationDescription convertDescription(Map input) { return null } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/UpsertOracleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/UpsertOracleLoadBalancerAtomicOperationConverter.groovy new file mode 100644 index 00000000000..498a82f5773 --- /dev/null +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/UpsertOracleLoadBalancerAtomicOperationConverter.groovy @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017 Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.converter + +import com.netflix.spinnaker.clouddriver.oracle.OracleOperation +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.UpsertLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.oracle.deploy.op.UpsertOracleLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import groovy.util.logging.Slf4j +import org.springframework.stereotype.Component + +@Slf4j +@OracleOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +@Component("upsertOracleLoadBalancerDescription") +class UpsertOracleLoadBalancerAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { + + @Override + AtomicOperation convertOperation(Map input) { + new UpsertOracleLoadBalancerAtomicOperation(convertDescription(input)) + } + + @Override + UpsertLoadBalancerDescription convertDescription(Map input) { + OracleAtomicOperationConverterHelper.convertDescription(input, this, UpsertLoadBalancerDescription) + } +} diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BaseOracleInstanceDescription.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BaseOracleInstanceDescription.groovy index ce37cf904cd..7e1480221b0 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BaseOracleInstanceDescription.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BaseOracleInstanceDescription.groovy @@ -24,5 +24,6 @@ class BaseOracleInstanceDescription extends AbstractOracleCredentialsDescription String vpcId String subnetId String accountName + String sshAuthorizedKeys } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BasicOracleDeployDescription.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BasicOracleDeployDescription.groovy index cbbd735ab16..acda129cb68 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BasicOracleDeployDescription.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/BasicOracleDeployDescription.groovy @@ -25,7 +25,13 @@ class BasicOracleDeployDescription extends BaseOracleInstanceDescription impleme String stack String freeFormDetails String loadBalancerId + String backendSetName ServerGroup.Capacity capacity //targetSize takes precedence if targetSize and capacity.desired are both specified. 
Integer targetSize + + @Override + Collection getApplications() { + return [application] + } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/CreateLoadBalancerDescription.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/CreateLoadBalancerDescription.groovy deleted file mode 100644 index f3ca2407365..00000000000 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/CreateLoadBalancerDescription.groovy +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2017 Oracle America, Inc. - * - * The contents of this file are subject to the Apache License Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * If a copy of the Apache License Version 2.0 was not distributed with this file, - * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html - */ -package com.netflix.spinnaker.clouddriver.oracle.deploy.description - -import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable - -class CreateLoadBalancerDescription extends AbstractOracleCredentialsDescription implements ApplicationNameable { - - String application - String stack - String shape - String policy - List subnetIds - Listener listener - HealthCheck healthCheck - - static class Listener { - - Integer port - String protocol - } - - static class HealthCheck { - - String protocol - Integer port - Integer interval - Integer retries - Integer timeout - String url - Integer statusCode - String responseBodyRegex - } -} diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/EnableDisableOracleServerGroupDescription.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/EnableDisableOracleServerGroupDescription.groovy index 826313bfc35..9ab8789c5c5 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/EnableDisableOracleServerGroupDescription.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/EnableDisableOracleServerGroupDescription.groovy @@ -9,15 +9,20 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.description import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait -import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable /** * "Enabling" means adding a server group to the target pool of each of its network load balancers. * * "Disabling" means removing a server group from the target pool of each of its network load balancers. 
*/ -class EnableDisableOracleServerGroupDescription extends AbstractOracleCredentialsDescription implements ServerGroupNameable, EnableDisableDescriptionTrait { +class EnableDisableOracleServerGroupDescription extends AbstractOracleCredentialsDescription implements ServerGroupsNameable, EnableDisableDescriptionTrait { String region String accountName + + @Override + Collection getServerGroupNames() { + return [getServerGroupName()] + } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/UpsertLoadBalancerDescription.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/UpsertLoadBalancerDescription.groovy new file mode 100644 index 00000000000..0bceed486c2 --- /dev/null +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/description/UpsertLoadBalancerDescription.groovy @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2017 Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.description + +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable +import com.oracle.bmc.loadbalancer.model.CertificateDetails +import com.oracle.bmc.loadbalancer.model.BackendSetDetails +import com.oracle.bmc.loadbalancer.model.ListenerDetails +import groovy.transform.ToString + +@ToString +class UpsertLoadBalancerDescription extends AbstractOracleCredentialsDescription implements ApplicationNameable { + + String application + String stack + String detail + String shape + String policy + Boolean isPrivate + List subnetIds + Map listeners + Map certificates + Map backendSets + Map subnetTypeMap + String loadBalancerId + + String clusterName() { + application + (stack? 
'-' + stack : '') + } + + //see NameBuilder.combineAppStackDetail + String qualifiedName() { + def stack = this.stack?: "" + def detail = this.detail + if (detail) { + return this.application + "-" + stack + "-" + detail + } + if (!stack.isEmpty()) { + return this.application + "-" + stack + } + } + + @Override + Collection getApplications() { + return [application] + } +} diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandler.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandler.groovy index ca978dbf5ec..c5d67e666c1 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandler.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandler.groovy @@ -6,10 +6,8 @@ * If a copy of the Apache License Version 2.0 was not distributed with this file, * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html */ - package com.netflix.spinnaker.clouddriver.oracle.deploy.handler -import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DeployDescription @@ -19,22 +17,21 @@ import com.netflix.spinnaker.clouddriver.model.ServerGroup import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleServerGroupNameResolver import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller import com.netflix.spinnaker.clouddriver.oracle.deploy.description.BasicOracleDeployDescription -import com.netflix.spinnaker.clouddriver.oracle.model.OracleInstance +import com.netflix.spinnaker.clouddriver.oracle.model.Details import com.netflix.spinnaker.clouddriver.oracle.model.OracleServerGroup import com.netflix.spinnaker.clouddriver.oracle.provider.view.OracleClusterProvider import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServerGroupService import com.oracle.bmc.core.requests.GetVnicRequest import com.oracle.bmc.core.requests.ListVnicAttachmentsRequest import com.oracle.bmc.loadbalancer.model.BackendDetails -import com.oracle.bmc.loadbalancer.model.CreateBackendSetDetails -import com.oracle.bmc.loadbalancer.model.HealthCheckerDetails -import com.oracle.bmc.loadbalancer.model.UpdateListenerDetails -import com.oracle.bmc.loadbalancer.requests.CreateBackendSetRequest +import com.oracle.bmc.loadbalancer.model.BackendSet +import com.oracle.bmc.loadbalancer.model.LoadBalancer +import com.oracle.bmc.loadbalancer.model.UpdateBackendSetDetails import com.oracle.bmc.loadbalancer.requests.GetLoadBalancerRequest -import com.oracle.bmc.loadbalancer.requests.UpdateListenerRequest +import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest +import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component - import java.util.concurrent.TimeUnit @Component @@ -78,6 +75,7 @@ class BasicOracleDeployHandler implements DeployHandler ips = [] - sgView.instances.each { instance -> + sg.instances.each { instance -> def vnicAttachRs = description.credentials.computeClient.listVnicAttachments(ListVnicAttachmentsRequest.builder() .compartmentId(description.credentials.compartmentId) - .instanceId(((OracleInstance) instance).id) + .instanceId(instance.id) 
.build()) vnicAttachRs.items.each { vnicAttach -> def vnic = description.credentials.networkClient.getVnic(GetVnicRequest.builder() .vnicId(vnicAttach.vnicId).build()).vnic - ips << vnic.privateIp + if (vnic.privateIp) { + instance.privateIp = vnic.privateIp + ips << vnic.privateIp + } } } - - // get LB - task.updateStatus BASE_PHASE, "Getting loadbalancer details" - def lb = description.credentials.loadBalancerClient.getLoadBalancer(GetLoadBalancerRequest.builder().loadBalancerId(description.loadBalancerId).build()).loadBalancer - - // use backend-template to add a new backend set - def names = Names.parseName(sg.name) - def backendTemplate = lb.backendSets.get(names.cluster + '-template') - def backend = CreateBackendSetDetails.builder() - .healthChecker(HealthCheckerDetails.builder() - .protocol(backendTemplate.healthChecker.protocol) - .port(backendTemplate.healthChecker.port) - .intervalInMillis(backendTemplate.healthChecker.intervalInMillis) - .retries(backendTemplate.healthChecker.retries) - .timeoutInMillis(backendTemplate.healthChecker.timeoutInMillis) - .urlPath(backendTemplate.healthChecker.urlPath) - .returnCode(backendTemplate.healthChecker.returnCode) - .responseBodyRegex(backendTemplate.healthChecker.responseBodyRegex) - .build()) - .policy(backendTemplate.policy) - .backends(ips.collect { ip -> - BackendDetails.builder().ipAddress(ip).port(backendTemplate.healthChecker.port).build() - }) - .name(sg.name) - .build() - - // update lb to point to that backend set - task.updateStatus BASE_PHASE, "Creating backend set ${backend.name}" - def rs = description.credentials.loadBalancerClient.createBackendSet(CreateBackendSetRequest.builder() - .loadBalancerId(lb.id) - .createBackendSetDetails(backend).build()) + sg.backendSetName = description.backendSetName + task.updateStatus BASE_PHASE, "Adding IP addresses ${ips} to ${description.backendSetName}" + oracleServerGroupService.updateServerGroup(sg) + // update listener and backendSet + BackendSet defaultBackendSet = lb.backendSets.get(description.backendSetName) + // new backends from the serverGroup + List backends = ips.collect { ip -> + BackendDetails.builder().ipAddress(ip).port(defaultBackendSet.healthChecker.port).build() + } + //merge with existing backendSet + defaultBackendSet.backends.each { existingBackend -> + backends << Details.of(existingBackend) + } + + UpdateBackendSetDetails updateDetails = UpdateBackendSetDetails.builder() + .policy(defaultBackendSet.policy) + .healthChecker(Details.of(defaultBackendSet.healthChecker)) + .backends(backends).build() + task.updateStatus BASE_PHASE, "Updating backendSet ${description.backendSetName}" + UpdateBackendSetResponse updateRes = description.credentials.loadBalancerClient.updateBackendSet( + UpdateBackendSetRequest.builder().loadBalancerId(description.loadBalancerId) + .backendSetName(description.backendSetName).updateBackendSetDetails(updateDetails).build()) // wait for backend set to be created - OracleWorkRequestPoller.poll(rs.getOpcWorkRequestId(), BASE_PHASE, task, description.credentials.loadBalancerClient) - - // update listener - def currentListener = lb.listeners.get(names.cluster) - task.updateStatus BASE_PHASE, "Updating listener ${currentListener.name} to point to backend set ${backend.name}" - def ulrs = description.credentials.loadBalancerClient.updateListener(UpdateListenerRequest.builder() - .listenerName(currentListener.name) - .loadBalancerId(lb.id) - .updateListenerDetails(UpdateListenerDetails.builder() - .port(currentListener.port) - 
.defaultBackendSetName(backend.name) - .protocol(currentListener.protocol) - .build()) - .build()) - - // wait for listener to be updated - OracleWorkRequestPoller.poll(ulrs.getOpcWorkRequestId(), BASE_PHASE, task, description.credentials.loadBalancerClient) + OracleWorkRequestPoller.poll(updateRes.getOpcWorkRequestId(), BASE_PHASE, task, description.credentials.loadBalancerClient) } - DeploymentResult deploymentResult = new DeploymentResult() deploymentResult.serverGroupNames = ["$region:$serverGroupName".toString()] deploymentResult.serverGroupNameByRegion[region] = serverGroupName diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/AbstractEnableDisableAtomicOperation.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/AbstractEnableDisableAtomicOperation.groovy index fa0445d028e..38942e7a0f4 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/AbstractEnableDisableAtomicOperation.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/AbstractEnableDisableAtomicOperation.groovy @@ -9,16 +9,11 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.op import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller import com.netflix.spinnaker.clouddriver.oracle.deploy.description.EnableDisableOracleServerGroupDescription import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServerGroupService import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.oracle.bmc.loadbalancer.model.UpdateListenerDetails -import com.oracle.bmc.loadbalancer.requests.GetLoadBalancerRequest -import com.oracle.bmc.loadbalancer.requests.UpdateListenerRequest import org.springframework.beans.factory.annotation.Autowired abstract class AbstractEnableDisableAtomicOperation implements AtomicOperation { @@ -47,54 +42,13 @@ abstract class AbstractEnableDisableAtomicOperation implements AtomicOperation { - - private final CreateLoadBalancerDescription description - - private static final String BASE_PHASE = "CREATE_LOADBALANCER" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - CreateOracleLoadBalancerAtomicOperation(CreateLoadBalancerDescription description) { - this.description = description - } - - @Override - Void operate(List priorOutputs) { - def task = getTask() - def clusterName = "${description.application}${description.stack ? 
'-' + description.stack : ''}" - def backendSetTemplateName = "$clusterName-template" - def dummyBackendSet = BackendSetDetails.builder() - .policy(description.policy) - .healthChecker(HealthCheckerDetails.builder() - .protocol(description.healthCheck.protocol) - .port(description.healthCheck.port) - .intervalInMillis(description.healthCheck.interval) - .retries(description.healthCheck.retries) - .timeoutInMillis(description.healthCheck.timeout) - .urlPath(description.healthCheck.url) - .returnCode(description.healthCheck.statusCode) - .responseBodyRegex(description.healthCheck.responseBodyRegex) - .build()) - .build() - - def rq = CreateLoadBalancerRequest.builder() - .createLoadBalancerDetails(CreateLoadBalancerDetails.builder() - .displayName(clusterName) - .compartmentId(description.credentials.compartmentId) - .shapeName(description.shape) - .subnetIds(description.subnetIds) - .backendSets([(backendSetTemplateName.toString()): dummyBackendSet]) - .listeners([(clusterName.toString()): ListenerDetails.builder() - .port(description.listener.port) - .protocol(description.listener.protocol) - .defaultBackendSetName(backendSetTemplateName) - .build()]) - .build()).build() - - def rs = description.credentials.loadBalancerClient.createLoadBalancer(rq) - task.updateStatus(BASE_PHASE, "Create LB rq submitted - work request id: ${rs.getOpcWorkRequestId()}") - - OracleWorkRequestPoller.poll(rs.getOpcWorkRequestId(), BASE_PHASE, task, description.credentials.loadBalancerClient) - - return null - } -} diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DeleteOracleLoadBalancerAtomicOperation.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DeleteOracleLoadBalancerAtomicOperation.groovy new file mode 100644 index 00000000000..1c0c333e58d --- /dev/null +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DeleteOracleLoadBalancerAtomicOperation.groovy @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2017 Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.op + +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.DeleteLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.oracle.bmc.loadbalancer.model.BackendSetDetails +import com.oracle.bmc.loadbalancer.model.CreateLoadBalancerDetails +import com.oracle.bmc.loadbalancer.model.HealthCheckerDetails +import com.oracle.bmc.loadbalancer.model.ListenerDetails +import com.oracle.bmc.loadbalancer.requests.DeleteLoadBalancerRequest +import groovy.util.logging.Slf4j + +@Slf4j +class DeleteOracleLoadBalancerAtomicOperation implements AtomicOperation { + + private final DeleteLoadBalancerDescription description + + private static final String BASE_PHASE = "DELETE_LOADBALANCER" + + private static Task getTask() { + TaskRepository.threadLocalTask.get() + } + + DeleteOracleLoadBalancerAtomicOperation(DeleteLoadBalancerDescription description) { + this.description = description + } + + @Override + Void operate(List priorOutputs) { + def task = getTask() + task.updateStatus(BASE_PHASE, "Delete LoadBalancer: ${description}") + def request = DeleteLoadBalancerRequest.builder().loadBalancerId(description.loadBalancerId).build() + def rs = description.credentials.loadBalancerClient.deleteLoadBalancer(request) + task.updateStatus(BASE_PHASE, "Delete LoadBalancer request submitted - work request id: ${rs.getOpcWorkRequestId()}") + OracleWorkRequestPoller.poll(rs.getOpcWorkRequestId(), BASE_PHASE, task, description.credentials.loadBalancerClient) + return null + } +} diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperation.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperation.groovy index 330f5b2c6a5..9b3123d4af0 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperation.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperation.groovy @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Oracle America, Inc. + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. * * The contents of this file are subject to the Apache License Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
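The new DeleteOracleLoadBalancerAtomicOperation just above is the simplest instance of the submit-then-poll pattern that every load balancer mutation in this change follows: the OCI call returns an opcWorkRequestId, and OracleWorkRequestPoller.poll blocks until that work request reaches a terminal state. A minimal sketch of that loop, assuming the load balancer WorkRequest exposes Succeeded/Failed terminal lifecycle states and using a hypothetical one-second retry cadence (the real cadence and error handling live in OracleWorkRequestPoller, which this diff shows only in part):

```groovy
import com.netflix.spinnaker.clouddriver.data.task.Task
import com.oracle.bmc.loadbalancer.LoadBalancerClient
import com.oracle.bmc.loadbalancer.model.WorkRequest
import com.oracle.bmc.loadbalancer.requests.GetWorkRequestRequest

// Sketch only: the terminal-state names and the 1s cadence are assumptions,
// not copied from OracleWorkRequestPoller.
WorkRequest pollUntilDone(String workRequestId, String phase, Task task, LoadBalancerClient client) {
  def rq = GetWorkRequestRequest.builder().workRequestId(workRequestId).build()
  task.updateStatus(phase, "Waiting for WorkRequest to finish: $workRequestId")
  WorkRequest wr = client.getWorkRequest(rq).workRequest
  while (!(wr.lifecycleState in [WorkRequest.LifecycleState.Succeeded, WorkRequest.LifecycleState.Failed])) {
    sleep(1000) // Groovy GDK sleep between status checks
    wr = client.getWorkRequest(rq).workRequest
  }
  return wr
}
```

Note that this change also turns the poller's static entry point into a thin delegate over a swappable static `poller` instance field, presumably so tests can substitute a stub rather than sleep through real work requests.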
@@ -13,10 +13,18 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller import com.netflix.spinnaker.clouddriver.oracle.deploy.description.DestroyOracleServerGroupDescription +import com.netflix.spinnaker.clouddriver.oracle.model.Details import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServerGroupService import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.oracle.bmc.loadbalancer.requests.DeleteBackendSetRequest -import com.oracle.bmc.loadbalancer.requests.GetBackendSetRequest +import com.oracle.bmc.loadbalancer.model.Backend +import com.oracle.bmc.loadbalancer.model.BackendDetails +import com.oracle.bmc.loadbalancer.model.BackendSet +import com.oracle.bmc.loadbalancer.model.HealthChecker +import com.oracle.bmc.loadbalancer.model.HealthCheckerDetails +import com.oracle.bmc.loadbalancer.model.LoadBalancer +import com.oracle.bmc.loadbalancer.model.UpdateBackendSetDetails +import com.oracle.bmc.loadbalancer.requests.GetLoadBalancerRequest +import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest import com.oracle.bmc.model.BmcException import org.springframework.beans.factory.annotation.Autowired @@ -36,22 +44,52 @@ class DestroyOracleServerGroupAtomicOperation implements AtomicOperation { DestroyOracleServerGroupAtomicOperation(DestroyOracleServerGroupDescription description) { this.description = description } - + @Override Void operate(List priorOutputs) { - def sg = oracleServerGroupService.getServerGroup(description.credentials, Names.parseName(description.serverGroupName).app, description.serverGroupName) - if (sg.loadBalancerId) { - task.updateStatus BASE_PHASE, "Destroying server group backend set: " + description.serverGroupName + def app = Names.parseName(description.serverGroupName).app + task.updateStatus BASE_PHASE, "Destroying server group backend set ${description.serverGroupName} of ${app}" + def serverGroup = oracleServerGroupService.getServerGroup(description.credentials, app, description.serverGroupName) + LoadBalancer loadBalancer = null + try { + loadBalancer = serverGroup?.loadBalancerId? description.credentials.loadBalancerClient.getLoadBalancer( + GetLoadBalancerRequest.builder().loadBalancerId(serverGroup.loadBalancerId).build())?.getLoadBalancer() : null + } catch(BmcException e) { + if (e.statusCode == 404) { + task.updateStatus BASE_PHASE, "LoadBalancer ${serverGroup.loadBalancerId} did not exist...continuing" + } else { + throw e + } + } + if (loadBalancer) { + Set toGo = serverGroup.instances.collect {it.privateIp} as Set try { - description.credentials.loadBalancerClient.getBackendSet(GetBackendSetRequest.builder() - .backendSetName(description.serverGroupName) - .loadBalancerId(sg.loadBalancerId) - .build()) - def workResponse = description.credentials.loadBalancerClient.deleteBackendSet(DeleteBackendSetRequest.builder() - .backendSetName(description.serverGroupName) - .loadBalancerId(sg.loadBalancerId) - .build()) - OracleWorkRequestPoller.poll(workResponse.opcWorkRequestId, BASE_PHASE, task, description.credentials.loadBalancerClient) + BackendSet backendSet = serverGroup.backendSetName? 
loadBalancer.backendSets.get(serverGroup.backendSetName) : null + if (backendSet == null && loadBalancer.backendSets.size() == 1) { + backendSet = loadBalancer.backendSets.values().first(); + } + if (backendSet) { + // remove serverGroup instances/IPs from the backendSet + def backends = backendSet.backends.findAll { !toGo.contains(it.ipAddress) } .collect { Details.of(it) } + UpdateBackendSetDetails.Builder details = UpdateBackendSetDetails.builder().backends(backends) + if (backendSet.sslConfiguration) { + details.sslConfiguration(Details.of(backendSet.sslConfiguration)) + } + if (backendSet.sessionPersistenceConfiguration) { + details.sessionPersistenceConfiguration(backendSet.sessionPersistenceConfiguration) + } + if (backendSet.healthChecker) { + details.healthChecker(Details.of(backendSet.healthChecker)) + } + if (backendSet.policy) { + details.policy(backendSet.policy) + } + UpdateBackendSetRequest updateBackendSet = UpdateBackendSetRequest.builder() + .loadBalancerId(serverGroup.loadBalancerId).backendSetName(backendSet.name) + .updateBackendSetDetails(details.build()).build() + def updateRes = description.credentials.loadBalancerClient.updateBackendSet(updateBackendSet) + OracleWorkRequestPoller.poll(updateRes.opcWorkRequestId, BASE_PHASE, task, description.credentials.loadBalancerClient) + } } catch (BmcException e) { if (e.statusCode == 404) { task.updateStatus BASE_PHASE, "Backend set did not exist...continuing" diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperation.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperation.groovy index 81aa04691c0..6049f996904 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperation.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperation.groovy @@ -6,7 +6,6 @@ * If a copy of the Apache License Version 2.0 was not distributed with this file, * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html */ - package com.netflix.spinnaker.clouddriver.oracle.deploy.op import com.netflix.frigga.Names @@ -16,16 +15,20 @@ import com.netflix.spinnaker.clouddriver.model.ServerGroup import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller import com.netflix.spinnaker.clouddriver.oracle.deploy.description.ResizeOracleServerGroupDescription import com.netflix.spinnaker.clouddriver.oracle.model.OracleInstance +import com.netflix.spinnaker.clouddriver.oracle.model.Details import com.netflix.spinnaker.clouddriver.oracle.provider.view.OracleClusterProvider import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServerGroupService import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.oracle.bmc.core.requests.GetVnicRequest import com.oracle.bmc.core.requests.ListVnicAttachmentsRequest -import com.oracle.bmc.loadbalancer.model.BackendDetails +import com.oracle.bmc.loadbalancer.model.BackendDetails +import com.oracle.bmc.loadbalancer.model.BackendSet import com.oracle.bmc.loadbalancer.model.HealthCheckerDetails +import com.oracle.bmc.loadbalancer.model.LoadBalancer import com.oracle.bmc.loadbalancer.model.UpdateBackendSetDetails import com.oracle.bmc.loadbalancer.requests.GetLoadBalancerRequest import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest +import 
com.oracle.bmc.model.BmcException import org.springframework.beans.factory.annotation.Autowired import java.util.concurrent.TimeUnit @@ -52,15 +55,21 @@ class ResizeOracleServerGroupAtomicOperation implements AtomicOperation { @Override Void operate(List priorOutputs) { + def app = Names.parseName(description.serverGroupName).app task.updateStatus BASE_PHASE, "Resizing server group: " + description.serverGroupName + def serverGroup = oracleServerGroupService.getServerGroup(description.credentials, app, description.serverGroupName) int targetSize = description.targetSize?: (description.capacity?.desired?:0) + if (targetSize == serverGroup.instances.size()) { + task.updateStatus BASE_PHASE, description.serverGroupName + " is already running the desired number of instances" + return + } + Set oldGroup = serverGroup.instances.collect{it.privateIp} as Set + oracleServerGroupService.resizeServerGroup(task, description.credentials, description.serverGroupName, targetSize) - // SL: sync server group instances to backendset if there is one - def app = Names.parseName(description.serverGroupName).app - def sg = oracleServerGroupService.getServerGroup(description.credentials, app, description.serverGroupName) + serverGroup = oracleServerGroupService.getServerGroup(description.credentials, app, description.serverGroupName) - if (sg.loadBalancerId) { + if (serverGroup.loadBalancerId) { // wait for instances to go into running state ServerGroup sgView @@ -68,7 +77,7 @@ class ResizeOracleServerGroupAtomicOperation implements AtomicOperation { long finishBy = System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(30) boolean allUp = false while (!allUp && System.currentTimeMillis() < finishBy) { - sgView = clusterProvider.getServerGroup(sg.credentials.name, sg.region, sg.name) + sgView = clusterProvider.getServerGroup(serverGroup.credentials.name, serverGroup.region, serverGroup.name) if (sgView && (sgView.instanceCounts.up == sgView.instanceCounts.total) && (sgView.instanceCounts.total == description.capacity.desired)) { task.updateStatus BASE_PHASE, "All instances are Up" allUp = true @@ -85,52 +94,66 @@ class ResizeOracleServerGroupAtomicOperation implements AtomicOperation { // get their ip addresses task.updateStatus BASE_PHASE, "Looking up instance IP addresses" - List ips = [] - sgView.instances.each { instance -> - def vnicAttachRs = description.credentials.computeClient.listVnicAttachments(ListVnicAttachmentsRequest.builder() - .compartmentId(description.credentials.compartmentId) - .instanceId(((OracleInstance) instance).id) - .build()) - vnicAttachRs.items.each { vnicAttach -> - def vnic = description.credentials.networkClient.getVnic(GetVnicRequest.builder() - .vnicId(vnicAttach.vnicId).build()).vnic - ips << vnic.privateIp + List newGroup = [] + serverGroup.instances.each { instance -> + if (!instance.privateIp) { + def vnicAttachRs = description.credentials.computeClient.listVnicAttachments(ListVnicAttachmentsRequest.builder() + .compartmentId(description.credentials.compartmentId) + .instanceId(((OracleInstance) instance).id) + .build()) + vnicAttachRs.items.each { vnicAttach -> + def vnic = description.credentials.networkClient.getVnic(GetVnicRequest.builder() + .vnicId(vnicAttach.vnicId).build()).vnic + instance.privateIp = vnic.privateIp + } + } + newGroup << instance.privateIp + } + //update serverGroup with IPs + oracleServerGroupService.updateServerGroup(serverGroup) + + task.updateStatus BASE_PHASE, "Getting loadbalancer details " + serverGroup?.loadBalancerId + LoadBalancer 
loadBalancer = serverGroup?.loadBalancerId? description.credentials.loadBalancerClient.getLoadBalancer( + GetLoadBalancerRequest.builder().loadBalancerId(serverGroup.loadBalancerId).build())?.getLoadBalancer() : null + if (loadBalancer) { + try { + BackendSet backendSet = serverGroup.backendSetName? loadBalancer.backendSets.get(serverGroup.backendSetName) : null + if (backendSet == null && loadBalancer.backendSets.size() == 1) { + backendSet = loadBalancer.backendSets.values().first(); + } + if (backendSet) { + List backends = backendSet.backends.findAll { !oldGroup.contains(it.ipAddress) } .collect { Details.of(it) } + newGroup.each { + backends << BackendDetails.builder().ipAddress(it).port(backendSet.healthChecker.port).build() + } + UpdateBackendSetDetails.Builder details = UpdateBackendSetDetails.builder().backends(backends) + if (backendSet.sslConfiguration) { + details.sslConfiguration(Details.of(backendSet.sslConfiguration)) + } + if (backendSet.sessionPersistenceConfiguration) { + details.sessionPersistenceConfiguration(backendSet.sessionPersistenceConfiguration) + } + if (backendSet.healthChecker) { + details.healthChecker(Details.of(backendSet.healthChecker)) + } + if (backendSet.policy) { + details.policy(backendSet.policy) + } + UpdateBackendSetRequest updateBackendSet = UpdateBackendSetRequest.builder() + .loadBalancerId(serverGroup.loadBalancerId).backendSetName(backendSet.name) + .updateBackendSetDetails(details.build()).build() + task.updateStatus BASE_PHASE, "Updating backendSet ${backendSet.name}" + def updateRes = description.credentials.loadBalancerClient.updateBackendSet(updateBackendSet) + OracleWorkRequestPoller.poll(updateRes.opcWorkRequestId, BASE_PHASE, task, description.credentials.loadBalancerClient) + } + } catch (BmcException e) { + if (e.statusCode == 404) { + task.updateStatus BASE_PHASE, "Backend set did not exist...continuing" + } else { + throw e + } } } - - // get LB - task.updateStatus BASE_PHASE, "Getting loadbalancer details" - def lb = description.credentials.loadBalancerClient.getLoadBalancer(GetLoadBalancerRequest.builder().loadBalancerId(sg.loadBalancerId).build()).loadBalancer - - // use backend-template to replace/sync backend set - def names = Names.parseName(description.serverGroupName) - def backendTemplate = lb.backendSets.get("${names.cluster}-template".toString()) - def backend = UpdateBackendSetRequest.builder() - .loadBalancerId(sg.loadBalancerId) - .backendSetName(sg.name) - .updateBackendSetDetails(UpdateBackendSetDetails.builder() - .backends(ips.collect { ip -> - BackendDetails.builder().ipAddress(ip).port(backendTemplate.healthChecker.port).build() - } as List) - .healthChecker(HealthCheckerDetails.builder() - .protocol(backendTemplate.healthChecker.protocol) - .port(backendTemplate.healthChecker.port) - .intervalInMillis(backendTemplate.healthChecker.intervalInMillis) - .retries(backendTemplate.healthChecker.retries) - .timeoutInMillis(backendTemplate.healthChecker.timeoutInMillis) - .urlPath(backendTemplate.healthChecker.urlPath) - .returnCode(backendTemplate.healthChecker.returnCode) - .responseBodyRegex(backendTemplate.healthChecker.responseBodyRegex) - .build()) - .policy(backendTemplate.policy) - .build() - ).build() - - task.updateStatus BASE_PHASE, "Updating backend set ${sg.name}" - def rs = description.credentials.loadBalancerClient.updateBackendSet(backend) - - // wait for backend set to be updated - OracleWorkRequestPoller.poll(rs.getOpcWorkRequestId(), BASE_PHASE, task, description.credentials.loadBalancerClient) } 
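The destroy and resize hunks above converge on the same backend-set reconciliation in place of the deleted per-server-group `-template` scheme: keep every backend whose IP does not belong to the outgoing group, append the new instance IPs on the backend set's health-check port, and submit a single UpdateBackendSetRequest whose work request is then polled. A condensed sketch, assuming `Details.of` (the model helper added by this change but not shown in full here) mirrors each read-side object into its writable *Details counterpart; the SSL and session-persistence carry-over from the real code is elided:

```groovy
import com.netflix.spinnaker.clouddriver.oracle.model.Details
import com.oracle.bmc.loadbalancer.model.BackendDetails
import com.oracle.bmc.loadbalancer.model.BackendSet
import com.oracle.bmc.loadbalancer.model.UpdateBackendSetDetails

// Rebuild a shared backend set's membership from the surviving IPs.
UpdateBackendSetDetails reconcile(BackendSet backendSet, Set<String> oldIps, List<String> newIps) {
  // Keep backends that do not belong to the outgoing server group...
  List<BackendDetails> backends = backendSet.backends
      .findAll { !oldIps.contains(it.ipAddress) }
      .collect { Details.of(it) }
  // ...then register the new instances on the health checker's port.
  newIps.each { ip ->
    backends << BackendDetails.builder().ipAddress(ip).port(backendSet.healthChecker.port).build()
  }
  UpdateBackendSetDetails.builder()
      .policy(backendSet.policy)
      .healthChecker(Details.of(backendSet.healthChecker))
      .backends(backends)
      .build()
}
```

Reconciling membership by IP is what lets several server groups share one named backend set, where the old code created and deleted an entire backend set per server group.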
task.updateStatus BASE_PHASE, "Completed server group resize" return null diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidator.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidator.groovy index c865eff212c..76dfa697387 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidator.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidator.groovy @@ -9,22 +9,27 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.OracleOperation import com.netflix.spinnaker.clouddriver.oracle.deploy.description.BasicOracleDeployDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations -import com.netflix.spinnaker.clouddriver.security.ProviderVersion import org.springframework.stereotype.Component -import org.springframework.validation.Errors @OracleOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component("basicOracleDeployDescriptionValidator") class BasicOracleDeployDescriptionValidator extends StandardOracleAttributeValidator { - + @Override - void validate(List priorDescriptions, BasicOracleDeployDescription description, Errors errors) { + void validate(List priorDescriptions, BasicOracleDeployDescription description, ValidationErrors errors) { context = "basicOracleDeployDescriptionValidator" validateNotEmptyString(errors, description.application, "application") + if (description.loadBalancerId) { + // If a serverGroup is created with LoadBalancer, then a backendSet is created from the serverGroup with the same name. + // The backendSet name is limited to 32 chars + // This combineAppStackDetail (appName-stack-detail) is limited to 32-5 = 27 chars + validateLimit(errors, combineAppStackDetail(description.application, description.stack, description.freeFormDetails), 27, "combineAppStackDetail") + validateNotEmptyString(errors, description.backendSetName, "backendSetName") + } validateNotEmptyString(errors, description.region, "region") validateNotEmptyString(errors, description.accountName, "accountName") validateNotEmptyString(errors, description.imageId, "imageId") @@ -36,4 +41,19 @@ class BasicOracleDeployDescriptionValidator extends StandardOracleAttributeValid validateCapacity(errors, description.capacity.min, description.capacity.max, description.capacity.desired) } } + + /* + * See NameBuilder.combineAppStackDetail. BasicOracleDeployHandler uses this to create "clusterName" and serverGroupName. + * serverGroupName = appName-stack-detail-v001 or String.format("%s-v%03d", groupName, sequence) + */ + static String combineAppStackDetail(String appName, String stack, String detail) { + stack = stack != null ? 
stack : ""; + if (detail != null && !detail.isEmpty()) { + return appName + "-" + stack + "-" + detail; + } + if (!stack.isEmpty()) { + return appName + "-" + stack; + } + return appName; + } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidator.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidator.groovy index fd2f5e2bf7d..bf565360f10 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidator.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidator.groovy @@ -9,18 +9,18 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.OracleOperation import com.netflix.spinnaker.clouddriver.oracle.deploy.description.DestroyOracleServerGroupDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import org.springframework.stereotype.Component -import org.springframework.validation.Errors @OracleOperation(AtomicOperations.DESTROY_SERVER_GROUP) @Component("destroyOracleServerGroupDescriptionValidator") class DestroyOracleServerGroupDescriptionValidator extends StandardOracleAttributeValidator { @Override - void validate(List priorDescriptions, DestroyOracleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, DestroyOracleServerGroupDescription description, ValidationErrors errors) { context = "destroyServerGroupDescription" validateNotEmptyString(errors, description.accountName, "accountName") validateNotEmptyString(errors, description.region, "region") diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidator.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidator.groovy index 38fcbc2c2a3..747d1e9b4a6 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidator.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidator.groovy @@ -9,15 +9,15 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.deploy.description.EnableDisableOracleServerGroupDescription import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component("enableDisableOracleServerGroupDescriptionValidator") class EnableDisableOracleServerGroupDescriptionValidator extends StandardOracleAttributeValidator { @Override - void validate(List priorDescriptions, EnableDisableOracleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableOracleServerGroupDescription description, ValidationErrors errors) { context = "enableDisableServerGroupDescription" validateNotEmptyString(errors, description.region, "region") validateNotEmptyString(errors, description.accountName, "accountName") diff 
--git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidator.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidator.groovy index 78578996e37..16922ea6130 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidator.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidator.groovy @@ -9,18 +9,18 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.OracleOperation import com.netflix.spinnaker.clouddriver.oracle.deploy.description.ResizeOracleServerGroupDescription import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import org.springframework.stereotype.Component -import org.springframework.validation.Errors @OracleOperation(AtomicOperations.RESIZE_SERVER_GROUP) @Component("resizeOracleServerGroupDescriptionValidator") class ResizeOracleServerGroupDescriptionValidator extends StandardOracleAttributeValidator { @Override - void validate(List priorDescriptions, ResizeOracleServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, ResizeOracleServerGroupDescription description, ValidationErrors errors) { context = "resizeServerGroupDescription" validateNotEmptyString(errors, description.serverGroupName, "serverGroupName") validateNotEmptyString(errors, description.region, "region") diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidator.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidator.groovy index 9af912ae895..04618bf759b 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidator.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidator.groovy @@ -9,15 +9,14 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator -import org.springframework.validation.Errors import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator -import com.netflix.spinnaker.clouddriver.orchestration.VersionedCloudProviderOperation +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors abstract class StandardOracleAttributeValidator extends DescriptionValidator { - String context + protected String context - def validateNotEmptyString(Errors errors, String value, String attribute) { + def validateNotEmptyString(ValidationErrors errors, String value, String attribute) { if (!value) { errors.rejectValue(attribute, "${context}.${attribute}.empty") return false @@ -25,7 +24,7 @@ abstract class StandardOracleAttributeValidator extends DescriptionValidator< return true } - def validateNonNegative(Errors errors, int value, String attribute) { + def validateNonNegative(ValidationErrors errors, int value, String attribute) { def result if (value >= 0) { result = true @@ -36,7 +35,7 @@ abstract class StandardOracleAttributeValidator extends DescriptionValidator< result } - def validatePositive(Errors errors, int value, String attribute) { + def 
validatePositive(ValidationErrors errors, int value, String attribute) { def result if (value > 0) { result = true @@ -46,8 +45,8 @@ abstract class StandardOracleAttributeValidator extends DescriptionValidator< } result } - - def validateCapacity(Errors errors, Integer min, Integer max, Integer desired) { + + def validateCapacity(ValidationErrors errors, Integer min, Integer max, Integer desired) { if (min != null && max != null && min > max) { errors.rejectValue "capacity", "${context}.capacity.transposed", [min, max] as String[], @@ -61,4 +60,22 @@ abstract class StandardOracleAttributeValidator extends DescriptionValidator< } } } + + def validateLimit(ValidationErrors errors, String value, int limit, String attribute) { + if (!value) { + errors.rejectValue(attribute, "${context}.${attribute}.empty") + return false + } else if (value.length() >= limit) { + errors.rejectValue(attribute, "${context}.${attribute}.exceedsLimit") + } + return true + } + + def validateNotNull(ValidationErrors errors, Object value, String attribute) { + if (!value) { + errors.rejectValue(attribute, "${context}.${attribute}.null") + return false + } + return true + } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleCluster.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleCluster.groovy index 11e0aa834f6..d40dce09146 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleCluster.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleCluster.groovy @@ -26,17 +26,24 @@ class OracleCluster { @JsonIgnore View getView() { - new View() + new View(this) } @Canonical class View implements Cluster { final String type = OracleCloudProvider.ID + final String name + final String accountName + final Set serverGroups + final Set loadBalancers + + View(OracleCluster oracleCluster){ + name = oracleCluster.name + accountName = oracleCluster.accountName + serverGroups = oracleCluster.serverGroups.collect { OracleServerGroup it -> it.getView() } as Set + loadBalancers = [] as Set + } - String name = OracleCluster.this.name - String accountName = OracleCluster.this.accountName - Set serverGroups = OracleCluster.this.serverGroups.collect { it.getView() } as Set - Set loadBalancers = [] as Set } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleInstance.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleInstance.groovy index a1e237d53ec..1176a8480e2 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleInstance.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleInstance.groovy @@ -24,4 +24,5 @@ class OracleInstance implements Instance, Serializable { String account String region String id + String privateIp } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleServerGroup.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleServerGroup.groovy index 81712b7cba8..d939e65bc50 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleServerGroup.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/model/OracleServerGroup.groovy @@ -6,7 +6,6 @@ * If a copy of the Apache License Version 
2.0 was not distributed with this file, * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html */ - package com.netflix.spinnaker.clouddriver.oracle.model import com.fasterxml.jackson.annotation.JsonIgnore @@ -34,11 +33,12 @@ class OracleServerGroup { Boolean disabled = false Integer targetSize String loadBalancerId + String backendSetName OracleNamedAccountCredentials credentials @JsonIgnore View getView() { - new View() + new View(this) } @JsonInclude(JsonInclude.Include.NON_NULL) @@ -48,20 +48,32 @@ class OracleServerGroup { final String type = OracleCloudProvider.ID final String cloudProvider = OracleCloudProvider.ID - String name = OracleServerGroup.this.name - String region = OracleServerGroup.this.region - String zone = OracleServerGroup.this.zone - Set zones = OracleServerGroup.this.zones - Set instances = OracleServerGroup.this.instances - Map launchConfig = OracleServerGroup.this.launchConfig - Set securityGroups = OracleServerGroup.this.securityGroups - Map buildInfo = OracleServerGroup.this.buildInfo - Boolean disabled = OracleServerGroup.this.disabled - ServerGroup.Capacity capacity = new ServerGroup.Capacity(desired: OracleServerGroup.this.targetSize, - min: OracleServerGroup.this.targetSize, max: OracleServerGroup.this.targetSize) + String name + String region + String zone + Set zones + Set instances + Map launchConfig + Set securityGroups + Map buildInfo + Boolean disabled + ServerGroup.Capacity capacity + + View(OracleServerGroup oracleServerGroup){ + name = oracleServerGroup.name + region = oracleServerGroup.region + zone = oracleServerGroup.zone + zones = oracleServerGroup.zones + instances = oracleServerGroup.instances + launchConfig = oracleServerGroup.launchConfig + securityGroups = oracleServerGroup.securityGroups + buildInfo = oracleServerGroup.buildInfo + disabled = oracleServerGroup.disabled + capacity = new ServerGroup.Capacity(desired: oracleServerGroup.targetSize, + min: oracleServerGroup.targetSize, max: oracleServerGroup.targetSize) + } - @Override - Boolean isDisabled() { // Because groovy isn't smart enough to generate this method :-( + Boolean isDisabled(){ disabled } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/AbstractOracleCachingAgent.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/AbstractOracleCachingAgent.groovy index 9f766096b94..db6f6de3f14 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/AbstractOracleCachingAgent.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/AbstractOracleCachingAgent.groovy @@ -16,7 +16,6 @@ import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider import com.netflix.spinnaker.cats.agent.CachingAgent import com.netflix.spinnaker.clouddriver.oracle.OracleCloudProvider import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials -import com.oracle.bmc.http.internal.ExplicitlySetFilter abstract class AbstractOracleCachingAgent implements CachingAgent { @@ -32,7 +31,7 @@ abstract class AbstractOracleCachingAgent implements CachingAgent { this.credentials = credentials this.clouddriverUserAgentApplicationName = clouddriverUserAgentApplicationName agentType = "${credentials.name}/${credentials.region}/${this.class.simpleName}" - + FilterProvider filters = new SimpleFilterProvider().setFailOnUnknownId(false) //Alternatives of adding explicitlySetFilter: //- 
FilterProvider filters = new SimpleFilterProvider().addFilter("explicitlySetFilter", (SimpleBeanPropertyFilter) SimpleBeanPropertyFilter.serializeAllExcept(['__explicitlySet__'].toSet())); diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleLoadBalancerCachingAgent.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleLoadBalancerCachingAgent.groovy index b9d3c331431..cb4fe89e9d0 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleLoadBalancerCachingAgent.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleLoadBalancerCachingAgent.groovy @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Oracle America, Inc. + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. * * The contents of this file are subject to the Apache License Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,6 +55,14 @@ class OracleLoadBalancerCachingAgent extends AbstractOracleCachingAgent { return null } Map attributes = objectMapper.convertValue(lb, ATTRIBUTES) + Map certificates = attributes.certificates; + if (certificates) { + certificates.each{ name, cert -> + if (cert) { + cert.remove('publicCertificate') + } + } + } new DefaultCacheData( Keys.getLoadBalancerKey(lb.displayName, lb.id, credentials.region, credentials.name), attributes, diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleSecurityGroupCachingAgent.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleSecurityGroupCachingAgent.groovy index 85691a78b77..a2eb422043f 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleSecurityGroupCachingAgent.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/agent/OracleSecurityGroupCachingAgent.groovy @@ -17,6 +17,7 @@ import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.DefaultCacheData import com.netflix.spinnaker.cats.provider.ProviderCache import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport import com.netflix.spinnaker.clouddriver.oracle.OracleCloudProvider import com.netflix.spinnaker.clouddriver.oracle.cache.Keys @@ -42,7 +43,7 @@ class OracleSecurityGroupCachingAgent extends AbstractOracleCachingAgent impleme OracleSecurityGroupCachingAgent(String clouddriverUserAgentApplicationName, OracleNamedAccountCredentials credentials, ObjectMapper objectMapper, Registry registry) { super(objectMapper, credentials, clouddriverUserAgentApplicationName) - this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${OracleCloudProvider.ID}:${OnDemandAgent.OnDemandType.SecurityGroup}") + this.metricsSupport = new OnDemandMetricsSupport(registry, this, "${OracleCloudProvider.ID}:${OnDemandType.SecurityGroup}") } @Override @@ -67,13 +68,13 @@ class OracleSecurityGroupCachingAgent extends AbstractOracleCachingAgent impleme } @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { + Collection> pendingOnDemandRequests(ProviderCache providerCache) { return [] } @Override - boolean handles(OnDemandAgent.OnDemandType 
type, String cloudProvider) { - type == OnDemandAgent.OnDemandType.SecurityGroup && cloudProvider == OracleCloudProvider.ID + boolean handles(OnDemandType type, String cloudProvider) { + type == OnDemandType.SecurityGroup && cloudProvider == OracleCloudProvider.ID } @Override diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/config/OracleInfrastructureProviderConfig.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/config/OracleInfrastructureProviderConfig.groovy index 42f9b3e6bc9..9903e123734 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/config/OracleInfrastructureProviderConfig.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/config/OracleInfrastructureProviderConfig.groovy @@ -12,7 +12,6 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature import com.netflix.spectator.api.Registry import com.netflix.spinnaker.cats.agent.Agent -import com.netflix.spinnaker.cats.provider.ProviderSynchronizerTypeWrapper import com.netflix.spinnaker.config.OracleConfiguration import com.netflix.spinnaker.clouddriver.oracle.provider.OracleInfrastructureProvider import com.netflix.spinnaker.clouddriver.oracle.provider.agent.* @@ -20,7 +19,6 @@ import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCrede import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServerGroupService import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import com.netflix.spinnaker.clouddriver.security.ProviderUtils -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.boot.context.properties.EnableConfigurationProperties import org.springframework.context.annotation.* @@ -52,24 +50,7 @@ class OracleInfrastructureProviderConfig { return oracleInfrastructureProvider } - @Bean - OracleInfrastructureProviderSynchronizerTypeWrapper oracleInfrastructureProviderSynchronizerTypeWrapper() { - new OracleInfrastructureProviderSynchronizerTypeWrapper() - } - - class OracleInfrastructureProviderSynchronizerTypeWrapper implements ProviderSynchronizerTypeWrapper { - - @Override - Class getSynchronizerType() { - return OracleInfrastructureProviderSynchronizer - } - } - - class OracleInfrastructureProviderSynchronizer {} - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - OracleInfrastructureProviderSynchronizer synchronizeOracleInfrastructureProvider( + private static void synchronizeOracleInfrastructureProvider( String clouddriverUserAgentApplicationName, OracleInfrastructureProvider oracleInfrastructureProvider, AccountCredentialsRepository accountCredentialsRepository, @@ -125,7 +106,5 @@ class OracleInfrastructureProviderConfig { oracleInfrastructureProvider.agents.addAll(newlyAddedAgents) } } - - return new OracleInfrastructureProviderSynchronizer() } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleClusterProvider.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleClusterProvider.groovy index bf02b1016dc..f92af960f3e 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleClusterProvider.groovy +++ 
b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleClusterProvider.groovy @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Oracle America, Inc. + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. * * The contents of this file are subject to the Apache License Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -118,6 +118,8 @@ class OracleClusterProvider implements ClusterProvider { sg.instances?.each { def instance = instanceProvider.getInstance(Keys.parse(cacheItem.id)?.get("account"), "*", it.id) if (instance) { + //TODO display name with id or privateIp + //it.name = it.name + (it.privateIp? '_' + it.privateIp : '') it.healthState = instance.healthState it.health = instance.health if (sg.disabled) { diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleInstanceProvider.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleInstanceProvider.groovy index 5551ed5c6e4..dce9e241187 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleInstanceProvider.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleInstanceProvider.groovy @@ -20,7 +20,6 @@ import com.netflix.spinnaker.clouddriver.oracle.model.OracleInstance import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.oracle.bmc.core.model.Instance import groovy.util.logging.Slf4j -import org.apache.commons.lang.NotImplementedException import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @@ -28,7 +27,7 @@ import static com.netflix.spinnaker.clouddriver.oracle.cache.Keys.Namespace.INST @Slf4j @Component -class OracleInstanceProvider implements InstanceProvider { +class OracleInstanceProvider implements InstanceProvider { private final Cache cacheView final ObjectMapper objectMapper @@ -58,7 +57,7 @@ class OracleInstanceProvider implements InstanceProvider { @Override String getConsoleOutput(String account, String region, String id) { // TODO: Add this when we actually need it in Deck - throw new NotImplementedException() + throw new UnsupportedOperationException() } private Set loadInstances(Collection identifiers) { diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleLoadBalancerProvider.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleLoadBalancerProvider.groovy index 9e72bc06e09..89831d6d2af 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleLoadBalancerProvider.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleLoadBalancerProvider.groovy @@ -14,8 +14,10 @@ import com.netflix.spinnaker.cats.cache.Cache import com.netflix.spinnaker.cats.cache.CacheData import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup import com.netflix.spinnaker.clouddriver.oracle.OracleCloudProvider import com.netflix.spinnaker.clouddriver.oracle.cache.Keys +import com.netflix.spinnaker.clouddriver.oracle.model.OracleSubnet import 
com.oracle.bmc.loadbalancer.model.LoadBalancer import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired @@ -29,6 +31,9 @@ class OracleLoadBalancerProvider implements LoadBalancerProvider byAccountAndRegionAndName(String account, String region, String name) { - return getAllMatchingKeyPattern(Keys.getLoadBalancerKey(name, '*', region, account)) + return getAllMatchingKeyPattern(Keys.getLoadBalancerKey(name, '*', region, account))?.toList() } @Override @@ -84,14 +89,21 @@ class OracleLoadBalancerProvider implements LoadBalancerProvider parts = Keys.parse(cacheData.id) - + Set subnets = loadBalancer.subnetIds?.collect { + oracleSubnetProvider.getAllMatchingKeyPattern(Keys.getSubnetKey(it, parts.region, parts.account)) + }.flatten(); return new OracleLoadBalancerDetail( id: loadBalancer.id, name: loadBalancer.displayName, account: parts.account, region: parts.region, - serverGroups: [loadBalancer.listeners.values().first().defaultBackendSetName] as Set - ) + ipAddresses: loadBalancer.ipAddresses, + certificates: loadBalancer.certificates, + listeners: loadBalancer.listeners, + backendSets: loadBalancer.backendSets, + subnets: subnets, + timeCreated: loadBalancer.timeCreated.toInstant().toString(), + serverGroups: [] as Set) } static class OracleLoadBalancerSummary implements LoadBalancerProvider.Item { @@ -142,9 +154,16 @@ class OracleLoadBalancerProvider implements LoadBalancerProvider serverGroups + String timeCreated + Set serverGroups = [] + List ipAddresses = [] + Map certificates + Map listeners + Map backendSets + Set subnets } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProvider.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProvider.groovy index db3d79d0b1e..b3fadd69169 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProvider.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProvider.groovy @@ -67,6 +67,12 @@ class OracleSecurityGroupProvider implements SecurityGroupProvider getAllMatchingKeyPattern(String pattern, boolean includeRules) { def identifiers = cacheView.filterIdentifiers(Keys.Namespace.SECURITY_GROUPS.ns, pattern) return loadResults(includeRules, identifiers) diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleCredentialsInitializer.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleCredentialsInitializer.groovy index abb08d8b873..67a00b312e3 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleCredentialsInitializer.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleCredentialsInitializer.groovy @@ -12,17 +12,14 @@ package com.netflix.spinnaker.clouddriver.oracle.security import com.netflix.spinnaker.cats.module.CatsModule import com.netflix.spinnaker.clouddriver.oracle.config.OracleConfigurationProperties import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable import com.netflix.spinnaker.clouddriver.security.ProviderUtils import groovy.util.logging.Slf4j -import org.springframework.beans.factory.config.ConfigurableBeanFactory 
import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration -import org.springframework.context.annotation.Scope @Slf4j @Configuration -class OracleCredentialsInitializer implements CredentialsInitializerSynchronizable { +class OracleCredentialsInitializer { @Bean List oracleNamedAccountCredentials( @@ -33,17 +30,11 @@ class OracleCredentialsInitializer implements CredentialsInitializerSynchronizab synchronizeOracleAccounts(clouddriverUserAgentApplicationName, oracleConfigurationProperties, null, accountCredentialsRepository) } - @Override - String getCredentialsSynchronizationBeanName() { - return "synchronizeOracleAccounts" - } - - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) - @Bean - List synchronizeOracleAccounts(String clouddriverUserAgentApplicationName, - OracleConfigurationProperties oracleConfigurationProperties, - CatsModule catsModule, - AccountCredentialsRepository accountCredentialsRepository) { + private List synchronizeOracleAccounts( + String clouddriverUserAgentApplicationName, + OracleConfigurationProperties oracleConfigurationProperties, + CatsModule catsModule, + AccountCredentialsRepository accountCredentialsRepository) { def (ArrayList accountsToAdd, List namesOfDeletedAccounts) = ProviderUtils.calculateAccountDeltas(accountCredentialsRepository, @@ -60,6 +51,7 @@ class OracleCredentialsInitializer implements CredentialsInitializerSynchronizab userId(managedAccount.userId). fingerprint(managedAccount.fingerprint). sshPrivateKeyFilePath(managedAccount.sshPrivateKeyFilePath). + privateKeyPassphrase(managedAccount.privateKeyPassphrase). tenancyId(managedAccount.tenancyId). region(managedAccount.region). build() diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleNamedAccountCredentials.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleNamedAccountCredentials.groovy index ad116c952cc..991235b8386 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleNamedAccountCredentials.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/security/OracleNamedAccountCredentials.groovy @@ -10,7 +10,7 @@ package com.netflix.spinnaker.clouddriver.oracle.security import com.google.common.base.Supplier import com.netflix.spinnaker.clouddriver.oracle.OracleCloudProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials import com.oracle.bmc.Region import com.oracle.bmc.auth.AuthenticationDetailsProvider import com.oracle.bmc.auth.SimpleAuthenticationDetailsProvider @@ -22,7 +22,7 @@ import com.oracle.bmc.identity.requests.ListAvailabilityDomainsRequest import com.oracle.bmc.loadbalancer.LoadBalancerClient import com.oracle.bmc.objectstorage.ObjectStorageClient -class OracleNamedAccountCredentials implements AccountCredentials { +class OracleNamedAccountCredentials extends AbstractAccountCredentials { String cloudProvider = OracleCloudProvider.ID String name @@ -32,6 +32,7 @@ class OracleNamedAccountCredentials implements AccountCredentials { String userId String fingerprint String sshPrivateKeyFilePath + String privateKeyPassphrase String tenancyId String region List requiredGroupMembership = [] @@ -51,6 +52,7 @@ class OracleNamedAccountCredentials implements AccountCredentials { String userId, String fingerprint, String sshPrivateKeyFilePath, + String 
privateKeyPassphrase, String tenancyId, String region) { this.name = name @@ -61,6 +63,7 @@ class OracleNamedAccountCredentials { this.userId = userId this.fingerprint = fingerprint this.sshPrivateKeyFilePath = sshPrivateKeyFilePath + this.privateKeyPassphrase = privateKeyPassphrase this.tenancyId = tenancyId this.region = region @@ -71,6 +74,7 @@ class OracleNamedAccountCredentials { .userId(this.userId) .fingerprint(this.fingerprint) .privateKeySupplier(privateKeySupplier) + .passPhrase(this.privateKeyPassphrase) .tenantId(this.tenancyId) .build() @@ -100,6 +104,7 @@ class OracleNamedAccountCredentials { String userId String fingerprint String sshPrivateKeyFilePath + String privateKeyPassphrase String tenancyId String region @@ -143,6 +148,11 @@ class OracleNamedAccountCredentials { return this } + Builder privateKeyPassphrase(String privateKeyPassphrase) { + this.privateKeyPassphrase = privateKeyPassphrase + return this + } + Builder tenancyId(String tenancyId) { this.tenancyId = tenancyId return this @@ -163,6 +173,7 @@ class OracleNamedAccountCredentials { this.userId, this.fingerprint, this.sshPrivateKeyFilePath, + this.privateKeyPassphrase, this.tenancyId, this.region) } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupService.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupService.groovy index c4c8f0bf47d..fe940c62119 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupService.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupService.groovy @@ -6,7 +6,6 @@ * If a copy of the Apache License Version 2.0 was not distributed with this file, * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html */ - package com.netflix.spinnaker.clouddriver.oracle.service.servergroup import com.netflix.frigga.Names @@ -26,6 +25,7 @@ import org.springframework.stereotype.Component @Component class DefaultOracleServerGroupService implements OracleServerGroupService { + private static final String DEPLOY = "DEPLOY_SERVER_GROUP" private static final String DESTROY = "DESTROY_SERVER_GROUP" private static final String RESIZE = "RESIZE_SERVER_GROUP" private static final String DISABLE = "DISABLE_SERVER_GROUP" @@ -67,12 +67,33 @@ class DefaultOracleServerGroupService implements OracleServerGroupService { } @Override - void createServerGroup(OracleServerGroup sg) { + void createServerGroup(Task task, OracleServerGroup sg) { def instances = [] as Set - for (int i = 0; i < sg.targetSize; i++) { - instances << createInstance(sg, i) + // If the tenancy's instance limit is exceeded, createInstance throws com.oracle.bmc.model.BmcException (400, LimitExceeded, false) + def errors = [] + try { + for (int i = 0; i < sg.targetSize; i++) { + instances << createInstance(sg, i) + } + } catch (BmcException e) { + task.updateStatus DEPLOY, "Creating instance failed: $e" + errors << e + } + if (errors) { + if (instances.size() > 0) { + task.updateStatus DEPLOY, "ServerGroup created with errors: $errors" + } else { + task.updateStatus DEPLOY, "ServerGroup creation failed: $errors" + } + } + if (instances.size() > 0) { + sg.instances = instances +
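/* Persist whatever did launch: after a partial LimitExceeded failure the group is saved with only the instances that were created; nothing is rolled back. */ +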
persistence.upsertServerGroup(sg) } - sg.instances = instances + } + + @Override + void updateServerGroup(OracleServerGroup sg) { persistence.upsertServerGroup(sg) } @@ -82,11 +103,14 @@ class DefaultOracleServerGroupService implements OracleServerGroupService { def serverGroup = persistence.getServerGroupByName(persistenceCtx, serverGroupName) if (serverGroup != null) { task.updateStatus DESTROY, "Found server group: $serverGroup.name" - - for (int i = 0; i < serverGroup.targetSize; i++) { - def instance = serverGroup.instances[i] - task.updateStatus DESTROY, "Terminating instance: $instance.name" - terminateInstance(serverGroup, instance) + if (serverGroup.instances && serverGroup.instances.size() > 0) { + for (int i = 0; i < serverGroup.targetSize; i++) { + def instance = serverGroup.instances[i] + if (instance) { + task.updateStatus DESTROY, "Terminating instance: $instance.name" + terminateInstance(serverGroup, instance) + } + } } task.updateStatus DESTROY, "Removing persistent data for $serverGroup.name" persistence.deleteServerGroup(serverGroup) @@ -102,36 +126,18 @@ class DefaultOracleServerGroupService implements OracleServerGroupService { def persistenceCtx = new OraclePersistenceContext(creds) def serverGroup = persistence.getServerGroupByName(persistenceCtx, serverGroupName) if (serverGroup != null) { - task.updateStatus DESTROY, "Found server group: $serverGroup.name resizing to $targetSize" - + task.updateStatus RESIZE, "Found server group: $serverGroup.name, resizing to $targetSize" if (targetSize > serverGroup.targetSize) { int numInstancesToCreate = targetSize - serverGroup.targetSize task.updateStatus RESIZE, "Creating $numInstancesToCreate instances" - - resize(serverGroup, targetSize, serverGroup.targetSize, targetSize, - { int i -> - task.updateStatus RESIZE, "Creating instance: $i" - return createInstance(serverGroup, i) - }, - { OracleInstance instance -> - serverGroup.instances.add(instance) - }) - + increase(task, serverGroup, targetSize) } else if (serverGroup.targetSize > targetSize) { int numInstancesToTerminate = serverGroup.targetSize - targetSize task.updateStatus RESIZE, "Terminating $numInstancesToTerminate instances" - - resize(serverGroup, targetSize, targetSize, serverGroup.targetSize, - { int i -> - task.updateStatus RESIZE, "Terminating instance: " + serverGroup.instances[i].name - return terminateInstance(serverGroup, serverGroup.instances[i]) - }, - { OracleInstance instance -> - serverGroup.instances.remove(instance) - }) - + decrease(task, serverGroup, targetSize) } else { task.updateStatus RESIZE, "Already running the desired number of instances" + return true } task.updateStatus RESIZE, "Updating persistent data for $serverGroup.name" persistence.upsertServerGroup(serverGroup) @@ -171,12 +177,21 @@ class DefaultOracleServerGroupService implements OracleServerGroupService { } private OracleInstance createInstance(OracleServerGroup sg, int i) { + Map metadata = new HashMap<>() + if (sg.launchConfig["sshAuthorizedKeys"]?.trim()) { + // "ssh_authorized_keys" - Provide one or more public SSH keys to be included in the ~/.ssh/authorized_keys file + // for the default user on the instance. + // Use a newline character to separate multiple keys.
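+ // e.g. (hypothetical keys) "ssh-rsa AAAA... alice\nssh-rsa AAAA... bob" installs both keys for the default user.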
+ metadata.put("ssh_authorized_keys", sg.launchConfig["sshAuthorizedKeys"] as String) + } + LaunchInstanceRequest rq = LaunchInstanceRequest.builder().launchInstanceDetails(LaunchInstanceDetails.builder() .availabilityDomain(sg.launchConfig["availabilityDomain"] as String) .compartmentId(sg.launchConfig["compartmentId"] as String) .imageId(sg.launchConfig["imageId"] as String) .shape(sg.launchConfig["shape"] as String) .subnetId(sg.launchConfig["subnetId"] as String) + .metadata(metadata) .displayName(sg.name + "-$i") .build()).build() @@ -205,14 +220,42 @@ class DefaultOracleServerGroupService implements OracleServerGroupService { return instance } - private void resize(OracleServerGroup sg, Integer targetSize, int from, int to, Closure operate, Closure update) { + private void increase(Task task, OracleServerGroup serverGroup, int targetSize) { + int currentSize = serverGroup.targetSize + def instances = [] as Set + def errors = [] + for (int i = currentSize; i < targetSize; i++) { + task.updateStatus RESIZE, "Creating instance: $i" + try { + instances << createInstance(serverGroup, i) + } catch (BmcException e) { + task.updateStatus RESIZE, "Creating instance failed: $e" + errors << e + } + } + if (errors) { + if (instances.size() > 0) { + task.updateStatus RESIZE, "ServerGroup resized with errors: $errors" + } else { + task.updateStatus RESIZE, "ServerGroup resize failed: $errors" + } + } + for (OracleInstance instance : instances) { + serverGroup.instances.add(instance) + } + serverGroup.targetSize = currentSize + instances.size() + } + + private void decrease(Task task, OracleServerGroup serverGroup, int targetSize) { def instances = [] as Set - for (int i = from; i < to; i++) { - instances << operate(i) + int currentSize = serverGroup.targetSize + for (int i = targetSize; i < currentSize; i++) { + task.updateStatus RESIZE, "Terminating instance: " + serverGroup.instances[i].name + instances << terminateInstance(serverGroup, serverGroup.instances[i]) } for (OracleInstance instance : instances) { - update(instance) + serverGroup.instances.remove(instance) } - sg.targetSize = targetSize + serverGroup.targetSize = targetSize } } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupPersistence.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupPersistence.groovy index 9f50beac34c..e30e8ba2998 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupPersistence.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupPersistence.groovy @@ -6,13 +6,13 @@ * If a copy of the Apache License Version 2.0 was not distributed with this file, * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html */ - package com.netflix.spinnaker.clouddriver.oracle.service.servergroup import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.oracle.model.OracleServerGroup import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials +import com.oracle.bmc.model.BmcException import com.oracle.bmc.objectstorage.model.CreateBucketDetails import com.oracle.bmc.objectstorage.requests.* import groovy.transform.Synchronized @@ -211,9 +211,16 @@ class OracleServerGroupPersistence { String json
inputStream.withStream { json = inputStream.getText("UTF-8") } sg = jsonToServerGroup(json, ctx.creds) - return sg + return sg + } catch (BmcException e) { + if (e.getStatusCode() == 404) { + log.warn(e.getLocalizedMessage()) + } else { + log.error(e.getLocalizedMessage()) + } + return null } catch (Exception e) { - log.error("OSS Read exception", e) + log.error(e.getLocalizedMessage()) return null } break; diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupService.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupService.groovy index d5cf519f39b..5778d6ada0d 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupService.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/OracleServerGroupService.groovy @@ -20,7 +20,7 @@ interface OracleServerGroupService { public OracleServerGroup getServerGroup(OracleNamedAccountCredentials creds, String application, String name) - public void createServerGroup(OracleServerGroup serverGroup) + public void createServerGroup(Task task, OracleServerGroup serverGroup) public boolean destroyServerGroup(Task task, OracleNamedAccountCredentials creds, String serverGroupName) @@ -30,4 +30,5 @@ interface OracleServerGroupService { public void enableServerGroup(Task task, OracleNamedAccountCredentials creds, String serverGroupName) + public void updateServerGroup(OracleServerGroup sg) } diff --git a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/config/OracleConfiguration.groovy b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/config/OracleConfiguration.groovy index e88f89ffa91..602fedf13a0 100644 --- a/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/config/OracleConfiguration.groovy +++ b/clouddriver-oracle/src/main/groovy/com/netflix/spinnaker/config/OracleConfiguration.groovy @@ -11,7 +11,6 @@ package com.netflix.spinnaker.config import com.netflix.spinnaker.clouddriver.oracle.config.OracleConfigurationProperties import com.netflix.spinnaker.clouddriver.oracle.health.OracleHealthIndicator import com.netflix.spinnaker.clouddriver.oracle.security.OracleCredentialsInitializer -import org.springframework.beans.factory.config.ConfigurableBeanFactory import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties @@ -26,7 +25,6 @@ import org.springframework.scheduling.annotation.EnableScheduling @Import([ OracleCredentialsInitializer ]) class OracleConfiguration { - @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) @Bean @ConfigurationProperties("oracle") OracleConfigurationProperties oracleConfigurationProperties() { diff --git a/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/DeleteOracleLoadBalancerAtomicOperationConverter.java b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/DeleteOracleLoadBalancerAtomicOperationConverter.java new file mode 100644 index 00000000000..a1ea37a9b02 --- /dev/null +++ b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/DeleteOracleLoadBalancerAtomicOperationConverter.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation 
and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.converter; + +import com.netflix.spinnaker.clouddriver.oracle.OracleOperation; +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.DeleteLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.oracle.deploy.op.DeleteOracleLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import groovy.util.logging.Slf4j; +import java.util.Map; +import org.springframework.stereotype.Component; + +@Slf4j +@OracleOperation(AtomicOperations.DELETE_LOAD_BALANCER) +@Component("deleteOracleLoadBalancerDescription") +public class DeleteOracleLoadBalancerAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + @SuppressWarnings("rawtypes") + @Override + public AtomicOperation convertOperation(Map input) { + return new DeleteOracleLoadBalancerAtomicOperation(convertDescription(input)); + } + + @SuppressWarnings("rawtypes") + @Override + public DeleteLoadBalancerDescription convertDescription(Map input) { + return OracleAtomicOperationConverterHelper.convertDescription( + input, this, DeleteLoadBalancerDescription.class); + } +} diff --git a/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/description/DeleteLoadBalancerDescription.java b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/description/DeleteLoadBalancerDescription.java new file mode 100644 index 00000000000..698b87a7d37 --- /dev/null +++ b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/description/DeleteLoadBalancerDescription.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.description; + +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import java.util.Collection; +import java.util.Collections; + +public class DeleteLoadBalancerDescription extends AbstractOracleCredentialsDescription + implements ApplicationNameable { + String application; + String loadBalancerId; + + public String getApplication() { + return application; + } + + public void setApplication(String application) { + this.application = application; + } + + public String getLoadBalancerId() { + return loadBalancerId; + } + + public void setLoadBalancerId(String loadBalancerId) { + this.loadBalancerId = loadBalancerId; + } + + @Override + public Collection getApplications() { + return Collections.singleton(application); + } +} diff --git a/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/op/UpsertOracleLoadBalancerAtomicOperation.java b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/op/UpsertOracleLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..6fe2805323a --- /dev/null +++ b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/op/UpsertOracleLoadBalancerAtomicOperation.java @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.op; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller; +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.UpsertLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.oracle.model.Details; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.oracle.bmc.loadbalancer.model.BackendDetails; +import com.oracle.bmc.loadbalancer.model.BackendSet; +import com.oracle.bmc.loadbalancer.model.BackendSetDetails; +import com.oracle.bmc.loadbalancer.model.Certificate; +import com.oracle.bmc.loadbalancer.model.CertificateDetails; +import com.oracle.bmc.loadbalancer.model.CreateBackendSetDetails; +import com.oracle.bmc.loadbalancer.model.CreateCertificateDetails; +import com.oracle.bmc.loadbalancer.model.CreateListenerDetails; +import com.oracle.bmc.loadbalancer.model.CreateLoadBalancerDetails; +import com.oracle.bmc.loadbalancer.model.ListenerDetails; +import com.oracle.bmc.loadbalancer.model.LoadBalancer; +import com.oracle.bmc.loadbalancer.model.UpdateBackendSetDetails; +import com.oracle.bmc.loadbalancer.model.UpdateListenerDetails; +import com.oracle.bmc.loadbalancer.requests.CreateBackendSetRequest; +import com.oracle.bmc.loadbalancer.requests.CreateCertificateRequest; +import com.oracle.bmc.loadbalancer.requests.CreateListenerRequest; +import com.oracle.bmc.loadbalancer.requests.CreateLoadBalancerRequest; +import 
com.oracle.bmc.loadbalancer.requests.DeleteBackendSetRequest; +import com.oracle.bmc.loadbalancer.requests.DeleteCertificateRequest; +import com.oracle.bmc.loadbalancer.requests.DeleteListenerRequest; +import com.oracle.bmc.loadbalancer.requests.GetLoadBalancerRequest; +import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest; +import com.oracle.bmc.loadbalancer.requests.UpdateListenerRequest; +import com.oracle.bmc.loadbalancer.responses.CreateBackendSetResponse; +import com.oracle.bmc.loadbalancer.responses.CreateCertificateResponse; +import com.oracle.bmc.loadbalancer.responses.CreateListenerResponse; +import com.oracle.bmc.loadbalancer.responses.CreateLoadBalancerResponse; +import com.oracle.bmc.loadbalancer.responses.DeleteBackendSetResponse; +import com.oracle.bmc.loadbalancer.responses.DeleteCertificateResponse; +import com.oracle.bmc.loadbalancer.responses.DeleteListenerResponse; +import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse; +import com.oracle.bmc.loadbalancer.responses.UpdateListenerResponse; +import com.oracle.bmc.model.BmcException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class UpsertOracleLoadBalancerAtomicOperation implements AtomicOperation { + + private final UpsertLoadBalancerDescription description; + + private static final String CREATE = "CreateLB"; + private static final String UPDATE = "UpdateLB"; + + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + UpsertOracleLoadBalancerAtomicOperation(UpsertLoadBalancerDescription description) { + this.description = description; + } + + UpdateBackendSetDetails toUpdate(BackendSetDetails details, BackendSet existing) { + UpdateBackendSetDetails.Builder builder = + UpdateBackendSetDetails.builder().policy(details.getPolicy()); + if (details.getHealthChecker() != null) { + builder.healthChecker(details.getHealthChecker()); + } + if (details.getSessionPersistenceConfiguration() != null) { + builder.sessionPersistenceConfiguration(details.getSessionPersistenceConfiguration()); + } + if (details.getSslConfiguration() != null) { + builder.sslConfiguration(details.getSslConfiguration()); + } + List backends = + existing.getBackends().stream().map(b -> Details.of(b)).collect(Collectors.toList()); + builder.backends(backends); + return builder.build(); + } + + CreateBackendSetDetails toCreate(BackendSetDetails details, String name) { + CreateBackendSetDetails.Builder builder = + CreateBackendSetDetails.builder().policy(details.getPolicy()).name(name); + if (details.getHealthChecker() != null) { + builder.healthChecker(details.getHealthChecker()); + } + if (details.getSessionPersistenceConfiguration() != null) { + builder.sessionPersistenceConfiguration(details.getSessionPersistenceConfiguration()); + } + if (details.getSslConfiguration() != null) { + builder.sslConfiguration(details.getSslConfiguration()); + } + return builder.build(); + } + + CreateCertificateDetails toCreate(CertificateDetails details, String name) { + CreateCertificateDetails.Builder builder = + CreateCertificateDetails.builder().certificateName(name); + if (details.getCaCertificate() != null) { + builder.caCertificate(details.getCaCertificate()); + } + if (details.getPublicCertificate() != null) { + builder.publicCertificate(details.getPublicCertificate()); + } + if (details.getPrivateKey() != null) { + builder.privateKey(details.getPrivateKey()); + } + if (details.getPassphrase() != null) { + 
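/* passphrase protecting the certificate's private key; it is forwarded to the load balancer API unchanged */ +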
builder.passphrase(details.getPassphrase()); + } + return builder.build(); + } + + CreateListenerDetails toCreate(ListenerDetails details, String name) { + CreateListenerDetails.Builder builder = + CreateListenerDetails.builder() + .name(name) + .protocol(details.getProtocol()) + .port(details.getPort()); + if (details.getConnectionConfiguration() != null) { + builder.connectionConfiguration(details.getConnectionConfiguration()); + } + if (details.getDefaultBackendSetName() != null) { + builder.defaultBackendSetName(details.getDefaultBackendSetName()); + } + if (details.getHostnameNames() != null) { + builder.hostnameNames(details.getHostnameNames()); + } + if (details.getPathRouteSetName() != null) { + builder.pathRouteSetName(details.getPathRouteSetName()); + } + if (details.getSslConfiguration() != null) { + builder.sslConfiguration(details.getSslConfiguration()); + } + return builder.build(); + } + + UpdateListenerDetails toUpdate(ListenerDetails details) { + UpdateListenerDetails.Builder builder = + UpdateListenerDetails.builder().protocol(details.getProtocol()).port(details.getPort()); + if (details.getConnectionConfiguration() != null) { + builder.connectionConfiguration(details.getConnectionConfiguration()); + } + if (details.getDefaultBackendSetName() != null) { + builder.defaultBackendSetName(details.getDefaultBackendSetName()); + } + if (details.getHostnameNames() != null) { + builder.hostnameNames(details.getHostnameNames()); + } + if (details.getPathRouteSetName() != null) { + builder.pathRouteSetName(details.getPathRouteSetName()); + } + if (details.getSslConfiguration() != null) { + builder.sslConfiguration(details.getSslConfiguration()); + } + return builder.build(); + } + + void updateBackendSets(LoadBalancer lb, Task task) { + if (lb.getBackendSets() != null) { + lb.getBackendSets() + .forEach( + (name, existingBackendSet) -> { + BackendSetDetails backendSetUpdate = + (description.getBackendSets() != null) + ? 
description.getBackendSets().get(name) + : null; + if (backendSetUpdate != null) { + // Update existing BackendSets + UpdateBackendSetResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .updateBackendSet( + UpdateBackendSetRequest.builder() + .loadBalancerId(lb.getId()) + .backendSetName(name) + .updateBackendSetDetails( + toUpdate(backendSetUpdate, existingBackendSet)) + .build()); + task.updateStatus( + UPDATE, + "UpdateBackendSetRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } else { + // Delete backendSet: must have no backend and no listener + DeleteBackendSetResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .deleteBackendSet( + DeleteBackendSetRequest.builder() + .loadBalancerId(lb.getId()) + .backendSetName(name) + .build()); + task.updateStatus( + UPDATE, + "DeleteBackendSetRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + // Add new backendSets + Map<String, BackendSetDetails> backendSets = description.getBackendSets(); + if (backendSets != null) { + backendSets.forEach( + (name, details) -> { + if (!lb.getBackendSets().containsKey(name)) { + CreateBackendSetResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .createBackendSet( + CreateBackendSetRequest.builder() + .loadBalancerId(description.getLoadBalancerId()) + .createBackendSetDetails(toCreate(details, name)) + .build()); + task.updateStatus( + UPDATE, + "CreateBackendSetRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + } + + void updateCertificates(LoadBalancer lb, Task task) { + if (lb.getCertificates() != null) { + lb.getCertificates() + .forEach( + (name, existingCert) -> { + CertificateDetails cert = + (description.getCertificates() != null) + ? description.getCertificates().get(name) + : null; + if (cert == null) { + // Delete certificate: must have no listener using it + DeleteCertificateResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .deleteCertificate( + DeleteCertificateRequest.builder() + .loadBalancerId(lb.getId()) + .certificateName(name) + .build()); + task.updateStatus( + UPDATE, + "DeleteCertificateRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + // Add new certificate + Map<String, CertificateDetails> certificates = description.getCertificates(); + if (certificates != null) { + certificates.forEach( + (name, details) -> { + Certificate cert = lb.getCertificates().get(name); + if (cert == null) { + CreateCertificateResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .createCertificate( + CreateCertificateRequest.builder() + .loadBalancerId(description.getLoadBalancerId()) + .createCertificateDetails(toCreate(details, name)) + .build()); + task.updateStatus( + UPDATE, + "CreateCertificateRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + } + + void update(LoadBalancer lb, Task task) { + task.updateStatus(UPDATE, "UpdateLoadBalancer: " + lb.getDisplayName()); + // Delete Listeners + if (lb.getListeners() != null) { + lb.getListeners() + .forEach( + (name, existingListener) -> { + ListenerDetails listenerUpdate = + (description.getListeners() != null) + ? description.getListeners().get(name) + : null; + if (listenerUpdate != null) { + // listener could be updated to use a new backendSet, so do this after updating + // backendSets + } else { + DeleteListenerResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .deleteListener( + DeleteListenerRequest.builder() + .loadBalancerId(lb.getId()) + .listenerName(name) + .build()); + task.updateStatus( + UPDATE, + "DeleteListenerRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + updateBackendSets(lb, task); + updateCertificates(lb, task); + // Update Listeners + if (lb.getListeners() != null) { + lb.getListeners() + .forEach( + (name, existingListener) -> { + ListenerDetails listenerUpdate = + (description.getListeners() != null) + ? description.getListeners().get(name) + : null; + if (listenerUpdate != null) { + UpdateListenerResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .updateListener( + UpdateListenerRequest.builder() + .loadBalancerId(lb.getId()) + .listenerName(name) + .updateListenerDetails(toUpdate(listenerUpdate)) + .build()); + task.updateStatus( + UPDATE, + "UpdateListenerRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + // Add new Listeners + Map<String, ListenerDetails> listeners = description.getListeners(); + if (listeners != null) { + listeners.forEach( + (name, listener) -> { + if (!lb.getListeners().containsKey(name)) { + CreateListenerResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .createListener( + CreateListenerRequest.builder() + .loadBalancerId(description.getLoadBalancerId()) + .createListenerDetails(toCreate(listener, name)) + .build()); + task.updateStatus( + UPDATE, + "CreateListenerRequest of " + name + " submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + UPDATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + }); + } + } + + void create(Task task) { + String clusterName = description.qualifiedName(); + task.updateStatus(CREATE, "Create LB: " + clusterName); + CreateLoadBalancerDetails.Builder lbDetails = + CreateLoadBalancerDetails.builder() + .displayName(clusterName) + .compartmentId(description.getCredentials().getCompartmentId()) + .shapeName(description.getShape()) + .subnetIds(description.getSubnetIds()); + if (description.getIsPrivate()) { + lbDetails.isPrivate(description.getIsPrivate()); + } + if (description.getCertificates() != null) { + lbDetails.certificates(description.getCertificates()); + } + if (description.getBackendSets() != null) { + lbDetails.backendSets(description.getBackendSets()); + } + if (description.getListeners() != null) { + lbDetails.listeners(description.getListeners()); + } + CreateLoadBalancerResponse res = + description + .getCredentials() + .getLoadBalancerClient() + .createLoadBalancer( + CreateLoadBalancerRequest.builder() + .createLoadBalancerDetails(lbDetails.build()) + .build()); + task.updateStatus( + CREATE, "Create LB request submitted - work request id: " + res.getOpcWorkRequestId()); + OracleWorkRequestPoller.poll( + res.getOpcWorkRequestId(), + CREATE, + task, + description.getCredentials().getLoadBalancerClient()); + } + + @Override + public Map operate(List priorOutputs) { + Task task = getTask(); + if (description.getLoadBalancerId() != null) { + try { + LoadBalancer lb = + description + .getCredentials() + .getLoadBalancerClient() + .getLoadBalancer( + GetLoadBalancerRequest.builder() + .loadBalancerId(description.getLoadBalancerId()) + .build()) + .getLoadBalancer(); + if (lb != null) { + update(lb, task); + } else { + task.updateStatus(UPDATE, "LoadBalancer " + description.getLoadBalancerId() + " does not exist."); + } + } catch (BmcException e) { + if (e.getStatusCode() == 404) { + task.updateStatus(UPDATE, "LoadBalancer " + description.getLoadBalancerId() + " does not exist."); + } else { + throw e; + } + } + } else { + create(task); + } + return mapOf( + "loadBalancers", + mapOf( + description.getCredentials().getRegion(), mapOf("name", description.qualifiedName()))); + } + + Map<String, Object> mapOf(String key, Object val) { + Map<String, Object> map = new HashMap<>(); +
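/* single-entry map helper; java.util.Map.of would serve on Java 9+, assuming this module still targets Java 8 */ +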
map.put(key, val); + return map; + } +} diff --git a/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/UpsertLoadBalancerDescriptionValidator.java b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/UpsertLoadBalancerDescriptionValidator.java new file mode 100644 index 00000000000..2f2a2adb7f4 --- /dev/null +++ b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/UpsertLoadBalancerDescriptionValidator.java @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.validator; + +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors; +import com.netflix.spinnaker.clouddriver.oracle.OracleOperation; +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.UpsertLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.springframework.stereotype.Component; + +@OracleOperation(AtomicOperations.UPSERT_LOAD_BALANCER) +@Component("upsertLoadBalancerDescriptionValidator") +class UpsertLoadBalancerDescriptionValidator + extends StandardOracleAttributeValidator<UpsertLoadBalancerDescription> { + + Set<String> validShapes = Stream.of("100Mbps", "400Mbps", "8000Mbps").collect(Collectors.toSet()); + + @SuppressWarnings("rawtypes") + @Override + public void validate( + List priorDescriptions, UpsertLoadBalancerDescription description, ValidationErrors errors) { + context = "upsertLoadBalancerDescriptionValidator"; + validateNotEmptyString(errors, description.getApplication(), "application"); + if (description.getLoadBalancerId() == null) { + validateNotEmptyString(errors, description.getShape(), "shape"); + if (!validShapes.contains(description.getShape())) { + errors.rejectValue(context + ".shape", context + ".shape.invalidLoadBalancerShape"); + } + if (!description.getIsPrivate() && description.getSubnetIds().size() <= 1) { + Map<String, String> type = description.getSubnetTypeMap(); + + if (description.getSubnetIds().size() == 1) { + if (type.containsKey(description.getSubnetIds().get(0)) + && type.get(description.getSubnetIds().get(0)).compareTo("Regional") != 0) { + errors.rejectValue( + context + ".CreateServerGroupAtomicOperation", + context + ".subnetIds.publicLoadBalancerRequiresTwoSubnets"); + } + } else { + errors.rejectValue( + context + ".CreateServerGroupAtomicOperation", + context + ".subnetIds.publicLoadBalancerRequiresTwoSubnets"); + } + } + } + if (description.getCertificates() != null) { + description + .getCertificates() + .forEach( + (name, certificate) -> { + // existing cert sends only the certificateName + validateNotEmptyString( + errors, certificate.getCertificateName(), "certificate.certificateName"); + if (certificate.getPublicCertificate() != null) { + validateNotEmptyString( + errors, certificate.getPrivateKey(), "certificate.privateKey"); + validateNotEmptyString( + errors, certificate.getPublicCertificate(), "certificate.publicCertificate"); + } + }); + } + if 
(description.getBackendSets() != null) { + description + .getBackendSets() + .forEach( + (name, backendSet) -> { + validateLimit(errors, name, 32, "backendSet.name"); + validateNotNull(errors, backendSet.getHealthChecker(), "backendSet.healthChecker"); + validateNotEmptyString(errors, backendSet.getPolicy(), "backendSet.policy"); + if (backendSet.getHealthChecker() != null) { + validateNotEmptyString( + errors, + backendSet.getHealthChecker().getProtocol(), + "backendSet.healthChecker.protocol"); + validateNotNull( + errors, + backendSet.getHealthChecker().getPort(), + "backendSet.healthChecker.port"); + validateNotEmptyString( + errors, + backendSet.getHealthChecker().getUrlPath(), + "backendSet.healthChecker.urlPath"); + } + }); + } + if (description.getListeners() != null) { + description + .getListeners() + .forEach( + (name, listener) -> { + validateNotEmptyString( + errors, listener.getDefaultBackendSetName(), "listener.defaultBackendSetName"); + validateNotEmptyString(errors, listener.getProtocol(), "listener.protocol"); + validateNotNull(errors, listener.getPort(), "listener.port"); + }); + } + } +} diff --git a/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/model/Details.java b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/model/Details.java new file mode 100644 index 00000000000..4390e84c000 --- /dev/null +++ b/clouddriver-oracle/src/main/java/com/netflix/spinnaker/clouddriver/oracle/model/Details.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.model; + +import com.oracle.bmc.loadbalancer.model.Backend; +import com.oracle.bmc.loadbalancer.model.BackendDetails; +import com.oracle.bmc.loadbalancer.model.HealthChecker; +import com.oracle.bmc.loadbalancer.model.HealthCheckerDetails; +import com.oracle.bmc.loadbalancer.model.SSLConfiguration; +import com.oracle.bmc.loadbalancer.model.SSLConfigurationDetails; + +/** Converts model to modelDetails. 
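+ * For example, {@code Details.of(backend)} copies each field of a {@link Backend}
+ * read back from the API onto a {@link BackendDetails} builder, so existing state
+ * can be resubmitted unchanged inside update requests.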
*/ +public class Details { + + public static BackendDetails of(Backend backend) { + return BackendDetails.builder() + .backup(backend.getBackup()) + .drain(backend.getDrain()) + .ipAddress(backend.getIpAddress()) + .offline(backend.getOffline()) + .port(backend.getPort()) + .weight(backend.getWeight()) + .build(); + } + + public static HealthCheckerDetails of(HealthChecker healthChecker) { + return HealthCheckerDetails.builder() + .intervalInMillis(healthChecker.getIntervalInMillis()) + .port(healthChecker.getPort()) + .protocol(healthChecker.getProtocol()) + .responseBodyRegex(healthChecker.getResponseBodyRegex()) + .retries(healthChecker.getRetries()) + .returnCode(healthChecker.getReturnCode()) + .timeoutInMillis(healthChecker.getTimeoutInMillis()) + .urlPath(healthChecker.getUrlPath()) + .build(); + } + + public static SSLConfigurationDetails of(SSLConfiguration sslConfig) { + return SSLConfigurationDetails.builder() + .certificateName(sslConfig.getCertificateName()) + .verifyDepth(sslConfig.getVerifyDepth()) + .verifyPeerCertificate(sslConfig.getVerifyPeerCertificate()) + .build(); + } +} diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/CreateOracleLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/CreateOracleLoadBalancerAtomicOperationConverterUnitSpec.groovy deleted file mode 100644 index 03f05ef9a28..00000000000 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/CreateOracleLoadBalancerAtomicOperationConverterUnitSpec.groovy +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2017 Oracle America, Inc. - * - * The contents of this file are subject to the Apache License Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * If a copy of the Apache License Version 2.0 was not distributed with this file, - * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html - */ -package com.netflix.spinnaker.clouddriver.oracle.deploy.converter - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.oracle.deploy.description.CreateLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.oracle.deploy.op.CreateOracleLoadBalancerAtomicOperation -import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import spock.lang.Shared -import spock.lang.Specification - -class CreateOracleLoadBalancerAtomicOperationConverterUnitSpec extends Specification { - - @Shared - ObjectMapper mapper = new ObjectMapper() - - @Shared - CreateOracleLoadBalancerAtomicOperationConverter converter - - def setupSpec() { - this.converter = new CreateOracleLoadBalancerAtomicOperationConverter(objectMapper: mapper) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) - def mockCredentials = Mock(OracleNamedAccountCredentials) - accountCredentialsProvider.getCredentials(_) >> mockCredentials - converter.accountCredentialsProvider = accountCredentialsProvider - } - - def "return correct description and operation"() { - setup: - def input = [application: "foo", - region : "us-phoenix-1", - accountName: "my-oracle" + - "-acc", - stack : "bar", - shape : "100Mbps", - policy : "ROUND_ROBIN", - subnetIds : ["1", "2"], - listener : [ - port : 80, - protocol: "tcp" - ], - healthCheck: [ - protocol : "http", - port : 8080, - interval : 10, - retries : 5, - timeout : 5, - url : "/healthz", - statusCode : 200, - responseBodyRegex: ".*GOOD.*" - ]] - - when: - def description = converter.convertDescription(input) - - then: - description instanceof CreateLoadBalancerDescription - - when: - def operation = converter.convertOperation(input) - - then: - operation instanceof CreateOracleLoadBalancerAtomicOperation - } -} diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/UpsertOracleLoadBalancerAtomicOperationConverterUnitSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/UpsertOracleLoadBalancerAtomicOperationConverterUnitSpec.groovy new file mode 100644 index 00000000000..6494b90f159 --- /dev/null +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/converter/UpsertOracleLoadBalancerAtomicOperationConverterUnitSpec.groovy @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017 Oracle America, Inc. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.converter + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.UpsertLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.oracle.deploy.op.UpsertOracleLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import spock.lang.Shared +import spock.lang.Specification + +class UpsertOracleLoadBalancerAtomicOperationConverterUnitSpec extends Specification { + + @Shared + ObjectMapper mapper = new ObjectMapper() + + @Shared + UpsertOracleLoadBalancerAtomicOperationConverter converter + + def setupSpec() { + this.converter = new UpsertOracleLoadBalancerAtomicOperationConverter(objectMapper: mapper) + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def mockCredentials = Mock(OracleNamedAccountCredentials) + accountCredentialsProvider.getCredentials(_) >> mockCredentials + converter.accountCredentialsProvider = accountCredentialsProvider + } + + def "return correct description and operation"() { + setup: + def input = [application: "foo", + region : "us-phoenix-1", + accountName: "my-oracle" + + "-acc", + stack : "bar", + shape : "100Mbps", + policy : "ROUND_ROBIN", + subnetIds : ["1", "2"], + listener : [ + port : 80, + protocol: "tcp" + ], + healthCheck: [ + protocol : "http", + port : 8080, + interval : 10, + retries : 5, + timeout : 5, + url : "/healthz", + statusCode : 200, + responseBodyRegex: ".*GOOD.*" + ]] + + when: + def description = converter.convertDescription(input) + + then: + description instanceof UpsertLoadBalancerDescription + + when: + def operation = converter.convertOperation(input) + + then: + operation instanceof UpsertOracleLoadBalancerAtomicOperation + } +} diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandlerSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandlerSpec.groovy index f1235ff3c7e..845a00c03a4 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandlerSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/handler/BasicOracleDeployHandlerSpec.groovy @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Oracle America, Inc. + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. * * The contents of this file are subject to the Apache License Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -27,7 +27,7 @@ import com.oracle.bmc.loadbalancer.model.Listener import com.oracle.bmc.loadbalancer.model.LoadBalancer import com.oracle.bmc.loadbalancer.responses.CreateBackendSetResponse import com.oracle.bmc.loadbalancer.responses.GetLoadBalancerResponse -import com.oracle.bmc.loadbalancer.responses.UpdateListenerResponse +import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse import spock.lang.Specification class BasicOracleDeployHandlerSpec extends Specification { @@ -47,6 +47,7 @@ class BasicOracleDeployHandlerSpec extends Specification { def "Create server group"() { setup: + def SSHKeys = "ssh-rsa ABC a@b" def creds = Mock(OracleNamedAccountCredentials) creds.compartmentId >> "ocid.compartment.123" TaskRepository.threadLocalTask.set(Mock(Task)) @@ -57,6 +58,8 @@ class BasicOracleDeployHandlerSpec extends Specification { desc.stack = "dev" desc.region = "us-phoenix-1" desc.loadBalancerId = "ocid.lb.oc1..1918273" + desc.sshAuthorizedKeys = SSHKeys + desc.backendSetName = "myBackendSet" def loadBalancerClient = Mock(LoadBalancerClient) creds.loadBalancerClient >> loadBalancerClient def computeClient = Mock(ComputeClient) @@ -81,19 +84,21 @@ class BasicOracleDeployHandlerSpec extends Specification { then: 1 * sgService.listServerGroupNamesByClusterName(_, "foo-dev") >> ["foo-dev-v001"] 1 * sgService.getServerGroup(creds, "foo", "foo-dev-v001") >> sg - 1 * sgService.createServerGroup(_) + 1 * sgService.createServerGroup(_, _) >> { args -> + OracleServerGroup sgArgument = (OracleServerGroup) args[1] + assert sgArgument.launchConfig.get("sshAuthorizedKeys") == SSHKeys + } res != null res.serverGroupNames == ["us-phoenix-1:foo-dev-v002"] 1 * loadBalancerClient.getLoadBalancer(_) >> GetLoadBalancerResponse.builder() .loadBalancer(LoadBalancer.builder() .listeners(["foo-dev": Listener.builder() - .defaultBackendSetName("foo-dev-v001").build()]) - .backendSets(["foo-dev-template": BackendSet.builder() + .defaultBackendSetName("myBackendSet").build()]) + .backendSets(["myBackendSet": BackendSet.builder() .healthChecker(HealthChecker.builder().build()) .build()]).build()).build() - 1 * loadBalancerClient.createBackendSet(_) >> CreateBackendSetResponse.builder().build() - 1 * loadBalancerClient.updateListener(_) >> UpdateListenerResponse.builder().opcWorkRequestId("wr1").build() - 2 * OracleWorkRequestPoller.poll(_, _, _, loadBalancerClient) >> null + 1 * loadBalancerClient.updateBackendSet(_) >> UpdateBackendSetResponse.builder().opcWorkRequestId("wr1").build() + 1 * OracleWorkRequestPoller.poll(_, _, _, loadBalancerClient) >> null 1 * sgProvider.getServerGroup(_, _, _) >> sgViewMock sgViewMock.instanceCounts >> instanceCounts } diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/CreateOracleLoadBalancerAtomicOperationSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/CreateOracleLoadBalancerAtomicOperationSpec.groovy deleted file mode 100644 index 1773a8aa537..00000000000 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/CreateOracleLoadBalancerAtomicOperationSpec.groovy +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2017 Oracle America, Inc. - * - * The contents of this file are subject to the Apache License Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * If a copy of the Apache License Version 2.0 was not distributed with this file, - * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html - */ -package com.netflix.spinnaker.clouddriver.oracle.deploy.op - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller -import com.netflix.spinnaker.clouddriver.oracle.deploy.description.CreateLoadBalancerDescription -import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials -import com.oracle.bmc.loadbalancer.LoadBalancerClient -import com.oracle.bmc.loadbalancer.responses.CreateLoadBalancerResponse -import spock.lang.Specification - -class CreateOracleLoadBalancerAtomicOperationSpec extends Specification { - - def "Create load balancer"() { - setup: - def desc = new CreateLoadBalancerDescription() - desc.application = "foo" - desc.stack = "dev" - def creds = Mock(OracleNamedAccountCredentials) - def loadBalancerClient = Mock(LoadBalancerClient) - creds.loadBalancerClient >> loadBalancerClient - desc.credentials = creds - desc.healthCheck = new CreateLoadBalancerDescription.HealthCheck() - desc.listener = new CreateLoadBalancerDescription.Listener() - GroovySpy(OracleWorkRequestPoller, global: true) - - TaskRepository.threadLocalTask.set(Mock(Task)) - def op = new CreateOracleLoadBalancerAtomicOperation(desc) - - when: - op.operate(null) - - then: - - 1 * loadBalancerClient.createLoadBalancer(_) >> CreateLoadBalancerResponse.builder().opcWorkRequestId("wr1").build() - 1 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null - } - -} diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperationSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperationSpec.groovy index 71dc98dc108..f8d5b3769b0 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperationSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DestroyOracleServerGroupAtomicOperationSpec.groovy @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Oracle America, Inc. + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. * * The contents of this file are subject to the Apache License Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
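The reworked destroy spec below asserts that destroying a server group now shrinks the load balancer's backend set in place, removing only that group's private IPs, instead of deleting the whole backend set. A minimal sketch of that recomputation, reusing the Details converter introduced earlier in this diff (the class and method names here are illustrative, not from the source, and assume the same package as Details):

import com.oracle.bmc.loadbalancer.model.BackendDetails;
import com.oracle.bmc.loadbalancer.model.BackendSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

class BackendSetMembership {
  // Keep every backend whose IP is not owned by the server group being destroyed;
  // the survivors are resubmitted via UpdateBackendSetRequest rather than deleting the set.
  static List<BackendDetails> remainingBackends(BackendSet current, Set<String> groupPrivateIps) {
    return current.getBackends().stream()
        .filter(backend -> !groupPrivateIps.contains(backend.getIpAddress()))
        .map(Details::of) // Backend -> BackendDetails, as in Details.java above
        .collect(Collectors.toList());
  }
}

Against the spec's fixture below (backends .1 through .4, server group .2 and .4), this leaves exactly the two survivors the test asserts.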
@@ -12,11 +12,15 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller import com.netflix.spinnaker.clouddriver.oracle.deploy.description.DestroyOracleServerGroupDescription +import com.netflix.spinnaker.clouddriver.oracle.model.OracleInstance import com.netflix.spinnaker.clouddriver.oracle.model.OracleServerGroup import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServerGroupService import com.oracle.bmc.loadbalancer.LoadBalancerClient -import com.oracle.bmc.loadbalancer.responses.DeleteBackendSetResponse +import com.oracle.bmc.loadbalancer.model.* +import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest +import com.oracle.bmc.loadbalancer.responses.GetLoadBalancerResponse +import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse import spock.lang.Specification class DestroyOracleServerGroupAtomicOperationSpec extends Specification { @@ -34,15 +38,28 @@ class DestroyOracleServerGroupAtomicOperationSpec extends Specification { DestroyOracleServerGroupAtomicOperation op = new DestroyOracleServerGroupAtomicOperation(destroyDesc) op.oracleServerGroupService = sgService GroovySpy(OracleWorkRequestPoller, global: true) - + def backends = ['10.1.20.1', '10.1.20.2', '10.1.20.3','10.1.20.4'] + def srvGroup = ['10.1.20.2', '10.1.20.4'] //to be destroyed when: op.operate(null) then: + 1 * loadBalancerClient.getLoadBalancer(_) >> GetLoadBalancerResponse.builder().loadBalancer( + LoadBalancer.builder().backendSets(['myBackendSet': BackendSet.builder().backends( + backends.collect { Backend.builder().ipAddress(it).build() } ).build()]).build()).build() + 1 * sgService.getServerGroup(_, _, "sg1") >> + new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", backendSetName: 'myBackendSet', + instances: srvGroup.collect {new OracleInstance(id: it, privateIp: it)} as Set) 1 * sgService.destroyServerGroup(_, _, "sg1") - 1 * sgService.getServerGroup(_, _, "sg1") >> new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345") - 1 * loadBalancerClient.deleteBackendSet(_) >> DeleteBackendSetResponse.builder().opcWorkRequestId("wr1").build() + 1 * loadBalancerClient.updateBackendSet(_) >> { args -> + UpdateBackendSetRequest req = (UpdateBackendSetRequest) args[0] + def updatedBackendSet = req.updateBackendSetDetails.backends.collect {it.ipAddress} + assert updatedBackendSet.size() == 2 + assert updatedBackendSet.contains('10.1.20.1') + assert updatedBackendSet.contains('10.1.20.3') + UpdateBackendSetResponse.builder().opcWorkRequestId("wr1").build() + } 1 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null } } diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DisableOracleServerGroupAtomicOperationSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DisableOracleServerGroupAtomicOperationSpec.groovy index 7711f46b30c..f2f8b582835 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DisableOracleServerGroupAtomicOperationSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/DisableOracleServerGroupAtomicOperationSpec.groovy @@ -47,9 +47,5 @@ class DisableOracleServerGroupAtomicOperationSpec extends Specification { then: 1 * 
sgService.disableServerGroup(_, _, "sg1") - 1 * sgService.getServerGroup(_, _, "sg1") >> new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345") - 1 * loadBalancerClient.getLoadBalancer(_) >> GetLoadBalancerResponse.builder().loadBalancer(LoadBalancer.builder().listeners(["sg1": Listener.builder().name("sg1").build()]).build()).build() - 1 * loadBalancerClient.updateListener(_) >> UpdateListenerResponse.builder().opcWorkRequestId("wr1").build() - 1 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null } } diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/EnableOracleServerGroupAtomicOperationSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/EnableOracleServerGroupAtomicOperationSpec.groovy index b3798cfe01c..9164b634af4 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/EnableOracleServerGroupAtomicOperationSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/EnableOracleServerGroupAtomicOperationSpec.groovy @@ -46,12 +46,5 @@ class EnableOracleServerGroupAtomicOperationSpec extends Specification { then: 1 * sgService.enableServerGroup(_, _, "sg1") - 1 * sgService.getServerGroup(_, _, "sg1") >> new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345") - 1 * loadBalancerClient.getLoadBalancer(_) >> GetLoadBalancerResponse.builder() - .loadBalancer(LoadBalancer.builder() - .listeners(["sg1": Listener.builder() - .defaultBackendSetName("sg1-old").build()]).build()).build() - 1 * loadBalancerClient.updateListener(_) >> UpdateListenerResponse.builder().opcWorkRequestId("wr1").build() - 1 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null } } diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperationSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperationSpec.groovy index 084cdb59e49..acd7ccc3154 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperationSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/ResizeOracleServerGroupAtomicOperationSpec.groovy @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Oracle America, Inc. + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. * * The contents of this file are subject to the Apache License Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
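The resize specs that follow rely on the same submit-then-poll convention as the operation code earlier in this diff: every mutating load balancer call returns an OPC work request id, which the operation hands to OracleWorkRequestPoller before proceeding. A schematic helper, assuming the poll signature used in the operation code above; the class name, method name, and "RESIZE" phase label are illustrative:

import com.netflix.spinnaker.clouddriver.data.task.Task;
import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller;
import com.oracle.bmc.loadbalancer.LoadBalancerClient;
import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest;
import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse;

class SubmitThenPoll {
  // Submit the mutation, then block on the asynchronous work request it spawned.
  static void updateBackendSetAndWait(LoadBalancerClient client, UpdateBackendSetRequest request, Task task) {
    UpdateBackendSetResponse res = client.updateBackendSet(request);
    OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), "RESIZE", task, client);
  }
}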
@@ -13,6 +13,7 @@ import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.model.ServerGroup import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller import com.netflix.spinnaker.clouddriver.oracle.deploy.description.ResizeOracleServerGroupDescription +import com.netflix.spinnaker.clouddriver.oracle.model.OracleInstance import com.netflix.spinnaker.clouddriver.oracle.model.OracleServerGroup import com.netflix.spinnaker.clouddriver.oracle.provider.view.OracleClusterProvider import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials @@ -20,57 +21,166 @@ import com.netflix.spinnaker.clouddriver.oracle.service.servergroup.OracleServer import com.oracle.bmc.core.ComputeClient import com.oracle.bmc.core.VirtualNetworkClient import com.oracle.bmc.loadbalancer.LoadBalancerClient +import com.oracle.bmc.loadbalancer.model.Backend import com.oracle.bmc.loadbalancer.model.BackendSet import com.oracle.bmc.loadbalancer.model.HealthChecker import com.oracle.bmc.loadbalancer.model.LoadBalancer +import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest import com.oracle.bmc.loadbalancer.responses.GetLoadBalancerResponse import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse import spock.lang.Specification class ResizeOracleServerGroupAtomicOperationSpec extends Specification { - def "Resize server group"() { + def "resize up serverGroup from 2 to 4"() { setup: - def resizeDesc = new ResizeOracleServerGroupDescription() - resizeDesc.serverGroupName = "sg1" - resizeDesc.capacity = new ServerGroup.Capacity(desired: 3) - def creds = Mock(OracleNamedAccountCredentials) - def loadBalancerClient = Mock(LoadBalancerClient) - creds.loadBalancerClient >> loadBalancerClient - def computeClient = Mock(ComputeClient) - creds.computeClient >> computeClient - def networkClient = Mock(VirtualNetworkClient) - creds.networkClient >> networkClient - resizeDesc.credentials = creds - GroovySpy(OracleWorkRequestPoller, global: true) - + int targetSize = 4 + def resizeDesc = resize('sg1', targetSize) + def creds = resizeDesc.credentials + def loadBalancerClient = creds.loadBalancerClient TaskRepository.threadLocalTask.set(Mock(Task)) def sgService = Mock(OracleServerGroupService) ResizeOracleServerGroupAtomicOperation op = new ResizeOracleServerGroupAtomicOperation(resizeDesc) op.oracleServerGroupService = sgService + def sgProvider = Mock(OracleClusterProvider) + op.clusterProvider = sgProvider + def sgViewMock = Mock(ServerGroup) + def instanceCounts = new ServerGroup.InstanceCounts() + instanceCounts.setUp(targetSize) + instanceCounts.setTotal(targetSize) + def backends = ['10.1.20.1', '10.1.20.2', '10.1.20.3','10.1.20.4'] + def srvGroup = ['10.1.20.2', '10.1.20.4'] + def newGroup = ['10.1.20.2', '10.1.20.4', '10.1.20.5', '10.1.20.6'] + when: + op.operate(null) + then: + 1 * sgService.resizeServerGroup(_, _, "sg1", targetSize) + 2 * sgService.getServerGroup(_, _, "sg1") >> + new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", backendSetName: 'sg1BackendSet', name: "sg1", credentials: creds, + instances: srvGroup.collect {new OracleInstance(id: it, privateIp: it)} as Set) >> + new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", backendSetName: 'sg1BackendSet', name: "sg1", credentials: creds, + instances: newGroup.collect {new OracleInstance(id: it, privateIp: it)} as Set) + 1 * loadBalancerClient.getLoadBalancer(_) >> GetLoadBalancerResponse.builder() + 
.loadBalancer(LoadBalancer.builder() + .backendSets(["sg1BackendSet": BackendSet.builder() + .healthChecker(HealthChecker.builder().build()) + .backends( backends.collect { Backend.builder().ipAddress(it).build() } ) + .build()]).build()).build() + 1 * loadBalancerClient.updateBackendSet(_) >> { args -> + UpdateBackendSetRequest req = (UpdateBackendSetRequest) args[0] + def updatedBackendSet = req.updateBackendSetDetails.backends.collect {it.ipAddress} + assert updatedBackendSet.size() == 6 + assert updatedBackendSet.contains('10.1.20.1') + assert updatedBackendSet.contains('10.1.20.2') + assert updatedBackendSet.contains('10.1.20.5') + assert updatedBackendSet.contains('10.1.20.6') + UpdateBackendSetResponse.builder().opcWorkRequestId("wr1").build() + } + 1 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null + 1 * sgProvider.getServerGroup(_, _, _) >> sgViewMock + sgViewMock.instanceCounts >> instanceCounts + } + + def "resize down serverGroup from 3 to 1"() { + setup: + def sgName = 'sgDown' + def resizeDesc = resize(sgName, 1) + def creds = resizeDesc.credentials + def loadBalancerClient = creds.loadBalancerClient + TaskRepository.threadLocalTask.set(Mock(Task)) + def sgService = Mock(OracleServerGroupService) + ResizeOracleServerGroupAtomicOperation op = new ResizeOracleServerGroupAtomicOperation(resizeDesc) + op.oracleServerGroupService = sgService def sgProvider = Mock(OracleClusterProvider) op.clusterProvider = sgProvider def sgViewMock = Mock(ServerGroup) def instanceCounts = new ServerGroup.InstanceCounts() - instanceCounts.setUp(3) - instanceCounts.setTotal(3) + instanceCounts.setUp(1) + instanceCounts.setTotal(1) + def backends = ['10.1.20.1', '10.1.20.2', '10.1.20.3','10.1.20.4', '10.1.20.5', '10.1.20.6'] + def srvGroup = ['10.1.20.2', '10.1.20.4', '10.1.20.6'] + def newGroup = ['10.1.20.4'] when: op.operate(null) then: - 1 * sgService.resizeServerGroup(_, _, "sg1", 3) - 1 * sgService.getServerGroup(_, _, "sg1") >> new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", name: "sg1", credentials: creds) + 1 * sgService.resizeServerGroup(_, _, sgName, 1) + 2 * sgService.getServerGroup(_, _, sgName) >> + new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", backendSetName: 'sg1BackendSet', name: sgName, credentials: creds, + instances: srvGroup.collect {new OracleInstance(id: it, privateIp: it)} as Set) >> + new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", backendSetName: 'sg1BackendSet', name: sgName, credentials: creds, + instances: newGroup.collect {new OracleInstance(id: it, privateIp: it)} as Set) 1 * loadBalancerClient.getLoadBalancer(_) >> GetLoadBalancerResponse.builder() .loadBalancer(LoadBalancer.builder() - .backendSets(["sg1-template": BackendSet.builder() + .backendSets(["sg1BackendSet": BackendSet.builder() .healthChecker(HealthChecker.builder().build()) + .backends( backends.collect { Backend.builder().ipAddress(it).build() } ) .build()]).build()).build() - 1 * loadBalancerClient.updateBackendSet(_) >> UpdateBackendSetResponse.builder().opcWorkRequestId("wr1").build() + 1 * loadBalancerClient.updateBackendSet(_) >> { args -> + UpdateBackendSetRequest req = (UpdateBackendSetRequest) args[0] + def updatedBackendSet = req.updateBackendSetDetails.backends.collect {it.ipAddress} + assert updatedBackendSet.size() == 4 + assert updatedBackendSet.contains('10.1.20.1') + assert updatedBackendSet.contains('10.1.20.3') + assert updatedBackendSet.contains('10.1.20.5') + assert updatedBackendSet.contains('10.1.20.4') + 
UpdateBackendSetResponse.builder().opcWorkRequestId("wr1").build() + } 1 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null 1 * sgProvider.getServerGroup(_, _, _) >> sgViewMock sgViewMock.instanceCounts >> instanceCounts } + + def 'resize same size serverGroup'() { + setup: + int targetSize = 2 + def resizeDesc = resize('sgSame', targetSize) + def creds = resizeDesc.credentials + def loadBalancerClient = creds.loadBalancerClient + + TaskRepository.threadLocalTask.set(Mock(Task)) + def sgService = Mock(OracleServerGroupService) + ResizeOracleServerGroupAtomicOperation op = new ResizeOracleServerGroupAtomicOperation(resizeDesc) + op.oracleServerGroupService = sgService + def sgProvider = Mock(OracleClusterProvider) + op.clusterProvider = sgProvider + def sgViewMock = Mock(ServerGroup) + def instanceCounts = new ServerGroup.InstanceCounts() + instanceCounts.setUp(targetSize) + instanceCounts.setTotal(targetSize) + def srvGroup = ['10.1.20.2', '10.1.20.4'] + + when: + op.operate(null) + + then: + 0 * sgService.resizeServerGroup(_, _, "sgSame", targetSize) + 1 * sgService.getServerGroup(_, _, "sgSame") >> + new OracleServerGroup(loadBalancerId: "ocid.lb.oc1..12345", backendSetName: 'sg1BackendSet', name: "sgSame", credentials: creds, + instances: srvGroup.collect {new OracleInstance(id: it, privateIp: it)} as Set) + 0 * loadBalancerClient.getLoadBalancer(_) + 0 * loadBalancerClient.updateBackendSet(_) + 0 * OracleWorkRequestPoller.poll("wr1", _, _, loadBalancerClient) >> null + 0 * sgProvider.getServerGroup(_, _, _) >> sgViewMock + sgViewMock.instanceCounts >> instanceCounts + } + + ResizeOracleServerGroupDescription resize(String sgName, int targetSize) { + def resizeDesc = new ResizeOracleServerGroupDescription() + resizeDesc.serverGroupName = sgName + resizeDesc.capacity = new ServerGroup.Capacity(desired: targetSize) + def creds = Mock(OracleNamedAccountCredentials) + def loadBalancerClient = Mock(LoadBalancerClient) + creds.loadBalancerClient >> loadBalancerClient + def computeClient = Mock(ComputeClient) + creds.computeClient >> computeClient + def networkClient = Mock(VirtualNetworkClient) + creds.networkClient >> networkClient + resizeDesc.credentials = creds + GroovySpy(OracleWorkRequestPoller, global: true) + return resizeDesc + } } diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/UpsertOracleLoadBalancerAtomicOperationSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/UpsertOracleLoadBalancerAtomicOperationSpec.groovy new file mode 100644 index 00000000000..a45be9bfe85 --- /dev/null +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/op/UpsertOracleLoadBalancerAtomicOperationSpec.groovy @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.op + +import com.fasterxml.jackson.core.type.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller +import com.netflix.spinnaker.clouddriver.oracle.deploy.converter.UpsertOracleLoadBalancerAtomicOperationConverter +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.UpsertLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.oracle.bmc.loadbalancer.LoadBalancerClient +import com.oracle.bmc.loadbalancer.model.Certificate +import com.oracle.bmc.loadbalancer.model.CreateLoadBalancerDetails +import com.oracle.bmc.loadbalancer.model.BackendDetails +import com.oracle.bmc.loadbalancer.model.BackendSet +import com.oracle.bmc.loadbalancer.model.BackendSetDetails +import com.oracle.bmc.loadbalancer.model.CreateBackendSetDetails +import com.oracle.bmc.loadbalancer.model.HealthCheckerDetails +import com.oracle.bmc.loadbalancer.model.Listener +import com.oracle.bmc.loadbalancer.model.ListenerDetails +import com.oracle.bmc.loadbalancer.model.LoadBalancer +import com.oracle.bmc.loadbalancer.model.UpdateBackendSetDetails +import com.oracle.bmc.loadbalancer.model.UpdateListenerDetails +import com.oracle.bmc.loadbalancer.requests.CreateBackendSetRequest +import com.oracle.bmc.loadbalancer.requests.CreateCertificateRequest +import com.oracle.bmc.loadbalancer.requests.CreateListenerRequest +import com.oracle.bmc.loadbalancer.requests.CreateLoadBalancerRequest +import com.oracle.bmc.loadbalancer.requests.DeleteBackendSetRequest +import com.oracle.bmc.loadbalancer.requests.DeleteCertificateRequest +import com.oracle.bmc.loadbalancer.requests.DeleteListenerRequest +import com.oracle.bmc.loadbalancer.requests.UpdateBackendSetRequest +import com.oracle.bmc.loadbalancer.requests.UpdateListenerRequest +import com.oracle.bmc.loadbalancer.responses.CreateBackendSetResponse +import com.oracle.bmc.loadbalancer.responses.CreateCertificateResponse +import com.oracle.bmc.loadbalancer.responses.CreateListenerResponse +import com.oracle.bmc.loadbalancer.responses.CreateLoadBalancerResponse +import com.oracle.bmc.loadbalancer.responses.DeleteBackendSetResponse +import com.oracle.bmc.loadbalancer.responses.DeleteCertificateResponse +import com.oracle.bmc.loadbalancer.responses.DeleteListenerResponse +import com.oracle.bmc.loadbalancer.responses.GetLoadBalancerResponse +import com.oracle.bmc.loadbalancer.responses.UpdateBackendSetResponse +import com.oracle.bmc.loadbalancer.responses.UpdateListenerResponse +import spock.lang.Shared +import spock.lang.Specification + +class UpsertOracleLoadBalancerAtomicOperationSpec extends Specification { + + @Shared + ObjectMapper mapper = new ObjectMapper() + + @Shared + UpsertOracleLoadBalancerAtomicOperationConverter converter + + def setupSpec() { + this.converter = new UpsertOracleLoadBalancerAtomicOperationConverter(objectMapper: mapper) + converter.accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter.accountCredentialsProvider.getCredentials(_) >> 
Mock(OracleNamedAccountCredentials) + } + + def "Create LoadBalancer"() { + setup: + def req = read('createLoadBalancer1.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + + def creds = Mock(OracleNamedAccountCredentials) + def loadBalancerClient = Mock(LoadBalancerClient) + creds.loadBalancerClient >> loadBalancerClient + desc.credentials = creds + + OracleWorkRequestPoller.poller = Mock(OracleWorkRequestPoller) + + TaskRepository.threadLocalTask.set(Mock(Task)) + def op = new UpsertOracleLoadBalancerAtomicOperation(desc) + + when: + op.operate(null) + + then: + + 1 * loadBalancerClient.createLoadBalancer(_) >> { args -> + CreateLoadBalancerDetails lb = args[0].getCreateLoadBalancerDetails() + def listener = lb.listeners.get('HTTP_80') + assert lb.getIsPrivate() + assert lb.getShapeName() == '400Mbps' + assert lb.listeners.size() == 1 + assert listener.port == 80 + assert listener.protocol == 'HTTP' + assert listener.defaultBackendSetName == 'backendSet1' + assert lb.backendSets.size() == 1 + assert lb.backendSets.backendSet1.policy == 'ROUND_ROBIN' + assert lb.backendSets.backendSet1.healthChecker.port == 80 + assert lb.backendSets.backendSet1.healthChecker.protocol == 'HTTP' + assert lb.backendSets.backendSet1.healthChecker.urlPath == '/healthZ' + CreateLoadBalancerResponse.builder().opcWorkRequestId("wr1").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr1", _, _, loadBalancerClient) >> null + } + + def "Create LoadBalancer with 2 Listeners"() { + setup: + def req = read('createLoadBalancer2.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + + def creds = Mock(OracleNamedAccountCredentials) + def loadBalancerClient = Mock(LoadBalancerClient) + creds.loadBalancerClient >> loadBalancerClient + desc.credentials = creds + + OracleWorkRequestPoller.poller = Mock(OracleWorkRequestPoller) + + TaskRepository.threadLocalTask.set(Mock(Task)) + def op = new UpsertOracleLoadBalancerAtomicOperation(desc) + + when: + op.operate(null) + + then: + + 1 * loadBalancerClient.createLoadBalancer(_) >> { args -> + CreateLoadBalancerDetails lb = args[0].getCreateLoadBalancerDetails() + assert lb.getIsPrivate() + assert lb.listeners.size() == 2 + assert lb.listeners.httpListener.port == 8080 + assert lb.listeners.httpListener.protocol == 'HTTP' + assert lb.listeners.httpsListener.port == 8081 + assert lb.listeners.httpsListener.protocol == 'HTTPS' + assert lb.backendSets.size() == 1 + assert lb.backendSets.myBackendSet.policy == 'ROUND_ROBIN' + assert lb.backendSets.myBackendSet.healthChecker.port == 80 + CreateLoadBalancerResponse.builder().opcWorkRequestId("wr1").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr1", _, _, loadBalancerClient) >> null + } + + def "Update LoadBalancer with BackendSets"() { + setup: + def loadBalancerId = 'updateLoadBalancerBackendSets'; + def req = read('updateLoadBalancerBackendSets.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + + def creds = Mock(OracleNamedAccountCredentials) + def loadBalancerClient = Mock(LoadBalancerClient) + creds.loadBalancerClient >> loadBalancerClient + desc.credentials = creds + + OracleWorkRequestPoller.poller = Mock(OracleWorkRequestPoller) + + TaskRepository.threadLocalTask.set(Mock(Task)) + def op = new UpsertOracleLoadBalancerAtomicOperation(desc) + def backendSets = [ + // to be removed + 'myBackendSet0': BackendSet.builder().name('myBackendSet0').backends([]).build(), + // to be updated + 'myBackendSet1': 
BackendSet.builder().name('myBackendSet1').backends([]).build(), + ] + + when: + op.operate(null) + + then: + 1 * loadBalancerClient.getLoadBalancer(_) >> + GetLoadBalancerResponse.builder().loadBalancer(LoadBalancer.builder().id(loadBalancerId).backendSets(backendSets).build()).build() + 1 * loadBalancerClient.deleteBackendSet(_) >> { args -> + DeleteBackendSetRequest delBksReq = args[0] + assert delBksReq.getLoadBalancerId() == loadBalancerId + assert delBksReq.getBackendSetName() == 'myBackendSet0' + DeleteBackendSetResponse.builder().opcWorkRequestId("wr0").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr0", _, _, loadBalancerClient) >> null + 1 * loadBalancerClient.updateBackendSet(_) >> { args -> + UpdateBackendSetRequest upBksReq = args[0] + assert upBksReq.getLoadBalancerId() == loadBalancerId + assert upBksReq.getBackendSetName() == 'myBackendSet1' + UpdateBackendSetResponse.builder().opcWorkRequestId("wr1").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr1", _, _, loadBalancerClient) >> null + 1 * loadBalancerClient.createBackendSet(_) >> { args -> + CreateBackendSetRequest crBksReq = args[0] + assert crBksReq.getLoadBalancerId() == loadBalancerId + assert crBksReq.getCreateBackendSetDetails().getName() == 'myBackendSet2' + CreateBackendSetResponse.builder().opcWorkRequestId("wr2").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr2", _, _, loadBalancerClient) >> null + } + + def "Update LoadBalancer with Certificates"() { + setup: + def loadBalancerId = 'updateLoadBalancerCerts'; + def req = read('updateLoadBalancerCerts.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + + def creds = Mock(OracleNamedAccountCredentials) + def loadBalancerClient = Mock(LoadBalancerClient) + creds.loadBalancerClient >> loadBalancerClient + desc.credentials = creds + + OracleWorkRequestPoller.poller = Mock(OracleWorkRequestPoller) + + TaskRepository.threadLocalTask.set(Mock(Task)) + def op = new UpsertOracleLoadBalancerAtomicOperation(desc) + def certs = [ + // to be removed + 'cert0': Certificate.builder().certificateName('cert0').publicCertificate("cert0_pub").build(), + // to keep + 'cert1': Certificate.builder().certificateName('cert1').publicCertificate("cert1_pub").build(), + ] + + when: + op.operate(null) + + then: + 1 * loadBalancerClient.getLoadBalancer(_) >> + GetLoadBalancerResponse.builder().loadBalancer(LoadBalancer.builder().id(loadBalancerId).certificates(certs).build()).build() + 1 * loadBalancerClient.deleteCertificate(_) >> { args -> + DeleteCertificateRequest delCert = args[0] + assert delCert.getLoadBalancerId() == loadBalancerId + assert delCert.certificateName == 'cert0' + DeleteCertificateResponse.builder().opcWorkRequestId("wr0").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr0", _, _, loadBalancerClient) >> null + 1 * loadBalancerClient.createCertificate(_) >> { args -> + CreateCertificateRequest crCertReq = args[0] + assert crCertReq.getLoadBalancerId() == loadBalancerId + assert crCertReq.getCreateCertificateDetails().certificateName == 'cert2' + CreateCertificateResponse.builder().opcWorkRequestId("wr2").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr2", _, _, loadBalancerClient) >> null + } + + def "Update LoadBalancer with Listeners"() { + setup: + def loadBalancerId = 'updateLoadBalancerListeners'; + def req = read('updateLoadBalancerListeners.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + + def creds = Mock(OracleNamedAccountCredentials) + def loadBalancerClient = 
Mock(LoadBalancerClient) + creds.loadBalancerClient >> loadBalancerClient + desc.credentials = creds + + OracleWorkRequestPoller.poller = Mock(OracleWorkRequestPoller) + + TaskRepository.threadLocalTask.set(Mock(Task)) + def op = new UpsertOracleLoadBalancerAtomicOperation(desc) + def listeners = [ + // to be removed + 'httpListener0': Listener.builder().name('httpListener0').protocol('HTTP').port(80).build(), + // to be updated + 'httpListener1': Listener.builder().name('httpListener1').protocol('HTTP').port(81).build(), + ] + + when: + op.operate(null) + + then: + 1 * loadBalancerClient.getLoadBalancer(_) >> + GetLoadBalancerResponse.builder().loadBalancer(LoadBalancer.builder().id(loadBalancerId) + .listeners(listeners).build()).build() + 1 * loadBalancerClient.deleteListener(_) >> { args -> + DeleteListenerRequest dlLis = args[0] + assert dlLis.getLoadBalancerId() == loadBalancerId + assert dlLis.listenerName == 'httpListener0' + DeleteListenerResponse.builder().opcWorkRequestId("wr0").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr0", _, _, loadBalancerClient) >> null + 1 * loadBalancerClient.updateListener(_) >> { args -> + UpdateListenerRequest upLis = args[0] + assert upLis.getLoadBalancerId() == loadBalancerId + assert upLis.listenerName == 'httpListener1' + assert upLis.updateListenerDetails.port == 8081 + UpdateListenerResponse.builder().opcWorkRequestId("wr1").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr1", _, _, loadBalancerClient) >> null + 1 * loadBalancerClient.createListener(_) >> { args -> + CreateListenerRequest crLis = args[0] + assert crLis.getLoadBalancerId() == loadBalancerId + assert crLis.createListenerDetails.name == 'httpsListener' + assert crLis.createListenerDetails.port == 8082 + CreateListenerResponse.builder().opcWorkRequestId("wr2").build() + } + 1 * OracleWorkRequestPoller.poller.wait("wr2", _, _, loadBalancerClient) >> null + } + + def read(String fileName) { + def json = new File(getClass().getResource('/desc/' + fileName).toURI()).text + List<Map<String, Object>> data = mapper.readValue(json, new TypeReference<List<Map<String, Object>>>(){}); + return data; + } +} diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidatorSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidatorSpec.groovy index 58edc8af632..cfcd0b322e0 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidatorSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/BasicOracleDeployDescriptionValidatorSpec.groovy @@ -9,9 +9,9 @@ +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.model.ServerGroup import com.netflix.spinnaker.clouddriver.oracle.deploy.description.BasicOracleDeployDescription -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification @@ -26,7 +26,7 @@ class BasicOracleDeployDescriptionValidatorSpec extends Specification { void "invalid description fails validation"() { setup: def description = new BasicOracleDeployDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -49,7 +49,7 @@ class BasicOracleDeployDescriptionValidatorSpec extends 
Specification { application: "spinnaker-test-v000" ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -70,7 +70,7 @@ class BasicOracleDeployDescriptionValidatorSpec extends Specification { capacity: new ServerGroup.Capacity(min: 3, max: 1) ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -90,7 +90,7 @@ class BasicOracleDeployDescriptionValidatorSpec extends Specification { targetSize: -1 ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidatorSpec.groovy index ae4a185174a..d5abbfc25c5 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/DestroyOracleServerGroupDescriptionValidatorSpec.groovy @@ -8,8 +8,8 @@ */ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.deploy.description.DestroyOracleServerGroupDescription -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification @@ -25,7 +25,7 @@ class DestroyOracleServerGroupDescriptionValidatorSpec extends Specification { void "invalid description fails validation"() { setup: def description = new DestroyOracleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -44,7 +44,7 @@ class DestroyOracleServerGroupDescriptionValidatorSpec extends Specification { serverGroupName: "my-group-01" ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidatorSpec.groovy index 89fa8e72a41..5b584423fd8 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/EnableDisableOracleServerGroupDescriptionValidatorSpec.groovy @@ -8,8 +8,8 @@ */ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.deploy.description.EnableDisableOracleServerGroupDescription -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification @@ -25,7 +25,7 @@ class EnableDisableOracleServerGroupDescriptionValidatorSpec extends Specificati void "invalid description fails validation"() { setup: def description = new EnableDisableOracleServerGroupDescription() - def errors = Mock(Errors) + def errors = 
Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -41,7 +41,7 @@ class EnableDisableOracleServerGroupDescriptionValidatorSpec extends Specificati region: "us-phoenix-1", accountName: "DEFAULT" ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidatorSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidatorSpec.groovy index c0770512e03..2818227624b 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidatorSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/ResizeOracleServerGroupDescriptionValidatorSpec.groovy @@ -8,8 +8,8 @@ */ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.oracle.deploy.description.ResizeOracleServerGroupDescription -import org.springframework.validation.Errors import spock.lang.Shared import spock.lang.Specification @@ -24,7 +24,7 @@ class ResizeOracleServerGroupDescriptionValidatorSpec extends Specification { void "invalid description fails validation"() { setup: def description = new ResizeOracleServerGroupDescription() - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) @@ -43,7 +43,7 @@ class ResizeOracleServerGroupDescriptionValidatorSpec extends Specification { accountName: "DEFAULT" ) - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) when: validator.validate([], description, errors) diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidatorSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidatorSpec.groovy index 1ab323eab1e..21d4f66a761 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidatorSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/StandardOracleAttributeValidatorSpec.groovy @@ -9,21 +9,21 @@ package com.netflix.spinnaker.clouddriver.oracle.deploy.validator -import org.springframework.validation.Errors +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import spock.lang.Specification class StandardOracleAttributeValidatorSpec extends Specification { void "validateNotEmptyString ok"() { setup: - def errors = Mock(Errors) + def errors = Mock(ValidationErrors) def validator = new StandardOracleAttributeValidator() { @Override - void validate(List priorDescriptions, def description, Errors err) { + void validate(List priorDescriptions, def description, ValidationErrors err) { context = "standardOracleAttributeValidator" } } - + when: validator.validateNotEmptyString(errors, "DEFAULT", "accountName") then: diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/UpsertLoadBalancerDescriptionValidatorSpec.groovy 
b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/UpsertLoadBalancerDescriptionValidatorSpec.groovy new file mode 100644 index 00000000000..ff041bad341 --- /dev/null +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/deploy/validator/UpsertLoadBalancerDescriptionValidatorSpec.groovy @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved. + * + * The contents of this file are subject to the Apache License Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * If a copy of the Apache License Version 2.0 was not distributed with this file, + * You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.html + */ +package com.netflix.spinnaker.clouddriver.oracle.deploy.validator + +import com.fasterxml.jackson.core.type.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.oracle.deploy.converter.UpsertOracleLoadBalancerAtomicOperationConverter +import com.netflix.spinnaker.clouddriver.oracle.deploy.description.UpsertLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.oracle.deploy.op.UpsertOracleLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.oracle.deploy.OracleWorkRequestPoller +import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.oracle.bmc.loadbalancer.LoadBalancerClient +import com.oracle.bmc.loadbalancer.model.CreateLoadBalancerDetails +import com.oracle.bmc.loadbalancer.requests.CreateLoadBalancerRequest +import com.oracle.bmc.loadbalancer.responses.CreateLoadBalancerResponse +import spock.lang.Shared +import spock.lang.Specification + +class UpsertLoadBalancerDescriptionValidatorSpec extends Specification { + + @Shared ObjectMapper mapper = new ObjectMapper() + @Shared UpsertOracleLoadBalancerAtomicOperationConverter converter + @Shared UpsertLoadBalancerDescriptionValidator validator + @Shared String context = 'upsertLoadBalancerDescriptionValidator.' 
+ + + def setupSpec() { + this.converter = new UpsertOracleLoadBalancerAtomicOperationConverter(objectMapper: mapper) + converter.accountCredentialsProvider = Mock(AccountCredentialsProvider) + converter.accountCredentialsProvider.getCredentials(_) >> Mock(OracleNamedAccountCredentials) + validator = new UpsertLoadBalancerDescriptionValidator() + } + + def "Create LoadBalancer with invalid Cert"() { + setup: + def req = read('createLoadBalancer_invalidCert.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], desc, errors) + + then: + 2 * errors.rejectValue('certificate.privateKey', + context + 'certificate.privateKey.empty') + 2 * errors.rejectValue('certificate.certificateName', + context + 'certificate.certificateName.empty') + 1 * errors.rejectValue('certificate.publicCertificate', + context + 'certificate.publicCertificate.empty') + } + + def "Create LoadBalancer with invalid Listener"() { + setup: + def req = read('createLoadBalancer_invalidListener.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], desc, errors) + + then: + 3 * errors.rejectValue('listener.defaultBackendSetName', + context + 'listener.defaultBackendSetName.empty') + 2 * errors.rejectValue('listener.protocol', + context + 'listener.protocol.empty') + 1 * errors.rejectValue('listener.port', + context + 'listener.port.null') + } + + def "Create LoadBalancer with invalid BackendSet"() { + setup: + def req = read('createLoadBalancer_invalidBackendSet.json') + def desc = converter.convertDescription(req[0].upsertLoadBalancer) + def errors = Mock(ValidationErrors) + + when: + validator.validate([], desc, errors) + + then: + 2 * errors.rejectValue('backendSet.name', + context + 'backendSet.name.exceedsLimit') + 1 * errors.rejectValue('backendSet.healthChecker', + context + 'backendSet.healthChecker.null') + 1 * errors.rejectValue('backendSet.policy', + context + 'backendSet.policy.empty') + 1 * errors.rejectValue('backendSet.healthChecker.protocol', + context + 'backendSet.healthChecker.protocol.empty') + 1 * errors.rejectValue('backendSet.healthChecker.urlPath', + context + 'backendSet.healthChecker.urlPath.empty') + 1 * errors.rejectValue('backendSet.healthChecker.port', + context + 'backendSet.healthChecker.port.null') + } + + def read(String fileName) { + def json = new File(getClass().getResource('/desc/' + fileName).toURI()).text + List<Map<String, Object>> data = mapper.readValue(json, new TypeReference<List<Map<String, Object>>>(){}); + return data; + } +} diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProviderSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProviderSpec.groovy index abafca1aa9a..05348240d7f 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProviderSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/provider/view/OracleSecurityGroupProviderSpec.groovy @@ -22,7 +22,6 @@ import com.oracle.bmc.core.model.SecurityList import com.oracle.bmc.core.model.TcpOptions import spock.lang.Specification -@spock.lang.Ignore("pass on local runs, failed on travisCI.") class OracleSecurityGroupProviderSpec extends Specification { ObjectMapper objectMapper = new ObjectMapper().setFilterProvider(new 
SimpleFilterProvider().setFailOnUnknownId(false)) diff --git a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupServiceSpec.groovy b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupServiceSpec.groovy index 11d2aba97ac..de711b10c63 100644 --- a/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupServiceSpec.groovy +++ b/clouddriver-oracle/src/test/groovy/com/netflix/spinnaker/clouddriver/oracle/service/servergroup/DefaultOracleServerGroupServiceSpec.groovy @@ -15,6 +15,7 @@ import com.netflix.spinnaker.clouddriver.oracle.security.OracleNamedAccountCrede import com.oracle.bmc.Region import com.oracle.bmc.core.ComputeClient import com.oracle.bmc.core.model.Instance +import com.oracle.bmc.core.requests.LaunchInstanceRequest import com.oracle.bmc.core.responses.LaunchInstanceResponse import spock.lang.Specification @@ -22,11 +23,13 @@ class DefaultOracleServerGroupServiceSpec extends Specification { def "create server group"() { setup: + def SSHKeys = "ssh-rsa ABC a@b" def creds = Mock(OracleNamedAccountCredentials) creds.getName() >> "foo" creds.getRegion() >> Region.US_PHOENIX_1.regionId creds.getComputeClient() >> Mock(ComputeClient) def persistence = Mock(OracleServerGroupPersistence) + def task = Mock(Task) def sgService = new DefaultOracleServerGroupService(persistence) when: @@ -41,22 +44,64 @@ class DefaultOracleServerGroupServiceSpec extends Specification { "shape" : "small", "vpcId" : "ocid.vcn.123", "subnetId" : "ocid.subnet.123", + "sshAuthorizedKeys" : SSHKeys, "createdTime" : System.currentTimeMillis() ], targetSize: 4, credentials: creds ) - sgService.createServerGroup(sg) + sgService.createServerGroup(task, sg) then: - 4 * creds.computeClient.launchInstance(_) >> LaunchInstanceResponse.builder().instance( - Instance.builder().timeCreated(new Date()).build() - ).build() + 4 * creds.computeClient.launchInstance(_) >> { args -> + LaunchInstanceRequest argumentRequest = (LaunchInstanceRequest) args[0] + assert argumentRequest.getLaunchInstanceDetails().getMetadata().get("ssh_authorized_keys") == SSHKeys + return LaunchInstanceResponse.builder().instance(Instance.builder().timeCreated(new Date()).build()).build() + } 1 * persistence.upsertServerGroup(_) } + + def "create server group over limit"() { + setup: + def creds = Mock(OracleNamedAccountCredentials) + creds.getName() >> "foo" + creds.getRegion() >> Region.US_PHOENIX_1.regionId + creds.getComputeClient() >> Mock(ComputeClient) + def persistence = Mock(OracleServerGroupPersistence) + def task = Mock(Task) + def sgService = new DefaultOracleServerGroupService(persistence) + + when: + def sg = new OracleServerGroup( + name: "sg1", + region: creds.region, + zone: "ad1", + launchConfig: [ + "availabilityDomain": "ad1", + "compartmentId" : "ocid.compartment.123", + "imageId" : "ocid.image.123", + "shape" : "small", + "vpcId" : "ocid.vcn.123", + "subnetId" : "ocid.subnet.123", + "createdTime" : System.currentTimeMillis() + ], + targetSize: 3, + credentials: creds + ) + sgService.createServerGroup(task, sg) + + then: + 3 * creds.computeClient.launchInstance(_) >> launchResponse() >> launchResponse() >> + { throw new com.oracle.bmc.model.BmcException(400, 'LimitExceeded', 'LimitExceeded', 'LimitExceeded') } + 1 * persistence.upsertServerGroup(_) >> { args -> + OracleServerGroup serverGroup = (OracleServerGroup) args[0] + assert 
serverGroup.instances.size() == 2 + } + } def "resize (increase) server group"() { setup: + def SSHKeys = null def creds = Mock(OracleNamedAccountCredentials) creds.getName() >> "foo" creds.getRegion() >> Region.US_PHOENIX_1.regionId @@ -75,8 +120,12 @@ class DefaultOracleServerGroupServiceSpec extends Specification { "shape" : "small", "vpcId" : "ocid.vcn.123", "subnetId" : "ocid.subnet.123", + "sshAuthorizedKeys" : SSHKeys, "createdTime" : System.currentTimeMillis() ], + instances: [ + new OracleInstance(name: "a") + ], targetSize: 1, credentials: creds ) @@ -85,11 +134,64 @@ class DefaultOracleServerGroupServiceSpec extends Specification { def resized = sgService.resizeServerGroup(task, creds, "sg1", 5) then: - 4 * creds.computeClient.launchInstance(_) >> LaunchInstanceResponse.builder().instance( - Instance.builder().timeCreated(new Date()).build() - ).build() + 4 * creds.computeClient.launchInstance(_) >> { args -> + LaunchInstanceRequest argumentRequest = (LaunchInstanceRequest) args[0] + assert argumentRequest.getLaunchInstanceDetails().getMetadata().get("ssh_authorized_keys") == SSHKeys + return LaunchInstanceResponse.builder().instance(Instance.builder().timeCreated(new Date()).build()).build() + } 1 * persistence.getServerGroupByName(_, "sg1") >> sg - 1 * persistence.upsertServerGroup(_) + 1 * persistence.upsertServerGroup(_) >> { args -> + OracleServerGroup serverGroup = (OracleServerGroup) args[0] + assert serverGroup.instances.size() == 5 + assert serverGroup.targetSize == 5 + } + resized == true + } + + def "resize (increase) server group over limit"() { + setup: + def creds = Mock(OracleNamedAccountCredentials) + creds.getName() >> "foo" + creds.getRegion() >> Region.US_PHOENIX_1.regionId + creds.getComputeClient() >> Mock(ComputeClient) + def task = Mock(Task) + def persistence = Mock(OracleServerGroupPersistence) + def sgService = new DefaultOracleServerGroupService(persistence) + def sg = new OracleServerGroup( + name: "sg1", + region: creds.region, + zone: "ad1", + launchConfig: [ + "availabilityDomain": "ad1", + "compartmentId" : "ocid.compartment.123", + "imageId" : "ocid.image.123", + "shape" : "small", + "vpcId" : "ocid.vcn.123", + "subnetId" : "ocid.subnet.123", + "createdTime" : System.currentTimeMillis() + ], + instances: [ + new OracleInstance(name: "a") + ], + targetSize: 1, + credentials: creds + ) + + when: + def resized = sgService.resizeServerGroup(task, creds, "sg1", 5) + + then: + 4 * creds.computeClient.launchInstance(_) >> + launchResponse() >> + launchResponse() >> + launchResponse() >> + { throw new com.oracle.bmc.model.BmcException(400, 'LimitExceeded', 'LimitExceeded', 'LimitExceeded') } + 1 * persistence.getServerGroupByName(_, "sg1") >> sg + 1 * persistence.upsertServerGroup(_) >> { args -> + OracleServerGroup serverGroup = (OracleServerGroup) args[0] + assert serverGroup.instances.size() == 4 + assert serverGroup.targetSize == 4 + } resized == true } @@ -336,5 +438,9 @@ class DefaultOracleServerGroupServiceSpec extends Specification { 1 * persistence.listServerGroupNames(_) >> ["foo-test-v001", "foo-v002", "foo-edge-v001", "foo-test-v002", "bar-v001"] serverGroups == ["foo-test-v001", "foo-test-v002"] } - + + LaunchInstanceResponse launchResponse() { + LaunchInstanceResponse.builder().instance( + Instance.builder().timeCreated(new Date()).build()).build() + } } diff --git a/clouddriver-oracle/src/test/resources/desc/createLoadBalancer1.json b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer1.json new file mode 100644 index 
00000000000..4b623415380 --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer1.json @@ -0,0 +1,46 @@ +[ + { + "upsertLoadBalancer": { + "name": "deva18app-deck-sep134", + "cloudProvider": "oracle", + "credentials": "myacct", + "region": "us-ashburn-1", + "shape": "400Mbps", + "isPrivate": true, + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "listeners": { + "HTTP_80": { + "name": "HTTP_80", + "port": 80, + "protocol": "HTTP", + "defaultBackendSetName": "backendSet1", + "isSsl": false + } + }, + "hostnames": [], + "backendSets": { + "backendSet1": { + "name": "backendSet1", + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": 80, + "urlPath": "/healthZ" + } + } + }, + "freeformTags": {}, + "vpcId": "ocid1.vcn.oc1.iad.aaaaaaaabkdqmjdle6xcqlbnx7gdw3u4d4ra7cvhiddtsjebqllfdmv5arfq", + "stack": "deck", + "detail": "sep134", + "type": "upsertLoadBalancer", + "application": "deva18app", + "loadBalancerName": "deva18app-deck-sep134", + "user": "anonymous", + "refId": "0", + "requisiteStageRefIds": [] + } + } +] diff --git a/clouddriver-oracle/src/test/resources/desc/createLoadBalancer2.json b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer2.json new file mode 100644 index 00000000000..7d14ba4f87c --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer2.json @@ -0,0 +1,33 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "backendSets": { + "myBackendSet": { + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": "80", + "urlPath": "/healthCheck" + } + } + }, + "listeners": { + "httpListener": { "port": "8080", "protocol": "HTTP", "defaultBackendSetName" : "myBackendSet" }, + "httpsListener": { "port": "8081", "protocol": "HTTPS", "defaultBackendSetName" : "myBackendSet" } + }, + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + "credentials": "myacct" + } + } +] diff --git a/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidBackendSet.json b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidBackendSet.json new file mode 100644 index 00000000000..da999585ec0 --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidBackendSet.json @@ -0,0 +1,39 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "backendSets": { + "myBackendSet1______________________________________________LongerThan32Chars!!!!!": { + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": "80", + "urlPath": "/healthCheck" + } + }, + "myBackendSet2______________________________________________LongerThan32Chars!!!!!": { + "policy": "ROUND_ROBIN" + }, + "myBackendSet3": { + "healthChecker": { + } + } + }, + "listeners": { + "httpListener": { "port": "8080", "protocol": "HTTP", "defaultBackendSetName" : "myBackendSet" } + }, + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + "credentials": "myacct" + } + } +] diff --git 
a/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidCert.json b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidCert.json new file mode 100644 index 00000000000..a5bf1c9d89c --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidCert.json @@ -0,0 +1,39 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "backendSets": { + "myBackendSet": { + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": "80", + "urlPath": "/healthCheck" + } + } + }, + "certificates" : { + "myCert1": { "certificateName": "myCert1" }, + "myCert2": { "privateKey": "myPrivateKey" }, + "myCert3": { "wrong_name": "foo" }, + "myCert4": { "certificateName": "myCert4", "publicCertificate": ""}, + "myCert5": { "certificateName": "myCert5", "publicCertificate": "x"} + }, + "listeners": { + "httpListener": { "port": "8080", "protocol": "HTTP", "defaultBackendSetName" : "myBackendSet" } + }, + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + "credentials": "myacct" + } + } +] diff --git a/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidListener.json b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidListener.json new file mode 100644 index 00000000000..38536617bb0 --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/createLoadBalancer_invalidListener.json @@ -0,0 +1,35 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "backendSets": { + "myBackendSet": { + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": "80", + "urlPath": "/healthCheck" + } + } + }, + "listeners": { + "httpListener": { "port": "8080", "protocol": "HTTP", "defaultBackendSetName" : "myBackendSet" }, + "invalid1": { "port": "8081", "protocol": "HTTP"}, + "invalid2": { "port": "80"}, + "invalid3": {} + }, + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + "credentials": "myacct" + } + } +] diff --git a/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerBackendSets.json b/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerBackendSets.json new file mode 100644 index 00000000000..92b32690e41 --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerBackendSets.json @@ -0,0 +1,38 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "loadBalancerId": "updateLoadBalancerBackendSets", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "backendSets": { + "myBackendSet1": { + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": "80", + "urlPath": "/healthCheck" + } + }, + "myBackendSet2": { + "policy": "ROUND_ROBIN", + "healthChecker": { + "protocol": "HTTP", + "port": "80", + "urlPath": "/healthCheck" + } + } + }, + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + 
"credentials": "myacct" + } + } +] diff --git a/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerCerts.json b/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerCerts.json new file mode 100644 index 00000000000..0b26719e1fa --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerCerts.json @@ -0,0 +1,30 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "loadBalancerId": "updateLoadBalancerCerts", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "certificates": { + "cert1": { + "certificateName": "cert1" + }, + "cert2": { + "certificateName": "cert2", + "publicCertificate": "-----BEGIN CERTIFICATE-----\nMxxxxjg==\n-----END CERTIFICATE-----\n", + "privateKey": "privateKey" + } + }, + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + "credentials": "myacct" + } + } +] diff --git a/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerListeners.json b/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerListeners.json new file mode 100644 index 00000000000..f25f2af61c6 --- /dev/null +++ b/clouddriver-oracle/src/test/resources/desc/updateLoadBalancerListeners.json @@ -0,0 +1,25 @@ +[ + { + "upsertLoadBalancer": { + "application": "chenLBtest", + "stack": "lb", + "shape": "100Mbps", + "selectedProvider": "oracle", + "loadBalancerId": "updateLoadBalancerListeners", + "subnetIds": [ + "ocid1.subnet.oc1.iad.aaaaaaaath476v3iumd45dyvvzxf6gelwphro2h6ss4mt7lzw3xjqq74hgca" + ], + "isPrivate": "true", + "listeners": { + "httpListener1": { "port": "8081", "protocol": "HTTP", "defaultBackendSetName" : "myBackendSet" }, + "httpsListener": { "port": "8082", "protocol": "HTTPS", "defaultBackendSetName" : "myBackendSet" } + }, + + "cloudProvider": "oracle", + "region": "us-ashburn-1", + "user": "anonymous", + "account": "myacct", + "credentials": "myacct" + } + } +] diff --git a/clouddriver-saga-test/clouddriver-saga-test.gradle b/clouddriver-saga-test/clouddriver-saga-test.gradle new file mode 100644 index 00000000000..d2666a425f2 --- /dev/null +++ b/clouddriver-saga-test/clouddriver-saga-test.gradle @@ -0,0 +1,33 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +dependencies { + api project(":clouddriver-saga") + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "com.fasterxml.jackson.module:jackson-module-kotlin" + + implementation "cglib:cglib-nodep" + implementation "org.objenesis:objenesis" + implementation "org.junit.jupiter:junit-jupiter-api" + implementation "org.springframework:spring-test" + implementation "org.springframework.boot:spring-boot-test" + implementation "org.assertj:assertj-core" + implementation "io.strikt:strikt-core" + implementation "dev.minutest:minutest" + implementation "io.mockk:mockk" +} diff --git a/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/AbstractSagaTest.kt b/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/AbstractSagaTest.kt new file mode 100644 index 00000000000..72ec3a7b5f0 --- /dev/null +++ b/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/AbstractSagaTest.kt @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga + +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import dev.minutest.junit.JUnit5Minutests +import io.mockk.every +import io.mockk.mockk +import org.springframework.context.ApplicationContext + +abstract class AbstractSagaTest : JUnit5Minutests { + + protected open inner class BaseSagaFixture(options: FixtureOptions = FixtureOptions()) { + val saga = Saga("test", "test") + + val sagaRepository: SagaRepository + + val applicationContext: ApplicationContext = mockk(relaxed = true) + + val sagaService: SagaService + + init { + if (options.mockSaga) { + sagaRepository = mockk(relaxed = true) + every { sagaRepository.get(eq("test"), eq("test")) } returns saga + } else { + sagaRepository = TestingSagaRepository() + } + if (options.registerDefaultTestTypes) { + registerBeans( + applicationContext, + Action1::class.java, + Action2::class.java, + Action3::class.java, + ShouldBranchPredicate::class.java + ) + } + registerBeans(applicationContext, *options.registerTypes.toTypedArray()) + + sagaService = SagaService(sagaRepository, NoopRegistry()).apply { + setApplicationContext(applicationContext) + } + } + } + + /** + * @param mockSaga Whether or not to use mockk for the [SagaRepository] or the [TestingSagaRepository] + * @param registerDefaultTestTypes Whether or not to register the canned test types for "autowiring" + * @param registerTypes Types to register (additive if [registerDefaultTestTypes] is true) + */ + open inner class FixtureOptions( + val mockSaga: Boolean = false, + val registerDefaultTestTypes: Boolean = true, + val registerTypes: List<Class<*>> = listOf() + ) + + protected fun registerBeans(applicationContext: ApplicationContext, vararg clazz: Class<*>) { + clazz.forEach { + every { 
applicationContext.getBean(eq(it)) } returns it.newInstance() + } + } +} diff --git a/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/TestingSagaRepository.kt b/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/TestingSagaRepository.kt new file mode 100644 index 00000000000..85730165115 --- /dev/null +++ b/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/TestingSagaRepository.kt @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga + +import com.netflix.spinnaker.clouddriver.event.EventMetadata +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import java.util.UUID + +class TestingSagaRepository : SagaRepository { + + private val sagas: MutableMap<String, Saga> = mutableMapOf() + + override fun list(criteria: SagaRepository.ListCriteria): List<Saga> { + return sagas.values.toList() + } + + override fun get(type: String, id: String): Saga? { + return sagas[createId(type, id)] + } + + override fun save(saga: Saga, additionalEvents: List<SagaEvent>) { + sagas.putIfAbsent(createId(saga), saga) + + val currentSequence = saga.getEvents().map { it.getMetadata().sequence }.maxOrNull() ?: 0 + val originatingVersion = saga.getVersion() + + saga.getPendingEvents() + .plus(additionalEvents) + .forEachIndexed { index, event -> + event.setMetadata( + EventMetadata( + id = UUID.randomUUID().toString(), + aggregateType = saga.name, + aggregateId = saga.id, + sequence = currentSequence + index + 1, + originatingVersion = originatingVersion + ) + ) + saga.addEventForTest(event) + } + } + + private fun createId(saga: Saga): String = createId(saga.name, saga.id) + + private fun createId(sagaName: String, sagaId: String) = "$sagaName/$sagaId" +} diff --git a/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/types.kt b/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/types.kt new file mode 100644 index 00000000000..4c8e9cccfdb --- /dev/null +++ b/clouddriver-saga-test/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/types.kt @@ -0,0 +1,72 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.saga + +import com.fasterxml.jackson.annotation.JsonTypeName +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import org.springframework.core.Ordered.HIGHEST_PRECEDENCE +import org.springframework.core.annotation.Order + +@JsonTypeName("shouldBranch") +class ShouldBranch : AbstractSagaEvent() + +@JsonTypeName("doAction1") +class DoAction1( + val branch: Boolean = true +) : AbstractSagaEvent(), SagaCommand + +@JsonTypeName("doAction2") +class DoAction2 : AbstractSagaEvent(), SagaCommand + +@JsonTypeName("doAction3") +class DoAction3 : AbstractSagaEvent(), SagaCommand + +class Action1 : SagaAction<DoAction1> { + override fun apply(command: DoAction1, saga: Saga): SagaAction.Result { + val events = if (command.branch) listOf(ShouldBranch()) else listOf() + return SagaAction.Result( + ManyCommands( + DoAction2(), + DoAction3() + ), + events + ) + } +} + +@Order(HIGHEST_PRECEDENCE) +class Action2 : SagaAction<DoAction2> { + override fun apply(command: DoAction2, saga: Saga): SagaAction.Result { + return SagaAction.Result(null, listOf()) + } +} + +@Order(HIGHEST_PRECEDENCE) +class Action3 : SagaAction<DoAction3> { + override fun apply(command: DoAction3, saga: Saga): SagaAction.Result { + return SagaAction.Result(null, listOf()) + } +} + +@Order(HIGHEST_PRECEDENCE) +class ShouldBranchPredicate : SagaFlow.ConditionPredicate { + override fun test(t: Saga): Boolean = + t.getEvents().filterIsInstance<ShouldBranch>().isNotEmpty() + + override val name: String = "shouldBranch" +} diff --git a/clouddriver-saga/clouddriver-saga.gradle b/clouddriver-saga/clouddriver-saga.gradle new file mode 100644 index 00000000000..bb8a726fde6 --- /dev/null +++ b/clouddriver-saga/clouddriver-saga.gradle @@ -0,0 +1,29 @@ +apply from: "$rootDir/gradle/kotlin.gradle" +apply from: "$rootDir/gradle/kotlin-test.gradle" + +dependencies { + api project(":clouddriver-event") + + annotationProcessor "org.springframework.boot:spring-boot-autoconfigure-processor" + + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "com.google.guava:guava" + implementation "com.google.code.findbugs:jsr305" + implementation "org.springframework:spring-web" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "com.fasterxml.jackson.module:jackson-module-kotlin" + implementation "javax.validation:validation-api" + implementation "org.hibernate.validator:hibernate-validator" + + testImplementation project(":clouddriver-saga-test") + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.assertj:assertj-core" + testImplementation "io.strikt:strikt-core" + testImplementation "dev.minutest:minutest" + testImplementation "io.mockk:mockk" +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaService.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaService.kt new file mode 100644 index 00000000000..dc1a106039f --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaService.kt @@ -0,0 +1,199 @@ +/* + * Copyright 2019 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga + +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaIntegrationException +import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaMissingRequiredCommandException +import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaNotFoundException +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlowIterator +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import com.netflix.spinnaker.kork.annotations.Beta +import com.netflix.spinnaker.kork.exceptions.SpinnakerException +import org.slf4j.LoggerFactory +import org.springframework.context.ApplicationContext +import org.springframework.context.ApplicationContextAware + +/** + * The main brains of the Saga library. Orchestrates the progression of a [Saga] until its completion. + * + * A [Saga] is a way of performing orchestrated distributed service transactions, and in the case of this library, + * is implemented through a series of log-backed "actions". A [SagaAction] is a reentrant and idempotent function + * that changes a remote system. The results of a [SagaAction] are committed into a log so that if at any point a + * Saga is interrupted, it may be resumed. Like all transactional systems, a [Saga] may also be rolled back if its + * [SagaAction]s are implemented as a [CompensatingSagaAction]. A rollback is managed by consumers of the Saga + * library and as such, there are no internal heuristics to dictate when a [Saga] will or will not be compensated. + * + * For every [SagaCommand], there are 0 to N [SagaAction]s. A [SagaAction] requires a [SagaCommand] which is provided + * either by the initial request into the [SagaService], or by a predecessor [SagaAction]. A [SagaAction] can emit 0 + * to N [SagaCommand]s, as well as [SagaEvent]s. The difference between the two is that a [SagaCommand] will move + * the progress of a [Saga] forward (or backwards if rolling back), whereas a [SagaEvent] will be published to all + * subscribers interested in it and will not affect the workflow of a [Saga]. + * + * ``` + * val flow = SagaFlow() + * .next(MyAction::class.java) + * .completionHandler(MyCompletionHandler::class.java) + * + * val result = sagaService.applyBlocking("mySaga", "saga-1", flow, DoMyAction()) + * ``` + */ +@Beta +class SagaService( + private val sagaRepository: SagaRepository, + private val registry: Registry +) : ApplicationContextAware { + + private lateinit var applicationContext: ApplicationContext + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + private val actionInvocationsId = registry.createId("sagas.actions.invocations") + + fun <T> applyBlocking(sagaName: String, sagaId: String, flow: SagaFlow, startingCommand: SagaCommand): T? 
{ + val initialSaga = initializeSaga(startingCommand, sagaName, sagaId) + + log.info("Applying saga: ${initialSaga.name}/${initialSaga.id}") + + if (initialSaga.isComplete()) { + log.info("Saga already complete, exiting early: ${initialSaga.name}/${initialSaga.id}") + return invokeCompletionHandler(initialSaga, flow) + } + + // TODO(rz): Validate that the startingCommand == the originating startingCommand payload? + + SagaFlowIterator(sagaRepository, applicationContext, initialSaga, flow).forEach { flowState -> + val saga = flowState.saga + val action = flowState.action + + log.debug("Applying saga action ${action.javaClass.simpleName} for ${saga.name}/${saga.id}") + + val requiredCommand: Class<SagaCommand> = getRequiredCommand(action) + if (!saga.finalizedCommand(requiredCommand)) { + val stepCommand = saga.getNextCommand(requiredCommand) + ?: throw SagaMissingRequiredCommandException("Missing required command ${requiredCommand.simpleName}") + + val result = try { + action.apply(stepCommand, saga).also { + registry + .counter(actionInvocationsId.withTags("result", "success", "action", action.javaClass.simpleName)) + .increment() + } + } catch (e: Exception) { + // TODO(rz): Add SagaAction.recover() + val handledException = invokeExceptionHandler(flow, e) + + log.error( + "Encountered error while applying action '${action.javaClass.simpleName}' on ${saga.name}/${saga.id}", + handledException + ) + + saga.addEvent( + SagaActionErrorOccurred( + actionName = action.javaClass.simpleName, + error = handledException, + retryable = when (handledException) { + is SpinnakerException -> handledException.retryable ?: false + else -> false + } + ) + ) + sagaRepository.save(saga) + + registry + .counter(actionInvocationsId.withTags("result", "failure", "action", action.javaClass.simpleName)) + .increment() + + log.error("Failed to apply action ${action.javaClass.simpleName} for ${saga.name}/${saga.id}") + throw handledException + } + + saga.setSequence(stepCommand.getMetadata().sequence) + + val newEvents: MutableList<SagaEvent> = result.events.toMutableList().also { + it.add(SagaCommandCompleted(getStepCommandName(stepCommand))) + } + + val nextCommand = result.nextCommand + if (nextCommand == null) { + if (flowState.hasMoreSteps() && !saga.hasUnappliedCommands()) { + saga.complete(false) + sagaRepository.save(saga, listOf()) + throw SagaIntegrationException("Result did not return a nextCommand value, but flow has more steps defined") + } + saga.complete(true) + } else { + // TODO(rz): Would be nice to flag commands that are optional so it's clearer in the event log + if (nextCommand is ManyCommands) { + newEvents.addAll(nextCommand.commands) + } else { + newEvents.add(nextCommand) + } + } + + sagaRepository.save(saga, newEvents) + } + } + + return invokeCompletionHandler(initialSaga, flow) + } + + private fun initializeSaga(command: SagaCommand, sagaName: String, sagaId: String): Saga { + return sagaRepository.get(sagaName, sagaId) + ?: Saga(sagaName, sagaId) + .also { + log.debug("Initializing new saga: $sagaName/$sagaId") + it.addEvent(command) + sagaRepository.save(it) + } + } + + private fun <T> invokeCompletionHandler(saga: Saga, flow: SagaFlow): T? { + return flow.completionHandler + ?.let { completionHandler -> + val handler = applicationContext.getBean(completionHandler) + val result = sagaRepository.get(saga.name, saga.id) + ?.let { handler.handle(it) } + ?: throw SagaNotFoundException("Could not find Saga to complete by ${saga.name}/${saga.id}") + + // TODO(rz): Haha... 
:( + try { + @Suppress("UNCHECKED_CAST") + return result as T? + } catch (e: ClassCastException) { + throw SagaIntegrationException("The completion handler is incompatible with the expected return type", e) + } + } + } + + private fun invokeExceptionHandler(flow: SagaFlow, exception: Exception): Exception { + flow.exceptionHandler?.let { exceptionHandler -> + val handler = applicationContext.getBean(exceptionHandler) + return handler.handle(exception) + } + return exception + } + + private fun getRequiredCommand(action: SagaAction<*>): Class<SagaCommand> = + getCommandTypeFromAction(action.javaClass) + + override fun setApplicationContext(applicationContext: ApplicationContext) { + this.applicationContext = applicationContext + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/config/SagaAutoConfiguration.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/config/SagaAutoConfiguration.kt new file mode 100644 index 00000000000..8495f02fc91 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/config/SagaAutoConfiguration.kt @@ -0,0 +1,61 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.config + +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository +import com.netflix.spinnaker.clouddriver.saga.SagaService +import com.netflix.spinnaker.clouddriver.saga.persistence.DefaultSagaRepository +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer.ClassSubtypeLocator +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer.SubtypeLocator +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean +import org.springframework.boot.context.properties.ConfigurationProperties +import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.ComponentScan +import org.springframework.context.annotation.Configuration + +@Configuration +@EnableConfigurationProperties(SagaProperties::class) +@ComponentScan("com.netflix.spinnaker.clouddriver.saga.controllers") +open class SagaAutoConfiguration { + + @Bean + @ConditionalOnMissingBean(SagaRepository::class) + open fun sagaRepository(eventRepository: EventRepository): SagaRepository { + return DefaultSagaRepository(eventRepository) + } + + @Bean + open fun sagaService( + sagaRepository: SagaRepository, + registry: Registry + ): SagaService = + SagaService(sagaRepository, registry) + + @Bean + open fun sagaEventSubtypeLocator(): SubtypeLocator { + return ClassSubtypeLocator( + SpinnakerEvent::class.java, + listOf("com.netflix.spinnaker.clouddriver.saga") + ) + } +} + 
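For orientation, here is a minimal sketch (not part of the diff itself) of how a consumer might drive the API introduced above: two commands, a SagaAction for each, and a SagaFlow applied through SagaService.applyBlocking. The command names, action names, the deploy() entry point, and the saga name/id/URI values are all invented for illustration; only the SagaFlow, SagaAction, and applyBlocking shapes come from the code in this change.

```kotlin
// Illustrative only: hypothetical commands and actions exercising the saga API
// added in this diff (SagaFlow, SagaAction, SagaService.applyBlocking).
package com.netflix.spinnaker.clouddriver.saga.example

import com.fasterxml.jackson.annotation.JsonTypeName
import com.netflix.spinnaker.clouddriver.saga.AbstractSagaEvent
import com.netflix.spinnaker.clouddriver.saga.SagaCommand
import com.netflix.spinnaker.clouddriver.saga.SagaService
import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction
import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow
import com.netflix.spinnaker.clouddriver.saga.models.Saga

@JsonTypeName("uploadArtifact")
class UploadArtifact(val uri: String) : AbstractSagaEvent(), SagaCommand

@JsonTypeName("activateArtifact")
class ActivateArtifact : AbstractSagaEvent(), SagaCommand

class UploadAction : SagaAction<UploadArtifact> {
  // Reentrant and idempotent: may be re-run if the saga resumes from its log.
  override fun apply(command: UploadArtifact, saga: Saga): SagaAction.Result {
    // ... call the remote system here ...
    // Emitting the next command moves the saga forward.
    return SagaAction.Result(ActivateArtifact(), listOf())
  }
}

class ActivateAction : SagaAction<ActivateArtifact> {
  // Terminal step: no nextCommand, so the saga completes after this action.
  override fun apply(command: ActivateArtifact, saga: Saga): SagaAction.Result =
    SagaAction.Result(null, listOf())
}

fun deploy(sagaService: SagaService) {
  val flow = SagaFlow()
    .next(UploadAction::class.java)
    .next(ActivateAction::class.java)

  // Resumes from the event log if this saga name/id pair was previously interrupted.
  sagaService.applyBlocking<Unit>("deployArtifact", "my-saga-id", flow, UploadArtifact("s3://bucket/app.tgz"))
}
```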
+@ConfigurationProperties("spinnaker.clouddriver.sagas") +open class SagaProperties diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/controllers/SagaController.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/controllers/SagaController.kt new file mode 100644 index 00000000000..d1772ce6e63 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/controllers/SagaController.kt @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.controllers + +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException +import org.springframework.web.bind.annotation.GetMapping +import org.springframework.web.bind.annotation.PathVariable +import org.springframework.web.bind.annotation.RequestMapping +import org.springframework.web.bind.annotation.RestController + +@RequestMapping("/saga") +@RestController +class SagaController( + private val sagaRepository: SagaRepository +) { + @GetMapping("/{name}/{id}") + fun get(@PathVariable("name") name: String, @PathVariable("id") id: String): Saga { + return sagaRepository.get(name, id) + ?: throw NotFoundException("Saga not found (name: $name, id: $id)") + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/events.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/events.kt new file mode 100644 index 00000000000..29a3356dab7 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/events.kt @@ -0,0 +1,194 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga + +import com.fasterxml.jackson.annotation.JsonTypeName +import com.netflix.spinnaker.clouddriver.event.AbstractSpinnakerEvent +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.kork.exceptions.SpinnakerException + +/** + * Root event type for [Saga]s. + */ +interface SagaEvent : SpinnakerEvent + +/** + * Warning: Do not use with Lombok @Value classes. + */ +abstract class AbstractSagaEvent : AbstractSpinnakerEvent(), SagaEvent + +/** + * Emitted whenever a [Saga] is saved. 
+ * + * This event does not attempt to find a difference in state, trading off persistence verbosity for a + * simpler implementation. + * + * @param sequence The [Saga]'s latest sequence + */ +@JsonTypeName("sagaSaved") +class SagaSaved( + val sequence: Long +) : AbstractSagaEvent() + +/** + * Emitted whenever an internal error has occurred while applying a [Saga]. + * + * @param reason A human-readable cause for the error + * @param error The Exception (if any) that caused the error condition + * @param retryable Flags whether or not this error is recoverable + * @param data Additional data that can help with diagnostics of the error + */ +@JsonTypeName("sagaInternalErrorOccurred") +class SagaInternalErrorOccurred( + val reason: String, + val error: Exception? = null, + val retryable: Boolean = true, + val data: Map<String, Any> = mapOf() +) : AbstractSagaEvent() + +/** + * Emitted whenever an error has occurred within a [SagaAction] while applying a [Saga]. + * + * @param actionName The Java simpleName of the handler + * @param error The Exception that caused the error condition + * @param retryable Flags whether or not this error is recoverable + */ +@JsonTypeName("sagaActionErrorOccurred") +class SagaActionErrorOccurred( + val actionName: String, + val error: Exception, + val retryable: Boolean +) : AbstractSagaEvent() + +/** + * Informational log that can be added to a [Saga] for end-user feedback, as well as operational insight. + * This is a direct tie-in for the Kato Task Status concept with some additional bells and whistles. + * + * @param message A tuple message that allows passing end-user- and operator-focused messages + * @param diagnostics Additional metadata that can help provide context to the message + */ +@JsonTypeName("sagaLogAppended") +class SagaLogAppended( + val message: Message, + val diagnostics: Diagnostics? = null +) : AbstractSagaEvent() { + + /** + * @param user An end-user friendly message + * @param system An operator friendly message + */ + data class Message( + val user: String? = null, + val system: String? = null + ) + + /** + * @param error An error, if one exists. This must be a [SpinnakerException] to provide retryable metadata + * @param data Additional metadata + */ + data class Diagnostics( + val error: SpinnakerException? = null, + val data: Map<String, Any> = mapOf() + ) +} + +/** + * Emitted when all actions for a [Saga] have been applied. + */ +@JsonTypeName("sagaCompleted") +class SagaCompleted( + val success: Boolean +) : AbstractSagaEvent() + +/** + * Emitted when a [Saga] enters a rollback state. + */ +@JsonTypeName("sagaRollbackStarted") +class SagaRollbackStarted : AbstractSagaEvent() + +/** + * Emitted when all rollback actions for a [Saga] have been applied. + */ +@JsonTypeName("sagaRollbackCompleted") +class SagaRollbackCompleted : AbstractSagaEvent() + +/** + * @param conditionName The condition name. + * @param result The condition result. + */ +@JsonTypeName("sagaConditionEvaluated") +class SagaConditionEvaluated( + val conditionName: String, + val result: Boolean +) : AbstractSagaEvent() + +/** + * An event type that finalizes a [SagaCommand] + */ +interface CommandFinalizer : SagaEvent { + + /** + * The command name that was finalized. + */ + val command: String + + /** + * Returns whether or not the given [candidateCommand] was finalized by this event. 
+ */ + fun matches(candidateCommand: Class<out SagaCommand>): Boolean = + candidateCommand.getAnnotation(JsonTypeName::class.java)?.value == command +} + +@JsonTypeName("sagaCommandSkipped") +class SagaCommandSkipped( + override val command: String, + val reason: String +) : AbstractSagaEvent(), CommandFinalizer + +/** + * The root event type for all mutating [Saga] operations. + */ +interface SagaCommand : SagaEvent + +/** + * The root event type for all [Saga] rollback operations. + */ +interface SagaRollbackCommand : SagaCommand + +/** + * Marker event for recording that the work associated with a particular [SagaCommand] event has been completed. + * + * @param command The [SagaCommand] name + */ +@JsonTypeName("sagaCommandCompleted") +class SagaCommandCompleted( + override val command: String +) : AbstractSagaEvent(), CommandFinalizer + +/** + * A [SagaCommand] wrapper for [SagaAction]s that need to return more than one [SagaCommand]. + * + * This event is unwrapped prior to being added to the event log, so all [SagaCommand]s defined within this + * wrapper will show up as their own distinct log entries. + */ +@JsonTypeName("sagaManyCommandsWrapper") +class ManyCommands( + command1: SagaCommand, + vararg extraCommands: SagaCommand +) : AbstractSagaEvent(), SagaCommand { + val commands = listOf(command1).plus(extraCommands) +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaException.kt new file mode 100644 index 00000000000..e9d55666e2c --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaException.kt @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.exceptions + +/** + * Marker interface for Saga framework exceptions. + */ +interface SagaException diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaFlowActionNotFoundException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaFlowActionNotFoundException.kt new file mode 100644 index 00000000000..1ff9b764730 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaFlowActionNotFoundException.kt @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.saga.exceptions + +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction + +/** + * Thrown when a [SagaAction] cannot be found during a [SagaFlow.inject] operation. + */ +class SagaFlowActionNotFoundException(sagaAction: Class<out SagaAction<*>>) : SagaIntegrationException( + "Could not find a SagaAction in flow for ${sagaAction.simpleName}" +) { + init { + retryable = false + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaIntegrationException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaIntegrationException.kt new file mode 100644 index 00000000000..4e6b63a12c9 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaIntegrationException.kt @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.exceptions + +import com.netflix.spinnaker.kork.exceptions.IntegrationException +import com.netflix.spinnaker.kork.exceptions.SpinnakerException + +/** + * Thrown when code using the Saga framework has generated an uncaught exception; it will be wrapped by this + * Exception and re-thrown. + */ +open class SagaIntegrationException(message: String, cause: Throwable?) : + IntegrationException(message, cause), SagaException { + + constructor(message: String) : this(message, null) + + init { + // Defer to the cause for retryable; default to not retryable if the retryable flag is unavailable. + retryable = if (cause is SpinnakerException) { + cause.retryable ?: false + } else { + false + } + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaMissingRequiredCommandException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaMissingRequiredCommandException.kt new file mode 100644 index 00000000000..a5befc4566a --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaMissingRequiredCommandException.kt @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * 
+ */ +class SagaMissingRequiredCommandException(message: String) : SagaIntegrationException(message) { + init { + retryable = false + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaNotFoundException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaNotFoundException.kt new file mode 100644 index 00000000000..092b435165d --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaNotFoundException.kt @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.saga.exceptions + +/** + * Thrown when a [Saga] cannot be found but it is expected to already exist. + */ +class SagaNotFoundException(message: String) : SagaSystemException(message) { + init { + retryable = false + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaStateIntegrationException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaStateIntegrationException.kt new file mode 100644 index 00000000000..21618818f61 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaStateIntegrationException.kt @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.exceptions + +import com.netflix.spinnaker.clouddriver.saga.models.Saga + +/** + * Thrown when an integration attempts to interact with the internal [Saga] event state incorrectly. 
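+ *
+ * For example, [Saga.getEvent] expects exactly one event of the requested type, so a lookup such as the
+ * following (a sketch) can surface either companion factory below:
+ * ```
+ * val evaluated = saga.getEvent(SagaConditionEvaluated::class.java)
+ * // zero matching events in the log -> typeNotFound(...) is thrown
+ * // two or more matching events     -> tooManyResults(...) is thrown
+ * ```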
+ */ +class SagaStateIntegrationException(message: String) : SagaIntegrationException(message) { + companion object { + fun typeNotFound(expectedType: Class<*>, saga: Saga) = + SagaStateIntegrationException( + "No SagaEvent present for requested type: ${expectedType.simpleName} (${saga.name}/${saga.id})" + ).also { + it.retryable = false + } + + fun tooManyResults(expectedType: Class<*>, saga: Saga) = + SagaStateIntegrationException( + "More than one SagaEvent present for requested type: ${expectedType.simpleName} (${saga.name}/${saga.id})" + ).also { + it.retryable = false + } + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaSystemException.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaSystemException.kt new file mode 100644 index 00000000000..f11f4835af5 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/exceptions/SagaSystemException.kt @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.exceptions + +import com.netflix.spinnaker.kork.exceptions.SystemException + +/** + * Root exception for internal Saga framework exceptions. + */ +open class SagaSystemException(message: String, cause: Throwable?) : SystemException(message, cause), SagaException { + constructor(message: String) : this(message, null) + + init { + retryable = false + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/CompensatingSagaAction.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/CompensatingSagaAction.kt new file mode 100644 index 00000000000..ac38e3ad95b --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/CompensatingSagaAction.kt @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.flow + +import com.netflix.spinnaker.clouddriver.saga.SagaCommand +import com.netflix.spinnaker.clouddriver.saga.SagaRollbackCommand +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.kork.annotations.Beta + +/** + * A [SagaAction] that has a companion [rollback] method. 
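+ *
+ * A minimal sketch (the command types here are hypothetical, not part of this change):
+ * ```
+ * class CreateThingAction : CompensatingSagaAction<CreateThing, RollbackThing> {
+ *   override fun apply(command: CreateThing, saga: Saga): SagaAction.Result =
+ *     SagaAction.Result() // create the resource
+ *
+ *   override fun rollback(command: RollbackThing, saga: Saga): SagaAction.Result =
+ *     SagaAction.Result() // undo whatever apply did
+ * }
+ * ```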
+ */
+@Beta
+interface CompensatingSagaAction<T : SagaCommand, R : SagaRollbackCommand> : SagaAction<T> {
+  fun rollback(command: R, saga: Saga): SagaAction.Result
+}
diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaAction.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaAction.kt
new file mode 100644
index 00000000000..6cb6f50daba
--- /dev/null
+++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaAction.kt
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.saga.flow
+
+import com.netflix.spinnaker.clouddriver.saga.SagaCommand
+import com.netflix.spinnaker.clouddriver.saga.SagaEvent
+import com.netflix.spinnaker.clouddriver.saga.models.Saga
+import com.netflix.spinnaker.kork.annotations.Beta
+
+/**
+ * A discrete action in a [Saga].
+ *
+ * When the Saga reaches this action, it will refresh the latest [Saga] context and persist a snapshot marker
+ * to the event store once the action has been applied successfully.
+ *
+ * A [SagaAction] must be written such that it is reentrant and idempotent in the case of client-invoked
+ * retries (due to internal or downstream system failure). Upon completion of an action, it can emit 0 to N
+ * [SagaCommand]s and 0 to N [SagaEvent]s. Only [SagaCommand]s can move a [Saga]'s progress forward,
+ * whereas a [SagaEvent] just notifies interested parties of changes within the system.
+ */
+@Beta
+interface SagaAction<T : SagaCommand> {
+  /**
+   * @param command The input [SagaCommand] to act on
+   * @param saga The latest [Saga] state
+   */
+  fun apply(command: T, saga: Saga): Result
+
+  /**
+   * In the event of an exception being raised from [apply], a [SagaAction] can implement custom error handling logic.
+   *
+   * By default, nothing happens.
+   *
+   * @param command The input [SagaCommand] that was acted on
+   * @param saga The [Saga] state used to apply the [command]
+   * @param exception The resulting exception
+   */
+//  fun recover(command: T, saga: Saga, exception: Exception): Result = Result()
+
+  /**
+   * @property nextCommand The next [SagaCommand] to run, if any. [ManyCommands] can be used to emit more
+   * than one command if necessary
+   * @property events A list of events to publish to subscribers
+   */
+  data class Result(
+    val nextCommand: SagaCommand?,
+    val events: List<SagaEvent>
+  ) {
+    constructor() : this(null)
+    constructor(nextCommand: SagaCommand?) : this(nextCommand, listOf())
+  }
+}
diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaCompletionHandler.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaCompletionHandler.kt
new file mode 100644
index 00000000000..23ee5661618
--- /dev/null
+++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaCompletionHandler.kt
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.saga.flow
+
+import com.netflix.spinnaker.clouddriver.saga.models.Saga
+import com.netflix.spinnaker.kork.annotations.Beta
+
+/**
+ * The completion handler is used as a way of registering beans as a callback once a particular [Saga]
+ * has been completed. Using this allows a Saga to finalize and return data in both successful and failed states.
+ * Its results and actions are performed outside of the [Saga] event lifecycle, so they will not be persisted
+ * and should not include any logic that has side effects.
+ */
+@Beta
+interface SagaCompletionHandler<T> {
+  fun handle(completedSaga: Saga): T?
+}
diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaExceptionHandler.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaExceptionHandler.kt
new file mode 100644
index 00000000000..18bb58ed79b
--- /dev/null
+++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaExceptionHandler.kt
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.saga.flow
+
+import com.netflix.spinnaker.kork.annotations.Beta
+import kotlin.Exception
+
+/**
+ * The [SagaExceptionHandler] is an optional interface for implementors to use when determining how to
+ * handle an exception thrown during a [SagaFlow]. An example use case would be flagging a specific
+ * exception as retryable.
+ */
+@Beta
+interface SagaExceptionHandler {
+  fun handle(exception: Exception): Exception
+}
diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlow.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlow.kt
new file mode 100644
index 00000000000..83d98d1e41a
--- /dev/null
+++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlow.kt
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.saga.flow
+
+import com.google.common.annotations.Beta
+import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaFlowActionNotFoundException
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow.InjectLocation.AFTER
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow.InjectLocation.BEFORE
+import com.netflix.spinnaker.clouddriver.saga.models.Saga
+import java.util.function.Consumer
+import java.util.function.Predicate
+
+/**
+ * A high-level DSL to help build and visualize the workflow that a [Saga] will take towards completion.
+ *
+ * The simplest [Saga] is one that has a single [SagaAction]. A [SagaCompletionHandler] is optional.
+ */
+@Beta
+class SagaFlow {
+
+  internal val steps: MutableList<Step> = mutableListOf()
+  internal var exceptionHandler: Class<out SagaExceptionHandler>? = null
+  internal var completionHandler: Class<out SagaCompletionHandler<*>>? = null
+
+  /**
+   * An action to take next.
+   */
+  fun then(action: Class<out SagaAction<*>>): SagaFlow {
+    steps.add(ActionStep(action))
+    return this
+  }
+
+  /**
+   * Add a new [SagaAction] into the flow at an arbitrary position in relation to another [SagaAction].
+   */
+  fun inject(
+    location: InjectLocation,
+    targetAction: Class<out SagaAction<*>>,
+    action: Class<out SagaAction<*>>
+  ): SagaFlow {
+    val index = steps.filterIsInstance<ActionStep>().indexOfFirst { it.action == targetAction }
+    if (index == -1) {
+      throw SagaFlowActionNotFoundException(targetAction)
+    }
+    when (location) {
+      BEFORE -> steps.add(index, ActionStep(action))
+      AFTER -> {
+        val afterIndex = index + 1
+        if (afterIndex > steps.size - 1) {
+          steps.add(ActionStep(action))
+        } else {
+          steps.add(afterIndex, ActionStep(action))
+        }
+      }
+    }
+    return this
+  }
+
+  /**
+   * Inject the provided [SagaAction] as the first step.
+   */
+  fun injectFirst(action: Class<out SagaAction<*>>): SagaFlow {
+    steps.add(0, ActionStep(action))
+    return this
+  }
+
+  /**
+   * Define a conditional branch.
+   *
+   * The condition is evaluated at runtime.
+   *
+   * @param condition The [Predicate] that will evaluate whether or not to take the branch
+   * @param builder The nested [SagaFlow] used to define the branched steps; only called if [condition] is true
+   */
+  fun on(condition: Class<out ConditionPredicate>, builder: (SagaFlow) -> Unit): SagaFlow {
+    steps.add(ConditionStep(condition, SagaFlow().also(builder)))
+    return this
+  }
+
+  /**
+   * Java-compatible interface.
+   */
+  fun on(condition: Class<out ConditionPredicate>, builder: Consumer<SagaFlow>): SagaFlow {
+    steps.add(ConditionStep(condition, SagaFlow().also { builder.accept(it) }))
+    return this
+  }
+
+  /**
+   * An optional [SagaCompletionHandler].
+   *
+   * @param handler The [SagaCompletionHandler] to invoke on completion
+   */
+  fun completionHandler(handler: Class<out SagaCompletionHandler<*>>): SagaFlow {
+    completionHandler = handler
+    return this
+  }
+
+  /**
+   * An optional [SagaExceptionHandler].
+   *
+   * @param handler The [SagaExceptionHandler] to invoke when an exception is caught
+   */
+  fun exceptionHandler(handler: Class<out SagaExceptionHandler>): SagaFlow {
+    exceptionHandler = handler
+    return this
+  }
+
+  interface ConditionPredicate : Predicate<Saga> {
+    /**
+     * The name of the predicate. Used for correlating previously evaluated conditions to their responses.
+     *
+     * If the same predicate is used more than once in a Saga, the predicates will need to be uniquely named
+     * to distinguish between them.
+     */
+    val name: String
+  }
+
+  interface Step
+  inner class ActionStep(val action: Class<out SagaAction<*>>) : Step
+  inner class ConditionStep(val predicate: Class<out ConditionPredicate>, val nestedBuilder: SagaFlow) : Step
+
+  enum class InjectLocation {
+    BEFORE,
+    AFTER
+  }
+}
diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowIterator.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowIterator.kt
new file mode 100644
index 00000000000..7248cc7baf7
--- /dev/null
+++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowIterator.kt
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.saga.flow
+
+import com.fasterxml.jackson.annotation.JsonTypeName
+import com.netflix.spinnaker.clouddriver.saga.SagaCommand
+import com.netflix.spinnaker.clouddriver.saga.SagaCommandCompleted
+import com.netflix.spinnaker.clouddriver.saga.SagaCommandSkipped
+import com.netflix.spinnaker.clouddriver.saga.SagaConditionEvaluated
+import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaNotFoundException
+import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaSystemException
+import com.netflix.spinnaker.clouddriver.saga.flow.seekers.SagaCommandCompletedEventSeeker
+import com.netflix.spinnaker.clouddriver.saga.flow.seekers.SagaCommandEventSeeker
+import com.netflix.spinnaker.clouddriver.saga.getCommandTypeFromAction
+import com.netflix.spinnaker.clouddriver.saga.models.Saga
+import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository
+import com.netflix.spinnaker.kork.exceptions.SystemException
+import org.slf4j.LoggerFactory
+import org.springframework.beans.BeansException
+import org.springframework.context.ApplicationContext
+
+/**
+ * This iterator is responsible for refreshing the [Saga] state, flattening branch logic and hydrating a [SagaFlow]
+ * with rollback commands if necessary.
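+ *
+ * Consumption sketch (the driving loop lives outside this file, e.g. in the saga service; shown here
+ * only for orientation):
+ * ```
+ * val iterator = SagaFlowIterator(sagaRepository, applicationContext, saga, flow)
+ * while (iterator.hasNext()) {
+ *   val (latestSaga, action) = iterator.next()
+ *   // apply `action` against `latestSaga` and persist the resulting events
+ * }
+ * ```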
+ *
+ * TODO(rz): add rollback direction
+ *
+ * @param sagaRepository The [SagaRepository] to refresh [Saga] state with
+ * @param applicationContext The Spring [ApplicationContext] used to autowire flow steps
+ * @param saga The [Saga] execution that is being applied
+ * @param flow The [SagaFlow] being iterated
+ */
+class SagaFlowIterator(
+  private val sagaRepository: SagaRepository,
+  private val applicationContext: ApplicationContext,
+  private var saga: Saga,
+  private val flow: SagaFlow,
+  private val seekingEnabled: Boolean = true,
+  private val stateRefreshingEnabled: Boolean = true
+) : Iterator<SagaFlowIterator.IteratorState> {
+
+  private val log by lazy { LoggerFactory.getLogger(javaClass) }
+
+  private val context = Context(saga.name, saga.id)
+
+  private var index: Int = 0
+  private var seeked: Boolean = false
+
+  // toList().toMutableList() copies the list, so while we mutate stuff, it's all internal
+  private var steps = flow.steps.toList().toMutableList()
+
+  private lateinit var latestSaga: Saga
+
+  override fun hasNext(): Boolean {
+    if (index >= steps.size) {
+      return false
+    }
+
+    // The iterator needs the latest state of a saga to correctly determine the next step to take.
+    // This is kind of handy, since we can pass this newly refreshed state straight to the iterator consumer so they
+    // don't need to concern themselves with that.
+    if (stateRefreshingEnabled) {
+      latestSaga = sagaRepository.get(context.sagaName, context.sagaId)
+        ?: throw SagaNotFoundException("Could not find Saga (${context.sagaName}/${context.sagaId}) for flow traversal")
+    }
+
+    // To support resuming sagas, we want to seek to the next step that has not been processed,
+    // which may not be the first step.
+    if (seekingEnabled) {
+      seekToNextStep(latestSaga)
+    }
+
+    val nextStep = steps[index]
+    if (nextStep is SagaFlow.ConditionStep) {
+      evaluateConditionStep(nextStep)
+    }
+
+    return index < steps.size
+  }
+
+  /**
+   * Evaluates a [SagaFlow.ConditionStep].
+   *
+   * If the condition has not been previously evaluated, the condition will be run against the latest state. If
+   * the condition's predicate returns true, its nested [SagaFlow] will be injected into the current steps list,
+   * replacing the condition step's location. If the condition is false, the step will simply be removed.
+   *
+   * Condition results are saved into the event log, so they are only evaluated once; whenever the
+   * [SagaFlow] is replayed, the cached [SagaConditionEvaluated] event will be used instead of invoking the
+   * predicate another time.
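+   *
+   * Predicates are resolved as Spring beans; a sketch of one (modeled on the branching example test):
+   * ```
+   * class ShouldDoOptionalThings : SagaFlow.ConditionPredicate {
+   *   override val name = "shouldDoOptionalThings"
+   *   override fun test(t: Saga): Boolean =
+   *     t.getEvents().filterIsInstance<PrepareForThings>().first().doOptionalThings
+   * }
+   * ```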
+ */ + private fun evaluateConditionStep(nextStep: SagaFlow.ConditionStep) { + val predicate = try { + applicationContext.getBean(nextStep.predicate) + } catch (e: BeansException) { + throw SagaSystemException("Failed to create SagaFlow Predicate: ${nextStep.predicate.simpleName}", e) + } + + val previousEvaluationResult = latestSaga.maybeGetEvent(SagaConditionEvaluated::class.java) { events -> + events.firstOrNull { it.conditionName == predicate.name } + }?.result + + val result = previousEvaluationResult + ?.also { + log.debug("Condition '${predicate.name}' previously evaluated: $previousEvaluationResult") + } + ?: predicate.test(latestSaga) + .also { conditionResult -> + log.debug("Condition '${predicate.name}' result: $conditionResult") + latestSaga.addEvent(SagaConditionEvaluated(nextStep.predicate.name, conditionResult)) + + if (!conditionResult) { + skipConditionalCommands(nextStep.nestedBuilder.steps) + } + } + + if (result) { + steps.addAll(index, nextStep.nestedBuilder.steps) + } + steps.remove(nextStep) + } + + /** + * When a conditional branch is not taken, we'll have one or more commands in the event log that were + * meant to start that branch. This method will find all of these commands and finalize them with a + * [SagaCommandSkipped] event. + */ + private fun skipConditionalCommands(conditionalSteps: List) { + // Read the unused SagaFlow steps for all commands to skip. + val skippedCommandTypes = conditionalSteps + .filterIsInstance() + .mapNotNull { + getCommandTypeFromAction(it.action).getAnnotation(JsonTypeName::class.java)?.value + } + + // Search the existing event log for commands that have not been completed. For each incomplete + // command, check against [skippedCommandTypes] for any matches, adding [SagaCommandSkipped] for + // each match. + latestSaga.getEvents() + .filterIsInstance() + .filter { + latestSaga.getEvents() + .filterIsInstance() + .none { completed -> completed.matches(it.javaClass) } + } + .forEach { + val commandName = it.javaClass.getAnnotation(JsonTypeName::class.java)?.value + if (commandName != null && skippedCommandTypes.contains(commandName)) { + latestSaga.addEvent(SagaCommandSkipped(commandName, "Condition evaluated against running branch")) + } + } + } + + /** + * Seeks the iterator to the next step that needs to be (re)started, if the saga has already begun. + * + * Multiple strategies are used to locate the correct index to seek to. The highest index returned from the [Seeker] + * strategies will be used for seeking. + * + * TODO(rz): What if there is more than 1 of a particular command in a flow? :thinking_face: May need more metadata + * in the [SagaCommandCompleted] event passed along... 
+ */ + private fun seekToNextStep(saga: Saga) { + if (seeked) { + // We only want to seek once + return + } + seeked = true + + index = listOf(SagaCommandCompletedEventSeeker(), SagaCommandEventSeeker()) + .mapNotNull { it.invoke(index, steps, saga)?.coerceAtLeast(0) } + .minOrNull() + ?: index + + if (index != 0) { + log.info("Seeking to step index $index") + } + } + + override fun next(): IteratorState { + val step = steps[index] + if (step !is SagaFlow.ActionStep) { + // If this is thrown, it indicates a bug in the hasNext logic + throw SystemException("step must be an action: $step") + } + index += 1 + + val action = try { + applicationContext.getBean(step.action) + } catch (e: BeansException) { + throw SagaSystemException("Failed to create SagaAction: ${step.action.simpleName}", e) + } + + @Suppress("UNCHECKED_CAST") + return IteratorState( + saga = latestSaga, + action = action as SagaAction, + iterator = this + ) + } + + /** + * Copies the iterator for use in seekers without impacting state of the main iterator. + */ + private fun copyForSeeker(): SagaFlowIterator = + SagaFlowIterator( + sagaRepository, applicationContext, saga, flow, seekingEnabled = false, stateRefreshingEnabled = false + ) + + /** + * Encapsulates multiple values for the current iterator item. + * + * @param saga The refreshed [Saga] state + * @param action The actual [SagaAction] + * @param iterator This iterator + */ + data class IteratorState( + val saga: Saga, + val action: SagaAction, + private val iterator: SagaFlowIterator + ) { + + /** + * @return Whether or not there are more flow steps after this item. This may evaluate to true if the next + * step is a condition, but there may not be another [SagaAction]. + */ + fun hasMoreSteps(): Boolean { + return iterator.hasNext() + } + } + + private data class Context( + val sagaName: String, + val sagaId: String + ) +} + +/** + * Allows multiple strategies to be used to locate the correct starting point for the [SagaFlowIterator]. + * + * If a Seeker cannot determine an index, null should be returned. If multiple Seekers return an index, the + * highest value will be used. + */ +internal typealias Seeker = (currentIndex: Int, steps: List, saga: Saga) -> Int? diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/seekers/SagaCommandCompletedEventSeeker.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/seekers/SagaCommandCompletedEventSeeker.kt new file mode 100644 index 00000000000..bca0524f1b5 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/seekers/SagaCommandCompletedEventSeeker.kt @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.saga.flow.seekers + +import com.netflix.spinnaker.clouddriver.saga.SagaCommandCompleted +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.flow.Seeker +import com.netflix.spinnaker.clouddriver.saga.flow.convertActionStepToCommandName +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import org.slf4j.LoggerFactory + +/** + * Seeks the [SagaFlowIterator] index to the next command following a [SagaCommandCompleted] event. + */ +internal class SagaCommandCompletedEventSeeker : Seeker { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + override fun invoke(currentIndex: Int, steps: List, saga: Saga): Int? { + val completionEvents = saga.getEvents().filterIsInstance() + if (completionEvents.isEmpty()) { + // If there are no completion events, we don't need to seek at all. + return null + } + + val lastCompletedCommand = completionEvents.last().command + val step = steps + .filterIsInstance() + .find { convertActionStepToCommandName(it) == lastCompletedCommand } + + if (step == null) { + // Not the end of the world if this seeker doesn't find a correlated step, but it's definitely an error case + log.error("Could not find step associated with last completed command ($lastCompletedCommand)") + return null + } + + return (steps.indexOf(step) + 1).also { + log.debug("Suggesting to seek index to $it") + } + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/seekers/SagaCommandEventSeeker.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/seekers/SagaCommandEventSeeker.kt new file mode 100644 index 00000000000..926620f218c --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/seekers/SagaCommandEventSeeker.kt @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.flow.seekers + +import com.netflix.spinnaker.clouddriver.saga.SagaCommand +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.flow.Seeker +import com.netflix.spinnaker.clouddriver.saga.flow.convertActionStepToCommandClass +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import org.slf4j.LoggerFactory + +/** + * Seeks the [SagaFlowIterator] index to the next incomplete, but committed [SagaCommand]. + */ +internal class SagaCommandEventSeeker : Seeker { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + override fun invoke(currentIndex: Int, steps: List, saga: Saga): Int? 
{ + val commands = saga.getEvents().filterIsInstance() + if (commands.isEmpty()) { + // No commands, nothing to seek to + return null + } + + val lastCommand = commands.last().javaClass + val step = steps + .filterIsInstance() + .find { convertActionStepToCommandClass(it) == lastCommand } + + if (step == null) { + log.error("Could not find step associated with last incomplete command ($lastCommand)") + return null + } + + return (steps.indexOf(step)).also { + log.debug("Suggesting to seek index to $it") + } + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/util.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/util.kt new file mode 100644 index 00000000000..04c3636723b --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/util.kt @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.flow + +import com.netflix.spinnaker.clouddriver.saga.SagaCommand +import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaSystemException +import com.netflix.spinnaker.clouddriver.saga.getStepCommandName +import org.springframework.core.ResolvableType + +/** + * Derives a [SagaCommand] name from a [SagaFlow.ActionStep]. + */ +internal fun convertActionStepToCommandName(step: SagaFlow.ActionStep): String = + getStepCommandName(convertActionStepToCommandClass(step)) + +/** + * Derives a [SagaCommand] Class from a [SagaFlow.ActionStep]. + */ +internal fun convertActionStepToCommandClass(step: SagaFlow.ActionStep): Class { + val actionType = ResolvableType.forClass(step.action) + .also { it.resolve() } + + val commandType = actionType.interfaces + .find { SagaAction::class.java.isAssignableFrom(it.rawClass!!) } + ?.getGeneric(0) + ?: throw SagaSystemException("Could not resolve SagaCommand type from ActionStep: $step") + + @Suppress("UNCHECKED_CAST") + return commandType.rawClass as Class +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/models/Saga.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/models/Saga.kt new file mode 100644 index 00000000000..10ddbd4be43 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/models/Saga.kt @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.clouddriver.saga.models
+
+import com.fasterxml.jackson.annotation.JsonIgnore
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties
+import com.google.common.annotations.VisibleForTesting
+import com.netflix.spinnaker.clouddriver.saga.CommandFinalizer
+import com.netflix.spinnaker.clouddriver.saga.SagaCommand
+import com.netflix.spinnaker.clouddriver.saga.SagaCompleted
+import com.netflix.spinnaker.clouddriver.saga.SagaEvent
+import com.netflix.spinnaker.clouddriver.saga.SagaLogAppended
+import com.netflix.spinnaker.clouddriver.saga.SagaRollbackStarted
+import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaStateIntegrationException
+import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaSystemException
+import com.netflix.spinnaker.kork.annotations.Beta
+import org.slf4j.LoggerFactory
+
+/**
+ * The primary domain model of the Saga framework.
+ *
+ * @param name The name of the Saga type. This should be shared across all same-type Sagas (e.g. aws deploys)
+ * @param id The Saga instance ID
+ * @param sequence An internal counter used for tracking a Saga's position in an event log
+ */
+@Beta
+class Saga(
+  val name: String,
+  val id: String,
+  private var sequence: Long = 0
+) {
+
+  constructor(name: String, id: String) : this(name, id, 0)
+
+  private val log by lazy { LoggerFactory.getLogger(javaClass) }
+
+  private val events: MutableList<SagaEvent> = mutableListOf()
+  private val pendingEvents: MutableList<SagaEvent> = mutableListOf()
+
+  internal fun complete(success: Boolean = true) {
+    addEvent(SagaCompleted(success))
+  }
+
+  fun isComplete(): Boolean = events.filterIsInstance<SagaCompleted>().isNotEmpty()
+
+  fun isCompensating(): Boolean = events.filterIsInstance<SagaRollbackStarted>().isNotEmpty()
+
+  fun getVersion(): Long {
+    return events.map { it.getMetadata().originatingVersion }.maxOrNull()?.let { it + 1 } ?: 0
+  }
+
+  fun addEvent(event: SagaEvent) {
+    this.pendingEvents.add(event)
+  }
+
+  @Suppress("UNCHECKED_CAST")
+  fun <T : SagaEvent> getEvent(clazz: Class<T>): T {
+    return events.reversed()
+      .filter { clazz.isAssignableFrom(it.javaClass) }
+      .let {
+        when (it.size) {
+          0 -> throw SagaStateIntegrationException.typeNotFound(clazz, this)
+          1 -> it.first() as T
+          else -> throw SagaStateIntegrationException.tooManyResults(clazz, this)
+        }
+      }
+  }
+
+  @Suppress("UNCHECKED_CAST")
+  fun <T : SagaEvent> getEvent(clazz: Class<T>, reducer: (List<T>) -> T): T {
+    return events.reversed()
+      .filter { clazz.isAssignableFrom(it.javaClass) }
+      .let {
+        when (it.size) {
+          0 -> throw SagaStateIntegrationException.typeNotFound(clazz, this)
+          1 -> it.first()
+          else -> reducer(it as List<T>)
+        } as T
+      }
+  }
+
+  @Suppress("UNCHECKED_CAST")
+  fun <T : SagaEvent> maybeGetEvent(clazz: Class<T>): T? =
+    events.reversed()
+      .filter { clazz.isAssignableFrom(it.javaClass) }
+      .let {
+        when (it.size) {
+          0 -> null
+          1 -> it.first() as T?
+          else -> throw SagaStateIntegrationException.tooManyResults(clazz, this)
+        }
+      }
+
+  @Suppress("UNCHECKED_CAST")
+  fun <T : SagaEvent> maybeGetEvent(clazz: Class<T>, reducer: (List<T>) -> T?): T? =
+    events.reversed()
+      .filter { clazz.isAssignableFrom(it.javaClass) }
+      .let {
+        when (it.size) {
+          0 -> null
+          1 -> it.first() as T?
+          else -> reducer(it as List<T>)
+        }
+      }
+
+  internal fun finalizedCommand(command: Class<out SagaCommand>): Boolean {
+    return getEvents()
+      .filterIsInstance<CommandFinalizer>()
+      .any { it.matches(command) }
+  }
+
+  internal fun getNextCommand(requiredCommand: Class<out SagaCommand>): SagaCommand? {
+    return getEvents()
+      .filterIsInstance<SagaCommand>()
+      .filterNot { finalizedCommand(it.javaClass) }
+      .firstOrNull { requiredCommand.isAssignableFrom(it.javaClass) }
+  }
+
+  internal fun hasUnappliedCommands(): Boolean {
+    return getEvents().plus(pendingEvents)
+      .filterIsInstance<SagaCommand>()
+      .filterNot { finalizedCommand(it.javaClass) }
+      .any()
+  }
+
+  @VisibleForTesting
+  fun addEventForTest(event: SagaEvent) {
+    this.events.add(event)
+  }
+
+  internal fun hydrateEvents(events: List<SagaEvent>) {
+    if (this.events.isEmpty()) {
+      this.events.addAll(events)
+    }
+  }
+
+  fun getSequence(): Long = sequence
+
+  internal fun setSequence(appliedEventVersion: Long) {
+    if (sequence > appliedEventVersion) {
+      throw SagaSystemException(
+        "Attempted to set Saga sequence to an event version in the past " +
+          "(current: $sequence, applying: $appliedEventVersion)"
+      )
+    }
+    sequence = appliedEventVersion
+  }
+
+  @JsonIgnoreProperties("saga")
+  fun getEvents(): List<SagaEvent> {
+    return events.toList()
+  }
+
+  @JsonIgnore
+  @VisibleForTesting
+  fun getPendingEvents(flush: Boolean = true): List<SagaEvent> {
+    val pending = mutableListOf<SagaEvent>()
+    pending.addAll(pendingEvents)
+    if (flush) {
+      pendingEvents.clear()
+    }
+    return pending.toList()
+  }
+
+  fun log(message: String) {
+    this.log.info(message)
+
+    addEvent(
+      SagaLogAppended(
+        SagaLogAppended.Message(message, null),
+        null
+      )
+    )
+  }
+
+  fun log(message: String, vararg replacements: Any?) {
+    log(String.format(message, *replacements))
+  }
+
+  fun getLogs(): List<String> {
+    return events.filterIsInstance<SagaLogAppended>().mapNotNull { it.message.user }
+  }
+}
diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/persistence/DefaultSagaRepository.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/persistence/DefaultSagaRepository.kt
new file mode 100644
index 00000000000..6dd2fbc6ebd
--- /dev/null
+++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/persistence/DefaultSagaRepository.kt
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.saga.persistence
+
+import com.netflix.spinnaker.clouddriver.event.Aggregate
+import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository
+import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository.ListAggregatesCriteria
+import com.netflix.spinnaker.clouddriver.saga.SagaEvent
+import com.netflix.spinnaker.clouddriver.saga.SagaSaved
+import com.netflix.spinnaker.clouddriver.saga.models.Saga
+import org.slf4j.LoggerFactory
+
+/**
+ * The default [SagaRepository] implementation. Since Saga persistence is powered entirely by the
+ * eventing lib, this class does not need an explicit persistence backend dependency.
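+ *
+ * Wiring sketch (the bean method name is illustrative; any [EventRepository] implementation works):
+ * ```
+ * @Bean
+ * fun sagaRepository(eventRepository: EventRepository): SagaRepository =
+ *   DefaultSagaRepository(eventRepository)
+ * ```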
+ */ +class DefaultSagaRepository( + private val eventRepository: EventRepository +) : SagaRepository { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + override fun list(criteria: SagaRepository.ListCriteria): List { + val sagas = if (criteria.names != null && criteria.names.isNotEmpty()) { + var token: String? = null + val aggregates: MutableList = mutableListOf() + do { + eventRepository.listAggregates(ListAggregatesCriteria(token = token, perPage = 1_000)).let { + aggregates.addAll(it.aggregates) + token = it.nextPageToken + } + } while (token != null) + aggregates + } else { + eventRepository.listAggregates(ListAggregatesCriteria()).aggregates + }.mapNotNull { get(it.type, it.id) } + + return if (criteria.running == null) { + sagas + } else { + sagas.filter { it.isComplete() != criteria.running } + } + } + + override fun get(type: String, id: String): Saga? { + val events = eventRepository.list(type, id) + if (events.isEmpty()) { + return null + } + + return events + .filterIsInstance() + .last() + .let { + Saga( + name = it.getMetadata().aggregateType, + id = it.getMetadata().aggregateId, + sequence = it.sequence + ) + } + .also { saga -> + saga.hydrateEvents(events.filterIsInstance()) + } + } + + override fun save(saga: Saga, additionalEvents: List) { + val events: MutableList = saga.getPendingEvents().toMutableList() + if (additionalEvents.isNotEmpty()) { + events.addAll(additionalEvents) + } + events.add(SagaSaved(saga.getSequence())) + eventRepository.save(saga.name, saga.id, saga.getVersion(), events) + } +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/persistence/SagaRepository.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/persistence/SagaRepository.kt new file mode 100644 index 00000000000..fd48d675145 --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/persistence/SagaRepository.kt @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.persistence + +import com.netflix.spinnaker.clouddriver.saga.SagaEvent +import com.netflix.spinnaker.clouddriver.saga.models.Saga + +/** + * Provides a thin DSL above [EventRepository] for persisting and retrieving Sagas. + */ +interface SagaRepository { + + /** + * List all [Saga]s that match the provided [criteria]. + * + * TODO(rz): Support pagination + * + * @param criteria Query criteria for Sagas + * @return A list of matching Sagas + */ + fun list(criteria: ListCriteria): List + + /** + * Get a [Saga] by its [type] and [id]. + * + * @param type The type of Saga (e.g. awsDeploy, awsCreateLoadBalancer, etc) + * @param id The specific ID of the Saga + * @return The matching [Saga], if any + */ + fun get(type: String, id: String): Saga? + + /** + * Save a [Saga] as a [SagaSaved] event. 
+ */ + fun save(saga: Saga, additionalEvents: List = listOf()) + + /** + * @param running Whether or not running [Saga]s should be returned. If undefined, running state will + * not be considered + * @param names The names (aggregate types) of Sagas to filter by + */ + data class ListCriteria( + val running: Boolean? = null, + val names: List? = null + ) +} diff --git a/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/util.kt b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/util.kt new file mode 100644 index 00000000000..7a696c0f76e --- /dev/null +++ b/clouddriver-saga/src/main/kotlin/com/netflix/spinnaker/clouddriver/saga/util.kt @@ -0,0 +1,55 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.saga + +import com.fasterxml.jackson.annotation.JsonTypeName +import com.netflix.spinnaker.clouddriver.saga.exceptions.SagaSystemException +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import org.springframework.core.ResolvableType + +/** + * Get the name of the provided [command] instance. + * + */ +internal fun getStepCommandName(command: SagaCommand): String = + getStepCommandName(command.javaClass) + +/** + * Get the name of the provided [commandClass]. + * + * TODO(rz): Do we want our own annotation instead of relying on [JsonTypeName]? + */ +internal fun getStepCommandName(commandClass: Class): String = + commandClass.getAnnotation(JsonTypeName::class.java)?.value ?: commandClass.simpleName + +/** + * Get the [SagaCommand] for a given [SagaAction]. + */ +internal fun getCommandTypeFromAction(action: Class>): Class { + val actionType = ResolvableType.forClass(SagaAction::class.java, action) + actionType.resolve() + + val commandType = actionType.getGeneric(0) + commandType.resolve() + + val rawClass = commandType.rawClass!! + if (SagaCommand::class.java.isAssignableFrom(rawClass)) { + @Suppress("UNCHECKED_CAST") + return rawClass as Class + } + throw SagaSystemException("Resolved next action is not a SagaCommand: ${rawClass.simpleName}") +} diff --git a/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaServiceTest.kt b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaServiceTest.kt new file mode 100644 index 00000000000..9695331b6a1 --- /dev/null +++ b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaServiceTest.kt @@ -0,0 +1,155 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga + +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import dev.minutest.rootContext +import strikt.api.expectThat +import strikt.assertions.contains +import strikt.assertions.containsExactly +import strikt.assertions.filterIsInstance +import strikt.assertions.isNotNull +import strikt.assertions.map + +class SagaServiceTest : AbstractSagaTest() { + + fun tests() = rootContext { + fixture { + BaseSagaFixture() + } + + context("a simple saga") { + val flow = SagaFlow() + .then(Action1::class.java) + .then(Action2::class.java) + .then(Action3::class.java) + + test("applies all commands") { + sagaService.applyBlocking("test", "test", flow, DoAction1()) + + expectThat(sagaRepository.get("test", "test")) + .isNotNull() + .and { + get { getEvents() }.filterIsInstance().map { it.javaClass.simpleName }.containsExactly( + "DoAction1", + "DoAction2", + "DoAction3" + ) + get { getEvents() }.map { it.javaClass }.contains(SagaCompleted::class.java) + } + } + } + + context("re-entrance") { + fixture { + ReentranceFixture() + } + + mapOf( + "completed doAction1" to listOf( + DoAction1(), + SagaCommandCompleted("doAction1"), + DoAction2() + ), + "completed doAction1, doAction2 incomplete" to listOf( + DoAction1(), + SagaCommandCompleted("doAction1"), + DoAction2(), + SagaCommandCompleted("doAction2 incomplete") + ), + "completed doAction1, doAction2 skipped" to listOf( + DoAction1(), + SagaCommandCompleted("doAction1"), + DoAction2(), + SagaCommandSkipped("doAction2", "very valid reasons"), + DoAction3() + ) + ).forEach { (name, previousEvents) -> + test("a saga resumes where it left off: $name") { + val flow = SagaFlow() + .then(ReentrantAction1::class.java) + .then(ReentrantAction2::class.java) + .then(ReentrantAction3::class.java) + + // We've already done some of the work. 
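+          // Persisting these events up front simulates a saga that previously stopped mid-flow,
+          // so the flow iterator's seekers are expected to resume from the first incomplete command.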
+ sagaRepository.save(saga, previousEvents) + + // Apply the saga "again" + sagaService.applyBlocking("test", "test", flow, DoAction1()) + + val saga = sagaRepository.get("test", "test") + expectThat(saga) + .describedAs(name) + .isNotNull() + .and { + get { getEvents() }.filterIsInstance().map { it.javaClass.simpleName }.containsExactly( + "DoAction1", + "DoAction2", + "DoAction3" + ) + when (name) { + "completed doAction1, doAction2 incomplete" -> + get { getEvents() }.filterIsInstance().map { it.command }.containsExactly( + "doAction1", + "doAction2 incomplete", + "doAction2", + "doAction3" + ) + else -> + get { getEvents() }.filterIsInstance().map { it.command }.containsExactly( + "doAction1", + "doAction2", + "doAction3" + ) + } + get { getEvents() }.map { it.javaClass }.contains(SagaCompleted::class.java) + } + } + } + } + } + + private inner class ReentranceFixture : BaseSagaFixture() { + init { + registerBeans( + applicationContext, + ReentrantAction1::class.java, + ReentrantAction2::class.java, + ReentrantAction3::class.java + ) + } + } +} + +private class ReentrantAction1 : SagaAction { + override fun apply(command: DoAction1, saga: Saga): SagaAction.Result { + return SagaAction.Result(DoAction2()) + } +} + +private class ReentrantAction2 : SagaAction { + override fun apply(command: DoAction2, saga: Saga): SagaAction.Result { + return SagaAction.Result(DoAction3()) + } +} + +private class ReentrantAction3 : SagaAction { + override fun apply(command: DoAction3, saga: Saga): SagaAction.Result { + return SagaAction.Result() + } +} diff --git a/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaSystemTest.kt b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaSystemTest.kt new file mode 100644 index 00000000000..2c7e7a72383 --- /dev/null +++ b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/SagaSystemTest.kt @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.saga + +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.event.config.EventSourceAutoConfiguration +import com.netflix.spinnaker.clouddriver.saga.config.SagaAutoConfiguration +import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import org.springframework.boot.autoconfigure.AutoConfigurations +import org.springframework.boot.test.context.assertj.AssertableApplicationContext +import org.springframework.boot.test.context.runner.ApplicationContextRunner +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration +import strikt.api.expect +import strikt.assertions.isA +import strikt.assertions.isNotNull + +class SagaSystemTest : JUnit5Minutests { + + fun tests() = rootContext { + fixture { + ApplicationContextRunner() + .withConfiguration( + AutoConfigurations.of( + SagaAutoConfiguration::class.java, + EventSourceAutoConfiguration::class.java + ) + ) + } + + test("supports no config") { + withUserConfiguration(SagaAutoConfiguration::class.java, DependencyConfiguration::class.java) + .run { ctx: AssertableApplicationContext -> + expect { + that(ctx.getBean("sagaService")).isNotNull() + that(ctx.getBean("sagaRepository")).isA() + } + } + } + } + + @Configuration + open class DependencyConfiguration { + @Bean + open fun registry(): Registry = NoopRegistry() + } +} diff --git a/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/examples/BranchingExampleTest.kt b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/examples/BranchingExampleTest.kt new file mode 100644 index 00000000000..a658e558df5 --- /dev/null +++ b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/examples/BranchingExampleTest.kt @@ -0,0 +1,133 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.saga.examples + +import com.fasterxml.jackson.annotation.JsonTypeName +import com.netflix.spinnaker.clouddriver.saga.AbstractSagaEvent +import com.netflix.spinnaker.clouddriver.saga.AbstractSagaTest +import com.netflix.spinnaker.clouddriver.saga.ManyCommands +import com.netflix.spinnaker.clouddriver.saga.SagaCommand +import com.netflix.spinnaker.clouddriver.saga.SagaCommandCompleted +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction +import com.netflix.spinnaker.clouddriver.saga.flow.SagaCompletionHandler +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import dev.minutest.rootContext +import strikt.api.expectThat +import strikt.assertions.isEqualTo + +/** + * This example shows how to do branching logic inside of Sagas. 
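+ *
+ * The branch is declared with [SagaFlow.on]; the shape exercised below looks like this (sketch):
+ * ```
+ * SagaFlow()
+ *   .then(TheThingAction::class.java)
+ *   .on(ShouldDoOptionalThings::class.java) { it.then(AnOptionalThingAction::class.java) }
+ *   .then(FinishAction::class.java)
+ * ```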
+ */ +class BranchingExampleTest : AbstractSagaTest() { + fun tests() = rootContext { + + context("branching logic") { + fixture { Fixture() } + + val flow = SagaFlow() + .then(PrepareAction::class.java) + .then(TheThingAction::class.java) + .on(ShouldDoOptionalThings::class.java) { + it.then(AnOptionalThingAction::class.java) + } + .then(FinishAction::class.java) + .completionHandler(ThingsCompletedHandler::class.java) + + test("branch skipped") { + expectThat(sagaService.applyBlocking("test", "test", flow, PrepareForThings(false))) + .isEqualTo("not branch") + } + + test("branch entered") { + expectThat(sagaService.applyBlocking("test", "test", flow, PrepareForThings(true))) + .isEqualTo("branch") + } + } + } + + private inner class Fixture : BaseSagaFixture() { + init { + registerBeans( + applicationContext, + ThingsCompletedHandler::class.java, + PrepareAction::class.java, + TheThingAction::class.java, + AnOptionalThingAction::class.java, + FinishAction::class.java, + ShouldDoOptionalThings::class.java + ) + } + } + + @JsonTypeName("prepareForThings") + class PrepareForThings(val doOptionalThings: Boolean) : AbstractSagaEvent(), SagaCommand + + @JsonTypeName("doTheThing") + class DoTheThing : AbstractSagaEvent(), SagaCommand + + @JsonTypeName("doAnOptionalThing") + class DoAnOptionalThing : AbstractSagaEvent(), SagaCommand + + @JsonTypeName("finishThings") + class FinishThings : AbstractSagaEvent(), SagaCommand + + class PrepareAction : SagaAction { + override fun apply(command: PrepareForThings, saga: Saga): SagaAction.Result { + return SagaAction.Result(DoTheThing()) + } + } + + class TheThingAction : SagaAction { + override fun apply(command: DoTheThing, saga: Saga): SagaAction.Result { + // TODO(rz): Add a condition predicate that just checks for whether or not the command exists at all instead? + return SagaAction.Result( + ManyCommands( + DoAnOptionalThing(), + FinishThings() + ) + ) + } + } + + class AnOptionalThingAction : SagaAction { + override fun apply(command: DoAnOptionalThing, saga: Saga): SagaAction.Result { + return SagaAction.Result() + } + } + + internal class FinishAction : SagaAction { + override fun apply(command: FinishThings, saga: Saga): SagaAction.Result { + return SagaAction.Result() + } + } + + internal class ShouldDoOptionalThings : SagaFlow.ConditionPredicate { + override fun test(t: Saga): Boolean = t.getEvents().filterIsInstance().first().doOptionalThings + override val name: String = "shouldDoOptionalThings" + } + + internal class ThingsCompletedHandler : SagaCompletionHandler { + override fun handle(completedSaga: Saga): String? { + val optionalThingApplied = completedSaga + .getEvents() + .filterIsInstance() + .any { it.matches(DoAnOptionalThing::class.java) } + return if (optionalThingApplied) "branch" else "not branch" + } + } +} diff --git a/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/examples/SpringExampleTest.kt b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/examples/SpringExampleTest.kt new file mode 100644 index 00000000000..ccb579e08ef --- /dev/null +++ b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/examples/SpringExampleTest.kt @@ -0,0 +1,108 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.saga.examples + +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.event.config.EventSourceAutoConfiguration +import com.netflix.spinnaker.clouddriver.saga.Action1 +import com.netflix.spinnaker.clouddriver.saga.Action2 +import com.netflix.spinnaker.clouddriver.saga.Action3 +import com.netflix.spinnaker.clouddriver.saga.DoAction1 +import com.netflix.spinnaker.clouddriver.saga.SagaService +import com.netflix.spinnaker.clouddriver.saga.ShouldBranchPredicate +import com.netflix.spinnaker.clouddriver.saga.config.SagaAutoConfiguration +import com.netflix.spinnaker.clouddriver.saga.flow.SagaCompletionHandler +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import java.util.function.Predicate +import org.springframework.boot.autoconfigure.AutoConfigurations +import org.springframework.boot.test.context.assertj.AssertableApplicationContext +import org.springframework.boot.test.context.runner.ApplicationContextRunner +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration +import strikt.api.expectThat +import strikt.assertions.isA +import strikt.assertions.isEqualTo + +/** + * Shows an example of how to wire up a Saga using Spring! + */ +class SpringExampleTest : JUnit5Minutests { + fun tests() = rootContext { + context("a saga flow") { + fixture { + ApplicationContextRunner() + .withConfiguration( + AutoConfigurations.of( + SagaAutoConfiguration::class.java, + EventSourceAutoConfiguration::class.java + ) + ) + } + + val flow = SagaFlow() + .then(Action1::class.java) + .on(ShouldBranchPredicate::class.java) { + it.then(Action2::class.java) + } + .then(Action3::class.java) + .completionHandler(MyCompletionHandler::class.java) + + test("completes the saga") { + withUserConfiguration(SagaAutoConfiguration::class.java, DependencyConfiguration::class.java) + .run { ctx: AssertableApplicationContext -> + expectThat(ctx.getBean("sagaService")).isA() + + val result = ctx + .getBean(SagaService::class.java) + .applyBlocking("test", "test", flow, DoAction1()) + + expectThat(result).isEqualTo("yayyyyy complete!") + } + } + } + } + + @Configuration + open class DependencyConfiguration { + @Bean + open fun registry(): Registry = NoopRegistry() + + @Bean + open fun action1(): Action1 = Action1() + + @Bean + open fun action2(): Action2 = Action2() + + @Bean + open fun action3(): Action3 = Action3() + + @Bean + open fun shouldBranchPredicate(): Predicate = ShouldBranchPredicate() + + @Bean + open fun myCompletionHandler(): SagaCompletionHandler = MyCompletionHandler() + } + + private class MyCompletionHandler : SagaCompletionHandler { + override fun handle(completedSaga: Saga): String? { + return "yayyyyy complete!" 
+ } + } +} diff --git a/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowIteratorTest.kt b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowIteratorTest.kt new file mode 100644 index 00000000000..37f46d0470f --- /dev/null +++ b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowIteratorTest.kt @@ -0,0 +1,146 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.saga.flow + +import com.netflix.spinnaker.clouddriver.saga.AbstractSagaTest +import com.netflix.spinnaker.clouddriver.saga.Action1 +import com.netflix.spinnaker.clouddriver.saga.Action2 +import com.netflix.spinnaker.clouddriver.saga.Action3 +import com.netflix.spinnaker.clouddriver.saga.DoAction1 +import com.netflix.spinnaker.clouddriver.saga.DoAction2 +import com.netflix.spinnaker.clouddriver.saga.DoAction3 +import com.netflix.spinnaker.clouddriver.saga.SagaCommandCompleted +import com.netflix.spinnaker.clouddriver.saga.SagaCommandSkipped +import com.netflix.spinnaker.clouddriver.saga.SagaConditionEvaluated +import com.netflix.spinnaker.clouddriver.saga.ShouldBranch +import com.netflix.spinnaker.clouddriver.saga.ShouldBranchPredicate +import dev.minutest.rootContext +import strikt.api.expect +import strikt.assertions.first +import strikt.assertions.get +import strikt.assertions.hasSize +import strikt.assertions.isA +import strikt.assertions.isEqualTo +import strikt.assertions.isFalse +import strikt.assertions.isTrue + +class SagaFlowIteratorTest : AbstractSagaTest() { + + fun tests() = rootContext { + fixture { Fixture() } + + test("iterates top-level actions only") { + expect { + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isFalse() + } + } + + test("iterates conditional actions") { + saga.addEventForTest(ShouldBranch()) + + expect { + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isFalse() + } + } + + test("conditions are not re-evaluated") { + saga.addEventForTest(SagaConditionEvaluated("shouldBranch", true)) + + expect { + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isFalse() + } + } + + test("seeks iterator with partially applied saga") { + saga.addEventForTest(SagaCommandCompleted("doAction1")) + saga.addEventForTest(ShouldBranch()) + + expect { + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.next()).get { action }.isA() + 
that(subject.hasNext()).isFalse() + } + } + + test("handles ManyCommands completed out-of-order") { + saga.addEventForTest(DoAction1()) + saga.addEventForTest(SagaCommandCompleted("doAction1")) + saga.addEventForTest(ShouldBranch()) + saga.addEventForTest(DoAction2()) + saga.addEventForTest(DoAction3()) + + expect { + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isFalse() + } + } + + test("adds skipped command messages for skipped branches") { + saga.addEventForTest(DoAction1()) + saga.addEventForTest(SagaCommandCompleted("doAction1")) + saga.addEventForTest(DoAction2()) + saga.addEventForTest(DoAction3()) + + expect { + that(subject.hasNext()).isTrue() + that(subject.next()).get { action }.isA() + that(subject.hasNext()).isFalse() + + // The iterator doesn't perform saves, so the skip events will be pending. + that(saga.getPendingEvents().filterIsInstance()).and { + hasSize(1) + first().get { command }.isEqualTo("doAction2") + } + } + } + } + + private inner class Fixture : BaseSagaFixture() { + val flow = SagaFlow() + .then(Action1::class.java) + .on(ShouldBranchPredicate::class.java) { + it.then(Action2::class.java) + } + .then(Action3::class.java) + + val subject = SagaFlowIterator(sagaRepository, applicationContext, saga, flow) + + init { + sagaRepository.save(saga) + } + } +} diff --git a/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowTest.kt b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowTest.kt new file mode 100644 index 00000000000..9a6d2a0da82 --- /dev/null +++ b/clouddriver-saga/src/test/kotlin/com/netflix/spinnaker/clouddriver/saga/flow/SagaFlowTest.kt @@ -0,0 +1,122 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
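The iterator tests above pin down the resume semantics. Consuming a `SagaFlowIterator` looks roughly like this (a sketch; the constructor arguments mirror the fixture above, and the Spring `ApplicationContext` parameter type is an assumption):

```kotlin
import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow
import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlowIterator
import com.netflix.spinnaker.clouddriver.saga.models.Saga
import com.netflix.spinnaker.clouddriver.saga.persistence.SagaRepository
import org.springframework.context.ApplicationContext

// Resumes a partially applied saga: the iterator seeks past every action whose
// SagaCommandCompleted event is already on the saga's event log.
fun printRemainingActions(
    sagaRepository: SagaRepository,
    applicationContext: ApplicationContext,
    saga: Saga,
    flow: SagaFlow
) {
    val steps = SagaFlowIterator(sagaRepository, applicationContext, saga, flow)
    while (steps.hasNext()) {
        val step = steps.next()
        // Branches whose condition fails are recorded as pending SagaCommandSkipped
        // events rather than surfaced here.
        println("next action to apply: ${step.action.javaClass.simpleName}")
    }
}
```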
+ */ +package com.netflix.spinnaker.clouddriver.saga.flow + +import com.netflix.spinnaker.clouddriver.saga.AbstractSagaEvent +import com.netflix.spinnaker.clouddriver.saga.Action1 +import com.netflix.spinnaker.clouddriver.saga.Action2 +import com.netflix.spinnaker.clouddriver.saga.Action3 +import com.netflix.spinnaker.clouddriver.saga.SagaCommand +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow.InjectLocation.AFTER +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow.InjectLocation.BEFORE +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import strikt.api.expectThat +import strikt.assertions.containsExactly +import strikt.assertions.hasSize + +class SagaFlowTest : JUnit5Minutests { + + fun tests() = rootContext { + context("inject") { + + test("add step before") { + val flow = SagaFlow() + .then(Action1::class.java) + .then(Action2::class.java) + .then(Action3::class.java) + + flow.inject(BEFORE, Action2::class.java, Action4::class.java) + + expectThat(flow.steps) + .hasSize(4) + .get { filterIsInstance().map { it.action } } + .containsExactly( + Action1::class.java, + Action4::class.java, + Action2::class.java, + Action3::class.java + ) + } + + test("add step after") { + val flow = SagaFlow() + .then(Action1::class.java) + .then(Action2::class.java) + .then(Action3::class.java) + + flow.inject(AFTER, Action2::class.java, Action4::class.java) + + expectThat(flow.steps) + .hasSize(4) + .get { filterIsInstance().map { it.action } } + .containsExactly( + Action1::class.java, + Action2::class.java, + Action4::class.java, + Action3::class.java + ) + } + + test("add step after last") { + val flow = SagaFlow() + .then(Action1::class.java) + .then(Action2::class.java) + .then(Action3::class.java) + + flow.inject(AFTER, Action3::class.java, Action4::class.java) + + expectThat(flow.steps) + .hasSize(4) + .get { filterIsInstance().map { it.action } } + .containsExactly( + Action1::class.java, + Action2::class.java, + Action3::class.java, + Action4::class.java + ) + } + + test("add step first") { + val flow = SagaFlow() + .then(Action1::class.java) + .then(Action2::class.java) + .then(Action3::class.java) + + flow.injectFirst(Action4::class.java) + + expectThat(flow.steps) + .hasSize(4) + .get { filterIsInstance().map { it.action } } + .containsExactly( + Action4::class.java, + Action1::class.java, + Action2::class.java, + Action3::class.java + ) + } + } + } + + class DoAction4 : AbstractSagaEvent(), SagaCommand + + private inner class Action4 : SagaAction { + override fun apply(command: DoAction4, saga: Saga): SagaAction.Result { + throw UnsupportedOperationException("not implemented") + } + } +} diff --git a/clouddriver-security/clouddriver-security.gradle b/clouddriver-security/clouddriver-security.gradle index 554a1c8ef09..3b226a5e5e3 100644 --- a/clouddriver-security/clouddriver-security.gradle +++ b/clouddriver-security/clouddriver-security.gradle @@ -1,12 +1,30 @@ dependencies { - spinnaker.group('jackson') + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") - compile spinnaker.dependency('kork') - compile spinnaker.dependency('slf4j') - compile project(':cats:cats-core') + implementation "com.fasterxml.jackson.core:jackson-annotations" + implementation "com.fasterxml.jackson.core:jackson-core" + implementation "com.fasterxml.jackson.core:jackson-databind" + implementation "com.netflix.frigga:frigga" + implementation 
"io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-moniker" + implementation "org.apache.groovy:groovy" + implementation "org.slf4j:jcl-over-slf4j" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "com.github.ben-manes.caffeine:guava" - compile spinnaker.dependency('fiat') - compile spinnaker.dependency('frigga') + testImplementation project(":cats:cats-test") - testCompile project(':cats:cats-test') + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.jupiter:junit-jupiter-params" + testImplementation "org.mockito:mockito-core" + testImplementation "org.mockito:mockito-junit-jupiter" + testImplementation "org.mockito:mockito-core" + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" } diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentials.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentials.java deleted file mode 100644 index d0eb16beefd..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentials.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.netflix.spinnaker.fiat.model.Authorization; -import com.netflix.spinnaker.fiat.model.resources.Permissions; - -import java.util.*; -import java.util.stream.Collectors; - -/** - * Implementations of this interface will provide properties specific to a named account object, - * with capability to retrieve a type of credential object (such as AWSCredentials or GoogleCredentials). - * - * - * @param - type of credential object to be returned - */ -public interface AccountCredentials { - /** - * Provides the name of the account to be returned. - * - * Uniquely identifies the account. - * - * @return the name of the account - */ - String getName(); - - /** - * Provides the environment name for the account. - * - * Many accounts can share the same environment (e.g. dev, test, prod) - * - * @return the Environment name - */ - String getEnvironment(); - - /** - * Provides the type for the account. - * - * Account type is typically consistent among the set of credentials that represent a related set of environments. - * - * e.g.: - *
- * <ul>
- *   <li>account name: maindev, environment: dev, accountType: main</li>
- *   <li>account name: maintest, environment: test, accountType: main</li>
- *   <li>account name: mainprod, environment: prod, accountType: main</li>
- * </ul>
- * - * @return the type for the account. - */ - String getAccountType(); - - /** - * Provides the "version" of the account's provider. If an account has been configured at a particular version, it can - * be supported by different caching agents and operation converters. By default every account is at version v1. - * - * @return the account's version. - */ - default ProviderVersion getProviderVersion() { - return ProviderVersion.v1; - } - - /** - * Provides a named "skin" as a signal for Spinnaker API clients, e.g. Deck, to alter their behavior. - * By default, returns an account's provider version, - * but does not need to be coupled to a provider version. - * - * @return the account's skin. - */ - default String getSkin() { - return getProviderVersion().toString(); - } - - /** - * @return the id for the account (may be null if not supported by underlying cloud provider) - */ - default String getAccountId() { - return null; - } - - /** - * Returns an associated credentials object, which may be lazily initialized based off of some detail encapsulated - * within the implementation (like environment or keys, etc) - * - * @return typed credentials object - */ - @JsonIgnore T getCredentials(); - - /** - * Provides the name of the cloud provider. Typically something like 'aws', 'gce' or 'docker'. - * - * @return the name of the cloud provider - */ - String getCloudProvider(); - - /** - * A user in ANY required group should be allowed access to this account. - * - * @return the group names that govern access to this account, empty indicates a public account accessible by all. - */ - @Deprecated - List getRequiredGroupMembership(); - - default Permissions getPermissions() { - Set rgm = - Optional.ofNullable(getRequiredGroupMembership()) - .map(l -> - l.stream() - .map(s -> Optional.ofNullable(s) - .map(String::trim) - .map(String::toLowerCase) - .orElse("")) - .filter(s -> !s.isEmpty()) - .collect(Collectors.toSet())) - .orElse(Collections.EMPTY_SET); - if (rgm.isEmpty()) { - return Permissions.EMPTY; - } - - Permissions.Builder perms = new Permissions.Builder(); - for (String role : rgm) { - perms.add(Authorization.READ, role); - perms.add(Authorization.WRITE, role); - } - return perms.build(); - } -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentialsProvider.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentialsProvider.java deleted file mode 100644 index ddaa0191ec4..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentialsProvider.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security; - -import java.util.Set; - -/** - * Implementations of this interface will provide a mechanism to store and retrieve {@link AccountCredentials} - * objects. 
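The deleted `getPermissions()` default above (it reappears on `AbstractAccountCredentials` later in this diff) folds the deprecated `requiredGroupMembership` list into a fiat `Permissions` object. Restated as a sketch:

```kotlin
import com.netflix.spinnaker.fiat.model.Authorization
import com.netflix.spinnaker.fiat.model.resources.Permissions

// Same normalization as the Java default: trim and lowercase each legacy role,
// drop blanks, then grant both READ and WRITE on every remaining role.
fun permissionsFromLegacyRoles(requiredGroupMembership: List<String?>?): Permissions {
    val roles = requiredGroupMembership.orEmpty()
        .mapNotNull { it?.trim()?.lowercase() }
        .filter { it.isNotEmpty() }
        .toSet()
    if (roles.isEmpty()) return Permissions.EMPTY

    val builder = Permissions.Builder()
    roles.forEach { role ->
        builder.add(Authorization.READ, role)
        builder.add(Authorization.WRITE, role)
    }
    return builder.build()
}
```

Every surviving legacy role receives both READ and WRITE, which is why `requiredGroupMembership` can only express all-or-nothing access per role.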
For manipulating the backing of this provider, consumers of this API should get access to its corresponding {@link AccountCredentialsRepository} - * - * - */ -public interface AccountCredentialsProvider { - - /** - * Returns all of the accounts known to the repository of this provider. - * - * @return a set of account names - */ - Set getAll(); - - /** - * Returns a specific {@link AccountCredentials} object a specified name - * - * @param name the name of the account - * @return account credentials object - */ - AccountCredentials getCredentials(String name); -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentialsRepository.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentialsRepository.java deleted file mode 100644 index fe53b03d191..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/AccountCredentialsRepository.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security; - -import java.util.Set; - -/** - * Represents a repository for CRUD operations pertaining to {@link AccountCredentials}. May be - * required by the {@link AccountCredentialsProvider} to get a handle on credentials objects. - * Consumers should use this repository interface for manipulating the backing of the provider. - * - * - */ -public interface AccountCredentialsRepository { - - /** - * Returns a single {@link AccountCredentials} object, referenced by the specified name - * - * @param key the key to retrieve from the repository - * @return account credentials - */ - AccountCredentials getOne(String key); - - /** - * Returns all {@link AccountCredentials} objects known to this repository - * - * @return a set of account credentials - */ - Set getAll(); - - /** - * Stores an {@link AccountCredentials} object at this repository. This is an identify function. - * - * @param key the key to associate with this account credentials object - * @param credentials account credentials object to save - * @return input - */ - AccountCredentials save(String key, AccountCredentials credentials); - - /** - * Indicates that the keyed reference should be updated with the provided {@link AccountCredentials} object. - * This is an identify function. 
- * - * @param key the key to associate with this account credentials object - * @param credentials account credentials object to associate with the provided key - * @return input - */ - AccountCredentials update(String key, AccountCredentials credentials); - - /** - * Should remove the keyed reference from the repository - * - * @param key ref to be removed - */ - void delete(String key); -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/CredentialsInitializerSynchronizable.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/CredentialsInitializerSynchronizable.java deleted file mode 100644 index c201c0d378c..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/CredentialsInitializerSynchronizable.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security; - -/** - * This interface is used by the credentials refresh controller to identify credentials initializers that should be - * re-created when the credentials have changed. - */ -public interface CredentialsInitializerSynchronizable { - /** - * Get the name of the bean to request from Spring's application context. It is expected that the Accounts and Agents - * managed by the credentials initializer will be synchronized with the latest configured accounts as a result of - * requesting this bean. - */ - String getCredentialsSynchronizationBeanName(); -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProvider.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProvider.java deleted file mode 100644 index 89a91b7e1b0..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProvider.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.security; - -import java.util.Set; - -public class DefaultAccountCredentialsProvider implements AccountCredentialsProvider { - private final AccountCredentialsRepository repository; - - public DefaultAccountCredentialsProvider() { - this.repository = new MapBackedAccountCredentialsRepository(); - } - - public DefaultAccountCredentialsProvider(AccountCredentialsRepository repository) { - this.repository = repository; - } - - @Override - public Set getAll() { - return repository.getAll(); - } - - @Override - public AccountCredentials getCredentials(String name) { - return repository.getOne(name); - } -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAllowedAccountsValidator.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAllowedAccountsValidator.groovy index d840fbdf3fd..b1db219bbc5 100644 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAllowedAccountsValidator.groovy +++ b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAllowedAccountsValidator.groovy @@ -43,7 +43,9 @@ class DefaultAllowedAccountsValidator implements AllowedAccountsValidator { return } - if (!accountCredentialsProvider.all.find { it.requiredGroupMembership || it.permissions?.isRestricted() }) { + if (!accountCredentialsProvider.all.find { + it.requiredGroupMembership || ((it instanceof AbstractAccountCredentials) && it.permissions?.isRestricted()) + }) { // no accounts have group restrictions so no need to validate / log return } @@ -71,7 +73,7 @@ class DefaultAllowedAccountsValidator implements AllowedAccountsValidator { private void validateTargetAccount(AccountCredentials credentials, Collection allowedAccounts, Object description, String user, Errors errors) { List requiredGroups = [] boolean anonymousAllowed = true - if (credentials.permissions?.isRestricted()) { + if ((credentials instanceof AbstractAccountCredentials) && credentials.permissions?.isRestricted()) { anonymousAllowed = false if (credentials.requiredGroupMembership) { log.warn("For account ${credentials.name}: using permissions ${credentials.permissions} over ${credentials.requiredGroupMembership} for authorization check.") diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/MapBackedAccountCredentialsRepository.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/MapBackedAccountCredentialsRepository.groovy index 4590e7dca70..16d70a0f0d6 100644 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/MapBackedAccountCredentialsRepository.groovy +++ b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/MapBackedAccountCredentialsRepository.groovy @@ -49,7 +49,7 @@ public class MapBackedAccountCredentialsRepository implements AccountCredentials */ @Override public AccountCredentials save(String key, AccountCredentials credentials) { - if (!credentials?.getRequiredGroupMembership()?.isEmpty()) { + if (!credentials?.getRequiredGroupMembership()?.isEmpty()) { log.warn("Deprecated `requiredGroupMembership` found for account ${credentials?.name}." 
+ " Please update to `permissions` format.") } diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/NoopCredentialsInitializerSynchronizable.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/NoopCredentialsInitializerSynchronizable.groovy deleted file mode 100644 index 7dca09879c2..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/NoopCredentialsInitializerSynchronizable.groovy +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security - -class NoopCredentialsInitializerSynchronizable implements CredentialsInitializerSynchronizable { - String credentialsSynchronizationBeanName = null -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtils.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtils.groovy index b1aedf37c34..2da82d51caa 100644 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtils.groovy +++ b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtils.groovy @@ -52,14 +52,14 @@ public class ProviderUtils { /** * Build a thread-safe set containing each account in the accountCredentialsRepository that is of type - * credentialsType, and (if specified) of provdierVersion version. + * credentialsType and (if specified) for a cloud provider. */ - public static Set buildThreadSafeSetOfAccounts(AccountCredentialsRepository accountCredentialsRepository, Class credentialsType, ProviderVersion version) { + public static Set buildThreadSafeSetOfAccounts(AccountCredentialsRepository accountCredentialsRepository, Class credentialsType, String cloudProvider) { def allAccounts = Collections.newSetFromMap(new ConcurrentHashMap()) allAccounts.addAll(accountCredentialsRepository.all.findResults { credentialsType.isInstance(it) ? credentialsType.cast(it) : null }) - if (version != null) { - allAccounts = allAccounts.findAll { acc -> acc.providerVersion == version } + if (cloudProvider != null) { + allAccounts = allAccounts.findAll { acc -> acc.cloudProvider == cloudProvider } } return allAccounts @@ -147,15 +147,4 @@ public class ProviderUtils { } } } - - /** - * Request from Spring's application context each of the provider synchronizer type wrapper beans. It is expected that - * the Accounts and Agents managed by each provider will be synchronized with the latest configured accounts as a - * result of requesting these beans. 
- */ - public static void synchronizeAgentProviders(def appContext, def providerSynchronizerTypeWrappers) { - for (def providerSynchronizerTypeWrapper : providerSynchronizerTypeWrappers) { - appContext.getBean(providerSynchronizerTypeWrapper.synchronizerType) - } - } } diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderVersion.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderVersion.java deleted file mode 100644 index 6d3def7bc5e..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/ProviderVersion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.spinnaker.clouddriver.security; - -public enum ProviderVersion { - v1, - v2 -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/config/SecurityConfig.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/config/SecurityConfig.groovy index e18d47f7015..f92feac4f8a 100644 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/config/SecurityConfig.groovy +++ b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/config/SecurityConfig.groovy @@ -18,13 +18,10 @@ package com.netflix.spinnaker.clouddriver.security.config import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.AllowedAccountsValidator -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable import com.netflix.spinnaker.clouddriver.security.DefaultAllowedAccountsValidator -import com.netflix.spinnaker.clouddriver.security.NoopCredentialsInitializerSynchronizable + import com.netflix.spinnaker.fiat.shared.EnableFiatAutoConfig import com.netflix.spinnaker.fiat.shared.FiatStatus -import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import org.springframework.boot.context.properties.ConfigurationProperties import org.springframework.boot.context.properties.EnableConfigurationProperties import org.springframework.context.annotation.Bean @@ -34,12 +31,6 @@ import org.springframework.context.annotation.Configuration @EnableFiatAutoConfig @EnableConfigurationProperties(OperationsSecurityConfigurationProperties) class SecurityConfig { - @Bean - @ConditionalOnMissingBean(CredentialsInitializerSynchronizable) - CredentialsInitializerSynchronizable noopCredentialsInitializerSynchronizable() { - new NoopCredentialsInitializerSynchronizable() - } - @Bean AllowedAccountsValidator allowedAccountsValidator(AccountCredentialsProvider accountCredentialsProvider, FiatStatus fiatStatus) { @@ -50,6 +41,9 @@ class SecurityConfig { static class OperationsSecurityConfigurationProperties { SecurityAction onMissingSecuredCheck 
= SecurityAction.WARN SecurityAction onMissingValidator = SecurityAction.WARN + + //TODO(jonsie): should be `allowUnauthorizedImageTaggingInAccounts` + List allowUnauthenticatedImageTaggingInAccounts = [] } static enum SecurityAction { diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/AccountNameable.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/AccountNameable.groovy deleted file mode 100644 index bf853e7286f..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/AccountNameable.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security.resources - -/** - * Denotes an operation description operates on a specific account. - */ -interface AccountNameable { - String getAccount() -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ApplicationNameable.groovy b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ApplicationNameable.groovy deleted file mode 100644 index 33102aaa102..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ApplicationNameable.groovy +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security.resources - -/** - * Denotes an operation description operates on a specific application resource. - */ -interface ApplicationNameable { - String getApplication() -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ResourcesNameable.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ResourcesNameable.java deleted file mode 100644 index b64ac10c0e2..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ResourcesNameable.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security.resources; - -import com.netflix.frigga.Names; - -import java.util.Collection; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Convenience trait for extracting application names from operation descriptions that have multiple - * items conforming to the Frigga naming conventions. Examples include load balancers and instances. - */ -public interface ResourcesNameable { - Collection getNames(); - - default List getApplications() { - return getNames().stream() - .map(name -> Names.parseName(name).getApp()) - .collect(Collectors.toList()); - } -} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupNameable.java b/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupNameable.java deleted file mode 100644 index dc5799d502b..00000000000 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupNameable.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.security.resources; - -import com.netflix.frigga.Names; - -/** - * Convenience trait for parsing the application name out of a description with a "serverGroupName" - * property. - */ -public interface ServerGroupNameable extends ApplicationNameable { - String getServerGroupName(); - - @Override - default String getApplication() { - return Names.parseName(getServerGroupName()).getApp(); - } -} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAccountCredentials.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAccountCredentials.java new file mode 100644 index 00000000000..3e97482962d --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/AbstractAccountCredentials.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.fiat.model.Authorization; +import com.netflix.spinnaker.fiat.model.resources.Permissions; +import java.util.Collections; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +// Todo: remove this class once these methods no longer need to be separated from AccountCredentials +public abstract class AbstractAccountCredentials implements AccountCredentials { + + // Todo: use jackson mixin on AccountCredentials rather than putting annotation here + @JsonIgnore + public abstract T getCredentials(); + + // Todo: make Fiat an acceptable dependency for clouddriver-api and push up to AccountCredentials + public Permissions getPermissions() { + Set rgm = + Optional.ofNullable(getRequiredGroupMembership()) + .map( + l -> + l.stream() + .map( + s -> + Optional.ofNullable(s) + .map(String::trim) + .map(String::toLowerCase) + .orElse("")) + .filter(s -> !s.isEmpty()) + .collect(Collectors.toSet())) + .orElse(Collections.EMPTY_SET); + if (rgm.isEmpty()) { + return Permissions.EMPTY; + } + + Permissions.Builder perms = new Permissions.Builder(); + for (String role : rgm) { + perms.add(Authorization.READ, role); + perms.add(Authorization.WRITE, role); + } + return perms.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AccountCredentials that = (AccountCredentials) o; + return Objects.equals(getName(), that.getName()) && Objects.equals(getType(), that.getType()); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getType()); + } +} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/BaseProvider.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/BaseProvider.java new file mode 100644 index 00000000000..ec272e715b4 --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/BaseProvider.java @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentScheduler; +import com.netflix.spinnaker.cats.agent.AgentSchedulerAware; +import com.netflix.spinnaker.cats.module.CatsModule; +import com.netflix.spinnaker.cats.module.CatsModuleAware; +import com.netflix.spinnaker.cats.provider.Provider; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +public abstract class BaseProvider extends AgentSchedulerAware implements Provider { + private final Collection agents = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + public final Collection getAgents() { + return ImmutableList.copyOf(agents); + } + + public final void addAgents(Collection agentsToSchedule) { + agents.addAll(agentsToSchedule); + + AgentScheduler agentScheduler = getAgentScheduler(); + if (agentScheduler instanceof CatsModuleAware) { + CatsModule catsModule = ((CatsModuleAware) agentScheduler).getCatsModule(); + agentsToSchedule.forEach( + agent -> + agentScheduler.schedule( + agent, + agent.getAgentExecution(catsModule.getProviderRegistry()), + catsModule.getExecutionInstrumentation())); + } + } + + public final void removeAgentsForAccounts(Collection namesOfDeletedAccounts) { + namesOfDeletedAccounts.forEach( + nameOfDeletedAccount -> { + AgentScheduler scheduler = getAgentScheduler(); + List agentsToDelete = + agents.stream() + .filter(agent -> agent.handlesAccount(nameOfDeletedAccount)) + .collect(Collectors.toList()); + if (scheduler != null) { + agentsToDelete.forEach(scheduler::unschedule); + } + agents.removeAll(agentsToDelete); + }); + } +} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/CredentialsInitializerSynchronizable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/CredentialsInitializerSynchronizable.java new file mode 100644 index 00000000000..bc11264dd1c --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/CredentialsInitializerSynchronizable.java @@ -0,0 +1,29 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +/** + * This interface is used by the credentials refresh controller to identify credentials initializers + * that should be re-created when the credentials have changed. + */ +public interface CredentialsInitializerSynchronizable { + /** + * Synchronize the Accounts and Agents managed by the provider with the latest configured + * accounts. 
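The old interface exposed a bean name for Spring to re-request; the replacement below asks implementations to reconcile their agents directly. A sketch of an implementation against the new contract (the `agentFactory` helper is hypothetical; `addAgents` and `removeAgentsForAccounts` are the `BaseProvider` methods added above):

```kotlin
import com.netflix.spinnaker.cats.agent.Agent
import com.netflix.spinnaker.clouddriver.security.AccountCredentials
import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository
import com.netflix.spinnaker.clouddriver.security.BaseProvider
import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable

// agentFactory builds one caching agent per account (a hypothetical helper).
class ExampleCredentialsSynchronizer(
    private val provider: BaseProvider,
    private val repository: AccountCredentialsRepository,
    private val agentFactory: (AccountCredentials<*>) -> Agent
) : CredentialsInitializerSynchronizable {

    override fun synchronize() {
        // Schedule agents for the currently configured accounts; BaseProvider starts
        // them immediately when its agent scheduler is CatsModule-aware.
        provider.addAgents(repository.all.map(agentFactory))
        // Accounts removed from configuration would be unscheduled via
        // provider.removeAgentsForAccounts(namesOfDeletedAccounts).
    }
}
```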
+ */ + default void synchronize() {} +} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProvider.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProvider.java new file mode 100644 index 00000000000..6ccccdbfe2d --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProvider.java @@ -0,0 +1,58 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security; + +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import java.util.Collections; +import java.util.Set; + +public class DefaultAccountCredentialsProvider implements AccountCredentialsProvider { + private final AccountCredentialsRepository repository; + private final CompositeCredentialsRepository> compositeRepository; + + public DefaultAccountCredentialsProvider() { + this(new MapBackedAccountCredentialsRepository()); + } + + public DefaultAccountCredentialsProvider(AccountCredentialsRepository repository) { + this(repository, new CompositeCredentialsRepository<>(Collections.emptyList())); + } + + public DefaultAccountCredentialsProvider( + AccountCredentialsRepository repository, + CompositeCredentialsRepository> compositeRepository) { + this.repository = repository; + this.compositeRepository = compositeRepository; + } + + @Override + public Set> getAll() { + Set> all = (Set>) repository.getAll(); + all.addAll(compositeRepository.getAllCredentials()); + return all; + } + + @Override + public AccountCredentials getCredentials(String name) { + AccountCredentials credentials = repository.getOne(name); + if (credentials != null) { + return credentials; + } + + return compositeRepository.getFirstCredentialsWithName(name); + } +} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/AccountNameable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/AccountNameable.java new file mode 100644 index 00000000000..6dd881d138e --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/AccountNameable.java @@ -0,0 +1,38 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
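The rewritten `DefaultAccountCredentialsProvider` above layers the new kork `CompositeCredentialsRepository` behind the legacy `AccountCredentialsRepository`. A usage sketch (both repositories are assumed to be constructed elsewhere):

```kotlin
import com.netflix.spinnaker.clouddriver.security.AccountCredentials
import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider

val provider = DefaultAccountCredentialsProvider(legacyRepository, compositeRepository)

// Lookup order: the legacy repository first, then the composite repository.
val credentials: AccountCredentials<*>? = provider.getCredentials("my-account")

// getAll() is the union of both repositories' credentials.
val everything = provider.getAll()
```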
+ */ + +package com.netflix.spinnaker.clouddriver.security.resources; + +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig; +import com.netflix.spinnaker.orchestration.OperationDescription; + +/** Denotes an operation description operates on a specific account. */ +public interface AccountNameable extends OperationDescription { + String getAccount(); + + /** + * @return whether or not this operation description expects to be further restricted by one or + * more applications + */ + default boolean requiresApplicationRestriction() { + return true; + } + + default boolean requiresAuthorization( + SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps) { + return true; + } +} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ApplicationNameable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ApplicationNameable.java new file mode 100644 index 00000000000..9a0dc6713ce --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ApplicationNameable.java @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security.resources; + +import com.netflix.spinnaker.orchestration.OperationDescription; +import java.util.Collection; + +/** Denotes an operation description operates on one or more specific application resources. */ +public interface ApplicationNameable extends OperationDescription { + Collection getApplications(); +} diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/CredentialsNameable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/CredentialsNameable.java similarity index 88% rename from clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/CredentialsNameable.java rename to clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/CredentialsNameable.java index 344e7796f95..ecc89caf6b3 100644 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/CredentialsNameable.java +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/CredentialsNameable.java @@ -16,15 +16,17 @@ package com.netflix.spinnaker.clouddriver.security.resources; +import com.fasterxml.jackson.annotation.JsonProperty; import com.netflix.spinnaker.clouddriver.security.AccountCredentials; /** - * Convenience trait for extracting the account name from a credential, which all - * descriptions should have. + * Convenience trait for extracting the account name from a credential, which all descriptions + * should have. 
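A description type opts into this machinery simply by exposing its resolved credentials; a sketch (the description class is hypothetical):

```kotlin
import com.netflix.spinnaker.clouddriver.security.AccountCredentials
import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable

// Implementing CredentialsNameable supplies getAccount() via the default method,
// and the @JsonProperty("credentials") added in this diff keeps the serialized
// field named "credentials" even though the getter returns the full object.
class ExampleDescription(
    private val credentials: AccountCredentials<*>
) : CredentialsNameable {
    override fun getCredentials(): AccountCredentials<*> = credentials
    // getAccount() defaults to credentials.name
}
```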
*/ public interface CredentialsNameable extends AccountNameable { AccountCredentials getCredentials(); + @JsonProperty("credentials") @Override default String getAccount() { return getCredentials().getName(); diff --git a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/NonCredentialed.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/NonCredentialed.java similarity index 85% rename from clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/NonCredentialed.java rename to clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/NonCredentialed.java index d70d48cdd08..32903106bd2 100644 --- a/clouddriver-security/src/main/groovy/com/netflix/spinnaker/clouddriver/security/resources/NonCredentialed.java +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/NonCredentialed.java @@ -16,8 +16,9 @@ package com.netflix.spinnaker.clouddriver.security.resources; +import com.netflix.spinnaker.orchestration.OperationDescription; + /** * Marker interface indicating that a description does not have account-level credentials specified. */ -public interface NonCredentialed { -} +public interface NonCredentialed extends OperationDescription {} diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ResourcesNameable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ResourcesNameable.java new file mode 100644 index 00000000000..e2111e9afb8 --- /dev/null +++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ResourcesNameable.java @@ -0,0 +1,39 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.security.resources; + +import com.netflix.frigga.Names; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Convenience trait for extracting application names from operation descriptions that have multiple + * items conforming to the Frigga naming conventions. Examples include load balancers and instances. 
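The default `getResourceApplications()` just below leans on Frigga's name parsing; per name it amounts to this (a sketch, with a hypothetical example name):

```kotlin
import com.netflix.frigga.Names

// Frigga parses Spinnaker's resource naming convention; the leading segment is
// the application name.
fun appFor(resourceName: String): String =
    Names.parseName(resourceName).app // e.g. "myapp-frontend-v001" -> "myapp"
```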
+ */
+public interface ResourcesNameable {
+  Collection<String> getNames();
+
+  default Collection<String> getResourceApplications() {
+    return Optional.ofNullable(getNames()).orElse(Collections.emptyList()).stream()
+        .filter(Objects::nonNull)
+        .map(name -> Names.parseName(name).getApp())
+        .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupNameable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupNameable.java
new file mode 100644
index 00000000000..6a19499d251
--- /dev/null
+++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupNameable.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.security.resources;
+
+import com.google.common.collect.ImmutableList;
+import com.netflix.frigga.Names;
+import java.util.Collection;
+
+/**
+ * Convenience trait for parsing the application name out of a description with one server group
+ * name.
+ */
+public interface ServerGroupNameable extends ApplicationNameable {
+  String getServerGroupName();
+
+  @Override
+  default Collection<String> getApplications() {
+    return ImmutableList.of(Names.parseName(getServerGroupName()).getApp());
+  }
+}
diff --git a/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupsNameable.java b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupsNameable.java
new file mode 100644
index 00000000000..8300ab09d86
--- /dev/null
+++ b/clouddriver-security/src/main/java/com/netflix/spinnaker/clouddriver/security/resources/ServerGroupsNameable.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.security.resources;
+
+import com.netflix.frigga.Names;
+import java.util.Collection;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * Convenience trait for parsing application names out of a description with one or more server
+ * group names.
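A sketch of a hypothetical implementor of the interface below; because the default getApplications() maps server group names one-to-one, duplicate application names are preserved:

```kotlin
import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable

// Hypothetical description naming several server groups; the applications are
// derived entirely from the Frigga-parsed names, so no extra wiring is needed.
class BulkDisableDescription(
  private val serverGroups: List<String>
) : ServerGroupsNameable {
  override fun getServerGroupNames(): Collection<String> = serverGroups
}

// BulkDisableDescription(listOf("api-main-v001", "api-main-v002")).applications
// yields ["api", "api"]; callers should not assume distinct values.
```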
+ */
+public interface ServerGroupsNameable extends ApplicationNameable {
+  Collection<String> getServerGroupNames();
+
+  @Override
+  default Collection<String> getApplications() {
+    return getServerGroupNames().stream()
+        .filter(Objects::nonNull)
+        .map(n -> Names.parseName(n).getApp())
+        .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderSpec.groovy b/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderSpec.groovy
index 8b3cedb7b2f..89d99eb9443 100644
--- a/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderSpec.groovy
+++ b/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderSpec.groovy
@@ -35,13 +35,13 @@ class DefaultAccountCredentialsProviderSpec extends Specification {
     provider.getCredentials(key)
 
     then:
-    1 * repo.getOne(key)
+    1 * repo.getOne(key) >> Mock(AccountCredentials)
 
     when:
     provider.getAll()
 
     then:
-    1 * repo.getAll()
+    1 * repo.getAll() >> new HashSet()
 
     where:
     key = "foo"
diff --git a/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtilsSpec.groovy b/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtilsSpec.groovy
index 1a58d32671c..47dab080565 100644
--- a/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtilsSpec.groovy
+++ b/clouddriver-security/src/test/groovy/com/netflix/spinnaker/clouddriver/security/ProviderUtilsSpec.groovy
@@ -103,6 +103,24 @@ class ProviderUtilsSpec extends Specification {
     credentialsType         | accountNameSet
     TestAccountCredentials2 | ["google-account-1", "google-account-2", "google-account-3"] as Set
     TestAccountCredentials1 | ["aws-account-1", "aws-account-2", "aws-account-3"] as Set
+    AccountCredentials      | ["google-account-1", "google-account-2", "google-account-3", "aws-account-1", "aws-account-2", "aws-account-3"] as Set
+  }
+
+  @Unroll
+  void "should collect accounts matching specified credentials type and cloud provider"() {
+    when:
+    def accountSet = ProviderUtils.buildThreadSafeSetOfAccounts(accountCredentialsRepository, credentialsType, cloudProvider)
+
+    then:
+    accountSet.collect { it.name } as Set == accountNameSet
+
+    where:
+    credentialsType         | cloudProvider        | accountNameSet
+    TestAccountCredentials2 | "testCloudProvider"  | ["google-account-1", "google-account-2", "google-account-3"] as Set
+    TestAccountCredentials2 | "otherCloudProvider" | [] as Set
+    TestAccountCredentials1 | "testCloudProvider"  | ["aws-account-1", "aws-account-2", "aws-account-3"] as Set
+    TestAccountCredentials1 | "otherCloudProvider" | [] as Set
+    AccountCredentials      | "testCloudProvider"  | ["google-account-1", "google-account-2", "google-account-3", "aws-account-1", "aws-account-2", "aws-account-3"] as Set
   }
 
   void "should reschedule specified agents"() {
@@ -116,7 +134,7 @@ class ProviderUtilsSpec extends Specification {
     def executionInstrumentation = new NoopExecutionInstrumentation()
 
     when:
-    new DefaultCatsModule([agentSchedulerAwareProvider], namedCacheFactory, scheduler, executionInstrumentation)
+    new DefaultCatsModule(null, [agentSchedulerAwareProvider], namedCacheFactory, scheduler, executionInstrumentation)
 
     then:
     scheduler.scheduled.collect { it.agent } == [testAgent1]
@@ -159,7 +177,7 @@ class ProviderUtilsSpec extends Specification {
     def catsModule
 
     when:
-    catsModule
= new DefaultCatsModule([agentSchedulerAwareProvider], namedCacheFactory, scheduler, executionInstrumentation) + catsModule = new DefaultCatsModule(null, [agentSchedulerAwareProvider], namedCacheFactory, scheduler, executionInstrumentation) then: scheduler.scheduled.collect { diff --git a/clouddriver-security/src/test/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderTest.java b/clouddriver-security/src/test/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderTest.java new file mode 100644 index 00000000000..16c29eff3e8 --- /dev/null +++ b/clouddriver-security/src/test/java/com/netflix/spinnaker/clouddriver/security/DefaultAccountCredentialsProviderTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2020 Armory + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.security; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.credentials.CompositeCredentialsRepository; +import java.util.HashSet; +import org.junit.jupiter.api.Test; +import org.mockito.AdditionalMatchers; + +public class DefaultAccountCredentialsProviderTest { + + @Test + void testSimpleCredentialsProvider() { + String NAME = "accountName"; + AccountCredentialsRepository repo = mock(AccountCredentialsRepository.class); + AccountCredentials cred1 = mock(AccountCredentials.class); + HashSet set = new HashSet<>(ImmutableList.of(cred1)); + when(repo.getAll()).thenAnswer(invocation -> set); + when(repo.getOne(NAME)).thenReturn(cred1); + + DefaultAccountCredentialsProvider provider = new DefaultAccountCredentialsProvider(repo); + assertThat(provider.getAll()).hasSize(1); + assertThat(provider.getCredentials(NAME)).isEqualTo(cred1); + } + + @Test + void testCompositeCredentialsProvider() { + String NAME1 = "account1"; + String NAME2 = "account2"; + String NAME3 = "account3"; + AccountCredentialsRepository repo = mock(AccountCredentialsRepository.class); + AccountCredentials cred1 = mock(AccountCredentials.class); + HashSet set = new HashSet<>(ImmutableList.of(cred1)); + when(repo.getAll()).thenAnswer(invocation -> set); + when(repo.getOne(NAME1)).thenReturn(cred1); + when(repo.getOne(AdditionalMatchers.not(eq(NAME1)))).thenReturn(null); + + CompositeCredentialsRepository compositeRepo = mock(CompositeCredentialsRepository.class); + AccountCredentials cred2 = mock(AccountCredentials.class); + when(compositeRepo.getAllCredentials()).thenReturn(ImmutableList.of(cred2)); + when(compositeRepo.getFirstCredentialsWithName(NAME2)).thenReturn(cred2); + when(compositeRepo.getFirstCredentialsWithName(AdditionalMatchers.not(eq(NAME2)))) + .thenReturn(null); + + DefaultAccountCredentialsProvider provider = + new DefaultAccountCredentialsProvider(repo, compositeRepo); + assertThat(provider.getAll()).hasSize(2); + 
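+    // getAll() merges both backing repositories; lookups by name consult them in
+    // turn, and unknown names resolve to null rather than throwing.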
assertThat(provider.getCredentials(NAME1)).isEqualTo(cred1); + assertThat(provider.getCredentials(NAME2)).isEqualTo(cred2); + assertThat(provider.getCredentials(NAME3)).isNull(); + } +} diff --git a/clouddriver-sql-mysql/clouddriver-sql-mysql.gradle b/clouddriver-sql-mysql/clouddriver-sql-mysql.gradle new file mode 100644 index 00000000000..a324aa2923a --- /dev/null +++ b/clouddriver-sql-mysql/clouddriver-sql-mysql.gradle @@ -0,0 +1,6 @@ +dependencies { + implementation project(":cats:cats-sql") + implementation project(":clouddriver-sql") + + runtimeOnly "com.mysql:mysql-connector-j" +} diff --git a/clouddriver-sql-mysql/mysql-setup.sql b/clouddriver-sql-mysql/mysql-setup.sql new file mode 100644 index 00000000000..f399a9fde65 --- /dev/null +++ b/clouddriver-sql-mysql/mysql-setup.sql @@ -0,0 +1,9 @@ +DROP DATABASE IF EXISTS clouddriver; +SET tx_isolation = 'READ-COMMITTED'; + +CREATE DATABASE clouddriver; +CREATE USER clouddriver_migrate; +CREATE USER clouddriver_service; + +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, REFERENCES, INDEX, ALTER, LOCK TABLES, EXECUTE, SHOW VIEW ON `clouddriver`.* TO 'clouddriver_migrate'@'%'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, EXECUTE, SHOW VIEW ON `clouddriver`.* TO 'clouddriver_service'@'%'; diff --git a/clouddriver-sql-postgres/clouddriver-sql-postgres.gradle b/clouddriver-sql-postgres/clouddriver-sql-postgres.gradle new file mode 100644 index 00000000000..daaa43f29b2 --- /dev/null +++ b/clouddriver-sql-postgres/clouddriver-sql-postgres.gradle @@ -0,0 +1,6 @@ +dependencies { + implementation project(":cats:cats-sql") + implementation project(":clouddriver-sql") + + runtimeOnly "org.postgresql:postgresql:42.2.18" +} diff --git a/clouddriver-sql/clouddriver-sql.gradle b/clouddriver-sql/clouddriver-sql.gradle new file mode 100644 index 00000000000..e33a47a3e4f --- /dev/null +++ b/clouddriver-sql/clouddriver-sql.gradle @@ -0,0 +1,54 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
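For context on the mysql-setup.sql script above: schema changes are meant to run as clouddriver_migrate (which holds the DDL grants), while the service itself connects as clouddriver_service with DML-only grants. A rough connectivity sketch, assuming a local MySQL and the passwordless users created above (mysql-connector-j is on the runtime classpath via the Gradle file):

```kotlin
import java.sql.DriverManager

fun main() {
  // Hypothetical local endpoint; adjust host/port for a real deployment.
  val url = "jdbc:mysql://127.0.0.1:3306/clouddriver"
  DriverManager.getConnection(url, "clouddriver_service", "").use { conn ->
    conn.createStatement().use { stmt ->
      stmt.executeQuery("SELECT 1").use { rs ->
        rs.next()
        println("service account connected: ${rs.getInt(1) == 1}")
      }
    }
  }
}
```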
+ */ + +apply from: "$rootDir/gradle/kotlin.gradle" +apply from: "$rootDir/gradle/kotlin-test.gradle" + +dependencies { + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-event") + + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-secrets" + implementation "io.spinnaker.kork:kork-sql" + implementation "io.spinnaker.kork:kork-telemetry" + implementation "de.huxhorn.sulky:de.huxhorn.sulky.ulid" + implementation "org.jooq:jooq" + implementation "org.hibernate.validator:hibernate-validator" + implementation "com.fasterxml.jackson.core:jackson-annotations" + implementation "com.fasterxml.jackson.core:jackson-databind" + implementation "javax.validation:validation-api" + + testImplementation project(":clouddriver-core-tck") + + testImplementation "io.spinnaker.kork:kork-sql-test" + testImplementation "org.testcontainers:mysql" + testImplementation "com.mysql:mysql-connector-j" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.objenesis:objenesis" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.assertj:assertj-core" + testImplementation "io.strikt:strikt-core" + testImplementation "dev.minutest:minutest" + testImplementation "io.mockk:mockk" + testImplementation "com.fasterxml.jackson.module:jackson-module-kotlin" + testImplementation "com.fasterxml.jackson.datatype:jackson-datatype-jsr310" +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlAgent.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlAgent.kt new file mode 100644 index 00000000000..56f51e57199 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlAgent.kt @@ -0,0 +1,20 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql + +import com.netflix.spinnaker.cats.agent.Agent + +interface SqlAgent : Agent diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlProvider.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlProvider.kt new file mode 100644 index 00000000000..fce6a390994 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlProvider.kt @@ -0,0 +1,27 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.sql
+
+import com.netflix.spinnaker.cats.agent.Agent
+import com.netflix.spinnaker.cats.agent.AgentSchedulerAware
+import com.netflix.spinnaker.cats.provider.Provider
+
+class SqlProvider(
+  private val agents: MutableList<Agent>
+) : AgentSchedulerAware(), Provider {
+  override fun getProviderName(): String = javaClass.name
+  override fun getAgents(): MutableCollection<Agent> = agents
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTask.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTask.kt
new file mode 100644
index 00000000000..403129d0205
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTask.kt
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.sql
+
+import com.fasterxml.jackson.annotation.JsonIgnore
+import com.netflix.spinnaker.clouddriver.data.task.SagaId
+import com.netflix.spinnaker.clouddriver.data.task.Status
+import com.netflix.spinnaker.clouddriver.data.task.Task
+import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayOutput
+import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayStatus
+import com.netflix.spinnaker.clouddriver.data.task.TaskOutput
+import com.netflix.spinnaker.clouddriver.data.task.TaskState
+import java.util.concurrent.atomic.AtomicBoolean
+import javax.annotation.Nullable
+import org.slf4j.LoggerFactory
+
+/**
+ * TODO(rz): Refactor clouddriver to not use an active record pattern.
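The active-record coupling that TODO refers to runs through the entire class below: each SqlTask keeps a back-reference to its repository, pushes every mutation through it, and re-reads its own rows before serving reads. A sketch of the resulting call flow (the repository instance is assumed to come from application wiring):

```kotlin
import com.netflix.spinnaker.clouddriver.data.task.TaskRepository

fun runOrchestration(repository: TaskRepository) {
  val task = repository.create("ORCHESTRATION", "Starting") // INSERT into tasks + task_states
  task.updateStatus("DEPLOY", "Creating server group")      // INSERT a new task_states row
  task.complete()                                           // INSERT the terminal state
  println(task.status?.status) // the writes marked the task dirty, so this re-reads from SQL
}
```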
+ */ +class SqlTask( + private val id: String, + @JsonIgnore internal var ownerId: String, + @JsonIgnore internal val requestId: String, + @JsonIgnore internal val startTimeMs: Long, + private val sagaIds: MutableSet, + private val repository: SqlTaskRepository +) : Task { + + companion object { + private val log = LoggerFactory.getLogger(SqlTask::class.java) + } + + private var resultObjects: MutableList = mutableListOf() + private var history: MutableList = mutableListOf() + private var taskOutputs: MutableList = mutableListOf() + + private val dirty = AtomicBoolean(false) + + override fun getId() = id + override fun getOwnerId() = ownerId + override fun getStartTimeMs() = startTimeMs + override fun getRequestId() = requestId + + override fun getResultObjects(): MutableList { + refresh() + return resultObjects + } + + override fun addResultObjects(results: MutableList) { + if (results.isEmpty()) { + return + } + this.dirty.set(true) + repository.addResultObjects(results, this) + log.debug("Added {} results to task {}", results.size, id) + } + + override fun getHistory(): List { + refresh() + + return history.map { TaskDisplayStatus(it) } + } + + override fun getStatus(): Status? { + refresh() + + return history.lastOrNull() + } + + override fun updateStatus(phase: String, status: String) { + this.dirty.set(true) + repository.updateCurrentStatus(this, phase, status) + log.debug("Updated status for task {} phase={} status={}", id, phase, status) + } + + override fun complete() { + this.dirty.set(true) + repository.updateState(this, TaskState.COMPLETED) + log.debug("Set task {} as complete", id) + } + + override fun fail() { + this.dirty.set(true) + repository.updateState(this, TaskState.FAILED) + } + + override fun fail(retryable: Boolean) { + this.dirty.set(true) + repository.updateState(this, if (retryable) TaskState.FAILED_RETRYABLE else TaskState.FAILED) + } + + override fun addSagaId(sagaId: SagaId) { + this.dirty.set(true) + sagaIds.add(sagaId) + repository.updateSagaIds(this) + log.debug("Added sagaId with name={} and id={} to task={}", sagaId.name, sagaId.id, id) + } + + override fun getSagaIds(): MutableSet { + return sagaIds + } + + override fun hasSagaIds(): Boolean { + return sagaIds.isNotEmpty() + } + + override fun retry() { + this.dirty.set(true) + repository.updateState(this, TaskState.STARTED) + } + + override fun getOutputs(): List { + refresh() + return taskOutputs + } + + override fun updateOutput(manifestName: String, phase: String, stdOut: String?, stdError: String?) 
{ + this.dirty.set(true) + repository.updateOutput(TaskDisplayOutput(manifestName, phase, stdOut, stdError), this) + log.info("Updated output for task {} for manifest {} for phase {} ", id, manifestName, phase) + } + + internal fun hydrateResultObjects(resultObjects: MutableList) { + this.dirty.set(false) + this.resultObjects = resultObjects + } + + internal fun hydrateHistory(history: MutableList) { + this.dirty.set(false) + this.history = history + } + + internal fun hydrateTaskOutputs(taskOutputs: MutableList) { + this.dirty.set(false) + this.taskOutputs = taskOutputs + } + + internal fun refresh(force: Boolean = false) { + if (this.dirty.getAndSet(false) || force) { + val task = repository.retrieveInternal(this.id) + if (task != null) { + history.clear() + resultObjects.clear() + taskOutputs.clear() + history.addAll(task.history) + resultObjects.addAll(task.resultObjects) + taskOutputs.addAll(task.outputs) + } + } + } + + override fun updateOwnerId(ownerId: String?, phase: String) { + this.dirty.set(true) + if (ownerId == null ) { + log.debug("new owner id not provided. No update necessary.") + return + } + + val previousCloudDriverHostname = this.getOwnerId().split("@")[1] + val currentCloudDriverHostname = ownerId.split("@")[1] + + if (previousCloudDriverHostname == currentCloudDriverHostname) { + log.debug("new owner id is the same as the previous owner Id. No update necessary.") + return + } + + val previousOwnerId = this.ownerId + updateStatus(phase, "Re-assigning task from: $previousOwnerId to: $ownerId") + this.ownerId = ownerId + repository.updateOwnerId(this) + log.debug("Updated ownerId for task id={} from {} to {}", id, previousOwnerId, ownerId) + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskCleanupAgent.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskCleanupAgent.kt new file mode 100644 index 00000000000..1e36a844393 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskCleanupAgent.kt @@ -0,0 +1,196 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql + +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.cats.agent.RunnableAgent +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent +import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider +import com.netflix.spinnaker.clouddriver.data.task.TaskState.COMPLETED +import com.netflix.spinnaker.clouddriver.data.task.TaskState.FAILED +import com.netflix.spinnaker.config.ConnectionPools +import com.netflix.spinnaker.config.SqlTaskCleanupAgentProperties +import com.netflix.spinnaker.kork.sql.routing.withPool +import java.time.Clock +import java.util.Arrays +import java.util.concurrent.TimeUnit +import org.jooq.DSLContext +import org.jooq.impl.DSL.field +import org.slf4j.LoggerFactory + +/** + * Cleans up completed Tasks after a configurable TTL. 
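The agent reads two knobs from SqlTaskCleanupAgentProperties: completedTtlMs and batchSize. A wiring sketch, assuming the properties class exposes both as mutable fields (jooq, clock, and registry would come from the Spring context):

```kotlin
import com.netflix.spectator.api.Registry
import com.netflix.spinnaker.clouddriver.sql.SqlTaskCleanupAgent
import com.netflix.spinnaker.config.SqlTaskCleanupAgentProperties
import java.time.Clock
import java.util.concurrent.TimeUnit
import org.jooq.DSLContext

fun taskCleanupAgent(jooq: DSLContext, registry: Registry): SqlTaskCleanupAgent {
  val properties = SqlTaskCleanupAgentProperties().apply {
    completedTtlMs = TimeUnit.DAYS.toMillis(7) // assumed retention window
    batchSize = 100 // bounds the size of each DELETE
  }
  return SqlTaskCleanupAgent(jooq, Clock.systemUTC(), registry, properties)
}
```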
+ */ +class SqlTaskCleanupAgent( + private val jooq: DSLContext, + private val clock: Clock, + private val registry: Registry, + private val properties: SqlTaskCleanupAgentProperties +) : RunnableAgent, CustomScheduledAgent { + + private val log = LoggerFactory.getLogger(javaClass) + + private val deletedId = registry.createId("sql.taskCleanupAgent.deleted") + private val timingId = registry.createId("sql.taskCleanupAgent.timing") + + override fun run() { + withPool(ConnectionPools.TASKS.value) { + val candidates = jooq.read { j -> + val candidates = j.select(field("id"), field("task_id")) + .from(taskStatesTable) + .where( + field("state").`in`(COMPLETED.toString(), FAILED.toString()) + .and( + field("created_at").lessOrEqual( + clock.instant().minusMillis(properties.completedTtlMs).toEpochMilli() + ) + ) + ) + .fetch() + + val candidateTaskIds = candidates.map { r -> r.field("task_id")?.getValue(r)?.toString() } + .filterNotNull() + .toList() + + val candidateTaskStateIds = mutableListOf() + val candidateResultIds = mutableListOf() + val candidateOutputIds = mutableListOf() + + if (candidateTaskIds.isNotEmpty()) { + candidateTaskIds.chunked(properties.batchSize) { chunk -> + candidateTaskStateIds.addAll( + j.select(field("id")) + .from(taskStatesTable) + .where(field("task_id").`in`(*chunk.toTypedArray())) + .fetch("id", String::class.java) + .filterNotNull() + ) + + candidateResultIds.addAll( + j.select(field("id")) + .from(taskResultsTable) + .where(field("task_id").`in`(*chunk.toTypedArray())) + .fetch("id", String::class.java) + .filterNotNull() + ) + + candidateOutputIds.addAll( + j.select(field("id")) + .from(taskOutputsTable) + .where(field("task_id").`in`(*chunk.toTypedArray())) + .fetch("id", String::class.java) + .filterNotNull() + ) + } + } + + CleanupCandidateIds( + taskIds = candidateTaskIds, + stateIds = candidateTaskStateIds, + resultIds = candidateResultIds, + outputIds = candidateOutputIds + ) + } + + if (candidates.hasAny()) { + log.info( + "Cleaning up {} completed tasks ({} states, {} results, {} output objects)", + candidates.taskIds.size, + candidates.stateIds.size, + candidates.resultIds.size, + candidates.outputIds.size + ) + + registry.timer(timingId).record { + candidates.resultIds.chunked(properties.batchSize) { chunk -> + jooq.transactional { ctx -> + ctx.deleteFrom(taskResultsTable) + .where(field("id").`in`(*chunk.toTypedArray())) + .execute() + } + } + + candidates.stateIds.chunked(properties.batchSize) { chunk -> + jooq.transactional { ctx -> + ctx.deleteFrom(taskStatesTable) + .where(field("id").`in`(*chunk.toTypedArray())) + .execute() + } + } + + candidates.outputIds.chunked(properties.batchSize) { chunk -> + jooq.transactional { ctx -> + ctx.deleteFrom(taskOutputsTable) + .where(field("id").`in`(*chunk.toTypedArray())) + .execute() + } + } + + candidates.taskIds.chunked(properties.batchSize) { chunk -> + jooq.transactional { ctx -> + ctx.deleteFrom(tasksTable) + .where(field("id").`in`(*chunk.toTypedArray())) + .execute() + } + } + } + + registry.counter(deletedId).increment(candidates.taskIds.size.toLong()) + } + } + } + + override fun getAgentType(): String = javaClass.simpleName + override fun getProviderName(): String = CoreProvider.PROVIDER_NAME + override fun getPollIntervalMillis(): Long = DEFAULT_POLL_INTERVAL_MILLIS + override fun getTimeoutMillis(): Long = DEFAULT_TIMEOUT_MILLIS + + companion object { + private val DEFAULT_POLL_INTERVAL_MILLIS = TimeUnit.MINUTES.toMillis(3) + private val DEFAULT_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(3) + 
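+    // Fixed three-minute poll and timeout; unlike SqlEventCleanupAgent further below,
+    // this agent's schedule is not driven by DynamicConfigService.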
} +} + +private data class CleanupCandidateIds( + val taskIds: List, + val stateIds: List, + val resultIds: List, + val outputIds: List +) { + fun hasAny() = taskIds.isNotEmpty() + + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (javaClass != other?.javaClass) return false + + other as CleanupCandidateIds + + if (taskIds.size != other.taskIds.size || !taskIds.containsAll(other.taskIds)) return false + if (stateIds.size != other.stateIds.size || !stateIds.containsAll(other.stateIds)) return false + if (resultIds.size != other.resultIds.size || !resultIds.containsAll(other.resultIds)) return false + if (outputIds.size != other.outputIds.size || !outputIds.containsAll(other.outputIds)) return false + + return true + } + + override fun hashCode(): Int { + var result = Arrays.hashCode(taskIds.toTypedArray()) + result = 31 * result + Arrays.hashCode(stateIds.toTypedArray()) + result = 31 * result + Arrays.hashCode(resultIds.toTypedArray()) + result = 31 * result + Arrays.hashCode(outputIds.toTypedArray()) + return result + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskRepository.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskRepository.kt new file mode 100644 index 00000000000..1808b89449f --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskRepository.kt @@ -0,0 +1,421 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.core.ClouddriverHostname +import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskOutput +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.data.task.TaskState +import com.netflix.spinnaker.clouddriver.data.task.TaskState.FAILED +import com.netflix.spinnaker.clouddriver.data.task.TaskState.STARTED +import com.netflix.spinnaker.kork.sql.routing.withPool +import de.huxhorn.sulky.ulid.ULID +import java.time.Clock +import org.jooq.Condition +import org.jooq.DSLContext +import org.jooq.Record +import org.jooq.Select +import org.jooq.impl.DSL.field +import org.jooq.impl.DSL.max +import org.jooq.impl.DSL.sql +import org.slf4j.LoggerFactory + +class SqlTaskRepository( + private val jooq: DSLContext, + private val mapper: ObjectMapper, + private val clock: Clock, + private val poolName: String +) : TaskRepository { + + private val log = LoggerFactory.getLogger(javaClass) + + init { + log.info("Using ${javaClass.simpleName} with pool $poolName") + } + + override fun create(phase: String, status: String): Task { + return create(phase, status, ulid.nextULID()) + } + + override fun create(phase: String, status: String, clientRequestId: String): Task { + var task = SqlTask(ulid.nextULID(), ClouddriverHostname.ID, clientRequestId, clock.millis(), mutableSetOf(), this) + val historyId = ulid.nextULID() + + withPool(poolName) { + jooq.transactional { ctx -> + val existingTask = getByClientRequestId(clientRequestId) + if (existingTask != null) { + task = existingTask as SqlTask + addToHistory(ctx, historyId, existingTask.id, FAILED, phase, "Duplicate of $clientRequestId") + } else { + val pairs = mapOf( + field("id") to task.id, + field("owner_id") to task.ownerId, + field("request_id") to task.requestId, + field("created_at") to task.startTimeMs, + field("saga_ids") to mapper.writeValueAsString(task.sagaIds) + ) + + ctx.insertInto(tasksTable, *pairs.keys.toTypedArray()).values(*pairs.values.toTypedArray()).execute() + addToHistory(ctx, historyId, task.id, STARTED, phase, status) + } + } + // TODO(rz): So janky and bad. + task.refresh(true) + } + + return task + } + + fun updateSagaIds(task: Task) { + return withPool(poolName) { + jooq.transactional { ctx -> + ctx.update(tasksTable) + .set(field("saga_ids"), mapper.writeValueAsString(task.sagaIds)) + .where(field("id").eq(task.id)) + .execute() + } + } + } + + override fun get(id: String): Task? { + return retrieveInternal(id) + } + + override fun getByClientRequestId(clientRequestId: String): Task? 
{ + return withPool(poolName) { + jooq.read { + it.select(field("id")) + .from(tasksTable) + .where(field("request_id").eq(clientRequestId)) + .fetchOne("id", String::class.java) + ?.let { taskId -> + retrieveInternal(taskId) + } + } + } + } + + override fun list(): MutableList { + return withPool(poolName) { + jooq.read { + runningTaskIds(it, false).let { taskIds -> + retrieveInternal(field("id").`in`(*taskIds), field("task_id").`in`(*taskIds)).toMutableList() + } + } + } + } + + override fun listByThisInstance(): MutableList { + return withPool(poolName) { + jooq.read { + runningTaskIds(it, true).let { taskIds -> + retrieveInternal(field("id").`in`(*taskIds), field("task_id").`in`(*taskIds)).toMutableList() + } + } + } + } + + internal fun addResultObjects(results: List, task: Task) { + val resultIdPairs = results.map { ulid.nextULID() to it }.toMap() + + withPool(poolName) { + jooq.transactional { ctx -> + ctx.select(taskStatesFields) + .from(taskStatesTable) + .where(field("task_id").eq(task.id)) + .orderBy(field("created_at").asc()) + .limit(1) + .fetchTaskStatus() + ?.run { + ensureUpdateable() + } + + resultIdPairs.forEach { result -> + ctx.insertInto(taskResultsTable, listOf(field("id"), field("task_id"), field("body"))) + .values( + listOf( + result.key, + task.id, + mapper.writeValueAsString(result.value) + ) + ) + .execute() + } + } + } + } + + internal fun updateCurrentStatus(task: Task, phase: String, status: String) { + val historyId = ulid.nextULID() + withPool(poolName) { + jooq.transactional { ctx -> + val state = selectLatestState(ctx, task.id) + addToHistory(ctx, historyId, task.id, state?.state ?: STARTED, phase, status.take(MAX_STATUS_LENGTH)) + } + } + } + + private fun addToHistory(ctx: DSLContext, id: String, taskId: String, state: TaskState, phase: String, status: String) { + ctx + .insertInto( + taskStatesTable, + listOf(field("id"), field("task_id"), field("created_at"), field("state"), field("phase"), field("status")) + ) + .values(listOf(id, taskId, clock.millis(), state.toString(), phase, status)) + .execute() + } + + internal fun updateState(task: Task, state: TaskState) { + val historyId = ulid.nextULID() + withPool(poolName) { + jooq.transactional { ctx -> + selectLatestState(ctx, task.id)?.let { + addToHistory(ctx, historyId, task.id, state, it.phase, it.status) + } + } + } + } + + internal fun updateOutput(taskOutput: TaskOutput, task: Task) { + val outputId = ulid.nextULID() + withPool(poolName) { + jooq.transactional { ctx -> + addToOutput(ctx, outputId, task.id, taskOutput.manifest, taskOutput.phase, taskOutput.stdOut, taskOutput.stdError) + } + } + } + + private fun addToOutput(ctx: DSLContext, id: String, taskId: String, manifestName: String, phase: String, stdOut: String?, stdError: String?) { + ctx + .insertInto( + taskOutputsTable, + listOf( + field("id"), + field("task_id"), + field("created_at"), + field("manifest"), + field("phase"), + field("std_out"), + field("std_error") + ) + ) + .values( + listOf( + id, + taskId, + clock.millis(), + manifestName, + phase, + stdOut, + stdError + ) + ) + .execute() + } + + fun updateOwnerId(task: Task) { + return withPool(poolName) { + jooq.transactional { ctx -> + ctx.update(tasksTable) + .set(field("owner_id"), task.ownerId) + .where(field("id").eq(task.id)) + .execute() + } + } + } + + internal fun retrieveInternal(taskId: String): Task? 
{ + return retrieveInternal(field("id").eq(taskId), field("task_id").eq(taskId)).firstOrNull() + } + + private fun retrieveInternal(condition: Condition, relationshipCondition: Condition? = null): Collection { + val tasks = mutableSetOf() + + // TODO: AWS Aurora enforces REPEATABLE_READ on replicas. Kork's dataSourceConnectionProvider sets READ_COMMITTED + // on every connection acquire - need to change this so running on !aurora will behave consistently. + // REPEATABLE_READ is correct here. + withPool(poolName) { + jooq.transactional { ctx -> + /** + * (select id as task_id, owner_id, request_id, created_at, saga_ids, null as body, null as state, null as phase, null as status from tasks_copy where id = '01D2H4H50VTF7CGBMP0D6HTGTF') + * UNION ALL + * (select task_id, null as owner_id, null as request_id, null as created_at, null as saga_ids, null as body, state, phase, status from task_states_copy where task_id = '01D2H4H50VTF7CGBMP0D6HTGTF') + * UNION ALL + * (select task_id, null as owner_id, null as request_id, null as created_at, null as saga_ids, body, null as state, null as phase, null as status from task_results_copy where task_id = '01D2H4H50VTF7CGBMP0D6HTGTF') + * UNION ALL + * (select task_id, null as owner_id, null as request_id, null as created_at, null as saga_ids, null as body, null as state, manifest, phase, stdOut, stdError, null as status from task_outputs_copy where task_id = '01D2H4H50VTF7CGBMP0D6HTGTF') + */ + tasks.addAll( + ctx + .select( + field("id").`as`("task_id"), + field("owner_id"), + field("request_id"), + field("created_at"), + field("saga_ids"), + field(sql("null")).`as`("body"), + field(sql("null")).`as`("state"), + field(sql("null")).`as`("phase"), + field(sql("null")).`as`("status"), + field(sql("null")).`as`("manifest"), + field(sql("null")).`as`("std_out"), + field(sql("null")).`as`("std_error") + ) + .from(tasksTable) + .where(condition) + .unionAll( + ctx + .select( + field("task_id"), + field(sql("null")).`as`("owner_id"), + field(sql("null")).`as`("request_id"), + field(sql("null")).`as`("created_at"), + field(sql("null")).`as`("saga_ids"), + field(sql("null")).`as`("body"), + field("state"), + field("phase"), + field("status"), + field(sql("null")).`as`("manifest"), + field(sql("null")).`as`("std_out"), + field(sql("null")).`as`("std_error") + ) + .from(taskStatesTable) + .where(relationshipCondition ?: condition) + ) + .unionAll( + ctx + .select( + field("task_id"), + field(sql("null")).`as`("owner_id"), + field(sql("null")).`as`("request_id"), + field(sql("null")).`as`("created_at"), + field(sql("null")).`as`("saga_ids"), + field("body"), + field(sql("null")).`as`("state"), + field(sql("null")).`as`("phase"), + field(sql("null")).`as`("status"), + field(sql("null")).`as`("manifest"), + field(sql("null")).`as`("std_out"), + field(sql("null")).`as`("std_error") + ) + .from(taskResultsTable) + .where(relationshipCondition ?: condition) + ) + .unionAll( + ctx + .select( + field("task_id"), + field(sql("null")).`as`("owner_id"), + field(sql("null")).`as`("request_id"), + field(sql("null")).`as`("created_at"), + field(sql("null")).`as`("saga_ids"), + field(sql("null")).`as`("body"), + field(sql("null")).`as`("state"), + field("phase"), + field(sql("null")).`as`("status"), + field("manifest"), + field("std_out"), + field("std_error") + ) + .from(taskOutputsTable) + .where(relationshipCondition ?: condition) + ) + .fetchTasks() + ) + } + } + + return tasks + } + + private fun selectLatestState(ctx: DSLContext, taskId: String): DefaultTaskStatus? 
{
+    return withPool(poolName) {
+      ctx.select(taskStatesFields)
+        .from(taskStatesTable)
+        .where(field("task_id").eq(taskId))
+        .orderBy(field("created_at").desc())
+        .limit(1)
+        .fetchTaskStatus()
+    }
+  }
+
+  /**
+   * Since task statuses are insert-only, we first need to find the most
+   * recent status record for each task ID and then filter that result set
+   * down to the ones that are running.
+   *
+   * Query used:
+   *   SELECT a.task_id
+   *   FROM task_states AS `a`
+   *   JOIN (
+   *     SELECT task_id, MAX(created_at) AS `created`
+   *     FROM task_states
+   *     GROUP BY task_id
+   *   ) AS `b`
+   *   ON (a.task_id = b.task_id AND a.created_at = b.created)
+   *   JOIN tasks AS `t`
+   *   ON (a.task_id = t.id)
+   *   WHERE (
+   *     t.owner_id = '<ClouddriverHostname.ID>'
+   *     and a.state = 'STARTED'
+   *   )
+   */
+  private fun runningTaskIds(ctx: DSLContext, thisInstance: Boolean): Array<String> {
+    return withPool(poolName) {
+      val baseQuery = ctx.select(field("a.task_id"))
+        .from(taskStatesTable.`as`("a"))
+        .innerJoin(
+          ctx.select(field("task_id"), max(field("created_at")).`as`("created"))
+            .from(taskStatesTable)
+            .groupBy(field("task_id"))
+            .asTable("b")
+        ).on(sql("a.task_id = b.task_id and a.created_at = b.created"))
+
+      val select = if (thisInstance) {
+        baseQuery
+          .innerJoin(tasksTable.`as`("t")).on(sql("a.task_id = t.id"))
+          .where(
+            field("t.owner_id").eq(ClouddriverHostname.ID)
+              .and(field("a.state").eq(STARTED.toString()))
+          )
+      } else {
+        baseQuery.where(field("a.state").eq(STARTED.toString()))
+      }
+
+      select
+        .fetch("a.task_id", String::class.java)
+        .toTypedArray()
+    }
+  }
+
+  private fun Select<out Record>.fetchTasks() =
+    TaskMapper(this@SqlTaskRepository, mapper).map(fetch().intoResultSet())
+
+  private fun Select<out Record>.fetchTaskStatuses() =
+    TaskStatusMapper().map(fetch().intoResultSet())
+
+  private fun Select<out Record>.fetchTaskStatus() =
+    fetchTaskStatuses().firstOrNull()
+
+  companion object {
+    private val ulid = ULID()
+    private val MAX_STATUS_LENGTH = 10_000
+  }
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskMapper.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskMapper.kt
new file mode 100644
index 00000000000..aa8755768af
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskMapper.kt
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.spinnaker.clouddriver.sql + +import com.fasterxml.jackson.core.type.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus +import com.netflix.spinnaker.clouddriver.data.task.SagaId +import com.netflix.spinnaker.clouddriver.data.task.Status +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskDisplayOutput +import com.netflix.spinnaker.clouddriver.data.task.TaskOutput +import com.netflix.spinnaker.clouddriver.data.task.TaskState +import java.io.IOException +import java.lang.String.format +import java.sql.ResultSet +import org.slf4j.LoggerFactory + +class TaskMapper( + private val sqlTaskRepository: SqlTaskRepository, + private val mapper: ObjectMapper +) { + + companion object { + private val log = LoggerFactory.getLogger(TaskMapper::class.java) + + private val SAGA_IDS_TYPE = object : TypeReference>() {} + } + + fun map(rs: ResultSet): Collection { + val tasks = mutableMapOf() + val results = mutableMapOf>() + val history = mutableMapOf>() + val taskOutputs = mutableMapOf>() + + while (rs.next()) { + when { + rs.getString("owner_id") != null -> + SqlTask( + rs.getString("task_id"), + rs.getString("owner_id"), + rs.getString("request_id"), + rs.getLong("created_at"), + sagaIds(rs.getString("saga_ids")), + sqlTaskRepository + ).let { + tasks[it.id] = it + } + rs.getString("body") != null -> { + try { + if (!results.containsKey(rs.getString("task_id"))) { + results[rs.getString("task_id")] = mutableListOf() + } + results[rs.getString("task_id")]!!.add(mapper.readValue(rs.getString("body"), Map::class.java)) + } catch (e: IOException) { + val id = rs.getString("id") + val taskId = rs.getString("task_id") + throw RuntimeException( + format("Failed to convert result object body to map (id: %s, taskId: %s)", id, taskId), + e + ) + } + } + rs.getString("state") != null -> { + if (!history.containsKey(rs.getString("task_id"))) { + history[rs.getString("task_id")] = mutableListOf() + } + history[rs.getString("task_id")]!!.add( + DefaultTaskStatus.create( + rs.getString("phase"), + rs.getString("status"), + TaskState.valueOf(rs.getString("state")) + ) + ) + } + rs.getString("manifest") != null -> { + if (!taskOutputs.containsKey(rs.getString("task_id"))) { + taskOutputs[rs.getString("task_id")] = mutableListOf() + } + taskOutputs[rs.getString("task_id")]!!.add( + TaskDisplayOutput( + rs.getString("manifest"), + rs.getString("phase"), + rs.getString("std_out"), + rs.getString("std_error") + ) + ) + } + } + } + + return tasks.values.map { task -> + task.hydrateResultObjects(results.getOrDefault(task.id, mutableListOf())) + task.hydrateHistory(history.getOrDefault(task.id, mutableListOf())) + task.hydrateTaskOutputs(taskOutputs.getOrDefault(task.id, mutableListOf())) + task + } + } + + private fun sagaIds(sagaIdsValue: String?): MutableSet { + if (sagaIdsValue == null) { + return mutableSetOf() + } + return mapper.readValue(sagaIdsValue, SAGA_IDS_TYPE) + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskResultObjectMapper.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskResultObjectMapper.kt new file mode 100644 index 00000000000..c14348f8f56 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskResultObjectMapper.kt @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import java.io.IOException +import java.lang.String.format +import java.sql.ResultSet + +class TaskResultObjectMapper( + private val mapper: ObjectMapper +) { + + fun map(rs: ResultSet): Collection { + val results = mutableListOf() + + while (rs.next()) { + try { + results.add(mapper.readValue(rs.getString("body"), Map::class.java)) + } catch (e: IOException) { + val id = rs.getString("id") + val taskId = rs.getString("task_id") + throw RuntimeException( + format("Failed to convert result object body to map (id: %s, taskId: %s)", id, taskId), + e + ) + } + } + + return results + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskStatusMapper.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskStatusMapper.kt new file mode 100644 index 00000000000..fae51ec1c83 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/TaskStatusMapper.kt @@ -0,0 +1,39 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql + +import com.netflix.spinnaker.clouddriver.data.task.DefaultTaskStatus +import com.netflix.spinnaker.clouddriver.data.task.TaskState +import java.sql.ResultSet + +class TaskStatusMapper { + + fun map(rs: ResultSet): Collection { + val results = mutableListOf() + + while (rs.next()) { + results.add( + DefaultTaskStatus.create( + rs.getString("phase"), + rs.getString("status"), + TaskState.valueOf(rs.getString("state")) + ) + ) + } + + return results + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventCleanupAgent.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventCleanupAgent.kt new file mode 100644 index 00000000000..529dcaefcfc --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventCleanupAgent.kt @@ -0,0 +1,102 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql.event + +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.cats.agent.RunnableAgent +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent +import com.netflix.spinnaker.clouddriver.core.provider.CoreProvider +import com.netflix.spinnaker.clouddriver.sql.SqlAgent +import com.netflix.spinnaker.config.ConnectionPools +import com.netflix.spinnaker.config.SqlEventCleanupAgentConfigProperties +import com.netflix.spinnaker.config.SqlEventCleanupAgentConfigProperties.Companion.EVENT_CLEANUP_LIMIT +import com.netflix.spinnaker.kork.annotations.VisibleForTesting +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.routing.withPool +import java.sql.Timestamp +import java.time.Duration +import java.time.Instant +import org.jooq.DSLContext +import org.jooq.impl.DSL.field +import org.jooq.impl.DSL.table +import org.slf4j.LoggerFactory + +/** + * Cleans up [SpinnakerEvent]s (by [Aggregate]) that are older than a configured number of days. + */ +class SqlEventCleanupAgent( + private val jooq: DSLContext, + private val registry: Registry, + private val properties: SqlEventCleanupAgentConfigProperties, + private val dynamicConfigService: DynamicConfigService +) : RunnableAgent, CustomScheduledAgent, SqlAgent { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + private val deletedId = registry.createId("sql.eventCleanupAgent.deleted") + private val timingId = registry.createId("sql.eventCleanupAgent.timing") + + override fun run() { + val duration = Duration.ofDays(properties.maxAggregateAgeDays) + val cutoff = Instant.now().minus(duration) + val limit = dynamicConfigService.getConfig(Int::class.java, EVENT_CLEANUP_LIMIT_KEY, EVENT_CLEANUP_LIMIT) + + log.info("Deleting aggregates last updated earlier than $cutoff ($duration), max $limit events") + + registry.timer(timingId).record { + val threshold = Instant.now().minus(duration) + + withPool(ConnectionPools.EVENTS.value) { + val rs = jooq.select(field("aggregate_type"), field("aggregate_id")) + .from(table("event_aggregates")) + .where(field("last_change_timestamp").lt(Timestamp(threshold.toEpochMilli()))) + .limit(limit) + .fetch() + .intoResultSet() + + var deleted = 0L + while (rs.next()) { + deleted++ + jooq.deleteFrom(table("event_aggregates")) + .where( + field("aggregate_type").eq(rs.getString("aggregate_type")) + .and(field("aggregate_id").eq(rs.getString("aggregate_id"))) + ) + .execute() + } + + registry.counter(deletedId).increment(deleted) + log.info("Deleted $deleted event aggregates") + } + } + } + + override fun getAgentType(): String = javaClass.simpleName + override fun getProviderName(): String = CoreProvider.PROVIDER_NAME + + override fun getPollIntervalMillis() = + Duration.parse(dynamicConfigService.getConfig(String::class.java, EVENT_CLEANUP_INTERVAL_KEY, "PT1M")).toMillis() + + override fun getTimeoutMillis() = + Duration.parse(dynamicConfigService.getConfig(String::class.java, EVENT_CLEANUP_TIMEOUT_KEY, "PT45S")).toMillis() + + @VisibleForTesting + 
internal companion object { + const val EVENT_CLEANUP_LIMIT_KEY = "spinnaker.clouddriver.eventing.cleanup-agent.cleanup-limit" + const val EVENT_CLEANUP_INTERVAL_KEY = "spinnaker.clouddriver.eventing.cleanup-agent.frequency" + const val EVENT_CLEANUP_TIMEOUT_KEY = "spinnaker.clouddriver.event.cleanup-agent.timeout" + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventRepository.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventRepository.kt new file mode 100644 index 00000000000..c26177bf289 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventRepository.kt @@ -0,0 +1,279 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql.event + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.event.Aggregate +import com.netflix.spinnaker.clouddriver.event.CompositeSpinnakerEvent +import com.netflix.spinnaker.clouddriver.event.EventMetadata +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent +import com.netflix.spinnaker.clouddriver.event.exceptions.AggregateChangeRejectedException +import com.netflix.spinnaker.clouddriver.event.exceptions.DuplicateEventAggregateException +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository.ListAggregatesCriteria +import com.netflix.spinnaker.clouddriver.sql.transactional +import com.netflix.spinnaker.config.ConnectionPools +import com.netflix.spinnaker.kork.sql.routing.withPool +import com.netflix.spinnaker.kork.version.ServiceVersion +import de.huxhorn.sulky.ulid.ULID +import java.sql.SQLIntegrityConstraintViolationException +import java.util.UUID +import org.jooq.Condition +import org.jooq.DSLContext +import org.jooq.impl.DSL.currentTimestamp +import org.jooq.impl.DSL.field +import org.jooq.impl.DSL.max +import org.jooq.impl.DSL.table +import org.slf4j.LoggerFactory +import org.springframework.context.ApplicationEventPublisher + +class SqlEventRepository( + private val jooq: DSLContext, + private val serviceVersion: ServiceVersion, + private val objectMapper: ObjectMapper, + private val applicationEventPublisher: ApplicationEventPublisher, + private val registry: Registry +) : EventRepository { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + private val eventCountId = registry.createId("eventing.events") + private val eventErrorCountId = registry.createId("eventing.errors") + + override fun save( + aggregateType: String, + aggregateId: String, + originatingVersion: Long, + newEvents: List + ) { + val eventNames = newEvents.joinToString { it.javaClass.simpleName } + log.debug("Saving $aggregateType/$aggregateId expecting version $originatingVersion with [$eventNames]") + + val aggregateCondition = 
field("aggregate_type").eq(aggregateType) + .and(field("aggregate_id").eq(aggregateId)) + + try { + withPool(POOL_NAME) { + jooq.transactional { ctx -> + // Get or create the aggregate and immediately assert that this save operation is being committed against the + // most recent aggregate state. + val aggregate = ctx.maybeGetAggregate(aggregateCondition) ?: { + if (originatingVersion != 0L) { + // The aggregate doesn't exist and we're already expecting a non-zero version. + throw AggregateChangeRejectedException(-1, originatingVersion) + } + + // The aggregate doesn't exist yet, so we'll go ahead and seed it immediately. + val initialAggregate = mapOf( + field("aggregate_type") to aggregateType, + field("aggregate_id") to aggregateId, + field("token") to ulid.nextULID(), + field("version") to 0 + ) + + try { + ctx.insertInto(AGGREGATES_TABLE) + .columns(initialAggregate.keys) + .values(initialAggregate.values) + .execute() + } catch (e: SQLIntegrityConstraintViolationException) { + // In the event that two requests are made at the same time to create a new aggregate (via two diff + // clouddriver instances), catch the exception and bubble it up as a duplicate exception so that it + // may be processed in an idempotent way, rather than causing an error. + // + // This is preferential to going back to the database to load the existing aggregate record, since we + // already know the aggregate version will not match the originating version expected from this process + // and would fail just below anyway. + throw DuplicateEventAggregateException(e) + } + + Aggregate(aggregateType, aggregateId, 0) + }() + + if (aggregate.version != originatingVersion) { + throw AggregateChangeRejectedException(aggregate.version, originatingVersion) + } + + // Events have their own auto-incrementing sequence within an aggregate; so we need to get the last sequence + // and generate from there. + val lastSequence = ctx.select(max(field("sequence"))).from(EVENTS_TABLE) + .where(aggregateCondition) + .limit(1) + .fetchOne(0, Long::class.java) + + log.debug("Last event sequence number is $lastSequence") + var nextSequence = lastSequence + + // Add the new events, doesn't matter what they are: At this point, they're "probably" valid, as the higher + // libs should be validating the event payload. + ctx.insertInto(EVENTS_TABLE) + .columns( + field("id"), + field("aggregate_type"), + field("aggregate_id"), + field("sequence"), + field("originating_version"), + field("timestamp"), + field("metadata"), + field("data") + ) + .let { insertValuesStep -> + var step = insertValuesStep + newEvents.forEach { + nextSequence = it.initialize(aggregateType, aggregateId, originatingVersion, nextSequence) + step = step.values(it.toSqlValues(objectMapper)) + } + step + } + .execute() + + // Update the aggregates table with a new version + ctx.update(AGGREGATES_TABLE) + .set(field("version"), field("version", Long::class.java).add(1)) + .set(field("last_change_timestamp"), currentTimestamp()) + .where(aggregateCondition) + .execute() + + log.debug("Event sequence number is now $nextSequence") + } + } + } catch (e: AggregateChangeRejectedException) { + registry.counter( + eventErrorCountId + .withTags("aggregateType", aggregateType, "exception", e.javaClass.simpleName) + ) + .increment() + throw e + } catch (e: Exception) { + // This is totally handling it... 
+      registry.counter(
+        eventErrorCountId
+          .withTags("aggregateType", aggregateType, "exception", e.javaClass.simpleName)
+      )
+        .increment()
+      throw SqlEventSystemException("Failed saving new events", e)
+    }
+
+    log.debug("Saved $aggregateType/$aggregateId: [${newEvents.joinToString { it.javaClass.simpleName }}]")
+    registry.counter(eventCountId.withTags("aggregateType", aggregateType)).increment(newEvents.size.toLong())
+
+    newEvents.forEach { applicationEventPublisher.publishEvent(it) }
+  }
+
+  /**
+   * Initialize the [SpinnakerEvent] lateinit properties (recursively, if necessary).
+   *
+   * This is a bit wonky: In the case of [CompositeSpinnakerEvent]s, we want to initialize the event so we can
+   * correctly serialize it, but we don't want to increment the sequence for these events as they aren't
+   * actually on the event log yet. If we're in a [CompositeSpinnakerEvent], we just provide a "-1" sequence
+   * number and a real, valid sequence will be assigned if/when it gets saved to the event log.
+   */
+  private fun SpinnakerEvent.initialize(
+    aggregateType: String,
+    aggregateId: String,
+    originatingVersion: Long,
+    currentSequence: Long?
+  ): Long? {
+    var nextSequence = if (currentSequence != null) {
+      currentSequence + 1
+    } else {
+      null
+    }
+
+    // timestamp is calculated on the SQL server
+    setMetadata(
+      EventMetadata(
+        id = UUID.randomUUID().toString(),
+        aggregateType = aggregateType,
+        aggregateId = aggregateId,
+        sequence = nextSequence ?: -1,
+        originatingVersion = originatingVersion,
+        serviceVersion = serviceVersion.resolve()
+      )
+    )
+
+    if (this is CompositeSpinnakerEvent) {
+      this.getComposedEvents().forEach { event ->
+        // We initialize composed events with a null sequence, since they won't actually get added to the log at
+        // this point; that's up to the action to either add it or not, at which point it'll get a sequence number.
+        event.initialize(aggregateType, aggregateId, originatingVersion, null)?.let {
+          nextSequence = it
+        }
+      }
+    }
+
+    return nextSequence
+  }
+
+  override fun list(aggregateType: String, aggregateId: String): List<SpinnakerEvent> {
+    return withPool(POOL_NAME) {
+      jooq.select().from(EVENTS_TABLE)
+        .where(
+          field("aggregate_type").eq(aggregateType)
+            .and(field("aggregate_id").eq(aggregateId))
+        )
+        .orderBy(field("sequence").asc())
+        .fetchEvents(objectMapper)
+    }
+  }
+
+  override fun listAggregates(criteria: ListAggregatesCriteria): EventRepository.ListAggregatesResult {
+    // TODO(rz): validate criteria
+
+    return withPool(POOL_NAME) {
+      val conditions = mutableListOf<Condition>()
+      criteria.aggregateType?.let { conditions.add(field("aggregate_type").eq(it)) }
+      criteria.token?.let { conditions.add(field("token").greaterThan(it)) }
+
+      val perPage = criteria.perPage.coerceAtMost(10_000)
+
+      val aggregates = jooq.select().from(AGGREGATES_TABLE)
+        .withConditions(conditions)
+        .orderBy(field("token").asc())
+        .limit(perPage)
+        .fetchAggregates()
+
+      val remaining = jooq.selectCount().from(AGGREGATES_TABLE)
+        .withConditions(conditions)
+        .fetchSingle()
+        .value1() - perPage
+
+      EventRepository.ListAggregatesResult(
+        aggregates = aggregates.map { it.model },
+        nextPageToken = if (remaining > 0) aggregates.lastOrNull()?.token else null
+      )
+    }
+  }
+
+  /**
+   * Fetches the aggregate matching [aggregateCondition], or null if it has never been seeded.
+   */
+  private fun DSLContext.maybeGetAggregate(aggregateCondition: Condition): Aggregate?
{ + return select() + .from(AGGREGATES_TABLE) + .where(aggregateCondition) + .limit(1) + .fetchAggregates() + .firstOrNull() + ?.model + } + + companion object { + private val POOL_NAME = ConnectionPools.EVENTS.value + private val AGGREGATES_TABLE = table("event_aggregates") + private val EVENTS_TABLE = table("events") + + private val ulid = ULID() + } +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventSystemException.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventSystemException.kt new file mode 100644 index 00000000000..42f27ec06f2 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventSystemException.kt @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql.event + +import com.netflix.spinnaker.clouddriver.event.exceptions.EventingException +import com.netflix.spinnaker.clouddriver.sql.exceptions.SqlException +import com.netflix.spinnaker.kork.exceptions.SystemException + +class SqlEventSystemException(message: String, cause: Throwable?) : + SystemException( + message, + cause + ), + EventingException, + SqlException { + constructor(message: String) : this(message, null) +} diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/dsl.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/dsl.kt new file mode 100644 index 00000000000..52f95a38c18 --- /dev/null +++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/event/dsl.kt @@ -0,0 +1,110 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.clouddriver.sql.event
+
+import com.fasterxml.jackson.core.JsonProcessingException
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.netflix.spinnaker.clouddriver.event.Aggregate
+import com.netflix.spinnaker.clouddriver.event.CompositeSpinnakerEvent
+import com.netflix.spinnaker.clouddriver.event.EventMetadata
+import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent
+import com.netflix.spinnaker.clouddriver.event.exceptions.InvalidEventTypeException
+import org.jooq.Condition
+import org.jooq.Record
+import org.jooq.Select
+import org.jooq.SelectConditionStep
+import org.jooq.SelectJoinStep
+import org.jooq.impl.DSL.currentTimestamp
+
+/**
+ * Adds an arbitrary number of [conditions] to a query, joined by the `AND` operator.
+ */
+internal fun SelectJoinStep<Record>.withConditions(conditions: List<Condition>): SelectConditionStep<Record> {
+  return if (conditions.isNotEmpty()) this.where(
+    conditions.reduce { acc, condition -> acc.and(condition) }
+  ) else {
+    where("1=1")
+  }
+}
+
+/**
+ * Internal model of [Aggregate].
+ */
+internal class SqlAggregate(
+  val model: Aggregate,
+  val token: String
+)
+
+/**
+ * Executes [this] select query and maps each row into a [SqlAggregate]. Rows are assumed to
+ * contain valid [Aggregate] data.
+ */
+internal fun Select<Record>.fetchAggregates(): List<SqlAggregate> =
+  fetch().intoResultSet().let { rs ->
+    mutableListOf<SqlAggregate>().apply {
+      while (rs.next()) {
+        add(
+          SqlAggregate(
+            model = Aggregate(
+              type = rs.getString("aggregate_type"),
+              id = rs.getString("aggregate_id"),
+              version = rs.getLong("version")
+            ),
+            token = rs.getString("token")
+          )
+        )
+      }
+    }
+  }
+
+/**
+ * Converts a [SpinnakerEvent] to a SQL event row. The values are ordered the same as the schema's columns.
+ */
+internal fun SpinnakerEvent.toSqlValues(objectMapper: ObjectMapper): Collection<Any> = listOf(
+  getMetadata().id,
+  getMetadata().aggregateType,
+  getMetadata().aggregateId,
+  getMetadata().sequence,
+  getMetadata().originatingVersion,
+  currentTimestamp(),
+  objectMapper.writeValueAsString(getMetadata()),
+  // TODO(rz): optimize
+  objectMapper.writeValueAsString(this)
+)
+
+/**
+ * Executes a SQL select query and converts the ResultSet into a list of [SpinnakerEvent].
+ */
+internal fun Select<Record>.fetchEvents(objectMapper: ObjectMapper): List<SpinnakerEvent> =
+  fetch().intoResultSet().let { rs ->
+    mutableListOf<SpinnakerEvent>().apply {
+      while (rs.next()) {
+        try {
+          val event = objectMapper.readValue(rs.getString("data"), SpinnakerEvent::class.java).apply {
+            setMetadata(objectMapper.readValue(rs.getString("metadata"), EventMetadata::class.java))
+          }
+          if (event is CompositeSpinnakerEvent) {
+            event.getComposedEvents().forEach {
+              it.setMetadata(event.getMetadata().copy(id = "N/A", sequence = -1))
+            }
+          }
+          add(event)
+        } catch (e: JsonProcessingException) {
+          throw InvalidEventTypeException(e)
+        }
+      }
+    }
+  }
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/exceptions/SqlException.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/exceptions/SqlException.kt
new file mode 100644
index 00000000000..140b0e5e7ec
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/exceptions/SqlException.kt
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.sql.exceptions
+
+/**
+ * Marker interface for exceptions raised by clouddriver's SQL storage layer.
+ */
+interface SqlException
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/security/SqlAccountDefinitionException.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/security/SqlAccountDefinitionException.kt
new file mode 100644
index 00000000000..afe2785421d
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/security/SqlAccountDefinitionException.kt
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2022 Apple Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.sql.security
+
+import com.netflix.spinnaker.clouddriver.sql.exceptions.SqlException
+import com.netflix.spinnaker.kork.exceptions.UserException
+
+class SqlAccountDefinitionException(message: String, cause: Throwable?) : UserException(message, cause), SqlException {
+  constructor(message: String) : this(message, null)
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/security/SqlAccountDefinitionRepository.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/security/SqlAccountDefinitionRepository.kt
new file mode 100644
index 00000000000..af8edc22931
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/security/SqlAccountDefinitionRepository.kt
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2021, 2022 Apple Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.sql.security
+
+import com.netflix.spinnaker.clouddriver.security.AccountDefinitionMapper
+import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository
+import com.netflix.spinnaker.clouddriver.security.AccountDefinitionTypes
+import com.netflix.spinnaker.clouddriver.sql.read
+import com.netflix.spinnaker.clouddriver.sql.transactional
+import com.netflix.spinnaker.credentials.definition.CredentialsDefinition
+import com.netflix.spinnaker.kork.secrets.SecretException
+import com.netflix.spinnaker.kork.sql.routing.withPool
+import com.netflix.spinnaker.kork.web.exceptions.NotFoundException
+import com.netflix.spinnaker.security.AuthenticatedRequest
+import org.apache.logging.log4j.LogManager
+import org.jooq.*
+import org.jooq.exception.DataAccessException
+import org.jooq.impl.DSL.*
+import java.time.Clock
+
+class SqlAccountDefinitionRepository(
+  private val jooq: DSLContext,
+  private val mapper: AccountDefinitionMapper,
+  private val clock: Clock,
+  private val poolName: String
+) : AccountDefinitionRepository {
+
+  override fun getByName(name: String): CredentialsDefinition? =
+    withPool(poolName) {
+      jooq.read { ctx ->
+        ctx.select(bodyColumn)
+          .from(accountsTable)
+          .where(idColumn.eq(name))
+          .fetchOne { (json) ->
+            mapper.deserialize(json.data())
+          }
+      }
+    }
+
+  override fun listByType(
+    typeName: String,
+    limit: Int,
+    startingAccountName: String?
+  ): MutableList<CredentialsDefinition> =
+    withPool(poolName) {
+      jooq.read { ctx ->
+        val conditions = mutableListOf(typeColumn.eq(typeName))
+        startingAccountName?.let { conditions += idColumn.ge(it) }
+        ctx.select(bodyColumn)
+          .from(accountsTable)
+          .where(conditions)
+          .orderBy(idColumn)
+          .limit(limit)
+          .fetch { (json) ->
+            deserializeAccountData(json.data())
+          }
+          .filterNotNullTo(mutableListOf())
+      }
+    }
+
+  override fun listByType(typeName: String): MutableList<CredentialsDefinition> =
+    withPool(poolName) {
+      jooq.read { ctx ->
+        ctx.select(bodyColumn)
+          .from(accountsTable)
+          .where(typeColumn.eq(typeName))
+          .fetch { (json) ->
+            deserializeAccountData(json.data())
+          }
+          .filterNotNullTo(mutableListOf())
+      }
+    }
+
+  private fun deserializeAccountData(accountData: String): CredentialsDefinition? =
+    try {
+      mapper.deserialize(accountData)
+    } catch (e: SecretException) {
+      LOGGER.warn("Unable to decrypt secret in account data ($accountData). Skipping this account.", e)
+      null
+    } catch (e: Exception) {
+      // invalid data usually isn't stored in the database, hence an error rather than warning
+      LOGGER.error("Invalid account data loaded ($accountData).
Skipping this account; consider deleting or fixing it.", e) + null + } + + private fun getCredentialsType(definition: CredentialsDefinition): String { + val javaClass = definition.javaClass + return AccountDefinitionTypes.getCredentialsTypeName(javaClass) + ?: throw IllegalArgumentException("No @CredentialsType annotation found on $javaClass") + } + + override fun create(definition: CredentialsDefinition) { + withPool(poolName) { + val name = definition.name + val typeName = getCredentialsType(definition) + val timestamp = clock.millis() + val user = AuthenticatedRequest.getSpinnakerUser().orElse("anonymous") + val body = JSON.valueOf(mapper.serialize(definition)) + try { + jooq.transactional { ctx -> + ctx.insertInto(accountsTable) + .set(idColumn, name) + .set(typeColumn, typeName) + .set(bodyColumn, body) + .set(createdColumn, timestamp) + .set(lastModifiedColumn, timestamp) + .set(modifiedByColumn, user) + .execute() + ctx.insertInto(accountHistoryTable) + .set(idColumn, name) + .set(typeColumn, typeName) + .set(bodyColumn, body) + .set(lastModifiedColumn, timestamp) + .set(versionColumn, findLatestVersion(name)) + .execute() + } + } catch (e: DataAccessException) { + throw SqlAccountDefinitionException("Cannot create account with definition $body", e) + } + // TODO(jvz): CredentialsDefinitionNotifier::definitionChanged for https://github.com/spinnaker/kork/pull/958 + } + } + + override fun save(definition: CredentialsDefinition) { + withPool(poolName) { + val name = definition.name + val typeName = getCredentialsType(definition) + val timestamp = clock.millis() + val user = AuthenticatedRequest.getSpinnakerUser().orElse("anonymous") + val body = JSON.valueOf(mapper.serialize(definition)) + try { + jooq.transactional { ctx -> + ctx.insertInto(accountsTable) + .set(idColumn, name) + .set(typeColumn, typeName) + .set(bodyColumn, body) + .set(createdColumn, timestamp) + .set(lastModifiedColumn, timestamp) + .set(modifiedByColumn, user) + .run { + if (jooq.dialect() == SQLDialect.POSTGRES) onConflict(idColumn).doUpdate() + else onDuplicateKeyUpdate() + } + .set(bodyColumn, body) + .set(lastModifiedColumn, timestamp) + .set(modifiedByColumn, user) + .execute() + ctx.insertInto(accountHistoryTable) + .set(idColumn, name) + .set(typeColumn, typeName) + .set(bodyColumn, body) + .set(lastModifiedColumn, timestamp) + .set(versionColumn, findLatestVersion(name)) + .execute() + } + } catch (e: DataAccessException) { + throw SqlAccountDefinitionException("Cannot save account with definition $body", e) + } + // TODO(jvz): CredentialsDefinitionNotifier::definitionChanged for https://github.com/spinnaker/kork/pull/958 + } + } + + override fun update(definition: CredentialsDefinition) { + withPool(poolName) { + val name = definition.name + val typeName = getCredentialsType(definition) + val timestamp = clock.millis() + val user = AuthenticatedRequest.getSpinnakerUser().orElse("anonymous") + val body = JSON.valueOf(mapper.serialize(definition)) + try { + jooq.transactional { ctx -> + val rows = ctx.update(accountsTable) + .set(bodyColumn, body) + .set(lastModifiedColumn, timestamp) + .set(modifiedByColumn, user) + .where(idColumn.eq(name)) + .execute() + if (rows != 1) { + throw NotFoundException("No account found with name $name") + } + ctx.insertInto(accountHistoryTable) + .set(idColumn, name) + .set(typeColumn, typeName) + .set(bodyColumn, body) + .set(lastModifiedColumn, timestamp) + .set(versionColumn, findLatestVersion(name)) + .execute() + } + } catch (e: DataAccessException) { + throw 
SqlAccountDefinitionException("Cannot update account with definition $body", e)
+      }
+      // TODO(jvz): CredentialsDefinitionNotifier::definitionChanged for https://github.com/spinnaker/kork/pull/958
+    }
+  }
+
+  override fun delete(name: String) {
+    withPool(poolName) {
+      val typeName = jooq.read { ctx ->
+        ctx.select(typeColumn)
+          .from(accountsTable)
+          .where(idColumn.eq(name))
+          .fetchOne(typeColumn)
+      } ?: throw NotFoundException("No account found with name $name")
+      try {
+        jooq.transactional { ctx ->
+          ctx.insertInto(accountHistoryTable)
+            .set(idColumn, name)
+            .set(deletedColumn, true)
+            .set(lastModifiedColumn, clock.millis())
+            .set(versionColumn, findLatestVersion(name))
+            .execute()
+          ctx.deleteFrom(accountsTable)
+            .where(idColumn.eq(name))
+            .execute()
+        }
+      } catch (e: DataAccessException) {
+        throw SqlAccountDefinitionException("Cannot delete account with name $name", e)
+      }
+      // TODO(jvz): CredentialsDefinitionNotifier::definitionRemoved for https://github.com/spinnaker/kork/pull/958
+    }
+  }
+
+  private fun findLatestVersion(name: String): Select<Record1<Int>> =
+    withPool(poolName) {
+      jooq.read { ctx ->
+        ctx.select(count(versionColumn) + 1)
+          .from(accountHistoryTable)
+          .where(idColumn.eq(name))
+      }
+    }
+
+  override fun revisionHistory(name: String): MutableList<AccountDefinitionRepository.Revision> =
+    withPool(poolName) {
+      jooq.read { ctx ->
+        ctx.select(bodyColumn, versionColumn, lastModifiedColumn)
+          .from(accountHistoryTable)
+          .where(idColumn.eq(name))
+          .orderBy(versionColumn.desc())
+          .fetch { (body, version, timestamp) -> AccountDefinitionRepository.Revision(
+            version,
+            timestamp,
+            body?.let { mapper.deserialize(it.data()) }
+          ) }
+      }
+    }
+
+  companion object {
+    private val accountsTable = table("accounts")
+    private val accountHistoryTable = table("accounts_history")
+    private val idColumn = field("id", String::class.java)
+    private val bodyColumn = field("body", JSON::class.java)
+    private val typeColumn = field("type", String::class.java)
+    private val deletedColumn = field("is_deleted", Boolean::class.java)
+    private val createdColumn = field("created_at", Long::class.java)
+    private val lastModifiedColumn = field("last_modified_at", Long::class.java)
+    private val modifiedByColumn = field("last_modified_by", String::class.java)
+    private val versionColumn = field("version", Int::class.java)
+    private val LOGGER = LogManager.getLogger(SqlAccountDefinitionRepository::class.java)
+  }
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/sql.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/sql.kt
new file mode 100644
index 00000000000..5724d6b75bc
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/clouddriver/sql/sql.kt
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.sql
+
+import io.github.resilience4j.retry.annotation.Retry
+import org.jooq.DSLContext
+import org.jooq.impl.DSL
+import org.jooq.impl.DSL.field
+import org.jooq.impl.DSL.table
+
+internal val tasksTable = table("tasks")
+internal val taskStatesTable = table("task_states")
+internal val taskResultsTable = table("task_results")
+internal val taskOutputsTable = table("task_outputs")
+
+internal val tasksFields = listOf("id", "request_id", "owner_id", "created_at").map { field(it) }
+internal val taskStatesFields = listOf("id", "task_id", "created_at", "state", "phase", "status").map { field(it) }
+internal val taskResultsFields = listOf("id", "task_id", "body").map { field(it) }
+internal val taskOutputsFields = listOf("id", "task_id", "created_at", "manifest", "phase", "std_out", "std_error").map { field(it) }
+
+/**
+ * Run the provided [fn] in a transaction, retrying on failures using resilience4j.retry.instances.sqlTransaction
+ * configuration.
+ */
+@Retry(name = "sqlTransaction")
+internal fun DSLContext.transactional(fn: (DSLContext) -> Unit) {
+  transaction { ctx ->
+    fn(DSL.using(ctx))
+  }
+}
+
+/**
+ * Run the provided [fn], retrying on failures using resilience4j.retry.instances.sqlRead configuration.
+ */
+@Retry(name = "sqlRead")
+internal fun <T> DSLContext.read(fn: (DSLContext) -> T): T {
+  return fn(this)
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/ConnectionPools.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/ConnectionPools.kt
new file mode 100644
index 00000000000..6267cd94ef2
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/ConnectionPools.kt
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.config
+
+enum class ConnectionPools(
+  val value: String
+) {
+  TASKS("tasks"),
+  CACHE_WRITER("cacheWriter"),
+  CACHE_READER("cacheReader"),
+  EVENTS("events"),
+  ACCOUNTS("accounts"),
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt
new file mode 100644
index 00000000000..88426f715b9
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.spinnaker.config + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionMapper +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository +import com.netflix.spinnaker.clouddriver.sql.SqlProvider +import com.netflix.spinnaker.clouddriver.sql.SqlTaskCleanupAgent +import com.netflix.spinnaker.clouddriver.sql.SqlTaskRepository +import com.netflix.spinnaker.clouddriver.sql.event.SqlEventCleanupAgent +import com.netflix.spinnaker.clouddriver.sql.event.SqlEventRepository +import com.netflix.spinnaker.clouddriver.sql.security.SqlAccountDefinitionRepository +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer.SubtypeLocator +import com.netflix.spinnaker.kork.sql.config.DefaultSqlConfiguration +import com.netflix.spinnaker.kork.sql.config.SqlProperties +import com.netflix.spinnaker.kork.telemetry.InstrumentedProxy +import com.netflix.spinnaker.kork.version.ServiceVersion +import org.jooq.DSLContext +import org.springframework.beans.factory.annotation.Value +import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.context.ApplicationEventPublisher +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration +import org.springframework.context.annotation.Import +import java.time.Clock + +@Configuration +@ConditionalOnProperty("sql.enabled") +@Import(DefaultSqlConfiguration::class) +@EnableConfigurationProperties(SqlTaskCleanupAgentProperties::class, SqlEventCleanupAgentConfigProperties::class) +class SqlConfiguration { + + @Bean + @ConditionalOnProperty("sql.task-repository.enabled") + fun sqlTaskRepository( + jooq: DSLContext, + clock: Clock, + objectMapper: ObjectMapper + ): TaskRepository = + SqlTaskRepository(jooq, objectMapper, clock, ConnectionPools.TASKS.value) + + @Bean + @ConditionalOnProperty("sql.task-repository.enabled", "sql.task-repository.secondary.enabled") + fun secondarySqlTaskRepository( + jooq: DSLContext, + clock: Clock, + objectMapper: ObjectMapper, + @Value("\${sql.task-repository.secondary.pool-name}") poolName: String + + ): TaskRepository = + SqlTaskRepository(jooq, objectMapper, clock, poolName) + + @Bean + @ConditionalOnProperty("sql.task-repository.enabled") + @ConditionalOnExpression("\${sql.read-only:false} == false") + fun sqlTaskCleanupAgent( + jooq: DSLContext, + clock: Clock, + registry: Registry, + properties: SqlTaskCleanupAgentProperties + ): SqlTaskCleanupAgent = + SqlTaskCleanupAgent(jooq, clock, registry, properties) + + /** + * TODO(rz): When enabled, clouddriver gets wired up with two SqlProviders (one here, another in cats-sql). + * This should get cleaned up such that only one sqlProvider is ever created (register agents via an interface, say + * `SqlAgent`?) 
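+   * (`SqlEventCleanupAgent` already implements `SqlAgent`, for example, so it could be
+   * discovered through that interface instead of being wired into a provider by hand.)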
+   */
+  @Bean
+  @ConditionalOnProperty("sql.task-repository.enabled")
+  @ConditionalOnExpression("\${sql.read-only:false} == false")
+  fun sqlProvider(sqlTaskCleanupAgent: SqlTaskCleanupAgent): SqlProvider =
+    SqlProvider(mutableListOf(sqlTaskCleanupAgent))
+
+  @Bean
+  fun sqlEventRepository(
+    jooq: DSLContext,
+    sqlProperties: SqlProperties,
+    serviceVersion: ServiceVersion,
+    objectMapper: ObjectMapper,
+    applicationEventPublisher: ApplicationEventPublisher,
+    registry: Registry,
+    subtypeLocators: List<SubtypeLocator>
+  ): EventRepository {
+    // TODO(rz): ObjectMapperSubtypeConfigurer should become a standard kork feature. This is pretty gross.
+    ObjectMapperSubtypeConfigurer(true).registerSubtypes(objectMapper, subtypeLocators)
+    return SqlEventRepository(
+      jooq,
+      serviceVersion,
+      objectMapper,
+      applicationEventPublisher,
+      registry
+    ).let {
+      InstrumentedProxy.proxy(registry, it, "eventRepository", mapOf("backend" to "sql"))
+    }
+  }
+
+  @Bean
+  @ConditionalOnExpression("\${sql.read-only:false} == false")
+  fun sqlEventCleanupAgent(
+    jooq: DSLContext,
+    registry: Registry,
+    properties: SqlEventCleanupAgentConfigProperties,
+    dynamicConfigService: DynamicConfigService
+  ): SqlEventCleanupAgent {
+    return SqlEventCleanupAgent(jooq, registry, properties, dynamicConfigService)
+  }
+
+  @Bean
+  @ConditionalOnProperty("account.storage.enabled")
+  fun sqlAccountDefinitionRepository(
+    jooq: DSLContext,
+    clock: Clock,
+    mapper: AccountDefinitionMapper
+  ): AccountDefinitionRepository = SqlAccountDefinitionRepository(jooq, mapper, clock, ConnectionPools.ACCOUNTS.value)
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlEventCleanupAgentConfigProperties.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlEventCleanupAgentConfigProperties.kt
new file mode 100644
index 00000000000..52e7b8ad02a
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlEventCleanupAgentConfigProperties.kt
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.config
+
+import java.time.Duration
+import javax.validation.constraints.Positive
+import org.springframework.boot.context.properties.ConfigurationProperties
+import org.springframework.validation.annotation.Validated
+
+@Validated
+@ConfigurationProperties("spinnaker.clouddriver.eventing.cleanup-agent")
+class SqlEventCleanupAgentConfigProperties {
+  /**
+   * How often the cleanup agent will run. Defaults to 1 minute.
+   */
+  var frequency: Duration = Duration.ofMinutes(1)
+
+  /**
+   * The maximum time the agent may run before it is timed out and becomes eligible for
+   * rescheduling onto a different Clouddriver instance. Defaults to 45 seconds.
+   */
+  var timeout: Duration = Duration.ofSeconds(45)
+
+  /**
+   * The max age of an [Aggregate]. Defaults to 7 days.
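+   * Aggregates last updated before this cutoff are deleted by SqlEventCleanupAgent, and their
+   * events are removed with them through the event_aggregate_fk ON DELETE CASCADE constraint
+   * defined in 20190822-initial-event-schema.yml.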
+   */
+  @Positive
+  var maxAggregateAgeDays: Long = 7
+
+  /**
+   * The max number of aggregates to clean up in each agent invocation. Defaults to 1000.
+   */
+  @Positive
+  var cleanupLimit: Int = EVENT_CLEANUP_LIMIT
+
+  companion object {
+    const val EVENT_CLEANUP_LIMIT = 1_000
+  }
+}
diff --git a/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlTaskCleanupAgentProperties.kt b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlTaskCleanupAgentProperties.kt
new file mode 100644
index 00000000000..60893f9012b
--- /dev/null
+++ b/clouddriver-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlTaskCleanupAgentProperties.kt
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.config
+
+import java.util.concurrent.TimeUnit
+import org.springframework.boot.context.properties.ConfigurationProperties
+
+@ConfigurationProperties("sql.agent.task-cleanup")
+class SqlTaskCleanupAgentProperties {
+  var completedTtlMs: Long = TimeUnit.DAYS.toMillis(4)
+  var batchSize: Int = 100
+}
diff --git a/clouddriver-sql/src/main/resources/db/changelog-master.yml b/clouddriver-sql/src/main/resources/db/changelog-master.yml
new file mode 100644
index 00000000000..00980e16a98
--- /dev/null
+++ b/clouddriver-sql/src/main/resources/db/changelog-master.yml
@@ -0,0 +1,31 @@
+databaseChangeLog:
+- include:
+    file: changelog/20180919-initial-schema.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20181120-cats.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20181205-agent-scheduler.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20190822-initial-event-schema.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20190913-task-sagaids.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20201110-field-type-postgres.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20210106-task-outputs.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20210311-caching-replicas.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20210927-accounts.yml
+    relativeToChangelogFile: true
+- include:
+    file: changelog/20240111-accounts-indexes.yml
+    relativeToChangelogFile: true
diff --git a/clouddriver-sql/src/main/resources/db/changelog/20180919-initial-schema.yml b/clouddriver-sql/src/main/resources/db/changelog/20180919-initial-schema.yml
new file mode 100644
index 00000000000..2423d12ca80
--- /dev/null
+++ b/clouddriver-sql/src/main/resources/db/changelog/20180919-initial-schema.yml
@@ -0,0 +1,181 @@
+databaseChangeLog:
+- changeSet:
+    id: create-tasks-table
+    author: robzienert
+    changes:
+    - createTable:
+        tableName: tasks
+        columns:
+        - column:
+            name: id
+            type: char(36)
+            constraints:
+              primaryKey: true
+              nullable: false
+        - column:
+            name: request_id
+            type: varchar(255)
+            constraints:
+              nullable: false
+        - column:
+            name: owner_id
+            type: varchar(255)
+            constraints:
+              nullable: false
+        - column:
+            name:
created_at + type: bigint + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: tasks + +- changeSet: + id: create-task-states-table + author: robzienert + changes: + - createTable: + tableName: task_states + columns: + - column: + name: id + type: char(36) + constraints: + primaryKey: true + nullable: false + - column: + name: task_id + type: char(36) + constraints: + nullable: false + - column: + name: created_at + type: bigint + constraints: + nullable: false + - column: + name: state + type: varchar(10) + constraints: + nullable: false + - column: + name: phase + type: varchar(255) + constraints: + nullable: false + - column: + name: status + type: text + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: task_states + +- changeSet: + validCheckSum: 8:f0bfebd55de9168e38a8ef9c7217c610 + id: mysql-change-state-stauts-to-enum-type + author: robzienert + changes: + - sql: + dbms: mysql + sql: ALTER TABLE `task_states` MODIFY COLUMN `state` ENUM("STARTED", "COMPLETED", "FAILED") NOT NULL DEFAULT "STARTED" + +- changeSet: + id: create-task-results-table + author: robzienert + changes: + - createTable: + tableName: task_results + columns: + - column: + name: id + type: char(36) + constraints: + primaryKey: true + nullable: false + - column: + name: task_id + type: char(36) + constraints: + nullable: false + - column: + name: body + type: longtext + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: task_results + +- changeSet: + id: create-indices + author: robzienert + changes: + - createIndex: + indexName: task_request_id_idx + tableName: tasks + columns: + - column: + name: request_id + - createIndex: + indexName: task_owner_id_idx + tableName: tasks + columns: + - column: + name: owner_id + - createIndex: + indexName: task_created_at_idx + tableName: tasks + columns: + - column: + name: created_at + - createIndex: + indexName: task_states_taskid_createdat_idx + tableName: task_states + columns: + - column: + name: task_id + - column: + name: created_at + - createIndex: + indexName: result_objects_task_idx + tableName: task_results + columns: + - column: + name: task_id + rollback: + - dropIndex: + indexName: task_request_id_idx + tableName: tasks + - dropIndex: + indexName: task_owner_id_idx + tableName: tasks + - dropIndex: + indexName: task_created_at_idx + tableName: tasks + - dropIndex: + indexName: result_objects_task_idx + tableName: task_results + +- changeSet: + validCheckSum: 8:d6f5eedc195011826620cc0355e8352d + id: mysql-revert-change-state-stauts-to-enum-type + author: afeldman + changes: + - sql: + dbms: mysql + sql: ALTER TABLE task_states CHANGE COLUMN `state` `state` char(9) NOT NULL DEFAULT "STARTED" diff --git a/clouddriver-sql/src/main/resources/db/changelog/20181120-cats.yml b/clouddriver-sql/src/main/resources/db/changelog/20181120-cats.yml new file mode 100644 index 00000000000..07e9e4ac524 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20181120-cats.yml @@ -0,0 +1,235 @@ +databaseChangeLog: +- removeChangeSetProperty: + change: addColumn + dbms: postgresql + remove: afterColumn +- changeSet: + id: create-cats-resource-table-v1 + author: afeldman + changes: + - createTable: + tableName: cats_v1_resource_template + columns: + - column: + name: id + type: varchar(255) + constraints: + nullable: false 
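+              # The "_template" suffix suggests this table is a schema template: the cats-sql
+              # cache appears to derive its per-type resource tables from this definition at
+              # runtime rather than reading and writing this table directly.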
+ - column: + name: agent + type: varchar(127) + constraints: + nullable: false + - column: + name: body_hash + type: char(64) + constraints: + nullable: false + - column: + name: body + type: longtext + constraints: + nullable: false + - column: + name: last_updated + type: bigint + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: cats_v1_resource_template + +- changeSet: + id: create-cats-resource-table-v1-indices + author: afeldman + changes: + - addPrimaryKey: + tableName: cats_v1_resource_template + constraintName: cats_pk + columnNames: id, agent + - createIndex: + indexName: agent_body_hash_idx + tableName: cats_v1_resource_template + columns: + - column: + name: agent + - column: + name: body_hash + - createIndex: + indexName: resource_last_updated_idx + tableName: cats_v1_resource_template + columns: + - column: + name: last_updated + rollback: + - dropIndex: + indexName: agent_body_hash_idx + tableName: cats_v1_resource_template + - dropIndex: + indexName: resource_last_updated_idx + tableName: cats_v1_resource_template + +- changeSet: + id: create-cats-rel-table-v1 + author: afeldman + changes: + - createTable: + tableName: cats_v1_rel_template + columns: + - column: + name: uuid + type: char(26) + constraints: + primaryKey: true + nullable: false + - column: + name: id + type: varchar(255) + constraints: + nullable: false + - column: + name: rel_id + type: varchar(255) + constraints: + nullable: false + - column: + name: rel_agent + type: varchar(127) + constraints: + nullable: false + - column: + name: rel_type + type: varchar(64) + - column: + name: last_updated + type: bigint + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: cats_v1_rel_template + +- changeSet: + id: create-cats-rel-table-v1-indices + author: afeldman + changes: + - createIndex: + indexName: id_idx + tableName: cats_v1_rel_template + columns: + - column: + name: id + - createIndex: + indexName: rel_agent_idx + tableName: cats_v1_rel_template + columns: + - column: + name: rel_agent + - createIndex: + indexName: rel_type_idx + tableName: cats_v1_rel_template + columns: + - column: + name: rel_type + - createIndex: + indexName: rel_last_updated_idx + tableName: cats_v1_rel_template + columns: + - column: + name: last_updated + rollback: + - dropIndex: + indexName: id_idx + tableName: cats_v1_rel_template + - dropIndex: + indexName: rel_last_updated_idx + tableName: cats_v1_rel_template + - dropIndex: + indexName: rel_agent_idx + tableName: cats_v1_rel_template + - dropIndex: + indexName: rel_type_idx + tableName: cats_v1_rel_template + +- changeSet: + id: update-cats-rel-table-v1-indices-1 + author: afeldman + changes: + - createIndex: + indexName: rel_ids_type_idx + tableName: cats_v1_rel_template + columns: + - column: + name: id + - column: + name: rel_type + - column: + name: rel_id + - dropIndex: + indexName: rel_last_updated_idx + tableName: cats_v1_rel_template + rollback: + - dropIndex: + indexName: rel_ids_type_idx + tableName: cats_v1_rel_template + - createIndex: + indexName: rel_last_updated_idx + tableName: cats_v1_rel_template + columns: + - column: + name: last_updated + +- changeSet: + id: longer-id-columns + author: afeldman + changes: + - modifyDataType: + tableName: cats_v1_resource_template + columnName: id + newDataType: varchar(352) + - modifyDataType: + tableName: cats_v1_rel_template + columnName: id + newDataType: varchar(352) + - modifyDataType: 
+ tableName: cats_v1_rel_template + columnName: rel_id + newDataType: varchar(352) + rollback: + - modifyDataType: + tableName: cats_v1_resource_template + columnName: id + newDataType: varchar(255) + - modifyDataType: + tableName: cats_v1_rel_template + columnName: id + newDataType: varchar(255) + - modifyDataType: + tableName: cats_v1_rel_template + columnName: rel_id + newDataType: varchar(255) + +- changeSet: + validCheckSum: 8:03b00d0af09f2e7081d187f246ea4d26 + id: application-index + author: afeldman + changes: + - addColumn: + tableName: cats_v1_resource_template + columns: + - column: + name: application + type: varchar(255) + afterColumn: agent + - createIndex: + indexName: application_idx + tableName: cats_v1_resource_template + columns: + - column: + name: application diff --git a/clouddriver-sql/src/main/resources/db/changelog/20181205-agent-scheduler.yml b/clouddriver-sql/src/main/resources/db/changelog/20181205-agent-scheduler.yml new file mode 100644 index 00000000000..2f31b2a1bc9 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20181205-agent-scheduler.yml @@ -0,0 +1,58 @@ +databaseChangeLog: +- changeSet: + id: create-agent-locks-table + author: robzienert + changes: + - createTable: + tableName: cats_agent_locks + columns: + - column: + name: agent_name + type: varchar(500) + constraints: + nullable: false + primaryKey: true + - column: + name: owner_id + type: varchar(100) + constraints: + nullable: false + - column: + name: lock_acquired + type: bigint + constraints: + nullable: false + - column: + name: lock_expiry + type: bigint + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: cats_agent_locks + +- changeSet: + id: add-lock-unique-constraint + author: robzienert + changes: + - addUniqueConstraint: + tableName: cats_agent_locks + columnNames: agent_name, lock_expiry + constraintName: cats_agent_lock_expiry + rollback: + - dropUniqueConstraint: + tableName: cats_agent_locks + constraintName: cats_agent_lock_expiry + +- changeSet: + id: cats-agent-locks-owner-len + author: afeldman + changes: + - modifyDataType: + tableName: cats_agent_locks + columnName: owner_id + newDataType: varchar(255) diff --git a/clouddriver-sql/src/main/resources/db/changelog/20190822-initial-event-schema.yml b/clouddriver-sql/src/main/resources/db/changelog/20190822-initial-event-schema.yml new file mode 100644 index 00000000000..49aa93bf4e4 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20190822-initial-event-schema.yml @@ -0,0 +1,161 @@ +databaseChangeLog: + - changeSet: + id: create-event-aggregates-table + author: robzienert + validCheckSum: 8:99ad0fa01e0a2f5a2d66e015a58dccfd + changes: + - createTable: + tableName: event_aggregates + columns: + - column: + name: aggregate_type + type: varchar(255) + constraints: + nullable: false + primaryKey: true + - column: + name: aggregate_id + type: char(64) + constraints: + nullable: false + primaryKey: true + - column: + name: token + type: char(26) + constraints: + nullable: false + unique: true + - column: + name: version + type: bigint + constraints: + nullable: false + - column: + name: last_change_timestamp + type: timestamp + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: event_aggregates + + - changeSet: + id: create-event-aggregates-table-indices + author: robzienert + changes: + - createIndex: + indexName: aggregate_type_token_idx + tableName: 
event_aggregates + columns: + - column: + name: aggregate_type + - column: + name: token + - createIndex: + indexName: aggregate_token_idx + tableName: event_aggregates + columns: + - column: + name: token + - createIndex: + indexName: aggregate_last_change_timestamp_idx + tableName: event_aggregates + columns: + - column: + name: last_change_timestamp + rollback: + - dropIndex: + indexName: aggregate_type_token_idx + tableName: event_aggregates + - dropIndex: + indexName: aggregate_token_idx + tableName: event_aggregates + + - changeSet: + id: create-events-table + author: robzienert + validCheckSum: 8:f41e5362ca2d12053be0053d7019dd9c + changes: + - createTable: + tableName: events + columns: + - column: + name: id + type: char(36) + constraints: + primaryKey: true + nullable: false + - column: + name: aggregate_type + type: varchar(255) + constraints: + nullable: false + - column: + name: aggregate_id + type: char(64) + constraints: + nullable: false + - column: + name: sequence + type: bigint + constraints: + nullable: false + - column: + name: originating_version + type: bigint + constraints: + nullable: false + - column: + name: timestamp + type: timestamp(6) + constraints: + nullable: false + - column: + name: metadata + type: text + constraints: + nullable: false + - column: + name: data + type: longtext + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: events + + - changeSet: + id: create-events-table-indices + author: robzienert + changes: + - createIndex: + indexName: event_aggregate_type_id_sequence_idx + tableName: events + columns: + - column: + name: aggregate_type + - column: + name: aggregate_id + - column: + name: sequence + - addForeignKeyConstraint: + baseColumnNames: aggregate_type,aggregate_id + baseTableName: events + constraintName: event_aggregate_fk + onDelete: CASCADE + onUpdate: RESTRICT + referencedColumnNames: aggregate_type,aggregate_id + referencedTableName: event_aggregates + rollback: + - dropIndex: + indexName: event_aggregate_type_id_sequence_idx + tableName: events + - dropForeignKeyConstraint: + constraintName: event_aggregate_fk + baseTableName: events diff --git a/clouddriver-sql/src/main/resources/db/changelog/20190913-task-sagaids.yml b/clouddriver-sql/src/main/resources/db/changelog/20190913-task-sagaids.yml new file mode 100644 index 00000000000..7b15d45e8a8 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20190913-task-sagaids.yml @@ -0,0 +1,29 @@ +databaseChangeLog: + - removeChangeSetProperty: + change: addColumn + dbms: postgresql + remove: afterColumn + - changeSet: + validCheckSum: 8:91cfabe4a8fa0517124436ef9675708e + id: add-task-sagaids-column + author: robzienert + changes: + - addColumn: + tableName: tasks + columns: + - name: saga_ids + type: text + afterColumn: created_at + rollback: + - dropColumn: + tableName: tasks + columnName: saga_ids + + - changeSet: + validCheckSum: 8:9601af668599fbc12e338b9b84c66f56 + id: mysql-update-state-enum-values + author: robzienert + changes: + - sql: + dbms: mysql + sql: ALTER TABLE `task_states` MODIFY COLUMN `state` ENUM("STARTED", "COMPLETED", "FAILED", "FAILED_RETRYABLE") NOT NULL DEFAULT "STARTED" diff --git a/clouddriver-sql/src/main/resources/db/changelog/20201110-field-type-postgres.yml b/clouddriver-sql/src/main/resources/db/changelog/20201110-field-type-postgres.yml new file mode 100644 index 00000000000..2682f56b850 --- /dev/null +++ 
b/clouddriver-sql/src/main/resources/db/changelog/20201110-field-type-postgres.yml @@ -0,0 +1,101 @@ +databaseChangeLog: + - changeSet: + preConditions: + onFail: MARK_RAN + dbms: + type: postgresql + id: change-field-types-postgresql + author: ncknt + changes: + - modifyDataType: + columnName: id + newDataType: varchar(36) + tableName: tasks + - modifyDataType: + columnName: id + newDataType: varchar(36) + tableName: task_states + - modifyDataType: + columnName: task_id + newDataType: varchar(36) + tableName: task_states + - modifyDataType: + columnName: id + newDataType: varchar(36) + tableName: task_results + - modifyDataType: + columnName: task_id + newDataType: varchar(36) + tableName: task_results + - modifyDataType: + columnName: id + newDataType: text + tableName: cats_v1_resource_template + - modifyDataType: + columnName: agent + newDataType: text + tableName: cats_v1_resource_template + - modifyDataType: + columnName: application + newDataType: text + tableName: cats_v1_resource_template + - modifyDataType: + columnName: body_hash + newDataType: varchar(64) + tableName: cats_v1_resource_template + - modifyDataType: + columnName: body + newDataType: text + tableName: cats_v1_resource_template + - modifyDataType: + columnName: uuid + newDataType: varchar(26) + tableName: cats_v1_rel_template + - modifyDataType: + columnName: id + newDataType: text + tableName: cats_v1_rel_template + - modifyDataType: + columnName: rel_id + newDataType: text + tableName: cats_v1_rel_template + - modifyDataType: + columnName: rel_agent + newDataType: text + tableName: cats_v1_rel_template + - modifyDataType: + columnName: rel_type + newDataType: text + tableName: cats_v1_rel_template + - modifyDataType: + columnName: agent_name + newDataType: text + tableName: cats_agent_locks + - modifyDataType: + columnName: owner_id + newDataType: text + tableName: cats_agent_locks + - modifyDataType: + columnName: aggregate_type + newDataType: text + tableName: event_aggregates + - modifyDataType: + columnName: aggregate_id + newDataType: text + tableName: event_aggregates + - modifyDataType: + columnName: token + newDataType: text + tableName: event_aggregates + - modifyDataType: + columnName: id + newDataType: text + tableName: events + - modifyDataType: + columnName: aggregate_type + newDataType: text + tableName: events + - modifyDataType: + columnName: aggregate_id + newDataType: text + tableName: events diff --git a/clouddriver-sql/src/main/resources/db/changelog/20210106-task-outputs.yml b/clouddriver-sql/src/main/resources/db/changelog/20210106-task-outputs.yml new file mode 100644 index 00000000000..6058813c9f4 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20210106-task-outputs.yml @@ -0,0 +1,43 @@ +databaseChangeLog: + - changeSet: + id: create-task-outputs-table + author: apoorvmahajan + changes: + - createTable: + tableName: task_outputs + columns: + - column: + name: id + type: char(36) + constraints: + primaryKey: true + nullable: false + - column: + name: task_id + type: char(36) + constraints: + nullable: false + - column: + name: created_at + type: bigint + constraints: + nullable: false + - column: + name: manifest + type: text + - column: + name: phase + type: varchar(255) + - column: + name: std_out + type: longtext + - column: + name: std_error + type: longtext + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: task_outputs diff --git a/clouddriver-sql/src/main/resources/db/changelog/20210311-caching-replicas.yml 
b/clouddriver-sql/src/main/resources/db/changelog/20210311-caching-replicas.yml new file mode 100644 index 00000000000..1bd31b52fb0 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20210311-caching-replicas.yml @@ -0,0 +1,39 @@ +databaseChangeLog: + - changeSet: + id: create-caching-replicas-table + author: kirangodishala + changes: + - createTable: + tableName: caching_replicas + columns: + - column: + name: pod_id + type: varchar(100) + constraints: + nullable: false + primaryKey: true + - column: + name: last_heartbeat_time + type: bigint + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb" + rollback: + - dropTable: + tableName: caching_replicas + + - changeSet: + id: add-replica-unique-constraint + author: kirangodishala + changes: + - addUniqueConstraint: + tableName: caching_replicas + columnNames: pod_id, last_heartbeat_time + constraintName: pod_id_last_heartbeat_time + rollback: + - dropUniqueConstraint: + tableName: caching_replicas + constraintName: pod_id_last_heartbeat_time diff --git a/clouddriver-sql/src/main/resources/db/changelog/20210927-accounts.yml b/clouddriver-sql/src/main/resources/db/changelog/20210927-accounts.yml new file mode 100644 index 00000000000..ec1b95cdcb0 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20210927-accounts.yml @@ -0,0 +1,132 @@ +databaseChangeLog: +- changeSet: + id: create-accounts-table + author: msicker + changes: + - createTable: + tableName: accounts + columns: + - column: + name: id + type: varchar(255) + constraints: + primaryKey: true + nullable: false + - column: + name: type + type: varchar(50) + constraints: + nullable: false + - column: + name: body + type: json + constraints: + nullable: false + - column: + name: created_at + type: bigint + constraints: + nullable: false + - column: + name: last_modified_at + type: bigint + constraints: + nullable: false + - column: + name: last_modified_by + type: varchar(255) + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_unicode_ci" + - modifySql: + dbms: postgresql + replace: + replace: json + with: jsonb + rollback: + - dropTable: + tableName: accounts + +- changeSet: + id: create-accounts-table-index + author: jcavanagh + changes: + - createIndex: + indexName: accounts_type_index + tableName: accounts + columns: + - column: + name: id + - column: + name: type + - createIndex: + indexName: accounts_timestamp_index + tableName: accounts + columns: + - column: + name: id + - column: + name: type + - column: + name: created_at + - column: + name: last_modified_at + rollback: + - dropTable: + tableName: accounts +- changeSet: + id: create-accounts-history-table + author: msicker + changes: + - createTable: + tableName: accounts_history + columns: + - column: + name: id + type: varchar(255) + constraints: + primaryKey: true + nullable: false + - column: + name: type + type: varchar(50) + constraints: + nullable: true + - column: + name: body + type: json + constraints: + nullable: true + - column: + name: last_modified_at + type: bigint + constraints: + nullable: false + - column: + name: version + type: int + constraints: + primaryKey: true + nullable: false + descending: true + - column: + name: is_deleted + type: boolean + defaultValueBoolean: false + constraints: + nullable: false + - modifySql: + dbms: mysql + append: + value: " engine innodb DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_unicode_ci" + - modifySql: + dbms: 
postgresql + replace: + replace: json + with: jsonb + rollback: + - dropTable: + tableName: accounts_history diff --git a/clouddriver-sql/src/main/resources/db/changelog/20240111-accounts-indexes.yml b/clouddriver-sql/src/main/resources/db/changelog/20240111-accounts-indexes.yml new file mode 100644 index 00000000000..021b03b5ee2 --- /dev/null +++ b/clouddriver-sql/src/main/resources/db/changelog/20240111-accounts-indexes.yml @@ -0,0 +1,47 @@ +databaseChangeLog: + - changeSet: + id: delete-unused-indexes + author: dzheng + changes: + - dropIndex: + indexName: accounts_type_index + tableName: accounts + - dropIndex: + indexName: accounts_timestamp_index + tableName: accounts + rollback: + - createIndex: + indexName: accounts_type_index + tableName: accounts + columns: + - column: + name: id + - column: + name: type + - createIndex: + indexName: accounts_timestamp_index + tableName: accounts + columns: + - column: + name: id + - column: + name: type + - column: + name: created_at + - column: + name: last_modified_at + + - changeSet: + id: create-type-index + author: dzheng + changes: + - createIndex: + indexName: accounts_type_index + tableName: accounts + columns: + - column: + name: type + rollback: + - dropIndex: + indexName: accounts_type_index + tableName: accounts diff --git a/clouddriver-sql/src/test/java/com/netflix/spinnaker/clouddriver/sql/SqlTaskRepositoryTest.java b/clouddriver-sql/src/test/java/com/netflix/spinnaker/clouddriver/sql/SqlTaskRepositoryTest.java new file mode 100644 index 00000000000..382561336cd --- /dev/null +++ b/clouddriver-sql/src/test/java/com/netflix/spinnaker/clouddriver/sql/SqlTaskRepositoryTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.sql; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.core.test.TaskRepositoryTck; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.config.ConnectionPools; +import com.netflix.spinnaker.kork.sql.config.RetryProperties; +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties; +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil; +import java.time.Clock; +import org.junit.jupiter.api.AfterEach; + +public class SqlTaskRepositoryTest extends TaskRepositoryTck { + + private SqlTestUtil.TestDatabase database; + + @Override + protected TaskRepository createTaskRepository() { + database = SqlTestUtil.initTcMysqlDatabase(); + + RetryProperties retry = new RetryProperties(0, 0); + SqlRetryProperties properties = + new SqlRetryProperties(new RetryProperties(1, 10), new RetryProperties(1, 10)); + properties.setReads(retry); + properties.setTransactions(retry); + + return new SqlTaskRepository( + database.context, + new ObjectMapper(), + Clock.systemDefaultZone(), + ConnectionPools.TASKS.getValue()); + } + + @AfterEach + public void cleanup() { + if (database != null) { + SqlTestUtil.cleanupDb(database.context); + } + } +} diff --git a/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskCleanupAgentTest.kt b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskCleanupAgentTest.kt new file mode 100644 index 00000000000..bac16c70fe9 --- /dev/null +++ b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskCleanupAgentTest.kt @@ -0,0 +1,161 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.spinnaker.clouddriver.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.sql.event.SqlEventCleanupAgent +import com.netflix.spinnaker.config.SqlEventCleanupAgentConfigProperties +import com.netflix.spinnaker.config.SqlTaskCleanupAgentProperties +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import io.mockk.every +import io.mockk.mockk +import strikt.api.expectThat +import strikt.assertions.isEqualTo +import java.sql.Timestamp +import java.time.Clock +import java.time.Instant +import java.time.temporal.ChronoUnit + + +class SqlTaskCleanupAgentTest : JUnit5Minutests { + + fun tests() = rootContext { + fixture { + Fixture() + } + + before { + + listOf( + Instant.now().minus(3, ChronoUnit.DAYS), + Instant.now().minus(10, ChronoUnit.DAYS), + ).forEachIndexed { i, ts -> + database.context + .insertInto(tasksTable) + .values( + "myid$i", + "7b96fe8de1e5e8e8620036480771195b8e25c583c9f4f0098a23e97bf2ba013b", + "95637b33-6699-4abf-b1ab-d4077e1cf867@spin-clouddriver-7847bc646b-hgkfd", + ts.toEpochMilli(), + objectMapper.writeValueAsString(mutableListOf()) + ) + .execute() + + database.context + .insertInto(taskResultsTable) + .values( + "$i", + "myid$i", + "body" + ) + .execute() + + database.context + .insertInto(taskStatesTable) + .values( + "$i", + "myid$i", + ts.toEpochMilli(), + "COMPLETED", + "ORCHESTRATION", + "Orchestration completed" + ) + .execute() + + database.context + .insertInto(taskOutputsTable) + .values( + "$i", + "myid$i", + ts.toEpochMilli(), + "configMap render-helm-output-manifest-test-v000", + "DEPLOY_KUBERNETES_MANIFEST", + "stOut", + "stdError" + ) + .execute() + } + } + + after { + SqlTestUtil.cleanupDb(database.context) + } + + test("deletes old tasks and related data") { + expectThat(database.context.fetchCount(tasksTable)).isEqualTo(2) + expectThat(database.context.fetchCount(taskStatesTable)).isEqualTo(2) + expectThat(database.context.fetchCount(taskResultsTable)).isEqualTo(2) + expectThat(database.context.fetchCount(taskOutputsTable)).isEqualTo(2) + subject.run() + + expectThat(database.context.fetchCount(tasksTable)).isEqualTo(1) + expectThat(database.context.fetchCount(taskStatesTable)).isEqualTo(1) + expectThat(database.context.fetchCount(taskResultsTable)).isEqualTo(1) + expectThat(database.context.fetchCount(taskOutputsTable)).isEqualTo(1) + + val tasksResultset = database.context.select() + .from(tasksTable) + .fetch("id", String::class.java) + .toTypedArray() + + expectThat(tasksResultset.size).isEqualTo(1) + expectThat(tasksResultset.get(0)).isEqualTo("myid0") + + val taskResultsResultset = database.context.select() + .from(taskResultsTable) + .fetch("task_id", String::class.java) + .toTypedArray() + + expectThat(taskResultsResultset.size).isEqualTo(1) + expectThat(taskResultsResultset.get(0)).isEqualTo("myid0") + + val taskStatesResultset = database.context.select() + .from(taskStatesTable) + .fetch("task_id", String::class.java) + .toTypedArray() + + expectThat(taskStatesResultset.size).isEqualTo(1) + expectThat(taskStatesResultset.get(0)).isEqualTo("myid0") + + val taskOutputsResultset = database.context.select() + .from(taskOutputsTable) + .fetch("task_id", String::class.java) + .toTypedArray() + + expectThat(taskOutputsResultset.size).isEqualTo(1) + 
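+ // Assumption: the agent's default retention window sits between 3 and 10 days, so only the 10-day-old "myid1" is purged and every surviving row belongs to the 3-day-old "myid0".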
expectThat(taskOutputsResultset.get(0)).isEqualTo("myid0") + } + } + + private inner class Fixture { + val database = SqlTestUtil.initTcMysqlDatabase()!! + + val subject = SqlTaskCleanupAgent( + jooq = database.context, + clock = Clock.systemDefaultZone(), + registry = NoopRegistry(), + SqlTaskCleanupAgentProperties() + ) + + val objectMapper = ObjectMapper() + } +} diff --git a/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskOutputTest.kt b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskOutputTest.kt new file mode 100644 index 00000000000..3852b57af13 --- /dev/null +++ b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/SqlTaskOutputTest.kt @@ -0,0 +1,108 @@ +/* + * Copyright 2021 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.sql + +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule +import com.fasterxml.jackson.module.kotlin.KotlinModule +import com.netflix.spinnaker.config.ConnectionPools +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import java.time.Clock + +class SqlTaskOutputTest : JUnit5Minutests { + + fun tests() = rootContext { + fixture { + Fixture() + } + + after { + SqlTestUtil.cleanupDb(database.context) + } + + context("task output") { + test("verify if the task outputs with null/empty values can be stored and retrieved successfully from the db") { + val t1 = subject.create("TEST", "Test Status") + + t1.updateOutput("some-manifest", "TEST", null, "") + assert(t1.outputs[0].manifest == "some-manifest") + assert(t1.outputs[0].phase == "TEST") + assert(t1.outputs[0].stdOut.isNullOrBlank()) + assert(t1.outputs[0].stdError.isNullOrBlank()) + } + + test("verify if the task outputs can be stored and retrieved successfully from the db") { + val t1 = subject.create("TEST", "Test Status") + + t1.updateOutput("some-manifest", "TEST", "output", "") + assert(t1.outputs[0].manifest == "some-manifest") + assert(t1.outputs[0].phase == "TEST") + assert(t1.outputs[0].stdOut == "output") + assert(t1.outputs[0].stdError.isNullOrBlank()) + } + + test("task has outputs from multiple manifests") { + val t1 = subject.create("TEST", "Test Status") + + t1.updateOutput("some-manifest", "TEST", "output", "") + t1.updateOutput("some-manifest-2", "Deploy", "other output", "") + assert(t1.outputs.size == 2) + assert(t1.outputs[0].manifest == "some-manifest") + assert(t1.outputs[0].phase == "TEST") + assert(t1.outputs[0].stdOut == "output") + assert(t1.outputs[0].stdError == "") + assert(t1.outputs[1].manifest == "some-manifest-2") + assert(t1.outputs[1].phase == "Deploy") + assert(t1.outputs[1].stdOut == "other output") + assert(t1.outputs[1].stdError.isNullOrBlank()) + } + + test("multiple tasks with only one task having outputs from multiple manifests") { + val t1 = subject.create("TEST", "Test Status") 
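+ // t2 is created but never given outputs; the closing assertion (t2.outputs.isEmpty()) verifies that outputs written to t1 do not leak across tasks.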
+ val t2 = subject.create("TEST", "Test Status") + + t1.updateOutput("some-manifest", "TEST", "output", "") + t1.updateOutput("some-manifest-2", "Deploy", "other output", "") + assert(t1.outputs.size == 2) + assert(t1.outputs[0].manifest == "some-manifest") + assert(t1.outputs[0].phase == "TEST") + assert(t1.outputs[0].stdOut == "output") + assert(t1.outputs[0].stdError.isNullOrBlank()) + assert(t1.outputs[1].manifest == "some-manifest-2") + assert(t1.outputs[1].phase == "Deploy") + assert(t1.outputs[1].stdOut == "other output") + assert(t1.outputs[1].stdError.isNullOrBlank()) + assert(t2.outputs.isEmpty()) + } + } + } + + private inner class Fixture { + val database = SqlTestUtil.initTcMysqlDatabase()!! + + val subject = SqlTaskRepository( + jooq = database.context, + mapper = ObjectMapper().apply { + registerModules(KotlinModule(), JavaTimeModule()) + }, + clock = Clock.systemDefaultZone(), + poolName = ConnectionPools.TASKS.value + ) + } +} diff --git a/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventCleanupAgentTest.kt b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventCleanupAgentTest.kt new file mode 100644 index 00000000000..ad85a6827a9 --- /dev/null +++ b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventCleanupAgentTest.kt @@ -0,0 +1,99 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.spinnaker.clouddriver.sql.event + +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.sql.event.SqlEventCleanupAgent.Companion.EVENT_CLEANUP_INTERVAL_KEY +import com.netflix.spinnaker.clouddriver.sql.event.SqlEventCleanupAgent.Companion.EVENT_CLEANUP_LIMIT_KEY +import com.netflix.spinnaker.clouddriver.sql.event.SqlEventCleanupAgent.Companion.EVENT_CLEANUP_TIMEOUT_KEY +import com.netflix.spinnaker.config.SqlEventCleanupAgentConfigProperties +import com.netflix.spinnaker.config.SqlEventCleanupAgentConfigProperties.Companion.EVENT_CLEANUP_LIMIT +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import de.huxhorn.sulky.ulid.ULID +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import io.mockk.every +import io.mockk.mockk +import org.jooq.impl.DSL +import org.junit.jupiter.api.Assumptions.assumeTrue +import org.testcontainers.DockerClientFactory +import strikt.api.expectThat +import strikt.assertions.isEqualTo +import java.sql.Timestamp +import java.time.Instant +import java.time.temporal.ChronoUnit + +class SqlEventCleanupAgentTest : JUnit5Minutests { + + fun tests() = rootContext { + fixture { + Fixture() + } + + beforeAll { + assumeTrue(DockerClientFactory.instance().isDockerAvailable) + } + + before { + listOf( + Instant.now().minus(3, ChronoUnit.DAYS), + Instant.now().minus(10, ChronoUnit.DAYS) + ).forEachIndexed { i, ts -> + database.context + .insertInto(DSL.table("event_aggregates")) + .values( + "mytype", + "myid$i", + ULID().nextULID(), + 1, + Timestamp.from(ts) + ) + .execute() + } + } + + after { + SqlTestUtil.cleanupDb(database.context) + } + + test("deletes old aggregates") { + subject.run() + + val count = database.context.fetchCount(DSL.table("event_aggregates")) + expectThat(count).isEqualTo(1) + } + } + + private inner class Fixture { + val database = SqlTestUtil.initTcMysqlDatabase()!! + val dynamicConfigService: DynamicConfigService = mockk(relaxed = true) + + val subject = SqlEventCleanupAgent( + database.context, + NoopRegistry(), + SqlEventCleanupAgentConfigProperties(), + dynamicConfigService + ) + + init { + every { dynamicConfigService.getConfig(eq(Int::class.java), eq(EVENT_CLEANUP_LIMIT_KEY), eq(EVENT_CLEANUP_LIMIT)) } returns EVENT_CLEANUP_LIMIT + every { dynamicConfigService.getConfig(eq(String::class.java), eq(EVENT_CLEANUP_INTERVAL_KEY), eq("PT1M")) } returns "PT1M" + every { dynamicConfigService.getConfig(eq(String::class.java), eq(EVENT_CLEANUP_TIMEOUT_KEY), eq("PT45S")) } returns "PT45S" + } + } +} diff --git a/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventRepositoryTest.kt b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventRepositoryTest.kt new file mode 100644 index 00000000000..72cc1f17e8a --- /dev/null +++ b/clouddriver-sql/src/test/kotlin/com/netflix/spinnaker/clouddriver/sql/event/SqlEventRepositoryTest.kt @@ -0,0 +1,229 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.sql.event + +import com.fasterxml.jackson.annotation.JsonTypeName +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule +import com.fasterxml.jackson.module.kotlin.KotlinModule +import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.clouddriver.event.AbstractSpinnakerEvent +import com.netflix.spinnaker.clouddriver.event.exceptions.AggregateChangeRejectedException +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository.ListAggregatesCriteria +import com.netflix.spinnaker.clouddriver.event.persistence.EventRepository.ListAggregatesResult +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil +import com.netflix.spinnaker.kork.version.ServiceVersion +import dev.minutest.junit.JUnit5Minutests +import dev.minutest.rootContext +import io.mockk.every +import io.mockk.mockk +import org.junit.jupiter.api.Assumptions.assumeTrue +import org.springframework.context.ApplicationEventPublisher +import org.testcontainers.DockerClientFactory +import strikt.api.expect +import strikt.api.expectThat +import strikt.api.expectThrows +import strikt.assertions.containsExactly +import strikt.assertions.hasSize +import strikt.assertions.isA +import strikt.assertions.isEqualTo +import strikt.assertions.isNotEmpty +import strikt.assertions.isNotNull +import strikt.assertions.isNull + +class SqlEventRepositoryTest : JUnit5Minutests { + + fun tests() = rootContext { + fixture { + Fixture() + } + + beforeAll { + assumeTrue(DockerClientFactory.instance().isDockerAvailable) + } + + after { + SqlTestUtil.cleanupDb(database.context) + } + + context("event lifecycle") { + test("events can be saved") { + subject.save("agg", "1", 0, listOf(MyEvent("one"))) + + expectThat(subject.listAggregates(ListAggregatesCriteria())) + .isA() + .get { aggregates }.isNotEmpty() + .get { first() } + .and { + get { type }.isEqualTo("agg") + get { id }.isEqualTo("1") + get { version }.isEqualTo(1) + } + + subject.save("agg", "1", 1, listOf(MyEvent("two"), MyEvent("three"))) + + expectThat(subject.list("agg", "1")) + .isA>() + .isNotEmpty() + .hasSize(3) + .and { + get { map { it.value } } + .isA>() + .containsExactly("one", "two", "three") + } + } + + test("events saved against old version are rejected") { + expectThrows { + subject.save("agg", "1", 10, listOf(MyEvent("two"))) + } + + subject.save("agg", "1", 0, listOf(MyEvent("one"))) + + expectThrows { + subject.save("agg", "1", 0, listOf(MyEvent("two"))) + } + } + + test("events correctly increment sequence across transactions") { + subject.save("agg", "1", 0, listOf(MyEvent("1"), MyEvent("2"))) + subject.save("agg", "1", 1, listOf(MyEvent("3"), MyEvent("4"))) + + expectThat(subject.list("agg", "1")) + .get { map { it.getMetadata().sequence } } + .isA>() + .containsExactly(1, 2, 3, 4) + } + + context("listing aggregates") { + fun Fixture.setupAggregates() { + subject.save("foo", "1", 0, listOf(MyEvent("hi foo"))) + subject.save("bar", "1", 0, listOf(MyEvent("hi bar 1"))) + subject.save("bar", "2", 0, listOf(MyEvent("hi bar 2"))) + subject.save("bar", "3", 0, listOf(MyEvent("hi bar 3"))) + subject.save("bar", "4", 0, listOf(MyEvent("hi bar 4"))) + subject.save("bar", "5", 0, listOf(MyEvent("hi bar 5"))) + } + + test("default criteria") { + setupAggregates() + + expectThat(subject.listAggregates(ListAggregatesCriteria())) + .isA() + .and { + 
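+ // setupAggregates() stored one "foo" and five "bar" aggregates, so an unfiltered listing should return all six.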
get { aggregates }.hasSize(6) + get { nextPageToken }.isNull() + } + } + + test("filtering by type") { + setupAggregates() + + expectThat(subject.listAggregates(ListAggregatesCriteria(aggregateType = "foo"))) + .isA() + .and { + get { aggregates }.hasSize(1) + .get { first() } + .and { + get { type }.isEqualTo("foo") + get { id }.isEqualTo("1") + } + get { nextPageToken }.isNull() + } + } + + test("pagination") { + setupAggregates() + + expect { + var response = subject.listAggregates(ListAggregatesCriteria(perPage = 2)) + that(response) + .describedAs("first page") + .isA() + .and { + get { aggregates }.hasSize(2) + .and { + get { first().type }.isEqualTo("foo") + get { last() } + .and { + get { type }.isEqualTo("bar") + get { id }.isEqualTo("1") + } + } + get { nextPageToken }.isNotNull() + } + + response = subject.listAggregates(ListAggregatesCriteria(perPage = 2, token = response.nextPageToken)) + that(response) + .describedAs("second page") + .isA() + .and { + get { aggregates }.hasSize(2) + .and { + get { first().type }.isEqualTo("bar") + get { first().id }.isEqualTo("2") + get { last().type }.isEqualTo("bar") + get { last().id }.isEqualTo("3") + } + get { nextPageToken }.isNotNull() + } + + that(subject.listAggregates(ListAggregatesCriteria(perPage = 2, token = response.nextPageToken))) + .describedAs("last page") + .isA() + .and { + get { aggregates }.hasSize(2) + .and { + get { first().type }.isEqualTo("bar") + get { first().id }.isEqualTo("4") + get { last().type }.isEqualTo("bar") + get { last().id }.isEqualTo("5") + } + get { nextPageToken }.isNull() + } + } + } + } + } + } + + private inner class Fixture { + val database = SqlTestUtil.initTcMysqlDatabase()!! + + val serviceVersion: ServiceVersion = mockk(relaxed = true) + val applicationEventPublisher: ApplicationEventPublisher = mockk(relaxed = true) + + val subject = SqlEventRepository( + jooq = database.context, + serviceVersion = serviceVersion, + objectMapper = ObjectMapper().apply { + registerModules(KotlinModule(), JavaTimeModule()) + registerSubtypes(MyEvent::class.java) + }, + applicationEventPublisher = applicationEventPublisher, + registry = NoopRegistry() + ) + + init { + every { serviceVersion.resolve() } returns "v1.2.3" + } + } + + @JsonTypeName("myEvent") + private class MyEvent( + val value: String + ) : AbstractSpinnakerEvent() +} diff --git a/clouddriver-tencentcloud/clouddriver-tencentcloud.gradle b/clouddriver-tencentcloud/clouddriver-tencentcloud.gradle new file mode 100644 index 00000000000..001c5e509cc --- /dev/null +++ b/clouddriver-tencentcloud/clouddriver-tencentcloud.gradle @@ -0,0 +1,32 @@ +tasks.compileGroovy.enabled = false +sourceSets.main.java.srcDirs = ['src/main/java'] +dependencies { + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-eureka") + implementation project(":clouddriver-security") + + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-moniker" + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-security" + implementation "org.springframework.boot:spring-boot-actuator" + implementation 
"org.springframework.boot:spring-boot-starter-web" + implementation "com.google.guava:guava" + implementation "com.tencentcloudapi:tencentcloud-sdk-java:3.1.51" + + testImplementation "cglib:cglib-nodep" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter" + testImplementation "org.mockito:mockito-core" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/TencentCloudOperation.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/TencentCloudOperation.java new file mode 100644 index 00000000000..7e18687e3fd --- /dev/null +++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/TencentCloudOperation.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.tencentcloud; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface TencentCloudOperation { + + String value(); +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/TencentCloudProvider.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/TencentCloudProvider.java new file mode 100644 index 00000000000..a49e21b5872 --- /dev/null +++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/TencentCloudProvider.java @@ -0,0 +1,46 @@ +/* + * Copyright 2019 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.tencentcloud; + +import com.netflix.spinnaker.clouddriver.core.CloudProvider; +import java.lang.annotation.Annotation; +import org.springframework.stereotype.Component; + +@Component +public class TencentCloudProvider implements CloudProvider { + + public static final String ID = "tencentcloud"; + + final String id = ID; + final String displayName = "TencentCloud"; + final Class operationAnnotationType = TencentCloudOperation.class; + + @Override + public String getId() { + return id; + } + + @Override + public String getDisplayName() { + return displayName; + } + + @Override + public Class getOperationAnnotationType() { + return operationAnnotationType; + } +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/cache/Keys.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/cache/Keys.java new file mode 100644 index 00000000000..dee2c674aa4 --- /dev/null +++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/cache/Keys.java @@ -0,0 +1,324 @@ +/* + * Copyright 2019 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.tencentcloud.cache; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.cache.KeyParser; +import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider; +import java.util.HashMap; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +@Slf4j +@Component("TencentCloudKeys") +public class Keys implements KeyParser { + + private static final String SEPARATOR = ":"; + + public enum Namespace { + APPLICATIONS, + CLUSTERS, + HEALTH_CHECKS, + LAUNCH_CONFIGS, + IMAGES, + NAMED_IMAGES, + INSTANCES, + INSTANCE_TYPES, + KEY_PAIRS, + LOAD_BALANCERS, + NETWORKS, + SECURITY_GROUPS, + SERVER_GROUPS, + SUBNETS, + ON_DEMAND; + + public final String ns; + + Namespace() { + this.ns = name().toLowerCase(); + } + + public static Namespace fromString(String name) { + try { + return valueOf(name.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("No matching namespace with name " + name + " exists"); + } + } + + public String toString() { + return ns; + } + } + + @Override + public String getCloudProvider() { + return TencentCloudProvider.ID; + } + + @Override + public Boolean canParseType(final String type) { + try { + Namespace.fromString(type); + return true; + } catch (Exception e) { + return false; + } + } + + @Override + public Boolean canParseField(String field) { + return false; + } + + @Override + public Map parseKey(String key) { + return parse(key); + } + + public static Map parse(String key) { + String[] parts = key.split(SEPARATOR); + + if (parts.length < 2 || !parts[0].equals(TencentCloudProvider.ID)) { + return null; + } + + Map result = new HashMap<>(); + result.put("provider", parts[0]); + 
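+ // Keys are colon-separated: "tencentcloud:<namespace>:...". For example, getInstanceKey() below yields "tencentcloud:instances:<account>:<region>:<instanceId>".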
result.put("type", parts[1]); + + Namespace namespace = Namespace.fromString(result.get("type")); + + if (namespace == null) { + return null; + } + + switch (namespace) { + case APPLICATIONS: + break; + case CLUSTERS: + Names names = Names.parseName(parts[4]); + result.put("application", parts[3]); + result.put("account", parts[2]); + result.put("name", parts[4]); + result.put("cluster", parts[4]); + result.put("stack", names.getStack()); + result.put("detail", names.getDetail()); + break; + case IMAGES: + result.put("account", parts[2]); + result.put("region", parts[3]); + result.put("imageId", parts[4]); + break; + case NAMED_IMAGES: + result.put("account", parts[2]); + result.put("imageName", parts[3]); + break; + case LOAD_BALANCERS: + case NETWORKS: + case SUBNETS: + result.put("account", parts[2]); + result.put("region", parts[3]); + result.put("id", parts[4]); + break; + case SECURITY_GROUPS: + result.put("application", Names.parseName(parts[2]).getApp()); + result.put("name", parts[2]); + result.put("account", parts[3]); + result.put("region", parts[4]); + result.put("id", parts[5]); + break; + case SERVER_GROUPS: + result.put("account", parts[2]); + result.put("region", parts[3]); + result.put("cluster", parts[4]); + result.put("name", parts[5]); + break; + default: + return null; + } + + return result; + } + + public static String getApplicationKey(String application) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.APPLICATIONS + + SEPARATOR + + application.toLowerCase(); + } + + public static String getClusterKey(String clusterName, String application, String account) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.CLUSTERS + + SEPARATOR + + account + + SEPARATOR + + application.toLowerCase() + + SEPARATOR + + clusterName; + } + + public static String getServerGroupKey(String serverGroupName, String account, String region) { + Names names = Names.parseName(serverGroupName); + return getServerGroupKey(names.getCluster(), names.getGroup(), account, region); + } + + public static String getServerGroupKey( + String cluster, String serverGroupName, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.SERVER_GROUPS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + cluster + + SEPARATOR + + serverGroupName; + } + + public static String getInstanceKey(String instanceId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.INSTANCES + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + instanceId; + } + + public static String getImageKey(String imageId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.IMAGES + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + imageId; + } + + public static String getNamedImageKey(String imageName, String account) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.NAMED_IMAGES + + SEPARATOR + + account + + SEPARATOR + + imageName; + } + + public static String getKeyPairKey(String keyId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.KEY_PAIRS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + keyId; + } + + public static String getInstanceTypeKey(String account, String region, String instanceType) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.INSTANCE_TYPES + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + instanceType; + } + + public 
static String getLoadBalancerKey(String loadBalancerId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.LOAD_BALANCERS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + loadBalancerId; + } + + public static String getSecurityGroupKey( + String securityGroupId, String securityGroupName, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.SECURITY_GROUPS + + SEPARATOR + + securityGroupName + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + securityGroupId; + } + + public static String getNetworkKey(String networkId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.NETWORKS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + networkId; + } + + public static String getSubnetKey(String subnetId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.SUBNETS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + subnetId; + } + + public static String getTargetHealthKey( + String loadBalancerId, String listenerId, String instanceId, String account, String region) { + return TencentCloudProvider.ID + + SEPARATOR + + Namespace.HEALTH_CHECKS + + SEPARATOR + + account + + SEPARATOR + + region + + SEPARATOR + + loadBalancerId + + SEPARATOR + + listenerId + + SEPARATOR + + instanceId; + } +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/client/AbstractTencentCloudServiceClient.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/client/AbstractTencentCloudServiceClient.java new file mode 100644 index 00000000000..050614c7e73 --- /dev/null +++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/client/AbstractTencentCloudServiceClient.java @@ -0,0 +1,80 @@ +/* + * Copyright 2019 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.tencentcloud.client; + +import com.tencentcloudapi.common.Credential; +import com.tencentcloudapi.common.profile.ClientProfile; +import com.tencentcloudapi.common.profile.HttpProfile; +import java.time.Instant; +import java.time.format.DateTimeFormatter; +import java.time.temporal.TemporalAccessor; +import java.util.Date; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public abstract class AbstractTencentCloudServiceClient { + public static final long MAX_QUERY_TIME = 1000; + public static final long DEFAULT_LIMIT = 100; + private Credential credential; + private HttpProfile httpProfile; + private ClientProfile clientProfile; + + public abstract String getEndPoint(); + + public AbstractTencentCloudServiceClient(String secretId, String secretKey) { + credential = new Credential(secretId, secretKey); + httpProfile = new HttpProfile(); + httpProfile.setEndpoint(getEndPoint()); + clientProfile = new ClientProfile(); + clientProfile.setHttpProfile(httpProfile); + } + + public static Date convertToIsoDateTime(String isoDateTime) { + try { + DateTimeFormatter timeFormatter = DateTimeFormatter.ISO_DATE_TIME; + TemporalAccessor accessor = timeFormatter.parse(isoDateTime); + return Date.from(Instant.from(accessor)); + } catch (Exception e) { + log.warn("convert time error " + e.toString()); + return null; + } + } + + public Credential getCredential() { + return credential; + } + + public void setCredential(Credential credential) { + this.credential = credential; + } + + public HttpProfile getHttpProfile() { + return httpProfile; + } + + public void setHttpProfile(HttpProfile httpProfile) { + this.httpProfile = httpProfile; + } + + public ClientProfile getClientProfile() { + return clientProfile; + } + + public void setClientProfile(ClientProfile clientProfile) { + this.clientProfile = clientProfile; + } +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/client/AutoScalingClient.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/client/AutoScalingClient.java new file mode 100644 index 00000000000..b26d6f421c6 --- /dev/null +++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/client/AutoScalingClient.java @@ -0,0 +1,781 @@ +/* + * Copyright 2019 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.tencentcloud.client; + +import static java.lang.Thread.sleep; + +import com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description.ResizeTencentCloudServerGroupDescription.Capacity; +import com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description.TencentCloudDeployDescription; +import com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description.UpsertTencentCloudScalingPolicyDescription; +import com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description.UpsertTencentCloudScheduledActionDescription; +import com.netflix.spinnaker.clouddriver.tencentcloud.exception.TencentCloudOperationException; +import com.tencentcloudapi.as.v20180419.AsClient; +import com.tencentcloudapi.as.v20180419.models.Activity; +import com.tencentcloudapi.as.v20180419.models.AutoScalingGroup; +import com.tencentcloudapi.as.v20180419.models.CreateAutoScalingGroupRequest; +import com.tencentcloudapi.as.v20180419.models.CreateAutoScalingGroupResponse; +import com.tencentcloudapi.as.v20180419.models.CreateLaunchConfigurationRequest; +import com.tencentcloudapi.as.v20180419.models.CreateLaunchConfigurationResponse; +import com.tencentcloudapi.as.v20180419.models.CreateScalingPolicyRequest; +import com.tencentcloudapi.as.v20180419.models.CreateScalingPolicyResponse; +import com.tencentcloudapi.as.v20180419.models.CreateScheduledActionRequest; +import com.tencentcloudapi.as.v20180419.models.CreateScheduledActionResponse; +import com.tencentcloudapi.as.v20180419.models.DataDisk; +import com.tencentcloudapi.as.v20180419.models.DeleteAutoScalingGroupRequest; +import com.tencentcloudapi.as.v20180419.models.DeleteLaunchConfigurationRequest; +import com.tencentcloudapi.as.v20180419.models.DeleteScalingPolicyRequest; +import com.tencentcloudapi.as.v20180419.models.DeleteScheduledActionRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeAutoScalingActivitiesRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeAutoScalingActivitiesResponse; +import com.tencentcloudapi.as.v20180419.models.DescribeAutoScalingGroupsRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeAutoScalingGroupsResponse; +import com.tencentcloudapi.as.v20180419.models.DescribeAutoScalingInstancesRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeAutoScalingInstancesResponse; +import com.tencentcloudapi.as.v20180419.models.DescribeLaunchConfigurationsRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeLaunchConfigurationsResponse; +import com.tencentcloudapi.as.v20180419.models.DescribeScalingPoliciesRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeScalingPoliciesResponse; +import com.tencentcloudapi.as.v20180419.models.DescribeScheduledActionsRequest; +import com.tencentcloudapi.as.v20180419.models.DescribeScheduledActionsResponse; +import com.tencentcloudapi.as.v20180419.models.DisableAutoScalingGroupRequest; +import com.tencentcloudapi.as.v20180419.models.EnableAutoScalingGroupRequest; +import com.tencentcloudapi.as.v20180419.models.EnhancedService; +import com.tencentcloudapi.as.v20180419.models.Filter; +import com.tencentcloudapi.as.v20180419.models.ForwardLoadBalancer; +import com.tencentcloudapi.as.v20180419.models.Instance; +import com.tencentcloudapi.as.v20180419.models.InstanceMarketOptionsRequest; +import com.tencentcloudapi.as.v20180419.models.InstanceTag; +import com.tencentcloudapi.as.v20180419.models.InternetAccessible; +import com.tencentcloudapi.as.v20180419.models.LaunchConfiguration; 
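+// The com.tencentcloudapi.as imports above cover the Auto Scaling SDK models (launch configurations, scaling groups, policies, scheduled actions); the clb imports below handle load balancer target registration and deregistration.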
+import com.tencentcloudapi.as.v20180419.models.LoginSettings; +import com.tencentcloudapi.as.v20180419.models.ModifyAutoScalingGroupRequest; +import com.tencentcloudapi.as.v20180419.models.ModifyScalingPolicyRequest; +import com.tencentcloudapi.as.v20180419.models.ModifyScheduledActionRequest; +import com.tencentcloudapi.as.v20180419.models.RemoveInstancesRequest; +import com.tencentcloudapi.as.v20180419.models.RunMonitorServiceEnabled; +import com.tencentcloudapi.as.v20180419.models.RunSecurityServiceEnabled; +import com.tencentcloudapi.as.v20180419.models.ScalingPolicy; +import com.tencentcloudapi.as.v20180419.models.ScheduledAction; +import com.tencentcloudapi.as.v20180419.models.SpotMarketOptions; +import com.tencentcloudapi.as.v20180419.models.SystemDisk; +import com.tencentcloudapi.clb.v20180317.ClbClient; +import com.tencentcloudapi.clb.v20180317.models.ClassicalTarget; +import com.tencentcloudapi.clb.v20180317.models.ClassicalTargetInfo; +import com.tencentcloudapi.clb.v20180317.models.DeregisterTargetsFromClassicalLBRequest; +import com.tencentcloudapi.clb.v20180317.models.DeregisterTargetsRequest; +import com.tencentcloudapi.clb.v20180317.models.DescribeClassicalLBTargetsRequest; +import com.tencentcloudapi.clb.v20180317.models.DescribeClassicalLBTargetsResponse; +import com.tencentcloudapi.clb.v20180317.models.DescribeTargetsRequest; +import com.tencentcloudapi.clb.v20180317.models.DescribeTargetsResponse; +import com.tencentcloudapi.clb.v20180317.models.ListenerBackend; +import com.tencentcloudapi.clb.v20180317.models.RegisterTargetsRequest; +import com.tencentcloudapi.clb.v20180317.models.RegisterTargetsWithClassicalLBRequest; +import com.tencentcloudapi.clb.v20180317.models.Target; +import com.tencentcloudapi.common.exception.TencentCloudSDKException; +import com.tencentcloudapi.common.profile.ClientProfile; +import com.tencentcloudapi.common.profile.HttpProfile; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.CollectionUtils; +import org.springframework.util.StringUtils; + +@Component +@Slf4j +public class AutoScalingClient extends AbstractTencentCloudServiceClient { + + private static final String END_POINT = "as.tencentcloudapi.com"; + private static final String CLB_ENDPOINT = "clb.tencentcloudapi.com"; + private static final String DEFAULT_SERVER_GROUP_TAG_KEY = "spinnaker:server-group-name"; + + private final AsClient client; + private final ClbClient clbClient; + + public AutoScalingClient(String secretId, String secretKey, String region) { + super(secretId, secretKey); + + this.client = new AsClient(getCredential(), region, getClientProfile()); + + HttpProfile clbHttpProfile = new HttpProfile(); + clbHttpProfile.setEndpoint(CLB_ENDPOINT); + + ClientProfile clbClientProfile = new ClientProfile(); + clbClientProfile.setHttpProfile(clbHttpProfile); + + this.clbClient = new ClbClient(getCredential(), region, clbClientProfile); + } + + public String deploy(TencentCloudDeployDescription description) { + try { + // 1. 
create launch configuration + CreateLaunchConfigurationRequest createLaunchConfigurationRequest = + buildLaunchConfigurationRequest(description); + CreateLaunchConfigurationResponse createLaunchConfigurationResponse = + client.CreateLaunchConfiguration(createLaunchConfigurationRequest); + String launchConfigurationId = createLaunchConfigurationResponse.getLaunchConfigurationId(); + + try { + // 2. create auto scaling group + CreateAutoScalingGroupRequest createAutoScalingGroupRequest = + buildAutoScalingGroupRequest(description, launchConfigurationId); + CreateAutoScalingGroupResponse createAutoScalingGroupResponse = + client.CreateAutoScalingGroup(createAutoScalingGroupRequest); + return createAutoScalingGroupResponse.getAutoScalingGroupId(); + } catch (TencentCloudSDKException e) { + // if create auto scaling group failed, delete launch configuration. + log.error(e.toString()); + DeleteLaunchConfigurationRequest request = new DeleteLaunchConfigurationRequest(); + request.setLaunchConfigurationId(launchConfigurationId); + client.DeleteLaunchConfiguration(request); + throw e; + } + } catch (TencentCloudSDKException e) { + throw new TencentCloudOperationException(e.toString()); + } + } + + private CreateLaunchConfigurationRequest buildLaunchConfigurationRequest( + TencentCloudDeployDescription description) { + CreateLaunchConfigurationRequest createLaunchConfigurationRequest = + new CreateLaunchConfigurationRequest(); + + String launchConfigurationName = description.getServerGroupName(); + createLaunchConfigurationRequest.setLaunchConfigurationName(launchConfigurationName); + createLaunchConfigurationRequest.setImageId(description.getImageId()); + + if (description.getProjectId() != null) { + createLaunchConfigurationRequest.setProjectId(description.getProjectId()); + } + + if (description.getInstanceType() != null) { + createLaunchConfigurationRequest.setInstanceType(description.getInstanceType()); + } + + if (description.getSystemDisk() != null) { + SystemDisk systemDisk = new SystemDisk(); + systemDisk.setDiskSize((description.getSystemDisk().getDiskSize())); + systemDisk.setDiskType(description.getSystemDisk().getDiskType()); + createLaunchConfigurationRequest.setSystemDisk(systemDisk); + } + + if (!CollectionUtils.isEmpty(description.getDataDisks())) { + createLaunchConfigurationRequest.setDataDisks( + description.getDataDisks().toArray(new DataDisk[0])); + } + + if (description.getInternetAccessible() != null) { + InternetAccessible internetAccessible = new InternetAccessible(); + internetAccessible.setInternetChargeType( + description.getInternetAccessible().getInternetChargeType()); + internetAccessible.setInternetMaxBandwidthOut( + description.getInternetAccessible().getInternetMaxBandwidthOut()); + internetAccessible.setPublicIpAssigned( + description.getInternetAccessible().getPublicIpAssigned()); + createLaunchConfigurationRequest.setInternetAccessible(internetAccessible); + } + + if (description.getLoginSettings() != null) { + LoginSettings loginSettings = new LoginSettings(); + loginSettings.setKeepImageLogin(description.getLoginSettings().getKeepImageLogin()); + loginSettings.setKeyIds(description.getLoginSettings().getKeyIds()); + loginSettings.setPassword(description.getLoginSettings().getPassword()); + createLaunchConfigurationRequest.setLoginSettings(loginSettings); + } + + if (!CollectionUtils.isEmpty(description.getSecurityGroupIds())) { + createLaunchConfigurationRequest.setSecurityGroupIds( + description.getSecurityGroupIds().toArray(new String[0])); + } + + if 
(description.getEnhancedService() != null) { + EnhancedService enhancedService = new EnhancedService(); + RunMonitorServiceEnabled monitorServiceEnabled = + description.getEnhancedService().getMonitorService(); + RunSecurityServiceEnabled securityServiceEnabled = + description.getEnhancedService().getSecurityService(); + enhancedService.setMonitorService(monitorServiceEnabled); + enhancedService.setSecurityService(securityServiceEnabled); + createLaunchConfigurationRequest.setEnhancedService(enhancedService); + } + + if (!StringUtils.isEmpty(description.getUserData())) { + createLaunchConfigurationRequest.setUserData(description.getUserData()); + } + + if (!StringUtils.isEmpty(description.getInstanceChargeType())) { + createLaunchConfigurationRequest.setInstanceChargeType(description.getInstanceChargeType()); + } + + if (description.getInstanceMarketOptionsRequest() != null) { + InstanceMarketOptionsRequest instanceMarketOptionsRequest = + new InstanceMarketOptionsRequest(); + instanceMarketOptionsRequest.setMarketType( + description.getInstanceMarketOptionsRequest().getMarketType()); + + SpotMarketOptions spotOptions = new SpotMarketOptions(); + spotOptions.setMaxPrice( + description.getInstanceMarketOptionsRequest().getSpotOptions().getMaxPrice()); + spotOptions.setSpotInstanceType( + description.getInstanceMarketOptionsRequest().getSpotOptions().getSpotInstanceType()); + instanceMarketOptionsRequest.setSpotOptions(spotOptions); + + createLaunchConfigurationRequest.setInstanceMarketOptions(instanceMarketOptionsRequest); + } + + if (description.getInstanceTypes() != null) { + createLaunchConfigurationRequest.setInstanceTypes( + description.getInstanceTypes().toArray(new String[0])); + } + + if (!StringUtils.isEmpty(description.getInstanceTypesCheckPolicy())) { + createLaunchConfigurationRequest.setInstanceTypesCheckPolicy( + description.getInstanceTypesCheckPolicy()); + } + + InstanceTag spinnakerTag = new InstanceTag(); + spinnakerTag.setKey(DEFAULT_SERVER_GROUP_TAG_KEY); + spinnakerTag.setValue(description.getServerGroupName()); + + List instanceTags = new ArrayList<>(Arrays.asList(spinnakerTag)); + instanceTags.addAll(description.getInstanceTags()); + + createLaunchConfigurationRequest.setInstanceTags(instanceTags.toArray(new InstanceTag[0])); + + return createLaunchConfigurationRequest; + } + + private static CreateAutoScalingGroupRequest buildAutoScalingGroupRequest( + TencentCloudDeployDescription description, String launchConfigurationId) { + CreateAutoScalingGroupRequest createAutoScalingGroupRequest = + new CreateAutoScalingGroupRequest(); + createAutoScalingGroupRequest.setAutoScalingGroupName(description.getServerGroupName()); + createAutoScalingGroupRequest.setLaunchConfigurationId(launchConfigurationId); + createAutoScalingGroupRequest.setDesiredCapacity(description.getDesiredCapacity()); + createAutoScalingGroupRequest.setMinSize(description.getMinSize()); + createAutoScalingGroupRequest.setMaxSize(description.getMaxSize()); + createAutoScalingGroupRequest.setVpcId(description.getVpcId()); + + if (!CollectionUtils.isEmpty(description.getSubnetIds())) { + createAutoScalingGroupRequest.setSubnetIds(description.getSubnetIds().toArray(new String[0])); + } + + if (!CollectionUtils.isEmpty(description.getZones())) { + createAutoScalingGroupRequest.setZones(description.getZones().toArray(new String[0])); + } + + if (description.getProjectId() != null) { + createAutoScalingGroupRequest.setProjectId(description.getProjectId()); + } + + if (description.getRetryPolicy() != null) { + 
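+ // Like the surrounding optional fields, the retry policy is forwarded to the request only when the description supplies one.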
+
+  private static CreateAutoScalingGroupRequest buildAutoScalingGroupRequest(
+      TencentCloudDeployDescription description, String launchConfigurationId) {
+    CreateAutoScalingGroupRequest createAutoScalingGroupRequest =
+        new CreateAutoScalingGroupRequest();
+    createAutoScalingGroupRequest.setAutoScalingGroupName(description.getServerGroupName());
+    createAutoScalingGroupRequest.setLaunchConfigurationId(launchConfigurationId);
+    createAutoScalingGroupRequest.setDesiredCapacity(description.getDesiredCapacity());
+    createAutoScalingGroupRequest.setMinSize(description.getMinSize());
+    createAutoScalingGroupRequest.setMaxSize(description.getMaxSize());
+    createAutoScalingGroupRequest.setVpcId(description.getVpcId());
+
+    if (!CollectionUtils.isEmpty(description.getSubnetIds())) {
+      createAutoScalingGroupRequest.setSubnetIds(description.getSubnetIds().toArray(new String[0]));
+    }
+
+    if (!CollectionUtils.isEmpty(description.getZones())) {
+      createAutoScalingGroupRequest.setZones(description.getZones().toArray(new String[0]));
+    }
+
+    if (description.getProjectId() != null) {
+      createAutoScalingGroupRequest.setProjectId(description.getProjectId());
+    }
+
+    if (description.getRetryPolicy() != null) {
+      createAutoScalingGroupRequest.setRetryPolicy(description.getRetryPolicy());
+    }
+
+    if (description.getZonesCheckPolicy() != null) {
+      createAutoScalingGroupRequest.setZonesCheckPolicy(description.getZonesCheckPolicy());
+    }
+
+    if (description.getDefaultCooldown() != null) {
+      createAutoScalingGroupRequest.setDefaultCooldown(description.getDefaultCooldown());
+    }
+
+    if (!CollectionUtils.isEmpty(description.getForwardLoadBalancers())) {
+      createAutoScalingGroupRequest.setForwardLoadBalancers(
+          description.getForwardLoadBalancers().toArray(new ForwardLoadBalancer[0]));
+    }
+
+    if (description.getLoadBalancerIds() != null) {
+      createAutoScalingGroupRequest.setLoadBalancerIds(
+          description.getLoadBalancerIds().toArray(new String[0]));
+    }
+
+    if (description.getTerminationPolicies() != null) {
+      createAutoScalingGroupRequest.setTerminationPolicies(
+          description.getTerminationPolicies().toArray(new String[0]));
+    }
+
+    return createAutoScalingGroupRequest;
+  }
+
+  public List<AutoScalingGroup> getAllAutoScalingGroups() {
+    try {
+      DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest();
+      request.setLimit(DEFAULT_LIMIT);
+      DescribeAutoScalingGroupsResponse response = client.DescribeAutoScalingGroups(request);
+      return Arrays.asList(response.getAutoScalingGroupSet());
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public List<AutoScalingGroup> getAutoScalingGroupsByName(String name) {
+    try {
+      DescribeAutoScalingGroupsRequest request = new DescribeAutoScalingGroupsRequest();
+      request.setLimit(DEFAULT_LIMIT);
+      Filter filter = new Filter();
+      filter.setName("auto-scaling-group-name");
+      filter.setValues(new String[] {name});
+      // Attach the name filter to the request; without this the query would return all groups.
+      request.setFilters(new Filter[] {filter});
+      DescribeAutoScalingGroupsResponse response = client.DescribeAutoScalingGroups(request);
+      return Arrays.asList(response.getAutoScalingGroupSet());
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public List<LaunchConfiguration> getLaunchConfigurations(List<String> launchConfigurationIds) {
+    try {
+      int len = launchConfigurationIds.size();
+      List<LaunchConfiguration> launchConfigurations = new ArrayList<>();
+      DescribeLaunchConfigurationsRequest request = new DescribeLaunchConfigurationsRequest();
+      request.setLimit(DEFAULT_LIMIT);
+      // Query in batches of DEFAULT_LIMIT ids, since each request is capped by the API.
+      for (int i = 0; i < len; i += DEFAULT_LIMIT) {
+        int endIndex = Math.toIntExact(Math.min(len, i + DEFAULT_LIMIT));
+        request.setLaunchConfigurationIds(
+            launchConfigurationIds.subList(i, endIndex).toArray(new String[0]));
+
+        DescribeLaunchConfigurationsResponse response =
+            client.DescribeLaunchConfigurations(request);
+        List<LaunchConfiguration> launchConfigurationList =
+            Arrays.stream(response.getLaunchConfigurationSet()).collect(Collectors.toList());
+        launchConfigurations.addAll(launchConfigurationList);
+      }
+      return launchConfigurations;
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
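+
+  // The Describe* helpers below page through results with the AS API's offset/limit protocol:
+  // fetch DEFAULT_LIMIT records at a time until the accumulated result matches the reported
+  // TotalCount, bailing out after MAX_QUERY_TIME iterations as a safety valve and sleeping
+  // briefly between pages, presumably to stay under the API rate limit.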
+  public List<Instance> getAutoScalingInstances(String asgId) {
+    List<Instance> result = new ArrayList<>();
+    DescribeAutoScalingInstancesRequest request = new DescribeAutoScalingInstancesRequest();
+
+    if (!StringUtils.isEmpty(asgId)) {
+      Filter filter = new Filter();
+      filter.setName("auto-scaling-group-id");
+      filter.setValues(new String[] {asgId});
+      request.setFilters(new Filter[] {filter});
+    }
+
+    try {
+      long offset = 0;
+      int queryIndex = 0;
+      while (queryIndex++ < MAX_QUERY_TIME) {
+        request.setOffset(offset);
+        request.setLimit(DEFAULT_LIMIT);
+        DescribeAutoScalingInstancesResponse response =
+            client.DescribeAutoScalingInstances(request);
+
+        if (response == null
+            || response.getAutoScalingInstanceSet() == null
+            || response.getAutoScalingInstanceSet().length <= 0) {
+          break;
+        }
+        result.addAll(Arrays.asList(response.getAutoScalingInstanceSet()));
+        offset += DEFAULT_LIMIT;
+        if (result.size() == response.getTotalCount()) {
+          break;
+        }
+        sleep(500);
+      }
+    } catch (TencentCloudSDKException | InterruptedException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+    return result;
+  }
+
+  public List<Instance> getAutoScalingInstances() {
+    return getAutoScalingInstances(null);
+  }
+
+  public List<Activity> getAutoScalingActivitiesByAsgId(String asgId, int maxActivityNum) {
+    List<Activity> result = new ArrayList<>();
+    DescribeAutoScalingActivitiesRequest request = new DescribeAutoScalingActivitiesRequest();
+
+    if (!StringUtils.isEmpty(asgId)) {
+      Filter filter = new Filter();
+      filter.setName("auto-scaling-group-id");
+      filter.setValues(new String[] {asgId});
+      request.setFilters(new Filter[] {filter});
+    }
+
+    try {
+      long offset = 0;
+      int queryIndex = 0;
+      while (queryIndex++ < MAX_QUERY_TIME) {
+        request.setOffset(offset);
+        request.setLimit(DEFAULT_LIMIT);
+
+        DescribeAutoScalingActivitiesResponse response =
+            client.DescribeAutoScalingActivities(request);
+
+        if (response == null
+            || response.getActivitySet() == null
+            || response.getActivitySet().length <= 0
+            || result.size() + response.getActivitySet().length > maxActivityNum) {
+          break;
+        }
+
+        result.addAll(Arrays.asList(response.getActivitySet()));
+        offset += DEFAULT_LIMIT;
+        if (result.size() == response.getTotalCount()) {
+          break;
+        }
+        sleep(500);
+      }
+    } catch (TencentCloudSDKException | InterruptedException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+    return result;
+  }
+
+  public void resizeAutoScalingGroup(String asgId, Capacity capacity) {
+    try {
+      ModifyAutoScalingGroupRequest request = new ModifyAutoScalingGroupRequest();
+      request.setAutoScalingGroupId(asgId);
+      request.setMaxSize(capacity.getMax());
+      request.setMinSize(capacity.getMin());
+      request.setDesiredCapacity(capacity.getDesired());
+
+      client.ModifyAutoScalingGroup(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void enableAutoScalingGroup(String asgId) {
+    try {
+      EnableAutoScalingGroupRequest request = new EnableAutoScalingGroupRequest();
+      request.setAutoScalingGroupId(asgId);
+      client.EnableAutoScalingGroup(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void disableAutoScalingGroup(String asgId) {
+    try {
+      DisableAutoScalingGroupRequest request = new DisableAutoScalingGroupRequest();
+      request.setAutoScalingGroupId(asgId);
+      client.DisableAutoScalingGroup(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void deleteAutoScalingGroup(String asgId) {
+    try {
+      DeleteAutoScalingGroupRequest request = new DeleteAutoScalingGroupRequest();
+      request.setAutoScalingGroupId(asgId);
+      client.DeleteAutoScalingGroup(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void deleteLaunchConfiguration(String launchConfigurationId) {
+    try {
+      DeleteLaunchConfigurationRequest request = new DeleteLaunchConfigurationRequest();
+      request.setLaunchConfigurationId(launchConfigurationId);
+      client.DeleteLaunchConfiguration(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
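+
+  // The helpers below manage CLB (Cloud Load Balancer) registration for scaling instances.
+  // Tencent Cloud exposes two CLB flavors through different APIs: "forward" (listener based,
+  // addressed by loadBalancerId/listenerId/locationId) and "classical" (instance based,
+  // addressed by loadBalancerId alone), so each operation comes in two variants.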
+  public void removeInstances(String asgId, List<String> instanceIds) {
+    try {
+      RemoveInstancesRequest request = new RemoveInstancesRequest();
+      request.setInstanceIds(instanceIds.toArray(new String[0]));
+      request.setAutoScalingGroupId(asgId);
+      client.RemoveInstances(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void attachAutoScalingInstancesToForwardClb(ForwardLoadBalancer flb, List<Target> targets)
+      throws TencentCloudSDKException {
+    try {
+      RegisterTargetsRequest request = new RegisterTargetsRequest();
+      request.setLoadBalancerId(flb.getLoadBalancerId());
+      request.setListenerId(flb.getListenerId());
+      request.setLocationId(flb.getLocationId());
+      request.setTargets(targets.toArray(new Target[0]));
+
+      clbClient.RegisterTargets(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudSDKException(e.toString());
+    }
+  }
+
+  public void attachAutoScalingInstancesToClassicClb(String lbId, List<Target> targets) {
+    try {
+      RegisterTargetsWithClassicalLBRequest request = new RegisterTargetsWithClassicalLBRequest();
+      request.setLoadBalancerId(lbId);
+      List<ClassicalTargetInfo> infoList = new ArrayList<>();
+      for (Target target : targets) {
+        ClassicalTargetInfo info = new ClassicalTargetInfo();
+        info.setInstanceId(target.getInstanceId());
+        info.setWeight(target.getWeight());
+        infoList.add(info);
+      }
+      request.setTargets(infoList.toArray(new ClassicalTargetInfo[0]));
+      clbClient.RegisterTargetsWithClassicalLB(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void detachAutoScalingInstancesFromForwardClb(
+      ForwardLoadBalancer flb, List<Target> targets) throws TencentCloudSDKException {
+    try {
+      DeregisterTargetsRequest request = new DeregisterTargetsRequest();
+      request.setLoadBalancerId(flb.getLoadBalancerId());
+      request.setListenerId(flb.getListenerId());
+      request.setLocationId(flb.getLocationId());
+      request.setTargets(targets.toArray(new Target[0]));
+      clbClient.DeregisterTargets(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudSDKException(e.toString());
+    }
+  }
+
+  public void detachAutoScalingInstancesFromClassicClb(String lbId, List<String> instanceIds) {
+    try {
+      DeregisterTargetsFromClassicalLBRequest request =
+          new DeregisterTargetsFromClassicalLBRequest();
+      request.setLoadBalancerId(lbId);
+      request.setInstanceIds(instanceIds.toArray(new String[0]));
+      clbClient.DeregisterTargetsFromClassicalLB(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public Set<String> getClassicLbInstanceIds(String lbId) {
+    try {
+      DescribeClassicalLBTargetsRequest request = new DescribeClassicalLBTargetsRequest();
+      request.setLoadBalancerId(lbId);
+      DescribeClassicalLBTargetsResponse response = clbClient.DescribeClassicalLBTargets(request);
+      return Arrays.stream(response.getTargets())
+          .map(ClassicalTarget::getInstanceId)
+          .collect(Collectors.toSet());
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public List<ListenerBackend> getForwardLbTargets(ForwardLoadBalancer flb) {
+    try {
+      DescribeTargetsRequest request = new DescribeTargetsRequest();
+      request.setLoadBalancerId(flb.getLoadBalancerId());
+      request.setListenerIds(new String[] {flb.getListenerId()});
+      DescribeTargetsResponse response = clbClient.DescribeTargets(request);
+      return Arrays.asList(response.getListeners());
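+  // Scaling policies and scheduled actions are named after the server group plus a marker
+  // ("-asp-" / "-asst-") and a millisecond timestamp, so repeated upserts never collide.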
+    } catch (TencentCloudSDKException e) {
+      // Swallow lookup failures and report no targets rather than failing the caller.
+      return new ArrayList<>();
+    }
+  }
+
+  public String createScalingPolicy(
+      String asgId, UpsertTencentCloudScalingPolicyDescription description) {
+    try {
+      CreateScalingPolicyRequest request = new CreateScalingPolicyRequest();
+      request.setAutoScalingGroupId(asgId);
+      request.setScalingPolicyName(
+          description.getServerGroupName() + "-asp-" + new Date().getTime());
+      request.setAdjustmentType(description.getAdjustmentType());
+      request.setAdjustmentValue(description.getAdjustmentValue());
+      request.setMetricAlarm(description.getMetricAlarm());
+      request.setCooldown(description.getCooldown());
+      if (!CollectionUtils.isEmpty(description.getNotificationUserGroupIds())) {
+        request.setNotificationUserGroupIds(
+            description.getNotificationUserGroupIds().toArray(new String[0]));
+      }
+
+      CreateScalingPolicyResponse response = client.CreateScalingPolicy(request);
+      return response.getAutoScalingPolicyId();
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void modifyScalingPolicy(
+      String aspId, UpsertTencentCloudScalingPolicyDescription description) {
+    try {
+      ModifyScalingPolicyRequest request = new ModifyScalingPolicyRequest();
+      request.setAutoScalingPolicyId(aspId);
+      request.setAdjustmentType(description.getAdjustmentType());
+      request.setAdjustmentValue(description.getAdjustmentValue());
+      request.setMetricAlarm(description.getMetricAlarm());
+      request.setCooldown(description.getCooldown());
+      if (!CollectionUtils.isEmpty(description.getNotificationUserGroupIds())) {
+        request.setNotificationUserGroupIds(
+            description.getNotificationUserGroupIds().toArray(new String[0]));
+      }
+
+      client.ModifyScalingPolicy(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public List<ScalingPolicy> getScalingPolicies(String asgId) {
+    List<ScalingPolicy> result = new ArrayList<>();
+    DescribeScalingPoliciesRequest request = new DescribeScalingPoliciesRequest();
+
+    if (!StringUtils.isEmpty(asgId)) {
+      Filter filter = new Filter();
+      filter.setName("auto-scaling-group-id");
+      filter.setValues(new String[] {asgId});
+      request.setFilters(new Filter[] {filter});
+    }
+
+    try {
+      long offset = 0;
+      int queryIndex = 0;
+      while (queryIndex++ < MAX_QUERY_TIME) {
+        request.setOffset(offset);
+        request.setLimit(DEFAULT_LIMIT);
+        DescribeScalingPoliciesResponse response = client.DescribeScalingPolicies(request);
+
+        if (response == null
+            || response.getScalingPolicySet() == null
+            || response.getScalingPolicySet().length <= 0) {
+          break;
+        }
+        result.addAll(Arrays.asList(response.getScalingPolicySet()));
+        offset += DEFAULT_LIMIT;
+        if (result.size() == response.getTotalCount()) {
+          break;
+        }
+        sleep(500);
+      }
+    } catch (TencentCloudSDKException | InterruptedException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+    return result;
+  }
+
+  public void deleteScalingPolicy(String aspId) {
+    try {
+      DeleteScalingPolicyRequest request = new DeleteScalingPolicyRequest();
+      request.setAutoScalingPolicyId(aspId);
+      client.DeleteScalingPolicy(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public String createScheduledAction(
+      String asgId, UpsertTencentCloudScheduledActionDescription description) {
+    try {
+      CreateScheduledActionRequest request = new CreateScheduledActionRequest();
+      request.setAutoScalingGroupId(asgId);
+      request.setScheduledActionName(
+          description.getServerGroupName() + "-asst-" + new Date().getTime());
+      request.setMaxSize(description.getMaxSize());
+      request.setMinSize(description.getMinSize());
+      request.setDesiredCapacity(description.getDesiredCapacity());
+      request.setStartTime(description.getStartTime());
+      request.setEndTime(description.getEndTime());
+      request.setRecurrence(description.getRecurrence());
+      CreateScheduledActionResponse response = client.CreateScheduledAction(request);
+      return response.getScheduledActionId();
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public void modifyScheduledAction(
+      String asstId, UpsertTencentCloudScheduledActionDescription description) {
+    try {
+      ModifyScheduledActionRequest request = new ModifyScheduledActionRequest();
+      request.setScheduledActionId(asstId);
+      request.setMaxSize(description.getMaxSize());
+      request.setMinSize(description.getMinSize());
+      request.setDesiredCapacity(description.getDesiredCapacity());
+      request.setStartTime(description.getStartTime());
+      request.setEndTime(description.getEndTime());
+      request.setRecurrence(description.getRecurrence());
+
+      client.ModifyScheduledAction(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public List<ScheduledAction> getScheduledAction(String asgId) {
+    List<ScheduledAction> result = new ArrayList<>();
+    DescribeScheduledActionsRequest request = new DescribeScheduledActionsRequest();
+
+    if (!StringUtils.isEmpty(asgId)) {
+      Filter filter = new Filter();
+      filter.setName("auto-scaling-group-id");
+      filter.setValues(new String[] {asgId});
+      request.setFilters(new Filter[] {filter});
+    }
+
+    try {
+      long offset = 0;
+      int queryIndex = 0;
+      while (queryIndex++ < MAX_QUERY_TIME) {
+        request.setOffset(offset);
+        request.setLimit(DEFAULT_LIMIT);
+        DescribeScheduledActionsResponse response = client.DescribeScheduledActions(request);
+
+        if (response == null
+            || response.getScheduledActionSet() == null
+            || response.getScheduledActionSet().length <= 0) {
+          break;
+        }
+        result.addAll(Arrays.asList(response.getScheduledActionSet()));
+        offset += DEFAULT_LIMIT;
+        if (result.size() == response.getTotalCount()) {
+          break;
+        }
+        sleep(500);
+      }
+    } catch (TencentCloudSDKException | InterruptedException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+    return result;
+  }
+
+  public void deleteScheduledAction(String asstId) {
+    try {
+      DeleteScheduledActionRequest request = new DeleteScheduledActionRequest();
+      request.setScheduledActionId(asstId);
+      client.DeleteScheduledAction(request);
+    } catch (TencentCloudSDKException e) {
+      throw new TencentCloudOperationException(e.toString());
+    }
+  }
+
+  public final String getEndPoint() {
+    return END_POINT;
+  }
+
+  public static String getDefaultServerGroupTagKey() {
+    return DEFAULT_SERVER_GROUP_TAG_KEY;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/config/TencentCloudConfigurationProperties.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/config/TencentCloudConfigurationProperties.java
new file mode 100644
index 00000000000..3f1b7e8e548
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/config/TencentCloudConfigurationProperties.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.config;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class TencentCloudConfigurationProperties {
+
+  @Data
+  public static class ManagedAccount {
+    private String name;
+    private String environment;
+    private String accountType;
+    private String project;
+    private String secretId;
+    private String secretKey;
+    private List<String> regions;
+  }
+
+  private List<ManagedAccount> accounts;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/AbstractTencentCloudCredentialsDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/AbstractTencentCloudCredentialsDescription.java
new file mode 100644
index 00000000000..285efe440c0
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/AbstractTencentCloudCredentialsDescription.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable;
+import com.netflix.spinnaker.clouddriver.tencentcloud.security.TencentCloudNamedAccountCredentials;
+
+public abstract class AbstractTencentCloudCredentialsDescription implements CredentialsNameable {
+
+  @JsonIgnore private TencentCloudNamedAccountCredentials credentials;
+
+  @Override
+  public TencentCloudNamedAccountCredentials getCredentials() {
+    return credentials;
+  }
+
+  public void setCredentials(TencentCloudNamedAccountCredentials credentials) {
+    this.credentials = credentials;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudLoadBalancerDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudLoadBalancerDescription.java
new file mode 100644
index 00000000000..b59823cf40c
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudLoadBalancerDescription.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer.TencentCloudLoadBalancerListener;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class DeleteTencentCloudLoadBalancerDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String application;
+  private String region;
+  private String loadBalancerId;
+  private List<TencentCloudLoadBalancerListener> listeners = new ArrayList<>();
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudScalingPolicyDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudScalingPolicyDescription.java
new file mode 100644
index 00000000000..4de2598832c
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudScalingPolicyDescription.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class DeleteTencentCloudScalingPolicyDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String scalingPolicyId;
+  private String serverGroupName;
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudScheduledActionDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudScheduledActionDescription.java
new file mode 100644
index 00000000000..cc3872843ba
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudScheduledActionDescription.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class DeleteTencentCloudScheduledActionDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String scheduledActionId;
+  private String serverGroupName;
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudSecurityGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudSecurityGroupDescription.java
new file mode 100644
index 00000000000..418fc8ff71e
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DeleteTencentCloudSecurityGroupDescription.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class DeleteTencentCloudSecurityGroupDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  @JsonProperty("credentials")
+  private String accountName;
+
+  private String region;
+  private String securityGroupId;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DestroyTencentCloudServerGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DestroyTencentCloudServerGroupDescription.java
new file mode 100644
index 00000000000..dca6eec0927
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/DestroyTencentCloudServerGroupDescription.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class DestroyTencentCloudServerGroupDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String serverGroupName;
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/EnableDisableTencentCloudServerGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/EnableDisableTencentCloudServerGroupDescription.java
new file mode 100644
index 00000000000..09f734da55d
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/EnableDisableTencentCloudServerGroupDescription.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class EnableDisableTencentCloudServerGroupDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String serverGroupName;
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/RebootTencentCloudInstancesDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/RebootTencentCloudInstancesDescription.java
new file mode 100644
index 00000000000..49ceaab977d
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/RebootTencentCloudInstancesDescription.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class RebootTencentCloudInstancesDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String serverGroupName;
+  private List<String> instanceIds = new ArrayList<>();
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/ResizeTencentCloudServerGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/ResizeTencentCloudServerGroupDescription.java
new file mode 100644
index 00000000000..974ff84f42e
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/ResizeTencentCloudServerGroupDescription.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@EqualsAndHashCode(callSuper = true)
+@Data
+public class ResizeTencentCloudServerGroupDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private Capacity capacity;
+  private String serverGroupName;
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+
+  @Data
+  public static class Capacity {
+
+    private Long min;
+    private Long max;
+    private Long desired;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TencentCloudDeployDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TencentCloudDeployDescription.java
new file mode 100644
index 00000000000..11961317d95
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TencentCloudDeployDescription.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.netflix.spinnaker.clouddriver.deploy.DeployDescription;
+import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable;
+import com.tencentcloudapi.as.v20180419.models.*;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@EqualsAndHashCode(callSuper = true)
+@Data
+public class TencentCloudDeployDescription extends AbstractTencentCloudCredentialsDescription
+    implements DeployDescription {
+  /** common */
+  private String application;
+
+  private String stack;
+  private String detail;
+  private String region;
+  private String accountName;
+  private String serverGroupName;
+
+  /** launch configuration part */
+  private String instanceType;
+
+  private String imageId;
+  private Long projectId;
+  private SystemDisk systemDisk;
+  private List<DataDisk> dataDisks;
+  private InternetAccessible internetAccessible;
+  private LoginSettings loginSettings;
+  private List<String> securityGroupIds;
+  private EnhancedService enhancedService;
+  private String userData;
+  private String instanceChargeType;
+  private InstanceMarketOptionsRequest instanceMarketOptionsRequest;
+  private List<String> instanceTypes;
+  private String instanceTypesCheckPolicy;
+  private List<InstanceTag> instanceTags;
+
+  /** auto scaling group part */
+  private Long maxSize;
+
+  private Long minSize;
+  private Long desiredCapacity;
+  private String vpcId;
+  private Long defaultCooldown;
+  private List<String> loadBalancerIds;
+  private List<ForwardLoadBalancer> forwardLoadBalancers;
+  private List<String> subnetIds;
+  private List<String> terminationPolicies;
+  private List<String> zones;
+  private String retryPolicy;
+  private String zonesCheckPolicy;
+
+  /** clone source */
+  private Source source = new Source();
+
+  @Data
+  public static class Source implements ServerGroupsNameable {
+    private String region;
+    private String serverGroupName;
+
+    @Override
+    public Collection<String> getServerGroupNames() {
+      return Collections.singletonList(serverGroupName);
+    }
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TerminateAndDecrementTencentCloudServerGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TerminateAndDecrementTencentCloudServerGroupDescription.java
new file mode 100644
index 00000000000..2e892af2b11
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TerminateAndDecrementTencentCloudServerGroupDescription.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class TerminateAndDecrementTencentCloudServerGroupDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String serverGroupName;
+  private String region;
+  private String instance;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TerminateTencentCloudInstancesDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TerminateTencentCloudInstancesDescription.java
new file mode 100644
index 00000000000..69c140098c3
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/TerminateTencentCloudInstancesDescription.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class TerminateTencentCloudInstancesDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  private String serverGroupName;
+  private List<String> instanceIds = new ArrayList<>();
+  private String region;
+
+  @JsonProperty("credentials")
+  private String accountName;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudLoadBalancerDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudLoadBalancerDescription.java
new file mode 100644
index 00000000000..1c1c1565aa5
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudLoadBalancerDescription.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer.TencentCloudLoadBalancerListener;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class UpsertTencentCloudLoadBalancerDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  @JsonProperty("credentials")
+  private String accountName;
+
+  private String application;
+  private String region;
+  private String loadBalancerId;
+  private String loadBalancerName;
+  private String loadBalancerType;
+  private Long forwardType;
+  private String vpcId;
+  private String subnetId;
+  private Long projectId;
+  private List<String> securityGroups = new ArrayList<>();
+  private List<TencentCloudLoadBalancerListener> listener = new ArrayList<>();
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudScalingPolicyDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudScalingPolicyDescription.java
new file mode 100644
index 00000000000..7566a7a31d7
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudScalingPolicyDescription.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.tencentcloudapi.as.v20180419.models.MetricAlarm;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class UpsertTencentCloudScalingPolicyDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  @JsonProperty("credentials")
+  private String accountName;
+
+  private String serverGroupName;
+  private String region;
+  private OperationType operationType;
+  private String scalingPolicyId;
+  private String adjustmentType;
+  private Long adjustmentValue;
+  private MetricAlarm metricAlarm;
+  private List<String> notificationUserGroupIds = new ArrayList<>();
+  private Long cooldown;
+
+  public enum OperationType {
+    CREATE,
+    MODIFY
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudScheduledActionDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudScheduledActionDescription.java
new file mode 100644
index 00000000000..9d0eac6b861
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudScheduledActionDescription.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class UpsertTencentCloudScheduledActionDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  @JsonProperty("credentials")
+  private String accountName;
+
+  private String serverGroupName;
+  private String region;
+  private OperationType operationType;
+  private String scheduledActionId;
+  private Long maxSize;
+  private Long minSize;
+  private Long desiredCapacity;
+  private String startTime;
+  private String endTime;
+  private String recurrence;
+
+  public enum OperationType {
+    CREATE,
+    MODIFY
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudSecurityGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudSecurityGroupDescription.java
new file mode 100644
index 00000000000..466969538ca
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/deploy/description/UpsertTencentCloudSecurityGroupDescription.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.deploy.description;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.netflix.spinnaker.clouddriver.tencentcloud.model.TencentCloudSecurityGroupRule;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class UpsertTencentCloudSecurityGroupDescription
+    extends AbstractTencentCloudCredentialsDescription {
+
+  @JsonProperty("credentials")
+  private String accountName;
+
+  private String application;
+  private String region;
+  private String securityGroupId;
+  private String securityGroupName;
+  private String securityGroupDesc;
+  private List<TencentCloudSecurityGroupRule> inRules = new ArrayList<>();
+  private List<TencentCloudSecurityGroupRule> outRules = new ArrayList<>();
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/exception/TencentCloudOperationException.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/exception/TencentCloudOperationException.java
new file mode 100644
index 00000000000..1cf952562d2
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/exception/TencentCloudOperationException.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.exception;
+
+public class TencentCloudOperationException extends RuntimeException {
+  public TencentCloudOperationException(String message) {
+    super(message);
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudBasicResource.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudBasicResource.java
new file mode 100644
index 00000000000..e1e999e11b3
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudBasicResource.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+public interface TencentCloudBasicResource {
+
+  String getMonikerName();
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudCluster.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudCluster.java
new file mode 100644
index 00000000000..308c50de751
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudCluster.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.Cluster;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer.TencentCloudLoadBalancer;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import lombok.Data;
+
+@Data
+public class TencentCloudCluster implements Cluster {
+
+  private String name;
+  private String accountName;
+  private Set<TencentCloudServerGroup> serverGroups = Collections.synchronizedSet(new HashSet<>());
+  private Set<TencentCloudLoadBalancer> loadBalancers =
+      Collections.synchronizedSet(new HashSet<>());
+
+  public final String getType() {
+    return TencentCloudProvider.ID;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudImage.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudImage.java
new file mode 100644
index 00000000000..8d61b042e31
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudImage.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.netflix.spinnaker.clouddriver.model.Image;
+import java.util.List;
+import java.util.Map;
+import lombok.Builder;
+import lombok.Data;
+
+@Builder
+@Data
+public class TencentCloudImage implements Image {
+
+  private String name;
+  private String region;
+  private String type;
+  private String createdTime;
+  private String imageId;
+  private String osPlatform;
+  private List<Map<String, Object>> snapshotSet;
+
+  @Override
+  @JsonIgnore
+  public String getId() {
+    return imageId;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstance.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstance.java
new file mode 100644
index 00000000000..3969a6c7905
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstance.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import com.netflix.spinnaker.clouddriver.model.Instance;
+import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import com.netflix.spinnaker.moniker.Moniker;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.Data;
+
+@Data
+public class TencentCloudInstance implements Instance, TencentCloudBasicResource {
+
+  private final String cloudProvider = TencentCloudProvider.ID;
+  private final String providerType = TencentCloudProvider.ID;
+  private String instanceName;
+  private String account;
+  private String name;
+  private Long launchTime;
+  private String zone;
+  private TencentCloudInstanceHealth instanceHealth;
+  private TencentCloudTargetHealth targetHealth;
+  private String vpcId;
+  private String subnetId;
+  private List<String> privateIpAddresses = new ArrayList<>();
+  private List<String> publicIpAddresses = new ArrayList<>();
+  private String instanceType;
+  private String imageId;
+  private List<String> securityGroupIds = new ArrayList<>();
+  private List<Map<String, String>> tags = new ArrayList<>();
+  private String serverGroupName;
+
+  @Override
+  public String getHumanReadableName() {
+    return instanceName;
+  }
+
+  @Override
+  @JsonIgnore
+  public String getMonikerName() {
+    return serverGroupName;
+  }
+
+  public List<Map<String, Object>> getHealth() {
+    ObjectMapper objectMapper = new ObjectMapper();
+    List<Map<String, Object>> healths = new ArrayList<>();
+
+    if (instanceHealth != null) {
+      healths.add(objectMapper.convertValue(instanceHealth, Map.class));
+    }
+
+    if (targetHealth != null) {
+      healths.add(objectMapper.convertValue(targetHealth, Map.class));
+    }
+
+    return healths;
+  }
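+
+  // Aggregates the per-source health maps into a single state, in precedence order:
+  // all known sources Up -> Up, then Starting, Down, OutOfService, and finally Unknown.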
+
+  @Override
+  public HealthState getHealthState() {
+    List<Map<String, Object>> health = getHealth();
+    if (someUpRemainingUnknown(health)) {
+      return HealthState.Up;
+    } else if (anyStarting(health)) {
+      return HealthState.Starting;
+    } else if (anyDown(health)) {
+      return HealthState.Down;
+    } else if (anyOutOfService(health)) {
+      return HealthState.OutOfService;
+    }
+    return HealthState.Unknown;
+  }
+
+  public Moniker getMoniker() {
+    return NamerRegistry.lookup()
+        .withProvider(TencentCloudProvider.ID)
+        .withAccount(account)
+        .withResource(TencentCloudBasicResource.class)
+        .deriveMoniker(this);
+  }
+
+  private static boolean someUpRemainingUnknown(List<Map<String, Object>> healthList) {
+    List<Map<String, Object>> knownHealthList =
+        healthList.stream()
+            .filter(it -> HealthState.fromString((String) it.get("state")) != HealthState.Unknown)
+            .collect(Collectors.toList());
+
+    // An empty known list means no source reported health; treat that as not-up rather
+    // than vacuously up.
+    return !knownHealthList.isEmpty()
+        && knownHealthList.stream()
+            .allMatch(knownHealth -> knownHealth.get("state").equals(HealthState.Up.toString()));
+  }
+
+  private static boolean anyStarting(List<Map<String, Object>> healthList) {
+    return healthList.stream()
+        .anyMatch(health -> health.get("state").equals(HealthState.Starting.toString()));
+  }
+
+  private static boolean anyDown(List<Map<String, Object>> healthList) {
+    return healthList.stream()
+        .anyMatch(health -> health.get("state").equals(HealthState.Down.toString()));
+  }
+
+  private static boolean anyOutOfService(List<Map<String, Object>> healthList) {
+    return healthList.stream()
+        .anyMatch(health -> health.get("state").equals(HealthState.OutOfService.toString()));
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstanceHealth.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstanceHealth.java
new file mode 100644
index 00000000000..a1c4f9c7d06
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstanceHealth.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import static com.netflix.spinnaker.clouddriver.model.HealthState.*;
+
+import com.netflix.spinnaker.clouddriver.model.Health;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class TencentCloudInstanceHealth implements Health {
+
+  private final String healthClass = "platform";
+  private final String type = "TencentCloud";
+  private Status instanceStatus;
+
+  public TencentCloudInstanceHealth(String instanceStatus) {
+    this.instanceStatus = Status.valueOf(instanceStatus);
+  }
+
+  public HealthState getState() {
+    switch (instanceStatus) {
+      case PENDING:
+        return Starting;
+      case RUNNING:
+        return Unknown;
+      case STOPPED:
+        return Down;
+      default:
+        return Unknown;
+    }
+  }
+
+  public enum Status {
+    PENDING,
+    LAUNCH_FAILED,
+    RUNNING,
+    STOPPED,
+    STARTING,
+    STOPPING,
+    REBOOTING,
+    SHUTDOWN,
+    TERMINATING
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstanceType.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstanceType.java
new file mode 100644
index 00000000000..55d481948c3
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudInstanceType.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.InstanceType;
+import lombok.Data;
+
+@Data
+public class TencentCloudInstanceType implements InstanceType {
+
+  private String name;
+  private String region;
+  private String zone;
+  private String account;
+  private Long cpu;
+  private Long mem;
+  private String instanceFamily;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudKeyPair.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudKeyPair.java
new file mode 100644
index 00000000000..1022f511963
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudKeyPair.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.KeyPair;
+import lombok.Builder;
+import lombok.Data;
+
+@Data
+@Builder
+public class TencentCloudKeyPair implements KeyPair {
+
+  private String account;
+  private String region;
+  private String keyId;
+  private String keyName;
+  private String keyFingerprint;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudNetwork.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudNetwork.java
new file mode 100644
index 00000000000..0e5d057bcd2
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudNetwork.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.Network;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import lombok.Data;
+
+@Data
+public class TencentCloudNetwork implements Network {
+
+  private String id;
+  private String name;
+  private String account;
+  private String region;
+  private String cidrBlock;
+  private Boolean isDefault;
+
+  public TencentCloudNetwork(
+      String id, String name, String account, String region, String cidrBlock, Boolean isDefault) {
+    this.id = id;
+    this.name = name;
+    this.account = account;
+    this.region = region;
+    this.cidrBlock = cidrBlock;
+    this.isDefault = isDefault;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return TencentCloudProvider.ID;
+  }
+
+  @Override
+  public String getId() {
+    return id;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudNetworkDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudNetworkDescription.java
new file mode 100644
index 00000000000..55746591363
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudNetworkDescription.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import lombok.Data;
+
+@Data
+public class TencentCloudNetworkDescription {
+
+  private String vpcId;
+  private String vpcName;
+  private String cidrBlock;
+  private Boolean isDefault;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroup.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroup.java
new file mode 100644
index 00000000000..18f4b9607c2
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroup.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.netflix.spinnaker.clouddriver.model.SecurityGroup;
+import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary;
+import com.netflix.spinnaker.clouddriver.model.securitygroups.Rule;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import lombok.Data;
+
+@JsonInclude(Include.NON_EMPTY)
+@Data
+public class TencentCloudSecurityGroup implements SecurityGroup {
+
+  private final String type = TencentCloudProvider.ID;
+  private final String cloudProvider = TencentCloudProvider.ID;
+  // securityGroupId
+  private String id;
+  // securityGroupName
+  private String name;
+  private String description;
+  private String application;
+  private String accountName;
+  private String region;
+  private Set<Rule> inboundRules = new HashSet<>();
+  private Set<Rule> outboundRules = new HashSet<>();
+  private List<TencentCloudSecurityGroupRule> inRules = new ArrayList<>();
+  private List<TencentCloudSecurityGroupRule> outRules = new ArrayList<>();
+
+  public TencentCloudSecurityGroup(
+      String id,
+      String name,
+      String description,
+      String application,
+      String accountName,
+      String region,
+      Set<Rule> inboundRules,
+      Set<Rule> outboundRules,
+      List<TencentCloudSecurityGroupRule> inRules,
+      List<TencentCloudSecurityGroupRule> outRules) {
+    this.id = id;
+    this.name = name;
+    this.description = description;
+    this.application = application;
+    this.accountName = accountName;
+    this.region = region;
+    this.inboundRules.addAll(inboundRules);
+    this.outboundRules.addAll(outboundRules);
+    this.inRules.addAll(inRules);
+    this.outRules.addAll(outRules);
+  }
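+
+  // Two rule representations coexist here (an assumption based on how other clouddriver
+  // providers model security groups): inboundRules/outboundRules expose Spinnaker's
+  // generic Rule model consumed by the UI, while inRules/outRules keep the provider-native
+  // TencentCloudSecurityGroupRule shape (protocol/port/cidrBlock/action).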
+
+  @Override
+  public SecurityGroupSummary getSummary() {
+    return new TencentCloudSecurityGroupSummary(name, id);
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupDescription.java
new file mode 100644
index 00000000000..41a3dc58bff
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupDescription.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class TencentCloudSecurityGroupDescription {
+
+  private String securityGroupId;
+  private String securityGroupName;
+  private String securityGroupDesc;
+  private List<TencentCloudSecurityGroupRule> inRules;
+  private List<TencentCloudSecurityGroupRule> outRules;
+  private long lastReadTime;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupRule.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupRule.java
new file mode 100644
index 00000000000..d7e36d3ddad
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupRule.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@Builder
+@NoArgsConstructor
+@AllArgsConstructor
+public class TencentCloudSecurityGroupRule {
+  // rule index
+  private Long index;
+  // TCP, UDP, ICMP, GRE, ALL
+  private String protocol;
+  private String port;
+  private String cidrBlock;
+  // ACCEPT or DROP
+  private String action;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupSummary.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupSummary.java
new file mode 100644
index 00000000000..1a3218382d3
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSecurityGroupSummary.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.SecurityGroupSummary;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+
+@Data
+@AllArgsConstructor
+public class TencentCloudSecurityGroupSummary implements SecurityGroupSummary {
+
+  private String name;
+  private String id;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudServerGroup.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudServerGroup.java
new file mode 100644
index 00000000000..6a532133d18
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudServerGroup.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import com.netflix.spinnaker.clouddriver.model.Instance;
+import com.netflix.spinnaker.clouddriver.model.ServerGroup;
+import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import com.netflix.spinnaker.clouddriver.tencentcloud.client.AutoScalingClient;
+import com.netflix.spinnaker.moniker.Moniker;
+import com.tencentcloudapi.as.v20180419.models.AutoScalingGroup;
+import com.tencentcloudapi.as.v20180419.models.ForwardLoadBalancer;
+import com.tencentcloudapi.as.v20180419.models.ScalingPolicy;
+import com.tencentcloudapi.as.v20180419.models.ScheduledAction;
+import java.util.*;
+import java.util.stream.Collectors;
+import lombok.Data;
+
+@Data
+public class TencentCloudServerGroup implements ServerGroup, TencentCloudBasicResource {
+
+  private final String type = TencentCloudProvider.ID;
+  private final String cloudProvider = TencentCloudProvider.ID;
+  private String accountName;
+  private String name;
+  private String region;
+  private Set<String> zones;
+  private Set<Instance> instances = new HashSet<>();
+  private Map<String, Object> image = new HashMap<>();
+  private Map<String, Object> launchConfig = new HashMap<>();
+  private AutoScalingGroup asg;
+  private Map<String, Object> buildInfo;
+  private String vpcId;
+  private List<ScalingPolicy> scalingPolicies = new ArrayList<>();
+  private List<ScheduledAction> scheduledActions = new ArrayList<>();
+  private Boolean disabled = false;
+
+  @Override
+  public String getName() {
+    return this.name;
+  }
+
+  @Override
+  public Moniker getMoniker() {
+    return NamerRegistry.lookup()
+        .withProvider(TencentCloudProvider.ID)
+        .withAccount(accountName)
+        .withResource(TencentCloudBasicResource.class)
+        .deriveMoniker(this);
+  }
+
+  @Override
+  public String getType() {
+    return this.type;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return this.cloudProvider;
+  }
+
+  @Override
+  public String getRegion() {
+    return this.region;
+  }
+
+  @Override
+  public String getMonikerName() {
+    return name;
+  }
+
+  @Override
+  public Boolean isDisabled() {
+    return disabled;
+  }
+
+  @Override
+  public Long getCreatedTime() {
+    Date dateTime = null;
+    if (asg != null) {
+      dateTime = AutoScalingClient.convertToIsoDateTime(asg.getCreatedTime());
+    }
+    // convertToIsoDateTime may return null for an unparseable timestamp, hence the check.
+    return dateTime != null ? dateTime.getTime() : null;
+  }
+
+  @Override
+  public Set<String> getZones() {
+    return zones;
+  }
+
+  @Override
+  public Set<Instance> getInstances() {
+    return instances;
+  }
+
+  @Override
+  public Set<String> getLoadBalancers() {
+    Set<String> loadBalancerNames = new HashSet<>();
+    if (asg != null && asg.getForwardLoadBalancerSet() != null) {
+      loadBalancerNames =
+          Arrays.stream(asg.getForwardLoadBalancerSet())
+              .map(ForwardLoadBalancer::getListenerId)
+              .collect(Collectors.toSet());
+    }
+
+    if (asg != null && asg.getLoadBalancerIdSet() != null) {
+      loadBalancerNames.addAll(Arrays.asList(asg.getLoadBalancerIdSet()));
+    }
+
+    return loadBalancerNames;
+  }
+
+  @Override
+  public Set<String> getSecurityGroups() {
+    Set<String> securityGroups = new HashSet<>();
+    if (launchConfig != null && launchConfig.containsKey("securityGroupIds")) {
+      securityGroups.addAll((List<String>) launchConfig.get("securityGroupIds"));
+    }
+    return securityGroups;
+  }
+
+  @Override
+  public Map<String, Object> getLaunchConfig() {
+    return this.launchConfig;
+  }
+
+  @Override
+  public InstanceCounts getInstanceCounts() {
+    InstanceCounts counts = new InstanceCounts();
+    counts.setTotal(instances.size());
+    counts.setUp(filterInstancesByHealthState(instances, HealthState.Up).size());
+    counts.setDown(filterInstancesByHealthState(instances, HealthState.Down).size());
+    counts.setUnknown(filterInstancesByHealthState(instances, HealthState.Unknown).size());
+    counts.setStarting(filterInstancesByHealthState(instances, HealthState.Starting).size());
+    counts.setOutOfService(
+        filterInstancesByHealthState(instances, HealthState.OutOfService).size());
+    return counts;
+  }
+
+  @Override
+  public Capacity getCapacity() {
+    Capacity capacity = new Capacity();
+    capacity.setMin(
+        Math.toIntExact(asg != null && asg.getMinSize() != null ? asg.getMinSize() : 0));
+    capacity.setMax(
+        Math.toIntExact(asg != null && asg.getMaxSize() != null ? asg.getMaxSize() : 0));
+    capacity.setDesired(
+        Math.toIntExact(
+            asg != null && asg.getDesiredCapacity() != null ? asg.getDesiredCapacity() : 0));
+    return capacity;
+  }
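+
+  // Rollup sketch: each instance lands in exactly one bucket based on
+  // Instance.getHealthState(); e.g. three instances reporting Up/Starting/Unknown yield
+  // total=3, up=1, starting=1, unknown=1, down=0, outOfService=0.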
+
+  @Override
+  public ImagesSummary getImagesSummary() {
+    return new ImagesSummary() {
+      @Override
+      public List<ImageSummary> getSummaries() {
+        return new ArrayList<>(
+            Arrays.asList(
+                new ImageSummary() {
+                  private String serverGroupName = getName();
+                  private String imageName = (image == null ? null : (String) image.get("name"));
+                  private String imageId = (image == null ? null : (String) image.get("imageId"));
+
+                  @Override
+                  public Map<String, Object> getBuildInfo() {
+                    return buildInfo;
+                  }
+
+                  @Override
+                  public Map<String, Object> getImage() {
+                    return image;
+                  }
+
+                  public String getServerGroupName() {
+                    return serverGroupName;
+                  }
+
+                  public void setServerGroupName(String serverGroupName) {
+                    this.serverGroupName = serverGroupName;
+                  }
+
+                  public String getImageName() {
+                    return imageName;
+                  }
+
+                  public void setImageName(String imageName) {
+                    this.imageName = imageName;
+                  }
+
+                  public String getImageId() {
+                    return imageId;
+                  }
+
+                  public void setImageId(String imageId) {
+                    this.imageId = imageId;
+                  }
+                }));
+      }
+    };
+  }
+
+  @Override
+  public ImageSummary getImageSummary() {
+    final ImagesSummary summary = getImagesSummary();
+    return summary.getSummaries().get(0);
+  }
+
+  public static Collection<Instance> filterInstancesByHealthState(
+      Set<Instance> instances, HealthState healthState) {
+    return instances.stream()
+        .filter(it -> it.getHealthState() == healthState)
+        .collect(Collectors.toList());
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSubnet.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSubnet.java
new file mode 100644
index 00000000000..e3b7a9e41c3
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSubnet.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import com.netflix.spinnaker.clouddriver.model.Subnet;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import lombok.Data;
+
+@Data
+public class TencentCloudSubnet implements Subnet {
+
+  private final String type = TencentCloudProvider.ID;
+  private String name;
+  private String id;
+  private String account;
+  private String region;
+  private String vpcId;
+  private String cidrBlock;
+  private Boolean isDefault;
+  private String zone;
+  private String purpose;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSubnetDescription.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSubnetDescription.java
new file mode 100644
index 00000000000..e9af031efc0
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudSubnetDescription.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import lombok.Data;
+
+@Data
+public class TencentCloudSubnetDescription {
+
+  private String subnetId;
+  private String vpcId;
+  private String subnetName;
+  private String cidrBlock;
+  private Boolean isDefault;
+  private String zone;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudTargetHealth.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudTargetHealth.java
new file mode 100644
index 00000000000..86094ffb8c0
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/TencentCloudTargetHealth.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model;
+
+import static com.netflix.spinnaker.clouddriver.model.HealthState.*;
+
+import com.netflix.spinnaker.clouddriver.model.Health;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class TencentCloudTargetHealth implements Health {
+
+  private TargetHealthStatus targetHealthStatus;
+  private List<LBHealthSummary> loadBalancers = new ArrayList<>();
+
+  public TencentCloudTargetHealth(boolean healthStatus) {
+    targetHealthStatus = healthStatus ? TargetHealthStatus.HEALTHY : TargetHealthStatus.UNHEALTHY;
+  }
+
+  public TencentCloudTargetHealth() {
+    targetHealthStatus = TargetHealthStatus.UNKNOWN;
+  }
+
+  public HealthState getState() {
+    switch (targetHealthStatus) {
+      case UNHEALTHY:
+      case UNKNOWN:
+        return Down;
+      case HEALTHY:
+        return Up;
+      default:
+        return Unknown;
+    }
+  }
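+
+  // Note: UNKNOWN target health maps to Down rather than Unknown above, so a target that
+  // has not yet reported to the load balancer counts against the server group's health.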
+ : "Healthy"; + } + + public enum ServiceStatus { + InService, + OutOfService + } + } +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancer.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancer.java new file mode 100644 index 00000000000..2ab8324c3ef --- /dev/null +++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancer.java @@ -0,0 +1,89 @@ +/* + * Copyright 2019 THL A29 Limited, a Tencent company. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer; + +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider; +import com.netflix.spinnaker.clouddriver.tencentcloud.model.TencentCloudBasicResource; +import com.netflix.spinnaker.moniker.Moniker; +import java.util.List; +import java.util.Set; +import lombok.Data; + +@Data +public class TencentCloudLoadBalancer implements LoadBalancer, TencentCloudBasicResource { + + private final String cloudProvider = TencentCloudProvider.ID; + private final String type = TencentCloudProvider.ID; + private String application; + private String accountName; + private String region; + private String id; + private String name; + private String loadBalancerId; + private String loadBalancerName; + private String loadBalancerType; + private Integer forwardType; + private String vpcId; + private String subnetId; + private Integer projectId; + private String createTime; + private List loadBalancerVips; + private List securityGroups; + private List listeners; + private Set serverGroups; + + @Override + public String getAccount() { + return accountName; + } + + @Override + public Moniker getMoniker() { + return NamerRegistry.lookup() + .withProvider(TencentCloudProvider.ID) + .withAccount(accountName) + .withResource(TencentCloudBasicResource.class) + .deriveMoniker(this); + } + + @Override + public String getMonikerName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TencentCloudLoadBalancer)) { + return false; + } + + TencentCloudLoadBalancer other = (TencentCloudLoadBalancer) o; + return other.getAccount().equals(this.getAccount()) + && other.getName().equals(this.getName()) + && other.getType().equals(this.getType()) + && other.getId().equals(this.getId()) + && other.getRegion().equals(this.getRegion()); + } + + @Override + public int hashCode() { + return getId().hashCode() + getType().hashCode(); + } +} diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerCertificate.java 
new file mode 100644
index 00000000000..bb0d90c69de
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerCertificate.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer;
+
+import lombok.Data;
+
+@Data
+public class TencentCloudLoadBalancerCertificate {
+
+  private String sslMode;
+  private String certId;
+  private String certCaId;
+  private String certName;
+  private String certKey;
+  private String certContent;
+  private String certCaName;
+  private String certCaContent;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerHealthCheck.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerHealthCheck.java
new file mode 100644
index 00000000000..1770607e797
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerHealthCheck.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer;
+
+import lombok.Data;
+
+@Data
+public class TencentCloudLoadBalancerHealthCheck {
+
+  private Integer healthSwitch;
+  private Integer timeOut;
+  private Integer intervalTime;
+  private Integer healthNum;
+  private Integer unHealthNum;
+  private Integer httpCode;
+  private String httpCheckPath;
+  private String httpCheckDomain;
+  private String httpCheckMethod;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerListener.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerListener.java
new file mode 100644
index 00000000000..535121ba095
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerListener.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class TencentCloudLoadBalancerListener {
+  private String listenerId;
+  private String listenerName;
+  private String protocol;
+  private Integer port;
+  private TencentCloudLoadBalancerHealthCheck healthCheck;
+  private TencentCloudLoadBalancerCertificate certificate;
+  private Integer sessionExpireTime;
+  private String scheduler;
+  private Integer sniSwitch;
+  // target, tcp/udp 4 layer
+  private List<TencentCloudLoadBalancerTarget> targets;
+  // rule, http/https 7 layer
+  private List<TencentCloudLoadBalancerRule> rules;
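+
+  // Sketch of the split: a layer-4 listener (TCP/UDP) binds backends directly via the
+  // targets list, while a layer-7 listener (HTTP/HTTPS) routes per domain/url through
+  // the rules list, each rule carrying its own targets and health check.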
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerRule.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerRule.java
new file mode 100644
index 00000000000..6c85085b837
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerRule.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer;
+
+import java.util.List;
+import lombok.Data;
+
+@Data
+public class TencentCloudLoadBalancerRule {
+
+  private String locationId;
+  private String domain;
+  private String url;
+  private Integer sessionExpireTime;
+  private TencentCloudLoadBalancerHealthCheck healthCheck;
+  private TencentCloudLoadBalancerCertificate certificate;
+  private String scheduler;
+  private List<TencentCloudLoadBalancerTarget> targets;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerTarget.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerTarget.java
new file mode 100644
index 00000000000..abaa97f488e
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerTarget.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer;
+
+import lombok.Data;
+
+@Data
+public class TencentCloudLoadBalancerTarget {
+
+  private String instanceId;
+  private Integer port;
+  private String type;
+  private Integer weight;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerTargetHealth.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerTargetHealth.java
new file mode 100644
index 00000000000..6ef1c4f5ae1
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/model/loadbalancer/TencentCloudLoadBalancerTargetHealth.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.model.loadbalancer;
+
+import lombok.Data;
+
+@Data
+public class TencentCloudLoadBalancerTargetHealth {
+
+  private String instanceId;
+  private Integer port;
+  private Boolean healthStatus;
+  private String loadBalancerId;
+  private String listenerId;
+  private String locationId;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/names/TencentCloudBasicResourceNamer.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/names/TencentCloudBasicResourceNamer.java
new file mode 100644
index 00000000000..b26f9e1504e
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/names/TencentCloudBasicResourceNamer.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.names;
+
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.names.NamingStrategy;
+import com.netflix.spinnaker.clouddriver.tencentcloud.model.TencentCloudBasicResource;
+import com.netflix.spinnaker.moniker.Moniker;
+
+public class TencentCloudBasicResourceNamer
+    implements NamingStrategy<TencentCloudBasicResource> {
+
+  @Override
+  public String getName() {
+    return "tencentCloudAnnotations";
+  }
+
+  public void applyMoniker(TencentCloudBasicResource tencentCloudBasicResource, Moniker moniker) {}
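+
+  // Monikers are derived from frigga-style names; e.g. "myapp-staging-web-v001" parses
+  // to app=myapp, stack=staging, detail=web, sequence=1, cluster=myapp-staging-web.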
+
+  @Override
+  public Moniker deriveMoniker(TencentCloudBasicResource tencentCloudBasicResource) {
+    String name = tencentCloudBasicResource.getMonikerName();
+    Names parsed = Names.parseName(name);
+
+    return Moniker.builder()
+        .app(parsed.getApp())
+        .cluster(parsed.getCluster())
+        .detail(parsed.getDetail())
+        .stack(parsed.getStack())
+        .sequence(parsed.getSequence())
+        .build();
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/TencentCloudInfrastructureProvider.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/TencentCloudInfrastructureProvider.java
new file mode 100644
index 00000000000..a06110d3d7b
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/TencentCloudInfrastructureProvider.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.provider;
+
+import static com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys.Namespace.APPLICATIONS;
+import static com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys.Namespace.CLUSTERS;
+import static com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys.Namespace.INSTANCES;
+import static com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys.Namespace.LOAD_BALANCERS;
+import static com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys.Namespace.SECURITY_GROUPS;
+import static com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys.Namespace.SERVER_GROUPS;
+
+import com.netflix.spinnaker.cats.agent.Agent;
+import com.netflix.spinnaker.cats.agent.AgentSchedulerAware;
+import com.netflix.spinnaker.clouddriver.cache.SearchableProvider;
+import com.netflix.spinnaker.clouddriver.tencentcloud.cache.Keys;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import lombok.Getter;
+
+@Getter
+public class TencentCloudInfrastructureProvider extends AgentSchedulerAware
+    implements SearchableProvider {
+
+  private final String providerName;
+  private final List<Agent> agents;
+  private final Set<String> defaultCaches;
+  private final Map<String, String> urlMappingTemplates;
+  private final Map<SearchableProvider.SearchableResource, SearchableProvider.SearchResultHydrator>
+      searchResultHydrators;
+
+  public TencentCloudInfrastructureProvider(List<Agent> agents) {
+    this.providerName = TencentCloudInfrastructureProvider.class.getName();
+    this.agents = agents;
+
+    List<String> nsList =
+        Arrays.asList(
+            APPLICATIONS.ns,
+            CLUSTERS.ns,
+            INSTANCES.ns,
+            LOAD_BALANCERS.ns,
+            SECURITY_GROUPS.ns,
+            SERVER_GROUPS.ns);
+
+    this.defaultCaches = new HashSet<>();
+    this.defaultCaches.addAll(nsList);
+
+    this.urlMappingTemplates = new HashMap<>();
+    this.urlMappingTemplates.put(
+        SECURITY_GROUPS.ns, "/securityGroups/$account/$provider/$name?region=$region");
+    this.searchResultHydrators = new HashMap<>();
+  }
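+
+  // parseKey below feeds clouddriver's generic search: cache keys produced by Keys are
+  // decomposed back into attribute maps so results can be rendered against the URL
+  // templates registered above (assumption: Keys mirrors other providers' key schemes).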
+
+  @Override
+  public Map<String, String> parseKey(String key) {
+    return Keys.parse(key);
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/agent/AbstractTencentCloudCachingAgent.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/agent/AbstractTencentCloudCachingAgent.java
new file mode 100644
index 00000000000..275dfd468d9
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/agent/AbstractTencentCloudCachingAgent.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.provider.agent;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.AccountAware;
+import com.netflix.spinnaker.cats.agent.CachingAgent;
+import com.netflix.spinnaker.clouddriver.tencentcloud.provider.TencentCloudInfrastructureProvider;
+import com.netflix.spinnaker.clouddriver.tencentcloud.security.TencentCloudNamedAccountCredentials;
+import java.util.Map;
+
+public abstract class AbstractTencentCloudCachingAgent implements CachingAgent, AccountAware {
+
+  private final ObjectMapper objectMapper;
+  private final String region;
+  private final String accountName;
+  private final TencentCloudNamedAccountCredentials credentials;
+  private final String providerName = TencentCloudInfrastructureProvider.class.getName();
+  final TypeReference<Map<String, Object>> ATTRIBUTES =
+      new TypeReference<Map<String, Object>>() {};
+
+  public AbstractTencentCloudCachingAgent(
+      TencentCloudNamedAccountCredentials credentials, ObjectMapper objectMapper, String region) {
+    this.credentials = credentials;
+    this.objectMapper = objectMapper;
+    this.region = region;
+    this.accountName = credentials.getName();
+  }
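+
+  // The agent type doubles as the agent's identity in the cats scheduler, so it is
+  // namespaced by account and region to keep one agent per account/region pair distinct.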
+
+  @Override
+  public String getAgentType() {
+    return getAccountName() + "/" + getRegion() + "/" + this.getClass().getSimpleName();
+  }
+
+  public final ObjectMapper getObjectMapper() {
+    return objectMapper;
+  }
+
+  public final String getRegion() {
+    return region;
+  }
+
+  public final String getAccountName() {
+    return accountName;
+  }
+
+  public final TencentCloudNamedAccountCredentials getCredentials() {
+    return credentials;
+  }
+
+  public final String getProviderName() {
+    return providerName;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/view/MutableCacheData.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/view/MutableCacheData.java
new file mode 100644
index 00000000000..30907f2c879
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/provider/view/MutableCacheData.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.provider.view;
+
+import com.netflix.spinnaker.cats.cache.CacheData;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@AllArgsConstructor
+@NoArgsConstructor
+public class MutableCacheData implements CacheData {
+
+  private String id;
+  // A negative TTL keeps the entry from being expired by the cache.
+  private int ttlSeconds = -1;
+  private Map<String, Object> attributes = new HashMap<>();
+  private Map<String, Collection<String>> relationships = new HashMap<>();
+
+  public MutableCacheData(String id) {
+    this.id = id;
+  }
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudCredentials.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudCredentials.java
new file mode 100644
index 00000000000..7c6bc9da9d0
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudCredentials.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.security;
+
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@Getter
+@AllArgsConstructor
+public final class TencentCloudCredentials {
+
+  private final String secretId;
+  private final String secretKey;
+}
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudCredentialsInitializer.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudCredentialsInitializer.java
new file mode 100644
index 00000000000..7d3813a5b7f
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudCredentialsInitializer.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.security;
+
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
+import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable;
+import com.netflix.spinnaker.clouddriver.tencentcloud.config.TencentCloudConfigurationProperties;
+import java.util.ArrayList;
+import java.util.List;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Slf4j
+@Configuration
+public class TencentCloudCredentialsInitializer implements CredentialsInitializerSynchronizable {
+
+  @Bean
+  public List<TencentCloudNamedAccountCredentials> tencentCloudNamedAccountCredentials(
+      TencentCloudConfigurationProperties tencentCloudConfigurationProperties,
+      AccountCredentialsRepository accountCredentialsRepository) {
+    return syncAccounts(tencentCloudConfigurationProperties, accountCredentialsRepository);
+  }
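+
+  // Bootstrapping sketch: each account under the tencentcloud.accounts configuration is
+  // turned into a credentials object and saved into the shared
+  // AccountCredentialsRepository, which is how the rest of clouddriver discovers it.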
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudNamedAccountCredentials.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudNamedAccountCredentials.java
new file mode 100644
index 00000000000..338198b94de
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/clouddriver/tencentcloud/security/TencentCloudNamedAccountCredentials.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.security;
+
+import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
+import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials;
+import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
+import com.netflix.spinnaker.clouddriver.tencentcloud.config.TencentCloudConfigurationProperties;
+import com.netflix.spinnaker.clouddriver.tencentcloud.model.TencentCloudBasicResource;
+import com.netflix.spinnaker.clouddriver.tencentcloud.names.TencentCloudBasicResourceNamer;
+import com.netflix.spinnaker.fiat.model.resources.Permissions;
+import com.netflix.spinnaker.moniker.Namer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
+import lombok.Data;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.CollectionUtils;
+
+@Slf4j
+@Data
+public class TencentCloudNamedAccountCredentials
+    extends AbstractAccountCredentials<TencentCloudCredentials> {
+
+  private String name;
+  private String environment;
+  private String accountType;
+  private TencentCloudCredentials credentials;
+  private List<TencentCloudRegion> regions;
+  private List<String> requiredGroupMembership;
+  private Permissions permissions;
+  private Namer<TencentCloudBasicResource> namer = new TencentCloudBasicResourceNamer();
+
+  public TencentCloudNamedAccountCredentials(
+      TencentCloudConfigurationProperties.ManagedAccount managedAccount) {
+    this.name = managedAccount.getName();
+    this.environment = managedAccount.getEnvironment();
+    this.accountType = managedAccount.getAccountType();
+    this.credentials =
+        new TencentCloudCredentials(managedAccount.getSecretId(), managedAccount.getSecretKey());
+    this.regions = buildRegions(managedAccount.getRegions());
+    NamerRegistry.lookup()
+        .withProvider(TencentCloudProvider.ID)
+        .withAccount(name)
+        .setNamer(TencentCloudBasicResource.class, namer);
+  }
+
+  private static List<TencentCloudRegion> buildRegions(List<String> regions) {
+    if (CollectionUtils.isEmpty(regions)) {
+      return new ArrayList<>();
+    } else {
+      return regions.stream().map(TencentCloudRegion::new).collect(Collectors.toList());
+    }
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return TencentCloudProvider.ID;
+  }
+
+  @Override
+  public List<String> getRequiredGroupMembership() {
+    return requiredGroupMembership;
+  }
+
+  @Getter
+  @RequiredArgsConstructor
+  public static final class TencentCloudRegion {
+    @Nonnull private final String name;
+  }
+}
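The constructor registers a `Namer` for each account; the sketch below shows how a consumer might resolve it back through the same registry chain. The account name is a placeholder:

```java
import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
import com.netflix.spinnaker.clouddriver.tencentcloud.TencentCloudProvider;
import com.netflix.spinnaker.clouddriver.tencentcloud.model.TencentCloudBasicResource;
import com.netflix.spinnaker.moniker.Moniker;
import com.netflix.spinnaker.moniker.Namer;

class NamerLookupSketch {
  static Moniker monikerOf(TencentCloudBasicResource resource) {
    // Mirrors the setNamer(...) registration done in the constructor above.
    Namer<TencentCloudBasicResource> namer =
        NamerRegistry.lookup()
            .withProvider(TencentCloudProvider.ID)
            .withAccount("my-tencent-account") // placeholder account name
            .withResource(TencentCloudBasicResource.class);
    return namer.deriveMoniker(resource);
  }
}
```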
diff --git a/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/config/TencentCloudConfiguration.java b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/config/TencentCloudConfiguration.java
new file mode 100644
index 00000000000..ce42ef5674f
--- /dev/null
+++ b/clouddriver-tencentcloud/src/main/java/com/netflix/spinnaker/config/TencentCloudConfiguration.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.config;
+
+import com.netflix.spinnaker.clouddriver.tencentcloud.config.TencentCloudConfigurationProperties;
+import com.netflix.spinnaker.clouddriver.tencentcloud.security.TencentCloudCredentialsInitializer;
+import org.springframework.beans.factory.config.ConfigurableBeanFactory;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Scope;
+import org.springframework.scheduling.annotation.EnableScheduling;
+
+@Configuration
+@EnableConfigurationProperties
+@EnableScheduling
+@ConditionalOnProperty("tencentcloud.enabled")
+@ComponentScan("com.netflix.spinnaker.clouddriver.tencentcloud")
+@Import(TencentCloudCredentialsInitializer.class)
+public class TencentCloudConfiguration {
+
+  @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
+  @Bean
+  @ConfigurationProperties("tencentcloud")
+  public TencentCloudConfigurationProperties tencentCloudConfigurationProperties() {
+    return new TencentCloudConfigurationProperties();
+  }
+}
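`TencentCloudConfigurationProperties` itself is not part of this diff; the sketch below infers its shape from how the initializer and credentials classes consume it (`getAccounts()`, `getName()`, `getSecretId()`, and so on), so the real class may differ:

```java
import java.util.ArrayList;
import java.util.List;
import lombok.Data;

@Data
class TencentCloudConfigurationPropertiesSketch {
  // Bound from tencentcloud.accounts[*] via @ConfigurationProperties("tencentcloud").
  private List<ManagedAccount> accounts = new ArrayList<>();

  @Data
  static class ManagedAccount {
    private String name;
    private String environment;
    private String accountType;
    private String secretId;
    private String secretKey;
    private List<String> regions = new ArrayList<>();
  }
}
```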
diff --git a/clouddriver-tencentcloud/src/test/java/com/netflix/spinnaker/clouddriver/tencentcloud/cache/KeysTest.java b/clouddriver-tencentcloud/src/test/java/com/netflix/spinnaker/clouddriver/tencentcloud/cache/KeysTest.java
new file mode 100644
index 00000000000..bc1e7f00ef9
--- /dev/null
+++ b/clouddriver-tencentcloud/src/test/java/com/netflix/spinnaker/clouddriver/tencentcloud/cache/KeysTest.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2020 THL A29 Limited, a Tencent company.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.tencentcloud.cache;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class KeysTest {
+
+  @Test
+  void testParseKey() {
+    String key =
+        "tencentcloud:security_groups:cvm-dev-spinnaker-sc-1:my-tencent-account:ap-guangzhou:*";
+    Map<String, String> result = Keys.parse(key);
+
+    assertEquals("cvm", result.get("application"));
+    assertEquals("tencentcloud", result.get("provider"));
+    assertEquals("security_groups", result.get("type"));
+    assertEquals("cvm-dev-spinnaker-sc-1", result.get("name"));
+    assertEquals("ap-guangzhou", result.get("region"));
+  }
+
+  @Test
+  void testCanParseType() {
+    Keys keys = new Keys();
+    assertTrue(keys.canParseType("load_balancers"));
+    assertTrue(keys.canParseType("security_groups"));
+    assertFalse(keys.canParseType("wrong_type"));
+  }
+}
diff --git a/clouddriver-titus/README.md b/clouddriver-titus/README.md
index b9219a26893..e893ac84c28 100644
--- a/clouddriver-titus/README.md
+++ b/clouddriver-titus/README.md
@@ -8,19 +8,19 @@ titus:
   awsVpc: vpc0 # this is the default vpc used by titus
   accounts:
     - name: titusdevint
-      environment: test
+      environment: test
      discovery: "http://discovery.compary.com/v2"
      discoveryEnabled: true
      registry: testregistry # reference to the docker registry being used
-      awsAccount: test # aws account underpinning
+      awsAccount: test # aws account underpinning
      autoscalingEnabled: true
      loadBalancingEnabled: false # load balancing will be released at a later date
      regions:
        - name: us-east-1
          url: https://myTitus.us-east-1.company.com/
-          port: 7104
+          port: 7104
          autoscalingEnabled: true
-          loadBalancingEnabled: false
+          loadBalancingEnabled: false
        - name: eu-west-1
          url: https://myTitus.eu-west-1.company.com/
          port: 7104
@@ -28,11 +28,10 @@ titus:
          loadBalancingEnabled: false
 ```
 
-By default, Titus will try to create a grpc connection to port 7104.
+By default, Titus will try to create a gRPC connection on port 7104.
 You need to have an underlying aws connection created and ready to be used and specify this in the awsAccount section.
 
-Aws is used for security groups, iam profiles and autoscaling policies.
-
-There are currently no plans to enable titus in Halyard.
+AWS is used for security groups, IAM profiles, and autoscaling policies.
+There are currently no plans to enable Titus in Halyard.
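For readers wiring this up, the sketch below shows roughly what such a per-region channel looks like with plain grpc-java; the host and port mirror the README example, and the real client construction (TLS setup, interceptors, Eureka-aware discovery) lives in `TitusClientProvider`:

```java
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

class TitusChannelSketch {
  // Illustrative only: endpoint values come from the YAML example above.
  static ManagedChannel connect() {
    return ManagedChannelBuilder
        .forAddress("myTitus.us-east-1.company.com", 7104)
        .useTransportSecurity()
        .build();
  }
}
```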
diff --git a/clouddriver-titus/clouddriver-titus.gradle b/clouddriver-titus/clouddriver-titus.gradle index ff29cab793d..b567e6a99f5 100644 --- a/clouddriver-titus/clouddriver-titus.gradle +++ b/clouddriver-titus/clouddriver-titus.gradle @@ -1,39 +1,55 @@ -apply plugin: 'com.google.protobuf' - -buildscript { - repositories { - jcenter() - } - - dependencies { - classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.5' - } -} +apply plugin: "com.google.protobuf" ext { - protobufVersion = '3.2.+' - grpcVersion = '1.9.+' -} - -repositories { - maven { url 'https://dl.bintray.com/netflixoss' } + protobufVersion = '3.21.12' + grpcVersion = '1.45.1' } dependencies { - protobuf 'com.netflix.titus:titus-api-definitions:0.0.1-rc23' + protobuf 'com.netflix.titus:titus-api-definitions:0.0.1-rc71' - compile "com.google.protobuf:protobuf-java:$protobufVersion" - compile "io.grpc:grpc-protobuf:$grpcVersion" - compile "io.grpc:grpc-stub:$grpcVersion" - compile "io.grpc:grpc-netty-shaded:$grpcVersion" - compile project(":clouddriver-aws") - compile project(":clouddriver-core") - compile project(":clouddriver-eureka") - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootWeb') - compile 'com.squareup.okhttp3:okhttp:3.1.2' - testCompile "org.slf4j:slf4j-simple:${spinnaker.version('slf4j')}" - spinnaker.group('test') + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-aws") + implementation project(":clouddriver-core") + implementation project(":clouddriver-eureka") + implementation project(":clouddriver-saga") + implementation project(":clouddriver-security") + + implementation "javax.inject:javax.inject:1" + implementation "com.amazonaws:aws-java-sdk" + implementation "com.google.protobuf:protobuf-java" + implementation "com.google.protobuf:protobuf-java-util" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-core" + implementation "io.spinnaker.kork:kork-exceptions" + implementation "io.spinnaker.kork:kork-security" + implementation "io.spinnaker.kork:kork-moniker" + implementation "com.squareup.okhttp3:okhttp:3.1.2" + implementation "io.grpc:grpc-netty-shaded:$grpcVersion" + implementation "io.grpc:grpc-protobuf:$grpcVersion" + implementation "io.grpc:grpc-stub:$grpcVersion" + implementation "org.apache.groovy:groovy" + implementation "org.apache.groovy:groovy-json" + implementation "org.slf4j:slf4j-api" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.yaml:snakeyaml" + + testImplementation "cglib:cglib-nodep" + testImplementation "junit:junit" + testImplementation "org.hamcrest:hamcrest-core" + testImplementation "org.objenesis:objenesis" + testImplementation "org.slf4j:slf4j-simple" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "io.spinnaker.kork:kork-test" + testRuntimeOnly "net.bytebuddy:byte-buddy" } sourceSets { @@ -66,3 +82,14 @@ protobuf { } } } + +javadoc { + exclude 'io/titanframework/**' + exclude 'com/netflix/titus/grpc/protogen/**' +} + +def licenseExtension = 
project.extensions.findByName('license') +if (licenseExtension != null) { + licenseExtension.exclude('io/titanframework/**') + licenseExtension.exclude('com/netflix/titus/grpc/protogen/**') +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusClientProvider.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusClientProvider.groovy index 09b02a97ffb..367818734fd 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusClientProvider.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusClientProvider.groovy @@ -57,7 +57,7 @@ class TitusClientProvider { TitusAutoscalingClient getTitusAutoscalingClient(NetflixTitusCredentials account, String region) { final TitusRegion titusRegion = Objects.requireNonNull(account.regions.find { it.name == region }, "region") - if (!account.eurekaName || !account.autoscalingEnabled || !titusRegion.autoscalingEnabled) { + if (!account.eurekaName) { return null } final TitusClientKey key = new TitusClientKey(Objects.requireNonNull(account.name), titusRegion) @@ -66,7 +66,7 @@ class TitusClientProvider { TitusLoadBalancerClient getTitusLoadBalancerClient(NetflixTitusCredentials account, String region) { final TitusRegion titusRegion = Objects.requireNonNull(account.regions.find { it.name == region }, "region") - if (!account.eurekaName || !account.loadBalancingEnabled || !titusRegion.loadBalancingEnabled) { + if (!account.eurekaName) { return null } final TitusClientKey key = new TitusClientKey(Objects.requireNonNull(account.name), titusRegion) diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusException.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusException.java deleted file mode 100644 index 127ae197771..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/TitusException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package com.netflix.spinnaker.clouddriver.titus;
-
-public class TitusException extends RuntimeException {
-  public TitusException(String message) {
-    super(message);
-  }
-
-  public TitusException(String message, Throwable cause) {
-    super(message, cause);
-  }
-
-  public TitusException(Throwable cause) {
-    super(cause);
-  }
-}
diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/Keys.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/Keys.groovy
index d871984c4c2..2b5a16c6a5a 100644
--- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/Keys.groovy
+++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/Keys.groovy
@@ -115,7 +115,7 @@ class Keys implements KeyParser {
         result << [application: names.app.toLowerCase(), cluster: parts[2], account: parts[3], region: parts[4], serverGroup: parts[5], stack: names.stack, detail: names.detail, sequence: names.sequence?.toString()]
         break
       case Namespace.INSTANCES.ns:
-        result << [id: parts[2], region: parts[3], instanceId: parts[5]]
+        result << [id: parts[2], region: parts[3], instanceId: parts[5]]
         break
       case Namespace.CLUSTERS.ns:
         def names = Names.parseName(parts[4])
@@ -188,4 +188,20 @@ class Keys implements KeyParser {
   static String getInstanceHealthKey(String id, String healthProvider) {
     "${TitusCloudProvider.ID}:${Namespace.HEALTH}:${id}:${healthProvider}"
   }
+
+  static String removeSchemaVersion(String key) {
+    def parts = key.split(':')
+
+    // parts[2] is read below, so require at least three segments.
+    if ((parts.length < 3) || (parts[0] != TitusCloudProvider.ID)) {
+      return key
+    }
+
+    if (parts[2] != CachingSchema.V2.toString()) {
+      return key
+    }
+
+    parts[2] = null
+    return parts.findAll({ it != null }).join(':')
+  }
 }
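The helper above strips the schema-version segment so V2 keys can flow through the legacy parsing paths. An illustrative call, assuming `CachingSchema.V2` renders as `v2` inside keys; the account, region, and task values are invented:

```java
import com.netflix.spinnaker.clouddriver.titus.caching.Keys;

class RemoveSchemaVersionSketch {
  public static void main(String[] args) {
    // "titus:instances:v2:my-account:us-east-1:task-1234"
    //   -> "titus:instances:my-account:us-east-1:task-1234"
    System.out.println(
        Keys.removeSchemaVersion("titus:instances:v2:my-account:us-east-1:task-1234"));
    // Keys from other providers (or without a schema segment) pass through unchanged.
    System.out.println(Keys.removeSchemaVersion("aws:instances:whatever"));
  }
}
```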
diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProvider.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProvider.groovy
index bd5107ad195..db35d6bcce9 100644
--- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProvider.groovy
+++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProvider.groovy
@@ -18,6 +18,7 @@ package com.netflix.spinnaker.clouddriver.titus.caching
 
 import com.netflix.spinnaker.cats.agent.Agent
 import com.netflix.spinnaker.cats.agent.CachingAgent
+import com.netflix.spinnaker.cats.cache.Cache
 import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider
 import com.netflix.spinnaker.clouddriver.cache.KeyParser
 import com.netflix.spinnaker.clouddriver.cache.SearchableProvider
@@ -59,12 +60,8 @@ class TitusCachingProvider implements SearchableProvider, EurekaAwareProvider {
 
   @Override
   String getInstanceKey(Map<String, Object> attributes, String region) {
-    CachingSchema schema = cachingSchemaUtil.get().getCachingSchemaForAccount(attributes.accountId)
-    if (schema == CachingSchema.V2) {
-      Keys.getInstanceV2Key(attributes.titusTaskId, attributes.accountId, region)
-    } else {
-      Keys.getInstanceKey(attributes.titusTaskId, attributes.accountId, attributes.titusStack, region)
-    }
+    CachingSchema schema = cachingSchemaUtil.get().getCachingSchemaForAccount((String) attributes.accountId)
+    Keys.getInstanceV2Key(attributes.titusTaskId, attributes.accountId, region)
   }
 
   @Override
@@ -83,10 +80,9 @@ class TitusCachingProvider implements SearchableProvider, EurekaAwareProvider {
     (Keys.Namespace.CLUSTERS.ns)     : '/applications/${application.toLowerCase()}/clusters/$account/$cluster'
   ].asImmutable()
 
-  @Override
-  Map<SearchableResource, SearchableProvider.SearchResultHydrator> getSearchResultHydrators() {
-    return Collections.emptyMap()
-  }
+  final Map<SearchableResource, SearchableProvider.SearchResultHydrator> searchResultHydrators = [
+    (new TitusSearchableResource(Keys.Namespace.INSTANCES.ns)): new InstanceSearchResultHydrator(),
+  ]
 
   @Override
   Map<String, String> parseKey(String key) {
@@ -106,4 +102,31 @@ class TitusCachingProvider implements SearchableProvider, EurekaAwareProvider {
       filters?.cloudProvider == null || searchableProviders.contains(filters.cloudProvider)
     ) && hasAgentForType(type, getAgents())
   }
+
+  private static class InstanceSearchResultHydrator implements SearchableProvider.SearchResultHydrator {
+    @Override
+    Map<String, String> hydrateResult(Cache cacheView, Map<String, String> result, String id) {
+      def item = cacheView.get(Keys.Namespace.INSTANCES.ns, id)
+      if (!item) {
+        return null
+      }
+      if (!item?.relationships["serverGroups"]) {
+        return result
+      }
+
+      def serverGroup = Keys.parse(item.relationships["serverGroups"][0])
+      return result + [
+        application: serverGroup.application as String,
+        cluster    : serverGroup.cluster as String,
+        serverGroup: serverGroup.serverGroup as String
+      ]
+    }
+  }
+
+  private static class TitusSearchableResource extends SearchableResource {
+    TitusSearchableResource(String resourceType) {
+      this.resourceType = resourceType.toLowerCase()
+      this.platform = 'titus'
+    }
+  }
 }
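The new hydrator relies on `Keys.parse` to explode the first `serverGroups` relationship of a cached instance into the fields it copies onto the search hit. A sketch with a hypothetical server-group key, using the segment layout from the `Keys.parse` change above:

```java
import com.netflix.spinnaker.clouddriver.titus.caching.Keys;
import java.util.Map;

class HydrationSketch {
  public static void main(String[] args) {
    // Hypothetical key: titus:serverGroups:<cluster>:<account>:<region>:<serverGroup>
    Map<String, String> serverGroup =
        Keys.parse("titus:serverGroups:api-main:my-account:us-east-1:api-main-v042");
    // hydrateResult copies these three fields onto the search result:
    System.out.println(serverGroup.get("application")); // "api"
    System.out.println(serverGroup.get("cluster"));     // "api-main"
    System.out.println(serverGroup.get("serverGroup")); // "api-main-v042"
  }
}
```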
diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProviderConfig.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProviderConfig.groovy
index 455e11a33b8..5008a8d528e 100644
--- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProviderConfig.groovy
+++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/TitusCachingProviderConfig.groovy
@@ -16,20 +16,19 @@
 
 package com.netflix.spinnaker.clouddriver.titus.caching
 
-import com.fasterxml.jackson.databind.DeserializationFeature
+
 import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.databind.SerializationFeature
 import com.netflix.spectator.api.Registry
 import com.netflix.spinnaker.cats.agent.CachingAgent
 import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository
 import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider
 import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider
-import com.netflix.spinnaker.clouddriver.titus.caching.agents.TitusClusterCachingAgent
-import com.netflix.spinnaker.clouddriver.titus.caching.agents.TitusInstanceCachingAgent
-import com.netflix.spinnaker.clouddriver.titus.caching.agents.TitusV2ClusterCachingAgent
+import com.netflix.spinnaker.clouddriver.titus.caching.agents.ClusterCleanupAgent
+import com.netflix.spinnaker.clouddriver.titus.caching.agents.TitusStreamingUpdateAgent
 import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil
 import com.netflix.spinnaker.clouddriver.titus.caching.utils.CachingSchemaUtil
 import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials
+import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService
 import org.springframework.beans.factory.annotation.Value
 import org.springframework.context.annotation.Bean
 import org.springframework.context.annotation.Configuration
@@ -40,10 +39,10 @@ import javax.inject.Provider
 
 @Configuration
 class TitusCachingProviderConfig {
-  @Value('${titus.pollIntervalMillis:30000}')
+  @Value('${titus.poll-interval-millis:30000}')
   Long pollIntervalMillis
 
-  @Value('${titus.timeoutMillis:300000}')
+  @Value('${titus.timeout-millis:300000}')
   Long timeoutMillis
 
   @Bean
@@ -54,55 +53,26 @@ class TitusCachingProviderConfig {
   TitusCachingProvider titusCachingProvider(AccountCredentialsRepository accountCredentialsRepository,
                                             TitusCloudProvider titusCloudProvider,
                                             TitusClientProvider titusClientProvider,
                                             ObjectMapper objectMapper,
                                             Registry registry,
                                             Provider<AwsLookupUtil> awsLookupUtilProvider,
-                                            Provider<CachingSchemaUtil> cachingSchemaUtilProvider) {
+                                            Provider<CachingSchemaUtil> cachingSchemaUtilProvider,
+                                            DynamicConfigService dynamicConfigService) {
     List agents = []
    def allAccounts = accountCredentialsRepository.all.findAll { it instanceof NetflixTitusCredentials } as Collection<NetflixTitusCredentials>
    allAccounts.each { NetflixTitusCredentials account ->
      account.regions.each { region ->
-        if (!account.splitCachingEnabled) { //default case
-          agents << new TitusClusterCachingAgent(
-            titusCloudProvider,
-            titusClientProvider,
-            account,
-            region,
-            objectMapper,
-            registry,
-            awsLookupUtilProvider,
-            pollIntervalMillis,
-            timeoutMillis
-          )
-        } else { //use new split caching for this whole account
-          agents << new TitusInstanceCachingAgent(
-            titusClientProvider,
-            account,
-            region,
-            objectMapper,
-            registry,
-            awsLookupUtilProvider
-          )
-          agents << new TitusV2ClusterCachingAgent(
-            titusClientProvider,
-            account,
-            region,
-            objectMapper,
-            registry,
-            awsLookupUtilProvider,
-            pollIntervalMillis,
-            timeoutMillis
-          )
-        }
+        agents << new TitusStreamingUpdateAgent(
+          titusClientProvider,
+          account,
+          region,
+          objectMapper,
+          registry,
+          awsLookupUtilProvider,
+          dynamicConfigService
+        )
      }
    }
+    agents << new ClusterCleanupAgent()
    new TitusCachingProvider(agents, cachingSchemaUtilProvider)
  }
-
-  @Bean
-  ObjectMapper objectMapper() {
-    ObjectMapper objectMapper = new ObjectMapper()
-    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
-    objectMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
-    objectMapper
-  }
 }
diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusClusterCachingAgent.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusClusterCachingAgent.groovy
deleted file mode 100644
index 5e57636ce60..00000000000
--- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusClusterCachingAgent.groovy
+++ /dev/null
@@ -1,553 +0,0 @@
-/*
- * Copyright 2014 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.netflix.spinnaker.clouddriver.titus.caching.agents - -import com.amazonaws.services.elasticloadbalancingv2.model.TargetTypeEnum -import com.fasterxml.jackson.annotation.JsonCreator -import com.fasterxml.jackson.annotation.JsonProperty -import com.fasterxml.jackson.databind.ObjectMapper -import com.google.protobuf.util.JsonFormat -import com.netflix.frigga.Names -import com.netflix.frigga.autoscaling.AutoScalingGroupNameBuilder -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.cats.agent.AgentDataType -import com.netflix.spinnaker.cats.agent.CacheResult -import com.netflix.spinnaker.cats.agent.CachingAgent -import com.netflix.spinnaker.cats.agent.DefaultCacheResult -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.DefaultCacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter -import com.netflix.spinnaker.cats.provider.ProviderCache -import com.netflix.spinnaker.clouddriver.aws.data.ArnUtils -import com.netflix.spinnaker.clouddriver.aws.data.Keys as AwsKeys -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport -import com.netflix.spinnaker.clouddriver.model.HealthState -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider -import com.netflix.spinnaker.clouddriver.titus.caching.Keys -import com.netflix.spinnaker.clouddriver.titus.caching.TitusCachingProvider -import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil -import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion -import com.netflix.spinnaker.clouddriver.titus.client.model.Job -import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState -import com.netflix.spinnaker.clouddriver.titus.client.model.Task -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import com.netflix.spinnaker.clouddriver.titus.model.TitusSecurityGroup -import com.netflix.titus.grpc.protogen.ScalingPolicy -import com.netflix.titus.grpc.protogen.ScalingPolicyResult -import com.netflix.titus.grpc.protogen.ScalingPolicyStatus -import com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState -import org.slf4j.Logger -import org.slf4j.LoggerFactory - -import javax.inject.Provider - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS -import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.* - -class TitusClusterCachingAgent implements CachingAgent, CustomScheduledAgent, OnDemandAgent { - - private static final Logger log = LoggerFactory.getLogger(TitusClusterCachingAgent) - - static final Set types = Collections.unmodifiableSet([ - AUTHORITATIVE.forType(SERVER_GROUPS.ns), - AUTHORITATIVE.forType(APPLICATIONS.ns), - AUTHORITATIVE.forType(INSTANCES.ns), - INFORMATIVE.forType(CLUSTERS.ns), - 
INFORMATIVE.forType(TARGET_GROUPS.ns) - ] as Set) - - private final TitusCloudProvider titusCloudProvider - private final TitusClient titusClient - private final TitusAutoscalingClient titusAutoscalingClient - private final TitusLoadBalancerClient titusLoadBalancerClient - private final NetflixTitusCredentials account - private final TitusRegion titusRegion - private final String region - private final ObjectMapper objectMapper - private final OnDemandMetricsSupport metricsSupport - private final Provider awsLookupUtil - private final long pollIntervalMillis - private final long timeoutMillis - - TitusClusterCachingAgent(TitusCloudProvider titusCloudProvider, - TitusClientProvider titusClientProvider, - NetflixTitusCredentials account, - TitusRegion titusRegion, - ObjectMapper objectMapper, - Registry registry, - Provider awsLookupUtil, - pollIntervalMillis, - timeoutMillis) { - this.account = account - this.titusRegion = titusRegion - this.region = titusRegion.name - - this.titusCloudProvider = titusCloudProvider - this.objectMapper = objectMapper - this.metricsSupport = new OnDemandMetricsSupport( - registry, - this, - "${titusCloudProvider.id}:${OnDemandAgent.OnDemandType.ServerGroup}" as String - ) - this.titusClient = titusClientProvider.getTitusClient(account, region) - this.titusAutoscalingClient = titusClientProvider.getTitusAutoscalingClient(account, region) - this.titusLoadBalancerClient = titusClientProvider.getTitusLoadBalancerClient(account, region) - this.awsLookupUtil = awsLookupUtil - this.pollIntervalMillis = pollIntervalMillis - this.timeoutMillis = timeoutMillis - } - - @Override - String getProviderName() { - TitusCachingProvider.PROVIDER_NAME - } - - @Override - String getOnDemandAgentType() { - return "${getAgentType()}-OnDemand" - } - - @Override - OnDemandMetricsSupport getMetricsSupport() { - return metricsSupport - } - - @Override - boolean handles(OnDemandAgent.OnDemandType type, String cloudProvider) { - return type == OnDemandAgent.OnDemandType.ServerGroup && cloudProvider == titusCloudProvider.id - } - - @Override - OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map data) { - if (["serverGroupName", "account", "region"].any { !data.containsKey(it) }) { - return null - } - - if (account.name != data.account) { - return null - } - - if (region != data.region) { - return null - } - - Job job = metricsSupport.readData { - titusClient.findJobByName(data.serverGroupName as String) - } - - return onDemand(providerCache, job, data) - } - - /** - * Avoid writing cache results to both ON_DEMAND and SERVER_GROUPS, etc. - * - * By writing a minimal record to ON_DEMAND only, we eliminate significant overhead (redis and network) at the cost - * of an increase in time before a change becomes visible in the UI. - * - * A change will not be visible until a caching cycle has completed. - */ - private OnDemandResult onDemand(ProviderCache providerCache, Job job, Map data) { - def serverGroupKey = Keys.getServerGroupKey(data.serverGroupName as String, account.name, region) - def cacheResults = [:] - - if (!job) { - providerCache.evictDeletedItems(ON_DEMAND.ns, [serverGroupKey]) - } else { - def cacheData = metricsSupport.onDemandStore { - new DefaultCacheData( - serverGroupKey, - 10 * 60, // ttl is 10 minutes, - [ - cacheTime : new Date(), - cacheResults: [:] - ], - [:] - ) - } - - cacheResults[ON_DEMAND.ns.toString()] = [cacheData] - } - - Map> evictions = job ? 
[:] : [(SERVER_GROUPS.ns): [serverGroupKey]] - - log.info("minimal onDemand cache refresh (data: ${data}, evictions: ${evictions})") - return new OnDemandResult( - sourceAgentType: getOnDemandAgentType(), - cacheResult: new DefaultCacheResult(cacheResults), - evictions: evictions - ) - } - - @Override - Collection pendingOnDemandRequests(ProviderCache providerCache) { - Set keys = providerCache.getIdentifiers('onDemand').findAll { - def key = Keys.parse(it) - return key && key.type == SERVER_GROUPS.ns && key.account == account.name && key.region == region - } - - return fetchPendingOnDemandRequests(providerCache, keys) - } - - @Override - Map pendingOnDemandRequest(ProviderCache providerCache, String id) { - def pendingOnDemandRequests = fetchPendingOnDemandRequests(providerCache, [id]) - return pendingOnDemandRequests?.getAt(0) - } - - @Override - String getAgentType() { - "${account.name}/${region}/${TitusClusterCachingAgent.simpleName}" - } - - @Override - Collection getProvidedDataTypes() { - types - } - - @Override - Optional> getCacheKeyPatterns() { - return [ - (SERVER_GROUPS.ns): Keys.getServerGroupKey('*', '*', account.name, region) - ] - } - - static class MutableCacheData implements CacheData { - final String id - int ttlSeconds = -1 - final Map attributes = [:] - final Map> relationships = [:].withDefault { [] as Set } - - public MutableCacheData(String id) { - this.id = id - } - - @JsonCreator - public MutableCacheData(@JsonProperty("id") String id, - @JsonProperty("attributes") Map attributes, - @JsonProperty("relationships") Map> relationships) { - this(id) - this.attributes.putAll(attributes) - this.relationships.putAll(relationships) - } - } - - private Map createCache() { - [:].withDefault { String id -> new MutableCacheData(id) } - } - - @Override - CacheResult loadData(ProviderCache providerCache) { - Long start = System.currentTimeMillis() - List jobs = titusClient.getAllJobsWithTasks() - - List evictFromOnDemand = [] - List keepInOnDemand = [] - - def serverGroupKeys = jobs.collect { job -> Keys.getServerGroupKey(job.name, account.name, region) } as Set - def pendingOnDemandRequestKeys = providerCache - .filterIdentifiers(ON_DEMAND.ns, Keys.getServerGroupKey("*", "*", account.name, region)) - .findAll { serverGroupKeys.contains(it) } - - def pendingOnDemandRequestsForServerGroups = providerCache.getAll(ON_DEMAND.ns, pendingOnDemandRequestKeys) - pendingOnDemandRequestsForServerGroups.each { CacheData onDemandEntry -> - if (onDemandEntry.attributes.cacheTime < start && onDemandEntry.attributes.processedCount > 0) { - evictFromOnDemand << onDemandEntry - } else { - keepInOnDemand << onDemandEntry - } - } - - def onDemandMap = keepInOnDemand.collectEntries { CacheData onDemandEntry -> [(onDemandEntry.id): onDemandEntry] } - CacheResult result = buildCacheResult(jobs, onDemandMap, evictFromOnDemand*.id) - - result.cacheResults[ON_DEMAND.ns].each { CacheData onDemandEntry -> - onDemandEntry.attributes.processedTime = System.currentTimeMillis() - onDemandEntry.attributes.processedCount = (onDemandEntry.attributes.processedCount ?: 0) + 1 - } - - return result - } - - private Collection fetchPendingOnDemandRequests(ProviderCache providerCache, Collection keys) { - return providerCache.getAll('onDemand', keys, RelationshipCacheFilter.none()).collect { - [ - id : it.id, - details : Keys.parse(it.id), - cacheTime : it.attributes.cacheTime, - processedCount: it.attributes.processedCount, - processedTime : it.attributes.processedTime - ] - } - } - - private CacheResult 
buildCacheResult(List jobs, - Map onDemandKeep = [:], - List onDemandEvict = []) { - Map applications = createCache() - Map clusters = createCache() - Map serverGroups = createCache() - Map instances = createCache() - Map targetGroups = createCache() - List allScalingPolicies = titusAutoscalingClient ? titusAutoscalingClient.getAllScalingPolicies() : [] - // Ignore policies in a Deleted state (may need to revisit) - Map allLoadBalancers = [:] - try { - allLoadBalancers = titusLoadBalancerClient ? titusLoadBalancerClient.allLoadBalancers : [:] - } catch (Exception e) { - log.error("Failed to load load balancers for ${account.name}:${region}", e) - } - List cacheablePolicyStates = [ScalingPolicyState.Pending, ScalingPolicyState.Applied, ScalingPolicyState.Deleting] - Map titusSecurityGroupCache = [:] - - def serverGroupDatas = jobs.collect { job -> - List scalingPolicies = allScalingPolicies.findResults { - it.jobId == job.id && cacheablePolicyStates.contains(it.policyState.state) ? - new ScalingPolicyData(id: it.id.id, policy: it.scalingPolicy, status: it.policyState) : - null - } - List loadBalancers = allLoadBalancers.get(job.id) ?: [] - return new ServerGroupData(job, scalingPolicies, loadBalancers, account.name, region, account.stack) - } - - serverGroupDatas.each { data -> - cacheApplication(data, applications) - cacheCluster(data, clusters) - cacheTargetGroups(data, targetGroups) - } - - // caching _all_ jobs at once allows us to optimize the security group lookups - cacheServerGroups(serverGroupDatas, serverGroups, instances, titusSecurityGroupCache) - - new DefaultCacheResult( - [(APPLICATIONS.ns) : applications.values(), - (CLUSTERS.ns) : clusters.values(), - (SERVER_GROUPS.ns): serverGroups.values(), - (INSTANCES.ns) : instances.values(), - (TARGET_GROUPS.ns): targetGroups.values(), - (ON_DEMAND.ns) : onDemandKeep.values() - ], - [(ON_DEMAND.ns): onDemandEvict] - ) - } - - private void cacheApplication(ServerGroupData data, Map applications) { - applications[data.appName].with { - attributes.name = data.name.app - relationships[CLUSTERS.ns].add(data.cluster) - relationships[SERVER_GROUPS.ns].add(data.serverGroup) - relationships[TARGET_GROUPS.ns].addAll(data.targetGroupKeys) - } - } - - private void cacheTargetGroups(ServerGroupData data, Map targetGroups) { - for (String targetGroupKey : data.targetGroupKeys) { - targetGroups[targetGroupKey].with { - relationships[APPLICATIONS.ns].add(data.appName) - relationships[SERVER_GROUPS.ns].add(data.serverGroup) - } - } - } - - private void cacheCluster(ServerGroupData data, Map clusters) { - clusters[data.cluster].with { - attributes.name = data.name.cluster - relationships[APPLICATIONS.ns].add(data.appName) - relationships[SERVER_GROUPS.ns].add(data.serverGroup) - relationships[TARGET_GROUPS.ns].addAll(data.targetGroupKeys) - } - } - - private void cacheServerGroups(List datas, - Map serverGroups, - Map instances, - Map titusSecurityGroupCache) { - def allJobs = datas*.job - - datas.each { data -> - serverGroups[data.serverGroup].with { - try { - Job job = objectMapper.convertValue(data.job, Job.class) - List policies = data.scalingPolicies ? 
data.scalingPolicies.collect { - // There is probably a better way to convert a protobuf to a Map, but I don't know what it is - [ - id : it.id, - status: [state: it.status.state.name(), reason: it.status.pendingReason], - policy: objectMapper.readValue(JsonFormat.printer().print(it.policy), Map) - ] - } : [] - - // tasks are cached independently as instances so avoid the overhead of also storing on the serialized job - def jobTasks = job.tasks - job.tasks = [] - attributes.job = job - attributes.scalingPolicies = policies - attributes.tasks = jobTasks.collect { [id: it.id, instanceId: it.instanceId] } - attributes.region = region - attributes.account = account.name - attributes.targetGroups = data.targetGroupNames - relationships[APPLICATIONS.ns].add(data.appName) - relationships[CLUSTERS.ns].add(data.cluster) - relationships[INSTANCES.ns].addAll(data.instanceIds) - relationships[TARGET_GROUPS.ns].addAll(data.targetGroupKeys) - for (Task task : jobTasks) { - def instanceData = new InstanceData(job, task, account.name, region, account.stack) - cacheInstance(instanceData, instances) - } - } catch (Exception e) { - log.error("Failed to cache ${data.job.name} in ${account.name}", e) - } - } - } - } - - private void cacheInstance(InstanceData data, Map instances) { - instances[data.instanceId].with { - Task task = objectMapper.convertValue(data.task, Task) - attributes.task = task - Map job = objectMapper.convertValue(data.job, Map) - job.remove('tasks') - attributes.job = job - attributes.put(HEALTH.ns, [getTitusHealth(task)]) - relationships[IMAGES.ns].add(data.imageId) - if (data.serverGroup) { - relationships[SERVER_GROUPS.ns].add(data.serverGroup) - } else { - relationships[SERVER_GROUPS.ns].clear() - } - } - } - - private class ScalingPolicyData { - String id - ScalingPolicy policy - ScalingPolicyStatus status - } - - private class ServerGroupData { - - final Job job - List scalingPolicies - final Names name - final String appName - final String cluster - final String serverGroup - final Set instanceIds - final String region - final Set targetGroupKeys - final Set targetGroupNames - final String account - - ServerGroupData(Job job, List scalingPolicies, List targetGroups, String account, String region, String stack) { - this.job = job - this.scalingPolicies = scalingPolicies - - String asgName = job.name - if (job.labels && job.labels['name']) { - asgName = job.labels['name'] - } else { - if (job.appName) { - def asgNameBuilder = new AutoScalingGroupNameBuilder() - asgNameBuilder.setAppName(job.appName) - asgNameBuilder.setDetail(job.jobGroupDetail) - asgNameBuilder.setStack(job.jobGroupStack) - String version = job.jobGroupSequence - asgName = asgNameBuilder.buildGroupName() + (version ? 
"-${version}" : '') - } - } - - name = Names.parseName(asgName) - appName = Keys.getApplicationKey(name.app) - cluster = Keys.getClusterKey(name.cluster, name.app, account) - this.region = region - this.account = account - serverGroup = Keys.getServerGroupKey(job.name, account, region) - instanceIds = (job.tasks.id.collect { - Keys.getInstanceKey(it, getAwsAccountId(account, region), stack, region) - } as Set).asImmutable() - - targetGroupNames = (targetGroups.collect { - ArnUtils.extractTargetGroupName(it).get() - } as Set).asImmutable() - - targetGroupKeys = (targetGroupNames.collect { - AwsKeys.getTargetGroupKey(it, getAwsAccountName(account, region), region, TargetTypeEnum.Ip.toString(), getAwsVpcId(account, region)) - } as Set).asImmutable() - - } - } - - private String getAwsAccountId(String account, String region) { - awsLookupUtil.get().awsAccountId(account, region) - } - - private String getAwsAccountName(String account, String region) { - awsLookupUtil.get().awsAccountName(account, region) - } - - private String getAwsVpcId(String account, String region) { - awsLookupUtil.get().awsVpcId(account, region) - } - - private class InstanceData { - private final Job job - private final Task task - private final String instanceId - private final String serverGroup - private final String imageId - - public InstanceData(Job job, Task task, String account, String region, String stack) { - this.job = job - this.task = task - this.instanceId = Keys.getInstanceKey(task.id, getAwsAccountId(account, region), stack, region) - this.serverGroup = job.name - this.imageId = "${job.applicationName}:${job.version}" - } - } - - private Map getTitusHealth(Task task) { - TaskState taskState = task.state - HealthState healthState = HealthState.Unknown - if (taskState in [TaskState.STOPPED, TaskState.FAILED, TaskState.CRASHED, TaskState.FINISHED, TaskState.DEAD, TaskState.TERMINATING]) { - healthState = HealthState.Down - } else if (taskState in [TaskState.STARTING, TaskState.DISPATCHED, TaskState.PENDING, TaskState.QUEUED]) { - healthState = HealthState.Starting - } else { - healthState = HealthState.Unknown - } - [type: 'Titus', healthClass: 'platform', state: healthState.toString()] - } - - @Override - public long getPollIntervalMillis() { - return pollIntervalMillis - } - - @Override - public long getTimeoutMillis() { - return timeoutMillis - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusInstanceCachingAgent.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusInstanceCachingAgent.java deleted file mode 100644 index 2aab3c93cfb..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusInstanceCachingAgent.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.caching.agents; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.Registry; -import com.netflix.spectator.api.histogram.PercentileTimer; -import com.netflix.spinnaker.cats.agent.*; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.model.HealthState; -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; -import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider; -import com.netflix.spinnaker.clouddriver.titus.caching.Keys; -import com.netflix.spinnaker.clouddriver.titus.caching.TitusCachingProvider; -import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil; -import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; -import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient; -import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion; -import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState; -import com.netflix.spinnaker.clouddriver.titus.client.model.Task; -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Provider; -import java.util.*; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; -import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.*; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Caching agent for Titus tasks, mapping to the concept of Spinnaker Instances - * A Titus job has a set of Titus tasks. 
- */ -public class TitusInstanceCachingAgent implements CachingAgent { - - private static final Logger log = LoggerFactory.getLogger(TitusInstanceCachingAgent.class); - private static final TypeReference> ATTRIBUTES = new TypeReference>() {}; - - private static final java.util.Set types = Collections.unmodifiableSet(Stream.of( - AUTHORITATIVE.forType(INSTANCES.ns), - INFORMATIVE.forType(SERVER_GROUPS.ns) - ).collect(Collectors.toSet())); - - private static final List DOWN_TASK_STATES = Arrays.asList(TaskState.STOPPED, TaskState.FAILED, TaskState.CRASHED, TaskState.FINISHED, TaskState.DEAD, TaskState.TERMINATING); - private static final List STARTING_TASK_STATES = Arrays.asList(TaskState.STARTING, TaskState.DISPATCHED, TaskState.PENDING, TaskState.QUEUED); - - private final TitusClient titusClient; - private final NetflixTitusCredentials account; - private final TitusRegion region; - private final ObjectMapper objectMapper; - private final Provider awsLookupUtil; - private final Id metricId; - private final Registry registry; - - public TitusInstanceCachingAgent(TitusClientProvider titusClientProvider, - NetflixTitusCredentials account, - TitusRegion region, - ObjectMapper objectMapper, - Registry registry, - Provider awsLookupUtil) { - this.account = account; - this.region = region; - - this.objectMapper = objectMapper; - this.titusClient = titusClientProvider.getTitusClient(account, region.getName()); - this.awsLookupUtil = awsLookupUtil; - this.registry = registry; - - this.metricId = registry.createId("titus.cache.instance").withTag("account", account.getName()).withTag("region", region.getName()); - } - - @Override - public Collection getProvidedDataTypes() { - return types; - } - - @Override - public Optional> getCacheKeyPatterns() { - Map cachekeyPatterns = new HashMap<>(); - cachekeyPatterns.put(SERVER_GROUPS.ns, Keys.getServerGroupV2Key("*", "*", account.getName(), region.getName())); - cachekeyPatterns.put(INSTANCES.ns, Keys.getInstanceV2Key("*", account.getName(), region.getName())); - return Optional.of(cachekeyPatterns); - } - - @Override - public String getAgentType() { - return account.getName() + "/" + region.getName() + "/" + TitusInstanceCachingAgent.class.getSimpleName(); - } - - @Override - public String getProviderName() { - return TitusCachingProvider.PROVIDER_NAME; - } - - @Override - public boolean handlesAccount(String accountName) { - return false; - } - - static class MutableCacheData implements CacheData { - - final String id; - int ttlSeconds = -1; - final Map attributes = new HashMap<>(); - final Map> relationships = new HashMap<>(); - - public MutableCacheData(String id) { - this.id = id; - } - - @Override - public String getId() { - return id; - } - - @Override - public int getTtlSeconds() { - return ttlSeconds; - } - - @Override - public Map getAttributes() { - return attributes; - } - - @Override - public Map> getRelationships() { - return relationships; - } - } - - private Map createCache() { - return new HashMap<>(); - } - - @Override - public CacheResult loadData(ProviderCache providerCache) { - log.info("Describing items in {}", getAgentType()); - Long startTime = System.currentTimeMillis(); - - List tasks = titusClient.getAllTasks(); - - // TODO emjburns: do we want to use timer or PercentileTimer? 
- // PercentileTimer gives us better data but is more runtime expensive to call - PercentileTimer - .get(registry, metricId.withTag("operation", "getAllTasks")) - .record(System.currentTimeMillis() - startTime, MILLISECONDS); - - // Titus tasks only know the job ID, we get all job names from titus in one call - // and use them to cache the instances. - Long jobNamesStartTime = System.currentTimeMillis(); - Map jobNames = titusClient.getAllJobNames(); - PercentileTimer - .get(registry, metricId.withTag("operation", "getAllJobNames")) - .record(System.currentTimeMillis() - jobNamesStartTime, MILLISECONDS); - - - Map serverGroups = createCache(); - Map instances = createCache(); - - for (Task task : tasks) { - InstanceData data = new InstanceData(task, jobNames.get(task.getJobId()), account.getName(), region.getName()); - cacheInstance(data, instances); - cacheServerGroup(data, serverGroups); - } - - log.info("Caching {} instances in {}", instances.size(), getAgentType()); - log.info("Caching {} server groups in {}", serverGroups.size(), getAgentType()); - - Map> cacheResult = new HashMap<>(); - cacheResult.put(INSTANCES.ns, instances.values()); - cacheResult.put(SERVER_GROUPS.ns, serverGroups.values()); - - PercentileTimer - .get(registry, metricId.withTag("operation", "loadData")) - .record(System.currentTimeMillis() - startTime, MILLISECONDS); - log.info("Caching completed in {}s in {}", MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime), getAgentType()); - - return new DefaultCacheResult(cacheResult); - } - - private void cacheInstance(InstanceData data, Map instances) { - CacheData instanceCache = instances.getOrDefault(data.instanceId, new MutableCacheData(data.instanceId)); - instanceCache.getAttributes().putAll(objectMapper.convertValue(data.task, ATTRIBUTES)); - instanceCache.getAttributes().put(HEALTH.ns, Collections.singletonList(getTitusHealth(data.task))); - instanceCache.getAttributes().put("task", data.task); - instanceCache.getAttributes().put("jobId", data.jobId); - - if (!data.serverGroup.isEmpty()) { - instanceCache.getRelationships().computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()).add(data.serverGroup); - } else { - instanceCache.getRelationships().computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()).clear(); - } - instances.put(data.instanceId, instanceCache); - } - - private void cacheServerGroup(InstanceData data, Map serverGroups) { - if (!data.serverGroup.isEmpty()) { - CacheData serverGroupCache = serverGroups.getOrDefault(data.serverGroup, new MutableCacheData(data.serverGroup)); - serverGroupCache.getRelationships().computeIfAbsent(INSTANCES.ns, key -> new HashSet<>()).add(data.instanceId); - serverGroups.put(data.serverGroup, serverGroupCache); - } - } - - private Map getTitusHealth(Task task) { - TaskState taskState = task.getState(); - HealthState healthState = HealthState.Unknown; - if (DOWN_TASK_STATES.contains(taskState)) { - healthState = HealthState.Down; - } else if (STARTING_TASK_STATES.contains(taskState)) { - healthState = HealthState.Starting; - } - - Map response = new HashMap<>(); - response.put("type", "Titus"); - response.put("healthClass", "platform"); - response.put("state", healthState.toString()); - return response; - } - - private String getAwsAccountId(String account, String region) { - return awsLookupUtil.get().awsAccountId(account, region); - } - - private String getAwsVpcId(String account, String region) { - return awsLookupUtil.get().awsVpcId(account, region); - } - - private class InstanceData { - // The 
instance key, not the task id - private final String instanceId; - private final Task task; - private final String jobId; - private final String serverGroup; - - InstanceData(Task task, String jobName, String account, String region) { - this.instanceId = Keys.getInstanceV2Key(task.getId(), account, region); - this.task = task; - this.jobId = task.getJobId(); - this.serverGroup = jobName != null - ? Keys.getServerGroupV2Key(jobName, account, region) - : ""; - } - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusV2ClusterCachingAgent.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusV2ClusterCachingAgent.java deleted file mode 100644 index f9daf023bcf..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusV2ClusterCachingAgent.java +++ /dev/null @@ -1,632 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.caching.agents; - -import com.amazonaws.services.elasticloadbalancingv2.model.TargetTypeEnum; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.protobuf.util.JsonFormat; -import com.netflix.frigga.Names; -import com.netflix.frigga.autoscaling.AutoScalingGroupNameBuilder; -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.Registry; -import com.netflix.spectator.api.histogram.PercentileTimer; -import com.netflix.spinnaker.cats.agent.AgentDataType; -import com.netflix.spinnaker.cats.agent.CacheResult; -import com.netflix.spinnaker.cats.agent.CachingAgent; -import com.netflix.spinnaker.cats.agent.DefaultCacheResult; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.cats.cache.DefaultCacheData; -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; -import com.netflix.spinnaker.cats.provider.ProviderCache; -import com.netflix.spinnaker.clouddriver.aws.data.ArnUtils; -import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent; -import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport; -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; -import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider; -import com.netflix.spinnaker.clouddriver.titus.caching.Keys; -import com.netflix.spinnaker.clouddriver.titus.caching.TitusCachingProvider; -import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil; -import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; -import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient; -import 
com.netflix.spinnaker.clouddriver.titus.client.TitusRegion; -import com.netflix.spinnaker.clouddriver.titus.client.model.Job; -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; -import com.netflix.titus.grpc.protogen.ScalingPolicy; -import com.netflix.titus.grpc.protogen.ScalingPolicyResult; -import com.netflix.titus.grpc.protogen.ScalingPolicyStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Provider; -import java.util.*; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; -import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; -import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.*; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -public class TitusV2ClusterCachingAgent implements CachingAgent, CustomScheduledAgent, OnDemandAgent { - - private static final Logger log = LoggerFactory.getLogger(TitusV2ClusterCachingAgent.class); - - private static final TypeReference> ANY_MAP = new TypeReference>() {}; - - static final java.util.Set types = Collections.unmodifiableSet(Stream.of( - AUTHORITATIVE.forType(SERVER_GROUPS.ns), - AUTHORITATIVE.forType(APPLICATIONS.ns), - INFORMATIVE.forType(IMAGES.ns), - INFORMATIVE.forType(CLUSTERS.ns), - INFORMATIVE.forType(TARGET_GROUPS.ns) - ).collect(Collectors.toSet())); - - private final TitusClient titusClient; - private final TitusAutoscalingClient titusAutoscalingClient; - private final TitusLoadBalancerClient titusLoadBalancerClient; - private final NetflixTitusCredentials account; - private final TitusRegion region; - private final ObjectMapper objectMapper; - private final OnDemandMetricsSupport metricsSupport; - private final Provider awsLookupUtil; - private final long pollIntervalMillis; - private final long timeoutMillis; - private final Registry registry; - private final Id metricId; - - public TitusV2ClusterCachingAgent(TitusClientProvider titusClientProvider, - NetflixTitusCredentials account, - TitusRegion region, - ObjectMapper objectMapper, - Registry registry, - Provider awsLookupUtil, - Long pollIntervalMillis, - Long timeoutMillis) { - this.account = account; - this.region = region; - - this.objectMapper = objectMapper; - this.metricsSupport = new OnDemandMetricsSupport( - registry, - this, - TitusCloudProvider.ID + ":" + OnDemandType.ServerGroup - ); - this.titusClient = titusClientProvider.getTitusClient(account, region.getName()); - this.titusAutoscalingClient = titusClientProvider.getTitusAutoscalingClient(account, region.getName()); - this.titusLoadBalancerClient = titusClientProvider.getTitusLoadBalancerClient(account, region.getName()); - this.awsLookupUtil = awsLookupUtil; - this.pollIntervalMillis = pollIntervalMillis; - this.timeoutMillis = timeoutMillis; - this.registry = registry; - - this.metricId = registry.createId("titus.cache.cluster").withTag("account", account.getName()).withTag("region", region.getName()); - } - - @Override - public long getPollIntervalMillis() { - return pollIntervalMillis; - } - - @Override - public long getTimeoutMillis() { - return timeoutMillis; - } - - @Override - public String getProviderName() { - return TitusCachingProvider.PROVIDER_NAME; - } - - @Override - public String getAgentType() { - return account.getName() + "/" + region.getName() + "/" + 
TitusV2ClusterCachingAgent.class.getSimpleName(); - } - - @Override - public String getOnDemandAgentType() { - return getAgentType() + "-OnDemand"; - } - - @Override - public OnDemandMetricsSupport getMetricsSupport() { - return metricsSupport; - } - - @Override - public Collection getProvidedDataTypes() { - return types; - } - - @Override - public Optional> getCacheKeyPatterns() { - Map cachekeyPatterns = new HashMap<>(); - cachekeyPatterns.put(SERVER_GROUPS.ns, Keys.getServerGroupV2Key("*", "*", account.getName(), region.getName())); - return Optional.of(cachekeyPatterns); - } - - @Override - public boolean handles(OnDemandType type, String cloudProvider) { - return type == OnDemandType.ServerGroup && cloudProvider.equals(TitusCloudProvider.ID); - } - - @Override - public OnDemandResult handle(ProviderCache providerCache, Map data) { - Long startTime = System.currentTimeMillis(); - - if (!data.containsKey("serverGroupName") || !data.containsKey("account") || !data.containsKey("region")) { - return null; - } - - if (!account.getName().equals(data.get("account"))) { - return null; - } - - if (!region.getName().equals(data.get("region"))) { - return null; - } - - Job job = metricsSupport.readData( () -> { - try { - return titusClient.findJobByName(data.get("serverGroupName").toString()); - } catch (io.grpc.StatusRuntimeException e) { - return null; - } - }); - - OnDemandResult onDemandResult = onDemand(providerCache, job, data); - PercentileTimer - .get(registry, metricId.withTag("operation", "handleOnDemand")) - .record(System.currentTimeMillis() - startTime, MILLISECONDS); - - return onDemandResult; - } - - /** - * Avoid writing cache results to both ON_DEMAND and SERVER_GROUPS, etc. - * - * By writing a minimal record to ON_DEMAND only, we eliminate significant overhead (redis and network) at the cost - * of an increase in time before a change becomes visible in the UI. - * - * A change will not be visible until a caching cycle has completed. - */ - private OnDemandResult onDemand(ProviderCache providerCache, Job job, Map data) { - String serverGroupKey = Keys.getServerGroupV2Key(data.get("serverGroupName").toString(), account.getName(), region.getName()); - Map> cacheResults = new HashMap<>(); - if (job == null) { - // avoid writing an empty onDemand cache record (instead delete any that may have previously existed) - providerCache.evictDeletedItems(ON_DEMAND.ns, Collections.singletonList(serverGroupKey)); - } else { - Map attributes = new HashMap<>(); - attributes.put("cacheTime", new Date()); - attributes.put("cacheResults", Collections.emptyMap()); - - CacheData cacheData = metricsSupport.onDemandStore( () -> new DefaultCacheData( - serverGroupKey, - 10 * 60, // ttl is 10 minutes - attributes, - Collections.emptyMap() - )); - - cacheResults.computeIfAbsent(ON_DEMAND.ns, key -> new ArrayList<>()).add(cacheData); - } - Map> evictions = job != null - ? 
Collections.emptyMap() - : Collections.singletonMap(SERVER_GROUPS.ns, Collections.singletonList(serverGroupKey)); - - log.info("minimal onDemand cache refresh (data: {}, evictions: {})", data, evictions); - return new OnDemandResult(getOnDemandAgentType(), new DefaultCacheResult(cacheResults), evictions); - } - - @Override - public CacheResult loadData(ProviderCache providerCache) { - Long startTime = System.currentTimeMillis(); - - log.info("Describing items in {}", getAgentType()); - List evictFromOnDemand = new ArrayList<>(); - List keepInOnDemand = new ArrayList<>(); - - - List jobs = titusClient.getAllJobsWithoutTasks(); - PercentileTimer - .get(registry, metricId.withTag("operation", "getAllJobsWithoutTasks")) - .record(System.currentTimeMillis() - startTime, MILLISECONDS); - - Long startScalingPolicyTime = System.currentTimeMillis(); - List scalingPolicyResults = titusAutoscalingClient != null - ? titusAutoscalingClient.getAllScalingPolicies() - : Collections.emptyList(); - PercentileTimer - .get(registry, metricId.withTag("operation", "getScalingPolicies")) - .record(System.currentTimeMillis() - startScalingPolicyTime, MILLISECONDS); - - Long startLoadBalancerTime = System.currentTimeMillis(); - Map> allLoadBalancers = titusLoadBalancerClient != null - ? titusLoadBalancerClient.getAllLoadBalancers() - : Collections.emptyMap(); - PercentileTimer - .get(registry, metricId.withTag("operation", "getLoadBalancers")) - .record(System.currentTimeMillis() - startLoadBalancerTime, MILLISECONDS); - - Long startJobIdsTime = System.currentTimeMillis(); - Map> taskAndJobIds = titusClient.getTaskIdsForJobIds(); - PercentileTimer - .get(registry, metricId.withTag("operation", "getTaskIdsForJobIds")) - .record(System.currentTimeMillis() - startJobIdsTime, MILLISECONDS); - - List serverGroupKeys = jobs.stream() - .map(job -> Keys.getServerGroupV2Key(job.getName(), account.getName(), region.getName())) - .collect(Collectors.toList()); - - List pendingOnDemandRequestKeys = providerCache - .filterIdentifiers(ON_DEMAND.ns, Keys.getServerGroupV2Key("*", "*", account.getName(), region.getName())) - .stream().filter(serverGroupKeys::contains).collect(Collectors.toList()); - - Collection pendingOnDemandRequestsForServerGroups = providerCache.getAll(ON_DEMAND.ns, pendingOnDemandRequestKeys); - - pendingOnDemandRequestsForServerGroups.forEach( onDemandEntry -> { - if (Long.parseLong(onDemandEntry.getAttributes().get("cacheTime").toString()) < startTime - && Long.parseLong(onDemandEntry.getAttributes().getOrDefault("processedCount", "0").toString()) > 0) { - evictFromOnDemand.add(onDemandEntry); - } else { - keepInOnDemand.add(onDemandEntry); - } - }); - - Map onDemandMap = keepInOnDemand.stream().collect(Collectors.toMap(CacheData::getId, it -> it)); - List evictFromOnDemandIds = evictFromOnDemand.stream().map(CacheData::getId).collect(Collectors.toList()); - - CacheResult result = buildCacheResult( - jobs, - scalingPolicyResults, - allLoadBalancers, - taskAndJobIds, - onDemandMap, - evictFromOnDemandIds - ); - - result.getCacheResults().get(ON_DEMAND.ns).forEach( onDemandEntry -> { - onDemandEntry.getAttributes().put("processedTime", System.currentTimeMillis()); - onDemandEntry.getAttributes().put("processedCount", Long.parseLong(onDemandEntry.getAttributes().getOrDefault("processedCount", "0").toString()) + 1); - }); - - PercentileTimer - .get(registry, metricId.withTag("operation", "loadData")) - .record(System.currentTimeMillis() - startTime, MILLISECONDS); - return result; - } - - /** - * Used to build 
a cache result, whether normal or on demand, - * by transforming known Titus objects into Spinnaker cached objects - */ - private CacheResult buildCacheResult(List jobs, - List scalingPolicyResults, - Map> allLoadBalancers, - Map> taskAndJobIds, - Map onDemandKeep, - List onDemandEvict) { - if (onDemandKeep == null) { - onDemandKeep = new HashMap<>(); - } - if (onDemandEvict == null) { - onDemandEvict = new ArrayList<>(); - } - - // INITIALIZE CACHES - Map applicationCache = createCache(); - Map clusterCache = createCache(); - Map serverGroupCache = createCache(); - Map targetGroupCache = createCache(); - Map imageCache = createCache(); - - // Ignore policies in a Deleted state (may need to revisit) - List cacheablePolicyStates = Arrays.asList( - ScalingPolicyStatus.ScalingPolicyState.Pending, - ScalingPolicyStatus.ScalingPolicyState.Applied, - ScalingPolicyStatus.ScalingPolicyState.Deleting - ); - - List serverGroupDatas = jobs.stream() - .map( job -> { - List jobScalingPolicies = scalingPolicyResults.stream() - .filter( it -> it.getJobId().equalsIgnoreCase(job.getId()) && cacheablePolicyStates.contains(it.getPolicyState().getState())) - .map( it -> new ScalingPolicyData(it.getId().getId(), it.getScalingPolicy(), it.getPolicyState())) - .collect(Collectors.toList()); - - List jobLoadBalancers = allLoadBalancers.getOrDefault(job.getId(), Collections.emptyList()); - return new ServerGroupData(job, jobScalingPolicies, jobLoadBalancers, taskAndJobIds.get(job.getId()), account.getName(), region.getName()); - }) - .collect(Collectors.toList()); - - serverGroupDatas.forEach(data -> { - cacheApplication(data, applicationCache); - cacheCluster(data, clusterCache); - cacheServerGroup(data, serverGroupCache); - cacheImage(data, imageCache); - }); - - Map> cacheResults = new HashMap<>(); - cacheResults.put(APPLICATIONS.ns, applicationCache.values()); - cacheResults.put(CLUSTERS.ns, clusterCache.values()); - cacheResults.put(SERVER_GROUPS.ns, serverGroupCache.values()); - cacheResults.put(TARGET_GROUPS.ns, targetGroupCache.values()); - cacheResults.put(IMAGES.ns, imageCache.values()); - cacheResults.put(ON_DEMAND.ns, onDemandKeep.values()); - Map> evictions = new HashMap<>(); - evictions.put(ON_DEMAND.ns, onDemandEvict); - - log.info("Caching {} applications in {}", applicationCache.size(), getAgentType()); - log.info("Caching {} server groups in {}", serverGroupCache.size(), getAgentType()); - log.info("Caching {} clusters in {}", clusterCache.size(), getAgentType()); - log.info("Caching {} target groups in {}", targetGroupCache.size(), getAgentType()); - log.info("Caching {} images in {}", imageCache.size(), getAgentType()); - - return new DefaultCacheResult(cacheResults, evictions); - } - - /** - * Build authoritative cache object for applications based on server group data - */ - private void cacheApplication(ServerGroupData data, Map applications) { - CacheData applicationCache = applications.getOrDefault(data.appNameKey, new MutableCacheData(data.appNameKey)); - applicationCache.getAttributes().put("name", data.name.getApp()); - Map> relationships = applicationCache.getRelationships(); - relationships.computeIfAbsent(CLUSTERS.ns, key -> new HashSet<>()).add(data.clusterKey); - relationships.computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()).add(data.serverGroupKey); - relationships.computeIfAbsent(TARGET_GROUPS.ns, key -> new HashSet<>()).addAll(data.targetGroupKeys); - applications.put(data.appNameKey, applicationCache); - } - - /** - * Build informative cache object for clusters based 
on server group data - */ - private void cacheCluster(ServerGroupData data, Map clusters) { - CacheData clusterCache = clusters.getOrDefault(data.clusterKey, new MutableCacheData(data.clusterKey)); - clusterCache.getAttributes().put("name", data.name.getCluster()); - Map> relationships = clusterCache.getRelationships(); - relationships.computeIfAbsent(APPLICATIONS.ns, key -> new HashSet<>()).add(data.appNameKey); - relationships.computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()).add(data.serverGroupKey); - relationships.computeIfAbsent(TARGET_GROUPS.ns, key -> new HashSet<>()).addAll(data.targetGroupKeys); - clusters.put(data.clusterKey, clusterCache); - } - - private void cacheServerGroup(ServerGroupData data, Map serverGroups) { - CacheData serverGroupCache = serverGroups.getOrDefault(data.serverGroupKey, new MutableCacheData(data.serverGroupKey)); - List policies = data.scalingPolicies != null - ? data.scalingPolicies.stream().map(ScalingPolicyData::toMap).collect(Collectors.toList()) - : new ArrayList<>(); - - Map attributes = serverGroupCache.getAttributes(); - attributes.put("job", data.job); - attributes.put("scalingPolicies", policies); - attributes.put("region", region.getName()); - attributes.put("account", account.getName()); - attributes.put("targetGroups", data.targetGroupNames); - attributes.put("taskIds", data.taskIds); //todo: needed? - - Map> relationships = serverGroupCache.getRelationships(); - relationships.computeIfAbsent(APPLICATIONS.ns, key -> new HashSet<>()).add(data.appNameKey); - relationships.computeIfAbsent(CLUSTERS.ns, key -> new HashSet<>()).add(data.clusterKey); - relationships.computeIfAbsent(TARGET_GROUPS.ns, key -> new HashSet<>()).addAll(data.targetGroupKeys); - relationships.computeIfAbsent(IMAGES.ns, key -> new HashSet<>()).add(data.imageKey); - relationships.computeIfAbsent(INSTANCES.ns, key -> new HashSet<>()).addAll(data.taskKeys); - - serverGroups.put(data.serverGroupKey, serverGroupCache); - } - - private void cacheImage(ServerGroupData data, Map images) { - CacheData imageCache = images.getOrDefault(data.imageKey, new MutableCacheData(data.imageKey)); - imageCache.getRelationships().computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()).add(data.serverGroupKey); - images.put(data.imageKey, imageCache); - } - - @Override - public Collection pendingOnDemandRequests(ProviderCache providerCache) { - Set keys = providerCache.getIdentifiers("onDemand").stream() - .filter(it -> { - Map key = Keys.parse(it); - return key != null && - key.get("type").equals(SERVER_GROUPS.ns) && - key.get("account").equals(account.getName()) && - key.get("region").equals(region.getName()); - }) - .collect(Collectors.toSet()); - return fetchPendingOnDemandRequests(providerCache, keys); - } - - private Collection fetchPendingOnDemandRequests(ProviderCache providerCache, Collection keys) { - return providerCache.getAll("onDemand", keys, RelationshipCacheFilter.none()).stream() - .map(it -> { - Map result = new HashMap<>(); - result.put("id", it.getId()); - result.put("details", Keys.parse(it.getId())); - result.put("cacheTime", it.getAttributes().get("cacheTime")); - result.put("proccessedCount", it.getAttributes().get("processedCount")); - result.put("processedTime", it.getAttributes().get("processedTime")); - return result; - }) - .collect(Collectors.toList()); - } - - - @Override - public Map pendingOnDemandRequest(ProviderCache providerCache, String id) { - Collection pendingOnDemandRequests = fetchPendingOnDemandRequests(providerCache, 
Collections.singletonList(id)); - return pendingOnDemandRequests.isEmpty() - ? Collections.emptyMap() - : pendingOnDemandRequests.stream().findFirst().get(); - } - - private Map createCache() { - return new HashMap<>(); - } - - private String getAwsAccountId(String account, String region) { - return awsLookupUtil.get().awsAccountId(account, region); - } - - private String getAwsAccountName(String account, String region) { - return awsLookupUtil.get().awsAccountName(account, region); - } - - private String getAwsVpcId(String account, String region) { - return awsLookupUtil.get().awsVpcId(account, region); - } - - static class MutableCacheData implements CacheData { - final String id; - int ttlSeconds = -1; - final Map attributes = new HashMap<>(); - final Map> relationships = new HashMap<>(); - - public MutableCacheData(String id) { - this.id = id; - } - - @JsonCreator - public MutableCacheData(@JsonProperty("id") String id, - @JsonProperty("attributes") Map attributes, - @JsonProperty("relationships") Map> relationships) { - this(id); - this.attributes.putAll(attributes); - this.relationships.putAll(relationships); - } - - @Override - public String getId() { - return id; - } - - @Override - public int getTtlSeconds() { - return ttlSeconds; - } - - @Override - public Map getAttributes() { - return attributes; - } - - @Override - public Map> getRelationships() { - return relationships; - } - } - - private class ScalingPolicyData { - String id; - ScalingPolicy policy; - ScalingPolicyStatus status; - - ScalingPolicyData(ScalingPolicyResult scalingPolicyResult) { - this(scalingPolicyResult.getId().getId(), scalingPolicyResult.getScalingPolicy(), scalingPolicyResult.getPolicyState()); - } - - ScalingPolicyData(String id, ScalingPolicy policy, ScalingPolicyStatus status) { - this.id = id; - this.policy = policy; - this.status = status; - } - - protected Map toMap() { - Map status = new HashMap<>(); - status.put("state", this.status.getState().name()); - status.put("reason", this.status.getPendingReason()); - - Map result = new HashMap<>(); - result.put("id", id); - result.put("status", status); - - - try { - String scalingPolicy = JsonFormat.printer().print(policy); - result.put("policy", objectMapper.readValue(scalingPolicy, ANY_MAP)); - } catch (Exception e) { - log.warn("Failed to serialize scaling policy for scaling policy {}", e); - result.put("policy", Collections.emptyMap()); - } - - return result; - } - } - - private class ServerGroupData { - - final Job job; - List scalingPolicies; - final Names name; - final String appNameKey; - final String clusterKey; - final String serverGroupKey; - final String region; - final Set targetGroupKeys; - final Set targetGroupNames; - final String account; - final String imageId; - final String imageKey; - final List taskIds; - final List taskKeys; - - ServerGroupData(Job job, List scalingPolicies, List targetGroups, List taskIds, String account, String region) { - this.job = job; - this.scalingPolicies = scalingPolicies; - this.imageId = job.getApplicationName() + ":" + job.getVersion(); - this.imageKey = Keys.getImageV2Key(imageId, getAwsAccountId(account, region), region); - this.taskIds = taskIds; - this.taskKeys = taskIds == null - ? 
Collections.emptyList() - : taskIds.stream().map(it -> Keys.getInstanceV2Key(it, account, region)).collect(Collectors.toList()); - - String asgName = job.getName(); - if (job.getLabels().containsKey("name")) { - asgName = job.getLabels().get("name"); - } else { - if (job.getAppName() != null) { - AutoScalingGroupNameBuilder asgNameBuilder = new AutoScalingGroupNameBuilder(); - asgNameBuilder.setAppName(job.getAppName()); - asgNameBuilder.setDetail(job.getJobGroupDetail()); - asgNameBuilder.setStack(job.getJobGroupStack()); - String version = job.getJobGroupSequence(); - asgName = asgNameBuilder.buildGroupName() + (version != null ? "-" + version : ""); - } - } - - name = Names.parseName(asgName); - appNameKey = Keys.getApplicationKey(name.getApp()); - clusterKey = Keys.getClusterV2Key(name.getCluster(), name.getApp(), account); - this.region = region; - this.account = account; - serverGroupKey = Keys.getServerGroupV2Key(job.getName(), account, region); - - targetGroupNames = targetGroups.stream() - .map(ArnUtils::extractTargetGroupName) - .filter(Optional::isPresent) - .map(Optional::get) - .collect(Collectors.toSet()); - - targetGroupKeys = targetGroupNames.stream() - .map(it -> com.netflix.spinnaker.clouddriver.aws.data.Keys.getTargetGroupKey(it, getAwsAccountName(account, region), region, TargetTypeEnum.Ip.toString(), getAwsVpcId(account, region))) - .collect(Collectors.toSet()); - } - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusApplicationProvider.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusApplicationProvider.groovy index e30de38b96b..48645593159 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusApplicationProvider.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusApplicationProvider.groovy @@ -16,11 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.caching.providers -import com.fasterxml.jackson.core.type.TypeReference -import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.cats.cache.Cache -import com.netflix.spinnaker.cats.cache.CacheData -import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter import com.netflix.spinnaker.clouddriver.model.Application import com.netflix.spinnaker.clouddriver.model.ApplicationProvider import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider @@ -29,51 +25,55 @@ import com.netflix.spinnaker.clouddriver.titus.model.TitusApplication import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.APPLICATIONS -import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.CLUSTERS +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.SERVER_GROUPS @Component class TitusApplicationProvider implements ApplicationProvider { TitusCloudProvider titusCloudProvider private final Cache cacheView - private final ObjectMapper objectMapper @Autowired - TitusApplicationProvider(TitusCloudProvider titusCloudProvider, Cache cacheView, ObjectMapper objectMapper) { + TitusApplicationProvider(TitusCloudProvider titusCloudProvider, Cache cacheView) { this.titusCloudProvider = titusCloudProvider this.cacheView = cacheView - this.objectMapper = objectMapper } @Override Set getApplications(boolean expand) { - def 
relationships = expand ? RelationshipCacheFilter.include(CLUSTERS.ns) : RelationshipCacheFilter.none() - Collection applications = cacheView.getAll( - APPLICATIONS.ns, cacheView.filterIdentifiers(APPLICATIONS.ns, "${titusCloudProvider.id}:*"), relationships - ) - applications.collect this.&translate + String allTitusGlob = "${titusCloudProvider.id}:*" + + //ignoring expand since we are deriving existence of the app by presence of server groups + // rather than the application cacheData which is not reliably updated or evicted + Map>> appClusters = getAppClustersByAccount(allTitusGlob) + return appClusters.findResults {translate(it.key, appClusters) } } @Override Application getApplication(String name) { - translate(cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(name))) + name = name.toLowerCase() + String glob = Keys.getServerGroupV2Key("${name}*", "*", "*", "*") + return translate(name, getAppClustersByAccount(glob)) } - Application translate(CacheData cacheData) { - if (cacheData == null) { + Application translate(String name, Map>> appClustersByAccount) { + Map> clusterNames = appClustersByAccount.get(name) + if (!clusterNames) { return null } - String name = Keys.parse(cacheData.id).application - Map attributes = objectMapper.convertValue(cacheData.attributes, new TypeReference>() { - }) - Map> clusterNames = [:].withDefault { new HashSet() } - for (String clusterId : cacheData.relationships[CLUSTERS.ns]) { - Map cluster = Keys.parse(clusterId) - if (cluster.account && cluster.cluster) { - clusterNames[cluster.account].add(cluster.cluster) + Map attributes = Map.of("name", name) + return new TitusApplication(name, clusterNames, attributes) + } + + private Map>> getAppClustersByAccount(String glob) { + // app -> account -> [clusterName..] + Map>> appClustersByAccount = [:].withDefault { [:].withDefault { [] as Set } } + Collection serverGroupKeys = cacheView.filterIdentifiers(SERVER_GROUPS.ns, glob) + for (String key : serverGroupKeys) { + Map sg = Keys.parse(key) + if (sg && sg.application && sg.cluster && sg.account) { + appClustersByAccount.get(sg.application).get(sg.account).add(sg.cluster) } } - new TitusApplication(name, clusterNames, attributes) + return appClustersByAccount } - } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusClusterProvider.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusClusterProvider.groovy index 9b8197bcef8..e6120b6d317 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusClusterProvider.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusClusterProvider.groovy @@ -40,6 +40,8 @@ import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component +import javax.inject.Provider + import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.* @@ -52,21 +54,22 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup private final ObjectMapper objectMapper private final Logger log = LoggerFactory.getLogger(getClass()) - @Autowired - private final AwsLookupUtil awsLookupUtil - - @Autowired - private final CachingSchemaUtil cachingSchemaUtil + private final Provider awsLookupUtil + private final Provider cachingSchemaUtil @Autowired 
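// Note on the fields above: awsLookupUtil and cachingSchemaUtil are now
// constructor-injected as javax.inject.Provider handles instead of being
// @Autowired directly, and call sites go through .get(). Deferring
// resolution to first use like this is the usual way to avoid eager
// initialization (and bean-cycle issues) at construction time; that
// motivation is an inference from the pattern, not stated in this diff.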
TitusClusterProvider(TitusCloudProvider titusCloudProvider, TitusCachingProvider titusCachingProvider, Cache cacheView, - ObjectMapper objectMapper) { + ObjectMapper objectMapper, + Provider awsLookupUtil, + Provider cachingSchemaUtil) { this.titusCloudProvider = titusCloudProvider this.cacheView = cacheView this.titusCachingProvider = titusCachingProvider this.objectMapper = objectMapper + this.awsLookupUtil = awsLookupUtil + this.cachingSchemaUtil = cachingSchemaUtil } @Autowired(required = false) @@ -127,14 +130,7 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup */ @Override Set getClusters(String applicationName, String account, boolean includeDetails) { - CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName), - RelationshipCacheFilter.include(CLUSTERS.ns)) - if (application == null) { - return [] as Set - } - Collection clusterKeys = application.relationships[CLUSTERS.ns].findAll { - Keys.parse(it).account == account - } + Collection clusterKeys = cacheView.filterIdentifiers(CLUSTERS.ns, Keys.getClusterV2Key("*", applicationName, account)) Collection clusters = cacheView.getAll(CLUSTERS.ns, clusterKeys) translateClusters(clusters, includeDetails) as Set } @@ -148,9 +144,7 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup */ @Override TitusCluster getCluster(String application, String account, String name, boolean includeDetails) { - String clusterKey = (cachingSchemaUtil.getCachingSchemaForAccount(account) == CachingSchema.V1 - ? Keys.getClusterKey(name, application, account) - : Keys.getClusterV2Key(name, application, account)) + String clusterKey = Keys.getClusterV2Key(name, application, account) CacheData cluster = cacheView.get(CLUSTERS.ns, clusterKey) TitusCluster titusCluster = cluster ? translateClusters([cluster], includeDetails)[0] : null titusCluster @@ -170,9 +164,7 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup */ @Override TitusServerGroup getServerGroup(String account, String region, String name, boolean includeDetails) { - String serverGroupKey = (cachingSchemaUtil.getCachingSchemaForAccount(account) == CachingSchema.V1 - ? 
Keys.getServerGroupKey(name, account, region) - : Keys.getServerGroupV2Key(name, account, region)) + String serverGroupKey = Keys.getServerGroupV2Key(name, account, region) CacheData serverGroupData = cacheView.get(SERVER_GROUPS.ns, serverGroupKey) if (serverGroupData == null) { return null @@ -184,12 +176,15 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup serverGroup.placement.account = account serverGroup.placement.region = region serverGroup.scalingPolicies = serverGroupData.attributes.scalingPolicies + serverGroup.targetGroups = serverGroupData.attributes.targetGroups if (includeDetails) { serverGroup.instances = translateInstances(resolveRelationshipData(serverGroupData, INSTANCES.ns), Collections.singletonList(serverGroupData)).values() + if (serverGroup.targetGroups) { + awsLookupUtil.get().lookupTargetGroupHealth(job, serverGroup.instances) + } } - serverGroup.targetGroups = serverGroupData.attributes.targetGroups - serverGroup.accountId = awsLookupUtil.awsAccountId(account, region) - serverGroup.awsAccount = awsLookupUtil.lookupAccount(account, region)?.awsAccount + serverGroup.accountId = awsLookupUtil.get().awsAccountId(account, region) + serverGroup.awsAccount = awsLookupUtil.get().lookupAccount(account, region)?.awsAccount serverGroup } @@ -213,7 +208,11 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup account = Optional.ofNullable(account).orElse("*") region = Optional.ofNullable(region).orElse("*") - return cacheView.filterIdentifiers(SERVER_GROUPS.ns, Keys.getServerGroupKey("*", "*", account, region)) + Collection ids = cacheView.filterIdentifiers(SERVER_GROUPS.ns, Keys.getServerGroupKey("*", "*", account, region)) + + return ids.collect({ id -> + Keys.removeSchemaVersion(id) + }).toList() } @Override @@ -223,9 +222,12 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup // Private methods private Map> getClustersInternal(String applicationName, boolean includeDetails) { - CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName)) - if (application == null) return null - Collection clusters = translateClusters(resolveRelationshipData(application, CLUSTERS.ns), includeDetails) + Collection clusterIdentifiers = cacheView.filterIdentifiers(CLUSTERS.ns, Keys.getClusterV2Key("*", applicationName, "*")) + Collection clusterData = cacheView.getAll(CLUSTERS.ns, clusterIdentifiers, RelationshipCacheFilter.include(SERVER_GROUPS.ns)) + if (!clusterData) { + return null + } + Collection clusters = translateClusters(clusterData, includeDetails) clusters.groupBy { it.accountName }.collectEntries { k, v -> [k, new HashSet(v)] } } @@ -245,7 +247,8 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup cluster.serverGroups = clusterDataEntry.relationships[SERVER_GROUPS.ns]?.findResults { serverGroups.get(it) } cluster } - return clusters + //ensure we only return clusters that have serverGroups (to account for incremental cache updates) + return clusters.findAll {it.serverGroups } } /** @@ -272,9 +275,9 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup serverGroup.instances = serverGroup.instances ?: [] serverGroup.targetGroups = serverGroupEntry.attributes.targetGroups if (serverGroup.targetGroups) { - awsLookupUtil.lookupTargetGroupHealth(job, serverGroup.instances) + awsLookupUtil.get().lookupTargetGroupHealth(job, serverGroup.instances) } - serverGroup.awsAccount = awsLookupUtil.lookupAccount(serverGroupEntry.attributes.account, 
serverGroupEntry.attributes.region)?.awsAccount + serverGroup.awsAccount = awsLookupUtil.get().lookupAccount(serverGroupEntry.attributes.account, serverGroupEntry.attributes.region)?.awsAccount [(serverGroupEntry.id): serverGroup] } return serverGroups @@ -292,17 +295,30 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup Task task = objectMapper.convertValue(instanceEntry.attributes.task, Task) Job job - if (instanceEntry.attributes.job == null && instanceEntry.relationships[SERVER_GROUPS.ns] && !instanceEntry.relationships[SERVER_GROUPS.ns].empty) { - // job needs to be loaded because it was cached separately - job = jobData.get(instanceEntry.attributes.jobId) + if (instanceEntry.attributes.job != null || (instanceEntry.attributes.jobId != null && + jobData.containsKey(instanceEntry.attributes.jobId))) { + if (instanceEntry.relationships[SERVER_GROUPS.ns] + && !instanceEntry.relationships[SERVER_GROUPS.ns].empty) { + // job needs to be loaded because it was cached separately + job = jobData.get(instanceEntry.attributes.jobId) + } else { + job = objectMapper.convertValue(instanceEntry.attributes.job, Job) + } + + if (job == null) { + log.error("Job is null for instance {}. Instance data {}.", instanceEntry.id, instanceEntry.toString()) + return [:] + } else { + TitusInstance instance = new TitusInstance(job, task) + instance.health = instanceEntry.attributes[HEALTH.ns] + return [(instanceEntry.id): instance] + } + } else { - job = objectMapper.convertValue(instanceEntry.attributes.job, Job) + log.error("Job id is null for instance {}. Are there two jobs with the same server group name?", instanceEntry.id) + return [:] } - - TitusInstance instance = new TitusInstance(job, task) - instance.health = instanceEntry.attributes[HEALTH.ns] - [(instanceEntry.id): instance] - } + }.findAll { it.key != null } Map healthKeysToInstance = [:] instanceData.each { instanceEntry -> @@ -316,8 +332,12 @@ class TitusClusterProvider implements ClusterProvider, ServerGroup Collection healths = cacheView.getAll(HEALTH.ns, healthKeysToInstance.keySet(), RelationshipCacheFilter.none()) healths.each { healthEntry -> def instanceId = healthKeysToInstance.get(healthEntry.id) - healthEntry.attributes.remove('lastUpdatedTimestamp') - instances[instanceId].health << healthEntry.attributes + + // instances[:] may be a subset of instanceData from which healthKeysToInstance is built + if (instances.containsKey(instanceId) && instances[instanceId] != null) { + healthEntry.attributes.remove('lastUpdatedTimestamp') + instances[instanceId].health << healthEntry.attributes + } } return instances } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusInstanceProvider.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusInstanceProvider.groovy index 2d26fa9026b..4f6cf96ddf5 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusInstanceProvider.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusInstanceProvider.groovy @@ -35,17 +35,19 @@ import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component +import javax.inject.Provider + import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.HEALTH import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.INSTANCES import static 
com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.SERVER_GROUPS @Component -class TitusInstanceProvider implements InstanceProvider { +class TitusInstanceProvider implements InstanceProvider { final String cloudProvider = TitusCloudProvider.ID private final Cache cacheView private final ObjectMapper objectMapper private final TitusCloudProvider titusCloudProvider - private final CachingSchemaUtil cachingSchemaUtil + private final Provider cachingSchemaUtil private final AwsLookupUtil awsLookupUtil private final Logger log = LoggerFactory.getLogger(getClass()) @@ -58,7 +60,7 @@ class TitusInstanceProvider implements InstanceProvider { Cache cacheView, TitusCloudProvider titusCloudProvider, ObjectMapper objectMapper, - CachingSchemaUtil cachingSchemaUtil, + Provider cachingSchemaUtil, AwsLookupUtil awsLookupUtil ) { this.cacheView = cacheView @@ -81,16 +83,7 @@ class TitusInstanceProvider implements InstanceProvider { return null } - String stack = awsLookupUtil.stack(account) - if (!stack) { - stack = 'mainvpc' - } - - CachingSchema cachingSchema = cachingSchemaUtil.getCachingSchemaForAccount(account) - - String instanceKey = ( cachingSchema == CachingSchema.V1 - ? Keys.getInstanceKey(id, awsAccount, stack, region) - : Keys.getInstanceV2Key(id, account, region)) + String instanceKey = Keys.getInstanceV2Key(id, account, region) CacheData instanceEntry = cacheView.get(INSTANCES.ns, instanceKey) if (!instanceEntry) { @@ -110,6 +103,10 @@ class TitusInstanceProvider implements InstanceProvider { job = objectMapper.convertValue(instanceEntry.attributes.job, Job) } + if (job == null) { + return null + } + TitusInstance instance = new TitusInstance(job, task) instance.accountId = awsAccount @@ -118,9 +115,7 @@ class TitusInstanceProvider implements InstanceProvider { instance.health.addAll(instanceEntry.attributes[HEALTH.ns]) } if (instanceEntry.relationships[SERVER_GROUPS.ns] && !instanceEntry.relationships[SERVER_GROUPS.ns].empty) { - instance.serverGroup = (cachingSchema == CachingSchema.V1 - ? 
instanceEntry.relationships[SERVER_GROUPS.ns].iterator().next() - : Keys.parse(instanceEntry.relationships[SERVER_GROUPS.ns].iterator().next()).serverGroup) + instance.serverGroup = Keys.parse(instanceEntry.relationships[SERVER_GROUPS.ns].iterator().next()).serverGroup instance.cluster = Names.parseName(instance.serverGroup)?.cluster } externalHealthProviders.each { externalHealthProvider -> @@ -145,6 +140,9 @@ class TitusInstanceProvider implements InstanceProvider { private Job loadJob(CacheData instanceEntry) { Collection data = resolveRelationshipData(instanceEntry, SERVER_GROUPS.ns) + if (data == null || data.isEmpty()) { + return null + } return objectMapper.convertValue(data?.first()?.attributes.job, Job) } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProvider.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProvider.groovy index a373c4bef41..64f1399adea 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProvider.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProvider.groovy @@ -24,15 +24,21 @@ import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.client.TitusClient import com.netflix.spinnaker.clouddriver.titus.client.model.Job import com.netflix.spinnaker.clouddriver.titus.model.TitusJobStatus +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException +import groovy.util.logging.Slf4j import okhttp3.OkHttpClient import okhttp3.Request +import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component +import org.yaml.snakeyaml.Yaml +import org.yaml.snakeyaml.constructor.SafeConstructor import javax.net.ssl.HostnameVerifier import javax.net.ssl.SSLSession @Component +@Slf4j class TitusJobProvider implements JobProvider { String platform = "titus" @@ -65,7 +71,9 @@ class TitusJobProvider implements JobProvider { TitusJobStatus collectJob(String account, String location, String id) { TitusClient titusClient = titusClientProvider.getTitusClient(accountCredentialsProvider.getCredentials(account), location) Job job = titusClient.getJobAndAllRunningAndCompletedTasks(id) - new TitusJobStatus(job, account, location) + TitusJobStatus jobStatus = new TitusJobStatus(job, account, location) + log.info("run job lookup for ${id} : status ${jobStatus.jobState}") + return jobStatus } @Override @@ -81,13 +89,13 @@ class TitusJobProvider implements JobProvider { try { amazonS3DataProvider.getAdhocData("titus", "${s3.accountName}:${s3.region}:${s3.bucket}", "${s3.key}/${fileName}", outputStream) } catch (Exception e) { - throw new RuntimeException("Could not load ${fileName} for task ${job.tasks.last().id}") + throw new NotFoundException("File [${fileName}] does not exist for job [${job.tasks.last().id}].", e) } fileContents = new ByteArrayInputStream(outputStream.toByteArray()) } else { Map files = titusClient.logsDownload(job.tasks.last().id) if (!files.containsKey(fileName)) { - throw new RuntimeException("File ${fileName} not found for task ${job.tasks.last().id}") + throw new NotFoundException("File [${fileName}] does not exist for job [${job.tasks.last().id}].") } fileContents = client.newCall(new Request.Builder().url(files.get(fileName) as String).build()).execute().body().byteStream() } @@ -96,14 
+104,19 @@ class TitusJobProvider implements JobProvider { Map results = [:] if (fileName.endsWith('.json')) { results = objectMapper.readValue(fileContents, Map) + } else if (fileName.endsWith('.yml')) { + def yaml = new Yaml(new SafeConstructor()) + results = yaml.load(fileContents) } else { Properties propertiesFile = new Properties() propertiesFile.load(fileContents) results = results << propertiesFile } + return results } - null + + return Collections.emptyMap() } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusTargetGroupServerGroupProvider.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusTargetGroupServerGroupProvider.java deleted file mode 100644 index 4346d8ff775..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusTargetGroupServerGroupProvider.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.caching.providers; - -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.cats.cache.Cache; -import com.netflix.spinnaker.cats.cache.CacheData; -import com.netflix.spinnaker.clouddriver.aws.model.AmazonTargetGroup; -import com.netflix.spinnaker.clouddriver.aws.model.TargetGroupServerGroupProvider; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; -import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; -import com.netflix.spinnaker.clouddriver.titus.caching.Keys; -import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.*; -import java.util.stream.Collectors; - -import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*; - -@Slf4j -@Component -public class TitusTargetGroupServerGroupProvider implements TargetGroupServerGroupProvider { - - private final Cache cacheView; - AwsLookupUtil awsLookupUtil; - private final Registry registry; - - private final Id inconsistentCacheId; - - @Autowired - public TitusTargetGroupServerGroupProvider(Cache cacheView, - AwsLookupUtil awsLookupUtil, - Registry registry) { - this.cacheView = cacheView; - this.awsLookupUtil = awsLookupUtil; - this.registry = registry; - - inconsistentCacheId = registry.createId("cache.inconsistentData") - .withTag("location", TitusTargetGroupServerGroupProvider.class.getSimpleName()); - } - - @Override - public Map getServerGroups(String applicationName, - Map allTargetGroups, - Collection targetGroupData) { - - CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName)); - if (application == null || - allTargetGroups.isEmpty() || - 
!application.getRelationships().containsKey(TARGET_GROUPS.ns) || - application.getRelationships().get(TARGET_GROUPS.ns).isEmpty()) { - return allTargetGroups; - } - - Collection applicationServerGroups = resolveRelationshipData(application, SERVER_GROUPS.ns); - Set instanceKeys = new HashSet<>(); - for (CacheData serverGroup : applicationServerGroups) { - Map> relationships = serverGroup.getRelationships(); - if (relationships.containsKey(TARGET_GROUPS.ns) && - !relationships.get(TARGET_GROUPS.ns).isEmpty() && - relationships.containsKey(INSTANCES.ns) && - !relationships.get(INSTANCES.ns).isEmpty()) { - instanceKeys.addAll(relationships.get(INSTANCES.ns)); - } - } - - Map instances = cacheView.getAll(INSTANCES.ns, instanceKeys) - .stream() - .collect(Collectors.toMap(CacheData::getId, CacheData::getAttributes)); - - for (CacheData serverGroup : applicationServerGroups) { - if (serverGroup.getRelationships().containsKey(TARGET_GROUPS.ns)) { - for (String targetGroup : serverGroup.getRelationships().get(TARGET_GROUPS.ns)) { - Map targetGroupDetails = com.netflix.spinnaker.clouddriver.aws.data.Keys.parse(targetGroup); - - Set targetGroupInstances = new HashSet<>(); - if (serverGroup.getRelationships().containsKey(INSTANCES.ns)) { - for (String instanceKey : serverGroup.getRelationships().get(INSTANCES.ns)) { - Map instanceDetails = instances.get(instanceKey); - - Optional instance = getInstanceHealth(instanceKey, instanceDetails, targetGroupDetails); - if (instance.isPresent()) { - targetGroupInstances.add(instance.get()); - } else { - registry.counter(inconsistentCacheId).increment(); - log.error( - "Detected potentially inconsistent instance cache data (targetGroup: {}, serverGroup: {})", - targetGroup, - serverGroup.getId() - ); - } - } - } - - Map attributes = serverGroup.getAttributes(); - Map job = (Map) attributes.get("job"); - LoadBalancerServerGroup loadBalancerServerGroup = new LoadBalancerServerGroup( - job.get("name").toString(), - attributes.get("account").toString(), - attributes.get("region").toString(), - !(Boolean) job.get("inService"), - Collections.emptySet(), - targetGroupInstances - ); - - if (allTargetGroups.containsKey(targetGroup)) { - allTargetGroups.get(targetGroup).getServerGroups().add(loadBalancerServerGroup); - allTargetGroups.get(targetGroup).set("instances", targetGroupInstances.stream().map(LoadBalancerInstance::getId).collect(Collectors.toSet())); - } - } - } - } - return allTargetGroups; - } - - Collection resolveRelationshipData(CacheData source, String relationship) { - return source.getRelationships().get(relationship) != null ? 
cacheView.getAll(relationship, source.getRelationships().get(relationship)) : Collections.emptyList(); - } - - private Optional getInstanceHealth(String instanceKey, - Map instanceDetails, - Map targetGroupDetails) { - String healthKey; - try { - healthKey = com.netflix.spinnaker.clouddriver.aws.data.Keys.getInstanceHealthKey( - ((Map) instanceDetails.get("task")).get("containerIp").toString(), - targetGroupDetails.get("account"), - targetGroupDetails.get("region"), - "aws-load-balancer-v2-target-group-instance-health" - ); - } catch (NullPointerException e) { - return Optional.empty(); - } - - CacheData healthData = cacheView.get(HEALTH.ns, healthKey); - - Map health = getTargetGroupHealth(instanceKey, targetGroupDetails, healthData); - - return Optional.of(new LoadBalancerInstance( - ((Map) instanceDetails.get("task")).get("id").toString(), - null, - health - )); - } - - private static Map getTargetGroupHealth(String instanceKey, - Map targetGroupDetails, - CacheData healthData) { - try { - if (healthDataContainsTargetGroups(healthData)) { - Map targetGroupHealth = getTargetGroupHealthData(targetGroupDetails, healthData); - - if (!targetGroupHealth.isEmpty()) { - Map health = new HashMap<>(); - health.put("targetGroupName", targetGroupHealth.get("targetGroupName").toString()); - health.put("state", targetGroupHealth.get("state").toString()); - - if (targetGroupHealth.containsKey("reasonCode")) { - health.put("reasonCode", targetGroupHealth.get("reasonCode").toString()); - } - - if (targetGroupHealth.containsKey("description")) { - health.put("description", targetGroupHealth.get("description").toString()); - } - - return health; - } - } - } catch (Exception e) { - log.error("failed to load health for " + instanceKey, e); - } - - return Collections.emptyMap(); - } - - private static boolean healthDataContainsTargetGroups(CacheData healthData) { - return healthData != null - && healthData.getAttributes().containsKey("targetGroups") - && !((ArrayList) healthData.getAttributes().get("targetGroups")).isEmpty(); - } - - private static Map getTargetGroupHealthData(Map targetGroupDetails, CacheData healthData) { - List targetGroups = (List) healthData.getAttributes().get("targetGroups"); - return (Map) targetGroups.stream() - .filter(tgh -> - ((Map) tgh).get("targetGroupName").toString().equals(targetGroupDetails.get("targetGroup") - )) - .findFirst() - .orElse(Collections.EMPTY_MAP); - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/AwsLookupUtil.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/AwsLookupUtil.groovy index a773ee8613d..82c7c862570 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/AwsLookupUtil.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/AwsLookupUtil.groovy @@ -25,6 +25,7 @@ import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonSecurityGroupPr import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonVpcProvider import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.aws.services.SecurityGroupService import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.client.model.Job import 
com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials @@ -71,7 +72,27 @@ class AwsLookupUtil { Map awsDetails = awsAccountLookup.find { it.titusAccount == account && it.region == region } - awsSecurityGroupProvider.get(awsDetails.awsAccount, region, providedSecurityGroup, awsDetails.vpcId)?.id + awsSecurityGroupProvider.getIdByName(awsDetails.awsAccount, region, providedSecurityGroup, awsDetails.vpcId) + } + + /** + * Converts security groups to security group names. This handles the case wherein the list of + * security groups may include both IDs and names. + */ + List convertSecurityGroupsToNames(String account, String region, List securityGroups) { + Map awsDetails = awsAccountLookup.find { + it.titusAccount == account && it.region == region + } + + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider = regionScopedProviderFactory.forRegion(accountCredentialsProvider.all.find { + it instanceof AmazonCredentials && it.name == awsDetails.awsAccount + }, region) + + SecurityGroupService securityGroupService = regionScopedProvider.getSecurityGroupService() + + return securityGroupService.resolveSecurityGroupNamesByStrategy(securityGroups) { List ids -> + securityGroupService.getSecurityGroupNamesFromIds(ids) + } } String createSecurityGroupForApplication(account, region, application) { @@ -118,7 +139,11 @@ class AwsLookupUtil { } public Map lookupAccount(account, region) { - Map awsDetails = awsAccountLookup.find { + // rz - avoid a concurrent access exception while interacting with awsAccountLookup (via Titus streaming agent) + List accounts = new ArrayList<>(awsAccountLookup.size()) + accounts.addAll(awsAccountLookup) + + Map awsDetails = accounts.find { it.titusAccount == account && it.region == region } if (!awsDetails) { @@ -187,11 +212,12 @@ class AwsLookupUtil { } } - [name : awsSecurityGroupProvider.getById(awsDetails.awsAccount, + [name : awsSecurityGroupProvider.getNameById( + awsDetails.awsAccount, region, securityGroupId, awsDetails.vpcId - )?.name, + ), awsAccount: awsDetails.awsAccount, vpcId : awsDetails.vpcId ] diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchemaUtil.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchemaUtil.groovy deleted file mode 100644 index c9256a66367..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchemaUtil.groovy +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
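The new AwsLookupUtil#convertSecurityGroupsToNames above takes a list that may mix security group IDs and names and resolves everything to names through SecurityGroupService, passing the ID-to-name lookup in as a strategy. A minimal sketch of that shape, assuming the conventional "sg-" prefix is what separates IDs from names (the helper below is illustrative, not the SecurityGroupService API):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

class MixedSecurityGroupResolver {
  /** Resolves a mixed list of security group IDs and names to names only. */
  static List<String> resolveNames(List<String> groups,
                                   Function<List<String>, List<String>> idsToNames) {
    List<String> ids = new ArrayList<>();
    List<String> names = new ArrayList<>();
    for (String group : groups) {
      // AWS security group IDs conventionally start with "sg-"; treating
      // everything else as an existing name is an assumption of this sketch.
      if (group.startsWith("sg-")) {
        ids.add(group);
      } else {
        names.add(group);
      }
    }
    if (!ids.isEmpty()) {
      names.addAll(idsToNames.apply(ids)); // resolve only the IDs
    }
    return names;
  }
}
```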
- */ - -package com.netflix.spinnaker.clouddriver.titus.caching.utils - -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.stereotype.Component - -import javax.annotation.PostConstruct - -@Component -class CachingSchemaUtil { - private Map cachingSchemaForAccounts = [:] - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Autowired - AwsLookupUtil awsLookupUtil - - CachingSchema getCachingSchemaForAccount(String account) { - return cachingSchemaForAccounts.get(account) ?: CachingSchema.V1 - } - - @PostConstruct - private void init() { - accountCredentialsProvider.all.findAll { - it instanceof NetflixTitusCredentials - }.each { NetflixTitusCredentials credential -> - credential.regions.each { region -> - cachingSchemaForAccounts.put(credential.name, credential.splitCachingEnabled ? CachingSchema.V2 : CachingSchema.V1) - cachingSchemaForAccounts.put( - awsLookupUtil.awsAccountId(credential.name, region.name), - credential.splitCachingEnabled ? CachingSchema.V2 : CachingSchema.V1 - ) - } - } - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/EndpointValidator.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/EndpointValidator.java deleted file mode 100644 index f1208a49d03..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/EndpointValidator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
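For context on the CachingSchemaUtil deletion that starts above: the class precomputed a per-account schema (V2 when the account had splitCachingEnabled, else V1, also keyed by the corresponding AWS account ID) and fell back to V1 for unknown accounts. With the providers in this diff now building V2 keys unconditionally, that lookup has no remaining callers. A condensed restatement of the removed lookup rule:

```java
import java.util.Map;

enum CachingSchema { V1, V2 }

// Condensed form of what the deleted CachingSchemaUtil computed; the real
// class populated the map from NetflixTitusCredentials at startup.
class CachingSchemaLookup {
  private final Map<String, CachingSchema> byAccount;

  CachingSchemaLookup(Map<String, CachingSchema> byAccount) {
    this.byAccount = byAccount;
  }

  // Unknown accounts fell back to the legacy V1 schema, mirroring
  // `cachingSchemaForAccounts.get(account) ?: CachingSchema.V1`.
  CachingSchema forAccount(String account) {
    return byAccount.getOrDefault(account, CachingSchema.V1);
  }
}
```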
- */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import java.net.MalformedURLException; -import java.net.URL; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -public class EndpointValidator { - - private static final Set ALLOWED_PROTOCOLS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("http", "https"))); - - public static String validateEndpoint(String endpoint) { - URL url; - try { - url = new URL(endpoint); - } catch (NullPointerException e) { - throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s)", endpoint)); - } catch (MalformedURLException e) { - throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s): %s", endpoint, e.getMessage())); - } - - if (url.getHost() == null || "".equals(url.getHost())) { - throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s): No host specified", endpoint)); - } - - String protocol = url.getProtocol(); - if (!ALLOWED_PROTOCOLS.contains(protocol)) { - throw new IllegalArgumentException( - String.format("Invalid endpoint provided (%s): Invalid protocol specified (%s)", endpoint, protocol)); - } - return endpoint; - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusAutoscalingClient.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusAutoscalingClient.java deleted file mode 100644 index 7260c781d32..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusAutoscalingClient.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
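The validation in the deleted EndpointValidator above reduces to: parse the endpoint as a URL, require a non-empty host, and accept only http or https. A self-contained equivalent:

    import java.net.MalformedURLException;
    import java.net.URL;
    import java.util.Set;

    class Endpoints {
      private static final Set<String> ALLOWED_PROTOCOLS = Set.of("http", "https");

      static String validateEndpoint(String endpoint) {
        URL url;
        try {
          url = new URL(endpoint);   // the original also guarded against NPE for a null endpoint
        } catch (MalformedURLException | NullPointerException e) {
          throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s)", endpoint), e);
        }
        if (url.getHost() == null || url.getHost().isEmpty()) {
          throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s): No host specified", endpoint));
        }
        if (!ALLOWED_PROTOCOLS.contains(url.getProtocol())) {
          throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s): Invalid protocol specified (%s)", endpoint, url.getProtocol()));
        }
        return endpoint;
      }
    }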
- */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import com.google.protobuf.Empty; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.titus.client.model.GrpcChannelFactory; -import com.netflix.titus.grpc.protogen.*; - -import java.util.List; - -public class RegionScopedTitusAutoscalingClient implements TitusAutoscalingClient { - - /** - * Default connect timeout in milliseconds - */ - private static final long DEFAULT_CONNECT_TIMEOUT = 60000; - - private final AutoScalingServiceGrpc.AutoScalingServiceBlockingStub autoScalingServiceBlockingStub; - - public RegionScopedTitusAutoscalingClient(TitusRegion titusRegion, - Registry registry, - String environment, - String eurekaName, - GrpcChannelFactory channelFactory) { - this.autoScalingServiceBlockingStub = AutoScalingServiceGrpc.newBlockingStub(channelFactory.build(titusRegion, environment, eurekaName, DEFAULT_CONNECT_TIMEOUT, registry)); - } - - @Override - public List getAllScalingPolicies() { - return autoScalingServiceBlockingStub.getAllScalingPolicies(Empty.newBuilder().build()).getItemsList(); - } - - @Override - public List getJobScalingPolicies(String jobId) { - JobId request = JobId.newBuilder().setId(jobId).build(); - return autoScalingServiceBlockingStub - .getJobScalingPolicies(request).getItemsList(); - } - - @Override - public ScalingPolicyResult getScalingPolicy(String policyId) { - return autoScalingServiceBlockingStub.getScalingPolicy(ScalingPolicyID.newBuilder().setId(policyId).build()).getItems(0); - } - - @Override - public ScalingPolicyID createScalingPolicy(PutPolicyRequest policy) { - return TitusClientAuthenticationUtil.attachCaller(autoScalingServiceBlockingStub) - .setAutoScalingPolicy(policy); - } - - @Override - public void updateScalingPolicy(UpdatePolicyRequest policy) { - TitusClientAuthenticationUtil.attachCaller(autoScalingServiceBlockingStub).updateAutoScalingPolicy(policy); - } - - @Override - public void deleteScalingPolicy(DeletePolicyRequest request) { - TitusClientAuthenticationUtil.attachCaller(autoScalingServiceBlockingStub).deleteAutoScalingPolicy(request); - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClient.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClient.java deleted file mode 100644 index 482adb75338..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClient.java +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
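Each of these deleted region-scoped clients is built the same way: one ManagedChannel from the shared GrpcChannelFactory, wrapped in a gRPC blocking stub, with caller metadata attached only on mutating calls. Reduced to its essentials for the autoscaling client (types and the 60-second connect timeout as in the deleted code; policy stands for a PutPolicyRequest):

    ManagedChannel channel = channelFactory.build(titusRegion, environment, eurekaName, 60_000L, registry);
    AutoScalingServiceGrpc.AutoScalingServiceBlockingStub stub = AutoScalingServiceGrpc.newBlockingStub(channel);

    // Reads go straight through; writes attach the Spinnaker caller identity first.
    List<ScalingPolicyResult> all = stub.getAllScalingPolicies(Empty.newBuilder().build()).getItemsList();
    ScalingPolicyID id = TitusClientAuthenticationUtil.attachCaller(stub).setAutoScalingPolicy(policy);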
- */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.protobuf.Empty; -import com.netflix.frigga.Names; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.titus.TitusException; -import com.netflix.spinnaker.clouddriver.titus.client.model.*; -import com.netflix.spinnaker.clouddriver.titus.client.model.HealthStatus; -import com.netflix.spinnaker.clouddriver.titus.client.model.Job; -import com.netflix.spinnaker.clouddriver.titus.client.model.Task; -import com.netflix.spinnaker.kork.core.RetrySupport; -import com.netflix.titus.grpc.protogen.*; -import io.grpc.Status; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; - -import java.net.URL; -import java.util.*; -import java.util.stream.Collectors; - -import static java.util.stream.Collectors.mapping; -import static java.util.stream.Collectors.toList; - -@Slf4j -public class RegionScopedTitusClient implements TitusClient { - - /** - * Default connect timeout in milliseconds - */ - private static final long DEFAULT_CONNECT_TIMEOUT = 60000; - - /** - * Default read timeout in milliseconds - */ - private static final long DEFAULT_READ_TIMEOUT = 20000; - - /** - * An instance of {@link TitusRegion} that this RegionScopedTitusClient will use - */ - private final TitusRegion titusRegion; - - private final Registry registry; - - private final List titusJobCustomizers; - - private final String environment; - - private final ObjectMapper objectMapper; - - private final JobManagementServiceGrpc.JobManagementServiceBlockingStub grpcBlockingStub; - - private final RetrySupport retrySupport; - - public RegionScopedTitusClient(TitusRegion titusRegion, Registry registry, List titusJobCustomizers, String environment, String eurekaName, GrpcChannelFactory grpcChannelFactory, RetrySupport retrySupport) { - this(titusRegion, DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT, TitusClientObjectMapper.configure(), registry, titusJobCustomizers, environment, eurekaName, grpcChannelFactory, retrySupport); - } - - public RegionScopedTitusClient(TitusRegion titusRegion, - long connectTimeoutMillis, - long readTimeoutMillis, - ObjectMapper objectMapper, - Registry registry, - List titusJobCustomizers, - String environment, - String eurekaName, - GrpcChannelFactory channelFactory, - RetrySupport retrySupport - ) { - this.titusRegion = titusRegion; - this.registry = registry; - this.titusJobCustomizers = titusJobCustomizers; - this.environment = environment; - this.objectMapper = objectMapper; - this.retrySupport = retrySupport; - - String titusHost = ""; - try { - URL titusUrl = new URL(titusRegion.getEndpoint()); - titusHost = titusUrl.getHost(); - } catch (Exception e) { - - } - this.grpcBlockingStub = JobManagementServiceGrpc.newBlockingStub(channelFactory.build(titusRegion, environment, eurekaName, DEFAULT_CONNECT_TIMEOUT, registry)); - - if (!titusRegion.getFeatureFlags().isEmpty()) { - log.info("Experimental Titus V3 client feature flags {} enabled for account {} and region {}", - StringUtils.join(titusRegion.getFeatureFlags(), ","), - titusRegion.getAccount(), - titusRegion.getName()); - } - } - - // APIs - // ------------------------------------------------------------------------------------------ - - @Override - public Job getJobAndAllRunningAndCompletedTasks(String jobId) { - return new Job(grpcBlockingStub.findJob(JobId.newBuilder().setId(jobId).build()), getTasks(Arrays.asList(jobId), true).get(jobId)); - } - - 
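One quirk in the deleted constructor above: it parses titusRegion.getEndpoint() into a local titusHost, swallows any failure in an empty catch block, and then never uses the value. If the host were actually needed, a tighter form would fail loudly instead:

    String titusHost;
    try {
      titusHost = new URL(titusRegion.getEndpoint()).getHost();
    } catch (MalformedURLException e) {
      throw new IllegalArgumentException("Invalid Titus endpoint: " + titusRegion.getEndpoint(), e);
    }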
@Override - public Job findJobByName(String jobName, boolean includeTasks) { - JobQuery.Builder jobQuery = JobQuery.newBuilder() - .putFilteringCriteria("jobType", "SERVICE") - .putFilteringCriteria("attributes", "source:spinnaker,name:" + jobName) - .putFilteringCriteria("attributes.op", "and"); - List results = getJobs(jobQuery, includeTasks); - return results.isEmpty() ? null : results.get(0); - } - - @Override - public Job findJobByName(String jobName) { - return findJobByName(jobName, false); - } - - @Override - public List findJobsByApplication(String application) { - JobQuery.Builder jobQuery = JobQuery.newBuilder().putFilteringCriteria("appName", application); - return getJobs(jobQuery, false); - } - - @Override - public String submitJob(SubmitJobRequest submitJobRequest) { - JobDescription jobDescription = submitJobRequest.getJobDescription(); - if (jobDescription.getType() == null) { - jobDescription.setType("service"); - } - if (jobDescription.getUser() == null) { - jobDescription.setUser("spinnaker@netflix.com"); - } else if (!jobDescription.getUser().contains("@")) { - jobDescription.setUser(jobDescription.getUser() + "@netflix.com"); - } - if (jobDescription.getJobGroupSequence() == null && jobDescription.getType().equals("service")) { - try { - int sequence = Names.parseName(jobDescription.getName()).getSequence(); - jobDescription.setJobGroupSequence(String.format("v%03d", sequence)); - } catch (Exception e) { - // fail silently if we can't get a job group sequence - } - } - jobDescription.getLabels().put("name", jobDescription.getName()); - jobDescription.getLabels().put("source", "spinnaker"); - jobDescription.getLabels().put("spinnakerAccount", submitJobRequest.getCredentials()); - for (TitusJobCustomizer customizer : titusJobCustomizers) { - customizer.customize(jobDescription); - } - return TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub).createJob(jobDescription.getGrpcJobDescriptor()).getId(); - } - - @Override - public Task getTask(String taskId) { - // new Task(grpcBlockingStub.findTask(taskId)); - // return new Task(grpcBlockingStub.findTask(com.netflix.titus.grpc.protogen.TaskId.newBuilder().setId(taskId).build())); - return null; - } - - @Override - public void resizeJob(ResizeJobRequest resizeJobRequest) { - TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub).updateJobCapacity(JobCapacityUpdate.newBuilder() - .setJobId(resizeJobRequest.getJobId()) - .setCapacity(Capacity.newBuilder() - .setDesired(resizeJobRequest.getInstancesDesired()) - .setMax(resizeJobRequest.getInstancesMax()) - .setMin(resizeJobRequest.getInstancesMin()) - ) - .build() - ); - } - - @Override - public void activateJob(ActivateJobRequest activateJobRequest) { - TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub).updateJobStatus(JobStatusUpdate.newBuilder().setId(activateJobRequest.getJobId()).setEnableStatus(activateJobRequest.getInService()).build()); - } - - @Override - public void setAutoscaleEnabled(String jobId, boolean shouldEnable) { - TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub).updateJobProcesses( - JobProcessesUpdate.newBuilder() - .setServiceJobProcesses( - ServiceJobSpec.ServiceJobProcesses.newBuilder() - .setDisableDecreaseDesired(!shouldEnable) - .setDisableIncreaseDesired(!shouldEnable) - .build() - ) - .setJobId(jobId) - .build() - ); - } - - @Override - public void terminateJob(TerminateJobRequest terminateJobRequest) { - 
TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub).killJob(JobId.newBuilder().setId(terminateJobRequest.getJobId()).build()); - } - - @Override - public void terminateTasksAndShrink(TerminateTasksAndShrinkJobRequest terminateTasksAndShrinkJob) { - List failedTasks = new ArrayList<>(); - terminateTasksAndShrinkJob.getTaskIds().forEach(id -> { - try { - killTaskWithRetry(id, terminateTasksAndShrinkJob); - } catch (Exception e) { - failedTasks.add(id); - log.error("Failed to terminate and shrink titus task {} in account {} and region {}", id, titusRegion.getAccount(), titusRegion.getName(), e); - } - } - ); - if (!failedTasks.isEmpty()) { - throw new TitusException("Failed to terminate and shrink titus tasks: " + StringUtils.join(failedTasks, ",")); - } - } - - private void killTaskWithRetry(String id, TerminateTasksAndShrinkJobRequest terminateTasksAndShrinkJob) { - retrySupport.retry(() -> { - try { - return TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub).killTask( - TaskKillRequest.newBuilder() - .setTaskId(id) - .setShrink(terminateTasksAndShrinkJob.isShrink()) - .build() - ); - } catch (io.grpc.StatusRuntimeException e) { - if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { - log.warn("Titus task {} not found, continuing with terminate tasks and shrink job request.", id); - return Empty.newBuilder().build(); - } - throw e; - } - }, 3, 1000, false); - } - - @Override - public Map logsDownload(String taskId) { - return null; - } - - @Override - public TitusHealth getHealth() { - return new TitusHealth(HealthStatus.HEALTHY); - } - - @Override - public List getAllJobsWithTasks() { - JobQuery.Builder jobQuery = JobQuery.newBuilder() - .putFilteringCriteria("jobType", "SERVICE") - .putFilteringCriteria("attributes", "source:spinnaker"); - return getJobs(jobQuery); - } - - private List getJobs(JobQuery.Builder jobQuery) { - return getJobs(jobQuery, true); - } - - private List getJobs(JobQuery.Builder jobQuery, boolean includeTasks) { - List grpcJobs = getJobsWithFilter(jobQuery); - final Map> tasks; - - if (includeTasks) { - List jobIds = Collections.emptyList(); - if (!titusRegion.getFeatureFlags().contains("jobIds")) { - jobIds = grpcJobs.stream().map(com.netflix.titus.grpc.protogen.Job::getId).collect( - Collectors.toList() - ); - } - tasks = getTasks(jobIds, false); - } else { - tasks = Collections.emptyMap(); - } - return grpcJobs.stream().map(grpcJob -> new Job(grpcJob, tasks.get(grpcJob.getId()))).collect(Collectors.toList()); - } - - @Override - public List getAllJobsWithoutTasks() { - JobQuery.Builder jobQuery = JobQuery.newBuilder() - .putFilteringCriteria("jobType", "SERVICE") - .putFilteringCriteria("attributes", "source:spinnaker"); - - return getJobs(jobQuery, false); - } - - @Override - public Map getAllJobNames() { - JobQuery.Builder jobQuery = JobQuery.newBuilder() - .putFilteringCriteria("jobType", "SERVICE") - .putFilteringCriteria("attributes", "source:spinnaker") - .addFields("id") - .addFields("jobDescriptor.attributes.name"); - - List grpcJobs = getJobsWithFilter(jobQuery, 10000); - - return grpcJobs.stream() - .collect(Collectors.toMap( - com.netflix.titus.grpc.protogen.Job::getId, - it -> it.getJobDescriptor().getAttributesOrDefault("name", "") - )); - } - - @Override - public Map> getTaskIdsForJobIds() { - String filterByStates = "Launched,StartInitiated,Started"; - - TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder(); - taskQueryBuilder - .putFilteringCriteria("attributes", "source:spinnaker") - .putFilteringCriteria("taskStates", 
filterByStates) - .addFields("id") - .addFields("jobId"); - - List grpcTasks = getTasksWithFilter(taskQueryBuilder, 10000); - return grpcTasks.stream().collect(Collectors.groupingBy(com.netflix.titus.grpc.protogen.Task::getJobId, mapping(com.netflix.titus.grpc.protogen.Task::getId, toList()))); - } - - private Map> getTasks(List jobIds, boolean includeDoneJobs) { - TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder(); - if (!jobIds.isEmpty()) { - taskQueryBuilder.putFilteringCriteria("jobIds", jobIds.stream().collect(Collectors.joining(","))); - } - if (titusRegion.getFeatureFlags().contains("jobIds")) { - taskQueryBuilder.putFilteringCriteria("attributes", "source:spinnaker"); - } - String filterByStates = "Launched,StartInitiated,Started"; - if (includeDoneJobs) { - filterByStates = filterByStates + ",KillInitiated,Finished"; - } - taskQueryBuilder.putFilteringCriteria("taskStates", filterByStates); - - List tasks = getTasksWithFilter(taskQueryBuilder); - return tasks.stream().collect(Collectors.groupingBy(com.netflix.titus.grpc.protogen.Task::getJobId)); - } - - @Override - public List getAllTasks() { - TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder(); - taskQueryBuilder.putFilteringCriteria("attributes", "source:spinnaker"); - String filterByStates = "Launched,StartInitiated,Started"; - taskQueryBuilder.putFilteringCriteria("taskStates", filterByStates); - - List tasks = getTasksWithFilter(taskQueryBuilder); - return tasks.stream().map(Task::new).collect(toList()); - } - - private List getJobsWithFilter(JobQuery.Builder jobQueryBuilder) { - return getJobsWithFilter(jobQueryBuilder, 1000); - } - - private List getJobsWithFilter(JobQuery.Builder jobQueryBuilder, Integer pageSize) { - List grpcJobs = new ArrayList<>(); - String cursor = ""; - boolean hasMore; - do { - if (cursor.isEmpty()) { - jobQueryBuilder.setPage(Page.newBuilder().setPageSize(pageSize)); - } else { - jobQueryBuilder.setPage(Page.newBuilder().setCursor(cursor).setPageSize(pageSize)); - } - - JobQuery criteria = jobQueryBuilder.build(); - JobQueryResult resultPage = TitusClientCompressionUtil.attachCaller(grpcBlockingStub).findJobs(criteria); - grpcJobs.addAll(resultPage.getItemsList()); - cursor = resultPage.getPagination().getCursor(); - hasMore = resultPage.getPagination().getHasMore(); - } while (hasMore); - return grpcJobs; - } - - private List getTasksWithFilter(TaskQuery.Builder taskQueryBuilder) { - return getTasksWithFilter(taskQueryBuilder, titusRegion.getFeatureFlags().contains("largePages") ? 
2000 : 1000); - } - - private List getTasksWithFilter(TaskQuery.Builder taskQueryBuilder, Integer pageSize) { - List grpcTasks = new ArrayList<>(); - - TaskQueryResult taskResults; - String cursor = ""; - boolean hasMore; - - do { - if (cursor.isEmpty()) { - taskQueryBuilder.setPage(Page.newBuilder().setPageSize(pageSize)); - } else { - taskQueryBuilder.setPage(Page.newBuilder().setCursor(cursor).setPageSize(pageSize)); - } - taskResults = TitusClientCompressionUtil.attachCaller(grpcBlockingStub).findTasks( - taskQueryBuilder.build() - ); - grpcTasks.addAll(taskResults.getItemsList()); - cursor = taskResults.getPagination().getCursor(); - hasMore = taskResults.getPagination().getHasMore(); - } while (hasMore); - return grpcTasks; - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusLoadBalancerClient.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusLoadBalancerClient.java deleted file mode 100644 index 324c2da8bfe..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusLoadBalancerClient.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
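getJobsWithFilter and getTasksWithFilter above share one cursor-pagination shape: request a page, accumulate the items, and loop while the server reports more. Consolidated for tasks (all protogen names as in the deleted code; stub stands for the, possibly compression-wrapped, blocking stub):

    private List<com.netflix.titus.grpc.protogen.Task> getTasksWithFilter(TaskQuery.Builder builder, int pageSize) {
      List<com.netflix.titus.grpc.protogen.Task> tasks = new ArrayList<>();
      String cursor = "";
      boolean hasMore;
      do {
        Page.Builder page = Page.newBuilder().setPageSize(pageSize);
        if (!cursor.isEmpty()) {
          page.setCursor(cursor);           // resume where the previous page ended
        }
        TaskQueryResult result = stub.findTasks(builder.setPage(page).build());
        tasks.addAll(result.getItemsList());
        cursor = result.getPagination().getCursor();
        hasMore = result.getPagination().getHasMore();
      } while (hasMore);
      return tasks;
    }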
- */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.titus.client.model.GrpcChannelFactory; -import com.netflix.titus.grpc.protogen.*; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class RegionScopedTitusLoadBalancerClient implements TitusLoadBalancerClient { - - /** - * Default connect timeout in milliseconds - */ - private static final long DEFAULT_CONNECT_TIMEOUT = 60000; - - private final LoadBalancerServiceGrpc.LoadBalancerServiceBlockingStub loadBalancerServiceBlockingStub; - - public RegionScopedTitusLoadBalancerClient(TitusRegion titusRegion, - Registry registry, - String environment, - String eurekaName, - GrpcChannelFactory channelFactory) { - this.loadBalancerServiceBlockingStub = LoadBalancerServiceGrpc.newBlockingStub(channelFactory.build(titusRegion, environment, eurekaName, DEFAULT_CONNECT_TIMEOUT, registry)); - } - - @Override - public List getJobLoadBalancers(String jobId) { - return loadBalancerServiceBlockingStub.getJobLoadBalancers(JobId.newBuilder().setId(jobId).build()).getLoadBalancersList(); - } - - @Override - public void addLoadBalancer(String jobId, String loadBalancerId) { - TitusClientAuthenticationUtil.attachCaller(loadBalancerServiceBlockingStub).addLoadBalancer(AddLoadBalancerRequest.newBuilder().setJobId(jobId).setLoadBalancerId(LoadBalancerId.newBuilder().setId(loadBalancerId).build()).build()); - } - - @Override - public void removeLoadBalancer(String jobId, String loadBalancerId) { - TitusClientAuthenticationUtil.attachCaller(loadBalancerServiceBlockingStub).removeLoadBalancer(RemoveLoadBalancerRequest.newBuilder().setJobId(jobId).setLoadBalancerId(LoadBalancerId.newBuilder().setId(loadBalancerId).build()).build()); - } - - public Map> getAllLoadBalancers() { - Map> results = new HashMap<>(); - String cursor = ""; - boolean hasMore = true; - do { - Page.Builder loadBalancerPage = Page.newBuilder().setPageSize(1000); - if (!cursor.isEmpty()) { - loadBalancerPage.setCursor(cursor); - } - GetAllLoadBalancersResult getAllLoadBalancersResult = loadBalancerServiceBlockingStub.getAllLoadBalancers(GetAllLoadBalancersRequest.newBuilder().setPage(loadBalancerPage).build()); - for (GetJobLoadBalancersResult result : getAllLoadBalancersResult.getJobLoadBalancersList()) { - for (LoadBalancerId loadBalancerid : result.getLoadBalancersList()) { - if (results.get(result.getJobId()) == null) { - List loadBalancers = new ArrayList<>(); - loadBalancers.add(loadBalancerid.getId()); - results.put(result.getJobId(), loadBalancers); - } else { - results.get(result.getJobId()).add(loadBalancerid.getId()); - } - } - } - hasMore = getAllLoadBalancersResult.getPagination().getHasMore(); - cursor = getAllLoadBalancersResult.getPagination().getCursor(); - } while (hasMore); - return results; - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/SimpleGrpcChannelFactory.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/SimpleGrpcChannelFactory.groovy index 39e32ffd2be..4d1add68879 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/SimpleGrpcChannelFactory.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/SimpleGrpcChannelFactory.groovy @@ -24,6 +24,6 @@ import io.grpc.ManagedChannelBuilder class SimpleGrpcChannelFactory implements 
GrpcChannelFactory { @Override ManagedChannel build(TitusRegion titusRegion, String environment, String eurekaName, long defaultConnectTimeOut, Registry registry) { - return ManagedChannelBuilder.forAddress(titusRegion.url, titusRegion.port).usePlaintext(true).build(); + return ManagedChannelBuilder.forAddress(titusRegion.url, titusRegion.port).build() } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClient.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClient.java deleted file mode 100644 index 53abcee9ce7..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClient.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import com.netflix.spinnaker.clouddriver.titus.client.model.*; - -import java.util.List; -import java.util.Map; - -public interface TitusClient { - - /** - * @param jobId - * @return - */ - public Job getJobAndAllRunningAndCompletedTasks(String jobId); - - /** - * @param jobName - * @param includeTasks - * @return - */ - public Job findJobByName(String jobName, boolean includeTasks); - - /** - * @param jobName - * @return - */ - public Job findJobByName(String jobName); - - /** - * @param application - * @return - */ - public List findJobsByApplication(String application); - - /** - * @param submitJobRequest - * @return - */ - public String submitJob(SubmitJobRequest submitJobRequest); - - /** - * @param taskId - * @return - */ - public Task getTask(String taskId); - - /** - * @param resizeJobRequest - */ - public void resizeJob(ResizeJobRequest resizeJobRequest); - - /** - * @param activateJobRequest - */ - public void activateJob(ActivateJobRequest activateJobRequest); - - /** - * @param shouldEnable - */ - public void setAutoscaleEnabled(String jobId, boolean shouldEnable); - - /** - * @param terminateJobRequest - */ - public void terminateJob(TerminateJobRequest terminateJobRequest); - - /** - * @param terminateTasksAndShrinkJob - */ - public void terminateTasksAndShrink(TerminateTasksAndShrinkJobRequest terminateTasksAndShrinkJob); - - /** - * @param taskId - * @return - */ - public Map logsDownload(String taskId); - - /** - * @return - */ - public TitusHealth getHealth(); - - /** - * @return - */ - public List getAllJobsWithTasks(); - - /** - * For use in TitusV2ClusterCachingAgent - * @return all jobs w/o task detail that are managed by Spinnaker - */ - public List getAllJobsWithoutTasks(); - - /** - * For use in TitusInstanceCachingAgent - * @return all tasks managed by Spinnaker - */ - public List getAllTasks(); - - public Map getAllJobNames(); - - public Map> getTaskIdsForJobIds(); - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientAuthenticationUtil.java 
b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientAuthenticationUtil.java deleted file mode 100644 index 6ec83ef2292..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientAuthenticationUtil.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import com.netflix.spinnaker.security.AuthenticatedRequest; -import io.grpc.Metadata; -import io.grpc.stub.AbstractStub; -import io.grpc.stub.MetadataUtils; - -public class TitusClientAuthenticationUtil { - - private static String CALLER_ID_HEADER = "X-Titus-CallerId"; - private static String CALL_REASON = "X-Titus-CallReason"; - private static Metadata.Key CALLER_ID_KEY = Metadata.Key.of(CALLER_ID_HEADER, Metadata.ASCII_STRING_MARSHALLER); - private static Metadata.Key CALL_REASON_KEY = Metadata.Key.of(CALL_REASON, Metadata.ASCII_STRING_MARSHALLER); - - public static > STUB attachCaller(STUB serviceStub) { - Metadata metadata = new Metadata(); - metadata.put(CALLER_ID_KEY, AuthenticatedRequest.getSpinnakerUser().orElse("spinnaker")); - metadata.put(CALL_REASON_KEY, AuthenticatedRequest.getSpinnakerExecutionId().orElse("unknown")); - return serviceStub.withInterceptors(MetadataUtils.newAttachHeadersInterceptor(metadata)); - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusRegion.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusRegion.java deleted file mode 100644 index 52afec02aa8..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusRegion.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
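The deleted TitusClientAuthenticationUtil above is the standard grpc-java recipe for stamping per-call headers: populate a Metadata object and wrap the stub with an attach-headers interceptor. Its essentials:

    Metadata metadata = new Metadata();
    metadata.put(Metadata.Key.of("X-Titus-CallerId", Metadata.ASCII_STRING_MARSHALLER),
        AuthenticatedRequest.getSpinnakerUser().orElse("spinnaker"));
    metadata.put(Metadata.Key.of("X-Titus-CallReason", Metadata.ASCII_STRING_MARSHALLER),
        AuthenticatedRequest.getSpinnakerExecutionId().orElse("unknown"));
    stub = stub.withInterceptors(MetadataUtils.newAttachHeadersInterceptor(metadata));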
- */ - -package com.netflix.spinnaker.clouddriver.titus.client; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class TitusRegion { - private final String name; - private final String account; - private final String endpoint; - private final Boolean autoscalingEnabled; - private final Boolean loadBalancingEnabled; - private final List faultDomains; - private final String applicationName; - private final String url; - private final int port; - private final List featureFlags; - - private T notNull(T val, String name) { - if (val == null) { - throw new NullPointerException(name); - } - return val; - } - - public TitusRegion(String name, - String account, - String endpoint, - Boolean autoscalingEnabled, - Boolean loadBalancingEnabled, - List faultDomains, - String applicationName, - String url, - Integer port, - List featureFlags - ) { - this.name = notNull(name, "name"); - this.account = notNull(account, "account"); - this.endpoint = EndpointValidator.validateEndpoint(endpoint); - this.autoscalingEnabled = autoscalingEnabled; - this.loadBalancingEnabled = loadBalancingEnabled; - this.faultDomains = faultDomains == null ? Collections.emptyList() : Collections.unmodifiableList(faultDomains); - this.applicationName = applicationName; - this.url = url; - if (port != null) { - this.port = port; - } else { - this.port = 7104; - } - if (featureFlags == null) { - this.featureFlags = new ArrayList<>(); - } else { - this.featureFlags = featureFlags; - } - } - - public TitusRegion(String name, String account, String endpoint, Boolean autoscalingEnabled, Boolean loadBalancingEnabled, String applicationName, String url, Integer port, List featureFlags) { - this(name, account, endpoint, autoscalingEnabled, loadBalancingEnabled, Collections.emptyList(), applicationName, url, port, featureFlags); - } - - public String getAccount() { - return account; - } - - public String getName() { - return name; - } - - public String getEndpoint() { - return endpoint; - } - - public Boolean isAutoscalingEnabled() { - return autoscalingEnabled; - } - - public Boolean isLoadBalancingEnabled() { - return loadBalancingEnabled; - } - - public List getFaultDomains() { - return faultDomains; - } - - public String getApplicationName() { - return applicationName; - } - - public Integer getPort() { return port; } - - public String getUrl() { return url; } - - public List getFeatureFlags() { - return featureFlags; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - TitusRegion that = (TitusRegion) o; - - if (!name.equals(that.name)) return false; - if (!account.equals(that.account)) return false; - if (!endpoint.equals(that.endpoint)) return false; - return faultDomains.equals(that.faultDomains); - - } - - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 * result + account.hashCode(); - result = 31 * result + endpoint.hashCode(); - result = 31 * result + faultDomains.hashCode(); - return result; - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Job.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Job.java deleted file mode 100644 index f6c50bd1a39..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Job.java +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. 
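The deleted TitusRegion above hand-rolls a generic notNull helper and defaults the port to 7104; the same guards exist in the JDK. An equivalent constructor fragment, assuming only java.util.Objects:

    this.name = Objects.requireNonNull(name, "name");
    this.account = Objects.requireNonNull(account, "account");
    this.port = (port != null) ? port : 7104;                       // default Titus gRPC port, as above
    this.featureFlags = (featureFlags != null) ? featureFlags : new ArrayList<>();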
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.client.model; - -import com.netflix.titus.grpc.protogen.*; - -import java.util.*; -import java.util.stream.Collectors; - -public class Job { - - private String id; - private String name; - private String type; - private List tags; - private String applicationName; - private String digest; - private String appName; - private String user; - private String version; - private String entryPoint; - private String iamProfile; - private String capacityGroup; - private Boolean inService; - private int instances; - private int instancesMin; - private int instancesMax; - private int instancesDesired; - private int cpu; - private int memory; - private int disk; - private int gpu; - private int networkMbps; - private int[] ports; - private Map environment; - private Map containerAttributes; - private int retries; - private int runtimeLimitSecs; - private boolean allocateIpAddress; - private Date submittedAt; - private List tasks; - private Map labels; - private List securityGroups; - private String jobGroupStack; - private String jobGroupDetail; - private String jobGroupSequence; - private List hardConstraints; - private List softConstraints; - private Efs efs; - private MigrationPolicy migrationPolicy; - private String jobState; - - public Job() { - } - - public Job(com.netflix.titus.grpc.protogen.Job grpcJob, List grpcTasks) { - id = grpcJob.getId(); - - if (grpcJob.getJobDescriptor().getJobSpecCase().getNumber() == JobDescriptor.BATCH_FIELD_NUMBER) { - type = "batch"; - BatchJobSpec batchJobSpec = grpcJob.getJobDescriptor().getBatch(); - instancesMin = batchJobSpec.getSize(); - instancesMax = batchJobSpec.getSize(); - instancesDesired = batchJobSpec.getSize(); - instances = batchJobSpec.getSize(); - runtimeLimitSecs = (int) batchJobSpec.getRuntimeLimitSec(); - retries = batchJobSpec.getRetryPolicy().getImmediate().getRetries(); - } - - if (grpcJob.getJobDescriptor().getJobSpecCase().getNumber() == JobDescriptor.SERVICE_FIELD_NUMBER) { - type = "service"; - ServiceJobSpec serviceSpec = grpcJob.getJobDescriptor().getService(); - inService = serviceSpec.getEnabled(); - instances = serviceSpec.getCapacity().getDesired(); - instancesMin = serviceSpec.getCapacity().getMin(); - instancesMax = serviceSpec.getCapacity().getMax(); - instancesDesired = serviceSpec.getCapacity().getDesired(); - migrationPolicy = new MigrationPolicy(); - com.netflix.titus.grpc.protogen.MigrationPolicy policy = serviceSpec.getMigrationPolicy(); - if (policy.getPolicyCase().equals(com.netflix.titus.grpc.protogen.MigrationPolicy.PolicyCase.SELFMANAGED)) { - migrationPolicy.setType("selfManaged"); - } else { - migrationPolicy.setType("systemDefault"); - } - } - - labels = grpcJob.getJobDescriptor().getAttributesMap(); - containerAttributes = grpcJob.getJobDescriptor().getContainer().getAttributesMap(); - user = grpcJob.getJobDescriptor().getOwner().getTeamEmail(); - - if (grpcTasks != null) { - tasks = 
grpcTasks.stream().map(grpcTask -> new Task(grpcTask)).collect(Collectors.toList()); - } else { - tasks = new ArrayList<>(); - } - - appName = grpcJob.getJobDescriptor().getApplicationName(); - name = grpcJob.getJobDescriptor().getAttributesOrDefault("name", appName); - applicationName = grpcJob.getJobDescriptor().getContainer().getImage().getName(); - version = grpcJob.getJobDescriptor().getContainer().getImage().getTag(); - digest = grpcJob.getJobDescriptor().getContainer().getImage().getDigest(); - entryPoint = grpcJob.getJobDescriptor().getContainer().getEntryPointList().stream().collect(Collectors.joining(" ")); - capacityGroup = grpcJob.getJobDescriptor().getCapacityGroup(); - cpu = (int) grpcJob.getJobDescriptor().getContainer().getResources().getCpu(); - memory = grpcJob.getJobDescriptor().getContainer().getResources().getMemoryMB(); - gpu = grpcJob.getJobDescriptor().getContainer().getResources().getGpu(); - networkMbps = grpcJob.getJobDescriptor().getContainer().getResources().getNetworkMbps(); - disk = grpcJob.getJobDescriptor().getContainer().getResources().getDiskMB(); - jobGroupSequence = grpcJob.getJobDescriptor().getJobGroupInfo().getSequence(); - jobGroupStack = grpcJob.getJobDescriptor().getJobGroupInfo().getStack(); - jobGroupDetail = grpcJob.getJobDescriptor().getJobGroupInfo().getDetail(); - environment = grpcJob.getJobDescriptor().getContainer().getEnvMap(); - securityGroups = grpcJob.getJobDescriptor().getContainer().getSecurityProfile().getSecurityGroupsList().stream().collect(Collectors.toList()); - iamProfile = grpcJob.getJobDescriptor().getContainer().getSecurityProfile().getIamRole(); - allocateIpAddress = true; - submittedAt = new Date(grpcJob.getStatus().getTimestamp()); - softConstraints = new ArrayList(); - softConstraints.addAll(grpcJob.getJobDescriptor().getContainer().getSoftConstraints().getConstraintsMap().keySet()); - hardConstraints = new ArrayList(); - hardConstraints.addAll(grpcJob.getJobDescriptor().getContainer().getHardConstraints().getConstraintsMap().keySet()); - - jobState = grpcJob.getStatus().getState().toString(); - - if (grpcJob.getJobDescriptor().getContainer().getResources().getEfsMountsCount() > 0) { - efs = new Efs(); - ContainerResources.EfsMount firstMount = grpcJob.getJobDescriptor().getContainer().getResources().getEfsMounts(0); - efs.setEfsId(firstMount.getEfsId()); - efs.setMountPerm(firstMount.getMountPerm().toString()); - efs.setMountPoint(firstMount.getMountPoint()); - if (firstMount.getEfsRelativeMountPoint() != null) { - efs.setEfsRelativeMountPoint(firstMount.getEfsRelativeMountPoint()); - } - } - - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getApplicationName() { - return applicationName; - } - - public void setApplicationName(String applicationName) { - this.applicationName = applicationName; - } - - public String getUser() { - return user; - } - - public void setUser(String user) { - this.user = user; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public String getEntryPoint() { - return entryPoint; - } - - public void setEntryPoint(String entryPoint) { - this.entryPoint = entryPoint; - } - - public int getInstances() { - return instances; - } - - public void setInstances(int instances) { - this.instances = instances; - } - - public int 
getInstancesMin() { - return instancesMin; - } - - public void setInstancesMin(int instancesMin) { - this.instancesMin = instancesMin; - } - - public int getInstancesMax() { - return instancesMax; - } - - public void setInstancesMax(int instancesMax) { - this.instancesMax = instancesMax; - } - - public int getInstancesDesired() { - return instancesDesired; - } - - public void setInstancesDesired(int instancesDesired) { - this.instancesDesired = instancesDesired; - } - - public int getCpu() { - return cpu; - } - - public void setCpu(int cpu) { - this.cpu = cpu; - } - - public int getMemory() { - return memory; - } - - public void setMemory(int memory) { - this.memory = memory; - } - - public int getDisk() { - return disk; - } - - public void setDisk(int disk) { - this.disk = disk; - } - - public void setGpu(int gpu) { - this.gpu = gpu; - } - - public int getGpu() { - return gpu; - } - - public int[] getPorts() { - return ports; - } - - public void setPorts(int[] ports) { - this.ports = ports; - } - - public Map getEnvironment() { - return environment; - } - - public void setEnvironment(Map environment) { - this.environment = environment; - } - - public int getRetries() { - return retries; - } - - public void setRetries(int retries) { - this.retries = retries; - } - - public int getRuntimeLimitSecs() { - return runtimeLimitSecs; - } - - public void setRuntimeLimitSecs(int runtimeLimitSecs) { - this.runtimeLimitSecs = runtimeLimitSecs; - } - - public boolean isAllocateIpAddress() { - return allocateIpAddress; - } - - public void setAllocateIpAddress(boolean allocateIpAddress) { - this.allocateIpAddress = allocateIpAddress; - } - - public Date getSubmittedAt() { - return submittedAt; - } - - public void setSubmittedAt(Date submittedAt) { - this.submittedAt = submittedAt; - } - - public List getTasks() { - return tasks; - } - - public void setTasks(List tasks) { - this.tasks = tasks; - } - - public String getIamProfile() { - return iamProfile; - } - - public void setIamProfile(String iamProfile) { - this.iamProfile = iamProfile; - } - - public String getCapacityGroup() { - return capacityGroup; - } - - public void setCapacityGroup(String capacityGroup) { - this.capacityGroup = capacityGroup; - } - - public Boolean isInService() { - return inService; - } - - public void setInService(Boolean inService) { - this.inService = inService; - } - - public List getSecurityGroups() { - return securityGroups; - } - - public void setSecurityGroups(List securityGroups) { - this.securityGroups = securityGroups; - } - - public Map getLabels() { - return labels; - } - - public void setLabels(Map labels) { - this.labels = labels; - } - - public Map getContainerAttributes() { - return containerAttributes; - } - - public void setContainerAttributes(Map containerAttributes) { - this.containerAttributes = containerAttributes; - } - - public String getJobGroupStack() { - return jobGroupStack; - } - - public void setJobGroupStack(String jobGroupStack) { - this.jobGroupStack = jobGroupStack; - } - - public String getJobGroupDetail() { - return jobGroupDetail; - } - - public void setJobGroupDetail(String jobGroupDetail) { - this.jobGroupDetail = jobGroupDetail; - } - - public String getJobGroupSequence() { - return jobGroupSequence; - } - - public void setJobGroupSequence(String jobGroupSequence) { - this.jobGroupSequence = jobGroupSequence; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public List getHardConstraints() { - return 
hardConstraints; - } - - public void setHardConstraints(List hardConstraints) { - this.hardConstraints = hardConstraints; - } - - public List getSoftConstraints() { - return softConstraints; - } - - public void setSoftConstraints(List softConstraints) { - this.softConstraints = softConstraints; - } - - public int getNetworkMbps() { - return networkMbps; - } - - public void setNetworkMbps(int networkMbps) { - this.networkMbps = networkMbps; - } - - public Efs getEfs() { - return efs; - } - - public void setEfs(Efs efs) { - this.efs = efs; - } - - public MigrationPolicy getMigrationPolicy() { - return migrationPolicy; - } - - public void setMigrationPolicy(MigrationPolicy migrationPolicy) { - this.migrationPolicy = migrationPolicy; - } - - public String getJobState() { - return jobState; - } - - public void setDigest(String digest) { this.digest = digest; } - - public String getDigest() { return digest; } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/JobDescription.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/JobDescription.java deleted file mode 100644 index b4c6140de9a..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/JobDescription.java +++ /dev/null @@ -1,566 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.client.model; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.netflix.titus.grpc.protogen.*; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -public class JobDescription { - - private String name; - private String type; - private String applicationName; - private String version; - private String digest; - private int instancesDesired; - private int instancesMax; - private int instancesMin; - private int cpu; - private int memory; - private int disk; - private int gpu; - private int retries; - private int runtimeLimitSecs; - private int networkMbps; - private int[] ports; - private Map env; - private boolean allocateIpAddress; - - private String appName; - private String jobGroupStack; - private String jobGroupDetail; - private String jobGroupSequence; - private String user; - private List softConstraints; - private List hardConstraints; - private List securityGroups; - private Map labels; - private Map containerAttributes; - private Boolean inService; - - private String entryPoint; - private String iamProfile; - private String capacityGroup; - private Efs efs; - private MigrationPolicy migrationPolicy; - private Map securityAttributes; - - //Soft/Hard constraints - - JobDescription() { - } - - JobDescription(SubmitJobRequest request) { - type = request.getJobType(); - name = request.getJobName(); - applicationName = request.getDockerImageName(); - version = request.getDockerImageVersion(); - digest = request.getDockerDigest(); - instancesDesired = request.getInstanceDesired(); - instancesMin = request.getInstanceMin(); - instancesMax = request.getInstanceMax(); - cpu = request.getCpu(); - memory = request.getMemory(); - disk = request.getDisk(); - ports = request.getPorts(); - networkMbps = request.getNetworkMbps(); - allocateIpAddress = request.getAllocateIpAddress(); - appName = request.getApplication(); - jobGroupStack = request.getStack(); - jobGroupDetail = request.getDetail(); - softConstraints = request.getConstraints().stream() - .filter((c) -> c.getConstraintType() == SubmitJobRequest.Constraint.ConstraintType.SOFT) - .map(SubmitJobRequest.Constraint::getConstraint) - .collect(Collectors.toList()); - hardConstraints = request.getConstraints().stream() - .filter((c) -> c.getConstraintType() == SubmitJobRequest.Constraint.ConstraintType.HARD) - .map(SubmitJobRequest.Constraint::getConstraint) - .collect(Collectors.toList()); - user = request.getUser(); - env = request.getEnv() != null ? request.getEnv() : new HashMap<>(); - labels = request.getLabels() != null ? request.getLabels() : new HashMap<>(); - containerAttributes = request.getContainerAttributes() != null ? 
request.getContainerAttributes() : new HashMap<>(); - entryPoint = request.getEntryPoint(); - iamProfile = request.getIamProfile(); - capacityGroup = request.getCapacityGroup(); - securityGroups = request.getSecurityGroups(); - inService = request.getInService(); - migrationPolicy = request.getMigrationPolicy(); - efs = request.getEfs(); - gpu = request.getGpu(); - retries = request.getRetries(); - runtimeLimitSecs = request.getRuntimeLimitSecs(); - securityAttributes = new HashMap(); - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getApplicationName() { - return applicationName; - } - - public void setApplicationName(String applicationName) { - this.applicationName = applicationName; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public int getInstancesMin() { - return instancesMin; - } - - public void setInstancesMin(int instances) { - this.instancesMin = instancesMin; - } - - public int getInstancesMax() { - return instancesMax; - } - - public void setInstancesMax(int instances) { - this.instancesMax = instancesMax; - } - - public int getInstancesDesired() { - return instancesDesired; - } - - public void setInstancesDesired(int instances) { - this.instancesDesired = instancesDesired; - } - - public int getCpu() { - return cpu; - } - - public void setCpu(int cpu) { - this.cpu = cpu; - } - - public int getMemory() { - return memory; - } - - public void setMemory(int memory) { - this.memory = memory; - } - - public int getDisk() { - return disk; - } - - public void setDisk(int disk) { - this.disk = disk; - } - - public void setGpu(int gpu) { - this.gpu = gpu; - } - - public int getGpu() { - return gpu; - } - - public void setRetries() { - this.retries = retries; - } - - public int getRetries() { - return retries; - } - - public int getRuntimeLimitSecs() { - return runtimeLimitSecs; - } - - public void setRuntimeLimitSecs(int runtimeLimitSecs) { - this.runtimeLimitSecs = runtimeLimitSecs; - } - - public int getNetworkMbps() { - return networkMbps; - } - - public void setEfs(Efs efs) { - this.efs = efs; - } - - public Efs getEfs() { - return efs; - } - - public void setNetworkMbps(int networkMbps) { - this.networkMbps = networkMbps; - } - - public int[] getPorts() { - return ports; - } - - public void setPorts(int[] ports) { - this.ports = ports; - } - - public boolean getAllocateIpAddress() { - return allocateIpAddress; - } - - public void setAllocateIpAddress(boolean name) { - this.allocateIpAddress = allocateIpAddress; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getJobGroupStack() { - return jobGroupStack; - } - - public void setJobGroupStack(String jobGroupStack) { - this.jobGroupStack = jobGroupStack; - } - - public String getJobGroupDetail() { - return jobGroupDetail; - } - - public void setJobGroupDetail(String jobGroupDetail) { - this.jobGroupDetail = jobGroupDetail; - } - - public String getJobGroupSequence() { - return jobGroupSequence; - } - - public void setJobGroupSequence(String jobGroupSequence) { - this.jobGroupSequence = jobGroupSequence; - } - - public String getUser() { - return user; - } - - public void setUser(String user) { - this.user = user; - } - - public List getSoftConstraints() { - return softConstraints; - } - - public void setSoftConstraints(List softConstraints) { - 
this.softConstraints = softConstraints; - } - - public List getHardConstraints() { - return hardConstraints; - } - - public void setHardConstraints(List hardConstraints) { - this.hardConstraints = hardConstraints; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public Map getEnv() { - return env; - } - - public void setEnv(Map env) { - this.env = env; - } - - public void setContainerAttributes(Map containerAttributes) { - this.containerAttributes = containerAttributes; - } - - public void setEnvParam(String key, String value) { - if (this.env == null) { - this.env = new HashMap<>(); - } - this.env.put(key, value); - } - - public Map getLabels() { - return labels; - } - - public void setLabels(Map labels) { - this.labels = labels; - } - - public String getEntryPoint() { - return entryPoint; - } - - public void setEntryPoint(String entryPoint) { - this.entryPoint = entryPoint; - } - - public String getIamProfile() { - return iamProfile; - } - - public void setIamProfile(String iamProfile) { - this.iamProfile = iamProfile; - } - - public String getCapacityGroup() { - return capacityGroup; - } - - public void setCapacityGroup(String capacityGroup) { - this.capacityGroup = capacityGroup; - } - - public Boolean getInService() { - return inService; - } - - public void setInService(Boolean inService) { - this.inService = inService; - } - - public MigrationPolicy getMigrationPolicy() { - return migrationPolicy; - } - - public void setMigrationPolicy(MigrationPolicy migrationPolicy) { - this.migrationPolicy = migrationPolicy; - } - - public List getSecurityGroups() { - return securityGroups; - } - - public void setSecurityGroups(List securityGroups) { - this.securityGroups = securityGroups; - } - - public void setDigest(String digest) { this.digest = digest; } - - public String getDigest() { return digest; } - - @JsonIgnore - public Map getSecurityAttributes() { - return securityAttributes; - } - - @JsonIgnore - public JobDescriptor getGrpcJobDescriptor() { - - // trying to keep the same order as in the proto definition https://stash.corp.netflix.com/projects/TN/repos/titus-api-definitions/browse/src/main/proto/netflix/titus/titus_job_api.proto - - JobDescriptor.Builder jobDescriptorBuilder = JobDescriptor.newBuilder(); - - jobDescriptorBuilder.setOwner(Owner.newBuilder().setTeamEmail(user)); - jobDescriptorBuilder.setApplicationName(appName); - - if (!labels.isEmpty()) { - jobDescriptorBuilder.putAllAttributes(labels); - } - - Container.Builder containerBuilder = Container.newBuilder(); - ContainerResources.Builder containerResources = ContainerResources.newBuilder().setAllocateIP(true); - - if (cpu != 0) { - containerResources.setCpu(cpu); - } - - if (gpu != 0) { - containerResources.setGpu(gpu); - } - - if (networkMbps != 0) { - containerResources.setNetworkMbps(networkMbps); - } - - if (memory != 0) { - containerResources.setMemoryMB(memory); - } - - if (disk != 0) { - containerResources.setDiskMB(disk); - } - - if (efs != null && efs.getEfsId() != null) { - ContainerResources.EfsMount.Builder efsBuilder = ContainerResources.EfsMount.newBuilder(); - efsBuilder.setEfsId(efs.getEfsId()); - efsBuilder.setMountPoint(efs.getMountPoint()); - efsBuilder.setMountPerm(convertMountPerm(efs.getMountPerm())); - if (efs.getEfsRelativeMountPoint() != null) { - efsBuilder.setEfsRelativeMountPoint(efs.getEfsRelativeMountPoint()); - } - containerResources.addEfsMounts(efsBuilder); - } - - containerBuilder.setResources(containerResources); - 
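    // A note on the setters deleted above: several were effective no-ops that
    // assigned a field to itself while ignoring the parameter
    // (setInstancesMin/Max/Desired each took "int instances" but wrote
    // this.instancesMin = instancesMin, and setAllocateIpAddress ignored its
    // "name" argument the same way), and setRetries() declared no parameter at
    // all. Corrected forms would simply name the parameter after the field:
    //   public void setInstancesDesired(int instancesDesired) { this.instancesDesired = instancesDesired; }
    //   public void setRetries(int retries) { this.retries = retries; }
    //   public void setAllocateIpAddress(boolean allocateIpAddress) { this.allocateIpAddress = allocateIpAddress; }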
- SecurityProfile.Builder securityProfile = SecurityProfile.newBuilder(); - - if (securityGroups != null && !securityGroups.isEmpty()) { - securityGroups.forEach(sg -> - { - securityProfile.addSecurityGroups(sg); - } - ); - } - - if (iamProfile != null) { - securityProfile.setIamRole(iamProfile); - } - - if (!securityAttributes.isEmpty()) { - securityProfile.putAllAttributes(securityAttributes); - } - - containerBuilder.setSecurityProfile(securityProfile); - - Image.Builder imageBuilder = Image.newBuilder(); - imageBuilder.setName(applicationName); - if(digest!=null){ - imageBuilder.setDigest(digest); - } else { - imageBuilder.setTag(version); - } - - containerBuilder.setImage(imageBuilder); - - if (entryPoint != null) { - containerBuilder.addEntryPoint(entryPoint); - } - - if (!containerAttributes.isEmpty()) { - containerBuilder.putAllAttributes(containerAttributes); - } - - if (!env.isEmpty()) { - containerBuilder.putAllEnv(env); - } - - if (!softConstraints.isEmpty()) { - containerBuilder.setSoftConstraints(constraintTransformer(softConstraints)); - } - - if (!hardConstraints.isEmpty()) { - containerBuilder.setHardConstraints(constraintTransformer(hardConstraints)); - } - - jobDescriptorBuilder.setContainer(containerBuilder); - - Capacity.Builder jobCapacity = Capacity.newBuilder(); - jobCapacity.setMin(instancesMin).setMax(instancesMax).setDesired(instancesDesired); - - if (type.equals("service")) { - JobGroupInfo.Builder jobGroupInfoBuilder = JobGroupInfo.newBuilder(); - if (jobGroupStack != null) { - jobGroupInfoBuilder.setStack(jobGroupStack); - } - if (jobGroupDetail != null) { - jobGroupInfoBuilder.setDetail(jobGroupDetail); - } - jobGroupInfoBuilder.setSequence(jobGroupSequence); - jobDescriptorBuilder.setJobGroupInfo(jobGroupInfoBuilder); - - if (inService == null) { - inService = true; - } - - com.netflix.titus.grpc.protogen.MigrationPolicy serviceMigrationPolicy; - - if (migrationPolicy != null && migrationPolicy.getType().equals("selfManaged")) { - serviceMigrationPolicy = com.netflix.titus.grpc.protogen.MigrationPolicy.newBuilder().setSelfManaged(com.netflix.titus.grpc.protogen.MigrationPolicy.SelfManaged.newBuilder().build()).build(); - } else { - serviceMigrationPolicy = com.netflix.titus.grpc.protogen.MigrationPolicy.newBuilder().setSystemDefault(com.netflix.titus.grpc.protogen.MigrationPolicy.SystemDefault.newBuilder().build()).build(); - } - - jobDescriptorBuilder.setService( - ServiceJobSpec.newBuilder().setEnabled(inService) - .setCapacity(jobCapacity) - .setMigrationPolicy(serviceMigrationPolicy) - .setRetryPolicy(RetryPolicy.newBuilder().setExponentialBackOff(RetryPolicy.ExponentialBackOff.newBuilder().setInitialDelayMs(5000).setMaxDelayIntervalMs(300000)))); - } - - if (type.equals("batch")) { - BatchJobSpec.Builder batchJobSpec = BatchJobSpec.newBuilder(); - batchJobSpec.setSize(instancesDesired); - if (runtimeLimitSecs != 0) { - batchJobSpec.setRuntimeLimitSec(runtimeLimitSecs); - } - batchJobSpec.setRetryPolicy(RetryPolicy.newBuilder().setImmediate(RetryPolicy.Immediate.newBuilder().setRetries(retries))); - jobDescriptorBuilder.setBatch(batchJobSpec); - } - - if (capacityGroup == null || capacityGroup.isEmpty()) { - jobDescriptorBuilder.setCapacityGroup(jobDescriptorBuilder.getApplicationName()); - } else { - jobDescriptorBuilder.setCapacityGroup(capacityGroup); - } - - return jobDescriptorBuilder.build(); - } - - private MountPerm convertMountPerm(String mountPerm) { - switch (mountPerm) { - case "RO": - return MountPerm.RO; - case "WO": - return MountPerm.WO; 
- default: - return MountPerm.RW; - } - } - - private Constraints.Builder constraintTransformer(List constraints) { - Constraints.Builder constraintsBuilder = Constraints.newBuilder(); - constraints.forEach(constraint -> - { - constraintsBuilder.putConstraints(constraint, "true"); - } - ); - return constraintsBuilder; - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobRequest.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobRequest.java deleted file mode 100644 index f471b9e8861..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobRequest.java +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.client.model; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class SubmitJobRequest { - public static class Constraint { - enum ConstraintType {SOFT, HARD} - - public static final String UNIQUE_HOST = "UniqueHost"; - public static final String ZONE_BALANCE = "ZoneBalance"; - - public static Constraint hard(String constraint) { - return new Constraint(ConstraintType.HARD, constraint); - } - - public static Constraint soft(String constraint) { - return new Constraint(ConstraintType.SOFT, constraint); - } - - private final ConstraintType constraintType; - private final String constraint; - - public Constraint(ConstraintType constraintType, String constraint) { - this.constraintType = constraintType; - this.constraint = constraint; - } - - public ConstraintType getConstraintType() { - return constraintType; - } - - public String getConstraint() { - return constraint; - } - } - - private String credentials; - private String jobType; - private String application; - private String jobName; - private String dockerImageName; - private String dockerImageVersion; - private String dockerDigest; - private String stack; - private String detail; - private String user; - private String entryPoint; - private String iamProfile; - private String capacityGroup; - private Boolean inService = true; - private int instancesMin; - private int instancesMax; - private int instancesDesired; - private int cpu; - private int gpu; - private int memory; - private int disk; - private int retries; - private int runtimeLimitSecs; - private int networkMbps; - private Efs efs; - private int[] ports; - private Map env; - private boolean allocateIpAddress; - private List constraints = new ArrayList<>(); - private Map labels = new HashMap(); - private Map containerAttributes = new HashMap(); - private List securityGroups = null; - private MigrationPolicy migrationPolicy = null; - - public SubmitJobRequest withJobType(String jobType) { - this.jobType = jobType; - return this; - } - - public SubmitJobRequest withJobName(String jobName) { - this.jobName = jobName; - 
return this; - } - - public SubmitJobRequest withApplication(String application) { - this.application = application; - return this; - } - - public SubmitJobRequest withDockerImageName(String dockerImageName) { - this.dockerImageName = dockerImageName; - return this; - } - - public SubmitJobRequest withDockerImageVersion(String dockerImageVersion) { - this.dockerImageVersion = dockerImageVersion; - return this; - } - - public SubmitJobRequest withDockerDigest(String dockerDigest) { - this.dockerDigest = dockerDigest; - return this; - } - - public SubmitJobRequest withInstancesMin(int instancesMin) { - this.instancesMin = instancesMin; - return this; - } - - public SubmitJobRequest withInstancesMax(int instancesMax) { - this.instancesMax = instancesMax; - return this; - } - - public SubmitJobRequest withInstancesDesired(int instancesDesired) { - this.instancesDesired = instancesDesired; - return this; - } - - public SubmitJobRequest withCpu(int cpu) { - this.cpu = cpu; - return this; - } - - public SubmitJobRequest withMemory(int memory) { - this.memory = memory; - return this; - } - - public SubmitJobRequest withDisk(int disk) { - this.disk = disk; - return this; - } - - public SubmitJobRequest withRetries(int retries) { - this.retries = retries; - return this; - } - - public SubmitJobRequest withRuntimeLimitSecs(int runtimeLimitSecs) { - this.runtimeLimitSecs = runtimeLimitSecs; - return this; - } - - public SubmitJobRequest withGpu(int gpu) { - this.gpu = gpu; - return this; - } - - public SubmitJobRequest withPorts(int[] ports) { - this.ports = ports; - return this; - } - - public SubmitJobRequest withNetworkMbps(int networkMbps) { - this.networkMbps = networkMbps; - return this; - } - - public SubmitJobRequest withEnv(Map env) { - this.env = env; - return this; - } - - public SubmitJobRequest withAllocateIpAddress(boolean allocateIpAddress) { - this.allocateIpAddress = allocateIpAddress; - return this; - } - - public SubmitJobRequest withStack(String stack) { - this.stack = stack; - return this; - } - - public SubmitJobRequest withDetail(String detail) { - this.detail = detail; - return this; - } - - public SubmitJobRequest withUser(String user) { - this.user = user; - return this; - } - - public SubmitJobRequest withEntryPoint(String entryPoint) { - this.entryPoint = entryPoint; - return this; - } - - public SubmitJobRequest withIamProfile(String iamProfile) { - this.iamProfile = iamProfile; - return this; - } - - public SubmitJobRequest withSecurityGroups(List securityGroups) { - this.securityGroups = securityGroups; - return this; - } - - public SubmitJobRequest withCapacityGroup(String capacityGroup) { - this.capacityGroup = capacityGroup; - return this; - } - - public SubmitJobRequest withConstraint(Constraint constraint) { - this.constraints.add(constraint); - return this; - } - - public SubmitJobRequest withLabels(Map labels) { - this.labels = labels; - return this; - } - - public SubmitJobRequest withContainerAttributes(Map containerAttributes) { - this.containerAttributes = containerAttributes; - return this; - } - - public SubmitJobRequest withLabel(String key, String value) { - this.labels.put(key, value); - return this; - } - - public SubmitJobRequest withInService(Boolean inService) { - this.inService = inService; - return this; - } - - public SubmitJobRequest withMigrationPolicy(MigrationPolicy migrationPolicy) { - this.migrationPolicy = migrationPolicy; - return this; - } - - public SubmitJobRequest withEfs(Efs efs) { - this.efs = efs; - return this; - } - - public 
SubmitJobRequest withCredentials(String credentials) { - this.credentials = credentials; - return this; - } - - // Getters - - - public String getJobType() { - return jobType; - } - - public int getInstanceMin() { - return instancesMin; - } - - public int getInstanceMax() { - return instancesMax; - } - - public int getInstanceDesired() { - return instancesDesired; - } - - public int getCpu() { - return cpu; - } - - public int getGpu() { - return gpu; - } - - public int getRetries() { - return retries; - } - - public int getRuntimeLimitSecs() { - return runtimeLimitSecs; - } - - public int getMemory() { - return memory; - } - - public int getDisk() { - return disk; - } - - public int getNetworkMbps() { - return networkMbps; - } - - public int[] getPorts() { - return ports; - } - - public Map getEnv() { - return env; - } - - public String getApplication() { - return application; - } - - public String getJobName() { - return jobName; - } - - public String getDockerImageName() { - return dockerImageName; - } - - public String getDockerImageVersion() { - return dockerImageVersion; - } - - public String getDockerDigest() { return dockerDigest; } - - public boolean getAllocateIpAddress() { - return allocateIpAddress; - } - - public String getStack() { - return stack; - } - - public String getDetail() { - return detail; - } - - public String getUser() { - return user; - } - - public List getConstraints() { - return constraints; - } - - public List getSecurityGroups() { - return securityGroups; - } - - public String getEntryPoint() { - return entryPoint; - } - - public String getIamProfile() { - return iamProfile; - } - - public Boolean getInService() { - return inService; - } - - public String getCapacityGroup() { - return capacityGroup; - } - - public Map getLabels() { - return labels; - } - - public Map getContainerAttributes() { - return containerAttributes; - } - - public JobDescription getJobDescription() { - return new JobDescription(this); - } - - public Efs getEfs() { - return efs; - } - - public String getCredentials() { - return credentials; - } - - public MigrationPolicy getMigrationPolicy() { - return migrationPolicy; - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Task.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Task.java deleted file mode 100644 index 9781a0861fd..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Task.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.client.model; - -import com.netflix.titus.grpc.protogen.TaskStatus; - -import java.util.Date; -import java.util.HashMap; -import java.util.Map; - -public class Task { - - public Task() { - } - - public Task(com.netflix.titus.grpc.protogen.Task grpcTask) { - id = grpcTask.getId(); - state = TaskState.from(grpcTask.getStatus().getState().name(), grpcTask.getStatus().getReasonCode()); - jobId = grpcTask.getJobId(); - instanceId = grpcTask.getTaskContextOrDefault("v2.taskInstanceId", id); - host = grpcTask.getTaskContextOrDefault("agent.host", null); - region = grpcTask.getTaskContextOrDefault("agent.region", null); - zone = grpcTask.getTaskContextOrDefault("agent.zone", null); - submittedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.Accepted); - launchedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.Launched); - startedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.StartInitiated); - finishedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.Finished); - containerIp = grpcTask.getTaskContextOrDefault("task.containerIp", null); - logLocation = new HashMap<>(); - logLocation.put("ui", grpcTask.getLogLocation().getUi().getUrl()); - logLocation.put("liveStream", grpcTask.getLogLocation().getLiveStream().getUrl()); - HashMap s3 = new HashMap<>(); - s3.put("accountId", grpcTask.getLogLocation().getS3().getAccountId()); - s3.put("accountName", grpcTask.getLogLocation().getS3().getAccountName()); - s3.put("region", grpcTask.getLogLocation().getS3().getRegion()); - s3.put("bucket", grpcTask.getLogLocation().getS3().getBucket()); - s3.put("key", grpcTask.getLogLocation().getS3().getKey()); - logLocation.put("s3", s3); - } - - private Date getTimestampFromStatus(com.netflix.titus.grpc.protogen.Task grpcTask, TaskStatus.TaskState state) { - return grpcTask.getStatusHistoryList().stream().filter(status -> status.getState().equals(state)).findFirst().map(status -> new Date(status.getTimestamp())).orElse(null); - } - - private String id; - private String jobId; - private String instanceId; - private TaskState state; - private String host; - private String region; - private String zone; - private Date submittedAt; - private Date launchedAt; - private Date startedAt; - private Date finishedAt; - private String message; - private Map data; - private String stdoutLive; - private String logs; - private String snapshots; - private String containerIp; - - private Map logLocation; - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getInstanceId() { - return instanceId; - } - - public void setInstanceId(String instanceId) { - this.instanceId = instanceId; - } - - public String getJobId() { - return jobId; - } - - public void setJobId(String jobId) { - this.jobId = jobId; - } - - public TaskState getState() { - return state; - } - - public void setState(TaskState state) { - this.state = state; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public String getRegion() { - return region; - } - - public void setRegion(String region) { - this.region = region; - } - - public String getZone() { - return zone; - } - - public void setZone(String zone) { - this.zone = zone; - } - - public Date getSubmittedAt() { - return submittedAt; - } - - public void setSubmittedAt(Date submittedAt) { - this.submittedAt = submittedAt; - } - - public Date getLaunchedAt() { - return launchedAt; - } - - 
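Note on the removed Task model above: every lifecycle date (submittedAt, launchedAt, startedAt, finishedAt) is derived the same way, by scanning the task's gRPC status history for the first entry in the matching state. A minimal standalone sketch of that lookup, assuming only the com.netflix.titus.grpc.protogen classes the model already uses:

    import java.util.Date;
    import com.netflix.titus.grpc.protogen.Task;
    import com.netflix.titus.grpc.protogen.TaskStatus;

    class TaskTimestamps {
      // First status-history entry in the requested state wins; null means the
      // task never reached that state (the same contract as the model above).
      static Date timestampFor(Task grpcTask, TaskStatus.TaskState state) {
        return grpcTask.getStatusHistoryList().stream()
            .filter(status -> status.getState().equals(state))
            .findFirst()
            .map(status -> new Date(status.getTimestamp()))
            .orElse(null);
      }
    }

    // e.g. submittedAt == timestampFor(grpcTask, TaskStatus.TaskState.Accepted)
    //      launchedAt  == timestampFor(grpcTask, TaskStatus.TaskState.Launched)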
public void setLaunchedAt(Date launchedAt) { - this.launchedAt = launchedAt; - } - - public Date getStartedAt() { - return startedAt; - } - - public void setStartedAt(Date startedAt) { - this.startedAt = startedAt; - } - - public Date getFinishedAt() { - return finishedAt; - } - - public void setFinishedAt(Date finishedAt) { - this.finishedAt = finishedAt; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - - public Map getData() { - return data; - } - - public void setData(Map data) { - this.data = data; - } - - public String getStdoutLive() { - return stdoutLive; - } - - public void setStdoutLive(String stdoutLive) { - this.stdoutLive = stdoutLive; - } - - public String getLogs() { - return logs; - } - - public void setLogs(String logs) { - this.logs = logs; - } - - public String getSnapshots() { - return snapshots; - } - - public void setSnapshots(String snapshots) { - this.snapshots = snapshots; - } - - public String getContainerIp() { - return containerIp; - } - - public void setContainerIp(String containerIp) { - this.containerIp = containerIp; - } - - public Map getLogLocation() { - return logLocation; - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TaskState.java b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TaskState.java deleted file mode 100644 index ddc92d43f6c..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TaskState.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.client.model; - -public enum TaskState { - ALL, - RUNNING, DISPATCHED, FAILED, STOPPED, CRASHED, FINISHED, - STARTING, QUEUED, - TERMINATING, DEAD, PENDING; // Deprecated - - public static TaskState from(String taskStateStr) { - for (TaskState taskState : TaskState.values()) { - if (taskState.name().equals(taskStateStr)) return taskState; - } - switch (taskStateStr) { - case "Accepted": - return TaskState.QUEUED; - case "Launched": - return TaskState.DISPATCHED; - case "StartInitiated": - return TaskState.STARTING; - case "Started": - return TaskState.RUNNING; - case "KillInitiated": - case "Disconnected": - case "Finished": - return TaskState.FINISHED; - default: - return null; - } - } - - public static TaskState from(String taskStateStr, String reasonCode) { - - if (taskStateStr.equals("Finished")) { - switch (reasonCode) { - case "normal": - return TaskState.FINISHED; - case "killed": - return TaskState.STOPPED; - case "crashed": - case "lost": - return TaskState.CRASHED; - case "failed": - return TaskState.FAILED; - default: - return TaskState.FINISHED; - } - } - - switch (taskStateStr) { - case "Accepted": - return TaskState.QUEUED; - case "Launched": - return TaskState.DISPATCHED; - case "StartInitiated": - return TaskState.STARTING; - case "Started": - return TaskState.RUNNING; - case "KillInitiated": - case "Disconnected": - return TaskState.FINISHED; - default: - return null; - } - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/credentials/NetflixTitusCredentials.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/credentials/NetflixTitusCredentials.groovy index 1f6cf490e34..0dbc8c0d05d 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/credentials/NetflixTitusCredentials.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/credentials/NetflixTitusCredentials.groovy @@ -16,18 +16,21 @@ package com.netflix.spinnaker.clouddriver.titus.credentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials + import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion import com.netflix.spinnaker.clouddriver.titus.client.security.TitusCredentials +import com.netflix.spinnaker.fiat.model.resources.Permissions -class NetflixTitusCredentials implements AccountCredentials { +class NetflixTitusCredentials extends AbstractAccountCredentials { private static final String CLOUD_PROVIDER = TitusCloudProvider.ID final String name final String environment final String accountType final List requiredGroupMembership = Collections.emptyList() + final Permissions permissions final String bastionHost final String registry final String discovery @@ -36,12 +39,11 @@ class NetflixTitusCredentials implements AccountCredentials { final boolean discoveryEnabled final String stack final String eurekaName - final boolean autoscalingEnabled - final boolean loadBalancingEnabled - final boolean splitCachingEnabled private final List regions + NetflixTitusCredentials() {} + NetflixTitusCredentials(String name, String environment, String accountType, @@ -54,10 +56,8 @@ class NetflixTitusCredentials implements AccountCredentials { String discovery, String stack, List requiredGroupMembership, - String eurekaName, - boolean autoscalingEnabled, - boolean 
loadBalancingEnabled, - boolean splitCachingEnabled + Permissions permissions, + String eurekaName ) { this.name = name this.environment = environment @@ -71,10 +71,8 @@ class NetflixTitusCredentials implements AccountCredentials { this.discovery = discovery this.stack = stack this.requiredGroupMembership = requiredGroupMembership + this.permissions = permissions this.eurekaName = eurekaName - this.autoscalingEnabled = autoscalingEnabled - this.loadBalancingEnabled = loadBalancingEnabled - this.splitCachingEnabled = splitCachingEnabled } @Override @@ -119,19 +117,11 @@ class NetflixTitusCredentials implements AccountCredentials { return requiredGroupMembership } - String getEurekaName() { - return eurekaName - } - - boolean getAutoscalingEnabled() { - return autoscalingEnabled - } - - boolean isLoadBalancingEnabled() { - return loadBalancingEnabled + Permissions getPermissions() { + return permissions; } - boolean getSplitCachingEnabled() { - return splitCachingEnabled + String getEurekaName() { + return eurekaName } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolver.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolver.groovy index c5fd0c1d048..c96b51a9428 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolver.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolver.groovy @@ -41,8 +41,9 @@ class TitusServerGroupNameResolver extends AbstractServerGroupNameResolver { @Override List getTakenSlots(String clusterName) { def clusterNameParts = Names.parseName(clusterName) + List jobs = titusClient.findJobsByApplication(clusterNameParts.app) - .findAll { Names.parseName(it.name).cluster == Names.parseName(clusterName).cluster } + .findAll { Names.parseName(it.name).cluster == clusterName } return jobs.collect { Job job -> return new AbstractServerGroupNameResolver.TakenSlot( diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusJobAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusJobAtomicOperationConverter.groovy new file mode 100644 index 00000000000..72ab7d58379 --- /dev/null +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusJobAtomicOperationConverter.groovy @@ -0,0 +1,91 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.converters + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.titus.TitusOperation +import com.netflix.spinnaker.clouddriver.titus.caching.providers.TitusJobProvider +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.DestroyTitusJobAtomicOperation +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException +import groovy.util.logging.Slf4j +import io.grpc.Status +import io.grpc.StatusRuntimeException +import org.springframework.beans.factory.annotation.Autowired +import org.springframework.stereotype.Component + +@Slf4j +@Component +@TitusOperation(AtomicOperations.DESTROY_JOB) +class DestroyTitusJobAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { + + private final TitusJobProvider titusJobProvider + + @Autowired + DestroyTitusJobAtomicOperationConverter( + TitusJobProvider titusJobProvider + ) { + this.titusJobProvider = titusJobProvider + } + + @Override + AtomicOperation convertOperation(Map input) { + new DestroyTitusJobAtomicOperation(convertDescription(input)) + } + + @Override + DestroyTitusJobDescription convertDescription(Map input) { + def converted = objectMapper.convertValue(input, DestroyTitusJobDescription) + converted.credentials = getCredentialsObject(input.credentials as String) + + try { + def job = titusJobProvider.collectJob(converted.credentials.name, converted.region, converted.jobId) + converted.applications = [job.application] as Set + converted.requiresApplicationRestriction = !converted.applications.isEmpty() + converted.serverGroupName = job.name + } catch (Exception e) { + if (e instanceof StatusRuntimeException) { + def statusRuntimeException = (StatusRuntimeException) e + if (statusRuntimeException.status.code == Status.NOT_FOUND.code) { + throw new NotFoundException( + String.format( + "Titus job not found (jobId: %s, account: %s, region: %s)", + converted.jobId, + converted.credentials.name, + converted.region + ) + ) + } + } + + converted.applications = [] + converted.requiresApplicationRestriction = true + log.error( + "Unable to determine application for job (jobId: {}, account: {}, region: {})", + converted.jobId, + converted.credentials.name, + converted.region, + e + ) + } + + return converted + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusServerGroupAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusServerGroupAtomicOperationConverter.groovy index 1c461fd4498..6383cfea018 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DestroyTitusServerGroupAtomicOperationConverter.groovy @@ -39,7 +39,7 @@ class DestroyTitusServerGroupAtomicOperationConverter extends AbstractAtomicOper @Override AtomicOperation convertOperation(Map input) { - new DestroyTitusServerGroupAtomicOperation(titusClientProvider, convertDescription(input)) + new 
DestroyTitusServerGroupAtomicOperation(convertDescription(input)) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DetachTitusInstancesAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DetachTitusInstancesAtomicOperationConverter.groovy index 82b45e2c4db..f7126a53779 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DetachTitusInstancesAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DetachTitusInstancesAtomicOperationConverter.groovy @@ -19,27 +19,17 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.converters import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation import com.netflix.spinnaker.clouddriver.titus.deploy.description.DetachTitusInstancesDescription import com.netflix.spinnaker.clouddriver.titus.deploy.ops.DetachTitusInstancesAtomicOperation -import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @TitusOperation(AtomicOperations.DETACH_INSTANCES) @Component class DetachTitusInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider - - @Autowired - DetachTitusInstancesAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider - } - @Override AtomicOperation convertOperation(Map input) { - new DetachTitusInstancesAtomicOperation(titusClientProvider, convertDescription(input)) + new DetachTitusInstancesAtomicOperation(convertDescription(input)) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusInstancesInDiscoveryAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusInstancesInDiscoveryAtomicOperationConverter.groovy index 026100b41cb..f8d441a22eb 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusInstancesInDiscoveryAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusInstancesInDiscoveryAtomicOperationConverter.groovy @@ -29,17 +29,9 @@ import org.springframework.stereotype.Component @Component @TitusOperation(AtomicOperations.DISABLE_INSTANCES_IN_DISCOVERY) class DisableTitusInstancesInDiscoveryAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider - - @Autowired - DisableTitusInstancesInDiscoveryAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider - } - @Override AtomicOperation convertOperation(Map input) { - new DisableTitusInstancesInDiscoveryAtomicOperation(titusClientProvider, convertDescription(input)) + new DisableTitusInstancesInDiscoveryAtomicOperation(convertDescription(input)) } @Override diff --git 
a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusServerGroupAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusServerGroupAtomicOperationConverter.groovy index caaab38003b..67c6d9bb801 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/DisableTitusServerGroupAtomicOperationConverter.groovy @@ -29,17 +29,9 @@ import org.springframework.stereotype.Component @TitusOperation(AtomicOperations.DISABLE_SERVER_GROUP) @Component class DisableTitusServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider - - @Autowired - DisableTitusServerGroupAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider - } - @Override AtomicOperation convertOperation(Map input) { - new DisableTitusServerGroupAtomicOperation(titusClientProvider, convertDescription(input)) + new DisableTitusServerGroupAtomicOperation(convertDescription(input)) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusInstancesInDiscoveryAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusInstancesInDiscoveryAtomicOperationConverter.groovy index fa7423e23f7..9410c965125 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusInstancesInDiscoveryAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusInstancesInDiscoveryAtomicOperationConverter.groovy @@ -29,17 +29,9 @@ import org.springframework.stereotype.Component @Component @TitusOperation(AtomicOperations.ENABLE_INSTANCES_IN_DISCOVERY) class EnableTitusInstancesInDiscoveryAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider - - @Autowired - EnableTitusInstancesInDiscoveryAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider - } - @Override AtomicOperation convertOperation(Map input) { - new EnableTitusInstancesInDiscoveryAtomicOperation(titusClientProvider, convertDescription(input)) + new EnableTitusInstancesInDiscoveryAtomicOperation(convertDescription(input)) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusServerGroupAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusServerGroupAtomicOperationConverter.groovy index bc321f1e52c..0e98131b7d0 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/EnableTitusServerGroupAtomicOperationConverter.groovy @@ -29,17 +29,9 @@ import org.springframework.stereotype.Component @TitusOperation(AtomicOperations.ENABLE_SERVER_GROUP) @Component class 
EnableTitusServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider - - @Autowired - EnableTitusServerGroupAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider - } - @Override AtomicOperation convertOperation(Map input) { - new EnableTitusServerGroupAtomicOperation(titusClientProvider, convertDescription(input)) + new EnableTitusServerGroupAtomicOperation(convertDescription(input)) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/ResizeTitusServerGroupAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/ResizeTitusServerGroupAtomicOperationConverter.groovy index 51503aaab2a..982bb1c7cce 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/ResizeTitusServerGroupAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/ResizeTitusServerGroupAtomicOperationConverter.groovy @@ -19,27 +19,17 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.converters import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation import com.netflix.spinnaker.clouddriver.titus.deploy.description.ResizeTitusServerGroupDescription import com.netflix.spinnaker.clouddriver.titus.deploy.ops.ResizeTitusServerGroupAtomicOperation -import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component @TitusOperation(AtomicOperations.RESIZE_SERVER_GROUP) @Component class ResizeTitusServerGroupAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider - - @Autowired - ResizeTitusServerGroupAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider - } - @Override AtomicOperation convertOperation(Map input) { - new ResizeTitusServerGroupAtomicOperation(titusClientProvider, convertDescription(input)) + new ResizeTitusServerGroupAtomicOperation(convertDescription(input)) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TerminateTitusInstancesAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TerminateTitusInstancesAtomicOperationConverter.groovy index 010edaa9e3a..80f7129c3c6 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TerminateTitusInstancesAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TerminateTitusInstancesAtomicOperationConverter.groovy @@ -19,33 +19,54 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.converters import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import 
com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation +import com.netflix.spinnaker.clouddriver.titus.caching.providers.TitusInstanceProvider import com.netflix.spinnaker.clouddriver.titus.deploy.description.TerminateTitusInstancesDescription import com.netflix.spinnaker.clouddriver.titus.deploy.ops.TerminateTitusInstancesAtomicOperation +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -@TitusOperation(AtomicOperations.TERMINATE_INSTANCES) +@Slf4j @Component +@TitusOperation(AtomicOperations.TERMINATE_INSTANCES) class TerminateTitusInstancesAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { - - private final TitusClientProvider titusClientProvider + private final TitusInstanceProvider titusInstanceProvider @Autowired - TerminateTitusInstancesAtomicOperationConverter(TitusClientProvider titusClientProvider) { - this.titusClientProvider = titusClientProvider + TerminateTitusInstancesAtomicOperationConverter(TitusInstanceProvider titusInstanceProvider) { + this.titusInstanceProvider = titusInstanceProvider } @Override AtomicOperation convertOperation(Map input) { - new TerminateTitusInstancesAtomicOperation(titusClientProvider, convertDescription(input)) + new TerminateTitusInstancesAtomicOperation(convertDescription(input)) } @Override TerminateTitusInstancesDescription convertDescription(Map input) { def converted = objectMapper.convertValue(input, TerminateTitusInstancesDescription) converted.credentials = getCredentialsObject(input.credentials as String) + + try { + def applications = converted.instanceIds.findResults { + def instance = titusInstanceProvider.getInstance(converted.credentials.name, converted.region, it) + return instance?.application + } as Set + converted.applications = applications + converted.requiresApplicationRestriction = !applications.isEmpty() + } catch (Exception e) { + converted.applications = [] + converted.requiresApplicationRestriction = true + log.error( + "Unable to determine applications for instances (instanceIds: {}, account: {}, region: {})", + converted.instanceIds, + converted.credentials.name, + converted.region, + e + ) + } + converted } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverter.groovy index c73dd521ce4..5c8600861f3 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverter.groovy @@ -21,20 +21,46 @@ import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport import com.netflix.spinnaker.clouddriver.titus.TitusOperation +import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription +import org.springframework.beans.factory.annotation.Autowired 
import org.springframework.stereotype.Component @TitusOperation(AtomicOperations.CREATE_SERVER_GROUP) @Component class TitusDeployAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { + @Autowired + AwsLookupUtil awsLookupUtil + + @Override AtomicOperation convertOperation(Map input) { new DeployAtomicOperation(convertDescription(input)) } + @Override TitusDeployDescription convertDescription(Map input) { + // Backwards-compatibility for when the Titus provider blindly accepted any container + // attribute value, when in reality these can only be string values. Now that the + // description is Java, this can cause Jackson's object mapper to throw exceptions if + // left unconverted. + if (input.containerAttributes != null) { + input.containerAttributes.forEach { k, v -> + if (!(v instanceof String)) { + input.containerAttributes.put(k, v.toString()) + } + } + } + def converted = objectMapper.convertValue(input, TitusDeployDescription) converted.credentials = getCredentialsObject(input.credentials as String) + + if (converted.securityGroups != null && !converted.securityGroups.isEmpty()) { + converted.setSecurityGroupNames( + awsLookupUtil.convertSecurityGroupsToNames(converted.account, converted.region, converted.securityGroups) + ) + } + + converted } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusScalingPolicyAtomicOperationConverter.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusScalingPolicyAtomicOperationConverter.groovy index 335d32e1830..94468b2a55e 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusScalingPolicyAtomicOperationConverter.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusScalingPolicyAtomicOperationConverter.groovy @@ -20,12 +20,15 @@ import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription import com.netflix.spinnaker.clouddriver.titus.deploy.ops.UpsertTitusScalingPolicyAtomicOperation +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component +@Slf4j @Component('titusUpsertScalingPolicyDescription') @TitusOperation(AtomicOperations.UPSERT_SCALING_POLICY) class UpsertTitusScalingPolicyAtomicOperationConverter extends AbstractAtomicOperationsCredentialsSupport { @@ -33,6 +36,9 @@ class UpsertTitusScalingPolicyAtomicOperationConverter extends AbstractAtomicOpe @Autowired ObjectMapper objectMapper + @Autowired + TitusClientProvider titusClientProvider + @Override UpsertTitusScalingPolicyAtomicOperation convertOperation(Map input) { new UpsertTitusScalingPolicyAtomicOperation(convertDescription(input)) @@ -44,6 +50,16 @@ class UpsertTitusScalingPolicyAtomicOperationConverter extends AbstractAtomicOpe .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) .convertValue(input, UpsertTitusScalingPolicyDescription) converted.credentials =
getCredentialsObject(input.credentials as String) + + try { + def titusClient = titusClientProvider.getTitusClient(converted.credentials, converted.region) + def titusJob = titusClient.findJobById(converted.jobId, false) + converted.application = titusJob.appName + } catch (Exception e) { + converted.application = null + log.warn("Unable to determine application for titus job (jobId: {})", converted.jobId, e) + } + converted } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy index 89241c6617f..039c1944505 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractRegionAsgInstanceIdsDescription.groovy @@ -16,9 +16,16 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.description -abstract class AbstractRegionAsgInstanceIdsDescription extends AbstractTitusCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +abstract class AbstractRegionAsgInstanceIdsDescription extends AbstractTitusCredentialsDescription implements ServerGroupsNameable { String region String asgName List instanceIds Integer targetHealthyDeployPercentage + + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractTitusCredentialsDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractTitusCredentialsDescription.groovy deleted file mode 100644 index 451fa6391d6..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractTitusCredentialsDescription.groovy +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.description - -import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.annotation.JsonProperty -import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials - -abstract class AbstractTitusCredentialsDescription implements CredentialsNameable { - @JsonIgnore - NetflixTitusCredentials credentials - - @JsonProperty("credentials") - String getCredentialAccount() { - this.credentials.name - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeleteTitusScalingPolicyDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeleteTitusScalingPolicyDescription.groovy index ab1fb897fce..80c5fa2aa60 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeleteTitusScalingPolicyDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeleteTitusScalingPolicyDescription.groovy @@ -20,4 +20,9 @@ class DeleteTitusScalingPolicyDescription extends AbstractTitusCredentialsDescri String region String scalingPolicyID + + @Override + boolean requiresApplicationRestriction() { + return false + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeregisterTitusInstanceFromLoadBalancerDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeregisterTitusInstanceFromLoadBalancerDescription.groovy index be963e538cc..567ea62c47e 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeregisterTitusInstanceFromLoadBalancerDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DeregisterTitusInstanceFromLoadBalancerDescription.groovy @@ -22,5 +22,8 @@ import groovy.transform.Canonical @AutoClone @Canonical class DeregisterTitusInstanceFromLoadBalancerDescription extends AbstractTitusCredentialsDescription { - + @Override + boolean requiresApplicationRestriction() { + return false + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusJobDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusJobDescription.groovy new file mode 100644 index 00000000000..c75f8e8c691 --- /dev/null +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusJobDescription.groovy @@ -0,0 +1,35 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.description + +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable + +class DestroyTitusJobDescription extends AbstractTitusCredentialsDescription implements ApplicationNameable{ + String region + String jobId + String serverGroupName + String user + + Set applications + boolean requiresApplicationRestriction = true + + @Override + boolean requiresApplicationRestriction() { + return requiresApplicationRestriction + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusServerGroupDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusServerGroupDescription.groovy index 9e73f422e6e..5cec0db3bb1 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusServerGroupDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DestroyTitusServerGroupDescription.groovy @@ -16,8 +16,15 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.description -class DestroyTitusServerGroupDescription extends AbstractTitusCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class DestroyTitusServerGroupDescription extends AbstractTitusCredentialsDescription implements ServerGroupsNameable { String region String serverGroupName String user + + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DetachTitusInstancesDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DetachTitusInstancesDescription.groovy index e3c8aa60718..6f1f012f8d0 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DetachTitusInstancesDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/DetachTitusInstancesDescription.groovy @@ -16,11 +16,17 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.description -class DetachTitusInstancesDescription extends AbstractTitusCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable + +class DetachTitusInstancesDescription extends AbstractTitusCredentialsDescription implements ServerGroupsNameable { String region List instanceIds - boolean decrementDesiredCapacity boolean adjustMinIfNecessary String asgName String user + + @Override + Collection getServerGroupNames() { + return [asgName] + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/EnableDisableServerGroupDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/EnableDisableServerGroupDescription.groovy index 638e2599cff..94bbcd2d661 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/EnableDisableServerGroupDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/EnableDisableServerGroupDescription.groovy @@ -20,4 +20,5 @@ import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescrip class EnableDisableServerGroupDescription extends 
AbstractTitusCredentialsDescription implements EnableDisableDescriptionTrait { String region + Integer desiredPercentage } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ModifyTitusAsgLaunchConfigurationDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ModifyTitusAsgLaunchConfigurationDescription.groovy index 5580780d4ec..a4c6df0a1d4 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ModifyTitusAsgLaunchConfigurationDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ModifyTitusAsgLaunchConfigurationDescription.groovy @@ -22,5 +22,8 @@ import groovy.transform.Canonical @AutoClone @Canonical class ModifyTitusAsgLaunchConfigurationDescription extends AbstractTitusCredentialsDescription { - + @Override + boolean requiresApplicationRestriction() { + return false + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ResizeTitusServerGroupDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ResizeTitusServerGroupDescription.groovy index 5ed622b9efb..14d4869a88e 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ResizeTitusServerGroupDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/ResizeTitusServerGroupDescription.groovy @@ -16,14 +16,20 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.description +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable import groovy.transform.Canonical -class ResizeTitusServerGroupDescription extends AbstractTitusCredentialsDescription { +class ResizeTitusServerGroupDescription extends AbstractTitusCredentialsDescription implements ServerGroupsNameable { String region String serverGroupName String user Capacity capacity + @Override + Collection getServerGroupNames() { + return [serverGroupName] + } + @Canonical static class Capacity { int min diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TerminateTitusInstancesDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TerminateTitusInstancesDescription.groovy index f8f56478e3f..f147af70839 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TerminateTitusInstancesDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TerminateTitusInstancesDescription.groovy @@ -16,8 +16,18 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.description -class TerminateTitusInstancesDescription extends AbstractTitusCredentialsDescription { +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable + +class TerminateTitusInstancesDescription extends AbstractTitusCredentialsDescription implements ApplicationNameable { String region List instanceIds String user + + Set applications + boolean requiresApplicationRestriction = true + + @Override + boolean requiresApplicationRestriction() { + return requiresApplicationRestriction + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescription.groovy 
b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescription.groovy deleted file mode 100644 index 602c219039b..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescription.groovy +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.description - -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent -import com.netflix.spinnaker.clouddriver.titus.client.model.Efs -import com.netflix.spinnaker.clouddriver.titus.client.model.MigrationPolicy -import groovy.transform.Canonical - -class TitusDeployDescription extends AbstractTitusCredentialsDescription implements DeployDescription { - String region - String subnet - List zones - List securityGroups - List targetGroups - List softConstraints - List hardConstraints - String application - String stack - String freeFormDetails - String imageId - Capacity capacity = new Capacity() - Resources resources = new Resources() - Map env - Map labels - Map containerAttributes - String entryPoint - String iamProfile - String capacityGroup - String user - Boolean inService - String jobType - int retries - int runtimeLimitSecs - Boolean useApplicationDefaultSecurityGroup = true - List interestingHealthProviderNames - MigrationPolicy migrationPolicy - - /** - * If false, the newly created server group will not pick up scaling policies and actions from an ancestor group - */ - boolean copySourceScalingPolicies = true - - Collection events = [] - - Source source = new Source() - - @Canonical - static class Capacity { - int min - int max - int desired - } - - @Canonical - static class Resources { - int cpu - int memory - int disk - int gpu - int networkMbps - int[] ports - boolean allocateIpAddress - } - - @Canonical - static class Source { - String account - String region - String asgName - Boolean useSourceCapacity - } - - Efs efs - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertTitusScalingPolicyDescription.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertTitusScalingPolicyDescription.groovy index f5f305566f1..fe9b3becc03 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertTitusScalingPolicyDescription.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertTitusScalingPolicyDescription.groovy @@ -26,14 +26,20 @@ import com.google.protobuf.DoubleValue import com.google.protobuf.Int32Value import com.google.protobuf.Int64Value import com.netflix.spinnaker.clouddriver.aws.deploy.description.UpsertAlarmDescription +import 
com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable import com.netflix.titus.grpc.protogen.* import com.netflix.titus.grpc.protogen.AlarmConfiguration.ComparisonOperator import com.netflix.titus.grpc.protogen.AlarmConfiguration.Statistic import com.netflix.titus.grpc.protogen.ScalingPolicy.Builder import com.netflix.titus.grpc.protogen.StepScalingPolicy.AdjustmentType import com.netflix.titus.grpc.protogen.StepScalingPolicy.MetricAggregationType +import groovy.util.logging.Slf4j +import groovy.json.JsonOutput + +@Slf4j +class UpsertTitusScalingPolicyDescription extends AbstractTitusCredentialsDescription implements ApplicationNameable { + String application -class UpsertTitusScalingPolicyDescription extends AbstractTitusCredentialsDescription { // required String region String jobId @@ -48,6 +54,11 @@ class UpsertTitusScalingPolicyDescription extends AbstractTitusCredentialsDescri UpsertAlarmDescription alarm + @Override + Collection getApplications() { + return [application] + } + static class Step { Collection stepAdjustments Integer cooldown @@ -155,6 +166,7 @@ class UpsertTitusScalingPolicyDescription extends AbstractTitusCredentialsDescri StepScalingPolicy stepPolicy = stepDescriptor.scalingPolicy Step step = new Step() description.step = step + description.adjustmentType = stepPolicy.adjustmentType step.cooldown = stepPolicy.cooldownSec.value step.metricAggregationType = stepPolicy.metricAggregationType step.stepAdjustments = [] @@ -206,7 +218,9 @@ class UpsertTitusScalingPolicyDescription extends AbstractTitusCredentialsDescri new AwsMetricDimension().withName(dimension.name).withValue(value) }) } - + log.info("UpsertTitusScalingPolicyDescription for ${serverGroupName} description: ${ JsonOutput.toJson(description) }") description } + + } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandler.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandler.groovy deleted file mode 100644 index dc399f131d0..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandler.groovy +++ /dev/null @@ -1,488 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.handlers - -import com.netflix.spinnaker.config.AwsConfiguration -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.TargetGroupLookupHelper -import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.TargetGroupLookupHelper.TargetGroupLookupResult -import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeployDescription -import com.netflix.spinnaker.clouddriver.deploy.DeployHandler -import com.netflix.spinnaker.clouddriver.helpers.OperationPoller -import com.netflix.spinnaker.clouddriver.orchestration.events.CreateServerGroupEvent -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider -import com.netflix.spinnaker.clouddriver.titus.TitusException -import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil -import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient -import com.netflix.spinnaker.clouddriver.titus.client.model.Job -import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import com.netflix.spinnaker.clouddriver.titus.deploy.TitusServerGroupNameResolver -import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription -import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription.Source -import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription -import com.netflix.spinnaker.clouddriver.titus.model.DockerImage -import com.netflix.spinnaker.kork.core.RetrySupport -import com.netflix.titus.grpc.protogen.PutPolicyRequest -import com.netflix.titus.grpc.protogen.PutPolicyRequest.Builder -import com.netflix.titus.grpc.protogen.ScalingPolicyResult -import com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState -import groovy.util.logging.Slf4j -import io.grpc.Status -import org.slf4j.Logger -import org.slf4j.LoggerFactory -import org.springframework.beans.factory.annotation.Autowired - -@Slf4j -class TitusDeployHandler implements DeployHandler { - - public static final String USE_APPLICATION_DEFAULT_SG_LABEL = 'spinnaker.useApplicationDefaultSecurityGroup' - - @Autowired - AwsLookupUtil awsLookupUtil - - @Autowired - AwsConfiguration.DeployDefaults deployDefaults - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Autowired - RegionScopedProviderFactory regionScopedProviderFactory - - @Autowired - Front50Service front50Service - - private final Logger logger = LoggerFactory.getLogger(TitusDeployHandler) - - private static final String BASE_PHASE = "DEPLOY" - - private final TitusClientProvider titusClientProvider - private final AccountCredentialsRepository accountCredentialsRepository - private final 
TargetGroupLookupHelper targetGroupLookupHelper - private final RetrySupport retrySupport - - TitusDeployHandler(TitusClientProvider titusClientProvider, AccountCredentialsRepository accountCredentialsRepository) { - this.titusClientProvider = titusClientProvider - this.accountCredentialsRepository = accountCredentialsRepository - this.targetGroupLookupHelper = new TargetGroupLookupHelper() - this.retrySupport = new RetrySupport() - } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Override - TitusDeploymentResult handle(TitusDeployDescription description, List priorOutputs) { - - try { - task.updateStatus BASE_PHASE, "Initializing handler..." - TitusClient titusClient = titusClientProvider.getTitusClient(description.credentials, description.region) - TitusDeploymentResult deploymentResult = new TitusDeploymentResult() - String account = description.account - String region = description.region - String subnet = description.subnet - - if (!description.env) description.env = [:] - if (!description.containerAttributes) description.containerAttributes = [:] - if (!description.labels) description.labels = [:] - - if (description.source.asgName) { - Source source = description.source - - TitusClient sourceClient = buildSourceTitusClient(source) - if (!sourceClient) { - throw new RuntimeException("Unable to locate source (${source.account}:${source.region}:${source.asgName})") - } - Job sourceJob = sourceClient.findJobByName(source.asgName) - if (!sourceJob) { - throw new RuntimeException("Unable to locate source (${source.account}:${source.region}:${source.asgName})") - } - - task.updateStatus BASE_PHASE, "Copying deployment details from (${source.account}:${source.region}:${source.asgName})" - - description.runtimeLimitSecs = description.runtimeLimitSecs ?: sourceJob.runtimeLimitSecs - description.securityGroups = description.securityGroups ?: sourceJob.securityGroups - description.imageId = description.imageId ?: (sourceJob.applicationName + ":" + (sourceJob.version ?: sourceJob.digest)) - - if (description.source.useSourceCapacity) { - description.capacity.min = sourceJob.instancesMin - description.capacity.max = sourceJob.instancesMax - description.capacity.desired = sourceJob.instancesDesired - } - - description.resources.cpu = description.resources.cpu ?: sourceJob.cpu - description.resources.memory = description.resources.memory ?: sourceJob.memory - description.resources.disk = description.resources.disk ?: sourceJob.disk - description.retries = description.retries ?: sourceJob.retries - description.runtimeLimitSecs = description.runtimeLimitSecs ?: sourceJob.runtimeLimitSecs - description.resources.gpu = description.resources.gpu ?: sourceJob.gpu - description.resources.networkMbps = description.resources.networkMbps ?: sourceJob.networkMbps - description.efs = description.efs ?: sourceJob.efs - description.resources.allocateIpAddress = description.resources.allocateIpAddress ?: sourceJob.allocateIpAddress - description.entryPoint = description.entryPoint ?: sourceJob.entryPoint - description.iamProfile = description.iamProfile ?: sourceJob.iamProfile - description.capacityGroup = description.capacityGroup ?: sourceJob.capacityGroup - - if (description.labels.isEmpty()) { - sourceJob.labels.each { k, v -> description.labels.put(k, v) } - } - - if (description.env.isEmpty()) { - sourceJob.environment.each { k, v -> description.env.put(k, v) } - } - - if (description.containerAttributes.isEmpty()) { - sourceJob.containerAttributes.each { k, v -> 
description.containerAttributes.put(k, v) } - } - if (description.inService == null) { - description.inService = sourceJob.inService - } - description.migrationPolicy = description.migrationPolicy ?: sourceJob.migrationPolicy - description.jobType = description.jobType ?: "service" - if (!description.hardConstraints) description.hardConstraints = [] - if (!description.softConstraints) description.softConstraints = [] - if (description.softConstraints.empty && sourceJob.softConstraints) { - sourceJob.softConstraints.each { - if (!description.hardConstraints.contains(it)) { - description.softConstraints.add(it) - } - } - } - if (description.hardConstraints.empty && sourceJob.hardConstraints) { - sourceJob.hardConstraints.each { - if (!description.softConstraints.contains(it)) { - description.hardConstraints.add(it) - } - } - } - if (sourceJob.labels?.get(USE_APPLICATION_DEFAULT_SG_LABEL) == "false") { - description.useApplicationDefaultSecurityGroup = false - } - } - - task.updateStatus BASE_PHASE, "Preparing deployment to ${account}:${region}${subnet ? ':' + subnet : ''}... ${System.currentTimeMillis()}" - DockerImage dockerImage = new DockerImage(description.imageId) - - if (description.interestingHealthProviderNames && !description.interestingHealthProviderNames.empty) { - description.labels.put("interestingHealthProviderNames", description.interestingHealthProviderNames.join(",")) - } - - if (description.labels.containsKey(USE_APPLICATION_DEFAULT_SG_LABEL)) { - if (description.labels.get(USE_APPLICATION_DEFAULT_SG_LABEL) == "false") { - description.useApplicationDefaultSecurityGroup = false - } else { - description.useApplicationDefaultSecurityGroup = true - } - } - - if (description.useApplicationDefaultSecurityGroup == false) { - description.labels.put(USE_APPLICATION_DEFAULT_SG_LABEL, "false") - } else { - if (description.labels.containsKey(USE_APPLICATION_DEFAULT_SG_LABEL)) { - description.labels.remove(USE_APPLICATION_DEFAULT_SG_LABEL) - } - } - - SubmitJobRequest submitJobRequest = new SubmitJobRequest() - .withApplication(description.application) - .withDockerImageName(dockerImage.imageName) - .withInstancesMin(description.capacity.min) - .withInstancesMax(description.capacity.max) - .withInstancesDesired(description.capacity.desired) - .withCpu(description.resources.cpu) - .withMemory(description.resources.memory) - .withDisk(description.resources.disk) - .withRetries(description.retries) - .withRuntimeLimitSecs(description.runtimeLimitSecs) - .withGpu(description.resources.gpu) - .withNetworkMbps(description.resources.networkMbps) - .withEfs(description.efs) - .withPorts(description.resources.ports) - .withEnv(description.env) - .withAllocateIpAddress(description.resources.allocateIpAddress) - .withStack(description.stack) - .withDetail(description.freeFormDetails) - .withEntryPoint(description.entryPoint) - .withIamProfile(description.iamProfile) - .withCapacityGroup(description.capacityGroup) - .withLabels(description.labels) - .withInService(description.inService) - .withMigrationPolicy(description.migrationPolicy) - .withCredentials(description.credentials.name) - .withContainerAttributes(description.containerAttributes.collectEntries { [(it.key): it.value?.toString()] }) - - if (dockerImage.imageDigest != null) { - submitJobRequest = submitJobRequest.withDockerDigest(dockerImage.imageDigest) - } else { - submitJobRequest = submitJobRequest.withDockerImageVersion(dockerImage.imageVersion) - } - - Set securityGroups = [] - description.securityGroups?.each { 
providedSecurityGroup -> - if (awsLookupUtil.securityGroupIdExists(account, region, providedSecurityGroup)) { - securityGroups << providedSecurityGroup - } else { - String convertedSecurityGroup = awsLookupUtil.convertSecurityGroupNameToId(account, region, providedSecurityGroup) - if (!convertedSecurityGroup) { - throw new RuntimeException("Security Group ${providedSecurityGroup} cannot be found") - } - securityGroups << convertedSecurityGroup - } - } - - if (description.jobType != 'batch' && deployDefaults.addAppGroupToServerGroup && securityGroups.size() < deployDefaults.maxSecurityGroups && description.useApplicationDefaultSecurityGroup != false) { - String applicationSecurityGroup = awsLookupUtil.convertSecurityGroupNameToId(account, region, description.application) - if (!applicationSecurityGroup) { - applicationSecurityGroup = OperationPoller.retryWithBackoff({ o -> awsLookupUtil.createSecurityGroupForApplication(account, region, description.application) }, 1000, 5) - } - if (!securityGroups.contains(applicationSecurityGroup)) { - securityGroups << applicationSecurityGroup - } - } - - if (description.hardConstraints) { - description.hardConstraints.each { constraint -> - submitJobRequest.withConstraint(SubmitJobRequest.Constraint.hard(constraint)) - } - } - - if (description.softConstraints) { - description.softConstraints.each { constraint -> - submitJobRequest.withConstraint(SubmitJobRequest.Constraint.soft(constraint)) - } - } - - if (description.getJobType() == "service" && !description.hardConstraints?.contains(SubmitJobRequest.Constraint.ZONE_BALANCE) && !description.softConstraints?.contains(SubmitJobRequest.Constraint.ZONE_BALANCE)) { - submitJobRequest.withConstraint(SubmitJobRequest.Constraint.soft(SubmitJobRequest.Constraint.ZONE_BALANCE)) - } - - if (!securityGroups.empty) { - submitJobRequest.withSecurityGroups(securityGroups.asList()) - } - - Map front50Application - - try { - front50Application = front50Service.getApplication(description.getApplication()) - } catch (Exception e) { - log.error('Failed to load front50 application attributes for {}', description.getApplication()) - } - - if (front50Application && front50Application['email']) { - submitJobRequest.withUser(front50Application['email']) - } else { - if (description.user) { - submitJobRequest.withUser(description.user) - } - } - - if (description.jobType) { - submitJobRequest.withJobType(description.jobType) - } - - TargetGroupLookupResult targetGroupLookupResult - - if (description.targetGroups) { - targetGroupLookupResult = validateLoadBalancers(description) - description.labels.put('spinnaker.targetGroups', targetGroupLookupResult?.targetGroupARNs.join(',')) - } else { - if (description.labels.containsKey('spinnaker.targetGroups')) { - description.labels.remove('spinnaker.targetGroups') - } - } - - String nextServerGroupName = resolveJobName(description, submitJobRequest, task, titusClient) - String jobUri - int retryCount = 0 - - retrySupport.retry({ - try { - task.updateStatus BASE_PHASE, "Submitting job request to Titus... 
${System.currentTimeMillis()}" - jobUri = titusClient.submitJob(submitJobRequest) - } catch (io.grpc.StatusRuntimeException e) { - task.updateStatus BASE_PHASE, "Error encountered submitting job request to Titus ${e.message} for ${nextServerGroupName} ${System.currentTimeMillis()}" - if ((e.status.code == Status.RESOURCE_EXHAUSTED.code || e.status.code == Status.INVALID_ARGUMENT.code) && (e.status.description.contains("Job sequence id reserved by another pending job") || e.status.description.contains("Constraint violation - job with group sequence"))) { - if (e.status.description.contains("Job sequence id reserved by another pending job")) { - sleep 1000 ^ pow(2, retryCount) - retryCount++ - } - nextServerGroupName = resolveJobName(description, submitJobRequest, task, titusClient) - task.updateStatus BASE_PHASE, "Retrying with ${nextServerGroupName} after ${tries} attempts ${System.currentTimeMillis()}" - throw e; - } - if (e.status.code == Status.UNAVAILABLE.code) { - throw e; - } else { - log.error("Could not submit job and not retrying for status ${e.status} ", e) - task.updateStatus BASE_PHASE, "could not submit job ${e.status} ${e.message} ${System.currentTimeMillis()}" - } - } - }, 8, 100, true) - - if (jobUri == null) { - throw new TitusException("Could not create job") - } - - task.updateStatus BASE_PHASE, "Successfully submitted job request to Titus (Job URI: ${jobUri}) ${System.currentTimeMillis()}" - - deploymentResult.serverGroupNames = ["${region}:${nextServerGroupName}".toString()] - deploymentResult.serverGroupNameByRegion = [(description.region): nextServerGroupName] - deploymentResult.jobUri = jobUri - - if (description.jobType == 'batch') { - deploymentResult = new TitusDeploymentResult([ - deployedNames : [jobUri], - deployedNamesByLocation: [(description.region): [jobUri]], - jobUri : jobUri - ]) - } - - copyScalingPolicies(description, jobUri, nextServerGroupName) - - addLoadBalancers(description, targetGroupLookupResult, jobUri) - - deploymentResult.messages = task.history.collect { "${it.phase} : ${it.status}".toString() } - - description.events << new CreateServerGroupEvent( - TitusCloudProvider.ID, getAccountId(account), region, nextServerGroupName - ) - - return deploymentResult - } catch (t) { - task.updateStatus(BASE_PHASE, "Task failed $t.message") - task.fail() - logger.error("Deploy failed", t) - throw t - } - } - - private String resolveJobName(TitusDeployDescription description, SubmitJobRequest submitJobRequest, Task task, TitusClient titusClient) { - TitusServerGroupNameResolver serverGroupNameResolver = new TitusServerGroupNameResolver(titusClient, description.region) - String nextServerGroupName = serverGroupNameResolver.resolveNextServerGroupName(description.application, description.stack, description.freeFormDetails, false) - submitJobRequest.withJobName(nextServerGroupName) - task.updateStatus BASE_PHASE, "Resolved server group name to ${nextServerGroupName} ${System.currentTimeMillis()}" - return nextServerGroupName - } - - protected TargetGroupLookupHelper.TargetGroupLookupResult validateLoadBalancers(TitusDeployDescription description) { - if (!description.targetGroups) { - return null - } - def regionScopedProvider = regionScopedProviderFactory.forRegion(accountCredentialsProvider.getCredentials(description.credentials.awsAccount), description.region) - def targetGroups = targetGroupLookupHelper.getTargetGroupsByName(regionScopedProvider, description.targetGroups) - if (targetGroups.unknownTargetGroups) { - throw new IllegalStateException("Unable 
to find target groups named $targetGroups.unknownTargetGroups ${System.currentTimeMillis()}") - } - return targetGroups - } - - protected void addLoadBalancers(TitusDeployDescription description, TargetGroupLookupHelper.TargetGroupLookupResult targetGroups, String jobUri) { - TitusLoadBalancerClient loadBalancerClient = titusClientProvider.getTitusLoadBalancerClient(description.credentials, description.region) - if (!loadBalancerClient) { - task.updateStatus BASE_PHASE, "Unable to create load balancing client in target account/region" - return - } - targetGroups?.targetGroupARNs.each { targetGroupARN -> - loadBalancerClient.addLoadBalancer(jobUri, targetGroupARN) - task.updateStatus BASE_PHASE, "Attached ${targetGroupARN} to ${jobUri} ${System.currentTimeMillis()}" - } - } - - protected void copyScalingPolicies(TitusDeployDescription description, String jobUri, String serverGroupName) { - if (!description.copySourceScalingPolicies) { - return - } - Source source = description.source - TitusClient sourceClient = buildSourceTitusClient(source) - TitusAutoscalingClient autoscalingClient = titusClientProvider.getTitusAutoscalingClient(description.credentials, description.region) - if (!autoscalingClient) { - task.updateStatus BASE_PHASE, "Unable to create client in target account/region; policies will not be copied" - return - } - TitusAutoscalingClient sourceAutoscalingClient = buildSourceAutoscalingClient(source) - if (!sourceClient) { - task.updateStatus BASE_PHASE, "Unable to create client in source account/region; policies will not be copied" - return - } - if (sourceClient && sourceAutoscalingClient) { - Job sourceJob = sourceClient.findJobByName(source.asgName) - if (!sourceJob) { - task.updateStatus BASE_PHASE, "Unable to locate source (${source.account}:${source.region}:${source.asgName})" - } else { - task.updateStatus BASE_PHASE, "Copying scaling policies from source (Job URI: ${sourceJob.id})" - List policies = sourceAutoscalingClient.getJobScalingPolicies(sourceJob.id) ?: [] - task.updateStatus BASE_PHASE, "Found ${policies.size()} scaling policies for source (Job URI: ${jobUri})" - policies.each { policy -> - // Don't copy deleting or deleted policies - if (![ScalingPolicyState.Deleted, ScalingPolicyState.Deleting].contains(policy.policyState.state)) { - Builder requestBuilder = PutPolicyRequest.newBuilder() - .setJobId(jobUri) - .setScalingPolicy(UpsertTitusScalingPolicyDescription.fromScalingPolicyResult(description.region, policy, serverGroupName).toScalingPolicyBuilder()) - task.updateStatus BASE_PHASE, "Creating new policy copied from policy ${policy.id}" - autoscalingClient.createScalingPolicy(requestBuilder.build()) - } - } - } - } - task.updateStatus BASE_PHASE, "Copy scaling policies succeeded (Job URI: ${jobUri}) ${System.currentTimeMillis()}" - } - - private TitusClient buildSourceTitusClient(Source source) { - if (source.account && source.region && source.asgName) { - def sourceRegion = source.region - def sourceCredentials = accountCredentialsRepository.getOne(source.account) as NetflixTitusCredentials - return titusClientProvider.getTitusClient(sourceCredentials, sourceRegion) - } - - return null - } - - private TitusAutoscalingClient buildSourceAutoscalingClient(Source source) { - if (source.account && source.region && source.asgName) { - def sourceRegion = source.region - def sourceCredentials = accountCredentialsRepository.getOne(source.account) as NetflixTitusCredentials - return titusClientProvider.getTitusAutoscalingClient(sourceCredentials, sourceRegion) - } 
- - return null - } - - private String getAccountId(String credentials) { - AccountCredentials accountCredentials = accountCredentialsProvider.getCredentials(credentials) - if (accountCredentials instanceof NetflixTitusCredentials) { - return accountCredentialsProvider.getCredentials(accountCredentials.awsAccount).accountId - } - - return accountCredentials.accountId - } - - @Override - boolean handles(DeployDescription description) { - return description instanceof TitusDeployDescription - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeploymentResult.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeploymentResult.groovy deleted file mode 100644 index 15d19771204..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeploymentResult.groovy +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.handlers - -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult - -class TitusDeploymentResult extends DeploymentResult { - String jobUri -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/AbstractEnableDisableAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/AbstractEnableDisableAtomicOperation.groovy deleted file mode 100644 index 8b180cd0d88..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/AbstractEnableDisableAtomicOperation.groovy +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.ops - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.model.ActivateJobRequest -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription -import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription -import com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery.TitusEurekaSupport -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired - -@Slf4j -abstract class AbstractEnableDisableAtomicOperation implements AtomicOperation { - - @Autowired - TitusEurekaSupport discoverySupport - - private static final long THROTTLE_MS = 150 - - abstract boolean isDisable() - - abstract String getPhaseName() - - TitusClientProvider titusClientProvider - - EnableDisableServerGroupDescription description - - AbstractEnableDisableAtomicOperation(TitusClientProvider titusClientProvider, EnableDisableServerGroupDescription description) { - this.titusClientProvider = titusClientProvider - this.description = description - } - - @Override - Void operate(List priorOutputs) { - String verb = disable ? 'Disable' : 'Enable' - task.updateStatus phaseName, "Initializing ${verb} ServerGroup operation for $description.serverGroupName" - boolean succeeded = operateOnServerGroup(description.serverGroupName, description.credentials, description.region) - if (!succeeded && (!task.status || !task.status.isFailed())) { - task.fail() - } - task.updateStatus phaseName, "Finished ${verb} ServerGroup operation for $description.serverGroupName" - } - - private boolean operateOnServerGroup(String serverGroupName, NetflixTitusCredentials credentials, String region) { - String presentParticipling = disable ? 'Disabling' : 'Enabling' - String verb = disable ? 'Disable' : 'Enable' - - try { - - def provider = titusClientProvider.getTitusClient(credentials, region) - def loadBalancingClient = titusClientProvider.getTitusLoadBalancerClient(credentials, region) - def job = provider.findJobByName(serverGroupName, true) - - if (!job) { - task.updateStatus phaseName, "No Job named '$serverGroupName' found in $region" - return true - } - - task.updateStatus phaseName, "${presentParticipling} ServerGroup '$serverGroupName' in $region..." 
- - provider.activateJob( - new ActivateJobRequest() - .withUser('spinnaker') - .withJobId(job.id) - .withInService(!disable) - ) - - if (loadBalancingClient && job.labels.containsKey("spinnaker.targetGroups")) { - if (disable) { - task.updateStatus phaseName, "Removing ${job.id} from target groups" - loadBalancingClient.getJobLoadBalancers(job.id).each { loadBalancerId -> - task.updateStatus phaseName, "Removing ${job.id} from ${loadBalancerId.id} " - loadBalancingClient.removeLoadBalancer(job.id, loadBalancerId.getId()) - } - } else { - task.updateStatus phaseName, "Restoring ${job.id} into target groups" - List attachedLoadBalancers = loadBalancingClient.getJobLoadBalancers(job.id)*.id - job.labels.get("spinnaker.targetGroups").split(',').each { loadBalancerId -> - if (!attachedLoadBalancers.contains(loadBalancerId)) { - task.updateStatus phaseName, "Restoring ${job.id} into ${loadBalancerId}" - loadBalancingClient.addLoadBalancer(job.id, loadBalancerId) - } - } - } - } - - if (job.tasks) { - def status = disable ? AbstractEurekaSupport.DiscoveryStatus.Disable : AbstractEurekaSupport.DiscoveryStatus.Enable - task.updateStatus phaseName, "Marking ServerGroup $serverGroupName as $status with Discovery" - - def enableDisableInstanceDiscoveryDescription = new EnableDisableInstanceDiscoveryDescription( - credentials: credentials, - region: region, - asgName: serverGroupName, - instanceIds: job.tasks*.instanceId - ) - discoverySupport.updateDiscoveryStatusForInstances( - enableDisableInstanceDiscoveryDescription, task, phaseName, status, job.tasks*.instanceId - ) - } - - try { - provider.setAutoscaleEnabled(job.id, !disable) - } catch (Exception e) { - log.error("Error toggling autoscale enabled for Titus job ${job.id} in ${credentials.name}/${region}", e) - } - - task.updateStatus phaseName, "Finished ${presentParticipling} ServerGroup $serverGroupName." - - return true - } catch (e) { - def errorMessage = "Could not ${verb} ServerGroup '$serverGroupName' in region $region! 
Failure Type: ${e.class.simpleName}; Message: ${e.message}" - log.error(errorMessage, e) - if (task.status && (!task.status || !task.status.isFailed())) { - task.updateStatus phaseName, errorMessage - } - return false - } - } - - Task getTask() { - TaskRepository.threadLocalTask.get() - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/CloneTitusServerGroupAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/CloneTitusServerGroupAtomicOperation.groovy index 6d30ec1c623..525b8588bea 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/CloneTitusServerGroupAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/CloneTitusServerGroupAtomicOperation.groovy @@ -20,11 +20,14 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusDeployHandler -class CloneTitusServerGroupAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull + +class CloneTitusServerGroupAtomicOperation implements AtomicOperation, SagaContextAware { private static final String PHASE = "CLONE_TITUS_SERVER_GROUP" @@ -50,4 +53,14 @@ class CloneTitusServerGroupAtomicOperation implements AtomicOperation { - - DeleteTitusScalingPolicyDescription description - - DeleteTitusScalingPolicyAtomicOperation(DeleteTitusScalingPolicyDescription description) { - this.description = description - } - - private static final String BASE_PHASE = "DELETE_SCALING_POLICY" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } - - @Autowired - TitusClientProvider titusClientProvider - - @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing Delete Scaling Policy ${description.scalingPolicyID}..." - def client = titusClientProvider.getTitusAutoscalingClient(description.credentials, description.region) - if (!client) { - throw new UnsupportedOperationException("Autoscaling is not supported for this account/region") - } - - ScalingPolicyID id = ScalingPolicyID.newBuilder().setId(description.scalingPolicyID).build() - client.deleteScalingPolicy(DeletePolicyRequest.newBuilder().setId(id).build()) - task.updateStatus BASE_PHASE, "Delete Scaling Policy ${description.scalingPolicyID} completed." - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusJobAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusJobAtomicOperation.groovy new file mode 100644 index 00000000000..4c595799e4b --- /dev/null +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusJobAtomicOperation.groovy @@ -0,0 +1,65 @@ +/* + * + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.ops + +import com.netflix.spinnaker.clouddriver.orchestration.events.DeleteServerGroupEvent +import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DestroyTitusJob +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.DestroyTitusJobCompletionHandler +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull + +import javax.annotation.Nonnull + +class DestroyTitusJobAtomicOperation extends AbstractSagaAtomicOperation<DestroyTitusJobDescription, Optional<DeleteServerGroupEvent>, Void> { + private final Collection events = [] + + DestroyTitusJobAtomicOperation(DestroyTitusJobDescription description) { + super(description) + } + + @Override + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(DestroyTitusJob.class) + .exceptionHandler(TitusExceptionHandler.class) + .completionHandler(DestroyTitusJobCompletionHandler.class); + } + + @Override + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + builder.initialCommand( + DestroyTitusJob.DestroyTitusJobCommand.builder().description(description).build() + ) + } + + @Override + protected Void parseSagaResult(@NotNull @Nonnull Optional result) { + result.map { events << it } + return null + } + + @Override + Collection getEvents() { + return events + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperation.groovy index d6c25ba9063..70998a2b0d6 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperation.groovy @@ -16,58 +16,60 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation + import com.netflix.spinnaker.clouddriver.orchestration.events.DeleteServerGroupEvent import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import
com.netflix.spinnaker.clouddriver.titus.client.model.Job -import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateJobRequest +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DestroyTitusJob +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.ResolveTitusJobId import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusServerGroupDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.DestroyTitusJobCompletionHandler +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler import groovy.util.logging.Slf4j +import org.jetbrains.annotations.NotNull -@Slf4j -class DestroyTitusServerGroupAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull - private static final String PHASE = "DESTROY_TITUS_SERVER_GROUP" - private final TitusClientProvider titusClientProvider - private final DestroyTitusServerGroupDescription description +@Slf4j +class DestroyTitusServerGroupAtomicOperation extends AbstractSagaAtomicOperation<DestroyTitusServerGroupDescription, Optional<DeleteServerGroupEvent>, Void> { private final Collection events = [] - DestroyTitusServerGroupAtomicOperation(TitusClientProvider titusClientProvider, - DestroyTitusServerGroupDescription description) { - this.titusClientProvider = titusClientProvider - this.description = description + DestroyTitusServerGroupAtomicOperation(DestroyTitusServerGroupDescription description) { + super(description) + } + + @Override + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(ResolveTitusJobId.class) + .then(DestroyTitusJob.class) + .exceptionHandler(TitusExceptionHandler.class) + .completionHandler(DestroyTitusJobCompletionHandler.class) } @Override - Void operate(List priorOutputs) { - task.updateStatus PHASE, "Destroying server group: ${description.serverGroupName}..."
- TitusClient titusClient = titusClientProvider.getTitusClient(description.credentials, description.region) - Job job = titusClient.findJobByName(description.serverGroupName) - if (job) { - titusClient.terminateJob((TerminateJobRequest) new TerminateJobRequest().withJobId(job.id).withUser(description.user)) - events << new DeleteServerGroupEvent( - TitusCloudProvider.ID, description.credentials.name, description.region, description.serverGroupName - ) - task.updateStatus PHASE, "Successfully issued terminate job request to titus for ${job.id} which corresponds to ${description.serverGroupName}" - } else { - task.updateStatus PHASE, "No titus job found for ${description.serverGroupName}" - } + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + builder.initialCommand( + ResolveTitusJobId.ResolveTitusJobIdCommand + .builder() + .account(description.account) + .region(description.region) + .serverGroupName(description.serverGroupName) + .user(description.user) + .build() + ) + } - task.updateStatus PHASE, "Completed destroy server group operation for ${description.serverGroupName}" - null + @Override + protected Void parseSagaResult(@NotNull @Nonnull Optional result) { + result.map { events << it } + return null } @Override Collection getEvents() { return events } - - private static Task getTask() { - TaskRepository.threadLocalTask.get() - } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DetachTitusInstancesAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DetachTitusInstancesAtomicOperation.groovy index 39571e8a48b..763dfa2726a 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DetachTitusInstancesAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DetachTitusInstancesAtomicOperation.groovy @@ -16,75 +16,37 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.model.ResizeJobRequest -import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DetachTitusTasks import com.netflix.spinnaker.clouddriver.titus.deploy.description.DetachTitusInstancesDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class DetachTitusInstancesAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull - private static final String BASE_PHASE = "DETACH_TITUS_INSTANCES" - private final TitusClientProvider titusClientProvider - private final DetachTitusInstancesDescription description - - DetachTitusInstancesAtomicOperation(TitusClientProvider titusClientProvider, - 
DetachTitusInstancesDescription description) { - this.titusClientProvider = titusClientProvider - this.description = description +class DetachTitusInstancesAtomicOperation extends AbstractSagaAtomicOperation<DetachTitusInstancesDescription, Void, Void> { + DetachTitusInstancesAtomicOperation(DetachTitusInstancesDescription description) { + super(description) } @Override - Void operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Detaching instances: ${description.instanceIds}..." - TitusClient titusClient = titusClientProvider.getTitusClient(description.credentials, description.region) - - def job = titusClient.findJobByName(description.asgName, true) - if (!job) { - task.updateStatus BASE_PHASE, "job not found" - return - } - - def validInstanceIds = description.instanceIds.intersect(job.tasks*.id) - - if (validInstanceIds.isEmpty()) { - task.updateStatus BASE_PHASE, "No detachable instances" - return - } - - int newMin = job.instances - validInstanceIds.size() - if (newMin < job.instancesMin) { - if (description.adjustMinIfNecessary) { - if (newMin < 0) { - task.updateStatus BASE_PHASE, "Cannot adjust min size below 0" - } else { - titusClient.resizeJob( - new ResizeJobRequest() - .withInstancesDesired(job.instancesDesired) - .withInstancesMax(job.instancesMax) - .withInstancesMin(newMin) - .withJobId(job.id) - .withUser(description.user) - ) - } - } else { - task.updateStatus BASE_PHASE, "Cannot decrement ASG below minSize - set adjustMinIfNecessary to resize down minSize before detaching instances" - throw new IllegalStateException("Invalid ASG capacity for detachInstances (min: $job.instancesMin, max: $job.instancesMax, desired: $job.instancesDesired)") - } - } + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(DetachTitusTasks.class) + .exceptionHandler(TitusExceptionHandler.class) + } - task.updateStatus BASE_PHASE, "Detaching instances (${validInstanceIds.join(", ")}) from ASG (${description.asgName})." - titusClient.terminateTasksAndShrink( - new TerminateTasksAndShrinkJobRequest().withUser(description.user).withShrink(true).withTaskIds(validInstanceIds) + @Override + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + builder.initialCommand( + DetachTitusTasks.DetachTitusTasksCommand.builder().description(description).build() ) - task.updateStatus BASE_PHASE, "Detached instances (${validInstanceIds.join(", ")}) from ASG (${description.asgName})."
- } - private static Task getTask() { - TaskRepository.threadLocalTask.get() + @Override + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DisableTitusServerGroupAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DisableTitusServerGroupAtomicOperation.groovy index f01d1c30f26..ef42ebd5aba 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DisableTitusServerGroupAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DisableTitusServerGroupAtomicOperation.groovy @@ -15,18 +15,36 @@ */ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DisableTitusJob import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class DisableTitusServerGroupAtomicOperation extends AbstractEnableDisableAtomicOperation { - final String phaseName = "DISABLE_TITUS_SERVER_GROUP" +import javax.annotation.Nonnull - DisableTitusServerGroupAtomicOperation(TitusClientProvider titusClientProvider, EnableDisableServerGroupDescription description) { - super(titusClientProvider, description) +class DisableTitusServerGroupAtomicOperation extends AbstractSagaAtomicOperation<EnableDisableServerGroupDescription, Void, Void> { + DisableTitusServerGroupAtomicOperation(EnableDisableServerGroupDescription description) { + super(description) } @Override - boolean isDisable() { - true + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(DisableTitusJob.class) + .exceptionHandler(TitusExceptionHandler.class); + } + + @Override + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + def build = DisableTitusJob.DisableTitusJobCommand.builder().description(description).build() + builder.initialCommand(build) + } + + @Override + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/EnableTitusServerGroupAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/EnableTitusServerGroupAtomicOperation.groovy index 3d4154a1824..02de208f55c 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/EnableTitusServerGroupAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/EnableTitusServerGroupAtomicOperation.groovy @@ -15,18 +15,36 @@ */ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import
com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.EnableTitusJob import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class EnableTitusServerGroupAtomicOperation extends AbstractEnableDisableAtomicOperation { - final String phaseName = "ENABLE_TITUS_SERVER_GROUP" +import javax.annotation.Nonnull - EnableTitusServerGroupAtomicOperation(TitusClientProvider titusClientProvider, EnableDisableServerGroupDescription description) { - super(titusClientProvider, description) +class EnableTitusServerGroupAtomicOperation extends AbstractSagaAtomicOperation<EnableDisableServerGroupDescription, Void, Void> { + EnableTitusServerGroupAtomicOperation(EnableDisableServerGroupDescription description) { + super(description) } @Override - boolean isDisable() { - false + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(EnableTitusJob.class) + .exceptionHandler(TitusExceptionHandler.class); + } + + @Override + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + def build = EnableTitusJob.EnableTitusJobCommand.builder().description(description).build() + builder.initialCommand(build) + } + + @Override + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/ResizeTitusServerGroupAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/ResizeTitusServerGroupAtomicOperation.groovy index 4933ef4dbbd..9f5dbb4535d 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/ResizeTitusServerGroupAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/ResizeTitusServerGroupAtomicOperation.groovy @@ -16,60 +16,37 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.model.Job -import com.netflix.spinnaker.clouddriver.titus.client.model.ResizeJobRequest +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.ResizeTitusJob import com.netflix.spinnaker.clouddriver.titus.deploy.description.ResizeTitusServerGroupDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class ResizeTitusServerGroupAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull - private static final String PHASE = "RESIZE_TITUS_SERVER_GROUP" - private final TitusClientProvider titusClientProvider - private final ResizeTitusServerGroupDescription description - - ResizeTitusServerGroupAtomicOperation(TitusClientProvider
titusClientProvider, - ResizeTitusServerGroupDescription description) { - this.titusClientProvider = titusClientProvider - this.description = description +class ResizeTitusServerGroupAtomicOperation extends AbstractSagaAtomicOperation { + ResizeTitusServerGroupAtomicOperation(ResizeTitusServerGroupDescription description) { + super(description) } @Override - Void operate(List priorOutputs) { - task.updateStatus PHASE, "Resizing server group: ${description.serverGroupName}..." - TitusClient titusClient = titusClientProvider.getTitusClient(description.credentials, description.region) - Job job = titusClient.findJobByName(description.serverGroupName) - - if (!job) { - throw new IllegalArgumentException("No titus server group named '${description.serverGroupName}' found") - } - - Boolean shouldToggleScalingFlags = !job.inService - if (shouldToggleScalingFlags) { - titusClient.setAutoscaleEnabled(job.id, true) - } + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(ResizeTitusJob.class) + .exceptionHandler(TitusExceptionHandler.class) + } - titusClient.resizeJob( - new ResizeJobRequest() - .withUser(description.user) - .withJobId(job.id) - .withInstancesDesired(description.capacity.desired) - .withInstancesMin(description.capacity.min) - .withInstancesMax(description.capacity.max) + @Override + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + builder.initialCommand( + ResizeTitusJob.ResizeTitusJobCommand.builder().description(description).build() ) - - if (shouldToggleScalingFlags) { - titusClient.setAutoscaleEnabled(job.id, false) - } - - task.updateStatus PHASE, "Completed resize server group operation for ${description.serverGroupName}" - null } - private static Task getTask() { - TaskRepository.threadLocalTask.get() + @Override + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/RunTitusJobAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/RunTitusJobAtomicOperation.groovy index a7e95ba9476..9b3818e28ae 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/RunTitusJobAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/RunTitusJobAtomicOperation.groovy @@ -20,11 +20,14 @@ import com.netflix.spinnaker.clouddriver.data.task.Task import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusDeployHandler -class RunTitusJobAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull + +class RunTitusJobAtomicOperation implements AtomicOperation, SagaContextAware { private static final String PHASE = "RUN_TITUS_JOB" @@ -51,4 +54,14 @@ class RunTitusJobAtomicOperation implements AtomicOperation { private static Task getTask() { TaskRepository.threadLocalTask.get() } + + @Override + void setSagaContext(@Nonnull SagaContext sagaContext) { + 
description.sagaContext = sagaContext + } + + @Override + SagaContext getSagaContext() { + return description.sagaContext + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/TerminateTitusInstancesAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/TerminateTitusInstancesAtomicOperation.groovy index a5803f4bb5a..aff12381e44 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/TerminateTitusInstancesAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/TerminateTitusInstancesAtomicOperation.groovy @@ -16,39 +16,37 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.TerminateTitusTasks import com.netflix.spinnaker.clouddriver.titus.deploy.description.TerminateTitusInstancesDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class TerminateTitusInstancesAtomicOperation implements AtomicOperation { +import javax.annotation.Nonnull - private static final String PHASE = "TERMINATE_TITUS_INSTANCES" - - private final TitusClientProvider titusClientProvider - private final TerminateTitusInstancesDescription description - - TerminateTitusInstancesAtomicOperation(TitusClientProvider titusClientProvider, TerminateTitusInstancesDescription description) { - this.titusClientProvider = titusClientProvider - this.description = description +class TerminateTitusInstancesAtomicOperation extends AbstractSagaAtomicOperation { + TerminateTitusInstancesAtomicOperation(TerminateTitusInstancesDescription description) { + super(description) } @Override - Void operate(List priorOutputs) { - TitusClient titusClient = titusClientProvider.getTitusClient(description.credentials, description.region) - task.updateStatus PHASE, "Terminating titus tasks: ${description.instanceIds}..." 
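// ---------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of this change. The saga
// actions wired into the new flows (TerminateTitusTasks, DisableTitusJob,
// etc.) are defined outside this diff; the class below only sketches the
// shape such an action plausibly takes. The SagaAction/Saga signatures and
// the Result construction are assumptions based on the clouddriver-saga
// module, not the actual implementation.]
import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction
import com.netflix.spinnaker.clouddriver.saga.models.Saga
import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider
import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest
import com.netflix.spinnaker.clouddriver.titus.deploy.actions.TerminateTitusTasks

import javax.annotation.Nonnull

class TerminateTitusTasksSketch implements SagaAction<TerminateTitusTasks.TerminateTitusTasksCommand> {
  private final TitusClientProvider titusClientProvider

  TerminateTitusTasksSketch(TitusClientProvider titusClientProvider) {
    this.titusClientProvider = titusClientProvider
  }

  @Nonnull
  @Override
  SagaAction.Result apply(@Nonnull TerminateTitusTasks.TerminateTitusTasksCommand command, @Nonnull Saga saga) {
    def description = command.description
    // The Titus client call that used to run inline in operate() becomes a
    // discrete saga step that can be logged, retried, and reported on.
    titusClientProvider
      .getTitusClient(description.credentials, description.region)
      .terminateTasksAndShrink(
        new TerminateTasksAndShrinkJobRequest()
          .withTaskIds(description.instanceIds)
          .withShrink(false)
          .withUser(description.user))
    return new SagaAction.Result()
  }
}
// [End editor's note.]
// ---------------------------------------------------------------------------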
- - titusClient.terminateTasksAndShrink(new TerminateTasksAndShrinkJobRequest().withTaskIds(description.instanceIds).withShrink(false).withUser(description.user)) - task.updateStatus PHASE, "Successfully issued terminate task request to titus for task: ${description.instanceIds.toString()}" + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(TerminateTitusTasks.class) + .exceptionHandler(TitusExceptionHandler.class) + } - task.updateStatus PHASE, "Completed terminate instances operation for ${description.instanceIds}" - null + @Override + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + builder.initialCommand( + TerminateTitusTasks.TerminateTitusTasksCommand.builder().description(description).build() + ) } - private static Task getTask() { - TaskRepository.threadLocalTask.get() + @Override + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusScalingPolicyAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusScalingPolicyAtomicOperation.groovy index 9bee6cdc981..bcb25d9ddc6 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusScalingPolicyAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusScalingPolicyAtomicOperation.groovy @@ -16,92 +16,43 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.MonitorTitusScalingPolicy +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.UpsertTitusScalingPolicy import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription -import com.netflix.spinnaker.kork.core.RetrySupport -import com.netflix.titus.grpc.protogen.* -import com.netflix.titus.grpc.protogen.PutPolicyRequest.Builder -import com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusScalingPolicyModified +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.UpsertTitusScalingPolicyCompletionHandler import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired +import org.jetbrains.annotations.NotNull -@Slf4j -class UpsertTitusScalingPolicyAtomicOperation implements AtomicOperation { - - UpsertTitusScalingPolicyDescription description +import javax.annotation.Nonnull +@Slf4j +class UpsertTitusScalingPolicyAtomicOperation extends AbstractSagaAtomicOperation<UpsertTitusScalingPolicyDescription, TitusScalingPolicyModified, Map<String, String>> { UpsertTitusScalingPolicyAtomicOperation(UpsertTitusScalingPolicyDescription description) { - this.description = description + super(description) } - private static 
final String BASE_PHASE = "UPSERT_SCALING_POLICY" - - private static Task getTask() { - TaskRepository.threadLocalTask.get() + @Override + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(UpsertTitusScalingPolicy.class) + .then(MonitorTitusScalingPolicy.class) + .exceptionHandler(TitusExceptionHandler.class) + .completionHandler(UpsertTitusScalingPolicyCompletionHandler.class); } - @Autowired - TitusClientProvider titusClientProvider - - @Autowired - RetrySupport retrySupport - @Override - Map operate(List priorOutputs) { - task.updateStatus BASE_PHASE, "Initializing Upsert Scaling Policy..." - def client = titusClientProvider.getTitusAutoscalingClient(description.credentials, description.region) - - if (!client) { - throw new UnsupportedOperationException("Autoscaling is not supported for this account/region") - } - - if (description.scalingPolicyID) { - - retrySupport.retry({ -> - client.updateScalingPolicy( - UpdatePolicyRequest.newBuilder() - .setScalingPolicy(description.toScalingPolicyBuilder().build()) - .setPolicyId(ScalingPolicyID.newBuilder().setId(description.scalingPolicyID).build()) - .build() - ) - }, 10, 3000, false) - - task.updateStatus BASE_PHASE, "Scaling policy successfully updated" - - return [scalingPolicyID: description.scalingPolicyID] - } else { - ScalingPolicy.Builder builder = description.toScalingPolicyBuilder() - - Builder requestBuilder = PutPolicyRequest.newBuilder() - .setScalingPolicy(builder) - .setJobId(description.jobId) - - task.updateStatus BASE_PHASE, "Create Scaling Policy request constructed, sending..." - - ScalingPolicyID result = client.createScalingPolicy(requestBuilder.build()) - - task.updateStatus BASE_PHASE, "Create Scaling Policy succeeded; new policy ID: ${result.id}; monitoring creation..." - - // make sure the new policy was applied - verifyNewPolicyState(client, result) - - task.updateStatus BASE_PHASE, "Scaling policy successfully created" - - return [scalingPolicyID: result.id] - } - + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + def build = UpsertTitusScalingPolicy.UpsertTitusScalingPolicyCommand.builder().description(description).build() + builder.initialCommand(build) } - private void verifyNewPolicyState(client, result) { - retrySupport.retry({ -> - ScalingPolicyResult updatedPolicy = client.getScalingPolicy(result.id) - if (!updatedPolicy || (updatedPolicy.getPolicyState().state != ScalingPolicyState.Applied)) { - throw new IllegalStateException("New policy did not transition to applied state within 45 seconds") - } - }, 5000, 10, false) + @Override + protected Map parseSagaResult(@NotNull @Nonnull TitusScalingPolicyModified result) { + return [scalingPolicyID: result.getScalingPolicyId()] } - } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation.groovy deleted file mode 100644 index 4b1d4cc2c01..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation.groovy +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState -import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription -import com.netflix.spinnaker.clouddriver.titus.model.TitusServerGroup -import org.springframework.beans.factory.annotation.Autowired - -abstract class AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation implements AtomicOperation { - abstract boolean isEnable() - - abstract String getPhaseName() - - private final TitusClientProvider titusClientProvider - - @Autowired - TitusEurekaSupport discoverySupport - - EnableDisableInstanceDiscoveryDescription description - - AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation(TitusClientProvider titusClientProvider, EnableDisableInstanceDiscoveryDescription description) { - this.titusClientProvider = titusClientProvider - this.description = description - } - - @Override - Void operate(List priorOutputs) { - def performingAction = isEnable() ? 'Enabling' : 'Disabling' - def task = getTask() - - task.updateStatus phaseName, "Initializing ${performingAction} of Instances (${description.instanceIds.join(", ")}) in Discovery Operation..." - if (!description.credentials.discoveryEnabled) { - task.updateStatus phaseName, "Discovery is not enabled, unable to modify instance status" - task.fail() - return null - } - def titusClient = titusClientProvider.getTitusClient(description.credentials, description.region) - def job = titusClient.findJobByName(description.asgName, true) - if (!job) { - return - } - def asgInstanceIds = new TitusServerGroup(job, description.credentials.name, description.region).instances.findAll { - (it.state == TaskState.RUNNING || it.state == TaskState.STARTING) && description.instanceIds.contains(it.id) - } - if (!asgInstanceIds) { - return - } - def status = isEnable() ? 
AbstractEurekaSupport.DiscoveryStatus.Enable : AbstractEurekaSupport.DiscoveryStatus.Disable - discoverySupport.updateDiscoveryStatusForInstances( - description, task, phaseName, status, asgInstanceIds*.instanceId - ) - null - } - - Task getTask() { - TaskRepository.threadLocalTask.get() - } -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/DisableTitusInstancesInDiscoveryAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/DisableTitusInstancesInDiscoveryAtomicOperation.groovy index 98c523f8589..23065b9ba2d 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/DisableTitusInstancesInDiscoveryAtomicOperation.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/DisableTitusInstancesInDiscoveryAtomicOperation.groovy @@ -16,21 +16,36 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DisableTitusTasks import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class DisableTitusInstancesInDiscoveryAtomicOperation extends AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation { - DisableTitusInstancesInDiscoveryAtomicOperation(TitusClientProvider titusClientProvider, EnableDisableInstanceDiscoveryDescription description) { - super(titusClientProvider, description) +import javax.annotation.Nonnull + +class DisableTitusInstancesInDiscoveryAtomicOperation extends AbstractSagaAtomicOperation { + DisableTitusInstancesInDiscoveryAtomicOperation(EnableDisableInstanceDiscoveryDescription description) { + super(description) + } + + @Override + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(DisableTitusTasks.class) + .exceptionHandler(TitusExceptionHandler.class); } @Override - boolean isEnable() { - return false + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + def build = DisableTitusTasks.DisableTitusTasksCommand.builder().description(description).build() + builder.initialCommand(build) } @Override - String getPhaseName() { - return "DISABLE_INSTANCES_IN_DISCOVERY" + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/EnableTitusInstancesInDiscoveryAtomicOperation.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/EnableTitusInstancesInDiscoveryAtomicOperation.groovy index 288fff97890..6884c81c286 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/EnableTitusInstancesInDiscoveryAtomicOperation.groovy +++ 
b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/EnableTitusInstancesInDiscoveryAtomicOperation.groovy @@ -16,21 +16,36 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.EnableTitusTasks import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import org.jetbrains.annotations.NotNull -class EnableTitusInstancesInDiscoveryAtomicOperation extends AbstractEnableDisableTitusInstanceDiscoveryAtomicOperation { - EnableTitusInstancesInDiscoveryAtomicOperation(TitusClientProvider titusClientProvider, EnableDisableInstanceDiscoveryDescription description) { - super(titusClientProvider, description) +import javax.annotation.Nonnull + +class EnableTitusInstancesInDiscoveryAtomicOperation extends AbstractSagaAtomicOperation { + EnableTitusInstancesInDiscoveryAtomicOperation(EnableDisableInstanceDiscoveryDescription description) { + super(description) + } + + @Override + protected SagaFlow buildSagaFlow(List priorOutputs) { + return new SagaFlow() + .then(EnableTitusTasks.class) + .exceptionHandler(TitusExceptionHandler.class); } @Override - boolean isEnable() { - return true + protected void configureSagaBridge(@NotNull @Nonnull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) { + def build = EnableTitusTasks.EnableTitusTasksCommand.builder().description(description).build() + builder.initialCommand(build) } @Override - String getPhaseName() { - return "ENABLE_INSTANCES_IN_DISCOVERY" + protected Void parseSagaResult(@NotNull @Nonnull Void result) { + return null } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/TitusEurekaSupport.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/TitusEurekaSupport.groovy index 445b4afe623..0bf36921480 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/TitusEurekaSupport.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/discovery/TitusEurekaSupport.groovy @@ -42,8 +42,6 @@ class TitusEurekaSupport extends AbstractEurekaSupport { EurekaUtil.getWritableEureka(credentials.discovery, region) } - @VisibleForTesting - @PackageScope boolean verifyInstanceAndAsgExist(def credentials, String region, String instanceId, diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/AbstractTitusDescriptionValidatorSupport.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/AbstractTitusDescriptionValidatorSupport.groovy index 2d6bd2ff491..ed9670c8d47 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/AbstractTitusDescriptionValidatorSupport.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/AbstractTitusDescriptionValidatorSupport.groovy @@ -17,44 +17,36 @@ package 
com.netflix.spinnaker.clouddriver.titus.deploy.validators import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.security.AccountCredentials import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials import com.netflix.spinnaker.clouddriver.titus.deploy.description.AbstractTitusCredentialsDescription -import org.springframework.validation.Errors abstract class AbstractTitusDescriptionValidatorSupport extends DescriptionValidator { - private final AccountCredentialsProvider accountCredentialsProvider private final String descriptionName - AbstractTitusDescriptionValidatorSupport(AccountCredentialsProvider accountCredentialsProvider, String descriptionName) { - this.accountCredentialsProvider = accountCredentialsProvider + AbstractTitusDescriptionValidatorSupport(String descriptionName) { this.descriptionName = descriptionName } @Override - void validate(List priorDescriptions, T description, Errors errors) { + void validate(List priorDescriptions, T description, ValidationErrors errors) { if (!description.credentials) { errors.rejectValue "credentials", "${descriptionName}.credentials.empty" } else { - def credentials = getAccountCredentials(description?.credentials?.name) - if (!(credentials instanceof NetflixTitusCredentials)) { + if (!(description?.credentials instanceof NetflixTitusCredentials)) { errors.rejectValue("credentials", "${descriptionName}.credentials.invalid") } } } - AccountCredentials getAccountCredentials(String accountName) { - accountCredentialsProvider.getCredentials(accountName) - } - - - static void validateRegion(T description, String regionName, String errorKey, Errors errors) { + static void validateRegion(T description, String regionName, String errorKey, ValidationErrors errors) { validateRegions(description, regionName ? [regionName] : [], errorKey, errors, "region") } - static void validateRegions(T description, Collection regionNames, String errorKey, Errors errors, String attributeName = "regions") { + static void validateRegions(T description, Collection regionNames, String errorKey, ValidationErrors errors, String attributeName = "regions") { if (!regionNames) { errors.rejectValue(attributeName, "${errorKey}.${attributeName}.empty") } else { @@ -65,14 +57,14 @@ abstract class AbstractTitusDescriptionValidatorSupport void validateAsgName(T description, ValidationErrors errors) { def key = description.getClass().simpleName if (!description.asgName) { errors.rejectValue("asgName", "${key}.asgName.empty") } } - static void validateAsgNameAndRegionAndInstanceIds(T description, Errors errors) { + static void validateAsgNameAndRegionAndInstanceIds(T description, ValidationErrors errors) { def key = description.class.simpleName if (description.asgName) { validateAsgName(description, errors) diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusJobDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusJobDescriptionValidator.groovy new file mode 100644 index 00000000000..0efc0d33513 --- /dev/null +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusJobDescriptionValidator.groovy @@ -0,0 +1,51 @@ +/* + * + * Copyright 2019 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.validators + +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.titus.TitusOperation +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription +import org.springframework.stereotype.Component + +@Component +@TitusOperation(AtomicOperations.DESTROY_JOB) +class DestroyTitusJobDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { + + DestroyTitusJobDescriptionValidator() { + super("destroyTitusJobDescription") + } + + @Override + void validate(List priorDescriptions, DestroyTitusJobDescription description, ValidationErrors errors) { + super.validate(priorDescriptions, description, errors) + + if (!description.region) { + errors.rejectValue "region", "destroyTitusJobDescription.region.empty" + } + if (description?.credentials && !((NetflixTitusCredentials) description?.credentials).regions.name.contains(description.region)) { + errors.rejectValue "region", "destroyTitusJobDescription.region.not.configured", description.region, "Region not configured" + } + + if (!description.jobId) { + errors.rejectValue "jobId", "destroyTitusJobDescription.jobId.empty" + } + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusServerGroupDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusServerGroupDescriptionValidator.groovy index 785e40d1425..2e93b9760a8 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusServerGroupDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DestroyTitusServerGroupDescriptionValidator.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation @@ -23,19 +24,18 @@ import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentia import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusServerGroupDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.DESTROY_SERVER_GROUP) class DestroyTitusServerGroupDescriptionValidator extends 
AbstractTitusDescriptionValidatorSupport { @Autowired - DestroyTitusServerGroupDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "destroyTitusServerGroupDescription") + DestroyTitusServerGroupDescriptionValidator() { + super("destroyTitusServerGroupDescription") } @Override - void validate(List priorDescriptions, DestroyTitusServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, DestroyTitusServerGroupDescription description, ValidationErrors errors) { super.validate(priorDescriptions, description, errors) @@ -43,8 +43,7 @@ class DestroyTitusServerGroupDescriptionValidator extends AbstractTitusDescripti errors.rejectValue "region", "destroyTitusServerGroupDescription.region.empty" } - def credentials = getAccountCredentials(description?.credentials?.name) - if (credentials && !((NetflixTitusCredentials) credentials).regions.name.contains(description.region)) { + if (description?.credentials && !((NetflixTitusCredentials) description?.credentials).regions.name.contains(description.region)) { errors.rejectValue "region", "destroyTitusServerGroupDescription.region.not.configured", description.region, "Region not configured" } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusInstancesInDiscoveryDescriptionValidator.groovy index 746fc05d5a9..e04d7015ba0 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusInstancesInDiscoveryDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusInstancesInDiscoveryDescriptionValidator.groovy @@ -16,13 +16,13 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.DISABLE_INSTANCES_IN_DISCOVERY) @@ -30,12 +30,12 @@ class DisableTitusInstancesInDiscoveryDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { @Autowired - DisableTitusInstancesInDiscoveryDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "disableInstacesInDiscoveryDescription") + DisableTitusInstancesInDiscoveryDescriptionValidator() { + super("disableInstacesInDiscoveryDescription") } @Override - void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, ValidationErrors errors) { def key = description.class.simpleName validateAsgNameAndRegionAndInstanceIds(description, errors) diff --git 
a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusServerGroupDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusServerGroupDescriptionValidator.groovy index 5f3c7501aa3..87937eaf6e1 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusServerGroupDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/DisableTitusServerGroupDescriptionValidator.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation @@ -23,19 +24,18 @@ import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentia import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.DISABLE_SERVER_GROUP) class DisableTitusServerGroupDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { @Autowired - DisableTitusServerGroupDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "disableTitusServerGroupDescription") + DisableTitusServerGroupDescriptionValidator() { + super("disableTitusServerGroupDescription") } @Override - void validate(List priorDescriptions, EnableDisableServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableServerGroupDescription description, ValidationErrors errors) { super.validate(priorDescriptions, description, errors) @@ -43,8 +43,7 @@ class DisableTitusServerGroupDescriptionValidator extends AbstractTitusDescripti errors.rejectValue "region", "disableTitusServerGroupDescription.region.empty" } - def credentials = getAccountCredentials(description?.credentials?.name) - if (credentials && !((NetflixTitusCredentials) credentials).regions.name.contains(description.region)) { + if (description?.credentials && !((NetflixTitusCredentials) description?.credentials).regions.name.contains(description.region)) { errors.rejectValue "region", "disableTitusServerGroupDescription.region.not.configured", description.region, "Region not configured" } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/EnableTitusInstancesInDiscoveryDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/EnableTitusInstancesInDiscoveryDescriptionValidator.groovy index e13011b4882..49f66e561d9 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/EnableTitusInstancesInDiscoveryDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/EnableTitusInstancesInDiscoveryDescriptionValidator.groovy @@ -16,24 +16,24 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import 
com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.ENABLE_INSTANCES_IN_DISCOVERY) class EnableTitusInstancesInDiscoveryDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { @Autowired - EnableTitusInstancesInDiscoveryDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "enableInstacesInDiscoveryDescription") + EnableTitusInstancesInDiscoveryDescriptionValidator() { + super("enableInstacesInDiscoveryDescription") } - void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, Errors errors) { + void validate(List priorDescriptions, EnableDisableInstanceDiscoveryDescription description, ValidationErrors errors) { def key = description.class.simpleName validateAsgNameAndRegionAndInstanceIds(description, errors) diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/ResizeTitusServerGroupDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/ResizeTitusServerGroupDescriptionValidator.groovy index 1cd8cca0aad..5ef66f18cc0 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/ResizeTitusServerGroupDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/ResizeTitusServerGroupDescriptionValidator.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation @@ -23,19 +24,18 @@ import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentia import com.netflix.spinnaker.clouddriver.titus.deploy.description.ResizeTitusServerGroupDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.RESIZE_SERVER_GROUP) class ResizeTitusServerGroupDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { @Autowired - ResizeTitusServerGroupDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "resizeTitusServerGroupDescription") + ResizeTitusServerGroupDescriptionValidator() { + super("resizeTitusServerGroupDescription") } @Override - void validate(List priorDescriptions, ResizeTitusServerGroupDescription description, Errors errors) { + void validate(List priorDescriptions, ResizeTitusServerGroupDescription description, ValidationErrors errors) { super.validate(priorDescriptions, description, errors) @@ -43,8 +43,7 @@ class ResizeTitusServerGroupDescriptionValidator extends AbstractTitusDescriptio errors.rejectValue "region", "resizeTitusServerGroupDescription.region.empty" } - 
def credentials = getAccountCredentials(description?.credentials?.name) - if (credentials && !((NetflixTitusCredentials) credentials).regions.name.contains(description.region)) { + if (description?.credentials && !((NetflixTitusCredentials) description?.credentials).regions.name.contains(description.region)) { errors.rejectValue "region", "resizeTitusServerGroupDescription.region.not.configured", description.region, "Region not configured" } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TerminateTitusInstancesDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TerminateTitusInstancesDescriptionValidator.groovy index 03f888ba288..f14294f605a 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TerminateTitusInstancesDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TerminateTitusInstancesDescriptionValidator.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation @@ -23,19 +24,18 @@ import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentia import com.netflix.spinnaker.clouddriver.titus.deploy.description.TerminateTitusInstancesDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.TERMINATE_INSTANCES) class TerminateTitusInstancesDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { @Autowired - TerminateTitusInstancesDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "terminateTitusInstancesDescription") + TerminateTitusInstancesDescriptionValidator() { + super("terminateTitusInstancesDescription") } @Override - void validate(List priorDescriptions, TerminateTitusInstancesDescription description, Errors errors) { + void validate(List priorDescriptions, TerminateTitusInstancesDescription description, ValidationErrors errors) { super.validate(priorDescriptions, description, errors) @@ -43,8 +43,7 @@ class TerminateTitusInstancesDescriptionValidator extends AbstractTitusDescripti errors.rejectValue "region", "terminateTitusInstancesDescription.region.empty" } - def credentials = getAccountCredentials(description?.credentials?.name) - if (credentials && !((NetflixTitusCredentials) credentials).regions.name.contains(description.region)) { + if (description?.credentials && !((NetflixTitusCredentials) description?.credentials).regions.name.contains(description.region)) { errors.rejectValue "region", "terminateTitusInstancesDescription.region.not.configured", description.region, "Region not configured" } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TitusDeployDescriptionValidator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TitusDeployDescriptionValidator.groovy index c21bb3f9a9b..a6edbacbea7 100644 --- 
a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TitusDeployDescriptionValidator.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/validators/TitusDeployDescriptionValidator.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.deploy.validators +import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.clouddriver.titus.TitusOperation @@ -23,19 +24,18 @@ import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentia import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import org.springframework.validation.Errors @Component @TitusOperation(AtomicOperations.CREATE_SERVER_GROUP) class TitusDeployDescriptionValidator extends AbstractTitusDescriptionValidatorSupport { @Autowired - TitusDeployDescriptionValidator(AccountCredentialsProvider accountCredentialsProvider) { - super(accountCredentialsProvider, "titusDeployDescription") + TitusDeployDescriptionValidator() { + super("titusDeployDescription") } @Override - void validate(List priorDescriptions, TitusDeployDescription description, Errors errors) { + void validate(List priorDescriptions, TitusDeployDescription description, ValidationErrors errors) { super.validate(priorDescriptions, description, errors) @@ -43,9 +43,8 @@ class TitusDeployDescriptionValidator extends AbstractTitusDescriptionValidatorS errors.rejectValue "region", "titusDeployDescription.region.empty" } - def credentials = getAccountCredentials(description?.credentials?.name) - if (credentials && !((NetflixTitusCredentials) credentials).regions.name.contains(description.region)) { - errors.rejectValue "region", "titusDeployDescription.region.not.configured", description.region, "Region not configured" + if (description?.credentials && !((NetflixTitusCredentials) description?.credentials).regions.name.contains(description.region)) { + errors.rejectValue "region", "titusDeployDescription.region.not.configured", "Region '${description.region}' not configured" } if (!description.application) { diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/health/TitusHealthIndicator.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/health/TitusHealthIndicator.groovy deleted file mode 100644 index 26f1d1c1e98..00000000000 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/health/TitusHealthIndicator.groovy +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.titus.health - -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion -import com.netflix.spinnaker.clouddriver.titus.client.model.HealthStatus -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import groovy.util.logging.Slf4j -import org.springframework.boot.actuate.health.Health -import org.springframework.boot.actuate.health.HealthIndicator -import org.springframework.boot.actuate.health.Status -import org.springframework.scheduling.annotation.Scheduled - -import java.util.concurrent.atomic.AtomicReference - -@Slf4j -class TitusHealthIndicator implements HealthIndicator { - - private final AccountCredentialsProvider accountCredentialsProvider - private final TitusClientProvider titusClientProvider - private AtomicReference health = new AtomicReference<>(new Health.Builder().up().build()) - - TitusHealthIndicator(AccountCredentialsProvider accountCredentialsProvider, - TitusClientProvider titusClientProvider) { - this.accountCredentialsProvider = accountCredentialsProvider - this.titusClientProvider = titusClientProvider - } - - @Override - Health health() { - health.get() - } - - @Scheduled(fixedDelay = 300000L) - void checkHealth() { - Status status = Status.UP - Map details = [:] - for (NetflixTitusCredentials account : accountCredentialsProvider.all.findAll { - it instanceof NetflixTitusCredentials - }) { - for (TitusRegion region in account.regions) { - Status regionStatus - Map regionDetails = [:] - try { - HealthStatus health = titusClientProvider.getTitusClient(account, region.name).getHealth().healthStatus - regionStatus = health == HealthStatus.UNHEALTHY ? 
Status.OUT_OF_SERVICE : Status.UP - } catch (e) { - log.error( - "Failed to verify Titus health (account: {}, region: {})", - account.name, - region.name, - e - ) - regionStatus = Status.OUT_OF_SERVICE - regionDetails << [reason: e.toString()] - } - regionDetails << [status: regionStatus] - if (regionStatus == Status.OUT_OF_SERVICE) { - status = Status.OUT_OF_SERVICE - } - details << [("${account.name}:${region.name}".toString()): regionDetails] - } - } - health.set(new Health.Builder(status, details).build()) - } - -} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusCluster.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusCluster.groovy index eb8eb706b85..952ca11eae5 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusCluster.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusCluster.groovy @@ -16,6 +16,9 @@ package com.netflix.spinnaker.clouddriver.titus.model +import com.fasterxml.jackson.annotation.JsonAnyGetter +import com.fasterxml.jackson.annotation.JsonAnySetter +import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.spinnaker.clouddriver.model.Cluster import com.netflix.spinnaker.clouddriver.model.LoadBalancer import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider @@ -26,4 +29,27 @@ class TitusCluster implements Cluster, Serializable { String accountName Set serverGroups = Collections.synchronizedSet(new HashSet()) Set loadBalancers = Collections.emptySet() + + @JsonIgnore + private Map extraAttributes = new LinkedHashMap() + + @JsonAnyGetter + @Override + Map getExtraAttributes() { + return extraAttributes + } + + /** + * Setter for values that are not explicitly defined. + * + * Used both for Jackson mapping ({@code @JsonAnySetter}) and for + * Groovy's implicit Map constructor (this is the reason the + * method is named {@code set(String name, Object value)}). + * @param name The property name + * @param value The property value + */ + @JsonAnySetter + void set(String name, Object value) { + extraAttributes.put(name, value) + } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstance.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstance.groovy index 2b773babdfe..5c3f3e385b5 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstance.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstance.groovy @@ -47,6 +47,10 @@ class TitusInstance implements Instance { Set securityGroups final String providerType = TitusCloudProvider.ID final String cloudProvider = TitusCloudProvider.ID + String privateIpAddress + String agentId + String ipv4Address + String ipv6Address TitusInstance() {} @@ -80,6 +84,14 @@ class TitusInstance implements Instance { finishedAt = task.finishedAt ? 
task.finishedAt.time : null stdoutLive = task.stdoutLive logs = task.logs + + // expose containerIp as privateIpAddress to remain consistent with aws + privateIpAddress = task.containerIp ?: task.data?.ipAddresses?.nfvpc + + agentId = task.agentId + + ipv4Address = task.ipv4Address + ipv6Address = task.ipv6Address } @Override @@ -91,6 +103,22 @@ class TitusInstance implements Instance { placement.getContainerIp() } + String getAgentId() { + agentId + } + + String getAvailabilityZone() { + return placement.getZone() + } + + String getIpv4Address() { + return ipv4Address + } + + String getIpv6Address() { + return ipv6Address + } + String getHostIp() { placement.getHost() } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroup.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroup.groovy index d4c98ead76c..eb3cb684461 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroup.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroup.groovy @@ -16,14 +16,20 @@ package com.netflix.spinnaker.clouddriver.titus.model +import com.fasterxml.jackson.annotation.JsonAnyGetter +import com.fasterxml.jackson.annotation.JsonAnySetter +import com.fasterxml.jackson.annotation.JsonIgnore import com.netflix.frigga.Names import com.netflix.spinnaker.clouddriver.model.HealthState import com.netflix.spinnaker.clouddriver.model.Instance import com.netflix.spinnaker.clouddriver.model.ServerGroup import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider +import com.netflix.spinnaker.clouddriver.titus.client.model.DisruptionBudget import com.netflix.spinnaker.clouddriver.titus.client.model.Efs import com.netflix.spinnaker.clouddriver.titus.client.model.Job import com.netflix.spinnaker.clouddriver.titus.client.model.MigrationPolicy +import com.netflix.spinnaker.clouddriver.titus.client.model.ServiceJobProcesses +import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest /** * Equivalent of a Titus {@link com.netflix.spinnaker.clouddriver.titus.client.model.Job} @@ -36,6 +42,7 @@ class TitusServerGroup implements ServerGroup, Serializable { final String type = TitusCloudProvider.ID final String cloudProvider = TitusCloudProvider.ID String entryPoint + String cmd String awsAccount String accountId String iamProfile @@ -52,25 +59,33 @@ class TitusServerGroup implements ServerGroup, Serializable { Map containerAttributes Set instances = [] as Set ServerGroup.Capacity capacity + DisruptionBudget disruptionBudget TitusServerGroupResources resources = new TitusServerGroupResources() TitusServerGroupPlacement placement = new TitusServerGroupPlacement() - boolean disabled + Boolean disabled Efs efs String capacityGroup int retries int runtimeLimitSecs Map buildInfo MigrationPolicy migrationPolicy + ServiceJobProcesses serviceJobProcesses + SubmitJobRequest.Constraints constraints + + @JsonIgnore + private Map extraAttributes = new LinkedHashMap() TitusServerGroup() {} TitusServerGroup(Job job, String account, String region) { id = job.id name = job.name + disruptionBudget = job.disruptionBudget image << [dockerImageName: job.applicationName] image << [dockerImageVersion: job.version] image << [dockerImageDigest: job.digest] entryPoint = job.entryPoint + cmd = job.cmd iamProfile = job.iamProfile resources.cpu = job.cpu resources.memory = job.memory @@ -105,6 +120,28 @@ class TitusServerGroup 
implements ServerGroup, Serializable { "digest": "${image.dockerImageDigest}".toString() ] ] + serviceJobProcesses = job.serviceJobProcesses + constraints = job.constraints + } + + @JsonAnyGetter + @Override + Map getExtraAttributes() { + return extraAttributes + } + + /** + * Setter for values that are not explicitly defined. + * + * Used both for Jackson mapping ({@code @JsonAnySetter}) and for + * Groovy's implicit Map constructor (this is the reason the + * method is named {@code set(String name, Object value)}). + * @param name The property name + * @param value The property value + */ + @JsonAnySetter + void set(String name, Object value) { + extraAttributes.put(name, value) } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/config/TitusConfiguration.groovy b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/config/TitusConfiguration.groovy index 466f1d2cc65..7c316e3953c 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/config/TitusConfiguration.groovy +++ b/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/config/TitusConfiguration.groovy @@ -17,17 +17,20 @@ package com.netflix.spinnaker.config import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent +import com.netflix.spinnaker.clouddriver.saga.config.SagaAutoConfiguration import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.titus.client.SimpleGrpcChannelFactory import com.netflix.spinnaker.clouddriver.titus.client.TitusJobCustomizer import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion import com.netflix.spinnaker.clouddriver.titus.client.model.GrpcChannelFactory import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusDeployHandler -import com.netflix.spinnaker.clouddriver.titus.health.TitusHealthIndicator -import com.netflix.spinnaker.clouddriver.titus.client.SimpleGrpcChannelFactory +import com.netflix.spinnaker.fiat.model.Authorization +import com.netflix.spinnaker.fiat.model.resources.Permissions import com.netflix.spinnaker.kork.core.RetrySupport +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer +import com.netflix.spinnaker.kork.jackson.ObjectMapperSubtypeConfigurer.SubtypeLocator import groovy.util.logging.Slf4j import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty @@ -36,6 +39,7 @@ import org.springframework.boot.context.properties.EnableConfigurationProperties import org.springframework.context.annotation.Bean import org.springframework.context.annotation.ComponentScan import org.springframework.context.annotation.Configuration +import org.springframework.context.annotation.Import import java.util.regex.Pattern @@ -43,6 +47,7 @@ import java.util.regex.Pattern @ConditionalOnProperty('titus.enabled') @EnableConfigurationProperties @ComponentScan('com.netflix.spinnaker.clouddriver.titus') +@Import(SagaAutoConfiguration) @Slf4j class TitusConfiguration { @@ -58,12 +63,27 @@ class TitusConfiguration { List accounts = new ArrayList<>() for (TitusCredentialsConfig.Account account in titusCredentialsConfig.accounts) { List regions = account.regions.collect { - new TitusRegion(it.name, 
account.name, it.endpoint, it.autoscalingEnabled, it.loadBalancingEnabled, it.applicationName, it.url, it.port, it.featureFlags) + new TitusRegion(it.name, account.name, it.endpoint, it.applicationName, it.url, it.port, it.featureFlags, it.eurekaName, it.eurekaRegion) } if (!account.bastionHost && titusCredentialsConfig.defaultBastionHostTemplate) { account.bastionHost = titusCredentialsConfig.defaultBastionHostTemplate.replaceAll(Pattern.quote('{{environment}}'), account.environment) } - NetflixTitusCredentials credentials = new NetflixTitusCredentials(account.name, account.environment, account.accountType, regions, account.bastionHost, account.registry, account.awsAccount, account.awsVpc ?: titusCredentialsConfig.awsVpc, account.discoveryEnabled, account.discovery, account.stack ?: 'mainvpc', account.requiredGroupMembership, account.eurekaName, account.autoscalingEnabled ?: false, account.loadBalancingEnabled ?: false, account.splitCachingEnabled ?: false) + NetflixTitusCredentials credentials = new NetflixTitusCredentials( + account.name, + account.environment, + account.accountType, + regions, + account.bastionHost, + account.registry, + account.awsAccount, + account.awsVpc ?: titusCredentialsConfig.awsVpc, + account.discoveryEnabled, + account.discovery, + account.stack ?: 'mainvpc', + account.requiredGroupMembership, + account.getPermissions(), + account.eurekaName + ) accounts.add(credentials) repository.save(account.name, credentials) } @@ -75,16 +95,6 @@ class TitusConfiguration { return new TitusClientProvider(registry, titusJobCustomizers.orElse(Collections.emptyList()), grpcChannelFactory, retrySupport) } - @Bean - TitusDeployHandler titusDeployHandler(TitusClientProvider titusClientProvider, AccountCredentialsRepository accountCredentialsRepository) { - new TitusDeployHandler(titusClientProvider, accountCredentialsRepository) - } - - @Bean - TitusHealthIndicator titusHealthIndicator(AccountCredentialsProvider accountCredentialsProvider, TitusClientProvider titusClientProvider) { - new TitusHealthIndicator(accountCredentialsProvider, titusClientProvider) - } - @Bean @ConditionalOnMissingBean(GrpcChannelFactory) GrpcChannelFactory simpleGrpcChannelFactory() { @@ -108,21 +118,56 @@ class TitusConfiguration { String awsVpc String stack List requiredGroupMembership + //see getPermissions for the reasoning behind + //the generic types used here + Map> permissions String eurekaName - Boolean autoscalingEnabled - Boolean loadBalancingEnabled - Boolean splitCachingEnabled + + Permissions getPermissions() { + //boot yaml mapping is unintuitive here: each list binds to an index-keyed map
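+ //for example, this yaml under an account's permissions block: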
+ //READ: + // - teamdl@company.org + //WRITE: + // - teamdl@company.org + // + //ends up as: [ + // READ: [0: teamdl@company.org], + // WRITE: [0: teamdl@company.org] + //] + + if (!permissions) { + return Permissions.EMPTY + } + + def builder = new Permissions.Builder() + permissions.each { String authType, Map roles -> + //make sure we don't blow up on unknown enum values: + def auth = Authorization.ALL.find { it.toString() == authType.toString() } + if (auth) { + builder.add(auth, roles.values() as List) + } + } + return builder.build() + } } static class Region { String name String endpoint - Boolean autoscalingEnabled - Boolean loadBalancingEnabled String applicationName String url Integer port List featureFlags + String eurekaName + String eurekaRegion } } + + @Bean + SubtypeLocator titusEventSubtypeLocator() { + return new ObjectMapperSubtypeConfigurer.ClassSubtypeLocator( + SpinnakerEvent.class, + Collections.singletonList("com.netflix.spinnaker.clouddriver.titus") + ); + } } diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/JobType.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/JobType.java new file mode 100644 index 00000000000..72e2c621319 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/JobType.java @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** The supported Job types within Titus. */ +public enum JobType { + BATCH("batch"), + SERVICE("service"); + + private final String value; + + JobType(String value) { + this.value = value; + } + + public static JobType from(@Nullable String value) { + if (value == null) { + return SERVICE; + } + return JobType.valueOf(value.toUpperCase()); + } + + /** Use {@code isEqual(String)} instead. */ + @Deprecated + public static boolean isEqual(@Nullable String value, @Nonnull JobType expectedType) { + return from(value).equals(expectedType); + } + + public boolean isEqual(@Nullable String value) { + return from(value).equals(this); + } + + @Nonnull + public String value() { + return value; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/TitusException.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/TitusException.java new file mode 100644 index 00000000000..fc7d6ded13d --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/TitusException.java @@ -0,0 +1,36 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus; + +import com.netflix.spinnaker.kork.exceptions.IntegrationException; + +public class TitusException extends IntegrationException { + public TitusException(String message) { + super(message); + setRetryable(false); + } + + public TitusException(String message, String userMessage) { + super(message, userMessage); + setRetryable(false); + } + + public TitusException(Throwable cause, boolean retryable) { + super(cause); + setRetryable(retryable); + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/TitusUtils.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/TitusUtils.java new file mode 100644 index 00000000000..c0eed0e1a1e --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/TitusUtils.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus; + +import static java.lang.String.format; + +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.exceptions.UnexpectedAccountCredentialsTypeException; +import javax.annotation.Nonnull; + +/** A collection of utility methods for Titus. */ +public class TitusUtils { + + /** Get the AWS Account ID for a particular Titus or AWS account. */ + @Nonnull + public static String getAccountId( + @Nonnull AccountCredentialsProvider accountCredentialsProvider, @Nonnull String credentials) { + AccountCredentials accountCredentials = accountCredentialsProvider.getCredentials(credentials); + if (accountCredentials instanceof NetflixTitusCredentials) { + return accountCredentialsProvider + .getCredentials(((NetflixTitusCredentials) accountCredentials).getAwsAccount()) + .getAccountId(); + } + return accountCredentials.getAccountId(); + } + + /** Assert that the provided AccountCredentials is a NetflixTitusCredentials type. 
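+ * Throws {@link UnexpectedAccountCredentialsTypeException} with a user-facing message when a + * non-Titus account is supplied.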
*/ + public static void assertTitusAccountCredentialsType(AccountCredentials accountCredentials) { + if (!(accountCredentials instanceof NetflixTitusCredentials)) { + throw new UnexpectedAccountCredentialsTypeException( + format( + "Account credentials for '%s' was expected to be NetflixTitusCredentials, but got '%s'", + accountCredentials.getName(), accountCredentials.getClass().getSimpleName()), + format( + "There may be a configuration error for Titus: '%s' account was requested, but it is not a Titus account", + accountCredentials.getName())); + } + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/agents/ClusterCleanupAgent.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/agents/ClusterCleanupAgent.java new file mode 100644 index 00000000000..20ec2b5dfcf --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/agents/ClusterCleanupAgent.java @@ -0,0 +1,32 @@ +package com.netflix.spinnaker.clouddriver.titus.caching.agents; + +import com.netflix.spinnaker.clouddriver.aws.provider.agent.AbstractClusterCleanupAgent; +import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider; +import com.netflix.spinnaker.clouddriver.titus.caching.Keys; +import com.netflix.spinnaker.clouddriver.titus.caching.TitusCachingProvider; +import java.util.*; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class ClusterCleanupAgent extends AbstractClusterCleanupAgent { + + @Override + public String getProviderName() { + return TitusCachingProvider.PROVIDER_NAME; + } + + @Override + protected String getCloudProviderId() { + return TitusCloudProvider.ID; + } + + @Override + protected Map parseServerGroupId(String serverGroupId) { + return Keys.parse(serverGroupId); + } + + @Override + protected String buildClusterId(String cluster, String application, String account) { + return Keys.getClusterV2Key(cluster, application, account); + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusStreamingUpdateAgent.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusStreamingUpdateAgent.java new file mode 100644 index 00000000000..fb67ab4ca58 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/agents/TitusStreamingUpdateAgent.java @@ -0,0 +1,1100 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.caching.agents; + +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.AUTHORITATIVE; +import static com.netflix.spinnaker.cats.agent.AgentDataType.Authority.INFORMATIVE; +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.TARGET_GROUPS; +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.APPLICATIONS; +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.CLUSTERS; +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.HEALTH; +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.IMAGES; +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.INSTANCES; +import static com.netflix.spinnaker.clouddriver.titus.caching.Keys.Namespace.SERVER_GROUPS; +import static java.util.Collections.EMPTY_LIST; +import static java.util.Collections.EMPTY_SET; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableSet; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import com.amazonaws.services.elasticloadbalancingv2.model.TargetTypeEnum; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.util.JsonFormat; +import com.netflix.frigga.Names; +import com.netflix.frigga.autoscaling.AutoScalingGroupNameBuilder; +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.histogram.PercentileTimer; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.AgentExecution; +import com.netflix.spinnaker.cats.agent.CacheResult; +import com.netflix.spinnaker.cats.agent.CachingAgent; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.cats.provider.ProviderRegistry; +import com.netflix.spinnaker.clouddriver.aws.data.ArnUtils; +import com.netflix.spinnaker.clouddriver.cache.CustomScheduledAgent; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.caching.Keys; +import com.netflix.spinnaker.clouddriver.titus.caching.TitusCachingProvider; +import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil; +import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient; +import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion; +import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.titus.grpc.protogen.Job; +import com.netflix.titus.grpc.protogen.JobChangeNotification; +import 
com.netflix.titus.grpc.protogen.JobStatus; +import com.netflix.titus.grpc.protogen.ObserveJobsQuery; +import com.netflix.titus.grpc.protogen.ScalingPolicy; +import com.netflix.titus.grpc.protogen.ScalingPolicyResult; +import com.netflix.titus.grpc.protogen.ScalingPolicyStatus; +import com.netflix.titus.grpc.protogen.Task; +import com.netflix.titus.grpc.protogen.TaskStatus; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.inject.Provider; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TitusStreamingUpdateAgent implements CustomScheduledAgent, CachingAgent { + + private static final TypeReference> ANY_MAP = + new TypeReference>() {}; + + private final TitusClient titusClient; + private final TitusAutoscalingClient titusAutoscalingClient; + private final TitusLoadBalancerClient titusLoadBalancerClient; + private final NetflixTitusCredentials account; + private final TitusRegion region; + private final ObjectMapper objectMapper; + private final Registry registry; + private final Id metricId; + private final Provider awsLookupUtil; + private final DynamicConfigService dynamicConfigService; + + private final Logger log = LoggerFactory.getLogger(TitusStreamingUpdateAgent.class); + + private static final Set FINISHED_TASK_STATES = + unmodifiableSet( + Stream.of(TaskStatus.TaskState.Finished, TaskStatus.TaskState.KillInitiated) + .collect(Collectors.toSet())); + + private static final Set FINISHED_JOB_STATES = + unmodifiableSet( + Stream.of(JobStatus.JobState.Finished, JobStatus.JobState.KillInitiated) + .collect(Collectors.toSet())); + + private static final Set FILTERED_TASK_STATES = + unmodifiableSet( + Stream.of( + TaskStatus.TaskState.Launched, + TaskStatus.TaskState.Started, + TaskStatus.TaskState.StartInitiated) + .collect(Collectors.toSet())); + + private static final Set TYPES = + unmodifiableSet( + Stream.of( + AUTHORITATIVE.forType(SERVER_GROUPS.ns), + AUTHORITATIVE.forType(INSTANCES.ns), + // clusters exist globally and the streaming agent only + // caches regionally so we can't authoritatively evict + // clusters. There is a ClusterCleanupAgent that handles + // eviction of clusters that no longer contain + // server groups. 
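+ // INFORMATIVE types are cached alongside the authoritative data but carry no + // eviction authority: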
+ INFORMATIVE.forType(CLUSTERS.ns), + INFORMATIVE.forType(APPLICATIONS.ns), + INFORMATIVE.forType(IMAGES.ns), + INFORMATIVE.forType(TARGET_GROUPS.ns)) + .collect(Collectors.toSet())); + + private static final List DOWN_TASK_STATES = + Arrays.asList( + TaskState.STOPPED, + TaskState.FAILED, + TaskState.CRASHED, + TaskState.FINISHED, + TaskState.DEAD, + TaskState.TERMINATING); + + private static final List STARTING_TASK_STATES = + Arrays.asList(TaskState.STARTING, TaskState.DISPATCHED, TaskState.PENDING, TaskState.QUEUED); + + private static final List CACHEABLE_POLICY_STATES = + Arrays.asList( + ScalingPolicyStatus.ScalingPolicyState.Applied, + ScalingPolicyStatus.ScalingPolicyState.Deleting); + + public TitusStreamingUpdateAgent( + TitusClientProvider titusClientProvider, + NetflixTitusCredentials account, + TitusRegion region, + ObjectMapper objectMapper, + Registry registry, + Provider awsLookupUtil, + DynamicConfigService dynamicConfigService) { + this.account = account; + this.region = region; + this.objectMapper = objectMapper; + this.titusClient = titusClientProvider.getTitusClient(account, region.getName()); + this.titusAutoscalingClient = + titusClientProvider.getTitusAutoscalingClient(account, region.getName()); + this.titusLoadBalancerClient = + titusClientProvider.getTitusLoadBalancerClient(account, region.getName()); + this.registry = registry; + this.awsLookupUtil = awsLookupUtil; + this.dynamicConfigService = dynamicConfigService; + this.metricId = + registry + .createId("titus.cache.streaming") + .withTag("account", account.getName()) + .withTag("region", region.getName()); + } + + @Override + public String getProviderName() { + return TitusCachingProvider.PROVIDER_NAME; + } + + @Override + public Collection getProvidedDataTypes() { + return TYPES; + } + + @Override + public CacheResult loadData(ProviderCache providerCache) { + + throw new RuntimeException("Not supported for " + this.getClass().getSimpleName()); + } + + @Override + public AgentExecution getAgentExecution(ProviderRegistry providerRegistry) { + return new StreamingCacheExecution(providerRegistry); + } + + class StreamingCacheExecution implements AgentExecution { + private final ProviderRegistry providerRegistry; + private final ProviderCache cache; + + StreamingCacheExecution(ProviderRegistry providerRegistry) { + this.providerRegistry = providerRegistry; + this.cache = providerRegistry.getProviderCache(getProviderName()); + } + + private String getAgentType() { + return account.getName() + + "/" + + region.getName() + + "/" + + TitusStreamingUpdateAgent.class.getSimpleName(); + } + + /** + * Subscribes to a Titus observeJobs event stream. At initial connect, Titus streams individual + * events for every job and task running on the stack, followed by a SNAPSHOTEND event. Once + * received, the agent builds cacheResults for the full snapshot, equivalent to the standard + * index-the-world caching agent. In the process, we cache mappings between jobIds to + * applications, clusters, and server groups within a StreamingCacheState object. + * + *

After the initial snapshot persist, the agent continues to consume observeJobs events, + * updating StreamingCacheState, including a list of jobIds we've received events for. Once + * either titus.streaming.change-threshold events have been consumed, or + * titus.streaming.time-threshold-ms ms has passed, cacheResults are built for the full resource + * graph of applications that have had job/task updates. This is more work than only directly + * updating e.g. server groups based on job updates or instances based on task updates, but + * avoids pitfalls in properly maintaining relationships to, or deleting, higher level objects. If + * the last server group in a cluster is deleted, the cluster object must also be deleted, and + * the application object updated. The latter cannot currently be done incrementally in an atomic + * operation; safely updating an application object requires rebuilding it with full context. + */ + @Override + public void executeAgent(Agent agent) { + Long startTime = System.currentTimeMillis(); + + StreamingCacheState state = new StreamingCacheState(); + + ScheduledExecutorService executor = + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setNameFormat(TitusStreamingUpdateAgent.class.getSimpleName() + "-%d") + .build()); + final Future handler = + executor.submit( + () -> { + Iterator notificationIt = observeJobs(); + + while (continueStreaming(startTime)) { + try { + while (notificationIt.hasNext() && continueStreaming(startTime)) { + JobChangeNotification notification = notificationIt.next(); + switch (notification.getNotificationCase()) { + case JOBUPDATE: + updateJob(state, notification.getJobUpdate().getJob()); + break; + case TASKUPDATE: + if (notification.getTaskUpdate().getMovedFromAnotherJob()) { + Task task = notification.getTaskUpdate().getTask(); + String destinationJobId = task.getJobId(); + String sourceJobId = + task.getTaskContextOrDefault("task.movedFromJob", null); + log.info( + "{} task moved from job {} to {}", + task.getId(), + sourceJobId, + destinationJobId); + updateMovedTask(state, task, sourceJobId); + } + updateTask(state, notification.getTaskUpdate().getTask()); + break; + case SNAPSHOTEND: + state.lastUpdate.set(0); + log.info( + "{} snapshot finished in {}ms", + getAgentType(), + System.currentTimeMillis() - startTime); + state.tasks.keySet().retainAll(state.jobs.keySet()); + if (state.snapshotComplete) { + log.error( + "{} received >1 SNAPSHOTEND events, this is unexpected and may be handled incorrectly", + getAgentType()); + } + state.snapshotComplete = true; + break; + } + + if (state.snapshotComplete) { + writeToCache(state); + if (!state.savedSnapshot) { + state.savedSnapshot = true; + } + } + } + } catch (io.grpc.StatusRuntimeException e) { + Integer backoff = + dynamicConfigService.getConfig( + Integer.class, "titus.streaming.retry-backoff-ms", 2000); + log.warn( + "gRPC exception while streaming {} updates, attempting to reconnect in {}ms", + getAgentType(), + backoff, + e); + + try { + Thread.sleep(backoff); + } catch (InterruptedException ex) { + log.warn( + "Interrupted while attempting to reconnect to observeJobs, bailing on this invocation", + ex); + break; + } + + notificationIt = observeJobs(); + state.snapshotComplete = false; + state.savedSnapshot = false; + } catch (Exception e) { + log.error("Exception while streaming {} titus updates", getAgentType(), e); + } + } + }); + + executor.schedule( + () -> { + handler.cancel(true); + }, + getTimeoutMillis(), + TimeUnit.MILLISECONDS); + 
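+ // NOTE: completedFuture(handler) is already complete, so this join() returns immediately; + // the streaming task itself runs until continueStreaming() is false or the scheduled + // cancel(true) above fires.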
CompletableFuture.completedFuture(handler).join(); + executor.shutdown(); + } + + private Iterator observeJobs() { + return titusClient.observeJobs( + ObserveJobsQuery.newBuilder() + .putFilteringCriteria("jobType", "SERVICE") + .putFilteringCriteria("attributes", "source:spinnaker") + .putFilteringCriteria("attributes", "spinnakerAccount:" + account.getName()) + .build()); + } + + private void updateJob(StreamingCacheState state, Job job) { + String jobId = job.getId(); + String application = job.getJobDescriptor().getApplicationName(); + + state.jobIdToApp.put(jobId, application); + if (state.snapshotComplete) { + state.updatedJobs.add(jobId); + } + + if (FINISHED_JOB_STATES.contains(job.getStatus().getState())) { + if (state.snapshotComplete && state.tasks.containsKey(jobId)) { + state + .tasks + .get(jobId) + .forEach( + t -> + state.completedInstanceIds.add( + Keys.getInstanceV2Key(t.getId(), account.getName(), region.getName()))); + } + state.tasks.remove(jobId); + if (state.jobs.containsKey(jobId)) { + state.jobs.remove(jobId); + } else if (state.snapshotComplete) { + log.debug( + "{} updateJob: jobId: {} has finished, but not present in current snapshot set", + getAgentType(), + jobId); + } + } else { + state.jobs.put(jobId, job); + } + + state.changes.incrementAndGet(); + } + + private void updateTask(StreamingCacheState state, Task task) { + String jobId = task.getJobId(); + if (FILTERED_TASK_STATES.contains(task.getStatus().getState())) { + state.tasks.computeIfAbsent(jobId, t -> new HashSet<>()).remove(task); + state.tasks.get(jobId).add(task); + } else if (FINISHED_TASK_STATES.contains(task.getStatus().getState())) { + if (state.snapshotComplete) { + state.completedInstanceIds.add( + Keys.getInstanceV2Key(task.getId(), account.getName(), region.getName())); + } + if (state.tasks.containsKey(jobId)) { + state.tasks.get(jobId).remove(task); + } else if (state.snapshotComplete) { + log.debug( + "{} updateTask: task: {} jobId: {} has finished, but task not present in current snapshot set", + getAgentType(), + task.getId(), + jobId); + } + } + + if (state.snapshotComplete) { + state.updatedJobs.add(jobId); + } + + state.changes.incrementAndGet(); + } + + private void updateMovedTask(StreamingCacheState state, Task task, String sourceJobId) { + if (sourceJobId != null) { + if (state.tasks.containsKey(sourceJobId)) { + state.tasks.get(sourceJobId).remove(task); + state.updatedJobs.add(sourceJobId); + } + } + } + + private void writeToCache(StreamingCacheState state) { + long startTime = System.currentTimeMillis(); + + if (!state.savedSnapshot + || state.changes.get() + >= dynamicConfigService.getConfig( + Integer.class, "titus.streaming.change-threshold", 1000) + || (startTime - state.lastUpdate.get() + > dynamicConfigService.getConfig( + Integer.class, "titus.streaming.time-threshold-ms", 5000) + && state.changes.get() > 0)) { + if (!state.savedSnapshot) { + log.info( + "Storing snapshot with {} jobs and tasks in {}", state.changes.get(), getAgentType()); + } else { + state.tasks.keySet().retainAll(state.jobs.keySet()); + + log.info( + "Updating: {} changes (last update {} milliseconds ago) in {}", + state.changes.get(), + startTime - state.lastUpdate.get(), + getAgentType()); + } + + List scalingPolicyResults = + titusAutoscalingClient != null + ?
titusAutoscalingClient.getAllScalingPolicies() + : emptyList(); + PercentileTimer.get(registry, metricId.withTag("operation", "getScalingPolicies")) + .record(System.currentTimeMillis() - startTime, MILLISECONDS); + + long startLoadBalancerTime = System.currentTimeMillis(); + Map> allLoadBalancers = + titusLoadBalancerClient != null + ? titusLoadBalancerClient.getAllLoadBalancers() + : emptyMap(); + PercentileTimer.get(registry, metricId.withTag("operation", "getLoadBalancers")) + .record(System.currentTimeMillis() - startLoadBalancerTime, MILLISECONDS); + + CacheResult result = buildCacheResult(state, scalingPolicyResults, allLoadBalancers); + + Collection authoritative = + TYPES.stream() + .filter(t -> t.getAuthority().equals(AUTHORITATIVE)) + .map(AgentDataType::getTypeName) + .collect(Collectors.toSet()); + + if (state.savedSnapshot) { + // Incremental update without implicit evictions + cache.addCacheResult(getAgentType(), authoritative, result); + } else { + cache.putCacheResult(getAgentType(), authoritative, result); + } + + // prune jobIdToApp + Set completedJobs = new HashSet<>(state.jobIdToApp.keySet()); + completedJobs.removeAll(state.jobs.keySet()); + completedJobs.forEach(j -> state.jobIdToApp.remove(j)); + + state.updatedJobs = new HashSet<>(); + state.lastUpdate.set(System.currentTimeMillis()); + state.changes.set(0); + + PercentileTimer.get(registry, metricId.withTag("operation", "processSnapshot")) + .record(System.currentTimeMillis() - startTime, MILLISECONDS); + } + } + + private CacheResult buildCacheResult( + StreamingCacheState state, + List scalingPolicyResults, + Map> allLoadBalancers) { + // INITIALIZE CACHES + Map applicationCache = createCache(); + Map clusterCache = createCache(); + Map serverGroupCache = createCache(); + Map targetGroupCache = createCache(); + Map imageCache = createCache(); + Map instancesCache = createCache(); + + // These are used to calculate deletes when updating incrementally + Set currentApps = new HashSet<>(); + Set currentClusters = new HashSet<>(); + Set currentServerGroups = new HashSet<>(); + + Map> jobIdsByServerGroupKey = new HashMap<>(); + + Map jobs; + + if (state.savedSnapshot) { + List missingJobMappings = + state.updatedJobs.stream() + .filter(j -> !state.jobIdToApp.containsKey(j)) + .collect(Collectors.toList()); + + if (!missingJobMappings.isEmpty()) { + log.error( + "{} updatedJobs missing from jobIdToApp cache: {}", + getAgentType(), + missingJobMappings); + } + + Set changedApplications = + state.updatedJobs.stream() + .map(j -> state.jobIdToApp.get(j)) + .collect(Collectors.toSet()); + changedApplications.remove(null); + + currentApps.addAll(changedApplications); + + Set jobsNeeded = + state.jobIdToApp.entrySet().stream() + .filter(entry -> changedApplications.contains(entry.getValue())) + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); + + jobs = + state.jobs.entrySet().stream() + .filter(e -> jobsNeeded.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } else { + jobs = state.jobs; + } + + List serverGroupDatas = + jobs.values().stream() + .map( + job -> { + List jobScalingPolicies = + scalingPolicyResults.stream() + .filter( + it -> + it.getJobId().equalsIgnoreCase(job.getId()) + && CACHEABLE_POLICY_STATES.contains( + it.getPolicyState().getState())) + .map( + it -> + new ScalingPolicyData( + it.getId().getId(), + it.getScalingPolicy(), + it.getPolicyState())) + .collect(Collectors.toList()); + + List jobLoadBalancers = + allLoadBalancers.getOrDefault(job.getId(), 
emptyList()); + + return new ServerGroupData( + new com.netflix.spinnaker.clouddriver.titus.client.model.Job( + job, EMPTY_LIST), + jobScalingPolicies, + jobLoadBalancers, + state.tasks.getOrDefault(job.getId(), emptySet()).stream() + .map(Task::getId) + .collect(Collectors.toSet()), + account.getName(), + region.getName()); + }) + .collect(Collectors.toList()); + + serverGroupDatas.forEach( + data -> { + String app = StringUtils.substringAfterLast(data.appNameKey, ":"); + + if (StringUtils.isNotEmpty(app)) { + state.appToClusters.computeIfAbsent(app, c -> new HashSet<>()).add(data.clusterKey); + state + .appsToServerGroups + .computeIfAbsent(app, c -> new HashSet<>()) + .add(data.serverGroupKey); + state.clusterKeyToApp.put(data.clusterKey, app); + state.sgKeyToApp.put(data.serverGroupKey, app); + } + + if (state.savedSnapshot) { + currentApps.add(app); + currentClusters.add(data.clusterKey); + currentServerGroups.add(data.serverGroupKey); + } + + cacheApplication(data, applicationCache); + cacheCluster(data, clusterCache); + cacheServerGroup(data, serverGroupCache); + cacheImage(data, imageCache); + addJobIdsByServerGroupKey(data, jobIdsByServerGroupKey); + + for (Task task : (Set) state.tasks.getOrDefault(data.job.getId(), EMPTY_SET)) { + InstanceData instanceData = + new InstanceData( + new com.netflix.spinnaker.clouddriver.titus.client.model.Task(task), + data.job.getName(), + account.getName(), + region.getName()); + cacheInstance(instanceData, instancesCache); + } + }); + + if (state.savedSnapshot) { + List missingServerGroups = + state.appsToServerGroups.entrySet().stream() + .filter(e -> currentApps.contains(e.getKey())) + .flatMap(e -> e.getValue().stream()) + .filter(c -> !currentServerGroups.contains(c)) + .collect(Collectors.toList()); + + if (!missingServerGroups.isEmpty()) { + log.info("Evicting {} server groups in {}", missingServerGroups.size(), getAgentType()); + cache.evictDeletedItems(SERVER_GROUPS.ns, missingServerGroups); + missingServerGroups.forEach( + sg -> { + state + .appsToServerGroups + .getOrDefault(state.sgKeyToApp.get(sg), emptySet()) + .remove(sg); + state.sgKeyToApp.remove(sg); + }); + } + + if (!state.completedInstanceIds.isEmpty()) { + log.info( + "Evicting {} instances in {}", state.completedInstanceIds.size(), getAgentType()); + cache.evictDeletedItems(INSTANCES.ns, state.completedInstanceIds); + state.completedInstanceIds = new HashSet<>(); + } + } + + Map> cacheResults = new HashMap<>(); + cacheResults.put(APPLICATIONS.ns, applicationCache.values()); + cacheResults.put(CLUSTERS.ns, clusterCache.values()); + cacheResults.put(SERVER_GROUPS.ns, serverGroupCache.values()); + cacheResults.put(TARGET_GROUPS.ns, targetGroupCache.values()); + cacheResults.put(IMAGES.ns, imageCache.values()); + cacheResults.put(INSTANCES.ns, instancesCache.values()); + + // No need to log this on incremental updates + if (!state.savedSnapshot) { + logDuplicateServerGroups(jobIdsByServerGroupKey, serverGroupCache); + } + + String action = state.savedSnapshot ? 
"Incrementally updating" : "Snapshot caching"; + + log.info("{} {} applications in {}", action, applicationCache.size(), getAgentType()); + log.info("{} {} server groups in {}", action, serverGroupCache.size(), getAgentType()); + log.info("{} {} clusters in {}", action, clusterCache.size(), getAgentType()); + log.info("{} {} target groups in {}", action, targetGroupCache.size(), getAgentType()); + log.info("{} {} images in {}", action, imageCache.size(), getAgentType()); + log.info("{} {} instances in {}", action, instancesCache.size(), getAgentType()); + + return new DefaultCacheResult(cacheResults); + } + + /** Build authoritative cache object for applications based on server group data */ + private void cacheApplication(ServerGroupData data, Map applications) { + CacheData applicationCache = + applications.getOrDefault(data.appNameKey, new MutableCacheData(data.appNameKey)); + applicationCache.getAttributes().put("name", data.name.getApp()); + Map> relationships = applicationCache.getRelationships(); + relationships.computeIfAbsent(CLUSTERS.ns, key -> new HashSet<>()).add(data.clusterKey); + relationships + .computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()) + .add(data.serverGroupKey); + relationships + .computeIfAbsent(TARGET_GROUPS.ns, key -> new HashSet<>()) + .addAll(data.targetGroupKeys); + applications.put(data.appNameKey, applicationCache); + } + + /** Build informative cache object for clusters based on server group data */ + private void cacheCluster(ServerGroupData data, Map clusters) { + CacheData clusterCache = + clusters.getOrDefault(data.clusterKey, new MutableCacheData(data.clusterKey)); + clusterCache.getAttributes().put("name", data.name.getCluster()); + Map> relationships = clusterCache.getRelationships(); + relationships.computeIfAbsent(APPLICATIONS.ns, key -> new HashSet<>()).add(data.appNameKey); + relationships + .computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()) + .add(data.serverGroupKey); + relationships + .computeIfAbsent(TARGET_GROUPS.ns, key -> new HashSet<>()) + .addAll(data.targetGroupKeys); + clusters.put(data.clusterKey, clusterCache); + } + + private void cacheServerGroup(ServerGroupData data, Map serverGroups) { + CacheData serverGroupCache = + serverGroups.getOrDefault(data.serverGroupKey, new MutableCacheData(data.serverGroupKey)); + List policies = + data.scalingPolicies != null + ? 
data.scalingPolicies.stream() + .map(ScalingPolicyData::toMap) + .collect(Collectors.toList()) + : new ArrayList<>(); + + Map attributes = serverGroupCache.getAttributes(); + attributes.put("job", data.job); + attributes.put("scalingPolicies", policies); + attributes.put("region", region.getName()); + attributes.put("account", account.getName()); + attributes.put("targetGroups", data.targetGroupNames); + + Map> relationships = serverGroupCache.getRelationships(); + relationships.computeIfAbsent(APPLICATIONS.ns, key -> new HashSet<>()).add(data.appNameKey); + relationships.computeIfAbsent(CLUSTERS.ns, key -> new HashSet<>()).add(data.clusterKey); + relationships + .computeIfAbsent(TARGET_GROUPS.ns, key -> new HashSet<>()) + .addAll(data.targetGroupKeys); + relationships.computeIfAbsent(IMAGES.ns, key -> new HashSet<>()).add(data.imageKey); + relationships.computeIfAbsent(INSTANCES.ns, key -> new HashSet<>()).addAll(data.taskKeys); + serverGroups.put(data.serverGroupKey, serverGroupCache); + } + + private void addJobIdsByServerGroupKey( + ServerGroupData data, Map> jobIdsByServerGroupKey) { + jobIdsByServerGroupKey + .computeIfAbsent(data.serverGroupKey, k -> new ArrayList<>()) + .add(data.job.getId()); + } + + private void cacheImage(ServerGroupData data, Map images) { + CacheData imageCache = + images.getOrDefault(data.imageKey, new MutableCacheData(data.imageKey)); + imageCache + .getRelationships() + .computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()) + .add(data.serverGroupKey); + images.put(data.imageKey, imageCache); + } + + private void cacheInstance(InstanceData data, Map instances) { + CacheData instanceCache = + instances.getOrDefault(data.instanceId, new MutableCacheData(data.instanceId)); + instanceCache.getAttributes().putAll(objectMapper.convertValue(data.task, ANY_MAP)); + instanceCache.getAttributes().put(HEALTH.ns, singletonList(getTitusHealth(data.task))); + instanceCache.getAttributes().put("task", data.task); + instanceCache.getAttributes().put("jobId", data.jobId); + + if (!data.serverGroup.isEmpty()) { + instanceCache + .getRelationships() + .computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()) + .add(data.serverGroup); + } else { + instanceCache + .getRelationships() + .computeIfAbsent(SERVER_GROUPS.ns, key -> new HashSet<>()) + .clear(); + } + instances.put(data.instanceId, instanceCache); + } + + class StreamingCacheState { + AtomicInteger changes = new AtomicInteger(0); + AtomicLong lastUpdate = new AtomicLong(0); + + Map jobs = new HashMap<>(); + Map> tasks = new HashMap<>(); + + Map jobIdToApp = new HashMap<>(); + Map> appToClusters = new HashMap<>(); + Map> appsToServerGroups = new HashMap<>(); + Map clusterKeyToApp = new HashMap<>(); + Map sgKeyToApp = new HashMap<>(); + + Set completedInstanceIds = new HashSet<>(); + Set updatedJobs = new HashSet<>(); + + Boolean snapshotComplete = false; + Boolean savedSnapshot = false; + } + } + + @Override + public boolean handlesAccount(String accountName) { + return this.account.getName().equals(accountName); + } + + @Override + public String getAgentType() { + return account.getName() + + "/" + + region.getName() + + "/" + + TitusStreamingUpdateAgent.class.getSimpleName(); + } + + @Override + public long getPollIntervalMillis() { + return TimeUnit.MINUTES.toMillis(3); + } + + // TODO: AgentSchedulers need to support ttl heartbeats for proper streaming agent support. 
+ // We really want a short poll interval (for fast agent failover across instances) with a + // timeout that can be extended indefinitely while streaming updates are actively processed. + @Override + public long getTimeoutMillis() { + return TimeUnit.MINUTES.toMillis(3); + } + + /** + * @return Pad time in milliseconds before the agent timeout at which the streaming agent stops + * polling Titus for updates + */ + private long getPadTimeMillis() { + return TimeUnit.SECONDS.toMillis(5); + } + + private boolean continueStreaming(long startTime) { + return System.currentTimeMillis() < (startTime + getTimeoutMillis() - getPadTimeMillis()); + } + + private Map createCache() { + return new HashMap<>(); + } + + private String getAwsAccountId(String account, String region) { + return awsLookupUtil.get().awsAccountId(account, region); + } + + private String getAwsAccountName(String account, String region) { + return awsLookupUtil.get().awsAccountName(account, region); + } + + private String getAwsVpcId(String account, String region) { + return awsLookupUtil.get().awsVpcId(account, region); + } + + static class MutableCacheData implements CacheData { + final String id; + int ttlSeconds = -1; + final Map attributes = new HashMap<>(); + final Map> relationships = new HashMap<>(); + + public MutableCacheData(String id) { + this.id = id; + } + + @JsonCreator + public MutableCacheData( + @JsonProperty("id") String id, + @JsonProperty("attributes") Map attributes, + @JsonProperty("relationships") Map> relationships) { + this(id); + this.attributes.putAll(attributes); + this.relationships.putAll(relationships); + } + + @Override + public String getId() { + return id; + } + + @Override + public int getTtlSeconds() { + return ttlSeconds; + } + + @Override + public Map getAttributes() { + return attributes; + } + + @Override + public Map> getRelationships() { + return relationships; + } + } + + private class ScalingPolicyData { + String id; + ScalingPolicy policy; + ScalingPolicyStatus status; + + ScalingPolicyData(ScalingPolicyResult scalingPolicyResult) { + this( + scalingPolicyResult.getId().getId(), + scalingPolicyResult.getScalingPolicy(), + scalingPolicyResult.getPolicyState()); + } + + ScalingPolicyData(String id, ScalingPolicy policy, ScalingPolicyStatus status) { + this.id = id; + this.policy = policy; + this.status = status; + } + + protected Map toMap() { + Map status = new HashMap<>(); + status.put("state", this.status.getState().name()); + status.put("reason", this.status.getPendingReason()); + + Map result = new HashMap<>(); + result.put("id", id); + result.put("status", status); + + try { + String scalingPolicy = JsonFormat.printer().print(policy); + result.put("policy", objectMapper.readValue(scalingPolicy, ANY_MAP)); + } catch (Exception e) { + log.warn("Failed to serialize scaling policy for {}", getAgentType(), e); + result.put("policy", emptyMap()); + } + + return result; + } + } + + private class ServerGroupData { + + final com.netflix.spinnaker.clouddriver.titus.client.model.Job job; + List scalingPolicies; + final Names name; + final String appNameKey; + final String clusterKey; + final String serverGroupKey; + final String region; + final Set targetGroupKeys; + final Set targetGroupNames; + final String account; + final String imageId; + final String imageKey; + final Set taskKeys; + + ServerGroupData( + com.netflix.spinnaker.clouddriver.titus.client.model.Job job, + List scalingPolicies, + List targetGroups, + Set taskIds, + String account, + String region) { + this.job
= job; + this.scalingPolicies = scalingPolicies; + this.imageId = job.getApplicationName() + ":" + job.getVersion(); + this.imageKey = Keys.getImageV2Key(imageId, getAwsAccountId(account, region), region); + this.taskKeys = + taskIds == null + ? emptySet() + : taskIds.stream() + .map(it -> Keys.getInstanceV2Key(it, account, region)) + .collect(Collectors.toSet()); + + String asgName = getAsgName(job); + + name = Names.parseName(asgName); + appNameKey = Keys.getApplicationKey(name.getApp()); + clusterKey = Keys.getClusterV2Key(name.getCluster(), name.getApp(), account); + this.region = region; + this.account = account; + serverGroupKey = Keys.getServerGroupV2Key(asgName, account, region); + + targetGroupNames = + targetGroups.stream() + .map(ArnUtils::extractTargetGroupName) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toSet()); + + targetGroupKeys = + targetGroupNames.stream() + .map( + it -> + com.netflix.spinnaker.clouddriver.aws.data.Keys.getTargetGroupKey( + it, + getAwsAccountName(account, region), + region, + TargetTypeEnum.Ip.toString(), + getAwsVpcId(account, region))) + .collect(Collectors.toSet()); + } + } + + private class InstanceData { + // The instance key, not the task id + private final String instanceId; + private final com.netflix.spinnaker.clouddriver.titus.client.model.Task task; + private final String jobId; + private final String serverGroup; + + InstanceData( + com.netflix.spinnaker.clouddriver.titus.client.model.Task task, + String jobName, + String account, + String region) { + this.instanceId = Keys.getInstanceV2Key(task.getId(), account, region); + this.task = task; + this.jobId = task.getJobId(); + this.serverGroup = jobName != null ? Keys.getServerGroupV2Key(jobName, account, region) : ""; + } + } + + private Map getTitusHealth( + com.netflix.spinnaker.clouddriver.titus.client.model.Task task) { + TaskState taskState = task.getState(); + HealthState healthState = HealthState.Unknown; + if (DOWN_TASK_STATES.contains(taskState)) { + healthState = HealthState.Down; + } else if (STARTING_TASK_STATES.contains(taskState)) { + healthState = HealthState.Starting; + } + + Map response = new HashMap<>(); + response.put("type", "Titus"); + response.put("healthClass", "platform"); + response.put("state", healthState.toString()); + return response; + } + + private String getAsgName(com.netflix.spinnaker.clouddriver.titus.client.model.Job job) { + String asgName = job.getName(); + if (job.getLabels().containsKey("name")) { + asgName = job.getLabels().get("name"); + } else { + if (job.getAppName() != null) { + AutoScalingGroupNameBuilder asgNameBuilder = new AutoScalingGroupNameBuilder(); + asgNameBuilder.setAppName(job.getAppName()); + asgNameBuilder.setDetail(job.getJobGroupDetail()); + asgNameBuilder.setStack(job.getJobGroupStack()); + String version = job.getJobGroupSequence(); + asgName = asgNameBuilder.buildGroupName() + (version != null ? "-" + version : ""); + } + } + return asgName; + } + + /** + * For each server group with more than one job ID, log all the duplicate IDs, and log the + * final cached job ID too.
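+ * Duplicates mean that more than one active job resolved to the same server group key, e.g. via + * identical "name" labels.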
+ */ + private void logDuplicateServerGroups( + Map> seenServerGroupByJobIds, Map serverGroupCache) { + seenServerGroupByJobIds.entrySet().stream() + .filter(it -> it.getValue().size() > 1) + .forEach( + (entry) -> { + com.netflix.spinnaker.clouddriver.titus.client.model.Job cachedJob = null; + try { + cachedJob = + (com.netflix.spinnaker.clouddriver.titus.client.model.Job) + serverGroupCache.get(entry.getKey()).getAttributes().get("job"); + } catch (Exception e) { + log.error( + "Error retrieving duplicate server group {} from server group cache.", + entry.getKey(), + e.getCause()); + } + + if (cachedJob != null) { + log.error( + "Duplicate Titus server groups found {} with job IDs [{}]. Cached server " + + "group job ID is {}", + entry.getKey(), + entry.getValue(), + cachedJob.getId()); + } else { + // In theory, this should never happen. + log.error( + "Duplicate Titus server groups found {} with job IDs [{}]. No corresponding " + + "cached server groups found.", + entry.getKey(), + entry.getValue()); + } + }); + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusTargetGroupServerGroupProvider.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusTargetGroupServerGroupProvider.java new file mode 100644 index 00000000000..0af99e509bc --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusTargetGroupServerGroupProvider.java @@ -0,0 +1,228 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.caching.providers; + +import static com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.*; + +import com.netflix.spectator.api.Id; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter; +import com.netflix.spinnaker.clouddriver.aws.model.AmazonTargetGroup; +import com.netflix.spinnaker.clouddriver.aws.model.TargetGroupServerGroupProvider; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider; +import com.netflix.spinnaker.clouddriver.titus.caching.Keys; +import java.util.*; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Slf4j +@Component +public class TitusTargetGroupServerGroupProvider implements TargetGroupServerGroupProvider { + + private final Cache cacheView; + private final Registry registry; + + private final Id inconsistentCacheId; + + @Autowired + public TitusTargetGroupServerGroupProvider(Cache cacheView, Registry registry) { + this.cacheView = cacheView; + this.registry = registry; + + inconsistentCacheId = + registry + .createId("cache.inconsistentData") + .withTag("location", TitusTargetGroupServerGroupProvider.class.getSimpleName()); + } + + @Override + public Map getServerGroups( + String applicationName, + Map allTargetGroups, + Collection targetGroupData) { + + CacheData application = cacheView.get(APPLICATIONS.ns, Keys.getApplicationKey(applicationName)); + if (application == null + || allTargetGroups.isEmpty() + || !application.getRelationships().containsKey(TARGET_GROUPS.ns) + || application.getRelationships().get(TARGET_GROUPS.ns).isEmpty()) { + return allTargetGroups; + } + + // fetch keys here instead of using application -> serverGroup relationship which is + // inconsistent due to streaming update agent + Collection applicationServerGroupKeys = + cacheView.filterIdentifiers( + SERVER_GROUPS.ns, Keys.getServerGroupV2Key("*", "*", applicationName, "*")); + + if (applicationServerGroupKeys.isEmpty()) { + return allTargetGroups; + } + + Collection applicationServerGroups = + cacheView.getAll( + SERVER_GROUPS.ns, + applicationServerGroupKeys, + RelationshipCacheFilter.include(TARGET_GROUPS.ns, INSTANCES.ns)); + + Set instanceKeys = new HashSet<>(); + for (CacheData serverGroup : applicationServerGroups) { + Map> relationships = serverGroup.getRelationships(); + if (relationships.containsKey(TARGET_GROUPS.ns) + && !relationships.get(TARGET_GROUPS.ns).isEmpty() + && relationships.containsKey(INSTANCES.ns) + && !relationships.get(INSTANCES.ns).isEmpty()) { + instanceKeys.addAll(relationships.get(INSTANCES.ns)); + } + } + + Map instances = + cacheView.getAll(INSTANCES.ns, instanceKeys).stream() + .collect(Collectors.toMap(CacheData::getId, CacheData::getAttributes)); + + for (CacheData serverGroup : applicationServerGroups) { + if (serverGroup.getRelationships().containsKey(TARGET_GROUPS.ns)) { + for (String targetGroup : serverGroup.getRelationships().get(TARGET_GROUPS.ns)) { + Map targetGroupDetails = + com.netflix.spinnaker.clouddriver.aws.data.Keys.parse(targetGroup); + + Set targetGroupInstances = new HashSet<>(); + if 
(serverGroup.getRelationships().containsKey(INSTANCES.ns)) { + for (String instanceKey : serverGroup.getRelationships().get(INSTANCES.ns)) { + Map instanceDetails = instances.get(instanceKey); + + Optional instance = + getInstanceHealth(instanceKey, instanceDetails, targetGroupDetails); + if (instance.isPresent()) { + targetGroupInstances.add(instance.get()); + } else { + registry.counter(inconsistentCacheId).increment(); + log.error( + "Detected potentially inconsistent instance cache data (targetGroup: {}, serverGroup: {})", + targetGroup, + serverGroup.getId()); + } + } + } + + Map attributes = serverGroup.getAttributes(); + Map job = (Map) attributes.get("job"); + LoadBalancerServerGroup loadBalancerServerGroup = + new LoadBalancerServerGroup( + job.get("name").toString(), + attributes.get("account").toString(), + attributes.get("region").toString(), + !(Boolean) job.get("inService"), + Collections.emptySet(), + targetGroupInstances, + TitusCloudProvider.ID); + + if (allTargetGroups.containsKey(targetGroup)) { + allTargetGroups.get(targetGroup).getServerGroups().add(loadBalancerServerGroup); + allTargetGroups + .get(targetGroup) + .set( + "instances", + targetGroupInstances.stream() + .map(LoadBalancerInstance::getId) + .collect(Collectors.toSet())); + } + } + } + } + return allTargetGroups; + } + + private Optional getInstanceHealth( + String instanceKey, Map instanceDetails, Map targetGroupDetails) { + String healthKey; + try { + healthKey = + com.netflix.spinnaker.clouddriver.aws.data.Keys.getInstanceHealthKey( + ((Map) instanceDetails.get("task")).get("containerIp").toString(), + targetGroupDetails.get("account"), + targetGroupDetails.get("region"), + "aws-load-balancer-v2-target-group-instance-health"); + } catch (NullPointerException e) { + return Optional.empty(); + } + + CacheData healthData = cacheView.get(HEALTH.ns, healthKey); + + Map health = getTargetGroupHealth(instanceKey, targetGroupDetails, healthData); + + return Optional.of( + new LoadBalancerInstance( + ((Map) instanceDetails.get("task")).get("id").toString(), null, health)); + } + + private static Map getTargetGroupHealth( + String instanceKey, Map targetGroupDetails, CacheData healthData) { + try { + if (healthDataContainsTargetGroups(healthData)) { + Map targetGroupHealth = getTargetGroupHealthData(targetGroupDetails, healthData); + + if (!targetGroupHealth.isEmpty()) { + Map health = new HashMap<>(); + health.put("targetGroupName", targetGroupHealth.get("targetGroupName").toString()); + health.put("state", targetGroupHealth.get("state").toString()); + + if (targetGroupHealth.containsKey("reasonCode")) { + health.put("reasonCode", targetGroupHealth.get("reasonCode").toString()); + } + + if (targetGroupHealth.containsKey("description")) { + health.put("description", targetGroupHealth.get("description").toString()); + } + + return health; + } + } + } catch (Exception e) { + log.error("failed to load health for " + instanceKey, e); + } + + return Collections.emptyMap(); + } + + private static boolean healthDataContainsTargetGroups(CacheData healthData) { + return healthData != null + && healthData.getAttributes().containsKey("targetGroups") + && !((ArrayList) healthData.getAttributes().get("targetGroups")).isEmpty(); + } + + private static Map getTargetGroupHealthData( + Map targetGroupDetails, CacheData healthData) { + List targetGroups = (List) healthData.getAttributes().get("targetGroups"); + return (Map) + targetGroups.stream() + .filter( + tgh -> + ((Map) tgh) + .get("targetGroupName") + .toString() + 
.equals(targetGroupDetails.get("targetGroup"))) + .findFirst() + .orElse(Collections.EMPTY_MAP); + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchema.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchema.java similarity index 100% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchema.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchema.java diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchemaUtil.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchemaUtil.java new file mode 100644 index 00000000000..37ad3ab092c --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/caching/utils/CachingSchemaUtil.java @@ -0,0 +1,56 @@ +package com.netflix.spinnaker.clouddriver.titus.caching.utils; + +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Optional; +import javax.annotation.PostConstruct; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class CachingSchemaUtil { + + private final AccountCredentialsProvider accountCredentialsProvider; + private final AwsLookupUtil awsLookupUtil; + private final Map cachingSchemaForAccounts = new LinkedHashMap<>(); + + @Autowired + public CachingSchemaUtil( + AccountCredentialsProvider accountCredentialsProvider, AwsLookupUtil awsLookupUtil) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.awsLookupUtil = awsLookupUtil; + } + + public CachingSchema getCachingSchemaForAccount(String account) { + init(); + return Optional.ofNullable(cachingSchemaForAccounts.get(account)).orElse(CachingSchema.V2); + } + + @PostConstruct + private void init() { + accountCredentialsProvider.getAll().stream() + .filter(c -> c instanceof NetflixTitusCredentials) + .forEach( + c -> { + NetflixTitusCredentials credentials = (NetflixTitusCredentials) c; + + Collection regions = credentials.getRegions(); + regions.forEach( + region -> { + cachingSchemaForAccounts.put( + credentials.getName(), cachingSchemaFor(credentials)); + cachingSchemaForAccounts.put( + awsLookupUtil.awsAccountId(credentials.getName(), region.getName()), + cachingSchemaFor(credentials)); + }); + }); + } + + private static CachingSchema cachingSchemaFor(NetflixTitusCredentials credentials) { + return CachingSchema.V2; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/EndpointValidator.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/EndpointValidator.java new file mode 100644 index 00000000000..c50b822a2fe --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/EndpointValidator.java @@ -0,0 +1,56 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class EndpointValidator { + + private static final Set ALLOWED_PROTOCOLS = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("http", "https"))); + + public static String validateEndpoint(String endpoint) { + URL url; + try { + url = new URL(endpoint); + } catch (NullPointerException e) { + throw new IllegalArgumentException(String.format("Invalid endpoint provided (%s)", endpoint)); + } catch (MalformedURLException e) { + throw new IllegalArgumentException( + String.format("Invalid endpoint provided (%s): %s", endpoint, e.getMessage())); + } + + if (url.getHost() == null || "".equals(url.getHost())) { + throw new IllegalArgumentException( + String.format("Invalid endpoint provided (%s): No host specified", endpoint)); + } + + String protocol = url.getProtocol(); + if (!ALLOWED_PROTOCOLS.contains(protocol)) { + throw new IllegalArgumentException( + String.format( + "Invalid endpoint provided (%s): Invalid protocol specified (%s)", + endpoint, protocol)); + } + return endpoint; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusAutoscalingClient.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusAutoscalingClient.java new file mode 100644 index 00000000000..4003e65378e --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusAutoscalingClient.java @@ -0,0 +1,82 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
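validateEndpoint accepts only a parseable URL with a non-empty host and an http or https protocol, returning the input unchanged; every failure mode, including a null endpoint (the NullPointerException catch), surfaces as IllegalArgumentException. Illustrative inputs (hostnames are placeholders):

  EndpointValidator.validateEndpoint("https://titus.example.net:7104"); // returned as-is
  EndpointValidator.validateEndpoint("ftp://titus.example.net"); // throws: invalid protocol (ftp)
  EndpointValidator.validateEndpoint("https://"); // throws: no host specified
  EndpointValidator.validateEndpoint("not a url"); // throws: malformed URL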
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client; + +import com.google.protobuf.Empty; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.titus.client.model.GrpcChannelFactory; +import com.netflix.titus.grpc.protogen.*; +import java.util.List; + +public class RegionScopedTitusAutoscalingClient implements TitusAutoscalingClient { + + /** Default connect timeout in milliseconds */ + private static final long DEFAULT_CONNECT_TIMEOUT = 60000; + + private final AutoScalingServiceGrpc.AutoScalingServiceBlockingStub + autoScalingServiceBlockingStub; + + public RegionScopedTitusAutoscalingClient( + TitusRegion titusRegion, + Registry registry, + String environment, + String eurekaName, + GrpcChannelFactory channelFactory) { + this.autoScalingServiceBlockingStub = + AutoScalingServiceGrpc.newBlockingStub( + channelFactory.build( + titusRegion, environment, eurekaName, DEFAULT_CONNECT_TIMEOUT, registry)); + } + + @Override + public List getAllScalingPolicies() { + return autoScalingServiceBlockingStub + .getAllScalingPolicies(Empty.newBuilder().build()) + .getItemsList(); + } + + @Override + public List getJobScalingPolicies(String jobId) { + JobId request = JobId.newBuilder().setId(jobId).build(); + return autoScalingServiceBlockingStub.getJobScalingPolicies(request).getItemsList(); + } + + @Override + public ScalingPolicyResult getScalingPolicy(String policyId) { + return autoScalingServiceBlockingStub + .getScalingPolicy(ScalingPolicyID.newBuilder().setId(policyId).build()) + .getItems(0); + } + + @Override + public ScalingPolicyID createScalingPolicy(PutPolicyRequest policy) { + return TitusClientAuthenticationUtil.attachCaller(autoScalingServiceBlockingStub) + .setAutoScalingPolicy(policy); + } + + @Override + public void updateScalingPolicy(UpdatePolicyRequest policy) { + TitusClientAuthenticationUtil.attachCaller(autoScalingServiceBlockingStub) + .updateAutoScalingPolicy(policy); + } + + @Override + public void deleteScalingPolicy(DeletePolicyRequest request) { + TitusClientAuthenticationUtil.attachCaller(autoScalingServiceBlockingStub) + .deleteAutoScalingPolicy(request); + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClient.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClient.java new file mode 100644 index 00000000000..17c27287967 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClient.java @@ -0,0 +1,544 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
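A construction sketch for the autoscaling client; the region values and the NoopRegistry are placeholders, and channelFactory stands for whatever GrpcChannelFactory implementation is wired in, none of which comes from this change. Note that the read paths call the stub directly, while the mutating paths attach caller metadata through TitusClientAuthenticationUtil.

  TitusRegion region =
      new TitusRegion(
          "us-east-1", "titusdevint", "https://titus.example.net:7104",
          "titusapi", null, null, Collections.emptyList(), null, null);
  TitusAutoscalingClient client =
      new RegionScopedTitusAutoscalingClient(
          region, new NoopRegistry(), "test", "titusapi", channelFactory);
  List<ScalingPolicyResult> policies = client.getJobScalingPolicies("myJobId");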
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client; + +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.protobuf.Empty; +import com.netflix.frigga.Names; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import com.netflix.spinnaker.clouddriver.titus.client.model.ActivateJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.DisruptionBudgetHelper; +import com.netflix.spinnaker.clouddriver.titus.client.model.GrpcChannelFactory; +import com.netflix.spinnaker.clouddriver.titus.client.model.HealthStatus; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import com.netflix.spinnaker.clouddriver.titus.client.model.JobDescription; +import com.netflix.spinnaker.clouddriver.titus.client.model.JobDisruptionBudgetUpdateRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.ResizeJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.Task; +import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.TitusHealth; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest; +import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.titus.grpc.protogen.Capacity; +import com.netflix.titus.grpc.protogen.JobCapacityUpdate; +import com.netflix.titus.grpc.protogen.JobChangeNotification; +import com.netflix.titus.grpc.protogen.JobDisruptionBudget; +import com.netflix.titus.grpc.protogen.JobDisruptionBudgetUpdate; +import com.netflix.titus.grpc.protogen.JobId; +import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc; +import com.netflix.titus.grpc.protogen.JobProcessesUpdate; +import com.netflix.titus.grpc.protogen.JobQuery; +import com.netflix.titus.grpc.protogen.JobQueryResult; +import com.netflix.titus.grpc.protogen.JobStatusUpdate; +import com.netflix.titus.grpc.protogen.ObserveJobsQuery; +import com.netflix.titus.grpc.protogen.Page; +import com.netflix.titus.grpc.protogen.ServiceJobSpec; +import com.netflix.titus.grpc.protogen.TaskKillRequest; +import com.netflix.titus.grpc.protogen.TaskQuery; +import com.netflix.titus.grpc.protogen.TaskQueryResult; +import io.grpc.Status; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class RegionScopedTitusClient implements TitusClient { + + private final Logger log = LoggerFactory.getLogger(getClass()); + + /** Default connect timeout in milliseconds */ + private static final long DEFAULT_CONNECT_TIMEOUT = TimeUnit.SECONDS.toMillis(60); + + /** Default read timeout in milliseconds */ + private static final long DEFAULT_READ_TIMEOUT = 20000; + + /** Default find tasks deadline in milliseconds */ + private static final long FIND_TASKS_DEADLINE = 30000; + + /** An instance of {@link TitusRegion} that this RegionScopedTitusClient will use */ + private final TitusRegion titusRegion; + 
+  private final Registry registry;
+
+  private final List<TitusJobCustomizer> titusJobCustomizers;
+
+  private final String environment;
+
+  private final ObjectMapper objectMapper;
+
+  /** Stub built on a channel configured with the connect timeout; used for unary calls. */
+  private final JobManagementServiceGrpc.JobManagementServiceBlockingStub grpcBlockingStub;
+
+  /** Stub built on a channel with no timeout (0); used for the long-lived observeJobs stream. */
+  private final JobManagementServiceGrpc.JobManagementServiceBlockingStub grpcNoDeadlineStub;
+
+  private final RetrySupport retrySupport;
+
+  public RegionScopedTitusClient(
+      TitusRegion titusRegion,
+      Registry registry,
+      List<TitusJobCustomizer> titusJobCustomizers,
+      String environment,
+      String eurekaName,
+      GrpcChannelFactory grpcChannelFactory,
+      RetrySupport retrySupport) {
+    this(
+        titusRegion,
+        DEFAULT_CONNECT_TIMEOUT,
+        DEFAULT_READ_TIMEOUT,
+        TitusClientObjectMapper.configure(),
+        registry,
+        titusJobCustomizers,
+        environment,
+        eurekaName,
+        grpcChannelFactory,
+        retrySupport);
+  }
+
+  public RegionScopedTitusClient(
+      TitusRegion titusRegion,
+      long connectTimeoutMillis,
+      long readTimeoutMillis,
+      ObjectMapper objectMapper,
+      Registry registry,
+      List<TitusJobCustomizer> titusJobCustomizers,
+      String environment,
+      String eurekaName,
+      GrpcChannelFactory channelFactory,
+      RetrySupport retrySupport) {
+    this.titusRegion = titusRegion;
+    this.registry = registry;
+    this.titusJobCustomizers = titusJobCustomizers;
+    this.environment = environment;
+    this.objectMapper = objectMapper;
+    this.retrySupport = retrySupport;
+
+    this.grpcBlockingStub =
+        JobManagementServiceGrpc.newBlockingStub(
+            channelFactory.build(
+                titusRegion, environment, eurekaName, connectTimeoutMillis, registry));
+
+    this.grpcNoDeadlineStub =
+        JobManagementServiceGrpc.newBlockingStub(
+            channelFactory.build(titusRegion, environment, eurekaName, 0, registry));
+
+    if (!titusRegion.getFeatureFlags().isEmpty()) {
+      log.info(
+          "Experimental Titus V3 client feature flags {} enabled for account {} and region {}",
+          StringUtils.join(titusRegion.getFeatureFlags(), ","),
+          titusRegion.getAccount(),
+          titusRegion.getName());
+    }
+  }
+
+  // APIs
+  // ------------------------------------------------------------------------------------------
+
+  @Override
+  public Job getJobAndAllRunningAndCompletedTasks(String jobId) {
+    return new Job(
+        grpcBlockingStub.findJob(JobId.newBuilder().setId(jobId).build()),
+        getTasks(Arrays.asList(jobId), true).get(jobId));
+  }
+
+  @Override
+  public Job findJobById(String jobId, boolean includeTasks) {
+    return new Job(
+        grpcBlockingStub.findJob(JobId.newBuilder().setId(jobId).build()),
+        includeTasks ? getTasks(List.of(jobId), false).get(jobId) : Collections.emptyList());
+  }
+
+  @Override
+  public Job findJobByName(String jobName, boolean includeTasks) {
+    JobQuery.Builder jobQuery =
+        JobQuery.newBuilder()
+            .putFilteringCriteria("jobType", "SERVICE")
+            .putFilteringCriteria("attributes", "source:spinnaker,name:" + jobName)
+            .putFilteringCriteria("attributes.op", "and");
+
+    List<Job> results = getJobs(jobQuery, includeTasks);
+    return results.isEmpty() ?
null : results.get(0); + } + + @Override + public Job findJobByName(String jobName) { + return findJobByName(jobName, false); + } + + @Override + public List findJobsByApplication(String application) { + JobQuery.Builder jobQuery = + JobQuery.newBuilder() + .putFilteringCriteria("appName", application) + .putFilteringCriteria("jobType", "SERVICE"); + return getJobs(jobQuery, false); + } + + @Override + public String submitJob(SubmitJobRequest submitJobRequest) { + JobDescription jobDescription = submitJobRequest.getJobDescription(); + if (jobDescription.getType() == null) { + jobDescription.setType("service"); + } + if (jobDescription.getUser() == null) { + jobDescription.setUser("spinnaker@netflix.com"); + } else if (!jobDescription.getUser().contains("@")) { + jobDescription.setUser(jobDescription.getUser() + "@netflix.com"); + } + if (jobDescription.getJobGroupSequence() == null + && "service".equals(jobDescription.getType())) { + try { + int sequence = Names.parseName(jobDescription.getName()).getSequence(); + jobDescription.setJobGroupSequence(String.format("v%03d", sequence)); + } catch (Exception e) { + // fail silently if we can't get a job group sequence: This is normal if no prior jobs + // exist. + } + } + jobDescription.getLabels().put("name", jobDescription.getName()); + jobDescription.getLabels().put("source", "spinnaker"); + jobDescription.getLabels().put("spinnakerAccount", submitJobRequest.getCredentials()); + for (TitusJobCustomizer customizer : titusJobCustomizers) { + customizer.customize(jobDescription); + } + return TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .createJob(jobDescription.getGrpcJobDescriptor()) + .getId(); + } + + @Override + public Task getTask(String taskId) { + // return new + // Task(grpcBlockingStub.findTask(com.netflix.titus.grpc.protogen.TaskId.newBuilder().setId(taskId).build())); + return null; + } + + @Override + public void updateDisruptionBudget(JobDisruptionBudgetUpdateRequest request) { + JobDisruptionBudget disruptionBudget = + DisruptionBudgetHelper.convertJobDisruptionBudget(request.getDisruptionBudget()); + TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .updateJobDisruptionBudget( + JobDisruptionBudgetUpdate.newBuilder() + .setDisruptionBudget(disruptionBudget) + .setJobId(request.getJobId()) + .build()); + } + + @Override + public void updateScalingProcesses(ServiceJobProcessesRequest serviceJobProcessesRequest) { + TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .updateJobProcesses( + JobProcessesUpdate.newBuilder() + .setServiceJobProcesses( + ServiceJobSpec.ServiceJobProcesses.newBuilder() + .setDisableDecreaseDesired( + serviceJobProcessesRequest + .getServiceJobProcesses() + .isDisableDecreaseDesired()) + .setDisableIncreaseDesired( + serviceJobProcessesRequest + .getServiceJobProcesses() + .isDisableIncreaseDesired())) + .setJobId(serviceJobProcessesRequest.getJobId()) + .build()); + } + + @Override + public void resizeJob(ResizeJobRequest resizeJobRequest) { + TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .updateJobCapacity( + JobCapacityUpdate.newBuilder() + .setJobId(resizeJobRequest.getJobId()) + .setCapacity( + Capacity.newBuilder() + .setDesired(resizeJobRequest.getInstancesDesired()) + .setMax(resizeJobRequest.getInstancesMax()) + .setMin(resizeJobRequest.getInstancesMin())) + .build()); + } + + @Override + public void activateJob(ActivateJobRequest activateJobRequest) { + TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .updateJobStatus( + 
JobStatusUpdate.newBuilder() + .setId(activateJobRequest.getJobId()) + .setEnableStatus(activateJobRequest.getInService()) + .build()); + } + + @Override + public void setAutoscaleEnabled(String jobId, boolean shouldEnable) { + TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .updateJobProcesses( + JobProcessesUpdate.newBuilder() + .setServiceJobProcesses( + ServiceJobSpec.ServiceJobProcesses.newBuilder() + .setDisableDecreaseDesired(!shouldEnable) + .setDisableIncreaseDesired(!shouldEnable) + .build()) + .setJobId(jobId) + .build()); + } + + @Override + public void terminateJob(TerminateJobRequest terminateJobRequest) { + TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .killJob(JobId.newBuilder().setId(terminateJobRequest.getJobId()).build()); + } + + @Override + public void terminateTasksAndShrink( + TerminateTasksAndShrinkJobRequest terminateTasksAndShrinkJob) { + List failedTasks = new ArrayList<>(); + terminateTasksAndShrinkJob + .getTaskIds() + .forEach( + id -> { + try { + killTaskWithRetry(id, terminateTasksAndShrinkJob); + } catch (Exception e) { + failedTasks.add(id); + log.error( + "Failed to terminate and shrink titus task {} in account {} and region {}", + id, + titusRegion.getAccount(), + titusRegion.getName(), + e); + } + }); + if (!failedTasks.isEmpty()) { + throw new TitusException( + "Failed to terminate and shrink titus tasks: " + StringUtils.join(failedTasks, ",")); + } + } + + private void killTaskWithRetry( + String id, TerminateTasksAndShrinkJobRequest terminateTasksAndShrinkJob) { + retrySupport.retry( + () -> { + try { + return TitusClientAuthenticationUtil.attachCaller(grpcBlockingStub) + .killTask( + TaskKillRequest.newBuilder() + .setTaskId(id) + .setShrink(terminateTasksAndShrinkJob.isShrink()) + .build()); + } catch (io.grpc.StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { + log.warn( + "Titus task {} not found, continuing with terminate tasks and shrink job request.", + id); + return Empty.newBuilder().build(); + } + throw e; + } + }, + 3, + 1000, + false); + } + + @Override + public Map logsDownload(String taskId) { + return null; + } + + @Override + public TitusHealth getHealth() { + return new TitusHealth(HealthStatus.HEALTHY); + } + + private List getJobs(JobQuery.Builder jobQuery) { + return getJobs(jobQuery, true); + } + + private List getJobs(JobQuery.Builder jobQuery, boolean includeTasks) { + List grpcJobs = getJobsWithFilter(jobQuery); + final Map> tasks; + + if (includeTasks) { + List jobIds = + grpcJobs.stream() + .map(com.netflix.titus.grpc.protogen.Job::getId) + .collect(Collectors.toList()); + tasks = getTasks(jobIds, false); + } else { + tasks = Collections.emptyMap(); + } + return grpcJobs.stream() + .map(grpcJob -> new Job(grpcJob, tasks.get(grpcJob.getId()))) + .collect(Collectors.toList()); + } + + @Override + public List getAllJobsWithoutTasks() { + JobQuery.Builder jobQuery = + JobQuery.newBuilder() + .putFilteringCriteria("jobType", "SERVICE") + .putFilteringCriteria("attributes", "source:spinnaker"); + + return getJobs(jobQuery, false); + } + + @Override + public Map getAllJobNames() { + JobQuery.Builder jobQuery = + JobQuery.newBuilder() + .putFilteringCriteria("jobType", "SERVICE") + .putFilteringCriteria("attributes", "source:spinnaker") + .addFields("id") + .addFields("jobDescriptor.attributes.name"); + + List grpcJobs = getJobsWithFilter(jobQuery, 10000); + + return grpcJobs.stream() + .collect( + Collectors.toMap( + com.netflix.titus.grpc.protogen.Job::getId, + it 
-> it.getJobDescriptor().getAttributesOrDefault("name", ""))); + } + + @Override + public Map> getTaskIdsForJobIds() { + String filterByStates = "Launched,StartInitiated,Started"; + + TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder(); + taskQueryBuilder + .putFilteringCriteria("attributes", "source:spinnaker") + .putFilteringCriteria("taskStates", filterByStates) + .addFields("id") + .addFields("jobId"); + + List grpcTasks = getTasksWithFilter(taskQueryBuilder); + return grpcTasks.stream() + .collect( + Collectors.groupingBy( + com.netflix.titus.grpc.protogen.Task::getJobId, + mapping(com.netflix.titus.grpc.protogen.Task::getId, toList()))); + } + + @Override + public Iterator observeJobs(ObserveJobsQuery observeJobsQuery) { + return grpcNoDeadlineStub.observeJobs(observeJobsQuery); + } + + private Map> getTasks( + List jobIds, boolean includeDoneJobs) { + TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder(); + if (!jobIds.isEmpty()) { + taskQueryBuilder.putFilteringCriteria( + "jobIds", jobIds.stream().collect(Collectors.joining(","))); + } + taskQueryBuilder.putFilteringCriteria("attributes", "source:spinnaker"); + String filterByStates = "Launched,StartInitiated,Started"; + if (includeDoneJobs) { + filterByStates = filterByStates + ",KillInitiated,Finished"; + } + taskQueryBuilder.putFilteringCriteria("taskStates", filterByStates); + + List tasks = getTasksWithFilter(taskQueryBuilder); + return tasks.stream() + .collect(Collectors.groupingBy(com.netflix.titus.grpc.protogen.Task::getJobId)); + } + + @Override + public List getAllTasks() { + TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder(); + taskQueryBuilder.putFilteringCriteria("attributes", "source:spinnaker"); + String filterByStates = "Launched,StartInitiated,Started"; + taskQueryBuilder.putFilteringCriteria("taskStates", filterByStates); + + List tasks = getTasksWithFilter(taskQueryBuilder); + return tasks.stream().map(Task::new).collect(toList()); + } + + private List getJobsWithFilter( + JobQuery.Builder jobQueryBuilder) { + return getJobsWithFilter(jobQueryBuilder, 1000); + } + + private List getJobsWithFilter( + JobQuery.Builder jobQueryBuilder, Integer pageSize) { + List grpcJobs = new ArrayList<>(); + String cursor = ""; + boolean hasMore; + do { + if (cursor.isEmpty()) { + jobQueryBuilder.setPage(Page.newBuilder().setPageSize(pageSize)); + } else { + jobQueryBuilder.setPage(Page.newBuilder().setCursor(cursor).setPageSize(pageSize)); + } + + JobQuery criteria = jobQueryBuilder.build(); + JobQueryResult resultPage = + TitusClientCompressionUtil.attachCaller(grpcBlockingStub).findJobs(criteria); + grpcJobs.addAll(resultPage.getItemsList()); + cursor = resultPage.getPagination().getCursor(); + hasMore = resultPage.getPagination().getHasMore(); + } while (hasMore); + return grpcJobs; + } + + private List getTasksWithFilter( + TaskQuery.Builder taskQueryBuilder) { + + final int pageSize = 1000; + List grpcTasks = new ArrayList<>(); + + TaskQueryResult taskResults; + String cursor = ""; + boolean hasMore; + + do { + if (cursor.isEmpty()) { + taskQueryBuilder.setPage(Page.newBuilder().setPageSize(pageSize)); + } else { + taskQueryBuilder.setPage(Page.newBuilder().setCursor(cursor).setPageSize(pageSize)); + } + taskResults = + TitusClientCompressionUtil.attachCaller( + grpcBlockingStub.withDeadlineAfter(FIND_TASKS_DEADLINE, TimeUnit.MILLISECONDS)) + .findTasks(taskQueryBuilder.build()); + grpcTasks.addAll(taskResults.getItemsList()); + cursor = taskResults.getPagination().getCursor(); + hasMore = 
taskResults.getPagination().getHasMore(); + } while (hasMore); + return grpcTasks; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusLoadBalancerClient.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusLoadBalancerClient.java new file mode 100644 index 00000000000..f440248b539 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusLoadBalancerClient.java @@ -0,0 +1,102 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.titus.client.model.GrpcChannelFactory; +import com.netflix.titus.grpc.protogen.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class RegionScopedTitusLoadBalancerClient implements TitusLoadBalancerClient { + + /** Default connect timeout in milliseconds */ + private static final long DEFAULT_CONNECT_TIMEOUT = 60000; + + private final LoadBalancerServiceGrpc.LoadBalancerServiceBlockingStub + loadBalancerServiceBlockingStub; + + public RegionScopedTitusLoadBalancerClient( + TitusRegion titusRegion, + Registry registry, + String environment, + String eurekaName, + GrpcChannelFactory channelFactory) { + this.loadBalancerServiceBlockingStub = + LoadBalancerServiceGrpc.newBlockingStub( + channelFactory.build( + titusRegion, environment, eurekaName, DEFAULT_CONNECT_TIMEOUT, registry)); + } + + @Override + public List getJobLoadBalancers(String jobId) { + return loadBalancerServiceBlockingStub + .getJobLoadBalancers(JobId.newBuilder().setId(jobId).build()) + .getLoadBalancersList(); + } + + @Override + public void addLoadBalancer(String jobId, String loadBalancerId) { + TitusClientAuthenticationUtil.attachCaller(loadBalancerServiceBlockingStub) + .addLoadBalancer( + AddLoadBalancerRequest.newBuilder() + .setJobId(jobId) + .setLoadBalancerId(LoadBalancerId.newBuilder().setId(loadBalancerId).build()) + .build()); + } + + @Override + public void removeLoadBalancer(String jobId, String loadBalancerId) { + TitusClientAuthenticationUtil.attachCaller(loadBalancerServiceBlockingStub) + .removeLoadBalancer( + RemoveLoadBalancerRequest.newBuilder() + .setJobId(jobId) + .setLoadBalancerId(LoadBalancerId.newBuilder().setId(loadBalancerId).build()) + .build()); + } + + public Map> getAllLoadBalancers() { + Map> results = new HashMap<>(); + String cursor = ""; + boolean hasMore = true; + do { + Page.Builder loadBalancerPage = Page.newBuilder().setPageSize(1000); + if (!cursor.isEmpty()) { + loadBalancerPage.setCursor(cursor); + } + GetAllLoadBalancersResult getAllLoadBalancersResult = + loadBalancerServiceBlockingStub.getAllLoadBalancers( + GetAllLoadBalancersRequest.newBuilder().setPage(loadBalancerPage).build()); + for (GetJobLoadBalancersResult result : 
getAllLoadBalancersResult.getJobLoadBalancersList()) { + for (LoadBalancerId loadBalancerid : result.getLoadBalancersList()) { + if (results.get(result.getJobId()) == null) { + List loadBalancers = new ArrayList<>(); + loadBalancers.add(loadBalancerid.getId()); + results.put(result.getJobId(), loadBalancers); + } else { + results.get(result.getJobId()).add(loadBalancerid.getId()); + } + } + } + hasMore = getAllLoadBalancersResult.getPagination().getHasMore(); + cursor = getAllLoadBalancersResult.getPagination().getCursor(); + } while (hasMore); + return results; + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusAutoscalingClient.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusAutoscalingClient.java similarity index 99% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusAutoscalingClient.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusAutoscalingClient.java index cbee79e6fb6..4cddc44eb6f 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusAutoscalingClient.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusAutoscalingClient.java @@ -17,7 +17,6 @@ package com.netflix.spinnaker.clouddriver.titus.client; import com.netflix.titus.grpc.protogen.*; - import java.util.List; public interface TitusAutoscalingClient { diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClient.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClient.java new file mode 100644 index 00000000000..008c22a8542 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClient.java @@ -0,0 +1,118 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
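The per-job accumulation in getAllLoadBalancers above (null check, create list, put) can be collapsed with Map.computeIfAbsent; a behavior-equivalent sketch of just the inner loops:

  for (GetJobLoadBalancersResult result : getAllLoadBalancersResult.getJobLoadBalancersList()) {
    for (LoadBalancerId loadBalancerId : result.getLoadBalancersList()) {
      results
          .computeIfAbsent(result.getJobId(), jobId -> new ArrayList<>())
          .add(loadBalancerId.getId());
    }
  }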
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.client;
+
+import com.netflix.spinnaker.clouddriver.titus.client.model.*;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest;
+import com.netflix.titus.grpc.protogen.JobChangeNotification;
+import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public interface TitusClient {
+
+  /**
+   * @param jobId id of the Titus job to look up
+   * @return the job together with all of its tasks, including completed ones
+   */
+  public Job getJobAndAllRunningAndCompletedTasks(String jobId);
+
+  /**
+   * @param jobId id of the Titus job to look up
+   * @param includeTasks whether to also resolve the job's active tasks
+   * @return the job, with tasks populated only when requested
+   */
+  public Job findJobById(String jobId, boolean includeTasks);
+
+  /**
+   * @param jobName name of the Spinnaker-managed service job
+   * @param includeTasks whether to also resolve the job's active tasks
+   * @return the matching job, or null if none exists
+   */
+  public Job findJobByName(String jobName, boolean includeTasks);
+
+  /**
+   * @param jobName name of the Spinnaker-managed service job
+   * @return the matching job without tasks, or null if none exists
+   */
+  public Job findJobByName(String jobName);
+
+  /**
+   * @param application Spinnaker application name
+   * @return all service jobs belonging to the application
+   */
+  public List<Job> findJobsByApplication(String application);
+
+  /**
+   * @param submitJobRequest description of the job to create
+   * @return the id of the newly created job
+   */
+  public String submitJob(SubmitJobRequest submitJobRequest);
+
+  /**
+   * @param taskId id of the task to look up
+   * @return the task, where the implementation supports task lookup
+   */
+  public Task getTask(String taskId);
+
+  /** @param jobDisruptionBudgetUpdateRequest job id and new disruption budget to apply */
+  public void updateDisruptionBudget(
+      JobDisruptionBudgetUpdateRequest jobDisruptionBudgetUpdateRequest);
+
+  /** @param serviceJobProcessesRequest scaling processes (disable increase/decrease) to apply */
+  public void updateScalingProcesses(ServiceJobProcessesRequest serviceJobProcessesRequest);
+
+  /** @param resizeJobRequest new min/max/desired capacity for the job */
+  public void resizeJob(ResizeJobRequest resizeJobRequest);
+
+  /** @param activateJobRequest job id and desired in-service state */
+  public void activateJob(ActivateJobRequest activateJobRequest);
+
+  /** @param shouldEnable whether autoscaling should be enabled for the job */
+  public void setAutoscaleEnabled(String jobId, boolean shouldEnable);
+
+  /** @param terminateJobRequest id of the job to kill */
+  public void terminateJob(TerminateJobRequest terminateJobRequest);
+
+  /** @param terminateTasksAndShrinkJob task ids to kill, optionally shrinking the job */
+  public void terminateTasksAndShrink(
+      TerminateTasksAndShrinkJobRequest terminateTasksAndShrinkJob);
+
+  /**
+   * @param taskId id of the task whose logs should be fetched
+   * @return log download details for the task, where supported
+   */
+  public Map logsDownload(String taskId);
+
+  /** @return health of the client's connection to the Titus region */
+  public TitusHealth getHealth();
+
+  /**
+   * For use in TitusV2ClusterCachingAgent
+   *
+   * @return all jobs w/o task detail that are managed by Spinnaker
+   */
+  public List<Job> getAllJobsWithoutTasks();
+
+  /**
+   * For use in TitusInstanceCachingAgent
+   *
+   * @return all tasks managed by Spinnaker
+   */
+  public List<Task> getAllTasks();
+
+  /** @return all Spinnaker-managed job ids mapped to their job names */
+  public Map<String, String> getAllJobNames();
+
+  /** @return ids of all active tasks, grouped by job id */
+  public Map<String, List<String>> getTaskIdsForJobIds();
+
+  /** @return a blocking iterator over job change events matching the query */
+  public Iterator<JobChangeNotification> observeJobs(ObserveJobsQuery observeJobsQuery);
+}
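The name-based lookups return null rather than an empty Optional when no Spinnaker-managed job matches, so callers must null-check. A minimal usage sketch (the client wiring, the job name, and the assumption that Job exposes a getTasks() accessor are illustrative):

  Job job = titusClient.findJobByName("api-test-v042", true);
  if (job == null) {
    // no Spinnaker-managed service job with that name in this account/region
  } else {
    for (Task task : job.getTasks()) {
      // inspect task state, host, containerIp, ...
    }
  }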
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientAuthenticationUtil.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientAuthenticationUtil.java
new file mode 100644
index 00000000000..7628cd19082
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientAuthenticationUtil.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.client;
+
+import com.google.common.base.Splitter;
+import com.netflix.spinnaker.security.AuthenticatedRequest;
+import io.grpc.Metadata;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.MetadataUtils;
+import java.util.List;
+import javax.annotation.Nonnull;
+
+public class TitusClientAuthenticationUtil {
+
+  private static final String CALLER_ID_HEADER = "X-Titus-CallerId";
+  private static final String CALL_REASON = "X-Titus-CallReason";
+  private static final Metadata.Key<String> CALLER_ID_KEY =
+      Metadata.Key.of(CALLER_ID_HEADER, Metadata.ASCII_STRING_MARSHALLER);
+  private static final Metadata.Key<String> CALL_REASON_KEY =
+      Metadata.Key.of(CALL_REASON, Metadata.ASCII_STRING_MARSHALLER);
+
+  public static <STUB extends AbstractStub<STUB>> STUB attachCaller(STUB serviceStub) {
+    Metadata metadata = new Metadata();
+    metadata.put(CALLER_ID_KEY, AuthenticatedRequest.getSpinnakerUser().orElse("spinnaker"));
+    metadata.put(
+        CALL_REASON_KEY,
+        String.format(
+            "Invoked by Spinnaker execution %s, Application name %s",
+            getSpinnakerExecutionId(
+                AuthenticatedRequest.getSpinnakerExecutionId().orElse("unknown")),
+            AuthenticatedRequest.getSpinnakerApplication().orElse("unknown")));
+    return serviceStub.withInterceptors(MetadataUtils.newAttachHeadersInterceptor(metadata));
+  }
+
+  // The execution id header may carry colon-delimited segments; only the leading id is kept.
+  public static @Nonnull String getSpinnakerExecutionId(@Nonnull String executionIdHeader) {
+    if (!"unknown".equalsIgnoreCase(executionIdHeader) && !executionIdHeader.isEmpty()) {
+      List<String> ids = Splitter.on(':').splitToList(executionIdHeader);
+      return ids.get(0);
+    }
+    return "unknown";
+  }
+}
diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientCompressionUtil.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientCompressionUtil.java
similarity index 89%
rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientCompressionUtil.java
rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientCompressionUtil.java
index 024d0380b2d..5426a93fe06 100644
--- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientCompressionUtil.java
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientCompressionUtil.java
@@ -23,12 +23,12 @@ public class TitusClientCompressionUtil {
 
   private static String COMPRESSION_HEADER = "X-Titus-Compression";
 
-  private static Metadata.Key<String> COMPRESSION_KEY = Metadata.Key.of(COMPRESSION_HEADER, Metadata.ASCII_STRING_MARSHALLER);
+  private static Metadata.Key<String> COMPRESSION_KEY =
+      Metadata.Key.of(COMPRESSION_HEADER, Metadata.ASCII_STRING_MARSHALLER);
 
   public static <STUB extends AbstractStub<STUB>> STUB attachCaller(STUB serviceStub) {
     Metadata metadata = new Metadata();
     metadata.put(COMPRESSION_KEY, "gzip");
     return serviceStub.withInterceptors(MetadataUtils.newAttachHeadersInterceptor(metadata));
   }
-
 }
diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientObjectMapper.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientObjectMapper.java
similarity index 86%
rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientObjectMapper.java
rename to
clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientObjectMapper.java index 046082aff86..f20718c354d 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusClientObjectMapper.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusClientObjectMapper.java @@ -19,7 +19,6 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; - import java.util.Optional; public class TitusClientObjectMapper { @@ -34,8 +33,9 @@ public static ObjectMapper configure(ObjectMapper template) { public static ObjectMapper configure(Optional objectMapper) { return objectMapper - .map(ObjectMapper::copy).orElse(new ObjectMapper()) - .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) - .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS); + .map(ObjectMapper::copy) + .orElse(new ObjectMapper()) + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS); } } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusFaultDomain.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusFaultDomain.java similarity index 99% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusFaultDomain.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusFaultDomain.java index 5cf2ee078c4..7128e7e2843 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusFaultDomain.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusFaultDomain.java @@ -38,7 +38,6 @@ public boolean equals(Object o) { TitusFaultDomain that = (TitusFaultDomain) o; return name.equals(that.name); - } @Override diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusJobCustomizer.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusJobCustomizer.java similarity index 100% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusJobCustomizer.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusJobCustomizer.java diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusLoadBalancerClient.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusLoadBalancerClient.java similarity index 93% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusLoadBalancerClient.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusLoadBalancerClient.java index 079f2c38fd3..a2852d17c58 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/TitusLoadBalancerClient.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusLoadBalancerClient.java @@ -16,9 +16,7 @@ package com.netflix.spinnaker.clouddriver.titus.client; - import com.netflix.titus.grpc.protogen.LoadBalancerId; - import java.util.List; import java.util.Map; @@ -30,9 +28,6 @@ public interface TitusLoadBalancerClient { void removeLoadBalancer(String jobId, String loadBalancerId); - /** - 
* @return a map of jobId to list of loadbalancerIds - */ + /** @return a map of jobId to list of loadbalancerIds */ Map> getAllLoadBalancers(); - } diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusRegion.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusRegion.java new file mode 100644 index 00000000000..bc42da55929 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/TitusRegion.java @@ -0,0 +1,160 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class TitusRegion { + private String name; + private String account; + private String endpoint; + private List faultDomains; + private String applicationName; + private String url; + private int port; + private List featureFlags; + private String eurekaName; + private String eurekaRegion; + + private T notNull(T val, String name) { + if (val == null) { + throw new NullPointerException(name); + } + return val; + } + + public TitusRegion() {} + + public TitusRegion( + String name, + String account, + String endpoint, + List faultDomains, + String applicationName, + String url, + Integer port, + List featureFlags, + String eurekaName, + String eurekaRegion) { + this.name = notNull(name, "name"); + this.account = notNull(account, "account"); + this.endpoint = EndpointValidator.validateEndpoint(endpoint); + this.faultDomains = + faultDomains == null ? 
Collections.emptyList() : Collections.unmodifiableList(faultDomains); + this.applicationName = applicationName; + this.url = url; + if (port != null) { + this.port = port; + } else { + this.port = 7104; + } + if (featureFlags == null) { + this.featureFlags = new ArrayList<>(); + } else { + this.featureFlags = featureFlags; + } + this.eurekaRegion = eurekaRegion; + this.eurekaName = eurekaName; + } + + public TitusRegion( + String name, + String account, + String endpoint, + String applicationName, + String url, + Integer port, + List featureFlags, + String eurekaName, + String eurekaRegion) { + this( + name, + account, + endpoint, + Collections.emptyList(), + applicationName, + url, + port, + featureFlags, + eurekaName, + eurekaRegion); + } + + public String getAccount() { + return account; + } + + public String getName() { + return name; + } + + public String getEndpoint() { + return endpoint; + } + + public List getFaultDomains() { + return faultDomains; + } + + public String getApplicationName() { + return applicationName; + } + + public Integer getPort() { + return port; + } + + public String getUrl() { + return url; + } + + public List getFeatureFlags() { + return featureFlags; + } + + public String getEurekaName() { + return eurekaName; + } + + public String getEurekaRegion() { + return eurekaRegion; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TitusRegion that = (TitusRegion) o; + + if (!name.equals(that.name)) return false; + if (!account.equals(that.account)) return false; + if (!endpoint.equals(that.endpoint)) return false; + return faultDomains.equals(that.faultDomains); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + account.hashCode(); + result = 31 * result + endpoint.hashCode(); + result = 31 * result + faultDomains.hashCode(); + return result; + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/AbstractJobRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/AbstractJobRequest.java similarity index 100% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/AbstractJobRequest.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/AbstractJobRequest.java diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/ActivateJobRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ActivateJobRequest.java similarity index 99% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/ActivateJobRequest.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ActivateJobRequest.java index 0133587c511..504a885f324 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/ActivateJobRequest.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ActivateJobRequest.java @@ -28,5 +28,4 @@ public ActivateJobRequest withInService(boolean inService) { this.inService = inService; return this; } - } diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/DisruptionBudget.java 
b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/DisruptionBudget.java new file mode 100644 index 00000000000..a4e2f5852b9 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/DisruptionBudget.java @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.*; +import java.io.Serializable; +import java.util.List; +import lombok.Data; + +@Data +public class DisruptionBudget implements Serializable { + AvailabilityPercentageLimit availabilityPercentageLimit; + UnhealthyTasksLimit unhealthyTasksLimit; + RelocationLimit relocationLimit; + RatePercentagePerHour ratePercentagePerHour; + + boolean rateUnlimited; + List timeWindows; + List containerHealthProviders; + SelfManaged selfManaged; + RatePerInterval ratePerInterval; + RatePercentagePerInterval ratePercentagePerInterval; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/DisruptionBudgetHelper.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/DisruptionBudgetHelper.java new file mode 100644 index 00000000000..b5da0dbff4b --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/DisruptionBudgetHelper.java @@ -0,0 +1,132 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
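The gRPC JobDisruptionBudget takes at most one policy and one rate (the PolicyCase/RateCase oneofs referenced in Job.java below), and DisruptionBudgetHelper maps whichever model fields are non-null, so a typical budget sets one of each. Since the model is a Lombok @Data bean, construction is plain setters; a sketch with illustrative numbers, using the one-argument value types that Job.java below also constructs:

  DisruptionBudget budget = new DisruptionBudget();
  // Policy: keep at least 95% of containers healthy during relocation.
  budget.setAvailabilityPercentageLimit(new AvailabilityPercentageLimit(95));
  // Rate: relocate no more than 5% of containers per hour.
  budget.setRatePercentagePerHour(new RatePercentagePerHour(5));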
+ */ +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import com.netflix.titus.grpc.protogen.ContainerHealthProvider; +import com.netflix.titus.grpc.protogen.Day; +import com.netflix.titus.grpc.protogen.JobDisruptionBudget; +import com.netflix.titus.grpc.protogen.TimeWindow; + +public class DisruptionBudgetHelper { + + public static JobDisruptionBudget convertJobDisruptionBudget(DisruptionBudget budget) { + JobDisruptionBudget.Builder builder = JobDisruptionBudget.newBuilder(); + if (budget.getAvailabilityPercentageLimit() != null) { + builder.setAvailabilityPercentageLimit( + JobDisruptionBudget.AvailabilityPercentageLimit.newBuilder() + .setPercentageOfHealthyContainers( + budget.availabilityPercentageLimit.getPercentageOfHealthyContainers()) + .build()); + } + if (budget.getContainerHealthProviders() != null + && !budget.getContainerHealthProviders().isEmpty()) { + budget + .getContainerHealthProviders() + .forEach( + chp -> + builder.addContainerHealthProviders( + ContainerHealthProvider.newBuilder().setName(chp.getName()).build())); + } + + if (budget.getSelfManaged() != null) { + builder.setSelfManaged( + JobDisruptionBudget.SelfManaged.newBuilder() + .setRelocationTimeMs(budget.getSelfManaged().getRelocationTimeMs()) + .build()); + } + + if (budget.getRatePercentagePerHour() != null) { + builder.setRatePercentagePerHour( + JobDisruptionBudget.RatePercentagePerHour.newBuilder() + .setMaxPercentageOfContainersRelocatedInHour( + budget.getRatePercentagePerHour().getMaxPercentageOfContainersRelocatedInHour()) + .build()); + } + + if (budget.getRatePerInterval() != null) { + builder.setRatePerInterval( + JobDisruptionBudget.RatePerInterval.newBuilder() + .setIntervalMs(budget.getRatePerInterval().getIntervalMs()) + .setLimitPerInterval(budget.getRatePerInterval().getLimitPerInterval()) + .build()); + } + + if (budget.getRatePercentagePerInterval() != null) { + builder.setRatePercentagePerInterval( + JobDisruptionBudget.RatePercentagePerInterval.newBuilder() + .setIntervalMs(budget.getRatePercentagePerInterval().getIntervalMs()) + .setPercentageLimitPerInterval( + budget.getRatePercentagePerInterval().getPercentageLimitPerInterval()) + .build()); + } + + if (budget.getRelocationLimit() != null) { + builder.setRelocationLimit( + JobDisruptionBudget.RelocationLimit.newBuilder() + .setLimit(budget.getRelocationLimit().getLimit())); + } + + if (budget.getTimeWindows() != null && !budget.getTimeWindows().isEmpty()) { + budget + .getTimeWindows() + .forEach( + tw -> { + TimeWindow.Builder timeWindowBuilder = TimeWindow.newBuilder(); + tw.getDays().forEach(day -> timeWindowBuilder.addDays(convertDay(day))); + tw.getHourlyTimeWindows() + .forEach( + htw -> { + timeWindowBuilder.addHourlyTimeWindows( + TimeWindow.HourlyTimeWindow.newBuilder() + .setEndHour(htw.getEndHour()) + .setStartHour(htw.getStartHour()) + .build()); + }); + timeWindowBuilder.setTimeZone(tw.getTimeZone()); + builder.addTimeWindows(timeWindowBuilder.build()); + }); + } + + if (budget.getUnhealthyTasksLimit() != null) { + builder.setUnhealthyTasksLimit( + JobDisruptionBudget.UnhealthyTasksLimit.newBuilder() + .setLimitOfUnhealthyContainers( + budget.getUnhealthyTasksLimit().getLimitOfUnhealthyContainers()) + .build()); + } + + return builder.build(); + } + + private static Day convertDay(String day) { + switch (day) { + case "Monday": + return Day.Monday; + case "Tuesday": + return Day.Tuesday; + case "Wednesday": + return Day.Wednesday; + case "Thursday": + return Day.Thursday; + case "Friday": + return 
Day.Friday; + case "Saturday": + return Day.Saturday; + default: + return Day.Sunday; + } + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Efs.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Efs.java similarity index 99% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Efs.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Efs.java index e0694a15a9d..50b7cf6f773 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/Efs.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Efs.java @@ -53,5 +53,4 @@ public String getEfsRelativeMountPoint() { public void setEfsRelativeMountPoint(String efsRelativeMountPoint) { this.efsRelativeMountPoint = efsRelativeMountPoint; } - } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/GrpcChannelFactory.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/GrpcChannelFactory.java similarity index 85% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/GrpcChannelFactory.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/GrpcChannelFactory.java index 7ec17be0495..ab01107720a 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/GrpcChannelFactory.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/GrpcChannelFactory.java @@ -22,10 +22,9 @@ public interface GrpcChannelFactory { public ManagedChannel build( - TitusRegion titusRegion, - String environment, - String eurekaName, - long defaultConnectTimeOut, - Registry registry - ); + TitusRegion titusRegion, + String environment, + String eurekaName, + long defaultConnectTimeOut, + Registry registry); } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/HealthStatus.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/HealthStatus.java similarity index 96% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/HealthStatus.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/HealthStatus.java index 217a6c11b26..68b79f84447 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/HealthStatus.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/HealthStatus.java @@ -17,5 +17,6 @@ package com.netflix.spinnaker.clouddriver.titus.client.model; public enum HealthStatus { - HEALTHY, UNHEALTHY + HEALTHY, + UNHEALTHY } diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Job.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Job.java new file mode 100644 index 00000000000..1a8d3bb3b68 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Job.java @@ -0,0 +1,668 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
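GrpcChannelFactory is the seam all of the region-scoped clients above share. A deliberately minimal implementation sketch, only to show the contract; it uses plaintext, ignores the environment, eureka, timeout, and registry parameters, and derives host and port naively from the region, unlike any real factory (io.grpc, java.net, and spectator imports assumed):

  public class PlaintextGrpcChannelFactory implements GrpcChannelFactory {
    @Override
    public ManagedChannel build(
        TitusRegion titusRegion,
        String environment,
        String eurekaName,
        long defaultConnectTimeOut,
        Registry registry) {
      try {
        // EndpointValidator has already guaranteed an http(s) URL with a host.
        URL url = new URL(titusRegion.getEndpoint());
        return ManagedChannelBuilder.forAddress(url.getHost(), titusRegion.getPort())
            .usePlaintext()
            .build();
      } catch (MalformedURLException e) {
        throw new IllegalStateException("Unparseable Titus endpoint", e);
      }
    }
  }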
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import static com.netflix.titus.grpc.protogen.JobDisruptionBudget.PolicyCase.*; +import static com.netflix.titus.grpc.protogen.JobDisruptionBudget.RateCase.*; + +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.*; +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.ContainerHealthProvider; +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.TimeWindow; +import com.netflix.titus.grpc.protogen.*; +import java.util.*; +import java.util.stream.Collectors; + +public class Job { + + private String id; + private String name; + private String type; + private List tags; + private String applicationName; + private String digest; + private String appName; + private String user; + private String version; + private String entryPoint; + private String cmd; + private String iamProfile; + private String capacityGroup; + private Boolean inService; + private int instances; + private int instancesMin; + private int instancesMax; + private int instancesDesired; + private int cpu; + private int memory; + private int disk; + private int gpu; + private int networkMbps; + private int[] ports; + private Map environment; + private Map containerAttributes; + private int retries; + private int runtimeLimitSecs; + private boolean allocateIpAddress; + private Date submittedAt; + private List tasks; + private Map labels; + private List securityGroups; + private String jobGroupStack; + private String jobGroupDetail; + private String jobGroupSequence; + private List hardConstraints; + private List softConstraints; + private Efs efs; + private MigrationPolicy migrationPolicy; + private DisruptionBudget disruptionBudget; + private String jobState; + private ServiceJobProcesses serviceJobProcesses; + + private SubmitJobRequest.Constraints constraints; + + private List signedAddressAllocations = new ArrayList<>(); + + public Job() {} + + public Job( + com.netflix.titus.grpc.protogen.Job grpcJob, + List grpcTasks) { + id = grpcJob.getId(); + + if (grpcJob.getJobDescriptor().getJobSpecCase().getNumber() + == JobDescriptor.BATCH_FIELD_NUMBER) { + type = "batch"; + BatchJobSpec batchJobSpec = grpcJob.getJobDescriptor().getBatch(); + instancesMin = batchJobSpec.getSize(); + instancesMax = batchJobSpec.getSize(); + instancesDesired = batchJobSpec.getSize(); + instances = batchJobSpec.getSize(); + runtimeLimitSecs = (int) batchJobSpec.getRuntimeLimitSec(); + retries = batchJobSpec.getRetryPolicy().getImmediate().getRetries(); + } + + if (grpcJob.getJobDescriptor().getJobSpecCase().getNumber() + == JobDescriptor.SERVICE_FIELD_NUMBER) { + type = "service"; + ServiceJobSpec serviceSpec = grpcJob.getJobDescriptor().getService(); + inService = serviceSpec.getEnabled(); + instances = serviceSpec.getCapacity().getDesired(); + instancesMin = serviceSpec.getCapacity().getMin(); + instancesMax = serviceSpec.getCapacity().getMax(); + instancesDesired = serviceSpec.getCapacity().getDesired(); + migrationPolicy = new MigrationPolicy(); + com.netflix.titus.grpc.protogen.MigrationPolicy policy = 
serviceSpec.getMigrationPolicy(); + if (policy + .getPolicyCase() + .equals(com.netflix.titus.grpc.protogen.MigrationPolicy.PolicyCase.SELFMANAGED)) { + migrationPolicy.setType("selfManaged"); + } else { + migrationPolicy.setType("systemDefault"); + } + } + + addDisruptionBudget(grpcJob); + labels = grpcJob.getJobDescriptor().getAttributesMap(); + containerAttributes = grpcJob.getJobDescriptor().getContainer().getAttributesMap(); + user = grpcJob.getJobDescriptor().getOwner().getTeamEmail(); + + if (grpcTasks != null) { + tasks = grpcTasks.stream().map(grpcTask -> new Task(grpcTask)).collect(Collectors.toList()); + } else { + tasks = new ArrayList<>(); + } + + appName = grpcJob.getJobDescriptor().getApplicationName(); + name = grpcJob.getJobDescriptor().getAttributesOrDefault("name", appName); + applicationName = grpcJob.getJobDescriptor().getContainer().getImage().getName(); + version = grpcJob.getJobDescriptor().getContainer().getImage().getTag(); + digest = grpcJob.getJobDescriptor().getContainer().getImage().getDigest(); + entryPoint = + grpcJob.getJobDescriptor().getContainer().getEntryPointList().stream() + .collect(Collectors.joining(" ")); + cmd = + grpcJob.getJobDescriptor().getContainer().getCommandList().stream() + .collect(Collectors.joining(" ")); + capacityGroup = grpcJob.getJobDescriptor().getCapacityGroup(); + cpu = (int) grpcJob.getJobDescriptor().getContainer().getResources().getCpu(); + memory = grpcJob.getJobDescriptor().getContainer().getResources().getMemoryMB(); + gpu = grpcJob.getJobDescriptor().getContainer().getResources().getGpu(); + networkMbps = grpcJob.getJobDescriptor().getContainer().getResources().getNetworkMbps(); + disk = grpcJob.getJobDescriptor().getContainer().getResources().getDiskMB(); + jobGroupSequence = grpcJob.getJobDescriptor().getJobGroupInfo().getSequence(); + jobGroupStack = grpcJob.getJobDescriptor().getJobGroupInfo().getStack(); + jobGroupDetail = grpcJob.getJobDescriptor().getJobGroupInfo().getDetail(); + environment = grpcJob.getJobDescriptor().getContainer().getEnvMap(); + securityGroups = + grpcJob + .getJobDescriptor() + .getContainer() + .getSecurityProfile() + .getSecurityGroupsList() + .stream() + .collect(Collectors.toList()); + iamProfile = grpcJob.getJobDescriptor().getContainer().getSecurityProfile().getIamRole(); + allocateIpAddress = true; + submittedAt = new Date(grpcJob.getStatus().getTimestamp()); + constraints = new SubmitJobRequest.Constraints(); + Map hardConstraintsMap = + grpcJob.getJobDescriptor().getContainer().getHardConstraints().getConstraintsMap(); + if (hardConstraintsMap != null) { + constraints.setHard(hardConstraintsMap); + } + Map softConstraintsMap = + grpcJob.getJobDescriptor().getContainer().getSoftConstraints().getConstraintsMap(); + if (softConstraintsMap != null) { + constraints.setSoft(softConstraintsMap); + } + softConstraints = new ArrayList(); + softConstraints.addAll( + grpcJob + .getJobDescriptor() + .getContainer() + .getSoftConstraints() + .getConstraintsMap() + .keySet()); + hardConstraints = new ArrayList(); + hardConstraints.addAll( + grpcJob + .getJobDescriptor() + .getContainer() + .getHardConstraints() + .getConstraintsMap() + .keySet()); + + jobState = grpcJob.getStatus().getState().toString(); + + if (grpcJob.getJobDescriptor().getContainer().getResources().getEfsMountsCount() > 0) { + efs = new Efs(); + ContainerResources.EfsMount firstMount = + grpcJob.getJobDescriptor().getContainer().getResources().getEfsMounts(0); + efs.setEfsId(firstMount.getEfsId()); + 
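// MountPerm is an enum on the Titus side (RO/WO/RW, mirrored by convertMountPerm below); the Spinnaker model stores it as a string. +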
efs.setMountPerm(firstMount.getMountPerm().toString()); + efs.setMountPoint(firstMount.getMountPoint()); + if (firstMount.getEfsRelativeMountPoint() != null) { + efs.setEfsRelativeMountPoint(firstMount.getEfsRelativeMountPoint()); + } + } + + if (grpcJob.getJobDescriptor().getService().getServiceJobProcesses() != null) { + serviceJobProcesses = new ServiceJobProcesses(); + serviceJobProcesses.setDisableDecreaseDesired( + grpcJob + .getJobDescriptor() + .getService() + .getServiceJobProcesses() + .getDisableDecreaseDesired()); + serviceJobProcesses.setDisableIncreaseDesired( + grpcJob + .getJobDescriptor() + .getService() + .getServiceJobProcesses() + .getDisableIncreaseDesired()); + } + addSignedAllocationList(grpcJob); + } + + // Add SignedAddressAllocationsList from grpc to Job.signedAddressAllocations + private void addSignedAllocationList(com.netflix.titus.grpc.protogen.Job grpcJob) { + grpcJob + .getJobDescriptor() + .getContainer() + .getResources() + .getSignedAddressAllocationsList() + .stream() + .map(this::addSignedAddressAllocations) + .forEach(signedAddressAllocation -> signedAddressAllocations.add(signedAddressAllocation)); + } + + private void addDisruptionBudget(com.netflix.titus.grpc.protogen.Job grpcJob) { + JobDisruptionBudget budget = grpcJob.getJobDescriptor().getDisruptionBudget(); + disruptionBudget = new DisruptionBudget(); + if (budget.getContainerHealthProvidersList() != null) { + disruptionBudget.setContainerHealthProviders( + budget.getContainerHealthProvidersList().stream() + .map(c -> new ContainerHealthProvider(c.getName())) + .collect(Collectors.toList())); + } + if (RATEUNLIMITED.equals(budget.getRateCase())) { + disruptionBudget.setRateUnlimited(true); + } + if (RATEPERCENTAGEPERHOUR.equals(budget.getRateCase())) { + disruptionBudget.setRatePercentagePerHour( + new RatePercentagePerHour( + budget.getRatePercentagePerHour().getMaxPercentageOfContainersRelocatedInHour())); + } + if (RATEPERINTERVAL.equals(budget.getRateCase())) { + disruptionBudget.setRatePerInterval( + new RatePerInterval( + budget.getRatePerInterval().getIntervalMs(), + budget.getRatePerInterval().getLimitPerInterval())); + } + if (RATEPERCENTAGEPERINTERVAL.equals(budget.getRateCase())) { + disruptionBudget.setRatePercentagePerInterval( + new RatePercentagePerInterval( + budget.getRatePercentagePerInterval().getIntervalMs(), + budget.getRatePercentagePerInterval().getPercentageLimitPerInterval())); + } + + if (SELFMANAGED.equals(budget.getPolicyCase())) { + disruptionBudget.setSelfManaged( + new SelfManaged(budget.getSelfManaged().getRelocationTimeMs())); + } + if (AVAILABILITYPERCENTAGELIMIT.equals(budget.getPolicyCase())) { + disruptionBudget.setAvailabilityPercentageLimit( + new AvailabilityPercentageLimit( + budget.getAvailabilityPercentageLimit().getPercentageOfHealthyContainers())); + } + if (UNHEALTHYTASKSLIMIT.equals(budget.getPolicyCase())) { + disruptionBudget.setUnhealthyTasksLimit( + new UnhealthyTasksLimit(budget.getUnhealthyTasksLimit().getLimitOfUnhealthyContainers())); + } + if (RELOCATIONLIMIT.equals(budget.getPolicyCase())) { + disruptionBudget.setRelocationLimit( + new RelocationLimit(budget.getRelocationLimit().getLimit())); + } + if (budget.getTimeWindowsList() != null) { + disruptionBudget.setTimeWindows( + budget.getTimeWindowsList().stream() + .map( + w -> + new TimeWindow( + w.getDaysList().stream().map(Enum::name).collect(Collectors.toList()), + w.getHourlyTimeWindowsList().stream() + .map(t -> new HourlyTimeWindow(t.getStartHour(), t.getEndHour())) + 
.collect(Collectors.toList()), + w.getTimeZone())) + .collect(Collectors.toList())); + } + } + + // Construct the titus internal model SignedAddressAllocations + private SignedAddressAllocations addSignedAddressAllocations( + SignedAddressAllocation signedAddressAllocation) { + SignedAddressAllocations grpcSignedAddressAllocations = new SignedAddressAllocations(); + + SignedAddressAllocations.AddressLocation grpcAddressLocation = + new SignedAddressAllocations.AddressLocation(); + grpcAddressLocation.setAvailabilityZone( + signedAddressAllocation.getAddressAllocation().getAddressLocation().getAvailabilityZone()); + grpcAddressLocation.setRegion( + signedAddressAllocation.getAddressAllocation().getAddressLocation().getRegion()); + grpcAddressLocation.setSubnetId( + signedAddressAllocation.getAddressAllocation().getAddressLocation().getSubnetId()); + + SignedAddressAllocations.AddressAllocation grpcAddressAllocation = + new SignedAddressAllocations.AddressAllocation(); + grpcAddressAllocation.setAddress(signedAddressAllocation.getAddressAllocation().getAddress()); + grpcAddressAllocation.setUuid(signedAddressAllocation.getAddressAllocation().getUuid()); + grpcAddressAllocation.setAddressLocation(grpcAddressLocation); + + grpcSignedAddressAllocations.setAddressAllocation(grpcAddressAllocation); + grpcSignedAddressAllocations.setAuthoritativePublicKey( + signedAddressAllocation.getAuthoritativePublicKey().toStringUtf8()); + grpcSignedAddressAllocations.setHostPublicKey( + signedAddressAllocation.getHostPublicKey().toStringUtf8()); + grpcSignedAddressAllocations.setHostPublicKeySignature( + signedAddressAllocation.getHostPublicKeySignature().toStringUtf8()); + grpcSignedAddressAllocations.setMessage(signedAddressAllocation.getMessage().toStringUtf8()); + grpcSignedAddressAllocations.setMessageSignature( + signedAddressAllocation.getMessageSignature().toStringUtf8()); + + return grpcSignedAddressAllocations; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getApplicationName() { + return applicationName; + } + + public void setApplicationName(String applicationName) { + this.applicationName = applicationName; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public String getEntryPoint() { + return entryPoint; + } + + public void setEntryPoint(String entryPoint) { + this.entryPoint = entryPoint; + } + + public String getCmd() { + return cmd; + } + + public void setCmd(String cmd) { + this.cmd = cmd; + } + + public int getInstances() { + return instances; + } + + public void setInstances(int instances) { + this.instances = instances; + } + + public int getInstancesMin() { + return instancesMin; + } + + public void setInstancesMin(int instancesMin) { + this.instancesMin = instancesMin; + } + + public int getInstancesMax() { + return instancesMax; + } + + public void setInstancesMax(int instancesMax) { + this.instancesMax = instancesMax; + } + + public int getInstancesDesired() { + return instancesDesired; + } + + public void setInstancesDesired(int instancesDesired) { + this.instancesDesired = instancesDesired; + } + + public int getCpu() { + return cpu; + } + + public void setCpu(int cpu) { + this.cpu = cpu; + } + + 
public int getMemory() { + return memory; + } + + public void setMemory(int memory) { + this.memory = memory; + } + + public int getDisk() { + return disk; + } + + public void setDisk(int disk) { + this.disk = disk; + } + + public void setGpu(int gpu) { + this.gpu = gpu; + } + + public int getGpu() { + return gpu; + } + + public int[] getPorts() { + return ports; + } + + public void setPorts(int[] ports) { + this.ports = ports; + } + + public Map getEnvironment() { + return environment; + } + + public void setEnvironment(Map environment) { + this.environment = environment; + } + + public int getRetries() { + return retries; + } + + public void setRetries(int retries) { + this.retries = retries; + } + + public int getRuntimeLimitSecs() { + return runtimeLimitSecs; + } + + public void setRuntimeLimitSecs(int runtimeLimitSecs) { + this.runtimeLimitSecs = runtimeLimitSecs; + } + + public boolean isAllocateIpAddress() { + return allocateIpAddress; + } + + public void setAllocateIpAddress(boolean allocateIpAddress) { + this.allocateIpAddress = allocateIpAddress; + } + + public Date getSubmittedAt() { + return submittedAt; + } + + public void setSubmittedAt(Date submittedAt) { + this.submittedAt = submittedAt; + } + + public List getTasks() { + return tasks; + } + + public void setTasks(List tasks) { + this.tasks = tasks; + } + + public String getIamProfile() { + return iamProfile; + } + + public void setIamProfile(String iamProfile) { + this.iamProfile = iamProfile; + } + + public String getCapacityGroup() { + return capacityGroup; + } + + public void setCapacityGroup(String capacityGroup) { + this.capacityGroup = capacityGroup; + } + + public Boolean isInService() { + return inService; + } + + public void setInService(Boolean inService) { + this.inService = inService; + } + + public List getSecurityGroups() { + return securityGroups; + } + + public void setSecurityGroups(List securityGroups) { + this.securityGroups = securityGroups; + } + + public Map getLabels() { + return labels; + } + + public void setLabels(Map labels) { + this.labels = labels; + } + + public Map getContainerAttributes() { + return containerAttributes; + } + + public void setContainerAttributes(Map containerAttributes) { + this.containerAttributes = containerAttributes; + } + + public String getJobGroupStack() { + return jobGroupStack; + } + + public void setJobGroupStack(String jobGroupStack) { + this.jobGroupStack = jobGroupStack; + } + + public String getJobGroupDetail() { + return jobGroupDetail; + } + + public void setJobGroupDetail(String jobGroupDetail) { + this.jobGroupDetail = jobGroupDetail; + } + + public String getJobGroupSequence() { + return jobGroupSequence; + } + + public void setJobGroupSequence(String jobGroupSequence) { + this.jobGroupSequence = jobGroupSequence; + } + + public String getAppName() { + return appName; + } + + public void setAppName(String appName) { + this.appName = appName; + } + + public List getHardConstraints() { + return hardConstraints; + } + + public void setHardConstraints(List hardConstraints) { + this.hardConstraints = hardConstraints; + } + + public List getSoftConstraints() { + return softConstraints; + } + + public void setSoftConstraints(List softConstraints) { + this.softConstraints = softConstraints; + } + + public int getNetworkMbps() { + return networkMbps; + } + + public void setNetworkMbps(int networkMbps) { + this.networkMbps = networkMbps; + } + + public Efs getEfs() { + return efs; + } + + public void setEfs(Efs efs) { + this.efs = efs; + } + + public MigrationPolicy 
getMigrationPolicy() { + return migrationPolicy; + } + + public List getSignedAddressAllocations() { + return signedAddressAllocations; + } + + public void setSignedAddressAllocations(List signedAddressAllocations) { + this.signedAddressAllocations = signedAddressAllocations; + } + + public void setMigrationPolicy(MigrationPolicy migrationPolicy) { + this.migrationPolicy = migrationPolicy; + } + + public String getJobState() { + return jobState; + } + + public void setDigest(String digest) { + this.digest = digest; + } + + public String getDigest() { + return digest; + } + + public DisruptionBudget getDisruptionBudget() { + return disruptionBudget; + } + + public void setDisruptionBudget(DisruptionBudget disruptionBudget) { + this.disruptionBudget = disruptionBudget; + } + + public ServiceJobProcesses getServiceJobProcesses() { + return serviceJobProcesses; + } + + public void setServiceJobProcesses(ServiceJobProcesses serviceJobProcesses) { + this.serviceJobProcesses = serviceJobProcesses; + } + + public SubmitJobRequest.Constraints getConstraints() { + return constraints; + } + + public void setConstraints(SubmitJobRequest.Constraints constraints) { + this.constraints = constraints; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/JobDescription.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/JobDescription.java new file mode 100644 index 00000000000..f4cb54581f5 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/JobDescription.java @@ -0,0 +1,835 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.protobuf.ByteString; +import com.netflix.titus.grpc.protogen.*; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class JobDescription { + + private String name; + private String type; + private String applicationName; + private String version; + private String digest; + private int instancesDesired; + private int instancesMax; + private int instancesMin; + private int cpu; + private int memory; + private int sharedMemory; + private int disk; + private int gpu; + private int retries; + private int runtimeLimitSecs; + private int networkMbps; + private int[] ports; + private Map<String, String> env; + private boolean allocateIpAddress; + + private String appName; + private String jobGroupStack; + private String jobGroupDetail; + private String jobGroupSequence; + private String user; + private List<String> softConstraints; + private List<String> hardConstraints; + private List<String> securityGroups; + private Map<String, String> labels; + private Map<String, String> containerAttributes; + private Boolean inService; + + private String entryPoint; + private String cmd; + private String iamProfile; + private String capacityGroup; + private Efs efs; + private MigrationPolicy migrationPolicy; + private Map<String, String> securityAttributes; + + private DisruptionBudget disruptionBudget; + + private SubmitJobRequest.Constraints constraints; + private ServiceJobProcesses serviceJobProcesses; + private List<SignedAddressAllocations> signedAddressAllocations; + + // Soft/Hard constraints + + JobDescription() {} + + JobDescription(SubmitJobRequest request) { + type = request.getJobType(); + name = request.getJobName(); + applicationName = request.getDockerImageName(); + version = request.getDockerImageVersion(); + digest = request.getDockerDigest(); + instancesDesired = request.getInstancesDesired(); + instancesMin = request.getInstancesMin(); + instancesMax = request.getInstancesMax(); + cpu = request.getCpu(); + memory = request.getMemory(); + sharedMemory = request.getSharedMemory(); + disk = request.getDisk(); + ports = request.getPorts(); + networkMbps = request.getNetworkMbps(); + allocateIpAddress = request.isAllocateIpAddress(); + appName = request.getApplication(); + jobGroupStack = request.getStack(); + jobGroupDetail = request.getDetail(); + softConstraints = + request.getConstraints().stream() + .filter((c) -> c.getConstraintType() == SubmitJobRequest.Constraint.ConstraintType.SOFT) + .map(SubmitJobRequest.Constraint::getConstraint) + .collect(Collectors.toList()); + hardConstraints = + request.getConstraints().stream() + .filter((c) -> c.getConstraintType() == SubmitJobRequest.Constraint.ConstraintType.HARD) + .map(SubmitJobRequest.Constraint::getConstraint) + .collect(Collectors.toList()); + user = request.getUser(); + env = request.getEnv() != null ? request.getEnv() : new HashMap<>(); + labels = request.getLabels() != null ? request.getLabels() : new HashMap<>(); + containerAttributes = + request.getContainerAttributes() != null + ? 
request.getContainerAttributes() + : new HashMap<>(); + entryPoint = request.getEntryPoint(); + cmd = request.getCmd(); + iamProfile = request.getIamProfile(); + capacityGroup = request.getCapacityGroup(); + securityGroups = request.getSecurityGroups(); + inService = request.getInService(); + migrationPolicy = request.getMigrationPolicy(); + efs = request.getEfs(); + gpu = request.getGpu(); + retries = request.getRetries(); + runtimeLimitSecs = request.getRuntimeLimitSecs(); + securityAttributes = new HashMap<>(); + + disruptionBudget = request.getDisruptionBudget(); + constraints = request.getContainerConstraints(); + serviceJobProcesses = request.getServiceJobProcesses(); + signedAddressAllocations = request.getSignedAddressAllocations(); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getApplicationName() { + return applicationName; + } + + public void setApplicationName(String applicationName) { + this.applicationName = applicationName; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public int getInstancesMin() { + return instancesMin; + } + + public void setInstancesMin(int instancesMin) { + this.instancesMin = instancesMin; + } + + public int getInstancesMax() { + return instancesMax; + } + + public void setInstancesMax(int instancesMax) { + this.instancesMax = instancesMax; + } + + public int getInstancesDesired() { + return instancesDesired; + } + + public void setInstancesDesired(int instancesDesired) { + this.instancesDesired = instancesDesired; + } + + public int getCpu() { + return cpu; + } + + public void setCpu(int cpu) { + this.cpu = cpu; + } + + public int getMemory() { + return memory; + } + + public void setMemory(int memory) { + this.memory = memory; + } + + public int getDisk() { + return disk; + } + + public void setDisk(int disk) { + this.disk = disk; + } + + public void setGpu(int gpu) { + this.gpu = gpu; + } + + public int getGpu() { + return gpu; + } + + public void setRetries(int retries) { + this.retries = retries; + } + + public int getRetries() { + return retries; + } + + public int getRuntimeLimitSecs() { + return runtimeLimitSecs; + } + + public void setRuntimeLimitSecs(int runtimeLimitSecs) { + this.runtimeLimitSecs = runtimeLimitSecs; + } + + public int getNetworkMbps() { + return networkMbps; + } + + public void setEfs(Efs efs) { + this.efs = efs; + } + + public Efs getEfs() { + return efs; + } + + public void setNetworkMbps(int networkMbps) { + this.networkMbps = networkMbps; + } + + public int[] getPorts() { + return ports; + } + + public void setPorts(int[] ports) { + this.ports = ports; + } + + public boolean getAllocateIpAddress() { + return allocateIpAddress; + } + + public void setAllocateIpAddress(boolean allocateIpAddress) { + this.allocateIpAddress = allocateIpAddress; + } + + public String getAppName() { + return appName; + } + + public void setAppName(String appName) { + this.appName = appName; + } + + public String getJobGroupStack() { + return jobGroupStack; + } + + public void setJobGroupStack(String jobGroupStack) { + this.jobGroupStack = jobGroupStack; + } + + public String getJobGroupDetail() { + return jobGroupDetail; + } + + public void setJobGroupDetail(String jobGroupDetail) { + this.jobGroupDetail = jobGroupDetail; + } + + public String getJobGroupSequence() { + return jobGroupSequence; + } + + public void setJobGroupSequence(String jobGroupSequence) { + this.jobGroupSequence = jobGroupSequence; + 
} + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public List getSoftConstraints() { + return softConstraints; + } + + public void setSoftConstraints(List softConstraints) { + this.softConstraints = softConstraints; + } + + public List getHardConstraints() { + return hardConstraints; + } + + public void setHardConstraints(List hardConstraints) { + this.hardConstraints = hardConstraints; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Map getEnv() { + return env; + } + + public void setEnv(Map env) { + this.env = env; + } + + public void setContainerAttributes(Map containerAttributes) { + this.containerAttributes = containerAttributes; + } + + public void setEnvParam(String key, String value) { + if (this.env == null) { + this.env = new HashMap<>(); + } + this.env.put(key, value); + } + + public Map getLabels() { + return labels; + } + + public void setLabels(Map labels) { + this.labels = labels; + } + + public String getEntryPoint() { + return entryPoint; + } + + public void setEntryPoint(String entryPoint) { + this.entryPoint = entryPoint; + } + + public String getCmd() { + return cmd; + } + + public void setCmd(String cmd) { + this.cmd = cmd; + } + + public String getIamProfile() { + return iamProfile; + } + + public void setIamProfile(String iamProfile) { + this.iamProfile = iamProfile; + } + + public String getCapacityGroup() { + return capacityGroup; + } + + public void setCapacityGroup(String capacityGroup) { + this.capacityGroup = capacityGroup; + } + + public Boolean getInService() { + return inService; + } + + public void setInService(Boolean inService) { + this.inService = inService; + } + + public MigrationPolicy getMigrationPolicy() { + return migrationPolicy; + } + + public void setMigrationPolicy(MigrationPolicy migrationPolicy) { + this.migrationPolicy = migrationPolicy; + } + + public List getSecurityGroups() { + return securityGroups; + } + + public void setSecurityGroups(List securityGroups) { + this.securityGroups = securityGroups; + } + + public void setDigest(String digest) { + this.digest = digest; + } + + public String getDigest() { + return digest; + } + + public DisruptionBudget getDisruptionBudget() { + return disruptionBudget; + } + + public void setDisruptionBudget(DisruptionBudget disruptionBudget) { + this.disruptionBudget = disruptionBudget; + } + + public ServiceJobProcesses getServiceJobProcesses() { + return serviceJobProcesses; + } + + public void setServiceJobProcesses(ServiceJobProcesses serviceJobProcesses) { + this.serviceJobProcesses = serviceJobProcesses; + } + + public List getSignedAddressAllocations() { + return signedAddressAllocations; + } + + public void setSignedAddressAllocations(List signedAddressAllocations) { + this.signedAddressAllocations = signedAddressAllocations; + } + + @JsonIgnore + public SubmitJobRequest.Constraints getConstraints() { + return constraints; + } + + public void setConstraints(SubmitJobRequest.Constraints constraints) { + this.constraints = constraints; + } + + @JsonIgnore + public Map getSecurityAttributes() { + return securityAttributes; + } + + @JsonIgnore + public JobDescriptor getGrpcJobDescriptor() { + + // trying to keep the same order as in the proto definition + // https://stash.corp.netflix.com/projects/TN/repos/titus-api-definitions/browse/src/main/proto/netflix/titus/titus_job_api.proto + + JobDescriptor.Builder jobDescriptorBuilder = 
JobDescriptor.newBuilder(); + + jobDescriptorBuilder.setOwner(Owner.newBuilder().setTeamEmail(user)); + jobDescriptorBuilder.setApplicationName(appName); + + if (!labels.isEmpty()) { + jobDescriptorBuilder.putAllAttributes(labels); + } + + Container.Builder containerBuilder = Container.newBuilder(); + ContainerResources.Builder containerResources = + ContainerResources.newBuilder().setAllocateIP(true); + + if (cpu != 0) { + containerResources.setCpu(cpu); + } + + if (gpu != 0) { + containerResources.setGpu(gpu); + } + + if (networkMbps != 0) { + containerResources.setNetworkMbps(networkMbps); + } + + if (memory != 0) { + containerResources.setMemoryMB(memory); + } + + if (sharedMemory != 0) { + containerResources.setShmSizeMB(sharedMemory); + } + + if (disk != 0) { + containerResources.setDiskMB(disk); + } + + if (signedAddressAllocations != null && !signedAddressAllocations.isEmpty()) { + signedAddressAllocations.forEach( + signedAddressAllocation -> { + SignedAddressAllocation.Builder builder = + convertSignedAddressAllocations(signedAddressAllocation); + containerResources.addSignedAddressAllocations(builder); + }); + } + + if (efs != null && efs.getEfsId() != null) { + ContainerResources.EfsMount.Builder efsBuilder = ContainerResources.EfsMount.newBuilder(); + efsBuilder.setEfsId(efs.getEfsId()); + efsBuilder.setMountPoint(efs.getMountPoint()); + efsBuilder.setMountPerm(convertMountPerm(efs.getMountPerm())); + if (efs.getEfsRelativeMountPoint() != null) { + efsBuilder.setEfsRelativeMountPoint(efs.getEfsRelativeMountPoint()); + } + containerResources.addEfsMounts(efsBuilder); + } + + containerBuilder.setResources(containerResources); + + SecurityProfile.Builder securityProfile = SecurityProfile.newBuilder(); + + if (securityGroups != null && !securityGroups.isEmpty()) { + securityGroups.forEach( + sg -> { + securityProfile.addSecurityGroups(sg); + }); + } + + if (iamProfile != null) { + securityProfile.setIamRole(iamProfile); + } + + if (!securityAttributes.isEmpty()) { + securityProfile.putAllAttributes(securityAttributes); + } + + containerBuilder.setSecurityProfile(securityProfile); + + Image.Builder imageBuilder = Image.newBuilder(); + imageBuilder.setName(applicationName); + if (digest != null) { + imageBuilder.setDigest(digest); + } else { + imageBuilder.setTag(version); + } + + containerBuilder.setImage(imageBuilder); + + if (entryPoint != null) { + containerBuilder.addEntryPoint(entryPoint); + } + + if (cmd != null && !cmd.isEmpty()) { + containerBuilder.addCommand(cmd); + } + + if (!containerAttributes.isEmpty()) { + containerBuilder.putAllAttributes(containerAttributes); + } + + if (!env.isEmpty()) { + containerBuilder.putAllEnv(env); + } + + if (constraints != null) { + Constraints.Builder constraintsBuilder = Constraints.newBuilder(); + containerBuilder.setHardConstraints( + constraintsBuilder.putAllConstraints(constraints.getHard())); + constraintsBuilder = Constraints.newBuilder(); + containerBuilder.setSoftConstraints( + constraintsBuilder.putAllConstraints(constraints.getSoft())); + } else { + if (!softConstraints.isEmpty()) { + containerBuilder.setSoftConstraints(constraintTransformer(softConstraints)); + } + + if (!hardConstraints.isEmpty()) { + containerBuilder.setHardConstraints(constraintTransformer(hardConstraints)); + } + } + + jobDescriptorBuilder.setContainer(containerBuilder); + + Capacity.Builder jobCapacity = Capacity.newBuilder(); + jobCapacity.setMin(instancesMin).setMax(instancesMax).setDesired(instancesDesired); + + JobGroupInfo.Builder 
jobGroupInfoBuilder = JobGroupInfo.newBuilder(); + if (jobGroupStack != null) { + jobGroupInfoBuilder.setStack(jobGroupStack); + } + if (jobGroupDetail != null) { + jobGroupInfoBuilder.setDetail(jobGroupDetail); + } + if (jobGroupSequence != null) { + jobGroupInfoBuilder.setSequence(jobGroupSequence); + } + jobDescriptorBuilder.setJobGroupInfo(jobGroupInfoBuilder); + + if (type.equals("service")) { + if (inService == null) { + inService = true; + } + + com.netflix.titus.grpc.protogen.MigrationPolicy serviceMigrationPolicy; + + if (migrationPolicy != null && migrationPolicy.getType().equals("selfManaged")) { + serviceMigrationPolicy = + com.netflix.titus.grpc.protogen.MigrationPolicy.newBuilder() + .setSelfManaged( + com.netflix.titus.grpc.protogen.MigrationPolicy.SelfManaged.newBuilder() + .build()) + .build(); + } else { + serviceMigrationPolicy = + com.netflix.titus.grpc.protogen.MigrationPolicy.newBuilder() + .setSystemDefault( + com.netflix.titus.grpc.protogen.MigrationPolicy.SystemDefault.newBuilder() + .build()) + .build(); + } + com.netflix.titus.grpc.protogen.ServiceJobSpec.ServiceJobProcesses.Builder + titusServiceJobProcesses = ServiceJobSpec.ServiceJobProcesses.newBuilder(); + if (serviceJobProcesses != null) { + titusServiceJobProcesses + .setDisableDecreaseDesired(serviceJobProcesses.isDisableDecreaseDesired()) + .setDisableIncreaseDesired(serviceJobProcesses.isDisableIncreaseDesired()) + .build(); + } + jobDescriptorBuilder.setService( + ServiceJobSpec.newBuilder() + .setEnabled(inService) + .setCapacity(jobCapacity) + .setMigrationPolicy(serviceMigrationPolicy) + .setServiceJobProcesses(titusServiceJobProcesses) + .setRetryPolicy( + RetryPolicy.newBuilder() + .setExponentialBackOff( + RetryPolicy.ExponentialBackOff.newBuilder() + .setInitialDelayMs(5000) + .setMaxDelayIntervalMs(300000)))); + } + + if (type.equals("batch")) { + BatchJobSpec.Builder batchJobSpec = BatchJobSpec.newBuilder(); + batchJobSpec.setSize(instancesDesired); + if (runtimeLimitSecs != 0) { + batchJobSpec.setRuntimeLimitSec(runtimeLimitSecs); + } + batchJobSpec.setRetryPolicy( + RetryPolicy.newBuilder() + .setImmediate(RetryPolicy.Immediate.newBuilder().setRetries(retries))); + jobDescriptorBuilder.setBatch(batchJobSpec); + } + + if (capacityGroup == null || capacityGroup.isEmpty()) { + jobDescriptorBuilder.setCapacityGroup(jobDescriptorBuilder.getApplicationName()); + } else { + jobDescriptorBuilder.setCapacityGroup(capacityGroup); + } + + if (disruptionBudget != null) { + JobDisruptionBudget budget = convertJobDisruptionBudget(disruptionBudget); + if (budget != null) { + jobDescriptorBuilder.setDisruptionBudget(budget); + } + } + + return jobDescriptorBuilder.build(); + } + + // Returns builder for Protobuf type com.netflix.titus.SignedAddressAllocation + private SignedAddressAllocation.Builder convertSignedAddressAllocations( + SignedAddressAllocations signedAddressAllocation) { + + SignedAddressAllocations.AddressLocation addressLocation = + signedAddressAllocation.getAddressAllocation().getAddressLocation(); + + AddressLocation.Builder addressLocationBuilder = AddressLocation.newBuilder(); + addressLocationBuilder.setAvailabilityZone(addressLocation.getAvailabilityZone()); + addressLocationBuilder.setRegion(addressLocation.getRegion()); + addressLocationBuilder.setSubnetId(addressLocation.getSubnetId()); + addressLocationBuilder.build(); + + AddressAllocation.Builder addressAllocationBuilder = + AddressAllocation.newBuilder().setAddressLocation(addressLocationBuilder); + 
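// Copy the remaining allocation fields (uuid, address) onto the builder before it is wrapped in the signed allocation. +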
addressAllocationBuilder.setUuid(signedAddressAllocation.getAddressAllocation().getUuid()); + addressAllocationBuilder.setAddress( + signedAddressAllocation.getAddressAllocation().getAddress()); + addressAllocationBuilder.build(); + + SignedAddressAllocation.Builder signedAddressAllocationBuilder = + SignedAddressAllocation.newBuilder().setAddressAllocation(addressAllocationBuilder); + signedAddressAllocationBuilder.setAuthoritativePublicKey( + ByteString.copyFromUtf8(signedAddressAllocation.getAuthoritativePublicKey())); + signedAddressAllocationBuilder.setHostPublicKey( + ByteString.copyFromUtf8(signedAddressAllocation.getHostPublicKey())); + signedAddressAllocationBuilder.setHostPublicKeySignature( + ByteString.copyFromUtf8(signedAddressAllocation.getHostPublicKeySignature())); + signedAddressAllocationBuilder.setMessage( + ByteString.copyFromUtf8(signedAddressAllocation.getMessage())); + signedAddressAllocationBuilder.setMessageSignature( + ByteString.copyFromUtf8(signedAddressAllocation.getMessageSignature())); + signedAddressAllocationBuilder.build(); + + return signedAddressAllocationBuilder; + } + + private JobDisruptionBudget convertJobDisruptionBudget(DisruptionBudget budget) { + JobDisruptionBudget.Builder builder = JobDisruptionBudget.newBuilder(); + if (budget.getAvailabilityPercentageLimit() != null) { + builder.setAvailabilityPercentageLimit( + JobDisruptionBudget.AvailabilityPercentageLimit.newBuilder() + .setPercentageOfHealthyContainers( + budget.availabilityPercentageLimit.getPercentageOfHealthyContainers()) + .build()); + } + if (budget.getContainerHealthProviders() != null + && !budget.getContainerHealthProviders().isEmpty()) { + budget + .getContainerHealthProviders() + .forEach( + chp -> + builder.addContainerHealthProviders( + ContainerHealthProvider.newBuilder().setName(chp.getName()).build())); + } + + if (budget.getSelfManaged() != null) { + builder.setSelfManaged( + JobDisruptionBudget.SelfManaged.newBuilder() + .setRelocationTimeMs(budget.getSelfManaged().getRelocationTimeMs()) + .build()); + } + + if (budget.getRatePercentagePerHour() != null) { + builder.setRatePercentagePerHour( + JobDisruptionBudget.RatePercentagePerHour.newBuilder() + .setMaxPercentageOfContainersRelocatedInHour( + budget.getRatePercentagePerHour().getMaxPercentageOfContainersRelocatedInHour()) + .build()); + } + + if (budget.getRatePerInterval() != null) { + builder.setRatePerInterval( + JobDisruptionBudget.RatePerInterval.newBuilder() + .setIntervalMs(budget.getRatePerInterval().getIntervalMs()) + .setLimitPerInterval(budget.getRatePerInterval().getLimitPerInterval()) + .build()); + } + + if (budget.getRatePercentagePerInterval() != null) { + builder.setRatePercentagePerInterval( + JobDisruptionBudget.RatePercentagePerInterval.newBuilder() + .setIntervalMs(budget.getRatePercentagePerInterval().getIntervalMs()) + .setPercentageLimitPerInterval( + budget.getRatePercentagePerInterval().getPercentageLimitPerInterval()) + .build()); + } + + if (budget.getRelocationLimit() != null) { + builder.setRelocationLimit( + JobDisruptionBudget.RelocationLimit.newBuilder() + .setLimit(budget.getRelocationLimit().getLimit())); + } + + if (budget.getTimeWindows() != null && !budget.getTimeWindows().isEmpty()) { + budget + .getTimeWindows() + .forEach( + tw -> { + TimeWindow.Builder timeWindowBuilder = TimeWindow.newBuilder(); + tw.getDays().forEach(day -> timeWindowBuilder.addDays(convertDay(day))); + tw.getHourlyTimeWindows() + .forEach( + htw -> { + timeWindowBuilder.addHourlyTimeWindows( + 
TimeWindow.HourlyTimeWindow.newBuilder() + .setEndHour(htw.getEndHour()) + .setStartHour(htw.getStartHour()) + .build()); + }); + timeWindowBuilder.setTimeZone(tw.getTimeZone()); + builder.addTimeWindows(timeWindowBuilder.build()); + }); + } + + if (budget.getUnhealthyTasksLimit() != null) { + builder.setUnhealthyTasksLimit( + JobDisruptionBudget.UnhealthyTasksLimit.newBuilder() + .setLimitOfUnhealthyContainers( + budget.getUnhealthyTasksLimit().getLimitOfUnhealthyContainers()) + .build()); + } + + return builder.build(); + } + + private Day convertDay(String day) { + switch (day) { + case "Monday": + return Day.Monday; + case "Tuesday": + return Day.Tuesday; + case "Wednesday": + return Day.Wednesday; + case "Thursday": + return Day.Thursday; + case "Friday": + return Day.Friday; + case "Saturday": + return Day.Saturday; + default: + return Day.Sunday; + } + } + + private MountPerm convertMountPerm(String mountPerm) { + switch (mountPerm) { + case "RO": + return MountPerm.RO; + case "WO": + return MountPerm.WO; + default: + return MountPerm.RW; + } + } + + private Constraints.Builder constraintTransformer(List constraints) { + Constraints.Builder constraintsBuilder = Constraints.newBuilder(); + constraints.forEach( + constraint -> { + constraintsBuilder.putConstraints(constraint, "true"); + }); + return constraintsBuilder; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/JobDisruptionBudgetUpdateRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/JobDisruptionBudgetUpdateRequest.java new file mode 100644 index 00000000000..ba26047742d --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/JobDisruptionBudgetUpdateRequest.java @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import lombok.Data; + +@Data +public class JobDisruptionBudgetUpdateRequest { + + private String jobId; + + private DisruptionBudget disruptionBudget; + + public JobDisruptionBudgetUpdateRequest withJobId(String jobId) { + this.jobId = jobId; + return this; + } + + public JobDisruptionBudgetUpdateRequest withDisruptionBudget(DisruptionBudget disruptionBudget) { + this.disruptionBudget = disruptionBudget; + return this; + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/MigrationPolicy.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/MigrationPolicy.java similarity index 100% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/MigrationPolicy.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/MigrationPolicy.java diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/ResizeJobRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ResizeJobRequest.java similarity index 97% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/ResizeJobRequest.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ResizeJobRequest.java index e4dede15b33..bb51051636f 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/ResizeJobRequest.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ResizeJobRequest.java @@ -22,8 +22,7 @@ public class ResizeJobRequest extends AbstractJobRequest { private int instancesMax; private int instancesMin; - public ResizeJobRequest() { - } + public ResizeJobRequest() {} public int getInstancesDesired() { return instancesDesired; @@ -51,5 +50,4 @@ public ResizeJobRequest withInstancesMin(int instancesMin) { this.instancesMin = instancesMin; return this; } - } diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ServiceJobProcesses.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ServiceJobProcesses.java new file mode 100644 index 00000000000..f948c9afbe5 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/ServiceJobProcesses.java @@ -0,0 +1,25 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import lombok.Data; + +@Data +public class ServiceJobProcesses { + + boolean disableIncreaseDesired; + boolean disableDecreaseDesired; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SignedAddressAllocations.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SignedAddressAllocations.java new file mode 100644 index 00000000000..0ada56a890d --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SignedAddressAllocations.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import lombok.Data; + +@Data +public class SignedAddressAllocations { + private AddressAllocation addressAllocation; + private String authoritativePublicKey; + private String hostPublicKey; + private String hostPublicKeySignature; + private String message; + private String messageSignature; + + @Data + public static class AddressAllocation { + private AddressLocation addressLocation; + private String uuid; + private String address; + } + + @Data + public static class AddressLocation { + private String region; + private String availabilityZone; + private String subnetId; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobRequest.java new file mode 100644 index 00000000000..090f9258821 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobRequest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.Builder; +import lombok.Data; +import lombok.Value; +import lombok.experimental.Wither; + +@JsonDeserialize(builder = SubmitJobRequest.SubmitJobRequestBuilder.class) +@Builder(builderClassName = "SubmitJobRequestBuilder", toBuilder = true) +@Wither +@Value +public class SubmitJobRequest { + + @JsonDeserialize(builder = Constraint.ConstraintBuilder.class) + @Builder(builderClassName = "ConstraintBuilder", toBuilder = true) + @Value + public static class Constraint { + enum ConstraintType { + SOFT, + HARD + } + + public static final String UNIQUE_HOST = "UniqueHost"; + public static final String ZONE_BALANCE = "ZoneBalance"; + + public static Constraint hard(String constraint) { + return new Constraint(ConstraintType.HARD, constraint); + } + + public static Constraint soft(String constraint) { + return new Constraint(ConstraintType.SOFT, constraint); + } + + @JsonProperty private final ConstraintType constraintType; + @JsonProperty private final String constraint; + + @JsonPOJOBuilder(withPrefix = "") + public static class ConstraintBuilder {} + } + + @Data + public static class Constraints { + public Map<String, String> hard; + public Map<String, String> soft; + } + + private String credentials; + private String jobType; + private String application; + private String jobName; + private String dockerImageName; + private String dockerImageVersion; + private String dockerDigest; + private String stack; + private String detail; + private String user; + private String entryPoint; + private String cmd; + private String iamProfile; + private String capacityGroup; + @Builder.Default private Boolean inService = true; + private int instancesMin; + private int instancesMax; + private int instancesDesired; + private int cpu; + private int gpu; + private int memory; + private int sharedMemory; + private int disk; + private int retries; + private int runtimeLimitSecs; + private int networkMbps; + private Efs efs; + private int[] ports; + private Map<String, String> env; + private boolean allocateIpAddress; + @Builder.Default private List<Constraint> constraints = new ArrayList<>(); + @Builder.Default private Map<String, String> labels = new HashMap<>(); + @Builder.Default private Map<String, String> containerAttributes = new HashMap<>(); + @Builder.Default private List<String> securityGroups = null; + @Builder.Default private MigrationPolicy migrationPolicy = null; + @Builder.Default private DisruptionBudget disruptionBudget = null; + + @Builder.Default private Constraints containerConstraints = null; + @Builder.Default private ServiceJobProcesses serviceJobProcesses = null; + @Builder.Default private List<SignedAddressAllocations> signedAddressAllocations = null; + + @JsonIgnore + public JobDescription getJobDescription() { + return new JobDescription(this); + } + + @JsonPOJOBuilder(withPrefix = "") + public static class SubmitJobRequestBuilder {} +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobResponse.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobResponse.java similarity index 100% rename from 
clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobResponse.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/SubmitJobResponse.java diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Task.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Task.java new file mode 100644 index 00000000000..29cd3403603 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/Task.java @@ -0,0 +1,255 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +import com.netflix.titus.grpc.protogen.TaskStatus; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; + +public class Task { + + public Task() {} + + public Task(com.netflix.titus.grpc.protogen.Task grpcTask) { + id = grpcTask.getId(); + state = + TaskState.from( + grpcTask.getStatus().getState().name(), grpcTask.getStatus().getReasonCode()); + jobId = grpcTask.getJobId(); + instanceId = grpcTask.getTaskContextOrDefault("v2.taskInstanceId", id); + host = grpcTask.getTaskContextOrDefault("agent.host", null); + region = grpcTask.getTaskContextOrDefault("agent.region", null); + zone = grpcTask.getTaskContextOrDefault("agent.zone", null); + submittedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.Accepted); + launchedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.Launched); + startedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.StartInitiated); + finishedAt = getTimestampFromStatus(grpcTask, TaskStatus.TaskState.Finished); + containerIp = grpcTask.getTaskContextOrDefault("task.containerIp", null); + // The agentId will be used temporarily by deck to lookup IPv6 until Titus API is updated. 
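+ // agent.instanceId is assumed to be the instance id of the Titus agent the task is placed on.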
+ agentId = grpcTask.getTaskContextOrDefault("agent.instanceId", null); + // Fetch ipv4 and ipv6 address from titus api if present + ipv4Address = grpcTask.getTaskContextOrDefault("task.containerIPv4", null); + ipv6Address = grpcTask.getTaskContextOrDefault("task.containerIPv6", null); + logLocation = new HashMap<>(); + logLocation.put("ui", grpcTask.getLogLocation().getUi().getUrl()); + logLocation.put("liveStream", grpcTask.getLogLocation().getLiveStream().getUrl()); + HashMap s3 = new HashMap<>(); + s3.put("accountId", grpcTask.getLogLocation().getS3().getAccountId()); + s3.put("accountName", grpcTask.getLogLocation().getS3().getAccountName()); + s3.put("region", grpcTask.getLogLocation().getS3().getRegion()); + s3.put("bucket", grpcTask.getLogLocation().getS3().getBucket()); + s3.put("key", grpcTask.getLogLocation().getS3().getKey()); + logLocation.put("s3", s3); + } + + private Date getTimestampFromStatus( + com.netflix.titus.grpc.protogen.Task grpcTask, TaskStatus.TaskState state) { + return grpcTask.getStatusHistoryList().stream() + .filter(status -> status.getState().equals(state)) + .findFirst() + .map(status -> new Date(status.getTimestamp())) + .orElse(null); + } + + private String id; + private String jobId; + private String instanceId; + private TaskState state; + private String host; + private String region; + private String zone; + private Date submittedAt; + private Date launchedAt; + private Date startedAt; + private Date finishedAt; + private String message; + private Map data; + private String stdoutLive; + private String logs; + private String snapshots; + private String containerIp; + private String agentId; + private String ipv4Address; + private String ipv6Address; + + private Map logLocation; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getInstanceId() { + return instanceId; + } + + public void setInstanceId(String instanceId) { + this.instanceId = instanceId; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public TaskState getState() { + return state; + } + + public void setState(TaskState state) { + this.state = state; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + public String getZone() { + return zone; + } + + public void setZone(String zone) { + this.zone = zone; + } + + public Date getSubmittedAt() { + return submittedAt; + } + + public void setSubmittedAt(Date submittedAt) { + this.submittedAt = submittedAt; + } + + public Date getLaunchedAt() { + return launchedAt; + } + + public void setLaunchedAt(Date launchedAt) { + this.launchedAt = launchedAt; + } + + public Date getStartedAt() { + return startedAt; + } + + public void setStartedAt(Date startedAt) { + this.startedAt = startedAt; + } + + public Date getFinishedAt() { + return finishedAt; + } + + public void setFinishedAt(Date finishedAt) { + this.finishedAt = finishedAt; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public Map getData() { + return data; + } + + public void setData(Map data) { + this.data = data; + } + + public String getStdoutLive() { + return stdoutLive; + } + + public void setStdoutLive(String stdoutLive) { + this.stdoutLive = stdoutLive; + } + + public 
String getLogs() { + return logs; + } + + public void setLogs(String logs) { + this.logs = logs; + } + + public String getSnapshots() { + return snapshots; + } + + public void setSnapshots(String snapshots) { + this.snapshots = snapshots; + } + + public String getContainerIp() { + return containerIp; + } + + public String getAgentId() { + return agentId; + } + + public void setAgentId(String agentId) { + this.agentId = agentId; + } + + public void setContainerIp(String containerIp) { + this.containerIp = containerIp; + } + + public Map getLogLocation() { + return logLocation; + } + + public String getIpv4Address() { + return ipv4Address; + } + + public void setIpv4Address(String ipv4Address) { + this.ipv4Address = ipv4Address; + } + + public String getIpv6Address() { + return ipv6Address; + } + + public void setIpv6Address(String ipv6Address) { + this.ipv6Address = ipv6Address; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TaskState.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TaskState.java new file mode 100644 index 00000000000..be9cafc84ad --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TaskState.java @@ -0,0 +1,96 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client.model; + +public enum TaskState { + ALL, + RUNNING, + DISPATCHED, + FAILED, + STOPPED, + CRASHED, + FINISHED, + STARTING, + QUEUED, + TERMINATING, + DEAD, + PENDING; // Deprecated + + public static TaskState from(String taskStateStr) { + for (TaskState taskState : TaskState.values()) { + if (taskState.name().equals(taskStateStr)) return taskState; + } + switch (taskStateStr) { + case "Accepted": + return TaskState.QUEUED; + case "Launched": + return TaskState.DISPATCHED; + case "StartInitiated": + return TaskState.STARTING; + case "Started": + return TaskState.RUNNING; + case "KillInitiated": + case "Disconnected": + case "Finished": + return TaskState.FINISHED; + default: + return null; + } + } + + public static TaskState from(String taskStateStr, String reasonCode) { + + if (taskStateStr.equals("Finished")) { + switch (reasonCode) { + case "normal": + return TaskState.FINISHED; + case "killed": + case "scaledDown": + case "stuckInState": + return TaskState.STOPPED; + case "crashed": + case "lost": + return TaskState.CRASHED; + case "failed": + case "invalidRequest": + case "runtimeLimitExceeded": + case "transientSystemError": + case "localSystemError": + case "unknownSystemError": + return TaskState.FAILED; + default: + return TaskState.FINISHED; + } + } + + switch (taskStateStr) { + case "Accepted": + return TaskState.QUEUED; + case "Launched": + return TaskState.DISPATCHED; + case "StartInitiated": + return TaskState.STARTING; + case "Started": + return TaskState.RUNNING; + case "KillInitiated": + case "Disconnected": + return TaskState.FINISHED; + default: + return null; + } + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateJobRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateJobRequest.java similarity index 95% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateJobRequest.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateJobRequest.java index 7d990075fe2..df55d317704 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateJobRequest.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateJobRequest.java @@ -18,7 +18,5 @@ public class TerminateJobRequest extends AbstractJobRequest { - public TerminateJobRequest() { - } - + public TerminateJobRequest() {} } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateTasksAndShrinkJobRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateTasksAndShrinkJobRequest.java similarity index 96% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateTasksAndShrinkJobRequest.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateTasksAndShrinkJobRequest.java index ea24714f140..3c3f09783cf 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateTasksAndShrinkJobRequest.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TerminateTasksAndShrinkJobRequest.java @@ -24,8 +24,7 @@ public class TerminateTasksAndShrinkJobRequest { private List taskIds; 
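The two `from` overloads above collapse Titus v3 state strings, plus the reason code for terminal tasks, onto the legacy enum. A small illustrative sketch of the mapping exactly as defined above (run with -ea to enable the assertions):

import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState;

class TaskStateMappingExample {
  public static void main(String[] args) {
    // Non-terminal states map directly from the v3 state string.
    assert TaskState.from("Accepted") == TaskState.QUEUED;
    assert TaskState.from("Started") == TaskState.RUNNING;

    // "Finished" is disambiguated by the reason code.
    assert TaskState.from("Finished", "killed") == TaskState.STOPPED;
    assert TaskState.from("Finished", "lost") == TaskState.CRASHED;
    assert TaskState.from("Finished", "runtimeLimitExceeded") == TaskState.FAILED;

    // Unrecognized states fall through to null rather than throwing.
    assert TaskState.from("SomethingNew") == null;
  }
}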
private boolean shrink = true; - public TerminateTasksAndShrinkJobRequest() { - } + public TerminateTasksAndShrinkJobRequest() {} public String getUser() { return user; @@ -53,5 +52,4 @@ public TerminateTasksAndShrinkJobRequest withShrink(boolean shrink) { this.shrink = shrink; return this; } - } diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TitusHealth.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TitusHealth.java similarity index 100% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/model/TitusHealth.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/TitusHealth.java diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/AvailabilityPercentageLimit.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/AvailabilityPercentageLimit.java new file mode 100644 index 00000000000..a3ab4691f77 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/AvailabilityPercentageLimit.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class AvailabilityPercentageLimit { + double percentageOfHealthyContainers; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/ContainerHealthProvider.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/ContainerHealthProvider.java new file mode 100644 index 00000000000..d78d33407f9 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/ContainerHealthProvider.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
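The request models in this package share a fluent `with*` style. As a hedged sketch (the real call site appears later in `DetachTitusTasks`), a terminate-and-shrink request is assembled like this; the task ids are invented:

import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest;
import java.util.Arrays;

class ShrinkRequestExample {
  static TerminateTasksAndShrinkJobRequest example() {
    // withShrink(true) lowers the job's desired size as the tasks are killed.
    return new TerminateTasksAndShrinkJobRequest()
        .withUser("spinnaker")
        .withShrink(true)
        .withTaskIds(Arrays.asList("task-1", "task-2")); // hypothetical ids
  }
}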
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class ContainerHealthProvider { + String name; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/HourlyTimeWindow.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/HourlyTimeWindow.java new file mode 100644 index 00000000000..9138a29a752 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/HourlyTimeWindow.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class HourlyTimeWindow { + int startHour; + int endHour; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePerInterval.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePerInterval.java new file mode 100644 index 00000000000..36c096b6c85 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePerInterval.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class RatePerInterval { + long intervalMs; + int limitPerInterval; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePercentagePerHour.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePercentagePerHour.java new file mode 100644 index 00000000000..448f967eb97 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePercentagePerHour.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class RatePercentagePerHour { + double maxPercentageOfContainersRelocatedInHour; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePercentagePerInterval.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePercentagePerInterval.java new file mode 100644 index 00000000000..aece0c52622 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RatePercentagePerInterval.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class RatePercentagePerInterval { + long intervalMs; + double percentageLimitPerInterval; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RelocationLimit.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RelocationLimit.java new file mode 100644 index 00000000000..26905515a6f --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/RelocationLimit.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class RelocationLimit { + int limit; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/SelfManaged.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/SelfManaged.java new file mode 100644 index 00000000000..7ec0c89ce45 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/SelfManaged.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class SelfManaged { + long relocationTimeMs; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/TimeWindow.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/TimeWindow.java new file mode 100644 index 00000000000..48bdaaf841a --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/TimeWindow.java @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import java.util.List; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class TimeWindow { + List days; + List hourlyTimeWindows; + String timeZone; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/UnhealthyTasksLimit.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/UnhealthyTasksLimit.java new file mode 100644 index 00000000000..81b3bece68b --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/model/disruption/UnhealthyTasksLimit.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. 
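These `disruption` classes are plain Lombok value objects mirroring the Titus disruption-budget API; `@AllArgsConstructor` gives each a full constructor alongside the `@Data` accessors. A sketch, with invented values, of composing a relocation window from `TimeWindow` and `HourlyTimeWindow`:

import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.HourlyTimeWindow;
import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.TimeWindow;
import java.util.Arrays;

class DisruptionWindowExample {
  static TimeWindow weekdayOffPeak() {
    // Permit relocations Monday through Friday, 10:00-16:00, US Pacific time.
    return new TimeWindow(
        Arrays.asList("Monday", "Tuesday", "Wednesday", "Thursday", "Friday"),
        Arrays.asList(new HourlyTimeWindow(10, 16)),
        "PST");
  }
}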
+ * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.client.model.disruption; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@AllArgsConstructor +@NoArgsConstructor +public class UnhealthyTasksLimit { + int limitOfUnhealthyContainers; +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/security/TitusCredentials.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/security/TitusCredentials.java similarity index 87% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/security/TitusCredentials.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/security/TitusCredentials.java index 1683d7b296c..19b80fc43c3 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/client/security/TitusCredentials.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/client/security/TitusCredentials.java @@ -16,8 +16,5 @@ package com.netflix.spinnaker.clouddriver.titus.client.security; -/** - * Marker interface (for now) for titus credentials - */ -public interface TitusCredentials { -} +/** Marker interface (for now) for titus credentials */ +public interface TitusCredentials {} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AbstractTitusDeployAction.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AbstractTitusDeployAction.java new file mode 100644 index 00000000000..0f3e42bdd7e --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AbstractTitusDeployAction.java @@ -0,0 +1,84 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusUtils; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription; +import java.util.List; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * Common logic for Titus deploy actions. Plus some utility methods for easing the Groovy to Java + * migration. + */ +abstract class AbstractTitusDeployAction { + + AccountCredentialsRepository accountCredentialsRepository; + TitusClientProvider titusClientProvider; + + static boolean isNullOrEmpty(String string) { + return Strings.isNullOrEmpty(string); + } + + static boolean isNullOrEmpty(List list) { + return (list == null) || list.isEmpty(); + } + + static boolean isNullOrEmpty(Map map) { + return (map == null) || map.isEmpty(); + } + + AbstractTitusDeployAction( + AccountCredentialsRepository accountCredentialsRepository, + TitusClientProvider titusClientProvider) { + this.accountCredentialsRepository = accountCredentialsRepository; + this.titusClientProvider = titusClientProvider; + } + + /** Build a Titus client, provided a deployment Source object. */ + @Nullable + TitusClient buildSourceTitusClient(TitusDeployDescription.Source source) { + if (!isNullOrEmpty(source.getAccount()) + && !isNullOrEmpty(source.getRegion()) + && !isNullOrEmpty(source.getAsgName())) { + AccountCredentials sourceCredentials = + accountCredentialsRepository.getOne(source.getAccount()); + + TitusUtils.assertTitusAccountCredentialsType(sourceCredentials); + + return titusClientProvider.getTitusClient( + (NetflixTitusCredentials) sourceCredentials, source.getRegion()); + } + return null; + } + + /** Re-sets credentials into a deserialized {@code TitusDeployDescription}. */ + void prepareDeployDescription(final TitusDeployDescription description) { + if (description.getCredentials() == null) { + AccountCredentials credentials = + accountCredentialsRepository.getOne(description.getAccount()); + TitusUtils.assertTitusAccountCredentialsType(credentials); + description.setCredentials((NetflixTitusCredentials) credentials); + } + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AbstractTitusEnableDisableAction.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AbstractTitusEnableDisableAction.java new file mode 100644 index 00000000000..d7f85d0a8de --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AbstractTitusEnableDisableAction.java @@ -0,0 +1,281 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
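A rough sketch of how a concrete action is meant to use the helpers above: rehydrate credentials on the deserialized description first, then optionally reach back to the deployment source. The subclass here is hypothetical and assumes it lives in the same package as `AbstractTitusDeployAction`:

import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository;
import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider;
import com.netflix.spinnaker.clouddriver.titus.client.TitusClient;
import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription;

class ExampleTitusDeployAction extends AbstractTitusDeployAction {
  ExampleTitusDeployAction(
      AccountCredentialsRepository accountCredentialsRepository,
      TitusClientProvider titusClientProvider) {
    super(accountCredentialsRepository, titusClientProvider);
  }

  void handle(TitusDeployDescription description) {
    // Deserialized descriptions arrive without credentials; restore them first.
    prepareDeployDescription(description);

    // Null here simply means the description carried no usable source coordinates.
    TitusClient sourceClient = buildSourceTitusClient(description.getSource());
    if (sourceClient == null) {
      return;
    }
    // ... interrogate the source job via sourceClient ...
  }
}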
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import static com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus.OUT_OF_SERVICE; +import static com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus.UP; + +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.ActivateJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import com.netflix.spinnaker.clouddriver.titus.client.model.Task; +import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery.TitusEurekaSupport; +import com.netflix.spinnaker.kork.exceptions.UserException; +import com.netflix.titus.grpc.protogen.LoadBalancerId; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AbstractTitusEnableDisableAction { + private final Logger log = LoggerFactory.getLogger(getClass()); + + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusEurekaSupport discoverySupport; + private final TitusClientProvider titusClientProvider; + + public AbstractTitusEnableDisableAction( + AccountCredentialsProvider accountCredentialsProvider, + TitusEurekaSupport discoverySupport, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.discoverySupport = discoverySupport; + this.titusClientProvider = titusClientProvider; + } + + /** + * Mark a Titus job and containing tasks as UP or OUT_OF_SERVICE in discovery _and_ any associated + * load balancers. + * + * @param saga Enclosing saga. + * @param description Enclosing description. + * @param shouldDisable Whether or not job and tasks should be marked OUT_OF_SERVICE (true) or UP + * (false) + */ + void markJob(Saga saga, EnableDisableServerGroupDescription description, boolean shouldDisable) { + String presentParticipling = shouldDisable ? "Disabling" : "Enabling"; + String verb = shouldDisable ? 
"Disable" : "Enable"; + + saga.log( + "%s server group %s/%s", + presentParticipling, description.getRegion(), description.getServerGroupName()); + + NetflixTitusCredentials credentials = + (NetflixTitusCredentials) + accountCredentialsProvider.getCredentials(description.getAccount()); + description.setCredentials(credentials); + + try { + TitusClient titusClient = + titusClientProvider.getTitusClient(description.getCredentials(), description.getRegion()); + + TitusLoadBalancerClient titusLoadBalancerClient = + titusClientProvider.getTitusLoadBalancerClient( + description.getCredentials(), description.getRegion()); + + String serverGroupName = description.getServerGroupName(); + String region = description.getRegion(); + + Job job = titusClient.findJobByName(serverGroupName, true); + if (job == null) { + saga.log("No job named '%s' found in %s", serverGroupName, region); + return; + } + + if (shouldDisable + && titusLoadBalancerClient != null + && description.getDesiredPercentage() != null) { + if (job.getLabels().containsKey("spinnaker.targetGroups")) { + throw new TitusException( + "Titus does not support percentage-based disabling for server groups in one or more target groups"); + } + } + + // If desired percentage is part of the description (ie. Monitored Deploy), disable the job + // only if it's set to 100 + if (shouldDisable && description.getDesiredPercentage() != null) { + if (description.getDesiredPercentage() == 100) { + saga.log("Disabling job (desiredPercentage: %d)", description.getDesiredPercentage()); + activateJob(titusClient, job, false); + saga.log("Disabled job (desiredPercentage: %d)", description.getDesiredPercentage()); + } else { + saga.log("Not disabling job (desiredPercentage: %d)", description.getDesiredPercentage()); + } + } else { + activateJob(titusClient, job, !shouldDisable); + } + + if (titusLoadBalancerClient != null + && job.getLabels().containsKey("spinnaker.targetGroups")) { + if (shouldDisable) { + saga.log("Removing %s from target groups", job.getId()); + titusLoadBalancerClient + .getJobLoadBalancers(job.getId()) + .forEach( + loadBalancerId -> { + saga.log("Removing %s from %s", job.getId(), loadBalancerId.getId()); + titusLoadBalancerClient.removeLoadBalancer(job.getId(), loadBalancerId.getId()); + saga.log("Removed %s from %s", job.getId(), loadBalancerId.getId()); + }); + saga.log("Removed %s from target groups", job.getId()); + } else { + saga.log("Restoring %s into target groups", job.getId()); + Set attachedLoadBalancers = + titusLoadBalancerClient.getJobLoadBalancers(job.getId()).stream() + .map(LoadBalancerId::getId) + .collect(Collectors.toSet()); + + for (String loadBalancerId : job.getLabels().get("spinnaker.targetGroups").split(",")) { + if (!attachedLoadBalancers.contains(loadBalancerId)) { + saga.log("Restoring %s into %s", job.getId(), loadBalancerId); + titusLoadBalancerClient.addLoadBalancer(job.getId(), loadBalancerId); + } + } + + saga.log("Restored %s into target groups", job.getId()); + } + } + + if (job.getTasks() != null && !job.getTasks().isEmpty()) { + DiscoveryStatus status = shouldDisable ? 
OUT_OF_SERVICE : UP; + saga.log("Marking server group %s as %s with Discovery", serverGroupName, status); + + List<String> instanceIds = + job.getTasks().stream().map(Task::getId).collect(Collectors.toList()); + + EnableDisableInstanceDiscoveryDescription updateDiscoveryDescription = + new EnableDisableInstanceDiscoveryDescription(); + updateDiscoveryDescription.setCredentials(description.getCredentials()); + updateDiscoveryDescription.setRegion(region); + updateDiscoveryDescription.setAsgName(serverGroupName); + updateDiscoveryDescription.setInstanceIds(instanceIds); + + if (description.getDesiredPercentage() != null && shouldDisable) { + instanceIds = + discoverySupport.getInstanceToModify( + description.getAccount(), + region, + serverGroupName, + instanceIds, + description.getDesiredPercentage()); + + saga.log( + "Disabling instances %s on ASG %s with percentage %s", + instanceIds, serverGroupName, description.getDesiredPercentage()); + } + + discoverySupport.updateDiscoveryStatusForInstances( + updateDiscoveryDescription, getTask(), verb.toUpperCase(), status, instanceIds); + } + + try { + titusClient.setAutoscaleEnabled(job.getId(), !shouldDisable); + } catch (Exception e) { + log.error( + "Error toggling autoscale enabled for Titus job {} in {}/{}", + job.getId(), + description.getAccount(), + description.getRegion(), + e); + } + + saga.log("Finished %s server group %s", presentParticipling, serverGroupName); + } catch (Exception e) { + String errorMessage = + String.format( + "Could not %s server group '%s' in region %s! Failure Type: %s; Message: %s", + verb, + description.getServerGroupName(), + description.getRegion(), + e.getClass().getSimpleName(), + e.getMessage()); + log.error(errorMessage, e); + saga.log(errorMessage); + + throw e; + } + } + + /** + * Mark one or more Titus tasks as UP or OUT_OF_SERVICE in discovery. + * + *
<p>
No other changes are made to the tasks or job they are a member of. + * + * @param saga Enclosing saga. + * @param description Enclosing description. + * @param shouldDisable Whether or not instances should be marked OUT_OF_SERVICE (true) or UP + * (false) + */ + void markTasks( + Saga saga, EnableDisableInstanceDiscoveryDescription description, boolean shouldDisable) { + String presentParticipling = shouldDisable ? "Disabling" : "Enabling"; + String verb = shouldDisable ? "Disable" : "Enable"; + + saga.log( + "%s instances %s from %s/%s in discovery", + presentParticipling, + description.getInstanceIds(), + description.getRegion(), + description.getAsgName()); + + NetflixTitusCredentials credentials = + (NetflixTitusCredentials) + accountCredentialsProvider.getCredentials(description.getAccount()); + description.setCredentials(credentials); + + if (!description.getCredentials().getDiscoveryEnabled()) { + throw new UserException("Discovery is not enabled, unable to modify instance status"); + } + + TitusClient titusClient = + titusClientProvider.getTitusClient(description.getCredentials(), description.getRegion()); + Job job = titusClient.findJobByName(description.getAsgName(), true); + if (job == null) { + return; + } + + List titusInstanceIds = + job.getTasks().stream() + .filter(it -> it.getState() == TaskState.RUNNING || it.getState() == TaskState.STARTING) + .filter(it -> description.getInstanceIds().contains(it.getId())) + .map(Task::getId) + .collect(Collectors.toList()); + + if (titusInstanceIds.isEmpty()) { + return; + } + + DiscoveryStatus status = shouldDisable ? OUT_OF_SERVICE : UP; + discoverySupport.updateDiscoveryStatusForInstances( + description, getTask(), verb.toUpperCase(), status, titusInstanceIds); + } + + private void activateJob(TitusClient provider, Job job, boolean inService) { + provider.activateJob( + (ActivateJobRequest) + new ActivateJobRequest() + .withInService(inService) + .withUser("spinnaker") + .withJobId(job.getId())); + } + + private com.netflix.spinnaker.clouddriver.data.task.Task getTask() { + return TaskRepository.threadLocalTask.get(); + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AttachTitusServiceLoadBalancers.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AttachTitusServiceLoadBalancers.java new file mode 100644 index 00000000000..e0fd08cb1e9 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/AttachTitusServiceLoadBalancers.java @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
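The eligibility filter inside `markTasks` above deserves emphasis: only tasks that are currently RUNNING or STARTING, and that the caller explicitly named, have their discovery status flipped. The same intersection as a standalone sketch (`job` and `requestedIds` are assumed inputs, and `Job#getTasks()` is assumed to be typed `List<Task>`):

import com.netflix.spinnaker.clouddriver.titus.client.model.Job;
import com.netflix.spinnaker.clouddriver.titus.client.model.Task;
import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState;
import java.util.List;
import java.util.stream.Collectors;

class EligibleTaskFilterExample {
  static List<String> eligibleTaskIds(Job job, List<String> requestedIds) {
    return job.getTasks().stream()
        // Only live tasks can be marked UP or OUT_OF_SERVICE.
        .filter(t -> t.getState() == TaskState.RUNNING || t.getState() == TaskState.STARTING)
        // And only the ones the caller asked about.
        .filter(t -> requestedIds.contains(t.getId()))
        .map(Task::getId)
        .collect(Collectors.toList());
  }
}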
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.TargetGroupLookupHelper; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import com.netflix.spinnaker.clouddriver.titus.client.TitusLoadBalancerClient; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusLoadBalancerAttached; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class AttachTitusServiceLoadBalancers extends AbstractTitusDeployAction + implements SagaAction<AttachTitusServiceLoadBalancersCommand> { + + private final TitusClientProvider titusClientProvider; + + @Autowired + public AttachTitusServiceLoadBalancers( + AccountCredentialsRepository accountCredentialsRepository, + TitusClientProvider titusClientProvider) { + super(accountCredentialsRepository, titusClientProvider); + this.titusClientProvider = titusClientProvider; + } + + @Nonnull + @Override + public Result apply(@Nonnull AttachTitusServiceLoadBalancersCommand command, @Nonnull Saga saga) { + final TitusDeployDescription description = command.description; + + prepareDeployDescription(description); + + TitusLoadBalancerClient loadBalancerClient = + titusClientProvider.getTitusLoadBalancerClient( + description.getCredentials(), description.getRegion()); + if (loadBalancerClient == null) { + throw new TitusException("Unable to create load balancing client in target account/region"); + } + + TargetGroupLookupHelper.TargetGroupLookupResult targetGroups = + command.getTargetGroupLookupResult(); + + if (targetGroups != null) { + String jobUri = command.getJobUri(); + + targetGroups + .getTargetGroupARNs() + .forEach( + targetGroupArn -> { + loadBalancerClient.addLoadBalancer(jobUri, targetGroupArn); + saga.log("Attached %s to %s", targetGroupArn, jobUri); + saga.addEvent( + TitusLoadBalancerAttached.builder() + .jobUri(jobUri) + .targetGroupArn(targetGroupArn) + .build()); + }); + + saga.log("Load balancers applied"); + } + + return new Result(); + } + + @Builder(builderClassName = "AttachTitusServiceLoadBalancersCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + AttachTitusServiceLoadBalancersCommand.AttachTitusServiceLoadBalancersCommandBuilder + .class) + @JsonTypeName("attachTitusServiceLoadBalancersCommand") + @Value + public static class AttachTitusServiceLoadBalancersCommand implements SagaCommand { + @Nonnull private TitusDeployDescription description; + @Nonnull private String jobUri; + @Nullable private TargetGroupLookupHelper.TargetGroupLookupResult targetGroupLookupResult; + @NonFinal private EventMetadata metadata; + +
@Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class AttachTitusServiceLoadBalancersCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/CopyTitusServiceScalingPolicies.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/CopyTitusServiceScalingPolicies.java new file mode 100644 index 00000000000..c38123fc30d --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/CopyTitusServiceScalingPolicies.java @@ -0,0 +1,182 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusUtils; +import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusScalingPolicyCopied; +import com.netflix.spinnaker.clouddriver.titus.exceptions.InsufficientDeploySourceStateException; +import com.netflix.titus.grpc.protogen.PutPolicyRequest; +import com.netflix.titus.grpc.protogen.ScalingPolicyResult; +import com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class CopyTitusServiceScalingPolicies extends AbstractTitusDeployAction + implements SagaAction { + + private static final List IGNORED_STATES = + Arrays.asList(ScalingPolicyState.Deleted, ScalingPolicyState.Deleting); + + @Autowired 
+ public CopyTitusServiceScalingPolicies( + AccountCredentialsRepository accountCredentialsRepository, + TitusClientProvider titusClientProvider) { + super(accountCredentialsRepository, titusClientProvider); + } + + @Nonnull + @Override + public Result apply(@Nonnull CopyTitusServiceScalingPoliciesCommand command, @Nonnull Saga saga) { + final TitusDeployDescription description = command.description; + + prepareDeployDescription(description); + + if (!description.isCopySourceScalingPolicies() + || !description.getCopySourceScalingPoliciesAndActions()) { + saga.log("Not applying scaling policies: None to apply"); + return new Result(); + } + + TitusDeployDescription.Source source = description.getSource(); + TitusClient sourceClient = buildSourceTitusClient(source); + if (sourceClient == null) { + // No source, no copying. + saga.log("Not applying scaling policies: No source to copy from"); + return new Result(); + } + + TitusAutoscalingClient autoscalingClient = + titusClientProvider.getTitusAutoscalingClient( + description.getCredentials(), description.getRegion()); + if (autoscalingClient == null) { + saga.log("Unable to create client in target account/region; policies will not be copied"); + return new Result(); + } + + TitusAutoscalingClient sourceAutoscalingClient = buildSourceAutoscalingClient(source); + if (sourceAutoscalingClient == null) { + saga.log("Unable to create client in source account/region; policies will not be copied"); + return new Result(); + } + + Job sourceJob = sourceClient.findJobByName(source.getAsgName()); + if (sourceJob == null) { + saga.log( + "Unable to locate source (%s:%s:%s)", + source.getAccount(), source.getRegion(), source.getAsgName()); + } else { + final String jobUri = command.jobUri; + final String serverGroupName = command.deployedServerGroupName; + + saga.log("Copying scaling policies from source (Job URI: %s)", jobUri); + List policies = + Optional.ofNullable(sourceAutoscalingClient.getJobScalingPolicies(sourceJob.getId())) + .orElse(Collections.emptyList()); + saga.log("Found %d scaling policies for source (Job URI: %s)", policies.size(), jobUri); + policies.forEach( + policy -> { + if (!IGNORED_STATES.contains(policy.getPolicyState().getState())) { + PutPolicyRequest.Builder builder = + PutPolicyRequest.newBuilder() + .setJobId(jobUri) + .setScalingPolicy( + UpsertTitusScalingPolicyDescription.fromScalingPolicyResult( + description.getRegion(), policy, serverGroupName) + .toScalingPolicyBuilder()); + autoscalingClient.createScalingPolicy(builder.build()); + saga.addEvent( + TitusScalingPolicyCopied.builder() + .serverGroupName(serverGroupName) + .region(description.getRegion()) + .sourcePolicyId(policy.getId().getId()) + .build()); + } + }); + } + + saga.log("Copy scaling policies completed"); + + return new Result(); + } + + private TitusAutoscalingClient buildSourceAutoscalingClient( + TitusDeployDescription.Source source) { + if (!isNullOrEmpty(source.getAccount()) + && !isNullOrEmpty(source.getRegion()) + && !isNullOrEmpty(source.getAsgName())) { + AccountCredentials sourceCredentials = + accountCredentialsRepository.getOne(source.getAccount()); + + TitusUtils.assertTitusAccountCredentialsType(sourceCredentials); + + return titusClientProvider.getTitusAutoscalingClient( + (NetflixTitusCredentials) sourceCredentials, source.getRegion()); + } + + throw new InsufficientDeploySourceStateException( + "Could not create titus client from deployment Source", + source.getAccount(), + source.getRegion(), + source.getAsgName()); + } + + 
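Condensed, the copy loop above lists the source job's policies, skips any in a Deleted or Deleting state, and re-puts each one against the new job id. A restatement of that core under the same client types; `sourceJobId` and `targetJobUri` are assumed inputs:

import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient;
import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription;
import com.netflix.titus.grpc.protogen.PutPolicyRequest;
import com.netflix.titus.grpc.protogen.ScalingPolicyResult;
import com.netflix.titus.grpc.protogen.ScalingPolicyStatus.ScalingPolicyState;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Optional;

class CopyPoliciesExample {
  static void copyActivePolicies(
      TitusAutoscalingClient source,
      TitusAutoscalingClient target,
      String sourceJobId,
      String targetJobUri,
      String region,
      String serverGroupName) {
    List<ScalingPolicyResult> policies =
        Optional.ofNullable(source.getJobScalingPolicies(sourceJobId))
            .orElse(Collections.emptyList());
    for (ScalingPolicyResult policy : policies) {
      // Deleted/Deleting policies are skipped, matching IGNORED_STATES above.
      if (EnumSet.of(ScalingPolicyState.Deleted, ScalingPolicyState.Deleting)
          .contains(policy.getPolicyState().getState())) {
        continue;
      }
      target.createScalingPolicy(
          PutPolicyRequest.newBuilder()
              .setJobId(targetJobUri)
              .setScalingPolicy(
                  UpsertTitusScalingPolicyDescription.fromScalingPolicyResult(
                          region, policy, serverGroupName)
                      .toScalingPolicyBuilder())
              .build());
    }
  }
}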
@Builder(builderClassName = "CopyTitusServiceScalingPoliciesCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + CopyTitusServiceScalingPoliciesCommand.CopyTitusServiceScalingPoliciesCommandBuilder + .class) + @JsonTypeName("copyTitusServiceScalingPoliciesCommand") + @Value + public static class CopyTitusServiceScalingPoliciesCommand implements SagaCommand { + @Nonnull private TitusDeployDescription description; + @Nonnull private String jobUri; + @Nonnull private String deployedServerGroupName; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class CopyTitusServiceScalingPoliciesCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DeleteTitusScalingPolicy.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DeleteTitusScalingPolicy.java new file mode 100644 index 00000000000..e70563bacc1 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DeleteTitusScalingPolicy.java @@ -0,0 +1,110 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
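Every command class in this package repeats the same serialization recipe: a Lombok `@Builder` paired with an empty-prefix `@JsonPOJOBuilder` so Jackson can drive the builder, plus `@JsonTypeName` for polymorphic persistence. A hedged sketch of the round trip this enables, assuming the saga framework resolves `SagaCommand` subtypes by name and that the mapper has been told about them:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spinnaker.clouddriver.saga.SagaCommand;

class CommandRoundTripExample {
  static SagaCommand roundTrip(SagaCommand command) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Assumption: explicit subtype registration; the saga framework may wire this differently.
    mapper.registerSubtypes(
        CopyTitusServiceScalingPolicies.CopyTitusServiceScalingPoliciesCommand.class);
    String json = mapper.writeValueAsString(command); // type id comes from @JsonTypeName
    return mapper.readValue(json, SagaCommand.class); // rebuilt via the @JsonPOJOBuilder
  }
}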
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DeleteTitusScalingPolicyDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusScalingPolicyDeleted; +import com.netflix.spinnaker.kork.exceptions.UserException; +import com.netflix.titus.grpc.protogen.DeletePolicyRequest; +import com.netflix.titus.grpc.protogen.ScalingPolicyID; +import java.util.Collections; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.stereotype.Component; + +@Component +public class DeleteTitusScalingPolicy + implements SagaAction { + + private final TitusClientProvider titusClientProvider; + final AccountCredentialsRepository accountCredentialsRepository; + + public DeleteTitusScalingPolicy( + TitusClientProvider titusClientProvider, + AccountCredentialsRepository accountCredentialsRepository) { + this.titusClientProvider = titusClientProvider; + this.accountCredentialsRepository = accountCredentialsRepository; + } + + @NotNull + @Override + public Result apply( + @NotNull DeleteTitusScalingPolicy.DeleteTitusScalingPolicyCommand command, + @NotNull Saga saga) { + saga.log("Initializing Delete Scaling Policy " + command.description.getScalingPolicyID()); + AccountCredentials accountCredentials = + accountCredentialsRepository.getOne(command.description.getAccount()); + + TitusAutoscalingClient client = + titusClientProvider.getTitusAutoscalingClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + if (client == null) { + throw new UserException( + new UnsupportedOperationException( + "Autoscaling is not supported for this account/region")) + .setRetryable(false); + } + + ScalingPolicyID id = + ScalingPolicyID.newBuilder().setId(command.description.getScalingPolicyID()).build(); + + client.deleteScalingPolicy(DeletePolicyRequest.newBuilder().setId(id).build()); + + saga.log("Deleted Scaling Policy " + command.description.getScalingPolicyID()); + + return new Result( + null, + Collections.singletonList( + TitusScalingPolicyDeleted.builder() + .region(command.description.getRegion()) + .policyId(command.description.getScalingPolicyID()) + .build())); + } + + @Builder(builderClassName = "DeleteTitusScalingPolicyCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = DeleteTitusScalingPolicyCommand.DeleteTitusScalingPolicyCommandBuilder.class) + @JsonTypeName("deleteTitusScalingPolicyCommand") + @Value + public static class DeleteTitusScalingPolicyCommand implements SagaCommand { + @Nonnull private 
DeleteTitusScalingPolicyDescription description; + + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class DeleteTitusScalingPolicyCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DestroyTitusJob.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DestroyTitusJob.java new file mode 100644 index 00000000000..c01c5755885 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DestroyTitusJob.java @@ -0,0 +1,117 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateJobRequest; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class DestroyTitusJob implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public DestroyTitusJob( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply(@NotNull DestroyTitusJob.DestroyTitusJobCommand command, @NotNull Saga saga) { + saga.log( + "Destroying Titus Job %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId()); + + AccountCredentials accountCredentials = + 
accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + try { + titusClient.terminateJob( + (TerminateJobRequest) + new TerminateJobRequest() + .withJobId(command.description.getJobId()) + .withUser(command.description.getUser())); + + saga.log( + "Destroyed Titus Job %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId()); + + return new Result(); + } catch (Exception e) { + if (e instanceof StatusRuntimeException) { + StatusRuntimeException statusRuntimeException = (StatusRuntimeException) e; + if (statusRuntimeException.getStatus().getCode() == Status.NOT_FOUND.getCode()) { + saga.log("No titus job found"); + return new Result(); + } + } + + throw e; + } + } + + @Builder(builderClassName = "DestroyTitusJobCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = DestroyTitusJob.DestroyTitusJobCommand.DestroyTitusJobCommandBuilder.class) + @JsonTypeName("destroyTitusJobCommand") + @Value + public static class DestroyTitusJobCommand implements SagaCommand { + @Nonnull DestroyTitusJobDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class DestroyTitusJobCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DetachTitusTasks.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DetachTitusTasks.java new file mode 100644 index 00000000000..a1e6e9176fc --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DetachTitusTasks.java @@ -0,0 +1,166 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
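The catch block above makes job destruction idempotent: a gRPC NOT_FOUND status means the job is already gone, so the saga logs and completes instead of failing. The same guard, isolated:

import io.grpc.Status;
import io.grpc.StatusRuntimeException;

class NotFoundGuardExample {
  // True when the server reports the job no longer exists.
  static boolean isAlreadyGone(Exception e) {
    return e instanceof StatusRuntimeException
        && ((StatusRuntimeException) e).getStatus().getCode() == Status.NOT_FOUND.getCode();
  }
}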
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import static java.lang.String.format; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import com.netflix.spinnaker.clouddriver.titus.client.model.ResizeJobRequest; +import com.netflix.spinnaker.clouddriver.titus.client.model.Task; +import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DetachTitusInstancesDescription; +import com.netflix.spinnaker.kork.exceptions.UserException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class DetachTitusTasks implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public DetachTitusTasks( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply( + @NotNull DetachTitusTasks.DetachTitusTasksCommand command, @NotNull Saga saga) { + saga.log( + "Detaching Titus Tasks %s:%s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.getDescription().getAsgName(), + command.description.getInstanceIds()); + + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + Job job = titusClient.findJobByName(command.description.getAsgName(), true); + if (job == null) { + saga.log("Job not found"); + throw new UserException( + "No titus server group named '" + command.description.getAsgName() + "' found"); + } + + Set validInstanceIds = + Sets.intersection( + new HashSet<>(command.getDescription().getInstanceIds()), + job.getTasks().stream().map(Task::getId).collect(Collectors.toSet())); + + if (validInstanceIds.isEmpty()) { + saga.log("No detachable instances"); + return new Result(); + } + + int newMin = job.getInstances() - validInstanceIds.size(); + if (newMin < job.getInstancesMin()) { + if 
(command.description.getAdjustMinIfNecessary()) { + if (newMin < 0) { + saga.log("Cannot adjust min size below 0"); + } else { + titusClient.resizeJob( + (ResizeJobRequest) + new ResizeJobRequest() + .withInstancesDesired(job.getInstancesDesired()) + .withInstancesMax(job.getInstancesMax()) + .withInstancesMin(newMin) + .withJobId(job.getId()) + .withUser(command.description.getUser())); + } + } else { + saga.log( + "Cannot decrement server group below minSize - set adjustMinIfNecessary to resize down minSize before detaching instances"); + throw new UserException( + format( + "Invalid server group capacity for detachInstances (min: %d, max: %d, desired: %d)", + job.getInstancesMin(), job.getInstancesMax(), job.getInstancesDesired())); + } + } + + saga.log( + "Filtered Titus Tasks %s:%s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.getDescription().getAsgName(), + validInstanceIds); + + titusClient.terminateTasksAndShrink( + new TerminateTasksAndShrinkJobRequest() + .withUser(command.description.getUser()) + .withShrink(true) + .withTaskIds(new ArrayList<>(validInstanceIds))); + + saga.log( + "Detached Titus Tasks %s:%s:%s:%s (filtered)", + command.description.getAccount(), + command.description.getRegion(), + command.getDescription().getAsgName(), + validInstanceIds); + + return new Result(); + } + + @Builder(builderClassName = "DetachTitusTasksCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = DetachTitusTasks.DetachTitusTasksCommand.DetachTitusTasksCommandBuilder.class) + @JsonTypeName("detachTitusTasksCommand") + @Value + public static class DetachTitusTasksCommand implements SagaCommand { + @Nonnull DetachTitusInstancesDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class DetachTitusTasksCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DisableTitusJob.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DisableTitusJob.java new file mode 100644 index 00000000000..296d7b0ecb4 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DisableTitusJob.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery.TitusEurekaSupport; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class DisableTitusJob extends AbstractTitusEnableDisableAction + implements SagaAction { + + @Autowired + public DisableTitusJob( + AccountCredentialsProvider accountCredentialsProvider, + TitusEurekaSupport discoverySupport, + TitusClientProvider titusClientProvider) { + super(accountCredentialsProvider, discoverySupport, titusClientProvider); + } + + @NotNull + @Override + public Result apply(@NotNull DisableTitusJob.DisableTitusJobCommand command, @NotNull Saga saga) { + super.markJob(saga, command.getDescription(), true); + + return new Result(); + } + + @Builder(builderClassName = "DisableTitusJobCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = DisableTitusJob.DisableTitusJobCommand.DisableTitusJobCommandBuilder.class) + @JsonTypeName("disableTitusJobCommand") + @Value + public static class DisableTitusJobCommand implements SagaCommand { + @Nonnull EnableDisableServerGroupDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class DisableTitusJobCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DisableTitusTasks.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DisableTitusTasks.java new file mode 100644 index 00000000000..38339365a6c --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DisableTitusTasks.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery.TitusEurekaSupport; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class DisableTitusTasks extends AbstractTitusEnableDisableAction + implements SagaAction { + + @Autowired + public DisableTitusTasks( + AccountCredentialsProvider accountCredentialsProvider, + TitusEurekaSupport discoverySupport, + TitusClientProvider titusClientProvider) { + super(accountCredentialsProvider, discoverySupport, titusClientProvider); + } + + @NotNull + @Override + public Result apply( + @NotNull DisableTitusTasks.DisableTitusTasksCommand command, @NotNull Saga saga) { + super.markTasks(saga, command.getDescription(), true); + + return new Result(); + } + + @Builder(builderClassName = "DisableTitusTasksCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = DisableTitusTasks.DisableTitusTasksCommand.DisableTitusTasksCommandBuilder.class) + @JsonTypeName("disableTitusTasksCommand") + @Value + public static class DisableTitusTasksCommand implements SagaCommand { + @Nonnull EnableDisableInstanceDiscoveryDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class DisableTitusTasksCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/EnableTitusJob.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/EnableTitusJob.java new file mode 100644 index 00000000000..fd23f7dd1bf --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/EnableTitusJob.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableServerGroupDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery.TitusEurekaSupport; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class EnableTitusJob extends AbstractTitusEnableDisableAction + implements SagaAction { + + @Autowired + public EnableTitusJob( + AccountCredentialsProvider accountCredentialsProvider, + TitusEurekaSupport discoverySupport, + TitusClientProvider titusClientProvider) { + super(accountCredentialsProvider, discoverySupport, titusClientProvider); + } + + @NotNull + @Override + public Result apply(@NotNull EnableTitusJob.EnableTitusJobCommand command, @NotNull Saga saga) { + super.markJob(saga, command.getDescription(), false); + + return new Result(); + } + + @Builder(builderClassName = "EnableTitusJobCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = EnableTitusJob.EnableTitusJobCommand.EnableTitusJobCommandBuilder.class) + @JsonTypeName("enableTitusJobCommand") + @Value + public static class EnableTitusJobCommand implements SagaCommand { + @Nonnull EnableDisableServerGroupDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class EnableTitusJobCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/EnableTitusTasks.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/EnableTitusTasks.java new file mode 100644 index 00000000000..e624528659f --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/EnableTitusTasks.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.EnableDisableInstanceDiscoveryDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.discovery.TitusEurekaSupport; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class EnableTitusTasks extends AbstractTitusEnableDisableAction + implements SagaAction { + + @Autowired + public EnableTitusTasks( + AccountCredentialsProvider accountCredentialsProvider, + TitusEurekaSupport discoverySupport, + TitusClientProvider titusClientProvider) { + super(accountCredentialsProvider, discoverySupport, titusClientProvider); + } + + @NotNull + @Override + public Result apply( + @NotNull EnableTitusTasks.EnableTitusTasksCommand command, @NotNull Saga saga) { + super.markTasks(saga, command.getDescription(), false); + + return new Result(); + } + + @Builder(builderClassName = "EnableTitusTasksCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = EnableTitusTasks.EnableTitusTasksCommand.EnableTitusTasksCommandBuilder.class) + @JsonTypeName("enableTitusTasksCommand") + @Value + public static class EnableTitusTasksCommand implements SagaCommand { + @Nonnull EnableDisableInstanceDiscoveryDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class EnableTitusTasksCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/MonitorTitusScalingPolicy.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/MonitorTitusScalingPolicy.java new file mode 100644 index 00000000000..19b8210a7bc --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/MonitorTitusScalingPolicy.java @@ -0,0 +1,125 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusScalingPolicyModified; +import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.spinnaker.kork.exceptions.IntegrationException; +import com.netflix.spinnaker.kork.exceptions.UserException; +import com.netflix.titus.grpc.protogen.ScalingPolicyResult; +import com.netflix.titus.grpc.protogen.ScalingPolicyStatus; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class MonitorTitusScalingPolicy + implements SagaAction<MonitorTitusScalingPolicy.MonitorTitusScalingPolicyCommand> { private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + private final RetrySupport retrySupport; + + @Autowired + public MonitorTitusScalingPolicy( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider, + RetrySupport retrySupport) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + this.retrySupport = retrySupport; + } + + @NotNull + @Override + public Result apply( + @NotNull MonitorTitusScalingPolicy.MonitorTitusScalingPolicyCommand command, + @NotNull Saga saga) { + TitusScalingPolicyModified event = saga.getEvent(TitusScalingPolicyModified.class); + + saga.log( + "Monitoring Titus Scaling Policy %s:%s:%s (scalingPolicyId: %s)", + event.getAccount(), event.getRegion(), event.getJobId(), event.getScalingPolicyId()); + + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(event.getAccount()); + + TitusAutoscalingClient titusClient = + titusClientProvider.getTitusAutoscalingClient( + (NetflixTitusCredentials) accountCredentials, event.getRegion()); + + if (titusClient == null) { + throw new UserException("Autoscaling is not supported for this account/region"); + } + + // make sure the new policy was applied + retrySupport.retry( + () -> { + ScalingPolicyResult updatedPolicy = + titusClient.getScalingPolicy(event.getScalingPolicyId()); + if (updatedPolicy == null + || (updatedPolicy.getPolicyState().getState() + != ScalingPolicyStatus.ScalingPolicyState.Applied)) { + throw new IntegrationException("Scaling policy updates have not been applied") + .setRetryable(true); + } + return true; + }, + 10, + 5000, + false); + + saga.log( + "Monitored Titus Scaling Policy %s:%s:%s (scalingPolicyId: %s)", + event.getAccount(), event.getRegion(), event.getJobId(), event.getScalingPolicyId()); + + return new
Result(); + } + + @Builder(builderClassName = "MonitorTitusScalingPolicyCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + MonitorTitusScalingPolicy.MonitorTitusScalingPolicyCommand + .MonitorTitusScalingPolicyCommandBuilder.class) + @JsonTypeName("MonitorTitusScalingPolicyCommand") + @Value + public static class MonitorTitusScalingPolicyCommand implements SagaCommand { + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class MonitorTitusScalingPolicyCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/PrepareTitusDeploy.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/PrepareTitusDeploy.java new file mode 100644 index 00000000000..47643e3a2e9 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/PrepareTitusDeploy.java @@ -0,0 +1,535 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import static java.lang.String.format; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.TargetGroupLookupHelper; +import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials; +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.helpers.OperationPoller; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App.Front50AppAware; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.titus.JobType; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.DisruptionBudget; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import 
com.netflix.spinnaker.clouddriver.titus.client.model.disruption.AvailabilityPercentageLimit; +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.ContainerHealthProvider; +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.HourlyTimeWindow; +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.RatePercentagePerInterval; +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.TimeWindow; +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.SubmitTitusJob.SubmitTitusJobCommand; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription; +import com.netflix.spinnaker.clouddriver.titus.exceptions.JobNotFoundException; +import com.netflix.spinnaker.clouddriver.titus.model.DockerImage; +import com.netflix.spinnaker.config.AwsConfiguration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class PrepareTitusDeploy extends AbstractTitusDeployAction + implements SagaAction { + private static final Logger log = LoggerFactory.getLogger(PrepareTitusDeploy.class); + + private static final TimeWindow DEFAULT_SYSTEM_TIME_WINDOW = + new TimeWindow( + Arrays.asList("Monday", "Tuesday", "Wednesday", "Thursday", "Friday"), + Collections.singletonList(new HourlyTimeWindow(10, 16)), + "PST"); + private static final String USE_APPLICATION_DEFAULT_SG_LABEL = + "spinnaker.useApplicationDefaultSecurityGroup"; + private static final String SKIP_SECURITY_GROUP_VALIDATION_LABEL = + "spinnaker.skipSecurityGroupValidation"; + private static final String LABEL_TARGET_GROUPS = "spinnaker.targetGroups"; + private static final String SPINNAKER_ACCOUNT_ENV_VAR = "SPINNAKER_ACCOUNT"; + + private final AwsLookupUtil awsLookupUtil; + private final RegionScopedProviderFactory regionScopedProviderFactory; + private final AccountCredentialsProvider accountCredentialsProvider; + private final AwsConfiguration.DeployDefaults deployDefaults; + private final TargetGroupLookupHelper targetGroupLookupHelper; + + @Autowired + public PrepareTitusDeploy( + AccountCredentialsRepository accountCredentialsRepository, + TitusClientProvider titusClientProvider, + AwsLookupUtil awsLookupUtil, + RegionScopedProviderFactory regionScopedProviderFactory, + AccountCredentialsProvider accountCredentialsProvider, + AwsConfiguration.DeployDefaults deployDefaults, + Optional targetGroupLookupHelper) { + super(accountCredentialsRepository, titusClientProvider); + this.awsLookupUtil = awsLookupUtil; + this.regionScopedProviderFactory = regionScopedProviderFactory; + this.accountCredentialsProvider = accountCredentialsProvider; + this.deployDefaults = deployDefaults; + this.targetGroupLookupHelper = targetGroupLookupHelper.orElse(new TargetGroupLookupHelper()); + } + + private static T orDefault(T input, T defaultValue) { + return (input == null) ? 
defaultValue : input; + } + + private static int orDefault(int input, int defaultValue) { + if (input == 0) { + return defaultValue; + } + return input; + } + + @Nonnull + @Override + public Result apply(@Nonnull PrepareTitusDeployCommand command, @Nonnull Saga saga) { + final TitusDeployDescription description = command.description; + + prepareDeployDescription(description); + + final TitusClient titusClient = + titusClientProvider.getTitusClient( + description.getCredentials(), command.description.getRegion()); + + final LoadFront50App.Front50App front50App = command.getFront50App(); + + final String asgName = description.getSource().getAsgName(); + if (!isNullOrEmpty(asgName)) { + log.trace("Source present, getting details: {}", asgName); + mergeSourceDetailsIntoDescription(saga, description, front50App); + } else { + configureDisruptionBudget(description, null, front50App); + } + + saga.log( + "Preparing deployment to %s:%s%s", + description.getAccount(), + description.getRegion(), + isNullOrEmpty(description.getSubnet()) ? "" : ":" + description.getSubnet()); + + DockerImage dockerImage = new DockerImage(description.getImageId()); + + if (!isNullOrEmpty(description.getInterestingHealthProviderNames())) { + description + .getLabels() + .put( + "interestingHealthProviderNames", + String.join(",", description.getInterestingHealthProviderNames())); + } + if (!isNullOrEmpty(description.getResources().getSignedAddressAllocations())) { + description + .getResources() + .setSignedAddressAllocations(description.getResources().getSignedAddressAllocations()); + } + + resolveSecurityGroups(saga, description); + + setSpinnakerAccountEnvVar(description); + + TargetGroupLookupHelper.TargetGroupLookupResult targetGroupLookupResult = null; + if (!description.getTargetGroups().isEmpty()) { + targetGroupLookupResult = validateLoadBalancers(description); + if (targetGroupLookupResult != null) { + description + .getLabels() + .put( + LABEL_TARGET_GROUPS, + String.join(",", targetGroupLookupResult.getTargetGroupARNs())); + } + } else { + description.getLabels().remove(LABEL_TARGET_GROUPS); + } + + String nextServerGroupName = TitusJobNameResolver.resolveJobName(titusClient, description); + saga.log("Resolved server group name to %s", nextServerGroupName); + + String user = resolveUser(front50App, description); + + return new Result( + SubmitTitusJobCommand.builder() + .description(description) + .submitJobRequest( + description.toSubmitJobRequest(dockerImage, nextServerGroupName, user)) + .nextServerGroupName(nextServerGroupName) + .targetGroupLookupResult(targetGroupLookupResult) + .build(), + Collections.emptyList()); + } + + @Nullable + private String resolveUser( + LoadFront50App.Front50App front50App, TitusDeployDescription description) { + if (front50App != null && !isNullOrEmpty(front50App.getEmail())) { + return front50App.getEmail(); + } else if (!isNullOrEmpty(description.getUser())) { + return description.getUser(); + } + return null; + } + + private void configureDisruptionBudget( + TitusDeployDescription description, Job sourceJob, LoadFront50App.Front50App front50App) { + if (description.getDisruptionBudget() == null) { + // migrationPolicy should only be used when the disruptionBudget has not been specified + description.setMigrationPolicy( + orDefault( + description.getMigrationPolicy(), + (sourceJob == null) ?
null : sourceJob.getMigrationPolicy())); + + // "systemDefault" should be treated as "no migrationPolicy" + if (description.getMigrationPolicy() == null + || "systemDefault".equals(description.getMigrationPolicy().getType())) { + description.setDisruptionBudget(getDefaultDisruptionBudget(front50App)); + } + } + } + + private void mergeSourceDetailsIntoDescription( + Saga saga, TitusDeployDescription description, LoadFront50App.Front50App front50App) { + // If cluster name info was not provided, use the fields from the source asg. + Names sourceName = Names.parseName(description.getSource().getAsgName()); + description.setApplication( + description.getApplication() != null ? description.getApplication() : sourceName.getApp()); + description.setStack( + description.getStack() != null ? description.getStack() : sourceName.getStack()); + description.setFreeFormDetails( + description.getFreeFormDetails() != null + ? description.getFreeFormDetails() + : sourceName.getDetail()); + + TitusDeployDescription.Source source = description.getSource(); + + TitusClient sourceClient = buildSourceTitusClient(source); + if (sourceClient == null) { + throw new TitusException( + format( + "Unable to find a Titus client for deployment source: %s", + description.getSource().getAsgName())); + } + + Job sourceJob = sourceClient.findJobByName(source.getAsgName()); + if (sourceJob == null) { + throw new JobNotFoundException( + format( + "Unable to locate source (%s:%s:%s)", + source.getAccount(), source.getRegion(), source.getAsgName())); + } + + saga.log( + format( + "Copying deployment details from (%s:%s:%s)", + source.getAccount(), source.getRegion(), source.getAsgName())); + + if (isNullOrEmpty(description.getSecurityGroups())) { + description.setSecurityGroups(sourceJob.getSecurityGroups()); + } + if (isNullOrEmpty(description.getImageId())) { + String imageVersion = + (sourceJob.getVersion() == null) ?
sourceJob.getDigest() : sourceJob.getVersion(); + description.setImageId(format("%s:%s", sourceJob.getApplicationName(), imageVersion)); + } + + if (description.getSource() != null && description.getSource().isUseSourceCapacity()) { + description.getCapacity().setMin(sourceJob.getInstancesMin()); + description.getCapacity().setMax(sourceJob.getInstancesMax()); + description.getCapacity().setDesired(sourceJob.getInstancesDesired()); + } + + description + .getResources() + .setAllocateIpAddress( + orDefault( + description.getResources().isAllocateIpAddress(), sourceJob.isAllocateIpAddress())); + description + .getResources() + .setCpu(orDefault(description.getResources().getCpu(), sourceJob.getCpu())); + description + .getResources() + .setDisk(orDefault(description.getResources().getDisk(), sourceJob.getDisk())); + description + .getResources() + .setGpu(orDefault(description.getResources().getGpu(), sourceJob.getGpu())); + description + .getResources() + .setMemory(orDefault(description.getResources().getMemory(), sourceJob.getMemory())); + description + .getResources() + .setNetworkMbps( + orDefault(description.getResources().getNetworkMbps(), sourceJob.getNetworkMbps())); + + // Fallback to source allocations if request does not include allocations + description + .getResources() + .setSignedAddressAllocations( + orDefault( + description.getResources().getSignedAddressAllocations(), + sourceJob.getSignedAddressAllocations())); + + description.setRetries(orDefault(description.getRetries(), sourceJob.getRetries())); + description.setRuntimeLimitSecs( + orDefault(description.getRuntimeLimitSecs(), sourceJob.getRuntimeLimitSecs())); + description.setEfs(orDefault(description.getEfs(), sourceJob.getEfs())); + description.setEntryPoint(orDefault(description.getEntryPoint(), sourceJob.getEntryPoint())); + description.setCmd(orDefault(description.getCmd(), sourceJob.getCmd())); + description.setIamProfile(orDefault(description.getIamProfile(), sourceJob.getIamProfile())); + description.setCapacityGroup( + orDefault(description.getCapacityGroup(), sourceJob.getCapacityGroup())); + description.setInService(orDefault(description.getInService(), sourceJob.isInService())); + description.setJobType(orDefault(description.getJobType(), JobType.SERVICE.value())); + + if (isNullOrEmpty(description.getLabels())) { + description.getLabels().putAll(sourceJob.getLabels()); + } + if (isNullOrEmpty(description.getEnv())) { + description.getEnv().putAll(sourceJob.getEnvironment()); + } + if (isNullOrEmpty(description.getContainerAttributes())) { + description.getContainerAttributes().putAll(sourceJob.getContainerAttributes()); + } + + configureDisruptionBudget(description, sourceJob, front50App); + + if (isNullOrEmpty(description.getHardConstraints())) { + description.setHardConstraints(new ArrayList<>()); + } + if (isNullOrEmpty(description.getSoftConstraints())) { + description.setSoftConstraints(new ArrayList<>()); + } + if (description.getSoftConstraints().isEmpty() && !sourceJob.getSoftConstraints().isEmpty()) { + sourceJob + .getSoftConstraints() + .forEach( + softConstraint -> { + if (!description.getHardConstraints().contains(softConstraint)) { + description.getSoftConstraints().add(softConstraint); + } + }); + } + if (description.getHardConstraints().isEmpty() && !sourceJob.getHardConstraints().isEmpty()) { + sourceJob + .getHardConstraints() + .forEach( + hardConstraint -> { + if (!description.getSoftConstraints().contains(hardConstraint)) { + description.getHardConstraints().add(hardConstraint); + } + 
}); + } + } + + // Sets an env variable, accessible within the task (container), that maps to the + // Spinnaker account + private void setSpinnakerAccountEnvVar(TitusDeployDescription description) { + if (description.getEnv().get(SPINNAKER_ACCOUNT_ENV_VAR) == null) { + Map<String, String> existingEnvVars = description.getEnv(); + existingEnvVars.put(SPINNAKER_ACCOUNT_ENV_VAR, description.getAccount()); + description.setEnv(existingEnvVars); + } + } + + @Nonnull + private DisruptionBudget getDefaultDisruptionBudget(LoadFront50App.Front50App front50App) { + DisruptionBudget budget = new DisruptionBudget(); + budget.setAvailabilityPercentageLimit(new AvailabilityPercentageLimit(95)); + budget.setRatePercentagePerInterval(new RatePercentagePerInterval(600_000, 5)); + budget.setTimeWindows(Collections.singletonList(DEFAULT_SYSTEM_TIME_WINDOW)); + + if (front50App != null && front50App.isPlatformHealthOnly()) { + budget.setContainerHealthProviders( + Collections.singletonList(new ContainerHealthProvider("eureka"))); + } + + return budget; + } + + @Nullable + private TargetGroupLookupHelper.TargetGroupLookupResult validateLoadBalancers( + TitusDeployDescription description) { + if (description.getTargetGroups().isEmpty()) { + return null; + } + + RegionScopedProviderFactory.RegionScopedProvider regionScopedProvider = + regionScopedProviderFactory.forRegion( + (NetflixAmazonCredentials) + accountCredentialsProvider.getCredentials( + description.getCredentials().getAwsAccount()), + description.getRegion()); + + TargetGroupLookupHelper.TargetGroupLookupResult targetGroups = + targetGroupLookupHelper.getTargetGroupsByName( + regionScopedProvider, description.getTargetGroups()); + if (!targetGroups.getUnknownTargetGroups().isEmpty()) { + throw new TargetGroupsNotFoundException( + format( + "Unable to find Target Groups: %s", + String.join(", ", targetGroups.getUnknownTargetGroups()))); + } + + return targetGroups; + } + + private void resolveSecurityGroups(Saga saga, TitusDeployDescription description) { + saga.log("Resolving security groups"); + + // Determine if we should configure the app default security group... + // First check for a label, falling back to the value (if any) passed via the description. + boolean useApplicationDefaultSecurityGroup = + Boolean.valueOf( + description + .getLabels() + .getOrDefault( + USE_APPLICATION_DEFAULT_SG_LABEL, + String.valueOf(description.isUseApplicationDefaultSecurityGroup()))); + if (!useApplicationDefaultSecurityGroup) { + description.getLabels().put(USE_APPLICATION_DEFAULT_SG_LABEL, "false"); + } else { + description.getLabels().remove(USE_APPLICATION_DEFAULT_SG_LABEL); + } + description.setUseApplicationDefaultSecurityGroup(useApplicationDefaultSecurityGroup); + + // Resolve the provided security groups, asserting that they actually exist. + // TODO(rz): Seems kinda odd that we'd do resolution & validation here and not in... a validator + // or preprocessor?
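+ + // A hedged worked example of the label toggles above (the label values shown are + // hypothetical): a description carrying + // labels = { "spinnaker.useApplicationDefaultSecurityGroup": "false", + // "spinnaker.skipSecurityGroupValidation": "true" } + // resolves useApplicationDefaultSecurityGroup to false regardless of the description's + // own flag, and the block below then passes the provided security groups through + // without existence checks.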
+ Set<String> securityGroups = new HashSet<>(); + // TODO(aravindd) Used to skip validation for cross account SG's + // Remove this workaround when we have support for multi account setup + boolean skipSecurityGroupValidation = + Boolean.valueOf( + description + .getLabels() + .getOrDefault(SKIP_SECURITY_GROUP_VALIDATION_LABEL, String.valueOf(false))); + if (skipSecurityGroupValidation) { + saga.log("Skipping Security Group Validation"); + description + .getSecurityGroups() + .forEach(providedSecurityGroup -> securityGroups.add(providedSecurityGroup)); + } else { + description + .getSecurityGroups() + .forEach( + providedSecurityGroup -> { + saga.log("Resolving Security Group '%s'", providedSecurityGroup); + + if (awsLookupUtil.securityGroupIdExists( + description.getAccount(), description.getRegion(), providedSecurityGroup)) { + securityGroups.add(providedSecurityGroup); + } else { + String convertedSecurityGroup = + awsLookupUtil.convertSecurityGroupNameToId( + description.getAccount(), description.getRegion(), providedSecurityGroup); + if (isNullOrEmpty(convertedSecurityGroup)) { + throw new SecurityGroupNotFoundException( + format("Security Group '%s' cannot be found", providedSecurityGroup)); + } + securityGroups.add(convertedSecurityGroup); + } + }); + + if (deployDefaults.getAddAppGroupToServerGroup() + && securityGroups.size() < deployDefaults.getMaxSecurityGroups() + && useApplicationDefaultSecurityGroup) { + String applicationSecurityGroup = + awsLookupUtil.convertSecurityGroupNameToId( + description.getAccount(), description.getRegion(), description.getApplication()); + if (isNullOrEmpty(applicationSecurityGroup)) { + applicationSecurityGroup = + (String) + OperationPoller.retryWithBackoff( + op -> + awsLookupUtil.createSecurityGroupForApplication( + description.getAccount(), + description.getRegion(), + description.getApplication()), + 1_000, + 5); + } + securityGroups.add(applicationSecurityGroup); + } + } + + if (!securityGroups.isEmpty()) { + description.setSecurityGroups(Lists.newArrayList(securityGroups)); + } + + saga.log( + "Finished resolving security groups: %s", + Joiner.on(",").join(description.getSecurityGroups())); + } + + @Builder(builderClassName = "PrepareTitusDeployCommandBuilder", toBuilder = true) + @JsonDeserialize(builder = PrepareTitusDeployCommand.PrepareTitusDeployCommandBuilder.class) + @JsonTypeName("prepareTitusDeployCommand") + @Value + public static class PrepareTitusDeployCommand implements SagaCommand, Front50AppAware { + private TitusDeployDescription description; + @NonFinal private LoadFront50App.Front50App front50App; + @NonFinal private EventMetadata metadata; + + @Override + public void setFront50App(LoadFront50App.Front50App front50App) { + this.front50App = front50App; + } + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class PrepareTitusDeployCommandBuilder {} + } + + private static class SecurityGroupNotFoundException extends TitusException { + SecurityGroupNotFoundException(String message) { + super(message); + setRetryable(true); + } + } + + private static class TargetGroupsNotFoundException extends TitusException { + TargetGroupsNotFoundException(String message) { + super(message); + setRetryable(true); + } + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/ResizeTitusJob.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/ResizeTitusJob.java
new file mode 100644 index 00000000000..3585a4213da --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/ResizeTitusJob.java @@ -0,0 +1,123 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import com.netflix.spinnaker.clouddriver.titus.client.model.ResizeJobRequest; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.ResizeTitusServerGroupDescription; +import com.netflix.spinnaker.kork.exceptions.UserException; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class ResizeTitusJob implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public ResizeTitusJob( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply(@NotNull ResizeTitusJob.ResizeTitusJobCommand command, @NotNull Saga saga) { + saga.log( + "Resizing Titus Job %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getServerGroupName()); + + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + Job job = titusClient.findJobByName(command.description.getServerGroupName()); + if (job == null) { + throw new UserException( + "No titus server group named '" + command.description.getServerGroupName() + "' found"); + } + + boolean shouldToggleScalingFlags = 
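+ // When the job is not in service, autoscale is enabled just for the duration of the
+ // resize below and switched back off afterwards.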
!job.isInService(); + if (shouldToggleScalingFlags) { + titusClient.setAutoscaleEnabled(job.getId(), true); + } + + titusClient.resizeJob( + (ResizeJobRequest) + new ResizeJobRequest() + .withInstancesDesired(command.description.getCapacity().getDesired()) + .withInstancesMin(command.description.getCapacity().getMin()) + .withInstancesMax(command.description.getCapacity().getMax()) + .withUser(command.description.getUser()) + .withJobId(job.getId())); + + if (shouldToggleScalingFlags) { + titusClient.setAutoscaleEnabled(job.getId(), false); + } + + saga.log( + "Resized Titus Job %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getServerGroupName()); + + return new Result(); + } + + @Builder(builderClassName = "ResizeTitusJobCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = ResizeTitusJob.ResizeTitusJobCommand.ResizeTitusJobCommandBuilder.class) + @JsonTypeName("resizeTitusJobCommand") + @Value + public static class ResizeTitusJobCommand implements SagaCommand { + @Nonnull ResizeTitusServerGroupDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class ResizeTitusJobCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/ResolveTitusJobId.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/ResolveTitusJobId.java new file mode 100644 index 00000000000..f8a69510fe8 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/ResolveTitusJobId.java @@ -0,0 +1,105 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.Job; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription; +import java.util.Collections; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class ResolveTitusJobId implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public ResolveTitusJobId( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply( + @NotNull ResolveTitusJobId.ResolveTitusJobIdCommand command, @NotNull Saga saga) { + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.getRegion()); + + Job job = titusClient.findJobByName(command.getServerGroupName()); + if (job != null) { + DestroyTitusJobDescription destroyTitusJobDescription = new DestroyTitusJobDescription(); + destroyTitusJobDescription.setAccount(command.getAccount()); + destroyTitusJobDescription.setRegion(command.getRegion()); + destroyTitusJobDescription.setJobId(job.getId()); + destroyTitusJobDescription.setServerGroupName(job.getName()); + destroyTitusJobDescription.setUser(command.getUser()); + + return new Result( + DestroyTitusJob.DestroyTitusJobCommand.builder() + .description(destroyTitusJobDescription) + .build(), + Collections.emptyList()); + } + + return new Result(); + } + + @Builder(builderClassName = "ResolveTitusJobIdCommandBuilder", toBuilder = true) + @JsonDeserialize(builder = ResolveTitusJobIdCommand.ResolveTitusJobIdCommandBuilder.class) + @JsonTypeName("resolveTitusJobIdCommand") + @Value + public static class ResolveTitusJobIdCommand implements SagaCommand { + @Nonnull String account; + @Nonnull String region; + @Nonnull String serverGroupName; + String user; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class ResolveTitusJobIdCommandBuilder {} + } +} diff --git 
a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/SubmitTitusJob.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/SubmitTitusJob.java new file mode 100644 index 00000000000..e3d5762c229 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/SubmitTitusJob.java @@ -0,0 +1,218 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import static com.netflix.spinnaker.clouddriver.titus.deploy.actions.AttachTitusServiceLoadBalancers.AttachTitusServiceLoadBalancersCommand; +import static com.netflix.spinnaker.clouddriver.titus.deploy.actions.CopyTitusServiceScalingPolicies.CopyTitusServiceScalingPoliciesCommand; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.TargetGroupLookupHelper; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App; +import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App.Front50AppAware; +import com.netflix.spinnaker.clouddriver.saga.ManyCommands; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.titus.JobType; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusJobSubmitted; +import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.spinnaker.kork.exceptions.SpinnakerException; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.time.Duration; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class SubmitTitusJob extends AbstractTitusDeployAction + implements SagaAction { + + private static final Logger log = LoggerFactory.getLogger(SubmitTitusJob.class); + + private final RetrySupport retrySupport; + + 
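+ // A minimal sketch (illustrative only; the job name is a hypothetical placeholder) of the + // retry pattern apply() uses below: RetrySupport re-invokes the supplier up to the given + // number of attempts, backing off exponentially from the initial delay when the final flag + // is true. + // + // String jobUri = + // retrySupport.retry( + // () -> titusClient.submitJob(request.withJobName("myapp-v001")), + // 8, // max attempts + // Duration.ofMillis(100), // initial backoff + // true); // exponential backoff +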
@Autowired + public SubmitTitusJob( + AccountCredentialsRepository accountCredentialsRepository, + TitusClientProvider titusClientProvider, + RetrySupport retrySupport) { + super(accountCredentialsRepository, titusClientProvider); + this.retrySupport = retrySupport; + } + + /** + * NOTE: The single-element array usage is to get around line-for-line Groovy conversion variable + * references inside of the lambda. This should really be refactored so that pattern isn't + * necessary. It's really gross as-is. + */ + @Nonnull + @Override + public Result apply(@Nonnull SubmitTitusJobCommand command, @Nonnull Saga saga) { + final TitusDeployDescription description = command.description; + + prepareDeployDescription(description); + + final TitusClient titusClient = + titusClientProvider.getTitusClient(description.getCredentials(), description.getRegion()); + + final SubmitJobRequest submitJobRequest = command.getSubmitJobRequest(); + String[] nextServerGroupName = {command.getNextServerGroupName()}; + + AtomicInteger submissionAttempts = new AtomicInteger(); + String jobUri = + retrySupport.retry( + () -> { + try { + submissionAttempts.getAndIncrement(); + return titusClient.submitJob(submitJobRequest.withJobName(nextServerGroupName[0])); + } catch (StatusRuntimeException e) { + if (isServiceExceptionRetryable(description, e)) { + String statusDescription = e.getStatus().getDescription(); + if (statusDescription != null + && statusDescription.contains( + "Job sequence id reserved by another pending job")) { + nextServerGroupName[0] = + TitusJobNameResolver.resolveJobName(titusClient, description); + saga.log("Retrying with job name %s", nextServerGroupName[0]); + } + // Can't do an exact match on the server group name because the error from Titus + // adds dashes if the server group name does not contain a stack or free form + // details, i.e. testapp---v001 + else if (statusDescription != null + && statusDescription.contains( + "Constraint violation - job with group sequence") + && statusDescription.contains("exists") + && submissionAttempts.intValue() > 1) { + + String jobId = titusClient.findJobByName(nextServerGroupName[0]).getId(); + log.info( + "Retried job submission for job that exists due to previous attempt" + + ", returning jobId {}", + jobId); + return jobId; + } + + throw e; + } + + if (isStatusCodeRetryable(e.getStatus().getCode())) { + throw e; + } else { + log.error( + "Could not submit job and not retrying for status {}", e.getStatus(), e); + saga.log("Could not submit job %s: %s", e.getStatus(), e.getMessage()); + throw new SpinnakerException(e).setRetryable(false); + } + } + }, + 8, + Duration.ofMillis(100), + true); + + if (jobUri == null) { + throw new TitusException("could not create job"); + } + + saga.log("Successfully submitted job request to Titus (Job URI: %s)", jobUri); + + return new Result( + new ManyCommands( + AttachTitusServiceLoadBalancersCommand.builder() + .description(description) + .jobUri(jobUri) + .targetGroupLookupResult(command.targetGroupLookupResult) + .build(), + CopyTitusServiceScalingPoliciesCommand.builder() + .description(description) + .jobUri(jobUri) + .deployedServerGroupName(nextServerGroupName[0]) + .build()), + Collections.singletonList( + TitusJobSubmitted.builder() + .jobType(JobType.from(description.getJobType())) + .serverGroupNameByRegion( + Collections.singletonMap(description.getRegion(), nextServerGroupName[0])) + .jobUri(jobUri) + .build())); + } + + /** + * TODO(rz): Figure out what conditions are not retryable and why. 
Then document them here.
+   */
+  private static boolean isServiceExceptionRetryable(
+      TitusDeployDescription description, StatusRuntimeException e) {
+    String statusDescription = e.getStatus().getDescription();
+    return JobType.SERVICE.isEqual(description.getJobType())
+        && (e.getStatus().getCode() == Status.RESOURCE_EXHAUSTED.getCode()
+            || e.getStatus().getCode() == Status.INVALID_ARGUMENT.getCode())
+        && (statusDescription != null
+            && (statusDescription.contains("Job sequence id reserved by another pending job")
+                || statusDescription.contains("Constraint violation - job with group sequence")));
+  }
+
+  /** TODO(rz): Figure out what conditions are not retryable and why. Then document them here. */
+  private static boolean isStatusCodeRetryable(Status.Code code) {
+    return code == Status.UNAVAILABLE.getCode()
+        || code == Status.INTERNAL.getCode()
+        || code == Status.DEADLINE_EXCEEDED.getCode()
+        || code == Status.RESOURCE_EXHAUSTED.getCode();
+  }
+
+  @Builder(builderClassName = "SubmitTitusJobCommandBuilder", toBuilder = true)
+  @JsonDeserialize(builder = SubmitTitusJobCommand.SubmitTitusJobCommandBuilder.class)
+  @JsonTypeName("submitTitusJobCommand")
+  @Value
+  public static class SubmitTitusJobCommand implements SagaCommand, Front50AppAware {
+    @Nonnull private TitusDeployDescription description;
+    @Nonnull private SubmitJobRequest submitJobRequest;
+    @Nonnull private String nextServerGroupName;
+    private TargetGroupLookupHelper.TargetGroupLookupResult targetGroupLookupResult;
+    @NonFinal private LoadFront50App.Front50App front50App;
+    @NonFinal private EventMetadata metadata;
+
+    @Override
+    public void setFront50App(LoadFront50App.Front50App front50App) {
+      this.front50App = front50App;
+    }
+
+    @Override
+    public void setMetadata(EventMetadata metadata) {
+      this.metadata = metadata;
+    }
+
+    @JsonPOJOBuilder(withPrefix = "")
+    public static class SubmitTitusJobCommandBuilder {}
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TerminateTitusTasks.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TerminateTitusTasks.java
new file mode 100644
index 00000000000..920049ea7d9
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TerminateTitusTasks.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateTasksAndShrinkJobRequest; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TerminateTitusInstancesDescription; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class TerminateTitusTasks + implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public TerminateTitusTasks( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply( + @NotNull TerminateTitusTasks.TerminateTitusTasksCommand command, @NotNull Saga saga) { + saga.log( + "Terminating Titus Tasks %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getInstanceIds()); + + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + titusClient.terminateTasksAndShrink( + new TerminateTasksAndShrinkJobRequest() + .withTaskIds(command.description.getInstanceIds()) + .withShrink(false) + .withUser(command.description.getUser())); + + saga.log( + "Terminated Titus Instances %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getInstanceIds()); + + return new Result(); + } + + @Builder(builderClassName = "TerminateTitusTasksCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + TerminateTitusTasks.TerminateTitusTasksCommand.TerminateTitusTasksCommandBuilder.class) + @JsonTypeName("terminateTitusTasksCommand") + @Value + public static class TerminateTitusTasksCommand implements SagaCommand { + @Nonnull TerminateTitusInstancesDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class TerminateTitusTasksCommandBuilder {} + } +} diff --git 
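For illustration, here is how the command consumed by TerminateTitusTasks above might be constructed. The setter-style population assumes TerminateTitusInstancesDescription is a @Data-style bean like the other description classes in this patch; all values are placeholders.

import java.util.Arrays;

TerminateTitusInstancesDescription description = new TerminateTitusInstancesDescription();
description.setAccount("titusdevint");
description.setRegion("us-east-1");
description.setInstanceIds(Arrays.asList("task-0001", "task-0002")); // made-up task ids
description.setUser("example-user");

TerminateTitusTasks.TerminateTitusTasksCommand command =
    TerminateTitusTasks.TerminateTitusTasksCommand.builder()
        .description(description)
        .build();

Worth noting: the action always passes withShrink(false) to terminateTasksAndShrink, so killing tasks does not reduce the job's desired size; Titus is left to replace the terminated tasks.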
a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TitusJobNameResolver.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TitusJobNameResolver.java new file mode 100644 index 00000000000..ac05fc74093 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TitusJobNameResolver.java @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.netflix.spinnaker.clouddriver.titus.JobType; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.deploy.TitusServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription; + +/** Helper class for resolving Titus job names. */ +class TitusJobNameResolver { + + static String resolveJobName(TitusClient titusClient, TitusDeployDescription description) { + if (JobType.isEqual(description.getJobType(), JobType.BATCH)) { + return description.getApplication(); + } + + String nextServerGroupName; + TitusServerGroupNameResolver serverGroupNameResolver = + new TitusServerGroupNameResolver(titusClient, description.getRegion()); + if (description.getSequence() != null) { + nextServerGroupName = + serverGroupNameResolver.generateServerGroupName( + description.getApplication(), + description.getStack(), + description.getFreeFormDetails(), + description.getSequence(), + false); + } else { + nextServerGroupName = + serverGroupNameResolver.resolveNextServerGroupName( + description.getApplication(), + description.getStack(), + description.getFreeFormDetails(), + false); + } + + return nextServerGroupName; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TitusServiceJobPredicate.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TitusServiceJobPredicate.java new file mode 100644 index 00000000000..c8521b4e2da --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/TitusServiceJobPredicate.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
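Concretely, the resolver above produces names along these lines (a sketch; "myapp" and "main" are made-up application and stack names, and service-job sequence numbers depend on what is already deployed in the region, which the resolver discovers through the TitusClient):

// Illustrative outcomes only:
//
//   jobType = "batch"                      -> "myapp"            (application name as-is)
//   jobType = "service", sequence == null  -> "myapp-main-v003"  (next free sequence)
//   jobType = "service", sequence == 7     -> "myapp-main-v007"  (caller-pinned sequence)
String jobName = TitusJobNameResolver.resolveJobName(titusClient, description);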
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.titus.JobType; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.PrepareTitusDeploy.PrepareTitusDeployCommand; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class TitusServiceJobPredicate implements SagaFlow.ConditionPredicate { + @Override + public boolean test(Saga saga) { + return saga.getEvents().stream() + .filter(e -> PrepareTitusDeployCommand.class.isAssignableFrom(e.getClass())) + .findFirst() + .map( + e -> + JobType.SERVICE.isEqual( + ((PrepareTitusDeployCommand) e).getDescription().getJobType())) + .orElseThrow( + () -> + new TitusException( + "Could not determine job type: No TitusDeployDescription found")); + } + + @Nonnull + @Override + public String getName() { + return "titusServiceJobPredicate"; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobDisruptionBudget.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobDisruptionBudget.java new file mode 100644 index 00000000000..3c1738e9f02 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobDisruptionBudget.java @@ -0,0 +1,107 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
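For context, a condition predicate like the one above is meant to gate a branch of saga steps so that load balancers and scaling policies are only handled for service jobs. A composition sketch, assuming the fluent then/on builder that the ConditionPredicate hook implies (the exact SagaFlow API may differ; the step ordering mirrors the Titus deploy actions in this patch):

SagaFlow flow =
    new SagaFlow()
        .then(PrepareTitusDeploy.class)
        .then(SubmitTitusJob.class)
        .on(
            TitusServiceJobPredicate.class,
            serviceOnly ->
                serviceOnly
                    .then(AttachTitusServiceLoadBalancers.class)
                    .then(CopyTitusServiceScalingPolicies.class));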
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.client.model.JobDisruptionBudgetUpdateRequest; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertJobDisruptionBudgetDescription; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class UpsertTitusJobDisruptionBudget + implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public UpsertTitusJobDisruptionBudget( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply( + @NotNull UpsertTitusJobDisruptionBudget.UpsertTitusJobDisruptionBudgetCommand command, + @NotNull Saga saga) { + saga.log( + "Updating Titus Job Disruption %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId()); + + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + titusClient.updateDisruptionBudget( + new JobDisruptionBudgetUpdateRequest() + .withJobId(command.description.getJobId()) + .withDisruptionBudget(command.description.getDisruptionBudget())); + + saga.log( + "Updated Titus Job Disruption %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId()); + + return new Result(); + } + + @Builder(builderClassName = "UpsertTitusJobDisruptionBudgetCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + UpsertTitusJobDisruptionBudget.UpsertTitusJobDisruptionBudgetCommand + .UpsertTitusJobDisruptionBudgetCommandBuilder.class) + @JsonTypeName("upsertTitusJobDisruptionBudgetCommand") + @Value + public static class UpsertTitusJobDisruptionBudgetCommand implements SagaCommand { + @Nonnull UpsertJobDisruptionBudgetDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class UpsertTitusJobDisruptionBudgetCommandBuilder {} + } +} diff 
--git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobProcesses.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobProcesses.java new file mode 100644 index 00000000000..5e1c82f7b39 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobProcesses.java @@ -0,0 +1,102 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class UpsertTitusJobProcesses + implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + + @Autowired + public UpsertTitusJobProcesses( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + } + + @NotNull + @Override + public Result apply( + @NotNull UpsertTitusJobProcesses.UpsertTitusJobProcessesCommand command, @NotNull Saga saga) { + saga.log( + "Updating Titus Job Processes %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId()); + + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusClient titusClient = + titusClientProvider.getTitusClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + titusClient.updateScalingProcesses(command.description); + + saga.log( + "Updated Titus Job Processes %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + 
command.description.getJobId()); + + return new Result(); + } + + @Builder(builderClassName = "UpsertTitusJobProcessesCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + UpsertTitusJobProcesses.UpsertTitusJobProcessesCommand + .UpsertTitusJobProcessesCommandBuilder.class) + @JsonTypeName("upsertTitusJobProcessesCommand") + @Value + public static class UpsertTitusJobProcessesCommand implements SagaCommand { + @Nonnull ServiceJobProcessesRequest description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class UpsertTitusJobProcessesCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusScalingPolicy.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusScalingPolicy.java new file mode 100644 index 00000000000..f67be6a28db --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusScalingPolicy.java @@ -0,0 +1,191 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
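As a usage sketch, the request bean that UpsertTitusJobProcesses feeds to TitusClient.updateScalingProcesses() might be populated as below. The two disable* flags are assumed from the Titus service-job-processes API and should be verified against the ServiceJobProcesses model; all values are placeholders.

ServiceJobProcessesRequest request = new ServiceJobProcessesRequest();
request.setRegion("us-east-1");
request.setJobId("<job id>"); // placeholder

ServiceJobProcesses processes = new ServiceJobProcesses();
processes.setDisableIncreaseDesired(true);  // assumed field: freeze scale-up
processes.setDisableDecreaseDesired(false); // assumed field: still allow scale-down
request.setServiceJobProcesses(processes);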
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaCommand; +import com.netflix.spinnaker.clouddriver.saga.flow.SagaAction; +import com.netflix.spinnaker.clouddriver.saga.models.Saga; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider; +import com.netflix.spinnaker.clouddriver.titus.client.TitusAutoscalingClient; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusScalingPolicyModified; +import com.netflix.spinnaker.kork.core.RetrySupport; +import com.netflix.spinnaker.kork.exceptions.UserException; +import com.netflix.titus.grpc.protogen.PutPolicyRequest; +import com.netflix.titus.grpc.protogen.ScalingPolicy; +import com.netflix.titus.grpc.protogen.ScalingPolicyID; +import com.netflix.titus.grpc.protogen.UpdatePolicyRequest; +import java.util.Collections; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class UpsertTitusScalingPolicy + implements SagaAction { + private final AccountCredentialsProvider accountCredentialsProvider; + private final TitusClientProvider titusClientProvider; + private final RetrySupport retrySupport; + + @Autowired + public UpsertTitusScalingPolicy( + AccountCredentialsProvider accountCredentialsProvider, + TitusClientProvider titusClientProvider, + RetrySupport retrySupport) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.titusClientProvider = titusClientProvider; + this.retrySupport = retrySupport; + } + + @NotNull + @Override + public Result apply( + @NotNull UpsertTitusScalingPolicy.UpsertTitusScalingPolicyCommand command, + @NotNull Saga saga) { + AccountCredentials accountCredentials = + accountCredentialsProvider.getCredentials(command.description.getAccount()); + + TitusAutoscalingClient titusClient = + titusClientProvider.getTitusAutoscalingClient( + (NetflixTitusCredentials) accountCredentials, command.description.getRegion()); + + if (titusClient == null) { + throw new UserException("Autoscaling is not supported for this account/region"); + } + + String scalingPolicyId = command.description.getScalingPolicyID(); + + boolean shouldCreate = scalingPolicyId == null; + if (shouldCreate) { + scalingPolicyId = createScalingPolicy(command, saga, titusClient); + } else { + scalingPolicyId = updateScalingPolicy(command, saga, titusClient); + } + + return new Result( + MonitorTitusScalingPolicy.MonitorTitusScalingPolicyCommand.builder().build(), + Collections.singletonList( + TitusScalingPolicyModified.builder() + .account(command.description.getAccount()) + .region(command.description.getRegion()) + .jobId(command.description.getJobId()) + .scalingPolicyId(scalingPolicyId) + .build())); + } + + @Nonnull + 
private String createScalingPolicy( + @NotNull UpsertTitusScalingPolicy.UpsertTitusScalingPolicyCommand command, + @NotNull Saga saga, + TitusAutoscalingClient titusClient) { + saga.log( + "Creating Titus Scaling Policy %s:%s:%s", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId()); + + ScalingPolicy.Builder builder = command.description.toScalingPolicyBuilder(); + + PutPolicyRequest.Builder requestBuilder = + PutPolicyRequest.newBuilder() + .setScalingPolicy(builder) + .setJobId(command.description.getJobId()); + + ScalingPolicyID result = + retrySupport.retry( + () -> titusClient.createScalingPolicy(requestBuilder.build()), 10, 3000, false); + + saga.log( + "Created Titus Scaling Policy %s:%s:%s (scalingPolicyId: %s)", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId(), + result.getId()); + + return result.getId(); + } + + @Nonnull + private String updateScalingPolicy( + @NotNull UpsertTitusScalingPolicy.UpsertTitusScalingPolicyCommand command, + @NotNull Saga saga, + TitusAutoscalingClient titusClient) { + saga.log( + "Updating Titus Scaling Policy %s:%s:%s (scalingPolicyId: %s)", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId(), + command.description.getScalingPolicyID()); + + retrySupport.retry( + () -> { + titusClient.updateScalingPolicy( + UpdatePolicyRequest.newBuilder() + .setScalingPolicy(command.description.toScalingPolicyBuilder().build()) + .setPolicyId( + ScalingPolicyID.newBuilder() + .setId(command.description.getScalingPolicyID()) + .build()) + .build()); + return true; + }, + 10, + 3000, + false); + + saga.log( + "Updated Titus Scaling Policy %s:%s:%s (scalingPolicyId: %s)", + command.description.getAccount(), + command.description.getRegion(), + command.description.getJobId(), + command.description.getScalingPolicyID()); + + return command.description.getScalingPolicyID(); + } + + @Builder(builderClassName = "UpsertTitusScalingPolicyCommandBuilder", toBuilder = true) + @JsonDeserialize( + builder = + UpsertTitusScalingPolicy.UpsertTitusScalingPolicyCommand + .UpsertTitusScalingPolicyCommandBuilder.class) + @JsonTypeName("upsertTitusScalingPolicyCommand") + @Value + public static class UpsertTitusScalingPolicyCommand implements SagaCommand { + @Nonnull UpsertTitusScalingPolicyDescription description; + + @NonFinal EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class UpsertTitusScalingPolicyCommandBuilder {} + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpdateTitusJobProcessesAtomicOperationConverter.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpdateTitusJobProcessesAtomicOperationConverter.java new file mode 100644 index 00000000000..185c4aaff94 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpdateTitusJobProcessesAtomicOperationConverter.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
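The two RetrySupport.retry overloads used across these actions differ only in how the backoff is expressed. Side by side, with the values the surrounding code actually passes (request and jobRequest stand in for the real arguments):

// Fixed 3000 ms pause between up to 10 attempts (create/update scaling policy above):
ScalingPolicyID id =
    retrySupport.retry(() -> autoscalingClient.createScalingPolicy(request), 10, 3000, false);

// Exponential backoff starting at 100 ms, up to 8 attempts (SubmitTitusJob earlier):
String jobUri =
    retrySupport.retry(() -> titusClient.submitJob(jobRequest), 8, Duration.ofMillis(100), true);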
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.converters; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import com.netflix.spinnaker.clouddriver.titus.TitusOperation; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.UpdateTitusJobProcessesAtomicOperation; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@TitusOperation(AtomicOperations.UPDATE_JOB_PROCESSES) +@Component +public class UpdateTitusJobProcessesAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + private final ObjectMapper objectMapper; + + @Autowired + UpdateTitusJobProcessesAtomicOperationConverter(ObjectMapper objectMapper) { + this.objectMapper = objectMapper; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new UpdateTitusJobProcessesAtomicOperation(convertDescription(input)); + } + + @Override + public ServiceJobProcessesRequest convertDescription(Map input) { + ServiceJobProcessesRequest converted = + objectMapper.convertValue(input, ServiceJobProcessesRequest.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + return converted; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusJobDisruptionBudgetAtomicOperationConverter.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusJobDisruptionBudgetAtomicOperationConverter.java new file mode 100644 index 00000000000..3afa8fa89b1 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/converters/UpsertTitusJobDisruptionBudgetAtomicOperationConverter.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
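For reference, a sketch of the orchestration payload this converter consumes. The keys mirror ServiceJobProcessesRequest plus the "credentials" entry that getCredentialsObject resolves into a NetflixTitusCredentials; every value is made up, and the disableIncreaseDesired flag is assumed from the Titus API.

import java.util.HashMap;
import java.util.Map;

Map<String, Object> input = new HashMap<>();
input.put("credentials", "titusdevint");
input.put("region", "us-east-1");
input.put("jobId", "<job id>"); // placeholder
input.put("serviceJobProcesses", Map.of("disableIncreaseDesired", true)); // assumed flag

ServiceJobProcessesRequest description = converter.convertDescription(input);
AtomicOperation operation = converter.convertOperation(input);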
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.converters; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import com.netflix.spinnaker.clouddriver.titus.TitusOperation; +import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertJobDisruptionBudgetDescription; +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.UpsertTitusJobDisruptionBudgetAtomicOperation; +import java.util.Map; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@TitusOperation(AtomicOperations.UPSERT_DISRUPTION_BUDGET) +@Component +class UpsertTitusJobDisruptionBudgetAtomicOperationConverter + extends AbstractAtomicOperationsCredentialsSupport { + + private ObjectMapper objectMapper; + + @Autowired + UpsertTitusJobDisruptionBudgetAtomicOperationConverter(ObjectMapper objectMapper) { + this.objectMapper = objectMapper; + } + + @Override + public AtomicOperation convertOperation(Map input) { + return new UpsertTitusJobDisruptionBudgetAtomicOperation(convertDescription(input)); + } + + @Override + public UpsertJobDisruptionBudgetDescription convertDescription(Map input) { + UpsertJobDisruptionBudgetDescription converted = + objectMapper.convertValue(input, UpsertJobDisruptionBudgetDescription.class); + converted.setCredentials(getCredentialsObject(input.get("credentials").toString())); + return converted; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractTitusCredentialsDescription.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractTitusCredentialsDescription.java new file mode 100644 index 00000000000..be507cacbd1 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/AbstractTitusCredentialsDescription.java @@ -0,0 +1,40 @@ +package com.netflix.spinnaker.clouddriver.titus.deploy.description; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable; +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials; +import java.util.Optional; + +@JsonIgnoreProperties("credentials") +public abstract class AbstractTitusCredentialsDescription implements CredentialsNameable { + + private String account; + + private NetflixTitusCredentials credentials; + + @JsonIgnore + public NetflixTitusCredentials getCredentials() { + return credentials; + } + + public void setCredentials(NetflixTitusCredentials credentials) { + this.credentials = credentials; + } + + /** For JSON serde only. */ + @JsonProperty + public void setAccount(String account) { + this.account = account; + } + + /** For JSON serde only. 
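The point of the @JsonIgnoreProperties/@JsonIgnore pairing in this base class: the resolved NetflixTitusCredentials object never leaks into persisted JSON, only the account name does, and the getAccount() override just below prefers the live credentials' name over the raw field. A sketch, assuming a plain ObjectMapper and the ServiceJobProcessesRequest subclass added later in this patch:

ObjectMapper mapper = new ObjectMapper();

ServiceJobProcessesRequest description = new ServiceJobProcessesRequest();
description.setAccount("titusdevint"); // raw name, used until credentials are resolved

String json = mapper.writeValueAsString(description);
// json carries "account":"titusdevint" but never a "credentials" object; on the way
// back in, any "credentials" property in the payload is ignored rather than tripping
// an UnrecognizedPropertyException.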
*/ + @JsonProperty + @Override + public String getAccount() { + return Optional.ofNullable(this.credentials) + .map(NetflixTitusCredentials::getName) + .orElse(account); + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/ServiceJobProcessesRequest.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/ServiceJobProcessesRequest.java new file mode 100644 index 00000000000..d8009489f93 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/ServiceJobProcessesRequest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.description; + +import com.netflix.spinnaker.clouddriver.titus.client.model.ServiceJobProcesses; +import lombok.Data; + +@Data +public class ServiceJobProcessesRequest extends AbstractTitusCredentialsDescription { + + ServiceJobProcesses serviceJobProcesses; + String region; + String jobId; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescription.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescription.java new file mode 100644 index 00000000000..69c197c7cbc --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescription.java @@ -0,0 +1,515 @@ +package com.netflix.spinnaker.clouddriver.titus.deploy.description; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.netflix.spinnaker.clouddriver.deploy.DeployDescription; +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware; +import com.netflix.spinnaker.clouddriver.orchestration.events.OperationEvent; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.clouddriver.titus.client.model.DisruptionBudget; +import com.netflix.spinnaker.clouddriver.titus.client.model.Efs; +import com.netflix.spinnaker.clouddriver.titus.client.model.MigrationPolicy; +import com.netflix.spinnaker.clouddriver.titus.client.model.ServiceJobProcesses; +import com.netflix.spinnaker.clouddriver.titus.client.model.SignedAddressAllocations; +import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest; +import com.netflix.spinnaker.clouddriver.titus.model.DockerImage; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class TitusDeployDescription extends AbstractTitusCredentialsDescription + implements DeployDescription, ApplicationNameable, SagaContextAware { + private String region; + private String subnet; + private 
List<String> zones = new ArrayList<>();
+  private List<String> securityGroups = new ArrayList<>();
+  private List<String> securityGroupNames = new ArrayList<>();
+  private List<String> targetGroups = new ArrayList<>();
+  private List<String> softConstraints;
+  private List<String> hardConstraints;
+  private String application;
+  private String stack;
+  private String freeFormDetails;
+  private String imageId;
+  private Capacity capacity = new Capacity();
+  private Resources resources = new Resources();
+  private Map<String, String> env = new LinkedHashMap<>();
+  private Map<String, String> labels = new LinkedHashMap<>();
+  private Map<String, String> containerAttributes = new LinkedHashMap<>();
+  private String entryPoint;
+  private String cmd;
+  private String iamProfile;
+  private String capacityGroup;
+  private String user;
+  private Boolean inService;
+  private String jobType;
+  private int retries;
+  private int runtimeLimitSecs;
+  private List<String> interestingHealthProviderNames = new ArrayList<>();
+  private MigrationPolicy migrationPolicy;
+  private Boolean copySourceScalingPoliciesAndActions = true;
+  private Integer sequence;
+  private DisruptionBudget disruptionBudget;
+  private SubmitJobRequest.Constraints constraints = new SubmitJobRequest.Constraints();
+  private ServiceJobProcesses serviceJobProcesses;
+  @JsonIgnore private SagaContext sagaContext;
+
+  /**
+   * Will be overridden by the label {@code PrepareTitusDeploy.USE_APPLICATION_DEFAULT_SG_LABEL}.
+   *

TODO(rz): Redundant; migrate off either this property or the label (preferably the label).
+   */
+  @Deprecated private boolean useApplicationDefaultSecurityGroup = true;
+
+  /**
+   * If false, the newly created server group will not pick up scaling policies and actions from
+   * an ancestor group.
+   */
+  private boolean copySourceScalingPolicies = true;
+
+  private List<OperationEvent> events = new ArrayList<>();
+  private Source source = new Source();
+  private Efs efs;
+
+  @Override
+  public Collection<String> getApplications() {
+    return Arrays.asList(application);
+  }
+
+  @Override
+  public void setSagaContext(SagaContext sagaContext) {
+    this.sagaContext = sagaContext;
+  }
+
+  @Nullable
+  public SagaContext getSagaContext() {
+    return sagaContext;
+  }
+
+  /** For Jackson deserialization. */
+  public void setApplications(List<String> applications) {
+    if (!applications.isEmpty()) {
+      application = applications.get(0);
+    }
+  }
+
+  @Nonnull
+  public SubmitJobRequest toSubmitJobRequest(
+      @Nonnull DockerImage dockerImage, @Nonnull String jobName, String user) {
+    final SubmitJobRequest.SubmitJobRequestBuilder submitJobRequest =
+        SubmitJobRequest.builder()
+            .jobName(jobName)
+            .user(user)
+            .application(application)
+            .dockerImageName(dockerImage.getImageName())
+            .instancesMin(capacity.getMin())
+            .instancesMax(capacity.getMax())
+            .instancesDesired(capacity.getDesired())
+            .cpu(resources.getCpu())
+            .memory(resources.getMemory())
+            .sharedMemory(resources.getSharedMemory())
+            .disk(resources.getDisk())
+            .retries(retries)
+            .runtimeLimitSecs(runtimeLimitSecs)
+            .gpu(resources.getGpu())
+            .networkMbps(resources.getNetworkMbps())
+            .efs(efs)
+            .ports(resources.getPorts())
+            .env(env)
+            .allocateIpAddress(resources.isAllocateIpAddress())
+            .stack(stack)
+            .detail(freeFormDetails)
+            .entryPoint(entryPoint)
+            .iamProfile(iamProfile)
+            .capacityGroup(capacityGroup)
+            .labels(labels)
+            .inService(inService)
+            .migrationPolicy(migrationPolicy)
+            .credentials(getCredentials().getName())
+            .containerAttributes(containerAttributes)
+            .disruptionBudget(disruptionBudget)
+            .signedAddressAllocations(resources.getSignedAddressAllocations())
+            .serviceJobProcesses(serviceJobProcesses);
+
+    if (cmd != null && !cmd.isEmpty()) {
+      submitJobRequest.cmd(cmd);
+    }
+
+    if (!securityGroups.isEmpty()) {
+      submitJobRequest.securityGroups(securityGroups);
+    }
+
+    if (dockerImage.getImageDigest() != null) {
+      submitJobRequest.dockerDigest(dockerImage.getImageDigest());
+    } else {
+      submitJobRequest.dockerImageVersion(dockerImage.getImageVersion());
+    }
+
+    /*
+     * The Titus API now supports key/value pairs for hard and soft constraints, but the original
+     * interface we supported was just a list of keys. To keep this change backwards compatible,
+     * we give preference to the key/value constraints map over the soft and hard constraint
+     * lists.
+     */
+    if (constraints.getHard() != null || constraints.getSoft() != null) {
+      submitJobRequest.containerConstraints(constraints);
+    } else {
+      log.warn("Use of deprecated constraints payload: {}-{}", application, stack);
+
+      List<SubmitJobRequest.Constraint> constraints = new ArrayList<>();
+      if (hardConstraints != null) {
+        hardConstraints.forEach(c -> constraints.add(SubmitJobRequest.Constraint.hard(c)));
+      }
+      if (softConstraints != null) {
+        softConstraints.forEach(c -> constraints.add(SubmitJobRequest.Constraint.soft(c)));
+      }
+      submitJobRequest.constraints(constraints);
+    }
+
+    if (jobType != null) {
+      submitJobRequest.jobType(jobType);
+    }
+
+    return submitJobRequest.build();
+  }
+
+  @Data
+  public static class Capacity {
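// (Illustration, not part of the patch.) The two constraint payload shapes that
// toSubmitJobRequest() above accepts, with made-up constraint names:
//
//   Preferred key/value form, carried by SubmitJobRequest.Constraints:
//     "constraints": {"hard": {"ZoneBalance": "true"}, "soft": {"UniqueHost": "true"}}
//
//   Deprecated list form, converted element-by-element through
//   SubmitJobRequest.Constraint.hard(...) / SubmitJobRequest.Constraint.soft(...):
//     "hardConstraints": ["ZoneBalance"],
//     "softConstraints": ["UniqueHost"]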
private int min; + private int max; + private int desired; + } + + @Data + public static class Resources { + private int cpu; + private int memory; + private int sharedMemory; + private int disk; + private int gpu; + private int networkMbps; + private int[] ports; + private boolean allocateIpAddress; + private List signedAddressAllocations; + } + + @Data + public static class Source { + private String account; + private String region; + private String asgName; + private boolean useSourceCapacity; + } + + public String getRegion() { + return region; + } + + public void setRegion(String region) { + this.region = region; + } + + public String getSubnet() { + return subnet; + } + + public void setSubnet(String subnet) { + this.subnet = subnet; + } + + public List getZones() { + return zones; + } + + public void setZones(List zones) { + this.zones = zones; + } + + public List getSecurityGroups() { + return securityGroups; + } + + public void setSecurityGroups(List securityGroups) { + this.securityGroups = securityGroups; + } + + public List getSecurityGroupNames() { + return securityGroupNames; + } + + public void setSecurityGroupNames(List securityGroupNames) { + this.securityGroupNames = securityGroupNames; + } + + public List getTargetGroups() { + return targetGroups; + } + + public void setTargetGroups(List targetGroups) { + this.targetGroups = targetGroups; + } + + public List getSoftConstraints() { + return softConstraints; + } + + public void setSoftConstraints(List softConstraints) { + this.softConstraints = softConstraints; + } + + public List getHardConstraints() { + return hardConstraints; + } + + public void setHardConstraints(List hardConstraints) { + this.hardConstraints = hardConstraints; + } + + public String getApplication() { + return application; + } + + public void setApplication(String application) { + this.application = application; + } + + public String getStack() { + return stack; + } + + public void setStack(String stack) { + this.stack = stack; + } + + public String getFreeFormDetails() { + return freeFormDetails; + } + + public void setFreeFormDetails(String freeFormDetails) { + this.freeFormDetails = freeFormDetails; + } + + public String getImageId() { + return imageId; + } + + public void setImageId(String imageId) { + this.imageId = imageId; + } + + public Capacity getCapacity() { + return capacity; + } + + public void setCapacity(Capacity capacity) { + this.capacity = capacity; + } + + public Resources getResources() { + return resources; + } + + public void setResources(Resources resources) { + this.resources = resources; + } + + public Map getEnv() { + return env; + } + + public void setEnv(Map env) { + this.env = env; + } + + public Map getLabels() { + return labels; + } + + public void setLabels(Map labels) { + this.labels = labels; + } + + public Map getContainerAttributes() { + return containerAttributes; + } + + public void setContainerAttributes(Map containerAttributes) { + this.containerAttributes = containerAttributes; + } + + public String getEntryPoint() { + return entryPoint; + } + + public void setEntryPoint(String entryPoint) { + this.entryPoint = entryPoint; + } + + public String getCmd() { + return cmd; + } + + public void setCmd(String cmd) { + this.cmd = cmd; + } + + public String getIamProfile() { + return iamProfile; + } + + public void setIamProfile(String iamProfile) { + this.iamProfile = iamProfile; + } + + public String getCapacityGroup() { + return capacityGroup; + } + + public void setCapacityGroup(String capacityGroup) { + 
this.capacityGroup = capacityGroup; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public Boolean getInService() { + return inService; + } + + public void setInService(Boolean inService) { + this.inService = inService; + } + + public String getJobType() { + return jobType; + } + + public void setJobType(String jobType) { + this.jobType = jobType; + } + + public int getRetries() { + return retries; + } + + public void setRetries(int retries) { + this.retries = retries; + } + + public int getRuntimeLimitSecs() { + return runtimeLimitSecs; + } + + public void setRuntimeLimitSecs(int runtimeLimitSecs) { + this.runtimeLimitSecs = runtimeLimitSecs; + } + + public List getInterestingHealthProviderNames() { + return interestingHealthProviderNames; + } + + public void setInterestingHealthProviderNames(List interestingHealthProviderNames) { + this.interestingHealthProviderNames = interestingHealthProviderNames; + } + + public MigrationPolicy getMigrationPolicy() { + return migrationPolicy; + } + + public void setMigrationPolicy(MigrationPolicy migrationPolicy) { + this.migrationPolicy = migrationPolicy; + } + + public Boolean getCopySourceScalingPoliciesAndActions() { + return copySourceScalingPoliciesAndActions; + } + + public void setCopySourceScalingPoliciesAndActions(Boolean copySourceScalingPoliciesAndActions) { + this.copySourceScalingPoliciesAndActions = copySourceScalingPoliciesAndActions; + } + + public Integer getSequence() { + return sequence; + } + + public void setSequence(Integer sequence) { + this.sequence = sequence; + } + + public DisruptionBudget getDisruptionBudget() { + return disruptionBudget; + } + + public void setDisruptionBudget(DisruptionBudget disruptionBudget) { + this.disruptionBudget = disruptionBudget; + } + + public SubmitJobRequest.Constraints getConstraints() { + return constraints; + } + + public void setConstraints(SubmitJobRequest.Constraints constraints) { + this.constraints = constraints; + } + + public ServiceJobProcesses getServiceJobProcesses() { + return serviceJobProcesses; + } + + public void setServiceJobProcesses(ServiceJobProcesses serviceJobProcesses) { + this.serviceJobProcesses = serviceJobProcesses; + } + + public boolean isUseApplicationDefaultSecurityGroup() { + return useApplicationDefaultSecurityGroup; + } + + public void setUseApplicationDefaultSecurityGroup(boolean useApplicationDefaultSecurityGroup) { + this.useApplicationDefaultSecurityGroup = useApplicationDefaultSecurityGroup; + } + + public boolean isCopySourceScalingPolicies() { + return copySourceScalingPolicies; + } + + public void setCopySourceScalingPolicies(boolean copySourceScalingPolicies) { + this.copySourceScalingPolicies = copySourceScalingPolicies; + } + + @Override + public List getEvents() { + return events; + } + + public void setEvents(List events) { + this.events = events; + } + + public Source getSource() { + return source; + } + + public void setSource(Source source) { + this.source = source; + } + + public Efs getEfs() { + return efs; + } + + public void setEfs(Efs efs) { + this.efs = efs; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertJobDisruptionBudgetDescription.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertJobDisruptionBudgetDescription.java new file mode 100644 index 00000000000..323feca71e5 --- /dev/null +++ 
b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/description/UpsertJobDisruptionBudgetDescription.java @@ -0,0 +1,27 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.description; + +import com.netflix.spinnaker.clouddriver.titus.client.model.DisruptionBudget; +import lombok.Data; + +@Data +public class UpsertJobDisruptionBudgetDescription extends AbstractTitusCredentialsDescription { + + String jobId; + String region; + DisruptionBudget disruptionBudget; +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusJobSubmitted.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusJobSubmitted.java new file mode 100644 index 00000000000..79e470c6bc3 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusJobSubmitted.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.events; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaEvent; +import com.netflix.spinnaker.clouddriver.titus.JobType; +import java.util.Map; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; + +@Builder(builderClassName = "TitusJobSubmittedBuilder", toBuilder = true) +@JsonDeserialize(builder = TitusJobSubmitted.TitusJobSubmittedBuilder.class) +@JsonTypeName("titusJobSubmitted") +@Value +public class TitusJobSubmitted implements SagaEvent { + + @Nonnull private final Map serverGroupNameByRegion; + @Nonnull private final String jobUri; + @Nonnull private final JobType jobType; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class TitusJobSubmittedBuilder {} +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusLoadBalancerAttached.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusLoadBalancerAttached.java new file mode 100644 index 00000000000..973f79f9de0 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusLoadBalancerAttached.java @@ -0,0 +1,45 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
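Downstream consumers locate TitusJobSubmitted on the saga the same way TitusServiceJobPredicate filters for its command earlier in this patch. A sketch, assuming a saga that has already run SubmitTitusJob; the region key is illustrative:

String serverGroupName =
    saga.getEvents().stream()
        .filter(e -> e instanceof TitusJobSubmitted)
        .map(TitusJobSubmitted.class::cast)
        .findFirst()
        .map(e -> e.getServerGroupNameByRegion().get("us-east-1"))
        .orElseThrow(() -> new TitusException("Titus job was never submitted"));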
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.events; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaEvent; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; + +@Builder(builderClassName = "TitusLoadBalancerAttachedBuilder", toBuilder = true) +@JsonDeserialize(builder = TitusLoadBalancerAttached.TitusLoadBalancerAttachedBuilder.class) +@JsonTypeName("titusLoadBalancerAttached") +@Value +public class TitusLoadBalancerAttached implements SagaEvent { + + @Nonnull private final String jobUri; + @Nonnull private final String targetGroupArn; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class TitusLoadBalancerAttachedBuilder {} +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyCopied.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyCopied.java new file mode 100644 index 00000000000..97d4a34ccfb --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyCopied.java @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.events; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaEvent; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; + +@Builder(builderClassName = "TitusScalingPolicyCopiedBuilder", toBuilder = true) +@JsonDeserialize(builder = TitusScalingPolicyCopied.TitusScalingPolicyCopiedBuilder.class) +@JsonTypeName("titusScalingPolicyCopied") +@Value +public class TitusScalingPolicyCopied implements SagaEvent { + + @Nonnull private final String serverGroupName; + @Nonnull private final String region; + @Nonnull private final String sourcePolicyId; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class TitusScalingPolicyCopiedBuilder {} +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyDeleted.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyDeleted.java new file mode 100644 index 00000000000..fd9f8a7bbb0 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyDeleted.java @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.events; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaEvent; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; + +@Builder(builderClassName = "TitusScalingPolicyDeletedBuilder", toBuilder = true) +@JsonDeserialize(builder = TitusScalingPolicyDeleted.TitusScalingPolicyDeletedBuilder.class) +@JsonTypeName("titusScalingPolicyDeleted") +@Value +public class TitusScalingPolicyDeleted implements SagaEvent { + + @Nonnull private final String region; + @Nonnull private final String policyId; + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class TitusScalingPolicyDeletedBuilder {} +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyModified.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyModified.java new file mode 100644 index 00000000000..27c5106bacb --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/events/TitusScalingPolicyModified.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.events; + +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; +import com.netflix.spinnaker.clouddriver.event.EventMetadata; +import com.netflix.spinnaker.clouddriver.saga.SagaEvent; +import javax.annotation.Nonnull; +import lombok.Builder; +import lombok.Value; +import lombok.experimental.NonFinal; +import org.jetbrains.annotations.NotNull; + +@Builder(builderClassName = "TitusScalingPolicyModifiedBuilder", toBuilder = true) +@JsonDeserialize(builder = TitusScalingPolicyModified.TitusScalingPolicyModifiedBuilder.class) +@JsonTypeName("titusScalingPolicyModified") +@Value +public class TitusScalingPolicyModified implements SagaEvent { + @Nonnull private final String account; + + @Nonnull private final String region; + + @Nonnull private final String jobId; + + @Nonnull private final String scalingPolicyId; + + @NonFinal private EventMetadata metadata; + + @Override + public void setMetadata(@NotNull EventMetadata metadata) { + this.metadata = metadata; + } + + @JsonPOJOBuilder(withPrefix = "") + public static class TitusScalingPolicyModifiedBuilder {} +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/DestroyTitusJobCompletionHandler.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/DestroyTitusJobCompletionHandler.java new file mode 100644 index 00000000000..2e9ed7711f2 --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/DestroyTitusJobCompletionHandler.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.deploy.handlers;
+
+import com.netflix.spinnaker.clouddriver.orchestration.events.DeleteServerGroupEvent;
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaCompletionHandler;
+import com.netflix.spinnaker.clouddriver.saga.models.Saga;
+import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DestroyTitusJob;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription;
+import java.util.Optional;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.springframework.stereotype.Component;
+
+@Component
+public class DestroyTitusJobCompletionHandler
+    implements SagaCompletionHandler<Optional<DeleteServerGroupEvent>> {
+
+  @Nullable
+  @Override
+  public Optional<DeleteServerGroupEvent> handle(@Nonnull Saga completedSaga) {
+    final DestroyTitusJobDescription description =
+        completedSaga.getEvent(DestroyTitusJob.DestroyTitusJobCommand.class).getDescription();
+
+    if (description.getServerGroupName() == null) {
+      return Optional.empty();
+    }
+
+    return Optional.of(
+        new DeleteServerGroupEvent(
+            TitusCloudProvider.ID,
+            // titus entity tags are created using the account name (and not the accountId)
+            description.getAccount(),
+            description.getRegion(),
+            description.getServerGroupName()));
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployCompletionHandler.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployCompletionHandler.java
new file mode 100644
index 00000000000..7aa87d5d943
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployCompletionHandler.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.titus.deploy.handlers;
+
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaCompletionHandler;
+import com.netflix.spinnaker.clouddriver.saga.models.Saga;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import com.netflix.spinnaker.clouddriver.titus.TitusUtils;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.SubmitTitusJob;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription;
+import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusJobSubmitted;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class TitusDeployCompletionHandler implements SagaCompletionHandler<TitusDeploymentResult> {
+
+  private final AccountCredentialsProvider accountCredentialsProvider;
+
+  @Autowired
+  public TitusDeployCompletionHandler(AccountCredentialsProvider accountCredentialsProvider) {
+    this.accountCredentialsProvider = accountCredentialsProvider;
+  }
+
+  @Nullable
+  @Override
+  public TitusDeploymentResult handle(@Nonnull Saga completedSaga) {
+    final TitusDeployDescription description =
+        completedSaga.getEvent(SubmitTitusJob.SubmitTitusJobCommand.class).getDescription();
+
+    return TitusDeploymentResult.from(
+        description,
+        completedSaga.getEvent(TitusJobSubmitted.class),
+        completedSaga.getLogs(),
+        TitusUtils.getAccountId(accountCredentialsProvider, description.getAccount()));
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandler.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandler.java
new file mode 100644
index 00000000000..5a7eea98c47
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandler.java
@@ -0,0 +1,115 @@
+package com.netflix.spinnaker.clouddriver.titus.deploy.handlers;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.deploy.DeployDescription;
+import com.netflix.spinnaker.clouddriver.deploy.DeployHandler;
+import com.netflix.spinnaker.clouddriver.orchestration.events.CreateServerGroupEvent;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App.LoadFront50AppCommand;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge.ApplyCommandWrapper;
+import com.netflix.spinnaker.clouddriver.saga.SagaService;
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow;
+import com.netflix.spinnaker.clouddriver.titus.TitusCloudProvider;
+import com.netflix.spinnaker.clouddriver.titus.TitusException;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.AttachTitusServiceLoadBalancers;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.CopyTitusServiceScalingPolicies;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.PrepareTitusDeploy;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.PrepareTitusDeploy.PrepareTitusDeployCommand;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.SubmitTitusJob;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.TitusServiceJobPredicate;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription;
+import java.util.List;
+import java.util.Objects;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Slf4j
+@Component
+public class TitusDeployHandler implements DeployHandler<TitusDeployDescription> {
+  private final SagaService sagaService;
+
+  @Autowired
+  public TitusDeployHandler(SagaService sagaService) {
+    this.sagaService = sagaService;
+  }
+
+  private static Task getTask() {
+    return TaskRepository.threadLocalTask.get();
+  }
+
+  @Override
+  public TitusDeploymentResult handle(
+      final TitusDeployDescription inputDescription, List priorOutputs) {
+    Objects.requireNonNull(inputDescription.getSagaContext(), "A saga context must be provided");
+
+    SagaFlow flow =
+        new SagaFlow()
+            .then(LoadFront50App.class)
+            .then(PrepareTitusDeploy.class)
+            .then(SubmitTitusJob.class)
+            .on(
+                TitusServiceJobPredicate.class,
+                sagaFlow -> {
+                  sagaFlow
+                      .then(AttachTitusServiceLoadBalancers.class)
+                      .then(CopyTitusServiceScalingPolicies.class);
+                })
+            .exceptionHandler(TitusExceptionHandler.class)
+            .completionHandler(TitusDeployCompletionHandler.class);
+
+    final TitusDeploymentResult result =
+        new SagaAtomicOperationBridge(sagaService, inputDescription.getSagaContext().getSagaId())
+            .apply(
+                ApplyCommandWrapper.builder()
+                    .sagaName(TitusDeployHandler.class.getSimpleName())
+                    .inputDescription(inputDescription)
+                    .priorOutputs(priorOutputs)
+                    .sagaContext(inputDescription.getSagaContext())
+                    .task(getTask())
+                    .sagaFlow(flow)
+                    .initialCommand(
+                        LoadFront50AppCommand.builder()
+                            .appName(inputDescription.getApplication())
+                            .nextCommand(
+                                PrepareTitusDeployCommand.builder()
+                                    .description(inputDescription)
+                                    .build())
+                            .allowMissing(true)
+                            .build())
+                    .build());
+
+    if (result == null) {
+      // "This should never happen"
+      throw new TitusException("Failed to complete Titus deploy: No deployment result created");
+    }
+
+    // TODO(rz): Ew, side effects...
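+    // The forEach below intentionally mutates the input description's event list;
+    // consumers of the atomic operation can then read these CreateServerGroupEvents
+    // back off the description once the saga completes. A sketch of one emitted
+    // event, with hypothetical values:
+    //   new CreateServerGroupEvent("titus", "titusAccountId", "us-east-1", "app-stack-v001")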
+    result
+        .getServerGroupNames()
+        .forEach(
+            serverGroupName ->
+                inputDescription
+                    .getEvents()
+                    .add(
+                        new CreateServerGroupEvent(
+                            TitusCloudProvider.ID,
+                            result.getTitusAccountId(),
+                            inputDescription.getRegion(),
+                            serverGroupName)));
+
+    return result;
+  }
+
+  @Override
+  public boolean handles(DeployDescription description) {
+    return description instanceof TitusDeployDescription;
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeploymentResult.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeploymentResult.java
new file mode 100644
index 00000000000..cc72e713f82
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeploymentResult.java
@@ -0,0 +1,56 @@
+package com.netflix.spinnaker.clouddriver.titus.deploy.handlers;
+
+import static java.lang.String.format;
+
+import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult;
+import com.netflix.spinnaker.clouddriver.titus.JobType;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription;
+import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusJobSubmitted;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.Getter;
+
+public class TitusDeploymentResult extends DeploymentResult {
+
+  @Getter private String titusAccountId;
+  @Getter private String jobUri;
+
+  public static TitusDeploymentResult from(
+      TitusDeployDescription description,
+      TitusJobSubmitted event,
+      List<String> messages,
+      String titusAccountId) {
+    TitusDeploymentResult result = new TitusDeploymentResult();
+
+    if (JobType.isEqual(description.getJobType(), JobType.SERVICE)) {
+      forServiceJob(result, event.getServerGroupNameByRegion());
+    } else {
+      forBatchJob(result, description.getRegion(), event.getJobUri());
+    }
+
+    result.jobUri = event.getJobUri();
+    result.titusAccountId = titusAccountId;
+    result.setMessages(messages);
+
+    return result;
+  }
+
+  /** Batch jobs use the "deployedNames" fields of the deployment result. */
+  private static void forBatchJob(TitusDeploymentResult result, String region, String jobUri) {
+    result.setDeployedNames(Collections.singletonList(jobUri));
+    result.setDeployedNamesByLocation(
+        Collections.singletonMap(region, Collections.singletonList(jobUri)));
+  }
+
+  /** Service jobs use the "serverGroupNames" fields for the deployment result. */
+  private static void forServiceJob(
+      TitusDeploymentResult result, Map<String, String> serverGroupNameByRegion) {
+    result.setServerGroupNames(
+        serverGroupNameByRegion.entrySet().stream()
+            .map(e -> format("%s:%s", e.getKey(), e.getValue()))
+            .collect(Collectors.toList()));
+    result.setServerGroupNameByRegion(serverGroupNameByRegion);
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusExceptionHandler.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusExceptionHandler.java
new file mode 100644
index 00000000000..a9240485128
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusExceptionHandler.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.handlers; + +import com.netflix.spinnaker.clouddriver.saga.flow.SagaExceptionHandler; +import com.netflix.spinnaker.clouddriver.titus.TitusException; +import io.grpc.StatusRuntimeException; +import javax.annotation.Nonnull; +import org.springframework.stereotype.Component; + +@Component +public class TitusExceptionHandler implements SagaExceptionHandler { + + @Nonnull + @Override + public Exception handle(@Nonnull Exception exception) { + if (exception instanceof StatusRuntimeException) { + StatusRuntimeException statusRuntimeException = (StatusRuntimeException) exception; + return new TitusException(statusRuntimeException, isRetryable(statusRuntimeException)); + } + + return exception; + } + + private boolean isRetryable(StatusRuntimeException statusRuntimeException) { + switch (statusRuntimeException.getStatus().getCode()) { + case ABORTED: + case DEADLINE_EXCEEDED: + case INTERNAL: + case RESOURCE_EXHAUSTED: + case UNAVAILABLE: + case UNKNOWN: + return true; + case INVALID_ARGUMENT: + return invalidArgumentConditional(statusRuntimeException.getMessage()); + default: + return false; + } + } + + private boolean invalidArgumentConditional(String statusRuntimeExceptionMessage) { + if (statusRuntimeExceptionMessage == null) { + return false; + } + + boolean rateExceeded = statusRuntimeExceptionMessage.toLowerCase().contains("rate exceeded"); + boolean assumeRoleError = + statusRuntimeExceptionMessage.toLowerCase().contains("jobiamvalidator") + && statusRuntimeExceptionMessage + .toLowerCase() + .contains("titus cannot assume into role"); + + return rateExceeded || assumeRoleError; + } +} diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/UpsertTitusScalingPolicyCompletionHandler.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/UpsertTitusScalingPolicyCompletionHandler.java new file mode 100644 index 00000000000..97fc11b8a1d --- /dev/null +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/UpsertTitusScalingPolicyCompletionHandler.java @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.deploy.handlers;
+
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaCompletionHandler;
+import com.netflix.spinnaker.clouddriver.saga.models.Saga;
+import com.netflix.spinnaker.clouddriver.titus.deploy.events.TitusScalingPolicyModified;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.springframework.stereotype.Component;
+
+@Component
+public class UpsertTitusScalingPolicyCompletionHandler
+    implements SagaCompletionHandler<TitusScalingPolicyModified> {
+
+  @Nullable
+  @Override
+  public TitusScalingPolicyModified handle(@Nonnull Saga completedSaga) {
+    return completedSaga.getEvent(TitusScalingPolicyModified.class);
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DeleteTitusScalingPolicyAtomicOperation.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DeleteTitusScalingPolicyAtomicOperation.java
new file mode 100644
index 00000000000..1cac00b9101
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DeleteTitusScalingPolicyAtomicOperation.java
@@ -0,0 +1,37 @@
+package com.netflix.spinnaker.clouddriver.titus.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge;
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DeleteTitusScalingPolicy;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.DeleteTitusScalingPolicy.DeleteTitusScalingPolicyCommand;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.DeleteTitusScalingPolicyDescription;
+import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler;
+import java.util.List;
+
+public class DeleteTitusScalingPolicyAtomicOperation
+    extends AbstractSagaAtomicOperation<DeleteTitusScalingPolicyDescription, Object, Void> {
+
+  public DeleteTitusScalingPolicyAtomicOperation(DeleteTitusScalingPolicyDescription description) {
+    super(description);
+  }
+
+  @Override
+  protected SagaFlow buildSagaFlow(List priorOutputs) {
+    return new SagaFlow()
+        .then(DeleteTitusScalingPolicy.class)
+        .exceptionHandler(TitusExceptionHandler.class);
+  }
+
+  @Override
+  protected void configureSagaBridge(
+      SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) {
+    builder.initialCommand(
+        DeleteTitusScalingPolicyCommand.builder().description(description).build());
+  }
+
+  @Override
+  protected Void parseSagaResult(Object result) {
+    return null;
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpdateTitusJobProcessesAtomicOperation.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpdateTitusJobProcessesAtomicOperation.java
new file mode 100644
index 00000000000..0e4fd7267fd
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpdateTitusJobProcessesAtomicOperation.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge;
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.UpsertTitusJobProcesses;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest;
+import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler;
+import java.util.List;
+import org.jetbrains.annotations.NotNull;
+
+public class UpdateTitusJobProcessesAtomicOperation
+    extends AbstractSagaAtomicOperation<ServiceJobProcessesRequest, Void, Void> {
+  public UpdateTitusJobProcessesAtomicOperation(ServiceJobProcessesRequest description) {
+    super(description);
+  }
+
+  @NotNull
+  @Override
+  protected SagaFlow buildSagaFlow(List priorOutputs) {
+    return new SagaFlow()
+        .then(UpsertTitusJobProcesses.class)
+        .exceptionHandler(TitusExceptionHandler.class);
+  }
+
+  @Override
+  protected void configureSagaBridge(
+      @NotNull SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) {
+    builder.initialCommand(
+        UpsertTitusJobProcesses.UpsertTitusJobProcessesCommand.builder()
+            .description(description)
+            .build());
+  }
+
+  @Override
+  protected Void parseSagaResult(@NotNull Void result) {
+    return null;
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusJobDisruptionBudgetAtomicOperation.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusJobDisruptionBudgetAtomicOperation.java
new file mode 100644
index 00000000000..e28eb263d9e
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/ops/UpsertTitusJobDisruptionBudgetAtomicOperation.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.AbstractSagaAtomicOperation;
+import com.netflix.spinnaker.clouddriver.orchestration.sagas.SagaAtomicOperationBridge;
+import com.netflix.spinnaker.clouddriver.saga.flow.SagaFlow;
+import com.netflix.spinnaker.clouddriver.titus.deploy.actions.UpsertTitusJobDisruptionBudget;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertJobDisruptionBudgetDescription;
+import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler;
+import java.util.List;
+import javax.annotation.Nonnull;
+import org.jetbrains.annotations.NotNull;
+
+public class UpsertTitusJobDisruptionBudgetAtomicOperation
+    extends AbstractSagaAtomicOperation<UpsertJobDisruptionBudgetDescription, Void, Void> {
+  public UpsertTitusJobDisruptionBudgetAtomicOperation(
+      UpsertJobDisruptionBudgetDescription description) {
+    super(description);
+  }
+
+  @NotNull
+  @Override
+  protected SagaFlow buildSagaFlow(List priorOutputs) {
+    return new SagaFlow()
+        .then(UpsertTitusJobDisruptionBudget.class)
+        .exceptionHandler(TitusExceptionHandler.class);
+  }
+
+  @Override
+  protected void configureSagaBridge(
+      @NotNull @Nonnull
+          SagaAtomicOperationBridge.ApplyCommandWrapper.ApplyCommandWrapperBuilder builder) {
+    builder.initialCommand(
+        UpsertTitusJobDisruptionBudget.UpsertTitusJobDisruptionBudgetCommand.builder()
+            .description(description)
+            .build());
+  }
+
+  @Override
+  protected Void parseSagaResult(@NotNull Void result) {
+    return null;
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/validators/UpsertTitusScalingPolicyDescriptionValidator.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/validators/UpsertTitusScalingPolicyDescriptionValidator.java
new file mode 100644
index 00000000000..85974fc44d5
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/deploy/validators/UpsertTitusScalingPolicyDescriptionValidator.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2020 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.titus.deploy.validators;
+
+import com.netflix.spinnaker.clouddriver.deploy.ValidationErrors;
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations;
+import com.netflix.spinnaker.clouddriver.titus.TitusOperation;
+import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertTitusScalingPolicyDescription;
+import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+@TitusOperation(AtomicOperations.UPSERT_SCALING_POLICY)
+class UpsertTitusScalingPolicyDescriptionValidator
+    extends AbstractTitusDescriptionValidatorSupport<UpsertTitusScalingPolicyDescription> {
+
+  @Autowired
+  UpsertTitusScalingPolicyDescriptionValidator() {
+    super("upsertTitusScalingPolicyDescription");
+  }
+
+  @Override
+  public void validate(
+      List priorDescriptions,
+      UpsertTitusScalingPolicyDescription description,
+      ValidationErrors errors) {
+    super.validate(priorDescriptions, description, errors);
+
+    if (description.getJobId() == null) {
+      errors.rejectValue(
+          "jobId",
+          "upsertTitusScalingPolicyDescription.jobId.empty",
+          "A Titus job identifier (jobId) must be specified");
+    }
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/IllegalOperationStateException.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/IllegalOperationStateException.java
new file mode 100644
index 00000000000..c13d310ce06
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/IllegalOperationStateException.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.titus.exceptions;
+
+import com.netflix.spinnaker.clouddriver.titus.TitusException;
+
+public class IllegalOperationStateException extends TitusException {
+  public IllegalOperationStateException(String message) {
+    super(message);
+  }
+
+  public IllegalOperationStateException(String message, String userMessage) {
+    super(message, userMessage);
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/InsufficientDeploySourceStateException.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/InsufficientDeploySourceStateException.java
new file mode 100644
index 00000000000..ae4f407cb53
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/InsufficientDeploySourceStateException.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.titus.exceptions;
+
+import com.netflix.spinnaker.clouddriver.titus.TitusException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Thrown when a Titus deployment does not have sufficient information from a source server group.
+ */
+public class InsufficientDeploySourceStateException extends TitusException {
+  private final Map<String, Object> sourceState = new HashMap<>();
+
+  public InsufficientDeploySourceStateException(
+      String message, String account, String region, String asgName) {
+    super(message);
+    sourceState.put("account", account);
+    sourceState.put("region", region);
+    sourceState.put("asgName", asgName);
+    setRetryable(false);
+  }
+
+  @Override
+  public Map<String, Object> getAdditionalAttributes() {
+    return sourceState;
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/JobNotFoundException.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/JobNotFoundException.java
new file mode 100644
index 00000000000..bcfe6028d68
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/JobNotFoundException.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.spinnaker.clouddriver.titus.exceptions;
+
+import com.netflix.spinnaker.clouddriver.titus.TitusException;
+
+public class JobNotFoundException extends TitusException {
+  public JobNotFoundException(String message) {
+    super(message);
+    setRetryable(true);
+  }
+}
diff --git a/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/UnexpectedAccountCredentialsTypeException.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/UnexpectedAccountCredentialsTypeException.java
new file mode 100644
index 00000000000..8e2af6347e1
--- /dev/null
+++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/exceptions/UnexpectedAccountCredentialsTypeException.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.exceptions; + +import com.netflix.spinnaker.clouddriver.titus.TitusException; + +public class UnexpectedAccountCredentialsTypeException extends TitusException { + public UnexpectedAccountCredentialsTypeException(String message, String userMessage) { + super(message, userMessage); + } +} diff --git a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusError.java b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/model/TitusError.java similarity index 99% rename from clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusError.java rename to clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/model/TitusError.java index 15b45e1e471..4a8d42ce1f6 100644 --- a/clouddriver-titus/src/main/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusError.java +++ b/clouddriver-titus/src/main/java/com/netflix/spinnaker/clouddriver/titus/model/TitusError.java @@ -27,5 +27,4 @@ public void setMessage(String message) { } private String message; - } diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/TitusExceptionHandlerSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/TitusExceptionHandlerSpec.groovy new file mode 100644 index 00000000000..db35743b546 --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/TitusExceptionHandlerSpec.groovy @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus + +import com.netflix.spinnaker.clouddriver.titus.deploy.handlers.TitusExceptionHandler +import io.grpc.Status +import io.grpc.StatusRuntimeException +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class TitusExceptionHandlerSpec extends Specification { + + @Subject + TitusExceptionHandler exceptionHandler = new TitusExceptionHandler() + + def "should passthrough an exception"() { + given: + Exception downstream = new RuntimeException("exception") + + when: + Exception exception = exceptionHandler.handle(downstream) + + then: + exception == downstream + } + + @Unroll + def "Should determine if StatusRuntimeException is retryable based on #status"() { + given: + StatusRuntimeException downstream = new StatusRuntimeException(status) + + when: + TitusException exception = exceptionHandler.handle(downstream) as TitusException + + then: + exception.retryable == expected + + where: + status | expected + Status.INVALID_ARGUMENT | false + Status.DEADLINE_EXCEEDED | true + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProviderSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProviderSpec.groovy new file mode 100644 index 00000000000..d9612363bae --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/caching/providers/TitusJobProviderSpec.groovy @@ -0,0 +1,153 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.converters + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.aws.provider.view.AmazonS3DataProvider +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.titus.caching.providers.TitusJobProvider +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.client.model.Job +import com.netflix.spinnaker.clouddriver.titus.client.model.Task +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import spock.lang.Specification +import spock.lang.Subject + +class TitusJobProviderSpec extends Specification { + TitusClient titusClient = Stub() + TitusClientProvider titusClientProvider = Stub() + AccountCredentialsProvider accountCredentialsProvider = Stub() + AmazonS3DataProvider amazonS3DataProvider = Stub() + NetflixTitusCredentials mockCredentials = Stub() + + String account = 'ACCT' + String location = 'us-best-1' + String id = '12345-12345' + + Task task = new Task( + id: 123, + startedAt: new Date(), + logLocation: [ + s3: [accountName: account, region: location, bucket: 'coolbucket', key: 'coolkey'] + ] + ) + + Job job = new Job(tasks: [task]) + + @Subject + TitusJobProvider titusJobProvider = new TitusJobProvider(titusClientProvider) + + def setup() { + titusJobProvider.objectMapper = new ObjectMapper() + titusJobProvider.accountCredentialsProvider = accountCredentialsProvider + titusJobProvider.amazonS3DataProvider = amazonS3DataProvider + accountCredentialsProvider.getCredentials(_) >> mockCredentials + titusClientProvider.getTitusClient(_, _) >> titusClient + titusClient.getJobAndAllRunningAndCompletedTasks(_) >> job + } + + void 'getFileContents should parse json if the file ends in .json'() { + given: + String fileName = 'data.json' + + String fileContents = ''' + { + "foo": "FOO", + "bar": { + "baz": "BAR.BAZ", + "list": [ "one", "two" ] + } + }''' + + when: + amazonS3DataProvider.getAdhocData(_, _, _, _) >> { args -> + OutputStream outStream = args[3] + outStream << fileContents + outStream.close() + } + + Map contents = titusJobProvider.getFileContents(account, location, id, fileName) + + then: + contents == [ + foo: 'FOO', + bar: [ + baz : 'BAR.BAZ', + list: ['one', 'two'] + ] + ] + } + + void 'getFileContents should parse yaml, if the file ends in .yml'() { + given: + String fileName = 'data.yml' + + String fileContents = ''' + foo: FOO + bar: + baz: BAR.BAZ + list: + - one + - two + ''' + + when: + amazonS3DataProvider.getAdhocData(_, _, _, _) >> { args -> + OutputStream outStream = args[3] + outStream << fileContents + outStream.close() + } + + Map contents = titusJobProvider.getFileContents(account, location, id, fileName) + + then: + contents == [ + foo: 'FOO', + bar: [ + baz : 'BAR.BAZ', + list: ['one', 'two'] + ] + ] + } + + void 'getFileContents should parse properties files for all other extensions'() { + given: + String fileContents = ''' + foo: FOO + bar.baz: BAR.BAZ + ''' + + when: + amazonS3DataProvider.getAdhocData(_, _, _, _) >> { args -> + OutputStream outStream = args[3] + outStream << fileContents + outStream.close() + } + + Map contents = titusJobProvider.getFileContents(account, location, id, fileName) + + then: + contents == [foo: 'FOO', 'bar.baz': 'BAR.BAZ'] + + where: + fileName | _ + 'data' | _ + 'data.properties' | _ + 'data.asdfadf' | 
_ + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClientSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClientSpec.groovy index e09cbba48ae..6c8b9b4e2f5 100644 --- a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClientSpec.groovy +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/client/RegionScopedTitusClientSpec.groovy @@ -102,29 +102,6 @@ class RegionScopedTitusClientSpec extends Specification { logger.info("job by name {}", job); job != null - logger.info("Jobs request: {}", new Date()); - List jobs = titusClient.getAllJobsWithTasks(); - logger.info("Jobs response: {}", new Date()); - logger.info("Jobs"); - logger.info("-----------------------------------------------------------------------------------------------"); - logger.info("Jobs count: {}", jobs.size()); - - // ****************************************************************************************************************** - when: - int i = 7; - boolean found = false; - while (--i > 0) { - Job queriedJob = titusClient.getAllJobsWithTasks().find { it.id == jobId } - if (queriedJob) { - found = true; - break; - } - Thread.sleep(15 * 1000L); - } - - then: - found - // ****************************************************************************************************************** when: @@ -180,27 +157,7 @@ class RegionScopedTitusClientSpec extends Specification { then: terminated - when: logger.info("Successfully terminated job {}" + terminatedJob); - int k = 14; - boolean foundAfterTermination = true; - while (--k > 0) { - List queriedJobs = titusClient.getAllJobsWithTasks(); - if (!queriedJobs.contains(job)) { - foundAfterTermination = false; - logger.info("Did NOT find job {} in the list of jobs. Terminate successful.", jobId); - break; - } - Thread.sleep(10 * 1000L); - } - - if (foundAfterTermination) { - System.err.println("ERROR: Even after terminate, job was FOUND in the list of jobs: " + jobId); - } - - then: - !foundAfterTermination - } } diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolverSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolverSpec.groovy new file mode 100644 index 00000000000..a56e7bd22fd --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/TitusServerGroupNameResolverSpec.groovy @@ -0,0 +1,94 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy + +import com.netflix.spinnaker.clouddriver.data.task.DefaultTask +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.client.model.Job +import spock.lang.Specification +import spock.lang.Unroll + + +class TitusServerGroupNameResolverSpec extends Specification { + + def titusClient = Mock(TitusClient) + def region = 'us-west-1' + + void setup() { + Task task = new DefaultTask("task") + TaskRepository.threadLocalTask.set(task) + } + + @Unroll + void "should correctly resolve next sequence number when details look like a sequence number - i.e., v([0-9]+)"() { + given: + def resolver = new TitusServerGroupNameResolver(titusClient, region) + + def application = 'application' + def stack = 'stack' + def serverGroupName = "$application-$stack-$details-$sequence" + def nextServerGroupName = "$application-$stack-$details-$nextSequence" + + List jobs = + [ + new Job( + name: serverGroupName + ) + ] + titusClient.findJobsByApplication(application) >> jobs + + when: + def result = resolver.resolveNextServerGroupName(application, stack, details, false) + + then: + result == nextServerGroupName + + where: + details | sequence | nextSequence + "v00001" | "v000" | "v001" + "v82589065" | "v000" | "v001" + "v82589065" | "v001" | "v002" + "v82589065" | "v998" | "v999" + "v8258c06b" | "v000" | "v001" //one a-z0-9 test for good measure + } + + void "should rollover sequence number"() { + given: + def resolver = new TitusServerGroupNameResolver(titusClient, region) + + def application = 'application' + def stack = 'v000' // unlikely this would be a stack name, but it's a good test case + def serverGroupName = "$application-$stack-v999" + def nextServerGroupName = "$application-$stack-v000" + + List jobs = + [ + new Job( + name: serverGroupName + ) + ] + titusClient.findJobsByApplication(application) >> jobs + + when: + def result = resolver.resolveNextServerGroupName(application, stack, null, false) + + then: + result == nextServerGroupName + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DestroyTitusJobSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DestroyTitusJobSpec.groovy new file mode 100644 index 00000000000..afdea85c0c8 --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/DestroyTitusJobSpec.groovy @@ -0,0 +1,62 @@ +/* + * Copyright 2015 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.titus.deploy.actions + +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.client.model.Job +import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateJobRequest +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusJobDescription +import spock.lang.Specification +import spock.lang.Subject +import com.netflix.spinnaker.clouddriver.saga.models.Saga + +class DestroyTitusJobSpec extends Specification { + def titusClient = Mock(TitusClient) + def titusClientProvider = Stub(TitusClientProvider) { + getTitusClient(_, _) >> titusClient + } + + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def testCredentials = Mock(NetflixTitusCredentials) + + @Subject + DestroyTitusJob operation = new DestroyTitusJob(accountCredentialsProvider, titusClientProvider) + + void 'should terminate the titus job successfully'() { + given: + def saga = new Saga("test-saga", "1") + def command = DestroyTitusJob.DestroyTitusJobCommand.builder().description( + new DestroyTitusJobDescription( + jobId: '1234', region: 'us-east-1', account: 'test', user: 'testUser' + ) + ).build() + + when: + operation.apply(command, saga) + + then: + 1 * accountCredentialsProvider.getCredentials('test') >> testCredentials + 1 * titusClient.terminateJob({ TerminateJobRequest terminateJobRequest -> + assert terminateJobRequest.jobId == '1234' + assert terminateJobRequest.user == 'testUser' + }) + 0 * _ + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobDisruptionBudgetSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobDisruptionBudgetSpec.groovy new file mode 100644 index 00000000000..34bf7d0214c --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobDisruptionBudgetSpec.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions + +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion +import com.netflix.spinnaker.clouddriver.titus.client.model.DisruptionBudget +import com.netflix.spinnaker.clouddriver.titus.client.model.JobDisruptionBudgetUpdateRequest +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.RelocationLimit +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.SelfManaged +import com.netflix.spinnaker.clouddriver.titus.client.model.disruption.UnhealthyTasksLimit +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest +import com.netflix.spinnaker.clouddriver.titus.deploy.description.UpsertJobDisruptionBudgetDescription +import com.netflix.spinnaker.clouddriver.titus.deploy.ops.UpsertTitusJobDisruptionBudgetAtomicOperation +import com.netflix.spinnaker.fiat.model.resources.Permissions +import spock.lang.Specification +import spock.lang.Subject + +class UpsertTitusJobDisruptionBudgetSpec extends Specification { + def testCredentials = Mock(NetflixTitusCredentials) + + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def titusClientProvider = Mock(TitusClientProvider) + def titusClient = Mock(TitusClient) + + @Subject + UpsertTitusJobDisruptionBudget upsertTitusJobDisruptionBudget = new UpsertTitusJobDisruptionBudget( + accountCredentialsProvider, titusClientProvider + ) + + void 'should update disruption budget'() { + given: + def saga = new Saga("my-saga", "my-id") + def disruptionBudget = new DisruptionBudget() + def description = new UpsertJobDisruptionBudgetDescription( + jobId: "my-job-id", region: "us-east-1", credentials: testCredentials, disruptionBudget: disruptionBudget + ) + def command = UpsertTitusJobDisruptionBudget.UpsertTitusJobDisruptionBudgetCommand.builder().description( + description + ).build() + + when: + upsertTitusJobDisruptionBudget.apply(command, saga) + + then: + 1 * accountCredentialsProvider.getCredentials(_) >> { return testCredentials } + 1 * titusClientProvider.getTitusClient(testCredentials, "us-east-1") >> { return titusClient } + + 1 * titusClient.updateDisruptionBudget(new JobDisruptionBudgetUpdateRequest() + .withDisruptionBudget(disruptionBudget) + .withJobId("my-job-id")) + 0 * _ + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobProcessesSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobProcessesSpec.groovy new file mode 100644 index 00000000000..47d41133ec9 --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/actions/UpsertTitusJobProcessesSpec.groovy @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
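UpsertTitusJobDisruptionBudgetSpec above uses the other matching style: the expected argument is built outright, so the interaction matches only when the request's equals() agrees. A sketch of that reliance on value equality (Request and Client here are hypothetical):

import groovy.transform.EqualsAndHashCode
import spock.lang.Specification

class EqualityMatchSpec extends Specification {
  @EqualsAndHashCode
  static class Request { String jobId }

  interface Client { void update(Request request) }

  def 'matches by equals, not identity'() {
    given:
    Client client = Mock()

    when:
    client.update(new Request(jobId: 'my-job-id'))

    then:
    // a freshly built but equal Request satisfies the interaction
    1 * client.update(new Request(jobId: 'my-job-id'))
  }
}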
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.actions + +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import com.netflix.spinnaker.clouddriver.titus.deploy.description.ServiceJobProcessesRequest +import spock.lang.Specification +import spock.lang.Subject + +class UpsertTitusJobProcessesSpec extends Specification { + def testCredentials = Mock(NetflixTitusCredentials) + + def accountCredentialsProvider = Mock(AccountCredentialsProvider) + def titusClientProvider = Mock(TitusClientProvider) + def titusClient = Mock(TitusClient) + + @Subject + UpsertTitusJobProcesses upsertTitusJobProcesses = new UpsertTitusJobProcesses( + accountCredentialsProvider, titusClientProvider + ) + + void 'should update scaling processes'() { + given: + def saga = new Saga("my-saga", "my-id") + def request = new ServiceJobProcessesRequest(credentials: testCredentials, region: "us-east-1") + def command = UpsertTitusJobProcesses.UpsertTitusJobProcessesCommand.builder().description( + request + ).build() + + when: + upsertTitusJobProcesses.apply(command, saga) + + then: + 1 * accountCredentialsProvider.getCredentials(_) >> { return testCredentials } + 1 * titusClientProvider.getTitusClient(testCredentials, "us-east-1") >> { return titusClient } + + 1 * titusClient.updateScalingProcesses(request) + 0 * _ + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverterSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverterSpec.groovy index 9bd0bd59e4b..7b4450cd8d9 100644 --- a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverterSpec.groovy +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/converters/TitusDeployAtomicOperationConverterSpec.groovy @@ -41,6 +41,20 @@ class TitusDeployAtomicOperationConverterSpec extends Specification { void 'convertDescription should return a valid TitusDeployDescription'() { given: + + Map signedAddressAllocations = [ + addressAllocation : [ + addressLocation: [region : "us-east-1", availabilityZone: "us-east-1d", + subnetId: "subnet-ffab009"], + uuid : "7e571794-4a8b-4335-8be7-c5e3b2660688", + address : "192.122.100.100"], + authoritativePublicKey: "authoritativePublicKeyValue", + hostPublicKey : "hostPublicKeyValue", + hostPublicKeySignature: "hostPublicKeySignatureValue", + message : "message", + messageSignature : "messageSignatureValue" + ] + Map input = [ application: 'api', stack : 'test', @@ -49,7 +63,8 @@ class TitusDeployAtomicOperationConverterSpec extends Specification { subnetType : 'vpc0', imageId : 'api.server:master-201506020033-trusty-7366606', capacity : 
[desired: 3, min: 2, max: 5], - resources : [cpu: 2, memory: 4, disk: 4000, ports: [7001], allocateIpAddress: true], + resources : [cpu : 2, memory: 4, disk: 4000, ports: [7001], allocateIpAddress: true, + signedAddressAllocations: [signedAddressAllocations]], env : ['netflix.environment': 'test'], credentials: 'test' ] diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescriptionSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescriptionSpec.groovy new file mode 100644 index 00000000000..8c8ac703a12 --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/description/TitusDeployDescriptionSpec.groovy @@ -0,0 +1,102 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.description + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.orchestration.SagaContextAware +import com.netflix.spinnaker.clouddriver.titus.client.model.MigrationPolicy +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import spock.lang.Specification +import spock.lang.Unroll + +class TitusDeployDescriptionSpec extends Specification { + + @Unroll + def "ser/de"() { + + Map signedAddressAllocations = [ + addressAllocation : [ + addressLocation : [ + region : "us-east-1", + availabilityZone: "us-east-1d", + subnetId : "subnet-ffab009" + ], + uuid : "7e571794-4a8b-4335-8be7-c5e3b2660688", + address : "192.122.100.100", + ], + authoritativePublicKey: "authoritativePublicKeyValue", + hostPublicKey : "hostPublicKeyValue", + hostPublicKeySignature: "hostPublicKeySignatureValue", + message : "message", + messageSignature : "messageSignatureValue" + ] + given: + ObjectMapper objectMapper = new ObjectMapper().findAndRegisterModules() + + and: + TitusDeployDescription subject = new TitusDeployDescription( + account: "titustest", + region: "us-east-1", + application: "helloworld", + capacity: new TitusDeployDescription.Capacity( + desired: 1, + max: 1, + min: 1 + ), + capacityGroup: "helloworld", + containerAttributes: [:], + credentials: credentials, + env: [:], + hardConstraints: [], + iamProfile: "helloworldInstanceProfile", + imageId: "titus/helloworld:latest", + inService: true, + labels: [:], + migrationPolicy: new MigrationPolicy( + type: "systemDefault" + ), + resources: new TitusDeployDescription.Resources( + allocateIpAddress: true, + cpu: 2, + disk: 10000, + memory: 4096, + networkMbps: 128, + signedAddressAllocations: [signedAddressAllocations] + ), + securityGroups: [], + softConstraints: [], + sagaContext: new SagaContextAware.SagaContext( + "titus", + "createServerGroup", + [:] + ) + ) + + when: + objectMapper.readValue(objectMapper.writeValueAsString(subject), TitusDeployDescription) + + then: + noExceptionThrown() + + where: + credentials << [ + null, + 
Mock(NetflixTitusCredentials) { + getName() >> "titustest" + } + ] + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandlerSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandlerSpec.groovy deleted file mode 100644 index 293ed4a29b0..00000000000 --- a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/TitusDeployHandlerSpec.groovy +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.handlers - -import com.netflix.spinnaker.config.AwsConfiguration -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion -import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription -import spock.lang.Specification -import spock.lang.Subject - -class TitusDeployHandlerSpec extends Specification { - NetflixTitusCredentials netflixTitusCredentials = Mock(NetflixTitusCredentials) - def accountCredentialsProvider = Mock(AccountCredentialsProvider) { - getCredentials("test") >> { - return netflixTitusCredentials - } - } - - def accountCredentialsRepository = Mock(AccountCredentialsRepository) { - getOne("test") >> { - return netflixTitusCredentials - } - } - - TitusClient titusClient = Mock(TitusClient) - - TitusClientProvider titusClientProvider = Stub(TitusClientProvider) { - getTitusClient(_, _) >> titusClient - } - - NetflixTitusCredentials testCredentials = new NetflixTitusCredentials( - 'test', 'test', 'test', [new TitusRegion('us-east-1', 'test', 'http://foo', false, false, "blah", "blah", 7104, [])], 'test', 'test', 'test', 'test', false, '', 'mainvpc', [], "", false, false, false - ) - - @Subject - TitusDeployHandler titusDeployHandler = new TitusDeployHandler(titusClientProvider, accountCredentialsRepository) - - def setup() { - Task task = Mock(Task) - TaskRepository.threadLocalTask.set(task) - } - - void 'TitusDeployHandler should submit a Titus job successfully'() { - given: - TitusDeployDescription titusDeployDescription = new TitusDeployDescription( - application: 'api', - stack: 'test', - freeFormDetails: '', - region: 'us-east-1', - subnet: 'vpc0', - imageId: 'api.server:master-201506020033-trusty-7366606', - capacity: 
[desired: 1, min: 1, max: 2], - resources: [cpu: 2, memory: 4, disk: 4000, ports: [7001], allocateIpAddress: true], - env: ['netflix.environment': 'test'], - credentials: testCredentials, - interestingHealthProviderNames: [ - "Titus", - "Discovery" - ], - containerAttributes: [ - 'k1': 'value1', - 'k2': 123 - ] - ) - titusClient.findJobsByApplication(_) >> [] - - titusDeployHandler.deployDefaults = [ - addAppGroupToServerGroup: false - ] as AwsConfiguration.DeployDefaults - - titusDeployHandler.accountCredentialsProvider = accountCredentialsProvider - - when: - DeploymentResult deploymentResult = titusDeployHandler.handle(titusDeployDescription, []) - - then: - noExceptionThrown() - deploymentResult != null - deploymentResult.serverGroupNames && deploymentResult.serverGroupNames.contains('us-east-1:api-test-v000') - deploymentResult.serverGroupNameByRegion && deploymentResult.serverGroupNameByRegion['us-east-1'] == 'api-test-v000' - accountCredentialsProvider.getCredentials(_) >> netflixTitusCredentials - 1 * titusClient.submitJob({ - it.jobName == 'api-test-v000' && - it.dockerImageName == 'api.server' && - it.dockerImageVersion == 'master-201506020033-trusty-7366606' && - it.instancesMin == titusDeployDescription.capacity.min && - it.instancesMax == titusDeployDescription.capacity.max && - it.instancesDesired == titusDeployDescription.capacity.desired && - it.cpu == titusDeployDescription.resources.cpu && - it.memory == titusDeployDescription.resources.memory && - it.disk == titusDeployDescription.resources.disk && - it.ports == titusDeployDescription.resources.ports && - it.env == titusDeployDescription.env && - it.application == titusDeployDescription.application && - it.allocateIpAddress == titusDeployDescription.resources.allocateIpAddress && - it.labels.get("interestingHealthProviderNames") == "Titus,Discovery" - } as SubmitJobRequest) >> "123456" - } - -} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/CommandSerdeSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/CommandSerdeSpec.groovy new file mode 100644 index 00000000000..2de4e0110bf --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/CommandSerdeSpec.groovy @@ -0,0 +1,124 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
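Both TitusDeployDescriptionSpec above and the CommandSerdeSpec that follows reduce to the same smoke test: write the object out, read it back, and assert only noExceptionThrown(). The essence of that round trip as a short script (the roundTrip closure is illustrative):

import com.fasterxml.jackson.databind.ObjectMapper

def mapper = new ObjectMapper().findAndRegisterModules()

// Catches fields Jackson cannot rebuild (missing creators, unregistered
// subtypes) without asserting on individual properties.
def roundTrip = { Object value, Class type ->
  mapper.readValue(mapper.writeValueAsString(value), type)
}

assert roundTrip([a: 1], Map) == [a: 1]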
+ */ +package com.netflix.spinnaker.clouddriver.titus.deploy.handlers.actions + +import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.clouddriver.event.CompositeSpinnakerEvent +import com.netflix.spinnaker.clouddriver.event.EventMetadata +import com.netflix.spinnaker.clouddriver.event.SpinnakerEvent +import com.netflix.spinnaker.clouddriver.titus.client.model.SubmitJobRequest +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.SubmitTitusJob +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.Instant + +import static com.netflix.spinnaker.clouddriver.aws.deploy.ops.loadbalancer.TargetGroupLookupHelper.TargetGroupLookupResult +import static com.netflix.spinnaker.clouddriver.titus.deploy.actions.AttachTitusServiceLoadBalancers.AttachTitusServiceLoadBalancersCommand +import static com.netflix.spinnaker.clouddriver.titus.deploy.actions.CopyTitusServiceScalingPolicies.CopyTitusServiceScalingPoliciesCommand +import static com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App.Front50App +import static com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App.LoadFront50AppCommand +import static com.netflix.spinnaker.clouddriver.titus.deploy.actions.PrepareTitusDeploy.PrepareTitusDeployCommand + +class CommandSerdeSpec extends Specification { + + @Shared NetflixTitusCredentials titusCredentials = Mock() { + getName() >> "titus" + } + + @Shared TitusDeployDescription deployDescription = new TitusDeployDescription(credentials: titusCredentials) + + @Shared Front50App front50App = new Front50App("example@example.com", true) + + @Unroll + def "can serialize and deserialize #command.class.simpleName"() { + given: + ObjectMapper objectMapper = new ObjectMapper() + objectMapper + .findAndRegisterModules() + + and: + registerSubtypes(objectMapper, command) + initializeEvent(command) + + when: + def serialized = objectMapper.writeValueAsString(command) + objectMapper.readValue(serialized, SpinnakerEvent) + + then: + noExceptionThrown() + + where: + command << [ + LoadFront50AppCommand.builder() + .appName("myApp") + .nextCommand(PrepareTitusDeployCommand.builder() + .description(deployDescription) + .front50App(front50App) + .build()) + .allowMissing(true) + .build(), + PrepareTitusDeployCommand.builder() + .description(deployDescription) + .front50App(front50App) + .build(), + AttachTitusServiceLoadBalancersCommand.builder() + .description(deployDescription) + .jobUri("http://localhost/id") + .targetGroupLookupResult(new TargetGroupLookupResult()) + .build(), + CopyTitusServiceScalingPoliciesCommand.builder() + .description(deployDescription) + .jobUri("http://localhost/id") + .deployedServerGroupName("myapp-v000") + .build(), + SubmitTitusJob.SubmitTitusJobCommand.builder() + .description(deployDescription) + .submitJobRequest(SubmitJobRequest.builder().build()) + .nextServerGroupName("myapp-v000") + .front50App(front50App) + .build() + ] + } + + static void registerSubtypes(ObjectMapper objectMapper, SpinnakerEvent event) { + if (event instanceof CompositeSpinnakerEvent) { + objectMapper.registerSubtypes(((CompositeSpinnakerEvent) event).composedEvents.collect { it.class }) + } + objectMapper.registerSubtypes(event.class) + } + + static void initializeEvent(SpinnakerEvent event) { + if (event instanceof 
CompositeSpinnakerEvent) { + event.composedEvents.forEach { + initializeEvent(it) + } + } + event.metadata = new EventMetadata( + UUID.randomUUID().toString(), + "aggType", + "aggId", + 0, + 0, + Instant.now(), + "unknown", + "unknown" + ) + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/PrepareTitusDeployActionSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/PrepareTitusDeployActionSpec.groovy new file mode 100644 index 00000000000..98d27327af0 --- /dev/null +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/PrepareTitusDeployActionSpec.groovy @@ -0,0 +1,299 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.handlers.actions + +import com.netflix.spinnaker.clouddriver.aws.services.RegionScopedProviderFactory +import com.netflix.spinnaker.clouddriver.saga.models.Saga +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository +import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider +import com.netflix.spinnaker.clouddriver.titus.caching.utils.AwsLookupUtil +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion +import com.netflix.spinnaker.clouddriver.titus.client.model.Job +import com.netflix.spinnaker.clouddriver.titus.client.model.MigrationPolicy +import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials +import com.netflix.spinnaker.clouddriver.orchestration.sagas.LoadFront50App +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.PrepareTitusDeploy +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.PrepareTitusDeploy.PrepareTitusDeployCommand +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.SubmitTitusJob +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription +import com.netflix.spinnaker.config.AwsConfiguration +import com.netflix.spinnaker.fiat.model.resources.Permissions +import com.netflix.spinnaker.kork.test.mimicker.DataContainer +import com.netflix.spinnaker.kork.test.mimicker.Mimicker +import com.netflix.spinnaker.moniker.Moniker +import com.netflix.spinnaker.moniker.frigga.FriggaReflectiveNamer +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +class PrepareTitusDeployActionSpec extends Specification { + + @Shared + Mimicker mimicker = new Mimicker(new DataContainer(["mimicker-titus.yml"]).withDefaultResources()) + + Fixture fixture = new Fixture(mimicker) + + NetflixTitusCredentials netflixTitusCredentials = new NetflixTitusCredentials( + fixture.accountName, + mimicker.text().word(), + mimicker.text().word(), + [ + new TitusRegion( + 
mimicker.aws().getAvailabilityZone(fixture.region), + fixture.accountName, + 'http://region', // TODO(rz): mimicker.network().url + fixture.moniker.app, + mimicker.text().word(), + mimicker.network().port, + [], + null, + null + ) + ], + 'http://bastion', // TODO(rz): mimicker.network().url + mimicker.text().word(), + mimicker.text().word(), + mimicker.text().word(), + mimicker.random().trueOrFalse(), + mimicker.text().word(), + mimicker.text().word(), + [], + Permissions.EMPTY, + mimicker.text().word() + ) + + AccountCredentialsRepository accountCredentialsRepository = Mock() { + getOne(fixture.accountName) >> { + return netflixTitusCredentials + } + } + TitusClient titusClient = Mock(TitusClient) + TitusClientProvider titusClientProvider = Mock() { + getTitusClient(_, _) >> titusClient + } + AwsLookupUtil awsLookupUtil = Mock() + RegionScopedProviderFactory regionScopedProviderFactory = Mock() + AccountCredentialsProvider accountCredentialsProvider = Mock() + AwsConfiguration.DeployDefaults deployDefaults = Mock() + + Saga saga = new Saga(mimicker.random().uuid(), mimicker.random().uuid()) + + @Subject + PrepareTitusDeploy subject = new PrepareTitusDeploy( + accountCredentialsRepository, + titusClientProvider, + awsLookupUtil, + regionScopedProviderFactory, + accountCredentialsProvider, + deployDefaults, + Optional.empty() + ) + + def "merges source details when no asg name is provided"() { + given: + TitusDeployDescription description = createTitusDeployDescription(mimicker.aws().securityGroupId) + description.source = new TitusDeployDescription.Source( + account: fixture.accountName, + region: fixture.region, + asgName: fixture.monikerName, + useSourceCapacity: mimicker.random().trueOrFalse() + ) + + and: + PrepareTitusDeployCommand command = createCommand(description) + + when: + def result = subject.apply(command, saga) + + then: + titusClient.findJobByName(_) >> { + new Job( + applicationName: fixture.moniker.app, + digest: mimicker.text().word(), + securityGroups: [mimicker.text().word()], + instancesMin: instancesMin, + instancesMax: instancesMax, + instancesDesired: instancesDesired, + labels: [passThru: "label value"], + environment: [passThru: "environment value"], + containerAttributes: [passThru: "containerAttributes value"], + softConstraints: [], + hardConstraints: [], + serviceJobProcesses: [ + disableIncreaseDesired: true, + disableDecreaseDesired: true + ] + ) + } + awsLookupUtil.securityGroupIdExists(_, _, _) >> true + + result.events.isEmpty() == true + result.nextCommand instanceof SubmitTitusJob.SubmitTitusJobCommand + result.nextCommand.description.with { + securityGroups == ["hello"] + capacity.min == instancesMin + capacity.max == instancesMax + capacity.desired == instancesDesired + labels == [passThru: "label value"] + env == [passThru: "environment value"] + containerAttributes == [passThru: "containerAttributes value"] + serviceJobProcesses == [ + disableIncreaseDesired: true, + disableDecreaseDesired: true + ] + } + + where: + instancesMin = mimicker.random().intValue(0, 100_000) + instancesMax = mimicker.random().intValue(0, 100_000) + instancesDesired = mimicker.random().intValue(0, 100_000) + } + + def "security groups are resolved"() { + given: + TitusDeployDescription description = createTitusDeployDescription(sg1Id) + description.securityGroups = [sg1Id, sg2Name] + + when: + subject.resolveSecurityGroups(saga, description) + + then: + awsLookupUtil.securityGroupIdExists(_, _, sg1Id) >> true + awsLookupUtil.securityGroupIdExists(_, _, sg2Name) >> false + 
awsLookupUtil.convertSecurityGroupNameToId(_, _, sg2Name) >> sg2Id + + description.securityGroups.sort() == [sg2Id, sg1Id].sort() + + where: + sg1Id = mimicker.aws().securityGroupId + sg2Name = mimicker.text().word() + sg2Id = mimicker.aws().securityGroupId + } + + @Unroll + def "security groups include app security group (label=#labelValue, desc=#descriptionValue, includesAppGroup=#includesAppGroup)"() { + given: + TitusDeployDescription description = createTitusDeployDescription(sg1Id) + + and: + if (labelValue != null) { + description.labels[PrepareTitusDeploy.USE_APPLICATION_DEFAULT_SG_LABEL] = labelValue.toString() + } + if (descriptionValue != null) { + description.useApplicationDefaultSecurityGroup = descriptionValue + } + + when: + subject.resolveSecurityGroups(saga, description) + + then: + awsLookupUtil.securityGroupIdExists(_, _, sg1Id) >> true + awsLookupUtil.convertSecurityGroupNameToId(_, _, sg2Name) >> sg2Id + + if (includesAppGroup) { + description.securityGroups == [sg1Id, sg2Id] + } else { + description.securityGroups == [sg1Id] + } + + where: + labelValue | descriptionValue || includesAppGroup + null | null || true + true | null || true + false | null || false + true | true || true + true | false || true + null | true || true + null | false || false + + sg1Id = mimicker.aws().securityGroupId + sg2Name = mimicker.text().word() + sg2Id = mimicker.aws().securityGroupId + } + + private TitusDeployDescription createTitusDeployDescription(String securityGroupId) { + return new TitusDeployDescription( + application: fixture.moniker.app, + capacity: new TitusDeployDescription.Capacity( + desired: 1, + max: 1, + min: 1 + ), + capacityGroup: "spindemo", + containerAttributes: [:] as Map, + credentials: netflixTitusCredentials, + env: [:] as Map, + freeFormDetails: "highlander", + hardConstraints: [ + "UniqueHost", + "ZoneBalance" + ], + iamProfile: "spindemoInstanceProfile", + imageId: "spinnaker/basic:master-h47400.3aa8911", + inService: true, + labels: [:] as Map, + migrationPolicy: new MigrationPolicy(type: "systemDefault"), + region: fixture.region, + resources: new TitusDeployDescription.Resources( + allocateIpAddress: true, + cpu: 1, + disk: 5_000, + gpu: 0, + memory: 5_000, + networkMbps: 128 + ), + securityGroups: [ + securityGroupId + ], + softConstraints: [], + stack: "staging", + ) + } + + private static class Fixture { + Moniker moniker + // TODO(rz): barf + String monikerName + String region + String accountName + + Fixture(Mimicker mimicker) { + moniker = mimicker.moniker().get() + region = mimicker.aws().region + accountName = mimicker.text().word() + new FriggaReflectiveNamer().applyMoniker(this, moniker) + } + + String setName(String name) { + monikerName = name + } + } + + private static PrepareTitusDeployCommand createCommand(TitusDeployDescription description) { + return createCommand(description, null, false) + } + + private static PrepareTitusDeployCommand createCommand( + TitusDeployDescription description, String email, boolean platformHealthOnly) { + return PrepareTitusDeployCommand.builder() + .description(description) + .front50App(new LoadFront50App.Front50App(email, platformHealthOnly)) + .build() + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/TitusJobNameResolverSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/TitusJobNameResolverSpec.groovy new file mode 100644 index 00000000000..9b00a0bcce2 --- /dev/null +++ 
b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/handlers/actions/TitusJobNameResolverSpec.groovy @@ -0,0 +1,93 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.titus.deploy.handlers.actions + +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.titus.JobType +import com.netflix.spinnaker.clouddriver.titus.client.TitusClient +import com.netflix.spinnaker.clouddriver.titus.client.model.Job +import com.netflix.spinnaker.clouddriver.titus.deploy.actions.TitusJobNameResolver +import com.netflix.spinnaker.clouddriver.titus.deploy.description.TitusDeployDescription +import spock.lang.Specification +import spock.lang.Unroll + +import static com.netflix.spinnaker.clouddriver.titus.JobType.BATCH +import static com.netflix.spinnaker.clouddriver.titus.JobType.SERVICE + +class TitusJobNameResolverSpec extends Specification { + + def setup() { + Task task = Mock(Task) { + getId() >> "taskid" + getRequestId() >> "requestid" + } + TaskRepository.threadLocalTask.set(task) + } + + @Unroll + def "resolves job names"() { + given: + TitusClient titusClient = Mock() { + findJobsByApplication(_) >> { + [ + new Job(name: "spindemo-v001", submittedAt: new Date()), + new Job(name: "spindemo-test-v001", submittedAt: new Date()), + new Job(name: "spindemo-test-titus-v999", submittedAt: new Date()), + ] + } + } + + when: + String result = TitusJobNameResolver.resolveJobName(titusClient, description) + + then: + result == expected + + where: + description || expected + description(BATCH, "test", "free") || "spindemo" + description(SERVICE, "test", "free") || "spindemo-test-free-v000" + description(SERVICE, null, null) || "spindemo-v002" + description(SERVICE, "test", null) || "spindemo-test-v002" + description(SERVICE, "test", "titus") || "spindemo-test-titus-v000" + } + + private static TitusDeployDescription description(JobType jobType, String stack, String freeFormDetails) { + return description(jobType, stack, freeFormDetails, null) + } + + private static TitusDeployDescription description( + JobType jobType, + String stack, + String freeFormDetails, + Integer sequence + ) { + return new TitusDeployDescription().with { + it.jobType = jobType.value() + it.application = "spindemo" + it.stack = stack + it.freeFormDetails = freeFormDetails + it.region = "us-east-1" + + if (sequence != null) { + it.sequence = sequence + } + + it + } + } +} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperationSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperationSpec.groovy deleted file mode 100644 index a9f093245a2..00000000000 --- 
a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/deploy/ops/DestroyTitusServerGroupAtomicOperationSpec.groovy +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.titus.deploy.ops - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.titus.TitusClientProvider -import com.netflix.spinnaker.clouddriver.titus.client.TitusClient -import com.netflix.spinnaker.clouddriver.titus.client.TitusRegion -import com.netflix.spinnaker.clouddriver.titus.client.model.Job -import com.netflix.spinnaker.clouddriver.titus.client.model.TerminateJobRequest -import com.netflix.spinnaker.clouddriver.titus.credentials.NetflixTitusCredentials -import com.netflix.spinnaker.clouddriver.titus.deploy.description.DestroyTitusServerGroupDescription -import spock.lang.Specification -import spock.lang.Subject - -class DestroyTitusServerGroupAtomicOperationSpec extends Specification { - - TitusClient titusClient = Mock(TitusClient) - - TitusClientProvider titusClientProvider = Stub(TitusClientProvider) { - getTitusClient(_, _) >> titusClient - } - - NetflixTitusCredentials testCredentials = new NetflixTitusCredentials( - 'test', 'test', 'test', [new TitusRegion('us-east-1', 'test', 'http://foo', false, false, "blah", "blah", 7104, [])], 'test', 'test', 'test', 'test', false, '', 'mainvpc', [], "", false, false, false - ) - - DestroyTitusServerGroupDescription description = new DestroyTitusServerGroupDescription( - serverGroupName: 'api-test-v000', region: 'us-east-1', credentials: testCredentials - ) - - @Subject - AtomicOperation atomicOperation = new DestroyTitusServerGroupAtomicOperation(titusClientProvider, description) - - def setup() { - Task task = Mock(Task) - TaskRepository.threadLocalTask.set(task) - } - - void 'DestroyTitusServerGroupAtomicOperation should terminate the Titus job successfully'() { - given: - titusClient.findJobByName('api-test-v000') >> { new Job(id: '1234') } - - when: - atomicOperation.operate([]) - - then: - titusClient.terminateJob(new TerminateJobRequest().withJobId('1234')) - atomicOperation.events.size() == 1 - atomicOperation.events[0].accountId == 'test' - atomicOperation.events[0].region == 'us-east-1' - atomicOperation.events[0].name == 'api-test-v000' - } -} diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstanceSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstanceSpec.groovy index 9902f81fbf5..be5c51c5bf4 100644 --- a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstanceSpec.groovy +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusInstanceSpec.groovy @@ -42,7 +42,8 @@ class 
TitusInstanceSpec extends Specification { submittedAt: launchDate, region: 'us-east-1', host: 'ec2-1-2-3-4.compute-1.amazonaws.com', - data: [ipAddresses: [nfvpc: '4.5.6.7'], NetworkConfiguration: [EniIPAddress: '1.2.3.4']] + data: [ipAddresses: [nfvpc: '4.5.6.7'], NetworkConfiguration: [EniIPAddress: '1.2.3.4']], + agentId: 'i-abc123' ) void 'valid titus instance is created from a titus task'() { @@ -71,6 +72,9 @@ class TitusInstanceSpec extends Specification { titusInstance.env?.account == 'test' titusInstance.submittedAt == task.submittedAt.time titusInstance.finishedAt == null + titusInstance.privateIpAddress == task.data.ipAddresses.nfvpc + titusInstance.agentId == task.agentId + titusInstance.availabilityZone == task.zone } void 'can handle null ports'() { diff --git a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroupSpec.groovy b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroupSpec.groovy index c55453ba9b2..839e7e6189a 100644 --- a/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroupSpec.groovy +++ b/clouddriver-titus/src/test/groovy/com/netflix/spinnaker/clouddriver/titus/model/TitusServerGroupSpec.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.clouddriver.titus.model import com.netflix.spinnaker.clouddriver.titus.client.model.Job +import com.netflix.spinnaker.clouddriver.titus.client.model.ServiceJobProcesses import com.netflix.spinnaker.clouddriver.titus.client.model.TaskState import com.netflix.spinnaker.clouddriver.titus.client.model.Task import spock.lang.Specification @@ -45,7 +46,11 @@ class TitusServerGroupSpec extends Specification { state: TaskState.RUNNING, submittedAt: launchDate, host: 'ec2-1-2-3-4.compute-1.amazonaws.com' - )] + )], + serviceJobProcesses: new ServiceJobProcesses( + 'disableIncreaseDesired': false, + 'disableDecreaseDesired': false + ) ) void 'valid server group instance is created from a titus job'() { @@ -75,6 +80,8 @@ class TitusServerGroupSpec extends Specification { titusServerGroup.capacity?.min == job.instancesMin titusServerGroup.capacity?.max == job.instancesMax titusServerGroup.capacity?.desired == job.instancesDesired + titusServerGroup.serviceJobProcesses.disableDecreaseDesired == false + titusServerGroup.serviceJobProcesses.disableIncreaseDesired == false } void 'can handle empty ports'() { diff --git a/clouddriver-titus/src/test/resources/mimicker-titus.yml b/clouddriver-titus/src/test/resources/mimicker-titus.yml new file mode 100644 index 00000000000..ca786be5611 --- /dev/null +++ b/clouddriver-titus/src/test/resources/mimicker-titus.yml @@ -0,0 +1,2 @@ +mimicker: + titus: diff --git a/clouddriver-web/clouddriver-web.gradle b/clouddriver-web/clouddriver-web.gradle index 63e5d212aa8..e85c743ca3f 100644 --- a/clouddriver-web/clouddriver-web.gradle +++ b/clouddriver-web/clouddriver-web.gradle @@ -1,59 +1,58 @@ -apply plugin: 'org.springframework.boot' -// Applying the spring-boot plugin pulls in a newer version of jedis that we can't use yet. The -// other ways to override versions (namely the subprojects.configurations.all.resolutionStrategy in -// build.gradle) didn't work. 
-ext['jedis.version'] = spinnaker.version('jedis') +apply plugin: 'io.spinnaker.package' -apply plugin: 'spinnaker.package' - -ext { - springConfigLocation = System.getProperty('spring.config.location', "${System.getProperty('user.home')}/.spinnaker/") - repackage = System.getProperty('springBoot.repackage', "false") -} - -tasks.withType(org.springframework.boot.gradle.run.BootRunTask) { - systemProperty('spring.config.location', project.springConfigLocation) -} +mainClassName = 'com.netflix.spinnaker.clouddriver.Main' configurations.all { exclude group: 'javax.servlet', module: 'servlet-api' exclude group: "org.slf4j", module: "slf4j-log4j12" - resolutionStrategy.force 'com.microsoft.rest:client-runtime:1.0.0-20160309.002843-19' -} - -repositories { - maven { url "http://adxsnapshots.azurewebsites.net" } - maven { url 'https://dl.bintray.com/netflixoss' } } dependencies { - compile project(':clouddriver-aws') - compile project(':clouddriver-ecs') - compile project(':clouddriver-azure') - compile project(':clouddriver-appengine') - compile project(':clouddriver-artifacts') - compile project(':clouddriver-google') - compile project(':clouddriver-kubernetes') - compile project(':clouddriver-openstack') - compile project(':clouddriver-docker') - compile project(':clouddriver-eureka') - compile project(':clouddriver-elasticsearch') - compile project(':clouddriver-elasticsearch-aws') - compile project(':clouddriver-oracle') - compile project(':clouddriver-dcos') - compile project(':clouddriver-titus') - compile project(':clouddriver-cloudfoundry') - - runtime spinnaker.dependency('kork') - compile spinnaker.dependency('korkWeb') - compile spinnaker.dependency('korkStackdriver') - compile spinnaker.dependency('korkSwagger') - compile spinnaker.dependency('bootActuator') - compile spinnaker.dependency('bootDataRest') - testCompile ("org.springframework.boot:spring-boot-starter-test") - //this brings in the jetty GzipFilter which boot will autoconfigure - runtime 'org.eclipse.jetty:jetty-servlets:9.2.11.v20150529' + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-artifacts") + implementation project(":clouddriver-core") + implementation project(":clouddriver-elasticsearch") + implementation project(":clouddriver-security") + implementation project(":clouddriver-sql") + + if (!rootProject.hasProperty("excludeSqlDrivers")) { + runtimeOnly(project(":clouddriver-sql-mysql")) + runtimeOnly(project(":clouddriver-sql-postgres")) + } + + implementation "com.netflix.frigga:frigga" + implementation "io.spinnaker.fiat:fiat-api:$fiatVersion" + implementation "io.spinnaker.fiat:fiat-core:$fiatVersion" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-cloud-config-server" + implementation "io.spinnaker.kork:kork-config" + implementation "io.spinnaker.kork:kork-web" + implementation("io.spinnaker.kork:kork-plugins") + implementation "io.spinnaker.kork:kork-moniker" + implementation "commons-io:commons-io" + implementation "io.reactivex:rxjava" + implementation "io.swagger.core.v3:swagger-annotations" + implementation "org.apache.groovy:groovy" + implementation "org.slf4j:slf4j-api" + implementation "org.springframework.boot:spring-boot-starter-actuator" + implementation "org.springframework.boot:spring-boot-starter-json" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "org.springframework.cloud:spring-cloud-context" + + runtimeOnly 
"io.spinnaker.kork:kork-runtime" + if (!gradle.hasProperty("excludeSpringConfigServer")) { + runtimeOnly project(":clouddriver-configserver") + } + + testImplementation "io.spinnaker.kork:kork-test" + testImplementation "org.springframework.boot:spring-boot-starter-test" + testImplementation "org.spockframework:spock-core" + testImplementation "io.kubernetes:client-java-api-fluent:13.0.2" + testImplementation "org.apache.groovy:groovy-json" + + // Add each included cloud provider project as a runtime dependency + gradle.includedCloudProviderProjects.each { + implementation project(":${it}") + } } - -tasks.bootRepackage.enabled = Boolean.valueOf(project.repackage) - diff --git a/clouddriver-web/config/clouddriver.yml b/clouddriver-web/config/clouddriver.yml index 4d0cad91bea..57407646d98 100644 --- a/clouddriver-web/config/clouddriver.yml +++ b/clouddriver-web/config/clouddriver.yml @@ -2,9 +2,11 @@ server: port: 7002 ssl: enabled: false + compression: + enabled: true redis: - connection: redis://localhost:6379 + connection: ${services.redis.baseUrl:redis://localhost:6379} scheduler: default parallelism: -1 @@ -37,7 +39,6 @@ swagger: - .*appengine.* - .*gce.* - .*kubernetes.* - - .*openstack.* - .*instances.* - .*reports.* - .*docker.* @@ -53,6 +54,15 @@ default: aws: enabled: ${AWS_ENABLED:false} +# features: +# launch-templates: +# enabled: true +# allowed-applications: app1:account1:region1,app2:account1:region1 +# all-applications: false +# allowed-accounts-regions: account1:region1 +# allowed-accounts: account1,account2 +# excluded-applications: "" +# excluded-accounts: account3,account4 # proxy: # proxyHost: 10.0.0.54 # proxyPort: 8888 @@ -62,7 +72,7 @@ aws: # proxyWorkstation: foo # protocol: HTTP defaults: - iamRole: FooRole + iamRole: BaseIAMRole unknownInstanceTypeBlockDevice: deviceName: /dev/sdb size: 40 @@ -135,11 +145,15 @@ azure: appengine: enabled: false +cloudrun: + enabled: false + google: enabled: false baseImageProjects: - centos-cloud - coreos-cloud + - cos-cloud - debian-cloud - opensuse-cloud - rhel-cloud @@ -238,29 +252,6 @@ google: - type: pd-ssd sizeGb: 10 -openstack: - enabled: ${OS_ENABLED:false} - accounts: - - name: my-openstack-account - environment: test - accountType: main - username: ${OS_USERNAME:'username'} - password: ${OS_PASSWORD:'password'} - projectName: ${OS_PROJECT_NAME:''} - domainName: Default - authUrl: ${OS_AUTH_URL:''} - regions: ${OS_REGIONS:''} - insecure: ${OS_INSECURE:false} - # Replacing the Heat template is not recommended - # The implementation is tightly coupled with the suppplied template - # heatTemplatePath: /path/to/custom/heat/template - lbaas: - pollTimeout: ${OS_LBAAS_POLL_TIMEOUT:60} - pollInterval: ${OS_LBAAS_POLL_INTERVAL:5} - stack: - pollTimeout: ${OS_STACK_POLL_TIMEOUT:600} - pollInterval: ${OS_STACK_POLL_INTERVAL:5} - kubernetes: enabled: false v2: @@ -302,10 +293,25 @@ operations.security: # Turn on when confirming Fiat authorization checks # logging.level.com.netflix.spinnaker.clouddriver.listeners: DEBUG +resilience4j.retry: + instances: + sqlTransaction: + maxRetryAttempts: 5 + waitDuration: 100ms + enableExponentialBackoff: false + ignoreExceptions: + - com.netflix.spinnaker.clouddriver.event.exceptions.AggregateChangeRejectedException + sqlRead: + maxRetryAttempts: 5 + waitDuration: 100ms + enableExponentialBackoff: false + --- spring: - profiles: prod + config: + activate: + on-profile: prod # example https configuration for client auth to services: #default: @@ -336,13 +342,16 @@ spring: # regions: #override 
default regions # - name: us-east-1 # - name: ap-northeast-1 + # externalId: a1b2c3d4 --- # local profile is activated by default when running the application - override values here for local development # for production, set spring.profiles.active to select the appropriate profile for your environment spring: - profiles: local + config: + activate: + on-profile: local # an AWSCredentialsProvider that obtains session credentials via SSH through a bastion instance (useful for local development): #bastion: @@ -350,3 +359,28 @@ spring: # port: 22 # proxyRegion: us-west-1 # proxyCluster: my-credentials-cluster + +--- +# This profile is used in HA deployments for a clouddriver that handles read-only requests from +# other services +spring: + config: + activate: + on-profile: ro + +redis: + connection: ${services.redisRo.baseUrl:${services.redis.baseUrl}} + +caching: + writeEnabled: false + +--- +# This profile is used in HA deployments for a clouddriver that handles mutating requests from +# other services, but does not run caching agents +spring: + config: + activate: + on-profile: rw + +caching: + writeEnabled: false diff --git a/clouddriver-web/pkg_scripts/postInstall.sh b/clouddriver-web/pkg_scripts/postInstall.sh index f9de13dc82d..5c077338b46 100755 --- a/clouddriver-web/pkg_scripts/postInstall.sh +++ b/clouddriver-web/pkg_scripts/postInstall.sh @@ -1,25 +1,33 @@ #!/bin/sh +# Remember to also update Dockerfile.* +KUBECTL_DEFAULT_RELEASE=1.22.17 +KUBECTL_RELEASES="${KUBECTL_DEFAULT_RELEASE} 1.26.12 1.27.9 1.28.5 1.29.0" +AWS_CLI_VERSION=2.15.22 +AWS_AIM_AUTHENTICATOR_VERSION=0.6.14 + # ubuntu # check that owner group exists -if [ -z `getent group spinnaker` ]; then +if [ -z "$(getent group spinnaker)" ]; then groupadd spinnaker fi # check that user exists -if [ -z `getent passwd spinnaker` ]; then +if [ -z "$(getent passwd spinnaker)" ]; then useradd --gid spinnaker spinnaker -m --home-dir /home/spinnaker fi install_kubectl() { - if [ -z `which kubectl` ]; then - wget https://storage.googleapis.com/kubernetes-release/release/stable.txt && wget https://storage.googleapis.com/kubernetes-release/release/$(cat stable.txt)/bin/linux/amd64/kubectl - rm stable.txt - chmod +x kubectl - mv ./kubectl /usr/local/bin/kubectl + if [ -z "$(which kubectl)" ]; then + for version in $KUBECTL_RELEASES; do + release_version=$(echo "${version}" | cut -d. -f1,2); \ + wget -nv "https://cdn.dl.k8s.io/release/v${version}/bin/linux/amd64/kubectl" -O "/usr/local/bin/kubectl-${release_version}"; + chmod +x "/usr/local/bin/kubectl-${release_version}"; + done + ln -sf "/usr/local/bin/kubectl-$(echo ${KUBECTL_DEFAULT_RELEASE} | cut -d. 
-f1,2)" /usr/local/bin/kubectl fi } install_kubectl -install --mode=755 --owner=spinnaker --group=spinnaker --directory /var/log/spinnaker/clouddriver +install --mode=755 --owner=spinnaker --group=spinnaker --directory /var/log/spinnaker/clouddriver diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/Main.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/Main.groovy index a32b83c45c2..17edb699e28 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/Main.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/Main.groovy @@ -16,17 +16,30 @@ package com.netflix.spinnaker.clouddriver +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.annotation.JsonDeserialize import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactDeserializer +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactStoreConfiguration +import com.netflix.spinnaker.kork.artifacts.model.Artifact +import com.netflix.spinnaker.kork.boot.DefaultPropertiesBuilder +import com.netflix.spinnaker.kork.configserver.ConfigServerBootstrap +import org.springframework.boot.actuate.autoconfigure.elasticsearch.ElasticSearchRestHealthContributorAutoConfiguration import org.springframework.boot.autoconfigure.EnableAutoConfiguration import org.springframework.boot.autoconfigure.batch.BatchAutoConfiguration +import org.springframework.boot.autoconfigure.data.elasticsearch.ElasticsearchDataAutoConfiguration; import org.springframework.boot.autoconfigure.groovy.template.GroovyTemplateAutoConfiguration +import org.springframework.boot.autoconfigure.gson.GsonAutoConfiguration +import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration import org.springframework.boot.builder.SpringApplicationBuilder -import org.springframework.boot.web.support.SpringBootServletInitializer +import org.springframework.boot.web.servlet.support.SpringBootServletInitializer +import org.springframework.context.annotation.Bean import org.springframework.context.annotation.ComponentScan import org.springframework.context.annotation.Configuration import org.springframework.context.annotation.Import +import org.springframework.context.annotation.Primary +import org.springframework.http.converter.json.Jackson2ObjectMapperBuilder import org.springframework.scheduling.annotation.EnableScheduling -import sun.net.InetAddressCachePolicy import java.security.Security @@ -34,46 +47,61 @@ import java.security.Security @Import([ WebConfig, SecurityConfig, + ArtifactStoreConfiguration, ]) @ComponentScan([ 'com.netflix.spinnaker.config', + 'com.netflix.spinnaker.clouddriver.config' ]) @EnableAutoConfiguration(exclude = [ - BatchAutoConfiguration, - GroovyTemplateAutoConfiguration, + BatchAutoConfiguration, + GroovyTemplateAutoConfiguration, + GsonAutoConfiguration, + DataSourceAutoConfiguration, + ElasticsearchDataAutoConfiguration, + ElasticSearchRestHealthContributorAutoConfiguration ]) @EnableScheduling class Main extends SpringBootServletInitializer { - - static final Map DEFAULT_PROPS = [ - 'netflix.environment' : 'test', - 'netflix.account' : '${netflix.environment}', - 'netflix.stack' : 'test', - 'spring.config.location' : '${user.home}/.spinnaker/', - 'spring.application.name': 'clouddriver', - 'spring.config.name' : 'spinnaker,${spring.application.name}', - 'spring.profiles.active' : '${netflix.environment},local' - ] + private 
static final Map DEFAULT_PROPS = new DefaultPropertiesBuilder().build() static { /** * We often operate in an environment where we expect resolution of DNS names for remote dependencies to change * frequently, so it's best to tell the JVM to avoid caching DNS results internally. */ - InetAddressCachePolicy.cachePolicy = InetAddressCachePolicy.NEVER Security.setProperty('networkaddress.cache.ttl', '0') + System.setProperty("spring.main.allow-bean-definition-overriding", "true") } static void main(String... args) { - launchArgs = args - new SpringApplicationBuilder().properties(DEFAULT_PROPS).sources(Main).run(args) + ConfigServerBootstrap.systemProperties("clouddriver") + new SpringApplicationBuilder() + .properties(DEFAULT_PROPS) + .sources(Main) + .run(args) + } + + @Bean + @Primary + ObjectMapper objectMapper(Jackson2ObjectMapperBuilder builder) { + return builder.createXmlMapper(false) + .mixIn(Artifact.class, ArtifactMixin.class) + .build() } @Override SpringApplicationBuilder configure(SpringApplicationBuilder application) { - application.properties(DEFAULT_PROPS).sources(Main) + application + .properties(DEFAULT_PROPS) + .sources(Main) } - static String[] launchArgs = [] + /** + * Used to deserialize artifacts utilizing an artifact store, and thus + * bypassing the default deserializer on the artifact object itself. + */ + @JsonDeserialize(using = ArtifactDeserializer.class) + private static interface ArtifactMixin{} } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/WebConfig.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/WebConfig.groovy deleted file mode 100644 index db40548cb97..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/WebConfig.groovy +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
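The ArtifactMixin above is the standard Jackson mix-in move: annotations are attached to a type without editing the type itself. Stripped of the Spring builder, the wiring is a single call; this sketch reuses the Artifact and ArtifactDeserializer classes that Main imports:

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactDeserializer
import com.netflix.spinnaker.kork.artifacts.model.Artifact

@JsonDeserialize(using = ArtifactDeserializer)
interface ArtifactMixin {}

def mapper = new ObjectMapper()
// Every Artifact read through this mapper now routes through ArtifactDeserializer,
// leaving the Artifact class untouched.
mapper.addMixIn(Artifact, ArtifactMixin)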
- */ - -package com.netflix.spinnaker.clouddriver - -import com.netflix.spectator.api.Registry -import com.netflix.spinnaker.clouddriver.configuration.CredentialsConfiguration -import com.netflix.spinnaker.clouddriver.configuration.ThreadPoolConfiguration - -import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue -import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueueConfiguration -import com.netflix.spinnaker.filters.AuthenticatedRequestFilter -import com.netflix.spinnaker.kork.web.interceptors.MetricsInterceptor -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.boot.web.servlet.FilterRegistrationBean -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.ComponentScan -import org.springframework.context.annotation.Configuration -import org.springframework.core.Ordered -import org.springframework.http.HttpStatus -import org.springframework.web.bind.annotation.ControllerAdvice -import org.springframework.web.bind.annotation.ExceptionHandler -import org.springframework.web.filter.ShallowEtagHeaderFilter -import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer -import org.springframework.web.servlet.config.annotation.InterceptorRegistry -import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter - -import javax.servlet.Filter -import javax.servlet.http.HttpServletResponse - -@Configuration -@ComponentScan([ - 'com.netflix.spinnaker.clouddriver.controllers', - 'com.netflix.spinnaker.clouddriver.filters', - 'com.netflix.spinnaker.clouddriver.listeners', - 'com.netflix.spinnaker.clouddriver.security', -]) -@EnableConfigurationProperties([CredentialsConfiguration, ThreadPoolConfiguration, RequestQueueConfiguration]) -public class WebConfig extends WebMvcConfigurerAdapter { - @Autowired - Registry registry - - @Override - public void addInterceptors(InterceptorRegistry registry) { - registry.addInterceptor( - new MetricsInterceptor( - this.registry, "controller.invocations", ["account", "region"], ["BasicErrorController"] - ) - ) - } - - @Bean - Filter eTagFilter() { - new ShallowEtagHeaderFilter() - } - - @Bean - RequestQueue requestQueue(RequestQueueConfiguration requestQueueConfiguration, Registry registry) { - return RequestQueue.forConfig(registry, requestQueueConfiguration); - } - - @Bean - FilterRegistrationBean authenticatedRequestFilter() { - def frb = new FilterRegistrationBean(new AuthenticatedRequestFilter(true)) - frb.order = Ordered.HIGHEST_PRECEDENCE - return frb - } - - @Override - void configureContentNegotiation(ContentNegotiationConfigurer configurer) { - super.configureContentNegotiation(configurer) - configurer.favorPathExtension(false) - } - - @ControllerAdvice - static class IllegalArgumentExceptionHandler { - @ExceptionHandler(IllegalArgumentException) - public void handle(HttpServletResponse response, IllegalArgumentException ex) { - response.sendError(HttpStatus.BAD_REQUEST.value(), ex.getMessage()) - } - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/WebConfig.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/WebConfig.java new file mode 100644 index 00000000000..b60b3b4d353 --- /dev/null +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/WebConfig.java @@ -0,0 +1,113 @@ +/* + * Copyright 2015 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver; + +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.configuration.CredentialsConfiguration; +import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue; +import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueueConfiguration; +import com.netflix.spinnaker.filters.AuthenticatedRequestFilter; +import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService; +import com.netflix.spinnaker.kork.web.context.MdcCopyingAsyncTaskExecutor; +import com.netflix.spinnaker.kork.web.interceptors.MetricsInterceptor; +import java.util.List; +import javax.servlet.Filter; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.boot.web.servlet.FilterRegistrationBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.core.Ordered; +import org.springframework.core.task.AsyncTaskExecutor; +import org.springframework.http.MediaType; +import org.springframework.web.filter.ShallowEtagHeaderFilter; +import org.springframework.web.servlet.config.annotation.AsyncSupportConfigurer; +import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer; +import org.springframework.web.servlet.config.annotation.InterceptorRegistry; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter; + +@Configuration +@ComponentScan({ + "com.netflix.spinnaker.clouddriver.controllers", + "com.netflix.spinnaker.clouddriver.filters", + "com.netflix.spinnaker.clouddriver.listeners", + "com.netflix.spinnaker.clouddriver.security", +}) +@EnableConfigurationProperties({CredentialsConfiguration.class, RequestQueueConfiguration.class}) +public class WebConfig extends WebMvcConfigurerAdapter { + private final Registry registry; + private final AsyncTaskExecutor asyncTaskExecutor; + + @Autowired + public WebConfig( + Registry registry, + @Qualifier("threadPoolTaskScheduler") AsyncTaskExecutor asyncTaskExecutor) { + this.registry = registry; + this.asyncTaskExecutor = asyncTaskExecutor; + } + + @Override + public void addInterceptors(InterceptorRegistry registry) { + registry.addInterceptor( + new MetricsInterceptor( + this.registry, + "controller.invocations", + List.of("account", "region"), + List.of("BasicErrorController"))); + } + + @Bean + Filter eTagFilter() { + return new ShallowEtagHeaderFilter(); + } + + @Bean + RequestQueue requestQueue( + DynamicConfigService dynamicConfigService, + RequestQueueConfiguration requestQueueConfiguration, + Registry registry) { + return RequestQueue.forConfig(dynamicConfigService, registry, requestQueueConfiguration); + } + + @Bean + AuthenticatedRequestFilter 
authenticatedRequestFilter() { + return new AuthenticatedRequestFilter(true); + } + + @Bean + FilterRegistrationBean authenticatedRequestFilterRegistrationBean( + AuthenticatedRequestFilter authenticatedRequestFilter) { + FilterRegistrationBean frb = new FilterRegistrationBean(authenticatedRequestFilter); + frb.setOrder(Ordered.HIGHEST_PRECEDENCE); + return frb; + } + + @Override + public void configureContentNegotiation(ContentNegotiationConfigurer configurer) { + configurer + .defaultContentType(MediaType.APPLICATION_JSON_UTF8) + .favorPathExtension(false) + .ignoreAcceptHeader(true); + } + + @Override + public void configureAsyncSupport(AsyncSupportConfigurer configurer) { + configurer.setTaskExecutor(new MdcCopyingAsyncTaskExecutor(asyncTaskExecutor)); + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/configuration/ThreadPoolConfiguration.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/configuration/ThreadPoolConfiguration.groovy deleted file mode 100644 index fdd12f21b75..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/configuration/ThreadPoolConfiguration.groovy +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
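Aside: configureAsyncSupport in the new WebConfig wraps the servlet async executor in kork's MdcCopyingAsyncTaskExecutor, so the MDC logging/auth context survives the thread hop on async MVC requests. A rough sketch of what such a wrapper does, assuming only slf4j's MDC and Spring's AsyncTaskExecutor — an illustration of the idea, not kork's actual implementation:

import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import org.slf4j.MDC;
import org.springframework.core.task.AsyncTaskExecutor;

// Illustrative wrapper: copy the caller's MDC map onto the worker thread
// for the duration of the task, then restore whatever was there before.
class MdcPropagatingExecutor implements AsyncTaskExecutor {
  private final AsyncTaskExecutor delegate;

  MdcPropagatingExecutor(AsyncTaskExecutor delegate) {
    this.delegate = delegate;
  }

  private Runnable wrap(Runnable task) {
    Map<String, String> context = MDC.getCopyOfContextMap();
    return () -> {
      Map<String, String> previous = MDC.getCopyOfContextMap();
      if (context != null) {
        MDC.setContextMap(context);
      }
      try {
        task.run();
      } finally {
        if (previous != null) {
          MDC.setContextMap(previous);
        } else {
          MDC.clear();
        }
      }
    };
  }

  @Override
  public void execute(Runnable task) {
    delegate.execute(wrap(task));
  }

  @Override
  public void execute(Runnable task, long startTimeout) {
    delegate.execute(wrap(task), startTimeout);
  }

  @Override
  public Future<?> submit(Runnable task) {
    return delegate.submit(wrap(task));
  }

  @Override
  public <T> Future<T> submit(Callable<T> task) {
    Map<String, String> context = MDC.getCopyOfContextMap();
    return delegate.submit(() -> {
      if (context != null) {
        MDC.setContextMap(context);
      }
      try {
        return task.call();
      } finally {
        MDC.clear();
      }
    });
  }
}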
- */ - -package com.netflix.spinnaker.clouddriver.configuration - -import groovy.transform.Canonical -import org.springframework.boot.context.properties.ConfigurationProperties - -@Canonical -@ConfigurationProperties -class ThreadPoolConfiguration { - int queryCluster = 25 -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsController.groovy index dc7897d530e..ad30af28f72 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsController.groovy @@ -38,11 +38,11 @@ import org.springframework.web.bind.annotation.* @RequestMapping("/applications") class ApplicationsController { - @Autowired - List applicationProviders + @Autowired(required = false) + List applicationProviders = [] - @Autowired - List clusterProviders + @Autowired(required = false) + List clusterProviders = [] @Autowired MessageSource messageSource @@ -65,18 +65,14 @@ class ApplicationsController { @PreAuthorize("hasPermission(#name, 'APPLICATION', 'READ')") @RequestMapping(value = "/{name:.+}", method = RequestMethod.GET) ApplicationViewModel get(@PathVariable String name) { - try { - def apps = requestQueue.execute(name, { - applicationProviders.collect { it.getApplication(name) } - }) - null - if (!apps) { - throw new NotFoundException("Application does not exist (name: ${name})") - } else { - return transform(apps) - } - } catch (e) { + def apps = requestQueue.execute(name, { + applicationProviders.collect { it.getApplication(name) } + }) - null + if (!apps) { throw new NotFoundException("Application does not exist (name: ${name})") } + + return transform(apps) } private ApplicationViewModel transform(List apps) { diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ArtifactController.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ArtifactController.java deleted file mode 100644 index 66367bfeef5..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ArtifactController.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
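Aside: the ApplicationsController hunk above (and the DataController hunk later in this diff) replaces hard @Autowired list fields with optional injection plus an empty-list fallback, so the controller still starts when no provider beans are on the classpath. A sketch of the constructor-based variant under assumed names — GreetingProvider and GreetingService are hypothetical, not clouddriver types:

import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

// Hypothetical provider interface; stands in for ApplicationProvider/DataProvider.
interface GreetingProvider {
  String greet(String name);
}

@Component
class GreetingService {
  private final List<GreetingProvider> providers;

  // Optional<List<...>> resolves to Optional.empty() when no GreetingProvider
  // beans exist, so the context starts instead of failing injection.
  @Autowired
  GreetingService(Optional<List<GreetingProvider>> providers) {
    this.providers = providers.orElse(Collections.emptyList());
  }

  String greetAll(String name) {
    return providers.isEmpty()
        ? "no providers configured"
        : providers.get(0).greet(name);
  }
}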
- * - */ - -package com.netflix.spinnaker.clouddriver.controllers; - -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; -import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; -import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; -import com.netflix.spinnaker.kork.artifacts.model.Artifact; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.io.IOUtils; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.RequestBody; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RestController; -import org.springframework.web.servlet.mvc.method.annotation.StreamingResponseBody; - -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - -@Slf4j -@RestController -@RequestMapping("/artifacts") -public class ArtifactController { - private ArtifactCredentialsRepository artifactCredentialsRepository; - private ArtifactDownloader artifactDownloader; - - @Autowired - public ArtifactController(Optional artifactCredentialsRepository, - Optional artifactDownloader) { - this.artifactCredentialsRepository = artifactCredentialsRepository.orElse(null); - this.artifactDownloader = artifactDownloader.orElse(null); - } - - @RequestMapping(method = RequestMethod.GET, value = "/credentials") - List list() { - if (artifactCredentialsRepository == null) { - return new ArrayList<>(); - } else { - return artifactCredentialsRepository.getAllCredentials(); - } - } - - // PUT because we need to send a body, which GET does not allow for spring/retrofit - @RequestMapping(method = RequestMethod.PUT, value = "/fetch") - StreamingResponseBody fetch(@RequestBody Artifact artifact) { - if (artifactDownloader == null) { - throw new IllegalStateException("Artifacts have not been enabled. Enable them using 'artifacts.enabled' in clouddriver"); - } - - return outputStream -> IOUtils.copy(artifactDownloader.download(artifact), outputStream); - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupport.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupport.groovy index 12434e992c2..c8a81a325b0 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupport.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupport.groovy @@ -17,7 +17,10 @@ package com.netflix.spinnaker.clouddriver.controllers import com.netflix.frigga.Names +import com.netflix.spinnaker.clouddriver.model.EntityTags import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider +import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator import org.springframework.beans.factory.annotation.Autowired import org.springframework.security.core.Authentication @@ -33,6 +36,9 @@ class AuthorizationSupport { @Autowired FiatPermissionEvaluator permissionEvaluator + @Autowired + AccountCredentialsProvider accountCredentialsProvider + /** * Performs READ authorization checks on returned Maps that are keyed by account name. 
* @param map Objected returned by a controller that has account names as the key @@ -118,4 +124,33 @@ class AuthorizationSupport { } return true } + + /** + * Verify that the current user has access to the application/account (as appropriate) for each tagged entity. + */ + boolean authorizeEntityTags(List entityTags) { + if (!entityTags) { + return false + } + + Map accountNameById = accountCredentialsProvider.all.collectEntries { AccountCredentials credentials -> + [credentials.accountId?.toString(), credentials.name] + } + + def auth = SecurityContextHolder.context.authentication; + return entityTags.every { + boolean hasPermission = true + + if (it.entityRef.application) { + hasPermission = hasPermission && permissionEvaluator.hasPermission(auth, it.entityRef.application, 'APPLICATION', 'READ') + } + + String accountName = accountNameById[it.entityRef.accountId] + if (accountName) { + hasPermission = hasPermission && permissionEvaluator.hasPermission(auth, accountName, 'ACCOUNT', 'READ') + } + + return hasPermission + } + } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CacheController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CacheController.groovy index ce7e2be9e3c..a0308f41be1 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CacheController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CacheController.groovy @@ -16,8 +16,11 @@ package com.netflix.spinnaker.clouddriver.controllers -import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent +import com.netflix.spinnaker.cats.cache.AgentIntrospection +import com.netflix.spinnaker.cats.cache.CacheIntrospectionStore +import com.netflix.spinnaker.clouddriver.cache.OnDemandCacheStatus import com.netflix.spinnaker.clouddriver.cache.OnDemandCacheUpdater +import com.netflix.spinnaker.clouddriver.cache.OnDemandType import com.netflix.spinnaker.kork.web.exceptions.NotFoundException import org.springframework.beans.factory.annotation.Autowired import org.springframework.http.HttpStatus @@ -35,14 +38,14 @@ class CacheController { ResponseEntity handleOnDemand(@PathVariable String cloudProvider, @PathVariable String type, @RequestBody Map data) { - OnDemandAgent.OnDemandType onDemandType = getOnDemandType(type); + OnDemandType onDemandType = getOnDemandType(type); def onDemandCacheResult = onDemandCacheUpdaters.find { it.handles(onDemandType, cloudProvider) }?.handle(onDemandType, cloudProvider, data) def cacheStatus = onDemandCacheResult?.status - def httpStatus = (cacheStatus == OnDemandCacheUpdater.OnDemandCacheStatus.PENDING) ? HttpStatus.ACCEPTED : HttpStatus.OK + def httpStatus = (cacheStatus == OnDemandCacheStatus.PENDING) ? 
HttpStatus.ACCEPTED : HttpStatus.OK return new ResponseEntity( [ @@ -52,11 +55,19 @@ class CacheController { ) } + + @RequestMapping(method = RequestMethod.GET, value = "/introspection") + Collection getAgentIntrospections() { + return CacheIntrospectionStore.getStore().listAgentIntrospections() + // sort by descending start time, so newest executions are first + .toSorted { a, b -> b.getLastExecutionStartMs() <=> a.getLastExecutionStartMs() } + } + @RequestMapping(method = RequestMethod.GET, value = "/{cloudProvider}/{type}") Collection pendingOnDemands(@PathVariable String cloudProvider, @PathVariable String type, - @RequestParam(name = "id", required = false) String id) { - OnDemandAgent.OnDemandType onDemandType = getOnDemandType(type) + @RequestParam(value = "id", required = false) String id) { + OnDemandType onDemandType = getOnDemandType(type) onDemandCacheUpdaters.findAll { it.handles(onDemandType, cloudProvider) }?.collect { @@ -68,9 +79,9 @@ class CacheController { }.flatten() } - static OnDemandAgent.OnDemandType getOnDemandType(String type) { + static OnDemandType getOnDemandType(String type) { try { - return OnDemandAgent.OnDemandType.fromString(type) + return OnDemandType.fromString(type) } catch (IllegalArgumentException e) { throw new NotFoundException(e.message) } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CloudMetricController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CloudMetricController.groovy index f5916f58281..8c43c0f2dd1 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CloudMetricController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CloudMetricController.groovy @@ -16,11 +16,13 @@ package com.netflix.spinnaker.clouddriver.controllers -import static java.time.temporal.ChronoUnit.HOURS - import com.netflix.spinnaker.clouddriver.model.CloudMetricDescriptor import com.netflix.spinnaker.clouddriver.model.CloudMetricProvider import com.netflix.spinnaker.clouddriver.model.CloudMetricStatistics + +import static java.time.temporal.ChronoUnit.HOURS + + import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.bind.annotation.PathVariable import org.springframework.web.bind.annotation.RequestMapping @@ -40,9 +42,9 @@ class CloudMetricController { @RequestMapping(method = RequestMethod.GET, value = "/{cloudProvider}/{account}/{region}") List findAll(@PathVariable String cloudProvider, - @PathVariable String account, - @PathVariable String region, - @RequestParam Map filters) { + @PathVariable String account, + @PathVariable String region, + @RequestParam Map filters) { getProvider(cloudProvider).findMetricDescriptors(account, region, filters) } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterController.groovy index 098564620cd..2ff3eaf56a6 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterController.groovy @@ -16,86 +16,101 @@ package com.netflix.spinnaker.clouddriver.controllers -import com.netflix.spinnaker.clouddriver.model.Application -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import 
com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.model.Summary -import com.netflix.spinnaker.clouddriver.model.TargetServerGroup +import com.netflix.spinnaker.clouddriver.ecs.model.EcsApplication +import com.netflix.spinnaker.clouddriver.model.* +import com.netflix.spinnaker.clouddriver.model.view.ClusterViewModelPostProcessor +import com.netflix.spinnaker.clouddriver.model.view.ServerGroupViewModelPostProcessor import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue import com.netflix.spinnaker.kork.web.exceptions.NotFoundException import com.netflix.spinnaker.moniker.Moniker import groovy.transform.Canonical import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.MessageSource import org.springframework.security.access.prepost.PostAuthorize import org.springframework.security.access.prepost.PreAuthorize +import org.springframework.util.StringUtils import org.springframework.web.bind.annotation.* +import java.util.stream.Collectors +import java.util.stream.Stream + +import static com.netflix.spinnaker.clouddriver.model.view.ModelObjectViewModelPostProcessor.applyExtensions +import static com.netflix.spinnaker.clouddriver.model.view.ModelObjectViewModelPostProcessor.applyExtensionsToObject + @Slf4j @RestController @RequestMapping("/applications/{application}/clusters") class ClusterController { + public static final Comparator OLDEST_TO_NEWEST = Comparator + .comparingLong({ ServerGroup sg -> sg.getCreatedTime() }) + + public static final Comparator BIGGEST_TO_SMALLEST = Comparator + .comparingInt({ ServerGroup sg -> + Optional.ofNullable(sg.getInstances()).map({ it.size() }).orElse(0) + }) + .thenComparing(OLDEST_TO_NEWEST) + .reversed() + @Autowired List applicationProviders @Autowired List clusterProviders - @Autowired - MessageSource messageSource - @Autowired RequestQueue requestQueue @Autowired ServerGroupController serverGroupController + @Autowired + Optional> clusterExtensions = Optional.empty() + + @Autowired + Optional> serverGroupExtensions = Optional.empty() + @PreAuthorize("@fiatPermissionEvaluator.storeWholePermission() and hasPermission(#application, 'APPLICATION', 'READ')") @PostAuthorize("@authorizationSupport.filterForAccounts(returnObject)") @RequestMapping(method = RequestMethod.GET) Map> listByAccount(@PathVariable String application) { - def apps = ((List) applicationProviders.collectMany { - [it.getApplication(application)] ?: [] - }).findAll().sort { a, b -> a.name.toLowerCase() <=> b.name.toLowerCase() } - def clusterNames = [:] - def lastApp = null - for (app in apps) { - if (!lastApp) { - clusterNames = app.clusterNames - } else { - clusterNames = Application.mergeClusters.curry(lastApp, app).call() - } - lastApp = app - } - clusterNames + List apps = applicationProviders.stream() + .map({ it.getApplication(application) }) + .filter({ it != null }) + .sorted(Comparator.comparing({ Application it -> it.getName().toLowerCase() })) + .collect(Collectors.toList()) + + Map> clusterNames = mergeClusters(apps) + return clusterNames + } + + private Map> mergeClusters(List a) { + Map> map = new HashMap<>() + a.stream() + .flatMap({ + it instanceof EcsApplication + ? 
it.getClusterNameMetadata().entrySet().stream() + : it.getClusterNames().entrySet().stream() + }) + .forEach({ entry -> + map.computeIfAbsent(entry.getKey(), { new HashSet<>() }).addAll(entry.getValue()) + }) + return map } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') && hasPermission(#account, 'ACCOUNT', 'READ')") @RequestMapping(value = "/{account:.+}", method = RequestMethod.GET) Set getForAccount(@PathVariable String application, @PathVariable String account) { - def clusters = clusterProviders.collect { - def clusters = (Set) it.getClusters(application, account, false) - def clusterViews = [] - for (cluster in clusters) { - clusterViews << new ClusterViewModel( - name: cluster.name, - moniker: cluster.moniker, - account: cluster.accountName, - loadBalancers: cluster.loadBalancers.collect { - it.name - }, - serverGroups: cluster.serverGroups.collect { - it.name - }, - ) - } - clusterViews - }?.flatten() as Set - if (!clusters) { + + Set clusters = clusterProviders.stream() + .map({ it.getClusters(application, account, false) }) + .filter({ it != null }) + .flatMap({ + applyExtensions(clusterExtensions, it).stream() + }) + .map({ Cluster cluster -> ClusterViewModel.from(cluster) }) + .collect(Collectors.toSet()) + if (clusters.isEmpty()) { throw new NotFoundException("No clusters found (application: ${application}, account: ${account})") } clusters @@ -106,16 +121,20 @@ class ClusterController { Set getForAccountAndName(@PathVariable String application, @PathVariable String account, @PathVariable String name, - @RequestParam(required = false, value = 'expand', defaultValue = 'true') boolean expand) { - def clusters = clusterProviders.collect { provider -> - requestQueue.execute(application, { provider.getCluster(application, account, name, expand) }) - } - - clusters.removeAll([null]) - if (!clusters) { - throw new NotFoundException("Cluster not found (application: ${application}, account: ${account}, name: ${name})") + @RequestParam(required = false, value = "expand", defaultValue = "true") boolean expand) { + def clusters = clusterProviders.stream() + .map({ provider -> + applyExtensionsToObject(clusterExtensions, + requestQueue.execute(application, { provider.getCluster(application, account, name, expand) })) + }) + .filter({ it != null }) + .collect(Collectors.toSet()) + + if (clusters.isEmpty()) { + throw new NotFoundException(String.format( + "Cluster not found (application: %s, account: %s, name: %s)", application, account, name)) } - clusters + return clusters } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') && hasPermission(#account, 'ACCOUNT', 'READ')") @@ -124,13 +143,22 @@ class ClusterController { @PathVariable String account, @PathVariable String name, @PathVariable String type, - @RequestParam(required = false, value = 'expand', defaultValue = 'true') boolean expand) { - Set allClusters = getForAccountAndName(application, account, name, expand) - def cluster = allClusters.find { it.type == type } + @RequestParam(required = false, value = "expand", defaultValue = "true") boolean expand) { + + def clusterProvider = clusterProviders.find { it.cloudProviderId == type } + if (!clusterProvider) { + throw new NotFoundException("No cluster provider of type: ${type} found that can handle cluster: ${name} in application: ${application}, account: ${account}") + } + + Cluster cluster = applyExtensionsToObject(clusterExtensions, + requestQueue.execute(application, { clusterProvider.getCluster(application, account, name, expand) }) + ) + if 
(!cluster) { - throw new NotFoundException("No clusters found (application: ${application}, account: ${account}, type: ${type})") + throw new NotFoundException(String.format( + "No clusters found (application: %s, account: %s, type: %s)", application, account, type)) } - cluster + return cluster } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') && hasPermission(#account, 'ACCOUNT', 'READ')") @@ -140,10 +168,20 @@ class ClusterController { @PathVariable String clusterName, @PathVariable String type, @RequestParam(value = "region", required = false) String region, - @RequestParam(required = false, value = 'expand', defaultValue = 'true') boolean expand) { - Cluster cluster = getForAccountAndNameAndType(application, account, clusterName, type, expand) - def results = region ? cluster.serverGroups.findAll { it.region == region } : cluster.serverGroups - results ?: [] + @RequestParam(required = false, value = "expand", defaultValue = "true") boolean expand) { + Cluster cluster = applyExtensionsToObject(clusterExtensions, getForAccountAndNameAndType(application, account, clusterName, type, expand)) + + Stream serverGroups = cluster.getServerGroups().stream() + + if (!StringUtils.isEmpty(region)) { + serverGroups = serverGroups.filter({ region.equals(it.getRegion()) }) + } + + def result = serverGroups + .map({ applyExtensionsToObject(serverGroupExtensions, it) }) + .collect(Collectors.toSet()) + + return result } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') && hasPermission(#account, 'ACCOUNT', 'READ')") @@ -155,27 +193,46 @@ class ClusterController { @PathVariable String serverGroupName, @RequestParam(value = "region", required = false) String region) { // we can optimize loads iff the cloud provider supports loading minimal clusters (ie. w/o instances) - def providers = clusterProviders.findAll { it.cloudProviderId == type } - if (!providers) { - log.warn("No cluster provider found for type (type: ${type}, account: ${account})") - } - - def serverGroups = providers.collect { p -> - def shouldExpand = !p.supportsMinimalClusters() - def serverGroups = getServerGroups(application, account, clusterName, type, region, shouldExpand).findAll { - return region ? it.name == serverGroupName && it.region == region : it.name == serverGroupName - } ?: [] + def providers = providers(type) - return shouldExpand ? serverGroups : serverGroups.collect { ServerGroup sg -> - return serverGroupController.getServerGroupByApplication(application, account, sg.region, sg.name, "true") - } - }.flatten() + if (providers.isEmpty()) { + log.warn("No cluster provider found for type (type: {}, account: {})", type, account) + } - if (!serverGroups) { - throw new NotFoundException("Server group not found (account: ${account}, name: ${serverGroupName}, type: ${type})") + List serverGroups = providers.stream() + .flatMap({ p -> + boolean isExpanded = !p.supportsMinimalClusters() + Stream serverGroups = + applyExtensions(serverGroupExtensions, getServerGroups(application, account, clusterName, type, region, isExpanded)) + .stream() + .filter({ + serverGroupName.equals(it.getName()) && + (StringUtils.isEmpty(region) || region.equals(it.getRegion())) + }) + + return isExpanded + ? 
serverGroups + : serverGroups + .map({ ServerGroup sg -> + return serverGroupController.getServerGroupByApplication(application, account, sg.getRegion(), sg.getName(), "true") + }) + }) + .collect(Collectors.toList()) + + if (serverGroups.isEmpty()) { + throw new NotFoundException(String.format("Server group not found (account: %s, name: %s, type: %s)", account, serverGroupName, type)) } - return region ? serverGroups?.getAt(0) : serverGroups + // TODO: maybe break up this API into 2 different routes instead of returning 2 types + return StringUtils.isEmpty(region) + ? serverGroups + : serverGroups.get(0) + } + + private List providers(String cloudProvider) { + return clusterProviders.stream() + .filter({ cloudProvider.equals(it.getCloudProviderId()) }) + .collect(Collectors.toList()) } /** @@ -184,121 +241,153 @@ class ClusterController { */ @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') && hasPermission(#account, 'ACCOUNT', 'READ')") @RequestMapping(value = "/{account:.+}/{clusterName:.+}/{cloudProvider}/{scope}/serverGroups/target/{target:.+}", method = RequestMethod.GET) - ServerGroup getTargetServerGroup( - @PathVariable String application, - @PathVariable String account, - @PathVariable String clusterName, - @PathVariable String cloudProvider, - @PathVariable String scope, - @PathVariable String target, - @RequestParam(value = "onlyEnabled", required = false, defaultValue = "false") String onlyEnabled, - @RequestParam(value = "validateOldest", required = false, defaultValue = "true") String validateOldest) { + ServerGroup getTargetServerGroup(@PathVariable String application, + @PathVariable String account, + @PathVariable String clusterName, + @PathVariable String cloudProvider, + @PathVariable String scope, + @PathVariable String target, + @RequestParam(value = "onlyEnabled", required = false, defaultValue = "false") String onlyEnabled, + @RequestParam(value = "validateOldest", required = false, defaultValue = "true") String validateOldest) { TargetServerGroup tsg try { tsg = TargetServerGroup.fromString(target) } catch (IllegalArgumentException e) { - throw new NotFoundException("Target not found (target: ${target})") + throw new NotFoundException(String.format("Target not found (target: %s)", target)) } // we can optimize loads iff the cloud provider supports loading minimal clusters (ie. 
w/o instances) - def providers = clusterProviders.findAll { it.cloudProviderId == cloudProvider } - if (!providers) { - log.warn("No cluster provider found for cloud provider (cloudProvider: ${cloudProvider}, account: ${account})") + def providers = providers(cloudProvider) + if (providers.isEmpty()) { + log.warn("No cluster provider found for cloud provider (cloudProvider: {}, account: {})", cloudProvider, account) } - def needsExpand = [:] - - // load all server groups w/o instance details (this is reasonably efficient) - def sortedServerGroups = providers.collect { p -> - def shouldExpand = !p.supportsMinimalClusters() - def serverGroups = getServerGroups(application, account, clusterName, cloudProvider, null /* region */, shouldExpand).findAll { - def scopeMatch = it.region == scope || it.zones?.contains(scope) - - def enableMatch - if (Boolean.valueOf(onlyEnabled)) { - enableMatch = !it.isDisabled() - } else { - enableMatch = true - } - - return scopeMatch && enableMatch - } ?: [] - - if (shouldExpand) { - serverGroups.forEach { sg -> needsExpand[sg] = true } - } + boolean enabledOnly = Boolean.parseBoolean(onlyEnabled) - return serverGroups - }.flatten() - .findAll { it.createdTime != null } - .sort { a, b -> b.createdTime <=> a.createdTime } + Set alreadyExpanded = new HashSet<>() - def expandServerGroup = { ServerGroup serverGroup -> - if (needsExpand[serverGroup]) { - // server group was already expanded on initial load - return serverGroup - } - - return serverGroupController.getServerGroupByApplication( - application, account, serverGroup.region, serverGroup.name, "true" - ) - } - - if (!sortedServerGroups) { - throw new NotFoundException("No server groups found (account: ${account}, cluster: ${clusterName}, type: ${cloudProvider})") - } + // load all server groups w/o instance details (this is reasonably efficient) + Stream filteredServerGroups = providers.stream() + .flatMap({ p -> + boolean isExpanded = !p.supportsMinimalClusters() + Stream serverGroups = getServerGroups(application, account, clusterName, cloudProvider, null /* region */, isExpanded) + .stream() + .filter({ + boolean scopeMatch = scope.equals(it.getRegion()) || + Optional.ofNullable(it.getZones()) + .map({ it.contains(scope) }) + .orElse(false) + + boolean enableMatch = enabledOnly ? !it.isDisabled() : true + + return scopeMatch && enableMatch + }) + .map({ serverGroup -> + if (isExpanded) { + alreadyExpanded.add(serverGroup) // this is kind of gross + } + return serverGroup + }) + + return serverGroups + }) + .filter({ it.getCreatedTime() != null }) + + Optional maybe = Optional.empty() switch (tsg) { case TargetServerGroup.CURRENT: - return expandServerGroup(sortedServerGroups.get(0)) + maybe = filteredServerGroups + .sorted(OLDEST_TO_NEWEST.reversed()) + .findFirst() + break case TargetServerGroup.PREVIOUS: - if (sortedServerGroups.size() == 1) { + def serverGroups = filteredServerGroups + .sorted(OLDEST_TO_NEWEST.reversed()) + .limit(2) + .collect(Collectors.toList()) + if (serverGroups.size() == 1) { throw new NotFoundException("Target not found (target: ${target})") + } else if (serverGroups.size() > 1) { + maybe = Optional.of(serverGroups.get(1)) } - return expandServerGroup(sortedServerGroups.get(1)) + break case TargetServerGroup.OLDEST: // At least two expected, but some cases just want the oldest no matter what. 
- if (Boolean.valueOf(validateOldest) && sortedServerGroups.size() == 1) { - throw new NotFoundException("Target not found (target: ${target})") + boolean validate = Boolean.parseBoolean(validateOldest) + def serverGroups = filteredServerGroups + .sorted(OLDEST_TO_NEWEST) + .limit(2) + .collect(Collectors.toList()) + if (validate && serverGroups.size() == 1) { + throw new NotFoundException(String.format("Target not found (target: %s)", target)) } - return expandServerGroup(sortedServerGroups.last()) + maybe = Optional.of(serverGroups.get(0)) + break case TargetServerGroup.LARGEST: // Choose the server group with the most instances, falling back to newest in the case of a tie. - return expandServerGroup(sortedServerGroups.sort { lhs, rhs -> - (rhs.instances?.size() ?: 0) <=> (lhs.instances?.size() ?: 0) ?: - rhs.createdTime <=> lhs.createdTime - }.get(0)) + maybe = filteredServerGroups + .sorted(BIGGEST_TO_SMALLEST) + .findFirst() + break case TargetServerGroup.FAIL: - if (sortedServerGroups.size() > 1) { - throw new NotFoundException("More than one target found (scope: ${scope}, serverGroups: ${sortedServerGroups*.name})") + def serverGroups = filteredServerGroups.collect(Collectors.toList()) + if (serverGroups.size() > 1) { + String names = serverGroups.stream() + .map({ it.getName() }) + .collect(Collectors.joining(", ")) + throw new NotFoundException(String.format("More than one target found (scope: %s, serverGroups: %s)", scope, names)) } - return expandServerGroup(sortedServerGroups.get(0)) + maybe = serverGroups.size() == 1 ? Optional.of(serverGroups.get(0)) : Optional.empty() } + + ServerGroup result = maybe + .map({ ServerGroup serverGroup -> + if (alreadyExpanded.contains(serverGroup)) { + // server group was already expanded on initial load + return serverGroup + } + return serverGroupController.getServerGroupByApplication(application, account, serverGroup.getRegion(), serverGroup.getName(), "true") + }) + .orElseThrow({ + new NotFoundException(String.format( + "No server groups found (account: %s, location: %s, cluster: %s, type: %s)", account, scope, clusterName, cloudProvider)) + }) + return result } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') && hasPermission(#account, 'ACCOUNT', 'READ')") @RequestMapping(value = "/{account:.+}/{clusterName:.+}/{cloudProvider}/{scope}/serverGroups/target/{target:.+}/{summaryType:.+}", method = RequestMethod.GET) - Summary getServerGroupSummary( - @PathVariable String application, - @PathVariable String account, - @PathVariable String clusterName, - @PathVariable String cloudProvider, - @PathVariable String scope, - @PathVariable String target, - @PathVariable String summaryType, - @RequestParam(value = "onlyEnabled", required = false, defaultValue = "false") String onlyEnabled) { + Summary getServerGroupSummary(@PathVariable String application, + @PathVariable String account, + @PathVariable String clusterName, + @PathVariable String cloudProvider, + @PathVariable String scope, + @PathVariable String target, + @PathVariable String summaryType, + @RequestParam(value = "onlyEnabled", required = false, defaultValue = "false") String onlyEnabled) { ServerGroup sg = getTargetServerGroup(application, - account, - clusterName, - cloudProvider, - scope, - target, - onlyEnabled, - "false" /* validateOldest */) - try { - return (Summary) sg.invokeMethod("get${summaryType.capitalize()}Summary".toString(), null /* args */) - } catch (MissingMethodException e) { - throw new NotFoundException("Summary not found (type: 
${summaryType})") + account, + clusterName, + cloudProvider, + scope, + target, + onlyEnabled, + "false" /* validateOldest */) + + if ("image".equalsIgnoreCase(summaryType)) { + return sg.getImageSummary() + } else if ("images".equalsIgnoreCase(summaryType)) { + return sg.getImagesSummary() + } else { + String method = "get" + StringUtils.capitalize(summaryType) + "Summary" + try { + // TODO: this is gross, is it used for anything besides ImageSummary? + log.warn("Getting summary (type: {}) may be removed unless explicit support is added", summaryType) + return (Summary) sg.getClass().getMethod(method).invoke(sg) + } catch (ReflectiveOperationException e) { + throw new NotFoundException(String.format("Summary not found (type: %s)", summaryType)) + } } } @@ -309,5 +398,19 @@ class ClusterController { Moniker moniker List loadBalancers List serverGroups + + static ClusterViewModel from(Cluster cluster) { + def result = new ClusterViewModel() + result.setName(cluster.getName()) + result.setAccount(cluster.getAccountName()) + result.setMoniker(cluster.getMoniker()) + result.setLoadBalancers(cluster.getLoadBalancers().stream() + .map({ it.getName() }) + .collect(Collectors.toList())) + result.setServerGroups(cluster.getServerGroups().stream() + .map({ it.getName() }) + .collect(Collectors.toList())) + return result + } } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ConfigRefreshController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ConfigRefreshController.groovy deleted file mode 100644 index 69643e45975..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ConfigRefreshController.groovy +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.controllers - -import com.netflix.spinnaker.clouddriver.Main -import com.netflix.spinnaker.clouddriver.events.ConfigRefreshedEvent -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.boot.Banner -import org.springframework.boot.builder.SpringApplicationBuilder -import org.springframework.context.ApplicationContext -import org.springframework.context.ApplicationEventPublisher -import org.springframework.context.annotation.Configuration -import org.springframework.core.env.ConfigurableEnvironment -import org.springframework.core.env.PropertySource -import org.springframework.core.env.StandardEnvironment -import org.springframework.web.bind.annotation.RequestMapping -import org.springframework.web.bind.annotation.RequestMethod -import org.springframework.web.bind.annotation.RestController - -@RestController -@RequestMapping("/config-refresh") -class ConfigRefreshController { - - @Autowired - ConfigurableEnvironment environment - - @Autowired - ApplicationContext appContext - - @Autowired - ApplicationEventPublisher publisher - - @RequestMapping(method = RequestMethod.POST) - void refresh() { - def env = new StandardEnvironment() - def app = new SpringApplicationBuilder() - .properties(Main.DEFAULT_PROPS) - .sources(NoBeans) - .web(false) - .headless(true) - .bannerMode(Banner.Mode.OFF) - .addCommandLineProperties(true) - .logStartupInfo(false) - .environment(env) - .build() - - def ctx = app.run(Main.launchArgs) - def currentProps = ctx.environment.propertySources - - for (PropertySource ps : environment.propertySources) { - if (currentProps.get(ps.name)) { - environment.propertySources.replace(ps.name, currentProps.get(ps.name)) - } - } - - ctx.close() - - publisher.publishEvent(new ConfigRefreshedEvent(appContext)) - } - - @Configuration - private static class NoBeans {} -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsController.groovy deleted file mode 100644 index 26d44764ef0..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsController.groovy +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.controllers - -import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.configuration.CredentialsConfiguration -import com.netflix.spinnaker.clouddriver.security.AccountCredentials -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.MessageSource -import org.springframework.web.bind.annotation.PathVariable -import org.springframework.web.bind.annotation.RequestMapping -import org.springframework.web.bind.annotation.RequestMethod -import org.springframework.web.bind.annotation.RequestParam -import org.springframework.web.bind.annotation.RestController - -@RestController -@RequestMapping("/credentials") -class CredentialsController { - - @Autowired - CredentialsConfiguration credentialsConfiguration - - @Autowired - ObjectMapper objectMapper - - @Autowired - AccountCredentialsProvider accountCredentialsProvider - - @Autowired - MessageSource messageSource - - @RequestMapping(method = RequestMethod.GET) - List list(@RequestParam(value = "expand", required = false) boolean expand) { - accountCredentialsProvider.all.collect { render(expand, it) } - } - - @RequestMapping(value = "/{name:.+}", method = RequestMethod.GET) - Map getAccount(@PathVariable("name") String name) { - def accountDetail = render(true, accountCredentialsProvider.getCredentials(name)) - if (!accountDetail) { - throw new NotFoundException("Account does not exist (name: ${name})") - } - - return accountDetail - } - - Map render(boolean includeDetail, AccountCredentials accountCredentials) { - if (accountCredentials == null) { - return null - } - Map cred = objectMapper.convertValue(accountCredentials, Map) - if (!includeDetail) { - cred.keySet().retainAll(['name', - 'environment', - 'accountType', - 'cloudProvider', - 'requiredGroupMembership', - 'permissions', - 'providerVersion', - 'accountId', - 'skin']) - } - - cred.type = accountCredentials.cloudProvider - cred.challengeDestructiveActions = credentialsConfiguration.challengeDestructiveActionsEnvironments.contains(accountCredentials.environment) - cred.primaryAccount = credentialsConfiguration.primaryAccountTypes.contains(accountCredentials.accountType) - - return cred - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/DataController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/DataController.groovy index 49af3c6c117..ab1a528a56e 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/DataController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/DataController.groovy @@ -35,8 +35,17 @@ import javax.servlet.http.HttpServletRequest @RestController @RequestMapping("/v1/data") class DataController { - @Autowired(required = false) - List dataProviders = [] + + List dataProviders + + @Autowired + DataController(Optional> dataProviders) { + if (dataProviders.present) { + this.dataProviders = dataProviders.get() + } else { + this.dataProviders = [] + } + } @RequestMapping(value = "/static/{id}", method = RequestMethod.GET) Object getStaticData(@PathVariable("id") String id, @RequestParam Map filters) { diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/EntityTagsController.java 
b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/EntityTagsController.java deleted file mode 100644 index b54e4fc40bb..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/EntityTagsController.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.controllers; - -import com.netflix.spinnaker.clouddriver.model.EntityTags; -import com.netflix.spinnaker.clouddriver.model.EntityTagsProvider; -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.context.MessageSource; -import org.springframework.context.i18n.LocaleContextHolder; -import org.springframework.http.HttpStatus; -import org.springframework.util.AntPathMatcher; -import org.springframework.web.bind.annotation.ExceptionHandler; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RequestParam; -import org.springframework.web.bind.annotation.ResponseStatus; -import org.springframework.web.bind.annotation.RestController; -import org.springframework.web.servlet.HandlerMapping; - -import javax.servlet.http.HttpServletRequest; -import java.util.*; -import java.util.stream.Collectors; - -@RestController -@RequestMapping("/tags") -public class EntityTagsController { - private final MessageSource messageSource; - private final EntityTagsProvider tagProvider; - - @Autowired - public EntityTagsController(MessageSource messageSource, - Optional tagProvider) { - this.messageSource = messageSource; - this.tagProvider = tagProvider.orElse(null); - } - - @RequestMapping(method = RequestMethod.GET) - public Collection list(@RequestParam(value = "cloudProvider", required = false) String cloudProvider, - @RequestParam(value = "application", required = false) String application, - @RequestParam(value = "entityType", required = false) String entityType, - @RequestParam(value = "entityId", required = false) String entityId, - @RequestParam(value = "idPrefix", required = false) String idPrefix, - @RequestParam(value = "account", required = false) String account, - @RequestParam(value = "region", required = false) String region, - @RequestParam(value = "namespace", required = false) String namespace, - @RequestParam(value = "maxResults", required = false, defaultValue = "2000") int maxResults, - @RequestParam Map allParameters) { - - Map tags = allParameters.entrySet().stream() - .filter(m -> m.getKey().toLowerCase().startsWith("tag")) - .collect(Collectors.toMap(p -> p.getKey().toLowerCase().replaceAll("tag:", ""), Map.Entry::getValue)); - - return tagProvider.getAll( - cloudProvider, - application, - entityType, - entityId != null ? 
Arrays.asList(entityId.split(",")) : null, - idPrefix, - account, - region, - namespace, - tags, - maxResults - ); - } - - @RequestMapping(value = "/**", method = RequestMethod.GET) - public EntityTags get(HttpServletRequest request) { - String pattern = (String) request.getAttribute(HandlerMapping.BEST_MATCHING_PATTERN_ATTRIBUTE); - String id = new AntPathMatcher().extractPathWithinPattern(pattern, request.getServletPath()); - return tagProvider.get(id).orElseThrow(() -> new NotFoundException("No EntityTags found w/ id = '" + id + "'")); - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesController.groovy index 1592dc1ea5b..38a2518ff41 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesController.groovy @@ -18,7 +18,10 @@ package com.netflix.spinnaker.clouddriver.controllers import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter import groovy.util.logging.Slf4j +import org.springframework.beans.BeansException import org.springframework.beans.factory.annotation.Autowired +import org.springframework.context.ApplicationContext +import org.springframework.context.ApplicationContextAware import org.springframework.core.annotation.AnnotationUtils import org.springframework.stereotype.Component import org.springframework.web.bind.annotation.RequestMapping @@ -28,33 +31,58 @@ import org.springframework.web.bind.annotation.RestController @Slf4j @RestController @RequestMapping("/features") -class FeaturesController { +class FeaturesController implements ApplicationContextAware { + private ApplicationContext applicationContext + @Autowired Collection atomicOperationConverters = [] @RequestMapping(value = "/stages", method = RequestMethod.GET) Collection stages() { return atomicOperationConverters.collect { AtomicOperationConverter atomicOperationConverter -> - def value = atomicOperationConverter.class.annotations.findResult { - def operationInterface = it.class.interfaces.find { - // look for a cloud provider-specific annotation indicating it's an AtomicOperation - it.name.endsWith("Operation") + try { + def value = atomicOperationConverter.class.annotations.findResult { + def operationInterface = it.class.interfaces.find { + // look for a cloud provider-specific annotation indicating it's an AtomicOperation + it.name.endsWith("Operation") + } + + if (operationInterface) { + def annotation = atomicOperationConverter.class.getAnnotation(operationInterface) + return AnnotationUtils.getValue(annotation) + } + + return null } - if (operationInterface) { - def annotation = atomicOperationConverter.class.getAnnotation(operationInterface) - return AnnotationUtils.getValue(annotation) + value = value ?: atomicOperationConverter.class.getAnnotation(Component)?.value() + if (!value) { + def beanNames = applicationContext.getBeanNamesForType(atomicOperationConverter.class) + if (beanNames.size() == 1) { + value = beanNames[0] + } else { + // unable to determine bean/stage name, do not include it in available stages (very strange if it happens!) 
+ value = atomicOperationConverter.class.simpleName + } } - return null + return [ + name : value, + enabled: true + ] + } catch (Exception e) { + log.warn("Unable to determine bean/stage name for ${atomicOperationConverter.class}", e) + return [ + name : atomicOperationConverter.class.simpleName, + enabled: true + ] } + } - value = value ?: atomicOperationConverter.class.getAnnotation(Component).value() + } - return [ - name: value, - enabled: true - ] - } + @Override + void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ImageController.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ImageController.java deleted file mode 100644 index c57ced3d881..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ImageController.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2018 Schibsted ASA. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.controllers; - -import com.netflix.spinnaker.clouddriver.model.Image; -import com.netflix.spinnaker.clouddriver.model.ImageProvider; -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RestController; - -import java.util.List; -import java.util.stream.Collectors; - - -@RestController -@RequestMapping("/images") -public class ImageController { - - @Autowired - List imageProviders; - - @RequestMapping(value = "/{provider}/{imageId}", method = RequestMethod.GET) - Image getImage(@PathVariable String provider, @PathVariable String imageId) { - - List imageProviderList = imageProviders.stream() - .filter(imageProvider -> imageProvider.getCloudProvider().equals(provider)) - .collect(Collectors.toList()); - - if (imageProviderList.isEmpty()) { - throw new NotFoundException("ImageProvider for provider " + provider + " not found."); - } else if (imageProviderList.size() > 1) { - throw new IllegalStateException("Found multiple ImageProviders for provider " + provider + ". 
Multiple ImageProviders for a single provider are not supported."); - } else { - return imageProviderList.get(0).getImageById(imageId).orElseThrow(() -> new NotFoundException("Image not found (id: " + imageId + ")")); - } - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/InstanceController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/InstanceController.groovy index 0c985b97f01..29acb8dde0b 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/InstanceController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/InstanceController.groovy @@ -60,7 +60,7 @@ class InstanceController { @PathVariable String region, @PathVariable String id) { String providerParam = cloudProvider ?: provider - Collection outputs = instanceProviders.findResults { + Collection outputs = instanceProviders.findResults { if (!providerParam || it.cloudProvider == providerParam) { return it.getConsoleOutput(account, region, id) } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/JobController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/JobController.groovy index 61c2d71c97f..986929b4d27 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/JobController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/JobController.groovy @@ -19,11 +19,13 @@ package com.netflix.spinnaker.clouddriver.controllers import com.netflix.spinnaker.clouddriver.model.JobProvider import com.netflix.spinnaker.clouddriver.model.JobStatus import com.netflix.spinnaker.kork.web.exceptions.NotFoundException -import io.swagger.annotations.ApiOperation -import io.swagger.annotations.ApiParam +import io.swagger.v3.oas.annotations.Operation +import io.swagger.v3.oas.annotations.Parameter import org.springframework.beans.factory.annotation.Autowired import org.springframework.context.MessageSource import org.springframework.security.access.prepost.PreAuthorize +import org.springframework.security.core.Authentication +import org.springframework.security.core.context.SecurityContextHolder import org.springframework.web.bind.annotation.PathVariable import org.springframework.web.bind.annotation.RequestMapping import org.springframework.web.bind.annotation.RequestMethod @@ -39,15 +41,16 @@ class JobController { @Autowired MessageSource messageSource - @PreAuthorize("hasPermission(#application, 'APPLICATION', 'WRITE') and hasPermission(#account, 'ACCOUNT', 'WRITE')") - @ApiOperation(value = "Collect a JobStatus", notes = "Collects the output of the job, may modify the job.") - @RequestMapping(value = "/{account}/{location}/{id:.+}", method = RequestMethod.POST) - JobStatus collectJob(@ApiParam(value = "Application name", required = true) @PathVariable String application, - @ApiParam(value = "Account job was created by", required = true) @PathVariable String account, - @ApiParam(value = "Namespace, region, or zone job is running in", required = true) @PathVariable String location, - @ApiParam(value = "Unique identifier of job being looked up", required = true) @PathVariable String id) { + @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') and hasPermission(#account, 'ACCOUNT', 'READ')") + @Operation(summary = "Collect a JobStatus", description = "Collects the output of the job.") + @RequestMapping(value = 
"/{account}/{location}/{id:.+}", method = RequestMethod.GET) + JobStatus collectJob(@Parameter(description = "Application name", required = true) @PathVariable String application, + @Parameter(description = "Account job was created by", required = true) @PathVariable String account, + @Parameter(description = "Namespace, region, or zone job is running in", required = true) @PathVariable String location, + @Parameter(description = "Unique identifier of job being looked up", required = true) @PathVariable String id) { + Authentication auth = SecurityContextHolder.getContext().getAuthentication(); Collection jobMatches = jobProviders.findResults { - it.collectJob(account, location, id) + return it.collectJob(account, location, id) } if (!jobMatches) { throw new NotFoundException("Job not found (account: ${account}, location: ${location}, id: ${id})") @@ -55,30 +58,36 @@ class JobController { jobMatches.first() } - @PreAuthorize("hasPermission(#application, 'APPLICATION', 'WRITE') and hasPermission(#account, 'ACCOUNT', 'WRITE')") - @ApiOperation(value = "Collect a JobStatus", notes = "Collects the output of the job, may modify the job.") + @PreAuthorize("hasPermission(#application, 'APPLICATION', 'EXECUTE') and hasPermission(#account, 'ACCOUNT', 'WRITE')") + @Operation(summary = "Cancel a Job", description = "Cancels the job.") @RequestMapping(value = "/{account}/{location}/{id:.+}", method = RequestMethod.DELETE) - void cancelJob(@ApiParam(value = "Application name", required = true) @PathVariable String application, - @ApiParam(value = "Account job was created by", required = true) @PathVariable String account, - @ApiParam(value = "Namespace, region, or zone job is running in", required = true) @PathVariable String location, - @ApiParam(value = "Unique identifier of job being looked up", required = true) @PathVariable String id) { + void cancelJob(@Parameter(description = "Application name", required = true) @PathVariable String application, + @Parameter(description = "Account job is running in", required = true) @PathVariable String account, + @Parameter(description = "Namespace, region, or zone job is running in", required = true) @PathVariable String location, + @Parameter(description = "Unique identifier of job to be canceled", required = true) @PathVariable String id) { jobProviders.forEach { it.cancelJob(account, location, id) } } - @PreAuthorize("hasPermission(#application, 'APPLICATION', 'WRITE') and hasPermission(#account, 'ACCOUNT', 'WRITE')") - @ApiOperation(value = "Collect a file from a job", notes = "Collects the file result of a job.") + @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ') and hasPermission(#account, 'ACCOUNT', 'READ')") + @Operation(summary = "Collect a file from a job", description = "Collects the file result of a job.") @RequestMapping(value = "/{account}/{location}/{id}/{fileName:.+}", method = RequestMethod.GET) Map getFileContents( - @ApiParam(value = "Application name", required = true) @PathVariable String application, - @ApiParam(value = "Account job was created by", required = true) @PathVariable String account, - @ApiParam(value = "Namespace, region, or zone job is running in", required = true) @PathVariable String location, - @ApiParam(value = "Unique identifier of job being looked up", required = true) @PathVariable String id, - @ApiParam(value = "File name to look up", required = true) @PathVariable String fileName + @Parameter(description = "Application name", required = true) @PathVariable String application, + 
@Parameter(description = "Account job was created by", required = true) @PathVariable String account, + @Parameter(description = "Namespace, region, or zone job is running in", required = true) @PathVariable String location, + @Parameter(description = "Unique identifier of job being looked up", required = true) @PathVariable String id, + @Parameter(description = "File name to look up", required = true) @PathVariable String fileName ) { - jobProviders.findResults { + Collection<Map<String, Object>> results = jobProviders.findResults { it.getFileContents(account, location, id, fileName) - }.first() + } + + if (!results.isEmpty()) { + return results.first() + } + + return Collections.emptyMap() } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/LoadBalancerController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/LoadBalancerController.groovy index e14d77fb443..db186fd2a3e 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/LoadBalancerController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/LoadBalancerController.groovy @@ -20,6 +20,7 @@ import com.netflix.spinnaker.clouddriver.exceptions.CloudProviderNotFoundExcepti import com.netflix.spinnaker.clouddriver.model.LoadBalancer import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider import org.springframework.beans.factory.annotation.Autowired +import org.springframework.http.MediaType import org.springframework.security.access.prepost.PostAuthorize import org.springframework.security.access.prepost.PreAuthorize import org.springframework.web.bind.annotation.PathVariable @@ -30,6 +31,7 @@ import org.springframework.web.bind.annotation.RestController import java.util.stream.Collectors @RestController +@RequestMapping(produces = MediaType.APPLICATION_JSON_VALUE) class LoadBalancerController { @Autowired diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ManifestController.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ManifestController.java deleted file mode 100644 index 2bd80432d16..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ManifestController.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2017 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.spinnaker.clouddriver.controllers; - -import com.netflix.spinnaker.clouddriver.model.Manifest; -import com.netflix.spinnaker.clouddriver.model.ManifestProvider; -import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue; -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.security.access.prepost.PostAuthorize; -import org.springframework.security.access.prepost.PreAuthorize; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RestController; - -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -@Slf4j -@RestController -@RequestMapping("/manifests") -public class ManifestController { - final List manifestProviders; - - final RequestQueue requestQueue; - - @Autowired - public ManifestController(List manifestProviders, RequestQueue requestQueue) { - this.manifestProviders = manifestProviders; - this.requestQueue = requestQueue; - } - - @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") - @PostAuthorize("hasPermission(returnObject?.moniker?.app, 'APPLICATION', 'READ')") - @RequestMapping(value = "/{account:.+}/_/{name:.+}", method = RequestMethod.GET) - Manifest getForAccountAndName(@PathVariable String account, - @PathVariable String name) { - return getForAccountLocationAndName(account, "", name); - } - - @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") - @PostAuthorize("hasPermission(returnObject?.moniker?.app, 'APPLICATION', 'READ')") - @RequestMapping(value = "/{account:.+}/{location:.+}/{name:.+}", method = RequestMethod.GET) - Manifest getForAccountLocationAndName(@PathVariable String account, - @PathVariable String location, - @PathVariable String name) { - List manifests = manifestProviders.stream() - .map(provider -> { - try { - return requestQueue.execute(account, () -> provider.getManifest(account, location, name)); - } catch (Throwable t) { - log.warn("Failed to read manifest " , t); - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toList()); - - String request = String.format("(account: %s, location: %s, name: %s)", account, location, name); - if (manifests.isEmpty()) { - throw new NotFoundException("Manifest " + request + " not found"); - } else if (manifests.size() > 1) { - log.error("Duplicate manifests " + manifests); - throw new IllegalStateException("Multiple manifests matching " + request + " found"); - } - - return manifests.get(0); - } - - @RequestMapping(value = "/{account:.+}/{name:.+}", method = RequestMethod.GET) - Manifest getForAccountLocationAndName(@PathVariable String account, - @PathVariable String name) { - return getForAccountLocationAndName(account, "", name); - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsController.groovy index 7c790756a50..3c4364aee50 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsController.groovy @@ -16,201 +16,256 @@ package 
com.netflix.spinnaker.clouddriver.controllers -import com.netflix.spectator.api.Registry +import com.fasterxml.jackson.annotation.JsonProperty +import com.google.common.collect.ImmutableList import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationErrors -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidationException -import com.netflix.spinnaker.clouddriver.deploy.DescriptionValidator +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationNotFoundException -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry +import com.netflix.spinnaker.clouddriver.orchestration.OperationsService import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor -import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository -import com.netflix.spinnaker.clouddriver.security.AllowedAccountsValidator -import com.netflix.spinnaker.clouddriver.security.ProviderVersion -import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig -import com.netflix.spinnaker.security.AuthenticatedRequest -import groovy.transform.Canonical +import com.netflix.spinnaker.kork.exceptions.ConstraintViolationException +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.MessageSource -import org.springframework.validation.Errors +import org.springframework.beans.factory.annotation.Value +import org.springframework.web.bind.annotation.GetMapping +import org.springframework.web.bind.annotation.PatchMapping import org.springframework.web.bind.annotation.PathVariable +import org.springframework.web.bind.annotation.PostMapping import org.springframework.web.bind.annotation.RequestBody -import org.springframework.web.bind.annotation.RequestMapping -import org.springframework.web.bind.annotation.RequestMethod import org.springframework.web.bind.annotation.RequestParam import org.springframework.web.bind.annotation.RestController +import javax.annotation.Nonnull +import javax.annotation.Nullable +import javax.annotation.PreDestroy +import javax.naming.OperationNotSupportedException +import java.util.concurrent.TimeUnit + +import static java.lang.String.format + @Slf4j @RestController class OperationsController { - @Autowired MessageSource messageSource - @Autowired OrchestrationProcessor orchestrationProcessor - @Autowired Registry registry - @Autowired (required = false) Collection allowedAccountValidators = [] - @Autowired (required = false) List atomicOperationDescriptionPreProcessors = [] - @Autowired AtomicOperationsRegistry atomicOperationsRegistry - @Autowired SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps - @Autowired AccountCredentialsRepository accountCredentialsRepository - - /* - * APIs - * ---------------------------------------------------------------------------------------------------------------------------- - */ + private final OperationsService operationsService + private final OrchestrationProcessor orchestrationProcessor + private final TaskRepository taskRepository + 
private final long shutdownWaitSeconds - /** - * @deprecated Use /{cloudProvider}/ops instead - */ + OperationsController( + OperationsService operationsService, + OrchestrationProcessor orchestrationProcessor, + TaskRepository taskRepository, + @Value('${admin.tasks.shutdown-wait-seconds:600}') long shutdownWaitSeconds) { + this.operationsService = operationsService + this.orchestrationProcessor = orchestrationProcessor + this.taskRepository = taskRepository + this.shutdownWaitSeconds = shutdownWaitSeconds + } +/** + * @deprecated Use /{cloudProvider}/ops instead + */ @Deprecated - @RequestMapping(value = "/ops", method = RequestMethod.POST) - Map operations(@RequestParam(value = "clientRequestId", required = false) String clientRequestId, - @RequestBody List> requestBody) { - List atomicOperations = collectAtomicOperations(requestBody) - start(atomicOperations, clientRequestId) + @PostMapping("/ops") + StartOperationResult operations( + @RequestParam(value = "clientRequestId", required = false) String clientRequestId, + @RequestBody List> requestBody) { + List atomicOperations = operationsService.collectAtomicOperations(requestBody) + return start(null, atomicOperations, clientRequestId) } /** * @deprecated Use /{cloudProvider}/ops/{name} instead */ @Deprecated - @RequestMapping(value = "/ops/{name}", method = RequestMethod.POST) - Map operation(@PathVariable("name") String name, - @RequestParam(value = "clientRequestId", required = false) String clientRequestId, - @RequestBody Map requestBody) { - List atomicOperations = collectAtomicOperations([[(name): requestBody]]) - start(atomicOperations, clientRequestId) + @PostMapping("/ops/{name}") + StartOperationResult operation( + @PathVariable("name") String name, + @RequestParam(value = "clientRequestId", required = false) String clientRequestId, + @RequestBody Map requestBody) { + List atomicOperations = operationsService.collectAtomicOperations([[(name): requestBody]]) + return start(null, atomicOperations, clientRequestId) } - @RequestMapping(value = "/{cloudProvider}/ops", method = RequestMethod.POST) - Map cloudProviderOperations(@PathVariable("cloudProvider") String cloudProvider, - @RequestParam(value = "clientRequestId", required = false) String clientRequestId, - @RequestBody List> requestBody) { - List atomicOperations = collectAtomicOperations(cloudProvider, requestBody) - start(atomicOperations, clientRequestId) + @PostMapping("/{cloudProvider}/ops") + StartOperationResult cloudProviderOperations( + @PathVariable("cloudProvider") String cloudProvider, + @RequestParam(value = "clientRequestId", required = false) String clientRequestId, + @RequestBody List> requestBody) { + List atomicOperations = operationsService.collectAtomicOperations(cloudProvider, requestBody) + return start(cloudProvider, atomicOperations, clientRequestId) } - @RequestMapping(value = "/{cloudProvider}/ops/{name}", method = RequestMethod.POST) - Map cloudProviderOperation(@PathVariable("cloudProvider") String cloudProvider, - @PathVariable("name") String name, - @RequestParam(value = "clientRequestId", required = false) String clientRequestId, - @RequestBody Map requestBody) { - List atomicOperations = collectAtomicOperations(cloudProvider, [[(name): requestBody]]) - start(atomicOperations, clientRequestId) + @PostMapping("/{cloudProvider}/ops/{name}") + StartOperationResult cloudProviderOperation( + @PathVariable("cloudProvider") String cloudProvider, + @PathVariable("name") String name, + @RequestParam(value = "clientRequestId", required = false) String 
clientRequestId, + @RequestBody Map requestBody) { + List atomicOperations = operationsService.collectAtomicOperations(cloudProvider, [[(name): requestBody]]) + return start(cloudProvider, atomicOperations, clientRequestId) } - /* - * ---------------------------------------------------------------------------------------------------------------------------- - */ + @PatchMapping("/{cloudProvider}/task/{id}") + StartOperationResult updateTask(@PathVariable("cloudProvider") String cloudProvider, + @PathVariable("id") String id, + @RequestBody Map requestBody) { + validateCloudProvider(cloudProvider, ImmutableList.of("kubernetes")) + + Optional doRetry = requestBody.entrySet() + .stream() + .filter({ e -> e.getKey().equals("retry") }) + .map({ e -> (Boolean)e.getValue() }) + .findFirst(); + + if (doRetry.isEmpty()) { + throw new OperationNotSupportedException("Patching task id: ${id} with the provided inputs is not supported") + } + + Task t = taskRepository.get(id) + if (!t) { + throw new NotFoundException("Task not found (id: ${id})" + ) + } - private List collectAtomicOperations(List> inputs) { - collectAtomicOperations(null, inputs) + log.debug("updating task: ${t.id} state to retry: ${doRetry.get()}") + t.fail(doRetry.get()) + return new StartOperationResult(t.id) } - private List collectAtomicOperations(String cloudProvider, List> inputs) { - def results = convert(cloudProvider, inputs) - def atomicOperations = [] - for (bindingResult in results) { - if (bindingResult.errors.hasErrors()) { - throw new DescriptionValidationException(bindingResult.errors) - } else { - atomicOperations.addAll(bindingResult.atomicOperations) - } + @GetMapping("/{cloudProvider}/task/{id}/owner") + TaskOwnerResult getOwnerName(@PathVariable("cloudProvider") String cloudProvider, + @PathVariable("id") String id) { + validateCloudProvider(cloudProvider, ImmutableList.of("kubernetes")) + + Task t = taskRepository.get(id) + if (!t) { + throw new NotFoundException("Task not found (id: ${id})") + } + return new TaskOwnerResult(t.getOwnerId().split("@")[1]) + } + + @PostMapping("/{cloudProvider}/task/{id}/restart") + StartOperationResult restartCloudProviderTask( + @PathVariable("cloudProvider") String cloudProvider, + @PathVariable("id") String id, + @RequestBody List> requestBody) { + validateCloudProvider(cloudProvider, ImmutableList.of("kubernetes")) + Task t = taskRepository.get(id) + if (t == null) { + throw new NotFoundException("Task not found (id: $id)") } - atomicOperations - } - - private ProviderVersion getOperationVersion(Map operation) { - def providerVersion = ProviderVersion.v1 - try { - String accountName = operation.credentials ?: operation.accountName ?: operation.account - if (accountName) { - def credentials = accountCredentialsRepository.getOne(accountName) - providerVersion = credentials.getProviderVersion() - } else { - log.warn "Unable to get account name from operation: $operation" + + if (!t.status.retryable) { + throw new ConstraintViolationException("Task id: $id is not retryable").with { + setRetryable(false) + it } - } catch (Exception e) { - log.warn "Unable to determine account version", e } + log.debug("restarting task: ${t.id}") + List atomicOperations = operationsService.collectAtomicOperations(cloudProvider, requestBody) + return start(atomicOperations, t.requestId) + } + + @GetMapping("/task/{id}") + Task get(@PathVariable("id") String id) { + Task t = taskRepository.get(id) + if (!t) { + throw new NotFoundException("Task not found (id: ${id})") + } + return t + } - return 
providerVersion - } - - private List convert(String cloudProvider, List> inputs) { - def username = AuthenticatedRequest.getSpinnakerUser().orElse("unknown") - def allowedAccounts = AuthenticatedRequest.getSpinnakerAccounts().orElse("").split(",") as List - - def descriptions = [] - inputs.collectMany { Map input -> - input.collect { String k, Map v -> - def providerVersion = getOperationVersion(v) - def converter = atomicOperationsRegistry.getAtomicOperationConverter(k, cloudProvider ?: v.cloudProvider, providerVersion) - - v = processDescriptionInput(atomicOperationDescriptionPreProcessors, converter, v) - def description = converter.convertDescription(v) - - descriptions << description - def errors = new DescriptionValidationErrors(description) - - def validator = atomicOperationsRegistry.getAtomicOperationDescriptionValidator( - DescriptionValidator.getValidatorName(k), cloudProvider ?: v.cloudProvider, providerVersion - ) - if (validator) { - validator.validate(descriptions, description, errors) - validator.authorize(description, errors) - } else { - def msg = "No validator found for operation `${description?.class?.simpleName}` and cloud provider $cloudProvider" - - switch(opsSecurityConfigProps.onMissingValidator) { - case SecurityConfig.SecurityAction.WARN: - log.warn(msg) - break - case SecurityConfig.SecurityAction.FAIL: - errors.reject(msg) - break - } - } - - allowedAccountValidators.each { - it.validate(username, allowedAccounts, description, errors) - } - - AtomicOperation atomicOperation = converter.convertOperation(v) - if (!atomicOperation) { - throw new AtomicOperationNotFoundException(k) - } - if (errors.hasErrors()) { - registry.counter("validationErrors", "operation", atomicOperation.class.simpleName).increment() - } - new AtomicOperationBindingResult(atomicOperation, errors) + @GetMapping("/task") + List list() { + taskRepository.list() + } + + /** + * Endpoint to allow Orca to resume Tasks, if they're backed by Sagas. + * + * @param id + */ + @PostMapping("/task/{id}:resume") + StartOperationResult resumeTask(@PathVariable("id") String id) { + Task t = taskRepository.get(id) + if (t == null) { + throw new NotFoundException("Task not found (id: $id)") + } + + if (!t.status.retryable) { + throw new ConstraintViolationException("Task is not retryable").with { + setRetryable(false) + it } } + + List atomicOperations = operationsService.collectAtomicOperationsFromSagas(t.getSagaIds()) + if (atomicOperations.isEmpty()) { + throw new NotFoundException("No saga was found for this task id: $id - can't resume") + } + + return start(null, atomicOperations, t.requestId) } - private Map start(List atomicOperations, String key) { - key = key ?: UUID.randomUUID().toString() - Task task = orchestrationProcessor.process(atomicOperations, key) - [id: task.id, resourceUri: "/task/${task.id}".toString()] + /** + * TODO(rz): Seems like a weird place to put this logic...? + */ + @PreDestroy + void destroy() { + log.info("Destroy has been triggered. Initiating graceful shutdown of tasks.") + long start = System.currentTimeMillis() + def tasks = taskRepository.listByThisInstance() + while (tasks && !tasks.isEmpty() && + (System.currentTimeMillis() - start) / TimeUnit.SECONDS.toMillis(1) < shutdownWaitSeconds) { + log.info("There are {} task(s) still running... 
sleeping before shutting down", tasks.size()) + sleep(1000) + tasks = taskRepository.listByThisInstance() + } + + if (tasks && !tasks.isEmpty()) { + log.error("Shutting down while tasks '{}' are still in progress!", tasks) + } + + log.info("Destruction procedure completed.") + } + + private StartOperationResult start(@Nullable String cloudProvider, + @Nonnull List atomicOperations, + @Nullable String id) { + Task task = + orchestrationProcessor.process( + cloudProvider, atomicOperations, Optional.ofNullable(id).orElse(UUID.randomUUID().toString())); + return new StartOperationResult(task.getId()); } - static Map processDescriptionInput(Collection descriptionPreProcessors, - AtomicOperationConverter converter, - Map descriptionInput) { - def descriptionClass = converter.metaClass.methods.find { it.name == "convertDescription" }.returnType - descriptionPreProcessors.findAll { it.supports(descriptionClass) }.each { - descriptionInput = it.process(descriptionInput) + static class StartOperationResult { + @JsonProperty + private final String id + + StartOperationResult(String id) { + this.id = id } - return descriptionInput + @JsonProperty + String getResourceUri() { + return format("/task/%s", id) + } } - @Canonical - static class AtomicOperationBindingResult { - AtomicOperation atomicOperations - Errors errors + static class TaskOwnerResult { + @JsonProperty + private final String name + + TaskOwnerResult(String name) { + this.name = name + } + } + + private void validateCloudProvider(String inputCloudProvider, List supportedCloudProviders) { + if (inputCloudProvider == null || !supportedCloudProviders.contains(inputCloudProvider)) { + throw new UnsupportedOperationException("updating Task (id: $id) information not supported via this " + + "endpoint for cloudprovider: ${inputCloudProvider}. 
Supported cloudproviders are: ${supportedCloudProviders}") + } } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectController.groovy index 0f8b229f52b..4031494d64a 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectController.groovy @@ -16,232 +16,58 @@ package com.netflix.spinnaker.clouddriver.controllers -import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.configuration.ThreadPoolConfiguration -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.cats.cache.Cache +import com.netflix.spinnaker.cats.cache.CacheData +import com.netflix.spinnaker.clouddriver.core.ProjectClustersService +import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace +import com.netflix.spinnaker.clouddriver.config.ProjectClustersCachingAgentProperties import com.netflix.spinnaker.kork.web.exceptions.NotFoundException import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.MessageSource -import org.springframework.context.i18n.LocaleContextHolder -import org.springframework.http.HttpStatus -import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor -import org.springframework.web.bind.annotation.ExceptionHandler import org.springframework.web.bind.annotation.PathVariable import org.springframework.web.bind.annotation.RequestMapping import org.springframework.web.bind.annotation.RequestMethod -import org.springframework.web.bind.annotation.ResponseStatus import org.springframework.web.bind.annotation.RestController -import rx.Observable -import rx.Scheduler -import rx.schedulers.Schedulers -import com.netflix.spinnaker.clouddriver.model.ServerGroup.InstanceCounts as InstanceCounts +import static com.netflix.spinnaker.clouddriver.core.ProjectClustersService.ClusterModel @Slf4j @RestController @RequestMapping("/projects/{project}") class ProjectController { - private final Scheduler queryClusterScheduler + Cache cacheView + ProjectClustersService projectClustersService + ProjectClustersCachingAgentProperties projectClustersCachingAgentProperties @Autowired - ProjectController(ThreadPoolConfiguration threadPoolConfiguration) { - this(Schedulers.from(newFixedThreadPool(threadPoolConfiguration.queryCluster))) + ProjectController(Cache cacheView, + ProjectClustersService projectClustersService, + ProjectClustersCachingAgentProperties projectClustersCachingAgentProperties) { + this.cacheView = cacheView + this.projectClustersService = projectClustersService + this.projectClustersCachingAgentProperties = projectClustersCachingAgentProperties } - ProjectController(Scheduler queryClusterScheduler) { - this.queryClusterScheduler = queryClusterScheduler - } - - @Autowired - Front50Service front50Service - - @Autowired - MessageSource messageSource - - @Autowired - List clusterProviders - @RequestMapping(method= RequestMethod.GET, value = "/clusters") List getClusters(@PathVariable String project) { - Map projectConfig = null - try { - projectConfig = 
front50Service.getProject(project) - } catch (e) { - log.error("Unable to fetch project (${project})", e) - throw new NotFoundException("Project not found (name: ${project})") - } - - if (projectConfig.config.clusters.size() == 0) { - return [] - } - - List applicationsToRetrieve = projectConfig.config.applications ?: [] - Map> allClusters = retrieveClusters(applicationsToRetrieve) - - projectConfig.config.clusters.findResults { Map projectCluster -> - List applications = projectCluster.applications ?: projectConfig.config.applications - def applicationModels = applications.findResults { String application -> - def appClusters = allClusters[application] - Set clusterMatches = findClustersForProject(appClusters, projectCluster) - new ApplicationClusterModel(application, clusterMatches) + if (projectClustersCachingAgentProperties.getNormalizedAllowList().contains(project.toLowerCase())) { + CacheData cacheData = cacheView.get(Namespace.PROJECT_CLUSTERS.ns, "v1") + if (cacheData == null) { + throw new NotFoundException("Projects not cached") } - new ClusterModel( - account: projectCluster.account, - stack: projectCluster.stack, - detail: projectCluster.detail, - applications: applicationModels - ) - } - } - - private static HashSet findClustersForProject(Set appClusters, Map projectCluster) { - if (!appClusters) { - return [] - } - appClusters.findAll { appCluster -> - Names clusterNameParts = Names.parseName(appCluster.name) - appCluster.accountName == projectCluster.account && - nameMatches("stack", clusterNameParts, projectCluster) && - nameMatches("detail", clusterNameParts, projectCluster) - } - } - private Map> retrieveClusters(List applications) { - Map> allClusters = [:] - def retrievedClusters = Observable.from(applications) - .flatMap { application -> - retrieveApplication(application).subscribeOn(queryClusterScheduler) - } - .observeOn(queryClusterScheduler).toList().toBlocking().single() - - retrievedClusters.each { - if (!allClusters.containsKey(it.application)) { - allClusters.put(it.application, new HashSet()) + Object clusters = cacheData.attributes.get(project) + if (clusters == null) { + throw new NotFoundException("Project not found (name: $project)") } - allClusters[it.application].addAll(it.clusters) - } - - allClusters - } - - private Observable retrieveApplication(String application) { - return Observable.from(clusterProviders).flatMap({ - Observable.from((it.getClusterDetails(application) ?: [:]).findResults { - new RetrievedClusters(application: application, clusters: it.value) - }) - }); - } - - static boolean nameMatches(String field, Names clusterName, Map projectCluster) { - return projectCluster[field] == clusterName[field] || projectCluster[field] == "*" || - (!projectCluster[field] && !clusterName[field]) - } - - - // Internal model - used to return all clusters for a given application - static class RetrievedClusters { - String application - Set clusters - } - - - // Represents all the data needed to render a specific project cluster view - static class ClusterModel { - String account - String stack - String detail - List applications = [] - InstanceCounts getInstanceCounts() { - List clusterCounts = applications.clusters.flatten().instanceCounts - new InstanceCounts( - total: (Integer) clusterCounts.total.sum(), - down: (Integer) clusterCounts.down.sum(), - outOfService: (Integer) clusterCounts.outOfService.sum(), - up: (Integer) clusterCounts.up.sum(), - unknown: (Integer) clusterCounts.unknown.sum(), - starting: (Integer) clusterCounts.starting.sum() - ) - 
} - } - - // Represents the cluster data for a particular application - static class ApplicationClusterModel { - String application - Set clusters = [] - Long getLastPush() { - clusters.lastPush.max() - } - - ApplicationClusterModel(String applicationName, Set appClusters) { - application = applicationName - Map regionClusters = [:] - appClusters.serverGroups.flatten().findAll { - !it.isDisabled() && it.instanceCounts.total > 0 - }.each { serverGroup -> - if (!regionClusters.containsKey(serverGroup.region)) { - regionClusters.put(serverGroup.region, new RegionClusterModel(region: serverGroup.region)) - } - RegionClusterModel regionCluster = regionClusters.get(serverGroup.region) - incrementInstanceCounts(serverGroup, regionCluster.instanceCounts) - def buildNumber = serverGroup.imageSummary?.buildInfo?.jenkins?.number ?: "0" - def host = serverGroup.imageSummary?.buildInfo?.jenkins?.host - def job = serverGroup.imageSummary?.buildInfo?.jenkins?.name - def existingBuild = regionCluster.builds.find { - it.buildNumber == buildNumber && it.host == host && it.job == job - } - if (!existingBuild) { - regionCluster.builds << new DeployedBuild( - host: host, - job: job, - buildNumber: buildNumber, - deployed: serverGroup.createdTime, - images: serverGroup.imageSummary?.buildInfo?.images) - } else { - existingBuild.deployed = Math.max(existingBuild.deployed, serverGroup.createdTime) - } + return clusters + } else { + List clusters = projectClustersService.getProjectClusters(project.toLowerCase()) + if (clusters == null) { + throw new NotFoundException("Project not found: (name: $project)") } - clusters = regionClusters.values() - } - } - - // Represents the cluster data for a particular application in a particular region - static class RegionClusterModel { - String region - List builds = [] - InstanceCounts instanceCounts = new InstanceCounts(total: 0, up: 0, down: 0, starting: 0, outOfService: 0, unknown: 0) - Long getLastPush() { - builds.deployed.max() + return clusters } } - - static class DeployedBuild { - String host - String job - String buildNumber - Long deployed - List images - } - - - private static void incrementInstanceCounts(ServerGroup source, InstanceCounts target) { - InstanceCounts sourceCounts = source.instanceCounts - target.total += sourceCounts.total - target.down += sourceCounts.down - target.up += sourceCounts.up - target.outOfService += sourceCounts.outOfService - target.starting += sourceCounts.starting - target.unknown += sourceCounts.unknown - } - - - private static ThreadPoolTaskExecutor newFixedThreadPool(int threadPoolSize) { - def executor = new ThreadPoolTaskExecutor(maxPoolSize: threadPoolSize, corePoolSize: threadPoolSize) - executor.afterPropertiesSet() - executor - } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ReservationReportController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ReservationReportController.groovy index b5dec12b9b3..6e2522b87a4 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ReservationReportController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ReservationReportController.groovy @@ -33,7 +33,7 @@ class ReservationReportController { @RequestMapping(method = RequestMethod.GET) Collection getReservationReports(@RequestParam Map filters) { - return getReservationReportsByName("v1", filters) + return getReservationReportsByName("v3", filters) } @RequestMapping(method = 
RequestMethod.GET, value = "/{name}") diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchController.groovy index e65a9a183f9..2a968773d66 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchController.groovy @@ -17,8 +17,11 @@ package com.netflix.spinnaker.clouddriver.controllers import com.netflix.spinnaker.clouddriver.search.SearchProvider +import com.netflix.spinnaker.clouddriver.search.SearchQueryCommand import com.netflix.spinnaker.clouddriver.search.SearchResultSet -import org.apache.log4j.Logger +import com.netflix.spinnaker.clouddriver.search.executor.SearchExecutor +import org.slf4j.Logger +import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import org.springframework.security.access.prepost.PreAuthorize import org.springframework.web.bind.annotation.RequestMapping @@ -31,11 +34,14 @@ import javax.servlet.http.HttpServletRequest @RestController class SearchController { - protected static final Logger log = Logger.getLogger(this) + protected static final Logger log = LoggerFactory.getLogger(getClass()) @Autowired List searchProviders + @Autowired(required = false) + SearchExecutor searchExecutor + /** * Simple search endpoint that delegates to {@link SearchProvider}s. * @param query the phrase to query @@ -69,14 +75,29 @@ class SearchController { searchProviders.findAll { it.platform == searchQuery.platform } : searchProviders - List results = searchAllProviders(providers, searchQuery) + List results = [] + if (searchExecutor) { + results = searchExecutor.searchAllProviders(providers, searchQuery) + } else { + results = searchAllProviders(providers, searchQuery) + } if (results.size() == 1) { results } else { int total = results.inject(0) { acc, item -> acc + item.totalMatches } - List> allResults = results.inject([]) { acc, item -> acc.addAll(item.results); acc } + List> allResults = results.inject([]) { acc, item -> + // if any of the search providers return items.results as null, then we see + // Ambiguous method overloading for method java.util.ArrayList#addAll. + // Cannot resolve which method to invoke for [null] due to overlapping prototypes between: + // [interface java.util.Collection] + // [interface java.lang.Iterable] + // [interface java.util.Iterator] + // Therefore, let's default to an empty [] in such cases. 
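// [Editor's sketch, not part of this diff] A minimal reproduction of the failure
// mode described above, and the elvis-operator guard applied below:
//
//   def acc = []
//   acc.addAll(null)        // throws GroovyRuntimeException: ambiguous method overloading
//   acc.addAll(null ?: [])  // safe: substitutes an empty list when results are null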
+ acc.addAll(item.results?: []) + acc + } //TODO-cfieber: this is a temporary workaround to https://github.com/spinnaker/deck/issues/128 [new SearchResultSet( @@ -97,11 +118,12 @@ class SearchController { Map filters = searchQuery.filters.findAll { !provider.excludedFilters().contains(it.key) } + try { if (searchQuery.type && !searchQuery.type.isEmpty()) { - it.search(searchQuery.q, searchQuery.type, searchQuery.page, searchQuery.pageSize, filters) + provider.search(searchQuery.q, searchQuery.type, searchQuery.page, searchQuery.pageSize, filters) } else { - it.search(searchQuery.q, searchQuery.page, searchQuery.pageSize, filters) + provider.search(searchQuery.q, searchQuery.page, searchQuery.pageSize, filters) } } catch (Exception e) { log.error("Search for '${searchQuery.q}' in '${it.platform}' failed", e) @@ -111,38 +133,4 @@ class SearchController { results } - - static class SearchQueryCommand { - /** - * the phrase to query - */ - String q - - /** - * (optional) a filter, used to only return results of that type. If no value is supplied, all types will be returned - */ - List type - - /** - * a filter, used to only return results from providers whose platform value matches this - */ - String platform = '' - - /** - * the page number, starting with 1 - */ - Integer page = 1 - - /** - * the maximum number of results to return per page - */ - Integer pageSize = 10 - - /** - * (optional) a map of ad-hoc key-value pairs to further filter the keys, - * based on the map provided by {@link com.netflix.spinnaker.oort.aws.data.Keys#parse(java.lang.String)} - * potential matches must fully intersect the filter map entries - */ - Map filters - } } diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SecurityGroupController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SecurityGroupController.groovy index 56c58945433..6456c874400 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SecurityGroupController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/SecurityGroupController.groovy @@ -154,18 +154,27 @@ class SecurityGroupController { } @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") - @RequestMapping(method = RequestMethod.GET, value = "/{account}/{cloudProvider}/{region}/{securityGroupName:.+}") - SecurityGroup get(@PathVariable String account, - @PathVariable String cloudProvider, - @PathVariable String region, - @PathVariable String securityGroupName, - @RequestParam(value = "vpcId", required = false) String vpcId) { + @RequestMapping(method = RequestMethod.GET, value = "/{account}/{cloudProvider}/{region}/{securityGroupNameOrId:.+}") + SecurityGroup get( + @PathVariable String account, + @PathVariable String cloudProvider, + @PathVariable String region, + @PathVariable String securityGroupNameOrId, + @RequestParam(value = "vpcId", required = false, defaultValue = "*") String vpcId, + @RequestParam(value = "getById", required = false, defaultValue = "false") boolean getById + ) { def securityGroup = securityGroupProviders.findResults { secGrpProv -> - secGrpProv.cloudProvider == cloudProvider ? secGrpProv.get(account, region, securityGroupName, vpcId) : null + if (secGrpProv.cloudProvider == cloudProvider) { + getById + ? 
secGrpProv.getById(account, region, securityGroupNameOrId, vpcId) + : secGrpProv.get(account, region, securityGroupNameOrId, vpcId) + } else { + null + } } if (securityGroup.size() != 1) { - throw new NotFoundException("Security group '${securityGroupName}' does not exist") + throw new NotFoundException("Security group '${securityGroupNameOrId}' does not exist") } return securityGroup.first() diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupController.groovy index 8e8f11ccc3d..3ab84f403b6 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupController.groovy +++ b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupController.groovy @@ -16,35 +16,36 @@ package com.netflix.spinnaker.clouddriver.controllers +import com.fasterxml.jackson.annotation.JsonAnyGetter +import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.frigga.Names -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.Instance -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.model.ServerGroupManager +import com.netflix.spinnaker.clouddriver.model.* +import com.netflix.spinnaker.clouddriver.model.view.ClusterViewModelPostProcessor import com.netflix.spinnaker.clouddriver.model.view.ServerGroupViewModelPostProcessor import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue import com.netflix.spinnaker.kork.web.exceptions.NotFoundException import com.netflix.spinnaker.moniker.Moniker +import groovy.transform.Canonical import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.MessageSource import org.springframework.security.access.prepost.PostAuthorize import org.springframework.security.access.prepost.PostFilter import org.springframework.security.access.prepost.PreAuthorize -import org.springframework.web.bind.annotation.PathVariable -import org.springframework.web.bind.annotation.RequestMapping -import org.springframework.web.bind.annotation.RequestMethod -import org.springframework.web.bind.annotation.RequestParam -import org.springframework.web.bind.annotation.RestController +import org.springframework.web.bind.annotation.* + +import java.util.stream.Collectors +import java.util.stream.Stream + +import static com.netflix.spinnaker.clouddriver.model.view.ModelObjectViewModelPostProcessor.applyExtensions +import static com.netflix.spinnaker.clouddriver.model.view.ModelObjectViewModelPostProcessor.applyExtensionsToObject @Slf4j @RestController class ServerGroupController { - private static final String INSTANCE_LOAD_BALANCER_HEALTH_TYPE = 'LoadBalancer' - private static final String INSTANCE_TARGET_GROUP_HEALTH_TYPE = 'TargetGroup' + private static final String INSTANCE_LOAD_BALANCER_HEALTH_TYPE = "LoadBalancer" + private static final String INSTANCE_TARGET_GROUP_HEALTH_TYPE = "TargetGroup" @Autowired List clusterProviders @@ -53,13 +54,13 @@ class ServerGroupController { ObjectMapper objectMapper @Autowired - MessageSource messageSource + RequestQueue requestQueue @Autowired - RequestQueue requestQueue + Optional> clusterViewModelPostProcessors = Optional.empty() - 
@Autowired(required = false) - ServerGroupViewModelPostProcessor serverGroupViewModelPostProcessor + @Autowired + Optional> serverGroupViewModelPostProcessors = Optional.empty() @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") @PostAuthorize("hasPermission(returnObject?.moniker?.app, 'APPLICATION', 'READ')") @@ -68,9 +69,9 @@ class ServerGroupController { @PathVariable String account, @PathVariable String region, @PathVariable() String name, - @RequestParam(required = false, value = 'includeDetails', defaultValue = 'true') String includeDetails + @RequestParam(required = false, value = "includeDetails", defaultValue = "true") String includeDetails ) { - getServerGroup(account, region, name, includeDetails) + return getServerGroup(account, region, name, includeDetails) } @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") @@ -80,88 +81,125 @@ class ServerGroupController { ServerGroup getServerGroupByMoniker(@PathVariable String account, @PathVariable String region, @PathVariable String name, - @RequestParam(required = false, value = 'includeDetails', defaultValue = 'true') String includeDetails) { - getServerGroup(account, region, name, includeDetails) + @RequestParam(required = false, value = "includeDetails", defaultValue = "true") String includeDetails) { + return getServerGroup(account, region, name, includeDetails) } - private getServerGroup(String account, - String region, - String name, - String includeDetails) { + private ServerGroup getServerGroup(String account, + String region, + String name, + String includeDetails) { Boolean shouldIncludeDetails = Boolean.valueOf(includeDetails) - def matches = (Set) clusterProviders.findResults { provider -> - requestQueue.execute(name, { provider.getServerGroup(account, region, name, shouldIncludeDetails) }) - } - if (!matches) { - throw new NotFoundException("Server group not found (account: ${account}, region: ${region}, name: ${name})") - } - ServerGroup serverGroup = matches.first() - if (serverGroupViewModelPostProcessor?.supports(serverGroup)) { - serverGroupViewModelPostProcessor.process(serverGroup) - } - serverGroup + ServerGroup serverGroup = clusterProviders.stream() + .map({ provider -> + requestQueue.execute(name, { -> provider.getServerGroup(account, region, name, shouldIncludeDetails) }) + }) + .filter({ Objects.nonNull(it) }) + .findFirst() + .orElseThrow({ + new NotFoundException(String.format("Server group not found (account: %s, region: %s, name: %s)", account, region, name)) + }) + + return applyExtensionsToObject(serverGroupViewModelPostProcessors, serverGroup) } - List expandedList(String application, String cloudProvider) { - return clusterProviders - .findAll { cloudProvider ? cloudProvider.equalsIgnoreCase(it.cloudProviderId) : true } - .findResults { ClusterProvider cp -> - requestQueue.execute(application, { - cp.getClusterDetails(application)?.values() + private List> expandedList(String application, String cloudProvider) { + return clusterProviders.stream() + .filter({ + cloudProvider != null + ? 
cloudProvider.equalsIgnoreCase(it.getCloudProviderId()) + : true }) - } - .collectNested { Cluster c -> - c.serverGroups?.collect { - expanded(it, c) - } ?: [] - }.flatten() + .flatMap({ ClusterProvider cp -> + def details = requestQueue.execute(application, { cp.getClusterDetails(application) }) + + Optional.ofNullable(details) + .map({ + it.values().stream() + .filter({ Objects.nonNull(it) }) + .flatMap({ it.stream() }) + .filter({ Objects.nonNull(it) }) + .map( { cluster -> + applyExtensionsToObject(clusterViewModelPostProcessors, cluster) + }) + }) + .orElse(Stream.empty()) + }) + .flatMap({ Cluster c -> + Optional.ofNullable(c.getServerGroups()) + .map({ groups -> + groups.stream() + .map({ serverGroup -> + applyExtensionsToObject(serverGroupViewModelPostProcessors, serverGroup) + }) + .map({ serverGroup -> + expanded(serverGroup, c) + }) + }) + .orElse(Stream.empty()) + }) + .collect(Collectors.toList()) } - Map expanded(ServerGroup serverGroup, Cluster cluster) { - Map sg = objectMapper.convertValue(serverGroup, Map) - sg.accountName = cluster.accountName - def name = Names.parseName(cluster.name) - sg.cluster = name.cluster - sg.application = name.app - sg.stack = name.stack - sg.freeFormDetail = name.detail + private Map expanded(ServerGroup serverGroup, Cluster cluster) { + Map sg = objectMapper.convertValue(serverGroup, Map) + sg.put("accountName", cluster.getAccountName()) + Moniker moniker = cluster.getMoniker() + sg.put("cluster", moniker.getCluster()) + sg.put("application", moniker.getApp()) + sg.put("stack", moniker.getStack()) + sg.put("freeFormDetail", moniker.getDetail()) + sg.put("account", cluster.getAccountName()) return sg } - List summaryList(String application, String cloudProvider) { + private List summaryList(String application, String cloudProvider) { - List serverGroupViews = [] + List serverGroupViews = clusterProviders.stream() + .filter({ + cloudProvider != null + ? cloudProvider.equalsIgnoreCase(it.getCloudProviderId()) + : true + }) + .flatMap({ provider -> + Map> clusterMap = requestQueue.execute(application, { + provider.getClusterDetails(application) + }) + + return Optional.ofNullable(clusterMap) + .map({ it.values() }) + .map({ it.stream().flatMap({ it.stream() }) }) + .orElse(Stream.empty()) + }) + .flatMap({ Cluster cluster -> + cluster.getServerGroups().stream() + .map({ serverGroup -> + new ServerGroupViewModel(applyExtensionsToObject(serverGroupViewModelPostProcessors, serverGroup), cluster.name, cluster.accountName) + }) - def clusters = (Set) clusterProviders - .findAll { cloudProvider ? 
cloudProvider.equalsIgnoreCase(it.cloudProviderId) : true } - .findResults { provider -> - requestQueue.execute(application, { provider.getClusterDetails(application)?.values() }) - }.flatten() - clusters.each { Cluster cluster -> - cluster.serverGroups.each { ServerGroup serverGroup -> - serverGroupViews << new ServerGroupViewModel(serverGroup, cluster.name, cluster.accountName) - } - } + }) + .collect(Collectors.toList()) - serverGroupViews + return serverGroupViews } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ')") @PostAuthorize("@authorizationSupport.filterForAccounts(returnObject)") @RequestMapping(value = "/applications/{application}/serverGroups", method = RequestMethod.GET) - List list(@PathVariable String application, - @RequestParam(required = false, value = 'expand', defaultValue = 'false') String expand, - @RequestParam(required = false, value = 'cloudProvider') String cloudProvider, - @RequestParam(required = false, value = 'clusters') Collection clusters) { + List list(@PathVariable String application, + @RequestParam(required = false, value = "expand", defaultValue = "false") String expand, + @RequestParam(required = false, value = "cloudProvider") String cloudProvider, + @RequestParam(required = false, value = "clusters") List clusters) { - Boolean isExpanded = Boolean.valueOf(expand) - if (clusters) { + boolean isExpanded = Boolean.valueOf(expand) + + if (clusters != null && !clusters.isEmpty()) { return buildSubsetForClusters(clusters, application, isExpanded) } - if (clusters?.empty) { - return [] + if (clusters != null) { + return List.of() } if (isExpanded) { return expandedList(application, cloudProvider) @@ -172,14 +210,18 @@ class ServerGroupController { @PostFilter("hasPermission(filterObject?.application, 'APPLICATION', 'READ')") @PostAuthorize("@authorizationSupport.filterForAccounts(returnObject)") @RequestMapping(value = "/serverGroups", method = RequestMethod.GET) - List getServerGroups(@RequestParam(required = false, value = 'applications') List applications, - @RequestParam(required = false, value = 'ids') List ids, - @RequestParam(required = false, value = 'cloudProvider') String cloudProvider) { - if ((applications && ids) || (!applications && !ids)) { - throw new IllegalArgumentException("Provide either 'applications' or 'ids' parameter (but not both)"); + List getServerGroups( + @RequestParam(required = false, value = "applications") List applications, + @RequestParam(required = false, value = "ids") List ids, + @RequestParam(required = false, value = "cloudProvider") String cloudProvider) { + + boolean hasApplications = applications != null && !applications.isEmpty() + boolean hasIds = ids != null && !ids.isEmpty() + if ((hasApplications && hasIds) || (!hasApplications && !hasIds)) { + throw new IllegalArgumentException("Provide either 'applications' or 'ids' parameter (but not both)") } - if (applications) { + if (hasApplications) { return getServerGroupsForApplications(applications, cloudProvider) } else { return getServerGroupsForIds(ids) @@ -187,45 +229,76 @@ class ServerGroupController { } private List getServerGroupsForApplications(List applications, String cloudProvider) { - return applications.collectMany { summaryList(it, cloudProvider) } + return applications.stream() + .flatMap({ it -> summaryList(it, cloudProvider).stream() }) + .collect(Collectors.toList()) } private List getServerGroupsForIds(List serverGroupIds) { - List allIdTokens = serverGroupIds.collect { it.split(':') } + List allIdTokens = serverGroupIds.stream() 
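// [Editor's sketch, not part of this diff] Each id is expected in the form
// "account:region:name"; the pipeline below splits on ":" and joins anything that
// is not exactly three tokens into the invalidIds message. For example:
//
//   "prod:us-east-1:myapp-v001".split(":")  // 3 tokens - looked up as a server group
//   "prod:myapp-v001".split(":")            // 2 tokens - reported as an invalid id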
+ .map({ it.split(":") }) + .collect(Collectors.toList()) + + String invalidIds = allIdTokens.stream() + .filter({ it.length != 3 }) + .map({ String.join(":", it) }) + .collect(Collectors.joining(", ")) - def invalidIds = allIdTokens.findAll { it.size() != 3 } - if (invalidIds) { + if (!invalidIds.isBlank()) { throw new IllegalArgumentException("Expected ids in the format :: but got invalid ids: " + - invalidIds.collect { it.join(':') }.join(', ')) + invalidIds) } - allIdTokens.collect { String[] idTokens -> - def (String account, String region, String name) = idTokens - try { - def serverGroup = getServerGroup(account, region, name, "true") - return new ServerGroupViewModel(serverGroup, serverGroup.moniker.cluster, account) - } catch (e) { - log.error("Couldn't get server group ${idTokens.join(':')}", e) - return null - } - }.findAll(); + allIdTokens.stream() + .map({ idTokens -> + String account = idTokens[0] + String region = idTokens[1] + String name = idTokens[2] + try { + ServerGroup serverGroup = getServerGroup(account, region, name, "true") + return new ServerGroupViewModel(serverGroup, serverGroup.getMoniker().getCluster(), account) + } catch (e) { + log.error("Couldn't get server group {}:{}:{}", account, region, name, e) + return null + } + }) + .filter({ Objects.nonNull(it) }) + .collect(Collectors.toList()) } - private Collection buildSubsetForClusters(Collection clusters, String application, Boolean isExpanded) { - Collection matches = clusters.findResults { accountAndName -> - def (account, clusterName) = accountAndName.split(':') - if (account && clusterName) { - return clusterProviders.findResults { clusterProvider -> - requestQueue.execute(application, { clusterProvider.getCluster(application, account, clusterName) }) + private List buildSubsetForClusters(List clusters, String application, boolean isExpanded) { + List matches = clusters.stream() + .flatMap({ accountAndName -> + String[] components = accountAndName.split(":") + if (components.length == 2) { + String account = components[0] + String clusterName = components[1] + if (!account.isEmpty() && !clusterName.isEmpty()) { + return clusterProviders.stream() + .map({ clusterProvider -> + Cluster cluster = requestQueue.execute(application, { + clusterProvider.getCluster(application, account, clusterName) + }) + return applyExtensionsToObject(clusterViewModelPostProcessors, cluster) + }) + } } - } - return null - }.flatten() - return matches.findResults { cluster -> - cluster.serverGroups.collect { - isExpanded ? expanded(it, cluster) : new ServerGroupViewModel(it, cluster.name, cluster.accountName) - } - }.flatten() + return null + }) + .filter({ it != null }) + .collect(Collectors.toList()) + + return matches.stream() + .flatMap({ cluster -> + cluster.getServerGroups().stream() + .map({ + ServerGroup sg = applyExtensionsToObject(serverGroupViewModelPostProcessors, it) + isExpanded + ? 
expanded(sg, cluster) + : new ServerGroupViewModel(sg, cluster.name, cluster.accountName) + }) + }) + .collect(Collectors.toList()) } static class ServerGroupViewModel { @@ -251,34 +324,57 @@ class ServerGroupController { Set securityGroups ServerGroup.InstanceCounts instanceCounts Map tags + Map labels Map providerMetadata List serverGroupManagers + @JsonIgnore + Map extraAttributes = new HashMap<>() + + @JsonAnyGetter + Map getExtraAttributes() { + return extraAttributes + } + ServerGroupViewModel(ServerGroup serverGroup, String clusterName, String accountName) { + def instanceViews = Optional.ofNullable(serverGroup.getInstances()) + .map({ instances -> + instances.stream() + .filter({ it != null }) + .map({ new InstanceViewModel(it) }) + .collect(Collectors.toList()) + }) + .orElse(List.of()) + + cluster = clusterName - type = serverGroup.type - cloudProvider = serverGroup.cloudProvider - name = serverGroup.name - application = Names.parseName(serverGroup.name).getApp() + type = serverGroup.getType() + cloudProvider = serverGroup.getCloudProvider() + name = serverGroup.getName() + application = Names.parseName(serverGroup.getName()).getApp() account = accountName - region = serverGroup.region + region = serverGroup.getRegion() createdTime = serverGroup.getCreatedTime() isDisabled = serverGroup.isDisabled() - instances = serverGroup.getInstances()?.findResults { it ? new InstanceViewModel(it) : null } ?: [] + instances = instanceViews instanceCounts = serverGroup.getInstanceCounts() securityGroups = serverGroup.getSecurityGroups() loadBalancers = serverGroup.getLoadBalancers() serverGroupManagers = serverGroup.getServerGroupManagers() + instanceType = serverGroup.getInstanceType() moniker = serverGroup.getMoniker() - if (serverGroup.launchConfig) { - if (serverGroup.launchConfig.instanceType) { - instanceType = serverGroup.launchConfig.instanceType - } + + def tags = serverGroup.getTags() + if (tags != null && !tags.isEmpty()) { + this.tags = tags } - if (serverGroup.tags) { - tags = serverGroup.tags + + def labels = serverGroup.getLabels() + if (labels != null && !labels.isEmpty()) { + this.labels = labels } + // TODO: deal with duck typing if (serverGroup.hasProperty("buildInfo")) { buildInfo = serverGroup.buildInfo } @@ -293,13 +389,15 @@ class ServerGroupController { } capacity = serverGroup.getCapacity() + + Optional.ofNullable(serverGroup.extraAttributes).ifPresent { extraAttributes.putAll(it) } } } static class InstanceViewModel { String id String name - List> health + List health String healthState Long launchTime String availabilityZone @@ -311,27 +409,79 @@ class ServerGroupController { launchTime = instance.getLaunchTime() availabilityZone = instance.getZone() health = instance.health.collect { health -> - Map healthMetric = [type: health.type] - if (health.containsKey("state")) { - healthMetric.state = health.state.toString() - } - if (health.containsKey("status")) { - healthMetric.status = health.status - } - if (health.type == INSTANCE_LOAD_BALANCER_HEALTH_TYPE && health.containsKey("loadBalancers")) { - healthMetric.loadBalancers = health.loadBalancers.collect { - [name: it.loadBalancerName, state: it.state, description: it.description, healthState: it.healthState] + + String type = (String) health.get("type") + + String healthState = Optional.ofNullable(health.get("state")) + .map({ it.toString() }) + .orElse(null) + + Object status = health.get("status") + if (type == INSTANCE_LOAD_BALANCER_HEALTH_TYPE && health.containsKey("loadBalancers")) { + List loadBalancers + + 
Object lbs = health.get("loadBalancers") + if (lbs instanceof Collection) { + loadBalancers = lbs.stream() + .map({ + // TODO: deal with duck typing + new HealthDetail(it.loadBalancerName, it.state, it.description, it.healthState) + }) + .collect(Collectors.toList()) } - } - if (health.type == INSTANCE_TARGET_GROUP_HEALTH_TYPE && health.containsKey("targetGroups")) { - healthMetric.targetGroups = health.targetGroups.collect { - [name: it.targetGroupName, state: it.state, description: it.description, healthState: it.healthState] + + def metric = new LoadBalancerHealth() + metric.setType(type) + metric.setState(healthState) + metric.setStatus(status) + metric.setLoadBalancers(loadBalancers) + return metric + } else if (type == INSTANCE_TARGET_GROUP_HEALTH_TYPE && health.containsKey("targetGroups")) { + List targetGroups + + Object tgs = health.get("targetGroups") + if (tgs instanceof Collection) { + targetGroups = tgs.stream() + .map({ + // TODO: deal with duck typing + new HealthDetail(it.targetGroupName, it.state, it.description, it.healthState) + }) + .collect(Collectors.toList()) } + + def metric = new TargetGroupHealth() + metric.setType(type) + metric.setState(healthState) + metric.setStatus(status) + metric.setTargetGroups(targetGroups) + return metric + } else { + return new Health(type, healthState, status) } - healthMetric } } } -} + @Canonical + static class Health { + String type + String state + Object status + } + + static class LoadBalancerHealth extends Health { + List loadBalancers + } + static class TargetGroupHealth extends Health { + List targetGroups + } + + @Canonical + static class HealthDetail { + Object name + Object state + Object description + Object healthState + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/TaskController.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/TaskController.groovy deleted file mode 100644 index 1c9b30a3a0a..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/TaskController.groovy +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
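The ServerGroupController change above replaces the old Map-based instance health payload with typed Health, LoadBalancerHealth, TargetGroupHealth, and HealthDetail view models. A minimal, self-contained sketch of how such a hierarchy serializes under Jackson, assuming jackson-databind is on the classpath; the class shapes mirror the inner classes above, and the sample values (including the "LoadBalancer" type string) are illustrative:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.List;

    public class HealthViewSketch {
      public static class Health {
        public String type;
        public String state;
        public Object status;
      }

      public static class HealthDetail {
        public Object name;
        public Object state;
        public Object description;
        public Object healthState;
      }

      public static class LoadBalancerHealth extends Health {
        public List<HealthDetail> loadBalancers;
      }

      public static void main(String[] args) throws Exception {
        LoadBalancerHealth health = new LoadBalancerHealth();
        health.type = "LoadBalancer"; // illustrative value for INSTANCE_LOAD_BALANCER_HEALTH_TYPE
        health.state = "Up";
        HealthDetail detail = new HealthDetail();
        detail.name = "frontend-elb"; // hypothetical load balancer name
        detail.state = "InService";
        detail.healthState = "Up";
        health.loadBalancers = List.of(detail);
        // Produces the same JSON shape the old Map-based code emitted, e.g.
        // {"type":"LoadBalancer","state":"Up","status":null,"loadBalancers":[{...}]}
        System.out.println(new ObjectMapper().writeValueAsString(health));
      }
    }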
- */ - -package com.netflix.spinnaker.clouddriver.controllers - -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.data.task.TaskRepository -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.annotation.Value -import org.springframework.web.bind.annotation.PathVariable -import org.springframework.web.bind.annotation.RequestMapping -import org.springframework.web.bind.annotation.RequestMethod -import org.springframework.web.bind.annotation.RestController - -import javax.annotation.PreDestroy -import java.util.concurrent.TimeUnit - -@RequestMapping("/task") -@RestController -@Slf4j -class TaskController { - @Autowired - TaskRepository taskRepository - - @Value('${admin.tasks.shutdownWaitSeconds:-1}') - Long shutdownWaitSeconds - - @RequestMapping(value = "/{id}", method = RequestMethod.GET) - Task get(@PathVariable("id") String id) { - Task t = taskRepository.get(id) - if (!t) { - throw new NotFoundException("Task not found (id: ${id})") - } - return t - } - - @RequestMapping(method = RequestMethod.GET) - List list() { - taskRepository.list() - } - - @PreDestroy - public void destroy() { - long start = System.currentTimeMillis() - def tasks = taskRepository.listByThisInstance() - while (tasks && !tasks.isEmpty() && - (System.currentTimeMillis() - start) / TimeUnit.SECONDS.toMillis(1) < shutdownWaitSeconds) { - log.info("There are {} task(s) still running... sleeping before shutting down", tasks.size()) - sleep(1000) - tasks = taskRepository.listByThisInstance() - } - - if (tasks && !tasks.isEmpty()) { - log.error("Shutting down while tasks '{}' are still in progress!", tasks) - } - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/admin/EntityTagsAdminController.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/admin/EntityTagsAdminController.java deleted file mode 100644 index be36bf8c55a..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/admin/EntityTagsAdminController.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
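The deleted TaskController above drained in-flight tasks in a @PreDestroy hook, polling until either the repository reported no tasks for this instance or admin.tasks.shutdownWaitSeconds elapsed. A self-contained sketch of that wait loop, with a stubbed task list standing in for taskRepository.listByThisInstance():

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    public class ShutdownDrainSketch {
      public static void main(String[] args) throws InterruptedException {
        long shutdownWaitSeconds = 5; // stand-in for admin.tasks.shutdownWaitSeconds
        long start = System.currentTimeMillis();
        List<String> tasks = List.of("task-1"); // stub for taskRepository.listByThisInstance()
        while (!tasks.isEmpty()
            && (System.currentTimeMillis() - start) / TimeUnit.SECONDS.toMillis(1) < shutdownWaitSeconds) {
          System.out.printf("There are %d task(s) still running... sleeping before shutting down%n", tasks.size());
          Thread.sleep(1000);
          tasks = List.of(); // stub: the tasks eventually complete
        }
        if (!tasks.isEmpty()) {
          System.err.printf("Shutting down while tasks %s are still in progress!%n", tasks);
        }
      }
    }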
- */ - -package com.netflix.spinnaker.clouddriver.controllers.admin; - -import com.netflix.spinnaker.clouddriver.model.EntityTagsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RequestMethod; -import org.springframework.web.bind.annotation.RequestParam; -import org.springframework.web.bind.annotation.RestController; - -import java.util.Map; -import java.util.Optional; - -@RestController -@RequestMapping("/admin/tags") -public class EntityTagsAdminController { - private final Logger log = LoggerFactory.getLogger(getClass()); - - private final EntityTagsProvider entityTagsProvider; - - @Autowired - public EntityTagsAdminController(Optional entityTagsProvider) { - this.entityTagsProvider = entityTagsProvider.orElse(null); - } - - @RequestMapping(value = "/reindex", method = RequestMethod.POST) - void reindex() { - entityTagsProvider.reindex(); - } - - @RequestMapping(value = "/delta", method = RequestMethod.GET) - Map delta() { - return entityTagsProvider.delta(); - } - - @RequestMapping(value = "/reconcile", method = RequestMethod.POST) - Map reconcile(@RequestParam(name = "dryRun", defaultValue = "true") Boolean dryRun, - @RequestParam(name = "cloudProvider") String cloudProvider, - @RequestParam(name = "account", required = false) String account, - @RequestParam(name = "region", required = false) String region) { - return entityTagsProvider.reconcile(cloudProvider, account, region, Optional.ofNullable(dryRun).orElse(true)); - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/events/ConfigRefreshedEvent.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/events/ConfigRefreshedEvent.groovy deleted file mode 100644 index b818fbb9b2e..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/events/ConfigRefreshedEvent.groovy +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.events - -import org.springframework.context.ApplicationEvent - -class ConfigRefreshedEvent extends ApplicationEvent { - public ConfigRefreshedEvent(Object source) { - super(source) - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/listeners/CredentialsRefreshListener.groovy b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/listeners/CredentialsRefreshListener.groovy deleted file mode 100644 index 2863728ad5b..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/listeners/CredentialsRefreshListener.groovy +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015 Google, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.listeners - -import com.netflix.spinnaker.clouddriver.events.ConfigRefreshedEvent -import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.ApplicationContext -import org.springframework.context.ApplicationListener -import org.springframework.stereotype.Component - -@Component -public class CredentialsRefreshListener implements ApplicationListener { - - @Autowired - ApplicationContext appContext - - @Autowired - List credentialsInitializers - - @Override - void onApplicationEvent(ConfigRefreshedEvent event) { - for (CredentialsInitializerSynchronizable credentialsInitializer : credentialsInitializers) { - if (credentialsInitializer.credentialsSynchronizationBeanName) { - appContext.getBean(credentialsInitializer.credentialsSynchronizationBeanName) - } - } - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueue.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueue.java deleted file mode 100644 index c75ef552a9b..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/RequestQueue.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.requestqueue; - -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.requestqueue.pooled.PooledRequestQueue; - -import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; - -/** - * RequestQueue. 
- */ -public interface RequestQueue { - - long DEFAULT_TIMEOUT_MILLIS = 60000; - long DEFAULT_START_WORK_TIMEOUT_MILLIS = 10000; - - static RequestQueue forConfig(Registry registry, RequestQueueConfiguration config) { - if (!config.isEnabled()) { - return noop(); - } - - return pooled(registry, config.getStartWorkTimeoutMillis(), config.getTimeoutMillis(), config.getPoolSize()); - } - - static RequestQueue noop() { - return new NOOP(); - } - - static RequestQueue pooled(Registry registry, int poolSize) { - return pooled(registry, DEFAULT_START_WORK_TIMEOUT_MILLIS, DEFAULT_TIMEOUT_MILLIS, poolSize); - } - - static RequestQueue pooled(Registry registry, long startWorkTimeoutMillis, long timeoutMillis, int poolSize) { - return new PooledRequestQueue(registry, startWorkTimeoutMillis, timeoutMillis, poolSize); - } - - default long getDefaultTimeoutMillis() { - return DEFAULT_TIMEOUT_MILLIS; - } - - default long getDefaultStartWorkTimeoutMillis() { - return DEFAULT_START_WORK_TIMEOUT_MILLIS; - } - - default T execute(String partition, Callable operation) throws Throwable { - return execute(partition, operation, getDefaultStartWorkTimeoutMillis(), getDefaultTimeoutMillis(), TimeUnit.MILLISECONDS); - } - - T execute(String partition, Callable operation, long startWorkTimeout, long timeout, TimeUnit unit) throws Throwable; - - class NOOP implements RequestQueue { - @Override - public T execute(String partition, Callable operation, long startWorkTimeout, long timeout, TimeUnit unit) throws Throwable { - return operation.call(); - } - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequest.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequest.java deleted file mode 100644 index 1b4f3b10953..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
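The RequestQueue interface removed above partitions expensive reads (keyed here by application) so that one busy partition cannot monopolize the shared worker pool. A hedged usage sketch against the API shown above; the noop queue is used so the example terminates cleanly, where production code would use RequestQueue.pooled(registry, poolSize) with a real Spectator registry:

    import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue;

    public class RequestQueueSketch {
      public static void main(String[] args) throws Throwable {
        // noop() runs the callable inline; pooled(...) would enqueue it on a
        // per-partition queue with the start-work and completion timeouts above.
        RequestQueue queue = RequestQueue.noop();
        String result = queue.execute("myapp", () -> "cluster details for myapp");
        System.out.println(result);
      }
    }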
- */ - -package com.netflix.spinnaker.clouddriver.requestqueue.pooled; - -import com.netflix.spectator.api.Registry; -import com.netflix.spectator.api.Timer; - -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.TimeUnit; - -class PooledRequest implements Runnable { - private final Timer timer; - private final Promise result; - private final Callable work; - private final long startTime = System.nanoTime(); - - PooledRequest(Registry registry, String partition, Callable work) { - this.timer = registry.timer(registry.createId("pooledRequestQueue.enqueueTime", "partition", partition)); - this.result = new Promise<>(registry, partition); - this.work = work; - } - - Promise getPromise() { - return result; - } - - void cancel() { - result.completeWithException(new CancellationException()); - } - - @Override - public void run() { - timer.record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); - //request may have expired with a timeout prior to this point, lets not - // issue the work if that is the case as the caller has already moved on - if (result.shouldStart()) { - try { - result.complete(work.call()); - } catch (Throwable t) { - result.completeWithException(t); - } - } - } -} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueue.java b/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueue.java deleted file mode 100644 index 0f278b2f053..00000000000 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueue.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.netflix.spinnaker.clouddriver.requestqueue.pooled; - -import com.netflix.spectator.api.Id; -import com.netflix.spectator.api.NoopRegistry; -import com.netflix.spectator.api.Registry; -import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue; - -import javax.annotation.PreDestroy; -import java.util.Collection; -import java.util.Queue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -public class PooledRequestQueue implements RequestQueue { - private final ConcurrentMap>> partitionedRequests = new ConcurrentHashMap<>(); - private final PollCoordinator pollCoordinator = new PollCoordinator(); - - private final long defaultStartWorkTimeout; - private final long defaultTimeout; - private final ExecutorService executorService; - private final BlockingQueue submittedRequests; - private final Collection>> requestQueues; - private final RequestDistributor requestDistributor; - private final Registry registry; - - public PooledRequestQueue(Registry registry, long defaultStartWorkTimeout, long defaultTimeout, int requestPoolSize) { - - if (defaultStartWorkTimeout <= 0) { - throw new IllegalArgumentException("defaultStartWorkTimeout"); - } - - if (defaultTimeout <= 0) { - throw new IllegalArgumentException("defaultTimeout"); - } - - if (requestPoolSize < 1) { - throw new IllegalArgumentException("requestPoolSize"); - } - this.registry = registry; - this.defaultStartWorkTimeout = defaultStartWorkTimeout; - this.defaultTimeout = defaultTimeout; - this.submittedRequests = new LinkedBlockingQueue<>(); - registry.gauge("pooledRequestQueue.executorQueue.size", submittedRequests, Queue::size); - final int actualThreads = requestPoolSize + 1; - this.executorService = new ThreadPoolExecutor(actualThreads, actualThreads, 0, TimeUnit.MILLISECONDS, submittedRequests); - this.requestQueues = new CopyOnWriteArrayList<>(); - this.requestDistributor = new RequestDistributor(registry, pollCoordinator, executorService, requestQueues); - executorService.submit(requestDistributor); - } - - @PreDestroy - public void shutdown() { - requestDistributor.shutdown(); - executorService.shutdown(); - PooledRequest req; - while ((req = (PooledRequest) submittedRequests.poll()) != null) { - req.cancel(); - } - } - - @Override - public long getDefaultTimeoutMillis() { - return defaultTimeout; - } - - @Override - public long getDefaultStartWorkTimeoutMillis() { - return defaultStartWorkTimeout; - } - - @Override - public T execute(String partition, Callable operation, long startWorkTimeout, long timeout, TimeUnit unit) throws Throwable { - final long startTime = System.nanoTime(); - final Queue> queue; - if (!partitionedRequests.containsKey(partition)) { - Queue> newQueue = new LinkedBlockingQueue<>(); - Queue> existing = partitionedRequests.putIfAbsent(partition, newQueue); - if (existing == null) { - requestQueues.add(newQueue); - queue = newQueue; - registry.gauge(registry.createId("pooledRequestQueue.partition.size", "partition", partition), queue, Queue::size); - } else { - queue = existing; - } - } else { - queue = partitionedRequests.get(partition); - } - - final PooledRequest request = new PooledRequest<>(registry, partition, operation); - 
- queue.offer(request); - pollCoordinator.notifyItemsAdded(); - - Id id = registry.createId("pooledRequestQueue.totalTime", "partition", partition); - try { - T result = request.getPromise().blockingGetOrThrow(startWorkTimeout, timeout, unit); - id = id.withTag("success", "true"); - return result; - } catch (Throwable t) { - id = id.withTags("success", "false", "cause", t.getClass().getSimpleName()); - throw t; - } finally { - registry.timer(id).record(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); - } - } -} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ArtifactController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ArtifactController.java new file mode 100644 index 00000000000..249ebf29633 --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ArtifactController.java @@ -0,0 +1,121 @@ +/* + * Copyright 2017 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactDownloader; +import com.netflix.spinnaker.clouddriver.artifacts.config.ArtifactCredentials; +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactStore; +import com.netflix.spinnaker.kork.artifacts.artifactstore.ArtifactStoreURIBuilder; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.exceptions.MissingCredentialsException; +import java.io.InputStream; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.IOUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.HttpStatus; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.ResponseStatus; +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.servlet.mvc.method.annotation.StreamingResponseBody; + +@Slf4j +@RestController +@RequestMapping("/artifacts") +public class ArtifactController { + private ArtifactCredentialsRepository artifactCredentialsRepository; + private ArtifactDownloader artifactDownloader; + private final ArtifactStore storage; + private final ArtifactStoreURIBuilder artifactStoreURIBuilder; + + @Autowired + public ArtifactController( + Optional artifactCredentialsRepository, + Optional artifactDownloader, + Optional storage, + Optional artifactStoreURIBuilder) { + this.artifactCredentialsRepository = 
artifactCredentialsRepository.orElse(null); + this.artifactDownloader = artifactDownloader.orElse(null); + this.storage = storage.orElse(null); + this.artifactStoreURIBuilder = artifactStoreURIBuilder.orElse(null); + } + + @RequestMapping(method = RequestMethod.GET, value = "/credentials") + List list() { + if (artifactCredentialsRepository == null) { + return Collections.emptyList(); + } else { + return artifactCredentialsRepository.getAllCredentials(); + } + } + + // PUT because we need to send a body, which GET does not allow for spring/retrofit + @RequestMapping(method = RequestMethod.PUT, value = "/fetch") + StreamingResponseBody fetch(@RequestBody Artifact artifact) { + if (artifactDownloader == null) { + throw new IllegalStateException( + "Artifacts have not been enabled. Enable them using 'artifacts.enabled' in clouddriver"); + } + + return outputStream -> { + try (InputStream artifactStream = artifactDownloader.download(artifact)) { + IOUtils.copy(artifactStream, outputStream); + } + }; + } + + @RequestMapping(method = RequestMethod.GET, value = "/content-address/{application}/{hash}") + Artifact.StoredView getStoredArtifact( + @PathVariable(value = "application") String application, + @PathVariable(value = "hash") String hash) { + Artifact artifact = storage.get(artifactStoreURIBuilder.buildURIFromPaths(application, hash)); + Artifact.StoredView view = new Artifact.StoredView(artifact.getReference()); + return view; + } + + @RequestMapping(method = RequestMethod.GET, value = "/account/{accountName}/names") + List getNames( + @PathVariable("accountName") String accountName, + @RequestParam(value = "type") String artifactType) { + ArtifactCredentials credentials = + artifactCredentialsRepository.getCredentialsForType(accountName, artifactType); + return credentials.getArtifactNames(); + } + + @RequestMapping(method = RequestMethod.GET, value = "/account/{accountName}/versions") + List getVersions( + @PathVariable("accountName") String accountName, + @RequestParam(value = "type") String artifactType, + @RequestParam(value = "artifactName") String artifactName) { + ArtifactCredentials credentials = + artifactCredentialsRepository.getCredentialsForType(accountName, artifactType); + return credentials.getArtifactVersions(artifactName); + } + + @ExceptionHandler(MissingCredentialsException.class) + @ResponseStatus(HttpStatus.NOT_FOUND) + public void handleMissingCredentials() {} +} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/CredentialsController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/CredentialsController.java new file mode 100644 index 00000000000..5e72a46d937 --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/CredentialsController.java @@ -0,0 +1,177 @@ +/* + * Copyright 2021 Apple Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
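ArtifactController's fetch endpoint above is a PUT purely so a request body can be sent, and it streams the artifact contents back. A hypothetical client-side sketch using java.net.http; the host, port (clouddriver's conventional 7002), and the artifact reference are assumptions, not part of the diff:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class FetchArtifactSketch {
      public static void main(String[] args) throws Exception {
        String body = "{\"type\":\"http/file\",\"reference\":\"https://example.com/manifest.yml\"}";
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:7002/artifacts/fetch"))
            .header("Content-Type", "application/json")
            // PUT rather than GET because GET bodies are unsupported, per the comment above
            .PUT(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response =
            HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // the streamed artifact content
      }
    }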
+ */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.configuration.CredentialsConfiguration; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionService; +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition; +import com.netflix.spinnaker.kork.annotations.Beta; +import com.netflix.spinnaker.kork.exceptions.ConfigurationException; +import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.annotation.CheckForNull; +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@RequestMapping("/credentials") +public class CredentialsController { + private final AccountDefinitionService accountDefinitionService; + private final CredentialsConfiguration credentialsConfiguration; + private final ObjectMapper objectMapper; + private final AccountCredentialsProvider accountCredentialsProvider; + + public CredentialsController( + Optional service, + CredentialsConfiguration credentialsConfiguration, + ObjectMapper objectMapper, + AccountCredentialsProvider accountCredentialsProvider) { + this.accountDefinitionService = service.orElse(null); + this.credentialsConfiguration = credentialsConfiguration; + this.objectMapper = objectMapper; + this.accountCredentialsProvider = accountCredentialsProvider; + } + + @GetMapping + public List> listAccountCredentials(@RequestParam Optional expand) { + boolean shouldExpand = expand.orElse(false); + return accountCredentialsProvider.getAll().stream() + .map(accountCredentials -> renderAccountCredentials(accountCredentials, shouldExpand)) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + + @GetMapping("/{accountName}") + public Map getAccountCredentialsDetails(@PathVariable String accountName) { + var accountDetail = + renderAccountCredentials(accountCredentialsProvider.getCredentials(accountName), true); + if (accountDetail == null) { + throw new NotFoundException(String.format("Account does not exist (name: %s)", accountName)); + } + return accountDetail; + } + + @CheckForNull + private Map renderAccountCredentials( + AccountCredentials credentials, boolean expand) { + if (credentials == null) { + return null; + } + var cred = objectMapper.convertValue(credentials, new TypeReference>() {}); + if (!expand) { + cred.keySet() + .retainAll( + List.of( + "name", + "environment", + "accountType", + "cloudProvider", + "requiredGroupMembership", + "permissions", + "accountId")); + } + 
cred.put("type", credentials.getCloudProvider()); + cred.put( + "challengeDestructiveActions", + credentialsConfiguration + .getChallengeDestructiveActionsEnvironments() + .contains(credentials.getEnvironment())); + cred.put( + "primaryAccount", + credentialsConfiguration.getPrimaryAccountTypes().contains(credentials.getAccountType())); + return cred; + } + + @GetMapping("/type/{accountType}") + @Beta + public List listAccountsByType( + @PathVariable String accountType, + @RequestParam(required = false, defaultValue = "100") Integer limit, + @RequestParam(required = false) String startingAccountName) { + validateAccountStorageEnabled(); + return accountDefinitionService.listAccountDefinitionsByType( + accountType, limit, startingAccountName); + } + + @PostMapping + @Beta + public CredentialsDefinition createAccount(@RequestBody CredentialsDefinition definition) { + validateAccountStorageEnabled(); + return accountDefinitionService.createAccount(definition); + } + + @PutMapping + @Beta + public CredentialsDefinition saveAccount(@RequestBody CredentialsDefinition definition) { + validateAccountStorageEnabled(); + return accountDefinitionService.saveAccount(definition); + } + + @PutMapping("/{accountName}") + @Beta + public CredentialsDefinition updateAccount( + @RequestBody CredentialsDefinition definition, @PathVariable String accountName) { + validateAccountStorageEnabled(); + String name = definition.getName(); + if (!accountName.equals(name)) { + throw new InvalidRequestException( + String.format( + "Mismatched account names. URI value: %s. Request body value: %s.", + accountName, name)); + } + return accountDefinitionService.updateAccount(definition); + } + + @DeleteMapping("/{accountName}") + @Beta + public void deleteAccount(@PathVariable String accountName) { + validateAccountStorageEnabled(); + accountDefinitionService.deleteAccount(accountName); + } + + @GetMapping("/{accountName}/history") + @Beta + public List getAccountHistory( + @PathVariable String accountName) { + validateAccountStorageEnabled(); + return accountDefinitionService.getAccountHistory(accountName); + } + + private void validateAccountStorageEnabled() { + if (accountDefinitionService == null) { + throw new ConfigurationException( + "Cannot use AccountDefinitionService endpoints without enabling AccountDefinitionService bean"); + } + } +} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/EntityTagsController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/EntityTagsController.java new file mode 100644 index 00000000000..f525e87ad5b --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/EntityTagsController.java @@ -0,0 +1,95 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import com.netflix.spinnaker.clouddriver.model.EntityTags; +import com.netflix.spinnaker.clouddriver.model.EntityTagsProvider; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.util.*; +import java.util.stream.Collectors; +import javax.servlet.http.HttpServletRequest; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.MessageSource; +import org.springframework.security.access.prepost.PostAuthorize; +import org.springframework.security.access.prepost.PostFilter; +import org.springframework.security.access.prepost.PreAuthorize; +import org.springframework.util.AntPathMatcher; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; +import org.springframework.web.servlet.HandlerMapping; + +@RestController +@RequestMapping("/tags") +public class EntityTagsController { + private final MessageSource messageSource; + private final EntityTagsProvider tagProvider; + + @Autowired + public EntityTagsController( + MessageSource messageSource, Optional tagProvider) { + this.messageSource = messageSource; + this.tagProvider = tagProvider.orElse(null); + } + + @RequestMapping(method = RequestMethod.GET) + @PreAuthorize("@fiatPermissionEvaluator.storeWholePermission()") + @PostFilter("hasPermission(filterObject.getEntityRef().getApplication(), 'APPLICATION', 'READ')") + public Collection list( + @RequestParam(value = "cloudProvider", required = false) String cloudProvider, + @RequestParam(value = "application", required = false) String application, + @RequestParam(value = "entityType", required = false) String entityType, + @RequestParam(value = "entityId", required = false) String entityId, + @RequestParam(value = "idPrefix", required = false) String idPrefix, + @RequestParam(value = "account", required = false) String account, + @RequestParam(value = "region", required = false) String region, + @RequestParam(value = "namespace", required = false) String namespace, + @RequestParam(value = "maxResults", required = false, defaultValue = "5000") int maxResults, + @RequestParam Map allParameters) { + + Map tags = + allParameters.entrySet().stream() + .filter(m -> m.getKey().toLowerCase().startsWith("tag")) + .collect( + Collectors.toMap( + p -> p.getKey().toLowerCase().replaceAll("tag:", ""), Map.Entry::getValue)); + + return tagProvider.getAll( + cloudProvider, + application, + entityType, + entityId != null ? 
Arrays.asList(entityId.split(",")) : null, + idPrefix, + account, + region, + namespace, + tags, + maxResults); + } + + @RequestMapping(value = "/**", method = RequestMethod.GET) + @PreAuthorize("@fiatPermissionEvaluator.storeWholePermission()") + @PostAuthorize("@authorizationSupport.authorizeEntityTags(returnObject)") + public EntityTags get(HttpServletRequest request) { + String pattern = (String) request.getAttribute(HandlerMapping.BEST_MATCHING_PATTERN_ATTRIBUTE); + String id = new AntPathMatcher().extractPathWithinPattern(pattern, request.getServletPath()); + return tagProvider + .get(id) + .orElseThrow(() -> new NotFoundException("No EntityTags found w/ id = '" + id + "'")); + } +} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/FunctionController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/FunctionController.java new file mode 100644 index 00000000000..31057c9a358 --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/FunctionController.java @@ -0,0 +1,81 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import com.netflix.spinnaker.clouddriver.model.Function; +import com.netflix.spinnaker.clouddriver.model.FunctionProvider; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.security.access.prepost.PostAuthorize; +import org.springframework.security.access.prepost.PreAuthorize; +import org.springframework.web.bind.annotation.*; + +@RestController +public class FunctionController { + private final List functionProviders; + private HashMap functionMap = new HashMap(); + + @Autowired + public FunctionController(Optional> functionProviders) { + this.functionProviders = functionProviders.orElse(Collections.emptyList()); + } + + @PostAuthorize("@authorizationSupport.filterForAccounts(returnObject)") + @RequestMapping(value = "/functions", method = RequestMethod.GET) + @ResponseBody + public List list( + @RequestParam(value = "functionName", required = false) String functionName, + @RequestParam(value = "region", required = false) String region, + @RequestParam(value = "account", required = false) String account) { + if (functionName == null || functionName.isEmpty()) { + return functionProviders.stream() + .map(FunctionProvider::getAllFunctions) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + } else { + try { + List myFunction = + functionProviders.stream() + .map( + functionProvider -> functionProvider.getFunction(account, region, functionName)) + .filter(function -> function != null) + .collect(Collectors.toList()); + return myFunction; + } catch 
(NotFoundException e) { + throw new NotFoundException(functionName + " does not exist"); + } + } + } + + @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ')") + @PostAuthorize("@authorizationSupport.filterForAccounts(returnObject)") + @RequestMapping(value = "/applications/{application}/functions", method = RequestMethod.GET) + List<Function> list(@PathVariable String application) { + List<Function> appFunctions = + functionProviders.stream() + .map(functionProvider -> functionProvider.getApplicationFunctions(application)) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + return appFunctions; + } +} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ImageController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ImageController.java new file mode 100644 index 00000000000..8c0fe0ade88 --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ImageController.java @@ -0,0 +1,58 @@ +/* + * Copyright 2018 Schibsted ASA. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import com.netflix.spinnaker.clouddriver.model.Image; +import com.netflix.spinnaker.clouddriver.model.ImageProvider; +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException; +import java.util.List; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@RequestMapping("/images") +public class ImageController { + + @Autowired List<ImageProvider> imageProviders; + + @RequestMapping(value = "/{provider}/{imageId}", method = RequestMethod.GET) + Image getImage(@PathVariable String provider, @PathVariable String imageId) { + + List<ImageProvider> imageProviderList = + imageProviders.stream() + .filter(imageProvider -> imageProvider.getCloudProvider().equals(provider)) + .collect(Collectors.toList()); + + if (imageProviderList.isEmpty()) { + throw new NotFoundException("ImageProvider for provider " + provider + " not found."); + } else if (imageProviderList.size() > 1) { + throw new IllegalStateException( + "Found multiple ImageProviders for provider " + + provider + + ". 
Multiple ImageProviders for a single provider are not supported."); + } else { + return imageProviderList + .get(0) + .getImageById(imageId) + .orElseThrow(() -> new NotFoundException("Image not found (id: " + imageId + ")")); + } + } +} diff --git a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupManagerController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ServerGroupManagerController.java similarity index 77% rename from clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupManagerController.java rename to clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ServerGroupManagerController.java index b08b8fa5595..fad3435b86e 100644 --- a/clouddriver-web/src/main/groovy/com/netflix/spinnaker/clouddriver/controllers/ServerGroupManagerController.java +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ServerGroupManagerController.java @@ -20,9 +20,13 @@ import com.netflix.spinnaker.clouddriver.model.ServerGroupManager; import com.netflix.spinnaker.clouddriver.model.ServerGroupManagerProvider; import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.security.access.prepost.PostAuthorize; import org.springframework.security.access.prepost.PostFilter; import org.springframework.security.access.prepost.PreAuthorize; import org.springframework.web.bind.annotation.PathVariable; @@ -30,42 +34,39 @@ import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RestController; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - @Slf4j @RestController @RequestMapping("/applications/{application}/serverGroupManagers") public class ServerGroupManagerController { - final List serverGroupManagerProviders; + final List> serverGroupManagerProviders; final RequestQueue requestQueue; @Autowired - public ServerGroupManagerController(List serverGroupManagerProviders, RequestQueue requestQueue) { + public ServerGroupManagerController( + List> serverGroupManagerProviders, + RequestQueue requestQueue) { this.serverGroupManagerProviders = serverGroupManagerProviders; this.requestQueue = requestQueue; } @PreAuthorize("hasPermission(#application, 'APPLICATION', 'READ')") - @PostFilter("hasPermission(filterObject.account, 'ACCOUNT', 'READ')" ) + @PostFilter("hasPermission(filterObject.account, 'ACCOUNT', 'READ')") @RequestMapping(method = RequestMethod.GET) Set getForApplication(@PathVariable String application) { return serverGroupManagerProviders.stream() - .map(provider -> { - try { - return requestQueue.execute(application, () -> provider.getServerGroupManagersByApplication(application)); - } catch (Throwable t) { - log.warn("Failed to read server group managers" , t); - return null; - } - }) + .map( + provider -> { + try { + return requestQueue.execute( + application, () -> provider.getServerGroupManagersByApplication(application)); + } catch (Throwable t) { + log.warn("Failed to read server group managers", t); + return null; + } + }) .filter(Objects::nonNull) .flatMap(Collection::stream) - .map(i -> (ServerGroupManager) i) 
.collect(Collectors.toSet()); } } diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ServiceBrokerController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ServiceBrokerController.java new file mode 100644 index 00000000000..0723e90284b --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/ServiceBrokerController.java @@ -0,0 +1,73 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import static java.util.Comparator.comparing; +import static java.util.stream.Collectors.toList; + +import com.netflix.spinnaker.clouddriver.model.Service; +import com.netflix.spinnaker.clouddriver.model.ServiceInstance; +import com.netflix.spinnaker.clouddriver.model.ServiceProvider; +import java.util.Collection; +import java.util.Collections; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.security.access.prepost.PreAuthorize; +import org.springframework.web.bind.annotation.*; + +@RestController +@RequestMapping("/servicebroker") +public class ServiceBrokerController { + private final Collection serviceProviders; + + public ServiceBrokerController( + @Autowired(required = false) Collection serviceProviders) { + if (serviceProviders != null) { + this.serviceProviders = serviceProviders; + } else { + this.serviceProviders = Collections.emptyList(); + } + } + + @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") + @GetMapping("/{account}/services") + public Collection listServices( + @RequestParam(value = "cloudProvider") String cloudProvider, + @RequestParam(value = "region") String region, + @PathVariable String account) { + return serviceProviders.stream() + .filter(serviceProvider -> serviceProvider.getCloudProvider().equals(cloudProvider)) + .flatMap(serviceProvider -> serviceProvider.getServices(account, region).stream()) + .sorted(comparing(Service::getName)) + .collect(toList()); + } + + @PreAuthorize("hasPermission(#account, 'ACCOUNT', 'READ')") + @GetMapping("/{account}/serviceInstance") + public ServiceInstance getServiceInstance( + @PathVariable String account, + @RequestParam(value = "cloudProvider") String cloudProvider, + @RequestParam(value = "region") String region, + @RequestParam(value = "serviceInstanceName") String serviceInstanceName) { + return serviceProviders.stream() + .filter(serviceProvider -> serviceProvider.getCloudProvider().equals(cloudProvider)) + .findFirst() + .map( + serviceProvider -> + serviceProvider.getServiceInstance(account, region, serviceInstanceName)) + .orElse(null); + } +} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/admin/EntityTagsAdminController.java b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/admin/EntityTagsAdminController.java new file mode 100644 index 00000000000..923b6aa05e1 --- /dev/null +++ 
b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/controllers/admin/EntityTagsAdminController.java @@ -0,0 +1,84 @@ +/* + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers.admin; + +import com.netflix.spinnaker.clouddriver.model.EntityTagsProvider; +import java.util.Map; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@RequestMapping("/admin/tags") +public class EntityTagsAdminController { + private final Logger log = LoggerFactory.getLogger(getClass()); + + private final EntityTagsProvider entityTagsProvider; + + @Autowired + public EntityTagsAdminController(Optional entityTagsProvider) { + this.entityTagsProvider = entityTagsProvider.orElse(null); + } + + @RequestMapping(value = "/reindex", method = RequestMethod.POST) + void reindex() { + entityTagsProvider.reindex(); + } + + @RequestMapping(value = "/delta", method = RequestMethod.GET) + Map delta() { + return entityTagsProvider.delta(); + } + + @RequestMapping(value = "/reconcile", method = RequestMethod.POST) + Map reconcile( + @RequestParam(name = "dryRun", defaultValue = "true") Boolean dryRun, + @RequestParam(name = "cloudProvider") String cloudProvider, + @RequestParam(name = "account", required = false) String account, + @RequestParam(name = "region", required = false) String region) { + return entityTagsProvider.reconcile( + cloudProvider, account, region, Optional.ofNullable(dryRun).orElse(true)); + } + + @RequestMapping(value = "/deleteByNamespace/{namespace}", method = RequestMethod.POST) + Map deleteByNamespace( + @PathVariable("namespace") String namespace, + @RequestParam(name = "dryRun", defaultValue = "true") Boolean dryRun, + @RequestParam(name = "deleteFromSource", defaultValue = "false") Boolean deleteFromSource) { + return entityTagsProvider.deleteByNamespace( + namespace, + Optional.ofNullable(dryRun).orElse(true), + Optional.ofNullable(deleteFromSource).orElse(false)); + } + + @RequestMapping(value = "/deleteByTag/{tag}", method = RequestMethod.POST) + Map deleteByTag( + @PathVariable("tag") String namespace, + @RequestParam(name = "dryRun", defaultValue = "true") Boolean dryRun, + @RequestParam(name = "deleteFromSource", defaultValue = "false") Boolean deleteFromSource) { + return entityTagsProvider.deleteByTag( + namespace, + Optional.ofNullable(dryRun).orElse(true), + Optional.ofNullable(deleteFromSource).orElse(false)); + } +} diff --git a/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/listeners/ConfigurationRefreshListener.java 
b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/listeners/ConfigurationRefreshListener.java new file mode 100644 index 00000000000..6af2d0eabdb --- /dev/null +++ b/clouddriver-web/src/main/java/com/netflix/spinnaker/clouddriver/listeners/ConfigurationRefreshListener.java @@ -0,0 +1,40 @@ +/* + * Copyright 2019 Pivotal, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.listeners; + +import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; +import java.util.List; +import org.springframework.cloud.context.scope.refresh.RefreshScopeRefreshedEvent; +import org.springframework.context.ApplicationListener; +import org.springframework.stereotype.Component; + +@Component +public class ConfigurationRefreshListener + implements ApplicationListener<RefreshScopeRefreshedEvent> { + + private final List<CredentialsInitializerSynchronizable> credentialsSynchronizers; + + public ConfigurationRefreshListener( + List<CredentialsInitializerSynchronizable> credentialsSynchronizers) { + this.credentialsSynchronizers = credentialsSynchronizers; + } + + @Override + public void onApplicationEvent(RefreshScopeRefreshedEvent event) { + credentialsSynchronizers.forEach(CredentialsInitializerSynchronizable::synchronize); + } +}
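RefreshScopeRefreshedEvent is published by Spring Cloud whenever the refresh scope is rebuilt (with the default actuator setup, after a POST to /actuator/refresh), so each CredentialsInitializerSynchronizable re-synchronizes its accounts and credential changes picked up by the refresh take effect without a restart.

diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/MainSpec.java b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/MainSpec.java new file mode 100644 index 00000000000..a0881871729 --- /dev/null +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/MainSpec.java @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.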
+ */ + +package com.netflix.spinnaker.clouddriver; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringExtension; + +@ExtendWith(SpringExtension.class) +@SpringBootTest(classes = {Main.class}) +@TestPropertySource( + properties = { + "redis.enabled = false", + "sql.enabled = false", + "spring.application.name = clouddriver" + }) +public class MainSpec { + @Test + public void startupTest() {} +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationControllerSpec.groovy deleted file mode 100644 index 2876d6a0a0e..00000000000 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationControllerSpec.groovy +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.controllers - -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.clouddriver.aws.model.AmazonApplication -import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class ApplicationControllerSpec extends Specification { - - @Shared - ApplicationsController applicationsController - - def setup() { - applicationsController = new ApplicationsController(requestQueue: RequestQueue.noop()) - } - - def "call all application providers on listing"() { - setup: - def appProvider1 = Mock(ApplicationProvider) - def appProvider2 = Mock(ApplicationProvider) - applicationsController.applicationProviders = [appProvider1, appProvider2] - - when: - applicationsController.list(false /*expand*/, true /*restricted*/) - - then: - 1 * appProvider1.getApplications(false) - 1 * appProvider2.getApplications(false) - } - - def "merge clusterNames and attributes when multiple apps are found"() { - setup: - def appProvider1 = Mock(ApplicationProvider) - def appProvider2 = Mock(ApplicationProvider) - def cluProvider1 = Mock(ClusterProvider) - applicationsController.applicationProviders = [appProvider1, appProvider2] - applicationsController.clusterProviders = [cluProvider1] - def app1 = new AmazonApplication(name: "foo", clusterNames: [test: ["bar"] as Set], attributes: [tag: "val"]) - def app2 = new AmazonApplication(name: "foo", clusterNames: [test: ["baz"] as Set], attributes: [:]) - def cluster = Mock(Cluster) - cluster.getAccountName() >> "test" - 
cluster.getName() >> "foo" - cluster.getLoadBalancers() >> [] - cluster.getType() >> "aws" - def sg1 = Mock(ServerGroup) - sg1.getName() >> "bar" - def sg2 = Mock(ServerGroup) - sg2.getName() >> "baz" - cluster.getServerGroups() >> [sg1, sg2] - - when: - def result = applicationsController.get("foo") - - then: - 2 * cluProvider1.getClusterSummaries("foo") >> [test: cluster] - 1 * appProvider1.getApplication("foo") >> app1 - 1 * appProvider2.getApplication("foo") >> app2 - result.name == "foo" - result.clusters.test*.serverGroups.flatten() == ["bar", "baz"] - result.attributes == [tag: "val", cloudProviders: "aws"] - } - - def "prune nulls when subset of application providers find app"() { - setup: - def appProvider1 = Mock(ApplicationProvider) - def appProvider2 = Mock(ApplicationProvider) - def cluProvider1 = Mock(ClusterProvider) - applicationsController.applicationProviders = [appProvider1, appProvider2] - applicationsController.clusterProviders = [cluProvider1] - def app1 = new AmazonApplication(name: "foo", clusterNames: [test: ["bar"] as Set], attributes: [tag: "val"]) - def cluster = Mock(Cluster) - cluster.getAccountName() >> "test" - cluster.getName() >> "foo" - cluster.getType() >> "aws" - cluster.getLoadBalancers() >> [] - def sg1 = Mock(ServerGroup) - sg1.getName() >> "bar" - cluster.getServerGroups() >> [sg1] - - when: - def result = applicationsController.get("foo") - - then: - 1 * cluProvider1.getClusterSummaries("foo") >> [test: cluster] - 1 * appProvider1.getApplication("foo") >> app1 - 1 * appProvider2.getApplication("foo") >> null - result.name == "foo" - result.clusters.test*.serverGroups.flatten() == ["bar"] - result.attributes == [tag: "val", cloudProviders: "aws"] - } - - def "throw NotFoundException when no apps are found"() { - setup: - def appProvider1 = Mock(ApplicationProvider) - def appProvider2 = Mock(ApplicationProvider) - applicationsController.applicationProviders = [appProvider1, appProvider2] - - when: - def result = applicationsController.get("foo") - - then: - 1 * appProvider1.getApplication("foo") >> null - 1 * appProvider2.getApplication("foo") >> null - NotFoundException e = thrown() - e.message == "Application does not exist (name: foo)" - } - - @Unroll - def "provide cloudProviders field correctly based on clusters"() { - setup: - def appProvider = Mock(ApplicationProvider) - def cluProvider = Mock(ClusterProvider) - applicationsController.applicationProviders = [appProvider] - applicationsController.clusterProviders = [cluProvider] - def app1 = new AmazonApplication(name: "foo", clusterNames: [test: ["bar", "baz"] as Set], attributes: [tag: "val"]) - def cluster = Mock(Cluster) - cluster.getAccountName() >> "test" - cluster.getName() >> "bar" - cluster.getType() >> cloudProvider1 - cluster.getLoadBalancers() >> [] - cluster.getServerGroups() >> [] - def cluster1 = Mock(Cluster) - cluster1.getAccountName() >> "test" - cluster1.getName() >> "baz" - cluster1.getType() >> cloudProvider2 - cluster1.getLoadBalancers() >> [] - cluster1.getServerGroups() >> [] - - when: - def result = applicationsController.get("foo") - - then: - 1 * cluProvider.getClusterSummaries("foo") >> [test: [cluster, cluster1]] - 1 * appProvider.getApplication("foo") >> app1 - result.attributes.cloudProviders == expectedCloudProviders - - where: - cloudProvider1 | cloudProvider2 || expectedCloudProviders - "aws" | "titus" || "aws,titus" - "aws" | "aws" || "aws" - } -} diff --git 
a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsControllerSpec.groovy new file mode 100644 index 00000000000..f36400fbc40 --- /dev/null +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ApplicationsControllerSpec.groovy @@ -0,0 +1,175 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers + +import com.netflix.spinnaker.clouddriver.model.* +import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue +import com.netflix.spinnaker.kork.web.exceptions.NotFoundException +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +class ApplicationsControllerSpec extends Specification { + + @Shared + ApplicationsController applicationsController + + def setup() { + applicationsController = new ApplicationsController(requestQueue: RequestQueue.noop()) + } + + def "call all application providers on listing"() { + setup: + def appProvider1 = Mock(ApplicationProvider) + def appProvider2 = Mock(ApplicationProvider) + applicationsController.applicationProviders = [appProvider1, appProvider2] + + when: + applicationsController.list(false /*expand*/, true /*restricted*/) + + then: + 1 * appProvider1.getApplications(false) + 1 * appProvider2.getApplications(false) + } + + def "merge clusterNames and attributes when multiple apps are found"() { + setup: + def appProvider1 = Mock(ApplicationProvider) + def appProvider2 = Mock(ApplicationProvider) + def cluProvider1 = Mock(ClusterProvider) + applicationsController.applicationProviders = [appProvider1, appProvider2] + applicationsController.clusterProviders = [cluProvider1] + def app1 = [getName: { "foo" }, getClusterNames: { [test: ["bar"] as Set] }, getAttributes: { [tag: "val"] }] as Application + def app2 = [getName: { "foo" }, getClusterNames: { [test: ["baz"] as Set] }, getAttributes: { [:] }] as Application + def cluster = Mock(Cluster) + cluster.getAccountName() >> "test" + cluster.getName() >> "foo" + cluster.getLoadBalancers() >> [] + cluster.getType() >> "aws" + def sg1 = Mock(ServerGroup) + sg1.getName() >> "bar" + def sg2 = Mock(ServerGroup) + sg2.getName() >> "baz" + cluster.getServerGroups() >> [sg1, sg2] + + when: + def result = applicationsController.get("foo") + + then: + 2 * cluProvider1.getClusterSummaries("foo") >> [test: cluster] + 1 * appProvider1.getApplication("foo") >> app1 + 1 * appProvider2.getApplication("foo") >> app2 + result.name == "foo" + result.clusters.test*.serverGroups.flatten() == ["bar", "baz"] + result.attributes == [tag: "val", cloudProviders: "aws"] + } + + def "prune nulls when subset of application providers find app"() { + setup: + def appProvider1 = Mock(ApplicationProvider) + def appProvider2 = Mock(ApplicationProvider) + def cluProvider1 = Mock(ClusterProvider) + 
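// The app stubs below use Groovy map-to-interface coercion (e.g. [getName: { "foo" }] as Application), replacing the provider-specific AmazonApplication the old spec depended on. +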
applicationsController.applicationProviders = [appProvider1, appProvider2] + applicationsController.clusterProviders = [cluProvider1] + def app1 = [getName: { "foo" }, getClusterNames: { [test: ["bar"] as Set] }, getAttributes: { [tag: "val"] }] as Application + def cluster = Mock(Cluster) + cluster.getAccountName() >> "test" + cluster.getName() >> "foo" + cluster.getType() >> "aws" + cluster.getLoadBalancers() >> [] + def sg1 = Mock(ServerGroup) + sg1.getName() >> "bar" + cluster.getServerGroups() >> [sg1] + + when: + def result = applicationsController.get("foo") + + then: + 1 * cluProvider1.getClusterSummaries("foo") >> [test: cluster] + 1 * appProvider1.getApplication("foo") >> app1 + 1 * appProvider2.getApplication("foo") >> null + result.name == "foo" + result.clusters.test*.serverGroups.flatten() == ["bar"] + result.attributes == [tag: "val", cloudProviders: "aws"] + } + + def "throw NotFoundException when no apps are found"() { + setup: + def appProvider1 = Mock(ApplicationProvider) + def appProvider2 = Mock(ApplicationProvider) + applicationsController.applicationProviders = [appProvider1, appProvider2] + + when: + def result = applicationsController.get("foo") + + then: + 1 * appProvider1.getApplication("foo") >> null + 1 * appProvider2.getApplication("foo") >> null + NotFoundException e = thrown() + e.message == "Application does not exist (name: foo)" + } + + def "let exceptions during get bubble up"() { + setup: + def exceptionToThrow = new RuntimeException("arbitrary exception") + def appProvider1 = Mock(ApplicationProvider) + applicationsController.applicationProviders = [appProvider1] + + when: + def result = applicationsController.get("foo") + + then: + 1 * appProvider1.getApplication("foo") >> { throw exceptionToThrow } + RuntimeException e = thrown() + e == exceptionToThrow + } + + @Unroll + def "provide cloudProviders field correctly based on clusters"() { + setup: + def appProvider = Mock(ApplicationProvider) + def cluProvider = Mock(ClusterProvider) + applicationsController.applicationProviders = [appProvider] + applicationsController.clusterProviders = [cluProvider] + def app1 = [getName: { "foo" }, getClusterNames: { [test: ["bar", "baz"] as Set] }, getAttributes: { [tag: "val"] }] as Application + def cluster = Mock(Cluster) + cluster.getAccountName() >> "test" + cluster.getName() >> "bar" + cluster.getType() >> cloudProvider1 + cluster.getLoadBalancers() >> [] + cluster.getServerGroups() >> [] + def cluster1 = Mock(Cluster) + cluster1.getAccountName() >> "test" + cluster1.getName() >> "baz" + cluster1.getType() >> cloudProvider2 + cluster1.getLoadBalancers() >> [] + cluster1.getServerGroups() >> [] + + when: + def result = applicationsController.get("foo") + + then: + 1 * cluProvider.getClusterSummaries("foo") >> [test: [cluster, cluster1]] + 1 * appProvider.getApplication("foo") >> app1 + result.attributes.cloudProviders == expectedCloudProviders + + where: + cloudProvider1 | cloudProvider2 || expectedCloudProviders + "aws" | "titus" || "aws,titus" + "aws" | "aws" || "aws" + } +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ArtifactControllerSpec.java b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ArtifactControllerSpec.java new file mode 100644 index 00000000000..246efbecdc7 --- /dev/null +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ArtifactControllerSpec.java @@ -0,0 +1,172 @@ +/* + * Copyright 2020 Avast Software, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import static com.netflix.spinnaker.kork.common.Header.USER; +import static org.assertj.core.api.Assertions.assertThat; +import static org.hamcrest.Matchers.emptyString; +import static org.hamcrest.Matchers.is; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.asyncDispatch; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.webAppContextSetup; + +import ch.qos.logback.classic.Level; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.artifacts.ArtifactCredentialsRepository; +import com.netflix.spinnaker.clouddriver.artifacts.helm.HelmArtifactCredentials; +import com.netflix.spinnaker.credentials.CredentialsRepository; +import com.netflix.spinnaker.filters.AuthenticatedRequestFilter; +import com.netflix.spinnaker.kork.artifacts.model.Artifact; +import com.netflix.spinnaker.kork.test.log.MemoryAppender; +import java.util.List; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mockito; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.http.MediaType; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringExtension; +import org.springframework.test.context.web.WebAppConfiguration; +import org.springframework.test.web.servlet.MockMvc; +import org.springframework.test.web.servlet.MvcResult; +import org.springframework.web.context.WebApplicationContext; + +@ExtendWith(SpringExtension.class) +@WebAppConfiguration +@SpringBootTest(classes = Main.class) +@TestPropertySource( + properties = { + "redis.enabled = false", + "sql.enabled = false", + "spring.application.name = clouddriver", + "artifacts.helm.enabled = true" + }) +public class ArtifactControllerSpec { + + private MockMvc mvc; + + @Autowired private WebApplicationContext webApplicationContext; + + @Autowired private ObjectMapper objectMapper; + + @Autowired private CredentialsRepository<HelmArtifactCredentials> helmCredentials; + + /** + * This takes X-SPINNAKER-* headers from requests to clouddriver and puts them in the MDC.
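Those MDC values flow into the captured log output, which is what lets the assertions below find the user header in log lines.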
This is + * enabled when clouddriver runs normally (by WebConfig), but needs explicit mention to function + * in these tests. + */ + @Autowired AuthenticatedRequestFilter authenticatedRequestFilter; + + @BeforeEach + public void setup() throws Exception { + this.mvc = + webAppContextSetup(webApplicationContext).addFilters(authenticatedRequestFilter).build(); + } + + @Test + public void testFetchWithMisconfiguredArtifact() throws Exception { + Artifact misconfiguredArtifact = Artifact.builder().name("foo").build(); + + // Capture the log messages that ArtifactCredentialsRepository generates, + // since that's the class that logs a message when it detects a + // misconfigured artifact. + MemoryAppender memoryAppender = new MemoryAppender(ArtifactCredentialsRepository.class); + + // Use USER (i.e. the X-SPINNAKER-USER header) as a request header so the value matches + // what logback includes in log messages, which the assertions in this test depend on. + String userValue = "some user"; + + MvcResult result = + mvc.perform( + put("/artifacts/fetch") + .contentType(MediaType.APPLICATION_JSON) + .header(USER.getHeader(), userValue) + .content(objectMapper.writeValueAsString(misconfiguredArtifact))) + .andReturn(); + + mvc.perform(asyncDispatch(result)) + .andDo(print()) + .andExpect(status().isBadRequest()) + .andExpect(content().string(is(emptyString()))); + + List<String> userMessages = memoryAppender.layoutSearch("[" + userValue + "]", Level.DEBUG); + assertThat(userMessages).hasSize(1); + } + + @Test + public void testArtifactNames() throws Exception { + List<String> names = ImmutableList.of("artifact1", "artifact2"); + HelmArtifactCredentials credentials = Mockito.mock(HelmArtifactCredentials.class); + Mockito.when(credentials.getName()).thenReturn("my-account"); + Mockito.when(credentials.getType()).thenReturn(HelmArtifactCredentials.CREDENTIALS_TYPE); + Mockito.when(credentials.handlesType("helm/chart")).thenReturn(true); + Mockito.when(credentials.getArtifactNames()).thenReturn(names); + helmCredentials.save(credentials); + + mvc.perform( + get("/artifacts/account/{accountName}/names", credentials.getName()) + .param("type", "helm/chart")) + .andExpect(status().isOk()) + .andExpect(jsonPath("$", Matchers.hasSize(2))) + .andExpect(jsonPath("$[0]", Matchers.is(names.get(0)))) + .andExpect(jsonPath("$[1]", Matchers.is(names.get(1)))); + + // We also don't expect to find an account that can support type artifacts-helm + mvc.perform( + get("/artifacts/account/{accountName}/names", credentials.getName()) + .param("type", HelmArtifactCredentials.CREDENTIALS_TYPE)) + .andExpect(status().isNotFound()); + } + + @Test + public void testArtifactVersions() throws Exception { + final String artifactName = "my-artifact"; + List<String> versions = ImmutableList.of("version1", "version2"); + HelmArtifactCredentials credentials = Mockito.mock(HelmArtifactCredentials.class); + Mockito.when(credentials.getName()).thenReturn("my-account"); + Mockito.when(credentials.getType()).thenReturn(HelmArtifactCredentials.CREDENTIALS_TYPE); + Mockito.when(credentials.handlesType("helm/chart")).thenReturn(true); + Mockito.when(credentials.getArtifactVersions(artifactName)).thenReturn(versions); + helmCredentials.save(credentials); + + mvc.perform( + get("/artifacts/account/{accountName}/versions", credentials.getName()) + .param("type", "helm/chart") + .param("artifactName", artifactName)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$", Matchers.hasSize(2))) + .andExpect(jsonPath("$[0]", Matchers.is(versions.get(0)))) + .andExpect(jsonPath("$[1]",
Matchers.is(versions.get(1)))); + + // We also don't expect to find an account that can support type artifacts-helm + mvc.perform( + get("/artifacts/account/{accountName}/versions", credentials.getName()) + .param("type", HelmArtifactCredentials.CREDENTIALS_TYPE) + .param("artifactName", artifactName)) + .andExpect(status().isNotFound()); + } +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupportSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupportSpec.groovy index 0d0dc308890..c8dfd2dd268 100644 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupportSpec.groovy +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/AuthorizationSupportSpec.groovy @@ -16,16 +16,21 @@ package com.netflix.spinnaker.clouddriver.controllers +import com.netflix.spinnaker.clouddriver.model.EntityTags import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider +import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider import com.netflix.spinnaker.fiat.shared.FiatPermissionEvaluator import com.netflix.spinnaker.security.User import org.springframework.security.authentication.TestingAuthenticationToken import org.springframework.security.core.context.SecurityContextHolder import spock.lang.Specification +import spock.lang.Unroll class AuthorizationSupportSpec extends Specification { FiatPermissionEvaluator permissionEvaluator + AccountCredentialsProvider accountCredentialsProvider = Mock(AccountCredentialsProvider) def setup() { def ctx = SecurityContextHolder.createEmptyContext() @@ -170,6 +175,57 @@ class AuthorizationSupportSpec extends Specification { list.size() == 1 } + @Unroll + def "should verify access to entity tags account/application"() { + given: + def support = new AuthorizationSupport( + permissionEvaluator: permissionEvaluator, + accountCredentialsProvider: accountCredentialsProvider + ) + + and: + 1 * accountCredentialsProvider.getAll() >> { + return [ + accountCredentials("account1", "1"), + accountCredentials("account2", "2") + ] + } + + _ * permissionEvaluator.hasPermission(_, "account1", "ACCOUNT", "READ") >> true + _ * permissionEvaluator.hasPermission(_, "account2", "ACCOUNT", "READ") >> false + _ * permissionEvaluator.hasPermission(_, "clouddriver", "APPLICATION", "READ") >> true + _ * permissionEvaluator.hasPermission(_, "gate", "APPLICATION", "READ") >> false + + expect: + support.authorizeEntityTags([entityTags("id-1", accountId, application)]) == isAuthorized + + where: + accountId | application || isAuthorized + "1" | "clouddriver" || true + "2" | "clouddriver" || false + null | "clouddriver" || true + null | "gate" || false + null | null || true + + } + + EntityTags entityTags(String id, String accountId, String application) { + return new EntityTags( + id: id, + entityRef: new EntityTags.EntityRef( + accountId: accountId, + application: application + ) + ) + } + + AccountCredentials accountCredentials(String name, String accountId) { + return Mock(AccountCredentials) { + _ * getName() >> { return name } + _ * getAccountId() >> { return accountId } + } + } + static List newTestItems() { return [newTestItem("test1-item"), newTestItem("test2-item")] } diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterControllerSpec.groovy 
b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterControllerSpec.groovy index da7c5e716d3..bb9480f7e64 100644 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterControllerSpec.groovy +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ClusterControllerSpec.groovy @@ -16,19 +16,19 @@ package com.netflix.spinnaker.clouddriver.controllers -import com.netflix.spinnaker.clouddriver.aws.model.AmazonServerGroup -import com.netflix.spinnaker.clouddriver.model.Application -import com.netflix.spinnaker.clouddriver.model.ApplicationProvider -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.Instance -import com.netflix.spinnaker.clouddriver.model.ServerGroup +import com.netflix.spinnaker.clouddriver.aws.model.AmazonCluster +import com.netflix.spinnaker.clouddriver.model.* import com.netflix.spinnaker.clouddriver.requestqueue.RequestQueue import com.netflix.spinnaker.kork.web.exceptions.NotFoundException import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll +import java.util.stream.Collectors + +import static com.netflix.spinnaker.clouddriver.controllers.ClusterController.BIGGEST_TO_SMALLEST +import static com.netflix.spinnaker.clouddriver.controllers.ClusterController.OLDEST_TO_NEWEST + class ClusterControllerSpec extends Specification { @Shared @@ -56,25 +56,49 @@ class ClusterControllerSpec extends Specification { getApplication("app") >> app2 } - clusterController.applicationProviders = [appProvider1, appProvider2] + def app3 = Stub(Application) { + getName() >> "app" + getClusterNames() >> ["stage": ["we-need-all-clusters-to-be-returned"] as Set] + } + def appProvider3 = Stub(ApplicationProvider) { + getApplication("app") >> app3 + } + + clusterController.applicationProviders = [appProvider1, appProvider2, appProvider3] when: def result = clusterController.listByAccount("app") then: - result == [test: ["foo", "bar"] as Set, prod: ["baz"] as Set] + result == [test: ["foo", "bar"] as Set, prod: ["baz"] as Set, stage: ["we-need-all-clusters-to-be-returned"] as Set] } void "should throw exception when looking for specific cluster that doesnt exist"() { setup: def clusterProvider1 = Mock(ClusterProvider) + clusterProvider1.getCloudProviderId() >> { "aws" } clusterController.clusterProviders = [clusterProvider1] when: clusterController.getForAccountAndNameAndType("app", "test", "cluster", "aws", true) then: - thrown NotFoundException + def e = thrown(NotFoundException) + e.getMessage() == "No clusters found (application: app, account: test, type: aws)" + } + + void "should throw exception when no cluster provider exists for the cluster"() { + setup: + def clusterProvider1 = Mock(ClusterProvider) + clusterController.clusterProviders = [clusterProvider1] + clusterProvider1.getCloudProviderId() >> { return "some-other-cluster-provider" } + + when: + clusterController.getForAccountAndNameAndType("app", "test", "cluster", "aws", true) + + then: + def e = thrown(NotFoundException) + e.getMessage() == "No cluster provider of type: aws found that can handle cluster: cluster in application: app, account: test" } void "should return specific named serverGroup"() { @@ -85,13 +109,13 @@ class ClusterControllerSpec extends Specification { clusterController.clusterProviders = [clusterProvider1] clusterController.serverGroupController = serverGroupController - def 
serverGroup = new AmazonServerGroup(name: "clusterName-v001", region: "us-west-2") + def serverGroup = [getName: { "clusterName-v001" }, getRegion: { "us-west-2" }] as ServerGroup when: "region is not supplied" def result = clusterController.getServerGroup("app", "account", "clusterName", "type", "clusterName-v001", null) then: "expect a collection of server groups to be returned" - 1 * clusterProvider1.getCloudProviderId() >> { return "type" } + 2 * clusterProvider1.getCloudProviderId() >> { return "type" } 1 * clusterProvider1.supportsMinimalClusters() >> { return true } 1 * clusterProvider1.getCluster("app", "account", "clusterName", false) >> { def cluster = Mock(Cluster) @@ -109,7 +133,7 @@ class ClusterControllerSpec extends Specification { result = clusterController.getServerGroup("app", "account", "clusterName", "type", "clusterName-v001", "us-west-2") then: "expect a single server group to be returned" - 1 * clusterProvider1.getCloudProviderId() >> { return "type" } + 2 * clusterProvider1.getCloudProviderId() >> { return "type" } 1 * clusterProvider1.supportsMinimalClusters() >> { return false } 1 * clusterProvider1.getCluster("app", "account", "clusterName", true) >> { def cluster = Mock(Cluster) @@ -306,4 +330,58 @@ class ClusterControllerSpec extends Specification { then: thrown NotFoundException } + + void 'sorting server groups should work as expected'() { + given: + ServerGroup a = Stub() { + getCreatedTime() >> 1 + getInstances() >> [Stub(Instance), Stub(Instance)].toSet() + } + ServerGroup b = Stub() { + getCreatedTime() >> 2 + getInstances() >> [Stub(Instance), Stub(Instance)].toSet() + } + + ServerGroup c = Stub() { + getCreatedTime() >> 3 + getInstances() >> [Stub(Instance)].toSet() + } + + expect: + [a, c, b].stream().sorted(OLDEST_TO_NEWEST).collect(Collectors.toList()) == [a, b, c] + [a, c, b].stream().sorted(OLDEST_TO_NEWEST.reversed()).collect(Collectors.toList()) == [c, b, a] + [a, c, b].stream().sorted(BIGGEST_TO_SMALLEST).collect(Collectors.toList()) == [b, a, c] + } + + void "test getForAccountAndNameAndType when multiple cluster providers are present"() { + setup: + def serverGroupController = Mock(ServerGroupController) + + def clusterProvider1 = Mock(ClusterProvider) + clusterProvider1.getCloudProviderId() >> { return "aws" } + def clusterProvider2 = Mock(ClusterProvider) + clusterProvider2.getCloudProviderId() >> { return "some-other-type" } + clusterController.clusterProviders = [clusterProvider1, clusterProvider2] + clusterController.serverGroupController = serverGroupController + + def serverGroup = [getName: { "clusterName-v001" }, getRegion: { "us-west-2" }] as ServerGroup + + when: + def result = clusterController.getForAccountAndNameAndType("app", "account", "clusterName", "aws", true) + + then: "expect that only the correct cluster provider will try to get Cluster" + 1 * clusterProvider1.getCluster("app", "account", "clusterName", true) >> { + def cluster = new AmazonCluster() + cluster.type = "aws" + cluster.getServerGroups().add(serverGroup) + cluster + } + + // the second cluster provider shouldn't be asked to look for the cluster + 0 * clusterProvider2.getCluster("app", "account", "clusterName", true) + + result.getServerGroups().size() == 1 + result.type == "aws" + result.getServerGroups()[0] == serverGroup + } } diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsControllerSpec.groovy 
b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsControllerSpec.groovy index 62e4abe98a6..d194acd0a60 100644 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsControllerSpec.groovy +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/CredentialsControllerSpec.groovy @@ -19,18 +19,20 @@ package com.netflix.spinnaker.clouddriver.controllers import com.fasterxml.jackson.annotation.JsonInclude import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.clouddriver.configuration.CredentialsConfiguration -import com.netflix.spinnaker.clouddriver.security.AccountCredentials +import com.netflix.spinnaker.clouddriver.controllers.resources.DefaultAccountDefinitionService +import com.netflix.spinnaker.clouddriver.controllers.resources.ManagedAccount +import com.netflix.spinnaker.clouddriver.controllers.resources.MapBackedAccountDefinitionRepository +import com.netflix.spinnaker.clouddriver.security.AbstractAccountCredentials import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository import groovy.json.JsonSlurper +import org.springframework.http.MediaType import org.springframework.test.web.servlet.MockMvc import org.springframework.test.web.servlet.request.MockMvcRequestBuilders import org.springframework.test.web.servlet.setup.MockMvcBuilders import spock.lang.Shared import spock.lang.Specification -import javax.ws.rs.core.MediaType - class CredentialsControllerSpec extends Specification { @Shared @@ -43,7 +45,7 @@ class CredentialsControllerSpec extends Specification { def credsRepo = new MapBackedAccountCredentialsRepository() def credsProvider = new DefaultAccountCredentialsProvider(credsRepo) credsRepo.save("test", new TestNamedAccountCredentials()) - def mvc = MockMvcBuilders.standaloneSetup(new CredentialsController(accountCredentialsProvider: credsProvider, objectMapper: objectMapper, credentialsConfiguration: new CredentialsConfiguration())).build() + def mvc = MockMvcBuilders.standaloneSetup(new CredentialsController(Optional.empty(), new CredentialsConfiguration(), objectMapper, credsProvider)).build() when: def result = mvc.perform(MockMvcRequestBuilders.get("/credentials").accept(MediaType.APPLICATION_JSON)).andReturn() @@ -53,10 +55,119 @@ class CredentialsControllerSpec extends Specification { List parsedResponse = new JsonSlurper().parseText(result.response.contentAsString) as List - parsedResponse == [[name: "test", environment: "env", accountType: "acctType", cloudProvider: "testProvider", type: "testProvider", requiredGroupMembership: ["test"], permissions: [READ:["test"], WRITE:["test"]], challengeDestructiveActions: false, primaryAccount: false, providerVersion: "v1", skin: "v1"]] + parsedResponse == [[name: "test", environment: "env", accountType: "acctType", cloudProvider: "testProvider", type: "testProvider", requiredGroupMembership: ["test"], permissions: [READ:["test"], WRITE:["test"]], challengeDestructiveActions: false, primaryAccount: false]] + } + + /** + * Test to verify the use of the mandatory type (path) parameter, + * without passing the optional limit (query) parameter + */ + void "credentials are listed by type"() { + setup: + + def objectMapper = new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL) + def credsRepo = new MapBackedAccountCredentialsRepository() + def accountDefRepo = new 
MapBackedAccountDefinitionRepository() + def credsProvider = new DefaultAccountCredentialsProvider(credsRepo) + def accountDefSrvc = Optional.of(new DefaultAccountDefinitionService(accountDefRepo)) + accountDefRepo.save(new ManagedAccount("test1", "acctType1")) + accountDefRepo.save(new ManagedAccount("test2", "acctType1")) + accountDefRepo.save(new ManagedAccount("test3", "acctType2")) + accountDefRepo.save(new ManagedAccount("test4", "acctType2")) + def mvc = MockMvcBuilders.standaloneSetup(new CredentialsController(accountDefSrvc, new CredentialsConfiguration(), objectMapper, credsProvider)).build() + + // path param: + def acctType = "acctType1" + + when: + def result = mvc.perform(MockMvcRequestBuilders.get("/credentials/type/${acctType}").accept(MediaType.APPLICATION_JSON)).andReturn() + + then: + result.response.status == 200 + + List parsedResponse = new JsonSlurper().parseText(result.response.contentAsString) as List + + parsedResponse.every { acct -> acct.accountType == acctType } + } + + /** + * Test to verify the use of the type (path) + * and limit (query) parameters + */ + void "credentials are listed by type and with limit"() { + setup: + + def objectMapper = new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL) + def credsRepo = new MapBackedAccountCredentialsRepository() + def accountDefRepo = new MapBackedAccountDefinitionRepository() + def credsProvider = new DefaultAccountCredentialsProvider(credsRepo) + def accountDefSrvc = Optional.of(new DefaultAccountDefinitionService(accountDefRepo)) + accountDefRepo.save(new ManagedAccount("test1", "acctType1")) + accountDefRepo.save(new ManagedAccount("test2", "acctType1")) + accountDefRepo.save(new ManagedAccount("test3", "acctType1")) + accountDefRepo.save(new ManagedAccount("test4", "acctType1")) + def mvc = MockMvcBuilders.standaloneSetup(new CredentialsController(accountDefSrvc, new CredentialsConfiguration(), objectMapper, credsProvider)).build() + + // path param: + def acctType = "acctType1" + // query param: + def limit = 2 + + when: + def result = mvc.perform(MockMvcRequestBuilders.get("/credentials/type/${acctType}") + .param("limit", "${limit}").accept(MediaType.APPLICATION_JSON)).andReturn() + + then: + result.response.status == 200 + + List parsedResponse = new JsonSlurper().parseText(result.response.contentAsString) as List + + parsedResponse.size() == limit + parsedResponse.every { acct -> acct.accountType == acctType } + } + + /** + * Test to verify the use of the type (path), + * limit (query) and startingAccountName (query) parameters + */ + void "credentials are listed by type, startingAccountName and with limit"() { + setup: + + def objectMapper = new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL) + def credsRepo = new MapBackedAccountCredentialsRepository() + def accountDefRepo = new MapBackedAccountDefinitionRepository() + def credsProvider = new DefaultAccountCredentialsProvider(credsRepo) + def accountDefSrvc = Optional.of(new DefaultAccountDefinitionService(accountDefRepo)) + accountDefRepo.save(new ManagedAccount("foo", "acctType1")) + accountDefRepo.save(new ManagedAccount("bar1", "acctType1")) + accountDefRepo.save(new ManagedAccount("bar2", "acctType1")) + accountDefRepo.save(new ManagedAccount("baz1", "acctType1")) + accountDefRepo.save(new ManagedAccount("baz2", "acctType1")) + accountDefRepo.save(new ManagedAccount("test", "acctType1")) + def mvc = MockMvcBuilders.standaloneSetup(new CredentialsController(accountDefSrvc, new CredentialsConfiguration(), 
objectMapper, credsProvider)).build() + + // path param: + def acctType = "acctType1" + // query params: + def limit = 3 + def startingAccountName = "ba" + + when: + def result = mvc.perform(MockMvcRequestBuilders.get("/credentials/type/${acctType}") + .param("limit", "${limit}") + .param("startingAccountName", "${startingAccountName}") + .accept(MediaType.APPLICATION_JSON)).andReturn() + + then: + result.response.status == 200 + + List parsedResponse = new JsonSlurper().parseText(result.response.contentAsString) as List + + parsedResponse.size() == limit + parsedResponse.every { acct -> acct.name.startsWith(startingAccountName) } } - static class TestNamedAccountCredentials implements AccountCredentials { + static class TestNamedAccountCredentials extends AbstractAccountCredentials { String name = "test" String environment = "env" diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/DataControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/DataControllerSpec.groovy index 2ffa63ada03..e57cdda652b 100644 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/DataControllerSpec.groovy +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/DataControllerSpec.groovy @@ -17,25 +17,36 @@ package com.netflix.spinnaker.clouddriver.controllers import com.netflix.spinnaker.clouddriver.model.DataProvider -import com.netflix.spinnaker.security.AuthenticatedRequest -import org.slf4j.MDC +import com.netflix.spinnaker.kork.web.context.AuthenticatedRequestContextProvider +import com.netflix.spinnaker.kork.web.context.RequestContextProvider import org.springframework.security.access.AccessDeniedException +import spock.lang.Shared import spock.lang.Specification import spock.lang.Subject -import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequest class DataControllerSpec extends Specification { - def dataProvider = Mock(DataProvider) { - supportsIdentifier(_, _) >> { return true } - getAccountForIdentifier(_, _) >> { _, id -> return id } - } + + @Shared + Optional<List<DataProvider>> dataProviders @Subject - def dataController = new DataController(dataProviders: [dataProvider]) + def dataController = new DataController(dataProviders) + + RequestContextProvider contextProvider = new AuthenticatedRequestContextProvider() + + void setupSpec() { + DataProvider dataProvider = Mock(DataProvider) { + supportsIdentifier(_ as DataProvider.IdentifierType, _ as String) >> { return true } + getAccountForIdentifier(_ as DataProvider.IdentifierType, _ as String) >> { _, id -> return id } + } + + dataProviders = Optional.of([dataProvider]) + } void setup() { - MDC.remove(AuthenticatedRequest.SPINNAKER_ACCOUNTS) + contextProvider.get().setAccounts(null as String) } @@ -47,14 +58,14 @@ class DataControllerSpec extends Specification { thrown(AccessDeniedException) when: - MDC.put(AuthenticatedRequest.SPINNAKER_ACCOUNTS, "restricted") + contextProvider.get().setAccounts("restricted") dataController.getStaticData("restricted", [:]) then: notThrown(AccessDeniedException) } - def "should verify access to account when fetching adhoc data"() { + def "should deny when fetching adhoc data with no accounts"() { given: def httpServletRequest = Mock(HttpServletRequest) @@ -63,14 +74,28 @@ class DataControllerSpec extends Specification { then: thrown(AccessDeniedException) + } + + def "should allow access to account when fetching adhoc data with correct account"() { + given: +
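// Unlike the denial test above, the request context is seeded with the matching account here. + def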
httpServletRequest = Mock(HttpServletRequest) + contextProvider.get().setAccounts("restricted") when: - MDC.put(AuthenticatedRequest.SPINNAKER_ACCOUNTS, "restricted") dataController.getAdhocData("groupId", "restricted", httpServletRequest) then: - 1 * httpServletRequest.getAttribute(_) >> { return "pattern" } - 1 * httpServletRequest.getServletPath() >> { return "/servlet/path" } + httpServletRequest.getAttribute(_ as String) >> { return "pattern" } + httpServletRequest.getServletPath() >> { return "/servlet/path" } notThrown(AccessDeniedException) } + + // If the wrong slf4j is on the classpath, this fails. So leaving this test in here for sanity. + def "request context works"() { + given: + contextProvider.get().setAccounts("restricted") + + expect: + "restricted".equals(contextProvider.get().getAccounts().get()) + } } diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesControllerSpec.groovy index ebef2de701d..3e01adc1699 100644 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesControllerSpec.groovy +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/FeaturesControllerSpec.groovy @@ -17,9 +17,18 @@ package com.netflix.spinnaker.clouddriver.controllers import com.fasterxml.jackson.databind.ObjectMapper -import com.netflix.spinnaker.clouddriver.aws.deploy.converters.UpsertAmazonLoadBalancerAtomicOperationConverter import com.netflix.spinnaker.clouddriver.elasticsearch.converters.UpsertEntityTagsAtomicOperationConverter -import spock.lang.Specification; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations +import com.netflix.spinnaker.orchestration.OperationDescription +import org.springframework.stereotype.Component +import spock.lang.Specification + +import java.lang.annotation.ElementType +import java.lang.annotation.Retention +import java.lang.annotation.RetentionPolicy +import java.lang.annotation.Target class FeaturesControllerSpec extends Specification { def objectMapper = new ObjectMapper(); @@ -29,7 +38,7 @@ class FeaturesControllerSpec extends Specification { def controller = new FeaturesController( atomicOperationConverters: [ new UpsertEntityTagsAtomicOperationConverter(objectMapper, null, null, null, null), // @Component - new UpsertAmazonLoadBalancerAtomicOperationConverter() // @AmazonOperation and @Component + new UpsertMyCloudLoadBalancerAtomicOperationConverter() // @MyCloudOperation and @Component ] ) @@ -42,4 +51,24 @@ class FeaturesControllerSpec extends Specification { [name: "upsertLoadBalancer", enabled: true] ] } + + @MyCloudOperation(AtomicOperations.UPSERT_LOAD_BALANCER) + @Component("upsertMyCloudLoadBalancerDescription") + class UpsertMyCloudLoadBalancerAtomicOperationConverter implements AtomicOperationConverter { + @Override + AtomicOperation convertOperation(Map input) { + throw new UnsupportedOperationException() + } + + @Override + OperationDescription convertDescription(Map input) { + throw new UnsupportedOperationException() + } + } +} + +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@interface MyCloudOperation { + String value() } diff --git 
a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsControllerSpec.groovy deleted file mode 100644 index db505b7d394..00000000000 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/OperationsControllerSpec.groovy +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2014 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.controllers - -import com.netflix.spinnaker.clouddriver.aws.deploy.converters.BasicAmazonDeployAtomicOperationConverter -import com.netflix.spinnaker.clouddriver.aws.deploy.description.BasicAmazonDeployDescription -import com.netflix.spinnaker.clouddriver.data.task.Task -import com.netflix.spinnaker.clouddriver.google.deploy.description.BasicGoogleDeployDescription -import com.netflix.spinnaker.clouddriver.orchestration.AnnotationsBasedAtomicOperationsRegistry -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter -import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationDescriptionPreProcessor -import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor -import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig -import org.springframework.context.annotation.AnnotationConfigApplicationContext -import org.springframework.context.annotation.Bean -import org.springframework.context.annotation.Configuration -import org.springframework.http.MediaType -import org.springframework.test.web.servlet.request.MockMvcRequestBuilders -import org.springframework.test.web.servlet.setup.MockMvcBuilders -import spock.lang.Shared -import spock.lang.Specification -import spock.lang.Unroll - -class OperationsControllerSpec extends Specification { - - void "controller takes many operation descriptions, resolves them from the spring context, and executes them in order"() { - setup: - """ - AtomicOperationConverter beans must be registered in the application context, with the bean name that corresponds to the key - that is describing them in the request. For example, a description that looks like this: - { "desc1": {} } - will go to the Spring context for a bean named "desc1", and will call the "convertOperation" method on it, with the description as input. 
- """ - OrchestrationProcessor orchestrationProcessor = Mock(OrchestrationProcessor) - def mvc = MockMvcBuilders.standaloneSetup( - new OperationsController( - orchestrationProcessor: orchestrationProcessor, - atomicOperationsRegistry: new AnnotationsBasedAtomicOperationsRegistry( - applicationContext: new AnnotationConfigApplicationContext(TestConfig), - cloudProviders: [] - ), - opsSecurityConfigProps: new SecurityConfig.OperationsSecurityConfigurationProperties() - )).build() - - when: - mvc.perform(MockMvcRequestBuilders.post("/ops").contentType(MediaType.APPLICATION_JSON).content('[ { "desc1": {}, "desc2": {} } ]')).andReturn() - - then: - "Operations were supplied IN ORDER to the orchestration processor." - 1 * orchestrationProcessor.process(*_) >> { - // The need for this flatten is weird -- seems like a bug in spock. - assert it?.flatten()*.getClass() == [Op1, Op2, String] - Mock(Task) - } - } - - @Shared - def googlePreProcessor = new AtomicOperationDescriptionPreProcessor() { - @Override - boolean supports(Class descriptionClass) { - return descriptionClass == BasicGoogleDeployDescription - } - - @Override - Map process(Map description) { - return ["google": "true"] - } - } - - @Shared - def amazonPreProcessor = new AtomicOperationDescriptionPreProcessor() { - @Override - boolean supports(Class descriptionClass) { - return descriptionClass == BasicAmazonDeployDescription - } - - @Override - Map process(Map description) { - return new HashMap(description) + [ - "additionalKey": "additionalVal", - "amazon" : "true" - ] - } - } - - @Unroll - void "should only pre-process inputs of supported description classes"() { - when: - def output = OperationsController.processDescriptionInput( - descriptionPreProcessors as Collection, - converter, - descriptionInput - ) - - then: - output == expectedOutput - - where: - descriptionPreProcessors | converter | descriptionInput || expectedOutput - [] | new BasicAmazonDeployAtomicOperationConverter() | ["a": "b"] || ["a": "b"] - [googlePreProcessor] | new BasicAmazonDeployAtomicOperationConverter() | ["a": "b"] || ["a": "b"] - [googlePreProcessor, amazonPreProcessor] | new BasicAmazonDeployAtomicOperationConverter() | ["amazon": "false"] || ["additionalKey": "additionalVal", "amazon": "true"] - } - - @Configuration - static class TestConfig { - @Bean - Converter1 desc1() { - new Converter1() - } - - @Bean - Converter2 desc2() { - new Converter2() - } - } - - static class Op1 implements AtomicOperation { - Object operate(List priorOutputs) { - return null - } - } - - static class Op2 implements AtomicOperation { - Object operate(List priorOutputs) { - return null - } - } - - static class Converter1 implements AtomicOperationConverter { - AtomicOperation convertOperation(Map input) { - new Op1() - } - - Object convertDescription(Map input) { - return null - } - } - - static class Converter2 implements AtomicOperationConverter { - AtomicOperation convertOperation(Map input) { - new Op2() - } - - Object convertDescription(Map input) { - return null - } - } -} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectControllerSpec.groovy deleted file mode 100644 index 9039eff4792..00000000000 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/ProjectControllerSpec.groovy +++ /dev/null @@ -1,478 +0,0 @@ -/* - * Copyright 2015 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.spinnaker.clouddriver.controllers - -import com.netflix.spinnaker.clouddriver.configuration.ThreadPoolConfiguration -import com.netflix.spinnaker.clouddriver.core.services.Front50Service -import com.netflix.spinnaker.clouddriver.model.Cluster -import com.netflix.spinnaker.clouddriver.model.ClusterProvider -import com.netflix.spinnaker.clouddriver.model.LoadBalancer -import com.netflix.spinnaker.clouddriver.model.ServerGroup -import com.netflix.spinnaker.kork.web.exceptions.NotFoundException -import retrofit.RetrofitError -import spock.lang.Shared -import spock.lang.Specification - -import com.netflix.spinnaker.clouddriver.model.ServerGroup.InstanceCounts as InstanceCounts - -class ProjectControllerSpec extends Specification { - - @Shared - ProjectController projectController - - @Shared - Front50Service front50Service - - @Shared - ClusterProvider clusterProvider - - @Shared - String projectName - - @Shared - Map projectConfig - - def setup() { - projectController = new ProjectController(new ThreadPoolConfiguration()) - - front50Service = Mock(Front50Service) - projectController.front50Service = front50Service - - clusterProvider = Mock(ClusterProvider) - projectController.clusterProviders = [clusterProvider] - - projectName = "Spinnaker" - - projectConfig = [ - config: [ - applications: ["orca", "deck"], - clusters : [] - ] - ] - } - - void "throws ProjectNotFoundException when project config read fails"() { - setup: - projectName = "Spinnakers" - - when: - projectController.getClusters(projectName) - - then: - 1 * front50Service.getProject(projectName) >> { throw new RetrofitError("a", null, null, null, null, null, null) } - thrown NotFoundException - } - - void "returns an empty list without trying to retrieve applications when no clusters are configured"() { - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters == [] - 1 * front50Service.getProject(projectName) >> projectConfig - 0 * _ - } - - void "builds the very specific model we probably want for the project dashboard"() { - projectConfig.config.clusters = [ - [account: "prod", stack: "main"] - ] - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].account == "prod" - clusters[0].stack == "main" - clusters[0].detail == null - - clusters[0].applications[0].application == "orca" - clusters[0].applications[0].lastPush == 2L - clusters[0].applications[0].clusters[0].region == "us-east-1" - clusters[0].applications[0].clusters[0].lastPush == 2L - clusters[0].applications[0].clusters[0].instanceCounts.total == 1 - clusters[0].applications[0].clusters[0].instanceCounts.up == 1 - - clusters[0].applications[1].application == "deck" - clusters[0].applications[1].lastPush == 1L - clusters[0].applications[1].clusters[0].region == "us-west-1" - clusters[0].applications[1].clusters[0].lastPush == 1L - clusters[0].applications[1].clusters[0].instanceCounts.total == 2 
- clusters[0].applications[1].clusters[0].instanceCounts.down == 1 - clusters[0].applications[1].clusters[0].instanceCounts.up == 1 - - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [new TestCluster( - name: "orca-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 2L, new InstanceCounts(total: 1, up: 1)) - ] - )] - ] - 1 * clusterProvider.getClusterDetails("deck") >> [ - prod: [new TestCluster( - name: "deck-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "deck-main-v001", "us-west-1", 31, 1L, new InstanceCounts(total: 2, up: 1, down: 1)) - ] - )] - ] - } - - void "includes all applications if none specified for a cluster"() { - given: - projectConfig.config.clusters = [ - [account: "prod", stack: "main"] - ] - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].applications.application == ["orca", "deck"] - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [new TestCluster( - name: "orca-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)) - ] - )] - ] - 1 * clusterProvider.getClusterDetails("deck") >> [ - prod: [new TestCluster( - name: "deck-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "deck-main-v001", "us-west-1", 31, 1L, new InstanceCounts(total: 2, up: 1, down: 1)) - ] - )] - ] - } - - void "only returns specified applications if declared in cluster config"() { - projectConfig.config.clusters = [ - [account: "prod", stack: "main", applications: ["deck"]] - ] - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].applications.application == ["deck"] - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [new TestCluster( - name: "orca-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)) - ] - )] - ] - 1 * clusterProvider.getClusterDetails("deck") >> [ - prod: [new TestCluster( - name: "deck-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "deck-main-v001", "us-west-1", 31, 1L, new InstanceCounts(total: 2, up: 1, down: 1)) - ] - )] - ] - } - - void "includes all clusters on stack wildcard"() { - projectConfig.config.clusters = [ - [account: "prod", stack: "*", applications: ["orca"]] - ] - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].applications.application == ["orca"] - clusters[0].applications[0].lastPush == 5L - clusters[0].applications[0].clusters.size() == 2 - clusters[0].instanceCounts.total == 2 - clusters[0].instanceCounts.up == 2 - clusters[0].instanceCounts.starting == 0 - - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [ - new TestCluster( - name: "orca-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)) - ]), - new TestCluster( - name: "orca-test", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-test-v001", "us-west-1", 3, 5L, new InstanceCounts(total: 1, 
up: 1)) - ]), - new TestCluster( - name: "orca--foo", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca--foo-v001", "us-west-1", 3, 3L, new InstanceCounts(total: 1, starting: 1)) - ]), - ] - ] - } - - void "includes all clusters on detail wildcard"() { - projectConfig.config.clusters = [ - [account: "prod", detail: "*", applications: ["orca"]] - ] - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].applications.application == ["orca"] - clusters[0].applications[0].lastPush == 5L - clusters[0].applications[0].clusters.size() == 2 - clusters[0].instanceCounts.total == 2 - clusters[0].instanceCounts.up == 2 - clusters[0].instanceCounts.starting == 0 - - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [ - new TestCluster( - name: "orca--foo", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca--foo-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)) - ]), - new TestCluster( - name: "orca--bar", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca--bar-v001", "us-west-1", 3, 5L, new InstanceCounts(total: 1, up: 1)) - ]), - new TestCluster( - name: "orca-foo", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-foo-v001", "us-west-1", 3, 3L, new InstanceCounts(total: 1, starting: 1)) - ]), - ] - ] - } - - void "excludes disabled server groups"() { - projectConfig.config.clusters = [ - [account: "prod", stack: "main", applications: ["orca"]] - ] - - TestServerGroup disabledServerGroup = makeServerGroup("prod", "orca-main-v003", "us-east-1", 5, 5L, new InstanceCounts(total: 1, up: 1)) - disabledServerGroup.disabled = true - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].applications.application == ["orca"] - clusters[0].applications[0].lastPush == 4L - clusters[0].applications[0].clusters.size() == 1 - clusters[0].instanceCounts.total == 2 - clusters[0].instanceCounts.up == 2 - - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [ - new TestCluster( - name: "orca-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)), - makeServerGroup("prod", "orca-main-v002", "us-east-1", 4, 4L, new InstanceCounts(total: 1, up: 1)), - disabledServerGroup - ]), - ] - ] - } - - void "includes exactly matched clusters"() { - projectConfig.config.clusters = [ - [account: "prod", stack: "main", detail: "foo", applications: ["orca"]] - ] - - when: - def clusters = projectController.getClusters(projectName) - - then: - clusters.size() == 1 - clusters[0].applications.application == ["orca"] - clusters[0].applications[0].lastPush == 1L - clusters[0].applications[0].clusters.size() == 1 - clusters[0].instanceCounts.total == 1 - clusters[0].instanceCounts.up == 1 - - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [ - new TestCluster( - name: "orca-main-foo", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-foo-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)), - ]), - new TestCluster( - name: "orca-main-bar", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-bar-v002", "us-east-1", 4, 5L, new InstanceCounts(total: 1, up: 
1)), - ]), - new TestCluster( - name: "orca-main", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-v002", "us-east-1", 4, 6L, new InstanceCounts(total: 1, up: 1)), - ]), - new TestCluster( - name: "orca--foo", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca--foo-v002", "us-east-1", 4, 7L, new InstanceCounts(total: 1, up: 1)), - ]), - ] - ] - } - - void "includes all builds per region with latest deployment date, ignoring disabled server groups"() { - given: - projectConfig.config.clusters = [ - [account: "prod", stack: "main", detail: "foo", applications: ["orca"]] - ] - def disabledServerGroup = makeServerGroup("prod", "orca-main-foo-v005", "us-west-1", 6, 7L, new InstanceCounts(total: 1, up: 1)) - disabledServerGroup.disabled = true - - when: - def clusters = projectController.getClusters(projectName) - def eastCluster = clusters[0].applications[0].clusters.find { it.region == "us-east-1"} - def westCluster = clusters[0].applications[0].clusters.find { it.region == "us-west-1"} - - then: - clusters.size() == 1 - clusters[0].applications.application == ["orca"] - clusters[0].applications[0].lastPush == 6L - clusters[0].applications[0].clusters.size() == 2 - - eastCluster.lastPush == 1L - eastCluster.builds.size() == 1 - eastCluster.builds[0].buildNumber == "3" - eastCluster.builds[0].deployed == 1L - - westCluster.lastPush == 6L - westCluster.builds.size() == 2 - westCluster.builds[0].buildNumber == "4" - westCluster.builds[0].deployed == 2L - westCluster.builds[1].buildNumber == "5" - westCluster.builds[1].deployed == 6L - - clusters[0].instanceCounts.total == 4 - clusters[0].instanceCounts.up == 4 - - eastCluster.instanceCounts.total == 1 - eastCluster.instanceCounts.up == 1 - - westCluster.instanceCounts.total == 3 - westCluster.instanceCounts.up == 3 - - 1 * front50Service.getProject(projectName) >> projectConfig - 1 * clusterProvider.getClusterDetails("orca") >> [ - prod: [ - new TestCluster( - name: "orca-main-foo", - accountName: "prod", - serverGroups: [ - makeServerGroup("prod", "orca-main-foo-v001", "us-east-1", 3, 1L, new InstanceCounts(total: 1, up: 1)), - makeServerGroup("prod", "orca-main-foo-v003", "us-west-1", 4, 2L, new InstanceCounts(total: 1, up: 1)), - makeServerGroup("prod", "orca-main-foo-v004", "us-west-1", 5, 3L, new InstanceCounts(total: 1, up: 1)), - makeServerGroup("prod", "orca-main-foo-v005", "us-west-1", 5, 6L, new InstanceCounts(total: 1, up: 1)), - disabledServerGroup - ]) - ] - ] - } - - - TestServerGroup makeServerGroup(String account, String name, String region, Integer buildNumber, Long createdTime, InstanceCounts instanceCounts) { - new TestServerGroup( - name: name, - accountName: account, - region: region, - imageSummary: new TestImageSummary(buildInfo: [jenkins: [name: 'job', host: 'host', number: buildNumber.toString()]]), - createdTime: createdTime, - instanceCounts: instanceCounts, - ) - } - - static class TestImageSummary implements ServerGroup.ImageSummary { - String getServerGroupName() { null } - String getImageId() { null } - String getImageName() { null } - - Map getImage() { null } - - Map buildInfo - } - - static class TestServerGroup implements ServerGroup { - String name - String accountName - ServerGroup.ImageSummary imageSummary - ServerGroup.ImagesSummary imagesSummary - Long createdTime - InstanceCounts instanceCounts - String type = "test" - String cloudProvider = "test" - String region - Boolean disabled - Set instances = [] - Set loadBalancers - Set securityGroups - Map 
launchConfig - ServerGroup.Capacity capacity - Set zones - - Boolean isDisabled() { disabled } - } - - static class TestCluster implements Cluster { - String name - String type = "test" - String accountName - Set serverGroups - Set loadBalancers - } - -} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchControllerSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchControllerSpec.groovy index 7d7306c52ae..77b5a6b0367 100644 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchControllerSpec.groovy +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/SearchControllerSpec.groovy @@ -16,6 +16,10 @@ package com.netflix.spinnaker.clouddriver.controllers +import com.netflix.spinnaker.clouddriver.core.services.Front50Service +import com.netflix.spinnaker.clouddriver.search.ApplicationSearchProvider +import com.netflix.spinnaker.clouddriver.search.NoopSearchProvider +import com.netflix.spinnaker.clouddriver.search.ProjectSearchProvider import com.netflix.spinnaker.clouddriver.search.SearchProvider import com.netflix.spinnaker.clouddriver.search.SearchResultSet import spock.lang.Specification @@ -24,9 +28,9 @@ import javax.servlet.http.HttpServletRequest class SearchControllerSpec extends Specification { - SearchController searchController - SearchProvider searchProviderA - SearchProvider searchProviderB + SearchProvider searchProviderA = Mock(SearchProvider) + SearchProvider searchProviderB = Mock(SearchProvider) + SearchController searchController = new SearchController(searchProviders: [searchProviderA, searchProviderB]) HttpServletRequest request = Mock(HttpServletRequest) Enumeration enumeration = Mock(Enumeration) @@ -41,9 +45,6 @@ class SearchControllerSpec extends Specification { } def setup() { - searchProviderA = Mock(SearchProvider) - searchProviderB = Mock(SearchProvider) - searchController = new SearchController(searchProviders: [searchProviderA, searchProviderB]) } def 'query all search providers'() { @@ -136,6 +137,38 @@ class SearchControllerSpec extends Specification { searchResultSets == [resultSetA] } + def 'search on type for which there is no search provider'() { + setup: + Front50Service front50Service = Mock(Front50Service) + ApplicationSearchProvider applicationSearchProvider = new ApplicationSearchProvider(front50Service) + ProjectSearchProvider projectSearchProvider = new ProjectSearchProvider(front50Service) + // none of the 3 search providers support the search type + searchController = new SearchController(searchProviders: [applicationSearchProvider, projectSearchProvider, new NoopSearchProvider()]) + def filters = [ + q: "aBC", + type: ['serverGroups'], + page: 1, + pageSize: 10 + ] + + when: + List searchResultSets = getSearchResults(filters) + + then: + 1 * request.getParameterNames() >> enumeration + + searchResultSets == [ + new SearchResultSet( + totalMatches: 0, + pageNumber: 1, + pageSize: 10, + platform: 'aws', + query: 'aBC', + results: [] + ) + ] + } + def "if only one search provider, don't aggregate into an aws result"() { SearchResultSet rsA = Stub(SearchResultSet) searchController = new SearchController(searchProviders: [searchProviderA]) diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/DefaultAccountDefinitionService.java b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/DefaultAccountDefinitionService.java 
new file mode 100644 index 00000000000..83a35b69c2e --- /dev/null +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/DefaultAccountDefinitionService.java @@ -0,0 +1,33 @@ +/* + * Copyright 2023 The original authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers.resources; + +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository; +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionService; + +public class DefaultAccountDefinitionService extends AccountDefinitionService { + private final AccountDefinitionRepository repository; + + public DefaultAccountDefinitionService() { + this(new MapBackedAccountDefinitionRepository()); + } + + public DefaultAccountDefinitionService(AccountDefinitionRepository repository) { + super(repository, null, null, null, null); + this.repository = repository; + } +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/ManagedAccount.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/ManagedAccount.groovy new file mode 100644 index 00000000000..ccb239bc0ea --- /dev/null +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/ManagedAccount.groovy @@ -0,0 +1,39 @@ +/* + * Copyright 2023 The original authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers.resources + +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition + +class ManagedAccount implements CredentialsDefinition { + + String name + String accountType + + ManagedAccount(String name, String accountType) { + this.name = name + this.accountType = accountType + } + + @Override + String getName() { + name + } + + String getAccountType() { + accountType + } +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/MapBackedAccountDefinitionRepository.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/MapBackedAccountDefinitionRepository.groovy new file mode 100644 index 00000000000..c2a54f99e36 --- /dev/null +++ b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/controllers/resources/MapBackedAccountDefinitionRepository.groovy @@ -0,0 +1,106 @@ +/* + * Copyright 2023 The original authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.controllers.resources + +import com.netflix.spinnaker.clouddriver.security.AccountDefinitionRepository +import com.netflix.spinnaker.credentials.definition.CredentialsDefinition +import groovy.util.logging.Slf4j + +import javax.annotation.Nullable +import java.util.concurrent.ConcurrentHashMap + +/** + * An in-memory repository of {@link CredentialsDefinition} objects. + */ +@Slf4j +public class MapBackedAccountDefinitionRepository implements AccountDefinitionRepository { + private final Map map = new ConcurrentHashMap<>(); + + /** + * {@inheritDoc} + */ + @Override + public void delete(String name) { + map.remove(name) + } + + /** + * {@inheritDoc} + */ + @Override + CredentialsDefinition getByName(String name) { + return map.get(name) + } + + /** + * {@inheritDoc} + */ + @Override + List listByType(String typeName, int limit, @Nullable String startingAccountName) { + def list = listByType(typeName) + + if (startingAccountName != null) { + list = list.findAll { acct -> acct.name.startsWith(startingAccountName) } + } + + if (limit < list.size()) { + list = list.subList(0, limit) + } + + return list + } + + /** + * {@inheritDoc} + */ + @Override + List listByType(String typeName) { + return new ArrayList<>(map.findAll{_, definition -> definition.accountType == typeName}.values()) + } + + /** + * {@inheritDoc} + */ + @Override + void create(CredentialsDefinition definition) { + map.put(definition.getName(), definition) + } + + /** + * {@inheritDoc} + */ + @Override + void save(CredentialsDefinition definition) { + map.put(definition.getName(), definition) + } + + /** + * {@inheritDoc} + */ + @Override + void update(CredentialsDefinition definition) { + map.put(definition.getName(), definition) + } + + /** + * {@inheritDoc} + */ + @Override + List revisionHistory(String name) { + return List.of() + } +} diff --git a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueueSpec.groovy b/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueueSpec.groovy deleted file mode 100644 index b0a9af59ce0..00000000000 --- a/clouddriver-web/src/test/groovy/com/netflix/spinnaker/clouddriver/requestqueue/pooled/PooledRequestQueueSpec.groovy +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
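+ // A minimal sketch composing the three test fixtures above (account values invented):
+ //   MapBackedAccountDefinitionRepository repo = new MapBackedAccountDefinitionRepository();
+ //   repo.create(new ManagedAccount("test-account", "kubernetes"));
+ //   repo.getByName("test-account");            // -> the ManagedAccount just stored
+ //   repo.listByType("kubernetes", 10, "test"); // -> [that account]; prefix- and limit-filtered
+ //   new DefaultAccountDefinitionService(repo); // service backed by the in-memory repository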
- */ - -package com.netflix.spinnaker.clouddriver.requestqueue.pooled - -import com.netflix.spectator.api.NoopRegistry -import spock.lang.Specification - -import java.util.concurrent.Callable -import java.util.concurrent.CountDownLatch -import java.util.concurrent.atomic.AtomicBoolean - -class PooledRequestQueueSpec extends Specification { - def "should execute requests"() { - given: - def queue = new PooledRequestQueue(new NoopRegistry(), 1000, 1000, 1) - - when: - Long result = queue.execute("foo", { return 12345L }) - - then: - result == 12345L - } - - def "should time out if request does not complete"() { - given: - def queue = new PooledRequestQueue(new NoopRegistry(), 5000, 10, 1) - - when: - queue.execute("foo", { Thread.sleep(20); return 12345L }) - - then: - thrown(PromiseTimeoutException) - } - - def "should time out if request does not start in time"() { - given: "a queue with one worker thread" - def queue = new PooledRequestQueue(new NoopRegistry(), 10, 10, 1) - AtomicBoolean itRan = new AtomicBoolean(false) - Callable didItRun = { - itRan.set(true) - } - - when: "we start up a thread that blocks the pool" - def latch = new CountDownLatch(1) - Callable jerkThread = { - latch.countDown() - Thread.sleep(40) - } - - Thread.start { - try { - queue.execute("foo", jerkThread) - } catch (PromiseTimeoutException) { - //expected - } - } - - and: "try to start another" - latch.await() - queue.execute("foo", didItRun) - - then: "the second work is never started" - thrown(PromiseNotStartedException) - !itRan.get() - } -} diff --git a/clouddriver-web/src/test/java/com/netflix/spinnaker/clouddriver/config/ObjectMapperTest.java b/clouddriver-web/src/test/java/com/netflix/spinnaker/clouddriver/config/ObjectMapperTest.java new file mode 100644 index 00000000000..b66d0c63a2a --- /dev/null +++ b/clouddriver-web/src/test/java/com/netflix/spinnaker/clouddriver/config/ObjectMapperTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2022 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.config; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.kubernetes.model.KubernetesJobStatus; +import io.kubernetes.client.openapi.models.V1Job; +import io.kubernetes.client.openapi.models.V1ObjectMeta; +import io.kubernetes.client.openapi.models.V1Pod; +import io.kubernetes.client.openapi.models.V1PodCondition; +import io.kubernetes.client.openapi.models.V1PodStatus; +import java.time.OffsetDateTime; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringExtension; +import org.springframework.test.context.web.WebAppConfiguration; + +@ExtendWith(SpringExtension.class) +@WebAppConfiguration +@SpringBootTest(classes = Main.class) +@TestPropertySource( + properties = { + "redis.enabled = false", + "sql.enabled = false", + "spring.application.name = clouddriver" + }) +public class ObjectMapperTest { + + @Autowired private ObjectMapper objectMapper; + + @Test + public void testJodaTimeSerializationForKubernetesJob() { + V1Job job = new V1Job(); + V1ObjectMeta metadata = new V1ObjectMeta(); + metadata.setCreationTimestamp(OffsetDateTime.now()); + job.setMetadata(metadata); + KubernetesJobStatus kubernetesJobStatus = new KubernetesJobStatus(job, "kubernetesAccount"); + + V1Pod pod = new V1Pod(); + V1PodStatus status = new V1PodStatus(); + V1PodCondition condition = new V1PodCondition(); + condition.setLastTransitionTime(OffsetDateTime.now()); + status.setConditions(List.of(condition)); + pod.setStatus(status); + V1ObjectMeta metadataPod = new V1ObjectMeta(); + metadataPod.setName("podName"); + pod.setMetadata(metadata); + + KubernetesJobStatus.PodStatus podStatus = new KubernetesJobStatus.PodStatus(pod); + kubernetesJobStatus.setPods(List.of(podStatus)); + + assertDoesNotThrow(() -> objectMapper.writeValueAsString(kubernetesJobStatus)); + } +} diff --git a/clouddriver-web/src/test/java/com/netflix/spinnaker/clouddriver/controllers/KubernetesCustomPropertyBindingRefreshTest.java b/clouddriver-web/src/test/java/com/netflix/spinnaker/clouddriver/controllers/KubernetesCustomPropertyBindingRefreshTest.java new file mode 100644 index 00000000000..f1027c765e9 --- /dev/null +++ b/clouddriver-web/src/test/java/com/netflix/spinnaker/clouddriver/controllers/KubernetesCustomPropertyBindingRefreshTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2023 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.controllers; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.verify; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.listeners.ConfigurationRefreshListener; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.SpyBean; +import org.springframework.cloud.context.scope.refresh.RefreshScopeRefreshedEvent; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.web.WebAppConfiguration; +import org.springframework.test.web.servlet.MockMvc; +import org.springframework.test.web.servlet.setup.MockMvcBuilders; +import org.springframework.web.context.WebApplicationContext; + +@WebAppConfiguration +@SpringBootTest(classes = {Main.class}) +@TestPropertySource( + properties = { + "redis.enabled = false", + "sql.enabled = false", + "spring.application.name = clouddriver", + "kubernetes.enabled = true", + "management.endpoints.web.exposure.include = refresh", + "kubernetes.customPropertyBindingEnabled = true", + "spring.cloud.bootstrap.enabled = true", + "spring.cloud.config.server.bootstrap = true", + "spring.profiles.active = native", + "spring.cloud.config.server.native.search-locations = classpath:/" + }) +public class KubernetesCustomPropertyBindingRefreshTest { + private MockMvc mockMvc; + + @Autowired private WebApplicationContext webApplicationContext; + + @SpyBean private ConfigurationRefreshListener listener; + + @BeforeEach + void setup(TestInfo testInfo) { + System.out.println("--------------- Test " + testInfo.getDisplayName()); + mockMvc = MockMvcBuilders.webAppContextSetup(webApplicationContext).build(); + } + + @Test + public void testRefreshScopeRefreshedEvent() throws Exception { + mockMvc.perform(post("/refresh")).andExpect(status().isOk()); + + verify(listener).onApplicationEvent(any(RefreshScopeRefreshedEvent.class)); + } +} diff --git a/clouddriver-web/src/test/resources/application.properties b/clouddriver-web/src/test/resources/application.properties new file mode 100644 index 00000000000..f3b97762a34 --- /dev/null +++ b/clouddriver-web/src/test/resources/application.properties @@ -0,0 +1,2 @@ +services.fiat.baseUrl=https://fiat.net +services.front50.baseUrl=https://front50.net diff --git a/clouddriver-web/src/test/resources/cf-deploy-domains.json b/clouddriver-web/src/test/resources/cf-deploy-domains.json deleted file mode 100644 index f91a770aeec..00000000000 --- a/clouddriver-web/src/test/resources/cf-deploy-domains.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "cloudFoundryDeployDescription": { - "application": "my-neat-app", - "artifact": "cool-app.jar", - "memory": "512", - "credentials": "me@example.com", - "api": "https://api.whatever.com", - "org": "FrameworksAndRuntimes", - "space": "development", - "domains": ["example.com", "example.de"] - } - } -] \ No newline at end of file diff --git a/clouddriver-web/src/test/resources/cf-deploy-instances.json b/clouddriver-web/src/test/resources/cf-deploy-instances.json deleted file mode 100644 index a504fbc70e8..00000000000 --- 
a/clouddriver-web/src/test/resources/cf-deploy-instances.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "cloudFoundryDeployDescription": { - "application": "my-neat-app", - "artifact": "cool-app.jar", - "memory": "512", - "credentials": "me@example.com", - "api": "https://api.whatever.com", - "org": "FrameworksAndRuntimes", - "space": "development", - "instances": "5" - } - } -] \ No newline at end of file diff --git a/clouddriver-web/src/test/resources/cf-deploy-urls.json b/clouddriver-web/src/test/resources/cf-deploy-urls.json deleted file mode 100644 index ecdd912a7cc..00000000000 --- a/clouddriver-web/src/test/resources/cf-deploy-urls.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "cloudFoundryDeployDescription": { - "application": "my-neat-app", - "artifact": "cool-app.jar", - "memory": "512", - "credentials": "me@example.com", - "api": "https://api.whatever.com", - "org": "FrameworksAndRuntimes", - "space": "development", - "urls": ["my-neat-app-blue", "my-neat-app-green"] - } - } -] \ No newline at end of file diff --git a/clouddriver-web/src/test/resources/cf-deploy.json b/clouddriver-web/src/test/resources/cf-deploy.json deleted file mode 100644 index a8be9e7189f..00000000000 --- a/clouddriver-web/src/test/resources/cf-deploy.json +++ /dev/null @@ -1,13 +0,0 @@ -[ - { - "cloudFoundryDeployDescription": { - "application": "my-neat-app", - "artifact": "cool-app.jar", - "memory": "512", - "credentials": "me@example.com", - "api": "https://api.whatever.com", - "org": "FrameworksAndRuntimes", - "space": "development" - } - } -] \ No newline at end of file diff --git a/clouddriver-web/src/test/resources/clouddriver.yml b/clouddriver-web/src/test/resources/clouddriver.yml new file mode 100644 index 00000000000..0c8d67d2c70 --- /dev/null +++ b/clouddriver-web/src/test/resources/clouddriver.yml @@ -0,0 +1,5 @@ +kubernetes: + enabled: true + accounts: + - name: my-k8s-account + cacheIntervalSeconds: 60 diff --git a/clouddriver-web/src/test/resources/cool-app.jar b/clouddriver-web/src/test/resources/cool-app.jar deleted file mode 100644 index 04f16a4b165..00000000000 Binary files a/clouddriver-web/src/test/resources/cool-app.jar and /dev/null differ diff --git a/clouddriver-web/src/test/resources/logback.xml b/clouddriver-web/src/test/resources/logback.xml index 9e4d079929b..ff6cb007ddc 100644 --- a/clouddriver-web/src/test/resources/logback.xml +++ b/clouddriver-web/src/test/resources/logback.xml @@ -1,4 +1,28 @@ - + + - - \ No newline at end of file + + + %d{yyyy-MM-dd HH:mm:ss.SSS} %5p ${PID:- } --- [%15.15t] %-40.40logger{39} : [%X{X-SPINNAKER-USER}] %m%n + + + + + + + + diff --git a/clouddriver-yandex/clouddriver-yandex.gradle b/clouddriver-yandex/clouddriver-yandex.gradle new file mode 100644 index 00000000000..f404306f9a3 --- /dev/null +++ b/clouddriver-yandex/clouddriver-yandex.gradle @@ -0,0 +1,51 @@ +plugins { + id("net.ltgt.errorprone") version "4.0.0" +} + +dependencies { + errorprone("com.google.errorprone:error_prone_core:2.28.0") + implementation project(":cats:cats-core") + implementation project(":clouddriver-api") + implementation project(":clouddriver-core") + implementation project(":clouddriver-security") + + //exclude BC -jdk15on libraries to remove CVE-2023-33201. 
*-jdk18on libraries are already present in the classpath + implementation ('com.yandex.cloud:java-sdk-services:2.1.1'){ + exclude group: "org.bouncycastle", module: "bcpkix-jdk15on" + exclude group: "org.bouncycastle", module: "bcprov-jdk15on" + } + compileOnly "io.opencensus:opencensus-api" + compileOnly "io.opencensus:opencensus-contrib-grpc-metrics" + implementation "org.apache.groovy:groovy-datetime" + implementation "org.apache.commons:commons-lang3" + implementation "com.netflix.frigga:frigga" + implementation "com.netflix.spectator:spectator-api" + implementation "io.spinnaker.kork:kork-artifacts" + implementation "io.spinnaker.kork:kork-config" + implementation "io.spinnaker.kork:kork-moniker" + implementation "com.squareup.retrofit:retrofit" + implementation "org.springframework.boot:spring-boot-actuator" + implementation "org.springframework.boot:spring-boot-starter-web" + implementation "com.google.protobuf:protobuf-java" + implementation "com.google.protobuf:protobuf-java-util" + + testImplementation project(":clouddriver-web") + testImplementation "cglib:cglib-nodep" + testImplementation "commons-fileupload:commons-fileupload:1.4" + testImplementation "org.apache.httpcomponents:httpmime" + testImplementation "org.assertj:assertj-core" + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.mockito:mockito-core" + testImplementation "org.mockito:mockito-junit-jupiter" + testImplementation "org.objenesis:objenesis" + testImplementation "org.spockframework:spock-core" + testImplementation "org.spockframework:spock-spring" + testImplementation "org.springframework:spring-test" + testImplementation "org.springframework.boot:spring-boot-test" + testImplementation "org.springframework.boot:spring-boot-starter-test" +} + +configurations.all { + resolutionStrategy.force 'io.opencensus:opencensus-api:0.21.0' + resolutionStrategy.force 'io.opencensus:opencensus-contrib-grpc-metrics:0.21.0' +} diff --git a/clouddriver-yandex/lombok.config b/clouddriver-yandex/lombok.config new file mode 100644 index 00000000000..d5306c0c526 --- /dev/null +++ b/clouddriver-yandex/lombok.config @@ -0,0 +1 @@ +lombok.accessors.chain = false diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/CacheResultBuilder.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/CacheResultBuilder.java new file mode 100644 index 00000000000..e3f3253013b --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/CacheResultBuilder.java @@ -0,0 +1,121 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex; + +import static java.util.stream.Collectors.toMap; +import static java.util.stream.Collectors.toSet; + +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.cats.agent.AgentDataType; +import com.netflix.spinnaker.cats.agent.AgentDataType.Authority; +import com.netflix.spinnaker.cats.agent.DefaultCacheResult; +import com.netflix.spinnaker.cats.cache.CacheData; +import com.netflix.spinnaker.cats.cache.DefaultCacheData; +import java.util.*; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.RequiredArgsConstructor; + +@Data +public class CacheResultBuilder { + private Long startTime; + private CacheMutation onDemand = new CacheMutation(); + private Set authoritativeTypes = ImmutableSet.of(); + private Map namespaceBuilders = new HashMap<>(); + + public CacheResultBuilder() {} + + /** + * Create a CacheResultBuilder for the given dataTypes. + * + *
<p>
Any authoritative types in dataTypes are guaranteed to be listed in the output. If you say + * you are authoritative for "clusters", but don't include any data under that namespace, an empty + * list will be included in the result. (Whereas if you don't pass dataTypes to the constructor, + * "clusters" will just be missing from the result if you don't specify any, and any existing + * clusters will remain in the cache). + */ + public CacheResultBuilder(Collection dataTypes) { + authoritativeTypes = + dataTypes.stream() + .filter(dataType -> dataType.getAuthority().equals(Authority.AUTHORITATIVE)) + .map(AgentDataType::getTypeName) + .collect(toSet()); + } + + public NamespaceBuilder namespace(String ns) { + return namespaceBuilders.computeIfAbsent(ns, NamespaceBuilder::new); + } + + public DefaultCacheResult build() { + Map> keep = new HashMap<>(); + Map> evict = new HashMap<>(); + namespaceBuilders.forEach( + (namespace, nsBuilder) -> { + CacheMutation buildResult = nsBuilder.build(); + if (!buildResult.getToKeep().isEmpty()) { + keep.put(namespace, buildResult.getToKeep().values()); + } + + if (!buildResult.getToEvict().isEmpty()) { + evict.put(namespace, buildResult.getToEvict()); + } + }); + return new DefaultCacheResult(keep, evict); + } + + @Data + @RequiredArgsConstructor + public class NamespaceBuilder { + private final String namepace; + private final Map toKeep = new HashMap<>(); + private List toEvict = new ArrayList<>(); + + public CacheDataBuilder keep(String key) { + return toKeep.computeIfAbsent(key, CacheDataBuilder::new); + } + + public CacheMutation build() { + Map keepers = + toKeep.entrySet().stream().collect(toMap(Map.Entry::getKey, o -> o.getValue().build())); + return new CacheMutation(keepers, toEvict); + } + } + + @AllArgsConstructor + @Data + public class CacheMutation { + private final Map toKeep; + private final List toEvict; + + CacheMutation() { + this(new HashMap<>(), new ArrayList<>()); + } + } + + @RequiredArgsConstructor + @Data + public class CacheDataBuilder { + private final String id; + private int ttlSeconds = -1; + private Map attributes = new HashMap<>(); + private Map> relationships = new HashMap<>(); + + public DefaultCacheData build() { + return new DefaultCacheData(id, ttlSeconds, attributes, relationships); + } + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/YandexCloudProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/YandexCloudProvider.java new file mode 100644 index 00000000000..8db878d6d15 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/YandexCloudProvider.java @@ -0,0 +1,45 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
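+ // A minimal usage sketch for CacheResultBuilder above (dataTypes stands in for an agent's types):
+ //   CacheResultBuilder crb = new CacheResultBuilder(dataTypes);
+ //   crb.namespace("serverGroups").keep("some-key").getAttributes().put("name", "app-stack-v001");
+ //   DefaultCacheResult result = crb.build(); // keeps "serverGroups"/"some-key", evicts nothing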
+ */ + +package com.netflix.spinnaker.clouddriver.yandex; + +import com.netflix.spinnaker.clouddriver.core.CloudProvider; +import java.lang.annotation.Annotation; +import org.springframework.stereotype.Component; + +@Component +public class YandexCloudProvider implements CloudProvider { + public static final String ID = "yandex"; + public static final String REGION = "ru-central1"; + private final String displayName = "Yandex.Cloud"; + + private final Class operationAnnotationType = YandexOperation.class; + + @Override + public String getId() { + return ID; + } + + @Override + public String getDisplayName() { + return displayName; + } + + @Override + public Class getOperationAnnotationType() { + return operationAnnotationType; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/YandexOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/YandexOperation.java new file mode 100644 index 00000000000..e9fc5a7702c --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/YandexOperation.java @@ -0,0 +1,28 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Target({ElementType.TYPE, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +public @interface YandexOperation { + String value(); +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexClusterController.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexClusterController.java new file mode 100644 index 00000000000..86b3342bbe4 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexClusterController.java @@ -0,0 +1,93 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.controller; + +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexLogRecord; +import com.netflix.spinnaker.clouddriver.yandex.provider.view.YandexClusterProvider; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.time.Instant; +import java.util.List; +import java.util.stream.Collectors; +import lombok.Value; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.*; + +@RestController +@RequestMapping( + "/applications/{application}/clusters/{account}/{clusterName}/yandex/serverGroups/{serverGroupName}") +public class YandexClusterController { + private final AccountCredentialsProvider accountCredentialsProvider; + private final YandexClusterProvider yandexClusterProvider; + private final YandexCloudFacade yandexCloudFacade; + + @Autowired + public YandexClusterController( + AccountCredentialsProvider accountCredentialsProvider, + YandexClusterProvider yandexClusterProvider, + YandexCloudFacade yandexCloudFacade) { + this.accountCredentialsProvider = accountCredentialsProvider; + this.yandexClusterProvider = yandexClusterProvider; + this.yandexCloudFacade = yandexCloudFacade; + } + + @RequestMapping(value = "/scalingActivities", method = RequestMethod.GET) + public ResponseEntity> getScalingActivities( + @PathVariable String account, + @PathVariable String serverGroupName, + @RequestParam(value = "region") String region) { + AccountCredentials credentials = accountCredentialsProvider.getCredentials(account); + if (!(credentials instanceof YandexCloudCredentials)) { + return ResponseEntity.badRequest().build(); + } + + YandexCloudServerGroup serverGroup = + yandexClusterProvider.getServerGroup(account, region, serverGroupName); + if (serverGroup == null) { + return ResponseEntity.notFound().build(); + } + + List yandexLogRecordList = + yandexCloudFacade.getLogRecords((YandexCloudCredentials) credentials, serverGroup.getId()); + return ResponseEntity.ok( + yandexLogRecordList.stream().map(this::getActivity).collect(Collectors.toList())); + } + + @NotNull + private YandexClusterController.Activity getActivity(YandexLogRecord record) { + return new Activity( + "details", + record.getMessage(), + "cause: " + record.getMessage(), + "Successful", + record.getTimstamp()); + } + + @Value + public static class Activity { + String details; + String description; + String cause; + String statusCode; + Instant startTime; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexImageController.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexImageController.java new file mode 100644 index 00000000000..15c1039dd8d --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexImageController.java @@ -0,0 +1,124 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.controller; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.cats.mem.InMemoryCache; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage; +import com.netflix.spinnaker.clouddriver.yandex.provider.view.YandexImageProvider; +import groovy.util.logging.Slf4j; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.servlet.http.HttpServletRequest; +import lombok.AllArgsConstructor; +import lombok.Data; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@Slf4j +@RestController +@RequestMapping("/yandex/images") +public class YandexImageController { + private final YandexImageProvider yandexImageProvider; + + @Autowired + private YandexImageController(YandexImageProvider yandexImageProvider) { + this.yandexImageProvider = yandexImageProvider; + } + + @RequestMapping(value = "/find", method = RequestMethod.GET) + public List list( + @RequestParam(required = false) String q, + @RequestParam(required = false) String account, + HttpServletRequest request) { + return (Strings.isNullOrEmpty(account) + ? yandexImageProvider.getAll() + : yandexImageProvider.findByAccount(account)) + .stream() + .map(YandexImageController::convertToYandexImage) + .filter(getQueryFilter(q)) + .filter(getTagFilter(request)) + .sorted(Comparator.comparing(YandexImage::getImageName)) + .collect(Collectors.toList()); + } + + private static YandexImage convertToYandexImage(YandexCloudImage image) { + return new YandexImage( + image.getId(), image.getName(), image.getRegion(), image.getCreatedAt(), image.getLabels()); + } + + private Predicate getQueryFilter(String q) { + if (q == null || q.trim().length() <= 0) { + return yandexImage -> true; + } + String glob = q.trim(); + // Wrap in '*' if there are no glob-style characters in the query string. 
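+ // For example, a plain query "ubuntu" becomes the glob "*ubuntu*" and matches any image name
+ // containing "ubuntu", while a query such as "ubuntu-*" already contains glob characters and
+ // is compiled by InMemoryCache.Glob unchanged.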
+ if (!glob.contains("*") && !glob.contains("?") && !glob.contains("[") && !glob.contains("\\")) { + glob = "*" + glob + "*"; + } + Pattern pattern = new InMemoryCache.Glob(glob).toPattern(); + return i -> pattern.matcher(i.imageName).matches(); + } + + private Predicate getTagFilter(HttpServletRequest request) { + Map tagFilters = extractTagFilters(request); + if (tagFilters.isEmpty()) { + return i -> true; + } + return i -> matchesTagFilters(i, tagFilters); + } + + private static boolean matchesTagFilters(YandexImage image, Map tagFilters) { + Map tags = image.getTags(); + return tagFilters.keySet().stream() + .allMatch( + tag -> + tags.containsKey(tag.toLowerCase()) + && tags.get(tag.toLowerCase()).equalsIgnoreCase(tagFilters.get(tag))); + } + + private static Map extractTagFilters(HttpServletRequest httpServletRequest) { + return Collections.list(httpServletRequest.getParameterNames()).stream() + .filter(Objects::nonNull) + .filter(name -> name.toLowerCase().startsWith("tag:")) + .collect( + Collectors.toMap( + tagParameter -> tagParameter.replaceAll("tag:", "").toLowerCase(), + httpServletRequest::getParameter, + (a, b) -> b)); + } + + /** Used in deck and orca, probably better rename in YandexCloudImage */ + @Data + @AllArgsConstructor + public static class YandexImage { + String imageId; + String imageName; + String region; + Long createdAt; + Map tags; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexServiceAccountController.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexServiceAccountController.java new file mode 100644 index 00000000000..4da2d4f820f --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexServiceAccountController.java @@ -0,0 +1,53 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.controller; + +import com.google.common.base.Strings; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServiceAccount; +import com.netflix.spinnaker.clouddriver.yandex.provider.view.YandexServiceAccountProvider; +import groovy.util.logging.Slf4j; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RestController; + +@Slf4j +@RestController +@RequestMapping("/yandex/serviceAcounts") +public class YandexServiceAccountController { + private final YandexServiceAccountProvider yandexServiceAccountProvider; + + @Autowired + private YandexServiceAccountController( + YandexServiceAccountProvider yandexServiceAccountProvider) { + this.yandexServiceAccountProvider = yandexServiceAccountProvider; + } + + @RequestMapping(value = "/{account}", method = RequestMethod.GET) + public List list(@PathVariable String account) { + return (Strings.isNullOrEmpty(account) + ? yandexServiceAccountProvider.getAll() + : yandexServiceAccountProvider.findByAccount(account)) + .stream() + .sorted(Comparator.comparing(YandexCloudServiceAccount::getName)) + .collect(Collectors.toList()); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/YandexDeployHandler.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/YandexDeployHandler.java new file mode 100644 index 00000000000..2331a8e4cca --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/YandexDeployHandler.java @@ -0,0 +1,107 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy; + +import static com.netflix.spinnaker.clouddriver.yandex.deploy.ops.AbstractYandexAtomicOperation.status; +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.InstanceGroup; +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.ScalePolicy; + +import com.netflix.spinnaker.clouddriver.deploy.DeployDescription; +import com.netflix.spinnaker.clouddriver.deploy.DeployHandler; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.Collections; +import java.util.List; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +@Slf4j +@Data +public class YandexDeployHandler implements DeployHandler { + private static final String BASE_PHASE = "DEPLOY"; + + @Autowired private final YandexCloudFacade yandexCloudFacade; + + @Override + public boolean handles(DeployDescription description) { + return description instanceof YandexInstanceGroupDescription; + } + + @Override + @SuppressWarnings("rawtypes") + public DeploymentResult handle(YandexInstanceGroupDescription description, List priorOutputs) { + YandexCloudCredentials credentials = description.getCredentials(); + status( + BASE_PHASE, + "Initializing creation of server group for application '%s' stack '%s'...", + description.getApplication(), + description.getStack()); + status(BASE_PHASE, "Looking up next sequence..."); + description.produceServerGroupName(); + status(BASE_PHASE, "Produced server group name '%s'.", description.getName()); + description.saturateLabels(); + status(BASE_PHASE, "Composing server group '%s'...", description.getName()); + InstanceGroup createdInstanceGroup = + yandexCloudFacade.createInstanceGroup(BASE_PHASE, credentials, description); + if (Boolean.TRUE.equals(description.getEnableTraffic()) && description.getBalancers() != null) { + String targetGroupId = createdInstanceGroup.getLoadBalancerState().getTargetGroupId(); + yandexCloudFacade.enableInstanceGroup( + BASE_PHASE, credentials, targetGroupId, description.getBalancers()); + } + status(BASE_PHASE, "Done creating server group '%s'.", description.getName()); + return makeDeploymentResult(createdInstanceGroup, credentials); + } + + @NotNull // todo: don't use InstanceGroup... 
will be fixed with cache agent refactoring + private DeploymentResult makeDeploymentResult( + InstanceGroup result, YandexCloudCredentials credentials) { + DeploymentResult.Deployment deployment = new DeploymentResult.Deployment(); + deployment.setAccount(credentials.getName()); + DeploymentResult.Deployment.Capacity capacity = new DeploymentResult.Deployment.Capacity(); + if (result.getScalePolicy().hasAutoScale()) { + ScalePolicy.AutoScale autoScale = result.getScalePolicy().getAutoScale(); + capacity.setMin( + (int) (autoScale.getMinZoneSize() * result.getAllocationPolicy().getZonesCount())); + capacity.setMax((int) autoScale.getMaxSize()); + capacity.setDesired((int) autoScale.getInitialSize()); + } else { + int size = (int) result.getScalePolicy().getFixedScale().getSize(); + capacity.setMin(size); + capacity.setMax(size); + capacity.setDesired(size); + } + deployment.setCapacity(capacity); + deployment.setCloudProvider(YandexCloudProvider.ID); + String instanceGroupName = result.getName(); + deployment.setServerGroupName(instanceGroupName); + + DeploymentResult deploymentResult = new DeploymentResult(); + deploymentResult.setServerGroupNames( + Collections.singletonList(YandexCloudProvider.REGION + ":" + instanceGroupName)); + deploymentResult.setServerGroupNameByRegion( + Collections.singletonMap(YandexCloudProvider.REGION, instanceGroupName)); + deploymentResult.setDeployments(Collections.singleton(deployment)); + return deploymentResult; + }
+}
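The capacity arithmetic in makeDeploymentResult is the one subtle part: the autoscale minimum is a per-zone bound, so it is multiplied by the zone count, while max and desired are already group-wide. A self-contained sketch of the same arithmetic with hypothetical numbers (none of these values come from the patch):

// Standalone sketch of the autoscale capacity math above; the numbers are
// invented and only the arithmetic mirrors makeDeploymentResult.
public class CapacityMathSketch {
  public static void main(String[] args) {
    long minZoneSize = 2; // autoscale lower bound, per zone
    long maxSize = 12;    // autoscale upper bound, group-wide
    long initialSize = 6; // starting size, group-wide
    int zonesCount = 3;   // zones in the allocation policy

    int min = (int) (minZoneSize * zonesCount); // 6: per-zone bound scaled up
    int max = (int) maxSize;                    // 12: already group-wide
    int desired = (int) initialSize;            // 6
    System.out.printf("min=%d max=%d desired=%d%n", min, max, desired);
  }
}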
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/YandexServerGroupNameResolver.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/YandexServerGroupNameResolver.java new file mode 100644 index 00000000000..944aea8ea5e --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/YandexServerGroupNameResolver.java @@ -0,0 +1,75 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy; + +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.InstanceGroup; +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceOuterClass.ListInstanceGroupsRequest; + +import com.netflix.spinnaker.clouddriver.helpers.AbstractServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.names.NamerRegistry; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.moniker.Namer; +import java.time.Instant; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.groovy.datetime.extensions.DateTimeExtensions; + +public class YandexServerGroupNameResolver extends AbstractServerGroupNameResolver { + private static final String PHASE = "YANDEX_DEPLOY"; + private final YandexCloudCredentials credentials; + private final Namer<InstanceGroup> naming; + + public YandexServerGroupNameResolver(YandexCloudCredentials credentials) { + this.credentials = credentials; + this.naming = + NamerRegistry.lookup() + .withProvider(YandexCloudProvider.ID) + .withAccount(credentials.getName()) + .withResource(InstanceGroup.class); + } + + @Override + public String combineAppStackDetail(String appName, String stack, String detail) { + return super.combineAppStackDetail(appName, stack, detail); + } + + @Override + public String getPhase() { + return PHASE; + } + + @Override + public String getRegion() { + return YandexCloudProvider.REGION; + } + + @Override + public List<TakenSlot> getTakenSlots(String clusterName) { + ListInstanceGroupsRequest request = + ListInstanceGroupsRequest.newBuilder().setFolderId(credentials.getFolder()).build(); + return credentials.instanceGroupService().list(request).getInstanceGroupsList().stream() + .map( + group -> + new TakenSlot( + group.getName(), + naming.deriveMoniker(group).getSequence(), + DateTimeExtensions.toDate( + Instant.ofEpochSecond(group.getCreatedAt().getSeconds())))) + .collect(Collectors.toList()); + } +}
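getTakenSlots only reports which sequence numbers a cluster already occupies; the base AbstractServerGroupNameResolver then derives the next name, which, as I read the base class's contract, means incrementing the highest taken sequence. A standalone illustration under that assumption, with made-up names:

import java.util.Arrays;
import java.util.List;

// Illustration of what getTakenSlots enables; assumes the base resolver
// increments the highest existing sequence. Names are invented.
public class NextNameSketch {
  public static void main(String[] args) {
    List<Integer> takenSequences = Arrays.asList(1, 3); // e.g. v001 and v003 exist
    int next = takenSequences.stream().mapToInt(Integer::intValue).max().orElse(-1) + 1;
    System.out.printf("myapp-prod-v%03d%n", next); // myapp-prod-v004
  }
}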
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/OperationConverter.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/OperationConverter.java new file mode 100644 index 00000000000..e4f077416ed --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/OperationConverter.java @@ -0,0 +1,78 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.converter; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.CredentialsChangeable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.Map; +import java.util.function.Function; +import javax.annotation.Nullable; + +public class OperationConverter<T extends CredentialsChangeable, E extends AtomicOperation<?>> + extends AbstractAtomicOperationsCredentialsSupport { + private Function<T, E> constructor; + private Class<T> clazz; + + public OperationConverter(Function<T, E> constructor, Class<T> clazz) { + this.constructor = constructor; + this.clazz = clazz; + } + + @Nullable + @Override + public E convertOperation(Map input) { + return constructor.apply(convertDescription(input)); + } + + @Override + public T convertDescription(Map input) { + return convertDescription(input, this, clazz); + } + + public T convertDescription( + Map input, + AbstractAtomicOperationsCredentialsSupport credentialsSupport, + Class<T> targetDescriptionType) { + + if (!input.containsKey("accountName")) { + input.put("accountName", input.get("credentials")); + } + + if (input.get("accountName") != null) { + input.put( + "credentials", + credentialsSupport.getCredentialsObject(String.valueOf(input.get("accountName")))); + } + + // Save these to re-assign after ObjectMapper does its work. + Object credentials = input.remove("credentials"); + + T t = + credentialsSupport + .getObjectMapper() + .copy() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + .convertValue(input, targetDescriptionType); + if (credentials instanceof YandexCloudCredentials) { + t.setCredentials((YandexCloudCredentials) credentials); + } + return t; + } +}
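The credentials handling in convertDescription is the non-obvious part: the account name is mirrored into accountName, resolved to a credentials object, removed before Jackson binds the map (so binding never sees it), and re-attached to the typed description afterwards. A standalone schematic of just that map massaging, with an illustrative account name and the credentials lookup stubbed out as a string:

import java.util.HashMap;
import java.util.Map;

// Schematic of the map handling in convertDescription; "my-yandex-account"
// is invented and the real lookup returns a YandexCloudCredentials object.
public class ConvertDescriptionSketch {
  public static void main(String[] args) {
    Map<String, Object> input = new HashMap<>();
    input.put("credentials", "my-yandex-account");
    input.put("serverGroupName", "myapp-prod-v004");

    if (!input.containsKey("accountName")) {
      input.put("accountName", input.get("credentials"));
    }
    // Stand-in for credentialsSupport.getCredentialsObject(...):
    input.put("credentials", "resolved:" + input.get("accountName"));
    // Removed so the ObjectMapper never tries to bind the credentials object;
    Object credentials = input.remove("credentials");
    // the remaining map is bound to the description type, and the credentials
    // object is then set back on the resulting description.
    System.out.println(input + " / " + credentials);
  }
}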
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/YandexOperationConvertersFactory.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/YandexOperationConvertersFactory.java new file mode 100644 index 00000000000..cc70f1f91df --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/YandexOperationConvertersFactory.java @@ -0,0 +1,184 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.converter; + +import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations; +import com.netflix.spinnaker.clouddriver.yandex.YandexOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.DeleteYandexLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.DestroyYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.EnableDisableYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.RebootYandexInstancesDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.ResizeYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.UpsertYandexImageTagsDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.UpsertYandexLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.CloneYandexServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.DeleteYandexLoadBalancerAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.DestroyYandexServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.DisableYandexServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.EnableYandexServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.ModifyYandexInstanceGroupOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.RebootYandexInstancesAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.ResizeYandexServerGroupAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.UpsertYandexImageTagsAtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.ops.UpsertYandexLoadBalancerAtomicOperation; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; + +/** + * Populates Yandex Atomic Operations Converters + * + * <p>The idea was to create a factory class with annotated methods: + * + * <pre>{@code
+ * @Bean(name = "upsertYandexLoadBalancerAtomicOperationConverter")
+ * @YandexOperation(AtomicOperations.UPSERT_LOAD_BALANCER)
+ * public OperationConverter<
+ *     UpsertYandexLoadBalancerDescription, UpsertYandexLoadBalancerAtomicOperation> upsertLoadBalancer() {
+ *   return new OperationConverter<>(
+ *       UpsertYandexLoadBalancerAtomicOperation::new, UpsertYandexLoadBalancerDescription.class);
+ * }
+ * } + * }</pre> + * + * Could be implemented after a couple of fixes in AnnotationsBasedAtomicOperationsRegistry: + * + * <pre>{@code
+ * String descriptionName = value.getClass().getAnnotation(providerAnnotationType).value();
+ * VersionedDescription converterVersion = VersionedDescription.from(descriptionName);
+ * }</pre> + * + * and process Component's and Bean's in FeaturesController. + */ +@Configuration +public class YandexOperationConvertersFactory { + @Component("cloneYandexServerGroupAtomicOperationConverter") + @YandexOperation(AtomicOperations.CLONE_SERVER_GROUP) + public static class CloneServerGroup + extends OperationConverter< + YandexInstanceGroupDescription, CloneYandexServerGroupAtomicOperation> { + public CloneServerGroup() { + super(CloneYandexServerGroupAtomicOperation::new, YandexInstanceGroupDescription.class); + } + } + + @Component("createYandexServerGroupAtomicOperationConverter") + @YandexOperation(AtomicOperations.CREATE_SERVER_GROUP) + public static class CreateServerGroup + extends OperationConverter<YandexInstanceGroupDescription, DeployAtomicOperation> { + public CreateServerGroup() { + super(DeployAtomicOperation::new, YandexInstanceGroupDescription.class); + } + } + + @Component("deleteYandexLoadBalancerAtomicOperationConverter") + @YandexOperation(AtomicOperations.DELETE_LOAD_BALANCER) + public static class DeleteLoadBalancer + extends OperationConverter< + DeleteYandexLoadBalancerDescription, DeleteYandexLoadBalancerAtomicOperation> { + public DeleteLoadBalancer() { + super( + DeleteYandexLoadBalancerAtomicOperation::new, DeleteYandexLoadBalancerDescription.class); + } + } + + @Component("destroyYandexServerGroupAtomicOperationConverter") + @YandexOperation(AtomicOperations.DESTROY_SERVER_GROUP) + public static class DestroyServerGroup + extends OperationConverter< + DestroyYandexServerGroupDescription, DestroyYandexServerGroupAtomicOperation> { + public DestroyServerGroup() { + super( + DestroyYandexServerGroupAtomicOperation::new, DestroyYandexServerGroupDescription.class); + } + } + + @Component("disableYandexServerGroupAtomicOperationConverter") + @YandexOperation(AtomicOperations.DISABLE_SERVER_GROUP) + public static class DisableServerGroup + extends OperationConverter< + EnableDisableYandexServerGroupDescription, DisableYandexServerGroupAtomicOperation> { + public DisableServerGroup() { + super( + DisableYandexServerGroupAtomicOperation::new, + EnableDisableYandexServerGroupDescription.class); + } + } + + @Component("enableYandexServerGroupAtomicOperationConverter") + @YandexOperation(AtomicOperations.ENABLE_SERVER_GROUP) + public static class EnableServerGroup + extends OperationConverter< + EnableDisableYandexServerGroupDescription, EnableYandexServerGroupAtomicOperation> { + public EnableServerGroup() { + super( + EnableYandexServerGroupAtomicOperation::new, + EnableDisableYandexServerGroupDescription.class); + } + } + + @Component("rebootYandexInstancesAtomicOperationConverter") + @YandexOperation(AtomicOperations.REBOOT_INSTANCES) + public static class RebootInstances + extends OperationConverter< + RebootYandexInstancesDescription, RebootYandexInstancesAtomicOperation> { + public RebootInstances() { + super(RebootYandexInstancesAtomicOperation::new, RebootYandexInstancesDescription.class); + } + } + + @Component("resizeYandexServerGroupAtomicOperationConverter") + @YandexOperation(AtomicOperations.RESIZE_SERVER_GROUP) + public static class ResizeServerGroup + extends OperationConverter< + ResizeYandexServerGroupDescription, ResizeYandexServerGroupAtomicOperation> { + public ResizeServerGroup() { + super(ResizeYandexServerGroupAtomicOperation::new, ResizeYandexServerGroupDescription.class); + } + } + + @Component("upsertYandexImageTagsAtomicOperationConverter") + @YandexOperation(AtomicOperations.UPSERT_IMAGE_TAGS) + public static class UpsertImageTags + extends OperationConverter< +
UpsertYandexImageTagsDescription, UpsertYandexImageTagsAtomicOperation> { + public UpsertImageTags() { + super(UpsertYandexImageTagsAtomicOperation::new, UpsertYandexImageTagsDescription.class); + } + } + + @Component("upsertYandexLoadBalancerAtomicOperationConverter") + @YandexOperation(AtomicOperations.UPSERT_LOAD_BALANCER) + public static class UpsertLoadBalancer + extends OperationConverter< + UpsertYandexLoadBalancerDescription, UpsertYandexLoadBalancerAtomicOperation> { + public UpsertLoadBalancer() { + super( + UpsertYandexLoadBalancerAtomicOperation::new, UpsertYandexLoadBalancerDescription.class); + } + } + + @Component("yandexModifyInstanceGroupOperationConverter") + @YandexOperation(AtomicOperations.UPDATE_LAUNCH_CONFIG) + public static class ModifyInstanceGroup + extends OperationConverter< + YandexInstanceGroupDescription, ModifyYandexInstanceGroupOperation> { + public ModifyInstanceGroup() { + super(ModifyYandexInstanceGroupOperation::new, YandexInstanceGroupDescription.class); + } + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/CredentialsChangeable.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/CredentialsChangeable.java new file mode 100644 index 00000000000..60b6dba68ac --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/CredentialsChangeable.java @@ -0,0 +1,24 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; + +public interface CredentialsChangeable extends CredentialsNameable { + void setCredentials(YandexCloudCredentials credentials); +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/DeleteYandexLoadBalancerDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/DeleteYandexLoadBalancerDescription.java new file mode 100644 index 00000000000..37a7bb3a552 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/DeleteYandexLoadBalancerDescription.java @@ -0,0 +1,37 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.Collection; +import java.util.Collections; +import lombok.Data; + +@Data +public class DeleteYandexLoadBalancerDescription + implements CredentialsChangeable, ApplicationNameable { + private YandexCloudCredentials credentials; + + private String loadBalancerName; + + @Override + public Collection<String> getApplications() { + return Collections.singletonList(Names.parseName(loadBalancerName).getApp()); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/DestroyYandexServerGroupDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/DestroyYandexServerGroupDescription.java new file mode 100644 index 00000000000..c8d443f64c0 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/DestroyYandexServerGroupDescription.java @@ -0,0 +1,29 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import lombok.Data; + +@Data +public class DestroyYandexServerGroupDescription + implements CredentialsChangeable, ServerGroupNameable { + private YandexCloudCredentials credentials; + + private String serverGroupName; +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/EnableDisableYandexServerGroupDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/EnableDisableYandexServerGroupDescription.java new file mode 100644 index 00000000000..06e7fa276d0 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/EnableDisableYandexServerGroupDescription.java @@ -0,0 +1,54 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.spinnaker.clouddriver.deploy.description.EnableDisableDescriptionTrait; +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupsNameable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.Collection; +import java.util.Collections; +import lombok.Data; + +/** + * "Enabling" means adding a server group to the target pool of each of its network load balancers. + * + * <p>"Disabling" means removing a server group from the target pool of each of its network load + * balancers. + */ +@Data +public class EnableDisableYandexServerGroupDescription + implements CredentialsChangeable, ServerGroupsNameable, EnableDisableDescriptionTrait { + private YandexCloudCredentials credentials; + private String serverGroupName; + + @Override + public Collection<String> getServerGroupNames() { + return Collections.singletonList(serverGroupName); + } + + @Override + public Integer getDesiredPercentage() { + throw new IllegalArgumentException( + "Yandex cloud provider hasn't implemented enabling/disabling by percentage yet"); + } + + @Override + public void setDesiredPercentage(Integer percentage) { + throw new IllegalArgumentException( + "Yandex cloud provider hasn't implemented enabling/disabling by percentage yet"); + } +}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/RebootYandexInstancesDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/RebootYandexInstancesDescription.java new file mode 100644 index 00000000000..5d84dc85473 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/RebootYandexInstancesDescription.java @@ -0,0 +1,33 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.spinnaker.clouddriver.security.resources.ResourcesNameable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.Collections; +import java.util.List; +import lombok.Data; + +@Data +public class RebootYandexInstancesDescription implements CredentialsChangeable, ResourcesNameable { + private YandexCloudCredentials credentials; + private List<String> instanceIds; + + public List<String> getNames() { + return instanceIds == null ? Collections.emptyList() : instanceIds; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/ResizeYandexServerGroupDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/ResizeYandexServerGroupDescription.java new file mode 100644 index 00000000000..bbc91022687 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/ResizeYandexServerGroupDescription.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.spinnaker.clouddriver.security.resources.ServerGroupNameable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import lombok.Data; + +@Data +public class ResizeYandexServerGroupDescription + implements CredentialsChangeable, ServerGroupNameable { + private YandexCloudCredentials credentials; + private String serverGroupName; + private Capacity capacity; + + @Data + public static class Capacity { + private Integer min; + private Integer max; + private Integer desired; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/UpsertYandexImageTagsDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/UpsertYandexImageTagsDescription.java new file mode 100644 index 00000000000..154cd133b23 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/UpsertYandexImageTagsDescription.java @@ -0,0 +1,37 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.Map; +import lombok.Data; + +@Data +public class UpsertYandexImageTagsDescription implements CredentialsChangeable { + private YandexCloudCredentials credentials; + private String imageName; + private Map<String, String> tags; + + @Override + public boolean requiresAuthorization( + SecurityConfig.OperationsSecurityConfigurationProperties opsSecurityConfigProps) { + return !opsSecurityConfigProps + .getAllowUnauthenticatedImageTaggingInAccounts() + .contains(getAccount()); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/UpsertYandexLoadBalancerDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/UpsertYandexLoadBalancerDescription.java new file mode 100644 index 00000000000..cc42e20de9d --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/UpsertYandexLoadBalancerDescription.java @@ -0,0 +1,45 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.google.common.collect.ImmutableList; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import lombok.Data; + +@Data +public class UpsertYandexLoadBalancerDescription + implements CredentialsChangeable, ApplicationNameable { + private YandexCloudCredentials credentials; + + private String id; + private String name; + private String description; + private YandexCloudLoadBalancer.BalancerType lbType; + private List<YandexCloudLoadBalancer.Listener> listeners; + private Map<String, String> labels; + + @Override + public Collection<String> getApplications() { + return ImmutableList.of(Names.parseName(name).getApp()); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/YandexInstanceGroupDescription.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/YandexInstanceGroupDescription.java new file mode 100644 index 00000000000..35018548426 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/YandexInstanceGroupDescription.java @@ -0,0 +1,112 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.description; + +import com.google.common.base.Strings; +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.deploy.DeployDescription; +import com.netflix.spinnaker.clouddriver.security.resources.ApplicationNameable; +import com.netflix.spinnaker.clouddriver.yandex.deploy.YandexServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.util.*; +import lombok.*; + +@Data +@Builder(toBuilder = true) +@NoArgsConstructor +@AllArgsConstructor(access = AccessLevel.PRIVATE) +public class YandexInstanceGroupDescription + implements CredentialsChangeable, Cloneable, DeployDescription, ApplicationNameable { + private YandexCloudCredentials credentials; + + private String application; + private String stack; + private String freeFormDetails; + + private String sourceServerGroupName; + + private String name; + private String description; + private Set<String> zones; + private Map<String, String> labels; + private Long targetSize; + private YandexCloudServerGroup.AutoScalePolicy autoScalePolicy; + private YandexCloudServerGroup.DeployPolicy deployPolicy; + private YandexCloudServerGroup.TargetGroupSpec targetGroupSpec; + private List<YandexCloudServerGroup.HealthCheckSpec> healthCheckSpecs; + private YandexCloudServerGroup.InstanceTemplate instanceTemplate; + private String serviceAccountId; + private Map<String, List<YandexCloudServerGroup.HealthCheckSpec>> balancers; + private Boolean enableTraffic; + + private Source source; + + @Override + public Collection<String> getApplications() { + if (!Strings.isNullOrEmpty(application)) { + return Collections.singletonList(application); + } + + if (!Strings.isNullOrEmpty(getName())) { + return Collections.singletonList(Names.parseName(getName()).getApp()); + } + + return null; + } + + public void produceServerGroupName() { + YandexServerGroupNameResolver serverGroupNameResolver = + new YandexServerGroupNameResolver(getCredentials()); + this.setName( + serverGroupNameResolver.resolveNextServerGroupName( + getApplication(), getStack(), getFreeFormDetails(), false)); + } + + public void saturateLabels() { + if (getLabels() == null) { + setLabels(new HashMap<>()); + } + if (getInstanceTemplate().getLabels() == null) { + getInstanceTemplate().setLabels(new HashMap<>()); + } + + Integer sequence = Names.parseName(getName()).getSequence(); + String clusterName = + new YandexServerGroupNameResolver(getCredentials()) + .combineAppStackDetail(getApplication(), getStack(), getFreeFormDetails()); + + saturateLabels(getLabels(), sequence, clusterName); + saturateLabels(getInstanceTemplate().getLabels(), sequence, clusterName); + } + + private void saturateLabels( + Map<String, String> labels, Integer sequence, String clusterName) { + labels.putIfAbsent("spinnaker-server-group", this.getName()); + labels.putIfAbsent("spinnaker-moniker-application", this.getApplication()); + labels.putIfAbsent("spinnaker-moniker-cluster", clusterName); + labels.putIfAbsent("spinnaker-moniker-stack", this.getStack()); + labels.put("spinnaker-moniker-sequence", sequence == null ?
null : sequence.toString()); + } + + @Data + @NoArgsConstructor + @AllArgsConstructor(access = AccessLevel.PRIVATE) + public static class Source { + String serverGroupName; + Boolean useSourceCapacity; + } +}
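One detail worth calling out in saturateLabels: the moniker labels use putIfAbsent, so labels already supplied on the description win, while the sequence label uses put and is always overwritten. A standalone demonstration with invented values:

import java.util.HashMap;
import java.util.Map;

// Mirrors the putIfAbsent/put split in saturateLabels; values are invented.
public class SaturateLabelsSketch {
  public static void main(String[] args) {
    Map<String, String> labels = new HashMap<>();
    labels.put("spinnaker-moniker-stack", "custom-stack"); // user-supplied

    labels.putIfAbsent("spinnaker-server-group", "myapp-prod-v004");
    labels.putIfAbsent("spinnaker-moniker-application", "myapp");
    labels.putIfAbsent("spinnaker-moniker-cluster", "myapp-prod");
    labels.putIfAbsent("spinnaker-moniker-stack", "prod"); // kept: custom-stack
    labels.put("spinnaker-moniker-sequence", "4");         // always overwritten

    System.out.println(labels);
  }
}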

diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/AbstractYandexAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/AbstractYandexAtomicOperation.java new file mode 100644 index 00000000000..fdc04d040cd --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/AbstractYandexAtomicOperation.java @@ -0,0 +1,72 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.security.resources.CredentialsNameable; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.provider.view.YandexClusterProvider; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.Collection; +import java.util.Optional; +import org.springframework.beans.factory.annotation.Autowired; + +/** + * Helper class for Yandex operations. Should be revised. + * + * <p>todo: fix credentials hierarchy, extract status utility methods, replace autowiring with + * constructors + * + * @param <D> + */ +public class AbstractYandexAtomicOperation<D extends CredentialsNameable> { + protected final D description; + protected final YandexCloudCredentials credentials; + + @Autowired protected YandexCloudFacade yandexCloudFacade; + @Autowired private YandexClusterProvider yandexClusterProvider; + + AbstractYandexAtomicOperation(D description) { + this.description = description; + this.credentials = (YandexCloudCredentials) description.getCredentials(); + } + + protected Optional<YandexCloudServerGroup> getServerGroup(String name) { + return Optional.ofNullable( + yandexClusterProvider.getServerGroup( + description.getAccount(), YandexCloudProvider.REGION, name)); + } + + protected YandexCloudServerGroup getServerGroup(String phase, String name) { + return getServerGroup(name) + .orElseThrow( + () -> new IllegalStateException(status(phase, "Server group '%s' not found!", name))); + } + + public static String status(String phase, String status, Object... args) { + Task task = TaskRepository.threadLocalTask.get(); + String message = String.format(status, args); + task.updateStatus(phase, message); + return message; + } + + public static <T> Optional<T> single(Collection<T> values) { + return values.size() == 1 ? Optional.of(values.iterator().next()) : Optional.empty(); + } +}
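The single() helper deliberately collapses "not found" and "ambiguous" into an empty Optional; callers turn that into an IllegalStateException carrying a phase-status message. A runnable copy of the helper showing all three cases:

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

// Same logic as the single() helper above; only the demo harness is new.
public class SingleSketch {
  static <T> Optional<T> single(Collection<T> values) {
    return values.size() == 1 ? Optional.of(values.iterator().next()) : Optional.empty();
  }

  public static void main(String[] args) {
    System.out.println(single(List.of("id-1")));         // Optional[id-1]
    System.out.println(single(List.of("id-1", "id-2"))); // Optional.empty: ambiguous
    System.out.println(single(Collections.emptyList())); // Optional.empty: nothing found
  }
}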
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/CloneYandexServerGroupAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/CloneYandexServerGroupAtomicOperation.java new file mode 100644 index 00000000000..7469ec57a79 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/CloneYandexServerGroupAtomicOperation.java @@ -0,0 +1,171 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.frigga.Names; +import com.netflix.spinnaker.clouddriver.deploy.DeploymentResult; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.YandexDeployHandler; +import com.netflix.spinnaker.clouddriver.yandex.deploy.YandexServerGroupNameResolver; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import org.springframework.beans.factory.annotation.Autowired; + +public class CloneYandexServerGroupAtomicOperation + extends AbstractYandexAtomicOperation<YandexInstanceGroupDescription> + implements AtomicOperation<DeploymentResult> { + + private static final String BASE_PHASE = "COPY_LAST_SERVER_GROUP"; + + @Autowired private YandexDeployHandler deployHandler; + + public CloneYandexServerGroupAtomicOperation(YandexInstanceGroupDescription description) { + super(description); + } + + @Override + public DeploymentResult operate(List priorOutputs) { + YandexInstanceGroupDescription newDescription = cloneAndOverrideDescription(); + + YandexServerGroupNameResolver serverGroupNameResolver = + new YandexServerGroupNameResolver(newDescription.getCredentials()); + String clusterName = + serverGroupNameResolver.combineAppStackDetail( + newDescription.getApplication(), + newDescription.getStack(), + newDescription.getFreeFormDetails()); + + status(BASE_PHASE, "Initializing copy of server group for cluster '%s'...", clusterName); + DeploymentResult result = deployHandler.handle(newDescription, priorOutputs); + String newServerGroupName = + single(result.getDeployments()) + .map(DeploymentResult.Deployment::getServerGroupName) + .orElse(null); + status( + BASE_PHASE, + "Finished copying server group for cluster '%s'. New server group '%s'.", + clusterName, + newServerGroupName); + return result; + } + + private YandexInstanceGroupDescription cloneAndOverrideDescription() { + return Optional.ofNullable(description.getSource().getServerGroupName()) + .filter(name -> !name.isEmpty()) + .map(name -> getServerGroup(BASE_PHASE, name)) + .map(this::merge) + .orElse(description); + } + + // probably it's easier to merge maps... + private YandexInstanceGroupDescription merge(YandexCloudServerGroup ancestorServerGroup) { + YandexInstanceGroupDescription.YandexInstanceGroupDescriptionBuilder newDescription = + description.toBuilder(); + + // Override any ancestor values that were specified directly on the cloneServerGroup call.
Names ancestorNames = Names.parseName(ancestorServerGroup.getName()); + newDescription.application(firstNonEmpty(description.getApplication(), ancestorNames.getApp())); + newDescription.stack(firstNonEmpty(description.getStack(), ancestorNames.getStack())); + newDescription.freeFormDetails( + firstNonEmpty(description.getFreeFormDetails(), ancestorNames.getDetail())); + + newDescription.description( + firstNonEmpty(description.getDescription(), ancestorNames.getDetail())); + newDescription.zones(firstNonEmpty(description.getZones(), ancestorServerGroup.getZones())); + newDescription.labels(firstNonEmpty(description.getLabels(), ancestorServerGroup.getLabels())); + newDescription.targetSize( + firstNotNull( + description.getTargetSize(), + ancestorServerGroup.getCapacity().getDesired().longValue())); + newDescription.autoScalePolicy( + firstNotNull(description.getAutoScalePolicy(), ancestorServerGroup.getAutoScalePolicy())); + newDescription.deployPolicy( + firstNotNull(description.getDeployPolicy(), ancestorServerGroup.getDeployPolicy())); + YandexCloudServerGroup.TargetGroupSpec ancestorTargetGroupSpec = + ancestorServerGroup.getLoadBalancerIntegration() == null + ? null + : ancestorServerGroup.getLoadBalancerIntegration().getTargetGroupSpec(); + newDescription.targetGroupSpec( + firstNotNull(description.getTargetGroupSpec(), ancestorTargetGroupSpec)); + newDescription.healthCheckSpecs( + firstNonEmpty( + description.getHealthCheckSpecs(), ancestorServerGroup.getHealthCheckSpecs())); + newDescription.serviceAccountId( + firstNonEmpty( + description.getServiceAccountId(), ancestorServerGroup.getServiceAccountId())); + + YandexCloudServerGroup.InstanceTemplate template = description.getInstanceTemplate(); + if (template != null) { + YandexCloudServerGroup.InstanceTemplate ancestorTemplate = + ancestorServerGroup.getInstanceTemplate(); + YandexCloudServerGroup.InstanceTemplate.InstanceTemplateBuilder builder = + template.toBuilder() + .description( + firstNonEmpty(template.getDescription(), ancestorTemplate.getDescription())) + .labels(firstNonEmpty(template.getLabels(), ancestorTemplate.getLabels())) + .platformId(firstNonEmpty(template.getPlatformId(), ancestorTemplate.getPlatformId())) + .resourcesSpec( + firstNotNull(template.getResourcesSpec(), ancestorTemplate.getResourcesSpec())) + .metadata(firstNonEmpty(template.getMetadata(), ancestorTemplate.getMetadata())) + .bootDiskSpec( + firstNotNull(template.getBootDiskSpec(), ancestorTemplate.getBootDiskSpec())) + .secondaryDiskSpecs( + firstNonEmpty( + template.getSecondaryDiskSpecs(), ancestorTemplate.getSecondaryDiskSpecs())) + .networkInterfaceSpecs( + firstNonEmpty( + template.getNetworkInterfaceSpecs(), + ancestorTemplate.getNetworkInterfaceSpecs())) + .schedulingPolicy( + firstNotNull( + template.getSchedulingPolicy(), ancestorTemplate.getSchedulingPolicy())) + .serviceAccountId( + firstNonEmpty( + template.getServiceAccountId(), ancestorTemplate.getServiceAccountId())); + newDescription.instanceTemplate(builder.build()); + } else { + newDescription.instanceTemplate(ancestorServerGroup.getInstanceTemplate()); + } + + newDescription.enableTraffic( + description.getEnableTraffic() != null && description.getEnableTraffic()); + newDescription.balancers(ancestorServerGroup.getLoadBalancersWithHealthChecks()); + + return newDescription.build(); + } + + private static <T> T firstNotNull(T first, T second) { + return first == null ?
second : first; + } + + private static String firstNonEmpty(String first, String second) { + return first == null || first.isEmpty() ? second : first; + } + + private static <T, COLLECTION extends Collection<T>> COLLECTION firstNonEmpty( + COLLECTION first, COLLECTION second) { + return first == null || first.isEmpty() ? second : first; + } + + private static <K, V> Map<K, V> firstNonEmpty(Map<K, V> first, Map<K, V> second) { + return first == null || first.isEmpty() ? second : first; + } +}
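All of the merge above leans on two tiny precedence helpers: the value from the clone request wins, and the ancestor server group only fills gaps (empty counts as absent for strings and collections). A runnable copy of the string variant with illustrative values:

// Same logic as the firstNonEmpty(String, String) helper above; only the
// demo values are invented.
public class ClonePrecedenceSketch {
  static String firstNonEmpty(String first, String second) {
    return first == null || first.isEmpty() ? second : first;
  }

  public static void main(String[] args) {
    System.out.println(firstNonEmpty("request-stack", "ancestor-stack")); // request-stack
    System.out.println(firstNonEmpty("", "ancestor-stack"));              // ancestor-stack
    System.out.println(firstNonEmpty(null, "ancestor-stack"));            // ancestor-stack
  }
}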
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DeleteYandexLoadBalancerAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DeleteYandexLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..9a6c700bd46 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DeleteYandexLoadBalancerAtomicOperation.java @@ -0,0 +1,50 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.DeleteYandexLoadBalancerDescription; +import java.util.List; + +public class DeleteYandexLoadBalancerAtomicOperation + extends AbstractYandexAtomicOperation<DeleteYandexLoadBalancerDescription> + implements AtomicOperation<Void> { + + private static final String BASE_PHASE = "DELETE_LOAD_BALANCER"; + + public DeleteYandexLoadBalancerAtomicOperation(DeleteYandexLoadBalancerDescription description) { + super(description); + } + + @Override + public Void operate(List priorOutputs) { + String name = description.getLoadBalancerName(); + status(BASE_PHASE, "Initializing deletion of load balancer '%s'...", name); + String loadBalancerId = + single(yandexCloudFacade.getLoadBalancerIds(credentials, name)) + .orElseThrow( + () -> + new IllegalStateException( + status( + BASE_PHASE, + "Found none or more than one load balancer with name '%s'!", + name))); + yandexCloudFacade.deleteLoadBalancer(credentials, loadBalancerId); + status(BASE_PHASE, "Done deleting load balancer '%s'.", name); + return null; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DestroyYandexServerGroupAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DestroyYandexServerGroupAtomicOperation.java new file mode 100644 index 00000000000..e9643b7c0ab --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DestroyYandexServerGroupAtomicOperation.java @@ -0,0 +1,66 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.DestroyYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.List; +import java.util.Optional; + +public class DestroyYandexServerGroupAtomicOperation + extends AbstractYandexAtomicOperation<DestroyYandexServerGroupDescription> + implements AtomicOperation<Void> { + + private static final String BASE_PHASE = YandexCloudFacade.DESTROY_SERVER_GROUP; + + public DestroyYandexServerGroupAtomicOperation(DestroyYandexServerGroupDescription description) { + super(description); + } + + @Override + public Void operate(List priorOutputs) { + String serverGroupName = description.getServerGroupName(); + status(BASE_PHASE, "Initializing destruction of server group '%s'...", serverGroupName); + YandexCloudServerGroup serverGroup = getServerGroup(BASE_PHASE, serverGroupName); + status(BASE_PHASE, "Checking for associated load balancers..."); + Optional.of(serverGroup) + .map(YandexCloudServerGroup::getLoadBalancerIntegration) + .ifPresent(this::detachFromLoadBalancers); + yandexCloudFacade.deleteInstanceGroup(credentials, serverGroup.getId()); + status(BASE_PHASE, "Done destroying server group '%s'.", serverGroupName); + return null; + } + + public void detachFromLoadBalancers(YandexCloudServerGroup.LoadBalancerIntegration loadBalancer) { + for (YandexCloudLoadBalancer balancer : loadBalancer.getBalancers()) { + status( + BASE_PHASE, + "Detaching server group from associated load balancer '%s'...", + balancer.getName()); + yandexCloudFacade.detachTargetGroup( + BASE_PHASE, credentials, balancer, loadBalancer.getTargetGroupId()); + status( + BASE_PHASE, + "Detached server group from associated load balancer '%s'.", + balancer.getName()); + } + status(BASE_PHASE, "Detached server group from associated load balancers."); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DisableYandexServerGroupAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DisableYandexServerGroupAtomicOperation.java new file mode 100644 index 00000000000..ca084782bbd --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/DisableYandexServerGroupAtomicOperation.java @@ -0,0 +1,62 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.EnableDisableYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import java.util.List; +import java.util.Optional; + +public class DisableYandexServerGroupAtomicOperation + extends AbstractYandexAtomicOperation<EnableDisableYandexServerGroupDescription> + implements AtomicOperation<Void> { + + private static final String BASE_PHASE = "DISABLE_SERVER_GROUP"; + + public DisableYandexServerGroupAtomicOperation( + EnableDisableYandexServerGroupDescription description) { + super(description); + } + + @Override + public Void operate(List priorOutputs) { + String serverGroupName = description.getServerGroupName(); + status(BASE_PHASE, "Initializing disable server group operation for '%s'...", serverGroupName); + YandexCloudServerGroup serverGroup = getServerGroup(BASE_PHASE, serverGroupName); + status(BASE_PHASE, "Disabling server group '%s'...", serverGroupName); + Optional.of(serverGroup) + .map(YandexCloudServerGroup::getLoadBalancerIntegration) + .ifPresent(this::disableInstanceGroup); + status(BASE_PHASE, "Done disabling server group '%s'.", serverGroupName); + return null; + } + + private void disableInstanceGroup(YandexCloudServerGroup.LoadBalancerIntegration loadBalancer) { + for (YandexCloudLoadBalancer balancer : loadBalancer.getBalancers()) { + status( + BASE_PHASE, "Deregistering server group from load balancer '%s'...", balancer.getName()); + yandexCloudFacade.detachTargetGroup( + BASE_PHASE, credentials, balancer, loadBalancer.getTargetGroupId()); + status( + BASE_PHASE, + "Done deregistering server group from load balancer '%s'.", + balancer.getName()); + } + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/EnableYandexServerGroupAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/EnableYandexServerGroupAtomicOperation.java new file mode 100644 index 00000000000..7ac02205d68 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/EnableYandexServerGroupAtomicOperation.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.EnableDisableYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public class EnableYandexServerGroupAtomicOperation + extends AbstractYandexAtomicOperation<EnableDisableYandexServerGroupDescription> + implements AtomicOperation<Void> { + + private static final String BASE_PHASE = "ENABLE_SERVER_GROUP"; + + public EnableYandexServerGroupAtomicOperation( + EnableDisableYandexServerGroupDescription description) { + super(description); + } + + @Override + public Void operate(List priorOutputs) { + String serverGroupName = description.getServerGroupName(); + status(BASE_PHASE, "Initializing enable server group operation for '%s'...", serverGroupName); + YandexCloudServerGroup serverGroup = getServerGroup(BASE_PHASE, serverGroupName); + status(BASE_PHASE, "Enabling server group '%s'...", serverGroupName); + Map<String, List<YandexCloudServerGroup.HealthCheckSpec>> specs = + serverGroup.getLoadBalancersWithHealthChecks(); + Optional.of(serverGroup) + .map(YandexCloudServerGroup::getLoadBalancerIntegration) + .map(YandexCloudServerGroup.LoadBalancerIntegration::getTargetGroupId) + .ifPresent(id -> yandexCloudFacade.enableInstanceGroup(BASE_PHASE, credentials, id, specs)); + status(BASE_PHASE, "Done enabling server group '%s'.", serverGroupName); + return null; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/ModifyYandexInstanceGroupOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/ModifyYandexInstanceGroupOperation.java new file mode 100644 index 00000000000..e300793f10f --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/ModifyYandexInstanceGroupOperation.java @@ -0,0 +1,54 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.List;
+
+public class ModifyYandexInstanceGroupOperation
+    extends AbstractYandexAtomicOperation<YandexInstanceGroupDescription>
+    implements AtomicOperation<Void> {
+
+  private static final String BASE_PHASE = YandexCloudFacade.MODIFY_INSTANCE_GROUP;
+
+  public ModifyYandexInstanceGroupOperation(YandexInstanceGroupDescription description) {
+    super(description);
+  }
+
+  @Override
+  public Void operate(List priorOutputs) {
+    String serverGroupName = description.getName();
+    status(BASE_PHASE, "Initializing update of server group '%s'...", serverGroupName);
+    description.saturateLabels();
+    status(BASE_PHASE, "Resolving server group identifier '%s'...", serverGroupName);
+    String instanceGroupId =
+        single(yandexCloudFacade.getServerGroupIds(credentials, serverGroupName))
+            .orElseThrow(
+                () ->
+                    new IllegalStateException(
+                        status(
+                            BASE_PHASE,
+                            "Found either no server group or more than one server group named '%s'.",
+                            serverGroupName)));
+    status(BASE_PHASE, "Updating server group '%s'...", serverGroupName);
+    yandexCloudFacade.updateInstanceGroup(credentials, instanceGroupId, description);
+    status(BASE_PHASE, "Done updating server group '%s'.", serverGroupName);
+    return null;
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/RebootYandexInstancesAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/RebootYandexInstancesAtomicOperation.java
new file mode 100644
index 00000000000..1f902266fb1
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/RebootYandexInstancesAtomicOperation.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.yandex.deploy.description.RebootYandexInstancesDescription;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.List;
+
+public class RebootYandexInstancesAtomicOperation
+    extends AbstractYandexAtomicOperation<RebootYandexInstancesDescription>
+    implements AtomicOperation<Void> {
+
+  private static final String BASE_PHASE = YandexCloudFacade.REBOOT_INSTANCES;
+
+  public RebootYandexInstancesAtomicOperation(RebootYandexInstancesDescription description) {
+    super(description);
+  }
+
+  @Override
+  public Void operate(List priorOutputs) {
+    String instances = String.join(", ", description.getInstanceIds());
+    status(BASE_PHASE, "Initializing reboot of instances (%s)...", instances);
+    for (String instanceId : description.getInstanceIds()) {
+      status(BASE_PHASE, "Attempting to reboot instance '%s'...", instanceId);
+      yandexCloudFacade.restrartInstance(credentials, instanceId);
+    }
+    status(BASE_PHASE, "Done rebooting instances (%s).", instances);
+    return null;
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/ResizeYandexServerGroupAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/ResizeYandexServerGroupAtomicOperation.java
new file mode 100644
index 00000000000..50670e31fea
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/ResizeYandexServerGroupAtomicOperation.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.ResizeYandexServerGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.List; + +public class ResizeYandexServerGroupAtomicOperation + extends AbstractYandexAtomicOperation + implements AtomicOperation { + + public static final String BASE_PHASE = YandexCloudFacade.RESIZE_SERVER_GROUP; + + public ResizeYandexServerGroupAtomicOperation(ResizeYandexServerGroupDescription description) { + super(description); + } + + @Override + public Void operate(List priorOutputs) { + String serverGroupName = description.getServerGroupName(); + status(BASE_PHASE, "Initializing resize of server group '%s'...", serverGroupName); + YandexCloudServerGroup serverGroup = getServerGroup(BASE_PHASE, serverGroupName); + yandexCloudFacade.resizeServerGroup( + credentials, serverGroup.getId(), description.getCapacity().getDesired()); + status(BASE_PHASE, "Done resizing server group '%s'.", serverGroupName); + return null; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/UpsertYandexImageTagsAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/UpsertYandexImageTagsAtomicOperation.java new file mode 100644 index 00000000000..068ee5d6b46 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/UpsertYandexImageTagsAtomicOperation.java @@ -0,0 +1,56 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy.ops; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.UpsertYandexImageTagsDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class UpsertYandexImageTagsAtomicOperation + extends AbstractYandexAtomicOperation + implements AtomicOperation { + + private static final String BASE_PHASE = YandexCloudFacade.UPSERT_IMAGE_TAGS; + + public UpsertYandexImageTagsAtomicOperation(UpsertYandexImageTagsDescription description) { + super(description); + } + + @Override + public Void operate(List priorOutputs) { + String name = description.getImageName(); + status(BASE_PHASE, "Initializing upsert of image tags for '%s'...", name); + YandexCloudImage image = yandexCloudFacade.getImage(credentials, name); + if (image != null) { + Map labels = new HashMap<>(image.getLabels()); + labels.putAll(description.getTags()); + status( + BASE_PHASE, + "Upserting new labels %s in place of original labels %s for image '%s' ...", + labels, + image.getLabels(), + name); + yandexCloudFacade.updateImageTags(credentials, image.getId(), labels); + } + status(BASE_PHASE, "Done tagging image '%s'.", name); + return null; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/UpsertYandexLoadBalancerAtomicOperation.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/UpsertYandexLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..b9a6cdd5882 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/deploy/ops/UpsertYandexLoadBalancerAtomicOperation.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.deploy.ops;
+
+import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import com.netflix.spinnaker.clouddriver.yandex.deploy.description.UpsertYandexLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+public class UpsertYandexLoadBalancerAtomicOperation
+    extends AbstractYandexAtomicOperation<UpsertYandexLoadBalancerDescription>
+    implements AtomicOperation<Map<String, Map<String, Map<String, String>>>> {
+
+  private static final String BASE_PHASE = YandexCloudFacade.UPSERT_LOAD_BALANCER;
+
+  public UpsertYandexLoadBalancerAtomicOperation(UpsertYandexLoadBalancerDescription description) {
+    super(description);
+  }
+
+  @Override
+  public Map<String, Map<String, Map<String, String>>> operate(
+      List<Map<String, Map<String, Map<String, String>>>> priorOutputs) {
+    String name = description.getName();
+    String id = description.getId();
+
+    status(BASE_PHASE, "Initializing upsert of load balancer '%s'...", name);
+    if (id == null || id.isEmpty()) {
+      status(BASE_PHASE, "Creating load balancer '%s'...", name);
+      yandexCloudFacade.createLoadBalancer(credentials, description);
+      status(BASE_PHASE, "Done creating load balancer '%s'.", name);
+    } else {
+      status(BASE_PHASE, "Updating load balancer '%s'...", name);
+      yandexCloudFacade.updateLoadBalancer(id, credentials, description);
+      status(BASE_PHASE, "Done updating load balancer '%s'.", name);
+    }
+    status(BASE_PHASE, "Done upserting load balancer '%s'.", name);
+
+    return Collections.singletonMap(
+        "loadBalancers",
+        Collections.singletonMap(
+            YandexCloudProvider.REGION, Collections.singletonMap("name", name)));
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexApplication.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexApplication.java
new file mode 100644
index 00000000000..cced6336e00
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexApplication.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.model;
+
+import com.netflix.spinnaker.clouddriver.model.Application;
+import java.util.*;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class YandexApplication implements Application {
+  private String name;
+  private Map<String, String> attributes = new HashMap<>();
+  private List<Map<String, String>> instances = new ArrayList<>();
+  /** Account name -> cluster names */
+  private Map<String, Set<String>> clusterNames = new HashMap<>();
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudCluster.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudCluster.java
new file mode 100644
index 00000000000..f8cf0d4de62
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudCluster.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.model;
+
+import com.netflix.spinnaker.clouddriver.model.Cluster;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import java.util.HashSet;
+import java.util.Set;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@NoArgsConstructor
+public class YandexCloudCluster implements Cluster {
+  private String name;
+  private String type = YandexCloudProvider.ID;
+  private String accountName;
+  private Set<YandexCloudServerGroup> serverGroups = new HashSet<>();
+  private Set<YandexCloudLoadBalancer> loadBalancers = new HashSet<>();
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudImage.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudImage.java
new file mode 100644
index 00000000000..c37a4bad167
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudImage.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.model; + +import com.netflix.spinnaker.clouddriver.model.Image; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import java.util.Map; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +import yandex.cloud.api.compute.v1.ImageOuterClass; + +@Data +@Builder +@AllArgsConstructor +@NoArgsConstructor +public class YandexCloudImage implements Image { + private String id; + private String name; + private String description; + private String region; + private Long createdAt; + private Map labels; + + public static YandexCloudImage createFromProto(ImageOuterClass.Image image) { + return YandexCloudImage.builder() + .id(image.getId()) + .name(image.getName()) + .description(image.getDescription()) + .region(YandexCloudProvider.REGION) + .createdAt(image.getCreatedAt().getSeconds() * 1000) + .labels(image.getLabelsMap()) + .build(); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudInstance.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudInstance.java new file mode 100644 index 00000000000..ada71ca870d --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudInstance.java @@ -0,0 +1,173 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.model; + +import static yandex.cloud.api.compute.v1.InstanceOuterClass.NetworkInterface; + +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.model.Instance; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.health.YandexInstanceHealth; +import com.netflix.spinnaker.clouddriver.yandex.model.health.YandexLoadBalancerHealth; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +import yandex.cloud.api.compute.v1.InstanceOuterClass; + +@Data +@AllArgsConstructor +@NoArgsConstructor +@Builder +public class YandexCloudInstance implements Instance { + private String id; + private String name; + private Long launchTime; + private String zone; + private String providerType; + private String cloudProvider; + private YandexInstanceHealth instanceHealth; + private List> health; + private List loadBalancers; + private Map labels; + private Map> addressesInSubnets; + + public static YandexCloudInstance createFromProto(InstanceOuterClass.Instance instance) { + YandexInstanceHealth instanceHealth = createInstanceHealth(instance); + YandexCloudInstance model = + YandexCloudInstance.builder() + .id(instance.getId()) + .name(instance.getName()) + .launchTime(calculateInstanceTimestamp(instance)) + .zone(instance.getZoneId()) + .providerType(YandexCloudProvider.ID) + .cloudProvider(YandexCloudProvider.ID) + .instanceHealth(instanceHealth) + .labels(instance.getLabelsMap()) + .addressesInSubnets( + instance.getNetworkInterfacesList().stream() + .collect( + Collectors.toMap( + NetworkInterface::getSubnetId, + ni -> + Stream.of( + ni.hasPrimaryV4Address() + ? ni.getPrimaryV4Address().getAddress() + : null, + ni.hasPrimaryV4Address() + ? ni.getPrimaryV4Address().getOneToOneNat().getAddress() + : null, + ni.hasPrimaryV6Address() + ? ni.getPrimaryV6Address().getAddress() + : null) + .filter(Objects::nonNull) + .collect(Collectors.toList())))) + .build(); + model.updateHealths(); + return model; + } + + @Override + public HealthState getHealthState() { + return instanceHealth.getState() != HealthState.Unknown + ? instanceHealth.getState() + : loadBalancers == null || loadBalancers.isEmpty() + ? HealthState.Unknown + : loadBalancers.stream() + .map(this::findHealthInBalancer) + .flatMap(Collection::stream) + .allMatch(health -> health.getState() == HealthState.Up) + ? 
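+                    // Platform health is Unknown at this point: the instance counts as Up
+                    // only when every matching load-balancer health check reports Up,
+                    // otherwise Down.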
HealthState.Up + : HealthState.Down; + } + + private void updateHealths() { + health = new ArrayList<>(); + health.add(convertInstanceHealth()); + + if (loadBalancers == null) { + return; + } + loadBalancers.forEach( + balancer -> + findHealthInBalancer(balancer) + .forEach( + hc -> { + HashMap lb = new HashMap<>(); + lb.put("name", balancer.getName()); + // lb.put("description","name"); + lb.put("state", hc.getStatus().toServiceStatus()); + + HashMap healthState = new HashMap<>(); + healthState.put("type", "LoadBalancer"); + healthState.put("state", hc.getState()); + healthState.put("loadBalancers", Collections.singletonList(lb)); + + health.add(healthState); + })); + } + + private List findHealthInBalancer(YandexCloudLoadBalancer balancer) { + return balancer.getHealths().values().stream() + .flatMap(Collection::stream) + .filter(this::containsAddress) + .collect(Collectors.toList()); + } + + public boolean containsAddress(YandexLoadBalancerHealth health) { + return addressesInSubnets + .getOrDefault(health.getSubnetId(), Collections.emptyList()) + .contains(health.getAddress()); + } + + private static Long calculateInstanceTimestamp(InstanceOuterClass.Instance instance) { + return instance.getCreatedAt() != null + ? instance.getCreatedAt().getSeconds() * 1000 + : Long.MAX_VALUE; + } + + private static YandexInstanceHealth createInstanceHealth(InstanceOuterClass.Instance instance) { + YandexInstanceHealth health = new YandexInstanceHealth(); + health.setStatus(YandexInstanceHealth.Status.valueOf(instance.getStatus().name())); + return health; + } + + private Map convertInstanceHealth() { + Map instanceHealthState = new HashMap<>(); + instanceHealthState.put("type", "Yandex"); + instanceHealthState.put("healthClass", "platform"); + instanceHealthState.put("state", instanceHealth.getState()); + return instanceHealthState; + } + + public void linkWithLoadBalancer(YandexCloudLoadBalancer balancer) { + if (this.loadBalancers == null) { + this.loadBalancers = new ArrayList<>(); + } + this.loadBalancers.add(balancer); + updateHealths(); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudLoadBalancer.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudLoadBalancer.java new file mode 100644 index 00000000000..a8de3b6299b --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudLoadBalancer.java @@ -0,0 +1,125 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.model; + +import static yandex.cloud.api.loadbalancer.v1.NetworkLoadBalancerOuterClass.NetworkLoadBalancer; + +import com.netflix.spinnaker.clouddriver.model.LoadBalancer; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.health.YandexLoadBalancerHealth; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@Builder +@AllArgsConstructor +@NoArgsConstructor +public class YandexCloudLoadBalancer implements LoadBalancer { + String cloudProvider; + String account; + String id; + String region; + String name; + String description; + BalancerType balancerType; + Long createdTime; + SessionAffinity sessionAffinity; + List listeners; + Map labels; + + Set serverGroups; + Map> healths; + + @Override + public String getType() { + return getCloudProvider(); + } + + public static YandexCloudLoadBalancer createFromNetworkLoadBalancer( + NetworkLoadBalancer nlb, + String account, + Map> healths) { + return YandexCloudLoadBalancer.builder() + .cloudProvider(YandexCloudProvider.ID) + .account(account) + .id(nlb.getId()) + .region(nlb.getRegionId()) + .name(nlb.getName()) + .description(nlb.getDescription()) + .balancerType(BalancerType.valueOf(nlb.getType().name())) + .createdTime(nlb.getCreatedAt().getSeconds() * 1000) + .sessionAffinity(SessionAffinity.valueOf(nlb.getSessionAffinity().name())) + .listeners( + nlb.getListenersList().stream() + .map( + listener -> + new Listener( + listener.getName(), + listener.getAddress(), + (int) listener.getPort(), + Protocol.valueOf(listener.getProtocol().name()), + (int) listener.getTargetPort(), + listener.getSubnetId(), + IpVersion.IPV4)) + .collect(Collectors.toList())) + .labels(nlb.getLabelsMap()) + .healths(healths) + .serverGroups(new HashSet<>()) + .build(); + } + + public enum BalancerType { + EXTERNAL, + INTERNAL; + } + + public enum SessionAffinity { + SESSION_AFFINITY_UNSPECIFIED, + CLIENT_IP_PORT_PROTO; + } + + public enum Protocol { + TCP, + UDP; + } + + public enum IpVersion { + IPV4, + IPV6; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class Listener { + private String name; + private String address; + private Integer port; + private Protocol protocol; + private Integer targetPort; + private String subnetId; + private IpVersion ipVersion; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudNetwork.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudNetwork.java new file mode 100644 index 00000000000..3bf99b9147f --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudNetwork.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.model; + +import com.netflix.spinnaker.clouddriver.model.Network; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +import yandex.cloud.api.vpc.v1.NetworkOuterClass; + +@Data +@AllArgsConstructor +@Builder +@NoArgsConstructor +public class YandexCloudNetwork implements Network { + private String id; + private String cloudProvider; + private String name; + private String account; + private String region; + + public static YandexCloudNetwork createFromProto( + NetworkOuterClass.Network network, String accountName) { + return YandexCloudNetwork.builder() + .id(network.getId()) + .cloudProvider(YandexCloudProvider.ID) + .name(network.getName()) + .account(accountName) + .region(YandexCloudProvider.REGION) + .build(); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudServerGroup.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudServerGroup.java new file mode 100644 index 00000000000..dd786dd5298 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudServerGroup.java @@ -0,0 +1,431 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.model; + +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toMap; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.netflix.spinnaker.clouddriver.model.ServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import java.time.Duration; +import java.util.*; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +@JsonInclude(JsonInclude.Include.NON_NULL) +public class YandexCloudServerGroup implements ServerGroup { + // We store information about load balancers where server group attached to in + // instanceTemplate.metadata. + // Format is 'lb-name1=hc-spec;lb-name2=hc-spec;...' 
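+  // e.g. 'lb-front=http,80,/ping,2,1,2,2;lb-int=tcp,8080,,2,1,2,2' (hypothetical balancer
+  // names; the hc-spec fields are spelled out below)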
+ // Format hc-spec is 'protocol,port,path,interval,timeout,unhealthyThreshold,healthyThreshold' + // Format for multiple hc-spec is 'hc-spec&hc-spec&hc-spec' + public static final String LOAD_BALANCERS_SPECS = "load-balancer-names"; + + private String id; + private String folder; + private String name; + private String type = YandexCloudProvider.ID; + private String cloudProvider = YandexCloudProvider.ID; + private String region; + private Boolean disabled; + private Long createdTime; + private Set zones; + private Set instances; + private Set securityGroups; + private Map launchConfig; + private InstanceCounts instanceCounts; + private Capacity capacity; + private ImageSummary imageSummary; + private ImagesSummary imagesSummary; + private Map labels; + private String description; + private AutoScalePolicy autoScalePolicy; + private DeployPolicy deployPolicy; + private Status status; + private LoadBalancerIntegration loadBalancerIntegration; + private List healthCheckSpecs; + private InstanceTemplate instanceTemplate; + private String serviceAccountId; + + @Override + public Boolean isDisabled() { + return disabled; + } + + @Override + @JsonIgnore + public Set getLoadBalancers() { + return Optional.ofNullable(loadBalancerIntegration) + .map(LoadBalancerIntegration::getBalancers) + .map(b -> b.stream().map(YandexCloudLoadBalancer::getName).collect(Collectors.toSet())) + .orElse(Collections.emptySet()); + } + + public Map> getLoadBalancersWithHealthChecks() { + if (instanceTemplate.metadata == null) { + return Collections.emptyMap(); + } + if (!instanceTemplate.metadata.containsKey(LOAD_BALANCERS_SPECS)) { + return Collections.emptyMap(); + } + return Arrays.stream(instanceTemplate.metadata.get(LOAD_BALANCERS_SPECS).split(";")) + .map(part -> part.split("=")) + .filter(lbParts -> lbParts.length == 2) + .collect( + toMap( + lbParts -> lbParts[0], + lbParts -> + Arrays.stream(lbParts[1].split("&")) + .map(HealthCheckSpec::deserializeFromMetadataValue) + .collect(Collectors.toList()))); + } + + public static String serializeLoadBalancersWithHealthChecks( + Map> balancers) { + return balancers.keySet().stream() + .map( + balancer -> + balancer + + "=" + + balancers.get(balancer).stream() + .map(HealthCheckSpec::serializeForMetadataValue) + .collect(joining("&"))) + .collect(joining(";")); + } + + public enum Status { + STATUS_UNSPECIFIED, + + // Instance group is being started and will become active soon. + STARTING, + + // Instance group is active. + // In this state the group manages its instances and monitors their health, + // creating, deleting, stopping, updating and starting instances as needed. + // To stop the instance group, call + // [yandex.cloud.compute.v1.instancegroup.InstanceGroupService.Stop]. + ACTIVE, + + // Instance group is being stopped. + // Group's instances stop receiving traffic from the load balancer (if any) and are then + // stopped. + STOPPING, + + // Instance group is stopped. + // In this state the group cannot be updated and does not react to any changes made to its + // instances. + // To start the instance group, call + // [yandex.cloud.compute.v1.instancegroup.InstanceGroupService.Start]. + STOPPED, + + // Instance group is being deleted. + DELETING; + + public static Status valueOf(int number) { + Status[] values = values(); + return values.length <= number ? STATUS_UNSPECIFIED : values[number]; + } + } + + @Data + public static class AutoScalePolicy { + // Lower limit for instance count in each zone. 
+ long minZoneSize; + + // Upper limit for total instance count (across all zones). + long maxSize; + + // Time in seconds allotted for averaging metrics. + Duration measurementDuration; + + // The warmup time of the instance in seconds. During this time, + // traffic is sent to the instance, but instance metrics are not collected. + Duration warmupDuration; + + // Minimum amount of time in seconds allotted for monitoring before + // Instance Groups can reduce the number of instances in the group. + // During this time, the group size doesn't decrease, even if the new metric values + // indicate that it should. + Duration stabilizationDuration; + + // Initial target group size. + long initialSize; + + // Defines an autoscaling rule based on the average CPU utilization of the instance group. + CpuUtilizationRule cpuUtilizationRule; + + // Defines an autoscaling rule based on a custom metric from Yandex Monitoring. + List customRules; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class CpuUtilizationRule { + // Target CPU utilization level. Instance Groups maintains this level for each availability + // zone. + double utilizationTarget = 1; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class CustomRule { + // Custom metric rule type. This field affects which label from + // the custom metric should be used: `zone_id` or `instance_id`. + RuleType ruleType; + + // Type of custom metric. This field affects how Instance Groups calculates the average metric + // value. + MetricType metricType; + + // Name of custom metric in Yandex Monitoring that should be used for scaling. + String metricName; + + // Target value for the custom metric. Instance Groups maintains this level for each + // availability zone. + double target; + + public enum RuleType { + RULE_TYPE_UNSPECIFIED, + + // This type means that the metric applies to one instance. + // First, Instance Groups calculates the average metric value for each instance, + // then averages the values for instances in one availability zone. + // This type of metric must have the `instance_id` label. + UTILIZATION, + + // This type means that the metric applies to instances in one availability zone. + // This type of metric must have the `zone_id` label. + WORKLOAD; + + public static RuleType valueOf(int number) { + RuleType[] values = values(); + return values.length <= number ? RULE_TYPE_UNSPECIFIED : values[number]; + } + } + + public enum MetricType { + METRIC_TYPE_UNSPECIFIED, + + // This type is used for metrics that show the metric value at a certain point in time, + // such as requests per second to the server on an instance. + GAUGE, + + // This type is used for metrics that monotonically increase over time, + // such as the total number of requests to the server on an instance. + COUNTER; + + public static MetricType valueOf(int number) { + MetricType[] values = values(); + return values.length <= number ? METRIC_TYPE_UNSPECIFIED : values[number]; + } + } + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class DeployPolicy { + // The maximum number of running instances that can be taken offline (i.e., stopped or deleted) + // at the same time + // during the update process. + long maxUnavailable; + + // The maximum number of instances that can be temporarily allocated above the group's target + // size + // during the update process. + long maxExpansion; + + // The maximum number of instances that can be deleted at the same time. 
+ long maxDeleting; + + // The maximum number of instances that can be created at the same time. + long maxCreating; + + // Instance startup duration. + // Instance will be considered up and running (and start receiving traffic) only after + // startupDuration + // has elapsed and all health checks are passed. + Duration startupDuration; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class LoadBalancerIntegration { + // ID of the target group used for load balancing. + String targetGroupId; + + // Status message of the target group. + String statusMessage; + + // Specification of the target group that the instance group will be added to. For more + // information, see [Target groups and resources](/docs/load-balancer/target-resources). + TargetGroupSpec targetGroupSpec; + + Set balancers; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class TargetGroupSpec { + String name; + String description; + Map labels; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class HealthCheckSpec { + Type type; + long port; + String path; + Duration interval; + Duration timeout; + long unhealthyThreshold; + long healthyThreshold; + + public enum Type { + TCP, + HTTP + } + + String serializeForMetadataValue() { + return type.name().toLowerCase() + + "," + + port + + "," + + path + + "," + + interval.getSeconds() + + "," + + timeout.getSeconds() + + "," + + unhealthyThreshold + + "," + + healthyThreshold; + } + + static HealthCheckSpec deserializeFromMetadataValue(String value) { + String[] parts = value.split(","); + if (parts.length != 7) { + throw new IllegalStateException("Wrong format of health-check stored in metadata"); + } + HealthCheckSpec healthCheckSpec = new HealthCheckSpec(); + healthCheckSpec.setType(Type.valueOf(parts[0].toUpperCase())); + healthCheckSpec.setPort(Integer.parseInt(parts[1])); + healthCheckSpec.setPath(parts[2]); + healthCheckSpec.setInterval(Duration.ofSeconds(Long.parseLong(parts[3]))); + healthCheckSpec.setTimeout(Duration.ofSeconds(Long.parseLong(parts[4]))); + healthCheckSpec.setUnhealthyThreshold(Long.parseLong(parts[5])); + healthCheckSpec.setHealthyThreshold(Long.parseLong(parts[6])); + return healthCheckSpec; + } + } + + @Data + @Builder(toBuilder = true) + @NoArgsConstructor + @AllArgsConstructor + public static class InstanceTemplate { + String description; + Map labels; + String platformId; + ResourcesSpec resourcesSpec; + Map metadata; + AttachedDiskSpec bootDiskSpec; + List secondaryDiskSpecs; + List networkInterfaceSpecs; + SchedulingPolicy schedulingPolicy; + String serviceAccountId; + + @Data + @NoArgsConstructor + @AllArgsConstructor + public static class SchedulingPolicy { + boolean preemptible; + } + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class ResourcesSpec { + long memory; + long cores; + long coreFraction; + long gpus; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class AttachedDiskSpec { + Mode mode; + String deviceName; + DiskSpec diskSpec; + + public enum Mode { + MODE_UNSPECIFIED, + READ_ONLY, + READ_WRITE; + + public static Mode valueOf(int number) { + Mode[] values = values(); + return values.length <= number ? 
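+        // proto enum ordinals outside the known range fall back to MODE_UNSPECIFIED
+        // instead of throwing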
MODE_UNSPECIFIED : values[number]; + } + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class DiskSpec { + String description; + String typeId; + long size; + String imageId; + String snapshotId; + } + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class NetworkInterfaceSpec { + String networkId; + List subnetIds; + PrimaryAddressSpec primaryV4AddressSpec; + PrimaryAddressSpec primaryV6AddressSpec; + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + public static class PrimaryAddressSpec { + boolean oneToOneNat; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudServiceAccount.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudServiceAccount.java new file mode 100644 index 00000000000..1ddcb370a5d --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudServiceAccount.java @@ -0,0 +1,42 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.model; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; +import yandex.cloud.api.iam.v1.ServiceAccountOuterClass; + +@Data +@Builder +@AllArgsConstructor +@NoArgsConstructor +public class YandexCloudServiceAccount { + private String id; + private String name; + private String account; + + public static YandexCloudServiceAccount createFromProto( + ServiceAccountOuterClass.ServiceAccount serviceAccount, String accountName) { + return YandexCloudServiceAccount.builder() + .id(serviceAccount.getId()) + .account(accountName) + .name(serviceAccount.getName()) + .build(); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudSubnet.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudSubnet.java new file mode 100644 index 00000000000..029c51db107 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexCloudSubnet.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.model;
+
+import com.netflix.spinnaker.clouddriver.model.Subnet;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import yandex.cloud.api.vpc.v1.SubnetOuterClass;
+
+@Data
+@Builder
+@AllArgsConstructor
+@NoArgsConstructor
+public class YandexCloudSubnet implements Subnet {
+  private String id;
+  private String name;
+  private String account;
+  private String type;
+  private String purpose;
+  private String availabilityZone;
+  private String vpcId;
+
+  public static YandexCloudSubnet createFromProto(
+      SubnetOuterClass.Subnet subnet, String accountName) {
+    return YandexCloudSubnet.builder()
+        .id(subnet.getId())
+        .account(accountName)
+        .name(subnet.getName())
+        .type(YandexCloudProvider.ID)
+        .purpose("internal")
+        .availabilityZone(subnet.getZoneId())
+        .vpcId(subnet.getNetworkId())
+        .build();
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexLogRecord.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexLogRecord.java
new file mode 100644
index 00000000000..fff81cce198
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/YandexLogRecord.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.model;
+
+import java.time.Instant;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass;
+
+@Data
+@Builder
+@AllArgsConstructor
+@NoArgsConstructor
+public class YandexLogRecord {
+  private Instant timestamp;
+  private String message;
+
+  public static YandexLogRecord createFromProto(InstanceGroupOuterClass.LogRecord record) {
+    return YandexLogRecord.builder()
+        .message(record.getMessage())
+        .timestamp(Instant.ofEpochSecond(record.getTimestamp().getSeconds()))
+        .build();
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/health/YandexInstanceHealth.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/health/YandexInstanceHealth.java
new file mode 100644
index 00000000000..8f57ce820e0
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/health/YandexInstanceHealth.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.model.health; + +import com.netflix.spinnaker.clouddriver.model.Health; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@NoArgsConstructor +public class YandexInstanceHealth implements Health { + private Status status; + + public HealthState getState() { + return status.toHealthState(); + } + + public enum Status { + PROVISIONING, + RUNNING, + STOPPING, + STOPPED, + STARTING, + RESTARTING, + UPDATING, + ERROR, + CRASHED, + DELETING; + + public HealthState toHealthState() { + switch (this) { + case PROVISIONING: + case STARTING: + return HealthState.Starting; + case RUNNING: + return HealthState.Unknown; + case ERROR: + case CRASHED: + return HealthState.Failed; + default: + return HealthState.Down; + } + } + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/health/YandexLoadBalancerHealth.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/health/YandexLoadBalancerHealth.java new file mode 100644 index 00000000000..22a523f8a02 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/model/health/YandexLoadBalancerHealth.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.model.health;
+
+import com.netflix.spinnaker.clouddriver.model.Health;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@AllArgsConstructor
+@NoArgsConstructor
+public class YandexLoadBalancerHealth implements Health {
+  private String address;
+  private String subnetId;
+  private Status status;
+
+  @Override
+  public HealthState getState() {
+    return status.toHealthState();
+  }
+
+  public enum Status {
+    INITIAL,
+    HEALTHY,
+    UNHEALTHY,
+    DRAINING,
+    INACTIVE;
+
+    public HealthState toHealthState() {
+      if (this == Status.HEALTHY) {
+        return HealthState.Up;
+      }
+      return HealthState.Down;
+    }
+
+    public ServiceStatus toServiceStatus() {
+      if (this == Status.HEALTHY) {
+        return ServiceStatus.InService;
+      }
+      return ServiceStatus.OutOfService;
+    }
+
+    public enum ServiceStatus {
+      InService,
+      OutOfService;
+    }
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/Keys.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/Keys.java
new file mode 100644
index 00000000000..7ef9746759a
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/Keys.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider;
+
+import com.google.common.base.CaseFormat;
+import com.netflix.frigga.Names;
+import com.netflix.spinnaker.clouddriver.cache.KeyParser;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import groovy.util.logging.Slf4j;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import lombok.Getter;
+import org.springframework.stereotype.Component;
+
+@Slf4j
+@Component("YandexKeys")
+public class Keys implements KeyParser {
+  public static final String KEY_DELIMITER = ":";
+  // Most cache keys follow the pattern
+  // 'yandex:<namespace>:<id>:<account>:<region>:<folder>:<name>'; applications use
+  // 'yandex:applications:<name>' and clusters 'yandex:clusters:<account>:<application>:<name>'
+  // (see keyFor() and parse() below).
+  // every wildcard contains cloud provider and namespace filter...
+  public static final String APPLICATION_WILDCARD = Keys.getApplicationKey("*");
+  public static final String CLUSTER_WILDCARD = Keys.getClusterKey("*", "*", "*");
+  public static final String IMAGE_WILDCARD = Keys.getImageKey("*", "*", "*", "*");
+  public static final String LOAD_BALANCER_WILDCARD = Keys.getLoadBalancerKey("*", "*", "*", "*");
+  public static final String NETWORK_WILDCARD = Keys.getNetworkKey("*", "*", "*", "*");
+  public static final String SERVICE_ACCOUNT_WILDCARD = Keys.getServiceAccount("*", "*", "*", "*");
+  public static final String SUBNET_WILDCARD = Keys.getSubnetKey("*", "*", "*", "*");
+
+  @Override
+  public String getCloudProvider() {
+    // This is intentionally 'aws'. Refer to todos in SearchController#search for why.
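+    // Search results are currently matched against the 'aws' key; review those todos
+    // before changing this value.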
+ return "aws"; + } + + @Override + public Map parseKey(String key) { + return parse(key); + } + + @Override + public Boolean canParseType(final String type) { + return Stream.of(Namespace.values()).anyMatch(it -> it.getNs().equals(type)); + } + + @Override + public Boolean canParseField(String field) { + return false; + } + + @Nullable + public static Map parse(String key) { + String[] parts = key.split(KEY_DELIMITER); + + if (parts.length < 2 || !parts[0].equals(YandexCloudProvider.ID)) { + return null; + } + + Map result = new HashMap<>(); + result.put("provider", parts[0]); + result.put("type", parts[1]); + + Namespace namespace = Namespace.from(parts[1]); + switch (namespace) { + case CLUSTERS: + { + result.put("account", parts[2]); + result.put("application", parts[3]); + result.put("name", parts[4]); + Names names = Names.parseName(parts[4]); + result.put("cluster", names.getCluster()); + result.put("stack", names.getStack()); + result.put("detail", names.getDetail()); + break; + } + case APPLICATIONS: + result.put("name", parts[2]); + break; + case INSTANCES: + case LOAD_BALANCERS: + case NETWORKS: + case SERVER_GROUPS: + case SUBNETS: + case IMAGES: + result.put("id", parts[2]); + result.put("account", parts[3]); + result.put("region", parts[4]); + result.put("folder", parts[5]); + result.put("name", parts.length < 7 ? "" : parts[6]); + break; + case ON_DEMAND: + break; + } + + return result; + } + + public static String getApplicationKey(String name) { + return keyFor(Namespace.APPLICATIONS, name); + } + + public static String getClusterKey(String account, String application, String clusterName) { + return keyFor(Namespace.CLUSTERS, account, application, clusterName); + } + + public static String getNetworkKey(String account, String id, String folderId, String name) { + return keyFor(Namespace.NETWORKS, id, account, folderId, name); + } + + public static String getSubnetKey(String account, String id, String folderId, String name) { + return keyFor(Namespace.SUBNETS, id, account, YandexCloudProvider.REGION, folderId, name); + } + + public static String getLoadBalancerKey(String account, String id, String folderId, String name) { + return keyFor( + Namespace.LOAD_BALANCERS, id, account, YandexCloudProvider.REGION, folderId, name); + } + + public static String getInstanceKey(String account, String id, String folderId, String name) { + return keyFor(Namespace.INSTANCES, id, account, YandexCloudProvider.REGION, folderId, name); + } + + public static String getServerGroupKey(String account, String id, String folderId, String name) { + return keyFor(Namespace.SERVER_GROUPS, id, account, YandexCloudProvider.REGION, folderId, name); + } + + public static String getImageKey(String account, String id, String folderId, String name) { + return keyFor(Namespace.IMAGES, id, account, YandexCloudProvider.REGION, folderId, name); + } + + public static String getServiceAccount(String account, String id, String folderId, String name) { + return keyFor( + Namespace.SERVICE_ACCOUNT, id, account, YandexCloudProvider.REGION, folderId, name); + } + + private static String keyFor(Namespace namespace, String... 
parts) { + StringBuilder builder = + new StringBuilder(YandexCloudProvider.ID + KEY_DELIMITER).append(namespace); + for (String part : parts) { + builder.append(KEY_DELIMITER).append(part); + } + return builder.toString(); + } + + public enum Namespace { + APPLICATIONS, + CLUSTERS, + INSTANCES, + LOAD_BALANCERS, + NETWORKS, + SERVER_GROUPS, + SUBNETS, + IMAGES, + SERVICE_ACCOUNT, + ON_DEMAND; + + @Getter private final String ns; + + Namespace() { + this.ns = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name()); // FOO_BAR -> fooBar + } + + @Override + public String toString() { + return ns; + } + + public static Namespace from(String ns) { + return Stream.of(values()) + .filter(namespace -> namespace.ns.equals(ns)) + .findAny() + .orElseThrow(IllegalArgumentException::new); + } + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/YandexInfrastructureProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/YandexInfrastructureProvider.java new file mode 100644 index 00000000000..30919b7041e --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/YandexInfrastructureProvider.java @@ -0,0 +1,100 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider;
+
+import com.netflix.spinnaker.cats.agent.Agent;
+import com.netflix.spinnaker.cats.agent.AgentSchedulerAware;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.clouddriver.cache.SearchableProvider;
+import java.util.*;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@Getter
+public class YandexInfrastructureProvider extends AgentSchedulerAware
+    implements SearchableProvider {
+  private final Collection<Agent> agents;
+  private final String providerName = YandexInfrastructureProvider.class.getName();
+  private final Set<String> defaultCaches =
+      new HashSet<>(
+          Arrays.asList(
+              Keys.Namespace.APPLICATIONS.getNs(),
+              Keys.Namespace.CLUSTERS.getNs(),
+              Keys.Namespace.INSTANCES.getNs(),
+              Keys.Namespace.LOAD_BALANCERS.getNs(),
+              Keys.Namespace.SERVER_GROUPS.getNs()));
+  private final Map<String, String> urlMappingTemplates =
+      Collections.singletonMap(Keys.Namespace.CLUSTERS.getNs(), "/serverGroups/$name");
+  // (SECURITY_GROUPS.ns): '/securityGroups/$account/$provider/$name?region=$region'
+
+  // final Map urlMappingTemplates = [
+  //   (Keys.Namespace.SERVER_GROUPS.ns) :
+  //     '/applications/${application.toLowerCase()}/clusters/$account/$cluster/$provider/serverGroups/$serverGroup?region=$region',
+  //   (Keys.Namespace.LOAD_BALANCERS.ns): '/$provider/loadBalancers/$loadBalancer',
+  //   (Keys.Namespace.CLUSTERS.ns) :
+  //     '/applications/${application.toLowerCase()}/clusters/$account/$cluster'
+  // ].asImmutable()
+
+  private final Map<SearchableResource, SearchResultHydrator> searchResultHydrators =
+      new HashMap<>();
+
+  public YandexInfrastructureProvider(Collection<Agent> agents) {
+    this.agents = agents;
+    registerHydrator(Keys.Namespace.INSTANCES);
+    registerHydrator(Keys.Namespace.SERVER_GROUPS);
+    registerHydrator(Keys.Namespace.CLUSTERS);
+  }
+
+  private void registerHydrator(Keys.Namespace ns) {
+    searchResultHydrators.put(
+        new SearchableResource(ns.getNs().toLowerCase(), "yandex"),
+        new NamespaceResultHydrator(ns));
+  }
+
+  @Override
+  public Map<String, String> parseKey(String key) {
+    return Keys.parse(key);
+  }
+
+  @AllArgsConstructor
+  private static class NamespaceResultHydrator implements SearchResultHydrator {
+    Keys.Namespace namespace;
+
+    @Override
+    public Map<String, String> hydrateResult(
+        Cache cacheView, Map<String, String> result, String id) {
+      Map<String, String> hydrated = new HashMap<>(result);
+      Optional.ofNullable(cacheView.get(namespace.getNs(), id))
+          .map(CacheData::getRelationships)
+          .map(r -> Keys.parse(r.get(Keys.Namespace.CLUSTERS.getNs()).iterator().next()))
+          .ifPresent(
+              cluster -> {
+                hydrated.put("application", cluster.get("application"));
+                hydrated.put("cluster", cluster.get("cluster"));
+              });
+      return hydrated;
+    }
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/AbstractYandexCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/AbstractYandexCachingAgent.java
new file mode 100644
index 00000000000..635e2d63145
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/AbstractYandexCachingAgent.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.AccountAware;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.agent.CachingAgent;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.yandex.CacheResultBuilder;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.provider.YandexInfrastructureProvider;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import lombok.Getter;
+
+@Getter
+public abstract class AbstractYandexCachingAgent<T> implements CachingAgent, AccountAware {
+  static final TypeReference<Map<String, Object>> MAP_TYPE_REFERENCE =
+      new TypeReference<Map<String, Object>>() {};
+
+  private final String providerName = YandexInfrastructureProvider.class.getName();
+  protected YandexCloudCredentials credentials;
+  private ObjectMapper objectMapper;
+  protected final YandexCloudFacade yandexCloudFacade;
+
+  AbstractYandexCachingAgent(
+      YandexCloudCredentials credentials,
+      ObjectMapper objectMapper,
+      YandexCloudFacade yandexCloudFacade) {
+    this.credentials = credentials;
+    this.objectMapper = objectMapper;
+    this.yandexCloudFacade = yandexCloudFacade;
+  }
+
+  @Override
+  public String getAgentType() {
+    return getAccountName() + "/" + getClass().getSimpleName();
+  }
+
+  @Override
+  public Set<AgentDataType> getProvidedDataTypes() {
+    return Collections.singleton(AgentDataType.Authority.AUTHORITATIVE.forType(getType()));
+  }
+
+  String getFolder() {
+    return credentials == null ? null : credentials.getFolder();
+  }
+
+  @Override
+  public String getAccountName() {
+    return credentials == null ? null : credentials.getName();
+  }
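+
+  // Added commentary (not part of the original change): entities are cached as Jackson
+  // attribute maps, so convert(T) and convert(CacheData, Class) below form a
+  // serialize/deserialize round trip, e.g. convert(image) produces the attribute map stored
+  // in DefaultCacheData and convert(cacheData, YandexCloudImage.class) restores the model.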
+  public Map<String, Object> convert(T object) {
+    return getObjectMapper().convertValue(object, MAP_TYPE_REFERENCE);
+  }
+
+  protected Map<String, Collection<String>> getRelationships(T entity) {
+    return Collections.emptyMap();
+  }
+
+  protected CacheData build(String key, T entity) {
+    return new DefaultCacheData(key, convert(entity), getRelationships(entity));
+  }
+
+  protected <V> V convert(CacheData cacheData, Class<V> clazz) {
+    return getObjectMapper().convertValue(cacheData.getAttributes(), clazz);
+  }
+
+  protected abstract List<T> loadEntities(ProviderCache providerCache);
+
+  protected abstract String getKey(T entity);
+
+  protected abstract String getType();
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    List<CacheData> cacheData =
+        loadEntities(providerCache).stream()
+            .map(entity -> build(getKey(entity), entity))
+            .collect(Collectors.toList());
+    return new DefaultCacheResult(Collections.singletonMap(getType(), cacheData));
+  }
+
+  void moveOnDemandDataToNamespace(CacheResultBuilder cacheResultBuilder, String key)
+      throws IOException {
+    Map<String, List<DefaultCacheData>> onDemandData =
+        getObjectMapper()
+            .readValue(
+                (String)
+                    cacheResultBuilder
+                        .getOnDemand()
+                        .getToKeep()
+                        .get(key)
+                        .getAttributes()
+                        .get("cacheResults"),
+                new TypeReference<Map<String, List<DefaultCacheData>>>() {});
+    onDemandData.forEach(
+        (namespace, cacheDatas) -> {
+          if (namespace.equals(Keys.Namespace.ON_DEMAND.getNs())) {
+            return;
+          }
+
+          cacheDatas.forEach(
+              cacheData -> {
+                CacheResultBuilder.CacheDataBuilder keep =
+                    cacheResultBuilder.namespace(namespace).keep(cacheData.getId());
+                keep.setAttributes(cacheData.getAttributes());
+                keep.setRelationships(merge(keep.getRelationships(), cacheData.getRelationships()));
+                cacheResultBuilder.getOnDemand().getToKeep().remove(cacheData.getId());
+              });
+        });
+  }
+
+  private static Map<String, Collection<String>> merge(
+      Map<String, Collection<String>> keep, Map<String, Collection<String>> onDemand) {
+    Map<String, Collection<String>> result = new HashMap<>(keep);
+    onDemand.forEach((k, v) -> result.merge(k, v, (o, n) -> o.addAll(n) ? o : n));
+    return result;
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/ImageDescriptorParser.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/ImageDescriptorParser.java
new file mode 100644
index 00000000000..d827a816484
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/ImageDescriptorParser.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import com.google.common.base.Splitter;
+import com.netflix.frigga.ami.AppVersion;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import javax.annotation.Nullable;
+
+public class ImageDescriptorParser {
+  private static final Splitter.MapSplitter IMAGE_DESCRIPTION_SPLITTER =
+      Splitter.on(',').withKeyValueSeparator(": ");
+
+  // The description is expected to be a comma-separated "key: value" list, e.g. (illustrative)
+  //   "appversion: mypkg-1.0.0-h42.abc1234/my-build-job/42, build_host: https://jenkins.example/"
+  // where "appversion" follows the frigga AppVersion convention parsed below.
+  public static Map<String, Object> createBuildInfo(@Nullable String imageDescription) {
+    if (imageDescription == null) {
+      return Collections.emptyMap();
+    }
+    Map<String, String> tags;
+    try {
+      tags = IMAGE_DESCRIPTION_SPLITTER.split(imageDescription);
+    } catch (IllegalArgumentException e) {
+      return Collections.emptyMap();
+    }
+    if (!tags.containsKey("appversion")) {
+      return Collections.emptyMap();
+    }
+    AppVersion appversion = AppVersion.parseName(tags.get("appversion"));
+    if (appversion == null) {
+      return Collections.emptyMap();
+    }
+    Map<String, Object> buildInfo = new HashMap<>();
+    buildInfo.put("package_name", appversion.getPackageName());
+    buildInfo.put("version", appversion.getVersion());
+    buildInfo.put("commit", appversion.getCommit());
+    if (appversion.getBuildJobName() != null) {
+      Map<String, Object> jenkinsInfo = new HashMap<>();
+      jenkinsInfo.put("name", appversion.getBuildJobName());
+      jenkinsInfo.put("number", appversion.getBuildNumber());
+      if (tags.containsKey("build_host")) {
+        jenkinsInfo.put("host", tags.get("build_host"));
+      }
+      buildInfo.put("jenkins", jenkinsInfo);
+    }
+    if (tags.containsKey("build_info_url")) {
+      buildInfo.put("buildInfoUrl", tags.get("build_info_url"));
+    }
+    return buildInfo;
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexImageCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexImageCachingAgent.java
new file mode 100644
index 00000000000..ca21f55fa15
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexImageCachingAgent.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.*;
+
+public class YandexImageCachingAgent extends AbstractYandexCachingAgent<YandexCloudImage> {
+  private static final String TYPE = Keys.Namespace.IMAGES.getNs();
+
+  public YandexImageCachingAgent(
+      YandexCloudCredentials credentials,
+      ObjectMapper objectMapper,
+      YandexCloudFacade yandexCloudFacade) {
+    super(credentials, objectMapper, yandexCloudFacade);
+  }
+
+  @Override
+  protected String getType() {
+    return TYPE;
+  }
+
+  @Override
+  protected List<YandexCloudImage> loadEntities(ProviderCache providerCache) {
+    List<YandexCloudImage> images = yandexCloudFacade.getImages(credentials, getFolder());
+    images.addAll(yandexCloudFacade.getImages(credentials, "standard-images"));
+    return images;
+  }
+
+  @Override
+  protected String getKey(YandexCloudImage image) {
+    return Keys.getImageKey(getAccountName(), image.getId(), getFolder(), image.getName());
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexInstanceCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexInstanceCachingAgent.java
new file mode 100644
index 00000000000..2c0cc2e5672
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexInstanceCachingAgent.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Strings;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.AgentDataType.Authority;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudInstance;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class YandexInstanceCachingAgent extends AbstractYandexCachingAgent<YandexCloudInstance> {
+  private static final String TYPE = Keys.Namespace.INSTANCES.getNs();
+
+  public YandexInstanceCachingAgent(
+      YandexCloudCredentials credentials,
+      ObjectMapper objectMapper,
+      YandexCloudFacade yandexCloudFacade) {
+    super(credentials, objectMapper, yandexCloudFacade);
+  }
+
+  @Override
+  public Set<AgentDataType> getProvidedDataTypes() {
+    Set<AgentDataType> authoritative = new HashSet<>(super.getProvidedDataTypes());
+    Collections.addAll(
+        authoritative,
+        Authority.INFORMATIVE.forType(Keys.Namespace.CLUSTERS.getNs()),
+        Authority.INFORMATIVE.forType(Keys.Namespace.LOAD_BALANCERS.getNs()));
+    return authoritative;
+  }
+
+  @Override
+  protected List<YandexCloudInstance> loadEntities(ProviderCache providerCache) {
+    return yandexCloudFacade.getInstances(credentials).stream()
+        .peek(instance -> linkWithLoadBalancers(instance, providerCache))
+        .collect(Collectors.toList());
+  }
+
+  @Override
+  protected String getKey(YandexCloudInstance instance) {
+    return Keys.getInstanceKey(getAccountName(), instance.getId(), getFolder(), instance.getName());
+  }
+
+  @Override
+  protected String getType() {
+    return TYPE;
+  }
+
+  private void linkWithLoadBalancers(YandexCloudInstance instance, ProviderCache providerCache) {
+    providerCache.getAll(Keys.Namespace.LOAD_BALANCERS.getNs()).stream()
+        .map(cacheData -> convert(cacheData, YandexCloudLoadBalancer.class))
+        .filter(
+            balancer ->
+                balancer.getHealths().values().stream()
+                    .flatMap(Collection::stream)
+                    .anyMatch(instance::containsAddress))
+        .forEach(instance::linkWithLoadBalancer);
+  }
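+
+  // Added commentary (not part of the original change): relationships are derived from
+  // instance labels, e.g. an instance labeled spinnaker-application=demo and
+  // spinnaker-cluster=demo-main is linked to the "demo" application key and the
+  // "demo-main" cluster key; unlabeled instances fall back to their own name (or id).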
+  @Override
+  protected Map<String, Collection<String>> getRelationships(YandexCloudInstance instance) {
+    Map<String, Collection<String>> relationships = new HashMap<>();
+    String defaultName =
+        Strings.isNullOrEmpty(instance.getName()) ? instance.getId() : instance.getName();
+    String applicationName =
+        instance.getLabels().getOrDefault("spinnaker-application", defaultName);
+    String applicationKey = Keys.getApplicationKey(applicationName);
+    relationships.put(
+        Keys.Namespace.APPLICATIONS.getNs(), Collections.singletonList(applicationKey));
+
+    String clusterName = instance.getLabels().getOrDefault("spinnaker-cluster", defaultName);
+    String clusterKey = Keys.getClusterKey(getAccountName(), applicationName, clusterName);
+    relationships.put(Keys.Namespace.CLUSTERS.getNs(), Collections.singletonList(clusterKey));
+    return relationships;
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkCachingAgent.java
new file mode 100644
index 00000000000..03fd31a20cd
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkCachingAgent.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.AgentDataType.Authority;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudNetwork;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+public class YandexNetworkCachingAgent extends AbstractYandexCachingAgent<YandexCloudNetwork> {
+  private static final String TYPE = Keys.Namespace.NETWORKS.getNs();
+
+  public YandexNetworkCachingAgent(
+      YandexCloudCredentials credentials,
+      ObjectMapper objectMapper,
+      YandexCloudFacade yandexCloudFacade) {
+    super(credentials, objectMapper, yandexCloudFacade);
+  }
+
+  @Override
+  public Set<AgentDataType> getProvidedDataTypes() {
+    return Collections.singleton(Authority.AUTHORITATIVE.forType(TYPE));
+  }
+
+  @Override
+  protected String getType() {
+    return TYPE;
+  }
+
+  @Override
+  protected List<YandexCloudNetwork> loadEntities(ProviderCache providerCache) {
+    return yandexCloudFacade.getNetworks(credentials);
+  }
+
+  @Override
+  protected String getKey(YandexCloudNetwork network) {
+    return Keys.getNetworkKey(
+        network.getAccount(), network.getId(), credentials.getFolder(), network.getName());
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkLoadBalancerCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkLoadBalancerCachingAgent.java
new file mode 100644
index 00000000000..4dddb3dba72
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkLoadBalancerCachingAgent.java
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandType;
+import com.netflix.spinnaker.clouddriver.yandex.CacheResultBuilder;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+
+@Getter
+public class YandexNetworkLoadBalancerCachingAgent
+    extends AbstractYandexCachingAgent<YandexCloudLoadBalancer> implements OnDemandAgent {
+  private static final String TYPE = Keys.Namespace.LOAD_BALANCERS.getNs();
+  private static final String ON_DEMAND_NS = Keys.Namespace.ON_DEMAND.getNs();
+
+  private String onDemandAgentType = getAgentType() + "-OnDemand";
+  private final OnDemandMetricsSupport metricsSupport;
+
+  public YandexNetworkLoadBalancerCachingAgent(
+      YandexCloudCredentials credentials,
+      ObjectMapper objectMapper,
+      Registry registry,
+      YandexCloudFacade yandexCloudFacade) {
+    super(credentials, objectMapper, yandexCloudFacade);
+    this.metricsSupport =
+        new OnDemandMetricsSupport(
+            registry, this, YandexCloudProvider.ID + ":" + OnDemandType.LoadBalancer);
+  }
+
+  @Override
+  public boolean handles(OnDemandType type, String cloudProvider) {
+    return type.equals(OnDemandType.LoadBalancer) && cloudProvider.equals(YandexCloudProvider.ID);
+  }
+
+  @Override
+  public OnDemandResult handle(ProviderCache providerCache, Map<String, ?> data) {
+    RefreshRequest request = getObjectMapper().convertValue(data, RefreshRequest.class);
+    if (request.getLoadBalancerName() == null || !getAccountName().equals(request.getAccount())) {
+      return null;
+    }
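+
+    // Added commentary (not part of the original change): the on-demand payload is expected
+    // to carry at least the balancer name and account, e.g.
+    //   {"loadBalancerName": "demo-lb", "account": "my-yandex-account", "evict": false}
+    // Requests without a name, or for another account, were already rejected above.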
+    OnDemandResult result = new OnDemandResult();
+    if (Boolean.TRUE.equals(request.getEvict())) {
+      String pattern =
+          Keys.getLoadBalancerKey(getAccountName(), "*", "*", request.getLoadBalancerName());
+      Collection<String> keys = providerCache.filterIdentifiers(TYPE, pattern);
+      result.setEvictions(Collections.singletonMap(TYPE, keys));
+    } else {
+      YandexCloudLoadBalancer loadBalancer =
+          metricsSupport.readData(
+              () -> yandexCloudFacade.getLoadBalancer(credentials, request.getLoadBalancerName()));
+
+      CacheResult cacheResult =
+          metricsSupport.transformData(
+              () -> {
+                CacheResultBuilder cacheResultBuilder = new CacheResultBuilder();
+                cacheResultBuilder.setStartTime(Long.MAX_VALUE);
+                return buildCacheResult(
+                    cacheResultBuilder, Collections.singletonList(loadBalancer));
+              });
+
+      metricsSupport.onDemandStore(
+          () -> {
+            CacheStats stats =
+                new CacheStats(System.currentTimeMillis(), asString(cacheResult), 0, null);
+            Map<String, Object> attributes =
+                getObjectMapper().convertValue(stats, MAP_TYPE_REFERENCE);
+            DefaultCacheData cacheData =
+                new DefaultCacheData(
+                    getKey(loadBalancer),
+                    (int) TimeUnit.MINUTES.toSeconds(10),
+                    attributes,
+                    Collections.emptyMap());
+            providerCache.putCacheData(ON_DEMAND_NS, cacheData);
+            return null;
+          });
+      result.setCacheResult(cacheResult);
+    }
+    return result;
+  }
+
+  private String asString(CacheResult result) {
+    try {
+      return getObjectMapper().writeValueAsString(result.getCacheResults());
+    } catch (JsonProcessingException ignored) {
+      return null;
+    }
+  }
+
+  @Override
+  public Collection<Map<String, ?>> pendingOnDemandRequests(ProviderCache providerCache) {
+    List<String> ownedKeys =
+        providerCache.getIdentifiers(ON_DEMAND_NS).stream()
+            .filter(this::keyOwnedByThisAgent)
+            .collect(Collectors.toList());
+
+    return providerCache.getAll(ON_DEMAND_NS, ownedKeys).stream()
+        .map(
+            cacheData -> {
+              Map<String, String> details = Keys.parse(cacheData.getId());
+              CacheStats stats = convertToCacheStats(cacheData);
+              Map<String, Object> map = new HashMap<>();
+              map.put("details", details);
+              map.put("moniker", convertOnDemandDetails(details));
+              map.put("cacheTime", stats.getCacheTime());
+              map.put("processedCount", stats.getProcessedCount());
+              map.put("processedTime", stats.getProcessedTime());
+              return map;
+            })
+        .collect(Collectors.toList());
+  }
+
+  private boolean keyOwnedByThisAgent(String key) {
+    Map<String, String> parsedKey = Keys.parse(key);
+    return parsedKey != null && parsedKey.get("type").equals(TYPE);
+  }
+
+  @Override
+  protected List<YandexCloudLoadBalancer> loadEntities(ProviderCache providerCache) {
+    return yandexCloudFacade.getLoadBalancers(credentials);
+  }
+
+  @Override
+  protected String getKey(YandexCloudLoadBalancer loadBalancer) {
+    return Keys.getLoadBalancerKey(
+        getAccountName(), loadBalancer.getId(), getFolder(), loadBalancer.getName());
+  }
+
+  @Override
+  protected String getType() {
+    return TYPE;
+  }
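+
+  // Added commentary (not part of the original change): each pending on-demand entry stores a
+  // CacheStats-shaped attribute map, roughly
+  //   {"cacheTime": <epoch millis>, "cacheResults": "<serialized json>", "processedCount": 0}
+  // loadData() below uses cacheTime and processedCount to decide what to keep or evict.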
Furthermore, cache data that hasn't been moved to the proper + // namespace needs to be + // updated in the ON_DEMAND cache, so don't evict data without a processedCount > 0. + CacheResultBuilder.CacheMutation onDemand = cacheResultBuilder.getOnDemand(); + CacheStats stats = convertToCacheStats(cacheData); + if (stats.cacheTime < cacheResultBuilder.getStartTime() && stats.processedCount > 0) { + onDemand.getToEvict().add(cacheData.getId()); + } else { + onDemand.getToKeep().put(cacheData.getId(), cacheData); + } + }); + + CacheResult cacheResults = buildCacheResult(cacheResultBuilder, loadBalancers); + if (cacheResults.getCacheResults() != null) { + cacheResults + .getCacheResults() + .getOrDefault(ON_DEMAND_NS, Collections.emptyList()) + .forEach( + cacheData -> { + cacheData.getAttributes().put("processedTime", System.currentTimeMillis()); + cacheData + .getAttributes() + .compute( + "processedCount", (key, count) -> (count != null ? (Long) count : 0) + 1); + }); + } + return cacheResults; + } + + private CacheResult buildCacheResult( + final CacheResultBuilder cacheResultBuilder, List loadBalancers) { + loadBalancers.forEach( + loadBalancer -> { + String loadBalancerKey = getKey(loadBalancer); + if (shouldUseOnDemandData(cacheResultBuilder, loadBalancerKey)) { + try { + moveOnDemandDataToNamespace(cacheResultBuilder, loadBalancerKey); + } catch (IOException e) { + // CatsOnDemandCacheUpdater handles this + throw new UncheckedIOException(e); + } + } else { + CacheResultBuilder.CacheDataBuilder keep = + cacheResultBuilder.namespace(TYPE).keep(loadBalancerKey); + keep.setAttributes(convert(loadBalancer)); + } + }); + return cacheResultBuilder.build(); + } + + private boolean shouldUseOnDemandData( + CacheResultBuilder cacheResultBuilder, String loadBalancerKey) { + Optional stats = + Optional.ofNullable(cacheResultBuilder.getOnDemand().getToKeep().get(loadBalancerKey)) + .map(this::convertToCacheStats); + return stats.isPresent() && stats.get().getCacheTime() >= cacheResultBuilder.getStartTime(); + } + + private CacheStats convertToCacheStats(CacheData cacheData) { + return convert(cacheData, CacheStats.class); + } + + @AllArgsConstructor + @NoArgsConstructor + @Data + static class CacheStats { + Long cacheTime; + String cacheResults; + Integer processedCount; + Long processedTime; + } + + @Data + static class RefreshRequest { + private String account; + private String loadBalancerName; + private Boolean evict; + private String vpcId; + private String region; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexServerGroupCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexServerGroupCachingAgent.java new file mode 100644 index 00000000000..1b8e554116b --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexServerGroupCachingAgent.java @@ -0,0 +1,701 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import static com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup.AutoScalePolicy;
+import static com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup.CpuUtilizationRule;
+import static com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup.CustomRule;
+import static com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup.HealthCheckSpec;
+import static com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup.LoadBalancerIntegration;
+import static com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup.Status;
+import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.APPLICATIONS;
+import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.CLUSTERS;
+import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.IMAGES;
+import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.INSTANCES;
+import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.LOAD_BALANCERS;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.emptySet;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+import static java.util.stream.Collectors.toSet;
+import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.AllocationPolicy;
+import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.InstanceGroup;
+import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.LoadBalancerSpec;
+import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.LoadBalancerState;
+import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.ManagedInstancesState;
+import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.ScalePolicy;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Splitter;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.netflix.spectator.api.Registry;
+import com.netflix.spinnaker.cats.agent.AgentDataType;
+import com.netflix.spinnaker.cats.agent.AgentDataType.Authority;
+import com.netflix.spinnaker.cats.agent.CacheResult;
+import com.netflix.spinnaker.cats.agent.DefaultCacheResult;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.DefaultCacheData;
+import com.netflix.spinnaker.cats.provider.ProviderCache;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandMetricsSupport;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandType;
+import com.netflix.spinnaker.clouddriver.model.HealthState;
+import com.netflix.spinnaker.clouddriver.model.ServerGroup;
+import com.netflix.spinnaker.clouddriver.names.NamerRegistry;
+import com.netflix.spinnaker.clouddriver.yandex.CacheResultBuilder;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudInstance;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import com.netflix.spinnaker.moniker.Moniker;
+import com.netflix.spinnaker.moniker.Namer;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import lombok.Getter;
+import lombok.Value;
+import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass;
+
+@Getter
+public final class YandexServerGroupCachingAgent
+    extends AbstractYandexCachingAgent<YandexCloudServerGroup> implements OnDemandAgent {
+  private static final long GB = 1024 * 1024 * 1024;
+  private static final String ON_DEMAND_TYPE =
+      String.join(":", YandexCloudProvider.ID, OnDemandType.ServerGroup.getValue());
+  private static final Splitter COMMA = Splitter.on(',').omitEmptyStrings().trimResults();
+  private static final String TYPE = Keys.Namespace.SERVER_GROUPS.getNs();
+  private static final String ON_DEMAND_NS = Keys.Namespace.ON_DEMAND.getNs();
+  private static final ImmutableSet<AgentDataType> PROVIDER_DATA_TYPES =
+      ImmutableSet.of(
+          Authority.AUTHORITATIVE.forType(TYPE),
+          Authority.AUTHORITATIVE.forType(CLUSTERS.getNs()),
+          Authority.AUTHORITATIVE.forType(APPLICATIONS.getNs()),
+          Authority.INFORMATIVE.forType(LOAD_BALANCERS.getNs()));
+
+  private String onDemandAgentType = getAgentType() + "-OnDemand";
+  private OnDemandMetricsSupport metricsSupport;
+  private final Namer<YandexCloudServerGroup> naming;
+
+  @Override
+  public Set<AgentDataType> getProvidedDataTypes() {
+    return PROVIDER_DATA_TYPES;
+  }
+
+  public YandexServerGroupCachingAgent(
+      YandexCloudCredentials credentials,
+      Registry registry,
+      ObjectMapper objectMapper,
+      YandexCloudFacade yandexCloudFacade) {
+    super(credentials, objectMapper, yandexCloudFacade);
+    this.metricsSupport = new OnDemandMetricsSupport(registry, this, ON_DEMAND_TYPE);
+    this.naming =
+        NamerRegistry.lookup()
+            .withProvider(YandexCloudProvider.ID)
+            .withAccount(credentials.getName())
+            .withResource(YandexCloudServerGroup.class);
+  }
+
+  @Override
+  protected String getKey(YandexCloudServerGroup serverGroup) {
+    return Keys.getServerGroupKey(
+        getAccountName(), serverGroup.getId(), getFolder(), serverGroup.getName());
+  }
+
+  @Override
+  protected String getType() {
+    return TYPE;
+  }
+
+  @Override
+  public CacheResult loadData(ProviderCache providerCache) {
+    CacheResultBuilder cacheResultBuilder = new CacheResultBuilder(getProvidedDataTypes());
+    cacheResultBuilder.setStartTime(System.currentTimeMillis());
+
+    List<YandexCloudServerGroup> serverGroups = loadEntities(providerCache);
+
+    // If an entry in ON_DEMAND was generated _after_ we started our caching run, add it to the
+    // cacheResultBuilder, since we may use it in buildCacheResult.
+    //
+    // We don't evict things unless they've been processed because Orca, after sending an
+    // on-demand cache refresh, doesn't consider the request "finished" until it calls
+    // pendingOnDemandRequests and sees a processedCount of 1. In a saner world, Orca would
+    // probably just trust that if the key wasn't returned by pendingOnDemandRequests, it must
+    // have been processed. But we don't live in that world.
+    Set<String> serverGroupKeys = serverGroups.stream().map(this::getKey).collect(toSet());
+    providerCache
+        .getAll(ON_DEMAND_NS, serverGroupKeys)
+        .forEach(
+            cacheData -> {
+              long cacheTime = (long) cacheData.getAttributes().get("cacheTime");
+              if (cacheTime < cacheResultBuilder.getStartTime()
+                  && (int) cacheData.getAttributes().get("processedCount") > 0) {
+                cacheResultBuilder.getOnDemand().getToEvict().add(cacheData.getId());
+              } else {
+                cacheResultBuilder.getOnDemand().getToKeep().put(cacheData.getId(), cacheData);
+              }
+            });
+
+    CacheResult cacheResult = buildCacheResult(cacheResultBuilder, serverGroups);
+
+    // For all the ON_DEMAND entries that we marked as 'toKeep' earlier, here we mark them as
+    // processed so that they get evicted in future calls to this method. Why can't we just mark
+    // them as evicted here, though? Why wait for another run?
+    cacheResult
+        .getCacheResults()
+        .getOrDefault(ON_DEMAND_NS, emptyList())
+        .forEach(
+            cacheData -> {
+              cacheData.getAttributes().put("processedTime", System.currentTimeMillis());
+              int processedCount = (Integer) cacheData.getAttributes().get("processedCount");
+              cacheData.getAttributes().put("processedCount", processedCount + 1);
+            });
+
+    return cacheResult;
+  }
+
+  @Override
+  public boolean handles(OnDemandType type, String cloudProvider) {
+    return OnDemandType.ServerGroup.equals(type) && YandexCloudProvider.ID.equals(cloudProvider);
+  }
+
+  @Nullable
+  @Override
+  public OnDemandResult handle(ProviderCache providerCache, Map<String, ?> data) {
+    try {
+      String serverGroupName = (String) data.get("serverGroupName");
+      if (serverGroupName == null || !getAccountName().equals(data.get("account"))) {
+        return null;
+      }
+      Optional<YandexCloudServerGroup> serverGroup =
+          getMetricsSupport().readData(() -> getServerGroup(serverGroupName, providerCache));
+      if (serverGroup.isPresent()) {
+        CacheResultBuilder cacheResultBuilder = new CacheResultBuilder();
+        String serverGroupKey = getKey(serverGroup.get());
+        CacheResult result =
+            getMetricsSupport()
+                .transformData(
+                    () ->
+                        buildCacheResult(cacheResultBuilder, ImmutableList.of(serverGroup.get())));
+        String cacheResults = getObjectMapper().writeValueAsString(result.getCacheResults());
+        CacheData cacheData =
+            getMetricsSupport()
+                .onDemandStore(
+                    () ->
+                        new DefaultCacheData(
+                            serverGroupKey,
+                            /* ttlSeconds= */ (int) Duration.ofMinutes(10).getSeconds(),
+                            ImmutableMap.of(
+                                "cacheTime",
+                                System.currentTimeMillis(),
+                                "cacheResults",
+                                cacheResults,
+                                "processedCount",
+                                0),
+                            /* relationships= */ ImmutableMap.of()));
+        providerCache.putCacheData(ON_DEMAND_NS, cacheData);
+        return new OnDemandResult(
+            getOnDemandAgentType(), result, /* evictions= */ ImmutableMap.of());
+      } else {
+        Collection<String> existingIdentifiers =
+            ImmutableSet.of(
+                Keys.getServerGroupKey(getAccountName(), "*", getFolder(), serverGroupName));
+        providerCache.evictDeletedItems(ON_DEMAND_NS, existingIdentifiers);
+        return new OnDemandResult(
+            getOnDemandAgentType(),
+            new DefaultCacheResult(ImmutableMap.of()),
+            ImmutableMap.of(TYPE, ImmutableList.copyOf(existingIdentifiers)));
+      }
+    } catch (IOException e) {
+      // CatsOnDemandCacheUpdater handles this
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  @Override
+  public Collection<Map<String, ?>> pendingOnDemandRequests(ProviderCache providerCache) {
+    List<String> ownedKeys =
+        providerCache.getIdentifiers(ON_DEMAND_NS).stream()
+            .filter(this::keyOwnedByThisAgent)
+            .collect(
+                Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
+
+    return providerCache.getAll(ON_DEMAND_NS, ownedKeys).stream()
+        .map(
+            cacheData -> {
+              Map<String, Object> map = new HashMap<>();
+              map.put("details", Keys.parse(cacheData.getId()));
+              map.put("moniker", cacheData.getAttributes().get("moniker"));
+              map.put("cacheTime", cacheData.getAttributes().get("cacheTime"));
+              map.put("processedCount", cacheData.getAttributes().get("processedCount"));
+              map.put("processedTime", cacheData.getAttributes().get("processedTime"));
+              return map;
+            })
+        .collect(Collectors.toList());
+  }
+
+  private boolean keyOwnedByThisAgent(String key) {
+    Map<String, String> parsedKey = Keys.parse(key);
+    return parsedKey != null && parsedKey.get("type").equals(TYPE);
+  }
+
+  private CacheResult buildCacheResult(
+      CacheResultBuilder cacheResultBuilder, List<YandexCloudServerGroup> serverGroups) {
+    try {
+      for (YandexCloudServerGroup serverGroup : serverGroups) {
+        Moniker moniker = naming.deriveMoniker(serverGroup);
+        String applicationKey = Keys.getApplicationKey(moniker.getApp());
+        String clusterKey =
+            Keys.getClusterKey(getAccountName(), moniker.getApp(), moniker.getCluster());
+        String serverGroupKey = getKey(serverGroup);
+        Set<String> instanceKeys =
+            serverGroup.getInstances().stream()
+                .map(
+                    instance ->
+                        Keys.getInstanceKey(
+                            getAccountName(), instance.getId(), getFolder(), instance.getName()))
+                .collect(
+                    Collectors.collectingAndThen(Collectors.toSet(), Collections::unmodifiableSet));
+
+        CacheResultBuilder.CacheDataBuilder application =
+            cacheResultBuilder.namespace(APPLICATIONS.getNs()).keep(applicationKey);
+        application.getAttributes().put("name", moniker.getApp());
+        application
+            .getRelationships()
+            .computeIfAbsent(CLUSTERS.getNs(), s -> new ArrayList<>())
+            .add(clusterKey);
+        application
+            .getRelationships()
+            .computeIfAbsent(INSTANCES.getNs(), s -> new ArrayList<>())
+            .addAll(instanceKeys);
+
+        CacheResultBuilder.CacheDataBuilder cluster =
+            cacheResultBuilder.namespace(CLUSTERS.getNs()).keep(clusterKey);
+        cluster.getAttributes().put("name", moniker.getCluster());
+        cluster.getAttributes().put("accountName", getAccountName());
+        cluster.getAttributes().put("moniker", moniker);
+        cluster
+            .getRelationships()
+            .computeIfAbsent(APPLICATIONS.getNs(), s -> new ArrayList<>())
+            .add(applicationKey);
+        cluster
+            .getRelationships()
+            .computeIfAbsent(TYPE, s -> new ArrayList<>())
+            .add(serverGroupKey);
+        cluster
+            .getRelationships()
+            .computeIfAbsent(INSTANCES.getNs(), s -> new ArrayList<>())
+            .addAll(instanceKeys);
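+
+        // Illustrative example (added commentary, assuming frigga-style names): a server
+        // group named "demo-main-v000" in account "acct" derives app "demo" and cluster
+        // "demo-main", so the keys wired above look like "yandex:applications:demo" and
+        // "yandex:clusters:acct:demo:demo-main".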
+        Set<String> loadBalancerKeys =
+            serverGroup.getLoadBalancerIntegration() == null
+                ? Collections.emptySet()
+                : serverGroup.getLoadBalancerIntegration().getBalancers().stream()
+                    .map(
+                        lb ->
+                            Keys.getLoadBalancerKey(
+                                getAccountName(), lb.getId(), getFolder(), lb.getName()))
+                    .collect(toSet());
+        loadBalancerKeys.forEach(
+            key ->
+                cacheResultBuilder
+                    .namespace(LOAD_BALANCERS.getNs())
+                    .keep(key)
+                    .getRelationships()
+                    .computeIfAbsent(TYPE, s -> new ArrayList<>())
+                    .add(serverGroupKey));
+
+        if (shouldUseOnDemandData(cacheResultBuilder, serverGroupKey)) {
+          moveOnDemandDataToNamespace(cacheResultBuilder, serverGroupKey);
+        } else {
+          CacheResultBuilder.CacheDataBuilder serverGroupCacheData =
+              cacheResultBuilder.namespace(TYPE).keep(serverGroupKey);
+          serverGroupCacheData.setAttributes(convert(serverGroup));
+          serverGroupCacheData
+              .getRelationships()
+              .computeIfAbsent(APPLICATIONS.getNs(), s -> new ArrayList<>())
+              .add(applicationKey);
+          serverGroupCacheData
+              .getRelationships()
+              .computeIfAbsent(CLUSTERS.getNs(), s -> new ArrayList<>())
+              .add(clusterKey);
+          serverGroupCacheData
+              .getRelationships()
+              .computeIfAbsent(LOAD_BALANCERS.getNs(), s -> new ArrayList<>())
+              .addAll(loadBalancerKeys);
+          serverGroupCacheData
+              .getRelationships()
+              .computeIfAbsent(INSTANCES.getNs(), s -> new ArrayList<>())
+              .addAll(instanceKeys);
+        }
+      }
+    } catch (IOException e) {
+      // CatsOnDemandCacheUpdater handles this
+      throw new UncheckedIOException(e);
+    }
+
+    return cacheResultBuilder.build();
+  }
+
+  private static boolean shouldUseOnDemandData(
+      CacheResultBuilder cacheResultBuilder, String serverGroupKey) {
+    CacheData cacheData = cacheResultBuilder.getOnDemand().getToKeep().get(serverGroupKey);
+    return cacheData != null
+        && (long) cacheData.getAttributes().get("cacheTime") > cacheResultBuilder.getStartTime();
+  }
+
+  @Override
+  protected List<YandexCloudServerGroup> loadEntities(ProviderCache providerCache) {
+    return constructServerGroups(yandexCloudFacade.getServerGroups(credentials), providerCache);
+  }
+
+  private Optional<YandexCloudServerGroup> getServerGroup(
+      String name, ProviderCache providerCache) {
+    return yandexCloudFacade
+        .getServerGroup(credentials, name)
+        .flatMap(
+            sg ->
+                constructServerGroups(Collections.singletonList(sg), providerCache).stream()
+                    .findAny());
+  }
+
+  private List<YandexCloudServerGroup> constructServerGroups(
+      List<InstanceGroup> instanceGroups, ProviderCache providerCache) {
+    Map<String, YandexCloudInstance> instances =
+        providerCache.getAll(INSTANCES.getNs()).stream()
+            .map(data -> convert(data, YandexCloudInstance.class))
+            .collect(toMap(YandexCloudInstance::getId, Function.identity()));
+    return instanceGroups.stream()
+        .map(
+            group -> {
+              Set<YandexCloudInstance> ownedInstances =
+                  yandexCloudFacade.getServerGroupInstanceIds(credentials, group.getId()).stream()
+                      .map(instances::get)
+                      .filter(Objects::nonNull)
+                      .collect(Collectors.toSet());
+              return createServerGroup(group, ownedInstances, providerCache);
+            })
+        .collect(toList());
+  }
+
+  private YandexCloudServerGroup createServerGroup(
+      InstanceGroup group, Set<YandexCloudInstance> instances, ProviderCache providerCache) {
+    YandexCloudServerGroup serverGroup = new YandexCloudServerGroup();
+    serverGroup.setId(group.getId());
+    serverGroup.setFolder(group.getFolderId());
+    serverGroup.setName(group.getName());
+    serverGroup.setServiceAccountId(group.getServiceAccountId());
+    serverGroup.setType(YandexCloudProvider.ID);
+    serverGroup.setCloudProvider(YandexCloudProvider.ID);
+    serverGroup.setRegion(YandexCloudProvider.REGION);
+    serverGroup.setCreatedTime(group.getCreatedAt().getSeconds() * 1000);
+    Set<String> zones =
+        group.getAllocationPolicy().getZonesList().stream()
+            .map(AllocationPolicy.Zone::getZoneId)
+            .collect(toSet());
+    serverGroup.setZones(zones);
+    serverGroup.setInstances(instances);
+    ManagedInstancesState instancesState = group.getManagedInstancesState();
+    int downCount = countInstanceInState(instances, HealthState.Down);
+    int upCount = countInstanceInState(instances, HealthState.Up);
+    int outOfServiceCount = countInstanceInState(instances, HealthState.OutOfService);
+    serverGroup.setInstanceCounts(
+        ServerGroup.InstanceCounts.builder()
+            .total(
+                (int)
+                    (instancesState.getRunningActualCount()
+                        + instancesState.getRunningOutdatedCount()
+                        + instancesState.getProcessingCount()))
+            .unknown(
+                (int)
+                        (instancesState.getRunningActualCount()
+                            + instancesState.getRunningOutdatedCount())
+                    - downCount
+                    - upCount
+                    - outOfServiceCount)
+            .down(downCount)
+            .up(upCount)
+            .starting((int) instancesState.getProcessingCount())
+            .outOfService(outOfServiceCount)
+            .build());
+    int targetSize = (int) instancesState.getTargetSize();
+    ServerGroup.Capacity.CapacityBuilder capacity =
+        ServerGroup.Capacity.builder().desired(targetSize);
+    if (group.getScalePolicy().hasAutoScale()) {
+      capacity
+          .max((int) group.getScalePolicy().getAutoScale().getMaxSize())
+          .min(
+              (int)
+                  (group.getScalePolicy().getAutoScale().getMinZoneSize()
+                      * group.getAllocationPolicy().getZonesCount()));
+      serverGroup.setAutoScalePolicy(convertAutoScalePolicy(group.getScalePolicy().getAutoScale()));
+    } else {
+      capacity.min(targetSize);
+      capacity.max(targetSize);
+    }
+    serverGroup.setCapacity(capacity.build());
+    serverGroup.setImageSummary(
+        getImageSummary(providerCache, group, group.getInstanceTemplate().getBootDiskSpec()));
+    serverGroup.setImagesSummary(getImagesSummary(providerCache, group));
+
+    serverGroup.setLabels(group.getLabelsMap());
+    serverGroup.setDescription(group.getDescription());
+    serverGroup.setInstanceTemplate(convertInstanceTemplate(group.getInstanceTemplate()));
+    serverGroup.setLaunchConfig(makeLaunchConfig(serverGroup));
+
+    InstanceGroupOuterClass.DeployPolicy deployPolicy = group.getDeployPolicy();
+    serverGroup.setDeployPolicy(
+        new YandexCloudServerGroup.DeployPolicy(
+            deployPolicy.getMaxUnavailable(),
+            deployPolicy.getMaxExpansion(),
+            deployPolicy.getMaxDeleting(),
+            deployPolicy.getMaxCreating(),
+            Duration.ofSeconds(deployPolicy.getStartupDuration().getSeconds())));
+    serverGroup.setStatus(Status.valueOf(group.getStatusValue()));
+    if (group.hasLoadBalancerSpec()) {
+      serverGroup.setLoadBalancerIntegration(
+          convertLoadBalancerIntegration(
+              group.getLoadBalancerState(), group.getLoadBalancerSpec(), providerCache));
+    }
+    if (group.hasHealthChecksSpec()) {
+      List<HealthCheckSpec> specs =
+          group.getHealthChecksSpec().getHealthCheckSpecsList().stream()
+              .map(
+                  hc -> {
+                    HealthCheckSpec.Type type =
+                        hc.hasTcpOptions() ? HealthCheckSpec.Type.TCP : HealthCheckSpec.Type.HTTP;
+                    long port =
+                        type == HealthCheckSpec.Type.TCP
+                            ? hc.getTcpOptions().getPort()
+                            : hc.getHttpOptions().getPort();
+                    String path =
+                        type == HealthCheckSpec.Type.TCP ? "" : hc.getHttpOptions().getPath();
+                    return new HealthCheckSpec(
+                        type,
+                        port,
+                        path,
+                        Duration.ofSeconds(hc.getInterval().getSeconds()),
+                        Duration.ofSeconds(hc.getTimeout().getSeconds()),
+                        hc.getUnhealthyThreshold(),
+                        hc.getHealthyThreshold());
+                  })
+              .collect(toList());
+      serverGroup.setHealthCheckSpecs(specs);
+    }
+    return serverGroup;
+  }
"" : hc.getHttpOptions().getPath(); + return new HealthCheckSpec( + type, + port, + path, + Duration.ofSeconds(hc.getInterval().getSeconds()), + Duration.ofSeconds(hc.getTimeout().getSeconds()), + hc.getUnhealthyThreshold(), + hc.getHealthyThreshold()); + }) + .collect(toList()); + serverGroup.setHealthCheckSpecs(specs); + } + return serverGroup; + } + + // public for test purposes should be refactored to be closer to inverse operation + public static YandexCloudServerGroup.InstanceTemplate convertInstanceTemplate( + InstanceGroupOuterClass.InstanceTemplate template) { + YandexCloudServerGroup.InstanceTemplate.SchedulingPolicy schedulingPolicy = + new YandexCloudServerGroup.InstanceTemplate.SchedulingPolicy( + template.hasSchedulingPolicy() && template.getSchedulingPolicy().getPreemptible()); + return YandexCloudServerGroup.InstanceTemplate.builder() + .description(template.getDescription()) + .labels(template.getLabelsMap()) + .platformId(template.getPlatformId()) + .resourcesSpec( + new YandexCloudServerGroup.ResourcesSpec( + template.getResourcesSpec().getMemory() / GB, + template.getResourcesSpec().getCores(), + template.getResourcesSpec().getCoreFraction() == 0 + ? 100 + : template.getResourcesSpec().getCoreFraction(), + template.getResourcesSpec().getGpus())) + .metadata(template.getMetadataMap()) + .bootDiskSpec(convertAttachedDiskSpec(template.getBootDiskSpec())) + .secondaryDiskSpecs( + template.getSecondaryDiskSpecsList().stream() + .map(YandexServerGroupCachingAgent::convertAttachedDiskSpec) + .collect(Collectors.toList())) + .networkInterfaceSpecs( + template.getNetworkInterfaceSpecsList().stream() + .map(YandexServerGroupCachingAgent::convertNetworkInterfaceSpec) + .collect(Collectors.toList())) + .schedulingPolicy(schedulingPolicy) + .serviceAccountId(template.getServiceAccountId()) + .build(); + } + + private static YandexCloudServerGroup.NetworkInterfaceSpec convertNetworkInterfaceSpec( + InstanceGroupOuterClass.NetworkInterfaceSpec spec) { + return new YandexCloudServerGroup.NetworkInterfaceSpec( + spec.getNetworkId(), + spec.getSubnetIdsList(), + !spec.hasPrimaryV4AddressSpec() + ? null + : new YandexCloudServerGroup.PrimaryAddressSpec( + spec.getPrimaryV4AddressSpec().hasOneToOneNatSpec()), + !spec.hasPrimaryV6AddressSpec() + ? 
+
+  private static YandexCloudServerGroup.AttachedDiskSpec convertAttachedDiskSpec(
+      InstanceGroupOuterClass.AttachedDiskSpec spec) {
+    return new YandexCloudServerGroup.AttachedDiskSpec(
+        YandexCloudServerGroup.AttachedDiskSpec.Mode.valueOf(spec.getModeValue()),
+        spec.getDeviceName(),
+        new YandexCloudServerGroup.AttachedDiskSpec.DiskSpec(
+            spec.getDiskSpec().getDescription(),
+            spec.getDiskSpec().getTypeId(),
+            spec.getDiskSpec().getSize() / GB,
+            spec.getDiskSpec().getImageId(),
+            spec.getDiskSpec().getSnapshotId()));
+  }
+
+  private LoadBalancerIntegration convertLoadBalancerIntegration(
+      LoadBalancerState state, LoadBalancerSpec spec, ProviderCache providerCache) {
+    return new LoadBalancerIntegration(
+        state.getTargetGroupId(),
+        state.getStatusMessage(),
+        new YandexCloudServerGroup.TargetGroupSpec(
+            spec.getTargetGroupSpec().getName(),
+            spec.getTargetGroupSpec().getDescription(),
+            spec.getTargetGroupSpec().getLabelsMap()),
+        convertLoadBalancer(state.getTargetGroupId(), providerCache));
+  }
+
+  private Set<YandexCloudLoadBalancer> convertLoadBalancer(
+      String targetGroupId, ProviderCache providerCache) {
+    if (targetGroupId == null) {
+      return emptySet();
+    }
+
+    String pattern = Keys.getLoadBalancerKey("*", "*", "*", "*");
+    String balancersNs = LOAD_BALANCERS.getNs();
+    Collection<String> identifiers = providerCache.filterIdentifiers(balancersNs, pattern);
+
+    return providerCache.getAll(balancersNs, identifiers).stream()
+        .map(data -> convert(data, YandexCloudLoadBalancer.class))
+        .filter(loadBalancer -> loadBalancer.getHealths().containsKey(targetGroupId))
+        .collect(Collectors.toSet());
+  }
+
+  private static AutoScalePolicy convertAutoScalePolicy(ScalePolicy.AutoScale scalePolicy) {
+    AutoScalePolicy policy = new AutoScalePolicy();
+    policy.setMinZoneSize(scalePolicy.getMinZoneSize());
+    policy.setMaxSize(scalePolicy.getMaxSize());
+    policy.setMeasurementDuration(
+        Duration.ofSeconds(scalePolicy.getMeasurementDuration().getSeconds()));
+    policy.setWarmupDuration(Duration.ofSeconds(scalePolicy.getWarmupDuration().getSeconds()));
+    policy.setStabilizationDuration(
+        Duration.ofSeconds(scalePolicy.getStabilizationDuration().getSeconds()));
+    policy.setInitialSize(scalePolicy.getInitialSize());
+    if (scalePolicy.hasCpuUtilizationRule()) {
+      double utilizationTarget = scalePolicy.getCpuUtilizationRule().getUtilizationTarget();
+      policy.setCpuUtilizationRule(new CpuUtilizationRule(utilizationTarget));
+    }
+    List<CustomRule> customRules =
+        scalePolicy.getCustomRulesList().stream()
+            .map(
+                rule ->
+                    new CustomRule(
+                        CustomRule.RuleType.valueOf(rule.getRuleTypeValue()),
+                        CustomRule.MetricType.valueOf(rule.getMetricTypeValue()),
+                        rule.getMetricName(),
+                        rule.getTarget()))
+            .collect(toList());
+
+    policy.setCustomRules(customRules);
+    return policy;
+  }
+
+  private static int countInstanceInState(
+      Set<YandexCloudInstance> instances, HealthState healthState) {
+    return (int) instances.stream().filter(i -> i.getHealthState() == healthState).count();
+  }
+
+  private ServerGroup.ImagesSummary getImagesSummary(
+      ProviderCache providerCache, InstanceGroup group) {
+    return () ->
+        Stream.concat(
+                Stream.of(group.getInstanceTemplate().getBootDiskSpec()),
+                group.getInstanceTemplate().getSecondaryDiskSpecsList().stream())
+            .map(diskSpec -> getImageSummary(providerCache, group, diskSpec))
+            .collect(toList());
+  }
+ InstanceGroupOuterClass.AttachedDiskSpec diskSpec) { + String imageId = + !Strings.isNullOrEmpty(diskSpec.getDiskSpec().getImageId()) + ? diskSpec.getDiskSpec().getImageId() + : diskSpec.getDiskSpec().getSnapshotId(); + + CacheData cacheData = + providerCache.get(IMAGES.getNs(), Keys.getImageKey("*", imageId, "*", "*")); + if (cacheData == null) { + return new ImageSummary( + group.getName(), imageId, "not-found-" + imageId, emptyMap(), emptyMap()); + } + + YandexCloudImage image = convert(cacheData, YandexCloudImage.class); + return new ImageSummary( + group.getName(), + imageId, + image.getName(), + cacheData.getAttributes(), + ImageDescriptorParser.createBuildInfo(image.getDescription())); + } + + @Value + private static class ImageSummary implements ServerGroup.ImageSummary { + String serverGroupName; + String imageId; + String imageName; + Map image; + Map buildInfo; + } + + private static HashMap makeLaunchConfig(YandexCloudServerGroup serverGroup) { + HashMap launchConfig = new HashMap<>(); + launchConfig.put("createdTime", serverGroup.getCreatedTime()); + + String imageId = serverGroup.getInstanceTemplate().getBootDiskSpec().getDiskSpec().getImageId(); + String snapshotId = + serverGroup.getInstanceTemplate().getBootDiskSpec().getDiskSpec().getSnapshotId(); + launchConfig.put("imageId", imageId != null ? imageId : snapshotId); + + launchConfig.put("launchConfigurationName", serverGroup.getName()); + return launchConfig; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexServiceAccountCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexServiceAccountCachingAgent.java new file mode 100644 index 00000000000..e2de7c82d37 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexServiceAccountCachingAgent.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServiceAccount; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.List; + +public class YandexServiceAccountCachingAgent + extends AbstractYandexCachingAgent { + public static final String TYPE = Keys.Namespace.SERVICE_ACCOUNT.getNs(); + + public YandexServiceAccountCachingAgent( + YandexCloudCredentials credentials, + ObjectMapper objectMapper, + YandexCloudFacade yandexCloudFacade) { + super(credentials, objectMapper, yandexCloudFacade); + } + + @Override + protected String getType() { + return TYPE; + } + + @Override + protected List loadEntities(ProviderCache providerCache) { + return yandexCloudFacade.getServiceAccounts(credentials, getFolder()); + } + + @Override + protected String getKey(YandexCloudServiceAccount sa) { + return Keys.getServiceAccount(sa.getAccount(), sa.getId(), getFolder(), sa.getName()); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexSubnetCachingAgent.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexSubnetCachingAgent.java new file mode 100644 index 00000000000..02418561aed --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexSubnetCachingAgent.java @@ -0,0 +1,51 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.agent; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.provider.ProviderCache; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudSubnet; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.List; + +public class YandexSubnetCachingAgent extends AbstractYandexCachingAgent { + public static final String TYPE = Keys.Namespace.SUBNETS.getNs(); + + public YandexSubnetCachingAgent( + YandexCloudCredentials credentials, + ObjectMapper objectMapper, + YandexCloudFacade yandexCloudFacade) { + super(credentials, objectMapper, yandexCloudFacade); + } + + @Override + protected String getType() { + return TYPE; + } + + @Override + protected List loadEntities(ProviderCache providerCache) { + return yandexCloudFacade.getSubnets(credentials, getFolder()); + } + + @Override + protected String getKey(YandexCloudSubnet subnet) { + return Keys.getSubnetKey(subnet.getAccount(), subnet.getId(), getFolder(), subnet.getName()); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/config/YandexInfrastructureProviderConfig.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/config/YandexInfrastructureProviderConfig.java new file mode 100644 index 00000000000..45fd7fc3826 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/config/YandexInfrastructureProviderConfig.java @@ -0,0 +1,67 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.config; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.cats.agent.Agent; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.ProviderUtils; +import com.netflix.spinnaker.clouddriver.yandex.provider.YandexInfrastructureProvider; +import com.netflix.spinnaker.clouddriver.yandex.provider.agent.*; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.DependsOn; + +@Configuration +public class YandexInfrastructureProviderConfig { + @Bean + @DependsOn("yandexCloudCredentials") + public YandexInfrastructureProvider yandexInfrastructureProvider( + AccountCredentialsRepository accountCredentialsRepository, + YandexCloudFacade yandexCloudFacade, + ObjectMapper objectMapper, + Registry registry) { + objectMapper.enable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); + + Set allAccounts = + ProviderUtils.buildThreadSafeSetOfAccounts( + accountCredentialsRepository, YandexCloudCredentials.class); + List agents = new ArrayList<>(7 * allAccounts.size()); + for (YandexCloudCredentials credentials : allAccounts) { + agents.add(new YandexNetworkCachingAgent(credentials, objectMapper, yandexCloudFacade)); + agents.add(new YandexSubnetCachingAgent(credentials, objectMapper, yandexCloudFacade)); + agents.add(new YandexInstanceCachingAgent(credentials, objectMapper, yandexCloudFacade)); + agents.add( + new YandexServerGroupCachingAgent( + credentials, registry, objectMapper, yandexCloudFacade)); + agents.add( + new YandexNetworkLoadBalancerCachingAgent( + credentials, objectMapper, registry, yandexCloudFacade)); + agents.add(new YandexImageCachingAgent(credentials, objectMapper, yandexCloudFacade)); + agents.add( + new YandexServiceAccountCachingAgent(credentials, objectMapper, yandexCloudFacade)); + } + return new YandexInfrastructureProvider(agents); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/CacheClient.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/CacheClient.java new file mode 100644 index 00000000000..6cef34b6604 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/CacheClient.java @@ -0,0 +1,97 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.view;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.cats.cache.CacheData;
+import com.netflix.spinnaker.cats.cache.CacheFilter;
+import com.netflix.spinnaker.cats.cache.RelationshipCacheFilter;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class CacheClient<T> {
+  private final ObjectMapper objectMapper;
+  private final Cache cacheView;
+  private final Keys.Namespace namespace;
+  private final Class<T> clazz;
+
+  public CacheClient(
+      Cache cacheView, ObjectMapper objectMapper, Keys.Namespace namespace, Class<T> clazz) {
+    this.cacheView = cacheView;
+    this.objectMapper = objectMapper;
+    this.namespace = namespace;
+    this.clazz = clazz;
+  }
+
+  public Optional<T> findOne(String pattern) {
+    return findAll(pattern).stream().findFirst();
+  }
+
+  public List<T> getAll(Collection<String> identifiers) {
+    return cacheView.getAll(namespace.getNs(), identifiers).stream()
+        .map(cacheData -> convert(cacheData.getAttributes()))
+        .collect(Collectors.toList());
+  }
+
+  public Optional<T> get(String identifier) {
+    return Optional.ofNullable(cacheView.get(namespace.getNs(), identifier))
+        .map(cacheData -> convert(cacheData.getAttributes()));
+  }
+
+  public List<T> findAll(String pattern) {
+    Collection<String> identifiers = filterIdentifiers(pattern);
+    return getAll(identifiers);
+  }
+
+  public Set<T> getAll(String cloudFilter) {
+    Collection<String> keys = cacheView.filterIdentifiers(namespace.getNs(), cloudFilter);
+    return cacheView.getAll(namespace.getNs(), keys).stream()
+        .map(cacheData -> convert(cacheData.getAttributes()))
+        .collect(Collectors.toSet());
+  }
+
+  public Collection<String> filterIdentifiers(String key) {
+    return cacheView.filterIdentifiers(namespace.getNs(), key);
+  }
+
+  private T convert(Map<String, Object> attributes) {
+    return objectMapper.convertValue(attributes, clazz);
+  }
+
+  public Collection<String> getRelationKeys(String key, Keys.Namespace relationship) {
+    CacheFilter filter = RelationshipCacheFilter.include(relationship.getNs());
+    CacheData cacheData = cacheView.get(namespace.getNs(), key, filter);
+    return cacheData.getRelationships().getOrDefault(relationship.getNs(), Collections.emptySet());
+  }
+
+  public <E> Set<E> getRelationEntities(String key, Keys.Namespace relationship, Class<E> clazz) {
+    CacheFilter filter = RelationshipCacheFilter.include(relationship.getNs());
+    CacheData cacheData = cacheView.get(namespace.getNs(), key, filter);
+    Collection<String> keys =
+        cacheData.getRelationships().getOrDefault(relationship.getNs(), Collections.emptySet());
+    return cacheView.getAll(relationship.getNs(), keys).stream()
+        .map(c -> objectMapper.convertValue(c.getAttributes(), clazz))
+        .collect(Collectors.toSet());
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexApplicationProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexApplicationProvider.java
new file mode 100644
index 00000000000..a6ca96167eb
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexApplicationProvider.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.view;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.clouddriver.model.ApplicationProvider;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexApplication;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.jetbrains.annotations.NotNull;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+class YandexApplicationProvider implements ApplicationProvider {
+  private final CacheClient<YandexApplication> cacheClient;
+
+  @Autowired
+  YandexApplicationProvider(Cache cacheView, ObjectMapper objectMapper) {
+    this.cacheClient =
+        new CacheClient<>(
+            cacheView, objectMapper, Keys.Namespace.APPLICATIONS, YandexApplication.class);
+  }
+
+  @Override
+  public Set<YandexApplication> getApplications(boolean expand) {
+    Set<YandexApplication> result = cacheClient.getAll(Keys.APPLICATION_WILDCARD);
+    if (expand) {
+      result.forEach(this::updateRelations);
+    }
+    return result;
+  }
+
+  @Override
+  public YandexApplication getApplication(String name) {
+    return cacheClient
+        .findOne(Keys.getApplicationKey(name))
+        .map(this::updateRelations)
+        .orElse(null);
+  }
+
+  public Collection<String> getRelationship(String key, Keys.Namespace namespace) {
+    return cacheClient.getRelationKeys(key, namespace);
+  }
+
+  private YandexApplication updateRelations(YandexApplication application) {
+    String applicationKey = Keys.getApplicationKey(application.getName());
+    application.getClusterNames().putAll(getClusters(applicationKey));
+    application.getInstances().addAll(getInstances(applicationKey));
+    return application;
+  }
+
+  @NotNull
+  private List<Map<String, String>> getInstances(String key) {
+    return cacheClient.getRelationKeys(key, Keys.Namespace.INSTANCES).stream()
+        .map(Keys::parse)
+        .filter(Objects::nonNull)
+        .collect(Collectors.toList());
+  }
+
+  @NotNull
+  private Map<String, Set<String>> getClusters(String key) {
+    return cacheClient.getRelationKeys(key, Keys.Namespace.CLUSTERS).stream()
+        .map(Keys::parse)
+        .filter(Objects::nonNull)
+        .collect(
+            Collectors.groupingBy(
+                parts -> parts.get("account"),
+                Collectors.mapping(parts -> parts.get("name"), Collectors.toSet())));
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexClusterProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexClusterProvider.java
new file mode 100644
index 00000000000..e8cae7e63ef
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexClusterProvider.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.view;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spinnaker.cats.cache.Cache;
+import com.netflix.spinnaker.clouddriver.model.ClusterProvider;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexApplication;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudCluster;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup;
+import com.netflix.spinnaker.clouddriver.yandex.provider.Keys;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class YandexClusterProvider implements ClusterProvider<YandexCloudCluster> {
+  private final CacheClient<YandexCloudCluster> cacheClient;
+  private final YandexApplicationProvider applicationProvider;
+  private final YandexServerGroupProvider serverGroupProvider;
+  private AccountCredentialsProvider accountCredentialsProvider;
+
+  @Autowired
+  public YandexClusterProvider(
+      Cache cacheView,
+      ObjectMapper objectMapper,
+      YandexApplicationProvider applicationProvider,
+      YandexServerGroupProvider serverGroupProvider,
+      AccountCredentialsProvider accountCredentialsProvider) {
+    this.applicationProvider = applicationProvider;
+    this.serverGroupProvider = serverGroupProvider;
+    this.accountCredentialsProvider = accountCredentialsProvider;
+    this.cacheClient =
+        new CacheClient<>(
+            cacheView, objectMapper, Keys.Namespace.CLUSTERS, YandexCloudCluster.class);
+  }
+
+  @Override
+  public String getCloudProviderId() {
+    return YandexCloudProvider.ID;
+  }
+
+  @Override
+  public Map<String, Set<YandexCloudCluster>> getClusters() {
+    return cacheClient.getAll(Keys.CLUSTER_WILDCARD).stream()
+        .collect(Collectors.groupingBy(YandexCloudCluster::getAccountName, Collectors.toSet()));
+  }
+
+  @Override
+  public Map<String, Set<YandexCloudCluster>> getClusterDetails(String applicationName) {
+    return getClusters(applicationName, true);
+  }
+
+  @Override
+  public Map<String, Set<YandexCloudCluster>> getClusterSummaries(String applicationName) {
+    return getClusters(applicationName, false);
+  }
+
+  @Override
+  public Set<YandexCloudCluster> getClusters(String applicationName, String account) {
+    return getClusterDetails(applicationName).get(account);
+  }
+
+  @Override
+  public YandexCloudCluster getCluster(
+      String application, String account, String name, boolean isDetailed) {
+    String clusterKey = Keys.getClusterKey(account, application, name);
+    return cacheClient
+        .findOne(clusterKey)
+        .map(cluster -> buildCluster(clusterKey, cluster, isDetailed))
+        .orElse(null);
+  }
+
+  @Override
+  public YandexCloudCluster getCluster(
+      String applicationName, String accountName, String clusterName) {
+    return
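+        // always resolves the detailed view; buildCluster then attaches server groups
+        // and their load balancers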
+        getCluster(applicationName, accountName, clusterName, true);
+  }
+
+  @Override
+  public YandexCloudServerGroup getServerGroup(
+      String account, String region, String name, boolean includeDetails) {
+    AccountCredentials credentials = accountCredentialsProvider.getCredentials(account);
+    if (!(credentials instanceof YandexCloudCredentials)) {
+      return null;
+    }
+    String pattern =
+        Keys.getServerGroupKey(
+            account, "*", ((YandexCloudCredentials) credentials).getFolder(), name);
+    return serverGroupProvider.findOne(pattern).orElse(null);
+  }
+
+  @Override
+  public YandexCloudServerGroup getServerGroup(String account, String region, String name) {
+    return getServerGroup(account, region, name, true);
+  }
+
+  @Override
+  public boolean supportsMinimalClusters() {
+    return false;
+  }
+
+  private Map<String, Set<YandexCloudCluster>> getClusters(
+      String applicationName, boolean isDetailed) {
+    YandexApplication application = applicationProvider.getApplication(applicationName);
+    String applicationKey = Keys.getApplicationKey(applicationName);
+
+    if (application == null) {
+      return new HashMap<>();
+    }
+
+    Collection<String> clusterKeys =
+        applicationProvider.getRelationship(applicationKey, Keys.Namespace.CLUSTERS);
+    List<YandexCloudCluster> clusters = cacheClient.getAll(clusterKeys);
+
+    return clusters.stream()
+        .map(
+            cluster ->
+                buildCluster(
+                    Keys.getClusterKey(
+                        cluster.getAccountName(), applicationName, cluster.getName()),
+                    cluster,
+                    isDetailed))
+        .collect(Collectors.groupingBy(YandexCloudCluster::getAccountName, Collectors.toSet()));
+  }
+
+  private YandexCloudCluster buildCluster(
+      String key, YandexCloudCluster cluster, boolean isDetailed) {
+    Collection<String> serverGroupKeys =
+        cacheClient.getRelationKeys(key, Keys.Namespace.SERVER_GROUPS);
+    if (serverGroupKeys.isEmpty()) {
+      return cluster;
+    }
+    List<YandexCloudServerGroup> groups = serverGroupProvider.getAll(serverGroupKeys, isDetailed);
+    groups.forEach(
+        group -> {
+          cluster.getServerGroups().add(group);
+          Optional.ofNullable(group.getLoadBalancerIntegration())
+              .map(YandexCloudServerGroup.LoadBalancerIntegration::getBalancers)
+              .ifPresent(lbs -> cluster.getLoadBalancers().addAll(lbs));
+        });
+    return cluster;
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexImageProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexImageProvider.java
new file mode 100644
index 00000000000..dfa13c04ba6
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexImageProvider.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.model.Image; +import com.netflix.spinnaker.clouddriver.model.ImageProvider; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.function.Function; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexImageProvider implements ImageProvider { + private final CacheClient cacheClient; + + @Override + public String getCloudProvider() { + return YandexCloudProvider.ID; + } + + @Autowired + public YandexImageProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheClient = + new CacheClient<>(cacheView, objectMapper, Keys.Namespace.IMAGES, YandexCloudImage.class); + } + + @Override + public Optional getImageById(String imageId) { + return cacheClient.findOne(Keys.getImageKey("*", imageId, "*", "*")).map(Function.identity()); + } + + public Set getAll() { + return cacheClient.getAll(Keys.IMAGE_WILDCARD); + } + + public List findByAccount(String account) { + String imagePattern = Keys.getImageKey(account, "*", "*", "*"); + return cacheClient.findAll(imagePattern); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexInstanceProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexInstanceProvider.java new file mode 100644 index 00000000000..a1d123b99b3 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexInstanceProvider.java @@ -0,0 +1,84 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.model.InstanceProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentials; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudInstance; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import java.util.Optional; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexInstanceProvider implements InstanceProvider { + private AccountCredentialsProvider accountCredentialsProvider; + private YandexCloudFacade yandexCloudFacade; + private final CacheClient cacheClient; + + @Autowired + public YandexInstanceProvider( + Cache cacheView, + AccountCredentialsProvider accountCredentialsProvider, + YandexCloudFacade yandexCloudFacade, + ObjectMapper objectMapper) { + this.yandexCloudFacade = yandexCloudFacade; + this.accountCredentialsProvider = accountCredentialsProvider; + this.cacheClient = + new CacheClient<>( + cacheView, objectMapper, Keys.Namespace.INSTANCES, YandexCloudInstance.class); + } + + @Override + public String getCloudProvider() { + return YandexCloudProvider.ID; + } + + @Override + public YandexCloudInstance getInstance(String account, String region, String name) { + return getAccountCredentials(account) + .map(credentials -> Keys.getInstanceKey(account, "*", credentials.getFolder(), name)) + .flatMap(cacheClient::findOne) + .orElse(null); + } + + @Override + public String getConsoleOutput(String account, String region, String id) { + YandexCloudCredentials credentials = + getAccountCredentials(account) + .orElseThrow(() -> new IllegalArgumentException("Invalid credentials: " + account)); + return Optional.ofNullable(getInstance(account, region, id)) + .map(instance -> yandexCloudFacade.getSerialPortOutput(credentials, instance.getId())) + .orElse(null); + } + + @NotNull + private Optional getAccountCredentials(String account) { + AccountCredentials accountCredentials = accountCredentialsProvider.getCredentials(account); + if (!(accountCredentials instanceof YandexCloudCredentials)) { + return Optional.empty(); + } + return Optional.of((YandexCloudCredentials) accountCredentials); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexLoadBalancerProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexLoadBalancerProvider.java new file mode 100644 index 00000000000..f3d9b8072db --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexLoadBalancerProvider.java @@ -0,0 +1,254 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerInstance; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerProvider; +import com.netflix.spinnaker.clouddriver.model.LoadBalancerServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudInstance; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.model.health.YandexLoadBalancerHealth; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.Data; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexLoadBalancerProvider implements LoadBalancerProvider { + private YandexServerGroupProvider yandexServerGroupProvider; + private final CacheClient cacheClient; + + @Autowired + public YandexLoadBalancerProvider( + Cache cacheView, + ObjectMapper objectMapper, + YandexServerGroupProvider yandexServerGroupProvider) { + this.yandexServerGroupProvider = yandexServerGroupProvider; + this.cacheClient = + new CacheClient<>( + cacheView, objectMapper, Keys.Namespace.LOAD_BALANCERS, YandexCloudLoadBalancer.class); + } + + public String getCloudProvider() { + return YandexCloudProvider.ID; + } + + @Override + public Set getApplicationLoadBalancers(String applicationName) { + String pattern = Keys.getLoadBalancerKey("*", "*", "*", applicationName + "*"); + Collection identifiers = cacheClient.filterIdentifiers(pattern); + if (!Strings.isNullOrEmpty(applicationName)) { + yandexServerGroupProvider.getByApplication(applicationName).stream() + .map(g -> Keys.getServerGroupKey("*", g.getId(), "*", g.getName())) + .map(k -> yandexServerGroupProvider.getLoadBalancersKeys(k)) + .forEach(identifiers::addAll); + } + return identifiers.stream().map(this::loadBalancersFromKey).collect(Collectors.toSet()); + } + + private YandexCloudLoadBalancer loadBalancersFromKey(String key) { + YandexCloudLoadBalancer loadBalancer = cacheClient.findOne(key).get(); + Collection serverGroupKeys = + cacheClient.getRelationKeys(key, Keys.Namespace.SERVER_GROUPS); + if (!serverGroupKeys.isEmpty()) { + List groups = + yandexServerGroupProvider.getAll(serverGroupKeys, false).stream() + .map(serverGroup -> buildLoadBalancerGroup(loadBalancer, serverGroup)) + 
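+                // wraps each cached server group with this balancer's per-instance health view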
.collect(Collectors.toList()); + loadBalancer.getServerGroups().addAll(groups); + } + return loadBalancer; + } + + public List getAll(Collection keys) { + return cacheClient.getAll(keys); + } + + @NotNull + private LoadBalancerServerGroup buildLoadBalancerGroup( + YandexCloudLoadBalancer loadBalancer, YandexCloudServerGroup serverGroup) { + Set instances = + serverGroup.getInstances().stream() + .map(instance -> buildLoadBalancerInstance(loadBalancer, serverGroup, instance)) + .collect(Collectors.toSet()); + + LoadBalancerServerGroup loadBalancerServerGroup = new LoadBalancerServerGroup(); + loadBalancerServerGroup.setCloudProvider(YandexCloudProvider.ID); + loadBalancerServerGroup.setName(serverGroup.getName()); + loadBalancerServerGroup.setRegion(serverGroup.getRegion()); + loadBalancerServerGroup.setIsDisabled(serverGroup.isDisabled()); + loadBalancerServerGroup.setDetachedInstances(Collections.emptySet()); + loadBalancerServerGroup.setInstances(instances); + return loadBalancerServerGroup; + } + + @NotNull + private LoadBalancerInstance buildLoadBalancerInstance( + YandexCloudLoadBalancer loadBalancer, + YandexCloudServerGroup serverGroup, + YandexCloudInstance instance) { + List targetGroups = + loadBalancer + .getHealths() + .getOrDefault( + serverGroup.getLoadBalancerIntegration().getTargetGroupId(), + Collections.emptyList()); + YandexLoadBalancerHealth.Status.ServiceStatus stat = + targetGroups.stream() + .filter(instance::containsAddress) + .findFirst() + .map(health -> health.getStatus().toServiceStatus()) + .orElse(YandexLoadBalancerHealth.Status.ServiceStatus.OutOfService); + return LoadBalancerInstance.builder() + .id(instance.getId()) + .name(instance.getName()) + .zone(instance.getZone()) + .health(Collections.singletonMap("state", stat)) + .build(); + } + + public List list() { + Map> loadBalancerMap = + cacheClient.getAll(Keys.LOAD_BALANCER_WILDCARD).stream() + .collect(Collectors.groupingBy(YandexCloudLoadBalancer::getName)); + return loadBalancerMap.entrySet().stream() + .map(e -> convertToSummary(e.getKey(), e.getValue())) + .collect(Collectors.toList()); + } + + public YandexLoadBalancerAccountRegionSummary get(String name) { + String pattern = Keys.getLoadBalancerKey("*", "*", "*", name); + List balancers = cacheClient.findAll(pattern); + return balancers.isEmpty() ? 
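+        // no load balancer with this name is cached for any account or region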
null : convertToSummary(name, balancers); + } + + @NotNull + private YandexLoadBalancerAccountRegionSummary convertToSummary( + String name, List balancers) { + YandexLoadBalancerAccountRegionSummary summary = new YandexLoadBalancerAccountRegionSummary(); + summary.setName(name); + balancers.stream() + .map(this::buildLoadBalancerSummary) + .forEach( + s -> + summary + .getMappedAccounts() + .computeIfAbsent(s.getAccount(), a -> new YandexLoadBalancerAccount()) + .getMappedRegions() + .computeIfAbsent(s.getRegion(), r -> new YandexLoadBalancerAccountRegion()) + .getLoadBalancers() + .add(s)); + return summary; + } + + @NotNull + private YandexLoadBalancerSummary buildLoadBalancerSummary(YandexCloudLoadBalancer balancer) { + YandexLoadBalancerSummary summary = new YandexLoadBalancerSummary(); + summary.setId(balancer.getId()); + summary.setAccount(balancer.getAccount()); + summary.setName(balancer.getName()); + summary.setRegion(balancer.getRegion()); + return summary; + } + + public List byAccountAndRegionAndName( + String account, String region, String name) { + String pattern = Keys.getLoadBalancerKey(account, "*", "*", name); + return cacheClient.findAll(pattern).stream() + .filter(balancer -> balancer.getRegion().equals(region)) + .map(this::buildLoadBalancerDetails) + .collect(Collectors.toList()); + } + + @NotNull + private YandexLoadBalancerProvider.YandexLoadBalancerDetails buildLoadBalancerDetails( + YandexCloudLoadBalancer balancer) { + return new YandexLoadBalancerDetails( + balancer.getName(), + balancer.getBalancerType(), + balancer.getSessionAffinity().name(), + balancer.getCreatedTime(), + balancer.getListeners()); + } + + @Data + public static class YandexLoadBalancerAccountRegionSummary implements LoadBalancerProvider.Item { + private String name; + + @JsonIgnore private Map mappedAccounts = new HashMap<>(); + + @JsonProperty("accounts") + public List getByAccounts() { + return new ArrayList<>(mappedAccounts.values()); + } + } + + @Data + public static class YandexLoadBalancerAccount implements LoadBalancerProvider.ByAccount { + private String name; + + @JsonIgnore + private Map mappedRegions = new HashMap<>(); + + @JsonProperty("regions") + public List getByRegions() { + return new ArrayList<>(mappedRegions.values()); + } + } + + @Data + private static class YandexLoadBalancerAccountRegion implements LoadBalancerProvider.ByRegion { + private String name; + private List loadBalancers = new ArrayList<>(); + } + + @Data + private static class YandexLoadBalancerSummary implements LoadBalancerProvider.Details { + private String id; + private String account; + private String region; + private String name; + private String type = YandexCloudProvider.ID; + } + + @Data + @AllArgsConstructor + private static class YandexLoadBalancerDetails implements LoadBalancerProvider.Details { + private String loadBalancerName; + YandexCloudLoadBalancer.BalancerType type; + private String sessionAffinity; + private Long createdTime; + private List listeners; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexNetworkProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexNetworkProvider.java new file mode 100644 index 00000000000..41504b1f2df --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexNetworkProvider.java @@ -0,0 +1,49 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.model.NetworkProvider; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudNetwork; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import java.util.Set; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexNetworkProvider implements NetworkProvider { + private final CacheClient cacheClient; + + @Override + public String getCloudProvider() { + return YandexCloudProvider.ID; + } + + @Autowired + public YandexNetworkProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheClient = + new CacheClient<>( + cacheView, objectMapper, Keys.Namespace.NETWORKS, YandexCloudNetwork.class); + } + + @Override + public Set getAll() { + return cacheClient.getAll(Keys.NETWORK_WILDCARD); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexServerGroupProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexServerGroupProvider.java new file mode 100644 index 00000000000..630ef9eff16 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexServerGroupProvider.java @@ -0,0 +1,110 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.INSTANCES; +import static com.netflix.spinnaker.clouddriver.yandex.provider.Keys.Namespace.LOAD_BALANCERS; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.model.HealthState; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudInstance; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import java.util.AbstractMap; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexServerGroupProvider { + private final CacheClient cacheClient; + + @Autowired + public YandexServerGroupProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheClient = + new CacheClient<>( + cacheView, objectMapper, Keys.Namespace.SERVER_GROUPS, YandexCloudServerGroup.class); + } + + public List getByApplication(String applicationName) { + String serverGroupKey = Keys.getServerGroupKey("*", "*", "*", applicationName + "*"); + return cacheClient.findAll(serverGroupKey); + } + + public List getAll(Collection identifiers, boolean isDetailed) { + return identifiers.stream() + .map(key -> new AbstractMap.SimpleEntry<>(key, cacheClient.get(key))) + .filter(pair -> pair.getValue().isPresent()) + .map( + pair -> + isDetailed + ? 
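+                    // detailed view: hydrate the INSTANCES and LOAD_BALANCERS cache
+                    // relationships as well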
fetchRelationships(pair.getKey(), pair.getValue().get()) + : pair.getValue().get()) + .collect(Collectors.toList()); + } + + public Collection getLoadBalancersKeys(String pattern) { + Collection keys = cacheClient.filterIdentifiers(pattern); + return keys.stream() + .map(key -> cacheClient.getRelationKeys(key, Keys.Namespace.LOAD_BALANCERS)) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + } + + public Optional findOne(String pattern) { + return cacheClient.filterIdentifiers(pattern).stream() + .map(this::fetchRelationships) + .findFirst(); + } + + private YandexCloudServerGroup fetchRelationships(String key) { + return cacheClient.findOne(key).map(group -> fetchRelationships(key, group)).orElse(null); + } + + private YandexCloudServerGroup fetchRelationships(String key, YandexCloudServerGroup group) { + group.setInstances(cacheClient.getRelationEntities(key, INSTANCES, YandexCloudInstance.class)); + updateBalancers( + group, cacheClient.getRelationEntities(key, LOAD_BALANCERS, YandexCloudLoadBalancer.class)); + return group; + } + + private void updateBalancers( + YandexCloudServerGroup serverGroup, Set loadBalancers) { + if (serverGroup.getLoadBalancerIntegration() != null + && !Strings.isNullOrEmpty(serverGroup.getLoadBalancerIntegration().getTargetGroupId())) { + Set loadBalancerIds = serverGroup.getLoadBalancersWithHealthChecks().keySet(); + Set attachedBalancers = + loadBalancers.stream() + .filter(loadBalancer -> loadBalancerIds.contains(loadBalancer.getId())) + .collect(Collectors.toSet()); + serverGroup.getLoadBalancerIntegration().setBalancers(attachedBalancers); + + boolean sgEnable = + loadBalancerIds.isEmpty() + || serverGroup.getInstances().stream() + .anyMatch(instance -> instance.getHealthState() == HealthState.Up); + serverGroup.setDisabled(!sgEnable); + } + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexServiceAccountProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexServiceAccountProvider.java new file mode 100644 index 00000000000..70ebe5e80bc --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexServiceAccountProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServiceAccount; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import java.util.Collection; +import java.util.Set; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexServiceAccountProvider { + private final CacheClient cacheClient; + + public String getCloudProvider() { + return YandexCloudProvider.ID; + } + + @Autowired + public YandexServiceAccountProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheClient = + new CacheClient<>( + cacheView, + objectMapper, + Keys.Namespace.SERVICE_ACCOUNT, + YandexCloudServiceAccount.class); + } + + public Set getAll() { + return cacheClient.getAll(Keys.SERVICE_ACCOUNT_WILDCARD); + } + + public Collection findByAccount(String account) { + String pattern = Keys.getServiceAccount(account, "*", "*", "*"); + return cacheClient.findAll(pattern); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexSubnetProvider.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexSubnetProvider.java new file mode 100644 index 00000000000..77fc36ea124 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/provider/view/YandexSubnetProvider.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.provider.view; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.cats.cache.Cache; +import com.netflix.spinnaker.clouddriver.model.SubnetProvider; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudSubnet; +import com.netflix.spinnaker.clouddriver.yandex.provider.Keys; +import java.util.Set; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class YandexSubnetProvider implements SubnetProvider { + private final CacheClient cacheClient; + + @Override + public String getCloudProvider() { + return YandexCloudProvider.ID; + } + + @Autowired + public YandexSubnetProvider(Cache cacheView, ObjectMapper objectMapper) { + this.cacheClient = + new CacheClient<>(cacheView, objectMapper, Keys.Namespace.SUBNETS, YandexCloudSubnet.class); + } + + @Override + public Set getAll() { + return cacheClient.getAll(Keys.SUBNET_WILDCARD); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/YandexCloudCredentials.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/YandexCloudCredentials.java new file mode 100644 index 00000000000..515be807966 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/YandexCloudCredentials.java @@ -0,0 +1,122 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.netflix.spinnaker.clouddriver.yandex.security;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import io.grpc.Channel;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Function;
+import lombok.Data;
+import yandex.cloud.api.compute.v1.ImageServiceGrpc;
+import yandex.cloud.api.compute.v1.InstanceServiceGrpc;
+import yandex.cloud.api.compute.v1.SnapshotServiceGrpc;
+import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceGrpc;
+import yandex.cloud.api.iam.v1.ServiceAccountServiceGrpc;
+import yandex.cloud.api.loadbalancer.v1.NetworkLoadBalancerServiceGrpc;
+import yandex.cloud.api.operation.OperationServiceGrpc;
+import yandex.cloud.api.vpc.v1.NetworkServiceGrpc;
+import yandex.cloud.api.vpc.v1.SubnetServiceGrpc;
+import yandex.cloud.sdk.ServiceFactory;
+
+@Data
+public class YandexCloudCredentials
+    implements AccountCredentials<YandexCloudCredentials.YandexCredentials> {
+  private String name;
+  private String environment;
+  private String accountType;
+
+  private String folder;
+
+  private ServiceFactory serviceFactory;
+
+  @Override
+  @JsonIgnore
+  public YandexCredentials getCredentials() {
+    return null;
+  }
+
+  @Override
+  public String getCloudProvider() {
+    return YandexCloudProvider.ID;
+  }
+
+  @Override
+  public List<String> getRequiredGroupMembership() {
+    return Collections.emptyList();
+  }
+
+  public OperationServiceGrpc.OperationServiceBlockingStub operationService() {
+    return create(
+        OperationServiceGrpc.OperationServiceBlockingStub.class,
+        OperationServiceGrpc::newBlockingStub);
+  }
+
+  public InstanceServiceGrpc.InstanceServiceBlockingStub instanceService() {
+    return create(
+        InstanceServiceGrpc.InstanceServiceBlockingStub.class,
+        InstanceServiceGrpc::newBlockingStub);
+  }
+
+  public InstanceGroupServiceGrpc.InstanceGroupServiceBlockingStub instanceGroupService() {
+    return create(
+        InstanceGroupServiceGrpc.InstanceGroupServiceBlockingStub.class,
+        InstanceGroupServiceGrpc::newBlockingStub);
+  }
+
+  public ImageServiceGrpc.ImageServiceBlockingStub imageService() {
+    return create(
+        ImageServiceGrpc.ImageServiceBlockingStub.class, ImageServiceGrpc::newBlockingStub);
+  }
+
+  public SnapshotServiceGrpc.SnapshotServiceBlockingStub snapshotService() {
+    return create(
+        SnapshotServiceGrpc.SnapshotServiceBlockingStub.class,
+        SnapshotServiceGrpc::newBlockingStub);
+  }
+
+  public NetworkServiceGrpc.NetworkServiceBlockingStub networkService() {
+    return create(
+        NetworkServiceGrpc.NetworkServiceBlockingStub.class, NetworkServiceGrpc::newBlockingStub);
+  }
+
+  public SubnetServiceGrpc.SubnetServiceBlockingStub subnetService() {
+    return create(
+        SubnetServiceGrpc.SubnetServiceBlockingStub.class, SubnetServiceGrpc::newBlockingStub);
+  }
+
+  public NetworkLoadBalancerServiceGrpc.NetworkLoadBalancerServiceBlockingStub
+      networkLoadBalancerService() {
+    return create(
+        NetworkLoadBalancerServiceGrpc.NetworkLoadBalancerServiceBlockingStub.class,
+        NetworkLoadBalancerServiceGrpc::newBlockingStub);
+  }
+
+  public ServiceAccountServiceGrpc.ServiceAccountServiceBlockingStub serviceAccountService() {
+    return create(
+        ServiceAccountServiceGrpc.ServiceAccountServiceBlockingStub.class,
+        ServiceAccountServiceGrpc::newBlockingStub);
+  }
+
+  private <SERVICE extends io.grpc.stub.AbstractStub<SERVICE>> SERVICE create(
+      Class<SERVICE> clazz, Function<Channel, SERVICE> service) {
+    return serviceFactory.create(clazz, service);
+  }
+
+  public static class YandexCredentials {}
+}
diff --git
a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/YandexCloudCredentialsInitializer.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/YandexCloudCredentialsInitializer.java new file mode 100644 index 00000000000..8c4e230354b --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/YandexCloudCredentialsInitializer.java @@ -0,0 +1,83 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.yandex.security; + +import com.google.common.base.MoreObjects; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.CredentialsInitializerSynchronizable; +import com.netflix.spinnaker.clouddriver.yandex.security.config.YandexConfigurationProperties; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.List; +import java.util.stream.Collectors; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.config.ConfigurableBeanFactory; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Scope; +import yandex.cloud.sdk.ChannelFactory; +import yandex.cloud.sdk.ServiceFactory; +import yandex.cloud.sdk.auth.Auth; + +@Configuration +@EnableConfigurationProperties +public class YandexCloudCredentialsInitializer implements CredentialsInitializerSynchronizable { + + @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) + @Bean + @ConfigurationProperties("yandex") + public YandexConfigurationProperties yandexCloudAccountConfig() { + return new YandexConfigurationProperties(); + } + + @Bean + public List yandexCloudCredentials( + YandexConfigurationProperties properties, AccountCredentialsRepository repository) { + return properties.getAccounts().stream() + .map(YandexCloudCredentialsInitializer::convertToCredentials) + .peek(cred -> repository.save(cred.getName(), cred)) + .collect(Collectors.toList()); + } + + @NotNull + private static YandexCloudCredentials convertToCredentials( + YandexConfigurationProperties.Account account) { + YandexCloudCredentials credentials = new YandexCloudCredentials(); + credentials.setFolder(account.getFolder()); + credentials.setName(account.getName()); + credentials.setEnvironment( + MoreObjects.firstNonNull(account.getEnvironment(), account.getName())); + credentials.setAccountType( + MoreObjects.firstNonNull(account.getAccountType(), account.getName())); + credentials.setServiceFactory(makeJDKConfig(account)); + return credentials; + } + + private static ServiceFactory makeJDKConfig(YandexConfigurationProperties.Account account) { + var credProvider = 
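+        // reads the service-account key JSON from disk; an explicit IAM endpoint,
+        // if configured, overrides the SDK default below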
Auth.apiKeyBuilder().fromFile(Paths.get(account.getJsonPath())); + if (account.getIamEndpoint() != null) { + credProvider.cloudIAMEndpoint(account.getIamEndpoint()); + } + return ServiceFactory.builder() + .endpoint( + account.getEndpoint() != null ? account.getEndpoint() : ChannelFactory.DEFAULT_ENDPOINT) + .credentialProvider(credProvider) + .requestTimeout(Duration.ofMinutes(1)) + .build(); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/config/YandexConfigurationProperties.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/config/YandexConfigurationProperties.java new file mode 100644 index 00000000000..cefa9729d99 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/security/config/YandexConfigurationProperties.java @@ -0,0 +1,40 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.clouddriver.yandex.security.config; + +import java.util.ArrayList; +import java.util.List; +import lombok.Data; + +/** + * A mutable credentials configurations structure suitable for transformation into concrete + * credentials implementations. + */ +@Data +public class YandexConfigurationProperties { + private List accounts = new ArrayList<>(); + + @Data + public static class Account { + private String endpoint; + private String iamEndpoint; + private String name; + private String folder; + private String environment; + private String accountType; + private String jsonPath; + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/YandexCloudFacade.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/YandexCloudFacade.java new file mode 100644 index 00000000000..f0e7d3ac975 --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/YandexCloudFacade.java @@ -0,0 +1,515 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.service; + +import static com.netflix.spinnaker.clouddriver.yandex.deploy.ops.AbstractYandexAtomicOperation.single; +import static com.netflix.spinnaker.clouddriver.yandex.deploy.ops.AbstractYandexAtomicOperation.status; + +import com.google.common.base.Strings; +import com.google.protobuf.FieldMask; +import com.google.protobuf.InvalidProtocolBufferException; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.UpsertYandexLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudInstance; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudNetwork; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServiceAccount; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudSubnet; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexLogRecord; +import com.netflix.spinnaker.clouddriver.yandex.model.health.YandexLoadBalancerHealth; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.converter.YandexInstanceGroupConverter; +import com.netflix.spinnaker.clouddriver.yandex.service.converter.YandexLoadBalancerConverter; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import yandex.cloud.api.compute.v1.ImageOuterClass; +import yandex.cloud.api.compute.v1.ImageServiceOuterClass; +import yandex.cloud.api.compute.v1.InstanceServiceOuterClass; +import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass; +import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceOuterClass; +import yandex.cloud.api.iam.v1.ServiceAccountServiceOuterClass; +import yandex.cloud.api.loadbalancer.v1.HealthCheckOuterClass; +import yandex.cloud.api.loadbalancer.v1.NetworkLoadBalancerOuterClass; +import yandex.cloud.api.loadbalancer.v1.NetworkLoadBalancerServiceOuterClass; +import yandex.cloud.api.operation.OperationOuterClass; +import yandex.cloud.api.vpc.v1.NetworkServiceOuterClass; +import yandex.cloud.api.vpc.v1.SubnetServiceOuterClass; + +/** + * Facade to Yandex API. Hides implementation details of Yandex.Cloud Java SDK + * + *

+ * <p>TODO:
+ *
+ * <ul>
+ *   <li>extract the mappers into separate classes
+ *   <li>better status reporting and processing (class member or thread-local)
+ *   <li>do not return Yandex API objects (always convert them to Spinnaker model objects)
+ *   <li>process all list calls the same way, with pagination
+ *   <li>probably substitute 'operationPoller.doSync' with a CompletableFuture
+ * </ul>
+ */
+@Component
+public class YandexCloudFacade {
+  public static final String RESIZE_SERVER_GROUP = "RESIZE_SERVER_GROUP";
+  public static final String REBOOT_INSTANCES = "REBOOT_INSTANCES";
+  public static final String DELETE_LOAD_BALANCER = "DELETE_LOAD_BALANCER";
+  public static final String UPSERT_IMAGE_TAGS = "UPSERT_IMAGE_TAGS";
+  public static final String UPSERT_LOAD_BALANCER = "UPSERT_LOAD_BALANCER";
+  public static final String MODIFY_INSTANCE_GROUP = "MODIFY_INSTANCE_GROUP";
+  public static final String DESTROY_SERVER_GROUP = "DESTROY_SERVER_GROUP";
+
+  @Autowired private YandexOperationPoller operationPoller;
+
+  public List<YandexCloudImage> getImages(YandexCloudCredentials credentials, String folder) {
+    return getImages(credentials, folder, null);
+  }
+
+  public YandexCloudImage getImage(YandexCloudCredentials credentials, String imageName) {
+    return getImages(credentials, credentials.getFolder(), "name='" + imageName + "'").stream()
+        .filter(i -> imageName.equals(i.getName()))
+        .findFirst()
+        .orElse(null);
+  }
+
+  private List<YandexCloudImage> getImages(
+      YandexCloudCredentials credentials, String folder, String filter) {
+    List<ImageOuterClass.Image> images = new ArrayList<>();
+    String nextPageToken = "";
+    ImageServiceOuterClass.ListImagesRequest.Builder builder =
+        ImageServiceOuterClass.ListImagesRequest.newBuilder().setFolderId(folder);
+    if (filter != null) {
+      builder.setFilter(filter);
+    }
+    do {
+      ImageServiceOuterClass.ListImagesRequest request =
+          builder.setPageToken(nextPageToken).build();
+      ImageServiceOuterClass.ListImagesResponse response =
+          credentials.imageService().list(request);
+      images.addAll(response.getImagesList());
+      nextPageToken = response.getNextPageToken();
+    } while (!Strings.isNullOrEmpty(nextPageToken));
+    return images.stream().map(YandexCloudImage::createFromProto).collect(Collectors.toList());
+  }
+
+  public void updateImageTags(
+      YandexCloudCredentials credentials, String imageId, Map<String, String> labels) {
+    ImageServiceOuterClass.UpdateImageRequest request =
+        ImageServiceOuterClass.UpdateImageRequest.newBuilder()
+            .setImageId(imageId)
+            .setUpdateMask(FieldMask.newBuilder().addPaths("labels").build())
+            .putAllLabels(labels)
+            .build();
+    operationPoller.doSync(
+        () -> credentials.imageService().update(request), credentials, UPSERT_IMAGE_TAGS);
+  }
+
+  public void createLoadBalancer(
+      YandexCloudCredentials credentials, UpsertYandexLoadBalancerDescription description) {
+    NetworkLoadBalancerServiceOuterClass.CreateNetworkLoadBalancerRequest request =
+        YandexLoadBalancerConverter.mapToCreateRequest(description);
+    operationPoller.doSync(
+        () -> credentials.networkLoadBalancerService().create(request),
+        credentials,
+        UPSERT_LOAD_BALANCER);
+  }
+
+  public void updateLoadBalancer(
+      String id,
+      YandexCloudCredentials credentials,
+      UpsertYandexLoadBalancerDescription description) {
+    NetworkLoadBalancerServiceOuterClass.UpdateNetworkLoadBalancerRequest request =
+        YandexLoadBalancerConverter.mapToUpdateRequest(id, description);
+    operationPoller.doSync(
+        () -> credentials.networkLoadBalancerService().update(request),
+        credentials,
+        UPSERT_LOAD_BALANCER);
+  }
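The page-token loop in getImages is the pattern the class-level TODO wants applied to every list call. A generic helper could look like this (editorial sketch under that assumption, not part of this patch):

import com.google.common.base.Strings;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

class PaginationSketch {
  // Fetches every page by feeding each response's next-page token back in;
  // an empty token signals the last page.
  static <RESP, ITEM> List<ITEM> listAllPages(
      Function<String, RESP> fetchPage,           // pageToken -> response
      Function<RESP, List<ITEM>> extractItems,    // response -> items on the page
      Function<RESP, String> extractNextToken) {  // response -> next page token
    List<ITEM> items = new ArrayList<>();
    String token = "";
    do {
      RESP response = fetchPage.apply(token);
      items.addAll(extractItems.apply(response));
      token = extractNextToken.apply(response);
    } while (!Strings.isNullOrEmpty(token));
    return items;
  }
}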
+  public void deleteLoadBalancer(YandexCloudCredentials credentials, String id) {
+    NetworkLoadBalancerServiceOuterClass.DeleteNetworkLoadBalancerRequest request =
+        NetworkLoadBalancerServiceOuterClass.DeleteNetworkLoadBalancerRequest.newBuilder()
+            .setNetworkLoadBalancerId(id)
+            .build();
+    operationPoller.doSync(
+        () -> credentials.networkLoadBalancerService().delete(request),
+        credentials,
+        DELETE_LOAD_BALANCER);
+  }
+
+  public List<String> getLoadBalancerIds(YandexCloudCredentials credentials, String name) {
+    NetworkLoadBalancerServiceOuterClass.ListNetworkLoadBalancersRequest listRequest =
+        NetworkLoadBalancerServiceOuterClass.ListNetworkLoadBalancersRequest.newBuilder()
+            .setFolderId(credentials.getFolder())
+            .setFilter("name='" + name + "'")
+            .build();
+    NetworkLoadBalancerServiceOuterClass.ListNetworkLoadBalancersResponse response =
+        credentials.networkLoadBalancerService().list(listRequest);
+    return response.getNetworkLoadBalancersList().stream()
+        .map(NetworkLoadBalancerOuterClass.NetworkLoadBalancer::getId)
+        .collect(Collectors.toList());
+  }
+
+  public YandexCloudLoadBalancer getLoadBalancer(YandexCloudCredentials credentials, String name) {
+    List<String> ids = getLoadBalancerIds(credentials, name);
+    return single(ids)
+        .map(id -> convertLoadBalancer(credentials, getLoadBalancer(null, credentials, id)))
+        .orElse(null);
+  }
+
+  private NetworkLoadBalancerOuterClass.NetworkLoadBalancer getLoadBalancer(
+      String phase, YandexCloudCredentials credentials, String id) {
+    try {
+      NetworkLoadBalancerServiceOuterClass.GetNetworkLoadBalancerRequest request =
+          NetworkLoadBalancerServiceOuterClass.GetNetworkLoadBalancerRequest.newBuilder()
+              .setNetworkLoadBalancerId(id)
+              .build();
+      return credentials.networkLoadBalancerService().get(request);
+    } catch (StatusRuntimeException e) {
+      throw new IllegalStateException(
+          status(phase, "Could not resolve load balancer with id '%s'.", id));
+    }
+  }
+
+  public List<YandexCloudLoadBalancer> getLoadBalancers(YandexCloudCredentials credentials) {
+    NetworkLoadBalancerServiceOuterClass.ListNetworkLoadBalancersRequest request =
+        NetworkLoadBalancerServiceOuterClass.ListNetworkLoadBalancersRequest.newBuilder()
+            .setFolderId(credentials.getFolder())
+            .build();
+    NetworkLoadBalancerServiceOuterClass.ListNetworkLoadBalancersResponse response =
+        credentials.networkLoadBalancerService().list(request);
+    return response.getNetworkLoadBalancersList().stream()
+        .map(balancer -> convertLoadBalancer(credentials, balancer))
+        .collect(Collectors.toList());
+  }
+
+  public void resizeServerGroup(
+      YandexCloudCredentials credentials, String serverGroupId, Integer capacity) {
+    InstanceGroupServiceOuterClass.UpdateInstanceGroupRequest request =
+        YandexInstanceGroupConverter.buildResizeRequest(serverGroupId, capacity);
+    operationPoller.doSync(
+        () -> credentials.instanceGroupService().update(request), credentials, RESIZE_SERVER_GROUP);
+  }
+
+  public void restrartInstance(YandexCloudCredentials credentials, String instanceId) {
+    InstanceServiceOuterClass.RestartInstanceRequest request =
+        InstanceServiceOuterClass.RestartInstanceRequest.newBuilder()
+            .setInstanceId(instanceId)
+            .build();
+    operationPoller.doSync(
+        () -> credentials.instanceService().restart(request), credentials, REBOOT_INSTANCES);
+  }
+
+  public List<YandexCloudInstance> getInstances(YandexCloudCredentials credentials) {
+    InstanceServiceOuterClass.ListInstancesRequest request =
+        InstanceServiceOuterClass.ListInstancesRequest.newBuilder()
+            .setFolderId(credentials.getFolder())
+            .build();
+    return credentials.instanceService().list(request).getInstancesList().stream()
+        .map(YandexCloudInstance::createFromProto)
+        .collect(Collectors.toList());
+  }
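Callers are expected to go through this facade rather than the stubs directly; each mutating method builds the request and blocks on the resulting operation via the poller. A sketch of a resize flow (hypothetical caller, not part of this patch):

class ResizeFlowSketch {
  // Resolves instance-group ids by Spinnaker server-group name, then resizes
  // each one; the facade blocks until the Yandex operation completes.
  static void resize(
      YandexCloudFacade facade,
      YandexCloudCredentials credentials,
      String serverGroupName,
      int desiredSize) {
    facade
        .getServerGroupIds(credentials, serverGroupName)
        .forEach(id -> facade.resizeServerGroup(credentials, id, desiredSize));
  }
}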
+  public InstanceGroupOuterClass.InstanceGroup createInstanceGroup(
+      String phase,
+      YandexCloudCredentials credentials,
+      YandexInstanceGroupDescription description) {
+    InstanceGroupServiceOuterClass.CreateInstanceGroupRequest request =
+        YandexInstanceGroupConverter.mapToCreateRequest(description);
+
+    OperationOuterClass.Operation operation = credentials.instanceGroupService().create(request);
+    operation = operationPoller.waitDone(credentials, operation, phase);
+    try {
+      return operation.getResponse().unpack(InstanceGroupOuterClass.InstanceGroup.class);
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalStateException(
+          "Could not unpack an instance group from the create operation response", e);
+    }
+  }
+
+  public void updateInstanceGroup(
+      YandexCloudCredentials credentials,
+      String instanceGroupId,
+      YandexInstanceGroupDescription description) {
+    InstanceGroupServiceOuterClass.UpdateInstanceGroupRequest request =
+        YandexInstanceGroupConverter.mapToUpdateRequest(description, instanceGroupId);
+    operationPoller.doSync(
+        () -> credentials.instanceGroupService().update(request),
+        credentials,
+        MODIFY_INSTANCE_GROUP);
+  }
+
+  public List<String> getServerGroupIds(
+      YandexCloudCredentials credentials, String serverGroupName) {
+    InstanceGroupServiceOuterClass.ListInstanceGroupsRequest listRequest =
+        InstanceGroupServiceOuterClass.ListInstanceGroupsRequest.newBuilder()
+            .setFolderId(credentials.getFolder())
+            .setFilter("name='" + serverGroupName + "'")
+            .setView(InstanceGroupServiceOuterClass.InstanceGroupView.FULL)
+            .build();
+
+    return credentials.instanceGroupService().list(listRequest).getInstanceGroupsList().stream()
+        .map(InstanceGroupOuterClass.InstanceGroup::getId)
+        .collect(Collectors.toList());
+  }
+
+  public void deleteInstanceGroup(YandexCloudCredentials credentials, String instanceGroupId) {
+    InstanceGroupServiceOuterClass.DeleteInstanceGroupRequest request =
+        InstanceGroupServiceOuterClass.DeleteInstanceGroupRequest.newBuilder()
+            .setInstanceGroupId(instanceGroupId)
+            .build();
+    operationPoller.doSync(
+        () -> credentials.instanceGroupService().delete(request),
+        credentials,
+        DESTROY_SERVER_GROUP);
+  }
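createInstanceGroup above is the one call that needs the operation's typed result: a completed Yandex.Cloud Operation carries its payload packed in a google.protobuf.Any. The unpack step in isolation (editorial sketch with a hypothetical helper name, not part of this patch):

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
import yandex.cloud.api.operation.OperationOuterClass;

class OperationResultSketch {
  // Unpacks the typed payload of a finished operation; unpack() throws if the
  // Any holds a different message type than expected.
  static <T extends Message> T unpackResult(
      OperationOuterClass.Operation done, Class<T> type) throws InvalidProtocolBufferException {
    return done.getResponse().unpack(type);
  }
}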
+  public void enableInstanceGroup(
+      String phase,
+      YandexCloudCredentials credentials,
+      String targetGroupId,
+      Map<String, List<YandexCloudServerGroup.HealthCheckSpec>> loadBalancersSpecs) {
+    status(phase, "Registering instances with network load balancers...");
+    status(phase, "Retrieving load balancers...");
+    // Looks like the validation doesn't make any sense here...
+    Map<String, String> balancers =
+        loadBalancersSpecs.keySet().stream()
+            .map(id -> getLoadBalancer(phase, credentials, id))
+            .collect(
+                Collectors.toMap(
+                    NetworkLoadBalancerOuterClass.NetworkLoadBalancer::getId,
+                    NetworkLoadBalancerOuterClass.NetworkLoadBalancer::getName));
+
+    balancers.forEach(
+        (id, name) -> {
+          List<YandexCloudServerGroup.HealthCheckSpec> healthCheckSpecs =
+              loadBalancersSpecs.get(id);
+          status(phase, "Registering server group with load balancer '%s'...", name);
+          attachTargetGroup(phase, credentials, id, targetGroupId, healthCheckSpecs);
+          status(phase, "Done registering server group with load balancer '%s'.", name);
+        });
+  }
+
+  public void detachTargetGroup(
+      String phase,
+      YandexCloudCredentials credentials,
+      YandexCloudLoadBalancer balancer,
+      String targetGroupId) {
+    try {
+      NetworkLoadBalancerServiceOuterClass.DetachNetworkLoadBalancerTargetGroupRequest request =
+          NetworkLoadBalancerServiceOuterClass.DetachNetworkLoadBalancerTargetGroupRequest
+              .newBuilder()
+              .setNetworkLoadBalancerId(balancer.getId())
+              .setTargetGroupId(targetGroupId)
+              .build();
+      operationPoller.doSync(
+          () -> credentials.networkLoadBalancerService().detachTargetGroup(request),
+          credentials,
+          phase);
+    } catch (StatusRuntimeException e) {
+      if (e.getStatus().getCode() != Status.Code.INVALID_ARGUMENT) {
+        throw e;
+      }
+    }
+  }
+
+  public void attachTargetGroup(
+      String phase,
+      YandexCloudCredentials credentials,
+      String id,
+      String targetGroupId,
+      List<YandexCloudServerGroup.HealthCheckSpec> healthCheckSpecs) {
+    NetworkLoadBalancerOuterClass.AttachedTargetGroup.Builder targetGroup =
+        NetworkLoadBalancerOuterClass.AttachedTargetGroup.newBuilder()
+            .setTargetGroupId(targetGroupId);
+    for (int idx = 0; idx < healthCheckSpecs.size(); idx++) {
+      HealthCheckOuterClass.HealthCheck healthCheck =
+          mapHealthCheckSpec(targetGroupId, idx, healthCheckSpecs.get(idx));
+      targetGroup.addHealthChecks(healthCheck);
+    }
+
+    NetworkLoadBalancerServiceOuterClass.AttachNetworkLoadBalancerTargetGroupRequest request =
+        NetworkLoadBalancerServiceOuterClass.AttachNetworkLoadBalancerTargetGroupRequest
+            .newBuilder()
+            .setNetworkLoadBalancerId(id)
+            .setAttachedTargetGroup(targetGroup)
+            .build();
+    operationPoller.doSync(
+        () -> credentials.networkLoadBalancerService().attachTargetGroup(request),
+        credentials,
+        phase);
+  }
+
+  private static HealthCheckOuterClass.HealthCheck mapHealthCheckSpec(
+      String targetGroupId, int index, YandexCloudServerGroup.HealthCheckSpec hc) {
+    HealthCheckOuterClass.HealthCheck.Builder builder =
+        HealthCheckOuterClass.HealthCheck.newBuilder();
+    if (hc.getType() == YandexCloudServerGroup.HealthCheckSpec.Type.HTTP) {
+      builder.setHttpOptions(
+          HealthCheckOuterClass.HealthCheck.HttpOptions.newBuilder()
+              .setPort(hc.getPort())
+              .setPath(hc.getPath()));
+    } else {
+      builder.setTcpOptions(
+          HealthCheckOuterClass.HealthCheck.TcpOptions.newBuilder().setPort(hc.getPort()));
+    }
+    return builder
+        .setName(targetGroupId + "-" + index)
+        .setInterval(YandexInstanceGroupConverter.mapDuration(hc.getInterval()))
+        .setTimeout(YandexInstanceGroupConverter.mapDuration(hc.getTimeout()))
+        .setUnhealthyThreshold(hc.getUnhealthyThreshold())
+        .setHealthyThreshold(hc.getHealthyThreshold())
+        .build();
+  }
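detachTargetGroup above swallows INVALID_ARGUMENT, and getServerGroupInstanceIds below does the same for NOT_FOUND. Worth noting for reviewers: io.grpc.Status does not define value equality, so such checks must go through getCode(). In isolation (editorial sketch, not part of this patch):

import io.grpc.Status;
import io.grpc.StatusRuntimeException;

class GrpcStatusSketch {
  // io.grpc.Status deliberately does not override equals(), so compare codes.
  static boolean hasCode(StatusRuntimeException e, Status.Code code) {
    return e.getStatus().getCode() == code; // Status.Code is an enum; == is safe.
  }
}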
+  public List<YandexCloudNetwork> getNetworks(YandexCloudCredentials credentials) {
+    NetworkServiceOuterClass.ListNetworksRequest request =
+        NetworkServiceOuterClass.ListNetworksRequest.newBuilder()
+            .setFolderId(credentials.getFolder())
+            .build();
+    return credentials.networkService().list(request).getNetworksList().stream()
+        .map(network -> YandexCloudNetwork.createFromProto(network, credentials.getName()))
+        .collect(Collectors.toList());
+  }
+
+  private YandexCloudLoadBalancer convertLoadBalancer(
+      YandexCloudCredentials credentials,
+      NetworkLoadBalancerOuterClass.NetworkLoadBalancer networkLoadBalancer) {
+    Map<String, List<YandexLoadBalancerHealth>> healths =
+        networkLoadBalancer.getAttachedTargetGroupsList().stream()
+            .collect(
+                Collectors.toMap(
+                    NetworkLoadBalancerOuterClass.AttachedTargetGroup::getTargetGroupId,
+                    tg -> getTargetStates(credentials, networkLoadBalancer, tg)));
+
+    return YandexCloudLoadBalancer.createFromNetworkLoadBalancer(
+        networkLoadBalancer, credentials.getName(), healths);
+  }
+
+  private List<YandexLoadBalancerHealth> getTargetStates(
+      YandexCloudCredentials credentials,
+      NetworkLoadBalancerOuterClass.NetworkLoadBalancer networkLoadBalancer,
+      NetworkLoadBalancerOuterClass.AttachedTargetGroup tg) {
+    NetworkLoadBalancerServiceOuterClass.GetTargetStatesRequest request =
+        NetworkLoadBalancerServiceOuterClass.GetTargetStatesRequest.newBuilder()
+            .setNetworkLoadBalancerId(networkLoadBalancer.getId())
+            .setTargetGroupId(tg.getTargetGroupId())
+            .build();
+    return credentials.networkLoadBalancerService().getTargetStates(request)
+        .getTargetStatesList().stream()
+        .map(
+            state ->
+                new YandexLoadBalancerHealth(
+                    state.getAddress(),
+                    state.getSubnetId(),
+                    YandexLoadBalancerHealth.Status.valueOf(state.getStatus().name())))
+        .collect(Collectors.toList());
+  }
+
+  public List<InstanceGroupOuterClass.InstanceGroup> getServerGroups(
+      YandexCloudCredentials credentials) {
+    InstanceGroupServiceOuterClass.ListInstanceGroupsRequest request =
+        InstanceGroupServiceOuterClass.ListInstanceGroupsRequest.newBuilder()
+            .setFolderId(credentials.getFolder())
+            .setView(InstanceGroupServiceOuterClass.InstanceGroupView.FULL)
+            .build();
+    return credentials.instanceGroupService().list(request).getInstanceGroupsList();
+  }
+
+  public Optional<InstanceGroupOuterClass.InstanceGroup> getServerGroup(
+      YandexCloudCredentials credentials, String name) {
+    try {
+      InstanceGroupServiceOuterClass.ListInstanceGroupsResponse response =
+          credentials
+              .instanceGroupService()
+              .list(
+                  InstanceGroupServiceOuterClass.ListInstanceGroupsRequest.newBuilder()
+                      .setFolderId(credentials.getFolder())
+                      .setFilter("name='" + name + "'")
+                      .setView(InstanceGroupServiceOuterClass.InstanceGroupView.FULL)
+                      .build());
+      List<InstanceGroupOuterClass.InstanceGroup> instanceGroupsList =
+          response.getInstanceGroupsList();
+      if (instanceGroupsList.size() != 1) {
+        return Optional.empty();
+      }
+      return response.getInstanceGroupsList().stream().findAny();
+    } catch (StatusRuntimeException ignored) {
+      return Optional.empty();
+    }
+  }
+
+  public List<YandexCloudSubnet> getSubnets(YandexCloudCredentials credentials, String folderId) {
+    SubnetServiceOuterClass.ListSubnetsRequest request =
+        SubnetServiceOuterClass.ListSubnetsRequest.newBuilder().setFolderId(folderId).build();
+    return credentials.subnetService().list(request).getSubnetsList().stream()
+        .map(subnet -> YandexCloudSubnet.createFromProto(subnet, credentials.getName()))
+        .collect(Collectors.toList());
+  }
+
+  public List<YandexCloudServiceAccount> getServiceAccounts(
+      YandexCloudCredentials credentials, String folder) {
+    ServiceAccountServiceOuterClass.ListServiceAccountsRequest request =
+        ServiceAccountServiceOuterClass.ListServiceAccountsRequest.newBuilder()
+            .setFolderId(folder)
+            .build();
+    return credentials.serviceAccountService().list(request).getServiceAccountsList().stream()
+        .map(sa -> YandexCloudServiceAccount.createFromProto(sa, credentials.getName()))
+        .collect(Collectors.toList());
+  }
+
+  public List<YandexLogRecord> getLogRecords(
+      YandexCloudCredentials credentials, String serverGroupId) {
+    InstanceGroupServiceOuterClass.ListInstanceGroupLogRecordsRequest request =
+        InstanceGroupServiceOuterClass.ListInstanceGroupLogRecordsRequest.newBuilder()
+            .setInstanceGroupId(serverGroupId)
+            .build();
+    return credentials.instanceGroupService().listLogRecords(request).getLogRecordsList().stream()
+        .map(YandexLogRecord::createFromProto)
+        .collect(Collectors.toList());
+  }
+
+  public String getSerialPortOutput(YandexCloudCredentials credentials, String instanceId) {
+    InstanceServiceOuterClass.GetInstanceSerialPortOutputRequest request =
+        InstanceServiceOuterClass.GetInstanceSerialPortOutputRequest.newBuilder()
+            .setInstanceId(instanceId)
+            .build();
+    return credentials.instanceService().getSerialPortOutput(request).getContents();
+  }
+
+  public Set<String> getServerGroupInstanceIds(
+      YandexCloudCredentials credentials, String serverGroupId) {
+    try {
+      InstanceGroupServiceOuterClass.ListInstanceGroupInstancesRequest request =
+          InstanceGroupServiceOuterClass.ListInstanceGroupInstancesRequest.newBuilder()
+              .setInstanceGroupId(serverGroupId)
+              .build();
+      return credentials.instanceGroupService().listInstances(request).getInstancesList().stream()
+          .map(InstanceGroupOuterClass.ManagedInstance::getInstanceId)
+          .collect(Collectors.toSet());
+    } catch (StatusRuntimeException ex) {
+      if (ex.getStatus().getCode() == Status.Code.NOT_FOUND) {
+        return Collections.emptySet();
+      } else {
+        throw ex;
+      }
+    }
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/YandexOperationPoller.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/YandexOperationPoller.java
new file mode 100644
index 00000000000..738a3c428aa
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/YandexOperationPoller.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.service;
+
+import static yandex.cloud.api.operation.OperationOuterClass.Operation;
+import static yandex.cloud.api.operation.OperationServiceOuterClass.GetOperationRequest;
+
+import com.netflix.spinnaker.clouddriver.data.task.Task;
+import com.netflix.spinnaker.clouddriver.data.task.TaskRepository;
+import com.netflix.spinnaker.clouddriver.helpers.OperationPoller;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import java.time.Duration;
+import java.util.function.Supplier;
+import org.springframework.stereotype.Component;
+import yandex.cloud.api.operation.OperationServiceGrpc;
+
+@Component
+public class YandexOperationPoller {
+  private OperationPoller operationPoller;
+
+  public YandexOperationPoller() {
+    operationPoller =
+        new OperationPoller(
+            (int) Duration.ofMinutes(10).getSeconds(), (int) Duration.ofMinutes(1).getSeconds());
+  }
+
+  public Operation waitDone(YandexCloudCredentials credentials, Operation operation, String phase) {
+    Task task = TaskRepository.threadLocalTask.get();
+    String resourceString = operation.getDescription() + " [" + operation.getId() + "]";
+    task.updateStatus(phase, "Waiting on operation '" + resourceString + "'...");
+    OperationServiceGrpc.OperationServiceBlockingStub operationService =
+        credentials.operationService();
+    return operationPoller.waitForOperation(
+        () ->
+            operationService.get(
+                GetOperationRequest.newBuilder().setOperationId(operation.getId()).build()),
+        Operation::getDone,
+        null,
+        task,
+        resourceString,
+        phase);
+  }
+
+  public void doSync(
+      Supplier<Operation> request, YandexCloudCredentials credentials, String phase) {
+    waitDone(credentials, request.get(), phase);
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/converter/YandexInstanceGroupConverter.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/converter/YandexInstanceGroupConverter.java
new file mode 100644
index 00000000000..3637309f544
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/converter/YandexInstanceGroupConverter.java
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.service.converter; + +import static java.util.stream.Collectors.toList; +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass.*; +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceOuterClass.CreateInstanceGroupRequest; +import static yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceOuterClass.UpdateInstanceGroupRequest; + +import com.google.common.base.Strings; +import com.google.protobuf.FieldMask; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup; +import java.time.Duration; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import org.jetbrains.annotations.NotNull; + +public class YandexInstanceGroupConverter { + private static final long GB = 1024 * 1024 * 1024; + + @SuppressWarnings("Duplicates") + @NotNull + public static CreateInstanceGroupRequest mapToCreateRequest( + YandexInstanceGroupDescription description) { + CreateInstanceGroupRequest.Builder builder = + CreateInstanceGroupRequest.newBuilder() + .setFolderId(description.getCredentials().getFolder()) + .setInstanceTemplate(mapInstanceTemplate(description.getInstanceTemplate())) + .setScalePolicy( + mapScalePolicy(description.getAutoScalePolicy(), description.getTargetSize())) + .setDeployPolicy(mapDeployPolicy(description.getDeployPolicy())) + .setAllocationPolicy(mapAllocationPolicy(description.getZones())) + .setServiceAccountId(description.getServiceAccountId()); + + if (description.getName() != null) { + builder.setName(description.getName()); + } + if (description.getDescription() != null) { + builder.setDescription(description.getDescription()); + } + if (description.getLabels() != null) { + builder.putAllLabels(description.getLabels()); + } + if (description.getEnableTraffic() != null && description.getEnableTraffic()) { + builder.setLoadBalancerSpec(mapLoadBalancerSpec(description.getTargetGroupSpec())); + if (description.getBalancers() != null) { + builder + .getInstanceTemplateBuilder() + .putMetadata( + YandexCloudServerGroup.LOAD_BALANCERS_SPECS, + YandexCloudServerGroup.serializeLoadBalancersWithHealthChecks( + description.getBalancers())); + } + } + + if (description.getHealthCheckSpecs() != null && !description.getHealthCheckSpecs().isEmpty()) { + builder.setHealthChecksSpec(mapHealthCheckSpecs(description.getHealthCheckSpecs())); + } + return builder.build(); + } + + @SuppressWarnings("Duplicates") + @NotNull + public static UpdateInstanceGroupRequest mapToUpdateRequest( + YandexInstanceGroupDescription description, String igID) { + UpdateInstanceGroupRequest.Builder builder = + UpdateInstanceGroupRequest.newBuilder() + .setInstanceGroupId(igID) + .setUpdateMask( + FieldMask.newBuilder() + .addAllPaths( + Arrays.asList( + "name", + "labels", + "instance_template", + "scale_policy", + "deploy_policy", + "allocation_policy", + "health_checks_spec"))) + .setInstanceTemplate(mapInstanceTemplate(description.getInstanceTemplate())) + .setScalePolicy( + mapScalePolicy(description.getAutoScalePolicy(), description.getTargetSize())) + .setDeployPolicy(mapDeployPolicy(description.getDeployPolicy())) + .setAllocationPolicy(mapAllocationPolicy(description.getZones())) + .setServiceAccountId(description.getServiceAccountId()); + + if (description.getName() != null) { + builder.setName(description.getName()); + } + if (description.getDescription() != 
null) { + builder.setDescription(description.getDescription()); + } + if (description.getLabels() != null) { + builder.putAllLabels(description.getLabels()); + } + if (description.getHealthCheckSpecs() != null && !description.getHealthCheckSpecs().isEmpty()) { + builder.setHealthChecksSpec(mapHealthCheckSpecs(description.getHealthCheckSpecs())); + } + // if (description.getTargetGroupSpec() != null) { + // builder.setLoadBalancerSpec(mapLoadBalancerSpec(description.getTargetGroupSpec())); + // if (description.getBalancers() != null) { + // builder + // .getInstanceTemplateBuilder() + // .putMetadata( + // YandexCloudServerGroup.LOAD_BALANCERS_SPECS, + // YandexCloudServerGroup.serializeLoadBalancersWithHealthChecks( + // description.getBalancers())); + // } + // } + return builder.build(); + } + + private static InstanceTemplate mapInstanceTemplate( + YandexCloudServerGroup.InstanceTemplate instanceTemplate) { + InstanceTemplate.Builder builder = + InstanceTemplate.newBuilder() + .setPlatformId(instanceTemplate.getPlatformId()) + .setResourcesSpec( + ResourcesSpec.newBuilder() + .setCores(instanceTemplate.getResourcesSpec().getCores()) + .setCoreFraction(instanceTemplate.getResourcesSpec().getCoreFraction()) + .setGpus(instanceTemplate.getResourcesSpec().getGpus()) + .setMemory(instanceTemplate.getResourcesSpec().getMemory() * GB)) + .setBootDiskSpec(mapAttachedDiskSpec(instanceTemplate.getBootDiskSpec())) + .addAllNetworkInterfaceSpecs( + instanceTemplate.getNetworkInterfaceSpecs().stream() + .map(YandexInstanceGroupConverter::mapNetworkInterface) + .collect(toList())); + + if (instanceTemplate.getDescription() != null) { + builder.setDescription(instanceTemplate.getDescription()); + } + if (instanceTemplate.getLabels() != null) { + builder.putAllLabels(instanceTemplate.getLabels()); + } + if (instanceTemplate.getMetadata() != null) { + builder.putAllMetadata(instanceTemplate.getMetadata()); + } + if (instanceTemplate.getSecondaryDiskSpecs() != null) { + builder.addAllSecondaryDiskSpecs( + instanceTemplate.getSecondaryDiskSpecs().stream() + .map(YandexInstanceGroupConverter::mapAttachedDiskSpec) + .collect(toList())); + } + if (instanceTemplate.getSchedulingPolicy() != null) { + builder.setSchedulingPolicy( + SchedulingPolicy.newBuilder() + .setPreemptible(instanceTemplate.getSchedulingPolicy().isPreemptible())); + } + if (instanceTemplate.getServiceAccountId() != null) { + builder.setServiceAccountId(instanceTemplate.getServiceAccountId()); + } + + return builder.build(); + } + + private static NetworkInterfaceSpec mapNetworkInterface( + YandexCloudServerGroup.NetworkInterfaceSpec spec) { + NetworkInterfaceSpec.Builder builder = NetworkInterfaceSpec.newBuilder(); + if (spec.getNetworkId() != null) { + builder.setNetworkId(spec.getNetworkId()); + } + if (spec.getSubnetIds() != null) { + builder.addAllSubnetIds(spec.getSubnetIds()); + } + if (spec.getPrimaryV4AddressSpec() != null) { + builder.setPrimaryV4AddressSpec( + mapAddressSpec(spec.getPrimaryV4AddressSpec(), IpVersion.IPV4)); + } + if (spec.getPrimaryV6AddressSpec() != null) { + builder.setPrimaryV6AddressSpec( + mapAddressSpec(spec.getPrimaryV6AddressSpec(), IpVersion.IPV6)); + } + return builder.build(); + } + + @NotNull + private static PrimaryAddressSpec mapAddressSpec( + YandexCloudServerGroup.PrimaryAddressSpec addressSpec, IpVersion ipVersion) { + PrimaryAddressSpec.Builder builder = PrimaryAddressSpec.newBuilder(); + if (addressSpec.isOneToOneNat()) { + 
builder.setOneToOneNatSpec(OneToOneNatSpec.newBuilder().setIpVersion(ipVersion).build()); + } + return builder.build(); + } + + public static AttachedDiskSpec mapAttachedDiskSpec(YandexCloudServerGroup.AttachedDiskSpec spec) { + AttachedDiskSpec.DiskSpec.Builder diskSpec = + AttachedDiskSpec.DiskSpec.newBuilder() + .setTypeId(spec.getDiskSpec().getTypeId()) + .setSize(spec.getDiskSpec().getSize() * GB); + if (spec.getDiskSpec().getDescription() != null) { + diskSpec.setDescription(spec.getDiskSpec().getDescription()); + } + if (!Strings.isNullOrEmpty(spec.getDiskSpec().getImageId())) { + diskSpec.setImageId(spec.getDiskSpec().getImageId()); + } + if (!Strings.isNullOrEmpty(spec.getDiskSpec().getSnapshotId())) { + diskSpec.setSnapshotId(spec.getDiskSpec().getSnapshotId()); + } + AttachedDiskSpec.Builder builder = + AttachedDiskSpec.newBuilder() + .setMode( + spec.getMode() != null + ? AttachedDiskSpec.Mode.valueOf(spec.getMode().name()) + : AttachedDiskSpec.Mode.READ_WRITE) + .setDiskSpec(diskSpec); + + if (spec.getDeviceName() != null) { + builder.setDeviceName(spec.getDeviceName()); + } + return builder.build(); + } + + @SuppressWarnings("Duplicates") + private static LoadBalancerSpec mapLoadBalancerSpec( + YandexCloudServerGroup.TargetGroupSpec targetGroupSpec) { + TargetGroupSpec.Builder builder = TargetGroupSpec.newBuilder(); + if (targetGroupSpec != null) { + if (targetGroupSpec.getName() != null) { + builder.setName(targetGroupSpec.getName()); + } + if (targetGroupSpec.getDescription() != null) { + builder.setDescription(targetGroupSpec.getDescription()); + } + if (targetGroupSpec.getLabels() != null) { + builder.putAllLabels(targetGroupSpec.getLabels()); + } + } + return LoadBalancerSpec.newBuilder().setTargetGroupSpec(builder).build(); + } + + private static HealthChecksSpec mapHealthCheckSpecs( + List healthCheckSpecs) { + return HealthChecksSpec.newBuilder() + .addAllHealthCheckSpecs( + healthCheckSpecs.stream() + .map(YandexInstanceGroupConverter::mapHealthCheckSpec) + .collect(toList())) + .build(); + } + + @NotNull + private static HealthCheckSpec mapHealthCheckSpec(YandexCloudServerGroup.HealthCheckSpec hc) { + HealthCheckSpec.Builder builder = HealthCheckSpec.newBuilder(); + if (hc.getType() == YandexCloudServerGroup.HealthCheckSpec.Type.HTTP) { + builder.setHttpOptions( + HealthCheckSpec.HttpOptions.newBuilder().setPort(hc.getPort()).setPath(hc.getPath())); + } else { + builder.setTcpOptions(HealthCheckSpec.TcpOptions.newBuilder().setPort(hc.getPort())); + } + return builder + .setInterval(mapDuration(hc.getInterval())) + .setTimeout(mapDuration(hc.getTimeout())) + .setUnhealthyThreshold(hc.getUnhealthyThreshold()) + .setHealthyThreshold(hc.getHealthyThreshold()) + .build(); + } + + private static DeployPolicy mapDeployPolicy(YandexCloudServerGroup.DeployPolicy deployPolicy) { + return DeployPolicy.newBuilder() + .setMaxCreating(deployPolicy.getMaxCreating()) + .setMaxDeleting(deployPolicy.getMaxDeleting()) + .setMaxExpansion(deployPolicy.getMaxExpansion()) + .setMaxUnavailable(deployPolicy.getMaxUnavailable()) + .setStartupDuration(mapDuration(deployPolicy.getStartupDuration())) + .build(); + } + + private static ScalePolicy mapScalePolicy( + YandexCloudServerGroup.AutoScalePolicy autoScalePolicy, Long targetSize) { + ScalePolicy.Builder builder = ScalePolicy.newBuilder(); + if (autoScalePolicy != null) { + ScalePolicy.AutoScale.Builder asBuilder = + ScalePolicy.AutoScale.newBuilder() + .setInitialSize(autoScalePolicy.getInitialSize()) + 
.setMinZoneSize(autoScalePolicy.getMinZoneSize()) + .setMaxSize(autoScalePolicy.getMaxSize()); + + asBuilder.setMeasurementDuration(mapDuration(autoScalePolicy.getMeasurementDuration())); + asBuilder.setWarmupDuration(mapDuration(autoScalePolicy.getWarmupDuration())); + asBuilder.setStabilizationDuration(mapDuration(autoScalePolicy.getStabilizationDuration())); + if (autoScalePolicy.getCpuUtilizationRule() != null) { + asBuilder.setCpuUtilizationRule( + ScalePolicy.CpuUtilizationRule.newBuilder() + .setUtilizationTarget( + autoScalePolicy.getCpuUtilizationRule().getUtilizationTarget())); + } + if (autoScalePolicy.getCustomRules() != null) { + autoScalePolicy.getCustomRules().stream() + .map( + rule -> + ScalePolicy.CustomRule.newBuilder() + .setRuleType( + ScalePolicy.CustomRule.RuleType.valueOf(rule.getRuleType().name())) + .setMetricType( + ScalePolicy.CustomRule.MetricType.valueOf(rule.getMetricType().name())) + .setMetricName(rule.getMetricName()) + .setTarget(rule.getTarget()) + .build()) + .forEach(asBuilder::addCustomRules); + } + builder.setAutoScale(asBuilder); + } else { + builder.setFixedScale( + ScalePolicy.FixedScale.newBuilder() + .setSize(targetSize == null || targetSize < 0 ? 0 : targetSize)); + } + return builder.build(); + } + + public static UpdateInstanceGroupRequest buildResizeRequest( + String serverGroupId, Integer capacity) { + return UpdateInstanceGroupRequest.newBuilder() + .setInstanceGroupId(serverGroupId) + .setUpdateMask(FieldMask.newBuilder().addPaths("scale_policy")) + .setScalePolicy( + ScalePolicy.newBuilder() + .setFixedScale(ScalePolicy.FixedScale.newBuilder().setSize(capacity))) + .build(); + } + + @NotNull + public static com.google.protobuf.Duration.Builder mapDuration(Duration duration) { + com.google.protobuf.Duration.Builder builder = com.google.protobuf.Duration.newBuilder(); + if (duration == null) { + return builder; + } + return builder.setSeconds(duration.getSeconds()); + } + + @NotNull + private static AllocationPolicy mapAllocationPolicy(Set zones) { + return AllocationPolicy.newBuilder() + .addAllZones( + zones.stream() + .map(zone -> AllocationPolicy.Zone.newBuilder().setZoneId(zone).build()) + .collect(toList())) + .build(); + } +} diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/converter/YandexLoadBalancerConverter.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/converter/YandexLoadBalancerConverter.java new file mode 100644 index 00000000000..a1a51db217d --- /dev/null +++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/clouddriver/yandex/service/converter/YandexLoadBalancerConverter.java @@ -0,0 +1,116 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.service.converter;
+
+import static yandex.cloud.api.loadbalancer.v1.NetworkLoadBalancerOuterClass.*;
+import static yandex.cloud.api.loadbalancer.v1.NetworkLoadBalancerServiceOuterClass.*;
+
+import com.google.common.base.Strings;
+import com.google.protobuf.FieldMask;
+import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider;
+import com.netflix.spinnaker.clouddriver.yandex.deploy.description.UpsertYandexLoadBalancerDescription;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer;
+import java.util.function.Consumer;
+
+public class YandexLoadBalancerConverter {
+  public static CreateNetworkLoadBalancerRequest mapToCreateRequest(
+      UpsertYandexLoadBalancerDescription description) {
+    CreateNetworkLoadBalancerRequest.Builder builder =
+        CreateNetworkLoadBalancerRequest.newBuilder()
+            .setFolderId(description.getCredentials().getFolder())
+            .setRegionId(YandexCloudProvider.REGION)
+            .setType(NetworkLoadBalancer.Type.valueOf(description.getLbType().name()));
+
+    if (description.getName() != null) {
+      builder.setName(description.getName());
+    }
+    if (description.getDescription() != null) {
+      builder.setDescription(description.getDescription());
+    }
+    if (description.getLabels() != null) {
+      builder.putAllLabels(description.getLabels());
+    }
+    if (description.getListeners() != null) {
+      addListenerSpecs(description, builder::addListenerSpecs);
+    }
+    return builder.build();
+  }
+
+  private static void addListenerSpecs(
+      UpsertYandexLoadBalancerDescription description, Consumer<ListenerSpec.Builder> builder) {
+    description
+        .getListeners()
+        .forEach(listener -> builder.accept(getListenerBuilder(description.getLbType(), listener)));
+  }
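mapToUpdateRequest below (like its instance-group counterpart) only adds a FieldMask path when the corresponding description field is set, so fields absent from the request keep their current server-side values. The idiom in isolation (editorial sketch, not part of this patch):

import com.google.protobuf.FieldMask;

class FieldMaskSketch {
  // Only fields named in the mask are modified by an Update call; a field that
  // is set on the request but missing from the mask is ignored.
  static FieldMask maskFor(String name, String description) {
    FieldMask.Builder mask = FieldMask.newBuilder();
    if (name != null) {
      mask.addPaths("name");
    }
    if (description != null) {
      mask.addPaths("description");
    }
    return mask.build();
  }
}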
+  private static ListenerSpec.Builder getListenerBuilder(
+      YandexCloudLoadBalancer.BalancerType type, YandexCloudLoadBalancer.Listener listener) {
+    ListenerSpec.Builder spec =
+        ListenerSpec.newBuilder()
+            .setName(listener.getName())
+            .setPort(listener.getPort())
+            .setTargetPort(listener.getTargetPort())
+            .setProtocol(Listener.Protocol.valueOf(listener.getProtocol().name()));
+    IpVersion ipVersion =
+        listener.getIpVersion() == null
+            ? IpVersion.IPV4
+            : IpVersion.valueOf(listener.getIpVersion().name());
+    if (type == YandexCloudLoadBalancer.BalancerType.INTERNAL) {
+      InternalAddressSpec.Builder addressSpec =
+          InternalAddressSpec.newBuilder()
+              .setSubnetId(listener.getSubnetId())
+              .setIpVersion(ipVersion);
+      if (!Strings.isNullOrEmpty(listener.getAddress())) {
+        addressSpec.setAddress(listener.getAddress());
+      }
+      spec.setInternalAddressSpec(addressSpec);
+    } else {
+      ExternalAddressSpec.Builder addressSpec =
+          ExternalAddressSpec.newBuilder().setIpVersion(ipVersion);
+      if (!Strings.isNullOrEmpty(listener.getAddress())) {
+        addressSpec.setAddress(listener.getAddress());
+      }
+      spec.setExternalAddressSpec(addressSpec);
+    }
+    return spec;
+  }
+
+  public static UpdateNetworkLoadBalancerRequest mapToUpdateRequest(
+      String networkLoadBalancerId, UpsertYandexLoadBalancerDescription description) {
+    FieldMask.Builder updateMask = FieldMask.newBuilder();
+    UpdateNetworkLoadBalancerRequest.Builder builder =
+        UpdateNetworkLoadBalancerRequest.newBuilder()
+            .setNetworkLoadBalancerId(networkLoadBalancerId);
+    if (description.getName() != null) {
+      updateMask.addPaths("name");
+      builder.setName(description.getName());
+    }
+    if (description.getDescription() != null) {
+      updateMask.addPaths("description");
+      builder.setDescription(description.getDescription());
+    }
+    if (description.getLabels() != null) {
+      updateMask.addPaths("labels");
+      builder.putAllLabels(description.getLabels());
+    }
+    if (description.getListeners() != null) {
+      updateMask.addPaths("listener_specs");
+      addListenerSpecs(description, builder::addListenerSpecs);
+    }
+    return builder.setUpdateMask(updateMask).build();
+  }
+}
diff --git a/clouddriver-yandex/src/main/java/com/netflix/spinnaker/config/YandexCloudConfiguration.java b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/config/YandexCloudConfiguration.java
new file mode 100644
index 00000000000..61b46755fe1
--- /dev/null
+++ b/clouddriver-yandex/src/main/java/com/netflix/spinnaker/config/YandexCloudConfiguration.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.netflix.spinnaker.config; + +import com.netflix.spinnaker.clouddriver.yandex.deploy.converter.YandexOperationConvertersFactory; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentialsInitializer; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexOperationPoller; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Import; +import org.springframework.scheduling.annotation.EnableScheduling; + +@Configuration +@EnableConfigurationProperties +@EnableScheduling +@ConditionalOnProperty("yandex.enabled") +@ComponentScan("com.netflix.spinnaker.clouddriver.yandex") +@Import(value = {YandexCloudCredentialsInitializer.class, YandexOperationConvertersFactory.class}) +public class YandexCloudConfiguration { + @Bean + YandexOperationPoller yandexOperationPoller() { + return new YandexOperationPoller(); + } +} diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/controller/TestConfig.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/controller/TestConfig.java new file mode 100644 index 00000000000..93345f37b87 --- /dev/null +++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/controller/TestConfig.java @@ -0,0 +1,109 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.clouddriver.yandex.controller; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudImage; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServiceAccount; +import com.netflix.spinnaker.clouddriver.yandex.model.YandexLogRecord; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade; +import com.netflix.spinnaker.config.YandexCloudConfiguration; +import java.time.Instant; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Import; +import org.springframework.context.annotation.Primary; +import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupOuterClass; + +@Configuration +@Import(value = {YandexCloudConfiguration.class}) +class TestConfig { + public static final String TEST_SA_ID = "test-sa-id"; + public static final String ACCOUNT = "test-account"; + public static final String SA_NAME = "sa-name"; + public static final String FOLDER_ID = "folder-id"; + public static final String IMAGE_NAME = "ubuntu"; + public static final String IMAGE_ID = "image-id"; + public static final String SERVER_GROUP_NAME = "test-server-group"; + public static final String ACTIVITY = "done"; + public static final String SERVER_GROUP_ID = "server-group-id"; + + @Bean + @Primary + public List yandexCloudCredentials( + AccountCredentialsRepository repository) { + YandexCloudCredentials mock = mock(YandexCloudCredentials.class); + when(mock.getName()).thenReturn(ACCOUNT); + when(mock.getFolder()).thenReturn(FOLDER_ID); + repository.save(ACCOUNT, mock); + return Collections.singletonList(mock); + } + + @Bean + @Primary + public YandexCloudFacade yandexCloudFacade() { + YandexCloudFacade mock = mock(YandexCloudFacade.class); + when(mock.getServiceAccounts(any(), eq(FOLDER_ID))).thenReturn(testServiceAccounts()); + when(mock.getImages(any(), eq(FOLDER_ID))).thenReturn(testImages()); + when(mock.getServerGroups(any())).thenReturn(testGroups()); + when(mock.getLogRecords(any(), eq(SERVER_GROUP_ID))).thenReturn(testLogRecords()); + return mock; + } + + private static List testImages() { + YandexCloudImage image = + new YandexCloudImage( + IMAGE_ID, + IMAGE_NAME, + "desc", + YandexCloudProvider.REGION, + System.currentTimeMillis(), + Collections.singletonMap("key", "value")); + return Collections.singletonList(image); + } + + private static List testServiceAccounts() { + YandexCloudServiceAccount sa = new YandexCloudServiceAccount(TEST_SA_ID, SA_NAME, ACCOUNT); + return Collections.singletonList(sa); + } + + private static List testGroups() { + InstanceGroupOuterClass.InstanceGroup ig = + InstanceGroupOuterClass.InstanceGroup.newBuilder() + .setId(SERVER_GROUP_ID) + .setName(SERVER_GROUP_NAME) + .setFolderId(FOLDER_ID) + .build(); + return Collections.singletonList(ig); + } + + private static List testLogRecords() { + YandexLogRecord record1 = new YandexLogRecord(Instant.now(), "init"); + YandexLogRecord record2 = new 
YandexLogRecord(Instant.now(), ACTIVITY); + return Arrays.asList(record1, record2); + } +} diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexControllersTest.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexControllersTest.java new file mode 100644 index 00000000000..09b8f1677f4 --- /dev/null +++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/controller/YandexControllersTest.java @@ -0,0 +1,93 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.controller; + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +import com.netflix.spinnaker.clouddriver.Main; +import com.netflix.spinnaker.clouddriver.yandex.YandexCloudProvider; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.junit.jupiter.SpringExtension; +import org.springframework.test.web.servlet.MockMvc; + +@AutoConfigureMockMvc +@ExtendWith(SpringExtension.class) +@SpringBootTest(classes = {Main.class, TestConfig.class}) +@TestPropertySource( + properties = { + "redis.enabled = false", + "sql.enabled = false", + "spring.application.name = clouddriver", + "yandex.enabled = true", + "services.front50.baseUrl = http://localhost", + "services.fiat.enabled = false", + "services.fiat.baseUrl = http://localhost" + }) +class YandexControllersTest { + @Autowired private MockMvc mockMvc; + + @Test + void serviceAccountsList() throws Exception { + mockMvc + .perform(get("/yandex/serviceAcounts/{account}", TestConfig.ACCOUNT)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$", Matchers.hasSize(1))) + .andExpect(jsonPath("$[0].name", Matchers.equalTo(TestConfig.SA_NAME))); + } + + @Test + void imageFindByName() throws Exception { + mockMvc + .perform(get("/yandex/images/find?q={name}", TestConfig.IMAGE_NAME)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$", Matchers.hasSize(1))) + .andExpect(jsonPath("$[0].imageId", Matchers.equalTo(TestConfig.IMAGE_ID))); + } + + @Test + void imageFindByTag() throws Exception { + mockMvc + .perform(get("/yandex/images/find?tag:key=value")) + .andExpect(status().isOk()) + .andExpect(jsonPath("$", Matchers.hasSize(1))) + .andExpect(jsonPath("$[0].imageId", Matchers.equalTo(TestConfig.IMAGE_ID))); + } + + @Test + void scalingActivities() throws Exception { + mockMvc + .perform( + get( + 
"/applications/{app}/clusters/{account}/{cluster}/yandex/serverGroups/{serverGroupName}/scalingActivities?region={region}", + "anyApp", + TestConfig.ACCOUNT, + "anyCluster", + TestConfig.SERVER_GROUP_NAME, + YandexCloudProvider.REGION)) + .andExpect(status().isOk()) + .andExpect(jsonPath("$", Matchers.hasSize(2))) + .andExpect(jsonPath("$[1].description", Matchers.equalTo(TestConfig.ACTIVITY))); + } +} diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/AbstractYandexDeployTest.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/AbstractYandexDeployTest.java new file mode 100644 index 00000000000..404fd6649b3 --- /dev/null +++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/AbstractYandexDeployTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.AccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider; +import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository; +import com.netflix.spinnaker.clouddriver.yandex.deploy.converter.OperationConverter; +import com.netflix.spinnaker.clouddriver.yandex.deploy.description.CredentialsChangeable; +import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.springframework.http.converter.json.Jackson2ObjectMapperBuilder; + +public abstract class AbstractYandexDeployTest { + private static final List ACCOUNTS = Collections.singletonList("test-cred"); + private final AccountCredentialsRepository accountCredentialsRepository = + new MapBackedAccountCredentialsRepository(); + protected AccountCredentialsProvider accountCredentialsProvider = + new DefaultAccountCredentialsProvider(accountCredentialsRepository); + protected ObjectMapper objectMapper = + Jackson2ObjectMapperBuilder.json() + .featuresToDisable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) + .modules(new JavaTimeModule()) + .build(); + + protected AbstractYandexDeployTest() { + ACCOUNTS.forEach( + account -> accountCredentialsRepository.update(account, createCredentials(account))); + } + + private static YandexCloudCredentials createCredentials(String name) { + YandexCloudCredentials cred = new YandexCloudCredentials(); + cred.setName(name); + cred.setFolder("folder"); + return cred; + } + + 
@SuppressWarnings("unchecked") + public Map getDescription(String resourcePath) throws IOException { + JsonNode json = + objectMapper.readTree(AbstractYandexDeployTest.class.getResourceAsStream(resourcePath)); + return (Map) objectMapper.convertValue(json, Map.class); + } + + public T getObject(String fileName, Class clazz) + throws IOException { + Map input = getDescription(fileName); + TestCredentialSupport credSupport = new TestCredentialSupport(); + credSupport.setAccountCredentialsProvider(accountCredentialsProvider); + credSupport.setObjectMapper(objectMapper); + return new OperationConverter>(null, null) + .convertDescription(input, credSupport, clazz); + } +} diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/TestCredentialSupport.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/TestCredentialSupport.java new file mode 100644 index 00000000000..914aeb222a5 --- /dev/null +++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/TestCredentialSupport.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.yandex.deploy; + +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.security.AbstractAtomicOperationsCredentialsSupport; +import com.netflix.spinnaker.orchestration.OperationDescription; +import java.util.Map; +import javax.annotation.Nullable; + +public class TestCredentialSupport extends AbstractAtomicOperationsCredentialsSupport { + @Nullable + @Override + public AtomicOperation convertOperation(Map input) { + throw new UnsupportedOperationException(); + } + + @Override + public OperationDescription convertDescription(Map input) { + throw new UnsupportedOperationException(); + } +} diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/CreateYandexServerGroupAtomicOperationConverterTest.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/CreateYandexServerGroupAtomicOperationConverterTest.java new file mode 100644 index 00000000000..8ae2bf078c1 --- /dev/null +++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/converter/CreateYandexServerGroupAtomicOperationConverterTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2020 YANDEX LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.deploy.converter;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.netflix.spinnaker.clouddriver.deploy.DeployAtomicOperation;
+import com.netflix.spinnaker.clouddriver.yandex.deploy.AbstractYandexDeployTest;
+import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription;
+import java.io.IOException;
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class CreateYandexServerGroupAtomicOperationConverterTest extends AbstractYandexDeployTest {
+  private final OperationConverter<YandexInstanceGroupDescription, DeployAtomicOperation> converter;
+
+  CreateYandexServerGroupAtomicOperationConverterTest() {
+    converter = new YandexOperationConvertersFactory.CreateServerGroup();
+    converter.setAccountCredentialsProvider(accountCredentialsProvider);
+    converter.setObjectMapper(objectMapper);
+  }
+
+  @Test
+  void convertDescription() throws IOException {
+    Map<String, Object> description = getDescription("/operations/create_server_group.json");
+    YandexInstanceGroupDescription result = converter.convertDescription(description);
+
+    assertThat(result.getApplication()).isNotNull();
+    assertThat(result.getEnableTraffic()).isEqualTo(false);
+    assertThat(result.getInstanceTemplate().getResourcesSpec().getMemory()).isEqualTo(2);
+  }
+}
diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/YandexInstanceGroupConverterTest.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/YandexInstanceGroupConverterTest.java
new file mode 100644
index 00000000000..25148556572
--- /dev/null
+++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/deploy/description/YandexInstanceGroupConverterTest.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.deploy.description;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+import com.netflix.spinnaker.clouddriver.yandex.deploy.AbstractYandexDeployTest;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudServerGroup;
+import com.netflix.spinnaker.clouddriver.yandex.provider.agent.YandexServerGroupCachingAgent;
+import com.netflix.spinnaker.clouddriver.yandex.service.converter.YandexInstanceGroupConverter;
+import java.io.IOException;
+import org.junit.jupiter.api.Test;
+import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceOuterClass;
+
+class YandexInstanceGroupConverterTest extends AbstractYandexDeployTest {
+  private YandexCloudServerGroup.InstanceTemplate convert(YandexInstanceGroupDescription ig) {
+    InstanceGroupServiceOuterClass.CreateInstanceGroupRequest request =
+        YandexInstanceGroupConverter.mapToCreateRequest(ig);
+    return YandexServerGroupCachingAgent.convertInstanceTemplate(request.getInstanceTemplate());
+  }
+
+  @Test
+  void mapToCreateRequest() throws IOException {
+    YandexInstanceGroupDescription ig =
+        getObject("/operations/create_server_group.json", YandexInstanceGroupDescription.class);
+    YandexCloudServerGroup.InstanceTemplate templateNoNulls =
+        convert(ig); // the first conversion replaces nulls with empty values
+    ig.setInstanceTemplate(templateNoNulls);
+    // A second round trip must not change the template any further.
+    assertEquals(templateNoNulls, convert(ig));
+  }
+}
diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/provider/KeysTest.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/provider/KeysTest.java
new file mode 100644
index 00000000000..9268de7813a
--- /dev/null
+++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/provider/KeysTest.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class KeysTest {
+  @Test
+  void testParseKey() {
+    String key = Keys.getClusterKey("account", "app", "app-stack-detail");
+    assertEquals("yandex:clusters:account:app:app-stack-detail", key);
+    Map<String, String> result = Keys.parse(key);
+
+    assertNotNull(result);
+    assertEquals("app", result.get("application"));
+    assertEquals("yandex", result.get("provider"));
+    assertEquals("clusters", result.get("type"));
+    assertEquals("stack", result.get("stack"));
+    assertEquals("detail", result.get("detail"));
+  }
+}
diff --git a/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkLoadBalancerCachingAgentTest.java b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkLoadBalancerCachingAgentTest.java
new file mode 100644
index 00000000000..4a5c798da27
--- /dev/null
+++ b/clouddriver-yandex/src/test/java/com/netflix/spinnaker/clouddriver/yandex/provider/agent/YandexNetworkLoadBalancerCachingAgentTest.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 YANDEX LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.netflix.spinnaker.clouddriver.yandex.provider.agent;
+
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.netflix.spectator.api.NoopRegistry;
+import com.netflix.spinnaker.cats.cache.WriteableCache;
+import com.netflix.spinnaker.cats.mem.InMemoryCache;
+import com.netflix.spinnaker.cats.provider.DefaultProviderCache;
+import com.netflix.spinnaker.clouddriver.cache.OnDemandAgent;
+import com.netflix.spinnaker.clouddriver.yandex.model.YandexCloudLoadBalancer;
+import com.netflix.spinnaker.clouddriver.yandex.security.YandexCloudCredentials;
+import com.netflix.spinnaker.clouddriver.yandex.service.YandexCloudFacade;
+import java.util.HashMap;
+import java.util.Map;
+import org.jetbrains.annotations.NotNull;
+import org.junit.jupiter.api.Test;
+
+class YandexNetworkLoadBalancerCachingAgentTest {
+  private static final String LOADBALANCER_NAME = "loadbalancer-test";
+  private static final String ACCOUNT_NAME = "test-account";
+
+  @Test
+  void handleEviction() {
+    YandexCloudCredentials cred = mock(YandexCloudCredentials.class);
+    when(cred.getName()).thenReturn(ACCOUNT_NAME);
+    YandexCloudFacade facade = mock(YandexCloudFacade.class);
+    YandexCloudLoadBalancer balancer = new YandexCloudLoadBalancer();
+    balancer.setAccount(ACCOUNT_NAME);
+    balancer.setName(LOADBALANCER_NAME);
+    when(facade.getLoadBalancer(any(), anyString())).thenReturn(balancer);
+    cred.setName(ACCOUNT_NAME);
+    YandexNetworkLoadBalancerCachingAgent agent =
+        new YandexNetworkLoadBalancerCachingAgent(
+            cred, new ObjectMapper(), new NoopRegistry(), facade);
+    WriteableCache cache = new InMemoryCache();
+    DefaultProviderCache providerCache = new DefaultProviderCache(cache);
+    Map<String, Object> params = buildTestRequest(true);
+    OnDemandAgent.OnDemandResult result = agent.handle(providerCache, params);
+    assertNotNull(result);
+    assertNull(result.getCacheResult());
+    assertFalse(result.getEvictions().isEmpty());
+  }
+
+  @Test
+  void handleReplacement() {
+    YandexCloudCredentials cred = mock(YandexCloudCredentials.class);
+    when(cred.getName()).thenReturn(ACCOUNT_NAME);
+    YandexCloudFacade facade = mock(YandexCloudFacade.class);
+    when(facade.getLoadBalancer(any(), anyString())).thenReturn(new YandexCloudLoadBalancer());
+    cred.setName(ACCOUNT_NAME);
+    YandexNetworkLoadBalancerCachingAgent agent =
+        new YandexNetworkLoadBalancerCachingAgent(
+            cred, new ObjectMapper(), new NoopRegistry(), facade);
+    WriteableCache cache = new InMemoryCache();
+    DefaultProviderCache providerCache = new DefaultProviderCache(cache);
+    Map<String, Object> params = buildTestRequest(false);
+    OnDemandAgent.OnDemandResult result = agent.handle(providerCache, params);
+    assertNotNull(result);
+    assertFalse(result.getCacheResult().getCacheResults().isEmpty());
+    assertTrue(result.getCacheResult().getEvictions().isEmpty());
+  }
+
+  @NotNull
+  private Map<String, Object> buildTestRequest(boolean evict) {
+    Map<String, Object> params = new HashMap<>();
+    params.put("loadBalancerName", LOADBALANCER_NAME);
+    params.put("region", "test-region");
+    params.put("account", ACCOUNT_NAME);
+    params.put("vpcId", "");
+    params.put("evict", evict);
+    return params;
+  }
+}
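Read together, the deploy tests above exercise a single pipeline: the JSON fixture added next is deserialized into the untyped description map, converted to a typed YandexInstanceGroupDescription with its credentials resolved, and finally mapped to the Yandex Cloud gRPC create request. The sketch below only recombines calls already shown in those tests into one flow; the class name is hypothetical and the snippet is illustrative, not part of this change set.

package com.netflix.spinnaker.clouddriver.yandex.deploy;

import com.netflix.spinnaker.clouddriver.yandex.deploy.description.YandexInstanceGroupDescription;
import com.netflix.spinnaker.clouddriver.yandex.service.converter.YandexInstanceGroupConverter;
import java.io.IOException;
import yandex.cloud.api.compute.v1.instancegroup.InstanceGroupServiceOuterClass;

// Hypothetical helper; every call below is taken verbatim from the tests above.
class CreateServerGroupFlowSketch extends AbstractYandexDeployTest {
  InstanceGroupServiceOuterClass.CreateInstanceGroupRequest createRequestFromFixture()
      throws IOException {
    // JSON fixture -> typed description; the "credentials": "test-cred" field is
    // resolved against the account registered by AbstractYandexDeployTest.
    YandexInstanceGroupDescription description =
        getObject("/operations/create_server_group.json", YandexInstanceGroupDescription.class);

    // Typed description -> Yandex Cloud instance-group create request.
    return YandexInstanceGroupConverter.mapToCreateRequest(description);
  }
}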
diff --git a/clouddriver-yandex/src/test/resources/operations/create_server_group.json b/clouddriver-yandex/src/test/resources/operations/create_server_group.json
new file mode 100644
index 00000000000..fcb3ff6a21c
--- /dev/null
+++ b/clouddriver-yandex/src/test/resources/operations/create_server_group.json
@@ -0,0 +1,57 @@
+{
+  "healthCheckSpecs": [],
+  "serviceAccountId": "sa-id",
+  "enableTraffic": false,
+  "stack": "viewer",
+  "interestingHealthProviderNames": [
+    "Yandex"
+  ],
+  "instanceTemplate": {
+    "bootDiskSpec": {
+      "mode": "READ_WRITE",
+      "diskSpec": {
+        "imageId": "image-id",
+        "size": 10,
+        "typeId": "network-hdd"
+      }
+    },
+    "metadata": {},
+    "platformId": "standard-v1",
+    "networkInterfaceSpecs": [
+      {
+        "primaryV4AddressSpec": {},
+        "subnetIds": [
+          "subnet-id"
+        ]
+      }
+    ],
+    "resourcesSpec": {
+      "cores": 2,
+      "memory": 2,
+      "coreFraction": 100
+    },
+    "labels": {}
+  },
+  "credentials": "test-cred",
+  "availabilityZones": {},
+  "source": {},
+  "zones": [
+    "ru-central1-a"
+  ],
+  "labels": {},
+  "freeFormDetails": "",
+  "targetSize": 1,
+  "application": "test-app",
+  "provider": "yandex",
+  "cloudProvider": "yandex",
+  "region": "ru-central1",
+  "strategy": "",
+  "user": [
+    "anonymous"
+  ],
+  "account": "test-account",
+  "deployPolicy": {
+    "startupDuration": 0,
+    "maxUnavailable": 1
+  }
+}
diff --git a/gradle.properties b/gradle.properties
index c16de6c7128..97d04b3fa76 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,5 +1,27 @@
+korkVersion=7.249.0
+fiatVersion=1.53.0
 org.gradle.parallel=true
+spinnakerGradleVersion=8.32.1
+targetJava17=true
+kotlinVersion=1.6.21
-jackson.version=2.9.2
+# To enable a composite reference to a project, set the
+# project property `<project>Composite=true`.
+#
+# This can be done either as
+# * a command line flag, e.g. `-PkorkComposite=true`
+# * a project property via gradle.properties
+# * a global project property via ~/.gradle/gradle.properties
+#
+# The composite project must be checked out in a sibling directory
+# to this project, matching the name of the project
+# e.g. '../kork'
+#
+#fiatComposite=true
+#korkComposite=true
-includePlatforms=ALL
+
+## This memory setting ONLY impacts build time; tests are forked to a separate JVM
+## process. If you hit JVM OOMs, adjust the per-test JVM memory via testJvmMaxMemory below.
+org.gradle.jvmargs=-Xmx4g -Xms4g
+
+testJvmMaxMemory=4g
diff --git a/gradle/buildViaTravis.sh b/gradle/buildViaTravis.sh
deleted file mode 100755
index 7dda5257500..00000000000
--- a/gradle/buildViaTravis.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-# This script will build the project.
-
-GRADLE="./gradlew --no-daemon --max-workers=1"
-export GRADLE_OPTS="-Xmx1g -Xms1g"
-
-if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
-  echo -e "Build Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]"
-  $GRADLE -Prelease.useLastTag=true build
-elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then
-  echo -e 'Build Branch with Snapshot => Branch ['$TRAVIS_BRANCH']'
-  $GRADLE -Prelease.travisci=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -x test build snapshot --stacktrace
-elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then
-  echo -e 'Build Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']'
-  case "$TRAVIS_TAG" in
-    version-*)
-      ;; # Ignore Spinnaker product release tags.
- *-rc\.*) - $GRADLE -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -x test candidate --stacktrace - ;; - *) - $GRADLE -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -x test final --stacktrace - ;; - esac -else - echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']' - $GRADLE -Prelease.useLastTag=true build -fi - diff --git a/gradle/installViaTravis.sh b/gradle/installViaTravis.sh deleted file mode 100755 index b5c4a351e0b..00000000000 --- a/gradle/installViaTravis.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# This script will build the project. - -GRADLE="./gradlew --no-daemon --max-workers=1" -export GRADLE_OPTS="-Xmx1g -Xms1g" - -if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - echo -e "Assemble Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]" - $GRADLE assemble -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then - echo -e 'Assemble Branch with Snapshot => Branch ['$TRAVIS_BRANCH']' - $GRADLE -Prelease.travisci=true -x test assemble -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then - echo -e 'Assemble Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']' - $GRADLE -Prelease.travisci=true -Prelease.useLastTag=true -x test assemble -else - echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']' - $GRADLE assemble -fi - diff --git a/gradle/kotlin-test.gradle b/gradle/kotlin-test.gradle new file mode 100644 index 00000000000..0c4ab04750e --- /dev/null +++ b/gradle/kotlin-test.gradle @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +apply plugin: "kotlin" + +dependencies { + testImplementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlinVersion" + + testImplementation "org.junit.jupiter:junit-jupiter-api" + testImplementation "org.junit.platform:junit-platform-runner" + testImplementation "org.assertj:assertj-core" + testImplementation "io.strikt:strikt-core" + testImplementation "dev.minutest:minutest" + testImplementation "io.mockk:mockk" + + testRuntimeOnly "org.junit.platform:junit-platform-launcher" +} + +test { + useJUnitPlatform { + includeEngines "junit-vintage", "junit-jupiter" + } +} + +compileTestKotlin { + kotlinOptions { + languageVersion = "1.6" + jvmTarget = "17" + } +} diff --git a/gradle/kotlin.gradle b/gradle/kotlin.gradle index f84b4bcbd50..4ef86cedad1 100644 --- a/gradle/kotlin.gradle +++ b/gradle/kotlin.gradle @@ -14,7 +14,8 @@ * limitations under the License. 
*/ -apply plugin: "nebula.kotlin" +apply plugin: "kotlin" +apply plugin: "kotlin-spring" configurations.all { resolutionStrategy { @@ -28,7 +29,7 @@ configurations.all { compileKotlin { kotlinOptions { - languageVersion = "1.2" - jvmTarget = "1.8" + languageVersion = "1.6" + jvmTarget = "17" } } diff --git a/gradle/spek.gradle b/gradle/spek.gradle deleted file mode 100644 index 75eefbd8f1d..00000000000 --- a/gradle/spek.gradle +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License") - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -apply plugin: "org.junit.platform.gradle.plugin" - -repositories { - jcenter() - maven { url "http://dl.bintray.com/jetbrains/spek" } -} - -dependencies { - spinnaker.group('spek') -} - -junitPlatform { - platformVersion junitVersion - filters { - engines { - include "spek", "junit-vintage", "junit-jupiter" - } - } -} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 91ca28c8b80..943f0cbfa75 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 16d28051c9c..508322917bd 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.7-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.1-bin.zip +networkTimeout=10000 zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index cccdd3d517f..65dcd68d65c 100755 --- a/gradlew +++ b/gradlew @@ -1,78 +1,129 @@ -#!/usr/bin/env sh +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ############################################################################## -## -## Gradle start up script for UN*X -## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# ############################################################################## # Attempt to set APP_HOME + # Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + # Determine the Java command to use to start the JVM. 
if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -81,7 +132,7 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" + JAVACMD=java which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the @@ -89,84 +140,105 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=$((i+1)) + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=$(save "$@") - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then - cd "$(dirname "$0")" +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. 
+ +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" fi +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat index e95643d6a2c..6689b85beec 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -1,4 +1,20 @@ -@if "%DEBUG%" == "" @echo off +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -9,19 +25,23 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init +if %ERRORLEVEL% equ 0 goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. @@ -35,7 +55,7 @@ goto fail set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe -if exist "%JAVA_EXE%" goto init +if exist "%JAVA_EXE%" goto execute echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% @@ -45,38 +65,26 @@ echo location of your Java installation. goto fail -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. 
-set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/halconfig/README.md b/halconfig/README.md new file mode 100644 index 00000000000..7b53194afe4 --- /dev/null +++ b/halconfig/README.md @@ -0,0 +1,6 @@ +This directory contains skeleton clouddriver configs to which Halyard concatenates +its generated deployment-specific config. + +These configs are **deprecated** and in general should not be further updated. To +set a default config value, either set the value in `clouddriver-web/config/clouddriver.yml` +or set a default in the code reading the config property. diff --git a/halconfig/clouddriver-bootstrap.yml b/halconfig/clouddriver-bootstrap.yml index 3109e93c847..47d61f065ae 100644 --- a/halconfig/clouddriver-bootstrap.yml +++ b/halconfig/clouddriver-bootstrap.yml @@ -6,7 +6,3 @@ server: redis: connection: ${services.redisBootstrap.baseUrl:redis://localhost:6379} - -caching: - redis: - hashingEnabled: true diff --git a/halconfig/clouddriver-caching.yml b/halconfig/clouddriver-caching.yml index 367a7c724f5..030784aa74c 100644 --- a/halconfig/clouddriver-caching.yml +++ b/halconfig/clouddriver-caching.yml @@ -1,10 +1,5 @@ # halconfig server: - port: ${services.clouddriver-caching.port:7002} - address: ${services.clouddriver-caching.host:localhost} - -caching: - redis: - hashingEnabled: true - writeEnabled: true + port: ${services.clouddriverCaching.port:7002} + address: ${services.clouddriverCaching.host:localhost} diff --git a/halconfig/clouddriver-ro-deck.yml b/halconfig/clouddriver-ro-deck.yml new file mode 100644 index 00000000000..7cc59d564c0 --- /dev/null +++ b/halconfig/clouddriver-ro-deck.yml @@ -0,0 +1,8 @@ +# halconfig + +server: + port: ${services.clouddriverRoDeck.port:7002} + address: ${services.clouddriverRoDeck.host:localhost} + +caching: + writeEnabled: false diff --git a/halconfig/clouddriver-ro.yml b/halconfig/clouddriver-ro.yml index 5869a5d6701..38dfcd53776 100644 --- a/halconfig/clouddriver-ro.yml +++ b/halconfig/clouddriver-ro.yml @@ -1,10 +1,8 @@ # halconfig server: - port: ${services.clouddriver-ro.port:7002} - address: ${services.clouddriver-ro.host:localhost} + port: ${services.clouddriverRo.port:7002} + address: ${services.clouddriverRo.host:localhost} caching: - redis: - hashingEnabled: false writeEnabled: false diff --git a/halconfig/clouddriver-rw.yml b/halconfig/clouddriver-rw.yml index 814d4888459..73f3022e821 100644 --- a/halconfig/clouddriver-rw.yml +++ b/halconfig/clouddriver-rw.yml @@ -1,10 +1,8 @@ # halconfig server: - port: 
${services.clouddriver-rw.port:7002}
-  address: ${services.clouddriver-rw.host:localhost}
+  port: ${services.clouddriverRw.port:7002}
+  address: ${services.clouddriverRw.host:localhost}
 
 caching:
-  redis:
-    hashingEnabled: false
   writeEnabled: false
diff --git a/halconfig/clouddriver.yml b/halconfig/clouddriver.yml
index eaf47a06c7e..b3a29fc92c8 100644
--- a/halconfig/clouddriver.yml
+++ b/halconfig/clouddriver.yml
@@ -1,14 +1,5 @@
 # halconfig
 
-admin.tasks.shutdownWaitSeconds: 600 # 10 minutes
-
 server:
   port: ${services.clouddriver.port:7002}
   address: ${services.clouddriver.host:localhost}
-
-redis:
-  connection: ${services.redis.baseUrl:redis://localhost:6379}
-
-caching:
-  redis:
-    hashingEnabled: true
diff --git a/lombok.config b/lombok.config
index b139174f6bf..0533542b8da 100644
--- a/lombok.config
+++ b/lombok.config
@@ -1,2 +1,3 @@
 lombok.nonNull.exceptionType = IllegalArgumentException
 lombok.accessors.chain = true
+lombok.addLombokGeneratedAnnotation = true
diff --git a/settings.gradle b/settings.gradle
index f845bf5c8c9..7f7f9c6c524 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -14,99 +14,67 @@
 * limitations under the License.
 */
 
+['fiat', 'kork'].each { prj ->
+  String propName = "${prj}Composite"
+  String projectPath = "../$prj"
+  if (settings.ext.has(propName) && Boolean.parseBoolean(settings.ext.get(propName) as String)) {
+    includeBuild projectPath
+  }
+}
+
 rootProject.name = "clouddriver"
 
-enum Platform {
-  ALL,
-  APPENGINE,
-  AWS, // Any thoughts on renaming this provider to EC2?
-  AZURE,
-  CLOUDFOUNDRY,
-  DCOS,
-  GCE,
-  GCP,
-  KUBERNETES,
-  OPENSTACK,
-  ORACLE,
-  TITUS,
-}
+def cloudProviderProjects = [
+//  'alicloud' : [':clouddriver-alicloud'],
+  'appengine': [':clouddriver-appengine', ':clouddriver-google-common'],
+  'aws': [':clouddriver-aws', ':clouddriver-ecs', ':clouddriver-eureka', ':clouddriver-lambda'], // Pull cd-eureka into a separate "Discover" platform, along with cd-consul?
+  'aws-minimal': [':clouddriver-aws', ':clouddriver-eureka'],
+  'azure': [':clouddriver-azure'],
+  'cloudfoundry': [':clouddriver-cloudfoundry'],
+//  'dcos': [':clouddriver-dcos'],
+  'gce': [':clouddriver-consul', ':clouddriver-google', ':clouddriver-google-common'],
+  'huaweicloud': [':clouddriver-huaweicloud'],
+  'kubernetes': [':clouddriver-kubernetes'],
+  'oracle': [':clouddriver-oracle'],
+  'tencentcloud': [':clouddriver-tencentcloud'],
+  'yandex': [':clouddriver-yandex'],
+  'cloudrun': [':clouddriver-cloudrun', ':clouddriver-google-common']
+]
+cloudProviderProjects.put('gcp', cloudProviderProjects['appengine'] + cloudProviderProjects['gce'] + cloudProviderProjects['kubernetes'])
+cloudProviderProjects.put('titus', cloudProviderProjects['aws-minimal'] + ':clouddriver-titus' + ':clouddriver-docker')
+cloudProviderProjects.put('all', cloudProviderProjects.collectMany {_, proj -> proj}) // Include all cloud providers.
 
-// Core requirements.
-include 'cats:cats-core',
-  'cats:cats-redis',
-  'cats:cats-dynomite',
-  'cats:cats-test',
-  'clouddriver-artifacts', // Will need to be detangled.
-  'clouddriver-core',
-  'clouddriver-elasticsearch',
-  'clouddriver-security', // Detangle.
-  'clouddriver-web'
+String icp = settings.ext.has('includeCloudProviders') ?
+    settings.ext.get('includeCloudProviders') : 'all'
 
-def includePlatform(Platform platform) {
-  switch(platform) {
-    case Platform.APPENGINE:
-      include 'clouddriver-appengine',
-        'clouddriver-google-common'
-      break
-    case Platform.AWS:
-      include 'clouddriver-aws',
-        'clouddriver-ecs',
-        'clouddriver-eureka', // Pull into separate "Discover" platform, along with cd-consul?
-        'clouddriver-elasticsearch-aws'
-      break
-    case Platform.AZURE:
-      include 'clouddriver-azure'
-      break
-    case Platform.CLOUDFOUNDRY:
-      include 'clouddriver-cloudfoundry'
-      break
-    case Platform.DCOS:
-      include 'clouddriver-dcos'
-      break
-    case Platform.GCE:
-      include 'clouddriver-consul',
-        'clouddriver-google',
-        'clouddriver-google-common'
-      break
-    case Platform.GCP:
-      includePlatform(Platform.APPENGINE)
-      includePlatform(Platform.GCE)
-      includePlatform(Platform.KUBERNETES)
-      break
-    case Platform.KUBERNETES:
-      include 'clouddriver-kubernetes',
-        'clouddriver-docker'
-      break
-    case Platform.OPENSTACK:
-      include 'clouddriver-openstack'
-      break
-    case Platform.ORACLE:
-      include 'clouddriver-oracle'
-      break
-    case Platform.TITUS:
-      includePlatform(Platform.AWS)
-      include 'clouddriver-titus'
-      break
-    case Platform.ALL: // fallthrough
-    default:
-      includePlatform(Platform.APPENGINE)
-      includePlatform(Platform.AWS)
-      includePlatform(Platform.AZURE)
-      includePlatform(Platform.CLOUDFOUNDRY)
-      includePlatform(Platform.DCOS)
-      includePlatform(Platform.GCE)
-      includePlatform(Platform.GCP)
-      includePlatform(Platform.KUBERNETES)
-      includePlatform(Platform.OPENSTACK)
-      includePlatform(Platform.ORACLE)
-      includePlatform(Platform.TITUS)
-      break
-  }
-}
+// Set as an ext variable so that build scripts can access it
+gradle.ext.includedCloudProviderProjects = icp.split(',')
+  .collectMany { cloudProviderProjects[it.toLowerCase()] }
+  .toSet()
+  .toList()
-includePlatforms.split(",").each {
-  includePlatform(it)
-}
+include 'cats:cats-core',
+  'cats:cats-redis',
+  'cats:cats-test',
+  'cats:cats-sql',
+  'clouddriver-api',
+  'clouddriver-api-tck',
+  'clouddriver-artifacts',
+  'clouddriver-bom',
+  'clouddriver-configserver',
+  'clouddriver-core',
+  'clouddriver-core-tck',
+  'clouddriver-elasticsearch',
+  'clouddriver-event',
+  'clouddriver-integration',
+  'clouddriver-saga',
+  'clouddriver-saga-test',
+  'clouddriver-security',
+  'clouddriver-sql',
+  'clouddriver-sql-mysql',
+  'clouddriver-sql-postgres',
+  'clouddriver-web'
+
+include(*gradle.ext.includedCloudProviderProjects)
 
 def setBuildFile(project) {
   project.buildFileName = "${project.name}.gradle"
@@ -118,4 +86,3 @@
 rootProject.children.each {
   setBuildFile(it)
 }
-
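With the settings.gradle change above, the set of cloud-provider modules is now selected via the `includeCloudProviders` project property (a comma-separated list of keys from the cloudProviderProjects map, defaulting to 'all') instead of the old includePlatforms enum. Assuming the property is supplied the same way the composite flags in gradle.properties are (via -P or a gradle.properties entry; the task name here is only illustrative), a build restricted to the AWS and Kubernetes providers would look like:

    ./gradlew -PincludeCloudProviders=aws,kubernetes build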